aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/ABI/testing/sysfs-class-net-mesh9
-rw-r--r--Documentation/DocBook/80211.tmpl2
-rw-r--r--Documentation/devicetree/bindings/net/mdio-mux-gpio.txt127
-rw-r--r--Documentation/devicetree/bindings/net/mdio-mux.txt136
-rw-r--r--Documentation/networking/batman-adv.txt19
-rw-r--r--Documentation/networking/ip-sysctl.txt19
-rw-r--r--Documentation/networking/mac80211-auth-assoc-deauth.txt10
-rw-r--r--Documentation/networking/stmmac.txt29
-rw-r--r--Documentation/nfc/nfc-hci.txt155
-rw-r--r--Documentation/sysctl/net.txt7
-rw-r--r--MAINTAINERS36
-rw-r--r--arch/arm/mach-at91/at91rm9200.c10
-rw-r--r--arch/arm/mach-at91/at91rm9200_devices.c4
-rw-r--r--arch/arm/mach-at91/include/mach/hardware.h1
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/ixp46x_ts.h3
-rw-r--r--arch/sparc/Kconfig1
-rw-r--r--arch/sparc/Makefile1
-rw-r--r--arch/sparc/net/Makefile4
-rw-r--r--arch/sparc/net/bpf_jit.h68
-rw-r--r--arch/sparc/net/bpf_jit_asm.S205
-rw-r--r--arch/sparc/net/bpf_jit_comp.c802
-rw-r--r--crypto/ablkcipher.c12
-rw-r--r--crypto/aead.c12
-rw-r--r--crypto/ahash.c6
-rw-r--r--crypto/blkcipher.c6
-rw-r--r--crypto/crypto_user.c22
-rw-r--r--crypto/pcompress.c6
-rw-r--r--crypto/rng.c6
-rw-r--r--crypto/shash.c6
-rw-r--r--drivers/atm/ambassador.c2
-rw-r--r--drivers/atm/horizon.c5
-rw-r--r--drivers/atm/idt77252.c2
-rw-r--r--drivers/block/drbd/drbd_receiver.c6
-rw-r--r--drivers/hv/ring_buffer.c31
-rw-r--r--drivers/infiniband/core/cma.c6
-rw-r--r--drivers/infiniband/core/netlink.c3
-rw-r--r--drivers/infiniband/core/ucma.c10
-rw-r--r--drivers/isdn/capi/capi.c50
-rw-r--r--drivers/isdn/capi/capidrv.c8
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c44
-rw-r--r--drivers/isdn/gigaset/capi.c118
-rw-r--r--drivers/isdn/gigaset/common.c59
-rw-r--r--drivers/isdn/gigaset/dummyll.c2
-rw-r--r--drivers/isdn/gigaset/ev-layer.c319
-rw-r--r--drivers/isdn/gigaset/gigaset.h30
-rw-r--r--drivers/isdn/gigaset/i4l.c12
-rw-r--r--drivers/isdn/gigaset/isocdata.c12
-rw-r--r--drivers/isdn/gigaset/ser-gigaset.c21
-rw-r--r--drivers/isdn/gigaset/usb-gigaset.c19
-rw-r--r--drivers/isdn/hardware/eicon/capifunc.c6
-rw-r--r--drivers/isdn/hardware/eicon/capimain.c4
-rw-r--r--drivers/isdn/hardware/eicon/diddfunc.c8
-rw-r--r--drivers/isdn/hardware/eicon/diva_didd.c6
-rw-r--r--drivers/isdn/hardware/eicon/divamnt.c6
-rw-r--r--drivers/isdn/hardware/eicon/divasfunc.c4
-rw-r--r--drivers/isdn/hardware/eicon/divasi.c8
-rw-r--r--drivers/isdn/hardware/eicon/divasmain.c6
-rw-r--r--drivers/isdn/hardware/eicon/idifunc.c10
-rw-r--r--drivers/isdn/hardware/eicon/mntfunc.c8
-rw-r--r--drivers/isdn/hardware/eicon/platform.h3
-rw-r--r--drivers/isdn/hardware/mISDN/avmfritz.c5
-rw-r--r--drivers/isdn/hardware/mISDN/hfc_multi.h15
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c587
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c5
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNipac.c17
-rw-r--r--drivers/isdn/hardware/mISDN/netjet.c5
-rw-r--r--drivers/isdn/hardware/mISDN/speedfax.c5
-rw-r--r--drivers/isdn/hardware/mISDN/w6692.c5
-rw-r--r--drivers/isdn/hysdn/hysdn_proclog.c10
-rw-r--r--drivers/isdn/mISDN/core.c16
-rw-r--r--drivers/isdn/mISDN/dsp.h4
-rw-r--r--drivers/isdn/mISDN/dsp_cmx.c19
-rw-r--r--drivers/isdn/mISDN/dsp_dtmf.c19
-rw-r--r--drivers/isdn/mISDN/layer1.c36
-rw-r--r--drivers/isdn/mISDN/layer2.c120
-rw-r--r--drivers/isdn/mISDN/tei.c72
-rw-r--r--drivers/net/bonding/bond_main.c52
-rw-r--r--drivers/net/caif/caif_hsi.c350
-rw-r--r--drivers/net/caif/caif_shmcore.c4
-rw-r--r--drivers/net/can/dev.c31
-rw-r--r--drivers/net/can/pch_can.c12
-rw-r--r--drivers/net/can/sja1000/ems_pci.c14
-rw-r--r--drivers/net/can/sja1000/kvaser_pci.c13
-rw-r--r--drivers/net/can/sja1000/peak_pci.c12
-rw-r--r--drivers/net/can/sja1000/plx_pci.c13
-rw-r--r--drivers/net/ethernet/8390/Kconfig1
-rw-r--r--drivers/net/ethernet/8390/ax88796.c1
-rw-r--r--drivers/net/ethernet/8390/etherh.c1
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c54
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c20
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c.h59
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c9
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_hw.c569
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_hw.h983
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c1007
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c17
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c171
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.h17
-rw-r--r--drivers/net/ethernet/atheros/atlx/atlx.c15
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c46
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h45
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c474
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h383
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c38
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h268
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h219
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c733
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c1111
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c114
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h39
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c273
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h15
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c75
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h2
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c61
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c142
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi_reg.h6
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c316
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.h11
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c6
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.c535
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.h1
-rw-r--r--drivers/net/ethernet/cadence/macb.c1
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c22
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_pp.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c34
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c301
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c27
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c443
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c17
-rw-r--r--drivers/net/ethernet/dec/tulip/xircom_cb.c280
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c416
-rw-r--r--drivers/net/ethernet/dlink/dl2k.h19
-rw-r--r--drivers/net/ethernet/dlink/sundance.c12
-rw-r--r--drivers/net/ethernet/dnet.c1
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h30
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c110
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h44
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c249
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h74
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c259
-rw-r--r--drivers/net/ethernet/fealnx.c14
-rw-r--r--drivers/net/ethernet/freescale/fec.c1
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c1
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c13
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h3
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c30
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth_ethtool.c1
-rw-r--r--drivers/net/ethernet/intel/Kconfig21
-rw-r--r--drivers/net/ethernet/intel/e100.c2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c28
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c21
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c41
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h8
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h51
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c88
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h72
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c780
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c12
-rw-r--r--drivers/net/ethernet/intel/e1000e/manage.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c184
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c58
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c115
-rw-r--r--drivers/net/ethernet/intel/igb/Makefile1
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h21
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c189
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c381
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c92
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c829
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h19
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c13
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c51
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c35
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c255
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c20
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c273
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h50
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c18
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c30
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c12
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c1
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c1
-rw-r--r--drivers/net/ethernet/marvell/sky2.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Kconfig12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Makefile1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c255
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_resources.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c84
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h20
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h36
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c62
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c2
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c7
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c67
-rw-r--r--drivers/net/ethernet/neterion/s2io.c14
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c24
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.h15
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c9
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c6
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h1
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c64
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c11
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c32
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h63
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c73
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c201
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c51
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c2
-rw-r--r--drivers/net/ethernet/rdc/r6040.c76
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c21
-rw-r--r--drivers/net/ethernet/realtek/8139too.c136
-rw-r--r--drivers/net/ethernet/realtek/r8169.c698
-rw-r--r--drivers/net/ethernet/renesas/Kconfig7
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c114
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h5
-rw-r--r--drivers/net/ethernet/s6gmac.c2
-rw-r--r--drivers/net/ethernet/silan/sc92031.c34
-rw-r--r--drivers/net/ethernet/sis/sis190.c26
-rw-r--r--drivers/net/ethernet/sis/sis900.c375
-rw-r--r--drivers/net/ethernet/smsc/epic100.c403
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c1
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c48
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c42
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h49
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c143
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c36
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c18
-rw-r--r--drivers/net/ethernet/sun/niu.c2
-rw-r--r--drivers/net/ethernet/sun/sungem.c4
-rw-r--r--drivers/net/ethernet/sun/sunhme.c18
-rw-r--r--drivers/net/ethernet/sun/sunhme.h1
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c6
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c13
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c1
-rw-r--r--drivers/net/ethernet/ti/tlan.c2
-rw-r--r--drivers/net/ethernet/via/via-rhine.c12
-rw-r--r--drivers/net/ethernet/via/via-velocity.c9
-rw-r--r--drivers/net/ethernet/wiznet/Kconfig73
-rw-r--r--drivers/net/ethernet/wiznet/Makefile2
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c808
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c720
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c1
-rw-r--r--drivers/net/ethernet/xscale/Kconfig6
-rw-r--r--drivers/net/ethernet/xscale/Makefile1
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/Kconfig6
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/Makefile3
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/caleb.c136
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/caleb.h22
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/enp2611.c232
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.c212
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.h115
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.uc408
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.ucode130
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.uc272
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.ucode98
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixpdev.c437
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixpdev.h29
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/ixpdev_priv.h57
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/pm3386.c351
-rw-r--r--drivers/net/ethernet/xscale/ixp2000/pm3386.h29
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c29
-rw-r--r--drivers/net/hippi/rrunner.c83
-rw-r--r--drivers/net/hyperv/netvsc.c41
-rw-r--r--drivers/net/hyperv/netvsc_drv.c6
-rw-r--r--drivers/net/irda/Kconfig4
-rw-r--r--drivers/net/irda/donauboe.c2
-rw-r--r--drivers/net/irda/sh_irda.c2
-rw-r--r--drivers/net/irda/sh_sir.c2
-rw-r--r--drivers/net/irda/smsc-ircc2.c1
-rw-r--r--drivers/net/macvlan.c76
-rw-r--r--drivers/net/macvtap.c57
-rw-r--r--drivers/net/phy/Kconfig19
-rw-r--r--drivers/net/phy/Makefile2
-rw-r--r--drivers/net/phy/bcm63xx.c5
-rw-r--r--drivers/net/phy/davicom.c7
-rw-r--r--drivers/net/phy/dp83640.c31
-rw-r--r--drivers/net/phy/marvell.c18
-rw-r--r--drivers/net/phy/mdio-mux-gpio.c142
-rw-r--r--drivers/net/phy/mdio-mux.c192
-rw-r--r--drivers/net/phy/mdio_bus.c32
-rw-r--r--drivers/net/phy/spi_ks8995.c1
-rw-r--r--drivers/net/ppp/pptp.c4
-rw-r--r--drivers/net/team/Kconfig11
-rw-r--r--drivers/net/team/Makefile1
-rw-r--r--drivers/net/team/team.c523
-rw-r--r--drivers/net/team/team_mode_activebackup.c20
-rw-r--r--drivers/net/team/team_mode_loadbalance.c174
-rw-r--r--drivers/net/team/team_mode_roundrobin.c2
-rw-r--r--drivers/net/tokenring/3c359.c14
-rw-r--r--drivers/net/tokenring/Kconfig6
-rw-r--r--drivers/net/tokenring/lanstreamer.c10
-rw-r--r--drivers/net/tokenring/olympic.c14
-rw-r--r--drivers/net/tokenring/tms380tr.c6
-rw-r--r--drivers/net/tokenring/tmspci.c14
-rw-r--r--drivers/net/usb/usbnet.c1
-rw-r--r--drivers/net/virtio_net.c64
-rw-r--r--drivers/net/wan/dscc4.c13
-rw-r--r--drivers/net/wan/lmc/lmc_main.c15
-rw-r--r--drivers/net/wimax/i2400m/Kconfig3
-rw-r--r--drivers/net/wimax/i2400m/usb-rx.c2
-rw-r--r--drivers/net/wimax/i2400m/usb.c2
-rw-r--r--drivers/net/wireless/Kconfig3
-rw-r--r--drivers/net/wireless/Makefile4
-rw-r--r--drivers/net/wireless/adm8211.c17
-rw-r--r--drivers/net/wireless/at76c50x-usb.c6
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.c44
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h31
-rw-r--r--drivers/net/wireless/ath/ath5k/attach.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c26
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c17
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.c6
-rw-r--r--drivers/net/wireless/ath/ath5k/dma.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/initvals.c5
-rw-r--r--drivers/net/wireless/ath/ath5k/led.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/pci.c29
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c9
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c10
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/sysfs.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/Makefile3
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c419
-rw-r--r--drivers/net/wireless/ath/ath6kl/common.h4
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.c30
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.h34
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.h1
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif-ops.h34
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.h6
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc-ops.h113
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc.h98
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_mbox.c (renamed from drivers/net/wireless/ath/ath6kl/htc.c)85
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c1713
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c59
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c6
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c2
-rw-r--r--drivers/net/wireless/ath/ath6kl/testmode.c5
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c25
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c785
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c80
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h40
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile5
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c57
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c56
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c116
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c91
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h16
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h7
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c17
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c170
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h44
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c84
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.h8
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.c46
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.h45
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c300
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h104
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_pri_detector.c452
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_pri_detector.h52
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c40
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h14
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c30
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c38
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c24
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c169
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h19
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c40
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c165
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c40
-rw-r--r--drivers/net/wireless/ath/carl9170/cmd.h6
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c2
-rw-r--r--drivers/net/wireless/ath/main.c4
-rw-r--r--drivers/net/wireless/ath/regd.c4
-rw-r--r--drivers/net/wireless/atmel.c3
-rw-r--r--drivers/net/wireless/atmel_pci.c13
-rw-r--r--drivers/net/wireless/b43/main.c16
-rw-r--r--drivers/net/wireless/b43/sdio.c2
-rw-r--r--drivers/net/wireless/b43/xmit.c5
-rw-r--r--drivers/net/wireless/b43legacy/main.c2
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c15
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h1
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c1
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c4
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c127
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c6
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c8
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/channel.c36
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/d11.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c6
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c41
-rw-r--r--drivers/net/wireless/brcm80211/include/brcm_hw_ids.h40
-rw-r--r--drivers/net/wireless/hostap/hostap_main.c3
-rw-r--r--drivers/net/wireless/hostap/hostap_pci.c16
-rw-r--r--drivers/net/wireless/hostap/hostap_plx.c16
-rw-r--r--drivers/net/wireless/ipw2x00/ipw.h23
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c142
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.h10
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c13
-rw-r--r--drivers/net/wireless/ipw2x00/libipw.h55
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c4
-rw-r--r--drivers/net/wireless/iwlegacy/4965-rs.c2
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig33
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c124
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c141
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c282
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c247
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-calib.c37
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-calib.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-devices.c755
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hw.h14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-lib.c164
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c84
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h36
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rx.c319
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rxon.c688
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-sta.c147
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tt.c18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c191
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c1243
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h221
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h (renamed from drivers/net/wireless/iwlwifi/iwl-shared.h)282
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c1480
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h234
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h34
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c509
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h191
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c221
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.h25
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c246
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h67
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-file.h15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c21
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-mac80211.c221
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-modparams.h124
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.c44
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.h21
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h26
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-pci.c65
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.c288
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.h129
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c85
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.h9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c74
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-testmode.c105
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h233
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c550
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c334
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie.c570
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h163
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-ucode.c172
-rw-r--r--drivers/net/wireless/iwmc3200wifi/Kconfig2
-rw-r--r--drivers/net/wireless/libertas/Makefile1
-rw-r--r--drivers/net/wireless/libertas/decl.h11
-rw-r--r--drivers/net/wireless/libertas/dev.h10
-rw-r--r--drivers/net/wireless/libertas/firmware.c223
-rw-r--r--drivers/net/wireless/libertas/if_cs.c90
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c229
-rw-r--r--drivers/net/wireless/libertas/if_spi.c11
-rw-r--r--drivers/net/wireless/libertas/if_usb.c265
-rw-r--r--drivers/net/wireless/libertas/main.c117
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c57
-rw-r--r--drivers/net/wireless/mwifiex/11n.c17
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c36
-rw-r--r--drivers/net/wireless/mwifiex/Kconfig11
-rw-r--r--drivers/net/wireless/mwifiex/Makefile3
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c112
-rw-r--r--drivers/net/wireless/mwifiex/cfp.c31
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c63
-rw-r--r--drivers/net/wireless/mwifiex/debugfs.c2
-rw-r--r--drivers/net/wireless/mwifiex/decl.h1
-rw-r--r--drivers/net/wireless/mwifiex/fw.h31
-rw-r--r--drivers/net/wireless/mwifiex/init.c66
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h52
-rw-r--r--drivers/net/wireless/mwifiex/join.c64
-rw-r--r--drivers/net/wireless/mwifiex/main.c132
-rw-r--r--drivers/net/wireless/mwifiex/main.h54
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c3
-rw-r--r--drivers/net/wireless/mwifiex/scan.c80
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c3
-rw-r--r--drivers/net/wireless/mwifiex/sdio.h8
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c100
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c80
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c15
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c120
-rw-r--r--drivers/net/wireless/mwifiex/sta_rx.c15
-rw-r--r--drivers/net/wireless/mwifiex/sta_tx.c12
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c21
-rw-r--r--drivers/net/wireless/mwifiex/usb.c1052
-rw-r--r--drivers/net/wireless/mwifiex/usb.h99
-rw-r--r--drivers/net/wireless/mwifiex/util.c22
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c18
-rw-r--r--drivers/net/wireless/mwl8k.c13
-rw-r--r--drivers/net/wireless/orinoco/fw.c7
-rw-r--r--drivers/net/wireless/p54/main.c11
-rw-r--r--drivers/net/wireless/p54/p54.h1
-rw-r--r--drivers/net/wireless/p54/p54pci.c13
-rw-r--r--drivers/net/wireless/p54/p54usb.c197
-rw-r--r--drivers/net/wireless/p54/p54usb.h3
-rw-r--r--drivers/net/wireless/p54/txrx.c3
-rw-r--r--drivers/net/wireless/prism54/oid_mgt.c6
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c13
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c13
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c48
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c28
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c25
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00config.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c11
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00leds.c16
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c10
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c47
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c13
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c1
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c13
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c1
-rw-r--r--drivers/net/wireless/rtlwifi/cam.c5
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rc.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c290
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h35
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/dm.h35
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.c19
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.h7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/def.h16
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.c185
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/dm.h51
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/hw.c6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/phy.c7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/trx.h8
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/def.h7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/dm.c156
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/dm.h44
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/fw.h6
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/phy.c7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/sw.c19
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h87
-rw-r--r--drivers/net/wireless/ti/Kconfig14
-rw-r--r--drivers/net/wireless/ti/Makefile4
-rw-r--r--drivers/net/wireless/ti/wl1251/Kconfig (renamed from drivers/net/wireless/wl1251/Kconfig)0
-rw-r--r--drivers/net/wireless/ti/wl1251/Makefile (renamed from drivers/net/wireless/wl1251/Makefile)0
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.c (renamed from drivers/net/wireless/wl1251/acx.c)0
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.h (renamed from drivers/net/wireless/wl1251/acx.h)0
-rw-r--r--drivers/net/wireless/ti/wl1251/boot.c (renamed from drivers/net/wireless/wl1251/boot.c)0
-rw-r--r--drivers/net/wireless/ti/wl1251/boot.h (renamed from drivers/net/wireless/wl1251/boot.h)0
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.c (renamed from drivers/net/wireless/wl1251/cmd.c)0
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.h (renamed from drivers/net/wireless/wl1251/cmd.h)0
-rw-r--r--drivers/net/wireless/ti/wl1251/debugfs.c (renamed from drivers/net/wireless/wl1251/debugfs.c)0
-rw-r--r--drivers/net/wireless/ti/wl1251/debugfs.h (renamed from drivers/net/wireless/wl1251/debugfs.h)0
-rw-r--r--drivers/net/wireless/ti/wl1251/event.c (renamed from drivers/net/wireless/wl1251/event.c)0
-rw-r--r--drivers/net/wireless/ti/wl1251/event.h (renamed from drivers/net/wireless/wl1251/event.h)0
-rw-r--r--drivers/net/wireless/ti/wl1251/init.c (renamed from drivers/net/wireless/wl1251/init.c)0
-rw-r--r--drivers/net/wireless/ti/wl1251/init.h (renamed from drivers/net/wireless/wl1251/init.h)0
-rw-r--r--drivers/net/wireless/ti/wl1251/io.c (renamed from drivers/net/wireless/wl1251/io.c)0
-rw-r--r--drivers/net/wireless/ti/wl1251/io.h (renamed from drivers/net/wireless/wl1251/io.h)0
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c (renamed from drivers/net/wireless/wl1251/main.c)0
-rw-r--r--drivers/net/wireless/ti/wl1251/ps.c (renamed from drivers/net/wireless/wl1251/ps.c)0
-rw-r--r--drivers/net/wireless/ti/wl1251/ps.h (renamed from drivers/net/wireless/wl1251/ps.h)0
-rw-r--r--drivers/net/wireless/ti/wl1251/reg.h (renamed from drivers/net/wireless/wl1251/reg.h)0
-rw-r--r--drivers/net/wireless/ti/wl1251/rx.c (renamed from drivers/net/wireless/wl1251/rx.c)0
-rw-r--r--drivers/net/wireless/ti/wl1251/rx.h (renamed from drivers/net/wireless/wl1251/rx.h)0
-rw-r--r--drivers/net/wireless/ti/wl1251/sdio.c (renamed from drivers/net/wireless/wl1251/sdio.c)0
-rw-r--r--drivers/net/wireless/ti/wl1251/spi.c (renamed from drivers/net/wireless/wl1251/spi.c)0
-rw-r--r--drivers/net/wireless/ti/wl1251/spi.h (renamed from drivers/net/wireless/wl1251/spi.h)0
-rw-r--r--drivers/net/wireless/ti/wl1251/tx.c (renamed from drivers/net/wireless/wl1251/tx.c)0
-rw-r--r--drivers/net/wireless/ti/wl1251/tx.h (renamed from drivers/net/wireless/wl1251/tx.h)0
-rw-r--r--drivers/net/wireless/ti/wl1251/wl1251.h (renamed from drivers/net/wireless/wl1251/wl1251.h)0
-rw-r--r--drivers/net/wireless/ti/wl1251/wl12xx_80211.h (renamed from drivers/net/wireless/wl1251/wl12xx_80211.h)0
-rw-r--r--drivers/net/wireless/ti/wl12xx/Kconfig8
-rw-r--r--drivers/net/wireless/ti/wl12xx/Makefile3
-rw-r--r--drivers/net/wireless/ti/wl12xx/acx.c53
-rw-r--r--drivers/net/wireless/ti/wl12xx/acx.h36
-rw-r--r--drivers/net/wireless/ti/wl12xx/cmd.c254
-rw-r--r--drivers/net/wireless/ti/wl12xx/cmd.h112
-rw-r--r--drivers/net/wireless/ti/wl12xx/conf.h50
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c1388
-rw-r--r--drivers/net/wireless/ti/wl12xx/reg.h (renamed from drivers/net/wireless/wl12xx/reg.h)315
-rw-r--r--drivers/net/wireless/ti/wl12xx/wl12xx.h31
-rw-r--r--drivers/net/wireless/ti/wlcore/Kconfig41
-rw-r--r--drivers/net/wireless/ti/wlcore/Makefile15
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.c (renamed from drivers/net/wireless/wl12xx/acx.c)42
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.h (renamed from drivers/net/wireless/wl12xx/acx.h)10
-rw-r--r--drivers/net/wireless/ti/wlcore/boot.c443
-rw-r--r--drivers/net/wireless/ti/wlcore/boot.h54
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c (renamed from drivers/net/wireless/wl12xx/cmd.c)285
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h (renamed from drivers/net/wireless/wl12xx/cmd.h)98
-rw-r--r--drivers/net/wireless/ti/wlcore/conf.h (renamed from drivers/net/wireless/wl12xx/conf.h)85
-rw-r--r--drivers/net/wireless/ti/wlcore/debug.h (renamed from drivers/net/wireless/wl12xx/debug.h)1
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.c (renamed from drivers/net/wireless/wl12xx/debugfs.c)3
-rw-r--r--drivers/net/wireless/ti/wlcore/debugfs.h (renamed from drivers/net/wireless/wl12xx/debugfs.h)2
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c (renamed from drivers/net/wireless/wl12xx/event.c)31
-rw-r--r--drivers/net/wireless/ti/wlcore/event.h (renamed from drivers/net/wireless/wl12xx/event.h)3
-rw-r--r--drivers/net/wireless/ti/wlcore/hw_ops.h122
-rw-r--r--drivers/net/wireless/ti/wlcore/ini.h (renamed from drivers/net/wireless/wl12xx/ini.h)0
-rw-r--r--drivers/net/wireless/ti/wlcore/init.c (renamed from drivers/net/wireless/wl12xx/init.c)66
-rw-r--r--drivers/net/wireless/ti/wlcore/init.h (renamed from drivers/net/wireless/wl12xx/init.h)2
-rw-r--r--drivers/net/wireless/ti/wlcore/io.c (renamed from drivers/net/wireless/wl12xx/io.c)191
-rw-r--r--drivers/net/wireless/ti/wlcore/io.h (renamed from drivers/net/wireless/wl12xx/io.h)88
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c (renamed from drivers/net/wireless/wl12xx/main.c)824
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.c (renamed from drivers/net/wireless/wl12xx/ps.c)8
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.h (renamed from drivers/net/wireless/wl12xx/ps.h)2
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.c (renamed from drivers/net/wireless/wl12xx/rx.c)130
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.h (renamed from drivers/net/wireless/wl12xx/rx.h)12
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.c (renamed from drivers/net/wireless/wl12xx/scan.c)30
-rw-r--r--drivers/net/wireless/ti/wlcore/scan.h (renamed from drivers/net/wireless/wl12xx/scan.h)4
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c (renamed from drivers/net/wireless/wl12xx/sdio.c)6
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c (renamed from drivers/net/wireless/wl12xx/spi.c)4
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c (renamed from drivers/net/wireless/wl12xx/testmode.c)12
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.h (renamed from drivers/net/wireless/wl12xx/testmode.h)0
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c (renamed from drivers/net/wireless/wl12xx/tx.c)125
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.h (renamed from drivers/net/wireless/wl12xx/tx.h)7
-rw-r--r--drivers/net/wireless/ti/wlcore/wl12xx.h (renamed from drivers/net/wireless/wl12xx/wl12xx.h)271
-rw-r--r--drivers/net/wireless/ti/wlcore/wl12xx_80211.h (renamed from drivers/net/wireless/wl12xx/wl12xx_80211.h)0
-rw-r--r--drivers/net/wireless/ti/wlcore/wl12xx_platform_data.c (renamed from drivers/net/wireless/wl12xx/wl12xx_platform_data.c)0
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h448
-rw-r--r--drivers/net/wireless/wl12xx/Kconfig48
-rw-r--r--drivers/net/wireless/wl12xx/Makefile15
-rw-r--r--drivers/net/wireless/wl12xx/boot.c786
-rw-r--r--drivers/net/wireless/wl12xx/boot.h120
-rw-r--r--drivers/nfc/pn533.c228
-rw-r--r--drivers/of/of_mdio.c2
-rw-r--r--drivers/pci/quirks.c12
-rw-r--r--drivers/ptp/Kconfig10
-rw-r--r--drivers/ptp/ptp_clock.c6
-rw-r--r--drivers/ptp/ptp_ixp46x.c3
-rw-r--r--drivers/ptp/ptp_pch.c7
-rw-r--r--drivers/scsi/iscsi_tcp.c2
-rw-r--r--drivers/staging/octeon/ethernet-tx.c2
-rw-r--r--drivers/staging/ramster/cluster/tcp.c2
-rw-r--r--drivers/vhost/net.c7
-rw-r--r--drivers/vhost/vhost.c1
-rw-r--r--fs/ocfs2/cluster/tcp.c2
-rw-r--r--include/linux/dcbnl.h12
-rw-r--r--include/linux/dccp.h2
-rw-r--r--include/linux/ethtool.h29
-rw-r--r--include/linux/filter.h7
-rw-r--r--include/linux/hyperv.h27
-rw-r--r--include/linux/ieee80211.h36
-rw-r--r--include/linux/if_link.h5
-rw-r--r--include/linux/if_macvlan.h1
-rw-r--r--include/linux/if_pppol2tp.h28
-rw-r--r--include/linux/if_pppox.h20
-rw-r--r--include/linux/if_team.h67
-rw-r--r--include/linux/ip_vs.h12
-rw-r--r--include/linux/l2tp.h19
-rw-r--r--include/linux/mISDNhw.h3
-rw-r--r--include/linux/mISDNif.h9
-rw-r--r--include/linux/mdio-mux.h21
-rw-r--r--include/linux/mlx4/cmd.h4
-rw-r--r--include/linux/mlx4/device.h3
-rw-r--r--include/linux/mlx4/qp.h3
-rw-r--r--include/linux/neighbour.h3
-rw-r--r--include/linux/netdevice.h41
-rw-r--r--include/linux/netfilter.h6
-rw-r--r--include/linux/netfilter/ipset/ip_set.h54
-rw-r--r--include/linux/netfilter/ipset/ip_set_ahash.h21
-rw-r--r--include/linux/netfilter/nf_conntrack_h323_types.h12
-rw-r--r--include/linux/netfilter/nfnetlink.h2
-rw-r--r--include/linux/nfc.h1
-rw-r--r--include/linux/nl80211.h41
-rw-r--r--include/linux/of_mdio.h2
-rw-r--r--include/linux/phy.h3
-rw-r--r--include/linux/pkt_sched.h1
-rw-r--r--include/linux/platform_data/wiznet.h24
-rw-r--r--include/linux/ptp_clock_kernel.h8
-rw-r--r--include/linux/rtnetlink.h4
-rw-r--r--include/linux/skbuff.h68
-rw-r--r--include/linux/sock_diag.h4
-rw-r--r--include/linux/socket.h4
-rw-r--r--include/linux/stmmac.h56
-rw-r--r--include/linux/tcp.h22
-rw-r--r--include/linux/virtio_net.h14
-rw-r--r--include/net/addrconf.h2
-rw-r--r--include/net/af_unix.h2
-rw-r--r--include/net/ax25.h10
-rw-r--r--include/net/caif/caif_hsi.h19
-rw-r--r--include/net/caif/cfpkt.h9
-rw-r--r--include/net/cfg80211.h42
-rw-r--r--include/net/compat.h8
-rw-r--r--include/net/dcbnl.h2
-rw-r--r--include/net/dn.h2
-rw-r--r--include/net/dn_fib.h6
-rw-r--r--include/net/dn_route.h4
-rw-r--r--include/net/dst_ops.h2
-rw-r--r--include/net/icmp.h3
-rw-r--r--include/net/if_inet6.h2
-rw-r--r--include/net/inet6_connection_sock.h2
-rw-r--r--include/net/inet_connection_sock.h5
-rw-r--r--include/net/ip.h3
-rw-r--r--include/net/ip6_route.h2
-rw-r--r--include/net/ip_fib.h4
-rw-r--r--include/net/ip_vs.h18
-rw-r--r--include/net/ipip.h2
-rw-r--r--include/net/ipv6.h5
-rw-r--r--include/net/llc_c_ev.h2
-rw-r--r--include/net/mac80211.h154
-rw-r--r--include/net/ndisc.h1
-rw-r--r--include/net/neighbour.h5
-rw-r--r--include/net/net_namespace.h21
-rw-r--r--include/net/netfilter/nf_conntrack_l3proto.h2
-rw-r--r--include/net/netlink.h169
-rw-r--r--include/net/netns/hash.h2
-rw-r--r--include/net/netns/ipv6.h4
-rw-r--r--include/net/nfc/hci.h198
-rw-r--r--include/net/nfc/nfc.h14
-rw-r--r--include/net/nfc/shdlc.h104
-rw-r--r--include/net/pkt_sched.h2
-rw-r--r--include/net/route.h6
-rw-r--r--include/net/rtnetlink.h11
-rw-r--r--include/net/sctp/structs.h8
-rw-r--r--include/net/sock.h21
-rw-r--r--include/net/tcp.h61
-rw-r--r--include/net/udp.h6
-rw-r--r--include/net/wimax.h4
-rw-r--r--include/net/x25.h2
-rw-r--r--include/net/xfrm.h5
-rw-r--r--net/802/fc.c2
-rw-r--r--net/802/fddi.c2
-rw-r--r--net/802/garp.c8
-rw-r--r--net/802/hippi.c2
-rw-r--r--net/802/tr.c12
-rw-r--r--net/8021q/vlan_netlink.c16
-rw-r--r--net/9p/client.c6
-rw-r--r--net/9p/trans_fd.c2
-rw-r--r--net/appletalk/ddp.c6
-rw-r--r--net/appletalk/sysctl_net_atalk.c10
-rw-r--r--net/atm/br2684.c2
-rw-r--r--net/atm/mpoa_proc.c2
-rw-r--r--net/atm/pppoatm.c95
-rw-r--r--net/atm/signaling.c2
-rw-r--r--net/ax25/af_ax25.c2
-rw-r--r--net/ax25/ax25_dev.c10
-rw-r--r--net/ax25/ax25_ip.c4
-rw-r--r--net/ax25/sysctl_net_ax25.c82
-rw-r--r--net/batman-adv/Kconfig27
-rw-r--r--net/batman-adv/Makefile1
-rw-r--r--net/batman-adv/bat_debugfs.c19
-rw-r--r--net/batman-adv/bat_iv_ogm.c78
-rw-r--r--net/batman-adv/bat_sysfs.c14
-rw-r--r--net/batman-adv/bitarray.c118
-rw-r--r--net/batman-adv/bitarray.h26
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c1580
-rw-r--r--net/batman-adv/bridge_loop_avoidance.h98
-rw-r--r--net/batman-adv/hard-interface.c51
-rw-r--r--net/batman-adv/icmp_socket.c4
-rw-r--r--net/batman-adv/main.c14
-rw-r--r--net/batman-adv/main.h11
-rw-r--r--net/batman-adv/originator.c3
-rw-r--r--net/batman-adv/packet.h49
-rw-r--r--net/batman-adv/routing.c39
-rw-r--r--net/batman-adv/routing.h1
-rw-r--r--net/batman-adv/send.c14
-rw-r--r--net/batman-adv/soft-interface.c500
-rw-r--r--net/batman-adv/soft-interface.h2
-rw-r--r--net/batman-adv/translation-table.c437
-rw-r--r--net/batman-adv/translation-table.h8
-rw-r--r--net/batman-adv/types.h89
-rw-r--r--net/batman-adv/vis.c8
-rw-r--r--net/bridge/br_device.c3
-rw-r--r--net/bridge/br_fdb.c136
-rw-r--r--net/bridge/br_forward.c2
-rw-r--r--net/bridge/br_multicast.c73
-rw-r--r--net/bridge/br_netfilter.c10
-rw-r--r--net/bridge/br_netlink.c39
-rw-r--r--net/bridge/br_private.h17
-rw-r--r--net/bridge/br_private_stp.h7
-rw-r--r--net/bridge/br_stp.c4
-rw-r--r--net/bridge/br_stp_timer.c6
-rw-r--r--net/bridge/br_sysfs_br.c20
-rw-r--r--net/caif/caif_socket.c18
-rw-r--r--net/caif/cfctrl.c4
-rw-r--r--net/caif/cfpkt_skbuff.c7
-rw-r--r--net/caif/cfsrvl.c3
-rw-r--r--net/caif/chnl_net.c14
-rw-r--r--net/can/gw.c2
-rw-r--r--net/ceph/auth_x.h6
-rw-r--r--net/ceph/ceph_common.c4
-rw-r--r--net/ceph/ceph_hash.c6
-rw-r--r--net/ceph/crush/mapper.c9
-rw-r--r--net/ceph/debugfs.c6
-rw-r--r--net/ceph/messenger.c16
-rw-r--r--net/ceph/mon_client.c10
-rw-r--r--net/ceph/osd_client.c2
-rw-r--r--net/ceph/osdmap.c14
-rw-r--r--net/compat.c10
-rw-r--r--net/core/datagram.c6
-rw-r--r--net/core/dev.c15
-rw-r--r--net/core/dev_addr_lists.c97
-rw-r--r--net/core/ethtool.c59
-rw-r--r--net/core/fib_rules.c32
-rw-r--r--net/core/filter.c72
-rw-r--r--net/core/gen_stats.c3
-rw-r--r--net/core/kmap_skb.h19
-rw-r--r--net/core/neighbour.c115
-rw-r--r--net/core/net-sysfs.c13
-rw-r--r--net/core/pktgen.c20
-rw-r--r--net/core/rtnetlink.c351
-rw-r--r--net/core/skbuff.c249
-rw-r--r--net/core/sock.c54
-rw-r--r--net/core/sock_diag.c12
-rw-r--r--net/core/sysctl_net_core.c15
-rw-r--r--net/core/utils.c9
-rw-r--r--net/dcb/dcbnl.c94
-rw-r--r--net/dccp/ccids/ccid3.c12
-rw-r--r--net/dccp/dccp.h8
-rw-r--r--net/dccp/input.c10
-rw-r--r--net/dccp/ipv4.c6
-rw-r--r--net/dccp/ipv6.c3
-rw-r--r--net/dccp/sysctl.c11
-rw-r--r--net/decnet/af_decnet.c6
-rw-r--r--net/decnet/dn_dev.c35
-rw-r--r--net/decnet/dn_fib.c5
-rw-r--r--net/decnet/dn_nsp_in.c2
-rw-r--r--net/decnet/dn_nsp_out.c4
-rw-r--r--net/decnet/dn_route.c18
-rw-r--r--net/decnet/dn_rules.c14
-rw-r--r--net/decnet/sysctl_net_decnet.c10
-rw-r--r--net/dns_resolver/dns_key.c2
-rw-r--r--net/dns_resolver/internal.h2
-rw-r--r--net/econet/af_econet.c4
-rw-r--r--net/ethernet/eth.c2
-rw-r--r--net/ieee802154/6lowpan.c87
-rw-r--r--net/ieee802154/6lowpan.h3
-rw-r--r--net/ieee802154/dgram.c6
-rw-r--r--net/ieee802154/nl-mac.c146
-rw-r--r--net/ieee802154/nl-phy.c29
-rw-r--r--net/ieee802154/raw.c2
-rw-r--r--net/ipv4/af_inet.c4
-rw-r--r--net/ipv4/ah4.c2
-rw-r--r--net/ipv4/arp.c4
-rw-r--r--net/ipv4/devinet.c61
-rw-r--r--net/ipv4/fib_frontend.c10
-rw-r--r--net/ipv4/fib_rules.c16
-rw-r--r--net/ipv4/fib_semantics.c47
-rw-r--r--net/ipv4/igmp.c18
-rw-r--r--net/ipv4/inet_connection_sock.c29
-rw-r--r--net/ipv4/inet_diag.c4
-rw-r--r--net/ipv4/inet_hashtables.c2
-rw-r--r--net/ipv4/inet_timewait_sock.c2
-rw-r--r--net/ipv4/ip_forward.c4
-rw-r--r--net/ipv4/ip_fragment.c6
-rw-r--r--net/ipv4/ip_gre.c102
-rw-r--r--net/ipv4/ip_options.c26
-rw-r--r--net/ipv4/ip_sockglue.c19
-rw-r--r--net/ipv4/ipconfig.c2
-rw-r--r--net/ipv4/ipip.c57
-rw-r--r--net/ipv4/ipmr.c9
-rw-r--r--net/ipv4/netfilter.c12
-rw-r--r--net/ipv4/netfilter/arp_tables.c2
-rw-r--r--net/ipv4/netfilter/ip_queue.c6
-rw-r--r--net/ipv4/netfilter/ip_tables.c2
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c7
-rw-r--r--net/ipv4/netfilter/nf_conntrack_proto_icmp.c12
-rw-r--r--net/ipv4/netfilter/nf_nat_sip.c2
-rw-r--r--net/ipv4/ping.c10
-rw-r--r--net/ipv4/raw.c2
-rw-r--r--net/ipv4/route.c103
-rw-r--r--net/ipv4/sysctl_net_ipv4.c26
-rw-r--r--net/ipv4/tcp.c292
-rw-r--r--net/ipv4/tcp_input.c368
-rw-r--r--net/ipv4/tcp_ipv4.c79
-rw-r--r--net/ipv4/tcp_minisocks.c1
-rw-r--r--net/ipv4/tcp_output.c69
-rw-r--r--net/ipv4/tcp_probe.c4
-rw-r--r--net/ipv4/tcp_timer.c5
-rw-r--r--net/ipv4/udp.c20
-rw-r--r--net/ipv4/udp_impl.h2
-rw-r--r--net/ipv4/xfrm4_policy.c6
-rw-r--r--net/ipv6/addrconf.c101
-rw-r--r--net/ipv6/addrconf_core.c4
-rw-r--r--net/ipv6/af_inet6.c17
-rw-r--r--net/ipv6/datagram.c16
-rw-r--r--net/ipv6/exthdrs.c30
-rw-r--r--net/ipv6/exthdrs_core.c3
-rw-r--r--net/ipv6/fib6_rules.c18
-rw-r--r--net/ipv6/icmp.c5
-rw-r--r--net/ipv6/inet6_connection_sock.c2
-rw-r--r--net/ipv6/ip6_flowlabel.c5
-rw-r--r--net/ipv6/ip6_output.c7
-rw-r--r--net/ipv6/ip6_tunnel.c4
-rw-r--r--net/ipv6/ip6mr.c9
-rw-r--r--net/ipv6/ipv6_sockglue.c3
-rw-r--r--net/ipv6/mcast.c2
-rw-r--r--net/ipv6/ndisc.c9
-rw-r--r--net/ipv6/netfilter/ip6_queue.c6
-rw-r--r--net/ipv6/netfilter/ip6_tables.c2
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c9
-rw-r--r--net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c12
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c6
-rw-r--r--net/ipv6/reassembly.c6
-rw-r--r--net/ipv6/route.c50
-rw-r--r--net/ipv6/sit.c58
-rw-r--r--net/ipv6/sysctl_net_ipv6.c83
-rw-r--r--net/ipv6/tcp_ipv6.c61
-rw-r--r--net/ipv6/udp.c174
-rw-r--r--net/ipv6/xfrm6_policy.c4
-rw-r--r--net/ipv6/xfrm6_tunnel.c6
-rw-r--r--net/ipx/sysctl_net_ipx.c11
-rw-r--r--net/irda/ircomm/ircomm_tty_ioctl.c2
-rw-r--r--net/irda/irsysctl.c10
-rw-r--r--net/key/af_key.c4
-rw-r--r--net/l2tp/Makefile3
-rw-r--r--net/l2tp/l2tp_core.c306
-rw-r--r--net/l2tp/l2tp_core.h21
-rw-r--r--net/l2tp/l2tp_debugfs.c8
-rw-r--r--net/l2tp/l2tp_ip.c86
-rw-r--r--net/l2tp/l2tp_ip6.c792
-rw-r--r--net/l2tp/l2tp_netlink.c190
-rw-r--r--net/l2tp/l2tp_ppp.c78
-rw-r--r--net/llc/af_llc.c2
-rw-r--r--net/llc/llc_conn.c2
-rw-r--r--net/llc/sysctl_net_llc.c52
-rw-r--r--net/mac80211/Kconfig11
-rw-r--r--net/mac80211/Makefile3
-rw-r--r--net/mac80211/agg-rx.c18
-rw-r--r--net/mac80211/agg-tx.c57
-rw-r--r--net/mac80211/cfg.c83
-rw-r--r--net/mac80211/chan.c26
-rw-r--r--net/mac80211/debugfs_netdev.c87
-rw-r--r--net/mac80211/debugfs_sta.c5
-rw-r--r--net/mac80211/driver-ops.h41
-rw-r--r--net/mac80211/driver-trace.h55
-rw-r--r--net/mac80211/ht.c9
-rw-r--r--net/mac80211/ibss.c22
-rw-r--r--net/mac80211/ieee80211_i.h77
-rw-r--r--net/mac80211/iface.c170
-rw-r--r--net/mac80211/main.c16
-rw-r--r--net/mac80211/mesh.c56
-rw-r--r--net/mac80211/mesh.h37
-rw-r--r--net/mac80211/mesh_hwmp.c33
-rw-r--r--net/mac80211/mesh_pathtbl.c2
-rw-r--r--net/mac80211/mesh_plink.c145
-rw-r--r--net/mac80211/mesh_sync.c316
-rw-r--r--net/mac80211/mlme.c338
-rw-r--r--net/mac80211/pm.c4
-rw-r--r--net/mac80211/rate.h7
-rw-r--r--net/mac80211/rc80211_minstrel.c13
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c20
-rw-r--r--net/mac80211/rx.c12
-rw-r--r--net/mac80211/scan.c138
-rw-r--r--net/mac80211/sta_info.c28
-rw-r--r--net/mac80211/sta_info.h11
-rw-r--r--net/mac80211/status.c8
-rw-r--r--net/mac80211/tx.c85
-rw-r--r--net/mac80211/util.c244
-rw-r--r--net/mac80211/wme.c46
-rw-r--r--net/mac80211/wme.h3
-rw-r--r--net/mac80211/work.c15
-rw-r--r--net/netfilter/core.c9
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ip.c33
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_ipmac.c43
-rw-r--r--net/netfilter/ipset/ip_set_bitmap_port.c29
-rw-r--r--net/netfilter/ipset/ip_set_core.c49
-rw-r--r--net/netfilter/ipset/ip_set_hash_ip.c20
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipport.c37
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportip.c45
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipportnet.c69
-rw-r--r--net/netfilter/ipset/ip_set_hash_net.c45
-rw-r--r--net/netfilter/ipset/ip_set_hash_netiface.c52
-rw-r--r--net/netfilter/ipset/ip_set_hash_netport.c61
-rw-r--r--net/netfilter/ipset/ip_set_list_set.c23
-rw-r--r--net/netfilter/ipvs/ip_vs_app.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c34
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c144
-rw-r--r--net/netfilter/ipvs/ip_vs_dh.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_ftp.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_lblc.c9
-rw-r--r--net/netfilter/ipvs/ip_vs_lblcr.c9
-rw-r--r--net/netfilter/ipvs/ip_vs_proto.c12
-rw-r--r--net/netfilter/ipvs/ip_vs_sh.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c6
-rw-r--r--net/netfilter/nf_conntrack_acct.c4
-rw-r--r--net/netfilter/nf_conntrack_core.c5
-rw-r--r--net/netfilter/nf_conntrack_ecache.c3
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c2
-rw-r--r--net/netfilter/nf_conntrack_netlink.c83
-rw-r--r--net/netfilter/nf_conntrack_proto.c10
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c22
-rw-r--r--net/netfilter/nf_conntrack_proto_generic.c3
-rw-r--r--net/netfilter/nf_conntrack_proto_gre.c9
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c22
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c68
-rw-r--r--net/netfilter/nf_conntrack_proto_udp.c9
-rw-r--r--net/netfilter/nf_conntrack_proto_udplite.c9
-rw-r--r--net/netfilter/nf_conntrack_standalone.c14
-rw-r--r--net/netfilter/nf_conntrack_timestamp.c4
-rw-r--r--net/netfilter/nf_log.c9
-rw-r--r--net/netfilter/nfnetlink.c2
-rw-r--r--net/netfilter/nfnetlink_acct.c10
-rw-r--r--net/netfilter/nfnetlink_cttimeout.c11
-rw-r--r--net/netfilter/nfnetlink_log.c100
-rw-r--r--net/netfilter/nfnetlink_queue.c59
-rw-r--r--net/netfilter/xt_recent.c2
-rw-r--r--net/netlink/af_netlink.c75
-rw-r--r--net/netlink/genetlink.c35
-rw-r--r--net/netrom/nr_dev.c2
-rw-r--r--net/netrom/sysctl_net_netrom.c10
-rw-r--r--net/nfc/Kconfig1
-rw-r--r--net/nfc/Makefile1
-rw-r--r--net/nfc/core.c144
-rw-r--r--net/nfc/hci/Kconfig16
-rw-r--r--net/nfc/hci/Makefile8
-rw-r--r--net/nfc/hci/command.c354
-rw-r--r--net/nfc/hci/core.c830
-rw-r--r--net/nfc/hci/hci.h139
-rw-r--r--net/nfc/hci/hcp.c156
-rw-r--r--net/nfc/hci/shdlc.c945
-rw-r--r--net/nfc/llcp/commands.c8
-rw-r--r--net/nfc/llcp/llcp.c21
-rw-r--r--net/nfc/nci/core.c2
-rw-r--r--net/nfc/nci/ntf.c11
-rw-r--r--net/nfc/netlink.c101
-rw-r--r--net/nfc/nfc.h3
-rw-r--r--net/nfc/rawsock.c6
-rw-r--r--net/openvswitch/datapath.c34
-rw-r--r--net/openvswitch/flow.c18
-rw-r--r--net/openvswitch/vport-netdev.c4
-rw-r--r--net/packet/af_packet.c50
-rw-r--r--net/phonet/af_phonet.c2
-rw-r--r--net/phonet/pep.c8
-rw-r--r--net/phonet/pn_dev.c4
-rw-r--r--net/phonet/pn_netlink.c8
-rw-r--r--net/phonet/socket.c12
-rw-r--r--net/phonet/sysctl.c17
-rw-r--r--net/rds/ib_sysctl.c11
-rw-r--r--net/rds/iw_sysctl.c11
-rw-r--r--net/rds/sysctl.c11
-rw-r--r--net/rds/tcp_listen.c2
-rw-r--r--net/rose/rose_dev.c2
-rw-r--r--net/rose/rose_subr.c2
-rw-r--r--net/rose/sysctl_net_rose.c10
-rw-r--r--net/rxrpc/af_rxrpc.c8
-rw-r--r--net/rxrpc/ar-ack.c6
-rw-r--r--net/rxrpc/ar-call.c4
-rw-r--r--net/rxrpc/ar-input.c2
-rw-r--r--net/rxrpc/ar-internal.h16
-rw-r--r--net/rxrpc/ar-key.c22
-rw-r--r--net/rxrpc/rxkad.c6
-rw-r--r--net/sched/act_api.c9
-rw-r--r--net/sched/act_csum.c6
-rw-r--r--net/sched/act_gact.c9
-rw-r--r--net/sched/act_ipt.c14
-rw-r--r--net/sched/act_mirred.c6
-rw-r--r--net/sched/act_nat.c6
-rw-r--r--net/sched/act_pedit.c6
-rw-r--r--net/sched/act_police.c13
-rw-r--r--net/sched/act_simple.c8
-rw-r--r--net/sched/act_skbedit.c27
-rw-r--r--net/sched/cls_api.c3
-rw-r--r--net/sched/cls_basic.c5
-rw-r--r--net/sched/cls_flow.c35
-rw-r--r--net/sched/cls_fw.c15
-rw-r--r--net/sched/cls_route.c16
-rw-r--r--net/sched/cls_rsvp.h16
-rw-r--r--net/sched/cls_tcindex.c14
-rw-r--r--net/sched/cls_u32.c40
-rw-r--r--net/sched/em_meta.c19
-rw-r--r--net/sched/ematch.c6
-rw-r--r--net/sched/sch_api.c9
-rw-r--r--net/sched/sch_atm.c21
-rw-r--r--net/sched/sch_cbq.c18
-rw-r--r--net/sched/sch_choke.c13
-rw-r--r--net/sched/sch_drr.c3
-rw-r--r--net/sched/sch_dsmark.c21
-rw-r--r--net/sched/sch_fifo.c3
-rw-r--r--net/sched/sch_generic.c3
-rw-r--r--net/sched/sch_gred.c6
-rw-r--r--net/sched/sch_hfsc.c6
-rw-r--r--net/sched/sch_htb.c10
-rw-r--r--net/sched/sch_mqprio.c3
-rw-r--r--net/sched/sch_multiq.c3
-rw-r--r--net/sched/sch_netem.c39
-rw-r--r--net/sched/sch_prio.c3
-rw-r--r--net/sched/sch_qfq.c5
-rw-r--r--net/sched/sch_red.c5
-rw-r--r--net/sched/sch_sfb.c3
-rw-r--r--net/sched/sch_sfq.c3
-rw-r--r--net/sched/sch_tbf.c3
-rw-r--r--net/sched/sch_teql.c4
-rw-r--r--net/sctp/associola.c4
-rw-r--r--net/sctp/input.c4
-rw-r--r--net/sctp/output.c4
-rw-r--r--net/sctp/outqueue.c2
-rw-r--r--net/sctp/sm_sideeffect.c4
-rw-r--r--net/sctp/sm_statefuns.c4
-rw-r--r--net/sctp/sysctl.c10
-rw-r--r--net/socket.c61
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c2
-rw-r--r--net/sunrpc/cache.c2
-rw-r--r--net/sunrpc/svcsock.c2
-rw-r--r--net/sunrpc/timer.c6
-rw-r--r--net/sunrpc/xdr.c2
-rw-r--r--net/sunrpc/xprt.c2
-rw-r--r--net/sysctl_net.c45
-rw-r--r--net/tipc/Makefile2
-rw-r--r--net/tipc/addr.c3
-rw-r--r--net/tipc/addr.h19
-rw-r--r--net/tipc/bcast.c22
-rw-r--r--net/tipc/bcast.h3
-rw-r--r--net/tipc/bearer.c24
-rw-r--r--net/tipc/bearer.h4
-rw-r--r--net/tipc/config.c30
-rw-r--r--net/tipc/config.h1
-rw-r--r--net/tipc/core.c11
-rw-r--r--net/tipc/core.h14
-rw-r--r--net/tipc/discover.c14
-rw-r--r--net/tipc/eth_media.c19
-rw-r--r--net/tipc/handler.c1
-rw-r--r--net/tipc/link.c122
-rw-r--r--net/tipc/link.h6
-rw-r--r--net/tipc/log.c14
-rw-r--r--net/tipc/log.h1
-rw-r--r--net/tipc/msg.c3
-rw-r--r--net/tipc/msg.h21
-rw-r--r--net/tipc/name_distr.c130
-rw-r--r--net/tipc/name_table.c98
-rw-r--r--net/tipc/name_table.h3
-rw-r--r--net/tipc/net.c9
-rw-r--r--net/tipc/node.c17
-rw-r--r--net/tipc/node.h2
-rw-r--r--net/tipc/node_subscr.c5
-rw-r--r--net/tipc/node_subscr.h1
-rw-r--r--net/tipc/port.c130
-rw-r--r--net/tipc/port.h14
-rw-r--r--net/tipc/ref.c13
-rw-r--r--net/tipc/socket.c103
-rw-r--r--net/tipc/subscr.c45
-rw-r--r--net/tipc/subscr.h2
-rw-r--r--net/unix/af_unix.c36
-rw-r--r--net/unix/diag.c2
-rw-r--r--net/unix/sysctl_net_unix.c10
-rw-r--r--net/wimax/stack.c5
-rw-r--r--net/wireless/core.c9
-rw-r--r--net/wireless/mesh.c3
-rw-r--r--net/wireless/mlme.c59
-rw-r--r--net/wireless/nl80211.c1279
-rw-r--r--net/wireless/nl80211.h4
-rw-r--r--net/wireless/reg.c10
-rw-r--r--net/wireless/scan.c4
-rw-r--r--net/wireless/util.c10
-rw-r--r--net/wireless/wext-compat.c3
-rw-r--r--net/wireless/wext-core.c9
-rw-r--r--net/x25/sysctl_net_x25.c10
-rw-r--r--net/x25/x25_dev.c2
-rw-r--r--net/x25/x25_facilities.c4
-rw-r--r--net/xfrm/xfrm_hash.h8
-rw-r--r--net/xfrm/xfrm_sysctl.c2
-rw-r--r--net/xfrm/xfrm_user.c105
-rw-r--r--tools/virtio/linux/virtio.h1
-rw-r--r--tools/virtio/virtio_test.c26
1218 files changed, 49332 insertions, 28671 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-net-mesh b/Documentation/ABI/testing/sysfs-class-net-mesh
index b218e0f8bdb3..c81fe89c4c46 100644
--- a/Documentation/ABI/testing/sysfs-class-net-mesh
+++ b/Documentation/ABI/testing/sysfs-class-net-mesh
@@ -14,6 +14,15 @@ Description:
14 mesh will be sent using multiple interfaces at the 14 mesh will be sent using multiple interfaces at the
15 same time (if available). 15 same time (if available).
16 16
17What: /sys/class/net/<mesh_iface>/mesh/bridge_loop_avoidance
18Date: November 2011
19Contact: Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
20Description:
21 Indicates whether the bridge loop avoidance feature
22 is enabled. This feature detects and avoids loops
23 between the mesh and devices bridged with the soft
24 interface <mesh_iface>.
25
17What: /sys/class/net/<mesh_iface>/mesh/fragmentation 26What: /sys/class/net/<mesh_iface>/mesh/fragmentation
18Date: October 2010 27Date: October 2010
19Contact: Andreas Langer <an.langer@gmx.de> 28Contact: Andreas Langer <an.langer@gmx.de>
diff --git a/Documentation/DocBook/80211.tmpl b/Documentation/DocBook/80211.tmpl
index c5ac6929c41c..f3e214f9e256 100644
--- a/Documentation/DocBook/80211.tmpl
+++ b/Documentation/DocBook/80211.tmpl
@@ -516,7 +516,7 @@
516!Finclude/net/mac80211.h ieee80211_start_tx_ba_cb_irqsafe 516!Finclude/net/mac80211.h ieee80211_start_tx_ba_cb_irqsafe
517!Finclude/net/mac80211.h ieee80211_stop_tx_ba_session 517!Finclude/net/mac80211.h ieee80211_stop_tx_ba_session
518!Finclude/net/mac80211.h ieee80211_stop_tx_ba_cb_irqsafe 518!Finclude/net/mac80211.h ieee80211_stop_tx_ba_cb_irqsafe
519!Finclude/net/mac80211.h rate_control_changed 519!Finclude/net/mac80211.h ieee80211_rate_control_changed
520!Finclude/net/mac80211.h ieee80211_tx_rate_control 520!Finclude/net/mac80211.h ieee80211_tx_rate_control
521!Finclude/net/mac80211.h rate_control_send_low 521!Finclude/net/mac80211.h rate_control_send_low
522 </chapter> 522 </chapter>
diff --git a/Documentation/devicetree/bindings/net/mdio-mux-gpio.txt b/Documentation/devicetree/bindings/net/mdio-mux-gpio.txt
new file mode 100644
index 000000000000..79384113c2b0
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/mdio-mux-gpio.txt
@@ -0,0 +1,127 @@
1Properties for an MDIO bus multiplexer/switch controlled by GPIO pins.
2
3This is a special case of a MDIO bus multiplexer. One or more GPIO
4lines are used to control which child bus is connected.
5
6Required properties in addition to the generic multiplexer properties:
7
8- compatible : mdio-mux-gpio.
9- gpios : GPIO specifiers for each GPIO line. One or more must be specified.
10
11
12Example :
13
14 /* The parent MDIO bus. */
15 smi1: mdio@1180000001900 {
16 compatible = "cavium,octeon-3860-mdio";
17 #address-cells = <1>;
18 #size-cells = <0>;
19 reg = <0x11800 0x00001900 0x0 0x40>;
20 };
21
22 /*
23 An NXP sn74cbtlv3253 dual 1-of-4 switch controlled by a
24 pair of GPIO lines. Child busses 2 and 3 populated with 4
25 PHYs each.
26 */
27 mdio-mux {
28 compatible = "mdio-mux-gpio";
29 gpios = <&gpio1 3 0>, <&gpio1 4 0>;
30 mdio-parent-bus = <&smi1>;
31 #address-cells = <1>;
32 #size-cells = <0>;
33
34 mdio@2 {
35 reg = <2>;
36 #address-cells = <1>;
37 #size-cells = <0>;
38
39 phy11: ethernet-phy@1 {
40 reg = <1>;
41 compatible = "marvell,88e1149r";
42 marvell,reg-init = <3 0x10 0 0x5777>,
43 <3 0x11 0 0x00aa>,
44 <3 0x12 0 0x4105>,
45 <3 0x13 0 0x0a60>;
46 interrupt-parent = <&gpio>;
47 interrupts = <10 8>; /* Pin 10, active low */
48 };
49 phy12: ethernet-phy@2 {
50 reg = <2>;
51 compatible = "marvell,88e1149r";
52 marvell,reg-init = <3 0x10 0 0x5777>,
53 <3 0x11 0 0x00aa>,
54 <3 0x12 0 0x4105>,
55 <3 0x13 0 0x0a60>;
56 interrupt-parent = <&gpio>;
57 interrupts = <10 8>; /* Pin 10, active low */
58 };
59 phy13: ethernet-phy@3 {
60 reg = <3>;
61 compatible = "marvell,88e1149r";
62 marvell,reg-init = <3 0x10 0 0x5777>,
63 <3 0x11 0 0x00aa>,
64 <3 0x12 0 0x4105>,
65 <3 0x13 0 0x0a60>;
66 interrupt-parent = <&gpio>;
67 interrupts = <10 8>; /* Pin 10, active low */
68 };
69 phy14: ethernet-phy@4 {
70 reg = <4>;
71 compatible = "marvell,88e1149r";
72 marvell,reg-init = <3 0x10 0 0x5777>,
73 <3 0x11 0 0x00aa>,
74 <3 0x12 0 0x4105>,
75 <3 0x13 0 0x0a60>;
76 interrupt-parent = <&gpio>;
77 interrupts = <10 8>; /* Pin 10, active low */
78 };
79 };
80
81 mdio@3 {
82 reg = <3>;
83 #address-cells = <1>;
84 #size-cells = <0>;
85
86 phy21: ethernet-phy@1 {
87 reg = <1>;
88 compatible = "marvell,88e1149r";
89 marvell,reg-init = <3 0x10 0 0x5777>,
90 <3 0x11 0 0x00aa>,
91 <3 0x12 0 0x4105>,
92 <3 0x13 0 0x0a60>;
93 interrupt-parent = <&gpio>;
94 interrupts = <12 8>; /* Pin 12, active low */
95 };
96 phy22: ethernet-phy@2 {
97 reg = <2>;
98 compatible = "marvell,88e1149r";
99 marvell,reg-init = <3 0x10 0 0x5777>,
100 <3 0x11 0 0x00aa>,
101 <3 0x12 0 0x4105>,
102 <3 0x13 0 0x0a60>;
103 interrupt-parent = <&gpio>;
104 interrupts = <12 8>; /* Pin 12, active low */
105 };
106 phy23: ethernet-phy@3 {
107 reg = <3>;
108 compatible = "marvell,88e1149r";
109 marvell,reg-init = <3 0x10 0 0x5777>,
110 <3 0x11 0 0x00aa>,
111 <3 0x12 0 0x4105>,
112 <3 0x13 0 0x0a60>;
113 interrupt-parent = <&gpio>;
114 interrupts = <12 8>; /* Pin 12, active low */
115 };
116 phy24: ethernet-phy@4 {
117 reg = <4>;
118 compatible = "marvell,88e1149r";
119 marvell,reg-init = <3 0x10 0 0x5777>,
120 <3 0x11 0 0x00aa>,
121 <3 0x12 0 0x4105>,
122 <3 0x13 0 0x0a60>;
123 interrupt-parent = <&gpio>;
124 interrupts = <12 8>; /* Pin 12, active low */
125 };
126 };
127 };
diff --git a/Documentation/devicetree/bindings/net/mdio-mux.txt b/Documentation/devicetree/bindings/net/mdio-mux.txt
new file mode 100644
index 000000000000..f65606f8d632
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/mdio-mux.txt
@@ -0,0 +1,136 @@
1Common MDIO bus multiplexer/switch properties.
2
3An MDIO bus multiplexer/switch will have several child busses that are
4numbered uniquely in a device dependent manner. The nodes for an MDIO
5bus multiplexer/switch will have one child node for each child bus.
6
7Required properties:
8- mdio-parent-bus : phandle to the parent MDIO bus.
9- #address-cells = <1>;
10- #size-cells = <0>;
11
12Optional properties:
13- Other properties specific to the multiplexer/switch hardware.
14
15Required properties for child nodes:
16- #address-cells = <1>;
17- #size-cells = <0>;
18- reg : The sub-bus number.
19
20
21Example :
22
23 /* The parent MDIO bus. */
24 smi1: mdio@1180000001900 {
25 compatible = "cavium,octeon-3860-mdio";
26 #address-cells = <1>;
27 #size-cells = <0>;
28 reg = <0x11800 0x00001900 0x0 0x40>;
29 };
30
31 /*
32 An NXP sn74cbtlv3253 dual 1-of-4 switch controlled by a
33 pair of GPIO lines. Child busses 2 and 3 populated with 4
34 PHYs each.
35 */
36 mdio-mux {
37 compatible = "mdio-mux-gpio";
38 gpios = <&gpio1 3 0>, <&gpio1 4 0>;
39 mdio-parent-bus = <&smi1>;
40 #address-cells = <1>;
41 #size-cells = <0>;
42
43 mdio@2 {
44 reg = <2>;
45 #address-cells = <1>;
46 #size-cells = <0>;
47
48 phy11: ethernet-phy@1 {
49 reg = <1>;
50 compatible = "marvell,88e1149r";
51 marvell,reg-init = <3 0x10 0 0x5777>,
52 <3 0x11 0 0x00aa>,
53 <3 0x12 0 0x4105>,
54 <3 0x13 0 0x0a60>;
55 interrupt-parent = <&gpio>;
56 interrupts = <10 8>; /* Pin 10, active low */
57 };
58 phy12: ethernet-phy@2 {
59 reg = <2>;
60 compatible = "marvell,88e1149r";
61 marvell,reg-init = <3 0x10 0 0x5777>,
62 <3 0x11 0 0x00aa>,
63 <3 0x12 0 0x4105>,
64 <3 0x13 0 0x0a60>;
65 interrupt-parent = <&gpio>;
66 interrupts = <10 8>; /* Pin 10, active low */
67 };
68 phy13: ethernet-phy@3 {
69 reg = <3>;
70 compatible = "marvell,88e1149r";
71 marvell,reg-init = <3 0x10 0 0x5777>,
72 <3 0x11 0 0x00aa>,
73 <3 0x12 0 0x4105>,
74 <3 0x13 0 0x0a60>;
75 interrupt-parent = <&gpio>;
76 interrupts = <10 8>; /* Pin 10, active low */
77 };
78 phy14: ethernet-phy@4 {
79 reg = <4>;
80 compatible = "marvell,88e1149r";
81 marvell,reg-init = <3 0x10 0 0x5777>,
82 <3 0x11 0 0x00aa>,
83 <3 0x12 0 0x4105>,
84 <3 0x13 0 0x0a60>;
85 interrupt-parent = <&gpio>;
86 interrupts = <10 8>; /* Pin 10, active low */
87 };
88 };
89
90 mdio@3 {
91 reg = <3>;
92 #address-cells = <1>;
93 #size-cells = <0>;
94
95 phy21: ethernet-phy@1 {
96 reg = <1>;
97 compatible = "marvell,88e1149r";
98 marvell,reg-init = <3 0x10 0 0x5777>,
99 <3 0x11 0 0x00aa>,
100 <3 0x12 0 0x4105>,
101 <3 0x13 0 0x0a60>;
102 interrupt-parent = <&gpio>;
103 interrupts = <12 8>; /* Pin 12, active low */
104 };
105 phy22: ethernet-phy@2 {
106 reg = <2>;
107 compatible = "marvell,88e1149r";
108 marvell,reg-init = <3 0x10 0 0x5777>,
109 <3 0x11 0 0x00aa>,
110 <3 0x12 0 0x4105>,
111 <3 0x13 0 0x0a60>;
112 interrupt-parent = <&gpio>;
113 interrupts = <12 8>; /* Pin 12, active low */
114 };
115 phy23: ethernet-phy@3 {
116 reg = <3>;
117 compatible = "marvell,88e1149r";
118 marvell,reg-init = <3 0x10 0 0x5777>,
119 <3 0x11 0 0x00aa>,
120 <3 0x12 0 0x4105>,
121 <3 0x13 0 0x0a60>;
122 interrupt-parent = <&gpio>;
123 interrupts = <12 8>; /* Pin 12, active low */
124 };
125 phy24: ethernet-phy@4 {
126 reg = <4>;
127 compatible = "marvell,88e1149r";
128 marvell,reg-init = <3 0x10 0 0x5777>,
129 <3 0x11 0 0x00aa>,
130 <3 0x12 0 0x4105>,
131 <3 0x13 0 0x0a60>;
132 interrupt-parent = <&gpio>;
133 interrupts = <12 8>; /* Pin 12, active low */
134 };
135 };
136 };
diff --git a/Documentation/networking/batman-adv.txt b/Documentation/networking/batman-adv.txt
index 221ad0cdf11f..220a58c2fb11 100644
--- a/Documentation/networking/batman-adv.txt
+++ b/Documentation/networking/batman-adv.txt
@@ -67,18 +67,18 @@ To deactivate an interface you have to write "none" into its
67All mesh wide settings can be found in batman's own interface 67All mesh wide settings can be found in batman's own interface
68folder: 68folder:
69 69
70# ls /sys/class/net/bat0/mesh/ 70# ls /sys/class/net/bat0/mesh/
71# aggregated_ogms fragmentation gw_sel_class vis_mode 71# aggregated_ogms fragmentation hop_penalty
72# ap_isolation gw_bandwidth hop_penalty 72# ap_isolation gw_bandwidth log_level
73# bonding gw_mode orig_interval 73# bonding gw_mode orig_interval
74# bridge_loop_avoidance gw_sel_class vis_mode
74 75
75 76
76There is a special folder for debugging information: 77There is a special folder for debugging information:
77 78
78# ls /sys/kernel/debug/batman_adv/bat0/ 79# ls /sys/kernel/debug/batman_adv/bat0/
79# gateways socket transtable_global vis_data 80# bla_claim_table log socket transtable_local
80# originators softif_neigh transtable_local 81# gateways originators transtable_global vis_data
81
82 82
83Some of the files contain all sort of status information regard- 83Some of the files contain all sort of status information regard-
84ing the mesh network. For example, you can view the table of 84ing the mesh network. For example, you can view the table of
@@ -202,12 +202,13 @@ abled during run time. Following log_levels are defined:
2021 - Enable messages related to routing / flooding / broadcasting 2021 - Enable messages related to routing / flooding / broadcasting
2032 - Enable messages related to route added / changed / deleted 2032 - Enable messages related to route added / changed / deleted
2044 - Enable messages related to translation table operations 2044 - Enable messages related to translation table operations
2057 - Enable all messages 2058 - Enable messages related to bridge loop avoidance
20615 - enable all messages
206 207
207The debug output can be changed at runtime using the file 208The debug output can be changed at runtime using the file
208/sys/class/net/bat0/mesh/log_level. e.g. 209/sys/class/net/bat0/mesh/log_level. e.g.
209 210
210# echo 2 > /sys/class/net/bat0/mesh/log_level 211# echo 6 > /sys/class/net/bat0/mesh/log_level
211 212
212will enable debug messages for when routes change. 213will enable debug messages for when routes change.
213 214
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 1619a8c80873..90b0c4fd275b 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -190,6 +190,20 @@ tcp_cookie_size - INTEGER
190tcp_dsack - BOOLEAN 190tcp_dsack - BOOLEAN
191 Allows TCP to send "duplicate" SACKs. 191 Allows TCP to send "duplicate" SACKs.
192 192
193tcp_early_retrans - INTEGER
194 Enable Early Retransmit (ER), per RFC 5827. ER lowers the threshold
195 for triggering fast retransmit when the amount of outstanding data is
196 small and when no previously unsent data can be transmitted (such
197 that limited transmit could be used).
198 Possible values:
199 0 disables ER
200 1 enables ER
201 2 enables ER but delays fast recovery and fast retransmit
202 by a fourth of RTT. This mitigates connection falsely
203 recovers when network has a small degree of reordering
204 (less than 3 packets).
205 Default: 2
206
193tcp_ecn - INTEGER 207tcp_ecn - INTEGER
194 Enable Explicit Congestion Notification (ECN) in TCP. ECN is only 208 Enable Explicit Congestion Notification (ECN) in TCP. ECN is only
195 used when both ends of the TCP flow support it. It is useful to 209 used when both ends of the TCP flow support it. It is useful to
@@ -1484,11 +1498,8 @@ addr_scope_policy - INTEGER
1484 1498
1485 1499
1486/proc/sys/net/core/* 1500/proc/sys/net/core/*
1487dev_weight - INTEGER 1501 Please see: Documentation/sysctl/net.txt for descriptions of these entries.
1488 The maximum number of packets that kernel can handle on a NAPI
1489 interrupt, it's a Per-CPU variable.
1490 1502
1491 Default: 64
1492 1503
1493/proc/sys/net/unix/* 1504/proc/sys/net/unix/*
1494max_dgram_qlen - INTEGER 1505max_dgram_qlen - INTEGER
diff --git a/Documentation/networking/mac80211-auth-assoc-deauth.txt b/Documentation/networking/mac80211-auth-assoc-deauth.txt
index e0a2aa585ca3..d7a15fe91bf7 100644
--- a/Documentation/networking/mac80211-auth-assoc-deauth.txt
+++ b/Documentation/networking/mac80211-auth-assoc-deauth.txt
@@ -23,7 +23,7 @@ BA session stop & deauth/disassoc frames
23end note 23end note
24end 24end
25 25
26mac80211->driver: config(channel, non-HT) 26mac80211->driver: config(channel, channel type)
27mac80211->driver: bss_info_changed(set BSSID, basic rate bitmap) 27mac80211->driver: bss_info_changed(set BSSID, basic rate bitmap)
28mac80211->driver: sta_state(AP, exists) 28mac80211->driver: sta_state(AP, exists)
29 29
@@ -51,7 +51,7 @@ note over mac80211,driver: cleanup like for authenticate
51end 51end
52 52
53alt not previously authenticated (FT) 53alt not previously authenticated (FT)
54mac80211->driver: config(channel, non-HT) 54mac80211->driver: config(channel, channel type)
55mac80211->driver: bss_info_changed(set BSSID, basic rate bitmap) 55mac80211->driver: bss_info_changed(set BSSID, basic rate bitmap)
56mac80211->driver: sta_state(AP, exists) 56mac80211->driver: sta_state(AP, exists)
57mac80211->driver: sta_state(AP, authenticated) 57mac80211->driver: sta_state(AP, authenticated)
@@ -67,10 +67,6 @@ end
67 67
68mac80211->driver: set up QoS parameters 68mac80211->driver: set up QoS parameters
69 69
70alt is HT channel
71mac80211->driver: config(channel, HT params)
72end
73
74mac80211->driver: bss_info_changed(QoS, HT, associated with AID) 70mac80211->driver: bss_info_changed(QoS, HT, associated with AID)
75mac80211->userspace: associated 71mac80211->userspace: associated
76 72
@@ -95,5 +91,5 @@ mac80211->driver: sta_state(AP,exists)
95mac80211->driver: sta_state(AP,not-exists) 91mac80211->driver: sta_state(AP,not-exists)
96mac80211->driver: turn off powersave 92mac80211->driver: turn off powersave
97mac80211->driver: bss_info_changed(clear BSSID, not associated, no QoS, ...) 93mac80211->driver: bss_info_changed(clear BSSID, not associated, no QoS, ...)
98mac80211->driver: config(non-HT channel type) 94mac80211->driver: config(channel type to non-HT)
99mac80211->userspace: disconnected 95mac80211->userspace: disconnected
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index d0aeeadd264b..ab1e8d7004c5 100644
--- a/Documentation/networking/stmmac.txt
+++ b/Documentation/networking/stmmac.txt
@@ -111,11 +111,12 @@ and detailed below as well:
111 int phy_addr; 111 int phy_addr;
112 int interface; 112 int interface;
113 struct stmmac_mdio_bus_data *mdio_bus_data; 113 struct stmmac_mdio_bus_data *mdio_bus_data;
114 int pbl; 114 struct stmmac_dma_cfg *dma_cfg;
115 int clk_csr; 115 int clk_csr;
116 int has_gmac; 116 int has_gmac;
117 int enh_desc; 117 int enh_desc;
118 int tx_coe; 118 int tx_coe;
119 int rx_coe;
119 int bugged_jumbo; 120 int bugged_jumbo;
120 int pmt; 121 int pmt;
121 int force_sf_dma_mode; 122 int force_sf_dma_mode;
@@ -136,10 +137,12 @@ Where:
136 o pbl: the Programmable Burst Length is maximum number of beats to 137 o pbl: the Programmable Burst Length is maximum number of beats to
137 be transferred in one DMA transaction. 138 be transferred in one DMA transaction.
138 GMAC also enables the 4xPBL by default. 139 GMAC also enables the 4xPBL by default.
139 o clk_csr: CSR Clock range selection. 140 o clk_csr: fixed CSR Clock range selection.
140 o has_gmac: uses the GMAC core. 141 o has_gmac: uses the GMAC core.
141 o enh_desc: if sets the MAC will use the enhanced descriptor structure. 142 o enh_desc: if sets the MAC will use the enhanced descriptor structure.
142 o tx_coe: core is able to perform the tx csum in HW. 143 o tx_coe: core is able to perform the tx csum in HW.
144 o rx_coe: the supports three check sum offloading engine types:
145 type_1, type_2 (full csum) and no RX coe.
143 o bugged_jumbo: some HWs are not able to perform the csum in HW for 146 o bugged_jumbo: some HWs are not able to perform the csum in HW for
144 over-sized frames due to limited buffer sizes. 147 over-sized frames due to limited buffer sizes.
145 Setting this flag the csum will be done in SW on 148 Setting this flag the csum will be done in SW on
@@ -160,7 +163,7 @@ Where:
160 o custom_cfg: this is a custom configuration that can be passed while 163 o custom_cfg: this is a custom configuration that can be passed while
161 initialising the resources. 164 initialising the resources.
162 165
163The we have: 166For MDIO bus The we have:
164 167
165 struct stmmac_mdio_bus_data { 168 struct stmmac_mdio_bus_data {
166 int bus_id; 169 int bus_id;
@@ -177,10 +180,28 @@ Where:
177 o irqs: list of IRQs, one per PHY. 180 o irqs: list of IRQs, one per PHY.
178 o probed_phy_irq: if irqs is NULL, use this for probed PHY. 181 o probed_phy_irq: if irqs is NULL, use this for probed PHY.
179 182
183
184For DMA engine we have the following internal fields that should be
185tuned according to the HW capabilities.
186
187struct stmmac_dma_cfg {
188 int pbl;
189 int fixed_burst;
190 int burst_len_supported;
191};
192
193Where:
194 o pbl: Programmable Burst Length
195 o fixed_burst: program the DMA to use the fixed burst mode
196 o burst_len: this is the value we put in the register
197 supported values are provided as macros in
198 linux/stmmac.h header file.
199
200---
201
180Below an example how the structures above are using on ST platforms. 202Below an example how the structures above are using on ST platforms.
181 203
182 static struct plat_stmmacenet_data stxYYY_ethernet_platform_data = { 204 static struct plat_stmmacenet_data stxYYY_ethernet_platform_data = {
183 .pbl = 32,
184 .has_gmac = 0, 205 .has_gmac = 0,
185 .enh_desc = 0, 206 .enh_desc = 0,
186 .fix_mac_speed = stxYYY_ethernet_fix_mac_speed, 207 .fix_mac_speed = stxYYY_ethernet_fix_mac_speed,
diff --git a/Documentation/nfc/nfc-hci.txt b/Documentation/nfc/nfc-hci.txt
new file mode 100644
index 000000000000..216b7254fcc3
--- /dev/null
+++ b/Documentation/nfc/nfc-hci.txt
@@ -0,0 +1,155 @@
1HCI backend for NFC Core
2
3Author: Eric Lapuyade, Samuel Ortiz
4Contact: eric.lapuyade@intel.com, samuel.ortiz@intel.com
5
6General
7-------
8
9The HCI layer implements much of the ETSI TS 102 622 V10.2.0 specification. It
10enables easy writing of HCI-based NFC drivers. The HCI layer runs as an NFC Core
11backend, implementing an abstract nfc device and translating NFC Core API
12to HCI commands and events.
13
14HCI
15---
16
17HCI registers as an nfc device with NFC Core. Requests coming from userspace are
18routed through netlink sockets to NFC Core and then to HCI. From this point,
19they are translated in a sequence of HCI commands sent to the HCI layer in the
20host controller (the chip). The sending context blocks while waiting for the
21response to arrive.
22HCI events can also be received from the host controller. They will be handled
23and a translation will be forwarded to NFC Core as needed.
24HCI uses 2 execution contexts:
25- one if for executing commands : nfc_hci_msg_tx_work(). Only one command
26can be executing at any given moment.
27- one if for dispatching received events and responses : nfc_hci_msg_rx_work()
28
29HCI Session initialization:
30---------------------------
31
32The Session initialization is an HCI standard which must unfortunately
33support proprietary gates. This is the reason why the driver will pass a list
34of proprietary gates that must be part of the session. HCI will ensure all
35those gates have pipes connected when the hci device is set up.
36
37HCI Gates and Pipes
38-------------------
39
40A gate defines the 'port' where some service can be found. In order to access
41a service, one must create a pipe to that gate and open it. In this
42implementation, pipes are totally hidden. The public API only knows gates.
43This is consistent with the driver need to send commands to proprietary gates
44without knowing the pipe connected to it.
45
46Driver interface
47----------------
48
49A driver would normally register itself with HCI and provide the following
50entry points:
51
52struct nfc_hci_ops {
53 int (*open)(struct nfc_hci_dev *hdev);
54 void (*close)(struct nfc_hci_dev *hdev);
55 int (*xmit)(struct nfc_hci_dev *hdev, struct sk_buff *skb);
56 int (*start_poll)(struct nfc_hci_dev *hdev, u32 protocols);
57 int (*target_from_gate)(struct nfc_hci_dev *hdev, u8 gate,
58 struct nfc_target *target);
59};
60
61open() and close() shall turn the hardware on and off. xmit() shall simply
62write a frame to the chip. start_poll() is an optional entrypoint that shall
63set the hardware in polling mode. This must be implemented only if the hardware
64uses proprietary gates or a mechanism slightly different from the HCI standard.
65target_from_gate() is another optional entrypoint to return the protocols
66corresponding to a proprietary gate.
67
68On the rx path, the driver is responsible to push incoming HCP frames to HCI
69using nfc_hci_recv_frame(). HCI will take care of re-aggregation and handling
70This must be done from a context that can sleep.
71
72SHDLC
73-----
74
75Most chips use shdlc to ensure integrity and delivery ordering of the HCP
76frames between the host controller (the chip) and hosts (entities connected
77to the chip, like the cpu). In order to simplify writing the driver, an shdlc
78layer is available for use by the driver.
79When used, the driver actually registers with shdlc, and shdlc will register
80with HCI. HCI sees shdlc as the driver and thus send its HCP frames
81through shdlc->xmit.
82SHDLC adds a new execution context (nfc_shdlc_sm_work()) to run its state
83machine and handle both its rx and tx path.
84
85Included Drivers
86----------------
87
88An HCI based driver for an NXP PN544, connected through I2C bus, and using
89shdlc is included.
90
91Execution Contexts
92------------------
93
94The execution contexts are the following:
95- IRQ handler (IRQH):
96fast, cannot sleep. stores incoming frames into an shdlc rx queue
97
98- SHDLC State Machine worker (SMW)
99handles shdlc rx & tx queues. Dispatches HCI cmd responses.
100
101- HCI Tx Cmd worker (MSGTXWQ)
102Serialize execution of HCI commands. Complete execution in case of resp timeout.
103
104- HCI Rx worker (MSGRXWQ)
105Dispatches incoming HCI commands or events.
106
107- Syscall context from a userspace call (SYSCALL)
108Any entrypoint in HCI called from NFC Core
109
110Workflow executing an HCI command (using shdlc)
111-----------------------------------------------
112
113Executing an HCI command can easily be performed synchronously using the
114following API:
115
116int nfc_hci_send_cmd (struct nfc_hci_dev *hdev, u8 gate, u8 cmd,
117 const u8 *param, size_t param_len, struct sk_buff **skb)
118
119The API must be invoked from a context that can sleep. Most of the time, this
120will be the syscall context. skb will return the result that was received in
121the response.
122
123Internally, execution is asynchronous. So all this API does is to enqueue the
124HCI command, setup a local wait queue on stack, and wait_event() for completion.
125The wait is not interruptible because it is guaranteed that the command will
126complete after some short timeout anyway.
127
128MSGTXWQ context will then be scheduled and invoke nfc_hci_msg_tx_work().
129This function will dequeue the next pending command and send its HCP fragments
130to the lower layer which happens to be shdlc. It will then start a timer to be
131able to complete the command with a timeout error if no response arrive.
132
133SMW context gets scheduled and invokes nfc_shdlc_sm_work(). This function
134handles shdlc framing in and out. It uses the driver xmit to send frames and
135receives incoming frames in an skb queue filled from the driver IRQ handler.
136SHDLC I(nformation) frames payload are HCP fragments. They are agregated to
137form complete HCI frames, which can be a response, command, or event.
138
139HCI Responses are dispatched immediately from this context to unblock
140waiting command execution. Reponse processing involves invoking the completion
141callback that was provided by nfc_hci_msg_tx_work() when it sent the command.
142The completion callback will then wake the syscall context.
143
144Workflow receiving an HCI event or command
145------------------------------------------
146
147HCI commands or events are not dispatched from SMW context. Instead, they are
148queued to HCI rx_queue and will be dispatched from HCI rx worker
149context (MSGRXWQ). This is done this way to allow a cmd or event handler
150to also execute other commands (for example, handling the
151NFC_HCI_EVT_TARGET_DISCOVERED event from PN544 requires to issue an
152ANY_GET_PARAMETER to the reader A gate to get information on the target
153that was discovered).
154
155Typically, such an event will be propagated to NFC Core from MSGRXWQ context.
diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt
index 3201a7097e4d..98335b7a5337 100644
--- a/Documentation/sysctl/net.txt
+++ b/Documentation/sysctl/net.txt
@@ -43,6 +43,13 @@ Values :
43 1 - enable the JIT 43 1 - enable the JIT
44 2 - enable the JIT and ask the compiler to emit traces on kernel log. 44 2 - enable the JIT and ask the compiler to emit traces on kernel log.
45 45
46dev_weight
47--------------
48
49The maximum number of packets that kernel can handle on a NAPI interrupt,
50it's a Per-CPU variable.
51Default: 64
52
46rmem_default 53rmem_default
47------------ 54------------
48 55
diff --git a/MAINTAINERS b/MAINTAINERS
index 57bc6770f37a..887c965c2711 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1431,6 +1431,7 @@ F: include/linux/backlight.h
1431BATMAN ADVANCED 1431BATMAN ADVANCED
1432M: Marek Lindner <lindner_marek@yahoo.de> 1432M: Marek Lindner <lindner_marek@yahoo.de>
1433M: Simon Wunderlich <siwu@hrz.tu-chemnitz.de> 1433M: Simon Wunderlich <siwu@hrz.tu-chemnitz.de>
1434M: Antonio Quartulli <ordex@autistici.org>
1434L: b.a.t.m.a.n@lists.open-mesh.org 1435L: b.a.t.m.a.n@lists.open-mesh.org
1435W: http://www.open-mesh.org/ 1436W: http://www.open-mesh.org/
1436S: Maintained 1437S: Maintained
@@ -3519,12 +3520,6 @@ M: Deepak Saxena <dsaxena@plexity.net>
3519S: Maintained 3520S: Maintained
3520F: drivers/char/hw_random/ixp4xx-rng.c 3521F: drivers/char/hw_random/ixp4xx-rng.c
3521 3522
3522INTEL IXP2000 ETHERNET DRIVER
3523M: Lennert Buytenhek <kernel@wantstofly.org>
3524L: netdev@vger.kernel.org
3525S: Maintained
3526F: drivers/net/ethernet/xscale/ixp2000/
3527
3528INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf) 3523INTEL ETHERNET DRIVERS (e100/e1000/e1000e/igb/igbvf/ixgb/ixgbe/ixgbevf)
3529M: Jeff Kirsher <jeffrey.t.kirsher@intel.com> 3524M: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
3530M: Jesse Brandeburg <jesse.brandeburg@intel.com> 3525M: Jesse Brandeburg <jesse.brandeburg@intel.com>
@@ -5208,7 +5203,7 @@ S: Maintained
5208F: include/linux/personality.h 5203F: include/linux/personality.h
5209 5204
5210PHONET PROTOCOL 5205PHONET PROTOCOL
5211M: Remi Denis-Courmont <remi.denis-courmont@nokia.com> 5206M: Remi Denis-Courmont <courmisch@gmail.com>
5212S: Supported 5207S: Supported
5213F: Documentation/networking/phonet.txt 5208F: Documentation/networking/phonet.txt
5214F: include/linux/phonet.h 5209F: include/linux/phonet.h
@@ -6676,6 +6671,16 @@ L: alsa-devel@alsa-project.org (moderated for non-subscribers)
6676S: Maintained 6671S: Maintained
6677F: sound/soc/codecs/twl4030* 6672F: sound/soc/codecs/twl4030*
6678 6673
6674TI WILINK WIRELESS DRIVERS
6675M: Luciano Coelho <coelho@ti.com>
6676L: linux-wireless@vger.kernel.org
6677W: http://wireless.kernel.org/en/users/Drivers/wl12xx
6678W: http://wireless.kernel.org/en/users/Drivers/wl1251
6679T: git git://git.kernel.org/pub/scm/linux/kernel/git/luca/wl12xx.git
6680S: Maintained
6681F: drivers/net/wireless/ti/
6682F: include/linux/wl12xx.h
6683
6679TIPC NETWORK LAYER 6684TIPC NETWORK LAYER
6680M: Jon Maloy <jon.maloy@ericsson.com> 6685M: Jon Maloy <jon.maloy@ericsson.com>
6681M: Allan Stephens <allan.stephens@windriver.com> 6686M: Allan Stephens <allan.stephens@windriver.com>
@@ -7432,23 +7437,6 @@ M: Miloslav Trmac <mitr@volny.cz>
7432S: Maintained 7437S: Maintained
7433F: drivers/input/misc/wistron_btns.c 7438F: drivers/input/misc/wistron_btns.c
7434 7439
7435WL1251 WIRELESS DRIVER
7436M: Luciano Coelho <coelho@ti.com>
7437L: linux-wireless@vger.kernel.org
7438W: http://wireless.kernel.org/en/users/Drivers/wl1251
7439T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-testing.git
7440S: Maintained
7441F: drivers/net/wireless/wl1251/*
7442
7443WL1271 WIRELESS DRIVER
7444M: Luciano Coelho <coelho@ti.com>
7445L: linux-wireless@vger.kernel.org
7446W: http://wireless.kernel.org/en/users/Drivers/wl12xx
7447T: git git://git.kernel.org/pub/scm/linux/kernel/git/luca/wl12xx.git
7448S: Maintained
7449F: drivers/net/wireless/wl12xx/
7450F: include/linux/wl12xx.h
7451
7452WL3501 WIRELESS PCMCIA CARD DRIVER 7440WL3501 WIRELESS PCMCIA CARD DRIVER
7453M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net> 7441M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
7454L: linux-wireless@vger.kernel.org 7442L: linux-wireless@vger.kernel.org
diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c
index 364c19357e60..89106792d067 100644
--- a/arch/arm/mach-at91/at91rm9200.c
+++ b/arch/arm/mach-at91/at91rm9200.c
@@ -26,15 +26,6 @@
26#include "clock.h" 26#include "clock.h"
27#include "sam9_smc.h" 27#include "sam9_smc.h"
28 28
29static struct map_desc at91rm9200_io_desc[] __initdata = {
30 {
31 .virtual = AT91_VA_BASE_EMAC,
32 .pfn = __phys_to_pfn(AT91RM9200_BASE_EMAC),
33 .length = SZ_16K,
34 .type = MT_DEVICE,
35 },
36};
37
38/* -------------------------------------------------------------------- 29/* --------------------------------------------------------------------
39 * Clocks 30 * Clocks
40 * -------------------------------------------------------------------- */ 31 * -------------------------------------------------------------------- */
@@ -315,7 +306,6 @@ static void __init at91rm9200_map_io(void)
315{ 306{
316 /* Map peripherals */ 307 /* Map peripherals */
317 at91_init_sram(0, AT91RM9200_SRAM_BASE, AT91RM9200_SRAM_SIZE); 308 at91_init_sram(0, AT91RM9200_SRAM_BASE, AT91RM9200_SRAM_SIZE);
318 iotable_init(at91rm9200_io_desc, ARRAY_SIZE(at91rm9200_io_desc));
319} 309}
320 310
321static void __init at91rm9200_ioremap_registers(void) 311static void __init at91rm9200_ioremap_registers(void)
diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c
index 05774e5b1cba..60c472861e5e 100644
--- a/arch/arm/mach-at91/at91rm9200_devices.c
+++ b/arch/arm/mach-at91/at91rm9200_devices.c
@@ -140,8 +140,8 @@ static struct macb_platform_data eth_data;
140 140
141static struct resource eth_resources[] = { 141static struct resource eth_resources[] = {
142 [0] = { 142 [0] = {
143 .start = AT91_VA_BASE_EMAC, 143 .start = AT91RM9200_BASE_EMAC,
144 .end = AT91_VA_BASE_EMAC + SZ_16K - 1, 144 .end = AT91RM9200_BASE_EMAC + SZ_16K - 1,
145 .flags = IORESOURCE_MEM, 145 .flags = IORESOURCE_MEM,
146 }, 146 },
147 [1] = { 147 [1] = {
diff --git a/arch/arm/mach-at91/include/mach/hardware.h b/arch/arm/mach-at91/include/mach/hardware.h
index e9e29a6c3868..01db372be8e5 100644
--- a/arch/arm/mach-at91/include/mach/hardware.h
+++ b/arch/arm/mach-at91/include/mach/hardware.h
@@ -94,7 +94,6 @@
94 * Virtual to Physical Address mapping for IO devices. 94 * Virtual to Physical Address mapping for IO devices.
95 */ 95 */
96#define AT91_VA_BASE_SYS AT91_IO_P2V(AT91_BASE_SYS) 96#define AT91_VA_BASE_SYS AT91_IO_P2V(AT91_BASE_SYS)
97#define AT91_VA_BASE_EMAC AT91_IO_P2V(AT91RM9200_BASE_EMAC)
98 97
99 /* Internal SRAM is mapped below the IO devices */ 98 /* Internal SRAM is mapped below the IO devices */
100#define AT91_SRAM_MAX SZ_1M 99#define AT91_SRAM_MAX SZ_1M
diff --git a/arch/arm/mach-ixp4xx/include/mach/ixp46x_ts.h b/arch/arm/mach-ixp4xx/include/mach/ixp46x_ts.h
index 292d55ed2113..cf03614d250d 100644
--- a/arch/arm/mach-ixp4xx/include/mach/ixp46x_ts.h
+++ b/arch/arm/mach-ixp4xx/include/mach/ixp46x_ts.h
@@ -75,4 +75,7 @@ struct ixp46x_ts_regs {
75#define TX_SNAPSHOT_LOCKED (1<<0) 75#define TX_SNAPSHOT_LOCKED (1<<0)
76#define RX_SNAPSHOT_LOCKED (1<<1) 76#define RX_SNAPSHOT_LOCKED (1<<1)
77 77
78/* The ptp_ixp46x module will set this variable */
79extern int ixp46x_phc_index;
80
78#endif 81#endif
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 6c0683d3fcba..5f6acce45a0d 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -30,6 +30,7 @@ config SPARC
30 select USE_GENERIC_SMP_HELPERS if SMP 30 select USE_GENERIC_SMP_HELPERS if SMP
31 select GENERIC_PCI_IOMAP 31 select GENERIC_PCI_IOMAP
32 select HAVE_NMI_WATCHDOG if SPARC64 32 select HAVE_NMI_WATCHDOG if SPARC64
33 select HAVE_BPF_JIT
33 34
34config SPARC32 35config SPARC32
35 def_bool !64BIT 36 def_bool !64BIT
diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
index eddcfb36aafb..0e5de13b56c5 100644
--- a/arch/sparc/Makefile
+++ b/arch/sparc/Makefile
@@ -66,6 +66,7 @@ head-y += arch/sparc/kernel/init_task.o
66 66
67core-y += arch/sparc/kernel/ 67core-y += arch/sparc/kernel/
68core-y += arch/sparc/mm/ arch/sparc/math-emu/ 68core-y += arch/sparc/mm/ arch/sparc/math-emu/
69core-y += arch/sparc/net/
69 70
70libs-y += arch/sparc/prom/ 71libs-y += arch/sparc/prom/
71libs-y += arch/sparc/lib/ 72libs-y += arch/sparc/lib/
diff --git a/arch/sparc/net/Makefile b/arch/sparc/net/Makefile
new file mode 100644
index 000000000000..1306a58ac541
--- /dev/null
+++ b/arch/sparc/net/Makefile
@@ -0,0 +1,4 @@
1#
2# Arch-specific network modules
3#
4obj-$(CONFIG_BPF_JIT) += bpf_jit_asm.o bpf_jit_comp.o
diff --git a/arch/sparc/net/bpf_jit.h b/arch/sparc/net/bpf_jit.h
new file mode 100644
index 000000000000..33d6b375ff12
--- /dev/null
+++ b/arch/sparc/net/bpf_jit.h
@@ -0,0 +1,68 @@
1#ifndef _BPF_JIT_H
2#define _BPF_JIT_H
3
4/* Conventions:
5 * %g1 : temporary
6 * %g2 : Secondary temporary used by SKB data helper stubs.
7 * %g3 : packet offset passed into SKB data helper stubs.
8 * %o0 : pointer to skb (first argument given to JIT function)
9 * %o1 : BPF A accumulator
10 * %o2 : BPF X accumulator
11 * %o3 : Holds saved %o7 so we can call helper functions without needing
12 * to allocate a register window.
13 * %o4 : skb->len - skb->data_len
14 * %o5 : skb->data
15 */
16
17#ifndef __ASSEMBLER__
18#define G0 0x00
19#define G1 0x01
20#define G3 0x03
21#define G6 0x06
22#define O0 0x08
23#define O1 0x09
24#define O2 0x0a
25#define O3 0x0b
26#define O4 0x0c
27#define O5 0x0d
28#define SP 0x0e
29#define O7 0x0f
30#define FP 0x1e
31
32#define r_SKB O0
33#define r_A O1
34#define r_X O2
35#define r_saved_O7 O3
36#define r_HEADLEN O4
37#define r_SKB_DATA O5
38#define r_TMP G1
39#define r_TMP2 G2
40#define r_OFF G3
41
42/* assembly code in arch/sparc/net/bpf_jit_asm.S */
43extern u32 bpf_jit_load_word[];
44extern u32 bpf_jit_load_half[];
45extern u32 bpf_jit_load_byte[];
46extern u32 bpf_jit_load_byte_msh[];
47extern u32 bpf_jit_load_word_positive_offset[];
48extern u32 bpf_jit_load_half_positive_offset[];
49extern u32 bpf_jit_load_byte_positive_offset[];
50extern u32 bpf_jit_load_byte_msh_positive_offset[];
51extern u32 bpf_jit_load_word_negative_offset[];
52extern u32 bpf_jit_load_half_negative_offset[];
53extern u32 bpf_jit_load_byte_negative_offset[];
54extern u32 bpf_jit_load_byte_msh_negative_offset[];
55
56#else
57#define r_SKB %o0
58#define r_A %o1
59#define r_X %o2
60#define r_saved_O7 %o3
61#define r_HEADLEN %o4
62#define r_SKB_DATA %o5
63#define r_TMP %g1
64#define r_TMP2 %g2
65#define r_OFF %g3
66#endif
67
68#endif /* _BPF_JIT_H */
diff --git a/arch/sparc/net/bpf_jit_asm.S b/arch/sparc/net/bpf_jit_asm.S
new file mode 100644
index 000000000000..9d016c7017f7
--- /dev/null
+++ b/arch/sparc/net/bpf_jit_asm.S
@@ -0,0 +1,205 @@
1#include <asm/ptrace.h>
2
3#include "bpf_jit.h"
4
5#ifdef CONFIG_SPARC64
6#define SAVE_SZ 176
7#define SCRATCH_OFF STACK_BIAS + 128
8#define BE_PTR(label) be,pn %xcc, label
9#else
10#define SAVE_SZ 96
11#define SCRATCH_OFF 72
12#define BE_PTR(label) be label
13#endif
14
15#define SKF_MAX_NEG_OFF (-0x200000) /* SKF_LL_OFF from filter.h */
16
17 .text
18 .globl bpf_jit_load_word
19bpf_jit_load_word:
20 cmp r_OFF, 0
21 bl bpf_slow_path_word_neg
22 nop
23 .globl bpf_jit_load_word_positive_offset
24bpf_jit_load_word_positive_offset:
25 sub r_HEADLEN, r_OFF, r_TMP
26 cmp r_TMP, 3
27 ble bpf_slow_path_word
28 add r_SKB_DATA, r_OFF, r_TMP
29 andcc r_TMP, 3, %g0
30 bne load_word_unaligned
31 nop
32 retl
33 ld [r_TMP], r_A
34load_word_unaligned:
35 ldub [r_TMP + 0x0], r_OFF
36 ldub [r_TMP + 0x1], r_TMP2
37 sll r_OFF, 8, r_OFF
38 or r_OFF, r_TMP2, r_OFF
39 ldub [r_TMP + 0x2], r_TMP2
40 sll r_OFF, 8, r_OFF
41 or r_OFF, r_TMP2, r_OFF
42 ldub [r_TMP + 0x3], r_TMP2
43 sll r_OFF, 8, r_OFF
44 retl
45 or r_OFF, r_TMP2, r_A
46
47 .globl bpf_jit_load_half
48bpf_jit_load_half:
49 cmp r_OFF, 0
50 bl bpf_slow_path_half_neg
51 nop
52 .globl bpf_jit_load_half_positive_offset
53bpf_jit_load_half_positive_offset:
54 sub r_HEADLEN, r_OFF, r_TMP
55 cmp r_TMP, 1
56 ble bpf_slow_path_half
57 add r_SKB_DATA, r_OFF, r_TMP
58 andcc r_TMP, 1, %g0
59 bne load_half_unaligned
60 nop
61 retl
62 lduh [r_TMP], r_A
63load_half_unaligned:
64 ldub [r_TMP + 0x0], r_OFF
65 ldub [r_TMP + 0x1], r_TMP2
66 sll r_OFF, 8, r_OFF
67 retl
68 or r_OFF, r_TMP2, r_A
69
70 .globl bpf_jit_load_byte
71bpf_jit_load_byte:
72 cmp r_OFF, 0
73 bl bpf_slow_path_byte_neg
74 nop
75 .globl bpf_jit_load_byte_positive_offset
76bpf_jit_load_byte_positive_offset:
77 cmp r_OFF, r_HEADLEN
78 bge bpf_slow_path_byte
79 nop
80 retl
81 ldub [r_SKB_DATA + r_OFF], r_A
82
83 .globl bpf_jit_load_byte_msh
84bpf_jit_load_byte_msh:
85 cmp r_OFF, 0
86 bl bpf_slow_path_byte_msh_neg
87 nop
88 .globl bpf_jit_load_byte_msh_positive_offset
89bpf_jit_load_byte_msh_positive_offset:
90 cmp r_OFF, r_HEADLEN
91 bge bpf_slow_path_byte_msh
92 nop
93 ldub [r_SKB_DATA + r_OFF], r_OFF
94 and r_OFF, 0xf, r_OFF
95 retl
96 sll r_OFF, 2, r_X
97
98#define bpf_slow_path_common(LEN) \
99 save %sp, -SAVE_SZ, %sp; \
100 mov %i0, %o0; \
101 mov r_OFF, %o1; \
102 add %fp, SCRATCH_OFF, %o2; \
103 call skb_copy_bits; \
104 mov (LEN), %o3; \
105 cmp %o0, 0; \
106 restore;
107
108bpf_slow_path_word:
109 bpf_slow_path_common(4)
110 bl bpf_error
111 ld [%sp + SCRATCH_OFF], r_A
112 retl
113 nop
114bpf_slow_path_half:
115 bpf_slow_path_common(2)
116 bl bpf_error
117 lduh [%sp + SCRATCH_OFF], r_A
118 retl
119 nop
120bpf_slow_path_byte:
121 bpf_slow_path_common(1)
122 bl bpf_error
123 ldub [%sp + SCRATCH_OFF], r_A
124 retl
125 nop
126bpf_slow_path_byte_msh:
127 bpf_slow_path_common(1)
128 bl bpf_error
129 ldub [%sp + SCRATCH_OFF], r_A
130 and r_OFF, 0xf, r_OFF
131 retl
132 sll r_OFF, 2, r_X
133
134#define bpf_negative_common(LEN) \
135 save %sp, -SAVE_SZ, %sp; \
136 mov %i0, %o0; \
137 mov r_OFF, %o1; \
138 call bpf_internal_load_pointer_neg_helper; \
139 mov (LEN), %o2; \
140 mov %o0, r_TMP; \
141 cmp %o0, 0; \
142 BE_PTR(bpf_error); \
143 restore;
144
145bpf_slow_path_word_neg:
146 sethi %hi(SKF_MAX_NEG_OFF), r_TMP
147 cmp r_OFF, r_TMP
148 bl bpf_error
149 nop
150 .globl bpf_jit_load_word_negative_offset
151bpf_jit_load_word_negative_offset:
152 bpf_negative_common(4)
153 andcc r_TMP, 3, %g0
154 bne load_word_unaligned
155 nop
156 retl
157 ld [r_TMP], r_A
158
159bpf_slow_path_half_neg:
160 sethi %hi(SKF_MAX_NEG_OFF), r_TMP
161 cmp r_OFF, r_TMP
162 bl bpf_error
163 nop
164 .globl bpf_jit_load_half_negative_offset
165bpf_jit_load_half_negative_offset:
166 bpf_negative_common(2)
167 andcc r_TMP, 1, %g0
168 bne load_half_unaligned
169 nop
170 retl
171 lduh [r_TMP], r_A
172
173bpf_slow_path_byte_neg:
174 sethi %hi(SKF_MAX_NEG_OFF), r_TMP
175 cmp r_OFF, r_TMP
176 bl bpf_error
177 nop
178 .globl bpf_jit_load_byte_negative_offset
179bpf_jit_load_byte_negative_offset:
180 bpf_negative_common(1)
181 retl
182 ldub [r_TMP], r_A
183
184bpf_slow_path_byte_msh_neg:
185 sethi %hi(SKF_MAX_NEG_OFF), r_TMP
186 cmp r_OFF, r_TMP
187 bl bpf_error
188 nop
189 .globl bpf_jit_load_byte_msh_negative_offset
190bpf_jit_load_byte_msh_negative_offset:
191 bpf_negative_common(1)
192 ldub [r_TMP], r_OFF
193 and r_OFF, 0xf, r_OFF
194 retl
195 sll r_OFF, 2, r_X
196
197bpf_error:
198 /* Make the JIT program return zero. The JIT epilogue
199 * stores away the original %o7 into r_saved_O7. The
200 * normal leaf function return is to use "retl" which
201 * would evalute to "jmpl %o7 + 8, %g0" but we want to
202 * use the saved value thus the sequence you see here.
203 */
204 jmpl r_saved_O7 + 8, %g0
205 clr %o0
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
new file mode 100644
index 000000000000..1a69244e785b
--- /dev/null
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -0,0 +1,802 @@
1#include <linux/moduleloader.h>
2#include <linux/workqueue.h>
3#include <linux/netdevice.h>
4#include <linux/filter.h>
5#include <linux/cache.h>
6
7#include <asm/cacheflush.h>
8#include <asm/ptrace.h>
9
10#include "bpf_jit.h"
11
12int bpf_jit_enable __read_mostly;
13
14static inline bool is_simm13(unsigned int value)
15{
16 return value + 0x1000 < 0x2000;
17}
18
19static void bpf_flush_icache(void *start_, void *end_)
20{
21#ifdef CONFIG_SPARC64
22 /* Cheetah's I-cache is fully coherent. */
23 if (tlb_type == spitfire) {
24 unsigned long start = (unsigned long) start_;
25 unsigned long end = (unsigned long) end_;
26
27 start &= ~7UL;
28 end = (end + 7UL) & ~7UL;
29 while (start < end) {
30 flushi(start);
31 start += 32;
32 }
33 }
34#endif
35}
36
37#define SEEN_DATAREF 1 /* might call external helpers */
38#define SEEN_XREG 2 /* ebx is used */
39#define SEEN_MEM 4 /* use mem[] for temporary storage */
40
41#define S13(X) ((X) & 0x1fff)
42#define IMMED 0x00002000
43#define RD(X) ((X) << 25)
44#define RS1(X) ((X) << 14)
45#define RS2(X) ((X))
46#define OP(X) ((X) << 30)
47#define OP2(X) ((X) << 22)
48#define OP3(X) ((X) << 19)
49#define COND(X) ((X) << 25)
50#define F1(X) OP(X)
51#define F2(X, Y) (OP(X) | OP2(Y))
52#define F3(X, Y) (OP(X) | OP3(Y))
53
54#define CONDN COND(0x0)
55#define CONDE COND(0x1)
56#define CONDLE COND(0x2)
57#define CONDL COND(0x3)
58#define CONDLEU COND(0x4)
59#define CONDCS COND(0x5)
60#define CONDNEG COND(0x6)
61#define CONDVC COND(0x7)
62#define CONDA COND(0x8)
63#define CONDNE COND(0x9)
64#define CONDG COND(0xa)
65#define CONDGE COND(0xb)
66#define CONDGU COND(0xc)
67#define CONDCC COND(0xd)
68#define CONDPOS COND(0xe)
69#define CONDVS COND(0xf)
70
71#define CONDGEU CONDCC
72#define CONDLU CONDCS
73
74#define WDISP22(X) (((X) >> 2) & 0x3fffff)
75
76#define BA (F2(0, 2) | CONDA)
77#define BGU (F2(0, 2) | CONDGU)
78#define BLEU (F2(0, 2) | CONDLEU)
79#define BGEU (F2(0, 2) | CONDGEU)
80#define BLU (F2(0, 2) | CONDLU)
81#define BE (F2(0, 2) | CONDE)
82#define BNE (F2(0, 2) | CONDNE)
83
84#ifdef CONFIG_SPARC64
85#define BNE_PTR (F2(0, 1) | CONDNE | (2 << 20))
86#else
87#define BNE_PTR BNE
88#endif
89
90#define SETHI(K, REG) \
91 (F2(0, 0x4) | RD(REG) | (((K) >> 10) & 0x3fffff))
92#define OR_LO(K, REG) \
93 (F3(2, 0x02) | IMMED | RS1(REG) | ((K) & 0x3ff) | RD(REG))
94
95#define ADD F3(2, 0x00)
96#define AND F3(2, 0x01)
97#define ANDCC F3(2, 0x11)
98#define OR F3(2, 0x02)
99#define SUB F3(2, 0x04)
100#define SUBCC F3(2, 0x14)
101#define MUL F3(2, 0x0a) /* umul */
102#define DIV F3(2, 0x0e) /* udiv */
103#define SLL F3(2, 0x25)
104#define SRL F3(2, 0x26)
105#define JMPL F3(2, 0x38)
106#define CALL F1(1)
107#define BR F2(0, 0x01)
108#define RD_Y F3(2, 0x28)
109#define WR_Y F3(2, 0x30)
110
111#define LD32 F3(3, 0x00)
112#define LD8 F3(3, 0x01)
113#define LD16 F3(3, 0x02)
114#define LD64 F3(3, 0x0b)
115#define ST32 F3(3, 0x04)
116
117#ifdef CONFIG_SPARC64
118#define LDPTR LD64
119#define BASE_STACKFRAME 176
120#else
121#define LDPTR LD32
122#define BASE_STACKFRAME 96
123#endif
124
125#define LD32I (LD32 | IMMED)
126#define LD8I (LD8 | IMMED)
127#define LD16I (LD16 | IMMED)
128#define LD64I (LD64 | IMMED)
129#define LDPTRI (LDPTR | IMMED)
130#define ST32I (ST32 | IMMED)
131
132#define emit_nop() \
133do { \
134 *prog++ = SETHI(0, G0); \
135} while (0)
136
137#define emit_neg() \
138do { /* sub %g0, r_A, r_A */ \
139 *prog++ = SUB | RS1(G0) | RS2(r_A) | RD(r_A); \
140} while (0)
141
142#define emit_reg_move(FROM, TO) \
143do { /* or %g0, FROM, TO */ \
144 *prog++ = OR | RS1(G0) | RS2(FROM) | RD(TO); \
145} while (0)
146
147#define emit_clear(REG) \
148do { /* or %g0, %g0, REG */ \
149 *prog++ = OR | RS1(G0) | RS2(G0) | RD(REG); \
150} while (0)
151
152#define emit_set_const(K, REG) \
153do { /* sethi %hi(K), REG */ \
154 *prog++ = SETHI(K, REG); \
155 /* or REG, %lo(K), REG */ \
156 *prog++ = OR_LO(K, REG); \
157} while (0)
158
159 /* Emit
160 *
161 * OP r_A, r_X, r_A
162 */
163#define emit_alu_X(OPCODE) \
164do { \
165 seen |= SEEN_XREG; \
166 *prog++ = OPCODE | RS1(r_A) | RS2(r_X) | RD(r_A); \
167} while (0)
168
169 /* Emit either:
170 *
171 * OP r_A, K, r_A
172 *
173 * or
174 *
175 * sethi %hi(K), r_TMP
176 * or r_TMP, %lo(K), r_TMP
177 * OP r_A, r_TMP, r_A
178 *
179 * depending upon whether K fits in a signed 13-bit
180 * immediate instruction field. Emit nothing if K
181 * is zero.
182 */
183#define emit_alu_K(OPCODE, K) \
184do { \
185 if (K) { \
186 unsigned int _insn = OPCODE; \
187 _insn |= RS1(r_A) | RD(r_A); \
188 if (is_simm13(K)) { \
189 *prog++ = _insn | IMMED | S13(K); \
190 } else { \
191 emit_set_const(K, r_TMP); \
192 *prog++ = _insn | RS2(r_TMP); \
193 } \
194 } \
195} while (0)
196
197#define emit_loadimm(K, DEST) \
198do { \
199 if (is_simm13(K)) { \
200 /* or %g0, K, DEST */ \
201 *prog++ = OR | IMMED | RS1(G0) | S13(K) | RD(DEST); \
202 } else { \
203 emit_set_const(K, DEST); \
204 } \
205} while (0)
206
207#define emit_loadptr(BASE, STRUCT, FIELD, DEST) \
208do { unsigned int _off = offsetof(STRUCT, FIELD); \
209 BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(void *)); \
210 *prog++ = LDPTRI | RS1(BASE) | S13(_off) | RD(DEST); \
211} while (0)
212
213#define emit_load32(BASE, STRUCT, FIELD, DEST) \
214do { unsigned int _off = offsetof(STRUCT, FIELD); \
215 BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u32)); \
216 *prog++ = LD32I | RS1(BASE) | S13(_off) | RD(DEST); \
217} while (0)
218
219#define emit_load16(BASE, STRUCT, FIELD, DEST) \
220do { unsigned int _off = offsetof(STRUCT, FIELD); \
221 BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u16)); \
222 *prog++ = LD16I | RS1(BASE) | S13(_off) | RD(DEST); \
223} while (0)
224
225#define __emit_load8(BASE, STRUCT, FIELD, DEST) \
226do { unsigned int _off = offsetof(STRUCT, FIELD); \
227 *prog++ = LD8I | RS1(BASE) | S13(_off) | RD(DEST); \
228} while (0)
229
230#define emit_load8(BASE, STRUCT, FIELD, DEST) \
231do { BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u8)); \
232 __emit_load8(BASE, STRUCT, FIELD, DEST); \
233} while (0)
234
235#define emit_ldmem(OFF, DEST) \
236do { *prog++ = LD32I | RS1(FP) | S13(-(OFF)) | RD(DEST); \
237} while (0)
238
239#define emit_stmem(OFF, SRC) \
240do { *prog++ = LD32I | RS1(FP) | S13(-(OFF)) | RD(SRC); \
241} while (0)
242
243#ifdef CONFIG_SMP
244#ifdef CONFIG_SPARC64
245#define emit_load_cpu(REG) \
246 emit_load16(G6, struct thread_info, cpu, REG)
247#else
248#define emit_load_cpu(REG) \
249 emit_load32(G6, struct thread_info, cpu, REG)
250#endif
251#else
252#define emit_load_cpu(REG) emit_clear(REG)
253#endif
254
255#define emit_skb_loadptr(FIELD, DEST) \
256 emit_loadptr(r_SKB, struct sk_buff, FIELD, DEST)
257#define emit_skb_load32(FIELD, DEST) \
258 emit_load32(r_SKB, struct sk_buff, FIELD, DEST)
259#define emit_skb_load16(FIELD, DEST) \
260 emit_load16(r_SKB, struct sk_buff, FIELD, DEST)
261#define __emit_skb_load8(FIELD, DEST) \
262 __emit_load8(r_SKB, struct sk_buff, FIELD, DEST)
263#define emit_skb_load8(FIELD, DEST) \
264 emit_load8(r_SKB, struct sk_buff, FIELD, DEST)
265
266#define emit_jmpl(BASE, IMM_OFF, LREG) \
267 *prog++ = (JMPL | IMMED | RS1(BASE) | S13(IMM_OFF) | RD(LREG))
268
269#define emit_call(FUNC) \
270do { void *_here = image + addrs[i] - 8; \
271 unsigned int _off = (void *)(FUNC) - _here; \
272 *prog++ = CALL | (((_off) >> 2) & 0x3fffffff); \
273 emit_nop(); \
274} while (0)
275
276#define emit_branch(BR_OPC, DEST) \
277do { unsigned int _here = addrs[i] - 8; \
278 *prog++ = BR_OPC | WDISP22((DEST) - _here); \
279} while (0)
280
281#define emit_branch_off(BR_OPC, OFF) \
282do { *prog++ = BR_OPC | WDISP22(OFF); \
283} while (0)
284
285#define emit_jump(DEST) emit_branch(BA, DEST)
286
287#define emit_read_y(REG) *prog++ = RD_Y | RD(REG)
288#define emit_write_y(REG) *prog++ = WR_Y | IMMED | RS1(REG) | S13(0)
289
290#define emit_cmp(R1, R2) \
291 *prog++ = (SUBCC | RS1(R1) | RS2(R2) | RD(G0))
292
293#define emit_cmpi(R1, IMM) \
294 *prog++ = (SUBCC | IMMED | RS1(R1) | S13(IMM) | RD(G0));
295
296#define emit_btst(R1, R2) \
297 *prog++ = (ANDCC | RS1(R1) | RS2(R2) | RD(G0))
298
299#define emit_btsti(R1, IMM) \
300 *prog++ = (ANDCC | IMMED | RS1(R1) | S13(IMM) | RD(G0));
301
302#define emit_sub(R1, R2, R3) \
303 *prog++ = (SUB | RS1(R1) | RS2(R2) | RD(R3))
304
305#define emit_subi(R1, IMM, R3) \
306 *prog++ = (SUB | IMMED | RS1(R1) | S13(IMM) | RD(R3))
307
308#define emit_add(R1, R2, R3) \
309 *prog++ = (ADD | RS1(R1) | RS2(R2) | RD(R3))
310
311#define emit_addi(R1, IMM, R3) \
312 *prog++ = (ADD | IMMED | RS1(R1) | S13(IMM) | RD(R3))
313
314#define emit_alloc_stack(SZ) \
315 *prog++ = (SUB | IMMED | RS1(SP) | S13(SZ) | RD(SP))
316
317#define emit_release_stack(SZ) \
318 *prog++ = (ADD | IMMED | RS1(SP) | S13(SZ) | RD(SP))
319
320/* A note about branch offset calculations. The addrs[] array,
321 * indexed by BPF instruction, records the address after all the
322 * sparc instructions emitted for that BPF instruction.
323 *
324 * The most common case is to emit a branch at the end of such
325 * a code sequence. So this would be two instructions, the
326 * branch and it's delay slot.
327 *
328 * Therefore by default the branch emitters calculate the branch
329 * offset field as:
330 *
331 * destination - (addrs[i] - 8)
332 *
333 * This "addrs[i] - 8" is the address of the branch itself or
334 * what "." would be in assembler notation. The "8" part is
335 * how we take into consideration the branch and it's delay
336 * slot mentioned above.
337 *
338 * Sometimes we need to emit a branch earlier in the code
339 * sequence. And in these situations we adjust "destination"
340 * to accomodate this difference. For example, if we needed
341 * to emit a branch (and it's delay slot) right before the
342 * final instruction emitted for a BPF opcode, we'd use
343 * "destination + 4" instead of just plain "destination" above.
344 *
345 * This is why you see all of these funny emit_branch() and
346 * emit_jump() calls with adjusted offsets.
347 */
348
349void bpf_jit_compile(struct sk_filter *fp)
350{
351 unsigned int cleanup_addr, proglen, oldproglen = 0;
352 u32 temp[8], *prog, *func, seen = 0, pass;
353 const struct sock_filter *filter = fp->insns;
354 int i, flen = fp->len, pc_ret0 = -1;
355 unsigned int *addrs;
356 void *image;
357
358 if (!bpf_jit_enable)
359 return;
360
361 addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL);
362 if (addrs == NULL)
363 return;
364
365 /* Before first pass, make a rough estimation of addrs[]
366 * each bpf instruction is translated to less than 64 bytes
367 */
368 for (proglen = 0, i = 0; i < flen; i++) {
369 proglen += 64;
370 addrs[i] = proglen;
371 }
372 cleanup_addr = proglen; /* epilogue address */
373 image = NULL;
374 for (pass = 0; pass < 10; pass++) {
375 u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
376
377 /* no prologue/epilogue for trivial filters (RET something) */
378 proglen = 0;
379 prog = temp;
380
381 /* Prologue */
382 if (seen_or_pass0) {
383 if (seen_or_pass0 & SEEN_MEM) {
384 unsigned int sz = BASE_STACKFRAME;
385 sz += BPF_MEMWORDS * sizeof(u32);
386 emit_alloc_stack(sz);
387 }
388
389 /* Make sure we dont leek kernel memory. */
390 if (seen_or_pass0 & SEEN_XREG)
391 emit_clear(r_X);
392
393 /* If this filter needs to access skb data,
394 * load %o4 and %o5 with:
395 * %o4 = skb->len - skb->data_len
396 * %o5 = skb->data
397 * And also back up %o7 into r_saved_O7 so we can
398 * invoke the stubs using 'call'.
399 */
400 if (seen_or_pass0 & SEEN_DATAREF) {
401 emit_load32(r_SKB, struct sk_buff, len, r_HEADLEN);
402 emit_load32(r_SKB, struct sk_buff, data_len, r_TMP);
403 emit_sub(r_HEADLEN, r_TMP, r_HEADLEN);
404 emit_loadptr(r_SKB, struct sk_buff, data, r_SKB_DATA);
405 }
406 }
407 emit_reg_move(O7, r_saved_O7);
408
409 switch (filter[0].code) {
410 case BPF_S_RET_K:
411 case BPF_S_LD_W_LEN:
412 case BPF_S_ANC_PROTOCOL:
413 case BPF_S_ANC_PKTTYPE:
414 case BPF_S_ANC_IFINDEX:
415 case BPF_S_ANC_MARK:
416 case BPF_S_ANC_RXHASH:
417 case BPF_S_ANC_CPU:
418 case BPF_S_ANC_QUEUE:
419 case BPF_S_LD_W_ABS:
420 case BPF_S_LD_H_ABS:
421 case BPF_S_LD_B_ABS:
422 /* The first instruction sets the A register (or is
423 * a "RET 'constant'")
424 */
425 break;
426 default:
427 /* Make sure we dont leak kernel information to the
428 * user.
429 */
430 emit_clear(r_A); /* A = 0 */
431 }
432
433 for (i = 0; i < flen; i++) {
434 unsigned int K = filter[i].k;
435 unsigned int t_offset;
436 unsigned int f_offset;
437 u32 t_op, f_op;
438 int ilen;
439
440 switch (filter[i].code) {
441 case BPF_S_ALU_ADD_X: /* A += X; */
442 emit_alu_X(ADD);
443 break;
444 case BPF_S_ALU_ADD_K: /* A += K; */
445 emit_alu_K(ADD, K);
446 break;
447 case BPF_S_ALU_SUB_X: /* A -= X; */
448 emit_alu_X(SUB);
449 break;
450 case BPF_S_ALU_SUB_K: /* A -= K */
451 emit_alu_K(SUB, K);
452 break;
453 case BPF_S_ALU_AND_X: /* A &= X */
454 emit_alu_X(AND);
455 break;
456 case BPF_S_ALU_AND_K: /* A &= K */
457 emit_alu_K(AND, K);
458 break;
459 case BPF_S_ALU_OR_X: /* A |= X */
460 emit_alu_X(OR);
461 break;
462 case BPF_S_ALU_OR_K: /* A |= K */
463 emit_alu_K(OR, K);
464 break;
465 case BPF_S_ALU_LSH_X: /* A <<= X */
466 emit_alu_X(SLL);
467 break;
468 case BPF_S_ALU_LSH_K: /* A <<= K */
469 emit_alu_K(SLL, K);
470 break;
471 case BPF_S_ALU_RSH_X: /* A >>= X */
472 emit_alu_X(SRL);
473 break;
474 case BPF_S_ALU_RSH_K: /* A >>= K */
475 emit_alu_K(SRL, K);
476 break;
477 case BPF_S_ALU_MUL_X: /* A *= X; */
478 emit_alu_X(MUL);
479 break;
480 case BPF_S_ALU_MUL_K: /* A *= K */
481 emit_alu_K(MUL, K);
482 break;
483 case BPF_S_ALU_DIV_K: /* A /= K */
484 emit_alu_K(MUL, K);
485 emit_read_y(r_A);
486 break;
487 case BPF_S_ALU_DIV_X: /* A /= X; */
488 emit_cmpi(r_X, 0);
489 if (pc_ret0 > 0) {
490 t_offset = addrs[pc_ret0 - 1];
491#ifdef CONFIG_SPARC32
492 emit_branch(BE, t_offset + 20);
493#else
494 emit_branch(BE, t_offset + 8);
495#endif
496 emit_nop(); /* delay slot */
497 } else {
498 emit_branch_off(BNE, 16);
499 emit_nop();
500#ifdef CONFIG_SPARC32
501 emit_jump(cleanup_addr + 20);
502#else
503 emit_jump(cleanup_addr + 8);
504#endif
505 emit_clear(r_A);
506 }
507 emit_write_y(G0);
508#ifdef CONFIG_SPARC32
509 /* The Sparc v8 architecture requires
510 * three instructions between a %y
511 * register write and the first use.
512 */
513 emit_nop();
514 emit_nop();
515 emit_nop();
516#endif
517 emit_alu_X(DIV);
518 break;
519 case BPF_S_ALU_NEG:
520 emit_neg();
521 break;
522 case BPF_S_RET_K:
523 if (!K) {
524 if (pc_ret0 == -1)
525 pc_ret0 = i;
526 emit_clear(r_A);
527 } else {
528 emit_loadimm(K, r_A);
529 }
530 /* Fallthrough */
531 case BPF_S_RET_A:
532 if (seen_or_pass0) {
533 if (i != flen - 1) {
534 emit_jump(cleanup_addr);
535 emit_nop();
536 break;
537 }
538 if (seen_or_pass0 & SEEN_MEM) {
539 unsigned int sz = BASE_STACKFRAME;
540 sz += BPF_MEMWORDS * sizeof(u32);
541 emit_release_stack(sz);
542 }
543 }
544 /* jmpl %r_saved_O7 + 8, %g0 */
545 emit_jmpl(r_saved_O7, 8, G0);
546 emit_reg_move(r_A, O0); /* delay slot */
547 break;
548 case BPF_S_MISC_TAX:
549 seen |= SEEN_XREG;
550 emit_reg_move(r_A, r_X);
551 break;
552 case BPF_S_MISC_TXA:
553 seen |= SEEN_XREG;
554 emit_reg_move(r_X, r_A);
555 break;
556 case BPF_S_ANC_CPU:
557 emit_load_cpu(r_A);
558 break;
559 case BPF_S_ANC_PROTOCOL:
560 emit_skb_load16(protocol, r_A);
561 break;
562#if 0
563 /* GCC won't let us take the address of
564 * a bit field even though we very much
565 * know what we are doing here.
566 */
567 case BPF_S_ANC_PKTTYPE:
568 __emit_skb_load8(pkt_type, r_A);
569 emit_alu_K(SRL, 5);
570 break;
571#endif
572 case BPF_S_ANC_IFINDEX:
573 emit_skb_loadptr(dev, r_A);
574 emit_cmpi(r_A, 0);
575 emit_branch(BNE_PTR, cleanup_addr + 4);
576 emit_nop();
577 emit_load32(r_A, struct net_device, ifindex, r_A);
578 break;
579 case BPF_S_ANC_MARK:
580 emit_skb_load32(mark, r_A);
581 break;
582 case BPF_S_ANC_QUEUE:
583 emit_skb_load16(queue_mapping, r_A);
584 break;
585 case BPF_S_ANC_HATYPE:
586 emit_skb_loadptr(dev, r_A);
587 emit_cmpi(r_A, 0);
588 emit_branch(BNE_PTR, cleanup_addr + 4);
589 emit_nop();
590 emit_load16(r_A, struct net_device, type, r_A);
591 break;
592 case BPF_S_ANC_RXHASH:
593 emit_skb_load32(rxhash, r_A);
594 break;
595
596 case BPF_S_LD_IMM:
597 emit_loadimm(K, r_A);
598 break;
599 case BPF_S_LDX_IMM:
600 emit_loadimm(K, r_X);
601 break;
602 case BPF_S_LD_MEM:
603 emit_ldmem(K * 4, r_A);
604 break;
605 case BPF_S_LDX_MEM:
606 emit_ldmem(K * 4, r_X);
607 break;
608 case BPF_S_ST:
609 emit_stmem(K * 4, r_A);
610 break;
611 case BPF_S_STX:
612 emit_stmem(K * 4, r_X);
613 break;
614
615#define CHOOSE_LOAD_FUNC(K, func) \
616 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
617
618 case BPF_S_LD_W_ABS:
619 func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_word);
620common_load: seen |= SEEN_DATAREF;
621 emit_loadimm(K, r_OFF);
622 emit_call(func);
623 break;
624 case BPF_S_LD_H_ABS:
625 func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_half);
626 goto common_load;
627 case BPF_S_LD_B_ABS:
628 func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte);
629 goto common_load;
630 case BPF_S_LDX_B_MSH:
631 func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte_msh);
632 goto common_load;
633 case BPF_S_LD_W_IND:
634 func = bpf_jit_load_word;
635common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
636 if (K) {
637 if (is_simm13(K)) {
638 emit_addi(r_X, K, r_OFF);
639 } else {
640 emit_loadimm(K, r_TMP);
641 emit_add(r_X, r_TMP, r_OFF);
642 }
643 } else {
644 emit_reg_move(r_X, r_OFF);
645 }
646 emit_call(func);
647 break;
648 case BPF_S_LD_H_IND:
649 func = bpf_jit_load_half;
650 goto common_load_ind;
651 case BPF_S_LD_B_IND:
652 func = bpf_jit_load_byte;
653 goto common_load_ind;
654 case BPF_S_JMP_JA:
655 emit_jump(addrs[i + K]);
656 emit_nop();
657 break;
658
659#define COND_SEL(CODE, TOP, FOP) \
660 case CODE: \
661 t_op = TOP; \
662 f_op = FOP; \
663 goto cond_branch
664
665 COND_SEL(BPF_S_JMP_JGT_K, BGU, BLEU);
666 COND_SEL(BPF_S_JMP_JGE_K, BGEU, BLU);
667 COND_SEL(BPF_S_JMP_JEQ_K, BE, BNE);
668 COND_SEL(BPF_S_JMP_JSET_K, BNE, BE);
669 COND_SEL(BPF_S_JMP_JGT_X, BGU, BLEU);
670 COND_SEL(BPF_S_JMP_JGE_X, BGEU, BLU);
671 COND_SEL(BPF_S_JMP_JEQ_X, BE, BNE);
672 COND_SEL(BPF_S_JMP_JSET_X, BNE, BE);
673
674cond_branch: f_offset = addrs[i + filter[i].jf];
675 t_offset = addrs[i + filter[i].jt];
676
677 /* same targets, can avoid doing the test :) */
678 if (filter[i].jt == filter[i].jf) {
679 emit_jump(t_offset);
680 emit_nop();
681 break;
682 }
683
684 switch (filter[i].code) {
685 case BPF_S_JMP_JGT_X:
686 case BPF_S_JMP_JGE_X:
687 case BPF_S_JMP_JEQ_X:
688 seen |= SEEN_XREG;
689 emit_cmp(r_A, r_X);
690 break;
691 case BPF_S_JMP_JSET_X:
692 seen |= SEEN_XREG;
693 emit_btst(r_A, r_X);
694 break;
695 case BPF_S_JMP_JEQ_K:
696 case BPF_S_JMP_JGT_K:
697 case BPF_S_JMP_JGE_K:
698 if (is_simm13(K)) {
699 emit_cmpi(r_A, K);
700 } else {
701 emit_loadimm(K, r_TMP);
702 emit_cmp(r_A, r_TMP);
703 }
704 break;
705 case BPF_S_JMP_JSET_K:
706 if (is_simm13(K)) {
707 emit_btsti(r_A, K);
708 } else {
709 emit_loadimm(K, r_TMP);
710 emit_btst(r_A, r_TMP);
711 }
712 break;
713 }
714 if (filter[i].jt != 0) {
715 if (filter[i].jf)
716 t_offset += 8;
717 emit_branch(t_op, t_offset);
718 emit_nop(); /* delay slot */
719 if (filter[i].jf) {
720 emit_jump(f_offset);
721 emit_nop();
722 }
723 break;
724 }
725 emit_branch(f_op, f_offset);
726 emit_nop(); /* delay slot */
727 break;
728
729 default:
730 /* hmm, too complex filter, give up with jit compiler */
731 goto out;
732 }
733 ilen = (void *) prog - (void *) temp;
734 if (image) {
735 if (unlikely(proglen + ilen > oldproglen)) {
736 pr_err("bpb_jit_compile fatal error\n");
737 kfree(addrs);
738 module_free(NULL, image);
739 return;
740 }
741 memcpy(image + proglen, temp, ilen);
742 }
743 proglen += ilen;
744 addrs[i] = proglen;
745 prog = temp;
746 }
747 /* last bpf instruction is always a RET :
748 * use it to give the cleanup instruction(s) addr
749 */
750 cleanup_addr = proglen - 8; /* jmpl; mov r_A,%o0; */
751 if (seen_or_pass0 & SEEN_MEM)
752 cleanup_addr -= 4; /* add %sp, X, %sp; */
753
754 if (image) {
755 if (proglen != oldproglen)
756 pr_err("bpb_jit_compile proglen=%u != oldproglen=%u\n",
757 proglen, oldproglen);
758 break;
759 }
760 if (proglen == oldproglen) {
761 image = module_alloc(max_t(unsigned int,
762 proglen,
763 sizeof(struct work_struct)));
764 if (!image)
765 goto out;
766 }
767 oldproglen = proglen;
768 }
769
770 if (bpf_jit_enable > 1)
771 pr_err("flen=%d proglen=%u pass=%d image=%p\n",
772 flen, proglen, pass, image);
773
774 if (image) {
775 if (bpf_jit_enable > 1)
776 print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_ADDRESS,
777 16, 1, image, proglen, false);
778 bpf_flush_icache(image, image + proglen);
779 fp->bpf_func = (void *)image;
780 }
781out:
782 kfree(addrs);
783 return;
784}
785
786static void jit_free_defer(struct work_struct *arg)
787{
788 module_free(NULL, arg);
789}
790
791/* run from softirq, we must use a work_struct to call
792 * module_free() from process context
793 */
794void bpf_jit_free(struct sk_filter *fp)
795{
796 if (fp->bpf_func != sk_run_filter) {
797 struct work_struct *work = (struct work_struct *)fp->bpf_func;
798
799 INIT_WORK(work, jit_free_defer);
800 schedule_work(work);
801 }
802}
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index 8d3a056ebeea..533de9550a82 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -397,9 +397,9 @@ static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
397 rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize; 397 rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
398 rblkcipher.ivsize = alg->cra_ablkcipher.ivsize; 398 rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;
399 399
400 NLA_PUT(skb, CRYPTOCFGA_REPORT_BLKCIPHER, 400 if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
401 sizeof(struct crypto_report_blkcipher), &rblkcipher); 401 sizeof(struct crypto_report_blkcipher), &rblkcipher))
402 402 goto nla_put_failure;
403 return 0; 403 return 0;
404 404
405nla_put_failure: 405nla_put_failure:
@@ -478,9 +478,9 @@ static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
478 rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize; 478 rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
479 rblkcipher.ivsize = alg->cra_ablkcipher.ivsize; 479 rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;
480 480
481 NLA_PUT(skb, CRYPTOCFGA_REPORT_BLKCIPHER, 481 if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
482 sizeof(struct crypto_report_blkcipher), &rblkcipher); 482 sizeof(struct crypto_report_blkcipher), &rblkcipher))
483 483 goto nla_put_failure;
484 return 0; 484 return 0;
485 485
486nla_put_failure: 486nla_put_failure:
diff --git a/crypto/aead.c b/crypto/aead.c
index e4cb35159be4..0b8121ebec07 100644
--- a/crypto/aead.c
+++ b/crypto/aead.c
@@ -125,9 +125,9 @@ static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
125 raead.maxauthsize = aead->maxauthsize; 125 raead.maxauthsize = aead->maxauthsize;
126 raead.ivsize = aead->ivsize; 126 raead.ivsize = aead->ivsize;
127 127
128 NLA_PUT(skb, CRYPTOCFGA_REPORT_AEAD, 128 if (nla_put(skb, CRYPTOCFGA_REPORT_AEAD,
129 sizeof(struct crypto_report_aead), &raead); 129 sizeof(struct crypto_report_aead), &raead))
130 130 goto nla_put_failure;
131 return 0; 131 return 0;
132 132
133nla_put_failure: 133nla_put_failure:
@@ -210,9 +210,9 @@ static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg)
210 raead.maxauthsize = aead->maxauthsize; 210 raead.maxauthsize = aead->maxauthsize;
211 raead.ivsize = aead->ivsize; 211 raead.ivsize = aead->ivsize;
212 212
213 NLA_PUT(skb, CRYPTOCFGA_REPORT_AEAD, 213 if (nla_put(skb, CRYPTOCFGA_REPORT_AEAD,
214 sizeof(struct crypto_report_aead), &raead); 214 sizeof(struct crypto_report_aead), &raead))
215 215 goto nla_put_failure;
216 return 0; 216 return 0;
217 217
218nla_put_failure: 218nla_put_failure:
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 33bc9b62e9ae..3887856c2dd6 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -409,9 +409,9 @@ static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
409 rhash.blocksize = alg->cra_blocksize; 409 rhash.blocksize = alg->cra_blocksize;
410 rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize; 410 rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;
411 411
412 NLA_PUT(skb, CRYPTOCFGA_REPORT_HASH, 412 if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
413 sizeof(struct crypto_report_hash), &rhash); 413 sizeof(struct crypto_report_hash), &rhash))
414 414 goto nla_put_failure;
415 return 0; 415 return 0;
416 416
417nla_put_failure: 417nla_put_failure:
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 4dd80c725498..a8d85a1d670e 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -508,9 +508,9 @@ static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
508 rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize; 508 rblkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
509 rblkcipher.ivsize = alg->cra_blkcipher.ivsize; 509 rblkcipher.ivsize = alg->cra_blkcipher.ivsize;
510 510
511 NLA_PUT(skb, CRYPTOCFGA_REPORT_BLKCIPHER, 511 if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
512 sizeof(struct crypto_report_blkcipher), &rblkcipher); 512 sizeof(struct crypto_report_blkcipher), &rblkcipher))
513 513 goto nla_put_failure;
514 return 0; 514 return 0;
515 515
516nla_put_failure: 516nla_put_failure:
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index f1ea0a064135..5a37eadb4e56 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -81,9 +81,9 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
81 rcipher.min_keysize = alg->cra_cipher.cia_min_keysize; 81 rcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
82 rcipher.max_keysize = alg->cra_cipher.cia_max_keysize; 82 rcipher.max_keysize = alg->cra_cipher.cia_max_keysize;
83 83
84 NLA_PUT(skb, CRYPTOCFGA_REPORT_CIPHER, 84 if (nla_put(skb, CRYPTOCFGA_REPORT_CIPHER,
85 sizeof(struct crypto_report_cipher), &rcipher); 85 sizeof(struct crypto_report_cipher), &rcipher))
86 86 goto nla_put_failure;
87 return 0; 87 return 0;
88 88
89nla_put_failure: 89nla_put_failure:
@@ -96,9 +96,9 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
96 96
97 snprintf(rcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "compression"); 97 snprintf(rcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "compression");
98 98
99 NLA_PUT(skb, CRYPTOCFGA_REPORT_COMPRESS, 99 if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
100 sizeof(struct crypto_report_comp), &rcomp); 100 sizeof(struct crypto_report_comp), &rcomp))
101 101 goto nla_put_failure;
102 return 0; 102 return 0;
103 103
104nla_put_failure: 104nla_put_failure:
@@ -117,16 +117,16 @@ static int crypto_report_one(struct crypto_alg *alg,
117 ualg->cru_flags = alg->cra_flags; 117 ualg->cru_flags = alg->cra_flags;
118 ualg->cru_refcnt = atomic_read(&alg->cra_refcnt); 118 ualg->cru_refcnt = atomic_read(&alg->cra_refcnt);
119 119
120 NLA_PUT_U32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority); 120 if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority))
121 121 goto nla_put_failure;
122 if (alg->cra_flags & CRYPTO_ALG_LARVAL) { 122 if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
123 struct crypto_report_larval rl; 123 struct crypto_report_larval rl;
124 124
125 snprintf(rl.type, CRYPTO_MAX_ALG_NAME, "%s", "larval"); 125 snprintf(rl.type, CRYPTO_MAX_ALG_NAME, "%s", "larval");
126 126
127 NLA_PUT(skb, CRYPTOCFGA_REPORT_LARVAL, 127 if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL,
128 sizeof(struct crypto_report_larval), &rl); 128 sizeof(struct crypto_report_larval), &rl))
129 129 goto nla_put_failure;
130 goto out; 130 goto out;
131 } 131 }
132 132
diff --git a/crypto/pcompress.c b/crypto/pcompress.c
index 2e458e5482d0..04e083ff5373 100644
--- a/crypto/pcompress.c
+++ b/crypto/pcompress.c
@@ -55,9 +55,9 @@ static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg)
55 55
56 snprintf(rpcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "pcomp"); 56 snprintf(rpcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "pcomp");
57 57
58 NLA_PUT(skb, CRYPTOCFGA_REPORT_COMPRESS, 58 if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
59 sizeof(struct crypto_report_comp), &rpcomp); 59 sizeof(struct crypto_report_comp), &rpcomp))
60 60 goto nla_put_failure;
61 return 0; 61 return 0;
62 62
63nla_put_failure: 63nla_put_failure:
diff --git a/crypto/rng.c b/crypto/rng.c
index 64f864fa8043..f3b7894dec00 100644
--- a/crypto/rng.c
+++ b/crypto/rng.c
@@ -69,9 +69,9 @@ static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
69 69
70 rrng.seedsize = alg->cra_rng.seedsize; 70 rrng.seedsize = alg->cra_rng.seedsize;
71 71
72 NLA_PUT(skb, CRYPTOCFGA_REPORT_RNG, 72 if (nla_put(skb, CRYPTOCFGA_REPORT_RNG,
73 sizeof(struct crypto_report_rng), &rrng); 73 sizeof(struct crypto_report_rng), &rrng))
74 74 goto nla_put_failure;
75 return 0; 75 return 0;
76 76
77nla_put_failure: 77nla_put_failure:
diff --git a/crypto/shash.c b/crypto/shash.c
index 21fc12e2378f..32067f47e6c7 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -534,9 +534,9 @@ static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
534 rhash.blocksize = alg->cra_blocksize; 534 rhash.blocksize = alg->cra_blocksize;
535 rhash.digestsize = salg->digestsize; 535 rhash.digestsize = salg->digestsize;
536 536
537 NLA_PUT(skb, CRYPTOCFGA_REPORT_HASH, 537 if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
538 sizeof(struct crypto_report_hash), &rhash); 538 sizeof(struct crypto_report_hash), &rhash))
539 539 goto nla_put_failure;
540 return 0; 540 return 0;
541 541
542nla_put_failure: 542nla_put_failure:
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index f8f41e0e8a8c..89b30f32ba68 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -802,7 +802,7 @@ static void fill_rx_pool (amb_dev * dev, unsigned char pool,
802 } 802 }
803 // cast needed as there is no %? for pointer differences 803 // cast needed as there is no %? for pointer differences
804 PRINTD (DBG_SKB, "allocated skb at %p, head %p, area %li", 804 PRINTD (DBG_SKB, "allocated skb at %p, head %p, area %li",
805 skb, skb->head, (long) (skb_end_pointer(skb) - skb->head)); 805 skb, skb->head, (long) skb_end_offset(skb));
806 rx.handle = virt_to_bus (skb); 806 rx.handle = virt_to_bus (skb);
807 rx.host_address = cpu_to_be32 (virt_to_bus (skb->data)); 807 rx.host_address = cpu_to_be32 (virt_to_bus (skb->data));
808 if (rx_give (dev, &rx, pool)) 808 if (rx_give (dev, &rx, pool))
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index 75fd691cd43e..7d01c2a75256 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -2182,7 +2182,6 @@ static int hrz_open (struct atm_vcc *atm_vcc)
2182 default: 2182 default:
2183 PRINTD (DBG_QOS|DBG_VCC, "Bad AAL!"); 2183 PRINTD (DBG_QOS|DBG_VCC, "Bad AAL!");
2184 return -EINVAL; 2184 return -EINVAL;
2185 break;
2186 } 2185 }
2187 2186
2188 // TX traffic parameters 2187 // TX traffic parameters
@@ -2357,7 +2356,6 @@ static int hrz_open (struct atm_vcc *atm_vcc)
2357 default: { 2356 default: {
2358 PRINTD (DBG_QOS, "unsupported TX traffic class"); 2357 PRINTD (DBG_QOS, "unsupported TX traffic class");
2359 return -EINVAL; 2358 return -EINVAL;
2360 break;
2361 } 2359 }
2362 } 2360 }
2363 } 2361 }
@@ -2433,7 +2431,6 @@ static int hrz_open (struct atm_vcc *atm_vcc)
2433 default: { 2431 default: {
2434 PRINTD (DBG_QOS, "unsupported RX traffic class"); 2432 PRINTD (DBG_QOS, "unsupported RX traffic class");
2435 return -EINVAL; 2433 return -EINVAL;
2436 break;
2437 } 2434 }
2438 } 2435 }
2439 } 2436 }
@@ -2581,7 +2578,6 @@ static int hrz_getsockopt (struct atm_vcc * atm_vcc, int level, int optname,
2581// break; 2578// break;
2582 default: 2579 default:
2583 return -ENOPROTOOPT; 2580 return -ENOPROTOOPT;
2584 break;
2585 }; 2581 };
2586 break; 2582 break;
2587 } 2583 }
@@ -2601,7 +2597,6 @@ static int hrz_setsockopt (struct atm_vcc * atm_vcc, int level, int optname,
2601// break; 2597// break;
2602 default: 2598 default:
2603 return -ENOPROTOOPT; 2599 return -ENOPROTOOPT;
2604 break;
2605 }; 2600 };
2606 break; 2601 break;
2607 } 2602 }
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 1c052127548c..8974bd2b961e 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -1258,7 +1258,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
1258 tail = readl(SAR_REG_RAWCT); 1258 tail = readl(SAR_REG_RAWCT);
1259 1259
1260 pci_dma_sync_single_for_cpu(card->pcidev, IDT77252_PRV_PADDR(queue), 1260 pci_dma_sync_single_for_cpu(card->pcidev, IDT77252_PRV_PADDR(queue),
1261 skb_end_pointer(queue) - queue->head - 16, 1261 skb_end_offset(queue) - 16,
1262 PCI_DMA_FROMDEVICE); 1262 PCI_DMA_FROMDEVICE);
1263 1263
1264 while (head != tail) { 1264 while (head != tail) {
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 43beaca53179..436f519bed1c 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -664,7 +664,7 @@ static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
664 timeo = mdev->net_conf->try_connect_int * HZ; 664 timeo = mdev->net_conf->try_connect_int * HZ;
665 timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */ 665 timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */
666 666
667 s_listen->sk->sk_reuse = 1; /* SO_REUSEADDR */ 667 s_listen->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
668 s_listen->sk->sk_rcvtimeo = timeo; 668 s_listen->sk->sk_rcvtimeo = timeo;
669 s_listen->sk->sk_sndtimeo = timeo; 669 s_listen->sk->sk_sndtimeo = timeo;
670 drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size, 670 drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
@@ -841,8 +841,8 @@ retry:
841 } 841 }
842 } while (1); 842 } while (1);
843 843
844 msock->sk->sk_reuse = 1; /* SO_REUSEADDR */ 844 msock->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
845 sock->sk->sk_reuse = 1; /* SO_REUSEADDR */ 845 sock->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
846 846
847 sock->sk->sk_allocation = GFP_NOIO; 847 sock->sk->sk_allocation = GFP_NOIO;
848 msock->sk->sk_allocation = GFP_NOIO; 848 msock->sk->sk_allocation = GFP_NOIO;
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 8af25a097d75..7233c88f01b8 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -30,37 +30,6 @@
30#include "hyperv_vmbus.h" 30#include "hyperv_vmbus.h"
31 31
32 32
33/* #defines */
34
35
36/* Amount of space to write to */
37#define BYTES_AVAIL_TO_WRITE(r, w, z) \
38 ((w) >= (r)) ? ((z) - ((w) - (r))) : ((r) - (w))
39
40
41/*
42 *
43 * hv_get_ringbuffer_availbytes()
44 *
45 * Get number of bytes available to read and to write to
46 * for the specified ring buffer
47 */
48static inline void
49hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
50 u32 *read, u32 *write)
51{
52 u32 read_loc, write_loc;
53
54 smp_read_barrier_depends();
55
56 /* Capture the read/write indices before they changed */
57 read_loc = rbi->ring_buffer->read_index;
58 write_loc = rbi->ring_buffer->write_index;
59
60 *write = BYTES_AVAIL_TO_WRITE(read_loc, write_loc, rbi->ring_datasize);
61 *read = rbi->ring_datasize - *write;
62}
63
64/* 33/*
65 * hv_get_next_write_location() 34 * hv_get_next_write_location()
66 * 35 *
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index e3e470fecaa9..59fbd704a1ec 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -42,6 +42,7 @@
42#include <linux/inetdevice.h> 42#include <linux/inetdevice.h>
43#include <linux/slab.h> 43#include <linux/slab.h>
44#include <linux/module.h> 44#include <linux/module.h>
45#include <net/route.h>
45 46
46#include <net/tcp.h> 47#include <net/tcp.h>
47#include <net/ipv6.h> 48#include <net/ipv6.h>
@@ -1826,7 +1827,10 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
1826 route->path_rec->reversible = 1; 1827 route->path_rec->reversible = 1;
1827 route->path_rec->pkey = cpu_to_be16(0xffff); 1828 route->path_rec->pkey = cpu_to_be16(0xffff);
1828 route->path_rec->mtu_selector = IB_SA_EQ; 1829 route->path_rec->mtu_selector = IB_SA_EQ;
1829 route->path_rec->sl = id_priv->tos >> 5; 1830 route->path_rec->sl = netdev_get_prio_tc_map(
1831 ndev->priv_flags & IFF_802_1Q_VLAN ?
1832 vlan_dev_real_dev(ndev) : ndev,
1833 rt_tos2priority(id_priv->tos));
1830 1834
1831 route->path_rec->mtu = iboe_get_mtu(ndev->mtu); 1835 route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
1832 route->path_rec->rate_selector = IB_SA_EQ; 1836 route->path_rec->rate_selector = IB_SA_EQ;
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c
index 396e29370304..e497dfbee435 100644
--- a/drivers/infiniband/core/netlink.c
+++ b/drivers/infiniband/core/netlink.c
@@ -125,7 +125,8 @@ int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh,
125 unsigned char *prev_tail; 125 unsigned char *prev_tail;
126 126
127 prev_tail = skb_tail_pointer(skb); 127 prev_tail = skb_tail_pointer(skb);
128 NLA_PUT(skb, type, len, data); 128 if (nla_put(skb, type, len, data))
129 goto nla_put_failure;
129 nlh->nlmsg_len += skb_tail_pointer(skb) - prev_tail; 130 nlh->nlmsg_len += skb_tail_pointer(skb) - prev_tail;
130 return 0; 131 return 0;
131 132
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 5861cdb22b7c..8002ae642cfe 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -66,12 +66,6 @@ static ctl_table ucma_ctl_table[] = {
66 { } 66 { }
67}; 67};
68 68
69static struct ctl_path ucma_ctl_path[] = {
70 { .procname = "net" },
71 { .procname = "rdma_ucm" },
72 { }
73};
74
75struct ucma_file { 69struct ucma_file {
76 struct mutex mut; 70 struct mutex mut;
77 struct file *filp; 71 struct file *filp;
@@ -1392,7 +1386,7 @@ static int __init ucma_init(void)
1392 goto err1; 1386 goto err1;
1393 } 1387 }
1394 1388
1395 ucma_ctl_table_hdr = register_sysctl_paths(ucma_ctl_path, ucma_ctl_table); 1389 ucma_ctl_table_hdr = register_net_sysctl(&init_net, "net/rdma_ucm", ucma_ctl_table);
1396 if (!ucma_ctl_table_hdr) { 1390 if (!ucma_ctl_table_hdr) {
1397 printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n"); 1391 printk(KERN_ERR "rdma_ucm: couldn't register sysctl paths\n");
1398 ret = -ENOMEM; 1392 ret = -ENOMEM;
@@ -1408,7 +1402,7 @@ err1:
1408 1402
1409static void __exit ucma_cleanup(void) 1403static void __exit ucma_cleanup(void)
1410{ 1404{
1411 unregister_sysctl_table(ucma_ctl_table_hdr); 1405 unregister_net_sysctl_table(ucma_ctl_table_hdr);
1412 device_remove_file(ucma_misc.this_device, &dev_attr_abi_version); 1406 device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
1413 misc_deregister(&ucma_misc); 1407 misc_deregister(&ucma_misc);
1414 idr_destroy(&ctx_idr); 1408 idr_destroy(&ctx_idr);
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index b902794bbf07..38c4bd87b2c9 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -336,11 +336,6 @@ static inline void
336capincci_alloc_minor(struct capidev *cdev, struct capincci *np) { } 336capincci_alloc_minor(struct capidev *cdev, struct capincci *np) { }
337static inline void capincci_free_minor(struct capincci *np) { } 337static inline void capincci_free_minor(struct capincci *np) { }
338 338
339static inline unsigned int capincci_minor_opencount(struct capincci *np)
340{
341 return 0;
342}
343
344#endif /* !CONFIG_ISDN_CAPI_MIDDLEWARE */ 339#endif /* !CONFIG_ISDN_CAPI_MIDDLEWARE */
345 340
346static struct capincci *capincci_alloc(struct capidev *cdev, u32 ncci) 341static struct capincci *capincci_alloc(struct capidev *cdev, u32 ncci)
@@ -372,6 +367,7 @@ static void capincci_free(struct capidev *cdev, u32 ncci)
372 } 367 }
373} 368}
374 369
370#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
375static struct capincci *capincci_find(struct capidev *cdev, u32 ncci) 371static struct capincci *capincci_find(struct capidev *cdev, u32 ncci)
376{ 372{
377 struct capincci *np; 373 struct capincci *np;
@@ -382,7 +378,6 @@ static struct capincci *capincci_find(struct capidev *cdev, u32 ncci)
382 return NULL; 378 return NULL;
383} 379}
384 380
385#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
386/* -------- handle data queue --------------------------------------- */ 381/* -------- handle data queue --------------------------------------- */
387 382
388static struct sk_buff * 383static struct sk_buff *
@@ -578,8 +573,8 @@ static void capi_recv_message(struct capi20_appl *ap, struct sk_buff *skb)
578 struct tty_struct *tty; 573 struct tty_struct *tty;
579 struct capiminor *mp; 574 struct capiminor *mp;
580 u16 datahandle; 575 u16 datahandle;
581#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
582 struct capincci *np; 576 struct capincci *np;
577#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
583 578
584 mutex_lock(&cdev->lock); 579 mutex_lock(&cdev->lock);
585 580
@@ -597,6 +592,12 @@ static void capi_recv_message(struct capi20_appl *ap, struct sk_buff *skb)
597 goto unlock_out; 592 goto unlock_out;
598 } 593 }
599 594
595#ifndef CONFIG_ISDN_CAPI_MIDDLEWARE
596 skb_queue_tail(&cdev->recvqueue, skb);
597 wake_up_interruptible(&cdev->recvwait);
598
599#else /* CONFIG_ISDN_CAPI_MIDDLEWARE */
600
600 np = capincci_find(cdev, CAPIMSG_CONTROL(skb->data)); 601 np = capincci_find(cdev, CAPIMSG_CONTROL(skb->data));
601 if (!np) { 602 if (!np) {
602 printk(KERN_ERR "BUG: capi_signal: ncci not found\n"); 603 printk(KERN_ERR "BUG: capi_signal: ncci not found\n");
@@ -605,12 +606,6 @@ static void capi_recv_message(struct capi20_appl *ap, struct sk_buff *skb)
605 goto unlock_out; 606 goto unlock_out;
606 } 607 }
607 608
608#ifndef CONFIG_ISDN_CAPI_MIDDLEWARE
609 skb_queue_tail(&cdev->recvqueue, skb);
610 wake_up_interruptible(&cdev->recvwait);
611
612#else /* CONFIG_ISDN_CAPI_MIDDLEWARE */
613
614 mp = np->minorp; 609 mp = np->minorp;
615 if (!mp) { 610 if (!mp) {
616 skb_queue_tail(&cdev->recvqueue, skb); 611 skb_queue_tail(&cdev->recvqueue, skb);
@@ -786,7 +781,6 @@ register_out:
786 return retval; 781 return retval;
787 782
788 case CAPI_GET_VERSION: 783 case CAPI_GET_VERSION:
789 {
790 if (copy_from_user(&data.contr, argp, 784 if (copy_from_user(&data.contr, argp,
791 sizeof(data.contr))) 785 sizeof(data.contr)))
792 return -EFAULT; 786 return -EFAULT;
@@ -796,11 +790,9 @@ register_out:
796 if (copy_to_user(argp, &data.version, 790 if (copy_to_user(argp, &data.version,
797 sizeof(data.version))) 791 sizeof(data.version)))
798 return -EFAULT; 792 return -EFAULT;
799 } 793 return 0;
800 return 0;
801 794
802 case CAPI_GET_SERIAL: 795 case CAPI_GET_SERIAL:
803 {
804 if (copy_from_user(&data.contr, argp, 796 if (copy_from_user(&data.contr, argp,
805 sizeof(data.contr))) 797 sizeof(data.contr)))
806 return -EFAULT; 798 return -EFAULT;
@@ -810,10 +802,9 @@ register_out:
810 if (copy_to_user(argp, data.serial, 802 if (copy_to_user(argp, data.serial,
811 sizeof(data.serial))) 803 sizeof(data.serial)))
812 return -EFAULT; 804 return -EFAULT;
813 } 805 return 0;
814 return 0; 806
815 case CAPI_GET_PROFILE: 807 case CAPI_GET_PROFILE:
816 {
817 if (copy_from_user(&data.contr, argp, 808 if (copy_from_user(&data.contr, argp,
818 sizeof(data.contr))) 809 sizeof(data.contr)))
819 return -EFAULT; 810 return -EFAULT;
@@ -837,11 +828,9 @@ register_out:
837 } 828 }
838 if (retval) 829 if (retval)
839 return -EFAULT; 830 return -EFAULT;
840 } 831 return 0;
841 return 0;
842 832
843 case CAPI_GET_MANUFACTURER: 833 case CAPI_GET_MANUFACTURER:
844 {
845 if (copy_from_user(&data.contr, argp, 834 if (copy_from_user(&data.contr, argp,
846 sizeof(data.contr))) 835 sizeof(data.contr)))
847 return -EFAULT; 836 return -EFAULT;
@@ -853,8 +842,8 @@ register_out:
853 sizeof(data.manufacturer))) 842 sizeof(data.manufacturer)))
854 return -EFAULT; 843 return -EFAULT;
855 844
856 } 845 return 0;
857 return 0; 846
858 case CAPI_GET_ERRCODE: 847 case CAPI_GET_ERRCODE:
859 data.errcode = cdev->errcode; 848 data.errcode = cdev->errcode;
860 cdev->errcode = CAPI_NOERROR; 849 cdev->errcode = CAPI_NOERROR;
@@ -870,8 +859,7 @@ register_out:
870 return 0; 859 return 0;
871 return -ENXIO; 860 return -ENXIO;
872 861
873 case CAPI_MANUFACTURER_CMD: 862 case CAPI_MANUFACTURER_CMD: {
874 {
875 struct capi_manufacturer_cmd mcmd; 863 struct capi_manufacturer_cmd mcmd;
876 if (!capable(CAP_SYS_ADMIN)) 864 if (!capable(CAP_SYS_ADMIN))
877 return -EPERM; 865 return -EPERM;
@@ -879,8 +867,6 @@ register_out:
879 return -EFAULT; 867 return -EFAULT;
880 return capi20_manufacturer(mcmd.cmd, mcmd.data); 868 return capi20_manufacturer(mcmd.cmd, mcmd.data);
881 } 869 }
882 return 0;
883
884 case CAPI_SET_FLAGS: 870 case CAPI_SET_FLAGS:
885 case CAPI_CLR_FLAGS: { 871 case CAPI_CLR_FLAGS: {
886 unsigned userflags; 872 unsigned userflags;
@@ -902,6 +888,11 @@ register_out:
902 return -EFAULT; 888 return -EFAULT;
903 return 0; 889 return 0;
904 890
891#ifndef CONFIG_ISDN_CAPI_MIDDLEWARE
892 case CAPI_NCCI_OPENCOUNT:
893 return 0;
894
895#else /* CONFIG_ISDN_CAPI_MIDDLEWARE */
905 case CAPI_NCCI_OPENCOUNT: { 896 case CAPI_NCCI_OPENCOUNT: {
906 struct capincci *nccip; 897 struct capincci *nccip;
907 unsigned ncci; 898 unsigned ncci;
@@ -918,7 +909,6 @@ register_out:
918 return count; 909 return count;
919 } 910 }
920 911
921#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
922 case CAPI_NCCI_GETUNIT: { 912 case CAPI_NCCI_GETUNIT: {
923 struct capincci *nccip; 913 struct capincci *nccip;
924 struct capiminor *mp; 914 struct capiminor *mp;
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index 6f5016b479f8..832bc807ed20 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -1593,7 +1593,7 @@ static int capidrv_command(isdn_ctrl *c, capidrv_contr *card)
1593 return capidrv_ioctl(c, card); 1593 return capidrv_ioctl(c, card);
1594 1594
1595 switch (c->command) { 1595 switch (c->command) {
1596 case ISDN_CMD_DIAL:{ 1596 case ISDN_CMD_DIAL: {
1597 u8 calling[ISDN_MSNLEN + 3]; 1597 u8 calling[ISDN_MSNLEN + 3];
1598 u8 called[ISDN_MSNLEN + 2]; 1598 u8 called[ISDN_MSNLEN + 2];
1599 1599
@@ -2072,7 +2072,8 @@ static int capidrv_addcontr(u16 contr, struct capi_profile *profp)
2072 card->interface.writebuf_skb = if_sendbuf; 2072 card->interface.writebuf_skb = if_sendbuf;
2073 card->interface.writecmd = NULL; 2073 card->interface.writecmd = NULL;
2074 card->interface.readstat = if_readstat; 2074 card->interface.readstat = if_readstat;
2075 card->interface.features = ISDN_FEATURE_L2_HDLC | 2075 card->interface.features =
2076 ISDN_FEATURE_L2_HDLC |
2076 ISDN_FEATURE_L2_TRANS | 2077 ISDN_FEATURE_L2_TRANS |
2077 ISDN_FEATURE_L3_TRANS | 2078 ISDN_FEATURE_L3_TRANS |
2078 ISDN_FEATURE_P_UNKNOWN | 2079 ISDN_FEATURE_P_UNKNOWN |
@@ -2080,7 +2081,8 @@ static int capidrv_addcontr(u16 contr, struct capi_profile *profp)
2080 ISDN_FEATURE_L2_X75UI | 2081 ISDN_FEATURE_L2_X75UI |
2081 ISDN_FEATURE_L2_X75BUI; 2082 ISDN_FEATURE_L2_X75BUI;
2082 if (profp->support1 & (1 << 2)) 2083 if (profp->support1 & (1 << 2))
2083 card->interface.features |= ISDN_FEATURE_L2_V11096 | 2084 card->interface.features |=
2085 ISDN_FEATURE_L2_V11096 |
2084 ISDN_FEATURE_L2_V11019 | 2086 ISDN_FEATURE_L2_V11019 |
2085 ISDN_FEATURE_L2_V11038; 2087 ISDN_FEATURE_L2_V11038;
2086 if (profp->support1 & (1 << 8)) 2088 if (profp->support1 & (1 << 8))
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index afa080258bfa..3b9278b333ba 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -410,10 +410,10 @@ static void check_pending(struct bas_cardstate *ucs)
410 if (!(ucs->basstate & BS_RESETTING)) 410 if (!(ucs->basstate & BS_RESETTING))
411 ucs->pending = 0; 411 ucs->pending = 0;
412 break; 412 break;
413 /* 413 /*
414 * HD_READ_ATMESSAGE and HD_WRITE_ATMESSAGE are handled separately 414 * HD_READ_ATMESSAGE and HD_WRITE_ATMESSAGE are handled separately
415 * and should never end up here 415 * and should never end up here
416 */ 416 */
417 default: 417 default:
418 dev_warn(&ucs->interface->dev, 418 dev_warn(&ucs->interface->dev,
419 "unknown pending request 0x%02x cleared\n", 419 "unknown pending request 0x%02x cleared\n",
@@ -877,8 +877,7 @@ static void read_iso_callback(struct urb *urb)
877 for (i = 0; i < BAS_NUMFRAMES; i++) { 877 for (i = 0; i < BAS_NUMFRAMES; i++) {
878 ubc->isoinlost += urb->iso_frame_desc[i].actual_length; 878 ubc->isoinlost += urb->iso_frame_desc[i].actual_length;
879 if (unlikely(urb->iso_frame_desc[i].status != 0 && 879 if (unlikely(urb->iso_frame_desc[i].status != 0 &&
880 urb->iso_frame_desc[i].status != 880 urb->iso_frame_desc[i].status != -EINPROGRESS))
881 -EINPROGRESS))
882 ubc->loststatus = urb->iso_frame_desc[i].status; 881 ubc->loststatus = urb->iso_frame_desc[i].status;
883 urb->iso_frame_desc[i].status = 0; 882 urb->iso_frame_desc[i].status = 0;
884 urb->iso_frame_desc[i].actual_length = 0; 883 urb->iso_frame_desc[i].actual_length = 0;
@@ -2078,16 +2077,14 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
2078/* Free hardware dependent part of the B channel structure 2077/* Free hardware dependent part of the B channel structure
2079 * parameter: 2078 * parameter:
2080 * bcs B channel structure 2079 * bcs B channel structure
2081 * return value:
2082 * !=0 on success
2083 */ 2080 */
2084static int gigaset_freebcshw(struct bc_state *bcs) 2081static void gigaset_freebcshw(struct bc_state *bcs)
2085{ 2082{
2086 struct bas_bc_state *ubc = bcs->hw.bas; 2083 struct bas_bc_state *ubc = bcs->hw.bas;
2087 int i; 2084 int i;
2088 2085
2089 if (!ubc) 2086 if (!ubc)
2090 return 0; 2087 return;
2091 2088
2092 /* kill URBs and tasklets before freeing - better safe than sorry */ 2089 /* kill URBs and tasklets before freeing - better safe than sorry */
2093 ubc->running = 0; 2090 ubc->running = 0;
@@ -2105,14 +2102,13 @@ static int gigaset_freebcshw(struct bc_state *bcs)
2105 kfree(ubc->isooutbuf); 2102 kfree(ubc->isooutbuf);
2106 kfree(ubc); 2103 kfree(ubc);
2107 bcs->hw.bas = NULL; 2104 bcs->hw.bas = NULL;
2108 return 1;
2109} 2105}
2110 2106
2111/* Initialize hardware dependent part of the B channel structure 2107/* Initialize hardware dependent part of the B channel structure
2112 * parameter: 2108 * parameter:
2113 * bcs B channel structure 2109 * bcs B channel structure
2114 * return value: 2110 * return value:
2115 * !=0 on success 2111 * 0 on success, error code < 0 on failure
2116 */ 2112 */
2117static int gigaset_initbcshw(struct bc_state *bcs) 2113static int gigaset_initbcshw(struct bc_state *bcs)
2118{ 2114{
@@ -2122,7 +2118,7 @@ static int gigaset_initbcshw(struct bc_state *bcs)
2122 bcs->hw.bas = ubc = kmalloc(sizeof(struct bas_bc_state), GFP_KERNEL); 2118 bcs->hw.bas = ubc = kmalloc(sizeof(struct bas_bc_state), GFP_KERNEL);
2123 if (!ubc) { 2119 if (!ubc) {
2124 pr_err("out of memory\n"); 2120 pr_err("out of memory\n");
2125 return 0; 2121 return -ENOMEM;
2126 } 2122 }
2127 2123
2128 ubc->running = 0; 2124 ubc->running = 0;
@@ -2139,7 +2135,7 @@ static int gigaset_initbcshw(struct bc_state *bcs)
2139 pr_err("out of memory\n"); 2135 pr_err("out of memory\n");
2140 kfree(ubc); 2136 kfree(ubc);
2141 bcs->hw.bas = NULL; 2137 bcs->hw.bas = NULL;
2142 return 0; 2138 return -ENOMEM;
2143 } 2139 }
2144 tasklet_init(&ubc->sent_tasklet, 2140 tasklet_init(&ubc->sent_tasklet,
2145 write_iso_tasklet, (unsigned long) bcs); 2141 write_iso_tasklet, (unsigned long) bcs);
@@ -2164,7 +2160,7 @@ static int gigaset_initbcshw(struct bc_state *bcs)
2164 ubc->stolen0s = 0; 2160 ubc->stolen0s = 0;
2165 tasklet_init(&ubc->rcvd_tasklet, 2161 tasklet_init(&ubc->rcvd_tasklet,
2166 read_iso_tasklet, (unsigned long) bcs); 2162 read_iso_tasklet, (unsigned long) bcs);
2167 return 1; 2163 return 0;
2168} 2164}
2169 2165
2170static void gigaset_reinitbcshw(struct bc_state *bcs) 2166static void gigaset_reinitbcshw(struct bc_state *bcs)
@@ -2187,6 +2183,12 @@ static void gigaset_freecshw(struct cardstate *cs)
2187 cs->hw.bas = NULL; 2183 cs->hw.bas = NULL;
2188} 2184}
2189 2185
2186/* Initialize hardware dependent part of the cardstate structure
2187 * parameter:
2188 * cs cardstate structure
2189 * return value:
2190 * 0 on success, error code < 0 on failure
2191 */
2190static int gigaset_initcshw(struct cardstate *cs) 2192static int gigaset_initcshw(struct cardstate *cs)
2191{ 2193{
2192 struct bas_cardstate *ucs; 2194 struct bas_cardstate *ucs;
@@ -2194,13 +2196,13 @@ static int gigaset_initcshw(struct cardstate *cs)
2194 cs->hw.bas = ucs = kmalloc(sizeof *ucs, GFP_KERNEL); 2196 cs->hw.bas = ucs = kmalloc(sizeof *ucs, GFP_KERNEL);
2195 if (!ucs) { 2197 if (!ucs) {
2196 pr_err("out of memory\n"); 2198 pr_err("out of memory\n");
2197 return 0; 2199 return -ENOMEM;
2198 } 2200 }
2199 ucs->int_in_buf = kmalloc(IP_MSGSIZE, GFP_KERNEL); 2201 ucs->int_in_buf = kmalloc(IP_MSGSIZE, GFP_KERNEL);
2200 if (!ucs->int_in_buf) { 2202 if (!ucs->int_in_buf) {
2201 kfree(ucs); 2203 kfree(ucs);
2202 pr_err("out of memory\n"); 2204 pr_err("out of memory\n");
2203 return 0; 2205 return -ENOMEM;
2204 } 2206 }
2205 2207
2206 ucs->urb_cmd_in = NULL; 2208 ucs->urb_cmd_in = NULL;
@@ -2219,7 +2221,7 @@ static int gigaset_initcshw(struct cardstate *cs)
2219 init_waitqueue_head(&ucs->waitqueue); 2221 init_waitqueue_head(&ucs->waitqueue);
2220 INIT_WORK(&ucs->int_in_wq, int_in_work); 2222 INIT_WORK(&ucs->int_in_wq, int_in_work);
2221 2223
2222 return 1; 2224 return 0;
2223} 2225}
2224 2226
2225/* freeurbs 2227/* freeurbs
@@ -2379,18 +2381,20 @@ static int gigaset_probe(struct usb_interface *interface,
2379 /* save address of controller structure */ 2381 /* save address of controller structure */
2380 usb_set_intfdata(interface, cs); 2382 usb_set_intfdata(interface, cs);
2381 2383
2382 if (!gigaset_start(cs)) 2384 rc = gigaset_start(cs);
2385 if (rc < 0)
2383 goto error; 2386 goto error;
2384 2387
2385 return 0; 2388 return 0;
2386 2389
2387allocerr: 2390allocerr:
2388 dev_err(cs->dev, "could not allocate URBs\n"); 2391 dev_err(cs->dev, "could not allocate URBs\n");
2392 rc = -ENOMEM;
2389error: 2393error:
2390 freeurbs(cs); 2394 freeurbs(cs);
2391 usb_set_intfdata(interface, NULL); 2395 usb_set_intfdata(interface, NULL);
2392 gigaset_freecs(cs); 2396 gigaset_freecs(cs);
2393 return -ENODEV; 2397 return rc;
2394} 2398}
2395 2399
2396/* gigaset_disconnect 2400/* gigaset_disconnect
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
index 343b5c80cb7b..27e4a3e21d64 100644
--- a/drivers/isdn/gigaset/capi.c
+++ b/drivers/isdn/gigaset/capi.c
@@ -14,6 +14,7 @@
14#include "gigaset.h" 14#include "gigaset.h"
15#include <linux/proc_fs.h> 15#include <linux/proc_fs.h>
16#include <linux/seq_file.h> 16#include <linux/seq_file.h>
17#include <linux/ratelimit.h>
17#include <linux/isdn/capilli.h> 18#include <linux/isdn/capilli.h>
18#include <linux/isdn/capicmd.h> 19#include <linux/isdn/capicmd.h>
19#include <linux/isdn/capiutil.h> 20#include <linux/isdn/capiutil.h>
@@ -108,51 +109,35 @@ static struct {
108 u8 *bc; 109 u8 *bc;
109 u8 *hlc; 110 u8 *hlc;
110} cip2bchlc[] = { 111} cip2bchlc[] = {
111 [1] = { "8090A3", NULL }, 112 [1] = { "8090A3", NULL }, /* Speech (A-law) */
112 /* Speech (A-law) */ 113 [2] = { "8890", NULL }, /* Unrestricted digital information */
113 [2] = { "8890", NULL }, 114 [3] = { "8990", NULL }, /* Restricted digital information */
114 /* Unrestricted digital information */ 115 [4] = { "9090A3", NULL }, /* 3,1 kHz audio (A-law) */
115 [3] = { "8990", NULL }, 116 [5] = { "9190", NULL }, /* 7 kHz audio */
116 /* Restricted digital information */ 117 [6] = { "9890", NULL }, /* Video */
117 [4] = { "9090A3", NULL }, 118 [7] = { "88C0C6E6", NULL }, /* Packet mode */
118 /* 3,1 kHz audio (A-law) */ 119 [8] = { "8890218F", NULL }, /* 56 kbit/s rate adaptation */
119 [5] = { "9190", NULL }, 120 [9] = { "9190A5", NULL }, /* Unrestricted digital information
120 /* 7 kHz audio */ 121 * with tones/announcements */
121 [6] = { "9890", NULL }, 122 [16] = { "8090A3", "9181" }, /* Telephony */
122 /* Video */ 123 [17] = { "9090A3", "9184" }, /* Group 2/3 facsimile */
123 [7] = { "88C0C6E6", NULL }, 124 [18] = { "8890", "91A1" }, /* Group 4 facsimile Class 1 */
124 /* Packet mode */ 125 [19] = { "8890", "91A4" }, /* Teletex service basic and mixed mode
125 [8] = { "8890218F", NULL }, 126 * and Group 4 facsimile service
126 /* 56 kbit/s rate adaptation */ 127 * Classes II and III */
127 [9] = { "9190A5", NULL }, 128 [20] = { "8890", "91A8" }, /* Teletex service basic and
128 /* Unrestricted digital information with tones/announcements */ 129 * processable mode */
129 [16] = { "8090A3", "9181" }, 130 [21] = { "8890", "91B1" }, /* Teletex service basic mode */
130 /* Telephony */ 131 [22] = { "8890", "91B2" }, /* International interworking for
131 [17] = { "9090A3", "9184" }, 132 * Videotex */
132 /* Group 2/3 facsimile */ 133 [23] = { "8890", "91B5" }, /* Telex */
133 [18] = { "8890", "91A1" }, 134 [24] = { "8890", "91B8" }, /* Message Handling Systems
134 /* Group 4 facsimile Class 1 */ 135 * in accordance with X.400 */
135 [19] = { "8890", "91A4" }, 136 [25] = { "8890", "91C1" }, /* OSI application
136 /* Teletex service basic and mixed mode 137 * in accordance with X.200 */
137 and Group 4 facsimile service Classes II and III */ 138 [26] = { "9190A5", "9181" }, /* 7 kHz telephony */
138 [20] = { "8890", "91A8" }, 139 [27] = { "9190A5", "916001" }, /* Video telephony, first connection */
139 /* Teletex service basic and processable mode */ 140 [28] = { "8890", "916002" }, /* Video telephony, second connection */
140 [21] = { "8890", "91B1" },
141 /* Teletex service basic mode */
142 [22] = { "8890", "91B2" },
143 /* International interworking for Videotex */
144 [23] = { "8890", "91B5" },
145 /* Telex */
146 [24] = { "8890", "91B8" },
147 /* Message Handling Systems in accordance with X.400 */
148 [25] = { "8890", "91C1" },
149 /* OSI application in accordance with X.200 */
150 [26] = { "9190A5", "9181" },
151 /* 7 kHz telephony */
152 [27] = { "9190A5", "916001" },
153 /* Video telephony, first connection */
154 [28] = { "8890", "916002" },
155 /* Video telephony, second connection */
156}; 141};
157 142
158/* 143/*
@@ -223,10 +208,14 @@ get_appl(struct gigaset_capi_ctr *iif, u16 appl)
223static inline void dump_cmsg(enum debuglevel level, const char *tag, _cmsg *p) 208static inline void dump_cmsg(enum debuglevel level, const char *tag, _cmsg *p)
224{ 209{
225#ifdef CONFIG_GIGASET_DEBUG 210#ifdef CONFIG_GIGASET_DEBUG
211 /* dump at most 20 messages in 20 secs */
212 static DEFINE_RATELIMIT_STATE(msg_dump_ratelimit, 20 * HZ, 20);
226 _cdebbuf *cdb; 213 _cdebbuf *cdb;
227 214
228 if (!(gigaset_debuglevel & level)) 215 if (!(gigaset_debuglevel & level))
229 return; 216 return;
217 if (!___ratelimit(&msg_dump_ratelimit, tag))
218 return;
230 219
231 cdb = capi_cmsg2str(p); 220 cdb = capi_cmsg2str(p);
232 if (cdb) { 221 if (cdb) {
@@ -1192,7 +1181,9 @@ static void do_facility_req(struct gigaset_capi_ctr *iif,
1192 confparam[3] = 2; /* length */ 1181 confparam[3] = 2; /* length */
1193 capimsg_setu16(confparam, 4, CapiSuccess); 1182 capimsg_setu16(confparam, 4, CapiSuccess);
1194 break; 1183 break;
1195 /* ToDo: add supported services */ 1184
1185 /* ToDo: add supported services */
1186
1196 default: 1187 default:
1197 dev_notice(cs->dev, 1188 dev_notice(cs->dev,
1198 "%s: unsupported supplementary service function 0x%04x\n", 1189 "%s: unsupported supplementary service function 0x%04x\n",
@@ -1766,7 +1757,8 @@ static void do_connect_b3_req(struct gigaset_capi_ctr *iif,
1766 1757
1767 /* NCPI parameter: not applicable for B3 Transparent */ 1758 /* NCPI parameter: not applicable for B3 Transparent */
1768 ignore_cstruct_param(cs, cmsg->NCPI, "CONNECT_B3_REQ", "NCPI"); 1759 ignore_cstruct_param(cs, cmsg->NCPI, "CONNECT_B3_REQ", "NCPI");
1769 send_conf(iif, ap, skb, (cmsg->NCPI && cmsg->NCPI[0]) ? 1760 send_conf(iif, ap, skb,
1761 (cmsg->NCPI && cmsg->NCPI[0]) ?
1770 CapiNcpiNotSupportedByProtocol : CapiSuccess); 1762 CapiNcpiNotSupportedByProtocol : CapiSuccess);
1771} 1763}
1772 1764
@@ -1882,6 +1874,9 @@ static void do_disconnect_req(struct gigaset_capi_ctr *iif,
1882 1874
1883 /* check for active logical connection */ 1875 /* check for active logical connection */
1884 if (bcs->apconnstate >= APCONN_ACTIVE) { 1876 if (bcs->apconnstate >= APCONN_ACTIVE) {
1877 /* clear it */
1878 bcs->apconnstate = APCONN_SETUP;
1879
1885 /* 1880 /*
1886 * emit DISCONNECT_B3_IND with cause 0x3301 1881 * emit DISCONNECT_B3_IND with cause 0x3301
1887 * use separate cmsg structure, as the content of iif->acmsg 1882 * use separate cmsg structure, as the content of iif->acmsg
@@ -1906,6 +1901,7 @@ static void do_disconnect_req(struct gigaset_capi_ctr *iif,
1906 } 1901 }
1907 capi_cmsg2message(b3cmsg, 1902 capi_cmsg2message(b3cmsg,
1908 __skb_put(b3skb, CAPI_DISCONNECT_B3_IND_BASELEN)); 1903 __skb_put(b3skb, CAPI_DISCONNECT_B3_IND_BASELEN));
1904 dump_cmsg(DEBUG_CMD, __func__, b3cmsg);
1909 kfree(b3cmsg); 1905 kfree(b3cmsg);
1910 capi_ctr_handle_message(&iif->ctr, ap->id, b3skb); 1906 capi_ctr_handle_message(&iif->ctr, ap->id, b3skb);
1911 } 1907 }
@@ -1966,7 +1962,8 @@ static void do_disconnect_b3_req(struct gigaset_capi_ctr *iif,
1966 /* NCPI parameter: not applicable for B3 Transparent */ 1962 /* NCPI parameter: not applicable for B3 Transparent */
1967 ignore_cstruct_param(cs, cmsg->NCPI, 1963 ignore_cstruct_param(cs, cmsg->NCPI,
1968 "DISCONNECT_B3_REQ", "NCPI"); 1964 "DISCONNECT_B3_REQ", "NCPI");
1969 send_conf(iif, ap, skb, (cmsg->NCPI && cmsg->NCPI[0]) ? 1965 send_conf(iif, ap, skb,
1966 (cmsg->NCPI && cmsg->NCPI[0]) ?
1970 CapiNcpiNotSupportedByProtocol : CapiSuccess); 1967 CapiNcpiNotSupportedByProtocol : CapiSuccess);
1971} 1968}
1972 1969
@@ -2059,12 +2056,6 @@ static void do_reset_b3_req(struct gigaset_capi_ctr *iif,
2059} 2056}
2060 2057
2061/* 2058/*
2062 * dump unsupported/ignored messages at most twice per minute,
2063 * some apps send those very frequently
2064 */
2065static unsigned long ignored_msg_dump_time;
2066
2067/*
2068 * unsupported CAPI message handler 2059 * unsupported CAPI message handler
2069 */ 2060 */
2070static void do_unsupported(struct gigaset_capi_ctr *iif, 2061static void do_unsupported(struct gigaset_capi_ctr *iif,
@@ -2073,8 +2064,7 @@ static void do_unsupported(struct gigaset_capi_ctr *iif,
2073{ 2064{
2074 /* decode message */ 2065 /* decode message */
2075 capi_message2cmsg(&iif->acmsg, skb->data); 2066 capi_message2cmsg(&iif->acmsg, skb->data);
2076 if (printk_timed_ratelimit(&ignored_msg_dump_time, 30 * 1000)) 2067 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
2077 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
2078 send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState); 2068 send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState);
2079} 2069}
2080 2070
@@ -2085,11 +2075,9 @@ static void do_nothing(struct gigaset_capi_ctr *iif,
2085 struct gigaset_capi_appl *ap, 2075 struct gigaset_capi_appl *ap,
2086 struct sk_buff *skb) 2076 struct sk_buff *skb)
2087{ 2077{
2088 if (printk_timed_ratelimit(&ignored_msg_dump_time, 30 * 1000)) { 2078 /* decode message */
2089 /* decode message */ 2079 capi_message2cmsg(&iif->acmsg, skb->data);
2090 capi_message2cmsg(&iif->acmsg, skb->data); 2080 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
2091 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
2092 }
2093 dev_kfree_skb_any(skb); 2081 dev_kfree_skb_any(skb);
2094} 2082}
2095 2083
@@ -2358,7 +2346,7 @@ static const struct file_operations gigaset_proc_fops = {
2358 * @cs: device descriptor structure. 2346 * @cs: device descriptor structure.
2359 * @isdnid: device name. 2347 * @isdnid: device name.
2360 * 2348 *
2361 * Return value: 1 for success, 0 for failure 2349 * Return value: 0 on success, error code < 0 on failure
2362 */ 2350 */
2363int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid) 2351int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
2364{ 2352{
@@ -2368,7 +2356,7 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
2368 iif = kmalloc(sizeof(*iif), GFP_KERNEL); 2356 iif = kmalloc(sizeof(*iif), GFP_KERNEL);
2369 if (!iif) { 2357 if (!iif) {
2370 pr_err("%s: out of memory\n", __func__); 2358 pr_err("%s: out of memory\n", __func__);
2371 return 0; 2359 return -ENOMEM;
2372 } 2360 }
2373 2361
2374 /* prepare controller structure */ 2362 /* prepare controller structure */
@@ -2392,12 +2380,12 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
2392 if (rc) { 2380 if (rc) {
2393 pr_err("attach_capi_ctr failed (%d)\n", rc); 2381 pr_err("attach_capi_ctr failed (%d)\n", rc);
2394 kfree(iif); 2382 kfree(iif);
2395 return 0; 2383 return rc;
2396 } 2384 }
2397 2385
2398 cs->iif = iif; 2386 cs->iif = iif;
2399 cs->hw_hdr_len = CAPI_DATA_B3_REQ_LEN; 2387 cs->hw_hdr_len = CAPI_DATA_B3_REQ_LEN;
2400 return 1; 2388 return 0;
2401} 2389}
2402 2390
2403/** 2391/**
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index 76792707f995..aa41485bc594 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -194,13 +194,13 @@ int gigaset_get_channel(struct bc_state *bcs)
194 gig_dbg(DEBUG_CHANNEL, "could not allocate channel %d", 194 gig_dbg(DEBUG_CHANNEL, "could not allocate channel %d",
195 bcs->channel); 195 bcs->channel);
196 spin_unlock_irqrestore(&bcs->cs->lock, flags); 196 spin_unlock_irqrestore(&bcs->cs->lock, flags);
197 return 0; 197 return -EBUSY;
198 } 198 }
199 ++bcs->use_count; 199 ++bcs->use_count;
200 bcs->busy = 1; 200 bcs->busy = 1;
201 gig_dbg(DEBUG_CHANNEL, "allocated channel %d", bcs->channel); 201 gig_dbg(DEBUG_CHANNEL, "allocated channel %d", bcs->channel);
202 spin_unlock_irqrestore(&bcs->cs->lock, flags); 202 spin_unlock_irqrestore(&bcs->cs->lock, flags);
203 return 1; 203 return 0;
204} 204}
205 205
206struct bc_state *gigaset_get_free_channel(struct cardstate *cs) 206struct bc_state *gigaset_get_free_channel(struct cardstate *cs)
@@ -258,7 +258,7 @@ int gigaset_get_channels(struct cardstate *cs)
258 spin_unlock_irqrestore(&cs->lock, flags); 258 spin_unlock_irqrestore(&cs->lock, flags);
259 gig_dbg(DEBUG_CHANNEL, 259 gig_dbg(DEBUG_CHANNEL,
260 "could not allocate all channels"); 260 "could not allocate all channels");
261 return 0; 261 return -EBUSY;
262 } 262 }
263 for (i = 0; i < cs->channels; ++i) 263 for (i = 0; i < cs->channels; ++i)
264 ++cs->bcs[i].use_count; 264 ++cs->bcs[i].use_count;
@@ -266,7 +266,7 @@ int gigaset_get_channels(struct cardstate *cs)
266 266
267 gig_dbg(DEBUG_CHANNEL, "allocated all channels"); 267 gig_dbg(DEBUG_CHANNEL, "allocated all channels");
268 268
269 return 1; 269 return 0;
270} 270}
271 271
272void gigaset_free_channels(struct cardstate *cs) 272void gigaset_free_channels(struct cardstate *cs)
@@ -362,7 +362,7 @@ struct event_t *gigaset_add_event(struct cardstate *cs,
362} 362}
363EXPORT_SYMBOL_GPL(gigaset_add_event); 363EXPORT_SYMBOL_GPL(gigaset_add_event);
364 364
365static void free_strings(struct at_state_t *at_state) 365static void clear_at_state(struct at_state_t *at_state)
366{ 366{
367 int i; 367 int i;
368 368
@@ -372,18 +372,13 @@ static void free_strings(struct at_state_t *at_state)
372 } 372 }
373} 373}
374 374
375static void clear_at_state(struct at_state_t *at_state) 375static void dealloc_temp_at_states(struct cardstate *cs)
376{
377 free_strings(at_state);
378}
379
380static void dealloc_at_states(struct cardstate *cs)
381{ 376{
382 struct at_state_t *cur, *next; 377 struct at_state_t *cur, *next;
383 378
384 list_for_each_entry_safe(cur, next, &cs->temp_at_states, list) { 379 list_for_each_entry_safe(cur, next, &cs->temp_at_states, list) {
385 list_del(&cur->list); 380 list_del(&cur->list);
386 free_strings(cur); 381 clear_at_state(cur);
387 kfree(cur); 382 kfree(cur);
388 } 383 }
389} 384}
@@ -393,8 +388,7 @@ static void gigaset_freebcs(struct bc_state *bcs)
393 int i; 388 int i;
394 389
395 gig_dbg(DEBUG_INIT, "freeing bcs[%d]->hw", bcs->channel); 390 gig_dbg(DEBUG_INIT, "freeing bcs[%d]->hw", bcs->channel);
396 if (!bcs->cs->ops->freebcshw(bcs)) 391 bcs->cs->ops->freebcshw(bcs);
397 gig_dbg(DEBUG_INIT, "failed");
398 392
399 gig_dbg(DEBUG_INIT, "clearing bcs[%d]->at_state", bcs->channel); 393 gig_dbg(DEBUG_INIT, "clearing bcs[%d]->at_state", bcs->channel);
400 clear_at_state(&bcs->at_state); 394 clear_at_state(&bcs->at_state);
@@ -512,7 +506,7 @@ void gigaset_freecs(struct cardstate *cs)
512 case 1: /* error when registering to LL */ 506 case 1: /* error when registering to LL */
513 gig_dbg(DEBUG_INIT, "clearing at_state"); 507 gig_dbg(DEBUG_INIT, "clearing at_state");
514 clear_at_state(&cs->at_state); 508 clear_at_state(&cs->at_state);
515 dealloc_at_states(cs); 509 dealloc_temp_at_states(cs);
516 510
517 /* fall through */ 511 /* fall through */
518 case 0: /* error in basic setup */ 512 case 0: /* error in basic setup */
@@ -571,6 +565,8 @@ static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct cardstate *cs)
571 * @inbuf: buffer structure. 565 * @inbuf: buffer structure.
572 * @src: received data. 566 * @src: received data.
573 * @numbytes: number of bytes received. 567 * @numbytes: number of bytes received.
568 *
569 * Return value: !=0 if some data was appended
574 */ 570 */
575int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src, 571int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src,
576 unsigned numbytes) 572 unsigned numbytes)
@@ -614,8 +610,8 @@ int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src,
614EXPORT_SYMBOL_GPL(gigaset_fill_inbuf); 610EXPORT_SYMBOL_GPL(gigaset_fill_inbuf);
615 611
616/* Initialize the b-channel structure */ 612/* Initialize the b-channel structure */
617static struct bc_state *gigaset_initbcs(struct bc_state *bcs, 613static int gigaset_initbcs(struct bc_state *bcs, struct cardstate *cs,
618 struct cardstate *cs, int channel) 614 int channel)
619{ 615{
620 int i; 616 int i;
621 617
@@ -654,11 +650,7 @@ static struct bc_state *gigaset_initbcs(struct bc_state *bcs,
654 bcs->apconnstate = 0; 650 bcs->apconnstate = 0;
655 651
656 gig_dbg(DEBUG_INIT, " setting up bcs[%d]->hw", channel); 652 gig_dbg(DEBUG_INIT, " setting up bcs[%d]->hw", channel);
657 if (cs->ops->initbcshw(bcs)) 653 return cs->ops->initbcshw(bcs);
658 return bcs;
659
660 gig_dbg(DEBUG_INIT, " failed");
661 return NULL;
662} 654}
663 655
664/** 656/**
@@ -757,7 +749,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
757 cs->cmdbytes = 0; 749 cs->cmdbytes = 0;
758 750
759 gig_dbg(DEBUG_INIT, "setting up iif"); 751 gig_dbg(DEBUG_INIT, "setting up iif");
760 if (!gigaset_isdn_regdev(cs, modulename)) { 752 if (gigaset_isdn_regdev(cs, modulename) < 0) {
761 pr_err("error registering ISDN device\n"); 753 pr_err("error registering ISDN device\n");
762 goto error; 754 goto error;
763 } 755 }
@@ -765,7 +757,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
765 make_valid(cs, VALID_ID); 757 make_valid(cs, VALID_ID);
766 ++cs->cs_init; 758 ++cs->cs_init;
767 gig_dbg(DEBUG_INIT, "setting up hw"); 759 gig_dbg(DEBUG_INIT, "setting up hw");
768 if (!cs->ops->initcshw(cs)) 760 if (cs->ops->initcshw(cs) < 0)
769 goto error; 761 goto error;
770 762
771 ++cs->cs_init; 763 ++cs->cs_init;
@@ -779,7 +771,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
779 /* set up channel data structures */ 771 /* set up channel data structures */
780 for (i = 0; i < channels; ++i) { 772 for (i = 0; i < channels; ++i) {
781 gig_dbg(DEBUG_INIT, "setting up bcs[%d]", i); 773 gig_dbg(DEBUG_INIT, "setting up bcs[%d]", i);
782 if (!gigaset_initbcs(cs->bcs + i, cs, i)) { 774 if (gigaset_initbcs(cs->bcs + i, cs, i) < 0) {
783 pr_err("could not allocate channel %d data\n", i); 775 pr_err("could not allocate channel %d data\n", i);
784 goto error; 776 goto error;
785 } 777 }
@@ -848,8 +840,7 @@ static void cleanup_cs(struct cardstate *cs)
848 cs->mstate = MS_UNINITIALIZED; 840 cs->mstate = MS_UNINITIALIZED;
849 841
850 clear_at_state(&cs->at_state); 842 clear_at_state(&cs->at_state);
851 dealloc_at_states(cs); 843 dealloc_temp_at_states(cs);
852 free_strings(&cs->at_state);
853 gigaset_at_init(&cs->at_state, NULL, cs, 0); 844 gigaset_at_init(&cs->at_state, NULL, cs, 0);
854 845
855 cs->inbuf->inputstate = INS_command; 846 cs->inbuf->inputstate = INS_command;
@@ -875,7 +866,7 @@ static void cleanup_cs(struct cardstate *cs)
875 866
876 for (i = 0; i < cs->channels; ++i) { 867 for (i = 0; i < cs->channels; ++i) {
877 gigaset_freebcs(cs->bcs + i); 868 gigaset_freebcs(cs->bcs + i);
878 if (!gigaset_initbcs(cs->bcs + i, cs, i)) 869 if (gigaset_initbcs(cs->bcs + i, cs, i) < 0)
879 pr_err("could not allocate channel %d data\n", i); 870 pr_err("could not allocate channel %d data\n", i);
880 } 871 }
881 872
@@ -896,14 +887,14 @@ static void cleanup_cs(struct cardstate *cs)
896 * waiting for completion of the initialization. 887 * waiting for completion of the initialization.
897 * 888 *
898 * Return value: 889 * Return value:
899 * 1 - success, 0 - error 890 * 0 on success, error code < 0 on failure
900 */ 891 */
901int gigaset_start(struct cardstate *cs) 892int gigaset_start(struct cardstate *cs)
902{ 893{
903 unsigned long flags; 894 unsigned long flags;
904 895
905 if (mutex_lock_interruptible(&cs->mutex)) 896 if (mutex_lock_interruptible(&cs->mutex))
906 return 0; 897 return -EBUSY;
907 898
908 spin_lock_irqsave(&cs->lock, flags); 899 spin_lock_irqsave(&cs->lock, flags);
909 cs->connected = 1; 900 cs->connected = 1;
@@ -927,11 +918,11 @@ int gigaset_start(struct cardstate *cs)
927 wait_event(cs->waitqueue, !cs->waiting); 918 wait_event(cs->waitqueue, !cs->waiting);
928 919
929 mutex_unlock(&cs->mutex); 920 mutex_unlock(&cs->mutex);
930 return 1; 921 return 0;
931 922
932error: 923error:
933 mutex_unlock(&cs->mutex); 924 mutex_unlock(&cs->mutex);
934 return 0; 925 return -ENOMEM;
935} 926}
936EXPORT_SYMBOL_GPL(gigaset_start); 927EXPORT_SYMBOL_GPL(gigaset_start);
937 928
@@ -943,7 +934,7 @@ EXPORT_SYMBOL_GPL(gigaset_start);
943 * waiting for completion of the shutdown. 934 * waiting for completion of the shutdown.
944 * 935 *
945 * Return value: 936 * Return value:
946 * 0 - success, -1 - error (no device associated) 937 * 0 - success, -ENODEV - error (no device associated)
947 */ 938 */
948int gigaset_shutdown(struct cardstate *cs) 939int gigaset_shutdown(struct cardstate *cs)
949{ 940{
@@ -951,7 +942,7 @@ int gigaset_shutdown(struct cardstate *cs)
951 942
952 if (!(cs->flags & VALID_MINOR)) { 943 if (!(cs->flags & VALID_MINOR)) {
953 mutex_unlock(&cs->mutex); 944 mutex_unlock(&cs->mutex);
954 return -1; 945 return -ENODEV;
955 } 946 }
956 947
957 cs->waiting = 1; 948 cs->waiting = 1;
diff --git a/drivers/isdn/gigaset/dummyll.c b/drivers/isdn/gigaset/dummyll.c
index 19b1c779d50f..570c2d53b84e 100644
--- a/drivers/isdn/gigaset/dummyll.c
+++ b/drivers/isdn/gigaset/dummyll.c
@@ -60,7 +60,7 @@ void gigaset_isdn_stop(struct cardstate *cs)
60 60
61int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid) 61int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
62{ 62{
63 return 1; 63 return 0;
64} 64}
65 65
66void gigaset_isdn_unregdev(struct cardstate *cs) 66void gigaset_isdn_unregdev(struct cardstate *cs)
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
index 624a8256a77f..2e6963dc740e 100644
--- a/drivers/isdn/gigaset/ev-layer.c
+++ b/drivers/isdn/gigaset/ev-layer.c
@@ -153,103 +153,104 @@ struct reply_t gigaset_tab_nocid[] =
153 * action, command */ 153 * action, command */
154 154
155/* initialize device, set cid mode if possible */ 155/* initialize device, set cid mode if possible */
156 {RSP_INIT, -1, -1, SEQ_INIT, 100, 1, {ACT_TIMEOUT} }, 156 {RSP_INIT, -1, -1, SEQ_INIT, 100, 1, {ACT_TIMEOUT} },
157 157
158 {EV_TIMEOUT, 100, 100, -1, 101, 3, {0}, "Z\r"}, 158 {EV_TIMEOUT, 100, 100, -1, 101, 3, {0}, "Z\r"},
159 {RSP_OK, 101, 103, -1, 120, 5, {ACT_GETSTRING}, 159 {RSP_OK, 101, 103, -1, 120, 5, {ACT_GETSTRING},
160 "+GMR\r"}, 160 "+GMR\r"},
161 161
162 {EV_TIMEOUT, 101, 101, -1, 102, 5, {0}, "Z\r"}, 162 {EV_TIMEOUT, 101, 101, -1, 102, 5, {0}, "Z\r"},
163 {RSP_ERROR, 101, 101, -1, 102, 5, {0}, "Z\r"}, 163 {RSP_ERROR, 101, 101, -1, 102, 5, {0}, "Z\r"},
164 164
165 {EV_TIMEOUT, 102, 102, -1, 108, 5, {ACT_SETDLE1}, 165 {EV_TIMEOUT, 102, 102, -1, 108, 5, {ACT_SETDLE1},
166 "^SDLE=0\r"}, 166 "^SDLE=0\r"},
167 {RSP_OK, 108, 108, -1, 104, -1}, 167 {RSP_OK, 108, 108, -1, 104, -1},
168 {RSP_ZDLE, 104, 104, 0, 103, 5, {0}, "Z\r"}, 168 {RSP_ZDLE, 104, 104, 0, 103, 5, {0}, "Z\r"},
169 {EV_TIMEOUT, 104, 104, -1, 0, 0, {ACT_FAILINIT} }, 169 {EV_TIMEOUT, 104, 104, -1, 0, 0, {ACT_FAILINIT} },
170 {RSP_ERROR, 108, 108, -1, 0, 0, {ACT_FAILINIT} }, 170 {RSP_ERROR, 108, 108, -1, 0, 0, {ACT_FAILINIT} },
171 171
172 {EV_TIMEOUT, 108, 108, -1, 105, 2, {ACT_SETDLE0, 172 {EV_TIMEOUT, 108, 108, -1, 105, 2, {ACT_SETDLE0,
173 ACT_HUPMODEM, 173 ACT_HUPMODEM,
174 ACT_TIMEOUT} }, 174 ACT_TIMEOUT} },
175 {EV_TIMEOUT, 105, 105, -1, 103, 5, {0}, "Z\r"}, 175 {EV_TIMEOUT, 105, 105, -1, 103, 5, {0}, "Z\r"},
176 176
177 {RSP_ERROR, 102, 102, -1, 107, 5, {0}, "^GETPRE\r"}, 177 {RSP_ERROR, 102, 102, -1, 107, 5, {0}, "^GETPRE\r"},
178 {RSP_OK, 107, 107, -1, 0, 0, {ACT_CONFIGMODE} }, 178 {RSP_OK, 107, 107, -1, 0, 0, {ACT_CONFIGMODE} },
179 {RSP_ERROR, 107, 107, -1, 0, 0, {ACT_FAILINIT} }, 179 {RSP_ERROR, 107, 107, -1, 0, 0, {ACT_FAILINIT} },
180 {EV_TIMEOUT, 107, 107, -1, 0, 0, {ACT_FAILINIT} }, 180 {EV_TIMEOUT, 107, 107, -1, 0, 0, {ACT_FAILINIT} },
181 181
182 {RSP_ERROR, 103, 103, -1, 0, 0, {ACT_FAILINIT} }, 182 {RSP_ERROR, 103, 103, -1, 0, 0, {ACT_FAILINIT} },
183 {EV_TIMEOUT, 103, 103, -1, 0, 0, {ACT_FAILINIT} }, 183 {EV_TIMEOUT, 103, 103, -1, 0, 0, {ACT_FAILINIT} },
184 184
185 {RSP_STRING, 120, 120, -1, 121, -1, {ACT_SETVER} }, 185 {RSP_STRING, 120, 120, -1, 121, -1, {ACT_SETVER} },
186 186
187 {EV_TIMEOUT, 120, 121, -1, 0, 0, {ACT_FAILVER, 187 {EV_TIMEOUT, 120, 121, -1, 0, 0, {ACT_FAILVER,
188 ACT_INIT} }, 188 ACT_INIT} },
189 {RSP_ERROR, 120, 121, -1, 0, 0, {ACT_FAILVER, 189 {RSP_ERROR, 120, 121, -1, 0, 0, {ACT_FAILVER,
190 ACT_INIT} }, 190 ACT_INIT} },
191 {RSP_OK, 121, 121, -1, 0, 0, {ACT_GOTVER, 191 {RSP_OK, 121, 121, -1, 0, 0, {ACT_GOTVER,
192 ACT_INIT} }, 192 ACT_INIT} },
193 {RSP_NONE, 121, 121, -1, 120, 0, {ACT_GETSTRING} },
193 194
194/* leave dle mode */ 195/* leave dle mode */
195 {RSP_INIT, 0, 0, SEQ_DLE0, 201, 5, {0}, "^SDLE=0\r"}, 196 {RSP_INIT, 0, 0, SEQ_DLE0, 201, 5, {0}, "^SDLE=0\r"},
196 {RSP_OK, 201, 201, -1, 202, -1}, 197 {RSP_OK, 201, 201, -1, 202, -1},
197 {RSP_ZDLE, 202, 202, 0, 0, 0, {ACT_DLE0} }, 198 {RSP_ZDLE, 202, 202, 0, 0, 0, {ACT_DLE0} },
198 {RSP_NODEV, 200, 249, -1, 0, 0, {ACT_FAKEDLE0} }, 199 {RSP_NODEV, 200, 249, -1, 0, 0, {ACT_FAKEDLE0} },
199 {RSP_ERROR, 200, 249, -1, 0, 0, {ACT_FAILDLE0} }, 200 {RSP_ERROR, 200, 249, -1, 0, 0, {ACT_FAILDLE0} },
200 {EV_TIMEOUT, 200, 249, -1, 0, 0, {ACT_FAILDLE0} }, 201 {EV_TIMEOUT, 200, 249, -1, 0, 0, {ACT_FAILDLE0} },
201 202
202/* enter dle mode */ 203/* enter dle mode */
203 {RSP_INIT, 0, 0, SEQ_DLE1, 251, 5, {0}, "^SDLE=1\r"}, 204 {RSP_INIT, 0, 0, SEQ_DLE1, 251, 5, {0}, "^SDLE=1\r"},
204 {RSP_OK, 251, 251, -1, 252, -1}, 205 {RSP_OK, 251, 251, -1, 252, -1},
205 {RSP_ZDLE, 252, 252, 1, 0, 0, {ACT_DLE1} }, 206 {RSP_ZDLE, 252, 252, 1, 0, 0, {ACT_DLE1} },
206 {RSP_ERROR, 250, 299, -1, 0, 0, {ACT_FAILDLE1} }, 207 {RSP_ERROR, 250, 299, -1, 0, 0, {ACT_FAILDLE1} },
207 {EV_TIMEOUT, 250, 299, -1, 0, 0, {ACT_FAILDLE1} }, 208 {EV_TIMEOUT, 250, 299, -1, 0, 0, {ACT_FAILDLE1} },
208 209
209/* incoming call */ 210/* incoming call */
210 {RSP_RING, -1, -1, -1, -1, -1, {ACT_RING} }, 211 {RSP_RING, -1, -1, -1, -1, -1, {ACT_RING} },
211 212
212/* get cid */ 213/* get cid */
213 {RSP_INIT, 0, 0, SEQ_CID, 301, 5, {0}, "^SGCI?\r"}, 214 {RSP_INIT, 0, 0, SEQ_CID, 301, 5, {0}, "^SGCI?\r"},
214 {RSP_OK, 301, 301, -1, 302, -1}, 215 {RSP_OK, 301, 301, -1, 302, -1},
215 {RSP_ZGCI, 302, 302, -1, 0, 0, {ACT_CID} }, 216 {RSP_ZGCI, 302, 302, -1, 0, 0, {ACT_CID} },
216 {RSP_ERROR, 301, 349, -1, 0, 0, {ACT_FAILCID} }, 217 {RSP_ERROR, 301, 349, -1, 0, 0, {ACT_FAILCID} },
217 {EV_TIMEOUT, 301, 349, -1, 0, 0, {ACT_FAILCID} }, 218 {EV_TIMEOUT, 301, 349, -1, 0, 0, {ACT_FAILCID} },
218 219
219/* enter cid mode */ 220/* enter cid mode */
220 {RSP_INIT, 0, 0, SEQ_CIDMODE, 150, 5, {0}, "^SGCI=1\r"}, 221 {RSP_INIT, 0, 0, SEQ_CIDMODE, 150, 5, {0}, "^SGCI=1\r"},
221 {RSP_OK, 150, 150, -1, 0, 0, {ACT_CMODESET} }, 222 {RSP_OK, 150, 150, -1, 0, 0, {ACT_CMODESET} },
222 {RSP_ERROR, 150, 150, -1, 0, 0, {ACT_FAILCMODE} }, 223 {RSP_ERROR, 150, 150, -1, 0, 0, {ACT_FAILCMODE} },
223 {EV_TIMEOUT, 150, 150, -1, 0, 0, {ACT_FAILCMODE} }, 224 {EV_TIMEOUT, 150, 150, -1, 0, 0, {ACT_FAILCMODE} },
224 225
225/* leave cid mode */ 226/* leave cid mode */
226 {RSP_INIT, 0, 0, SEQ_UMMODE, 160, 5, {0}, "Z\r"}, 227 {RSP_INIT, 0, 0, SEQ_UMMODE, 160, 5, {0}, "Z\r"},
227 {RSP_OK, 160, 160, -1, 0, 0, {ACT_UMODESET} }, 228 {RSP_OK, 160, 160, -1, 0, 0, {ACT_UMODESET} },
228 {RSP_ERROR, 160, 160, -1, 0, 0, {ACT_FAILUMODE} }, 229 {RSP_ERROR, 160, 160, -1, 0, 0, {ACT_FAILUMODE} },
229 {EV_TIMEOUT, 160, 160, -1, 0, 0, {ACT_FAILUMODE} }, 230 {EV_TIMEOUT, 160, 160, -1, 0, 0, {ACT_FAILUMODE} },
230 231
231/* abort getting cid */ 232/* abort getting cid */
232 {RSP_INIT, 0, 0, SEQ_NOCID, 0, 0, {ACT_ABORTCID} }, 233 {RSP_INIT, 0, 0, SEQ_NOCID, 0, 0, {ACT_ABORTCID} },
233 234
234/* reset */ 235/* reset */
235 {RSP_INIT, 0, 0, SEQ_SHUTDOWN, 504, 5, {0}, "Z\r"}, 236 {RSP_INIT, 0, 0, SEQ_SHUTDOWN, 504, 5, {0}, "Z\r"},
236 {RSP_OK, 504, 504, -1, 0, 0, {ACT_SDOWN} }, 237 {RSP_OK, 504, 504, -1, 0, 0, {ACT_SDOWN} },
237 {RSP_ERROR, 501, 599, -1, 0, 0, {ACT_FAILSDOWN} }, 238 {RSP_ERROR, 501, 599, -1, 0, 0, {ACT_FAILSDOWN} },
238 {EV_TIMEOUT, 501, 599, -1, 0, 0, {ACT_FAILSDOWN} }, 239 {EV_TIMEOUT, 501, 599, -1, 0, 0, {ACT_FAILSDOWN} },
239 {RSP_NODEV, 501, 599, -1, 0, 0, {ACT_FAKESDOWN} }, 240 {RSP_NODEV, 501, 599, -1, 0, 0, {ACT_FAKESDOWN} },
240 241
241 {EV_PROC_CIDMODE, -1, -1, -1, -1, -1, {ACT_PROC_CIDMODE} }, 242 {EV_PROC_CIDMODE, -1, -1, -1, -1, -1, {ACT_PROC_CIDMODE} },
242 {EV_IF_LOCK, -1, -1, -1, -1, -1, {ACT_IF_LOCK} }, 243 {EV_IF_LOCK, -1, -1, -1, -1, -1, {ACT_IF_LOCK} },
243 {EV_IF_VER, -1, -1, -1, -1, -1, {ACT_IF_VER} }, 244 {EV_IF_VER, -1, -1, -1, -1, -1, {ACT_IF_VER} },
244 {EV_START, -1, -1, -1, -1, -1, {ACT_START} }, 245 {EV_START, -1, -1, -1, -1, -1, {ACT_START} },
245 {EV_STOP, -1, -1, -1, -1, -1, {ACT_STOP} }, 246 {EV_STOP, -1, -1, -1, -1, -1, {ACT_STOP} },
246 {EV_SHUTDOWN, -1, -1, -1, -1, -1, {ACT_SHUTDOWN} }, 247 {EV_SHUTDOWN, -1, -1, -1, -1, -1, {ACT_SHUTDOWN} },
247 248
248/* misc. */ 249/* misc. */
249 {RSP_ERROR, -1, -1, -1, -1, -1, {ACT_ERROR} }, 250 {RSP_ERROR, -1, -1, -1, -1, -1, {ACT_ERROR} },
250 {RSP_ZCAU, -1, -1, -1, -1, -1, {ACT_ZCAU} }, 251 {RSP_ZCAU, -1, -1, -1, -1, -1, {ACT_ZCAU} },
251 {RSP_NONE, -1, -1, -1, -1, -1, {ACT_DEBUG} }, 252 {RSP_NONE, -1, -1, -1, -1, -1, {ACT_DEBUG} },
252 {RSP_ANY, -1, -1, -1, -1, -1, {ACT_WARN} }, 253 {RSP_ANY, -1, -1, -1, -1, -1, {ACT_WARN} },
253 {RSP_LAST} 254 {RSP_LAST}
254}; 255};
255 256
@@ -261,90 +262,90 @@ struct reply_t gigaset_tab_cid[] =
261 * action, command */ 262 * action, command */
262 263
263/* dial */ 264/* dial */
264 {EV_DIAL, -1, -1, -1, -1, -1, {ACT_DIAL} }, 265 {EV_DIAL, -1, -1, -1, -1, -1, {ACT_DIAL} },
265 {RSP_INIT, 0, 0, SEQ_DIAL, 601, 5, {ACT_CMD + AT_BC} }, 266 {RSP_INIT, 0, 0, SEQ_DIAL, 601, 5, {ACT_CMD + AT_BC} },
266 {RSP_OK, 601, 601, -1, 603, 5, {ACT_CMD + AT_PROTO} }, 267 {RSP_OK, 601, 601, -1, 603, 5, {ACT_CMD + AT_PROTO} },
267 {RSP_OK, 603, 603, -1, 604, 5, {ACT_CMD + AT_TYPE} }, 268 {RSP_OK, 603, 603, -1, 604, 5, {ACT_CMD + AT_TYPE} },
268 {RSP_OK, 604, 604, -1, 605, 5, {ACT_CMD + AT_MSN} }, 269 {RSP_OK, 604, 604, -1, 605, 5, {ACT_CMD + AT_MSN} },
269 {RSP_NULL, 605, 605, -1, 606, 5, {ACT_CMD + AT_CLIP} }, 270 {RSP_NULL, 605, 605, -1, 606, 5, {ACT_CMD + AT_CLIP} },
270 {RSP_OK, 605, 605, -1, 606, 5, {ACT_CMD + AT_CLIP} }, 271 {RSP_OK, 605, 605, -1, 606, 5, {ACT_CMD + AT_CLIP} },
271 {RSP_NULL, 606, 606, -1, 607, 5, {ACT_CMD + AT_ISO} }, 272 {RSP_NULL, 606, 606, -1, 607, 5, {ACT_CMD + AT_ISO} },
272 {RSP_OK, 606, 606, -1, 607, 5, {ACT_CMD + AT_ISO} }, 273 {RSP_OK, 606, 606, -1, 607, 5, {ACT_CMD + AT_ISO} },
273 {RSP_OK, 607, 607, -1, 608, 5, {0}, "+VLS=17\r"}, 274 {RSP_OK, 607, 607, -1, 608, 5, {0}, "+VLS=17\r"},
274 {RSP_OK, 608, 608, -1, 609, -1}, 275 {RSP_OK, 608, 608, -1, 609, -1},
275 {RSP_ZSAU, 609, 609, ZSAU_PROCEEDING, 610, 5, {ACT_CMD + AT_DIAL} }, 276 {RSP_ZSAU, 609, 609, ZSAU_PROCEEDING, 610, 5, {ACT_CMD + AT_DIAL} },
276 {RSP_OK, 610, 610, -1, 650, 0, {ACT_DIALING} }, 277 {RSP_OK, 610, 610, -1, 650, 0, {ACT_DIALING} },
277 278
278 {RSP_ERROR, 601, 610, -1, 0, 0, {ACT_ABORTDIAL} }, 279 {RSP_ERROR, 601, 610, -1, 0, 0, {ACT_ABORTDIAL} },
279 {EV_TIMEOUT, 601, 610, -1, 0, 0, {ACT_ABORTDIAL} }, 280 {EV_TIMEOUT, 601, 610, -1, 0, 0, {ACT_ABORTDIAL} },
280 281
281/* optional dialing responses */ 282/* optional dialing responses */
282 {EV_BC_OPEN, 650, 650, -1, 651, -1}, 283 {EV_BC_OPEN, 650, 650, -1, 651, -1},
283 {RSP_ZVLS, 609, 651, 17, -1, -1, {ACT_DEBUG} }, 284 {RSP_ZVLS, 609, 651, 17, -1, -1, {ACT_DEBUG} },
284 {RSP_ZCTP, 610, 651, -1, -1, -1, {ACT_DEBUG} }, 285 {RSP_ZCTP, 610, 651, -1, -1, -1, {ACT_DEBUG} },
285 {RSP_ZCPN, 610, 651, -1, -1, -1, {ACT_DEBUG} }, 286 {RSP_ZCPN, 610, 651, -1, -1, -1, {ACT_DEBUG} },
286 {RSP_ZSAU, 650, 651, ZSAU_CALL_DELIVERED, -1, -1, {ACT_DEBUG} }, 287 {RSP_ZSAU, 650, 651, ZSAU_CALL_DELIVERED, -1, -1, {ACT_DEBUG} },
287 288
288/* connect */ 289/* connect */
289 {RSP_ZSAU, 650, 650, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT} }, 290 {RSP_ZSAU, 650, 650, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT} },
290 {RSP_ZSAU, 651, 651, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT, 291 {RSP_ZSAU, 651, 651, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT,
291 ACT_NOTIFY_BC_UP} }, 292 ACT_NOTIFY_BC_UP} },
292 {RSP_ZSAU, 750, 750, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT} }, 293 {RSP_ZSAU, 750, 750, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT} },
293 {RSP_ZSAU, 751, 751, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT, 294 {RSP_ZSAU, 751, 751, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT,
294 ACT_NOTIFY_BC_UP} }, 295 ACT_NOTIFY_BC_UP} },
295 {EV_BC_OPEN, 800, 800, -1, 800, -1, {ACT_NOTIFY_BC_UP} }, 296 {EV_BC_OPEN, 800, 800, -1, 800, -1, {ACT_NOTIFY_BC_UP} },
296 297
297/* remote hangup */ 298/* remote hangup */
298 {RSP_ZSAU, 650, 651, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEREJECT} }, 299 {RSP_ZSAU, 650, 651, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEREJECT} },
299 {RSP_ZSAU, 750, 751, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP} }, 300 {RSP_ZSAU, 750, 751, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP} },
300 {RSP_ZSAU, 800, 800, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP} }, 301 {RSP_ZSAU, 800, 800, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP} },
301 302
302/* hangup */ 303/* hangup */
303 {EV_HUP, -1, -1, -1, -1, -1, {ACT_HUP} }, 304 {EV_HUP, -1, -1, -1, -1, -1, {ACT_HUP} },
304 {RSP_INIT, -1, -1, SEQ_HUP, 401, 5, {0}, "+VLS=0\r"}, 305 {RSP_INIT, -1, -1, SEQ_HUP, 401, 5, {0}, "+VLS=0\r"},
305 {RSP_OK, 401, 401, -1, 402, 5}, 306 {RSP_OK, 401, 401, -1, 402, 5},
306 {RSP_ZVLS, 402, 402, 0, 403, 5}, 307 {RSP_ZVLS, 402, 402, 0, 403, 5},
307 {RSP_ZSAU, 403, 403, ZSAU_DISCONNECT_REQ, -1, -1, {ACT_DEBUG} }, 308 {RSP_ZSAU, 403, 403, ZSAU_DISCONNECT_REQ, -1, -1, {ACT_DEBUG} },
308 {RSP_ZSAU, 403, 403, ZSAU_NULL, 0, 0, {ACT_DISCONNECT} }, 309 {RSP_ZSAU, 403, 403, ZSAU_NULL, 0, 0, {ACT_DISCONNECT} },
309 {RSP_NODEV, 401, 403, -1, 0, 0, {ACT_FAKEHUP} }, 310 {RSP_NODEV, 401, 403, -1, 0, 0, {ACT_FAKEHUP} },
310 {RSP_ERROR, 401, 401, -1, 0, 0, {ACT_ABORTHUP} }, 311 {RSP_ERROR, 401, 401, -1, 0, 0, {ACT_ABORTHUP} },
311 {EV_TIMEOUT, 401, 403, -1, 0, 0, {ACT_ABORTHUP} }, 312 {EV_TIMEOUT, 401, 403, -1, 0, 0, {ACT_ABORTHUP} },
312 313
313 {EV_BC_CLOSED, 0, 0, -1, 0, -1, {ACT_NOTIFY_BC_DOWN} }, 314 {EV_BC_CLOSED, 0, 0, -1, 0, -1, {ACT_NOTIFY_BC_DOWN} },
314 315
315/* ring */ 316/* ring */
316 {RSP_ZBC, 700, 700, -1, -1, -1, {0} }, 317 {RSP_ZBC, 700, 700, -1, -1, -1, {0} },
317 {RSP_ZHLC, 700, 700, -1, -1, -1, {0} }, 318 {RSP_ZHLC, 700, 700, -1, -1, -1, {0} },
318 {RSP_NMBR, 700, 700, -1, -1, -1, {0} }, 319 {RSP_NMBR, 700, 700, -1, -1, -1, {0} },
319 {RSP_ZCPN, 700, 700, -1, -1, -1, {0} }, 320 {RSP_ZCPN, 700, 700, -1, -1, -1, {0} },
320 {RSP_ZCTP, 700, 700, -1, -1, -1, {0} }, 321 {RSP_ZCTP, 700, 700, -1, -1, -1, {0} },
321 {EV_TIMEOUT, 700, 700, -1, 720, 720, {ACT_ICALL} }, 322 {EV_TIMEOUT, 700, 700, -1, 720, 720, {ACT_ICALL} },
322 {EV_BC_CLOSED, 720, 720, -1, 0, -1, {ACT_NOTIFY_BC_DOWN} }, 323 {EV_BC_CLOSED, 720, 720, -1, 0, -1, {ACT_NOTIFY_BC_DOWN} },
323 324
324/*accept icall*/ 325/*accept icall*/
325 {EV_ACCEPT, -1, -1, -1, -1, -1, {ACT_ACCEPT} }, 326 {EV_ACCEPT, -1, -1, -1, -1, -1, {ACT_ACCEPT} },
326 {RSP_INIT, 720, 720, SEQ_ACCEPT, 721, 5, {ACT_CMD + AT_PROTO} }, 327 {RSP_INIT, 720, 720, SEQ_ACCEPT, 721, 5, {ACT_CMD + AT_PROTO} },
327 {RSP_OK, 721, 721, -1, 722, 5, {ACT_CMD + AT_ISO} }, 328 {RSP_OK, 721, 721, -1, 722, 5, {ACT_CMD + AT_ISO} },
328 {RSP_OK, 722, 722, -1, 723, 5, {0}, "+VLS=17\r"}, 329 {RSP_OK, 722, 722, -1, 723, 5, {0}, "+VLS=17\r"},
329 {RSP_OK, 723, 723, -1, 724, 5, {0} }, 330 {RSP_OK, 723, 723, -1, 724, 5, {0} },
330 {RSP_ZVLS, 724, 724, 17, 750, 50, {ACT_ACCEPTED} }, 331 {RSP_ZVLS, 724, 724, 17, 750, 50, {ACT_ACCEPTED} },
331 {RSP_ERROR, 721, 729, -1, 0, 0, {ACT_ABORTACCEPT} }, 332 {RSP_ERROR, 721, 729, -1, 0, 0, {ACT_ABORTACCEPT} },
332 {EV_TIMEOUT, 721, 729, -1, 0, 0, {ACT_ABORTACCEPT} }, 333 {EV_TIMEOUT, 721, 729, -1, 0, 0, {ACT_ABORTACCEPT} },
333 {RSP_ZSAU, 700, 729, ZSAU_NULL, 0, 0, {ACT_ABORTACCEPT} }, 334 {RSP_ZSAU, 700, 729, ZSAU_NULL, 0, 0, {ACT_ABORTACCEPT} },
334 {RSP_ZSAU, 700, 729, ZSAU_ACTIVE, 0, 0, {ACT_ABORTACCEPT} }, 335 {RSP_ZSAU, 700, 729, ZSAU_ACTIVE, 0, 0, {ACT_ABORTACCEPT} },
335 {RSP_ZSAU, 700, 729, ZSAU_DISCONNECT_IND, 0, 0, {ACT_ABORTACCEPT} }, 336 {RSP_ZSAU, 700, 729, ZSAU_DISCONNECT_IND, 0, 0, {ACT_ABORTACCEPT} },
336 337
337 {EV_BC_OPEN, 750, 750, -1, 751, -1}, 338 {EV_BC_OPEN, 750, 750, -1, 751, -1},
338 {EV_TIMEOUT, 750, 751, -1, 0, 0, {ACT_CONNTIMEOUT} }, 339 {EV_TIMEOUT, 750, 751, -1, 0, 0, {ACT_CONNTIMEOUT} },
339 340
340/* B channel closed (general case) */ 341/* B channel closed (general case) */
341 {EV_BC_CLOSED, -1, -1, -1, -1, -1, {ACT_NOTIFY_BC_DOWN} }, 342 {EV_BC_CLOSED, -1, -1, -1, -1, -1, {ACT_NOTIFY_BC_DOWN} },
342 343
343/* misc. */ 344/* misc. */
344 {RSP_ZCON, -1, -1, -1, -1, -1, {ACT_DEBUG} }, 345 {RSP_ZCON, -1, -1, -1, -1, -1, {ACT_DEBUG} },
345 {RSP_ZCAU, -1, -1, -1, -1, -1, {ACT_ZCAU} }, 346 {RSP_ZCAU, -1, -1, -1, -1, -1, {ACT_ZCAU} },
346 {RSP_NONE, -1, -1, -1, -1, -1, {ACT_DEBUG} }, 347 {RSP_NONE, -1, -1, -1, -1, -1, {ACT_DEBUG} },
347 {RSP_ANY, -1, -1, -1, -1, -1, {ACT_WARN} }, 348 {RSP_ANY, -1, -1, -1, -1, -1, {ACT_WARN} },
348 {RSP_LAST} 349 {RSP_LAST}
349}; 350};
350 351
@@ -648,16 +649,16 @@ static void disconnect(struct at_state_t **at_state_p)
648static inline struct at_state_t *get_free_channel(struct cardstate *cs, 649static inline struct at_state_t *get_free_channel(struct cardstate *cs,
649 int cid) 650 int cid)
650/* cids: >0: siemens-cid 651/* cids: >0: siemens-cid
651 0: without cid 652 * 0: without cid
652 -1: no cid assigned yet 653 * -1: no cid assigned yet
653*/ 654 */
654{ 655{
655 unsigned long flags; 656 unsigned long flags;
656 int i; 657 int i;
657 struct at_state_t *ret; 658 struct at_state_t *ret;
658 659
659 for (i = 0; i < cs->channels; ++i) 660 for (i = 0; i < cs->channels; ++i)
660 if (gigaset_get_channel(cs->bcs + i)) { 661 if (gigaset_get_channel(cs->bcs + i) >= 0) {
661 ret = &cs->bcs[i].at_state; 662 ret = &cs->bcs[i].at_state;
662 ret->cid = cid; 663 ret->cid = cid;
663 return ret; 664 return ret;
@@ -922,18 +923,18 @@ static void do_stop(struct cardstate *cs)
922 * channel >= 0: getting cid for the channel failed 923 * channel >= 0: getting cid for the channel failed
923 * channel < 0: entering cid mode failed 924 * channel < 0: entering cid mode failed
924 * 925 *
925 * returns 0 on failure 926 * returns 0 on success, <0 on failure
926 */ 927 */
927static int reinit_and_retry(struct cardstate *cs, int channel) 928static int reinit_and_retry(struct cardstate *cs, int channel)
928{ 929{
929 int i; 930 int i;
930 931
931 if (--cs->retry_count <= 0) 932 if (--cs->retry_count <= 0)
932 return 0; 933 return -EFAULT;
933 934
934 for (i = 0; i < cs->channels; ++i) 935 for (i = 0; i < cs->channels; ++i)
935 if (cs->bcs[i].at_state.cid > 0) 936 if (cs->bcs[i].at_state.cid > 0)
936 return 0; 937 return -EBUSY;
937 938
938 if (channel < 0) 939 if (channel < 0)
939 dev_warn(cs->dev, 940 dev_warn(cs->dev,
@@ -944,7 +945,7 @@ static int reinit_and_retry(struct cardstate *cs, int channel)
944 cs->bcs[channel].at_state.pending_commands |= PC_CID; 945 cs->bcs[channel].at_state.pending_commands |= PC_CID;
945 } 946 }
946 schedule_init(cs, MS_INIT); 947 schedule_init(cs, MS_INIT);
947 return 1; 948 return 0;
948} 949}
949 950
950static int at_state_invalid(struct cardstate *cs, 951static int at_state_invalid(struct cardstate *cs,
@@ -1015,7 +1016,7 @@ static int do_lock(struct cardstate *cs)
1015 if (cs->bcs[i].at_state.pending_commands) 1016 if (cs->bcs[i].at_state.pending_commands)
1016 return -EBUSY; 1017 return -EBUSY;
1017 1018
1018 if (!gigaset_get_channels(cs)) 1019 if (gigaset_get_channels(cs) < 0)
1019 return -EBUSY; 1020 return -EBUSY;
1020 1021
1021 break; 1022 break;
@@ -1124,7 +1125,7 @@ static void do_action(int action, struct cardstate *cs,
1124 init_failed(cs, M_UNKNOWN); 1125 init_failed(cs, M_UNKNOWN);
1125 break; 1126 break;
1126 } 1127 }
1127 if (!reinit_and_retry(cs, -1)) 1128 if (reinit_and_retry(cs, -1) < 0)
1128 schedule_init(cs, MS_RECOVER); 1129 schedule_init(cs, MS_RECOVER);
1129 break; 1130 break;
1130 case ACT_FAILUMODE: 1131 case ACT_FAILUMODE:
@@ -1267,7 +1268,7 @@ static void do_action(int action, struct cardstate *cs,
1267 case ACT_FAILCID: 1268 case ACT_FAILCID:
1268 cs->cur_at_seq = SEQ_NONE; 1269 cs->cur_at_seq = SEQ_NONE;
1269 channel = cs->curchannel; 1270 channel = cs->curchannel;
1270 if (!reinit_and_retry(cs, channel)) { 1271 if (reinit_and_retry(cs, channel) < 0) {
1271 dev_warn(cs->dev, 1272 dev_warn(cs->dev,
1272 "Could not get a call ID. Cannot dial.\n"); 1273 "Could not get a call ID. Cannot dial.\n");
1273 at_state2 = &cs->bcs[channel].at_state; 1274 at_state2 = &cs->bcs[channel].at_state;
@@ -1314,8 +1315,9 @@ static void do_action(int action, struct cardstate *cs,
1314 s = ev->ptr; 1315 s = ev->ptr;
1315 1316
1316 if (!strcmp(s, "OK")) { 1317 if (!strcmp(s, "OK")) {
1318 /* OK without version string: assume old response */
1317 *p_genresp = 1; 1319 *p_genresp = 1;
1318 *p_resp_code = RSP_ERROR; 1320 *p_resp_code = RSP_NONE;
1319 break; 1321 break;
1320 } 1322 }
1321 1323
@@ -1372,7 +1374,8 @@ static void do_action(int action, struct cardstate *cs,
1372 ev->parameter, at_state->ConState); 1374 ev->parameter, at_state->ConState);
1373 break; 1375 break;
1374 1376
1375 /* events from the LL */ 1377 /* events from the LL */
1378
1376 case ACT_DIAL: 1379 case ACT_DIAL:
1377 start_dial(at_state, ev->ptr, ev->parameter); 1380 start_dial(at_state, ev->ptr, ev->parameter);
1378 break; 1381 break;
@@ -1385,7 +1388,8 @@ static void do_action(int action, struct cardstate *cs,
1385 cs->commands_pending = 1; 1388 cs->commands_pending = 1;
1386 break; 1389 break;
1387 1390
1388 /* hotplug events */ 1391 /* hotplug events */
1392
1389 case ACT_STOP: 1393 case ACT_STOP:
1390 do_stop(cs); 1394 do_stop(cs);
1391 break; 1395 break;
@@ -1393,7 +1397,8 @@ static void do_action(int action, struct cardstate *cs,
1393 do_start(cs); 1397 do_start(cs);
1394 break; 1398 break;
1395 1399
1396 /* events from the interface */ 1400 /* events from the interface */
1401
1397 case ACT_IF_LOCK: 1402 case ACT_IF_LOCK:
1398 cs->cmd_result = ev->parameter ? do_lock(cs) : do_unlock(cs); 1403 cs->cmd_result = ev->parameter ? do_lock(cs) : do_unlock(cs);
1399 cs->waiting = 0; 1404 cs->waiting = 0;
@@ -1412,7 +1417,8 @@ static void do_action(int action, struct cardstate *cs,
1412 wake_up(&cs->waitqueue); 1417 wake_up(&cs->waitqueue);
1413 break; 1418 break;
1414 1419
1415 /* events from the proc file system */ 1420 /* events from the proc file system */
1421
1416 case ACT_PROC_CIDMODE: 1422 case ACT_PROC_CIDMODE:
1417 spin_lock_irqsave(&cs->lock, flags); 1423 spin_lock_irqsave(&cs->lock, flags);
1418 if (ev->parameter != cs->cidmode) { 1424 if (ev->parameter != cs->cidmode) {
@@ -1431,7 +1437,8 @@ static void do_action(int action, struct cardstate *cs,
1431 wake_up(&cs->waitqueue); 1437 wake_up(&cs->waitqueue);
1432 break; 1438 break;
1433 1439
1434 /* events from the hardware drivers */ 1440 /* events from the hardware drivers */
1441
1435 case ACT_NOTIFY_BC_DOWN: 1442 case ACT_NOTIFY_BC_DOWN:
1436 bchannel_down(bcs); 1443 bchannel_down(bcs);
1437 break; 1444 break;
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
index 1dc25131e670..8e2fc8f31d16 100644
--- a/drivers/isdn/gigaset/gigaset.h
+++ b/drivers/isdn/gigaset/gigaset.h
@@ -163,8 +163,8 @@ void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
163#define BAS_LOWFRAME 5 /* " " with negative flow control */ 163#define BAS_LOWFRAME 5 /* " " with negative flow control */
164#define BAS_CORRFRAMES 4 /* flow control multiplicator */ 164#define BAS_CORRFRAMES 4 /* flow control multiplicator */
165 165
166#define BAS_INBUFSIZE (BAS_MAXFRAME * BAS_NUMFRAMES) 166#define BAS_INBUFSIZE (BAS_MAXFRAME * BAS_NUMFRAMES) /* size of isoc in buf
167/* size of isoc in buf per URB */ 167 * per URB */
168#define BAS_OUTBUFSIZE 4096 /* size of common isoc out buffer */ 168#define BAS_OUTBUFSIZE 4096 /* size of common isoc out buffer */
169#define BAS_OUTBUFPAD BAS_MAXFRAME /* size of pad area for isoc out buf */ 169#define BAS_OUTBUFPAD BAS_MAXFRAME /* size of pad area for isoc out buf */
170 170
@@ -471,18 +471,18 @@ struct cardstate {
471 for */ 471 for */
472 int commands_pending; /* flag(s) in xxx.commands_pending have 472 int commands_pending; /* flag(s) in xxx.commands_pending have
473 been set */ 473 been set */
474 struct tasklet_struct event_tasklet; 474 struct tasklet_struct
475 /* tasklet for serializing AT commands. 475 event_tasklet; /* tasklet for serializing AT commands.
476 * Scheduled 476 * Scheduled
477 * -> for modem reponses (and 477 * -> for modem reponses (and
478 * incoming data for M10x) 478 * incoming data for M10x)
479 * -> on timeout 479 * -> on timeout
480 * -> after setting bits in 480 * -> after setting bits in
481 * xxx.at_state.pending_command 481 * xxx.at_state.pending_command
482 * (e.g. command from LL) */ 482 * (e.g. command from LL) */
483 struct tasklet_struct write_tasklet; 483 struct tasklet_struct
484 /* tasklet for serial output 484 write_tasklet; /* tasklet for serial output
485 * (not used in base driver) */ 485 * (not used in base driver) */
486 486
487 /* event queue */ 487 /* event queue */
488 struct event_t events[MAX_EVENTS]; 488 struct event_t events[MAX_EVENTS];
@@ -583,7 +583,7 @@ struct gigaset_ops {
583 int (*initbcshw)(struct bc_state *bcs); 583 int (*initbcshw)(struct bc_state *bcs);
584 584
585 /* Called by gigaset_freecs() for freeing bcs->hw.xxx */ 585 /* Called by gigaset_freecs() for freeing bcs->hw.xxx */
586 int (*freebcshw)(struct bc_state *bcs); 586 void (*freebcshw)(struct bc_state *bcs);
587 587
588 /* Called by gigaset_bchannel_down() for resetting bcs->hw.xxx */ 588 /* Called by gigaset_bchannel_down() for resetting bcs->hw.xxx */
589 void (*reinitbcshw)(struct bc_state *bcs); 589 void (*reinitbcshw)(struct bc_state *bcs);
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c
index 0f13eb1de657..2d75329007f1 100644
--- a/drivers/isdn/gigaset/i4l.c
+++ b/drivers/isdn/gigaset/i4l.c
@@ -229,7 +229,7 @@ static int command_from_LL(isdn_ctrl *cntrl)
229 return -EINVAL; 229 return -EINVAL;
230 } 230 }
231 bcs = cs->bcs + ch; 231 bcs = cs->bcs + ch;
232 if (!gigaset_get_channel(bcs)) { 232 if (gigaset_get_channel(bcs) < 0) {
233 dev_err(cs->dev, "ISDN_CMD_DIAL: channel not free\n"); 233 dev_err(cs->dev, "ISDN_CMD_DIAL: channel not free\n");
234 return -EBUSY; 234 return -EBUSY;
235 } 235 }
@@ -618,7 +618,7 @@ void gigaset_isdn_stop(struct cardstate *cs)
618 * @cs: device descriptor structure. 618 * @cs: device descriptor structure.
619 * @isdnid: device name. 619 * @isdnid: device name.
620 * 620 *
621 * Return value: 1 for success, 0 for failure 621 * Return value: 0 on success, error code < 0 on failure
622 */ 622 */
623int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid) 623int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
624{ 624{
@@ -627,14 +627,14 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
627 iif = kmalloc(sizeof *iif, GFP_KERNEL); 627 iif = kmalloc(sizeof *iif, GFP_KERNEL);
628 if (!iif) { 628 if (!iif) {
629 pr_err("out of memory\n"); 629 pr_err("out of memory\n");
630 return 0; 630 return -ENOMEM;
631 } 631 }
632 632
633 if (snprintf(iif->id, sizeof iif->id, "%s_%u", isdnid, cs->minor_index) 633 if (snprintf(iif->id, sizeof iif->id, "%s_%u", isdnid, cs->minor_index)
634 >= sizeof iif->id) { 634 >= sizeof iif->id) {
635 pr_err("ID too long: %s\n", isdnid); 635 pr_err("ID too long: %s\n", isdnid);
636 kfree(iif); 636 kfree(iif);
637 return 0; 637 return -EINVAL;
638 } 638 }
639 639
640 iif->owner = THIS_MODULE; 640 iif->owner = THIS_MODULE;
@@ -656,13 +656,13 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
656 if (!register_isdn(iif)) { 656 if (!register_isdn(iif)) {
657 pr_err("register_isdn failed\n"); 657 pr_err("register_isdn failed\n");
658 kfree(iif); 658 kfree(iif);
659 return 0; 659 return -EINVAL;
660 } 660 }
661 661
662 cs->iif = iif; 662 cs->iif = iif;
663 cs->myid = iif->channels; /* Set my device id */ 663 cs->myid = iif->channels; /* Set my device id */
664 cs->hw_hdr_len = HW_HDR_LEN; 664 cs->hw_hdr_len = HW_HDR_LEN;
665 return 1; 665 return 0;
666} 666}
667 667
668/** 668/**
diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c
index a351c16705bd..bc29f1d52a2f 100644
--- a/drivers/isdn/gigaset/isocdata.c
+++ b/drivers/isdn/gigaset/isocdata.c
@@ -56,7 +56,7 @@ static inline int isowbuf_freebytes(struct isowbuf_t *iwb)
56 56
57/* start writing 57/* start writing
58 * acquire the write semaphore 58 * acquire the write semaphore
59 * return true if acquired, false if busy 59 * return 0 if acquired, <0 if busy
60 */ 60 */
61static inline int isowbuf_startwrite(struct isowbuf_t *iwb) 61static inline int isowbuf_startwrite(struct isowbuf_t *iwb)
62{ 62{
@@ -64,12 +64,12 @@ static inline int isowbuf_startwrite(struct isowbuf_t *iwb)
64 atomic_inc(&iwb->writesem); 64 atomic_inc(&iwb->writesem);
65 gig_dbg(DEBUG_ISO, "%s: couldn't acquire iso write semaphore", 65 gig_dbg(DEBUG_ISO, "%s: couldn't acquire iso write semaphore",
66 __func__); 66 __func__);
67 return 0; 67 return -EBUSY;
68 } 68 }
69 gig_dbg(DEBUG_ISO, 69 gig_dbg(DEBUG_ISO,
70 "%s: acquired iso write semaphore, data[write]=%02x, nbits=%d", 70 "%s: acquired iso write semaphore, data[write]=%02x, nbits=%d",
71 __func__, iwb->data[iwb->write], iwb->wbits); 71 __func__, iwb->data[iwb->write], iwb->wbits);
72 return 1; 72 return 0;
73} 73}
74 74
75/* finish writing 75/* finish writing
@@ -158,7 +158,7 @@ int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size)
158 /* no wraparound in valid data */ 158 /* no wraparound in valid data */
159 if (limit >= write) { 159 if (limit >= write) {
160 /* append idle frame */ 160 /* append idle frame */
161 if (!isowbuf_startwrite(iwb)) 161 if (isowbuf_startwrite(iwb) < 0)
162 return -EBUSY; 162 return -EBUSY;
163 /* write position could have changed */ 163 /* write position could have changed */
164 write = iwb->write; 164 write = iwb->write;
@@ -403,7 +403,7 @@ static inline int hdlc_buildframe(struct isowbuf_t *iwb,
403 unsigned char c; 403 unsigned char c;
404 404
405 if (isowbuf_freebytes(iwb) < count + count / 5 + 6 || 405 if (isowbuf_freebytes(iwb) < count + count / 5 + 6 ||
406 !isowbuf_startwrite(iwb)) { 406 isowbuf_startwrite(iwb) < 0) {
407 gig_dbg(DEBUG_ISO, "%s: %d bytes free -> -EAGAIN", 407 gig_dbg(DEBUG_ISO, "%s: %d bytes free -> -EAGAIN",
408 __func__, isowbuf_freebytes(iwb)); 408 __func__, isowbuf_freebytes(iwb));
409 return -EAGAIN; 409 return -EAGAIN;
@@ -457,7 +457,7 @@ static inline int trans_buildframe(struct isowbuf_t *iwb,
457 return iwb->write; 457 return iwb->write;
458 458
459 if (isowbuf_freebytes(iwb) < count || 459 if (isowbuf_freebytes(iwb) < count ||
460 !isowbuf_startwrite(iwb)) { 460 isowbuf_startwrite(iwb) < 0) {
461 gig_dbg(DEBUG_ISO, "can't put %d bytes", count); 461 gig_dbg(DEBUG_ISO, "can't put %d bytes", count);
462 return -EAGAIN; 462 return -EAGAIN;
463 } 463 }
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 6f3fd4cf4378..8c91fd5eb6fd 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -340,17 +340,16 @@ static int gigaset_initbcshw(struct bc_state *bcs)
340{ 340{
341 /* unused */ 341 /* unused */
342 bcs->hw.ser = NULL; 342 bcs->hw.ser = NULL;
343 return 1; 343 return 0;
344} 344}
345 345
346/* 346/*
347 * Free B channel structure 347 * Free B channel structure
348 * Called by "gigaset_freebcs" in common.c 348 * Called by "gigaset_freebcs" in common.c
349 */ 349 */
350static int gigaset_freebcshw(struct bc_state *bcs) 350static void gigaset_freebcshw(struct bc_state *bcs)
351{ 351{
352 /* unused */ 352 /* unused */
353 return 1;
354} 353}
355 354
356/* 355/*
@@ -398,7 +397,7 @@ static int gigaset_initcshw(struct cardstate *cs)
398 scs = kzalloc(sizeof(struct ser_cardstate), GFP_KERNEL); 397 scs = kzalloc(sizeof(struct ser_cardstate), GFP_KERNEL);
399 if (!scs) { 398 if (!scs) {
400 pr_err("out of memory\n"); 399 pr_err("out of memory\n");
401 return 0; 400 return -ENOMEM;
402 } 401 }
403 cs->hw.ser = scs; 402 cs->hw.ser = scs;
404 403
@@ -410,13 +409,13 @@ static int gigaset_initcshw(struct cardstate *cs)
410 pr_err("error %d registering platform device\n", rc); 409 pr_err("error %d registering platform device\n", rc);
411 kfree(cs->hw.ser); 410 kfree(cs->hw.ser);
412 cs->hw.ser = NULL; 411 cs->hw.ser = NULL;
413 return 0; 412 return rc;
414 } 413 }
415 dev_set_drvdata(&cs->hw.ser->dev.dev, cs); 414 dev_set_drvdata(&cs->hw.ser->dev.dev, cs);
416 415
417 tasklet_init(&cs->write_tasklet, 416 tasklet_init(&cs->write_tasklet,
418 gigaset_modem_fill, (unsigned long) cs); 417 gigaset_modem_fill, (unsigned long) cs);
419 return 1; 418 return 0;
420} 419}
421 420
422/* 421/*
@@ -503,6 +502,7 @@ static int
503gigaset_tty_open(struct tty_struct *tty) 502gigaset_tty_open(struct tty_struct *tty)
504{ 503{
505 struct cardstate *cs; 504 struct cardstate *cs;
505 int rc;
506 506
507 gig_dbg(DEBUG_INIT, "Starting HLL for Gigaset M101"); 507 gig_dbg(DEBUG_INIT, "Starting HLL for Gigaset M101");
508 508
@@ -515,8 +515,10 @@ gigaset_tty_open(struct tty_struct *tty)
515 515
516 /* allocate memory for our device state and initialize it */ 516 /* allocate memory for our device state and initialize it */
517 cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME); 517 cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME);
518 if (!cs) 518 if (!cs) {
519 rc = -ENODEV;
519 goto error; 520 goto error;
521 }
520 522
521 cs->dev = &cs->hw.ser->dev.dev; 523 cs->dev = &cs->hw.ser->dev.dev;
522 cs->hw.ser->tty = tty; 524 cs->hw.ser->tty = tty;
@@ -530,7 +532,8 @@ gigaset_tty_open(struct tty_struct *tty)
530 */ 532 */
531 if (startmode == SM_LOCKED) 533 if (startmode == SM_LOCKED)
532 cs->mstate = MS_LOCKED; 534 cs->mstate = MS_LOCKED;
533 if (!gigaset_start(cs)) { 535 rc = gigaset_start(cs);
536 if (rc < 0) {
534 tasklet_kill(&cs->write_tasklet); 537 tasklet_kill(&cs->write_tasklet);
535 goto error; 538 goto error;
536 } 539 }
@@ -542,7 +545,7 @@ error:
542 gig_dbg(DEBUG_INIT, "Startup of HLL failed"); 545 gig_dbg(DEBUG_INIT, "Startup of HLL failed");
543 tty->disc_data = NULL; 546 tty->disc_data = NULL;
544 gigaset_freecs(cs); 547 gigaset_freecs(cs);
545 return -ENODEV; 548 return rc;
546} 549}
547 550
548/* 551/*
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index 049da67f6392..bb12d8051732 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -549,10 +549,9 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
549 0, 0, &buf, 6, 2000); 549 0, 0, &buf, 6, 2000);
550} 550}
551 551
552static int gigaset_freebcshw(struct bc_state *bcs) 552static void gigaset_freebcshw(struct bc_state *bcs)
553{ 553{
554 /* unused */ 554 /* unused */
555 return 1;
556} 555}
557 556
558/* Initialize the b-channel structure */ 557/* Initialize the b-channel structure */
@@ -560,7 +559,7 @@ static int gigaset_initbcshw(struct bc_state *bcs)
560{ 559{
561 /* unused */ 560 /* unused */
562 bcs->hw.usb = NULL; 561 bcs->hw.usb = NULL;
563 return 1; 562 return 0;
564} 563}
565 564
566static void gigaset_reinitbcshw(struct bc_state *bcs) 565static void gigaset_reinitbcshw(struct bc_state *bcs)
@@ -582,7 +581,7 @@ static int gigaset_initcshw(struct cardstate *cs)
582 kmalloc(sizeof(struct usb_cardstate), GFP_KERNEL); 581 kmalloc(sizeof(struct usb_cardstate), GFP_KERNEL);
583 if (!ucs) { 582 if (!ucs) {
584 pr_err("out of memory\n"); 583 pr_err("out of memory\n");
585 return 0; 584 return -ENOMEM;
586 } 585 }
587 586
588 ucs->bchars[0] = 0; 587 ucs->bchars[0] = 0;
@@ -597,7 +596,7 @@ static int gigaset_initcshw(struct cardstate *cs)
597 tasklet_init(&cs->write_tasklet, 596 tasklet_init(&cs->write_tasklet,
598 gigaset_modem_fill, (unsigned long) cs); 597 gigaset_modem_fill, (unsigned long) cs);
599 598
600 return 1; 599 return 0;
601} 600}
602 601
603/* Send data from current skb to the device. */ 602/* Send data from current skb to the device. */
@@ -766,9 +765,9 @@ static int gigaset_probe(struct usb_interface *interface,
766 if (startmode == SM_LOCKED) 765 if (startmode == SM_LOCKED)
767 cs->mstate = MS_LOCKED; 766 cs->mstate = MS_LOCKED;
768 767
769 if (!gigaset_start(cs)) { 768 retval = gigaset_start(cs);
769 if (retval < 0) {
770 tasklet_kill(&cs->write_tasklet); 770 tasklet_kill(&cs->write_tasklet);
771 retval = -ENODEV;
772 goto error; 771 goto error;
773 } 772 }
774 return 0; 773 return 0;
@@ -898,8 +897,10 @@ static int __init usb_gigaset_init(void)
898 driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS, 897 driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
899 GIGASET_MODULENAME, GIGASET_DEVNAME, 898 GIGASET_MODULENAME, GIGASET_DEVNAME,
900 &ops, THIS_MODULE); 899 &ops, THIS_MODULE);
901 if (driver == NULL) 900 if (driver == NULL) {
901 result = -ENOMEM;
902 goto error; 902 goto error;
903 }
903 904
904 /* register this driver with the USB subsystem */ 905 /* register this driver with the USB subsystem */
905 result = usb_register(&gigaset_usb_driver); 906 result = usb_register(&gigaset_usb_driver);
@@ -915,7 +916,7 @@ error:
915 if (driver) 916 if (driver)
916 gigaset_freedriver(driver); 917 gigaset_freedriver(driver);
917 driver = NULL; 918 driver = NULL;
918 return -1; 919 return result;
919} 920}
920 921
921/* 922/*
diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c
index a576f32e6635..7a0bdbdd87ea 100644
--- a/drivers/isdn/hardware/eicon/capifunc.c
+++ b/drivers/isdn/hardware/eicon/capifunc.c
@@ -1120,7 +1120,7 @@ int fax_head_line_time(char *buffer)
1120/* 1120/*
1121 * init (alloc) main structures 1121 * init (alloc) main structures
1122 */ 1122 */
1123static int DIVA_INIT_FUNCTION init_main_structs(void) 1123static int __init init_main_structs(void)
1124{ 1124{
1125 if (!(mapped_msg = (CAPI_MSG *) diva_os_malloc(0, MAX_MSG_SIZE))) { 1125 if (!(mapped_msg = (CAPI_MSG *) diva_os_malloc(0, MAX_MSG_SIZE))) {
1126 DBG_ERR(("init: failed alloc mapped_msg.")) 1126 DBG_ERR(("init: failed alloc mapped_msg."))
@@ -1181,7 +1181,7 @@ static void do_api_remove_start(void)
1181/* 1181/*
1182 * init 1182 * init
1183 */ 1183 */
1184int DIVA_INIT_FUNCTION init_capifunc(void) 1184int __init init_capifunc(void)
1185{ 1185{
1186 diva_os_initialize_spin_lock(&api_lock, "capifunc"); 1186 diva_os_initialize_spin_lock(&api_lock, "capifunc");
1187 memset(ControllerMap, 0, MAX_DESCRIPTORS + 1); 1187 memset(ControllerMap, 0, MAX_DESCRIPTORS + 1);
@@ -1209,7 +1209,7 @@ int DIVA_INIT_FUNCTION init_capifunc(void)
1209/* 1209/*
1210 * finit 1210 * finit
1211 */ 1211 */
1212void DIVA_EXIT_FUNCTION finit_capifunc(void) 1212void __exit finit_capifunc(void)
1213{ 1213{
1214 do_api_remove_start(); 1214 do_api_remove_start();
1215 divacapi_disconnect_didd(); 1215 divacapi_disconnect_didd();
diff --git a/drivers/isdn/hardware/eicon/capimain.c b/drivers/isdn/hardware/eicon/capimain.c
index eabe0fa1b627..997d46abf5b2 100644
--- a/drivers/isdn/hardware/eicon/capimain.c
+++ b/drivers/isdn/hardware/eicon/capimain.c
@@ -118,7 +118,7 @@ void diva_os_set_controller_struct(struct capi_ctr *ctrl)
118/* 118/*
119 * module init 119 * module init
120 */ 120 */
121static int DIVA_INIT_FUNCTION divacapi_init(void) 121static int __init divacapi_init(void)
122{ 122{
123 char tmprev[32]; 123 char tmprev[32];
124 int ret = 0; 124 int ret = 0;
@@ -144,7 +144,7 @@ static int DIVA_INIT_FUNCTION divacapi_init(void)
144/* 144/*
145 * module exit 145 * module exit
146 */ 146 */
147static void DIVA_EXIT_FUNCTION divacapi_exit(void) 147static void __exit divacapi_exit(void)
148{ 148{
149 finit_capifunc(); 149 finit_capifunc();
150 printk(KERN_INFO "%s: module unloaded.\n", DRIVERLNAME); 150 printk(KERN_INFO "%s: module unloaded.\n", DRIVERLNAME);
diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c
index c4c8220c9d72..b0b23ed8b374 100644
--- a/drivers/isdn/hardware/eicon/diddfunc.c
+++ b/drivers/isdn/hardware/eicon/diddfunc.c
@@ -47,7 +47,7 @@ static void *didd_callback(void *context, DESCRIPTOR *adapter,
47/* 47/*
48 * connect to didd 48 * connect to didd
49 */ 49 */
50static int DIVA_INIT_FUNCTION connect_didd(void) 50static int __init connect_didd(void)
51{ 51{
52 int x = 0; 52 int x = 0;
53 int dadapter = 0; 53 int dadapter = 0;
@@ -79,7 +79,7 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
79/* 79/*
80 * disconnect from didd 80 * disconnect from didd
81 */ 81 */
82static void DIVA_EXIT_FUNCTION disconnect_didd(void) 82static void __exit disconnect_didd(void)
83{ 83{
84 IDI_SYNC_REQ req; 84 IDI_SYNC_REQ req;
85 85
@@ -92,7 +92,7 @@ static void DIVA_EXIT_FUNCTION disconnect_didd(void)
92/* 92/*
93 * init 93 * init
94 */ 94 */
95int DIVA_INIT_FUNCTION diddfunc_init(void) 95int __init diddfunc_init(void)
96{ 96{
97 diva_didd_load_time_init(); 97 diva_didd_load_time_init();
98 98
@@ -107,7 +107,7 @@ int DIVA_INIT_FUNCTION diddfunc_init(void)
107/* 107/*
108 * finit 108 * finit
109 */ 109 */
110void DIVA_EXIT_FUNCTION diddfunc_finit(void) 110void __exit diddfunc_finit(void)
111{ 111{
112 DbgDeregister(); 112 DbgDeregister();
113 disconnect_didd(); 113 disconnect_didd();
diff --git a/drivers/isdn/hardware/eicon/diva_didd.c b/drivers/isdn/hardware/eicon/diva_didd.c
index d1d3de03cced..fab6ccfb00d5 100644
--- a/drivers/isdn/hardware/eicon/diva_didd.c
+++ b/drivers/isdn/hardware/eicon/diva_didd.c
@@ -91,7 +91,7 @@ static const struct file_operations divadidd_proc_fops = {
91 .release = single_release, 91 .release = single_release,
92}; 92};
93 93
94static int DIVA_INIT_FUNCTION create_proc(void) 94static int __init create_proc(void)
95{ 95{
96 proc_net_eicon = proc_mkdir("eicon", init_net.proc_net); 96 proc_net_eicon = proc_mkdir("eicon", init_net.proc_net);
97 97
@@ -109,7 +109,7 @@ static void remove_proc(void)
109 remove_proc_entry("eicon", init_net.proc_net); 109 remove_proc_entry("eicon", init_net.proc_net);
110} 110}
111 111
112static int DIVA_INIT_FUNCTION divadidd_init(void) 112static int __init divadidd_init(void)
113{ 113{
114 char tmprev[32]; 114 char tmprev[32];
115 int ret = 0; 115 int ret = 0;
@@ -141,7 +141,7 @@ out:
141 return (ret); 141 return (ret);
142} 142}
143 143
144static void DIVA_EXIT_FUNCTION divadidd_exit(void) 144static void __exit divadidd_exit(void)
145{ 145{
146 diddfunc_finit(); 146 diddfunc_finit();
147 remove_proc(); 147 remove_proc();
diff --git a/drivers/isdn/hardware/eicon/divamnt.c b/drivers/isdn/hardware/eicon/divamnt.c
index ffa0c31be745..48db08d0bb3d 100644
--- a/drivers/isdn/hardware/eicon/divamnt.c
+++ b/drivers/isdn/hardware/eicon/divamnt.c
@@ -184,7 +184,7 @@ static void divas_maint_unregister_chrdev(void)
184 unregister_chrdev(major, DEVNAME); 184 unregister_chrdev(major, DEVNAME);
185} 185}
186 186
187static int DIVA_INIT_FUNCTION divas_maint_register_chrdev(void) 187static int __init divas_maint_register_chrdev(void)
188{ 188{
189 if ((major = register_chrdev(0, DEVNAME, &divas_maint_fops)) < 0) 189 if ((major = register_chrdev(0, DEVNAME, &divas_maint_fops)) < 0)
190 { 190 {
@@ -207,7 +207,7 @@ void diva_maint_wakeup_read(void)
207/* 207/*
208 * Driver Load 208 * Driver Load
209 */ 209 */
210static int DIVA_INIT_FUNCTION maint_init(void) 210static int __init maint_init(void)
211{ 211{
212 char tmprev[50]; 212 char tmprev[50];
213 int ret = 0; 213 int ret = 0;
@@ -245,7 +245,7 @@ out:
245/* 245/*
246** Driver Unload 246** Driver Unload
247*/ 247*/
248static void DIVA_EXIT_FUNCTION maint_exit(void) 248static void __exit maint_exit(void)
249{ 249{
250 divas_maint_unregister_chrdev(); 250 divas_maint_unregister_chrdev();
251 mntfunc_finit(); 251 mntfunc_finit();
diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c
index 60aaf9580956..4be5f8814777 100644
--- a/drivers/isdn/hardware/eicon/divasfunc.c
+++ b/drivers/isdn/hardware/eicon/divasfunc.c
@@ -153,7 +153,7 @@ static void *didd_callback(void *context, DESCRIPTOR *adapter,
153/* 153/*
154 * connect to didd 154 * connect to didd
155 */ 155 */
156static int DIVA_INIT_FUNCTION connect_didd(void) 156static int __init connect_didd(void)
157{ 157{
158 int x = 0; 158 int x = 0;
159 int dadapter = 0; 159 int dadapter = 0;
@@ -209,7 +209,7 @@ static void disconnect_didd(void)
209/* 209/*
210 * init 210 * init
211 */ 211 */
212int DIVA_INIT_FUNCTION divasfunc_init(int dbgmask) 212int __init divasfunc_init(int dbgmask)
213{ 213{
214 char *version; 214 char *version;
215 215
diff --git a/drivers/isdn/hardware/eicon/divasi.c b/drivers/isdn/hardware/eicon/divasi.c
index a5c8f90b3b37..4103a8c178d7 100644
--- a/drivers/isdn/hardware/eicon/divasi.c
+++ b/drivers/isdn/hardware/eicon/divasi.c
@@ -114,7 +114,7 @@ static const struct file_operations um_idi_proc_fops = {
114 .release = single_release, 114 .release = single_release,
115}; 115};
116 116
117static int DIVA_INIT_FUNCTION create_um_idi_proc(void) 117static int __init create_um_idi_proc(void)
118{ 118{
119 um_idi_proc_entry = proc_create(DRIVERLNAME, S_IRUGO, proc_net_eicon, 119 um_idi_proc_entry = proc_create(DRIVERLNAME, S_IRUGO, proc_net_eicon,
120 &um_idi_proc_fops); 120 &um_idi_proc_fops);
@@ -146,7 +146,7 @@ static void divas_idi_unregister_chrdev(void)
146 unregister_chrdev(major, DEVNAME); 146 unregister_chrdev(major, DEVNAME);
147} 147}
148 148
149static int DIVA_INIT_FUNCTION divas_idi_register_chrdev(void) 149static int __init divas_idi_register_chrdev(void)
150{ 150{
151 if ((major = register_chrdev(0, DEVNAME, &divas_idi_fops)) < 0) 151 if ((major = register_chrdev(0, DEVNAME, &divas_idi_fops)) < 0)
152 { 152 {
@@ -161,7 +161,7 @@ static int DIVA_INIT_FUNCTION divas_idi_register_chrdev(void)
161/* 161/*
162** Driver Load 162** Driver Load
163*/ 163*/
164static int DIVA_INIT_FUNCTION divasi_init(void) 164static int __init divasi_init(void)
165{ 165{
166 char tmprev[50]; 166 char tmprev[50];
167 int ret = 0; 167 int ret = 0;
@@ -202,7 +202,7 @@ out:
202/* 202/*
203** Driver Unload 203** Driver Unload
204*/ 204*/
205static void DIVA_EXIT_FUNCTION divasi_exit(void) 205static void __exit divasi_exit(void)
206{ 206{
207 idifunc_finit(); 207 idifunc_finit();
208 remove_um_idi_proc(); 208 remove_um_idi_proc();
diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
index 7eaab06276f9..ca6d276bb256 100644
--- a/drivers/isdn/hardware/eicon/divasmain.c
+++ b/drivers/isdn/hardware/eicon/divasmain.c
@@ -673,7 +673,7 @@ static void divas_unregister_chrdev(void)
673 unregister_chrdev(major, DEVNAME); 673 unregister_chrdev(major, DEVNAME);
674} 674}
675 675
676static int DIVA_INIT_FUNCTION divas_register_chrdev(void) 676static int __init divas_register_chrdev(void)
677{ 677{
678 if ((major = register_chrdev(0, DEVNAME, &divas_fops)) < 0) 678 if ((major = register_chrdev(0, DEVNAME, &divas_fops)) < 0)
679 { 679 {
@@ -767,7 +767,7 @@ static void __devexit divas_remove_one(struct pci_dev *pdev)
767/* -------------------------------------------------------------------------- 767/* --------------------------------------------------------------------------
768 Driver Load / Startup 768 Driver Load / Startup
769 -------------------------------------------------------------------------- */ 769 -------------------------------------------------------------------------- */
770static int DIVA_INIT_FUNCTION divas_init(void) 770static int __init divas_init(void)
771{ 771{
772 char tmprev[50]; 772 char tmprev[50];
773 int ret = 0; 773 int ret = 0;
@@ -831,7 +831,7 @@ out:
831/* -------------------------------------------------------------------------- 831/* --------------------------------------------------------------------------
832 Driver Unload 832 Driver Unload
833 -------------------------------------------------------------------------- */ 833 -------------------------------------------------------------------------- */
834static void DIVA_EXIT_FUNCTION divas_exit(void) 834static void __exit divas_exit(void)
835{ 835{
836 pci_unregister_driver(&diva_pci_driver); 836 pci_unregister_driver(&diva_pci_driver);
837 remove_divas_proc(); 837 remove_divas_proc();
diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
index d153e3cdecf7..fef6586fe5ac 100644
--- a/drivers/isdn/hardware/eicon/idifunc.c
+++ b/drivers/isdn/hardware/eicon/idifunc.c
@@ -133,7 +133,7 @@ static void um_remove_card(DESCRIPTOR *d)
133/* 133/*
134 * remove all adapter 134 * remove all adapter
135 */ 135 */
136static void DIVA_EXIT_FUNCTION remove_all_idi_proc(void) 136static void __exit remove_all_idi_proc(void)
137{ 137{
138 udiva_card *card; 138 udiva_card *card;
139 diva_os_spin_lock_magic_t old_irql; 139 diva_os_spin_lock_magic_t old_irql;
@@ -181,7 +181,7 @@ static void *didd_callback(void *context, DESCRIPTOR *adapter,
181/* 181/*
182 * connect DIDD 182 * connect DIDD
183 */ 183 */
184static int DIVA_INIT_FUNCTION connect_didd(void) 184static int __init connect_didd(void)
185{ 185{
186 int x = 0; 186 int x = 0;
187 int dadapter = 0; 187 int dadapter = 0;
@@ -225,7 +225,7 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
225/* 225/*
226 * Disconnect from DIDD 226 * Disconnect from DIDD
227 */ 227 */
228static void DIVA_EXIT_FUNCTION disconnect_didd(void) 228static void __exit disconnect_didd(void)
229{ 229{
230 IDI_SYNC_REQ req; 230 IDI_SYNC_REQ req;
231 231
@@ -240,7 +240,7 @@ static void DIVA_EXIT_FUNCTION disconnect_didd(void)
240/* 240/*
241 * init 241 * init
242 */ 242 */
243int DIVA_INIT_FUNCTION idifunc_init(void) 243int __init idifunc_init(void)
244{ 244{
245 diva_os_initialize_spin_lock(&ll_lock, "idifunc"); 245 diva_os_initialize_spin_lock(&ll_lock, "idifunc");
246 246
@@ -260,7 +260,7 @@ int DIVA_INIT_FUNCTION idifunc_init(void)
260/* 260/*
261 * finit 261 * finit
262 */ 262 */
263void DIVA_EXIT_FUNCTION idifunc_finit(void) 263void __exit idifunc_finit(void)
264{ 264{
265 diva_user_mode_idi_finit(); 265 diva_user_mode_idi_finit();
266 disconnect_didd(); 266 disconnect_didd();
diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c
index d6072607305c..1cd9affb6058 100644
--- a/drivers/isdn/hardware/eicon/mntfunc.c
+++ b/drivers/isdn/hardware/eicon/mntfunc.c
@@ -72,7 +72,7 @@ static void *didd_callback(void *context, DESCRIPTOR *adapter,
72/* 72/*
73 * connect to didd 73 * connect to didd
74 */ 74 */
75static int DIVA_INIT_FUNCTION connect_didd(void) 75static int __init connect_didd(void)
76{ 76{
77 int x = 0; 77 int x = 0;
78 int dadapter = 0; 78 int dadapter = 0;
@@ -114,7 +114,7 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
114/* 114/*
115 * disconnect from didd 115 * disconnect from didd
116 */ 116 */
117static void DIVA_EXIT_FUNCTION disconnect_didd(void) 117static void __exit disconnect_didd(void)
118{ 118{
119 IDI_SYNC_REQ req; 119 IDI_SYNC_REQ req;
120 120
@@ -300,7 +300,7 @@ int maint_read_write(void __user *buf, int count)
300/* 300/*
301 * init 301 * init
302 */ 302 */
303int DIVA_INIT_FUNCTION mntfunc_init(int *buffer_length, void **buffer, 303int __init mntfunc_init(int *buffer_length, void **buffer,
304 unsigned long diva_dbg_mem) 304 unsigned long diva_dbg_mem)
305{ 305{
306 if (*buffer_length < 64) { 306 if (*buffer_length < 64) {
@@ -348,7 +348,7 @@ int DIVA_INIT_FUNCTION mntfunc_init(int *buffer_length, void **buffer,
348/* 348/*
349 * exit 349 * exit
350 */ 350 */
351void DIVA_EXIT_FUNCTION mntfunc_finit(void) 351void __exit mntfunc_finit(void)
352{ 352{
353 void *buffer; 353 void *buffer;
354 int i = 100; 354 int i = 100;
diff --git a/drivers/isdn/hardware/eicon/platform.h b/drivers/isdn/hardware/eicon/platform.h
index 7331c3b14a5f..b2edb7590dda 100644
--- a/drivers/isdn/hardware/eicon/platform.h
+++ b/drivers/isdn/hardware/eicon/platform.h
@@ -38,9 +38,6 @@
38#define DIVA_NO_DEBUGLIB 38#define DIVA_NO_DEBUGLIB
39#endif 39#endif
40 40
41#define DIVA_INIT_FUNCTION __init
42#define DIVA_EXIT_FUNCTION __exit
43
44#define DIVA_USER_MODE_CARD_CONFIG 1 41#define DIVA_USER_MODE_CARD_CONFIG 1
45#define USE_EXTENDED_DEBUGS 1 42#define USE_EXTENDED_DEBUGS 1
46 43
diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c
index c0b8c960ee3f..6bf2c58795a3 100644
--- a/drivers/isdn/hardware/mISDN/avmfritz.c
+++ b/drivers/isdn/hardware/mISDN/avmfritz.c
@@ -868,7 +868,7 @@ channel_ctrl(struct fritzcard *fc, struct mISDN_ctrl_req *cq)
868 868
869 switch (cq->op) { 869 switch (cq->op) {
870 case MISDN_CTRL_GETOP: 870 case MISDN_CTRL_GETOP:
871 cq->op = MISDN_CTRL_LOOP; 871 cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3;
872 break; 872 break;
873 case MISDN_CTRL_LOOP: 873 case MISDN_CTRL_LOOP:
874 /* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */ 874 /* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */
@@ -878,6 +878,9 @@ channel_ctrl(struct fritzcard *fc, struct mISDN_ctrl_req *cq)
878 } 878 }
879 ret = fc->isac.ctrl(&fc->isac, HW_TESTLOOP, cq->channel); 879 ret = fc->isac.ctrl(&fc->isac, HW_TESTLOOP, cq->channel);
880 break; 880 break;
881 case MISDN_CTRL_L1_TIMER3:
882 ret = fc->isac.ctrl(&fc->isac, HW_TIMER3_VALUE, cq->p1);
883 break;
881 default: 884 default:
882 pr_info("%s: %s unknown Op %x\n", fc->name, __func__, cq->op); 885 pr_info("%s: %s unknown Op %x\n", fc->name, __func__, cq->op);
883 ret = -EINVAL; 886 ret = -EINVAL;
diff --git a/drivers/isdn/hardware/mISDN/hfc_multi.h b/drivers/isdn/hardware/mISDN/hfc_multi.h
index b0588acbb47d..c601f880141e 100644
--- a/drivers/isdn/hardware/mISDN/hfc_multi.h
+++ b/drivers/isdn/hardware/mISDN/hfc_multi.h
@@ -205,18 +205,22 @@ struct hfc_multi {
205 205
206 u_int slots; /* number of PCM slots */ 206 u_int slots; /* number of PCM slots */
207 u_int leds; /* type of leds */ 207 u_int leds; /* type of leds */
208 u_int ledcount; /* used to animate leds */
209 u_long ledstate; /* save last state of leds */ 208 u_long ledstate; /* save last state of leds */
210 int opticalsupport; /* has the e1 board */ 209 int opticalsupport; /* has the e1 board */
211 /* an optical Interface */ 210 /* an optical Interface */
212 int dslot; /* channel # of d-channel (E1) default 16 */ 211
212 u_int bmask[32]; /* bitmask of bchannels for port */
213 u_char dnum[32]; /* array of used dchannel numbers for port */
214 u_char created[32]; /* what port is created */
215 u_int activity_tx; /* if there is data TX / RX */
216 u_int activity_rx; /* bitmask according to port number */
217 /* (will be cleared after */
218 /* showing led-states) */
219 u_int flash[8]; /* counter for flashing 8 leds on activity */
213 220
214 u_long wdcount; /* every 500 ms we need to */ 221 u_long wdcount; /* every 500 ms we need to */
215 /* send the watchdog a signal */ 222 /* send the watchdog a signal */
216 u_char wdbyte; /* watchdog toggle byte */ 223 u_char wdbyte; /* watchdog toggle byte */
217 u_int activity[8]; /* if there is any action on this */
218 /* port (will be cleared after */
219 /* showing led-states) */
220 int e1_state; /* keep track of last state */ 224 int e1_state; /* keep track of last state */
221 int e1_getclock; /* if sync is retrieved from interface */ 225 int e1_getclock; /* if sync is retrieved from interface */
222 int syncronized; /* keep track of existing sync interface */ 226 int syncronized; /* keep track of existing sync interface */
@@ -233,7 +237,6 @@ struct hfc_multi {
233 * the bch->channel is equvalent to the hfc-channel 237 * the bch->channel is equvalent to the hfc-channel
234 */ 238 */
235 struct hfc_chan chan[32]; 239 struct hfc_chan chan[32];
236 u_char created[8]; /* what port is created */
237 signed char slot_owner[256]; /* owner channel of slot */ 240 signed char slot_owner[256]; /* owner channel of slot */
238}; 241};
239 242
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index 033223180b55..4c128e4bb5cf 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -103,14 +103,26 @@
103 * Omit this value, if all cards are interconnected or none is connected. 103 * Omit this value, if all cards are interconnected or none is connected.
104 * If unsure, don't give this parameter. 104 * If unsure, don't give this parameter.
105 * 105 *
106 * dslot: 106 * dmask and bmask:
107 * NOTE: only one dslot value must be given for every card. 107 * NOTE: One dmask value must be given for every HFC-E1 card.
108 * Also this value must be given for non-E1 cards. If omitted, the E1 108 * If omitted, the E1 card has D-channel on time slot 16, which is default.
109 * card has D-channel on time slot 16, which is default. 109 * dmask is a 32 bit mask. The bit must be set for an alternate time slot.
110 * If 1..15 or 17..31, an alternate time slot is used for D-channel. 110 * If multiple bits are set, multiple virtual card fragments are created.
111 * In this case, the application must be able to handle this. 111 * For each bit set, a bmask value must be given. Each bit on the bmask
112 * If -1 is given, the D-channel is disabled and all 31 slots can be used 112 * value stands for a B-channel. The bmask may not overlap with dmask or
113 * for B-channel. (only for specific applications) 113 * with other bmask values for that card.
114 * Example: dmask=0x00020002 bmask=0x0000fffc,0xfffc0000
115 * This will create one fragment with D-channel on slot 1 with
116 * B-channels on slots 2..15, and a second fragment with D-channel
117 * on slot 17 with B-channels on slot 18..31. Slot 16 is unused.
118 * If bit 0 is set (dmask=0x00000001) the D-channel is on slot 0 and will
119 * not function.
120 * Example: dmask=0x00000001 bmask=0xfffffffe
121 * This will create a port with all 31 usable timeslots as
122 * B-channels.
123 * If no bits are set on bmask, no B-channel is created for that fragment.
124 * Example: dmask=0xfffffffe bmask=0,0,0,0.... (31 0-values for bmask)
125 * This will create 31 ports with one D-channel only.
114 * If you don't know how to use it, you don't need it! 126 * If you don't know how to use it, you don't need it!
115 * 127 *
116 * iomode: 128 * iomode:
@@ -172,6 +184,7 @@
172 184
173#define MAX_CARDS 8 185#define MAX_CARDS 8
174#define MAX_PORTS (8 * MAX_CARDS) 186#define MAX_PORTS (8 * MAX_CARDS)
187#define MAX_FRAGS (32 * MAX_CARDS)
175 188
176static LIST_HEAD(HFClist); 189static LIST_HEAD(HFClist);
177static spinlock_t HFClock; /* global hfc list lock */ 190static spinlock_t HFClock; /* global hfc list lock */
@@ -203,7 +216,8 @@ static int nt_t1_count[] = { 3840, 1920, 960, 480, 240, 120, 60, 30 };
203 216
204static uint type[MAX_CARDS]; 217static uint type[MAX_CARDS];
205static int pcm[MAX_CARDS]; 218static int pcm[MAX_CARDS];
206static int dslot[MAX_CARDS]; 219static uint dmask[MAX_CARDS];
220static uint bmask[MAX_FRAGS];
207static uint iomode[MAX_CARDS]; 221static uint iomode[MAX_CARDS];
208static uint port[MAX_PORTS]; 222static uint port[MAX_PORTS];
209static uint debug; 223static uint debug;
@@ -218,7 +232,7 @@ static uint clockdelay_nt = CLKDEL_NT;
218#define HWID_MINIP16 3 232#define HWID_MINIP16 3
219static uint hwid = HWID_NONE; 233static uint hwid = HWID_NONE;
220 234
221static int HFC_cnt, Port_cnt, PCM_cnt = 99; 235static int HFC_cnt, E1_cnt, bmask_cnt, Port_cnt, PCM_cnt = 99;
222 236
223MODULE_AUTHOR("Andreas Eversberg"); 237MODULE_AUTHOR("Andreas Eversberg");
224MODULE_LICENSE("GPL"); 238MODULE_LICENSE("GPL");
@@ -231,7 +245,8 @@ module_param(clockdelay_te, uint, S_IRUGO | S_IWUSR);
231module_param(clockdelay_nt, uint, S_IRUGO | S_IWUSR); 245module_param(clockdelay_nt, uint, S_IRUGO | S_IWUSR);
232module_param_array(type, uint, NULL, S_IRUGO | S_IWUSR); 246module_param_array(type, uint, NULL, S_IRUGO | S_IWUSR);
233module_param_array(pcm, int, NULL, S_IRUGO | S_IWUSR); 247module_param_array(pcm, int, NULL, S_IRUGO | S_IWUSR);
234module_param_array(dslot, int, NULL, S_IRUGO | S_IWUSR); 248module_param_array(dmask, uint, NULL, S_IRUGO | S_IWUSR);
249module_param_array(bmask, uint, NULL, S_IRUGO | S_IWUSR);
235module_param_array(iomode, uint, NULL, S_IRUGO | S_IWUSR); 250module_param_array(iomode, uint, NULL, S_IRUGO | S_IWUSR);
236module_param_array(port, uint, NULL, S_IRUGO | S_IWUSR); 251module_param_array(port, uint, NULL, S_IRUGO | S_IWUSR);
237module_param(hwid, uint, S_IRUGO | S_IWUSR); /* The hardware ID */ 252module_param(hwid, uint, S_IRUGO | S_IWUSR); /* The hardware ID */
@@ -1607,40 +1622,46 @@ hfcmulti_leds(struct hfc_multi *hc)
1607 struct dchannel *dch; 1622 struct dchannel *dch;
1608 int led[4]; 1623 int led[4];
1609 1624
1610 hc->ledcount += poll;
1611 if (hc->ledcount > 4096) {
1612 hc->ledcount -= 4096;
1613 hc->ledstate = 0xAFFEAFFE;
1614 }
1615
1616 switch (hc->leds) { 1625 switch (hc->leds) {
1617 case 1: /* HFC-E1 OEM */ 1626 case 1: /* HFC-E1 OEM */
1618 /* 2 red blinking: NT mode deactivate 1627 /* 2 red steady: LOS
1619 * 2 red steady: TE mode deactivate 1628 * 1 red steady: L1 not active
1620 * left green: L1 active 1629 * 2 green steady: L1 active
1621 * left red: frame sync, but no L1 1630 * 1st green flashing: activity on TX
1622 * right green: L2 active 1631 * 2nd green flashing: activity on RX
1623 */ 1632 */
1624 if (hc->chan[hc->dslot].sync != 2) { /* no frame sync */ 1633 led[0] = 0;
1625 if (hc->chan[hc->dslot].dch->dev.D.protocol 1634 led[1] = 0;
1626 != ISDN_P_NT_E1) { 1635 led[2] = 0;
1627 led[0] = 1; 1636 led[3] = 0;
1637 dch = hc->chan[hc->dnum[0]].dch;
1638 if (dch) {
1639 if (hc->chan[hc->dnum[0]].los)
1628 led[1] = 1; 1640 led[1] = 1;
1629 } else if (hc->ledcount >> 11) { 1641 if (hc->e1_state != 1) {
1630 led[0] = 1; 1642 led[0] = 1;
1631 led[1] = 1; 1643 hc->flash[2] = 0;
1644 hc->flash[3] = 0;
1632 } else { 1645 } else {
1633 led[0] = 0; 1646 led[2] = 1;
1634 led[1] = 0; 1647 led[3] = 1;
1648 if (!hc->flash[2] && hc->activity_tx)
1649 hc->flash[2] = poll;
1650 if (!hc->flash[3] && hc->activity_rx)
1651 hc->flash[3] = poll;
1652 if (hc->flash[2] && hc->flash[2] < 1024)
1653 led[2] = 0;
1654 if (hc->flash[3] && hc->flash[3] < 1024)
1655 led[3] = 0;
1656 if (hc->flash[2] >= 2048)
1657 hc->flash[2] = 0;
1658 if (hc->flash[3] >= 2048)
1659 hc->flash[3] = 0;
1660 if (hc->flash[2])
1661 hc->flash[2] += poll;
1662 if (hc->flash[3])
1663 hc->flash[3] += poll;
1635 } 1664 }
1636 led[2] = 0;
1637 led[3] = 0;
1638 } else { /* with frame sync */
1639 /* TODO make it work */
1640 led[0] = 0;
1641 led[1] = 0;
1642 led[2] = 0;
1643 led[3] = 1;
1644 } 1665 }
1645 leds = (led[0] | (led[1]<<2) | (led[2]<<1) | (led[3]<<3))^0xF; 1666 leds = (led[0] | (led[1]<<2) | (led[2]<<1) | (led[3]<<3))^0xF;
1646 /* leds are inverted */ 1667 /* leds are inverted */
@@ -1651,9 +1672,9 @@ hfcmulti_leds(struct hfc_multi *hc)
1651 break; 1672 break;
1652 1673
1653 case 2: /* HFC-4S OEM */ 1674 case 2: /* HFC-4S OEM */
1654 /* red blinking = PH_DEACTIVATE NT Mode 1675 /* red steady: PH_DEACTIVATE
1655 * red steady = PH_DEACTIVATE TE Mode 1676 * green steady: PH_ACTIVATE
1656 * green steady = PH_ACTIVATE 1677 * green flashing: activity on TX
1657 */ 1678 */
1658 for (i = 0; i < 4; i++) { 1679 for (i = 0; i < 4; i++) {
1659 state = 0; 1680 state = 0;
@@ -1669,17 +1690,20 @@ hfcmulti_leds(struct hfc_multi *hc)
1669 if (state) { 1690 if (state) {
1670 if (state == active) { 1691 if (state == active) {
1671 led[i] = 1; /* led green */ 1692 led[i] = 1; /* led green */
1672 } else 1693 hc->activity_tx |= hc->activity_rx;
1673 if (dch->dev.D.protocol == ISDN_P_TE_S0) 1694 if (!hc->flash[i] &&
1674 /* TE mode: led red */ 1695 (hc->activity_tx & (1 << i)))
1675 led[i] = 2; 1696 hc->flash[i] = poll;
1676 else 1697 if (hc->flash[i] && hc->flash[i] < 1024)
1677 if (hc->ledcount >> 11) 1698 led[i] = 0; /* led off */
1678 /* led red */ 1699 if (hc->flash[i] >= 2048)
1679 led[i] = 2; 1700 hc->flash[i] = 0;
1680 else 1701 if (hc->flash[i])
1681 /* led off */ 1702 hc->flash[i] += poll;
1682 led[i] = 0; 1703 } else {
1704 led[i] = 2; /* led red */
1705 hc->flash[i] = 0;
1706 }
1683 } else 1707 } else
1684 led[i] = 0; /* led off */ 1708 led[i] = 0; /* led off */
1685 } 1709 }
@@ -1712,9 +1736,9 @@ hfcmulti_leds(struct hfc_multi *hc)
1712 break; 1736 break;
1713 1737
1714 case 3: /* HFC 1S/2S Beronet */ 1738 case 3: /* HFC 1S/2S Beronet */
1715 /* red blinking = PH_DEACTIVATE NT Mode 1739 /* red steady: PH_DEACTIVATE
1716 * red steady = PH_DEACTIVATE TE Mode 1740 * green steady: PH_ACTIVATE
1717 * green steady = PH_ACTIVATE 1741 * green flashing: activity on TX
1718 */ 1742 */
1719 for (i = 0; i < 2; i++) { 1743 for (i = 0; i < 2; i++) {
1720 state = 0; 1744 state = 0;
@@ -1730,22 +1754,23 @@ hfcmulti_leds(struct hfc_multi *hc)
1730 if (state) { 1754 if (state) {
1731 if (state == active) { 1755 if (state == active) {
1732 led[i] = 1; /* led green */ 1756 led[i] = 1; /* led green */
1733 } else 1757 hc->activity_tx |= hc->activity_rx;
1734 if (dch->dev.D.protocol == ISDN_P_TE_S0) 1758 if (!hc->flash[i] &&
1735 /* TE mode: led red */ 1759 (hc->activity_tx & (1 << i)))
1736 led[i] = 2; 1760 hc->flash[i] = poll;
1737 else 1761 if (hc->flash[i] < 1024)
1738 if (hc->ledcount >> 11) 1762 led[i] = 0; /* led off */
1739 /* led red */ 1763 if (hc->flash[i] >= 2048)
1740 led[i] = 2; 1764 hc->flash[i] = 0;
1741 else 1765 if (hc->flash[i])
1742 /* led off */ 1766 hc->flash[i] += poll;
1743 led[i] = 0; 1767 } else {
1768 led[i] = 2; /* led red */
1769 hc->flash[i] = 0;
1770 }
1744 } else 1771 } else
1745 led[i] = 0; /* led off */ 1772 led[i] = 0; /* led off */
1746 } 1773 }
1747
1748
1749 leds = (led[0] > 0) | ((led[1] > 0) << 1) | ((led[0]&1) << 2) 1774 leds = (led[0] > 0) | ((led[1] > 0) << 1) | ((led[0]&1) << 2)
1750 | ((led[1]&1) << 3); 1775 | ((led[1]&1) << 3);
1751 if (leds != (int)hc->ledstate) { 1776 if (leds != (int)hc->ledstate) {
@@ -1757,8 +1782,11 @@ hfcmulti_leds(struct hfc_multi *hc)
1757 } 1782 }
1758 break; 1783 break;
1759 case 8: /* HFC 8S+ Beronet */ 1784 case 8: /* HFC 8S+ Beronet */
1760 lled = 0; 1785 /* off: PH_DEACTIVATE
1761 1786 * steady: PH_ACTIVATE
1787 * flashing: activity on TX
1788 */
1789 lled = 0xff; /* leds off */
1762 for (i = 0; i < 8; i++) { 1790 for (i = 0; i < 8; i++) {
1763 state = 0; 1791 state = 0;
1764 active = -1; 1792 active = -1;
@@ -1772,14 +1800,20 @@ hfcmulti_leds(struct hfc_multi *hc)
1772 } 1800 }
1773 if (state) { 1801 if (state) {
1774 if (state == active) { 1802 if (state == active) {
1775 lled |= 0 << i; 1803 lled &= ~(1 << i); /* led on */
1804 hc->activity_tx |= hc->activity_rx;
1805 if (!hc->flash[i] &&
1806 (hc->activity_tx & (1 << i)))
1807 hc->flash[i] = poll;
1808 if (hc->flash[i] < 1024)
1809 lled |= 1 << i; /* led off */
1810 if (hc->flash[i] >= 2048)
1811 hc->flash[i] = 0;
1812 if (hc->flash[i])
1813 hc->flash[i] += poll;
1776 } else 1814 } else
1777 if (hc->ledcount >> 11) 1815 hc->flash[i] = 0;
1778 lled |= 0 << i; 1816 }
1779 else
1780 lled |= 1 << i;
1781 } else
1782 lled |= 1 << i;
1783 } 1817 }
1784 leddw = lled << 24 | lled << 16 | lled << 8 | lled; 1818 leddw = lled << 24 | lled << 16 | lled << 8 | lled;
1785 if (leddw != hc->ledstate) { 1819 if (leddw != hc->ledstate) {
@@ -1794,6 +1828,8 @@ hfcmulti_leds(struct hfc_multi *hc)
1794 } 1828 }
1795 break; 1829 break;
1796 } 1830 }
1831 hc->activity_tx = 0;
1832 hc->activity_rx = 0;
1797} 1833}
1798/* 1834/*
1799 * read dtmf coefficients 1835 * read dtmf coefficients
@@ -2093,7 +2129,8 @@ next_frame:
2093 *txpending = 1; 2129 *txpending = 1;
2094 2130
2095 /* show activity */ 2131 /* show activity */
2096 hc->activity[hc->chan[ch].port] = 1; 2132 if (dch)
2133 hc->activity_tx |= 1 << hc->chan[ch].port;
2097 2134
2098 /* fill fifo to what we have left */ 2135 /* fill fifo to what we have left */
2099 ii = len; 2136 ii = len;
@@ -2236,7 +2273,8 @@ next_frame:
2236 } 2273 }
2237 } 2274 }
2238 /* show activity */ 2275 /* show activity */
2239 hc->activity[hc->chan[ch].port] = 1; 2276 if (dch)
2277 hc->activity_rx |= 1 << hc->chan[ch].port;
2240 2278
2241 /* empty fifo with what we have */ 2279 /* empty fifo with what we have */
2242 if (dch || test_bit(FLG_HDLC, &bch->Flags)) { 2280 if (dch || test_bit(FLG_HDLC, &bch->Flags)) {
@@ -2430,55 +2468,55 @@ handle_timer_irq(struct hfc_multi *hc)
2430 } 2468 }
2431 } 2469 }
2432 if (hc->ctype == HFC_TYPE_E1 && hc->created[0]) { 2470 if (hc->ctype == HFC_TYPE_E1 && hc->created[0]) {
2433 dch = hc->chan[hc->dslot].dch; 2471 dch = hc->chan[hc->dnum[0]].dch;
2434 if (test_bit(HFC_CFG_REPORT_LOS, &hc->chan[hc->dslot].cfg)) { 2472 /* LOS */
2435 /* LOS */ 2473 temp = HFC_inb_nodebug(hc, R_SYNC_STA) & V_SIG_LOS;
2436 temp = HFC_inb_nodebug(hc, R_SYNC_STA) & V_SIG_LOS; 2474 hc->chan[hc->dnum[0]].los = temp;
2437 if (!temp && hc->chan[hc->dslot].los) 2475 if (test_bit(HFC_CFG_REPORT_LOS, &hc->chan[hc->dnum[0]].cfg)) {
2476 if (!temp && hc->chan[hc->dnum[0]].los)
2438 signal_state_up(dch, L1_SIGNAL_LOS_ON, 2477 signal_state_up(dch, L1_SIGNAL_LOS_ON,
2439 "LOS detected"); 2478 "LOS detected");
2440 if (temp && !hc->chan[hc->dslot].los) 2479 if (temp && !hc->chan[hc->dnum[0]].los)
2441 signal_state_up(dch, L1_SIGNAL_LOS_OFF, 2480 signal_state_up(dch, L1_SIGNAL_LOS_OFF,
2442 "LOS gone"); 2481 "LOS gone");
2443 hc->chan[hc->dslot].los = temp;
2444 } 2482 }
2445 if (test_bit(HFC_CFG_REPORT_AIS, &hc->chan[hc->dslot].cfg)) { 2483 if (test_bit(HFC_CFG_REPORT_AIS, &hc->chan[hc->dnum[0]].cfg)) {
2446 /* AIS */ 2484 /* AIS */
2447 temp = HFC_inb_nodebug(hc, R_SYNC_STA) & V_AIS; 2485 temp = HFC_inb_nodebug(hc, R_SYNC_STA) & V_AIS;
2448 if (!temp && hc->chan[hc->dslot].ais) 2486 if (!temp && hc->chan[hc->dnum[0]].ais)
2449 signal_state_up(dch, L1_SIGNAL_AIS_ON, 2487 signal_state_up(dch, L1_SIGNAL_AIS_ON,
2450 "AIS detected"); 2488 "AIS detected");
2451 if (temp && !hc->chan[hc->dslot].ais) 2489 if (temp && !hc->chan[hc->dnum[0]].ais)
2452 signal_state_up(dch, L1_SIGNAL_AIS_OFF, 2490 signal_state_up(dch, L1_SIGNAL_AIS_OFF,
2453 "AIS gone"); 2491 "AIS gone");
2454 hc->chan[hc->dslot].ais = temp; 2492 hc->chan[hc->dnum[0]].ais = temp;
2455 } 2493 }
2456 if (test_bit(HFC_CFG_REPORT_SLIP, &hc->chan[hc->dslot].cfg)) { 2494 if (test_bit(HFC_CFG_REPORT_SLIP, &hc->chan[hc->dnum[0]].cfg)) {
2457 /* SLIP */ 2495 /* SLIP */
2458 temp = HFC_inb_nodebug(hc, R_SLIP) & V_FOSLIP_RX; 2496 temp = HFC_inb_nodebug(hc, R_SLIP) & V_FOSLIP_RX;
2459 if (!temp && hc->chan[hc->dslot].slip_rx) 2497 if (!temp && hc->chan[hc->dnum[0]].slip_rx)
2460 signal_state_up(dch, L1_SIGNAL_SLIP_RX, 2498 signal_state_up(dch, L1_SIGNAL_SLIP_RX,
2461 " bit SLIP detected RX"); 2499 " bit SLIP detected RX");
2462 hc->chan[hc->dslot].slip_rx = temp; 2500 hc->chan[hc->dnum[0]].slip_rx = temp;
2463 temp = HFC_inb_nodebug(hc, R_SLIP) & V_FOSLIP_TX; 2501 temp = HFC_inb_nodebug(hc, R_SLIP) & V_FOSLIP_TX;
2464 if (!temp && hc->chan[hc->dslot].slip_tx) 2502 if (!temp && hc->chan[hc->dnum[0]].slip_tx)
2465 signal_state_up(dch, L1_SIGNAL_SLIP_TX, 2503 signal_state_up(dch, L1_SIGNAL_SLIP_TX,
2466 " bit SLIP detected TX"); 2504 " bit SLIP detected TX");
2467 hc->chan[hc->dslot].slip_tx = temp; 2505 hc->chan[hc->dnum[0]].slip_tx = temp;
2468 } 2506 }
2469 if (test_bit(HFC_CFG_REPORT_RDI, &hc->chan[hc->dslot].cfg)) { 2507 if (test_bit(HFC_CFG_REPORT_RDI, &hc->chan[hc->dnum[0]].cfg)) {
2470 /* RDI */ 2508 /* RDI */
2471 temp = HFC_inb_nodebug(hc, R_RX_SL0_0) & V_A; 2509 temp = HFC_inb_nodebug(hc, R_RX_SL0_0) & V_A;
2472 if (!temp && hc->chan[hc->dslot].rdi) 2510 if (!temp && hc->chan[hc->dnum[0]].rdi)
2473 signal_state_up(dch, L1_SIGNAL_RDI_ON, 2511 signal_state_up(dch, L1_SIGNAL_RDI_ON,
2474 "RDI detected"); 2512 "RDI detected");
2475 if (temp && !hc->chan[hc->dslot].rdi) 2513 if (temp && !hc->chan[hc->dnum[0]].rdi)
2476 signal_state_up(dch, L1_SIGNAL_RDI_OFF, 2514 signal_state_up(dch, L1_SIGNAL_RDI_OFF,
2477 "RDI gone"); 2515 "RDI gone");
2478 hc->chan[hc->dslot].rdi = temp; 2516 hc->chan[hc->dnum[0]].rdi = temp;
2479 } 2517 }
2480 temp = HFC_inb_nodebug(hc, R_JATT_DIR); 2518 temp = HFC_inb_nodebug(hc, R_JATT_DIR);
2481 switch (hc->chan[hc->dslot].sync) { 2519 switch (hc->chan[hc->dnum[0]].sync) {
2482 case 0: 2520 case 0:
2483 if ((temp & 0x60) == 0x60) { 2521 if ((temp & 0x60) == 0x60) {
2484 if (debug & DEBUG_HFCMULTI_SYNC) 2522 if (debug & DEBUG_HFCMULTI_SYNC)
@@ -2487,10 +2525,10 @@ handle_timer_irq(struct hfc_multi *hc)
2487 "in clock sync\n", 2525 "in clock sync\n",
2488 __func__, hc->id); 2526 __func__, hc->id);
2489 HFC_outb(hc, R_RX_OFF, 2527 HFC_outb(hc, R_RX_OFF,
2490 hc->chan[hc->dslot].jitter | V_RX_INIT); 2528 hc->chan[hc->dnum[0]].jitter | V_RX_INIT);
2491 HFC_outb(hc, R_TX_OFF, 2529 HFC_outb(hc, R_TX_OFF,
2492 hc->chan[hc->dslot].jitter | V_RX_INIT); 2530 hc->chan[hc->dnum[0]].jitter | V_RX_INIT);
2493 hc->chan[hc->dslot].sync = 1; 2531 hc->chan[hc->dnum[0]].sync = 1;
2494 goto check_framesync; 2532 goto check_framesync;
2495 } 2533 }
2496 break; 2534 break;
@@ -2501,7 +2539,7 @@ handle_timer_irq(struct hfc_multi *hc)
2501 "%s: (id=%d) E1 " 2539 "%s: (id=%d) E1 "
2502 "lost clock sync\n", 2540 "lost clock sync\n",
2503 __func__, hc->id); 2541 __func__, hc->id);
2504 hc->chan[hc->dslot].sync = 0; 2542 hc->chan[hc->dnum[0]].sync = 0;
2505 break; 2543 break;
2506 } 2544 }
2507 check_framesync: 2545 check_framesync:
@@ -2512,7 +2550,7 @@ handle_timer_irq(struct hfc_multi *hc)
2512 "%s: (id=%d) E1 " 2550 "%s: (id=%d) E1 "
2513 "now in frame sync\n", 2551 "now in frame sync\n",
2514 __func__, hc->id); 2552 __func__, hc->id);
2515 hc->chan[hc->dslot].sync = 2; 2553 hc->chan[hc->dnum[0]].sync = 2;
2516 } 2554 }
2517 break; 2555 break;
2518 case 2: 2556 case 2:
@@ -2522,7 +2560,7 @@ handle_timer_irq(struct hfc_multi *hc)
2522 "%s: (id=%d) E1 lost " 2560 "%s: (id=%d) E1 lost "
2523 "clock & frame sync\n", 2561 "clock & frame sync\n",
2524 __func__, hc->id); 2562 __func__, hc->id);
2525 hc->chan[hc->dslot].sync = 0; 2563 hc->chan[hc->dnum[0]].sync = 0;
2526 break; 2564 break;
2527 } 2565 }
2528 temp = HFC_inb_nodebug(hc, R_SYNC_STA); 2566 temp = HFC_inb_nodebug(hc, R_SYNC_STA);
@@ -2532,7 +2570,7 @@ handle_timer_irq(struct hfc_multi *hc)
2532 "%s: (id=%d) E1 " 2570 "%s: (id=%d) E1 "
2533 "lost frame sync\n", 2571 "lost frame sync\n",
2534 __func__, hc->id); 2572 __func__, hc->id);
2535 hc->chan[hc->dslot].sync = 1; 2573 hc->chan[hc->dnum[0]].sync = 1;
2536 } 2574 }
2537 break; 2575 break;
2538 } 2576 }
@@ -2673,7 +2711,7 @@ hfcmulti_interrupt(int intno, void *dev_id)
2673 int i; 2711 int i;
2674 void __iomem *plx_acc; 2712 void __iomem *plx_acc;
2675 u_short wval; 2713 u_short wval;
2676 u_char e1_syncsta, temp; 2714 u_char e1_syncsta, temp, temp2;
2677 u_long flags; 2715 u_long flags;
2678 2716
2679 if (!hc) { 2717 if (!hc) {
@@ -2748,7 +2786,7 @@ hfcmulti_interrupt(int intno, void *dev_id)
2748 if (r_irq_misc & V_STA_IRQ) { 2786 if (r_irq_misc & V_STA_IRQ) {
2749 if (hc->ctype == HFC_TYPE_E1) { 2787 if (hc->ctype == HFC_TYPE_E1) {
2750 /* state machine */ 2788 /* state machine */
2751 dch = hc->chan[hc->dslot].dch; 2789 dch = hc->chan[hc->dnum[0]].dch;
2752 e1_syncsta = HFC_inb_nodebug(hc, R_SYNC_STA); 2790 e1_syncsta = HFC_inb_nodebug(hc, R_SYNC_STA);
2753 if (test_bit(HFC_CHIP_PLXSD, &hc->chip) 2791 if (test_bit(HFC_CHIP_PLXSD, &hc->chip)
2754 && hc->e1_getclock) { 2792 && hc->e1_getclock) {
@@ -2758,23 +2796,26 @@ hfcmulti_interrupt(int intno, void *dev_id)
2758 hc->syncronized = 0; 2796 hc->syncronized = 0;
2759 } 2797 }
2760 /* undocumented: status changes during read */ 2798 /* undocumented: status changes during read */
2761 dch->state = HFC_inb_nodebug(hc, R_E1_RD_STA); 2799 temp = HFC_inb_nodebug(hc, R_E1_RD_STA);
2762 while (dch->state != (temp = 2800 while (temp != (temp2 =
2763 HFC_inb_nodebug(hc, R_E1_RD_STA))) { 2801 HFC_inb_nodebug(hc, R_E1_RD_STA))) {
2764 if (debug & DEBUG_HFCMULTI_STATE) 2802 if (debug & DEBUG_HFCMULTI_STATE)
2765 printk(KERN_DEBUG "%s: reread " 2803 printk(KERN_DEBUG "%s: reread "
2766 "STATE because %d!=%d\n", 2804 "STATE because %d!=%d\n",
2767 __func__, temp, 2805 __func__, temp, temp2);
2768 dch->state); 2806 temp = temp2; /* repeat */
2769 dch->state = temp; /* repeat */
2770 } 2807 }
2771 dch->state = HFC_inb_nodebug(hc, R_E1_RD_STA) 2808 /* broadcast state change to all fragments */
2772 & 0x7;
2773 schedule_event(dch, FLG_PHCHANGE);
2774 if (debug & DEBUG_HFCMULTI_STATE) 2809 if (debug & DEBUG_HFCMULTI_STATE)
2775 printk(KERN_DEBUG 2810 printk(KERN_DEBUG
2776 "%s: E1 (id=%d) newstate %x\n", 2811 "%s: E1 (id=%d) newstate %x\n",
2777 __func__, hc->id, dch->state); 2812 __func__, hc->id, temp & 0x7);
2813 for (i = 0; i < hc->ports; i++) {
2814 dch = hc->chan[hc->dnum[i]].dch;
2815 dch->state = temp & 0x7;
2816 schedule_event(dch, FLG_PHCHANGE);
2817 }
2818
2778 if (test_bit(HFC_CHIP_PLXSD, &hc->chip)) 2819 if (test_bit(HFC_CHIP_PLXSD, &hc->chip))
2779 plxsd_checksync(hc, 0); 2820 plxsd_checksync(hc, 0);
2780 } 2821 }
@@ -3018,8 +3059,10 @@ mode_hfcmulti(struct hfc_multi *hc, int ch, int protocol, int slot_tx,
3018 HFC_outb(hc, A_CON_HDLC, 0x20 | V_HDLC_TRP | V_IFF); 3059 HFC_outb(hc, A_CON_HDLC, 0x20 | V_HDLC_TRP | V_IFF);
3019 HFC_outb(hc, A_SUBCH_CFG, 0); 3060 HFC_outb(hc, A_SUBCH_CFG, 0);
3020 HFC_outb(hc, A_IRQ_MSK, 0); 3061 HFC_outb(hc, A_IRQ_MSK, 0);
3021 HFC_outb(hc, R_INC_RES_FIFO, V_RES_F); 3062 if (hc->chan[ch].protocol != protocol) {
3022 HFC_wait(hc); 3063 HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
3064 HFC_wait(hc);
3065 }
3023 HFC_outb(hc, R_SLOT, ((((ch / 4) * 8) + 3066 HFC_outb(hc, R_SLOT, ((((ch / 4) * 8) +
3024 ((ch % 4) * 4) + 1) << 1) | 1); 3067 ((ch % 4) * 4) + 1) << 1) | 1);
3025 HFC_outb(hc, A_SL_CFG, 0x80 | 0x20 | (ch << 1) | 1); 3068 HFC_outb(hc, A_SL_CFG, 0x80 | 0x20 | (ch << 1) | 1);
@@ -3039,8 +3082,10 @@ mode_hfcmulti(struct hfc_multi *hc, int ch, int protocol, int slot_tx,
3039 HFC_outb(hc, A_CON_HDLC, 0x20 | V_HDLC_TRP | V_IFF); 3082 HFC_outb(hc, A_CON_HDLC, 0x20 | V_HDLC_TRP | V_IFF);
3040 HFC_outb(hc, A_SUBCH_CFG, 0); 3083 HFC_outb(hc, A_SUBCH_CFG, 0);
3041 HFC_outb(hc, A_IRQ_MSK, 0); 3084 HFC_outb(hc, A_IRQ_MSK, 0);
3042 HFC_outb(hc, R_INC_RES_FIFO, V_RES_F); 3085 if (hc->chan[ch].protocol != protocol) {
3043 HFC_wait(hc); 3086 HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
3087 HFC_wait(hc);
3088 }
3044 /* tx silence */ 3089 /* tx silence */
3045 HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, hc->silence); 3090 HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, hc->silence);
3046 HFC_outb(hc, R_SLOT, (((ch / 4) * 8) + 3091 HFC_outb(hc, R_SLOT, (((ch / 4) * 8) +
@@ -3059,8 +3104,10 @@ mode_hfcmulti(struct hfc_multi *hc, int ch, int protocol, int slot_tx,
3059 V_HDLC_TRP | V_IFF); 3104 V_HDLC_TRP | V_IFF);
3060 HFC_outb(hc, A_SUBCH_CFG, 0); 3105 HFC_outb(hc, A_SUBCH_CFG, 0);
3061 HFC_outb(hc, A_IRQ_MSK, 0); 3106 HFC_outb(hc, A_IRQ_MSK, 0);
3062 HFC_outb(hc, R_INC_RES_FIFO, V_RES_F); 3107 if (hc->chan[ch].protocol != protocol) {
3063 HFC_wait(hc); 3108 HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
3109 HFC_wait(hc);
3110 }
3064 /* tx silence */ 3111 /* tx silence */
3065 HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, hc->silence); 3112 HFC_outb_nodebug(hc, A_FIFO_DATA0_NOINC, hc->silence);
3066 /* enable RX fifo */ 3113 /* enable RX fifo */
@@ -3075,8 +3122,10 @@ mode_hfcmulti(struct hfc_multi *hc, int ch, int protocol, int slot_tx,
3075 V_HDLC_TRP); 3122 V_HDLC_TRP);
3076 HFC_outb(hc, A_SUBCH_CFG, 0); 3123 HFC_outb(hc, A_SUBCH_CFG, 0);
3077 HFC_outb(hc, A_IRQ_MSK, 0); 3124 HFC_outb(hc, A_IRQ_MSK, 0);
3078 HFC_outb(hc, R_INC_RES_FIFO, V_RES_F); 3125 if (hc->chan[ch].protocol != protocol) {
3079 HFC_wait(hc); 3126 HFC_outb(hc, R_INC_RES_FIFO, V_RES_F);
3127 HFC_wait(hc);
3128 }
3080 } 3129 }
3081 if (hc->ctype != HFC_TYPE_E1) { 3130 if (hc->ctype != HFC_TYPE_E1) {
3082 hc->hw.a_st_ctrl0[hc->chan[ch].port] |= 3131 hc->hw.a_st_ctrl0[hc->chan[ch].port] |=
@@ -3839,31 +3888,37 @@ hfcmulti_initmode(struct dchannel *dch)
3839 if (debug & DEBUG_HFCMULTI_INIT) 3888 if (debug & DEBUG_HFCMULTI_INIT)
3840 printk(KERN_DEBUG "%s: entered\n", __func__); 3889 printk(KERN_DEBUG "%s: entered\n", __func__);
3841 3890
3891 i = dch->slot;
3892 pt = hc->chan[i].port;
3842 if (hc->ctype == HFC_TYPE_E1) { 3893 if (hc->ctype == HFC_TYPE_E1) {
3843 hc->chan[hc->dslot].slot_tx = -1; 3894 /* E1 */
3844 hc->chan[hc->dslot].slot_rx = -1; 3895 hc->chan[hc->dnum[pt]].slot_tx = -1;
3845 hc->chan[hc->dslot].conf = -1; 3896 hc->chan[hc->dnum[pt]].slot_rx = -1;
3846 if (hc->dslot) { 3897 hc->chan[hc->dnum[pt]].conf = -1;
3847 mode_hfcmulti(hc, hc->dslot, dch->dev.D.protocol, 3898 if (hc->dnum[pt]) {
3899 mode_hfcmulti(hc, dch->slot, dch->dev.D.protocol,
3848 -1, 0, -1, 0); 3900 -1, 0, -1, 0);
3849 dch->timer.function = (void *) hfcmulti_dbusy_timer; 3901 dch->timer.function = (void *) hfcmulti_dbusy_timer;
3850 dch->timer.data = (long) dch; 3902 dch->timer.data = (long) dch;
3851 init_timer(&dch->timer); 3903 init_timer(&dch->timer);
3852 } 3904 }
3853 for (i = 1; i <= 31; i++) { 3905 for (i = 1; i <= 31; i++) {
3854 if (i == hc->dslot) 3906 if (!((1 << i) & hc->bmask[pt])) /* skip unused chan */
3855 continue; 3907 continue;
3856 hc->chan[i].slot_tx = -1; 3908 hc->chan[i].slot_tx = -1;
3857 hc->chan[i].slot_rx = -1; 3909 hc->chan[i].slot_rx = -1;
3858 hc->chan[i].conf = -1; 3910 hc->chan[i].conf = -1;
3859 mode_hfcmulti(hc, i, ISDN_P_NONE, -1, 0, -1, 0); 3911 mode_hfcmulti(hc, i, ISDN_P_NONE, -1, 0, -1, 0);
3860 } 3912 }
3861 /* E1 */ 3913 }
3862 if (test_bit(HFC_CFG_REPORT_LOS, &hc->chan[hc->dslot].cfg)) { 3914 if (hc->ctype == HFC_TYPE_E1 && pt == 0) {
3915 /* E1, port 0 */
3916 dch = hc->chan[hc->dnum[0]].dch;
3917 if (test_bit(HFC_CFG_REPORT_LOS, &hc->chan[hc->dnum[0]].cfg)) {
3863 HFC_outb(hc, R_LOS0, 255); /* 2 ms */ 3918 HFC_outb(hc, R_LOS0, 255); /* 2 ms */
3864 HFC_outb(hc, R_LOS1, 255); /* 512 ms */ 3919 HFC_outb(hc, R_LOS1, 255); /* 512 ms */
3865 } 3920 }
3866 if (test_bit(HFC_CFG_OPTICAL, &hc->chan[hc->dslot].cfg)) { 3921 if (test_bit(HFC_CFG_OPTICAL, &hc->chan[hc->dnum[0]].cfg)) {
3867 HFC_outb(hc, R_RX0, 0); 3922 HFC_outb(hc, R_RX0, 0);
3868 hc->hw.r_tx0 = 0 | V_OUT_EN; 3923 hc->hw.r_tx0 = 0 | V_OUT_EN;
3869 } else { 3924 } else {
@@ -3876,12 +3931,12 @@ hfcmulti_initmode(struct dchannel *dch)
3876 HFC_outb(hc, R_TX_FR0, 0x00); 3931 HFC_outb(hc, R_TX_FR0, 0x00);
3877 HFC_outb(hc, R_TX_FR1, 0xf8); 3932 HFC_outb(hc, R_TX_FR1, 0xf8);
3878 3933
3879 if (test_bit(HFC_CFG_CRC4, &hc->chan[hc->dslot].cfg)) 3934 if (test_bit(HFC_CFG_CRC4, &hc->chan[hc->dnum[0]].cfg))
3880 HFC_outb(hc, R_TX_FR2, V_TX_MF | V_TX_E | V_NEG_E); 3935 HFC_outb(hc, R_TX_FR2, V_TX_MF | V_TX_E | V_NEG_E);
3881 3936
3882 HFC_outb(hc, R_RX_FR0, V_AUTO_RESYNC | V_AUTO_RECO | 0); 3937 HFC_outb(hc, R_RX_FR0, V_AUTO_RESYNC | V_AUTO_RECO | 0);
3883 3938
3884 if (test_bit(HFC_CFG_CRC4, &hc->chan[hc->dslot].cfg)) 3939 if (test_bit(HFC_CFG_CRC4, &hc->chan[hc->dnum[0]].cfg))
3885 HFC_outb(hc, R_RX_FR1, V_RX_MF | V_RX_MF_SYNC); 3940 HFC_outb(hc, R_RX_FR1, V_RX_MF | V_RX_MF_SYNC);
3886 3941
3887 if (dch->dev.D.protocol == ISDN_P_NT_E1) { 3942 if (dch->dev.D.protocol == ISDN_P_NT_E1) {
@@ -3944,13 +3999,14 @@ hfcmulti_initmode(struct dchannel *dch)
3944 hc->syncronized = 0; 3999 hc->syncronized = 0;
3945 plxsd_checksync(hc, 0); 4000 plxsd_checksync(hc, 0);
3946 } 4001 }
3947 } else { 4002 }
3948 i = dch->slot; 4003 if (hc->ctype != HFC_TYPE_E1) {
4004 /* ST */
3949 hc->chan[i].slot_tx = -1; 4005 hc->chan[i].slot_tx = -1;
3950 hc->chan[i].slot_rx = -1; 4006 hc->chan[i].slot_rx = -1;
3951 hc->chan[i].conf = -1; 4007 hc->chan[i].conf = -1;
3952 mode_hfcmulti(hc, i, dch->dev.D.protocol, -1, 0, -1, 0); 4008 mode_hfcmulti(hc, i, dch->dev.D.protocol, -1, 0, -1, 0);
3953 dch->timer.function = (void *)hfcmulti_dbusy_timer; 4009 dch->timer.function = (void *) hfcmulti_dbusy_timer;
3954 dch->timer.data = (long) dch; 4010 dch->timer.data = (long) dch;
3955 init_timer(&dch->timer); 4011 init_timer(&dch->timer);
3956 hc->chan[i - 2].slot_tx = -1; 4012 hc->chan[i - 2].slot_tx = -1;
@@ -3961,8 +4017,6 @@ hfcmulti_initmode(struct dchannel *dch)
3961 hc->chan[i - 1].slot_rx = -1; 4017 hc->chan[i - 1].slot_rx = -1;
3962 hc->chan[i - 1].conf = -1; 4018 hc->chan[i - 1].conf = -1;
3963 mode_hfcmulti(hc, i - 1, ISDN_P_NONE, -1, 0, -1, 0); 4019 mode_hfcmulti(hc, i - 1, ISDN_P_NONE, -1, 0, -1, 0);
3964 /* ST */
3965 pt = hc->chan[i].port;
3966 /* select interface */ 4020 /* select interface */
3967 HFC_outb(hc, R_ST_SEL, pt); 4021 HFC_outb(hc, R_ST_SEL, pt);
3968 /* undocumented: delay after R_ST_SEL */ 4022 /* undocumented: delay after R_ST_SEL */
@@ -4054,14 +4108,9 @@ open_dchannel(struct hfc_multi *hc, struct dchannel *dch,
4054 hfcmulti_initmode(dch); 4108 hfcmulti_initmode(dch);
4055 spin_unlock_irqrestore(&hc->lock, flags); 4109 spin_unlock_irqrestore(&hc->lock, flags);
4056 } 4110 }
4057 4111 if (test_bit(FLG_ACTIVE, &dch->Flags))
4058 if (((rq->protocol == ISDN_P_NT_S0) && (dch->state == 3)) ||
4059 ((rq->protocol == ISDN_P_TE_S0) && (dch->state == 7)) ||
4060 ((rq->protocol == ISDN_P_NT_E1) && (dch->state == 1)) ||
4061 ((rq->protocol == ISDN_P_TE_E1) && (dch->state == 1))) {
4062 _queue_data(&dch->dev.D, PH_ACTIVATE_IND, MISDN_ID_ANY, 4112 _queue_data(&dch->dev.D, PH_ACTIVATE_IND, MISDN_ID_ANY,
4063 0, NULL, GFP_KERNEL); 4113 0, NULL, GFP_KERNEL);
4064 }
4065 rq->ch = &dch->dev.D; 4114 rq->ch = &dch->dev.D;
4066 if (!try_module_get(THIS_MODULE)) 4115 if (!try_module_get(THIS_MODULE))
4067 printk(KERN_WARNING "%s:cannot get module\n", __func__); 4116 printk(KERN_WARNING "%s:cannot get module\n", __func__);
@@ -4112,7 +4161,7 @@ channel_dctrl(struct dchannel *dch, struct mISDN_ctrl_req *cq)
4112 4161
4113 switch (cq->op) { 4162 switch (cq->op) {
4114 case MISDN_CTRL_GETOP: 4163 case MISDN_CTRL_GETOP:
4115 cq->op = MISDN_CTRL_HFC_OP; 4164 cq->op = MISDN_CTRL_HFC_OP | MISDN_CTRL_L1_TIMER3;
4116 break; 4165 break;
4117 case MISDN_CTRL_HFC_WD_INIT: /* init the watchdog */ 4166 case MISDN_CTRL_HFC_WD_INIT: /* init the watchdog */
4118 wd_cnt = cq->p1 & 0xf; 4167 wd_cnt = cq->p1 & 0xf;
@@ -4142,6 +4191,9 @@ channel_dctrl(struct dchannel *dch, struct mISDN_ctrl_req *cq)
4142 __func__); 4191 __func__);
4143 HFC_outb(hc, R_BERT_WD_MD, hc->hw.r_bert_wd_md | V_WD_RES); 4192 HFC_outb(hc, R_BERT_WD_MD, hc->hw.r_bert_wd_md | V_WD_RES);
4144 break; 4193 break;
4194 case MISDN_CTRL_L1_TIMER3:
4195 ret = l1_event(dch->l1, HW_TIMER3_VALUE | (cq->p1 & 0xff));
4196 break;
4145 default: 4197 default:
4146 printk(KERN_WARNING "%s: unknown Op %x\n", 4198 printk(KERN_WARNING "%s: unknown Op %x\n",
4147 __func__, cq->op); 4199 __func__, cq->op);
@@ -4545,6 +4597,8 @@ release_port(struct hfc_multi *hc, struct dchannel *dch)
4545 } 4597 }
4546 /* free channels */ 4598 /* free channels */
4547 for (i = 0; i <= 31; i++) { 4599 for (i = 0; i <= 31; i++) {
4600 if (!((1 << i) & hc->bmask[pt])) /* skip unused chan */
4601 continue;
4548 if (hc->chan[i].bch) { 4602 if (hc->chan[i].bch) {
4549 if (debug & DEBUG_HFCMULTI_INIT) 4603 if (debug & DEBUG_HFCMULTI_INIT)
4550 printk(KERN_DEBUG 4604 printk(KERN_DEBUG
@@ -4600,7 +4654,8 @@ release_port(struct hfc_multi *hc, struct dchannel *dch)
4600 spin_unlock_irqrestore(&hc->lock, flags); 4654 spin_unlock_irqrestore(&hc->lock, flags);
4601 4655
4602 if (debug & DEBUG_HFCMULTI_INIT) 4656 if (debug & DEBUG_HFCMULTI_INIT)
4603 printk(KERN_DEBUG "%s: free port %d channel D\n", __func__, pt); 4657 printk(KERN_DEBUG "%s: free port %d channel D(%d)\n", __func__,
4658 pt+1, ci);
4604 mISDN_freedchannel(dch); 4659 mISDN_freedchannel(dch);
4605 kfree(dch); 4660 kfree(dch);
4606 4661
@@ -4622,15 +4677,19 @@ release_card(struct hfc_multi *hc)
4622 if (hc->iclock) 4677 if (hc->iclock)
4623 mISDN_unregister_clock(hc->iclock); 4678 mISDN_unregister_clock(hc->iclock);
4624 4679
4625 /* disable irq */ 4680 /* disable and free irq */
4626 spin_lock_irqsave(&hc->lock, flags); 4681 spin_lock_irqsave(&hc->lock, flags);
4627 disable_hwirq(hc); 4682 disable_hwirq(hc);
4628 spin_unlock_irqrestore(&hc->lock, flags); 4683 spin_unlock_irqrestore(&hc->lock, flags);
4629 udelay(1000); 4684 udelay(1000);
4685 if (hc->irq) {
4686 if (debug & DEBUG_HFCMULTI_INIT)
4687 printk(KERN_DEBUG "%s: free irq %d (hc=%p)\n",
4688 __func__, hc->irq, hc);
4689 free_irq(hc->irq, hc);
4690 hc->irq = 0;
4630 4691
4631 /* dimm leds */ 4692 }
4632 if (hc->leds)
4633 hfcmulti_leds(hc);
4634 4693
4635 /* disable D-channels & B-channels */ 4694 /* disable D-channels & B-channels */
4636 if (debug & DEBUG_HFCMULTI_INIT) 4695 if (debug & DEBUG_HFCMULTI_INIT)
@@ -4641,15 +4700,11 @@ release_card(struct hfc_multi *hc)
4641 release_port(hc, hc->chan[ch].dch); 4700 release_port(hc, hc->chan[ch].dch);
4642 } 4701 }
4643 4702
4644 /* release hardware & irq */ 4703 /* dimm leds */
4645 if (hc->irq) { 4704 if (hc->leds)
4646 if (debug & DEBUG_HFCMULTI_INIT) 4705 hfcmulti_leds(hc);
4647 printk(KERN_DEBUG "%s: free irq %d\n",
4648 __func__, hc->irq);
4649 free_irq(hc->irq, hc);
4650 hc->irq = 0;
4651 4706
4652 } 4707 /* release hardware */
4653 release_io_hfcmulti(hc); 4708 release_io_hfcmulti(hc);
4654 4709
4655 if (debug & DEBUG_HFCMULTI_INIT) 4710 if (debug & DEBUG_HFCMULTI_INIT)
@@ -4667,61 +4722,9 @@ release_card(struct hfc_multi *hc)
4667 __func__); 4722 __func__);
4668} 4723}
4669 4724
4670static int 4725static void
4671init_e1_port(struct hfc_multi *hc, struct hm_map *m) 4726init_e1_port_hw(struct hfc_multi *hc, struct hm_map *m)
4672{ 4727{
4673 struct dchannel *dch;
4674 struct bchannel *bch;
4675 int ch, ret = 0;
4676 char name[MISDN_MAX_IDLEN];
4677
4678 dch = kzalloc(sizeof(struct dchannel), GFP_KERNEL);
4679 if (!dch)
4680 return -ENOMEM;
4681 dch->debug = debug;
4682 mISDN_initdchannel(dch, MAX_DFRAME_LEN_L1, ph_state_change);
4683 dch->hw = hc;
4684 dch->dev.Dprotocols = (1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1);
4685 dch->dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
4686 (1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
4687 dch->dev.D.send = handle_dmsg;
4688 dch->dev.D.ctrl = hfcm_dctrl;
4689 dch->dev.nrbchan = (hc->dslot) ? 30 : 31;
4690 dch->slot = hc->dslot;
4691 hc->chan[hc->dslot].dch = dch;
4692 hc->chan[hc->dslot].port = 0;
4693 hc->chan[hc->dslot].nt_timer = -1;
4694 for (ch = 1; ch <= 31; ch++) {
4695 if (ch == hc->dslot) /* skip dchannel */
4696 continue;
4697 bch = kzalloc(sizeof(struct bchannel), GFP_KERNEL);
4698 if (!bch) {
4699 printk(KERN_ERR "%s: no memory for bchannel\n",
4700 __func__);
4701 ret = -ENOMEM;
4702 goto free_chan;
4703 }
4704 hc->chan[ch].coeff = kzalloc(512, GFP_KERNEL);
4705 if (!hc->chan[ch].coeff) {
4706 printk(KERN_ERR "%s: no memory for coeffs\n",
4707 __func__);
4708 ret = -ENOMEM;
4709 kfree(bch);
4710 goto free_chan;
4711 }
4712 bch->nr = ch;
4713 bch->slot = ch;
4714 bch->debug = debug;
4715 mISDN_initbchannel(bch, MAX_DATA_MEM);
4716 bch->hw = hc;
4717 bch->ch.send = handle_bmsg;
4718 bch->ch.ctrl = hfcm_bctrl;
4719 bch->ch.nr = ch;
4720 list_add(&bch->ch.list, &dch->dev.bchannels);
4721 hc->chan[ch].bch = bch;
4722 hc->chan[ch].port = 0;
4723 set_channelmap(bch->nr, dch->dev.channelmap);
4724 }
4725 /* set optical line type */ 4728 /* set optical line type */
4726 if (port[Port_cnt] & 0x001) { 4729 if (port[Port_cnt] & 0x001) {
4727 if (!m->opticalsupport) { 4730 if (!m->opticalsupport) {
@@ -4737,7 +4740,7 @@ init_e1_port(struct hfc_multi *hc, struct hm_map *m)
4737 __func__, 4740 __func__,
4738 HFC_cnt + 1, 1); 4741 HFC_cnt + 1, 1);
4739 test_and_set_bit(HFC_CFG_OPTICAL, 4742 test_and_set_bit(HFC_CFG_OPTICAL,
4740 &hc->chan[hc->dslot].cfg); 4743 &hc->chan[hc->dnum[0]].cfg);
4741 } 4744 }
4742 } 4745 }
4743 /* set LOS report */ 4746 /* set LOS report */
@@ -4747,7 +4750,7 @@ init_e1_port(struct hfc_multi *hc, struct hm_map *m)
4747 "LOS report: card(%d) port(%d)\n", 4750 "LOS report: card(%d) port(%d)\n",
4748 __func__, HFC_cnt + 1, 1); 4751 __func__, HFC_cnt + 1, 1);
4749 test_and_set_bit(HFC_CFG_REPORT_LOS, 4752 test_and_set_bit(HFC_CFG_REPORT_LOS,
4750 &hc->chan[hc->dslot].cfg); 4753 &hc->chan[hc->dnum[0]].cfg);
4751 } 4754 }
4752 /* set AIS report */ 4755 /* set AIS report */
4753 if (port[Port_cnt] & 0x008) { 4756 if (port[Port_cnt] & 0x008) {
@@ -4756,7 +4759,7 @@ init_e1_port(struct hfc_multi *hc, struct hm_map *m)
4756 "AIS report: card(%d) port(%d)\n", 4759 "AIS report: card(%d) port(%d)\n",
4757 __func__, HFC_cnt + 1, 1); 4760 __func__, HFC_cnt + 1, 1);
4758 test_and_set_bit(HFC_CFG_REPORT_AIS, 4761 test_and_set_bit(HFC_CFG_REPORT_AIS,
4759 &hc->chan[hc->dslot].cfg); 4762 &hc->chan[hc->dnum[0]].cfg);
4760 } 4763 }
4761 /* set SLIP report */ 4764 /* set SLIP report */
4762 if (port[Port_cnt] & 0x010) { 4765 if (port[Port_cnt] & 0x010) {
@@ -4766,7 +4769,7 @@ init_e1_port(struct hfc_multi *hc, struct hm_map *m)
4766 "card(%d) port(%d)\n", 4769 "card(%d) port(%d)\n",
4767 __func__, HFC_cnt + 1, 1); 4770 __func__, HFC_cnt + 1, 1);
4768 test_and_set_bit(HFC_CFG_REPORT_SLIP, 4771 test_and_set_bit(HFC_CFG_REPORT_SLIP,
4769 &hc->chan[hc->dslot].cfg); 4772 &hc->chan[hc->dnum[0]].cfg);
4770 } 4773 }
4771 /* set RDI report */ 4774 /* set RDI report */
4772 if (port[Port_cnt] & 0x020) { 4775 if (port[Port_cnt] & 0x020) {
@@ -4776,7 +4779,7 @@ init_e1_port(struct hfc_multi *hc, struct hm_map *m)
4776 "card(%d) port(%d)\n", 4779 "card(%d) port(%d)\n",
4777 __func__, HFC_cnt + 1, 1); 4780 __func__, HFC_cnt + 1, 1);
4778 test_and_set_bit(HFC_CFG_REPORT_RDI, 4781 test_and_set_bit(HFC_CFG_REPORT_RDI,
4779 &hc->chan[hc->dslot].cfg); 4782 &hc->chan[hc->dnum[0]].cfg);
4780 } 4783 }
4781 /* set CRC-4 Mode */ 4784 /* set CRC-4 Mode */
4782 if (!(port[Port_cnt] & 0x100)) { 4785 if (!(port[Port_cnt] & 0x100)) {
@@ -4785,7 +4788,7 @@ init_e1_port(struct hfc_multi *hc, struct hm_map *m)
4785 " card(%d) port(%d)\n", 4788 " card(%d) port(%d)\n",
4786 __func__, HFC_cnt + 1, 1); 4789 __func__, HFC_cnt + 1, 1);
4787 test_and_set_bit(HFC_CFG_CRC4, 4790 test_and_set_bit(HFC_CFG_CRC4,
4788 &hc->chan[hc->dslot].cfg); 4791 &hc->chan[hc->dnum[0]].cfg);
4789 } else { 4792 } else {
4790 if (debug & DEBUG_HFCMULTI_INIT) 4793 if (debug & DEBUG_HFCMULTI_INIT)
4791 printk(KERN_DEBUG "%s: PORT turn off CRC4" 4794 printk(KERN_DEBUG "%s: PORT turn off CRC4"
@@ -4817,20 +4820,85 @@ init_e1_port(struct hfc_multi *hc, struct hm_map *m)
4817 } 4820 }
4818 /* set elastic jitter buffer */ 4821 /* set elastic jitter buffer */
4819 if (port[Port_cnt] & 0x3000) { 4822 if (port[Port_cnt] & 0x3000) {
4820 hc->chan[hc->dslot].jitter = (port[Port_cnt]>>12) & 0x3; 4823 hc->chan[hc->dnum[0]].jitter = (port[Port_cnt]>>12) & 0x3;
4821 if (debug & DEBUG_HFCMULTI_INIT) 4824 if (debug & DEBUG_HFCMULTI_INIT)
4822 printk(KERN_DEBUG 4825 printk(KERN_DEBUG
4823 "%s: PORT set elastic " 4826 "%s: PORT set elastic "
4824 "buffer to %d: card(%d) port(%d)\n", 4827 "buffer to %d: card(%d) port(%d)\n",
4825 __func__, hc->chan[hc->dslot].jitter, 4828 __func__, hc->chan[hc->dnum[0]].jitter,
4826 HFC_cnt + 1, 1); 4829 HFC_cnt + 1, 1);
4827 } else 4830 } else
4828 hc->chan[hc->dslot].jitter = 2; /* default */ 4831 hc->chan[hc->dnum[0]].jitter = 2; /* default */
4829 snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-e1.%d", HFC_cnt + 1); 4832}
4833
4834static int
4835init_e1_port(struct hfc_multi *hc, struct hm_map *m, int pt)
4836{
4837 struct dchannel *dch;
4838 struct bchannel *bch;
4839 int ch, ret = 0;
4840 char name[MISDN_MAX_IDLEN];
4841 int bcount = 0;
4842
4843 dch = kzalloc(sizeof(struct dchannel), GFP_KERNEL);
4844 if (!dch)
4845 return -ENOMEM;
4846 dch->debug = debug;
4847 mISDN_initdchannel(dch, MAX_DFRAME_LEN_L1, ph_state_change);
4848 dch->hw = hc;
4849 dch->dev.Dprotocols = (1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1);
4850 dch->dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
4851 (1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
4852 dch->dev.D.send = handle_dmsg;
4853 dch->dev.D.ctrl = hfcm_dctrl;
4854 dch->slot = hc->dnum[pt];
4855 hc->chan[hc->dnum[pt]].dch = dch;
4856 hc->chan[hc->dnum[pt]].port = pt;
4857 hc->chan[hc->dnum[pt]].nt_timer = -1;
4858 for (ch = 1; ch <= 31; ch++) {
4859 if (!((1 << ch) & hc->bmask[pt])) /* skip unused channel */
4860 continue;
4861 bch = kzalloc(sizeof(struct bchannel), GFP_KERNEL);
4862 if (!bch) {
4863 printk(KERN_ERR "%s: no memory for bchannel\n",
4864 __func__);
4865 ret = -ENOMEM;
4866 goto free_chan;
4867 }
4868 hc->chan[ch].coeff = kzalloc(512, GFP_KERNEL);
4869 if (!hc->chan[ch].coeff) {
4870 printk(KERN_ERR "%s: no memory for coeffs\n",
4871 __func__);
4872 ret = -ENOMEM;
4873 kfree(bch);
4874 goto free_chan;
4875 }
4876 bch->nr = ch;
4877 bch->slot = ch;
4878 bch->debug = debug;
4879 mISDN_initbchannel(bch, MAX_DATA_MEM);
4880 bch->hw = hc;
4881 bch->ch.send = handle_bmsg;
4882 bch->ch.ctrl = hfcm_bctrl;
4883 bch->ch.nr = ch;
4884 list_add(&bch->ch.list, &dch->dev.bchannels);
4885 hc->chan[ch].bch = bch;
4886 hc->chan[ch].port = pt;
4887 set_channelmap(bch->nr, dch->dev.channelmap);
4888 bcount++;
4889 }
4890 dch->dev.nrbchan = bcount;
4891 if (pt == 0)
4892 init_e1_port_hw(hc, m);
4893 if (hc->ports > 1)
4894 snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-e1.%d-%d",
4895 HFC_cnt + 1, pt+1);
4896 else
4897 snprintf(name, MISDN_MAX_IDLEN - 1, "hfc-e1.%d", HFC_cnt + 1);
4830 ret = mISDN_register_device(&dch->dev, &hc->pci_dev->dev, name); 4898 ret = mISDN_register_device(&dch->dev, &hc->pci_dev->dev, name);
4831 if (ret) 4899 if (ret)
4832 goto free_chan; 4900 goto free_chan;
4833 hc->created[0] = 1; 4901 hc->created[pt] = 1;
4834 return ret; 4902 return ret;
4835free_chan: 4903free_chan:
4836 release_port(hc, dch); 4904 release_port(hc, dch);
@@ -4963,7 +5031,8 @@ hfcmulti_init(struct hm_map *m, struct pci_dev *pdev,
4963 struct hfc_multi *hc; 5031 struct hfc_multi *hc;
4964 u_long flags; 5032 u_long flags;
4965 u_char dips = 0, pmj = 0; /* dip settings, port mode Jumpers */ 5033 u_char dips = 0, pmj = 0; /* dip settings, port mode Jumpers */
4966 int i; 5034 int i, ch;
5035 u_int maskcheck;
4967 5036
4968 if (HFC_cnt >= MAX_CARDS) { 5037 if (HFC_cnt >= MAX_CARDS) {
4969 printk(KERN_ERR "too many cards (max=%d).\n", 5038 printk(KERN_ERR "too many cards (max=%d).\n",
@@ -4997,18 +5066,36 @@ hfcmulti_init(struct hm_map *m, struct pci_dev *pdev,
4997 hc->id = HFC_cnt; 5066 hc->id = HFC_cnt;
4998 hc->pcm = pcm[HFC_cnt]; 5067 hc->pcm = pcm[HFC_cnt];
4999 hc->io_mode = iomode[HFC_cnt]; 5068 hc->io_mode = iomode[HFC_cnt];
5000 if (dslot[HFC_cnt] < 0 && hc->ctype == HFC_TYPE_E1) { 5069 if (hc->ctype == HFC_TYPE_E1 && dmask[E1_cnt]) {
5001 hc->dslot = 0; 5070 /* fragment card */
5002 printk(KERN_INFO "HFC-E1 card has disabled D-channel, but " 5071 pt = 0;
5003 "31 B-channels\n"); 5072 maskcheck = 0;
5004 } 5073 for (ch = 0; ch <= 31; ch++) {
5005 if (dslot[HFC_cnt] > 0 && dslot[HFC_cnt] < 32 5074 if (!((1 << ch) & dmask[E1_cnt]))
5006 && hc->ctype == HFC_TYPE_E1) { 5075 continue;
5007 hc->dslot = dslot[HFC_cnt]; 5076 hc->dnum[pt] = ch;
5008 printk(KERN_INFO "HFC-E1 card has alternating D-channel on " 5077 hc->bmask[pt] = bmask[bmask_cnt++];
5009 "time slot %d\n", dslot[HFC_cnt]); 5078 if ((maskcheck & hc->bmask[pt])
5010 } else 5079 || (dmask[E1_cnt] & hc->bmask[pt])) {
5011 hc->dslot = 16; 5080 printk(KERN_INFO
5081 "HFC-E1 #%d has overlapping B-channels on fragment #%d\n",
5082 E1_cnt + 1, pt);
5083 return -EINVAL;
5084 }
5085 maskcheck |= hc->bmask[pt];
5086 printk(KERN_INFO
5087 "HFC-E1 #%d uses D-channel on slot %d and a B-channel map of 0x%08x\n",
5088 E1_cnt + 1, ch, hc->bmask[pt]);
5089 pt++;
5090 }
5091 hc->ports = pt;
5092 }
5093 if (hc->ctype == HFC_TYPE_E1 && !dmask[E1_cnt]) {
5094 /* default card layout */
5095 hc->dnum[0] = 16;
5096 hc->bmask[0] = 0xfffefffe;
5097 hc->ports = 1;
5098 }
5012 5099
5013 /* set chip specific features */ 5100 /* set chip specific features */
5014 hc->masterclk = -1; 5101 hc->masterclk = -1;
@@ -5091,23 +5178,33 @@ hfcmulti_init(struct hm_map *m, struct pci_dev *pdev,
5091 goto free_card; 5178 goto free_card;
5092 } 5179 }
5093 if (hc->ctype == HFC_TYPE_E1) 5180 if (hc->ctype == HFC_TYPE_E1)
5094 ret_err = init_e1_port(hc, m); 5181 ret_err = init_e1_port(hc, m, pt);
5095 else 5182 else
5096 ret_err = init_multi_port(hc, pt); 5183 ret_err = init_multi_port(hc, pt);
5097 if (debug & DEBUG_HFCMULTI_INIT) 5184 if (debug & DEBUG_HFCMULTI_INIT)
5098 printk(KERN_DEBUG 5185 printk(KERN_DEBUG
5099 "%s: Registering D-channel, card(%d) port(%d)" 5186 "%s: Registering D-channel, card(%d) port(%d) "
5100 "result %d\n", 5187 "result %d\n",
5101 __func__, HFC_cnt + 1, pt, ret_err); 5188 __func__, HFC_cnt + 1, pt + 1, ret_err);
5102 5189
5103 if (ret_err) { 5190 if (ret_err) {
5104 while (pt) { /* release already registered ports */ 5191 while (pt) { /* release already registered ports */
5105 pt--; 5192 pt--;
5106 release_port(hc, hc->chan[(pt << 2) + 2].dch); 5193 if (hc->ctype == HFC_TYPE_E1)
5194 release_port(hc,
5195 hc->chan[hc->dnum[pt]].dch);
5196 else
5197 release_port(hc,
5198 hc->chan[(pt << 2) + 2].dch);
5107 } 5199 }
5108 goto free_card; 5200 goto free_card;
5109 } 5201 }
5110 Port_cnt++; 5202 if (hc->ctype != HFC_TYPE_E1)
5203 Port_cnt++; /* for each S0 port */
5204 }
5205 if (hc->ctype == HFC_TYPE_E1) {
5206 Port_cnt++; /* for each E1 port */
5207 E1_cnt++;
5111 } 5208 }
5112 5209
5113 /* disp switches */ 5210 /* disp switches */
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index e2c83a2d7691..5fe993e2dee9 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -1819,7 +1819,7 @@ channel_ctrl(struct hfc_pci *hc, struct mISDN_ctrl_req *cq)
1819 switch (cq->op) { 1819 switch (cq->op) {
1820 case MISDN_CTRL_GETOP: 1820 case MISDN_CTRL_GETOP:
1821 cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_CONNECT | 1821 cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_CONNECT |
1822 MISDN_CTRL_DISCONNECT; 1822 MISDN_CTRL_DISCONNECT | MISDN_CTRL_L1_TIMER3;
1823 break; 1823 break;
1824 case MISDN_CTRL_LOOP: 1824 case MISDN_CTRL_LOOP:
1825 /* channel 0 disabled loop */ 1825 /* channel 0 disabled loop */
@@ -1896,6 +1896,9 @@ channel_ctrl(struct hfc_pci *hc, struct mISDN_ctrl_req *cq)
1896 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn); 1896 Write_hfc(hc, HFCPCI_CONNECT, hc->hw.conn);
1897 hc->hw.trm &= 0x7f; /* disable IOM-loop */ 1897 hc->hw.trm &= 0x7f; /* disable IOM-loop */
1898 break; 1898 break;
1899 case MISDN_CTRL_L1_TIMER3:
1900 ret = l1_event(hc->dch.l1, HW_TIMER3_VALUE | (cq->p1 & 0xff));
1901 break;
1899 default: 1902 default:
1900 printk(KERN_WARNING "%s: unknown Op %x\n", 1903 printk(KERN_WARNING "%s: unknown Op %x\n",
1901 __func__, cq->op); 1904 __func__, cq->op);
diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c
index 884369f09cad..92d4a78bc0a5 100644
--- a/drivers/isdn/hardware/mISDN/mISDNipac.c
+++ b/drivers/isdn/hardware/mISDN/mISDNipac.c
@@ -603,10 +603,11 @@ isac_l1hw(struct mISDNchannel *ch, struct sk_buff *skb)
603} 603}
604 604
605static int 605static int
606isac_ctrl(struct isac_hw *isac, u32 cmd, u_long para) 606isac_ctrl(struct isac_hw *isac, u32 cmd, unsigned long para)
607{ 607{
608 u8 tl = 0; 608 u8 tl = 0;
609 u_long flags; 609 unsigned long flags;
610 int ret = 0;
610 611
611 switch (cmd) { 612 switch (cmd) {
612 case HW_TESTLOOP: 613 case HW_TESTLOOP:
@@ -626,12 +627,15 @@ isac_ctrl(struct isac_hw *isac, u32 cmd, u_long para)
626 } 627 }
627 spin_unlock_irqrestore(isac->hwlock, flags); 628 spin_unlock_irqrestore(isac->hwlock, flags);
628 break; 629 break;
630 case HW_TIMER3_VALUE:
631 ret = l1_event(isac->dch.l1, HW_TIMER3_VALUE | (para & 0xff));
632 break;
629 default: 633 default:
630 pr_debug("%s: %s unknown command %x %lx\n", isac->name, 634 pr_debug("%s: %s unknown command %x %lx\n", isac->name,
631 __func__, cmd, para); 635 __func__, cmd, para);
632 return -1; 636 ret = -1;
633 } 637 }
634 return 0; 638 return ret;
635} 639}
636 640
637static int 641static int
@@ -1526,7 +1530,7 @@ channel_ctrl(struct ipac_hw *ipac, struct mISDN_ctrl_req *cq)
1526 1530
1527 switch (cq->op) { 1531 switch (cq->op) {
1528 case MISDN_CTRL_GETOP: 1532 case MISDN_CTRL_GETOP:
1529 cq->op = MISDN_CTRL_LOOP; 1533 cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3;
1530 break; 1534 break;
1531 case MISDN_CTRL_LOOP: 1535 case MISDN_CTRL_LOOP:
1532 /* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */ 1536 /* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */
@@ -1536,6 +1540,9 @@ channel_ctrl(struct ipac_hw *ipac, struct mISDN_ctrl_req *cq)
1536 } 1540 }
1537 ret = ipac->ctrl(ipac, HW_TESTLOOP, cq->channel); 1541 ret = ipac->ctrl(ipac, HW_TESTLOOP, cq->channel);
1538 break; 1542 break;
1543 case MISDN_CTRL_L1_TIMER3:
1544 ret = ipac->isac.ctrl(&ipac->isac, HW_TIMER3_VALUE, cq->p1);
1545 break;
1539 default: 1546 default:
1540 pr_info("%s: unknown CTRL OP %x\n", ipac->name, cq->op); 1547 pr_info("%s: unknown CTRL OP %x\n", ipac->name, cq->op);
1541 ret = -EINVAL; 1548 ret = -EINVAL;
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index c726e09d0981..27998d7188a5 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -837,7 +837,7 @@ channel_ctrl(struct tiger_hw *card, struct mISDN_ctrl_req *cq)
837 837
838 switch (cq->op) { 838 switch (cq->op) {
839 case MISDN_CTRL_GETOP: 839 case MISDN_CTRL_GETOP:
840 cq->op = MISDN_CTRL_LOOP; 840 cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3;
841 break; 841 break;
842 case MISDN_CTRL_LOOP: 842 case MISDN_CTRL_LOOP:
843 /* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */ 843 /* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */
@@ -847,6 +847,9 @@ channel_ctrl(struct tiger_hw *card, struct mISDN_ctrl_req *cq)
847 } 847 }
848 ret = card->isac.ctrl(&card->isac, HW_TESTLOOP, cq->channel); 848 ret = card->isac.ctrl(&card->isac, HW_TESTLOOP, cq->channel);
849 break; 849 break;
850 case MISDN_CTRL_L1_TIMER3:
851 ret = card->isac.ctrl(&card->isac, HW_TIMER3_VALUE, cq->p1);
852 break;
850 default: 853 default:
851 pr_info("%s: %s unknown Op %x\n", card->name, __func__, cq->op); 854 pr_info("%s: %s unknown Op %x\n", card->name, __func__, cq->op);
852 ret = -EINVAL; 855 ret = -EINVAL;
diff --git a/drivers/isdn/hardware/mISDN/speedfax.c b/drivers/isdn/hardware/mISDN/speedfax.c
index 04689935148b..93f344d74e54 100644
--- a/drivers/isdn/hardware/mISDN/speedfax.c
+++ b/drivers/isdn/hardware/mISDN/speedfax.c
@@ -224,7 +224,7 @@ channel_ctrl(struct sfax_hw *sf, struct mISDN_ctrl_req *cq)
224 224
225 switch (cq->op) { 225 switch (cq->op) {
226 case MISDN_CTRL_GETOP: 226 case MISDN_CTRL_GETOP:
227 cq->op = MISDN_CTRL_LOOP; 227 cq->op = MISDN_CTRL_LOOP | MISDN_CTRL_L1_TIMER3;
228 break; 228 break;
229 case MISDN_CTRL_LOOP: 229 case MISDN_CTRL_LOOP:
230 /* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */ 230 /* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */
@@ -234,6 +234,9 @@ channel_ctrl(struct sfax_hw *sf, struct mISDN_ctrl_req *cq)
234 } 234 }
235 ret = sf->isac.ctrl(&sf->isac, HW_TESTLOOP, cq->channel); 235 ret = sf->isac.ctrl(&sf->isac, HW_TESTLOOP, cq->channel);
236 break; 236 break;
237 case MISDN_CTRL_L1_TIMER3:
238 ret = sf->isac.ctrl(&sf->isac, HW_TIMER3_VALUE, cq->p1);
239 break;
237 default: 240 default:
238 pr_info("%s: unknown Op %x\n", sf->name, cq->op); 241 pr_info("%s: unknown Op %x\n", sf->name, cq->op);
239 ret = -EINVAL; 242 ret = -EINVAL;
diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c
index 2183357f0799..1d044670ff66 100644
--- a/drivers/isdn/hardware/mISDN/w6692.c
+++ b/drivers/isdn/hardware/mISDN/w6692.c
@@ -1035,7 +1035,10 @@ channel_ctrl(struct w6692_hw *card, struct mISDN_ctrl_req *cq)
1035 1035
1036 switch (cq->op) { 1036 switch (cq->op) {
1037 case MISDN_CTRL_GETOP: 1037 case MISDN_CTRL_GETOP:
1038 cq->op = 0; 1038 cq->op = MISDN_CTRL_L1_TIMER3;
1039 break;
1040 case MISDN_CTRL_L1_TIMER3:
1041 ret = l1_event(card->dch.l1, HW_TIMER3_VALUE | (cq->p1 & 0xff));
1039 break; 1042 break;
1040 default: 1043 default:
1041 pr_info("%s: unknown CTRL OP %x\n", card->name, cq->op); 1044 pr_info("%s: unknown CTRL OP %x\n", card->name, cq->op);
diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c
index ba91333e3e41..88e4f0ee073c 100644
--- a/drivers/isdn/hysdn/hysdn_proclog.c
+++ b/drivers/isdn/hysdn/hysdn_proclog.c
@@ -156,17 +156,9 @@ static ssize_t
156hysdn_log_write(struct file *file, const char __user *buf, size_t count, loff_t *off) 156hysdn_log_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
157{ 157{
158 int rc; 158 int rc;
159 unsigned char valbuf[128];
160 hysdn_card *card = file->private_data; 159 hysdn_card *card = file->private_data;
161 160
162 if (count > (sizeof(valbuf) - 1)) 161 rc = kstrtoul_from_user(buf, count, 0, &card->debug_flags);
163 count = sizeof(valbuf) - 1; /* limit length */
164 if (copy_from_user(valbuf, buf, count))
165 return (-EFAULT); /* copy failed */
166
167 valbuf[count] = 0; /* terminating 0 */
168
169 rc = kstrtoul(valbuf, 0, &card->debug_flags);
170 if (rc < 0) 162 if (rc < 0)
171 return rc; 163 return rc;
172 hysdn_addlog(card, "debug set to 0x%lx", card->debug_flags); 164 hysdn_addlog(card, "debug set to 0x%lx", card->debug_flags);
diff --git a/drivers/isdn/mISDN/core.c b/drivers/isdn/mISDN/core.c
index a24530f05db0..c401634c00ec 100644
--- a/drivers/isdn/mISDN/core.c
+++ b/drivers/isdn/mISDN/core.c
@@ -355,6 +355,22 @@ mISDN_unregister_Bprotocol(struct Bprotocol *bp)
355} 355}
356EXPORT_SYMBOL(mISDN_unregister_Bprotocol); 356EXPORT_SYMBOL(mISDN_unregister_Bprotocol);
357 357
358static const char *msg_no_channel = "<no channel>";
359static const char *msg_no_stack = "<no stack>";
360static const char *msg_no_stackdev = "<no stack device>";
361
362const char *mISDNDevName4ch(struct mISDNchannel *ch)
363{
364 if (!ch)
365 return msg_no_channel;
366 if (!ch->st)
367 return msg_no_stack;
368 if (!ch->st->dev)
369 return msg_no_stackdev;
370 return dev_name(&ch->st->dev->dev);
371};
372EXPORT_SYMBOL(mISDNDevName4ch);
373
358static int 374static int
359mISDNInit(void) 375mISDNInit(void)
360{ 376{
diff --git a/drivers/isdn/mISDN/dsp.h b/drivers/isdn/mISDN/dsp.h
index afe4173ae007..fc1733a08845 100644
--- a/drivers/isdn/mISDN/dsp.h
+++ b/drivers/isdn/mISDN/dsp.h
@@ -76,7 +76,9 @@ extern u8 dsp_silence;
76#define MAX_SECONDS_JITTER_CHECK 5 76#define MAX_SECONDS_JITTER_CHECK 5
77 77
78extern struct timer_list dsp_spl_tl; 78extern struct timer_list dsp_spl_tl;
79extern u32 dsp_spl_jiffies; 79
80/* the datatype need to match jiffies datatype */
81extern unsigned long dsp_spl_jiffies;
80 82
81/* the structure of conferences: 83/* the structure of conferences:
82 * 84 *
diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
index 334feab060a1..a4f05c54c32b 100644
--- a/drivers/isdn/mISDN/dsp_cmx.c
+++ b/drivers/isdn/mISDN/dsp_cmx.c
@@ -742,8 +742,8 @@ dsp_cmx_hardware(struct dsp_conf *conf, struct dsp *dsp)
742 member->dsp->pcm_slot_tx, 742 member->dsp->pcm_slot_tx,
743 member->dsp->pcm_bank_tx, 743 member->dsp->pcm_bank_tx,
744 member->dsp->pcm_bank_rx); 744 member->dsp->pcm_bank_rx);
745 conf->hardware = 0; 745 conf->hardware = 1;
746 conf->software = 1; 746 conf->software = tx_data;
747 return; 747 return;
748 } 748 }
749 /* find a new slot */ 749 /* find a new slot */
@@ -834,8 +834,8 @@ dsp_cmx_hardware(struct dsp_conf *conf, struct dsp *dsp)
834 nextm->dsp->name, 834 nextm->dsp->name,
835 member->dsp->pcm_slot_tx, 835 member->dsp->pcm_slot_tx,
836 member->dsp->pcm_slot_rx); 836 member->dsp->pcm_slot_rx);
837 conf->hardware = 0; 837 conf->hardware = 1;
838 conf->software = 1; 838 conf->software = tx_data;
839 return; 839 return;
840 } 840 }
841 /* find two new slot */ 841 /* find two new slot */
@@ -939,8 +939,11 @@ dsp_cmx_hardware(struct dsp_conf *conf, struct dsp *dsp)
939 /* for more than two members.. */ 939 /* for more than two members.. */
940 940
941 /* if all members already have the same conference */ 941 /* if all members already have the same conference */
942 if (all_conf) 942 if (all_conf) {
943 conf->hardware = 1;
944 conf->software = tx_data;
943 return; 945 return;
946 }
944 947
945 /* 948 /*
946 * if there is an existing conference, but not all members have joined 949 * if there is an existing conference, but not all members have joined
@@ -1013,6 +1016,8 @@ dsp_cmx_hardware(struct dsp_conf *conf, struct dsp *dsp)
1013 dsp_cmx_hw_message(member->dsp, 1016 dsp_cmx_hw_message(member->dsp,
1014 MISDN_CTRL_HFC_CONF_JOIN, current_conf, 0, 0, 0); 1017 MISDN_CTRL_HFC_CONF_JOIN, current_conf, 0, 0, 0);
1015 } 1018 }
1019 conf->hardware = 1;
1020 conf->software = tx_data;
1016 return; 1021 return;
1017 } 1022 }
1018 1023
@@ -1328,7 +1333,7 @@ dsp_cmx_send_member(struct dsp *dsp, int len, s32 *c, int members)
1328 } 1333 }
1329 if (dsp->conf && dsp->conf->software && dsp->conf->hardware) 1334 if (dsp->conf && dsp->conf->software && dsp->conf->hardware)
1330 tx_data_only = 1; 1335 tx_data_only = 1;
1331 if (dsp->conf->software && dsp->echo.hardware) 1336 if (dsp->echo.software && dsp->echo.hardware)
1332 tx_data_only = 1; 1337 tx_data_only = 1;
1333 } 1338 }
1334 1339
@@ -1619,7 +1624,7 @@ send_packet:
1619 1624
1620static u32 jittercount; /* counter for jitter check */ 1625static u32 jittercount; /* counter for jitter check */
1621struct timer_list dsp_spl_tl; 1626struct timer_list dsp_spl_tl;
1622u32 dsp_spl_jiffies; /* calculate the next time to fire */ 1627unsigned long dsp_spl_jiffies; /* calculate the next time to fire */
1623static u16 dsp_count; /* last sample count */ 1628static u16 dsp_count; /* last sample count */
1624static int dsp_count_valid; /* if we have last sample count */ 1629static int dsp_count_valid; /* if we have last sample count */
1625 1630
diff --git a/drivers/isdn/mISDN/dsp_dtmf.c b/drivers/isdn/mISDN/dsp_dtmf.c
index 887860bdc63b..642f30be5ce2 100644
--- a/drivers/isdn/mISDN/dsp_dtmf.c
+++ b/drivers/isdn/mISDN/dsp_dtmf.c
@@ -222,16 +222,25 @@ coefficients:
222 goto storedigit; 222 goto storedigit;
223 } 223 }
224 224
225 if (dsp_debug & DEBUG_DSP_DTMFCOEFF) 225 if (dsp_debug & DEBUG_DSP_DTMFCOEFF) {
226 s32 tresh_100 = tresh/100;
227
228 if (tresh_100 == 0) {
229 tresh_100 = 1;
230 printk(KERN_DEBUG
231 "tresh(%d) too small set tresh/100 to 1\n",
232 tresh);
233 }
226 printk(KERN_DEBUG "a %3d %3d %3d %3d %3d %3d %3d %3d" 234 printk(KERN_DEBUG "a %3d %3d %3d %3d %3d %3d %3d %3d"
227 " tr:%3d r %3d %3d %3d %3d %3d %3d %3d %3d\n", 235 " tr:%3d r %3d %3d %3d %3d %3d %3d %3d %3d\n",
228 result[0] / 10000, result[1] / 10000, result[2] / 10000, 236 result[0] / 10000, result[1] / 10000, result[2] / 10000,
229 result[3] / 10000, result[4] / 10000, result[5] / 10000, 237 result[3] / 10000, result[4] / 10000, result[5] / 10000,
230 result[6] / 10000, result[7] / 10000, tresh / 10000, 238 result[6] / 10000, result[7] / 10000, tresh / 10000,
231 result[0] / (tresh / 100), result[1] / (tresh / 100), 239 result[0] / (tresh_100), result[1] / (tresh_100),
232 result[2] / (tresh / 100), result[3] / (tresh / 100), 240 result[2] / (tresh_100), result[3] / (tresh_100),
233 result[4] / (tresh / 100), result[5] / (tresh / 100), 241 result[4] / (tresh_100), result[5] / (tresh_100),
234 result[6] / (tresh / 100), result[7] / (tresh / 100)); 242 result[6] / (tresh_100), result[7] / (tresh_100));
243 }
235 244
236 /* calc digit (lowgroup/highgroup) */ 245 /* calc digit (lowgroup/highgroup) */
237 lowgroup = -1; 246 lowgroup = -1;
diff --git a/drivers/isdn/mISDN/layer1.c b/drivers/isdn/mISDN/layer1.c
index 0fc49b375514..bebc57b72138 100644
--- a/drivers/isdn/mISDN/layer1.c
+++ b/drivers/isdn/mISDN/layer1.c
@@ -28,13 +28,15 @@ static u_int *debug;
28struct layer1 { 28struct layer1 {
29 u_long Flags; 29 u_long Flags;
30 struct FsmInst l1m; 30 struct FsmInst l1m;
31 struct FsmTimer timer; 31 struct FsmTimer timer3;
32 struct FsmTimer timerX;
32 int delay; 33 int delay;
34 int t3_value;
33 struct dchannel *dch; 35 struct dchannel *dch;
34 dchannel_l1callback *dcb; 36 dchannel_l1callback *dcb;
35}; 37};
36 38
37#define TIMER3_VALUE 7000 39#define TIMER3_DEFAULT_VALUE 7000
38 40
39static 41static
40struct Fsm l1fsm_s = {NULL, 0, 0, NULL, NULL}; 42struct Fsm l1fsm_s = {NULL, 0, 0, NULL, NULL};
@@ -134,7 +136,7 @@ l1_deact_req_s(struct FsmInst *fi, int event, void *arg)
134 struct layer1 *l1 = fi->userdata; 136 struct layer1 *l1 = fi->userdata;
135 137
136 mISDN_FsmChangeState(fi, ST_L1_F3); 138 mISDN_FsmChangeState(fi, ST_L1_F3);
137 mISDN_FsmRestartTimer(&l1->timer, 550, EV_TIMER_DEACT, NULL, 2); 139 mISDN_FsmRestartTimer(&l1->timerX, 550, EV_TIMER_DEACT, NULL, 2);
138 test_and_set_bit(FLG_L1_DEACTTIMER, &l1->Flags); 140 test_and_set_bit(FLG_L1_DEACTTIMER, &l1->Flags);
139} 141}
140 142
@@ -179,11 +181,11 @@ l1_info4_ind(struct FsmInst *fi, int event, void *arg)
179 mISDN_FsmChangeState(fi, ST_L1_F7); 181 mISDN_FsmChangeState(fi, ST_L1_F7);
180 l1->dcb(l1->dch, INFO3_P8); 182 l1->dcb(l1->dch, INFO3_P8);
181 if (test_and_clear_bit(FLG_L1_DEACTTIMER, &l1->Flags)) 183 if (test_and_clear_bit(FLG_L1_DEACTTIMER, &l1->Flags))
182 mISDN_FsmDelTimer(&l1->timer, 4); 184 mISDN_FsmDelTimer(&l1->timerX, 4);
183 if (!test_bit(FLG_L1_ACTIVATED, &l1->Flags)) { 185 if (!test_bit(FLG_L1_ACTIVATED, &l1->Flags)) {
184 if (test_and_clear_bit(FLG_L1_T3RUN, &l1->Flags)) 186 if (test_and_clear_bit(FLG_L1_T3RUN, &l1->Flags))
185 mISDN_FsmDelTimer(&l1->timer, 3); 187 mISDN_FsmDelTimer(&l1->timer3, 3);
186 mISDN_FsmRestartTimer(&l1->timer, 110, EV_TIMER_ACT, NULL, 2); 188 mISDN_FsmRestartTimer(&l1->timerX, 110, EV_TIMER_ACT, NULL, 2);
187 test_and_set_bit(FLG_L1_ACTTIMER, &l1->Flags); 189 test_and_set_bit(FLG_L1_ACTTIMER, &l1->Flags);
188 } 190 }
189} 191}
@@ -201,7 +203,7 @@ l1_timer3(struct FsmInst *fi, int event, void *arg)
201 } 203 }
202 if (l1->l1m.state != ST_L1_F6) { 204 if (l1->l1m.state != ST_L1_F6) {
203 mISDN_FsmChangeState(fi, ST_L1_F3); 205 mISDN_FsmChangeState(fi, ST_L1_F3);
204 l1->dcb(l1->dch, HW_POWERUP_REQ); 206 /* do not force anything here, we need send INFO 0 */
205 } 207 }
206} 208}
207 209
@@ -233,8 +235,9 @@ l1_activate_s(struct FsmInst *fi, int event, void *arg)
233{ 235{
234 struct layer1 *l1 = fi->userdata; 236 struct layer1 *l1 = fi->userdata;
235 237
236 mISDN_FsmRestartTimer(&l1->timer, TIMER3_VALUE, EV_TIMER3, NULL, 2); 238 mISDN_FsmRestartTimer(&l1->timer3, l1->t3_value, EV_TIMER3, NULL, 2);
237 test_and_set_bit(FLG_L1_T3RUN, &l1->Flags); 239 test_and_set_bit(FLG_L1_T3RUN, &l1->Flags);
240 /* Tell HW to send INFO 1 */
238 l1->dcb(l1->dch, HW_RESET_REQ); 241 l1->dcb(l1->dch, HW_RESET_REQ);
239} 242}
240 243
@@ -302,7 +305,8 @@ static struct FsmNode L1SFnList[] =
302 305
303static void 306static void
304release_l1(struct layer1 *l1) { 307release_l1(struct layer1 *l1) {
305 mISDN_FsmDelTimer(&l1->timer, 0); 308 mISDN_FsmDelTimer(&l1->timerX, 0);
309 mISDN_FsmDelTimer(&l1->timer3, 0);
306 if (l1->dch) 310 if (l1->dch)
307 l1->dch->l1 = NULL; 311 l1->dch->l1 = NULL;
308 module_put(THIS_MODULE); 312 module_put(THIS_MODULE);
@@ -356,6 +360,16 @@ l1_event(struct layer1 *l1, u_int event)
356 release_l1(l1); 360 release_l1(l1);
357 break; 361 break;
358 default: 362 default:
363 if ((event & ~HW_TIMER3_VMASK) == HW_TIMER3_VALUE) {
364 int val = event & HW_TIMER3_VMASK;
365
366 if (val < 5)
367 val = 5;
368 if (val > 30)
369 val = 30;
370 l1->t3_value = val;
371 break;
372 }
359 if (*debug & DEBUG_L1) 373 if (*debug & DEBUG_L1)
360 printk(KERN_DEBUG "%s %x unhandled\n", 374 printk(KERN_DEBUG "%s %x unhandled\n",
361 __func__, event); 375 __func__, event);
@@ -377,13 +391,15 @@ create_l1(struct dchannel *dch, dchannel_l1callback *dcb) {
377 nl1->l1m.fsm = &l1fsm_s; 391 nl1->l1m.fsm = &l1fsm_s;
378 nl1->l1m.state = ST_L1_F3; 392 nl1->l1m.state = ST_L1_F3;
379 nl1->Flags = 0; 393 nl1->Flags = 0;
394 nl1->t3_value = TIMER3_DEFAULT_VALUE;
380 nl1->l1m.debug = *debug & DEBUG_L1_FSM; 395 nl1->l1m.debug = *debug & DEBUG_L1_FSM;
381 nl1->l1m.userdata = nl1; 396 nl1->l1m.userdata = nl1;
382 nl1->l1m.userint = 0; 397 nl1->l1m.userint = 0;
383 nl1->l1m.printdebug = l1m_debug; 398 nl1->l1m.printdebug = l1m_debug;
384 nl1->dch = dch; 399 nl1->dch = dch;
385 nl1->dcb = dcb; 400 nl1->dcb = dcb;
386 mISDN_FsmInitTimer(&nl1->l1m, &nl1->timer); 401 mISDN_FsmInitTimer(&nl1->l1m, &nl1->timer3);
402 mISDN_FsmInitTimer(&nl1->l1m, &nl1->timerX);
387 __module_get(THIS_MODULE); 403 __module_get(THIS_MODULE);
388 dch->l1 = nl1; 404 dch->l1 = nl1;
389 return 0; 405 return 0;
diff --git a/drivers/isdn/mISDN/layer2.c b/drivers/isdn/mISDN/layer2.c
index 39d7375fa551..0dc8abca1407 100644
--- a/drivers/isdn/mISDN/layer2.c
+++ b/drivers/isdn/mISDN/layer2.c
@@ -58,6 +58,8 @@ enum {
58 EV_L1_DEACTIVATE, 58 EV_L1_DEACTIVATE,
59 EV_L2_T200, 59 EV_L2_T200,
60 EV_L2_T203, 60 EV_L2_T203,
61 EV_L2_T200I,
62 EV_L2_T203I,
61 EV_L2_SET_OWN_BUSY, 63 EV_L2_SET_OWN_BUSY,
62 EV_L2_CLEAR_OWN_BUSY, 64 EV_L2_CLEAR_OWN_BUSY,
63 EV_L2_FRAME_ERROR, 65 EV_L2_FRAME_ERROR,
@@ -86,6 +88,8 @@ static char *strL2Event[] =
86 "EV_L1_DEACTIVATE", 88 "EV_L1_DEACTIVATE",
87 "EV_L2_T200", 89 "EV_L2_T200",
88 "EV_L2_T203", 90 "EV_L2_T203",
91 "EV_L2_T200I",
92 "EV_L2_T203I",
89 "EV_L2_SET_OWN_BUSY", 93 "EV_L2_SET_OWN_BUSY",
90 "EV_L2_CLEAR_OWN_BUSY", 94 "EV_L2_CLEAR_OWN_BUSY",
91 "EV_L2_FRAME_ERROR", 95 "EV_L2_FRAME_ERROR",
@@ -106,8 +110,8 @@ l2m_debug(struct FsmInst *fi, char *fmt, ...)
106 vaf.fmt = fmt; 110 vaf.fmt = fmt;
107 vaf.va = &va; 111 vaf.va = &va;
108 112
109 printk(KERN_DEBUG "l2 (sapi %d tei %d): %pV\n", 113 printk(KERN_DEBUG "%s l2 (sapi %d tei %d): %pV\n",
110 l2->sapi, l2->tei, &vaf); 114 mISDNDevName4ch(&l2->ch), l2->sapi, l2->tei, &vaf);
111 115
112 va_end(va); 116 va_end(va);
113} 117}
@@ -150,7 +154,8 @@ l2up(struct layer2 *l2, u_int prim, struct sk_buff *skb)
150 mISDN_HEAD_ID(skb) = (l2->ch.nr << 16) | l2->ch.addr; 154 mISDN_HEAD_ID(skb) = (l2->ch.nr << 16) | l2->ch.addr;
151 err = l2->up->send(l2->up, skb); 155 err = l2->up->send(l2->up, skb);
152 if (err) { 156 if (err) {
153 printk(KERN_WARNING "%s: err=%d\n", __func__, err); 157 printk(KERN_WARNING "%s: dev %s err=%d\n", __func__,
158 mISDNDevName4ch(&l2->ch), err);
154 dev_kfree_skb(skb); 159 dev_kfree_skb(skb);
155 } 160 }
156} 161}
@@ -174,7 +179,8 @@ l2up_create(struct layer2 *l2, u_int prim, int len, void *arg)
174 memcpy(skb_put(skb, len), arg, len); 179 memcpy(skb_put(skb, len), arg, len);
175 err = l2->up->send(l2->up, skb); 180 err = l2->up->send(l2->up, skb);
176 if (err) { 181 if (err) {
177 printk(KERN_WARNING "%s: err=%d\n", __func__, err); 182 printk(KERN_WARNING "%s: dev %s err=%d\n", __func__,
183 mISDNDevName4ch(&l2->ch), err);
178 dev_kfree_skb(skb); 184 dev_kfree_skb(skb);
179 } 185 }
180} 186}
@@ -185,7 +191,8 @@ l2down_skb(struct layer2 *l2, struct sk_buff *skb) {
185 191
186 ret = l2->ch.recv(l2->ch.peer, skb); 192 ret = l2->ch.recv(l2->ch.peer, skb);
187 if (ret && (*debug & DEBUG_L2_RECV)) 193 if (ret && (*debug & DEBUG_L2_RECV))
188 printk(KERN_DEBUG "l2down_skb: ret(%d)\n", ret); 194 printk(KERN_DEBUG "l2down_skb: dev %s ret(%d)\n",
195 mISDNDevName4ch(&l2->ch), ret);
189 return ret; 196 return ret;
190} 197}
191 198
@@ -276,12 +283,37 @@ ph_data_confirm(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb) {
276 return ret; 283 return ret;
277} 284}
278 285
286static void
287l2_timeout(struct FsmInst *fi, int event, void *arg)
288{
289 struct layer2 *l2 = fi->userdata;
290 struct sk_buff *skb;
291 struct mISDNhead *hh;
292
293 skb = mI_alloc_skb(0, GFP_ATOMIC);
294 if (!skb) {
295 printk(KERN_WARNING "%s: L2(%d,%d) nr:%x timer %s no skb\n",
296 mISDNDevName4ch(&l2->ch), l2->sapi, l2->tei,
297 l2->ch.nr, event == EV_L2_T200 ? "T200" : "T203");
298 return;
299 }
300 hh = mISDN_HEAD_P(skb);
301 hh->prim = event == EV_L2_T200 ? DL_TIMER200_IND : DL_TIMER203_IND;
302 hh->id = l2->ch.nr;
303 if (*debug & DEBUG_TIMER)
304 printk(KERN_DEBUG "%s: L2(%d,%d) nr:%x timer %s expired\n",
305 mISDNDevName4ch(&l2->ch), l2->sapi, l2->tei,
306 l2->ch.nr, event == EV_L2_T200 ? "T200" : "T203");
307 if (l2->ch.st)
308 l2->ch.st->own.recv(&l2->ch.st->own, skb);
309}
310
279static int 311static int
280l2mgr(struct layer2 *l2, u_int prim, void *arg) { 312l2mgr(struct layer2 *l2, u_int prim, void *arg) {
281 long c = (long)arg; 313 long c = (long)arg;
282 314
283 printk(KERN_WARNING 315 printk(KERN_WARNING "l2mgr: dev %s addr:%x prim %x %c\n",
284 "l2mgr: addr:%x prim %x %c\n", l2->id, prim, (char)c); 316 mISDNDevName4ch(&l2->ch), l2->id, prim, (char)c);
285 if (test_bit(FLG_LAPD, &l2->flag) && 317 if (test_bit(FLG_LAPD, &l2->flag) &&
286 !test_bit(FLG_FIXED_TEI, &l2->flag)) { 318 !test_bit(FLG_FIXED_TEI, &l2->flag)) {
287 switch (c) { 319 switch (c) {
@@ -603,8 +635,8 @@ send_uframe(struct layer2 *l2, struct sk_buff *skb, u_char cmd, u_char cr)
603 else { 635 else {
604 skb = mI_alloc_skb(i, GFP_ATOMIC); 636 skb = mI_alloc_skb(i, GFP_ATOMIC);
605 if (!skb) { 637 if (!skb) {
606 printk(KERN_WARNING "%s: can't alloc skbuff\n", 638 printk(KERN_WARNING "%s: can't alloc skbuff in %s\n",
607 __func__); 639 mISDNDevName4ch(&l2->ch), __func__);
608 return; 640 return;
609 } 641 }
610 } 642 }
@@ -1089,8 +1121,8 @@ enquiry_cr(struct layer2 *l2, u_char typ, u_char cr, u_char pf)
1089 tmp[i++] = (l2->vr << 5) | typ | (pf ? 0x10 : 0); 1121 tmp[i++] = (l2->vr << 5) | typ | (pf ? 0x10 : 0);
1090 skb = mI_alloc_skb(i, GFP_ATOMIC); 1122 skb = mI_alloc_skb(i, GFP_ATOMIC);
1091 if (!skb) { 1123 if (!skb) {
1092 printk(KERN_WARNING 1124 printk(KERN_WARNING "%s: isdnl2 can't alloc sbbuff in %s\n",
1093 "isdnl2 can't alloc sbbuff for enquiry_cr\n"); 1125 mISDNDevName4ch(&l2->ch), __func__);
1094 return; 1126 return;
1095 } 1127 }
1096 memcpy(skb_put(skb, i), tmp, i); 1128 memcpy(skb_put(skb, i), tmp, i);
@@ -1150,7 +1182,7 @@ invoke_retransmission(struct layer2 *l2, unsigned int nr)
1150 else 1182 else
1151 printk(KERN_WARNING 1183 printk(KERN_WARNING
1152 "%s: windowar[%d] is NULL\n", 1184 "%s: windowar[%d] is NULL\n",
1153 __func__, p1); 1185 mISDNDevName4ch(&l2->ch), p1);
1154 l2->windowar[p1] = NULL; 1186 l2->windowar[p1] = NULL;
1155 } 1187 }
1156 mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL); 1188 mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
@@ -1461,8 +1493,8 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
1461 p1 = (l2->vs - l2->va) % 8; 1493 p1 = (l2->vs - l2->va) % 8;
1462 p1 = (p1 + l2->sow) % l2->window; 1494 p1 = (p1 + l2->sow) % l2->window;
1463 if (l2->windowar[p1]) { 1495 if (l2->windowar[p1]) {
1464 printk(KERN_WARNING "isdnl2 try overwrite ack queue entry %d\n", 1496 printk(KERN_WARNING "%s: l2 try overwrite ack queue entry %d\n",
1465 p1); 1497 mISDNDevName4ch(&l2->ch), p1);
1466 dev_kfree_skb(l2->windowar[p1]); 1498 dev_kfree_skb(l2->windowar[p1]);
1467 } 1499 }
1468 l2->windowar[p1] = skb; 1500 l2->windowar[p1] = skb;
@@ -1482,12 +1514,14 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
1482 memcpy(skb_push(nskb, i), header, i); 1514 memcpy(skb_push(nskb, i), header, i);
1483 else { 1515 else {
1484 printk(KERN_WARNING 1516 printk(KERN_WARNING
1485 "isdnl2 pull_iqueue skb header(%d/%d) too short\n", i, p1); 1517 "%s: L2 pull_iqueue skb header(%d/%d) too short\n",
1518 mISDNDevName4ch(&l2->ch), i, p1);
1486 oskb = nskb; 1519 oskb = nskb;
1487 nskb = mI_alloc_skb(oskb->len + i, GFP_ATOMIC); 1520 nskb = mI_alloc_skb(oskb->len + i, GFP_ATOMIC);
1488 if (!nskb) { 1521 if (!nskb) {
1489 dev_kfree_skb(oskb); 1522 dev_kfree_skb(oskb);
1490 printk(KERN_WARNING "%s: no skb mem\n", __func__); 1523 printk(KERN_WARNING "%s: no skb mem in %s\n",
1524 mISDNDevName4ch(&l2->ch), __func__);
1491 return; 1525 return;
1492 } 1526 }
1493 memcpy(skb_put(nskb, i), header, i); 1527 memcpy(skb_put(nskb, i), header, i);
@@ -1814,11 +1848,16 @@ static struct FsmNode L2FnList[] =
1814 {ST_L2_8, EV_L2_SUPER, l2_st8_got_super}, 1848 {ST_L2_8, EV_L2_SUPER, l2_st8_got_super},
1815 {ST_L2_7, EV_L2_I, l2_got_iframe}, 1849 {ST_L2_7, EV_L2_I, l2_got_iframe},
1816 {ST_L2_8, EV_L2_I, l2_got_iframe}, 1850 {ST_L2_8, EV_L2_I, l2_got_iframe},
1817 {ST_L2_5, EV_L2_T200, l2_st5_tout_200}, 1851 {ST_L2_5, EV_L2_T200, l2_timeout},
1818 {ST_L2_6, EV_L2_T200, l2_st6_tout_200}, 1852 {ST_L2_6, EV_L2_T200, l2_timeout},
1819 {ST_L2_7, EV_L2_T200, l2_st7_tout_200}, 1853 {ST_L2_7, EV_L2_T200, l2_timeout},
1820 {ST_L2_8, EV_L2_T200, l2_st8_tout_200}, 1854 {ST_L2_8, EV_L2_T200, l2_timeout},
1821 {ST_L2_7, EV_L2_T203, l2_st7_tout_203}, 1855 {ST_L2_7, EV_L2_T203, l2_timeout},
1856 {ST_L2_5, EV_L2_T200I, l2_st5_tout_200},
1857 {ST_L2_6, EV_L2_T200I, l2_st6_tout_200},
1858 {ST_L2_7, EV_L2_T200I, l2_st7_tout_200},
1859 {ST_L2_8, EV_L2_T200I, l2_st8_tout_200},
1860 {ST_L2_7, EV_L2_T203I, l2_st7_tout_203},
1822 {ST_L2_7, EV_L2_ACK_PULL, l2_pull_iqueue}, 1861 {ST_L2_7, EV_L2_ACK_PULL, l2_pull_iqueue},
1823 {ST_L2_7, EV_L2_SET_OWN_BUSY, l2_set_own_busy}, 1862 {ST_L2_7, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
1824 {ST_L2_8, EV_L2_SET_OWN_BUSY, l2_set_own_busy}, 1863 {ST_L2_8, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
@@ -1858,7 +1897,8 @@ ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb)
1858 ptei = *datap++; 1897 ptei = *datap++;
1859 if ((psapi & 1) || !(ptei & 1)) { 1898 if ((psapi & 1) || !(ptei & 1)) {
1860 printk(KERN_WARNING 1899 printk(KERN_WARNING
1861 "l2 D-channel frame wrong EA0/EA1\n"); 1900 "%s l2 D-channel frame wrong EA0/EA1\n",
1901 mISDNDevName4ch(&l2->ch));
1862 return ret; 1902 return ret;
1863 } 1903 }
1864 psapi >>= 2; 1904 psapi >>= 2;
@@ -1867,7 +1907,8 @@ ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb)
1867 /* not our business */ 1907 /* not our business */
1868 if (*debug & DEBUG_L2) 1908 if (*debug & DEBUG_L2)
1869 printk(KERN_DEBUG "%s: sapi %d/%d mismatch\n", 1909 printk(KERN_DEBUG "%s: sapi %d/%d mismatch\n",
1870 __func__, psapi, l2->sapi); 1910 mISDNDevName4ch(&l2->ch), psapi,
1911 l2->sapi);
1871 dev_kfree_skb(skb); 1912 dev_kfree_skb(skb);
1872 return 0; 1913 return 0;
1873 } 1914 }
@@ -1875,7 +1916,7 @@ ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb)
1875 /* not our business */ 1916 /* not our business */
1876 if (*debug & DEBUG_L2) 1917 if (*debug & DEBUG_L2)
1877 printk(KERN_DEBUG "%s: tei %d/%d mismatch\n", 1918 printk(KERN_DEBUG "%s: tei %d/%d mismatch\n",
1878 __func__, ptei, l2->tei); 1919 mISDNDevName4ch(&l2->ch), ptei, l2->tei);
1879 dev_kfree_skb(skb); 1920 dev_kfree_skb(skb);
1880 return 0; 1921 return 0;
1881 } 1922 }
@@ -1916,7 +1957,8 @@ ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb)
1916 } else 1957 } else
1917 c = 'L'; 1958 c = 'L';
1918 if (c) { 1959 if (c) {
1919 printk(KERN_WARNING "l2 D-channel frame error %c\n", c); 1960 printk(KERN_WARNING "%s:l2 D-channel frame error %c\n",
1961 mISDNDevName4ch(&l2->ch), c);
1920 mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *)(long)c); 1962 mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *)(long)c);
1921 } 1963 }
1922 return ret; 1964 return ret;
@@ -1930,8 +1972,17 @@ l2_send(struct mISDNchannel *ch, struct sk_buff *skb)
1930 int ret = -EINVAL; 1972 int ret = -EINVAL;
1931 1973
1932 if (*debug & DEBUG_L2_RECV) 1974 if (*debug & DEBUG_L2_RECV)
1933 printk(KERN_DEBUG "%s: prim(%x) id(%x) sapi(%d) tei(%d)\n", 1975 printk(KERN_DEBUG "%s: %s prim(%x) id(%x) sapi(%d) tei(%d)\n",
1934 __func__, hh->prim, hh->id, l2->sapi, l2->tei); 1976 __func__, mISDNDevName4ch(&l2->ch), hh->prim, hh->id,
1977 l2->sapi, l2->tei);
1978 if (hh->prim == DL_INTERN_MSG) {
1979 struct mISDNhead *chh = hh + 1; /* saved copy */
1980
1981 *hh = *chh;
1982 if (*debug & DEBUG_L2_RECV)
1983 printk(KERN_DEBUG "%s: prim(%x) id(%x) internal msg\n",
1984 mISDNDevName4ch(&l2->ch), hh->prim, hh->id);
1985 }
1935 switch (hh->prim) { 1986 switch (hh->prim) {
1936 case PH_DATA_IND: 1987 case PH_DATA_IND:
1937 ret = ph_data_indication(l2, hh, skb); 1988 ret = ph_data_indication(l2, hh, skb);
@@ -1987,6 +2038,12 @@ l2_send(struct mISDNchannel *ch, struct sk_buff *skb)
1987 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_RELEASE_REQ, 2038 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_RELEASE_REQ,
1988 skb); 2039 skb);
1989 break; 2040 break;
2041 case DL_TIMER200_IND:
2042 mISDN_FsmEvent(&l2->l2m, EV_L2_T200I, NULL);
2043 break;
2044 case DL_TIMER203_IND:
2045 mISDN_FsmEvent(&l2->l2m, EV_L2_T203I, NULL);
2046 break;
1990 default: 2047 default:
1991 if (*debug & DEBUG_L2) 2048 if (*debug & DEBUG_L2)
1992 l2m_debug(&l2->l2m, "l2 unknown pr %04x", 2049 l2m_debug(&l2->l2m, "l2 unknown pr %04x",
@@ -2005,7 +2062,8 @@ tei_l2(struct layer2 *l2, u_int cmd, u_long arg)
2005 int ret = -EINVAL; 2062 int ret = -EINVAL;
2006 2063
2007 if (*debug & DEBUG_L2_TEI) 2064 if (*debug & DEBUG_L2_TEI)
2008 printk(KERN_DEBUG "%s: cmd(%x)\n", __func__, cmd); 2065 printk(KERN_DEBUG "%s: cmd(%x) in %s\n",
2066 mISDNDevName4ch(&l2->ch), cmd, __func__);
2009 switch (cmd) { 2067 switch (cmd) {
2010 case (MDL_ASSIGN_REQ): 2068 case (MDL_ASSIGN_REQ):
2011 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ASSIGN, (void *)arg); 2069 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ASSIGN, (void *)arg);
@@ -2018,7 +2076,8 @@ tei_l2(struct layer2 *l2, u_int cmd, u_long arg)
2018 break; 2076 break;
2019 case (MDL_ERROR_RSP): 2077 case (MDL_ERROR_RSP):
2020 /* ETS 300-125 5.3.2.1 Test: TC13010 */ 2078 /* ETS 300-125 5.3.2.1 Test: TC13010 */
2021 printk(KERN_NOTICE "MDL_ERROR|REQ (tei_l2)\n"); 2079 printk(KERN_NOTICE "%s: MDL_ERROR|REQ (tei_l2)\n",
2080 mISDNDevName4ch(&l2->ch));
2022 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL); 2081 ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL);
2023 break; 2082 break;
2024 } 2083 }
@@ -2050,7 +2109,8 @@ l2_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
2050 u_int info; 2109 u_int info;
2051 2110
2052 if (*debug & DEBUG_L2_CTRL) 2111 if (*debug & DEBUG_L2_CTRL)
2053 printk(KERN_DEBUG "%s:(%x)\n", __func__, cmd); 2112 printk(KERN_DEBUG "%s: %s cmd(%x)\n",
2113 mISDNDevName4ch(ch), __func__, cmd);
2054 2114
2055 switch (cmd) { 2115 switch (cmd) {
2056 case OPEN_CHANNEL: 2116 case OPEN_CHANNEL:
diff --git a/drivers/isdn/mISDN/tei.c b/drivers/isdn/mISDN/tei.c
index ba2bc0c776e2..be88728f1106 100644
--- a/drivers/isdn/mISDN/tei.c
+++ b/drivers/isdn/mISDN/tei.c
@@ -790,18 +790,23 @@ tei_ph_data_ind(struct teimgr *tm, u_int mt, u_char *dp, int len)
790static struct layer2 * 790static struct layer2 *
791create_new_tei(struct manager *mgr, int tei, int sapi) 791create_new_tei(struct manager *mgr, int tei, int sapi)
792{ 792{
793 u_long opt = 0; 793 unsigned long opt = 0;
794 u_long flags; 794 unsigned long flags;
795 int id; 795 int id;
796 struct layer2 *l2; 796 struct layer2 *l2;
797 struct channel_req rq;
797 798
798 if (!mgr->up) 799 if (!mgr->up)
799 return NULL; 800 return NULL;
800 if ((tei >= 0) && (tei < 64)) 801 if ((tei >= 0) && (tei < 64))
801 test_and_set_bit(OPTION_L2_FIXEDTEI, &opt); 802 test_and_set_bit(OPTION_L2_FIXEDTEI, &opt);
802 if (mgr->ch.st->dev->Dprotocols 803 if (mgr->ch.st->dev->Dprotocols & ((1 << ISDN_P_TE_E1) |
803 & ((1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1))) 804 (1 << ISDN_P_NT_E1))) {
804 test_and_set_bit(OPTION_L2_PMX, &opt); 805 test_and_set_bit(OPTION_L2_PMX, &opt);
806 rq.protocol = ISDN_P_NT_E1;
807 } else {
808 rq.protocol = ISDN_P_NT_S0;
809 }
805 l2 = create_l2(mgr->up, ISDN_P_LAPD_NT, opt, tei, sapi); 810 l2 = create_l2(mgr->up, ISDN_P_LAPD_NT, opt, tei, sapi);
806 if (!l2) { 811 if (!l2) {
807 printk(KERN_WARNING "%s:no memory for layer2\n", __func__); 812 printk(KERN_WARNING "%s:no memory for layer2\n", __func__);
@@ -836,6 +841,14 @@ create_new_tei(struct manager *mgr, int tei, int sapi)
836 l2->ch.recv = mgr->ch.recv; 841 l2->ch.recv = mgr->ch.recv;
837 l2->ch.peer = mgr->ch.peer; 842 l2->ch.peer = mgr->ch.peer;
838 l2->ch.ctrl(&l2->ch, OPEN_CHANNEL, NULL); 843 l2->ch.ctrl(&l2->ch, OPEN_CHANNEL, NULL);
844 /* We need open here L1 for the manager as well (refcounting) */
845 rq.adr.dev = mgr->ch.st->dev->id;
846 id = mgr->ch.st->own.ctrl(&mgr->ch.st->own, OPEN_CHANNEL, &rq);
847 if (id < 0) {
848 printk(KERN_WARNING "%s: cannot open L1\n", __func__);
849 l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL);
850 l2 = NULL;
851 }
839 } 852 }
840 return l2; 853 return l2;
841} 854}
@@ -978,10 +991,11 @@ TEIrelease(struct layer2 *l2)
978static int 991static int
979create_teimgr(struct manager *mgr, struct channel_req *crq) 992create_teimgr(struct manager *mgr, struct channel_req *crq)
980{ 993{
981 struct layer2 *l2; 994 struct layer2 *l2;
982 u_long opt = 0; 995 unsigned long opt = 0;
983 u_long flags; 996 unsigned long flags;
984 int id; 997 int id;
998 struct channel_req l1rq;
985 999
986 if (*debug & DEBUG_L2_TEI) 1000 if (*debug & DEBUG_L2_TEI)
987 printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n", 1001 printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
@@ -1016,6 +1030,7 @@ create_teimgr(struct manager *mgr, struct channel_req *crq)
1016 if (crq->protocol == ISDN_P_LAPD_TE) 1030 if (crq->protocol == ISDN_P_LAPD_TE)
1017 test_and_set_bit(MGR_OPT_USER, &mgr->options); 1031 test_and_set_bit(MGR_OPT_USER, &mgr->options);
1018 } 1032 }
1033 l1rq.adr = crq->adr;
1019 if (mgr->ch.st->dev->Dprotocols 1034 if (mgr->ch.st->dev->Dprotocols
1020 & ((1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1))) 1035 & ((1 << ISDN_P_TE_E1) | (1 << ISDN_P_NT_E1)))
1021 test_and_set_bit(OPTION_L2_PMX, &opt); 1036 test_and_set_bit(OPTION_L2_PMX, &opt);
@@ -1023,6 +1038,8 @@ create_teimgr(struct manager *mgr, struct channel_req *crq)
1023 mgr->up = crq->ch; 1038 mgr->up = crq->ch;
1024 id = DL_INFO_L2_CONNECT; 1039 id = DL_INFO_L2_CONNECT;
1025 teiup_create(mgr, DL_INFORMATION_IND, sizeof(id), &id); 1040 teiup_create(mgr, DL_INFORMATION_IND, sizeof(id), &id);
1041 if (test_bit(MGR_PH_ACTIVE, &mgr->options))
1042 teiup_create(mgr, PH_ACTIVATE_IND, 0, NULL);
1026 crq->ch = NULL; 1043 crq->ch = NULL;
1027 if (!list_empty(&mgr->layer2)) { 1044 if (!list_empty(&mgr->layer2)) {
1028 read_lock_irqsave(&mgr->lock, flags); 1045 read_lock_irqsave(&mgr->lock, flags);
@@ -1053,24 +1070,34 @@ create_teimgr(struct manager *mgr, struct channel_req *crq)
1053 l2->tm->tei_m.fsm = &teifsmu; 1070 l2->tm->tei_m.fsm = &teifsmu;
1054 l2->tm->tei_m.state = ST_TEI_NOP; 1071 l2->tm->tei_m.state = ST_TEI_NOP;
1055 l2->tm->tval = 1000; /* T201 1 sec */ 1072 l2->tm->tval = 1000; /* T201 1 sec */
1073 if (test_bit(OPTION_L2_PMX, &opt))
1074 l1rq.protocol = ISDN_P_TE_E1;
1075 else
1076 l1rq.protocol = ISDN_P_TE_S0;
1056 } else { 1077 } else {
1057 l2->tm->tei_m.fsm = &teifsmn; 1078 l2->tm->tei_m.fsm = &teifsmn;
1058 l2->tm->tei_m.state = ST_TEI_NOP; 1079 l2->tm->tei_m.state = ST_TEI_NOP;
1059 l2->tm->tval = 2000; /* T202 2 sec */ 1080 l2->tm->tval = 2000; /* T202 2 sec */
1081 if (test_bit(OPTION_L2_PMX, &opt))
1082 l1rq.protocol = ISDN_P_NT_E1;
1083 else
1084 l1rq.protocol = ISDN_P_NT_S0;
1060 } 1085 }
1061 mISDN_FsmInitTimer(&l2->tm->tei_m, &l2->tm->timer); 1086 mISDN_FsmInitTimer(&l2->tm->tei_m, &l2->tm->timer);
1062 write_lock_irqsave(&mgr->lock, flags); 1087 write_lock_irqsave(&mgr->lock, flags);
1063 id = get_free_id(mgr); 1088 id = get_free_id(mgr);
1064 list_add_tail(&l2->list, &mgr->layer2); 1089 list_add_tail(&l2->list, &mgr->layer2);
1065 write_unlock_irqrestore(&mgr->lock, flags); 1090 write_unlock_irqrestore(&mgr->lock, flags);
1066 if (id < 0) { 1091 if (id >= 0) {
1067 l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL);
1068 } else {
1069 l2->ch.nr = id; 1092 l2->ch.nr = id;
1070 l2->up->nr = id; 1093 l2->up->nr = id;
1071 crq->ch = &l2->ch; 1094 crq->ch = &l2->ch;
1072 id = 0; 1095 /* We need open here L1 for the manager as well (refcounting) */
1096 id = mgr->ch.st->own.ctrl(&mgr->ch.st->own, OPEN_CHANNEL,
1097 &l1rq);
1073 } 1098 }
1099 if (id < 0)
1100 l2->ch.ctrl(&l2->ch, CLOSE_CHANNEL, NULL);
1074 return id; 1101 return id;
1075} 1102}
1076 1103
@@ -1096,12 +1123,16 @@ mgr_send(struct mISDNchannel *ch, struct sk_buff *skb)
1096 break; 1123 break;
1097 case PH_ACTIVATE_IND: 1124 case PH_ACTIVATE_IND:
1098 test_and_set_bit(MGR_PH_ACTIVE, &mgr->options); 1125 test_and_set_bit(MGR_PH_ACTIVE, &mgr->options);
1126 if (mgr->up)
1127 teiup_create(mgr, PH_ACTIVATE_IND, 0, NULL);
1099 mISDN_FsmEvent(&mgr->deact, EV_ACTIVATE_IND, NULL); 1128 mISDN_FsmEvent(&mgr->deact, EV_ACTIVATE_IND, NULL);
1100 do_send(mgr); 1129 do_send(mgr);
1101 ret = 0; 1130 ret = 0;
1102 break; 1131 break;
1103 case PH_DEACTIVATE_IND: 1132 case PH_DEACTIVATE_IND:
1104 test_and_clear_bit(MGR_PH_ACTIVE, &mgr->options); 1133 test_and_clear_bit(MGR_PH_ACTIVE, &mgr->options);
1134 if (mgr->up)
1135 teiup_create(mgr, PH_DEACTIVATE_IND, 0, NULL);
1105 mISDN_FsmEvent(&mgr->deact, EV_DEACTIVATE_IND, NULL); 1136 mISDN_FsmEvent(&mgr->deact, EV_DEACTIVATE_IND, NULL);
1106 ret = 0; 1137 ret = 0;
1107 break; 1138 break;
@@ -1263,7 +1294,7 @@ static int
1263mgr_bcast(struct mISDNchannel *ch, struct sk_buff *skb) 1294mgr_bcast(struct mISDNchannel *ch, struct sk_buff *skb)
1264{ 1295{
1265 struct manager *mgr = container_of(ch, struct manager, bcast); 1296 struct manager *mgr = container_of(ch, struct manager, bcast);
1266 struct mISDNhead *hh = mISDN_HEAD_P(skb); 1297 struct mISDNhead *hhc, *hh = mISDN_HEAD_P(skb);
1267 struct sk_buff *cskb = NULL; 1298 struct sk_buff *cskb = NULL;
1268 struct layer2 *l2; 1299 struct layer2 *l2;
1269 u_long flags; 1300 u_long flags;
@@ -1278,10 +1309,17 @@ mgr_bcast(struct mISDNchannel *ch, struct sk_buff *skb)
1278 skb = NULL; 1309 skb = NULL;
1279 } else { 1310 } else {
1280 if (!cskb) 1311 if (!cskb)
1281 cskb = skb_copy(skb, GFP_KERNEL); 1312 cskb = skb_copy(skb, GFP_ATOMIC);
1282 } 1313 }
1283 if (cskb) { 1314 if (cskb) {
1284 ret = l2->ch.send(&l2->ch, cskb); 1315 hhc = mISDN_HEAD_P(cskb);
1316 /* save original header behind normal header */
1317 hhc++;
1318 *hhc = *hh;
1319 hhc--;
1320 hhc->prim = DL_INTERN_MSG;
1321 hhc->id = l2->ch.nr;
1322 ret = ch->st->own.recv(&ch->st->own, cskb);
1285 if (ret) { 1323 if (ret) {
1286 if (*debug & DEBUG_SEND_ERR) 1324 if (*debug & DEBUG_SEND_ERR)
1287 printk(KERN_DEBUG 1325 printk(KERN_DEBUG
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 62d2409bb293..16dbf53e314b 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -549,9 +549,9 @@ down:
549 * Get link speed and duplex from the slave's base driver 549 * Get link speed and duplex from the slave's base driver
550 * using ethtool. If for some reason the call fails or the 550 * using ethtool. If for some reason the call fails or the
551 * values are invalid, set speed and duplex to -1, 551 * values are invalid, set speed and duplex to -1,
552 * and return error. 552 * and return.
553 */ 553 */
554static int bond_update_speed_duplex(struct slave *slave) 554static void bond_update_speed_duplex(struct slave *slave)
555{ 555{
556 struct net_device *slave_dev = slave->dev; 556 struct net_device *slave_dev = slave->dev;
557 struct ethtool_cmd ecmd; 557 struct ethtool_cmd ecmd;
@@ -563,24 +563,24 @@ static int bond_update_speed_duplex(struct slave *slave)
563 563
564 res = __ethtool_get_settings(slave_dev, &ecmd); 564 res = __ethtool_get_settings(slave_dev, &ecmd);
565 if (res < 0) 565 if (res < 0)
566 return -1; 566 return;
567 567
568 slave_speed = ethtool_cmd_speed(&ecmd); 568 slave_speed = ethtool_cmd_speed(&ecmd);
569 if (slave_speed == 0 || slave_speed == ((__u32) -1)) 569 if (slave_speed == 0 || slave_speed == ((__u32) -1))
570 return -1; 570 return;
571 571
572 switch (ecmd.duplex) { 572 switch (ecmd.duplex) {
573 case DUPLEX_FULL: 573 case DUPLEX_FULL:
574 case DUPLEX_HALF: 574 case DUPLEX_HALF:
575 break; 575 break;
576 default: 576 default:
577 return -1; 577 return;
578 } 578 }
579 579
580 slave->speed = slave_speed; 580 slave->speed = slave_speed;
581 slave->duplex = ecmd.duplex; 581 slave->duplex = ecmd.duplex;
582 582
583 return 0; 583 return;
584} 584}
585 585
586/* 586/*
@@ -1726,7 +1726,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1726 1726
1727 read_lock(&bond->lock); 1727 read_lock(&bond->lock);
1728 1728
1729 new_slave->last_arp_rx = jiffies; 1729 new_slave->last_arp_rx = jiffies -
1730 (msecs_to_jiffies(bond->params.arp_interval) + 1);
1730 1731
1731 if (bond->params.miimon && !bond->params.use_carrier) { 1732 if (bond->params.miimon && !bond->params.use_carrier) {
1732 link_reporting = bond_check_dev_link(bond, slave_dev, 1); 1733 link_reporting = bond_check_dev_link(bond, slave_dev, 1);
@@ -1751,22 +1752,30 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1751 } 1752 }
1752 1753
1753 /* check for initial state */ 1754 /* check for initial state */
1754 if (!bond->params.miimon || 1755 if (bond->params.miimon) {
1755 (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS)) { 1756 if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
1756 if (bond->params.updelay) { 1757 if (bond->params.updelay) {
1757 pr_debug("Initial state of slave_dev is BOND_LINK_BACK\n"); 1758 new_slave->link = BOND_LINK_BACK;
1758 new_slave->link = BOND_LINK_BACK; 1759 new_slave->delay = bond->params.updelay;
1759 new_slave->delay = bond->params.updelay; 1760 } else {
1761 new_slave->link = BOND_LINK_UP;
1762 }
1760 } else { 1763 } else {
1761 pr_debug("Initial state of slave_dev is BOND_LINK_UP\n"); 1764 new_slave->link = BOND_LINK_DOWN;
1762 new_slave->link = BOND_LINK_UP;
1763 } 1765 }
1764 new_slave->jiffies = jiffies; 1766 } else if (bond->params.arp_interval) {
1767 new_slave->link = (netif_carrier_ok(slave_dev) ?
1768 BOND_LINK_UP : BOND_LINK_DOWN);
1765 } else { 1769 } else {
1766 pr_debug("Initial state of slave_dev is BOND_LINK_DOWN\n"); 1770 new_slave->link = BOND_LINK_UP;
1767 new_slave->link = BOND_LINK_DOWN;
1768 } 1771 }
1769 1772
1773 if (new_slave->link != BOND_LINK_DOWN)
1774 new_slave->jiffies = jiffies;
1775 pr_debug("Initial state of slave_dev is BOND_LINK_%s\n",
1776 new_slave->link == BOND_LINK_DOWN ? "DOWN" :
1777 (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
1778
1770 bond_update_speed_duplex(new_slave); 1779 bond_update_speed_duplex(new_slave);
1771 1780
1772 if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) { 1781 if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
@@ -4820,12 +4829,9 @@ static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
4820 return 0; 4829 return 0;
4821} 4830}
4822 4831
4823static int bond_get_tx_queues(struct net *net, struct nlattr *tb[], 4832static int bond_get_tx_queues(struct net *net, struct nlattr *tb[])
4824 unsigned int *num_queues,
4825 unsigned int *real_num_queues)
4826{ 4833{
4827 *num_queues = tx_queues; 4834 return tx_queues;
4828 return 0;
4829} 4835}
4830 4836
4831static struct rtnl_link_ops bond_link_ops __read_mostly = { 4837static struct rtnl_link_ops bond_link_ops __read_mostly = {
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 9c1c8cd5223f..1520814c77c7 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -6,6 +6,8 @@
6 * License terms: GNU General Public License (GPL) version 2. 6 * License terms: GNU General Public License (GPL) version 2.
7 */ 7 */
8 8
9#define pr_fmt(fmt) KBUILD_MODNAME fmt
10
9#include <linux/init.h> 11#include <linux/init.h>
10#include <linux/module.h> 12#include <linux/module.h>
11#include <linux/device.h> 13#include <linux/device.h>
@@ -19,6 +21,7 @@
19#include <linux/if_arp.h> 21#include <linux/if_arp.h>
20#include <linux/timer.h> 22#include <linux/timer.h>
21#include <linux/rtnetlink.h> 23#include <linux/rtnetlink.h>
24#include <linux/pkt_sched.h>
22#include <net/caif/caif_layer.h> 25#include <net/caif/caif_layer.h>
23#include <net/caif/caif_hsi.h> 26#include <net/caif/caif_hsi.h>
24 27
@@ -34,6 +37,10 @@ static int inactivity_timeout = 1000;
34module_param(inactivity_timeout, int, S_IRUGO | S_IWUSR); 37module_param(inactivity_timeout, int, S_IRUGO | S_IWUSR);
35MODULE_PARM_DESC(inactivity_timeout, "Inactivity timeout on HSI, ms."); 38MODULE_PARM_DESC(inactivity_timeout, "Inactivity timeout on HSI, ms.");
36 39
40static int aggregation_timeout = 1;
41module_param(aggregation_timeout, int, S_IRUGO | S_IWUSR);
42MODULE_PARM_DESC(aggregation_timeout, "Aggregation timeout on HSI, ms.");
43
37/* 44/*
38 * HSI padding options. 45 * HSI padding options.
39 * Warning: must be a base of 2 (& operation used) and can not be zero ! 46 * Warning: must be a base of 2 (& operation used) and can not be zero !
@@ -86,24 +93,84 @@ static void cfhsi_inactivity_tout(unsigned long arg)
86 queue_work(cfhsi->wq, &cfhsi->wake_down_work); 93 queue_work(cfhsi->wq, &cfhsi->wake_down_work);
87} 94}
88 95
96static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi,
97 const struct sk_buff *skb,
98 int direction)
99{
100 struct caif_payload_info *info;
101 int hpad, tpad, len;
102
103 info = (struct caif_payload_info *)&skb->cb;
104 hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align);
105 tpad = PAD_POW2((skb->len + hpad), hsi_tail_align);
106 len = skb->len + hpad + tpad;
107
108 if (direction > 0)
109 cfhsi->aggregation_len += len;
110 else if (direction < 0)
111 cfhsi->aggregation_len -= len;
112}
113
114static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi)
115{
116 int i;
117
118 if (cfhsi->aggregation_timeout < 0)
119 return true;
120
121 for (i = 0; i < CFHSI_PRIO_BEBK; ++i) {
122 if (cfhsi->qhead[i].qlen)
123 return true;
124 }
125
126 /* TODO: Use aggregation_len instead */
127 if (cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS)
128 return true;
129
130 return false;
131}
132
133static struct sk_buff *cfhsi_dequeue(struct cfhsi *cfhsi)
134{
135 struct sk_buff *skb;
136 int i;
137
138 for (i = 0; i < CFHSI_PRIO_LAST; ++i) {
139 skb = skb_dequeue(&cfhsi->qhead[i]);
140 if (skb)
141 break;
142 }
143
144 return skb;
145}
146
147static int cfhsi_tx_queue_len(struct cfhsi *cfhsi)
148{
149 int i, len = 0;
150 for (i = 0; i < CFHSI_PRIO_LAST; ++i)
151 len += skb_queue_len(&cfhsi->qhead[i]);
152 return len;
153}
154
89static void cfhsi_abort_tx(struct cfhsi *cfhsi) 155static void cfhsi_abort_tx(struct cfhsi *cfhsi)
90{ 156{
91 struct sk_buff *skb; 157 struct sk_buff *skb;
92 158
93 for (;;) { 159 for (;;) {
94 spin_lock_bh(&cfhsi->lock); 160 spin_lock_bh(&cfhsi->lock);
95 skb = skb_dequeue(&cfhsi->qhead); 161 skb = cfhsi_dequeue(cfhsi);
96 if (!skb) 162 if (!skb)
97 break; 163 break;
98 164
99 cfhsi->ndev->stats.tx_errors++; 165 cfhsi->ndev->stats.tx_errors++;
100 cfhsi->ndev->stats.tx_dropped++; 166 cfhsi->ndev->stats.tx_dropped++;
167 cfhsi_update_aggregation_stats(cfhsi, skb, -1);
101 spin_unlock_bh(&cfhsi->lock); 168 spin_unlock_bh(&cfhsi->lock);
102 kfree_skb(skb); 169 kfree_skb(skb);
103 } 170 }
104 cfhsi->tx_state = CFHSI_TX_STATE_IDLE; 171 cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
105 if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) 172 if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
106 mod_timer(&cfhsi->timer, 173 mod_timer(&cfhsi->inactivity_timer,
107 jiffies + cfhsi->inactivity_timeout); 174 jiffies + cfhsi->inactivity_timeout);
108 spin_unlock_bh(&cfhsi->lock); 175 spin_unlock_bh(&cfhsi->lock);
109} 176}
@@ -169,7 +236,7 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
169 struct sk_buff *skb; 236 struct sk_buff *skb;
170 u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ; 237 u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
171 238
172 skb = skb_dequeue(&cfhsi->qhead); 239 skb = cfhsi_dequeue(cfhsi);
173 if (!skb) 240 if (!skb)
174 return 0; 241 return 0;
175 242
@@ -196,11 +263,16 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
196 pemb += hpad; 263 pemb += hpad;
197 264
198 /* Update network statistics. */ 265 /* Update network statistics. */
266 spin_lock_bh(&cfhsi->lock);
199 cfhsi->ndev->stats.tx_packets++; 267 cfhsi->ndev->stats.tx_packets++;
200 cfhsi->ndev->stats.tx_bytes += skb->len; 268 cfhsi->ndev->stats.tx_bytes += skb->len;
269 cfhsi_update_aggregation_stats(cfhsi, skb, -1);
270 spin_unlock_bh(&cfhsi->lock);
201 271
202 /* Copy in embedded CAIF frame. */ 272 /* Copy in embedded CAIF frame. */
203 skb_copy_bits(skb, 0, pemb, skb->len); 273 skb_copy_bits(skb, 0, pemb, skb->len);
274
275 /* Consume the SKB */
204 consume_skb(skb); 276 consume_skb(skb);
205 skb = NULL; 277 skb = NULL;
206 } 278 }
@@ -214,7 +286,7 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
214 int tpad = 0; 286 int tpad = 0;
215 287
216 if (!skb) 288 if (!skb)
217 skb = skb_dequeue(&cfhsi->qhead); 289 skb = cfhsi_dequeue(cfhsi);
218 290
219 if (!skb) 291 if (!skb)
220 break; 292 break;
@@ -233,8 +305,11 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
233 pfrm += hpad; 305 pfrm += hpad;
234 306
235 /* Update network statistics. */ 307 /* Update network statistics. */
308 spin_lock_bh(&cfhsi->lock);
236 cfhsi->ndev->stats.tx_packets++; 309 cfhsi->ndev->stats.tx_packets++;
237 cfhsi->ndev->stats.tx_bytes += skb->len; 310 cfhsi->ndev->stats.tx_bytes += skb->len;
311 cfhsi_update_aggregation_stats(cfhsi, skb, -1);
312 spin_unlock_bh(&cfhsi->lock);
238 313
239 /* Copy in CAIF frame. */ 314 /* Copy in CAIF frame. */
240 skb_copy_bits(skb, 0, pfrm, skb->len); 315 skb_copy_bits(skb, 0, pfrm, skb->len);
@@ -244,6 +319,8 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
244 319
245 /* Update frame pointer. */ 320 /* Update frame pointer. */
246 pfrm += skb->len + tpad; 321 pfrm += skb->len + tpad;
322
323 /* Consume the SKB */
247 consume_skb(skb); 324 consume_skb(skb);
248 skb = NULL; 325 skb = NULL;
249 326
@@ -258,8 +335,7 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
258 } 335 }
259 336
260 /* Check if we can piggy-back another descriptor. */ 337 /* Check if we can piggy-back another descriptor. */
261 skb = skb_peek(&cfhsi->qhead); 338 if (cfhsi_can_send_aggregate(cfhsi))
262 if (skb)
263 desc->header |= CFHSI_PIGGY_DESC; 339 desc->header |= CFHSI_PIGGY_DESC;
264 else 340 else
265 desc->header &= ~CFHSI_PIGGY_DESC; 341 desc->header &= ~CFHSI_PIGGY_DESC;
@@ -267,61 +343,71 @@ static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
267 return CFHSI_DESC_SZ + pld_len; 343 return CFHSI_DESC_SZ + pld_len;
268} 344}
269 345
270static void cfhsi_tx_done(struct cfhsi *cfhsi) 346static void cfhsi_start_tx(struct cfhsi *cfhsi)
271{ 347{
272 struct cfhsi_desc *desc = NULL; 348 struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
273 int len = 0; 349 int len, res;
274 int res;
275 350
276 dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__); 351 dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);
277 352
278 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) 353 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
279 return; 354 return;
280 355
281 desc = (struct cfhsi_desc *)cfhsi->tx_buf;
282
283 do { 356 do {
284 /*
285 * Send flow on if flow off has been previously signalled
286 * and number of packets is below low water mark.
287 */
288 spin_lock_bh(&cfhsi->lock);
289 if (cfhsi->flow_off_sent &&
290 cfhsi->qhead.qlen <= cfhsi->q_low_mark &&
291 cfhsi->cfdev.flowctrl) {
292
293 cfhsi->flow_off_sent = 0;
294 cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
295 }
296 spin_unlock_bh(&cfhsi->lock);
297
298 /* Create HSI frame. */ 357 /* Create HSI frame. */
299 do { 358 len = cfhsi_tx_frm(desc, cfhsi);
300 len = cfhsi_tx_frm(desc, cfhsi); 359 if (!len) {
301 if (!len) { 360 spin_lock_bh(&cfhsi->lock);
302 spin_lock_bh(&cfhsi->lock); 361 if (unlikely(cfhsi_tx_queue_len(cfhsi))) {
303 if (unlikely(skb_peek(&cfhsi->qhead))) {
304 spin_unlock_bh(&cfhsi->lock);
305 continue;
306 }
307 cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
308 /* Start inactivity timer. */
309 mod_timer(&cfhsi->timer,
310 jiffies + cfhsi->inactivity_timeout);
311 spin_unlock_bh(&cfhsi->lock); 362 spin_unlock_bh(&cfhsi->lock);
312 goto done; 363 res = -EAGAIN;
364 continue;
313 } 365 }
314 } while (!len); 366 cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
367 /* Start inactivity timer. */
368 mod_timer(&cfhsi->inactivity_timer,
369 jiffies + cfhsi->inactivity_timeout);
370 spin_unlock_bh(&cfhsi->lock);
371 break;
372 }
315 373
316 /* Set up new transfer. */ 374 /* Set up new transfer. */
317 res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev); 375 res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
318 if (WARN_ON(res < 0)) { 376 if (WARN_ON(res < 0))
319 dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n", 377 dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n",
320 __func__, res); 378 __func__, res);
321 }
322 } while (res < 0); 379 } while (res < 0);
380}
381
382static void cfhsi_tx_done(struct cfhsi *cfhsi)
383{
384 dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__);
385
386 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
387 return;
388
389 /*
390 * Send flow on if flow off has been previously signalled
391 * and number of packets is below low water mark.
392 */
393 spin_lock_bh(&cfhsi->lock);
394 if (cfhsi->flow_off_sent &&
395 cfhsi_tx_queue_len(cfhsi) <= cfhsi->q_low_mark &&
396 cfhsi->cfdev.flowctrl) {
397
398 cfhsi->flow_off_sent = 0;
399 cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
400 }
401
402 if (cfhsi_can_send_aggregate(cfhsi)) {
403 spin_unlock_bh(&cfhsi->lock);
404 cfhsi_start_tx(cfhsi);
405 } else {
406 mod_timer(&cfhsi->aggregation_timer,
407 jiffies + cfhsi->aggregation_timeout);
408 spin_unlock_bh(&cfhsi->lock);
409 }
323 410
324done:
325 return; 411 return;
326} 412}
327 413
@@ -560,7 +646,7 @@ static void cfhsi_rx_done(struct cfhsi *cfhsi)
560 646
561 /* Update inactivity timer if pending. */ 647 /* Update inactivity timer if pending. */
562 spin_lock_bh(&cfhsi->lock); 648 spin_lock_bh(&cfhsi->lock);
563 mod_timer_pending(&cfhsi->timer, 649 mod_timer_pending(&cfhsi->inactivity_timer,
564 jiffies + cfhsi->inactivity_timeout); 650 jiffies + cfhsi->inactivity_timeout);
565 spin_unlock_bh(&cfhsi->lock); 651 spin_unlock_bh(&cfhsi->lock);
566 652
@@ -793,12 +879,12 @@ wake_ack:
793 879
794 spin_lock_bh(&cfhsi->lock); 880 spin_lock_bh(&cfhsi->lock);
795 881
796 /* Resume transmit if queue is not empty. */ 882 /* Resume transmit if queues are not empty. */
797 if (!skb_peek(&cfhsi->qhead)) { 883 if (!cfhsi_tx_queue_len(cfhsi)) {
798 dev_dbg(&cfhsi->ndev->dev, "%s: Peer wake, start timer.\n", 884 dev_dbg(&cfhsi->ndev->dev, "%s: Peer wake, start timer.\n",
799 __func__); 885 __func__);
800 /* Start inactivity timer. */ 886 /* Start inactivity timer. */
801 mod_timer(&cfhsi->timer, 887 mod_timer(&cfhsi->inactivity_timer,
802 jiffies + cfhsi->inactivity_timeout); 888 jiffies + cfhsi->inactivity_timeout);
803 spin_unlock_bh(&cfhsi->lock); 889 spin_unlock_bh(&cfhsi->lock);
804 return; 890 return;
@@ -934,20 +1020,53 @@ static void cfhsi_wake_down_cb(struct cfhsi_drv *drv)
934 wake_up_interruptible(&cfhsi->wake_down_wait); 1020 wake_up_interruptible(&cfhsi->wake_down_wait);
935} 1021}
936 1022
1023static void cfhsi_aggregation_tout(unsigned long arg)
1024{
1025 struct cfhsi *cfhsi = (struct cfhsi *)arg;
1026
1027 dev_dbg(&cfhsi->ndev->dev, "%s.\n",
1028 __func__);
1029
1030 cfhsi_start_tx(cfhsi);
1031}
1032
937static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev) 1033static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
938{ 1034{
939 struct cfhsi *cfhsi = NULL; 1035 struct cfhsi *cfhsi = NULL;
940 int start_xfer = 0; 1036 int start_xfer = 0;
941 int timer_active; 1037 int timer_active;
1038 int prio;
942 1039
943 if (!dev) 1040 if (!dev)
944 return -EINVAL; 1041 return -EINVAL;
945 1042
946 cfhsi = netdev_priv(dev); 1043 cfhsi = netdev_priv(dev);
947 1044
1045 switch (skb->priority) {
1046 case TC_PRIO_BESTEFFORT:
1047 case TC_PRIO_FILLER:
1048 case TC_PRIO_BULK:
1049 prio = CFHSI_PRIO_BEBK;
1050 break;
1051 case TC_PRIO_INTERACTIVE_BULK:
1052 prio = CFHSI_PRIO_VI;
1053 break;
1054 case TC_PRIO_INTERACTIVE:
1055 prio = CFHSI_PRIO_VO;
1056 break;
1057 case TC_PRIO_CONTROL:
1058 default:
1059 prio = CFHSI_PRIO_CTL;
1060 break;
1061 }
1062
948 spin_lock_bh(&cfhsi->lock); 1063 spin_lock_bh(&cfhsi->lock);
949 1064
950 skb_queue_tail(&cfhsi->qhead, skb); 1065 /* Update aggregation statistics */
1066 cfhsi_update_aggregation_stats(cfhsi, skb, 1);
1067
1068 /* Queue the SKB */
1069 skb_queue_tail(&cfhsi->qhead[prio], skb);
951 1070
952 /* Sanity check; xmit should not be called after unregister_netdev */ 1071 /* Sanity check; xmit should not be called after unregister_netdev */
953 if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) { 1072 if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
@@ -958,7 +1077,7 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
958 1077
959 /* Send flow off if number of packets is above high water mark. */ 1078 /* Send flow off if number of packets is above high water mark. */
960 if (!cfhsi->flow_off_sent && 1079 if (!cfhsi->flow_off_sent &&
961 cfhsi->qhead.qlen > cfhsi->q_high_mark && 1080 cfhsi_tx_queue_len(cfhsi) > cfhsi->q_high_mark &&
962 cfhsi->cfdev.flowctrl) { 1081 cfhsi->cfdev.flowctrl) {
963 cfhsi->flow_off_sent = 1; 1082 cfhsi->flow_off_sent = 1;
964 cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF); 1083 cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
@@ -970,12 +1089,18 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
970 } 1089 }
971 1090
972 if (!start_xfer) { 1091 if (!start_xfer) {
1092 /* Send aggregate if it is possible */
1093 bool aggregate_ready =
1094 cfhsi_can_send_aggregate(cfhsi) &&
1095 del_timer(&cfhsi->aggregation_timer) > 0;
973 spin_unlock_bh(&cfhsi->lock); 1096 spin_unlock_bh(&cfhsi->lock);
1097 if (aggregate_ready)
1098 cfhsi_start_tx(cfhsi);
974 return 0; 1099 return 0;
975 } 1100 }
976 1101
977 /* Delete inactivity timer if started. */ 1102 /* Delete inactivity timer if started. */
978 timer_active = del_timer_sync(&cfhsi->timer); 1103 timer_active = del_timer_sync(&cfhsi->inactivity_timer);
979 1104
980 spin_unlock_bh(&cfhsi->lock); 1105 spin_unlock_bh(&cfhsi->lock);
981 1106
@@ -1004,28 +1129,11 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
1004 return 0; 1129 return 0;
1005} 1130}
1006 1131
1007static int cfhsi_open(struct net_device *dev) 1132static const struct net_device_ops cfhsi_ops;
1008{
1009 netif_wake_queue(dev);
1010
1011 return 0;
1012}
1013
1014static int cfhsi_close(struct net_device *dev)
1015{
1016 netif_stop_queue(dev);
1017
1018 return 0;
1019}
1020
1021static const struct net_device_ops cfhsi_ops = {
1022 .ndo_open = cfhsi_open,
1023 .ndo_stop = cfhsi_close,
1024 .ndo_start_xmit = cfhsi_xmit
1025};
1026 1133
1027static void cfhsi_setup(struct net_device *dev) 1134static void cfhsi_setup(struct net_device *dev)
1028{ 1135{
1136 int i;
1029 struct cfhsi *cfhsi = netdev_priv(dev); 1137 struct cfhsi *cfhsi = netdev_priv(dev);
1030 dev->features = 0; 1138 dev->features = 0;
1031 dev->netdev_ops = &cfhsi_ops; 1139 dev->netdev_ops = &cfhsi_ops;
@@ -1034,7 +1142,8 @@ static void cfhsi_setup(struct net_device *dev)
1034 dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ; 1142 dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
1035 dev->tx_queue_len = 0; 1143 dev->tx_queue_len = 0;
1036 dev->destructor = free_netdev; 1144 dev->destructor = free_netdev;
1037 skb_queue_head_init(&cfhsi->qhead); 1145 for (i = 0; i < CFHSI_PRIO_LAST; ++i)
1146 skb_queue_head_init(&cfhsi->qhead[i]);
1038 cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW; 1147 cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
1039 cfhsi->cfdev.use_frag = false; 1148 cfhsi->cfdev.use_frag = false;
1040 cfhsi->cfdev.use_stx = false; 1149 cfhsi->cfdev.use_stx = false;
@@ -1046,7 +1155,7 @@ int cfhsi_probe(struct platform_device *pdev)
1046{ 1155{
1047 struct cfhsi *cfhsi = NULL; 1156 struct cfhsi *cfhsi = NULL;
1048 struct net_device *ndev; 1157 struct net_device *ndev;
1049 struct cfhsi_dev *dev; 1158
1050 int res; 1159 int res;
1051 1160
1052 ndev = alloc_netdev(sizeof(struct cfhsi), "cfhsi%d", cfhsi_setup); 1161 ndev = alloc_netdev(sizeof(struct cfhsi), "cfhsi%d", cfhsi_setup);
@@ -1057,6 +1166,34 @@ int cfhsi_probe(struct platform_device *pdev)
1057 cfhsi->ndev = ndev; 1166 cfhsi->ndev = ndev;
1058 cfhsi->pdev = pdev; 1167 cfhsi->pdev = pdev;
1059 1168
1169 /* Assign the HSI device. */
1170 cfhsi->dev = pdev->dev.platform_data;
1171
1172 /* Assign the driver to this HSI device. */
1173 cfhsi->dev->drv = &cfhsi->drv;
1174
1175 /* Register network device. */
1176 res = register_netdev(ndev);
1177 if (res) {
1178 dev_err(&ndev->dev, "%s: Registration error: %d.\n",
1179 __func__, res);
1180 free_netdev(ndev);
1181 }
1182 /* Add CAIF HSI device to list. */
1183 spin_lock(&cfhsi_list_lock);
1184 list_add_tail(&cfhsi->list, &cfhsi_list);
1185 spin_unlock(&cfhsi_list_lock);
1186
1187 return res;
1188}
1189
1190static int cfhsi_open(struct net_device *ndev)
1191{
1192 struct cfhsi *cfhsi = netdev_priv(ndev);
1193 int res;
1194
1195 clear_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
1196
1060 /* Initialize state vaiables. */ 1197 /* Initialize state vaiables. */
1061 cfhsi->tx_state = CFHSI_TX_STATE_IDLE; 1198 cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
1062 cfhsi->rx_state.state = CFHSI_RX_STATE_DESC; 1199 cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;
@@ -1066,12 +1203,6 @@ int cfhsi_probe(struct platform_device *pdev)
1066 cfhsi->q_low_mark = LOW_WATER_MARK; 1203 cfhsi->q_low_mark = LOW_WATER_MARK;
1067 cfhsi->q_high_mark = HIGH_WATER_MARK; 1204 cfhsi->q_high_mark = HIGH_WATER_MARK;
1068 1205
1069 /* Assign the HSI device. */
1070 dev = (struct cfhsi_dev *)pdev->dev.platform_data;
1071 cfhsi->dev = dev;
1072
1073 /* Assign the driver to this HSI device. */
1074 dev->drv = &cfhsi->drv;
1075 1206
1076 /* 1207 /*
1077 * Allocate a TX buffer with the size of a HSI packet descriptors 1208 * Allocate a TX buffer with the size of a HSI packet descriptors
@@ -1111,6 +1242,9 @@ int cfhsi_probe(struct platform_device *pdev)
1111 cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA; 1242 cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA;
1112 } 1243 }
1113 1244
1245 /* Initialize aggregation timeout */
1246 cfhsi->aggregation_timeout = aggregation_timeout;
1247
1114 /* Initialize recieve vaiables. */ 1248 /* Initialize recieve vaiables. */
1115 cfhsi->rx_ptr = cfhsi->rx_buf; 1249 cfhsi->rx_ptr = cfhsi->rx_buf;
1116 cfhsi->rx_len = CFHSI_DESC_SZ; 1250 cfhsi->rx_len = CFHSI_DESC_SZ;
@@ -1136,9 +1270,9 @@ int cfhsi_probe(struct platform_device *pdev)
1136 clear_bit(CFHSI_AWAKE, &cfhsi->bits); 1270 clear_bit(CFHSI_AWAKE, &cfhsi->bits);
1137 1271
1138 /* Create work thread. */ 1272 /* Create work thread. */
1139 cfhsi->wq = create_singlethread_workqueue(pdev->name); 1273 cfhsi->wq = create_singlethread_workqueue(cfhsi->pdev->name);
1140 if (!cfhsi->wq) { 1274 if (!cfhsi->wq) {
1141 dev_err(&ndev->dev, "%s: Failed to create work queue.\n", 1275 dev_err(&cfhsi->ndev->dev, "%s: Failed to create work queue.\n",
1142 __func__); 1276 __func__);
1143 res = -ENODEV; 1277 res = -ENODEV;
1144 goto err_create_wq; 1278 goto err_create_wq;
@@ -1150,18 +1284,17 @@ int cfhsi_probe(struct platform_device *pdev)
1150 init_waitqueue_head(&cfhsi->flush_fifo_wait); 1284 init_waitqueue_head(&cfhsi->flush_fifo_wait);
1151 1285
1152 /* Setup the inactivity timer. */ 1286 /* Setup the inactivity timer. */
1153 init_timer(&cfhsi->timer); 1287 init_timer(&cfhsi->inactivity_timer);
1154 cfhsi->timer.data = (unsigned long)cfhsi; 1288 cfhsi->inactivity_timer.data = (unsigned long)cfhsi;
1155 cfhsi->timer.function = cfhsi_inactivity_tout; 1289 cfhsi->inactivity_timer.function = cfhsi_inactivity_tout;
1156 /* Setup the slowpath RX timer. */ 1290 /* Setup the slowpath RX timer. */
1157 init_timer(&cfhsi->rx_slowpath_timer); 1291 init_timer(&cfhsi->rx_slowpath_timer);
1158 cfhsi->rx_slowpath_timer.data = (unsigned long)cfhsi; 1292 cfhsi->rx_slowpath_timer.data = (unsigned long)cfhsi;
1159 cfhsi->rx_slowpath_timer.function = cfhsi_rx_slowpath; 1293 cfhsi->rx_slowpath_timer.function = cfhsi_rx_slowpath;
1160 1294 /* Setup the aggregation timer. */
1161 /* Add CAIF HSI device to list. */ 1295 init_timer(&cfhsi->aggregation_timer);
1162 spin_lock(&cfhsi_list_lock); 1296 cfhsi->aggregation_timer.data = (unsigned long)cfhsi;
1163 list_add_tail(&cfhsi->list, &cfhsi_list); 1297 cfhsi->aggregation_timer.function = cfhsi_aggregation_tout;
1164 spin_unlock(&cfhsi_list_lock);
1165 1298
1166 /* Activate HSI interface. */ 1299 /* Activate HSI interface. */
1167 res = cfhsi->dev->cfhsi_up(cfhsi->dev); 1300 res = cfhsi->dev->cfhsi_up(cfhsi->dev);
@@ -1175,21 +1308,10 @@ int cfhsi_probe(struct platform_device *pdev)
1175 /* Flush FIFO */ 1308 /* Flush FIFO */
1176 res = cfhsi_flush_fifo(cfhsi); 1309 res = cfhsi_flush_fifo(cfhsi);
1177 if (res) { 1310 if (res) {
1178 dev_err(&ndev->dev, "%s: Can't flush FIFO: %d.\n", 1311 dev_err(&cfhsi->ndev->dev, "%s: Can't flush FIFO: %d.\n",
1179 __func__, res); 1312 __func__, res);
1180 goto err_net_reg; 1313 goto err_net_reg;
1181 } 1314 }
1182
1183 /* Register network device. */
1184 res = register_netdev(ndev);
1185 if (res) {
1186 dev_err(&ndev->dev, "%s: Registration error: %d.\n",
1187 __func__, res);
1188 goto err_net_reg;
1189 }
1190
1191 netif_stop_queue(ndev);
1192
1193 return res; 1315 return res;
1194 1316
1195 err_net_reg: 1317 err_net_reg:
@@ -1203,18 +1325,14 @@ int cfhsi_probe(struct platform_device *pdev)
1203 err_alloc_rx: 1325 err_alloc_rx:
1204 kfree(cfhsi->tx_buf); 1326 kfree(cfhsi->tx_buf);
1205 err_alloc_tx: 1327 err_alloc_tx:
1206 free_netdev(ndev);
1207
1208 return res; 1328 return res;
1209} 1329}
1210 1330
1211static void cfhsi_shutdown(struct cfhsi *cfhsi) 1331static int cfhsi_close(struct net_device *ndev)
1212{ 1332{
1333 struct cfhsi *cfhsi = netdev_priv(ndev);
1213 u8 *tx_buf, *rx_buf, *flip_buf; 1334 u8 *tx_buf, *rx_buf, *flip_buf;
1214 1335
1215 /* Stop TXing */
1216 netif_tx_stop_all_queues(cfhsi->ndev);
1217
1218 /* going to shutdown driver */ 1336 /* going to shutdown driver */
1219 set_bit(CFHSI_SHUTDOWN, &cfhsi->bits); 1337 set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
1220 1338
@@ -1222,8 +1340,9 @@ static void cfhsi_shutdown(struct cfhsi *cfhsi)
1222 flush_workqueue(cfhsi->wq); 1340 flush_workqueue(cfhsi->wq);
1223 1341
1224 /* Delete timers if pending */ 1342 /* Delete timers if pending */
1225 del_timer_sync(&cfhsi->timer); 1343 del_timer_sync(&cfhsi->inactivity_timer);
1226 del_timer_sync(&cfhsi->rx_slowpath_timer); 1344 del_timer_sync(&cfhsi->rx_slowpath_timer);
1345 del_timer_sync(&cfhsi->aggregation_timer);
1227 1346
1228 /* Cancel pending RX request (if any) */ 1347 /* Cancel pending RX request (if any) */
1229 cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev); 1348 cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
@@ -1241,15 +1360,19 @@ static void cfhsi_shutdown(struct cfhsi *cfhsi)
1241 /* Deactivate interface */ 1360 /* Deactivate interface */
1242 cfhsi->dev->cfhsi_down(cfhsi->dev); 1361 cfhsi->dev->cfhsi_down(cfhsi->dev);
1243 1362
1244 /* Finally unregister the network device. */
1245 unregister_netdev(cfhsi->ndev);
1246
1247 /* Free buffers. */ 1363 /* Free buffers. */
1248 kfree(tx_buf); 1364 kfree(tx_buf);
1249 kfree(rx_buf); 1365 kfree(rx_buf);
1250 kfree(flip_buf); 1366 kfree(flip_buf);
1367 return 0;
1251} 1368}
1252 1369
1370static const struct net_device_ops cfhsi_ops = {
1371 .ndo_open = cfhsi_open,
1372 .ndo_stop = cfhsi_close,
1373 .ndo_start_xmit = cfhsi_xmit
1374};
1375
1253int cfhsi_remove(struct platform_device *pdev) 1376int cfhsi_remove(struct platform_device *pdev)
1254{ 1377{
1255 struct list_head *list_node; 1378 struct list_head *list_node;
@@ -1266,10 +1389,6 @@ int cfhsi_remove(struct platform_device *pdev)
1266 /* Remove from list. */ 1389 /* Remove from list. */
1267 list_del(list_node); 1390 list_del(list_node);
1268 spin_unlock(&cfhsi_list_lock); 1391 spin_unlock(&cfhsi_list_lock);
1269
1270 /* Shutdown driver. */
1271 cfhsi_shutdown(cfhsi);
1272
1273 return 0; 1392 return 0;
1274 } 1393 }
1275 } 1394 }
@@ -1300,8 +1419,7 @@ static void __exit cfhsi_exit_module(void)
1300 list_del(list_node); 1419 list_del(list_node);
1301 spin_unlock(&cfhsi_list_lock); 1420 spin_unlock(&cfhsi_list_lock);
1302 1421
1303 /* Shutdown driver. */ 1422 unregister_netdevice(cfhsi->ndev);
1304 cfhsi_shutdown(cfhsi);
1305 1423
1306 spin_lock(&cfhsi_list_lock); 1424 spin_lock(&cfhsi_list_lock);
1307 } 1425 }
@@ -1326,8 +1444,6 @@ static int __init cfhsi_init_module(void)
1326 goto err_dev_register; 1444 goto err_dev_register;
1327 } 1445 }
1328 1446
1329 return result;
1330
1331 err_dev_register: 1447 err_dev_register:
1332 return result; 1448 return result;
1333} 1449}
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
index 5b2041319a32..bc497d718858 100644
--- a/drivers/net/caif/caif_shmcore.c
+++ b/drivers/net/caif/caif_shmcore.c
@@ -13,6 +13,7 @@
13#include <linux/list.h> 13#include <linux/list.h>
14#include <linux/netdevice.h> 14#include <linux/netdevice.h>
15#include <linux/if_arp.h> 15#include <linux/if_arp.h>
16#include <linux/io.h>
16 17
17#include <net/caif/caif_device.h> 18#include <net/caif/caif_device.h>
18#include <net/caif/caif_shm.h> 19#include <net/caif/caif_shm.h>
@@ -647,6 +648,9 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
647 if (pshm_dev->shm_loopback) 648 if (pshm_dev->shm_loopback)
648 tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr; 649 tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr;
649 else 650 else
651 /*
652 * FIXME: the result of ioremap is not a pointer - arnd
653 */
650 tx_buf->desc_vptr = 654 tx_buf->desc_vptr =
651 ioremap(tx_buf->phy_addr, TX_BUF_SZ); 655 ioremap(tx_buf->phy_addr, TX_BUF_SZ);
652 656
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index c5fe3a3db8c9..f03d7a481a80 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -687,18 +687,19 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
687 687
688 if (priv->do_get_state) 688 if (priv->do_get_state)
689 priv->do_get_state(dev, &state); 689 priv->do_get_state(dev, &state);
690 NLA_PUT_U32(skb, IFLA_CAN_STATE, state); 690 if (nla_put_u32(skb, IFLA_CAN_STATE, state) ||
691 NLA_PUT(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm); 691 nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
692 NLA_PUT_U32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms); 692 nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
693 NLA_PUT(skb, IFLA_CAN_BITTIMING, 693 nla_put(skb, IFLA_CAN_BITTIMING,
694 sizeof(priv->bittiming), &priv->bittiming); 694 sizeof(priv->bittiming), &priv->bittiming) ||
695 NLA_PUT(skb, IFLA_CAN_CLOCK, sizeof(cm), &priv->clock); 695 nla_put(skb, IFLA_CAN_CLOCK, sizeof(cm), &priv->clock) ||
696 if (priv->do_get_berr_counter && !priv->do_get_berr_counter(dev, &bec)) 696 (priv->do_get_berr_counter &&
697 NLA_PUT(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec); 697 !priv->do_get_berr_counter(dev, &bec) &&
698 if (priv->bittiming_const) 698 nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
699 NLA_PUT(skb, IFLA_CAN_BITTIMING_CONST, 699 (priv->bittiming_const &&
700 sizeof(*priv->bittiming_const), priv->bittiming_const); 700 nla_put(skb, IFLA_CAN_BITTIMING_CONST,
701 701 sizeof(*priv->bittiming_const), priv->bittiming_const)))
702 goto nla_put_failure;
702 return 0; 703 return 0;
703 704
704nla_put_failure: 705nla_put_failure:
@@ -714,9 +715,9 @@ static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev)
714{ 715{
715 struct can_priv *priv = netdev_priv(dev); 716 struct can_priv *priv = netdev_priv(dev);
716 717
717 NLA_PUT(skb, IFLA_INFO_XSTATS, 718 if (nla_put(skb, IFLA_INFO_XSTATS,
718 sizeof(priv->can_stats), &priv->can_stats); 719 sizeof(priv->can_stats), &priv->can_stats))
719 720 goto nla_put_failure;
720 return 0; 721 return 0;
721 722
722nla_put_failure: 723nla_put_failure:
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 2bb215e00eb1..1226297e7676 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -1274,17 +1274,7 @@ static struct pci_driver pch_can_pci_driver = {
1274 .resume = pch_can_resume, 1274 .resume = pch_can_resume,
1275}; 1275};
1276 1276
1277static int __init pch_can_pci_init(void) 1277module_pci_driver(pch_can_pci_driver);
1278{
1279 return pci_register_driver(&pch_can_pci_driver);
1280}
1281module_init(pch_can_pci_init);
1282
1283static void __exit pch_can_pci_exit(void)
1284{
1285 pci_unregister_driver(&pch_can_pci_driver);
1286}
1287module_exit(pch_can_pci_exit);
1288 1278
1289MODULE_DESCRIPTION("Intel EG20T PCH CAN(Controller Area Network) Driver"); 1279MODULE_DESCRIPTION("Intel EG20T PCH CAN(Controller Area Network) Driver");
1290MODULE_LICENSE("GPL v2"); 1280MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index 36f4f9780c30..5c6d412bafb5 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -371,16 +371,4 @@ static struct pci_driver ems_pci_driver = {
371 .remove = ems_pci_del_card, 371 .remove = ems_pci_del_card,
372}; 372};
373 373
374static int __init ems_pci_init(void) 374module_pci_driver(ems_pci_driver);
375{
376 return pci_register_driver(&ems_pci_driver);
377}
378
379static void __exit ems_pci_exit(void)
380{
381 pci_unregister_driver(&ems_pci_driver);
382}
383
384module_init(ems_pci_init);
385module_exit(ems_pci_exit);
386
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index ed004cebd31f..23ed6ea4c7c3 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -397,15 +397,4 @@ static struct pci_driver kvaser_pci_driver = {
397 .remove = __devexit_p(kvaser_pci_remove_one), 397 .remove = __devexit_p(kvaser_pci_remove_one),
398}; 398};
399 399
400static int __init kvaser_pci_init(void) 400module_pci_driver(kvaser_pci_driver);
401{
402 return pci_register_driver(&kvaser_pci_driver);
403}
404
405static void __exit kvaser_pci_exit(void)
406{
407 pci_unregister_driver(&kvaser_pci_driver);
408}
409
410module_init(kvaser_pci_init);
411module_exit(kvaser_pci_exit);
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 5f92b865f64b..f0a12962f7b6 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -749,14 +749,4 @@ static struct pci_driver peak_pci_driver = {
749 .remove = __devexit_p(peak_pci_remove), 749 .remove = __devexit_p(peak_pci_remove),
750}; 750};
751 751
752static int __init peak_pci_init(void) 752module_pci_driver(peak_pci_driver);
753{
754 return pci_register_driver(&peak_pci_driver);
755}
756module_init(peak_pci_init);
757
758static void __exit peak_pci_exit(void)
759{
760 pci_unregister_driver(&peak_pci_driver);
761}
762module_exit(peak_pci_exit);
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index a227586ddd52..8bc95982840f 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -609,15 +609,4 @@ static struct pci_driver plx_pci_driver = {
609 .remove = plx_pci_del_card, 609 .remove = plx_pci_del_card,
610}; 610};
611 611
612static int __init plx_pci_init(void) 612module_pci_driver(plx_pci_driver);
613{
614 return pci_register_driver(&plx_pci_driver);
615}
616
617static void __exit plx_pci_exit(void)
618{
619 pci_unregister_driver(&plx_pci_driver);
620}
621
622module_init(plx_pci_init);
623module_exit(plx_pci_exit);
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index e04ade444247..910895c5ec97 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -60,6 +60,7 @@ config PCMCIA_AXNET
60config AX88796 60config AX88796
61 tristate "ASIX AX88796 NE2000 clone support" 61 tristate "ASIX AX88796 NE2000 clone support"
62 depends on (ARM || MIPS || SUPERH) 62 depends on (ARM || MIPS || SUPERH)
63 select CRC32
63 select PHYLIB 64 select PHYLIB
64 select MDIO_BITBANG 65 select MDIO_BITBANG
65 ---help--- 66 ---help---
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 11476ca95e93..203ff9dccadb 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -501,6 +501,7 @@ static const struct ethtool_ops ax_ethtool_ops = {
501 .get_settings = ax_get_settings, 501 .get_settings = ax_get_settings,
502 .set_settings = ax_set_settings, 502 .set_settings = ax_set_settings,
503 .get_link = ethtool_op_get_link, 503 .get_link = ethtool_op_get_link,
504 .get_ts_info = ethtool_op_get_ts_info,
504}; 505};
505 506
506#ifdef CONFIG_AX88796_93CX6 507#ifdef CONFIG_AX88796_93CX6
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index dbefd5658c14..8322c54972f3 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -635,6 +635,7 @@ static const struct ethtool_ops etherh_ethtool_ops = {
635 .get_settings = etherh_get_settings, 635 .get_settings = etherh_get_settings,
636 .set_settings = etherh_set_settings, 636 .set_settings = etherh_set_settings,
637 .get_drvinfo = etherh_get_drvinfo, 637 .get_drvinfo = etherh_get_drvinfo,
638 .get_ts_info = ethtool_op_get_ts_info,
638}; 639};
639 640
640static const struct net_device_ops etherh_netdev_ops = { 641static const struct net_device_ops etherh_netdev_ops = {
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index c63a64cb6085..a11af5cc4844 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -174,6 +174,7 @@ source "drivers/net/ethernet/tile/Kconfig"
174source "drivers/net/ethernet/toshiba/Kconfig" 174source "drivers/net/ethernet/toshiba/Kconfig"
175source "drivers/net/ethernet/tundra/Kconfig" 175source "drivers/net/ethernet/tundra/Kconfig"
176source "drivers/net/ethernet/via/Kconfig" 176source "drivers/net/ethernet/via/Kconfig"
177source "drivers/net/ethernet/wiznet/Kconfig"
177source "drivers/net/ethernet/xilinx/Kconfig" 178source "drivers/net/ethernet/xilinx/Kconfig"
178source "drivers/net/ethernet/xircom/Kconfig" 179source "drivers/net/ethernet/xircom/Kconfig"
179 180
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 9676a5109d94..878ad32b93f2 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -73,5 +73,6 @@ obj-$(CONFIG_TILE_NET) += tile/
73obj-$(CONFIG_NET_VENDOR_TOSHIBA) += toshiba/ 73obj-$(CONFIG_NET_VENDOR_TOSHIBA) += toshiba/
74obj-$(CONFIG_NET_VENDOR_TUNDRA) += tundra/ 74obj-$(CONFIG_NET_VENDOR_TUNDRA) += tundra/
75obj-$(CONFIG_NET_VENDOR_VIA) += via/ 75obj-$(CONFIG_NET_VENDOR_VIA) += via/
76obj-$(CONFIG_NET_VENDOR_WIZNET) += wiznet/
76obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/ 77obj-$(CONFIG_NET_VENDOR_XILINX) += xilinx/
77obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/ 78obj-$(CONFIG_NET_VENDOR_XIRCOM) += xircom/
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index d896816512ca..d920a529ba22 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -114,15 +114,6 @@ static int rx_copybreak /* = 0 */;
114#define DMA_BURST_SIZE 128 114#define DMA_BURST_SIZE 128
115#endif 115#endif
116 116
117/* Used to pass the media type, etc.
118 Both 'options[]' and 'full_duplex[]' exist for driver interoperability.
119 The media type is usually passed in 'options[]'.
120 These variables are deprecated, use ethtool instead. -Ion
121*/
122#define MAX_UNITS 8 /* More are supported, limit only on options */
123static int options[MAX_UNITS] = {0, };
124static int full_duplex[MAX_UNITS] = {0, };
125
126/* Operational parameters that are set at compile time. */ 117/* Operational parameters that are set at compile time. */
127 118
128/* The "native" ring sizes are either 256 or 2048. 119/* The "native" ring sizes are either 256 or 2048.
@@ -192,8 +183,6 @@ module_param(debug, int, 0);
192module_param(rx_copybreak, int, 0); 183module_param(rx_copybreak, int, 0);
193module_param(intr_latency, int, 0); 184module_param(intr_latency, int, 0);
194module_param(small_frames, int, 0); 185module_param(small_frames, int, 0);
195module_param_array(options, int, NULL, 0);
196module_param_array(full_duplex, int, NULL, 0);
197module_param(enable_hw_cksum, int, 0); 186module_param(enable_hw_cksum, int, 0);
198MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt"); 187MODULE_PARM_DESC(max_interrupt_work, "Maximum events handled per interrupt");
199MODULE_PARM_DESC(mtu, "MTU (all boards)"); 188MODULE_PARM_DESC(mtu, "MTU (all boards)");
@@ -201,8 +190,6 @@ MODULE_PARM_DESC(debug, "Debug level (0-6)");
201MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); 190MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
202MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds"); 191MODULE_PARM_DESC(intr_latency, "Maximum interrupt latency, in microseconds");
203MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)"); 192MODULE_PARM_DESC(small_frames, "Maximum size of receive frames that bypass interrupt latency (0,64,128,256,512)");
204MODULE_PARM_DESC(options, "Deprecated: Bits 0-3: media type, bit 17: full duplex");
205MODULE_PARM_DESC(full_duplex, "Deprecated: Forced full-duplex setting (0/1)");
206MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)"); 193MODULE_PARM_DESC(enable_hw_cksum, "Enable/disable hardware cksum support (0/1)");
207 194
208/* 195/*
@@ -657,10 +644,10 @@ static const struct net_device_ops netdev_ops = {
657static int __devinit starfire_init_one(struct pci_dev *pdev, 644static int __devinit starfire_init_one(struct pci_dev *pdev,
658 const struct pci_device_id *ent) 645 const struct pci_device_id *ent)
659{ 646{
647 struct device *d = &pdev->dev;
660 struct netdev_private *np; 648 struct netdev_private *np;
661 int i, irq, option, chip_idx = ent->driver_data; 649 int i, irq, chip_idx = ent->driver_data;
662 struct net_device *dev; 650 struct net_device *dev;
663 static int card_idx = -1;
664 long ioaddr; 651 long ioaddr;
665 void __iomem *base; 652 void __iomem *base;
666 int drv_flags, io_size; 653 int drv_flags, io_size;
@@ -673,15 +660,13 @@ static int __devinit starfire_init_one(struct pci_dev *pdev,
673 printk(version); 660 printk(version);
674#endif 661#endif
675 662
676 card_idx++;
677
678 if (pci_enable_device (pdev)) 663 if (pci_enable_device (pdev))
679 return -EIO; 664 return -EIO;
680 665
681 ioaddr = pci_resource_start(pdev, 0); 666 ioaddr = pci_resource_start(pdev, 0);
682 io_size = pci_resource_len(pdev, 0); 667 io_size = pci_resource_len(pdev, 0);
683 if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) { 668 if (!ioaddr || ((pci_resource_flags(pdev, 0) & IORESOURCE_MEM) == 0)) {
684 printk(KERN_ERR DRV_NAME " %d: no PCI MEM resources, aborting\n", card_idx); 669 dev_err(d, "no PCI MEM resources, aborting\n");
685 return -ENODEV; 670 return -ENODEV;
686 } 671 }
687 672
@@ -694,14 +679,14 @@ static int __devinit starfire_init_one(struct pci_dev *pdev,
694 irq = pdev->irq; 679 irq = pdev->irq;
695 680
696 if (pci_request_regions (pdev, DRV_NAME)) { 681 if (pci_request_regions (pdev, DRV_NAME)) {
697 printk(KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", card_idx); 682 dev_err(d, "cannot reserve PCI resources, aborting\n");
698 goto err_out_free_netdev; 683 goto err_out_free_netdev;
699 } 684 }
700 685
701 base = ioremap(ioaddr, io_size); 686 base = ioremap(ioaddr, io_size);
702 if (!base) { 687 if (!base) {
703 printk(KERN_ERR DRV_NAME " %d: cannot remap %#x @ %#lx, aborting\n", 688 dev_err(d, "cannot remap %#x @ %#lx, aborting\n",
704 card_idx, io_size, ioaddr); 689 io_size, ioaddr);
705 goto err_out_free_res; 690 goto err_out_free_res;
706 } 691 }
707 692
@@ -753,9 +738,6 @@ static int __devinit starfire_init_one(struct pci_dev *pdev,
753 /* wait a little longer */ 738 /* wait a little longer */
754 udelay(1000); 739 udelay(1000);
755 740
756 dev->base_addr = (unsigned long)base;
757 dev->irq = irq;
758
759 np = netdev_priv(dev); 741 np = netdev_priv(dev);
760 np->dev = dev; 742 np->dev = dev;
761 np->base = base; 743 np->base = base;
@@ -772,21 +754,6 @@ static int __devinit starfire_init_one(struct pci_dev *pdev,
772 754
773 drv_flags = netdrv_tbl[chip_idx].drv_flags; 755 drv_flags = netdrv_tbl[chip_idx].drv_flags;
774 756
775 option = card_idx < MAX_UNITS ? options[card_idx] : 0;
776 if (dev->mem_start)
777 option = dev->mem_start;
778
779 /* The lower four bits are the media type. */
780 if (option & 0x200)
781 np->mii_if.full_duplex = 1;
782
783 if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
784 np->mii_if.full_duplex = 1;
785
786 if (np->mii_if.full_duplex)
787 np->mii_if.force_media = 1;
788 else
789 np->mii_if.force_media = 0;
790 np->speed100 = 1; 757 np->speed100 = 1;
791 758
792 /* timer resolution is 128 * 0.8us */ 759 /* timer resolution is 128 * 0.8us */
@@ -909,13 +876,14 @@ static int netdev_open(struct net_device *dev)
909 const __be32 *fw_rx_data, *fw_tx_data; 876 const __be32 *fw_rx_data, *fw_tx_data;
910 struct netdev_private *np = netdev_priv(dev); 877 struct netdev_private *np = netdev_priv(dev);
911 void __iomem *ioaddr = np->base; 878 void __iomem *ioaddr = np->base;
879 const int irq = np->pci_dev->irq;
912 int i, retval; 880 int i, retval;
913 size_t tx_size, rx_size; 881 size_t tx_size, rx_size;
914 size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size; 882 size_t tx_done_q_size, rx_done_q_size, tx_ring_size, rx_ring_size;
915 883
916 /* Do we ever need to reset the chip??? */ 884 /* Do we ever need to reset the chip??? */
917 885
918 retval = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev); 886 retval = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
919 if (retval) 887 if (retval)
920 return retval; 888 return retval;
921 889
@@ -924,7 +892,7 @@ static int netdev_open(struct net_device *dev)
924 writel(1, ioaddr + PCIDeviceConfig); 892 writel(1, ioaddr + PCIDeviceConfig);
925 if (debug > 1) 893 if (debug > 1)
926 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n", 894 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
927 dev->name, dev->irq); 895 dev->name, irq);
928 896
929 /* Allocate the various queues. */ 897 /* Allocate the various queues. */
930 if (!np->queue_mem) { 898 if (!np->queue_mem) {
@@ -935,7 +903,7 @@ static int netdev_open(struct net_device *dev)
935 np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size; 903 np->queue_mem_size = tx_done_q_size + rx_done_q_size + tx_ring_size + rx_ring_size;
936 np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma); 904 np->queue_mem = pci_alloc_consistent(np->pci_dev, np->queue_mem_size, &np->queue_mem_dma);
937 if (np->queue_mem == NULL) { 905 if (np->queue_mem == NULL) {
938 free_irq(dev->irq, dev); 906 free_irq(irq, dev);
939 return -ENOMEM; 907 return -ENOMEM;
940 } 908 }
941 909
@@ -1962,7 +1930,7 @@ static int netdev_close(struct net_device *dev)
1962 } 1930 }
1963 } 1931 }
1964 1932
1965 free_irq(dev->irq, dev); 1933 free_irq(np->pci_dev->irq, dev);
1966 1934
1967 /* Free all the skbuffs in the Rx queue. */ 1935 /* Free all the skbuffs in the Rx queue. */
1968 for (i = 0; i < RX_RING_SIZE; i++) { 1936 for (i = 0; i < RX_RING_SIZE; i++) {
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index ab4daeccdf98..f816426e1085 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -548,6 +548,25 @@ static int bfin_mac_ethtool_setwol(struct net_device *dev,
548 return 0; 548 return 0;
549} 549}
550 550
551static int bfin_mac_ethtool_get_ts_info(struct net_device *dev,
552 struct ethtool_ts_info *info)
553{
554 info->so_timestamping =
555 SOF_TIMESTAMPING_TX_HARDWARE |
556 SOF_TIMESTAMPING_RX_HARDWARE |
557 SOF_TIMESTAMPING_SYS_HARDWARE;
558 info->phc_index = -1;
559 info->tx_types =
560 (1 << HWTSTAMP_TX_OFF) |
561 (1 << HWTSTAMP_TX_ON);
562 info->rx_filters =
563 (1 << HWTSTAMP_FILTER_NONE) |
564 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
565 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
566 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
567 return 0;
568}
569
551static const struct ethtool_ops bfin_mac_ethtool_ops = { 570static const struct ethtool_ops bfin_mac_ethtool_ops = {
552 .get_settings = bfin_mac_ethtool_getsettings, 571 .get_settings = bfin_mac_ethtool_getsettings,
553 .set_settings = bfin_mac_ethtool_setsettings, 572 .set_settings = bfin_mac_ethtool_setsettings,
@@ -555,6 +574,7 @@ static const struct ethtool_ops bfin_mac_ethtool_ops = {
555 .get_drvinfo = bfin_mac_ethtool_getdrvinfo, 574 .get_drvinfo = bfin_mac_ethtool_getdrvinfo,
556 .get_wol = bfin_mac_ethtool_getwol, 575 .get_wol = bfin_mac_ethtool_getwol,
557 .set_wol = bfin_mac_ethtool_setwol, 576 .set_wol = bfin_mac_ethtool_setwol,
577 .get_ts_info = bfin_mac_ethtool_get_ts_info,
558}; 578};
559 579
560/**************************************************************************/ 580/**************************************************************************/
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
index ca70e16b6e2c..b2bf324631dc 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
@@ -74,8 +74,6 @@
74 74
75#define AT_RX_BUF_SIZE (ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN) 75#define AT_RX_BUF_SIZE (ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)
76#define MAX_JUMBO_FRAME_SIZE (6*1024) 76#define MAX_JUMBO_FRAME_SIZE (6*1024)
77#define MAX_TSO_FRAME_SIZE (7*1024)
78#define MAX_TX_OFFLOAD_THRESH (9*1024)
79 77
80#define AT_MAX_RECEIVE_QUEUE 4 78#define AT_MAX_RECEIVE_QUEUE 4
81#define AT_DEF_RECEIVE_QUEUE 1 79#define AT_DEF_RECEIVE_QUEUE 1
@@ -100,7 +98,7 @@
100#define ATL1C_ASPM_L0s_ENABLE 0x0001 98#define ATL1C_ASPM_L0s_ENABLE 0x0001
101#define ATL1C_ASPM_L1_ENABLE 0x0002 99#define ATL1C_ASPM_L1_ENABLE 0x0002
102 100
103#define AT_REGS_LEN (75 * sizeof(u32)) 101#define AT_REGS_LEN (74 * sizeof(u32))
104#define AT_EEPROM_LEN 512 102#define AT_EEPROM_LEN 512
105 103
106#define ATL1C_GET_DESC(R, i, type) (&(((type *)((R)->desc))[i])) 104#define ATL1C_GET_DESC(R, i, type) (&(((type *)((R)->desc))[i]))
@@ -297,20 +295,6 @@ enum atl1c_dma_req_block {
297 atl1c_dma_req_4096 = 5 295 atl1c_dma_req_4096 = 5
298}; 296};
299 297
300enum atl1c_rss_mode {
301 atl1c_rss_mode_disable = 0,
302 atl1c_rss_sig_que = 1,
303 atl1c_rss_mul_que_sig_int = 2,
304 atl1c_rss_mul_que_mul_int = 4,
305};
306
307enum atl1c_rss_type {
308 atl1c_rss_disable = 0,
309 atl1c_rss_ipv4 = 1,
310 atl1c_rss_ipv4_tcp = 2,
311 atl1c_rss_ipv6 = 4,
312 atl1c_rss_ipv6_tcp = 8
313};
314 298
315enum atl1c_nic_type { 299enum atl1c_nic_type {
316 athr_l1c = 0, 300 athr_l1c = 0,
@@ -388,7 +372,6 @@ struct atl1c_hw {
388 enum atl1c_dma_order dma_order; 372 enum atl1c_dma_order dma_order;
389 enum atl1c_dma_rcb rcb_value; 373 enum atl1c_dma_rcb rcb_value;
390 enum atl1c_dma_req_block dmar_block; 374 enum atl1c_dma_req_block dmar_block;
391 enum atl1c_dma_req_block dmaw_block;
392 375
393 u16 device_id; 376 u16 device_id;
394 u16 vendor_id; 377 u16 vendor_id;
@@ -399,8 +382,6 @@ struct atl1c_hw {
399 u16 phy_id2; 382 u16 phy_id2;
400 383
401 u32 intr_mask; 384 u32 intr_mask;
402 u8 dmaw_dly_cnt;
403 u8 dmar_dly_cnt;
404 385
405 u8 preamble_len; 386 u8 preamble_len;
406 u16 max_frame_size; 387 u16 max_frame_size;
@@ -440,10 +421,6 @@ struct atl1c_hw {
440#define ATL1C_FPGA_VERSION 0x8000 421#define ATL1C_FPGA_VERSION 0x8000
441 u16 link_cap_flags; 422 u16 link_cap_flags;
442#define ATL1C_LINK_CAP_1000M 0x0001 423#define ATL1C_LINK_CAP_1000M 0x0001
443 u16 cmb_tpd;
444 u16 cmb_rrd;
445 u16 cmb_rx_timer; /* 2us resolution */
446 u16 cmb_tx_timer;
447 u32 smb_timer; 424 u32 smb_timer;
448 425
449 u16 rrd_thresh; /* Threshold of number of RRD produced to trigger 426 u16 rrd_thresh; /* Threshold of number of RRD produced to trigger
@@ -451,9 +428,6 @@ struct atl1c_hw {
451 u16 tpd_thresh; 428 u16 tpd_thresh;
452 u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned burst. */ 429 u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned burst. */
453 u8 rfd_burst; 430 u8 rfd_burst;
454 enum atl1c_rss_type rss_type;
455 enum atl1c_rss_mode rss_mode;
456 u8 rss_hash_bits;
457 u32 base_cpu; 431 u32 base_cpu;
458 u32 indirect_tab; 432 u32 indirect_tab;
459 u8 mac_addr[ETH_ALEN]; 433 u8 mac_addr[ETH_ALEN];
@@ -462,12 +436,12 @@ struct atl1c_hw {
462 bool phy_configured; 436 bool phy_configured;
463 bool re_autoneg; 437 bool re_autoneg;
464 bool emi_ca; 438 bool emi_ca;
439 bool msi_lnkpatch; /* link patch for specific platforms */
465}; 440};
466 441
467/* 442/*
468 * atl1c_ring_header represents a single, contiguous block of DMA space 443 * atl1c_ring_header represents a single, contiguous block of DMA space
469 * mapped for the three descriptor rings (tpd, rfd, rrd) and the two 444 * mapped for the three descriptor rings (tpd, rfd, rrd) described below
470 * message blocks (cmb, smb) described below
471 */ 445 */
472struct atl1c_ring_header { 446struct atl1c_ring_header {
473 void *desc; /* virtual address */ 447 void *desc; /* virtual address */
@@ -541,16 +515,6 @@ struct atl1c_rrd_ring {
541 u16 next_to_clean; 515 u16 next_to_clean;
542}; 516};
543 517
544struct atl1c_cmb {
545 void *cmb;
546 dma_addr_t dma;
547};
548
549struct atl1c_smb {
550 void *smb;
551 dma_addr_t dma;
552};
553
554/* board specific private data structure */ 518/* board specific private data structure */
555struct atl1c_adapter { 519struct atl1c_adapter {
556 struct net_device *netdev; 520 struct net_device *netdev;
@@ -586,11 +550,8 @@ struct atl1c_adapter {
586 /* All Descriptor memory */ 550 /* All Descriptor memory */
587 struct atl1c_ring_header ring_header; 551 struct atl1c_ring_header ring_header;
588 struct atl1c_tpd_ring tpd_ring[AT_MAX_TRANSMIT_QUEUE]; 552 struct atl1c_tpd_ring tpd_ring[AT_MAX_TRANSMIT_QUEUE];
589 struct atl1c_rfd_ring rfd_ring[AT_MAX_RECEIVE_QUEUE]; 553 struct atl1c_rfd_ring rfd_ring;
590 struct atl1c_rrd_ring rrd_ring[AT_MAX_RECEIVE_QUEUE]; 554 struct atl1c_rrd_ring rrd_ring;
591 struct atl1c_cmb cmb;
592 struct atl1c_smb smb;
593 int num_rx_queues;
594 u32 bd_number; /* board number;*/ 555 u32 bd_number; /* board number;*/
595}; 556};
596 557
@@ -618,8 +579,14 @@ struct atl1c_adapter {
618#define AT_WRITE_REGW(a, reg, value) (\ 579#define AT_WRITE_REGW(a, reg, value) (\
619 writew((value), ((a)->hw_addr + reg))) 580 writew((value), ((a)->hw_addr + reg)))
620 581
621#define AT_READ_REGW(a, reg) (\ 582#define AT_READ_REGW(a, reg, pdata) do { \
622 readw((a)->hw_addr + reg)) 583 if (unlikely((a)->hibernate)) { \
584 readw((a)->hw_addr + reg); \
585 *(u16 *)pdata = readw((a)->hw_addr + reg); \
586 } else { \
587 *(u16 *)pdata = readw((a)->hw_addr + reg); \
588 } \
589 } while (0)
623 590
624#define AT_WRITE_REG_ARRAY(a, reg, offset, value) ( \ 591#define AT_WRITE_REG_ARRAY(a, reg, offset, value) ( \
625 writel((value), (((a)->hw_addr + reg) + ((offset) << 2)))) 592 writel((value), (((a)->hw_addr + reg) + ((offset) << 2))))
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
index 0a9326aa58b5..859ea844ba0f 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
@@ -141,8 +141,7 @@ static void atl1c_get_regs(struct net_device *netdev,
141 141
142 memset(p, 0, AT_REGS_LEN); 142 memset(p, 0, AT_REGS_LEN);
143 143
144 regs->version = 0; 144 regs->version = 1;
145 AT_READ_REG(hw, REG_VPD_CAP, p++);
146 AT_READ_REG(hw, REG_PM_CTRL, p++); 145 AT_READ_REG(hw, REG_PM_CTRL, p++);
147 AT_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL, p++); 146 AT_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL, p++);
148 AT_READ_REG(hw, REG_TWSI_CTRL, p++); 147 AT_READ_REG(hw, REG_TWSI_CTRL, p++);
@@ -154,7 +153,7 @@ static void atl1c_get_regs(struct net_device *netdev,
154 AT_READ_REG(hw, REG_LINK_CTRL, p++); 153 AT_READ_REG(hw, REG_LINK_CTRL, p++);
155 AT_READ_REG(hw, REG_IDLE_STATUS, p++); 154 AT_READ_REG(hw, REG_IDLE_STATUS, p++);
156 AT_READ_REG(hw, REG_MDIO_CTRL, p++); 155 AT_READ_REG(hw, REG_MDIO_CTRL, p++);
157 AT_READ_REG(hw, REG_SERDES_LOCK, p++); 156 AT_READ_REG(hw, REG_SERDES, p++);
158 AT_READ_REG(hw, REG_MAC_CTRL, p++); 157 AT_READ_REG(hw, REG_MAC_CTRL, p++);
159 AT_READ_REG(hw, REG_MAC_IPG_IFG, p++); 158 AT_READ_REG(hw, REG_MAC_IPG_IFG, p++);
160 AT_READ_REG(hw, REG_MAC_STA_ADDR, p++); 159 AT_READ_REG(hw, REG_MAC_STA_ADDR, p++);
@@ -167,9 +166,9 @@ static void atl1c_get_regs(struct net_device *netdev,
167 AT_READ_REG(hw, REG_WOL_CTRL, p++); 166 AT_READ_REG(hw, REG_WOL_CTRL, p++);
168 167
169 atl1c_read_phy_reg(hw, MII_BMCR, &phy_data); 168 atl1c_read_phy_reg(hw, MII_BMCR, &phy_data);
170 regs_buff[73] = (u32) phy_data; 169 regs_buff[AT_REGS_LEN/sizeof(u32) - 2] = (u32) phy_data;
171 atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); 170 atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
172 regs_buff[74] = (u32) phy_data; 171 regs_buff[AT_REGS_LEN/sizeof(u32) - 1] = (u32) phy_data;
173} 172}
174 173
175static int atl1c_get_eeprom_len(struct net_device *netdev) 174static int atl1c_get_eeprom_len(struct net_device *netdev)
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
index bd1667cbffa6..ff9c73859d45 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
@@ -43,7 +43,7 @@ int atl1c_check_eeprom_exist(struct atl1c_hw *hw)
43 return 0; 43 return 0;
44} 44}
45 45
46void atl1c_hw_set_mac_addr(struct atl1c_hw *hw) 46void atl1c_hw_set_mac_addr(struct atl1c_hw *hw, u8 *mac_addr)
47{ 47{
48 u32 value; 48 u32 value;
49 /* 49 /*
@@ -51,35 +51,48 @@ void atl1c_hw_set_mac_addr(struct atl1c_hw *hw)
51 * 0: 6AF600DC 1: 000B 51 * 0: 6AF600DC 1: 000B
52 * low dword 52 * low dword
53 */ 53 */
54 value = (((u32)hw->mac_addr[2]) << 24) | 54 value = mac_addr[2] << 24 |
55 (((u32)hw->mac_addr[3]) << 16) | 55 mac_addr[3] << 16 |
56 (((u32)hw->mac_addr[4]) << 8) | 56 mac_addr[4] << 8 |
57 (((u32)hw->mac_addr[5])) ; 57 mac_addr[5];
58 AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value); 58 AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value);
59 /* hight dword */ 59 /* hight dword */
60 value = (((u32)hw->mac_addr[0]) << 8) | 60 value = mac_addr[0] << 8 |
61 (((u32)hw->mac_addr[1])) ; 61 mac_addr[1];
62 AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value); 62 AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value);
63} 63}
64 64
65/* read mac address from hardware register */
66static bool atl1c_read_current_addr(struct atl1c_hw *hw, u8 *eth_addr)
67{
68 u32 addr[2];
69
70 AT_READ_REG(hw, REG_MAC_STA_ADDR, &addr[0]);
71 AT_READ_REG(hw, REG_MAC_STA_ADDR + 4, &addr[1]);
72
73 *(u32 *) &eth_addr[2] = htonl(addr[0]);
74 *(u16 *) &eth_addr[0] = htons((u16)addr[1]);
75
76 return is_valid_ether_addr(eth_addr);
77}
78
65/* 79/*
66 * atl1c_get_permanent_address 80 * atl1c_get_permanent_address
67 * return 0 if get valid mac address, 81 * return 0 if get valid mac address,
68 */ 82 */
69static int atl1c_get_permanent_address(struct atl1c_hw *hw) 83static int atl1c_get_permanent_address(struct atl1c_hw *hw)
70{ 84{
71 u32 addr[2];
72 u32 i; 85 u32 i;
73 u32 otp_ctrl_data; 86 u32 otp_ctrl_data;
74 u32 twsi_ctrl_data; 87 u32 twsi_ctrl_data;
75 u32 ltssm_ctrl_data;
76 u32 wol_data;
77 u8 eth_addr[ETH_ALEN];
78 u16 phy_data; 88 u16 phy_data;
79 bool raise_vol = false; 89 bool raise_vol = false;
80 90
91 /* MAC-address from BIOS is the 1st priority */
92 if (atl1c_read_current_addr(hw, hw->perm_mac_addr))
93 return 0;
94
81 /* init */ 95 /* init */
82 addr[0] = addr[1] = 0;
83 AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data); 96 AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data);
84 if (atl1c_check_eeprom_exist(hw)) { 97 if (atl1c_check_eeprom_exist(hw)) {
85 if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) { 98 if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) {
@@ -91,33 +104,17 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
91 msleep(1); 104 msleep(1);
92 } 105 }
93 } 106 }
94 107 /* raise voltage temporally for l2cb */
95 if (hw->nic_type == athr_l2c_b || 108 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2) {
96 hw->nic_type == athr_l2c_b2 || 109 atl1c_read_phy_dbg(hw, MIIDBG_ANACTRL, &phy_data);
97 hw->nic_type == athr_l1d) { 110 phy_data &= ~ANACTRL_HB_EN;
98 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x00); 111 atl1c_write_phy_dbg(hw, MIIDBG_ANACTRL, phy_data);
99 if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data)) 112 atl1c_read_phy_dbg(hw, MIIDBG_VOLT_CTRL, &phy_data);
100 goto out; 113 phy_data |= VOLT_CTRL_SWLOWEST;
101 phy_data &= 0xFF7F; 114 atl1c_write_phy_dbg(hw, MIIDBG_VOLT_CTRL, phy_data);
102 atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
103
104 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B);
105 if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data))
106 goto out;
107 phy_data |= 0x8;
108 atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
109 udelay(20); 115 udelay(20);
110 raise_vol = true; 116 raise_vol = true;
111 } 117 }
112 /* close open bit of ReadOnly*/
113 AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &ltssm_ctrl_data);
114 ltssm_ctrl_data &= ~LTSSM_ID_EN_WRO;
115 AT_WRITE_REG(hw, REG_LTSSM_ID_CTRL, ltssm_ctrl_data);
116
117 /* clear any WOL settings */
118 AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
119 AT_READ_REG(hw, REG_WOL_CTRL, &wol_data);
120
121 118
122 AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data); 119 AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data);
123 twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART; 120 twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART;
@@ -138,37 +135,18 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
138 msleep(1); 135 msleep(1);
139 } 136 }
140 if (raise_vol) { 137 if (raise_vol) {
141 if (hw->nic_type == athr_l2c_b || 138 atl1c_read_phy_dbg(hw, MIIDBG_ANACTRL, &phy_data);
142 hw->nic_type == athr_l2c_b2 || 139 phy_data |= ANACTRL_HB_EN;
143 hw->nic_type == athr_l1d || 140 atl1c_write_phy_dbg(hw, MIIDBG_ANACTRL, phy_data);
144 hw->nic_type == athr_l1d_2) { 141 atl1c_read_phy_dbg(hw, MIIDBG_VOLT_CTRL, &phy_data);
145 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x00); 142 phy_data &= ~VOLT_CTRL_SWLOWEST;
146 if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data)) 143 atl1c_write_phy_dbg(hw, MIIDBG_VOLT_CTRL, phy_data);
147 goto out; 144 udelay(20);
148 phy_data |= 0x80;
149 atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
150
151 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B);
152 if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data))
153 goto out;
154 phy_data &= 0xFFF7;
155 atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
156 udelay(20);
157 }
158 } 145 }
159 146
160 /* maybe MAC-address is from BIOS */ 147 if (atl1c_read_current_addr(hw, hw->perm_mac_addr))
161 AT_READ_REG(hw, REG_MAC_STA_ADDR, &addr[0]);
162 AT_READ_REG(hw, REG_MAC_STA_ADDR + 4, &addr[1]);
163 *(u32 *) &eth_addr[2] = swab32(addr[0]);
164 *(u16 *) &eth_addr[0] = swab16(*(u16 *)&addr[1]);
165
166 if (is_valid_ether_addr(eth_addr)) {
167 memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
168 return 0; 148 return 0;
169 }
170 149
171out:
172 return -1; 150 return -1;
173} 151}
174 152
@@ -278,33 +256,158 @@ void atl1c_hash_set(struct atl1c_hw *hw, u32 hash_value)
278} 256}
279 257
280/* 258/*
281 * Reads the value from a PHY register 259 * wait mdio module be idle
282 * hw - Struct containing variables accessed by shared code 260 * return true: idle
283 * reg_addr - address of the PHY register to read 261 * false: still busy
284 */ 262 */
285int atl1c_read_phy_reg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data) 263bool atl1c_wait_mdio_idle(struct atl1c_hw *hw)
286{ 264{
287 u32 val; 265 u32 val;
288 int i; 266 int i;
289 267
290 val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT | 268 for (i = 0; i < MDIO_MAX_AC_TO; i++) {
291 MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | 269 AT_READ_REG(hw, REG_MDIO_CTRL, &val);
292 MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; 270 if (!(val & (MDIO_CTRL_BUSY | MDIO_CTRL_START)))
271 break;
272 udelay(10);
273 }
274
275 return i != MDIO_MAX_AC_TO;
276}
277
278void atl1c_stop_phy_polling(struct atl1c_hw *hw)
279{
280 if (!(hw->ctrl_flags & ATL1C_FPGA_VERSION))
281 return;
282
283 AT_WRITE_REG(hw, REG_MDIO_CTRL, 0);
284 atl1c_wait_mdio_idle(hw);
285}
286
287void atl1c_start_phy_polling(struct atl1c_hw *hw, u16 clk_sel)
288{
289 u32 val;
290
291 if (!(hw->ctrl_flags & ATL1C_FPGA_VERSION))
292 return;
293 293
294 val = MDIO_CTRL_SPRES_PRMBL |
295 FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) |
296 FIELDX(MDIO_CTRL_REG, 1) |
297 MDIO_CTRL_START |
298 MDIO_CTRL_OP_READ;
299 AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
300 atl1c_wait_mdio_idle(hw);
301 val |= MDIO_CTRL_AP_EN;
302 val &= ~MDIO_CTRL_START;
294 AT_WRITE_REG(hw, REG_MDIO_CTRL, val); 303 AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
304 udelay(30);
305}
295 306
296 for (i = 0; i < MDIO_WAIT_TIMES; i++) { 307
297 udelay(2); 308/*
298 AT_READ_REG(hw, REG_MDIO_CTRL, &val); 309 * atl1c_read_phy_core
299 if (!(val & (MDIO_START | MDIO_BUSY))) 310 * core funtion to read register in PHY via MDIO control regsiter.
300 break; 311 * ext: extension register (see IEEE 802.3)
312 * dev: device address (see IEEE 802.3 DEVAD, PRTAD is fixed to 0)
313 * reg: reg to read
314 */
315int atl1c_read_phy_core(struct atl1c_hw *hw, bool ext, u8 dev,
316 u16 reg, u16 *phy_data)
317{
318 u32 val;
319 u16 clk_sel = MDIO_CTRL_CLK_25_4;
320
321 atl1c_stop_phy_polling(hw);
322
323 *phy_data = 0;
324
325 /* only l2c_b2 & l1d_2 could use slow clock */
326 if ((hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) &&
327 hw->hibernate)
328 clk_sel = MDIO_CTRL_CLK_25_128;
329 if (ext) {
330 val = FIELDX(MDIO_EXTN_DEVAD, dev) | FIELDX(MDIO_EXTN_REG, reg);
331 AT_WRITE_REG(hw, REG_MDIO_EXTN, val);
332 val = MDIO_CTRL_SPRES_PRMBL |
333 FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) |
334 MDIO_CTRL_START |
335 MDIO_CTRL_MODE_EXT |
336 MDIO_CTRL_OP_READ;
337 } else {
338 val = MDIO_CTRL_SPRES_PRMBL |
339 FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) |
340 FIELDX(MDIO_CTRL_REG, reg) |
341 MDIO_CTRL_START |
342 MDIO_CTRL_OP_READ;
301 } 343 }
302 if (!(val & (MDIO_START | MDIO_BUSY))) { 344 AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
303 *phy_data = (u16)val; 345
304 return 0; 346 if (!atl1c_wait_mdio_idle(hw))
347 return -1;
348
349 AT_READ_REG(hw, REG_MDIO_CTRL, &val);
350 *phy_data = (u16)FIELD_GETX(val, MDIO_CTRL_DATA);
351
352 atl1c_start_phy_polling(hw, clk_sel);
353
354 return 0;
355}
356
357/*
358 * atl1c_write_phy_core
359 * core funtion to write to register in PHY via MDIO control regsiter.
360 * ext: extension register (see IEEE 802.3)
361 * dev: device address (see IEEE 802.3 DEVAD, PRTAD is fixed to 0)
362 * reg: reg to write
363 */
364int atl1c_write_phy_core(struct atl1c_hw *hw, bool ext, u8 dev,
365 u16 reg, u16 phy_data)
366{
367 u32 val;
368 u16 clk_sel = MDIO_CTRL_CLK_25_4;
369
370 atl1c_stop_phy_polling(hw);
371
372
373 /* only l2c_b2 & l1d_2 could use slow clock */
374 if ((hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) &&
375 hw->hibernate)
376 clk_sel = MDIO_CTRL_CLK_25_128;
377
378 if (ext) {
379 val = FIELDX(MDIO_EXTN_DEVAD, dev) | FIELDX(MDIO_EXTN_REG, reg);
380 AT_WRITE_REG(hw, REG_MDIO_EXTN, val);
381 val = MDIO_CTRL_SPRES_PRMBL |
382 FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) |
383 FIELDX(MDIO_CTRL_DATA, phy_data) |
384 MDIO_CTRL_START |
385 MDIO_CTRL_MODE_EXT;
386 } else {
387 val = MDIO_CTRL_SPRES_PRMBL |
388 FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) |
389 FIELDX(MDIO_CTRL_DATA, phy_data) |
390 FIELDX(MDIO_CTRL_REG, reg) |
391 MDIO_CTRL_START;
305 } 392 }
393 AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
306 394
307 return -1; 395 if (!atl1c_wait_mdio_idle(hw))
396 return -1;
397
398 atl1c_start_phy_polling(hw, clk_sel);
399
400 return 0;
401}
402
403/*
404 * Reads the value from a PHY register
405 * hw - Struct containing variables accessed by shared code
406 * reg_addr - address of the PHY register to read
407 */
408int atl1c_read_phy_reg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data)
409{
410 return atl1c_read_phy_core(hw, false, 0, reg_addr, phy_data);
308} 411}
309 412
310/* 413/*
@@ -315,27 +418,47 @@ int atl1c_read_phy_reg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data)
315 */ 418 */
316int atl1c_write_phy_reg(struct atl1c_hw *hw, u32 reg_addr, u16 phy_data) 419int atl1c_write_phy_reg(struct atl1c_hw *hw, u32 reg_addr, u16 phy_data)
317{ 420{
318 int i; 421 return atl1c_write_phy_core(hw, false, 0, reg_addr, phy_data);
319 u32 val; 422}
320 423
321 val = ((u32)(phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT | 424/* read from PHY extension register */
322 (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT | 425int atl1c_read_phy_ext(struct atl1c_hw *hw, u8 dev_addr,
323 MDIO_SUP_PREAMBLE | MDIO_START | 426 u16 reg_addr, u16 *phy_data)
324 MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; 427{
428 return atl1c_read_phy_core(hw, true, dev_addr, reg_addr, phy_data);
429}
325 430
326 AT_WRITE_REG(hw, REG_MDIO_CTRL, val); 431/* write to PHY extension register */
432int atl1c_write_phy_ext(struct atl1c_hw *hw, u8 dev_addr,
433 u16 reg_addr, u16 phy_data)
434{
435 return atl1c_write_phy_core(hw, true, dev_addr, reg_addr, phy_data);
436}
327 437
328 for (i = 0; i < MDIO_WAIT_TIMES; i++) { 438int atl1c_read_phy_dbg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data)
329 udelay(2); 439{
330 AT_READ_REG(hw, REG_MDIO_CTRL, &val); 440 int err;
331 if (!(val & (MDIO_START | MDIO_BUSY)))
332 break;
333 }
334 441
335 if (!(val & (MDIO_START | MDIO_BUSY))) 442 err = atl1c_write_phy_reg(hw, MII_DBG_ADDR, reg_addr);
336 return 0; 443 if (unlikely(err))
444 return err;
445 else
446 err = atl1c_read_phy_reg(hw, MII_DBG_DATA, phy_data);
337 447
338 return -1; 448 return err;
449}
450
451int atl1c_write_phy_dbg(struct atl1c_hw *hw, u16 reg_addr, u16 phy_data)
452{
453 int err;
454
455 err = atl1c_write_phy_reg(hw, MII_DBG_ADDR, reg_addr);
456 if (unlikely(err))
457 return err;
458 else
459 err = atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
460
461 return err;
339} 462}
340 463
341/* 464/*
@@ -380,119 +503,100 @@ static int atl1c_phy_setup_adv(struct atl1c_hw *hw)
380 503
381void atl1c_phy_disable(struct atl1c_hw *hw) 504void atl1c_phy_disable(struct atl1c_hw *hw)
382{ 505{
383 AT_WRITE_REGW(hw, REG_GPHY_CTRL, 506 atl1c_power_saving(hw, 0);
384 GPHY_CTRL_PW_WOL_DIS | GPHY_CTRL_EXT_RESET);
385} 507}
386 508
387static void atl1c_phy_magic_data(struct atl1c_hw *hw)
388{
389 u16 data;
390
391 data = ANA_LOOP_SEL_10BT | ANA_EN_MASK_TB | ANA_EN_10BT_IDLE |
392 ((1 & ANA_INTERVAL_SEL_TIMER_MASK) <<
393 ANA_INTERVAL_SEL_TIMER_SHIFT);
394
395 atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_18);
396 atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
397
398 data = (2 & ANA_SERDES_CDR_BW_MASK) | ANA_MS_PAD_DBG |
399 ANA_SERDES_EN_DEEM | ANA_SERDES_SEL_HSP | ANA_SERDES_EN_PLL |
400 ANA_SERDES_EN_LCKDT;
401
402 atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_5);
403 atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
404
405 data = (44 & ANA_LONG_CABLE_TH_100_MASK) |
406 ((33 & ANA_SHORT_CABLE_TH_100_MASK) <<
407 ANA_SHORT_CABLE_TH_100_SHIFT) | ANA_BP_BAD_LINK_ACCUM |
408 ANA_BP_SMALL_BW;
409
410 atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_54);
411 atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
412
413 data = (11 & ANA_IECHO_ADJ_MASK) | ((11 & ANA_IECHO_ADJ_MASK) <<
414 ANA_IECHO_ADJ_2_SHIFT) | ((8 & ANA_IECHO_ADJ_MASK) <<
415 ANA_IECHO_ADJ_1_SHIFT) | ((8 & ANA_IECHO_ADJ_MASK) <<
416 ANA_IECHO_ADJ_0_SHIFT);
417
418 atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_4);
419 atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
420
421 data = ANA_RESTART_CAL | ((7 & ANA_MANUL_SWICH_ON_MASK) <<
422 ANA_MANUL_SWICH_ON_SHIFT) | ANA_MAN_ENABLE |
423 ANA_SEL_HSP | ANA_EN_HB | ANA_OEN_125M;
424
425 atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_0);
426 atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
427
428 if (hw->ctrl_flags & ATL1C_HIB_DISABLE) {
429 atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_41);
430 if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &data) != 0)
431 return;
432 data &= ~ANA_TOP_PS_EN;
433 atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
434
435 atl1c_write_phy_reg(hw, MII_DBG_ADDR, MII_ANA_CTRL_11);
436 if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &data) != 0)
437 return;
438 data &= ~ANA_PS_HIB_EN;
439 atl1c_write_phy_reg(hw, MII_DBG_DATA, data);
440 }
441}
442 509
443int atl1c_phy_reset(struct atl1c_hw *hw) 510int atl1c_phy_reset(struct atl1c_hw *hw)
444{ 511{
445 struct atl1c_adapter *adapter = hw->adapter; 512 struct atl1c_adapter *adapter = hw->adapter;
446 struct pci_dev *pdev = adapter->pdev; 513 struct pci_dev *pdev = adapter->pdev;
447 u16 phy_data; 514 u16 phy_data;
448 u32 phy_ctrl_data = GPHY_CTRL_DEFAULT; 515 u32 phy_ctrl_data, lpi_ctrl;
449 u32 mii_ier_data = IER_LINK_UP | IER_LINK_DOWN;
450 int err; 516 int err;
451 517
452 if (hw->ctrl_flags & ATL1C_HIB_DISABLE) 518 /* reset PHY core */
453 phy_ctrl_data &= ~GPHY_CTRL_HIB_EN; 519 AT_READ_REG(hw, REG_GPHY_CTRL, &phy_ctrl_data);
454 520 phy_ctrl_data &= ~(GPHY_CTRL_EXT_RESET | GPHY_CTRL_PHY_IDDQ |
521 GPHY_CTRL_GATE_25M_EN | GPHY_CTRL_PWDOWN_HW | GPHY_CTRL_CLS);
522 phy_ctrl_data |= GPHY_CTRL_SEL_ANA_RST;
523 if (!(hw->ctrl_flags & ATL1C_HIB_DISABLE))
524 phy_ctrl_data |= (GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE);
525 else
526 phy_ctrl_data &= ~(GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE);
455 AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl_data); 527 AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl_data);
456 AT_WRITE_FLUSH(hw); 528 AT_WRITE_FLUSH(hw);
457 msleep(40); 529 udelay(10);
458 phy_ctrl_data |= GPHY_CTRL_EXT_RESET; 530 AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl_data | GPHY_CTRL_EXT_RESET);
459 AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl_data);
460 AT_WRITE_FLUSH(hw); 531 AT_WRITE_FLUSH(hw);
461 msleep(10); 532 udelay(10 * GPHY_CTRL_EXT_RST_TO); /* delay 800us */
462 533
534 /* switch clock */
463 if (hw->nic_type == athr_l2c_b) { 535 if (hw->nic_type == athr_l2c_b) {
464 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x0A); 536 atl1c_read_phy_dbg(hw, MIIDBG_CFGLPSPD, &phy_data);
465 atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data); 537 atl1c_write_phy_dbg(hw, MIIDBG_CFGLPSPD,
466 atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data & 0xDFFF); 538 phy_data & ~CFGLPSPD_RSTCNT_CLK125SW);
467 } 539 }
468 540
469 if (hw->nic_type == athr_l2c_b || 541 /* tx-half amplitude issue fix */
470 hw->nic_type == athr_l2c_b2 || 542 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2) {
471 hw->nic_type == athr_l1d || 543 atl1c_read_phy_dbg(hw, MIIDBG_CABLE1TH_DET, &phy_data);
472 hw->nic_type == athr_l1d_2) { 544 phy_data |= CABLE1TH_DET_EN;
473 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B); 545 atl1c_write_phy_dbg(hw, MIIDBG_CABLE1TH_DET, phy_data);
474 atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data);
475 atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data & 0xFFF7);
476 msleep(20);
477 } 546 }
478 if (hw->nic_type == athr_l1d) { 547
479 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29); 548 /* clear bit3 of dbgport 3B to lower voltage */
480 atl1c_write_phy_reg(hw, MII_DBG_DATA, 0x929D); 549 if (!(hw->ctrl_flags & ATL1C_HIB_DISABLE)) {
550 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2) {
551 atl1c_read_phy_dbg(hw, MIIDBG_VOLT_CTRL, &phy_data);
552 phy_data &= ~VOLT_CTRL_SWLOWEST;
553 atl1c_write_phy_dbg(hw, MIIDBG_VOLT_CTRL, phy_data);
554 }
555 /* power saving config */
556 phy_data =
557 hw->nic_type == athr_l1d || hw->nic_type == athr_l1d_2 ?
558 L1D_LEGCYPS_DEF : L1C_LEGCYPS_DEF;
559 atl1c_write_phy_dbg(hw, MIIDBG_LEGCYPS, phy_data);
560 /* hib */
561 atl1c_write_phy_dbg(hw, MIIDBG_SYSMODCTRL,
562 SYSMODCTRL_IECHOADJ_DEF);
563 } else {
564 /* disable pws */
565 atl1c_read_phy_dbg(hw, MIIDBG_LEGCYPS, &phy_data);
566 atl1c_write_phy_dbg(hw, MIIDBG_LEGCYPS,
567 phy_data & ~LEGCYPS_EN);
568 /* disable hibernate */
569 atl1c_read_phy_dbg(hw, MIIDBG_HIBNEG, &phy_data);
570 atl1c_write_phy_dbg(hw, MIIDBG_HIBNEG,
571 phy_data & HIBNEG_PSHIB_EN);
481 } 572 }
482 if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c_b2 573 /* disable AZ(EEE) by default */
483 || hw->nic_type == athr_l2c) { 574 if (hw->nic_type == athr_l1d || hw->nic_type == athr_l1d_2 ||
484 atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29); 575 hw->nic_type == athr_l2c_b2) {
485 atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB6DD); 576 AT_READ_REG(hw, REG_LPI_CTRL, &lpi_ctrl);
577 AT_WRITE_REG(hw, REG_LPI_CTRL, lpi_ctrl & ~LPI_CTRL_EN);
578 atl1c_write_phy_ext(hw, MIIEXT_ANEG, MIIEXT_LOCAL_EEEADV, 0);
579 atl1c_write_phy_ext(hw, MIIEXT_PCS, MIIEXT_CLDCTRL3,
580 L2CB_CLDCTRL3);
486 } 581 }
487 err = atl1c_write_phy_reg(hw, MII_IER, mii_ier_data); 582
583 /* other debug port to set */
584 atl1c_write_phy_dbg(hw, MIIDBG_ANACTRL, ANACTRL_DEF);
585 atl1c_write_phy_dbg(hw, MIIDBG_SRDSYSMOD, SRDSYSMOD_DEF);
586 atl1c_write_phy_dbg(hw, MIIDBG_TST10BTCFG, TST10BTCFG_DEF);
587 /* UNH-IOL test issue, set bit7 */
588 atl1c_write_phy_dbg(hw, MIIDBG_TST100BTCFG,
589 TST100BTCFG_DEF | TST100BTCFG_LITCH_EN);
590
591 /* set phy interrupt mask */
592 phy_data = IER_LINK_UP | IER_LINK_DOWN;
593 err = atl1c_write_phy_reg(hw, MII_IER, phy_data);
488 if (err) { 594 if (err) {
489 if (netif_msg_hw(adapter)) 595 if (netif_msg_hw(adapter))
490 dev_err(&pdev->dev, 596 dev_err(&pdev->dev,
491 "Error enable PHY linkChange Interrupt\n"); 597 "Error enable PHY linkChange Interrupt\n");
492 return err; 598 return err;
493 } 599 }
494 if (!(hw->ctrl_flags & ATL1C_FPGA_VERSION))
495 atl1c_phy_magic_data(hw);
496 return 0; 600 return 0;
497} 601}
498 602
@@ -589,7 +693,8 @@ int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex)
589 return 0; 693 return 0;
590} 694}
591 695
592int atl1c_phy_power_saving(struct atl1c_hw *hw) 696/* select one link mode to get lower power consumption */
697int atl1c_phy_to_ps_link(struct atl1c_hw *hw)
593{ 698{
594 struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter; 699 struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
595 struct pci_dev *pdev = adapter->pdev; 700 struct pci_dev *pdev = adapter->pdev;
@@ -660,3 +765,101 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw)
660 765
661 return atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data); 766 return atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data);
662} 767}
768
769int atl1c_power_saving(struct atl1c_hw *hw, u32 wufc)
770{
771 struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
772 struct pci_dev *pdev = adapter->pdev;
773 u32 master_ctrl, mac_ctrl, phy_ctrl;
774 u32 wol_ctrl, speed;
775 u16 phy_data;
776
777 wol_ctrl = 0;
778 speed = adapter->link_speed == SPEED_1000 ?
779 MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100;
780
781 AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl);
782 AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl);
783 AT_READ_REG(hw, REG_GPHY_CTRL, &phy_ctrl);
784
785 master_ctrl &= ~MASTER_CTRL_CLK_SEL_DIS;
786 mac_ctrl = FIELD_SETX(mac_ctrl, MAC_CTRL_SPEED, speed);
787 mac_ctrl &= ~(MAC_CTRL_DUPLX | MAC_CTRL_RX_EN | MAC_CTRL_TX_EN);
788 if (adapter->link_duplex == FULL_DUPLEX)
789 mac_ctrl |= MAC_CTRL_DUPLX;
790 phy_ctrl &= ~(GPHY_CTRL_EXT_RESET | GPHY_CTRL_CLS);
791 phy_ctrl |= GPHY_CTRL_SEL_ANA_RST | GPHY_CTRL_HIB_PULSE |
792 GPHY_CTRL_HIB_EN;
793 if (!wufc) { /* without WoL */
794 master_ctrl |= MASTER_CTRL_CLK_SEL_DIS;
795 phy_ctrl |= GPHY_CTRL_PHY_IDDQ | GPHY_CTRL_PWDOWN_HW;
796 AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl);
797 AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl);
798 AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl);
799 AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
800 hw->phy_configured = false; /* re-init PHY when resume */
801 return 0;
802 }
803 phy_ctrl |= GPHY_CTRL_EXT_RESET;
804 if (wufc & AT_WUFC_MAG) {
805 mac_ctrl |= MAC_CTRL_RX_EN | MAC_CTRL_BC_EN;
806 wol_ctrl |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
807 if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V11)
808 wol_ctrl |= WOL_PATTERN_EN | WOL_PATTERN_PME_EN;
809 }
810 if (wufc & AT_WUFC_LNKC) {
811 wol_ctrl |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
812 if (atl1c_write_phy_reg(hw, MII_IER, IER_LINK_UP) != 0) {
813 dev_dbg(&pdev->dev, "%s: write phy MII_IER faild.\n",
814 atl1c_driver_name);
815 }
816 }
817 /* clear PHY interrupt */
818 atl1c_read_phy_reg(hw, MII_ISR, &phy_data);
819
820 dev_dbg(&pdev->dev, "%s: suspend MAC=%x,MASTER=%x,PHY=0x%x,WOL=%x\n",
821 atl1c_driver_name, mac_ctrl, master_ctrl, phy_ctrl, wol_ctrl);
822 AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl);
823 AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl);
824 AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl);
825 AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl);
826
827 return 0;
828}
829
830
831/* configure phy after Link change Event */
832void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed)
833{
834 u16 phy_val;
835 bool adj_thresh = false;
836
837 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2 ||
838 hw->nic_type == athr_l1d || hw->nic_type == athr_l1d_2)
839 adj_thresh = true;
840
841 if (link_speed != SPEED_0) { /* link up */
842 /* az with brcm, half-amp */
843 if (hw->nic_type == athr_l1d_2) {
844 atl1c_read_phy_ext(hw, MIIEXT_PCS, MIIEXT_CLDCTRL6,
845 &phy_val);
846 phy_val = FIELD_GETX(phy_val, CLDCTRL6_CAB_LEN);
847 phy_val = phy_val > CLDCTRL6_CAB_LEN_SHORT ?
848 AZ_ANADECT_LONG : AZ_ANADECT_DEF;
849 atl1c_write_phy_dbg(hw, MIIDBG_AZ_ANADECT, phy_val);
850 }
851 /* threshold adjust */
852 if (adj_thresh && link_speed == SPEED_100 && hw->msi_lnkpatch) {
853 atl1c_write_phy_dbg(hw, MIIDBG_MSE16DB, L1D_MSE16DB_UP);
854 atl1c_write_phy_dbg(hw, MIIDBG_SYSMODCTRL,
855 L1D_SYSMODCTRL_IECHOADJ_DEF);
856 }
857 } else { /* link down */
858 if (adj_thresh && hw->msi_lnkpatch) {
859 atl1c_write_phy_dbg(hw, MIIDBG_SYSMODCTRL,
860 SYSMODCTRL_IECHOADJ_DEF);
861 atl1c_write_phy_dbg(hw, MIIDBG_MSE16DB,
862 L1D_MSE16DB_DOWN);
863 }
864 }
865}
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
index 655fc6c4a8a4..17d935bdde0a 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
@@ -25,12 +25,18 @@
25#include <linux/types.h> 25#include <linux/types.h>
26#include <linux/mii.h> 26#include <linux/mii.h>
27 27
28#define FIELD_GETX(_x, _name) ((_x) >> (_name##_SHIFT) & (_name##_MASK))
29#define FIELD_SETX(_x, _name, _v) \
30(((_x) & ~((_name##_MASK) << (_name##_SHIFT))) |\
31(((_v) & (_name##_MASK)) << (_name##_SHIFT)))
32#define FIELDX(_name, _v) (((_v) & (_name##_MASK)) << (_name##_SHIFT))
33
28struct atl1c_adapter; 34struct atl1c_adapter;
29struct atl1c_hw; 35struct atl1c_hw;
30 36
31/* function prototype */ 37/* function prototype */
32void atl1c_phy_disable(struct atl1c_hw *hw); 38void atl1c_phy_disable(struct atl1c_hw *hw);
33void atl1c_hw_set_mac_addr(struct atl1c_hw *hw); 39void atl1c_hw_set_mac_addr(struct atl1c_hw *hw, u8 *mac_addr);
34int atl1c_phy_reset(struct atl1c_hw *hw); 40int atl1c_phy_reset(struct atl1c_hw *hw);
35int atl1c_read_mac_addr(struct atl1c_hw *hw); 41int atl1c_read_mac_addr(struct atl1c_hw *hw);
36int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex); 42int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex);
@@ -42,47 +48,45 @@ bool atl1c_read_eeprom(struct atl1c_hw *hw, u32 offset, u32 *p_value);
42int atl1c_phy_init(struct atl1c_hw *hw); 48int atl1c_phy_init(struct atl1c_hw *hw);
43int atl1c_check_eeprom_exist(struct atl1c_hw *hw); 49int atl1c_check_eeprom_exist(struct atl1c_hw *hw);
44int atl1c_restart_autoneg(struct atl1c_hw *hw); 50int atl1c_restart_autoneg(struct atl1c_hw *hw);
45int atl1c_phy_power_saving(struct atl1c_hw *hw); 51int atl1c_phy_to_ps_link(struct atl1c_hw *hw);
52int atl1c_power_saving(struct atl1c_hw *hw, u32 wufc);
53bool atl1c_wait_mdio_idle(struct atl1c_hw *hw);
54void atl1c_stop_phy_polling(struct atl1c_hw *hw);
55void atl1c_start_phy_polling(struct atl1c_hw *hw, u16 clk_sel);
56int atl1c_read_phy_core(struct atl1c_hw *hw, bool ext, u8 dev,
57 u16 reg, u16 *phy_data);
58int atl1c_write_phy_core(struct atl1c_hw *hw, bool ext, u8 dev,
59 u16 reg, u16 phy_data);
60int atl1c_read_phy_ext(struct atl1c_hw *hw, u8 dev_addr,
61 u16 reg_addr, u16 *phy_data);
62int atl1c_write_phy_ext(struct atl1c_hw *hw, u8 dev_addr,
63 u16 reg_addr, u16 phy_data);
64int atl1c_read_phy_dbg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data);
65int atl1c_write_phy_dbg(struct atl1c_hw *hw, u16 reg_addr, u16 phy_data);
66void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed);
67
68/* hw-ids */
69#define PCI_DEVICE_ID_ATTANSIC_L2C 0x1062
70#define PCI_DEVICE_ID_ATTANSIC_L1C 0x1063
71#define PCI_DEVICE_ID_ATHEROS_L2C_B 0x2060 /* AR8152 v1.1 Fast 10/100 */
72#define PCI_DEVICE_ID_ATHEROS_L2C_B2 0x2062 /* AR8152 v2.0 Fast 10/100 */
73#define PCI_DEVICE_ID_ATHEROS_L1D 0x1073 /* AR8151 v1.0 Gigabit 1000 */
74#define PCI_DEVICE_ID_ATHEROS_L1D_2_0 0x1083 /* AR8151 v2.0 Gigabit 1000 */
75#define L2CB_V10 0xc0
76#define L2CB_V11 0xc1
77
46/* register definition */ 78/* register definition */
47#define REG_DEVICE_CAP 0x5C 79#define REG_DEVICE_CAP 0x5C
48#define DEVICE_CAP_MAX_PAYLOAD_MASK 0x7 80#define DEVICE_CAP_MAX_PAYLOAD_MASK 0x7
49#define DEVICE_CAP_MAX_PAYLOAD_SHIFT 0 81#define DEVICE_CAP_MAX_PAYLOAD_SHIFT 0
50 82
51#define REG_DEVICE_CTRL 0x60 83#define DEVICE_CTRL_MAXRRS_MIN 2
52#define DEVICE_CTRL_MAX_PAYLOAD_MASK 0x7
53#define DEVICE_CTRL_MAX_PAYLOAD_SHIFT 5
54#define DEVICE_CTRL_MAX_RREQ_SZ_MASK 0x7
55#define DEVICE_CTRL_MAX_RREQ_SZ_SHIFT 12
56 84
57#define REG_LINK_CTRL 0x68 85#define REG_LINK_CTRL 0x68
58#define LINK_CTRL_L0S_EN 0x01 86#define LINK_CTRL_L0S_EN 0x01
59#define LINK_CTRL_L1_EN 0x02 87#define LINK_CTRL_L1_EN 0x02
60#define LINK_CTRL_EXT_SYNC 0x80 88#define LINK_CTRL_EXT_SYNC 0x80
61 89
62#define REG_VPD_CAP 0x6C
63#define VPD_CAP_ID_MASK 0xff
64#define VPD_CAP_ID_SHIFT 0
65#define VPD_CAP_NEXT_PTR_MASK 0xFF
66#define VPD_CAP_NEXT_PTR_SHIFT 8
67#define VPD_CAP_VPD_ADDR_MASK 0x7FFF
68#define VPD_CAP_VPD_ADDR_SHIFT 16
69#define VPD_CAP_VPD_FLAG 0x80000000
70
71#define REG_VPD_DATA 0x70
72
73#define REG_PCIE_UC_SEVERITY 0x10C
74#define PCIE_UC_SERVRITY_TRN 0x00000001
75#define PCIE_UC_SERVRITY_DLP 0x00000010
76#define PCIE_UC_SERVRITY_PSN_TLP 0x00001000
77#define PCIE_UC_SERVRITY_FCP 0x00002000
78#define PCIE_UC_SERVRITY_CPL_TO 0x00004000
79#define PCIE_UC_SERVRITY_CA 0x00008000
80#define PCIE_UC_SERVRITY_UC 0x00010000
81#define PCIE_UC_SERVRITY_ROV 0x00020000
82#define PCIE_UC_SERVRITY_MLFP 0x00040000
83#define PCIE_UC_SERVRITY_ECRC 0x00080000
84#define PCIE_UC_SERVRITY_UR 0x00100000
85
86#define REG_DEV_SERIALNUM_CTRL 0x200 90#define REG_DEV_SERIALNUM_CTRL 0x200
87#define REG_DEV_MAC_SEL_MASK 0x0 /* 0:EUI; 1:MAC */ 91#define REG_DEV_MAC_SEL_MASK 0x0 /* 0:EUI; 1:MAC */
88#define REG_DEV_MAC_SEL_SHIFT 0 92#define REG_DEV_MAC_SEL_SHIFT 0
@@ -90,25 +94,17 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
90#define REG_DEV_SERIAL_NUM_EN_SHIFT 1 94#define REG_DEV_SERIAL_NUM_EN_SHIFT 1
91 95
92#define REG_TWSI_CTRL 0x218 96#define REG_TWSI_CTRL 0x218
97#define TWSI_CTLR_FREQ_MASK 0x3UL
98#define TWSI_CTRL_FREQ_SHIFT 24
99#define TWSI_CTRL_FREQ_100K 0
100#define TWSI_CTRL_FREQ_200K 1
101#define TWSI_CTRL_FREQ_300K 2
102#define TWSI_CTRL_FREQ_400K 3
103#define TWSI_CTRL_LD_EXIST BIT(23)
104#define TWSI_CTRL_HW_LDSTAT BIT(12) /* 0:finish,1:in progress */
105#define TWSI_CTRL_SW_LDSTART BIT(11)
93#define TWSI_CTRL_LD_OFFSET_MASK 0xFF 106#define TWSI_CTRL_LD_OFFSET_MASK 0xFF
94#define TWSI_CTRL_LD_OFFSET_SHIFT 0 107#define TWSI_CTRL_LD_OFFSET_SHIFT 0
95#define TWSI_CTRL_LD_SLV_ADDR_MASK 0x7
96#define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8
97#define TWSI_CTRL_SW_LDSTART 0x800
98#define TWSI_CTRL_HW_LDSTART 0x1000
99#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F
100#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15
101#define TWSI_CTRL_LD_EXIST 0x400000
102#define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3
103#define TWSI_CTRL_READ_FREQ_SEL_SHIFT 23
104#define TWSI_CTRL_FREQ_SEL_100K 0
105#define TWSI_CTRL_FREQ_SEL_200K 1
106#define TWSI_CTRL_FREQ_SEL_300K 2
107#define TWSI_CTRL_FREQ_SEL_400K 3
108#define TWSI_CTRL_SMB_SLV_ADDR
109#define TWSI_CTRL_WRITE_FREQ_SEL_MASK 0x3
110#define TWSI_CTRL_WRITE_FREQ_SEL_SHIFT 24
111
112 108
113#define REG_PCIE_DEV_MISC_CTRL 0x21C 109#define REG_PCIE_DEV_MISC_CTRL 0x21C
114#define PCIE_DEV_MISC_EXT_PIPE 0x2 110#define PCIE_DEV_MISC_EXT_PIPE 0x2
@@ -118,16 +114,23 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
118#define PCIE_DEV_MISC_SERDES_SEL_DIN 0x10 114#define PCIE_DEV_MISC_SERDES_SEL_DIN 0x10
119 115
120#define REG_PCIE_PHYMISC 0x1000 116#define REG_PCIE_PHYMISC 0x1000
121#define PCIE_PHYMISC_FORCE_RCV_DET 0x4 117#define PCIE_PHYMISC_FORCE_RCV_DET BIT(2)
118#define PCIE_PHYMISC_NFTS_MASK 0xFFUL
119#define PCIE_PHYMISC_NFTS_SHIFT 16
122 120
123#define REG_PCIE_PHYMISC2 0x1004 121#define REG_PCIE_PHYMISC2 0x1004
124#define PCIE_PHYMISC2_SERDES_CDR_MASK 0x3 122#define PCIE_PHYMISC2_L0S_TH_MASK 0x3UL
125#define PCIE_PHYMISC2_SERDES_CDR_SHIFT 16 123#define PCIE_PHYMISC2_L0S_TH_SHIFT 18
126#define PCIE_PHYMISC2_SERDES_TH_MASK 0x3 124#define L2CB1_PCIE_PHYMISC2_L0S_TH 3
127#define PCIE_PHYMISC2_SERDES_TH_SHIFT 18 125#define PCIE_PHYMISC2_CDR_BW_MASK 0x3UL
126#define PCIE_PHYMISC2_CDR_BW_SHIFT 16
127#define L2CB1_PCIE_PHYMISC2_CDR_BW 3
128 128
129#define REG_TWSI_DEBUG 0x1108 129#define REG_TWSI_DEBUG 0x1108
130#define TWSI_DEBUG_DEV_EXIST 0x20000000 130#define TWSI_DEBUG_DEV_EXIST BIT(29)
131
132#define REG_DMA_DBG 0x1114
133#define DMA_DBG_VENDOR_MSG BIT(0)
131 134
132#define REG_EEPROM_CTRL 0x12C0 135#define REG_EEPROM_CTRL 0x12C0
133#define EEPROM_CTRL_DATA_HI_MASK 0xFFFF 136#define EEPROM_CTRL_DATA_HI_MASK 0xFFFF
@@ -140,56 +143,81 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
140#define REG_EEPROM_DATA_LO 0x12C4 143#define REG_EEPROM_DATA_LO 0x12C4
141 144
142#define REG_OTP_CTRL 0x12F0 145#define REG_OTP_CTRL 0x12F0
143#define OTP_CTRL_CLK_EN 0x0002 146#define OTP_CTRL_CLK_EN BIT(1)
144 147
145#define REG_PM_CTRL 0x12F8 148#define REG_PM_CTRL 0x12F8
146#define PM_CTRL_SDES_EN 0x00000001 149#define PM_CTRL_HOTRST BIT(31)
147#define PM_CTRL_RBER_EN 0x00000002 150#define PM_CTRL_MAC_ASPM_CHK BIT(30) /* L0s/L1 dis by MAC based on
148#define PM_CTRL_CLK_REQ_EN 0x00000004 151 * thrghput(setting in 15A0) */
149#define PM_CTRL_ASPM_L1_EN 0x00000008 152#define PM_CTRL_SA_DLY_EN BIT(29)
150#define PM_CTRL_SERDES_L1_EN 0x00000010 153#define PM_CTRL_L0S_BUFSRX_EN BIT(28)
151#define PM_CTRL_SERDES_PLL_L1_EN 0x00000020 154#define PM_CTRL_LCKDET_TIMER_MASK 0xFUL
152#define PM_CTRL_SERDES_PD_EX_L1 0x00000040
153#define PM_CTRL_SERDES_BUDS_RX_L1_EN 0x00000080
154#define PM_CTRL_L0S_ENTRY_TIMER_MASK 0xF
155#define PM_CTRL_L0S_ENTRY_TIMER_SHIFT 8
156#define PM_CTRL_ASPM_L0S_EN 0x00001000
157#define PM_CTRL_CLK_SWH_L1 0x00002000
158#define PM_CTRL_CLK_PWM_VER1_1 0x00004000
159#define PM_CTRL_RCVR_WT_TIMER 0x00008000
160#define PM_CTRL_L1_ENTRY_TIMER_MASK 0xF
161#define PM_CTRL_L1_ENTRY_TIMER_SHIFT 16
162#define PM_CTRL_PM_REQ_TIMER_MASK 0xF
163#define PM_CTRL_PM_REQ_TIMER_SHIFT 20
164#define PM_CTRL_LCKDET_TIMER_MASK 0xF
165#define PM_CTRL_LCKDET_TIMER_SHIFT 24 155#define PM_CTRL_LCKDET_TIMER_SHIFT 24
166#define PM_CTRL_EN_BUFS_RX_L0S 0x10000000 156#define PM_CTRL_LCKDET_TIMER_DEF 0xC
167#define PM_CTRL_SA_DLY_EN 0x20000000 157#define PM_CTRL_PM_REQ_TIMER_MASK 0xFUL
168#define PM_CTRL_MAC_ASPM_CHK 0x40000000 158#define PM_CTRL_PM_REQ_TIMER_SHIFT 20 /* pm_request_l1 time > @
169#define PM_CTRL_HOTRST 0x80000000 159 * ->L0s not L1 */
160#define PM_CTRL_PM_REQ_TO_DEF 0xF
161#define PMCTRL_TXL1_AFTER_L0S BIT(19) /* l1dv2.0+ */
162#define L1D_PMCTRL_L1_ENTRY_TM_MASK 7UL /* l1dv2.0+, 3bits */
163#define L1D_PMCTRL_L1_ENTRY_TM_SHIFT 16
164#define L1D_PMCTRL_L1_ENTRY_TM_DIS 0
165#define L1D_PMCTRL_L1_ENTRY_TM_2US 1
166#define L1D_PMCTRL_L1_ENTRY_TM_4US 2
167#define L1D_PMCTRL_L1_ENTRY_TM_8US 3
168#define L1D_PMCTRL_L1_ENTRY_TM_16US 4
169#define L1D_PMCTRL_L1_ENTRY_TM_24US 5
170#define L1D_PMCTRL_L1_ENTRY_TM_32US 6
171#define L1D_PMCTRL_L1_ENTRY_TM_63US 7
172#define PM_CTRL_L1_ENTRY_TIMER_MASK 0xFUL /* l1C 4bits */
173#define PM_CTRL_L1_ENTRY_TIMER_SHIFT 16
174#define L2CB1_PM_CTRL_L1_ENTRY_TM 7
175#define L1C_PM_CTRL_L1_ENTRY_TM 0xF
176#define PM_CTRL_RCVR_WT_TIMER BIT(15) /* 1:1us, 0:2ms */
177#define PM_CTRL_CLK_PWM_VER1_1 BIT(14) /* 0:1.0a,1:1.1 */
178#define PM_CTRL_CLK_SWH_L1 BIT(13) /* en pcie clk sw in L1 */
179#define PM_CTRL_ASPM_L0S_EN BIT(12)
180#define PM_CTRL_RXL1_AFTER_L0S BIT(11) /* l1dv2.0+ */
181#define L1D_PMCTRL_L0S_TIMER_MASK 7UL /* l1d2.0+, 3bits*/
182#define L1D_PMCTRL_L0S_TIMER_SHIFT 8
183#define PM_CTRL_L0S_ENTRY_TIMER_MASK 0xFUL /* l1c, 4bits */
184#define PM_CTRL_L0S_ENTRY_TIMER_SHIFT 8
185#define PM_CTRL_SERDES_BUFS_RX_L1_EN BIT(7)
186#define PM_CTRL_SERDES_PD_EX_L1 BIT(6) /* power down serdes rx */
187#define PM_CTRL_SERDES_PLL_L1_EN BIT(5)
188#define PM_CTRL_SERDES_L1_EN BIT(4)
189#define PM_CTRL_ASPM_L1_EN BIT(3)
190#define PM_CTRL_CLK_REQ_EN BIT(2)
191#define PM_CTRL_RBER_EN BIT(1)
192#define PM_CTRL_SPRSDWER_EN BIT(0)
170 193
171#define REG_LTSSM_ID_CTRL 0x12FC 194#define REG_LTSSM_ID_CTRL 0x12FC
172#define LTSSM_ID_EN_WRO 0x1000 195#define LTSSM_ID_EN_WRO 0x1000
196
197
173/* Selene Master Control Register */ 198/* Selene Master Control Register */
174#define REG_MASTER_CTRL 0x1400 199#define REG_MASTER_CTRL 0x1400
175#define MASTER_CTRL_SOFT_RST 0x1 200#define MASTER_CTRL_OTP_SEL BIT(31)
176#define MASTER_CTRL_TEST_MODE_MASK 0x3 201#define MASTER_DEV_NUM_MASK 0x7FUL
177#define MASTER_CTRL_TEST_MODE_SHIFT 2 202#define MASTER_DEV_NUM_SHIFT 24
178#define MASTER_CTRL_BERT_START 0x10 203#define MASTER_REV_NUM_MASK 0xFFUL
179#define MASTER_CTRL_OOB_DIS_OFF 0x40 204#define MASTER_REV_NUM_SHIFT 16
180#define MASTER_CTRL_SA_TIMER_EN 0x80 205#define MASTER_CTRL_INT_RDCLR BIT(14)
181#define MASTER_CTRL_MTIMER_EN 0x100 206#define MASTER_CTRL_CLK_SEL_DIS BIT(12) /* 1:alwys sel pclk from
182#define MASTER_CTRL_MANUAL_INT 0x200 207 * serdes, not sw to 25M */
183#define MASTER_CTRL_TX_ITIMER_EN 0x400 208#define MASTER_CTRL_RX_ITIMER_EN BIT(11) /* IRQ MODURATION FOR RX */
184#define MASTER_CTRL_RX_ITIMER_EN 0x800 209#define MASTER_CTRL_TX_ITIMER_EN BIT(10) /* MODURATION FOR TX/RX */
185#define MASTER_CTRL_CLK_SEL_DIS 0x1000 210#define MASTER_CTRL_MANU_INT BIT(9) /* SOFT MANUAL INT */
186#define MASTER_CTRL_CLK_SWH_MODE 0x2000 211#define MASTER_CTRL_MANUTIMER_EN BIT(8)
187#define MASTER_CTRL_INT_RDCLR 0x4000 212#define MASTER_CTRL_SA_TIMER_EN BIT(7) /* SYS ALIVE TIMER EN */
188#define MASTER_CTRL_REV_NUM_SHIFT 16 213#define MASTER_CTRL_OOB_DIS BIT(6) /* OUT OF BOX DIS */
189#define MASTER_CTRL_REV_NUM_MASK 0xff 214#define MASTER_CTRL_WAKEN_25M BIT(5) /* WAKE WO. PCIE CLK */
190#define MASTER_CTRL_DEV_ID_SHIFT 24 215#define MASTER_CTRL_BERT_START BIT(4)
191#define MASTER_CTRL_DEV_ID_MASK 0x7f 216#define MASTER_PCIE_TSTMOD_MASK 3UL
192#define MASTER_CTRL_OTP_SEL 0x80000000 217#define MASTER_PCIE_TSTMOD_SHIFT 2
218#define MASTER_PCIE_RST BIT(1)
219#define MASTER_CTRL_SOFT_RST BIT(0) /* RST MAC & DMA */
220#define DMA_MAC_RST_TO 50
193 221
194/* Timer Initial Value Register */ 222/* Timer Initial Value Register */
195#define REG_MANUAL_TIMER_INIT 0x1404 223#define REG_MANUAL_TIMER_INIT 0x1404
@@ -201,87 +229,85 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
201#define IRQ_MODRT_RX_TIMER_SHIFT 16 229#define IRQ_MODRT_RX_TIMER_SHIFT 16
202 230
203#define REG_GPHY_CTRL 0x140C 231#define REG_GPHY_CTRL 0x140C
204#define GPHY_CTRL_EXT_RESET 0x1 232#define GPHY_CTRL_ADDR_MASK 0x1FUL
205#define GPHY_CTRL_RTL_MODE 0x2 233#define GPHY_CTRL_ADDR_SHIFT 19
206#define GPHY_CTRL_LED_MODE 0x4 234#define GPHY_CTRL_BP_VLTGSW BIT(18)
207#define GPHY_CTRL_ANEG_NOW 0x8 235#define GPHY_CTRL_100AB_EN BIT(17)
208#define GPHY_CTRL_REV_ANEG 0x10 236#define GPHY_CTRL_10AB_EN BIT(16)
209#define GPHY_CTRL_GATE_25M_EN 0x20 237#define GPHY_CTRL_PHY_PLL_BYPASS BIT(15)
210#define GPHY_CTRL_LPW_EXIT 0x40 238#define GPHY_CTRL_PWDOWN_HW BIT(14) /* affect MAC&PHY, to low pw */
211#define GPHY_CTRL_PHY_IDDQ 0x80 239#define GPHY_CTRL_PHY_PLL_ON BIT(13) /* 1:pll always on, 0:can sw */
212#define GPHY_CTRL_PHY_IDDQ_DIS 0x100 240#define GPHY_CTRL_SEL_ANA_RST BIT(12)
213#define GPHY_CTRL_GIGA_DIS 0x200 241#define GPHY_CTRL_HIB_PULSE BIT(11)
214#define GPHY_CTRL_HIB_EN 0x400 242#define GPHY_CTRL_HIB_EN BIT(10)
215#define GPHY_CTRL_HIB_PULSE 0x800 243#define GPHY_CTRL_GIGA_DIS BIT(9)
216#define GPHY_CTRL_SEL_ANA_RST 0x1000 244#define GPHY_CTRL_PHY_IDDQ_DIS BIT(8) /* pw on RST */
217#define GPHY_CTRL_PHY_PLL_ON 0x2000 245#define GPHY_CTRL_PHY_IDDQ BIT(7) /* bit8 affect bit7 while rb */
218#define GPHY_CTRL_PWDOWN_HW 0x4000 246#define GPHY_CTRL_LPW_EXIT BIT(6)
219#define GPHY_CTRL_PHY_PLL_BYPASS 0x8000 247#define GPHY_CTRL_GATE_25M_EN BIT(5)
220 248#define GPHY_CTRL_REV_ANEG BIT(4)
221#define GPHY_CTRL_DEFAULT ( \ 249#define GPHY_CTRL_ANEG_NOW BIT(3)
222 GPHY_CTRL_SEL_ANA_RST |\ 250#define GPHY_CTRL_LED_MODE BIT(2)
223 GPHY_CTRL_HIB_PULSE |\ 251#define GPHY_CTRL_RTL_MODE BIT(1)
224 GPHY_CTRL_HIB_EN) 252#define GPHY_CTRL_EXT_RESET BIT(0) /* 1:out of DSP RST status */
225 253#define GPHY_CTRL_EXT_RST_TO 80 /* 800us atmost */
226#define GPHY_CTRL_PW_WOL_DIS ( \ 254#define GPHY_CTRL_CLS (\
227 GPHY_CTRL_SEL_ANA_RST |\ 255 GPHY_CTRL_LED_MODE |\
228 GPHY_CTRL_HIB_PULSE |\ 256 GPHY_CTRL_100AB_EN |\
229 GPHY_CTRL_HIB_EN |\ 257 GPHY_CTRL_PHY_PLL_ON)
230 GPHY_CTRL_PWDOWN_HW |\ 258
231 GPHY_CTRL_PHY_IDDQ)
232
233#define GPHY_CTRL_POWER_SAVING ( \
234 GPHY_CTRL_SEL_ANA_RST |\
235 GPHY_CTRL_HIB_EN |\
236 GPHY_CTRL_HIB_PULSE |\
237 GPHY_CTRL_PWDOWN_HW |\
238 GPHY_CTRL_PHY_IDDQ)
239/* Block IDLE Status Register */ 259/* Block IDLE Status Register */
240#define REG_IDLE_STATUS 0x1410 260#define REG_IDLE_STATUS 0x1410
241#define IDLE_STATUS_MASK 0x00FF 261#define IDLE_STATUS_SFORCE_MASK 0xFUL
242#define IDLE_STATUS_RXMAC_NO_IDLE 0x1 262#define IDLE_STATUS_SFORCE_SHIFT 14
243#define IDLE_STATUS_TXMAC_NO_IDLE 0x2 263#define IDLE_STATUS_CALIB_DONE BIT(13)
244#define IDLE_STATUS_RXQ_NO_IDLE 0x4 264#define IDLE_STATUS_CALIB_RES_MASK 0x1FUL
245#define IDLE_STATUS_TXQ_NO_IDLE 0x8 265#define IDLE_STATUS_CALIB_RES_SHIFT 8
246#define IDLE_STATUS_DMAR_NO_IDLE 0x10 266#define IDLE_STATUS_CALIBERR_MASK 0xFUL
247#define IDLE_STATUS_DMAW_NO_IDLE 0x20 267#define IDLE_STATUS_CALIBERR_SHIFT 4
248#define IDLE_STATUS_SMB_NO_IDLE 0x40 268#define IDLE_STATUS_TXQ_BUSY BIT(3)
249#define IDLE_STATUS_CMB_NO_IDLE 0x80 269#define IDLE_STATUS_RXQ_BUSY BIT(2)
270#define IDLE_STATUS_TXMAC_BUSY BIT(1)
271#define IDLE_STATUS_RXMAC_BUSY BIT(0)
272#define IDLE_STATUS_MASK (\
273 IDLE_STATUS_TXQ_BUSY |\
274 IDLE_STATUS_RXQ_BUSY |\
275 IDLE_STATUS_TXMAC_BUSY |\
276 IDLE_STATUS_RXMAC_BUSY)
250 277
251/* MDIO Control Register */ 278/* MDIO Control Register */
252#define REG_MDIO_CTRL 0x1414 279#define REG_MDIO_CTRL 0x1414
253#define MDIO_DATA_MASK 0xffff /* On MDIO write, the 16-bit 280#define MDIO_CTRL_MODE_EXT BIT(30)
254 * control data to write to PHY 281#define MDIO_CTRL_POST_READ BIT(29)
255 * MII management register */ 282#define MDIO_CTRL_AP_EN BIT(28)
256#define MDIO_DATA_SHIFT 0 /* On MDIO read, the 16-bit 283#define MDIO_CTRL_BUSY BIT(27)
257 * status data that was read 284#define MDIO_CTRL_CLK_SEL_MASK 0x7UL
258 * from the PHY MII management register */ 285#define MDIO_CTRL_CLK_SEL_SHIFT 24
259#define MDIO_REG_ADDR_MASK 0x1f /* MDIO register address */ 286#define MDIO_CTRL_CLK_25_4 0 /* 25MHz divide 4 */
260#define MDIO_REG_ADDR_SHIFT 16 287#define MDIO_CTRL_CLK_25_6 2
261#define MDIO_RW 0x200000 /* 1: read, 0: write */ 288#define MDIO_CTRL_CLK_25_8 3
262#define MDIO_SUP_PREAMBLE 0x400000 /* Suppress preamble */ 289#define MDIO_CTRL_CLK_25_10 4
263#define MDIO_START 0x800000 /* Write 1 to initiate the MDIO 290#define MDIO_CTRL_CLK_25_32 5
264 * master. And this bit is self 291#define MDIO_CTRL_CLK_25_64 6
265 * cleared after one cycle */ 292#define MDIO_CTRL_CLK_25_128 7
266#define MDIO_CLK_SEL_SHIFT 24 293#define MDIO_CTRL_START BIT(23)
267#define MDIO_CLK_25_4 0 294#define MDIO_CTRL_SPRES_PRMBL BIT(22)
268#define MDIO_CLK_25_6 2 295#define MDIO_CTRL_OP_READ BIT(21) /* 1:read, 0:write */
269#define MDIO_CLK_25_8 3 296#define MDIO_CTRL_REG_MASK 0x1FUL
270#define MDIO_CLK_25_10 4 297#define MDIO_CTRL_REG_SHIFT 16
271#define MDIO_CLK_25_14 5 298#define MDIO_CTRL_DATA_MASK 0xFFFFUL
272#define MDIO_CLK_25_20 6 299#define MDIO_CTRL_DATA_SHIFT 0
273#define MDIO_CLK_25_28 7 300#define MDIO_MAX_AC_TO 120 /* 1.2ms timeout for slow clk */
274#define MDIO_BUSY 0x8000000 301
275#define MDIO_AP_EN 0x10000000 302/* for extension reg access */
276#define MDIO_WAIT_TIMES 10 303#define REG_MDIO_EXTN 0x1448
277 304#define MDIO_EXTN_PORTAD_MASK 0x1FUL
278/* MII PHY Status Register */ 305#define MDIO_EXTN_PORTAD_SHIFT 21
279#define REG_PHY_STATUS 0x1418 306#define MDIO_EXTN_DEVAD_MASK 0x1FUL
280#define PHY_GENERAL_STATUS_MASK 0xFFFF 307#define MDIO_EXTN_DEVAD_SHIFT 16
281#define PHY_STATUS_RECV_ENABLE 0x0001 308#define MDIO_EXTN_REG_MASK 0xFFFFUL
282#define PHY_OE_PWSP_STATUS_MASK 0x07FF 309#define MDIO_EXTN_REG_SHIFT 0
283#define PHY_OE_PWSP_STATUS_SHIFT 16 310
284#define PHY_STATUS_LPW_STATE 0x80000000
285/* BIST Control and Status Register0 (for the Packet Memory) */ 311/* BIST Control and Status Register0 (for the Packet Memory) */
286#define REG_BIST0_CTRL 0x141c 312#define REG_BIST0_CTRL 0x141c
287#define BIST0_NOW 0x1 313#define BIST0_NOW 0x1
@@ -299,50 +325,81 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
299#define BIST1_FUSE_FLAG 0x4 325#define BIST1_FUSE_FLAG 0x4
300 326
301/* SerDes Lock Detect Control and Status Register */ 327/* SerDes Lock Detect Control and Status Register */
302#define REG_SERDES_LOCK 0x1424 328#define REG_SERDES 0x1424
303#define SERDES_LOCK_DETECT 0x1 /* SerDes lock detected. This signal 329#define SERDES_PHY_CLK_SLOWDOWN BIT(18)
304 * comes from Analog SerDes */ 330#define SERDES_MAC_CLK_SLOWDOWN BIT(17)
305#define SERDES_LOCK_DETECT_EN 0x2 /* 1: Enable SerDes Lock detect function */ 331#define SERDES_SELFB_PLL_MASK 0x3UL
306#define SERDES_LOCK_STS_SELFB_PLL_SHIFT 0xE 332#define SERDES_SELFB_PLL_SHIFT 14
307#define SERDES_LOCK_STS_SELFB_PLL_MASK 0x3 333#define SERDES_PHYCLK_SEL_GTX BIT(13) /* 1:gtx_clk, 0:25M */
308#define SERDES_OVCLK_18_25 0x0 334#define SERDES_PCIECLK_SEL_SRDS BIT(12) /* 1:serdes,0:25M */
309#define SERDES_OVCLK_12_18 0x1 335#define SERDES_BUFS_RX_EN BIT(11)
310#define SERDES_OVCLK_0_4 0x2 336#define SERDES_PD_RX BIT(10)
311#define SERDES_OVCLK_4_12 0x3 337#define SERDES_PLL_EN BIT(9)
312#define SERDES_MAC_CLK_SLOWDOWN 0x20000 338#define SERDES_EN BIT(8)
313#define SERDES_PYH_CLK_SLOWDOWN 0x40000 339#define SERDES_SELFB_PLL_SEL_CSR BIT(6) /* 0:state-machine,1:csr */
340#define SERDES_SELFB_PLL_CSR_MASK 0x3UL
341#define SERDES_SELFB_PLL_CSR_SHIFT 4
342#define SERDES_SELFB_PLL_CSR_4 3 /* 4-12% OV-CLK */
343#define SERDES_SELFB_PLL_CSR_0 2 /* 0-4% OV-CLK */
344#define SERDES_SELFB_PLL_CSR_12 1 /* 12-18% OV-CLK */
345#define SERDES_SELFB_PLL_CSR_18 0 /* 18-25% OV-CLK */
346#define SERDES_VCO_SLOW BIT(3)
347#define SERDES_VCO_FAST BIT(2)
348#define SERDES_LOCK_DETECT_EN BIT(1)
349#define SERDES_LOCK_DETECT BIT(0)
350
351#define REG_LPI_DECISN_TIMER 0x143C
352#define L2CB_LPI_DESISN_TIMER 0x7D00
353
354#define REG_LPI_CTRL 0x1440
355#define LPI_CTRL_CHK_DA BIT(31)
356#define LPI_CTRL_ENH_TO_MASK 0x1FFFUL
357#define LPI_CTRL_ENH_TO_SHIFT 12
358#define LPI_CTRL_ENH_TH_MASK 0x1FUL
359#define LPI_CTRL_ENH_TH_SHIFT 6
360#define LPI_CTRL_ENH_EN BIT(5)
361#define LPI_CTRL_CHK_RX BIT(4)
362#define LPI_CTRL_CHK_STATE BIT(3)
363#define LPI_CTRL_GMII BIT(2)
364#define LPI_CTRL_TO_PHY BIT(1)
365#define LPI_CTRL_EN BIT(0)
366
367#define REG_LPI_WAIT 0x1444
368#define LPI_WAIT_TIMER_MASK 0xFFFFUL
369#define LPI_WAIT_TIMER_SHIFT 0
314 370
315/* MAC Control Register */ 371/* MAC Control Register */
316#define REG_MAC_CTRL 0x1480 372#define REG_MAC_CTRL 0x1480
317#define MAC_CTRL_TX_EN 0x1 373#define MAC_CTRL_SPEED_MODE_SW BIT(30) /* 0:phy,1:sw */
318#define MAC_CTRL_RX_EN 0x2 374#define MAC_CTRL_HASH_ALG_CRC32 BIT(29) /* 1:legacy,0:lw_5b */
319#define MAC_CTRL_TX_FLOW 0x4 375#define MAC_CTRL_SINGLE_PAUSE_EN BIT(28)
320#define MAC_CTRL_RX_FLOW 0x8 376#define MAC_CTRL_DBG BIT(27)
321#define MAC_CTRL_LOOPBACK 0x10 377#define MAC_CTRL_BC_EN BIT(26)
322#define MAC_CTRL_DUPLX 0x20 378#define MAC_CTRL_MC_ALL_EN BIT(25)
323#define MAC_CTRL_ADD_CRC 0x40 379#define MAC_CTRL_RX_CHKSUM_EN BIT(24)
324#define MAC_CTRL_PAD 0x80 380#define MAC_CTRL_TX_HUGE BIT(23)
325#define MAC_CTRL_LENCHK 0x100 381#define MAC_CTRL_DBG_TX_BKPRESURE BIT(22)
326#define MAC_CTRL_HUGE_EN 0x200 382#define MAC_CTRL_SPEED_MASK 3UL
327#define MAC_CTRL_PRMLEN_SHIFT 10 383#define MAC_CTRL_SPEED_SHIFT 20
328#define MAC_CTRL_PRMLEN_MASK 0xf 384#define MAC_CTRL_SPEED_10_100 1
329#define MAC_CTRL_RMV_VLAN 0x4000 385#define MAC_CTRL_SPEED_1000 2
330#define MAC_CTRL_PROMIS_EN 0x8000 386#define MAC_CTRL_TX_SIMURST BIT(19)
331#define MAC_CTRL_TX_PAUSE 0x10000 387#define MAC_CTRL_SCNT BIT(17)
332#define MAC_CTRL_SCNT 0x20000 388#define MAC_CTRL_TX_PAUSE BIT(16)
333#define MAC_CTRL_SRST_TX 0x40000 389#define MAC_CTRL_PROMIS_EN BIT(15)
334#define MAC_CTRL_TX_SIMURST 0x80000 390#define MAC_CTRL_RMV_VLAN BIT(14)
335#define MAC_CTRL_SPEED_SHIFT 20 391#define MAC_CTRL_PRMLEN_MASK 0xFUL
336#define MAC_CTRL_SPEED_MASK 0x3 392#define MAC_CTRL_PRMLEN_SHIFT 10
337#define MAC_CTRL_DBG_TX_BKPRESURE 0x400000 393#define MAC_CTRL_HUGE_EN BIT(9)
338#define MAC_CTRL_TX_HUGE 0x800000 394#define MAC_CTRL_LENCHK BIT(8)
339#define MAC_CTRL_RX_CHKSUM_EN 0x1000000 395#define MAC_CTRL_PAD BIT(7)
340#define MAC_CTRL_MC_ALL_EN 0x2000000 396#define MAC_CTRL_ADD_CRC BIT(6)
341#define MAC_CTRL_BC_EN 0x4000000 397#define MAC_CTRL_DUPLX BIT(5)
342#define MAC_CTRL_DBG 0x8000000 398#define MAC_CTRL_LOOPBACK BIT(4)
343#define MAC_CTRL_SINGLE_PAUSE_EN 0x10000000 399#define MAC_CTRL_RX_FLOW BIT(3)
344#define MAC_CTRL_HASH_ALG_CRC32 0x20000000 400#define MAC_CTRL_TX_FLOW BIT(2)
345#define MAC_CTRL_SPEED_MODE_SW 0x40000000 401#define MAC_CTRL_RX_EN BIT(1)
402#define MAC_CTRL_TX_EN BIT(0)
346 403
347/* MAC IPG/IFG Control Register */ 404/* MAC IPG/IFG Control Register */
348#define REG_MAC_IPG_IFG 0x1484 405#define REG_MAC_IPG_IFG 0x1484
@@ -386,34 +443,53 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
386 443
387/* Wake-On-Lan control register */ 444/* Wake-On-Lan control register */
388#define REG_WOL_CTRL 0x14a0 445#define REG_WOL_CTRL 0x14a0
389#define WOL_PATTERN_EN 0x00000001 446#define WOL_PT7_MATCH BIT(31)
390#define WOL_PATTERN_PME_EN 0x00000002 447#define WOL_PT6_MATCH BIT(30)
391#define WOL_MAGIC_EN 0x00000004 448#define WOL_PT5_MATCH BIT(29)
392#define WOL_MAGIC_PME_EN 0x00000008 449#define WOL_PT4_MATCH BIT(28)
393#define WOL_LINK_CHG_EN 0x00000010 450#define WOL_PT3_MATCH BIT(27)
394#define WOL_LINK_CHG_PME_EN 0x00000020 451#define WOL_PT2_MATCH BIT(26)
395#define WOL_PATTERN_ST 0x00000100 452#define WOL_PT1_MATCH BIT(25)
396#define WOL_MAGIC_ST 0x00000200 453#define WOL_PT0_MATCH BIT(24)
397#define WOL_LINKCHG_ST 0x00000400 454#define WOL_PT7_EN BIT(23)
398#define WOL_CLK_SWITCH_EN 0x00008000 455#define WOL_PT6_EN BIT(22)
399#define WOL_PT0_EN 0x00010000 456#define WOL_PT5_EN BIT(21)
400#define WOL_PT1_EN 0x00020000 457#define WOL_PT4_EN BIT(20)
401#define WOL_PT2_EN 0x00040000 458#define WOL_PT3_EN BIT(19)
402#define WOL_PT3_EN 0x00080000 459#define WOL_PT2_EN BIT(18)
403#define WOL_PT4_EN 0x00100000 460#define WOL_PT1_EN BIT(17)
404#define WOL_PT5_EN 0x00200000 461#define WOL_PT0_EN BIT(16)
405#define WOL_PT6_EN 0x00400000 462#define WOL_LNKCHG_ST BIT(10)
463#define WOL_MAGIC_ST BIT(9)
464#define WOL_PATTERN_ST BIT(8)
465#define WOL_OOB_EN BIT(6)
466#define WOL_LINK_CHG_PME_EN BIT(5)
467#define WOL_LINK_CHG_EN BIT(4)
468#define WOL_MAGIC_PME_EN BIT(3)
469#define WOL_MAGIC_EN BIT(2)
470#define WOL_PATTERN_PME_EN BIT(1)
471#define WOL_PATTERN_EN BIT(0)
406 472
407/* WOL Length ( 2 DWORD ) */ 473/* WOL Length ( 2 DWORD ) */
408#define REG_WOL_PATTERN_LEN 0x14a4 474#define REG_WOL_PTLEN1 0x14A4
409#define WOL_PT_LEN_MASK 0x7f 475#define WOL_PTLEN1_3_MASK 0xFFUL
410#define WOL_PT0_LEN_SHIFT 0 476#define WOL_PTLEN1_3_SHIFT 24
411#define WOL_PT1_LEN_SHIFT 8 477#define WOL_PTLEN1_2_MASK 0xFFUL
412#define WOL_PT2_LEN_SHIFT 16 478#define WOL_PTLEN1_2_SHIFT 16
413#define WOL_PT3_LEN_SHIFT 24 479#define WOL_PTLEN1_1_MASK 0xFFUL
414#define WOL_PT4_LEN_SHIFT 0 480#define WOL_PTLEN1_1_SHIFT 8
415#define WOL_PT5_LEN_SHIFT 8 481#define WOL_PTLEN1_0_MASK 0xFFUL
416#define WOL_PT6_LEN_SHIFT 16 482#define WOL_PTLEN1_0_SHIFT 0
483
484#define REG_WOL_PTLEN2 0x14A8
485#define WOL_PTLEN2_7_MASK 0xFFUL
486#define WOL_PTLEN2_7_SHIFT 24
487#define WOL_PTLEN2_6_MASK 0xFFUL
488#define WOL_PTLEN2_6_SHIFT 16
489#define WOL_PTLEN2_5_MASK 0xFFUL
490#define WOL_PTLEN2_5_SHIFT 8
491#define WOL_PTLEN2_4_MASK 0xFFUL
492#define WOL_PTLEN2_4_SHIFT 0
417 493
418/* Internal SRAM Partition Register */ 494/* Internal SRAM Partition Register */
419#define RFDX_HEAD_ADDR_MASK 0x03FF 495#define RFDX_HEAD_ADDR_MASK 0x03FF
@@ -458,66 +534,50 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
458 */ 534 */
459#define REG_RX_BASE_ADDR_HI 0x1540 535#define REG_RX_BASE_ADDR_HI 0x1540
460#define REG_TX_BASE_ADDR_HI 0x1544 536#define REG_TX_BASE_ADDR_HI 0x1544
461#define REG_SMB_BASE_ADDR_HI 0x1548
462#define REG_SMB_BASE_ADDR_LO 0x154C
463#define REG_RFD0_HEAD_ADDR_LO 0x1550 537#define REG_RFD0_HEAD_ADDR_LO 0x1550
464#define REG_RFD1_HEAD_ADDR_LO 0x1554
465#define REG_RFD2_HEAD_ADDR_LO 0x1558
466#define REG_RFD3_HEAD_ADDR_LO 0x155C
467#define REG_RFD_RING_SIZE 0x1560 538#define REG_RFD_RING_SIZE 0x1560
468#define RFD_RING_SIZE_MASK 0x0FFF 539#define RFD_RING_SIZE_MASK 0x0FFF
469#define REG_RX_BUF_SIZE 0x1564 540#define REG_RX_BUF_SIZE 0x1564
470#define RX_BUF_SIZE_MASK 0xFFFF 541#define RX_BUF_SIZE_MASK 0xFFFF
471#define REG_RRD0_HEAD_ADDR_LO 0x1568 542#define REG_RRD0_HEAD_ADDR_LO 0x1568
472#define REG_RRD1_HEAD_ADDR_LO 0x156C
473#define REG_RRD2_HEAD_ADDR_LO 0x1570
474#define REG_RRD3_HEAD_ADDR_LO 0x1574
475#define REG_RRD_RING_SIZE 0x1578 543#define REG_RRD_RING_SIZE 0x1578
476#define RRD_RING_SIZE_MASK 0x0FFF 544#define RRD_RING_SIZE_MASK 0x0FFF
477#define REG_HTPD_HEAD_ADDR_LO 0x157C 545#define REG_TPD_PRI1_ADDR_LO 0x157C
478#define REG_NTPD_HEAD_ADDR_LO 0x1580 546#define REG_TPD_PRI0_ADDR_LO 0x1580
479#define REG_TPD_RING_SIZE 0x1584 547#define REG_TPD_RING_SIZE 0x1584
480#define TPD_RING_SIZE_MASK 0xFFFF 548#define TPD_RING_SIZE_MASK 0xFFFF
481#define REG_CMB_BASE_ADDR_LO 0x1588
482
483/* RSS about */
484#define REG_RSS_KEY0 0x14B0
485#define REG_RSS_KEY1 0x14B4
486#define REG_RSS_KEY2 0x14B8
487#define REG_RSS_KEY3 0x14BC
488#define REG_RSS_KEY4 0x14C0
489#define REG_RSS_KEY5 0x14C4
490#define REG_RSS_KEY6 0x14C8
491#define REG_RSS_KEY7 0x14CC
492#define REG_RSS_KEY8 0x14D0
493#define REG_RSS_KEY9 0x14D4
494#define REG_IDT_TABLE0 0x14E0
495#define REG_IDT_TABLE1 0x14E4
496#define REG_IDT_TABLE2 0x14E8
497#define REG_IDT_TABLE3 0x14EC
498#define REG_IDT_TABLE4 0x14F0
499#define REG_IDT_TABLE5 0x14F4
500#define REG_IDT_TABLE6 0x14F8
501#define REG_IDT_TABLE7 0x14FC
502#define REG_IDT_TABLE REG_IDT_TABLE0
503#define REG_RSS_HASH_VALUE 0x15B0
504#define REG_RSS_HASH_FLAG 0x15B4
505#define REG_BASE_CPU_NUMBER 0x15B8
506 549
507/* TXQ Control Register */ 550/* TXQ Control Register */
508#define REG_TXQ_CTRL 0x1590 551#define REG_TXQ_CTRL 0x1590
509#define TXQ_NUM_TPD_BURST_MASK 0xF 552#define TXQ_TXF_BURST_NUM_MASK 0xFFFFUL
510#define TXQ_NUM_TPD_BURST_SHIFT 0 553#define TXQ_TXF_BURST_NUM_SHIFT 16
511#define TXQ_CTRL_IP_OPTION_EN 0x10 554#define L1C_TXQ_TXF_BURST_PREF 0x200
512#define TXQ_CTRL_EN 0x20 555#define L2CB_TXQ_TXF_BURST_PREF 0x40
513#define TXQ_CTRL_ENH_MODE 0x40 556#define TXQ_CTRL_PEDING_CLR BIT(8)
514#define TXQ_CTRL_LS_8023_EN 0x80 557#define TXQ_CTRL_LS_8023_EN BIT(7)
515#define TXQ_TXF_BURST_NUM_SHIFT 16 558#define TXQ_CTRL_ENH_MODE BIT(6)
516#define TXQ_TXF_BURST_NUM_MASK 0xFFFF 559#define TXQ_CTRL_EN BIT(5)
560#define TXQ_CTRL_IP_OPTION_EN BIT(4)
561#define TXQ_NUM_TPD_BURST_MASK 0xFUL
562#define TXQ_NUM_TPD_BURST_SHIFT 0
563#define TXQ_NUM_TPD_BURST_DEF 5
564#define TXQ_CFGV (\
565 FIELDX(TXQ_NUM_TPD_BURST, TXQ_NUM_TPD_BURST_DEF) |\
566 TXQ_CTRL_ENH_MODE |\
567 TXQ_CTRL_LS_8023_EN |\
568 TXQ_CTRL_IP_OPTION_EN)
569#define L1C_TXQ_CFGV (\
570 TXQ_CFGV |\
571 FIELDX(TXQ_TXF_BURST_NUM, L1C_TXQ_TXF_BURST_PREF))
572#define L2CB_TXQ_CFGV (\
573 TXQ_CFGV |\
574 FIELDX(TXQ_TXF_BURST_NUM, L2CB_TXQ_TXF_BURST_PREF))
575
517 576
518/* Jumbo packet Threshold for task offload */ 577/* Jumbo packet Threshold for task offload */
519#define REG_TX_TSO_OFFLOAD_THRESH 0x1594 /* In 8-bytes */ 578#define REG_TX_TSO_OFFLOAD_THRESH 0x1594 /* In 8-bytes */
520#define TX_TSO_OFFLOAD_THRESH_MASK 0x07FF 579#define TX_TSO_OFFLOAD_THRESH_MASK 0x07FF
580#define MAX_TSO_FRAME_SIZE (7*1024)
521 581
522#define REG_TXF_WATER_MARK 0x1598 /* In 8-bytes */ 582#define REG_TXF_WATER_MARK 0x1598 /* In 8-bytes */
523#define TXF_WATER_MARK_MASK 0x0FFF 583#define TXF_WATER_MARK_MASK 0x0FFF
@@ -537,26 +597,21 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
537#define ASPM_THRUPUT_LIMIT_NO 0x00 597#define ASPM_THRUPUT_LIMIT_NO 0x00
538#define ASPM_THRUPUT_LIMIT_1M 0x01 598#define ASPM_THRUPUT_LIMIT_1M 0x01
539#define ASPM_THRUPUT_LIMIT_10M 0x02 599#define ASPM_THRUPUT_LIMIT_10M 0x02
540#define ASPM_THRUPUT_LIMIT_100M 0x04 600#define ASPM_THRUPUT_LIMIT_100M 0x03
541#define RXQ1_CTRL_EN 0x10 601#define IPV6_CHKSUM_CTRL_EN BIT(7)
542#define RXQ2_CTRL_EN 0x20
543#define RXQ3_CTRL_EN 0x40
544#define IPV6_CHKSUM_CTRL_EN 0x80
545#define RSS_HASH_BITS_MASK 0x00FF
546#define RSS_HASH_BITS_SHIFT 8
547#define RSS_HASH_IPV4 0x10000
548#define RSS_HASH_IPV4_TCP 0x20000
549#define RSS_HASH_IPV6 0x40000
550#define RSS_HASH_IPV6_TCP 0x80000
551#define RXQ_RFD_BURST_NUM_MASK 0x003F 602#define RXQ_RFD_BURST_NUM_MASK 0x003F
552#define RXQ_RFD_BURST_NUM_SHIFT 20 603#define RXQ_RFD_BURST_NUM_SHIFT 20
553#define RSS_MODE_MASK 0x0003 604#define RXQ_NUM_RFD_PREF_DEF 8
605#define RSS_MODE_MASK 3UL
554#define RSS_MODE_SHIFT 26 606#define RSS_MODE_SHIFT 26
555#define RSS_NIP_QUEUE_SEL_MASK 0x1 607#define RSS_MODE_DIS 0
556#define RSS_NIP_QUEUE_SEL_SHIFT 28 608#define RSS_MODE_SQSI 1
557#define RRS_HASH_CTRL_EN 0x20000000 609#define RSS_MODE_MQSI 2
558#define RX_CUT_THRU_EN 0x40000000 610#define RSS_MODE_MQMI 3
559#define RXQ_CTRL_EN 0x80000000 611#define RSS_NIP_QUEUE_SEL BIT(28) /* 0:q0, 1:table */
612#define RRS_HASH_CTRL_EN BIT(29)
613#define RX_CUT_THRU_EN BIT(30)
614#define RXQ_CTRL_EN BIT(31)
560 615
561#define REG_RFD_FREE_THRESH 0x15A4 616#define REG_RFD_FREE_THRESH 0x15A4
562#define RFD_FREE_THRESH_MASK 0x003F 617#define RFD_FREE_THRESH_MASK 0x003F
@@ -577,57 +632,45 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
577#define RXD_DMA_DOWN_TIMER_SHIFT 16 632#define RXD_DMA_DOWN_TIMER_SHIFT 16
578 633
579/* DMA Engine Control Register */ 634/* DMA Engine Control Register */
580#define REG_DMA_CTRL 0x15C0 635#define REG_DMA_CTRL 0x15C0
581#define DMA_CTRL_DMAR_IN_ORDER 0x1 636#define DMA_CTRL_SMB_NOW BIT(31)
582#define DMA_CTRL_DMAR_ENH_ORDER 0x2 637#define DMA_CTRL_WPEND_CLR BIT(30)
583#define DMA_CTRL_DMAR_OUT_ORDER 0x4 638#define DMA_CTRL_RPEND_CLR BIT(29)
584#define DMA_CTRL_RCB_VALUE 0x8 639#define DMA_CTRL_WDLY_CNT_MASK 0xFUL
585#define DMA_CTRL_DMAR_BURST_LEN_MASK 0x0007 640#define DMA_CTRL_WDLY_CNT_SHIFT 16
586#define DMA_CTRL_DMAR_BURST_LEN_SHIFT 4 641#define DMA_CTRL_WDLY_CNT_DEF 4
587#define DMA_CTRL_DMAW_BURST_LEN_MASK 0x0007 642#define DMA_CTRL_RDLY_CNT_MASK 0x1FUL
588#define DMA_CTRL_DMAW_BURST_LEN_SHIFT 7 643#define DMA_CTRL_RDLY_CNT_SHIFT 11
589#define DMA_CTRL_DMAR_REQ_PRI 0x400 644#define DMA_CTRL_RDLY_CNT_DEF 15
590#define DMA_CTRL_DMAR_DLY_CNT_MASK 0x001F 645#define DMA_CTRL_RREQ_PRI_DATA BIT(10) /* 0:tpd, 1:data */
591#define DMA_CTRL_DMAR_DLY_CNT_SHIFT 11 646#define DMA_CTRL_WREQ_BLEN_MASK 7UL
592#define DMA_CTRL_DMAW_DLY_CNT_MASK 0x000F 647#define DMA_CTRL_WREQ_BLEN_SHIFT 7
593#define DMA_CTRL_DMAW_DLY_CNT_SHIFT 16 648#define DMA_CTRL_RREQ_BLEN_MASK 7UL
594#define DMA_CTRL_CMB_EN 0x100000 649#define DMA_CTRL_RREQ_BLEN_SHIFT 4
595#define DMA_CTRL_SMB_EN 0x200000 650#define L1C_CTRL_DMA_RCB_LEN128 BIT(3) /* 0:64bytes,1:128bytes */
596#define DMA_CTRL_CMB_NOW 0x400000 651#define DMA_CTRL_RORDER_MODE_MASK 7UL
597#define MAC_CTRL_SMB_DIS 0x1000000 652#define DMA_CTRL_RORDER_MODE_SHIFT 0
598#define DMA_CTRL_SMB_NOW 0x80000000 653#define DMA_CTRL_RORDER_MODE_OUT 4
599 654#define DMA_CTRL_RORDER_MODE_ENHANCE 2
600/* CMB/SMB Control Register */ 655#define DMA_CTRL_RORDER_MODE_IN 1
656
657/* INT-triggle/SMB Control Register */
601#define REG_SMB_STAT_TIMER 0x15C4 /* 2us resolution */ 658#define REG_SMB_STAT_TIMER 0x15C4 /* 2us resolution */
602#define SMB_STAT_TIMER_MASK 0xFFFFFF 659#define SMB_STAT_TIMER_MASK 0xFFFFFF
603#define REG_CMB_TPD_THRESH 0x15C8 660#define REG_TINT_TPD_THRESH 0x15C8 /* tpd th to trig intrrupt */
604#define CMB_TPD_THRESH_MASK 0xFFFF
605#define REG_CMB_TX_TIMER 0x15CC /* 2us resolution */
606#define CMB_TX_TIMER_MASK 0xFFFF
607 661
608/* Mail box */ 662/* Mail box */
609#define MB_RFDX_PROD_IDX_MASK 0xFFFF 663#define MB_RFDX_PROD_IDX_MASK 0xFFFF
610#define REG_MB_RFD0_PROD_IDX 0x15E0 664#define REG_MB_RFD0_PROD_IDX 0x15E0
611#define REG_MB_RFD1_PROD_IDX 0x15E4
612#define REG_MB_RFD2_PROD_IDX 0x15E8
613#define REG_MB_RFD3_PROD_IDX 0x15EC
614 665
615#define MB_PRIO_PROD_IDX_MASK 0xFFFF 666#define REG_TPD_PRI1_PIDX 0x15F0 /* 16bit,hi-tpd producer idx */
616#define REG_MB_PRIO_PROD_IDX 0x15F0 667#define REG_TPD_PRI0_PIDX 0x15F2 /* 16bit,lo-tpd producer idx */
617#define MB_HTPD_PROD_IDX_SHIFT 0 668#define REG_TPD_PRI1_CIDX 0x15F4 /* 16bit,hi-tpd consumer idx */
618#define MB_NTPD_PROD_IDX_SHIFT 16 669#define REG_TPD_PRI0_CIDX 0x15F6 /* 16bit,lo-tpd consumer idx */
619
620#define MB_PRIO_CONS_IDX_MASK 0xFFFF
621#define REG_MB_PRIO_CONS_IDX 0x15F4
622#define MB_HTPD_CONS_IDX_SHIFT 0
623#define MB_NTPD_CONS_IDX_SHIFT 16
624 670
625#define REG_MB_RFD01_CONS_IDX 0x15F8 671#define REG_MB_RFD01_CONS_IDX 0x15F8
626#define MB_RFD0_CONS_IDX_MASK 0x0000FFFF 672#define MB_RFD0_CONS_IDX_MASK 0x0000FFFF
627#define MB_RFD1_CONS_IDX_MASK 0xFFFF0000 673#define MB_RFD1_CONS_IDX_MASK 0xFFFF0000
628#define REG_MB_RFD23_CONS_IDX 0x15FC
629#define MB_RFD2_CONS_IDX_MASK 0x0000FFFF
630#define MB_RFD3_CONS_IDX_MASK 0xFFFF0000
631 674
632/* Interrupt Status Register */ 675/* Interrupt Status Register */
633#define REG_ISR 0x1600 676#define REG_ISR 0x1600
@@ -705,13 +748,6 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
705#define REG_INT_RETRIG_TIMER 0x1608 748#define REG_INT_RETRIG_TIMER 0x1608
706#define INT_RETRIG_TIMER_MASK 0xFFFF 749#define INT_RETRIG_TIMER_MASK 0xFFFF
707 750
708#define REG_HDS_CTRL 0x160C
709#define HDS_CTRL_EN 0x0001
710#define HDS_CTRL_BACKFILLSIZE_SHIFT 8
711#define HDS_CTRL_BACKFILLSIZE_MASK 0x0FFF
712#define HDS_CTRL_MAX_HDRSIZE_SHIFT 20
713#define HDS_CTRL_MAC_HDRSIZE_MASK 0x0FFF
714
715#define REG_MAC_RX_STATUS_BIN 0x1700 751#define REG_MAC_RX_STATUS_BIN 0x1700
716#define REG_MAC_RX_STATUS_END 0x175c 752#define REG_MAC_RX_STATUS_END 0x175c
717#define REG_MAC_TX_STATUS_BIN 0x1760 753#define REG_MAC_TX_STATUS_BIN 0x1760
@@ -796,73 +832,188 @@ int atl1c_phy_power_saving(struct atl1c_hw *hw);
796#define MII_DBG_ADDR 0x1D 832#define MII_DBG_ADDR 0x1D
797#define MII_DBG_DATA 0x1E 833#define MII_DBG_DATA 0x1E
798 834
799#define MII_ANA_CTRL_0 0x0 835/***************************** debug port *************************************/
800#define ANA_RESTART_CAL 0x0001 836
801#define ANA_MANUL_SWICH_ON_SHIFT 0x1 837#define MIIDBG_ANACTRL 0x00
802#define ANA_MANUL_SWICH_ON_MASK 0xF 838#define ANACTRL_CLK125M_DELAY_EN 0x8000
803#define ANA_MAN_ENABLE 0x0020 839#define ANACTRL_VCO_FAST 0x4000
804#define ANA_SEL_HSP 0x0040 840#define ANACTRL_VCO_SLOW 0x2000
805#define ANA_EN_HB 0x0080 841#define ANACTRL_AFE_MODE_EN 0x1000
806#define ANA_EN_HBIAS 0x0100 842#define ANACTRL_LCKDET_PHY 0x800
807#define ANA_OEN_125M 0x0200 843#define ANACTRL_LCKDET_EN 0x400
808#define ANA_EN_LCKDT 0x0400 844#define ANACTRL_OEN_125M 0x200
809#define ANA_LCKDT_PHY 0x0800 845#define ANACTRL_HBIAS_EN 0x100
810#define ANA_AFE_MODE 0x1000 846#define ANACTRL_HB_EN 0x80
811#define ANA_VCO_SLOW 0x2000 847#define ANACTRL_SEL_HSP 0x40
812#define ANA_VCO_FAST 0x4000 848#define ANACTRL_CLASSA_EN 0x20
813#define ANA_SEL_CLK125M_DSP 0x8000 849#define ANACTRL_MANUSWON_SWR_MASK 3U
814 850#define ANACTRL_MANUSWON_SWR_SHIFT 2
815#define MII_ANA_CTRL_4 0x4 851#define ANACTRL_MANUSWON_SWR_2V 0
816#define ANA_IECHO_ADJ_MASK 0xF 852#define ANACTRL_MANUSWON_SWR_1P9V 1
817#define ANA_IECHO_ADJ_3_SHIFT 0 853#define ANACTRL_MANUSWON_SWR_1P8V 2
818#define ANA_IECHO_ADJ_2_SHIFT 4 854#define ANACTRL_MANUSWON_SWR_1P7V 3
819#define ANA_IECHO_ADJ_1_SHIFT 8 855#define ANACTRL_MANUSWON_BW3_4M 0x2
820#define ANA_IECHO_ADJ_0_SHIFT 12 856#define ANACTRL_RESTART_CAL 0x1
821 857#define ANACTRL_DEF 0x02EF
822#define MII_ANA_CTRL_5 0x5 858
823#define ANA_SERDES_CDR_BW_SHIFT 0 859#define MIIDBG_SYSMODCTRL 0x04
824#define ANA_SERDES_CDR_BW_MASK 0x3 860#define SYSMODCTRL_IECHOADJ_PFMH_PHY 0x8000
825#define ANA_MS_PAD_DBG 0x0004 861#define SYSMODCTRL_IECHOADJ_BIASGEN 0x4000
826#define ANA_SPEEDUP_DBG 0x0008 862#define SYSMODCTRL_IECHOADJ_PFML_PHY 0x2000
827#define ANA_SERDES_TH_LOS_SHIFT 4 863#define SYSMODCTRL_IECHOADJ_PS_MASK 3U
828#define ANA_SERDES_TH_LOS_MASK 0x3 864#define SYSMODCTRL_IECHOADJ_PS_SHIFT 10
829#define ANA_SERDES_EN_DEEM 0x0040 865#define SYSMODCTRL_IECHOADJ_PS_40 3
830#define ANA_SERDES_TXELECIDLE 0x0080 866#define SYSMODCTRL_IECHOADJ_PS_20 2
831#define ANA_SERDES_BEACON 0x0100 867#define SYSMODCTRL_IECHOADJ_PS_0 1
832#define ANA_SERDES_HALFTXDR 0x0200 868#define SYSMODCTRL_IECHOADJ_10BT_100MV 0x40 /* 1:100mv, 0:200mv */
833#define ANA_SERDES_SEL_HSP 0x0400 869#define SYSMODCTRL_IECHOADJ_HLFAP_MASK 3U
834#define ANA_SERDES_EN_PLL 0x0800 870#define SYSMODCTRL_IECHOADJ_HLFAP_SHIFT 4
835#define ANA_SERDES_EN 0x1000 871#define SYSMODCTRL_IECHOADJ_VDFULBW 0x8
836#define ANA_SERDES_EN_LCKDT 0x2000 872#define SYSMODCTRL_IECHOADJ_VDBIASHLF 0x4
837 873#define SYSMODCTRL_IECHOADJ_VDAMPHLF 0x2
838#define MII_ANA_CTRL_11 0xB 874#define SYSMODCTRL_IECHOADJ_VDLANSW 0x1
839#define ANA_PS_HIB_EN 0x8000 875#define SYSMODCTRL_IECHOADJ_DEF 0x88BB /* ???? */
840 876
841#define MII_ANA_CTRL_18 0x12 877/* for l1d & l2cb */
842#define ANA_TEST_MODE_10BT_01SHIFT 0 878#define SYSMODCTRL_IECHOADJ_CUR_ADD 0x8000
843#define ANA_TEST_MODE_10BT_01MASK 0x3 879#define SYSMODCTRL_IECHOADJ_CUR_MASK 7U
844#define ANA_LOOP_SEL_10BT 0x0004 880#define SYSMODCTRL_IECHOADJ_CUR_SHIFT 12
845#define ANA_RGMII_MODE_SW 0x0008 881#define SYSMODCTRL_IECHOADJ_VOL_MASK 0xFU
846#define ANA_EN_LONGECABLE 0x0010 882#define SYSMODCTRL_IECHOADJ_VOL_SHIFT 8
847#define ANA_TEST_MODE_10BT_2 0x0020 883#define SYSMODCTRL_IECHOADJ_VOL_17ALL 3
848#define ANA_EN_10BT_IDLE 0x0400 884#define SYSMODCTRL_IECHOADJ_VOL_100M15 1
849#define ANA_EN_MASK_TB 0x0800 885#define SYSMODCTRL_IECHOADJ_VOL_10M17 0
850#define ANA_TRIGGER_SEL_TIMER_SHIFT 12 886#define SYSMODCTRL_IECHOADJ_BIAS1_MASK 0xFU
851#define ANA_TRIGGER_SEL_TIMER_MASK 0x3 887#define SYSMODCTRL_IECHOADJ_BIAS1_SHIFT 4
852#define ANA_INTERVAL_SEL_TIMER_SHIFT 14 888#define SYSMODCTRL_IECHOADJ_BIAS2_MASK 0xFU
853#define ANA_INTERVAL_SEL_TIMER_MASK 0x3 889#define SYSMODCTRL_IECHOADJ_BIAS2_SHIFT 0
854 890#define L1D_SYSMODCTRL_IECHOADJ_DEF 0x4FBB
855#define MII_ANA_CTRL_41 0x29 891
856#define ANA_TOP_PS_EN 0x8000 892#define MIIDBG_SRDSYSMOD 0x05
857 893#define SRDSYSMOD_LCKDET_EN 0x2000
858#define MII_ANA_CTRL_54 0x36 894#define SRDSYSMOD_PLL_EN 0x800
859#define ANA_LONG_CABLE_TH_100_SHIFT 0 895#define SRDSYSMOD_SEL_HSP 0x400
860#define ANA_LONG_CABLE_TH_100_MASK 0x3F 896#define SRDSYSMOD_HLFTXDR 0x200
861#define ANA_DESERVED 0x0040 897#define SRDSYSMOD_TXCLK_DELAY_EN 0x100
862#define ANA_EN_LIT_CH 0x0080 898#define SRDSYSMOD_TXELECIDLE 0x80
863#define ANA_SHORT_CABLE_TH_100_SHIFT 8 899#define SRDSYSMOD_DEEMP_EN 0x40
864#define ANA_SHORT_CABLE_TH_100_MASK 0x3F 900#define SRDSYSMOD_MS_PAD 0x4
865#define ANA_BP_BAD_LINK_ACCUM 0x4000 901#define SRDSYSMOD_CDR_ADC_VLTG 0x2
866#define ANA_BP_SMALL_BW 0x8000 902#define SRDSYSMOD_CDR_DAC_1MA 0x1
903#define SRDSYSMOD_DEF 0x2C46
904
905#define MIIDBG_CFGLPSPD 0x0A
906#define CFGLPSPD_RSTCNT_MASK 3U
907#define CFGLPSPD_RSTCNT_SHIFT 14
908#define CFGLPSPD_RSTCNT_CLK125SW 0x2000
909
910#define MIIDBG_HIBNEG 0x0B
911#define HIBNEG_PSHIB_EN 0x8000
912#define HIBNEG_WAKE_BOTH 0x4000
913#define HIBNEG_ONOFF_ANACHG_SUDEN 0x2000
914#define HIBNEG_HIB_PULSE 0x1000
915#define HIBNEG_GATE_25M_EN 0x800
916#define HIBNEG_RST_80U 0x400
917#define HIBNEG_RST_TIMER_MASK 3U
918#define HIBNEG_RST_TIMER_SHIFT 8
919#define HIBNEG_GTX_CLK_DELAY_MASK 3U
920#define HIBNEG_GTX_CLK_DELAY_SHIFT 5
921#define HIBNEG_BYPSS_BRKTIMER 0x10
922#define HIBNEG_DEF 0xBC40
923
924#define MIIDBG_TST10BTCFG 0x12
925#define TST10BTCFG_INTV_TIMER_MASK 3U
926#define TST10BTCFG_INTV_TIMER_SHIFT 14
927#define TST10BTCFG_TRIGER_TIMER_MASK 3U
928#define TST10BTCFG_TRIGER_TIMER_SHIFT 12
929#define TST10BTCFG_DIV_MAN_MLT3_EN 0x800
930#define TST10BTCFG_OFF_DAC_IDLE 0x400
931#define TST10BTCFG_LPBK_DEEP 0x4 /* 1:deep,0:shallow */
932#define TST10BTCFG_DEF 0x4C04
933
934#define MIIDBG_AZ_ANADECT 0x15
935#define AZ_ANADECT_10BTRX_TH 0x8000
936#define AZ_ANADECT_BOTH_01CHNL 0x4000
937#define AZ_ANADECT_INTV_MASK 0x3FU
938#define AZ_ANADECT_INTV_SHIFT 8
939#define AZ_ANADECT_THRESH_MASK 0xFU
940#define AZ_ANADECT_THRESH_SHIFT 4
941#define AZ_ANADECT_CHNL_MASK 0xFU
942#define AZ_ANADECT_CHNL_SHIFT 0
943#define AZ_ANADECT_DEF 0x3220
944#define AZ_ANADECT_LONG 0xb210
945
946#define MIIDBG_MSE16DB 0x18 /* l1d */
947#define L1D_MSE16DB_UP 0x05EA
948#define L1D_MSE16DB_DOWN 0x02EA
949
950#define MIIDBG_LEGCYPS 0x29
951#define LEGCYPS_EN 0x8000
952#define LEGCYPS_DAC_AMP1000_MASK 7U
953#define LEGCYPS_DAC_AMP1000_SHIFT 12
954#define LEGCYPS_DAC_AMP100_MASK 7U
955#define LEGCYPS_DAC_AMP100_SHIFT 9
956#define LEGCYPS_DAC_AMP10_MASK 7U
957#define LEGCYPS_DAC_AMP10_SHIFT 6
958#define LEGCYPS_UNPLUG_TIMER_MASK 7U
959#define LEGCYPS_UNPLUG_TIMER_SHIFT 3
960#define LEGCYPS_UNPLUG_DECT_EN 0x4
961#define LEGCYPS_ECNC_PS_EN 0x1
962#define L1D_LEGCYPS_DEF 0x129D
963#define L1C_LEGCYPS_DEF 0x36DD
964
965#define MIIDBG_TST100BTCFG 0x36
966#define TST100BTCFG_NORMAL_BW_EN 0x8000
967#define TST100BTCFG_BADLNK_BYPASS 0x4000
968#define TST100BTCFG_SHORTCABL_TH_MASK 0x3FU
969#define TST100BTCFG_SHORTCABL_TH_SHIFT 8
970#define TST100BTCFG_LITCH_EN 0x80
971#define TST100BTCFG_VLT_SW 0x40
972#define TST100BTCFG_LONGCABL_TH_MASK 0x3FU
973#define TST100BTCFG_LONGCABL_TH_SHIFT 0
974#define TST100BTCFG_DEF 0xE12C
975
976#define MIIDBG_VOLT_CTRL 0x3B /* only for l2cb 1 & 2 */
977#define VOLT_CTRL_CABLE1TH_MASK 0x1FFU
978#define VOLT_CTRL_CABLE1TH_SHIFT 7
979#define VOLT_CTRL_AMPCTRL_MASK 3U
980#define VOLT_CTRL_AMPCTRL_SHIFT 5
981#define VOLT_CTRL_SW_BYPASS 0x10
982#define VOLT_CTRL_SWLOWEST 0x8
983#define VOLT_CTRL_DACAMP10_MASK 7U
984#define VOLT_CTRL_DACAMP10_SHIFT 0
985
986#define MIIDBG_CABLE1TH_DET 0x3E
987#define CABLE1TH_DET_EN 0x8000
988
989
990/******* dev 3 *********/
991#define MIIEXT_PCS 3
992
993#define MIIEXT_CLDCTRL3 0x8003
994#define CLDCTRL3_BP_CABLE1TH_DET_GT 0x8000
995#define CLDCTRL3_AZ_DISAMP 0x1000
996#define L2CB_CLDCTRL3 0x4D19
997#define L1D_CLDCTRL3 0xDD19
998
999#define MIIEXT_CLDCTRL6 0x8006
1000#define CLDCTRL6_CAB_LEN_MASK 0x1FFU
1001#define CLDCTRL6_CAB_LEN_SHIFT 0
1002#define CLDCTRL6_CAB_LEN_SHORT 0x50
1003
1004/********* dev 7 **********/
1005#define MIIEXT_ANEG 7
1006
1007#define MIIEXT_LOCAL_EEEADV 0x3C
1008#define LOCAL_EEEADV_1000BT 0x4
1009#define LOCAL_EEEADV_100BT 0x2
1010
1011#define MIIEXT_REMOTE_EEEADV 0x3D
1012#define REMOTE_EEEADV_1000BT 0x4
1013#define REMOTE_EEEADV_100BT 0x2
1014
1015#define MIIEXT_EEE_ANEG 0x8000
1016#define EEE_ANEG_1000M 0x4
1017#define EEE_ANEG_100M 0x2
867 1018
868#endif /*_ATL1C_HW_H_*/ 1019#endif /*_ATL1C_HW_H_*/
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 1ef0c9275dee..9cc15701101b 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -24,14 +24,6 @@
24#define ATL1C_DRV_VERSION "1.0.1.0-NAPI" 24#define ATL1C_DRV_VERSION "1.0.1.0-NAPI"
25char atl1c_driver_name[] = "atl1c"; 25char atl1c_driver_name[] = "atl1c";
26char atl1c_driver_version[] = ATL1C_DRV_VERSION; 26char atl1c_driver_version[] = ATL1C_DRV_VERSION;
27#define PCI_DEVICE_ID_ATTANSIC_L2C 0x1062
28#define PCI_DEVICE_ID_ATTANSIC_L1C 0x1063
29#define PCI_DEVICE_ID_ATHEROS_L2C_B 0x2060 /* AR8152 v1.1 Fast 10/100 */
30#define PCI_DEVICE_ID_ATHEROS_L2C_B2 0x2062 /* AR8152 v2.0 Fast 10/100 */
31#define PCI_DEVICE_ID_ATHEROS_L1D 0x1073 /* AR8151 v1.0 Gigabit 1000 */
32#define PCI_DEVICE_ID_ATHEROS_L1D_2_0 0x1083 /* AR8151 v2.0 Gigabit 1000 */
33#define L2CB_V10 0xc0
34#define L2CB_V11 0xc1
35 27
36/* 28/*
37 * atl1c_pci_tbl - PCI Device ID Table 29 * atl1c_pci_tbl - PCI Device ID Table
@@ -54,70 +46,72 @@ static DEFINE_PCI_DEVICE_TABLE(atl1c_pci_tbl) = {
54}; 46};
55MODULE_DEVICE_TABLE(pci, atl1c_pci_tbl); 47MODULE_DEVICE_TABLE(pci, atl1c_pci_tbl);
56 48
57MODULE_AUTHOR("Jie Yang <jie.yang@atheros.com>"); 49MODULE_AUTHOR("Jie Yang");
58MODULE_DESCRIPTION("Atheros 1000M Ethernet Network Driver"); 50MODULE_AUTHOR("Qualcomm Atheros Inc., <nic-devel@qualcomm.com>");
51MODULE_DESCRIPTION("Qualcom Atheros 100/1000M Ethernet Network Driver");
59MODULE_LICENSE("GPL"); 52MODULE_LICENSE("GPL");
60MODULE_VERSION(ATL1C_DRV_VERSION); 53MODULE_VERSION(ATL1C_DRV_VERSION);
61 54
62static int atl1c_stop_mac(struct atl1c_hw *hw); 55static int atl1c_stop_mac(struct atl1c_hw *hw);
63static void atl1c_enable_rx_ctrl(struct atl1c_hw *hw);
64static void atl1c_enable_tx_ctrl(struct atl1c_hw *hw);
65static void atl1c_disable_l0s_l1(struct atl1c_hw *hw); 56static void atl1c_disable_l0s_l1(struct atl1c_hw *hw);
66static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup); 57static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed);
67static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter); 58static void atl1c_start_mac(struct atl1c_adapter *adapter);
68static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que, 59static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter,
69 int *work_done, int work_to_do); 60 int *work_done, int work_to_do);
70static int atl1c_up(struct atl1c_adapter *adapter); 61static int atl1c_up(struct atl1c_adapter *adapter);
71static void atl1c_down(struct atl1c_adapter *adapter); 62static void atl1c_down(struct atl1c_adapter *adapter);
63static int atl1c_reset_mac(struct atl1c_hw *hw);
64static void atl1c_reset_dma_ring(struct atl1c_adapter *adapter);
65static int atl1c_configure(struct atl1c_adapter *adapter);
66static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter);
72 67
73static const u16 atl1c_pay_load_size[] = { 68static const u16 atl1c_pay_load_size[] = {
74 128, 256, 512, 1024, 2048, 4096, 69 128, 256, 512, 1024, 2048, 4096,
75}; 70};
76 71
77static const u16 atl1c_rfd_prod_idx_regs[AT_MAX_RECEIVE_QUEUE] =
78{
79 REG_MB_RFD0_PROD_IDX,
80 REG_MB_RFD1_PROD_IDX,
81 REG_MB_RFD2_PROD_IDX,
82 REG_MB_RFD3_PROD_IDX
83};
84
85static const u16 atl1c_rfd_addr_lo_regs[AT_MAX_RECEIVE_QUEUE] =
86{
87 REG_RFD0_HEAD_ADDR_LO,
88 REG_RFD1_HEAD_ADDR_LO,
89 REG_RFD2_HEAD_ADDR_LO,
90 REG_RFD3_HEAD_ADDR_LO
91};
92
93static const u16 atl1c_rrd_addr_lo_regs[AT_MAX_RECEIVE_QUEUE] =
94{
95 REG_RRD0_HEAD_ADDR_LO,
96 REG_RRD1_HEAD_ADDR_LO,
97 REG_RRD2_HEAD_ADDR_LO,
98 REG_RRD3_HEAD_ADDR_LO
99};
100 72
101static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | 73static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
102 NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP; 74 NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
103static void atl1c_pcie_patch(struct atl1c_hw *hw) 75static void atl1c_pcie_patch(struct atl1c_hw *hw)
104{ 76{
105 u32 data; 77 u32 mst_data, data;
106 78
107 AT_READ_REG(hw, REG_PCIE_PHYMISC, &data); 79 /* pclk sel could switch to 25M */
108 data |= PCIE_PHYMISC_FORCE_RCV_DET; 80 AT_READ_REG(hw, REG_MASTER_CTRL, &mst_data);
109 AT_WRITE_REG(hw, REG_PCIE_PHYMISC, data); 81 mst_data &= ~MASTER_CTRL_CLK_SEL_DIS;
82 AT_WRITE_REG(hw, REG_MASTER_CTRL, mst_data);
110 83
84 /* WoL/PCIE related settings */
85 if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) {
86 AT_READ_REG(hw, REG_PCIE_PHYMISC, &data);
87 data |= PCIE_PHYMISC_FORCE_RCV_DET;
88 AT_WRITE_REG(hw, REG_PCIE_PHYMISC, data);
89 } else { /* new dev set bit5 of MASTER */
90 if (!(mst_data & MASTER_CTRL_WAKEN_25M))
91 AT_WRITE_REG(hw, REG_MASTER_CTRL,
92 mst_data | MASTER_CTRL_WAKEN_25M);
93 }
94 /* aspm/PCIE setting only for l2cb 1.0 */
111 if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10) { 95 if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10) {
112 AT_READ_REG(hw, REG_PCIE_PHYMISC2, &data); 96 AT_READ_REG(hw, REG_PCIE_PHYMISC2, &data);
113 97 data = FIELD_SETX(data, PCIE_PHYMISC2_CDR_BW,
114 data &= ~(PCIE_PHYMISC2_SERDES_CDR_MASK << 98 L2CB1_PCIE_PHYMISC2_CDR_BW);
115 PCIE_PHYMISC2_SERDES_CDR_SHIFT); 99 data = FIELD_SETX(data, PCIE_PHYMISC2_L0S_TH,
116 data |= 3 << PCIE_PHYMISC2_SERDES_CDR_SHIFT; 100 L2CB1_PCIE_PHYMISC2_L0S_TH);
117 data &= ~(PCIE_PHYMISC2_SERDES_TH_MASK <<
118 PCIE_PHYMISC2_SERDES_TH_SHIFT);
119 data |= 3 << PCIE_PHYMISC2_SERDES_TH_SHIFT;
120 AT_WRITE_REG(hw, REG_PCIE_PHYMISC2, data); 101 AT_WRITE_REG(hw, REG_PCIE_PHYMISC2, data);
102 /* extend L1 sync timer */
103 AT_READ_REG(hw, REG_LINK_CTRL, &data);
104 data |= LINK_CTRL_EXT_SYNC;
105 AT_WRITE_REG(hw, REG_LINK_CTRL, data);
106 }
107 /* l2cb 1.x & l1d 1.x */
108 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d) {
109 AT_READ_REG(hw, REG_PM_CTRL, &data);
110 data |= PM_CTRL_L0S_BUFSRX_EN;
111 AT_WRITE_REG(hw, REG_PM_CTRL, data);
112 /* clear vendor msg */
113 AT_READ_REG(hw, REG_DMA_DBG, &data);
114 AT_WRITE_REG(hw, REG_DMA_DBG, data & ~DMA_DBG_VENDOR_MSG);
121 } 115 }
122} 116}
123 117
@@ -130,6 +124,7 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
130 u32 data; 124 u32 data;
131 u32 pci_cmd; 125 u32 pci_cmd;
132 struct pci_dev *pdev = hw->adapter->pdev; 126 struct pci_dev *pdev = hw->adapter->pdev;
127 int pos;
133 128
134 AT_READ_REG(hw, PCI_COMMAND, &pci_cmd); 129 AT_READ_REG(hw, PCI_COMMAND, &pci_cmd);
135 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; 130 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
@@ -142,14 +137,23 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
142 */ 137 */
143 pci_enable_wake(pdev, PCI_D3hot, 0); 138 pci_enable_wake(pdev, PCI_D3hot, 0);
144 pci_enable_wake(pdev, PCI_D3cold, 0); 139 pci_enable_wake(pdev, PCI_D3cold, 0);
140 /* wol sts read-clear */
141 AT_READ_REG(hw, REG_WOL_CTRL, &data);
142 AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
145 143
146 /* 144 /*
147 * Mask some pcie error bits 145 * Mask some pcie error bits
148 */ 146 */
149 AT_READ_REG(hw, REG_PCIE_UC_SEVERITY, &data); 147 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
150 data &= ~PCIE_UC_SERVRITY_DLP; 148 pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data);
151 data &= ~PCIE_UC_SERVRITY_FCP; 149 data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP);
152 AT_WRITE_REG(hw, REG_PCIE_UC_SEVERITY, data); 150 pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data);
151 /* clear error status */
152 pci_write_config_word(pdev, pci_pcie_cap(pdev) + PCI_EXP_DEVSTA,
153 PCI_EXP_DEVSTA_NFED |
154 PCI_EXP_DEVSTA_FED |
155 PCI_EXP_DEVSTA_CED |
156 PCI_EXP_DEVSTA_URD);
153 157
154 AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &data); 158 AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &data);
155 data &= ~LTSSM_ID_EN_WRO; 159 data &= ~LTSSM_ID_EN_WRO;
@@ -158,11 +162,6 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
158 atl1c_pcie_patch(hw); 162 atl1c_pcie_patch(hw);
159 if (flag & ATL1C_PCIE_L0S_L1_DISABLE) 163 if (flag & ATL1C_PCIE_L0S_L1_DISABLE)
160 atl1c_disable_l0s_l1(hw); 164 atl1c_disable_l0s_l1(hw);
161 if (flag & ATL1C_PCIE_PHY_RESET)
162 AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT);
163 else
164 AT_WRITE_REG(hw, REG_GPHY_CTRL,
165 GPHY_CTRL_DEFAULT | GPHY_CTRL_EXT_RESET);
166 165
167 msleep(5); 166 msleep(5);
168} 167}
@@ -207,14 +206,14 @@ static inline void atl1c_irq_reset(struct atl1c_adapter *adapter)
207 * atl1c_wait_until_idle - wait up to AT_HW_MAX_IDLE_DELAY reads 206 * atl1c_wait_until_idle - wait up to AT_HW_MAX_IDLE_DELAY reads
208 * of the idle status register until the device is actually idle 207 * of the idle status register until the device is actually idle
209 */ 208 */
210static u32 atl1c_wait_until_idle(struct atl1c_hw *hw) 209static u32 atl1c_wait_until_idle(struct atl1c_hw *hw, u32 modu_ctrl)
211{ 210{
212 int timeout; 211 int timeout;
213 u32 data; 212 u32 data;
214 213
215 for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) { 214 for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) {
216 AT_READ_REG(hw, REG_IDLE_STATUS, &data); 215 AT_READ_REG(hw, REG_IDLE_STATUS, &data);
217 if ((data & IDLE_STATUS_MASK) == 0) 216 if ((data & modu_ctrl) == 0)
218 return 0; 217 return 0;
219 msleep(1); 218 msleep(1);
220 } 219 }
@@ -261,15 +260,16 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
261 260
262 if ((phy_data & BMSR_LSTATUS) == 0) { 261 if ((phy_data & BMSR_LSTATUS) == 0) {
263 /* link down */ 262 /* link down */
264 hw->hibernate = true;
265 if (atl1c_stop_mac(hw) != 0)
266 if (netif_msg_hw(adapter))
267 dev_warn(&pdev->dev, "stop mac failed\n");
268 atl1c_set_aspm(hw, false);
269 netif_carrier_off(netdev); 263 netif_carrier_off(netdev);
270 netif_stop_queue(netdev); 264 netif_stop_queue(netdev);
271 atl1c_phy_reset(hw); 265 hw->hibernate = true;
272 atl1c_phy_init(&adapter->hw); 266 if (atl1c_reset_mac(hw) != 0)
267 if (netif_msg_hw(adapter))
268 dev_warn(&pdev->dev, "reset mac failed\n");
269 atl1c_set_aspm(hw, SPEED_0);
270 atl1c_post_phy_linkchg(hw, SPEED_0);
271 atl1c_reset_dma_ring(adapter);
272 atl1c_configure(adapter);
273 } else { 273 } else {
274 /* Link Up */ 274 /* Link Up */
275 hw->hibernate = false; 275 hw->hibernate = false;
@@ -283,10 +283,9 @@ static void atl1c_check_link_status(struct atl1c_adapter *adapter)
283 adapter->link_duplex != duplex) { 283 adapter->link_duplex != duplex) {
284 adapter->link_speed = speed; 284 adapter->link_speed = speed;
285 adapter->link_duplex = duplex; 285 adapter->link_duplex = duplex;
286 atl1c_set_aspm(hw, true); 286 atl1c_set_aspm(hw, speed);
287 atl1c_enable_tx_ctrl(hw); 287 atl1c_post_phy_linkchg(hw, speed);
288 atl1c_enable_rx_ctrl(hw); 288 atl1c_start_mac(adapter);
289 atl1c_setup_mac_ctrl(adapter);
290 if (netif_msg_link(adapter)) 289 if (netif_msg_link(adapter))
291 dev_info(&pdev->dev, 290 dev_info(&pdev->dev,
292 "%s: %s NIC Link is Up<%d Mbps %s>\n", 291 "%s: %s NIC Link is Up<%d Mbps %s>\n",
@@ -337,6 +336,9 @@ static void atl1c_common_task(struct work_struct *work)
337 adapter = container_of(work, struct atl1c_adapter, common_task); 336 adapter = container_of(work, struct atl1c_adapter, common_task);
338 netdev = adapter->netdev; 337 netdev = adapter->netdev;
339 338
339 if (test_bit(__AT_DOWN, &adapter->flags))
340 return;
341
340 if (test_and_clear_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event)) { 342 if (test_and_clear_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event)) {
341 netif_device_detach(netdev); 343 netif_device_detach(netdev);
342 atl1c_down(adapter); 344 atl1c_down(adapter);
@@ -345,8 +347,11 @@ static void atl1c_common_task(struct work_struct *work)
345 } 347 }
346 348
347 if (test_and_clear_bit(ATL1C_WORK_EVENT_LINK_CHANGE, 349 if (test_and_clear_bit(ATL1C_WORK_EVENT_LINK_CHANGE,
348 &adapter->work_event)) 350 &adapter->work_event)) {
351 atl1c_irq_disable(adapter);
349 atl1c_check_link_status(adapter); 352 atl1c_check_link_status(adapter);
353 atl1c_irq_enable(adapter);
354 }
350} 355}
351 356
352 357
@@ -470,7 +475,7 @@ static int atl1c_set_mac_addr(struct net_device *netdev, void *p)
470 memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); 475 memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
471 netdev->addr_assign_type &= ~NET_ADDR_RANDOM; 476 netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
472 477
473 atl1c_hw_set_mac_addr(&adapter->hw); 478 atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.mac_addr);
474 479
475 return 0; 480 return 0;
476} 481}
@@ -523,11 +528,16 @@ static int atl1c_set_features(struct net_device *netdev,
523static int atl1c_change_mtu(struct net_device *netdev, int new_mtu) 528static int atl1c_change_mtu(struct net_device *netdev, int new_mtu)
524{ 529{
525 struct atl1c_adapter *adapter = netdev_priv(netdev); 530 struct atl1c_adapter *adapter = netdev_priv(netdev);
531 struct atl1c_hw *hw = &adapter->hw;
526 int old_mtu = netdev->mtu; 532 int old_mtu = netdev->mtu;
527 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 533 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
528 534
529 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || 535 /* Fast Ethernet controller doesn't support jumbo packet */
530 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 536 if (((hw->nic_type == athr_l2c ||
537 hw->nic_type == athr_l2c_b ||
538 hw->nic_type == athr_l2c_b2) && new_mtu > ETH_DATA_LEN) ||
539 max_frame < ETH_ZLEN + ETH_FCS_LEN ||
540 max_frame > MAX_JUMBO_FRAME_SIZE) {
531 if (netif_msg_link(adapter)) 541 if (netif_msg_link(adapter))
532 dev_warn(&adapter->pdev->dev, "invalid MTU setting\n"); 542 dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
533 return -EINVAL; 543 return -EINVAL;
@@ -543,14 +553,6 @@ static int atl1c_change_mtu(struct net_device *netdev, int new_mtu)
543 netdev_update_features(netdev); 553 netdev_update_features(netdev);
544 atl1c_up(adapter); 554 atl1c_up(adapter);
545 clear_bit(__AT_RESETTING, &adapter->flags); 555 clear_bit(__AT_RESETTING, &adapter->flags);
546 if (adapter->hw.ctrl_flags & ATL1C_FPGA_VERSION) {
547 u32 phy_data;
548
549 AT_READ_REG(&adapter->hw, 0x1414, &phy_data);
550 phy_data |= 0x10000000;
551 AT_WRITE_REG(&adapter->hw, 0x1414, phy_data);
552 }
553
554 } 556 }
555 return 0; 557 return 0;
556} 558}
@@ -563,7 +565,7 @@ static int atl1c_mdio_read(struct net_device *netdev, int phy_id, int reg_num)
563 struct atl1c_adapter *adapter = netdev_priv(netdev); 565 struct atl1c_adapter *adapter = netdev_priv(netdev);
564 u16 result; 566 u16 result;
565 567
566 atl1c_read_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, &result); 568 atl1c_read_phy_reg(&adapter->hw, reg_num, &result);
567 return result; 569 return result;
568} 570}
569 571
@@ -572,7 +574,7 @@ static void atl1c_mdio_write(struct net_device *netdev, int phy_id,
572{ 574{
573 struct atl1c_adapter *adapter = netdev_priv(netdev); 575 struct atl1c_adapter *adapter = netdev_priv(netdev);
574 576
575 atl1c_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val); 577 atl1c_write_phy_reg(&adapter->hw, reg_num, val);
576} 578}
577 579
578/* 580/*
@@ -687,21 +689,15 @@ static void atl1c_set_mac_type(struct atl1c_hw *hw)
687 689
688static int atl1c_setup_mac_funcs(struct atl1c_hw *hw) 690static int atl1c_setup_mac_funcs(struct atl1c_hw *hw)
689{ 691{
690 u32 phy_status_data;
691 u32 link_ctrl_data; 692 u32 link_ctrl_data;
692 693
693 atl1c_set_mac_type(hw); 694 atl1c_set_mac_type(hw);
694 AT_READ_REG(hw, REG_PHY_STATUS, &phy_status_data);
695 AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data); 695 AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data);
696 696
697 hw->ctrl_flags = ATL1C_INTR_MODRT_ENABLE | 697 hw->ctrl_flags = ATL1C_INTR_MODRT_ENABLE |
698 ATL1C_TXQ_MODE_ENHANCE; 698 ATL1C_TXQ_MODE_ENHANCE;
699 if (link_ctrl_data & LINK_CTRL_L0S_EN) 699 hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT |
700 hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT; 700 ATL1C_ASPM_L1_SUPPORT;
701 if (link_ctrl_data & LINK_CTRL_L1_EN)
702 hw->ctrl_flags |= ATL1C_ASPM_L1_SUPPORT;
703 if (link_ctrl_data & LINK_CTRL_EXT_SYNC)
704 hw->ctrl_flags |= ATL1C_LINK_EXT_SYNC;
705 hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON; 701 hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON;
706 702
707 if (hw->nic_type == athr_l1c || 703 if (hw->nic_type == athr_l1c ||
@@ -710,6 +706,55 @@ static int atl1c_setup_mac_funcs(struct atl1c_hw *hw)
710 hw->link_cap_flags |= ATL1C_LINK_CAP_1000M; 706 hw->link_cap_flags |= ATL1C_LINK_CAP_1000M;
711 return 0; 707 return 0;
712} 708}
709
710struct atl1c_platform_patch {
711 u16 pci_did;
712 u8 pci_revid;
713 u16 subsystem_vid;
714 u16 subsystem_did;
715 u32 patch_flag;
716#define ATL1C_LINK_PATCH 0x1
717};
718static const struct atl1c_platform_patch plats[] __devinitdata = {
719{0x2060, 0xC1, 0x1019, 0x8152, 0x1},
720{0x2060, 0xC1, 0x1019, 0x2060, 0x1},
721{0x2060, 0xC1, 0x1019, 0xE000, 0x1},
722{0x2062, 0xC0, 0x1019, 0x8152, 0x1},
723{0x2062, 0xC0, 0x1019, 0x2062, 0x1},
724{0x2062, 0xC0, 0x1458, 0xE000, 0x1},
725{0x2062, 0xC1, 0x1019, 0x8152, 0x1},
726{0x2062, 0xC1, 0x1019, 0x2062, 0x1},
727{0x2062, 0xC1, 0x1458, 0xE000, 0x1},
728{0x2062, 0xC1, 0x1565, 0x2802, 0x1},
729{0x2062, 0xC1, 0x1565, 0x2801, 0x1},
730{0x1073, 0xC0, 0x1019, 0x8151, 0x1},
731{0x1073, 0xC0, 0x1019, 0x1073, 0x1},
732{0x1073, 0xC0, 0x1458, 0xE000, 0x1},
733{0x1083, 0xC0, 0x1458, 0xE000, 0x1},
734{0x1083, 0xC0, 0x1019, 0x8151, 0x1},
735{0x1083, 0xC0, 0x1019, 0x1083, 0x1},
736{0x1083, 0xC0, 0x1462, 0x7680, 0x1},
737{0x1083, 0xC0, 0x1565, 0x2803, 0x1},
738{0},
739};
740
741static void __devinit atl1c_patch_assign(struct atl1c_hw *hw)
742{
743 int i = 0;
744
745 hw->msi_lnkpatch = false;
746
747 while (plats[i].pci_did != 0) {
748 if (plats[i].pci_did == hw->device_id &&
749 plats[i].pci_revid == hw->revision_id &&
750 plats[i].subsystem_vid == hw->subsystem_vendor_id &&
751 plats[i].subsystem_did == hw->subsystem_id) {
752 if (plats[i].patch_flag & ATL1C_LINK_PATCH)
753 hw->msi_lnkpatch = true;
754 }
755 i++;
756 }
757}
713/* 758/*
714 * atl1c_sw_init - Initialize general software structures (struct atl1c_adapter) 759 * atl1c_sw_init - Initialize general software structures (struct atl1c_adapter)
715 * @adapter: board private structure to initialize 760 * @adapter: board private structure to initialize
@@ -729,9 +774,8 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)
729 device_set_wakeup_enable(&pdev->dev, false); 774 device_set_wakeup_enable(&pdev->dev, false);
730 adapter->link_speed = SPEED_0; 775 adapter->link_speed = SPEED_0;
731 adapter->link_duplex = FULL_DUPLEX; 776 adapter->link_duplex = FULL_DUPLEX;
732 adapter->num_rx_queues = AT_DEF_RECEIVE_QUEUE;
733 adapter->tpd_ring[0].count = 1024; 777 adapter->tpd_ring[0].count = 1024;
734 adapter->rfd_ring[0].count = 512; 778 adapter->rfd_ring.count = 512;
735 779
736 hw->vendor_id = pdev->vendor; 780 hw->vendor_id = pdev->vendor;
737 hw->device_id = pdev->device; 781 hw->device_id = pdev->device;
@@ -746,26 +790,18 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)
746 dev_err(&pdev->dev, "set mac function pointers failed\n"); 790 dev_err(&pdev->dev, "set mac function pointers failed\n");
747 return -1; 791 return -1;
748 } 792 }
793 atl1c_patch_assign(hw);
794
749 hw->intr_mask = IMR_NORMAL_MASK; 795 hw->intr_mask = IMR_NORMAL_MASK;
750 hw->phy_configured = false; 796 hw->phy_configured = false;
751 hw->preamble_len = 7; 797 hw->preamble_len = 7;
752 hw->max_frame_size = adapter->netdev->mtu; 798 hw->max_frame_size = adapter->netdev->mtu;
753 if (adapter->num_rx_queues < 2) {
754 hw->rss_type = atl1c_rss_disable;
755 hw->rss_mode = atl1c_rss_mode_disable;
756 } else {
757 hw->rss_type = atl1c_rss_ipv4;
758 hw->rss_mode = atl1c_rss_mul_que_mul_int;
759 hw->rss_hash_bits = 16;
760 }
761 hw->autoneg_advertised = ADVERTISED_Autoneg; 799 hw->autoneg_advertised = ADVERTISED_Autoneg;
762 hw->indirect_tab = 0xE4E4E4E4; 800 hw->indirect_tab = 0xE4E4E4E4;
763 hw->base_cpu = 0; 801 hw->base_cpu = 0;
764 802
765 hw->ict = 50000; /* 100ms */ 803 hw->ict = 50000; /* 100ms */
766 hw->smb_timer = 200000; /* 400ms */ 804 hw->smb_timer = 200000; /* 400ms */
767 hw->cmb_tpd = 4;
768 hw->cmb_tx_timer = 1; /* 2 us */
769 hw->rx_imt = 200; 805 hw->rx_imt = 200;
770 hw->tx_imt = 1000; 806 hw->tx_imt = 1000;
771 807
@@ -773,9 +809,6 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)
773 hw->rfd_burst = 8; 809 hw->rfd_burst = 8;
774 hw->dma_order = atl1c_dma_ord_out; 810 hw->dma_order = atl1c_dma_ord_out;
775 hw->dmar_block = atl1c_dma_req_1024; 811 hw->dmar_block = atl1c_dma_req_1024;
776 hw->dmaw_block = atl1c_dma_req_1024;
777 hw->dmar_dly_cnt = 15;
778 hw->dmaw_dly_cnt = 4;
779 812
780 if (atl1c_alloc_queues(adapter)) { 813 if (atl1c_alloc_queues(adapter)) {
781 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); 814 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
@@ -851,24 +884,22 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
851 */ 884 */
852static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter) 885static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter)
853{ 886{
854 struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring; 887 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
855 struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring; 888 struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
856 struct atl1c_buffer *buffer_info; 889 struct atl1c_buffer *buffer_info;
857 struct pci_dev *pdev = adapter->pdev; 890 struct pci_dev *pdev = adapter->pdev;
858 int i, j; 891 int j;
859 892
860 for (i = 0; i < adapter->num_rx_queues; i++) { 893 for (j = 0; j < rfd_ring->count; j++) {
861 for (j = 0; j < rfd_ring[i].count; j++) { 894 buffer_info = &rfd_ring->buffer_info[j];
862 buffer_info = &rfd_ring[i].buffer_info[j]; 895 atl1c_clean_buffer(pdev, buffer_info, 0);
863 atl1c_clean_buffer(pdev, buffer_info, 0);
864 }
865 /* zero out the descriptor ring */
866 memset(rfd_ring[i].desc, 0, rfd_ring[i].size);
867 rfd_ring[i].next_to_clean = 0;
868 rfd_ring[i].next_to_use = 0;
869 rrd_ring[i].next_to_use = 0;
870 rrd_ring[i].next_to_clean = 0;
871 } 896 }
897 /* zero out the descriptor ring */
898 memset(rfd_ring->desc, 0, rfd_ring->size);
899 rfd_ring->next_to_clean = 0;
900 rfd_ring->next_to_use = 0;
901 rrd_ring->next_to_use = 0;
902 rrd_ring->next_to_clean = 0;
872} 903}
873 904
874/* 905/*
@@ -877,8 +908,8 @@ static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter)
877static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter) 908static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
878{ 909{
879 struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring; 910 struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
880 struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring; 911 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
881 struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring; 912 struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
882 struct atl1c_buffer *buffer_info; 913 struct atl1c_buffer *buffer_info;
883 int i, j; 914 int i, j;
884 915
@@ -890,15 +921,13 @@ static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
890 ATL1C_SET_BUFFER_STATE(&buffer_info[i], 921 ATL1C_SET_BUFFER_STATE(&buffer_info[i],
891 ATL1C_BUFFER_FREE); 922 ATL1C_BUFFER_FREE);
892 } 923 }
893 for (i = 0; i < adapter->num_rx_queues; i++) { 924 rfd_ring->next_to_use = 0;
894 rfd_ring[i].next_to_use = 0; 925 rfd_ring->next_to_clean = 0;
895 rfd_ring[i].next_to_clean = 0; 926 rrd_ring->next_to_use = 0;
896 rrd_ring[i].next_to_use = 0; 927 rrd_ring->next_to_clean = 0;
897 rrd_ring[i].next_to_clean = 0; 928 for (j = 0; j < rfd_ring->count; j++) {
898 for (j = 0; j < rfd_ring[i].count; j++) { 929 buffer_info = &rfd_ring->buffer_info[j];
899 buffer_info = &rfd_ring[i].buffer_info[j]; 930 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
900 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
901 }
902 } 931 }
903} 932}
904 933
@@ -935,27 +964,23 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
935{ 964{
936 struct pci_dev *pdev = adapter->pdev; 965 struct pci_dev *pdev = adapter->pdev;
937 struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring; 966 struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
938 struct atl1c_rfd_ring *rfd_ring = adapter->rfd_ring; 967 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
939 struct atl1c_rrd_ring *rrd_ring = adapter->rrd_ring; 968 struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
940 struct atl1c_ring_header *ring_header = &adapter->ring_header; 969 struct atl1c_ring_header *ring_header = &adapter->ring_header;
941 int num_rx_queues = adapter->num_rx_queues;
942 int size; 970 int size;
943 int i; 971 int i;
944 int count = 0; 972 int count = 0;
945 int rx_desc_count = 0; 973 int rx_desc_count = 0;
946 u32 offset = 0; 974 u32 offset = 0;
947 975
948 rrd_ring[0].count = rfd_ring[0].count; 976 rrd_ring->count = rfd_ring->count;
949 for (i = 1; i < AT_MAX_TRANSMIT_QUEUE; i++) 977 for (i = 1; i < AT_MAX_TRANSMIT_QUEUE; i++)
950 tpd_ring[i].count = tpd_ring[0].count; 978 tpd_ring[i].count = tpd_ring[0].count;
951 979
952 for (i = 1; i < adapter->num_rx_queues; i++)
953 rfd_ring[i].count = rrd_ring[i].count = rfd_ring[0].count;
954
955 /* 2 tpd queue, one high priority queue, 980 /* 2 tpd queue, one high priority queue,
956 * another normal priority queue */ 981 * another normal priority queue */
957 size = sizeof(struct atl1c_buffer) * (tpd_ring->count * 2 + 982 size = sizeof(struct atl1c_buffer) * (tpd_ring->count * 2 +
958 rfd_ring->count * num_rx_queues); 983 rfd_ring->count);
959 tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL); 984 tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
960 if (unlikely(!tpd_ring->buffer_info)) { 985 if (unlikely(!tpd_ring->buffer_info)) {
961 dev_err(&pdev->dev, "kzalloc failed, size = %d\n", 986 dev_err(&pdev->dev, "kzalloc failed, size = %d\n",
@@ -968,12 +993,11 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
968 count += tpd_ring[i].count; 993 count += tpd_ring[i].count;
969 } 994 }
970 995
971 for (i = 0; i < num_rx_queues; i++) { 996 rfd_ring->buffer_info =
972 rfd_ring[i].buffer_info = 997 (struct atl1c_buffer *) (tpd_ring->buffer_info + count);
973 (struct atl1c_buffer *) (tpd_ring->buffer_info + count); 998 count += rfd_ring->count;
974 count += rfd_ring[i].count; 999 rx_desc_count += rfd_ring->count;
975 rx_desc_count += rfd_ring[i].count; 1000
976 }
977 /* 1001 /*
978 * real ring DMA buffer 1002 * real ring DMA buffer
979 * each ring/block may need up to 8 bytes for alignment, hence the 1003 * each ring/block may need up to 8 bytes for alignment, hence the
@@ -983,8 +1007,7 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
983 sizeof(struct atl1c_tpd_desc) * tpd_ring->count * 2 + 1007 sizeof(struct atl1c_tpd_desc) * tpd_ring->count * 2 +
984 sizeof(struct atl1c_rx_free_desc) * rx_desc_count + 1008 sizeof(struct atl1c_rx_free_desc) * rx_desc_count +
985 sizeof(struct atl1c_recv_ret_status) * rx_desc_count + 1009 sizeof(struct atl1c_recv_ret_status) * rx_desc_count +
986 sizeof(struct atl1c_hw_stats) + 1010 8 * 4;
987 8 * 4 + 8 * 2 * num_rx_queues;
988 1011
989 ring_header->desc = pci_alloc_consistent(pdev, ring_header->size, 1012 ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
990 &ring_header->dma); 1013 &ring_header->dma);
@@ -1005,25 +1028,18 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
1005 offset += roundup(tpd_ring[i].size, 8); 1028 offset += roundup(tpd_ring[i].size, 8);
1006 } 1029 }
1007 /* init RFD ring */ 1030 /* init RFD ring */
1008 for (i = 0; i < num_rx_queues; i++) { 1031 rfd_ring->dma = ring_header->dma + offset;
1009 rfd_ring[i].dma = ring_header->dma + offset; 1032 rfd_ring->desc = (u8 *) ring_header->desc + offset;
1010 rfd_ring[i].desc = (u8 *) ring_header->desc + offset; 1033 rfd_ring->size = sizeof(struct atl1c_rx_free_desc) * rfd_ring->count;
1011 rfd_ring[i].size = sizeof(struct atl1c_rx_free_desc) * 1034 offset += roundup(rfd_ring->size, 8);
1012 rfd_ring[i].count;
1013 offset += roundup(rfd_ring[i].size, 8);
1014 }
1015 1035
1016 /* init RRD ring */ 1036 /* init RRD ring */
1017 for (i = 0; i < num_rx_queues; i++) { 1037 rrd_ring->dma = ring_header->dma + offset;
1018 rrd_ring[i].dma = ring_header->dma + offset; 1038 rrd_ring->desc = (u8 *) ring_header->desc + offset;
1019 rrd_ring[i].desc = (u8 *) ring_header->desc + offset; 1039 rrd_ring->size = sizeof(struct atl1c_recv_ret_status) *
1020 rrd_ring[i].size = sizeof(struct atl1c_recv_ret_status) * 1040 rrd_ring->count;
1021 rrd_ring[i].count; 1041 offset += roundup(rrd_ring->size, 8);
1022 offset += roundup(rrd_ring[i].size, 8);
1023 }
1024 1042
1025 adapter->smb.dma = ring_header->dma + offset;
1026 adapter->smb.smb = (u8 *)ring_header->desc + offset;
1027 return 0; 1043 return 0;
1028 1044
1029err_nomem: 1045err_nomem:
@@ -1034,26 +1050,20 @@ err_nomem:
1034static void atl1c_configure_des_ring(struct atl1c_adapter *adapter) 1050static void atl1c_configure_des_ring(struct atl1c_adapter *adapter)
1035{ 1051{
1036 struct atl1c_hw *hw = &adapter->hw; 1052 struct atl1c_hw *hw = &adapter->hw;
1037 struct atl1c_rfd_ring *rfd_ring = (struct atl1c_rfd_ring *) 1053 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
1038 adapter->rfd_ring; 1054 struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
1039 struct atl1c_rrd_ring *rrd_ring = (struct atl1c_rrd_ring *)
1040 adapter->rrd_ring;
1041 struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *) 1055 struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *)
1042 adapter->tpd_ring; 1056 adapter->tpd_ring;
1043 struct atl1c_cmb *cmb = (struct atl1c_cmb *) &adapter->cmb;
1044 struct atl1c_smb *smb = (struct atl1c_smb *) &adapter->smb;
1045 int i;
1046 u32 data;
1047 1057
1048 /* TPD */ 1058 /* TPD */
1049 AT_WRITE_REG(hw, REG_TX_BASE_ADDR_HI, 1059 AT_WRITE_REG(hw, REG_TX_BASE_ADDR_HI,
1050 (u32)((tpd_ring[atl1c_trans_normal].dma & 1060 (u32)((tpd_ring[atl1c_trans_normal].dma &
1051 AT_DMA_HI_ADDR_MASK) >> 32)); 1061 AT_DMA_HI_ADDR_MASK) >> 32));
1052 /* just enable normal priority TX queue */ 1062 /* just enable normal priority TX queue */
1053 AT_WRITE_REG(hw, REG_NTPD_HEAD_ADDR_LO, 1063 AT_WRITE_REG(hw, REG_TPD_PRI0_ADDR_LO,
1054 (u32)(tpd_ring[atl1c_trans_normal].dma & 1064 (u32)(tpd_ring[atl1c_trans_normal].dma &
1055 AT_DMA_LO_ADDR_MASK)); 1065 AT_DMA_LO_ADDR_MASK));
1056 AT_WRITE_REG(hw, REG_HTPD_HEAD_ADDR_LO, 1066 AT_WRITE_REG(hw, REG_TPD_PRI1_ADDR_LO,
1057 (u32)(tpd_ring[atl1c_trans_high].dma & 1067 (u32)(tpd_ring[atl1c_trans_high].dma &
1058 AT_DMA_LO_ADDR_MASK)); 1068 AT_DMA_LO_ADDR_MASK));
1059 AT_WRITE_REG(hw, REG_TPD_RING_SIZE, 1069 AT_WRITE_REG(hw, REG_TPD_RING_SIZE,
@@ -1062,31 +1072,21 @@ static void atl1c_configure_des_ring(struct atl1c_adapter *adapter)
1062 1072
1063 /* RFD */ 1073 /* RFD */
1064 AT_WRITE_REG(hw, REG_RX_BASE_ADDR_HI, 1074 AT_WRITE_REG(hw, REG_RX_BASE_ADDR_HI,
1065 (u32)((rfd_ring[0].dma & AT_DMA_HI_ADDR_MASK) >> 32)); 1075 (u32)((rfd_ring->dma & AT_DMA_HI_ADDR_MASK) >> 32));
1066 for (i = 0; i < adapter->num_rx_queues; i++) 1076 AT_WRITE_REG(hw, REG_RFD0_HEAD_ADDR_LO,
1067 AT_WRITE_REG(hw, atl1c_rfd_addr_lo_regs[i], 1077 (u32)(rfd_ring->dma & AT_DMA_LO_ADDR_MASK));
1068 (u32)(rfd_ring[i].dma & AT_DMA_LO_ADDR_MASK));
1069 1078
1070 AT_WRITE_REG(hw, REG_RFD_RING_SIZE, 1079 AT_WRITE_REG(hw, REG_RFD_RING_SIZE,
1071 rfd_ring[0].count & RFD_RING_SIZE_MASK); 1080 rfd_ring->count & RFD_RING_SIZE_MASK);
1072 AT_WRITE_REG(hw, REG_RX_BUF_SIZE, 1081 AT_WRITE_REG(hw, REG_RX_BUF_SIZE,
1073 adapter->rx_buffer_len & RX_BUF_SIZE_MASK); 1082 adapter->rx_buffer_len & RX_BUF_SIZE_MASK);
1074 1083
1075 /* RRD */ 1084 /* RRD */
1076 for (i = 0; i < adapter->num_rx_queues; i++) 1085 AT_WRITE_REG(hw, REG_RRD0_HEAD_ADDR_LO,
1077 AT_WRITE_REG(hw, atl1c_rrd_addr_lo_regs[i], 1086 (u32)(rrd_ring->dma & AT_DMA_LO_ADDR_MASK));
1078 (u32)(rrd_ring[i].dma & AT_DMA_LO_ADDR_MASK));
1079 AT_WRITE_REG(hw, REG_RRD_RING_SIZE, 1087 AT_WRITE_REG(hw, REG_RRD_RING_SIZE,
1080 (rrd_ring[0].count & RRD_RING_SIZE_MASK)); 1088 (rrd_ring->count & RRD_RING_SIZE_MASK));
1081 1089
1082 /* CMB */
1083 AT_WRITE_REG(hw, REG_CMB_BASE_ADDR_LO, cmb->dma & AT_DMA_LO_ADDR_MASK);
1084
1085 /* SMB */
1086 AT_WRITE_REG(hw, REG_SMB_BASE_ADDR_HI,
1087 (u32)((smb->dma & AT_DMA_HI_ADDR_MASK) >> 32));
1088 AT_WRITE_REG(hw, REG_SMB_BASE_ADDR_LO,
1089 (u32)(smb->dma & AT_DMA_LO_ADDR_MASK));
1090 if (hw->nic_type == athr_l2c_b) { 1090 if (hw->nic_type == athr_l2c_b) {
1091 AT_WRITE_REG(hw, REG_SRAM_RXF_LEN, 0x02a0L); 1091 AT_WRITE_REG(hw, REG_SRAM_RXF_LEN, 0x02a0L);
1092 AT_WRITE_REG(hw, REG_SRAM_TXF_LEN, 0x0100L); 1092 AT_WRITE_REG(hw, REG_SRAM_TXF_LEN, 0x0100L);
@@ -1097,13 +1097,6 @@ static void atl1c_configure_des_ring(struct atl1c_adapter *adapter)
1097 AT_WRITE_REG(hw, REG_TXF_WATER_MARK, 0); /* TX watermark, to enter l1 state.*/ 1097 AT_WRITE_REG(hw, REG_TXF_WATER_MARK, 0); /* TX watermark, to enter l1 state.*/
1098 AT_WRITE_REG(hw, REG_RXD_DMA_CTRL, 0); /* RXD threshold.*/ 1098 AT_WRITE_REG(hw, REG_RXD_DMA_CTRL, 0); /* RXD threshold.*/
1099 } 1099 }
1100 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d_2) {
1101 /* Power Saving for L2c_B */
1102 AT_READ_REG(hw, REG_SERDES_LOCK, &data);
1103 data |= SERDES_MAC_CLK_SLOWDOWN;
1104 data |= SERDES_PYH_CLK_SLOWDOWN;
1105 AT_WRITE_REG(hw, REG_SERDES_LOCK, data);
1106 }
1107 /* Load all of base address above */ 1100 /* Load all of base address above */
1108 AT_WRITE_REG(hw, REG_LOAD_PTR, 1); 1101 AT_WRITE_REG(hw, REG_LOAD_PTR, 1);
1109} 1102}
@@ -1111,32 +1104,26 @@ static void atl1c_configure_des_ring(struct atl1c_adapter *adapter)
1111static void atl1c_configure_tx(struct atl1c_adapter *adapter) 1104static void atl1c_configure_tx(struct atl1c_adapter *adapter)
1112{ 1105{
1113 struct atl1c_hw *hw = &adapter->hw; 1106 struct atl1c_hw *hw = &adapter->hw;
1114 u32 dev_ctrl_data; 1107 int max_pay_load;
1115 u32 max_pay_load;
1116 u16 tx_offload_thresh; 1108 u16 tx_offload_thresh;
1117 u32 txq_ctrl_data; 1109 u32 txq_ctrl_data;
1118 u32 max_pay_load_data;
1119 1110
1120 tx_offload_thresh = MAX_TX_OFFLOAD_THRESH; 1111 tx_offload_thresh = MAX_TSO_FRAME_SIZE;
1121 AT_WRITE_REG(hw, REG_TX_TSO_OFFLOAD_THRESH, 1112 AT_WRITE_REG(hw, REG_TX_TSO_OFFLOAD_THRESH,
1122 (tx_offload_thresh >> 3) & TX_TSO_OFFLOAD_THRESH_MASK); 1113 (tx_offload_thresh >> 3) & TX_TSO_OFFLOAD_THRESH_MASK);
1123 AT_READ_REG(hw, REG_DEVICE_CTRL, &dev_ctrl_data); 1114 max_pay_load = pcie_get_readrq(adapter->pdev) >> 8;
1124 max_pay_load = (dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT) &
1125 DEVICE_CTRL_MAX_PAYLOAD_MASK;
1126 hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block);
1127 max_pay_load = (dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT) &
1128 DEVICE_CTRL_MAX_RREQ_SZ_MASK;
1129 hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block); 1115 hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block);
1130 1116 /*
1131 txq_ctrl_data = (hw->tpd_burst & TXQ_NUM_TPD_BURST_MASK) << 1117 * if BIOS had changed the dam-read-max-length to an invalid value,
1132 TXQ_NUM_TPD_BURST_SHIFT; 1118 * restore it to default value
1133 if (hw->ctrl_flags & ATL1C_TXQ_MODE_ENHANCE) 1119 */
1134 txq_ctrl_data |= TXQ_CTRL_ENH_MODE; 1120 if (hw->dmar_block < DEVICE_CTRL_MAXRRS_MIN) {
1135 max_pay_load_data = (atl1c_pay_load_size[hw->dmar_block] & 1121 pcie_set_readrq(adapter->pdev, 128 << DEVICE_CTRL_MAXRRS_MIN);
1136 TXQ_TXF_BURST_NUM_MASK) << TXQ_TXF_BURST_NUM_SHIFT; 1122 hw->dmar_block = DEVICE_CTRL_MAXRRS_MIN;
1137 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2) 1123 }
1138 max_pay_load_data >>= 1; 1124 txq_ctrl_data =
1139 txq_ctrl_data |= max_pay_load_data; 1125 hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2 ?
1126 L2CB_TXQ_CFGV : L1C_TXQ_CFGV;
1140 1127
1141 AT_WRITE_REG(hw, REG_TXQ_CTRL, txq_ctrl_data); 1128 AT_WRITE_REG(hw, REG_TXQ_CTRL, txq_ctrl_data);
1142} 1129}
@@ -1151,34 +1138,13 @@ static void atl1c_configure_rx(struct atl1c_adapter *adapter)
1151 1138
1152 if (hw->ctrl_flags & ATL1C_RX_IPV6_CHKSUM) 1139 if (hw->ctrl_flags & ATL1C_RX_IPV6_CHKSUM)
1153 rxq_ctrl_data |= IPV6_CHKSUM_CTRL_EN; 1140 rxq_ctrl_data |= IPV6_CHKSUM_CTRL_EN;
1154 if (hw->rss_type == atl1c_rss_ipv4)
1155 rxq_ctrl_data |= RSS_HASH_IPV4;
1156 if (hw->rss_type == atl1c_rss_ipv4_tcp)
1157 rxq_ctrl_data |= RSS_HASH_IPV4_TCP;
1158 if (hw->rss_type == atl1c_rss_ipv6)
1159 rxq_ctrl_data |= RSS_HASH_IPV6;
1160 if (hw->rss_type == atl1c_rss_ipv6_tcp)
1161 rxq_ctrl_data |= RSS_HASH_IPV6_TCP;
1162 if (hw->rss_type != atl1c_rss_disable)
1163 rxq_ctrl_data |= RRS_HASH_CTRL_EN;
1164
1165 rxq_ctrl_data |= (hw->rss_mode & RSS_MODE_MASK) <<
1166 RSS_MODE_SHIFT;
1167 rxq_ctrl_data |= (hw->rss_hash_bits & RSS_HASH_BITS_MASK) <<
1168 RSS_HASH_BITS_SHIFT;
1169 if (hw->ctrl_flags & ATL1C_ASPM_CTRL_MON)
1170 rxq_ctrl_data |= (ASPM_THRUPUT_LIMIT_1M &
1171 ASPM_THRUPUT_LIMIT_MASK) << ASPM_THRUPUT_LIMIT_SHIFT;
1172 1141
1173 AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data); 1142 /* aspm for gigabit */
1174} 1143 if (hw->nic_type != athr_l1d_2 && (hw->device_id & 1) != 0)
1175 1144 rxq_ctrl_data = FIELD_SETX(rxq_ctrl_data, ASPM_THRUPUT_LIMIT,
1176static void atl1c_configure_rss(struct atl1c_adapter *adapter) 1145 ASPM_THRUPUT_LIMIT_100M);
1177{
1178 struct atl1c_hw *hw = &adapter->hw;
1179 1146
1180 AT_WRITE_REG(hw, REG_IDT_TABLE, hw->indirect_tab); 1147 AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);
1181 AT_WRITE_REG(hw, REG_BASE_CPU_NUMBER, hw->base_cpu);
1182} 1148}
1183 1149
1184static void atl1c_configure_dma(struct atl1c_adapter *adapter) 1150static void atl1c_configure_dma(struct atl1c_adapter *adapter)
@@ -1186,36 +1152,11 @@ static void atl1c_configure_dma(struct atl1c_adapter *adapter)
1186 struct atl1c_hw *hw = &adapter->hw; 1152 struct atl1c_hw *hw = &adapter->hw;
1187 u32 dma_ctrl_data; 1153 u32 dma_ctrl_data;
1188 1154
1189 dma_ctrl_data = DMA_CTRL_DMAR_REQ_PRI; 1155 dma_ctrl_data = FIELDX(DMA_CTRL_RORDER_MODE, DMA_CTRL_RORDER_MODE_OUT) |
1190 if (hw->ctrl_flags & ATL1C_CMB_ENABLE) 1156 DMA_CTRL_RREQ_PRI_DATA |
1191 dma_ctrl_data |= DMA_CTRL_CMB_EN; 1157 FIELDX(DMA_CTRL_RREQ_BLEN, hw->dmar_block) |
1192 if (hw->ctrl_flags & ATL1C_SMB_ENABLE) 1158 FIELDX(DMA_CTRL_WDLY_CNT, DMA_CTRL_WDLY_CNT_DEF) |
1193 dma_ctrl_data |= DMA_CTRL_SMB_EN; 1159 FIELDX(DMA_CTRL_RDLY_CNT, DMA_CTRL_RDLY_CNT_DEF);
1194 else
1195 dma_ctrl_data |= MAC_CTRL_SMB_DIS;
1196
1197 switch (hw->dma_order) {
1198 case atl1c_dma_ord_in:
1199 dma_ctrl_data |= DMA_CTRL_DMAR_IN_ORDER;
1200 break;
1201 case atl1c_dma_ord_enh:
1202 dma_ctrl_data |= DMA_CTRL_DMAR_ENH_ORDER;
1203 break;
1204 case atl1c_dma_ord_out:
1205 dma_ctrl_data |= DMA_CTRL_DMAR_OUT_ORDER;
1206 break;
1207 default:
1208 break;
1209 }
1210
1211 dma_ctrl_data |= (((u32)hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
1212 << DMA_CTRL_DMAR_BURST_LEN_SHIFT;
1213 dma_ctrl_data |= (((u32)hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
1214 << DMA_CTRL_DMAW_BURST_LEN_SHIFT;
1215 dma_ctrl_data |= (((u32)hw->dmar_dly_cnt) & DMA_CTRL_DMAR_DLY_CNT_MASK)
1216 << DMA_CTRL_DMAR_DLY_CNT_SHIFT;
1217 dma_ctrl_data |= (((u32)hw->dmaw_dly_cnt) & DMA_CTRL_DMAW_DLY_CNT_MASK)
1218 << DMA_CTRL_DMAW_DLY_CNT_SHIFT;
1219 1160
1220 AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data); 1161 AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data);
1221} 1162}
@@ -1230,52 +1171,53 @@ static int atl1c_stop_mac(struct atl1c_hw *hw)
1230 u32 data; 1171 u32 data;
1231 1172
1232 AT_READ_REG(hw, REG_RXQ_CTRL, &data); 1173 AT_READ_REG(hw, REG_RXQ_CTRL, &data);
1233 data &= ~(RXQ1_CTRL_EN | RXQ2_CTRL_EN | 1174 data &= ~RXQ_CTRL_EN;
1234 RXQ3_CTRL_EN | RXQ_CTRL_EN);
1235 AT_WRITE_REG(hw, REG_RXQ_CTRL, data); 1175 AT_WRITE_REG(hw, REG_RXQ_CTRL, data);
1236 1176
1237 AT_READ_REG(hw, REG_TXQ_CTRL, &data); 1177 AT_READ_REG(hw, REG_TXQ_CTRL, &data);
1238 data &= ~TXQ_CTRL_EN; 1178 data &= ~TXQ_CTRL_EN;
1239 AT_WRITE_REG(hw, REG_TWSI_CTRL, data); 1179 AT_WRITE_REG(hw, REG_TXQ_CTRL, data);
1240 1180
1241 atl1c_wait_until_idle(hw); 1181 atl1c_wait_until_idle(hw, IDLE_STATUS_RXQ_BUSY | IDLE_STATUS_TXQ_BUSY);
1242 1182
1243 AT_READ_REG(hw, REG_MAC_CTRL, &data); 1183 AT_READ_REG(hw, REG_MAC_CTRL, &data);
1244 data &= ~(MAC_CTRL_TX_EN | MAC_CTRL_RX_EN); 1184 data &= ~(MAC_CTRL_TX_EN | MAC_CTRL_RX_EN);
1245 AT_WRITE_REG(hw, REG_MAC_CTRL, data); 1185 AT_WRITE_REG(hw, REG_MAC_CTRL, data);
1246 1186
1247 return (int)atl1c_wait_until_idle(hw); 1187 return (int)atl1c_wait_until_idle(hw,
1248} 1188 IDLE_STATUS_TXMAC_BUSY | IDLE_STATUS_RXMAC_BUSY);
1249
1250static void atl1c_enable_rx_ctrl(struct atl1c_hw *hw)
1251{
1252 u32 data;
1253
1254 AT_READ_REG(hw, REG_RXQ_CTRL, &data);
1255 switch (hw->adapter->num_rx_queues) {
1256 case 4:
1257 data |= (RXQ3_CTRL_EN | RXQ2_CTRL_EN | RXQ1_CTRL_EN);
1258 break;
1259 case 3:
1260 data |= (RXQ2_CTRL_EN | RXQ1_CTRL_EN);
1261 break;
1262 case 2:
1263 data |= RXQ1_CTRL_EN;
1264 break;
1265 default:
1266 break;
1267 }
1268 data |= RXQ_CTRL_EN;
1269 AT_WRITE_REG(hw, REG_RXQ_CTRL, data);
1270} 1189}
1271 1190
1272static void atl1c_enable_tx_ctrl(struct atl1c_hw *hw) 1191static void atl1c_start_mac(struct atl1c_adapter *adapter)
1273{ 1192{
1274 u32 data; 1193 struct atl1c_hw *hw = &adapter->hw;
1194 u32 mac, txq, rxq;
1195
1196 hw->mac_duplex = adapter->link_duplex == FULL_DUPLEX ? true : false;
1197 hw->mac_speed = adapter->link_speed == SPEED_1000 ?
1198 atl1c_mac_speed_1000 : atl1c_mac_speed_10_100;
1199
1200 AT_READ_REG(hw, REG_TXQ_CTRL, &txq);
1201 AT_READ_REG(hw, REG_RXQ_CTRL, &rxq);
1202 AT_READ_REG(hw, REG_MAC_CTRL, &mac);
1203
1204 txq |= TXQ_CTRL_EN;
1205 rxq |= RXQ_CTRL_EN;
1206 mac |= MAC_CTRL_TX_EN | MAC_CTRL_TX_FLOW |
1207 MAC_CTRL_RX_EN | MAC_CTRL_RX_FLOW |
1208 MAC_CTRL_ADD_CRC | MAC_CTRL_PAD |
1209 MAC_CTRL_BC_EN | MAC_CTRL_SINGLE_PAUSE_EN |
1210 MAC_CTRL_HASH_ALG_CRC32;
1211 if (hw->mac_duplex)
1212 mac |= MAC_CTRL_DUPLX;
1213 else
1214 mac &= ~MAC_CTRL_DUPLX;
1215 mac = FIELD_SETX(mac, MAC_CTRL_SPEED, hw->mac_speed);
1216 mac = FIELD_SETX(mac, MAC_CTRL_PRMLEN, hw->preamble_len);
1275 1217
1276 AT_READ_REG(hw, REG_TXQ_CTRL, &data); 1218 AT_WRITE_REG(hw, REG_TXQ_CTRL, txq);
1277 data |= TXQ_CTRL_EN; 1219 AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq);
1278 AT_WRITE_REG(hw, REG_TXQ_CTRL, data); 1220 AT_WRITE_REG(hw, REG_MAC_CTRL, mac);
1279} 1221}
1280 1222
1281/* 1223/*
@@ -1287,10 +1229,7 @@ static int atl1c_reset_mac(struct atl1c_hw *hw)
1287{ 1229{
1288 struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter; 1230 struct atl1c_adapter *adapter = (struct atl1c_adapter *)hw->adapter;
1289 struct pci_dev *pdev = adapter->pdev; 1231 struct pci_dev *pdev = adapter->pdev;
1290 u32 master_ctrl_data = 0; 1232 u32 ctrl_data = 0;
1291
1292 AT_WRITE_REG(hw, REG_IMR, 0);
1293 AT_WRITE_REG(hw, REG_ISR, ISR_DIS_INT);
1294 1233
1295 atl1c_stop_mac(hw); 1234 atl1c_stop_mac(hw);
1296 /* 1235 /*
@@ -1299,194 +1238,148 @@ static int atl1c_reset_mac(struct atl1c_hw *hw)
1299 * the current PCI configuration. The global reset bit is self- 1238 * the current PCI configuration. The global reset bit is self-
1300 * clearing, and should clear within a microsecond. 1239 * clearing, and should clear within a microsecond.
1301 */ 1240 */
1302 AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data); 1241 AT_READ_REG(hw, REG_MASTER_CTRL, &ctrl_data);
1303 master_ctrl_data |= MASTER_CTRL_OOB_DIS_OFF; 1242 ctrl_data |= MASTER_CTRL_OOB_DIS;
1304 AT_WRITE_REGW(hw, REG_MASTER_CTRL, ((master_ctrl_data | MASTER_CTRL_SOFT_RST) 1243 AT_WRITE_REG(hw, REG_MASTER_CTRL, ctrl_data | MASTER_CTRL_SOFT_RST);
1305 & 0xFFFF));
1306 1244
1307 AT_WRITE_FLUSH(hw); 1245 AT_WRITE_FLUSH(hw);
1308 msleep(10); 1246 msleep(10);
1309 /* Wait at least 10ms for All module to be Idle */ 1247 /* Wait at least 10ms for All module to be Idle */
1310 1248
1311 if (atl1c_wait_until_idle(hw)) { 1249 if (atl1c_wait_until_idle(hw, IDLE_STATUS_MASK)) {
1312 dev_err(&pdev->dev, 1250 dev_err(&pdev->dev,
1313 "MAC state machine can't be idle since" 1251 "MAC state machine can't be idle since"
1314 " disabled for 10ms second\n"); 1252 " disabled for 10ms second\n");
1315 return -1; 1253 return -1;
1316 } 1254 }
1255 AT_WRITE_REG(hw, REG_MASTER_CTRL, ctrl_data);
1256
1257 /* driver control speed/duplex */
1258 AT_READ_REG(hw, REG_MAC_CTRL, &ctrl_data);
1259 AT_WRITE_REG(hw, REG_MAC_CTRL, ctrl_data | MAC_CTRL_SPEED_MODE_SW);
1260
1261 /* clk switch setting */
1262 AT_READ_REG(hw, REG_SERDES, &ctrl_data);
1263 switch (hw->nic_type) {
1264 case athr_l2c_b:
1265 ctrl_data &= ~(SERDES_PHY_CLK_SLOWDOWN |
1266 SERDES_MAC_CLK_SLOWDOWN);
1267 AT_WRITE_REG(hw, REG_SERDES, ctrl_data);
1268 break;
1269 case athr_l2c_b2:
1270 case athr_l1d_2:
1271 ctrl_data |= SERDES_PHY_CLK_SLOWDOWN | SERDES_MAC_CLK_SLOWDOWN;
1272 AT_WRITE_REG(hw, REG_SERDES, ctrl_data);
1273 break;
1274 default:
1275 break;
1276 }
1277
1317 return 0; 1278 return 0;
1318} 1279}
1319 1280
1320static void atl1c_disable_l0s_l1(struct atl1c_hw *hw) 1281static void atl1c_disable_l0s_l1(struct atl1c_hw *hw)
1321{ 1282{
1322 u32 pm_ctrl_data; 1283 u16 ctrl_flags = hw->ctrl_flags;
1323 1284
1324 AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data); 1285 hw->ctrl_flags &= ~(ATL1C_ASPM_L0S_SUPPORT | ATL1C_ASPM_L1_SUPPORT);
1325 pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK << 1286 atl1c_set_aspm(hw, SPEED_0);
1326 PM_CTRL_L1_ENTRY_TIMER_SHIFT); 1287 hw->ctrl_flags = ctrl_flags;
1327 pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1;
1328 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
1329 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
1330 pm_ctrl_data &= ~PM_CTRL_MAC_ASPM_CHK;
1331 pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1;
1332
1333 pm_ctrl_data |= PM_CTRL_SERDES_BUDS_RX_L1_EN;
1334 pm_ctrl_data |= PM_CTRL_SERDES_PLL_L1_EN;
1335 pm_ctrl_data |= PM_CTRL_SERDES_L1_EN;
1336 AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data);
1337} 1288}
1338 1289
1339/* 1290/*
1340 * Set ASPM state. 1291 * Set ASPM state.
1341 * Enable/disable L0s/L1 depend on link state. 1292 * Enable/disable L0s/L1 depend on link state.
1342 */ 1293 */
1343static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup) 1294static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed)
1344{ 1295{
1345 u32 pm_ctrl_data; 1296 u32 pm_ctrl_data;
1346 u32 link_ctrl_data; 1297 u32 link_l1_timer;
1347 u32 link_l1_timer = 0xF;
1348 1298
1349 AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data); 1299 AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data);
1350 AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data); 1300 pm_ctrl_data &= ~(PM_CTRL_ASPM_L1_EN |
1301 PM_CTRL_ASPM_L0S_EN |
1302 PM_CTRL_MAC_ASPM_CHK);
1303 /* L1 timer */
1304 if (hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
1305 pm_ctrl_data &= ~PMCTRL_TXL1_AFTER_L0S;
1306 link_l1_timer =
1307 link_speed == SPEED_1000 || link_speed == SPEED_100 ?
1308 L1D_PMCTRL_L1_ENTRY_TM_16US : 1;
1309 pm_ctrl_data = FIELD_SETX(pm_ctrl_data,
1310 L1D_PMCTRL_L1_ENTRY_TM, link_l1_timer);
1311 } else {
1312 link_l1_timer = hw->nic_type == athr_l2c_b ?
1313 L2CB1_PM_CTRL_L1_ENTRY_TM : L1C_PM_CTRL_L1_ENTRY_TM;
1314 if (link_speed != SPEED_1000 && link_speed != SPEED_100)
1315 link_l1_timer = 1;
1316 pm_ctrl_data = FIELD_SETX(pm_ctrl_data,
1317 PM_CTRL_L1_ENTRY_TIMER, link_l1_timer);
1318 }
1351 1319
1352 pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1; 1320 /* L0S/L1 enable */
1353 pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK << 1321 if ((hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT) && link_speed != SPEED_0)
1354 PM_CTRL_L1_ENTRY_TIMER_SHIFT); 1322 pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN | PM_CTRL_MAC_ASPM_CHK;
1355 pm_ctrl_data &= ~(PM_CTRL_LCKDET_TIMER_MASK << 1323 if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT)
1356 PM_CTRL_LCKDET_TIMER_SHIFT); 1324 pm_ctrl_data |= PM_CTRL_ASPM_L1_EN | PM_CTRL_MAC_ASPM_CHK;
1357 pm_ctrl_data |= AT_LCKDET_TIMER << PM_CTRL_LCKDET_TIMER_SHIFT;
1358 1325
1326 /* l2cb & l1d & l2cb2 & l1d2 */
1359 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d || 1327 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d ||
1360 hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) { 1328 hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
1361 link_ctrl_data &= ~LINK_CTRL_EXT_SYNC; 1329 pm_ctrl_data = FIELD_SETX(pm_ctrl_data,
1362 if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE)) { 1330 PM_CTRL_PM_REQ_TIMER, PM_CTRL_PM_REQ_TO_DEF);
1363 if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10) 1331 pm_ctrl_data |= PM_CTRL_RCVR_WT_TIMER |
1364 link_ctrl_data |= LINK_CTRL_EXT_SYNC; 1332 PM_CTRL_SERDES_PD_EX_L1 |
1365 } 1333 PM_CTRL_CLK_SWH_L1;
1366 1334 pm_ctrl_data &= ~(PM_CTRL_SERDES_L1_EN |
1367 AT_WRITE_REG(hw, REG_LINK_CTRL, link_ctrl_data); 1335 PM_CTRL_SERDES_PLL_L1_EN |
1368 1336 PM_CTRL_SERDES_BUFS_RX_L1_EN |
1369 pm_ctrl_data |= PM_CTRL_RCVR_WT_TIMER; 1337 PM_CTRL_SA_DLY_EN |
1370 pm_ctrl_data &= ~(PM_CTRL_PM_REQ_TIMER_MASK << 1338 PM_CTRL_HOTRST);
1371 PM_CTRL_PM_REQ_TIMER_SHIFT); 1339 /* disable l0s if link down or l2cb */
1372 pm_ctrl_data |= AT_ASPM_L1_TIMER << 1340 if (link_speed == SPEED_0 || hw->nic_type == athr_l2c_b)
1373 PM_CTRL_PM_REQ_TIMER_SHIFT;
1374 pm_ctrl_data &= ~PM_CTRL_SA_DLY_EN;
1375 pm_ctrl_data &= ~PM_CTRL_HOTRST;
1376 pm_ctrl_data |= 1 << PM_CTRL_L1_ENTRY_TIMER_SHIFT;
1377 pm_ctrl_data |= PM_CTRL_SERDES_PD_EX_L1;
1378 }
1379 pm_ctrl_data |= PM_CTRL_MAC_ASPM_CHK;
1380 if (linkup) {
1381 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
1382 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
1383 if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT)
1384 pm_ctrl_data |= PM_CTRL_ASPM_L1_EN;
1385 if (hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT)
1386 pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN;
1387
1388 if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d ||
1389 hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) {
1390 if (hw->nic_type == athr_l2c_b)
1391 if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE))
1392 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
1393 pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
1394 pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN;
1395 pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN;
1396 pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
1397 if (hw->adapter->link_speed == SPEED_100 ||
1398 hw->adapter->link_speed == SPEED_1000) {
1399 pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
1400 PM_CTRL_L1_ENTRY_TIMER_SHIFT);
1401 if (hw->nic_type == athr_l2c_b)
1402 link_l1_timer = 7;
1403 else if (hw->nic_type == athr_l2c_b2 ||
1404 hw->nic_type == athr_l1d_2)
1405 link_l1_timer = 4;
1406 pm_ctrl_data |= link_l1_timer <<
1407 PM_CTRL_L1_ENTRY_TIMER_SHIFT;
1408 }
1409 } else {
1410 pm_ctrl_data |= PM_CTRL_SERDES_L1_EN;
1411 pm_ctrl_data |= PM_CTRL_SERDES_PLL_L1_EN;
1412 pm_ctrl_data |= PM_CTRL_SERDES_BUDS_RX_L1_EN;
1413 pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1;
1414 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN; 1341 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
1415 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN; 1342 } else { /* l1c */
1416 1343 pm_ctrl_data =
1344 FIELD_SETX(pm_ctrl_data, PM_CTRL_L1_ENTRY_TIMER, 0);
1345 if (link_speed != SPEED_0) {
1346 pm_ctrl_data |= PM_CTRL_SERDES_L1_EN |
1347 PM_CTRL_SERDES_PLL_L1_EN |
1348 PM_CTRL_SERDES_BUFS_RX_L1_EN;
1349 pm_ctrl_data &= ~(PM_CTRL_SERDES_PD_EX_L1 |
1350 PM_CTRL_CLK_SWH_L1 |
1351 PM_CTRL_ASPM_L0S_EN |
1352 PM_CTRL_ASPM_L1_EN);
1353 } else { /* link down */
1354 pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
1355 pm_ctrl_data &= ~(PM_CTRL_SERDES_L1_EN |
1356 PM_CTRL_SERDES_PLL_L1_EN |
1357 PM_CTRL_SERDES_BUFS_RX_L1_EN |
1358 PM_CTRL_ASPM_L0S_EN);
1417 } 1359 }
1418 } else {
1419 pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
1420 pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
1421 pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN;
1422 pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
1423
1424 if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT)
1425 pm_ctrl_data |= PM_CTRL_ASPM_L1_EN;
1426 else
1427 pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
1428 } 1360 }
1429 AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data); 1361 AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data);
1430 1362
1431 return; 1363 return;
1432} 1364}
1433 1365
1434static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter)
1435{
1436 struct atl1c_hw *hw = &adapter->hw;
1437 struct net_device *netdev = adapter->netdev;
1438 u32 mac_ctrl_data;
1439
1440 mac_ctrl_data = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN;
1441 mac_ctrl_data |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
1442
1443 if (adapter->link_duplex == FULL_DUPLEX) {
1444 hw->mac_duplex = true;
1445 mac_ctrl_data |= MAC_CTRL_DUPLX;
1446 }
1447
1448 if (adapter->link_speed == SPEED_1000)
1449 hw->mac_speed = atl1c_mac_speed_1000;
1450 else
1451 hw->mac_speed = atl1c_mac_speed_10_100;
1452
1453 mac_ctrl_data |= (hw->mac_speed & MAC_CTRL_SPEED_MASK) <<
1454 MAC_CTRL_SPEED_SHIFT;
1455
1456 mac_ctrl_data |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
1457 mac_ctrl_data |= ((hw->preamble_len & MAC_CTRL_PRMLEN_MASK) <<
1458 MAC_CTRL_PRMLEN_SHIFT);
1459
1460 __atl1c_vlan_mode(netdev->features, &mac_ctrl_data);
1461
1462 mac_ctrl_data |= MAC_CTRL_BC_EN;
1463 if (netdev->flags & IFF_PROMISC)
1464 mac_ctrl_data |= MAC_CTRL_PROMIS_EN;
1465 if (netdev->flags & IFF_ALLMULTI)
1466 mac_ctrl_data |= MAC_CTRL_MC_ALL_EN;
1467
1468 mac_ctrl_data |= MAC_CTRL_SINGLE_PAUSE_EN;
1469 if (hw->nic_type == athr_l1d || hw->nic_type == athr_l2c_b2 ||
1470 hw->nic_type == athr_l1d_2) {
1471 mac_ctrl_data |= MAC_CTRL_SPEED_MODE_SW;
1472 mac_ctrl_data |= MAC_CTRL_HASH_ALG_CRC32;
1473 }
1474 AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
1475}
1476
1477/* 1366/*
1478 * atl1c_configure - Configure Transmit&Receive Unit after Reset 1367 * atl1c_configure - Configure Transmit&Receive Unit after Reset
1479 * @adapter: board private structure 1368 * @adapter: board private structure
1480 * 1369 *
1481 * Configure the Tx /Rx unit of the MAC after a reset. 1370 * Configure the Tx /Rx unit of the MAC after a reset.
1482 */ 1371 */
1483static int atl1c_configure(struct atl1c_adapter *adapter) 1372static int atl1c_configure_mac(struct atl1c_adapter *adapter)
1484{ 1373{
1485 struct atl1c_hw *hw = &adapter->hw; 1374 struct atl1c_hw *hw = &adapter->hw;
1486 u32 master_ctrl_data = 0; 1375 u32 master_ctrl_data = 0;
1487 u32 intr_modrt_data; 1376 u32 intr_modrt_data;
1488 u32 data; 1377 u32 data;
1489 1378
1379 AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data);
1380 master_ctrl_data &= ~(MASTER_CTRL_TX_ITIMER_EN |
1381 MASTER_CTRL_RX_ITIMER_EN |
1382 MASTER_CTRL_INT_RDCLR);
1490 /* clear interrupt status */ 1383 /* clear interrupt status */
1491 AT_WRITE_REG(hw, REG_ISR, 0xFFFFFFFF); 1384 AT_WRITE_REG(hw, REG_ISR, 0xFFFFFFFF);
1492 /* Clear any WOL status */ 1385 /* Clear any WOL status */
@@ -1525,30 +1418,39 @@ static int atl1c_configure(struct atl1c_adapter *adapter)
1525 master_ctrl_data |= MASTER_CTRL_SA_TIMER_EN; 1418 master_ctrl_data |= MASTER_CTRL_SA_TIMER_EN;
1526 AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data); 1419 AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
1527 1420
1528 if (hw->ctrl_flags & ATL1C_CMB_ENABLE) { 1421 AT_WRITE_REG(hw, REG_SMB_STAT_TIMER,
1529 AT_WRITE_REG(hw, REG_CMB_TPD_THRESH, 1422 hw->smb_timer & SMB_STAT_TIMER_MASK);
1530 hw->cmb_tpd & CMB_TPD_THRESH_MASK);
1531 AT_WRITE_REG(hw, REG_CMB_TX_TIMER,
1532 hw->cmb_tx_timer & CMB_TX_TIMER_MASK);
1533 }
1534 1423
1535 if (hw->ctrl_flags & ATL1C_SMB_ENABLE)
1536 AT_WRITE_REG(hw, REG_SMB_STAT_TIMER,
1537 hw->smb_timer & SMB_STAT_TIMER_MASK);
1538 /* set MTU */ 1424 /* set MTU */
1539 AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN + 1425 AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN +
1540 VLAN_HLEN + ETH_FCS_LEN); 1426 VLAN_HLEN + ETH_FCS_LEN);
1541 /* HDS, disable */
1542 AT_WRITE_REG(hw, REG_HDS_CTRL, 0);
1543 1427
1544 atl1c_configure_tx(adapter); 1428 atl1c_configure_tx(adapter);
1545 atl1c_configure_rx(adapter); 1429 atl1c_configure_rx(adapter);
1546 atl1c_configure_rss(adapter);
1547 atl1c_configure_dma(adapter); 1430 atl1c_configure_dma(adapter);
1548 1431
1549 return 0; 1432 return 0;
1550} 1433}
1551 1434
1435static int atl1c_configure(struct atl1c_adapter *adapter)
1436{
1437 struct net_device *netdev = adapter->netdev;
1438 int num;
1439
1440 atl1c_init_ring_ptrs(adapter);
1441 atl1c_set_multi(netdev);
1442 atl1c_restore_vlan(adapter);
1443
1444 num = atl1c_alloc_rx_buffer(adapter);
1445 if (unlikely(num == 0))
1446 return -ENOMEM;
1447
1448 if (atl1c_configure_mac(adapter))
1449 return -EIO;
1450
1451 return 0;
1452}
1453
1552static void atl1c_update_hw_stats(struct atl1c_adapter *adapter) 1454static void atl1c_update_hw_stats(struct atl1c_adapter *adapter)
1553{ 1455{
1554 u16 hw_reg_addr = 0; 1456 u16 hw_reg_addr = 0;
@@ -1635,16 +1537,11 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
1635 struct pci_dev *pdev = adapter->pdev; 1537 struct pci_dev *pdev = adapter->pdev;
1636 u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); 1538 u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
1637 u16 hw_next_to_clean; 1539 u16 hw_next_to_clean;
1638 u16 shift; 1540 u16 reg;
1639 u32 data;
1640 1541
1641 if (type == atl1c_trans_high) 1542 reg = type == atl1c_trans_high ? REG_TPD_PRI1_CIDX : REG_TPD_PRI0_CIDX;
1642 shift = MB_HTPD_CONS_IDX_SHIFT;
1643 else
1644 shift = MB_NTPD_CONS_IDX_SHIFT;
1645 1543
1646 AT_READ_REG(&adapter->hw, REG_MB_PRIO_CONS_IDX, &data); 1544 AT_READ_REGW(&adapter->hw, reg, &hw_next_to_clean);
1647 hw_next_to_clean = (data >> shift) & MB_PRIO_PROD_IDX_MASK;
1648 1545
1649 while (next_to_clean != hw_next_to_clean) { 1546 while (next_to_clean != hw_next_to_clean) {
1650 buffer_info = &tpd_ring->buffer_info[next_to_clean]; 1547 buffer_info = &tpd_ring->buffer_info[next_to_clean];
@@ -1746,9 +1643,9 @@ static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter,
1746 skb_checksum_none_assert(skb); 1643 skb_checksum_none_assert(skb);
1747} 1644}
1748 1645
1749static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, const int ringid) 1646static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
1750{ 1647{
1751 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[ringid]; 1648 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
1752 struct pci_dev *pdev = adapter->pdev; 1649 struct pci_dev *pdev = adapter->pdev;
1753 struct atl1c_buffer *buffer_info, *next_info; 1650 struct atl1c_buffer *buffer_info, *next_info;
1754 struct sk_buff *skb; 1651 struct sk_buff *skb;
@@ -1800,7 +1697,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, const int ringid
1800 /* TODO: update mailbox here */ 1697 /* TODO: update mailbox here */
1801 wmb(); 1698 wmb();
1802 rfd_ring->next_to_use = rfd_next_to_use; 1699 rfd_ring->next_to_use = rfd_next_to_use;
1803 AT_WRITE_REG(&adapter->hw, atl1c_rfd_prod_idx_regs[ringid], 1700 AT_WRITE_REG(&adapter->hw, REG_MB_RFD0_PROD_IDX,
1804 rfd_ring->next_to_use & MB_RFDX_PROD_IDX_MASK); 1701 rfd_ring->next_to_use & MB_RFDX_PROD_IDX_MASK);
1805 } 1702 }
1806 1703
@@ -1839,7 +1736,7 @@ static void atl1c_clean_rfd(struct atl1c_rfd_ring *rfd_ring,
1839 rfd_ring->next_to_clean = rfd_index; 1736 rfd_ring->next_to_clean = rfd_index;
1840} 1737}
1841 1738
1842static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que, 1739static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter,
1843 int *work_done, int work_to_do) 1740 int *work_done, int work_to_do)
1844{ 1741{
1845 u16 rfd_num, rfd_index; 1742 u16 rfd_num, rfd_index;
@@ -1847,8 +1744,8 @@ static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, u8 que,
1847 u16 length; 1744 u16 length;
1848 struct pci_dev *pdev = adapter->pdev; 1745 struct pci_dev *pdev = adapter->pdev;
1849 struct net_device *netdev = adapter->netdev; 1746 struct net_device *netdev = adapter->netdev;
1850 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[que]; 1747 struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
1851 struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[que]; 1748 struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
1852 struct sk_buff *skb; 1749 struct sk_buff *skb;
1853 struct atl1c_recv_ret_status *rrs; 1750 struct atl1c_recv_ret_status *rrs;
1854 struct atl1c_buffer *buffer_info; 1751 struct atl1c_buffer *buffer_info;
@@ -1914,7 +1811,7 @@ rrs_checked:
1914 count++; 1811 count++;
1915 } 1812 }
1916 if (count) 1813 if (count)
1917 atl1c_alloc_rx_buffer(adapter, que); 1814 atl1c_alloc_rx_buffer(adapter);
1918} 1815}
1919 1816
1920/* 1817/*
@@ -1931,7 +1828,7 @@ static int atl1c_clean(struct napi_struct *napi, int budget)
1931 if (!netif_carrier_ok(adapter->netdev)) 1828 if (!netif_carrier_ok(adapter->netdev))
1932 goto quit_polling; 1829 goto quit_polling;
1933 /* just enable one RXQ */ 1830 /* just enable one RXQ */
1934 atl1c_clean_rx_irq(adapter, 0, &work_done, budget); 1831 atl1c_clean_rx_irq(adapter, &work_done, budget);
1935 1832
1936 if (work_done < budget) { 1833 if (work_done < budget) {
1937quit_polling: 1834quit_polling:
@@ -2206,23 +2103,10 @@ static void atl1c_tx_queue(struct atl1c_adapter *adapter, struct sk_buff *skb,
2206 struct atl1c_tpd_desc *tpd, enum atl1c_trans_queue type) 2103 struct atl1c_tpd_desc *tpd, enum atl1c_trans_queue type)
2207{ 2104{
2208 struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type]; 2105 struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type];
2209 u32 prod_data; 2106 u16 reg;
2210 2107
2211 AT_READ_REG(&adapter->hw, REG_MB_PRIO_PROD_IDX, &prod_data); 2108 reg = type == atl1c_trans_high ? REG_TPD_PRI1_PIDX : REG_TPD_PRI0_PIDX;
2212 switch (type) { 2109 AT_WRITE_REGW(&adapter->hw, reg, tpd_ring->next_to_use);
2213 case atl1c_trans_high:
2214 prod_data &= 0xFFFF0000;
2215 prod_data |= tpd_ring->next_to_use & 0xFFFF;
2216 break;
2217 case atl1c_trans_normal:
2218 prod_data &= 0x0000FFFF;
2219 prod_data |= (tpd_ring->next_to_use & 0xFFFF) << 16;
2220 break;
2221 default:
2222 break;
2223 }
2224 wmb();
2225 AT_WRITE_REG(&adapter->hw, REG_MB_PRIO_PROD_IDX, prod_data);
2226} 2110}
2227 2111
2228static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb, 2112static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
@@ -2307,8 +2191,7 @@ static int atl1c_request_irq(struct atl1c_adapter *adapter)
2307 "Unable to allocate MSI interrupt Error: %d\n", 2191 "Unable to allocate MSI interrupt Error: %d\n",
2308 err); 2192 err);
2309 adapter->have_msi = false; 2193 adapter->have_msi = false;
2310 } else 2194 }
2311 netdev->irq = pdev->irq;
2312 2195
2313 if (!adapter->have_msi) 2196 if (!adapter->have_msi)
2314 flags |= IRQF_SHARED; 2197 flags |= IRQF_SHARED;
@@ -2328,44 +2211,38 @@ static int atl1c_request_irq(struct atl1c_adapter *adapter)
2328 return err; 2211 return err;
2329} 2212}
2330 2213
2214
2215static void atl1c_reset_dma_ring(struct atl1c_adapter *adapter)
2216{
2217 /* release tx-pending skbs and reset tx/rx ring index */
2218 atl1c_clean_tx_ring(adapter, atl1c_trans_normal);
2219 atl1c_clean_tx_ring(adapter, atl1c_trans_high);
2220 atl1c_clean_rx_ring(adapter);
2221}
2222
2331static int atl1c_up(struct atl1c_adapter *adapter) 2223static int atl1c_up(struct atl1c_adapter *adapter)
2332{ 2224{
2333 struct net_device *netdev = adapter->netdev; 2225 struct net_device *netdev = adapter->netdev;
2334 int num;
2335 int err; 2226 int err;
2336 int i;
2337 2227
2338 netif_carrier_off(netdev); 2228 netif_carrier_off(netdev);
2339 atl1c_init_ring_ptrs(adapter);
2340 atl1c_set_multi(netdev);
2341 atl1c_restore_vlan(adapter);
2342 2229
2343 for (i = 0; i < adapter->num_rx_queues; i++) { 2230 err = atl1c_configure(adapter);
2344 num = atl1c_alloc_rx_buffer(adapter, i); 2231 if (unlikely(err))
2345 if (unlikely(num == 0)) {
2346 err = -ENOMEM;
2347 goto err_alloc_rx;
2348 }
2349 }
2350
2351 if (atl1c_configure(adapter)) {
2352 err = -EIO;
2353 goto err_up; 2232 goto err_up;
2354 }
2355 2233
2356 err = atl1c_request_irq(adapter); 2234 err = atl1c_request_irq(adapter);
2357 if (unlikely(err)) 2235 if (unlikely(err))
2358 goto err_up; 2236 goto err_up;
2359 2237
2238 atl1c_check_link_status(adapter);
2360 clear_bit(__AT_DOWN, &adapter->flags); 2239 clear_bit(__AT_DOWN, &adapter->flags);
2361 napi_enable(&adapter->napi); 2240 napi_enable(&adapter->napi);
2362 atl1c_irq_enable(adapter); 2241 atl1c_irq_enable(adapter);
2363 atl1c_check_link_status(adapter);
2364 netif_start_queue(netdev); 2242 netif_start_queue(netdev);
2365 return err; 2243 return err;
2366 2244
2367err_up: 2245err_up:
2368err_alloc_rx:
2369 atl1c_clean_rx_ring(adapter); 2246 atl1c_clean_rx_ring(adapter);
2370 return err; 2247 return err;
2371} 2248}
@@ -2383,15 +2260,15 @@ static void atl1c_down(struct atl1c_adapter *adapter)
2383 napi_disable(&adapter->napi); 2260 napi_disable(&adapter->napi);
2384 atl1c_irq_disable(adapter); 2261 atl1c_irq_disable(adapter);
2385 atl1c_free_irq(adapter); 2262 atl1c_free_irq(adapter);
2263 /* disable ASPM if device inactive */
2264 atl1c_disable_l0s_l1(&adapter->hw);
2386 /* reset MAC to disable all RX/TX */ 2265 /* reset MAC to disable all RX/TX */
2387 atl1c_reset_mac(&adapter->hw); 2266 atl1c_reset_mac(&adapter->hw);
2388 msleep(1); 2267 msleep(1);
2389 2268
2390 adapter->link_speed = SPEED_0; 2269 adapter->link_speed = SPEED_0;
2391 adapter->link_duplex = -1; 2270 adapter->link_duplex = -1;
2392 atl1c_clean_tx_ring(adapter, atl1c_trans_normal); 2271 atl1c_reset_dma_ring(adapter);
2393 atl1c_clean_tx_ring(adapter, atl1c_trans_high);
2394 atl1c_clean_rx_ring(adapter);
2395} 2272}
2396 2273
2397/* 2274/*
@@ -2424,13 +2301,6 @@ static int atl1c_open(struct net_device *netdev)
2424 if (unlikely(err)) 2301 if (unlikely(err))
2425 goto err_up; 2302 goto err_up;
2426 2303
2427 if (adapter->hw.ctrl_flags & ATL1C_FPGA_VERSION) {
2428 u32 phy_data;
2429
2430 AT_READ_REG(&adapter->hw, REG_MDIO_CTRL, &phy_data);
2431 phy_data |= MDIO_AP_EN;
2432 AT_WRITE_REG(&adapter->hw, REG_MDIO_CTRL, phy_data);
2433 }
2434 return 0; 2304 return 0;
2435 2305
2436err_up: 2306err_up:
@@ -2456,6 +2326,8 @@ static int atl1c_close(struct net_device *netdev)
2456 struct atl1c_adapter *adapter = netdev_priv(netdev); 2326 struct atl1c_adapter *adapter = netdev_priv(netdev);
2457 2327
2458 WARN_ON(test_bit(__AT_RESETTING, &adapter->flags)); 2328 WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
2329 set_bit(__AT_DOWN, &adapter->flags);
2330 cancel_work_sync(&adapter->common_task);
2459 atl1c_down(adapter); 2331 atl1c_down(adapter);
2460 atl1c_free_ring_resources(adapter); 2332 atl1c_free_ring_resources(adapter);
2461 return 0; 2333 return 0;
@@ -2467,10 +2339,6 @@ static int atl1c_suspend(struct device *dev)
2467 struct net_device *netdev = pci_get_drvdata(pdev); 2339 struct net_device *netdev = pci_get_drvdata(pdev);
2468 struct atl1c_adapter *adapter = netdev_priv(netdev); 2340 struct atl1c_adapter *adapter = netdev_priv(netdev);
2469 struct atl1c_hw *hw = &adapter->hw; 2341 struct atl1c_hw *hw = &adapter->hw;
2470 u32 mac_ctrl_data = 0;
2471 u32 master_ctrl_data = 0;
2472 u32 wol_ctrl_data = 0;
2473 u16 mii_intr_status_data = 0;
2474 u32 wufc = adapter->wol; 2342 u32 wufc = adapter->wol;
2475 2343
2476 atl1c_disable_l0s_l1(hw); 2344 atl1c_disable_l0s_l1(hw);
@@ -2481,75 +2349,10 @@ static int atl1c_suspend(struct device *dev)
2481 netif_device_detach(netdev); 2349 netif_device_detach(netdev);
2482 2350
2483 if (wufc) 2351 if (wufc)
2484 if (atl1c_phy_power_saving(hw) != 0) 2352 if (atl1c_phy_to_ps_link(hw) != 0)
2485 dev_dbg(&pdev->dev, "phy power saving failed"); 2353 dev_dbg(&pdev->dev, "phy power saving failed");
2486 2354
2487 AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data); 2355 atl1c_power_saving(hw, wufc);
2488 AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl_data);
2489
2490 master_ctrl_data &= ~MASTER_CTRL_CLK_SEL_DIS;
2491 mac_ctrl_data &= ~(MAC_CTRL_PRMLEN_MASK << MAC_CTRL_PRMLEN_SHIFT);
2492 mac_ctrl_data |= (((u32)adapter->hw.preamble_len &
2493 MAC_CTRL_PRMLEN_MASK) <<
2494 MAC_CTRL_PRMLEN_SHIFT);
2495 mac_ctrl_data &= ~(MAC_CTRL_SPEED_MASK << MAC_CTRL_SPEED_SHIFT);
2496 mac_ctrl_data &= ~MAC_CTRL_DUPLX;
2497
2498 if (wufc) {
2499 mac_ctrl_data |= MAC_CTRL_RX_EN;
2500 if (adapter->link_speed == SPEED_1000 ||
2501 adapter->link_speed == SPEED_0) {
2502 mac_ctrl_data |= atl1c_mac_speed_1000 <<
2503 MAC_CTRL_SPEED_SHIFT;
2504 mac_ctrl_data |= MAC_CTRL_DUPLX;
2505 } else
2506 mac_ctrl_data |= atl1c_mac_speed_10_100 <<
2507 MAC_CTRL_SPEED_SHIFT;
2508
2509 if (adapter->link_duplex == DUPLEX_FULL)
2510 mac_ctrl_data |= MAC_CTRL_DUPLX;
2511
2512 /* turn on magic packet wol */
2513 if (wufc & AT_WUFC_MAG)
2514 wol_ctrl_data |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
2515
2516 if (wufc & AT_WUFC_LNKC) {
2517 wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
2518 /* only link up can wake up */
2519 if (atl1c_write_phy_reg(hw, MII_IER, IER_LINK_UP) != 0) {
2520 dev_dbg(&pdev->dev, "%s: read write phy "
2521 "register failed.\n",
2522 atl1c_driver_name);
2523 }
2524 }
2525 /* clear phy interrupt */
2526 atl1c_read_phy_reg(hw, MII_ISR, &mii_intr_status_data);
2527 /* Config MAC Ctrl register */
2528 __atl1c_vlan_mode(netdev->features, &mac_ctrl_data);
2529
2530 /* magic packet maybe Broadcast&multicast&Unicast frame */
2531 if (wufc & AT_WUFC_MAG)
2532 mac_ctrl_data |= MAC_CTRL_BC_EN;
2533
2534 dev_dbg(&pdev->dev,
2535 "%s: suspend MAC=0x%x\n",
2536 atl1c_driver_name, mac_ctrl_data);
2537 AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
2538 AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data);
2539 AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
2540
2541 AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT |
2542 GPHY_CTRL_EXT_RESET);
2543 } else {
2544 AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_POWER_SAVING);
2545 master_ctrl_data |= MASTER_CTRL_CLK_SEL_DIS;
2546 mac_ctrl_data |= atl1c_mac_speed_10_100 << MAC_CTRL_SPEED_SHIFT;
2547 mac_ctrl_data |= MAC_CTRL_DUPLX;
2548 AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data);
2549 AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
2550 AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
2551 hw->phy_configured = false; /* re-init PHY when resume */
2552 }
2553 2356
2554 return 0; 2357 return 0;
2555} 2358}
@@ -2562,8 +2365,7 @@ static int atl1c_resume(struct device *dev)
2562 struct atl1c_adapter *adapter = netdev_priv(netdev); 2365 struct atl1c_adapter *adapter = netdev_priv(netdev);
2563 2366
2564 AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); 2367 AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
2565 atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE | 2368 atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE);
2566 ATL1C_PCIE_PHY_RESET);
2567 2369
2568 atl1c_phy_reset(&adapter->hw); 2370 atl1c_phy_reset(&adapter->hw);
2569 atl1c_reset_mac(&adapter->hw); 2371 atl1c_reset_mac(&adapter->hw);
@@ -2616,7 +2418,6 @@ static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
2616 SET_NETDEV_DEV(netdev, &pdev->dev); 2418 SET_NETDEV_DEV(netdev, &pdev->dev);
2617 pci_set_drvdata(pdev, netdev); 2419 pci_set_drvdata(pdev, netdev);
2618 2420
2619 netdev->irq = pdev->irq;
2620 netdev->netdev_ops = &atl1c_netdev_ops; 2421 netdev->netdev_ops = &atl1c_netdev_ops;
2621 netdev->watchdog_timeo = AT_TX_WATCHDOG; 2422 netdev->watchdog_timeo = AT_TX_WATCHDOG;
2622 atl1c_set_ethtool_ops(netdev); 2423 atl1c_set_ethtool_ops(netdev);
@@ -2706,14 +2507,13 @@ static int __devinit atl1c_probe(struct pci_dev *pdev,
2706 dev_err(&pdev->dev, "cannot map device registers\n"); 2507 dev_err(&pdev->dev, "cannot map device registers\n");
2707 goto err_ioremap; 2508 goto err_ioremap;
2708 } 2509 }
2709 netdev->base_addr = (unsigned long)adapter->hw.hw_addr;
2710 2510
2711 /* init mii data */ 2511 /* init mii data */
2712 adapter->mii.dev = netdev; 2512 adapter->mii.dev = netdev;
2713 adapter->mii.mdio_read = atl1c_mdio_read; 2513 adapter->mii.mdio_read = atl1c_mdio_read;
2714 adapter->mii.mdio_write = atl1c_mdio_write; 2514 adapter->mii.mdio_write = atl1c_mdio_write;
2715 adapter->mii.phy_id_mask = 0x1f; 2515 adapter->mii.phy_id_mask = 0x1f;
2716 adapter->mii.reg_num_mask = MDIO_REG_ADDR_MASK; 2516 adapter->mii.reg_num_mask = MDIO_CTRL_REG_MASK;
2717 netif_napi_add(netdev, &adapter->napi, atl1c_clean, 64); 2517 netif_napi_add(netdev, &adapter->napi, atl1c_clean, 64);
2718 setup_timer(&adapter->phy_config_timer, atl1c_phy_config, 2518 setup_timer(&adapter->phy_config_timer, atl1c_phy_config,
2719 (unsigned long)adapter); 2519 (unsigned long)adapter);
@@ -2723,8 +2523,7 @@ static int __devinit atl1c_probe(struct pci_dev *pdev,
2723 dev_err(&pdev->dev, "net device private data init failed\n"); 2523 dev_err(&pdev->dev, "net device private data init failed\n");
2724 goto err_sw_init; 2524 goto err_sw_init;
2725 } 2525 }
2726 atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE | 2526 atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE);
2727 ATL1C_PCIE_PHY_RESET);
2728 2527
2729 /* Init GPHY as early as possible due to power saving issue */ 2528 /* Init GPHY as early as possible due to power saving issue */
2730 atl1c_phy_reset(&adapter->hw); 2529 atl1c_phy_reset(&adapter->hw);
@@ -2752,7 +2551,7 @@ static int __devinit atl1c_probe(struct pci_dev *pdev,
2752 dev_dbg(&pdev->dev, "mac address : %pM\n", 2551 dev_dbg(&pdev->dev, "mac address : %pM\n",
2753 adapter->hw.mac_addr); 2552 adapter->hw.mac_addr);
2754 2553
2755 atl1c_hw_set_mac_addr(&adapter->hw); 2554 atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.mac_addr);
2756 INIT_WORK(&adapter->common_task, atl1c_common_task); 2555 INIT_WORK(&adapter->common_task, atl1c_common_task);
2757 adapter->work_event = 0; 2556 adapter->work_event = 0;
2758 err = register_netdev(netdev); 2557 err = register_netdev(netdev);
@@ -2796,6 +2595,8 @@ static void __devexit atl1c_remove(struct pci_dev *pdev)
2796 struct atl1c_adapter *adapter = netdev_priv(netdev); 2595 struct atl1c_adapter *adapter = netdev_priv(netdev);
2797 2596
2798 unregister_netdev(netdev); 2597 unregister_netdev(netdev);
2598 /* restore permanent address */
2599 atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.perm_mac_addr);
2799 atl1c_phy_disable(&adapter->hw); 2600 atl1c_phy_disable(&adapter->hw);
2800 2601
2801 iounmap(adapter->hw.hw_addr); 2602 iounmap(adapter->hw.hw_addr);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 93ff2b231284..1220e511ced6 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1883,27 +1883,24 @@ static int atl1e_request_irq(struct atl1e_adapter *adapter)
1883 int err = 0; 1883 int err = 0;
1884 1884
1885 adapter->have_msi = true; 1885 adapter->have_msi = true;
1886 err = pci_enable_msi(adapter->pdev); 1886 err = pci_enable_msi(pdev);
1887 if (err) { 1887 if (err) {
1888 netdev_dbg(adapter->netdev, 1888 netdev_dbg(netdev,
1889 "Unable to allocate MSI interrupt Error: %d\n", err); 1889 "Unable to allocate MSI interrupt Error: %d\n", err);
1890 adapter->have_msi = false; 1890 adapter->have_msi = false;
1891 } else 1891 }
1892 netdev->irq = pdev->irq;
1893
1894 1892
1895 if (!adapter->have_msi) 1893 if (!adapter->have_msi)
1896 flags |= IRQF_SHARED; 1894 flags |= IRQF_SHARED;
1897 err = request_irq(adapter->pdev->irq, atl1e_intr, flags, 1895 err = request_irq(pdev->irq, atl1e_intr, flags, netdev->name, netdev);
1898 netdev->name, netdev);
1899 if (err) { 1896 if (err) {
1900 netdev_dbg(adapter->netdev, 1897 netdev_dbg(adapter->netdev,
1901 "Unable to allocate interrupt Error: %d\n", err); 1898 "Unable to allocate interrupt Error: %d\n", err);
1902 if (adapter->have_msi) 1899 if (adapter->have_msi)
1903 pci_disable_msi(adapter->pdev); 1900 pci_disable_msi(pdev);
1904 return err; 1901 return err;
1905 } 1902 }
1906 netdev_dbg(adapter->netdev, "atl1e_request_irq OK\n"); 1903 netdev_dbg(netdev, "atl1e_request_irq OK\n");
1907 return err; 1904 return err;
1908} 1905}
1909 1906
@@ -2233,7 +2230,6 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
2233 SET_NETDEV_DEV(netdev, &pdev->dev); 2230 SET_NETDEV_DEV(netdev, &pdev->dev);
2234 pci_set_drvdata(pdev, netdev); 2231 pci_set_drvdata(pdev, netdev);
2235 2232
2236 netdev->irq = pdev->irq;
2237 netdev->netdev_ops = &atl1e_netdev_ops; 2233 netdev->netdev_ops = &atl1e_netdev_ops;
2238 2234
2239 netdev->watchdog_timeo = AT_TX_WATCHDOG; 2235 netdev->watchdog_timeo = AT_TX_WATCHDOG;
@@ -2319,7 +2315,6 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
2319 netdev_err(netdev, "cannot map device registers\n"); 2315 netdev_err(netdev, "cannot map device registers\n");
2320 goto err_ioremap; 2316 goto err_ioremap;
2321 } 2317 }
2322 netdev->base_addr = (unsigned long)adapter->hw.hw_addr;
2323 2318
2324 /* init mii data */ 2319 /* init mii data */
2325 adapter->mii.dev = netdev; 2320 adapter->mii.dev = netdev;
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index c926857e8205..5d10884e5080 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -266,7 +266,7 @@ static s32 atl1_reset_hw(struct atl1_hw *hw)
266 * interrupts & Clear any pending interrupt events 266 * interrupts & Clear any pending interrupt events
267 */ 267 */
268 /* 268 /*
269 * iowrite32(0, hw->hw_addr + REG_IMR); 269 * atlx_irq_disable(adapter);
270 * iowrite32(0xffffffff, hw->hw_addr + REG_ISR); 270 * iowrite32(0xffffffff, hw->hw_addr + REG_ISR);
271 */ 271 */
272 272
@@ -1917,7 +1917,7 @@ next:
1917 return num_alloc; 1917 return num_alloc;
1918} 1918}
1919 1919
1920static void atl1_intr_rx(struct atl1_adapter *adapter) 1920static int atl1_intr_rx(struct atl1_adapter *adapter, int budget)
1921{ 1921{
1922 int i, count; 1922 int i, count;
1923 u16 length; 1923 u16 length;
@@ -1933,7 +1933,7 @@ static void atl1_intr_rx(struct atl1_adapter *adapter)
1933 1933
1934 rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean); 1934 rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean);
1935 1935
1936 while (1) { 1936 while (count < budget) {
1937 rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean); 1937 rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean);
1938 i = 1; 1938 i = 1;
1939 if (likely(rrd->xsz.valid)) { /* packet valid */ 1939 if (likely(rrd->xsz.valid)) { /* packet valid */
@@ -2032,7 +2032,7 @@ rrd_ok:
2032 2032
2033 __vlan_hwaccel_put_tag(skb, vlan_tag); 2033 __vlan_hwaccel_put_tag(skb, vlan_tag);
2034 } 2034 }
2035 netif_rx(skb); 2035 netif_receive_skb(skb);
2036 2036
2037 /* let protocol layer free skb */ 2037 /* let protocol layer free skb */
2038 buffer_info->skb = NULL; 2038 buffer_info->skb = NULL;
@@ -2065,14 +2065,17 @@ rrd_ok:
2065 iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); 2065 iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
2066 spin_unlock(&adapter->mb_lock); 2066 spin_unlock(&adapter->mb_lock);
2067 } 2067 }
2068
2069 return count;
2068} 2070}
2069 2071
2070static void atl1_intr_tx(struct atl1_adapter *adapter) 2072static int atl1_intr_tx(struct atl1_adapter *adapter)
2071{ 2073{
2072 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; 2074 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
2073 struct atl1_buffer *buffer_info; 2075 struct atl1_buffer *buffer_info;
2074 u16 sw_tpd_next_to_clean; 2076 u16 sw_tpd_next_to_clean;
2075 u16 cmb_tpd_next_to_clean; 2077 u16 cmb_tpd_next_to_clean;
2078 int count = 0;
2076 2079
2077 sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean); 2080 sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean);
2078 cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx); 2081 cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx);
@@ -2092,12 +2095,16 @@ static void atl1_intr_tx(struct atl1_adapter *adapter)
2092 2095
2093 if (++sw_tpd_next_to_clean == tpd_ring->count) 2096 if (++sw_tpd_next_to_clean == tpd_ring->count)
2094 sw_tpd_next_to_clean = 0; 2097 sw_tpd_next_to_clean = 0;
2098
2099 count++;
2095 } 2100 }
2096 atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean); 2101 atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean);
2097 2102
2098 if (netif_queue_stopped(adapter->netdev) && 2103 if (netif_queue_stopped(adapter->netdev) &&
2099 netif_carrier_ok(adapter->netdev)) 2104 netif_carrier_ok(adapter->netdev))
2100 netif_wake_queue(adapter->netdev); 2105 netif_wake_queue(adapter->netdev);
2106
2107 return count;
2101} 2108}
2102 2109
2103static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring) 2110static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring)
@@ -2439,6 +2446,49 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
2439 return NETDEV_TX_OK; 2446 return NETDEV_TX_OK;
2440} 2447}
2441 2448
2449static int atl1_rings_clean(struct napi_struct *napi, int budget)
2450{
2451 struct atl1_adapter *adapter = container_of(napi, struct atl1_adapter, napi);
2452 int work_done = atl1_intr_rx(adapter, budget);
2453
2454 if (atl1_intr_tx(adapter))
2455 work_done = budget;
2456
2457 /* Let's come again to process some more packets */
2458 if (work_done >= budget)
2459 return work_done;
2460
2461 napi_complete(napi);
2462 /* re-enable Interrupt */
2463 if (likely(adapter->int_enabled))
2464 atlx_imr_set(adapter, IMR_NORMAL_MASK);
2465 return work_done;
2466}
2467
2468static inline int atl1_sched_rings_clean(struct atl1_adapter* adapter)
2469{
2470 if (!napi_schedule_prep(&adapter->napi))
2471 /* It is possible in case even the RX/TX ints are disabled via IMR
2472 * register the ISR bits are set anyway (but do not produce IRQ).
2473 * To handle such situation the napi functions used to check is
2474 * something scheduled or not.
2475 */
2476 return 0;
2477
2478 __napi_schedule(&adapter->napi);
2479
2480 /*
2481 * Disable RX/TX ints via IMR register if it is
2482 * allowed. NAPI handler must reenable them in same
2483 * way.
2484 */
2485 if (!adapter->int_enabled)
2486 return 1;
2487
2488 atlx_imr_set(adapter, IMR_NORXTX_MASK);
2489 return 1;
2490}
2491
2442/* 2492/*
2443 * atl1_intr - Interrupt Handler 2493 * atl1_intr - Interrupt Handler
2444 * @irq: interrupt number 2494 * @irq: interrupt number
@@ -2449,78 +2499,74 @@ static irqreturn_t atl1_intr(int irq, void *data)
2449{ 2499{
2450 struct atl1_adapter *adapter = netdev_priv(data); 2500 struct atl1_adapter *adapter = netdev_priv(data);
2451 u32 status; 2501 u32 status;
2452 int max_ints = 10;
2453 2502
2454 status = adapter->cmb.cmb->int_stats; 2503 status = adapter->cmb.cmb->int_stats;
2455 if (!status) 2504 if (!status)
2456 return IRQ_NONE; 2505 return IRQ_NONE;
2457 2506
2458 do { 2507 /* clear CMB interrupt status at once,
2459 /* clear CMB interrupt status at once */ 2508 * but leave rx/tx interrupt status in case it should be dropped
2460 adapter->cmb.cmb->int_stats = 0; 2509 * only if rx/tx processing queued. In other case interrupt
2461 2510 * can be lost.
2462 if (status & ISR_GPHY) /* clear phy status */ 2511 */
2463 atlx_clear_phy_int(adapter); 2512 adapter->cmb.cmb->int_stats = status & (ISR_CMB_TX | ISR_CMB_RX);
2464 2513
2465 /* clear ISR status, and Enable CMB DMA/Disable Interrupt */ 2514 if (status & ISR_GPHY) /* clear phy status */
2466 iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR); 2515 atlx_clear_phy_int(adapter);
2467 2516
2468 /* check if SMB intr */ 2517 /* clear ISR status, and Enable CMB DMA/Disable Interrupt */
2469 if (status & ISR_SMB) 2518 iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR);
2470 atl1_inc_smb(adapter);
2471 2519
2472 /* check if PCIE PHY Link down */ 2520 /* check if SMB intr */
2473 if (status & ISR_PHY_LINKDOWN) { 2521 if (status & ISR_SMB)
2474 if (netif_msg_intr(adapter)) 2522 atl1_inc_smb(adapter);
2475 dev_printk(KERN_DEBUG, &adapter->pdev->dev,
2476 "pcie phy link down %x\n", status);
2477 if (netif_running(adapter->netdev)) { /* reset MAC */
2478 iowrite32(0, adapter->hw.hw_addr + REG_IMR);
2479 schedule_work(&adapter->reset_dev_task);
2480 return IRQ_HANDLED;
2481 }
2482 }
2483 2523
2484 /* check if DMA read/write error ? */ 2524 /* check if PCIE PHY Link down */
2485 if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { 2525 if (status & ISR_PHY_LINKDOWN) {
2486 if (netif_msg_intr(adapter)) 2526 if (netif_msg_intr(adapter))
2487 dev_printk(KERN_DEBUG, &adapter->pdev->dev, 2527 dev_printk(KERN_DEBUG, &adapter->pdev->dev,
2488 "pcie DMA r/w error (status = 0x%x)\n", 2528 "pcie phy link down %x\n", status);
2489 status); 2529 if (netif_running(adapter->netdev)) { /* reset MAC */
2490 iowrite32(0, adapter->hw.hw_addr + REG_IMR); 2530 atlx_irq_disable(adapter);
2491 schedule_work(&adapter->reset_dev_task); 2531 schedule_work(&adapter->reset_dev_task);
2492 return IRQ_HANDLED; 2532 return IRQ_HANDLED;
2493 } 2533 }
2534 }
2494 2535
2495 /* link event */ 2536 /* check if DMA read/write error ? */
2496 if (status & ISR_GPHY) { 2537 if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
2497 adapter->soft_stats.tx_carrier_errors++; 2538 if (netif_msg_intr(adapter))
2498 atl1_check_for_link(adapter); 2539 dev_printk(KERN_DEBUG, &adapter->pdev->dev,
2499 } 2540 "pcie DMA r/w error (status = 0x%x)\n",
2541 status);
2542 atlx_irq_disable(adapter);
2543 schedule_work(&adapter->reset_dev_task);
2544 return IRQ_HANDLED;
2545 }
2500 2546
2501 /* transmit event */ 2547 /* link event */
2502 if (status & ISR_CMB_TX) 2548 if (status & ISR_GPHY) {
2503 atl1_intr_tx(adapter); 2549 adapter->soft_stats.tx_carrier_errors++;
2504 2550 atl1_check_for_link(adapter);
2505 /* rx exception */ 2551 }
2506 if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN |
2507 ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
2508 ISR_HOST_RRD_OV | ISR_CMB_RX))) {
2509 if (status & (ISR_RXF_OV | ISR_RFD_UNRUN |
2510 ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
2511 ISR_HOST_RRD_OV))
2512 if (netif_msg_intr(adapter))
2513 dev_printk(KERN_DEBUG,
2514 &adapter->pdev->dev,
2515 "rx exception, ISR = 0x%x\n",
2516 status);
2517 atl1_intr_rx(adapter);
2518 }
2519 2552
2520 if (--max_ints < 0) 2553 /* transmit or receive event */
2521 break; 2554 if (status & (ISR_CMB_TX | ISR_CMB_RX) &&
2555 atl1_sched_rings_clean(adapter))
2556 adapter->cmb.cmb->int_stats = adapter->cmb.cmb->int_stats &
2557 ~(ISR_CMB_TX | ISR_CMB_RX);
2522 2558
2523 } while ((status = adapter->cmb.cmb->int_stats)); 2559 /* rx exception */
2560 if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN |
2561 ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
2562 ISR_HOST_RRD_OV))) {
2563 if (netif_msg_intr(adapter))
2564 dev_printk(KERN_DEBUG,
2565 &adapter->pdev->dev,
2566 "rx exception, ISR = 0x%x\n",
2567 status);
2568 atl1_sched_rings_clean(adapter);
2569 }
2524 2570
2525 /* re-enable Interrupt */ 2571 /* re-enable Interrupt */
2526 iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR); 2572 iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR);
@@ -2599,6 +2645,7 @@ static s32 atl1_up(struct atl1_adapter *adapter)
2599 if (unlikely(err)) 2645 if (unlikely(err))
2600 goto err_up; 2646 goto err_up;
2601 2647
2648 napi_enable(&adapter->napi);
2602 atlx_irq_enable(adapter); 2649 atlx_irq_enable(adapter);
2603 atl1_check_link(adapter); 2650 atl1_check_link(adapter);
2604 netif_start_queue(netdev); 2651 netif_start_queue(netdev);
@@ -2615,6 +2662,7 @@ static void atl1_down(struct atl1_adapter *adapter)
2615{ 2662{
2616 struct net_device *netdev = adapter->netdev; 2663 struct net_device *netdev = adapter->netdev;
2617 2664
2665 napi_disable(&adapter->napi);
2618 netif_stop_queue(netdev); 2666 netif_stop_queue(netdev);
2619 del_timer_sync(&adapter->phy_config_timer); 2667 del_timer_sync(&adapter->phy_config_timer);
2620 adapter->phy_timer_pending = false; 2668 adapter->phy_timer_pending = false;
@@ -2971,6 +3019,7 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
2971 3019
2972 netdev->netdev_ops = &atl1_netdev_ops; 3020 netdev->netdev_ops = &atl1_netdev_ops;
2973 netdev->watchdog_timeo = 5 * HZ; 3021 netdev->watchdog_timeo = 5 * HZ;
3022 netif_napi_add(netdev, &adapter->napi, atl1_rings_clean, 64);
2974 3023
2975 netdev->ethtool_ops = &atl1_ethtool_ops; 3024 netdev->ethtool_ops = &atl1_ethtool_ops;
2976 adapter->bd_number = cards_found; 3025 adapter->bd_number = cards_found;
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.h b/drivers/net/ethernet/atheros/atlx/atl1.h
index e04bf4d71e46..3bf79a56220d 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.h
+++ b/drivers/net/ethernet/atheros/atlx/atl1.h
@@ -275,13 +275,17 @@ static u32 atl1_check_link(struct atl1_adapter *adapter);
275#define ISR_DIS_SMB 0x20000000 275#define ISR_DIS_SMB 0x20000000
276#define ISR_DIS_DMA 0x40000000 276#define ISR_DIS_DMA 0x40000000
277 277
278/* Normal Interrupt mask */ 278/* Normal Interrupt mask without RX/TX enabled */
279#define IMR_NORMAL_MASK (\ 279#define IMR_NORXTX_MASK (\
280 ISR_SMB |\ 280 ISR_SMB |\
281 ISR_GPHY |\ 281 ISR_GPHY |\
282 ISR_PHY_LINKDOWN|\ 282 ISR_PHY_LINKDOWN|\
283 ISR_DMAR_TO_RST |\ 283 ISR_DMAR_TO_RST |\
284 ISR_DMAW_TO_RST |\ 284 ISR_DMAW_TO_RST)
285
286/* Normal Interrupt mask */
287#define IMR_NORMAL_MASK (\
288 IMR_NORXTX_MASK |\
285 ISR_CMB_TX |\ 289 ISR_CMB_TX |\
286 ISR_CMB_RX) 290 ISR_CMB_RX)
287 291
@@ -758,6 +762,7 @@ struct atl1_adapter {
758 u16 link_speed; 762 u16 link_speed;
759 u16 link_duplex; 763 u16 link_duplex;
760 spinlock_t lock; 764 spinlock_t lock;
765 struct napi_struct napi;
761 struct work_struct reset_dev_task; 766 struct work_struct reset_dev_task;
762 struct work_struct link_chg_task; 767 struct work_struct link_chg_task;
763 768
@@ -781,6 +786,12 @@ struct atl1_adapter {
781 u16 ict; /* interrupt clear timer (2us resolution */ 786 u16 ict; /* interrupt clear timer (2us resolution */
782 struct mii_if_info mii; /* MII interface info */ 787 struct mii_if_info mii; /* MII interface info */
783 788
789 /*
790 * Use this value to check is napi handler allowed to
791 * enable ints or not
792 */
793 bool int_enabled;
794
784 u32 bd_number; /* board number */ 795 u32 bd_number; /* board number */
785 bool pci_using_64; 796 bool pci_using_64;
786 struct atl1_hw hw; 797 struct atl1_hw hw;
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
index c9e9dc57986c..b4f3aa49a7fc 100644
--- a/drivers/net/ethernet/atheros/atlx/atlx.c
+++ b/drivers/net/ethernet/atheros/atlx/atlx.c
@@ -155,14 +155,21 @@ static void atlx_set_multi(struct net_device *netdev)
155 } 155 }
156} 156}
157 157
158static inline void atlx_imr_set(struct atlx_adapter *adapter,
159 unsigned int imr)
160{
161 iowrite32(imr, adapter->hw.hw_addr + REG_IMR);
162 ioread32(adapter->hw.hw_addr + REG_IMR);
163}
164
158/* 165/*
159 * atlx_irq_enable - Enable default interrupt generation settings 166 * atlx_irq_enable - Enable default interrupt generation settings
160 * @adapter: board private structure 167 * @adapter: board private structure
161 */ 168 */
162static void atlx_irq_enable(struct atlx_adapter *adapter) 169static void atlx_irq_enable(struct atlx_adapter *adapter)
163{ 170{
164 iowrite32(IMR_NORMAL_MASK, adapter->hw.hw_addr + REG_IMR); 171 atlx_imr_set(adapter, IMR_NORMAL_MASK);
165 ioread32(adapter->hw.hw_addr + REG_IMR); 172 adapter->int_enabled = true;
166} 173}
167 174
168/* 175/*
@@ -171,8 +178,8 @@ static void atlx_irq_enable(struct atlx_adapter *adapter)
171 */ 178 */
172static void atlx_irq_disable(struct atlx_adapter *adapter) 179static void atlx_irq_disable(struct atlx_adapter *adapter)
173{ 180{
174 iowrite32(0, adapter->hw.hw_addr + REG_IMR); 181 adapter->int_enabled = false;
175 ioread32(adapter->hw.hw_addr + REG_IMR); 182 atlx_imr_set(adapter, 0);
176 synchronize_irq(adapter->pdev->irq); 183 synchronize_irq(adapter->pdev->irq);
177} 184}
178 185
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 8297e2868736..ac7b74488531 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -3006,7 +3006,7 @@ error:
3006 3006
3007 dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, 3007 dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
3008 PCI_DMA_FROMDEVICE); 3008 PCI_DMA_FROMDEVICE);
3009 skb = build_skb(data); 3009 skb = build_skb(data, 0);
3010 if (!skb) { 3010 if (!skb) {
3011 kfree(data); 3011 kfree(data);
3012 goto error; 3012 goto error;
@@ -7343,8 +7343,7 @@ static struct {
7343 { "rx_fw_discards" }, 7343 { "rx_fw_discards" },
7344}; 7344};
7345 7345
7346#define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\ 7346#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
7347 sizeof(bnx2_stats_str_arr[0]))
7348 7347
7349#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4) 7348#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7350 7349
@@ -7976,7 +7975,6 @@ static int __devinit
7976bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) 7975bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7977{ 7976{
7978 struct bnx2 *bp; 7977 struct bnx2 *bp;
7979 unsigned long mem_len;
7980 int rc, i, j; 7978 int rc, i, j;
7981 u32 reg; 7979 u32 reg;
7982 u64 dma_mask, persist_dma_mask; 7980 u64 dma_mask, persist_dma_mask;
@@ -8036,13 +8034,8 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
8036#endif 8034#endif
8037 INIT_WORK(&bp->reset_task, bnx2_reset_task); 8035 INIT_WORK(&bp->reset_task, bnx2_reset_task);
8038 8036
8039 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0); 8037 bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8040 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1); 8038 TX_MAX_TSS_RINGS + 1));
8041 dev->mem_end = dev->mem_start + mem_len;
8042 dev->irq = pdev->irq;
8043
8044 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
8045
8046 if (!bp->regview) { 8039 if (!bp->regview) {
8047 dev_err(&pdev->dev, "Cannot map register space, aborting\n"); 8040 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
8048 rc = -ENOMEM; 8041 rc = -ENOMEM;
@@ -8346,10 +8339,8 @@ err_out_unmap:
8346 bp->flags &= ~BNX2_FLAG_AER_ENABLED; 8339 bp->flags &= ~BNX2_FLAG_AER_ENABLED;
8347 } 8340 }
8348 8341
8349 if (bp->regview) { 8342 pci_iounmap(pdev, bp->regview);
8350 iounmap(bp->regview); 8343 bp->regview = NULL;
8351 bp->regview = NULL;
8352 }
8353 8344
8354err_out_release: 8345err_out_release:
8355 pci_release_regions(pdev); 8346 pci_release_regions(pdev);
@@ -8432,7 +8423,7 @@ static int __devinit
8432bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 8423bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8433{ 8424{
8434 static int version_printed = 0; 8425 static int version_printed = 0;
8435 struct net_device *dev = NULL; 8426 struct net_device *dev;
8436 struct bnx2 *bp; 8427 struct bnx2 *bp;
8437 int rc; 8428 int rc;
8438 char str[40]; 8429 char str[40];
@@ -8442,15 +8433,12 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8442 8433
8443 /* dev zeroed in init_etherdev */ 8434 /* dev zeroed in init_etherdev */
8444 dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS); 8435 dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8445
8446 if (!dev) 8436 if (!dev)
8447 return -ENOMEM; 8437 return -ENOMEM;
8448 8438
8449 rc = bnx2_init_board(pdev, dev); 8439 rc = bnx2_init_board(pdev, dev);
8450 if (rc < 0) { 8440 if (rc < 0)
8451 free_netdev(dev); 8441 goto err_free;
8452 return rc;
8453 }
8454 8442
8455 dev->netdev_ops = &bnx2_netdev_ops; 8443 dev->netdev_ops = &bnx2_netdev_ops;
8456 dev->watchdog_timeo = TX_TIMEOUT; 8444 dev->watchdog_timeo = TX_TIMEOUT;
@@ -8480,22 +8468,21 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8480 goto error; 8468 goto error;
8481 } 8469 }
8482 8470
8483 netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n", 8471 netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, "
8484 board_info[ent->driver_data].name, 8472 "node addr %pM\n", board_info[ent->driver_data].name,
8485 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A', 8473 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8486 ((CHIP_ID(bp) & 0x0ff0) >> 4), 8474 ((CHIP_ID(bp) & 0x0ff0) >> 4),
8487 bnx2_bus_string(bp, str), 8475 bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
8488 dev->base_addr, 8476 pdev->irq, dev->dev_addr);
8489 bp->pdev->irq, dev->dev_addr);
8490 8477
8491 return 0; 8478 return 0;
8492 8479
8493error: 8480error:
8494 if (bp->regview) 8481 iounmap(bp->regview);
8495 iounmap(bp->regview);
8496 pci_release_regions(pdev); 8482 pci_release_regions(pdev);
8497 pci_disable_device(pdev); 8483 pci_disable_device(pdev);
8498 pci_set_drvdata(pdev, NULL); 8484 pci_set_drvdata(pdev, NULL);
8485err_free:
8499 free_netdev(dev); 8486 free_netdev(dev);
8500 return rc; 8487 return rc;
8501} 8488}
@@ -8511,8 +8498,7 @@ bnx2_remove_one(struct pci_dev *pdev)
8511 del_timer_sync(&bp->timer); 8498 del_timer_sync(&bp->timer);
8512 cancel_work_sync(&bp->reset_task); 8499 cancel_work_sync(&bp->reset_task);
8513 8500
8514 if (bp->regview) 8501 pci_iounmap(bp->pdev, bp->regview);
8515 iounmap(bp->regview);
8516 8502
8517 kfree(bp->temp_stats_blk); 8503 kfree(bp->temp_stats_blk);
8518 8504
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 2c9ee552dffc..e30e2a2f354c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -23,13 +23,17 @@
23 * (you will need to reboot afterwards) */ 23 * (you will need to reboot afterwards) */
24/* #define BNX2X_STOP_ON_ERROR */ 24/* #define BNX2X_STOP_ON_ERROR */
25 25
26#define DRV_MODULE_VERSION "1.72.10-0" 26#define DRV_MODULE_VERSION "1.72.50-0"
27#define DRV_MODULE_RELDATE "2012/02/20" 27#define DRV_MODULE_RELDATE "2012/04/23"
28#define BNX2X_BC_VER 0x040200 28#define BNX2X_BC_VER 0x040200
29 29
30#if defined(CONFIG_DCB) 30#if defined(CONFIG_DCB)
31#define BCM_DCBNL 31#define BCM_DCBNL
32#endif 32#endif
33
34
35#include "bnx2x_hsi.h"
36
33#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE) 37#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
34#define BCM_CNIC 1 38#define BCM_CNIC 1
35#include "../cnic_if.h" 39#include "../cnic_if.h"
@@ -345,7 +349,6 @@ union db_prod {
345#define SGE_PAGE_SIZE PAGE_SIZE 349#define SGE_PAGE_SIZE PAGE_SIZE
346#define SGE_PAGE_SHIFT PAGE_SHIFT 350#define SGE_PAGE_SHIFT PAGE_SHIFT
347#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr)) 351#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
348#define SGE_PAGES (SGE_PAGE_SIZE * PAGES_PER_SGE)
349 352
350/* SGE ring related macros */ 353/* SGE ring related macros */
351#define NUM_RX_SGE_PAGES 2 354#define NUM_RX_SGE_PAGES 2
@@ -815,6 +818,8 @@ struct bnx2x_common {
815#define CHIP_NUM_57800_MF 0x16a5 818#define CHIP_NUM_57800_MF 0x16a5
816#define CHIP_NUM_57810 0x168e 819#define CHIP_NUM_57810 0x168e
817#define CHIP_NUM_57810_MF 0x16ae 820#define CHIP_NUM_57810_MF 0x16ae
821#define CHIP_NUM_57811 0x163d
822#define CHIP_NUM_57811_MF 0x163e
818#define CHIP_NUM_57840 0x168d 823#define CHIP_NUM_57840 0x168d
819#define CHIP_NUM_57840_MF 0x16ab 824#define CHIP_NUM_57840_MF 0x16ab
820#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710) 825#define CHIP_IS_E1(bp) (CHIP_NUM(bp) == CHIP_NUM_57710)
@@ -826,6 +831,8 @@ struct bnx2x_common {
826#define CHIP_IS_57800_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_MF) 831#define CHIP_IS_57800_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57800_MF)
827#define CHIP_IS_57810(bp) (CHIP_NUM(bp) == CHIP_NUM_57810) 832#define CHIP_IS_57810(bp) (CHIP_NUM(bp) == CHIP_NUM_57810)
828#define CHIP_IS_57810_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_MF) 833#define CHIP_IS_57810_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57810_MF)
834#define CHIP_IS_57811(bp) (CHIP_NUM(bp) == CHIP_NUM_57811)
835#define CHIP_IS_57811_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57811_MF)
829#define CHIP_IS_57840(bp) (CHIP_NUM(bp) == CHIP_NUM_57840) 836#define CHIP_IS_57840(bp) (CHIP_NUM(bp) == CHIP_NUM_57840)
830#define CHIP_IS_57840_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_MF) 837#define CHIP_IS_57840_MF(bp) (CHIP_NUM(bp) == CHIP_NUM_57840_MF)
831#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \ 838#define CHIP_IS_E1H(bp) (CHIP_IS_57711(bp) || \
@@ -836,6 +843,8 @@ struct bnx2x_common {
836 CHIP_IS_57800_MF(bp) || \ 843 CHIP_IS_57800_MF(bp) || \
837 CHIP_IS_57810(bp) || \ 844 CHIP_IS_57810(bp) || \
838 CHIP_IS_57810_MF(bp) || \ 845 CHIP_IS_57810_MF(bp) || \
846 CHIP_IS_57811(bp) || \
847 CHIP_IS_57811_MF(bp) || \
839 CHIP_IS_57840(bp) || \ 848 CHIP_IS_57840(bp) || \
840 CHIP_IS_57840_MF(bp)) 849 CHIP_IS_57840_MF(bp))
841#define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp))) 850#define CHIP_IS_E1x(bp) (CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp)))
@@ -1053,6 +1062,13 @@ struct bnx2x_slowpath {
1053 struct flow_control_configuration pfc_config; 1062 struct flow_control_configuration pfc_config;
1054 } func_rdata; 1063 } func_rdata;
1055 1064
1065 /* afex ramrod can not be a part of func_rdata union because these
1066 * events might arrive in parallel to other events from func_rdata.
1067 * Therefore, if they would have been defined in the same union,
1068 * data can get corrupted.
1069 */
1070 struct afex_vif_list_ramrod_data func_afex_rdata;
1071
1056 /* used by dmae command executer */ 1072 /* used by dmae command executer */
1057 struct dmae_command dmae[MAX_DMAE_C]; 1073 struct dmae_command dmae[MAX_DMAE_C];
1058 1074
@@ -1169,6 +1185,7 @@ struct bnx2x_fw_stats_data {
1169enum { 1185enum {
1170 BNX2X_SP_RTNL_SETUP_TC, 1186 BNX2X_SP_RTNL_SETUP_TC,
1171 BNX2X_SP_RTNL_TX_TIMEOUT, 1187 BNX2X_SP_RTNL_TX_TIMEOUT,
1188 BNX2X_SP_RTNL_AFEX_F_UPDATE,
1172 BNX2X_SP_RTNL_FAN_FAILURE, 1189 BNX2X_SP_RTNL_FAN_FAILURE,
1173}; 1190};
1174 1191
@@ -1222,7 +1239,6 @@ struct bnx2x {
1222#define ETH_MAX_JUMBO_PACKET_SIZE 9600 1239#define ETH_MAX_JUMBO_PACKET_SIZE 9600
1223/* TCP with Timestamp Option (32) + IPv6 (40) */ 1240/* TCP with Timestamp Option (32) + IPv6 (40) */
1224#define ETH_MAX_TPA_HEADER_SIZE 72 1241#define ETH_MAX_TPA_HEADER_SIZE 72
1225#define ETH_MIN_TPA_HEADER_SIZE 40
1226 1242
1227 /* Max supported alignment is 256 (8 shift) */ 1243 /* Max supported alignment is 256 (8 shift) */
1228#define BNX2X_RX_ALIGN_SHIFT min(8, L1_CACHE_SHIFT) 1244#define BNX2X_RX_ALIGN_SHIFT min(8, L1_CACHE_SHIFT)
@@ -1300,6 +1316,7 @@ struct bnx2x {
1300#define NO_ISCSI_FLAG (1 << 14) 1316#define NO_ISCSI_FLAG (1 << 14)
1301#define NO_FCOE_FLAG (1 << 15) 1317#define NO_FCOE_FLAG (1 << 15)
1302#define BC_SUPPORTS_PFC_STATS (1 << 17) 1318#define BC_SUPPORTS_PFC_STATS (1 << 17)
1319#define USING_SINGLE_MSIX_FLAG (1 << 20)
1303 1320
1304#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG) 1321#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG)
1305#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG) 1322#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
@@ -1329,21 +1346,20 @@ struct bnx2x {
1329 struct bnx2x_common common; 1346 struct bnx2x_common common;
1330 struct bnx2x_port port; 1347 struct bnx2x_port port;
1331 1348
1332 struct cmng_struct_per_port cmng; 1349 struct cmng_init cmng;
1333 u32 vn_weight_sum; 1350
1334 u32 mf_config[E1HVN_MAX]; 1351 u32 mf_config[E1HVN_MAX];
1335 u32 mf2_config[E2_FUNC_MAX]; 1352 u32 mf_ext_config;
1336 u32 path_has_ovlan; /* E3 */ 1353 u32 path_has_ovlan; /* E3 */
1337 u16 mf_ov; 1354 u16 mf_ov;
1338 u8 mf_mode; 1355 u8 mf_mode;
1339#define IS_MF(bp) (bp->mf_mode != 0) 1356#define IS_MF(bp) (bp->mf_mode != 0)
1340#define IS_MF_SI(bp) (bp->mf_mode == MULTI_FUNCTION_SI) 1357#define IS_MF_SI(bp) (bp->mf_mode == MULTI_FUNCTION_SI)
1341#define IS_MF_SD(bp) (bp->mf_mode == MULTI_FUNCTION_SD) 1358#define IS_MF_SD(bp) (bp->mf_mode == MULTI_FUNCTION_SD)
1359#define IS_MF_AFEX(bp) (bp->mf_mode == MULTI_FUNCTION_AFEX)
1342 1360
1343 u8 wol; 1361 u8 wol;
1344 1362
1345 bool gro_check;
1346
1347 int rx_ring_size; 1363 int rx_ring_size;
1348 1364
1349 u16 tx_quick_cons_trip_int; 1365 u16 tx_quick_cons_trip_int;
@@ -1371,7 +1387,6 @@ struct bnx2x {
1371#define BNX2X_STATE_DIAG 0xe000 1387#define BNX2X_STATE_DIAG 0xe000
1372#define BNX2X_STATE_ERROR 0xf000 1388#define BNX2X_STATE_ERROR 0xf000
1373 1389
1374 int multi_mode;
1375#define BNX2X_MAX_PRIORITY 8 1390#define BNX2X_MAX_PRIORITY 8
1376#define BNX2X_MAX_ENTRIES_PER_PRI 16 1391#define BNX2X_MAX_ENTRIES_PER_PRI 16
1377#define BNX2X_MAX_COS 3 1392#define BNX2X_MAX_COS 3
@@ -1582,6 +1597,9 @@ struct bnx2x {
1582 struct dcbx_features dcbx_remote_feat; 1597 struct dcbx_features dcbx_remote_feat;
1583 u32 dcbx_remote_flags; 1598 u32 dcbx_remote_flags;
1584#endif 1599#endif
1600 /* AFEX: store default vlan used */
1601 int afex_def_vlan_tag;
1602 enum mf_cfg_afex_vlan_mode afex_vlan_mode;
1585 u32 pending_max; 1603 u32 pending_max;
1586 1604
1587 /* multiple tx classes of service */ 1605 /* multiple tx classes of service */
@@ -2138,9 +2156,16 @@ void bnx2x_notify_link_changed(struct bnx2x *bp);
2138#define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) 2156#define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp))
2139#define IS_MF_FCOE_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) 2157#define IS_MF_FCOE_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))
2140 2158
2159#define BNX2X_MF_EXT_PROTOCOL_FCOE(bp) ((bp)->mf_ext_config & \
2160 MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)
2161
2162#define IS_MF_FCOE_AFEX(bp) (IS_MF_AFEX(bp) && BNX2X_MF_EXT_PROTOCOL_FCOE(bp))
2141#define IS_MF_STORAGE_SD(bp) (IS_MF_SD(bp) && \ 2163#define IS_MF_STORAGE_SD(bp) (IS_MF_SD(bp) && \
2142 (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \ 2164 (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \
2143 BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))) 2165 BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
2166#else
2167#define IS_MF_FCOE_AFEX(bp) false
2144#endif 2168#endif
2145 2169
2170
2146#endif /* bnx2x.h */ 2171#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4b054812713a..ad0743bf4bde 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -23,7 +23,6 @@
23#include <linux/ip.h> 23#include <linux/ip.h>
24#include <net/ipv6.h> 24#include <net/ipv6.h>
25#include <net/ip6_checksum.h> 25#include <net/ip6_checksum.h>
26#include <linux/firmware.h>
27#include <linux/prefetch.h> 26#include <linux/prefetch.h>
28#include "bnx2x_cmn.h" 27#include "bnx2x_cmn.h"
29#include "bnx2x_init.h" 28#include "bnx2x_init.h"
@@ -329,16 +328,6 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
329 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len); 328 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
330 tpa_info->full_page = 329 tpa_info->full_page =
331 SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size; 330 SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
332 /*
333 * FW 7.2.16 BUG workaround:
334 * if SGE size is (exactly) multiple gro_size
335 * fw will place one less frag on SGE.
336 * the calculation is done only for potentially
337 * dangerous MTUs.
338 */
339 if (unlikely(bp->gro_check))
340 if (!(SGE_PAGE_SIZE * PAGES_PER_SGE % gro_size))
341 tpa_info->full_page -= gro_size;
342 tpa_info->gro_size = gro_size; 331 tpa_info->gro_size = gro_size;
343 } 332 }
344 333
@@ -369,8 +358,8 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
369 * Approximate value of the MSS for this aggregation calculated using 358 * Approximate value of the MSS for this aggregation calculated using
370 * the first packet of it. 359 * the first packet of it.
371 */ 360 */
372static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags, 361static u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
373 u16 len_on_bd) 362 u16 len_on_bd)
374{ 363{
375 /* 364 /*
376 * TPA arrgregation won't have either IP options or TCP options 365 * TPA arrgregation won't have either IP options or TCP options
@@ -396,6 +385,36 @@ static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
396 return len_on_bd - hdrs_len; 385 return len_on_bd - hdrs_len;
397} 386}
398 387
388static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
389 struct bnx2x_fastpath *fp, u16 index)
390{
391 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
392 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
393 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
394 dma_addr_t mapping;
395
396 if (unlikely(page == NULL)) {
397 BNX2X_ERR("Can't alloc sge\n");
398 return -ENOMEM;
399 }
400
401 mapping = dma_map_page(&bp->pdev->dev, page, 0,
402 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
403 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
404 __free_pages(page, PAGES_PER_SGE_SHIFT);
405 BNX2X_ERR("Can't map sge\n");
406 return -ENOMEM;
407 }
408
409 sw_buf->page = page;
410 dma_unmap_addr_set(sw_buf, mapping, mapping);
411
412 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
413 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
414
415 return 0;
416}
417
399static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, 418static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
400 struct bnx2x_agg_info *tpa_info, 419 struct bnx2x_agg_info *tpa_info,
401 u16 pages, 420 u16 pages,
@@ -494,11 +513,11 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
494 return 0; 513 return 0;
495} 514}
496 515
497static inline void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, 516static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
498 struct bnx2x_agg_info *tpa_info, 517 struct bnx2x_agg_info *tpa_info,
499 u16 pages, 518 u16 pages,
500 struct eth_end_agg_rx_cqe *cqe, 519 struct eth_end_agg_rx_cqe *cqe,
501 u16 cqe_idx) 520 u16 cqe_idx)
502{ 521{
503 struct sw_rx_bd *rx_buf = &tpa_info->first_buf; 522 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
504 u8 pad = tpa_info->placement_offset; 523 u8 pad = tpa_info->placement_offset;
@@ -524,7 +543,7 @@ static inline void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
524 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), 543 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
525 fp->rx_buf_size, DMA_FROM_DEVICE); 544 fp->rx_buf_size, DMA_FROM_DEVICE);
526 if (likely(new_data)) 545 if (likely(new_data))
527 skb = build_skb(data); 546 skb = build_skb(data, 0);
528 547
529 if (likely(skb)) { 548 if (likely(skb)) {
530#ifdef BNX2X_STOP_ON_ERROR 549#ifdef BNX2X_STOP_ON_ERROR
@@ -568,6 +587,36 @@ drop:
568 fp->eth_q_stats.rx_skb_alloc_failed++; 587 fp->eth_q_stats.rx_skb_alloc_failed++;
569} 588}
570 589
590static int bnx2x_alloc_rx_data(struct bnx2x *bp,
591 struct bnx2x_fastpath *fp, u16 index)
592{
593 u8 *data;
594 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
595 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
596 dma_addr_t mapping;
597
598 data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
599 if (unlikely(data == NULL))
600 return -ENOMEM;
601
602 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
603 fp->rx_buf_size,
604 DMA_FROM_DEVICE);
605 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
606 kfree(data);
607 BNX2X_ERR("Can't map rx data\n");
608 return -ENOMEM;
609 }
610
611 rx_buf->data = data;
612 dma_unmap_addr_set(rx_buf, mapping, mapping);
613
614 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
615 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
616
617 return 0;
618}
619
571 620
572int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) 621int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
573{ 622{
@@ -732,7 +781,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
732 dma_unmap_addr(rx_buf, mapping), 781 dma_unmap_addr(rx_buf, mapping),
733 fp->rx_buf_size, 782 fp->rx_buf_size,
734 DMA_FROM_DEVICE); 783 DMA_FROM_DEVICE);
735 skb = build_skb(data); 784 skb = build_skb(data, 0);
736 if (unlikely(!skb)) { 785 if (unlikely(!skb)) {
737 kfree(data); 786 kfree(data);
738 fp->eth_q_stats.rx_skb_alloc_failed++; 787 fp->eth_q_stats.rx_skb_alloc_failed++;
@@ -881,8 +930,8 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp)
881 * 930 *
882 * It uses a none-atomic bit operations because is called under the mutex. 931 * It uses a none-atomic bit operations because is called under the mutex.
883 */ 932 */
884static inline void bnx2x_fill_report_data(struct bnx2x *bp, 933static void bnx2x_fill_report_data(struct bnx2x *bp,
885 struct bnx2x_link_report_data *data) 934 struct bnx2x_link_report_data *data)
886{ 935{
887 u16 line_speed = bnx2x_get_mf_speed(bp); 936 u16 line_speed = bnx2x_get_mf_speed(bp);
888 937
@@ -1000,6 +1049,47 @@ void __bnx2x_link_report(struct bnx2x *bp)
1000 } 1049 }
1001} 1050}
1002 1051
1052static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1053{
1054 int i;
1055
1056 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1057 struct eth_rx_sge *sge;
1058
1059 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1060 sge->addr_hi =
1061 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1062 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1063
1064 sge->addr_lo =
1065 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1066 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1067 }
1068}
1069
1070static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1071 struct bnx2x_fastpath *fp, int last)
1072{
1073 int i;
1074
1075 for (i = 0; i < last; i++) {
1076 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1077 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1078 u8 *data = first_buf->data;
1079
1080 if (data == NULL) {
1081 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1082 continue;
1083 }
1084 if (tpa_info->tpa_state == BNX2X_TPA_START)
1085 dma_unmap_single(&bp->pdev->dev,
1086 dma_unmap_addr(first_buf, mapping),
1087 fp->rx_buf_size, DMA_FROM_DEVICE);
1088 kfree(data);
1089 first_buf->data = NULL;
1090 }
1091}
1092
1003void bnx2x_init_rx_rings(struct bnx2x *bp) 1093void bnx2x_init_rx_rings(struct bnx2x *bp)
1004{ 1094{
1005 int func = BP_FUNC(bp); 1095 int func = BP_FUNC(bp);
@@ -1212,16 +1302,15 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1212 1302
1213void bnx2x_free_irq(struct bnx2x *bp) 1303void bnx2x_free_irq(struct bnx2x *bp)
1214{ 1304{
1215 if (bp->flags & USING_MSIX_FLAG) 1305 if (bp->flags & USING_MSIX_FLAG &&
1306 !(bp->flags & USING_SINGLE_MSIX_FLAG))
1216 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) + 1307 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
1217 CNIC_PRESENT + 1); 1308 CNIC_PRESENT + 1);
1218 else if (bp->flags & USING_MSI_FLAG)
1219 free_irq(bp->pdev->irq, bp->dev);
1220 else 1309 else
1221 free_irq(bp->pdev->irq, bp->dev); 1310 free_irq(bp->dev->irq, bp->dev);
1222} 1311}
1223 1312
1224int bnx2x_enable_msix(struct bnx2x *bp) 1313int __devinit bnx2x_enable_msix(struct bnx2x *bp)
1225{ 1314{
1226 int msix_vec = 0, i, rc, req_cnt; 1315 int msix_vec = 0, i, rc, req_cnt;
1227 1316
@@ -1261,8 +1350,8 @@ int bnx2x_enable_msix(struct bnx2x *bp)
1261 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc); 1350 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1262 1351
1263 if (rc) { 1352 if (rc) {
1264 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc); 1353 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1265 return rc; 1354 goto no_msix;
1266 } 1355 }
1267 /* 1356 /*
1268 * decrease number of queues by number of unallocated entries 1357 * decrease number of queues by number of unallocated entries
@@ -1270,18 +1359,34 @@ int bnx2x_enable_msix(struct bnx2x *bp)
1270 bp->num_queues -= diff; 1359 bp->num_queues -= diff;
1271 1360
1272 BNX2X_DEV_INFO("New queue configuration set: %d\n", 1361 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1273 bp->num_queues); 1362 bp->num_queues);
1274 } else if (rc) { 1363 } else if (rc > 0) {
1275 /* fall to INTx if not enough memory */ 1364 /* Get by with single vector */
1276 if (rc == -ENOMEM) 1365 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
1277 bp->flags |= DISABLE_MSI_FLAG; 1366 if (rc) {
1367 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1368 rc);
1369 goto no_msix;
1370 }
1371
1372 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1373 bp->flags |= USING_SINGLE_MSIX_FLAG;
1374
1375 } else if (rc < 0) {
1278 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc); 1376 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1279 return rc; 1377 goto no_msix;
1280 } 1378 }
1281 1379
1282 bp->flags |= USING_MSIX_FLAG; 1380 bp->flags |= USING_MSIX_FLAG;
1283 1381
1284 return 0; 1382 return 0;
1383
1384no_msix:
1385 /* fall to INTx if not enough memory */
1386 if (rc == -ENOMEM)
1387 bp->flags |= DISABLE_MSI_FLAG;
1388
1389 return rc;
1285} 1390}
1286 1391
1287static int bnx2x_req_msix_irqs(struct bnx2x *bp) 1392static int bnx2x_req_msix_irqs(struct bnx2x *bp)
@@ -1343,22 +1448,26 @@ int bnx2x_enable_msi(struct bnx2x *bp)
1343static int bnx2x_req_irq(struct bnx2x *bp) 1448static int bnx2x_req_irq(struct bnx2x *bp)
1344{ 1449{
1345 unsigned long flags; 1450 unsigned long flags;
1346 int rc; 1451 unsigned int irq;
1347 1452
1348 if (bp->flags & USING_MSI_FLAG) 1453 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1349 flags = 0; 1454 flags = 0;
1350 else 1455 else
1351 flags = IRQF_SHARED; 1456 flags = IRQF_SHARED;
1352 1457
1353 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags, 1458 if (bp->flags & USING_MSIX_FLAG)
1354 bp->dev->name, bp->dev); 1459 irq = bp->msix_table[0].vector;
1355 return rc; 1460 else
1461 irq = bp->pdev->irq;
1462
1463 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1356} 1464}
1357 1465
1358static inline int bnx2x_setup_irqs(struct bnx2x *bp) 1466static int bnx2x_setup_irqs(struct bnx2x *bp)
1359{ 1467{
1360 int rc = 0; 1468 int rc = 0;
1361 if (bp->flags & USING_MSIX_FLAG) { 1469 if (bp->flags & USING_MSIX_FLAG &&
1470 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1362 rc = bnx2x_req_msix_irqs(bp); 1471 rc = bnx2x_req_msix_irqs(bp);
1363 if (rc) 1472 if (rc)
1364 return rc; 1473 return rc;
@@ -1371,15 +1480,20 @@ static inline int bnx2x_setup_irqs(struct bnx2x *bp)
1371 } 1480 }
1372 if (bp->flags & USING_MSI_FLAG) { 1481 if (bp->flags & USING_MSI_FLAG) {
1373 bp->dev->irq = bp->pdev->irq; 1482 bp->dev->irq = bp->pdev->irq;
1374 netdev_info(bp->dev, "using MSI IRQ %d\n", 1483 netdev_info(bp->dev, "using MSI IRQ %d\n",
1375 bp->pdev->irq); 1484 bp->dev->irq);
1485 }
1486 if (bp->flags & USING_MSIX_FLAG) {
1487 bp->dev->irq = bp->msix_table[0].vector;
1488 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1489 bp->dev->irq);
1376 } 1490 }
1377 } 1491 }
1378 1492
1379 return 0; 1493 return 0;
1380} 1494}
1381 1495
1382static inline void bnx2x_napi_enable(struct bnx2x *bp) 1496static void bnx2x_napi_enable(struct bnx2x *bp)
1383{ 1497{
1384 int i; 1498 int i;
1385 1499
@@ -1387,7 +1501,7 @@ static inline void bnx2x_napi_enable(struct bnx2x *bp)
1387 napi_enable(&bnx2x_fp(bp, i, napi)); 1501 napi_enable(&bnx2x_fp(bp, i, napi));
1388} 1502}
1389 1503
1390static inline void bnx2x_napi_disable(struct bnx2x *bp) 1504static void bnx2x_napi_disable(struct bnx2x *bp)
1391{ 1505{
1392 int i; 1506 int i;
1393 1507
@@ -1437,24 +1551,15 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1437 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp)); 1551 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
1438} 1552}
1439 1553
1554
1440void bnx2x_set_num_queues(struct bnx2x *bp) 1555void bnx2x_set_num_queues(struct bnx2x *bp)
1441{ 1556{
1442 switch (bp->multi_mode) { 1557 /* RSS queues */
1443 case ETH_RSS_MODE_DISABLED: 1558 bp->num_queues = bnx2x_calc_num_queues(bp);
1444 bp->num_queues = 1;
1445 break;
1446 case ETH_RSS_MODE_REGULAR:
1447 bp->num_queues = bnx2x_calc_num_queues(bp);
1448 break;
1449
1450 default:
1451 bp->num_queues = 1;
1452 break;
1453 }
1454 1559
1455#ifdef BCM_CNIC 1560#ifdef BCM_CNIC
1456 /* override in STORAGE SD mode */ 1561 /* override in STORAGE SD modes */
1457 if (IS_MF_STORAGE_SD(bp)) 1562 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1458 bp->num_queues = 1; 1563 bp->num_queues = 1;
1459#endif 1564#endif
1460 /* Add special queues */ 1565 /* Add special queues */
@@ -1483,7 +1588,7 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
1483 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash() 1588 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1484 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0). 1589 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1485 */ 1590 */
1486static inline int bnx2x_set_real_num_queues(struct bnx2x *bp) 1591static int bnx2x_set_real_num_queues(struct bnx2x *bp)
1487{ 1592{
1488 int rc, tx, rx; 1593 int rc, tx, rx;
1489 1594
@@ -1515,7 +1620,7 @@ static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1515 return rc; 1620 return rc;
1516} 1621}
1517 1622
1518static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp) 1623static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1519{ 1624{
1520 int i; 1625 int i;
1521 1626
@@ -1543,22 +1648,19 @@ static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1543 } 1648 }
1544} 1649}
1545 1650
1546static inline int bnx2x_init_rss_pf(struct bnx2x *bp) 1651static int bnx2x_init_rss_pf(struct bnx2x *bp)
1547{ 1652{
1548 int i; 1653 int i;
1549 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; 1654 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
1550 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); 1655 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1551 1656
1552 /* 1657 /* Prepare the initial contents fo the indirection table if RSS is
1553 * Prepare the inital contents fo the indirection table if RSS is
1554 * enabled 1658 * enabled
1555 */ 1659 */
1556 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) { 1660 for (i = 0; i < sizeof(ind_table); i++)
1557 for (i = 0; i < sizeof(ind_table); i++) 1661 ind_table[i] =
1558 ind_table[i] = 1662 bp->fp->cl_id +
1559 bp->fp->cl_id + 1663 ethtool_rxfh_indir_default(i, num_eth_queues);
1560 ethtool_rxfh_indir_default(i, num_eth_queues);
1561 }
1562 1664
1563 /* 1665 /*
1564 * For 57710 and 57711 SEARCHER configuration (rss_keys) is 1666 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
@@ -1568,11 +1670,12 @@ static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
1568 * For 57712 and newer on the other hand it's a per-function 1670 * For 57712 and newer on the other hand it's a per-function
1569 * configuration. 1671 * configuration.
1570 */ 1672 */
1571 return bnx2x_config_rss_pf(bp, ind_table, 1673 return bnx2x_config_rss_eth(bp, ind_table,
1572 bp->port.pmf || !CHIP_IS_E1x(bp)); 1674 bp->port.pmf || !CHIP_IS_E1x(bp));
1573} 1675}
1574 1676
1575int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash) 1677int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
1678 u8 *ind_table, bool config_hash)
1576{ 1679{
1577 struct bnx2x_config_rss_params params = {NULL}; 1680 struct bnx2x_config_rss_params params = {NULL};
1578 int i; 1681 int i;
@@ -1584,58 +1687,35 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
1584 * bp->multi_mode = ETH_RSS_MODE_DISABLED; 1687 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1585 */ 1688 */
1586 1689
1587 params.rss_obj = &bp->rss_conf_obj; 1690 params.rss_obj = rss_obj;
1588 1691
1589 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags); 1692 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1590 1693
1591 /* RSS mode */ 1694 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1592 switch (bp->multi_mode) {
1593 case ETH_RSS_MODE_DISABLED:
1594 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
1595 break;
1596 case ETH_RSS_MODE_REGULAR:
1597 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1598 break;
1599 case ETH_RSS_MODE_VLAN_PRI:
1600 __set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
1601 break;
1602 case ETH_RSS_MODE_E1HOV_PRI:
1603 __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
1604 break;
1605 case ETH_RSS_MODE_IP_DSCP:
1606 __set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
1607 break;
1608 default:
1609 BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
1610 return -EINVAL;
1611 }
1612 1695
1613 /* If RSS is enabled */ 1696 /* RSS configuration */
1614 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) { 1697 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1615 /* RSS configuration */ 1698 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1616 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags); 1699 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1617 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags); 1700 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1618 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1619 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1620 1701
1621 /* Hash bits */ 1702 /* Hash bits */
1622 params.rss_result_mask = MULTI_MASK; 1703 params.rss_result_mask = MULTI_MASK;
1623 1704
1624 memcpy(params.ind_table, ind_table, sizeof(params.ind_table)); 1705 memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
1625 1706
1626 if (config_hash) { 1707 if (config_hash) {
1627 /* RSS keys */ 1708 /* RSS keys */
1628 for (i = 0; i < sizeof(params.rss_key) / 4; i++) 1709 for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1629 params.rss_key[i] = random32(); 1710 params.rss_key[i] = random32();
1630 1711
1631 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags); 1712 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
1632 }
1633 } 1713 }
1634 1714
1635 return bnx2x_config_rss(bp, &params); 1715 return bnx2x_config_rss(bp, &params);
1636} 1716}
1637 1717
1638static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code) 1718static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1639{ 1719{
1640 struct bnx2x_func_state_params func_params = {NULL}; 1720 struct bnx2x_func_state_params func_params = {NULL};
1641 1721
@@ -1744,6 +1824,87 @@ bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err)
1744 return true; 1824 return true;
1745} 1825}
1746 1826
1827/**
1828 * bnx2x_bz_fp - zero content of the fastpath structure.
1829 *
1830 * @bp: driver handle
1831 * @index: fastpath index to be zeroed
1832 *
1833 * Makes sure the contents of the bp->fp[index].napi is kept
1834 * intact.
1835 */
1836static void bnx2x_bz_fp(struct bnx2x *bp, int index)
1837{
1838 struct bnx2x_fastpath *fp = &bp->fp[index];
1839 struct napi_struct orig_napi = fp->napi;
1840 /* bzero bnx2x_fastpath contents */
1841 if (bp->stats_init)
1842 memset(fp, 0, sizeof(*fp));
1843 else {
1844 /* Keep Queue statistics */
1845 struct bnx2x_eth_q_stats *tmp_eth_q_stats;
1846 struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
1847
1848 tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
1849 GFP_KERNEL);
1850 if (tmp_eth_q_stats)
1851 memcpy(tmp_eth_q_stats, &fp->eth_q_stats,
1852 sizeof(struct bnx2x_eth_q_stats));
1853
1854 tmp_eth_q_stats_old =
1855 kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
1856 GFP_KERNEL);
1857 if (tmp_eth_q_stats_old)
1858 memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old,
1859 sizeof(struct bnx2x_eth_q_stats_old));
1860
1861 memset(fp, 0, sizeof(*fp));
1862
1863 if (tmp_eth_q_stats) {
1864 memcpy(&fp->eth_q_stats, tmp_eth_q_stats,
1865 sizeof(struct bnx2x_eth_q_stats));
1866 kfree(tmp_eth_q_stats);
1867 }
1868
1869 if (tmp_eth_q_stats_old) {
1870 memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old,
1871 sizeof(struct bnx2x_eth_q_stats_old));
1872 kfree(tmp_eth_q_stats_old);
1873 }
1874
1875 }
1876
1877 /* Restore the NAPI object as it has been already initialized */
1878 fp->napi = orig_napi;
1879
1880 fp->bp = bp;
1881 fp->index = index;
1882 if (IS_ETH_FP(fp))
1883 fp->max_cos = bp->max_cos;
1884 else
1885 /* Special queues support only one CoS */
1886 fp->max_cos = 1;
1887
1888 /*
1889 * set the tpa flag for each queue. The tpa flag determines the queue
1890 * minimal size so it must be set prior to queue memory allocation
1891 */
1892 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
1893 (bp->flags & GRO_ENABLE_FLAG &&
1894 bnx2x_mtu_allows_gro(bp->dev->mtu)));
1895 if (bp->flags & TPA_ENABLE_FLAG)
1896 fp->mode = TPA_MODE_LRO;
1897 else if (bp->flags & GRO_ENABLE_FLAG)
1898 fp->mode = TPA_MODE_GRO;
1899
1900#ifdef BCM_CNIC
1901 /* We don't want TPA on an FCoE L2 ring */
1902 if (IS_FCOE_FP(fp))
1903 fp->disable_tpa = 1;
1904#endif
1905}
1906
1907
1747/* must be called with rtnl_lock */ 1908/* must be called with rtnl_lock */
1748int bnx2x_nic_load(struct bnx2x *bp, int load_mode) 1909int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1749{ 1910{
@@ -1911,8 +2072,14 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1911 SHMEM2_WR(bp, dcc_support, 2072 SHMEM2_WR(bp, dcc_support,
1912 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV | 2073 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1913 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV)); 2074 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2075 if (SHMEM2_HAS(bp, afex_driver_support))
2076 SHMEM2_WR(bp, afex_driver_support,
2077 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
1914 } 2078 }
1915 2079
2080 /* Set AFEX default VLAN tag to an invalid value */
2081 bp->afex_def_vlan_tag = -1;
2082
1916 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; 2083 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1917 rc = bnx2x_func_start(bp); 2084 rc = bnx2x_func_start(bp);
1918 if (rc) { 2085 if (rc) {
@@ -2968,6 +3135,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2968 3135
2969 netdev_tx_sent_queue(txq, skb->len); 3136 netdev_tx_sent_queue(txq, skb->len);
2970 3137
3138 skb_tx_timestamp(skb);
3139
2971 txdata->tx_pkt_prod++; 3140 txdata->tx_pkt_prod++;
2972 /* 3141 /*
2973 * Make sure that the BD data is updated before updating the producer 3142 * Make sure that the BD data is updated before updating the producer
@@ -3084,7 +3253,8 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
3084 } 3253 }
3085 3254
3086#ifdef BCM_CNIC 3255#ifdef BCM_CNIC
3087 if (IS_MF_STORAGE_SD(bp) && !is_zero_ether_addr(addr->sa_data)) { 3256 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
3257 !is_zero_ether_addr(addr->sa_data)) {
3088 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n"); 3258 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
3089 return -EINVAL; 3259 return -EINVAL;
3090 } 3260 }
@@ -3181,7 +3351,7 @@ void bnx2x_free_fp_mem(struct bnx2x *bp)
3181 bnx2x_free_fp_mem_at(bp, i); 3351 bnx2x_free_fp_mem_at(bp, i);
3182} 3352}
3183 3353
3184static inline void set_sb_shortcuts(struct bnx2x *bp, int index) 3354static void set_sb_shortcuts(struct bnx2x *bp, int index)
3185{ 3355{
3186 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk); 3356 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
3187 if (!CHIP_IS_E1x(bp)) { 3357 if (!CHIP_IS_E1x(bp)) {
@@ -3197,6 +3367,63 @@ static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
3197 } 3367 }
3198} 3368}
3199 3369
3370/* Returns the number of actually allocated BDs */
3371static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
3372 int rx_ring_size)
3373{
3374 struct bnx2x *bp = fp->bp;
3375 u16 ring_prod, cqe_ring_prod;
3376 int i, failure_cnt = 0;
3377
3378 fp->rx_comp_cons = 0;
3379 cqe_ring_prod = ring_prod = 0;
3380
3381 /* This routine is called only during fo init so
3382 * fp->eth_q_stats.rx_skb_alloc_failed = 0
3383 */
3384 for (i = 0; i < rx_ring_size; i++) {
3385 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
3386 failure_cnt++;
3387 continue;
3388 }
3389 ring_prod = NEXT_RX_IDX(ring_prod);
3390 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
3391 WARN_ON(ring_prod <= (i - failure_cnt));
3392 }
3393
3394 if (failure_cnt)
3395 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
3396 i - failure_cnt, fp->index);
3397
3398 fp->rx_bd_prod = ring_prod;
3399 /* Limit the CQE producer by the CQE ring size */
3400 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
3401 cqe_ring_prod);
3402 fp->rx_pkt = fp->rx_calls = 0;
3403
3404 fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
3405
3406 return i - failure_cnt;
3407}
3408
3409static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
3410{
3411 int i;
3412
3413 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
3414 struct eth_rx_cqe_next_page *nextpg;
3415
3416 nextpg = (struct eth_rx_cqe_next_page *)
3417 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
3418 nextpg->addr_hi =
3419 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
3420 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3421 nextpg->addr_lo =
3422 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
3423 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3424 }
3425}
3426
3200static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) 3427static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3201{ 3428{
3202 union host_hc_status_block *sb; 3429 union host_hc_status_block *sb;
@@ -3206,7 +3433,8 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3206 int rx_ring_size = 0; 3433 int rx_ring_size = 0;
3207 3434
3208#ifdef BCM_CNIC 3435#ifdef BCM_CNIC
3209 if (!bp->rx_ring_size && IS_MF_STORAGE_SD(bp)) { 3436 if (!bp->rx_ring_size &&
3437 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
3210 rx_ring_size = MIN_RX_SIZE_NONTPA; 3438 rx_ring_size = MIN_RX_SIZE_NONTPA;
3211 bp->rx_ring_size = rx_ring_size; 3439 bp->rx_ring_size = rx_ring_size;
3212 } else 3440 } else
@@ -3528,8 +3756,6 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3528 */ 3756 */
3529 dev->mtu = new_mtu; 3757 dev->mtu = new_mtu;
3530 3758
3531 bp->gro_check = bnx2x_need_gro_check(new_mtu);
3532
3533 return bnx2x_reload_if_running(dev); 3759 return bnx2x_reload_if_running(dev);
3534} 3760}
3535 3761
@@ -3687,9 +3913,9 @@ void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
3687 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE); 3913 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
3688} 3914}
3689 3915
3690static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port, 3916static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
3691 u8 fw_sb_id, u8 sb_index, 3917 u8 fw_sb_id, u8 sb_index,
3692 u8 ticks) 3918 u8 ticks)
3693{ 3919{
3694 3920
3695 u32 addr = BAR_CSTRORM_INTMEM + 3921 u32 addr = BAR_CSTRORM_INTMEM +
@@ -3700,9 +3926,9 @@ static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
3700 port, fw_sb_id, sb_index, ticks); 3926 port, fw_sb_id, sb_index, ticks);
3701} 3927}
3702 3928
3703static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port, 3929static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
3704 u16 fw_sb_id, u8 sb_index, 3930 u16 fw_sb_id, u8 sb_index,
3705 u8 disable) 3931 u8 disable)
3706{ 3932{
3707 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT); 3933 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
3708 u32 addr = BAR_CSTRORM_INTMEM + 3934 u32 addr = BAR_CSTRORM_INTMEM +
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 5c27454d2ec2..7cd99b75347a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -86,13 +86,15 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);
86void bnx2x_send_unload_done(struct bnx2x *bp); 86void bnx2x_send_unload_done(struct bnx2x *bp);
87 87
88/** 88/**
89 * bnx2x_config_rss_pf - configure RSS parameters. 89 * bnx2x_config_rss_pf - configure RSS parameters in a PF.
90 * 90 *
91 * @bp: driver handle 91 * @bp: driver handle
92 * @rss_obj RSS object to use
92 * @ind_table: indirection table to configure 93 * @ind_table: indirection table to configure
93 * @config_hash: re-configure RSS hash keys configuration 94 * @config_hash: re-configure RSS hash keys configuration
94 */ 95 */
95int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash); 96int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
97 u8 *ind_table, bool config_hash);
96 98
97/** 99/**
98 * bnx2x__init_func_obj - init function object 100 * bnx2x__init_func_obj - init function object
@@ -485,7 +487,7 @@ void bnx2x_netif_start(struct bnx2x *bp);
485 * fills msix_table, requests vectors, updates num_queues 487 * fills msix_table, requests vectors, updates num_queues
486 * according to number of available vectors. 488 * according to number of available vectors.
487 */ 489 */
488int bnx2x_enable_msix(struct bnx2x *bp); 490int __devinit bnx2x_enable_msix(struct bnx2x *bp);
489 491
490/** 492/**
491 * bnx2x_enable_msi - request msi mode from OS, updated internals accordingly 493 * bnx2x_enable_msi - request msi mode from OS, updated internals accordingly
@@ -610,53 +612,6 @@ static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
610 barrier(); 612 barrier();
611} 613}
612 614
613static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
614 u8 idu_sb_id, bool is_Pf)
615{
616 u32 data, ctl, cnt = 100;
617 u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
618 u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
619 u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
620 u32 sb_bit = 1 << (idu_sb_id%32);
621 u32 func_encode = func | (is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
622 u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
623
624 /* Not supported in BC mode */
625 if (CHIP_INT_MODE_IS_BC(bp))
626 return;
627
628 data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
629 << IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
630 IGU_REGULAR_CLEANUP_SET |
631 IGU_REGULAR_BCLEANUP;
632
633 ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
634 func_encode << IGU_CTRL_REG_FID_SHIFT |
635 IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
636
637 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
638 data, igu_addr_data);
639 REG_WR(bp, igu_addr_data, data);
640 mmiowb();
641 barrier();
642 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
643 ctl, igu_addr_ctl);
644 REG_WR(bp, igu_addr_ctl, ctl);
645 mmiowb();
646 barrier();
647
648 /* wait for clean up to finish */
649 while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
650 msleep(20);
651
652
653 if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
654 DP(NETIF_MSG_HW,
655 "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
656 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
657 }
658}
659
660static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id, 615static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
661 u8 storm, u16 index, u8 op, u8 update) 616 u8 storm, u16 index, u8 op, u8 update)
662{ 617{
@@ -843,7 +798,7 @@ static inline void bnx2x_disable_msi(struct bnx2x *bp)
843{ 798{
844 if (bp->flags & USING_MSIX_FLAG) { 799 if (bp->flags & USING_MSIX_FLAG) {
845 pci_disable_msix(bp->pdev); 800 pci_disable_msix(bp->pdev);
846 bp->flags &= ~USING_MSIX_FLAG; 801 bp->flags &= ~(USING_MSIX_FLAG | USING_SINGLE_MSIX_FLAG);
847 } else if (bp->flags & USING_MSI_FLAG) { 802 } else if (bp->flags & USING_MSI_FLAG) {
848 pci_disable_msi(bp->pdev); 803 pci_disable_msi(bp->pdev);
849 bp->flags &= ~USING_MSI_FLAG; 804 bp->flags &= ~USING_MSI_FLAG;
@@ -883,66 +838,6 @@ static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
883 bnx2x_clear_sge_mask_next_elems(fp); 838 bnx2x_clear_sge_mask_next_elems(fp);
884} 839}
885 840
886static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
887 struct bnx2x_fastpath *fp, u16 index)
888{
889 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
890 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
891 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
892 dma_addr_t mapping;
893
894 if (unlikely(page == NULL)) {
895 BNX2X_ERR("Can't alloc sge\n");
896 return -ENOMEM;
897 }
898
899 mapping = dma_map_page(&bp->pdev->dev, page, 0,
900 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
901 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
902 __free_pages(page, PAGES_PER_SGE_SHIFT);
903 BNX2X_ERR("Can't map sge\n");
904 return -ENOMEM;
905 }
906
907 sw_buf->page = page;
908 dma_unmap_addr_set(sw_buf, mapping, mapping);
909
910 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
911 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
912
913 return 0;
914}
915
916static inline int bnx2x_alloc_rx_data(struct bnx2x *bp,
917 struct bnx2x_fastpath *fp, u16 index)
918{
919 u8 *data;
920 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
921 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
922 dma_addr_t mapping;
923
924 data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
925 if (unlikely(data == NULL))
926 return -ENOMEM;
927
928 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
929 fp->rx_buf_size,
930 DMA_FROM_DEVICE);
931 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
932 kfree(data);
933 BNX2X_ERR("Can't map rx data\n");
934 return -ENOMEM;
935 }
936
937 rx_buf->data = data;
938 dma_unmap_addr_set(rx_buf, mapping, mapping);
939
940 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
941 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
942
943 return 0;
944}
945
946/* note that we are not allocating a new buffer, 841/* note that we are not allocating a new buffer,
947 * we are just moving one from cons to prod 842 * we are just moving one from cons to prod
948 * we are not creating a new mapping, 843 * we are not creating a new mapping,
@@ -964,6 +859,19 @@ static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp,
964 859
965/************************* Init ******************************************/ 860/************************* Init ******************************************/
966 861
862/* returns func by VN for current port */
863static inline int func_by_vn(struct bnx2x *bp, int vn)
864{
865 return 2 * vn + BP_PORT(bp);
866}
867
868static inline int bnx2x_config_rss_eth(struct bnx2x *bp, u8 *ind_table,
869 bool config_hash)
870{
871 return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, ind_table,
872 config_hash);
873}
874
967/** 875/**
968 * bnx2x_func_start - init function 876 * bnx2x_func_start - init function
969 * 877 *
@@ -1027,66 +935,6 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1027 bnx2x_free_rx_sge(bp, fp, i); 935 bnx2x_free_rx_sge(bp, fp, i);
1028} 936}
1029 937
1030static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
1031 struct bnx2x_fastpath *fp, int last)
1032{
1033 int i;
1034
1035 for (i = 0; i < last; i++) {
1036 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1037 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1038 u8 *data = first_buf->data;
1039
1040 if (data == NULL) {
1041 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1042 continue;
1043 }
1044 if (tpa_info->tpa_state == BNX2X_TPA_START)
1045 dma_unmap_single(&bp->pdev->dev,
1046 dma_unmap_addr(first_buf, mapping),
1047 fp->rx_buf_size, DMA_FROM_DEVICE);
1048 kfree(data);
1049 first_buf->data = NULL;
1050 }
1051}
1052
1053static inline void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
1054{
1055 int i;
1056
1057 for (i = 1; i <= NUM_TX_RINGS; i++) {
1058 struct eth_tx_next_bd *tx_next_bd =
1059 &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
1060
1061 tx_next_bd->addr_hi =
1062 cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
1063 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
1064 tx_next_bd->addr_lo =
1065 cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
1066 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
1067 }
1068
1069 SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
1070 txdata->tx_db.data.zero_fill1 = 0;
1071 txdata->tx_db.data.prod = 0;
1072
1073 txdata->tx_pkt_prod = 0;
1074 txdata->tx_pkt_cons = 0;
1075 txdata->tx_bd_prod = 0;
1076 txdata->tx_bd_cons = 0;
1077 txdata->tx_pkt = 0;
1078}
1079
1080static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
1081{
1082 int i;
1083 u8 cos;
1084
1085 for_each_tx_queue(bp, i)
1086 for_each_cos_in_tx_queue(&bp->fp[i], cos)
1087 bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]);
1088}
1089
1090static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp) 938static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
1091{ 939{
1092 int i; 940 int i;
@@ -1104,80 +952,6 @@ static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
1104 } 952 }
1105} 953}
1106 954
1107static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1108{
1109 int i;
1110
1111 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1112 struct eth_rx_sge *sge;
1113
1114 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1115 sge->addr_hi =
1116 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1117 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1118
1119 sge->addr_lo =
1120 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1121 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1122 }
1123}
1124
1125static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
1126{
1127 int i;
1128 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
1129 struct eth_rx_cqe_next_page *nextpg;
1130
1131 nextpg = (struct eth_rx_cqe_next_page *)
1132 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
1133 nextpg->addr_hi =
1134 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
1135 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
1136 nextpg->addr_lo =
1137 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
1138 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
1139 }
1140}
1141
1142/* Returns the number of actually allocated BDs */
1143static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
1144 int rx_ring_size)
1145{
1146 struct bnx2x *bp = fp->bp;
1147 u16 ring_prod, cqe_ring_prod;
1148 int i, failure_cnt = 0;
1149
1150 fp->rx_comp_cons = 0;
1151 cqe_ring_prod = ring_prod = 0;
1152
1153 /* This routine is called only during fo init so
1154 * fp->eth_q_stats.rx_skb_alloc_failed = 0
1155 */
1156 for (i = 0; i < rx_ring_size; i++) {
1157 if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
1158 failure_cnt++;
1159 continue;
1160 }
1161 ring_prod = NEXT_RX_IDX(ring_prod);
1162 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
1163 WARN_ON(ring_prod <= (i - failure_cnt));
1164 }
1165
1166 if (failure_cnt)
1167 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
1168 i - failure_cnt, fp->index);
1169
1170 fp->rx_bd_prod = ring_prod;
1171 /* Limit the CQE producer by the CQE ring size */
1172 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
1173 cqe_ring_prod);
1174 fp->rx_pkt = fp->rx_calls = 0;
1175
1176 fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
1177
1178 return i - failure_cnt;
1179}
1180
1181/* Statistics ID are global per chip/path, while Client IDs for E1x are per 955/* Statistics ID are global per chip/path, while Client IDs for E1x are per
1182 * port. 956 * port.
1183 */ 957 */
@@ -1406,30 +1180,6 @@ static inline void __storm_memset_struct(struct bnx2x *bp,
1406 REG_WR(bp, addr + (i * 4), data[i]); 1180 REG_WR(bp, addr + (i * 4), data[i]);
1407} 1181}
1408 1182
1409static inline void storm_memset_func_cfg(struct bnx2x *bp,
1410 struct tstorm_eth_function_common_config *tcfg,
1411 u16 abs_fid)
1412{
1413 size_t size = sizeof(struct tstorm_eth_function_common_config);
1414
1415 u32 addr = BAR_TSTRORM_INTMEM +
1416 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
1417
1418 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
1419}
1420
1421static inline void storm_memset_cmng(struct bnx2x *bp,
1422 struct cmng_struct_per_port *cmng,
1423 u8 port)
1424{
1425 size_t size = sizeof(struct cmng_struct_per_port);
1426
1427 u32 addr = BAR_XSTRORM_INTMEM +
1428 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
1429
1430 __storm_memset_struct(bp, addr, size, (u32 *)cmng);
1431}
1432
1433/** 1183/**
1434 * bnx2x_wait_sp_comp - wait for the outstanding SP commands. 1184 * bnx2x_wait_sp_comp - wait for the outstanding SP commands.
1435 * 1185 *
@@ -1512,93 +1262,6 @@ static inline bool bnx2x_mtu_allows_gro(int mtu)
1512 */ 1262 */
1513 return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS; 1263 return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
1514} 1264}
1515
1516static inline bool bnx2x_need_gro_check(int mtu)
1517{
1518 return (SGE_PAGES / (mtu - ETH_MAX_TPA_HEADER_SIZE - 1)) !=
1519 (SGE_PAGES / (mtu - ETH_MIN_TPA_HEADER_SIZE + 1));
1520}
1521
1522/**
1523 * bnx2x_bz_fp - zero content of the fastpath structure.
1524 *
1525 * @bp: driver handle
1526 * @index: fastpath index to be zeroed
1527 *
1528 * Makes sure the contents of the bp->fp[index].napi is kept
1529 * intact.
1530 */
1531static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
1532{
1533 struct bnx2x_fastpath *fp = &bp->fp[index];
1534 struct napi_struct orig_napi = fp->napi;
1535 /* bzero bnx2x_fastpath contents */
1536 if (bp->stats_init)
1537 memset(fp, 0, sizeof(*fp));
1538 else {
1539 /* Keep Queue statistics */
1540 struct bnx2x_eth_q_stats *tmp_eth_q_stats;
1541 struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
1542
1543 tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
1544 GFP_KERNEL);
1545 if (tmp_eth_q_stats)
1546 memcpy(tmp_eth_q_stats, &fp->eth_q_stats,
1547 sizeof(struct bnx2x_eth_q_stats));
1548
1549 tmp_eth_q_stats_old =
1550 kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
1551 GFP_KERNEL);
1552 if (tmp_eth_q_stats_old)
1553 memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old,
1554 sizeof(struct bnx2x_eth_q_stats_old));
1555
1556 memset(fp, 0, sizeof(*fp));
1557
1558 if (tmp_eth_q_stats) {
1559 memcpy(&fp->eth_q_stats, tmp_eth_q_stats,
1560 sizeof(struct bnx2x_eth_q_stats));
1561 kfree(tmp_eth_q_stats);
1562 }
1563
1564 if (tmp_eth_q_stats_old) {
1565 memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old,
1566 sizeof(struct bnx2x_eth_q_stats_old));
1567 kfree(tmp_eth_q_stats_old);
1568 }
1569
1570 }
1571
1572 /* Restore the NAPI object as it has been already initialized */
1573 fp->napi = orig_napi;
1574
1575 fp->bp = bp;
1576 fp->index = index;
1577 if (IS_ETH_FP(fp))
1578 fp->max_cos = bp->max_cos;
1579 else
1580 /* Special queues support only one CoS */
1581 fp->max_cos = 1;
1582
1583 /*
1584 * set the tpa flag for each queue. The tpa flag determines the queue
1585 * minimal size so it must be set prior to queue memory allocation
1586 */
1587 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
1588 (bp->flags & GRO_ENABLE_FLAG &&
1589 bnx2x_mtu_allows_gro(bp->dev->mtu)));
1590 if (bp->flags & TPA_ENABLE_FLAG)
1591 fp->mode = TPA_MODE_LRO;
1592 else if (bp->flags & GRO_ENABLE_FLAG)
1593 fp->mode = TPA_MODE_GRO;
1594
1595#ifdef BCM_CNIC
1596 /* We don't want TPA on an FCoE L2 ring */
1597 if (IS_FCOE_FP(fp))
1598 fp->disable_tpa = 1;
1599#endif
1600}
1601
1602#ifdef BCM_CNIC 1265#ifdef BCM_CNIC
1603/** 1266/**
1604 * bnx2x_get_iscsi_info - update iSCSI params according to licensing info. 1267 * bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
@@ -1608,11 +1271,6 @@ static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
1608 */ 1271 */
1609void bnx2x_get_iscsi_info(struct bnx2x *bp); 1272void bnx2x_get_iscsi_info(struct bnx2x *bp);
1610#endif 1273#endif
1611/* returns func by VN for current port */
1612static inline int func_by_vn(struct bnx2x *bp, int vn)
1613{
1614 return 2 * vn + BP_PORT(bp);
1615}
1616 1274
1617/** 1275/**
1618 * bnx2x_link_sync_notify - send notification to other functions. 1276 * bnx2x_link_sync_notify - send notification to other functions.
@@ -1667,7 +1325,8 @@ static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
1667 if (is_valid_ether_addr(addr)) 1325 if (is_valid_ether_addr(addr))
1668 return true; 1326 return true;
1669#ifdef BCM_CNIC 1327#ifdef BCM_CNIC
1670 if (is_zero_ether_addr(addr) && IS_MF_STORAGE_SD(bp)) 1328 if (is_zero_ether_addr(addr) &&
1329 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)))
1671 return true; 1330 return true;
1672#endif 1331#endif
1673 return false; 1332 return false;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 2cc0a1703970..ddc18ee5c5ae 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -22,13 +22,10 @@
22#include <linux/types.h> 22#include <linux/types.h>
23#include <linux/sched.h> 23#include <linux/sched.h>
24#include <linux/crc32.h> 24#include <linux/crc32.h>
25
26
27#include "bnx2x.h" 25#include "bnx2x.h"
28#include "bnx2x_cmn.h" 26#include "bnx2x_cmn.h"
29#include "bnx2x_dump.h" 27#include "bnx2x_dump.h"
30#include "bnx2x_init.h" 28#include "bnx2x_init.h"
31#include "bnx2x_sp.h"
32 29
33/* Note: in the format strings below %s is replaced by the queue-name which is 30/* Note: in the format strings below %s is replaced by the queue-name which is
34 * either its index or 'fcoe' for the fcoe queue. Make sure the format string 31 * either its index or 'fcoe' for the fcoe queue. Make sure the format string
@@ -595,8 +592,8 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
595#define IS_E3_ONLINE(info) (((info) & RI_E3_ONLINE) == RI_E3_ONLINE) 592#define IS_E3_ONLINE(info) (((info) & RI_E3_ONLINE) == RI_E3_ONLINE)
596#define IS_E3B0_ONLINE(info) (((info) & RI_E3B0_ONLINE) == RI_E3B0_ONLINE) 593#define IS_E3B0_ONLINE(info) (((info) & RI_E3B0_ONLINE) == RI_E3B0_ONLINE)
597 594
598static inline bool bnx2x_is_reg_online(struct bnx2x *bp, 595static bool bnx2x_is_reg_online(struct bnx2x *bp,
599 const struct reg_addr *reg_info) 596 const struct reg_addr *reg_info)
600{ 597{
601 if (CHIP_IS_E1(bp)) 598 if (CHIP_IS_E1(bp))
602 return IS_E1_ONLINE(reg_info->info); 599 return IS_E1_ONLINE(reg_info->info);
@@ -613,7 +610,7 @@ static inline bool bnx2x_is_reg_online(struct bnx2x *bp,
613} 610}
614 611
615/******* Paged registers info selectors ********/ 612/******* Paged registers info selectors ********/
616static inline const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp) 613static const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp)
617{ 614{
618 if (CHIP_IS_E2(bp)) 615 if (CHIP_IS_E2(bp))
619 return page_vals_e2; 616 return page_vals_e2;
@@ -623,7 +620,7 @@ static inline const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp)
623 return NULL; 620 return NULL;
624} 621}
625 622
626static inline u32 __bnx2x_get_page_reg_num(struct bnx2x *bp) 623static u32 __bnx2x_get_page_reg_num(struct bnx2x *bp)
627{ 624{
628 if (CHIP_IS_E2(bp)) 625 if (CHIP_IS_E2(bp))
629 return PAGE_MODE_VALUES_E2; 626 return PAGE_MODE_VALUES_E2;
@@ -633,7 +630,7 @@ static inline u32 __bnx2x_get_page_reg_num(struct bnx2x *bp)
633 return 0; 630 return 0;
634} 631}
635 632
636static inline const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp) 633static const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp)
637{ 634{
638 if (CHIP_IS_E2(bp)) 635 if (CHIP_IS_E2(bp))
639 return page_write_regs_e2; 636 return page_write_regs_e2;
@@ -643,7 +640,7 @@ static inline const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp)
643 return NULL; 640 return NULL;
644} 641}
645 642
646static inline u32 __bnx2x_get_page_write_num(struct bnx2x *bp) 643static u32 __bnx2x_get_page_write_num(struct bnx2x *bp)
647{ 644{
648 if (CHIP_IS_E2(bp)) 645 if (CHIP_IS_E2(bp))
649 return PAGE_WRITE_REGS_E2; 646 return PAGE_WRITE_REGS_E2;
@@ -653,7 +650,7 @@ static inline u32 __bnx2x_get_page_write_num(struct bnx2x *bp)
653 return 0; 650 return 0;
654} 651}
655 652
656static inline const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp) 653static const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp)
657{ 654{
658 if (CHIP_IS_E2(bp)) 655 if (CHIP_IS_E2(bp))
659 return page_read_regs_e2; 656 return page_read_regs_e2;
@@ -663,7 +660,7 @@ static inline const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp)
663 return NULL; 660 return NULL;
664} 661}
665 662
666static inline u32 __bnx2x_get_page_read_num(struct bnx2x *bp) 663static u32 __bnx2x_get_page_read_num(struct bnx2x *bp)
667{ 664{
668 if (CHIP_IS_E2(bp)) 665 if (CHIP_IS_E2(bp))
669 return PAGE_READ_REGS_E2; 666 return PAGE_READ_REGS_E2;
@@ -673,7 +670,7 @@ static inline u32 __bnx2x_get_page_read_num(struct bnx2x *bp)
673 return 0; 670 return 0;
674} 671}
675 672
676static inline int __bnx2x_get_regs_len(struct bnx2x *bp) 673static int __bnx2x_get_regs_len(struct bnx2x *bp)
677{ 674{
678 int num_pages = __bnx2x_get_page_reg_num(bp); 675 int num_pages = __bnx2x_get_page_reg_num(bp);
679 int page_write_num = __bnx2x_get_page_write_num(bp); 676 int page_write_num = __bnx2x_get_page_write_num(bp);
@@ -718,7 +715,7 @@ static int bnx2x_get_regs_len(struct net_device *dev)
718 * ("read address"). There may be more than one write address per "page" and 715 * ("read address"). There may be more than one write address per "page" and
719 * more than one read address per write address. 716 * more than one read address per write address.
720 */ 717 */
721static inline void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p) 718static void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p)
722{ 719{
723 u32 i, j, k, n; 720 u32 i, j, k, n;
724 /* addresses of the paged registers */ 721 /* addresses of the paged registers */
@@ -747,7 +744,7 @@ static inline void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p)
747 } 744 }
748} 745}
749 746
750static inline void __bnx2x_get_regs(struct bnx2x *bp, u32 *p) 747static void __bnx2x_get_regs(struct bnx2x *bp, u32 *p)
751{ 748{
752 u32 i, j; 749 u32 i, j;
753 750
@@ -1433,7 +1430,7 @@ static void bnx2x_get_ringparam(struct net_device *dev,
1433 else 1430 else
1434 ering->rx_pending = MAX_RX_AVAIL; 1431 ering->rx_pending = MAX_RX_AVAIL;
1435 1432
1436 ering->tx_max_pending = MAX_TX_AVAIL; 1433 ering->tx_max_pending = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
1437 ering->tx_pending = bp->tx_ring_size; 1434 ering->tx_pending = bp->tx_ring_size;
1438} 1435}
1439 1436
@@ -1451,7 +1448,7 @@ static int bnx2x_set_ringparam(struct net_device *dev,
1451 if ((ering->rx_pending > MAX_RX_AVAIL) || 1448 if ((ering->rx_pending > MAX_RX_AVAIL) ||
1452 (ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA : 1449 (ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
1453 MIN_RX_SIZE_TPA)) || 1450 MIN_RX_SIZE_TPA)) ||
1454 (ering->tx_pending > MAX_TX_AVAIL) || 1451 (ering->tx_pending > (IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL)) ||
1455 (ering->tx_pending <= MAX_SKB_FRAGS + 4)) { 1452 (ering->tx_pending <= MAX_SKB_FRAGS + 4)) {
1456 DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n"); 1453 DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
1457 return -EINVAL; 1454 return -EINVAL;
@@ -2212,7 +2209,7 @@ static void bnx2x_self_test(struct net_device *dev,
2212/* ethtool statistics are displayed for all regular ethernet queues and the 2209/* ethtool statistics are displayed for all regular ethernet queues and the
2213 * fcoe L2 queue if not disabled 2210 * fcoe L2 queue if not disabled
2214 */ 2211 */
2215static inline int bnx2x_num_stat_queues(struct bnx2x *bp) 2212static int bnx2x_num_stat_queues(struct bnx2x *bp)
2216{ 2213{
2217 return BNX2X_NUM_ETH_QUEUES(bp); 2214 return BNX2X_NUM_ETH_QUEUES(bp);
2218} 2215}
@@ -2396,10 +2393,7 @@ static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
2396 2393
2397static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev) 2394static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
2398{ 2395{
2399 struct bnx2x *bp = netdev_priv(dev); 2396 return T_ETH_INDIRECTION_TABLE_SIZE;
2400
2401 return (bp->multi_mode == ETH_RSS_MODE_DISABLED ?
2402 0 : T_ETH_INDIRECTION_TABLE_SIZE);
2403} 2397}
2404 2398
2405static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir) 2399static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir)
@@ -2445,7 +2439,7 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
2445 ind_table[i] = indir[i] + bp->fp->cl_id; 2439 ind_table[i] = indir[i] + bp->fp->cl_id;
2446 } 2440 }
2447 2441
2448 return bnx2x_config_rss_pf(bp, ind_table, false); 2442 return bnx2x_config_rss_eth(bp, ind_table, false);
2449} 2443}
2450 2444
2451static const struct ethtool_ops bnx2x_ethtool_ops = { 2445static const struct ethtool_ops bnx2x_ethtool_ops = {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index b9b263323436..426f77aa721a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -387,7 +387,7 @@
387 387
388#define STATS_QUERY_CMD_COUNT 16 388#define STATS_QUERY_CMD_COUNT 16
389 389
390#define NIV_LIST_TABLE_SIZE 4096 390#define AFEX_LIST_TABLE_SIZE 4096
391 391
392#define INVALID_VNIC_ID 0xFF 392#define INVALID_VNIC_ID 0xFF
393 393
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index dbff5915b81a..a440a8ba85f2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -833,6 +833,7 @@ struct shared_feat_cfg { /* NVRAM Offset */
833 #define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100 833 #define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100
834 #define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200 834 #define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200
835 #define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300 835 #define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300
836 #define SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE 0x00000400
836 837
837 /* The interval in seconds between sending LLDP packets. Set to zero 838 /* The interval in seconds between sending LLDP packets. Set to zero
838 to disable the feature */ 839 to disable the feature */
@@ -1235,6 +1236,8 @@ struct drv_func_mb {
1235 #define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL 0x00050006 1236 #define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL 0x00050006
1236 #define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000 1237 #define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000
1237 #define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234 1238 #define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234
1239 #define DRV_MSG_CODE_VRFY_AFEX_SUPPORTED 0xa2000000
1240 #define REQ_BC_VER_4_VRFY_AFEX_SUPPORTED 0x00070002
1238 #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014 1241 #define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014
1239 #define REQ_BC_VER_4_PFC_STATS_SUPPORTED 0x00070201 1242 #define REQ_BC_VER_4_PFC_STATS_SUPPORTED 0x00070201
1240 1243
@@ -1242,6 +1245,13 @@ struct drv_func_mb {
1242 #define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000 1245 #define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000
1243 1246
1244 #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 1247 #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
1248
1249 #define DRV_MSG_CODE_AFEX_DRIVER_SETMAC 0xd0000000
1250 #define DRV_MSG_CODE_AFEX_LISTGET_ACK 0xd1000000
1251 #define DRV_MSG_CODE_AFEX_LISTSET_ACK 0xd2000000
1252 #define DRV_MSG_CODE_AFEX_STATSGET_ACK 0xd3000000
1253 #define DRV_MSG_CODE_AFEX_VIFSET_ACK 0xd4000000
1254
1245 #define DRV_MSG_CODE_DRV_INFO_ACK 0xd8000000 1255 #define DRV_MSG_CODE_DRV_INFO_ACK 0xd8000000
1246 #define DRV_MSG_CODE_DRV_INFO_NACK 0xd9000000 1256 #define DRV_MSG_CODE_DRV_INFO_NACK 0xd9000000
1247 1257
@@ -1299,6 +1309,14 @@ struct drv_func_mb {
1299 #define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000 1309 #define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000
1300 #define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000 1310 #define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000
1301 #define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000 1311 #define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000
1312 #define FW_MSG_CODE_HW_SET_INVALID_IMAGE 0xb0100000
1313
1314 #define FW_MSG_CODE_AFEX_DRIVER_SETMAC_DONE 0xd0100000
1315 #define FW_MSG_CODE_AFEX_LISTGET_ACK 0xd1100000
1316 #define FW_MSG_CODE_AFEX_LISTSET_ACK 0xd2100000
1317 #define FW_MSG_CODE_AFEX_STATSGET_ACK 0xd3100000
1318 #define FW_MSG_CODE_AFEX_VIFSET_ACK 0xd4100000
1319
1302 #define FW_MSG_CODE_DRV_INFO_ACK 0xd8100000 1320 #define FW_MSG_CODE_DRV_INFO_ACK 0xd8100000
1303 #define FW_MSG_CODE_DRV_INFO_NACK 0xd9100000 1321 #define FW_MSG_CODE_DRV_INFO_NACK 0xd9100000
1304 1322
@@ -1357,6 +1375,12 @@ struct drv_func_mb {
1357 1375
1358 #define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000 1376 #define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000
1359 #define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000 1377 #define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000
1378 #define DRV_STATUS_AFEX_EVENT_MASK 0x03f00000
1379 #define DRV_STATUS_AFEX_LISTGET_REQ 0x00100000
1380 #define DRV_STATUS_AFEX_LISTSET_REQ 0x00200000
1381 #define DRV_STATUS_AFEX_STATSGET_REQ 0x00400000
1382 #define DRV_STATUS_AFEX_VIFSET_REQ 0x00800000
1383
1360 #define DRV_STATUS_DRV_INFO_REQ 0x04000000 1384 #define DRV_STATUS_DRV_INFO_REQ 0x04000000
1361 1385
1362 u32 virt_mac_upper; 1386 u32 virt_mac_upper;
@@ -1448,7 +1472,26 @@ struct func_mf_cfg {
1448 #define FUNC_MF_CFG_E1HOV_TAG_SHIFT 0 1472 #define FUNC_MF_CFG_E1HOV_TAG_SHIFT 0
1449 #define FUNC_MF_CFG_E1HOV_TAG_DEFAULT FUNC_MF_CFG_E1HOV_TAG_MASK 1473 #define FUNC_MF_CFG_E1HOV_TAG_DEFAULT FUNC_MF_CFG_E1HOV_TAG_MASK
1450 1474
1451 u32 reserved[2]; 1475 /* afex default VLAN ID - 12 bits */
1476 #define FUNC_MF_CFG_AFEX_VLAN_MASK 0x0fff0000
1477 #define FUNC_MF_CFG_AFEX_VLAN_SHIFT 16
1478
1479 u32 afex_config;
1480 #define FUNC_MF_CFG_AFEX_COS_FILTER_MASK 0x000000ff
1481 #define FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT 0
1482 #define FUNC_MF_CFG_AFEX_MBA_ENABLED_MASK 0x0000ff00
1483 #define FUNC_MF_CFG_AFEX_MBA_ENABLED_SHIFT 8
1484 #define FUNC_MF_CFG_AFEX_MBA_ENABLED_VAL 0x00000100
1485 #define FUNC_MF_CFG_AFEX_VLAN_MODE_MASK 0x000f0000
1486 #define FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT 16
1487
1488 u32 reserved;
1489};
1490
1491enum mf_cfg_afex_vlan_mode {
1492 FUNC_MF_CFG_AFEX_VLAN_TRUNK_MODE = 0,
1493 FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE,
1494 FUNC_MF_CFG_AFEX_VLAN_TRUNK_TAG_NATIVE_MODE
1452}; 1495};
1453 1496
1454/* This structure is not applicable and should not be accessed on 57711 */ 1497/* This structure is not applicable and should not be accessed on 57711 */
@@ -1945,18 +1988,29 @@ struct shmem2_region {
1945 1988
1946 u32 nvm_retain_bitmap_addr; /* 0x0070 */ 1989 u32 nvm_retain_bitmap_addr; /* 0x0070 */
1947 1990
1948 u32 reserved1; /* 0x0074 */ 1991 /* afex support of that driver */
1992 u32 afex_driver_support; /* 0x0074 */
1993 #define SHMEM_AFEX_VERSION_MASK 0x100f
1994 #define SHMEM_AFEX_SUPPORTED_VERSION_ONE 0x1001
1995 #define SHMEM_AFEX_REDUCED_DRV_LOADED 0x8000
1949 1996
1950 u32 reserved2[E2_FUNC_MAX]; 1997 /* driver receives addr in scratchpad to which it should respond */
1998 u32 afex_scratchpad_addr_to_write[E2_FUNC_MAX];
1951 1999
1952 u32 reserved3[E2_FUNC_MAX];/* 0x0088 */ 2000 /* generic params from MCP to driver (value depends on the msg sent
1953 u32 reserved4[E2_FUNC_MAX];/* 0x0098 */ 2001 * to driver
2002 */
2003 u32 afex_param1_to_driver[E2_FUNC_MAX]; /* 0x0088 */
2004 u32 afex_param2_to_driver[E2_FUNC_MAX]; /* 0x0098 */
1954 2005
1955 u32 swim_base_addr; /* 0x0108 */ 2006 u32 swim_base_addr; /* 0x0108 */
1956 u32 swim_funcs; 2007 u32 swim_funcs;
1957 u32 swim_main_cb; 2008 u32 swim_main_cb;
1958 2009
1959 u32 reserved5[2]; 2010 /* bitmap notifying which VIF profiles stored in nvram are enabled by
2011 * switch
2012 */
2013 u32 afex_profiles_enabled[2];
1960 2014
1961 /* generic flags controlled by the driver */ 2015 /* generic flags controlled by the driver */
1962 u32 drv_flags; 2016 u32 drv_flags;
@@ -2696,10 +2750,51 @@ union drv_info_to_mcp {
2696 struct fcoe_stats_info fcoe_stat; 2750 struct fcoe_stats_info fcoe_stat;
2697 struct iscsi_stats_info iscsi_stat; 2751 struct iscsi_stats_info iscsi_stat;
2698}; 2752};
2753
2754/* stats collected for afex.
2755 * NOTE: structure is exactly as expected to be received by the switch.
2756 * order must remain exactly as is unless protocol changes !
2757 */
2758struct afex_stats {
2759 u32 tx_unicast_frames_hi;
2760 u32 tx_unicast_frames_lo;
2761 u32 tx_unicast_bytes_hi;
2762 u32 tx_unicast_bytes_lo;
2763 u32 tx_multicast_frames_hi;
2764 u32 tx_multicast_frames_lo;
2765 u32 tx_multicast_bytes_hi;
2766 u32 tx_multicast_bytes_lo;
2767 u32 tx_broadcast_frames_hi;
2768 u32 tx_broadcast_frames_lo;
2769 u32 tx_broadcast_bytes_hi;
2770 u32 tx_broadcast_bytes_lo;
2771 u32 tx_frames_discarded_hi;
2772 u32 tx_frames_discarded_lo;
2773 u32 tx_frames_dropped_hi;
2774 u32 tx_frames_dropped_lo;
2775
2776 u32 rx_unicast_frames_hi;
2777 u32 rx_unicast_frames_lo;
2778 u32 rx_unicast_bytes_hi;
2779 u32 rx_unicast_bytes_lo;
2780 u32 rx_multicast_frames_hi;
2781 u32 rx_multicast_frames_lo;
2782 u32 rx_multicast_bytes_hi;
2783 u32 rx_multicast_bytes_lo;
2784 u32 rx_broadcast_frames_hi;
2785 u32 rx_broadcast_frames_lo;
2786 u32 rx_broadcast_bytes_hi;
2787 u32 rx_broadcast_bytes_lo;
2788 u32 rx_frames_discarded_hi;
2789 u32 rx_frames_discarded_lo;
2790 u32 rx_frames_dropped_hi;
2791 u32 rx_frames_dropped_lo;
2792};
2793
2699#define BCM_5710_FW_MAJOR_VERSION 7 2794#define BCM_5710_FW_MAJOR_VERSION 7
2700#define BCM_5710_FW_MINOR_VERSION 2 2795#define BCM_5710_FW_MINOR_VERSION 2
2701#define BCM_5710_FW_REVISION_VERSION 16 2796#define BCM_5710_FW_REVISION_VERSION 51
2702#define BCM_5710_FW_ENGINEERING_VERSION 0 2797#define BCM_5710_FW_ENGINEERING_VERSION 0
2703#define BCM_5710_FW_COMPILE_FLAGS 1 2798#define BCM_5710_FW_COMPILE_FLAGS 1
2704 2799
2705 2800
@@ -3389,7 +3484,7 @@ struct client_init_tx_data {
3389#define CLIENT_INIT_TX_DATA_RESERVED1 (0xFFF<<4) 3484#define CLIENT_INIT_TX_DATA_RESERVED1 (0xFFF<<4)
3390#define CLIENT_INIT_TX_DATA_RESERVED1_SHIFT 4 3485#define CLIENT_INIT_TX_DATA_RESERVED1_SHIFT 4
3391 u8 default_vlan_flg; 3486 u8 default_vlan_flg;
3392 u8 reserved2; 3487 u8 force_default_pri_flg;
3393 __le32 reserved3; 3488 __le32 reserved3;
3394}; 3489};
3395 3490
@@ -4375,8 +4470,21 @@ struct fcoe_statistics_params {
4375 4470
4376 4471
4377/* 4472/*
4473 * The data afex vif list ramrod need
4474 */
4475struct afex_vif_list_ramrod_data {
4476 u8 afex_vif_list_command;
4477 u8 func_bit_map;
4478 __le16 vif_list_index;
4479 u8 func_to_clear;
4480 u8 echo;
4481 __le16 reserved1;
4482};
4483
4484
4485/*
4378 * cfc delete event data 4486 * cfc delete event data
4379*/ 4487 */
4380struct cfc_del_event_data { 4488struct cfc_del_event_data {
4381 u32 cid; 4489 u32 cid;
4382 u32 reserved0; 4490 u32 reserved0;
@@ -4448,6 +4556,65 @@ struct cmng_struct_per_port {
4448 struct cmng_flags_per_port flags; 4556 struct cmng_flags_per_port flags;
4449}; 4557};
4450 4558
4559/*
4560 * a single rate shaping counter. can be used as protocol or vnic counter
4561 */
4562struct rate_shaping_counter {
4563 u32 quota;
4564#if defined(__BIG_ENDIAN)
4565 u16 __reserved0;
4566 u16 rate;
4567#elif defined(__LITTLE_ENDIAN)
4568 u16 rate;
4569 u16 __reserved0;
4570#endif
4571};
4572
4573/*
4574 * per-vnic rate shaping variables
4575 */
4576struct rate_shaping_vars_per_vn {
4577 struct rate_shaping_counter vn_counter;
4578};
4579
4580/*
4581 * per-vnic fairness variables
4582 */
4583struct fairness_vars_per_vn {
4584 u32 cos_credit_delta[MAX_COS_NUMBER];
4585 u32 vn_credit_delta;
4586 u32 __reserved0;
4587};
4588
4589/*
4590 * cmng port init state
4591 */
4592struct cmng_vnic {
4593 struct rate_shaping_vars_per_vn vnic_max_rate[4];
4594 struct fairness_vars_per_vn vnic_min_rate[4];
4595};
4596
4597/*
4598 * cmng port init state
4599 */
4600struct cmng_init {
4601 struct cmng_struct_per_port port;
4602 struct cmng_vnic vnic;
4603};
4604
4605
4606/*
4607 * driver parameters for congestion management init, all rates are in Mbps
4608 */
4609struct cmng_init_input {
4610 u32 port_rate;
4611 u16 vnic_min_rate[4];
4612 u16 vnic_max_rate[4];
4613 u16 cos_min_rate[MAX_COS_NUMBER];
4614 u16 cos_to_pause_mask[MAX_COS_NUMBER];
4615 struct cmng_flags_per_port flags;
4616};
4617
4451 4618
4452/* 4619/*
4453 * Protocol-common command ID for slow path elements 4620 * Protocol-common command ID for slow path elements
@@ -4462,7 +4629,7 @@ enum common_spqe_cmd_id {
4462 RAMROD_CMD_ID_COMMON_STAT_QUERY, 4629 RAMROD_CMD_ID_COMMON_STAT_QUERY,
4463 RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 4630 RAMROD_CMD_ID_COMMON_STOP_TRAFFIC,
4464 RAMROD_CMD_ID_COMMON_START_TRAFFIC, 4631 RAMROD_CMD_ID_COMMON_START_TRAFFIC,
4465 RAMROD_CMD_ID_COMMON_RESERVED1, 4632 RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS,
4466 MAX_COMMON_SPQE_CMD_ID 4633 MAX_COMMON_SPQE_CMD_ID
4467}; 4634};
4468 4635
@@ -4670,6 +4837,17 @@ struct malicious_vf_event_data {
4670}; 4837};
4671 4838
4672/* 4839/*
4840 * vif list event data
4841 */
4842struct vif_list_event_data {
4843 u8 func_bit_map;
4844 u8 echo;
4845 __le16 reserved0;
4846 __le32 reserved1;
4847 __le32 reserved2;
4848};
4849
4850/*
4673 * union for all event ring message types 4851 * union for all event ring message types
4674 */ 4852 */
4675union event_data { 4853union event_data {
@@ -4678,6 +4856,7 @@ union event_data {
4678 struct cfc_del_event_data cfc_del_event; 4856 struct cfc_del_event_data cfc_del_event;
4679 struct vf_flr_event_data vf_flr_event; 4857 struct vf_flr_event_data vf_flr_event;
4680 struct malicious_vf_event_data malicious_vf_event; 4858 struct malicious_vf_event_data malicious_vf_event;
4859 struct vif_list_event_data vif_list_event;
4681}; 4860};
4682 4861
4683 4862
@@ -4743,7 +4922,7 @@ enum event_ring_opcode {
4743 EVENT_RING_OPCODE_FORWARD_SETUP, 4922 EVENT_RING_OPCODE_FORWARD_SETUP,
4744 EVENT_RING_OPCODE_RSS_UPDATE_RULES, 4923 EVENT_RING_OPCODE_RSS_UPDATE_RULES,
4745 EVENT_RING_OPCODE_FUNCTION_UPDATE, 4924 EVENT_RING_OPCODE_FUNCTION_UPDATE,
4746 EVENT_RING_OPCODE_RESERVED1, 4925 EVENT_RING_OPCODE_AFEX_VIF_LISTS,
4747 EVENT_RING_OPCODE_SET_MAC, 4926 EVENT_RING_OPCODE_SET_MAC,
4748 EVENT_RING_OPCODE_CLASSIFICATION_RULES, 4927 EVENT_RING_OPCODE_CLASSIFICATION_RULES,
4749 EVENT_RING_OPCODE_FILTERS_RULES, 4928 EVENT_RING_OPCODE_FILTERS_RULES,
@@ -4763,16 +4942,6 @@ enum fairness_mode {
4763 4942
4764 4943
4765/* 4944/*
4766 * per-vnic fairness variables
4767 */
4768struct fairness_vars_per_vn {
4769 u32 cos_credit_delta[MAX_COS_NUMBER];
4770 u32 vn_credit_delta;
4771 u32 __reserved0;
4772};
4773
4774
4775/*
4776 * Priority and cos 4945 * Priority and cos
4777 */ 4946 */
4778struct priority_cos { 4947struct priority_cos {
@@ -4800,12 +4969,27 @@ struct flow_control_configuration {
4800struct function_start_data { 4969struct function_start_data {
4801 __le16 function_mode; 4970 __le16 function_mode;
4802 __le16 sd_vlan_tag; 4971 __le16 sd_vlan_tag;
4803 u16 reserved; 4972 __le16 vif_id;
4804 u8 path_id; 4973 u8 path_id;
4805 u8 network_cos_mode; 4974 u8 network_cos_mode;
4806}; 4975};
4807 4976
4808 4977
4978struct function_update_data {
4979 u8 vif_id_change_flg;
4980 u8 afex_default_vlan_change_flg;
4981 u8 allowed_priorities_change_flg;
4982 u8 network_cos_mode_change_flg;
4983 __le16 vif_id;
4984 __le16 afex_default_vlan;
4985 u8 allowed_priorities;
4986 u8 network_cos_mode;
4987 u8 lb_mode_en;
4988 u8 reserved0;
4989 __le32 reserved1;
4990};
4991
4992
4809/* 4993/*
4810 * FW version stored in the Xstorm RAM 4994 * FW version stored in the Xstorm RAM
4811 */ 4995 */
@@ -5003,7 +5187,7 @@ enum mf_mode {
5003 SINGLE_FUNCTION, 5187 SINGLE_FUNCTION,
5004 MULTI_FUNCTION_SD, 5188 MULTI_FUNCTION_SD,
5005 MULTI_FUNCTION_SI, 5189 MULTI_FUNCTION_SI,
5006 MULTI_FUNCTION_RESERVED, 5190 MULTI_FUNCTION_AFEX,
5007 MAX_MF_MODE 5191 MAX_MF_MODE
5008}; 5192};
5009 5193
@@ -5128,6 +5312,7 @@ union protocol_common_specific_data {
5128 u8 protocol_data[8]; 5312 u8 protocol_data[8];
5129 struct regpair phy_address; 5313 struct regpair phy_address;
5130 struct regpair mac_config_addr; 5314 struct regpair mac_config_addr;
5315 struct afex_vif_list_ramrod_data afex_vif_list_data;
5131}; 5316};
5132 5317
5133/* 5318/*
@@ -5140,29 +5325,6 @@ struct protocol_common_spe {
5140 5325
5141 5326
5142/* 5327/*
5143 * a single rate shaping counter. can be used as protocol or vnic counter
5144 */
5145struct rate_shaping_counter {
5146 u32 quota;
5147#if defined(__BIG_ENDIAN)
5148 u16 __reserved0;
5149 u16 rate;
5150#elif defined(__LITTLE_ENDIAN)
5151 u16 rate;
5152 u16 __reserved0;
5153#endif
5154};
5155
5156
5157/*
5158 * per-vnic rate shaping variables
5159 */
5160struct rate_shaping_vars_per_vn {
5161 struct rate_shaping_counter vn_counter;
5162};
5163
5164
5165/*
5166 * The send queue element 5328 * The send queue element
5167 */ 5329 */
5168struct slow_path_element { 5330struct slow_path_element {
@@ -5330,6 +5492,18 @@ enum vf_pf_channel_state {
5330 5492
5331 5493
5332/* 5494/*
5495 * vif_list_rule_kind
5496 */
5497enum vif_list_rule_kind {
5498 VIF_LIST_RULE_SET,
5499 VIF_LIST_RULE_GET,
5500 VIF_LIST_RULE_CLEAR_ALL,
5501 VIF_LIST_RULE_CLEAR_FUNC,
5502 MAX_VIF_LIST_RULE_KIND
5503};
5504
5505
5506/*
5333 * zone A per-queue data 5507 * zone A per-queue data
5334 */ 5508 */
5335struct xstorm_queue_zone_data { 5509struct xstorm_queue_zone_data {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index 29f5c3cca31a..559c396d45cc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -125,7 +125,7 @@ enum {
125 MODE_MF = 0x00000100, 125 MODE_MF = 0x00000100,
126 MODE_MF_SD = 0x00000200, 126 MODE_MF_SD = 0x00000200,
127 MODE_MF_SI = 0x00000400, 127 MODE_MF_SI = 0x00000400,
128 MODE_MF_NIV = 0x00000800, 128 MODE_MF_AFEX = 0x00000800,
129 MODE_E3_A0 = 0x00001000, 129 MODE_E3_A0 = 0x00001000,
130 MODE_E3_B0 = 0x00002000, 130 MODE_E3_B0 = 0x00002000,
131 MODE_COS3 = 0x00004000, 131 MODE_COS3 = 0x00004000,
@@ -241,7 +241,8 @@ static inline void bnx2x_map_q_cos(struct bnx2x *bp, u32 q_num, u32 new_cos)
241 REG_WR(bp, reg_addr, reg_bit_map | q_bit_map); 241 REG_WR(bp, reg_addr, reg_bit_map | q_bit_map);
242 242
243 /* set/clear queue bit in command-queue bit map 243 /* set/clear queue bit in command-queue bit map
244 (E2/E3A0 only, valid COS values are 0/1) */ 244 * (E2/E3A0 only, valid COS values are 0/1)
245 */
245 if (!(INIT_MODE_FLAGS(bp) & MODE_E3_B0)) { 246 if (!(INIT_MODE_FLAGS(bp) & MODE_E3_B0)) {
246 reg_addr = BNX2X_Q_CMDQ_REG_ADDR(pf_q_num); 247 reg_addr = BNX2X_Q_CMDQ_REG_ADDR(pf_q_num);
247 reg_bit_map = REG_RD(bp, reg_addr); 248 reg_bit_map = REG_RD(bp, reg_addr);
@@ -277,7 +278,215 @@ static inline void bnx2x_dcb_config_qm(struct bnx2x *bp, enum cos_mode mode,
277} 278}
278 279
279 280
280/* Returns the index of start or end of a specific block stage in ops array*/ 281/* congestion managment port init api description
282 * the api works as follows:
283 * the driver should pass the cmng_init_input struct, the port_init function
284 * will prepare the required internal ram structure which will be passed back
285 * to the driver (cmng_init) that will write it into the internal ram.
286 *
287 * IMPORTANT REMARKS:
288 * 1. the cmng_init struct does not represent the contiguous internal ram
289 * structure. the driver should use the XSTORM_CMNG_PERPORT_VARS_OFFSET
290 * offset in order to write the port sub struct and the
291 * PFID_FROM_PORT_AND_VNIC offset for writing the vnic sub struct (in other
292 * words - don't use memcpy!).
293 * 2. although the cmng_init struct is filled for the maximal vnic number
294 * possible, the driver should only write the valid vnics into the internal
295 * ram according to the appropriate port mode.
296 */
297#define BITS_TO_BYTES(x) ((x)/8)
298
299/* CMNG constants, as derived from system spec calculations */
300
301/* default MIN rate in case VNIC min rate is configured to zero- 100Mbps */
302#define DEF_MIN_RATE 100
303
304/* resolution of the rate shaping timer - 400 usec */
305#define RS_PERIODIC_TIMEOUT_USEC 400
306
307/* number of bytes in single QM arbitration cycle -
308 * coefficient for calculating the fairness timer
309 */
310#define QM_ARB_BYTES 160000
311
312/* resolution of Min algorithm 1:100 */
313#define MIN_RES 100
314
315/* how many bytes above threshold for
316 * the minimal credit of Min algorithm
317 */
318#define MIN_ABOVE_THRESH 32768
319
320/* Fairness algorithm integration time coefficient -
321 * for calculating the actual Tfair
322 */
323#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)
324
325/* Memory of fairness algorithm - 2 cycles */
326#define FAIR_MEM 2
327#define SAFC_TIMEOUT_USEC 52
328
329#define SDM_TICKS 4
330
331
332static inline void bnx2x_init_max(const struct cmng_init_input *input_data,
333 u32 r_param, struct cmng_init *ram_data)
334{
335 u32 vnic;
336 struct cmng_vnic *vdata = &ram_data->vnic;
337 struct cmng_struct_per_port *pdata = &ram_data->port;
338 /* rate shaping per-port variables
339 * 100 micro seconds in SDM ticks = 25
340 * since each tick is 4 microSeconds
341 */
342
343 pdata->rs_vars.rs_periodic_timeout =
344 RS_PERIODIC_TIMEOUT_USEC / SDM_TICKS;
345
346 /* this is the threshold below which no timer arming will occur.
347 * 1.25 coefficient is for the threshold to be a little bigger
348 * then the real time to compensate for timer in-accuracy
349 */
350 pdata->rs_vars.rs_threshold =
351 (5 * RS_PERIODIC_TIMEOUT_USEC * r_param)/4;
352
353 /* rate shaping per-vnic variables */
354 for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) {
355 /* global vnic counter */
356 vdata->vnic_max_rate[vnic].vn_counter.rate =
357 input_data->vnic_max_rate[vnic];
358 /* maximal Mbps for this vnic
359 * the quota in each timer period - number of bytes
360 * transmitted in this period
361 */
362 vdata->vnic_max_rate[vnic].vn_counter.quota =
363 RS_PERIODIC_TIMEOUT_USEC *
364 (u32)vdata->vnic_max_rate[vnic].vn_counter.rate / 8;
365 }
366
367}
368
369static inline void bnx2x_init_min(const struct cmng_init_input *input_data,
370 u32 r_param, struct cmng_init *ram_data)
371{
372 u32 vnic, fair_periodic_timeout_usec, vnicWeightSum, tFair;
373 struct cmng_vnic *vdata = &ram_data->vnic;
374 struct cmng_struct_per_port *pdata = &ram_data->port;
375
376 /* this is the resolution of the fairness timer */
377 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
378
379 /* fairness per-port variables
380 * for 10G it is 1000usec. for 1G it is 10000usec.
381 */
382 tFair = T_FAIR_COEF / input_data->port_rate;
383
384 /* this is the threshold below which we won't arm the timer anymore */
385 pdata->fair_vars.fair_threshold = QM_ARB_BYTES;
386
387 /* we multiply by 1e3/8 to get bytes/msec. We don't want the credits
388 * to pass a credit of the T_FAIR*FAIR_MEM (algorithm resolution)
389 */
390 pdata->fair_vars.upper_bound = r_param * tFair * FAIR_MEM;
391
392 /* since each tick is 4 microSeconds */
393 pdata->fair_vars.fairness_timeout =
394 fair_periodic_timeout_usec / SDM_TICKS;
395
396 /* calculate sum of weights */
397 vnicWeightSum = 0;
398
399 for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++)
400 vnicWeightSum += input_data->vnic_min_rate[vnic];
401
402 /* global vnic counter */
403 if (vnicWeightSum > 0) {
404 /* fairness per-vnic variables */
405 for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) {
406 /* this is the credit for each period of the fairness
407 * algorithm - number of bytes in T_FAIR (this vnic
408 * share of the port rate)
409 */
410 vdata->vnic_min_rate[vnic].vn_credit_delta =
411 (u32)input_data->vnic_min_rate[vnic] * 100 *
412 (T_FAIR_COEF / (8 * 100 * vnicWeightSum));
413 if (vdata->vnic_min_rate[vnic].vn_credit_delta <
414 pdata->fair_vars.fair_threshold +
415 MIN_ABOVE_THRESH) {
416 vdata->vnic_min_rate[vnic].vn_credit_delta =
417 pdata->fair_vars.fair_threshold +
418 MIN_ABOVE_THRESH;
419 }
420 }
421 }
422}
423
424static inline void bnx2x_init_fw_wrr(const struct cmng_init_input *input_data,
425 u32 r_param, struct cmng_init *ram_data)
426{
427 u32 vnic, cos;
428 u32 cosWeightSum = 0;
429 struct cmng_vnic *vdata = &ram_data->vnic;
430 struct cmng_struct_per_port *pdata = &ram_data->port;
431
432 for (cos = 0; cos < MAX_COS_NUMBER; cos++)
433 cosWeightSum += input_data->cos_min_rate[cos];
434
435 if (cosWeightSum > 0) {
436
437 for (vnic = 0; vnic < BNX2X_PORT2_MODE_NUM_VNICS; vnic++) {
438 /* Since cos and vnic shouldn't work together the rate
439 * to divide between the coses is the port rate.
440 */
441 u32 *ccd = vdata->vnic_min_rate[vnic].cos_credit_delta;
442 for (cos = 0; cos < MAX_COS_NUMBER; cos++) {
443 /* this is the credit for each period of
444 * the fairness algorithm - number of bytes
445 * in T_FAIR (this cos share of the vnic rate)
446 */
447 ccd[cos] =
448 (u32)input_data->cos_min_rate[cos] * 100 *
449 (T_FAIR_COEF / (8 * 100 * cosWeightSum));
450 if (ccd[cos] < pdata->fair_vars.fair_threshold
451 + MIN_ABOVE_THRESH) {
452 ccd[cos] =
453 pdata->fair_vars.fair_threshold +
454 MIN_ABOVE_THRESH;
455 }
456 }
457 }
458 }
459}
460
461static inline void bnx2x_init_safc(const struct cmng_init_input *input_data,
462 struct cmng_init *ram_data)
463{
464 /* in microSeconds */
465 ram_data->port.safc_vars.safc_timeout_usec = SAFC_TIMEOUT_USEC;
466}
467
468/* Congestion management port init */
469static inline void bnx2x_init_cmng(const struct cmng_init_input *input_data,
470 struct cmng_init *ram_data)
471{
472 u32 r_param;
473 memset(ram_data, 0, sizeof(struct cmng_init));
474
475 ram_data->port.flags = input_data->flags;
476
477 /* number of bytes transmitted in a rate of 10Gbps
478 * in one usec = 1.25KB.
479 */
480 r_param = BITS_TO_BYTES(input_data->port_rate);
481 bnx2x_init_max(input_data, r_param, ram_data);
482 bnx2x_init_min(input_data, r_param, ram_data);
483 bnx2x_init_fw_wrr(input_data, r_param, ram_data);
484 bnx2x_init_safc(input_data, ram_data);
485}
486
487
488
489/* Returns the index of start or end of a specific block stage in ops array */
281#define BLOCK_OPS_IDX(block, stage, end) \ 490#define BLOCK_OPS_IDX(block, stage, end) \
282 (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end)) 491 (2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))
283 492
@@ -499,9 +708,7 @@ static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp)
499 bnx2x_set_mcp_parity(bp, false); 708 bnx2x_set_mcp_parity(bp, false);
500} 709}
501 710
502/** 711/* Clear the parity error status registers. */
503 * Clear the parity error status registers.
504 */
505static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp) 712static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp)
506{ 713{
507 int i; 714 int i;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 64392ec410a3..a3fb7215cd89 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -138,7 +138,6 @@
138 138
139 139
140 140
141/* */
142#define SFP_EEPROM_CON_TYPE_ADDR 0x2 141#define SFP_EEPROM_CON_TYPE_ADDR 0x2
143 #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7 142 #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7
144 #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21 143 #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21
@@ -404,8 +403,7 @@ static void bnx2x_ets_e2e3a0_disabled(struct link_params *params)
404 403
405 DP(NETIF_MSG_LINK, "ETS E2E3 disabled configuration\n"); 404 DP(NETIF_MSG_LINK, "ETS E2E3 disabled configuration\n");
406 405
407 /* 406 /* mapping between entry priority to client number (0,1,2 -debug and
408 * mapping between entry priority to client number (0,1,2 -debug and
409 * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST) 407 * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
410 * 3bits client num. 408 * 3bits client num.
411 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 409 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
@@ -413,8 +411,7 @@ static void bnx2x_ets_e2e3a0_disabled(struct link_params *params)
413 */ 411 */
414 412
415 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688); 413 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688);
416 /* 414 /* Bitmap of 5bits length. Each bit specifies whether the entry behaves
417 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
418 * as strict. Bits 0,1,2 - debug and management entries, 3 - 415 * as strict. Bits 0,1,2 - debug and management entries, 3 -
419 * COS0 entry, 4 - COS1 entry. 416 * COS0 entry, 4 - COS1 entry.
420 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT 417 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
@@ -425,13 +422,11 @@ static void bnx2x_ets_e2e3a0_disabled(struct link_params *params)
425 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7); 422 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
426 /* defines which entries (clients) are subjected to WFQ arbitration */ 423 /* defines which entries (clients) are subjected to WFQ arbitration */
427 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0); 424 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
428 /* 425 /* For strict priority entries defines the number of consecutive
429 * For strict priority entries defines the number of consecutive
430 * slots for the highest priority. 426 * slots for the highest priority.
431 */ 427 */
432 REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); 428 REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
433 /* 429 /* mapping between the CREDIT_WEIGHT registers and actual client
434 * mapping between the CREDIT_WEIGHT registers and actual client
435 * numbers 430 * numbers
436 */ 431 */
437 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0); 432 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0);
@@ -443,8 +438,7 @@ static void bnx2x_ets_e2e3a0_disabled(struct link_params *params)
443 REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0); 438 REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0);
444 /* ETS mode disable */ 439 /* ETS mode disable */
445 REG_WR(bp, PBF_REG_ETS_ENABLED, 0); 440 REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
446 /* 441 /* If ETS mode is enabled (there is no strict priority) defines a WFQ
447 * If ETS mode is enabled (there is no strict priority) defines a WFQ
448 * weight for COS0/COS1. 442 * weight for COS0/COS1.
449 */ 443 */
450 REG_WR(bp, PBF_REG_COS0_WEIGHT, 0x2710); 444 REG_WR(bp, PBF_REG_COS0_WEIGHT, 0x2710);
@@ -471,10 +465,9 @@ static u32 bnx2x_ets_get_min_w_val_nig(const struct link_vars *vars)
471 min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS; 465 min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS;
472 } else 466 } else
473 min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS; 467 min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
474 /** 468 /* If the link isn't up (static configuration for example ) The
475 * If the link isn't up (static configuration for example ) The 469 * link will be according to 20GBPS.
476 * link will be according to 20GBPS. 470 */
477 */
478 return min_w_val; 471 return min_w_val;
479} 472}
480/****************************************************************************** 473/******************************************************************************
@@ -538,8 +531,7 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
538 struct bnx2x *bp = params->bp; 531 struct bnx2x *bp = params->bp;
539 const u8 port = params->port; 532 const u8 port = params->port;
540 const u32 min_w_val = bnx2x_ets_get_min_w_val_nig(vars); 533 const u32 min_w_val = bnx2x_ets_get_min_w_val_nig(vars);
541 /** 534 /* Mapping between entry priority to client number (0,1,2 -debug and
542 * mapping between entry priority to client number (0,1,2 -debug and
543 * management clients, 3 - COS0 client, 4 - COS1, ... 8 - 535 * management clients, 3 - COS0 client, 4 - COS1, ... 8 -
544 * COS5)(HIGHEST) 4bits client num.TODO_ETS - Should be done by 536 * COS5)(HIGHEST) 4bits client num.TODO_ETS - Should be done by
545 * reset value or init tool 537 * reset value or init tool
@@ -551,18 +543,14 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
551 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, 0x76543210); 543 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, 0x76543210);
552 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, 0x8); 544 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, 0x8);
553 } 545 }
554 /** 546 /* For strict priority entries defines the number of consecutive
555 * For strict priority entries defines the number of consecutive 547 * slots for the highest priority.
556 * slots for the highest priority. 548 */
557 */
558 /* TODO_ETS - Should be done by reset value or init tool */
559 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS : 549 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS :
560 NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); 550 NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
561 /** 551 /* Mapping between the CREDIT_WEIGHT registers and actual client
562 * mapping between the CREDIT_WEIGHT registers and actual client
563 * numbers 552 * numbers
564 */ 553 */
565 /* TODO_ETS - Should be done by reset value or init tool */
566 if (port) { 554 if (port) {
567 /*Port 1 has 6 COS*/ 555 /*Port 1 has 6 COS*/
568 REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB, 0x210543); 556 REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB, 0x210543);
@@ -574,8 +562,7 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
574 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x5); 562 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x5);
575 } 563 }
576 564
577 /** 565 /* Bitmap of 5bits length. Each bit specifies whether the entry behaves
578 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
579 * as strict. Bits 0,1,2 - debug and management entries, 3 - 566 * as strict. Bits 0,1,2 - debug and management entries, 3 -
580 * COS0 entry, 4 - COS1 entry. 567 * COS0 entry, 4 - COS1 entry.
581 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT 568 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
@@ -590,13 +577,12 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
590 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ : 577 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
591 NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0); 578 NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
592 579
593 /** 580 /* Please notice the register address are note continuous and a
594 * Please notice the register address are note continuous and a 581 * for here is note appropriate.In 2 port mode port0 only COS0-5
595 * for here is note appropriate.In 2 port mode port0 only COS0-5 582 * can be used. DEBUG1,DEBUG1,MGMT are never used for WFQ* In 4
596 * can be used. DEBUG1,DEBUG1,MGMT are never used for WFQ* In 4 583 * port mode port1 only COS0-2 can be used. DEBUG1,DEBUG1,MGMT
597 * port mode port1 only COS0-2 can be used. DEBUG1,DEBUG1,MGMT 584 * are never used for WFQ
598 * are never used for WFQ 585 */
599 */
600 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 : 586 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
601 NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0x0); 587 NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0x0);
602 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 : 588 REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
@@ -633,10 +619,9 @@ static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf(
633 u32 base_upper_bound = 0; 619 u32 base_upper_bound = 0;
634 u8 max_cos = 0; 620 u8 max_cos = 0;
635 u8 i = 0; 621 u8 i = 0;
636 /** 622 /* In 2 port mode port0 has COS0-5 that can be used for WFQ.In 4
637 * In 2 port mode port0 has COS0-5 that can be used for WFQ.In 4 623 * port mode port1 has COS0-2 that can be used for WFQ.
638 * port mode port1 has COS0-2 that can be used for WFQ. 624 */
639 */
640 if (!port) { 625 if (!port) {
641 base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0; 626 base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0;
642 max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0; 627 max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
@@ -666,8 +651,7 @@ static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params)
666 u32 base_weight = 0; 651 u32 base_weight = 0;
667 u8 max_cos = 0; 652 u8 max_cos = 0;
668 653
669 /** 654 /* Mapping between entry priority to client number 0 - COS0
670 * mapping between entry priority to client number 0 - COS0
671 * client, 2 - COS1, ... 5 - COS5)(HIGHEST) 4bits client num. 655 * client, 2 - COS1, ... 5 - COS5)(HIGHEST) 4bits client num.
672 * TODO_ETS - Should be done by reset value or init tool 656 * TODO_ETS - Should be done by reset value or init tool
673 */ 657 */
@@ -695,10 +679,9 @@ static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params)
695 679
696 REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 : 680 REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
697 PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 , 0); 681 PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0 , 0);
698 /** 682 /* In 2 port mode port0 has COS0-5 that can be used for WFQ.
699 * In 2 port mode port0 has COS0-5 that can be used for WFQ. 683 * In 4 port mode port1 has COS0-2 that can be used for WFQ.
700 * In 4 port mode port1 has COS0-2 that can be used for WFQ. 684 */
701 */
702 if (!port) { 685 if (!port) {
703 base_weight = PBF_REG_COS0_WEIGHT_P0; 686 base_weight = PBF_REG_COS0_WEIGHT_P0;
704 max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0; 687 max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
@@ -738,7 +721,7 @@ static int bnx2x_ets_e3b0_disabled(const struct link_params *params,
738/****************************************************************************** 721/******************************************************************************
739* Description: 722* Description:
740* Disable will return basicly the values to init values. 723* Disable will return basicly the values to init values.
741*. 724*
742******************************************************************************/ 725******************************************************************************/
743int bnx2x_ets_disabled(struct link_params *params, 726int bnx2x_ets_disabled(struct link_params *params,
744 struct link_vars *vars) 727 struct link_vars *vars)
@@ -867,7 +850,7 @@ static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
867/****************************************************************************** 850/******************************************************************************
868* Description: 851* Description:
869* Calculate the total BW.A value of 0 isn't legal. 852* Calculate the total BW.A value of 0 isn't legal.
870*. 853*
871******************************************************************************/ 854******************************************************************************/
872static int bnx2x_ets_e3b0_get_total_bw( 855static int bnx2x_ets_e3b0_get_total_bw(
873 const struct link_params *params, 856 const struct link_params *params,
@@ -879,7 +862,6 @@ static int bnx2x_ets_e3b0_get_total_bw(
879 u8 is_bw_cos_exist = 0; 862 u8 is_bw_cos_exist = 0;
880 863
881 *total_bw = 0 ; 864 *total_bw = 0 ;
882
883 /* Calculate total BW requested */ 865 /* Calculate total BW requested */
884 for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) { 866 for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
885 if (ets_params->cos[cos_idx].state == bnx2x_cos_state_bw) { 867 if (ets_params->cos[cos_idx].state == bnx2x_cos_state_bw) {
@@ -887,10 +869,9 @@ static int bnx2x_ets_e3b0_get_total_bw(
887 if (!ets_params->cos[cos_idx].params.bw_params.bw) { 869 if (!ets_params->cos[cos_idx].params.bw_params.bw) {
888 DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW" 870 DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW"
889 "was set to 0\n"); 871 "was set to 0\n");
890 /* 872 /* This is to prevent a state when ramrods
891 * This is to prevent a state when ramrods
892 * can't be sent 873 * can't be sent
893 */ 874 */
894 ets_params->cos[cos_idx].params.bw_params.bw 875 ets_params->cos[cos_idx].params.bw_params.bw
895 = 1; 876 = 1;
896 } 877 }
@@ -908,8 +889,7 @@ static int bnx2x_ets_e3b0_get_total_bw(
908 } 889 }
909 DP(NETIF_MSG_LINK, 890 DP(NETIF_MSG_LINK,
910 "bnx2x_ets_E3B0_config total BW should be 100\n"); 891 "bnx2x_ets_E3B0_config total BW should be 100\n");
911 /* 892 /* We can handle a case whre the BW isn't 100 this can happen
912 * We can handle a case whre the BW isn't 100 this can happen
913 * if the TC are joined. 893 * if the TC are joined.
914 */ 894 */
915 } 895 }
@@ -919,7 +899,7 @@ static int bnx2x_ets_e3b0_get_total_bw(
919/****************************************************************************** 899/******************************************************************************
920* Description: 900* Description:
921* Invalidate all the sp_pri_to_cos. 901* Invalidate all the sp_pri_to_cos.
922*. 902*
923******************************************************************************/ 903******************************************************************************/
924static void bnx2x_ets_e3b0_sp_pri_to_cos_init(u8 *sp_pri_to_cos) 904static void bnx2x_ets_e3b0_sp_pri_to_cos_init(u8 *sp_pri_to_cos)
925{ 905{
@@ -931,7 +911,7 @@ static void bnx2x_ets_e3b0_sp_pri_to_cos_init(u8 *sp_pri_to_cos)
931* Description: 911* Description:
932* Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers 912* Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
933* according to sp_pri_to_cos. 913* according to sp_pri_to_cos.
934*. 914*
935******************************************************************************/ 915******************************************************************************/
936static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params, 916static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
937 u8 *sp_pri_to_cos, const u8 pri, 917 u8 *sp_pri_to_cos, const u8 pri,
@@ -964,7 +944,7 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
964* Description: 944* Description:
965* Returns the correct value according to COS and priority in 945* Returns the correct value according to COS and priority in
966* the sp_pri_cli register. 946* the sp_pri_cli register.
967*. 947*
968******************************************************************************/ 948******************************************************************************/
969static u64 bnx2x_e3b0_sp_get_pri_cli_reg(const u8 cos, const u8 cos_offset, 949static u64 bnx2x_e3b0_sp_get_pri_cli_reg(const u8 cos, const u8 cos_offset,
970 const u8 pri_set, 950 const u8 pri_set,
@@ -981,7 +961,7 @@ static u64 bnx2x_e3b0_sp_get_pri_cli_reg(const u8 cos, const u8 cos_offset,
981* Description: 961* Description:
982* Returns the correct value according to COS and priority in the 962* Returns the correct value according to COS and priority in the
983* sp_pri_cli register for NIG. 963* sp_pri_cli register for NIG.
984*. 964*
985******************************************************************************/ 965******************************************************************************/
986static u64 bnx2x_e3b0_sp_get_pri_cli_reg_nig(const u8 cos, const u8 pri_set) 966static u64 bnx2x_e3b0_sp_get_pri_cli_reg_nig(const u8 cos, const u8 pri_set)
987{ 967{
@@ -997,7 +977,7 @@ static u64 bnx2x_e3b0_sp_get_pri_cli_reg_nig(const u8 cos, const u8 pri_set)
997* Description: 977* Description:
998* Returns the correct value according to COS and priority in the 978* Returns the correct value according to COS and priority in the
999* sp_pri_cli register for PBF. 979* sp_pri_cli register for PBF.
1000*. 980*
1001******************************************************************************/ 981******************************************************************************/
1002static u64 bnx2x_e3b0_sp_get_pri_cli_reg_pbf(const u8 cos, const u8 pri_set) 982static u64 bnx2x_e3b0_sp_get_pri_cli_reg_pbf(const u8 cos, const u8 pri_set)
1003{ 983{
@@ -1013,7 +993,7 @@ static u64 bnx2x_e3b0_sp_get_pri_cli_reg_pbf(const u8 cos, const u8 pri_set)
1013* Description: 993* Description:
1014* Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers 994* Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
1015* according to sp_pri_to_cos.(which COS has higher priority) 995* according to sp_pri_to_cos.(which COS has higher priority)
1016*. 996*
1017******************************************************************************/ 997******************************************************************************/
1018static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params, 998static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
1019 u8 *sp_pri_to_cos) 999 u8 *sp_pri_to_cos)
@@ -1149,8 +1129,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
1149 return -EINVAL; 1129 return -EINVAL;
1150 } 1130 }
1151 1131
1152 /* 1132 /* Upper bound is set according to current link speed (min_w_val
1153 * Upper bound is set according to current link speed (min_w_val
1154 * should be the same for upper bound and COS credit val). 1133 * should be the same for upper bound and COS credit val).
1155 */ 1134 */
1156 bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig); 1135 bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig);
@@ -1160,8 +1139,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
1160 for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) { 1139 for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) {
1161 if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) { 1140 if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) {
1162 cos_bw_bitmap |= (1 << cos_entry); 1141 cos_bw_bitmap |= (1 << cos_entry);
1163 /* 1142 /* The function also sets the BW in HW(not the mappin
1164 * The function also sets the BW in HW(not the mappin
1165 * yet) 1143 * yet)
1166 */ 1144 */
1167 bnx2x_status = bnx2x_ets_e3b0_set_cos_bw( 1145 bnx2x_status = bnx2x_ets_e3b0_set_cos_bw(
@@ -1217,14 +1195,12 @@ static void bnx2x_ets_bw_limit_common(const struct link_params *params)
1217 /* ETS disabled configuration */ 1195 /* ETS disabled configuration */
1218 struct bnx2x *bp = params->bp; 1196 struct bnx2x *bp = params->bp;
1219 DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n"); 1197 DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
1220 /* 1198 /* Defines which entries (clients) are subjected to WFQ arbitration
1221 * defines which entries (clients) are subjected to WFQ arbitration
1222 * COS0 0x8 1199 * COS0 0x8
1223 * COS1 0x10 1200 * COS1 0x10
1224 */ 1201 */
1225 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18); 1202 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18);
1226 /* 1203 /* Mapping between the ARB_CREDIT_WEIGHT registers and actual
1227 * mapping between the ARB_CREDIT_WEIGHT registers and actual
1228 * client numbers (WEIGHT_0 does not actually have to represent 1204 * client numbers (WEIGHT_0 does not actually have to represent
1229 * client 0) 1205 * client 0)
1230 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 1206 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
@@ -1242,8 +1218,7 @@ static void bnx2x_ets_bw_limit_common(const struct link_params *params)
1242 1218
1243 /* Defines the number of consecutive slots for the strict priority */ 1219 /* Defines the number of consecutive slots for the strict priority */
1244 REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0); 1220 REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
1245 /* 1221 /* Bitmap of 5bits length. Each bit specifies whether the entry behaves
1246 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
1247 * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0 1222 * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0
1248 * entry, 4 - COS1 entry. 1223 * entry, 4 - COS1 entry.
1249 * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT 1224 * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT
@@ -1298,8 +1273,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
1298 u32 val = 0; 1273 u32 val = 0;
1299 1274
1300 DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n"); 1275 DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n");
1301 /* 1276 /* Bitmap of 5bits length. Each bit specifies whether the entry behaves
1302 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
1303 * as strict. Bits 0,1,2 - debug and management entries, 1277 * as strict. Bits 0,1,2 - debug and management entries,
1304 * 3 - COS0 entry, 4 - COS1 entry. 1278 * 3 - COS0 entry, 4 - COS1 entry.
1305 * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT 1279 * COS1 | COS0 | DEBUG21 | DEBUG0 | MGMT
@@ -1307,8 +1281,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
1307 * MCP and debug are strict 1281 * MCP and debug are strict
1308 */ 1282 */
1309 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F); 1283 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F);
1310 /* 1284 /* For strict priority entries defines the number of consecutive slots
1311 * For strict priority entries defines the number of consecutive slots
1312 * for the highest priority. 1285 * for the highest priority.
1313 */ 1286 */
1314 REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100); 1287 REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
@@ -1320,8 +1293,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
1320 /* Defines the number of consecutive slots for the strict priority */ 1293 /* Defines the number of consecutive slots for the strict priority */
1321 REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos); 1294 REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);
1322 1295
1323 /* 1296 /* Mapping between entry priority to client number (0,1,2 -debug and
1324 * mapping between entry priority to client number (0,1,2 -debug and
1325 * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST) 1297 * management clients, 3 - COS0 client, 4 - COS client)(HIGHEST)
1326 * 3bits client num. 1298 * 3bits client num.
1327 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0 1299 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
@@ -1356,15 +1328,12 @@ static void bnx2x_update_pfc_xmac(struct link_params *params,
1356 if (!(params->feature_config_flags & 1328 if (!(params->feature_config_flags &
1357 FEATURE_CONFIG_PFC_ENABLED)) { 1329 FEATURE_CONFIG_PFC_ENABLED)) {
1358 1330
1359 /* 1331 /* RX flow control - Process pause frame in receive direction
1360 * RX flow control - Process pause frame in receive direction
1361 */ 1332 */
1362 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX) 1333 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
1363 pause_val |= XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN; 1334 pause_val |= XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN;
1364 1335
1365 /* 1336 /* TX flow control - Send pause packet when buffer is full */
1366 * TX flow control - Send pause packet when buffer is full
1367 */
1368 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) 1337 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
1369 pause_val |= XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN; 1338 pause_val |= XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN;
1370 } else {/* PFC support */ 1339 } else {/* PFC support */
@@ -1457,8 +1426,7 @@ void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
1457static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port) 1426static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port)
1458{ 1427{
1459 u32 mode, emac_base; 1428 u32 mode, emac_base;
1460 /** 1429 /* Set clause 45 mode, slow down the MDIO clock to 2.5MHz
1461 * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
1462 * (a value of 49==0x31) and make sure that the AUTO poll is off 1430 * (a value of 49==0x31) and make sure that the AUTO poll is off
1463 */ 1431 */
1464 1432
@@ -1578,15 +1546,6 @@ static void bnx2x_umac_enable(struct link_params *params,
1578 1546
1579 DP(NETIF_MSG_LINK, "enabling UMAC\n"); 1547 DP(NETIF_MSG_LINK, "enabling UMAC\n");
1580 1548
1581 /**
1582 * This register determines on which events the MAC will assert
1583 * error on the i/f to the NIG along w/ EOP.
1584 */
1585
1586 /**
1587 * BD REG_WR(bp, NIG_REG_P0_MAC_RSV_ERR_MASK +
1588 * params->port*0x14, 0xfffff.
1589 */
1590 /* This register opens the gate for the UMAC despite its name */ 1549 /* This register opens the gate for the UMAC despite its name */
1591 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1); 1550 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);
1592 1551
@@ -1649,8 +1608,7 @@ static void bnx2x_umac_enable(struct link_params *params,
1649 val |= UMAC_COMMAND_CONFIG_REG_LOOP_ENA; 1608 val |= UMAC_COMMAND_CONFIG_REG_LOOP_ENA;
1650 REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val); 1609 REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
1651 1610
1652 /* 1611 /* Maximum Frame Length (RW). Defines a 14-Bit maximum frame
1653 * Maximum Frame Length (RW). Defines a 14-Bit maximum frame
1654 * length used by the MAC receive logic to check frames. 1612 * length used by the MAC receive logic to check frames.
1655 */ 1613 */
1656 REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710); 1614 REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710);
@@ -1666,8 +1624,7 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
1666 struct bnx2x *bp = params->bp; 1624 struct bnx2x *bp = params->bp;
1667 u32 is_port4mode = bnx2x_is_4_port_mode(bp); 1625 u32 is_port4mode = bnx2x_is_4_port_mode(bp);
1668 1626
1669 /* 1627 /* In 4-port mode, need to set the mode only once, so if XMAC is
1670 * In 4-port mode, need to set the mode only once, so if XMAC is
1671 * already out of reset, it means the mode has already been set, 1628 * already out of reset, it means the mode has already been set,
1672 * and it must not* reset the XMAC again, since it controls both 1629 * and it must not* reset the XMAC again, since it controls both
1673 * ports of the path 1630 * ports of the path
@@ -1691,13 +1648,13 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
1691 if (is_port4mode) { 1648 if (is_port4mode) {
1692 DP(NETIF_MSG_LINK, "Init XMAC to 2 ports x 10G per path\n"); 1649 DP(NETIF_MSG_LINK, "Init XMAC to 2 ports x 10G per path\n");
1693 1650
1694 /* Set the number of ports on the system side to up to 2 */ 1651 /* Set the number of ports on the system side to up to 2 */
1695 REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 1); 1652 REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 1);
1696 1653
1697 /* Set the number of ports on the Warp Core to 10G */ 1654 /* Set the number of ports on the Warp Core to 10G */
1698 REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3); 1655 REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3);
1699 } else { 1656 } else {
1700 /* Set the number of ports on the system side to 1 */ 1657 /* Set the number of ports on the system side to 1 */
1701 REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 0); 1658 REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 0);
1702 if (max_speed == SPEED_10000) { 1659 if (max_speed == SPEED_10000) {
1703 DP(NETIF_MSG_LINK, 1660 DP(NETIF_MSG_LINK,
@@ -1729,8 +1686,7 @@ static void bnx2x_xmac_disable(struct link_params *params)
1729 1686
1730 if (REG_RD(bp, MISC_REG_RESET_REG_2) & 1687 if (REG_RD(bp, MISC_REG_RESET_REG_2) &
1731 MISC_REGISTERS_RESET_REG_2_XMAC) { 1688 MISC_REGISTERS_RESET_REG_2_XMAC) {
1732 /* 1689 /* Send an indication to change the state in the NIG back to XON
1733 * Send an indication to change the state in the NIG back to XON
1734 * Clearing this bit enables the next set of this bit to get 1690 * Clearing this bit enables the next set of this bit to get
1735 * rising edge 1691 * rising edge
1736 */ 1692 */
@@ -1755,13 +1711,11 @@ static int bnx2x_xmac_enable(struct link_params *params,
1755 1711
1756 bnx2x_xmac_init(params, vars->line_speed); 1712 bnx2x_xmac_init(params, vars->line_speed);
1757 1713
1758 /* 1714 /* This register determines on which events the MAC will assert
1759 * This register determines on which events the MAC will assert
1760 * error on the i/f to the NIG along w/ EOP. 1715 * error on the i/f to the NIG along w/ EOP.
1761 */ 1716 */
1762 1717
1763 /* 1718 /* This register tells the NIG whether to send traffic to UMAC
1764 * This register tells the NIG whether to send traffic to UMAC
1765 * or XMAC 1719 * or XMAC
1766 */ 1720 */
1767 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0); 1721 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0);
@@ -1863,8 +1817,7 @@ static int bnx2x_emac_enable(struct link_params *params,
1863 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE); 1817 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
1864 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS; 1818 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
1865 1819
1866 /* 1820 /* Setting this bit causes MAC control frames (except for pause
1867 * Setting this bit causes MAC control frames (except for pause
1868 * frames) to be passed on for processing. This setting has no 1821 * frames) to be passed on for processing. This setting has no
1869 * affect on the operation of the pause frames. This bit effects 1822 * affect on the operation of the pause frames. This bit effects
1870 * all packets regardless of RX Parser packet sorting logic. 1823 * all packets regardless of RX Parser packet sorting logic.
@@ -1963,8 +1916,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
1963 struct link_vars *vars, 1916 struct link_vars *vars,
1964 u8 is_lb) 1917 u8 is_lb)
1965{ 1918{
1966 /* 1919 /* Set rx control: Strip CRC and enable BigMAC to relay
1967 * Set rx control: Strip CRC and enable BigMAC to relay
1968 * control packets to the system as well 1920 * control packets to the system as well
1969 */ 1921 */
1970 u32 wb_data[2]; 1922 u32 wb_data[2];
@@ -2016,8 +1968,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
2016 1968
2017 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2); 1969 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2);
2018 1970
2019 /* 1971 /* Set Time (based unit is 512 bit time) between automatic
2020 * Set Time (based unit is 512 bit time) between automatic
2021 * re-sending of PP packets amd enable automatic re-send of 1972 * re-sending of PP packets amd enable automatic re-send of
2022 * Per-Priroity Packet as long as pp_gen is asserted and 1973 * Per-Priroity Packet as long as pp_gen is asserted and
2023 * pp_disable is low. 1974 * pp_disable is low.
@@ -2086,7 +2037,7 @@ static int bnx2x_pfc_brb_get_config_params(
2086 config_val->default_class1.full_xon = 0; 2037 config_val->default_class1.full_xon = 0;
2087 2038
2088 if (CHIP_IS_E2(bp)) { 2039 if (CHIP_IS_E2(bp)) {
2089 /* class0 defaults */ 2040 /* Class0 defaults */
2090 config_val->default_class0.pause_xoff = 2041 config_val->default_class0.pause_xoff =
2091 DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR; 2042 DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR;
2092 config_val->default_class0.pause_xon = 2043 config_val->default_class0.pause_xon =
@@ -2095,7 +2046,7 @@ static int bnx2x_pfc_brb_get_config_params(
2095 DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR; 2046 DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR;
2096 config_val->default_class0.full_xon = 2047 config_val->default_class0.full_xon =
2097 DEFAULT0_E2_BRB_MAC_FULL_XON_THR; 2048 DEFAULT0_E2_BRB_MAC_FULL_XON_THR;
2098 /* pause able*/ 2049 /* Pause able*/
2099 config_val->pauseable_th.pause_xoff = 2050 config_val->pauseable_th.pause_xoff =
2100 PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE; 2051 PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
2101 config_val->pauseable_th.pause_xon = 2052 config_val->pauseable_th.pause_xon =
@@ -2114,7 +2065,7 @@ static int bnx2x_pfc_brb_get_config_params(
2114 config_val->non_pauseable_th.full_xon = 2065 config_val->non_pauseable_th.full_xon =
2115 PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE; 2066 PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE;
2116 } else if (CHIP_IS_E3A0(bp)) { 2067 } else if (CHIP_IS_E3A0(bp)) {
2117 /* class0 defaults */ 2068 /* Class0 defaults */
2118 config_val->default_class0.pause_xoff = 2069 config_val->default_class0.pause_xoff =
2119 DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR; 2070 DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR;
2120 config_val->default_class0.pause_xon = 2071 config_val->default_class0.pause_xon =
@@ -2123,7 +2074,7 @@ static int bnx2x_pfc_brb_get_config_params(
2123 DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR; 2074 DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR;
2124 config_val->default_class0.full_xon = 2075 config_val->default_class0.full_xon =
2125 DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR; 2076 DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR;
2126 /* pause able */ 2077 /* Pause able */
2127 config_val->pauseable_th.pause_xoff = 2078 config_val->pauseable_th.pause_xoff =
2128 PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE; 2079 PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
2129 config_val->pauseable_th.pause_xon = 2080 config_val->pauseable_th.pause_xon =
@@ -2142,7 +2093,7 @@ static int bnx2x_pfc_brb_get_config_params(
2142 config_val->non_pauseable_th.full_xon = 2093 config_val->non_pauseable_th.full_xon =
2143 PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE; 2094 PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE;
2144 } else if (CHIP_IS_E3B0(bp)) { 2095 } else if (CHIP_IS_E3B0(bp)) {
2145 /* class0 defaults */ 2096 /* Class0 defaults */
2146 config_val->default_class0.pause_xoff = 2097 config_val->default_class0.pause_xoff =
2147 DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR; 2098 DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR;
2148 config_val->default_class0.pause_xon = 2099 config_val->default_class0.pause_xon =
@@ -2305,27 +2256,23 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
2305 reg_th_config = &config_val.non_pauseable_th; 2256 reg_th_config = &config_val.non_pauseable_th;
2306 } else 2257 } else
2307 reg_th_config = &config_val.default_class0; 2258 reg_th_config = &config_val.default_class0;
2308 /* 2259 /* The number of free blocks below which the pause signal to class 0
2309 * The number of free blocks below which the pause signal to class 0
2310 * of MAC #n is asserted. n=0,1 2260 * of MAC #n is asserted. n=0,1
2311 */ 2261 */
2312 REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 : 2262 REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 :
2313 BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 , 2263 BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 ,
2314 reg_th_config->pause_xoff); 2264 reg_th_config->pause_xoff);
2315 /* 2265 /* The number of free blocks above which the pause signal to class 0
2316 * The number of free blocks above which the pause signal to class 0
2317 * of MAC #n is de-asserted. n=0,1 2266 * of MAC #n is de-asserted. n=0,1
2318 */ 2267 */
2319 REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XON_THRESHOLD_1 : 2268 REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XON_THRESHOLD_1 :
2320 BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , reg_th_config->pause_xon); 2269 BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , reg_th_config->pause_xon);
2321 /* 2270 /* The number of free blocks below which the full signal to class 0
2322 * The number of free blocks below which the full signal to class 0
2323 * of MAC #n is asserted. n=0,1 2271 * of MAC #n is asserted. n=0,1
2324 */ 2272 */
2325 REG_WR(bp, (port) ? BRB1_REG_FULL_0_XOFF_THRESHOLD_1 : 2273 REG_WR(bp, (port) ? BRB1_REG_FULL_0_XOFF_THRESHOLD_1 :
2326 BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , reg_th_config->full_xoff); 2274 BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , reg_th_config->full_xoff);
2327 /* 2275 /* The number of free blocks above which the full signal to class 0
2328 * The number of free blocks above which the full signal to class 0
2329 * of MAC #n is de-asserted. n=0,1 2276 * of MAC #n is de-asserted. n=0,1
2330 */ 2277 */
2331 REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 : 2278 REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 :
@@ -2339,30 +2286,26 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
2339 reg_th_config = &config_val.non_pauseable_th; 2286 reg_th_config = &config_val.non_pauseable_th;
2340 } else 2287 } else
2341 reg_th_config = &config_val.default_class1; 2288 reg_th_config = &config_val.default_class1;
2342 /* 2289 /* The number of free blocks below which the pause signal to
2343 * The number of free blocks below which the pause signal to
2344 * class 1 of MAC #n is asserted. n=0,1 2290 * class 1 of MAC #n is asserted. n=0,1
2345 */ 2291 */
2346 REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 : 2292 REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 :
2347 BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, 2293 BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0,
2348 reg_th_config->pause_xoff); 2294 reg_th_config->pause_xoff);
2349 2295
2350 /* 2296 /* The number of free blocks above which the pause signal to
2351 * The number of free blocks above which the pause signal to
2352 * class 1 of MAC #n is de-asserted. n=0,1 2297 * class 1 of MAC #n is de-asserted. n=0,1
2353 */ 2298 */
2354 REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 : 2299 REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 :
2355 BRB1_REG_PAUSE_1_XON_THRESHOLD_0, 2300 BRB1_REG_PAUSE_1_XON_THRESHOLD_0,
2356 reg_th_config->pause_xon); 2301 reg_th_config->pause_xon);
2357 /* 2302 /* The number of free blocks below which the full signal to
2358 * The number of free blocks below which the full signal to
2359 * class 1 of MAC #n is asserted. n=0,1 2303 * class 1 of MAC #n is asserted. n=0,1
2360 */ 2304 */
2361 REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 : 2305 REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 :
2362 BRB1_REG_FULL_1_XOFF_THRESHOLD_0, 2306 BRB1_REG_FULL_1_XOFF_THRESHOLD_0,
2363 reg_th_config->full_xoff); 2307 reg_th_config->full_xoff);
2364 /* 2308 /* The number of free blocks above which the full signal to
2365 * The number of free blocks above which the full signal to
2366 * class 1 of MAC #n is de-asserted. n=0,1 2309 * class 1 of MAC #n is de-asserted. n=0,1
2367 */ 2310 */
2368 REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 : 2311 REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 :
@@ -2379,49 +2322,41 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
2379 REG_WR(bp, BRB1_REG_PER_CLASS_GUARANTY_MODE, 2322 REG_WR(bp, BRB1_REG_PER_CLASS_GUARANTY_MODE,
2380 e3b0_val.per_class_guaranty_mode); 2323 e3b0_val.per_class_guaranty_mode);
2381 2324
2382 /* 2325 /* The hysteresis on the guarantied buffer space for the Lb
2383 * The hysteresis on the guarantied buffer space for the Lb
2384 * port before signaling XON. 2326 * port before signaling XON.
2385 */ 2327 */
2386 REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST, 2328 REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST,
2387 e3b0_val.lb_guarantied_hyst); 2329 e3b0_val.lb_guarantied_hyst);
2388 2330
2389 /* 2331 /* The number of free blocks below which the full signal to the
2390 * The number of free blocks below which the full signal to the
2391 * LB port is asserted. 2332 * LB port is asserted.
2392 */ 2333 */
2393 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 2334 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD,
2394 e3b0_val.full_lb_xoff_th); 2335 e3b0_val.full_lb_xoff_th);
2395 /* 2336 /* The number of free blocks above which the full signal to the
2396 * The number of free blocks above which the full signal to the
2397 * LB port is de-asserted. 2337 * LB port is de-asserted.
2398 */ 2338 */
2399 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 2339 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD,
2400 e3b0_val.full_lb_xon_threshold); 2340 e3b0_val.full_lb_xon_threshold);
2401 /* 2341 /* The number of blocks guarantied for the MAC #n port. n=0,1
2402 * The number of blocks guarantied for the MAC #n port. n=0,1
2403 */ 2342 */
2404 2343
2405 /* The number of blocks guarantied for the LB port.*/ 2344 /* The number of blocks guarantied for the LB port. */
2406 REG_WR(bp, BRB1_REG_LB_GUARANTIED, 2345 REG_WR(bp, BRB1_REG_LB_GUARANTIED,
2407 e3b0_val.lb_guarantied); 2346 e3b0_val.lb_guarantied);
2408 2347
2409 /* 2348 /* The number of blocks guarantied for the MAC #n port. */
2410 * The number of blocks guarantied for the MAC #n port.
2411 */
2412 REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0, 2349 REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0,
2413 2 * e3b0_val.mac_0_class_t_guarantied); 2350 2 * e3b0_val.mac_0_class_t_guarantied);
2414 REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1, 2351 REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1,
2415 2 * e3b0_val.mac_1_class_t_guarantied); 2352 2 * e3b0_val.mac_1_class_t_guarantied);
2416 /* 2353 /* The number of blocks guarantied for class #t in MAC0. t=0,1
2417 * The number of blocks guarantied for class #t in MAC0. t=0,1
2418 */ 2354 */
2419 REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED, 2355 REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED,
2420 e3b0_val.mac_0_class_t_guarantied); 2356 e3b0_val.mac_0_class_t_guarantied);
2421 REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED, 2357 REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED,
2422 e3b0_val.mac_0_class_t_guarantied); 2358 e3b0_val.mac_0_class_t_guarantied);
2423 /* 2359 /* The hysteresis on the guarantied buffer space for class in
2424 * The hysteresis on the guarantied buffer space for class in
2425 * MAC0. t=0,1 2360 * MAC0. t=0,1
2426 */ 2361 */
2427 REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST, 2362 REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST,
@@ -2429,15 +2364,13 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
2429 REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST, 2364 REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST,
2430 e3b0_val.mac_0_class_t_guarantied_hyst); 2365 e3b0_val.mac_0_class_t_guarantied_hyst);
2431 2366
2432 /* 2367 /* The number of blocks guarantied for class #t in MAC1.t=0,1
2433 * The number of blocks guarantied for class #t in MAC1.t=0,1
2434 */ 2368 */
2435 REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED, 2369 REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED,
2436 e3b0_val.mac_1_class_t_guarantied); 2370 e3b0_val.mac_1_class_t_guarantied);
2437 REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED, 2371 REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED,
2438 e3b0_val.mac_1_class_t_guarantied); 2372 e3b0_val.mac_1_class_t_guarantied);
2439 /* 2373 /* The hysteresis on the guarantied buffer space for class #t
2440 * The hysteresis on the guarantied buffer space for class #t
2441 * in MAC1. t=0,1 2374 * in MAC1. t=0,1
2442 */ 2375 */
2443 REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST, 2376 REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST,
@@ -2520,15 +2453,13 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
2520 FEATURE_CONFIG_PFC_ENABLED; 2453 FEATURE_CONFIG_PFC_ENABLED;
2521 DP(NETIF_MSG_LINK, "updating pfc nig parameters\n"); 2454 DP(NETIF_MSG_LINK, "updating pfc nig parameters\n");
2522 2455
2523 /* 2456 /* When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
2524 * When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
2525 * MAC control frames (that are not pause packets) 2457 * MAC control frames (that are not pause packets)
2526 * will be forwarded to the XCM. 2458 * will be forwarded to the XCM.
2527 */ 2459 */
2528 xcm_mask = REG_RD(bp, port ? NIG_REG_LLH1_XCM_MASK : 2460 xcm_mask = REG_RD(bp, port ? NIG_REG_LLH1_XCM_MASK :
2529 NIG_REG_LLH0_XCM_MASK); 2461 NIG_REG_LLH0_XCM_MASK);
2530 /* 2462 /* NIG params will override non PFC params, since it's possible to
2531 * nig params will override non PFC params, since it's possible to
2532 * do transition from PFC to SAFC 2463 * do transition from PFC to SAFC
2533 */ 2464 */
2534 if (set_pfc) { 2465 if (set_pfc) {
@@ -2548,7 +2479,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
2548 llfc_out_en = nig_params->llfc_out_en; 2479 llfc_out_en = nig_params->llfc_out_en;
2549 llfc_enable = nig_params->llfc_enable; 2480 llfc_enable = nig_params->llfc_enable;
2550 pause_enable = nig_params->pause_enable; 2481 pause_enable = nig_params->pause_enable;
2551 } else /*defaul non PFC mode - PAUSE */ 2482 } else /* Default non PFC mode - PAUSE */
2552 pause_enable = 1; 2483 pause_enable = 1;
2553 2484
2554 xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN : 2485 xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
@@ -2608,8 +2539,7 @@ int bnx2x_update_pfc(struct link_params *params,
2608 struct link_vars *vars, 2539 struct link_vars *vars,
2609 struct bnx2x_nig_brb_pfc_port_params *pfc_params) 2540 struct bnx2x_nig_brb_pfc_port_params *pfc_params)
2610{ 2541{
2611 /* 2542 /* The PFC and pause are orthogonal to one another, meaning when
2612 * The PFC and pause are orthogonal to one another, meaning when
2613 * PFC is enabled, the pause are disabled, and when PFC is 2543 * PFC is enabled, the pause are disabled, and when PFC is
2614 * disabled, pause are set according to the pause result. 2544 * disabled, pause are set according to the pause result.
2615 */ 2545 */
@@ -3148,7 +3078,6 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
3148 EMAC_MDIO_STATUS_10MB); 3078 EMAC_MDIO_STATUS_10MB);
3149 3079
3150 /* address */ 3080 /* address */
3151
3152 tmp = ((phy->addr << 21) | (devad << 16) | reg | 3081 tmp = ((phy->addr << 21) | (devad << 16) | reg |
3153 EMAC_MDIO_COMM_COMMAND_ADDRESS | 3082 EMAC_MDIO_COMM_COMMAND_ADDRESS |
3154 EMAC_MDIO_COMM_START_BUSY); 3083 EMAC_MDIO_COMM_START_BUSY);
@@ -3337,8 +3266,7 @@ int bnx2x_phy_read(struct link_params *params, u8 phy_addr,
3337 u8 devad, u16 reg, u16 *ret_val) 3266 u8 devad, u16 reg, u16 *ret_val)
3338{ 3267{
3339 u8 phy_index; 3268 u8 phy_index;
3340 /* 3269 /* Probe for the phy according to the given phy_addr, and execute
3341 * Probe for the phy according to the given phy_addr, and execute
3342 * the read request on it 3270 * the read request on it
3343 */ 3271 */
3344 for (phy_index = 0; phy_index < params->num_phys; phy_index++) { 3272 for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
@@ -3355,8 +3283,7 @@ int bnx2x_phy_write(struct link_params *params, u8 phy_addr,
3355 u8 devad, u16 reg, u16 val) 3283 u8 devad, u16 reg, u16 val)
3356{ 3284{
3357 u8 phy_index; 3285 u8 phy_index;
3358 /* 3286 /* Probe for the phy according to the given phy_addr, and execute
3359 * Probe for the phy according to the given phy_addr, and execute
3360 * the write request on it 3287 * the write request on it
3361 */ 3288 */
3362 for (phy_index = 0; phy_index < params->num_phys; phy_index++) { 3289 for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
@@ -3382,7 +3309,7 @@ static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy,
3382 if (bnx2x_is_4_port_mode(bp)) { 3309 if (bnx2x_is_4_port_mode(bp)) {
3383 u32 port_swap, port_swap_ovr; 3310 u32 port_swap, port_swap_ovr;
3384 3311
3385 /*figure out path swap value */ 3312 /* Figure out path swap value */
3386 path_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP_OVWR); 3313 path_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP_OVWR);
3387 if (path_swap_ovr & 0x1) 3314 if (path_swap_ovr & 0x1)
3388 path_swap = (path_swap_ovr & 0x2); 3315 path_swap = (path_swap_ovr & 0x2);
@@ -3392,7 +3319,7 @@ static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy,
3392 if (path_swap) 3319 if (path_swap)
3393 path = path ^ 1; 3320 path = path ^ 1;
3394 3321
3395 /*figure out port swap value */ 3322 /* Figure out port swap value */
3396 port_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP_OVWR); 3323 port_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP_OVWR);
3397 if (port_swap_ovr & 0x1) 3324 if (port_swap_ovr & 0x1)
3398 port_swap = (port_swap_ovr & 0x2); 3325 port_swap = (port_swap_ovr & 0x2);
@@ -3405,7 +3332,7 @@ static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy,
3405 lane = (port<<1) + path; 3332 lane = (port<<1) + path;
3406 } else { /* two port mode - no port swap */ 3333 } else { /* two port mode - no port swap */
3407 3334
3408 /*figure out path swap value */ 3335 /* Figure out path swap value */
3409 path_swap_ovr = 3336 path_swap_ovr =
3410 REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP_OVWR); 3337 REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP_OVWR);
3411 if (path_swap_ovr & 0x1) { 3338 if (path_swap_ovr & 0x1) {
@@ -3437,8 +3364,7 @@ static void bnx2x_set_aer_mmd(struct link_params *params,
3437 3364
3438 if (USES_WARPCORE(bp)) { 3365 if (USES_WARPCORE(bp)) {
3439 aer_val = bnx2x_get_warpcore_lane(phy, params); 3366 aer_val = bnx2x_get_warpcore_lane(phy, params);
3440 /* 3367 /* In Dual-lane mode, two lanes are joined together,
3441 * In Dual-lane mode, two lanes are joined together,
3442 * so in order to configure them, the AER broadcast method is 3368 * so in order to configure them, the AER broadcast method is
3443 * used here. 3369 * used here.
3444 * 0x200 is the broadcast address for lanes 0,1 3370 * 0x200 is the broadcast address for lanes 0,1
@@ -3518,8 +3444,7 @@ static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
3518{ 3444{
3519 struct bnx2x *bp = params->bp; 3445 struct bnx2x *bp = params->bp;
3520 *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; 3446 *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
3521 /** 3447 /* Resolve pause mode and advertisement Please refer to Table
3522 * resolve pause mode and advertisement Please refer to Table
3523 * 28B-3 of the 802.3ab-1999 spec 3448 * 28B-3 of the 802.3ab-1999 spec
3524 */ 3449 */
3525 3450
@@ -3642,6 +3567,7 @@ static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
3642 vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE; 3567 vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE;
3643 if (pause_result & (1<<1)) 3568 if (pause_result & (1<<1))
3644 vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE; 3569 vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE;
3570
3645} 3571}
3646 3572
3647static void bnx2x_ext_phy_update_adv_fc(struct bnx2x_phy *phy, 3573static void bnx2x_ext_phy_update_adv_fc(struct bnx2x_phy *phy,
@@ -3698,6 +3624,7 @@ static void bnx2x_ext_phy_update_adv_fc(struct bnx2x_phy *phy,
3698 bnx2x_pause_resolve(vars, pause_result); 3624 bnx2x_pause_resolve(vars, pause_result);
3699 3625
3700} 3626}
3627
3701static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy, 3628static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
3702 struct link_params *params, 3629 struct link_params *params,
3703 struct link_vars *vars) 3630 struct link_vars *vars)
@@ -3819,9 +3746,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3819 3746
3820 /* Advertise pause */ 3747 /* Advertise pause */
3821 bnx2x_ext_phy_set_pause(params, phy, vars); 3748 bnx2x_ext_phy_set_pause(params, phy, vars);
3822 3749 /* Set KR Autoneg Work-Around flag for Warpcore version older than D108
3823 /*
3824 * Set KR Autoneg Work-Around flag for Warpcore version older than D108
3825 */ 3750 */
3826 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3751 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3827 MDIO_WC_REG_UC_INFO_B1_VERSION, &val16); 3752 MDIO_WC_REG_UC_INFO_B1_VERSION, &val16);
@@ -3829,7 +3754,6 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
3829 DP(NETIF_MSG_LINK, "Enable AN KR work-around\n"); 3754 DP(NETIF_MSG_LINK, "Enable AN KR work-around\n");
3830 vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY; 3755 vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
3831 } 3756 }
3832
3833 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 3757 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
3834 MDIO_WC_REG_DIGITAL5_MISC7, &val16); 3758 MDIO_WC_REG_DIGITAL5_MISC7, &val16);
3835 3759
@@ -3903,7 +3827,7 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
3903 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 3827 bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
3904 MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0xB); 3828 MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0xB);
3905 3829
3906 /*Enable encoded forced speed */ 3830 /* Enable encoded forced speed */
3907 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, 3831 bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
3908 MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x30); 3832 MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x30);
3909 3833
@@ -4265,8 +4189,7 @@ static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp,
4265 PORT_HW_CFG_E3_MOD_ABS_MASK) >> 4189 PORT_HW_CFG_E3_MOD_ABS_MASK) >>
4266 PORT_HW_CFG_E3_MOD_ABS_SHIFT; 4190 PORT_HW_CFG_E3_MOD_ABS_SHIFT;
4267 4191
4268 /* 4192 /* Should not happen. This function called upon interrupt
4269 * Should not happen. This function called upon interrupt
4270 * triggered by GPIO ( since EPIO can only generate interrupts 4193 * triggered by GPIO ( since EPIO can only generate interrupts
4271 * to MCP). 4194 * to MCP).
4272 * So if this function was called and none of the GPIOs was set, 4195 * So if this function was called and none of the GPIOs was set,
@@ -4366,7 +4289,7 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
4366 "link up, rx_tx_asic_rst 0x%x\n", 4289 "link up, rx_tx_asic_rst 0x%x\n",
4367 vars->rx_tx_asic_rst); 4290 vars->rx_tx_asic_rst);
4368 } else { 4291 } else {
4369 /*reset the lane to see if link comes up.*/ 4292 /* Reset the lane to see if link comes up.*/
4370 bnx2x_warpcore_reset_lane(bp, phy, 1); 4293 bnx2x_warpcore_reset_lane(bp, phy, 1);
4371 bnx2x_warpcore_reset_lane(bp, phy, 0); 4294 bnx2x_warpcore_reset_lane(bp, phy, 0);
4372 4295
@@ -4387,7 +4310,6 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy,
4387 } /*params->rx_tx_asic_rst*/ 4310 } /*params->rx_tx_asic_rst*/
4388 4311
4389} 4312}
4390
4391static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy, 4313static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
4392 struct link_params *params, 4314 struct link_params *params,
4393 struct link_vars *vars) 4315 struct link_vars *vars)
@@ -4545,7 +4467,7 @@ static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy,
4545 /* Update those 1-copy registers */ 4467 /* Update those 1-copy registers */
4546 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, 4468 CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
4547 MDIO_AER_BLOCK_AER_REG, 0); 4469 MDIO_AER_BLOCK_AER_REG, 0);
4548 /* Enable 1G MDIO (1-copy) */ 4470 /* Enable 1G MDIO (1-copy) */
4549 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, 4471 bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
4550 MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, 4472 MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
4551 &val16); 4473 &val16);
@@ -4624,43 +4546,43 @@ void bnx2x_sync_link(struct link_params *params,
4624 vars->duplex = DUPLEX_FULL; 4546 vars->duplex = DUPLEX_FULL;
4625 switch (vars->link_status & 4547 switch (vars->link_status &
4626 LINK_STATUS_SPEED_AND_DUPLEX_MASK) { 4548 LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
4627 case LINK_10THD: 4549 case LINK_10THD:
4628 vars->duplex = DUPLEX_HALF; 4550 vars->duplex = DUPLEX_HALF;
4629 /* fall thru */ 4551 /* Fall thru */
4630 case LINK_10TFD: 4552 case LINK_10TFD:
4631 vars->line_speed = SPEED_10; 4553 vars->line_speed = SPEED_10;
4632 break; 4554 break;
4633 4555
4634 case LINK_100TXHD: 4556 case LINK_100TXHD:
4635 vars->duplex = DUPLEX_HALF; 4557 vars->duplex = DUPLEX_HALF;
4636 /* fall thru */ 4558 /* Fall thru */
4637 case LINK_100T4: 4559 case LINK_100T4:
4638 case LINK_100TXFD: 4560 case LINK_100TXFD:
4639 vars->line_speed = SPEED_100; 4561 vars->line_speed = SPEED_100;
4640 break; 4562 break;
4641 4563
4642 case LINK_1000THD: 4564 case LINK_1000THD:
4643 vars->duplex = DUPLEX_HALF; 4565 vars->duplex = DUPLEX_HALF;
4644 /* fall thru */ 4566 /* Fall thru */
4645 case LINK_1000TFD: 4567 case LINK_1000TFD:
4646 vars->line_speed = SPEED_1000; 4568 vars->line_speed = SPEED_1000;
4647 break; 4569 break;
4648 4570
4649 case LINK_2500THD: 4571 case LINK_2500THD:
4650 vars->duplex = DUPLEX_HALF; 4572 vars->duplex = DUPLEX_HALF;
4651 /* fall thru */ 4573 /* Fall thru */
4652 case LINK_2500TFD: 4574 case LINK_2500TFD:
4653 vars->line_speed = SPEED_2500; 4575 vars->line_speed = SPEED_2500;
4654 break; 4576 break;
4655 4577
4656 case LINK_10GTFD: 4578 case LINK_10GTFD:
4657 vars->line_speed = SPEED_10000; 4579 vars->line_speed = SPEED_10000;
4658 break; 4580 break;
4659 case LINK_20GTFD: 4581 case LINK_20GTFD:
4660 vars->line_speed = SPEED_20000; 4582 vars->line_speed = SPEED_20000;
4661 break; 4583 break;
4662 default: 4584 default:
4663 break; 4585 break;
4664 } 4586 }
4665 vars->flow_ctrl = 0; 4587 vars->flow_ctrl = 0;
4666 if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED) 4588 if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED)
@@ -4835,9 +4757,8 @@ static void bnx2x_set_swap_lanes(struct link_params *params,
4835 struct bnx2x_phy *phy) 4757 struct bnx2x_phy *phy)
4836{ 4758{
4837 struct bnx2x *bp = params->bp; 4759 struct bnx2x *bp = params->bp;
4838 /* 4760 /* Each two bits represents a lane number:
4839 * Each two bits represents a lane number: 4761 * No swap is 0123 => 0x1b no need to enable the swap
4840 * No swap is 0123 => 0x1b no need to enable the swap
4841 */ 4762 */
4842 u16 rx_lane_swap, tx_lane_swap; 4763 u16 rx_lane_swap, tx_lane_swap;
4843 4764
@@ -5051,8 +4972,7 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy,
5051 MDIO_REG_BANK_COMBO_IEEE0, 4972 MDIO_REG_BANK_COMBO_IEEE0,
5052 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val); 4973 MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
5053 4974
5054 /* 4975 /* Program speed
5055 * program speed
5056 * - needed only if the speed is greater than 1G (2.5G or 10G) 4976 * - needed only if the speed is greater than 1G (2.5G or 10G)
5057 */ 4977 */
5058 CL22_RD_OVER_CL45(bp, phy, 4978 CL22_RD_OVER_CL45(bp, phy,
@@ -5087,8 +5007,6 @@ static void bnx2x_set_brcm_cl37_advertisement(struct bnx2x_phy *phy,
5087 struct bnx2x *bp = params->bp; 5007 struct bnx2x *bp = params->bp;
5088 u16 val = 0; 5008 u16 val = 0;
5089 5009
5090 /* configure the 48 bits for BAM AN */
5091
5092 /* set extended capabilities */ 5010 /* set extended capabilities */
5093 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) 5011 if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
5094 val |= MDIO_OVER_1G_UP1_2_5G; 5012 val |= MDIO_OVER_1G_UP1_2_5G;
@@ -5234,11 +5152,8 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
5234 } 5152 }
5235} 5153}
5236 5154
5237 5155/* Link management
5238/*
5239 * link management
5240 */ 5156 */
5241
5242static int bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy, 5157static int bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
5243 struct link_params *params) 5158 struct link_params *params)
5244{ 5159{
@@ -5383,8 +5298,7 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
5383 "ustat_val(0x8371) = 0x%x\n", ustat_val); 5298 "ustat_val(0x8371) = 0x%x\n", ustat_val);
5384 return; 5299 return;
5385 } 5300 }
5386 /* 5301 /* Step 3: Check CL37 Message Pages received to indicate LP
5387 * Step 3: Check CL37 Message Pages received to indicate LP
5388 * supports only CL37 5302 * supports only CL37
5389 */ 5303 */
5390 CL22_RD_OVER_CL45(bp, phy, 5304 CL22_RD_OVER_CL45(bp, phy,
@@ -5401,8 +5315,7 @@ static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
5401 cl37_fsm_received); 5315 cl37_fsm_received);
5402 return; 5316 return;
5403 } 5317 }
5404 /* 5318 /* The combined cl37/cl73 fsm state information indicating that
5405 * The combined cl37/cl73 fsm state information indicating that
5406 * we are connected to a device which does not support cl73, but 5319 * we are connected to a device which does not support cl73, but
5407 * does support cl37 BAM. In this case we disable cl73 and 5320 * does support cl37 BAM. In this case we disable cl73 and
5408 * restart cl37 auto-neg 5321 * restart cl37 auto-neg
@@ -5973,8 +5886,7 @@ static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
5973{ 5886{
5974 u32 latch_status = 0; 5887 u32 latch_status = 0;
5975 5888
5976 /* 5889 /* Disable the MI INT ( external phy int ) by writing 1 to the
5977 * Disable the MI INT ( external phy int ) by writing 1 to the
5978 * status register. Link down indication is high-active-signal, 5890 * status register. Link down indication is high-active-signal,
5979 * so in this case we need to write the status to clear the XOR 5891 * so in this case we need to write the status to clear the XOR
5980 */ 5892 */
@@ -6009,8 +5921,7 @@ static void bnx2x_link_int_ack(struct link_params *params,
6009 struct bnx2x *bp = params->bp; 5921 struct bnx2x *bp = params->bp;
6010 u8 port = params->port; 5922 u8 port = params->port;
6011 u32 mask; 5923 u32 mask;
6012 /* 5924 /* First reset all status we assume only one line will be
6013 * First reset all status we assume only one line will be
6014 * change at a time 5925 * change at a time
6015 */ 5926 */
6016 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4, 5927 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
@@ -6024,8 +5935,7 @@ static void bnx2x_link_int_ack(struct link_params *params,
6024 if (is_10g_plus) 5935 if (is_10g_plus)
6025 mask = NIG_STATUS_XGXS0_LINK10G; 5936 mask = NIG_STATUS_XGXS0_LINK10G;
6026 else if (params->switch_cfg == SWITCH_CFG_10G) { 5937 else if (params->switch_cfg == SWITCH_CFG_10G) {
6027 /* 5938 /* Disable the link interrupt by writing 1 to
6028 * Disable the link interrupt by writing 1 to
6029 * the relevant lane in the status register 5939 * the relevant lane in the status register
6030 */ 5940 */
6031 u32 ser_lane = 5941 u32 ser_lane =
@@ -6227,8 +6137,7 @@ int bnx2x_set_led(struct link_params *params,
6227 break; 6137 break;
6228 6138
6229 case LED_MODE_OPER: 6139 case LED_MODE_OPER:
6230 /* 6140 /* For all other phys, OPER mode is same as ON, so in case
6231 * For all other phys, OPER mode is same as ON, so in case
6232 * link is down, do nothing 6141 * link is down, do nothing
6233 */ 6142 */
6234 if (!vars->link_up) 6143 if (!vars->link_up)
@@ -6239,9 +6148,7 @@ int bnx2x_set_led(struct link_params *params,
6239 (params->phy[EXT_PHY1].type == 6148 (params->phy[EXT_PHY1].type ==
6240 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722)) && 6149 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722)) &&
6241 CHIP_IS_E2(bp) && params->num_phys == 2) { 6150 CHIP_IS_E2(bp) && params->num_phys == 2) {
6242 /* 6151 /* This is a work-around for E2+8727 Configurations */
6243 * This is a work-around for E2+8727 Configurations
6244 */
6245 if (mode == LED_MODE_ON || 6152 if (mode == LED_MODE_ON ||
6246 speed == SPEED_10000){ 6153 speed == SPEED_10000){
6247 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0); 6154 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
@@ -6250,8 +6157,7 @@ int bnx2x_set_led(struct link_params *params,
6250 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED); 6157 tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
6251 EMAC_WR(bp, EMAC_REG_EMAC_LED, 6158 EMAC_WR(bp, EMAC_REG_EMAC_LED,
6252 (tmp | EMAC_LED_OVERRIDE)); 6159 (tmp | EMAC_LED_OVERRIDE));
6253 /* 6160 /* Return here without enabling traffic
6254 * return here without enabling traffic
6255 * LED blink and setting rate in ON mode. 6161 * LED blink and setting rate in ON mode.
6256 * In oper mode, enabling LED blink 6162 * In oper mode, enabling LED blink
6257 * and setting rate is needed. 6163 * and setting rate is needed.
@@ -6260,8 +6166,7 @@ int bnx2x_set_led(struct link_params *params,
6260 return rc; 6166 return rc;
6261 } 6167 }
6262 } else if (SINGLE_MEDIA_DIRECT(params)) { 6168 } else if (SINGLE_MEDIA_DIRECT(params)) {
6263 /* 6169 /* This is a work-around for HW issue found when link
6264 * This is a work-around for HW issue found when link
6265 * is up in CL73 6170 * is up in CL73
6266 */ 6171 */
6267 if ((!CHIP_IS_E3(bp)) || 6172 if ((!CHIP_IS_E3(bp)) ||
@@ -6310,10 +6215,7 @@ int bnx2x_set_led(struct link_params *params,
6310 (speed == SPEED_1000) || 6215 (speed == SPEED_1000) ||
6311 (speed == SPEED_100) || 6216 (speed == SPEED_100) ||
6312 (speed == SPEED_10))) { 6217 (speed == SPEED_10))) {
6313 /* 6218 /* For speeds less than 10G LED scheme is different */
6314 * On Everest 1 Ax chip versions for speeds less than
6315 * 10G LED scheme is different
6316 */
6317 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 6219 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
6318 + port*4, 1); 6220 + port*4, 1);
6319 REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 + 6221 REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
@@ -6333,8 +6235,7 @@ int bnx2x_set_led(struct link_params *params,
6333 6235
6334} 6236}
6335 6237
6336/* 6238/* This function comes to reflect the actual link state read DIRECTLY from the
6337 * This function comes to reflect the actual link state read DIRECTLY from the
6338 * HW 6239 * HW
6339 */ 6240 */
6340int bnx2x_test_link(struct link_params *params, struct link_vars *vars, 6241int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
@@ -6422,16 +6323,14 @@ static int bnx2x_link_initialize(struct link_params *params,
6422 int rc = 0; 6323 int rc = 0;
6423 u8 phy_index, non_ext_phy; 6324 u8 phy_index, non_ext_phy;
6424 struct bnx2x *bp = params->bp; 6325 struct bnx2x *bp = params->bp;
6425 /* 6326 /* In case of external phy existence, the line speed would be the
6426 * In case of external phy existence, the line speed would be the
6427 * line speed linked up by the external phy. In case it is direct 6327 * line speed linked up by the external phy. In case it is direct
6428 * only, then the line_speed during initialization will be 6328 * only, then the line_speed during initialization will be
6429 * equal to the req_line_speed 6329 * equal to the req_line_speed
6430 */ 6330 */
6431 vars->line_speed = params->phy[INT_PHY].req_line_speed; 6331 vars->line_speed = params->phy[INT_PHY].req_line_speed;
6432 6332
6433 /* 6333 /* Initialize the internal phy in case this is a direct board
6434 * Initialize the internal phy in case this is a direct board
6435 * (no external phys), or this board has external phy which requires 6334 * (no external phys), or this board has external phy which requires
6436 * to first. 6335 * to first.
6437 */ 6336 */
@@ -6463,8 +6362,7 @@ static int bnx2x_link_initialize(struct link_params *params,
6463 } else { 6362 } else {
6464 for (phy_index = EXT_PHY1; phy_index < params->num_phys; 6363 for (phy_index = EXT_PHY1; phy_index < params->num_phys;
6465 phy_index++) { 6364 phy_index++) {
6466 /* 6365 /* No need to initialize second phy in case of first
6467 * No need to initialize second phy in case of first
6468 * phy only selection. In case of second phy, we do 6366 * phy only selection. In case of second phy, we do
6469 * need to initialize the first phy, since they are 6367 * need to initialize the first phy, since they are
6470 * connected. 6368 * connected.
@@ -6492,7 +6390,6 @@ static int bnx2x_link_initialize(struct link_params *params,
6492 NIG_STATUS_XGXS0_LINK_STATUS | 6390 NIG_STATUS_XGXS0_LINK_STATUS |
6493 NIG_STATUS_SERDES0_LINK_STATUS | 6391 NIG_STATUS_SERDES0_LINK_STATUS |
6494 NIG_MASK_MI_INT)); 6392 NIG_MASK_MI_INT));
6495 bnx2x_update_mng(params, vars->link_status);
6496 return rc; 6393 return rc;
6497} 6394}
6498 6395
@@ -6577,7 +6474,7 @@ static int bnx2x_update_link_up(struct link_params *params,
6577 u8 link_10g) 6474 u8 link_10g)
6578{ 6475{
6579 struct bnx2x *bp = params->bp; 6476 struct bnx2x *bp = params->bp;
6580 u8 port = params->port; 6477 u8 phy_idx, port = params->port;
6581 int rc = 0; 6478 int rc = 0;
6582 6479
6583 vars->link_status |= (LINK_STATUS_LINK_UP | 6480 vars->link_status |= (LINK_STATUS_LINK_UP |
@@ -6641,11 +6538,18 @@ static int bnx2x_update_link_up(struct link_params *params,
6641 6538
6642 /* update shared memory */ 6539 /* update shared memory */
6643 bnx2x_update_mng(params, vars->link_status); 6540 bnx2x_update_mng(params, vars->link_status);
6541
6542 /* Check remote fault */
6543 for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
6544 if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
6545 bnx2x_check_half_open_conn(params, vars, 0);
6546 break;
6547 }
6548 }
6644 msleep(20); 6549 msleep(20);
6645 return rc; 6550 return rc;
6646} 6551}
6647/* 6552/* The bnx2x_link_update function should be called upon link
6648 * The bnx2x_link_update function should be called upon link
6649 * interrupt. 6553 * interrupt.
6650 * Link is considered up as follows: 6554 * Link is considered up as follows:
6651 * - DIRECT_SINGLE_MEDIA - Only XGXS link (internal link) needs 6555 * - DIRECT_SINGLE_MEDIA - Only XGXS link (internal link) needs
@@ -6702,8 +6606,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6702 if (!CHIP_IS_E3(bp)) 6606 if (!CHIP_IS_E3(bp))
6703 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); 6607 REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
6704 6608
6705 /* 6609 /* Step 1:
6706 * Step 1:
6707 * Check external link change only for external phys, and apply 6610 * Check external link change only for external phys, and apply
6708 * priority selection between them in case the link on both phys 6611 * priority selection between them in case the link on both phys
6709 * is up. Note that instead of the common vars, a temporary 6612 * is up. Note that instead of the common vars, a temporary
@@ -6734,23 +6637,20 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6734 switch (bnx2x_phy_selection(params)) { 6637 switch (bnx2x_phy_selection(params)) {
6735 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT: 6638 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
6736 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: 6639 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
6737 /* 6640 /* In this option, the first PHY makes sure to pass the
6738 * In this option, the first PHY makes sure to pass the
6739 * traffic through itself only. 6641 * traffic through itself only.
6740 * Its not clear how to reset the link on the second phy 6642 * Its not clear how to reset the link on the second phy
6741 */ 6643 */
6742 active_external_phy = EXT_PHY1; 6644 active_external_phy = EXT_PHY1;
6743 break; 6645 break;
6744 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY: 6646 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
6745 /* 6647 /* In this option, the first PHY makes sure to pass the
6746 * In this option, the first PHY makes sure to pass the
6747 * traffic through the second PHY. 6648 * traffic through the second PHY.
6748 */ 6649 */
6749 active_external_phy = EXT_PHY2; 6650 active_external_phy = EXT_PHY2;
6750 break; 6651 break;
6751 default: 6652 default:
6752 /* 6653 /* Link indication on both PHYs with the following cases
6753 * Link indication on both PHYs with the following cases
6754 * is invalid: 6654 * is invalid:
6755 * - FIRST_PHY means that second phy wasn't initialized, 6655 * - FIRST_PHY means that second phy wasn't initialized,
6756 * hence its link is expected to be down 6656 * hence its link is expected to be down
@@ -6767,8 +6667,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6767 } 6667 }
6768 } 6668 }
6769 prev_line_speed = vars->line_speed; 6669 prev_line_speed = vars->line_speed;
6770 /* 6670 /* Step 2:
6771 * Step 2:
6772 * Read the status of the internal phy. In case of 6671 * Read the status of the internal phy. In case of
6773 * DIRECT_SINGLE_MEDIA board, this link is the external link, 6672 * DIRECT_SINGLE_MEDIA board, this link is the external link,
6774 * otherwise this is the link between the 577xx and the first 6673 * otherwise this is the link between the 577xx and the first
@@ -6778,8 +6677,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6778 params->phy[INT_PHY].read_status( 6677 params->phy[INT_PHY].read_status(
6779 &params->phy[INT_PHY], 6678 &params->phy[INT_PHY],
6780 params, vars); 6679 params, vars);
6781 /* 6680 /* The INT_PHY flow control reside in the vars. This include the
6782 * The INT_PHY flow control reside in the vars. This include the
6783 * case where the speed or flow control are not set to AUTO. 6681 * case where the speed or flow control are not set to AUTO.
6784 * Otherwise, the active external phy flow control result is set 6682 * Otherwise, the active external phy flow control result is set
6785 * to the vars. The ext_phy_line_speed is needed to check if the 6683 * to the vars. The ext_phy_line_speed is needed to check if the
@@ -6788,14 +6686,12 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6788 */ 6686 */
6789 if (active_external_phy > INT_PHY) { 6687 if (active_external_phy > INT_PHY) {
6790 vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl; 6688 vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl;
6791 /* 6689 /* Link speed is taken from the XGXS. AN and FC result from
6792 * Link speed is taken from the XGXS. AN and FC result from
6793 * the external phy. 6690 * the external phy.
6794 */ 6691 */
6795 vars->link_status |= phy_vars[active_external_phy].link_status; 6692 vars->link_status |= phy_vars[active_external_phy].link_status;
6796 6693
6797 /* 6694 /* if active_external_phy is first PHY and link is up - disable
6798 * if active_external_phy is first PHY and link is up - disable
6799 * disable TX on second external PHY 6695 * disable TX on second external PHY
6800 */ 6696 */
6801 if (active_external_phy == EXT_PHY1) { 6697 if (active_external_phy == EXT_PHY1) {
@@ -6832,8 +6728,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6832 DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x," 6728 DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
6833 " ext_phy_line_speed = %d\n", vars->flow_ctrl, 6729 " ext_phy_line_speed = %d\n", vars->flow_ctrl,
6834 vars->link_status, ext_phy_line_speed); 6730 vars->link_status, ext_phy_line_speed);
6835 /* 6731 /* Upon link speed change set the NIG into drain mode. Comes to
6836 * Upon link speed change set the NIG into drain mode. Comes to
6837 * deals with possible FIFO glitch due to clk change when speed 6732 * deals with possible FIFO glitch due to clk change when speed
6838 * is decreased without link down indicator 6733 * is decreased without link down indicator
6839 */ 6734 */
@@ -6858,8 +6753,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6858 6753
6859 bnx2x_link_int_ack(params, vars, link_10g_plus); 6754 bnx2x_link_int_ack(params, vars, link_10g_plus);
6860 6755
6861 /* 6756 /* In case external phy link is up, and internal link is down
6862 * In case external phy link is up, and internal link is down
6863 * (not initialized yet probably after link initialization, it 6757 * (not initialized yet probably after link initialization, it
6864 * needs to be initialized. 6758 * needs to be initialized.
6865 * Note that after link down-up as result of cable plug, the xgxs 6759 * Note that after link down-up as result of cable plug, the xgxs
@@ -6887,8 +6781,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6887 vars); 6781 vars);
6888 } 6782 }
6889 } 6783 }
6890 /* 6784 /* Link is up only if both local phy and external phy (in case of
6891 * Link is up only if both local phy and external phy (in case of
6892 * non-direct board) are up and no fault detected on active PHY. 6785 * non-direct board) are up and no fault detected on active PHY.
6893 */ 6786 */
6894 vars->link_up = (vars->phy_link_up && 6787 vars->link_up = (vars->phy_link_up &&
@@ -6907,6 +6800,10 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6907 else 6800 else
6908 rc = bnx2x_update_link_down(params, vars); 6801 rc = bnx2x_update_link_down(params, vars);
6909 6802
6803 /* Update MCP link status was changed */
6804 if (params->feature_config_flags & FEATURE_CONFIG_BC_SUPPORTS_AFEX)
6805 bnx2x_fw_command(bp, DRV_MSG_CODE_LINK_STATUS_CHANGED, 0);
6806
6910 return rc; 6807 return rc;
6911} 6808}
6912 6809
@@ -7120,8 +7017,7 @@ static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
7120 } 7017 }
7121 /* XAUI workaround in 8073 A0: */ 7018 /* XAUI workaround in 8073 A0: */
7122 7019
7123 /* 7020 /* After loading the boot ROM and restarting Autoneg, poll
7124 * After loading the boot ROM and restarting Autoneg, poll
7125 * Dev1, Reg $C820: 7021 * Dev1, Reg $C820:
7126 */ 7022 */
7127 7023
@@ -7130,8 +7026,7 @@ static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
7130 MDIO_PMA_DEVAD, 7026 MDIO_PMA_DEVAD,
7131 MDIO_PMA_REG_8073_SPEED_LINK_STATUS, 7027 MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
7132 &val); 7028 &val);
7133 /* 7029 /* If bit [14] = 0 or bit [13] = 0, continue on with
7134 * If bit [14] = 0 or bit [13] = 0, continue on with
7135 * system initialization (XAUI work-around not required, as 7030 * system initialization (XAUI work-around not required, as
7136 * these bits indicate 2.5G or 1G link up). 7031 * these bits indicate 2.5G or 1G link up).
7137 */ 7032 */
@@ -7140,8 +7035,7 @@ static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
7140 return 0; 7035 return 0;
7141 } else if (!(val & (1<<15))) { 7036 } else if (!(val & (1<<15))) {
7142 DP(NETIF_MSG_LINK, "bit 15 went off\n"); 7037 DP(NETIF_MSG_LINK, "bit 15 went off\n");
7143 /* 7038 /* If bit 15 is 0, then poll Dev1, Reg $C841 until it's
7144 * If bit 15 is 0, then poll Dev1, Reg $C841 until it's
7145 * MSB (bit15) goes to 1 (indicating that the XAUI 7039 * MSB (bit15) goes to 1 (indicating that the XAUI
7146 * workaround has completed), then continue on with 7040 * workaround has completed), then continue on with
7147 * system initialization. 7041 * system initialization.
@@ -7291,8 +7185,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
7291 val = (1<<7); 7185 val = (1<<7);
7292 } else if (phy->req_line_speed == SPEED_2500) { 7186 } else if (phy->req_line_speed == SPEED_2500) {
7293 val = (1<<5); 7187 val = (1<<5);
7294 /* 7188 /* Note that 2.5G works only when used with 1G
7295 * Note that 2.5G works only when used with 1G
7296 * advertisement 7189 * advertisement
7297 */ 7190 */
7298 } else 7191 } else
@@ -7343,8 +7236,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
7343 /* Add support for CL37 (passive mode) III */ 7236 /* Add support for CL37 (passive mode) III */
7344 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); 7237 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
7345 7238
7346 /* 7239 /* The SNR will improve about 2db by changing BW and FEE main
7347 * The SNR will improve about 2db by changing BW and FEE main
7348 * tap. Rest commands are executed after link is up 7240 * tap. Rest commands are executed after link is up
7349 * Change FFE main cursor to 5 in EDC register 7241 * Change FFE main cursor to 5 in EDC register
7350 */ 7242 */
@@ -7431,8 +7323,7 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
7431 7323
7432 link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1))); 7324 link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1)));
7433 if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) { 7325 if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) {
7434 /* 7326 /* The SNR will improve about 2dbby changing the BW and FEE main
7435 * The SNR will improve about 2dbby changing the BW and FEE main
7436 * tap. The 1st write to change FFE main tap is set before 7327 * tap. The 1st write to change FFE main tap is set before
7437 * restart AN. Change PLL Bandwidth in EDC register 7328 * restart AN. Change PLL Bandwidth in EDC register
7438 */ 7329 */
@@ -7479,8 +7370,7 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
7479 bnx2x_cl45_read(bp, phy, 7370 bnx2x_cl45_read(bp, phy,
7480 MDIO_XS_DEVAD, 7371 MDIO_XS_DEVAD,
7481 MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1); 7372 MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1);
7482 /* 7373 /* Set bit 3 to invert Rx in 1G mode and clear this bit
7483 * Set bit 3 to invert Rx in 1G mode and clear this bit
7484 * when it`s in 10G mode. 7374 * when it`s in 10G mode.
7485 */ 7375 */
7486 if (vars->line_speed == SPEED_1000) { 7376 if (vars->line_speed == SPEED_1000) {
@@ -7602,8 +7492,7 @@ static void bnx2x_set_disable_pmd_transmit(struct link_params *params,
7602 u8 pmd_dis) 7492 u8 pmd_dis)
7603{ 7493{
7604 struct bnx2x *bp = params->bp; 7494 struct bnx2x *bp = params->bp;
7605 /* 7495 /* Disable transmitter only for bootcodes which can enable it afterwards
7606 * Disable transmitter only for bootcodes which can enable it afterwards
7607 * (for D3 link) 7496 * (for D3 link)
7608 */ 7497 */
7609 if (pmd_dis) { 7498 if (pmd_dis) {
@@ -7780,9 +7669,6 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7780 u32 data_array[4]; 7669 u32 data_array[4];
7781 u16 addr32; 7670 u16 addr32;
7782 struct bnx2x *bp = params->bp; 7671 struct bnx2x *bp = params->bp;
7783 /*DP(NETIF_MSG_LINK, "bnx2x_direct_read_sfp_module_eeprom:"
7784 " addr %d, cnt %d\n",
7785 addr, byte_cnt);*/
7786 if (byte_cnt > 16) { 7672 if (byte_cnt > 16) {
7787 DP(NETIF_MSG_LINK, 7673 DP(NETIF_MSG_LINK,
7788 "Reading from eeprom is limited to 16 bytes\n"); 7674 "Reading from eeprom is limited to 16 bytes\n");
@@ -7847,8 +7733,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
7847 MDIO_PMA_DEVAD, 7733 MDIO_PMA_DEVAD,
7848 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, 7734 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
7849 0x8002); 7735 0x8002);
7850 /* 7736 /* Wait appropriate time for two-wire command to finish before
7851 * Wait appropriate time for two-wire command to finish before
7852 * polling the status register 7737 * polling the status register
7853 */ 7738 */
7854 msleep(1); 7739 msleep(1);
@@ -7941,8 +7826,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
7941 { 7826 {
7942 u8 copper_module_type; 7827 u8 copper_module_type;
7943 phy->media_type = ETH_PHY_DA_TWINAX; 7828 phy->media_type = ETH_PHY_DA_TWINAX;
7944 /* 7829 /* Check if its active cable (includes SFP+ module)
7945 * Check if its active cable (includes SFP+ module)
7946 * of passive cable 7830 * of passive cable
7947 */ 7831 */
7948 if (bnx2x_read_sfp_module_eeprom(phy, 7832 if (bnx2x_read_sfp_module_eeprom(phy,
@@ -8019,8 +7903,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
8019 DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode); 7903 DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode);
8020 return 0; 7904 return 0;
8021} 7905}
8022/* 7906/* This function read the relevant field from the module (SFP+), and verify it
8023 * This function read the relevant field from the module (SFP+), and verify it
8024 * is compliant with this board 7907 * is compliant with this board
8025 */ 7908 */
8026static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy, 7909static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
@@ -8102,8 +7985,7 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
8102 u8 val; 7985 u8 val;
8103 struct bnx2x *bp = params->bp; 7986 struct bnx2x *bp = params->bp;
8104 u16 timeout; 7987 u16 timeout;
8105 /* 7988 /* Initialization time after hot-plug may take up to 300ms for
8106 * Initialization time after hot-plug may take up to 300ms for
8107 * some phys type ( e.g. JDSU ) 7989 * some phys type ( e.g. JDSU )
8108 */ 7990 */
8109 7991
@@ -8125,8 +8007,7 @@ static void bnx2x_8727_power_module(struct bnx2x *bp,
8125 u8 is_power_up) { 8007 u8 is_power_up) {
8126 /* Make sure GPIOs are not using for LED mode */ 8008 /* Make sure GPIOs are not using for LED mode */
8127 u16 val; 8009 u16 val;
8128 /* 8010 /* In the GPIO register, bit 4 is use to determine if the GPIOs are
8129 * In the GPIO register, bit 4 is use to determine if the GPIOs are
8130 * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for 8011 * operating as INPUT or as OUTPUT. Bit 1 is for input, and 0 for
8131 * output 8012 * output
8132 * Bits 0-1 determine the GPIOs value for OUTPUT in case bit 4 val is 0 8013 * Bits 0-1 determine the GPIOs value for OUTPUT in case bit 4 val is 0
@@ -8142,8 +8023,7 @@ static void bnx2x_8727_power_module(struct bnx2x *bp,
8142 if (is_power_up) 8023 if (is_power_up)
8143 val = (1<<4); 8024 val = (1<<4);
8144 else 8025 else
8145 /* 8026 /* Set GPIO control to OUTPUT, and set the power bit
8146 * Set GPIO control to OUTPUT, and set the power bit
8147 * to according to the is_power_up 8027 * to according to the is_power_up
8148 */ 8028 */
8149 val = (1<<1); 8029 val = (1<<1);
@@ -8177,8 +8057,7 @@ static int bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
8177 8057
8178 DP(NETIF_MSG_LINK, "Setting LRM MODE\n"); 8058 DP(NETIF_MSG_LINK, "Setting LRM MODE\n");
8179 8059
8180 /* 8060 /* Changing to LRM mode takes quite few seconds. So do it only
8181 * Changing to LRM mode takes quite few seconds. So do it only
8182 * if current mode is limiting (default is LRM) 8061 * if current mode is limiting (default is LRM)
8183 */ 8062 */
8184 if (cur_limiting_mode != EDC_MODE_LIMITING) 8063 if (cur_limiting_mode != EDC_MODE_LIMITING)
@@ -8313,8 +8192,7 @@ static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
8313 struct bnx2x *bp = params->bp; 8192 struct bnx2x *bp = params->bp;
8314 DP(NETIF_MSG_LINK, "Setting SFP+ module fault LED to %d\n", gpio_mode); 8193 DP(NETIF_MSG_LINK, "Setting SFP+ module fault LED to %d\n", gpio_mode);
8315 if (CHIP_IS_E3(bp)) { 8194 if (CHIP_IS_E3(bp)) {
8316 /* 8195 /* Low ==> if SFP+ module is supported otherwise
8317 * Low ==> if SFP+ module is supported otherwise
8318 * High ==> if SFP+ module is not on the approved vendor list 8196 * High ==> if SFP+ module is not on the approved vendor list
8319 */ 8197 */
8320 bnx2x_set_e3_module_fault_led(params, gpio_mode); 8198 bnx2x_set_e3_module_fault_led(params, gpio_mode);
@@ -8339,8 +8217,7 @@ static void bnx2x_warpcore_power_module(struct link_params *params,
8339 return; 8217 return;
8340 DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n", 8218 DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n",
8341 power, pin_cfg); 8219 power, pin_cfg);
8342 /* 8220 /* Low ==> corresponding SFP+ module is powered
8343 * Low ==> corresponding SFP+ module is powered
8344 * high ==> the SFP+ module is powered down 8221 * high ==> the SFP+ module is powered down
8345 */ 8222 */
8346 bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1); 8223 bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1);
@@ -8474,14 +8351,12 @@ int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
8474 bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW); 8351 bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW);
8475 } 8352 }
8476 8353
8477 /* 8354 /* Check and set limiting mode / LRM mode on 8726. On 8727 it
8478 * Check and set limiting mode / LRM mode on 8726. On 8727 it
8479 * is done automatically 8355 * is done automatically
8480 */ 8356 */
8481 bnx2x_set_limiting_mode(params, phy, edc_mode); 8357 bnx2x_set_limiting_mode(params, phy, edc_mode);
8482 8358
8483 /* 8359 /* Enable transmit for this module if the module is approved, or
8484 * Enable transmit for this module if the module is approved, or
8485 * if unapproved modules should also enable the Tx laser 8360 * if unapproved modules should also enable the Tx laser
8486 */ 8361 */
8487 if (rc == 0 || 8362 if (rc == 0 ||
@@ -8536,8 +8411,7 @@ void bnx2x_handle_module_detect_int(struct link_params *params)
8536 bnx2x_set_gpio_int(bp, gpio_num, 8411 bnx2x_set_gpio_int(bp, gpio_num,
8537 MISC_REGISTERS_GPIO_INT_OUTPUT_SET, 8412 MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
8538 gpio_port); 8413 gpio_port);
8539 /* 8414 /* Module was plugged out.
8540 * Module was plugged out.
8541 * Disable transmit for this module 8415 * Disable transmit for this module
8542 */ 8416 */
8543 phy->media_type = ETH_PHY_NOT_PRESENT; 8417 phy->media_type = ETH_PHY_NOT_PRESENT;
@@ -8607,8 +8481,7 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
8607 8481
8608 DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps" 8482 DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps"
8609 " link_status 0x%x\n", rx_sd, pcs_status, val2); 8483 " link_status 0x%x\n", rx_sd, pcs_status, val2);
8610 /* 8484 /* Link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status
8611 * link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status
8612 * are set, or if the autoneg bit 1 is set 8485 * are set, or if the autoneg bit 1 is set
8613 */ 8486 */
8614 link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1))); 8487 link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
@@ -8722,8 +8595,7 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
8722 } 8595 }
8723 bnx2x_save_bcm_spirom_ver(bp, phy, params->port); 8596 bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
8724 8597
8725 /* 8598 /* If TX Laser is controlled by GPIO_0, do not let PHY go into low
8726 * If TX Laser is controlled by GPIO_0, do not let PHY go into low
8727 * power mode, if TX Laser is disabled 8599 * power mode, if TX Laser is disabled
8728 */ 8600 */
8729 8601
@@ -8833,8 +8705,7 @@ static int bnx2x_8726_config_init(struct bnx2x_phy *phy,
8833 8705
8834 bnx2x_8726_external_rom_boot(phy, params); 8706 bnx2x_8726_external_rom_boot(phy, params);
8835 8707
8836 /* 8708 /* Need to call module detected on initialization since the module
8837 * Need to call module detected on initialization since the module
8838 * detection triggered by actual module insertion might occur before 8709 * detection triggered by actual module insertion might occur before
8839 * driver is loaded, and when driver is loaded, it reset all 8710 * driver is loaded, and when driver is loaded, it reset all
8840 * registers, including the transmitter 8711 * registers, including the transmitter
@@ -8871,8 +8742,7 @@ static int bnx2x_8726_config_init(struct bnx2x_phy *phy,
8871 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000); 8742 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
8872 bnx2x_cl45_write(bp, phy, 8743 bnx2x_cl45_write(bp, phy,
8873 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200); 8744 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
8874 /* 8745 /* Enable RX-ALARM control to receive interrupt for 1G speed
8875 * Enable RX-ALARM control to receive interrupt for 1G speed
8876 * change 8746 * change
8877 */ 8747 */
8878 bnx2x_cl45_write(bp, phy, 8748 bnx2x_cl45_write(bp, phy,
@@ -8973,8 +8843,7 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
8973 struct link_params *params) { 8843 struct link_params *params) {
8974 u32 swap_val, swap_override; 8844 u32 swap_val, swap_override;
8975 u8 port; 8845 u8 port;
8976 /* 8846 /* The PHY reset is controlled by GPIO 1. Fake the port number
8977 * The PHY reset is controlled by GPIO 1. Fake the port number
8978 * to cancel the swap done in set_gpio() 8847 * to cancel the swap done in set_gpio()
8979 */ 8848 */
8980 struct bnx2x *bp = params->bp; 8849 struct bnx2x *bp = params->bp;
@@ -9012,14 +8881,12 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
9012 bnx2x_cl45_write(bp, phy, 8881 bnx2x_cl45_write(bp, phy,
9013 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, lasi_ctrl_val); 8882 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, lasi_ctrl_val);
9014 8883
9015 /* 8884 /* Initially configure MOD_ABS to interrupt when module is
9016 * Initially configure MOD_ABS to interrupt when module is
9017 * presence( bit 8) 8885 * presence( bit 8)
9018 */ 8886 */
9019 bnx2x_cl45_read(bp, phy, 8887 bnx2x_cl45_read(bp, phy,
9020 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs); 8888 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
9021 /* 8889 /* Set EDC off by setting OPTXLOS signal input to low (bit 9).
9022 * Set EDC off by setting OPTXLOS signal input to low (bit 9).
9023 * When the EDC is off it locks onto a reference clock and avoids 8890 * When the EDC is off it locks onto a reference clock and avoids
9024 * becoming 'lost' 8891 * becoming 'lost'
9025 */ 8892 */
@@ -9040,8 +8907,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
9040 if (phy->flags & FLAGS_NOC) 8907 if (phy->flags & FLAGS_NOC)
9041 val |= (3<<5); 8908 val |= (3<<5);
9042 8909
9043 /* 8910 /* Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
9044 * Set 8727 GPIOs to input to allow reading from the 8727 GPIO0
9045 * status which reflect SFP+ module over-current 8911 * status which reflect SFP+ module over-current
9046 */ 8912 */
9047 if (!(phy->flags & FLAGS_NOC)) 8913 if (!(phy->flags & FLAGS_NOC))
@@ -9067,8 +8933,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
9067 bnx2x_cl45_read(bp, phy, 8933 bnx2x_cl45_read(bp, phy,
9068 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1); 8934 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
9069 DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1); 8935 DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
9070 /* 8936 /* Power down the XAUI until link is up in case of dual-media
9071 * Power down the XAUI until link is up in case of dual-media
9072 * and 1G 8937 * and 1G
9073 */ 8938 */
9074 if (DUAL_MEDIA(params)) { 8939 if (DUAL_MEDIA(params)) {
@@ -9093,8 +8958,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
9093 bnx2x_cl45_write(bp, phy, 8958 bnx2x_cl45_write(bp, phy,
9094 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300); 8959 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
9095 } else { 8960 } else {
9096 /* 8961 /* Since the 8727 has only single reset pin, need to set the 10G
9097 * Since the 8727 has only single reset pin, need to set the 10G
9098 * registers although it is default 8962 * registers although it is default
9099 */ 8963 */
9100 bnx2x_cl45_write(bp, phy, 8964 bnx2x_cl45_write(bp, phy,
@@ -9109,8 +8973,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
9109 0x0008); 8973 0x0008);
9110 } 8974 }
9111 8975
9112 /* 8976 /* Set 2-wire transfer rate of SFP+ module EEPROM
9113 * Set 2-wire transfer rate of SFP+ module EEPROM
9114 * to 100Khz since some DACs(direct attached cables) do 8977 * to 100Khz since some DACs(direct attached cables) do
9115 * not work at 400Khz. 8978 * not work at 400Khz.
9116 */ 8979 */
@@ -9133,8 +8996,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
9133 phy->tx_preemphasis[1]); 8996 phy->tx_preemphasis[1]);
9134 } 8997 }
9135 8998
9136 /* 8999 /* If TX Laser is controlled by GPIO_0, do not let PHY go into low
9137 * If TX Laser is controlled by GPIO_0, do not let PHY go into low
9138 * power mode, if TX Laser is disabled 9000 * power mode, if TX Laser is disabled
9139 */ 9001 */
9140 tx_en_mode = REG_RD(bp, params->shmem_base + 9002 tx_en_mode = REG_RD(bp, params->shmem_base +
@@ -9180,8 +9042,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
9180 DP(NETIF_MSG_LINK, 9042 DP(NETIF_MSG_LINK,
9181 "MOD_ABS indication show module is absent\n"); 9043 "MOD_ABS indication show module is absent\n");
9182 phy->media_type = ETH_PHY_NOT_PRESENT; 9044 phy->media_type = ETH_PHY_NOT_PRESENT;
9183 /* 9045 /* 1. Set mod_abs to detect next module
9184 * 1. Set mod_abs to detect next module
9185 * presence event 9046 * presence event
9186 * 2. Set EDC off by setting OPTXLOS signal input to low 9047 * 2. Set EDC off by setting OPTXLOS signal input to low
9187 * (bit 9). 9048 * (bit 9).
@@ -9195,8 +9056,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
9195 MDIO_PMA_DEVAD, 9056 MDIO_PMA_DEVAD,
9196 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); 9057 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
9197 9058
9198 /* 9059 /* Clear RX alarm since it stays up as long as
9199 * Clear RX alarm since it stays up as long as
9200 * the mod_abs wasn't changed 9060 * the mod_abs wasn't changed
9201 */ 9061 */
9202 bnx2x_cl45_read(bp, phy, 9062 bnx2x_cl45_read(bp, phy,
@@ -9207,8 +9067,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
9207 /* Module is present */ 9067 /* Module is present */
9208 DP(NETIF_MSG_LINK, 9068 DP(NETIF_MSG_LINK,
9209 "MOD_ABS indication show module is present\n"); 9069 "MOD_ABS indication show module is present\n");
9210 /* 9070 /* First disable transmitter, and if the module is ok, the
9211 * First disable transmitter, and if the module is ok, the
9212 * module_detection will enable it 9071 * module_detection will enable it
9213 * 1. Set mod_abs to detect next module absent event ( bit 8) 9072 * 1. Set mod_abs to detect next module absent event ( bit 8)
9214 * 2. Restore the default polarity of the OPRXLOS signal and 9073 * 2. Restore the default polarity of the OPRXLOS signal and
@@ -9222,8 +9081,7 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
9222 MDIO_PMA_DEVAD, 9081 MDIO_PMA_DEVAD,
9223 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs); 9082 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
9224 9083
9225 /* 9084 /* Clear RX alarm since it stays up as long as the mod_abs
9226 * Clear RX alarm since it stays up as long as the mod_abs
9227 * wasn't changed. This is need to be done before calling the 9085 * wasn't changed. This is need to be done before calling the
9228 * module detection, otherwise it will clear* the link update 9086 * module detection, otherwise it will clear* the link update
9229 * alarm 9087 * alarm
@@ -9284,8 +9142,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
9284 bnx2x_cl45_read(bp, phy, 9142 bnx2x_cl45_read(bp, phy,
9285 MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1); 9143 MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
9286 9144
9287 /* 9145 /* If a module is present and there is need to check
9288 * If a module is present and there is need to check
9289 * for over current 9146 * for over current
9290 */ 9147 */
9291 if (!(phy->flags & FLAGS_NOC) && !(rx_alarm_status & (1<<5))) { 9148 if (!(phy->flags & FLAGS_NOC) && !(rx_alarm_status & (1<<5))) {
@@ -9350,8 +9207,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
9350 MDIO_PMA_DEVAD, 9207 MDIO_PMA_DEVAD,
9351 MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status); 9208 MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status);
9352 9209
9353 /* 9210 /* Bits 0..2 --> speed detected,
9354 * Bits 0..2 --> speed detected,
9355 * Bits 13..15--> link is down 9211 * Bits 13..15--> link is down
9356 */ 9212 */
9357 if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) { 9213 if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
@@ -9394,8 +9250,7 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
9394 bnx2x_cl45_read(bp, phy, 9250 bnx2x_cl45_read(bp, phy,
9395 MDIO_PMA_DEVAD, 9251 MDIO_PMA_DEVAD,
9396 MDIO_PMA_REG_8727_PCS_GP, &val1); 9252 MDIO_PMA_REG_8727_PCS_GP, &val1);
9397 /* 9253 /* In case of dual-media board and 1G, power up the XAUI side,
9398 * In case of dual-media board and 1G, power up the XAUI side,
9399 * otherwise power it down. For 10G it is done automatically 9254 * otherwise power it down. For 10G it is done automatically
9400 */ 9255 */
9401 if (link_up) 9256 if (link_up)
@@ -9561,8 +9416,7 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
9561 /* Save spirom version */ 9416 /* Save spirom version */
9562 bnx2x_save_848xx_spirom_version(phy, bp, params->port); 9417 bnx2x_save_848xx_spirom_version(phy, bp, params->port);
9563 } 9418 }
9564 /* 9419 /* This phy uses the NIG latch mechanism since link indication
9565 * This phy uses the NIG latch mechanism since link indication
9566 * arrives through its LED4 and not via its LASI signal, so we 9420 * arrives through its LED4 and not via its LASI signal, so we
9567 * get steady signal instead of clear on read 9421 * get steady signal instead of clear on read
9568 */ 9422 */
@@ -9667,8 +9521,7 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
9667 if (phy->req_duplex == DUPLEX_FULL) 9521 if (phy->req_duplex == DUPLEX_FULL)
9668 autoneg_val |= (1<<8); 9522 autoneg_val |= (1<<8);
9669 9523
9670 /* 9524 /* Always write this if this is not 84833.
9671 * Always write this if this is not 84833.
9672 * For 84833, write it only when it's a forced speed. 9525 * For 84833, write it only when it's a forced speed.
9673 */ 9526 */
9674 if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) || 9527 if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
@@ -9916,8 +9769,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
9916 /* Wait for GPHY to come out of reset */ 9769 /* Wait for GPHY to come out of reset */
9917 msleep(50); 9770 msleep(50);
9918 if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) { 9771 if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
9919 /* 9772 /* BCM84823 requires that XGXS links up first @ 10G for normal
9920 * BCM84823 requires that XGXS links up first @ 10G for normal
9921 * behavior. 9773 * behavior.
9922 */ 9774 */
9923 u16 temp; 9775 u16 temp;
@@ -10393,8 +10245,7 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
10393 break; 10245 break;
10394 } 10246 }
10395 10247
10396 /* 10248 /* This is a workaround for E3+84833 until autoneg
10397 * This is a workaround for E3+84833 until autoneg
10398 * restart is fixed in f/w 10249 * restart is fixed in f/w
10399 */ 10250 */
10400 if (CHIP_IS_E3(bp)) { 10251 if (CHIP_IS_E3(bp)) {
@@ -10418,8 +10269,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
10418 DP(NETIF_MSG_LINK, "54618SE cfg init\n"); 10269 DP(NETIF_MSG_LINK, "54618SE cfg init\n");
10419 usleep_range(1000, 1000); 10270 usleep_range(1000, 1000);
10420 10271
10421 /* 10272 /* This works with E3 only, no need to check the chip
10422 * This works with E3 only, no need to check the chip
10423 * before determining the port. 10273 * before determining the port.
10424 */ 10274 */
10425 port = params->port; 10275 port = params->port;
@@ -10441,7 +10291,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
10441 MDIO_PMA_REG_CTRL, 0x8000); 10291 MDIO_PMA_REG_CTRL, 0x8000);
10442 bnx2x_wait_reset_complete(bp, phy, params); 10292 bnx2x_wait_reset_complete(bp, phy, params);
10443 10293
10444 /*wait for GPHY to reset */ 10294 /* Wait for GPHY to reset */
10445 msleep(50); 10295 msleep(50);
10446 10296
10447 /* Configure LED4: set to INTR (0x6). */ 10297 /* Configure LED4: set to INTR (0x6). */
@@ -10647,13 +10497,11 @@ static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy,
10647 u32 cfg_pin; 10497 u32 cfg_pin;
10648 u8 port; 10498 u8 port;
10649 10499
10650 /* 10500 /* In case of no EPIO routed to reset the GPHY, put it
10651 * In case of no EPIO routed to reset the GPHY, put it
10652 * in low power mode. 10501 * in low power mode.
10653 */ 10502 */
10654 bnx2x_cl22_write(bp, phy, MDIO_PMA_REG_CTRL, 0x800); 10503 bnx2x_cl22_write(bp, phy, MDIO_PMA_REG_CTRL, 0x800);
10655 /* 10504 /* This works with E3 only, no need to check the chip
10656 * This works with E3 only, no need to check the chip
10657 * before determining the port. 10505 * before determining the port.
10658 */ 10506 */
10659 port = params->port; 10507 port = params->port;
@@ -10762,7 +10610,7 @@ static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy,
10762 bnx2x_ext_phy_resolve_fc(phy, params, vars); 10610 bnx2x_ext_phy_resolve_fc(phy, params, vars);
10763 10611
10764 if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) { 10612 if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
10765 /* report LP advertised speeds */ 10613 /* Report LP advertised speeds */
10766 bnx2x_cl22_read(bp, phy, 0x5, &val); 10614 bnx2x_cl22_read(bp, phy, 0x5, &val);
10767 10615
10768 if (val & (1<<5)) 10616 if (val & (1<<5))
@@ -10827,8 +10675,7 @@ static void bnx2x_54618se_config_loopback(struct bnx2x_phy *phy,
10827 /* This register opens the gate for the UMAC despite its name */ 10675 /* This register opens the gate for the UMAC despite its name */
10828 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1); 10676 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);
10829 10677
10830 /* 10678 /* Maximum Frame Length (RW). Defines a 14-Bit maximum frame
10831 * Maximum Frame Length (RW). Defines a 14-Bit maximum frame
10832 * length used by the MAC receive logic to check frames. 10679 * length used by the MAC receive logic to check frames.
10833 */ 10680 */
10834 REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710); 10681 REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710);
@@ -11101,22 +10948,23 @@ static struct bnx2x_phy phy_warpcore = {
11101 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT, 10948 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
11102 .addr = 0xff, 10949 .addr = 0xff,
11103 .def_md_devad = 0, 10950 .def_md_devad = 0,
11104 .flags = FLAGS_HW_LOCK_REQUIRED, 10951 .flags = (FLAGS_HW_LOCK_REQUIRED |
10952 FLAGS_TX_ERROR_CHECK),
11105 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10953 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11106 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 10954 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11107 .mdio_ctrl = 0, 10955 .mdio_ctrl = 0,
11108 .supported = (SUPPORTED_10baseT_Half | 10956 .supported = (SUPPORTED_10baseT_Half |
11109 SUPPORTED_10baseT_Full | 10957 SUPPORTED_10baseT_Full |
11110 SUPPORTED_100baseT_Half | 10958 SUPPORTED_100baseT_Half |
11111 SUPPORTED_100baseT_Full | 10959 SUPPORTED_100baseT_Full |
11112 SUPPORTED_1000baseT_Full | 10960 SUPPORTED_1000baseT_Full |
11113 SUPPORTED_10000baseT_Full | 10961 SUPPORTED_10000baseT_Full |
11114 SUPPORTED_20000baseKR2_Full | 10962 SUPPORTED_20000baseKR2_Full |
11115 SUPPORTED_20000baseMLD2_Full | 10963 SUPPORTED_20000baseMLD2_Full |
11116 SUPPORTED_FIBRE | 10964 SUPPORTED_FIBRE |
11117 SUPPORTED_Autoneg | 10965 SUPPORTED_Autoneg |
11118 SUPPORTED_Pause | 10966 SUPPORTED_Pause |
11119 SUPPORTED_Asym_Pause), 10967 SUPPORTED_Asym_Pause),
11120 .media_type = ETH_PHY_UNSPECIFIED, 10968 .media_type = ETH_PHY_UNSPECIFIED,
11121 .ver_addr = 0, 10969 .ver_addr = 0,
11122 .req_flow_ctrl = 0, 10970 .req_flow_ctrl = 0,
@@ -11258,7 +11106,8 @@ static struct bnx2x_phy phy_8726 = {
11258 .addr = 0xff, 11106 .addr = 0xff,
11259 .def_md_devad = 0, 11107 .def_md_devad = 0,
11260 .flags = (FLAGS_HW_LOCK_REQUIRED | 11108 .flags = (FLAGS_HW_LOCK_REQUIRED |
11261 FLAGS_INIT_XGXS_FIRST), 11109 FLAGS_INIT_XGXS_FIRST |
11110 FLAGS_TX_ERROR_CHECK),
11262 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11111 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11263 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11112 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11264 .mdio_ctrl = 0, 11113 .mdio_ctrl = 0,
@@ -11289,7 +11138,8 @@ static struct bnx2x_phy phy_8727 = {
11289 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727, 11138 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
11290 .addr = 0xff, 11139 .addr = 0xff,
11291 .def_md_devad = 0, 11140 .def_md_devad = 0,
11292 .flags = FLAGS_FAN_FAILURE_DET_REQ, 11141 .flags = (FLAGS_FAN_FAILURE_DET_REQ |
11142 FLAGS_TX_ERROR_CHECK),
11293 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11143 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11294 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11144 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11295 .mdio_ctrl = 0, 11145 .mdio_ctrl = 0,
@@ -11354,8 +11204,9 @@ static struct bnx2x_phy phy_84823 = {
11354 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823, 11204 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823,
11355 .addr = 0xff, 11205 .addr = 0xff,
11356 .def_md_devad = 0, 11206 .def_md_devad = 0,
11357 .flags = FLAGS_FAN_FAILURE_DET_REQ | 11207 .flags = (FLAGS_FAN_FAILURE_DET_REQ |
11358 FLAGS_REARM_LATCH_SIGNAL, 11208 FLAGS_REARM_LATCH_SIGNAL |
11209 FLAGS_TX_ERROR_CHECK),
11359 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11210 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11360 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11211 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11361 .mdio_ctrl = 0, 11212 .mdio_ctrl = 0,
@@ -11390,8 +11241,9 @@ static struct bnx2x_phy phy_84833 = {
11390 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833, 11241 .type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833,
11391 .addr = 0xff, 11242 .addr = 0xff,
11392 .def_md_devad = 0, 11243 .def_md_devad = 0,
11393 .flags = FLAGS_FAN_FAILURE_DET_REQ | 11244 .flags = (FLAGS_FAN_FAILURE_DET_REQ |
11394 FLAGS_REARM_LATCH_SIGNAL, 11245 FLAGS_REARM_LATCH_SIGNAL |
11246 FLAGS_TX_ERROR_CHECK),
11395 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11247 .rx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11396 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff}, 11248 .tx_preemphasis = {0xffff, 0xffff, 0xffff, 0xffff},
11397 .mdio_ctrl = 0, 11249 .mdio_ctrl = 0,
@@ -11466,9 +11318,8 @@ static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
11466 /* Get the 4 lanes xgxs config rx and tx */ 11318 /* Get the 4 lanes xgxs config rx and tx */
11467 u32 rx = 0, tx = 0, i; 11319 u32 rx = 0, tx = 0, i;
11468 for (i = 0; i < 2; i++) { 11320 for (i = 0; i < 2; i++) {
11469 /* 11321 /* INT_PHY and EXT_PHY1 share the same value location in
11470 * INT_PHY and EXT_PHY1 share the same value location in the 11322 * the shmem. When num_phys is greater than 1, than this value
11471 * shmem. When num_phys is greater than 1, than this value
11472 * applies only to EXT_PHY1 11323 * applies only to EXT_PHY1
11473 */ 11324 */
11474 if (phy_index == INT_PHY || phy_index == EXT_PHY1) { 11325 if (phy_index == INT_PHY || phy_index == EXT_PHY1) {
@@ -11546,8 +11397,7 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
11546 offsetof(struct shmem_region, dev_info. 11397 offsetof(struct shmem_region, dev_info.
11547 port_hw_config[port].default_cfg)) & 11398 port_hw_config[port].default_cfg)) &
11548 PORT_HW_CFG_NET_SERDES_IF_MASK); 11399 PORT_HW_CFG_NET_SERDES_IF_MASK);
11549 /* 11400 /* Set the appropriate supported and flags indications per
11550 * Set the appropriate supported and flags indications per
11551 * interface type of the chip 11401 * interface type of the chip
11552 */ 11402 */
11553 switch (serdes_net_if) { 11403 switch (serdes_net_if) {
@@ -11605,8 +11455,7 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
11605 break; 11455 break;
11606 } 11456 }
11607 11457
11608 /* 11458 /* Enable MDC/MDIO work-around for E3 A0 since free running MDC
11609 * Enable MDC/MDIO work-around for E3 A0 since free running MDC
11610 * was not set as expected. For B0, ECO will be enabled so there 11459 * was not set as expected. For B0, ECO will be enabled so there
11611 * won't be an issue there 11460 * won't be an issue there
11612 */ 11461 */
@@ -11719,8 +11568,7 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
11719 phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config); 11568 phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
11720 bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index); 11569 bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index);
11721 11570
11722 /* 11571 /* The shmem address of the phy version is located on different
11723 * The shmem address of the phy version is located on different
11724 * structures. In case this structure is too old, do not set 11572 * structures. In case this structure is too old, do not set
11725 * the address 11573 * the address
11726 */ 11574 */
@@ -11754,8 +11602,7 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
11754 11602
11755 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) && 11603 if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
11756 (phy->ver_addr)) { 11604 (phy->ver_addr)) {
11757 /* 11605 /* Remove 100Mb link supported for BCM84833 when phy fw
11758 * Remove 100Mb link supported for BCM84833 when phy fw
11759 * version lower than or equal to 1.39 11606 * version lower than or equal to 1.39
11760 */ 11607 */
11761 u32 raw_ver = REG_RD(bp, phy->ver_addr); 11608 u32 raw_ver = REG_RD(bp, phy->ver_addr);
@@ -11765,8 +11612,7 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
11765 SUPPORTED_100baseT_Full); 11612 SUPPORTED_100baseT_Full);
11766 } 11613 }
11767 11614
11768 /* 11615 /* In case mdc/mdio_access of the external phy is different than the
11769 * In case mdc/mdio_access of the external phy is different than the
11770 * mdc/mdio access of the XGXS, a HW lock must be taken in each access 11616 * mdc/mdio access of the XGXS, a HW lock must be taken in each access
11771 * to prevent one port interfere with another port's CL45 operations. 11617 * to prevent one port interfere with another port's CL45 operations.
11772 */ 11618 */
@@ -11936,13 +11782,16 @@ int bnx2x_phy_probe(struct link_params *params)
11936 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) 11782 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)
11937 break; 11783 break;
11938 11784
11785 if (params->feature_config_flags &
11786 FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET)
11787 phy->flags &= ~FLAGS_TX_ERROR_CHECK;
11788
11939 sync_offset = params->shmem_base + 11789 sync_offset = params->shmem_base +
11940 offsetof(struct shmem_region, 11790 offsetof(struct shmem_region,
11941 dev_info.port_hw_config[params->port].media_type); 11791 dev_info.port_hw_config[params->port].media_type);
11942 media_types = REG_RD(bp, sync_offset); 11792 media_types = REG_RD(bp, sync_offset);
11943 11793
11944 /* 11794 /* Update media type for non-PMF sync only for the first time
11945 * Update media type for non-PMF sync only for the first time
11946 * In case the media type changes afterwards, it will be updated 11795 * In case the media type changes afterwards, it will be updated
11947 * using the update_status function 11796 * using the update_status function
11948 */ 11797 */
@@ -12016,8 +11865,7 @@ void bnx2x_init_xmac_loopback(struct link_params *params,
12016 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; 11865 vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
12017 vars->mac_type = MAC_TYPE_XMAC; 11866 vars->mac_type = MAC_TYPE_XMAC;
12018 vars->phy_flags = PHY_XGXS_FLAG; 11867 vars->phy_flags = PHY_XGXS_FLAG;
12019 /* 11868 /* Set WC to loopback mode since link is required to provide clock
12020 * Set WC to loopback mode since link is required to provide clock
12021 * to the XMAC in 20G mode 11869 * to the XMAC in 20G mode
12022 */ 11870 */
12023 bnx2x_set_aer_mmd(params, &params->phy[0]); 11871 bnx2x_set_aer_mmd(params, &params->phy[0]);
@@ -12162,6 +12010,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
12162 bnx2x_link_int_enable(params); 12010 bnx2x_link_int_enable(params);
12163 break; 12011 break;
12164 } 12012 }
12013 bnx2x_update_mng(params, vars->link_status);
12165 return 0; 12014 return 0;
12166} 12015}
12167 12016
@@ -12302,7 +12151,8 @@ static int bnx2x_8073_common_init_phy(struct bnx2x *bp,
12302 NIG_MASK_MI_INT)); 12151 NIG_MASK_MI_INT));
12303 12152
12304 /* Need to take the phy out of low power mode in order 12153 /* Need to take the phy out of low power mode in order
12305 to write to access its registers */ 12154 * to write to access its registers
12155 */
12306 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2, 12156 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
12307 MISC_REGISTERS_GPIO_OUTPUT_HIGH, 12157 MISC_REGISTERS_GPIO_OUTPUT_HIGH,
12308 port); 12158 port);
@@ -12350,8 +12200,7 @@ static int bnx2x_8073_common_init_phy(struct bnx2x *bp,
12350 (val | 1<<10)); 12200 (val | 1<<10));
12351 } 12201 }
12352 12202
12353 /* 12203 /* Toggle Transmitter: Power down and then up with 600ms delay
12354 * Toggle Transmitter: Power down and then up with 600ms delay
12355 * between 12204 * between
12356 */ 12205 */
12357 msleep(600); 12206 msleep(600);
@@ -12494,8 +12343,7 @@ static int bnx2x_8727_common_init_phy(struct bnx2x *bp,
12494 reset_gpio = MISC_REGISTERS_GPIO_1; 12343 reset_gpio = MISC_REGISTERS_GPIO_1;
12495 port = 1; 12344 port = 1;
12496 12345
12497 /* 12346 /* Retrieve the reset gpio/port which control the reset.
12498 * Retrieve the reset gpio/port which control the reset.
12499 * Default is GPIO1, PORT1 12347 * Default is GPIO1, PORT1
12500 */ 12348 */
12501 bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0], 12349 bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0],
@@ -12670,8 +12518,7 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
12670 break; 12518 break;
12671 12519
12672 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: 12520 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
12673 /* 12521 /* GPIO1 affects both ports, so there's need to pull
12674 * GPIO1 affects both ports, so there's need to pull
12675 * it for single port alone 12522 * it for single port alone
12676 */ 12523 */
12677 rc = bnx2x_8726_common_init_phy(bp, shmem_base_path, 12524 rc = bnx2x_8726_common_init_phy(bp, shmem_base_path,
@@ -12679,8 +12526,7 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
12679 phy_index, chip_id); 12526 phy_index, chip_id);
12680 break; 12527 break;
12681 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833: 12528 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
12682 /* 12529 /* GPIO3's are linked, and so both need to be toggled
12683 * GPIO3's are linked, and so both need to be toggled
12684 * to obtain required 2us pulse. 12530 * to obtain required 2us pulse.
12685 */ 12531 */
12686 rc = bnx2x_84833_common_init_phy(bp, shmem_base_path, 12532 rc = bnx2x_84833_common_init_phy(bp, shmem_base_path,
@@ -12779,7 +12625,8 @@ static void bnx2x_check_over_curr(struct link_params *params,
12779} 12625}
12780 12626
12781static void bnx2x_analyze_link_error(struct link_params *params, 12627static void bnx2x_analyze_link_error(struct link_params *params,
12782 struct link_vars *vars, u32 lss_status) 12628 struct link_vars *vars, u32 lss_status,
12629 u8 notify)
12783{ 12630{
12784 struct bnx2x *bp = params->bp; 12631 struct bnx2x *bp = params->bp;
12785 /* Compare new value with previous value */ 12632 /* Compare new value with previous value */
@@ -12793,8 +12640,7 @@ static void bnx2x_analyze_link_error(struct link_params *params,
12793 DP(NETIF_MSG_LINK, "Link changed:%x %x->%x\n", vars->link_up, 12640 DP(NETIF_MSG_LINK, "Link changed:%x %x->%x\n", vars->link_up,
12794 half_open_conn, lss_status); 12641 half_open_conn, lss_status);
12795 12642
12796 /* 12643 /* a. Update shmem->link_status accordingly
12797 * a. Update shmem->link_status accordingly
12798 * b. Update link_vars->link_up 12644 * b. Update link_vars->link_up
12799 */ 12645 */
12800 if (lss_status) { 12646 if (lss_status) {
@@ -12802,8 +12648,10 @@ static void bnx2x_analyze_link_error(struct link_params *params,
12802 vars->link_status &= ~LINK_STATUS_LINK_UP; 12648 vars->link_status &= ~LINK_STATUS_LINK_UP;
12803 vars->link_up = 0; 12649 vars->link_up = 0;
12804 vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; 12650 vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
12805 /* 12651
12806 * Set LED mode to off since the PHY doesn't know about these 12652 /* activate nig drain */
12653 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1);
12654 /* Set LED mode to off since the PHY doesn't know about these
12807 * errors 12655 * errors
12808 */ 12656 */
12809 led_mode = LED_MODE_OFF; 12657 led_mode = LED_MODE_OFF;
@@ -12813,7 +12661,11 @@ static void bnx2x_analyze_link_error(struct link_params *params,
12813 vars->link_up = 1; 12661 vars->link_up = 1;
12814 vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG; 12662 vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
12815 led_mode = LED_MODE_OPER; 12663 led_mode = LED_MODE_OPER;
12664
12665 /* Clear nig drain */
12666 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
12816 } 12667 }
12668 bnx2x_sync_link(params, vars);
12817 /* Update the LED according to the link state */ 12669 /* Update the LED according to the link state */
12818 bnx2x_set_led(params, vars, led_mode, SPEED_10000); 12670 bnx2x_set_led(params, vars, led_mode, SPEED_10000);
12819 12671
@@ -12822,7 +12674,8 @@ static void bnx2x_analyze_link_error(struct link_params *params,
12822 12674
12823 /* C. Trigger General Attention */ 12675 /* C. Trigger General Attention */
12824 vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT; 12676 vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT;
12825 bnx2x_notify_link_changed(bp); 12677 if (notify)
12678 bnx2x_notify_link_changed(bp);
12826} 12679}
12827 12680
12828/****************************************************************************** 12681/******************************************************************************
@@ -12834,22 +12687,23 @@ static void bnx2x_analyze_link_error(struct link_params *params,
12834* a fault, for example, due to break in the TX side of fiber. 12687* a fault, for example, due to break in the TX side of fiber.
12835* 12688*
12836******************************************************************************/ 12689******************************************************************************/
12837static void bnx2x_check_half_open_conn(struct link_params *params, 12690int bnx2x_check_half_open_conn(struct link_params *params,
12838 struct link_vars *vars) 12691 struct link_vars *vars,
12692 u8 notify)
12839{ 12693{
12840 struct bnx2x *bp = params->bp; 12694 struct bnx2x *bp = params->bp;
12841 u32 lss_status = 0; 12695 u32 lss_status = 0;
12842 u32 mac_base; 12696 u32 mac_base;
12843 /* In case link status is physically up @ 10G do */ 12697 /* In case link status is physically up @ 10G do */
12844 if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) 12698 if (((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0) ||
12845 return; 12699 (REG_RD(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4)))
12700 return 0;
12846 12701
12847 if (CHIP_IS_E3(bp) && 12702 if (CHIP_IS_E3(bp) &&
12848 (REG_RD(bp, MISC_REG_RESET_REG_2) & 12703 (REG_RD(bp, MISC_REG_RESET_REG_2) &
12849 (MISC_REGISTERS_RESET_REG_2_XMAC))) { 12704 (MISC_REGISTERS_RESET_REG_2_XMAC))) {
12850 /* Check E3 XMAC */ 12705 /* Check E3 XMAC */
12851 /* 12706 /* Note that link speed cannot be queried here, since it may be
12852 * Note that link speed cannot be queried here, since it may be
12853 * zero while link is down. In case UMAC is active, LSS will 12707 * zero while link is down. In case UMAC is active, LSS will
12854 * simply not be set 12708 * simply not be set
12855 */ 12709 */
@@ -12863,7 +12717,7 @@ static void bnx2x_check_half_open_conn(struct link_params *params,
12863 if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS)) 12717 if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS))
12864 lss_status = 1; 12718 lss_status = 1;
12865 12719
12866 bnx2x_analyze_link_error(params, vars, lss_status); 12720 bnx2x_analyze_link_error(params, vars, lss_status, notify);
12867 } else if (REG_RD(bp, MISC_REG_RESET_REG_2) & 12721 } else if (REG_RD(bp, MISC_REG_RESET_REG_2) &
12868 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) { 12722 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) {
12869 /* Check E1X / E2 BMAC */ 12723 /* Check E1X / E2 BMAC */
@@ -12880,18 +12734,21 @@ static void bnx2x_check_half_open_conn(struct link_params *params,
12880 REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2); 12734 REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2);
12881 lss_status = (wb_data[0] > 0); 12735 lss_status = (wb_data[0] > 0);
12882 12736
12883 bnx2x_analyze_link_error(params, vars, lss_status); 12737 bnx2x_analyze_link_error(params, vars, lss_status, notify);
12884 } 12738 }
12739 return 0;
12885} 12740}
12886 12741
12887void bnx2x_period_func(struct link_params *params, struct link_vars *vars) 12742void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
12888{ 12743{
12889 struct bnx2x *bp = params->bp;
12890 u16 phy_idx; 12744 u16 phy_idx;
12745 struct bnx2x *bp = params->bp;
12891 for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) { 12746 for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
12892 if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) { 12747 if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
12893 bnx2x_set_aer_mmd(params, &params->phy[phy_idx]); 12748 bnx2x_set_aer_mmd(params, &params->phy[phy_idx]);
12894 bnx2x_check_half_open_conn(params, vars); 12749 if (bnx2x_check_half_open_conn(params, vars, 1) !=
12750 0)
12751 DP(NETIF_MSG_LINK, "Fault detection failed\n");
12895 break; 12752 break;
12896 } 12753 }
12897 } 12754 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 763535ee4832..ea4371f4335f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -254,8 +254,10 @@ struct link_params {
254#define FEATURE_CONFIG_PFC_ENABLED (1<<1) 254#define FEATURE_CONFIG_PFC_ENABLED (1<<1)
255#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2) 255#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
256#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3) 256#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3)
257#define FEATURE_CONFIG_BC_SUPPORTS_AFEX (1<<8)
257#define FEATURE_CONFIG_AUTOGREEEN_ENABLED (1<<9) 258#define FEATURE_CONFIG_AUTOGREEEN_ENABLED (1<<9)
258#define FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED (1<<10) 259#define FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED (1<<10)
260#define FEATURE_CONFIG_DISABLE_REMOTE_FAULT_DET (1<<11)
259 /* Will be populated during common init */ 261 /* Will be populated during common init */
260 struct bnx2x_phy phy[MAX_PHYS]; 262 struct bnx2x_phy phy[MAX_PHYS];
261 263
@@ -495,4 +497,6 @@ int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
495 497
496void bnx2x_period_func(struct link_params *params, struct link_vars *vars); 498void bnx2x_period_func(struct link_params *params, struct link_vars *vars);
497 499
500int bnx2x_check_half_open_conn(struct link_params *params,
501 struct link_vars *vars, u8 notify);
498#endif /* BNX2X_LINK_H */ 502#endif /* BNX2X_LINK_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 6af310195bae..f755a665dab3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -39,7 +39,6 @@
39#include <linux/time.h> 39#include <linux/time.h>
40#include <linux/ethtool.h> 40#include <linux/ethtool.h>
41#include <linux/mii.h> 41#include <linux/mii.h>
42#include <linux/if.h>
43#include <linux/if_vlan.h> 42#include <linux/if_vlan.h>
44#include <net/ip.h> 43#include <net/ip.h>
45#include <net/ipv6.h> 44#include <net/ipv6.h>
@@ -93,15 +92,11 @@ MODULE_FIRMWARE(FW_FILE_NAME_E1);
93MODULE_FIRMWARE(FW_FILE_NAME_E1H); 92MODULE_FIRMWARE(FW_FILE_NAME_E1H);
94MODULE_FIRMWARE(FW_FILE_NAME_E2); 93MODULE_FIRMWARE(FW_FILE_NAME_E2);
95 94
96static int multi_mode = 1;
97module_param(multi_mode, int, 0);
98MODULE_PARM_DESC(multi_mode, " Multi queue mode "
99 "(0 Disable; 1 Enable (default))");
100 95
101int num_queues; 96int num_queues;
102module_param(num_queues, int, 0); 97module_param(num_queues, int, 0);
103MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1" 98MODULE_PARM_DESC(num_queues,
104 " (default is as a number of CPUs)"); 99 " Set number of queues (default is as a number of CPUs)");
105 100
106static int disable_tpa; 101static int disable_tpa;
107module_param(disable_tpa, int, 0); 102module_param(disable_tpa, int, 0);
@@ -141,7 +136,9 @@ enum bnx2x_board_type {
141 BCM57810, 136 BCM57810,
142 BCM57810_MF, 137 BCM57810_MF,
143 BCM57840, 138 BCM57840,
144 BCM57840_MF 139 BCM57840_MF,
140 BCM57811,
141 BCM57811_MF
145}; 142};
146 143
147/* indexed by board_type, above */ 144/* indexed by board_type, above */
@@ -158,8 +155,9 @@ static struct {
158 { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" }, 155 { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
159 { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" }, 156 { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
160 { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" }, 157 { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
161 { "Broadcom NetXtreme II BCM57840 10/20 Gigabit " 158 { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function"},
162 "Ethernet Multi Function"} 159 { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet"},
160 { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function"},
163}; 161};
164 162
165#ifndef PCI_DEVICE_ID_NX2_57710 163#ifndef PCI_DEVICE_ID_NX2_57710
@@ -195,6 +193,12 @@ static struct {
195#ifndef PCI_DEVICE_ID_NX2_57840_MF 193#ifndef PCI_DEVICE_ID_NX2_57840_MF
196#define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF 194#define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF
197#endif 195#endif
196#ifndef PCI_DEVICE_ID_NX2_57811
197#define PCI_DEVICE_ID_NX2_57811 CHIP_NUM_57811
198#endif
199#ifndef PCI_DEVICE_ID_NX2_57811_MF
200#define PCI_DEVICE_ID_NX2_57811_MF CHIP_NUM_57811_MF
201#endif
198static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = { 202static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
199 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 }, 203 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
200 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 }, 204 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
@@ -207,6 +211,8 @@ static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
207 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF }, 211 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
208 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840), BCM57840 }, 212 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840), BCM57840 },
209 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF }, 213 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
214 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
215 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
210 { 0 } 216 { 0 }
211}; 217};
212 218
@@ -220,15 +226,15 @@ static LIST_HEAD(bnx2x_prev_list);
220* General service functions 226* General service functions
221****************************************************************************/ 227****************************************************************************/
222 228
223static inline void __storm_memset_dma_mapping(struct bnx2x *bp, 229static void __storm_memset_dma_mapping(struct bnx2x *bp,
224 u32 addr, dma_addr_t mapping) 230 u32 addr, dma_addr_t mapping)
225{ 231{
226 REG_WR(bp, addr, U64_LO(mapping)); 232 REG_WR(bp, addr, U64_LO(mapping));
227 REG_WR(bp, addr + 4, U64_HI(mapping)); 233 REG_WR(bp, addr + 4, U64_HI(mapping));
228} 234}
229 235
230static inline void storm_memset_spq_addr(struct bnx2x *bp, 236static void storm_memset_spq_addr(struct bnx2x *bp,
231 dma_addr_t mapping, u16 abs_fid) 237 dma_addr_t mapping, u16 abs_fid)
232{ 238{
233 u32 addr = XSEM_REG_FAST_MEMORY + 239 u32 addr = XSEM_REG_FAST_MEMORY +
234 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid); 240 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
@@ -236,8 +242,8 @@ static inline void storm_memset_spq_addr(struct bnx2x *bp,
236 __storm_memset_dma_mapping(bp, addr, mapping); 242 __storm_memset_dma_mapping(bp, addr, mapping);
237} 243}
238 244
239static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, 245static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
240 u16 pf_id) 246 u16 pf_id)
241{ 247{
242 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid), 248 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
243 pf_id); 249 pf_id);
@@ -249,8 +255,8 @@ static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
249 pf_id); 255 pf_id);
250} 256}
251 257
252static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid, 258static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
253 u8 enable) 259 u8 enable)
254{ 260{
255 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid), 261 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
256 enable); 262 enable);
@@ -262,8 +268,8 @@ static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
262 enable); 268 enable);
263} 269}
264 270
265static inline void storm_memset_eq_data(struct bnx2x *bp, 271static void storm_memset_eq_data(struct bnx2x *bp,
266 struct event_ring_data *eq_data, 272 struct event_ring_data *eq_data,
267 u16 pfid) 273 u16 pfid)
268{ 274{
269 size_t size = sizeof(struct event_ring_data); 275 size_t size = sizeof(struct event_ring_data);
@@ -273,8 +279,8 @@ static inline void storm_memset_eq_data(struct bnx2x *bp,
273 __storm_memset_struct(bp, addr, size, (u32 *)eq_data); 279 __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
274} 280}
275 281
276static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod, 282static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
277 u16 pfid) 283 u16 pfid)
278{ 284{
279 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid); 285 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
280 REG_WR16(bp, addr, eq_prod); 286 REG_WR16(bp, addr, eq_prod);
@@ -309,67 +315,6 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
309#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]" 315#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
310#define DMAE_DP_DST_NONE "dst_addr [none]" 316#define DMAE_DP_DST_NONE "dst_addr [none]"
311 317
312static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
313 int msglvl)
314{
315 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
316
317 switch (dmae->opcode & DMAE_COMMAND_DST) {
318 case DMAE_CMD_DST_PCI:
319 if (src_type == DMAE_CMD_SRC_PCI)
320 DP(msglvl, "DMAE: opcode 0x%08x\n"
321 "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
322 "comp_addr [%x:%08x], comp_val 0x%08x\n",
323 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
324 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
325 dmae->comp_addr_hi, dmae->comp_addr_lo,
326 dmae->comp_val);
327 else
328 DP(msglvl, "DMAE: opcode 0x%08x\n"
329 "src [%08x], len [%d*4], dst [%x:%08x]\n"
330 "comp_addr [%x:%08x], comp_val 0x%08x\n",
331 dmae->opcode, dmae->src_addr_lo >> 2,
332 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
333 dmae->comp_addr_hi, dmae->comp_addr_lo,
334 dmae->comp_val);
335 break;
336 case DMAE_CMD_DST_GRC:
337 if (src_type == DMAE_CMD_SRC_PCI)
338 DP(msglvl, "DMAE: opcode 0x%08x\n"
339 "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
340 "comp_addr [%x:%08x], comp_val 0x%08x\n",
341 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
342 dmae->len, dmae->dst_addr_lo >> 2,
343 dmae->comp_addr_hi, dmae->comp_addr_lo,
344 dmae->comp_val);
345 else
346 DP(msglvl, "DMAE: opcode 0x%08x\n"
347 "src [%08x], len [%d*4], dst [%08x]\n"
348 "comp_addr [%x:%08x], comp_val 0x%08x\n",
349 dmae->opcode, dmae->src_addr_lo >> 2,
350 dmae->len, dmae->dst_addr_lo >> 2,
351 dmae->comp_addr_hi, dmae->comp_addr_lo,
352 dmae->comp_val);
353 break;
354 default:
355 if (src_type == DMAE_CMD_SRC_PCI)
356 DP(msglvl, "DMAE: opcode 0x%08x\n"
357 "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
358 "comp_addr [%x:%08x] comp_val 0x%08x\n",
359 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
360 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
361 dmae->comp_val);
362 else
363 DP(msglvl, "DMAE: opcode 0x%08x\n"
364 "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
365 "comp_addr [%x:%08x] comp_val 0x%08x\n",
366 dmae->opcode, dmae->src_addr_lo >> 2,
367 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
368 dmae->comp_val);
369 break;
370 }
371
372}
373 318
374/* copy command into DMAE command memory and set DMAE command go */ 319/* copy command into DMAE command memory and set DMAE command go */
375void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx) 320void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
@@ -506,8 +451,6 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
506 dmae.dst_addr_hi = 0; 451 dmae.dst_addr_hi = 0;
507 dmae.len = len32; 452 dmae.len = len32;
508 453
509 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
510
511 /* issue the command and wait for completion */ 454 /* issue the command and wait for completion */
512 bnx2x_issue_dmae_with_comp(bp, &dmae); 455 bnx2x_issue_dmae_with_comp(bp, &dmae);
513} 456}
@@ -540,8 +483,6 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
540 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data)); 483 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
541 dmae.len = len32; 484 dmae.len = len32;
542 485
543 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
544
545 /* issue the command and wait for completion */ 486 /* issue the command and wait for completion */
546 bnx2x_issue_dmae_with_comp(bp, &dmae); 487 bnx2x_issue_dmae_with_comp(bp, &dmae);
547} 488}
@@ -562,27 +503,6 @@ static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
562 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len); 503 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
563} 504}
564 505
565/* used only for slowpath so not inlined */
566static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
567{
568 u32 wb_write[2];
569
570 wb_write[0] = val_hi;
571 wb_write[1] = val_lo;
572 REG_WR_DMAE(bp, reg, wb_write, 2);
573}
574
575#ifdef USE_WB_RD
576static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
577{
578 u32 wb_data[2];
579
580 REG_RD_DMAE(bp, reg, wb_data, 2);
581
582 return HILO_U64(wb_data[0], wb_data[1]);
583}
584#endif
585
586static int bnx2x_mc_assert(struct bnx2x *bp) 506static int bnx2x_mc_assert(struct bnx2x *bp)
587{ 507{
588 char last_idx; 508 char last_idx;
@@ -756,7 +676,7 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
756 printk("%s" "end of fw dump\n", lvl); 676 printk("%s" "end of fw dump\n", lvl);
757} 677}
758 678
759static inline void bnx2x_fw_dump(struct bnx2x *bp) 679static void bnx2x_fw_dump(struct bnx2x *bp)
760{ 680{
761 bnx2x_fw_dump_lvl(bp, KERN_ERR); 681 bnx2x_fw_dump_lvl(bp, KERN_ERR);
762} 682}
@@ -1076,8 +996,8 @@ static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
1076 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN); 996 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
1077} 997}
1078 998
1079static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg, 999static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
1080 u32 expected, u32 poll_count) 1000 u32 expected, u32 poll_count)
1081{ 1001{
1082 u32 cur_cnt = poll_count; 1002 u32 cur_cnt = poll_count;
1083 u32 val; 1003 u32 val;
@@ -1088,8 +1008,8 @@ static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
1088 return val; 1008 return val;
1089} 1009}
1090 1010
1091static inline int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg, 1011static int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
1092 char *msg, u32 poll_cnt) 1012 char *msg, u32 poll_cnt)
1093{ 1013{
1094 u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt); 1014 u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
1095 if (val != 0) { 1015 if (val != 0) {
@@ -1186,7 +1106,7 @@ static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
1186 (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX) 1106 (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
1187 1107
1188 1108
1189static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, 1109static int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
1190 u32 poll_cnt) 1110 u32 poll_cnt)
1191{ 1111{
1192 struct sdm_op_gen op_gen = {0}; 1112 struct sdm_op_gen op_gen = {0};
@@ -1220,7 +1140,7 @@ static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
1220 return ret; 1140 return ret;
1221} 1141}
1222 1142
1223static inline u8 bnx2x_is_pcie_pending(struct pci_dev *dev) 1143static u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
1224{ 1144{
1225 int pos; 1145 int pos;
1226 u16 status; 1146 u16 status;
@@ -1361,14 +1281,17 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
1361 int port = BP_PORT(bp); 1281 int port = BP_PORT(bp);
1362 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0; 1282 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1363 u32 val = REG_RD(bp, addr); 1283 u32 val = REG_RD(bp, addr);
1364 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; 1284 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1365 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0; 1285 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1286 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1366 1287
1367 if (msix) { 1288 if (msix) {
1368 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 1289 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1369 HC_CONFIG_0_REG_INT_LINE_EN_0); 1290 HC_CONFIG_0_REG_INT_LINE_EN_0);
1370 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 | 1291 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1371 HC_CONFIG_0_REG_ATTN_BIT_EN_0); 1292 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
1293 if (single_msix)
1294 val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
1372 } else if (msi) { 1295 } else if (msi) {
1373 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0; 1296 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
1374 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 | 1297 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
@@ -1425,8 +1348,9 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
1425static void bnx2x_igu_int_enable(struct bnx2x *bp) 1348static void bnx2x_igu_int_enable(struct bnx2x *bp)
1426{ 1349{
1427 u32 val; 1350 u32 val;
1428 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; 1351 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1429 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0; 1352 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1353 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
1430 1354
1431 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION); 1355 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1432 1356
@@ -1436,6 +1360,9 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)
1436 val |= (IGU_PF_CONF_FUNC_EN | 1360 val |= (IGU_PF_CONF_FUNC_EN |
1437 IGU_PF_CONF_MSI_MSIX_EN | 1361 IGU_PF_CONF_MSI_MSIX_EN |
1438 IGU_PF_CONF_ATTN_BIT_EN); 1362 IGU_PF_CONF_ATTN_BIT_EN);
1363
1364 if (single_msix)
1365 val |= IGU_PF_CONF_SINGLE_ISR_EN;
1439 } else if (msi) { 1366 } else if (msi) {
1440 val &= ~IGU_PF_CONF_INT_LINE_EN; 1367 val &= ~IGU_PF_CONF_INT_LINE_EN;
1441 val |= (IGU_PF_CONF_FUNC_EN | 1368 val |= (IGU_PF_CONF_FUNC_EN |
@@ -1455,6 +1382,9 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)
1455 1382
1456 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val); 1383 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1457 1384
1385 if (val & IGU_PF_CONF_INT_LINE_EN)
1386 pci_intx(bp->pdev, true);
1387
1458 barrier(); 1388 barrier();
1459 1389
1460 /* init leading/trailing edge */ 1390 /* init leading/trailing edge */
@@ -1623,7 +1553,7 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1623 * Returns the recovery leader resource id according to the engine this function 1553 * Returns the recovery leader resource id according to the engine this function
1624 * belongs to. Currently only only 2 engines is supported. 1554 * belongs to. Currently only only 2 engines is supported.
1625 */ 1555 */
1626static inline int bnx2x_get_leader_lock_resource(struct bnx2x *bp) 1556static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
1627{ 1557{
1628 if (BP_PATH(bp)) 1558 if (BP_PATH(bp))
1629 return HW_LOCK_RESOURCE_RECOVERY_LEADER_1; 1559 return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
@@ -1636,9 +1566,9 @@ static inline int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
1636 * 1566 *
1637 * @bp: driver handle 1567 * @bp: driver handle
1638 * 1568 *
1639 * Tries to aquire a leader lock for cuurent engine. 1569 * Tries to aquire a leader lock for current engine.
1640 */ 1570 */
1641static inline bool bnx2x_trylock_leader_lock(struct bnx2x *bp) 1571static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
1642{ 1572{
1643 return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp)); 1573 return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
1644} 1574}
@@ -1719,6 +1649,27 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
1719 1649
1720 DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left)); 1650 DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
1721 1651
1652 if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
1653 (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
1654 /* if Q update ramrod is completed for last Q in AFEX vif set
1655 * flow, then ACK MCP at the end
1656 *
1657 * mark pending ACK to MCP bit.
1658 * prevent case that both bits are cleared.
1659 * At the end of load/unload driver checks that
1660 * sp_state is cleaerd, and this order prevents
1661 * races
1662 */
1663 smp_mb__before_clear_bit();
1664 set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
1665 wmb();
1666 clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
1667 smp_mb__after_clear_bit();
1668
1669 /* schedule workqueue to send ack to MCP */
1670 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1671 }
1672
1722 return; 1673 return;
1723} 1674}
1724 1675
@@ -2229,40 +2180,6 @@ u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
2229 return rc; 2180 return rc;
2230} 2181}
2231 2182
2232static void bnx2x_init_port_minmax(struct bnx2x *bp)
2233{
2234 u32 r_param = bp->link_vars.line_speed / 8;
2235 u32 fair_periodic_timeout_usec;
2236 u32 t_fair;
2237
2238 memset(&(bp->cmng.rs_vars), 0,
2239 sizeof(struct rate_shaping_vars_per_port));
2240 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2241
2242 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2243 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2244
2245 /* this is the threshold below which no timer arming will occur
2246 1.25 coefficient is for the threshold to be a little bigger
2247 than the real time, to compensate for timer in-accuracy */
2248 bp->cmng.rs_vars.rs_threshold =
2249 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2250
2251 /* resolution of fairness timer */
2252 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2253 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2254 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2255
2256 /* this is the threshold below which we won't arm the timer anymore */
2257 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2258
2259 /* we multiply by 1e3/8 to get bytes/msec.
2260 We don't want the credits to pass a credit
2261 of the t_fair*FAIR_MEM (algorithm resolution) */
2262 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2263 /* since each tick is 4 usec */
2264 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2265}
2266 2183
2267/* Calculates the sum of vn_min_rates. 2184/* Calculates the sum of vn_min_rates.
2268 It's needed for further normalizing of the min_rates. 2185 It's needed for further normalizing of the min_rates.
@@ -2273,12 +2190,12 @@ static void bnx2x_init_port_minmax(struct bnx2x *bp)
2273 In the later case fainess algorithm should be deactivated. 2190 In the later case fainess algorithm should be deactivated.
2274 If not all min_rates are zero then those that are zeroes will be set to 1. 2191 If not all min_rates are zero then those that are zeroes will be set to 1.
2275 */ 2192 */
2276static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp) 2193static void bnx2x_calc_vn_min(struct bnx2x *bp,
2194 struct cmng_init_input *input)
2277{ 2195{
2278 int all_zero = 1; 2196 int all_zero = 1;
2279 int vn; 2197 int vn;
2280 2198
2281 bp->vn_weight_sum = 0;
2282 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) { 2199 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2283 u32 vn_cfg = bp->mf_config[vn]; 2200 u32 vn_cfg = bp->mf_config[vn];
2284 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 2201 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
@@ -2286,106 +2203,56 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2286 2203
2287 /* Skip hidden vns */ 2204 /* Skip hidden vns */
2288 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) 2205 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2289 continue; 2206 vn_min_rate = 0;
2290
2291 /* If min rate is zero - set it to 1 */ 2207 /* If min rate is zero - set it to 1 */
2292 if (!vn_min_rate) 2208 else if (!vn_min_rate)
2293 vn_min_rate = DEF_MIN_RATE; 2209 vn_min_rate = DEF_MIN_RATE;
2294 else 2210 else
2295 all_zero = 0; 2211 all_zero = 0;
2296 2212
2297 bp->vn_weight_sum += vn_min_rate; 2213 input->vnic_min_rate[vn] = vn_min_rate;
2298 } 2214 }
2299 2215
2300 /* if ETS or all min rates are zeros - disable fairness */ 2216 /* if ETS or all min rates are zeros - disable fairness */
2301 if (BNX2X_IS_ETS_ENABLED(bp)) { 2217 if (BNX2X_IS_ETS_ENABLED(bp)) {
2302 bp->cmng.flags.cmng_enables &= 2218 input->flags.cmng_enables &=
2303 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 2219 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2304 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n"); 2220 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
2305 } else if (all_zero) { 2221 } else if (all_zero) {
2306 bp->cmng.flags.cmng_enables &= 2222 input->flags.cmng_enables &=
2307 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 2223 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2308 DP(NETIF_MSG_IFUP, "All MIN values are zeroes" 2224 DP(NETIF_MSG_IFUP,
2309 " fairness will be disabled\n"); 2225 "All MIN values are zeroes fairness will be disabled\n");
2310 } else 2226 } else
2311 bp->cmng.flags.cmng_enables |= 2227 input->flags.cmng_enables |=
2312 CMNG_FLAGS_PER_PORT_FAIRNESS_VN; 2228 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2313} 2229}
2314 2230
2315static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) 2231static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
2232 struct cmng_init_input *input)
2316{ 2233{
2317 struct rate_shaping_vars_per_vn m_rs_vn; 2234 u16 vn_max_rate;
2318 struct fairness_vars_per_vn m_fair_vn;
2319 u32 vn_cfg = bp->mf_config[vn]; 2235 u32 vn_cfg = bp->mf_config[vn];
2320 int func = func_by_vn(bp, vn);
2321 u16 vn_min_rate, vn_max_rate;
2322 int i;
2323 2236
2324 /* If function is hidden - set min and max to zeroes */ 2237 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2325 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2326 vn_min_rate = 0;
2327 vn_max_rate = 0; 2238 vn_max_rate = 0;
2328 2239 else {
2329 } else {
2330 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg); 2240 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
2331 2241
2332 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 2242 if (IS_MF_SI(bp)) {
2333 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2334 /* If fairness is enabled (not all min rates are zeroes) and
2335 if current min rate is zero - set it to 1.
2336 This is a requirement of the algorithm. */
2337 if (bp->vn_weight_sum && (vn_min_rate == 0))
2338 vn_min_rate = DEF_MIN_RATE;
2339
2340 if (IS_MF_SI(bp))
2341 /* maxCfg in percents of linkspeed */ 2243 /* maxCfg in percents of linkspeed */
2342 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100; 2244 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
2343 else 2245 } else /* SD modes */
2344 /* maxCfg is absolute in 100Mb units */ 2246 /* maxCfg is absolute in 100Mb units */
2345 vn_max_rate = maxCfg * 100; 2247 vn_max_rate = maxCfg * 100;
2346 } 2248 }
2347 2249
2348 DP(NETIF_MSG_IFUP, 2250 DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
2349 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n", 2251
2350 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum); 2252 input->vnic_max_rate[vn] = vn_max_rate;
2351
2352 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2353 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2354
2355 /* global vn counter - maximal Mbps for this vn */
2356 m_rs_vn.vn_counter.rate = vn_max_rate;
2357
2358 /* quota - number of bytes transmitted in this period */
2359 m_rs_vn.vn_counter.quota =
2360 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2361
2362 if (bp->vn_weight_sum) {
2363 /* credit for each period of the fairness algorithm:
2364 number of bytes in T_FAIR (the vn share the port rate).
2365 vn_weight_sum should not be larger than 10000, thus
2366 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2367 than zero */
2368 m_fair_vn.vn_credit_delta =
2369 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2370 (8 * bp->vn_weight_sum))),
2371 (bp->cmng.fair_vars.fair_threshold +
2372 MIN_ABOVE_THRESH));
2373 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
2374 m_fair_vn.vn_credit_delta);
2375 }
2376
2377 /* Store it to internal memory */
2378 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2379 REG_WR(bp, BAR_XSTRORM_INTMEM +
2380 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2381 ((u32 *)(&m_rs_vn))[i]);
2382
2383 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2384 REG_WR(bp, BAR_XSTRORM_INTMEM +
2385 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2386 ((u32 *)(&m_fair_vn))[i]);
2387} 2253}
2388 2254
2255
2389static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp) 2256static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2390{ 2257{
2391 if (CHIP_REV_IS_SLOW(bp)) 2258 if (CHIP_REV_IS_SLOW(bp))
@@ -2423,38 +2290,42 @@ void bnx2x_read_mf_cfg(struct bnx2x *bp)
2423 bp->mf_config[vn] = 2290 bp->mf_config[vn] =
2424 MF_CFG_RD(bp, func_mf_config[func].config); 2291 MF_CFG_RD(bp, func_mf_config[func].config);
2425 } 2292 }
2293 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2294 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
2295 bp->flags |= MF_FUNC_DIS;
2296 } else {
2297 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2298 bp->flags &= ~MF_FUNC_DIS;
2299 }
2426} 2300}
2427 2301
2428static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) 2302static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2429{ 2303{
2304 struct cmng_init_input input;
2305 memset(&input, 0, sizeof(struct cmng_init_input));
2306
2307 input.port_rate = bp->link_vars.line_speed;
2430 2308
2431 if (cmng_type == CMNG_FNS_MINMAX) { 2309 if (cmng_type == CMNG_FNS_MINMAX) {
2432 int vn; 2310 int vn;
2433 2311
2434 /* clear cmng_enables */
2435 bp->cmng.flags.cmng_enables = 0;
2436
2437 /* read mf conf from shmem */ 2312 /* read mf conf from shmem */
2438 if (read_cfg) 2313 if (read_cfg)
2439 bnx2x_read_mf_cfg(bp); 2314 bnx2x_read_mf_cfg(bp);
2440 2315
2441 /* Init rate shaping and fairness contexts */
2442 bnx2x_init_port_minmax(bp);
2443
2444 /* vn_weight_sum and enable fairness if not 0 */ 2316 /* vn_weight_sum and enable fairness if not 0 */
2445 bnx2x_calc_vn_weight_sum(bp); 2317 bnx2x_calc_vn_min(bp, &input);
2446 2318
2447 /* calculate and set min-max rate for each vn */ 2319 /* calculate and set min-max rate for each vn */
2448 if (bp->port.pmf) 2320 if (bp->port.pmf)
2449 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) 2321 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
2450 bnx2x_init_vn_minmax(bp, vn); 2322 bnx2x_calc_vn_max(bp, vn, &input);
2451 2323
2452 /* always enable rate shaping and fairness */ 2324 /* always enable rate shaping and fairness */
2453 bp->cmng.flags.cmng_enables |= 2325 input.flags.cmng_enables |=
2454 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN; 2326 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2455 if (!bp->vn_weight_sum) 2327
2456 DP(NETIF_MSG_IFUP, "All MIN values are zeroes" 2328 bnx2x_init_cmng(&input, &bp->cmng);
2457 " fairness will be disabled\n");
2458 return; 2329 return;
2459 } 2330 }
2460 2331
@@ -2463,6 +2334,35 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2463 "rate shaping and fairness are disabled\n"); 2334 "rate shaping and fairness are disabled\n");
2464} 2335}
2465 2336
2337static void storm_memset_cmng(struct bnx2x *bp,
2338 struct cmng_init *cmng,
2339 u8 port)
2340{
2341 int vn;
2342 size_t size = sizeof(struct cmng_struct_per_port);
2343
2344 u32 addr = BAR_XSTRORM_INTMEM +
2345 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
2346
2347 __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
2348
2349 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2350 int func = func_by_vn(bp, vn);
2351
2352 addr = BAR_XSTRORM_INTMEM +
2353 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
2354 size = sizeof(struct rate_shaping_vars_per_vn);
2355 __storm_memset_struct(bp, addr, size,
2356 (u32 *)&cmng->vnic.vnic_max_rate[vn]);
2357
2358 addr = BAR_XSTRORM_INTMEM +
2359 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
2360 size = sizeof(struct fairness_vars_per_vn);
2361 __storm_memset_struct(bp, addr, size,
2362 (u32 *)&cmng->vnic.vnic_min_rate[vn]);
2363 }
2364}
2365
2466/* This function is called upon link interrupt */ 2366/* This function is called upon link interrupt */
2467static void bnx2x_link_attn(struct bnx2x *bp) 2367static void bnx2x_link_attn(struct bnx2x *bp)
2468{ 2368{
@@ -2535,6 +2435,190 @@ void bnx2x__link_status_update(struct bnx2x *bp)
2535 bnx2x_link_report(bp); 2435 bnx2x_link_report(bp);
2536} 2436}
2537 2437
2438static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
2439 u16 vlan_val, u8 allowed_prio)
2440{
2441 struct bnx2x_func_state_params func_params = {0};
2442 struct bnx2x_func_afex_update_params *f_update_params =
2443 &func_params.params.afex_update;
2444
2445 func_params.f_obj = &bp->func_obj;
2446 func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;
2447
2448 /* no need to wait for RAMROD completion, so don't
2449 * set RAMROD_COMP_WAIT flag
2450 */
2451
2452 f_update_params->vif_id = vifid;
2453 f_update_params->afex_default_vlan = vlan_val;
2454 f_update_params->allowed_priorities = allowed_prio;
2455
2456 /* if ramrod can not be sent, response to MCP immediately */
2457 if (bnx2x_func_state_change(bp, &func_params) < 0)
2458 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
2459
2460 return 0;
2461}
2462
2463static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
2464 u16 vif_index, u8 func_bit_map)
2465{
2466 struct bnx2x_func_state_params func_params = {0};
2467 struct bnx2x_func_afex_viflists_params *update_params =
2468 &func_params.params.afex_viflists;
2469 int rc;
2470 u32 drv_msg_code;
2471
2472 /* validate only LIST_SET and LIST_GET are received from switch */
2473 if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
2474 BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
2475 cmd_type);
2476
2477 func_params.f_obj = &bp->func_obj;
2478 func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;
2479
2480 /* set parameters according to cmd_type */
2481 update_params->afex_vif_list_command = cmd_type;
2482 update_params->vif_list_index = cpu_to_le16(vif_index);
2483 update_params->func_bit_map =
2484 (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
2485 update_params->func_to_clear = 0;
2486 drv_msg_code =
2487 (cmd_type == VIF_LIST_RULE_GET) ?
2488 DRV_MSG_CODE_AFEX_LISTGET_ACK :
2489 DRV_MSG_CODE_AFEX_LISTSET_ACK;
2490
2491 /* if ramrod can not be sent, respond to MCP immediately for
2492 * SET and GET requests (other are not triggered from MCP)
2493 */
2494 rc = bnx2x_func_state_change(bp, &func_params);
2495 if (rc < 0)
2496 bnx2x_fw_command(bp, drv_msg_code, 0);
2497
2498 return 0;
2499}
2500
2501static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
2502{
2503 struct afex_stats afex_stats;
2504 u32 func = BP_ABS_FUNC(bp);
2505 u32 mf_config;
2506 u16 vlan_val;
2507 u32 vlan_prio;
2508 u16 vif_id;
2509 u8 allowed_prio;
2510 u8 vlan_mode;
2511 u32 addr_to_write, vifid, addrs, stats_type, i;
2512
2513 if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
2514 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2515 DP(BNX2X_MSG_MCP,
2516 "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
2517 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
2518 }
2519
2520 if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
2521 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2522 addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
2523 DP(BNX2X_MSG_MCP,
2524 "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
2525 vifid, addrs);
2526 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
2527 addrs);
2528 }
2529
2530 if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
2531 addr_to_write = SHMEM2_RD(bp,
2532 afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
2533 stats_type = SHMEM2_RD(bp,
2534 afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2535
2536 DP(BNX2X_MSG_MCP,
2537 "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
2538 addr_to_write);
2539
2540 bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);
2541
2542 /* write response to scratchpad, for MCP */
2543 for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
2544 REG_WR(bp, addr_to_write + i*sizeof(u32),
2545 *(((u32 *)(&afex_stats))+i));
2546
2547 /* send ack message to MCP */
2548 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
2549 }
2550
2551 if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
2552 mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
2553 bp->mf_config[BP_VN(bp)] = mf_config;
2554 DP(BNX2X_MSG_MCP,
2555 "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
2556 mf_config);
2557
2558 /* if VIF_SET is "enabled" */
2559 if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
2560 /* set rate limit directly to internal RAM */
2561 struct cmng_init_input cmng_input;
2562 struct rate_shaping_vars_per_vn m_rs_vn;
2563 size_t size = sizeof(struct rate_shaping_vars_per_vn);
2564 u32 addr = BAR_XSTRORM_INTMEM +
2565 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));
2566
2567 bp->mf_config[BP_VN(bp)] = mf_config;
2568
2569 bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
2570 m_rs_vn.vn_counter.rate =
2571 cmng_input.vnic_max_rate[BP_VN(bp)];
2572 m_rs_vn.vn_counter.quota =
2573 (m_rs_vn.vn_counter.rate *
2574 RS_PERIODIC_TIMEOUT_USEC) / 8;
2575
2576 __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);
2577
2578 /* read relevant values from mf_cfg struct in shmem */
2579 vif_id =
2580 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2581 FUNC_MF_CFG_E1HOV_TAG_MASK) >>
2582 FUNC_MF_CFG_E1HOV_TAG_SHIFT;
2583 vlan_val =
2584 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2585 FUNC_MF_CFG_AFEX_VLAN_MASK) >>
2586 FUNC_MF_CFG_AFEX_VLAN_SHIFT;
2587 vlan_prio = (mf_config &
2588 FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
2589 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
2590 vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
2591 vlan_mode =
2592 (MF_CFG_RD(bp,
2593 func_mf_config[func].afex_config) &
2594 FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
2595 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
2596 allowed_prio =
2597 (MF_CFG_RD(bp,
2598 func_mf_config[func].afex_config) &
2599 FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
2600 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;
2601
2602 /* send ramrod to FW, return in case of failure */
2603 if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
2604 allowed_prio))
2605 return;
2606
2607 bp->afex_def_vlan_tag = vlan_val;
2608 bp->afex_vlan_mode = vlan_mode;
2609 } else {
2610 /* notify link down because BP->flags is disabled */
2611 bnx2x_link_report(bp);
2612
2613 /* send INVALID VIF ramrod to FW */
2614 bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
2615
2616 /* Reset the default afex VLAN */
2617 bp->afex_def_vlan_tag = -1;
2618 }
2619 }
2620}
2621
2538static void bnx2x_pmf_update(struct bnx2x *bp) 2622static void bnx2x_pmf_update(struct bnx2x *bp)
2539{ 2623{
2540 int port = BP_PORT(bp); 2624 int port = BP_PORT(bp);
@@ -2619,6 +2703,18 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2619} 2703}
2620 2704
2621 2705
2706static void storm_memset_func_cfg(struct bnx2x *bp,
2707 struct tstorm_eth_function_common_config *tcfg,
2708 u16 abs_fid)
2709{
2710 size_t size = sizeof(struct tstorm_eth_function_common_config);
2711
2712 u32 addr = BAR_TSTRORM_INTMEM +
2713 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
2714
2715 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
2716}
2717
2622void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p) 2718void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2623{ 2719{
2624 if (CHIP_IS_E1x(bp)) { 2720 if (CHIP_IS_E1x(bp)) {
@@ -2648,9 +2744,9 @@ void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2648 * 2744 *
2649 * Return the flags that are common for the Tx-only and not normal connections. 2745 * Return the flags that are common for the Tx-only and not normal connections.
2650 */ 2746 */
2651static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp, 2747static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
2652 struct bnx2x_fastpath *fp, 2748 struct bnx2x_fastpath *fp,
2653 bool zero_stats) 2749 bool zero_stats)
2654{ 2750{
2655 unsigned long flags = 0; 2751 unsigned long flags = 0;
2656 2752
@@ -2670,9 +2766,9 @@ static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
2670 return flags; 2766 return flags;
2671} 2767}
2672 2768
2673static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp, 2769static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
2674 struct bnx2x_fastpath *fp, 2770 struct bnx2x_fastpath *fp,
2675 bool leading) 2771 bool leading)
2676{ 2772{
2677 unsigned long flags = 0; 2773 unsigned long flags = 0;
2678 2774
@@ -2680,8 +2776,11 @@ static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
2680 if (IS_MF_SD(bp)) 2776 if (IS_MF_SD(bp))
2681 __set_bit(BNX2X_Q_FLG_OV, &flags); 2777 __set_bit(BNX2X_Q_FLG_OV, &flags);
2682 2778
2683 if (IS_FCOE_FP(fp)) 2779 if (IS_FCOE_FP(fp)) {
2684 __set_bit(BNX2X_Q_FLG_FCOE, &flags); 2780 __set_bit(BNX2X_Q_FLG_FCOE, &flags);
2781 /* For FCoE - force usage of default priority (for afex) */
2782 __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
2783 }
2685 2784
2686 if (!fp->disable_tpa) { 2785 if (!fp->disable_tpa) {
2687 __set_bit(BNX2X_Q_FLG_TPA, &flags); 2786 __set_bit(BNX2X_Q_FLG_TPA, &flags);
@@ -2698,6 +2797,10 @@ static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
2698 /* Always set HW VLAN stripping */ 2797 /* Always set HW VLAN stripping */
2699 __set_bit(BNX2X_Q_FLG_VLAN, &flags); 2798 __set_bit(BNX2X_Q_FLG_VLAN, &flags);
2700 2799
2800 /* configure silent vlan removal */
2801 if (IS_MF_AFEX(bp))
2802 __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
2803
2701 2804
2702 return flags | bnx2x_get_common_flags(bp, fp, true); 2805 return flags | bnx2x_get_common_flags(bp, fp, true);
2703} 2806}
@@ -2800,6 +2903,13 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
2800 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS; 2903 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
2801 else 2904 else
2802 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS; 2905 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
2906 /* configure silent vlan removal
2907 * if multi function mode is afex, then mask default vlan
2908 */
2909 if (IS_MF_AFEX(bp)) {
2910 rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
2911 rxq_init->silent_removal_mask = VLAN_VID_MASK;
2912 }
2803} 2913}
2804 2914
2805static void bnx2x_pf_tx_q_prep(struct bnx2x *bp, 2915static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
@@ -3051,7 +3161,7 @@ static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3051 * configure FW 3161 * configure FW
3052 * notify others function about the change 3162 * notify others function about the change
3053 */ 3163 */
3054static inline void bnx2x_config_mf_bw(struct bnx2x *bp) 3164static void bnx2x_config_mf_bw(struct bnx2x *bp)
3055{ 3165{
3056 if (bp->link_vars.link_up) { 3166 if (bp->link_vars.link_up) {
3057 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX); 3167 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
@@ -3060,7 +3170,7 @@ static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
3060 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); 3170 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3061} 3171}
3062 3172
3063static inline void bnx2x_set_mf_bw(struct bnx2x *bp) 3173static void bnx2x_set_mf_bw(struct bnx2x *bp)
3064{ 3174{
3065 bnx2x_config_mf_bw(bp); 3175 bnx2x_config_mf_bw(bp);
3066 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0); 3176 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
@@ -3147,7 +3257,7 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
3147} 3257}
3148 3258
3149/* must be called under the spq lock */ 3259/* must be called under the spq lock */
3150static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp) 3260static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
3151{ 3261{
3152 struct eth_spe *next_spe = bp->spq_prod_bd; 3262 struct eth_spe *next_spe = bp->spq_prod_bd;
3153 3263
@@ -3163,7 +3273,7 @@ static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
3163} 3273}
3164 3274
3165/* must be called under the spq lock */ 3275/* must be called under the spq lock */
3166static inline void bnx2x_sp_prod_update(struct bnx2x *bp) 3276static void bnx2x_sp_prod_update(struct bnx2x *bp)
3167{ 3277{
3168 int func = BP_FUNC(bp); 3278 int func = BP_FUNC(bp);
3169 3279
@@ -3185,7 +3295,7 @@ static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
3185 * @cmd: command to check 3295 * @cmd: command to check
3186 * @cmd_type: command type 3296 * @cmd_type: command type
3187 */ 3297 */
3188static inline bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type) 3298static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
3189{ 3299{
3190 if ((cmd_type == NONE_CONNECTION_TYPE) || 3300 if ((cmd_type == NONE_CONNECTION_TYPE) ||
3191 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) || 3301 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
@@ -3319,7 +3429,7 @@ static void bnx2x_release_alr(struct bnx2x *bp)
3319#define BNX2X_DEF_SB_ATT_IDX 0x0001 3429#define BNX2X_DEF_SB_ATT_IDX 0x0001
3320#define BNX2X_DEF_SB_IDX 0x0002 3430#define BNX2X_DEF_SB_IDX 0x0002
3321 3431
3322static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp) 3432static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
3323{ 3433{
3324 struct host_sp_status_block *def_sb = bp->def_status_blk; 3434 struct host_sp_status_block *def_sb = bp->def_status_blk;
3325 u16 rc = 0; 3435 u16 rc = 0;
@@ -3451,7 +3561,7 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
3451 } 3561 }
3452} 3562}
3453 3563
3454static inline void bnx2x_fan_failure(struct bnx2x *bp) 3564static void bnx2x_fan_failure(struct bnx2x *bp)
3455{ 3565{
3456 int port = BP_PORT(bp); 3566 int port = BP_PORT(bp);
3457 u32 ext_phy_config; 3567 u32 ext_phy_config;
@@ -3481,7 +3591,7 @@ static inline void bnx2x_fan_failure(struct bnx2x *bp)
3481 3591
3482} 3592}
3483 3593
3484static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) 3594static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
3485{ 3595{
3486 int port = BP_PORT(bp); 3596 int port = BP_PORT(bp);
3487 int reg_offset; 3597 int reg_offset;
@@ -3521,7 +3631,7 @@ static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
3521 } 3631 }
3522} 3632}
3523 3633
3524static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn) 3634static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3525{ 3635{
3526 u32 val; 3636 u32 val;
3527 3637
@@ -3552,7 +3662,7 @@ static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3552 } 3662 }
3553} 3663}
3554 3664
3555static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn) 3665static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3556{ 3666{
3557 u32 val; 3667 u32 val;
3558 3668
@@ -3596,7 +3706,7 @@ static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3596 } 3706 }
3597} 3707}
3598 3708
3599static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn) 3709static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3600{ 3710{
3601 u32 val; 3711 u32 val;
3602 3712
@@ -3606,6 +3716,7 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3606 int func = BP_FUNC(bp); 3716 int func = BP_FUNC(bp);
3607 3717
3608 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 3718 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3719 bnx2x_read_mf_cfg(bp);
3609 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp, 3720 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3610 func_mf_config[BP_ABS_FUNC(bp)].config); 3721 func_mf_config[BP_ABS_FUNC(bp)].config);
3611 val = SHMEM_RD(bp, 3722 val = SHMEM_RD(bp,
@@ -3628,6 +3739,9 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3628 /* start dcbx state machine */ 3739 /* start dcbx state machine */
3629 bnx2x_dcbx_set_params(bp, 3740 bnx2x_dcbx_set_params(bp,
3630 BNX2X_DCBX_STATE_NEG_RECEIVED); 3741 BNX2X_DCBX_STATE_NEG_RECEIVED);
3742 if (val & DRV_STATUS_AFEX_EVENT_MASK)
3743 bnx2x_handle_afex_cmd(bp,
3744 val & DRV_STATUS_AFEX_EVENT_MASK);
3631 if (bp->link_vars.periodic_flags & 3745 if (bp->link_vars.periodic_flags &
3632 PERIODIC_FLAGS_LINK_EVENT) { 3746 PERIODIC_FLAGS_LINK_EVENT) {
3633 /* sync with link */ 3747 /* sync with link */
@@ -3722,7 +3836,7 @@ void bnx2x_set_reset_global(struct bnx2x *bp)
3722 * 3836 *
3723 * Should be run under rtnl lock 3837 * Should be run under rtnl lock
3724 */ 3838 */
3725static inline void bnx2x_clear_reset_global(struct bnx2x *bp) 3839static void bnx2x_clear_reset_global(struct bnx2x *bp)
3726{ 3840{
3727 u32 val; 3841 u32 val;
3728 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 3842 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
@@ -3736,7 +3850,7 @@ static inline void bnx2x_clear_reset_global(struct bnx2x *bp)
3736 * 3850 *
3737 * should be run under rtnl lock 3851 * should be run under rtnl lock
3738 */ 3852 */
3739static inline bool bnx2x_reset_is_global(struct bnx2x *bp) 3853static bool bnx2x_reset_is_global(struct bnx2x *bp)
3740{ 3854{
3741 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); 3855 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
3742 3856
@@ -3749,7 +3863,7 @@ static inline bool bnx2x_reset_is_global(struct bnx2x *bp)
3749 * 3863 *
3750 * Should be run under rtnl lock 3864 * Should be run under rtnl lock
3751 */ 3865 */
3752static inline void bnx2x_set_reset_done(struct bnx2x *bp) 3866static void bnx2x_set_reset_done(struct bnx2x *bp)
3753{ 3867{
3754 u32 val; 3868 u32 val;
3755 u32 bit = BP_PATH(bp) ? 3869 u32 bit = BP_PATH(bp) ?
@@ -3874,7 +3988,7 @@ bool bnx2x_clear_pf_load(struct bnx2x *bp)
3874 * 3988 *
3875 * should be run under rtnl lock 3989 * should be run under rtnl lock
3876 */ 3990 */
3877static inline bool bnx2x_get_load_status(struct bnx2x *bp, int engine) 3991static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
3878{ 3992{
3879 u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK : 3993 u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
3880 BNX2X_PATH0_LOAD_CNT_MASK); 3994 BNX2X_PATH0_LOAD_CNT_MASK);
@@ -3895,7 +4009,7 @@ static inline bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
3895/* 4009/*
3896 * Reset the load status for the current engine. 4010 * Reset the load status for the current engine.
3897 */ 4011 */
3898static inline void bnx2x_clear_load_status(struct bnx2x *bp) 4012static void bnx2x_clear_load_status(struct bnx2x *bp)
3899{ 4013{
3900 u32 val; 4014 u32 val;
3901 u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : 4015 u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
@@ -3906,13 +4020,13 @@ static inline void bnx2x_clear_load_status(struct bnx2x *bp)
3906 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); 4020 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
3907} 4021}
3908 4022
3909static inline void _print_next_block(int idx, const char *blk) 4023static void _print_next_block(int idx, const char *blk)
3910{ 4024{
3911 pr_cont("%s%s", idx ? ", " : "", blk); 4025 pr_cont("%s%s", idx ? ", " : "", blk);
3912} 4026}
3913 4027
3914static inline int bnx2x_check_blocks_with_parity0(u32 sig, int par_num, 4028static int bnx2x_check_blocks_with_parity0(u32 sig, int par_num,
3915 bool print) 4029 bool print)
3916{ 4030{
3917 int i = 0; 4031 int i = 0;
3918 u32 cur_bit = 0; 4032 u32 cur_bit = 0;
@@ -3959,8 +4073,8 @@ static inline int bnx2x_check_blocks_with_parity0(u32 sig, int par_num,
3959 return par_num; 4073 return par_num;
3960} 4074}
3961 4075
3962static inline int bnx2x_check_blocks_with_parity1(u32 sig, int par_num, 4076static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
3963 bool *global, bool print) 4077 bool *global, bool print)
3964{ 4078{
3965 int i = 0; 4079 int i = 0;
3966 u32 cur_bit = 0; 4080 u32 cur_bit = 0;
@@ -4045,8 +4159,8 @@ static inline int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
4045 return par_num; 4159 return par_num;
4046} 4160}
4047 4161
4048static inline int bnx2x_check_blocks_with_parity2(u32 sig, int par_num, 4162static int bnx2x_check_blocks_with_parity2(u32 sig, int par_num,
4049 bool print) 4163 bool print)
4050{ 4164{
4051 int i = 0; 4165 int i = 0;
4052 u32 cur_bit = 0; 4166 u32 cur_bit = 0;
@@ -4097,8 +4211,8 @@ static inline int bnx2x_check_blocks_with_parity2(u32 sig, int par_num,
4097 return par_num; 4211 return par_num;
4098} 4212}
4099 4213
4100static inline int bnx2x_check_blocks_with_parity3(u32 sig, int par_num, 4214static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
4101 bool *global, bool print) 4215 bool *global, bool print)
4102{ 4216{
4103 int i = 0; 4217 int i = 0;
4104 u32 cur_bit = 0; 4218 u32 cur_bit = 0;
@@ -4139,8 +4253,8 @@ static inline int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
4139 return par_num; 4253 return par_num;
4140} 4254}
4141 4255
4142static inline int bnx2x_check_blocks_with_parity4(u32 sig, int par_num, 4256static int bnx2x_check_blocks_with_parity4(u32 sig, int par_num,
4143 bool print) 4257 bool print)
4144{ 4258{
4145 int i = 0; 4259 int i = 0;
4146 u32 cur_bit = 0; 4260 u32 cur_bit = 0;
@@ -4166,8 +4280,8 @@ static inline int bnx2x_check_blocks_with_parity4(u32 sig, int par_num,
4166 return par_num; 4280 return par_num;
4167} 4281}
4168 4282
4169static inline bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print, 4283static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
4170 u32 *sig) 4284 u32 *sig)
4171{ 4285{
4172 if ((sig[0] & HW_PRTY_ASSERT_SET_0) || 4286 if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4173 (sig[1] & HW_PRTY_ASSERT_SET_1) || 4287 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
@@ -4238,7 +4352,7 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
4238} 4352}
4239 4353
4240 4354
4241static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn) 4355static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
4242{ 4356{
4243 u32 val; 4357 u32 val;
4244 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) { 4358 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
@@ -4430,7 +4544,7 @@ void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
4430 igu_addr); 4544 igu_addr);
4431} 4545}
4432 4546
4433static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod) 4547static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
4434{ 4548{
4435 /* No memory barriers */ 4549 /* No memory barriers */
4436 storm_memset_eq_prod(bp, prod, BP_FUNC(bp)); 4550 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
@@ -4461,7 +4575,7 @@ static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
4461} 4575}
4462#endif 4576#endif
4463 4577
4464static inline void bnx2x_handle_mcast_eqe(struct bnx2x *bp) 4578static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
4465{ 4579{
4466 struct bnx2x_mcast_ramrod_params rparam; 4580 struct bnx2x_mcast_ramrod_params rparam;
4467 int rc; 4581 int rc;
@@ -4486,8 +4600,8 @@ static inline void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
4486 netif_addr_unlock_bh(bp->dev); 4600 netif_addr_unlock_bh(bp->dev);
4487} 4601}
4488 4602
4489static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp, 4603static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
4490 union event_ring_elem *elem) 4604 union event_ring_elem *elem)
4491{ 4605{
4492 unsigned long ramrod_flags = 0; 4606 unsigned long ramrod_flags = 0;
4493 int rc = 0; 4607 int rc = 0;
@@ -4534,7 +4648,7 @@ static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp,
4534static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start); 4648static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
4535#endif 4649#endif
4536 4650
4537static inline void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp) 4651static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
4538{ 4652{
4539 netif_addr_lock_bh(bp->dev); 4653 netif_addr_lock_bh(bp->dev);
4540 4654
@@ -4555,7 +4669,94 @@ static inline void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
4555 netif_addr_unlock_bh(bp->dev); 4669 netif_addr_unlock_bh(bp->dev);
4556} 4670}
4557 4671
4558static inline struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj( 4672static void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
4673 union event_ring_elem *elem)
4674{
4675 if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) {
4676 DP(BNX2X_MSG_SP,
4677 "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n",
4678 elem->message.data.vif_list_event.func_bit_map);
4679 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK,
4680 elem->message.data.vif_list_event.func_bit_map);
4681 } else if (elem->message.data.vif_list_event.echo ==
4682 VIF_LIST_RULE_SET) {
4683 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n");
4684 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0);
4685 }
4686}
4687
4688/* called with rtnl_lock */
4689static void bnx2x_after_function_update(struct bnx2x *bp)
4690{
4691 int q, rc;
4692 struct bnx2x_fastpath *fp;
4693 struct bnx2x_queue_state_params queue_params = {NULL};
4694 struct bnx2x_queue_update_params *q_update_params =
4695 &queue_params.params.update;
4696
4697 /* Send Q update command with afex vlan removal values for all Qs */
4698 queue_params.cmd = BNX2X_Q_CMD_UPDATE;
4699
4700 /* set silent vlan removal values according to vlan mode */
4701 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4702 &q_update_params->update_flags);
4703 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
4704 &q_update_params->update_flags);
4705 __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
4706
4707 /* in access mode mark mask and value are 0 to strip all vlans */
4708 if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
4709 q_update_params->silent_removal_value = 0;
4710 q_update_params->silent_removal_mask = 0;
4711 } else {
4712 q_update_params->silent_removal_value =
4713 (bp->afex_def_vlan_tag & VLAN_VID_MASK);
4714 q_update_params->silent_removal_mask = VLAN_VID_MASK;
4715 }
4716
4717 for_each_eth_queue(bp, q) {
4718 /* Set the appropriate Queue object */
4719 fp = &bp->fp[q];
4720 queue_params.q_obj = &fp->q_obj;
4721
4722 /* send the ramrod */
4723 rc = bnx2x_queue_state_change(bp, &queue_params);
4724 if (rc < 0)
4725 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
4726 q);
4727 }
4728
4729#ifdef BCM_CNIC
4730 if (!NO_FCOE(bp)) {
4731 fp = &bp->fp[FCOE_IDX];
4732 queue_params.q_obj = &fp->q_obj;
4733
4734 /* clear pending completion bit */
4735 __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
4736
4737 /* mark latest Q bit */
4738 smp_mb__before_clear_bit();
4739 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
4740 smp_mb__after_clear_bit();
4741
4742 /* send Q update ramrod for FCoE Q */
4743 rc = bnx2x_queue_state_change(bp, &queue_params);
4744 if (rc < 0)
4745 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
4746 q);
4747 } else {
4748 /* If no FCoE ring - ACK MCP now */
4749 bnx2x_link_report(bp);
4750 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
4751 }
4752#else
4753 /* If no FCoE ring - ACK MCP now */
4754 bnx2x_link_report(bp);
4755 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
4756#endif /* BCM_CNIC */
4757}
4758
4759static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
4559 struct bnx2x *bp, u32 cid) 4760 struct bnx2x *bp, u32 cid)
4560{ 4761{
4561 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid); 4762 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
@@ -4653,6 +4854,28 @@ static void bnx2x_eq_int(struct bnx2x *bp)
4653 break; 4854 break;
4654 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED); 4855 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
4655 goto next_spqe; 4856 goto next_spqe;
4857 case EVENT_RING_OPCODE_FUNCTION_UPDATE:
4858 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
4859 "AFEX: ramrod completed FUNCTION_UPDATE\n");
4860 f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_AFEX_UPDATE);
4861
4862 /* We will perform the Queues update from sp_rtnl task
4863 * as all Queue SP operations should run under
4864 * rtnl_lock.
4865 */
4866 smp_mb__before_clear_bit();
4867 set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
4868 &bp->sp_rtnl_state);
4869 smp_mb__after_clear_bit();
4870
4871 schedule_delayed_work(&bp->sp_rtnl_task, 0);
4872 goto next_spqe;
4873
4874 case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
4875 f_obj->complete_cmd(bp, f_obj,
4876 BNX2X_F_CMD_AFEX_VIFLISTS);
4877 bnx2x_after_afex_vif_lists(bp, elem);
4878 goto next_spqe;
4656 case EVENT_RING_OPCODE_FUNCTION_START: 4879 case EVENT_RING_OPCODE_FUNCTION_START:
4657 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, 4880 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4658 "got FUNC_START ramrod\n"); 4881 "got FUNC_START ramrod\n");
@@ -4784,6 +5007,13 @@ static void bnx2x_sp_task(struct work_struct *work)
4784 5007
4785 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID, 5008 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
4786 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1); 5009 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
5010
5011 /* afex - poll to check if VIFSET_ACK should be sent to MFW */
5012 if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
5013 &bp->sp_state)) {
5014 bnx2x_link_report(bp);
5015 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5016 }
4787} 5017}
4788 5018
4789irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) 5019irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
@@ -4870,7 +5100,7 @@ static void bnx2x_timer(unsigned long data)
4870 * nic init service functions 5100 * nic init service functions
4871 */ 5101 */
4872 5102
4873static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len) 5103static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
4874{ 5104{
4875 u32 i; 5105 u32 i;
4876 if (!(len%4) && !(addr%4)) 5106 if (!(len%4) && !(addr%4))
@@ -4883,10 +5113,10 @@ static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
4883} 5113}
4884 5114
4885/* helper: writes FP SP data to FW - data_size in dwords */ 5115/* helper: writes FP SP data to FW - data_size in dwords */
4886static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp, 5116static void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
4887 int fw_sb_id, 5117 int fw_sb_id,
4888 u32 *sb_data_p, 5118 u32 *sb_data_p,
4889 u32 data_size) 5119 u32 data_size)
4890{ 5120{
4891 int index; 5121 int index;
4892 for (index = 0; index < data_size; index++) 5122 for (index = 0; index < data_size; index++)
@@ -4896,7 +5126,7 @@ static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
4896 *(sb_data_p + index)); 5126 *(sb_data_p + index));
4897} 5127}
4898 5128
4899static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id) 5129static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
4900{ 5130{
4901 u32 *sb_data_p; 5131 u32 *sb_data_p;
4902 u32 data_size = 0; 5132 u32 data_size = 0;
@@ -4929,7 +5159,7 @@ static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
4929} 5159}
4930 5160
4931/* helper: writes SP SB data to FW */ 5161/* helper: writes SP SB data to FW */
4932static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp, 5162static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
4933 struct hc_sp_status_block_data *sp_sb_data) 5163 struct hc_sp_status_block_data *sp_sb_data)
4934{ 5164{
4935 int func = BP_FUNC(bp); 5165 int func = BP_FUNC(bp);
@@ -4941,7 +5171,7 @@ static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
4941 *((u32 *)sp_sb_data + i)); 5171 *((u32 *)sp_sb_data + i));
4942} 5172}
4943 5173
4944static inline void bnx2x_zero_sp_sb(struct bnx2x *bp) 5174static void bnx2x_zero_sp_sb(struct bnx2x *bp)
4945{ 5175{
4946 int func = BP_FUNC(bp); 5176 int func = BP_FUNC(bp);
4947 struct hc_sp_status_block_data sp_sb_data; 5177 struct hc_sp_status_block_data sp_sb_data;
@@ -4962,8 +5192,7 @@ static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
4962} 5192}
4963 5193
4964 5194
4965static inline 5195static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
4966void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
4967 int igu_sb_id, int igu_seg_id) 5196 int igu_sb_id, int igu_seg_id)
4968{ 5197{
4969 hc_sm->igu_sb_id = igu_sb_id; 5198 hc_sm->igu_sb_id = igu_sb_id;
@@ -4974,8 +5203,7 @@ void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
4974 5203
4975 5204
4976/* allocates state machine ids. */ 5205/* allocates state machine ids. */
4977static inline 5206static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
4978void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
4979{ 5207{
4980 /* zero out state machine indices */ 5208 /* zero out state machine indices */
4981 /* rx indices */ 5209 /* rx indices */
@@ -5383,7 +5611,7 @@ static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
5383 return fp->bp->base_fw_ndsb + fp->index + CNIC_PRESENT; 5611 return fp->bp->base_fw_ndsb + fp->index + CNIC_PRESENT;
5384} 5612}
5385 5613
5386static inline u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp) 5614static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
5387{ 5615{
5388 if (CHIP_IS_E1x(fp->bp)) 5616 if (CHIP_IS_E1x(fp->bp))
5389 return BP_L_ID(fp->bp) + fp->index; 5617 return BP_L_ID(fp->bp) + fp->index;
@@ -5444,6 +5672,43 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
5444 bnx2x_update_fpsb_idx(fp); 5672 bnx2x_update_fpsb_idx(fp);
5445} 5673}
5446 5674
5675static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
5676{
5677 int i;
5678
5679 for (i = 1; i <= NUM_TX_RINGS; i++) {
5680 struct eth_tx_next_bd *tx_next_bd =
5681 &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5682
5683 tx_next_bd->addr_hi =
5684 cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
5685 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5686 tx_next_bd->addr_lo =
5687 cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
5688 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5689 }
5690
5691 SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
5692 txdata->tx_db.data.zero_fill1 = 0;
5693 txdata->tx_db.data.prod = 0;
5694
5695 txdata->tx_pkt_prod = 0;
5696 txdata->tx_pkt_cons = 0;
5697 txdata->tx_bd_prod = 0;
5698 txdata->tx_bd_cons = 0;
5699 txdata->tx_pkt = 0;
5700}
5701
5702static void bnx2x_init_tx_rings(struct bnx2x *bp)
5703{
5704 int i;
5705 u8 cos;
5706
5707 for_each_tx_queue(bp, i)
5708 for_each_cos_in_tx_queue(&bp->fp[i], cos)
5709 bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]);
5710}
5711
5447void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) 5712void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5448{ 5713{
5449 int i; 5714 int i;
@@ -5968,7 +6233,7 @@ void bnx2x_pf_disable(struct bnx2x *bp)
5968 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0); 6233 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
5969} 6234}
5970 6235
5971static inline void bnx2x__common_init_phy(struct bnx2x *bp) 6236static void bnx2x__common_init_phy(struct bnx2x *bp)
5972{ 6237{
5973 u32 shmem_base[2], shmem2_base[2]; 6238 u32 shmem_base[2], shmem2_base[2];
5974 shmem_base[0] = bp->common.shmem_base; 6239 shmem_base[0] = bp->common.shmem_base;
@@ -6255,12 +6520,24 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
6255 if (!CHIP_IS_E1(bp)) 6520 if (!CHIP_IS_E1(bp))
6256 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan); 6521 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
6257 6522
6258 if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) 6523 if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
6259 /* Bit-map indicating which L2 hdrs may appear 6524 if (IS_MF_AFEX(bp)) {
6260 * after the basic Ethernet header 6525 /* configure that VNTag and VLAN headers must be
6261 */ 6526 * received in afex mode
6262 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 6527 */
6263 bp->path_has_ovlan ? 7 : 6); 6528 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
6529 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
6530 REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
6531 REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
6532 REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4);
6533 } else {
6534 /* Bit-map indicating which L2 hdrs may appear
6535 * after the basic Ethernet header
6536 */
6537 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
6538 bp->path_has_ovlan ? 7 : 6);
6539 }
6540 }
6264 6541
6265 bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON); 6542 bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
6266 bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON); 6543 bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
@@ -6294,9 +6571,21 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
6294 bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON); 6571 bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
6295 bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON); 6572 bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
6296 6573
6297 if (!CHIP_IS_E1x(bp)) 6574 if (!CHIP_IS_E1x(bp)) {
6298 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 6575 if (IS_MF_AFEX(bp)) {
6299 bp->path_has_ovlan ? 7 : 6); 6576 /* configure that VNTag and VLAN headers must be
6577 * sent in afex mode
6578 */
6579 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE);
6580 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA);
6581 REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
6582 REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
6583 REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4);
6584 } else {
6585 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
6586 bp->path_has_ovlan ? 7 : 6);
6587 }
6588 }
6300 6589
6301 REG_WR(bp, SRC_REG_SOFT_RST, 1); 6590 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6302 6591
@@ -6514,15 +6803,29 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
6514 6803
6515 6804
6516 bnx2x_init_block(bp, BLOCK_PRS, init_phase); 6805 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
6517 if (CHIP_IS_E3B0(bp)) 6806 if (CHIP_IS_E3B0(bp)) {
6518 /* Ovlan exists only if we are in multi-function + 6807 if (IS_MF_AFEX(bp)) {
6519 * switch-dependent mode, in switch-independent there 6808 /* configure headers for AFEX mode */
6520 * is no ovlan headers 6809 REG_WR(bp, BP_PORT(bp) ?
6521 */ 6810 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
6522 REG_WR(bp, BP_PORT(bp) ? 6811 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
6523 PRS_REG_HDRS_AFTER_BASIC_PORT_1 : 6812 REG_WR(bp, BP_PORT(bp) ?
6524 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 6813 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
6525 (bp->path_has_ovlan ? 7 : 6)); 6814 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
6815 REG_WR(bp, BP_PORT(bp) ?
6816 PRS_REG_MUST_HAVE_HDRS_PORT_1 :
6817 PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
6818 } else {
6819 /* Ovlan exists only if we are in multi-function +
6820 * switch-dependent mode, in switch-independent there
6821 * is no ovlan headers
6822 */
6823 REG_WR(bp, BP_PORT(bp) ?
6824 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
6825 PRS_REG_HDRS_AFTER_BASIC_PORT_0,
6826 (bp->path_has_ovlan ? 7 : 6));
6827 }
6828 }
6526 6829
6527 bnx2x_init_block(bp, BLOCK_TSDM, init_phase); 6830 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
6528 bnx2x_init_block(bp, BLOCK_CSDM, init_phase); 6831 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
@@ -6584,10 +6887,15 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
6584 /* Bit-map indicating which L2 hdrs may appear after the 6887 /* Bit-map indicating which L2 hdrs may appear after the
6585 * basic Ethernet header 6888 * basic Ethernet header
6586 */ 6889 */
6587 REG_WR(bp, BP_PORT(bp) ? 6890 if (IS_MF_AFEX(bp))
6588 NIG_REG_P1_HDRS_AFTER_BASIC : 6891 REG_WR(bp, BP_PORT(bp) ?
6589 NIG_REG_P0_HDRS_AFTER_BASIC, 6892 NIG_REG_P1_HDRS_AFTER_BASIC :
6590 IS_MF_SD(bp) ? 7 : 6); 6893 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
6894 else
6895 REG_WR(bp, BP_PORT(bp) ?
6896 NIG_REG_P1_HDRS_AFTER_BASIC :
6897 NIG_REG_P0_HDRS_AFTER_BASIC,
6898 IS_MF_SD(bp) ? 7 : 6);
6591 6899
6592 if (CHIP_IS_E3(bp)) 6900 if (CHIP_IS_E3(bp))
6593 REG_WR(bp, BP_PORT(bp) ? 6901 REG_WR(bp, BP_PORT(bp) ?
@@ -6609,6 +6917,7 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
6609 val = 1; 6917 val = 1;
6610 break; 6918 break;
6611 case MULTI_FUNCTION_SI: 6919 case MULTI_FUNCTION_SI:
6920 case MULTI_FUNCTION_AFEX:
6612 val = 2; 6921 val = 2;
6613 break; 6922 break;
6614 } 6923 }
@@ -6640,21 +6949,71 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
6640static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) 6949static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6641{ 6950{
6642 int reg; 6951 int reg;
6952 u32 wb_write[2];
6643 6953
6644 if (CHIP_IS_E1(bp)) 6954 if (CHIP_IS_E1(bp))
6645 reg = PXP2_REG_RQ_ONCHIP_AT + index*8; 6955 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6646 else 6956 else
6647 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8; 6957 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6648 6958
6649 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr)); 6959 wb_write[0] = ONCHIP_ADDR1(addr);
6960 wb_write[1] = ONCHIP_ADDR2(addr);
6961 REG_WR_DMAE(bp, reg, wb_write, 2);
6650} 6962}
6651 6963
6652static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id) 6964static void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
6965 u8 idu_sb_id, bool is_Pf)
6966{
6967 u32 data, ctl, cnt = 100;
6968 u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
6969 u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
6970 u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
6971 u32 sb_bit = 1 << (idu_sb_id%32);
6972 u32 func_encode = func | (is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
6973 u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
6974
6975 /* Not supported in BC mode */
6976 if (CHIP_INT_MODE_IS_BC(bp))
6977 return;
6978
6979 data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
6980 << IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
6981 IGU_REGULAR_CLEANUP_SET |
6982 IGU_REGULAR_BCLEANUP;
6983
6984 ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
6985 func_encode << IGU_CTRL_REG_FID_SHIFT |
6986 IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
6987
6988 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
6989 data, igu_addr_data);
6990 REG_WR(bp, igu_addr_data, data);
6991 mmiowb();
6992 barrier();
6993 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
6994 ctl, igu_addr_ctl);
6995 REG_WR(bp, igu_addr_ctl, ctl);
6996 mmiowb();
6997 barrier();
6998
6999 /* wait for clean up to finish */
7000 while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
7001 msleep(20);
7002
7003
7004 if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
7005 DP(NETIF_MSG_HW,
7006 "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
7007 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
7008 }
7009}
7010
7011static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
6653{ 7012{
6654 bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/); 7013 bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
6655} 7014}
6656 7015
6657static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func) 7016static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
6658{ 7017{
6659 u32 i, base = FUNC_ILT_BASE(func); 7018 u32 i, base = FUNC_ILT_BASE(func);
6660 for (i = base; i < base + ILT_PER_FUNC; i++) 7019 for (i = base; i < base + ILT_PER_FUNC; i++)
@@ -7005,7 +7364,7 @@ void bnx2x_free_mem(struct bnx2x *bp)
7005 BCM_PAGE_SIZE * NUM_EQ_PAGES); 7364 BCM_PAGE_SIZE * NUM_EQ_PAGES);
7006} 7365}
7007 7366
7008static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) 7367static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
7009{ 7368{
7010 int num_groups; 7369 int num_groups;
7011 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1; 7370 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
@@ -7192,7 +7551,8 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
7192 unsigned long ramrod_flags = 0; 7551 unsigned long ramrod_flags = 0;
7193 7552
7194#ifdef BCM_CNIC 7553#ifdef BCM_CNIC
7195 if (is_zero_ether_addr(bp->dev->dev_addr) && IS_MF_STORAGE_SD(bp)) { 7554 if (is_zero_ether_addr(bp->dev->dev_addr) &&
7555 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
7196 DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN, 7556 DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
7197 "Ignoring Zero MAC for STORAGE SD mode\n"); 7557 "Ignoring Zero MAC for STORAGE SD mode\n");
7198 return 0; 7558 return 0;
@@ -7230,7 +7590,7 @@ static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
7230 BNX2X_DEV_INFO("set number of queues to 1\n"); 7590 BNX2X_DEV_INFO("set number of queues to 1\n");
7231 break; 7591 break;
7232 default: 7592 default:
7233 /* Set number of queues according to bp->multi_mode value */ 7593 /* Set number of queues for MSI-X mode */
7234 bnx2x_set_num_queues(bp); 7594 bnx2x_set_num_queues(bp);
7235 7595
7236 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues); 7596 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
@@ -7239,15 +7599,17 @@ static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
7239 * so try to enable MSI-X with the requested number of fp's 7599 * so try to enable MSI-X with the requested number of fp's
7240 * and fallback to MSI or legacy INTx with one fp 7600 * and fallback to MSI or legacy INTx with one fp
7241 */ 7601 */
7242 if (bnx2x_enable_msix(bp)) { 7602 if (bnx2x_enable_msix(bp) ||
7243 /* failed to enable MSI-X */ 7603 bp->flags & USING_SINGLE_MSIX_FLAG) {
7244 BNX2X_DEV_INFO("Failed to enable MSI-X (%d), set number of queues to %d\n", 7604 /* failed to enable multiple MSI-X */
7605 BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
7245 bp->num_queues, 1 + NON_ETH_CONTEXT_USE); 7606 bp->num_queues, 1 + NON_ETH_CONTEXT_USE);
7246 7607
7247 bp->num_queues = 1 + NON_ETH_CONTEXT_USE; 7608 bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
7248 7609
7249 /* Try to enable MSI */ 7610 /* Try to enable MSI */
7250 if (!(bp->flags & DISABLE_MSI_FLAG)) 7611 if (!(bp->flags & USING_SINGLE_MSIX_FLAG) &&
7612 !(bp->flags & DISABLE_MSI_FLAG))
7251 bnx2x_enable_msi(bp); 7613 bnx2x_enable_msi(bp);
7252 } 7614 }
7253 break; 7615 break;
@@ -7368,7 +7730,7 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
7368 * - HC configuration 7730 * - HC configuration
7369 * - Queue's CDU context 7731 * - Queue's CDU context
7370 */ 7732 */
7371static inline void bnx2x_pf_q_prep_init(struct bnx2x *bp, 7733static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
7372 struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params) 7734 struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
7373{ 7735{
7374 7736
@@ -7718,7 +8080,7 @@ static void bnx2x_reset_port(struct bnx2x *bp)
7718 /* TODO: Close Doorbell port? */ 8080 /* TODO: Close Doorbell port? */
7719} 8081}
7720 8082
7721static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code) 8083static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
7722{ 8084{
7723 struct bnx2x_func_state_params func_params = {NULL}; 8085 struct bnx2x_func_state_params func_params = {NULL};
7724 8086
@@ -7733,7 +8095,7 @@ static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
7733 return bnx2x_func_state_change(bp, &func_params); 8095 return bnx2x_func_state_change(bp, &func_params);
7734} 8096}
7735 8097
7736static inline int bnx2x_func_stop(struct bnx2x *bp) 8098static int bnx2x_func_stop(struct bnx2x *bp)
7737{ 8099{
7738 struct bnx2x_func_state_params func_params = {NULL}; 8100 struct bnx2x_func_state_params func_params = {NULL};
7739 int rc; 8101 int rc;
@@ -7848,7 +8210,7 @@ void bnx2x_send_unload_done(struct bnx2x *bp)
7848 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0); 8210 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
7849} 8211}
7850 8212
7851static inline int bnx2x_func_wait_started(struct bnx2x *bp) 8213static int bnx2x_func_wait_started(struct bnx2x *bp)
7852{ 8214{
7853 int tout = 50; 8215 int tout = 50;
7854 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0; 8216 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
@@ -8158,7 +8520,7 @@ static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
8158 * 8520 *
8159 * @bp: driver handle 8521 * @bp: driver handle
8160 */ 8522 */
8161static inline void bnx2x_mcp_wait_one(struct bnx2x *bp) 8523static void bnx2x_mcp_wait_one(struct bnx2x *bp)
8162{ 8524{
8163 /* special handling for emulation and FPGA, 8525 /* special handling for emulation and FPGA,
8164 wait 10 times longer */ 8526 wait 10 times longer */
@@ -8494,7 +8856,7 @@ exit_leader_reset:
8494 return rc; 8856 return rc;
8495} 8857}
8496 8858
8497static inline void bnx2x_recovery_failed(struct bnx2x *bp) 8859static void bnx2x_recovery_failed(struct bnx2x *bp)
8498{ 8860{
8499 netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n"); 8861 netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
8500 8862
@@ -8727,7 +9089,8 @@ sp_rtnl_not_reset:
8727#endif 9089#endif
8728 if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state)) 9090 if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
8729 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos); 9091 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
8730 9092 if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
9093 bnx2x_after_function_update(bp);
8731 /* 9094 /*
8732 * in case of fan failure we need to reset id if the "stop on error" 9095 * in case of fan failure we need to reset id if the "stop on error"
8733 * debug flag is set, since we trying to prevent permanent overheating 9096 * debug flag is set, since we trying to prevent permanent overheating
@@ -9222,6 +9585,17 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
9222 id |= (val & 0xf); 9585 id |= (val & 0xf);
9223 bp->common.chip_id = id; 9586 bp->common.chip_id = id;
9224 9587
9588 /* force 57811 according to MISC register */
9589 if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
9590 if (CHIP_IS_57810(bp))
9591 bp->common.chip_id = (CHIP_NUM_57811 << 16) |
9592 (bp->common.chip_id & 0x0000FFFF);
9593 else if (CHIP_IS_57810_MF(bp))
9594 bp->common.chip_id = (CHIP_NUM_57811_MF << 16) |
9595 (bp->common.chip_id & 0x0000FFFF);
9596 bp->common.chip_id |= 0x1;
9597 }
9598
9225 /* Set doorbell size */ 9599 /* Set doorbell size */
9226 bp->db_size = (1 << BNX2X_DB_SHIFT); 9600 bp->db_size = (1 << BNX2X_DB_SHIFT);
9227 9601
@@ -9314,7 +9688,9 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
9314 bp->link_params.feature_config_flags |= 9688 bp->link_params.feature_config_flags |=
9315 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ? 9689 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
9316 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0; 9690 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
9317 9691 bp->link_params.feature_config_flags |=
9692 (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ?
9693 FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0;
9318 bp->link_params.feature_config_flags |= 9694 bp->link_params.feature_config_flags |=
9319 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ? 9695 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
9320 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0; 9696 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
@@ -9946,6 +10322,9 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
9946 10322
9947 } else 10323 } else
9948 bp->flags |= NO_FCOE_FLAG; 10324 bp->flags |= NO_FCOE_FLAG;
10325
10326 bp->mf_ext_config = cfg;
10327
9949 } else { /* SD MODE */ 10328 } else { /* SD MODE */
9950 if (IS_MF_STORAGE_SD(bp)) { 10329 if (IS_MF_STORAGE_SD(bp)) {
9951 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) { 10330 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
@@ -9967,6 +10346,11 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
9967 memset(bp->dev->dev_addr, 0, ETH_ALEN); 10346 memset(bp->dev->dev_addr, 0, ETH_ALEN);
9968 } 10347 }
9969 } 10348 }
10349
10350 if (IS_MF_FCOE_AFEX(bp))
10351 /* use FIP MAC as primary MAC */
10352 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
10353
9970#endif 10354#endif
9971 } else { 10355 } else {
9972 /* in SF read MACs from port configuration */ 10356 /* in SF read MACs from port configuration */
@@ -10139,6 +10523,19 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
10139 } else 10523 } else
10140 BNX2X_DEV_INFO("illegal MAC address for SI\n"); 10524 BNX2X_DEV_INFO("illegal MAC address for SI\n");
10141 break; 10525 break;
10526 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
10527 if ((!CHIP_IS_E1x(bp)) &&
10528 (MF_CFG_RD(bp, func_mf_config[func].
10529 mac_upper) != 0xffff) &&
10530 (SHMEM2_HAS(bp,
10531 afex_driver_support))) {
10532 bp->mf_mode = MULTI_FUNCTION_AFEX;
10533 bp->mf_config[vn] = MF_CFG_RD(bp,
10534 func_mf_config[func].config);
10535 } else {
10536 BNX2X_DEV_INFO("can not configure afex mode\n");
10537 }
10538 break;
10142 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED: 10539 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
10143 /* get OV configuration */ 10540 /* get OV configuration */
10144 val = MF_CFG_RD(bp, 10541 val = MF_CFG_RD(bp,
@@ -10179,6 +10576,9 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
10179 return -EPERM; 10576 return -EPERM;
10180 } 10577 }
10181 break; 10578 break;
10579 case MULTI_FUNCTION_AFEX:
10580 BNX2X_DEV_INFO("func %d is in MF afex mode\n", func);
10581 break;
10182 case MULTI_FUNCTION_SI: 10582 case MULTI_FUNCTION_SI:
10183 BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n", 10583 BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n",
10184 func); 10584 func);
@@ -10346,6 +10746,9 @@ static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp)
10346 case MULTI_FUNCTION_SI: 10746 case MULTI_FUNCTION_SI:
10347 SET_FLAGS(flags, MODE_MF_SI); 10747 SET_FLAGS(flags, MODE_MF_SI);
10348 break; 10748 break;
10749 case MULTI_FUNCTION_AFEX:
10750 SET_FLAGS(flags, MODE_MF_AFEX);
10751 break;
10349 } 10752 }
10350 } else 10753 } else
10351 SET_FLAGS(flags, MODE_SF); 10754 SET_FLAGS(flags, MODE_SF);
@@ -10405,12 +10808,10 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
10405 if (BP_NOMCP(bp) && (func == 0)) 10808 if (BP_NOMCP(bp) && (func == 0))
10406 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n"); 10809 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
10407 10810
10408 bp->multi_mode = multi_mode;
10409
10410 bp->disable_tpa = disable_tpa; 10811 bp->disable_tpa = disable_tpa;
10411 10812
10412#ifdef BCM_CNIC 10813#ifdef BCM_CNIC
10413 bp->disable_tpa |= IS_MF_STORAGE_SD(bp); 10814 bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);
10414#endif 10815#endif
10415 10816
10416 /* Set TPA flags */ 10817 /* Set TPA flags */
@@ -10429,7 +10830,7 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
10429 10830
10430 bp->mrrs = mrrs; 10831 bp->mrrs = mrrs;
10431 10832
10432 bp->tx_ring_size = MAX_TX_AVAIL; 10833 bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
10433 10834
10434 /* make sure that the numbers are in the right granularity */ 10835 /* make sure that the numbers are in the right granularity */
10435 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR; 10836 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
@@ -10460,8 +10861,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
10460 if (CHIP_IS_E3B0(bp)) 10861 if (CHIP_IS_E3B0(bp))
10461 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0; 10862 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
10462 10863
10463 bp->gro_check = bnx2x_need_gro_check(bp->dev->mtu);
10464
10465 return rc; 10864 return rc;
10466} 10865}
10467 10866
@@ -10551,8 +10950,8 @@ static int bnx2x_close(struct net_device *dev)
10551 return 0; 10950 return 0;
10552} 10951}
10553 10952
10554static inline int bnx2x_init_mcast_macs_list(struct bnx2x *bp, 10953static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
10555 struct bnx2x_mcast_ramrod_params *p) 10954 struct bnx2x_mcast_ramrod_params *p)
10556{ 10955{
10557 int mc_count = netdev_mc_count(bp->dev); 10956 int mc_count = netdev_mc_count(bp->dev);
10558 struct bnx2x_mcast_list_elem *mc_mac = 10957 struct bnx2x_mcast_list_elem *mc_mac =
@@ -10575,7 +10974,7 @@ static inline int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
10575 return 0; 10974 return 0;
10576} 10975}
10577 10976
10578static inline void bnx2x_free_mcast_macs_list( 10977static void bnx2x_free_mcast_macs_list(
10579 struct bnx2x_mcast_ramrod_params *p) 10978 struct bnx2x_mcast_ramrod_params *p)
10580{ 10979{
10581 struct bnx2x_mcast_list_elem *mc_mac = 10980 struct bnx2x_mcast_list_elem *mc_mac =
@@ -10593,7 +10992,7 @@ static inline void bnx2x_free_mcast_macs_list(
10593 * 10992 *
10594 * We will use zero (0) as a MAC type for these MACs. 10993 * We will use zero (0) as a MAC type for these MACs.
10595 */ 10994 */
10596static inline int bnx2x_set_uc_list(struct bnx2x *bp) 10995static int bnx2x_set_uc_list(struct bnx2x *bp)
10597{ 10996{
10598 int rc; 10997 int rc;
10599 struct net_device *dev = bp->dev; 10998 struct net_device *dev = bp->dev;
@@ -10624,7 +11023,7 @@ static inline int bnx2x_set_uc_list(struct bnx2x *bp)
10624 BNX2X_UC_LIST_MAC, &ramrod_flags); 11023 BNX2X_UC_LIST_MAC, &ramrod_flags);
10625} 11024}
10626 11025
10627static inline int bnx2x_set_mc_list(struct bnx2x *bp) 11026static int bnx2x_set_mc_list(struct bnx2x *bp)
10628{ 11027{
10629 struct net_device *dev = bp->dev; 11028 struct net_device *dev = bp->dev;
10630 struct bnx2x_mcast_ramrod_params rparam = {NULL}; 11029 struct bnx2x_mcast_ramrod_params rparam = {NULL};
@@ -10810,7 +11209,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
10810#endif 11209#endif
10811}; 11210};
10812 11211
10813static inline int bnx2x_set_coherency_mask(struct bnx2x *bp) 11212static int bnx2x_set_coherency_mask(struct bnx2x *bp)
10814{ 11213{
10815 struct device *dev = &bp->pdev->dev; 11214 struct device *dev = &bp->pdev->dev;
10816 11215
@@ -11076,7 +11475,7 @@ static int bnx2x_check_firmware(struct bnx2x *bp)
11076 return 0; 11475 return 0;
11077} 11476}
11078 11477
11079static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n) 11478static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11080{ 11479{
11081 const __be32 *source = (const __be32 *)_source; 11480 const __be32 *source = (const __be32 *)_source;
11082 u32 *target = (u32 *)_target; 11481 u32 *target = (u32 *)_target;
@@ -11090,7 +11489,7 @@ static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11090 Ops array is stored in the following format: 11489 Ops array is stored in the following format:
11091 {op(8bit), offset(24bit, big endian), data(32bit, big endian)} 11490 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
11092 */ 11491 */
11093static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n) 11492static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11094{ 11493{
11095 const __be32 *source = (const __be32 *)_source; 11494 const __be32 *source = (const __be32 *)_source;
11096 struct raw_op *target = (struct raw_op *)_target; 11495 struct raw_op *target = (struct raw_op *)_target;
@@ -11108,7 +11507,7 @@ static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11108 * IRO array is stored in the following format: 11507 * IRO array is stored in the following format:
11109 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) } 11508 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
11110 */ 11509 */
11111static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n) 11510static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
11112{ 11511{
11113 const __be32 *source = (const __be32 *)_source; 11512 const __be32 *source = (const __be32 *)_source;
11114 struct iro *target = (struct iro *)_target; 11513 struct iro *target = (struct iro *)_target;
@@ -11128,7 +11527,7 @@ static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
11128 } 11527 }
11129} 11528}
11130 11529
11131static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n) 11530static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11132{ 11531{
11133 const __be16 *source = (const __be16 *)_source; 11532 const __be16 *source = (const __be16 *)_source;
11134 u16 *target = (u16 *)_target; 11533 u16 *target = (u16 *)_target;
@@ -11265,11 +11664,13 @@ void bnx2x__init_func_obj(struct bnx2x *bp)
11265 bnx2x_init_func_obj(bp, &bp->func_obj, 11664 bnx2x_init_func_obj(bp, &bp->func_obj,
11266 bnx2x_sp(bp, func_rdata), 11665 bnx2x_sp(bp, func_rdata),
11267 bnx2x_sp_mapping(bp, func_rdata), 11666 bnx2x_sp_mapping(bp, func_rdata),
11667 bnx2x_sp(bp, func_afex_rdata),
11668 bnx2x_sp_mapping(bp, func_afex_rdata),
11268 &bnx2x_func_sp_drv); 11669 &bnx2x_func_sp_drv);
11269} 11670}
11270 11671
11271/* must be called after sriov-enable */ 11672/* must be called after sriov-enable */
11272static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp) 11673static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
11273{ 11674{
11274 int cid_count = BNX2X_L2_CID_COUNT(bp); 11675 int cid_count = BNX2X_L2_CID_COUNT(bp);
11275 11676
@@ -11285,7 +11686,7 @@ static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp)
11285 * @dev: pci device 11686 * @dev: pci device
11286 * 11687 *
11287 */ 11688 */
11288static inline int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev) 11689static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev)
11289{ 11690{
11290 int pos; 11691 int pos;
11291 u16 control; 11692 u16 control;
@@ -11346,6 +11747,8 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
11346 case BCM57810_MF: 11747 case BCM57810_MF:
11347 case BCM57840: 11748 case BCM57840:
11348 case BCM57840_MF: 11749 case BCM57840_MF:
11750 case BCM57811:
11751 case BCM57811_MF:
11349 max_cos_est = BNX2X_MULTI_TX_COS_E3B0; 11752 max_cos_est = BNX2X_MULTI_TX_COS_E3B0;
11350 break; 11753 break;
11351 11754
@@ -11759,7 +12162,7 @@ module_exit(bnx2x_cleanup);
11759 * This function will wait until the ramdord completion returns. 12162 * This function will wait until the ramdord completion returns.
11760 * Return 0 if success, -ENODEV if ramrod doesn't return. 12163 * Return 0 if success, -ENODEV if ramrod doesn't return.
11761 */ 12164 */
11762static inline int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp) 12165static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
11763{ 12166{
11764 unsigned long ramrod_flags = 0; 12167 unsigned long ramrod_flags = 0;
11765 12168
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index c25803b9c0ca..bbd387492a80 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -1483,6 +1483,11 @@
1483 starts at 0x0 for the A0 tape-out and increments by one for each 1483 starts at 0x0 for the A0 tape-out and increments by one for each
1484 all-layer tape-out. */ 1484 all-layer tape-out. */
1485#define MISC_REG_CHIP_REV 0xa40c 1485#define MISC_REG_CHIP_REV 0xa40c
1486/* [R 14] otp_misc_do[100:0] spare bits collection: 13:11-
1487 * otp_misc_do[100:98]; 10:7 - otp_misc_do[87:84]; 6:3 - otp_misc_do[75:72];
1488 * 2:1 - otp_misc_do[51:50]; 0 - otp_misc_do[1]. */
1489#define MISC_REG_CHIP_TYPE 0xac60
1490#define MISC_REG_CHIP_TYPE_57811_MASK (1<<1)
1486/* [RW 32] The following driver registers(1...16) represent 16 drivers and 1491/* [RW 32] The following driver registers(1...16) represent 16 drivers and
1487 32 clients. Each client can be controlled by one driver only. One in each 1492 32 clients. Each client can be controlled by one driver only. One in each
1488 bit represent that this driver control the appropriate client (Ex: bit 5 1493 bit represent that this driver control the appropriate client (Ex: bit 5
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 513573321625..6c14b4a4e82c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -633,14 +633,17 @@ static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
633} 633}
634 634
635 635
636static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp, 636void bnx2x_set_mac_in_nig(struct bnx2x *bp,
637 bool add, unsigned char *dev_addr, int index) 637 bool add, unsigned char *dev_addr, int index)
638{ 638{
639 u32 wb_data[2]; 639 u32 wb_data[2];
640 u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM : 640 u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
641 NIG_REG_LLH0_FUNC_MEM; 641 NIG_REG_LLH0_FUNC_MEM;
642 642
643 if (!IS_MF_SI(bp) || index > BNX2X_LLH_CAM_MAX_PF_LINE) 643 if (!IS_MF_SI(bp) && !IS_MF_AFEX(bp))
644 return;
645
646 if (index > BNX2X_LLH_CAM_MAX_PF_LINE)
644 return; 647 return;
645 648
646 DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n", 649 DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
@@ -4090,12 +4093,6 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
4090 rss_mode = ETH_RSS_MODE_DISABLED; 4093 rss_mode = ETH_RSS_MODE_DISABLED;
4091 else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags)) 4094 else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
4092 rss_mode = ETH_RSS_MODE_REGULAR; 4095 rss_mode = ETH_RSS_MODE_REGULAR;
4093 else if (test_bit(BNX2X_RSS_MODE_VLAN_PRI, &p->rss_flags))
4094 rss_mode = ETH_RSS_MODE_VLAN_PRI;
4095 else if (test_bit(BNX2X_RSS_MODE_E1HOV_PRI, &p->rss_flags))
4096 rss_mode = ETH_RSS_MODE_E1HOV_PRI;
4097 else if (test_bit(BNX2X_RSS_MODE_IP_DSCP, &p->rss_flags))
4098 rss_mode = ETH_RSS_MODE_IP_DSCP;
4099 4096
4100 data->rss_mode = rss_mode; 4097 data->rss_mode = rss_mode;
4101 4098
@@ -4404,6 +4401,9 @@ static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4404 test_bit(BNX2X_Q_FLG_TX_SWITCH, flags); 4401 test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
4405 tx_data->anti_spoofing_flg = 4402 tx_data->anti_spoofing_flg =
4406 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags); 4403 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
4404 tx_data->force_default_pri_flg =
4405 test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
4406
4407 tx_data->tx_status_block_id = params->fw_sb_id; 4407 tx_data->tx_status_block_id = params->fw_sb_id;
4408 tx_data->tx_sb_index_number = params->sb_cq_index; 4408 tx_data->tx_sb_index_number = params->sb_cq_index;
4409 tx_data->tss_leading_client_id = params->tss_leading_cl_id; 4409 tx_data->tss_leading_client_id = params->tss_leading_cl_id;
@@ -5331,6 +5331,17 @@ static int bnx2x_func_chk_transition(struct bnx2x *bp,
5331 case BNX2X_F_STATE_STARTED: 5331 case BNX2X_F_STATE_STARTED:
5332 if (cmd == BNX2X_F_CMD_STOP) 5332 if (cmd == BNX2X_F_CMD_STOP)
5333 next_state = BNX2X_F_STATE_INITIALIZED; 5333 next_state = BNX2X_F_STATE_INITIALIZED;
5334 /* afex ramrods can be sent only in started mode, and only
5335 * if not pending for function_stop ramrod completion
5336 * for these events - next state remained STARTED.
5337 */
5338 else if ((cmd == BNX2X_F_CMD_AFEX_UPDATE) &&
5339 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5340 next_state = BNX2X_F_STATE_STARTED;
5341
5342 else if ((cmd == BNX2X_F_CMD_AFEX_VIFLISTS) &&
5343 (!test_bit(BNX2X_F_CMD_STOP, &o->pending)))
5344 next_state = BNX2X_F_STATE_STARTED;
5334 else if (cmd == BNX2X_F_CMD_TX_STOP) 5345 else if (cmd == BNX2X_F_CMD_TX_STOP)
5335 next_state = BNX2X_F_STATE_TX_STOPPED; 5346 next_state = BNX2X_F_STATE_TX_STOPPED;
5336 5347
@@ -5618,6 +5629,83 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
5618 U64_LO(data_mapping), NONE_CONNECTION_TYPE); 5629 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5619} 5630}
5620 5631
5632static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
5633 struct bnx2x_func_state_params *params)
5634{
5635 struct bnx2x_func_sp_obj *o = params->f_obj;
5636 struct function_update_data *rdata =
5637 (struct function_update_data *)o->afex_rdata;
5638 dma_addr_t data_mapping = o->afex_rdata_mapping;
5639 struct bnx2x_func_afex_update_params *afex_update_params =
5640 &params->params.afex_update;
5641
5642 memset(rdata, 0, sizeof(*rdata));
5643
5644 /* Fill the ramrod data with provided parameters */
5645 rdata->vif_id_change_flg = 1;
5646 rdata->vif_id = cpu_to_le16(afex_update_params->vif_id);
5647 rdata->afex_default_vlan_change_flg = 1;
5648 rdata->afex_default_vlan =
5649 cpu_to_le16(afex_update_params->afex_default_vlan);
5650 rdata->allowed_priorities_change_flg = 1;
5651 rdata->allowed_priorities = afex_update_params->allowed_priorities;
5652
5653 /* No need for an explicit memory barrier here as long we would
5654 * need to ensure the ordering of writing to the SPQ element
5655 * and updating of the SPQ producer which involves a memory
5656 * read and we will have to put a full memory barrier there
5657 * (inside bnx2x_sp_post()).
5658 */
5659 DP(BNX2X_MSG_SP,
5660 "afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
5661 rdata->vif_id,
5662 rdata->afex_default_vlan, rdata->allowed_priorities);
5663
5664 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
5665 U64_HI(data_mapping),
5666 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5667}
5668
5669static
5670inline int bnx2x_func_send_afex_viflists(struct bnx2x *bp,
5671 struct bnx2x_func_state_params *params)
5672{
5673 struct bnx2x_func_sp_obj *o = params->f_obj;
5674 struct afex_vif_list_ramrod_data *rdata =
5675 (struct afex_vif_list_ramrod_data *)o->afex_rdata;
5676 struct bnx2x_func_afex_viflists_params *afex_viflist_params =
5677 &params->params.afex_viflists;
5678 u64 *p_rdata = (u64 *)rdata;
5679
5680 memset(rdata, 0, sizeof(*rdata));
5681
5682 /* Fill the ramrod data with provided parameters */
5683 rdata->vif_list_index = afex_viflist_params->vif_list_index;
5684 rdata->func_bit_map = afex_viflist_params->func_bit_map;
5685 rdata->afex_vif_list_command =
5686 afex_viflist_params->afex_vif_list_command;
5687 rdata->func_to_clear = afex_viflist_params->func_to_clear;
5688
5689 /* send in echo type of sub command */
5690 rdata->echo = afex_viflist_params->afex_vif_list_command;
5691
5692 /* No need for an explicit memory barrier here as long we would
5693 * need to ensure the ordering of writing to the SPQ element
5694 * and updating of the SPQ producer which involves a memory
5695 * read and we will have to put a full memory barrier there
5696 * (inside bnx2x_sp_post()).
5697 */
5698
5699 DP(BNX2X_MSG_SP, "afex: ramrod lists, cmd 0x%x index 0x%x func_bit_map 0x%x func_to_clr 0x%x\n",
5700 rdata->afex_vif_list_command, rdata->vif_list_index,
5701 rdata->func_bit_map, rdata->func_to_clear);
5702
5703 /* this ramrod sends data directly and not through DMA mapping */
5704 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS, 0,
5705 U64_HI(*p_rdata), U64_LO(*p_rdata),
5706 NONE_CONNECTION_TYPE);
5707}
5708
5621static inline int bnx2x_func_send_stop(struct bnx2x *bp, 5709static inline int bnx2x_func_send_stop(struct bnx2x *bp,
5622 struct bnx2x_func_state_params *params) 5710 struct bnx2x_func_state_params *params)
5623{ 5711{
@@ -5669,6 +5757,10 @@ static int bnx2x_func_send_cmd(struct bnx2x *bp,
5669 return bnx2x_func_send_stop(bp, params); 5757 return bnx2x_func_send_stop(bp, params);
5670 case BNX2X_F_CMD_HW_RESET: 5758 case BNX2X_F_CMD_HW_RESET:
5671 return bnx2x_func_hw_reset(bp, params); 5759 return bnx2x_func_hw_reset(bp, params);
5760 case BNX2X_F_CMD_AFEX_UPDATE:
5761 return bnx2x_func_send_afex_update(bp, params);
5762 case BNX2X_F_CMD_AFEX_VIFLISTS:
5763 return bnx2x_func_send_afex_viflists(bp, params);
5672 case BNX2X_F_CMD_TX_STOP: 5764 case BNX2X_F_CMD_TX_STOP:
5673 return bnx2x_func_send_tx_stop(bp, params); 5765 return bnx2x_func_send_tx_stop(bp, params);
5674 case BNX2X_F_CMD_TX_START: 5766 case BNX2X_F_CMD_TX_START:
@@ -5682,6 +5774,7 @@ static int bnx2x_func_send_cmd(struct bnx2x *bp,
5682void bnx2x_init_func_obj(struct bnx2x *bp, 5774void bnx2x_init_func_obj(struct bnx2x *bp,
5683 struct bnx2x_func_sp_obj *obj, 5775 struct bnx2x_func_sp_obj *obj,
5684 void *rdata, dma_addr_t rdata_mapping, 5776 void *rdata, dma_addr_t rdata_mapping,
5777 void *afex_rdata, dma_addr_t afex_rdata_mapping,
5685 struct bnx2x_func_sp_drv_ops *drv_iface) 5778 struct bnx2x_func_sp_drv_ops *drv_iface)
5686{ 5779{
5687 memset(obj, 0, sizeof(*obj)); 5780 memset(obj, 0, sizeof(*obj));
@@ -5690,7 +5783,8 @@ void bnx2x_init_func_obj(struct bnx2x *bp,
5690 5783
5691 obj->rdata = rdata; 5784 obj->rdata = rdata;
5692 obj->rdata_mapping = rdata_mapping; 5785 obj->rdata_mapping = rdata_mapping;
5693 5786 obj->afex_rdata = afex_rdata;
5787 obj->afex_rdata_mapping = afex_rdata_mapping;
5694 obj->send_cmd = bnx2x_func_send_cmd; 5788 obj->send_cmd = bnx2x_func_send_cmd;
5695 obj->check_transition = bnx2x_func_chk_transition; 5789 obj->check_transition = bnx2x_func_chk_transition;
5696 obj->complete_cmd = bnx2x_func_comp_cmd; 5790 obj->complete_cmd = bnx2x_func_comp_cmd;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 61a7670adfcd..efd80bdd0dfe 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -62,6 +62,8 @@ enum {
62 BNX2X_FILTER_MCAST_PENDING, 62 BNX2X_FILTER_MCAST_PENDING,
63 BNX2X_FILTER_MCAST_SCHED, 63 BNX2X_FILTER_MCAST_SCHED,
64 BNX2X_FILTER_RSS_CONF_PENDING, 64 BNX2X_FILTER_RSS_CONF_PENDING,
65 BNX2X_AFEX_FCOE_Q_UPDATE_PENDING,
66 BNX2X_AFEX_PENDING_VIFSET_MCP_ACK
65}; 67};
66 68
67struct bnx2x_raw_obj { 69struct bnx2x_raw_obj {
@@ -432,6 +434,8 @@ enum {
432 BNX2X_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2 434 BNX2X_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
433}; 435};
434 436
437void bnx2x_set_mac_in_nig(struct bnx2x *bp,
438 bool add, unsigned char *dev_addr, int index);
435 439
436/** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */ 440/** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
437 441
@@ -685,9 +689,6 @@ enum {
685 /* RSS_MODE bits are mutually exclusive */ 689 /* RSS_MODE bits are mutually exclusive */
686 BNX2X_RSS_MODE_DISABLED, 690 BNX2X_RSS_MODE_DISABLED,
687 BNX2X_RSS_MODE_REGULAR, 691 BNX2X_RSS_MODE_REGULAR,
688 BNX2X_RSS_MODE_VLAN_PRI,
689 BNX2X_RSS_MODE_E1HOV_PRI,
690 BNX2X_RSS_MODE_IP_DSCP,
691 692
692 BNX2X_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */ 693 BNX2X_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */
693 694
@@ -801,7 +802,8 @@ enum {
801 BNX2X_Q_FLG_TX_SWITCH, 802 BNX2X_Q_FLG_TX_SWITCH,
802 BNX2X_Q_FLG_TX_SEC, 803 BNX2X_Q_FLG_TX_SEC,
803 BNX2X_Q_FLG_ANTI_SPOOF, 804 BNX2X_Q_FLG_ANTI_SPOOF,
804 BNX2X_Q_FLG_SILENT_VLAN_REM 805 BNX2X_Q_FLG_SILENT_VLAN_REM,
806 BNX2X_Q_FLG_FORCE_DEFAULT_PRI
805}; 807};
806 808
807/* Queue type options: queue type may be a compination of below. */ 809/* Queue type options: queue type may be a compination of below. */
@@ -963,6 +965,11 @@ struct bnx2x_queue_state_params {
963 } params; 965 } params;
964}; 966};
965 967
968struct bnx2x_viflist_params {
969 u8 echo_res;
970 u8 func_bit_map_res;
971};
972
966struct bnx2x_queue_sp_obj { 973struct bnx2x_queue_sp_obj {
967 u32 cids[BNX2X_MULTI_TX_COS]; 974 u32 cids[BNX2X_MULTI_TX_COS];
968 u8 cl_id; 975 u8 cl_id;
@@ -1045,6 +1052,8 @@ enum bnx2x_func_cmd {
1045 BNX2X_F_CMD_START, 1052 BNX2X_F_CMD_START,
1046 BNX2X_F_CMD_STOP, 1053 BNX2X_F_CMD_STOP,
1047 BNX2X_F_CMD_HW_RESET, 1054 BNX2X_F_CMD_HW_RESET,
1055 BNX2X_F_CMD_AFEX_UPDATE,
1056 BNX2X_F_CMD_AFEX_VIFLISTS,
1048 BNX2X_F_CMD_TX_STOP, 1057 BNX2X_F_CMD_TX_STOP,
1049 BNX2X_F_CMD_TX_START, 1058 BNX2X_F_CMD_TX_START,
1050 BNX2X_F_CMD_MAX, 1059 BNX2X_F_CMD_MAX,
@@ -1089,6 +1098,18 @@ struct bnx2x_func_start_params {
1089 u8 network_cos_mode; 1098 u8 network_cos_mode;
1090}; 1099};
1091 1100
1101struct bnx2x_func_afex_update_params {
1102 u16 vif_id;
1103 u16 afex_default_vlan;
1104 u8 allowed_priorities;
1105};
1106
1107struct bnx2x_func_afex_viflists_params {
1108 u16 vif_list_index;
1109 u8 func_bit_map;
1110 u8 afex_vif_list_command;
1111 u8 func_to_clear;
1112};
1092struct bnx2x_func_tx_start_params { 1113struct bnx2x_func_tx_start_params {
1093 struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES]; 1114 struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
1094 u8 dcb_enabled; 1115 u8 dcb_enabled;
@@ -1110,6 +1131,8 @@ struct bnx2x_func_state_params {
1110 struct bnx2x_func_hw_init_params hw_init; 1131 struct bnx2x_func_hw_init_params hw_init;
1111 struct bnx2x_func_hw_reset_params hw_reset; 1132 struct bnx2x_func_hw_reset_params hw_reset;
1112 struct bnx2x_func_start_params start; 1133 struct bnx2x_func_start_params start;
1134 struct bnx2x_func_afex_update_params afex_update;
1135 struct bnx2x_func_afex_viflists_params afex_viflists;
1113 struct bnx2x_func_tx_start_params tx_start; 1136 struct bnx2x_func_tx_start_params tx_start;
1114 } params; 1137 } params;
1115}; 1138};
@@ -1154,6 +1177,13 @@ struct bnx2x_func_sp_obj {
1154 void *rdata; 1177 void *rdata;
1155 dma_addr_t rdata_mapping; 1178 dma_addr_t rdata_mapping;
1156 1179
1180 /* Buffer to use as a afex ramrod data and its mapping.
1181 * This can't be same rdata as above because afex ramrod requests
1182 * can arrive to the object in parallel to other ramrod requests.
1183 */
1184 void *afex_rdata;
1185 dma_addr_t afex_rdata_mapping;
1186
1157 /* this mutex validates that when pending flag is taken, the next 1187 /* this mutex validates that when pending flag is taken, the next
1158 * ramrod to be sent will be the one set the pending bit 1188 * ramrod to be sent will be the one set the pending bit
1159 */ 1189 */
@@ -1197,6 +1227,7 @@ union bnx2x_qable_obj {
1197void bnx2x_init_func_obj(struct bnx2x *bp, 1227void bnx2x_init_func_obj(struct bnx2x *bp,
1198 struct bnx2x_func_sp_obj *obj, 1228 struct bnx2x_func_sp_obj *obj,
1199 void *rdata, dma_addr_t rdata_mapping, 1229 void *rdata, dma_addr_t rdata_mapping,
1230 void *afex_rdata, dma_addr_t afex_rdata_mapping,
1200 struct bnx2x_func_sp_drv_ops *drv_iface); 1231 struct bnx2x_func_sp_drv_ops *drv_iface);
1201 1232
1202int bnx2x_func_state_change(struct bnx2x *bp, 1233int bnx2x_func_state_change(struct bnx2x *bp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index e1c9310fb07c..1e2785cd11d0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -1316,7 +1316,7 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
1316 * 1316 *
1317 * @param bp 1317 * @param bp
1318 */ 1318 */
1319static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp) 1319static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
1320{ 1320{
1321 int i; 1321 int i;
1322 int first_queue_query_index; 1322 int first_queue_query_index;
@@ -1561,3 +1561,274 @@ void bnx2x_save_statistics(struct bnx2x *bp)
1561 UPDATE_FW_STAT_OLD(mac_discard); 1561 UPDATE_FW_STAT_OLD(mac_discard);
1562 } 1562 }
1563} 1563}
1564
1565void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
1566 u32 stats_type)
1567{
1568 int i;
1569 struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats;
1570 struct bnx2x_eth_stats *estats = &bp->eth_stats;
1571 struct per_queue_stats *fcoe_q_stats =
1572 &bp->fw_stats_data->queue_stats[FCOE_IDX];
1573
1574 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
1575 &fcoe_q_stats->tstorm_queue_statistics;
1576
1577 struct ustorm_per_queue_stats *fcoe_q_ustorm_stats =
1578 &fcoe_q_stats->ustorm_queue_statistics;
1579
1580 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
1581 &fcoe_q_stats->xstorm_queue_statistics;
1582
1583 struct fcoe_statistics_params *fw_fcoe_stat =
1584 &bp->fw_stats_data->fcoe;
1585
1586 memset(afex_stats, 0, sizeof(struct afex_stats));
1587
1588 for_each_eth_queue(bp, i) {
1589 struct bnx2x_fastpath *fp = &bp->fp[i];
1590 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
1591
1592 ADD_64(afex_stats->rx_unicast_bytes_hi,
1593 qstats->total_unicast_bytes_received_hi,
1594 afex_stats->rx_unicast_bytes_lo,
1595 qstats->total_unicast_bytes_received_lo);
1596
1597 ADD_64(afex_stats->rx_broadcast_bytes_hi,
1598 qstats->total_broadcast_bytes_received_hi,
1599 afex_stats->rx_broadcast_bytes_lo,
1600 qstats->total_broadcast_bytes_received_lo);
1601
1602 ADD_64(afex_stats->rx_multicast_bytes_hi,
1603 qstats->total_multicast_bytes_received_hi,
1604 afex_stats->rx_multicast_bytes_lo,
1605 qstats->total_multicast_bytes_received_lo);
1606
1607 ADD_64(afex_stats->rx_unicast_frames_hi,
1608 qstats->total_unicast_packets_received_hi,
1609 afex_stats->rx_unicast_frames_lo,
1610 qstats->total_unicast_packets_received_lo);
1611
1612 ADD_64(afex_stats->rx_broadcast_frames_hi,
1613 qstats->total_broadcast_packets_received_hi,
1614 afex_stats->rx_broadcast_frames_lo,
1615 qstats->total_broadcast_packets_received_lo);
1616
1617 ADD_64(afex_stats->rx_multicast_frames_hi,
1618 qstats->total_multicast_packets_received_hi,
1619 afex_stats->rx_multicast_frames_lo,
1620 qstats->total_multicast_packets_received_lo);
1621
1622 /* sum to rx_frames_discarded all discraded
1623 * packets due to size, ttl0 and checksum
1624 */
1625 ADD_64(afex_stats->rx_frames_discarded_hi,
1626 qstats->total_packets_received_checksum_discarded_hi,
1627 afex_stats->rx_frames_discarded_lo,
1628 qstats->total_packets_received_checksum_discarded_lo);
1629
1630 ADD_64(afex_stats->rx_frames_discarded_hi,
1631 qstats->total_packets_received_ttl0_discarded_hi,
1632 afex_stats->rx_frames_discarded_lo,
1633 qstats->total_packets_received_ttl0_discarded_lo);
1634
1635 ADD_64(afex_stats->rx_frames_discarded_hi,
1636 qstats->etherstatsoverrsizepkts_hi,
1637 afex_stats->rx_frames_discarded_lo,
1638 qstats->etherstatsoverrsizepkts_lo);
1639
1640 ADD_64(afex_stats->rx_frames_dropped_hi,
1641 qstats->no_buff_discard_hi,
1642 afex_stats->rx_frames_dropped_lo,
1643 qstats->no_buff_discard_lo);
1644
1645 ADD_64(afex_stats->tx_unicast_bytes_hi,
1646 qstats->total_unicast_bytes_transmitted_hi,
1647 afex_stats->tx_unicast_bytes_lo,
1648 qstats->total_unicast_bytes_transmitted_lo);
1649
1650 ADD_64(afex_stats->tx_broadcast_bytes_hi,
1651 qstats->total_broadcast_bytes_transmitted_hi,
1652 afex_stats->tx_broadcast_bytes_lo,
1653 qstats->total_broadcast_bytes_transmitted_lo);
1654
1655 ADD_64(afex_stats->tx_multicast_bytes_hi,
1656 qstats->total_multicast_bytes_transmitted_hi,
1657 afex_stats->tx_multicast_bytes_lo,
1658 qstats->total_multicast_bytes_transmitted_lo);
1659
1660 ADD_64(afex_stats->tx_unicast_frames_hi,
1661 qstats->total_unicast_packets_transmitted_hi,
1662 afex_stats->tx_unicast_frames_lo,
1663 qstats->total_unicast_packets_transmitted_lo);
1664
1665 ADD_64(afex_stats->tx_broadcast_frames_hi,
1666 qstats->total_broadcast_packets_transmitted_hi,
1667 afex_stats->tx_broadcast_frames_lo,
1668 qstats->total_broadcast_packets_transmitted_lo);
1669
1670 ADD_64(afex_stats->tx_multicast_frames_hi,
1671 qstats->total_multicast_packets_transmitted_hi,
1672 afex_stats->tx_multicast_frames_lo,
1673 qstats->total_multicast_packets_transmitted_lo);
1674
1675 ADD_64(afex_stats->tx_frames_dropped_hi,
1676 qstats->total_transmitted_dropped_packets_error_hi,
1677 afex_stats->tx_frames_dropped_lo,
1678 qstats->total_transmitted_dropped_packets_error_lo);
1679 }
1680
1681 /* now add FCoE statistics which are collected separately
1682 * (both offloaded and non offloaded)
1683 */
1684 if (!NO_FCOE(bp)) {
1685 ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
1686 LE32_0,
1687 afex_stats->rx_unicast_bytes_lo,
1688 fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
1689
1690 ADD_64_LE(afex_stats->rx_unicast_bytes_hi,
1691 fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
1692 afex_stats->rx_unicast_bytes_lo,
1693 fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
1694
1695 ADD_64_LE(afex_stats->rx_broadcast_bytes_hi,
1696 fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
1697 afex_stats->rx_broadcast_bytes_lo,
1698 fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
1699
1700 ADD_64_LE(afex_stats->rx_multicast_bytes_hi,
1701 fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
1702 afex_stats->rx_multicast_bytes_lo,
1703 fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
1704
1705 ADD_64_LE(afex_stats->rx_unicast_frames_hi,
1706 LE32_0,
1707 afex_stats->rx_unicast_frames_lo,
1708 fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
1709
1710 ADD_64_LE(afex_stats->rx_unicast_frames_hi,
1711 LE32_0,
1712 afex_stats->rx_unicast_frames_lo,
1713 fcoe_q_tstorm_stats->rcv_ucast_pkts);
1714
1715 ADD_64_LE(afex_stats->rx_broadcast_frames_hi,
1716 LE32_0,
1717 afex_stats->rx_broadcast_frames_lo,
1718 fcoe_q_tstorm_stats->rcv_bcast_pkts);
1719
1720 ADD_64_LE(afex_stats->rx_multicast_frames_hi,
1721 LE32_0,
1722 afex_stats->rx_multicast_frames_lo,
1723 fcoe_q_tstorm_stats->rcv_ucast_pkts);
1724
1725 ADD_64_LE(afex_stats->rx_frames_discarded_hi,
1726 LE32_0,
1727 afex_stats->rx_frames_discarded_lo,
1728 fcoe_q_tstorm_stats->checksum_discard);
1729
1730 ADD_64_LE(afex_stats->rx_frames_discarded_hi,
1731 LE32_0,
1732 afex_stats->rx_frames_discarded_lo,
1733 fcoe_q_tstorm_stats->pkts_too_big_discard);
1734
1735 ADD_64_LE(afex_stats->rx_frames_discarded_hi,
1736 LE32_0,
1737 afex_stats->rx_frames_discarded_lo,
1738 fcoe_q_tstorm_stats->ttl0_discard);
1739
1740 ADD_64_LE16(afex_stats->rx_frames_dropped_hi,
1741 LE16_0,
1742 afex_stats->rx_frames_dropped_lo,
1743 fcoe_q_tstorm_stats->no_buff_discard);
1744
1745 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1746 LE32_0,
1747 afex_stats->rx_frames_dropped_lo,
1748 fcoe_q_ustorm_stats->ucast_no_buff_pkts);
1749
1750 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1751 LE32_0,
1752 afex_stats->rx_frames_dropped_lo,
1753 fcoe_q_ustorm_stats->mcast_no_buff_pkts);
1754
1755 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1756 LE32_0,
1757 afex_stats->rx_frames_dropped_lo,
1758 fcoe_q_ustorm_stats->bcast_no_buff_pkts);
1759
1760 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1761 LE32_0,
1762 afex_stats->rx_frames_dropped_lo,
1763 fw_fcoe_stat->rx_stat1.fcoe_rx_drop_pkt_cnt);
1764
1765 ADD_64_LE(afex_stats->rx_frames_dropped_hi,
1766 LE32_0,
1767 afex_stats->rx_frames_dropped_lo,
1768 fw_fcoe_stat->rx_stat2.fcoe_rx_drop_pkt_cnt);
1769
1770 ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
1771 LE32_0,
1772 afex_stats->tx_unicast_bytes_lo,
1773 fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
1774
1775 ADD_64_LE(afex_stats->tx_unicast_bytes_hi,
1776 fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
1777 afex_stats->tx_unicast_bytes_lo,
1778 fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
1779
1780 ADD_64_LE(afex_stats->tx_broadcast_bytes_hi,
1781 fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
1782 afex_stats->tx_broadcast_bytes_lo,
1783 fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
1784
1785 ADD_64_LE(afex_stats->tx_multicast_bytes_hi,
1786 fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
1787 afex_stats->tx_multicast_bytes_lo,
1788 fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
1789
1790 ADD_64_LE(afex_stats->tx_unicast_frames_hi,
1791 LE32_0,
1792 afex_stats->tx_unicast_frames_lo,
1793 fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
1794
1795 ADD_64_LE(afex_stats->tx_unicast_frames_hi,
1796 LE32_0,
1797 afex_stats->tx_unicast_frames_lo,
1798 fcoe_q_xstorm_stats->ucast_pkts_sent);
1799
1800 ADD_64_LE(afex_stats->tx_broadcast_frames_hi,
1801 LE32_0,
1802 afex_stats->tx_broadcast_frames_lo,
1803 fcoe_q_xstorm_stats->bcast_pkts_sent);
1804
1805 ADD_64_LE(afex_stats->tx_multicast_frames_hi,
1806 LE32_0,
1807 afex_stats->tx_multicast_frames_lo,
1808 fcoe_q_xstorm_stats->mcast_pkts_sent);
1809
1810 ADD_64_LE(afex_stats->tx_frames_dropped_hi,
1811 LE32_0,
1812 afex_stats->tx_frames_dropped_lo,
1813 fcoe_q_xstorm_stats->error_drop_pkts);
1814 }
1815
1816 /* if port stats are requested, add them to the PMF
1817 * stats, as anyway they will be accumulated by the
1818 * MCP before sent to the switch
1819 */
1820 if ((bp->port.pmf) && (stats_type == VICSTATST_UIF_INDEX)) {
1821 ADD_64(afex_stats->rx_frames_dropped_hi,
1822 0,
1823 afex_stats->rx_frames_dropped_lo,
1824 estats->mac_filter_discard);
1825 ADD_64(afex_stats->rx_frames_dropped_hi,
1826 0,
1827 afex_stats->rx_frames_dropped_lo,
1828 estats->brb_truncate_discard);
1829 ADD_64(afex_stats->rx_frames_discarded_hi,
1830 0,
1831 afex_stats->rx_frames_discarded_lo,
1832 estats->mac_discard);
1833 }
1834}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 2b46e1eb7fd1..93e689fdfeda 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -338,6 +338,18 @@ struct bnx2x_fw_port_stats_old {
338 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \ 338 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
339 } while (0) 339 } while (0)
340 340
341#define LE32_0 ((__force __le32) 0)
342#define LE16_0 ((__force __le16) 0)
343
344/* The _force is for cases where high value is 0 */
345#define ADD_64_LE(s_hi, a_hi_le, s_lo, a_lo_le) \
346 ADD_64(s_hi, le32_to_cpu(a_hi_le), \
347 s_lo, le32_to_cpu(a_lo_le))
348
349#define ADD_64_LE16(s_hi, a_hi_le, s_lo, a_lo_le) \
350 ADD_64(s_hi, le16_to_cpu(a_hi_le), \
351 s_lo, le16_to_cpu(a_lo_le))
352
341/* difference = minuend - subtrahend */ 353/* difference = minuend - subtrahend */
342#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \ 354#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
343 do { \ 355 do { \
@@ -529,4 +541,7 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
529 * @bp: driver handle 541 * @bp: driver handle
530 */ 542 */
531void bnx2x_save_statistics(struct bnx2x *bp); 543void bnx2x_save_statistics(struct bnx2x *bp);
544
545void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
546 u32 stats_type);
532#endif /* BNX2X_STATS_H */ 547#endif /* BNX2X_STATS_H */
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index ceeab8e852ef..39b92f5ed7dd 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -195,6 +195,15 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
195#define TG3_RX_OFFSET(tp) (NET_SKB_PAD) 195#define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
196#endif 196#endif
197 197
198/* This driver uses the new build_skb() API providing a frag as skb->head
199 * This strategy permits better GRO aggregation, better TCP coalescing, and
200 * better splice() implementation (avoids a copy from head to a page), at
201 * minimal memory cost.
202 * In this 2048 bytes block, we have enough room to store the MTU=1500 frame
203 * and the struct skb_shared_info.
204 */
205#define TG3_FRAGSIZE 2048
206
198/* minimum number of free TX descriptors required to wake up TX process */ 207/* minimum number of free TX descriptors required to wake up TX process */
199#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4) 208#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
200#define TG3_TX_BD_DMA_MAX_2K 2048 209#define TG3_TX_BD_DMA_MAX_2K 2048
@@ -5622,17 +5631,48 @@ static void tg3_tx(struct tg3_napi *tnapi)
5622 } 5631 }
5623} 5632}
5624 5633
5634static void *tg3_frag_alloc(struct tg3_rx_prodring_set *tpr)
5635{
5636 void *data;
5637
5638 if (tpr->rx_page_size < TG3_FRAGSIZE) {
5639 struct page *page = alloc_page(GFP_ATOMIC);
5640
5641 if (!page)
5642 return NULL;
5643 atomic_add((PAGE_SIZE / TG3_FRAGSIZE) - 1, &page->_count);
5644 tpr->rx_page_addr = page_address(page);
5645 tpr->rx_page_size = PAGE_SIZE;
5646 }
5647 data = tpr->rx_page_addr;
5648 tpr->rx_page_addr += TG3_FRAGSIZE;
5649 tpr->rx_page_size -= TG3_FRAGSIZE;
5650 return data;
5651}
5652
5653static void tg3_frag_free(bool is_frag, void *data)
5654{
5655 if (is_frag)
5656 put_page(virt_to_head_page(data));
5657 else
5658 kfree(data);
5659}
5660
5625static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz) 5661static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5626{ 5662{
5663 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
5664 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5665
5627 if (!ri->data) 5666 if (!ri->data)
5628 return; 5667 return;
5629 5668
5630 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping), 5669 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5631 map_sz, PCI_DMA_FROMDEVICE); 5670 map_sz, PCI_DMA_FROMDEVICE);
5632 kfree(ri->data); 5671 tg3_frag_free(skb_size <= TG3_FRAGSIZE, ri->data);
5633 ri->data = NULL; 5672 ri->data = NULL;
5634} 5673}
5635 5674
5675
5636/* Returns size of skb allocated or < 0 on error. 5676/* Returns size of skb allocated or < 0 on error.
5637 * 5677 *
5638 * We only need to fill in the address because the other members 5678 * We only need to fill in the address because the other members
@@ -5645,7 +5685,8 @@ static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5645 * (to fetch the error flags, vlan tag, checksum, and opaque cookie). 5685 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5646 */ 5686 */
5647static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, 5687static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5648 u32 opaque_key, u32 dest_idx_unmasked) 5688 u32 opaque_key, u32 dest_idx_unmasked,
5689 unsigned int *frag_size)
5649{ 5690{
5650 struct tg3_rx_buffer_desc *desc; 5691 struct tg3_rx_buffer_desc *desc;
5651 struct ring_info *map; 5692 struct ring_info *map;
@@ -5680,7 +5721,13 @@ static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5680 */ 5721 */
5681 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) + 5722 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5682 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 5723 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5683 data = kmalloc(skb_size, GFP_ATOMIC); 5724 if (skb_size <= TG3_FRAGSIZE) {
5725 data = tg3_frag_alloc(tpr);
5726 *frag_size = TG3_FRAGSIZE;
5727 } else {
5728 data = kmalloc(skb_size, GFP_ATOMIC);
5729 *frag_size = 0;
5730 }
5684 if (!data) 5731 if (!data)
5685 return -ENOMEM; 5732 return -ENOMEM;
5686 5733
@@ -5688,8 +5735,8 @@ static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5688 data + TG3_RX_OFFSET(tp), 5735 data + TG3_RX_OFFSET(tp),
5689 data_size, 5736 data_size,
5690 PCI_DMA_FROMDEVICE); 5737 PCI_DMA_FROMDEVICE);
5691 if (pci_dma_mapping_error(tp->pdev, mapping)) { 5738 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
5692 kfree(data); 5739 tg3_frag_free(skb_size <= TG3_FRAGSIZE, data);
5693 return -EIO; 5740 return -EIO;
5694 } 5741 }
5695 5742
@@ -5840,18 +5887,19 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
5840 5887
5841 if (len > TG3_RX_COPY_THRESH(tp)) { 5888 if (len > TG3_RX_COPY_THRESH(tp)) {
5842 int skb_size; 5889 int skb_size;
5890 unsigned int frag_size;
5843 5891
5844 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key, 5892 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5845 *post_ptr); 5893 *post_ptr, &frag_size);
5846 if (skb_size < 0) 5894 if (skb_size < 0)
5847 goto drop_it; 5895 goto drop_it;
5848 5896
5849 pci_unmap_single(tp->pdev, dma_addr, skb_size, 5897 pci_unmap_single(tp->pdev, dma_addr, skb_size,
5850 PCI_DMA_FROMDEVICE); 5898 PCI_DMA_FROMDEVICE);
5851 5899
5852 skb = build_skb(data); 5900 skb = build_skb(data, frag_size);
5853 if (!skb) { 5901 if (!skb) {
5854 kfree(data); 5902 tg3_frag_free(frag_size != 0, data);
5855 goto drop_it_no_recycle; 5903 goto drop_it_no_recycle;
5856 } 5904 }
5857 skb_reserve(skb, TG3_RX_OFFSET(tp)); 5905 skb_reserve(skb, TG3_RX_OFFSET(tp));
@@ -7287,7 +7335,10 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
7287 7335
7288 /* Now allocate fresh SKBs for each rx ring. */ 7336 /* Now allocate fresh SKBs for each rx ring. */
7289 for (i = 0; i < tp->rx_pending; i++) { 7337 for (i = 0; i < tp->rx_pending; i++) {
7290 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) { 7338 unsigned int frag_size;
7339
7340 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
7341 &frag_size) < 0) {
7291 netdev_warn(tp->dev, 7342 netdev_warn(tp->dev,
7292 "Using a smaller RX standard ring. Only " 7343 "Using a smaller RX standard ring. Only "
7293 "%d out of %d buffers were allocated " 7344 "%d out of %d buffers were allocated "
@@ -7319,7 +7370,10 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
7319 } 7370 }
7320 7371
7321 for (i = 0; i < tp->rx_jumbo_pending; i++) { 7372 for (i = 0; i < tp->rx_jumbo_pending; i++) {
7322 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) { 7373 unsigned int frag_size;
7374
7375 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
7376 &frag_size) < 0) {
7323 netdev_warn(tp->dev, 7377 netdev_warn(tp->dev,
7324 "Using a smaller RX jumbo ring. Only %d " 7378 "Using a smaller RX jumbo ring. Only %d "
7325 "out of %d buffers were allocated " 7379 "out of %d buffers were allocated "
@@ -12248,6 +12302,7 @@ static const struct ethtool_ops tg3_ethtool_ops = {
12248 .get_rxfh_indir_size = tg3_get_rxfh_indir_size, 12302 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
12249 .get_rxfh_indir = tg3_get_rxfh_indir, 12303 .get_rxfh_indir = tg3_get_rxfh_indir,
12250 .set_rxfh_indir = tg3_set_rxfh_indir, 12304 .set_rxfh_indir = tg3_set_rxfh_indir,
12305 .get_ts_info = ethtool_op_get_ts_info,
12251}; 12306};
12252 12307
12253static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev, 12308static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 93865f899a4f..7c855455d937 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2815,6 +2815,8 @@ struct tg3_rx_prodring_set {
2815 struct ring_info *rx_jmb_buffers; 2815 struct ring_info *rx_jmb_buffers;
2816 dma_addr_t rx_std_mapping; 2816 dma_addr_t rx_std_mapping;
2817 dma_addr_t rx_jmb_mapping; 2817 dma_addr_t rx_jmb_mapping;
2818 void *rx_page_addr;
2819 unsigned int rx_page_size;
2818}; 2820};
2819 2821
2820#define TG3_IRQ_MAX_VECS_RSS 5 2822#define TG3_IRQ_MAX_VECS_RSS 5
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 77977d735dd7..0b640fafbda3 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -70,7 +70,6 @@ static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
70static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc); 70static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
71static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc); 71static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
72static void bfa_ioc_recover(struct bfa_ioc *ioc); 72static void bfa_ioc_recover(struct bfa_ioc *ioc);
73static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
74static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event); 73static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
75static void bfa_ioc_disable_comp(struct bfa_ioc *ioc); 74static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
76static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc); 75static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
@@ -346,8 +345,6 @@ bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
346 switch (event) { 345 switch (event) {
347 case IOC_E_FWRSP_GETATTR: 346 case IOC_E_FWRSP_GETATTR:
348 del_timer(&ioc->ioc_timer); 347 del_timer(&ioc->ioc_timer);
349 bfa_ioc_check_attr_wwns(ioc);
350 bfa_ioc_hb_monitor(ioc);
351 bfa_fsm_set_state(ioc, bfa_ioc_sm_op); 348 bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
352 break; 349 break;
353 350
@@ -380,6 +377,7 @@ bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
380{ 377{
381 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); 378 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
382 bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED); 379 bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
380 bfa_ioc_hb_monitor(ioc);
383} 381}
384 382
385static void 383static void
@@ -1207,27 +1205,62 @@ bfa_nw_ioc_sem_release(void __iomem *sem_reg)
1207 writel(1, sem_reg); 1205 writel(1, sem_reg);
1208} 1206}
1209 1207
1208/* Clear fwver hdr */
1209static void
1210bfa_ioc_fwver_clear(struct bfa_ioc *ioc)
1211{
1212 u32 pgnum, pgoff, loff = 0;
1213 int i;
1214
1215 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1216 pgoff = PSS_SMEM_PGOFF(loff);
1217 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1218
1219 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); i++) {
1220 writel(0, ioc->ioc_regs.smem_page_start + loff);
1221 loff += sizeof(u32);
1222 }
1223}
1224
1225
1210static void 1226static void
1211bfa_ioc_hw_sem_init(struct bfa_ioc *ioc) 1227bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
1212{ 1228{
1213 struct bfi_ioc_image_hdr fwhdr; 1229 struct bfi_ioc_image_hdr fwhdr;
1214 u32 fwstate = readl(ioc->ioc_regs.ioc_fwstate); 1230 u32 fwstate, r32;
1215 1231
1216 if (fwstate == BFI_IOC_UNINIT) 1232 /* Spin on init semaphore to serialize. */
1233 r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
1234 while (r32 & 0x1) {
1235 udelay(20);
1236 r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
1237 }
1238
1239 fwstate = readl(ioc->ioc_regs.ioc_fwstate);
1240 if (fwstate == BFI_IOC_UNINIT) {
1241 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1217 return; 1242 return;
1243 }
1218 1244
1219 bfa_nw_ioc_fwver_get(ioc, &fwhdr); 1245 bfa_nw_ioc_fwver_get(ioc, &fwhdr);
1220 1246
1221 if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) 1247 if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
1248 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1222 return; 1249 return;
1250 }
1223 1251
1252 bfa_ioc_fwver_clear(ioc);
1224 writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate); 1253 writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
1254 writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
1225 1255
1226 /* 1256 /*
1227 * Try to lock and then unlock the semaphore. 1257 * Try to lock and then unlock the semaphore.
1228 */ 1258 */
1229 readl(ioc->ioc_regs.ioc_sem_reg); 1259 readl(ioc->ioc_regs.ioc_sem_reg);
1230 writel(1, ioc->ioc_regs.ioc_sem_reg); 1260 writel(1, ioc->ioc_regs.ioc_sem_reg);
1261
1262 /* Unlock init semaphore */
1263 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1231} 1264}
1232 1265
1233static void 1266static void
@@ -1585,11 +1618,6 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
1585 u32 i; 1618 u32 i;
1586 u32 asicmode; 1619 u32 asicmode;
1587 1620
1588 /**
1589 * Initialize LMEM first before code download
1590 */
1591 bfa_ioc_lmem_init(ioc);
1592
1593 fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno); 1621 fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), chunkno);
1594 1622
1595 pgnum = bfa_ioc_smem_pgnum(ioc, loff); 1623 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
@@ -1914,6 +1942,10 @@ bfa_ioc_pll_init(struct bfa_ioc *ioc)
1914 bfa_ioc_pll_init_asic(ioc); 1942 bfa_ioc_pll_init_asic(ioc);
1915 1943
1916 ioc->pllinit = true; 1944 ioc->pllinit = true;
1945
1946 /* Initialize LMEM */
1947 bfa_ioc_lmem_init(ioc);
1948
1917 /* 1949 /*
1918 * release semaphore. 1950 * release semaphore.
1919 */ 1951 */
@@ -2513,13 +2545,6 @@ bfa_ioc_recover(struct bfa_ioc *ioc)
2513 bfa_fsm_send_event(ioc, IOC_E_HBFAIL); 2545 bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
2514} 2546}
2515 2547
2516static void
2517bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc)
2518{
2519 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
2520 return;
2521}
2522
2523/** 2548/**
2524 * @dg hal_iocpf_pvt BFA IOC PF private functions 2549 * @dg hal_iocpf_pvt BFA IOC PF private functions
2525 * @{ 2550 * @{
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
index 348479bbfa3a..b6b036a143ae 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc_ct.c
@@ -199,9 +199,9 @@ bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
199 * Host to LPU mailbox message addresses 199 * Host to LPU mailbox message addresses
200 */ 200 */
201static const struct { 201static const struct {
202 u32 hfn_mbox; 202 u32 hfn_mbox;
203 u32 lpu_mbox; 203 u32 lpu_mbox;
204 u32 hfn_pgn; 204 u32 hfn_pgn;
205} ct_fnreg[] = { 205} ct_fnreg[] = {
206 { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 }, 206 { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
207 { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }, 207 { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
@@ -803,17 +803,72 @@ bfa_ioc_ct2_mac_reset(void __iomem *rb)
803} 803}
804 804
805#define CT2_NFC_MAX_DELAY 1000 805#define CT2_NFC_MAX_DELAY 1000
806#define CT2_NFC_VER_VALID 0x143
807#define BFA_IOC_PLL_POLL 1000000
808
809static bool
810bfa_ioc_ct2_nfc_halted(void __iomem *rb)
811{
812 volatile u32 r32;
813
814 r32 = readl(rb + CT2_NFC_CSR_SET_REG);
815 if (r32 & __NFC_CONTROLLER_HALTED)
816 return true;
817
818 return false;
819}
820
821static void
822bfa_ioc_ct2_nfc_resume(void __iomem *rb)
823{
824 volatile u32 r32;
825 int i;
826
827 writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
828 for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
829 r32 = readl(rb + CT2_NFC_CSR_SET_REG);
830 if (!(r32 & __NFC_CONTROLLER_HALTED))
831 return;
832 udelay(1000);
833 }
834 BUG_ON(1);
835}
836
806static enum bfa_status 837static enum bfa_status
807bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode) 838bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
808{ 839{
809 volatile u32 wgn, r32; 840 volatile u32 wgn, r32;
810 int i; 841 u32 nfc_ver, i;
811 842
812 /*
813 * Initialize PLL if not already done by NFC
814 */
815 wgn = readl(rb + CT2_WGN_STATUS); 843 wgn = readl(rb + CT2_WGN_STATUS);
816 if (!(wgn & __GLBL_PF_VF_CFG_RDY)) { 844
845 nfc_ver = readl(rb + CT2_RSC_GPR15_REG);
846
847 if ((wgn == (__A2T_AHB_LOAD | __WGN_READY)) &&
848 (nfc_ver >= CT2_NFC_VER_VALID)) {
849 if (bfa_ioc_ct2_nfc_halted(rb))
850 bfa_ioc_ct2_nfc_resume(rb);
851 writel(__RESET_AND_START_SCLK_LCLK_PLLS,
852 rb + CT2_CSI_FW_CTL_SET_REG);
853
854 for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
855 r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
856 if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS)
857 break;
858 }
859 BUG_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS));
860
861 for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
862 r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
863 if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS))
864 break;
865 }
866 BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
867 udelay(1000);
868
869 r32 = readl(rb + CT2_CSI_FW_CTL_REG);
870 BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
871 } else {
817 writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG)); 872 writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG));
818 for (i = 0; i < CT2_NFC_MAX_DELAY; i++) { 873 for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
819 r32 = readl(rb + CT2_NFC_CSR_SET_REG); 874 r32 = readl(rb + CT2_NFC_CSR_SET_REG);
@@ -821,53 +876,48 @@ bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
821 break; 876 break;
822 udelay(1000); 877 udelay(1000);
823 } 878 }
879
880 bfa_ioc_ct2_mac_reset(rb);
881 bfa_ioc_ct2_sclk_init(rb);
882 bfa_ioc_ct2_lclk_init(rb);
883
884 /* release soft reset on s_clk & l_clk */
885 r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
886 writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
887 rb + CT2_APP_PLL_SCLK_CTL_REG);
888 r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
889 writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
890 rb + CT2_APP_PLL_LCLK_CTL_REG);
891 }
892
893 /* Announce flash device presence, if flash was corrupted. */
894 if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
895 r32 = readl((rb + PSS_GPIO_OUT_REG));
896 writel(r32 & ~1, rb + PSS_GPIO_OUT_REG);
897 r32 = readl((rb + PSS_GPIO_OE_REG));
898 writel(r32 | 1, rb + PSS_GPIO_OE_REG);
824 } 899 }
825 900
826 /* 901 /*
827 * Mask the interrupts and clear any 902 * Mask the interrupts and clear any
828 * pending interrupts left by BIOS/EFI 903 * pending interrupts left by BIOS/EFI
829 */ 904 */
830
831 writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK)); 905 writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
832 writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK)); 906 writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));
833 907
834 r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); 908 /* For first time initialization, no need to clear interrupts */
835 if (r32 == 1) { 909 r32 = readl(rb + HOST_SEM5_REG);
836 writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT)); 910 if (r32 & 0x1) {
837 readl((rb + CT2_LPU0_HOSTFN_CMD_STAT)); 911 r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
838 } 912 if (r32 == 1) {
839 r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT)); 913 writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
840 if (r32 == 1) { 914 readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
841 writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT)); 915 }
842 readl((rb + CT2_LPU1_HOSTFN_CMD_STAT)); 916 r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
843 } 917 if (r32 == 1) {
844 918 writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
845 bfa_ioc_ct2_mac_reset(rb); 919 readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
846 bfa_ioc_ct2_sclk_init(rb); 920 }
847 bfa_ioc_ct2_lclk_init(rb);
848
849 /*
850 * release soft reset on s_clk & l_clk
851 */
852 r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
853 writel((r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET),
854 (rb + CT2_APP_PLL_SCLK_CTL_REG));
855
856 /*
857 * release soft reset on s_clk & l_clk
858 */
859 r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
860 writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
861 (rb + CT2_APP_PLL_LCLK_CTL_REG));
862
863 /*
864 * Announce flash device presence, if flash was corrupted.
865 */
866 if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
867 r32 = readl((rb + PSS_GPIO_OUT_REG));
868 writel((r32 & ~1), (rb + PSS_GPIO_OUT_REG));
869 r32 = readl((rb + PSS_GPIO_OE_REG));
870 writel((r32 | 1), (rb + PSS_GPIO_OE_REG));
871 } 921 }
872 922
873 bfa_ioc_ct2_mem_init(rb); 923 bfa_ioc_ct2_mem_init(rb);
diff --git a/drivers/net/ethernet/brocade/bna/bfi_reg.h b/drivers/net/ethernet/brocade/bna/bfi_reg.h
index efacff3ab51d..0e094fe46dfd 100644
--- a/drivers/net/ethernet/brocade/bna/bfi_reg.h
+++ b/drivers/net/ethernet/brocade/bna/bfi_reg.h
@@ -339,10 +339,16 @@ enum {
339#define __A2T_AHB_LOAD 0x00000800 339#define __A2T_AHB_LOAD 0x00000800
340#define __WGN_READY 0x00000400 340#define __WGN_READY 0x00000400
341#define __GLBL_PF_VF_CFG_RDY 0x00000200 341#define __GLBL_PF_VF_CFG_RDY 0x00000200
342#define CT2_NFC_CSR_CLR_REG 0x00027420
342#define CT2_NFC_CSR_SET_REG 0x00027424 343#define CT2_NFC_CSR_SET_REG 0x00027424
343#define __HALT_NFC_CONTROLLER 0x00000002 344#define __HALT_NFC_CONTROLLER 0x00000002
344#define __NFC_CONTROLLER_HALTED 0x00001000 345#define __NFC_CONTROLLER_HALTED 0x00001000
345 346
347#define CT2_RSC_GPR15_REG 0x0002765c
348#define CT2_CSI_FW_CTL_REG 0x00027080
349#define __RESET_AND_START_SCLK_LCLK_PLLS 0x00010000
350#define CT2_CSI_FW_CTL_SET_REG 0x00027088
351
346#define CT2_CSI_MAC0_CONTROL_REG 0x000270d0 352#define CT2_CSI_MAC0_CONTROL_REG 0x000270d0
347#define __CSI_MAC_RESET 0x00000010 353#define __CSI_MAC_RESET 0x00000010
348#define __CSI_MAC_AHB_RESET 0x00000008 354#define __CSI_MAC_AHB_RESET 0x00000008
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index ff78f770dec9..25c4e7f2a099 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -80,8 +80,6 @@ do { \
80 (sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \ 80 (sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \
81} while (0) 81} while (0)
82 82
83#define BNAD_TXRX_SYNC_MDELAY 250 /* 250 msecs */
84
85static void 83static void
86bnad_add_to_list(struct bnad *bnad) 84bnad_add_to_list(struct bnad *bnad)
87{ 85{
@@ -103,7 +101,7 @@ bnad_remove_from_list(struct bnad *bnad)
103 * Reinitialize completions in CQ, once Rx is taken down 101 * Reinitialize completions in CQ, once Rx is taken down
104 */ 102 */
105static void 103static void
106bnad_cq_cmpl_init(struct bnad *bnad, struct bna_ccb *ccb) 104bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
107{ 105{
108 struct bna_cq_entry *cmpl, *next_cmpl; 106 struct bna_cq_entry *cmpl, *next_cmpl;
109 unsigned int wi_range, wis = 0, ccb_prod = 0; 107 unsigned int wi_range, wis = 0, ccb_prod = 0;
@@ -141,7 +139,8 @@ bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
141 139
142 for (j = 0; j < frag; j++) { 140 for (j = 0; j < frag; j++) {
143 dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr), 141 dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr),
144 skb_frag_size(&skb_shinfo(skb)->frags[j]), DMA_TO_DEVICE); 142 skb_frag_size(&skb_shinfo(skb)->frags[j]),
143 DMA_TO_DEVICE);
145 dma_unmap_addr_set(&array[index], dma_addr, 0); 144 dma_unmap_addr_set(&array[index], dma_addr, 0);
146 BNA_QE_INDX_ADD(index, 1, depth); 145 BNA_QE_INDX_ADD(index, 1, depth);
147 } 146 }
@@ -155,7 +154,7 @@ bnad_pci_unmap_skb(struct device *pdev, struct bnad_skb_unmap *array,
155 * so DMA unmap & freeing is fine. 154 * so DMA unmap & freeing is fine.
156 */ 155 */
157static void 156static void
158bnad_free_all_txbufs(struct bnad *bnad, 157bnad_txq_cleanup(struct bnad *bnad,
159 struct bna_tcb *tcb) 158 struct bna_tcb *tcb)
160{ 159{
161 u32 unmap_cons; 160 u32 unmap_cons;
@@ -183,13 +182,12 @@ bnad_free_all_txbufs(struct bnad *bnad,
183/* Data Path Handlers */ 182/* Data Path Handlers */
184 183
185/* 184/*
186 * bnad_free_txbufs : Frees the Tx bufs on Tx completion 185 * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
187 * Can be called in a) Interrupt context 186 * Can be called in a) Interrupt context
188 * b) Sending context 187 * b) Sending context
189 * c) Tasklet context
190 */ 188 */
191static u32 189static u32
192bnad_free_txbufs(struct bnad *bnad, 190bnad_txcmpl_process(struct bnad *bnad,
193 struct bna_tcb *tcb) 191 struct bna_tcb *tcb)
194{ 192{
195 u32 unmap_cons, sent_packets = 0, sent_bytes = 0; 193 u32 unmap_cons, sent_packets = 0, sent_bytes = 0;
@@ -198,13 +196,7 @@ bnad_free_txbufs(struct bnad *bnad,
198 struct bnad_skb_unmap *unmap_array; 196 struct bnad_skb_unmap *unmap_array;
199 struct sk_buff *skb; 197 struct sk_buff *skb;
200 198
201 /* 199 /* Just return if TX is stopped */
202 * Just return if TX is stopped. This check is useful
203 * when bnad_free_txbufs() runs out of a tasklet scheduled
204 * before bnad_cb_tx_cleanup() cleared BNAD_TXQ_TX_STARTED bit
205 * but this routine runs actually after the cleanup has been
206 * executed.
207 */
208 if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) 200 if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
209 return 0; 201 return 0;
210 202
@@ -243,57 +235,8 @@ bnad_free_txbufs(struct bnad *bnad,
243 return sent_packets; 235 return sent_packets;
244} 236}
245 237
246/* Tx Free Tasklet function */
247/* Frees for all the tcb's in all the Tx's */
248/*
249 * Scheduled from sending context, so that
250 * the fat Tx lock is not held for too long
251 * in the sending context.
252 */
253static void
254bnad_tx_free_tasklet(unsigned long bnad_ptr)
255{
256 struct bnad *bnad = (struct bnad *)bnad_ptr;
257 struct bna_tcb *tcb;
258 u32 acked = 0;
259 int i, j;
260
261 for (i = 0; i < bnad->num_tx; i++) {
262 for (j = 0; j < bnad->num_txq_per_tx; j++) {
263 tcb = bnad->tx_info[i].tcb[j];
264 if (!tcb)
265 continue;
266 if (((u16) (*tcb->hw_consumer_index) !=
267 tcb->consumer_index) &&
268 (!test_and_set_bit(BNAD_TXQ_FREE_SENT,
269 &tcb->flags))) {
270 acked = bnad_free_txbufs(bnad, tcb);
271 if (likely(test_bit(BNAD_TXQ_TX_STARTED,
272 &tcb->flags)))
273 bna_ib_ack(tcb->i_dbell, acked);
274 smp_mb__before_clear_bit();
275 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
276 }
277 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
278 &tcb->flags)))
279 continue;
280 if (netif_queue_stopped(bnad->netdev)) {
281 if (acked && netif_carrier_ok(bnad->netdev) &&
282 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
283 BNAD_NETIF_WAKE_THRESHOLD) {
284 netif_wake_queue(bnad->netdev);
285 /* TODO */
286 /* Counters for individual TxQs? */
287 BNAD_UPDATE_CTR(bnad,
288 netif_queue_wakeup);
289 }
290 }
291 }
292 }
293}
294
295static u32 238static u32
296bnad_tx(struct bnad *bnad, struct bna_tcb *tcb) 239bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
297{ 240{
298 struct net_device *netdev = bnad->netdev; 241 struct net_device *netdev = bnad->netdev;
299 u32 sent = 0; 242 u32 sent = 0;
@@ -301,7 +244,7 @@ bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
301 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) 244 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
302 return 0; 245 return 0;
303 246
304 sent = bnad_free_txbufs(bnad, tcb); 247 sent = bnad_txcmpl_process(bnad, tcb);
305 if (sent) { 248 if (sent) {
306 if (netif_queue_stopped(netdev) && 249 if (netif_queue_stopped(netdev) &&
307 netif_carrier_ok(netdev) && 250 netif_carrier_ok(netdev) &&
@@ -330,13 +273,13 @@ bnad_msix_tx(int irq, void *data)
330 struct bna_tcb *tcb = (struct bna_tcb *)data; 273 struct bna_tcb *tcb = (struct bna_tcb *)data;
331 struct bnad *bnad = tcb->bnad; 274 struct bnad *bnad = tcb->bnad;
332 275
333 bnad_tx(bnad, tcb); 276 bnad_tx_complete(bnad, tcb);
334 277
335 return IRQ_HANDLED; 278 return IRQ_HANDLED;
336} 279}
337 280
338static void 281static void
339bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb) 282bnad_rcb_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
340{ 283{
341 struct bnad_unmap_q *unmap_q = rcb->unmap_q; 284 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
342 285
@@ -348,7 +291,7 @@ bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
348} 291}
349 292
350static void 293static void
351bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb) 294bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
352{ 295{
353 struct bnad_unmap_q *unmap_q; 296 struct bnad_unmap_q *unmap_q;
354 struct bnad_skb_unmap *unmap_array; 297 struct bnad_skb_unmap *unmap_array;
@@ -369,11 +312,11 @@ bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
369 DMA_FROM_DEVICE); 312 DMA_FROM_DEVICE);
370 dev_kfree_skb(skb); 313 dev_kfree_skb(skb);
371 } 314 }
372 bnad_reset_rcb(bnad, rcb); 315 bnad_rcb_cleanup(bnad, rcb);
373} 316}
374 317
375static void 318static void
376bnad_alloc_n_post_rxbufs(struct bnad *bnad, struct bna_rcb *rcb) 319bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
377{ 320{
378 u16 to_alloc, alloced, unmap_prod, wi_range; 321 u16 to_alloc, alloced, unmap_prod, wi_range;
379 struct bnad_unmap_q *unmap_q = rcb->unmap_q; 322 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
@@ -434,14 +377,14 @@ bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
434 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) { 377 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
435 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth) 378 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
436 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT) 379 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
437 bnad_alloc_n_post_rxbufs(bnad, rcb); 380 bnad_rxq_post(bnad, rcb);
438 smp_mb__before_clear_bit(); 381 smp_mb__before_clear_bit();
439 clear_bit(BNAD_RXQ_REFILL, &rcb->flags); 382 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
440 } 383 }
441} 384}
442 385
443static u32 386static u32
444bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget) 387bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
445{ 388{
446 struct bna_cq_entry *cmpl, *next_cmpl; 389 struct bna_cq_entry *cmpl, *next_cmpl;
447 struct bna_rcb *rcb = NULL; 390 struct bna_rcb *rcb = NULL;
@@ -453,12 +396,8 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
453 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate; 396 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
454 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl); 397 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
455 398
456 set_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags); 399 if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
457
458 if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)) {
459 clear_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags);
460 return 0; 400 return 0;
461 }
462 401
463 prefetch(bnad->netdev); 402 prefetch(bnad->netdev);
464 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl, 403 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
@@ -533,9 +472,8 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
533 472
534 if (skb->ip_summed == CHECKSUM_UNNECESSARY) 473 if (skb->ip_summed == CHECKSUM_UNNECESSARY)
535 napi_gro_receive(&rx_ctrl->napi, skb); 474 napi_gro_receive(&rx_ctrl->napi, skb);
536 else { 475 else
537 netif_receive_skb(skb); 476 netif_receive_skb(skb);
538 }
539 477
540next: 478next:
541 cmpl->valid = 0; 479 cmpl->valid = 0;
@@ -646,7 +584,7 @@ bnad_isr(int irq, void *data)
646 for (j = 0; j < bnad->num_txq_per_tx; j++) { 584 for (j = 0; j < bnad->num_txq_per_tx; j++) {
647 tcb = bnad->tx_info[i].tcb[j]; 585 tcb = bnad->tx_info[i].tcb[j];
648 if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) 586 if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
649 bnad_tx(bnad, bnad->tx_info[i].tcb[j]); 587 bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
650 } 588 }
651 } 589 }
652 /* Rx processing */ 590 /* Rx processing */
@@ -839,20 +777,9 @@ bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
839{ 777{
840 struct bnad_tx_info *tx_info = 778 struct bnad_tx_info *tx_info =
841 (struct bnad_tx_info *)tcb->txq->tx->priv; 779 (struct bnad_tx_info *)tcb->txq->tx->priv;
842 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
843
844 while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
845 cpu_relax();
846
847 bnad_free_all_txbufs(bnad, tcb);
848
849 unmap_q->producer_index = 0;
850 unmap_q->consumer_index = 0;
851
852 smp_mb__before_clear_bit();
853 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
854 780
855 tx_info->tcb[tcb->id] = NULL; 781 tx_info->tcb[tcb->id] = NULL;
782 tcb->priv = NULL;
856} 783}
857 784
858static void 785static void
@@ -866,12 +793,6 @@ bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
866} 793}
867 794
868static void 795static void
869bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
870{
871 bnad_free_all_rxbufs(bnad, rcb);
872}
873
874static void
875bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb) 796bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
876{ 797{
877 struct bnad_rx_info *rx_info = 798 struct bnad_rx_info *rx_info =
@@ -916,7 +837,6 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
916{ 837{
917 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv; 838 struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
918 struct bna_tcb *tcb; 839 struct bna_tcb *tcb;
919 struct bnad_unmap_q *unmap_q;
920 u32 txq_id; 840 u32 txq_id;
921 int i; 841 int i;
922 842
@@ -926,23 +846,9 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
926 continue; 846 continue;
927 txq_id = tcb->id; 847 txq_id = tcb->id;
928 848
929 unmap_q = tcb->unmap_q; 849 BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
930
931 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
932 continue;
933
934 while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
935 cpu_relax();
936
937 bnad_free_all_txbufs(bnad, tcb);
938
939 unmap_q->producer_index = 0;
940 unmap_q->consumer_index = 0;
941
942 smp_mb__before_clear_bit();
943 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
944
945 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags); 850 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
851 BUG_ON(*(tcb->hw_consumer_index) != 0);
946 852
947 if (netif_carrier_ok(bnad->netdev)) { 853 if (netif_carrier_ok(bnad->netdev)) {
948 printk(KERN_INFO "bna: %s %d TXQ_STARTED\n", 854 printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
@@ -963,6 +869,54 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
963 } 869 }
964} 870}
965 871
872/*
873 * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
874 */
875static void
876bnad_tx_cleanup(struct delayed_work *work)
877{
878 struct bnad_tx_info *tx_info =
879 container_of(work, struct bnad_tx_info, tx_cleanup_work);
880 struct bnad *bnad = NULL;
881 struct bnad_unmap_q *unmap_q;
882 struct bna_tcb *tcb;
883 unsigned long flags;
884 uint32_t i, pending = 0;
885
886 for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
887 tcb = tx_info->tcb[i];
888 if (!tcb)
889 continue;
890
891 bnad = tcb->bnad;
892
893 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
894 pending++;
895 continue;
896 }
897
898 bnad_txq_cleanup(bnad, tcb);
899
900 unmap_q = tcb->unmap_q;
901 unmap_q->producer_index = 0;
902 unmap_q->consumer_index = 0;
903
904 smp_mb__before_clear_bit();
905 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
906 }
907
908 if (pending) {
909 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
910 msecs_to_jiffies(1));
911 return;
912 }
913
914 spin_lock_irqsave(&bnad->bna_lock, flags);
915 bna_tx_cleanup_complete(tx_info->tx);
916 spin_unlock_irqrestore(&bnad->bna_lock, flags);
917}
918
919
966static void 920static void
967bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx) 921bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
968{ 922{
@@ -976,8 +930,7 @@ bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
976 continue; 930 continue;
977 } 931 }
978 932
979 mdelay(BNAD_TXRX_SYNC_MDELAY); 933 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
980 bna_tx_cleanup_complete(tx);
981} 934}
982 935
983static void 936static void
@@ -1001,6 +954,44 @@ bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
1001 } 954 }
1002} 955}
1003 956
957/*
958 * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
959 */
960static void
961bnad_rx_cleanup(void *work)
962{
963 struct bnad_rx_info *rx_info =
964 container_of(work, struct bnad_rx_info, rx_cleanup_work);
965 struct bnad_rx_ctrl *rx_ctrl;
966 struct bnad *bnad = NULL;
967 unsigned long flags;
968 uint32_t i;
969
970 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
971 rx_ctrl = &rx_info->rx_ctrl[i];
972
973 if (!rx_ctrl->ccb)
974 continue;
975
976 bnad = rx_ctrl->ccb->bnad;
977
978 /*
979 * Wait till the poll handler has exited
980 * and nothing can be scheduled anymore
981 */
982 napi_disable(&rx_ctrl->napi);
983
984 bnad_cq_cleanup(bnad, rx_ctrl->ccb);
985 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
986 if (rx_ctrl->ccb->rcb[1])
987 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
988 }
989
990 spin_lock_irqsave(&bnad->bna_lock, flags);
991 bna_rx_cleanup_complete(rx_info->rx);
992 spin_unlock_irqrestore(&bnad->bna_lock, flags);
993}
994
1004static void 995static void
1005bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx) 996bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
1006{ 997{
@@ -1009,8 +1000,6 @@ bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
1009 struct bnad_rx_ctrl *rx_ctrl; 1000 struct bnad_rx_ctrl *rx_ctrl;
1010 int i; 1001 int i;
1011 1002
1012 mdelay(BNAD_TXRX_SYNC_MDELAY);
1013
1014 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) { 1003 for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1015 rx_ctrl = &rx_info->rx_ctrl[i]; 1004 rx_ctrl = &rx_info->rx_ctrl[i];
1016 ccb = rx_ctrl->ccb; 1005 ccb = rx_ctrl->ccb;
@@ -1021,12 +1010,9 @@ bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
1021 1010
1022 if (ccb->rcb[1]) 1011 if (ccb->rcb[1])
1023 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags); 1012 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
1024
1025 while (test_bit(BNAD_FP_IN_RX_PATH, &rx_ctrl->flags))
1026 cpu_relax();
1027 } 1013 }
1028 1014
1029 bna_rx_cleanup_complete(rx); 1015 queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
1030} 1016}
1031 1017
1032static void 1018static void
@@ -1046,13 +1032,12 @@ bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1046 if (!ccb) 1032 if (!ccb)
1047 continue; 1033 continue;
1048 1034
1049 bnad_cq_cmpl_init(bnad, ccb); 1035 napi_enable(&rx_ctrl->napi);
1050 1036
1051 for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) { 1037 for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1052 rcb = ccb->rcb[j]; 1038 rcb = ccb->rcb[j];
1053 if (!rcb) 1039 if (!rcb)
1054 continue; 1040 continue;
1055 bnad_free_all_rxbufs(bnad, rcb);
1056 1041
1057 set_bit(BNAD_RXQ_STARTED, &rcb->flags); 1042 set_bit(BNAD_RXQ_STARTED, &rcb->flags);
1058 set_bit(BNAD_RXQ_POST_OK, &rcb->flags); 1043 set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
@@ -1063,7 +1048,7 @@ bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1063 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) { 1048 if (!test_and_set_bit(BNAD_RXQ_REFILL, &rcb->flags)) {
1064 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth) 1049 if (BNA_QE_FREE_CNT(unmap_q, unmap_q->q_depth)
1065 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT) 1050 >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT)
1066 bnad_alloc_n_post_rxbufs(bnad, rcb); 1051 bnad_rxq_post(bnad, rcb);
1067 smp_mb__before_clear_bit(); 1052 smp_mb__before_clear_bit();
1068 clear_bit(BNAD_RXQ_REFILL, &rcb->flags); 1053 clear_bit(BNAD_RXQ_REFILL, &rcb->flags);
1069 } 1054 }
@@ -1687,7 +1672,7 @@ bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1687 if (!netif_carrier_ok(bnad->netdev)) 1672 if (!netif_carrier_ok(bnad->netdev))
1688 goto poll_exit; 1673 goto poll_exit;
1689 1674
1690 rcvd = bnad_poll_cq(bnad, rx_ctrl->ccb, budget); 1675 rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
1691 if (rcvd >= budget) 1676 if (rcvd >= budget)
1692 return rcvd; 1677 return rcvd;
1693 1678
@@ -1704,7 +1689,7 @@ poll_exit:
1704 1689
1705#define BNAD_NAPI_POLL_QUOTA 64 1690#define BNAD_NAPI_POLL_QUOTA 64
1706static void 1691static void
1707bnad_napi_init(struct bnad *bnad, u32 rx_id) 1692bnad_napi_add(struct bnad *bnad, u32 rx_id)
1708{ 1693{
1709 struct bnad_rx_ctrl *rx_ctrl; 1694 struct bnad_rx_ctrl *rx_ctrl;
1710 int i; 1695 int i;
@@ -1718,34 +1703,18 @@ bnad_napi_init(struct bnad *bnad, u32 rx_id)
1718} 1703}
1719 1704
1720static void 1705static void
1721bnad_napi_enable(struct bnad *bnad, u32 rx_id) 1706bnad_napi_delete(struct bnad *bnad, u32 rx_id)
1722{
1723 struct bnad_rx_ctrl *rx_ctrl;
1724 int i;
1725
1726 /* Initialize & enable NAPI */
1727 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1728 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1729
1730 napi_enable(&rx_ctrl->napi);
1731 }
1732}
1733
1734static void
1735bnad_napi_disable(struct bnad *bnad, u32 rx_id)
1736{ 1707{
1737 int i; 1708 int i;
1738 1709
1739 /* First disable and then clean up */ 1710 /* First disable and then clean up */
1740 for (i = 0; i < bnad->num_rxp_per_rx; i++) { 1711 for (i = 0; i < bnad->num_rxp_per_rx; i++)
1741 napi_disable(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1742 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi); 1712 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1743 }
1744} 1713}
1745 1714
1746/* Should be held with conf_lock held */ 1715/* Should be held with conf_lock held */
1747void 1716void
1748bnad_cleanup_tx(struct bnad *bnad, u32 tx_id) 1717bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
1749{ 1718{
1750 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id]; 1719 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1751 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0]; 1720 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
@@ -1764,9 +1733,6 @@ bnad_cleanup_tx(struct bnad *bnad, u32 tx_id)
1764 bnad_tx_msix_unregister(bnad, tx_info, 1733 bnad_tx_msix_unregister(bnad, tx_info,
1765 bnad->num_txq_per_tx); 1734 bnad->num_txq_per_tx);
1766 1735
1767 if (0 == tx_id)
1768 tasklet_kill(&bnad->tx_free_tasklet);
1769
1770 spin_lock_irqsave(&bnad->bna_lock, flags); 1736 spin_lock_irqsave(&bnad->bna_lock, flags);
1771 bna_tx_destroy(tx_info->tx); 1737 bna_tx_destroy(tx_info->tx);
1772 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1738 spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -1832,6 +1798,9 @@ bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1832 goto err_return; 1798 goto err_return;
1833 tx_info->tx = tx; 1799 tx_info->tx = tx;
1834 1800
1801 INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
1802 (work_func_t)bnad_tx_cleanup);
1803
1835 /* Register ISR for the Tx object */ 1804 /* Register ISR for the Tx object */
1836 if (intr_info->intr_type == BNA_INTR_T_MSIX) { 1805 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
1837 err = bnad_tx_msix_register(bnad, tx_info, 1806 err = bnad_tx_msix_register(bnad, tx_info,
@@ -1896,7 +1865,7 @@ bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
1896 1865
1897/* Called with mutex_lock(&bnad->conf_mutex) held */ 1866/* Called with mutex_lock(&bnad->conf_mutex) held */
1898void 1867void
1899bnad_cleanup_rx(struct bnad *bnad, u32 rx_id) 1868bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
1900{ 1869{
1901 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id]; 1870 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
1902 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id]; 1871 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
@@ -1928,7 +1897,7 @@ bnad_cleanup_rx(struct bnad *bnad, u32 rx_id)
1928 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX) 1897 if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
1929 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths); 1898 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
1930 1899
1931 bnad_napi_disable(bnad, rx_id); 1900 bnad_napi_delete(bnad, rx_id);
1932 1901
1933 spin_lock_irqsave(&bnad->bna_lock, flags); 1902 spin_lock_irqsave(&bnad->bna_lock, flags);
1934 bna_rx_destroy(rx_info->rx); 1903 bna_rx_destroy(rx_info->rx);
@@ -1952,7 +1921,7 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
1952 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id]; 1921 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
1953 static const struct bna_rx_event_cbfn rx_cbfn = { 1922 static const struct bna_rx_event_cbfn rx_cbfn = {
1954 .rcb_setup_cbfn = bnad_cb_rcb_setup, 1923 .rcb_setup_cbfn = bnad_cb_rcb_setup,
1955 .rcb_destroy_cbfn = bnad_cb_rcb_destroy, 1924 .rcb_destroy_cbfn = NULL,
1956 .ccb_setup_cbfn = bnad_cb_ccb_setup, 1925 .ccb_setup_cbfn = bnad_cb_ccb_setup,
1957 .ccb_destroy_cbfn = bnad_cb_ccb_destroy, 1926 .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
1958 .rx_stall_cbfn = bnad_cb_rx_stall, 1927 .rx_stall_cbfn = bnad_cb_rx_stall,
@@ -1998,11 +1967,14 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
1998 rx_info->rx = rx; 1967 rx_info->rx = rx;
1999 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1968 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2000 1969
1970 INIT_WORK(&rx_info->rx_cleanup_work,
1971 (work_func_t)(bnad_rx_cleanup));
1972
2001 /* 1973 /*
2002 * Init NAPI, so that state is set to NAPI_STATE_SCHED, 1974 * Init NAPI, so that state is set to NAPI_STATE_SCHED,
2003 * so that IRQ handler cannot schedule NAPI at this point. 1975 * so that IRQ handler cannot schedule NAPI at this point.
2004 */ 1976 */
2005 bnad_napi_init(bnad, rx_id); 1977 bnad_napi_add(bnad, rx_id);
2006 1978
2007 /* Register ISR for the Rx object */ 1979 /* Register ISR for the Rx object */
2008 if (intr_info->intr_type == BNA_INTR_T_MSIX) { 1980 if (intr_info->intr_type == BNA_INTR_T_MSIX) {
@@ -2028,13 +2000,10 @@ bnad_setup_rx(struct bnad *bnad, u32 rx_id)
2028 bna_rx_enable(rx); 2000 bna_rx_enable(rx);
2029 spin_unlock_irqrestore(&bnad->bna_lock, flags); 2001 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2030 2002
2031 /* Enable scheduling of NAPI */
2032 bnad_napi_enable(bnad, rx_id);
2033
2034 return 0; 2003 return 0;
2035 2004
2036err_return: 2005err_return:
2037 bnad_cleanup_rx(bnad, rx_id); 2006 bnad_destroy_rx(bnad, rx_id);
2038 return err; 2007 return err;
2039} 2008}
2040 2009
@@ -2519,7 +2488,7 @@ bnad_open(struct net_device *netdev)
2519 return 0; 2488 return 0;
2520 2489
2521cleanup_tx: 2490cleanup_tx:
2522 bnad_cleanup_tx(bnad, 0); 2491 bnad_destroy_tx(bnad, 0);
2523 2492
2524err_return: 2493err_return:
2525 mutex_unlock(&bnad->conf_mutex); 2494 mutex_unlock(&bnad->conf_mutex);
@@ -2546,8 +2515,8 @@ bnad_stop(struct net_device *netdev)
2546 2515
2547 wait_for_completion(&bnad->bnad_completions.enet_comp); 2516 wait_for_completion(&bnad->bnad_completions.enet_comp);
2548 2517
2549 bnad_cleanup_tx(bnad, 0); 2518 bnad_destroy_tx(bnad, 0);
2550 bnad_cleanup_rx(bnad, 0); 2519 bnad_destroy_rx(bnad, 0);
2551 2520
2552 /* Synchronize mailbox IRQ */ 2521 /* Synchronize mailbox IRQ */
2553 bnad_mbox_irq_sync(bnad); 2522 bnad_mbox_irq_sync(bnad);
@@ -2620,7 +2589,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2620 if ((u16) (*tcb->hw_consumer_index) != 2589 if ((u16) (*tcb->hw_consumer_index) !=
2621 tcb->consumer_index && 2590 tcb->consumer_index &&
2622 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) { 2591 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2623 acked = bnad_free_txbufs(bnad, tcb); 2592 acked = bnad_txcmpl_process(bnad, tcb);
2624 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) 2593 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2625 bna_ib_ack(tcb->i_dbell, acked); 2594 bna_ib_ack(tcb->i_dbell, acked);
2626 smp_mb__before_clear_bit(); 2595 smp_mb__before_clear_bit();
@@ -2843,9 +2812,6 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2843 bna_txq_prod_indx_doorbell(tcb); 2812 bna_txq_prod_indx_doorbell(tcb);
2844 smp_mb(); 2813 smp_mb();
2845 2814
2846 if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
2847 tasklet_schedule(&bnad->tx_free_tasklet);
2848
2849 return NETDEV_TX_OK; 2815 return NETDEV_TX_OK;
2850} 2816}
2851 2817
@@ -3127,8 +3093,8 @@ bnad_netdev_init(struct bnad *bnad, bool using_dac)
3127/* 3093/*
3128 * 1. Initialize the bnad structure 3094 * 1. Initialize the bnad structure
3129 * 2. Setup netdev pointer in pci_dev 3095 * 2. Setup netdev pointer in pci_dev
3130 * 3. Initialze Tx free tasklet 3096 * 3. Initialize no. of TxQ & CQs & MSIX vectors
3131 * 4. Initialize no. of TxQ & CQs & MSIX vectors 3097 * 4. Initialize work queue.
3132 */ 3098 */
3133static int 3099static int
3134bnad_init(struct bnad *bnad, 3100bnad_init(struct bnad *bnad,
@@ -3171,8 +3137,11 @@ bnad_init(struct bnad *bnad,
3171 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO; 3137 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3172 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO; 3138 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3173 3139
3174 tasklet_init(&bnad->tx_free_tasklet, bnad_tx_free_tasklet, 3140 sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3175 (unsigned long)bnad); 3141 bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
3142
3143 if (!bnad->work_q)
3144 return -ENOMEM;
3176 3145
3177 return 0; 3146 return 0;
3178} 3147}
@@ -3185,6 +3154,12 @@ bnad_init(struct bnad *bnad,
3185static void 3154static void
3186bnad_uninit(struct bnad *bnad) 3155bnad_uninit(struct bnad *bnad)
3187{ 3156{
3157 if (bnad->work_q) {
3158 flush_workqueue(bnad->work_q);
3159 destroy_workqueue(bnad->work_q);
3160 bnad->work_q = NULL;
3161 }
3162
3188 if (bnad->bar0) 3163 if (bnad->bar0)
3189 iounmap(bnad->bar0); 3164 iounmap(bnad->bar0);
3190 pci_set_drvdata(bnad->pcidev, NULL); 3165 pci_set_drvdata(bnad->pcidev, NULL);
@@ -3304,7 +3279,6 @@ bnad_pci_probe(struct pci_dev *pdev,
3304 /* 3279 /*
3305 * Initialize bnad structure 3280 * Initialize bnad structure
3306 * Setup relation between pci_dev & netdev 3281 * Setup relation between pci_dev & netdev
3307 * Init Tx free tasklet
3308 */ 3282 */
3309 err = bnad_init(bnad, pdev, netdev); 3283 err = bnad_init(bnad, pdev, netdev);
3310 if (err) 3284 if (err)
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index 55824d92699f..72742be11277 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -71,7 +71,7 @@ struct bnad_rx_ctrl {
71#define BNAD_NAME "bna" 71#define BNAD_NAME "bna"
72#define BNAD_NAME_LEN 64 72#define BNAD_NAME_LEN 64
73 73
74#define BNAD_VERSION "3.0.2.2" 74#define BNAD_VERSION "3.0.23.0"
75 75
76#define BNAD_MAILBOX_MSIX_INDEX 0 76#define BNAD_MAILBOX_MSIX_INDEX 0
77#define BNAD_MAILBOX_MSIX_VECTORS 1 77#define BNAD_MAILBOX_MSIX_VECTORS 1
@@ -210,6 +210,7 @@ struct bnad_tx_info {
210 struct bna_tx *tx; /* 1:1 between tx_info & tx */ 210 struct bna_tx *tx; /* 1:1 between tx_info & tx */
211 struct bna_tcb *tcb[BNAD_MAX_TXQ_PER_TX]; 211 struct bna_tcb *tcb[BNAD_MAX_TXQ_PER_TX];
212 u32 tx_id; 212 u32 tx_id;
213 struct delayed_work tx_cleanup_work;
213} ____cacheline_aligned; 214} ____cacheline_aligned;
214 215
215struct bnad_rx_info { 216struct bnad_rx_info {
@@ -217,6 +218,7 @@ struct bnad_rx_info {
217 218
218 struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXP_PER_RX]; 219 struct bnad_rx_ctrl rx_ctrl[BNAD_MAX_RXP_PER_RX];
219 u32 rx_id; 220 u32 rx_id;
221 struct work_struct rx_cleanup_work;
220} ____cacheline_aligned; 222} ____cacheline_aligned;
221 223
222/* Unmap queues for Tx / Rx cleanup */ 224/* Unmap queues for Tx / Rx cleanup */
@@ -318,7 +320,7 @@ struct bnad {
318 /* Burnt in MAC address */ 320 /* Burnt in MAC address */
319 mac_t perm_addr; 321 mac_t perm_addr;
320 322
321 struct tasklet_struct tx_free_tasklet; 323 struct workqueue_struct *work_q;
322 324
323 /* Statistics */ 325 /* Statistics */
324 struct bnad_stats stats; 326 struct bnad_stats stats;
@@ -328,6 +330,7 @@ struct bnad {
328 char adapter_name[BNAD_NAME_LEN]; 330 char adapter_name[BNAD_NAME_LEN];
329 char port_name[BNAD_NAME_LEN]; 331 char port_name[BNAD_NAME_LEN];
330 char mbox_irq_name[BNAD_NAME_LEN]; 332 char mbox_irq_name[BNAD_NAME_LEN];
333 char wq_name[BNAD_NAME_LEN];
331 334
332 /* debugfs specific data */ 335 /* debugfs specific data */
333 char *regdata; 336 char *regdata;
@@ -370,8 +373,8 @@ extern void bnad_rx_coalescing_timeo_set(struct bnad *bnad);
370 373
371extern int bnad_setup_rx(struct bnad *bnad, u32 rx_id); 374extern int bnad_setup_rx(struct bnad *bnad, u32 rx_id);
372extern int bnad_setup_tx(struct bnad *bnad, u32 tx_id); 375extern int bnad_setup_tx(struct bnad *bnad, u32 tx_id);
373extern void bnad_cleanup_tx(struct bnad *bnad, u32 tx_id); 376extern void bnad_destroy_tx(struct bnad *bnad, u32 tx_id);
374extern void bnad_cleanup_rx(struct bnad *bnad, u32 rx_id); 377extern void bnad_destroy_rx(struct bnad *bnad, u32 rx_id);
375 378
376/* Timer start/stop protos */ 379/* Timer start/stop protos */
377extern void bnad_dim_timer_start(struct bnad *bnad); 380extern void bnad_dim_timer_start(struct bnad *bnad);
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index ab753d7334a6..40e1e84f4984 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -464,7 +464,7 @@ bnad_set_ringparam(struct net_device *netdev,
464 for (i = 0; i < bnad->num_rx; i++) { 464 for (i = 0; i < bnad->num_rx; i++) {
465 if (!bnad->rx_info[i].rx) 465 if (!bnad->rx_info[i].rx)
466 continue; 466 continue;
467 bnad_cleanup_rx(bnad, i); 467 bnad_destroy_rx(bnad, i);
468 current_err = bnad_setup_rx(bnad, i); 468 current_err = bnad_setup_rx(bnad, i);
469 if (current_err && !err) 469 if (current_err && !err)
470 err = current_err; 470 err = current_err;
@@ -492,7 +492,7 @@ bnad_set_ringparam(struct net_device *netdev,
492 for (i = 0; i < bnad->num_tx; i++) { 492 for (i = 0; i < bnad->num_tx; i++) {
493 if (!bnad->tx_info[i].tx) 493 if (!bnad->tx_info[i].tx)
494 continue; 494 continue;
495 bnad_cleanup_tx(bnad, i); 495 bnad_destroy_tx(bnad, i);
496 current_err = bnad_setup_tx(bnad, i); 496 current_err = bnad_setup_tx(bnad, i);
497 if (current_err && !err) 497 if (current_err && !err)
498 err = current_err; 498 err = current_err;
@@ -539,7 +539,7 @@ bnad_set_pauseparam(struct net_device *netdev,
539} 539}
540 540
541static void 541static void
542bnad_get_strings(struct net_device *netdev, u32 stringset, u8 * string) 542bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
543{ 543{
544 struct bnad *bnad = netdev_priv(netdev); 544 struct bnad *bnad = netdev_priv(netdev);
545 int i, j, q_num; 545 int i, j, q_num;
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index 906117016fc4..77884191a8c6 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -30,6 +30,7 @@
30#include <linux/platform_device.h> 30#include <linux/platform_device.h>
31#include <linux/clk.h> 31#include <linux/clk.h>
32#include <linux/gfp.h> 32#include <linux/gfp.h>
33#include <linux/phy.h>
33 34
34#include <asm/io.h> 35#include <asm/io.h>
35#include <asm/uaccess.h> 36#include <asm/uaccess.h>
@@ -51,21 +52,17 @@
51/* 52/*
52 * Read from a EMAC register. 53 * Read from a EMAC register.
53 */ 54 */
54static inline unsigned long at91_emac_read(unsigned int reg) 55static inline unsigned long at91_emac_read(struct at91_private *lp, unsigned int reg)
55{ 56{
56 void __iomem *emac_base = (void __iomem *)AT91_VA_BASE_EMAC; 57 return __raw_readl(lp->emac_base + reg);
57
58 return __raw_readl(emac_base + reg);
59} 58}
60 59
61/* 60/*
62 * Write to a EMAC register. 61 * Write to a EMAC register.
63 */ 62 */
64static inline void at91_emac_write(unsigned int reg, unsigned long value) 63static inline void at91_emac_write(struct at91_private *lp, unsigned int reg, unsigned long value)
65{ 64{
66 void __iomem *emac_base = (void __iomem *)AT91_VA_BASE_EMAC; 65 __raw_writel(value, lp->emac_base + reg);
67
68 __raw_writel(value, emac_base + reg);
69} 66}
70 67
71/* ........................... PHY INTERFACE ........................... */ 68/* ........................... PHY INTERFACE ........................... */
@@ -75,32 +72,33 @@ static inline void at91_emac_write(unsigned int reg, unsigned long value)
75 * When not called from an interrupt-handler, access to the PHY must be 72 * When not called from an interrupt-handler, access to the PHY must be
76 * protected by a spinlock. 73 * protected by a spinlock.
77 */ 74 */
78static void enable_mdi(void) 75static void enable_mdi(struct at91_private *lp)
79{ 76{
80 unsigned long ctl; 77 unsigned long ctl;
81 78
82 ctl = at91_emac_read(AT91_EMAC_CTL); 79 ctl = at91_emac_read(lp, AT91_EMAC_CTL);
83 at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_MPE); /* enable management port */ 80 at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_MPE); /* enable management port */
84} 81}
85 82
86/* 83/*
87 * Disable the MDIO bit in the MAC control register 84 * Disable the MDIO bit in the MAC control register
88 */ 85 */
89static void disable_mdi(void) 86static void disable_mdi(struct at91_private *lp)
90{ 87{
91 unsigned long ctl; 88 unsigned long ctl;
92 89
93 ctl = at91_emac_read(AT91_EMAC_CTL); 90 ctl = at91_emac_read(lp, AT91_EMAC_CTL);
94 at91_emac_write(AT91_EMAC_CTL, ctl & ~AT91_EMAC_MPE); /* disable management port */ 91 at91_emac_write(lp, AT91_EMAC_CTL, ctl & ~AT91_EMAC_MPE); /* disable management port */
95} 92}
96 93
97/* 94/*
98 * Wait until the PHY operation is complete. 95 * Wait until the PHY operation is complete.
99 */ 96 */
100static inline void at91_phy_wait(void) { 97static inline void at91_phy_wait(struct at91_private *lp)
98{
101 unsigned long timeout = jiffies + 2; 99 unsigned long timeout = jiffies + 2;
102 100
103 while (!(at91_emac_read(AT91_EMAC_SR) & AT91_EMAC_SR_IDLE)) { 101 while (!(at91_emac_read(lp, AT91_EMAC_SR) & AT91_EMAC_SR_IDLE)) {
104 if (time_after(jiffies, timeout)) { 102 if (time_after(jiffies, timeout)) {
105 printk("at91_ether: MIO timeout\n"); 103 printk("at91_ether: MIO timeout\n");
106 break; 104 break;
@@ -113,28 +111,28 @@ static inline void at91_phy_wait(void) {
113 * Write value to the a PHY register 111 * Write value to the a PHY register
114 * Note: MDI interface is assumed to already have been enabled. 112 * Note: MDI interface is assumed to already have been enabled.
115 */ 113 */
116static void write_phy(unsigned char phy_addr, unsigned char address, unsigned int value) 114static void write_phy(struct at91_private *lp, unsigned char phy_addr, unsigned char address, unsigned int value)
117{ 115{
118 at91_emac_write(AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_W 116 at91_emac_write(lp, AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_W
119 | ((phy_addr & 0x1f) << 23) | (address << 18) | (value & AT91_EMAC_DATA)); 117 | ((phy_addr & 0x1f) << 23) | (address << 18) | (value & AT91_EMAC_DATA));
120 118
121 /* Wait until IDLE bit in Network Status register is cleared */ 119 /* Wait until IDLE bit in Network Status register is cleared */
122 at91_phy_wait(); 120 at91_phy_wait(lp);
123} 121}
124 122
125/* 123/*
126 * Read value stored in a PHY register. 124 * Read value stored in a PHY register.
127 * Note: MDI interface is assumed to already have been enabled. 125 * Note: MDI interface is assumed to already have been enabled.
128 */ 126 */
129static void read_phy(unsigned char phy_addr, unsigned char address, unsigned int *value) 127static void read_phy(struct at91_private *lp, unsigned char phy_addr, unsigned char address, unsigned int *value)
130{ 128{
131 at91_emac_write(AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_R 129 at91_emac_write(lp, AT91_EMAC_MAN, AT91_EMAC_MAN_802_3 | AT91_EMAC_RW_R
132 | ((phy_addr & 0x1f) << 23) | (address << 18)); 130 | ((phy_addr & 0x1f) << 23) | (address << 18));
133 131
134 /* Wait until IDLE bit in Network Status register is cleared */ 132 /* Wait until IDLE bit in Network Status register is cleared */
135 at91_phy_wait(); 133 at91_phy_wait(lp);
136 134
137 *value = at91_emac_read(AT91_EMAC_MAN) & AT91_EMAC_DATA; 135 *value = at91_emac_read(lp, AT91_EMAC_MAN) & AT91_EMAC_DATA;
138} 136}
139 137
140/* ........................... PHY MANAGEMENT .......................... */ 138/* ........................... PHY MANAGEMENT .......................... */
@@ -158,13 +156,13 @@ static void update_linkspeed(struct net_device *dev, int silent)
158 } 156 }
159 157
160 /* Link up, or auto-negotiation still in progress */ 158 /* Link up, or auto-negotiation still in progress */
161 read_phy(lp->phy_address, MII_BMSR, &bmsr); 159 read_phy(lp, lp->phy_address, MII_BMSR, &bmsr);
162 read_phy(lp->phy_address, MII_BMCR, &bmcr); 160 read_phy(lp, lp->phy_address, MII_BMCR, &bmcr);
163 if (bmcr & BMCR_ANENABLE) { /* AutoNegotiation is enabled */ 161 if (bmcr & BMCR_ANENABLE) { /* AutoNegotiation is enabled */
164 if (!(bmsr & BMSR_ANEGCOMPLETE)) 162 if (!(bmsr & BMSR_ANEGCOMPLETE))
165 return; /* Do nothing - another interrupt generated when negotiation complete */ 163 return; /* Do nothing - another interrupt generated when negotiation complete */
166 164
167 read_phy(lp->phy_address, MII_LPA, &lpa); 165 read_phy(lp, lp->phy_address, MII_LPA, &lpa);
168 if ((lpa & LPA_100FULL) || (lpa & LPA_100HALF)) speed = SPEED_100; 166 if ((lpa & LPA_100FULL) || (lpa & LPA_100HALF)) speed = SPEED_100;
169 else speed = SPEED_10; 167 else speed = SPEED_10;
170 if ((lpa & LPA_100FULL) || (lpa & LPA_10FULL)) duplex = DUPLEX_FULL; 168 if ((lpa & LPA_100FULL) || (lpa & LPA_10FULL)) duplex = DUPLEX_FULL;
@@ -175,7 +173,7 @@ static void update_linkspeed(struct net_device *dev, int silent)
175 } 173 }
176 174
177 /* Update the MAC */ 175 /* Update the MAC */
178 mac_cfg = at91_emac_read(AT91_EMAC_CFG) & ~(AT91_EMAC_SPD | AT91_EMAC_FD); 176 mac_cfg = at91_emac_read(lp, AT91_EMAC_CFG) & ~(AT91_EMAC_SPD | AT91_EMAC_FD);
179 if (speed == SPEED_100) { 177 if (speed == SPEED_100) {
180 if (duplex == DUPLEX_FULL) /* 100 Full Duplex */ 178 if (duplex == DUPLEX_FULL) /* 100 Full Duplex */
181 mac_cfg |= AT91_EMAC_SPD | AT91_EMAC_FD; 179 mac_cfg |= AT91_EMAC_SPD | AT91_EMAC_FD;
@@ -186,7 +184,7 @@ static void update_linkspeed(struct net_device *dev, int silent)
186 mac_cfg |= AT91_EMAC_FD; 184 mac_cfg |= AT91_EMAC_FD;
187 else {} /* 10 Half Duplex */ 185 else {} /* 10 Half Duplex */
188 } 186 }
189 at91_emac_write(AT91_EMAC_CFG, mac_cfg); 187 at91_emac_write(lp, AT91_EMAC_CFG, mac_cfg);
190 188
191 if (!silent) 189 if (!silent)
192 printk(KERN_INFO "%s: Link now %i-%s\n", dev->name, speed, (duplex == DUPLEX_FULL) ? "FullDuplex" : "HalfDuplex"); 190 printk(KERN_INFO "%s: Link now %i-%s\n", dev->name, speed, (duplex == DUPLEX_FULL) ? "FullDuplex" : "HalfDuplex");
@@ -207,34 +205,34 @@ static irqreturn_t at91ether_phy_interrupt(int irq, void *dev_id)
207 * level-triggering. We therefore have to check if the PHY actually has 205 * level-triggering. We therefore have to check if the PHY actually has
208 * an IRQ pending. 206 * an IRQ pending.
209 */ 207 */
210 enable_mdi(); 208 enable_mdi(lp);
211 if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { 209 if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) {
212 read_phy(lp->phy_address, MII_DSINTR_REG, &phy); /* ack interrupt in Davicom PHY */ 210 read_phy(lp, lp->phy_address, MII_DSINTR_REG, &phy); /* ack interrupt in Davicom PHY */
213 if (!(phy & (1 << 0))) 211 if (!(phy & (1 << 0)))
214 goto done; 212 goto done;
215 } 213 }
216 else if (lp->phy_type == MII_LXT971A_ID) { 214 else if (lp->phy_type == MII_LXT971A_ID) {
217 read_phy(lp->phy_address, MII_ISINTS_REG, &phy); /* ack interrupt in Intel PHY */ 215 read_phy(lp, lp->phy_address, MII_ISINTS_REG, &phy); /* ack interrupt in Intel PHY */
218 if (!(phy & (1 << 2))) 216 if (!(phy & (1 << 2)))
219 goto done; 217 goto done;
220 } 218 }
221 else if (lp->phy_type == MII_BCM5221_ID) { 219 else if (lp->phy_type == MII_BCM5221_ID) {
222 read_phy(lp->phy_address, MII_BCMINTR_REG, &phy); /* ack interrupt in Broadcom PHY */ 220 read_phy(lp, lp->phy_address, MII_BCMINTR_REG, &phy); /* ack interrupt in Broadcom PHY */
223 if (!(phy & (1 << 0))) 221 if (!(phy & (1 << 0)))
224 goto done; 222 goto done;
225 } 223 }
226 else if (lp->phy_type == MII_KS8721_ID) { 224 else if (lp->phy_type == MII_KS8721_ID) {
227 read_phy(lp->phy_address, MII_TPISTATUS, &phy); /* ack interrupt in Micrel PHY */ 225 read_phy(lp, lp->phy_address, MII_TPISTATUS, &phy); /* ack interrupt in Micrel PHY */
228 if (!(phy & ((1 << 2) | 1))) 226 if (!(phy & ((1 << 2) | 1)))
229 goto done; 227 goto done;
230 } 228 }
231 else if (lp->phy_type == MII_T78Q21x3_ID) { /* ack interrupt in Teridian PHY */ 229 else if (lp->phy_type == MII_T78Q21x3_ID) { /* ack interrupt in Teridian PHY */
232 read_phy(lp->phy_address, MII_T78Q21INT_REG, &phy); 230 read_phy(lp, lp->phy_address, MII_T78Q21INT_REG, &phy);
233 if (!(phy & ((1 << 2) | 1))) 231 if (!(phy & ((1 << 2) | 1)))
234 goto done; 232 goto done;
235 } 233 }
236 else if (lp->phy_type == MII_DP83848_ID) { 234 else if (lp->phy_type == MII_DP83848_ID) {
237 read_phy(lp->phy_address, MII_DPPHYSTS_REG, &phy); /* ack interrupt in DP83848 PHY */ 235 read_phy(lp, lp->phy_address, MII_DPPHYSTS_REG, &phy); /* ack interrupt in DP83848 PHY */
238 if (!(phy & (1 << 7))) 236 if (!(phy & (1 << 7)))
239 goto done; 237 goto done;
240 } 238 }
@@ -242,7 +240,7 @@ static irqreturn_t at91ether_phy_interrupt(int irq, void *dev_id)
242 update_linkspeed(dev, 0); 240 update_linkspeed(dev, 0);
243 241
244done: 242done:
245 disable_mdi(); 243 disable_mdi(lp);
246 244
247 return IRQ_HANDLED; 245 return IRQ_HANDLED;
248} 246}
@@ -265,7 +263,7 @@ static void enable_phyirq(struct net_device *dev)
265 return; 263 return;
266 } 264 }
267 265
268 irq_number = lp->board_data.phy_irq_pin; 266 irq_number = gpio_to_irq(lp->board_data.phy_irq_pin);
269 status = request_irq(irq_number, at91ether_phy_interrupt, 0, dev->name, dev); 267 status = request_irq(irq_number, at91ether_phy_interrupt, 0, dev->name, dev);
270 if (status) { 268 if (status) {
271 printk(KERN_ERR "at91_ether: PHY IRQ %d request failed - status %d!\n", irq_number, status); 269 printk(KERN_ERR "at91_ether: PHY IRQ %d request failed - status %d!\n", irq_number, status);
@@ -273,41 +271,41 @@ static void enable_phyirq(struct net_device *dev)
273 } 271 }
274 272
275 spin_lock_irq(&lp->lock); 273 spin_lock_irq(&lp->lock);
276 enable_mdi(); 274 enable_mdi(lp);
277 275
278 if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { /* for Davicom PHY */ 276 if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { /* for Davicom PHY */
279 read_phy(lp->phy_address, MII_DSINTR_REG, &dsintr); 277 read_phy(lp, lp->phy_address, MII_DSINTR_REG, &dsintr);
280 dsintr = dsintr & ~0xf00; /* clear bits 8..11 */ 278 dsintr = dsintr & ~0xf00; /* clear bits 8..11 */
281 write_phy(lp->phy_address, MII_DSINTR_REG, dsintr); 279 write_phy(lp, lp->phy_address, MII_DSINTR_REG, dsintr);
282 } 280 }
283 else if (lp->phy_type == MII_LXT971A_ID) { /* for Intel PHY */ 281 else if (lp->phy_type == MII_LXT971A_ID) { /* for Intel PHY */
284 read_phy(lp->phy_address, MII_ISINTE_REG, &dsintr); 282 read_phy(lp, lp->phy_address, MII_ISINTE_REG, &dsintr);
285 dsintr = dsintr | 0xf2; /* set bits 1, 4..7 */ 283 dsintr = dsintr | 0xf2; /* set bits 1, 4..7 */
286 write_phy(lp->phy_address, MII_ISINTE_REG, dsintr); 284 write_phy(lp, lp->phy_address, MII_ISINTE_REG, dsintr);
287 } 285 }
288 else if (lp->phy_type == MII_BCM5221_ID) { /* for Broadcom PHY */ 286 else if (lp->phy_type == MII_BCM5221_ID) { /* for Broadcom PHY */
289 dsintr = (1 << 15) | ( 1 << 14); 287 dsintr = (1 << 15) | ( 1 << 14);
290 write_phy(lp->phy_address, MII_BCMINTR_REG, dsintr); 288 write_phy(lp, lp->phy_address, MII_BCMINTR_REG, dsintr);
291 } 289 }
292 else if (lp->phy_type == MII_KS8721_ID) { /* for Micrel PHY */ 290 else if (lp->phy_type == MII_KS8721_ID) { /* for Micrel PHY */
293 dsintr = (1 << 10) | ( 1 << 8); 291 dsintr = (1 << 10) | ( 1 << 8);
294 write_phy(lp->phy_address, MII_TPISTATUS, dsintr); 292 write_phy(lp, lp->phy_address, MII_TPISTATUS, dsintr);
295 } 293 }
296 else if (lp->phy_type == MII_T78Q21x3_ID) { /* for Teridian PHY */ 294 else if (lp->phy_type == MII_T78Q21x3_ID) { /* for Teridian PHY */
297 read_phy(lp->phy_address, MII_T78Q21INT_REG, &dsintr); 295 read_phy(lp, lp->phy_address, MII_T78Q21INT_REG, &dsintr);
298 dsintr = dsintr | 0x500; /* set bits 8, 10 */ 296 dsintr = dsintr | 0x500; /* set bits 8, 10 */
299 write_phy(lp->phy_address, MII_T78Q21INT_REG, dsintr); 297 write_phy(lp, lp->phy_address, MII_T78Q21INT_REG, dsintr);
300 } 298 }
301 else if (lp->phy_type == MII_DP83848_ID) { /* National Semiconductor DP83848 PHY */ 299 else if (lp->phy_type == MII_DP83848_ID) { /* National Semiconductor DP83848 PHY */
302 read_phy(lp->phy_address, MII_DPMISR_REG, &dsintr); 300 read_phy(lp, lp->phy_address, MII_DPMISR_REG, &dsintr);
303 dsintr = dsintr | 0x3c; /* set bits 2..5 */ 301 dsintr = dsintr | 0x3c; /* set bits 2..5 */
304 write_phy(lp->phy_address, MII_DPMISR_REG, dsintr); 302 write_phy(lp, lp->phy_address, MII_DPMISR_REG, dsintr);
305 read_phy(lp->phy_address, MII_DPMICR_REG, &dsintr); 303 read_phy(lp, lp->phy_address, MII_DPMICR_REG, &dsintr);
306 dsintr = dsintr | 0x3; /* set bits 0,1 */ 304 dsintr = dsintr | 0x3; /* set bits 0,1 */
307 write_phy(lp->phy_address, MII_DPMICR_REG, dsintr); 305 write_phy(lp, lp->phy_address, MII_DPMICR_REG, dsintr);
308 } 306 }
309 307
310 disable_mdi(); 308 disable_mdi(lp);
311 spin_unlock_irq(&lp->lock); 309 spin_unlock_irq(&lp->lock);
312} 310}
313 311
@@ -326,46 +324,46 @@ static void disable_phyirq(struct net_device *dev)
326 } 324 }
327 325
328 spin_lock_irq(&lp->lock); 326 spin_lock_irq(&lp->lock);
329 enable_mdi(); 327 enable_mdi(lp);
330 328
331 if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { /* for Davicom PHY */ 329 if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { /* for Davicom PHY */
332 read_phy(lp->phy_address, MII_DSINTR_REG, &dsintr); 330 read_phy(lp, lp->phy_address, MII_DSINTR_REG, &dsintr);
333 dsintr = dsintr | 0xf00; /* set bits 8..11 */ 331 dsintr = dsintr | 0xf00; /* set bits 8..11 */
334 write_phy(lp->phy_address, MII_DSINTR_REG, dsintr); 332 write_phy(lp, lp->phy_address, MII_DSINTR_REG, dsintr);
335 } 333 }
336 else if (lp->phy_type == MII_LXT971A_ID) { /* for Intel PHY */ 334 else if (lp->phy_type == MII_LXT971A_ID) { /* for Intel PHY */
337 read_phy(lp->phy_address, MII_ISINTE_REG, &dsintr); 335 read_phy(lp, lp->phy_address, MII_ISINTE_REG, &dsintr);
338 dsintr = dsintr & ~0xf2; /* clear bits 1, 4..7 */ 336 dsintr = dsintr & ~0xf2; /* clear bits 1, 4..7 */
339 write_phy(lp->phy_address, MII_ISINTE_REG, dsintr); 337 write_phy(lp, lp->phy_address, MII_ISINTE_REG, dsintr);
340 } 338 }
341 else if (lp->phy_type == MII_BCM5221_ID) { /* for Broadcom PHY */ 339 else if (lp->phy_type == MII_BCM5221_ID) { /* for Broadcom PHY */
342 read_phy(lp->phy_address, MII_BCMINTR_REG, &dsintr); 340 read_phy(lp, lp->phy_address, MII_BCMINTR_REG, &dsintr);
343 dsintr = ~(1 << 14); 341 dsintr = ~(1 << 14);
344 write_phy(lp->phy_address, MII_BCMINTR_REG, dsintr); 342 write_phy(lp, lp->phy_address, MII_BCMINTR_REG, dsintr);
345 } 343 }
346 else if (lp->phy_type == MII_KS8721_ID) { /* for Micrel PHY */ 344 else if (lp->phy_type == MII_KS8721_ID) { /* for Micrel PHY */
347 read_phy(lp->phy_address, MII_TPISTATUS, &dsintr); 345 read_phy(lp, lp->phy_address, MII_TPISTATUS, &dsintr);
348 dsintr = ~((1 << 10) | (1 << 8)); 346 dsintr = ~((1 << 10) | (1 << 8));
349 write_phy(lp->phy_address, MII_TPISTATUS, dsintr); 347 write_phy(lp, lp->phy_address, MII_TPISTATUS, dsintr);
350 } 348 }
351 else if (lp->phy_type == MII_T78Q21x3_ID) { /* for Teridian PHY */ 349 else if (lp->phy_type == MII_T78Q21x3_ID) { /* for Teridian PHY */
352 read_phy(lp->phy_address, MII_T78Q21INT_REG, &dsintr); 350 read_phy(lp, lp->phy_address, MII_T78Q21INT_REG, &dsintr);
353 dsintr = dsintr & ~0x500; /* clear bits 8, 10 */ 351 dsintr = dsintr & ~0x500; /* clear bits 8, 10 */
354 write_phy(lp->phy_address, MII_T78Q21INT_REG, dsintr); 352 write_phy(lp, lp->phy_address, MII_T78Q21INT_REG, dsintr);
355 } 353 }
356 else if (lp->phy_type == MII_DP83848_ID) { /* National Semiconductor DP83848 PHY */ 354 else if (lp->phy_type == MII_DP83848_ID) { /* National Semiconductor DP83848 PHY */
357 read_phy(lp->phy_address, MII_DPMICR_REG, &dsintr); 355 read_phy(lp, lp->phy_address, MII_DPMICR_REG, &dsintr);
358 dsintr = dsintr & ~0x3; /* clear bits 0, 1 */ 356 dsintr = dsintr & ~0x3; /* clear bits 0, 1 */
359 write_phy(lp->phy_address, MII_DPMICR_REG, dsintr); 357 write_phy(lp, lp->phy_address, MII_DPMICR_REG, dsintr);
360 read_phy(lp->phy_address, MII_DPMISR_REG, &dsintr); 358 read_phy(lp, lp->phy_address, MII_DPMISR_REG, &dsintr);
361 dsintr = dsintr & ~0x3c; /* clear bits 2..5 */ 359 dsintr = dsintr & ~0x3c; /* clear bits 2..5 */
362 write_phy(lp->phy_address, MII_DPMISR_REG, dsintr); 360 write_phy(lp, lp->phy_address, MII_DPMISR_REG, dsintr);
363 } 361 }
364 362
365 disable_mdi(); 363 disable_mdi(lp);
366 spin_unlock_irq(&lp->lock); 364 spin_unlock_irq(&lp->lock);
367 365
368 irq_number = lp->board_data.phy_irq_pin; 366 irq_number = gpio_to_irq(lp->board_data.phy_irq_pin);
369 free_irq(irq_number, dev); /* Free interrupt handler */ 367 free_irq(irq_number, dev); /* Free interrupt handler */
370} 368}
371 369
@@ -379,17 +377,17 @@ static void reset_phy(struct net_device *dev)
379 unsigned int bmcr; 377 unsigned int bmcr;
380 378
381 spin_lock_irq(&lp->lock); 379 spin_lock_irq(&lp->lock);
382 enable_mdi(); 380 enable_mdi(lp);
383 381
384 /* Perform PHY reset */ 382 /* Perform PHY reset */
385 write_phy(lp->phy_address, MII_BMCR, BMCR_RESET); 383 write_phy(lp, lp->phy_address, MII_BMCR, BMCR_RESET);
386 384
387 /* Wait until PHY reset is complete */ 385 /* Wait until PHY reset is complete */
388 do { 386 do {
389 read_phy(lp->phy_address, MII_BMCR, &bmcr); 387 read_phy(lp, lp->phy_address, MII_BMCR, &bmcr);
390 } while (!(bmcr & BMCR_RESET)); 388 } while (!(bmcr & BMCR_RESET));
391 389
392 disable_mdi(); 390 disable_mdi(lp);
393 spin_unlock_irq(&lp->lock); 391 spin_unlock_irq(&lp->lock);
394} 392}
395#endif 393#endif
@@ -399,13 +397,37 @@ static void at91ether_check_link(unsigned long dev_id)
399 struct net_device *dev = (struct net_device *) dev_id; 397 struct net_device *dev = (struct net_device *) dev_id;
400 struct at91_private *lp = netdev_priv(dev); 398 struct at91_private *lp = netdev_priv(dev);
401 399
402 enable_mdi(); 400 enable_mdi(lp);
403 update_linkspeed(dev, 1); 401 update_linkspeed(dev, 1);
404 disable_mdi(); 402 disable_mdi(lp);
405 403
406 mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL); 404 mod_timer(&lp->check_timer, jiffies + LINK_POLL_INTERVAL);
407} 405}
408 406
407/*
408 * Perform any PHY-specific initialization.
409 */
410static void __init initialize_phy(struct at91_private *lp)
411{
412 unsigned int val;
413
414 spin_lock_irq(&lp->lock);
415 enable_mdi(lp);
416
417 if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) {
418 read_phy(lp, lp->phy_address, MII_DSCR_REG, &val);
419 if ((val & (1 << 10)) == 0) /* DSCR bit 10 is 0 -- fiber mode */
420 lp->phy_media = PORT_FIBRE;
421 } else if (machine_is_csb337()) {
422 /* mix link activity status into LED2 link state */
423 write_phy(lp, lp->phy_address, MII_LEDCTRL_REG, 0x0d22);
424 } else if (machine_is_ecbat91())
425 write_phy(lp, lp->phy_address, MII_LEDCTRL_REG, 0x156A);
426
427 disable_mdi(lp);
428 spin_unlock_irq(&lp->lock);
429}
430
409/* ......................... ADDRESS MANAGEMENT ........................ */ 431/* ......................... ADDRESS MANAGEMENT ........................ */
410 432
411/* 433/*
@@ -454,17 +476,19 @@ static short __init unpack_mac_address(struct net_device *dev, unsigned int hi,
454 */ 476 */
455static void __init get_mac_address(struct net_device *dev) 477static void __init get_mac_address(struct net_device *dev)
456{ 478{
479 struct at91_private *lp = netdev_priv(dev);
480
457 /* Check Specific-Address 1 */ 481 /* Check Specific-Address 1 */
458 if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA1H), at91_emac_read(AT91_EMAC_SA1L))) 482 if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA1H), at91_emac_read(lp, AT91_EMAC_SA1L)))
459 return; 483 return;
460 /* Check Specific-Address 2 */ 484 /* Check Specific-Address 2 */
461 if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA2H), at91_emac_read(AT91_EMAC_SA2L))) 485 if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA2H), at91_emac_read(lp, AT91_EMAC_SA2L)))
462 return; 486 return;
463 /* Check Specific-Address 3 */ 487 /* Check Specific-Address 3 */
464 if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA3H), at91_emac_read(AT91_EMAC_SA3L))) 488 if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA3H), at91_emac_read(lp, AT91_EMAC_SA3L)))
465 return; 489 return;
466 /* Check Specific-Address 4 */ 490 /* Check Specific-Address 4 */
467 if (unpack_mac_address(dev, at91_emac_read(AT91_EMAC_SA4H), at91_emac_read(AT91_EMAC_SA4L))) 491 if (unpack_mac_address(dev, at91_emac_read(lp, AT91_EMAC_SA4H), at91_emac_read(lp, AT91_EMAC_SA4L)))
468 return; 492 return;
469 493
470 printk(KERN_ERR "at91_ether: Your bootloader did not configure a MAC address.\n"); 494 printk(KERN_ERR "at91_ether: Your bootloader did not configure a MAC address.\n");
@@ -475,11 +499,13 @@ static void __init get_mac_address(struct net_device *dev)
475 */ 499 */
476static void update_mac_address(struct net_device *dev) 500static void update_mac_address(struct net_device *dev)
477{ 501{
478 at91_emac_write(AT91_EMAC_SA1L, (dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) | (dev->dev_addr[1] << 8) | (dev->dev_addr[0])); 502 struct at91_private *lp = netdev_priv(dev);
479 at91_emac_write(AT91_EMAC_SA1H, (dev->dev_addr[5] << 8) | (dev->dev_addr[4]));
480 503
481 at91_emac_write(AT91_EMAC_SA2L, 0); 504 at91_emac_write(lp, AT91_EMAC_SA1L, (dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) | (dev->dev_addr[1] << 8) | (dev->dev_addr[0]));
482 at91_emac_write(AT91_EMAC_SA2H, 0); 505 at91_emac_write(lp, AT91_EMAC_SA1H, (dev->dev_addr[5] << 8) | (dev->dev_addr[4]));
506
507 at91_emac_write(lp, AT91_EMAC_SA2L, 0);
508 at91_emac_write(lp, AT91_EMAC_SA2H, 0);
483} 509}
484 510
485/* 511/*
@@ -559,6 +585,7 @@ static int hash_get_index(__u8 *addr)
559 */ 585 */
560static void at91ether_sethashtable(struct net_device *dev) 586static void at91ether_sethashtable(struct net_device *dev)
561{ 587{
588 struct at91_private *lp = netdev_priv(dev);
562 struct netdev_hw_addr *ha; 589 struct netdev_hw_addr *ha;
563 unsigned long mc_filter[2]; 590 unsigned long mc_filter[2];
564 unsigned int bitnr; 591 unsigned int bitnr;
@@ -570,8 +597,8 @@ static void at91ether_sethashtable(struct net_device *dev)
570 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); 597 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
571 } 598 }
572 599
573 at91_emac_write(AT91_EMAC_HSL, mc_filter[0]); 600 at91_emac_write(lp, AT91_EMAC_HSL, mc_filter[0]);
574 at91_emac_write(AT91_EMAC_HSH, mc_filter[1]); 601 at91_emac_write(lp, AT91_EMAC_HSH, mc_filter[1]);
575} 602}
576 603
577/* 604/*
@@ -579,9 +606,10 @@ static void at91ether_sethashtable(struct net_device *dev)
579 */ 606 */
580static void at91ether_set_multicast_list(struct net_device *dev) 607static void at91ether_set_multicast_list(struct net_device *dev)
581{ 608{
609 struct at91_private *lp = netdev_priv(dev);
582 unsigned long cfg; 610 unsigned long cfg;
583 611
584 cfg = at91_emac_read(AT91_EMAC_CFG); 612 cfg = at91_emac_read(lp, AT91_EMAC_CFG);
585 613
586 if (dev->flags & IFF_PROMISC) /* Enable promiscuous mode */ 614 if (dev->flags & IFF_PROMISC) /* Enable promiscuous mode */
587 cfg |= AT91_EMAC_CAF; 615 cfg |= AT91_EMAC_CAF;
@@ -589,34 +617,37 @@ static void at91ether_set_multicast_list(struct net_device *dev)
589 cfg &= ~AT91_EMAC_CAF; 617 cfg &= ~AT91_EMAC_CAF;
590 618
591 if (dev->flags & IFF_ALLMULTI) { /* Enable all multicast mode */ 619 if (dev->flags & IFF_ALLMULTI) { /* Enable all multicast mode */
592 at91_emac_write(AT91_EMAC_HSH, -1); 620 at91_emac_write(lp, AT91_EMAC_HSH, -1);
593 at91_emac_write(AT91_EMAC_HSL, -1); 621 at91_emac_write(lp, AT91_EMAC_HSL, -1);
594 cfg |= AT91_EMAC_MTI; 622 cfg |= AT91_EMAC_MTI;
595 } else if (!netdev_mc_empty(dev)) { /* Enable specific multicasts */ 623 } else if (!netdev_mc_empty(dev)) { /* Enable specific multicasts */
596 at91ether_sethashtable(dev); 624 at91ether_sethashtable(dev);
597 cfg |= AT91_EMAC_MTI; 625 cfg |= AT91_EMAC_MTI;
598 } else if (dev->flags & (~IFF_ALLMULTI)) { /* Disable all multicast mode */ 626 } else if (dev->flags & (~IFF_ALLMULTI)) { /* Disable all multicast mode */
599 at91_emac_write(AT91_EMAC_HSH, 0); 627 at91_emac_write(lp, AT91_EMAC_HSH, 0);
600 at91_emac_write(AT91_EMAC_HSL, 0); 628 at91_emac_write(lp, AT91_EMAC_HSL, 0);
601 cfg &= ~AT91_EMAC_MTI; 629 cfg &= ~AT91_EMAC_MTI;
602 } 630 }
603 631
604 at91_emac_write(AT91_EMAC_CFG, cfg); 632 at91_emac_write(lp, AT91_EMAC_CFG, cfg);
605} 633}
606 634
607/* ......................... ETHTOOL SUPPORT ........................... */ 635/* ......................... ETHTOOL SUPPORT ........................... */
608 636
609static int mdio_read(struct net_device *dev, int phy_id, int location) 637static int mdio_read(struct net_device *dev, int phy_id, int location)
610{ 638{
639 struct at91_private *lp = netdev_priv(dev);
611 unsigned int value; 640 unsigned int value;
612 641
613 read_phy(phy_id, location, &value); 642 read_phy(lp, phy_id, location, &value);
614 return value; 643 return value;
615} 644}
616 645
617static void mdio_write(struct net_device *dev, int phy_id, int location, int value) 646static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
618{ 647{
619 write_phy(phy_id, location, value); 648 struct at91_private *lp = netdev_priv(dev);
649
650 write_phy(lp, phy_id, location, value);
620} 651}
621 652
622static int at91ether_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 653static int at91ether_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -625,11 +656,11 @@ static int at91ether_get_settings(struct net_device *dev, struct ethtool_cmd *cm
625 int ret; 656 int ret;
626 657
627 spin_lock_irq(&lp->lock); 658 spin_lock_irq(&lp->lock);
628 enable_mdi(); 659 enable_mdi(lp);
629 660
630 ret = mii_ethtool_gset(&lp->mii, cmd); 661 ret = mii_ethtool_gset(&lp->mii, cmd);
631 662
632 disable_mdi(); 663 disable_mdi(lp);
633 spin_unlock_irq(&lp->lock); 664 spin_unlock_irq(&lp->lock);
634 665
635 if (lp->phy_media == PORT_FIBRE) { /* override media type since mii.c doesn't know */ 666 if (lp->phy_media == PORT_FIBRE) { /* override media type since mii.c doesn't know */
@@ -646,11 +677,11 @@ static int at91ether_set_settings(struct net_device *dev, struct ethtool_cmd *cm
646 int ret; 677 int ret;
647 678
648 spin_lock_irq(&lp->lock); 679 spin_lock_irq(&lp->lock);
649 enable_mdi(); 680 enable_mdi(lp);
650 681
651 ret = mii_ethtool_sset(&lp->mii, cmd); 682 ret = mii_ethtool_sset(&lp->mii, cmd);
652 683
653 disable_mdi(); 684 disable_mdi(lp);
654 spin_unlock_irq(&lp->lock); 685 spin_unlock_irq(&lp->lock);
655 686
656 return ret; 687 return ret;
@@ -662,11 +693,11 @@ static int at91ether_nwayreset(struct net_device *dev)
662 int ret; 693 int ret;
663 694
664 spin_lock_irq(&lp->lock); 695 spin_lock_irq(&lp->lock);
665 enable_mdi(); 696 enable_mdi(lp);
666 697
667 ret = mii_nway_restart(&lp->mii); 698 ret = mii_nway_restart(&lp->mii);
668 699
669 disable_mdi(); 700 disable_mdi(lp);
670 spin_unlock_irq(&lp->lock); 701 spin_unlock_irq(&lp->lock);
671 702
672 return ret; 703 return ret;
@@ -696,9 +727,9 @@ static int at91ether_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
696 return -EINVAL; 727 return -EINVAL;
697 728
698 spin_lock_irq(&lp->lock); 729 spin_lock_irq(&lp->lock);
699 enable_mdi(); 730 enable_mdi(lp);
700 res = generic_mii_ioctl(&lp->mii, if_mii(rq), cmd, NULL); 731 res = generic_mii_ioctl(&lp->mii, if_mii(rq), cmd, NULL);
701 disable_mdi(); 732 disable_mdi(lp);
702 spin_unlock_irq(&lp->lock); 733 spin_unlock_irq(&lp->lock);
703 734
704 return res; 735 return res;
@@ -731,11 +762,11 @@ static void at91ether_start(struct net_device *dev)
731 lp->rxBuffIndex = 0; 762 lp->rxBuffIndex = 0;
732 763
733 /* Program address of descriptor list in Rx Buffer Queue register */ 764 /* Program address of descriptor list in Rx Buffer Queue register */
734 at91_emac_write(AT91_EMAC_RBQP, (unsigned long) dlist_phys); 765 at91_emac_write(lp, AT91_EMAC_RBQP, (unsigned long) dlist_phys);
735 766
736 /* Enable Receive and Transmit */ 767 /* Enable Receive and Transmit */
737 ctl = at91_emac_read(AT91_EMAC_CTL); 768 ctl = at91_emac_read(lp, AT91_EMAC_CTL);
738 at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_RE | AT91_EMAC_TE); 769 at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_RE | AT91_EMAC_TE);
739} 770}
740 771
741/* 772/*
@@ -752,8 +783,8 @@ static int at91ether_open(struct net_device *dev)
752 clk_enable(lp->ether_clk); /* Re-enable Peripheral clock */ 783 clk_enable(lp->ether_clk); /* Re-enable Peripheral clock */
753 784
754 /* Clear internal statistics */ 785 /* Clear internal statistics */
755 ctl = at91_emac_read(AT91_EMAC_CTL); 786 ctl = at91_emac_read(lp, AT91_EMAC_CTL);
756 at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_CSR); 787 at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_CSR);
757 788
758 /* Update the MAC address (incase user has changed it) */ 789 /* Update the MAC address (incase user has changed it) */
759 update_mac_address(dev); 790 update_mac_address(dev);
@@ -762,15 +793,15 @@ static int at91ether_open(struct net_device *dev)
762 enable_phyirq(dev); 793 enable_phyirq(dev);
763 794
764 /* Enable MAC interrupts */ 795 /* Enable MAC interrupts */
765 at91_emac_write(AT91_EMAC_IER, AT91_EMAC_RCOM | AT91_EMAC_RBNA 796 at91_emac_write(lp, AT91_EMAC_IER, AT91_EMAC_RCOM | AT91_EMAC_RBNA
766 | AT91_EMAC_TUND | AT91_EMAC_RTRY | AT91_EMAC_TCOM 797 | AT91_EMAC_TUND | AT91_EMAC_RTRY | AT91_EMAC_TCOM
767 | AT91_EMAC_ROVR | AT91_EMAC_ABT); 798 | AT91_EMAC_ROVR | AT91_EMAC_ABT);
768 799
769 /* Determine current link speed */ 800 /* Determine current link speed */
770 spin_lock_irq(&lp->lock); 801 spin_lock_irq(&lp->lock);
771 enable_mdi(); 802 enable_mdi(lp);
772 update_linkspeed(dev, 0); 803 update_linkspeed(dev, 0);
773 disable_mdi(); 804 disable_mdi(lp);
774 spin_unlock_irq(&lp->lock); 805 spin_unlock_irq(&lp->lock);
775 806
776 at91ether_start(dev); 807 at91ether_start(dev);
@@ -787,14 +818,14 @@ static int at91ether_close(struct net_device *dev)
787 unsigned long ctl; 818 unsigned long ctl;
788 819
789 /* Disable Receiver and Transmitter */ 820 /* Disable Receiver and Transmitter */
790 ctl = at91_emac_read(AT91_EMAC_CTL); 821 ctl = at91_emac_read(lp, AT91_EMAC_CTL);
791 at91_emac_write(AT91_EMAC_CTL, ctl & ~(AT91_EMAC_TE | AT91_EMAC_RE)); 822 at91_emac_write(lp, AT91_EMAC_CTL, ctl & ~(AT91_EMAC_TE | AT91_EMAC_RE));
792 823
793 /* Disable PHY interrupt */ 824 /* Disable PHY interrupt */
794 disable_phyirq(dev); 825 disable_phyirq(dev);
795 826
796 /* Disable MAC interrupts */ 827 /* Disable MAC interrupts */
797 at91_emac_write(AT91_EMAC_IDR, AT91_EMAC_RCOM | AT91_EMAC_RBNA 828 at91_emac_write(lp, AT91_EMAC_IDR, AT91_EMAC_RCOM | AT91_EMAC_RBNA
798 | AT91_EMAC_TUND | AT91_EMAC_RTRY | AT91_EMAC_TCOM 829 | AT91_EMAC_TUND | AT91_EMAC_RTRY | AT91_EMAC_TCOM
799 | AT91_EMAC_ROVR | AT91_EMAC_ABT); 830 | AT91_EMAC_ROVR | AT91_EMAC_ABT);
800 831
@@ -812,7 +843,7 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
812{ 843{
813 struct at91_private *lp = netdev_priv(dev); 844 struct at91_private *lp = netdev_priv(dev);
814 845
815 if (at91_emac_read(AT91_EMAC_TSR) & AT91_EMAC_TSR_BNQ) { 846 if (at91_emac_read(lp, AT91_EMAC_TSR) & AT91_EMAC_TSR_BNQ) {
816 netif_stop_queue(dev); 847 netif_stop_queue(dev);
817 848
818 /* Store packet information (to free when Tx completed) */ 849 /* Store packet information (to free when Tx completed) */
@@ -822,9 +853,9 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
822 dev->stats.tx_bytes += skb->len; 853 dev->stats.tx_bytes += skb->len;
823 854
824 /* Set address of the data in the Transmit Address register */ 855 /* Set address of the data in the Transmit Address register */
825 at91_emac_write(AT91_EMAC_TAR, lp->skb_physaddr); 856 at91_emac_write(lp, AT91_EMAC_TAR, lp->skb_physaddr);
826 /* Set length of the packet in the Transmit Control register */ 857 /* Set length of the packet in the Transmit Control register */
827 at91_emac_write(AT91_EMAC_TCR, skb->len); 858 at91_emac_write(lp, AT91_EMAC_TCR, skb->len);
828 859
829 } else { 860 } else {
830 printk(KERN_ERR "at91_ether.c: at91ether_start_xmit() called, but device is busy!\n"); 861 printk(KERN_ERR "at91_ether.c: at91ether_start_xmit() called, but device is busy!\n");
@@ -841,31 +872,32 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
841 */ 872 */
842static struct net_device_stats *at91ether_stats(struct net_device *dev) 873static struct net_device_stats *at91ether_stats(struct net_device *dev)
843{ 874{
875 struct at91_private *lp = netdev_priv(dev);
844 int ale, lenerr, seqe, lcol, ecol; 876 int ale, lenerr, seqe, lcol, ecol;
845 877
846 if (netif_running(dev)) { 878 if (netif_running(dev)) {
847 dev->stats.rx_packets += at91_emac_read(AT91_EMAC_OK); /* Good frames received */ 879 dev->stats.rx_packets += at91_emac_read(lp, AT91_EMAC_OK); /* Good frames received */
848 ale = at91_emac_read(AT91_EMAC_ALE); 880 ale = at91_emac_read(lp, AT91_EMAC_ALE);
849 dev->stats.rx_frame_errors += ale; /* Alignment errors */ 881 dev->stats.rx_frame_errors += ale; /* Alignment errors */
850 lenerr = at91_emac_read(AT91_EMAC_ELR) + at91_emac_read(AT91_EMAC_USF); 882 lenerr = at91_emac_read(lp, AT91_EMAC_ELR) + at91_emac_read(lp, AT91_EMAC_USF);
851 dev->stats.rx_length_errors += lenerr; /* Excessive Length or Undersize Frame error */ 883 dev->stats.rx_length_errors += lenerr; /* Excessive Length or Undersize Frame error */
852 seqe = at91_emac_read(AT91_EMAC_SEQE); 884 seqe = at91_emac_read(lp, AT91_EMAC_SEQE);
853 dev->stats.rx_crc_errors += seqe; /* CRC error */ 885 dev->stats.rx_crc_errors += seqe; /* CRC error */
854 dev->stats.rx_fifo_errors += at91_emac_read(AT91_EMAC_DRFC); /* Receive buffer not available */ 886 dev->stats.rx_fifo_errors += at91_emac_read(lp, AT91_EMAC_DRFC);/* Receive buffer not available */
855 dev->stats.rx_errors += (ale + lenerr + seqe 887 dev->stats.rx_errors += (ale + lenerr + seqe
856 + at91_emac_read(AT91_EMAC_CDE) + at91_emac_read(AT91_EMAC_RJB)); 888 + at91_emac_read(lp, AT91_EMAC_CDE) + at91_emac_read(lp, AT91_EMAC_RJB));
857 889
858 dev->stats.tx_packets += at91_emac_read(AT91_EMAC_FRA); /* Frames successfully transmitted */ 890 dev->stats.tx_packets += at91_emac_read(lp, AT91_EMAC_FRA); /* Frames successfully transmitted */
859 dev->stats.tx_fifo_errors += at91_emac_read(AT91_EMAC_TUE); /* Transmit FIFO underruns */ 891 dev->stats.tx_fifo_errors += at91_emac_read(lp, AT91_EMAC_TUE); /* Transmit FIFO underruns */
860 dev->stats.tx_carrier_errors += at91_emac_read(AT91_EMAC_CSE); /* Carrier Sense errors */ 892 dev->stats.tx_carrier_errors += at91_emac_read(lp, AT91_EMAC_CSE); /* Carrier Sense errors */
861 dev->stats.tx_heartbeat_errors += at91_emac_read(AT91_EMAC_SQEE);/* Heartbeat error */ 893 dev->stats.tx_heartbeat_errors += at91_emac_read(lp, AT91_EMAC_SQEE);/* Heartbeat error */
862 894
863 lcol = at91_emac_read(AT91_EMAC_LCOL); 895 lcol = at91_emac_read(lp, AT91_EMAC_LCOL);
864 ecol = at91_emac_read(AT91_EMAC_ECOL); 896 ecol = at91_emac_read(lp, AT91_EMAC_ECOL);
865 dev->stats.tx_window_errors += lcol; /* Late collisions */ 897 dev->stats.tx_window_errors += lcol; /* Late collisions */
866 dev->stats.tx_aborted_errors += ecol; /* 16 collisions */ 898 dev->stats.tx_aborted_errors += ecol; /* 16 collisions */
867 899
868 dev->stats.collisions += (at91_emac_read(AT91_EMAC_SCOL) + at91_emac_read(AT91_EMAC_MCOL) + lcol + ecol); 900 dev->stats.collisions += (at91_emac_read(lp, AT91_EMAC_SCOL) + at91_emac_read(lp, AT91_EMAC_MCOL) + lcol + ecol);
869 } 901 }
870 return &dev->stats; 902 return &dev->stats;
871} 903}
@@ -922,7 +954,7 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
922 954
923 /* MAC Interrupt Status register indicates what interrupts are pending. 955 /* MAC Interrupt Status register indicates what interrupts are pending.
924 It is automatically cleared once read. */ 956 It is automatically cleared once read. */
925 intstatus = at91_emac_read(AT91_EMAC_ISR); 957 intstatus = at91_emac_read(lp, AT91_EMAC_ISR);
926 958
927 if (intstatus & AT91_EMAC_RCOM) /* Receive complete */ 959 if (intstatus & AT91_EMAC_RCOM) /* Receive complete */
928 at91ether_rx(dev); 960 at91ether_rx(dev);
@@ -942,9 +974,9 @@ static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
942 974
943 /* Work-around for Errata #11 */ 975 /* Work-around for Errata #11 */
944 if (intstatus & AT91_EMAC_RBNA) { 976 if (intstatus & AT91_EMAC_RBNA) {
945 ctl = at91_emac_read(AT91_EMAC_CTL); 977 ctl = at91_emac_read(lp, AT91_EMAC_CTL);
946 at91_emac_write(AT91_EMAC_CTL, ctl & ~AT91_EMAC_RE); 978 at91_emac_write(lp, AT91_EMAC_CTL, ctl & ~AT91_EMAC_RE);
947 at91_emac_write(AT91_EMAC_CTL, ctl | AT91_EMAC_RE); 979 at91_emac_write(lp, AT91_EMAC_CTL, ctl | AT91_EMAC_RE);
948 } 980 }
949 981
950 if (intstatus & AT91_EMAC_ROVR) 982 if (intstatus & AT91_EMAC_ROVR)
@@ -980,189 +1012,199 @@ static const struct net_device_ops at91ether_netdev_ops = {
980}; 1012};
981 1013
982/* 1014/*
983 * Initialize the ethernet interface 1015 * Detect the PHY type, and its address.
984 */ 1016 */
985static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_address, 1017static int __init at91ether_phy_detect(struct at91_private *lp)
986 struct platform_device *pdev, struct clk *ether_clk) 1018{
1019 unsigned int phyid1, phyid2;
1020 unsigned long phy_id;
1021 unsigned short phy_address = 0;
1022
1023 while (phy_address < PHY_MAX_ADDR) {
1024 /* Read the PHY ID registers */
1025 enable_mdi(lp);
1026 read_phy(lp, phy_address, MII_PHYSID1, &phyid1);
1027 read_phy(lp, phy_address, MII_PHYSID2, &phyid2);
1028 disable_mdi(lp);
1029
1030 phy_id = (phyid1 << 16) | (phyid2 & 0xfff0);
1031 switch (phy_id) {
1032 case MII_DM9161_ID: /* Davicom 9161: PHY_ID1 = 0x181, PHY_ID2 = B881 */
1033 case MII_DM9161A_ID: /* Davicom 9161A: PHY_ID1 = 0x181, PHY_ID2 = B8A0 */
1034 case MII_LXT971A_ID: /* Intel LXT971A: PHY_ID1 = 0x13, PHY_ID2 = 78E0 */
1035 case MII_RTL8201_ID: /* Realtek RTL8201: PHY_ID1 = 0, PHY_ID2 = 0x8201 */
1036 case MII_BCM5221_ID: /* Broadcom BCM5221: PHY_ID1 = 0x40, PHY_ID2 = 0x61e0 */
1037 case MII_DP83847_ID: /* National Semiconductor DP83847: */
1038 case MII_DP83848_ID: /* National Semiconductor DP83848: */
1039 case MII_AC101L_ID: /* Altima AC101L: PHY_ID1 = 0x22, PHY_ID2 = 0x5520 */
1040 case MII_KS8721_ID: /* Micrel KS8721: PHY_ID1 = 0x22, PHY_ID2 = 0x1610 */
1041 case MII_T78Q21x3_ID: /* Teridian 78Q21x3: PHY_ID1 = 0x0E, PHY_ID2 = 7237 */
1042 case MII_LAN83C185_ID: /* SMSC LAN83C185: PHY_ID1 = 0x0007, PHY_ID2 = 0xC0A1 */
1043 /* store detected values */
1044 lp->phy_type = phy_id; /* Type of PHY connected */
1045 lp->phy_address = phy_address; /* MDI address of PHY */
1046 return 1;
1047 }
1048
1049 phy_address++;
1050 }
1051
1052 return 0; /* not detected */
1053}
1054
1055
1056/*
1057 * Detect MAC & PHY and perform ethernet interface initialization
1058 */
1059static int __init at91ether_probe(struct platform_device *pdev)
987{ 1060{
988 struct macb_platform_data *board_data = pdev->dev.platform_data; 1061 struct macb_platform_data *board_data = pdev->dev.platform_data;
1062 struct resource *regs;
989 struct net_device *dev; 1063 struct net_device *dev;
990 struct at91_private *lp; 1064 struct at91_private *lp;
991 unsigned int val;
992 int res; 1065 int res;
993 1066
1067 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1068 if (!regs)
1069 return -ENOENT;
1070
994 dev = alloc_etherdev(sizeof(struct at91_private)); 1071 dev = alloc_etherdev(sizeof(struct at91_private));
995 if (!dev) 1072 if (!dev)
996 return -ENOMEM; 1073 return -ENOMEM;
997 1074
998 dev->base_addr = AT91_VA_BASE_EMAC; 1075 lp = netdev_priv(dev);
999 dev->irq = AT91RM9200_ID_EMAC; 1076 lp->board_data = *board_data;
1077 spin_lock_init(&lp->lock);
1078
1079 dev->base_addr = regs->start; /* physical base address */
1080 lp->emac_base = ioremap(regs->start, regs->end - regs->start + 1);
1081 if (!lp->emac_base) {
1082 res = -ENOMEM;
1083 goto err_free_dev;
1084 }
1085
1086 /* Clock */
1087 lp->ether_clk = clk_get(&pdev->dev, "ether_clk");
1088 if (IS_ERR(lp->ether_clk)) {
1089 res = -ENODEV;
1090 goto err_ioumap;
1091 }
1092 clk_enable(lp->ether_clk);
1000 1093
1001 /* Install the interrupt handler */ 1094 /* Install the interrupt handler */
1095 dev->irq = platform_get_irq(pdev, 0);
1002 if (request_irq(dev->irq, at91ether_interrupt, 0, dev->name, dev)) { 1096 if (request_irq(dev->irq, at91ether_interrupt, 0, dev->name, dev)) {
1003 free_netdev(dev); 1097 res = -EBUSY;
1004 return -EBUSY; 1098 goto err_disable_clock;
1005 } 1099 }
1006 1100
1007 /* Allocate memory for DMA Receive descriptors */ 1101 /* Allocate memory for DMA Receive descriptors */
1008 lp = netdev_priv(dev);
1009 lp->dlist = (struct recv_desc_bufs *) dma_alloc_coherent(NULL, sizeof(struct recv_desc_bufs), (dma_addr_t *) &lp->dlist_phys, GFP_KERNEL); 1102 lp->dlist = (struct recv_desc_bufs *) dma_alloc_coherent(NULL, sizeof(struct recv_desc_bufs), (dma_addr_t *) &lp->dlist_phys, GFP_KERNEL);
1010 if (lp->dlist == NULL) { 1103 if (lp->dlist == NULL) {
1011 free_irq(dev->irq, dev); 1104 res = -ENOMEM;
1012 free_netdev(dev); 1105 goto err_free_irq;
1013 return -ENOMEM;
1014 } 1106 }
1015 lp->board_data = *board_data;
1016 lp->ether_clk = ether_clk;
1017 platform_set_drvdata(pdev, dev);
1018
1019 spin_lock_init(&lp->lock);
1020 1107
1021 ether_setup(dev); 1108 ether_setup(dev);
1022 dev->netdev_ops = &at91ether_netdev_ops; 1109 dev->netdev_ops = &at91ether_netdev_ops;
1023 dev->ethtool_ops = &at91ether_ethtool_ops; 1110 dev->ethtool_ops = &at91ether_ethtool_ops;
1024 1111 platform_set_drvdata(pdev, dev);
1025 SET_NETDEV_DEV(dev, &pdev->dev); 1112 SET_NETDEV_DEV(dev, &pdev->dev);
1026 1113
1027 get_mac_address(dev); /* Get ethernet address and store it in dev->dev_addr */ 1114 get_mac_address(dev); /* Get ethernet address and store it in dev->dev_addr */
1028 update_mac_address(dev); /* Program ethernet address into MAC */ 1115 update_mac_address(dev); /* Program ethernet address into MAC */
1029 1116
1030 at91_emac_write(AT91_EMAC_CTL, 0); 1117 at91_emac_write(lp, AT91_EMAC_CTL, 0);
1031 1118
1032 if (lp->board_data.is_rmii) 1119 if (board_data->is_rmii)
1033 at91_emac_write(AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG | AT91_EMAC_RMII); 1120 at91_emac_write(lp, AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG | AT91_EMAC_RMII);
1034 else 1121 else
1035 at91_emac_write(AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG); 1122 at91_emac_write(lp, AT91_EMAC_CFG, AT91_EMAC_CLK_DIV32 | AT91_EMAC_BIG);
1036 1123
1037 /* Perform PHY-specific initialization */ 1124 /* Detect PHY */
1038 spin_lock_irq(&lp->lock); 1125 if (!at91ether_phy_detect(lp)) {
1039 enable_mdi(); 1126 printk(KERN_ERR "at91_ether: Could not detect ethernet PHY\n");
1040 if ((phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) { 1127 res = -ENODEV;
1041 read_phy(phy_address, MII_DSCR_REG, &val); 1128 goto err_free_dmamem;
1042 if ((val & (1 << 10)) == 0) /* DSCR bit 10 is 0 -- fiber mode */ 1129 }
1043 lp->phy_media = PORT_FIBRE;
1044 } else if (machine_is_csb337()) {
1045 /* mix link activity status into LED2 link state */
1046 write_phy(phy_address, MII_LEDCTRL_REG, 0x0d22);
1047 } else if (machine_is_ecbat91())
1048 write_phy(phy_address, MII_LEDCTRL_REG, 0x156A);
1049 1130
1050 disable_mdi(); 1131 initialize_phy(lp);
1051 spin_unlock_irq(&lp->lock);
1052 1132
1053 lp->mii.dev = dev; /* Support for ethtool */ 1133 lp->mii.dev = dev; /* Support for ethtool */
1054 lp->mii.mdio_read = mdio_read; 1134 lp->mii.mdio_read = mdio_read;
1055 lp->mii.mdio_write = mdio_write; 1135 lp->mii.mdio_write = mdio_write;
1056 lp->mii.phy_id = phy_address; 1136 lp->mii.phy_id = lp->phy_address;
1057 lp->mii.phy_id_mask = 0x1f; 1137 lp->mii.phy_id_mask = 0x1f;
1058 lp->mii.reg_num_mask = 0x1f; 1138 lp->mii.reg_num_mask = 0x1f;
1059 1139
1060 lp->phy_type = phy_type; /* Type of PHY connected */
1061 lp->phy_address = phy_address; /* MDI address of PHY */
1062
1063 /* Register the network interface */ 1140 /* Register the network interface */
1064 res = register_netdev(dev); 1141 res = register_netdev(dev);
1065 if (res) { 1142 if (res)
1066 free_irq(dev->irq, dev); 1143 goto err_free_dmamem;
1067 free_netdev(dev);
1068 dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys);
1069 return res;
1070 }
1071 1144
1072 /* Determine current link speed */ 1145 /* Determine current link speed */
1073 spin_lock_irq(&lp->lock); 1146 spin_lock_irq(&lp->lock);
1074 enable_mdi(); 1147 enable_mdi(lp);
1075 update_linkspeed(dev, 0); 1148 update_linkspeed(dev, 0);
1076 disable_mdi(); 1149 disable_mdi(lp);
1077 spin_unlock_irq(&lp->lock); 1150 spin_unlock_irq(&lp->lock);
1078 netif_carrier_off(dev); /* will be enabled in open() */ 1151 netif_carrier_off(dev); /* will be enabled in open() */
1079 1152
1080 /* If board has no PHY IRQ, use a timer to poll the PHY */ 1153 /* If board has no PHY IRQ, use a timer to poll the PHY */
1081 if (!gpio_is_valid(lp->board_data.phy_irq_pin)) { 1154 if (gpio_is_valid(lp->board_data.phy_irq_pin)) {
1155 gpio_request(board_data->phy_irq_pin, "ethernet_phy");
1156 } else {
1157 /* If board has no PHY IRQ, use a timer to poll the PHY */
1082 init_timer(&lp->check_timer); 1158 init_timer(&lp->check_timer);
1083 lp->check_timer.data = (unsigned long)dev; 1159 lp->check_timer.data = (unsigned long)dev;
1084 lp->check_timer.function = at91ether_check_link; 1160 lp->check_timer.function = at91ether_check_link;
1085 } else if (lp->board_data.phy_irq_pin >= 32) 1161 }
1086 gpio_request(lp->board_data.phy_irq_pin, "ethernet_phy");
1087 1162
1088 /* Display ethernet banner */ 1163 /* Display ethernet banner */
1089 printk(KERN_INFO "%s: AT91 ethernet at 0x%08x int=%d %s%s (%pM)\n", 1164 printk(KERN_INFO "%s: AT91 ethernet at 0x%08x int=%d %s%s (%pM)\n",
1090 dev->name, (uint) dev->base_addr, dev->irq, 1165 dev->name, (uint) dev->base_addr, dev->irq,
1091 at91_emac_read(AT91_EMAC_CFG) & AT91_EMAC_SPD ? "100-" : "10-", 1166 at91_emac_read(lp, AT91_EMAC_CFG) & AT91_EMAC_SPD ? "100-" : "10-",
1092 at91_emac_read(AT91_EMAC_CFG) & AT91_EMAC_FD ? "FullDuplex" : "HalfDuplex", 1167 at91_emac_read(lp, AT91_EMAC_CFG) & AT91_EMAC_FD ? "FullDuplex" : "HalfDuplex",
1093 dev->dev_addr); 1168 dev->dev_addr);
1094 if ((phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID)) 1169 if ((lp->phy_type == MII_DM9161_ID) || (lp->phy_type == MII_DM9161A_ID))
1095 printk(KERN_INFO "%s: Davicom 9161 PHY %s\n", dev->name, (lp->phy_media == PORT_FIBRE) ? "(Fiber)" : "(Copper)"); 1170 printk(KERN_INFO "%s: Davicom 9161 PHY %s\n", dev->name, (lp->phy_media == PORT_FIBRE) ? "(Fiber)" : "(Copper)");
1096 else if (phy_type == MII_LXT971A_ID) 1171 else if (lp->phy_type == MII_LXT971A_ID)
1097 printk(KERN_INFO "%s: Intel LXT971A PHY\n", dev->name); 1172 printk(KERN_INFO "%s: Intel LXT971A PHY\n", dev->name);
1098 else if (phy_type == MII_RTL8201_ID) 1173 else if (lp->phy_type == MII_RTL8201_ID)
1099 printk(KERN_INFO "%s: Realtek RTL8201(B)L PHY\n", dev->name); 1174 printk(KERN_INFO "%s: Realtek RTL8201(B)L PHY\n", dev->name);
1100 else if (phy_type == MII_BCM5221_ID) 1175 else if (lp->phy_type == MII_BCM5221_ID)
1101 printk(KERN_INFO "%s: Broadcom BCM5221 PHY\n", dev->name); 1176 printk(KERN_INFO "%s: Broadcom BCM5221 PHY\n", dev->name);
1102 else if (phy_type == MII_DP83847_ID) 1177 else if (lp->phy_type == MII_DP83847_ID)
1103 printk(KERN_INFO "%s: National Semiconductor DP83847 PHY\n", dev->name); 1178 printk(KERN_INFO "%s: National Semiconductor DP83847 PHY\n", dev->name);
1104 else if (phy_type == MII_DP83848_ID) 1179 else if (lp->phy_type == MII_DP83848_ID)
1105 printk(KERN_INFO "%s: National Semiconductor DP83848 PHY\n", dev->name); 1180 printk(KERN_INFO "%s: National Semiconductor DP83848 PHY\n", dev->name);
1106 else if (phy_type == MII_AC101L_ID) 1181 else if (lp->phy_type == MII_AC101L_ID)
1107 printk(KERN_INFO "%s: Altima AC101L PHY\n", dev->name); 1182 printk(KERN_INFO "%s: Altima AC101L PHY\n", dev->name);
1108 else if (phy_type == MII_KS8721_ID) 1183 else if (lp->phy_type == MII_KS8721_ID)
1109 printk(KERN_INFO "%s: Micrel KS8721 PHY\n", dev->name); 1184 printk(KERN_INFO "%s: Micrel KS8721 PHY\n", dev->name);
1110 else if (phy_type == MII_T78Q21x3_ID) 1185 else if (lp->phy_type == MII_T78Q21x3_ID)
1111 printk(KERN_INFO "%s: Teridian 78Q21x3 PHY\n", dev->name); 1186 printk(KERN_INFO "%s: Teridian 78Q21x3 PHY\n", dev->name);
1112 else if (phy_type == MII_LAN83C185_ID) 1187 else if (lp->phy_type == MII_LAN83C185_ID)
1113 printk(KERN_INFO "%s: SMSC LAN83C185 PHY\n", dev->name); 1188 printk(KERN_INFO "%s: SMSC LAN83C185 PHY\n", dev->name);
1114 1189
1115 return 0; 1190 clk_disable(lp->ether_clk); /* Disable Peripheral clock */
1116}
1117
1118/*
1119 * Detect MAC and PHY and perform initialization
1120 */
1121static int __init at91ether_probe(struct platform_device *pdev)
1122{
1123 unsigned int phyid1, phyid2;
1124 int detected = -1;
1125 unsigned long phy_id;
1126 unsigned short phy_address = 0;
1127 struct clk *ether_clk;
1128
1129 ether_clk = clk_get(&pdev->dev, "ether_clk");
1130 if (IS_ERR(ether_clk)) {
1131 printk(KERN_ERR "at91_ether: no clock defined\n");
1132 return -ENODEV;
1133 }
1134 clk_enable(ether_clk); /* Enable Peripheral clock */
1135
1136 while ((detected != 0) && (phy_address < 32)) {
1137 /* Read the PHY ID registers */
1138 enable_mdi();
1139 read_phy(phy_address, MII_PHYSID1, &phyid1);
1140 read_phy(phy_address, MII_PHYSID2, &phyid2);
1141 disable_mdi();
1142
1143 phy_id = (phyid1 << 16) | (phyid2 & 0xfff0);
1144 switch (phy_id) {
1145 case MII_DM9161_ID: /* Davicom 9161: PHY_ID1 = 0x181, PHY_ID2 = B881 */
1146 case MII_DM9161A_ID: /* Davicom 9161A: PHY_ID1 = 0x181, PHY_ID2 = B8A0 */
1147 case MII_LXT971A_ID: /* Intel LXT971A: PHY_ID1 = 0x13, PHY_ID2 = 78E0 */
1148 case MII_RTL8201_ID: /* Realtek RTL8201: PHY_ID1 = 0, PHY_ID2 = 0x8201 */
1149 case MII_BCM5221_ID: /* Broadcom BCM5221: PHY_ID1 = 0x40, PHY_ID2 = 0x61e0 */
1150 case MII_DP83847_ID: /* National Semiconductor DP83847: */
1151 case MII_DP83848_ID: /* National Semiconductor DP83848: */
1152 case MII_AC101L_ID: /* Altima AC101L: PHY_ID1 = 0x22, PHY_ID2 = 0x5520 */
1153 case MII_KS8721_ID: /* Micrel KS8721: PHY_ID1 = 0x22, PHY_ID2 = 0x1610 */
1154 case MII_T78Q21x3_ID: /* Teridian 78Q21x3: PHY_ID1 = 0x0E, PHY_ID2 = 7237 */
1155 case MII_LAN83C185_ID: /* SMSC LAN83C185: PHY_ID1 = 0x0007, PHY_ID2 = 0xC0A1 */
1156 detected = at91ether_setup(phy_id, phy_address, pdev, ether_clk);
1157 break;
1158 }
1159 1191
1160 phy_address++; 1192 return 0;
1161 }
1162 1193
1163 clk_disable(ether_clk); /* Disable Peripheral clock */
1164 1194
1165 return detected; 1195err_free_dmamem:
1196 platform_set_drvdata(pdev, NULL);
1197 dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys);
1198err_free_irq:
1199 free_irq(dev->irq, dev);
1200err_disable_clock:
1201 clk_disable(lp->ether_clk);
1202 clk_put(lp->ether_clk);
1203err_ioumap:
1204 iounmap(lp->emac_base);
1205err_free_dev:
1206 free_netdev(dev);
1207 return res;
1166} 1208}
1167 1209
1168static int __devexit at91ether_remove(struct platform_device *pdev) 1210static int __devexit at91ether_remove(struct platform_device *pdev)
@@ -1170,8 +1212,7 @@ static int __devexit at91ether_remove(struct platform_device *pdev)
1170 struct net_device *dev = platform_get_drvdata(pdev); 1212 struct net_device *dev = platform_get_drvdata(pdev);
1171 struct at91_private *lp = netdev_priv(dev); 1213 struct at91_private *lp = netdev_priv(dev);
1172 1214
1173 if (gpio_is_valid(lp->board_data.phy_irq_pin) && 1215 if (gpio_is_valid(lp->board_data.phy_irq_pin))
1174 lp->board_data.phy_irq_pin >= 32)
1175 gpio_free(lp->board_data.phy_irq_pin); 1216 gpio_free(lp->board_data.phy_irq_pin);
1176 1217
1177 unregister_netdev(dev); 1218 unregister_netdev(dev);
@@ -1193,7 +1234,7 @@ static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
1193 1234
1194 if (netif_running(net_dev)) { 1235 if (netif_running(net_dev)) {
1195 if (gpio_is_valid(lp->board_data.phy_irq_pin)) { 1236 if (gpio_is_valid(lp->board_data.phy_irq_pin)) {
1196 int phy_irq = lp->board_data.phy_irq_pin; 1237 int phy_irq = gpio_to_irq(lp->board_data.phy_irq_pin);
1197 disable_irq(phy_irq); 1238 disable_irq(phy_irq);
1198 } 1239 }
1199 1240
@@ -1217,7 +1258,7 @@ static int at91ether_resume(struct platform_device *pdev)
1217 netif_start_queue(net_dev); 1258 netif_start_queue(net_dev);
1218 1259
1219 if (gpio_is_valid(lp->board_data.phy_irq_pin)) { 1260 if (gpio_is_valid(lp->board_data.phy_irq_pin)) {
1220 int phy_irq = lp->board_data.phy_irq_pin; 1261 int phy_irq = gpio_to_irq(lp->board_data.phy_irq_pin);
1221 enable_irq(phy_irq); 1262 enable_irq(phy_irq);
1222 } 1263 }
1223 } 1264 }
diff --git a/drivers/net/ethernet/cadence/at91_ether.h b/drivers/net/ethernet/cadence/at91_ether.h
index 3725fbb0defe..0ef6328fa7f8 100644
--- a/drivers/net/ethernet/cadence/at91_ether.h
+++ b/drivers/net/ethernet/cadence/at91_ether.h
@@ -88,6 +88,7 @@ struct at91_private
88 struct macb_platform_data board_data; /* board-specific 88 struct macb_platform_data board_data; /* board-specific
89 * configuration (shared with 89 * configuration (shared with
90 * macb for common data */ 90 * macb for common data */
91 void __iomem *emac_base; /* base register address */
91 struct clk *ether_clk; /* clock */ 92 struct clk *ether_clk; /* clock */
92 93
93 /* PHY */ 94 /* PHY */
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index c4834c23be35..1466bc4e3dda 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -1213,6 +1213,7 @@ static const struct ethtool_ops macb_ethtool_ops = {
1213 .set_settings = macb_set_settings, 1213 .set_settings = macb_set_settings,
1214 .get_drvinfo = macb_get_drvinfo, 1214 .get_drvinfo = macb_get_drvinfo,
1215 .get_link = ethtool_op_get_link, 1215 .get_link = ethtool_op_get_link,
1216 .get_ts_info = ethtool_op_get_ts_info,
1216}; 1217};
1217 1218
1218static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1219static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 77b4e873f91c..d7ac6c17547c 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1193,18 +1193,16 @@ static int enic_get_vf_port(struct net_device *netdev, int vf,
1193 if (err) 1193 if (err)
1194 return err; 1194 return err;
1195 1195
1196 NLA_PUT_U16(skb, IFLA_PORT_REQUEST, pp->request); 1196 if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) ||
1197 NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response); 1197 nla_put_u16(skb, IFLA_PORT_RESPONSE, response) ||
1198 if (pp->set & ENIC_SET_NAME) 1198 ((pp->set & ENIC_SET_NAME) &&
1199 NLA_PUT(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, 1199 nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) ||
1200 pp->name); 1200 ((pp->set & ENIC_SET_INSTANCE) &&
1201 if (pp->set & ENIC_SET_INSTANCE) 1201 nla_put(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
1202 NLA_PUT(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX, 1202 pp->instance_uuid)) ||
1203 pp->instance_uuid); 1203 ((pp->set & ENIC_SET_HOST) &&
1204 if (pp->set & ENIC_SET_HOST) 1204 nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid)))
1205 NLA_PUT(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, 1205 goto nla_put_failure;
1206 pp->host_uuid);
1207
1208 return 0; 1206 return 0;
1209 1207
1210nla_put_failure: 1208nla_put_failure:
diff --git a/drivers/net/ethernet/cisco/enic/enic_pp.c b/drivers/net/ethernet/cisco/enic/enic_pp.c
index dafea1ecb7b1..43464f0a4f99 100644
--- a/drivers/net/ethernet/cisco/enic/enic_pp.c
+++ b/drivers/net/ethernet/cisco/enic/enic_pp.c
@@ -184,7 +184,7 @@ static int (*enic_pp_handlers[])(struct enic *enic, int vf,
184}; 184};
185 185
186static const int enic_pp_handlers_count = 186static const int enic_pp_handlers_count =
187 sizeof(enic_pp_handlers)/sizeof(*enic_pp_handlers); 187 ARRAY_SIZE(enic_pp_handlers);
188 188
189static int enic_pp_preassociate(struct enic *enic, int vf, 189static int enic_pp_preassociate(struct enic *enic, int vf,
190 struct enic_port_profile *prev_pp, int *restore_pp) 190 struct enic_port_profile *prev_pp, int *restore_pp)
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 68f1c39184df..61cc09342865 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -1380,6 +1380,7 @@ static void de_free_rings (struct de_private *de)
1380static int de_open (struct net_device *dev) 1380static int de_open (struct net_device *dev)
1381{ 1381{
1382 struct de_private *de = netdev_priv(dev); 1382 struct de_private *de = netdev_priv(dev);
1383 const int irq = de->pdev->irq;
1383 int rc; 1384 int rc;
1384 1385
1385 netif_dbg(de, ifup, dev, "enabling interface\n"); 1386 netif_dbg(de, ifup, dev, "enabling interface\n");
@@ -1394,10 +1395,9 @@ static int de_open (struct net_device *dev)
1394 1395
1395 dw32(IntrMask, 0); 1396 dw32(IntrMask, 0);
1396 1397
1397 rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev); 1398 rc = request_irq(irq, de_interrupt, IRQF_SHARED, dev->name, dev);
1398 if (rc) { 1399 if (rc) {
1399 netdev_err(dev, "IRQ %d request failure, err=%d\n", 1400 netdev_err(dev, "IRQ %d request failure, err=%d\n", irq, rc);
1400 dev->irq, rc);
1401 goto err_out_free; 1401 goto err_out_free;
1402 } 1402 }
1403 1403
@@ -1413,7 +1413,7 @@ static int de_open (struct net_device *dev)
1413 return 0; 1413 return 0;
1414 1414
1415err_out_free_irq: 1415err_out_free_irq:
1416 free_irq(dev->irq, dev); 1416 free_irq(irq, dev);
1417err_out_free: 1417err_out_free:
1418 de_free_rings(de); 1418 de_free_rings(de);
1419 return rc; 1419 return rc;
@@ -1434,7 +1434,7 @@ static int de_close (struct net_device *dev)
1434 netif_carrier_off(dev); 1434 netif_carrier_off(dev);
1435 spin_unlock_irqrestore(&de->lock, flags); 1435 spin_unlock_irqrestore(&de->lock, flags);
1436 1436
1437 free_irq(dev->irq, dev); 1437 free_irq(de->pdev->irq, dev);
1438 1438
1439 de_free_rings(de); 1439 de_free_rings(de);
1440 de_adapter_sleep(de); 1440 de_adapter_sleep(de);
@@ -1444,6 +1444,7 @@ static int de_close (struct net_device *dev)
1444static void de_tx_timeout (struct net_device *dev) 1444static void de_tx_timeout (struct net_device *dev)
1445{ 1445{
1446 struct de_private *de = netdev_priv(dev); 1446 struct de_private *de = netdev_priv(dev);
1447 const int irq = de->pdev->irq;
1447 1448
1448 netdev_dbg(dev, "NIC status %08x mode %08x sia %08x desc %u/%u/%u\n", 1449 netdev_dbg(dev, "NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
1449 dr32(MacStatus), dr32(MacMode), dr32(SIAStatus), 1450 dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
@@ -1451,7 +1452,7 @@ static void de_tx_timeout (struct net_device *dev)
1451 1452
1452 del_timer_sync(&de->media_timer); 1453 del_timer_sync(&de->media_timer);
1453 1454
1454 disable_irq(dev->irq); 1455 disable_irq(irq);
1455 spin_lock_irq(&de->lock); 1456 spin_lock_irq(&de->lock);
1456 1457
1457 de_stop_hw(de); 1458 de_stop_hw(de);
@@ -1459,12 +1460,12 @@ static void de_tx_timeout (struct net_device *dev)
1459 netif_carrier_off(dev); 1460 netif_carrier_off(dev);
1460 1461
1461 spin_unlock_irq(&de->lock); 1462 spin_unlock_irq(&de->lock);
1462 enable_irq(dev->irq); 1463 enable_irq(irq);
1463 1464
1464 /* Update the error counts. */ 1465 /* Update the error counts. */
1465 __de_get_stats(de); 1466 __de_get_stats(de);
1466 1467
1467 synchronize_irq(dev->irq); 1468 synchronize_irq(irq);
1468 de_clean_rings(de); 1469 de_clean_rings(de);
1469 1470
1470 de_init_rings(de); 1471 de_init_rings(de);
@@ -2024,8 +2025,6 @@ static int __devinit de_init_one (struct pci_dev *pdev,
2024 goto err_out_res; 2025 goto err_out_res;
2025 } 2026 }
2026 2027
2027 dev->irq = pdev->irq;
2028
2029 /* obtain and check validity of PCI I/O address */ 2028 /* obtain and check validity of PCI I/O address */
2030 pciaddr = pci_resource_start(pdev, 1); 2029 pciaddr = pci_resource_start(pdev, 1);
2031 if (!pciaddr) { 2030 if (!pciaddr) {
@@ -2050,7 +2049,6 @@ static int __devinit de_init_one (struct pci_dev *pdev,
2050 pciaddr, pci_name(pdev)); 2049 pciaddr, pci_name(pdev));
2051 goto err_out_res; 2050 goto err_out_res;
2052 } 2051 }
2053 dev->base_addr = (unsigned long) regs;
2054 de->regs = regs; 2052 de->regs = regs;
2055 2053
2056 de_adapter_wake(de); 2054 de_adapter_wake(de);
@@ -2078,11 +2076,9 @@ static int __devinit de_init_one (struct pci_dev *pdev,
2078 goto err_out_iomap; 2076 goto err_out_iomap;
2079 2077
2080 /* print info about board and interface just registered */ 2078 /* print info about board and interface just registered */
2081 netdev_info(dev, "%s at 0x%lx, %pM, IRQ %d\n", 2079 netdev_info(dev, "%s at %p, %pM, IRQ %d\n",
2082 de->de21040 ? "21040" : "21041", 2080 de->de21040 ? "21040" : "21041",
2083 dev->base_addr, 2081 regs, dev->dev_addr, pdev->irq);
2084 dev->dev_addr,
2085 dev->irq);
2086 2082
2087 pci_set_drvdata(pdev, dev); 2083 pci_set_drvdata(pdev, dev);
2088 2084
@@ -2130,9 +2126,11 @@ static int de_suspend (struct pci_dev *pdev, pm_message_t state)
2130 2126
2131 rtnl_lock(); 2127 rtnl_lock();
2132 if (netif_running (dev)) { 2128 if (netif_running (dev)) {
2129 const int irq = pdev->irq;
2130
2133 del_timer_sync(&de->media_timer); 2131 del_timer_sync(&de->media_timer);
2134 2132
2135 disable_irq(dev->irq); 2133 disable_irq(irq);
2136 spin_lock_irq(&de->lock); 2134 spin_lock_irq(&de->lock);
2137 2135
2138 de_stop_hw(de); 2136 de_stop_hw(de);
@@ -2141,12 +2139,12 @@ static int de_suspend (struct pci_dev *pdev, pm_message_t state)
2141 netif_carrier_off(dev); 2139 netif_carrier_off(dev);
2142 2140
2143 spin_unlock_irq(&de->lock); 2141 spin_unlock_irq(&de->lock);
2144 enable_irq(dev->irq); 2142 enable_irq(irq);
2145 2143
2146 /* Update the error counts. */ 2144 /* Update the error counts. */
2147 __de_get_stats(de); 2145 __de_get_stats(de);
2148 2146
2149 synchronize_irq(dev->irq); 2147 synchronize_irq(irq);
2150 de_clean_rings(de); 2148 de_clean_rings(de);
2151 2149
2152 de_adapter_sleep(de); 2150 de_adapter_sleep(de);
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 1eccf4945485..4d6fe604fa64 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -150,6 +150,12 @@
150#define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s" */ 150#define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s" */
151#define DMFE_TX_KICK (HZ/2) /* tx packet Kick-out time 0.5 s" */ 151#define DMFE_TX_KICK (HZ/2) /* tx packet Kick-out time 0.5 s" */
152 152
153#define dw32(reg, val) iowrite32(val, ioaddr + (reg))
154#define dw16(reg, val) iowrite16(val, ioaddr + (reg))
155#define dr32(reg) ioread32(ioaddr + (reg))
156#define dr16(reg) ioread16(ioaddr + (reg))
157#define dr8(reg) ioread8(ioaddr + (reg))
158
153#define DMFE_DBUG(dbug_now, msg, value) \ 159#define DMFE_DBUG(dbug_now, msg, value) \
154 do { \ 160 do { \
155 if (dmfe_debug || (dbug_now)) \ 161 if (dmfe_debug || (dbug_now)) \
@@ -178,14 +184,6 @@
178 184
179#define SROM_V41_CODE 0x14 185#define SROM_V41_CODE 0x14
180 186
181#define SROM_CLK_WRITE(data, ioaddr) \
182 outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
183 udelay(5); \
184 outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \
185 udelay(5); \
186 outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
187 udelay(5);
188
189#define __CHK_IO_SIZE(pci_id, dev_rev) \ 187#define __CHK_IO_SIZE(pci_id, dev_rev) \
190 (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x30) ) ? \ 188 (( ((pci_id)==PCI_DM9132_ID) || ((dev_rev) >= 0x30) ) ? \
191 DM9102A_IO_SIZE: DM9102_IO_SIZE) 189 DM9102A_IO_SIZE: DM9102_IO_SIZE)
@@ -213,11 +211,11 @@ struct rx_desc {
213struct dmfe_board_info { 211struct dmfe_board_info {
214 u32 chip_id; /* Chip vendor/Device ID */ 212 u32 chip_id; /* Chip vendor/Device ID */
215 u8 chip_revision; /* Chip revision */ 213 u8 chip_revision; /* Chip revision */
216 struct DEVICE *next_dev; /* next device */ 214 struct net_device *next_dev; /* next device */
217 struct pci_dev *pdev; /* PCI device */ 215 struct pci_dev *pdev; /* PCI device */
218 spinlock_t lock; 216 spinlock_t lock;
219 217
220 long ioaddr; /* I/O base address */ 218 void __iomem *ioaddr; /* I/O base address */
221 u32 cr0_data; 219 u32 cr0_data;
222 u32 cr5_data; 220 u32 cr5_data;
223 u32 cr6_data; 221 u32 cr6_data;
@@ -320,20 +318,20 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct DEVICE *);
320static int dmfe_stop(struct DEVICE *); 318static int dmfe_stop(struct DEVICE *);
321static void dmfe_set_filter_mode(struct DEVICE *); 319static void dmfe_set_filter_mode(struct DEVICE *);
322static const struct ethtool_ops netdev_ethtool_ops; 320static const struct ethtool_ops netdev_ethtool_ops;
323static u16 read_srom_word(long ,int); 321static u16 read_srom_word(void __iomem *, int);
324static irqreturn_t dmfe_interrupt(int , void *); 322static irqreturn_t dmfe_interrupt(int , void *);
325#ifdef CONFIG_NET_POLL_CONTROLLER 323#ifdef CONFIG_NET_POLL_CONTROLLER
326static void poll_dmfe (struct net_device *dev); 324static void poll_dmfe (struct net_device *dev);
327#endif 325#endif
328static void dmfe_descriptor_init(struct net_device *, unsigned long); 326static void dmfe_descriptor_init(struct net_device *);
329static void allocate_rx_buffer(struct net_device *); 327static void allocate_rx_buffer(struct net_device *);
330static void update_cr6(u32, unsigned long); 328static void update_cr6(u32, void __iomem *);
331static void send_filter_frame(struct DEVICE *); 329static void send_filter_frame(struct DEVICE *);
332static void dm9132_id_table(struct DEVICE *); 330static void dm9132_id_table(struct DEVICE *);
333static u16 phy_read(unsigned long, u8, u8, u32); 331static u16 phy_read(void __iomem *, u8, u8, u32);
334static void phy_write(unsigned long, u8, u8, u16, u32); 332static void phy_write(void __iomem *, u8, u8, u16, u32);
335static void phy_write_1bit(unsigned long, u32); 333static void phy_write_1bit(void __iomem *, u32);
336static u16 phy_read_1bit(unsigned long); 334static u16 phy_read_1bit(void __iomem *);
337static u8 dmfe_sense_speed(struct dmfe_board_info *); 335static u8 dmfe_sense_speed(struct dmfe_board_info *);
338static void dmfe_process_mode(struct dmfe_board_info *); 336static void dmfe_process_mode(struct dmfe_board_info *);
339static void dmfe_timer(unsigned long); 337static void dmfe_timer(unsigned long);
@@ -462,14 +460,16 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
462 db->buf_pool_dma_start = db->buf_pool_dma_ptr; 460 db->buf_pool_dma_start = db->buf_pool_dma_ptr;
463 461
464 db->chip_id = ent->driver_data; 462 db->chip_id = ent->driver_data;
465 db->ioaddr = pci_resource_start(pdev, 0); 463 /* IO type range. */
464 db->ioaddr = pci_iomap(pdev, 0, 0);
465 if (!db->ioaddr)
466 goto err_out_free_buf;
467
466 db->chip_revision = pdev->revision; 468 db->chip_revision = pdev->revision;
467 db->wol_mode = 0; 469 db->wol_mode = 0;
468 470
469 db->pdev = pdev; 471 db->pdev = pdev;
470 472
471 dev->base_addr = db->ioaddr;
472 dev->irq = pdev->irq;
473 pci_set_drvdata(pdev, dev); 473 pci_set_drvdata(pdev, dev);
474 dev->netdev_ops = &netdev_ops; 474 dev->netdev_ops = &netdev_ops;
475 dev->ethtool_ops = &netdev_ethtool_ops; 475 dev->ethtool_ops = &netdev_ethtool_ops;
@@ -484,9 +484,10 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
484 db->chip_type = 0; 484 db->chip_type = 0;
485 485
486 /* read 64 word srom data */ 486 /* read 64 word srom data */
487 for (i = 0; i < 64; i++) 487 for (i = 0; i < 64; i++) {
488 ((__le16 *) db->srom)[i] = 488 ((__le16 *) db->srom)[i] =
489 cpu_to_le16(read_srom_word(db->ioaddr, i)); 489 cpu_to_le16(read_srom_word(db->ioaddr, i));
490 }
490 491
491 /* Set Node address */ 492 /* Set Node address */
492 for (i = 0; i < 6; i++) 493 for (i = 0; i < 6; i++)
@@ -494,16 +495,18 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
494 495
495 err = register_netdev (dev); 496 err = register_netdev (dev);
496 if (err) 497 if (err)
497 goto err_out_free_buf; 498 goto err_out_unmap;
498 499
499 dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n", 500 dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n",
500 ent->driver_data >> 16, 501 ent->driver_data >> 16,
501 pci_name(pdev), dev->dev_addr, dev->irq); 502 pci_name(pdev), dev->dev_addr, pdev->irq);
502 503
503 pci_set_master(pdev); 504 pci_set_master(pdev);
504 505
505 return 0; 506 return 0;
506 507
508err_out_unmap:
509 pci_iounmap(pdev, db->ioaddr);
507err_out_free_buf: 510err_out_free_buf:
508 pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, 511 pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
509 db->buf_pool_ptr, db->buf_pool_dma_ptr); 512 db->buf_pool_ptr, db->buf_pool_dma_ptr);
@@ -532,7 +535,7 @@ static void __devexit dmfe_remove_one (struct pci_dev *pdev)
532 if (dev) { 535 if (dev) {
533 536
534 unregister_netdev(dev); 537 unregister_netdev(dev);
535 538 pci_iounmap(db->pdev, db->ioaddr);
536 pci_free_consistent(db->pdev, sizeof(struct tx_desc) * 539 pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
537 DESC_ALL_CNT + 0x20, db->desc_pool_ptr, 540 DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
538 db->desc_pool_dma_ptr); 541 db->desc_pool_dma_ptr);
@@ -555,13 +558,13 @@ static void __devexit dmfe_remove_one (struct pci_dev *pdev)
555 558
556static int dmfe_open(struct DEVICE *dev) 559static int dmfe_open(struct DEVICE *dev)
557{ 560{
558 int ret;
559 struct dmfe_board_info *db = netdev_priv(dev); 561 struct dmfe_board_info *db = netdev_priv(dev);
562 const int irq = db->pdev->irq;
563 int ret;
560 564
561 DMFE_DBUG(0, "dmfe_open", 0); 565 DMFE_DBUG(0, "dmfe_open", 0);
562 566
563 ret = request_irq(dev->irq, dmfe_interrupt, 567 ret = request_irq(irq, dmfe_interrupt, IRQF_SHARED, dev->name, dev);
564 IRQF_SHARED, dev->name, dev);
565 if (ret) 568 if (ret)
566 return ret; 569 return ret;
567 570
@@ -615,14 +618,14 @@ static int dmfe_open(struct DEVICE *dev)
615static void dmfe_init_dm910x(struct DEVICE *dev) 618static void dmfe_init_dm910x(struct DEVICE *dev)
616{ 619{
617 struct dmfe_board_info *db = netdev_priv(dev); 620 struct dmfe_board_info *db = netdev_priv(dev);
618 unsigned long ioaddr = db->ioaddr; 621 void __iomem *ioaddr = db->ioaddr;
619 622
620 DMFE_DBUG(0, "dmfe_init_dm910x()", 0); 623 DMFE_DBUG(0, "dmfe_init_dm910x()", 0);
621 624
622 /* Reset DM910x MAC controller */ 625 /* Reset DM910x MAC controller */
623 outl(DM910X_RESET, ioaddr + DCR0); /* RESET MAC */ 626 dw32(DCR0, DM910X_RESET); /* RESET MAC */
624 udelay(100); 627 udelay(100);
625 outl(db->cr0_data, ioaddr + DCR0); 628 dw32(DCR0, db->cr0_data);
626 udelay(5); 629 udelay(5);
627 630
628 /* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */ 631 /* Phy addr : DM910(A)2/DM9132/9801, phy address = 1 */
@@ -633,12 +636,12 @@ static void dmfe_init_dm910x(struct DEVICE *dev)
633 db->media_mode = dmfe_media_mode; 636 db->media_mode = dmfe_media_mode;
634 637
635 /* RESET Phyxcer Chip by GPR port bit 7 */ 638 /* RESET Phyxcer Chip by GPR port bit 7 */
636 outl(0x180, ioaddr + DCR12); /* Let bit 7 output port */ 639 dw32(DCR12, 0x180); /* Let bit 7 output port */
637 if (db->chip_id == PCI_DM9009_ID) { 640 if (db->chip_id == PCI_DM9009_ID) {
638 outl(0x80, ioaddr + DCR12); /* Issue RESET signal */ 641 dw32(DCR12, 0x80); /* Issue RESET signal */
639 mdelay(300); /* Delay 300 ms */ 642 mdelay(300); /* Delay 300 ms */
640 } 643 }
641 outl(0x0, ioaddr + DCR12); /* Clear RESET signal */ 644 dw32(DCR12, 0x0); /* Clear RESET signal */
642 645
643 /* Process Phyxcer Media Mode */ 646 /* Process Phyxcer Media Mode */
644 if ( !(db->media_mode & 0x10) ) /* Force 1M mode */ 647 if ( !(db->media_mode & 0x10) ) /* Force 1M mode */
@@ -649,7 +652,7 @@ static void dmfe_init_dm910x(struct DEVICE *dev)
649 db->op_mode = db->media_mode; /* Force Mode */ 652 db->op_mode = db->media_mode; /* Force Mode */
650 653
651 /* Initialize Transmit/Receive decriptor and CR3/4 */ 654 /* Initialize Transmit/Receive decriptor and CR3/4 */
652 dmfe_descriptor_init(dev, ioaddr); 655 dmfe_descriptor_init(dev);
653 656
654 /* Init CR6 to program DM910x operation */ 657 /* Init CR6 to program DM910x operation */
655 update_cr6(db->cr6_data, ioaddr); 658 update_cr6(db->cr6_data, ioaddr);
@@ -662,10 +665,10 @@ static void dmfe_init_dm910x(struct DEVICE *dev)
662 665
663 /* Init CR7, interrupt active bit */ 666 /* Init CR7, interrupt active bit */
664 db->cr7_data = CR7_DEFAULT; 667 db->cr7_data = CR7_DEFAULT;
665 outl(db->cr7_data, ioaddr + DCR7); 668 dw32(DCR7, db->cr7_data);
666 669
667 /* Init CR15, Tx jabber and Rx watchdog timer */ 670 /* Init CR15, Tx jabber and Rx watchdog timer */
668 outl(db->cr15_data, ioaddr + DCR15); 671 dw32(DCR15, db->cr15_data);
669 672
670 /* Enable DM910X Tx/Rx function */ 673 /* Enable DM910X Tx/Rx function */
671 db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000; 674 db->cr6_data |= CR6_RXSC | CR6_TXSC | 0x40000;
@@ -682,6 +685,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
682 struct DEVICE *dev) 685 struct DEVICE *dev)
683{ 686{
684 struct dmfe_board_info *db = netdev_priv(dev); 687 struct dmfe_board_info *db = netdev_priv(dev);
688 void __iomem *ioaddr = db->ioaddr;
685 struct tx_desc *txptr; 689 struct tx_desc *txptr;
686 unsigned long flags; 690 unsigned long flags;
687 691
@@ -707,7 +711,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
707 } 711 }
708 712
709 /* Disable NIC interrupt */ 713 /* Disable NIC interrupt */
710 outl(0, dev->base_addr + DCR7); 714 dw32(DCR7, 0);
711 715
712 /* transmit this packet */ 716 /* transmit this packet */
713 txptr = db->tx_insert_ptr; 717 txptr = db->tx_insert_ptr;
@@ -721,11 +725,11 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
721 if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) { 725 if ( (!db->tx_queue_cnt) && (db->tx_packet_cnt < TX_MAX_SEND_CNT) ) {
722 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */ 726 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
723 db->tx_packet_cnt++; /* Ready to send */ 727 db->tx_packet_cnt++; /* Ready to send */
724 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */ 728 dw32(DCR1, 0x1); /* Issue Tx polling */
725 dev->trans_start = jiffies; /* saved time stamp */ 729 dev->trans_start = jiffies; /* saved time stamp */
726 } else { 730 } else {
727 db->tx_queue_cnt++; /* queue TX packet */ 731 db->tx_queue_cnt++; /* queue TX packet */
728 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */ 732 dw32(DCR1, 0x1); /* Issue Tx polling */
729 } 733 }
730 734
731 /* Tx resource check */ 735 /* Tx resource check */
@@ -734,7 +738,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
734 738
735 /* Restore CR7 to enable interrupt */ 739 /* Restore CR7 to enable interrupt */
736 spin_unlock_irqrestore(&db->lock, flags); 740 spin_unlock_irqrestore(&db->lock, flags);
737 outl(db->cr7_data, dev->base_addr + DCR7); 741 dw32(DCR7, db->cr7_data);
738 742
739 /* free this SKB */ 743 /* free this SKB */
740 dev_kfree_skb(skb); 744 dev_kfree_skb(skb);
@@ -751,7 +755,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
751static int dmfe_stop(struct DEVICE *dev) 755static int dmfe_stop(struct DEVICE *dev)
752{ 756{
753 struct dmfe_board_info *db = netdev_priv(dev); 757 struct dmfe_board_info *db = netdev_priv(dev);
754 unsigned long ioaddr = dev->base_addr; 758 void __iomem *ioaddr = db->ioaddr;
755 759
756 DMFE_DBUG(0, "dmfe_stop", 0); 760 DMFE_DBUG(0, "dmfe_stop", 0);
757 761
@@ -762,12 +766,12 @@ static int dmfe_stop(struct DEVICE *dev)
762 del_timer_sync(&db->timer); 766 del_timer_sync(&db->timer);
763 767
764 /* Reset & stop DM910X board */ 768 /* Reset & stop DM910X board */
765 outl(DM910X_RESET, ioaddr + DCR0); 769 dw32(DCR0, DM910X_RESET);
766 udelay(5); 770 udelay(100);
767 phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id); 771 phy_write(ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
768 772
769 /* free interrupt */ 773 /* free interrupt */
770 free_irq(dev->irq, dev); 774 free_irq(db->pdev->irq, dev);
771 775
772 /* free allocated rx buffer */ 776 /* free allocated rx buffer */
773 dmfe_free_rxbuffer(db); 777 dmfe_free_rxbuffer(db);
@@ -794,7 +798,7 @@ static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
794{ 798{
795 struct DEVICE *dev = dev_id; 799 struct DEVICE *dev = dev_id;
796 struct dmfe_board_info *db = netdev_priv(dev); 800 struct dmfe_board_info *db = netdev_priv(dev);
797 unsigned long ioaddr = dev->base_addr; 801 void __iomem *ioaddr = db->ioaddr;
798 unsigned long flags; 802 unsigned long flags;
799 803
800 DMFE_DBUG(0, "dmfe_interrupt()", 0); 804 DMFE_DBUG(0, "dmfe_interrupt()", 0);
@@ -802,15 +806,15 @@ static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
802 spin_lock_irqsave(&db->lock, flags); 806 spin_lock_irqsave(&db->lock, flags);
803 807
804 /* Got DM910X status */ 808 /* Got DM910X status */
805 db->cr5_data = inl(ioaddr + DCR5); 809 db->cr5_data = dr32(DCR5);
806 outl(db->cr5_data, ioaddr + DCR5); 810 dw32(DCR5, db->cr5_data);
807 if ( !(db->cr5_data & 0xc1) ) { 811 if ( !(db->cr5_data & 0xc1) ) {
808 spin_unlock_irqrestore(&db->lock, flags); 812 spin_unlock_irqrestore(&db->lock, flags);
809 return IRQ_HANDLED; 813 return IRQ_HANDLED;
810 } 814 }
811 815
812 /* Disable all interrupt in CR7 to solve the interrupt edge problem */ 816 /* Disable all interrupt in CR7 to solve the interrupt edge problem */
813 outl(0, ioaddr + DCR7); 817 dw32(DCR7, 0);
814 818
815 /* Check system status */ 819 /* Check system status */
816 if (db->cr5_data & 0x2000) { 820 if (db->cr5_data & 0x2000) {
@@ -838,11 +842,11 @@ static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
838 if (db->dm910x_chk_mode & 0x2) { 842 if (db->dm910x_chk_mode & 0x2) {
839 db->dm910x_chk_mode = 0x4; 843 db->dm910x_chk_mode = 0x4;
840 db->cr6_data |= 0x100; 844 db->cr6_data |= 0x100;
841 update_cr6(db->cr6_data, db->ioaddr); 845 update_cr6(db->cr6_data, ioaddr);
842 } 846 }
843 847
844 /* Restore CR7 to enable interrupt mask */ 848 /* Restore CR7 to enable interrupt mask */
845 outl(db->cr7_data, ioaddr + DCR7); 849 dw32(DCR7, db->cr7_data);
846 850
847 spin_unlock_irqrestore(&db->lock, flags); 851 spin_unlock_irqrestore(&db->lock, flags);
848 return IRQ_HANDLED; 852 return IRQ_HANDLED;
@@ -858,11 +862,14 @@ static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
858 862
859static void poll_dmfe (struct net_device *dev) 863static void poll_dmfe (struct net_device *dev)
860{ 864{
865 struct dmfe_board_info *db = netdev_priv(dev);
866 const int irq = db->pdev->irq;
867
861 /* disable_irq here is not very nice, but with the lockless 868 /* disable_irq here is not very nice, but with the lockless
862 interrupt handler we have no other choice. */ 869 interrupt handler we have no other choice. */
863 disable_irq(dev->irq); 870 disable_irq(irq);
864 dmfe_interrupt (dev->irq, dev); 871 dmfe_interrupt (irq, dev);
865 enable_irq(dev->irq); 872 enable_irq(irq);
866} 873}
867#endif 874#endif
868 875
@@ -873,7 +880,7 @@ static void poll_dmfe (struct net_device *dev)
873static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db) 880static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
874{ 881{
875 struct tx_desc *txptr; 882 struct tx_desc *txptr;
876 unsigned long ioaddr = dev->base_addr; 883 void __iomem *ioaddr = db->ioaddr;
877 u32 tdes0; 884 u32 tdes0;
878 885
879 txptr = db->tx_remove_ptr; 886 txptr = db->tx_remove_ptr;
@@ -897,7 +904,7 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
897 db->tx_fifo_underrun++; 904 db->tx_fifo_underrun++;
898 if ( !(db->cr6_data & CR6_SFT) ) { 905 if ( !(db->cr6_data & CR6_SFT) ) {
899 db->cr6_data = db->cr6_data | CR6_SFT; 906 db->cr6_data = db->cr6_data | CR6_SFT;
900 update_cr6(db->cr6_data, db->ioaddr); 907 update_cr6(db->cr6_data, ioaddr);
901 } 908 }
902 } 909 }
903 if (tdes0 & 0x0100) 910 if (tdes0 & 0x0100)
@@ -924,7 +931,7 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
924 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */ 931 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
925 db->tx_packet_cnt++; /* Ready to send */ 932 db->tx_packet_cnt++; /* Ready to send */
926 db->tx_queue_cnt--; 933 db->tx_queue_cnt--;
927 outl(0x1, ioaddr + DCR1); /* Issue Tx polling */ 934 dw32(DCR1, 0x1); /* Issue Tx polling */
928 dev->trans_start = jiffies; /* saved time stamp */ 935 dev->trans_start = jiffies; /* saved time stamp */
929 } 936 }
930 937
@@ -1087,12 +1094,7 @@ static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
1087 1094
1088 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 1095 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1089 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 1096 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1090 if (np->pdev) 1097 strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
1091 strlcpy(info->bus_info, pci_name(np->pdev),
1092 sizeof(info->bus_info));
1093 else
1094 sprintf(info->bus_info, "EISA 0x%lx %d",
1095 dev->base_addr, dev->irq);
1096} 1098}
1097 1099
1098static int dmfe_ethtool_set_wol(struct net_device *dev, 1100static int dmfe_ethtool_set_wol(struct net_device *dev,
@@ -1132,10 +1134,11 @@ static const struct ethtool_ops netdev_ethtool_ops = {
1132 1134
1133static void dmfe_timer(unsigned long data) 1135static void dmfe_timer(unsigned long data)
1134{ 1136{
1137 struct net_device *dev = (struct net_device *)data;
1138 struct dmfe_board_info *db = netdev_priv(dev);
1139 void __iomem *ioaddr = db->ioaddr;
1135 u32 tmp_cr8; 1140 u32 tmp_cr8;
1136 unsigned char tmp_cr12; 1141 unsigned char tmp_cr12;
1137 struct DEVICE *dev = (struct DEVICE *) data;
1138 struct dmfe_board_info *db = netdev_priv(dev);
1139 unsigned long flags; 1142 unsigned long flags;
1140 1143
1141 int link_ok, link_ok_phy; 1144 int link_ok, link_ok_phy;
@@ -1148,11 +1151,10 @@ static void dmfe_timer(unsigned long data)
1148 db->first_in_callback = 1; 1151 db->first_in_callback = 1;
1149 if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) { 1152 if (db->chip_type && (db->chip_id==PCI_DM9102_ID)) {
1150 db->cr6_data &= ~0x40000; 1153 db->cr6_data &= ~0x40000;
1151 update_cr6(db->cr6_data, db->ioaddr); 1154 update_cr6(db->cr6_data, ioaddr);
1152 phy_write(db->ioaddr, 1155 phy_write(ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
1153 db->phy_addr, 0, 0x1000, db->chip_id);
1154 db->cr6_data |= 0x40000; 1156 db->cr6_data |= 0x40000;
1155 update_cr6(db->cr6_data, db->ioaddr); 1157 update_cr6(db->cr6_data, ioaddr);
1156 db->timer.expires = DMFE_TIMER_WUT + HZ * 2; 1158 db->timer.expires = DMFE_TIMER_WUT + HZ * 2;
1157 add_timer(&db->timer); 1159 add_timer(&db->timer);
1158 spin_unlock_irqrestore(&db->lock, flags); 1160 spin_unlock_irqrestore(&db->lock, flags);
@@ -1167,7 +1169,7 @@ static void dmfe_timer(unsigned long data)
1167 db->dm910x_chk_mode = 0x4; 1169 db->dm910x_chk_mode = 0x4;
1168 1170
1169 /* Dynamic reset DM910X : system error or transmit time-out */ 1171 /* Dynamic reset DM910X : system error or transmit time-out */
1170 tmp_cr8 = inl(db->ioaddr + DCR8); 1172 tmp_cr8 = dr32(DCR8);
1171 if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) { 1173 if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
1172 db->reset_cr8++; 1174 db->reset_cr8++;
1173 db->wait_reset = 1; 1175 db->wait_reset = 1;
@@ -1177,7 +1179,7 @@ static void dmfe_timer(unsigned long data)
1177 /* TX polling kick monitor */ 1179 /* TX polling kick monitor */
1178 if ( db->tx_packet_cnt && 1180 if ( db->tx_packet_cnt &&
1179 time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK) ) { 1181 time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK) ) {
1180 outl(0x1, dev->base_addr + DCR1); /* Tx polling again */ 1182 dw32(DCR1, 0x1); /* Tx polling again */
1181 1183
1182 /* TX Timeout */ 1184 /* TX Timeout */
1183 if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT) ) { 1185 if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT) ) {
@@ -1200,9 +1202,9 @@ static void dmfe_timer(unsigned long data)
1200 1202
1201 /* Link status check, Dynamic media type change */ 1203 /* Link status check, Dynamic media type change */
1202 if (db->chip_id == PCI_DM9132_ID) 1204 if (db->chip_id == PCI_DM9132_ID)
1203 tmp_cr12 = inb(db->ioaddr + DCR9 + 3); /* DM9132 */ 1205 tmp_cr12 = dr8(DCR9 + 3); /* DM9132 */
1204 else 1206 else
1205 tmp_cr12 = inb(db->ioaddr + DCR12); /* DM9102/DM9102A */ 1207 tmp_cr12 = dr8(DCR12); /* DM9102/DM9102A */
1206 1208
1207 if ( ((db->chip_id == PCI_DM9102_ID) && 1209 if ( ((db->chip_id == PCI_DM9102_ID) &&
1208 (db->chip_revision == 0x30)) || 1210 (db->chip_revision == 0x30)) ||
@@ -1251,7 +1253,7 @@ static void dmfe_timer(unsigned long data)
1251 /* 10/100M link failed, used 1M Home-Net */ 1253 /* 10/100M link failed, used 1M Home-Net */
1252 db->cr6_data|=0x00040000; /* bit18=1, MII */ 1254 db->cr6_data|=0x00040000; /* bit18=1, MII */
1253 db->cr6_data&=~0x00000200; /* bit9=0, HD mode */ 1255 db->cr6_data&=~0x00000200; /* bit9=0, HD mode */
1254 update_cr6(db->cr6_data, db->ioaddr); 1256 update_cr6(db->cr6_data, ioaddr);
1255 } 1257 }
1256 } else if (!netif_carrier_ok(dev)) { 1258 } else if (!netif_carrier_ok(dev)) {
1257 1259
@@ -1288,17 +1290,18 @@ static void dmfe_timer(unsigned long data)
1288 * Re-initialize DM910X board 1290 * Re-initialize DM910X board
1289 */ 1291 */
1290 1292
1291static void dmfe_dynamic_reset(struct DEVICE *dev) 1293static void dmfe_dynamic_reset(struct net_device *dev)
1292{ 1294{
1293 struct dmfe_board_info *db = netdev_priv(dev); 1295 struct dmfe_board_info *db = netdev_priv(dev);
1296 void __iomem *ioaddr = db->ioaddr;
1294 1297
1295 DMFE_DBUG(0, "dmfe_dynamic_reset()", 0); 1298 DMFE_DBUG(0, "dmfe_dynamic_reset()", 0);
1296 1299
1297 /* Sopt MAC controller */ 1300 /* Sopt MAC controller */
1298 db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */ 1301 db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */
1299 update_cr6(db->cr6_data, dev->base_addr); 1302 update_cr6(db->cr6_data, ioaddr);
1300 outl(0, dev->base_addr + DCR7); /* Disable Interrupt */ 1303 dw32(DCR7, 0); /* Disable Interrupt */
1301 outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5); 1304 dw32(DCR5, dr32(DCR5));
1302 1305
1303 /* Disable upper layer interface */ 1306 /* Disable upper layer interface */
1304 netif_stop_queue(dev); 1307 netif_stop_queue(dev);
@@ -1364,9 +1367,10 @@ static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
1364 * Using Chain structure, and allocate Tx/Rx buffer 1367 * Using Chain structure, and allocate Tx/Rx buffer
1365 */ 1368 */
1366 1369
1367static void dmfe_descriptor_init(struct net_device *dev, unsigned long ioaddr) 1370static void dmfe_descriptor_init(struct net_device *dev)
1368{ 1371{
1369 struct dmfe_board_info *db = netdev_priv(dev); 1372 struct dmfe_board_info *db = netdev_priv(dev);
1373 void __iomem *ioaddr = db->ioaddr;
1370 struct tx_desc *tmp_tx; 1374 struct tx_desc *tmp_tx;
1371 struct rx_desc *tmp_rx; 1375 struct rx_desc *tmp_rx;
1372 unsigned char *tmp_buf; 1376 unsigned char *tmp_buf;
@@ -1379,7 +1383,7 @@ static void dmfe_descriptor_init(struct net_device *dev, unsigned long ioaddr)
1379 /* tx descriptor start pointer */ 1383 /* tx descriptor start pointer */
1380 db->tx_insert_ptr = db->first_tx_desc; 1384 db->tx_insert_ptr = db->first_tx_desc;
1381 db->tx_remove_ptr = db->first_tx_desc; 1385 db->tx_remove_ptr = db->first_tx_desc;
1382 outl(db->first_tx_desc_dma, ioaddr + DCR4); /* TX DESC address */ 1386 dw32(DCR4, db->first_tx_desc_dma); /* TX DESC address */
1383 1387
1384 /* rx descriptor start pointer */ 1388 /* rx descriptor start pointer */
1385 db->first_rx_desc = (void *)db->first_tx_desc + 1389 db->first_rx_desc = (void *)db->first_tx_desc +
@@ -1389,7 +1393,7 @@ static void dmfe_descriptor_init(struct net_device *dev, unsigned long ioaddr)
1389 sizeof(struct tx_desc) * TX_DESC_CNT; 1393 sizeof(struct tx_desc) * TX_DESC_CNT;
1390 db->rx_insert_ptr = db->first_rx_desc; 1394 db->rx_insert_ptr = db->first_rx_desc;
1391 db->rx_ready_ptr = db->first_rx_desc; 1395 db->rx_ready_ptr = db->first_rx_desc;
1392 outl(db->first_rx_desc_dma, ioaddr + DCR3); /* RX DESC address */ 1396 dw32(DCR3, db->first_rx_desc_dma); /* RX DESC address */
1393 1397
1394 /* Init Transmit chain */ 1398 /* Init Transmit chain */
1395 tmp_buf = db->buf_pool_start; 1399 tmp_buf = db->buf_pool_start;
@@ -1431,14 +1435,14 @@ static void dmfe_descriptor_init(struct net_device *dev, unsigned long ioaddr)
1431 * Firstly stop DM910X , then written value and start 1435 * Firstly stop DM910X , then written value and start
1432 */ 1436 */
1433 1437
1434static void update_cr6(u32 cr6_data, unsigned long ioaddr) 1438static void update_cr6(u32 cr6_data, void __iomem *ioaddr)
1435{ 1439{
1436 u32 cr6_tmp; 1440 u32 cr6_tmp;
1437 1441
1438 cr6_tmp = cr6_data & ~0x2002; /* stop Tx/Rx */ 1442 cr6_tmp = cr6_data & ~0x2002; /* stop Tx/Rx */
1439 outl(cr6_tmp, ioaddr + DCR6); 1443 dw32(DCR6, cr6_tmp);
1440 udelay(5); 1444 udelay(5);
1441 outl(cr6_data, ioaddr + DCR6); 1445 dw32(DCR6, cr6_data);
1442 udelay(5); 1446 udelay(5);
1443} 1447}
1444 1448
@@ -1448,24 +1452,19 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr)
1448 * This setup frame initialize DM910X address filter mode 1452 * This setup frame initialize DM910X address filter mode
1449*/ 1453*/
1450 1454
1451static void dm9132_id_table(struct DEVICE *dev) 1455static void dm9132_id_table(struct net_device *dev)
1452{ 1456{
1457 struct dmfe_board_info *db = netdev_priv(dev);
1458 void __iomem *ioaddr = db->ioaddr + 0xc0;
1459 u16 *addrptr = (u16 *)dev->dev_addr;
1453 struct netdev_hw_addr *ha; 1460 struct netdev_hw_addr *ha;
1454 u16 * addrptr;
1455 unsigned long ioaddr = dev->base_addr+0xc0; /* ID Table */
1456 u32 hash_val;
1457 u16 i, hash_table[4]; 1461 u16 i, hash_table[4];
1458 1462
1459 DMFE_DBUG(0, "dm9132_id_table()", 0);
1460
1461 /* Node address */ 1463 /* Node address */
1462 addrptr = (u16 *) dev->dev_addr; 1464 for (i = 0; i < 3; i++) {
1463 outw(addrptr[0], ioaddr); 1465 dw16(0, addrptr[i]);
1464 ioaddr += 4; 1466 ioaddr += 4;
1465 outw(addrptr[1], ioaddr); 1467 }
1466 ioaddr += 4;
1467 outw(addrptr[2], ioaddr);
1468 ioaddr += 4;
1469 1468
1470 /* Clear Hash Table */ 1469 /* Clear Hash Table */
1471 memset(hash_table, 0, sizeof(hash_table)); 1470 memset(hash_table, 0, sizeof(hash_table));
@@ -1475,13 +1474,14 @@ static void dm9132_id_table(struct DEVICE *dev)
1475 1474
1476 /* the multicast address in Hash Table : 64 bits */ 1475 /* the multicast address in Hash Table : 64 bits */
1477 netdev_for_each_mc_addr(ha, dev) { 1476 netdev_for_each_mc_addr(ha, dev) {
1478 hash_val = cal_CRC((char *) ha->addr, 6, 0) & 0x3f; 1477 u32 hash_val = cal_CRC((char *)ha->addr, 6, 0) & 0x3f;
1478
1479 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16); 1479 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
1480 } 1480 }
1481 1481
1482 /* Write the hash table to MAC MD table */ 1482 /* Write the hash table to MAC MD table */
1483 for (i = 0; i < 4; i++, ioaddr += 4) 1483 for (i = 0; i < 4; i++, ioaddr += 4)
1484 outw(hash_table[i], ioaddr); 1484 dw16(0, hash_table[i]);
1485} 1485}
1486 1486
1487 1487
@@ -1490,7 +1490,7 @@ static void dm9132_id_table(struct DEVICE *dev)
1490 * This setup frame initialize DM910X address filter mode 1490 * This setup frame initialize DM910X address filter mode
1491 */ 1491 */
1492 1492
1493static void send_filter_frame(struct DEVICE *dev) 1493static void send_filter_frame(struct net_device *dev)
1494{ 1494{
1495 struct dmfe_board_info *db = netdev_priv(dev); 1495 struct dmfe_board_info *db = netdev_priv(dev);
1496 struct netdev_hw_addr *ha; 1496 struct netdev_hw_addr *ha;
@@ -1535,12 +1535,14 @@ static void send_filter_frame(struct DEVICE *dev)
1535 1535
1536 /* Resource Check and Send the setup packet */ 1536 /* Resource Check and Send the setup packet */
1537 if (!db->tx_packet_cnt) { 1537 if (!db->tx_packet_cnt) {
1538 void __iomem *ioaddr = db->ioaddr;
1539
1538 /* Resource Empty */ 1540 /* Resource Empty */
1539 db->tx_packet_cnt++; 1541 db->tx_packet_cnt++;
1540 txptr->tdes0 = cpu_to_le32(0x80000000); 1542 txptr->tdes0 = cpu_to_le32(0x80000000);
1541 update_cr6(db->cr6_data | 0x2000, dev->base_addr); 1543 update_cr6(db->cr6_data | 0x2000, ioaddr);
1542 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */ 1544 dw32(DCR1, 0x1); /* Issue Tx polling */
1543 update_cr6(db->cr6_data, dev->base_addr); 1545 update_cr6(db->cr6_data, ioaddr);
1544 dev->trans_start = jiffies; 1546 dev->trans_start = jiffies;
1545 } else 1547 } else
1546 db->tx_queue_cnt++; /* Put in TX queue */ 1548 db->tx_queue_cnt++; /* Put in TX queue */
@@ -1575,43 +1577,59 @@ static void allocate_rx_buffer(struct net_device *dev)
1575 db->rx_insert_ptr = rxptr; 1577 db->rx_insert_ptr = rxptr;
1576} 1578}
1577 1579
1580static void srom_clk_write(void __iomem *ioaddr, u32 data)
1581{
1582 static const u32 cmd[] = {
1583 CR9_SROM_READ | CR9_SRCS,
1584 CR9_SROM_READ | CR9_SRCS | CR9_SRCLK,
1585 CR9_SROM_READ | CR9_SRCS
1586 };
1587 int i;
1588
1589 for (i = 0; i < ARRAY_SIZE(cmd); i++) {
1590 dw32(DCR9, data | cmd[i]);
1591 udelay(5);
1592 }
1593}
1578 1594
1579/* 1595/*
1580 * Read one word data from the serial ROM 1596 * Read one word data from the serial ROM
1581 */ 1597 */
1582 1598static u16 read_srom_word(void __iomem *ioaddr, int offset)
1583static u16 read_srom_word(long ioaddr, int offset)
1584{ 1599{
1600 u16 srom_data;
1585 int i; 1601 int i;
1586 u16 srom_data = 0;
1587 long cr9_ioaddr = ioaddr + DCR9;
1588 1602
1589 outl(CR9_SROM_READ, cr9_ioaddr); 1603 dw32(DCR9, CR9_SROM_READ);
1590 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); 1604 udelay(5);
1605 dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1606 udelay(5);
1591 1607
1592 /* Send the Read Command 110b */ 1608 /* Send the Read Command 110b */
1593 SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr); 1609 srom_clk_write(ioaddr, SROM_DATA_1);
1594 SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr); 1610 srom_clk_write(ioaddr, SROM_DATA_1);
1595 SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr); 1611 srom_clk_write(ioaddr, SROM_DATA_0);
1596 1612
1597 /* Send the offset */ 1613 /* Send the offset */
1598 for (i = 5; i >= 0; i--) { 1614 for (i = 5; i >= 0; i--) {
1599 srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0; 1615 srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
1600 SROM_CLK_WRITE(srom_data, cr9_ioaddr); 1616 srom_clk_write(ioaddr, srom_data);
1601 } 1617 }
1602 1618
1603 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); 1619 dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1620 udelay(5);
1604 1621
1605 for (i = 16; i > 0; i--) { 1622 for (i = 16; i > 0; i--) {
1606 outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr); 1623 dw32(DCR9, CR9_SROM_READ | CR9_SRCS | CR9_SRCLK);
1607 udelay(5); 1624 udelay(5);
1608 srom_data = (srom_data << 1) | 1625 srom_data = (srom_data << 1) |
1609 ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0); 1626 ((dr32(DCR9) & CR9_CRDOUT) ? 1 : 0);
1610 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); 1627 dw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1611 udelay(5); 1628 udelay(5);
1612 } 1629 }
1613 1630
1614 outl(CR9_SROM_READ, cr9_ioaddr); 1631 dw32(DCR9, CR9_SROM_READ);
1632 udelay(5);
1615 return srom_data; 1633 return srom_data;
1616} 1634}
1617 1635
@@ -1620,13 +1638,14 @@ static u16 read_srom_word(long ioaddr, int offset)
1620 * Auto sense the media mode 1638 * Auto sense the media mode
1621 */ 1639 */
1622 1640
1623static u8 dmfe_sense_speed(struct dmfe_board_info * db) 1641static u8 dmfe_sense_speed(struct dmfe_board_info *db)
1624{ 1642{
1643 void __iomem *ioaddr = db->ioaddr;
1625 u8 ErrFlag = 0; 1644 u8 ErrFlag = 0;
1626 u16 phy_mode; 1645 u16 phy_mode;
1627 1646
1628 /* CR6 bit18=0, select 10/100M */ 1647 /* CR6 bit18=0, select 10/100M */
1629 update_cr6( (db->cr6_data & ~0x40000), db->ioaddr); 1648 update_cr6(db->cr6_data & ~0x40000, ioaddr);
1630 1649
1631 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); 1650 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
1632 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); 1651 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
@@ -1665,11 +1684,12 @@ static u8 dmfe_sense_speed(struct dmfe_board_info * db)
1665 1684
1666static void dmfe_set_phyxcer(struct dmfe_board_info *db) 1685static void dmfe_set_phyxcer(struct dmfe_board_info *db)
1667{ 1686{
1687 void __iomem *ioaddr = db->ioaddr;
1668 u16 phy_reg; 1688 u16 phy_reg;
1669 1689
1670 /* Select 10/100M phyxcer */ 1690 /* Select 10/100M phyxcer */
1671 db->cr6_data &= ~0x40000; 1691 db->cr6_data &= ~0x40000;
1672 update_cr6(db->cr6_data, db->ioaddr); 1692 update_cr6(db->cr6_data, ioaddr);
1673 1693
1674 /* DM9009 Chip: Phyxcer reg18 bit12=0 */ 1694 /* DM9009 Chip: Phyxcer reg18 bit12=0 */
1675 if (db->chip_id == PCI_DM9009_ID) { 1695 if (db->chip_id == PCI_DM9009_ID) {
@@ -1765,18 +1785,15 @@ static void dmfe_process_mode(struct dmfe_board_info *db)
1765 * Write a word to Phy register 1785 * Write a word to Phy register
1766 */ 1786 */
1767 1787
1768static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, 1788static void phy_write(void __iomem *ioaddr, u8 phy_addr, u8 offset,
1769 u16 phy_data, u32 chip_id) 1789 u16 phy_data, u32 chip_id)
1770{ 1790{
1771 u16 i; 1791 u16 i;
1772 unsigned long ioaddr;
1773 1792
1774 if (chip_id == PCI_DM9132_ID) { 1793 if (chip_id == PCI_DM9132_ID) {
1775 ioaddr = iobase + 0x80 + offset * 4; 1794 dw16(0x80 + offset * 4, phy_data);
1776 outw(phy_data, ioaddr);
1777 } else { 1795 } else {
1778 /* DM9102/DM9102A Chip */ 1796 /* DM9102/DM9102A Chip */
1779 ioaddr = iobase + DCR9;
1780 1797
1781 /* Send 33 synchronization clock to Phy controller */ 1798 /* Send 33 synchronization clock to Phy controller */
1782 for (i = 0; i < 35; i++) 1799 for (i = 0; i < 35; i++)
@@ -1816,19 +1833,16 @@ static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset,
1816 * Read a word data from phy register 1833 * Read a word data from phy register
1817 */ 1834 */
1818 1835
1819static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id) 1836static u16 phy_read(void __iomem *ioaddr, u8 phy_addr, u8 offset, u32 chip_id)
1820{ 1837{
1821 int i; 1838 int i;
1822 u16 phy_data; 1839 u16 phy_data;
1823 unsigned long ioaddr;
1824 1840
1825 if (chip_id == PCI_DM9132_ID) { 1841 if (chip_id == PCI_DM9132_ID) {
1826 /* DM9132 Chip */ 1842 /* DM9132 Chip */
1827 ioaddr = iobase + 0x80 + offset * 4; 1843 phy_data = dr16(0x80 + offset * 4);
1828 phy_data = inw(ioaddr);
1829 } else { 1844 } else {
1830 /* DM9102/DM9102A Chip */ 1845 /* DM9102/DM9102A Chip */
1831 ioaddr = iobase + DCR9;
1832 1846
1833 /* Send 33 synchronization clock to Phy controller */ 1847 /* Send 33 synchronization clock to Phy controller */
1834 for (i = 0; i < 35; i++) 1848 for (i = 0; i < 35; i++)
@@ -1870,13 +1884,13 @@ static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
1870 * Write one bit data to Phy Controller 1884 * Write one bit data to Phy Controller
1871 */ 1885 */
1872 1886
1873static void phy_write_1bit(unsigned long ioaddr, u32 phy_data) 1887static void phy_write_1bit(void __iomem *ioaddr, u32 phy_data)
1874{ 1888{
1875 outl(phy_data, ioaddr); /* MII Clock Low */ 1889 dw32(DCR9, phy_data); /* MII Clock Low */
1876 udelay(1); 1890 udelay(1);
1877 outl(phy_data | MDCLKH, ioaddr); /* MII Clock High */ 1891 dw32(DCR9, phy_data | MDCLKH); /* MII Clock High */
1878 udelay(1); 1892 udelay(1);
1879 outl(phy_data, ioaddr); /* MII Clock Low */ 1893 dw32(DCR9, phy_data); /* MII Clock Low */
1880 udelay(1); 1894 udelay(1);
1881} 1895}
1882 1896
@@ -1885,14 +1899,14 @@ static void phy_write_1bit(unsigned long ioaddr, u32 phy_data)
1885 * Read one bit phy data from PHY controller 1899 * Read one bit phy data from PHY controller
1886 */ 1900 */
1887 1901
1888static u16 phy_read_1bit(unsigned long ioaddr) 1902static u16 phy_read_1bit(void __iomem *ioaddr)
1889{ 1903{
1890 u16 phy_data; 1904 u16 phy_data;
1891 1905
1892 outl(0x50000, ioaddr); 1906 dw32(DCR9, 0x50000);
1893 udelay(1); 1907 udelay(1);
1894 phy_data = ( inl(ioaddr) >> 19 ) & 0x1; 1908 phy_data = (dr32(DCR9) >> 19) & 0x1;
1895 outl(0x40000, ioaddr); 1909 dw32(DCR9, 0x40000);
1896 udelay(1); 1910 udelay(1);
1897 1911
1898 return phy_data; 1912 return phy_data;
@@ -1978,7 +1992,7 @@ static void dmfe_parse_srom(struct dmfe_board_info * db)
1978 1992
1979 /* Check DM9801 or DM9802 present or not */ 1993 /* Check DM9801 or DM9802 present or not */
1980 db->HPNA_present = 0; 1994 db->HPNA_present = 0;
1981 update_cr6(db->cr6_data|0x40000, db->ioaddr); 1995 update_cr6(db->cr6_data | 0x40000, db->ioaddr);
1982 tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id); 1996 tmp_reg = phy_read(db->ioaddr, db->phy_addr, 3, db->chip_id);
1983 if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) { 1997 if ( ( tmp_reg & 0xfff0 ) == 0xb900 ) {
1984 /* DM9801 or DM9802 present */ 1998 /* DM9801 or DM9802 present */
@@ -2095,6 +2109,7 @@ static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state)
2095{ 2109{
2096 struct net_device *dev = pci_get_drvdata(pci_dev); 2110 struct net_device *dev = pci_get_drvdata(pci_dev);
2097 struct dmfe_board_info *db = netdev_priv(dev); 2111 struct dmfe_board_info *db = netdev_priv(dev);
2112 void __iomem *ioaddr = db->ioaddr;
2098 u32 tmp; 2113 u32 tmp;
2099 2114
2100 /* Disable upper layer interface */ 2115 /* Disable upper layer interface */
@@ -2102,11 +2117,11 @@ static int dmfe_suspend(struct pci_dev *pci_dev, pm_message_t state)
2102 2117
2103 /* Disable Tx/Rx */ 2118 /* Disable Tx/Rx */
2104 db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); 2119 db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);
2105 update_cr6(db->cr6_data, dev->base_addr); 2120 update_cr6(db->cr6_data, ioaddr);
2106 2121
2107 /* Disable Interrupt */ 2122 /* Disable Interrupt */
2108 outl(0, dev->base_addr + DCR7); 2123 dw32(DCR7, 0);
2109 outl(inl (dev->base_addr + DCR5), dev->base_addr + DCR5); 2124 dw32(DCR5, dr32(DCR5));
2110 2125
2111 /* Fre RX buffers */ 2126 /* Fre RX buffers */
2112 dmfe_free_rxbuffer(db); 2127 dmfe_free_rxbuffer(db);
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index fea3641d9398..c4f37aca2269 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -328,7 +328,7 @@ static void tulip_up(struct net_device *dev)
328 udelay(100); 328 udelay(100);
329 329
330 if (tulip_debug > 1) 330 if (tulip_debug > 1)
331 netdev_dbg(dev, "tulip_up(), irq==%d\n", dev->irq); 331 netdev_dbg(dev, "tulip_up(), irq==%d\n", tp->pdev->irq);
332 332
333 iowrite32(tp->rx_ring_dma, ioaddr + CSR3); 333 iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
334 iowrite32(tp->tx_ring_dma, ioaddr + CSR4); 334 iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
@@ -515,11 +515,13 @@ media_picked:
515static int 515static int
516tulip_open(struct net_device *dev) 516tulip_open(struct net_device *dev)
517{ 517{
518 struct tulip_private *tp = netdev_priv(dev);
518 int retval; 519 int retval;
519 520
520 tulip_init_ring (dev); 521 tulip_init_ring (dev);
521 522
522 retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev); 523 retval = request_irq(tp->pdev->irq, tulip_interrupt, IRQF_SHARED,
524 dev->name, dev);
523 if (retval) 525 if (retval)
524 goto free_ring; 526 goto free_ring;
525 527
@@ -841,7 +843,7 @@ static int tulip_close (struct net_device *dev)
841 netdev_dbg(dev, "Shutting down ethercard, status was %02x\n", 843 netdev_dbg(dev, "Shutting down ethercard, status was %02x\n",
842 ioread32 (ioaddr + CSR5)); 844 ioread32 (ioaddr + CSR5));
843 845
844 free_irq (dev->irq, dev); 846 free_irq (tp->pdev->irq, dev);
845 847
846 tulip_free_ring (dev); 848 tulip_free_ring (dev);
847 849
@@ -1489,8 +1491,6 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1489 1491
1490 INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task); 1492 INIT_WORK(&tp->media_work, tulip_tbl[tp->chip_id].media_task);
1491 1493
1492 dev->base_addr = (unsigned long)ioaddr;
1493
1494#ifdef CONFIG_TULIP_MWI 1494#ifdef CONFIG_TULIP_MWI
1495 if (!force_csr0 && (tp->flags & HAS_PCI_MWI)) 1495 if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
1496 tulip_mwi_config (pdev, dev); 1496 tulip_mwi_config (pdev, dev);
@@ -1650,7 +1650,6 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1650 for (i = 0; i < 6; i++) 1650 for (i = 0; i < 6; i++)
1651 last_phys_addr[i] = dev->dev_addr[i]; 1651 last_phys_addr[i] = dev->dev_addr[i];
1652 last_irq = irq; 1652 last_irq = irq;
1653 dev->irq = irq;
1654 1653
1655 /* The lower four bits are the media type. */ 1654 /* The lower four bits are the media type. */
1656 if (board_idx >= 0 && board_idx < MAX_UNITS) { 1655 if (board_idx >= 0 && board_idx < MAX_UNITS) {
@@ -1858,7 +1857,8 @@ static int tulip_suspend (struct pci_dev *pdev, pm_message_t state)
1858 tulip_down(dev); 1857 tulip_down(dev);
1859 1858
1860 netif_device_detach(dev); 1859 netif_device_detach(dev);
1861 free_irq(dev->irq, dev); 1860 /* FIXME: it needlessly adds an error path. */
1861 free_irq(tp->pdev->irq, dev);
1862 1862
1863save_state: 1863save_state:
1864 pci_save_state(pdev); 1864 pci_save_state(pdev);
@@ -1900,7 +1900,9 @@ static int tulip_resume(struct pci_dev *pdev)
1900 return retval; 1900 return retval;
1901 } 1901 }
1902 1902
1903 if ((retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev))) { 1903 retval = request_irq(pdev->irq, tulip_interrupt, IRQF_SHARED,
1904 dev->name, dev);
1905 if (retval) {
1904 pr_err("request_irq failed in resume\n"); 1906 pr_err("request_irq failed in resume\n");
1905 return retval; 1907 return retval;
1906 } 1908 }
@@ -1960,11 +1962,14 @@ static void __devexit tulip_remove_one (struct pci_dev *pdev)
1960 1962
1961static void poll_tulip (struct net_device *dev) 1963static void poll_tulip (struct net_device *dev)
1962{ 1964{
1965 struct tulip_private *tp = netdev_priv(dev);
1966 const int irq = tp->pdev->irq;
1967
1963 /* disable_irq here is not very nice, but with the lockless 1968 /* disable_irq here is not very nice, but with the lockless
1964 interrupt handler we have no other choice. */ 1969 interrupt handler we have no other choice. */
1965 disable_irq(dev->irq); 1970 disable_irq(irq);
1966 tulip_interrupt (dev->irq, dev); 1971 tulip_interrupt (irq, dev);
1967 enable_irq(dev->irq); 1972 enable_irq(irq);
1968} 1973}
1969#endif 1974#endif
1970 1975
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index fc4001f6a5e4..75d45f8a37dc 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -42,6 +42,8 @@
42#include <asm/dma.h> 42#include <asm/dma.h>
43#include <asm/uaccess.h> 43#include <asm/uaccess.h>
44 44
45#define uw32(reg, val) iowrite32(val, ioaddr + (reg))
46#define ur32(reg) ioread32(ioaddr + (reg))
45 47
46/* Board/System/Debug information/definition ---------------- */ 48/* Board/System/Debug information/definition ---------------- */
47#define PCI_ULI5261_ID 0x526110B9 /* ULi M5261 ID*/ 49#define PCI_ULI5261_ID 0x526110B9 /* ULi M5261 ID*/
@@ -110,14 +112,6 @@ do { \
110 112
111#define SROM_V41_CODE 0x14 113#define SROM_V41_CODE 0x14
112 114
113#define SROM_CLK_WRITE(data, ioaddr) \
114 outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
115 udelay(5); \
116 outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr); \
117 udelay(5); \
118 outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr); \
119 udelay(5);
120
121/* Structure/enum declaration ------------------------------- */ 115/* Structure/enum declaration ------------------------------- */
122struct tx_desc { 116struct tx_desc {
123 __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */ 117 __le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
@@ -132,12 +126,15 @@ struct rx_desc {
132} __attribute__(( aligned(32) )); 126} __attribute__(( aligned(32) ));
133 127
134struct uli526x_board_info { 128struct uli526x_board_info {
135 u32 chip_id; /* Chip vendor/Device ID */ 129 struct uli_phy_ops {
130 void (*write)(struct uli526x_board_info *, u8, u8, u16);
131 u16 (*read)(struct uli526x_board_info *, u8, u8);
132 } phy;
136 struct net_device *next_dev; /* next device */ 133 struct net_device *next_dev; /* next device */
137 struct pci_dev *pdev; /* PCI device */ 134 struct pci_dev *pdev; /* PCI device */
138 spinlock_t lock; 135 spinlock_t lock;
139 136
140 long ioaddr; /* I/O base address */ 137 void __iomem *ioaddr; /* I/O base address */
141 u32 cr0_data; 138 u32 cr0_data;
142 u32 cr5_data; 139 u32 cr5_data;
143 u32 cr6_data; 140 u32 cr6_data;
@@ -227,21 +224,21 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *,
227static int uli526x_stop(struct net_device *); 224static int uli526x_stop(struct net_device *);
228static void uli526x_set_filter_mode(struct net_device *); 225static void uli526x_set_filter_mode(struct net_device *);
229static const struct ethtool_ops netdev_ethtool_ops; 226static const struct ethtool_ops netdev_ethtool_ops;
230static u16 read_srom_word(long, int); 227static u16 read_srom_word(struct uli526x_board_info *, int);
231static irqreturn_t uli526x_interrupt(int, void *); 228static irqreturn_t uli526x_interrupt(int, void *);
232#ifdef CONFIG_NET_POLL_CONTROLLER 229#ifdef CONFIG_NET_POLL_CONTROLLER
233static void uli526x_poll(struct net_device *dev); 230static void uli526x_poll(struct net_device *dev);
234#endif 231#endif
235static void uli526x_descriptor_init(struct net_device *, unsigned long); 232static void uli526x_descriptor_init(struct net_device *, void __iomem *);
236static void allocate_rx_buffer(struct net_device *); 233static void allocate_rx_buffer(struct net_device *);
237static void update_cr6(u32, unsigned long); 234static void update_cr6(u32, void __iomem *);
238static void send_filter_frame(struct net_device *, int); 235static void send_filter_frame(struct net_device *, int);
239static u16 phy_read(unsigned long, u8, u8, u32); 236static u16 phy_readby_cr9(struct uli526x_board_info *, u8, u8);
240static u16 phy_readby_cr10(unsigned long, u8, u8); 237static u16 phy_readby_cr10(struct uli526x_board_info *, u8, u8);
241static void phy_write(unsigned long, u8, u8, u16, u32); 238static void phy_writeby_cr9(struct uli526x_board_info *, u8, u8, u16);
242static void phy_writeby_cr10(unsigned long, u8, u8, u16); 239static void phy_writeby_cr10(struct uli526x_board_info *, u8, u8, u16);
243static void phy_write_1bit(unsigned long, u32, u32); 240static void phy_write_1bit(struct uli526x_board_info *db, u32);
244static u16 phy_read_1bit(unsigned long, u32); 241static u16 phy_read_1bit(struct uli526x_board_info *db);
245static u8 uli526x_sense_speed(struct uli526x_board_info *); 242static u8 uli526x_sense_speed(struct uli526x_board_info *);
246static void uli526x_process_mode(struct uli526x_board_info *); 243static void uli526x_process_mode(struct uli526x_board_info *);
247static void uli526x_timer(unsigned long); 244static void uli526x_timer(unsigned long);
@@ -253,6 +250,18 @@ static void uli526x_free_rxbuffer(struct uli526x_board_info *);
253static void uli526x_init(struct net_device *); 250static void uli526x_init(struct net_device *);
254static void uli526x_set_phyxcer(struct uli526x_board_info *); 251static void uli526x_set_phyxcer(struct uli526x_board_info *);
255 252
253static void srom_clk_write(struct uli526x_board_info *db, u32 data)
254{
255 void __iomem *ioaddr = db->ioaddr;
256
257 uw32(DCR9, data | CR9_SROM_READ | CR9_SRCS);
258 udelay(5);
259 uw32(DCR9, data | CR9_SROM_READ | CR9_SRCS | CR9_SRCLK);
260 udelay(5);
261 uw32(DCR9, data | CR9_SROM_READ | CR9_SRCS);
262 udelay(5);
263}
264
256/* ULI526X network board routine ---------------------------- */ 265/* ULI526X network board routine ---------------------------- */
257 266
258static const struct net_device_ops netdev_ops = { 267static const struct net_device_ops netdev_ops = {
@@ -277,6 +286,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
277{ 286{
278 struct uli526x_board_info *db; /* board information structure */ 287 struct uli526x_board_info *db; /* board information structure */
279 struct net_device *dev; 288 struct net_device *dev;
289 void __iomem *ioaddr;
280 int i, err; 290 int i, err;
281 291
282 ULI526X_DBUG(0, "uli526x_init_one()", 0); 292 ULI526X_DBUG(0, "uli526x_init_one()", 0);
@@ -313,9 +323,9 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
313 goto err_out_disable; 323 goto err_out_disable;
314 } 324 }
315 325
316 if (pci_request_regions(pdev, DRV_NAME)) { 326 err = pci_request_regions(pdev, DRV_NAME);
327 if (err < 0) {
317 pr_err("Failed to request PCI regions\n"); 328 pr_err("Failed to request PCI regions\n");
318 err = -ENODEV;
319 goto err_out_disable; 329 goto err_out_disable;
320 } 330 }
321 331
@@ -323,32 +333,41 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
323 db = netdev_priv(dev); 333 db = netdev_priv(dev);
324 334
325 /* Allocate Tx/Rx descriptor memory */ 335 /* Allocate Tx/Rx descriptor memory */
336 err = -ENOMEM;
337
326 db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr); 338 db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
327 if(db->desc_pool_ptr == NULL) 339 if (!db->desc_pool_ptr)
328 { 340 goto err_out_release;
329 err = -ENOMEM; 341
330 goto err_out_nomem;
331 }
332 db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr); 342 db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
333 if(db->buf_pool_ptr == NULL) 343 if (!db->buf_pool_ptr)
334 { 344 goto err_out_free_tx_desc;
335 err = -ENOMEM;
336 goto err_out_nomem;
337 }
338 345
339 db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr; 346 db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
340 db->first_tx_desc_dma = db->desc_pool_dma_ptr; 347 db->first_tx_desc_dma = db->desc_pool_dma_ptr;
341 db->buf_pool_start = db->buf_pool_ptr; 348 db->buf_pool_start = db->buf_pool_ptr;
342 db->buf_pool_dma_start = db->buf_pool_dma_ptr; 349 db->buf_pool_dma_start = db->buf_pool_dma_ptr;
343 350
344 db->chip_id = ent->driver_data; 351 switch (ent->driver_data) {
345 db->ioaddr = pci_resource_start(pdev, 0); 352 case PCI_ULI5263_ID:
353 db->phy.write = phy_writeby_cr10;
354 db->phy.read = phy_readby_cr10;
355 break;
356 default:
357 db->phy.write = phy_writeby_cr9;
358 db->phy.read = phy_readby_cr9;
359 break;
360 }
361
362 /* IO region. */
363 ioaddr = pci_iomap(pdev, 0, 0);
364 if (!ioaddr)
365 goto err_out_free_tx_buf;
346 366
367 db->ioaddr = ioaddr;
347 db->pdev = pdev; 368 db->pdev = pdev;
348 db->init = 1; 369 db->init = 1;
349 370
350 dev->base_addr = db->ioaddr;
351 dev->irq = pdev->irq;
352 pci_set_drvdata(pdev, dev); 371 pci_set_drvdata(pdev, dev);
353 372
354 /* Register some necessary functions */ 373 /* Register some necessary functions */
@@ -360,24 +379,24 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
360 379
361 /* read 64 word srom data */ 380 /* read 64 word srom data */
362 for (i = 0; i < 64; i++) 381 for (i = 0; i < 64; i++)
363 ((__le16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i)); 382 ((__le16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db, i));
364 383
365 /* Set Node address */ 384 /* Set Node address */
366 if(((u16 *) db->srom)[0] == 0xffff || ((u16 *) db->srom)[0] == 0) /* SROM absent, so read MAC address from ID Table */ 385 if(((u16 *) db->srom)[0] == 0xffff || ((u16 *) db->srom)[0] == 0) /* SROM absent, so read MAC address from ID Table */
367 { 386 {
368 outl(0x10000, db->ioaddr + DCR0); //Diagnosis mode 387 uw32(DCR0, 0x10000); //Diagnosis mode
369 outl(0x1c0, db->ioaddr + DCR13); //Reset dianostic pointer port 388 uw32(DCR13, 0x1c0); //Reset dianostic pointer port
370 outl(0, db->ioaddr + DCR14); //Clear reset port 389 uw32(DCR14, 0); //Clear reset port
371 outl(0x10, db->ioaddr + DCR14); //Reset ID Table pointer 390 uw32(DCR14, 0x10); //Reset ID Table pointer
372 outl(0, db->ioaddr + DCR14); //Clear reset port 391 uw32(DCR14, 0); //Clear reset port
373 outl(0, db->ioaddr + DCR13); //Clear CR13 392 uw32(DCR13, 0); //Clear CR13
374 outl(0x1b0, db->ioaddr + DCR13); //Select ID Table access port 393 uw32(DCR13, 0x1b0); //Select ID Table access port
375 //Read MAC address from CR14 394 //Read MAC address from CR14
376 for (i = 0; i < 6; i++) 395 for (i = 0; i < 6; i++)
377 dev->dev_addr[i] = inl(db->ioaddr + DCR14); 396 dev->dev_addr[i] = ur32(DCR14);
378 //Read end 397 //Read end
379 outl(0, db->ioaddr + DCR13); //Clear CR13 398 uw32(DCR13, 0); //Clear CR13
380 outl(0, db->ioaddr + DCR0); //Clear CR0 399 uw32(DCR0, 0); //Clear CR0
381 udelay(10); 400 udelay(10);
382 } 401 }
383 else /*Exist SROM*/ 402 else /*Exist SROM*/
@@ -387,26 +406,26 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
387 } 406 }
388 err = register_netdev (dev); 407 err = register_netdev (dev);
389 if (err) 408 if (err)
390 goto err_out_res; 409 goto err_out_unmap;
391 410
392 netdev_info(dev, "ULi M%04lx at pci%s, %pM, irq %d\n", 411 netdev_info(dev, "ULi M%04lx at pci%s, %pM, irq %d\n",
393 ent->driver_data >> 16, pci_name(pdev), 412 ent->driver_data >> 16, pci_name(pdev),
394 dev->dev_addr, dev->irq); 413 dev->dev_addr, pdev->irq);
395 414
396 pci_set_master(pdev); 415 pci_set_master(pdev);
397 416
398 return 0; 417 return 0;
399 418
400err_out_res: 419err_out_unmap:
420 pci_iounmap(pdev, db->ioaddr);
421err_out_free_tx_buf:
422 pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
423 db->buf_pool_ptr, db->buf_pool_dma_ptr);
424err_out_free_tx_desc:
425 pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
426 db->desc_pool_ptr, db->desc_pool_dma_ptr);
427err_out_release:
401 pci_release_regions(pdev); 428 pci_release_regions(pdev);
402err_out_nomem:
403 if(db->desc_pool_ptr)
404 pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
405 db->desc_pool_ptr, db->desc_pool_dma_ptr);
406
407 if(db->buf_pool_ptr != NULL)
408 pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
409 db->buf_pool_ptr, db->buf_pool_dma_ptr);
410err_out_disable: 429err_out_disable:
411 pci_disable_device(pdev); 430 pci_disable_device(pdev);
412err_out_free: 431err_out_free:
@@ -422,19 +441,17 @@ static void __devexit uli526x_remove_one (struct pci_dev *pdev)
422 struct net_device *dev = pci_get_drvdata(pdev); 441 struct net_device *dev = pci_get_drvdata(pdev);
423 struct uli526x_board_info *db = netdev_priv(dev); 442 struct uli526x_board_info *db = netdev_priv(dev);
424 443
425 ULI526X_DBUG(0, "uli526x_remove_one()", 0); 444 unregister_netdev(dev);
426 445 pci_iounmap(pdev, db->ioaddr);
427 pci_free_consistent(db->pdev, sizeof(struct tx_desc) * 446 pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
428 DESC_ALL_CNT + 0x20, db->desc_pool_ptr, 447 DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
429 db->desc_pool_dma_ptr); 448 db->desc_pool_dma_ptr);
430 pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, 449 pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
431 db->buf_pool_ptr, db->buf_pool_dma_ptr); 450 db->buf_pool_ptr, db->buf_pool_dma_ptr);
432 unregister_netdev(dev);
433 pci_release_regions(pdev); 451 pci_release_regions(pdev);
434 free_netdev(dev); /* free board information */
435 pci_set_drvdata(pdev, NULL);
436 pci_disable_device(pdev); 452 pci_disable_device(pdev);
437 ULI526X_DBUG(0, "uli526x_remove_one() exit", 0); 453 pci_set_drvdata(pdev, NULL);
454 free_netdev(dev);
438} 455}
439 456
440 457
@@ -468,7 +485,8 @@ static int uli526x_open(struct net_device *dev)
468 /* Initialize ULI526X board */ 485 /* Initialize ULI526X board */
469 uli526x_init(dev); 486 uli526x_init(dev);
470 487
471 ret = request_irq(dev->irq, uli526x_interrupt, IRQF_SHARED, dev->name, dev); 488 ret = request_irq(db->pdev->irq, uli526x_interrupt, IRQF_SHARED,
489 dev->name, dev);
472 if (ret) 490 if (ret)
473 return ret; 491 return ret;
474 492
@@ -496,57 +514,57 @@ static int uli526x_open(struct net_device *dev)
496static void uli526x_init(struct net_device *dev) 514static void uli526x_init(struct net_device *dev)
497{ 515{
498 struct uli526x_board_info *db = netdev_priv(dev); 516 struct uli526x_board_info *db = netdev_priv(dev);
499 unsigned long ioaddr = db->ioaddr; 517 struct uli_phy_ops *phy = &db->phy;
518 void __iomem *ioaddr = db->ioaddr;
500 u8 phy_tmp; 519 u8 phy_tmp;
501 u8 timeout; 520 u8 timeout;
502 u16 phy_value;
503 u16 phy_reg_reset; 521 u16 phy_reg_reset;
504 522
505 523
506 ULI526X_DBUG(0, "uli526x_init()", 0); 524 ULI526X_DBUG(0, "uli526x_init()", 0);
507 525
508 /* Reset M526x MAC controller */ 526 /* Reset M526x MAC controller */
509 outl(ULI526X_RESET, ioaddr + DCR0); /* RESET MAC */ 527 uw32(DCR0, ULI526X_RESET); /* RESET MAC */
510 udelay(100); 528 udelay(100);
511 outl(db->cr0_data, ioaddr + DCR0); 529 uw32(DCR0, db->cr0_data);
512 udelay(5); 530 udelay(5);
513 531
514 /* Phy addr : In some boards,M5261/M5263 phy address != 1 */ 532 /* Phy addr : In some boards,M5261/M5263 phy address != 1 */
515 db->phy_addr = 1; 533 db->phy_addr = 1;
516 for(phy_tmp=0;phy_tmp<32;phy_tmp++) 534 for (phy_tmp = 0; phy_tmp < 32; phy_tmp++) {
517 { 535 u16 phy_value;
518 phy_value=phy_read(db->ioaddr,phy_tmp,3,db->chip_id);//peer add 536
519 if(phy_value != 0xffff&&phy_value!=0) 537 phy_value = phy->read(db, phy_tmp, 3); //peer add
520 { 538 if (phy_value != 0xffff && phy_value != 0) {
521 db->phy_addr = phy_tmp; 539 db->phy_addr = phy_tmp;
522 break; 540 break;
523 } 541 }
524 } 542 }
525 if(phy_tmp == 32) 543
544 if (phy_tmp == 32)
526 pr_warn("Can not find the phy address!!!\n"); 545 pr_warn("Can not find the phy address!!!\n");
527 /* Parser SROM and media mode */ 546 /* Parser SROM and media mode */
528 db->media_mode = uli526x_media_mode; 547 db->media_mode = uli526x_media_mode;
529 548
530 /* phyxcer capability setting */ 549 /* phyxcer capability setting */
531 phy_reg_reset = phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id); 550 phy_reg_reset = phy->read(db, db->phy_addr, 0);
532 phy_reg_reset = (phy_reg_reset | 0x8000); 551 phy_reg_reset = (phy_reg_reset | 0x8000);
533 phy_write(db->ioaddr, db->phy_addr, 0, phy_reg_reset, db->chip_id); 552 phy->write(db, db->phy_addr, 0, phy_reg_reset);
534 553
535 /* See IEEE 802.3-2002.pdf (Section 2, Chapter "22.2.4 Management 554 /* See IEEE 802.3-2002.pdf (Section 2, Chapter "22.2.4 Management
536 * functions") or phy data sheet for details on phy reset 555 * functions") or phy data sheet for details on phy reset
537 */ 556 */
538 udelay(500); 557 udelay(500);
539 timeout = 10; 558 timeout = 10;
540 while (timeout-- && 559 while (timeout-- && phy->read(db, db->phy_addr, 0) & 0x8000)
541 phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id) & 0x8000) 560 udelay(100);
542 udelay(100);
543 561
544 /* Process Phyxcer Media Mode */ 562 /* Process Phyxcer Media Mode */
545 uli526x_set_phyxcer(db); 563 uli526x_set_phyxcer(db);
546 564
547 /* Media Mode Process */ 565 /* Media Mode Process */
548 if ( !(db->media_mode & ULI526X_AUTO) ) 566 if ( !(db->media_mode & ULI526X_AUTO) )
549 db->op_mode = db->media_mode; /* Force Mode */ 567 db->op_mode = db->media_mode; /* Force Mode */
550 568
551 /* Initialize Transmit/Receive decriptor and CR3/4 */ 569 /* Initialize Transmit/Receive decriptor and CR3/4 */
552 uli526x_descriptor_init(dev, ioaddr); 570 uli526x_descriptor_init(dev, ioaddr);
@@ -559,10 +577,10 @@ static void uli526x_init(struct net_device *dev)
559 577
560 /* Init CR7, interrupt active bit */ 578 /* Init CR7, interrupt active bit */
561 db->cr7_data = CR7_DEFAULT; 579 db->cr7_data = CR7_DEFAULT;
562 outl(db->cr7_data, ioaddr + DCR7); 580 uw32(DCR7, db->cr7_data);
563 581
564 /* Init CR15, Tx jabber and Rx watchdog timer */ 582 /* Init CR15, Tx jabber and Rx watchdog timer */
565 outl(db->cr15_data, ioaddr + DCR15); 583 uw32(DCR15, db->cr15_data);
566 584
567 /* Enable ULI526X Tx/Rx function */ 585 /* Enable ULI526X Tx/Rx function */
568 db->cr6_data |= CR6_RXSC | CR6_TXSC; 586 db->cr6_data |= CR6_RXSC | CR6_TXSC;
@@ -579,6 +597,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
579 struct net_device *dev) 597 struct net_device *dev)
580{ 598{
581 struct uli526x_board_info *db = netdev_priv(dev); 599 struct uli526x_board_info *db = netdev_priv(dev);
600 void __iomem *ioaddr = db->ioaddr;
582 struct tx_desc *txptr; 601 struct tx_desc *txptr;
583 unsigned long flags; 602 unsigned long flags;
584 603
@@ -604,7 +623,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
604 } 623 }
605 624
606 /* Disable NIC interrupt */ 625 /* Disable NIC interrupt */
607 outl(0, dev->base_addr + DCR7); 626 uw32(DCR7, 0);
608 627
609 /* transmit this packet */ 628 /* transmit this packet */
610 txptr = db->tx_insert_ptr; 629 txptr = db->tx_insert_ptr;
@@ -615,10 +634,10 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
615 db->tx_insert_ptr = txptr->next_tx_desc; 634 db->tx_insert_ptr = txptr->next_tx_desc;
616 635
617 /* Transmit Packet Process */ 636 /* Transmit Packet Process */
618 if ( (db->tx_packet_cnt < TX_DESC_CNT) ) { 637 if (db->tx_packet_cnt < TX_DESC_CNT) {
619 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */ 638 txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
620 db->tx_packet_cnt++; /* Ready to send */ 639 db->tx_packet_cnt++; /* Ready to send */
621 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */ 640 uw32(DCR1, 0x1); /* Issue Tx polling */
622 dev->trans_start = jiffies; /* saved time stamp */ 641 dev->trans_start = jiffies; /* saved time stamp */
623 } 642 }
624 643
@@ -628,7 +647,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
628 647
629 /* Restore CR7 to enable interrupt */ 648 /* Restore CR7 to enable interrupt */
630 spin_unlock_irqrestore(&db->lock, flags); 649 spin_unlock_irqrestore(&db->lock, flags);
631 outl(db->cr7_data, dev->base_addr + DCR7); 650 uw32(DCR7, db->cr7_data);
632 651
633 /* free this SKB */ 652 /* free this SKB */
634 dev_kfree_skb(skb); 653 dev_kfree_skb(skb);
@@ -645,9 +664,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
645static int uli526x_stop(struct net_device *dev) 664static int uli526x_stop(struct net_device *dev)
646{ 665{
647 struct uli526x_board_info *db = netdev_priv(dev); 666 struct uli526x_board_info *db = netdev_priv(dev);
648 unsigned long ioaddr = dev->base_addr; 667 void __iomem *ioaddr = db->ioaddr;
649
650 ULI526X_DBUG(0, "uli526x_stop", 0);
651 668
652 /* disable system */ 669 /* disable system */
653 netif_stop_queue(dev); 670 netif_stop_queue(dev);
@@ -656,12 +673,12 @@ static int uli526x_stop(struct net_device *dev)
656 del_timer_sync(&db->timer); 673 del_timer_sync(&db->timer);
657 674
658 /* Reset & stop ULI526X board */ 675 /* Reset & stop ULI526X board */
659 outl(ULI526X_RESET, ioaddr + DCR0); 676 uw32(DCR0, ULI526X_RESET);
660 udelay(5); 677 udelay(5);
661 phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id); 678 db->phy.write(db, db->phy_addr, 0, 0x8000);
662 679
663 /* free interrupt */ 680 /* free interrupt */
664 free_irq(dev->irq, dev); 681 free_irq(db->pdev->irq, dev);
665 682
666 /* free allocated rx buffer */ 683 /* free allocated rx buffer */
667 uli526x_free_rxbuffer(db); 684 uli526x_free_rxbuffer(db);
@@ -679,18 +696,18 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id)
679{ 696{
680 struct net_device *dev = dev_id; 697 struct net_device *dev = dev_id;
681 struct uli526x_board_info *db = netdev_priv(dev); 698 struct uli526x_board_info *db = netdev_priv(dev);
682 unsigned long ioaddr = dev->base_addr; 699 void __iomem *ioaddr = db->ioaddr;
683 unsigned long flags; 700 unsigned long flags;
684 701
685 spin_lock_irqsave(&db->lock, flags); 702 spin_lock_irqsave(&db->lock, flags);
686 outl(0, ioaddr + DCR7); 703 uw32(DCR7, 0);
687 704
688 /* Got ULI526X status */ 705 /* Got ULI526X status */
689 db->cr5_data = inl(ioaddr + DCR5); 706 db->cr5_data = ur32(DCR5);
690 outl(db->cr5_data, ioaddr + DCR5); 707 uw32(DCR5, db->cr5_data);
691 if ( !(db->cr5_data & 0x180c1) ) { 708 if ( !(db->cr5_data & 0x180c1) ) {
692 /* Restore CR7 to enable interrupt mask */ 709 /* Restore CR7 to enable interrupt mask */
693 outl(db->cr7_data, ioaddr + DCR7); 710 uw32(DCR7, db->cr7_data);
694 spin_unlock_irqrestore(&db->lock, flags); 711 spin_unlock_irqrestore(&db->lock, flags);
695 return IRQ_HANDLED; 712 return IRQ_HANDLED;
696 } 713 }
@@ -718,7 +735,7 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id)
718 uli526x_free_tx_pkt(dev, db); 735 uli526x_free_tx_pkt(dev, db);
719 736
720 /* Restore CR7 to enable interrupt mask */ 737 /* Restore CR7 to enable interrupt mask */
721 outl(db->cr7_data, ioaddr + DCR7); 738 uw32(DCR7, db->cr7_data);
722 739
723 spin_unlock_irqrestore(&db->lock, flags); 740 spin_unlock_irqrestore(&db->lock, flags);
724 return IRQ_HANDLED; 741 return IRQ_HANDLED;
@@ -727,8 +744,10 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id)
727#ifdef CONFIG_NET_POLL_CONTROLLER 744#ifdef CONFIG_NET_POLL_CONTROLLER
728static void uli526x_poll(struct net_device *dev) 745static void uli526x_poll(struct net_device *dev)
729{ 746{
747 struct uli526x_board_info *db = netdev_priv(dev);
748
730 /* ISR grabs the irqsave lock, so this should be safe */ 749 /* ISR grabs the irqsave lock, so this should be safe */
731 uli526x_interrupt(dev->irq, dev); 750 uli526x_interrupt(db->pdev->irq, dev);
732} 751}
733#endif 752#endif
734 753
@@ -962,12 +981,7 @@ static void netdev_get_drvinfo(struct net_device *dev,
962 981
963 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 982 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
964 strlcpy(info->version, DRV_VERSION, sizeof(info->version)); 983 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
965 if (np->pdev) 984 strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
966 strlcpy(info->bus_info, pci_name(np->pdev),
967 sizeof(info->bus_info));
968 else
969 sprintf(info->bus_info, "EISA 0x%lx %d",
970 dev->base_addr, dev->irq);
971} 985}
972 986
973static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { 987static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
@@ -1007,18 +1021,20 @@ static const struct ethtool_ops netdev_ethtool_ops = {
1007 1021
1008static void uli526x_timer(unsigned long data) 1022static void uli526x_timer(unsigned long data)
1009{ 1023{
1010 u32 tmp_cr8;
1011 unsigned char tmp_cr12=0;
1012 struct net_device *dev = (struct net_device *) data; 1024 struct net_device *dev = (struct net_device *) data;
1013 struct uli526x_board_info *db = netdev_priv(dev); 1025 struct uli526x_board_info *db = netdev_priv(dev);
1026 struct uli_phy_ops *phy = &db->phy;
1027 void __iomem *ioaddr = db->ioaddr;
1014 unsigned long flags; 1028 unsigned long flags;
1029 u8 tmp_cr12 = 0;
1030 u32 tmp_cr8;
1015 1031
1016 //ULI526X_DBUG(0, "uli526x_timer()", 0); 1032 //ULI526X_DBUG(0, "uli526x_timer()", 0);
1017 spin_lock_irqsave(&db->lock, flags); 1033 spin_lock_irqsave(&db->lock, flags);
1018 1034
1019 1035
1020 /* Dynamic reset ULI526X : system error or transmit time-out */ 1036 /* Dynamic reset ULI526X : system error or transmit time-out */
1021 tmp_cr8 = inl(db->ioaddr + DCR8); 1037 tmp_cr8 = ur32(DCR8);
1022 if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) { 1038 if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
1023 db->reset_cr8++; 1039 db->reset_cr8++;
1024 db->wait_reset = 1; 1040 db->wait_reset = 1;
@@ -1028,7 +1044,7 @@ static void uli526x_timer(unsigned long data)
1028 /* TX polling kick monitor */ 1044 /* TX polling kick monitor */
1029 if ( db->tx_packet_cnt && 1045 if ( db->tx_packet_cnt &&
1030 time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_KICK) ) { 1046 time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_KICK) ) {
1031 outl(0x1, dev->base_addr + DCR1); // Tx polling again 1047 uw32(DCR1, 0x1); // Tx polling again
1032 1048
1033 // TX Timeout 1049 // TX Timeout
1034 if ( time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_TIMEOUT) ) { 1050 if ( time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_TIMEOUT) ) {
@@ -1049,7 +1065,7 @@ static void uli526x_timer(unsigned long data)
1049 } 1065 }
1050 1066
1051 /* Link status check, Dynamic media type change */ 1067 /* Link status check, Dynamic media type change */
1052 if((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)!=0) 1068 if ((phy->read(db, db->phy_addr, 5) & 0x01e0)!=0)
1053 tmp_cr12 = 3; 1069 tmp_cr12 = 3;
1054 1070
1055 if ( !(tmp_cr12 & 0x3) && !db->link_failed ) { 1071 if ( !(tmp_cr12 & 0x3) && !db->link_failed ) {
@@ -1062,7 +1078,7 @@ static void uli526x_timer(unsigned long data)
1062 /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */ 1078 /* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
1063 /* AUTO don't need */ 1079 /* AUTO don't need */
1064 if ( !(db->media_mode & 0x8) ) 1080 if ( !(db->media_mode & 0x8) )
1065 phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id); 1081 phy->write(db, db->phy_addr, 0, 0x1000);
1066 1082
1067 /* AUTO mode, if INT phyxcer link failed, select EXT device */ 1083 /* AUTO mode, if INT phyxcer link failed, select EXT device */
1068 if (db->media_mode & ULI526X_AUTO) { 1084 if (db->media_mode & ULI526X_AUTO) {
@@ -1119,12 +1135,13 @@ static void uli526x_timer(unsigned long data)
1119static void uli526x_reset_prepare(struct net_device *dev) 1135static void uli526x_reset_prepare(struct net_device *dev)
1120{ 1136{
1121 struct uli526x_board_info *db = netdev_priv(dev); 1137 struct uli526x_board_info *db = netdev_priv(dev);
1138 void __iomem *ioaddr = db->ioaddr;
1122 1139
1123 /* Sopt MAC controller */ 1140 /* Sopt MAC controller */
1124 db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */ 1141 db->cr6_data &= ~(CR6_RXSC | CR6_TXSC); /* Disable Tx/Rx */
1125 update_cr6(db->cr6_data, dev->base_addr); 1142 update_cr6(db->cr6_data, ioaddr);
1126 outl(0, dev->base_addr + DCR7); /* Disable Interrupt */ 1143 uw32(DCR7, 0); /* Disable Interrupt */
1127 outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5); 1144 uw32(DCR5, ur32(DCR5));
1128 1145
1129 /* Disable upper layer interface */ 1146 /* Disable upper layer interface */
1130 netif_stop_queue(dev); 1147 netif_stop_queue(dev);
@@ -1289,7 +1306,7 @@ static void uli526x_reuse_skb(struct uli526x_board_info *db, struct sk_buff * sk
1289 * Using Chain structure, and allocate Tx/Rx buffer 1306 * Using Chain structure, and allocate Tx/Rx buffer
1290 */ 1307 */
1291 1308
1292static void uli526x_descriptor_init(struct net_device *dev, unsigned long ioaddr) 1309static void uli526x_descriptor_init(struct net_device *dev, void __iomem *ioaddr)
1293{ 1310{
1294 struct uli526x_board_info *db = netdev_priv(dev); 1311 struct uli526x_board_info *db = netdev_priv(dev);
1295 struct tx_desc *tmp_tx; 1312 struct tx_desc *tmp_tx;
@@ -1304,14 +1321,14 @@ static void uli526x_descriptor_init(struct net_device *dev, unsigned long ioaddr
1304 /* tx descriptor start pointer */ 1321 /* tx descriptor start pointer */
1305 db->tx_insert_ptr = db->first_tx_desc; 1322 db->tx_insert_ptr = db->first_tx_desc;
1306 db->tx_remove_ptr = db->first_tx_desc; 1323 db->tx_remove_ptr = db->first_tx_desc;
1307 outl(db->first_tx_desc_dma, ioaddr + DCR4); /* TX DESC address */ 1324 uw32(DCR4, db->first_tx_desc_dma); /* TX DESC address */
1308 1325
1309 /* rx descriptor start pointer */ 1326 /* rx descriptor start pointer */
1310 db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT; 1327 db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT;
1311 db->first_rx_desc_dma = db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT; 1328 db->first_rx_desc_dma = db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT;
1312 db->rx_insert_ptr = db->first_rx_desc; 1329 db->rx_insert_ptr = db->first_rx_desc;
1313 db->rx_ready_ptr = db->first_rx_desc; 1330 db->rx_ready_ptr = db->first_rx_desc;
1314 outl(db->first_rx_desc_dma, ioaddr + DCR3); /* RX DESC address */ 1331 uw32(DCR3, db->first_rx_desc_dma); /* RX DESC address */
1315 1332
1316 /* Init Transmit chain */ 1333 /* Init Transmit chain */
1317 tmp_buf = db->buf_pool_start; 1334 tmp_buf = db->buf_pool_start;
@@ -1352,11 +1369,9 @@ static void uli526x_descriptor_init(struct net_device *dev, unsigned long ioaddr
1352 * Update CR6 value 1369 * Update CR6 value
1353 * Firstly stop ULI526X, then written value and start 1370 * Firstly stop ULI526X, then written value and start
1354 */ 1371 */
1355 1372static void update_cr6(u32 cr6_data, void __iomem *ioaddr)
1356static void update_cr6(u32 cr6_data, unsigned long ioaddr)
1357{ 1373{
1358 1374 uw32(DCR6, cr6_data);
1359 outl(cr6_data, ioaddr + DCR6);
1360 udelay(5); 1375 udelay(5);
1361} 1376}
1362 1377
@@ -1375,6 +1390,7 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr)
1375static void send_filter_frame(struct net_device *dev, int mc_cnt) 1390static void send_filter_frame(struct net_device *dev, int mc_cnt)
1376{ 1391{
1377 struct uli526x_board_info *db = netdev_priv(dev); 1392 struct uli526x_board_info *db = netdev_priv(dev);
1393 void __iomem *ioaddr = db->ioaddr;
1378 struct netdev_hw_addr *ha; 1394 struct netdev_hw_addr *ha;
1379 struct tx_desc *txptr; 1395 struct tx_desc *txptr;
1380 u16 * addrptr; 1396 u16 * addrptr;
@@ -1420,9 +1436,9 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
1420 /* Resource Empty */ 1436 /* Resource Empty */
1421 db->tx_packet_cnt++; 1437 db->tx_packet_cnt++;
1422 txptr->tdes0 = cpu_to_le32(0x80000000); 1438 txptr->tdes0 = cpu_to_le32(0x80000000);
1423 update_cr6(db->cr6_data | 0x2000, dev->base_addr); 1439 update_cr6(db->cr6_data | 0x2000, ioaddr);
1424 outl(0x1, dev->base_addr + DCR1); /* Issue Tx polling */ 1440 uw32(DCR1, 0x1); /* Issue Tx polling */
1425 update_cr6(db->cr6_data, dev->base_addr); 1441 update_cr6(db->cr6_data, ioaddr);
1426 dev->trans_start = jiffies; 1442 dev->trans_start = jiffies;
1427 } else 1443 } else
1428 netdev_err(dev, "No Tx resource - Send_filter_frame!\n"); 1444 netdev_err(dev, "No Tx resource - Send_filter_frame!\n");
@@ -1465,37 +1481,38 @@ static void allocate_rx_buffer(struct net_device *dev)
1465 * Read one word data from the serial ROM 1481 * Read one word data from the serial ROM
1466 */ 1482 */
1467 1483
1468static u16 read_srom_word(long ioaddr, int offset) 1484static u16 read_srom_word(struct uli526x_board_info *db, int offset)
1469{ 1485{
1470 int i; 1486 void __iomem *ioaddr = db->ioaddr;
1471 u16 srom_data = 0; 1487 u16 srom_data = 0;
1472 long cr9_ioaddr = ioaddr + DCR9; 1488 int i;
1473 1489
1474 outl(CR9_SROM_READ, cr9_ioaddr); 1490 uw32(DCR9, CR9_SROM_READ);
1475 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); 1491 uw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1476 1492
1477 /* Send the Read Command 110b */ 1493 /* Send the Read Command 110b */
1478 SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr); 1494 srom_clk_write(db, SROM_DATA_1);
1479 SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr); 1495 srom_clk_write(db, SROM_DATA_1);
1480 SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr); 1496 srom_clk_write(db, SROM_DATA_0);
1481 1497
1482 /* Send the offset */ 1498 /* Send the offset */
1483 for (i = 5; i >= 0; i--) { 1499 for (i = 5; i >= 0; i--) {
1484 srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0; 1500 srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
1485 SROM_CLK_WRITE(srom_data, cr9_ioaddr); 1501 srom_clk_write(db, srom_data);
1486 } 1502 }
1487 1503
1488 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); 1504 uw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1489 1505
1490 for (i = 16; i > 0; i--) { 1506 for (i = 16; i > 0; i--) {
1491 outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr); 1507 uw32(DCR9, CR9_SROM_READ | CR9_SRCS | CR9_SRCLK);
1492 udelay(5); 1508 udelay(5);
1493 srom_data = (srom_data << 1) | ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0); 1509 srom_data = (srom_data << 1) |
1494 outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr); 1510 ((ur32(DCR9) & CR9_CRDOUT) ? 1 : 0);
1511 uw32(DCR9, CR9_SROM_READ | CR9_SRCS);
1495 udelay(5); 1512 udelay(5);
1496 } 1513 }
1497 1514
1498 outl(CR9_SROM_READ, cr9_ioaddr); 1515 uw32(DCR9, CR9_SROM_READ);
1499 return srom_data; 1516 return srom_data;
1500} 1517}
1501 1518
@@ -1506,15 +1523,16 @@ static u16 read_srom_word(long ioaddr, int offset)
1506 1523
1507static u8 uli526x_sense_speed(struct uli526x_board_info * db) 1524static u8 uli526x_sense_speed(struct uli526x_board_info * db)
1508{ 1525{
1526 struct uli_phy_ops *phy = &db->phy;
1509 u8 ErrFlag = 0; 1527 u8 ErrFlag = 0;
1510 u16 phy_mode; 1528 u16 phy_mode;
1511 1529
1512 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); 1530 phy_mode = phy->read(db, db->phy_addr, 1);
1513 phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id); 1531 phy_mode = phy->read(db, db->phy_addr, 1);
1514 1532
1515 if ( (phy_mode & 0x24) == 0x24 ) { 1533 if ( (phy_mode & 0x24) == 0x24 ) {
1516 1534
1517 phy_mode = ((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)<<7); 1535 phy_mode = ((phy->read(db, db->phy_addr, 5) & 0x01e0)<<7);
1518 if(phy_mode&0x8000) 1536 if(phy_mode&0x8000)
1519 phy_mode = 0x8000; 1537 phy_mode = 0x8000;
1520 else if(phy_mode&0x4000) 1538 else if(phy_mode&0x4000)
@@ -1549,10 +1567,11 @@ static u8 uli526x_sense_speed(struct uli526x_board_info * db)
1549 1567
1550static void uli526x_set_phyxcer(struct uli526x_board_info *db) 1568static void uli526x_set_phyxcer(struct uli526x_board_info *db)
1551{ 1569{
1570 struct uli_phy_ops *phy = &db->phy;
1552 u16 phy_reg; 1571 u16 phy_reg;
1553 1572
1554 /* Phyxcer capability setting */ 1573 /* Phyxcer capability setting */
1555 phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0; 1574 phy_reg = phy->read(db, db->phy_addr, 4) & ~0x01e0;
1556 1575
1557 if (db->media_mode & ULI526X_AUTO) { 1576 if (db->media_mode & ULI526X_AUTO) {
1558 /* AUTO Mode */ 1577 /* AUTO Mode */
@@ -1573,10 +1592,10 @@ static void uli526x_set_phyxcer(struct uli526x_board_info *db)
1573 phy_reg|=db->PHY_reg4; 1592 phy_reg|=db->PHY_reg4;
1574 db->media_mode|=ULI526X_AUTO; 1593 db->media_mode|=ULI526X_AUTO;
1575 } 1594 }
1576 phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id); 1595 phy->write(db, db->phy_addr, 4, phy_reg);
1577 1596
1578 /* Restart Auto-Negotiation */ 1597 /* Restart Auto-Negotiation */
1579 phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id); 1598 phy->write(db, db->phy_addr, 0, 0x1200);
1580 udelay(50); 1599 udelay(50);
1581} 1600}
1582 1601
@@ -1590,6 +1609,7 @@ static void uli526x_set_phyxcer(struct uli526x_board_info *db)
1590 1609
1591static void uli526x_process_mode(struct uli526x_board_info *db) 1610static void uli526x_process_mode(struct uli526x_board_info *db)
1592{ 1611{
1612 struct uli_phy_ops *phy = &db->phy;
1593 u16 phy_reg; 1613 u16 phy_reg;
1594 1614
1595 /* Full Duplex Mode Check */ 1615 /* Full Duplex Mode Check */
@@ -1601,10 +1621,10 @@ static void uli526x_process_mode(struct uli526x_board_info *db)
1601 update_cr6(db->cr6_data, db->ioaddr); 1621 update_cr6(db->cr6_data, db->ioaddr);
1602 1622
1603 /* 10/100M phyxcer force mode need */ 1623 /* 10/100M phyxcer force mode need */
1604 if ( !(db->media_mode & 0x8)) { 1624 if (!(db->media_mode & 0x8)) {
1605 /* Forece Mode */ 1625 /* Forece Mode */
1606 phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id); 1626 phy_reg = phy->read(db, db->phy_addr, 6);
1607 if ( !(phy_reg & 0x1) ) { 1627 if (!(phy_reg & 0x1)) {
1608 /* parter without N-Way capability */ 1628 /* parter without N-Way capability */
1609 phy_reg = 0x0; 1629 phy_reg = 0x0;
1610 switch(db->op_mode) { 1630 switch(db->op_mode) {
@@ -1613,148 +1633,126 @@ static void uli526x_process_mode(struct uli526x_board_info *db)
1613 case ULI526X_100MHF: phy_reg = 0x2000; break; 1633 case ULI526X_100MHF: phy_reg = 0x2000; break;
1614 case ULI526X_100MFD: phy_reg = 0x2100; break; 1634 case ULI526X_100MFD: phy_reg = 0x2100; break;
1615 } 1635 }
1616 phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id); 1636 phy->write(db, db->phy_addr, 0, phy_reg);
1617 } 1637 }
1618 } 1638 }
1619} 1639}
1620 1640
1621 1641
1622/* 1642/* M5261/M5263 Chip */
1623 * Write a word to Phy register 1643static void phy_writeby_cr9(struct uli526x_board_info *db, u8 phy_addr,
1624 */ 1644 u8 offset, u16 phy_data)
1625
1626static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data, u32 chip_id)
1627{ 1645{
1628 u16 i; 1646 u16 i;
1629 unsigned long ioaddr;
1630
1631 if(chip_id == PCI_ULI5263_ID)
1632 {
1633 phy_writeby_cr10(iobase, phy_addr, offset, phy_data);
1634 return;
1635 }
1636 /* M5261/M5263 Chip */
1637 ioaddr = iobase + DCR9;
1638 1647
1639 /* Send 33 synchronization clock to Phy controller */ 1648 /* Send 33 synchronization clock to Phy controller */
1640 for (i = 0; i < 35; i++) 1649 for (i = 0; i < 35; i++)
1641 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); 1650 phy_write_1bit(db, PHY_DATA_1);
1642 1651
1643 /* Send start command(01) to Phy */ 1652 /* Send start command(01) to Phy */
1644 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id); 1653 phy_write_1bit(db, PHY_DATA_0);
1645 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); 1654 phy_write_1bit(db, PHY_DATA_1);
1646 1655
1647 /* Send write command(01) to Phy */ 1656 /* Send write command(01) to Phy */
1648 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id); 1657 phy_write_1bit(db, PHY_DATA_0);
1649 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); 1658 phy_write_1bit(db, PHY_DATA_1);
1650 1659
1651 /* Send Phy address */ 1660 /* Send Phy address */
1652 for (i = 0x10; i > 0; i = i >> 1) 1661 for (i = 0x10; i > 0; i = i >> 1)
1653 phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id); 1662 phy_write_1bit(db, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1654 1663
1655 /* Send register address */ 1664 /* Send register address */
1656 for (i = 0x10; i > 0; i = i >> 1) 1665 for (i = 0x10; i > 0; i = i >> 1)
1657 phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id); 1666 phy_write_1bit(db, offset & i ? PHY_DATA_1 : PHY_DATA_0);
1658 1667
1659 /* written trasnition */ 1668 /* written trasnition */
1660 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); 1669 phy_write_1bit(db, PHY_DATA_1);
1661 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id); 1670 phy_write_1bit(db, PHY_DATA_0);
1662 1671
1663 /* Write a word data to PHY controller */ 1672 /* Write a word data to PHY controller */
1664 for ( i = 0x8000; i > 0; i >>= 1) 1673 for (i = 0x8000; i > 0; i >>= 1)
1665 phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0, chip_id); 1674 phy_write_1bit(db, phy_data & i ? PHY_DATA_1 : PHY_DATA_0);
1666
1667} 1675}
1668 1676
1669 1677static u16 phy_readby_cr9(struct uli526x_board_info *db, u8 phy_addr, u8 offset)
1670/*
1671 * Read a word data from phy register
1672 */
1673
1674static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
1675{ 1678{
1676 int i;
1677 u16 phy_data; 1679 u16 phy_data;
1678 unsigned long ioaddr; 1680 int i;
1679
1680 if(chip_id == PCI_ULI5263_ID)
1681 return phy_readby_cr10(iobase, phy_addr, offset);
1682 /* M5261/M5263 Chip */
1683 ioaddr = iobase + DCR9;
1684 1681
1685 /* Send 33 synchronization clock to Phy controller */ 1682 /* Send 33 synchronization clock to Phy controller */
1686 for (i = 0; i < 35; i++) 1683 for (i = 0; i < 35; i++)
1687 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); 1684 phy_write_1bit(db, PHY_DATA_1);
1688 1685
1689 /* Send start command(01) to Phy */ 1686 /* Send start command(01) to Phy */
1690 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id); 1687 phy_write_1bit(db, PHY_DATA_0);
1691 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); 1688 phy_write_1bit(db, PHY_DATA_1);
1692 1689
1693 /* Send read command(10) to Phy */ 1690 /* Send read command(10) to Phy */
1694 phy_write_1bit(ioaddr, PHY_DATA_1, chip_id); 1691 phy_write_1bit(db, PHY_DATA_1);
1695 phy_write_1bit(ioaddr, PHY_DATA_0, chip_id); 1692 phy_write_1bit(db, PHY_DATA_0);
1696 1693
1697 /* Send Phy address */ 1694 /* Send Phy address */
1698 for (i = 0x10; i > 0; i = i >> 1) 1695 for (i = 0x10; i > 0; i = i >> 1)
1699 phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id); 1696 phy_write_1bit(db, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0);
1700 1697
1701 /* Send register address */ 1698 /* Send register address */
1702 for (i = 0x10; i > 0; i = i >> 1) 1699 for (i = 0x10; i > 0; i = i >> 1)
1703 phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id); 1700 phy_write_1bit(db, offset & i ? PHY_DATA_1 : PHY_DATA_0);
1704 1701
1705 /* Skip transition state */ 1702 /* Skip transition state */
1706 phy_read_1bit(ioaddr, chip_id); 1703 phy_read_1bit(db);
1707 1704
1708 /* read 16bit data */ 1705 /* read 16bit data */
1709 for (phy_data = 0, i = 0; i < 16; i++) { 1706 for (phy_data = 0, i = 0; i < 16; i++) {
1710 phy_data <<= 1; 1707 phy_data <<= 1;
1711 phy_data |= phy_read_1bit(ioaddr, chip_id); 1708 phy_data |= phy_read_1bit(db);
1712 } 1709 }
1713 1710
1714 return phy_data; 1711 return phy_data;
1715} 1712}
1716 1713
1717static u16 phy_readby_cr10(unsigned long iobase, u8 phy_addr, u8 offset) 1714static u16 phy_readby_cr10(struct uli526x_board_info *db, u8 phy_addr,
1715 u8 offset)
1718{ 1716{
1719 unsigned long ioaddr,cr10_value; 1717 void __iomem *ioaddr = db->ioaddr;
1718 u32 cr10_value = phy_addr;
1720 1719
1721 ioaddr = iobase + DCR10; 1720 cr10_value = (cr10_value << 5) + offset;
1722 cr10_value = phy_addr; 1721 cr10_value = (cr10_value << 16) + 0x08000000;
1723 cr10_value = (cr10_value<<5) + offset; 1722 uw32(DCR10, cr10_value);
1724 cr10_value = (cr10_value<<16) + 0x08000000;
1725 outl(cr10_value,ioaddr);
1726 udelay(1); 1723 udelay(1);
1727 while(1) 1724 while (1) {
1728 { 1725 cr10_value = ur32(DCR10);
1729 cr10_value = inl(ioaddr); 1726 if (cr10_value & 0x10000000)
1730 if(cr10_value&0x10000000)
1731 break; 1727 break;
1732 } 1728 }
1733 return cr10_value & 0x0ffff; 1729 return cr10_value & 0x0ffff;
1734} 1730}
1735 1731
1736static void phy_writeby_cr10(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data) 1732static void phy_writeby_cr10(struct uli526x_board_info *db, u8 phy_addr,
1733 u8 offset, u16 phy_data)
1737{ 1734{
1738 unsigned long ioaddr,cr10_value; 1735 void __iomem *ioaddr = db->ioaddr;
1736 u32 cr10_value = phy_addr;
1739 1737
1740 ioaddr = iobase + DCR10; 1738 cr10_value = (cr10_value << 5) + offset;
1741 cr10_value = phy_addr; 1739 cr10_value = (cr10_value << 16) + 0x04000000 + phy_data;
1742 cr10_value = (cr10_value<<5) + offset; 1740 uw32(DCR10, cr10_value);
1743 cr10_value = (cr10_value<<16) + 0x04000000 + phy_data;
1744 outl(cr10_value,ioaddr);
1745 udelay(1); 1741 udelay(1);
1746} 1742}
1747/* 1743/*
1748 * Write one bit data to Phy Controller 1744 * Write one bit data to Phy Controller
1749 */ 1745 */
1750 1746
1751static void phy_write_1bit(unsigned long ioaddr, u32 phy_data, u32 chip_id) 1747static void phy_write_1bit(struct uli526x_board_info *db, u32 data)
1752{ 1748{
1753 outl(phy_data , ioaddr); /* MII Clock Low */ 1749 void __iomem *ioaddr = db->ioaddr;
1750
1751 uw32(DCR9, data); /* MII Clock Low */
1754 udelay(1); 1752 udelay(1);
1755 outl(phy_data | MDCLKH, ioaddr); /* MII Clock High */ 1753 uw32(DCR9, data | MDCLKH); /* MII Clock High */
1756 udelay(1); 1754 udelay(1);
1757 outl(phy_data , ioaddr); /* MII Clock Low */ 1755 uw32(DCR9, data); /* MII Clock Low */
1758 udelay(1); 1756 udelay(1);
1759} 1757}
1760 1758
@@ -1763,14 +1761,15 @@ static void phy_write_1bit(unsigned long ioaddr, u32 phy_data, u32 chip_id)
1763 * Read one bit phy data from PHY controller 1761 * Read one bit phy data from PHY controller
1764 */ 1762 */
1765 1763
1766static u16 phy_read_1bit(unsigned long ioaddr, u32 chip_id) 1764static u16 phy_read_1bit(struct uli526x_board_info *db)
1767{ 1765{
1766 void __iomem *ioaddr = db->ioaddr;
1768 u16 phy_data; 1767 u16 phy_data;
1769 1768
1770 outl(0x50000 , ioaddr); 1769 uw32(DCR9, 0x50000);
1771 udelay(1); 1770 udelay(1);
1772 phy_data = ( inl(ioaddr) >> 19 ) & 0x1; 1771 phy_data = (ur32(DCR9) >> 19) & 0x1;
1773 outl(0x40000 , ioaddr); 1772 uw32(DCR9, 0x40000);
1774 udelay(1); 1773 udelay(1);
1775 1774
1776 return phy_data; 1775 return phy_data;
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 2ac6fff0363a..4d1ffca83c82 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -400,9 +400,6 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
400 No hold time required! */ 400 No hold time required! */
401 iowrite32(0x00000001, ioaddr + PCIBusCfg); 401 iowrite32(0x00000001, ioaddr + PCIBusCfg);
402 402
403 dev->base_addr = (unsigned long)ioaddr;
404 dev->irq = irq;
405
406 np = netdev_priv(dev); 403 np = netdev_priv(dev);
407 np->pci_dev = pdev; 404 np->pci_dev = pdev;
408 np->chip_id = chip_idx; 405 np->chip_id = chip_idx;
@@ -635,17 +632,18 @@ static int netdev_open(struct net_device *dev)
635{ 632{
636 struct netdev_private *np = netdev_priv(dev); 633 struct netdev_private *np = netdev_priv(dev);
637 void __iomem *ioaddr = np->base_addr; 634 void __iomem *ioaddr = np->base_addr;
635 const int irq = np->pci_dev->irq;
638 int i; 636 int i;
639 637
640 iowrite32(0x00000001, ioaddr + PCIBusCfg); /* Reset */ 638 iowrite32(0x00000001, ioaddr + PCIBusCfg); /* Reset */
641 639
642 netif_device_detach(dev); 640 netif_device_detach(dev);
643 i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev); 641 i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
644 if (i) 642 if (i)
645 goto out_err; 643 goto out_err;
646 644
647 if (debug > 1) 645 if (debug > 1)
648 netdev_dbg(dev, "w89c840_open() irq %d\n", dev->irq); 646 netdev_dbg(dev, "w89c840_open() irq %d\n", irq);
649 647
650 if((i=alloc_ringdesc(dev))) 648 if((i=alloc_ringdesc(dev)))
651 goto out_err; 649 goto out_err;
@@ -932,6 +930,7 @@ static void tx_timeout(struct net_device *dev)
932{ 930{
933 struct netdev_private *np = netdev_priv(dev); 931 struct netdev_private *np = netdev_priv(dev);
934 void __iomem *ioaddr = np->base_addr; 932 void __iomem *ioaddr = np->base_addr;
933 const int irq = np->pci_dev->irq;
935 934
936 dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n", 935 dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n",
937 ioread32(ioaddr + IntrStatus)); 936 ioread32(ioaddr + IntrStatus));
@@ -951,7 +950,7 @@ static void tx_timeout(struct net_device *dev)
951 np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes); 950 np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
952 printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr+0x4C)); 951 printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr+0x4C));
953 952
954 disable_irq(dev->irq); 953 disable_irq(irq);
955 spin_lock_irq(&np->lock); 954 spin_lock_irq(&np->lock);
956 /* 955 /*
957 * Under high load dirty_tx and the internal tx descriptor pointer 956 * Under high load dirty_tx and the internal tx descriptor pointer
@@ -966,7 +965,7 @@ static void tx_timeout(struct net_device *dev)
966 init_rxtx_rings(dev); 965 init_rxtx_rings(dev);
967 init_registers(dev); 966 init_registers(dev);
968 spin_unlock_irq(&np->lock); 967 spin_unlock_irq(&np->lock);
969 enable_irq(dev->irq); 968 enable_irq(irq);
970 969
971 netif_wake_queue(dev); 970 netif_wake_queue(dev);
972 dev->trans_start = jiffies; /* prevent tx timeout */ 971 dev->trans_start = jiffies; /* prevent tx timeout */
@@ -1500,7 +1499,7 @@ static int netdev_close(struct net_device *dev)
1500 iowrite32(0x0000, ioaddr + IntrEnable); 1499 iowrite32(0x0000, ioaddr + IntrEnable);
1501 spin_unlock_irq(&np->lock); 1500 spin_unlock_irq(&np->lock);
1502 1501
1503 free_irq(dev->irq, dev); 1502 free_irq(np->pci_dev->irq, dev);
1504 wmb(); 1503 wmb();
1505 netif_device_attach(dev); 1504 netif_device_attach(dev);
1506 1505
@@ -1589,7 +1588,7 @@ static int w840_suspend (struct pci_dev *pdev, pm_message_t state)
1589 iowrite32(0, ioaddr + IntrEnable); 1588 iowrite32(0, ioaddr + IntrEnable);
1590 spin_unlock_irq(&np->lock); 1589 spin_unlock_irq(&np->lock);
1591 1590
1592 synchronize_irq(dev->irq); 1591 synchronize_irq(np->pci_dev->irq);
1593 netif_tx_disable(dev); 1592 netif_tx_disable(dev);
1594 1593
1595 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff; 1594 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index fdb329fe6e8e..138bf83bc98e 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -41,7 +41,9 @@ MODULE_DESCRIPTION("Xircom Cardbus ethernet driver");
41MODULE_AUTHOR("Arjan van de Ven <arjanv@redhat.com>"); 41MODULE_AUTHOR("Arjan van de Ven <arjanv@redhat.com>");
42MODULE_LICENSE("GPL"); 42MODULE_LICENSE("GPL");
43 43
44 44#define xw32(reg, val) iowrite32(val, ioaddr + (reg))
45#define xr32(reg) ioread32(ioaddr + (reg))
46#define xr8(reg) ioread8(ioaddr + (reg))
45 47
46/* IO registers on the card, offsets */ 48/* IO registers on the card, offsets */
47#define CSR0 0x00 49#define CSR0 0x00
@@ -83,7 +85,7 @@ struct xircom_private {
83 85
84 struct sk_buff *tx_skb[4]; 86 struct sk_buff *tx_skb[4];
85 87
86 unsigned long io_port; 88 void __iomem *ioaddr;
87 int open; 89 int open;
88 90
89 /* transmit_used is the rotating counter that indicates which transmit 91 /* transmit_used is the rotating counter that indicates which transmit
@@ -137,7 +139,7 @@ static int link_status(struct xircom_private *card);
137 139
138 140
139static DEFINE_PCI_DEVICE_TABLE(xircom_pci_table) = { 141static DEFINE_PCI_DEVICE_TABLE(xircom_pci_table) = {
140 {0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID,}, 142 { PCI_VDEVICE(XIRCOM, 0x0003), },
141 {0,}, 143 {0,},
142}; 144};
143MODULE_DEVICE_TABLE(pci, xircom_pci_table); 145MODULE_DEVICE_TABLE(pci, xircom_pci_table);
@@ -146,9 +148,7 @@ static struct pci_driver xircom_ops = {
146 .name = "xircom_cb", 148 .name = "xircom_cb",
147 .id_table = xircom_pci_table, 149 .id_table = xircom_pci_table,
148 .probe = xircom_probe, 150 .probe = xircom_probe,
149 .remove = xircom_remove, 151 .remove = __devexit_p(xircom_remove),
150 .suspend =NULL,
151 .resume =NULL
152}; 152};
153 153
154 154
@@ -192,15 +192,18 @@ static const struct net_device_ops netdev_ops = {
192 */ 192 */
193static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id) 193static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id)
194{ 194{
195 struct device *d = &pdev->dev;
195 struct net_device *dev = NULL; 196 struct net_device *dev = NULL;
196 struct xircom_private *private; 197 struct xircom_private *private;
197 unsigned long flags; 198 unsigned long flags;
198 unsigned short tmp16; 199 unsigned short tmp16;
200 int rc;
199 201
200 /* First do the PCI initialisation */ 202 /* First do the PCI initialisation */
201 203
202 if (pci_enable_device(pdev)) 204 rc = pci_enable_device(pdev);
203 return -ENODEV; 205 if (rc < 0)
206 goto out;
204 207
205 /* disable all powermanagement */ 208 /* disable all powermanagement */
206 pci_write_config_dword(pdev, PCI_POWERMGMT, 0x0000); 209 pci_write_config_dword(pdev, PCI_POWERMGMT, 0x0000);
@@ -211,11 +214,13 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
211 pci_read_config_word (pdev,PCI_STATUS, &tmp16); 214 pci_read_config_word (pdev,PCI_STATUS, &tmp16);
212 pci_write_config_word (pdev, PCI_STATUS,tmp16); 215 pci_write_config_word (pdev, PCI_STATUS,tmp16);
213 216
214 if (!request_region(pci_resource_start(pdev, 0), 128, "xircom_cb")) { 217 rc = pci_request_regions(pdev, "xircom_cb");
218 if (rc < 0) {
215 pr_err("%s: failed to allocate io-region\n", __func__); 219 pr_err("%s: failed to allocate io-region\n", __func__);
216 return -ENODEV; 220 goto err_disable;
217 } 221 }
218 222
223 rc = -ENOMEM;
219 /* 224 /*
220 Before changing the hardware, allocate the memory. 225 Before changing the hardware, allocate the memory.
221 This way, we can fail gracefully if not enough memory 226 This way, we can fail gracefully if not enough memory
@@ -223,17 +228,21 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
223 */ 228 */
224 dev = alloc_etherdev(sizeof(struct xircom_private)); 229 dev = alloc_etherdev(sizeof(struct xircom_private));
225 if (!dev) 230 if (!dev)
226 goto device_fail; 231 goto err_release;
227 232
228 private = netdev_priv(dev); 233 private = netdev_priv(dev);
229 234
230 /* Allocate the send/receive buffers */ 235 /* Allocate the send/receive buffers */
231 private->rx_buffer = pci_alloc_consistent(pdev,8192,&private->rx_dma_handle); 236 private->rx_buffer = dma_alloc_coherent(d, 8192,
237 &private->rx_dma_handle,
238 GFP_KERNEL);
232 if (private->rx_buffer == NULL) { 239 if (private->rx_buffer == NULL) {
233 pr_err("%s: no memory for rx buffer\n", __func__); 240 pr_err("%s: no memory for rx buffer\n", __func__);
234 goto rx_buf_fail; 241 goto rx_buf_fail;
235 } 242 }
236 private->tx_buffer = pci_alloc_consistent(pdev,8192,&private->tx_dma_handle); 243 private->tx_buffer = dma_alloc_coherent(d, 8192,
244 &private->tx_dma_handle,
245 GFP_KERNEL);
237 if (private->tx_buffer == NULL) { 246 if (private->tx_buffer == NULL) {
238 pr_err("%s: no memory for tx buffer\n", __func__); 247 pr_err("%s: no memory for tx buffer\n", __func__);
239 goto tx_buf_fail; 248 goto tx_buf_fail;
@@ -244,10 +253,13 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
244 253
245 private->dev = dev; 254 private->dev = dev;
246 private->pdev = pdev; 255 private->pdev = pdev;
247 private->io_port = pci_resource_start(pdev, 0); 256
257 /* IO range. */
258 private->ioaddr = pci_iomap(pdev, 0, 0);
259 if (!private->ioaddr)
260 goto reg_fail;
261
248 spin_lock_init(&private->lock); 262 spin_lock_init(&private->lock);
249 dev->irq = pdev->irq;
250 dev->base_addr = private->io_port;
251 263
252 initialize_card(private); 264 initialize_card(private);
253 read_mac_address(private); 265 read_mac_address(private);
@@ -256,9 +268,10 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
256 dev->netdev_ops = &netdev_ops; 268 dev->netdev_ops = &netdev_ops;
257 pci_set_drvdata(pdev, dev); 269 pci_set_drvdata(pdev, dev);
258 270
259 if (register_netdev(dev)) { 271 rc = register_netdev(dev);
272 if (rc < 0) {
260 pr_err("%s: netdevice registration failed\n", __func__); 273 pr_err("%s: netdevice registration failed\n", __func__);
261 goto reg_fail; 274 goto err_unmap;
262 } 275 }
263 276
264 netdev_info(dev, "Xircom cardbus revision %i at irq %i\n", 277 netdev_info(dev, "Xircom cardbus revision %i at irq %i\n",
@@ -273,17 +286,23 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
273 spin_unlock_irqrestore(&private->lock,flags); 286 spin_unlock_irqrestore(&private->lock,flags);
274 287
275 trigger_receive(private); 288 trigger_receive(private);
289out:
290 return rc;
276 291
277 return 0; 292err_unmap:
278 293 pci_iounmap(pdev, private->ioaddr);
279reg_fail: 294reg_fail:
280 kfree(private->tx_buffer); 295 pci_set_drvdata(pdev, NULL);
296 dma_free_coherent(d, 8192, private->tx_buffer, private->tx_dma_handle);
281tx_buf_fail: 297tx_buf_fail:
282 kfree(private->rx_buffer); 298 dma_free_coherent(d, 8192, private->rx_buffer, private->rx_dma_handle);
283rx_buf_fail: 299rx_buf_fail:
284 free_netdev(dev); 300 free_netdev(dev);
285device_fail: 301err_release:
286 return -ENODEV; 302 pci_release_regions(pdev);
303err_disable:
304 pci_disable_device(pdev);
305 goto out;
287} 306}
288 307
289 308
@@ -297,25 +316,28 @@ static void __devexit xircom_remove(struct pci_dev *pdev)
297{ 316{
298 struct net_device *dev = pci_get_drvdata(pdev); 317 struct net_device *dev = pci_get_drvdata(pdev);
299 struct xircom_private *card = netdev_priv(dev); 318 struct xircom_private *card = netdev_priv(dev);
319 struct device *d = &pdev->dev;
300 320
301 pci_free_consistent(pdev,8192,card->rx_buffer,card->rx_dma_handle);
302 pci_free_consistent(pdev,8192,card->tx_buffer,card->tx_dma_handle);
303
304 release_region(dev->base_addr, 128);
305 unregister_netdev(dev); 321 unregister_netdev(dev);
306 free_netdev(dev); 322 pci_iounmap(pdev, card->ioaddr);
307 pci_set_drvdata(pdev, NULL); 323 pci_set_drvdata(pdev, NULL);
324 dma_free_coherent(d, 8192, card->tx_buffer, card->tx_dma_handle);
325 dma_free_coherent(d, 8192, card->rx_buffer, card->rx_dma_handle);
326 free_netdev(dev);
327 pci_release_regions(pdev);
328 pci_disable_device(pdev);
308} 329}
309 330
310static irqreturn_t xircom_interrupt(int irq, void *dev_instance) 331static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
311{ 332{
312 struct net_device *dev = (struct net_device *) dev_instance; 333 struct net_device *dev = (struct net_device *) dev_instance;
313 struct xircom_private *card = netdev_priv(dev); 334 struct xircom_private *card = netdev_priv(dev);
335 void __iomem *ioaddr = card->ioaddr;
314 unsigned int status; 336 unsigned int status;
315 int i; 337 int i;
316 338
317 spin_lock(&card->lock); 339 spin_lock(&card->lock);
318 status = inl(card->io_port+CSR5); 340 status = xr32(CSR5);
319 341
320#if defined DEBUG && DEBUG > 1 342#if defined DEBUG && DEBUG > 1
321 print_binary(status); 343 print_binary(status);
@@ -345,7 +367,7 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
345 /* Clear all remaining interrupts */ 367 /* Clear all remaining interrupts */
346 status |= 0xffffffff; /* FIXME: make this clear only the 368 status |= 0xffffffff; /* FIXME: make this clear only the
347 real existing bits */ 369 real existing bits */
348 outl(status,card->io_port+CSR5); 370 xw32(CSR5, status);
349 371
350 372
351 for (i=0;i<NUMDESCRIPTORS;i++) 373 for (i=0;i<NUMDESCRIPTORS;i++)
@@ -423,11 +445,11 @@ static netdev_tx_t xircom_start_xmit(struct sk_buff *skb,
423static int xircom_open(struct net_device *dev) 445static int xircom_open(struct net_device *dev)
424{ 446{
425 struct xircom_private *xp = netdev_priv(dev); 447 struct xircom_private *xp = netdev_priv(dev);
448 const int irq = xp->pdev->irq;
426 int retval; 449 int retval;
427 450
428 netdev_info(dev, "xircom cardbus adaptor found, using irq %i\n", 451 netdev_info(dev, "xircom cardbus adaptor found, using irq %i\n", irq);
429 dev->irq); 452 retval = request_irq(irq, xircom_interrupt, IRQF_SHARED, dev->name, dev);
430 retval = request_irq(dev->irq, xircom_interrupt, IRQF_SHARED, dev->name, dev);
431 if (retval) 453 if (retval)
432 return retval; 454 return retval;
433 455
@@ -459,7 +481,7 @@ static int xircom_close(struct net_device *dev)
459 spin_unlock_irqrestore(&card->lock,flags); 481 spin_unlock_irqrestore(&card->lock,flags);
460 482
461 card->open = 0; 483 card->open = 0;
462 free_irq(dev->irq,dev); 484 free_irq(card->pdev->irq, dev);
463 485
464 return 0; 486 return 0;
465 487
@@ -469,35 +491,39 @@ static int xircom_close(struct net_device *dev)
469#ifdef CONFIG_NET_POLL_CONTROLLER 491#ifdef CONFIG_NET_POLL_CONTROLLER
470static void xircom_poll_controller(struct net_device *dev) 492static void xircom_poll_controller(struct net_device *dev)
471{ 493{
472 disable_irq(dev->irq); 494 struct xircom_private *xp = netdev_priv(dev);
473 xircom_interrupt(dev->irq, dev); 495 const int irq = xp->pdev->irq;
474 enable_irq(dev->irq); 496
497 disable_irq(irq);
498 xircom_interrupt(irq, dev);
499 enable_irq(irq);
475} 500}
476#endif 501#endif
477 502
478 503
479static void initialize_card(struct xircom_private *card) 504static void initialize_card(struct xircom_private *card)
480{ 505{
481 unsigned int val; 506 void __iomem *ioaddr = card->ioaddr;
482 unsigned long flags; 507 unsigned long flags;
508 u32 val;
483 509
484 spin_lock_irqsave(&card->lock, flags); 510 spin_lock_irqsave(&card->lock, flags);
485 511
486 /* First: reset the card */ 512 /* First: reset the card */
487 val = inl(card->io_port + CSR0); 513 val = xr32(CSR0);
488 val |= 0x01; /* Software reset */ 514 val |= 0x01; /* Software reset */
489 outl(val, card->io_port + CSR0); 515 xw32(CSR0, val);
490 516
491 udelay(100); /* give the card some time to reset */ 517 udelay(100); /* give the card some time to reset */
492 518
493 val = inl(card->io_port + CSR0); 519 val = xr32(CSR0);
494 val &= ~0x01; /* disable Software reset */ 520 val &= ~0x01; /* disable Software reset */
495 outl(val, card->io_port + CSR0); 521 xw32(CSR0, val);
496 522
497 523
498 val = 0; /* Value 0x00 is a safe and conservative value 524 val = 0; /* Value 0x00 is a safe and conservative value
499 for the PCI configuration settings */ 525 for the PCI configuration settings */
500 outl(val, card->io_port + CSR0); 526 xw32(CSR0, val);
501 527
502 528
503 disable_all_interrupts(card); 529 disable_all_interrupts(card);
@@ -515,10 +541,9 @@ ignored; I chose zero.
515*/ 541*/
516static void trigger_transmit(struct xircom_private *card) 542static void trigger_transmit(struct xircom_private *card)
517{ 543{
518 unsigned int val; 544 void __iomem *ioaddr = card->ioaddr;
519 545
520 val = 0; 546 xw32(CSR1, 0);
521 outl(val, card->io_port + CSR1);
522} 547}
523 548
524/* 549/*
@@ -530,10 +555,9 @@ ignored; I chose zero.
530*/ 555*/
531static void trigger_receive(struct xircom_private *card) 556static void trigger_receive(struct xircom_private *card)
532{ 557{
533 unsigned int val; 558 void __iomem *ioaddr = card->ioaddr;
534 559
535 val = 0; 560 xw32(CSR2, 0);
536 outl(val, card->io_port + CSR2);
537} 561}
538 562
539/* 563/*
@@ -542,6 +566,7 @@ descriptors and programs the addresses into the card.
542*/ 566*/
543static void setup_descriptors(struct xircom_private *card) 567static void setup_descriptors(struct xircom_private *card)
544{ 568{
569 void __iomem *ioaddr = card->ioaddr;
545 u32 address; 570 u32 address;
546 int i; 571 int i;
547 572
@@ -571,7 +596,7 @@ static void setup_descriptors(struct xircom_private *card)
571 wmb(); 596 wmb();
572 /* Write the receive descriptor ring address to the card */ 597 /* Write the receive descriptor ring address to the card */
573 address = card->rx_dma_handle; 598 address = card->rx_dma_handle;
574 outl(address, card->io_port + CSR3); /* Receive descr list address */ 599 xw32(CSR3, address); /* Receive descr list address */
575 600
576 601
577 /* transmit descriptors */ 602 /* transmit descriptors */
@@ -596,7 +621,7 @@ static void setup_descriptors(struct xircom_private *card)
596 wmb(); 621 wmb();
597 /* wite the transmit descriptor ring to the card */ 622 /* wite the transmit descriptor ring to the card */
598 address = card->tx_dma_handle; 623 address = card->tx_dma_handle;
599 outl(address, card->io_port + CSR4); /* xmit descr list address */ 624 xw32(CSR4, address); /* xmit descr list address */
600} 625}
601 626
602/* 627/*
@@ -605,11 +630,12 @@ valid by setting the address in the card to 0x00.
605*/ 630*/
606static void remove_descriptors(struct xircom_private *card) 631static void remove_descriptors(struct xircom_private *card)
607{ 632{
633 void __iomem *ioaddr = card->ioaddr;
608 unsigned int val; 634 unsigned int val;
609 635
610 val = 0; 636 val = 0;
611 outl(val, card->io_port + CSR3); /* Receive descriptor address */ 637 xw32(CSR3, val); /* Receive descriptor address */
612 outl(val, card->io_port + CSR4); /* Send descriptor address */ 638 xw32(CSR4, val); /* Send descriptor address */
613} 639}
614 640
615/* 641/*
@@ -620,17 +646,17 @@ This function also clears the status-bit.
620*/ 646*/
621static int link_status_changed(struct xircom_private *card) 647static int link_status_changed(struct xircom_private *card)
622{ 648{
649 void __iomem *ioaddr = card->ioaddr;
623 unsigned int val; 650 unsigned int val;
624 651
625 val = inl(card->io_port + CSR5); /* Status register */ 652 val = xr32(CSR5); /* Status register */
626 653 if (!(val & (1 << 27))) /* no change */
627 if ((val & (1 << 27)) == 0) /* no change */
628 return 0; 654 return 0;
629 655
630 /* clear the event by writing a 1 to the bit in the 656 /* clear the event by writing a 1 to the bit in the
631 status register. */ 657 status register. */
632 val = (1 << 27); 658 val = (1 << 27);
633 outl(val, card->io_port + CSR5); 659 xw32(CSR5, val);
634 660
635 return 1; 661 return 1;
636} 662}
@@ -642,11 +668,9 @@ in a non-stopped state.
642*/ 668*/
643static int transmit_active(struct xircom_private *card) 669static int transmit_active(struct xircom_private *card)
644{ 670{
645 unsigned int val; 671 void __iomem *ioaddr = card->ioaddr;
646
647 val = inl(card->io_port + CSR5); /* Status register */
648 672
649 if ((val & (7 << 20)) == 0) /* transmitter disabled */ 673 if (!(xr32(CSR5) & (7 << 20))) /* transmitter disabled */
650 return 0; 674 return 0;
651 675
652 return 1; 676 return 1;
@@ -658,11 +682,9 @@ in a non-stopped state.
658*/ 682*/
659static int receive_active(struct xircom_private *card) 683static int receive_active(struct xircom_private *card)
660{ 684{
661 unsigned int val; 685 void __iomem *ioaddr = card->ioaddr;
662
663 val = inl(card->io_port + CSR5); /* Status register */
664 686
665 if ((val & (7 << 17)) == 0) /* receiver disabled */ 687 if (!(xr32(CSR5) & (7 << 17))) /* receiver disabled */
666 return 0; 688 return 0;
667 689
668 return 1; 690 return 1;
@@ -680,10 +702,11 @@ must be called with the lock held and interrupts disabled.
680*/ 702*/
681static void activate_receiver(struct xircom_private *card) 703static void activate_receiver(struct xircom_private *card)
682{ 704{
705 void __iomem *ioaddr = card->ioaddr;
683 unsigned int val; 706 unsigned int val;
684 int counter; 707 int counter;
685 708
686 val = inl(card->io_port + CSR6); /* Operation mode */ 709 val = xr32(CSR6); /* Operation mode */
687 710
688 /* If the "active" bit is set and the receiver is already 711 /* If the "active" bit is set and the receiver is already
689 active, no need to do the expensive thing */ 712 active, no need to do the expensive thing */
@@ -692,7 +715,7 @@ static void activate_receiver(struct xircom_private *card)
692 715
693 716
694 val = val & ~2; /* disable the receiver */ 717 val = val & ~2; /* disable the receiver */
695 outl(val, card->io_port + CSR6); 718 xw32(CSR6, val);
696 719
697 counter = 10; 720 counter = 10;
698 while (counter > 0) { 721 while (counter > 0) {
@@ -706,9 +729,9 @@ static void activate_receiver(struct xircom_private *card)
706 } 729 }
707 730
708 /* enable the receiver */ 731 /* enable the receiver */
709 val = inl(card->io_port + CSR6); /* Operation mode */ 732 val = xr32(CSR6); /* Operation mode */
710 val = val | 2; /* enable the receiver */ 733 val = val | 2; /* enable the receiver */
711 outl(val, card->io_port + CSR6); 734 xw32(CSR6, val);
712 735
713 /* now wait for the card to activate again */ 736 /* now wait for the card to activate again */
714 counter = 10; 737 counter = 10;
@@ -733,12 +756,13 @@ must be called with the lock held and interrupts disabled.
733*/ 756*/
734static void deactivate_receiver(struct xircom_private *card) 757static void deactivate_receiver(struct xircom_private *card)
735{ 758{
759 void __iomem *ioaddr = card->ioaddr;
736 unsigned int val; 760 unsigned int val;
737 int counter; 761 int counter;
738 762
739 val = inl(card->io_port + CSR6); /* Operation mode */ 763 val = xr32(CSR6); /* Operation mode */
740 val = val & ~2; /* disable the receiver */ 764 val = val & ~2; /* disable the receiver */
741 outl(val, card->io_port + CSR6); 765 xw32(CSR6, val);
742 766
743 counter = 10; 767 counter = 10;
744 while (counter > 0) { 768 while (counter > 0) {
@@ -765,10 +789,11 @@ must be called with the lock held and interrupts disabled.
765*/ 789*/
766static void activate_transmitter(struct xircom_private *card) 790static void activate_transmitter(struct xircom_private *card)
767{ 791{
792 void __iomem *ioaddr = card->ioaddr;
768 unsigned int val; 793 unsigned int val;
769 int counter; 794 int counter;
770 795
771 val = inl(card->io_port + CSR6); /* Operation mode */ 796 val = xr32(CSR6); /* Operation mode */
772 797
773 /* If the "active" bit is set and the receiver is already 798 /* If the "active" bit is set and the receiver is already
774 active, no need to do the expensive thing */ 799 active, no need to do the expensive thing */
@@ -776,7 +801,7 @@ static void activate_transmitter(struct xircom_private *card)
776 return; 801 return;
777 802
778 val = val & ~(1 << 13); /* disable the transmitter */ 803 val = val & ~(1 << 13); /* disable the transmitter */
779 outl(val, card->io_port + CSR6); 804 xw32(CSR6, val);
780 805
781 counter = 10; 806 counter = 10;
782 while (counter > 0) { 807 while (counter > 0) {
@@ -791,9 +816,9 @@ static void activate_transmitter(struct xircom_private *card)
791 } 816 }
792 817
793 /* enable the transmitter */ 818 /* enable the transmitter */
794 val = inl(card->io_port + CSR6); /* Operation mode */ 819 val = xr32(CSR6); /* Operation mode */
795 val = val | (1 << 13); /* enable the transmitter */ 820 val = val | (1 << 13); /* enable the transmitter */
796 outl(val, card->io_port + CSR6); 821 xw32(CSR6, val);
797 822
798 /* now wait for the card to activate again */ 823 /* now wait for the card to activate again */
799 counter = 10; 824 counter = 10;
@@ -818,12 +843,13 @@ must be called with the lock held and interrupts disabled.
818*/ 843*/
819static void deactivate_transmitter(struct xircom_private *card) 844static void deactivate_transmitter(struct xircom_private *card)
820{ 845{
846 void __iomem *ioaddr = card->ioaddr;
821 unsigned int val; 847 unsigned int val;
822 int counter; 848 int counter;
823 849
824 val = inl(card->io_port + CSR6); /* Operation mode */ 850 val = xr32(CSR6); /* Operation mode */
825 val = val & ~2; /* disable the transmitter */ 851 val = val & ~2; /* disable the transmitter */
826 outl(val, card->io_port + CSR6); 852 xw32(CSR6, val);
827 853
828 counter = 20; 854 counter = 20;
829 while (counter > 0) { 855 while (counter > 0) {
@@ -846,11 +872,12 @@ must be called with the lock held and interrupts disabled.
846*/ 872*/
847static void enable_transmit_interrupt(struct xircom_private *card) 873static void enable_transmit_interrupt(struct xircom_private *card)
848{ 874{
875 void __iomem *ioaddr = card->ioaddr;
849 unsigned int val; 876 unsigned int val;
850 877
851 val = inl(card->io_port + CSR7); /* Interrupt enable register */ 878 val = xr32(CSR7); /* Interrupt enable register */
852 val |= 1; /* enable the transmit interrupt */ 879 val |= 1; /* enable the transmit interrupt */
853 outl(val, card->io_port + CSR7); 880 xw32(CSR7, val);
854} 881}
855 882
856 883
@@ -861,11 +888,12 @@ must be called with the lock held and interrupts disabled.
861*/ 888*/
862static void enable_receive_interrupt(struct xircom_private *card) 889static void enable_receive_interrupt(struct xircom_private *card)
863{ 890{
891 void __iomem *ioaddr = card->ioaddr;
864 unsigned int val; 892 unsigned int val;
865 893
866 val = inl(card->io_port + CSR7); /* Interrupt enable register */ 894 val = xr32(CSR7); /* Interrupt enable register */
867 val = val | (1 << 6); /* enable the receive interrupt */ 895 val = val | (1 << 6); /* enable the receive interrupt */
868 outl(val, card->io_port + CSR7); 896 xw32(CSR7, val);
869} 897}
870 898
871/* 899/*
@@ -875,11 +903,12 @@ must be called with the lock held and interrupts disabled.
875*/ 903*/
876static void enable_link_interrupt(struct xircom_private *card) 904static void enable_link_interrupt(struct xircom_private *card)
877{ 905{
906 void __iomem *ioaddr = card->ioaddr;
878 unsigned int val; 907 unsigned int val;
879 908
880 val = inl(card->io_port + CSR7); /* Interrupt enable register */ 909 val = xr32(CSR7); /* Interrupt enable register */
881 val = val | (1 << 27); /* enable the link status chage interrupt */ 910 val = val | (1 << 27); /* enable the link status chage interrupt */
882 outl(val, card->io_port + CSR7); 911 xw32(CSR7, val);
883} 912}
884 913
885 914
@@ -891,10 +920,9 @@ must be called with the lock held and interrupts disabled.
891*/ 920*/
892static void disable_all_interrupts(struct xircom_private *card) 921static void disable_all_interrupts(struct xircom_private *card)
893{ 922{
894 unsigned int val; 923 void __iomem *ioaddr = card->ioaddr;
895 924
896 val = 0; /* disable all interrupts */ 925 xw32(CSR7, 0);
897 outl(val, card->io_port + CSR7);
898} 926}
899 927
900/* 928/*
@@ -904,9 +932,10 @@ must be called with the lock held and interrupts disabled.
904*/ 932*/
905static void enable_common_interrupts(struct xircom_private *card) 933static void enable_common_interrupts(struct xircom_private *card)
906{ 934{
935 void __iomem *ioaddr = card->ioaddr;
907 unsigned int val; 936 unsigned int val;
908 937
909 val = inl(card->io_port + CSR7); /* Interrupt enable register */ 938 val = xr32(CSR7); /* Interrupt enable register */
910 val |= (1<<16); /* Normal Interrupt Summary */ 939 val |= (1<<16); /* Normal Interrupt Summary */
911 val |= (1<<15); /* Abnormal Interrupt Summary */ 940 val |= (1<<15); /* Abnormal Interrupt Summary */
912 val |= (1<<13); /* Fatal bus error */ 941 val |= (1<<13); /* Fatal bus error */
@@ -915,7 +944,7 @@ static void enable_common_interrupts(struct xircom_private *card)
915 val |= (1<<5); /* Transmit Underflow */ 944 val |= (1<<5); /* Transmit Underflow */
916 val |= (1<<2); /* Transmit Buffer Unavailable */ 945 val |= (1<<2); /* Transmit Buffer Unavailable */
917 val |= (1<<1); /* Transmit Process Stopped */ 946 val |= (1<<1); /* Transmit Process Stopped */
918 outl(val, card->io_port + CSR7); 947 xw32(CSR7, val);
919} 948}
920 949
921/* 950/*
@@ -925,11 +954,12 @@ must be called with the lock held and interrupts disabled.
925*/ 954*/
926static int enable_promisc(struct xircom_private *card) 955static int enable_promisc(struct xircom_private *card)
927{ 956{
957 void __iomem *ioaddr = card->ioaddr;
928 unsigned int val; 958 unsigned int val;
929 959
930 val = inl(card->io_port + CSR6); 960 val = xr32(CSR6);
931 val = val | (1 << 6); 961 val = val | (1 << 6);
932 outl(val, card->io_port + CSR6); 962 xw32(CSR6, val);
933 963
934 return 1; 964 return 1;
935} 965}
@@ -944,13 +974,16 @@ Must be called in locked state with interrupts disabled
944*/ 974*/
945static int link_status(struct xircom_private *card) 975static int link_status(struct xircom_private *card)
946{ 976{
947 unsigned int val; 977 void __iomem *ioaddr = card->ioaddr;
978 u8 val;
948 979
949 val = inb(card->io_port + CSR12); 980 val = xr8(CSR12);
950 981
951 if (!(val&(1<<2))) /* bit 2 is 0 for 10mbit link, 1 for not an 10mbit link */ 982 /* bit 2 is 0 for 10mbit link, 1 for not an 10mbit link */
983 if (!(val & (1 << 2)))
952 return 10; 984 return 10;
953 if (!(val&(1<<1))) /* bit 1 is 0 for 100mbit link, 1 for not an 100mbit link */ 985 /* bit 1 is 0 for 100mbit link, 1 for not an 100mbit link */
986 if (!(val & (1 << 1)))
954 return 100; 987 return 100;
955 988
956 /* If we get here -> no link at all */ 989 /* If we get here -> no link at all */
@@ -969,29 +1002,31 @@ static int link_status(struct xircom_private *card)
969 */ 1002 */
970static void read_mac_address(struct xircom_private *card) 1003static void read_mac_address(struct xircom_private *card)
971{ 1004{
972 unsigned char j, tuple, link, data_id, data_count; 1005 void __iomem *ioaddr = card->ioaddr;
973 unsigned long flags; 1006 unsigned long flags;
1007 u8 link;
974 int i; 1008 int i;
975 1009
976 spin_lock_irqsave(&card->lock, flags); 1010 spin_lock_irqsave(&card->lock, flags);
977 1011
978 outl(1 << 12, card->io_port + CSR9); /* enable boot rom access */ 1012 xw32(CSR9, 1 << 12); /* enable boot rom access */
979 for (i = 0x100; i < 0x1f7; i += link + 2) { 1013 for (i = 0x100; i < 0x1f7; i += link + 2) {
980 outl(i, card->io_port + CSR10); 1014 u8 tuple, data_id, data_count;
981 tuple = inl(card->io_port + CSR9) & 0xff; 1015
982 outl(i + 1, card->io_port + CSR10); 1016 xw32(CSR10, i);
983 link = inl(card->io_port + CSR9) & 0xff; 1017 tuple = xr32(CSR9);
984 outl(i + 2, card->io_port + CSR10); 1018 xw32(CSR10, i + 1);
985 data_id = inl(card->io_port + CSR9) & 0xff; 1019 link = xr32(CSR9);
986 outl(i + 3, card->io_port + CSR10); 1020 xw32(CSR10, i + 2);
987 data_count = inl(card->io_port + CSR9) & 0xff; 1021 data_id = xr32(CSR9);
1022 xw32(CSR10, i + 3);
1023 data_count = xr32(CSR9);
988 if ((tuple == 0x22) && (data_id == 0x04) && (data_count == 0x06)) { 1024 if ((tuple == 0x22) && (data_id == 0x04) && (data_count == 0x06)) {
989 /* 1025 int j;
990 * This is it. We have the data we want. 1026
991 */
992 for (j = 0; j < 6; j++) { 1027 for (j = 0; j < 6; j++) {
993 outl(i + j + 4, card->io_port + CSR10); 1028 xw32(CSR10, i + j + 4);
994 card->dev->dev_addr[j] = inl(card->io_port + CSR9) & 0xff; 1029 card->dev->dev_addr[j] = xr32(CSR9) & 0xff;
995 } 1030 }
996 break; 1031 break;
997 } else if (link == 0) { 1032 } else if (link == 0) {
@@ -1010,6 +1045,7 @@ static void read_mac_address(struct xircom_private *card)
1010 */ 1045 */
1011static void transceiver_voodoo(struct xircom_private *card) 1046static void transceiver_voodoo(struct xircom_private *card)
1012{ 1047{
1048 void __iomem *ioaddr = card->ioaddr;
1013 unsigned long flags; 1049 unsigned long flags;
1014 1050
1015 /* disable all powermanagement */ 1051 /* disable all powermanagement */
@@ -1019,14 +1055,14 @@ static void transceiver_voodoo(struct xircom_private *card)
1019 1055
1020 spin_lock_irqsave(&card->lock, flags); 1056 spin_lock_irqsave(&card->lock, flags);
1021 1057
1022 outl(0x0008, card->io_port + CSR15); 1058 xw32(CSR15, 0x0008);
1023 udelay(25); 1059 udelay(25);
1024 outl(0xa8050000, card->io_port + CSR15); 1060 xw32(CSR15, 0xa8050000);
1025 udelay(25); 1061 udelay(25);
1026 outl(0xa00f0000, card->io_port + CSR15); 1062 xw32(CSR15, 0xa00f0000);
1027 udelay(25); 1063 udelay(25);
1028 1064
1029 spin_unlock_irqrestore(&card->lock, flags); 1065 spin_unlock_irqrestore(&card->lock, flags);
1030 1066
1031 netif_start_queue(card->dev); 1067 netif_start_queue(card->dev);
1032} 1068}
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 2e09edb9cdf8..a059f0c27e28 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -16,6 +16,13 @@
16#include "dl2k.h" 16#include "dl2k.h"
17#include <linux/dma-mapping.h> 17#include <linux/dma-mapping.h>
18 18
19#define dw32(reg, val) iowrite32(val, ioaddr + (reg))
20#define dw16(reg, val) iowrite16(val, ioaddr + (reg))
21#define dw8(reg, val) iowrite8(val, ioaddr + (reg))
22#define dr32(reg) ioread32(ioaddr + (reg))
23#define dr16(reg) ioread16(ioaddr + (reg))
24#define dr8(reg) ioread8(ioaddr + (reg))
25
19static char version[] __devinitdata = 26static char version[] __devinitdata =
20 KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n"; 27 KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n";
21#define MAX_UNITS 8 28#define MAX_UNITS 8
@@ -49,8 +56,13 @@ module_param(tx_coalesce, int, 0); /* HW xmit count each TxDMAComplete */
49/* Enable the default interrupts */ 56/* Enable the default interrupts */
50#define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete| \ 57#define DEFAULT_INTR (RxDMAComplete | HostError | IntRequested | TxDMAComplete| \
51 UpdateStats | LinkEvent) 58 UpdateStats | LinkEvent)
52#define EnableInt() \ 59
53writew(DEFAULT_INTR, ioaddr + IntEnable) 60static void dl2k_enable_int(struct netdev_private *np)
61{
62 void __iomem *ioaddr = np->ioaddr;
63
64 dw16(IntEnable, DEFAULT_INTR);
65}
54 66
55static const int max_intrloop = 50; 67static const int max_intrloop = 50;
56static const int multicast_filter_limit = 0x40; 68static const int multicast_filter_limit = 0x40;
@@ -73,7 +85,7 @@ static int rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
73static int rio_close (struct net_device *dev); 85static int rio_close (struct net_device *dev);
74static int find_miiphy (struct net_device *dev); 86static int find_miiphy (struct net_device *dev);
75static int parse_eeprom (struct net_device *dev); 87static int parse_eeprom (struct net_device *dev);
76static int read_eeprom (long ioaddr, int eep_addr); 88static int read_eeprom (struct netdev_private *, int eep_addr);
77static int mii_wait_link (struct net_device *dev, int wait); 89static int mii_wait_link (struct net_device *dev, int wait);
78static int mii_set_media (struct net_device *dev); 90static int mii_set_media (struct net_device *dev);
79static int mii_get_media (struct net_device *dev); 91static int mii_get_media (struct net_device *dev);
@@ -106,7 +118,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
106 static int card_idx; 118 static int card_idx;
107 int chip_idx = ent->driver_data; 119 int chip_idx = ent->driver_data;
108 int err, irq; 120 int err, irq;
109 long ioaddr; 121 void __iomem *ioaddr;
110 static int version_printed; 122 static int version_printed;
111 void *ring_space; 123 void *ring_space;
112 dma_addr_t ring_dma; 124 dma_addr_t ring_dma;
@@ -124,26 +136,29 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
124 goto err_out_disable; 136 goto err_out_disable;
125 137
126 pci_set_master (pdev); 138 pci_set_master (pdev);
139
140 err = -ENOMEM;
141
127 dev = alloc_etherdev (sizeof (*np)); 142 dev = alloc_etherdev (sizeof (*np));
128 if (!dev) { 143 if (!dev)
129 err = -ENOMEM;
130 goto err_out_res; 144 goto err_out_res;
131 }
132 SET_NETDEV_DEV(dev, &pdev->dev); 145 SET_NETDEV_DEV(dev, &pdev->dev);
133 146
134#ifdef MEM_MAPPING 147 np = netdev_priv(dev);
135 ioaddr = pci_resource_start (pdev, 1); 148
136 ioaddr = (long) ioremap (ioaddr, RIO_IO_SIZE); 149 /* IO registers range. */
137 if (!ioaddr) { 150 ioaddr = pci_iomap(pdev, 0, 0);
138 err = -ENOMEM; 151 if (!ioaddr)
139 goto err_out_dev; 152 goto err_out_dev;
140 } 153 np->eeprom_addr = ioaddr;
141#else 154
142 ioaddr = pci_resource_start (pdev, 0); 155#ifdef MEM_MAPPING
156 /* MM registers range. */
157 ioaddr = pci_iomap(pdev, 1, 0);
158 if (!ioaddr)
159 goto err_out_iounmap;
143#endif 160#endif
144 dev->base_addr = ioaddr; 161 np->ioaddr = ioaddr;
145 dev->irq = irq;
146 np = netdev_priv(dev);
147 np->chip_id = chip_idx; 162 np->chip_id = chip_idx;
148 np->pdev = pdev; 163 np->pdev = pdev;
149 spin_lock_init (&np->tx_lock); 164 spin_lock_init (&np->tx_lock);
@@ -239,7 +254,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
239 goto err_out_unmap_rx; 254 goto err_out_unmap_rx;
240 255
241 /* Fiber device? */ 256 /* Fiber device? */
242 np->phy_media = (readw(ioaddr + ASICCtrl) & PhyMedia) ? 1 : 0; 257 np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0;
243 np->link_status = 0; 258 np->link_status = 0;
244 /* Set media and reset PHY */ 259 /* Set media and reset PHY */
245 if (np->phy_media) { 260 if (np->phy_media) {
@@ -276,22 +291,20 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
276 printk(KERN_INFO "vlan(id):\t%d\n", np->vlan); 291 printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
277 return 0; 292 return 0;
278 293
279 err_out_unmap_rx: 294err_out_unmap_rx:
280 pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma); 295 pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
281 err_out_unmap_tx: 296err_out_unmap_tx:
282 pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); 297 pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
283 err_out_iounmap: 298err_out_iounmap:
284#ifdef MEM_MAPPING 299#ifdef MEM_MAPPING
285 iounmap ((void *) ioaddr); 300 pci_iounmap(pdev, np->ioaddr);
286
287 err_out_dev:
288#endif 301#endif
302 pci_iounmap(pdev, np->eeprom_addr);
303err_out_dev:
289 free_netdev (dev); 304 free_netdev (dev);
290 305err_out_res:
291 err_out_res:
292 pci_release_regions (pdev); 306 pci_release_regions (pdev);
293 307err_out_disable:
294 err_out_disable:
295 pci_disable_device (pdev); 308 pci_disable_device (pdev);
296 return err; 309 return err;
297} 310}
@@ -299,11 +312,9 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
299static int 312static int
300find_miiphy (struct net_device *dev) 313find_miiphy (struct net_device *dev)
301{ 314{
315 struct netdev_private *np = netdev_priv(dev);
302 int i, phy_found = 0; 316 int i, phy_found = 0;
303 struct netdev_private *np;
304 long ioaddr;
305 np = netdev_priv(dev); 317 np = netdev_priv(dev);
306 ioaddr = dev->base_addr;
307 np->phy_addr = 1; 318 np->phy_addr = 1;
308 319
309 for (i = 31; i >= 0; i--) { 320 for (i = 31; i >= 0; i--) {
@@ -323,26 +334,19 @@ find_miiphy (struct net_device *dev)
323static int 334static int
324parse_eeprom (struct net_device *dev) 335parse_eeprom (struct net_device *dev)
325{ 336{
337 struct netdev_private *np = netdev_priv(dev);
338 void __iomem *ioaddr = np->ioaddr;
326 int i, j; 339 int i, j;
327 long ioaddr = dev->base_addr;
328 u8 sromdata[256]; 340 u8 sromdata[256];
329 u8 *psib; 341 u8 *psib;
330 u32 crc; 342 u32 crc;
331 PSROM_t psrom = (PSROM_t) sromdata; 343 PSROM_t psrom = (PSROM_t) sromdata;
332 struct netdev_private *np = netdev_priv(dev);
333 344
334 int cid, next; 345 int cid, next;
335 346
336#ifdef MEM_MAPPING 347 for (i = 0; i < 128; i++)
337 ioaddr = pci_resource_start (np->pdev, 0); 348 ((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom(np, i));
338#endif 349
339 /* Read eeprom */
340 for (i = 0; i < 128; i++) {
341 ((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom (ioaddr, i));
342 }
343#ifdef MEM_MAPPING
344 ioaddr = dev->base_addr;
345#endif
346 if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) { /* D-Link Only */ 350 if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) { /* D-Link Only */
347 /* Check CRC */ 351 /* Check CRC */
348 crc = ~ether_crc_le (256 - 4, sromdata); 352 crc = ~ether_crc_le (256 - 4, sromdata);
@@ -378,8 +382,7 @@ parse_eeprom (struct net_device *dev)
378 return 0; 382 return 0;
379 case 2: /* Duplex Polarity */ 383 case 2: /* Duplex Polarity */
380 np->duplex_polarity = psib[i]; 384 np->duplex_polarity = psib[i];
381 writeb (readb (ioaddr + PhyCtrl) | psib[i], 385 dw8(PhyCtrl, dr8(PhyCtrl) | psib[i]);
382 ioaddr + PhyCtrl);
383 break; 386 break;
384 case 3: /* Wake Polarity */ 387 case 3: /* Wake Polarity */
385 np->wake_polarity = psib[i]; 388 np->wake_polarity = psib[i];
@@ -407,59 +410,57 @@ static int
407rio_open (struct net_device *dev) 410rio_open (struct net_device *dev)
408{ 411{
409 struct netdev_private *np = netdev_priv(dev); 412 struct netdev_private *np = netdev_priv(dev);
410 long ioaddr = dev->base_addr; 413 void __iomem *ioaddr = np->ioaddr;
414 const int irq = np->pdev->irq;
411 int i; 415 int i;
412 u16 macctrl; 416 u16 macctrl;
413 417
414 i = request_irq (dev->irq, rio_interrupt, IRQF_SHARED, dev->name, dev); 418 i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
415 if (i) 419 if (i)
416 return i; 420 return i;
417 421
418 /* Reset all logic functions */ 422 /* Reset all logic functions */
419 writew (GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset, 423 dw16(ASICCtrl + 2,
420 ioaddr + ASICCtrl + 2); 424 GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset);
421 mdelay(10); 425 mdelay(10);
422 426
423 /* DebugCtrl bit 4, 5, 9 must set */ 427 /* DebugCtrl bit 4, 5, 9 must set */
424 writel (readl (ioaddr + DebugCtrl) | 0x0230, ioaddr + DebugCtrl); 428 dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230);
425 429
426 /* Jumbo frame */ 430 /* Jumbo frame */
427 if (np->jumbo != 0) 431 if (np->jumbo != 0)
428 writew (MAX_JUMBO+14, ioaddr + MaxFrameSize); 432 dw16(MaxFrameSize, MAX_JUMBO+14);
429 433
430 alloc_list (dev); 434 alloc_list (dev);
431 435
432 /* Get station address */ 436 /* Get station address */
433 for (i = 0; i < 6; i++) 437 for (i = 0; i < 6; i++)
434 writeb (dev->dev_addr[i], ioaddr + StationAddr0 + i); 438 dw8(StationAddr0 + i, dev->dev_addr[i]);
435 439
436 set_multicast (dev); 440 set_multicast (dev);
437 if (np->coalesce) { 441 if (np->coalesce) {
438 writel (np->rx_coalesce | np->rx_timeout << 16, 442 dw32(RxDMAIntCtrl, np->rx_coalesce | np->rx_timeout << 16);
439 ioaddr + RxDMAIntCtrl);
440 } 443 }
441 /* Set RIO to poll every N*320nsec. */ 444 /* Set RIO to poll every N*320nsec. */
442 writeb (0x20, ioaddr + RxDMAPollPeriod); 445 dw8(RxDMAPollPeriod, 0x20);
443 writeb (0xff, ioaddr + TxDMAPollPeriod); 446 dw8(TxDMAPollPeriod, 0xff);
444 writeb (0x30, ioaddr + RxDMABurstThresh); 447 dw8(RxDMABurstThresh, 0x30);
445 writeb (0x30, ioaddr + RxDMAUrgentThresh); 448 dw8(RxDMAUrgentThresh, 0x30);
446 writel (0x0007ffff, ioaddr + RmonStatMask); 449 dw32(RmonStatMask, 0x0007ffff);
447 /* clear statistics */ 450 /* clear statistics */
448 clear_stats (dev); 451 clear_stats (dev);
449 452
450 /* VLAN supported */ 453 /* VLAN supported */
451 if (np->vlan) { 454 if (np->vlan) {
452 /* priority field in RxDMAIntCtrl */ 455 /* priority field in RxDMAIntCtrl */
453 writel (readl(ioaddr + RxDMAIntCtrl) | 0x7 << 10, 456 dw32(RxDMAIntCtrl, dr32(RxDMAIntCtrl) | 0x7 << 10);
454 ioaddr + RxDMAIntCtrl);
455 /* VLANId */ 457 /* VLANId */
456 writew (np->vlan, ioaddr + VLANId); 458 dw16(VLANId, np->vlan);
457 /* Length/Type should be 0x8100 */ 459 /* Length/Type should be 0x8100 */
458 writel (0x8100 << 16 | np->vlan, ioaddr + VLANTag); 460 dw32(VLANTag, 0x8100 << 16 | np->vlan);
459 /* Enable AutoVLANuntagging, but disable AutoVLANtagging. 461 /* Enable AutoVLANuntagging, but disable AutoVLANtagging.
460 VLAN information tagged by TFC' VID, CFI fields. */ 462 VLAN information tagged by TFC' VID, CFI fields. */
461 writel (readl (ioaddr + MACCtrl) | AutoVLANuntagging, 463 dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging);
462 ioaddr + MACCtrl);
463 } 464 }
464 465
465 init_timer (&np->timer); 466 init_timer (&np->timer);
@@ -469,20 +470,18 @@ rio_open (struct net_device *dev)
469 add_timer (&np->timer); 470 add_timer (&np->timer);
470 471
471 /* Start Tx/Rx */ 472 /* Start Tx/Rx */
472 writel (readl (ioaddr + MACCtrl) | StatsEnable | RxEnable | TxEnable, 473 dw32(MACCtrl, dr32(MACCtrl) | StatsEnable | RxEnable | TxEnable);
473 ioaddr + MACCtrl);
474 474
475 macctrl = 0; 475 macctrl = 0;
476 macctrl |= (np->vlan) ? AutoVLANuntagging : 0; 476 macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
477 macctrl |= (np->full_duplex) ? DuplexSelect : 0; 477 macctrl |= (np->full_duplex) ? DuplexSelect : 0;
478 macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0; 478 macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0;
479 macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0; 479 macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0;
480 writew(macctrl, ioaddr + MACCtrl); 480 dw16(MACCtrl, macctrl);
481 481
482 netif_start_queue (dev); 482 netif_start_queue (dev);
483 483
484 /* Enable default interrupts */ 484 dl2k_enable_int(np);
485 EnableInt ();
486 return 0; 485 return 0;
487} 486}
488 487
@@ -533,10 +532,11 @@ rio_timer (unsigned long data)
533static void 532static void
534rio_tx_timeout (struct net_device *dev) 533rio_tx_timeout (struct net_device *dev)
535{ 534{
536 long ioaddr = dev->base_addr; 535 struct netdev_private *np = netdev_priv(dev);
536 void __iomem *ioaddr = np->ioaddr;
537 537
538 printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n", 538 printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n",
539 dev->name, readl (ioaddr + TxStatus)); 539 dev->name, dr32(TxStatus));
540 rio_free_tx(dev, 0); 540 rio_free_tx(dev, 0);
541 dev->if_port = 0; 541 dev->if_port = 0;
542 dev->trans_start = jiffies; /* prevent tx timeout */ 542 dev->trans_start = jiffies; /* prevent tx timeout */
@@ -547,6 +547,7 @@ static void
547alloc_list (struct net_device *dev) 547alloc_list (struct net_device *dev)
548{ 548{
549 struct netdev_private *np = netdev_priv(dev); 549 struct netdev_private *np = netdev_priv(dev);
550 void __iomem *ioaddr = np->ioaddr;
550 int i; 551 int i;
551 552
552 np->cur_rx = np->cur_tx = 0; 553 np->cur_rx = np->cur_tx = 0;
@@ -594,24 +595,23 @@ alloc_list (struct net_device *dev)
594 } 595 }
595 596
596 /* Set RFDListPtr */ 597 /* Set RFDListPtr */
597 writel (np->rx_ring_dma, dev->base_addr + RFDListPtr0); 598 dw32(RFDListPtr0, np->rx_ring_dma);
598 writel (0, dev->base_addr + RFDListPtr1); 599 dw32(RFDListPtr1, 0);
599} 600}
600 601
601static netdev_tx_t 602static netdev_tx_t
602start_xmit (struct sk_buff *skb, struct net_device *dev) 603start_xmit (struct sk_buff *skb, struct net_device *dev)
603{ 604{
604 struct netdev_private *np = netdev_priv(dev); 605 struct netdev_private *np = netdev_priv(dev);
606 void __iomem *ioaddr = np->ioaddr;
605 struct netdev_desc *txdesc; 607 struct netdev_desc *txdesc;
606 unsigned entry; 608 unsigned entry;
607 u32 ioaddr;
608 u64 tfc_vlan_tag = 0; 609 u64 tfc_vlan_tag = 0;
609 610
610 if (np->link_status == 0) { /* Link Down */ 611 if (np->link_status == 0) { /* Link Down */
611 dev_kfree_skb(skb); 612 dev_kfree_skb(skb);
612 return NETDEV_TX_OK; 613 return NETDEV_TX_OK;
613 } 614 }
614 ioaddr = dev->base_addr;
615 entry = np->cur_tx % TX_RING_SIZE; 615 entry = np->cur_tx % TX_RING_SIZE;
616 np->tx_skbuff[entry] = skb; 616 np->tx_skbuff[entry] = skb;
617 txdesc = &np->tx_ring[entry]; 617 txdesc = &np->tx_ring[entry];
@@ -646,9 +646,9 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
646 (1 << FragCountShift)); 646 (1 << FragCountShift));
647 647
648 /* TxDMAPollNow */ 648 /* TxDMAPollNow */
649 writel (readl (ioaddr + DMACtrl) | 0x00001000, ioaddr + DMACtrl); 649 dw32(DMACtrl, dr32(DMACtrl) | 0x00001000);
650 /* Schedule ISR */ 650 /* Schedule ISR */
651 writel(10000, ioaddr + CountDown); 651 dw32(CountDown, 10000);
652 np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE; 652 np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE;
653 if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE 653 if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
654 < TX_QUEUE_LEN - 1 && np->speed != 10) { 654 < TX_QUEUE_LEN - 1 && np->speed != 10) {
@@ -658,10 +658,10 @@ start_xmit (struct sk_buff *skb, struct net_device *dev)
658 } 658 }
659 659
660 /* The first TFDListPtr */ 660 /* The first TFDListPtr */
661 if (readl (dev->base_addr + TFDListPtr0) == 0) { 661 if (!dr32(TFDListPtr0)) {
662 writel (np->tx_ring_dma + entry * sizeof (struct netdev_desc), 662 dw32(TFDListPtr0, np->tx_ring_dma +
663 dev->base_addr + TFDListPtr0); 663 entry * sizeof (struct netdev_desc));
664 writel (0, dev->base_addr + TFDListPtr1); 664 dw32(TFDListPtr1, 0);
665 } 665 }
666 666
667 return NETDEV_TX_OK; 667 return NETDEV_TX_OK;
@@ -671,17 +671,15 @@ static irqreturn_t
671rio_interrupt (int irq, void *dev_instance) 671rio_interrupt (int irq, void *dev_instance)
672{ 672{
673 struct net_device *dev = dev_instance; 673 struct net_device *dev = dev_instance;
674 struct netdev_private *np; 674 struct netdev_private *np = netdev_priv(dev);
675 void __iomem *ioaddr = np->ioaddr;
675 unsigned int_status; 676 unsigned int_status;
676 long ioaddr;
677 int cnt = max_intrloop; 677 int cnt = max_intrloop;
678 int handled = 0; 678 int handled = 0;
679 679
680 ioaddr = dev->base_addr;
681 np = netdev_priv(dev);
682 while (1) { 680 while (1) {
683 int_status = readw (ioaddr + IntStatus); 681 int_status = dr16(IntStatus);
684 writew (int_status, ioaddr + IntStatus); 682 dw16(IntStatus, int_status);
685 int_status &= DEFAULT_INTR; 683 int_status &= DEFAULT_INTR;
686 if (int_status == 0 || --cnt < 0) 684 if (int_status == 0 || --cnt < 0)
687 break; 685 break;
@@ -692,7 +690,7 @@ rio_interrupt (int irq, void *dev_instance)
692 /* TxDMAComplete interrupt */ 690 /* TxDMAComplete interrupt */
693 if ((int_status & (TxDMAComplete|IntRequested))) { 691 if ((int_status & (TxDMAComplete|IntRequested))) {
694 int tx_status; 692 int tx_status;
695 tx_status = readl (ioaddr + TxStatus); 693 tx_status = dr32(TxStatus);
696 if (tx_status & 0x01) 694 if (tx_status & 0x01)
697 tx_error (dev, tx_status); 695 tx_error (dev, tx_status);
698 /* Free used tx skbuffs */ 696 /* Free used tx skbuffs */
@@ -705,7 +703,7 @@ rio_interrupt (int irq, void *dev_instance)
705 rio_error (dev, int_status); 703 rio_error (dev, int_status);
706 } 704 }
707 if (np->cur_tx != np->old_tx) 705 if (np->cur_tx != np->old_tx)
708 writel (100, ioaddr + CountDown); 706 dw32(CountDown, 100);
709 return IRQ_RETVAL(handled); 707 return IRQ_RETVAL(handled);
710} 708}
711 709
@@ -765,13 +763,11 @@ rio_free_tx (struct net_device *dev, int irq)
765static void 763static void
766tx_error (struct net_device *dev, int tx_status) 764tx_error (struct net_device *dev, int tx_status)
767{ 765{
768 struct netdev_private *np; 766 struct netdev_private *np = netdev_priv(dev);
769 long ioaddr = dev->base_addr; 767 void __iomem *ioaddr = np->ioaddr;
770 int frame_id; 768 int frame_id;
771 int i; 769 int i;
772 770
773 np = netdev_priv(dev);
774
775 frame_id = (tx_status & 0xffff0000); 771 frame_id = (tx_status & 0xffff0000);
776 printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n", 772 printk (KERN_ERR "%s: Transmit error, TxStatus %4.4x, FrameId %d.\n",
777 dev->name, tx_status, frame_id); 773 dev->name, tx_status, frame_id);
@@ -779,23 +775,21 @@ tx_error (struct net_device *dev, int tx_status)
779 /* Ttransmit Underrun */ 775 /* Ttransmit Underrun */
780 if (tx_status & 0x10) { 776 if (tx_status & 0x10) {
781 np->stats.tx_fifo_errors++; 777 np->stats.tx_fifo_errors++;
782 writew (readw (ioaddr + TxStartThresh) + 0x10, 778 dw16(TxStartThresh, dr16(TxStartThresh) + 0x10);
783 ioaddr + TxStartThresh);
784 /* Transmit Underrun need to set TxReset, DMARest, FIFOReset */ 779 /* Transmit Underrun need to set TxReset, DMARest, FIFOReset */
785 writew (TxReset | DMAReset | FIFOReset | NetworkReset, 780 dw16(ASICCtrl + 2,
786 ioaddr + ASICCtrl + 2); 781 TxReset | DMAReset | FIFOReset | NetworkReset);
787 /* Wait for ResetBusy bit clear */ 782 /* Wait for ResetBusy bit clear */
788 for (i = 50; i > 0; i--) { 783 for (i = 50; i > 0; i--) {
789 if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0) 784 if (!(dr16(ASICCtrl + 2) & ResetBusy))
790 break; 785 break;
791 mdelay (1); 786 mdelay (1);
792 } 787 }
793 rio_free_tx (dev, 1); 788 rio_free_tx (dev, 1);
794 /* Reset TFDListPtr */ 789 /* Reset TFDListPtr */
795 writel (np->tx_ring_dma + 790 dw32(TFDListPtr0, np->tx_ring_dma +
796 np->old_tx * sizeof (struct netdev_desc), 791 np->old_tx * sizeof (struct netdev_desc));
797 dev->base_addr + TFDListPtr0); 792 dw32(TFDListPtr1, 0);
798 writel (0, dev->base_addr + TFDListPtr1);
799 793
800 /* Let TxStartThresh stay default value */ 794 /* Let TxStartThresh stay default value */
801 } 795 }
@@ -803,10 +797,10 @@ tx_error (struct net_device *dev, int tx_status)
803 if (tx_status & 0x04) { 797 if (tx_status & 0x04) {
804 np->stats.tx_fifo_errors++; 798 np->stats.tx_fifo_errors++;
805 /* TxReset and clear FIFO */ 799 /* TxReset and clear FIFO */
806 writew (TxReset | FIFOReset, ioaddr + ASICCtrl + 2); 800 dw16(ASICCtrl + 2, TxReset | FIFOReset);
807 /* Wait reset done */ 801 /* Wait reset done */
808 for (i = 50; i > 0; i--) { 802 for (i = 50; i > 0; i--) {
809 if ((readw (ioaddr + ASICCtrl + 2) & ResetBusy) == 0) 803 if (!(dr16(ASICCtrl + 2) & ResetBusy))
810 break; 804 break;
811 mdelay (1); 805 mdelay (1);
812 } 806 }
@@ -821,7 +815,7 @@ tx_error (struct net_device *dev, int tx_status)
821 np->stats.collisions++; 815 np->stats.collisions++;
822#endif 816#endif
823 /* Restart the Tx */ 817 /* Restart the Tx */
824 writel (readw (dev->base_addr + MACCtrl) | TxEnable, ioaddr + MACCtrl); 818 dw32(MACCtrl, dr16(MACCtrl) | TxEnable);
825} 819}
826 820
827static int 821static int
@@ -931,8 +925,8 @@ receive_packet (struct net_device *dev)
931static void 925static void
932rio_error (struct net_device *dev, int int_status) 926rio_error (struct net_device *dev, int int_status)
933{ 927{
934 long ioaddr = dev->base_addr;
935 struct netdev_private *np = netdev_priv(dev); 928 struct netdev_private *np = netdev_priv(dev);
929 void __iomem *ioaddr = np->ioaddr;
936 u16 macctrl; 930 u16 macctrl;
937 931
938 /* Link change event */ 932 /* Link change event */
@@ -954,7 +948,7 @@ rio_error (struct net_device *dev, int int_status)
954 TxFlowControlEnable : 0; 948 TxFlowControlEnable : 0;
955 macctrl |= (np->rx_flow) ? 949 macctrl |= (np->rx_flow) ?
956 RxFlowControlEnable : 0; 950 RxFlowControlEnable : 0;
957 writew(macctrl, ioaddr + MACCtrl); 951 dw16(MACCtrl, macctrl);
958 np->link_status = 1; 952 np->link_status = 1;
959 netif_carrier_on(dev); 953 netif_carrier_on(dev);
960 } else { 954 } else {
@@ -974,7 +968,7 @@ rio_error (struct net_device *dev, int int_status)
974 if (int_status & HostError) { 968 if (int_status & HostError) {
975 printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n", 969 printk (KERN_ERR "%s: HostError! IntStatus %4.4x.\n",
976 dev->name, int_status); 970 dev->name, int_status);
977 writew (GlobalReset | HostReset, ioaddr + ASICCtrl + 2); 971 dw16(ASICCtrl + 2, GlobalReset | HostReset);
978 mdelay (500); 972 mdelay (500);
979 } 973 }
980} 974}
@@ -982,8 +976,8 @@ rio_error (struct net_device *dev, int int_status)
982static struct net_device_stats * 976static struct net_device_stats *
983get_stats (struct net_device *dev) 977get_stats (struct net_device *dev)
984{ 978{
985 long ioaddr = dev->base_addr;
986 struct netdev_private *np = netdev_priv(dev); 979 struct netdev_private *np = netdev_priv(dev);
980 void __iomem *ioaddr = np->ioaddr;
987#ifdef MEM_MAPPING 981#ifdef MEM_MAPPING
988 int i; 982 int i;
989#endif 983#endif
@@ -992,106 +986,107 @@ get_stats (struct net_device *dev)
992 /* All statistics registers need to be acknowledged, 986 /* All statistics registers need to be acknowledged,
993 else statistic overflow could cause problems */ 987 else statistic overflow could cause problems */
994 988
995 np->stats.rx_packets += readl (ioaddr + FramesRcvOk); 989 np->stats.rx_packets += dr32(FramesRcvOk);
996 np->stats.tx_packets += readl (ioaddr + FramesXmtOk); 990 np->stats.tx_packets += dr32(FramesXmtOk);
997 np->stats.rx_bytes += readl (ioaddr + OctetRcvOk); 991 np->stats.rx_bytes += dr32(OctetRcvOk);
998 np->stats.tx_bytes += readl (ioaddr + OctetXmtOk); 992 np->stats.tx_bytes += dr32(OctetXmtOk);
999 993
1000 np->stats.multicast = readl (ioaddr + McstFramesRcvdOk); 994 np->stats.multicast = dr32(McstFramesRcvdOk);
1001 np->stats.collisions += readl (ioaddr + SingleColFrames) 995 np->stats.collisions += dr32(SingleColFrames)
1002 + readl (ioaddr + MultiColFrames); 996 + dr32(MultiColFrames);
1003 997
1004 /* detailed tx errors */ 998 /* detailed tx errors */
1005 stat_reg = readw (ioaddr + FramesAbortXSColls); 999 stat_reg = dr16(FramesAbortXSColls);
1006 np->stats.tx_aborted_errors += stat_reg; 1000 np->stats.tx_aborted_errors += stat_reg;
1007 np->stats.tx_errors += stat_reg; 1001 np->stats.tx_errors += stat_reg;
1008 1002
1009 stat_reg = readw (ioaddr + CarrierSenseErrors); 1003 stat_reg = dr16(CarrierSenseErrors);
1010 np->stats.tx_carrier_errors += stat_reg; 1004 np->stats.tx_carrier_errors += stat_reg;
1011 np->stats.tx_errors += stat_reg; 1005 np->stats.tx_errors += stat_reg;
1012 1006
1013 /* Clear all other statistic register. */ 1007 /* Clear all other statistic register. */
1014 readl (ioaddr + McstOctetXmtOk); 1008 dr32(McstOctetXmtOk);
1015 readw (ioaddr + BcstFramesXmtdOk); 1009 dr16(BcstFramesXmtdOk);
1016 readl (ioaddr + McstFramesXmtdOk); 1010 dr32(McstFramesXmtdOk);
1017 readw (ioaddr + BcstFramesRcvdOk); 1011 dr16(BcstFramesRcvdOk);
1018 readw (ioaddr + MacControlFramesRcvd); 1012 dr16(MacControlFramesRcvd);
1019 readw (ioaddr + FrameTooLongErrors); 1013 dr16(FrameTooLongErrors);
1020 readw (ioaddr + InRangeLengthErrors); 1014 dr16(InRangeLengthErrors);
1021 readw (ioaddr + FramesCheckSeqErrors); 1015 dr16(FramesCheckSeqErrors);
1022 readw (ioaddr + FramesLostRxErrors); 1016 dr16(FramesLostRxErrors);
1023 readl (ioaddr + McstOctetXmtOk); 1017 dr32(McstOctetXmtOk);
1024 readl (ioaddr + BcstOctetXmtOk); 1018 dr32(BcstOctetXmtOk);
1025 readl (ioaddr + McstFramesXmtdOk); 1019 dr32(McstFramesXmtdOk);
1026 readl (ioaddr + FramesWDeferredXmt); 1020 dr32(FramesWDeferredXmt);
1027 readl (ioaddr + LateCollisions); 1021 dr32(LateCollisions);
1028 readw (ioaddr + BcstFramesXmtdOk); 1022 dr16(BcstFramesXmtdOk);
1029 readw (ioaddr + MacControlFramesXmtd); 1023 dr16(MacControlFramesXmtd);
1030 readw (ioaddr + FramesWEXDeferal); 1024 dr16(FramesWEXDeferal);
1031 1025
1032#ifdef MEM_MAPPING 1026#ifdef MEM_MAPPING
1033 for (i = 0x100; i <= 0x150; i += 4) 1027 for (i = 0x100; i <= 0x150; i += 4)
1034 readl (ioaddr + i); 1028 dr32(i);
1035#endif 1029#endif
1036 readw (ioaddr + TxJumboFrames); 1030 dr16(TxJumboFrames);
1037 readw (ioaddr + RxJumboFrames); 1031 dr16(RxJumboFrames);
1038 readw (ioaddr + TCPCheckSumErrors); 1032 dr16(TCPCheckSumErrors);
1039 readw (ioaddr + UDPCheckSumErrors); 1033 dr16(UDPCheckSumErrors);
1040 readw (ioaddr + IPCheckSumErrors); 1034 dr16(IPCheckSumErrors);
1041 return &np->stats; 1035 return &np->stats;
1042} 1036}
1043 1037
1044static int 1038static int
1045clear_stats (struct net_device *dev) 1039clear_stats (struct net_device *dev)
1046{ 1040{
1047 long ioaddr = dev->base_addr; 1041 struct netdev_private *np = netdev_priv(dev);
1042 void __iomem *ioaddr = np->ioaddr;
1048#ifdef MEM_MAPPING 1043#ifdef MEM_MAPPING
1049 int i; 1044 int i;
1050#endif 1045#endif
1051 1046
1052 /* All statistics registers need to be acknowledged, 1047 /* All statistics registers need to be acknowledged,
1053 else statistic overflow could cause problems */ 1048 else statistic overflow could cause problems */
1054 readl (ioaddr + FramesRcvOk); 1049 dr32(FramesRcvOk);
1055 readl (ioaddr + FramesXmtOk); 1050 dr32(FramesXmtOk);
1056 readl (ioaddr + OctetRcvOk); 1051 dr32(OctetRcvOk);
1057 readl (ioaddr + OctetXmtOk); 1052 dr32(OctetXmtOk);
1058 1053
1059 readl (ioaddr + McstFramesRcvdOk); 1054 dr32(McstFramesRcvdOk);
1060 readl (ioaddr + SingleColFrames); 1055 dr32(SingleColFrames);
1061 readl (ioaddr + MultiColFrames); 1056 dr32(MultiColFrames);
1062 readl (ioaddr + LateCollisions); 1057 dr32(LateCollisions);
1063 /* detailed rx errors */ 1058 /* detailed rx errors */
1064 readw (ioaddr + FrameTooLongErrors); 1059 dr16(FrameTooLongErrors);
1065 readw (ioaddr + InRangeLengthErrors); 1060 dr16(InRangeLengthErrors);
1066 readw (ioaddr + FramesCheckSeqErrors); 1061 dr16(FramesCheckSeqErrors);
1067 readw (ioaddr + FramesLostRxErrors); 1062 dr16(FramesLostRxErrors);
1068 1063
1069 /* detailed tx errors */ 1064 /* detailed tx errors */
1070 readw (ioaddr + FramesAbortXSColls); 1065 dr16(FramesAbortXSColls);
1071 readw (ioaddr + CarrierSenseErrors); 1066 dr16(CarrierSenseErrors);
1072 1067
1073 /* Clear all other statistic register. */ 1068 /* Clear all other statistic register. */
1074 readl (ioaddr + McstOctetXmtOk); 1069 dr32(McstOctetXmtOk);
1075 readw (ioaddr + BcstFramesXmtdOk); 1070 dr16(BcstFramesXmtdOk);
1076 readl (ioaddr + McstFramesXmtdOk); 1071 dr32(McstFramesXmtdOk);
1077 readw (ioaddr + BcstFramesRcvdOk); 1072 dr16(BcstFramesRcvdOk);
1078 readw (ioaddr + MacControlFramesRcvd); 1073 dr16(MacControlFramesRcvd);
1079 readl (ioaddr + McstOctetXmtOk); 1074 dr32(McstOctetXmtOk);
1080 readl (ioaddr + BcstOctetXmtOk); 1075 dr32(BcstOctetXmtOk);
1081 readl (ioaddr + McstFramesXmtdOk); 1076 dr32(McstFramesXmtdOk);
1082 readl (ioaddr + FramesWDeferredXmt); 1077 dr32(FramesWDeferredXmt);
1083 readw (ioaddr + BcstFramesXmtdOk); 1078 dr16(BcstFramesXmtdOk);
1084 readw (ioaddr + MacControlFramesXmtd); 1079 dr16(MacControlFramesXmtd);
1085 readw (ioaddr + FramesWEXDeferal); 1080 dr16(FramesWEXDeferal);
1086#ifdef MEM_MAPPING 1081#ifdef MEM_MAPPING
1087 for (i = 0x100; i <= 0x150; i += 4) 1082 for (i = 0x100; i <= 0x150; i += 4)
1088 readl (ioaddr + i); 1083 dr32(i);
1089#endif 1084#endif
1090 readw (ioaddr + TxJumboFrames); 1085 dr16(TxJumboFrames);
1091 readw (ioaddr + RxJumboFrames); 1086 dr16(RxJumboFrames);
1092 readw (ioaddr + TCPCheckSumErrors); 1087 dr16(TCPCheckSumErrors);
1093 readw (ioaddr + UDPCheckSumErrors); 1088 dr16(UDPCheckSumErrors);
1094 readw (ioaddr + IPCheckSumErrors); 1089 dr16(IPCheckSumErrors);
1095 return 0; 1090 return 0;
1096} 1091}
1097 1092
@@ -1114,10 +1109,10 @@ change_mtu (struct net_device *dev, int new_mtu)
1114static void 1109static void
1115set_multicast (struct net_device *dev) 1110set_multicast (struct net_device *dev)
1116{ 1111{
1117 long ioaddr = dev->base_addr; 1112 struct netdev_private *np = netdev_priv(dev);
1113 void __iomem *ioaddr = np->ioaddr;
1118 u32 hash_table[2]; 1114 u32 hash_table[2];
1119 u16 rx_mode = 0; 1115 u16 rx_mode = 0;
1120 struct netdev_private *np = netdev_priv(dev);
1121 1116
1122 hash_table[0] = hash_table[1] = 0; 1117 hash_table[0] = hash_table[1] = 0;
1123 /* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */ 1118 /* RxFlowcontrol DA: 01-80-C2-00-00-01. Hash index=0x39 */
@@ -1153,9 +1148,9 @@ set_multicast (struct net_device *dev)
1153 rx_mode |= ReceiveVLANMatch; 1148 rx_mode |= ReceiveVLANMatch;
1154 } 1149 }
1155 1150
1156 writel (hash_table[0], ioaddr + HashTable0); 1151 dw32(HashTable0, hash_table[0]);
1157 writel (hash_table[1], ioaddr + HashTable1); 1152 dw32(HashTable1, hash_table[1]);
1158 writew (rx_mode, ioaddr + ReceiveMode); 1153 dw16(ReceiveMode, rx_mode);
1159} 1154}
1160 1155
1161static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 1156static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
@@ -1284,15 +1279,15 @@ rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1284#define EEP_BUSY 0x8000 1279#define EEP_BUSY 0x8000
1285/* Read the EEPROM word */ 1280/* Read the EEPROM word */
1286/* We use I/O instruction to read/write eeprom to avoid fail on some machines */ 1281/* We use I/O instruction to read/write eeprom to avoid fail on some machines */
1287static int 1282static int read_eeprom(struct netdev_private *np, int eep_addr)
1288read_eeprom (long ioaddr, int eep_addr)
1289{ 1283{
1284 void __iomem *ioaddr = np->eeprom_addr;
1290 int i = 1000; 1285 int i = 1000;
1291 outw (EEP_READ | (eep_addr & 0xff), ioaddr + EepromCtrl); 1286
1287 dw16(EepromCtrl, EEP_READ | (eep_addr & 0xff));
1292 while (i-- > 0) { 1288 while (i-- > 0) {
1293 if (!(inw (ioaddr + EepromCtrl) & EEP_BUSY)) { 1289 if (!(dr16(EepromCtrl) & EEP_BUSY))
1294 return inw (ioaddr + EepromData); 1290 return dr16(EepromData);
1295 }
1296 } 1291 }
1297 return 0; 1292 return 0;
1298} 1293}
@@ -1302,38 +1297,40 @@ enum phy_ctrl_bits {
1302 MII_DUPLEX = 0x08, 1297 MII_DUPLEX = 0x08,
1303}; 1298};
1304 1299
1305#define mii_delay() readb(ioaddr) 1300#define mii_delay() dr8(PhyCtrl)
1306static void 1301static void
1307mii_sendbit (struct net_device *dev, u32 data) 1302mii_sendbit (struct net_device *dev, u32 data)
1308{ 1303{
1309 long ioaddr = dev->base_addr + PhyCtrl; 1304 struct netdev_private *np = netdev_priv(dev);
1310 data = (data) ? MII_DATA1 : 0; 1305 void __iomem *ioaddr = np->ioaddr;
1311 data |= MII_WRITE; 1306
1312 data |= (readb (ioaddr) & 0xf8) | MII_WRITE; 1307 data = ((data) ? MII_DATA1 : 0) | (dr8(PhyCtrl) & 0xf8) | MII_WRITE;
1313 writeb (data, ioaddr); 1308 dw8(PhyCtrl, data);
1314 mii_delay (); 1309 mii_delay ();
1315 writeb (data | MII_CLK, ioaddr); 1310 dw8(PhyCtrl, data | MII_CLK);
1316 mii_delay (); 1311 mii_delay ();
1317} 1312}
1318 1313
1319static int 1314static int
1320mii_getbit (struct net_device *dev) 1315mii_getbit (struct net_device *dev)
1321{ 1316{
1322 long ioaddr = dev->base_addr + PhyCtrl; 1317 struct netdev_private *np = netdev_priv(dev);
1318 void __iomem *ioaddr = np->ioaddr;
1323 u8 data; 1319 u8 data;
1324 1320
1325 data = (readb (ioaddr) & 0xf8) | MII_READ; 1321 data = (dr8(PhyCtrl) & 0xf8) | MII_READ;
1326 writeb (data, ioaddr); 1322 dw8(PhyCtrl, data);
1327 mii_delay (); 1323 mii_delay ();
1328 writeb (data | MII_CLK, ioaddr); 1324 dw8(PhyCtrl, data | MII_CLK);
1329 mii_delay (); 1325 mii_delay ();
1330 return ((readb (ioaddr) >> 1) & 1); 1326 return (dr8(PhyCtrl) >> 1) & 1;
1331} 1327}
1332 1328
1333static void 1329static void
1334mii_send_bits (struct net_device *dev, u32 data, int len) 1330mii_send_bits (struct net_device *dev, u32 data, int len)
1335{ 1331{
1336 int i; 1332 int i;
1333
1337 for (i = len - 1; i >= 0; i--) { 1334 for (i = len - 1; i >= 0; i--) {
1338 mii_sendbit (dev, data & (1 << i)); 1335 mii_sendbit (dev, data & (1 << i));
1339 } 1336 }
@@ -1687,28 +1684,29 @@ mii_set_media_pcs (struct net_device *dev)
1687static int 1684static int
1688rio_close (struct net_device *dev) 1685rio_close (struct net_device *dev)
1689{ 1686{
1690 long ioaddr = dev->base_addr;
1691 struct netdev_private *np = netdev_priv(dev); 1687 struct netdev_private *np = netdev_priv(dev);
1688 void __iomem *ioaddr = np->ioaddr;
1689
1690 struct pci_dev *pdev = np->pdev;
1692 struct sk_buff *skb; 1691 struct sk_buff *skb;
1693 int i; 1692 int i;
1694 1693
1695 netif_stop_queue (dev); 1694 netif_stop_queue (dev);
1696 1695
1697 /* Disable interrupts */ 1696 /* Disable interrupts */
1698 writew (0, ioaddr + IntEnable); 1697 dw16(IntEnable, 0);
1699 1698
1700 /* Stop Tx and Rx logics */ 1699 /* Stop Tx and Rx logics */
1701 writel (TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl); 1700 dw32(MACCtrl, TxDisable | RxDisable | StatsDisable);
1702 1701
1703 free_irq (dev->irq, dev); 1702 free_irq(pdev->irq, dev);
1704 del_timer_sync (&np->timer); 1703 del_timer_sync (&np->timer);
1705 1704
1706 /* Free all the skbuffs in the queue. */ 1705 /* Free all the skbuffs in the queue. */
1707 for (i = 0; i < RX_RING_SIZE; i++) { 1706 for (i = 0; i < RX_RING_SIZE; i++) {
1708 skb = np->rx_skbuff[i]; 1707 skb = np->rx_skbuff[i];
1709 if (skb) { 1708 if (skb) {
1710 pci_unmap_single(np->pdev, 1709 pci_unmap_single(pdev, desc_to_dma(&np->rx_ring[i]),
1711 desc_to_dma(&np->rx_ring[i]),
1712 skb->len, PCI_DMA_FROMDEVICE); 1710 skb->len, PCI_DMA_FROMDEVICE);
1713 dev_kfree_skb (skb); 1711 dev_kfree_skb (skb);
1714 np->rx_skbuff[i] = NULL; 1712 np->rx_skbuff[i] = NULL;
@@ -1719,8 +1717,7 @@ rio_close (struct net_device *dev)
1719 for (i = 0; i < TX_RING_SIZE; i++) { 1717 for (i = 0; i < TX_RING_SIZE; i++) {
1720 skb = np->tx_skbuff[i]; 1718 skb = np->tx_skbuff[i];
1721 if (skb) { 1719 if (skb) {
1722 pci_unmap_single(np->pdev, 1720 pci_unmap_single(pdev, desc_to_dma(&np->tx_ring[i]),
1723 desc_to_dma(&np->tx_ring[i]),
1724 skb->len, PCI_DMA_TODEVICE); 1721 skb->len, PCI_DMA_TODEVICE);
1725 dev_kfree_skb (skb); 1722 dev_kfree_skb (skb);
1726 np->tx_skbuff[i] = NULL; 1723 np->tx_skbuff[i] = NULL;
@@ -1744,8 +1741,9 @@ rio_remove1 (struct pci_dev *pdev)
1744 pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, 1741 pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring,
1745 np->tx_ring_dma); 1742 np->tx_ring_dma);
1746#ifdef MEM_MAPPING 1743#ifdef MEM_MAPPING
1747 iounmap ((char *) (dev->base_addr)); 1744 pci_iounmap(pdev, np->ioaddr);
1748#endif 1745#endif
1746 pci_iounmap(pdev, np->eeprom_addr);
1749 free_netdev (dev); 1747 free_netdev (dev);
1750 pci_release_regions (pdev); 1748 pci_release_regions (pdev);
1751 pci_disable_device (pdev); 1749 pci_disable_device (pdev);
diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
index 30c2da3de548..3699565704c7 100644
--- a/drivers/net/ethernet/dlink/dl2k.h
+++ b/drivers/net/ethernet/dlink/dl2k.h
@@ -42,23 +42,6 @@
42#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc) 42#define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc)
43#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc) 43#define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc)
44 44
45/* This driver was written to use PCI memory space, however x86-oriented
46 hardware often uses I/O space accesses. */
47#ifndef MEM_MAPPING
48#undef readb
49#undef readw
50#undef readl
51#undef writeb
52#undef writew
53#undef writel
54#define readb inb
55#define readw inw
56#define readl inl
57#define writeb outb
58#define writew outw
59#define writel outl
60#endif
61
62/* Offsets to the device registers. 45/* Offsets to the device registers.
63 Unlike software-only systems, device drivers interact with complex hardware. 46 Unlike software-only systems, device drivers interact with complex hardware.
64 It's not useful to define symbolic names for every register bit in the 47 It's not useful to define symbolic names for every register bit in the
@@ -384,6 +367,8 @@ struct netdev_private {
384 dma_addr_t tx_ring_dma; 367 dma_addr_t tx_ring_dma;
385 dma_addr_t rx_ring_dma; 368 dma_addr_t rx_ring_dma;
386 struct pci_dev *pdev; 369 struct pci_dev *pdev;
370 void __iomem *ioaddr;
371 void __iomem *eeprom_addr;
387 spinlock_t tx_lock; 372 spinlock_t tx_lock;
388 spinlock_t rx_lock; 373 spinlock_t rx_lock;
389 struct net_device_stats stats; 374 struct net_device_stats stats;
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index d783f4f96ec0..d7bb52a7bda1 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -522,9 +522,6 @@ static int __devinit sundance_probe1 (struct pci_dev *pdev,
522 cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET)); 522 cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
523 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 523 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
524 524
525 dev->base_addr = (unsigned long)ioaddr;
526 dev->irq = irq;
527
528 np = netdev_priv(dev); 525 np = netdev_priv(dev);
529 np->base = ioaddr; 526 np->base = ioaddr;
530 np->pci_dev = pdev; 527 np->pci_dev = pdev;
@@ -828,18 +825,19 @@ static int netdev_open(struct net_device *dev)
828{ 825{
829 struct netdev_private *np = netdev_priv(dev); 826 struct netdev_private *np = netdev_priv(dev);
830 void __iomem *ioaddr = np->base; 827 void __iomem *ioaddr = np->base;
828 const int irq = np->pci_dev->irq;
831 unsigned long flags; 829 unsigned long flags;
832 int i; 830 int i;
833 831
834 /* Do we need to reset the chip??? */ 832 /* Do we need to reset the chip??? */
835 833
836 i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev); 834 i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
837 if (i) 835 if (i)
838 return i; 836 return i;
839 837
840 if (netif_msg_ifup(np)) 838 if (netif_msg_ifup(np))
841 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n", 839 printk(KERN_DEBUG "%s: netdev_open() irq %d\n", dev->name, irq);
842 dev->name, dev->irq); 840
843 init_ring(dev); 841 init_ring(dev);
844 842
845 iowrite32(np->rx_ring_dma, ioaddr + RxListPtr); 843 iowrite32(np->rx_ring_dma, ioaddr + RxListPtr);
@@ -1814,7 +1812,7 @@ static int netdev_close(struct net_device *dev)
1814 } 1812 }
1815#endif /* __i386__ debugging only */ 1813#endif /* __i386__ debugging only */
1816 1814
1817 free_irq(dev->irq, dev); 1815 free_irq(np->pci_dev->irq, dev);
1818 1816
1819 del_timer_sync(&np->timer); 1817 del_timer_sync(&np->timer);
1820 1818
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index b276469f74e9..290b26f868c9 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -815,6 +815,7 @@ static const struct ethtool_ops dnet_ethtool_ops = {
815 .set_settings = dnet_set_settings, 815 .set_settings = dnet_set_settings,
816 .get_drvinfo = dnet_get_drvinfo, 816 .get_drvinfo = dnet_get_drvinfo,
817 .get_link = ethtool_op_get_link, 817 .get_link = ethtool_op_get_link,
818 .get_ts_info = ethtool_op_get_ts_info,
818}; 819};
819 820
820static const struct net_device_ops dnet_netdev_ops = { 821static const struct net_device_ops dnet_netdev_ops = {
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 9576ac002c23..c3ee9103ff4f 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -33,7 +33,7 @@
33 33
34#include "be_hw.h" 34#include "be_hw.h"
35 35
36#define DRV_VER "4.2.116u" 36#define DRV_VER "4.2.220u"
37#define DRV_NAME "be2net" 37#define DRV_NAME "be2net"
38#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC" 38#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
39#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC" 39#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
@@ -162,6 +162,11 @@ static inline void queue_head_inc(struct be_queue_info *q)
162 index_inc(&q->head, q->len); 162 index_inc(&q->head, q->len);
163} 163}
164 164
165static inline void index_dec(u16 *index, u16 limit)
166{
167 *index = MODULO((*index - 1), limit);
168}
169
165static inline void queue_tail_inc(struct be_queue_info *q) 170static inline void queue_tail_inc(struct be_queue_info *q)
166{ 171{
167 index_inc(&q->tail, q->len); 172 index_inc(&q->tail, q->len);
@@ -313,6 +318,23 @@ struct be_vf_cfg {
313#define BE_UC_PMAC_COUNT 30 318#define BE_UC_PMAC_COUNT 30
314#define BE_VF_UC_PMAC_COUNT 2 319#define BE_VF_UC_PMAC_COUNT 2
315 320
321struct phy_info {
322 u8 transceiver;
323 u8 autoneg;
324 u8 fc_autoneg;
325 u8 port_type;
326 u16 phy_type;
327 u16 interface_type;
328 u32 misc_params;
329 u16 auto_speeds_supported;
330 u16 fixed_speeds_supported;
331 int link_speed;
332 int forced_port_speed;
333 u32 dac_cable_len;
334 u32 advertising;
335 u32 supported;
336};
337
316struct be_adapter { 338struct be_adapter {
317 struct pci_dev *pdev; 339 struct pci_dev *pdev;
318 struct net_device *netdev; 340 struct net_device *netdev;
@@ -377,10 +399,6 @@ struct be_adapter {
377 u32 rx_fc; /* Rx flow control */ 399 u32 rx_fc; /* Rx flow control */
378 u32 tx_fc; /* Tx flow control */ 400 u32 tx_fc; /* Tx flow control */
379 bool stats_cmd_sent; 401 bool stats_cmd_sent;
380 int link_speed;
381 u8 port_type;
382 u8 transceiver;
383 u8 autoneg;
384 u8 generation; /* BladeEngine ASIC generation */ 402 u8 generation; /* BladeEngine ASIC generation */
385 u32 flash_status; 403 u32 flash_status;
386 struct completion flash_compl; 404 struct completion flash_compl;
@@ -392,6 +410,7 @@ struct be_adapter {
392 u32 sli_family; 410 u32 sli_family;
393 u8 hba_port_num; 411 u8 hba_port_num;
394 u16 pvid; 412 u16 pvid;
413 struct phy_info phy;
395 u8 wol_cap; 414 u8 wol_cap;
396 bool wol; 415 bool wol;
397 u32 max_pmac_cnt; /* Max secondary UC MACs programmable */ 416 u32 max_pmac_cnt; /* Max secondary UC MACs programmable */
@@ -583,4 +602,5 @@ extern void be_link_status_update(struct be_adapter *adapter, u8 link_status);
583extern void be_parse_stats(struct be_adapter *adapter); 602extern void be_parse_stats(struct be_adapter *adapter);
584extern int be_load_fw(struct be_adapter *adapter, u8 *func); 603extern int be_load_fw(struct be_adapter *adapter, u8 *func);
585extern bool be_is_wol_supported(struct be_adapter *adapter); 604extern bool be_is_wol_supported(struct be_adapter *adapter);
605extern bool be_pause_supported(struct be_adapter *adapter);
586#endif /* BE_H */ 606#endif /* BE_H */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 67b030d72df1..43167e863955 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -61,10 +61,21 @@ static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
61 compl->flags = 0; 61 compl->flags = 0;
62} 62}
63 63
64static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
65{
66 unsigned long addr;
67
68 addr = tag1;
69 addr = ((addr << 16) << 16) | tag0;
70 return (void *)addr;
71}
72
64static int be_mcc_compl_process(struct be_adapter *adapter, 73static int be_mcc_compl_process(struct be_adapter *adapter,
65 struct be_mcc_compl *compl) 74 struct be_mcc_compl *compl)
66{ 75{
67 u16 compl_status, extd_status; 76 u16 compl_status, extd_status;
77 struct be_cmd_resp_hdr *resp_hdr;
78 u8 opcode = 0, subsystem = 0;
68 79
69 /* Just swap the status to host endian; mcc tag is opaquely copied 80 /* Just swap the status to host endian; mcc tag is opaquely copied
70 * from mcc_wrb */ 81 * from mcc_wrb */
@@ -73,32 +84,36 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
73 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) & 84 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
74 CQE_STATUS_COMPL_MASK; 85 CQE_STATUS_COMPL_MASK;
75 86
76 if (((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) || 87 resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
77 (compl->tag0 == OPCODE_COMMON_WRITE_OBJECT)) && 88
78 (compl->tag1 == CMD_SUBSYSTEM_COMMON)) { 89 if (resp_hdr) {
90 opcode = resp_hdr->opcode;
91 subsystem = resp_hdr->subsystem;
92 }
93
94 if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
95 (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
96 (subsystem == CMD_SUBSYSTEM_COMMON)) {
79 adapter->flash_status = compl_status; 97 adapter->flash_status = compl_status;
80 complete(&adapter->flash_compl); 98 complete(&adapter->flash_compl);
81 } 99 }
82 100
83 if (compl_status == MCC_STATUS_SUCCESS) { 101 if (compl_status == MCC_STATUS_SUCCESS) {
84 if (((compl->tag0 == OPCODE_ETH_GET_STATISTICS) || 102 if (((opcode == OPCODE_ETH_GET_STATISTICS) ||
85 (compl->tag0 == OPCODE_ETH_GET_PPORT_STATS)) && 103 (opcode == OPCODE_ETH_GET_PPORT_STATS)) &&
86 (compl->tag1 == CMD_SUBSYSTEM_ETH)) { 104 (subsystem == CMD_SUBSYSTEM_ETH)) {
87 be_parse_stats(adapter); 105 be_parse_stats(adapter);
88 adapter->stats_cmd_sent = false; 106 adapter->stats_cmd_sent = false;
89 } 107 }
90 if (compl->tag0 == 108 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
91 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES) { 109 subsystem == CMD_SUBSYSTEM_COMMON) {
92 struct be_mcc_wrb *mcc_wrb =
93 queue_index_node(&adapter->mcc_obj.q,
94 compl->tag1);
95 struct be_cmd_resp_get_cntl_addnl_attribs *resp = 110 struct be_cmd_resp_get_cntl_addnl_attribs *resp =
96 embedded_payload(mcc_wrb); 111 (void *)resp_hdr;
97 adapter->drv_stats.be_on_die_temperature = 112 adapter->drv_stats.be_on_die_temperature =
98 resp->on_die_temperature; 113 resp->on_die_temperature;
99 } 114 }
100 } else { 115 } else {
101 if (compl->tag0 == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES) 116 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
102 be_get_temp_freq = 0; 117 be_get_temp_freq = 0;
103 118
104 if (compl_status == MCC_STATUS_NOT_SUPPORTED || 119 if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
@@ -108,13 +123,13 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
108 if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) { 123 if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
109 dev_warn(&adapter->pdev->dev, "This domain(VM) is not " 124 dev_warn(&adapter->pdev->dev, "This domain(VM) is not "
110 "permitted to execute this cmd (opcode %d)\n", 125 "permitted to execute this cmd (opcode %d)\n",
111 compl->tag0); 126 opcode);
112 } else { 127 } else {
113 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) & 128 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
114 CQE_STATUS_EXTD_MASK; 129 CQE_STATUS_EXTD_MASK;
115 dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed:" 130 dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed:"
116 "status %d, extd-status %d\n", 131 "status %d, extd-status %d\n",
117 compl->tag0, compl_status, extd_status); 132 opcode, compl_status, extd_status);
118 } 133 }
119 } 134 }
120done: 135done:
@@ -126,7 +141,7 @@ static void be_async_link_state_process(struct be_adapter *adapter,
126 struct be_async_event_link_state *evt) 141 struct be_async_event_link_state *evt)
127{ 142{
128 /* When link status changes, link speed must be re-queried from FW */ 143 /* When link status changes, link speed must be re-queried from FW */
129 adapter->link_speed = -1; 144 adapter->phy.link_speed = -1;
130 145
131 /* For the initial link status do not rely on the ASYNC event as 146 /* For the initial link status do not rely on the ASYNC event as
132 * it may not be received in some cases. 147 * it may not be received in some cases.
@@ -153,7 +168,7 @@ static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
153{ 168{
154 if (evt->physical_port == adapter->port_num) { 169 if (evt->physical_port == adapter->port_num) {
155 /* qos_link_speed is in units of 10 Mbps */ 170 /* qos_link_speed is in units of 10 Mbps */
156 adapter->link_speed = evt->qos_link_speed * 10; 171 adapter->phy.link_speed = evt->qos_link_speed * 10;
157 } 172 }
158} 173}
159 174
@@ -286,7 +301,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
286 if (i == mcc_timeout) { 301 if (i == mcc_timeout) {
287 dev_err(&adapter->pdev->dev, "FW not responding\n"); 302 dev_err(&adapter->pdev->dev, "FW not responding\n");
288 adapter->fw_timeout = true; 303 adapter->fw_timeout = true;
289 return -1; 304 return -EIO;
290 } 305 }
291 return status; 306 return status;
292} 307}
@@ -294,8 +309,26 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
294/* Notify MCC requests and wait for completion */ 309/* Notify MCC requests and wait for completion */
295static int be_mcc_notify_wait(struct be_adapter *adapter) 310static int be_mcc_notify_wait(struct be_adapter *adapter)
296{ 311{
312 int status;
313 struct be_mcc_wrb *wrb;
314 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
315 u16 index = mcc_obj->q.head;
316 struct be_cmd_resp_hdr *resp;
317
318 index_dec(&index, mcc_obj->q.len);
319 wrb = queue_index_node(&mcc_obj->q, index);
320
321 resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);
322
297 be_mcc_notify(adapter); 323 be_mcc_notify(adapter);
298 return be_mcc_wait_compl(adapter); 324
325 status = be_mcc_wait_compl(adapter);
326 if (status == -EIO)
327 goto out;
328
329 status = resp->status;
330out:
331 return status;
299} 332}
300 333
301static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db) 334static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
@@ -435,14 +468,17 @@ static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
435 struct be_mcc_wrb *wrb, struct be_dma_mem *mem) 468 struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
436{ 469{
437 struct be_sge *sge; 470 struct be_sge *sge;
471 unsigned long addr = (unsigned long)req_hdr;
472 u64 req_addr = addr;
438 473
439 req_hdr->opcode = opcode; 474 req_hdr->opcode = opcode;
440 req_hdr->subsystem = subsystem; 475 req_hdr->subsystem = subsystem;
441 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr)); 476 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
442 req_hdr->version = 0; 477 req_hdr->version = 0;
443 478
444 wrb->tag0 = opcode; 479 wrb->tag0 = req_addr & 0xFFFFFFFF;
445 wrb->tag1 = subsystem; 480 wrb->tag1 = upper_32_bits(req_addr);
481
446 wrb->payload_length = cmd_len; 482 wrb->payload_length = cmd_len;
447 if (mem) { 483 if (mem) {
448 wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) << 484 wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
@@ -1221,7 +1257,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1221 OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb, 1257 OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
1222 nonemb_cmd); 1258 nonemb_cmd);
1223 1259
1224 req->cmd_params.params.pport_num = cpu_to_le16(adapter->port_num); 1260 req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
1225 req->cmd_params.params.reset_stats = 0; 1261 req->cmd_params.params.reset_stats = 0;
1226 1262
1227 be_mcc_notify(adapter); 1263 be_mcc_notify(adapter);
@@ -1283,13 +1319,10 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
1283{ 1319{
1284 struct be_mcc_wrb *wrb; 1320 struct be_mcc_wrb *wrb;
1285 struct be_cmd_req_get_cntl_addnl_attribs *req; 1321 struct be_cmd_req_get_cntl_addnl_attribs *req;
1286 u16 mccq_index;
1287 int status; 1322 int status;
1288 1323
1289 spin_lock_bh(&adapter->mcc_lock); 1324 spin_lock_bh(&adapter->mcc_lock);
1290 1325
1291 mccq_index = adapter->mcc_obj.q.head;
1292
1293 wrb = wrb_from_mccq(adapter); 1326 wrb = wrb_from_mccq(adapter);
1294 if (!wrb) { 1327 if (!wrb) {
1295 status = -EBUSY; 1328 status = -EBUSY;
@@ -1301,8 +1334,6 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
1301 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req), 1334 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
1302 wrb, NULL); 1335 wrb, NULL);
1303 1336
1304 wrb->tag1 = mccq_index;
1305
1306 be_mcc_notify(adapter); 1337 be_mcc_notify(adapter);
1307 1338
1308err: 1339err:
@@ -1824,18 +1855,16 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
1824 spin_unlock_bh(&adapter->mcc_lock); 1855 spin_unlock_bh(&adapter->mcc_lock);
1825 1856
1826 if (!wait_for_completion_timeout(&adapter->flash_compl, 1857 if (!wait_for_completion_timeout(&adapter->flash_compl,
1827 msecs_to_jiffies(12000))) 1858 msecs_to_jiffies(30000)))
1828 status = -1; 1859 status = -1;
1829 else 1860 else
1830 status = adapter->flash_status; 1861 status = adapter->flash_status;
1831 1862
1832 resp = embedded_payload(wrb); 1863 resp = embedded_payload(wrb);
1833 if (!status) { 1864 if (!status)
1834 *data_written = le32_to_cpu(resp->actual_write_len); 1865 *data_written = le32_to_cpu(resp->actual_write_len);
1835 } else { 1866 else
1836 *addn_status = resp->additional_status; 1867 *addn_status = resp->additional_status;
1837 status = resp->status;
1838 }
1839 1868
1840 return status; 1869 return status;
1841 1870
@@ -1950,7 +1979,7 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
1950 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1979 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1951 OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4, wrb, NULL); 1980 OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4, wrb, NULL);
1952 1981
1953 req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT); 1982 req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT);
1954 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT); 1983 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
1955 req->params.offset = cpu_to_le32(offset); 1984 req->params.offset = cpu_to_le32(offset);
1956 req->params.data_buf_size = cpu_to_le32(0x4); 1985 req->params.data_buf_size = cpu_to_le32(0x4);
@@ -2136,8 +2165,7 @@ err:
2136 return status; 2165 return status;
2137} 2166}
2138 2167
2139int be_cmd_get_phy_info(struct be_adapter *adapter, 2168int be_cmd_get_phy_info(struct be_adapter *adapter)
2140 struct be_phy_info *phy_info)
2141{ 2169{
2142 struct be_mcc_wrb *wrb; 2170 struct be_mcc_wrb *wrb;
2143 struct be_cmd_req_get_phy_info *req; 2171 struct be_cmd_req_get_phy_info *req;
@@ -2170,9 +2198,15 @@ int be_cmd_get_phy_info(struct be_adapter *adapter,
2170 if (!status) { 2198 if (!status) {
2171 struct be_phy_info *resp_phy_info = 2199 struct be_phy_info *resp_phy_info =
2172 cmd.va + sizeof(struct be_cmd_req_hdr); 2200 cmd.va + sizeof(struct be_cmd_req_hdr);
2173 phy_info->phy_type = le16_to_cpu(resp_phy_info->phy_type); 2201 adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
2174 phy_info->interface_type = 2202 adapter->phy.interface_type =
2175 le16_to_cpu(resp_phy_info->interface_type); 2203 le16_to_cpu(resp_phy_info->interface_type);
2204 adapter->phy.auto_speeds_supported =
2205 le16_to_cpu(resp_phy_info->auto_speeds_supported);
2206 adapter->phy.fixed_speeds_supported =
2207 le16_to_cpu(resp_phy_info->fixed_speeds_supported);
2208 adapter->phy.misc_params =
2209 le32_to_cpu(resp_phy_info->misc_params);
2176 } 2210 }
2177 pci_free_consistent(adapter->pdev, cmd.size, 2211 pci_free_consistent(adapter->pdev, cmd.size,
2178 cmd.va, cmd.dma); 2212 cmd.va, cmd.dma);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index d5b680c56af0..944f031bd31e 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -225,8 +225,12 @@ struct be_cmd_req_hdr {
225#define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */ 225#define RESP_HDR_INFO_OPCODE_SHIFT 0 /* bits 0 - 7 */
226#define RESP_HDR_INFO_SUBSYS_SHIFT 8 /* bits 8 - 15 */ 226#define RESP_HDR_INFO_SUBSYS_SHIFT 8 /* bits 8 - 15 */
227struct be_cmd_resp_hdr { 227struct be_cmd_resp_hdr {
228 u32 info; /* dword 0 */ 228 u8 opcode; /* dword 0 */
229 u32 status; /* dword 1 */ 229 u8 subsystem; /* dword 0 */
230 u8 rsvd[2]; /* dword 0 */
231 u8 status; /* dword 1 */
232 u8 add_status; /* dword 1 */
233 u8 rsvd1[2]; /* dword 1 */
230 u32 response_length; /* dword 2 */ 234 u32 response_length; /* dword 2 */
231 u32 actual_resp_len; /* dword 3 */ 235 u32 actual_resp_len; /* dword 3 */
232}; 236};
@@ -1309,9 +1313,36 @@ enum {
1309 PHY_TYPE_KX4_10GB, 1313 PHY_TYPE_KX4_10GB,
1310 PHY_TYPE_BASET_10GB, 1314 PHY_TYPE_BASET_10GB,
1311 PHY_TYPE_BASET_1GB, 1315 PHY_TYPE_BASET_1GB,
1316 PHY_TYPE_BASEX_1GB,
1317 PHY_TYPE_SGMII,
1312 PHY_TYPE_DISABLED = 255 1318 PHY_TYPE_DISABLED = 255
1313}; 1319};
1314 1320
1321#define BE_SUPPORTED_SPEED_NONE 0
1322#define BE_SUPPORTED_SPEED_10MBPS 1
1323#define BE_SUPPORTED_SPEED_100MBPS 2
1324#define BE_SUPPORTED_SPEED_1GBPS 4
1325#define BE_SUPPORTED_SPEED_10GBPS 8
1326
1327#define BE_AN_EN 0x2
1328#define BE_PAUSE_SYM_EN 0x80
1329
1330/* MAC speed valid values */
1331#define SPEED_DEFAULT 0x0
1332#define SPEED_FORCED_10GB 0x1
1333#define SPEED_FORCED_1GB 0x2
1334#define SPEED_AUTONEG_10GB 0x3
1335#define SPEED_AUTONEG_1GB 0x4
1336#define SPEED_AUTONEG_100MB 0x5
1337#define SPEED_AUTONEG_10GB_1GB 0x6
1338#define SPEED_AUTONEG_10GB_1GB_100MB 0x7
1339#define SPEED_AUTONEG_1GB_100MB 0x8
1340#define SPEED_AUTONEG_10MB 0x9
1341#define SPEED_AUTONEG_1GB_100MB_10MB 0xa
1342#define SPEED_AUTONEG_100MB_10MB 0xb
1343#define SPEED_FORCED_100MB 0xc
1344#define SPEED_FORCED_10MB 0xd
1345
1315struct be_cmd_req_get_phy_info { 1346struct be_cmd_req_get_phy_info {
1316 struct be_cmd_req_hdr hdr; 1347 struct be_cmd_req_hdr hdr;
1317 u8 rsvd0[24]; 1348 u8 rsvd0[24];
@@ -1321,7 +1352,11 @@ struct be_phy_info {
1321 u16 phy_type; 1352 u16 phy_type;
1322 u16 interface_type; 1353 u16 interface_type;
1323 u32 misc_params; 1354 u32 misc_params;
1324 u32 future_use[4]; 1355 u16 ext_phy_details;
1356 u16 rsvd;
1357 u16 auto_speeds_supported;
1358 u16 fixed_speeds_supported;
1359 u32 future_use[2];
1325}; 1360};
1326 1361
1327struct be_cmd_resp_get_phy_info { 1362struct be_cmd_resp_get_phy_info {
@@ -1655,8 +1690,7 @@ extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
1655 struct be_dma_mem *nonemb_cmd); 1690 struct be_dma_mem *nonemb_cmd);
1656extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, 1691extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
1657 u8 loopback_type, u8 enable); 1692 u8 loopback_type, u8 enable);
1658extern int be_cmd_get_phy_info(struct be_adapter *adapter, 1693extern int be_cmd_get_phy_info(struct be_adapter *adapter);
1659 struct be_phy_info *phy_info);
1660extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain); 1694extern int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain);
1661extern void be_detect_dump_ue(struct be_adapter *adapter); 1695extern void be_detect_dump_ue(struct be_adapter *adapter);
1662extern int be_cmd_get_die_temperature(struct be_adapter *adapter); 1696extern int be_cmd_get_die_temperature(struct be_adapter *adapter);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index c1ff73cb0e62..747f68fa976d 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -433,102 +433,193 @@ static int be_get_sset_count(struct net_device *netdev, int stringset)
433 } 433 }
434} 434}
435 435
436static u32 be_get_port_type(u32 phy_type, u32 dac_cable_len)
437{
438 u32 port;
439
440 switch (phy_type) {
441 case PHY_TYPE_BASET_1GB:
442 case PHY_TYPE_BASEX_1GB:
443 case PHY_TYPE_SGMII:
444 port = PORT_TP;
445 break;
446 case PHY_TYPE_SFP_PLUS_10GB:
447 port = dac_cable_len ? PORT_DA : PORT_FIBRE;
448 break;
449 case PHY_TYPE_XFP_10GB:
450 case PHY_TYPE_SFP_1GB:
451 port = PORT_FIBRE;
452 break;
453 case PHY_TYPE_BASET_10GB:
454 port = PORT_TP;
455 break;
456 default:
457 port = PORT_OTHER;
458 }
459
460 return port;
461}
462
463static u32 convert_to_et_setting(u32 if_type, u32 if_speeds)
464{
465 u32 val = 0;
466
467 switch (if_type) {
468 case PHY_TYPE_BASET_1GB:
469 case PHY_TYPE_BASEX_1GB:
470 case PHY_TYPE_SGMII:
471 val |= SUPPORTED_TP;
472 if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
473 val |= SUPPORTED_1000baseT_Full;
474 if (if_speeds & BE_SUPPORTED_SPEED_100MBPS)
475 val |= SUPPORTED_100baseT_Full;
476 if (if_speeds & BE_SUPPORTED_SPEED_10MBPS)
477 val |= SUPPORTED_10baseT_Full;
478 break;
479 case PHY_TYPE_KX4_10GB:
480 val |= SUPPORTED_Backplane;
481 if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
482 val |= SUPPORTED_1000baseKX_Full;
483 if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
484 val |= SUPPORTED_10000baseKX4_Full;
485 break;
486 case PHY_TYPE_KR_10GB:
487 val |= SUPPORTED_Backplane |
488 SUPPORTED_10000baseKR_Full;
489 break;
490 case PHY_TYPE_SFP_PLUS_10GB:
491 case PHY_TYPE_XFP_10GB:
492 case PHY_TYPE_SFP_1GB:
493 val |= SUPPORTED_FIBRE;
494 if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
495 val |= SUPPORTED_10000baseT_Full;
496 if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
497 val |= SUPPORTED_1000baseT_Full;
498 break;
499 case PHY_TYPE_BASET_10GB:
500 val |= SUPPORTED_TP;
501 if (if_speeds & BE_SUPPORTED_SPEED_10GBPS)
502 val |= SUPPORTED_10000baseT_Full;
503 if (if_speeds & BE_SUPPORTED_SPEED_1GBPS)
504 val |= SUPPORTED_1000baseT_Full;
505 if (if_speeds & BE_SUPPORTED_SPEED_100MBPS)
506 val |= SUPPORTED_100baseT_Full;
507 break;
508 default:
509 val |= SUPPORTED_TP;
510 }
511
512 return val;
513}
514
515static int convert_to_et_speed(u32 be_speed)
516{
517 int et_speed = SPEED_10000;
518
519 switch (be_speed) {
520 case PHY_LINK_SPEED_10MBPS:
521 et_speed = SPEED_10;
522 break;
523 case PHY_LINK_SPEED_100MBPS:
524 et_speed = SPEED_100;
525 break;
526 case PHY_LINK_SPEED_1GBPS:
527 et_speed = SPEED_1000;
528 break;
529 case PHY_LINK_SPEED_10GBPS:
530 et_speed = SPEED_10000;
531 break;
532 }
533
534 return et_speed;
535}
536
537bool be_pause_supported(struct be_adapter *adapter)
538{
539 return (adapter->phy.interface_type == PHY_TYPE_SFP_PLUS_10GB ||
540 adapter->phy.interface_type == PHY_TYPE_XFP_10GB) ?
541 false : true;
542}
543
436static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) 544static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
437{ 545{
438 struct be_adapter *adapter = netdev_priv(netdev); 546 struct be_adapter *adapter = netdev_priv(netdev);
439 struct be_phy_info phy_info; 547 u8 port_speed = 0;
440 u8 mac_speed = 0;
441 u16 link_speed = 0; 548 u16 link_speed = 0;
442 u8 link_status; 549 u8 link_status;
550 u32 et_speed = 0;
443 int status; 551 int status;
444 552
445 if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) { 553 if (adapter->phy.link_speed < 0 || !(netdev->flags & IFF_UP)) {
446 status = be_cmd_link_status_query(adapter, &mac_speed, 554 if (adapter->phy.forced_port_speed < 0) {
447 &link_speed, &link_status, 0); 555 status = be_cmd_link_status_query(adapter, &port_speed,
448 if (!status) 556 &link_speed, &link_status, 0);
449 be_link_status_update(adapter, link_status); 557 if (!status)
450 558 be_link_status_update(adapter, link_status);
451 /* link_speed is in units of 10 Mbps */ 559 if (link_speed)
452 if (link_speed) { 560 et_speed = link_speed * 10;
453 ethtool_cmd_speed_set(ecmd, link_speed*10); 561 else if (link_status)
562 et_speed = convert_to_et_speed(port_speed);
454 } else { 563 } else {
455 switch (mac_speed) { 564 et_speed = adapter->phy.forced_port_speed;
456 case PHY_LINK_SPEED_10MBPS:
457 ethtool_cmd_speed_set(ecmd, SPEED_10);
458 break;
459 case PHY_LINK_SPEED_100MBPS:
460 ethtool_cmd_speed_set(ecmd, SPEED_100);
461 break;
462 case PHY_LINK_SPEED_1GBPS:
463 ethtool_cmd_speed_set(ecmd, SPEED_1000);
464 break;
465 case PHY_LINK_SPEED_10GBPS:
466 ethtool_cmd_speed_set(ecmd, SPEED_10000);
467 break;
468 case PHY_LINK_SPEED_ZERO:
469 ethtool_cmd_speed_set(ecmd, 0);
470 break;
471 }
472 } 565 }
473 566
474 status = be_cmd_get_phy_info(adapter, &phy_info); 567 ethtool_cmd_speed_set(ecmd, et_speed);
475 if (!status) { 568
476 switch (phy_info.interface_type) { 569 status = be_cmd_get_phy_info(adapter);
477 case PHY_TYPE_XFP_10GB: 570 if (status)
478 case PHY_TYPE_SFP_1GB: 571 return status;
479 case PHY_TYPE_SFP_PLUS_10GB: 572
480 ecmd->port = PORT_FIBRE; 573 ecmd->supported =
481 break; 574 convert_to_et_setting(adapter->phy.interface_type,
482 default: 575 adapter->phy.auto_speeds_supported |
483 ecmd->port = PORT_TP; 576 adapter->phy.fixed_speeds_supported);
484 break; 577 ecmd->advertising =
485 } 578 convert_to_et_setting(adapter->phy.interface_type,
579 adapter->phy.auto_speeds_supported);
486 580
487 switch (phy_info.interface_type) { 581 ecmd->port = be_get_port_type(adapter->phy.interface_type,
488 case PHY_TYPE_KR_10GB: 582 adapter->phy.dac_cable_len);
489 case PHY_TYPE_KX4_10GB: 583
490 ecmd->autoneg = AUTONEG_ENABLE; 584 if (adapter->phy.auto_speeds_supported) {
585 ecmd->supported |= SUPPORTED_Autoneg;
586 ecmd->autoneg = AUTONEG_ENABLE;
587 ecmd->advertising |= ADVERTISED_Autoneg;
588 }
589
590 if (be_pause_supported(adapter)) {
591 ecmd->supported |= SUPPORTED_Pause;
592 ecmd->advertising |= ADVERTISED_Pause;
593 }
594
595 switch (adapter->phy.interface_type) {
596 case PHY_TYPE_KR_10GB:
597 case PHY_TYPE_KX4_10GB:
491 ecmd->transceiver = XCVR_INTERNAL; 598 ecmd->transceiver = XCVR_INTERNAL;
492 break; 599 break;
493 default: 600 default:
494 ecmd->autoneg = AUTONEG_DISABLE; 601 ecmd->transceiver = XCVR_EXTERNAL;
495 ecmd->transceiver = XCVR_EXTERNAL; 602 break;
496 break;
497 }
498 } 603 }
499 604
500 /* Save for future use */ 605 /* Save for future use */
501 adapter->link_speed = ethtool_cmd_speed(ecmd); 606 adapter->phy.link_speed = ethtool_cmd_speed(ecmd);
502 adapter->port_type = ecmd->port; 607 adapter->phy.port_type = ecmd->port;
503 adapter->transceiver = ecmd->transceiver; 608 adapter->phy.transceiver = ecmd->transceiver;
504 adapter->autoneg = ecmd->autoneg; 609 adapter->phy.autoneg = ecmd->autoneg;
610 adapter->phy.advertising = ecmd->advertising;
611 adapter->phy.supported = ecmd->supported;
505 } else { 612 } else {
506 ethtool_cmd_speed_set(ecmd, adapter->link_speed); 613 ethtool_cmd_speed_set(ecmd, adapter->phy.link_speed);
507 ecmd->port = adapter->port_type; 614 ecmd->port = adapter->phy.port_type;
508 ecmd->transceiver = adapter->transceiver; 615 ecmd->transceiver = adapter->phy.transceiver;
509 ecmd->autoneg = adapter->autoneg; 616 ecmd->autoneg = adapter->phy.autoneg;
617 ecmd->advertising = adapter->phy.advertising;
618 ecmd->supported = adapter->phy.supported;
510 } 619 }
511 620
512 ecmd->duplex = DUPLEX_FULL; 621 ecmd->duplex = netif_carrier_ok(netdev) ? DUPLEX_FULL : DUPLEX_UNKNOWN;
513 ecmd->phy_address = adapter->port_num; 622 ecmd->phy_address = adapter->port_num;
514 switch (ecmd->port) {
515 case PORT_FIBRE:
516 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
517 break;
518 case PORT_TP:
519 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
520 break;
521 case PORT_AUI:
522 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_AUI);
523 break;
524 }
525
526 if (ecmd->autoneg) {
527 ecmd->supported |= SUPPORTED_1000baseT_Full;
528 ecmd->supported |= SUPPORTED_Autoneg;
529 ecmd->advertising |= (ADVERTISED_10000baseT_Full |
530 ADVERTISED_1000baseT_Full);
531 }
532 623
533 return 0; 624 return 0;
534} 625}
@@ -548,7 +639,7 @@ be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
548 struct be_adapter *adapter = netdev_priv(netdev); 639 struct be_adapter *adapter = netdev_priv(netdev);
549 640
550 be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause); 641 be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause);
551 ecmd->autoneg = 0; 642 ecmd->autoneg = adapter->phy.fc_autoneg;
552} 643}
553 644
554static int 645static int
@@ -702,7 +793,7 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
702 } 793 }
703 } 794 }
704 795
705 if (be_test_ddr_dma(adapter) != 0) { 796 if (!lancer_chip(adapter) && be_test_ddr_dma(adapter) != 0) {
706 data[3] = 1; 797 data[3] = 1;
707 test->flags |= ETH_TEST_FL_FAILED; 798 test->flags |= ETH_TEST_FL_FAILED;
708 } 799 }
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index f2c89e3ccabd..0949aa609164 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -162,22 +162,23 @@
162#define QUERY_FAT 1 162#define QUERY_FAT 1
163 163
164/* Flashrom related descriptors */ 164/* Flashrom related descriptors */
165#define MAX_FLASH_COMP 32
165#define IMAGE_TYPE_FIRMWARE 160 166#define IMAGE_TYPE_FIRMWARE 160
166#define IMAGE_TYPE_BOOTCODE 224 167#define IMAGE_TYPE_BOOTCODE 224
167#define IMAGE_TYPE_OPTIONROM 32 168#define IMAGE_TYPE_OPTIONROM 32
168 169
169#define NUM_FLASHDIR_ENTRIES 32 170#define NUM_FLASHDIR_ENTRIES 32
170 171
171#define IMG_TYPE_ISCSI_ACTIVE 0 172#define OPTYPE_ISCSI_ACTIVE 0
172#define IMG_TYPE_REDBOOT 1 173#define OPTYPE_REDBOOT 1
173#define IMG_TYPE_BIOS 2 174#define OPTYPE_BIOS 2
174#define IMG_TYPE_PXE_BIOS 3 175#define OPTYPE_PXE_BIOS 3
175#define IMG_TYPE_FCOE_BIOS 8 176#define OPTYPE_FCOE_BIOS 8
176#define IMG_TYPE_ISCSI_BACKUP 9 177#define OPTYPE_ISCSI_BACKUP 9
177#define IMG_TYPE_FCOE_FW_ACTIVE 10 178#define OPTYPE_FCOE_FW_ACTIVE 10
178#define IMG_TYPE_FCOE_FW_BACKUP 11 179#define OPTYPE_FCOE_FW_BACKUP 11
179#define IMG_TYPE_NCSI_FW 13 180#define OPTYPE_NCSI_FW 13
180#define IMG_TYPE_PHY_FW 99 181#define OPTYPE_PHY_FW 99
181#define TN_8022 13 182#define TN_8022 13
182 183
183#define ILLEGAL_IOCTL_REQ 2 184#define ILLEGAL_IOCTL_REQ 2
@@ -223,6 +224,24 @@
223#define FLASH_REDBOOT_START_g3 (262144) 224#define FLASH_REDBOOT_START_g3 (262144)
224#define FLASH_PHY_FW_START_g3 1310720 225#define FLASH_PHY_FW_START_g3 1310720
225 226
227#define IMAGE_NCSI 16
228#define IMAGE_OPTION_ROM_PXE 32
229#define IMAGE_OPTION_ROM_FCoE 33
230#define IMAGE_OPTION_ROM_ISCSI 34
231#define IMAGE_FLASHISM_JUMPVECTOR 48
232#define IMAGE_FLASH_ISM 49
233#define IMAGE_JUMP_VECTOR 50
234#define IMAGE_FIRMWARE_iSCSI 160
235#define IMAGE_FIRMWARE_COMP_iSCSI 161
236#define IMAGE_FIRMWARE_FCoE 162
237#define IMAGE_FIRMWARE_COMP_FCoE 163
238#define IMAGE_FIRMWARE_BACKUP_iSCSI 176
239#define IMAGE_FIRMWARE_BACKUP_COMP_iSCSI 177
240#define IMAGE_FIRMWARE_BACKUP_FCoE 178
241#define IMAGE_FIRMWARE_BACKUP_COMP_FCoE 179
242#define IMAGE_FIRMWARE_PHY 192
243#define IMAGE_BOOT_CODE 224
244
226/************* Rx Packet Type Encoding **************/ 245/************* Rx Packet Type Encoding **************/
227#define BE_UNICAST_PACKET 0 246#define BE_UNICAST_PACKET 0
228#define BE_MULTICAST_PACKET 1 247#define BE_MULTICAST_PACKET 1
@@ -445,6 +464,7 @@ struct flash_comp {
445 unsigned long offset; 464 unsigned long offset;
446 int optype; 465 int optype;
447 int size; 466 int size;
467 int img_type;
448}; 468};
449 469
450struct image_hdr { 470struct image_hdr {
@@ -481,17 +501,19 @@ struct flash_section_hdr {
481 u32 format_rev; 501 u32 format_rev;
482 u32 cksum; 502 u32 cksum;
483 u32 antidote; 503 u32 antidote;
484 u32 build_no; 504 u32 num_images;
485 u8 id_string[64]; 505 u8 id_string[128];
486 u32 active_entry_mask; 506 u32 rsvd[4];
487 u32 valid_entry_mask; 507} __packed;
488 u32 org_content_mask; 508
489 u32 rsvd0; 509struct flash_section_hdr_g2 {
490 u32 rsvd1; 510 u32 format_rev;
491 u32 rsvd2; 511 u32 cksum;
492 u32 rsvd3; 512 u32 antidote;
493 u32 rsvd4; 513 u32 build_num;
494}; 514 u8 id_string[128];
515 u32 rsvd[8];
516} __packed;
495 517
496struct flash_section_entry { 518struct flash_section_entry {
497 u32 type; 519 u32 type;
@@ -503,10 +525,16 @@ struct flash_section_entry {
503 u32 rsvd0; 525 u32 rsvd0;
504 u32 rsvd1; 526 u32 rsvd1;
505 u8 ver_data[32]; 527 u8 ver_data[32];
506}; 528} __packed;
507 529
508struct flash_section_info { 530struct flash_section_info {
509 u8 cookie[32]; 531 u8 cookie[32];
510 struct flash_section_hdr fsec_hdr; 532 struct flash_section_hdr fsec_hdr;
511 struct flash_section_entry fsec_entry[32]; 533 struct flash_section_entry fsec_entry[32];
512}; 534} __packed;
535
536struct flash_section_info_g2 {
537 u8 cookie[32];
538 struct flash_section_hdr_g2 fsec_hdr;
539 struct flash_section_entry fsec_entry[32];
540} __packed;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 528a886bc2cd..6d5d30be0481 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -421,6 +421,9 @@ void be_parse_stats(struct be_adapter *adapter)
421 populate_be2_stats(adapter); 421 populate_be2_stats(adapter);
422 } 422 }
423 423
424 if (lancer_chip(adapter))
425 goto done;
426
424 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */ 427 /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
425 for_all_rx_queues(adapter, rxo, i) { 428 for_all_rx_queues(adapter, rxo, i) {
426 /* below erx HW counter can actually wrap around after 429 /* below erx HW counter can actually wrap around after
@@ -429,6 +432,8 @@ void be_parse_stats(struct be_adapter *adapter)
429 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags, 432 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
430 (u16)erx->rx_drops_no_fragments[rxo->q.id]); 433 (u16)erx->rx_drops_no_fragments[rxo->q.id]);
431 } 434 }
435done:
436 return;
432} 437}
433 438
434static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev, 439static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
@@ -797,22 +802,30 @@ static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
797 if (adapter->promiscuous) 802 if (adapter->promiscuous)
798 return 0; 803 return 0;
799 804
800 if (adapter->vlans_added <= adapter->max_vlans) { 805 if (adapter->vlans_added > adapter->max_vlans)
801 /* Construct VLAN Table to give to HW */ 806 goto set_vlan_promisc;
802 for (i = 0; i < VLAN_N_VID; i++) { 807
803 if (adapter->vlan_tag[i]) { 808 /* Construct VLAN Table to give to HW */
804 vtag[ntags] = cpu_to_le16(i); 809 for (i = 0; i < VLAN_N_VID; i++)
805 ntags++; 810 if (adapter->vlan_tag[i])
806 } 811 vtag[ntags++] = cpu_to_le16(i);
807 } 812
808 status = be_cmd_vlan_config(adapter, adapter->if_handle, 813 status = be_cmd_vlan_config(adapter, adapter->if_handle,
809 vtag, ntags, 1, 0); 814 vtag, ntags, 1, 0);
810 } else { 815
811 status = be_cmd_vlan_config(adapter, adapter->if_handle, 816 /* Set to VLAN promisc mode as setting VLAN filter failed */
812 NULL, 0, 1, 1); 817 if (status) {
818 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
819 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
820 goto set_vlan_promisc;
813 } 821 }
814 822
815 return status; 823 return status;
824
825set_vlan_promisc:
826 status = be_cmd_vlan_config(adapter, adapter->if_handle,
827 NULL, 0, 1, 1);
828 return status;
816} 829}
817 830
818static int be_vlan_add_vid(struct net_device *netdev, u16 vid) 831static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
@@ -862,6 +875,7 @@ ret:
862static void be_set_rx_mode(struct net_device *netdev) 875static void be_set_rx_mode(struct net_device *netdev)
863{ 876{
864 struct be_adapter *adapter = netdev_priv(netdev); 877 struct be_adapter *adapter = netdev_priv(netdev);
878 int status;
865 879
866 if (netdev->flags & IFF_PROMISC) { 880 if (netdev->flags & IFF_PROMISC) {
867 be_cmd_rx_filter(adapter, IFF_PROMISC, ON); 881 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
@@ -908,7 +922,14 @@ static void be_set_rx_mode(struct net_device *netdev)
908 } 922 }
909 } 923 }
910 924
911 be_cmd_rx_filter(adapter, IFF_MULTICAST, ON); 925 status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
926
927 /* Set to MCAST promisc mode if setting MULTICAST address fails */
928 if (status) {
929 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
930 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
931 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
932 }
912done: 933done:
913 return; 934 return;
914} 935}
@@ -1238,6 +1259,7 @@ static void be_rx_compl_process(struct be_rx_obj *rxo,
1238 skb_checksum_none_assert(skb); 1259 skb_checksum_none_assert(skb);
1239 1260
1240 skb->protocol = eth_type_trans(skb, netdev); 1261 skb->protocol = eth_type_trans(skb, netdev);
1262 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1241 if (netdev->features & NETIF_F_RXHASH) 1263 if (netdev->features & NETIF_F_RXHASH)
1242 skb->rxhash = rxcp->rss_hash; 1264 skb->rxhash = rxcp->rss_hash;
1243 1265
@@ -1294,6 +1316,7 @@ void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1294 skb->len = rxcp->pkt_size; 1316 skb->len = rxcp->pkt_size;
1295 skb->data_len = rxcp->pkt_size; 1317 skb->data_len = rxcp->pkt_size;
1296 skb->ip_summed = CHECKSUM_UNNECESSARY; 1318 skb->ip_summed = CHECKSUM_UNNECESSARY;
1319 skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1297 if (adapter->netdev->features & NETIF_F_RXHASH) 1320 if (adapter->netdev->features & NETIF_F_RXHASH)
1298 skb->rxhash = rxcp->rss_hash; 1321 skb->rxhash = rxcp->rss_hash;
1299 1322
@@ -1555,7 +1578,9 @@ static int event_handle(struct be_eq_obj *eqo)
1555 if (!num) 1578 if (!num)
1556 rearm = true; 1579 rearm = true;
1557 1580
1558 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num); 1581 if (num || msix_enabled(eqo->adapter))
1582 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1583
1559 if (num) 1584 if (num)
1560 napi_schedule(&eqo->napi); 1585 napi_schedule(&eqo->napi);
1561 1586
@@ -2571,11 +2596,12 @@ err:
2571static void be_setup_init(struct be_adapter *adapter) 2596static void be_setup_init(struct be_adapter *adapter)
2572{ 2597{
2573 adapter->vlan_prio_bmap = 0xff; 2598 adapter->vlan_prio_bmap = 0xff;
2574 adapter->link_speed = -1; 2599 adapter->phy.link_speed = -1;
2575 adapter->if_handle = -1; 2600 adapter->if_handle = -1;
2576 adapter->be3_native = false; 2601 adapter->be3_native = false;
2577 adapter->promiscuous = false; 2602 adapter->promiscuous = false;
2578 adapter->eq_next_idx = 0; 2603 adapter->eq_next_idx = 0;
2604 adapter->phy.forced_port_speed = -1;
2579} 2605}
2580 2606
2581static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac) 2607static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
@@ -2680,24 +2706,15 @@ static int be_setup(struct be_adapter *adapter)
2680 2706
2681 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL); 2707 be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2682 2708
2683 status = be_vid_config(adapter, false, 0); 2709 be_vid_config(adapter, false, 0);
2684 if (status)
2685 goto err;
2686 2710
2687 be_set_rx_mode(adapter->netdev); 2711 be_set_rx_mode(adapter->netdev);
2688 2712
2689 status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc); 2713 be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2690 /* For Lancer: It is legal for this cmd to fail on VF */
2691 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
2692 goto err;
2693 2714
2694 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) { 2715 if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
2695 status = be_cmd_set_flow_control(adapter, adapter->tx_fc, 2716 be_cmd_set_flow_control(adapter, adapter->tx_fc,
2696 adapter->rx_fc); 2717 adapter->rx_fc);
2697 /* For Lancer: It is legal for this cmd to fail on VF */
2698 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
2699 goto err;
2700 }
2701 2718
2702 pcie_set_readrq(adapter->pdev, 4096); 2719 pcie_set_readrq(adapter->pdev, 4096);
2703 2720
@@ -2707,6 +2724,10 @@ static int be_setup(struct be_adapter *adapter)
2707 goto err; 2724 goto err;
2708 } 2725 }
2709 2726
2727 be_cmd_get_phy_info(adapter);
2728 if (be_pause_supported(adapter))
2729 adapter->phy.fc_autoneg = 1;
2730
2710 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000)); 2731 schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2711 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED; 2732 adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2712 2733
@@ -2731,6 +2752,8 @@ static void be_netpoll(struct net_device *netdev)
2731#endif 2752#endif
2732 2753
2733#define FW_FILE_HDR_SIGN "ServerEngines Corp. " 2754#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
2755char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2756
2734static bool be_flash_redboot(struct be_adapter *adapter, 2757static bool be_flash_redboot(struct be_adapter *adapter,
2735 const u8 *p, u32 img_start, int image_size, 2758 const u8 *p, u32 img_start, int image_size,
2736 int hdr_size) 2759 int hdr_size)
@@ -2760,71 +2783,105 @@ static bool be_flash_redboot(struct be_adapter *adapter,
2760 2783
2761static bool phy_flashing_required(struct be_adapter *adapter) 2784static bool phy_flashing_required(struct be_adapter *adapter)
2762{ 2785{
2763 int status = 0; 2786 return (adapter->phy.phy_type == TN_8022 &&
2764 struct be_phy_info phy_info; 2787 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
2788}
2765 2789
2766 status = be_cmd_get_phy_info(adapter, &phy_info); 2790static bool is_comp_in_ufi(struct be_adapter *adapter,
2767 if (status) 2791 struct flash_section_info *fsec, int type)
2768 return false; 2792{
2769 if ((phy_info.phy_type == TN_8022) && 2793 int i = 0, img_type = 0;
2770 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) { 2794 struct flash_section_info_g2 *fsec_g2 = NULL;
2771 return true; 2795
2796 if (adapter->generation != BE_GEN3)
2797 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2798
2799 for (i = 0; i < MAX_FLASH_COMP; i++) {
2800 if (fsec_g2)
2801 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2802 else
2803 img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2804
2805 if (img_type == type)
2806 return true;
2772 } 2807 }
2773 return false; 2808 return false;
2809
2810}
2811
2812struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2813 int header_size,
2814 const struct firmware *fw)
2815{
2816 struct flash_section_info *fsec = NULL;
2817 const u8 *p = fw->data;
2818
2819 p += header_size;
2820 while (p < (fw->data + fw->size)) {
2821 fsec = (struct flash_section_info *)p;
2822 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2823 return fsec;
2824 p += 32;
2825 }
2826 return NULL;
2774} 2827}
2775 2828
2776static int be_flash_data(struct be_adapter *adapter, 2829static int be_flash_data(struct be_adapter *adapter,
2777 const struct firmware *fw, 2830 const struct firmware *fw,
2778 struct be_dma_mem *flash_cmd, int num_of_images) 2831 struct be_dma_mem *flash_cmd,
2832 int num_of_images)
2779 2833
2780{ 2834{
2781 int status = 0, i, filehdr_size = 0; 2835 int status = 0, i, filehdr_size = 0;
2836 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
2782 u32 total_bytes = 0, flash_op; 2837 u32 total_bytes = 0, flash_op;
2783 int num_bytes; 2838 int num_bytes;
2784 const u8 *p = fw->data; 2839 const u8 *p = fw->data;
2785 struct be_cmd_write_flashrom *req = flash_cmd->va; 2840 struct be_cmd_write_flashrom *req = flash_cmd->va;
2786 const struct flash_comp *pflashcomp; 2841 const struct flash_comp *pflashcomp;
2787 int num_comp; 2842 int num_comp, hdr_size;
2788 2843 struct flash_section_info *fsec = NULL;
2789 static const struct flash_comp gen3_flash_types[10] = { 2844
2790 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE, 2845 struct flash_comp gen3_flash_types[] = {
2791 FLASH_IMAGE_MAX_SIZE_g3}, 2846 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
2792 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT, 2847 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
2793 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3}, 2848 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
2794 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS, 2849 FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
2795 FLASH_BIOS_IMAGE_MAX_SIZE_g3}, 2850 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
2796 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS, 2851 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
2797 FLASH_BIOS_IMAGE_MAX_SIZE_g3}, 2852 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
2798 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS, 2853 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
2799 FLASH_BIOS_IMAGE_MAX_SIZE_g3}, 2854 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
2800 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP, 2855 FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
2801 FLASH_IMAGE_MAX_SIZE_g3}, 2856 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
2802 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE, 2857 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
2803 FLASH_IMAGE_MAX_SIZE_g3}, 2858 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
2804 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP, 2859 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
2805 FLASH_IMAGE_MAX_SIZE_g3}, 2860 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
2806 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW, 2861 FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
2807 FLASH_NCSI_IMAGE_MAX_SIZE_g3}, 2862 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
2808 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW, 2863 FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
2809 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3} 2864 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
2865 FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
2810 }; 2866 };
2811 static const struct flash_comp gen2_flash_types[8] = { 2867
2812 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE, 2868 struct flash_comp gen2_flash_types[] = {
2813 FLASH_IMAGE_MAX_SIZE_g2}, 2869 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
2814 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT, 2870 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
2815 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2}, 2871 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
2816 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS, 2872 FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
2817 FLASH_BIOS_IMAGE_MAX_SIZE_g2}, 2873 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
2818 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS, 2874 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
2819 FLASH_BIOS_IMAGE_MAX_SIZE_g2}, 2875 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
2820 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS, 2876 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
2821 FLASH_BIOS_IMAGE_MAX_SIZE_g2}, 2877 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
2822 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP, 2878 FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
2823 FLASH_IMAGE_MAX_SIZE_g2}, 2879 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
2824 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE, 2880 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
2825 FLASH_IMAGE_MAX_SIZE_g2}, 2881 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
2826 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP, 2882 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
2827 FLASH_IMAGE_MAX_SIZE_g2} 2883 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
2884 FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
2828 }; 2885 };
2829 2886
2830 if (adapter->generation == BE_GEN3) { 2887 if (adapter->generation == BE_GEN3) {
@@ -2836,22 +2893,37 @@ static int be_flash_data(struct be_adapter *adapter,
2836 filehdr_size = sizeof(struct flash_file_hdr_g2); 2893 filehdr_size = sizeof(struct flash_file_hdr_g2);
2837 num_comp = ARRAY_SIZE(gen2_flash_types); 2894 num_comp = ARRAY_SIZE(gen2_flash_types);
2838 } 2895 }
2896 /* Get flash section info*/
2897 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
2898 if (!fsec) {
2899 dev_err(&adapter->pdev->dev,
2900 "Invalid Cookie. UFI corrupted ?\n");
2901 return -1;
2902 }
2839 for (i = 0; i < num_comp; i++) { 2903 for (i = 0; i < num_comp; i++) {
2840 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) && 2904 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
2841 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2842 continue; 2905 continue;
2843 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) { 2906
2907 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
2908 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2909 continue;
2910
2911 if (pflashcomp[i].optype == OPTYPE_PHY_FW) {
2844 if (!phy_flashing_required(adapter)) 2912 if (!phy_flashing_required(adapter))
2845 continue; 2913 continue;
2846 } 2914 }
2847 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) && 2915
2848 (!be_flash_redboot(adapter, fw->data, 2916 hdr_size = filehdr_size +
2849 pflashcomp[i].offset, pflashcomp[i].size, filehdr_size + 2917 (num_of_images * sizeof(struct image_hdr));
2850 (num_of_images * sizeof(struct image_hdr))))) 2918
2919 if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
2920 (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
2921 pflashcomp[i].size, hdr_size)))
2851 continue; 2922 continue;
2923
2924 /* Flash the component */
2852 p = fw->data; 2925 p = fw->data;
2853 p += filehdr_size + pflashcomp[i].offset 2926 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
2854 + (num_of_images * sizeof(struct image_hdr));
2855 if (p + pflashcomp[i].size > fw->data + fw->size) 2927 if (p + pflashcomp[i].size > fw->data + fw->size)
2856 return -1; 2928 return -1;
2857 total_bytes = pflashcomp[i].size; 2929 total_bytes = pflashcomp[i].size;
@@ -2862,12 +2934,12 @@ static int be_flash_data(struct be_adapter *adapter,
2862 num_bytes = total_bytes; 2934 num_bytes = total_bytes;
2863 total_bytes -= num_bytes; 2935 total_bytes -= num_bytes;
2864 if (!total_bytes) { 2936 if (!total_bytes) {
2865 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) 2937 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
2866 flash_op = FLASHROM_OPER_PHY_FLASH; 2938 flash_op = FLASHROM_OPER_PHY_FLASH;
2867 else 2939 else
2868 flash_op = FLASHROM_OPER_FLASH; 2940 flash_op = FLASHROM_OPER_FLASH;
2869 } else { 2941 } else {
2870 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) 2942 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
2871 flash_op = FLASHROM_OPER_PHY_SAVE; 2943 flash_op = FLASHROM_OPER_PHY_SAVE;
2872 else 2944 else
2873 flash_op = FLASHROM_OPER_SAVE; 2945 flash_op = FLASHROM_OPER_SAVE;
@@ -2879,7 +2951,7 @@ static int be_flash_data(struct be_adapter *adapter,
2879 if (status) { 2951 if (status) {
2880 if ((status == ILLEGAL_IOCTL_REQ) && 2952 if ((status == ILLEGAL_IOCTL_REQ) &&
2881 (pflashcomp[i].optype == 2953 (pflashcomp[i].optype ==
2882 IMG_TYPE_PHY_FW)) 2954 OPTYPE_PHY_FW))
2883 break; 2955 break;
2884 dev_err(&adapter->pdev->dev, 2956 dev_err(&adapter->pdev->dev,
2885 "cmd to write to flash rom failed.\n"); 2957 "cmd to write to flash rom failed.\n");
@@ -3749,6 +3821,11 @@ static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3749 3821
3750 pci_disable_device(pdev); 3822 pci_disable_device(pdev);
3751 3823
3824 /* The error could cause the FW to trigger a flash debug dump.
3825 * Resetting the card while flash dump is in progress
3826 * can cause it not to recover; wait for it to finish
3827 */
3828 ssleep(30);
3752 return PCI_ERS_RESULT_NEED_RESET; 3829 return PCI_ERS_RESULT_NEED_RESET;
3753} 3830}
3754 3831
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index 1637b9862292..9d71c9cc300b 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -545,9 +545,6 @@ static int __devinit fealnx_init_one(struct pci_dev *pdev,
545 /* Reset the chip to erase previous misconfiguration. */ 545 /* Reset the chip to erase previous misconfiguration. */
546 iowrite32(0x00000001, ioaddr + BCR); 546 iowrite32(0x00000001, ioaddr + BCR);
547 547
548 dev->base_addr = (unsigned long)ioaddr;
549 dev->irq = irq;
550
551 /* Make certain the descriptor lists are aligned. */ 548 /* Make certain the descriptor lists are aligned. */
552 np = netdev_priv(dev); 549 np = netdev_priv(dev);
553 np->mem = ioaddr; 550 np->mem = ioaddr;
@@ -832,11 +829,13 @@ static int netdev_open(struct net_device *dev)
832{ 829{
833 struct netdev_private *np = netdev_priv(dev); 830 struct netdev_private *np = netdev_priv(dev);
834 void __iomem *ioaddr = np->mem; 831 void __iomem *ioaddr = np->mem;
835 int i; 832 const int irq = np->pci_dev->irq;
833 int rc, i;
836 834
837 iowrite32(0x00000001, ioaddr + BCR); /* Reset */ 835 iowrite32(0x00000001, ioaddr + BCR); /* Reset */
838 836
839 if (request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev)) 837 rc = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
838 if (rc)
840 return -EAGAIN; 839 return -EAGAIN;
841 840
842 for (i = 0; i < 3; i++) 841 for (i = 0; i < 3; i++)
@@ -924,8 +923,7 @@ static int netdev_open(struct net_device *dev)
924 np->reset_timer.data = (unsigned long) dev; 923 np->reset_timer.data = (unsigned long) dev;
925 np->reset_timer.function = reset_timer; 924 np->reset_timer.function = reset_timer;
926 np->reset_timer_armed = 0; 925 np->reset_timer_armed = 0;
927 926 return rc;
928 return 0;
929} 927}
930 928
931 929
@@ -1910,7 +1908,7 @@ static int netdev_close(struct net_device *dev)
1910 del_timer_sync(&np->timer); 1908 del_timer_sync(&np->timer);
1911 del_timer_sync(&np->reset_timer); 1909 del_timer_sync(&np->reset_timer);
1912 1910
1913 free_irq(dev->irq, dev); 1911 free_irq(np->pci_dev->irq, dev);
1914 1912
1915 /* Free all the skbuffs in the Rx queue. */ 1913 /* Free all the skbuffs in the Rx queue. */
1916 for (i = 0; i < RX_RING_SIZE; i++) { 1914 for (i = 0; i < RX_RING_SIZE; i++) {
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index a12b3f5bc025..7fa0227c9c02 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -1161,6 +1161,7 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
1161 .set_settings = fec_enet_set_settings, 1161 .set_settings = fec_enet_set_settings,
1162 .get_drvinfo = fec_enet_get_drvinfo, 1162 .get_drvinfo = fec_enet_get_drvinfo,
1163 .get_link = ethtool_op_get_link, 1163 .get_link = ethtool_op_get_link,
1164 .get_ts_info = ethtool_op_get_ts_info,
1164}; 1165};
1165 1166
1166static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) 1167static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 7b34d8c698da..97f947b3d94a 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -811,6 +811,7 @@ static const struct ethtool_ops mpc52xx_fec_ethtool_ops = {
811 .get_link = ethtool_op_get_link, 811 .get_link = ethtool_op_get_link,
812 .get_msglevel = mpc52xx_fec_get_msglevel, 812 .get_msglevel = mpc52xx_fec_get_msglevel,
813 .set_msglevel = mpc52xx_fec_set_msglevel, 813 .set_msglevel = mpc52xx_fec_set_msglevel,
814 .get_ts_info = ethtool_op_get_ts_info,
814}; 815};
815 816
816 817
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index e4e6cd2c5f82..2b7633f766d9 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -963,6 +963,7 @@ static const struct ethtool_ops fs_ethtool_ops = {
963 .get_msglevel = fs_get_msglevel, 963 .get_msglevel = fs_get_msglevel,
964 .set_msglevel = fs_set_msglevel, 964 .set_msglevel = fs_set_msglevel,
965 .get_regs = fs_get_regs, 965 .get_regs = fs_get_regs,
966 .get_ts_info = ethtool_op_get_ts_info,
966}; 967};
967 968
968static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 969static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index e7bed5303997..1adb0245b9dd 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -136,7 +136,7 @@ static void gfar_netpoll(struct net_device *dev);
136int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); 136int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
137static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); 137static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
138static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, 138static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
139 int amount_pull); 139 int amount_pull, struct napi_struct *napi);
140void gfar_halt(struct net_device *dev); 140void gfar_halt(struct net_device *dev);
141static void gfar_halt_nodisable(struct net_device *dev); 141static void gfar_halt_nodisable(struct net_device *dev);
142void gfar_start(struct net_device *dev); 142void gfar_start(struct net_device *dev);
@@ -2675,12 +2675,12 @@ static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2675/* gfar_process_frame() -- handle one incoming packet if skb 2675/* gfar_process_frame() -- handle one incoming packet if skb
2676 * isn't NULL. */ 2676 * isn't NULL. */
2677static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, 2677static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2678 int amount_pull) 2678 int amount_pull, struct napi_struct *napi)
2679{ 2679{
2680 struct gfar_private *priv = netdev_priv(dev); 2680 struct gfar_private *priv = netdev_priv(dev);
2681 struct rxfcb *fcb = NULL; 2681 struct rxfcb *fcb = NULL;
2682 2682
2683 int ret; 2683 gro_result_t ret;
2684 2684
2685 /* fcb is at the beginning if exists */ 2685 /* fcb is at the beginning if exists */
2686 fcb = (struct rxfcb *)skb->data; 2686 fcb = (struct rxfcb *)skb->data;
@@ -2719,9 +2719,9 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2719 __vlan_hwaccel_put_tag(skb, fcb->vlctl); 2719 __vlan_hwaccel_put_tag(skb, fcb->vlctl);
2720 2720
2721 /* Send the packet up the stack */ 2721 /* Send the packet up the stack */
2722 ret = netif_receive_skb(skb); 2722 ret = napi_gro_receive(napi, skb);
2723 2723
2724 if (NET_RX_DROP == ret) 2724 if (GRO_DROP == ret)
2725 priv->extra_stats.kernel_dropped++; 2725 priv->extra_stats.kernel_dropped++;
2726 2726
2727 return 0; 2727 return 0;
@@ -2783,7 +2783,8 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2783 skb_put(skb, pkt_len); 2783 skb_put(skb, pkt_len);
2784 rx_queue->stats.rx_bytes += pkt_len; 2784 rx_queue->stats.rx_bytes += pkt_len;
2785 skb_record_rx_queue(skb, rx_queue->qindex); 2785 skb_record_rx_queue(skb, rx_queue->qindex);
2786 gfar_process_frame(dev, skb, amount_pull); 2786 gfar_process_frame(dev, skb, amount_pull,
2787 &rx_queue->grp->napi);
2787 2788
2788 } else { 2789 } else {
2789 netif_warn(priv, rx_err, dev, "Missing skb!\n"); 2790 netif_warn(priv, rx_err, dev, "Missing skb!\n");
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 4c9f8d487dbb..2136c7ff5e6d 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1210,4 +1210,7 @@ struct filer_table {
1210 struct gfar_filer_entry fe[MAX_FILER_CACHE_IDX + 20]; 1210 struct gfar_filer_entry fe[MAX_FILER_CACHE_IDX + 20];
1211}; 1211};
1212 1212
1213/* The gianfar_ptp module will set this variable */
1214extern int gfar_phc_index;
1215
1213#endif /* __GIANFAR_H */ 1216#endif /* __GIANFAR_H */
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 8d74efd04bb9..8a025570d97e 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -26,6 +26,7 @@
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/netdevice.h> 27#include <linux/netdevice.h>
28#include <linux/etherdevice.h> 28#include <linux/etherdevice.h>
29#include <linux/net_tstamp.h>
29#include <linux/skbuff.h> 30#include <linux/skbuff.h>
30#include <linux/spinlock.h> 31#include <linux/spinlock.h>
31#include <linux/mm.h> 32#include <linux/mm.h>
@@ -1739,6 +1740,34 @@ static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
1739 return ret; 1740 return ret;
1740} 1741}
1741 1742
1743int gfar_phc_index = -1;
1744
1745static int gfar_get_ts_info(struct net_device *dev,
1746 struct ethtool_ts_info *info)
1747{
1748 struct gfar_private *priv = netdev_priv(dev);
1749
1750 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)) {
1751 info->so_timestamping =
1752 SOF_TIMESTAMPING_RX_SOFTWARE |
1753 SOF_TIMESTAMPING_SOFTWARE;
1754 info->phc_index = -1;
1755 return 0;
1756 }
1757 info->so_timestamping =
1758 SOF_TIMESTAMPING_TX_HARDWARE |
1759 SOF_TIMESTAMPING_RX_HARDWARE |
1760 SOF_TIMESTAMPING_RAW_HARDWARE;
1761 info->phc_index = gfar_phc_index;
1762 info->tx_types =
1763 (1 << HWTSTAMP_TX_OFF) |
1764 (1 << HWTSTAMP_TX_ON);
1765 info->rx_filters =
1766 (1 << HWTSTAMP_FILTER_NONE) |
1767 (1 << HWTSTAMP_FILTER_ALL);
1768 return 0;
1769}
1770
1742const struct ethtool_ops gfar_ethtool_ops = { 1771const struct ethtool_ops gfar_ethtool_ops = {
1743 .get_settings = gfar_gsettings, 1772 .get_settings = gfar_gsettings,
1744 .set_settings = gfar_ssettings, 1773 .set_settings = gfar_ssettings,
@@ -1761,4 +1790,5 @@ const struct ethtool_ops gfar_ethtool_ops = {
1761#endif 1790#endif
1762 .set_rxnfc = gfar_set_nfc, 1791 .set_rxnfc = gfar_set_nfc,
1763 .get_rxnfc = gfar_get_nfc, 1792 .get_rxnfc = gfar_get_nfc,
1793 .get_ts_info = gfar_get_ts_info,
1764}; 1794};
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 5fd620bec15c..c08e5d40fecb 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -515,6 +515,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
515 err = PTR_ERR(etsects->clock); 515 err = PTR_ERR(etsects->clock);
516 goto no_clock; 516 goto no_clock;
517 } 517 }
 518 gfar_phc_index = ptp_clock_index(etsects->clock);
518 519
519 dev_set_drvdata(&dev->dev, etsects); 520 dev_set_drvdata(&dev->dev, etsects);
520 521
@@ -538,6 +539,7 @@ static int gianfar_ptp_remove(struct platform_device *dev)
538 gfar_write(&etsects->regs->tmr_temask, 0); 539 gfar_write(&etsects->regs->tmr_temask, 0);
539 gfar_write(&etsects->regs->tmr_ctrl, 0); 540 gfar_write(&etsects->regs->tmr_ctrl, 0);
540 541
 542 gfar_phc_index = -1;
541 ptp_clock_unregister(etsects->clock); 543 ptp_clock_unregister(etsects->clock);
542 iounmap(etsects->regs); 544 iounmap(etsects->regs);
543 release_resource(etsects->rsrc); 545 release_resource(etsects->rsrc);
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index a97257f91a3d..37b035306013 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -415,6 +415,7 @@ static const struct ethtool_ops uec_ethtool_ops = {
415 .get_ethtool_stats = uec_get_ethtool_stats, 415 .get_ethtool_stats = uec_get_ethtool_stats,
416 .get_wol = uec_get_wol, 416 .get_wol = uec_get_wol,
417 .set_wol = uec_set_wol, 417 .set_wol = uec_set_wol,
418 .get_ts_info = ethtool_op_get_ts_info,
418}; 419};
419 420
420void uec_set_ethtool_ops(struct net_device *netdev) 421void uec_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 76213162fbe3..546efe30c9b8 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -7,7 +7,7 @@ config NET_VENDOR_INTEL
7 default y 7 default y
8 depends on PCI || PCI_MSI || ISA || ISA_DMA_API || ARM || \ 8 depends on PCI || PCI_MSI || ISA || ISA_DMA_API || ARM || \
9 ARCH_ACORN || MCA || MCA_LEGACY || SNI_RM || SUN3 || \ 9 ARCH_ACORN || MCA || MCA_LEGACY || SNI_RM || SUN3 || \
10 GSC || BVME6000 || MVME16x || ARCH_ENP2611 || \ 10 GSC || BVME6000 || MVME16x || \
11 (ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR) || \ 11 (ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR) || \
12 EXPERIMENTAL 12 EXPERIMENTAL
13 ---help--- 13 ---help---
@@ -120,6 +120,17 @@ config IGB_DCA
120 driver. DCA is a method for warming the CPU cache before data 120 driver. DCA is a method for warming the CPU cache before data
121 is used, with the intent of lessening the impact of cache misses. 121 is used, with the intent of lessening the impact of cache misses.
122 122
123config IGB_PTP
124 bool "PTP Hardware Clock (PHC)"
125 default y
126 depends on IGB && PTP_1588_CLOCK
127 ---help---
128 Say Y here if you want to use PTP Hardware Clock (PHC) in the
129 driver. Only the basic clock operations have been implemented.
130
131 Every timestamp and clock read operations must consult the
132 overflow counter to form a correct time value.
133
123config IGBVF 134config IGBVF
124 tristate "Intel(R) 82576 Virtual Function Ethernet support" 135 tristate "Intel(R) 82576 Virtual Function Ethernet support"
125 depends on PCI 136 depends on PCI
@@ -182,6 +193,14 @@ config IXGBE
182 To compile this driver as a module, choose M here. The module 193 To compile this driver as a module, choose M here. The module
183 will be called ixgbe. 194 will be called ixgbe.
184 195
196config IXGBE_HWMON
197 bool "Intel(R) 10GbE PCI Express adapters HWMON support"
198 default y
199 depends on IXGBE && HWMON && !(IXGBE=y && HWMON=m)
200 ---help---
201 Say Y if you want to expose the thermal sensor data on some of
202 our cards, via a hwmon sysfs interface.
203
185config IXGBE_DCA 204config IXGBE_DCA
186 bool "Direct Cache Access (DCA) Support" 205 bool "Direct Cache Access (DCA) Support"
187 default y 206 default y
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index e498effb85d9..ada720b42ff6 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1759,6 +1759,7 @@ static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
1759 skb->data, skb->len, PCI_DMA_TODEVICE)); 1759 skb->data, skb->len, PCI_DMA_TODEVICE));
1760 /* check for mapping failure? */ 1760 /* check for mapping failure? */
1761 cb->u.tcb.tbd.size = cpu_to_le16(skb->len); 1761 cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
1762 skb_tx_timestamp(skb);
1762} 1763}
1763 1764
1764static netdev_tx_t e100_xmit_frame(struct sk_buff *skb, 1765static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
@@ -2733,6 +2734,7 @@ static const struct ethtool_ops e100_ethtool_ops = {
2733 .set_phys_id = e100_set_phys_id, 2734 .set_phys_id = e100_set_phys_id,
2734 .get_ethtool_stats = e100_get_ethtool_stats, 2735 .get_ethtool_stats = e100_get_ethtool_stats,
2735 .get_sset_count = e100_get_sset_count, 2736 .get_sset_count = e100_get_sset_count,
2737 .get_ts_info = ethtool_op_get_ts_info,
2736}; 2738};
2737 2739
2738static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 2740static int e100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 37caa8885c2a..f1aef68e1e83 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -827,9 +827,10 @@ static int e1000_set_features(struct net_device *netdev,
827 if (changed & NETIF_F_HW_VLAN_RX) 827 if (changed & NETIF_F_HW_VLAN_RX)
828 e1000_vlan_mode(netdev, features); 828 e1000_vlan_mode(netdev, features);
829 829
830 if (!(changed & NETIF_F_RXCSUM)) 830 if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
831 return 0; 831 return 0;
832 832
833 netdev->features = features;
833 adapter->rx_csum = !!(features & NETIF_F_RXCSUM); 834 adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
834 835
835 if (netif_running(netdev)) 836 if (netif_running(netdev))
@@ -1074,6 +1075,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
1074 1075
1075 netdev->features |= netdev->hw_features; 1076 netdev->features |= netdev->hw_features;
1076 netdev->hw_features |= NETIF_F_RXCSUM; 1077 netdev->hw_features |= NETIF_F_RXCSUM;
1078 netdev->hw_features |= NETIF_F_RXALL;
1077 netdev->hw_features |= NETIF_F_RXFCS; 1079 netdev->hw_features |= NETIF_F_RXFCS;
1078 1080
1079 if (pci_using_dac) { 1081 if (pci_using_dac) {
@@ -1841,6 +1843,22 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
1841 break; 1843 break;
1842 } 1844 }
1843 1845
1846 /* This is useful for sniffing bad packets. */
1847 if (adapter->netdev->features & NETIF_F_RXALL) {
1848 /* UPE and MPE will be handled by normal PROMISC logic
1849 * in e1000e_set_rx_mode */
1850 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
1851 E1000_RCTL_BAM | /* RX All Bcast Pkts */
1852 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
1853
1854 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
1855 E1000_RCTL_DPF | /* Allow filtered pause */
1856 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
1857 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
1858 * and that breaks VLANs.
1859 */
1860 }
1861
1844 ew32(RCTL, rctl); 1862 ew32(RCTL, rctl);
1845} 1863}
1846 1864
@@ -3243,6 +3261,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3243 nr_frags, mss); 3261 nr_frags, mss);
3244 3262
3245 if (count) { 3263 if (count) {
3264 skb_tx_timestamp(skb);
3265
3246 e1000_tx_queue(adapter, tx_ring, tx_flags, count); 3266 e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3247 /* Make sure there is space in the ring for the next send. */ 3267 /* Make sure there is space in the ring for the next send. */
3248 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2); 3268 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
@@ -4057,6 +4077,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4057 irq_flags); 4077 irq_flags);
4058 length--; 4078 length--;
4059 } else { 4079 } else {
4080 if (netdev->features & NETIF_F_RXALL)
4081 goto process_skb;
4060 /* recycle both page and skb */ 4082 /* recycle both page and skb */
4061 buffer_info->skb = skb; 4083 buffer_info->skb = skb;
4062 /* an error means any chain goes out the window 4084 /* an error means any chain goes out the window
@@ -4069,6 +4091,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4069 } 4091 }
4070 4092
4071#define rxtop rx_ring->rx_skb_top 4093#define rxtop rx_ring->rx_skb_top
4094process_skb:
4072 if (!(status & E1000_RXD_STAT_EOP)) { 4095 if (!(status & E1000_RXD_STAT_EOP)) {
4073 /* this descriptor is only the beginning (or middle) */ 4096 /* this descriptor is only the beginning (or middle) */
4074 if (!rxtop) { 4097 if (!rxtop) {
@@ -4276,12 +4299,15 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4276 flags); 4299 flags);
4277 length--; 4300 length--;
4278 } else { 4301 } else {
4302 if (netdev->features & NETIF_F_RXALL)
4303 goto process_skb;
4279 /* recycle */ 4304 /* recycle */
4280 buffer_info->skb = skb; 4305 buffer_info->skb = skb;
4281 goto next_desc; 4306 goto next_desc;
4282 } 4307 }
4283 } 4308 }
4284 4309
4310process_skb:
4285 total_rx_bytes += (length - 4); /* don't count FCS */ 4311 total_rx_bytes += (length - 4); /* don't count FCS */
4286 total_rx_packets++; 4312 total_rx_packets++;
4287 4313
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index bac9dda31b6c..4dd18a1f45d2 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -228,9 +228,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_hw *hw)
228 /* FWSM register */ 228 /* FWSM register */
229 mac->has_fwsm = true; 229 mac->has_fwsm = true;
230 /* ARC supported; valid only if manageability features are enabled. */ 230 /* ARC supported; valid only if manageability features are enabled. */
231 mac->arc_subsystem_valid = 231 mac->arc_subsystem_valid = !!(er32(FWSM) & E1000_FWSM_MODE_MASK);
232 (er32(FWSM) & E1000_FWSM_MODE_MASK)
233 ? true : false;
234 /* Adaptive IFS not supported */ 232 /* Adaptive IFS not supported */
235 mac->adaptive_ifs = false; 233 mac->adaptive_ifs = false;
236 234
@@ -766,6 +764,7 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
766{ 764{
767 u32 ctrl; 765 u32 ctrl;
768 s32 ret_val; 766 s32 ret_val;
767 u16 kum_reg_data;
769 768
770 /* 769 /*
771 * Prevent the PCI-E bus from sticking if there is no TLP connection 770 * Prevent the PCI-E bus from sticking if there is no TLP connection
@@ -791,6 +790,13 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
791 ew32(CTRL, ctrl | E1000_CTRL_RST); 790 ew32(CTRL, ctrl | E1000_CTRL_RST);
792 e1000_release_phy_80003es2lan(hw); 791 e1000_release_phy_80003es2lan(hw);
793 792
793 /* Disable IBIST slave mode (far-end loopback) */
794 e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
795 &kum_reg_data);
796 kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
797 e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
798 kum_reg_data);
799
794 ret_val = e1000e_get_auto_rd_done(hw); 800 ret_val = e1000e_get_auto_rd_done(hw);
795 if (ret_val) 801 if (ret_val)
796 /* We don't want to continue accessing MAC registers. */ 802 /* We don't want to continue accessing MAC registers. */
@@ -938,6 +944,14 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
938 else 944 else
939 reg |= (1 << 28); 945 reg |= (1 << 28);
940 ew32(TARC(1), reg); 946 ew32(TARC(1), reg);
947
948 /*
949 * Disable IPv6 extension header parsing because some malformed
950 * IPv6 headers can hang the Rx.
951 */
952 reg = er32(RFCTL);
953 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
954 ew32(RFCTL, reg);
941} 955}
942 956
943/** 957/**
@@ -1433,6 +1447,7 @@ static const struct e1000_mac_operations es2_mac_ops = {
1433 /* setup_physical_interface dependent on media type */ 1447 /* setup_physical_interface dependent on media type */
1434 .setup_led = e1000e_setup_led_generic, 1448 .setup_led = e1000e_setup_led_generic,
1435 .config_collision_dist = e1000e_config_collision_dist_generic, 1449 .config_collision_dist = e1000e_config_collision_dist_generic,
1450 .rar_set = e1000e_rar_set_generic,
1436}; 1451};
1437 1452
1438static const struct e1000_phy_operations es2_phy_ops = { 1453static const struct e1000_phy_operations es2_phy_ops = {
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index b3fdc6977f2e..36db4df09aed 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -295,9 +295,8 @@ static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
295 * ARC supported; valid only if manageability features are 295 * ARC supported; valid only if manageability features are
296 * enabled. 296 * enabled.
297 */ 297 */
298 mac->arc_subsystem_valid = 298 mac->arc_subsystem_valid = !!(er32(FWSM) &
299 (er32(FWSM) & E1000_FWSM_MODE_MASK) 299 E1000_FWSM_MODE_MASK);
300 ? true : false;
301 break; 300 break;
302 case e1000_82574: 301 case e1000_82574:
303 case e1000_82583: 302 case e1000_82583:
@@ -798,7 +797,7 @@ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
798 /* Check for pending operations. */ 797 /* Check for pending operations. */
799 for (i = 0; i < E1000_FLASH_UPDATES; i++) { 798 for (i = 0; i < E1000_FLASH_UPDATES; i++) {
800 usleep_range(1000, 2000); 799 usleep_range(1000, 2000);
801 if ((er32(EECD) & E1000_EECD_FLUPD) == 0) 800 if (!(er32(EECD) & E1000_EECD_FLUPD))
802 break; 801 break;
803 } 802 }
804 803
@@ -822,7 +821,7 @@ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
822 821
823 for (i = 0; i < E1000_FLASH_UPDATES; i++) { 822 for (i = 0; i < E1000_FLASH_UPDATES; i++) {
824 usleep_range(1000, 2000); 823 usleep_range(1000, 2000);
825 if ((er32(EECD) & E1000_EECD_FLUPD) == 0) 824 if (!(er32(EECD) & E1000_EECD_FLUPD))
826 break; 825 break;
827 } 826 }
828 827
@@ -1000,7 +999,7 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
1000 **/ 999 **/
1001static s32 e1000_reset_hw_82571(struct e1000_hw *hw) 1000static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
1002{ 1001{
1003 u32 ctrl, ctrl_ext; 1002 u32 ctrl, ctrl_ext, eecd;
1004 s32 ret_val; 1003 s32 ret_val;
1005 1004
1006 /* 1005 /*
@@ -1073,6 +1072,16 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
1073 */ 1072 */
1074 1073
1075 switch (hw->mac.type) { 1074 switch (hw->mac.type) {
1075 case e1000_82571:
1076 case e1000_82572:
1077 /*
1078 * REQ and GNT bits need to be cleared when using AUTO_RD
1079 * to access the EEPROM.
1080 */
1081 eecd = er32(EECD);
1082 eecd &= ~(E1000_EECD_REQ | E1000_EECD_GNT);
1083 ew32(EECD, eecd);
1084 break;
1076 case e1000_82573: 1085 case e1000_82573:
1077 case e1000_82574: 1086 case e1000_82574:
1078 case e1000_82583: 1087 case e1000_82583:
@@ -1280,6 +1289,16 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
1280 ew32(CTRL_EXT, reg); 1289 ew32(CTRL_EXT, reg);
1281 } 1290 }
1282 1291
1292 /*
1293 * Disable IPv6 extension header parsing because some malformed
1294 * IPv6 headers can hang the Rx.
1295 */
1296 if (hw->mac.type <= e1000_82573) {
1297 reg = er32(RFCTL);
1298 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
1299 ew32(RFCTL, reg);
1300 }
1301
1283 /* PCI-Ex Control Registers */ 1302 /* PCI-Ex Control Registers */
1284 switch (hw->mac.type) { 1303 switch (hw->mac.type) {
1285 case e1000_82574: 1304 case e1000_82574:
@@ -1763,7 +1782,8 @@ void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state)
1763 * incoming packets directed to this port are dropped. 1782 * incoming packets directed to this port are dropped.
1764 * Eventually the LAA will be in RAR[0] and RAR[14]. 1783 * Eventually the LAA will be in RAR[0] and RAR[14].
1765 */ 1784 */
1766 e1000e_rar_set(hw, hw->mac.addr, hw->mac.rar_entry_count - 1); 1785 hw->mac.ops.rar_set(hw, hw->mac.addr,
1786 hw->mac.rar_entry_count - 1);
1767} 1787}
1768 1788
1769/** 1789/**
@@ -1927,6 +1947,7 @@ static const struct e1000_mac_operations e82571_mac_ops = {
1927 .setup_led = e1000e_setup_led_generic, 1947 .setup_led = e1000e_setup_led_generic,
1928 .config_collision_dist = e1000e_config_collision_dist_generic, 1948 .config_collision_dist = e1000e_config_collision_dist_generic,
1929 .read_mac_addr = e1000_read_mac_addr_82571, 1949 .read_mac_addr = e1000_read_mac_addr_82571,
1950 .rar_set = e1000e_rar_set_generic,
1930}; 1951};
1931 1952
1932static const struct e1000_phy_operations e82_phy_ops_igp = { 1953static const struct e1000_phy_operations e82_phy_ops_igp = {
@@ -2061,9 +2082,11 @@ const struct e1000_info e1000_82574_info = {
2061 | FLAG_HAS_SMART_POWER_DOWN 2082 | FLAG_HAS_SMART_POWER_DOWN
2062 | FLAG_HAS_AMT 2083 | FLAG_HAS_AMT
2063 | FLAG_HAS_CTRLEXT_ON_LOAD, 2084 | FLAG_HAS_CTRLEXT_ON_LOAD,
2064 .flags2 = FLAG2_CHECK_PHY_HANG 2085 .flags2 = FLAG2_CHECK_PHY_HANG
2065 | FLAG2_DISABLE_ASPM_L0S 2086 | FLAG2_DISABLE_ASPM_L0S
2066 | FLAG2_NO_DISABLE_RX, 2087 | FLAG2_DISABLE_ASPM_L1
2088 | FLAG2_NO_DISABLE_RX
2089 | FLAG2_DMA_BURST,
2067 .pba = 32, 2090 .pba = 32,
2068 .max_hw_frame_size = DEFAULT_JUMBO, 2091 .max_hw_frame_size = DEFAULT_JUMBO,
2069 .get_variants = e1000_get_variants_82571, 2092 .get_variants = e1000_get_variants_82571,
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 3a5025917163..11c46661af09 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -74,7 +74,9 @@
74#define E1000_WUS_BC E1000_WUFC_BC 74#define E1000_WUS_BC E1000_WUFC_BC
75 75
76/* Extended Device Control */ 76/* Extended Device Control */
77#define E1000_CTRL_EXT_LPCD 0x00000004 /* LCD Power Cycle Done */
77#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */ 78#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */
79#define E1000_CTRL_EXT_FORCE_SMBUS 0x00000004 /* Force SMBus mode*/
78#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ 80#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
79#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ 81#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */
80#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ 82#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */
@@ -573,6 +575,7 @@
573#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ 575#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
574 576
575/* Link Partner Ability Register (Base Page) */ 577/* Link Partner Ability Register (Base Page) */
578#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP 100TX Full Dplx Capable */
576#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ 579#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */
577#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ 580#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */
578 581
@@ -739,6 +742,7 @@
739#define I82577_E_PHY_ID 0x01540050 742#define I82577_E_PHY_ID 0x01540050
740#define I82578_E_PHY_ID 0x004DD040 743#define I82578_E_PHY_ID 0x004DD040
741#define I82579_E_PHY_ID 0x01540090 744#define I82579_E_PHY_ID 0x01540090
745#define I217_E_PHY_ID 0x015400A0
742 746
743/* M88E1000 Specific Registers */ 747/* M88E1000 Specific Registers */
744#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ 748#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
@@ -850,4 +854,8 @@
850/* SerDes Control */ 854/* SerDes Control */
851#define E1000_GEN_POLL_TIMEOUT 640 855#define E1000_GEN_POLL_TIMEOUT 640
852 856
857/* FW Semaphore */
858#define E1000_FWSM_WLOCK_MAC_MASK 0x0380
859#define E1000_FWSM_WLOCK_MAC_SHIFT 7
860
853#endif /* _E1000_DEFINES_H_ */ 861#endif /* _E1000_DEFINES_H_ */
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index b83897f76ee3..6e6fffb34581 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -206,6 +206,7 @@ enum e1000_boards {
206 board_ich10lan, 206 board_ich10lan,
207 board_pchlan, 207 board_pchlan,
208 board_pch2lan, 208 board_pch2lan,
209 board_pch_lpt,
209}; 210};
210 211
211struct e1000_ps_page { 212struct e1000_ps_page {
@@ -528,6 +529,7 @@ extern const struct e1000_info e1000_ich9_info;
528extern const struct e1000_info e1000_ich10_info; 529extern const struct e1000_info e1000_ich10_info;
529extern const struct e1000_info e1000_pch_info; 530extern const struct e1000_info e1000_pch_info;
530extern const struct e1000_info e1000_pch2_info; 531extern const struct e1000_info e1000_pch2_info;
532extern const struct e1000_info e1000_pch_lpt_info;
531extern const struct e1000_info e1000_es2_info; 533extern const struct e1000_info e1000_es2_info;
532 534
533extern s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, 535extern s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
@@ -576,7 +578,7 @@ extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
576extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, 578extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
577 u8 *mc_addr_list, 579 u8 *mc_addr_list,
578 u32 mc_addr_count); 580 u32 mc_addr_count);
579extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); 581extern void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
580extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw); 582extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
581extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop); 583extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
582extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw); 584extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw);
@@ -673,11 +675,21 @@ static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data)
673 return hw->phy.ops.read_reg(hw, offset, data); 675 return hw->phy.ops.read_reg(hw, offset, data);
674} 676}
675 677
678static inline s32 e1e_rphy_locked(struct e1000_hw *hw, u32 offset, u16 *data)
679{
680 return hw->phy.ops.read_reg_locked(hw, offset, data);
681}
682
676static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data) 683static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data)
677{ 684{
678 return hw->phy.ops.write_reg(hw, offset, data); 685 return hw->phy.ops.write_reg(hw, offset, data);
679} 686}
680 687
688static inline s32 e1e_wphy_locked(struct e1000_hw *hw, u32 offset, u16 data)
689{
690 return hw->phy.ops.write_reg_locked(hw, offset, data);
691}
692
681static inline s32 e1000_get_cable_length(struct e1000_hw *hw) 693static inline s32 e1000_get_cable_length(struct e1000_hw *hw)
682{ 694{
683 return hw->phy.ops.get_cable_length(hw); 695 return hw->phy.ops.get_cable_length(hw);
@@ -735,9 +747,46 @@ static inline u32 __er32(struct e1000_hw *hw, unsigned long reg)
735 return readl(hw->hw_addr + reg); 747 return readl(hw->hw_addr + reg);
736} 748}
737 749
750#define er32(reg) __er32(hw, E1000_##reg)
751
752/**
753 * __ew32_prepare - prepare to write to MAC CSR register on certain parts
754 * @hw: pointer to the HW structure
755 *
756 * When updating the MAC CSR registers, the Manageability Engine (ME) could
757 * be accessing the registers at the same time. Normally, this is handled in
758 * h/w by an arbiter but on some parts there is a bug that acknowledges Host
759 * accesses later than it should which could result in the register to have
760 * an incorrect value. Workaround this by checking the FWSM register which
761 * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set
762 * and try again a number of times.
763 **/
764static inline s32 __ew32_prepare(struct e1000_hw *hw)
765{
766 s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
767
768 while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
769 udelay(50);
770
771 return i;
772}
773
738static inline void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val) 774static inline void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
739{ 775{
776 if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
777 __ew32_prepare(hw);
778
740 writel(val, hw->hw_addr + reg); 779 writel(val, hw->hw_addr + reg);
741} 780}
742 781
782#define ew32(reg, val) __ew32(hw, E1000_##reg, (val))
783
784#define e1e_flush() er32(STATUS)
785
786#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) \
787 (__ew32((a), (reg + ((offset) << 2)), (value)))
788
789#define E1000_READ_REG_ARRAY(a, reg, offset) \
790 (readl((a)->hw_addr + reg + ((offset) << 2)))
791
743#endif /* _E1000_H_ */ 792#endif /* _E1000_H_ */
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index db35dd5d96de..d863075df7a4 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -259,8 +259,7 @@ static int e1000_set_settings(struct net_device *netdev,
259 * cannot be changed 259 * cannot be changed
260 */ 260 */
261 if (hw->phy.ops.check_reset_block(hw)) { 261 if (hw->phy.ops.check_reset_block(hw)) {
262 e_err("Cannot change link characteristics when SoL/IDER is " 262 e_err("Cannot change link characteristics when SoL/IDER is active.\n");
263 "active.\n");
264 return -EINVAL; 263 return -EINVAL;
265 } 264 }
266 265
@@ -403,15 +402,15 @@ static void e1000_get_regs(struct net_device *netdev,
403 regs_buff[1] = er32(STATUS); 402 regs_buff[1] = er32(STATUS);
404 403
405 regs_buff[2] = er32(RCTL); 404 regs_buff[2] = er32(RCTL);
406 regs_buff[3] = er32(RDLEN); 405 regs_buff[3] = er32(RDLEN(0));
407 regs_buff[4] = er32(RDH); 406 regs_buff[4] = er32(RDH(0));
408 regs_buff[5] = er32(RDT); 407 regs_buff[5] = er32(RDT(0));
409 regs_buff[6] = er32(RDTR); 408 regs_buff[6] = er32(RDTR);
410 409
411 regs_buff[7] = er32(TCTL); 410 regs_buff[7] = er32(TCTL);
412 regs_buff[8] = er32(TDLEN); 411 regs_buff[8] = er32(TDLEN(0));
413 regs_buff[9] = er32(TDH); 412 regs_buff[9] = er32(TDH(0));
414 regs_buff[10] = er32(TDT); 413 regs_buff[10] = er32(TDT(0));
415 regs_buff[11] = er32(TIDV); 414 regs_buff[11] = er32(TIDV);
416 415
417 regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */ 416 regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */
@@ -727,9 +726,8 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
727 (test[pat] & write)); 726 (test[pat] & write));
728 val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset); 727 val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset);
729 if (val != (test[pat] & write & mask)) { 728 if (val != (test[pat] & write & mask)) {
730 e_err("pattern test reg %04X failed: got 0x%08X " 729 e_err("pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
731 "expected 0x%08X\n", reg + offset, val, 730 reg + offset, val, (test[pat] & write & mask));
732 (test[pat] & write & mask));
733 *data = reg; 731 *data = reg;
734 return 1; 732 return 1;
735 } 733 }
@@ -744,8 +742,8 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
744 __ew32(&adapter->hw, reg, write & mask); 742 __ew32(&adapter->hw, reg, write & mask);
745 val = __er32(&adapter->hw, reg); 743 val = __er32(&adapter->hw, reg);
746 if ((write & mask) != (val & mask)) { 744 if ((write & mask) != (val & mask)) {
747 e_err("set/check reg %04X test failed: got 0x%08X " 745 e_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
748 "expected 0x%08X\n", reg, (val & mask), (write & mask)); 746 reg, (val & mask), (write & mask));
749 *data = reg; 747 *data = reg;
750 return 1; 748 return 1;
751 } 749 }
@@ -775,6 +773,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
775 u32 i; 773 u32 i;
776 u32 toggle; 774 u32 toggle;
777 u32 mask; 775 u32 mask;
776 u32 wlock_mac = 0;
778 777
779 /* 778 /*
780 * The status register is Read Only, so a write should fail. 779 * The status register is Read Only, so a write should fail.
@@ -797,8 +796,8 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
797 ew32(STATUS, toggle); 796 ew32(STATUS, toggle);
798 after = er32(STATUS) & toggle; 797 after = er32(STATUS) & toggle;
799 if (value != after) { 798 if (value != after) {
800 e_err("failed STATUS register test got: 0x%08X expected: " 799 e_err("failed STATUS register test got: 0x%08X expected: 0x%08X\n",
801 "0x%08X\n", after, value); 800 after, value);
802 *data = 1; 801 *data = 1;
803 return 1; 802 return 1;
804 } 803 }
@@ -813,15 +812,15 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
813 } 812 }
814 813
815 REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF); 814 REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF);
816 REG_PATTERN_TEST(E1000_RDBAH, 0xFFFFFFFF, 0xFFFFFFFF); 815 REG_PATTERN_TEST(E1000_RDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF);
817 REG_PATTERN_TEST(E1000_RDLEN, 0x000FFF80, 0x000FFFFF); 816 REG_PATTERN_TEST(E1000_RDLEN(0), 0x000FFF80, 0x000FFFFF);
818 REG_PATTERN_TEST(E1000_RDH, 0x0000FFFF, 0x0000FFFF); 817 REG_PATTERN_TEST(E1000_RDH(0), 0x0000FFFF, 0x0000FFFF);
819 REG_PATTERN_TEST(E1000_RDT, 0x0000FFFF, 0x0000FFFF); 818 REG_PATTERN_TEST(E1000_RDT(0), 0x0000FFFF, 0x0000FFFF);
820 REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8); 819 REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8);
821 REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF); 820 REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF);
822 REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF); 821 REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF);
823 REG_PATTERN_TEST(E1000_TDBAH, 0xFFFFFFFF, 0xFFFFFFFF); 822 REG_PATTERN_TEST(E1000_TDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF);
824 REG_PATTERN_TEST(E1000_TDLEN, 0x000FFF80, 0x000FFFFF); 823 REG_PATTERN_TEST(E1000_TDLEN(0), 0x000FFF80, 0x000FFFFF);
825 824
826 REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000); 825 REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000);
827 826
@@ -830,29 +829,41 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
830 REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000); 829 REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000);
831 830
832 REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF); 831 REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF);
833 REG_PATTERN_TEST(E1000_RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); 832 REG_PATTERN_TEST(E1000_RDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF);
834 if (!(adapter->flags & FLAG_IS_ICH)) 833 if (!(adapter->flags & FLAG_IS_ICH))
835 REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF); 834 REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF);
836 REG_PATTERN_TEST(E1000_TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); 835 REG_PATTERN_TEST(E1000_TDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF);
837 REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF); 836 REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF);
838 mask = 0x8003FFFF; 837 mask = 0x8003FFFF;
839 switch (mac->type) { 838 switch (mac->type) {
840 case e1000_ich10lan: 839 case e1000_ich10lan:
841 case e1000_pchlan: 840 case e1000_pchlan:
842 case e1000_pch2lan: 841 case e1000_pch2lan:
842 case e1000_pch_lpt:
843 mask |= (1 << 18); 843 mask |= (1 << 18);
844 break; 844 break;
845 default: 845 default:
846 break; 846 break;
847 } 847 }
848 for (i = 0; i < mac->rar_entry_count; i++) 848
849 if (mac->type == e1000_pch_lpt)
850 wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >>
851 E1000_FWSM_WLOCK_MAC_SHIFT;
852
853 for (i = 0; i < mac->rar_entry_count; i++) {
854 /* Cannot test write-protected SHRAL[n] registers */
855 if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac)))
856 continue;
857
849 REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), 858 REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1),
850 mask, 0xFFFFFFFF); 859 mask, 0xFFFFFFFF);
860 }
851 861
852 for (i = 0; i < mac->mta_reg_count; i++) 862 for (i = 0; i < mac->mta_reg_count; i++)
853 REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF); 863 REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF);
854 864
855 *data = 0; 865 *data = 0;
866
856 return 0; 867 return 0;
857} 868}
858 869
@@ -1104,11 +1115,11 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1104 tx_ring->next_to_use = 0; 1115 tx_ring->next_to_use = 0;
1105 tx_ring->next_to_clean = 0; 1116 tx_ring->next_to_clean = 0;
1106 1117
1107 ew32(TDBAL, ((u64) tx_ring->dma & 0x00000000FFFFFFFF)); 1118 ew32(TDBAL(0), ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
1108 ew32(TDBAH, ((u64) tx_ring->dma >> 32)); 1119 ew32(TDBAH(0), ((u64) tx_ring->dma >> 32));
1109 ew32(TDLEN, tx_ring->count * sizeof(struct e1000_tx_desc)); 1120 ew32(TDLEN(0), tx_ring->count * sizeof(struct e1000_tx_desc));
1110 ew32(TDH, 0); 1121 ew32(TDH(0), 0);
1111 ew32(TDT, 0); 1122 ew32(TDT(0), 0);
1112 ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR | 1123 ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR |
1113 E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | 1124 E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
1114 E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT); 1125 E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
@@ -1168,11 +1179,11 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1168 rctl = er32(RCTL); 1179 rctl = er32(RCTL);
1169 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) 1180 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
1170 ew32(RCTL, rctl & ~E1000_RCTL_EN); 1181 ew32(RCTL, rctl & ~E1000_RCTL_EN);
1171 ew32(RDBAL, ((u64) rx_ring->dma & 0xFFFFFFFF)); 1182 ew32(RDBAL(0), ((u64) rx_ring->dma & 0xFFFFFFFF));
1172 ew32(RDBAH, ((u64) rx_ring->dma >> 32)); 1183 ew32(RDBAH(0), ((u64) rx_ring->dma >> 32));
1173 ew32(RDLEN, rx_ring->size); 1184 ew32(RDLEN(0), rx_ring->size);
1174 ew32(RDH, 0); 1185 ew32(RDH(0), 0);
1175 ew32(RDT, 0); 1186 ew32(RDT(0), 0);
1176 rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | 1187 rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
1177 E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE | 1188 E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE |
1178 E1000_RCTL_SBP | E1000_RCTL_SECRC | 1189 E1000_RCTL_SBP | E1000_RCTL_SECRC |
@@ -1534,7 +1545,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1534 int ret_val = 0; 1545 int ret_val = 0;
1535 unsigned long time; 1546 unsigned long time;
1536 1547
1537 ew32(RDT, rx_ring->count - 1); 1548 ew32(RDT(0), rx_ring->count - 1);
1538 1549
1539 /* 1550 /*
1540 * Calculate the loop count based on the largest descriptor ring 1551 * Calculate the loop count based on the largest descriptor ring
@@ -1561,7 +1572,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1561 if (k == tx_ring->count) 1572 if (k == tx_ring->count)
1562 k = 0; 1573 k = 0;
1563 } 1574 }
1564 ew32(TDT, k); 1575 ew32(TDT(0), k);
1565 e1e_flush(); 1576 e1e_flush();
1566 msleep(200); 1577 msleep(200);
1567 time = jiffies; /* set the start time for the receive */ 1578 time = jiffies; /* set the start time for the receive */
@@ -1791,8 +1802,7 @@ static void e1000_get_wol(struct net_device *netdev,
1791 wol->supported &= ~WAKE_UCAST; 1802 wol->supported &= ~WAKE_UCAST;
1792 1803
1793 if (adapter->wol & E1000_WUFC_EX) 1804 if (adapter->wol & E1000_WUFC_EX)
1794 e_err("Interface does not support directed (unicast) " 1805 e_err("Interface does not support directed (unicast) frame wake-up packets\n");
1795 "frame wake-up packets\n");
1796 } 1806 }
1797 1807
1798 if (adapter->wol & E1000_WUFC_EX) 1808 if (adapter->wol & E1000_WUFC_EX)
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index f82ecf536c8b..ed5b40985edb 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -36,16 +36,6 @@ struct e1000_adapter;
36 36
37#include "defines.h" 37#include "defines.h"
38 38
39#define er32(reg) __er32(hw, E1000_##reg)
40#define ew32(reg,val) __ew32(hw, E1000_##reg, (val))
41#define e1e_flush() er32(STATUS)
42
43#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) \
44 (writel((value), ((a)->hw_addr + reg + ((offset) << 2))))
45
46#define E1000_READ_REG_ARRAY(a, reg, offset) \
47 (readl((a)->hw_addr + reg + ((offset) << 2)))
48
49enum e1e_registers { 39enum e1e_registers {
50 E1000_CTRL = 0x00000, /* Device Control - RW */ 40 E1000_CTRL = 0x00000, /* Device Control - RW */
51 E1000_STATUS = 0x00008, /* Device Status - RO */ 41 E1000_STATUS = 0x00008, /* Device Status - RO */
@@ -61,6 +51,7 @@ enum e1e_registers {
61 E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */ 51 E1000_FEXTNVM = 0x00028, /* Future Extended NVM - RW */
62 E1000_FCT = 0x00030, /* Flow Control Type - RW */ 52 E1000_FCT = 0x00030, /* Flow Control Type - RW */
63 E1000_VET = 0x00038, /* VLAN Ether Type - RW */ 53 E1000_VET = 0x00038, /* VLAN Ether Type - RW */
54 E1000_FEXTNVM3 = 0x0003C, /* Future Extended NVM 3 - RW */
64 E1000_ICR = 0x000C0, /* Interrupt Cause Read - R/clr */ 55 E1000_ICR = 0x000C0, /* Interrupt Cause Read - R/clr */
65 E1000_ITR = 0x000C4, /* Interrupt Throttling Rate - RW */ 56 E1000_ITR = 0x000C4, /* Interrupt Throttling Rate - RW */
66 E1000_ICS = 0x000C8, /* Interrupt Cause Set - WO */ 57 E1000_ICS = 0x000C8, /* Interrupt Cause Set - WO */
@@ -94,31 +85,40 @@ enum e1e_registers {
94 E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */ 85 E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */
95 E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */ 86 E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */
96 E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */ 87 E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */
97 E1000_RDBAL = 0x02800, /* Rx Descriptor Base Address Low - RW */ 88/*
98 E1000_RDBAH = 0x02804, /* Rx Descriptor Base Address High - RW */ 89 * Convenience macros
99 E1000_RDLEN = 0x02808, /* Rx Descriptor Length - RW */
100 E1000_RDH = 0x02810, /* Rx Descriptor Head - RW */
101 E1000_RDT = 0x02818, /* Rx Descriptor Tail - RW */
102 E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */
103 E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */
104#define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8))
105 E1000_RADV = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */
106
107/* Convenience macros
108 * 90 *
109 * Note: "_n" is the queue number of the register to be written to. 91 * Note: "_n" is the queue number of the register to be written to.
110 * 92 *
111 * Example usage: 93 * Example usage:
112 * E1000_RDBAL_REG(current_rx_queue) 94 * E1000_RDBAL(current_rx_queue)
113 *
114 */ 95 */
115#define E1000_RDBAL_REG(_n) (E1000_RDBAL + (_n << 8)) 96 E1000_RDBAL_BASE = 0x02800, /* Rx Descriptor Base Address Low - RW */
97#define E1000_RDBAL(_n) (E1000_RDBAL_BASE + (_n << 8))
98 E1000_RDBAH_BASE = 0x02804, /* Rx Descriptor Base Address High - RW */
99#define E1000_RDBAH(_n) (E1000_RDBAH_BASE + (_n << 8))
100 E1000_RDLEN_BASE = 0x02808, /* Rx Descriptor Length - RW */
101#define E1000_RDLEN(_n) (E1000_RDLEN_BASE + (_n << 8))
102 E1000_RDH_BASE = 0x02810, /* Rx Descriptor Head - RW */
103#define E1000_RDH(_n) (E1000_RDH_BASE + (_n << 8))
104 E1000_RDT_BASE = 0x02818, /* Rx Descriptor Tail - RW */
105#define E1000_RDT(_n) (E1000_RDT_BASE + (_n << 8))
106 E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */
107 E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */
108#define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8))
109 E1000_RADV = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */
110
116 E1000_KABGTXD = 0x03004, /* AFE Band Gap Transmit Ref Data */ 111 E1000_KABGTXD = 0x03004, /* AFE Band Gap Transmit Ref Data */
117 E1000_TDBAL = 0x03800, /* Tx Descriptor Base Address Low - RW */ 112 E1000_TDBAL_BASE = 0x03800, /* Tx Descriptor Base Address Low - RW */
118 E1000_TDBAH = 0x03804, /* Tx Descriptor Base Address High - RW */ 113#define E1000_TDBAL(_n) (E1000_TDBAL_BASE + (_n << 8))
119 E1000_TDLEN = 0x03808, /* Tx Descriptor Length - RW */ 114 E1000_TDBAH_BASE = 0x03804, /* Tx Descriptor Base Address High - RW */
120 E1000_TDH = 0x03810, /* Tx Descriptor Head - RW */ 115#define E1000_TDBAH(_n) (E1000_TDBAH_BASE + (_n << 8))
121 E1000_TDT = 0x03818, /* Tx Descriptor Tail - RW */ 116 E1000_TDLEN_BASE = 0x03808, /* Tx Descriptor Length - RW */
117#define E1000_TDLEN(_n) (E1000_TDLEN_BASE + (_n << 8))
118 E1000_TDH_BASE = 0x03810, /* Tx Descriptor Head - RW */
119#define E1000_TDH(_n) (E1000_TDH_BASE + (_n << 8))
120 E1000_TDT_BASE = 0x03818, /* Tx Descriptor Tail - RW */
121#define E1000_TDT(_n) (E1000_TDT_BASE + (_n << 8))
122 E1000_TIDV = 0x03820, /* Tx Interrupt Delay Value - RW */ 122 E1000_TIDV = 0x03820, /* Tx Interrupt Delay Value - RW */
123 E1000_TXDCTL_BASE = 0x03828, /* Tx Descriptor Control - RW */ 123 E1000_TXDCTL_BASE = 0x03828, /* Tx Descriptor Control - RW */
124#define E1000_TXDCTL(_n) (E1000_TXDCTL_BASE + (_n << 8)) 124#define E1000_TXDCTL(_n) (E1000_TXDCTL_BASE + (_n << 8))
@@ -200,6 +200,14 @@ enum e1e_registers {
200#define E1000_RA (E1000_RAL(0)) 200#define E1000_RA (E1000_RAL(0))
201 E1000_RAH_BASE = 0x05404, /* Receive Address High - RW */ 201 E1000_RAH_BASE = 0x05404, /* Receive Address High - RW */
202#define E1000_RAH(_n) (E1000_RAH_BASE + ((_n) * 8)) 202#define E1000_RAH(_n) (E1000_RAH_BASE + ((_n) * 8))
203 E1000_SHRAL_PCH_LPT_BASE = 0x05408,
204#define E1000_SHRAL_PCH_LPT(_n) (E1000_SHRAL_PCH_LPT_BASE + ((_n) * 8))
205 E1000_SHRAH_PCH_LTP_BASE = 0x0540C,
206#define E1000_SHRAH_PCH_LPT(_n) (E1000_SHRAH_PCH_LTP_BASE + ((_n) * 8))
207 E1000_SHRAL_BASE = 0x05438, /* Shared Receive Address Low - RW */
208#define E1000_SHRAL(_n) (E1000_SHRAL_BASE + ((_n) * 8))
209 E1000_SHRAH_BASE = 0x0543C, /* Shared Receive Address High - RW */
210#define E1000_SHRAH(_n) (E1000_SHRAH_BASE + ((_n) * 8))
203 E1000_VFTA = 0x05600, /* VLAN Filter Table Array - RW Array */ 211 E1000_VFTA = 0x05600, /* VLAN Filter Table Array - RW Array */
204 E1000_WUC = 0x05800, /* Wakeup Control - RW */ 212 E1000_WUC = 0x05800, /* Wakeup Control - RW */
205 E1000_WUFC = 0x05808, /* Wakeup Filter Control - RW */ 213 E1000_WUFC = 0x05808, /* Wakeup Filter Control - RW */
@@ -402,6 +410,8 @@ enum e1e_registers {
402#define E1000_DEV_ID_PCH_D_HV_DC 0x10F0 410#define E1000_DEV_ID_PCH_D_HV_DC 0x10F0
403#define E1000_DEV_ID_PCH2_LV_LM 0x1502 411#define E1000_DEV_ID_PCH2_LV_LM 0x1502
404#define E1000_DEV_ID_PCH2_LV_V 0x1503 412#define E1000_DEV_ID_PCH2_LV_V 0x1503
413#define E1000_DEV_ID_PCH_LPT_I217_LM 0x153A
414#define E1000_DEV_ID_PCH_LPT_I217_V 0x153B
405 415
406#define E1000_REVISION_4 4 416#define E1000_REVISION_4 4
407 417
@@ -422,6 +432,7 @@ enum e1000_mac_type {
422 e1000_ich10lan, 432 e1000_ich10lan,
423 e1000_pchlan, 433 e1000_pchlan,
424 e1000_pch2lan, 434 e1000_pch2lan,
435 e1000_pch_lpt,
425}; 436};
426 437
427enum e1000_media_type { 438enum e1000_media_type {
@@ -459,6 +470,7 @@ enum e1000_phy_type {
459 e1000_phy_82578, 470 e1000_phy_82578,
460 e1000_phy_82577, 471 e1000_phy_82577,
461 e1000_phy_82579, 472 e1000_phy_82579,
473 e1000_phy_i217,
462}; 474};
463 475
464enum e1000_bus_width { 476enum e1000_bus_width {
@@ -782,6 +794,7 @@ struct e1000_mac_operations {
782 s32 (*setup_led)(struct e1000_hw *); 794 s32 (*setup_led)(struct e1000_hw *);
783 void (*write_vfta)(struct e1000_hw *, u32, u32); 795 void (*write_vfta)(struct e1000_hw *, u32, u32);
784 void (*config_collision_dist)(struct e1000_hw *); 796 void (*config_collision_dist)(struct e1000_hw *);
797 void (*rar_set)(struct e1000_hw *, u8 *, u32);
785 s32 (*read_mac_addr)(struct e1000_hw *); 798 s32 (*read_mac_addr)(struct e1000_hw *);
786}; 799};
787 800
@@ -966,6 +979,7 @@ struct e1000_dev_spec_ich8lan {
966 struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS]; 979 struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS];
967 bool nvm_k1_enabled; 980 bool nvm_k1_enabled;
968 bool eee_disable; 981 bool eee_disable;
982 u16 eee_lp_ability;
969}; 983};
970 984
971struct e1000_hw { 985struct e1000_hw {
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index b461c24945e3..bbf70ba367da 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -105,6 +105,9 @@
105#define E1000_FEXTNVM_SW_CONFIG 1 105#define E1000_FEXTNVM_SW_CONFIG 1
106#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */ 106#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */
107 107
108#define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK 0x0C000000
109#define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC 0x08000000
110
108#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7 111#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7
109#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7 112#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7
110#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3 113#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
@@ -112,6 +115,8 @@
112#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL 115#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
113 116
114#define E1000_ICH_RAR_ENTRIES 7 117#define E1000_ICH_RAR_ENTRIES 7
118#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */
119#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */
115 120
116#define PHY_PAGE_SHIFT 5 121#define PHY_PAGE_SHIFT 5
117#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ 122#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
@@ -127,14 +132,22 @@
127 132
128#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */ 133#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in milliseconds */
129 134
135/* SMBus Control Phy Register */
136#define CV_SMB_CTRL PHY_REG(769, 23)
137#define CV_SMB_CTRL_FORCE_SMBUS 0x0001
138
130/* SMBus Address Phy Register */ 139/* SMBus Address Phy Register */
131#define HV_SMB_ADDR PHY_REG(768, 26) 140#define HV_SMB_ADDR PHY_REG(768, 26)
132#define HV_SMB_ADDR_MASK 0x007F 141#define HV_SMB_ADDR_MASK 0x007F
133#define HV_SMB_ADDR_PEC_EN 0x0200 142#define HV_SMB_ADDR_PEC_EN 0x0200
134#define HV_SMB_ADDR_VALID 0x0080 143#define HV_SMB_ADDR_VALID 0x0080
144#define HV_SMB_ADDR_FREQ_MASK 0x1100
145#define HV_SMB_ADDR_FREQ_LOW_SHIFT 8
146#define HV_SMB_ADDR_FREQ_HIGH_SHIFT 12
135 147
136/* PHY Power Management Control */ 148/* PHY Power Management Control */
137#define HV_PM_CTRL PHY_REG(770, 17) 149#define HV_PM_CTRL PHY_REG(770, 17)
150#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
138 151
139/* PHY Low Power Idle Control */ 152/* PHY Low Power Idle Control */
140#define I82579_LPI_CTRL PHY_REG(772, 20) 153#define I82579_LPI_CTRL PHY_REG(772, 20)
@@ -147,11 +160,26 @@
147#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */ 160#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */
148#define I82579_MSE_THRESHOLD 0x084F /* Mean Square Error Threshold */ 161#define I82579_MSE_THRESHOLD 0x084F /* Mean Square Error Threshold */
149#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */ 162#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */
163#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */
164#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */
165#define I217_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE supported */
166
167/* Intel Rapid Start Technology Support */
168#define I217_PROXY_CTRL PHY_REG(BM_WUC_PAGE, 70)
169#define I217_PROXY_CTRL_AUTO_DISABLE 0x0080
170#define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28)
171#define I217_SxCTRL_MASK 0x1000
172#define I217_CGFREG PHY_REG(772, 29)
173#define I217_CGFREG_MASK 0x0002
174#define I217_MEMPWR PHY_REG(772, 26)
175#define I217_MEMPWR_MASK 0x0010
150 176
151/* Strapping Option Register - RO */ 177/* Strapping Option Register - RO */
152#define E1000_STRAP 0x0000C 178#define E1000_STRAP 0x0000C
153#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000 179#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000
154#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17 180#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17
181#define E1000_STRAP_SMT_FREQ_MASK 0x00003000
182#define E1000_STRAP_SMT_FREQ_SHIFT 12
155 183
156/* OEM Bits Phy Register */ 184/* OEM Bits Phy Register */
157#define HV_OEM_BITS PHY_REG(768, 25) 185#define HV_OEM_BITS PHY_REG(768, 25)
@@ -255,6 +283,8 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
255static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); 283static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
256static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); 284static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
257static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); 285static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
286static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
287static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
258static s32 e1000_k1_workaround_lv(struct e1000_hw *hw); 288static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
259static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate); 289static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
260 290
@@ -283,18 +313,161 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
283#define ew16flash(reg, val) __ew16flash(hw, (reg), (val)) 313#define ew16flash(reg, val) __ew16flash(hw, (reg), (val))
284#define ew32flash(reg, val) __ew32flash(hw, (reg), (val)) 314#define ew32flash(reg, val) __ew32flash(hw, (reg), (val))
285 315
286static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw) 316/**
317 * e1000_phy_is_accessible_pchlan - Check if able to access PHY registers
318 * @hw: pointer to the HW structure
319 *
320 * Test access to the PHY registers by reading the PHY ID registers. If
321 * the PHY ID is already known (e.g. resume path) compare it with known ID,
322 * otherwise assume the read PHY ID is correct if it is valid.
323 *
324 * Assumes the sw/fw/hw semaphore is already acquired.
325 **/
326static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
287{ 327{
288 u32 ctrl; 328 u16 phy_reg;
329 u32 phy_id;
289 330
290 ctrl = er32(CTRL); 331 e1e_rphy_locked(hw, PHY_ID1, &phy_reg);
291 ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE; 332 phy_id = (u32)(phy_reg << 16);
292 ctrl &= ~E1000_CTRL_LANPHYPC_VALUE; 333 e1e_rphy_locked(hw, PHY_ID2, &phy_reg);
293 ew32(CTRL, ctrl); 334 phy_id |= (u32)(phy_reg & PHY_REVISION_MASK);
294 e1e_flush(); 335
295 udelay(10); 336 if (hw->phy.id) {
296 ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE; 337 if (hw->phy.id == phy_id)
297 ew32(CTRL, ctrl); 338 return true;
339 } else {
340 if ((phy_id != 0) && (phy_id != PHY_REVISION_MASK))
341 hw->phy.id = phy_id;
342 return true;
343 }
344
345 return false;
346}
347
348/**
349 * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds
350 * @hw: pointer to the HW structure
351 *
352 * Workarounds/flow necessary for PHY initialization during driver load
353 * and resume paths.
354 **/
355static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
356{
357 u32 mac_reg, fwsm = er32(FWSM);
358 s32 ret_val;
359 u16 phy_reg;
360
361 ret_val = hw->phy.ops.acquire(hw);
362 if (ret_val) {
363 e_dbg("Failed to initialize PHY flow\n");
364 return ret_val;
365 }
366
367 /*
368 * The MAC-PHY interconnect may be in SMBus mode. If the PHY is
369 * inaccessible and resetting the PHY is not blocked, toggle the
370 * LANPHYPC Value bit to force the interconnect to PCIe mode.
371 */
372 switch (hw->mac.type) {
373 case e1000_pch_lpt:
374 if (e1000_phy_is_accessible_pchlan(hw))
375 break;
376
377 /*
378 * Before toggling LANPHYPC, see if PHY is accessible by
379 * forcing MAC to SMBus mode first.
380 */
381 mac_reg = er32(CTRL_EXT);
382 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
383 ew32(CTRL_EXT, mac_reg);
384
385 /* fall-through */
386 case e1000_pch2lan:
387 /*
388 * Gate automatic PHY configuration by hardware on
389 * non-managed 82579
390 */
391 if ((hw->mac.type == e1000_pch2lan) &&
392 !(fwsm & E1000_ICH_FWSM_FW_VALID))
393 e1000_gate_hw_phy_config_ich8lan(hw, true);
394
395 if (e1000_phy_is_accessible_pchlan(hw)) {
396 if (hw->mac.type == e1000_pch_lpt) {
397 /* Unforce SMBus mode in PHY */
398 e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
399 phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
400 e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
401
402 /* Unforce SMBus mode in MAC */
403 mac_reg = er32(CTRL_EXT);
404 mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
405 ew32(CTRL_EXT, mac_reg);
406 }
407 break;
408 }
409
410 /* fall-through */
411 case e1000_pchlan:
412 if ((hw->mac.type == e1000_pchlan) &&
413 (fwsm & E1000_ICH_FWSM_FW_VALID))
414 break;
415
416 if (hw->phy.ops.check_reset_block(hw)) {
417 e_dbg("Required LANPHYPC toggle blocked by ME\n");
418 break;
419 }
420
421 e_dbg("Toggling LANPHYPC\n");
422
423 /* Set Phy Config Counter to 50msec */
424 mac_reg = er32(FEXTNVM3);
425 mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
426 mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
427 ew32(FEXTNVM3, mac_reg);
428
429 /* Toggle LANPHYPC Value bit */
430 mac_reg = er32(CTRL);
431 mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
432 mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
433 ew32(CTRL, mac_reg);
434 e1e_flush();
435 udelay(10);
436 mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
437 ew32(CTRL, mac_reg);
438 e1e_flush();
439 if (hw->mac.type < e1000_pch_lpt) {
440 msleep(50);
441 } else {
442 u16 count = 20;
443 do {
444 usleep_range(5000, 10000);
445 } while (!(er32(CTRL_EXT) &
446 E1000_CTRL_EXT_LPCD) && count--);
447 }
448 break;
449 default:
450 break;
451 }
452
453 hw->phy.ops.release(hw);
454
455 /*
456 * Reset the PHY before any access to it. Doing so, ensures
457 * that the PHY is in a known good state before we read/write
458 * PHY registers. The generic reset is sufficient here,
459 * because we haven't determined the PHY type yet.
460 */
461 ret_val = e1000e_phy_hw_reset_generic(hw);
462
463 /* Ungate automatic PHY configuration on non-managed 82579 */
464 if ((hw->mac.type == e1000_pch2lan) &&
465 !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
466 usleep_range(10000, 20000);
467 e1000_gate_hw_phy_config_ich8lan(hw, false);
468 }
469
470 return ret_val;
298} 471}
299 472
300/** 473/**
@@ -324,70 +497,41 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
324 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; 497 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
325 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; 498 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
326 499
327 if (!hw->phy.ops.check_reset_block(hw)) { 500 phy->id = e1000_phy_unknown;
328 u32 fwsm = er32(FWSM);
329
330 /*
331 * The MAC-PHY interconnect may still be in SMBus mode after
332 * Sx->S0. If resetting the PHY is not blocked, toggle the
333 * LANPHYPC Value bit to force the interconnect to PCIe mode.
334 */
335 e1000_toggle_lanphypc_value_ich8lan(hw);
336 msleep(50);
337
338 /*
339 * Gate automatic PHY configuration by hardware on
340 * non-managed 82579
341 */
342 if ((hw->mac.type == e1000_pch2lan) &&
343 !(fwsm & E1000_ICH_FWSM_FW_VALID))
344 e1000_gate_hw_phy_config_ich8lan(hw, true);
345
346 /*
347 * Reset the PHY before any access to it. Doing so, ensures
348 * that the PHY is in a known good state before we read/write
349 * PHY registers. The generic reset is sufficient here,
350 * because we haven't determined the PHY type yet.
351 */
352 ret_val = e1000e_phy_hw_reset_generic(hw);
353 if (ret_val)
354 return ret_val;
355 501
356 /* Ungate automatic PHY configuration on non-managed 82579 */ 502 ret_val = e1000_init_phy_workarounds_pchlan(hw);
357 if ((hw->mac.type == e1000_pch2lan) && 503 if (ret_val)
358 !(fwsm & E1000_ICH_FWSM_FW_VALID)) { 504 return ret_val;
359 usleep_range(10000, 20000);
360 e1000_gate_hw_phy_config_ich8lan(hw, false);
361 }
362 }
363 505
364 phy->id = e1000_phy_unknown; 506 if (phy->id == e1000_phy_unknown)
365 switch (hw->mac.type) { 507 switch (hw->mac.type) {
366 default: 508 default:
367 ret_val = e1000e_get_phy_id(hw); 509 ret_val = e1000e_get_phy_id(hw);
368 if (ret_val) 510 if (ret_val)
369 return ret_val; 511 return ret_val;
370 if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK)) 512 if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
513 break;
514 /* fall-through */
515 case e1000_pch2lan:
516 case e1000_pch_lpt:
517 /*
518 * In case the PHY needs to be in mdio slow mode,
519 * set slow mode and try to get the PHY id again.
520 */
521 ret_val = e1000_set_mdio_slow_mode_hv(hw);
522 if (ret_val)
523 return ret_val;
524 ret_val = e1000e_get_phy_id(hw);
525 if (ret_val)
526 return ret_val;
371 break; 527 break;
372 /* fall-through */ 528 }
373 case e1000_pch2lan:
374 /*
375 * In case the PHY needs to be in mdio slow mode,
376 * set slow mode and try to get the PHY id again.
377 */
378 ret_val = e1000_set_mdio_slow_mode_hv(hw);
379 if (ret_val)
380 return ret_val;
381 ret_val = e1000e_get_phy_id(hw);
382 if (ret_val)
383 return ret_val;
384 break;
385 }
386 phy->type = e1000e_get_phy_type_from_id(phy->id); 529 phy->type = e1000e_get_phy_type_from_id(phy->id);
387 530
388 switch (phy->type) { 531 switch (phy->type) {
389 case e1000_phy_82577: 532 case e1000_phy_82577:
390 case e1000_phy_82579: 533 case e1000_phy_82579:
534 case e1000_phy_i217:
391 phy->ops.check_polarity = e1000_check_polarity_82577; 535 phy->ops.check_polarity = e1000_check_polarity_82577;
392 phy->ops.force_speed_duplex = 536 phy->ops.force_speed_duplex =
393 e1000_phy_force_speed_duplex_82577; 537 e1000_phy_force_speed_duplex_82577;
@@ -572,7 +716,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
572 /* Adaptive IFS supported */ 716 /* Adaptive IFS supported */
573 mac->adaptive_ifs = true; 717 mac->adaptive_ifs = true;
574 718
575 /* LED operations */ 719 /* LED and other operations */
576 switch (mac->type) { 720 switch (mac->type) {
577 case e1000_ich8lan: 721 case e1000_ich8lan:
578 case e1000_ich9lan: 722 case e1000_ich9lan:
@@ -591,8 +735,12 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
591 mac->ops.led_on = e1000_led_on_ich8lan; 735 mac->ops.led_on = e1000_led_on_ich8lan;
592 mac->ops.led_off = e1000_led_off_ich8lan; 736 mac->ops.led_off = e1000_led_off_ich8lan;
593 break; 737 break;
594 case e1000_pchlan:
595 case e1000_pch2lan: 738 case e1000_pch2lan:
739 mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES;
740 mac->ops.rar_set = e1000_rar_set_pch2lan;
741 /* fall-through */
742 case e1000_pch_lpt:
743 case e1000_pchlan:
596 /* check management mode */ 744 /* check management mode */
597 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; 745 mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
598 /* ID LED init */ 746 /* ID LED init */
@@ -609,12 +757,20 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
609 break; 757 break;
610 } 758 }
611 759
760 if (mac->type == e1000_pch_lpt) {
761 mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
762 mac->ops.rar_set = e1000_rar_set_pch_lpt;
763 }
764
612 /* Enable PCS Lock-loss workaround for ICH8 */ 765 /* Enable PCS Lock-loss workaround for ICH8 */
613 if (mac->type == e1000_ich8lan) 766 if (mac->type == e1000_ich8lan)
614 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true); 767 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
615 768
616 /* Gate automatic PHY configuration by hardware on managed 82579 */ 769 /*
617 if ((mac->type == e1000_pch2lan) && 770 * Gate automatic PHY configuration by hardware on managed
771 * 82579 and i217
772 */
773 if ((mac->type == e1000_pch2lan || mac->type == e1000_pch_lpt) &&
618 (er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) 774 (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
619 e1000_gate_hw_phy_config_ich8lan(hw, true); 775 e1000_gate_hw_phy_config_ich8lan(hw, true);
620 776
@@ -630,22 +786,50 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
630 **/ 786 **/
631static s32 e1000_set_eee_pchlan(struct e1000_hw *hw) 787static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
632{ 788{
789 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
633 s32 ret_val = 0; 790 s32 ret_val = 0;
634 u16 phy_reg; 791 u16 phy_reg;
635 792
636 if (hw->phy.type != e1000_phy_82579) 793 if ((hw->phy.type != e1000_phy_82579) &&
794 (hw->phy.type != e1000_phy_i217))
637 return 0; 795 return 0;
638 796
639 ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg); 797 ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
640 if (ret_val) 798 if (ret_val)
641 return ret_val; 799 return ret_val;
642 800
643 if (hw->dev_spec.ich8lan.eee_disable) 801 if (dev_spec->eee_disable)
644 phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK; 802 phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
645 else 803 else
646 phy_reg |= I82579_LPI_CTRL_ENABLE_MASK; 804 phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;
647 805
648 return e1e_wphy(hw, I82579_LPI_CTRL, phy_reg); 806 ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
807 if (ret_val)
808 return ret_val;
809
810 if ((hw->phy.type == e1000_phy_i217) && !dev_spec->eee_disable) {
811 /* Save off link partner's EEE ability */
812 ret_val = hw->phy.ops.acquire(hw);
813 if (ret_val)
814 return ret_val;
815 ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR,
816 I217_EEE_LP_ABILITY);
817 if (ret_val)
818 goto release;
819 e1e_rphy_locked(hw, I82579_EMI_DATA, &dev_spec->eee_lp_ability);
820
821 /*
822 * EEE is not supported in 100Half, so ignore partner's EEE
823 * in 100 ability if full-duplex is not advertised.
824 */
825 e1e_rphy_locked(hw, PHY_LP_ABILITY, &phy_reg);
826 if (!(phy_reg & NWAY_LPAR_100TX_FD_CAPS))
827 dev_spec->eee_lp_ability &= ~I217_EEE_100_SUPPORTED;
828release:
829 hw->phy.ops.release(hw);
830 }
831
832 return 0;
649} 833}
650 834
651/** 835/**
@@ -687,6 +871,9 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
687 return ret_val; 871 return ret_val;
688 } 872 }
689 873
874 /* Clear link partner's EEE ability */
875 hw->dev_spec.ich8lan.eee_lp_ability = 0;
876
690 if (!link) 877 if (!link)
691 return 0; /* No link detected */ 878 return 0; /* No link detected */
692 879
@@ -782,6 +969,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
782 break; 969 break;
783 case e1000_pchlan: 970 case e1000_pchlan:
784 case e1000_pch2lan: 971 case e1000_pch2lan:
972 case e1000_pch_lpt:
785 rc = e1000_init_phy_params_pchlan(hw); 973 rc = e1000_init_phy_params_pchlan(hw);
786 break; 974 break;
787 default: 975 default:
@@ -967,6 +1155,145 @@ static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
967} 1155}
968 1156
969/** 1157/**
1158 * e1000_rar_set_pch2lan - Set receive address register
1159 * @hw: pointer to the HW structure
1160 * @addr: pointer to the receive address
1161 * @index: receive address array register
1162 *
1163 * Sets the receive address array register at index to the address passed
1164 * in by addr. For 82579, RAR[0] is the base address register that is to
1165 * contain the MAC address but RAR[1-6] are reserved for manageability (ME).
1166 * Use SHRA[0-3] in place of those reserved for ME.
1167 **/
1168static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
1169{
1170 u32 rar_low, rar_high;
1171
1172 /*
1173 * HW expects these in little endian so we reverse the byte order
1174 * from network order (big endian) to little endian
1175 */
1176 rar_low = ((u32)addr[0] |
1177 ((u32)addr[1] << 8) |
1178 ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
1179
1180 rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
1181
1182 /* If MAC address zero, no need to set the AV bit */
1183 if (rar_low || rar_high)
1184 rar_high |= E1000_RAH_AV;
1185
1186 if (index == 0) {
1187 ew32(RAL(index), rar_low);
1188 e1e_flush();
1189 ew32(RAH(index), rar_high);
1190 e1e_flush();
1191 return;
1192 }
1193
1194 if (index < hw->mac.rar_entry_count) {
1195 s32 ret_val;
1196
1197 ret_val = e1000_acquire_swflag_ich8lan(hw);
1198 if (ret_val)
1199 goto out;
1200
1201 ew32(SHRAL(index - 1), rar_low);
1202 e1e_flush();
1203 ew32(SHRAH(index - 1), rar_high);
1204 e1e_flush();
1205
1206 e1000_release_swflag_ich8lan(hw);
1207
1208 /* verify the register updates */
1209 if ((er32(SHRAL(index - 1)) == rar_low) &&
1210 (er32(SHRAH(index - 1)) == rar_high))
1211 return;
1212
1213 e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
1214 (index - 1), er32(FWSM));
1215 }
1216
1217out:
1218 e_dbg("Failed to write receive address at index %d\n", index);
1219}
1220
1221/**
1222 * e1000_rar_set_pch_lpt - Set receive address registers
1223 * @hw: pointer to the HW structure
1224 * @addr: pointer to the receive address
1225 * @index: receive address array register
1226 *
1227 * Sets the receive address register array at index to the address passed
1228 * in by addr. For LPT, RAR[0] is the base address register that is to
1229 * contain the MAC address. SHRA[0-10] are the shared receive address
1230 * registers that are shared between the Host and manageability engine (ME).
1231 **/
1232static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
1233{
1234 u32 rar_low, rar_high;
1235 u32 wlock_mac;
1236
1237 /*
1238 * HW expects these in little endian so we reverse the byte order
1239 * from network order (big endian) to little endian
1240 */
1241 rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
1242 ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
1243
1244 rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
1245
1246 /* If MAC address zero, no need to set the AV bit */
1247 if (rar_low || rar_high)
1248 rar_high |= E1000_RAH_AV;
1249
1250 if (index == 0) {
1251 ew32(RAL(index), rar_low);
1252 e1e_flush();
1253 ew32(RAH(index), rar_high);
1254 e1e_flush();
1255 return;
1256 }
1257
1258 /*
1259 * The manageability engine (ME) can lock certain SHRAR registers that
1260 * it is using - those registers are unavailable for use.
1261 */
1262 if (index < hw->mac.rar_entry_count) {
1263 wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK;
1264 wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
1265
1266 /* Check if all SHRAR registers are locked */
1267 if (wlock_mac == 1)
1268 goto out;
1269
1270 if ((wlock_mac == 0) || (index <= wlock_mac)) {
1271 s32 ret_val;
1272
1273 ret_val = e1000_acquire_swflag_ich8lan(hw);
1274
1275 if (ret_val)
1276 goto out;
1277
1278 ew32(SHRAL_PCH_LPT(index - 1), rar_low);
1279 e1e_flush();
1280 ew32(SHRAH_PCH_LPT(index - 1), rar_high);
1281 e1e_flush();
1282
1283 e1000_release_swflag_ich8lan(hw);
1284
1285 /* verify the register updates */
1286 if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) &&
1287 (er32(SHRAH_PCH_LPT(index - 1)) == rar_high))
1288 return;
1289 }
1290 }
1291
1292out:
1293 e_dbg("Failed to write receive address at index %d\n", index);
1294}
1295
1296/**
970 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked 1297 * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
971 * @hw: pointer to the HW structure 1298 * @hw: pointer to the HW structure
972 * 1299 *
@@ -994,6 +1321,8 @@ static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
994{ 1321{
995 u16 phy_data; 1322 u16 phy_data;
996 u32 strap = er32(STRAP); 1323 u32 strap = er32(STRAP);
1324 u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
1325 E1000_STRAP_SMT_FREQ_SHIFT;
997 s32 ret_val = 0; 1326 s32 ret_val = 0;
998 1327
999 strap &= E1000_STRAP_SMBUS_ADDRESS_MASK; 1328 strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
@@ -1006,6 +1335,19 @@ static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
1006 phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT); 1335 phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
1007 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; 1336 phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
1008 1337
1338 if (hw->phy.type == e1000_phy_i217) {
1339 /* Restore SMBus frequency */
1340 if (freq--) {
1341 phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
1342 phy_data |= (freq & (1 << 0)) <<
1343 HV_SMB_ADDR_FREQ_LOW_SHIFT;
1344 phy_data |= (freq & (1 << 1)) <<
1345 (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
1346 } else {
1347 e_dbg("Unsupported SMB frequency in PHY\n");
1348 }
1349 }
1350
1009 return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data); 1351 return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
1010} 1352}
1011 1353
@@ -1043,6 +1385,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1043 /* Fall-thru */ 1385 /* Fall-thru */
1044 case e1000_pchlan: 1386 case e1000_pchlan:
1045 case e1000_pch2lan: 1387 case e1000_pch2lan:
1388 case e1000_pch_lpt:
1046 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; 1389 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
1047 break; 1390 break;
1048 default: 1391 default:
@@ -1062,10 +1405,9 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1062 * extended configuration before SW configuration 1405 * extended configuration before SW configuration
1063 */ 1406 */
1064 data = er32(EXTCNF_CTRL); 1407 data = er32(EXTCNF_CTRL);
1065 if (!(hw->mac.type == e1000_pch2lan)) { 1408 if ((hw->mac.type < e1000_pch2lan) &&
1066 if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) 1409 (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
1067 goto release; 1410 goto release;
1068 }
1069 1411
1070 cnf_size = er32(EXTCNF_SIZE); 1412 cnf_size = er32(EXTCNF_SIZE);
1071 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK; 1413 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
@@ -1076,9 +1418,9 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1076 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; 1418 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
1077 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; 1419 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
1078 1420
1079 if ((!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) && 1421 if (((hw->mac.type == e1000_pchlan) &&
1080 (hw->mac.type == e1000_pchlan)) || 1422 !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
1081 (hw->mac.type == e1000_pch2lan)) { 1423 (hw->mac.type > e1000_pchlan)) {
1082 /* 1424 /*
1083 * HW configures the SMBus address and LEDs when the 1425 * HW configures the SMBus address and LEDs when the
1084 * OEM and LCD Write Enable bits are set in the NVM. 1426 * OEM and LCD Write Enable bits are set in the NVM.
@@ -1121,8 +1463,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
1121 reg_addr &= PHY_REG_MASK; 1463 reg_addr &= PHY_REG_MASK;
1122 reg_addr |= phy_page; 1464 reg_addr |= phy_page;
1123 1465
1124 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr, 1466 ret_val = e1e_wphy_locked(hw, (u32)reg_addr, reg_data);
1125 reg_data);
1126 if (ret_val) 1467 if (ret_val)
1127 goto release; 1468 goto release;
1128 } 1469 }
@@ -1159,8 +1500,8 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1159 /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */ 1500 /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
1160 if (link) { 1501 if (link) {
1161 if (hw->phy.type == e1000_phy_82578) { 1502 if (hw->phy.type == e1000_phy_82578) {
1162 ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS, 1503 ret_val = e1e_rphy_locked(hw, BM_CS_STATUS,
1163 &status_reg); 1504 &status_reg);
1164 if (ret_val) 1505 if (ret_val)
1165 goto release; 1506 goto release;
1166 1507
@@ -1175,8 +1516,7 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1175 } 1516 }
1176 1517
1177 if (hw->phy.type == e1000_phy_82577) { 1518 if (hw->phy.type == e1000_phy_82577) {
1178 ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS, 1519 ret_val = e1e_rphy_locked(hw, HV_M_STATUS, &status_reg);
1179 &status_reg);
1180 if (ret_val) 1520 if (ret_val)
1181 goto release; 1521 goto release;
1182 1522
@@ -1191,15 +1531,13 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
1191 } 1531 }
1192 1532
1193 /* Link stall fix for link up */ 1533 /* Link stall fix for link up */
1194 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), 1534 ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x0100);
1195 0x0100);
1196 if (ret_val) 1535 if (ret_val)
1197 goto release; 1536 goto release;
1198 1537
1199 } else { 1538 } else {
1200 /* Link stall fix for link down */ 1539 /* Link stall fix for link down */
1201 ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19), 1540 ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x4100);
1202 0x4100);
1203 if (ret_val) 1541 if (ret_val)
1204 goto release; 1542 goto release;
1205 } 1543 }
@@ -1279,14 +1617,14 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1279 u32 mac_reg; 1617 u32 mac_reg;
1280 u16 oem_reg; 1618 u16 oem_reg;
1281 1619
1282 if ((hw->mac.type != e1000_pch2lan) && (hw->mac.type != e1000_pchlan)) 1620 if (hw->mac.type < e1000_pchlan)
1283 return ret_val; 1621 return ret_val;
1284 1622
1285 ret_val = hw->phy.ops.acquire(hw); 1623 ret_val = hw->phy.ops.acquire(hw);
1286 if (ret_val) 1624 if (ret_val)
1287 return ret_val; 1625 return ret_val;
1288 1626
1289 if (!(hw->mac.type == e1000_pch2lan)) { 1627 if (hw->mac.type == e1000_pchlan) {
1290 mac_reg = er32(EXTCNF_CTRL); 1628 mac_reg = er32(EXTCNF_CTRL);
1291 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) 1629 if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
1292 goto release; 1630 goto release;
@@ -1298,7 +1636,7 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1298 1636
1299 mac_reg = er32(PHY_CTRL); 1637 mac_reg = er32(PHY_CTRL);
1300 1638
1301 ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg); 1639 ret_val = e1e_rphy_locked(hw, HV_OEM_BITS, &oem_reg);
1302 if (ret_val) 1640 if (ret_val)
1303 goto release; 1641 goto release;
1304 1642
@@ -1325,7 +1663,7 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
1325 !hw->phy.ops.check_reset_block(hw)) 1663 !hw->phy.ops.check_reset_block(hw))
1326 oem_reg |= HV_OEM_BITS_RESTART_AN; 1664 oem_reg |= HV_OEM_BITS_RESTART_AN;
1327 1665
1328 ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg); 1666 ret_val = e1e_wphy_locked(hw, HV_OEM_BITS, oem_reg);
1329 1667
1330release: 1668release:
1331 hw->phy.ops.release(hw); 1669 hw->phy.ops.release(hw);
@@ -1421,11 +1759,10 @@ static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1421 ret_val = hw->phy.ops.acquire(hw); 1759 ret_val = hw->phy.ops.acquire(hw);
1422 if (ret_val) 1760 if (ret_val)
1423 return ret_val; 1761 return ret_val;
1424 ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data); 1762 ret_val = e1e_rphy_locked(hw, BM_PORT_GEN_CFG, &phy_data);
1425 if (ret_val) 1763 if (ret_val)
1426 goto release; 1764 goto release;
1427 ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG, 1765 ret_val = e1e_wphy_locked(hw, BM_PORT_GEN_CFG, phy_data & 0x00FF);
1428 phy_data & 0x00FF);
1429release: 1766release:
1430 hw->phy.ops.release(hw); 1767 hw->phy.ops.release(hw);
1431 1768
@@ -1484,7 +1821,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1484 u32 mac_reg; 1821 u32 mac_reg;
1485 u16 i; 1822 u16 i;
1486 1823
1487 if (hw->mac.type != e1000_pch2lan) 1824 if (hw->mac.type < e1000_pch2lan)
1488 return 0; 1825 return 0;
1489 1826
1490 /* disable Rx path while enabling/disabling workaround */ 1827 /* disable Rx path while enabling/disabling workaround */
@@ -1657,20 +1994,18 @@ static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
1657 ret_val = hw->phy.ops.acquire(hw); 1994 ret_val = hw->phy.ops.acquire(hw);
1658 if (ret_val) 1995 if (ret_val)
1659 return ret_val; 1996 return ret_val;
1660 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, 1997 ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, I82579_MSE_THRESHOLD);
1661 I82579_MSE_THRESHOLD);
1662 if (ret_val) 1998 if (ret_val)
1663 goto release; 1999 goto release;
1664 /* set MSE higher to enable link to stay up when noise is high */ 2000 /* set MSE higher to enable link to stay up when noise is high */
1665 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, 0x0034); 2001 ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x0034);
1666 if (ret_val) 2002 if (ret_val)
1667 goto release; 2003 goto release;
1668 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, 2004 ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, I82579_MSE_LINK_DOWN);
1669 I82579_MSE_LINK_DOWN);
1670 if (ret_val) 2005 if (ret_val)
1671 goto release; 2006 goto release;
1672 /* drop link after 5 times MSE threshold was reached */ 2007 /* drop link after 5 times MSE threshold was reached */
1673 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA, 0x0005); 2008 ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x0005);
1674release: 2009release:
1675 hw->phy.ops.release(hw); 2010 hw->phy.ops.release(hw);
1676 2011
@@ -1708,8 +2043,18 @@ static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
1708 return ret_val; 2043 return ret_val;
1709 2044
1710 if (status_reg & HV_M_STATUS_SPEED_1000) { 2045 if (status_reg & HV_M_STATUS_SPEED_1000) {
2046 u16 pm_phy_reg;
2047
1711 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; 2048 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
1712 phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; 2049 phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
2050 /* LV 1G Packet drop issue wa */
2051 ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg);
2052 if (ret_val)
2053 return ret_val;
2054 pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA;
2055 ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg);
2056 if (ret_val)
2057 return ret_val;
1713 } else { 2058 } else {
1714 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; 2059 mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
1715 phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; 2060 phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
@@ -1733,7 +2078,7 @@ static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
1733{ 2078{
1734 u32 extcnf_ctrl; 2079 u32 extcnf_ctrl;
1735 2080
1736 if (hw->mac.type != e1000_pch2lan) 2081 if (hw->mac.type < e1000_pch2lan)
1737 return; 2082 return;
1738 2083
1739 extcnf_ctrl = er32(EXTCNF_CTRL); 2084 extcnf_ctrl = er32(EXTCNF_CTRL);
@@ -1835,12 +2180,10 @@ static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
1835 ret_val = hw->phy.ops.acquire(hw); 2180 ret_val = hw->phy.ops.acquire(hw);
1836 if (ret_val) 2181 if (ret_val)
1837 return ret_val; 2182 return ret_val;
1838 ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR, 2183 ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR,
1839 I82579_LPI_UPDATE_TIMER); 2184 I82579_LPI_UPDATE_TIMER);
1840 if (!ret_val) 2185 if (!ret_val)
1841 ret_val = hw->phy.ops.write_reg_locked(hw, 2186 ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, 0x1387);
1842 I82579_EMI_DATA,
1843 0x1387);
1844 hw->phy.ops.release(hw); 2187 hw->phy.ops.release(hw);
1845 } 2188 }
1846 2189
@@ -2213,7 +2556,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2213 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 2556 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2214 2557
2215 /* Check if the flash descriptor is valid */ 2558 /* Check if the flash descriptor is valid */
2216 if (hsfsts.hsf_status.fldesvalid == 0) { 2559 if (!hsfsts.hsf_status.fldesvalid) {
2217 e_dbg("Flash descriptor invalid. SW Sequencing must be used.\n"); 2560 e_dbg("Flash descriptor invalid. SW Sequencing must be used.\n");
2218 return -E1000_ERR_NVM; 2561 return -E1000_ERR_NVM;
2219 } 2562 }
@@ -2233,7 +2576,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2233 * completed. 2576 * completed.
2234 */ 2577 */
2235 2578
2236 if (hsfsts.hsf_status.flcinprog == 0) { 2579 if (!hsfsts.hsf_status.flcinprog) {
2237 /* 2580 /*
2238 * There is no cycle running at present, 2581 * There is no cycle running at present,
2239 * so we can start a cycle. 2582 * so we can start a cycle.
@@ -2251,7 +2594,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
2251 */ 2594 */
2252 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { 2595 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
2253 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 2596 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2254 if (hsfsts.hsf_status.flcinprog == 0) { 2597 if (!hsfsts.hsf_status.flcinprog) {
2255 ret_val = 0; 2598 ret_val = 0;
2256 break; 2599 break;
2257 } 2600 }
@@ -2293,12 +2636,12 @@ static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
2293 /* wait till FDONE bit is set to 1 */ 2636 /* wait till FDONE bit is set to 1 */
2294 do { 2637 do {
2295 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 2638 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2296 if (hsfsts.hsf_status.flcdone == 1) 2639 if (hsfsts.hsf_status.flcdone)
2297 break; 2640 break;
2298 udelay(1); 2641 udelay(1);
2299 } while (i++ < timeout); 2642 } while (i++ < timeout);
2300 2643
2301 if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0) 2644 if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr)
2302 return 0; 2645 return 0;
2303 2646
2304 return -E1000_ERR_NVM; 2647 return -E1000_ERR_NVM;
@@ -2409,10 +2752,10 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2409 * ICH_FLASH_CYCLE_REPEAT_COUNT times. 2752 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
2410 */ 2753 */
2411 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 2754 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2412 if (hsfsts.hsf_status.flcerr == 1) { 2755 if (hsfsts.hsf_status.flcerr) {
2413 /* Repeat for some time before giving up. */ 2756 /* Repeat for some time before giving up. */
2414 continue; 2757 continue;
2415 } else if (hsfsts.hsf_status.flcdone == 0) { 2758 } else if (!hsfsts.hsf_status.flcdone) {
2416 e_dbg("Timeout error - flash cycle did not complete.\n"); 2759 e_dbg("Timeout error - flash cycle did not complete.\n");
2417 break; 2760 break;
2418 } 2761 }
@@ -2642,7 +2985,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
2642 if (ret_val) 2985 if (ret_val)
2643 return ret_val; 2986 return ret_val;
2644 2987
2645 if ((data & 0x40) == 0) { 2988 if (!(data & 0x40)) {
2646 data |= 0x40; 2989 data |= 0x40;
2647 ret_val = e1000_write_nvm(hw, 0x19, 1, &data); 2990 ret_val = e1000_write_nvm(hw, 0x19, 1, &data);
2648 if (ret_val) 2991 if (ret_val)
@@ -2760,10 +3103,10 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2760 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. 3103 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
2761 */ 3104 */
2762 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 3105 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2763 if (hsfsts.hsf_status.flcerr == 1) 3106 if (hsfsts.hsf_status.flcerr)
2764 /* Repeat for some time before giving up. */ 3107 /* Repeat for some time before giving up. */
2765 continue; 3108 continue;
2766 if (hsfsts.hsf_status.flcdone == 0) { 3109 if (!hsfsts.hsf_status.flcdone) {
2767 e_dbg("Timeout error - flash cycle did not complete.\n"); 3110 e_dbg("Timeout error - flash cycle did not complete.\n");
2768 break; 3111 break;
2769 } 3112 }
@@ -2915,10 +3258,10 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
2915 * a few more times else Done 3258 * a few more times else Done
2916 */ 3259 */
2917 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 3260 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
2918 if (hsfsts.hsf_status.flcerr == 1) 3261 if (hsfsts.hsf_status.flcerr)
2919 /* repeat for some time before giving up */ 3262 /* repeat for some time before giving up */
2920 continue; 3263 continue;
2921 else if (hsfsts.hsf_status.flcdone == 0) 3264 else if (!hsfsts.hsf_status.flcdone)
2922 return ret_val; 3265 return ret_val;
2923 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT); 3266 } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
2924 } 3267 }
@@ -3060,8 +3403,8 @@ static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
3060static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) 3403static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3061{ 3404{
3062 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 3405 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3063 u16 reg; 3406 u16 kum_cfg;
3064 u32 ctrl, kab; 3407 u32 ctrl, reg;
3065 s32 ret_val; 3408 s32 ret_val;
3066 3409
3067 /* 3410 /*
@@ -3095,12 +3438,12 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3095 } 3438 }
3096 3439
3097 if (hw->mac.type == e1000_pchlan) { 3440 if (hw->mac.type == e1000_pchlan) {
3098 /* Save the NVM K1 bit setting*/ 3441 /* Save the NVM K1 bit setting */
3099 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &reg); 3442 ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg);
3100 if (ret_val) 3443 if (ret_val)
3101 return ret_val; 3444 return ret_val;
3102 3445
3103 if (reg & E1000_NVM_K1_ENABLE) 3446 if (kum_cfg & E1000_NVM_K1_ENABLE)
3104 dev_spec->nvm_k1_enabled = true; 3447 dev_spec->nvm_k1_enabled = true;
3105 else 3448 else
3106 dev_spec->nvm_k1_enabled = false; 3449 dev_spec->nvm_k1_enabled = false;
@@ -3130,6 +3473,14 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3130 /* cannot issue a flush here because it hangs the hardware */ 3473 /* cannot issue a flush here because it hangs the hardware */
3131 msleep(20); 3474 msleep(20);
3132 3475
3476 /* Set Phy Config Counter to 50msec */
3477 if (hw->mac.type == e1000_pch2lan) {
3478 reg = er32(FEXTNVM3);
3479 reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK;
3480 reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
3481 ew32(FEXTNVM3, reg);
3482 }
3483
3133 if (!ret_val) 3484 if (!ret_val)
3134 clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state); 3485 clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
3135 3486
@@ -3154,9 +3505,9 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3154 ew32(IMC, 0xffffffff); 3505 ew32(IMC, 0xffffffff);
3155 er32(ICR); 3506 er32(ICR);
3156 3507
3157 kab = er32(KABGTXD); 3508 reg = er32(KABGTXD);
3158 kab |= E1000_KABGTXD_BGSQLBIAS; 3509 reg |= E1000_KABGTXD_BGSQLBIAS;
3159 ew32(KABGTXD, kab); 3510 ew32(KABGTXD, reg);
3160 3511
3161 return 0; 3512 return 0;
3162} 3513}
@@ -3309,6 +3660,13 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
3309 */ 3660 */
3310 reg = er32(RFCTL); 3661 reg = er32(RFCTL);
3311 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS); 3662 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
3663
3664 /*
3665 * Disable IPv6 extension header parsing because some malformed
3666 * IPv6 headers can hang the Rx.
3667 */
3668 if (hw->mac.type == e1000_ich8lan)
3669 reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
3312 ew32(RFCTL, reg); 3670 ew32(RFCTL, reg);
3313} 3671}
3314 3672
@@ -3359,6 +3717,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
3359 ew32(FCTTV, hw->fc.pause_time); 3717 ew32(FCTTV, hw->fc.pause_time);
3360 if ((hw->phy.type == e1000_phy_82578) || 3718 if ((hw->phy.type == e1000_phy_82578) ||
3361 (hw->phy.type == e1000_phy_82579) || 3719 (hw->phy.type == e1000_phy_82579) ||
3720 (hw->phy.type == e1000_phy_i217) ||
3362 (hw->phy.type == e1000_phy_82577)) { 3721 (hw->phy.type == e1000_phy_82577)) {
3363 ew32(FCRTV_PCH, hw->fc.refresh_time); 3722 ew32(FCRTV_PCH, hw->fc.refresh_time);
3364 3723
@@ -3422,6 +3781,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
3422 break; 3781 break;
3423 case e1000_phy_82577: 3782 case e1000_phy_82577:
3424 case e1000_phy_82579: 3783 case e1000_phy_82579:
3784 case e1000_phy_i217:
3425 ret_val = e1000_copper_link_setup_82577(hw); 3785 ret_val = e1000_copper_link_setup_82577(hw);
3426 if (ret_val) 3786 if (ret_val)
3427 return ret_val; 3787 return ret_val;
@@ -3668,14 +4028,88 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
3668 * the LPLU setting in the NVM or custom setting. For PCH and newer parts, 4028 * the LPLU setting in the NVM or custom setting. For PCH and newer parts,
3669 * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also 4029 * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
3670 * needs to be written. 4030 * needs to be written.
4031 * Parts that support (and are linked to a partner which support) EEE in
4032 * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
4033 * than 10Mbps w/o EEE.
3671 **/ 4034 **/
3672void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) 4035void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
3673{ 4036{
4037 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
3674 u32 phy_ctrl; 4038 u32 phy_ctrl;
3675 s32 ret_val; 4039 s32 ret_val;
3676 4040
3677 phy_ctrl = er32(PHY_CTRL); 4041 phy_ctrl = er32(PHY_CTRL);
3678 phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE; 4042 phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
4043 if (hw->phy.type == e1000_phy_i217) {
4044 u16 phy_reg;
4045
4046 ret_val = hw->phy.ops.acquire(hw);
4047 if (ret_val)
4048 goto out;
4049
4050 if (!dev_spec->eee_disable) {
4051 u16 eee_advert;
4052
4053 ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR,
4054 I217_EEE_ADVERTISEMENT);
4055 if (ret_val)
4056 goto release;
4057 e1e_rphy_locked(hw, I82579_EMI_DATA, &eee_advert);
4058
4059 /*
4060 * Disable LPLU if both link partners support 100BaseT
4061 * EEE and 100Full is advertised on both ends of the
4062 * link.
4063 */
4064 if ((eee_advert & I217_EEE_100_SUPPORTED) &&
4065 (dev_spec->eee_lp_ability &
4066 I217_EEE_100_SUPPORTED) &&
4067 (hw->phy.autoneg_advertised & ADVERTISE_100_FULL))
4068 phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
4069 E1000_PHY_CTRL_NOND0A_LPLU);
4070 }
4071
4072 /*
4073 * For i217 Intel Rapid Start Technology support,
4074 * when the system is going into Sx and no manageability engine
4075 * is present, the driver must configure proxy to reset only on
4076 * power good. LPI (Low Power Idle) state must also reset only
4077 * on power good, as well as the MTA (Multicast table array).
4078 * The SMBus release must also be disabled on LCD reset.
4079 */
4080 if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
4081
4082 /* Enable proxy to reset only on power good. */
4083 e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg);
4084 phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
4085 e1e_wphy_locked(hw, I217_PROXY_CTRL, phy_reg);
4086
4087 /*
4088 * Set bit enable LPI (EEE) to reset only on
4089 * power good.
4090 */
4091 e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg);
4092 phy_reg |= I217_SxCTRL_MASK;
4093 e1e_wphy_locked(hw, I217_SxCTRL, phy_reg);
4094
4095 /* Disable the SMB release on LCD reset. */
4096 e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
4097 phy_reg &= ~I217_MEMPWR;
4098 e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
4099 }
4100
4101 /*
4102 * Enable MTA to reset for Intel Rapid Start Technology
4103 * Support
4104 */
4105 e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
4106 phy_reg |= I217_CGFREG_MASK;
4107 e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
4108
4109release:
4110 hw->phy.ops.release(hw);
4111 }
4112out:
3679 ew32(PHY_CTRL, phy_ctrl); 4113 ew32(PHY_CTRL, phy_ctrl);
3680 4114
3681 if (hw->mac.type == e1000_ich8lan) 4115 if (hw->mac.type == e1000_ich8lan)
@@ -3704,44 +4138,61 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
3704 * on which PHY resets are not blocked, if the PHY registers cannot be 4138 * on which PHY resets are not blocked, if the PHY registers cannot be
3705 * accessed properly by the s/w toggle the LANPHYPC value to power cycle 4139 * accessed properly by the s/w toggle the LANPHYPC value to power cycle
3706 * the PHY. 4140 * the PHY.
4141 * On i217, setup Intel Rapid Start Technology.
3707 **/ 4142 **/
3708void e1000_resume_workarounds_pchlan(struct e1000_hw *hw) 4143void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
3709{ 4144{
3710 u16 phy_id1, phy_id2;
3711 s32 ret_val; 4145 s32 ret_val;
3712 4146
3713 if ((hw->mac.type != e1000_pch2lan) || 4147 if (hw->mac.type < e1000_pch2lan)
3714 hw->phy.ops.check_reset_block(hw))
3715 return; 4148 return;
3716 4149
3717 ret_val = hw->phy.ops.acquire(hw); 4150 ret_val = e1000_init_phy_workarounds_pchlan(hw);
3718 if (ret_val) { 4151 if (ret_val) {
3719 e_dbg("Failed to acquire PHY semaphore in resume\n"); 4152 e_dbg("Failed to init PHY flow ret_val=%d\n", ret_val);
3720 return; 4153 return;
3721 } 4154 }
3722 4155
3723 /* Test access to the PHY registers by reading the ID regs */ 4156 /*
3724 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_id1); 4157 * For i217 Intel Rapid Start Technology support when the system
3725 if (ret_val) 4158 * is transitioning from Sx and no manageability engine is present
3726 goto release; 4159 * configure SMBus to restore on reset, disable proxy, and enable
3727 ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_id2); 4160 * the reset on MTA (Multicast table array).
3728 if (ret_val) 4161 */
3729 goto release; 4162 if (hw->phy.type == e1000_phy_i217) {
3730 4163 u16 phy_reg;
3731 if (hw->phy.id == ((u32)(phy_id1 << 16) |
3732 (u32)(phy_id2 & PHY_REVISION_MASK)))
3733 goto release;
3734 4164
3735 e1000_toggle_lanphypc_value_ich8lan(hw); 4165 ret_val = hw->phy.ops.acquire(hw);
4166 if (ret_val) {
4167 e_dbg("Failed to setup iRST\n");
4168 return;
4169 }
3736 4170
3737 hw->phy.ops.release(hw); 4171 if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
3738 msleep(50); 4172 /*
3739 e1000_phy_hw_reset(hw); 4173 * Restore clear on SMB if no manageability engine
3740 msleep(50); 4174 * is present
3741 return; 4175 */
4176 ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
4177 if (ret_val)
4178 goto release;
4179 phy_reg |= I217_MEMPWR_MASK;
4180 e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
3742 4181
4182 /* Disable Proxy */
4183 e1e_wphy_locked(hw, I217_PROXY_CTRL, 0);
4184 }
4185 /* Enable reset on MTA */
4186 ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
4187 if (ret_val)
4188 goto release;
4189 phy_reg &= ~I217_CGFREG_MASK;
4190 e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
3743release: 4191release:
3744 hw->phy.ops.release(hw); 4192 if (ret_val)
4193 e_dbg("Error %d in resume workarounds\n", ret_val);
4194 hw->phy.ops.release(hw);
4195 }
3745} 4196}
3746 4197
3747/** 4198/**
@@ -3921,7 +4372,7 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
3921 4372
3922 /* If EEPROM is not marked present, init the IGP 3 PHY manually */ 4373 /* If EEPROM is not marked present, init the IGP 3 PHY manually */
3923 if (hw->mac.type <= e1000_ich9lan) { 4374 if (hw->mac.type <= e1000_ich9lan) {
3924 if (((er32(EECD) & E1000_EECD_PRES) == 0) && 4375 if (!(er32(EECD) & E1000_EECD_PRES) &&
3925 (hw->phy.type == e1000_phy_igp_3)) { 4376 (hw->phy.type == e1000_phy_igp_3)) {
3926 e1000e_phy_init_script_igp3(hw); 4377 e1000e_phy_init_script_igp3(hw);
3927 } 4378 }
@@ -3982,6 +4433,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
3982 /* Clear PHY statistics registers */ 4433 /* Clear PHY statistics registers */
3983 if ((hw->phy.type == e1000_phy_82578) || 4434 if ((hw->phy.type == e1000_phy_82578) ||
3984 (hw->phy.type == e1000_phy_82579) || 4435 (hw->phy.type == e1000_phy_82579) ||
4436 (hw->phy.type == e1000_phy_i217) ||
3985 (hw->phy.type == e1000_phy_82577)) { 4437 (hw->phy.type == e1000_phy_82577)) {
3986 ret_val = hw->phy.ops.acquire(hw); 4438 ret_val = hw->phy.ops.acquire(hw);
3987 if (ret_val) 4439 if (ret_val)
@@ -4026,6 +4478,7 @@ static const struct e1000_mac_operations ich8_mac_ops = {
4026 .setup_physical_interface= e1000_setup_copper_link_ich8lan, 4478 .setup_physical_interface= e1000_setup_copper_link_ich8lan,
4027 /* id_led_init dependent on mac type */ 4479 /* id_led_init dependent on mac type */
4028 .config_collision_dist = e1000e_config_collision_dist_generic, 4480 .config_collision_dist = e1000e_config_collision_dist_generic,
4481 .rar_set = e1000e_rar_set_generic,
4029}; 4482};
4030 4483
4031static const struct e1000_phy_operations ich8_phy_ops = { 4484static const struct e1000_phy_operations ich8_phy_ops = {
@@ -4140,3 +4593,22 @@ const struct e1000_info e1000_pch2_info = {
4140 .phy_ops = &ich8_phy_ops, 4593 .phy_ops = &ich8_phy_ops,
4141 .nvm_ops = &ich8_nvm_ops, 4594 .nvm_ops = &ich8_nvm_ops,
4142}; 4595};
4596
4597const struct e1000_info e1000_pch_lpt_info = {
4598 .mac = e1000_pch_lpt,
4599 .flags = FLAG_IS_ICH
4600 | FLAG_HAS_WOL
4601 | FLAG_HAS_CTRLEXT_ON_LOAD
4602 | FLAG_HAS_AMT
4603 | FLAG_HAS_FLASH
4604 | FLAG_HAS_JUMBO_FRAMES
4605 | FLAG_APME_IN_WUC,
4606 .flags2 = FLAG2_HAS_PHY_STATS
4607 | FLAG2_HAS_EEE,
4608 .pba = 26,
4609 .max_hw_frame_size = DEFAULT_JUMBO,
4610 .get_variants = e1000_get_variants_ich8lan,
4611 .mac_ops = &ich8_mac_ops,
4612 .phy_ops = &ich8_phy_ops,
4613 .nvm_ops = &ich8_nvm_ops,
4614};
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index decad98c1059..026e8b3ab52e 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -143,12 +143,12 @@ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
143 /* Setup the receive address */ 143 /* Setup the receive address */
144 e_dbg("Programming MAC Address into RAR[0]\n"); 144 e_dbg("Programming MAC Address into RAR[0]\n");
145 145
146 e1000e_rar_set(hw, hw->mac.addr, 0); 146 hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
147 147
148 /* Zero out the other (rar_entry_count - 1) receive addresses */ 148 /* Zero out the other (rar_entry_count - 1) receive addresses */
149 e_dbg("Clearing RAR[1-%u]\n", rar_count - 1); 149 e_dbg("Clearing RAR[1-%u]\n", rar_count - 1);
150 for (i = 1; i < rar_count; i++) 150 for (i = 1; i < rar_count; i++)
151 e1000e_rar_set(hw, mac_addr, i); 151 hw->mac.ops.rar_set(hw, mac_addr, i);
152} 152}
153 153
154/** 154/**
@@ -215,13 +215,13 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
215 * same as the normal permanent MAC address stored by the HW into the 215 * same as the normal permanent MAC address stored by the HW into the
216 * RAR. Do this by mapping this address into RAR0. 216 * RAR. Do this by mapping this address into RAR0.
217 */ 217 */
218 e1000e_rar_set(hw, alt_mac_addr, 0); 218 hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
219 219
220 return 0; 220 return 0;
221} 221}
222 222
223/** 223/**
224 * e1000e_rar_set - Set receive address register 224 * e1000e_rar_set_generic - Set receive address register
225 * @hw: pointer to the HW structure 225 * @hw: pointer to the HW structure
226 * @addr: pointer to the receive address 226 * @addr: pointer to the receive address
227 * @index: receive address array register 227 * @index: receive address array register
@@ -229,7 +229,7 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
229 * Sets the receive address array register at index to the address passed 229 * Sets the receive address array register at index to the address passed
230 * in by addr. 230 * in by addr.
231 **/ 231 **/
232void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) 232void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
233{ 233{
234 u32 rar_low, rar_high; 234 u32 rar_low, rar_high;
235 235
@@ -681,7 +681,7 @@ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
681 return ret_val; 681 return ret_val;
682 } 682 }
683 683
684 if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) 684 if (!(nvm_data & NVM_WORD0F_PAUSE_MASK))
685 hw->fc.requested_mode = e1000_fc_none; 685 hw->fc.requested_mode = e1000_fc_none;
686 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR) 686 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR)
687 hw->fc.requested_mode = e1000_fc_tx_pause; 687 hw->fc.requested_mode = e1000_fc_tx_pause;
diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c
index 473f8e711510..bacc950fc684 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.c
+++ b/drivers/net/ethernet/intel/e1000e/manage.c
@@ -85,7 +85,7 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
85 85
86 /* Check that the host interface is enabled. */ 86 /* Check that the host interface is enabled. */
87 hicr = er32(HICR); 87 hicr = er32(HICR);
88 if ((hicr & E1000_HICR_EN) == 0) { 88 if (!(hicr & E1000_HICR_EN)) {
89 e_dbg("E1000_HOST_EN bit disabled.\n"); 89 e_dbg("E1000_HOST_EN bit disabled.\n");
90 return -E1000_ERR_HOST_INTERFACE_COMMAND; 90 return -E1000_ERR_HOST_INTERFACE_COMMAND;
91 } 91 }
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 9520a6ac1f30..a4b0435b00dc 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -56,7 +56,7 @@
56 56
57#define DRV_EXTRAVERSION "-k" 57#define DRV_EXTRAVERSION "-k"
58 58
59#define DRV_VERSION "1.9.5" DRV_EXTRAVERSION 59#define DRV_VERSION "2.0.0" DRV_EXTRAVERSION
60char e1000e_driver_name[] = "e1000e"; 60char e1000e_driver_name[] = "e1000e";
61const char e1000e_driver_version[] = DRV_VERSION; 61const char e1000e_driver_version[] = DRV_VERSION;
62 62
@@ -79,6 +79,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
79 [board_ich10lan] = &e1000_ich10_info, 79 [board_ich10lan] = &e1000_ich10_info,
80 [board_pchlan] = &e1000_pch_info, 80 [board_pchlan] = &e1000_pch_info,
81 [board_pch2lan] = &e1000_pch2_info, 81 [board_pch2lan] = &e1000_pch2_info,
82 [board_pch_lpt] = &e1000_pch_lpt_info,
82}; 83};
83 84
84struct e1000_reg_info { 85struct e1000_reg_info {
@@ -110,14 +111,14 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
110 111
111 /* Rx Registers */ 112 /* Rx Registers */
112 {E1000_RCTL, "RCTL"}, 113 {E1000_RCTL, "RCTL"},
113 {E1000_RDLEN, "RDLEN"}, 114 {E1000_RDLEN(0), "RDLEN"},
114 {E1000_RDH, "RDH"}, 115 {E1000_RDH(0), "RDH"},
115 {E1000_RDT, "RDT"}, 116 {E1000_RDT(0), "RDT"},
116 {E1000_RDTR, "RDTR"}, 117 {E1000_RDTR, "RDTR"},
117 {E1000_RXDCTL(0), "RXDCTL"}, 118 {E1000_RXDCTL(0), "RXDCTL"},
118 {E1000_ERT, "ERT"}, 119 {E1000_ERT, "ERT"},
119 {E1000_RDBAL, "RDBAL"}, 120 {E1000_RDBAL(0), "RDBAL"},
120 {E1000_RDBAH, "RDBAH"}, 121 {E1000_RDBAH(0), "RDBAH"},
121 {E1000_RDFH, "RDFH"}, 122 {E1000_RDFH, "RDFH"},
122 {E1000_RDFT, "RDFT"}, 123 {E1000_RDFT, "RDFT"},
123 {E1000_RDFHS, "RDFHS"}, 124 {E1000_RDFHS, "RDFHS"},
@@ -126,11 +127,11 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
126 127
127 /* Tx Registers */ 128 /* Tx Registers */
128 {E1000_TCTL, "TCTL"}, 129 {E1000_TCTL, "TCTL"},
129 {E1000_TDBAL, "TDBAL"}, 130 {E1000_TDBAL(0), "TDBAL"},
130 {E1000_TDBAH, "TDBAH"}, 131 {E1000_TDBAH(0), "TDBAH"},
131 {E1000_TDLEN, "TDLEN"}, 132 {E1000_TDLEN(0), "TDLEN"},
132 {E1000_TDH, "TDH"}, 133 {E1000_TDH(0), "TDH"},
133 {E1000_TDT, "TDT"}, 134 {E1000_TDT(0), "TDT"},
134 {E1000_TIDV, "TIDV"}, 135 {E1000_TIDV, "TIDV"},
135 {E1000_TXDCTL(0), "TXDCTL"}, 136 {E1000_TXDCTL(0), "TXDCTL"},
136 {E1000_TADV, "TADV"}, 137 {E1000_TADV, "TADV"},
@@ -538,43 +539,15 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
538 adapter->hw_csum_good++; 539 adapter->hw_csum_good++;
539} 540}
540 541
541/**
542 * e1000e_update_tail_wa - helper function for e1000e_update_[rt]dt_wa()
543 * @hw: pointer to the HW structure
544 * @tail: address of tail descriptor register
545 * @i: value to write to tail descriptor register
546 *
547 * When updating the tail register, the ME could be accessing Host CSR
548 * registers at the same time. Normally, this is handled in h/w by an
549 * arbiter but on some parts there is a bug that acknowledges Host accesses
550 * later than it should which could result in the descriptor register to
551 * have an incorrect value. Workaround this by checking the FWSM register
552 * which has bit 24 set while ME is accessing Host CSR registers, wait
553 * if it is set and try again a number of times.
554 **/
555static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, void __iomem *tail,
556 unsigned int i)
557{
558 unsigned int j = 0;
559
560 while ((j++ < E1000_ICH_FWSM_PCIM2PCI_COUNT) &&
561 (er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI))
562 udelay(50);
563
564 writel(i, tail);
565
566 if ((j == E1000_ICH_FWSM_PCIM2PCI_COUNT) && (i != readl(tail)))
567 return E1000_ERR_SWFW_SYNC;
568
569 return 0;
570}
571
572static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i) 542static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
573{ 543{
574 struct e1000_adapter *adapter = rx_ring->adapter; 544 struct e1000_adapter *adapter = rx_ring->adapter;
575 struct e1000_hw *hw = &adapter->hw; 545 struct e1000_hw *hw = &adapter->hw;
546 s32 ret_val = __ew32_prepare(hw);
576 547
577 if (e1000e_update_tail_wa(hw, rx_ring->tail, i)) { 548 writel(i, rx_ring->tail);
549
550 if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) {
578 u32 rctl = er32(RCTL); 551 u32 rctl = er32(RCTL);
579 ew32(RCTL, rctl & ~E1000_RCTL_EN); 552 ew32(RCTL, rctl & ~E1000_RCTL_EN);
580 e_err("ME firmware caused invalid RDT - resetting\n"); 553 e_err("ME firmware caused invalid RDT - resetting\n");
@@ -586,8 +559,11 @@ static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
586{ 559{
587 struct e1000_adapter *adapter = tx_ring->adapter; 560 struct e1000_adapter *adapter = tx_ring->adapter;
588 struct e1000_hw *hw = &adapter->hw; 561 struct e1000_hw *hw = &adapter->hw;
562 s32 ret_val = __ew32_prepare(hw);
563
564 writel(i, tx_ring->tail);
589 565
590 if (e1000e_update_tail_wa(hw, tx_ring->tail, i)) { 566 if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) {
591 u32 tctl = er32(TCTL); 567 u32 tctl = er32(TCTL);
592 ew32(TCTL, tctl & ~E1000_TCTL_EN); 568 ew32(TCTL, tctl & ~E1000_TCTL_EN);
593 e_err("ME firmware caused invalid TDT - resetting\n"); 569 e_err("ME firmware caused invalid TDT - resetting\n");
@@ -1053,7 +1029,8 @@ static void e1000_print_hw_hang(struct work_struct *work)
1053 1029
1054 if (!adapter->tx_hang_recheck && 1030 if (!adapter->tx_hang_recheck &&
1055 (adapter->flags2 & FLAG2_DMA_BURST)) { 1031 (adapter->flags2 & FLAG2_DMA_BURST)) {
1056 /* May be block on write-back, flush and detect again 1032 /*
1033 * May be block on write-back, flush and detect again
1057 * flush pending descriptor writebacks to memory 1034 * flush pending descriptor writebacks to memory
1058 */ 1035 */
1059 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); 1036 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
@@ -1108,6 +1085,10 @@ static void e1000_print_hw_hang(struct work_struct *work)
1108 phy_1000t_status, 1085 phy_1000t_status,
1109 phy_ext_status, 1086 phy_ext_status,
1110 pci_status); 1087 pci_status);
1088
1089 /* Suggest workaround for known h/w issue */
1090 if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
1091 e_err("Try turning off Tx pause (flow control) via ethtool\n");
1111} 1092}
1112 1093
1113/** 1094/**
@@ -1645,7 +1626,10 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
1645 adapter->flags2 &= ~FLAG2_IS_DISCARDING; 1626 adapter->flags2 &= ~FLAG2_IS_DISCARDING;
1646 1627
1647 writel(0, rx_ring->head); 1628 writel(0, rx_ring->head);
1648 writel(0, rx_ring->tail); 1629 if (rx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
1630 e1000e_update_rdt_wa(rx_ring, 0);
1631 else
1632 writel(0, rx_ring->tail);
1649} 1633}
1650 1634
1651static void e1000e_downshift_workaround(struct work_struct *work) 1635static void e1000e_downshift_workaround(struct work_struct *work)
@@ -2318,7 +2302,10 @@ static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
2318 tx_ring->next_to_clean = 0; 2302 tx_ring->next_to_clean = 0;
2319 2303
2320 writel(0, tx_ring->head); 2304 writel(0, tx_ring->head);
2321 writel(0, tx_ring->tail); 2305 if (tx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
2306 e1000e_update_tdt_wa(tx_ring, 0);
2307 else
2308 writel(0, tx_ring->tail);
2322} 2309}
2323 2310
2324/** 2311/**
@@ -2530,33 +2517,31 @@ err:
2530} 2517}
2531 2518
2532/** 2519/**
2533 * e1000_clean - NAPI Rx polling callback 2520 * e1000e_poll - NAPI Rx polling callback
2534 * @napi: struct associated with this polling callback 2521 * @napi: struct associated with this polling callback
2535 * @budget: amount of packets driver is allowed to process this poll 2522 * @weight: number of packets driver is allowed to process this poll
2536 **/ 2523 **/
2537static int e1000_clean(struct napi_struct *napi, int budget) 2524static int e1000e_poll(struct napi_struct *napi, int weight)
2538{ 2525{
2539 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); 2526 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
2527 napi);
2540 struct e1000_hw *hw = &adapter->hw; 2528 struct e1000_hw *hw = &adapter->hw;
2541 struct net_device *poll_dev = adapter->netdev; 2529 struct net_device *poll_dev = adapter->netdev;
2542 int tx_cleaned = 1, work_done = 0; 2530 int tx_cleaned = 1, work_done = 0;
2543 2531
2544 adapter = netdev_priv(poll_dev); 2532 adapter = netdev_priv(poll_dev);
2545 2533
2546 if (adapter->msix_entries && 2534 if (!adapter->msix_entries ||
2547 !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) 2535 (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
2548 goto clean_rx; 2536 tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
2549
2550 tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
2551 2537
2552clean_rx: 2538 adapter->clean_rx(adapter->rx_ring, &work_done, weight);
2553 adapter->clean_rx(adapter->rx_ring, &work_done, budget);
2554 2539
2555 if (!tx_cleaned) 2540 if (!tx_cleaned)
2556 work_done = budget; 2541 work_done = weight;
2557 2542
2558 /* If budget not fully consumed, exit the polling mode */ 2543 /* If weight not fully consumed, exit the polling mode */
2559 if (work_done < budget) { 2544 if (work_done < weight) {
2560 if (adapter->itr_setting & 3) 2545 if (adapter->itr_setting & 3)
2561 e1000_set_itr(adapter); 2546 e1000_set_itr(adapter);
2562 napi_complete(napi); 2547 napi_complete(napi);
@@ -2800,13 +2785,13 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
2800 /* Setup the HW Tx Head and Tail descriptor pointers */ 2785 /* Setup the HW Tx Head and Tail descriptor pointers */
2801 tdba = tx_ring->dma; 2786 tdba = tx_ring->dma;
2802 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc); 2787 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
2803 ew32(TDBAL, (tdba & DMA_BIT_MASK(32))); 2788 ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
2804 ew32(TDBAH, (tdba >> 32)); 2789 ew32(TDBAH(0), (tdba >> 32));
2805 ew32(TDLEN, tdlen); 2790 ew32(TDLEN(0), tdlen);
2806 ew32(TDH, 0); 2791 ew32(TDH(0), 0);
2807 ew32(TDT, 0); 2792 ew32(TDT(0), 0);
2808 tx_ring->head = adapter->hw.hw_addr + E1000_TDH; 2793 tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0);
2809 tx_ring->tail = adapter->hw.hw_addr + E1000_TDT; 2794 tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0);
2810 2795
2811 /* Set the Tx Interrupt Delay register */ 2796 /* Set the Tx Interrupt Delay register */
2812 ew32(TIDV, adapter->tx_int_delay); 2797 ew32(TIDV, adapter->tx_int_delay);
@@ -2879,8 +2864,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2879 u32 rctl, rfctl; 2864 u32 rctl, rfctl;
2880 u32 pages = 0; 2865 u32 pages = 0;
2881 2866
2882 /* Workaround Si errata on 82579 - configure jumbo frame flow */ 2867 /* Workaround Si errata on PCHx - configure jumbo frame flow */
2883 if (hw->mac.type == e1000_pch2lan) { 2868 if (hw->mac.type >= e1000_pch2lan) {
2884 s32 ret_val; 2869 s32 ret_val;
2885 2870
2886 if (adapter->netdev->mtu > ETH_DATA_LEN) 2871 if (adapter->netdev->mtu > ETH_DATA_LEN)
@@ -2955,6 +2940,7 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2955 /* Enable Extended Status in all Receive Descriptors */ 2940 /* Enable Extended Status in all Receive Descriptors */
2956 rfctl = er32(RFCTL); 2941 rfctl = er32(RFCTL);
2957 rfctl |= E1000_RFCTL_EXTEN; 2942 rfctl |= E1000_RFCTL_EXTEN;
2943 ew32(RFCTL, rfctl);
2958 2944
2959 /* 2945 /*
2960 * 82571 and greater support packet-split where the protocol 2946 * 82571 and greater support packet-split where the protocol
@@ -2980,13 +2966,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2980 if (adapter->rx_ps_pages) { 2966 if (adapter->rx_ps_pages) {
2981 u32 psrctl = 0; 2967 u32 psrctl = 0;
2982 2968
2983 /*
2984 * disable packet split support for IPv6 extension headers,
2985 * because some malformed IPv6 headers can hang the Rx
2986 */
2987 rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
2988 E1000_RFCTL_NEW_IPV6_EXT_DIS);
2989
2990 /* Enable Packet split descriptors */ 2969 /* Enable Packet split descriptors */
2991 rctl |= E1000_RCTL_DTYP_PS; 2970 rctl |= E1000_RCTL_DTYP_PS;
2992 2971
@@ -3025,7 +3004,6 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
3025 */ 3004 */
3026 } 3005 }
3027 3006
3028 ew32(RFCTL, rfctl);
3029 ew32(RCTL, rctl); 3007 ew32(RCTL, rctl);
3030 /* just started the receive unit, no need to restart */ 3008 /* just started the receive unit, no need to restart */
3031 adapter->flags &= ~FLAG_RX_RESTART_NOW; 3009 adapter->flags &= ~FLAG_RX_RESTART_NOW;
@@ -3110,13 +3088,13 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
3110 * the Base and Length of the Rx Descriptor Ring 3088 * the Base and Length of the Rx Descriptor Ring
3111 */ 3089 */
3112 rdba = rx_ring->dma; 3090 rdba = rx_ring->dma;
3113 ew32(RDBAL, (rdba & DMA_BIT_MASK(32))); 3091 ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
3114 ew32(RDBAH, (rdba >> 32)); 3092 ew32(RDBAH(0), (rdba >> 32));
3115 ew32(RDLEN, rdlen); 3093 ew32(RDLEN(0), rdlen);
3116 ew32(RDH, 0); 3094 ew32(RDH(0), 0);
3117 ew32(RDT, 0); 3095 ew32(RDT(0), 0);
3118 rx_ring->head = adapter->hw.hw_addr + E1000_RDH; 3096 rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0);
3119 rx_ring->tail = adapter->hw.hw_addr + E1000_RDT; 3097 rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0);
3120 3098
3121 /* Enable Receive Checksum Offload for TCP and UDP */ 3099 /* Enable Receive Checksum Offload for TCP and UDP */
3122 rxcsum = er32(RXCSUM); 3100 rxcsum = er32(RXCSUM);
@@ -3229,7 +3207,7 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev)
3229 netdev_for_each_uc_addr(ha, netdev) { 3207 netdev_for_each_uc_addr(ha, netdev) {
3230 if (!rar_entries) 3208 if (!rar_entries)
3231 break; 3209 break;
3232 e1000e_rar_set(hw, ha->addr, rar_entries--); 3210 hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
3233 count++; 3211 count++;
3234 } 3212 }
3235 } 3213 }
@@ -3510,6 +3488,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
3510 fc->refresh_time = 0x1000; 3488 fc->refresh_time = 0x1000;
3511 break; 3489 break;
3512 case e1000_pch2lan: 3490 case e1000_pch2lan:
3491 case e1000_pch_lpt:
3513 fc->high_water = 0x05C20; 3492 fc->high_water = 0x05C20;
3514 fc->low_water = 0x05048; 3493 fc->low_water = 0x05048;
3515 fc->pause_time = 0x0650; 3494 fc->pause_time = 0x0650;
@@ -4038,6 +4017,7 @@ static int e1000_close(struct net_device *netdev)
4038static int e1000_set_mac(struct net_device *netdev, void *p) 4017static int e1000_set_mac(struct net_device *netdev, void *p)
4039{ 4018{
4040 struct e1000_adapter *adapter = netdev_priv(netdev); 4019 struct e1000_adapter *adapter = netdev_priv(netdev);
4020 struct e1000_hw *hw = &adapter->hw;
4041 struct sockaddr *addr = p; 4021 struct sockaddr *addr = p;
4042 4022
4043 if (!is_valid_ether_addr(addr->sa_data)) 4023 if (!is_valid_ether_addr(addr->sa_data))
@@ -4046,7 +4026,7 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
4046 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 4026 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
4047 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); 4027 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
4048 4028
4049 e1000e_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); 4029 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
4050 4030
4051 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) { 4031 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
4052 /* activate the work around */ 4032 /* activate the work around */
@@ -4060,9 +4040,8 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
4060 * are dropped. Eventually the LAA will be in RAR[0] and 4040 * are dropped. Eventually the LAA will be in RAR[0] and
4061 * RAR[14] 4041 * RAR[14]
4062 */ 4042 */
4063 e1000e_rar_set(&adapter->hw, 4043 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr,
4064 adapter->hw.mac.addr, 4044 adapter->hw.mac.rar_entry_count - 1);
4065 adapter->hw.mac.rar_entry_count - 1);
4066 } 4045 }
4067 4046
4068 return 0; 4047 return 0;
@@ -4641,7 +4620,7 @@ link_up:
4641 * reset from the other port. Set the appropriate LAA in RAR[0] 4620 * reset from the other port. Set the appropriate LAA in RAR[0]
4642 */ 4621 */
4643 if (e1000e_get_laa_state_82571(hw)) 4622 if (e1000e_get_laa_state_82571(hw))
4644 e1000e_rar_set(hw, adapter->hw.mac.addr, 0); 4623 hw->mac.ops.rar_set(hw, adapter->hw.mac.addr, 0);
4645 4624
4646 if (adapter->flags2 & FLAG2_CHECK_PHY_HANG) 4625 if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
4647 e1000e_check_82574_phy_workaround(adapter); 4626 e1000e_check_82574_phy_workaround(adapter);
@@ -5151,6 +5130,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5151 /* if count is 0 then mapping error has occurred */ 5130 /* if count is 0 then mapping error has occurred */
5152 count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss); 5131 count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss);
5153 if (count) { 5132 if (count) {
5133 skb_tx_timestamp(skb);
5134
5154 netdev_sent_queue(netdev, skb->len); 5135 netdev_sent_queue(netdev, skb->len);
5155 e1000_tx_queue(tx_ring, tx_flags, count); 5136 e1000_tx_queue(tx_ring, tx_flags, count);
5156 /* Make sure there is space in the ring for the next send. */ 5137 /* Make sure there is space in the ring for the next send. */
@@ -5285,22 +5266,14 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
5285 return -EINVAL; 5266 return -EINVAL;
5286 } 5267 }
5287 5268
5288 /* Jumbo frame workaround on 82579 requires CRC be stripped */ 5269 /* Jumbo frame workaround on 82579 and newer requires CRC be stripped */
5289 if ((adapter->hw.mac.type == e1000_pch2lan) && 5270 if ((adapter->hw.mac.type >= e1000_pch2lan) &&
5290 !(adapter->flags2 & FLAG2_CRC_STRIPPING) && 5271 !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
5291 (new_mtu > ETH_DATA_LEN)) { 5272 (new_mtu > ETH_DATA_LEN)) {
5292 e_err("Jumbo Frames not supported on 82579 when CRC stripping is disabled.\n"); 5273 e_err("Jumbo Frames not supported on this device when CRC stripping is disabled.\n");
5293 return -EINVAL; 5274 return -EINVAL;
5294 } 5275 }
5295 5276
5296 /* 82573 Errata 17 */
5297 if (((adapter->hw.mac.type == e1000_82573) ||
5298 (adapter->hw.mac.type == e1000_82574)) &&
5299 (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) {
5300 adapter->flags2 |= FLAG2_DISABLE_ASPM_L1;
5301 e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
5302 }
5303
5304 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) 5277 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
5305 usleep_range(1000, 2000); 5278 usleep_range(1000, 2000);
5306 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ 5279 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
@@ -5694,7 +5667,7 @@ static int __e1000_resume(struct pci_dev *pdev)
5694 return err; 5667 return err;
5695 } 5668 }
5696 5669
5697 if (hw->mac.type == e1000_pch2lan) 5670 if (hw->mac.type >= e1000_pch2lan)
5698 e1000_resume_workarounds_pchlan(&adapter->hw); 5671 e1000_resume_workarounds_pchlan(&adapter->hw);
5699 5672
5700 e1000e_power_up_phy(adapter); 5673 e1000e_power_up_phy(adapter);
@@ -6226,7 +6199,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
6226 netdev->netdev_ops = &e1000e_netdev_ops; 6199 netdev->netdev_ops = &e1000e_netdev_ops;
6227 e1000e_set_ethtool_ops(netdev); 6200 e1000e_set_ethtool_ops(netdev);
6228 netdev->watchdog_timeo = 5 * HZ; 6201 netdev->watchdog_timeo = 5 * HZ;
6229 netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); 6202 netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64);
6230 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); 6203 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
6231 6204
6232 netdev->mem_start = mmio_start; 6205 netdev->mem_start = mmio_start;
@@ -6593,6 +6566,9 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
6593 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan }, 6566 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
6594 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan }, 6567 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },
6595 6568
6569 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt },
6570 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt },
6571
6596 { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ 6572 { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
6597}; 6573};
6598MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); 6574MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index 16adeb9418a8..42444d14aae6 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -166,8 +166,8 @@ E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lea
166 * 166 *
167 * Default Value: 1 (enabled) 167 * Default Value: 1 (enabled)
168 */ 168 */
169E1000_PARAM(CrcStripping, "Enable CRC Stripping, disable if your BMC needs " \ 169E1000_PARAM(CrcStripping,
170 "the CRC"); 170 "Enable CRC Stripping, disable if your BMC needs the CRC");
171 171
172struct e1000_option { 172struct e1000_option {
173 enum { enable_option, range_option, list_option } type; 173 enum { enable_option, range_option, list_option } type;
@@ -344,16 +344,50 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
344 344
345 if (num_InterruptThrottleRate > bd) { 345 if (num_InterruptThrottleRate > bd) {
346 adapter->itr = InterruptThrottleRate[bd]; 346 adapter->itr = InterruptThrottleRate[bd];
347 347 switch (adapter->itr) {
348 /* 348 case 0:
349 * Make sure a message is printed for non-special 349 e_info("%s turned off\n", opt.name);
350 * values. And in case of an invalid option, display 350 break;
351 * warning, use default and got through itr/itr_setting 351 case 1:
352 * adjustment logic below 352 e_info("%s set to dynamic mode\n", opt.name);
353 */ 353 adapter->itr_setting = adapter->itr;
354 if ((adapter->itr > 4) && 354 adapter->itr = 20000;
355 e1000_validate_option(&adapter->itr, &opt, adapter)) 355 break;
356 adapter->itr = opt.def; 356 case 3:
357 e_info("%s set to dynamic conservative mode\n",
358 opt.name);
359 adapter->itr_setting = adapter->itr;
360 adapter->itr = 20000;
361 break;
362 case 4:
363 e_info("%s set to simplified (2000-8000 ints) mode\n",
364 opt.name);
365 adapter->itr_setting = 4;
366 break;
367 default:
368 /*
369 * Save the setting, because the dynamic bits
370 * change itr.
371 */
372 if (e1000_validate_option(&adapter->itr, &opt,
373 adapter) &&
374 (adapter->itr == 3)) {
375 /*
376 * In case of invalid user value,
377 * default to conservative mode.
378 */
379 adapter->itr_setting = adapter->itr;
380 adapter->itr = 20000;
381 } else {
382 /*
383 * Clear the lower two bits because
384 * they are used as control.
385 */
386 adapter->itr_setting =
387 adapter->itr & ~3;
388 }
389 break;
390 }
357 } else { 391 } else {
358 /* 392 /*
359 * If no option specified, use default value and go 393 * If no option specified, use default value and go
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 35b45578c604..0334d013bc3c 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -639,6 +639,45 @@ s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
639} 639}
640 640
641/** 641/**
642 * e1000_set_master_slave_mode - Setup PHY for Master/slave mode
643 * @hw: pointer to the HW structure
644 *
645 * Sets up Master/slave mode
646 **/
647static s32 e1000_set_master_slave_mode(struct e1000_hw *hw)
648{
649 s32 ret_val;
650 u16 phy_data;
651
652 /* Resolve Master/Slave mode */
653 ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &phy_data);
654 if (ret_val)
655 return ret_val;
656
657 /* load defaults for future use */
658 hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ?
659 ((phy_data & CR_1000T_MS_VALUE) ?
660 e1000_ms_force_master : e1000_ms_force_slave) : e1000_ms_auto;
661
662 switch (hw->phy.ms_type) {
663 case e1000_ms_force_master:
664 phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
665 break;
666 case e1000_ms_force_slave:
667 phy_data |= CR_1000T_MS_ENABLE;
668 phy_data &= ~(CR_1000T_MS_VALUE);
669 break;
670 case e1000_ms_auto:
671 phy_data &= ~CR_1000T_MS_ENABLE;
672 /* fall-through */
673 default:
674 break;
675 }
676
677 return e1e_wphy(hw, PHY_1000T_CTRL, phy_data);
678}
679
680/**
642 * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link 681 * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link
643 * @hw: pointer to the HW structure 682 * @hw: pointer to the HW structure
644 * 683 *
@@ -659,7 +698,11 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
659 /* Enable downshift */ 698 /* Enable downshift */
660 phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; 699 phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
661 700
662 return e1e_wphy(hw, I82577_CFG_REG, phy_data); 701 ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data);
702 if (ret_val)
703 return ret_val;
704
705 return e1000_set_master_slave_mode(hw);
663} 706}
664 707
665/** 708/**
@@ -718,12 +761,28 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
718 * 1 - Enabled 761 * 1 - Enabled
719 */ 762 */
720 phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; 763 phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
721 if (phy->disable_polarity_correction == 1) 764 if (phy->disable_polarity_correction)
722 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; 765 phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
723 766
724 /* Enable downshift on BM (disabled by default) */ 767 /* Enable downshift on BM (disabled by default) */
725 if (phy->type == e1000_phy_bm) 768 if (phy->type == e1000_phy_bm) {
769 /* For 82574/82583, first disable then enable downshift */
770 if (phy->id == BME1000_E_PHY_ID_R2) {
771 phy_data &= ~BME1000_PSCR_ENABLE_DOWNSHIFT;
772 ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL,
773 phy_data);
774 if (ret_val)
775 return ret_val;
776 /* Commit the changes. */
777 ret_val = e1000e_commit_phy(hw);
778 if (ret_val) {
779 e_dbg("Error committing the PHY changes\n");
780 return ret_val;
781 }
782 }
783
726 phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT; 784 phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT;
785 }
727 786
728 ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); 787 ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
729 if (ret_val) 788 if (ret_val)
@@ -879,31 +938,7 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw)
879 return ret_val; 938 return ret_val;
880 } 939 }
881 940
882 ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data); 941 ret_val = e1000_set_master_slave_mode(hw);
883 if (ret_val)
884 return ret_val;
885
886 /* load defaults for future use */
887 phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ?
888 ((data & CR_1000T_MS_VALUE) ?
889 e1000_ms_force_master :
890 e1000_ms_force_slave) :
891 e1000_ms_auto;
892
893 switch (phy->ms_type) {
894 case e1000_ms_force_master:
895 data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
896 break;
897 case e1000_ms_force_slave:
898 data |= CR_1000T_MS_ENABLE;
899 data &= ~(CR_1000T_MS_VALUE);
900 break;
901 case e1000_ms_auto:
902 data &= ~CR_1000T_MS_ENABLE;
903 default:
904 break;
905 }
906 ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data);
907 } 942 }
908 943
909 return ret_val; 944 return ret_val;
@@ -1090,7 +1125,7 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
1090 * If autoneg_advertised is zero, we assume it was not defaulted 1125 * If autoneg_advertised is zero, we assume it was not defaulted
1091 * by the calling code so we set to advertise full capability. 1126 * by the calling code so we set to advertise full capability.
1092 */ 1127 */
1093 if (phy->autoneg_advertised == 0) 1128 if (!phy->autoneg_advertised)
1094 phy->autoneg_advertised = phy->autoneg_mask; 1129 phy->autoneg_advertised = phy->autoneg_mask;
1095 1130
1096 e_dbg("Reconfiguring auto-neg advertisement params\n"); 1131 e_dbg("Reconfiguring auto-neg advertisement params\n");
@@ -1596,7 +1631,7 @@ s32 e1000e_check_downshift(struct e1000_hw *hw)
1596 ret_val = e1e_rphy(hw, offset, &phy_data); 1631 ret_val = e1e_rphy(hw, offset, &phy_data);
1597 1632
1598 if (!ret_val) 1633 if (!ret_val)
1599 phy->speed_downgraded = (phy_data & mask); 1634 phy->speed_downgraded = !!(phy_data & mask);
1600 1635
1601 return ret_val; 1636 return ret_val;
1602} 1637}
@@ -1925,8 +1960,8 @@ s32 e1000e_get_phy_info_m88(struct e1000_hw *hw)
1925 if (ret_val) 1960 if (ret_val)
1926 return ret_val; 1961 return ret_val;
1927 1962
1928 phy->polarity_correction = (phy_data & 1963 phy->polarity_correction = !!(phy_data &
1929 M88E1000_PSCR_POLARITY_REVERSAL); 1964 M88E1000_PSCR_POLARITY_REVERSAL);
1930 1965
1931 ret_val = e1000_check_polarity_m88(hw); 1966 ret_val = e1000_check_polarity_m88(hw);
1932 if (ret_val) 1967 if (ret_val)
@@ -1936,7 +1971,7 @@ s32 e1000e_get_phy_info_m88(struct e1000_hw *hw)
1936 if (ret_val) 1971 if (ret_val)
1937 return ret_val; 1972 return ret_val;
1938 1973
1939 phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX); 1974 phy->is_mdix = !!(phy_data & M88E1000_PSSR_MDIX);
1940 1975
1941 if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { 1976 if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
1942 ret_val = e1000_get_cable_length(hw); 1977 ret_val = e1000_get_cable_length(hw);
@@ -1999,7 +2034,7 @@ s32 e1000e_get_phy_info_igp(struct e1000_hw *hw)
1999 if (ret_val) 2034 if (ret_val)
2000 return ret_val; 2035 return ret_val;
2001 2036
2002 phy->is_mdix = (data & IGP01E1000_PSSR_MDIX); 2037 phy->is_mdix = !!(data & IGP01E1000_PSSR_MDIX);
2003 2038
2004 if ((data & IGP01E1000_PSSR_SPEED_MASK) == 2039 if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
2005 IGP01E1000_PSSR_SPEED_1000MBPS) { 2040 IGP01E1000_PSSR_SPEED_1000MBPS) {
@@ -2052,8 +2087,7 @@ s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
2052 ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data); 2087 ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data);
2053 if (ret_val) 2088 if (ret_val)
2054 return ret_val; 2089 return ret_val;
2055 phy->polarity_correction = (data & IFE_PSC_AUTO_POLARITY_DISABLE) 2090 phy->polarity_correction = !(data & IFE_PSC_AUTO_POLARITY_DISABLE);
2056 ? false : true;
2057 2091
2058 if (phy->polarity_correction) { 2092 if (phy->polarity_correction) {
2059 ret_val = e1000_check_polarity_ife(hw); 2093 ret_val = e1000_check_polarity_ife(hw);
@@ -2070,7 +2104,7 @@ s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
2070 if (ret_val) 2104 if (ret_val)
2071 return ret_val; 2105 return ret_val;
2072 2106
2073 phy->is_mdix = (data & IFE_PMC_MDIX_STATUS) ? true : false; 2107 phy->is_mdix = !!(data & IFE_PMC_MDIX_STATUS);
2074 2108
2075 /* The following parameters are undefined for 10/100 operation. */ 2109 /* The following parameters are undefined for 10/100 operation. */
2076 phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; 2110 phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
@@ -2320,6 +2354,9 @@ enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id)
2320 case I82579_E_PHY_ID: 2354 case I82579_E_PHY_ID:
2321 phy_type = e1000_phy_82579; 2355 phy_type = e1000_phy_82579;
2322 break; 2356 break;
2357 case I217_E_PHY_ID:
2358 phy_type = e1000_phy_i217;
2359 break;
2323 default: 2360 default:
2324 phy_type = e1000_phy_unknown; 2361 phy_type = e1000_phy_unknown;
2325 break; 2362 break;
@@ -2979,7 +3016,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
2979 if ((hw->phy.type == e1000_phy_82578) && 3016 if ((hw->phy.type == e1000_phy_82578) &&
2980 (hw->phy.revision >= 1) && 3017 (hw->phy.revision >= 1) &&
2981 (hw->phy.addr == 2) && 3018 (hw->phy.addr == 2) &&
2982 ((MAX_PHY_REG_ADDRESS & reg) == 0) && (data & (1 << 11))) { 3019 !(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) {
2983 u16 data2 = 0x7EFF; 3020 u16 data2 = 0x7EFF;
2984 ret_val = e1000_access_phy_debug_regs_hv(hw, 3021 ret_val = e1000_access_phy_debug_regs_hv(hw,
2985 (1 << 6) | 0x3, 3022 (1 << 6) | 0x3,
@@ -3265,7 +3302,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
3265 if (ret_val) 3302 if (ret_val)
3266 return ret_val; 3303 return ret_val;
3267 3304
3268 phy->is_mdix = (data & I82577_PHY_STATUS2_MDIX) ? true : false; 3305 phy->is_mdix = !!(data & I82577_PHY_STATUS2_MDIX);
3269 3306
3270 if ((data & I82577_PHY_STATUS2_SPEED_MASK) == 3307 if ((data & I82577_PHY_STATUS2_SPEED_MASK) ==
3271 I82577_PHY_STATUS2_SPEED_1000MBPS) { 3308 I82577_PHY_STATUS2_SPEED_1000MBPS) {
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index 6565c463185c..4bd16e266414 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -35,3 +35,4 @@ obj-$(CONFIG_IGB) += igb.o
35igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \ 35igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \
36 e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o 36 e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o
37 37
38igb-$(CONFIG_IGB_PTP) += igb_ptp.o
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 8e33bdd33eea..3758ad246742 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -35,8 +35,8 @@
35#include "e1000_82575.h" 35#include "e1000_82575.h"
36 36
37#include <linux/clocksource.h> 37#include <linux/clocksource.h>
38#include <linux/timecompare.h>
39#include <linux/net_tstamp.h> 38#include <linux/net_tstamp.h>
39#include <linux/ptp_clock_kernel.h>
40#include <linux/bitops.h> 40#include <linux/bitops.h>
41#include <linux/if_vlan.h> 41#include <linux/if_vlan.h>
42 42
@@ -328,9 +328,6 @@ struct igb_adapter {
328 328
329 /* OS defined structs */ 329 /* OS defined structs */
330 struct pci_dev *pdev; 330 struct pci_dev *pdev;
331 struct cyclecounter cycles;
332 struct timecounter clock;
333 struct timecompare compare;
334 struct hwtstamp_config hwtstamp_config; 331 struct hwtstamp_config hwtstamp_config;
335 332
336 spinlock_t stats64_lock; 333 spinlock_t stats64_lock;
@@ -364,6 +361,13 @@ struct igb_adapter {
364 u32 wvbr; 361 u32 wvbr;
365 int node; 362 int node;
366 u32 *shadow_vfta; 363 u32 *shadow_vfta;
364
365 struct ptp_clock *ptp_clock;
366 struct ptp_clock_info caps;
367 struct delayed_work overflow_work;
368 spinlock_t tmreg_lock;
369 struct cyclecounter cc;
370 struct timecounter tc;
367}; 371};
368 372
369#define IGB_FLAG_HAS_MSI (1 << 0) 373#define IGB_FLAG_HAS_MSI (1 << 0)
@@ -378,7 +382,6 @@ struct igb_adapter {
378#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */ 382#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */
379 383
380#define IGB_82576_TSYNC_SHIFT 19 384#define IGB_82576_TSYNC_SHIFT 19
381#define IGB_82580_TSYNC_SHIFT 24
382#define IGB_TS_HDR_LEN 16 385#define IGB_TS_HDR_LEN 16
383enum e1000_state_t { 386enum e1000_state_t {
384 __IGB_TESTING, 387 __IGB_TESTING,
@@ -414,7 +417,15 @@ extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
414extern bool igb_has_link(struct igb_adapter *adapter); 417extern bool igb_has_link(struct igb_adapter *adapter);
415extern void igb_set_ethtool_ops(struct net_device *); 418extern void igb_set_ethtool_ops(struct net_device *);
416extern void igb_power_up_link(struct igb_adapter *); 419extern void igb_power_up_link(struct igb_adapter *);
420#ifdef CONFIG_IGB_PTP
421extern void igb_ptp_init(struct igb_adapter *adapter);
422extern void igb_ptp_remove(struct igb_adapter *adapter);
423
424extern void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
425 struct skb_shared_hwtstamps *hwtstamps,
426 u64 systim);
417 427
428#endif
418static inline s32 igb_reset_phy(struct e1000_hw *hw) 429static inline s32 igb_reset_phy(struct e1000_hw *hw)
419{ 430{
420 if (hw->phy.ops.reset) 431 if (hw->phy.ops.reset)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index d22350055285..80d52d2dfea3 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -60,8 +60,8 @@
60#include "igb.h" 60#include "igb.h"
61 61
62#define MAJ 3 62#define MAJ 3
63#define MIN 2 63#define MIN 4
64#define BUILD 10 64#define BUILD 7
65#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ 65#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
66__stringify(BUILD) "-k" 66__stringify(BUILD) "-k"
67char igb_driver_name[] = "igb"; 67char igb_driver_name[] = "igb";
@@ -114,7 +114,6 @@ static void igb_free_all_rx_resources(struct igb_adapter *);
114static void igb_setup_mrqc(struct igb_adapter *); 114static void igb_setup_mrqc(struct igb_adapter *);
115static int igb_probe(struct pci_dev *, const struct pci_device_id *); 115static int igb_probe(struct pci_dev *, const struct pci_device_id *);
116static void __devexit igb_remove(struct pci_dev *pdev); 116static void __devexit igb_remove(struct pci_dev *pdev);
117static void igb_init_hw_timer(struct igb_adapter *adapter);
118static int igb_sw_init(struct igb_adapter *); 117static int igb_sw_init(struct igb_adapter *);
119static int igb_open(struct net_device *); 118static int igb_open(struct net_device *);
120static int igb_close(struct net_device *); 119static int igb_close(struct net_device *);
@@ -565,33 +564,6 @@ exit:
565 return; 564 return;
566} 565}
567 566
568
569/**
570 * igb_read_clock - read raw cycle counter (to be used by time counter)
571 */
572static cycle_t igb_read_clock(const struct cyclecounter *tc)
573{
574 struct igb_adapter *adapter =
575 container_of(tc, struct igb_adapter, cycles);
576 struct e1000_hw *hw = &adapter->hw;
577 u64 stamp = 0;
578 int shift = 0;
579
580 /*
581 * The timestamp latches on lowest register read. For the 82580
582 * the lowest register is SYSTIMR instead of SYSTIML. However we never
583 * adjusted TIMINCA so SYSTIMR will just read as all 0s so ignore it.
584 */
585 if (hw->mac.type >= e1000_82580) {
586 stamp = rd32(E1000_SYSTIMR) >> 8;
587 shift = IGB_82580_TSYNC_SHIFT;
588 }
589
590 stamp |= (u64)rd32(E1000_SYSTIML) << shift;
591 stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
592 return stamp;
593}
594
595/** 567/**
596 * igb_get_hw_dev - return device 568 * igb_get_hw_dev - return device
597 * used by hardware layer to print debugging information 569 * used by hardware layer to print debugging information
@@ -1743,6 +1715,13 @@ void igb_reset(struct igb_adapter *adapter)
1743 if (hw->mac.ops.init_hw(hw)) 1715 if (hw->mac.ops.init_hw(hw))
1744 dev_err(&pdev->dev, "Hardware Error\n"); 1716 dev_err(&pdev->dev, "Hardware Error\n");
1745 1717
1718 /*
1719 * Flow control settings reset on hardware reset, so guarantee flow
1720 * control is off when forcing speed.
1721 */
1722 if (!hw->mac.autoneg)
1723 igb_force_mac_fc(hw);
1724
1746 igb_init_dmac(adapter, pba); 1725 igb_init_dmac(adapter, pba);
1747 if (!netif_running(adapter->netdev)) 1726 if (!netif_running(adapter->netdev))
1748 igb_power_down_link(adapter); 1727 igb_power_down_link(adapter);
@@ -2110,9 +2089,11 @@ static int __devinit igb_probe(struct pci_dev *pdev,
2110 } 2089 }
2111 2090
2112#endif 2091#endif
2092#ifdef CONFIG_IGB_PTP
2113 /* do hw tstamp init after resetting */ 2093 /* do hw tstamp init after resetting */
2114 igb_init_hw_timer(adapter); 2094 igb_ptp_init(adapter);
2115 2095
2096#endif
2116 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); 2097 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
2117 /* print bus type/speed/width info */ 2098 /* print bus type/speed/width info */
2118 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", 2099 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
@@ -2184,7 +2165,10 @@ static void __devexit igb_remove(struct pci_dev *pdev)
2184 struct e1000_hw *hw = &adapter->hw; 2165 struct e1000_hw *hw = &adapter->hw;
2185 2166
2186 pm_runtime_get_noresume(&pdev->dev); 2167 pm_runtime_get_noresume(&pdev->dev);
2168#ifdef CONFIG_IGB_PTP
2169 igb_ptp_remove(adapter);
2187 2170
2171#endif
2188 /* 2172 /*
2189 * The watchdog timer may be rescheduled, so explicitly 2173 * The watchdog timer may be rescheduled, so explicitly
2190 * disable watchdog from being rescheduled. 2174 * disable watchdog from being rescheduled.
@@ -2304,112 +2288,6 @@ out:
2304} 2288}
2305 2289
2306/** 2290/**
2307 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
2308 * @adapter: board private structure to initialize
2309 *
2310 * igb_init_hw_timer initializes the function pointer and values for the hw
2311 * timer found in hardware.
2312 **/
2313static void igb_init_hw_timer(struct igb_adapter *adapter)
2314{
2315 struct e1000_hw *hw = &adapter->hw;
2316
2317 switch (hw->mac.type) {
2318 case e1000_i350:
2319 case e1000_82580:
2320 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
2321 adapter->cycles.read = igb_read_clock;
2322 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
2323 adapter->cycles.mult = 1;
2324 /*
2325 * The 82580 timesync updates the system timer every 8ns by 8ns
2326 * and the value cannot be shifted. Instead we need to shift
2327 * the registers to generate a 64bit timer value. As a result
2328 * SYSTIMR/L/H, TXSTMPL/H, RXSTMPL/H all have to be shifted by
2329 * 24 in order to generate a larger value for synchronization.
2330 */
2331 adapter->cycles.shift = IGB_82580_TSYNC_SHIFT;
2332 /* disable system timer temporarily by setting bit 31 */
2333 wr32(E1000_TSAUXC, 0x80000000);
2334 wrfl();
2335
2336 /* Set registers so that rollover occurs soon to test this. */
2337 wr32(E1000_SYSTIMR, 0x00000000);
2338 wr32(E1000_SYSTIML, 0x80000000);
2339 wr32(E1000_SYSTIMH, 0x000000FF);
2340 wrfl();
2341
2342 /* enable system timer by clearing bit 31 */
2343 wr32(E1000_TSAUXC, 0x0);
2344 wrfl();
2345
2346 timecounter_init(&adapter->clock,
2347 &adapter->cycles,
2348 ktime_to_ns(ktime_get_real()));
2349 /*
2350 * Synchronize our NIC clock against system wall clock. NIC
2351 * time stamp reading requires ~3us per sample, each sample
2352 * was pretty stable even under load => only require 10
2353 * samples for each offset comparison.
2354 */
2355 memset(&adapter->compare, 0, sizeof(adapter->compare));
2356 adapter->compare.source = &adapter->clock;
2357 adapter->compare.target = ktime_get_real;
2358 adapter->compare.num_samples = 10;
2359 timecompare_update(&adapter->compare, 0);
2360 break;
2361 case e1000_82576:
2362 /*
2363 * Initialize hardware timer: we keep it running just in case
2364 * that some program needs it later on.
2365 */
2366 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
2367 adapter->cycles.read = igb_read_clock;
2368 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
2369 adapter->cycles.mult = 1;
2370 /**
2371 * Scale the NIC clock cycle by a large factor so that
2372 * relatively small clock corrections can be added or
2373 * subtracted at each clock tick. The drawbacks of a large
2374 * factor are a) that the clock register overflows more quickly
2375 * (not such a big deal) and b) that the increment per tick has
2376 * to fit into 24 bits. As a result we need to use a shift of
2377 * 19 so we can fit a value of 16 into the TIMINCA register.
2378 */
2379 adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
2380 wr32(E1000_TIMINCA,
2381 (1 << E1000_TIMINCA_16NS_SHIFT) |
2382 (16 << IGB_82576_TSYNC_SHIFT));
2383
2384 /* Set registers so that rollover occurs soon to test this. */
2385 wr32(E1000_SYSTIML, 0x00000000);
2386 wr32(E1000_SYSTIMH, 0xFF800000);
2387 wrfl();
2388
2389 timecounter_init(&adapter->clock,
2390 &adapter->cycles,
2391 ktime_to_ns(ktime_get_real()));
2392 /*
2393 * Synchronize our NIC clock against system wall clock. NIC
2394 * time stamp reading requires ~3us per sample, each sample
2395 * was pretty stable even under load => only require 10
2396 * samples for each offset comparison.
2397 */
2398 memset(&adapter->compare, 0, sizeof(adapter->compare));
2399 adapter->compare.source = &adapter->clock;
2400 adapter->compare.target = ktime_get_real;
2401 adapter->compare.num_samples = 10;
2402 timecompare_update(&adapter->compare, 0);
2403 break;
2404 case e1000_82575:
2405 /* 82575 does not support timesync */
2406 default:
2407 break;
2408 }
2409
2410}
2411
2412/**
2413 * igb_sw_init - Initialize general software structures (struct igb_adapter) 2291 * igb_sw_init - Initialize general software structures (struct igb_adapter)
2414 * @adapter: board private structure to initialize 2292 * @adapter: board private structure to initialize
2415 * 2293 *
@@ -5718,35 +5596,7 @@ static int igb_poll(struct napi_struct *napi, int budget)
5718 return 0; 5596 return 0;
5719} 5597}
5720 5598
5721/** 5599#ifdef CONFIG_IGB_PTP
5722 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
5723 * @adapter: board private structure
5724 * @shhwtstamps: timestamp structure to update
5725 * @regval: unsigned 64bit system time value.
5726 *
5727 * We need to convert the system time value stored in the RX/TXSTMP registers
5728 * into a hwtstamp which can be used by the upper level timestamping functions
5729 */
5730static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
5731 struct skb_shared_hwtstamps *shhwtstamps,
5732 u64 regval)
5733{
5734 u64 ns;
5735
5736 /*
5737 * The 82580 starts with 1ns at bit 0 in RX/TXSTMPL, shift this up to
5738 * 24 to match clock shift we setup earlier.
5739 */
5740 if (adapter->hw.mac.type >= e1000_82580)
5741 regval <<= IGB_82580_TSYNC_SHIFT;
5742
5743 ns = timecounter_cyc2time(&adapter->clock, regval);
5744 timecompare_update(&adapter->compare, ns);
5745 memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
5746 shhwtstamps->hwtstamp = ns_to_ktime(ns);
5747 shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
5748}
5749
5750/** 5600/**
5751 * igb_tx_hwtstamp - utility function which checks for TX time stamp 5601 * igb_tx_hwtstamp - utility function which checks for TX time stamp
5752 * @q_vector: pointer to q_vector containing needed info 5602 * @q_vector: pointer to q_vector containing needed info
@@ -5776,6 +5626,7 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
5776 skb_tstamp_tx(buffer_info->skb, &shhwtstamps); 5626 skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
5777} 5627}
5778 5628
5629#endif
5779/** 5630/**
5780 * igb_clean_tx_irq - Reclaim resources after transmit completes 5631 * igb_clean_tx_irq - Reclaim resources after transmit completes
5781 * @q_vector: pointer to q_vector containing needed info 5632 * @q_vector: pointer to q_vector containing needed info
@@ -5819,9 +5670,11 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
5819 total_bytes += tx_buffer->bytecount; 5670 total_bytes += tx_buffer->bytecount;
5820 total_packets += tx_buffer->gso_segs; 5671 total_packets += tx_buffer->gso_segs;
5821 5672
5673#ifdef CONFIG_IGB_PTP
5822 /* retrieve hardware timestamp */ 5674 /* retrieve hardware timestamp */
5823 igb_tx_hwtstamp(q_vector, tx_buffer); 5675 igb_tx_hwtstamp(q_vector, tx_buffer);
5824 5676
5677#endif
5825 /* free the skb */ 5678 /* free the skb */
5826 dev_kfree_skb_any(tx_buffer->skb); 5679 dev_kfree_skb_any(tx_buffer->skb);
5827 tx_buffer->skb = NULL; 5680 tx_buffer->skb = NULL;
@@ -5993,6 +5846,7 @@ static inline void igb_rx_hash(struct igb_ring *ring,
5993 skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss); 5846 skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
5994} 5847}
5995 5848
5849#ifdef CONFIG_IGB_PTP
5996static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, 5850static void igb_rx_hwtstamp(struct igb_q_vector *q_vector,
5997 union e1000_adv_rx_desc *rx_desc, 5851 union e1000_adv_rx_desc *rx_desc,
5998 struct sk_buff *skb) 5852 struct sk_buff *skb)
@@ -6032,6 +5886,7 @@ static void igb_rx_hwtstamp(struct igb_q_vector *q_vector,
6032 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); 5886 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
6033} 5887}
6034 5888
5889#endif
6035static void igb_rx_vlan(struct igb_ring *ring, 5890static void igb_rx_vlan(struct igb_ring *ring,
6036 union e1000_adv_rx_desc *rx_desc, 5891 union e1000_adv_rx_desc *rx_desc,
6037 struct sk_buff *skb) 5892 struct sk_buff *skb)
@@ -6142,7 +5997,9 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
6142 goto next_desc; 5997 goto next_desc;
6143 } 5998 }
6144 5999
6000#ifdef CONFIG_IGB_PTP
6145 igb_rx_hwtstamp(q_vector, rx_desc, skb); 6001 igb_rx_hwtstamp(q_vector, rx_desc, skb);
6002#endif
6146 igb_rx_hash(rx_ring, rx_desc, skb); 6003 igb_rx_hash(rx_ring, rx_desc, skb);
6147 igb_rx_checksum(rx_ring, rx_desc, skb); 6004 igb_rx_checksum(rx_ring, rx_desc, skb);
6148 igb_rx_vlan(rx_ring, rx_desc, skb); 6005 igb_rx_vlan(rx_ring, rx_desc, skb);
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
new file mode 100644
index 000000000000..c9b71c5bc475
--- /dev/null
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -0,0 +1,381 @@
1/*
2 * PTP Hardware Clock (PHC) driver for the Intel 82576 and 82580
3 *
4 * Copyright (C) 2011 Richard Cochran <richardcochran@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License along
17 * with this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
19 */
20#include <linux/module.h>
21#include <linux/device.h>
22#include <linux/pci.h>
23
24#include "igb.h"
25
26#define INCVALUE_MASK 0x7fffffff
27#define ISGN 0x80000000
28
29/*
30 * The 82580 timesync updates the system timer every 8ns by 8ns,
31 * and this update value cannot be reprogrammed.
32 *
33 * Neither the 82576 nor the 82580 offer registers wide enough to hold
34 * nanoseconds time values for very long. For the 82580, SYSTIM always
 * counts nanoseconds, but the upper 24 bits are not available. The
36 * frequency is adjusted by changing the 32 bit fractional nanoseconds
37 * register, TIMINCA.
38 *
 * For the 82576, the SYSTIM register time unit is affected by the
 * choice of the 24 bit TIMINCA:IV (incvalue) field. Five bits of this
41 * field are needed to provide the nominal 16 nanosecond period,
42 * leaving 19 bits for fractional nanoseconds.
43 *
44 * We scale the NIC clock cycle by a large factor so that relatively
45 * small clock corrections can be added or subtracted at each clock
46 * tick. The drawbacks of a large factor are a) that the clock
47 * register overflows more quickly (not such a big deal) and b) that
48 * the increment per tick has to fit into 24 bits. As a result we
49 * need to use a shift of 19 so we can fit a value of 16 into the
50 * TIMINCA register.
51 *
52 *
53 * SYSTIMH SYSTIML
54 * +--------------+ +---+---+------+
55 * 82576 | 32 | | 8 | 5 | 19 |
56 * +--------------+ +---+---+------+
57 * \________ 45 bits _______/ fract
58 *
59 * +----------+---+ +--------------+
60 * 82580 | 24 | 8 | | 32 |
61 * +----------+---+ +--------------+
62 * reserved \______ 40 bits _____/
63 *
64 *
65 * The 45 bit 82576 SYSTIM overflows every
66 * 2^45 * 10^-9 / 3600 = 9.77 hours.
67 *
68 * The 40 bit 82580 SYSTIM overflows every
69 * 2^40 * 10^-9 / 60 = 18.3 minutes.
70 */
71
72#define IGB_OVERFLOW_PERIOD (HZ * 60 * 9)
73#define INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT)
74#define INCVALUE_82576_MASK ((1 << E1000_TIMINCA_16NS_SHIFT) - 1)
75#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT)
76#define IGB_NBITS_82580 40
77
78/*
79 * SYSTIM read access for the 82576
80 */
81
82static cycle_t igb_82576_systim_read(const struct cyclecounter *cc)
83{
84 u64 val;
85 u32 lo, hi;
86 struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
87 struct e1000_hw *hw = &igb->hw;
88
89 lo = rd32(E1000_SYSTIML);
90 hi = rd32(E1000_SYSTIMH);
91
92 val = ((u64) hi) << 32;
93 val |= lo;
94
95 return val;
96}
97
98/*
99 * SYSTIM read access for the 82580
100 */
101
102static cycle_t igb_82580_systim_read(const struct cyclecounter *cc)
103{
104 u64 val;
105 u32 lo, hi, jk;
106 struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
107 struct e1000_hw *hw = &igb->hw;
108
109 /*
110 * The timestamp latches on lowest register read. For the 82580
111 * the lowest register is SYSTIMR instead of SYSTIML. However we only
112 * need to provide nanosecond resolution, so we just ignore it.
113 */
114 jk = rd32(E1000_SYSTIMR);
115 lo = rd32(E1000_SYSTIML);
116 hi = rd32(E1000_SYSTIMH);
117
118 val = ((u64) hi) << 32;
119 val |= lo;
120
121 return val;
122}
123
124/*
125 * PTP clock operations
126 */
127
128static int ptp_82576_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
129{
130 u64 rate;
131 u32 incvalue;
132 int neg_adj = 0;
133 struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
134 struct e1000_hw *hw = &igb->hw;
135
136 if (ppb < 0) {
137 neg_adj = 1;
138 ppb = -ppb;
139 }
140 rate = ppb;
141 rate <<= 14;
142 rate = div_u64(rate, 1953125);
143
144 incvalue = 16 << IGB_82576_TSYNC_SHIFT;
145
146 if (neg_adj)
147 incvalue -= rate;
148 else
149 incvalue += rate;
150
151 wr32(E1000_TIMINCA, INCPERIOD_82576 | (incvalue & INCVALUE_82576_MASK));
152
153 return 0;
154}
155
156static int ptp_82580_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
157{
158 u64 rate;
159 u32 inca;
160 int neg_adj = 0;
161 struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
162 struct e1000_hw *hw = &igb->hw;
163
164 if (ppb < 0) {
165 neg_adj = 1;
166 ppb = -ppb;
167 }
168 rate = ppb;
169 rate <<= 26;
170 rate = div_u64(rate, 1953125);
171
172 inca = rate & INCVALUE_MASK;
173 if (neg_adj)
174 inca |= ISGN;
175
176 wr32(E1000_TIMINCA, inca);
177
178 return 0;
179}
180
181static int igb_adjtime(struct ptp_clock_info *ptp, s64 delta)
182{
183 s64 now;
184 unsigned long flags;
185 struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
186
187 spin_lock_irqsave(&igb->tmreg_lock, flags);
188
189 now = timecounter_read(&igb->tc);
190 now += delta;
191 timecounter_init(&igb->tc, &igb->cc, now);
192
193 spin_unlock_irqrestore(&igb->tmreg_lock, flags);
194
195 return 0;
196}
197
198static int igb_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
199{
200 u64 ns;
201 u32 remainder;
202 unsigned long flags;
203 struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
204
205 spin_lock_irqsave(&igb->tmreg_lock, flags);
206
207 ns = timecounter_read(&igb->tc);
208
209 spin_unlock_irqrestore(&igb->tmreg_lock, flags);
210
211 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
212 ts->tv_nsec = remainder;
213
214 return 0;
215}
216
217static int igb_settime(struct ptp_clock_info *ptp, const struct timespec *ts)
218{
219 u64 ns;
220 unsigned long flags;
221 struct igb_adapter *igb = container_of(ptp, struct igb_adapter, caps);
222
223 ns = ts->tv_sec * 1000000000ULL;
224 ns += ts->tv_nsec;
225
226 spin_lock_irqsave(&igb->tmreg_lock, flags);
227
228 timecounter_init(&igb->tc, &igb->cc, ns);
229
230 spin_unlock_irqrestore(&igb->tmreg_lock, flags);
231
232 return 0;
233}
234
static int ptp_82576_enable(struct ptp_clock_info *ptp,
			    struct ptp_clock_request *rq, int on)
{
	/* No ancillary clock features (external timestamps, PPS) supported. */
	return -EOPNOTSUPP;
}
240
static int ptp_82580_enable(struct ptp_clock_info *ptp,
			    struct ptp_clock_request *rq, int on)
{
	/* No ancillary clock features (external timestamps, PPS) supported. */
	return -EOPNOTSUPP;
}
246
247static void igb_overflow_check(struct work_struct *work)
248{
249 struct timespec ts;
250 struct igb_adapter *igb =
251 container_of(work, struct igb_adapter, overflow_work.work);
252
253 igb_gettime(&igb->caps, &ts);
254
255 pr_debug("igb overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec);
256
257 schedule_delayed_work(&igb->overflow_work, IGB_OVERFLOW_PERIOD);
258}
259
260void igb_ptp_init(struct igb_adapter *adapter)
261{
262 struct e1000_hw *hw = &adapter->hw;
263
264 switch (hw->mac.type) {
265 case e1000_i350:
266 case e1000_82580:
267 adapter->caps.owner = THIS_MODULE;
268 strcpy(adapter->caps.name, "igb-82580");
269 adapter->caps.max_adj = 62499999;
270 adapter->caps.n_ext_ts = 0;
271 adapter->caps.pps = 0;
272 adapter->caps.adjfreq = ptp_82580_adjfreq;
273 adapter->caps.adjtime = igb_adjtime;
274 adapter->caps.gettime = igb_gettime;
275 adapter->caps.settime = igb_settime;
276 adapter->caps.enable = ptp_82580_enable;
277 adapter->cc.read = igb_82580_systim_read;
278 adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580);
279 adapter->cc.mult = 1;
280 adapter->cc.shift = 0;
281 /* Enable the timer functions by clearing bit 31. */
282 wr32(E1000_TSAUXC, 0x0);
283 break;
284
285 case e1000_82576:
286 adapter->caps.owner = THIS_MODULE;
287 strcpy(adapter->caps.name, "igb-82576");
288 adapter->caps.max_adj = 1000000000;
289 adapter->caps.n_ext_ts = 0;
290 adapter->caps.pps = 0;
291 adapter->caps.adjfreq = ptp_82576_adjfreq;
292 adapter->caps.adjtime = igb_adjtime;
293 adapter->caps.gettime = igb_gettime;
294 adapter->caps.settime = igb_settime;
295 adapter->caps.enable = ptp_82576_enable;
296 adapter->cc.read = igb_82576_systim_read;
297 adapter->cc.mask = CLOCKSOURCE_MASK(64);
298 adapter->cc.mult = 1;
299 adapter->cc.shift = IGB_82576_TSYNC_SHIFT;
300 /* Dial the nominal frequency. */
301 wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
302 break;
303
304 default:
305 adapter->ptp_clock = NULL;
306 return;
307 }
308
309 wrfl();
310
311 timecounter_init(&adapter->tc, &adapter->cc,
312 ktime_to_ns(ktime_get_real()));
313
314 INIT_DELAYED_WORK(&adapter->overflow_work, igb_overflow_check);
315
316 spin_lock_init(&adapter->tmreg_lock);
317
318 schedule_delayed_work(&adapter->overflow_work, IGB_OVERFLOW_PERIOD);
319
320 adapter->ptp_clock = ptp_clock_register(&adapter->caps);
321 if (IS_ERR(adapter->ptp_clock)) {
322 adapter->ptp_clock = NULL;
323 dev_err(&adapter->pdev->dev, "ptp_clock_register failed\n");
324 } else
325 dev_info(&adapter->pdev->dev, "added PHC on %s\n",
326 adapter->netdev->name);
327}
328
329void igb_ptp_remove(struct igb_adapter *adapter)
330{
331 cancel_delayed_work_sync(&adapter->overflow_work);
332
333 if (adapter->ptp_clock) {
334 ptp_clock_unregister(adapter->ptp_clock);
335 dev_info(&adapter->pdev->dev, "removed PHC on %s\n",
336 adapter->netdev->name);
337 }
338}
339
340/**
341 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
342 * @adapter: board private structure
343 * @hwtstamps: timestamp structure to update
344 * @systim: unsigned 64bit system time value.
345 *
346 * We need to convert the system time value stored in the RX/TXSTMP registers
347 * into a hwtstamp which can be used by the upper level timestamping functions.
348 *
349 * The 'tmreg_lock' spinlock is used to protect the consistency of the
350 * system time value. This is needed because reading the 64 bit time
351 * value involves reading two (or three) 32 bit registers. The first
352 * read latches the value. Ditto for writing.
353 *
354 * In addition, here have extended the system time with an overflow
355 * counter in software.
356 **/
357void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
358 struct skb_shared_hwtstamps *hwtstamps,
359 u64 systim)
360{
361 u64 ns;
362 unsigned long flags;
363
364 switch (adapter->hw.mac.type) {
365 case e1000_i350:
366 case e1000_82580:
367 case e1000_82576:
368 break;
369 default:
370 return;
371 }
372
373 spin_lock_irqsave(&adapter->tmreg_lock, flags);
374
375 ns = timecounter_cyc2time(&adapter->tc, systim);
376
377 spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
378
379 memset(hwtstamps, 0, sizeof(*hwtstamps));
380 hwtstamps->hwtstamp = ns_to_ktime(ns);
381}
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 8be1d1b2132e..0708d7eb4668 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -34,7 +34,7 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
34 34
35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ 35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
36 ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ 36 ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
37 ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o 37 ixgbe_mbx.o ixgbe_x540.o ixgbe_sysfs.o ixgbe_lib.o
38 38
39ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ 39ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
40 ixgbe_dcb_82599.o ixgbe_dcb_nl.o 40 ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 81b155589532..425f86432f90 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -331,6 +331,26 @@ struct ixgbe_q_vector {
331 /* for dynamic allocation of rings associated with this q_vector */ 331 /* for dynamic allocation of rings associated with this q_vector */
332 struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp; 332 struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
333}; 333};
334#ifdef CONFIG_IXGBE_HWMON
335
336#define IXGBE_HWMON_TYPE_LOC 0
337#define IXGBE_HWMON_TYPE_TEMP 1
338#define IXGBE_HWMON_TYPE_CAUTION 2
339#define IXGBE_HWMON_TYPE_MAX 3
340
341struct hwmon_attr {
342 struct device_attribute dev_attr;
343 struct ixgbe_hw *hw;
344 struct ixgbe_thermal_diode_data *sensor;
345 char name[12];
346};
347
348struct hwmon_buff {
349 struct device *device;
350 struct hwmon_attr *hwmon_list;
351 unsigned int n_hwmon;
352};
353#endif /* CONFIG_IXGBE_HWMON */
334 354
335/* 355/*
336 * microsecond values for various ITR rates shifted by 2 to fit itr register 356 * microsecond values for various ITR rates shifted by 2 to fit itr register
@@ -535,6 +555,10 @@ struct ixgbe_adapter {
535 555
536 u32 timer_event_accumulator; 556 u32 timer_event_accumulator;
537 u32 vferr_refcount; 557 u32 vferr_refcount;
558 struct kobject *info_kobj;
559#ifdef CONFIG_IXGBE_HWMON
560 struct hwmon_buff ixgbe_hwmon_buff;
561#endif /* CONFIG_IXGBE_HWMON */
538}; 562};
539 563
540struct ixgbe_fdir_filter { 564struct ixgbe_fdir_filter {
@@ -597,6 +621,8 @@ extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
597 struct ixgbe_ring *); 621 struct ixgbe_ring *);
598extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); 622extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
599extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); 623extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
624extern int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
625 u16 subdevice_id);
600extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); 626extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
601extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, 627extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
602 struct ixgbe_adapter *, 628 struct ixgbe_adapter *,
@@ -630,6 +656,8 @@ extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
630#endif 656#endif
631extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32); 657extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
632extern void ixgbe_do_reset(struct net_device *netdev); 658extern void ixgbe_do_reset(struct net_device *netdev);
659extern void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
660extern int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
633#ifdef IXGBE_FCOE 661#ifdef IXGBE_FCOE
634extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); 662extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
635extern int ixgbe_fso(struct ixgbe_ring *tx_ring, 663extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 85d2e2c4ce4a..42537336110c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -91,29 +91,6 @@ out:
91 IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr); 91 IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr);
92} 92}
93 93
94/**
95 * ixgbe_get_pcie_msix_count_82598 - Gets MSI-X vector count
96 * @hw: pointer to hardware structure
97 *
98 * Read PCIe configuration space, and get the MSI-X vector count from
99 * the capabilities table.
100 **/
101static u16 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw)
102{
103 struct ixgbe_adapter *adapter = hw->back;
104 u16 msix_count;
105 pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82598_CAPS,
106 &msix_count);
107 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
108
109 /* MSI-X count is zero-based in HW, so increment to give proper value */
110 msix_count++;
111
112 return msix_count;
113}
114
115/**
116 */
117static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) 94static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
118{ 95{
119 struct ixgbe_mac_info *mac = &hw->mac; 96 struct ixgbe_mac_info *mac = &hw->mac;
@@ -126,7 +103,7 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw)
126 mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES; 103 mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES;
127 mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; 104 mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES;
128 mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; 105 mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES;
129 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82598(hw); 106 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
130 107
131 return 0; 108 return 0;
132} 109}
@@ -347,24 +324,33 @@ out:
347/** 324/**
348 * ixgbe_fc_enable_82598 - Enable flow control 325 * ixgbe_fc_enable_82598 - Enable flow control
349 * @hw: pointer to hardware structure 326 * @hw: pointer to hardware structure
350 * @packetbuf_num: packet buffer number (0-7)
351 * 327 *
352 * Enable flow control according to the current settings. 328 * Enable flow control according to the current settings.
353 **/ 329 **/
354static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num) 330static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
355{ 331{
356 s32 ret_val = 0; 332 s32 ret_val = 0;
357 u32 fctrl_reg; 333 u32 fctrl_reg;
358 u32 rmcs_reg; 334 u32 rmcs_reg;
359 u32 reg; 335 u32 reg;
336 u32 fcrtl, fcrth;
360 u32 link_speed = 0; 337 u32 link_speed = 0;
338 int i;
361 bool link_up; 339 bool link_up;
362 340
363#ifdef CONFIG_DCB 341 /*
364 if (hw->fc.requested_mode == ixgbe_fc_pfc) 342 * Validate the water mark configuration for packet buffer 0. Zero
343 * water marks indicate that the packet buffer was not configured
344 * and the watermarks for packet buffer 0 should always be configured.
345 */
346 if (!hw->fc.low_water ||
347 !hw->fc.high_water[0] ||
348 !hw->fc.pause_time) {
349 hw_dbg(hw, "Invalid water mark configuration\n");
350 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
365 goto out; 351 goto out;
352 }
366 353
367#endif /* CONFIG_DCB */
368 /* 354 /*
369 * On 82598 having Rx FC on causes resets while doing 1G 355 * On 82598 having Rx FC on causes resets while doing 1G
370 * so if it's on turn it off once we know link_speed. For 356 * so if it's on turn it off once we know link_speed. For
@@ -386,9 +372,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
386 } 372 }
387 373
388 /* Negotiate the fc mode to use */ 374 /* Negotiate the fc mode to use */
389 ret_val = ixgbe_fc_autoneg(hw); 375 ixgbe_fc_autoneg(hw);
390 if (ret_val == IXGBE_ERR_FLOW_CONTROL)
391 goto out;
392 376
393 /* Disable any previous flow control settings */ 377 /* Disable any previous flow control settings */
394 fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); 378 fctrl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL);
@@ -405,9 +389,6 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
405 * 2: Tx flow control is enabled (we can send pause frames but 389 * 2: Tx flow control is enabled (we can send pause frames but
406 * we do not support receiving pause frames). 390 * we do not support receiving pause frames).
407 * 3: Both Rx and Tx flow control (symmetric) are enabled. 391 * 3: Both Rx and Tx flow control (symmetric) are enabled.
408#ifdef CONFIG_DCB
409 * 4: Priority Flow Control is enabled.
410#endif
411 * other: Invalid. 392 * other: Invalid.
412 */ 393 */
413 switch (hw->fc.current_mode) { 394 switch (hw->fc.current_mode) {
@@ -440,11 +421,6 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
440 fctrl_reg |= IXGBE_FCTRL_RFCE; 421 fctrl_reg |= IXGBE_FCTRL_RFCE;
441 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; 422 rmcs_reg |= IXGBE_RMCS_TFCE_802_3X;
442 break; 423 break;
443#ifdef CONFIG_DCB
444 case ixgbe_fc_pfc:
445 goto out;
446 break;
447#endif /* CONFIG_DCB */
448 default: 424 default:
449 hw_dbg(hw, "Flow control param set incorrectly\n"); 425 hw_dbg(hw, "Flow control param set incorrectly\n");
450 ret_val = IXGBE_ERR_CONFIG; 426 ret_val = IXGBE_ERR_CONFIG;
@@ -457,29 +433,29 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
457 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg); 433 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
458 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); 434 IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
459 435
460 /* Set up and enable Rx high/low water mark thresholds, enable XON. */ 436 fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
461 if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
462 reg = hw->fc.low_water << 6;
463 if (hw->fc.send_xon)
464 reg |= IXGBE_FCRTL_XONE;
465
466 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
467 437
468 reg = hw->fc.high_water[packetbuf_num] << 6; 438 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
469 reg |= IXGBE_FCRTH_FCEN; 439 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
440 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
441 hw->fc.high_water[i]) {
442 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
443 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
444 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
445 } else {
446 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
447 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
448 }
470 449
471 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
472 } 450 }
473 451
474 /* Configure pause time (2 TCs per register) */ 452 /* Configure pause time (2 TCs per register) */
475 reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2)); 453 reg = hw->fc.pause_time * 0x00010001;
476 if ((packetbuf_num & 1) == 0) 454 for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
477 reg = (reg & 0xFFFF0000) | hw->fc.pause_time; 455 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
478 else
479 reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
480 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
481 456
482 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1)); 457 /* Configure flow control refresh threshold value */
458 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
483 459
484out: 460out:
485 return ret_val; 461 return ret_val;
@@ -1300,6 +1276,8 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
1300 .set_fw_drv_ver = NULL, 1276 .set_fw_drv_ver = NULL,
1301 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, 1277 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
1302 .release_swfw_sync = &ixgbe_release_swfw_sync, 1278 .release_swfw_sync = &ixgbe_release_swfw_sync,
1279 .get_thermal_sensor_data = NULL,
1280 .init_thermal_sensor_thresh = NULL,
1303}; 1281};
1304 1282
1305static struct ixgbe_eeprom_operations eeprom_ops_82598 = { 1283static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 9c14685358eb..dee64d2703f0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -2119,6 +2119,8 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
2119 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, 2119 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
2120 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, 2120 .acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
2121 .release_swfw_sync = &ixgbe_release_swfw_sync, 2121 .release_swfw_sync = &ixgbe_release_swfw_sync,
2122 .get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic,
2123 .init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic,
2122 2124
2123}; 2125};
2124 2126
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 49aa41fe7b84..c7e51b85b8b6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -47,13 +47,6 @@ static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
47static void ixgbe_release_eeprom(struct ixgbe_hw *hw); 47static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
48 48
49static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); 49static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
50static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw);
51static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw);
52static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw);
53static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
54static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
55 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm);
56static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
57static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg); 50static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
58static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, 51static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
59 u16 words, u16 *data); 52 u16 words, u16 *data);
@@ -64,6 +57,172 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
64static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); 57static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
65 58
66/** 59/**
60 * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
61 * control
62 * @hw: pointer to hardware structure
63 *
64 * There are several phys that do not support autoneg flow control. This
65 * function check the device id to see if the associated phy supports
66 * autoneg flow control.
67 **/
68static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
69{
70
71 switch (hw->device_id) {
72 case IXGBE_DEV_ID_X540T:
73 return 0;
74 case IXGBE_DEV_ID_82599_T3_LOM:
75 return 0;
76 default:
77 return IXGBE_ERR_FC_NOT_SUPPORTED;
78 }
79}
80
81/**
82 * ixgbe_setup_fc - Set up flow control
83 * @hw: pointer to hardware structure
84 *
85 * Called at init time to set up flow control.
86 **/
87static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
88{
89 s32 ret_val = 0;
90 u32 reg = 0, reg_bp = 0;
91 u16 reg_cu = 0;
92
93 /*
94 * Validate the requested mode. Strict IEEE mode does not allow
95 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
96 */
97 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
98 hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
99 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
100 goto out;
101 }
102
103 /*
104 * 10gig parts do not have a word in the EEPROM to determine the
105 * default flow control setting, so we explicitly set it to full.
106 */
107 if (hw->fc.requested_mode == ixgbe_fc_default)
108 hw->fc.requested_mode = ixgbe_fc_full;
109
110 /*
111 * Set up the 1G and 10G flow control advertisement registers so the
112 * HW will be able to do fc autoneg once the cable is plugged in. If
113 * we link at 10G, the 1G advertisement is harmless and vice versa.
114 */
115 switch (hw->phy.media_type) {
116 case ixgbe_media_type_fiber:
117 case ixgbe_media_type_backplane:
118 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
119 reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
120 break;
121 case ixgbe_media_type_copper:
122 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
123 MDIO_MMD_AN, &reg_cu);
124 break;
125 default:
126 break;
127 }
128
129 /*
130 * The possible values of fc.requested_mode are:
131 * 0: Flow control is completely disabled
132 * 1: Rx flow control is enabled (we can receive pause frames,
133 * but not send pause frames).
134 * 2: Tx flow control is enabled (we can send pause frames but
135 * we do not support receiving pause frames).
136 * 3: Both Rx and Tx flow control (symmetric) are enabled.
137 * other: Invalid.
138 */
139 switch (hw->fc.requested_mode) {
140 case ixgbe_fc_none:
141 /* Flow control completely disabled by software override. */
142 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
143 if (hw->phy.media_type == ixgbe_media_type_backplane)
144 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
145 IXGBE_AUTOC_ASM_PAUSE);
146 else if (hw->phy.media_type == ixgbe_media_type_copper)
147 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
148 break;
149 case ixgbe_fc_tx_pause:
150 /*
151 * Tx Flow control is enabled, and Rx Flow control is
152 * disabled by software override.
153 */
154 reg |= IXGBE_PCS1GANA_ASM_PAUSE;
155 reg &= ~IXGBE_PCS1GANA_SYM_PAUSE;
156 if (hw->phy.media_type == ixgbe_media_type_backplane) {
157 reg_bp |= IXGBE_AUTOC_ASM_PAUSE;
158 reg_bp &= ~IXGBE_AUTOC_SYM_PAUSE;
159 } else if (hw->phy.media_type == ixgbe_media_type_copper) {
160 reg_cu |= IXGBE_TAF_ASM_PAUSE;
161 reg_cu &= ~IXGBE_TAF_SYM_PAUSE;
162 }
163 break;
164 case ixgbe_fc_rx_pause:
165 /*
166 * Rx Flow control is enabled and Tx Flow control is
167 * disabled by software override. Since there really
168 * isn't a way to advertise that we are capable of RX
169 * Pause ONLY, we will advertise that we support both
170 * symmetric and asymmetric Rx PAUSE, as such we fall
171 * through to the fc_full statement. Later, we will
172 * disable the adapter's ability to send PAUSE frames.
173 */
174 case ixgbe_fc_full:
175 /* Flow control (both Rx and Tx) is enabled by SW override. */
176 reg |= IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE;
177 if (hw->phy.media_type == ixgbe_media_type_backplane)
178 reg_bp |= IXGBE_AUTOC_SYM_PAUSE |
179 IXGBE_AUTOC_ASM_PAUSE;
180 else if (hw->phy.media_type == ixgbe_media_type_copper)
181 reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE;
182 break;
183 default:
184 hw_dbg(hw, "Flow control param set incorrectly\n");
185 ret_val = IXGBE_ERR_CONFIG;
186 goto out;
187 break;
188 }
189
190 if (hw->mac.type != ixgbe_mac_X540) {
191 /*
192 * Enable auto-negotiation between the MAC & PHY;
193 * the MAC will advertise clause 37 flow control.
194 */
195 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
196 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
197
198 /* Disable AN timeout */
199 if (hw->fc.strict_ieee)
200 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
201
202 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
203 hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
204 }
205
206 /*
207 * AUTOC restart handles negotiation of 1G and 10G on backplane
208 * and copper. There is no need to set the PCS1GCTL register.
209 *
210 */
211 if (hw->phy.media_type == ixgbe_media_type_backplane) {
212 reg_bp |= IXGBE_AUTOC_AN_RESTART;
213 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
214 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
215 (ixgbe_device_supports_autoneg_fc(hw) == 0)) {
216 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
217 MDIO_MMD_AN, reg_cu);
218 }
219
220 hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
221out:
222 return ret_val;
223}
224
225/**
67 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx 226 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
68 * @hw: pointer to hardware structure 227 * @hw: pointer to hardware structure
69 * 228 *
@@ -95,7 +254,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
95 IXGBE_WRITE_FLUSH(hw); 254 IXGBE_WRITE_FLUSH(hw);
96 255
97 /* Setup flow control */ 256 /* Setup flow control */
98 ixgbe_setup_fc(hw, 0); 257 ixgbe_setup_fc(hw);
99 258
100 /* Clear adapter stopped flag */ 259 /* Clear adapter stopped flag */
101 hw->adapter_stopped = false; 260 hw->adapter_stopped = false;
@@ -1923,30 +2082,36 @@ s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
1923/** 2082/**
1924 * ixgbe_fc_enable_generic - Enable flow control 2083 * ixgbe_fc_enable_generic - Enable flow control
1925 * @hw: pointer to hardware structure 2084 * @hw: pointer to hardware structure
1926 * @packetbuf_num: packet buffer number (0-7)
1927 * 2085 *
1928 * Enable flow control according to the current settings. 2086 * Enable flow control according to the current settings.
1929 **/ 2087 **/
1930s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num) 2088s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
1931{ 2089{
1932 s32 ret_val = 0; 2090 s32 ret_val = 0;
1933 u32 mflcn_reg, fccfg_reg; 2091 u32 mflcn_reg, fccfg_reg;
1934 u32 reg; 2092 u32 reg;
1935 u32 fcrtl, fcrth; 2093 u32 fcrtl, fcrth;
2094 int i;
1936 2095
1937#ifdef CONFIG_DCB 2096 /*
1938 if (hw->fc.requested_mode == ixgbe_fc_pfc) 2097 * Validate the water mark configuration for packet buffer 0. Zero
2098 * water marks indicate that the packet buffer was not configured
2099 * and the watermarks for packet buffer 0 should always be configured.
2100 */
2101 if (!hw->fc.low_water ||
2102 !hw->fc.high_water[0] ||
2103 !hw->fc.pause_time) {
2104 hw_dbg(hw, "Invalid water mark configuration\n");
2105 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
1939 goto out; 2106 goto out;
2107 }
1940 2108
1941#endif /* CONFIG_DCB */
1942 /* Negotiate the fc mode to use */ 2109 /* Negotiate the fc mode to use */
1943 ret_val = ixgbe_fc_autoneg(hw); 2110 ixgbe_fc_autoneg(hw);
1944 if (ret_val == IXGBE_ERR_FLOW_CONTROL)
1945 goto out;
1946 2111
1947 /* Disable any previous flow control settings */ 2112 /* Disable any previous flow control settings */
1948 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); 2113 mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
1949 mflcn_reg &= ~(IXGBE_MFLCN_RFCE | IXGBE_MFLCN_RPFCE); 2114 mflcn_reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);
1950 2115
1951 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG); 2116 fccfg_reg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
1952 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY); 2117 fccfg_reg &= ~(IXGBE_FCCFG_TFCE_802_3X | IXGBE_FCCFG_TFCE_PRIORITY);
@@ -1959,9 +2124,6 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
1959 * 2: Tx flow control is enabled (we can send pause frames but 2124 * 2: Tx flow control is enabled (we can send pause frames but
1960 * we do not support receiving pause frames). 2125 * we do not support receiving pause frames).
1961 * 3: Both Rx and Tx flow control (symmetric) are enabled. 2126 * 3: Both Rx and Tx flow control (symmetric) are enabled.
1962#ifdef CONFIG_DCB
1963 * 4: Priority Flow Control is enabled.
1964#endif
1965 * other: Invalid. 2127 * other: Invalid.
1966 */ 2128 */
1967 switch (hw->fc.current_mode) { 2129 switch (hw->fc.current_mode) {
@@ -1994,11 +2156,6 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
1994 mflcn_reg |= IXGBE_MFLCN_RFCE; 2156 mflcn_reg |= IXGBE_MFLCN_RFCE;
1995 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; 2157 fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X;
1996 break; 2158 break;
1997#ifdef CONFIG_DCB
1998 case ixgbe_fc_pfc:
1999 goto out;
2000 break;
2001#endif /* CONFIG_DCB */
2002 default: 2159 default:
2003 hw_dbg(hw, "Flow control param set incorrectly\n"); 2160 hw_dbg(hw, "Flow control param set incorrectly\n");
2004 ret_val = IXGBE_ERR_CONFIG; 2161 ret_val = IXGBE_ERR_CONFIG;
@@ -2011,100 +2168,86 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
2011 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); 2168 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
2012 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); 2169 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
2013 2170
2014 fcrtl = hw->fc.low_water << 10; 2171 fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
2015 2172
2016 if (hw->fc.current_mode & ixgbe_fc_tx_pause) { 2173 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
2017 fcrth = hw->fc.high_water[packetbuf_num] << 10; 2174 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
2018 fcrth |= IXGBE_FCRTH_FCEN; 2175 if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
2019 if (hw->fc.send_xon) 2176 hw->fc.high_water[i]) {
2020 fcrtl |= IXGBE_FCRTL_XONE; 2177 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
2021 } else { 2178 fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
2022 /* 2179 } else {
2023 * If Tx flow control is disabled, set our high water mark 2180 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
2024 * to Rx FIFO size minus 32 in order prevent Tx switch 2181 /*
2025 * loopback from stalling on DMA. 2182 * In order to prevent Tx hangs when the internal Tx
2026 */ 2183 * switch is enabled we must set the high water mark
2027 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num)) - 32; 2184 * to the maximum FCRTH value. This allows the Tx
2028 } 2185 * switch to function even under heavy Rx workloads.
2186 */
2187 fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
2188 }
2029 2189
2030 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth); 2190 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
2031 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl); 2191 }
2032 2192
2033 /* Configure pause time (2 TCs per register) */ 2193 /* Configure pause time (2 TCs per register) */
2034 reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2)); 2194 reg = hw->fc.pause_time * 0x00010001;
2035 if ((packetbuf_num & 1) == 0) 2195 for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++)
2036 reg = (reg & 0xFFFF0000) | hw->fc.pause_time; 2196 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);
2037 else
2038 reg = (reg & 0x0000FFFF) | (hw->fc.pause_time << 16);
2039 IXGBE_WRITE_REG(hw, IXGBE_FCTTV(packetbuf_num / 2), reg);
2040 2197
2041 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1)); 2198 IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
2042 2199
2043out: 2200out:
2044 return ret_val; 2201 return ret_val;
2045} 2202}
2046 2203
2047/** 2204/**
2048 * ixgbe_fc_autoneg - Configure flow control 2205 * ixgbe_negotiate_fc - Negotiate flow control
2049 * @hw: pointer to hardware structure 2206 * @hw: pointer to hardware structure
2207 * @adv_reg: flow control advertised settings
2208 * @lp_reg: link partner's flow control settings
2209 * @adv_sym: symmetric pause bit in advertisement
2210 * @adv_asm: asymmetric pause bit in advertisement
2211 * @lp_sym: symmetric pause bit in link partner advertisement
2212 * @lp_asm: asymmetric pause bit in link partner advertisement
2050 * 2213 *
2051 * Compares our advertised flow control capabilities to those advertised by 2214 * Find the intersection between advertised settings and link partner's
2052 * our link partner, and determines the proper flow control mode to use. 2215 * advertised settings
2053 **/ 2216 **/
2054s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw) 2217static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2218 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2055{ 2219{
2056 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; 2220 if ((!(adv_reg)) || (!(lp_reg)))
2057 ixgbe_link_speed speed; 2221 return IXGBE_ERR_FC_NOT_NEGOTIATED;
2058 bool link_up;
2059
2060 if (hw->fc.disable_fc_autoneg)
2061 goto out;
2062
2063 /*
2064 * AN should have completed when the cable was plugged in.
2065 * Look for reasons to bail out. Bail out if:
2066 * - FC autoneg is disabled, or if
2067 * - link is not up.
2068 *
2069 * Since we're being called from an LSC, link is already known to be up.
2070 * So use link_up_wait_to_complete=false.
2071 */
2072 hw->mac.ops.check_link(hw, &speed, &link_up, false);
2073 if (!link_up) {
2074 ret_val = IXGBE_ERR_FLOW_CONTROL;
2075 goto out;
2076 }
2077
2078 switch (hw->phy.media_type) {
2079 /* Autoneg flow control on fiber adapters */
2080 case ixgbe_media_type_fiber:
2081 if (speed == IXGBE_LINK_SPEED_1GB_FULL)
2082 ret_val = ixgbe_fc_autoneg_fiber(hw);
2083 break;
2084
2085 /* Autoneg flow control on backplane adapters */
2086 case ixgbe_media_type_backplane:
2087 ret_val = ixgbe_fc_autoneg_backplane(hw);
2088 break;
2089
2090 /* Autoneg flow control on copper adapters */
2091 case ixgbe_media_type_copper:
2092 if (ixgbe_device_supports_autoneg_fc(hw) == 0)
2093 ret_val = ixgbe_fc_autoneg_copper(hw);
2094 break;
2095
2096 default:
2097 break;
2098 }
2099 2222
2100out: 2223 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2101 if (ret_val == 0) { 2224 /*
2102 hw->fc.fc_was_autonegged = true; 2225 * Now we need to check if the user selected Rx ONLY
2226 * of pause frames. In this case, we had to advertise
2227 * FULL flow control because we could not advertise RX
2228 * ONLY. Hence, we must now check to see if we need to
2229 * turn OFF the TRANSMISSION of PAUSE frames.
2230 */
2231 if (hw->fc.requested_mode == ixgbe_fc_full) {
2232 hw->fc.current_mode = ixgbe_fc_full;
2233 hw_dbg(hw, "Flow Control = FULL.\n");
2234 } else {
2235 hw->fc.current_mode = ixgbe_fc_rx_pause;
2236 hw_dbg(hw, "Flow Control=RX PAUSE frames only\n");
2237 }
2238 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2239 (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2240 hw->fc.current_mode = ixgbe_fc_tx_pause;
2241 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
2242 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2243 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2244 hw->fc.current_mode = ixgbe_fc_rx_pause;
2245 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
2103 } else { 2246 } else {
2104 hw->fc.fc_was_autonegged = false; 2247 hw->fc.current_mode = ixgbe_fc_none;
2105 hw->fc.current_mode = hw->fc.requested_mode; 2248 hw_dbg(hw, "Flow Control = NONE.\n");
2106 } 2249 }
2107 return ret_val; 2250 return 0;
2108} 2251}
2109 2252
2110/** 2253/**
@@ -2116,7 +2259,7 @@ out:
2116static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) 2259static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2117{ 2260{
2118 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; 2261 u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
2119 s32 ret_val; 2262 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2120 2263
2121 /* 2264 /*
2122 * On multispeed fiber at 1g, bail out if 2265 * On multispeed fiber at 1g, bail out if
@@ -2126,10 +2269,8 @@ static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
2126 2269
2127 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); 2270 linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
2128 if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || 2271 if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
2129 (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) { 2272 (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1))
2130 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2131 goto out; 2273 goto out;
2132 }
2133 2274
2134 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 2275 pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
2135 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); 2276 pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
@@ -2153,7 +2294,7 @@ out:
2153static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) 2294static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2154{ 2295{
2155 u32 links2, anlp1_reg, autoc_reg, links; 2296 u32 links2, anlp1_reg, autoc_reg, links;
2156 s32 ret_val; 2297 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2157 2298
2158 /* 2299 /*
2159 * On backplane, bail out if 2300 * On backplane, bail out if
@@ -2161,21 +2302,13 @@ static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
2161 * - we are 82599 and link partner is not AN enabled 2302 * - we are 82599 and link partner is not AN enabled
2162 */ 2303 */
2163 links = IXGBE_READ_REG(hw, IXGBE_LINKS); 2304 links = IXGBE_READ_REG(hw, IXGBE_LINKS);
2164 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) { 2305 if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
2165 hw->fc.fc_was_autonegged = false;
2166 hw->fc.current_mode = hw->fc.requested_mode;
2167 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2168 goto out; 2306 goto out;
2169 }
2170 2307
2171 if (hw->mac.type == ixgbe_mac_82599EB) { 2308 if (hw->mac.type == ixgbe_mac_82599EB) {
2172 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); 2309 links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
2173 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) { 2310 if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
2174 hw->fc.fc_was_autonegged = false;
2175 hw->fc.current_mode = hw->fc.requested_mode;
2176 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2177 goto out; 2311 goto out;
2178 }
2179 } 2312 }
2180 /* 2313 /*
2181 * Read the 10g AN autoc and LP ability registers and resolve 2314 * Read the 10g AN autoc and LP ability registers and resolve
@@ -2217,241 +2350,63 @@ static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw)
2217} 2350}
2218 2351
2219/** 2352/**
2220 * ixgbe_negotiate_fc - Negotiate flow control 2353 * ixgbe_fc_autoneg - Configure flow control
2221 * @hw: pointer to hardware structure
2222 * @adv_reg: flow control advertised settings
2223 * @lp_reg: link partner's flow control settings
2224 * @adv_sym: symmetric pause bit in advertisement
2225 * @adv_asm: asymmetric pause bit in advertisement
2226 * @lp_sym: symmetric pause bit in link partner advertisement
2227 * @lp_asm: asymmetric pause bit in link partner advertisement
2228 *
2229 * Find the intersection between advertised settings and link partner's
2230 * advertised settings
2231 **/
2232static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
2233 u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm)
2234{
2235 if ((!(adv_reg)) || (!(lp_reg)))
2236 return IXGBE_ERR_FC_NOT_NEGOTIATED;
2237
2238 if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) {
2239 /*
2240 * Now we need to check if the user selected Rx ONLY
2241 * of pause frames. In this case, we had to advertise
2242 * FULL flow control because we could not advertise RX
2243 * ONLY. Hence, we must now check to see if we need to
2244 * turn OFF the TRANSMISSION of PAUSE frames.
2245 */
2246 if (hw->fc.requested_mode == ixgbe_fc_full) {
2247 hw->fc.current_mode = ixgbe_fc_full;
2248 hw_dbg(hw, "Flow Control = FULL.\n");
2249 } else {
2250 hw->fc.current_mode = ixgbe_fc_rx_pause;
2251 hw_dbg(hw, "Flow Control=RX PAUSE frames only\n");
2252 }
2253 } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2254 (lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2255 hw->fc.current_mode = ixgbe_fc_tx_pause;
2256 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n");
2257 } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) &&
2258 !(lp_reg & lp_sym) && (lp_reg & lp_asm)) {
2259 hw->fc.current_mode = ixgbe_fc_rx_pause;
2260 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n");
2261 } else {
2262 hw->fc.current_mode = ixgbe_fc_none;
2263 hw_dbg(hw, "Flow Control = NONE.\n");
2264 }
2265 return 0;
2266}
2267
2268/**
2269 * ixgbe_setup_fc - Set up flow control
2270 * @hw: pointer to hardware structure 2354 * @hw: pointer to hardware structure
2271 * 2355 *
2272 * Called at init time to set up flow control. 2356 * Compares our advertised flow control capabilities to those advertised by
2357 * our link partner, and determines the proper flow control mode to use.
2273 **/ 2358 **/
2274static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) 2359void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
2275{ 2360{
2276 s32 ret_val = 0; 2361 s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
2277 u32 reg = 0, reg_bp = 0; 2362 ixgbe_link_speed speed;
2278 u16 reg_cu = 0; 2363 bool link_up;
2279
2280#ifdef CONFIG_DCB
2281 if (hw->fc.requested_mode == ixgbe_fc_pfc) {
2282 hw->fc.current_mode = hw->fc.requested_mode;
2283 goto out;
2284 }
2285
2286#endif /* CONFIG_DCB */
2287 /* Validate the packetbuf configuration */
2288 if (packetbuf_num < 0 || packetbuf_num > 7) {
2289 hw_dbg(hw, "Invalid packet buffer number [%d], expected range "
2290 "is 0-7\n", packetbuf_num);
2291 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2292 goto out;
2293 }
2294 2364
2295 /* 2365 /*
2296 * Validate the water mark configuration. Zero water marks are invalid 2366 * AN should have completed when the cable was plugged in.
2297 * because it causes the controller to just blast out fc packets. 2367 * Look for reasons to bail out. Bail out if:
2368 * - FC autoneg is disabled, or if
2369 * - link is not up.
2370 *
2371 * Since we're being called from an LSC, link is already known to be up.
2372 * So use link_up_wait_to_complete=false.
2298 */ 2373 */
2299 if (!hw->fc.low_water || 2374 if (hw->fc.disable_fc_autoneg)
2300 !hw->fc.high_water[packetbuf_num] ||
2301 !hw->fc.pause_time) {
2302 hw_dbg(hw, "Invalid water mark configuration\n");
2303 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2304 goto out; 2375 goto out;
2305 }
2306 2376
2307 /* 2377 hw->mac.ops.check_link(hw, &speed, &link_up, false);
2308 * Validate the requested mode. Strict IEEE mode does not allow 2378 if (!link_up)
2309 * ixgbe_fc_rx_pause because it will cause us to fail at UNH.
2310 */
2311 if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
2312 hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict "
2313 "IEEE mode\n");
2314 ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
2315 goto out; 2379 goto out;
2316 }
2317
2318 /*
2319 * 10gig parts do not have a word in the EEPROM to determine the
2320 * default flow control setting, so we explicitly set it to full.
2321 */
2322 if (hw->fc.requested_mode == ixgbe_fc_default)
2323 hw->fc.requested_mode = ixgbe_fc_full;
2324
2325 /*
2326 * Set up the 1G and 10G flow control advertisement registers so the
2327 * HW will be able to do fc autoneg once the cable is plugged in. If
2328 * we link at 10G, the 1G advertisement is harmless and vice versa.
2329 */
2330 2380
2331 switch (hw->phy.media_type) { 2381 switch (hw->phy.media_type) {
2382 /* Autoneg flow control on fiber adapters */
2332 case ixgbe_media_type_fiber: 2383 case ixgbe_media_type_fiber:
2384 if (speed == IXGBE_LINK_SPEED_1GB_FULL)
2385 ret_val = ixgbe_fc_autoneg_fiber(hw);
2386 break;
2387
2388 /* Autoneg flow control on backplane adapters */
2333 case ixgbe_media_type_backplane: 2389 case ixgbe_media_type_backplane:
2334 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); 2390 ret_val = ixgbe_fc_autoneg_backplane(hw);
2335 reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
2336 break; 2391 break;
2337 2392
2393 /* Autoneg flow control on copper adapters */
2338 case ixgbe_media_type_copper: 2394 case ixgbe_media_type_copper:
2339 hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, 2395 if (ixgbe_device_supports_autoneg_fc(hw) == 0)
2340 MDIO_MMD_AN, &reg_cu); 2396 ret_val = ixgbe_fc_autoneg_copper(hw);
2341 break; 2397 break;
2342 2398
2343 default: 2399 default:
2344 ;
2345 }
2346
2347 /*
2348 * The possible values of fc.requested_mode are:
2349 * 0: Flow control is completely disabled
2350 * 1: Rx flow control is enabled (we can receive pause frames,
2351 * but not send pause frames).
2352 * 2: Tx flow control is enabled (we can send pause frames but
2353 * we do not support receiving pause frames).
2354 * 3: Both Rx and Tx flow control (symmetric) are enabled.
2355#ifdef CONFIG_DCB
2356 * 4: Priority Flow Control is enabled.
2357#endif
2358 * other: Invalid.
2359 */
2360 switch (hw->fc.requested_mode) {
2361 case ixgbe_fc_none:
2362 /* Flow control completely disabled by software override. */
2363 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
2364 if (hw->phy.media_type == ixgbe_media_type_backplane)
2365 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE |
2366 IXGBE_AUTOC_ASM_PAUSE);
2367 else if (hw->phy.media_type == ixgbe_media_type_copper)
2368 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
2369 break;
2370 case ixgbe_fc_rx_pause:
2371 /*
2372 * Rx Flow control is enabled and Tx Flow control is
2373 * disabled by software override. Since there really
2374 * isn't a way to advertise that we are capable of RX
2375 * Pause ONLY, we will advertise that we support both
2376 * symmetric and asymmetric Rx PAUSE. Later, we will
2377 * disable the adapter's ability to send PAUSE frames.
2378 */
2379 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
2380 if (hw->phy.media_type == ixgbe_media_type_backplane)
2381 reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
2382 IXGBE_AUTOC_ASM_PAUSE);
2383 else if (hw->phy.media_type == ixgbe_media_type_copper)
2384 reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
2385 break;
2386 case ixgbe_fc_tx_pause:
2387 /*
2388 * Tx Flow control is enabled, and Rx Flow control is
2389 * disabled by software override.
2390 */
2391 reg |= (IXGBE_PCS1GANA_ASM_PAUSE);
2392 reg &= ~(IXGBE_PCS1GANA_SYM_PAUSE);
2393 if (hw->phy.media_type == ixgbe_media_type_backplane) {
2394 reg_bp |= (IXGBE_AUTOC_ASM_PAUSE);
2395 reg_bp &= ~(IXGBE_AUTOC_SYM_PAUSE);
2396 } else if (hw->phy.media_type == ixgbe_media_type_copper) {
2397 reg_cu |= (IXGBE_TAF_ASM_PAUSE);
2398 reg_cu &= ~(IXGBE_TAF_SYM_PAUSE);
2399 }
2400 break; 2400 break;
2401 case ixgbe_fc_full:
2402 /* Flow control (both Rx and Tx) is enabled by SW override. */
2403 reg |= (IXGBE_PCS1GANA_SYM_PAUSE | IXGBE_PCS1GANA_ASM_PAUSE);
2404 if (hw->phy.media_type == ixgbe_media_type_backplane)
2405 reg_bp |= (IXGBE_AUTOC_SYM_PAUSE |
2406 IXGBE_AUTOC_ASM_PAUSE);
2407 else if (hw->phy.media_type == ixgbe_media_type_copper)
2408 reg_cu |= (IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE);
2409 break;
2410#ifdef CONFIG_DCB
2411 case ixgbe_fc_pfc:
2412 goto out;
2413 break;
2414#endif /* CONFIG_DCB */
2415 default:
2416 hw_dbg(hw, "Flow control param set incorrectly\n");
2417 ret_val = IXGBE_ERR_CONFIG;
2418 goto out;
2419 break;
2420 }
2421
2422 if (hw->mac.type != ixgbe_mac_X540) {
2423 /*
2424 * Enable auto-negotiation between the MAC & PHY;
2425 * the MAC will advertise clause 37 flow control.
2426 */
2427 IXGBE_WRITE_REG(hw, IXGBE_PCS1GANA, reg);
2428 reg = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
2429
2430 /* Disable AN timeout */
2431 if (hw->fc.strict_ieee)
2432 reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN;
2433
2434 IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg);
2435 hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg);
2436 }
2437
2438 /*
2439 * AUTOC restart handles negotiation of 1G and 10G on backplane
2440 * and copper. There is no need to set the PCS1GCTL register.
2441 *
2442 */
2443 if (hw->phy.media_type == ixgbe_media_type_backplane) {
2444 reg_bp |= IXGBE_AUTOC_AN_RESTART;
2445 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
2446 } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
2447 (ixgbe_device_supports_autoneg_fc(hw) == 0)) {
2448 hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
2449 MDIO_MMD_AN, reg_cu);
2450 } 2401 }
2451 2402
2452 hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
2453out: 2403out:
2454 return ret_val; 2404 if (ret_val == 0) {
2405 hw->fc.fc_was_autonegged = true;
2406 } else {
2407 hw->fc.fc_was_autonegged = false;
2408 hw->fc.current_mode = hw->fc.requested_mode;
2409 }
2455} 2410}
2456 2411
2457/** 2412/**
@@ -2783,17 +2738,36 @@ san_mac_addr_out:
2783 * Read PCIe configuration space, and get the MSI-X vector count from 2738 * Read PCIe configuration space, and get the MSI-X vector count from
2784 * the capabilities table. 2739 * the capabilities table.
2785 **/ 2740 **/
2786u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) 2741u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
2787{ 2742{
2788 struct ixgbe_adapter *adapter = hw->back; 2743 struct ixgbe_adapter *adapter = hw->back;
2789 u16 msix_count; 2744 u16 msix_count = 1;
2790 pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82599_CAPS, 2745 u16 max_msix_count;
2791 &msix_count); 2746 u16 pcie_offset;
2747
2748 switch (hw->mac.type) {
2749 case ixgbe_mac_82598EB:
2750 pcie_offset = IXGBE_PCIE_MSIX_82598_CAPS;
2751 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82598;
2752 break;
2753 case ixgbe_mac_82599EB:
2754 case ixgbe_mac_X540:
2755 pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
2756 max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
2757 break;
2758 default:
2759 return msix_count;
2760 }
2761
2762 pci_read_config_word(adapter->pdev, pcie_offset, &msix_count);
2792 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; 2763 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
2793 2764
2794 /* MSI-X count is zero-based in HW, so increment to give proper value */ 2765 /* MSI-X count is zero-based in HW */
2795 msix_count++; 2766 msix_count++;
2796 2767
2768 if (msix_count > max_msix_count)
2769 msix_count = max_msix_count;
2770
2797 return msix_count; 2771 return msix_count;
2798} 2772}
2799 2773
@@ -3203,28 +3177,6 @@ wwn_prefix_out:
3203} 3177}
3204 3178
3205/** 3179/**
3206 * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow
3207 * control
3208 * @hw: pointer to hardware structure
3209 *
3210 * There are several phys that do not support autoneg flow control. This
3211 * function check the device id to see if the associated phy supports
3212 * autoneg flow control.
3213 **/
3214static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
3215{
3216
3217 switch (hw->device_id) {
3218 case IXGBE_DEV_ID_X540T:
3219 return 0;
3220 case IXGBE_DEV_ID_82599_T3_LOM:
3221 return 0;
3222 default:
3223 return IXGBE_ERR_FC_NOT_SUPPORTED;
3224 }
3225}
3226
3227/**
3228 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing 3180 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
3229 * @hw: pointer to hardware structure 3181 * @hw: pointer to hardware structure
3230 * @enable: enable or disable switch for anti-spoofing 3182 * @enable: enable or disable switch for anti-spoofing
@@ -3585,3 +3537,172 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw)
3585 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); 3537 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
3586 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); 3538 IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
3587} 3539}
3540
3541static const u8 ixgbe_emc_temp_data[4] = {
3542 IXGBE_EMC_INTERNAL_DATA,
3543 IXGBE_EMC_DIODE1_DATA,
3544 IXGBE_EMC_DIODE2_DATA,
3545 IXGBE_EMC_DIODE3_DATA
3546};
3547static const u8 ixgbe_emc_therm_limit[4] = {
3548 IXGBE_EMC_INTERNAL_THERM_LIMIT,
3549 IXGBE_EMC_DIODE1_THERM_LIMIT,
3550 IXGBE_EMC_DIODE2_THERM_LIMIT,
3551 IXGBE_EMC_DIODE3_THERM_LIMIT
3552};
3553
3554/**
3555 * ixgbe_get_ets_data - Extracts the ETS bit data
3556 * @hw: pointer to hardware structure
3557 * @ets_cfg: extected ETS data
3558 * @ets_offset: offset of ETS data
3559 *
3560 * Returns error code.
3561 **/
3562static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
3563 u16 *ets_offset)
3564{
3565 s32 status = 0;
3566
3567 status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, ets_offset);
3568 if (status)
3569 goto out;
3570
3571 if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF)) {
3572 status = IXGBE_NOT_IMPLEMENTED;
3573 goto out;
3574 }
3575
3576 status = hw->eeprom.ops.read(hw, *ets_offset, ets_cfg);
3577 if (status)
3578 goto out;
3579
3580 if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED) {
3581 status = IXGBE_NOT_IMPLEMENTED;
3582 goto out;
3583 }
3584
3585out:
3586 return status;
3587}
3588
3589/**
3590 * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data
3591 * @hw: pointer to hardware structure
3592 *
3593 * Returns the thermal sensor data structure
3594 **/
3595s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
3596{
3597 s32 status = 0;
3598 u16 ets_offset;
3599 u16 ets_cfg;
3600 u16 ets_sensor;
3601 u8 num_sensors;
3602 u8 i;
3603 struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
3604
3605 /* Only support thermal sensors attached to physical port 0 */
3606 if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
3607 status = IXGBE_NOT_IMPLEMENTED;
3608 goto out;
3609 }
3610
3611 status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
3612 if (status)
3613 goto out;
3614
3615 num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
3616 if (num_sensors > IXGBE_MAX_SENSORS)
3617 num_sensors = IXGBE_MAX_SENSORS;
3618
3619 for (i = 0; i < num_sensors; i++) {
3620 u8 sensor_index;
3621 u8 sensor_location;
3622
3623 status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
3624 &ets_sensor);
3625 if (status)
3626 goto out;
3627
3628 sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
3629 IXGBE_ETS_DATA_INDEX_SHIFT);
3630 sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
3631 IXGBE_ETS_DATA_LOC_SHIFT);
3632
3633 if (sensor_location != 0) {
3634 status = hw->phy.ops.read_i2c_byte(hw,
3635 ixgbe_emc_temp_data[sensor_index],
3636 IXGBE_I2C_THERMAL_SENSOR_ADDR,
3637 &data->sensor[i].temp);
3638 if (status)
3639 goto out;
3640 }
3641 }
3642out:
3643 return status;
3644}
3645
3646/**
3647 * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds
3648 * @hw: pointer to hardware structure
3649 *
3650 * Inits the thermal sensor thresholds according to the NVM map
3651 * and save off the threshold and location values into mac.thermal_sensor_data
3652 **/
3653s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
3654{
3655 s32 status = 0;
3656 u16 ets_offset;
3657 u16 ets_cfg;
3658 u16 ets_sensor;
3659 u8 low_thresh_delta;
3660 u8 num_sensors;
3661 u8 therm_limit;
3662 u8 i;
3663 struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
3664
3665 memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));
3666
3667 /* Only support thermal sensors attached to physical port 0 */
3668 if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
3669 status = IXGBE_NOT_IMPLEMENTED;
3670 goto out;
3671 }
3672
3673 status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
3674 if (status)
3675 goto out;
3676
3677 low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
3678 IXGBE_ETS_LTHRES_DELTA_SHIFT);
3679 num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
3680 if (num_sensors > IXGBE_MAX_SENSORS)
3681 num_sensors = IXGBE_MAX_SENSORS;
3682
3683 for (i = 0; i < num_sensors; i++) {
3684 u8 sensor_index;
3685 u8 sensor_location;
3686
3687 hw->eeprom.ops.read(hw, (ets_offset + 1 + i), &ets_sensor);
3688 sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
3689 IXGBE_ETS_DATA_INDEX_SHIFT);
3690 sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
3691 IXGBE_ETS_DATA_LOC_SHIFT);
3692 therm_limit = ets_sensor & IXGBE_ETS_DATA_HTHRESH_MASK;
3693
3694 hw->phy.ops.write_i2c_byte(hw,
3695 ixgbe_emc_therm_limit[sensor_index],
3696 IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit);
3697
3698 if (sensor_location == 0)
3699 continue;
3700
3701 data->sensor[i].location = sensor_location;
3702 data->sensor[i].caution_thresh = therm_limit;
3703 data->sensor[i].max_op_thresh = therm_limit - low_thresh_delta;
3704 }
3705out:
3706 return status;
3707}
3708
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 204f06235b45..6222fdb3d3f1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -31,7 +31,7 @@
31#include "ixgbe_type.h" 31#include "ixgbe_type.h"
32#include "ixgbe.h" 32#include "ixgbe.h"
33 33
34u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw); 34u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
35s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw); 35s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
36s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw); 36s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
37s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw); 37s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
@@ -77,8 +77,8 @@ s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
77s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw); 77s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw);
78s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw); 78s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
79s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); 79s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
80s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num); 80s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
81s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw); 81void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
82 82
83s32 ixgbe_validate_mac_addr(u8 *mac_addr); 83s32 ixgbe_validate_mac_addr(u8 *mac_addr);
84s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask); 84s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
@@ -107,6 +107,19 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
107void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, 107void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
108 u32 headroom, int strategy); 108 u32 headroom, int strategy);
109 109
110#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
111#define IXGBE_EMC_INTERNAL_DATA 0x00
112#define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20
113#define IXGBE_EMC_DIODE1_DATA 0x01
114#define IXGBE_EMC_DIODE1_THERM_LIMIT 0x19
115#define IXGBE_EMC_DIODE2_DATA 0x23
116#define IXGBE_EMC_DIODE2_THERM_LIMIT 0x1A
117#define IXGBE_EMC_DIODE3_DATA 0x2A
118#define IXGBE_EMC_DIODE3_THERM_LIMIT 0x30
119
120s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
121s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
122
110#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg))) 123#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
111 124
112#ifndef writeq 125#ifndef writeq
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index 888a419dc3d9..65913c5a616e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -278,18 +278,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
278 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg); 278 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
279 279
280 } else { 280 } else {
281 /* X540 devices have a RX bit that should be cleared 281 hw->mac.ops.fc_enable(hw);
282 * if PFC is disabled on all TCs but PFC features is
283 * enabled.
284 */
285 if (hw->mac.type == ixgbe_mac_X540) {
286 reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
287 reg &= ~IXGBE_MFLCN_RPFCE_MASK;
288 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);
289 }
290
291 for (i = 0; i < MAX_TRAFFIC_CLASS; i++)
292 hw->mac.ops.fc_enable(hw, i);
293 } 282 }
294 283
295 return 0; 284 return 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 32e5c02ff6d0..a09d6b4f0ab0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -659,6 +659,13 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
659 return -ENOMEM; 659 return -ENOMEM;
660 } 660 }
661 661
662 if (pfc->pfc_en) {
663 adapter->last_lfc_mode = adapter->hw.fc.current_mode;
664 adapter->hw.fc.current_mode = ixgbe_fc_pfc;
665 } else {
666 adapter->hw.fc.current_mode = adapter->last_lfc_mode;
667 }
668
662 prio_tc = adapter->ixgbe_ieee_ets->prio_tc; 669 prio_tc = adapter->ixgbe_ieee_ets->prio_tc;
663 memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc)); 670 memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc));
664 return ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en, prio_tc); 671 return ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en, prio_tc);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index cfe7d269590c..cca3e9c4a08a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1971,53 +1971,12 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
1971 struct ethtool_wolinfo *wol) 1971 struct ethtool_wolinfo *wol)
1972{ 1972{
1973 struct ixgbe_hw *hw = &adapter->hw; 1973 struct ixgbe_hw *hw = &adapter->hw;
1974 int retval = 1; 1974 int retval = 0;
1975 u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
1976
1977 /* WOL not supported except for the following */
1978 switch(hw->device_id) {
1979 case IXGBE_DEV_ID_82599_SFP:
1980 /* Only these subdevices could supports WOL */
1981 switch (hw->subsystem_device_id) {
1982 case IXGBE_SUBDEV_ID_82599_560FLR:
1983 /* only support first port */
1984 if (hw->bus.func != 0) {
1985 wol->supported = 0;
1986 break;
1987 }
1988 case IXGBE_SUBDEV_ID_82599_SFP:
1989 retval = 0;
1990 break;
1991 default:
1992 wol->supported = 0;
1993 break;
1994 }
1995 break;
1996 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
1997 /* All except this subdevice support WOL */
1998 if (hw->subsystem_device_id ==
1999 IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) {
2000 wol->supported = 0;
2001 break;
2002 }
2003 retval = 0;
2004 break;
2005 case IXGBE_DEV_ID_82599_KX4:
2006 retval = 0;
2007 break;
2008 case IXGBE_DEV_ID_X540T:
2009 /* check eeprom to see if enabled wol */
2010 if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
2011 ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
2012 (hw->bus.func == 0))) {
2013 retval = 0;
2014 break;
2015 }
2016 1975
2017 /* All others not supported */ 1976 /* WOL not supported for all devices */
2018 wol->supported = 0; 1977 if (!ixgbe_wol_supported(adapter, hw->device_id,
2019 break; 1978 hw->subsystem_device_id)) {
2020 default: 1979 retval = 1;
2021 wol->supported = 0; 1980 wol->supported = 0;
2022 } 1981 }
2023 1982
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index ed1b47dc0834..af1a5314b494 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -523,11 +523,17 @@ static void ixgbe_add_ring(struct ixgbe_ring *ring,
523/** 523/**
524 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector 524 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
525 * @adapter: board private structure to initialize 525 * @adapter: board private structure to initialize
526 * @v_count: q_vectors allocated on adapter, used for ring interleaving
526 * @v_idx: index of vector in adapter struct 527 * @v_idx: index of vector in adapter struct
528 * @txr_count: total number of Tx rings to allocate
529 * @txr_idx: index of first Tx ring to allocate
530 * @rxr_count: total number of Rx rings to allocate
531 * @rxr_idx: index of first Rx ring to allocate
527 * 532 *
528 * We allocate one q_vector. If allocation fails we return -ENOMEM. 533 * We allocate one q_vector. If allocation fails we return -ENOMEM.
529 **/ 534 **/
530static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx, 535static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
536 int v_count, int v_idx,
531 int txr_count, int txr_idx, 537 int txr_count, int txr_idx,
532 int rxr_count, int rxr_idx) 538 int rxr_count, int rxr_idx)
533{ 539{
@@ -598,7 +604,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx,
598 604
599 /* update count and index */ 605 /* update count and index */
600 txr_count--; 606 txr_count--;
601 txr_idx++; 607 txr_idx += v_count;
602 608
603 /* push pointer to next ring */ 609 /* push pointer to next ring */
604 ring++; 610 ring++;
@@ -641,7 +647,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx,
641 647
642 /* update count and index */ 648 /* update count and index */
643 rxr_count--; 649 rxr_count--;
644 rxr_idx++; 650 rxr_idx += v_count;
645 651
646 /* push pointer to next ring */ 652 /* push pointer to next ring */
647 ring++; 653 ring++;
@@ -700,24 +706,23 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
700 q_vectors = 1; 706 q_vectors = 1;
701 707
702 if (q_vectors >= (rxr_remaining + txr_remaining)) { 708 if (q_vectors >= (rxr_remaining + txr_remaining)) {
703 for (; rxr_remaining; v_idx++, q_vectors--) { 709 for (; rxr_remaining; v_idx++) {
704 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors); 710 err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
705 err = ixgbe_alloc_q_vector(adapter, v_idx, 711 0, 0, 1, rxr_idx);
706 0, 0, rqpv, rxr_idx);
707 712
708 if (err) 713 if (err)
709 goto err_out; 714 goto err_out;
710 715
711 /* update counts and index */ 716 /* update counts and index */
712 rxr_remaining -= rqpv; 717 rxr_remaining--;
713 rxr_idx += rqpv; 718 rxr_idx++;
714 } 719 }
715 } 720 }
716 721
717 for (; q_vectors; v_idx++, q_vectors--) { 722 for (; v_idx < q_vectors; v_idx++) {
718 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors); 723 int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
719 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors); 724 int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
720 err = ixgbe_alloc_q_vector(adapter, v_idx, 725 err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
721 tqpv, txr_idx, 726 tqpv, txr_idx,
722 rqpv, rxr_idx); 727 rqpv, rxr_idx);
723 728
@@ -726,9 +731,9 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
726 731
727 /* update counts and index */ 732 /* update counts and index */
728 rxr_remaining -= rqpv; 733 rxr_remaining -= rqpv;
729 rxr_idx += rqpv;
730 txr_remaining -= tqpv; 734 txr_remaining -= tqpv;
731 txr_idx += tqpv; 735 rxr_idx++;
736 txr_idx++;
732 } 737 }
733 738
734 return 0; 739 return 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 467948e9ecd9..ea3cb710c2dd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -133,7 +133,7 @@ static struct notifier_block dca_notifier = {
133static unsigned int max_vfs; 133static unsigned int max_vfs;
134module_param(max_vfs, uint, 0); 134module_param(max_vfs, uint, 0);
135MODULE_PARM_DESC(max_vfs, 135MODULE_PARM_DESC(max_vfs,
136 "Maximum number of virtual functions to allocate per physical function"); 136 "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63");
137#endif /* CONFIG_PCI_IOV */ 137#endif /* CONFIG_PCI_IOV */
138 138
139static unsigned int allow_unsupported_sfp; 139static unsigned int allow_unsupported_sfp;
@@ -637,7 +637,11 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
637 clear_bit(__IXGBE_HANG_CHECK_ARMED, 637 clear_bit(__IXGBE_HANG_CHECK_ARMED,
638 &adapter->tx_ring[i]->state); 638 &adapter->tx_ring[i]->state);
639 return; 639 return;
640 } else if (!(adapter->dcb_cfg.pfc_mode_enable)) 640 } else if (((adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) &&
641 !(adapter->dcb_cfg.pfc_mode_enable)) ||
642 ((adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) &&
643 adapter->ixgbe_ieee_pfc &&
644 !(adapter->ixgbe_ieee_pfc->pfc_en)))
641 return; 645 return;
642 646
643 /* update stats for each tc, only valid with PFC enabled */ 647 /* update stats for each tc, only valid with PFC enabled */
@@ -1144,7 +1148,7 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring,
1144 * there isn't much point in holding memory we can't use 1148 * there isn't much point in holding memory we can't use
1145 */ 1149 */
1146 if (dma_mapping_error(rx_ring->dev, dma)) { 1150 if (dma_mapping_error(rx_ring->dev, dma)) {
1147 put_page(page); 1151 __free_pages(page, ixgbe_rx_pg_order(rx_ring));
1148 bi->page = NULL; 1152 bi->page = NULL;
1149 1153
1150 rx_ring->rx_stats.alloc_rx_page_failed++; 1154 rx_ring->rx_stats.alloc_rx_page_failed++;
@@ -2902,33 +2906,6 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
2902 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); 2906 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
2903} 2907}
2904 2908
2905/**
2906 * ixgbe_set_uta - Set unicast filter table address
2907 * @adapter: board private structure
2908 *
2909 * The unicast table address is a register array of 32-bit registers.
2910 * The table is meant to be used in a way similar to how the MTA is used
2911 * however due to certain limitations in the hardware it is necessary to
2912 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
2913 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
2914 **/
2915static void ixgbe_set_uta(struct ixgbe_adapter *adapter)
2916{
2917 struct ixgbe_hw *hw = &adapter->hw;
2918 int i;
2919
2920 /* The UTA table only exists on 82599 hardware and newer */
2921 if (hw->mac.type < ixgbe_mac_82599EB)
2922 return;
2923
2924 /* we only need to do this if VMDq is enabled */
2925 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
2926 return;
2927
2928 for (i = 0; i < 128; i++)
2929 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
2930}
2931
2932#define IXGBE_MAX_RX_DESC_POLL 10 2909#define IXGBE_MAX_RX_DESC_POLL 10
2933static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, 2910static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
2934 struct ixgbe_ring *ring) 2911 struct ixgbe_ring *ring)
@@ -3214,8 +3191,6 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
3214 /* Program registers for the distribution of queues */ 3191 /* Program registers for the distribution of queues */
3215 ixgbe_setup_mrqc(adapter); 3192 ixgbe_setup_mrqc(adapter);
3216 3193
3217 ixgbe_set_uta(adapter);
3218
3219 /* set_rx_buffer_len must be called before ring initialization */ 3194 /* set_rx_buffer_len must be called before ring initialization */
3220 ixgbe_set_rx_buffer_len(adapter); 3195 ixgbe_set_rx_buffer_len(adapter);
3221 3196
@@ -3452,16 +3427,17 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
3452 } 3427 }
3453 ixgbe_vlan_filter_enable(adapter); 3428 ixgbe_vlan_filter_enable(adapter);
3454 hw->addr_ctrl.user_set_promisc = false; 3429 hw->addr_ctrl.user_set_promisc = false;
3455 /* 3430 }
3456 * Write addresses to available RAR registers, if there is not 3431
3457 * sufficient space to store all the addresses then enable 3432 /*
3458 * unicast promiscuous mode 3433 * Write addresses to available RAR registers, if there is not
3459 */ 3434 * sufficient space to store all the addresses then enable
3460 count = ixgbe_write_uc_addr_list(netdev); 3435 * unicast promiscuous mode
3461 if (count < 0) { 3436 */
3462 fctrl |= IXGBE_FCTRL_UPE; 3437 count = ixgbe_write_uc_addr_list(netdev);
3463 vmolr |= IXGBE_VMOLR_ROPE; 3438 if (count < 0) {
3464 } 3439 fctrl |= IXGBE_FCTRL_UPE;
3440 vmolr |= IXGBE_VMOLR_ROPE;
3465 } 3441 }
3466 3442
3467 if (adapter->num_vfs) { 3443 if (adapter->num_vfs) {
@@ -4128,7 +4104,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
4128 DMA_FROM_DEVICE); 4104 DMA_FROM_DEVICE);
4129 rx_buffer->dma = 0; 4105 rx_buffer->dma = 0;
4130 if (rx_buffer->page) 4106 if (rx_buffer->page)
4131 put_page(rx_buffer->page); 4107 __free_pages(rx_buffer->page,
4108 ixgbe_rx_pg_order(rx_ring));
4132 rx_buffer->page = NULL; 4109 rx_buffer->page = NULL;
4133 } 4110 }
4134 4111
@@ -4993,9 +4970,6 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
4993 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { 4970 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
4994 u64 rsc_count = 0; 4971 u64 rsc_count = 0;
4995 u64 rsc_flush = 0; 4972 u64 rsc_flush = 0;
4996 for (i = 0; i < 16; i++)
4997 adapter->hw_rx_no_dma_resources +=
4998 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
4999 for (i = 0; i < adapter->num_rx_queues; i++) { 4973 for (i = 0; i < adapter->num_rx_queues; i++) {
5000 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count; 4974 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
5001 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush; 4975 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
@@ -5098,6 +5072,9 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5098 hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC); 5072 hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC);
5099 hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC); 5073 hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC);
5100 case ixgbe_mac_82599EB: 5074 case ixgbe_mac_82599EB:
5075 for (i = 0; i < 16; i++)
5076 adapter->hw_rx_no_dma_resources +=
5077 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
5101 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); 5078 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
5102 IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */ 5079 IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
5103 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); 5080 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
@@ -5275,7 +5252,7 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
5275 struct ixgbe_hw *hw = &adapter->hw; 5252 struct ixgbe_hw *hw = &adapter->hw;
5276 u32 link_speed = adapter->link_speed; 5253 u32 link_speed = adapter->link_speed;
5277 bool link_up = adapter->link_up; 5254 bool link_up = adapter->link_up;
5278 int i; 5255 bool pfc_en = adapter->dcb_cfg.pfc_mode_enable;
5279 5256
5280 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) 5257 if (!(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
5281 return; 5258 return;
@@ -5287,14 +5264,12 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
5287 link_speed = IXGBE_LINK_SPEED_10GB_FULL; 5264 link_speed = IXGBE_LINK_SPEED_10GB_FULL;
5288 link_up = true; 5265 link_up = true;
5289 } 5266 }
5290 if (link_up) { 5267
5291 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 5268 if (adapter->ixgbe_ieee_pfc)
5292 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) 5269 pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
5293 hw->mac.ops.fc_enable(hw, i); 5270
5294 } else { 5271 if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en))
5295 hw->mac.ops.fc_enable(hw, 0); 5272 hw->mac.ops.fc_enable(hw);
5296 }
5297 }
5298 5273
5299 if (link_up || 5274 if (link_up ||
5300 time_after(jiffies, (adapter->link_check_timeout + 5275 time_after(jiffies, (adapter->link_check_timeout +
@@ -6624,7 +6599,7 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
6624 /* Turn off LRO if not RSC capable */ 6599 /* Turn off LRO if not RSC capable */
6625 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) 6600 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
6626 features &= ~NETIF_F_LRO; 6601 features &= ~NETIF_F_LRO;
6627 6602
6628 6603
6629 return features; 6604 return features;
6630} 6605}
@@ -6683,6 +6658,74 @@ static int ixgbe_set_features(struct net_device *netdev,
6683 return 0; 6658 return 0;
6684} 6659}
6685 6660
6661static int ixgbe_ndo_fdb_add(struct ndmsg *ndm,
6662 struct net_device *dev,
6663 unsigned char *addr,
6664 u16 flags)
6665{
6666 struct ixgbe_adapter *adapter = netdev_priv(dev);
6667 int err = -EOPNOTSUPP;
6668
6669 if (ndm->ndm_state & NUD_PERMANENT) {
6670 pr_info("%s: FDB only supports static addresses\n",
6671 ixgbe_driver_name);
6672 return -EINVAL;
6673 }
6674
6675 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
6676 if (is_unicast_ether_addr(addr))
6677 err = dev_uc_add_excl(dev, addr);
6678 else if (is_multicast_ether_addr(addr))
6679 err = dev_mc_add_excl(dev, addr);
6680 else
6681 err = -EINVAL;
6682 }
6683
6684 /* Only return duplicate errors if NLM_F_EXCL is set */
6685 if (err == -EEXIST && !(flags & NLM_F_EXCL))
6686 err = 0;
6687
6688 return err;
6689}
6690
6691static int ixgbe_ndo_fdb_del(struct ndmsg *ndm,
6692 struct net_device *dev,
6693 unsigned char *addr)
6694{
6695 struct ixgbe_adapter *adapter = netdev_priv(dev);
6696 int err = -EOPNOTSUPP;
6697
6698 if (ndm->ndm_state & NUD_PERMANENT) {
6699 pr_info("%s: FDB only supports static addresses\n",
6700 ixgbe_driver_name);
6701 return -EINVAL;
6702 }
6703
6704 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
6705 if (is_unicast_ether_addr(addr))
6706 err = dev_uc_del(dev, addr);
6707 else if (is_multicast_ether_addr(addr))
6708 err = dev_mc_del(dev, addr);
6709 else
6710 err = -EINVAL;
6711 }
6712
6713 return err;
6714}
6715
6716static int ixgbe_ndo_fdb_dump(struct sk_buff *skb,
6717 struct netlink_callback *cb,
6718 struct net_device *dev,
6719 int idx)
6720{
6721 struct ixgbe_adapter *adapter = netdev_priv(dev);
6722
6723 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
6724 idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
6725
6726 return idx;
6727}
6728
6686static const struct net_device_ops ixgbe_netdev_ops = { 6729static const struct net_device_ops ixgbe_netdev_ops = {
6687 .ndo_open = ixgbe_open, 6730 .ndo_open = ixgbe_open,
6688 .ndo_stop = ixgbe_close, 6731 .ndo_stop = ixgbe_close,
@@ -6719,6 +6762,9 @@ static const struct net_device_ops ixgbe_netdev_ops = {
6719#endif /* IXGBE_FCOE */ 6762#endif /* IXGBE_FCOE */
6720 .ndo_set_features = ixgbe_set_features, 6763 .ndo_set_features = ixgbe_set_features,
6721 .ndo_fix_features = ixgbe_fix_features, 6764 .ndo_fix_features = ixgbe_fix_features,
6765 .ndo_fdb_add = ixgbe_ndo_fdb_add,
6766 .ndo_fdb_del = ixgbe_ndo_fdb_del,
6767 .ndo_fdb_dump = ixgbe_ndo_fdb_dump,
6722}; 6768};
6723 6769
6724static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter, 6770static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
@@ -6733,14 +6779,66 @@ static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
6733 /* The 82599 supports up to 64 VFs per physical function 6779 /* The 82599 supports up to 64 VFs per physical function
6734 * but this implementation limits allocation to 63 so that 6780 * but this implementation limits allocation to 63 so that
6735 * basic networking resources are still available to the 6781 * basic networking resources are still available to the
6736 * physical function 6782 * physical function. If the user requests greater thn
6783 * 63 VFs then it is an error - reset to default of zero.
6737 */ 6784 */
6738 adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs; 6785 adapter->num_vfs = (max_vfs > 63) ? 0 : max_vfs;
6739 ixgbe_enable_sriov(adapter, ii); 6786 ixgbe_enable_sriov(adapter, ii);
6740#endif /* CONFIG_PCI_IOV */ 6787#endif /* CONFIG_PCI_IOV */
6741} 6788}
6742 6789
6743/** 6790/**
6791 * ixgbe_wol_supported - Check whether device supports WoL
6792 * @hw: hw specific details
6793 * @device_id: the device ID
6794 * @subdev_id: the subsystem device ID
6795 *
6796 * This function is used by probe and ethtool to determine
6797 * which devices have WoL support
6798 *
6799 **/
6800int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
6801 u16 subdevice_id)
6802{
6803 struct ixgbe_hw *hw = &adapter->hw;
6804 u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
6805 int is_wol_supported = 0;
6806
6807 switch (device_id) {
6808 case IXGBE_DEV_ID_82599_SFP:
6809 /* Only these subdevices could supports WOL */
6810 switch (subdevice_id) {
6811 case IXGBE_SUBDEV_ID_82599_560FLR:
6812 /* only support first port */
6813 if (hw->bus.func != 0)
6814 break;
6815 case IXGBE_SUBDEV_ID_82599_SFP:
6816 is_wol_supported = 1;
6817 break;
6818 }
6819 break;
6820 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
6821 /* All except this subdevice support WOL */
6822 if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
6823 is_wol_supported = 1;
6824 break;
6825 case IXGBE_DEV_ID_82599_KX4:
6826 is_wol_supported = 1;
6827 break;
6828 case IXGBE_DEV_ID_X540T:
6829 /* check eeprom to see if enabled wol */
6830 if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
6831 ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
6832 (hw->bus.func == 0))) {
6833 is_wol_supported = 1;
6834 }
6835 break;
6836 }
6837
6838 return is_wol_supported;
6839}
6840
6841/**
6744 * ixgbe_probe - Device Initialization Routine 6842 * ixgbe_probe - Device Initialization Routine
6745 * @pdev: PCI device information struct 6843 * @pdev: PCI device information struct
6746 * @ent: entry in ixgbe_pci_tbl 6844 * @ent: entry in ixgbe_pci_tbl
@@ -6766,7 +6864,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6766 u16 device_caps; 6864 u16 device_caps;
6767#endif 6865#endif
6768 u32 eec; 6866 u32 eec;
6769 u16 wol_cap;
6770 6867
6771 /* Catch broken hardware that put the wrong VF device ID in 6868 /* Catch broken hardware that put the wrong VF device ID in
6772 * the PCIe SR-IOV capability. 6869 * the PCIe SR-IOV capability.
@@ -7030,40 +7127,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
7030 netdev->features &= ~NETIF_F_RXHASH; 7127 netdev->features &= ~NETIF_F_RXHASH;
7031 } 7128 }
7032 7129
7033 /* WOL not supported for all but the following */ 7130 /* WOL not supported for all devices */
7034 adapter->wol = 0; 7131 adapter->wol = 0;
7035 switch (pdev->device) { 7132 hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
7036 case IXGBE_DEV_ID_82599_SFP: 7133 if (ixgbe_wol_supported(adapter, pdev->device, pdev->subsystem_device))
7037 /* Only these subdevice supports WOL */
7038 switch (pdev->subsystem_device) {
7039 case IXGBE_SUBDEV_ID_82599_560FLR:
7040 /* only support first port */
7041 if (hw->bus.func != 0)
7042 break;
7043 case IXGBE_SUBDEV_ID_82599_SFP:
7044 adapter->wol = IXGBE_WUFC_MAG;
7045 break;
7046 }
7047 break;
7048 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
7049 /* All except this subdevice support WOL */
7050 if (pdev->subsystem_device != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
7051 adapter->wol = IXGBE_WUFC_MAG;
7052 break;
7053 case IXGBE_DEV_ID_82599_KX4:
7054 adapter->wol = IXGBE_WUFC_MAG; 7134 adapter->wol = IXGBE_WUFC_MAG;
7055 break;
7056 case IXGBE_DEV_ID_X540T:
7057 /* Check eeprom to see if it is enabled */
7058 hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap);
7059 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
7060 7135
7061 if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
7062 ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
7063 (hw->bus.func == 0)))
7064 adapter->wol = IXGBE_WUFC_MAG;
7065 break;
7066 }
7067 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 7136 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
7068 7137
7069 /* save off EEPROM version number */ 7138 /* save off EEPROM version number */
@@ -7152,6 +7221,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
7152 7221
7153 e_dev_info("%s\n", ixgbe_default_device_descr); 7222 e_dev_info("%s\n", ixgbe_default_device_descr);
7154 cards_found++; 7223 cards_found++;
7224
7225 if (ixgbe_sysfs_init(adapter))
7226 e_err(probe, "failed to allocate sysfs resources\n");
7227
7155 return 0; 7228 return 0;
7156 7229
7157err_register: 7230err_register:
@@ -7198,6 +7271,8 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
7198 } 7271 }
7199 7272
7200#endif 7273#endif
7274 ixgbe_sysfs_exit(adapter);
7275
7201#ifdef IXGBE_FCOE 7276#ifdef IXGBE_FCOE
7202 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) 7277 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
7203 ixgbe_cleanup_fcoe(adapter); 7278 ixgbe_cleanup_fcoe(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index bf9f82f4b1ae..24117709d6a2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -1582,13 +1582,21 @@ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
1582 **/ 1582 **/
1583static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) 1583static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
1584{ 1584{
1585 *i2cctl |= IXGBE_I2C_CLK_OUT; 1585 u32 i = 0;
1586 1586 u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT;
1587 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); 1587 u32 i2cctl_r = 0;
1588 IXGBE_WRITE_FLUSH(hw);
1589 1588
1590 /* SCL rise time (1000ns) */ 1589 for (i = 0; i < timeout; i++) {
1591 udelay(IXGBE_I2C_T_RISE); 1590 *i2cctl |= IXGBE_I2C_CLK_OUT;
1591 IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl);
1592 IXGBE_WRITE_FLUSH(hw);
1593 /* SCL rise time (1000ns) */
1594 udelay(IXGBE_I2C_T_RISE);
1595
1596 i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
1597 if (i2cctl_r & IXGBE_I2C_CLK_IN)
1598 break;
1599 }
1592} 1600}
1593 1601
1594/** 1602/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 88a58cb08569..39856371acb1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -635,6 +635,12 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
635 } 635 }
636 break; 636 break;
637 case IXGBE_VF_SET_MACVLAN: 637 case IXGBE_VF_SET_MACVLAN:
638 if (adapter->vfinfo[vf].pf_set_mac) {
639 e_warn(drv, "VF %d requested MACVLAN filter but is "
640 "administratively denied\n", vf);
641 retval = -1;
642 break;
643 }
638 index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> 644 index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
639 IXGBE_VT_MSGINFO_SHIFT; 645 IXGBE_VT_MSGINFO_SHIFT;
640 /* 646 /*
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
new file mode 100644
index 000000000000..f81c166dc5a8
--- /dev/null
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c
@@ -0,0 +1,273 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2012 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#include "ixgbe.h"
29#include "ixgbe_common.h"
30#include "ixgbe_type.h"
31
32#include <linux/module.h>
33#include <linux/types.h>
34#include <linux/sysfs.h>
35#include <linux/kobject.h>
36#include <linux/device.h>
37#include <linux/netdevice.h>
38#include <linux/hwmon.h>
39
40/*
41 * This file provides a sysfs interface to export information from the
42 * driver. The information presented is READ-ONLY.
43 */
44#ifdef CONFIG_IXGBE_HWMON
45
46/* hwmon callback functions */
47static ssize_t ixgbe_hwmon_show_location(struct device *dev,
48 struct device_attribute *attr,
49 char *buf)
50{
51 struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr,
52 dev_attr);
53 return sprintf(buf, "loc%u\n",
54 ixgbe_attr->sensor->location);
55}
56
57static ssize_t ixgbe_hwmon_show_temp(struct device *dev,
58 struct device_attribute *attr,
59 char *buf)
60{
61 struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr,
62 dev_attr);
63 unsigned int value;
64
65 /* reset the temp field */
66 ixgbe_attr->hw->mac.ops.get_thermal_sensor_data(ixgbe_attr->hw);
67
68 value = ixgbe_attr->sensor->temp;
69
70 /* display millidegree */
71 value *= 1000;
72
73 return sprintf(buf, "%u\n", value);
74}
75
76static ssize_t ixgbe_hwmon_show_cautionthresh(struct device *dev,
77 struct device_attribute *attr,
78 char *buf)
79{
80 struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr,
81 dev_attr);
82 unsigned int value = ixgbe_attr->sensor->caution_thresh;
83
84 /* display millidegree */
85 value *= 1000;
86
87 return sprintf(buf, "%u\n", value);
88}
89
90static ssize_t ixgbe_hwmon_show_maxopthresh(struct device *dev,
91 struct device_attribute *attr,
92 char *buf)
93{
94 struct hwmon_attr *ixgbe_attr = container_of(attr, struct hwmon_attr,
95 dev_attr);
96 unsigned int value = ixgbe_attr->sensor->max_op_thresh;
97
98 /* display millidegree */
99 value *= 1000;
100
101 return sprintf(buf, "%u\n", value);
102}
103
104/*
105 * ixgbe_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file.
106 * @ adapter: pointer to the adapter structure
107 * @ offset: offset in the eeprom sensor data table
108 * @ type: type of sensor data to display
109 *
110 * For each file we want in hwmon's sysfs interface we need a device_attribute
111 * This is included in our hwmon_attr struct that contains the references to
112 * the data structures we need to get the data to display.
113 */
114static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter,
115 unsigned int offset, int type) {
116 int rc;
117 unsigned int n_attr;
118 struct hwmon_attr *ixgbe_attr;
119
120 n_attr = adapter->ixgbe_hwmon_buff.n_hwmon;
121 ixgbe_attr = &adapter->ixgbe_hwmon_buff.hwmon_list[n_attr];
122
123 switch (type) {
124 case IXGBE_HWMON_TYPE_LOC:
125 ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_location;
126 snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
127 "temp%u_label", offset);
128 break;
129 case IXGBE_HWMON_TYPE_TEMP:
130 ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_temp;
131 snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
132 "temp%u_input", offset);
133 break;
134 case IXGBE_HWMON_TYPE_CAUTION:
135 ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_cautionthresh;
136 snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
137 "temp%u_max", offset);
138 break;
139 case IXGBE_HWMON_TYPE_MAX:
140 ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_maxopthresh;
141 snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name),
142 "temp%u_crit", offset);
143 break;
144 default:
145 rc = -EPERM;
146 return rc;
147 }
148
149 /* These always the same regardless of type */
150 ixgbe_attr->sensor =
151 &adapter->hw.mac.thermal_sensor_data.sensor[offset];
152 ixgbe_attr->hw = &adapter->hw;
153 ixgbe_attr->dev_attr.store = NULL;
154 ixgbe_attr->dev_attr.attr.mode = S_IRUGO;
155 ixgbe_attr->dev_attr.attr.name = ixgbe_attr->name;
156
157 rc = device_create_file(&adapter->pdev->dev,
158 &ixgbe_attr->dev_attr);
159
160 if (rc == 0)
161 ++adapter->ixgbe_hwmon_buff.n_hwmon;
162
163 return rc;
164}
165#endif /* CONFIG_IXGBE_HWMON */
166
167static void ixgbe_sysfs_del_adapter(struct ixgbe_adapter *adapter)
168{
169#ifdef CONFIG_IXGBE_HWMON
170 int i;
171#endif /* CONFIG_IXGBE_HWMON */
172
173 if (adapter == NULL)
174 return;
175#ifdef CONFIG_IXGBE_HWMON
176
177 for (i = 0; i < adapter->ixgbe_hwmon_buff.n_hwmon; i++) {
178 device_remove_file(&adapter->pdev->dev,
179 &adapter->ixgbe_hwmon_buff.hwmon_list[i].dev_attr);
180 }
181
182 kfree(adapter->ixgbe_hwmon_buff.hwmon_list);
183
184 if (adapter->ixgbe_hwmon_buff.device)
185 hwmon_device_unregister(adapter->ixgbe_hwmon_buff.device);
186#endif /* CONFIG_IXGBE_HWMON */
187
188 if (adapter->info_kobj != NULL) {
189 kobject_put(adapter->info_kobj);
190 adapter->info_kobj = NULL;
191 }
192}
193
194/* called from ixgbe_main.c */
195void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter)
196{
197 ixgbe_sysfs_del_adapter(adapter);
198}
199
200/* called from ixgbe_main.c */
201int ixgbe_sysfs_init(struct ixgbe_adapter *adapter)
202{
203#ifdef CONFIG_IXGBE_HWMON
204 struct hwmon_buff *ixgbe_hwmon = &adapter->ixgbe_hwmon_buff;
205 unsigned int i;
206 int n_attrs;
207#endif /* CONFIG_IXGBE_HWMON */
208 struct net_device *netdev = adapter->netdev;
209 int rc = 0;
210
211 /* create info kobj and attribute listings in kobj */
212 adapter->info_kobj = kobject_create_and_add("info", &netdev->dev.kobj);
213 if (adapter->info_kobj == NULL) {
214 rc = -ENOMEM;
215 goto err;
216 }
217
218#ifdef CONFIG_IXGBE_HWMON
219 /* If this method isn't defined we don't support thermals */
220 if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL) {
221 rc = -EPERM;
222 goto err;
223 }
224
225 /* Don't create thermal hwmon interface if no sensors present */
226 rc = adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw);
227 if (rc)
228 goto err;
229
230 /*
231 * Allocation space for max attributs
232 * max num sensors * values (loc, temp, max, caution)
233 */
234 n_attrs = IXGBE_MAX_SENSORS * 4;
235 ixgbe_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr),
236 GFP_KERNEL);
237 if (!ixgbe_hwmon->hwmon_list) {
238 rc = -ENOMEM;
239 goto err;
240 }
241
242 ixgbe_hwmon->device = hwmon_device_register(&adapter->pdev->dev);
243 if (IS_ERR(ixgbe_hwmon->device)) {
244 rc = PTR_ERR(ixgbe_hwmon->device);
245 goto err;
246 }
247
248 for (i = 0; i < IXGBE_MAX_SENSORS; i++) {
249 /*
250 * Only create hwmon sysfs entries for sensors that have
251 * meaningful data for.
252 */
253 if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0)
254 continue;
255
256 /* Bail if any hwmon attr struct fails to initialize */
257 rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_CAUTION);
258 rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_LOC);
259 rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_TEMP);
260 rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_MAX);
261 if (rc)
262 goto err;
263 }
264#endif /* CONFIG_IXGBE_HWMON */
265
266 goto exit;
267
268err:
269 ixgbe_sysfs_del_adapter(adapter);
270exit:
271 return rc;
272}
273
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 8636e8344fc9..5e64c77255e9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -110,6 +110,28 @@
110#define IXGBE_I2C_CLK_OUT 0x00000002 110#define IXGBE_I2C_CLK_OUT 0x00000002
111#define IXGBE_I2C_DATA_IN 0x00000004 111#define IXGBE_I2C_DATA_IN 0x00000004
112#define IXGBE_I2C_DATA_OUT 0x00000008 112#define IXGBE_I2C_DATA_OUT 0x00000008
113#define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500
114
115#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
116#define IXGBE_EMC_INTERNAL_DATA 0x00
117#define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20
118#define IXGBE_EMC_DIODE1_DATA 0x01
119#define IXGBE_EMC_DIODE1_THERM_LIMIT 0x19
120#define IXGBE_EMC_DIODE2_DATA 0x23
121#define IXGBE_EMC_DIODE2_THERM_LIMIT 0x1A
122
123#define IXGBE_MAX_SENSORS 3
124
125struct ixgbe_thermal_diode_data {
126 u8 location;
127 u8 temp;
128 u8 caution_thresh;
129 u8 max_op_thresh;
130};
131
132struct ixgbe_thermal_sensor_data {
133 struct ixgbe_thermal_diode_data sensor[IXGBE_MAX_SENSORS];
134};
113 135
114/* Interrupt Registers */ 136/* Interrupt Registers */
115#define IXGBE_EICR 0x00800 137#define IXGBE_EICR 0x00800
@@ -1677,11 +1699,29 @@ enum {
1677#define IXGBE_PBANUM0_PTR 0x15 1699#define IXGBE_PBANUM0_PTR 0x15
1678#define IXGBE_PBANUM1_PTR 0x16 1700#define IXGBE_PBANUM1_PTR 0x16
1679#define IXGBE_FREE_SPACE_PTR 0X3E 1701#define IXGBE_FREE_SPACE_PTR 0X3E
1702
1703/* External Thermal Sensor Config */
1704#define IXGBE_ETS_CFG 0x26
1705#define IXGBE_ETS_LTHRES_DELTA_MASK 0x07C0
1706#define IXGBE_ETS_LTHRES_DELTA_SHIFT 6
1707#define IXGBE_ETS_TYPE_MASK 0x0038
1708#define IXGBE_ETS_TYPE_SHIFT 3
1709#define IXGBE_ETS_TYPE_EMC 0x000
1710#define IXGBE_ETS_TYPE_EMC_SHIFTED 0x000
1711#define IXGBE_ETS_NUM_SENSORS_MASK 0x0007
1712#define IXGBE_ETS_DATA_LOC_MASK 0x3C00
1713#define IXGBE_ETS_DATA_LOC_SHIFT 10
1714#define IXGBE_ETS_DATA_INDEX_MASK 0x0300
1715#define IXGBE_ETS_DATA_INDEX_SHIFT 8
1716#define IXGBE_ETS_DATA_HTHRESH_MASK 0x00FF
1717
1680#define IXGBE_SAN_MAC_ADDR_PTR 0x28 1718#define IXGBE_SAN_MAC_ADDR_PTR 0x28
1681#define IXGBE_DEVICE_CAPS 0x2C 1719#define IXGBE_DEVICE_CAPS 0x2C
1682#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11 1720#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11
1683#define IXGBE_PCIE_MSIX_82599_CAPS 0x72 1721#define IXGBE_PCIE_MSIX_82599_CAPS 0x72
1722#define IXGBE_MAX_MSIX_VECTORS_82599 0x40
1684#define IXGBE_PCIE_MSIX_82598_CAPS 0x62 1723#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
1724#define IXGBE_MAX_MSIX_VECTORS_82598 0x13
1685 1725
1686/* MSI-X capability fields masks */ 1726/* MSI-X capability fields masks */
1687#define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF 1727#define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF
@@ -1852,7 +1892,7 @@ enum {
1852#define IXGBE_MFLCN_DPF 0x00000002 /* Discard Pause Frame */ 1892#define IXGBE_MFLCN_DPF 0x00000002 /* Discard Pause Frame */
1853#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */ 1893#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */
1854#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */ 1894#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */
1855#define IXGBE_MFLCN_RPFCE_MASK 0x00000FF0 /* Receive FC Mask */ 1895#define IXGBE_MFLCN_RPFCE_MASK 0x00000FF4 /* Receive FC Mask */
1856 1896
1857#define IXGBE_MFLCN_RPFCE_SHIFT 4 1897#define IXGBE_MFLCN_RPFCE_SHIFT 4
1858 1898
@@ -2768,10 +2808,12 @@ struct ixgbe_mac_operations {
2768 void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int); 2808 void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int);
2769 2809
2770 /* Flow Control */ 2810 /* Flow Control */
2771 s32 (*fc_enable)(struct ixgbe_hw *, s32); 2811 s32 (*fc_enable)(struct ixgbe_hw *);
2772 2812
2773 /* Manageability interface */ 2813 /* Manageability interface */
2774 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); 2814 s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
2815 s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
2816 s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
2775}; 2817};
2776 2818
2777struct ixgbe_phy_operations { 2819struct ixgbe_phy_operations {
@@ -2813,6 +2855,7 @@ struct ixgbe_mac_info {
2813 u16 wwnn_prefix; 2855 u16 wwnn_prefix;
2814 /* prefix for World Wide Port Name (WWPN) */ 2856 /* prefix for World Wide Port Name (WWPN) */
2815 u16 wwpn_prefix; 2857 u16 wwpn_prefix;
2858 u16 max_msix_vectors;
2816#define IXGBE_MAX_MTA 128 2859#define IXGBE_MAX_MTA 128
2817 u32 mta_shadow[IXGBE_MAX_MTA]; 2860 u32 mta_shadow[IXGBE_MAX_MTA];
2818 s32 mc_filter_type; 2861 s32 mc_filter_type;
@@ -2823,12 +2866,12 @@ struct ixgbe_mac_info {
2823 u32 rx_pb_size; 2866 u32 rx_pb_size;
2824 u32 max_tx_queues; 2867 u32 max_tx_queues;
2825 u32 max_rx_queues; 2868 u32 max_rx_queues;
2826 u32 max_msix_vectors;
2827 u32 orig_autoc; 2869 u32 orig_autoc;
2828 u32 orig_autoc2; 2870 u32 orig_autoc2;
2829 bool orig_link_settings_stored; 2871 bool orig_link_settings_stored;
2830 bool autotry_restart; 2872 bool autotry_restart;
2831 u8 flags; 2873 u8 flags;
2874 struct ixgbe_thermal_sensor_data thermal_sensor_data;
2832}; 2875};
2833 2876
2834struct ixgbe_phy_info { 2877struct ixgbe_phy_info {
@@ -2938,7 +2981,6 @@ struct ixgbe_info {
2938#define IXGBE_ERR_OVERTEMP -26 2981#define IXGBE_ERR_OVERTEMP -26
2939#define IXGBE_ERR_FC_NOT_NEGOTIATED -27 2982#define IXGBE_ERR_FC_NOT_NEGOTIATED -27
2940#define IXGBE_ERR_FC_NOT_SUPPORTED -28 2983#define IXGBE_ERR_FC_NOT_SUPPORTED -28
2941#define IXGBE_ERR_FLOW_CONTROL -29
2942#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30 2984#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30
2943#define IXGBE_ERR_PBA_SECTION -31 2985#define IXGBE_ERR_PBA_SECTION -31
2944#define IXGBE_ERR_INVALID_ARGUMENT -32 2986#define IXGBE_ERR_INVALID_ARGUMENT -32
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 97a991403bbd..f90ec078ece2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -849,6 +849,8 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
849 .release_swfw_sync = &ixgbe_release_swfw_sync_X540, 849 .release_swfw_sync = &ixgbe_release_swfw_sync_X540,
850 .disable_rx_buff = &ixgbe_disable_rx_buff_generic, 850 .disable_rx_buff = &ixgbe_disable_rx_buff_generic,
851 .enable_rx_buff = &ixgbe_enable_rx_buff_generic, 851 .enable_rx_buff = &ixgbe_enable_rx_buff_generic,
852 .get_thermal_sensor_data = NULL,
853 .init_thermal_sensor_thresh = NULL,
852}; 854};
853 855
854static struct ixgbe_eeprom_operations eeprom_ops_X540 = { 856static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 947b5c830735..e09a6cc633bb 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -40,6 +40,7 @@
40typedef u32 ixgbe_link_speed; 40typedef u32 ixgbe_link_speed;
41#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 41#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
42#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 42#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
43#define IXGBE_LINK_SPEED_100_FULL 0x0008
43 44
44#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ 45#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
45#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ 46#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
@@ -48,6 +49,7 @@ typedef u32 ixgbe_link_speed;
48#define IXGBE_LINKS_SPEED_82599 0x30000000 49#define IXGBE_LINKS_SPEED_82599 0x30000000
49#define IXGBE_LINKS_SPEED_10G_82599 0x30000000 50#define IXGBE_LINKS_SPEED_10G_82599 0x30000000
50#define IXGBE_LINKS_SPEED_1G_82599 0x20000000 51#define IXGBE_LINKS_SPEED_1G_82599 0x20000000
52#define IXGBE_LINKS_SPEED_100_82599 0x10000000
51 53
52/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ 54/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
53#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 55#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 2bfe0d1d7958..e8dddf572d38 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -107,10 +107,20 @@ static int ixgbevf_get_settings(struct net_device *netdev,
107 hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 107 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
108 108
109 if (link_up) { 109 if (link_up) {
110 ethtool_cmd_speed_set( 110 __u32 speed = SPEED_10000;
111 ecmd, 111 switch (link_speed) {
112 (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 112 case IXGBE_LINK_SPEED_10GB_FULL:
113 SPEED_10000 : SPEED_1000); 113 speed = SPEED_10000;
114 break;
115 case IXGBE_LINK_SPEED_1GB_FULL:
116 speed = SPEED_1000;
117 break;
118 case IXGBE_LINK_SPEED_100_FULL:
119 speed = SPEED_100;
120 break;
121 }
122
123 ethtool_cmd_speed_set(ecmd, speed);
114 ecmd->duplex = DUPLEX_FULL; 124 ecmd->duplex = DUPLEX_FULL;
115 } else { 125 } else {
116 ethtool_cmd_speed_set(ecmd, -1); 126 ethtool_cmd_speed_set(ecmd, -1);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index dfed420a1bf6..0a1b99240d43 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -287,7 +287,7 @@ extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
287extern const char ixgbevf_driver_name[]; 287extern const char ixgbevf_driver_name[];
288extern const char ixgbevf_driver_version[]; 288extern const char ixgbevf_driver_version[];
289 289
290extern int ixgbevf_up(struct ixgbevf_adapter *adapter); 290extern void ixgbevf_up(struct ixgbevf_adapter *adapter);
291extern void ixgbevf_down(struct ixgbevf_adapter *adapter); 291extern void ixgbevf_down(struct ixgbevf_adapter *adapter);
292extern void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter); 292extern void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
293extern void ixgbevf_reset(struct ixgbevf_adapter *adapter); 293extern void ixgbevf_reset(struct ixgbevf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 307611ae831d..f69ec4288b10 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -57,7 +57,7 @@ const char ixgbevf_driver_name[] = "ixgbevf";
57static const char ixgbevf_driver_string[] = 57static const char ixgbevf_driver_string[] =
58 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver"; 58 "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
59 59
60#define DRV_VERSION "2.2.0-k" 60#define DRV_VERSION "2.6.0-k"
61const char ixgbevf_driver_version[] = DRV_VERSION; 61const char ixgbevf_driver_version[] = DRV_VERSION;
62static char ixgbevf_copyright[] = 62static char ixgbevf_copyright[] =
63 "Copyright (c) 2009 - 2012 Intel Corporation."; 63 "Copyright (c) 2009 - 2012 Intel Corporation.";
@@ -1608,13 +1608,14 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
1608 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc; 1608 adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
1609} 1609}
1610 1610
1611static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter) 1611static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1612{ 1612{
1613 struct net_device *netdev = adapter->netdev; 1613 struct net_device *netdev = adapter->netdev;
1614 struct ixgbe_hw *hw = &adapter->hw; 1614 struct ixgbe_hw *hw = &adapter->hw;
1615 int i, j = 0; 1615 int i, j = 0;
1616 int num_rx_rings = adapter->num_rx_queues; 1616 int num_rx_rings = adapter->num_rx_queues;
1617 u32 txdctl, rxdctl; 1617 u32 txdctl, rxdctl;
1618 u32 msg[2];
1618 1619
1619 for (i = 0; i < adapter->num_tx_queues; i++) { 1620 for (i = 0; i < adapter->num_tx_queues; i++) {
1620 j = adapter->tx_ring[i].reg_idx; 1621 j = adapter->tx_ring[i].reg_idx;
@@ -1653,6 +1654,10 @@ static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1653 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0); 1654 hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
1654 } 1655 }
1655 1656
1657 msg[0] = IXGBE_VF_SET_LPE;
1658 msg[1] = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1659 hw->mbx.ops.write_posted(hw, msg, 2);
1660
1656 clear_bit(__IXGBEVF_DOWN, &adapter->state); 1661 clear_bit(__IXGBEVF_DOWN, &adapter->state);
1657 ixgbevf_napi_enable_all(adapter); 1662 ixgbevf_napi_enable_all(adapter);
1658 1663
@@ -1667,24 +1672,20 @@ static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
1667 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 1672 adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
1668 adapter->link_check_timeout = jiffies; 1673 adapter->link_check_timeout = jiffies;
1669 mod_timer(&adapter->watchdog_timer, jiffies); 1674 mod_timer(&adapter->watchdog_timer, jiffies);
1670 return 0;
1671} 1675}
1672 1676
1673int ixgbevf_up(struct ixgbevf_adapter *adapter) 1677void ixgbevf_up(struct ixgbevf_adapter *adapter)
1674{ 1678{
1675 int err;
1676 struct ixgbe_hw *hw = &adapter->hw; 1679 struct ixgbe_hw *hw = &adapter->hw;
1677 1680
1678 ixgbevf_configure(adapter); 1681 ixgbevf_configure(adapter);
1679 1682
1680 err = ixgbevf_up_complete(adapter); 1683 ixgbevf_up_complete(adapter);
1681 1684
1682 /* clear any pending interrupts, may auto mask */ 1685 /* clear any pending interrupts, may auto mask */
1683 IXGBE_READ_REG(hw, IXGBE_VTEICR); 1686 IXGBE_READ_REG(hw, IXGBE_VTEICR);
1684 1687
1685 ixgbevf_irq_enable(adapter, true, true); 1688 ixgbevf_irq_enable(adapter, true, true);
1686
1687 return err;
1688} 1689}
1689 1690
1690/** 1691/**
@@ -2673,9 +2674,7 @@ static int ixgbevf_open(struct net_device *netdev)
2673 */ 2674 */
2674 ixgbevf_map_rings_to_vectors(adapter); 2675 ixgbevf_map_rings_to_vectors(adapter);
2675 2676
2676 err = ixgbevf_up_complete(adapter); 2677 ixgbevf_up_complete(adapter);
2677 if (err)
2678 goto err_up;
2679 2678
2680 /* clear any pending interrupts, may auto mask */ 2679 /* clear any pending interrupts, may auto mask */
2681 IXGBE_READ_REG(hw, IXGBE_VTEICR); 2680 IXGBE_READ_REG(hw, IXGBE_VTEICR);
@@ -2689,7 +2688,6 @@ static int ixgbevf_open(struct net_device *netdev)
2689 2688
2690err_req_irq: 2689err_req_irq:
2691 ixgbevf_down(adapter); 2690 ixgbevf_down(adapter);
2692err_up:
2693 ixgbevf_free_irq(adapter); 2691 ixgbevf_free_irq(adapter);
2694err_setup_rx: 2692err_setup_rx:
2695 ixgbevf_free_all_rx_resources(adapter); 2693 ixgbevf_free_all_rx_resources(adapter);
@@ -3196,9 +3194,11 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
3196 /* must set new MTU before calling down or up */ 3194 /* must set new MTU before calling down or up */
3197 netdev->mtu = new_mtu; 3195 netdev->mtu = new_mtu;
3198 3196
3199 msg[0] = IXGBE_VF_SET_LPE; 3197 if (!netif_running(netdev)) {
3200 msg[1] = max_frame; 3198 msg[0] = IXGBE_VF_SET_LPE;
3201 hw->mbx.ops.write_posted(hw, msg, 2); 3199 msg[1] = max_frame;
3200 hw->mbx.ops.write_posted(hw, msg, 2);
3201 }
3202 3202
3203 if (netif_running(netdev)) 3203 if (netif_running(netdev))
3204 ixgbevf_reinit_locked(adapter); 3204 ixgbevf_reinit_locked(adapter);
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 74be7411242a..ec89b86f7ca4 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -404,11 +404,17 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
404 else 404 else
405 *link_up = false; 405 *link_up = false;
406 406
407 if ((links_reg & IXGBE_LINKS_SPEED_82599) == 407 switch (links_reg & IXGBE_LINKS_SPEED_82599) {
408 IXGBE_LINKS_SPEED_10G_82599) 408 case IXGBE_LINKS_SPEED_10G_82599:
409 *speed = IXGBE_LINK_SPEED_10GB_FULL; 409 *speed = IXGBE_LINK_SPEED_10GB_FULL;
410 else 410 break;
411 case IXGBE_LINKS_SPEED_1G_82599:
411 *speed = IXGBE_LINK_SPEED_1GB_FULL; 412 *speed = IXGBE_LINK_SPEED_1GB_FULL;
413 break;
414 case IXGBE_LINKS_SPEED_100_82599:
415 *speed = IXGBE_LINK_SPEED_100_FULL;
416 break;
417 }
412 418
413 return 0; 419 return 0;
414} 420}
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 5e1ca0f05090..c8950da60e6b 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1665,6 +1665,7 @@ static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
1665 .get_strings = mv643xx_eth_get_strings, 1665 .get_strings = mv643xx_eth_get_strings,
1666 .get_ethtool_stats = mv643xx_eth_get_ethtool_stats, 1666 .get_ethtool_stats = mv643xx_eth_get_ethtool_stats,
1667 .get_sset_count = mv643xx_eth_get_sset_count, 1667 .get_sset_count = mv643xx_eth_get_sset_count,
1668 .get_ts_info = ethtool_op_get_ts_info,
1668}; 1669};
1669 1670
1670 1671
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index efec6b60b327..1db023b075a1 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1456,6 +1456,7 @@ static const struct ethtool_ops pxa168_ethtool_ops = {
1456 .set_settings = pxa168_set_settings, 1456 .set_settings = pxa168_set_settings,
1457 .get_drvinfo = pxa168_get_drvinfo, 1457 .get_drvinfo = pxa168_get_drvinfo,
1458 .get_link = ethtool_op_get_link, 1458 .get_link = ethtool_op_get_link,
1459 .get_ts_info = ethtool_op_get_ts_info,
1459}; 1460};
1460 1461
1461static const struct net_device_ops pxa168_eth_netdev_ops = { 1462static const struct net_device_ops pxa168_eth_netdev_ops = {
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 487a6c8bd4ec..cace36f2ab92 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4825,14 +4825,14 @@ static int __devinit sky2_test_msi(struct sky2_hw *hw)
4825 4825
4826 init_waitqueue_head(&hw->msi_wait); 4826 init_waitqueue_head(&hw->msi_wait);
4827 4827
4828 sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
4829
4830 err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw); 4828 err = request_irq(pdev->irq, sky2_test_intr, 0, DRV_NAME, hw);
4831 if (err) { 4829 if (err) {
4832 dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq); 4830 dev_err(&pdev->dev, "cannot assign irq %d\n", pdev->irq);
4833 return err; 4831 return err;
4834 } 4832 }
4835 4833
4834 sky2_write32(hw, B0_IMSK, Y2_IS_IRQ_SW);
4835
4836 sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ); 4836 sky2_write8(hw, B0_CTST, CS_ST_SW_IRQ);
4837 sky2_read8(hw, B0_CTST); 4837 sky2_read8(hw, B0_CTST);
4838 4838
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index 1bb93531f1ba..5f027f95cc84 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -11,6 +11,18 @@ config MLX4_EN
11 This driver supports Mellanox Technologies ConnectX Ethernet 11 This driver supports Mellanox Technologies ConnectX Ethernet
12 devices. 12 devices.
13 13
14config MLX4_EN_DCB
15 bool "Data Center Bridging (DCB) Support"
16 default y
17 depends on MLX4_EN && DCB
18 ---help---
19 Say Y here if you want to use Data Center Bridging (DCB) in the
20 driver.
21 If set to N, will not be able to configure QoS and ratelimit attributes.
22 This flag is depended on the kernel's DCB support.
23
24 If unsure, set to Y
25
14config MLX4_CORE 26config MLX4_CORE
15 tristate 27 tristate
16 depends on PCI 28 depends on PCI
diff --git a/drivers/net/ethernet/mellanox/mlx4/Makefile b/drivers/net/ethernet/mellanox/mlx4/Makefile
index 4a40ab967eeb..293127d28b33 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx4/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_MLX4_EN) += mlx4_en.o
7 7
8mlx4_en-y := en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \ 8mlx4_en-y := en_main.o en_tx.o en_rx.o en_ethtool.o en_port.o en_cq.o \
9 en_resources.o en_netdev.o en_selftest.o 9 en_resources.o en_netdev.o en_selftest.o
10mlx4_en-$(CONFIG_MLX4_EN_DCB) += en_dcb_nl.o
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 00b81272e314..908a460d8db6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -124,11 +124,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
124 cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq; 124 cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
125 cq->mcq.event = mlx4_en_cq_event; 125 cq->mcq.event = mlx4_en_cq_event;
126 126
127 if (cq->is_tx) { 127 if (!cq->is_tx) {
128 init_timer(&cq->timer);
129 cq->timer.function = mlx4_en_poll_tx_cq;
130 cq->timer.data = (unsigned long) cq;
131 } else {
132 netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64); 128 netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
133 napi_enable(&cq->napi); 129 napi_enable(&cq->napi);
134 } 130 }
@@ -151,16 +147,12 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
151 147
152void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) 148void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
153{ 149{
154 struct mlx4_en_dev *mdev = priv->mdev; 150 if (!cq->is_tx) {
155
156 if (cq->is_tx)
157 del_timer(&cq->timer);
158 else {
159 napi_disable(&cq->napi); 151 napi_disable(&cq->napi);
160 netif_napi_del(&cq->napi); 152 netif_napi_del(&cq->napi);
161 } 153 }
162 154
163 mlx4_cq_free(mdev->dev, &cq->mcq); 155 mlx4_cq_free(priv->mdev->dev, &cq->mcq);
164} 156}
165 157
166/* Set rx cq moderation parameters */ 158/* Set rx cq moderation parameters */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
new file mode 100644
index 000000000000..5d36795877cb
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -0,0 +1,255 @@
1/*
2 * Copyright (c) 2011 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33
34#include <linux/dcbnl.h>
35#include <linux/math64.h>
36
37#include "mlx4_en.h"
38
39static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev,
40 struct ieee_ets *ets)
41{
42 struct mlx4_en_priv *priv = netdev_priv(dev);
43 struct ieee_ets *my_ets = &priv->ets;
44
45 /* No IEEE PFC settings available */
46 if (!my_ets)
47 return -EINVAL;
48
49 ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
50 ets->cbs = my_ets->cbs;
51 memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
52 memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
53 memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
54
55 return 0;
56}
57
58static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
59{
60 int i;
61 int total_ets_bw = 0;
62 int has_ets_tc = 0;
63
64 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
65 if (ets->prio_tc[i] > MLX4_EN_NUM_UP) {
66 en_err(priv, "Bad priority in UP <=> TC mapping. TC: %d, UP: %d\n",
67 i, ets->prio_tc[i]);
68 return -EINVAL;
69 }
70
71 switch (ets->tc_tsa[i]) {
72 case IEEE_8021QAZ_TSA_STRICT:
73 break;
74 case IEEE_8021QAZ_TSA_ETS:
75 has_ets_tc = 1;
76 total_ets_bw += ets->tc_tx_bw[i];
77 break;
78 default:
79 en_err(priv, "TC[%d]: Not supported TSA: %d\n",
80 i, ets->tc_tsa[i]);
81 return -ENOTSUPP;
82 }
83 }
84
85 if (has_ets_tc && total_ets_bw != MLX4_EN_BW_MAX) {
86 en_err(priv, "Bad ETS BW sum: %d. Should be exactly 100%%\n",
87 total_ets_bw);
88 return -EINVAL;
89 }
90
91 return 0;
92}
93
94static int mlx4_en_config_port_scheduler(struct mlx4_en_priv *priv,
95 struct ieee_ets *ets, u16 *ratelimit)
96{
97 struct mlx4_en_dev *mdev = priv->mdev;
98 int num_strict = 0;
99 int i;
100 __u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS] = { 0 };
101 __u8 pg[IEEE_8021QAZ_MAX_TCS] = { 0 };
102
103 ets = ets ?: &priv->ets;
104 ratelimit = ratelimit ?: priv->maxrate;
105
106 /* higher TC means higher priority => lower pg */
107 for (i = IEEE_8021QAZ_MAX_TCS - 1; i >= 0; i--) {
108 switch (ets->tc_tsa[i]) {
109 case IEEE_8021QAZ_TSA_STRICT:
110 pg[i] = num_strict++;
111 tc_tx_bw[i] = MLX4_EN_BW_MAX;
112 break;
113 case IEEE_8021QAZ_TSA_ETS:
114 pg[i] = MLX4_EN_TC_ETS;
115 tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX4_EN_BW_MIN;
116 break;
117 }
118 }
119
120 return mlx4_SET_PORT_SCHEDULER(mdev->dev, priv->port, tc_tx_bw, pg,
121 ratelimit);
122}
123
124static int
125mlx4_en_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
126{
127 struct mlx4_en_priv *priv = netdev_priv(dev);
128 struct mlx4_en_dev *mdev = priv->mdev;
129 int err;
130
131 err = mlx4_en_ets_validate(priv, ets);
132 if (err)
133 return err;
134
135 err = mlx4_SET_PORT_PRIO2TC(mdev->dev, priv->port, ets->prio_tc);
136 if (err)
137 return err;
138
139 err = mlx4_en_config_port_scheduler(priv, ets, NULL);
140 if (err)
141 return err;
142
143 memcpy(&priv->ets, ets, sizeof(priv->ets));
144
145 return 0;
146}
147
148static int mlx4_en_dcbnl_ieee_getpfc(struct net_device *dev,
149 struct ieee_pfc *pfc)
150{
151 struct mlx4_en_priv *priv = netdev_priv(dev);
152
153 pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
154 pfc->pfc_en = priv->prof->tx_ppp;
155
156 return 0;
157}
158
159static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
160 struct ieee_pfc *pfc)
161{
162 struct mlx4_en_priv *priv = netdev_priv(dev);
163 struct mlx4_en_dev *mdev = priv->mdev;
164 int err;
165
166 en_dbg(DRV, priv, "cap: 0x%x en: 0x%x mbc: 0x%x delay: %d\n",
167 pfc->pfc_cap,
168 pfc->pfc_en,
169 pfc->mbc,
170 pfc->delay);
171
172 priv->prof->rx_pause = priv->prof->tx_pause = !!pfc->pfc_en;
173 priv->prof->rx_ppp = priv->prof->tx_ppp = pfc->pfc_en;
174
175 err = mlx4_SET_PORT_general(mdev->dev, priv->port,
176 priv->rx_skb_size + ETH_FCS_LEN,
177 priv->prof->tx_pause,
178 priv->prof->tx_ppp,
179 priv->prof->rx_pause,
180 priv->prof->rx_ppp);
181 if (err)
182 en_err(priv, "Failed setting pause params\n");
183
184 return err;
185}
186
187static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev)
188{
189 return DCB_CAP_DCBX_VER_IEEE;
190}
191
192static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
193{
194 if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
195 (mode & DCB_CAP_DCBX_VER_CEE) ||
196 !(mode & DCB_CAP_DCBX_VER_IEEE) ||
197 !(mode & DCB_CAP_DCBX_HOST))
198 return 1;
199
200 return 0;
201}
202
203#define MLX4_RATELIMIT_UNITS_IN_KB 100000 /* rate-limit HW unit in Kbps */
204static int mlx4_en_dcbnl_ieee_getmaxrate(struct net_device *dev,
205 struct ieee_maxrate *maxrate)
206{
207 struct mlx4_en_priv *priv = netdev_priv(dev);
208 int i;
209
210 if (!priv->maxrate)
211 return -EINVAL;
212
213 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
214 maxrate->tc_maxrate[i] =
215 priv->maxrate[i] * MLX4_RATELIMIT_UNITS_IN_KB;
216
217 return 0;
218}
219
220static int mlx4_en_dcbnl_ieee_setmaxrate(struct net_device *dev,
221 struct ieee_maxrate *maxrate)
222{
223 struct mlx4_en_priv *priv = netdev_priv(dev);
224 u16 tmp[IEEE_8021QAZ_MAX_TCS];
225 int i, err;
226
227 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
228 /* Convert from Kbps into HW units, rounding result up.
229 * Setting to 0, means unlimited BW.
230 */
231 tmp[i] = div_u64(maxrate->tc_maxrate[i] +
232 MLX4_RATELIMIT_UNITS_IN_KB - 1,
233 MLX4_RATELIMIT_UNITS_IN_KB);
234 }
235
236 err = mlx4_en_config_port_scheduler(priv, NULL, tmp);
237 if (err)
238 return err;
239
240 memcpy(priv->maxrate, tmp, sizeof(*priv->maxrate));
241
242 return 0;
243}
244
245const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
246 .ieee_getets = mlx4_en_dcbnl_ieee_getets,
247 .ieee_setets = mlx4_en_dcbnl_ieee_setets,
248 .ieee_getmaxrate = mlx4_en_dcbnl_ieee_getmaxrate,
249 .ieee_setmaxrate = mlx4_en_dcbnl_ieee_setmaxrate,
250 .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc,
251 .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc,
252
253 .getdcbx = mlx4_en_dcbnl_getdcbx,
254 .setdcbx = mlx4_en_dcbnl_setdcbx,
255};
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 70346fd7f9c4..72901ce2b088 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -83,7 +83,7 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
83#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS) 83#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS)
84 84
85static const char mlx4_en_test_names[][ETH_GSTRING_LEN]= { 85static const char mlx4_en_test_names[][ETH_GSTRING_LEN]= {
86 "Interupt Test", 86 "Interrupt Test",
87 "Link Test", 87 "Link Test",
88 "Speed Test", 88 "Speed Test",
89 "Register Test", 89 "Register Test",
@@ -359,8 +359,8 @@ static int mlx4_en_get_coalesce(struct net_device *dev,
359{ 359{
360 struct mlx4_en_priv *priv = netdev_priv(dev); 360 struct mlx4_en_priv *priv = netdev_priv(dev);
361 361
362 coal->tx_coalesce_usecs = 0; 362 coal->tx_coalesce_usecs = priv->tx_usecs;
363 coal->tx_max_coalesced_frames = 0; 363 coal->tx_max_coalesced_frames = priv->tx_frames;
364 coal->rx_coalesce_usecs = priv->rx_usecs; 364 coal->rx_coalesce_usecs = priv->rx_usecs;
365 coal->rx_max_coalesced_frames = priv->rx_frames; 365 coal->rx_max_coalesced_frames = priv->rx_frames;
366 366
@@ -388,6 +388,21 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
388 MLX4_EN_RX_COAL_TIME : 388 MLX4_EN_RX_COAL_TIME :
389 coal->rx_coalesce_usecs; 389 coal->rx_coalesce_usecs;
390 390
391 /* Setting TX coalescing parameters */
392 if (coal->tx_coalesce_usecs != priv->tx_usecs ||
393 coal->tx_max_coalesced_frames != priv->tx_frames) {
394 priv->tx_usecs = coal->tx_coalesce_usecs;
395 priv->tx_frames = coal->tx_max_coalesced_frames;
396 for (i = 0; i < priv->tx_ring_num; i++) {
397 priv->tx_cq[i].moder_cnt = priv->tx_frames;
398 priv->tx_cq[i].moder_time = priv->tx_usecs;
399 if (mlx4_en_set_cq_moder(priv, &priv->tx_cq[i])) {
400 en_warn(priv, "Failed changing moderation "
401 "for TX cq %d\n", i);
402 }
403 }
404 }
405
391 /* Set adaptive coalescing params */ 406 /* Set adaptive coalescing params */
392 priv->pkt_rate_low = coal->pkt_rate_low; 407 priv->pkt_rate_low = coal->pkt_rate_low;
393 priv->rx_usecs_low = coal->rx_coalesce_usecs_low; 408 priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 2097a7d3c5b8..346fdb2e92a6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -114,7 +114,7 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
114 params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE; 114 params->prof[i].tx_ring_size = MLX4_EN_DEF_TX_RING_SIZE;
115 params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE; 115 params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
116 params->prof[i].tx_ring_num = MLX4_EN_NUM_TX_RINGS + 116 params->prof[i].tx_ring_num = MLX4_EN_NUM_TX_RINGS +
117 (!!pfcrx) * MLX4_EN_NUM_PPP_RINGS; 117 MLX4_EN_NUM_PPP_RINGS;
118 params->prof[i].rss_rings = 0; 118 params->prof[i].rss_rings = 0;
119 } 119 }
120 120
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 31b455a49273..eaa8fadf19c0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -45,6 +45,14 @@
45#include "mlx4_en.h" 45#include "mlx4_en.h"
46#include "en_port.h" 46#include "en_port.h"
47 47
48static int mlx4_en_setup_tc(struct net_device *dev, u8 up)
49{
50 if (up != MLX4_EN_NUM_UP)
51 return -EINVAL;
52
53 return 0;
54}
55
48static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) 56static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
49{ 57{
50 struct mlx4_en_priv *priv = netdev_priv(dev); 58 struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -421,6 +429,8 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
421 */ 429 */
422 priv->rx_frames = MLX4_EN_RX_COAL_TARGET; 430 priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
423 priv->rx_usecs = MLX4_EN_RX_COAL_TIME; 431 priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
432 priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
433 priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
424 en_dbg(INTR, priv, "Default coalesing params for mtu:%d - " 434 en_dbg(INTR, priv, "Default coalesing params for mtu:%d - "
425 "rx_frames:%d rx_usecs:%d\n", 435 "rx_frames:%d rx_usecs:%d\n",
426 priv->dev->mtu, priv->rx_frames, priv->rx_usecs); 436 priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
@@ -437,8 +447,8 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
437 447
438 for (i = 0; i < priv->tx_ring_num; i++) { 448 for (i = 0; i < priv->tx_ring_num; i++) {
439 cq = &priv->tx_cq[i]; 449 cq = &priv->tx_cq[i];
440 cq->moder_cnt = MLX4_EN_TX_COAL_PKTS; 450 cq->moder_cnt = priv->tx_frames;
441 cq->moder_time = MLX4_EN_TX_COAL_TIME; 451 cq->moder_time = priv->tx_usecs;
442 } 452 }
443 453
444 /* Reset auto-moderation params */ 454 /* Reset auto-moderation params */
@@ -650,12 +660,18 @@ int mlx4_en_start_port(struct net_device *dev)
650 660
651 /* Configure ring */ 661 /* Configure ring */
652 tx_ring = &priv->tx_ring[i]; 662 tx_ring = &priv->tx_ring[i];
653 err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn); 663 err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
664 max(0, i - MLX4_EN_NUM_TX_RINGS));
654 if (err) { 665 if (err) {
655 en_err(priv, "Failed allocating Tx ring\n"); 666 en_err(priv, "Failed allocating Tx ring\n");
656 mlx4_en_deactivate_cq(priv, cq); 667 mlx4_en_deactivate_cq(priv, cq);
657 goto tx_err; 668 goto tx_err;
658 } 669 }
670 tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
671
672 /* Arm CQ for TX completions */
673 mlx4_en_arm_cq(priv, cq);
674
659 /* Set initial ownership of all Tx TXBBs to SW (1) */ 675 /* Set initial ownership of all Tx TXBBs to SW (1) */
660 for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE) 676 for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
661 *((u32 *) (tx_ring->buf + j)) = 0xffffffff; 677 *((u32 *) (tx_ring->buf + j)) = 0xffffffff;
@@ -797,12 +813,15 @@ static void mlx4_en_restart(struct work_struct *work)
797 watchdog_task); 813 watchdog_task);
798 struct mlx4_en_dev *mdev = priv->mdev; 814 struct mlx4_en_dev *mdev = priv->mdev;
799 struct net_device *dev = priv->dev; 815 struct net_device *dev = priv->dev;
816 int i;
800 817
801 en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port); 818 en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
802 819
803 mutex_lock(&mdev->state_lock); 820 mutex_lock(&mdev->state_lock);
804 if (priv->port_up) { 821 if (priv->port_up) {
805 mlx4_en_stop_port(dev); 822 mlx4_en_stop_port(dev);
823 for (i = 0; i < priv->tx_ring_num; i++)
824 netdev_tx_reset_queue(priv->tx_ring[i].tx_queue);
806 if (mlx4_en_start_port(dev)) 825 if (mlx4_en_start_port(dev))
807 en_err(priv, "Failed restarting port %d\n", priv->port); 826 en_err(priv, "Failed restarting port %d\n", priv->port);
808 } 827 }
@@ -966,6 +985,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
966 mutex_unlock(&mdev->state_lock); 985 mutex_unlock(&mdev->state_lock);
967 986
968 mlx4_en_free_resources(priv); 987 mlx4_en_free_resources(priv);
988
969 free_netdev(dev); 989 free_netdev(dev);
970} 990}
971 991
@@ -1036,6 +1056,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
1036 .ndo_poll_controller = mlx4_en_netpoll, 1056 .ndo_poll_controller = mlx4_en_netpoll,
1037#endif 1057#endif
1038 .ndo_set_features = mlx4_en_set_features, 1058 .ndo_set_features = mlx4_en_set_features,
1059 .ndo_setup_tc = mlx4_en_setup_tc,
1039}; 1060};
1040 1061
1041int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, 1062int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -1079,6 +1100,10 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
1079 INIT_WORK(&priv->watchdog_task, mlx4_en_restart); 1100 INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
1080 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); 1101 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
1081 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); 1102 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
1103#ifdef CONFIG_MLX4_EN_DCB
1104 if (!mlx4_is_slave(priv->mdev->dev))
1105 dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
1106#endif
1082 1107
1083 /* Query for default mac and max mtu */ 1108 /* Query for default mac and max mtu */
1084 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port]; 1109 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
@@ -1113,6 +1138,15 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
1113 netif_set_real_num_tx_queues(dev, priv->tx_ring_num); 1138 netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
1114 netif_set_real_num_rx_queues(dev, priv->rx_ring_num); 1139 netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
1115 1140
1141 netdev_set_num_tc(dev, MLX4_EN_NUM_UP);
1142
1143 /* First 9 rings are for UP 0 */
1144 netdev_set_tc_queue(dev, 0, MLX4_EN_NUM_TX_RINGS + 1, 0);
1145
1146 /* Partition Tx queues evenly amongst UP's 1-7 */
1147 for (i = 1; i < MLX4_EN_NUM_UP; i++)
1148 netdev_set_tc_queue(dev, i, 1, MLX4_EN_NUM_TX_RINGS + i);
1149
1116 SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops); 1150 SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
1117 1151
1118 /* Set defualt MAC */ 1152 /* Set defualt MAC */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.h b/drivers/net/ethernet/mellanox/mlx4/en_port.h
index 6934fd7e66ed..745090b49d9e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.h
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.h
@@ -39,6 +39,8 @@
39#define SET_PORT_PROMISC_SHIFT 31 39#define SET_PORT_PROMISC_SHIFT 31
40#define SET_PORT_MC_PROMISC_SHIFT 30 40#define SET_PORT_MC_PROMISC_SHIFT 30
41 41
42#define MLX4_EN_NUM_TC 8
43
42#define VLAN_FLTR_SIZE 128 44#define VLAN_FLTR_SIZE 128
43struct mlx4_set_vlan_fltr_mbox { 45struct mlx4_set_vlan_fltr_mbox {
44 __be32 entry[VLAN_FLTR_SIZE]; 46 __be32 entry[VLAN_FLTR_SIZE];
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index bcbc54c16947..10c24c784b70 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -39,7 +39,7 @@
39 39
40void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, 40void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
41 int is_tx, int rss, int qpn, int cqn, 41 int is_tx, int rss, int qpn, int cqn,
42 struct mlx4_qp_context *context) 42 int user_prio, struct mlx4_qp_context *context)
43{ 43{
44 struct mlx4_en_dev *mdev = priv->mdev; 44 struct mlx4_en_dev *mdev = priv->mdev;
45 45
@@ -57,6 +57,10 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
57 context->local_qpn = cpu_to_be32(qpn); 57 context->local_qpn = cpu_to_be32(qpn);
58 context->pri_path.ackto = 1 & 0x07; 58 context->pri_path.ackto = 1 & 0x07;
59 context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6; 59 context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
60 if (user_prio >= 0) {
61 context->pri_path.sched_queue |= user_prio << 3;
62 context->pri_path.feup = 1 << 6;
63 }
60 context->pri_path.counter_index = 0xff; 64 context->pri_path.counter_index = 0xff;
61 context->cqn_send = cpu_to_be32(cqn); 65 context->cqn_send = cpu_to_be32(cqn);
62 context->cqn_recv = cpu_to_be32(cqn); 66 context->cqn_recv = cpu_to_be32(cqn);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 9adbd53da525..d49a7ac3187d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -823,7 +823,7 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
823 823
824 memset(context, 0, sizeof *context); 824 memset(context, 0, sizeof *context);
825 mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0, 825 mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
826 qpn, ring->cqn, context); 826 qpn, ring->cqn, -1, context);
827 context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma); 827 context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
828 828
829 /* Cancel FCS removal if FW allows */ 829 /* Cancel FCS removal if FW allows */
@@ -890,7 +890,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
890 } 890 }
891 rss_map->indir_qp.event = mlx4_en_sqp_event; 891 rss_map->indir_qp.event = mlx4_en_sqp_event;
892 mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn, 892 mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
893 priv->rx_ring[0].cqn, &context); 893 priv->rx_ring[0].cqn, -1, &context);
894 894
895 if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num) 895 if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
896 rss_rings = priv->rx_ring_num; 896 rss_rings = priv->rx_ring_num;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 17968244c399..9a38483feb92 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -67,8 +67,6 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
67 67
68 inline_thold = min(inline_thold, MAX_INLINE); 68 inline_thold = min(inline_thold, MAX_INLINE);
69 69
70 spin_lock_init(&ring->comp_lock);
71
72 tmp = size * sizeof(struct mlx4_en_tx_info); 70 tmp = size * sizeof(struct mlx4_en_tx_info);
73 ring->tx_info = vmalloc(tmp); 71 ring->tx_info = vmalloc(tmp);
74 if (!ring->tx_info) 72 if (!ring->tx_info)
@@ -156,7 +154,7 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
156 154
157int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, 155int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
158 struct mlx4_en_tx_ring *ring, 156 struct mlx4_en_tx_ring *ring,
159 int cq) 157 int cq, int user_prio)
160{ 158{
161 struct mlx4_en_dev *mdev = priv->mdev; 159 struct mlx4_en_dev *mdev = priv->mdev;
162 int err; 160 int err;
@@ -174,7 +172,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
174 ring->doorbell_qpn = ring->qp.qpn << 8; 172 ring->doorbell_qpn = ring->qp.qpn << 8;
175 173
176 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, 174 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
177 ring->cqn, &ring->context); 175 ring->cqn, user_prio, &ring->context);
178 if (ring->bf_enabled) 176 if (ring->bf_enabled)
179 ring->context.usr_page = cpu_to_be32(ring->bf.uar->index); 177 ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);
180 178
@@ -317,6 +315,8 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
317 int size = cq->size; 315 int size = cq->size;
318 u32 size_mask = ring->size_mask; 316 u32 size_mask = ring->size_mask;
319 struct mlx4_cqe *buf = cq->buf; 317 struct mlx4_cqe *buf = cq->buf;
318 u32 packets = 0;
319 u32 bytes = 0;
320 320
321 if (!priv->port_up) 321 if (!priv->port_up)
322 return; 322 return;
@@ -345,6 +345,8 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
345 priv, ring, ring_index, 345 priv, ring, ring_index,
346 !!((ring->cons + txbbs_skipped) & 346 !!((ring->cons + txbbs_skipped) &
347 ring->size)); 347 ring->size));
348 packets++;
349 bytes += ring->tx_info[ring_index].nr_bytes;
348 } while (ring_index != new_index); 350 } while (ring_index != new_index);
349 351
350 ++cons_index; 352 ++cons_index;
@@ -361,13 +363,14 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
361 mlx4_cq_set_ci(mcq); 363 mlx4_cq_set_ci(mcq);
362 wmb(); 364 wmb();
363 ring->cons += txbbs_skipped; 365 ring->cons += txbbs_skipped;
366 netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
364 367
365 /* Wakeup Tx queue if this ring stopped it */ 368 /* Wakeup Tx queue if this ring stopped it */
366 if (unlikely(ring->blocked)) { 369 if (unlikely(ring->blocked)) {
367 if ((u32) (ring->prod - ring->cons) <= 370 if ((u32) (ring->prod - ring->cons) <=
368 ring->size - HEADROOM - MAX_DESC_TXBBS) { 371 ring->size - HEADROOM - MAX_DESC_TXBBS) {
369 ring->blocked = 0; 372 ring->blocked = 0;
370 netif_tx_wake_queue(netdev_get_tx_queue(dev, cq->ring)); 373 netif_tx_wake_queue(ring->tx_queue);
371 priv->port_stats.wake_queue++; 374 priv->port_stats.wake_queue++;
372 } 375 }
373 } 376 }
@@ -377,41 +380,12 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq)
377{ 380{
378 struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq); 381 struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
379 struct mlx4_en_priv *priv = netdev_priv(cq->dev); 382 struct mlx4_en_priv *priv = netdev_priv(cq->dev);
380 struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
381 383
382 if (!spin_trylock(&ring->comp_lock))
383 return;
384 mlx4_en_process_tx_cq(cq->dev, cq); 384 mlx4_en_process_tx_cq(cq->dev, cq);
385 mod_timer(&cq->timer, jiffies + 1); 385 mlx4_en_arm_cq(priv, cq);
386 spin_unlock(&ring->comp_lock);
387} 386}
388 387
389 388
390void mlx4_en_poll_tx_cq(unsigned long data)
391{
392 struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
393 struct mlx4_en_priv *priv = netdev_priv(cq->dev);
394 struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
395 u32 inflight;
396
397 INC_PERF_COUNTER(priv->pstats.tx_poll);
398
399 if (!spin_trylock_irq(&ring->comp_lock)) {
400 mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
401 return;
402 }
403 mlx4_en_process_tx_cq(cq->dev, cq);
404 inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);
405
406 /* If there are still packets in flight and the timer has not already
407 * been scheduled by the Tx routine then schedule it here to guarantee
408 * completion processing of these packets */
409 if (inflight && priv->port_up)
410 mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
411
412 spin_unlock_irq(&ring->comp_lock);
413}
414
415static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv, 389static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
416 struct mlx4_en_tx_ring *ring, 390 struct mlx4_en_tx_ring *ring,
417 u32 index, 391 u32 index,
@@ -440,25 +414,6 @@ static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
440 return ring->buf + index * TXBB_SIZE; 414 return ring->buf + index * TXBB_SIZE;
441} 415}
442 416
443static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
444{
445 struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
446 struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
447 unsigned long flags;
448
449 /* If we don't have a pending timer, set one up to catch our recent
450 post in case the interface becomes idle */
451 if (!timer_pending(&cq->timer))
452 mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
453
454 /* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
455 if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
456 if (spin_trylock_irqsave(&ring->comp_lock, flags)) {
457 mlx4_en_process_tx_cq(priv->dev, cq);
458 spin_unlock_irqrestore(&ring->comp_lock, flags);
459 }
460}
461
462static int is_inline(struct sk_buff *skb, void **pfrag) 417static int is_inline(struct sk_buff *skb, void **pfrag)
463{ 418{
464 void *ptr; 419 void *ptr;
@@ -570,13 +525,9 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
570 525
571u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb) 526u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb)
572{ 527{
573 struct mlx4_en_priv *priv = netdev_priv(dev);
574 u16 vlan_tag = 0; 528 u16 vlan_tag = 0;
575 529
576 /* If we support per priority flow control and the packet contains 530 if (vlan_tx_tag_present(skb)) {
577 * a vlan tag, send the packet to the TX ring assigned to that priority
578 */
579 if (priv->prof->rx_ppp && vlan_tx_tag_present(skb)) {
580 vlan_tag = vlan_tx_tag_get(skb); 531 vlan_tag = vlan_tx_tag_get(skb);
581 return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13); 532 return MLX4_EN_NUM_TX_RINGS + (vlan_tag >> 13);
582 } 533 }
@@ -594,7 +545,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
594 struct mlx4_en_priv *priv = netdev_priv(dev); 545 struct mlx4_en_priv *priv = netdev_priv(dev);
595 struct mlx4_en_dev *mdev = priv->mdev; 546 struct mlx4_en_dev *mdev = priv->mdev;
596 struct mlx4_en_tx_ring *ring; 547 struct mlx4_en_tx_ring *ring;
597 struct mlx4_en_cq *cq;
598 struct mlx4_en_tx_desc *tx_desc; 548 struct mlx4_en_tx_desc *tx_desc;
599 struct mlx4_wqe_data_seg *data; 549 struct mlx4_wqe_data_seg *data;
600 struct skb_frag_struct *frag; 550 struct skb_frag_struct *frag;
@@ -638,13 +588,10 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
638 if (unlikely(((int)(ring->prod - ring->cons)) > 588 if (unlikely(((int)(ring->prod - ring->cons)) >
639 ring->size - HEADROOM - MAX_DESC_TXBBS)) { 589 ring->size - HEADROOM - MAX_DESC_TXBBS)) {
640 /* every full Tx ring stops queue */ 590 /* every full Tx ring stops queue */
641 netif_tx_stop_queue(netdev_get_tx_queue(dev, tx_ind)); 591 netif_tx_stop_queue(ring->tx_queue);
642 ring->blocked = 1; 592 ring->blocked = 1;
643 priv->port_stats.queue_stopped++; 593 priv->port_stats.queue_stopped++;
644 594
645 /* Use interrupts to find out when queue opened */
646 cq = &priv->tx_cq[tx_ind];
647 mlx4_en_arm_cq(priv, cq);
648 return NETDEV_TX_BUSY; 595 return NETDEV_TX_BUSY;
649 } 596 }
650 597
@@ -707,7 +654,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
707 priv->port_stats.tso_packets++; 654 priv->port_stats.tso_packets++;
708 i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) + 655 i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
709 !!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size); 656 !!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size);
710 ring->bytes += skb->len + (i - 1) * lso_header_size; 657 tx_info->nr_bytes = skb->len + (i - 1) * lso_header_size;
711 ring->packets += i; 658 ring->packets += i;
712 } else { 659 } else {
713 /* Normal (Non LSO) packet */ 660 /* Normal (Non LSO) packet */
@@ -715,10 +662,12 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
715 ((ring->prod & ring->size) ? 662 ((ring->prod & ring->size) ?
716 cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0); 663 cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
717 data = &tx_desc->data; 664 data = &tx_desc->data;
718 ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN); 665 tx_info->nr_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
719 ring->packets++; 666 ring->packets++;
720 667
721 } 668 }
669 ring->bytes += tx_info->nr_bytes;
670 netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes);
722 AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len); 671 AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
723 672
724 673
@@ -792,9 +741,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
792 iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL); 741 iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
793 } 742 }
794 743
795 /* Poll CQ here */
796 mlx4_en_xmit_poll(priv, tx_ind);
797
798 return NETDEV_TX_OK; 744 return NETDEV_TX_OK;
799 745
800tx_drop: 746tx_drop:
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 2a0ff2cc7182..cd56f1aea4b5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -53,6 +53,26 @@
53#define DRV_VERSION "1.1" 53#define DRV_VERSION "1.1"
54#define DRV_RELDATE "Dec, 2011" 54#define DRV_RELDATE "Dec, 2011"
55 55
56#define MLX4_NUM_UP 8
57#define MLX4_NUM_TC 8
58#define MLX4_RATELIMIT_UNITS 3 /* 100 Mbps */
59#define MLX4_RATELIMIT_DEFAULT 0xffff
60
61struct mlx4_set_port_prio2tc_context {
62 u8 prio2tc[4];
63};
64
65struct mlx4_port_scheduler_tc_cfg_be {
66 __be16 pg;
67 __be16 bw_precentage;
68 __be16 max_bw_units; /* 3-100Mbps, 4-1Gbps, other values - reserved */
69 __be16 max_bw_value;
70};
71
72struct mlx4_set_port_scheduler_context {
73 struct mlx4_port_scheduler_tc_cfg_be tc[MLX4_NUM_TC];
74};
75
56enum { 76enum {
57 MLX4_HCR_BASE = 0x80680, 77 MLX4_HCR_BASE = 0x80680,
58 MLX4_HCR_SIZE = 0x0001c, 78 MLX4_HCR_SIZE = 0x0001c,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index d69fee41f24a..5d876375a132 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -40,6 +40,9 @@
40#include <linux/mutex.h> 40#include <linux/mutex.h>
41#include <linux/netdevice.h> 41#include <linux/netdevice.h>
42#include <linux/if_vlan.h> 42#include <linux/if_vlan.h>
43#ifdef CONFIG_MLX4_EN_DCB
44#include <linux/dcbnl.h>
45#endif
43 46
44#include <linux/mlx4/device.h> 47#include <linux/mlx4/device.h>
45#include <linux/mlx4/qp.h> 48#include <linux/mlx4/qp.h>
@@ -111,6 +114,7 @@ enum {
111#define MLX4_EN_NUM_TX_RINGS 8 114#define MLX4_EN_NUM_TX_RINGS 8
112#define MLX4_EN_NUM_PPP_RINGS 8 115#define MLX4_EN_NUM_PPP_RINGS 8
113#define MAX_TX_RINGS (MLX4_EN_NUM_TX_RINGS + MLX4_EN_NUM_PPP_RINGS) 116#define MAX_TX_RINGS (MLX4_EN_NUM_TX_RINGS + MLX4_EN_NUM_PPP_RINGS)
117#define MLX4_EN_NUM_UP 8
114#define MLX4_EN_DEF_TX_RING_SIZE 512 118#define MLX4_EN_DEF_TX_RING_SIZE 512
115#define MLX4_EN_DEF_RX_RING_SIZE 1024 119#define MLX4_EN_DEF_RX_RING_SIZE 1024
116 120
@@ -118,7 +122,7 @@ enum {
118#define MLX4_EN_RX_COAL_TARGET 44 122#define MLX4_EN_RX_COAL_TARGET 44
119#define MLX4_EN_RX_COAL_TIME 0x10 123#define MLX4_EN_RX_COAL_TIME 0x10
120 124
121#define MLX4_EN_TX_COAL_PKTS 5 125#define MLX4_EN_TX_COAL_PKTS 16
122#define MLX4_EN_TX_COAL_TIME 0x80 126#define MLX4_EN_TX_COAL_TIME 0x80
123 127
124#define MLX4_EN_RX_RATE_LOW 400000 128#define MLX4_EN_RX_RATE_LOW 400000
@@ -196,6 +200,7 @@ enum cq_type {
196struct mlx4_en_tx_info { 200struct mlx4_en_tx_info {
197 struct sk_buff *skb; 201 struct sk_buff *skb;
198 u32 nr_txbb; 202 u32 nr_txbb;
203 u32 nr_bytes;
199 u8 linear; 204 u8 linear;
200 u8 data_offset; 205 u8 data_offset;
201 u8 inl; 206 u8 inl;
@@ -251,9 +256,9 @@ struct mlx4_en_tx_ring {
251 unsigned long bytes; 256 unsigned long bytes;
252 unsigned long packets; 257 unsigned long packets;
253 unsigned long tx_csum; 258 unsigned long tx_csum;
254 spinlock_t comp_lock;
255 struct mlx4_bf bf; 259 struct mlx4_bf bf;
256 bool bf_enabled; 260 bool bf_enabled;
261 struct netdev_queue *tx_queue;
257}; 262};
258 263
259struct mlx4_en_rx_desc { 264struct mlx4_en_rx_desc {
@@ -304,8 +309,6 @@ struct mlx4_en_cq {
304 spinlock_t lock; 309 spinlock_t lock;
305 struct net_device *dev; 310 struct net_device *dev;
306 struct napi_struct napi; 311 struct napi_struct napi;
307 /* Per-core Tx cq processing support */
308 struct timer_list timer;
309 int size; 312 int size;
310 int buf_size; 313 int buf_size;
311 unsigned vector; 314 unsigned vector;
@@ -411,6 +414,15 @@ struct mlx4_en_frag_info {
411 414
412}; 415};
413 416
417#ifdef CONFIG_MLX4_EN_DCB
418/* Minimal TC BW - setting to 0 will block traffic */
419#define MLX4_EN_BW_MIN 1
420#define MLX4_EN_BW_MAX 100 /* Utilize 100% of the line */
421
422#define MLX4_EN_TC_ETS 7
423
424#endif
425
414struct mlx4_en_priv { 426struct mlx4_en_priv {
415 struct mlx4_en_dev *mdev; 427 struct mlx4_en_dev *mdev;
416 struct mlx4_en_port_profile *prof; 428 struct mlx4_en_port_profile *prof;
@@ -484,6 +496,11 @@ struct mlx4_en_priv {
484 int vids[128]; 496 int vids[128];
485 bool wol; 497 bool wol;
486 struct device *ddev; 498 struct device *ddev;
499
500#ifdef CONFIG_MLX4_EN_DCB
501 struct ieee_ets ets;
502 u16 maxrate[IEEE_8021QAZ_MAX_TCS];
503#endif
487}; 504};
488 505
489enum mlx4_en_wol { 506enum mlx4_en_wol {
@@ -512,7 +529,6 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
512int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); 529int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
513int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq); 530int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
514 531
515void mlx4_en_poll_tx_cq(unsigned long data);
516void mlx4_en_tx_irq(struct mlx4_cq *mcq); 532void mlx4_en_tx_irq(struct mlx4_cq *mcq);
517u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb); 533u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
518netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev); 534netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
@@ -522,7 +538,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ri
522void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring); 538void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring);
523int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, 539int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
524 struct mlx4_en_tx_ring *ring, 540 struct mlx4_en_tx_ring *ring,
525 int cq); 541 int cq, int user_prio);
526void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, 542void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
527 struct mlx4_en_tx_ring *ring); 543 struct mlx4_en_tx_ring *ring);
528 544
@@ -540,8 +556,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
540 int budget); 556 int budget);
541int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget); 557int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
542void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, 558void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
543 int is_tx, int rss, int qpn, int cqn, 559 int is_tx, int rss, int qpn, int cqn, int user_prio,
544 struct mlx4_qp_context *context); 560 struct mlx4_qp_context *context);
545void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event); 561void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
546int mlx4_en_map_buffer(struct mlx4_buf *buf); 562int mlx4_en_map_buffer(struct mlx4_buf *buf);
547void mlx4_en_unmap_buffer(struct mlx4_buf *buf); 563void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
@@ -558,6 +574,10 @@ int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv);
558int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset); 574int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
559int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port); 575int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
560 576
577#ifdef CONFIG_MLX4_EN_DCB
578extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops;
579#endif
580
561#define MLX4_EN_NUM_SELF_TEST 5 581#define MLX4_EN_NUM_SELF_TEST 5
562void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf); 582void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
563u64 mlx4_en_mac_to_u64(u8 *addr); 583u64 mlx4_en_mac_to_u64(u8 *addr);
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 77535ff18f1b..55b12e6bed87 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -834,6 +834,68 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
834} 834}
835EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc); 835EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
836 836
837int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc)
838{
839 struct mlx4_cmd_mailbox *mailbox;
840 struct mlx4_set_port_prio2tc_context *context;
841 int err;
842 u32 in_mod;
843 int i;
844
845 mailbox = mlx4_alloc_cmd_mailbox(dev);
846 if (IS_ERR(mailbox))
847 return PTR_ERR(mailbox);
848 context = mailbox->buf;
849 memset(context, 0, sizeof *context);
850
851 for (i = 0; i < MLX4_NUM_UP; i += 2)
852 context->prio2tc[i >> 1] = prio2tc[i] << 4 | prio2tc[i + 1];
853
854 in_mod = MLX4_SET_PORT_PRIO2TC << 8 | port;
855 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
856 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
857
858 mlx4_free_cmd_mailbox(dev, mailbox);
859 return err;
860}
861EXPORT_SYMBOL(mlx4_SET_PORT_PRIO2TC);
862
863int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
864 u8 *pg, u16 *ratelimit)
865{
866 struct mlx4_cmd_mailbox *mailbox;
867 struct mlx4_set_port_scheduler_context *context;
868 int err;
869 u32 in_mod;
870 int i;
871
872 mailbox = mlx4_alloc_cmd_mailbox(dev);
873 if (IS_ERR(mailbox))
874 return PTR_ERR(mailbox);
875 context = mailbox->buf;
876 memset(context, 0, sizeof *context);
877
878 for (i = 0; i < MLX4_NUM_TC; i++) {
879 struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
880 u16 r = ratelimit && ratelimit[i] ? ratelimit[i] :
881 MLX4_RATELIMIT_DEFAULT;
882
883 tc->pg = htons(pg[i]);
884 tc->bw_precentage = htons(tc_tx_bw[i]);
885
886 tc->max_bw_units = htons(MLX4_RATELIMIT_UNITS);
887 tc->max_bw_value = htons(r);
888 }
889
890 in_mod = MLX4_SET_PORT_SCHEDULER << 8 | port;
891 err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
892 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
893
894 mlx4_free_cmd_mailbox(dev, mailbox);
895 return err;
896}
897EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER);
898
837int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave, 899int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
838 struct mlx4_vhcr *vhcr, 900 struct mlx4_vhcr *vhcr,
839 struct mlx4_cmd_mailbox *inbox, 901 struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index f84dd2dc82b6..24fb049ac2f2 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -1262,7 +1262,7 @@ static struct platform_driver ks8842_platform_driver = {
1262 .owner = THIS_MODULE, 1262 .owner = THIS_MODULE,
1263 }, 1263 },
1264 .probe = ks8842_probe, 1264 .probe = ks8842_probe,
1265 .remove = ks8842_remove, 1265 .remove = __devexit_p(ks8842_remove),
1266}; 1266};
1267 1267
1268module_platform_driver(ks8842_platform_driver); 1268module_platform_driver(ks8842_platform_driver);
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 27273ae1a6e6..90153fc983cb 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -4033,7 +4033,6 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4033 4033
4034 netdev->netdev_ops = &myri10ge_netdev_ops; 4034 netdev->netdev_ops = &myri10ge_netdev_ops;
4035 netdev->mtu = myri10ge_initial_mtu; 4035 netdev->mtu = myri10ge_initial_mtu;
4036 netdev->base_addr = mgp->iomem_base;
4037 netdev->hw_features = mgp->features | NETIF_F_LRO | NETIF_F_RXCSUM; 4036 netdev->hw_features = mgp->features | NETIF_F_LRO | NETIF_F_RXCSUM;
4038 netdev->features = netdev->hw_features; 4037 netdev->features = netdev->hw_features;
4039 4038
@@ -4047,12 +4046,10 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4047 netdev->vlan_features &= ~NETIF_F_TSO; 4046 netdev->vlan_features &= ~NETIF_F_TSO;
4048 4047
4049 /* make sure we can get an irq, and that MSI can be 4048 /* make sure we can get an irq, and that MSI can be
4050 * setup (if available). Also ensure netdev->irq 4049 * setup (if available). */
4051 * is set to correct value if MSI is enabled */
4052 status = myri10ge_request_irq(mgp); 4050 status = myri10ge_request_irq(mgp);
4053 if (status != 0) 4051 if (status != 0)
4054 goto abort_with_firmware; 4052 goto abort_with_firmware;
4055 netdev->irq = pdev->irq;
4056 myri10ge_free_irq(mgp); 4053 myri10ge_free_irq(mgp);
4057 4054
4058 /* Save configuration space to be restored if the 4055 /* Save configuration space to be restored if the
@@ -4077,7 +4074,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4077 else 4074 else
4078 dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n", 4075 dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n",
4079 mgp->msi_enabled ? "MSI" : "xPIC", 4076 mgp->msi_enabled ? "MSI" : "xPIC",
4080 netdev->irq, mgp->tx_boundary, mgp->fw_name, 4077 pdev->irq, mgp->tx_boundary, mgp->fw_name,
4081 (mgp->wc_enabled ? "Enabled" : "Disabled")); 4078 (mgp->wc_enabled ? "Enabled" : "Disabled"));
4082 4079
4083 board_number++; 4080 board_number++;
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index d38e48d4f430..5b61d12f8b91 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -547,6 +547,7 @@ struct netdev_private {
547 struct sk_buff *tx_skbuff[TX_RING_SIZE]; 547 struct sk_buff *tx_skbuff[TX_RING_SIZE];
548 dma_addr_t tx_dma[TX_RING_SIZE]; 548 dma_addr_t tx_dma[TX_RING_SIZE];
549 struct net_device *dev; 549 struct net_device *dev;
550 void __iomem *ioaddr;
550 struct napi_struct napi; 551 struct napi_struct napi;
551 /* Media monitoring timer */ 552 /* Media monitoring timer */
552 struct timer_list timer; 553 struct timer_list timer;
@@ -699,7 +700,9 @@ static ssize_t natsemi_set_dspcfg_workaround(struct device *dev,
699 700
700static inline void __iomem *ns_ioaddr(struct net_device *dev) 701static inline void __iomem *ns_ioaddr(struct net_device *dev)
701{ 702{
702 return (void __iomem *) dev->base_addr; 703 struct netdev_private *np = netdev_priv(dev);
704
705 return np->ioaddr;
703} 706}
704 707
705static inline void natsemi_irq_enable(struct net_device *dev) 708static inline void natsemi_irq_enable(struct net_device *dev)
@@ -863,10 +866,9 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
863 /* Store MAC Address in perm_addr */ 866 /* Store MAC Address in perm_addr */
864 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN); 867 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
865 868
866 dev->base_addr = (unsigned long __force) ioaddr;
867 dev->irq = irq;
868
869 np = netdev_priv(dev); 869 np = netdev_priv(dev);
870 np->ioaddr = ioaddr;
871
870 netif_napi_add(dev, &np->napi, natsemi_poll, 64); 872 netif_napi_add(dev, &np->napi, natsemi_poll, 64);
871 np->dev = dev; 873 np->dev = dev;
872 874
@@ -914,9 +916,6 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
914 } 916 }
915 917
916 option = find_cnt < MAX_UNITS ? options[find_cnt] : 0; 918 option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
917 if (dev->mem_start)
918 option = dev->mem_start;
919
920 /* The lower four bits are the media type. */ 919 /* The lower four bits are the media type. */
921 if (option) { 920 if (option) {
922 if (option & 0x200) 921 if (option & 0x200)
@@ -1532,20 +1531,21 @@ static int netdev_open(struct net_device *dev)
1532{ 1531{
1533 struct netdev_private *np = netdev_priv(dev); 1532 struct netdev_private *np = netdev_priv(dev);
1534 void __iomem * ioaddr = ns_ioaddr(dev); 1533 void __iomem * ioaddr = ns_ioaddr(dev);
1534 const int irq = np->pci_dev->irq;
1535 int i; 1535 int i;
1536 1536
1537 /* Reset the chip, just in case. */ 1537 /* Reset the chip, just in case. */
1538 natsemi_reset(dev); 1538 natsemi_reset(dev);
1539 1539
1540 i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev); 1540 i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
1541 if (i) return i; 1541 if (i) return i;
1542 1542
1543 if (netif_msg_ifup(np)) 1543 if (netif_msg_ifup(np))
1544 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n", 1544 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
1545 dev->name, dev->irq); 1545 dev->name, irq);
1546 i = alloc_ring(dev); 1546 i = alloc_ring(dev);
1547 if (i < 0) { 1547 if (i < 0) {
1548 free_irq(dev->irq, dev); 1548 free_irq(irq, dev);
1549 return i; 1549 return i;
1550 } 1550 }
1551 napi_enable(&np->napi); 1551 napi_enable(&np->napi);
@@ -1794,6 +1794,7 @@ static void netdev_timer(unsigned long data)
1794 struct netdev_private *np = netdev_priv(dev); 1794 struct netdev_private *np = netdev_priv(dev);
1795 void __iomem * ioaddr = ns_ioaddr(dev); 1795 void __iomem * ioaddr = ns_ioaddr(dev);
1796 int next_tick = NATSEMI_TIMER_FREQ; 1796 int next_tick = NATSEMI_TIMER_FREQ;
1797 const int irq = np->pci_dev->irq;
1797 1798
1798 if (netif_msg_timer(np)) { 1799 if (netif_msg_timer(np)) {
1799 /* DO NOT read the IntrStatus register, 1800 /* DO NOT read the IntrStatus register,
@@ -1817,14 +1818,14 @@ static void netdev_timer(unsigned long data)
1817 if (netif_msg_drv(np)) 1818 if (netif_msg_drv(np))
1818 printk(KERN_NOTICE "%s: possible phy reset: " 1819 printk(KERN_NOTICE "%s: possible phy reset: "
1819 "re-initializing\n", dev->name); 1820 "re-initializing\n", dev->name);
1820 disable_irq(dev->irq); 1821 disable_irq(irq);
1821 spin_lock_irq(&np->lock); 1822 spin_lock_irq(&np->lock);
1822 natsemi_stop_rxtx(dev); 1823 natsemi_stop_rxtx(dev);
1823 dump_ring(dev); 1824 dump_ring(dev);
1824 reinit_ring(dev); 1825 reinit_ring(dev);
1825 init_registers(dev); 1826 init_registers(dev);
1826 spin_unlock_irq(&np->lock); 1827 spin_unlock_irq(&np->lock);
1827 enable_irq(dev->irq); 1828 enable_irq(irq);
1828 } else { 1829 } else {
1829 /* hurry back */ 1830 /* hurry back */
1830 next_tick = HZ; 1831 next_tick = HZ;
@@ -1841,10 +1842,10 @@ static void netdev_timer(unsigned long data)
1841 spin_unlock_irq(&np->lock); 1842 spin_unlock_irq(&np->lock);
1842 } 1843 }
1843 if (np->oom) { 1844 if (np->oom) {
1844 disable_irq(dev->irq); 1845 disable_irq(irq);
1845 np->oom = 0; 1846 np->oom = 0;
1846 refill_rx(dev); 1847 refill_rx(dev);
1847 enable_irq(dev->irq); 1848 enable_irq(irq);
1848 if (!np->oom) { 1849 if (!np->oom) {
1849 writel(RxOn, ioaddr + ChipCmd); 1850 writel(RxOn, ioaddr + ChipCmd);
1850 } else { 1851 } else {
@@ -1885,8 +1886,9 @@ static void ns_tx_timeout(struct net_device *dev)
1885{ 1886{
1886 struct netdev_private *np = netdev_priv(dev); 1887 struct netdev_private *np = netdev_priv(dev);
1887 void __iomem * ioaddr = ns_ioaddr(dev); 1888 void __iomem * ioaddr = ns_ioaddr(dev);
1889 const int irq = np->pci_dev->irq;
1888 1890
1889 disable_irq(dev->irq); 1891 disable_irq(irq);
1890 spin_lock_irq(&np->lock); 1892 spin_lock_irq(&np->lock);
1891 if (!np->hands_off) { 1893 if (!np->hands_off) {
1892 if (netif_msg_tx_err(np)) 1894 if (netif_msg_tx_err(np))
@@ -1905,7 +1907,7 @@ static void ns_tx_timeout(struct net_device *dev)
1905 dev->name); 1907 dev->name);
1906 } 1908 }
1907 spin_unlock_irq(&np->lock); 1909 spin_unlock_irq(&np->lock);
1908 enable_irq(dev->irq); 1910 enable_irq(irq);
1909 1911
1910 dev->trans_start = jiffies; /* prevent tx timeout */ 1912 dev->trans_start = jiffies; /* prevent tx timeout */
1911 dev->stats.tx_errors++; 1913 dev->stats.tx_errors++;
@@ -2470,9 +2472,12 @@ static struct net_device_stats *get_stats(struct net_device *dev)
2470#ifdef CONFIG_NET_POLL_CONTROLLER 2472#ifdef CONFIG_NET_POLL_CONTROLLER
2471static void natsemi_poll_controller(struct net_device *dev) 2473static void natsemi_poll_controller(struct net_device *dev)
2472{ 2474{
2473 disable_irq(dev->irq); 2475 struct netdev_private *np = netdev_priv(dev);
2474 intr_handler(dev->irq, dev); 2476 const int irq = np->pci_dev->irq;
2475 enable_irq(dev->irq); 2477
2478 disable_irq(irq);
2479 intr_handler(irq, dev);
2480 enable_irq(irq);
2476} 2481}
2477#endif 2482#endif
2478 2483
@@ -2523,8 +2528,9 @@ static int natsemi_change_mtu(struct net_device *dev, int new_mtu)
2523 if (netif_running(dev)) { 2528 if (netif_running(dev)) {
2524 struct netdev_private *np = netdev_priv(dev); 2529 struct netdev_private *np = netdev_priv(dev);
2525 void __iomem * ioaddr = ns_ioaddr(dev); 2530 void __iomem * ioaddr = ns_ioaddr(dev);
2531 const int irq = np->pci_dev->irq;
2526 2532
2527 disable_irq(dev->irq); 2533 disable_irq(irq);
2528 spin_lock(&np->lock); 2534 spin_lock(&np->lock);
2529 /* stop engines */ 2535 /* stop engines */
2530 natsemi_stop_rxtx(dev); 2536 natsemi_stop_rxtx(dev);
@@ -2537,7 +2543,7 @@ static int natsemi_change_mtu(struct net_device *dev, int new_mtu)
2537 /* restart engines */ 2543 /* restart engines */
2538 writel(RxOn | TxOn, ioaddr + ChipCmd); 2544 writel(RxOn | TxOn, ioaddr + ChipCmd);
2539 spin_unlock(&np->lock); 2545 spin_unlock(&np->lock);
2540 enable_irq(dev->irq); 2546 enable_irq(irq);
2541 } 2547 }
2542 return 0; 2548 return 0;
2543} 2549}
@@ -3135,6 +3141,7 @@ static int netdev_close(struct net_device *dev)
3135{ 3141{
3136 void __iomem * ioaddr = ns_ioaddr(dev); 3142 void __iomem * ioaddr = ns_ioaddr(dev);
3137 struct netdev_private *np = netdev_priv(dev); 3143 struct netdev_private *np = netdev_priv(dev);
3144 const int irq = np->pci_dev->irq;
3138 3145
3139 if (netif_msg_ifdown(np)) 3146 if (netif_msg_ifdown(np))
3140 printk(KERN_DEBUG 3147 printk(KERN_DEBUG
@@ -3156,14 +3163,14 @@ static int netdev_close(struct net_device *dev)
3156 */ 3163 */
3157 3164
3158 del_timer_sync(&np->timer); 3165 del_timer_sync(&np->timer);
3159 disable_irq(dev->irq); 3166 disable_irq(irq);
3160 spin_lock_irq(&np->lock); 3167 spin_lock_irq(&np->lock);
3161 natsemi_irq_disable(dev); 3168 natsemi_irq_disable(dev);
3162 np->hands_off = 1; 3169 np->hands_off = 1;
3163 spin_unlock_irq(&np->lock); 3170 spin_unlock_irq(&np->lock);
3164 enable_irq(dev->irq); 3171 enable_irq(irq);
3165 3172
3166 free_irq(dev->irq, dev); 3173 free_irq(irq, dev);
3167 3174
3168 /* Interrupt disabled, interrupt handler released, 3175 /* Interrupt disabled, interrupt handler released,
3169 * queue stopped, timer deleted, rtnl_lock held 3176 * queue stopped, timer deleted, rtnl_lock held
@@ -3256,9 +3263,11 @@ static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state)
3256 3263
3257 rtnl_lock(); 3264 rtnl_lock();
3258 if (netif_running (dev)) { 3265 if (netif_running (dev)) {
3266 const int irq = np->pci_dev->irq;
3267
3259 del_timer_sync(&np->timer); 3268 del_timer_sync(&np->timer);
3260 3269
3261 disable_irq(dev->irq); 3270 disable_irq(irq);
3262 spin_lock_irq(&np->lock); 3271 spin_lock_irq(&np->lock);
3263 3272
3264 natsemi_irq_disable(dev); 3273 natsemi_irq_disable(dev);
@@ -3267,7 +3276,7 @@ static int natsemi_suspend (struct pci_dev *pdev, pm_message_t state)
3267 netif_stop_queue(dev); 3276 netif_stop_queue(dev);
3268 3277
3269 spin_unlock_irq(&np->lock); 3278 spin_unlock_irq(&np->lock);
3270 enable_irq(dev->irq); 3279 enable_irq(irq);
3271 3280
3272 napi_disable(&np->napi); 3281 napi_disable(&np->napi);
3273 3282
@@ -3307,6 +3316,8 @@ static int natsemi_resume (struct pci_dev *pdev)
3307 if (netif_device_present(dev)) 3316 if (netif_device_present(dev))
3308 goto out; 3317 goto out;
3309 if (netif_running(dev)) { 3318 if (netif_running(dev)) {
3319 const int irq = np->pci_dev->irq;
3320
3310 BUG_ON(!np->hands_off); 3321 BUG_ON(!np->hands_off);
3311 ret = pci_enable_device(pdev); 3322 ret = pci_enable_device(pdev);
3312 if (ret < 0) { 3323 if (ret < 0) {
@@ -3320,13 +3331,13 @@ static int natsemi_resume (struct pci_dev *pdev)
3320 3331
3321 natsemi_reset(dev); 3332 natsemi_reset(dev);
3322 init_ring(dev); 3333 init_ring(dev);
3323 disable_irq(dev->irq); 3334 disable_irq(irq);
3324 spin_lock_irq(&np->lock); 3335 spin_lock_irq(&np->lock);
3325 np->hands_off = 0; 3336 np->hands_off = 0;
3326 init_registers(dev); 3337 init_registers(dev);
3327 netif_device_attach(dev); 3338 netif_device_attach(dev);
3328 spin_unlock_irq(&np->lock); 3339 spin_unlock_irq(&np->lock);
3329 enable_irq(dev->irq); 3340 enable_irq(irq);
3330 3341
3331 mod_timer(&np->timer, round_jiffies(jiffies + 1*HZ)); 3342 mod_timer(&np->timer, round_jiffies(jiffies + 1*HZ));
3332 } 3343 }
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 6338ef8606ae..bb367582c1e8 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -2846,6 +2846,7 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget)
2846static void s2io_netpoll(struct net_device *dev) 2846static void s2io_netpoll(struct net_device *dev)
2847{ 2847{
2848 struct s2io_nic *nic = netdev_priv(dev); 2848 struct s2io_nic *nic = netdev_priv(dev);
2849 const int irq = nic->pdev->irq;
2849 struct XENA_dev_config __iomem *bar0 = nic->bar0; 2850 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2850 u64 val64 = 0xFFFFFFFFFFFFFFFFULL; 2851 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2851 int i; 2852 int i;
@@ -2855,7 +2856,7 @@ static void s2io_netpoll(struct net_device *dev)
2855 if (pci_channel_offline(nic->pdev)) 2856 if (pci_channel_offline(nic->pdev))
2856 return; 2857 return;
2857 2858
2858 disable_irq(dev->irq); 2859 disable_irq(irq);
2859 2860
2860 writeq(val64, &bar0->rx_traffic_int); 2861 writeq(val64, &bar0->rx_traffic_int);
2861 writeq(val64, &bar0->tx_traffic_int); 2862 writeq(val64, &bar0->tx_traffic_int);
@@ -2884,7 +2885,7 @@ static void s2io_netpoll(struct net_device *dev)
2884 break; 2885 break;
2885 } 2886 }
2886 } 2887 }
2887 enable_irq(dev->irq); 2888 enable_irq(irq);
2888} 2889}
2889#endif 2890#endif
2890 2891
@@ -3897,9 +3898,7 @@ static void remove_msix_isr(struct s2io_nic *sp)
3897 3898
3898static void remove_inta_isr(struct s2io_nic *sp) 3899static void remove_inta_isr(struct s2io_nic *sp)
3899{ 3900{
3900 struct net_device *dev = sp->dev; 3901 free_irq(sp->pdev->irq, sp->dev);
3901
3902 free_irq(sp->pdev->irq, dev);
3903} 3902}
3904 3903
3905/* ********************************************************* * 3904/* ********************************************************* *
@@ -7046,7 +7045,7 @@ static int s2io_add_isr(struct s2io_nic *sp)
7046 } 7045 }
7047 } 7046 }
7048 if (sp->config.intr_type == INTA) { 7047 if (sp->config.intr_type == INTA) {
7049 err = request_irq((int)sp->pdev->irq, s2io_isr, IRQF_SHARED, 7048 err = request_irq(sp->pdev->irq, s2io_isr, IRQF_SHARED,
7050 sp->name, dev); 7049 sp->name, dev);
7051 if (err) { 7050 if (err) {
7052 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n", 7051 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
@@ -7908,9 +7907,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7908 goto bar1_remap_failed; 7907 goto bar1_remap_failed;
7909 } 7908 }
7910 7909
7911 dev->irq = pdev->irq;
7912 dev->base_addr = (unsigned long)sp->bar0;
7913
7914 /* Initializing the BAR1 address as the start of the FIFO pointer. */ 7910 /* Initializing the BAR1 address as the start of the FIFO pointer. */
7915 for (j = 0; j < MAX_TX_FIFOS; j++) { 7911 for (j = 0; j < MAX_TX_FIFOS; j++) {
7916 mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000); 7912 mac_control->tx_FIFO_start[j] = sp->bar1 + (j * 0x00020000);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index ef76725454d2..51387c31914b 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -1882,25 +1882,24 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
1882 */ 1882 */
1883static void vxge_netpoll(struct net_device *dev) 1883static void vxge_netpoll(struct net_device *dev)
1884{ 1884{
1885 struct __vxge_hw_device *hldev; 1885 struct vxgedev *vdev = netdev_priv(dev);
1886 struct vxgedev *vdev; 1886 struct pci_dev *pdev = vdev->pdev;
1887 1887 struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
1888 vdev = netdev_priv(dev); 1888 const int irq = pdev->irq;
1889 hldev = pci_get_drvdata(vdev->pdev);
1890 1889
1891 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 1890 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1892 1891
1893 if (pci_channel_offline(vdev->pdev)) 1892 if (pci_channel_offline(pdev))
1894 return; 1893 return;
1895 1894
1896 disable_irq(dev->irq); 1895 disable_irq(irq);
1897 vxge_hw_device_clear_tx_rx(hldev); 1896 vxge_hw_device_clear_tx_rx(hldev);
1898 1897
1899 vxge_hw_device_clear_tx_rx(hldev); 1898 vxge_hw_device_clear_tx_rx(hldev);
1900 VXGE_COMPLETE_ALL_RX(vdev); 1899 VXGE_COMPLETE_ALL_RX(vdev);
1901 VXGE_COMPLETE_ALL_TX(vdev); 1900 VXGE_COMPLETE_ALL_TX(vdev);
1902 1901
1903 enable_irq(dev->irq); 1902 enable_irq(irq);
1904 1903
1905 vxge_debug_entryexit(VXGE_TRACE, 1904 vxge_debug_entryexit(VXGE_TRACE,
1906 "%s:%d Exiting...", __func__, __LINE__); 1905 "%s:%d Exiting...", __func__, __LINE__);
@@ -2860,12 +2859,12 @@ static int vxge_open(struct net_device *dev)
2860 vdev->config.rx_pause_enable); 2859 vdev->config.rx_pause_enable);
2861 2860
2862 if (vdev->vp_reset_timer.function == NULL) 2861 if (vdev->vp_reset_timer.function == NULL)
2863 vxge_os_timer(vdev->vp_reset_timer, 2862 vxge_os_timer(&vdev->vp_reset_timer, vxge_poll_vp_reset, vdev,
2864 vxge_poll_vp_reset, vdev, (HZ/2)); 2863 HZ / 2);
2865 2864
2866 /* There is no need to check for RxD leak and RxD lookup on Titan1A */ 2865 /* There is no need to check for RxD leak and RxD lookup on Titan1A */
2867 if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL) 2866 if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
2868 vxge_os_timer(vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev, 2867 vxge_os_timer(&vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev,
2869 HZ / 2); 2868 HZ / 2);
2870 2869
2871 set_bit(__VXGE_STATE_CARD_UP, &vdev->state); 2870 set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
@@ -3424,9 +3423,6 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3424 ndev->features |= ndev->hw_features | 3423 ndev->features |= ndev->hw_features |
3425 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER; 3424 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3426 3425
3427 /* Driver entry points */
3428 ndev->irq = vdev->pdev->irq;
3429 ndev->base_addr = (unsigned long) hldev->bar0;
3430 3426
3431 ndev->netdev_ops = &vxge_netdev_ops; 3427 ndev->netdev_ops = &vxge_netdev_ops;
3432 3428
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.h b/drivers/net/ethernet/neterion/vxge/vxge-main.h
index f52a42d1dbb7..35f3e7552ec2 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.h
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.h
@@ -416,12 +416,15 @@ struct vxge_tx_priv {
416 static int p = val; \ 416 static int p = val; \
417 module_param(p, int, 0) 417 module_param(p, int, 0)
418 418
419#define vxge_os_timer(timer, handle, arg, exp) do { \ 419static inline
420 init_timer(&timer); \ 420void vxge_os_timer(struct timer_list *timer, void (*func)(unsigned long data),
421 timer.function = handle; \ 421 struct vxgedev *vdev, unsigned long timeout)
422 timer.data = (unsigned long) arg; \ 422{
423 mod_timer(&timer, (jiffies + exp)); \ 423 init_timer(timer);
424 } while (0); 424 timer->function = func;
425 timer->data = (unsigned long)vdev;
426 mod_timer(timer, jiffies + timeout);
427}
425 428
426void vxge_initialize_ethtool_ops(struct net_device *ndev); 429void vxge_initialize_ethtool_ops(struct net_device *ndev);
427enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev); 430enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index aca13046e432..928913c4f3ff 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -2279,6 +2279,8 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2279 2279
2280 netdev_sent_queue(np->dev, skb->len); 2280 netdev_sent_queue(np->dev, skb->len);
2281 2281
2282 skb_tx_timestamp(skb);
2283
2282 np->put_tx.orig = put_tx; 2284 np->put_tx.orig = put_tx;
2283 2285
2284 spin_unlock_irqrestore(&np->lock, flags); 2286 spin_unlock_irqrestore(&np->lock, flags);
@@ -2426,6 +2428,8 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2426 2428
2427 netdev_sent_queue(np->dev, skb->len); 2429 netdev_sent_queue(np->dev, skb->len);
2428 2430
2431 skb_tx_timestamp(skb);
2432
2429 np->put_tx.ex = put_tx; 2433 np->put_tx.ex = put_tx;
2430 2434
2431 spin_unlock_irqrestore(&np->lock, flags); 2435 spin_unlock_irqrestore(&np->lock, flags);
@@ -3942,13 +3946,11 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
3942 ret = pci_enable_msi(np->pci_dev); 3946 ret = pci_enable_msi(np->pci_dev);
3943 if (ret == 0) { 3947 if (ret == 0) {
3944 np->msi_flags |= NV_MSI_ENABLED; 3948 np->msi_flags |= NV_MSI_ENABLED;
3945 dev->irq = np->pci_dev->irq;
3946 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) { 3949 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
3947 netdev_info(dev, "request_irq failed %d\n", 3950 netdev_info(dev, "request_irq failed %d\n",
3948 ret); 3951 ret);
3949 pci_disable_msi(np->pci_dev); 3952 pci_disable_msi(np->pci_dev);
3950 np->msi_flags &= ~NV_MSI_ENABLED; 3953 np->msi_flags &= ~NV_MSI_ENABLED;
3951 dev->irq = np->pci_dev->irq;
3952 goto out_err; 3954 goto out_err;
3953 } 3955 }
3954 3956
@@ -5649,9 +5651,6 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5649 np->base = ioremap(addr, np->register_size); 5651 np->base = ioremap(addr, np->register_size);
5650 if (!np->base) 5652 if (!np->base)
5651 goto out_relreg; 5653 goto out_relreg;
5652 dev->base_addr = (unsigned long)np->base;
5653
5654 dev->irq = pci_dev->irq;
5655 5654
5656 np->rx_ring_size = RX_RING_DEFAULT; 5655 np->rx_ring_size = RX_RING_DEFAULT;
5657 np->tx_ring_size = TX_RING_DEFAULT; 5656 np->tx_ring_size = TX_RING_DEFAULT;
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 6dfc26d85e47..d3469d8e3f0d 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -990,10 +990,10 @@ static int __lpc_handle_recv(struct net_device *ndev, int budget)
990 ndev->stats.rx_errors++; 990 ndev->stats.rx_errors++;
991 } else { 991 } else {
992 /* Packet is good */ 992 /* Packet is good */
993 skb = dev_alloc_skb(len + 8); 993 skb = dev_alloc_skb(len);
994 if (!skb) 994 if (!skb) {
995 ndev->stats.rx_dropped++; 995 ndev->stats.rx_dropped++;
996 else { 996 } else {
997 prdbuf = skb_put(skb, len); 997 prdbuf = skb_put(skb, len);
998 998
999 /* Copy packet from buffer */ 999 /* Copy packet from buffer */
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index dd14915f54bb..9f3dbc4feadc 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -660,6 +660,7 @@ extern u32 pch_src_uuid_lo_read(struct pci_dev *pdev);
660extern u32 pch_src_uuid_hi_read(struct pci_dev *pdev); 660extern u32 pch_src_uuid_hi_read(struct pci_dev *pdev);
661extern u64 pch_rx_snap_read(struct pci_dev *pdev); 661extern u64 pch_rx_snap_read(struct pci_dev *pdev);
662extern u64 pch_tx_snap_read(struct pci_dev *pdev); 662extern u64 pch_tx_snap_read(struct pci_dev *pdev);
663extern int pch_set_station_address(u8 *addr, struct pci_dev *pdev);
663#endif 664#endif
664 665
665/* pch_gbe_param.c */ 666/* pch_gbe_param.c */
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 8035e5ff6e06..9dc7e5023671 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -79,7 +79,6 @@ const char pch_driver_version[] = DRV_VERSION;
79#define PCH_GBE_PAUSE_PKT4_VALUE 0x01000888 79#define PCH_GBE_PAUSE_PKT4_VALUE 0x01000888
80#define PCH_GBE_PAUSE_PKT5_VALUE 0x0000FFFF 80#define PCH_GBE_PAUSE_PKT5_VALUE 0x0000FFFF
81 81
82#define PCH_GBE_ETH_ALEN 6
83 82
84/* This defines the bits that are set in the Interrupt Mask 83/* This defines the bits that are set in the Interrupt Mask
85 * Set/Read Register. Each bit is documented below: 84 * Set/Read Register. Each bit is documented below:
@@ -101,18 +100,19 @@ const char pch_driver_version[] = DRV_VERSION;
101 100
102#ifdef CONFIG_PCH_PTP 101#ifdef CONFIG_PCH_PTP
103/* Macros for ieee1588 */ 102/* Macros for ieee1588 */
104#define TICKS_NS_SHIFT 5
105
106/* 0x40 Time Synchronization Channel Control Register Bits */ 103/* 0x40 Time Synchronization Channel Control Register Bits */
107#define MASTER_MODE (1<<0) 104#define MASTER_MODE (1<<0)
108#define SLAVE_MODE (0<<0) 105#define SLAVE_MODE (0)
109#define V2_MODE (1<<31) 106#define V2_MODE (1<<31)
110#define CAP_MODE0 (0<<16) 107#define CAP_MODE0 (0)
111#define CAP_MODE2 (1<<17) 108#define CAP_MODE2 (1<<17)
112 109
113/* 0x44 Time Synchronization Channel Event Register Bits */ 110/* 0x44 Time Synchronization Channel Event Register Bits */
114#define TX_SNAPSHOT_LOCKED (1<<0) 111#define TX_SNAPSHOT_LOCKED (1<<0)
115#define RX_SNAPSHOT_LOCKED (1<<1) 112#define RX_SNAPSHOT_LOCKED (1<<1)
113
114#define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81"
115#define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00"
116#endif 116#endif
117 117
118static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; 118static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
@@ -120,6 +120,7 @@ static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
120static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg); 120static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
121static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg, 121static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
122 int data); 122 int data);
123static void pch_gbe_set_multi(struct net_device *netdev);
123 124
124#ifdef CONFIG_PCH_PTP 125#ifdef CONFIG_PCH_PTP
125static struct sock_filter ptp_filter[] = { 126static struct sock_filter ptp_filter[] = {
@@ -133,10 +134,8 @@ static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
133 u16 *hi, *id; 134 u16 *hi, *id;
134 u32 lo; 135 u32 lo;
135 136
136 if ((sk_run_filter(skb, ptp_filter) != PTP_CLASS_V2_IPV4) && 137 if (sk_run_filter(skb, ptp_filter) == PTP_CLASS_NONE)
137 (sk_run_filter(skb, ptp_filter) != PTP_CLASS_V1_IPV4)) {
138 return 0; 138 return 0;
139 }
140 139
141 offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; 140 offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
142 141
@@ -153,8 +152,8 @@ static int pch_ptp_match(struct sk_buff *skb, u16 uid_hi, u32 uid_lo, u16 seqid)
153 seqid == *id); 152 seqid == *id);
154} 153}
155 154
156static void pch_rx_timestamp( 155static void
157 struct pch_gbe_adapter *adapter, struct sk_buff *skb) 156pch_rx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
158{ 157{
159 struct skb_shared_hwtstamps *shhwtstamps; 158 struct skb_shared_hwtstamps *shhwtstamps;
160 struct pci_dev *pdev; 159 struct pci_dev *pdev;
@@ -183,7 +182,6 @@ static void pch_rx_timestamp(
183 goto out; 182 goto out;
184 183
185 ns = pch_rx_snap_read(pdev); 184 ns = pch_rx_snap_read(pdev);
186 ns <<= TICKS_NS_SHIFT;
187 185
188 shhwtstamps = skb_hwtstamps(skb); 186 shhwtstamps = skb_hwtstamps(skb);
189 memset(shhwtstamps, 0, sizeof(*shhwtstamps)); 187 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
@@ -192,8 +190,8 @@ out:
192 pch_ch_event_write(pdev, RX_SNAPSHOT_LOCKED); 190 pch_ch_event_write(pdev, RX_SNAPSHOT_LOCKED);
193} 191}
194 192
195static void pch_tx_timestamp( 193static void
196 struct pch_gbe_adapter *adapter, struct sk_buff *skb) 194pch_tx_timestamp(struct pch_gbe_adapter *adapter, struct sk_buff *skb)
197{ 195{
198 struct skb_shared_hwtstamps shhwtstamps; 196 struct skb_shared_hwtstamps shhwtstamps;
199 struct pci_dev *pdev; 197 struct pci_dev *pdev;
@@ -202,17 +200,16 @@ static void pch_tx_timestamp(
202 u32 cnt, val; 200 u32 cnt, val;
203 201
204 shtx = skb_shinfo(skb); 202 shtx = skb_shinfo(skb);
205 if (unlikely(shtx->tx_flags & SKBTX_HW_TSTAMP && adapter->hwts_tx_en)) 203 if (likely(!(shtx->tx_flags & SKBTX_HW_TSTAMP && adapter->hwts_tx_en)))
206 shtx->tx_flags |= SKBTX_IN_PROGRESS;
207 else
208 return; 204 return;
209 205
206 shtx->tx_flags |= SKBTX_IN_PROGRESS;
207
210 /* Get ieee1588's dev information */ 208 /* Get ieee1588's dev information */
211 pdev = adapter->ptp_pdev; 209 pdev = adapter->ptp_pdev;
212 210
213 /* 211 /*
214 * This really stinks, but we have to poll for the Tx time stamp. 212 * This really stinks, but we have to poll for the Tx time stamp.
215 * Usually, the time stamp is ready after 4 to 6 microseconds.
216 */ 213 */
217 for (cnt = 0; cnt < 100; cnt++) { 214 for (cnt = 0; cnt < 100; cnt++) {
218 val = pch_ch_event_read(pdev); 215 val = pch_ch_event_read(pdev);
@@ -226,7 +223,6 @@ static void pch_tx_timestamp(
226 } 223 }
227 224
228 ns = pch_tx_snap_read(pdev); 225 ns = pch_tx_snap_read(pdev);
229 ns <<= TICKS_NS_SHIFT;
230 226
231 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 227 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
232 shhwtstamps.hwtstamp = ns_to_ktime(ns); 228 shhwtstamps.hwtstamp = ns_to_ktime(ns);
@@ -240,6 +236,7 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
240 struct hwtstamp_config cfg; 236 struct hwtstamp_config cfg;
241 struct pch_gbe_adapter *adapter = netdev_priv(netdev); 237 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
242 struct pci_dev *pdev; 238 struct pci_dev *pdev;
239 u8 station[20];
243 240
244 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) 241 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
245 return -EFAULT; 242 return -EFAULT;
@@ -267,15 +264,23 @@ static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
267 break; 264 break;
268 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 265 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
269 adapter->hwts_rx_en = 0; 266 adapter->hwts_rx_en = 0;
270 pch_ch_control_write(pdev, (SLAVE_MODE | CAP_MODE0)); 267 pch_ch_control_write(pdev, SLAVE_MODE | CAP_MODE0);
271 break; 268 break;
272 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 269 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
273 adapter->hwts_rx_en = 1; 270 adapter->hwts_rx_en = 1;
274 pch_ch_control_write(pdev, (MASTER_MODE | CAP_MODE0)); 271 pch_ch_control_write(pdev, MASTER_MODE | CAP_MODE0);
272 break;
273 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
274 adapter->hwts_rx_en = 1;
275 pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
276 strcpy(station, PTP_L4_MULTICAST_SA);
277 pch_set_station_address(station, pdev);
275 break; 278 break;
276 case HWTSTAMP_FILTER_PTP_V2_EVENT: 279 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
277 adapter->hwts_rx_en = 1; 280 adapter->hwts_rx_en = 1;
278 pch_ch_control_write(pdev, (V2_MODE | CAP_MODE2)); 281 pch_ch_control_write(pdev, V2_MODE | CAP_MODE2);
282 strcpy(station, PTP_L2_MULTICAST_SA);
283 pch_set_station_address(station, pdev);
279 break; 284 break;
280 default: 285 default:
281 return -ERANGE; 286 return -ERANGE;
@@ -399,18 +404,18 @@ static void pch_gbe_mac_reset_hw(struct pch_gbe_hw *hw)
399 iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE); 404 iowrite32(PCH_GBE_MODE_GMII_ETHER, &hw->reg->MODE);
400#endif 405#endif
401 pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST); 406 pch_gbe_wait_clr_bit(&hw->reg->RESET, PCH_GBE_ALL_RST);
402 /* Setup the receive address */ 407 /* Setup the receive addresses */
403 pch_gbe_mac_mar_set(hw, hw->mac.addr, 0); 408 pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
404 return; 409 return;
405} 410}
406 411
407static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw) 412static void pch_gbe_mac_reset_rx(struct pch_gbe_hw *hw)
408{ 413{
409 /* Read the MAC address. and store to the private data */ 414 /* Read the MAC addresses. and store to the private data */
410 pch_gbe_mac_read_mac_addr(hw); 415 pch_gbe_mac_read_mac_addr(hw);
411 iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET); 416 iowrite32(PCH_GBE_RX_RST, &hw->reg->RESET);
412 pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST); 417 pch_gbe_wait_clr_bit_irq(&hw->reg->RESET, PCH_GBE_RX_RST);
413 /* Setup the MAC address */ 418 /* Setup the MAC addresses */
414 pch_gbe_mac_mar_set(hw, hw->mac.addr, 0); 419 pch_gbe_mac_mar_set(hw, hw->mac.addr, 0);
415 return; 420 return;
416} 421}
@@ -460,7 +465,7 @@ static void pch_gbe_mac_mc_addr_list_update(struct pch_gbe_hw *hw,
460 if (mc_addr_count) { 465 if (mc_addr_count) {
461 pch_gbe_mac_mar_set(hw, mc_addr_list, i); 466 pch_gbe_mac_mar_set(hw, mc_addr_list, i);
462 mc_addr_count--; 467 mc_addr_count--;
463 mc_addr_list += PCH_GBE_ETH_ALEN; 468 mc_addr_list += ETH_ALEN;
464 } else { 469 } else {
465 /* Clear MAC address mask */ 470 /* Clear MAC address mask */
466 adrmask = ioread32(&hw->reg->ADDR_MASK); 471 adrmask = ioread32(&hw->reg->ADDR_MASK);
@@ -778,6 +783,8 @@ void pch_gbe_reinit_locked(struct pch_gbe_adapter *adapter)
778void pch_gbe_reset(struct pch_gbe_adapter *adapter) 783void pch_gbe_reset(struct pch_gbe_adapter *adapter)
779{ 784{
780 pch_gbe_mac_reset_hw(&adapter->hw); 785 pch_gbe_mac_reset_hw(&adapter->hw);
786 /* reprogram multicast address register after reset */
787 pch_gbe_set_multi(adapter->netdev);
781 /* Setup the receive address. */ 788 /* Setup the receive address. */
782 pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES); 789 pch_gbe_mac_init_rx_addrs(&adapter->hw, PCH_GBE_MAR_ENTRIES);
783 if (pch_gbe_hal_init_hw(&adapter->hw)) 790 if (pch_gbe_hal_init_hw(&adapter->hw))
@@ -1182,8 +1189,6 @@ static void pch_gbe_tx_queue(struct pch_gbe_adapter *adapter,
1182 if (skb->protocol == htons(ETH_P_IP)) { 1189 if (skb->protocol == htons(ETH_P_IP)) {
1183 struct iphdr *iph = ip_hdr(skb); 1190 struct iphdr *iph = ip_hdr(skb);
1184 unsigned int offset; 1191 unsigned int offset;
1185 iph->check = 0;
1186 iph->check = ip_fast_csum((u8 *) iph, iph->ihl);
1187 offset = skb_transport_offset(skb); 1192 offset = skb_transport_offset(skb);
1188 if (iph->protocol == IPPROTO_TCP) { 1193 if (iph->protocol == IPPROTO_TCP) {
1189 skb->csum = 0; 1194 skb->csum = 0;
@@ -1342,6 +1347,8 @@ static void pch_gbe_stop_receive(struct pch_gbe_adapter *adapter)
1342 /* Stop Receive */ 1347 /* Stop Receive */
1343 pch_gbe_mac_reset_rx(hw); 1348 pch_gbe_mac_reset_rx(hw);
1344 } 1349 }
1350 /* reprogram multicast address register after reset */
1351 pch_gbe_set_multi(adapter->netdev);
1345} 1352}
1346 1353
1347static void pch_gbe_start_receive(struct pch_gbe_hw *hw) 1354static void pch_gbe_start_receive(struct pch_gbe_hw *hw)
@@ -1924,7 +1931,6 @@ static int pch_gbe_request_irq(struct pch_gbe_adapter *adapter)
1924} 1931}
1925 1932
1926 1933
1927static void pch_gbe_set_multi(struct net_device *netdev);
1928/** 1934/**
1929 * pch_gbe_up - Up GbE network device 1935 * pch_gbe_up - Up GbE network device
1930 * @adapter: Board private structure 1936 * @adapter: Board private structure
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index 0d29f5f4b8e4..c2367158350e 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -683,8 +683,6 @@ static int __devinit hamachi_init_one (struct pci_dev *pdev,
683 } 683 }
684 684
685 hmp->base = ioaddr; 685 hmp->base = ioaddr;
686 dev->base_addr = (unsigned long)ioaddr;
687 dev->irq = irq;
688 pci_set_drvdata(pdev, dev); 686 pci_set_drvdata(pdev, dev);
689 687
690 hmp->chip_id = chip_id; 688 hmp->chip_id = chip_id;
@@ -859,14 +857,11 @@ static int hamachi_open(struct net_device *dev)
859 u32 rx_int_var, tx_int_var; 857 u32 rx_int_var, tx_int_var;
860 u16 fifo_info; 858 u16 fifo_info;
861 859
862 i = request_irq(dev->irq, hamachi_interrupt, IRQF_SHARED, dev->name, dev); 860 i = request_irq(hmp->pci_dev->irq, hamachi_interrupt, IRQF_SHARED,
861 dev->name, dev);
863 if (i) 862 if (i)
864 return i; 863 return i;
865 864
866 if (hamachi_debug > 1)
867 printk(KERN_DEBUG "%s: hamachi_open() irq %d.\n",
868 dev->name, dev->irq);
869
870 hamachi_init_ring(dev); 865 hamachi_init_ring(dev);
871 866
872#if ADDRLEN == 64 867#if ADDRLEN == 64
@@ -1705,7 +1700,7 @@ static int hamachi_close(struct net_device *dev)
1705 } 1700 }
1706#endif /* __i386__ debugging only */ 1701#endif /* __i386__ debugging only */
1707 1702
1708 free_irq(dev->irq, dev); 1703 free_irq(hmp->pci_dev->irq, dev);
1709 1704
1710 del_timer_sync(&hmp->timer); 1705 del_timer_sync(&hmp->timer);
1711 1706
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 7757b80ef924..04e622fd468d 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -427,9 +427,6 @@ static int __devinit yellowfin_init_one(struct pci_dev *pdev,
427 /* Reset the chip. */ 427 /* Reset the chip. */
428 iowrite32(0x80000000, ioaddr + DMACtrl); 428 iowrite32(0x80000000, ioaddr + DMACtrl);
429 429
430 dev->base_addr = (unsigned long)ioaddr;
431 dev->irq = irq;
432
433 pci_set_drvdata(pdev, dev); 430 pci_set_drvdata(pdev, dev);
434 spin_lock_init(&np->lock); 431 spin_lock_init(&np->lock);
435 432
@@ -569,25 +566,20 @@ static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value
569static int yellowfin_open(struct net_device *dev) 566static int yellowfin_open(struct net_device *dev)
570{ 567{
571 struct yellowfin_private *yp = netdev_priv(dev); 568 struct yellowfin_private *yp = netdev_priv(dev);
569 const int irq = yp->pci_dev->irq;
572 void __iomem *ioaddr = yp->base; 570 void __iomem *ioaddr = yp->base;
573 int i, ret; 571 int i, rc;
574 572
575 /* Reset the chip. */ 573 /* Reset the chip. */
576 iowrite32(0x80000000, ioaddr + DMACtrl); 574 iowrite32(0x80000000, ioaddr + DMACtrl);
577 575
578 ret = request_irq(dev->irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev); 576 rc = request_irq(irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
579 if (ret) 577 if (rc)
580 return ret; 578 return rc;
581
582 if (yellowfin_debug > 1)
583 netdev_printk(KERN_DEBUG, dev, "%s() irq %d\n",
584 __func__, dev->irq);
585 579
586 ret = yellowfin_init_ring(dev); 580 rc = yellowfin_init_ring(dev);
587 if (ret) { 581 if (rc < 0)
588 free_irq(dev->irq, dev); 582 goto err_free_irq;
589 return ret;
590 }
591 583
592 iowrite32(yp->rx_ring_dma, ioaddr + RxPtr); 584 iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
593 iowrite32(yp->tx_ring_dma, ioaddr + TxPtr); 585 iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);
@@ -647,8 +639,12 @@ static int yellowfin_open(struct net_device *dev)
647 yp->timer.data = (unsigned long)dev; 639 yp->timer.data = (unsigned long)dev;
648 yp->timer.function = yellowfin_timer; /* timer handler */ 640 yp->timer.function = yellowfin_timer; /* timer handler */
649 add_timer(&yp->timer); 641 add_timer(&yp->timer);
642out:
643 return rc;
650 644
651 return 0; 645err_free_irq:
646 free_irq(irq, dev);
647 goto out;
652} 648}
653 649
654static void yellowfin_timer(unsigned long data) 650static void yellowfin_timer(unsigned long data)
@@ -1251,7 +1247,7 @@ static int yellowfin_close(struct net_device *dev)
1251 } 1247 }
1252#endif /* __i386__ debugging only */ 1248#endif /* __i386__ debugging only */
1253 1249
1254 free_irq(dev->irq, dev); 1250 free_irq(yp->pci_dev->irq, dev);
1255 1251
1256 /* Free all the skbuffs in the Rx queue. */ 1252 /* Free all the skbuffs in the Rx queue. */
1257 for (i = 0; i < RX_RING_SIZE; i++) { 1253 for (i = 0; i < RX_RING_SIZE; i++) {
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index ddc95b0ac78d..e559dfa06d6a 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -623,7 +623,7 @@ static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac)
623 mac->rx = NULL; 623 mac->rx = NULL;
624} 624}
625 625
626static void pasemi_mac_replenish_rx_ring(const struct net_device *dev, 626static void pasemi_mac_replenish_rx_ring(struct net_device *dev,
627 const int limit) 627 const int limit)
628{ 628{
629 const struct pasemi_mac *mac = netdev_priv(dev); 629 const struct pasemi_mac *mac = netdev_priv(dev);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 385a4d5c7c25..8680a5dae4a2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -36,8 +36,8 @@
36 36
37#define _QLCNIC_LINUX_MAJOR 5 37#define _QLCNIC_LINUX_MAJOR 5
38#define _QLCNIC_LINUX_MINOR 0 38#define _QLCNIC_LINUX_MINOR 0
39#define _QLCNIC_LINUX_SUBVERSION 27 39#define _QLCNIC_LINUX_SUBVERSION 28
40#define QLCNIC_LINUX_VERSIONID "5.0.27" 40#define QLCNIC_LINUX_VERSIONID "5.0.28"
41#define QLCNIC_DRV_IDC_VER 0x01 41#define QLCNIC_DRV_IDC_VER 0x01
42#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 42#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
43 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) 43 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -607,6 +607,7 @@ struct qlcnic_recv_context {
607#define QLCNIC_CDRP_CMD_CONFIG_PORT 0x0000002E 607#define QLCNIC_CDRP_CMD_CONFIG_PORT 0x0000002E
608#define QLCNIC_CDRP_CMD_TEMP_SIZE 0x0000002f 608#define QLCNIC_CDRP_CMD_TEMP_SIZE 0x0000002f
609#define QLCNIC_CDRP_CMD_GET_TEMP_HDR 0x00000030 609#define QLCNIC_CDRP_CMD_GET_TEMP_HDR 0x00000030
610#define QLCNIC_CDRP_CMD_GET_MAC_STATS 0x00000037
610 611
611#define QLCNIC_RCODE_SUCCESS 0 612#define QLCNIC_RCODE_SUCCESS 0
612#define QLCNIC_RCODE_NOT_SUPPORTED 9 613#define QLCNIC_RCODE_NOT_SUPPORTED 9
@@ -1180,18 +1181,62 @@ struct qlcnic_esw_func_cfg {
1180#define QLCNIC_STATS_ESWITCH 2 1181#define QLCNIC_STATS_ESWITCH 2
1181#define QLCNIC_QUERY_RX_COUNTER 0 1182#define QLCNIC_QUERY_RX_COUNTER 0
1182#define QLCNIC_QUERY_TX_COUNTER 1 1183#define QLCNIC_QUERY_TX_COUNTER 1
1183#define QLCNIC_ESW_STATS_NOT_AVAIL 0xffffffffffffffffULL 1184#define QLCNIC_STATS_NOT_AVAIL 0xffffffffffffffffULL
1185#define QLCNIC_FILL_STATS(VAL1) \
1186 (((VAL1) == QLCNIC_STATS_NOT_AVAIL) ? 0 : VAL1)
1187#define QLCNIC_MAC_STATS 1
1188#define QLCNIC_ESW_STATS 2
1184 1189
1185#define QLCNIC_ADD_ESW_STATS(VAL1, VAL2)\ 1190#define QLCNIC_ADD_ESW_STATS(VAL1, VAL2)\
1186do { \ 1191do { \
1187 if (((VAL1) == QLCNIC_ESW_STATS_NOT_AVAIL) && \ 1192 if (((VAL1) == QLCNIC_STATS_NOT_AVAIL) && \
1188 ((VAL2) != QLCNIC_ESW_STATS_NOT_AVAIL)) \ 1193 ((VAL2) != QLCNIC_STATS_NOT_AVAIL)) \
1189 (VAL1) = (VAL2); \ 1194 (VAL1) = (VAL2); \
1190 else if (((VAL1) != QLCNIC_ESW_STATS_NOT_AVAIL) && \ 1195 else if (((VAL1) != QLCNIC_STATS_NOT_AVAIL) && \
1191 ((VAL2) != QLCNIC_ESW_STATS_NOT_AVAIL)) \ 1196 ((VAL2) != QLCNIC_STATS_NOT_AVAIL)) \
1192 (VAL1) += (VAL2); \ 1197 (VAL1) += (VAL2); \
1193} while (0) 1198} while (0)
1194 1199
1200struct qlcnic_mac_statistics{
1201 __le64 mac_tx_frames;
1202 __le64 mac_tx_bytes;
1203 __le64 mac_tx_mcast_pkts;
1204 __le64 mac_tx_bcast_pkts;
1205 __le64 mac_tx_pause_cnt;
1206 __le64 mac_tx_ctrl_pkt;
1207 __le64 mac_tx_lt_64b_pkts;
1208 __le64 mac_tx_lt_127b_pkts;
1209 __le64 mac_tx_lt_255b_pkts;
1210 __le64 mac_tx_lt_511b_pkts;
1211 __le64 mac_tx_lt_1023b_pkts;
1212 __le64 mac_tx_lt_1518b_pkts;
1213 __le64 mac_tx_gt_1518b_pkts;
1214 __le64 rsvd1[3];
1215
1216 __le64 mac_rx_frames;
1217 __le64 mac_rx_bytes;
1218 __le64 mac_rx_mcast_pkts;
1219 __le64 mac_rx_bcast_pkts;
1220 __le64 mac_rx_pause_cnt;
1221 __le64 mac_rx_ctrl_pkt;
1222 __le64 mac_rx_lt_64b_pkts;
1223 __le64 mac_rx_lt_127b_pkts;
1224 __le64 mac_rx_lt_255b_pkts;
1225 __le64 mac_rx_lt_511b_pkts;
1226 __le64 mac_rx_lt_1023b_pkts;
1227 __le64 mac_rx_lt_1518b_pkts;
1228 __le64 mac_rx_gt_1518b_pkts;
1229 __le64 rsvd2[3];
1230
1231 __le64 mac_rx_length_error;
1232 __le64 mac_rx_length_small;
1233 __le64 mac_rx_length_large;
1234 __le64 mac_rx_jabber;
1235 __le64 mac_rx_dropped;
1236 __le64 mac_rx_crc_error;
1237 __le64 mac_align_error;
1238} __packed;
1239
1195struct __qlcnic_esw_statistics { 1240struct __qlcnic_esw_statistics {
1196 __le16 context_id; 1241 __le16 context_id;
1197 __le16 version; 1242 __le16 version;
@@ -1352,6 +1397,8 @@ enum op_codes {
1352#define QLCNIC_ENABLE_FW_DUMP 0xaddfeed 1397#define QLCNIC_ENABLE_FW_DUMP 0xaddfeed
1353#define QLCNIC_DISABLE_FW_DUMP 0xbadfeed 1398#define QLCNIC_DISABLE_FW_DUMP 0xbadfeed
1354#define QLCNIC_FORCE_FW_RESET 0xdeaddead 1399#define QLCNIC_FORCE_FW_RESET 0xdeaddead
1400#define QLCNIC_SET_QUIESCENT 0xadd00010
1401#define QLCNIC_RESET_QUIESCENT 0xadd00020
1355 1402
1356struct qlcnic_dump_operations { 1403struct qlcnic_dump_operations {
1357 enum op_codes opcode; 1404 enum op_codes opcode;
@@ -1510,6 +1557,7 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *, const u8, const u8,
1510int qlcnic_get_eswitch_stats(struct qlcnic_adapter *, const u8, u8, 1557int qlcnic_get_eswitch_stats(struct qlcnic_adapter *, const u8, u8,
1511 struct __qlcnic_esw_statistics *); 1558 struct __qlcnic_esw_statistics *);
1512int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, u8, u8, u8); 1559int qlcnic_clear_esw_stats(struct qlcnic_adapter *adapter, u8, u8, u8);
1560int qlcnic_get_mac_stats(struct qlcnic_adapter *, struct qlcnic_mac_statistics *);
1513extern int qlcnic_config_tso; 1561extern int qlcnic_config_tso;
1514 1562
1515/* 1563/*
@@ -1559,6 +1607,7 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
1559} 1607}
1560 1608
1561extern const struct ethtool_ops qlcnic_ethtool_ops; 1609extern const struct ethtool_ops qlcnic_ethtool_ops;
1610extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
1562 1611
1563struct qlcnic_nic_template { 1612struct qlcnic_nic_template {
1564 int (*config_bridged_mode) (struct qlcnic_adapter *, u32); 1613 int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 569a837d2ac4..8db85244e8ad 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -905,6 +905,65 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
905 return err; 905 return err;
906} 906}
907 907
908/* This routine will retrieve the MAC statistics from firmware */
909int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
910 struct qlcnic_mac_statistics *mac_stats)
911{
912 struct qlcnic_mac_statistics *stats;
913 struct qlcnic_cmd_args cmd;
914 size_t stats_size = sizeof(struct qlcnic_mac_statistics);
915 dma_addr_t stats_dma_t;
916 void *stats_addr;
917 int err;
918
919 stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
920 &stats_dma_t, GFP_KERNEL);
921 if (!stats_addr) {
922 dev_err(&adapter->pdev->dev,
923 "%s: Unable to allocate memory.\n", __func__);
924 return -ENOMEM;
925 }
926 memset(stats_addr, 0, stats_size);
927 memset(&cmd, 0, sizeof(cmd));
928 cmd.req.cmd = QLCNIC_CDRP_CMD_GET_MAC_STATS;
929 cmd.req.arg1 = stats_size << 16;
930 cmd.req.arg2 = MSD(stats_dma_t);
931 cmd.req.arg3 = LSD(stats_dma_t);
932
933 qlcnic_issue_cmd(adapter, &cmd);
934 err = cmd.rsp.cmd;
935
936 if (!err) {
937 stats = stats_addr;
938 mac_stats->mac_tx_frames = le64_to_cpu(stats->mac_tx_frames);
939 mac_stats->mac_tx_bytes = le64_to_cpu(stats->mac_tx_bytes);
940 mac_stats->mac_tx_mcast_pkts =
941 le64_to_cpu(stats->mac_tx_mcast_pkts);
942 mac_stats->mac_tx_bcast_pkts =
943 le64_to_cpu(stats->mac_tx_bcast_pkts);
944 mac_stats->mac_rx_frames = le64_to_cpu(stats->mac_rx_frames);
945 mac_stats->mac_rx_bytes = le64_to_cpu(stats->mac_rx_bytes);
946 mac_stats->mac_rx_mcast_pkts =
947 le64_to_cpu(stats->mac_rx_mcast_pkts);
948 mac_stats->mac_rx_length_error =
949 le64_to_cpu(stats->mac_rx_length_error);
950 mac_stats->mac_rx_length_small =
951 le64_to_cpu(stats->mac_rx_length_small);
952 mac_stats->mac_rx_length_large =
953 le64_to_cpu(stats->mac_rx_length_large);
954 mac_stats->mac_rx_jabber = le64_to_cpu(stats->mac_rx_jabber);
955 mac_stats->mac_rx_dropped = le64_to_cpu(stats->mac_rx_dropped);
956 mac_stats->mac_rx_crc_error = le64_to_cpu(stats->mac_rx_crc_error);
957 } else {
958 dev_info(&adapter->pdev->dev,
959 "%s: Get mac stats failed =%d.\n", __func__, err);
960 }
961
962 dma_free_coherent(&adapter->pdev->dev, stats_size, stats_addr,
963 stats_dma_t);
964 return err;
965}
966
908int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch, 967int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
909 const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) { 968 const u8 rx_tx, struct __qlcnic_esw_statistics *esw_stats) {
910 969
@@ -920,13 +979,13 @@ int qlcnic_get_eswitch_stats(struct qlcnic_adapter *adapter, const u8 eswitch,
920 return -EIO; 979 return -EIO;
921 980
922 memset(esw_stats, 0, sizeof(u64)); 981 memset(esw_stats, 0, sizeof(u64));
923 esw_stats->unicast_frames = QLCNIC_ESW_STATS_NOT_AVAIL; 982 esw_stats->unicast_frames = QLCNIC_STATS_NOT_AVAIL;
924 esw_stats->multicast_frames = QLCNIC_ESW_STATS_NOT_AVAIL; 983 esw_stats->multicast_frames = QLCNIC_STATS_NOT_AVAIL;
925 esw_stats->broadcast_frames = QLCNIC_ESW_STATS_NOT_AVAIL; 984 esw_stats->broadcast_frames = QLCNIC_STATS_NOT_AVAIL;
926 esw_stats->dropped_frames = QLCNIC_ESW_STATS_NOT_AVAIL; 985 esw_stats->dropped_frames = QLCNIC_STATS_NOT_AVAIL;
927 esw_stats->errors = QLCNIC_ESW_STATS_NOT_AVAIL; 986 esw_stats->errors = QLCNIC_STATS_NOT_AVAIL;
928 esw_stats->local_frames = QLCNIC_ESW_STATS_NOT_AVAIL; 987 esw_stats->local_frames = QLCNIC_STATS_NOT_AVAIL;
929 esw_stats->numbytes = QLCNIC_ESW_STATS_NOT_AVAIL; 988 esw_stats->numbytes = QLCNIC_STATS_NOT_AVAIL;
930 esw_stats->context_id = eswitch; 989 esw_stats->context_id = eswitch;
931 990
932 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) { 991 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 89ddf7f7d7df..735423f7273f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -78,8 +78,46 @@ static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
78 "tx numbytes", 78 "tx numbytes",
79}; 79};
80 80
81#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats) 81static const char qlcnic_mac_stats_strings [][ETH_GSTRING_LEN] = {
82 "mac_tx_frames",
83 "mac_tx_bytes",
84 "mac_tx_mcast_pkts",
85 "mac_tx_bcast_pkts",
86 "mac_tx_pause_cnt",
87 "mac_tx_ctrl_pkt",
88 "mac_tx_lt_64b_pkts",
89 "mac_tx_lt_127b_pkts",
90 "mac_tx_lt_255b_pkts",
91 "mac_tx_lt_511b_pkts",
92 "mac_tx_lt_1023b_pkts",
93 "mac_tx_lt_1518b_pkts",
94 "mac_tx_gt_1518b_pkts",
95 "mac_rx_frames",
96 "mac_rx_bytes",
97 "mac_rx_mcast_pkts",
98 "mac_rx_bcast_pkts",
99 "mac_rx_pause_cnt",
100 "mac_rx_ctrl_pkt",
101 "mac_rx_lt_64b_pkts",
102 "mac_rx_lt_127b_pkts",
103 "mac_rx_lt_255b_pkts",
104 "mac_rx_lt_511b_pkts",
105 "mac_rx_lt_1023b_pkts",
106 "mac_rx_lt_1518b_pkts",
107 "mac_rx_gt_1518b_pkts",
108 "mac_rx_length_error",
109 "mac_rx_length_small",
110 "mac_rx_length_large",
111 "mac_rx_jabber",
112 "mac_rx_dropped",
113 "mac_rx_crc_error",
114 "mac_align_error",
115};
116
117#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
118#define QLCNIC_MAC_STATS_LEN ARRAY_SIZE(qlcnic_mac_stats_strings)
82#define QLCNIC_DEVICE_STATS_LEN ARRAY_SIZE(qlcnic_device_gstrings_stats) 119#define QLCNIC_DEVICE_STATS_LEN ARRAY_SIZE(qlcnic_device_gstrings_stats)
120#define QLCNIC_TOTAL_STATS_LEN QLCNIC_STATS_LEN + QLCNIC_MAC_STATS_LEN
83 121
84static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = { 122static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
85 "Register_Test_on_offline", 123 "Register_Test_on_offline",
@@ -644,8 +682,8 @@ static int qlcnic_get_sset_count(struct net_device *dev, int sset)
644 return QLCNIC_TEST_LEN; 682 return QLCNIC_TEST_LEN;
645 case ETH_SS_STATS: 683 case ETH_SS_STATS:
646 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) 684 if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
647 return QLCNIC_STATS_LEN + QLCNIC_DEVICE_STATS_LEN; 685 return QLCNIC_TOTAL_STATS_LEN + QLCNIC_DEVICE_STATS_LEN;
648 return QLCNIC_STATS_LEN; 686 return QLCNIC_TOTAL_STATS_LEN;
649 default: 687 default:
650 return -EOPNOTSUPP; 688 return -EOPNOTSUPP;
651 } 689 }
@@ -851,7 +889,7 @@ static void
851qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data) 889qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
852{ 890{
853 struct qlcnic_adapter *adapter = netdev_priv(dev); 891 struct qlcnic_adapter *adapter = netdev_priv(dev);
854 int index, i; 892 int index, i, j;
855 893
856 switch (stringset) { 894 switch (stringset) {
857 case ETH_SS_TEST: 895 case ETH_SS_TEST:
@@ -864,6 +902,11 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
864 qlcnic_gstrings_stats[index].stat_string, 902 qlcnic_gstrings_stats[index].stat_string,
865 ETH_GSTRING_LEN); 903 ETH_GSTRING_LEN);
866 } 904 }
905 for (j = 0; j < QLCNIC_MAC_STATS_LEN; index++, j++) {
906 memcpy(data + index * ETH_GSTRING_LEN,
907 qlcnic_mac_stats_strings[j],
908 ETH_GSTRING_LEN);
909 }
867 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) 910 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
868 return; 911 return;
869 for (i = 0; i < QLCNIC_DEVICE_STATS_LEN; index++, i++) { 912 for (i = 0; i < QLCNIC_DEVICE_STATS_LEN; index++, i++) {
@@ -874,22 +917,64 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
874 } 917 }
875} 918}
876 919
877#define QLCNIC_FILL_ESWITCH_STATS(VAL1) \
878 (((VAL1) == QLCNIC_ESW_STATS_NOT_AVAIL) ? 0 : VAL1)
879
880static void 920static void
881qlcnic_fill_device_stats(int *index, u64 *data, 921qlcnic_fill_stats(int *index, u64 *data, void *stats, int type)
882 struct __qlcnic_esw_statistics *stats)
883{ 922{
884 int ind = *index; 923 int ind = *index;
885 924
886 data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->unicast_frames); 925 if (type == QLCNIC_MAC_STATS) {
887 data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->multicast_frames); 926 struct qlcnic_mac_statistics *mac_stats =
888 data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->broadcast_frames); 927 (struct qlcnic_mac_statistics *)stats;
889 data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->dropped_frames); 928 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_frames);
890 data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->errors); 929 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_bytes);
891 data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->local_frames); 930 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_mcast_pkts);
892 data[ind++] = QLCNIC_FILL_ESWITCH_STATS(stats->numbytes); 931 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_bcast_pkts);
932 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_pause_cnt);
933 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_ctrl_pkt);
934 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_64b_pkts);
935 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_127b_pkts);
936 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_255b_pkts);
937 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_511b_pkts);
938 data[ind++] =
939 QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1023b_pkts);
940 data[ind++] =
941 QLCNIC_FILL_STATS(mac_stats->mac_tx_lt_1518b_pkts);
942 data[ind++] =
943 QLCNIC_FILL_STATS(mac_stats->mac_tx_gt_1518b_pkts);
944 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_frames);
945 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_bytes);
946 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_mcast_pkts);
947 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_bcast_pkts);
948 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_pause_cnt);
949 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_ctrl_pkt);
950 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_64b_pkts);
951 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_127b_pkts);
952 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_255b_pkts);
953 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_511b_pkts);
954 data[ind++] =
955 QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1023b_pkts);
956 data[ind++] =
957 QLCNIC_FILL_STATS(mac_stats->mac_rx_lt_1518b_pkts);
958 data[ind++] =
959 QLCNIC_FILL_STATS(mac_stats->mac_rx_gt_1518b_pkts);
960 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_error);
961 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_small);
962 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_length_large);
963 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_jabber);
964 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_dropped);
965 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_rx_crc_error);
966 data[ind++] = QLCNIC_FILL_STATS(mac_stats->mac_align_error);
967 } else if (type == QLCNIC_ESW_STATS) {
968 struct __qlcnic_esw_statistics *esw_stats =
969 (struct __qlcnic_esw_statistics *)stats;
970 data[ind++] = QLCNIC_FILL_STATS(esw_stats->unicast_frames);
971 data[ind++] = QLCNIC_FILL_STATS(esw_stats->multicast_frames);
972 data[ind++] = QLCNIC_FILL_STATS(esw_stats->broadcast_frames);
973 data[ind++] = QLCNIC_FILL_STATS(esw_stats->dropped_frames);
974 data[ind++] = QLCNIC_FILL_STATS(esw_stats->errors);
975 data[ind++] = QLCNIC_FILL_STATS(esw_stats->local_frames);
976 data[ind++] = QLCNIC_FILL_STATS(esw_stats->numbytes);
977 }
893 978
894 *index = ind; 979 *index = ind;
895} 980}
@@ -900,6 +985,7 @@ qlcnic_get_ethtool_stats(struct net_device *dev,
900{ 985{
901 struct qlcnic_adapter *adapter = netdev_priv(dev); 986 struct qlcnic_adapter *adapter = netdev_priv(dev);
902 struct qlcnic_esw_statistics port_stats; 987 struct qlcnic_esw_statistics port_stats;
988 struct qlcnic_mac_statistics mac_stats;
903 int index, ret; 989 int index, ret;
904 990
905 for (index = 0; index < QLCNIC_STATS_LEN; index++) { 991 for (index = 0; index < QLCNIC_STATS_LEN; index++) {
@@ -911,6 +997,11 @@ qlcnic_get_ethtool_stats(struct net_device *dev,
911 sizeof(u64)) ? *(u64 *)p:(*(u32 *)p); 997 sizeof(u64)) ? *(u64 *)p:(*(u32 *)p);
912 } 998 }
913 999
1000 /* Retrieve MAC statistics from firmware */
1001 memset(&mac_stats, 0, sizeof(struct qlcnic_mac_statistics));
1002 qlcnic_get_mac_stats(adapter, &mac_stats);
1003 qlcnic_fill_stats(&index, data, &mac_stats, QLCNIC_MAC_STATS);
1004
914 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) 1005 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
915 return; 1006 return;
916 1007
@@ -920,14 +1011,14 @@ qlcnic_get_ethtool_stats(struct net_device *dev,
920 if (ret) 1011 if (ret)
921 return; 1012 return;
922 1013
923 qlcnic_fill_device_stats(&index, data, &port_stats.rx); 1014 qlcnic_fill_stats(&index, data, &port_stats.rx, QLCNIC_ESW_STATS);
924 1015
925 ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func, 1016 ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func,
926 QLCNIC_QUERY_TX_COUNTER, &port_stats.tx); 1017 QLCNIC_QUERY_TX_COUNTER, &port_stats.tx);
927 if (ret) 1018 if (ret)
928 return; 1019 return;
929 1020
930 qlcnic_fill_device_stats(&index, data, &port_stats.tx); 1021 qlcnic_fill_stats(&index, data, &port_stats.tx, QLCNIC_ESW_STATS);
931} 1022}
932 1023
933static int qlcnic_set_led(struct net_device *dev, 1024static int qlcnic_set_led(struct net_device *dev,
@@ -1132,6 +1223,11 @@ qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
1132 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1223 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1133 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; 1224 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
1134 1225
1226 if (!fw_dump->tmpl_hdr) {
1227 netdev_err(adapter->netdev, "FW Dump not supported\n");
1228 return -ENOTSUPP;
1229 }
1230
1135 if (fw_dump->clr) 1231 if (fw_dump->clr)
1136 dump->len = fw_dump->tmpl_hdr->size + fw_dump->size; 1232 dump->len = fw_dump->tmpl_hdr->size + fw_dump->size;
1137 else 1233 else
@@ -1150,6 +1246,11 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
1150 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1246 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1151 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; 1247 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
1152 1248
1249 if (!fw_dump->tmpl_hdr) {
1250 netdev_err(netdev, "FW Dump not supported\n");
1251 return -ENOTSUPP;
1252 }
1253
1153 if (!fw_dump->clr) { 1254 if (!fw_dump->clr) {
1154 netdev_info(netdev, "Dump not available\n"); 1255 netdev_info(netdev, "Dump not available\n");
1155 return -EINVAL; 1256 return -EINVAL;
@@ -1177,55 +1278,74 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
1177static int 1278static int
1178qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val) 1279qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
1179{ 1280{
1180 int ret = 0; 1281 int i;
1181 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1282 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1182 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump; 1283 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
1284 u32 state;
1183 1285
1184 switch (val->flag) { 1286 switch (val->flag) {
1185 case QLCNIC_FORCE_FW_DUMP_KEY: 1287 case QLCNIC_FORCE_FW_DUMP_KEY:
1288 if (!fw_dump->tmpl_hdr) {
1289 netdev_err(netdev, "FW dump not supported\n");
1290 return -ENOTSUPP;
1291 }
1186 if (!fw_dump->enable) { 1292 if (!fw_dump->enable) {
1187 netdev_info(netdev, "FW dump not enabled\n"); 1293 netdev_info(netdev, "FW dump not enabled\n");
1188 return ret; 1294 return 0;
1189 } 1295 }
1190 if (fw_dump->clr) { 1296 if (fw_dump->clr) {
1191 netdev_info(netdev, 1297 netdev_info(netdev,
1192 "Previous dump not cleared, not forcing dump\n"); 1298 "Previous dump not cleared, not forcing dump\n");
1193 return ret; 1299 return 0;
1194 } 1300 }
1195 netdev_info(netdev, "Forcing a FW dump\n"); 1301 netdev_info(netdev, "Forcing a FW dump\n");
1196 qlcnic_dev_request_reset(adapter); 1302 qlcnic_dev_request_reset(adapter);
1197 break; 1303 break;
1198 case QLCNIC_DISABLE_FW_DUMP: 1304 case QLCNIC_DISABLE_FW_DUMP:
1199 if (fw_dump->enable) { 1305 if (fw_dump->enable && fw_dump->tmpl_hdr) {
1200 netdev_info(netdev, "Disabling FW dump\n"); 1306 netdev_info(netdev, "Disabling FW dump\n");
1201 fw_dump->enable = 0; 1307 fw_dump->enable = 0;
1202 } 1308 }
1203 break; 1309 return 0;
1204 case QLCNIC_ENABLE_FW_DUMP: 1310 case QLCNIC_ENABLE_FW_DUMP:
1205 if (!fw_dump->enable && fw_dump->tmpl_hdr) { 1311 if (!fw_dump->tmpl_hdr) {
1312 netdev_err(netdev, "FW dump not supported\n");
1313 return -ENOTSUPP;
1314 }
1315 if (!fw_dump->enable) {
1206 netdev_info(netdev, "Enabling FW dump\n"); 1316 netdev_info(netdev, "Enabling FW dump\n");
1207 fw_dump->enable = 1; 1317 fw_dump->enable = 1;
1208 } 1318 }
1209 break; 1319 return 0;
1210 case QLCNIC_FORCE_FW_RESET: 1320 case QLCNIC_FORCE_FW_RESET:
1211 netdev_info(netdev, "Forcing a FW reset\n"); 1321 netdev_info(netdev, "Forcing a FW reset\n");
1212 qlcnic_dev_request_reset(adapter); 1322 qlcnic_dev_request_reset(adapter);
1213 adapter->flags &= ~QLCNIC_FW_RESET_OWNER; 1323 adapter->flags &= ~QLCNIC_FW_RESET_OWNER;
1214 break; 1324 return 0;
1325 case QLCNIC_SET_QUIESCENT:
1326 case QLCNIC_RESET_QUIESCENT:
1327 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
1328 if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
1329 netdev_info(netdev, "Device in FAILED state\n");
1330 return 0;
1215 default: 1331 default:
1216 if (val->flag > QLCNIC_DUMP_MASK_MAX || 1332 if (!fw_dump->tmpl_hdr) {
1217 val->flag < QLCNIC_DUMP_MASK_MIN) { 1333 netdev_err(netdev, "FW dump not supported\n");
1218 netdev_info(netdev, 1334 return -ENOTSUPP;
1219 "Invalid dump level: 0x%x\n", val->flag);
1220 ret = -EINVAL;
1221 goto out;
1222 } 1335 }
1223 fw_dump->tmpl_hdr->drv_cap_mask = val->flag & 0xff; 1336 for (i = 0; i < ARRAY_SIZE(FW_DUMP_LEVELS); i++) {
1224 netdev_info(netdev, "Driver mask changed to: 0x%x\n", 1337 if (val->flag == FW_DUMP_LEVELS[i]) {
1225 fw_dump->tmpl_hdr->drv_cap_mask); 1338 fw_dump->tmpl_hdr->drv_cap_mask =
1339 val->flag;
1340 netdev_info(netdev, "Driver mask changed to: 0x%x\n",
1341 fw_dump->tmpl_hdr->drv_cap_mask);
1342 return 0;
1343 }
1344 }
1345 netdev_info(netdev, "Invalid dump level: 0x%x\n", val->flag);
1346 return -EINVAL;
1226 } 1347 }
1227out: 1348 return 0;
1228 return ret;
1229} 1349}
1230 1350
1231const struct ethtool_ops qlcnic_ethtool_ops = { 1351const struct ethtool_ops qlcnic_ethtool_ops = {
@@ -1258,3 +1378,10 @@ const struct ethtool_ops qlcnic_ethtool_ops = {
1258 .get_dump_data = qlcnic_get_dump_data, 1378 .get_dump_data = qlcnic_get_dump_data,
1259 .set_dump = qlcnic_set_dump, 1379 .set_dump = qlcnic_set_dump,
1260}; 1380};
1381
1382const struct ethtool_ops qlcnic_ethtool_failed_ops = {
1383 .get_settings = qlcnic_get_settings,
1384 .get_drvinfo = qlcnic_get_drvinfo,
1385 .set_msglevel = qlcnic_set_msglevel,
1386 .get_msglevel = qlcnic_get_msglevel,
1387};
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
index a52819303d1b..6ced3195aad3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
@@ -704,6 +704,8 @@ enum {
704#define QLCNIC_DEV_FAILED 0x6 704#define QLCNIC_DEV_FAILED 0x6
705#define QLCNIC_DEV_QUISCENT 0x7 705#define QLCNIC_DEV_QUISCENT 0x7
706 706
707#define QLCNIC_DEV_BADBAD 0xbad0bad0
708
707#define QLCNIC_DEV_NPAR_NON_OPER 0 /* NON Operational */ 709#define QLCNIC_DEV_NPAR_NON_OPER 0 /* NON Operational */
708#define QLCNIC_DEV_NPAR_OPER 1 /* NPAR Operational */ 710#define QLCNIC_DEV_NPAR_OPER 1 /* NPAR Operational */
709#define QLCNIC_DEV_NPAR_OPER_TIMEO 30 /* Operational time out */ 711#define QLCNIC_DEV_NPAR_OPER_TIMEO 30 /* Operational time out */
@@ -776,6 +778,10 @@ struct qlcnic_legacy_intr_set {
776#define FLASH_ROM_WINDOW 0x42110030 778#define FLASH_ROM_WINDOW 0x42110030
777#define FLASH_ROM_DATA 0x42150000 779#define FLASH_ROM_DATA 0x42150000
778 780
781
782static const u32 FW_DUMP_LEVELS[] = {
783 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff };
784
779static const u32 MIU_TEST_READ_DATA[] = { 785static const u32 MIU_TEST_READ_DATA[] = {
780 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC, }; 786 0x410000A8, 0x410000AC, 0x410000B8, 0x410000BC, };
781 787
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 75c32e875fef..5c4713521d4c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -338,6 +338,10 @@ static const struct net_device_ops qlcnic_netdev_ops = {
338#endif 338#endif
339}; 339};
340 340
341static const struct net_device_ops qlcnic_netdev_failed_ops = {
342 .ndo_open = qlcnic_open,
343};
344
341static struct qlcnic_nic_template qlcnic_ops = { 345static struct qlcnic_nic_template qlcnic_ops = {
342 .config_bridged_mode = qlcnic_config_bridged_mode, 346 .config_bridged_mode = qlcnic_config_bridged_mode,
343 .config_led = qlcnic_config_led, 347 .config_led = qlcnic_config_led,
@@ -1623,8 +1627,9 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1623 1627
1624 err = adapter->nic_ops->start_firmware(adapter); 1628 err = adapter->nic_ops->start_firmware(adapter);
1625 if (err) { 1629 if (err) {
1626 dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"); 1630 dev_err(&pdev->dev, "Loading fw failed. Please Reboot\n"
1627 goto err_out_decr_ref; 1631 "\t\tIf reboot doesn't help, try flashing the card\n");
1632 goto err_out_maintenance_mode;
1628 } 1633 }
1629 1634
1630 if (qlcnic_read_mac_addr(adapter)) 1635 if (qlcnic_read_mac_addr(adapter))
@@ -1695,6 +1700,18 @@ err_out_disable_pdev:
1695 pci_set_drvdata(pdev, NULL); 1700 pci_set_drvdata(pdev, NULL);
1696 pci_disable_device(pdev); 1701 pci_disable_device(pdev);
1697 return err; 1702 return err;
1703
1704err_out_maintenance_mode:
1705 netdev->netdev_ops = &qlcnic_netdev_failed_ops;
1706 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops);
1707 err = register_netdev(netdev);
1708 if (err) {
1709 dev_err(&pdev->dev, "failed to register net device\n");
1710 goto err_out_decr_ref;
1711 }
1712 pci_set_drvdata(pdev, adapter);
1713 qlcnic_create_diag_entries(adapter);
1714 return 0;
1698} 1715}
1699 1716
1700static void __devexit qlcnic_remove(struct pci_dev *pdev) 1717static void __devexit qlcnic_remove(struct pci_dev *pdev)
@@ -1831,8 +1848,14 @@ done:
1831static int qlcnic_open(struct net_device *netdev) 1848static int qlcnic_open(struct net_device *netdev)
1832{ 1849{
1833 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1850 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1851 u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
1834 int err; 1852 int err;
1835 1853
1854 if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) {
1855 netdev_err(netdev, "Device in FAILED state\n");
1856 return -EIO;
1857 }
1858
1836 netif_carrier_off(netdev); 1859 netif_carrier_off(netdev);
1837 1860
1838 err = qlcnic_attach(adapter); 1861 err = qlcnic_attach(adapter);
@@ -3018,6 +3041,12 @@ qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
3018 return; 3041 return;
3019 3042
3020 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); 3043 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
3044 if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD)) {
3045 netdev_err(adapter->netdev,
3046 "Device is in FAILED state, Please Reboot\n");
3047 qlcnic_api_unlock(adapter);
3048 return;
3049 }
3021 3050
3022 if (state == QLCNIC_DEV_READY) { 3051 if (state == QLCNIC_DEV_READY) {
3023 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET); 3052 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
@@ -3061,6 +3090,9 @@ qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
3061 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) 3090 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
3062 msleep(10); 3091 msleep(10);
3063 3092
3093 if (!adapter->fw_work.work.func)
3094 return;
3095
3064 cancel_delayed_work_sync(&adapter->fw_work); 3096 cancel_delayed_work_sync(&adapter->fw_work);
3065} 3097}
3066 3098
@@ -4280,6 +4312,7 @@ static void
4280qlcnic_create_diag_entries(struct qlcnic_adapter *adapter) 4312qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
4281{ 4313{
4282 struct device *dev = &adapter->pdev->dev; 4314 struct device *dev = &adapter->pdev->dev;
4315 u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
4283 4316
4284 if (device_create_bin_file(dev, &bin_attr_port_stats)) 4317 if (device_create_bin_file(dev, &bin_attr_port_stats))
4285 dev_info(dev, "failed to create port stats sysfs entry"); 4318 dev_info(dev, "failed to create port stats sysfs entry");
@@ -4288,14 +4321,19 @@ qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
4288 return; 4321 return;
4289 if (device_create_file(dev, &dev_attr_diag_mode)) 4322 if (device_create_file(dev, &dev_attr_diag_mode))
4290 dev_info(dev, "failed to create diag_mode sysfs entry\n"); 4323 dev_info(dev, "failed to create diag_mode sysfs entry\n");
4291 if (device_create_file(dev, &dev_attr_beacon))
4292 dev_info(dev, "failed to create beacon sysfs entry");
4293 if (device_create_bin_file(dev, &bin_attr_crb)) 4324 if (device_create_bin_file(dev, &bin_attr_crb))
4294 dev_info(dev, "failed to create crb sysfs entry\n"); 4325 dev_info(dev, "failed to create crb sysfs entry\n");
4295 if (device_create_bin_file(dev, &bin_attr_mem)) 4326 if (device_create_bin_file(dev, &bin_attr_mem))
4296 dev_info(dev, "failed to create mem sysfs entry\n"); 4327 dev_info(dev, "failed to create mem sysfs entry\n");
4328
4329 if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
4330 return;
4331
4297 if (device_create_bin_file(dev, &bin_attr_pci_config)) 4332 if (device_create_bin_file(dev, &bin_attr_pci_config))
4298 dev_info(dev, "failed to create pci config sysfs entry"); 4333 dev_info(dev, "failed to create pci config sysfs entry");
4334 if (device_create_file(dev, &dev_attr_beacon))
4335 dev_info(dev, "failed to create beacon sysfs entry");
4336
4299 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) 4337 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
4300 return; 4338 return;
4301 if (device_create_bin_file(dev, &bin_attr_esw_config)) 4339 if (device_create_bin_file(dev, &bin_attr_esw_config))
@@ -4314,16 +4352,19 @@ static void
4314qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter) 4352qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
4315{ 4353{
4316 struct device *dev = &adapter->pdev->dev; 4354 struct device *dev = &adapter->pdev->dev;
4355 u32 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
4317 4356
4318 device_remove_bin_file(dev, &bin_attr_port_stats); 4357 device_remove_bin_file(dev, &bin_attr_port_stats);
4319 4358
4320 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) 4359 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
4321 return; 4360 return;
4322 device_remove_file(dev, &dev_attr_diag_mode); 4361 device_remove_file(dev, &dev_attr_diag_mode);
4323 device_remove_file(dev, &dev_attr_beacon);
4324 device_remove_bin_file(dev, &bin_attr_crb); 4362 device_remove_bin_file(dev, &bin_attr_crb);
4325 device_remove_bin_file(dev, &bin_attr_mem); 4363 device_remove_bin_file(dev, &bin_attr_mem);
4364 if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
4365 return;
4326 device_remove_bin_file(dev, &bin_attr_pci_config); 4366 device_remove_bin_file(dev, &bin_attr_pci_config);
4367 device_remove_file(dev, &dev_attr_beacon);
4327 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) 4368 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
4328 return; 4369 return;
4329 device_remove_bin_file(dev, &bin_attr_esw_config); 4370 device_remove_bin_file(dev, &bin_attr_esw_config);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 49343ec21c82..09d8d33171df 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -3845,7 +3845,7 @@ static int ql_wol(struct ql_adapter *qdev)
3845 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST | 3845 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3846 WAKE_MCAST | WAKE_BCAST)) { 3846 WAKE_MCAST | WAKE_BCAST)) {
3847 netif_err(qdev, ifdown, qdev->ndev, 3847 netif_err(qdev, ifdown, qdev->ndev,
3848 "Unsupported WOL paramter. qdev->wol = 0x%x.\n", 3848 "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3849 qdev->wol); 3849 qdev->wol);
3850 return -EINVAL; 3850 return -EINVAL;
3851 } 3851 }
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index b96e1920e045..4de73643fec6 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -4,7 +4,7 @@
4 * Copyright (C) 2004 Sten Wang <sten.wang@rdc.com.tw> 4 * Copyright (C) 2004 Sten Wang <sten.wang@rdc.com.tw>
5 * Copyright (C) 2007 5 * Copyright (C) 2007
6 * Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us> 6 * Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>
7 * Florian Fainelli <florian@openwrt.org> 7 * Copyright (C) 2007-2012 Florian Fainelli <florian@openwrt.org>
8 * 8 *
9 * This program is free software; you can redistribute it and/or 9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License 10 * modify it under the terms of the GNU General Public License
@@ -74,9 +74,13 @@
74#define MT_ICR 0x0C /* TX interrupt control */ 74#define MT_ICR 0x0C /* TX interrupt control */
75#define MR_ICR 0x10 /* RX interrupt control */ 75#define MR_ICR 0x10 /* RX interrupt control */
76#define MTPR 0x14 /* TX poll command register */ 76#define MTPR 0x14 /* TX poll command register */
77#define TM2TX 0x0001 /* Trigger MAC to transmit */
77#define MR_BSR 0x18 /* RX buffer size */ 78#define MR_BSR 0x18 /* RX buffer size */
78#define MR_DCR 0x1A /* RX descriptor control */ 79#define MR_DCR 0x1A /* RX descriptor control */
79#define MLSR 0x1C /* Last status */ 80#define MLSR 0x1C /* Last status */
81#define TX_FIFO_UNDR 0x0200 /* TX FIFO under-run */
82#define TX_EXCEEDC 0x2000 /* Transmit exceed collision */
83#define TX_LATEC 0x4000 /* Transmit late collision */
80#define MMDIO 0x20 /* MDIO control register */ 84#define MMDIO 0x20 /* MDIO control register */
81#define MDIO_WRITE 0x4000 /* MDIO write */ 85#define MDIO_WRITE 0x4000 /* MDIO write */
82#define MDIO_READ 0x2000 /* MDIO read */ 86#define MDIO_READ 0x2000 /* MDIO read */
@@ -124,6 +128,9 @@
124#define MID_3M 0x82 /* MID3 Medium */ 128#define MID_3M 0x82 /* MID3 Medium */
125#define MID_3H 0x84 /* MID3 High */ 129#define MID_3H 0x84 /* MID3 High */
126#define PHY_CC 0x88 /* PHY status change configuration register */ 130#define PHY_CC 0x88 /* PHY status change configuration register */
131#define SCEN 0x8000 /* PHY status change enable */
132#define PHYAD_SHIFT 8 /* PHY address shift */
133#define TMRDIV_SHIFT 0 /* Timer divider shift */
127#define PHY_ST 0x8A /* PHY status register */ 134#define PHY_ST 0x8A /* PHY status register */
128#define MAC_SM 0xAC /* MAC status machine */ 135#define MAC_SM 0xAC /* MAC status machine */
129#define MAC_SM_RST 0x0002 /* MAC status machine reset */ 136#define MAC_SM_RST 0x0002 /* MAC status machine reset */
@@ -137,6 +144,8 @@
137#define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */ 144#define MBCR_DEFAULT 0x012A /* MAC Bus Control Register */
138#define MCAST_MAX 3 /* Max number multicast addresses to filter */ 145#define MCAST_MAX 3 /* Max number multicast addresses to filter */
139 146
147#define MAC_DEF_TIMEOUT 2048 /* Default MAC read/write operation timeout */
148
140/* Descriptor status */ 149/* Descriptor status */
141#define DSC_OWNER_MAC 0x8000 /* MAC is the owner of this descriptor */ 150#define DSC_OWNER_MAC 0x8000 /* MAC is the owner of this descriptor */
142#define DSC_RX_OK 0x4000 /* RX was successful */ 151#define DSC_RX_OK 0x4000 /* RX was successful */
@@ -187,7 +196,7 @@ struct r6040_private {
187 dma_addr_t rx_ring_dma; 196 dma_addr_t rx_ring_dma;
188 dma_addr_t tx_ring_dma; 197 dma_addr_t tx_ring_dma;
189 u16 tx_free_desc; 198 u16 tx_free_desc;
190 u16 mcr0, mcr1; 199 u16 mcr0;
191 struct net_device *dev; 200 struct net_device *dev;
192 struct mii_bus *mii_bus; 201 struct mii_bus *mii_bus;
193 struct napi_struct napi; 202 struct napi_struct napi;
@@ -204,7 +213,7 @@ static char version[] __devinitdata = DRV_NAME
204/* Read a word data from PHY Chip */ 213/* Read a word data from PHY Chip */
205static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg) 214static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
206{ 215{
207 int limit = 2048; 216 int limit = MAC_DEF_TIMEOUT;
208 u16 cmd; 217 u16 cmd;
209 218
210 iowrite16(MDIO_READ + reg + (phy_addr << 8), ioaddr + MMDIO); 219 iowrite16(MDIO_READ + reg + (phy_addr << 8), ioaddr + MMDIO);
@@ -222,7 +231,7 @@ static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
222static void r6040_phy_write(void __iomem *ioaddr, 231static void r6040_phy_write(void __iomem *ioaddr,
223 int phy_addr, int reg, u16 val) 232 int phy_addr, int reg, u16 val)
224{ 233{
225 int limit = 2048; 234 int limit = MAC_DEF_TIMEOUT;
226 u16 cmd; 235 u16 cmd;
227 236
228 iowrite16(val, ioaddr + MMWD); 237 iowrite16(val, ioaddr + MMWD);
@@ -358,27 +367,35 @@ err_exit:
358 return rc; 367 return rc;
359} 368}
360 369
361static void r6040_init_mac_regs(struct net_device *dev) 370static void r6040_reset_mac(struct r6040_private *lp)
362{ 371{
363 struct r6040_private *lp = netdev_priv(dev);
364 void __iomem *ioaddr = lp->base; 372 void __iomem *ioaddr = lp->base;
365 int limit = 2048; 373 int limit = MAC_DEF_TIMEOUT;
366 u16 cmd; 374 u16 cmd;
367 375
368 /* Mask Off Interrupt */
369 iowrite16(MSK_INT, ioaddr + MIER);
370
371 /* Reset RDC MAC */
372 iowrite16(MAC_RST, ioaddr + MCR1); 376 iowrite16(MAC_RST, ioaddr + MCR1);
373 while (limit--) { 377 while (limit--) {
374 cmd = ioread16(ioaddr + MCR1); 378 cmd = ioread16(ioaddr + MCR1);
375 if (cmd & MAC_RST) 379 if (cmd & MAC_RST)
376 break; 380 break;
377 } 381 }
382
378 /* Reset internal state machine */ 383 /* Reset internal state machine */
379 iowrite16(MAC_SM_RST, ioaddr + MAC_SM); 384 iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
380 iowrite16(0, ioaddr + MAC_SM); 385 iowrite16(0, ioaddr + MAC_SM);
381 mdelay(5); 386 mdelay(5);
387}
388
389static void r6040_init_mac_regs(struct net_device *dev)
390{
391 struct r6040_private *lp = netdev_priv(dev);
392 void __iomem *ioaddr = lp->base;
393
394 /* Mask Off Interrupt */
395 iowrite16(MSK_INT, ioaddr + MIER);
396
397 /* Reset RDC MAC */
398 r6040_reset_mac(lp);
382 399
383 /* MAC Bus Control Register */ 400 /* MAC Bus Control Register */
384 iowrite16(MBCR_DEFAULT, ioaddr + MBCR); 401 iowrite16(MBCR_DEFAULT, ioaddr + MBCR);
@@ -407,7 +424,7 @@ static void r6040_init_mac_regs(struct net_device *dev)
407 /* Let TX poll the descriptors 424 /* Let TX poll the descriptors
408 * we may got called by r6040_tx_timeout which has left 425 * we may got called by r6040_tx_timeout which has left
409 * some unsent tx buffers */ 426 * some unsent tx buffers */
410 iowrite16(0x01, ioaddr + MTPR); 427 iowrite16(TM2TX, ioaddr + MTPR);
411} 428}
412 429
413static void r6040_tx_timeout(struct net_device *dev) 430static void r6040_tx_timeout(struct net_device *dev)
@@ -445,18 +462,13 @@ static void r6040_down(struct net_device *dev)
445{ 462{
446 struct r6040_private *lp = netdev_priv(dev); 463 struct r6040_private *lp = netdev_priv(dev);
447 void __iomem *ioaddr = lp->base; 464 void __iomem *ioaddr = lp->base;
448 int limit = 2048;
449 u16 *adrp; 465 u16 *adrp;
450 u16 cmd;
451 466
452 /* Stop MAC */ 467 /* Stop MAC */
453 iowrite16(MSK_INT, ioaddr + MIER); /* Mask Off Interrupt */ 468 iowrite16(MSK_INT, ioaddr + MIER); /* Mask Off Interrupt */
454 iowrite16(MAC_RST, ioaddr + MCR1); /* Reset RDC MAC */ 469
455 while (limit--) { 470 /* Reset RDC MAC */
456 cmd = ioread16(ioaddr + MCR1); 471 r6040_reset_mac(lp);
457 if (cmd & MAC_RST)
458 break;
459 }
460 472
461 /* Restore MAC Address to MIDx */ 473 /* Restore MAC Address to MIDx */
462 adrp = (u16 *) dev->dev_addr; 474 adrp = (u16 *) dev->dev_addr;
@@ -599,9 +611,9 @@ static void r6040_tx(struct net_device *dev)
599 /* Check for errors */ 611 /* Check for errors */
600 err = ioread16(ioaddr + MLSR); 612 err = ioread16(ioaddr + MLSR);
601 613
602 if (err & 0x0200) 614 if (err & TX_FIFO_UNDR)
603 dev->stats.rx_fifo_errors++; 615 dev->stats.tx_fifo_errors++;
604 if (err & (0x2000 | 0x4000)) 616 if (err & (TX_EXCEEDC | TX_LATEC))
605 dev->stats.tx_carrier_errors++; 617 dev->stats.tx_carrier_errors++;
606 618
607 if (descptr->status & DSC_OWNER_MAC) 619 if (descptr->status & DSC_OWNER_MAC)
@@ -736,11 +748,7 @@ static void r6040_mac_address(struct net_device *dev)
736 u16 *adrp; 748 u16 *adrp;
737 749
738 /* Reset MAC */ 750 /* Reset MAC */
739 iowrite16(MAC_RST, ioaddr + MCR1); 751 r6040_reset_mac(lp);
740 /* Reset internal state machine */
741 iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
742 iowrite16(0, ioaddr + MAC_SM);
743 mdelay(5);
744 752
745 /* Restore MAC Address */ 753 /* Restore MAC Address */
746 adrp = (u16 *) dev->dev_addr; 754 adrp = (u16 *) dev->dev_addr;
@@ -840,7 +848,7 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
840 skb_tx_timestamp(skb); 848 skb_tx_timestamp(skb);
841 849
842 /* Trigger the MAC to check the TX descriptor */ 850 /* Trigger the MAC to check the TX descriptor */
843 iowrite16(0x01, ioaddr + MTPR); 851 iowrite16(TM2TX, ioaddr + MTPR);
844 lp->tx_insert_ptr = descptr->vndescp; 852 lp->tx_insert_ptr = descptr->vndescp;
845 853
846 /* If no tx resource, stop */ 854 /* If no tx resource, stop */
@@ -973,6 +981,7 @@ static const struct ethtool_ops netdev_ethtool_ops = {
973 .get_settings = netdev_get_settings, 981 .get_settings = netdev_get_settings,
974 .set_settings = netdev_set_settings, 982 .set_settings = netdev_set_settings,
975 .get_link = ethtool_op_get_link, 983 .get_link = ethtool_op_get_link,
984 .get_ts_info = ethtool_op_get_ts_info,
976}; 985};
977 986
978static const struct net_device_ops r6040_netdev_ops = { 987static const struct net_device_ops r6040_netdev_ops = {
@@ -1126,10 +1135,15 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1126 err = -EIO; 1135 err = -EIO;
1127 goto err_out_free_res; 1136 goto err_out_free_res;
1128 } 1137 }
1138
1129 /* If PHY status change register is still set to zero it means the 1139 /* If PHY status change register is still set to zero it means the
1130 * bootloader didn't initialize it */ 1140 * bootloader didn't initialize it, so we set it to:
1141 * - enable phy status change
1142 * - enable all phy addresses
1143 * - set to lowest timer divider */
1131 if (ioread16(ioaddr + PHY_CC) == 0) 1144 if (ioread16(ioaddr + PHY_CC) == 0)
1132 iowrite16(0x9f07, ioaddr + PHY_CC); 1145 iowrite16(SCEN | PHY_MAX_ADDR << PHYAD_SHIFT |
1146 7 << TMRDIV_SHIFT, ioaddr + PHY_CC);
1133 1147
1134 /* Init system & device */ 1148 /* Init system & device */
1135 lp->base = ioaddr; 1149 lp->base = ioaddr;
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index b3287c0fe279..5eef290997f9 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -635,9 +635,12 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
635 */ 635 */
636static void cp_poll_controller(struct net_device *dev) 636static void cp_poll_controller(struct net_device *dev)
637{ 637{
638 disable_irq(dev->irq); 638 struct cp_private *cp = netdev_priv(dev);
639 cp_interrupt(dev->irq, dev); 639 const int irq = cp->pdev->irq;
640 enable_irq(dev->irq); 640
641 disable_irq(irq);
642 cp_interrupt(irq, dev);
643 enable_irq(irq);
641} 644}
642#endif 645#endif
643 646
@@ -1117,6 +1120,7 @@ static void cp_free_rings (struct cp_private *cp)
1117static int cp_open (struct net_device *dev) 1120static int cp_open (struct net_device *dev)
1118{ 1121{
1119 struct cp_private *cp = netdev_priv(dev); 1122 struct cp_private *cp = netdev_priv(dev);
1123 const int irq = cp->pdev->irq;
1120 int rc; 1124 int rc;
1121 1125
1122 netif_dbg(cp, ifup, dev, "enabling interface\n"); 1126 netif_dbg(cp, ifup, dev, "enabling interface\n");
@@ -1129,7 +1133,7 @@ static int cp_open (struct net_device *dev)
1129 1133
1130 cp_init_hw(cp); 1134 cp_init_hw(cp);
1131 1135
1132 rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev); 1136 rc = request_irq(irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
1133 if (rc) 1137 if (rc)
1134 goto err_out_hw; 1138 goto err_out_hw;
1135 1139
@@ -1166,7 +1170,7 @@ static int cp_close (struct net_device *dev)
1166 1170
1167 spin_unlock_irqrestore(&cp->lock, flags); 1171 spin_unlock_irqrestore(&cp->lock, flags);
1168 1172
1169 free_irq(dev->irq, dev); 1173 free_irq(cp->pdev->irq, dev);
1170 1174
1171 cp_free_rings(cp); 1175 cp_free_rings(cp);
1172 return 0; 1176 return 0;
@@ -1914,7 +1918,6 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1914 (unsigned long long)pciaddr); 1918 (unsigned long long)pciaddr);
1915 goto err_out_res; 1919 goto err_out_res;
1916 } 1920 }
1917 dev->base_addr = (unsigned long) regs;
1918 cp->regs = regs; 1921 cp->regs = regs;
1919 1922
1920 cp_stop_hw(cp); 1923 cp_stop_hw(cp);
@@ -1942,14 +1945,12 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1942 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | 1945 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1943 NETIF_F_HIGHDMA; 1946 NETIF_F_HIGHDMA;
1944 1947
1945 dev->irq = pdev->irq;
1946
1947 rc = register_netdev(dev); 1948 rc = register_netdev(dev);
1948 if (rc) 1949 if (rc)
1949 goto err_out_iomap; 1950 goto err_out_iomap;
1950 1951
1951 netdev_info(dev, "RTL-8139C+ at 0x%lx, %pM, IRQ %d\n", 1952 netdev_info(dev, "RTL-8139C+ at 0x%p, %pM, IRQ %d\n",
1952 dev->base_addr, dev->dev_addr, dev->irq); 1953 regs, dev->dev_addr, pdev->irq);
1953 1954
1954 pci_set_drvdata(pdev, dev); 1955 pci_set_drvdata(pdev, dev);
1955 1956
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index df7fd8d083dc..03df076ed596 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -148,9 +148,9 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
148 148
149/* Whether to use MMIO or PIO. Default to MMIO. */ 149/* Whether to use MMIO or PIO. Default to MMIO. */
150#ifdef CONFIG_8139TOO_PIO 150#ifdef CONFIG_8139TOO_PIO
151static int use_io = 1; 151static bool use_io = true;
152#else 152#else
153static int use_io = 0; 153static bool use_io = false;
154#endif 154#endif
155 155
156/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). 156/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
@@ -620,7 +620,7 @@ MODULE_DESCRIPTION ("RealTek RTL-8139 Fast Ethernet driver");
620MODULE_LICENSE("GPL"); 620MODULE_LICENSE("GPL");
621MODULE_VERSION(DRV_VERSION); 621MODULE_VERSION(DRV_VERSION);
622 622
623module_param(use_io, int, 0); 623module_param(use_io, bool, 0);
624MODULE_PARM_DESC(use_io, "Force use of I/O access mode. 0=MMIO 1=PIO"); 624MODULE_PARM_DESC(use_io, "Force use of I/O access mode. 0=MMIO 1=PIO");
625module_param(multicast_filter_limit, int, 0); 625module_param(multicast_filter_limit, int, 0);
626module_param_array(media, int, NULL, 0); 626module_param_array(media, int, NULL, 0);
@@ -750,15 +750,22 @@ static void rtl8139_chip_reset (void __iomem *ioaddr)
750 750
751static __devinit struct net_device * rtl8139_init_board (struct pci_dev *pdev) 751static __devinit struct net_device * rtl8139_init_board (struct pci_dev *pdev)
752{ 752{
753 struct device *d = &pdev->dev;
753 void __iomem *ioaddr; 754 void __iomem *ioaddr;
754 struct net_device *dev; 755 struct net_device *dev;
755 struct rtl8139_private *tp; 756 struct rtl8139_private *tp;
756 u8 tmp8; 757 u8 tmp8;
757 int rc, disable_dev_on_err = 0; 758 int rc, disable_dev_on_err = 0;
758 unsigned int i; 759 unsigned int i, bar;
759 unsigned long pio_start, pio_end, pio_flags, pio_len; 760 unsigned long io_len;
760 unsigned long mmio_start, mmio_end, mmio_flags, mmio_len;
761 u32 version; 761 u32 version;
762 static const struct {
763 unsigned long mask;
764 char *type;
765 } res[] = {
766 { IORESOURCE_IO, "PIO" },
767 { IORESOURCE_MEM, "MMIO" }
768 };
762 769
763 assert (pdev != NULL); 770 assert (pdev != NULL);
764 771
@@ -777,78 +784,45 @@ static __devinit struct net_device * rtl8139_init_board (struct pci_dev *pdev)
777 if (rc) 784 if (rc)
778 goto err_out; 785 goto err_out;
779 786
780 pio_start = pci_resource_start (pdev, 0);
781 pio_end = pci_resource_end (pdev, 0);
782 pio_flags = pci_resource_flags (pdev, 0);
783 pio_len = pci_resource_len (pdev, 0);
784
785 mmio_start = pci_resource_start (pdev, 1);
786 mmio_end = pci_resource_end (pdev, 1);
787 mmio_flags = pci_resource_flags (pdev, 1);
788 mmio_len = pci_resource_len (pdev, 1);
789
790 /* set this immediately, we need to know before
791 * we talk to the chip directly */
792 pr_debug("PIO region size == 0x%02lX\n", pio_len);
793 pr_debug("MMIO region size == 0x%02lX\n", mmio_len);
794
795retry:
796 if (use_io) {
797 /* make sure PCI base addr 0 is PIO */
798 if (!(pio_flags & IORESOURCE_IO)) {
799 dev_err(&pdev->dev, "region #0 not a PIO resource, aborting\n");
800 rc = -ENODEV;
801 goto err_out;
802 }
803 /* check for weird/broken PCI region reporting */
804 if (pio_len < RTL_MIN_IO_SIZE) {
805 dev_err(&pdev->dev, "Invalid PCI I/O region size(s), aborting\n");
806 rc = -ENODEV;
807 goto err_out;
808 }
809 } else {
810 /* make sure PCI base addr 1 is MMIO */
811 if (!(mmio_flags & IORESOURCE_MEM)) {
812 dev_err(&pdev->dev, "region #1 not an MMIO resource, aborting\n");
813 rc = -ENODEV;
814 goto err_out;
815 }
816 if (mmio_len < RTL_MIN_IO_SIZE) {
817 dev_err(&pdev->dev, "Invalid PCI mem region size(s), aborting\n");
818 rc = -ENODEV;
819 goto err_out;
820 }
821 }
822
823 rc = pci_request_regions (pdev, DRV_NAME); 787 rc = pci_request_regions (pdev, DRV_NAME);
824 if (rc) 788 if (rc)
825 goto err_out; 789 goto err_out;
826 disable_dev_on_err = 1; 790 disable_dev_on_err = 1;
827 791
828 /* enable PCI bus-mastering */
829 pci_set_master (pdev); 792 pci_set_master (pdev);
830 793
831 if (use_io) { 794retry:
832 ioaddr = pci_iomap(pdev, 0, 0); 795 /* PIO bar register comes first. */
833 if (!ioaddr) { 796 bar = !use_io;
834 dev_err(&pdev->dev, "cannot map PIO, aborting\n"); 797
835 rc = -EIO; 798 io_len = pci_resource_len(pdev, bar);
836 goto err_out; 799
837 } 800 dev_dbg(d, "%s region size = 0x%02lX\n", res[bar].type, io_len);
838 dev->base_addr = pio_start; 801
839 tp->regs_len = pio_len; 802 if (!(pci_resource_flags(pdev, bar) & res[bar].mask)) {
840 } else { 803 dev_err(d, "region #%d not a %s resource, aborting\n", bar,
841 /* ioremap MMIO region */ 804 res[bar].type);
842 ioaddr = pci_iomap(pdev, 1, 0); 805 rc = -ENODEV;
843 if (ioaddr == NULL) { 806 goto err_out;
844 dev_err(&pdev->dev, "cannot remap MMIO, trying PIO\n"); 807 }
845 pci_release_regions(pdev); 808 if (io_len < RTL_MIN_IO_SIZE) {
846 use_io = 1; 809 dev_err(d, "Invalid PCI %s region size(s), aborting\n",
810 res[bar].type);
811 rc = -ENODEV;
812 goto err_out;
813 }
814
815 ioaddr = pci_iomap(pdev, bar, 0);
816 if (!ioaddr) {
817 dev_err(d, "cannot map %s\n", res[bar].type);
818 if (!use_io) {
819 use_io = true;
847 goto retry; 820 goto retry;
848 } 821 }
849 dev->base_addr = (long) ioaddr; 822 rc = -ENODEV;
850 tp->regs_len = mmio_len; 823 goto err_out;
851 } 824 }
825 tp->regs_len = io_len;
852 tp->mmio_addr = ioaddr; 826 tp->mmio_addr = ioaddr;
853 827
854 /* Bring old chips out of low-power mode. */ 828 /* Bring old chips out of low-power mode. */
@@ -1035,8 +1009,6 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
1035 dev->hw_features |= NETIF_F_RXALL; 1009 dev->hw_features |= NETIF_F_RXALL;
1036 dev->hw_features |= NETIF_F_RXFCS; 1010 dev->hw_features |= NETIF_F_RXFCS;
1037 1011
1038 dev->irq = pdev->irq;
1039
1040 /* tp zeroed and aligned in alloc_etherdev */ 1012 /* tp zeroed and aligned in alloc_etherdev */
1041 tp = netdev_priv(dev); 1013 tp = netdev_priv(dev);
1042 1014
@@ -1062,9 +1034,9 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
1062 1034
1063 pci_set_drvdata (pdev, dev); 1035 pci_set_drvdata (pdev, dev);
1064 1036
1065 netdev_info(dev, "%s at 0x%lx, %pM, IRQ %d\n", 1037 netdev_info(dev, "%s at 0x%p, %pM, IRQ %d\n",
1066 board_info[ent->driver_data].name, 1038 board_info[ent->driver_data].name,
1067 dev->base_addr, dev->dev_addr, dev->irq); 1039 ioaddr, dev->dev_addr, pdev->irq);
1068 1040
1069 netdev_dbg(dev, "Identified 8139 chip type '%s'\n", 1041 netdev_dbg(dev, "Identified 8139 chip type '%s'\n",
1070 rtl_chip_info[tp->chipset].name); 1042 rtl_chip_info[tp->chipset].name);
@@ -1339,10 +1311,11 @@ static void mdio_write (struct net_device *dev, int phy_id, int location,
1339static int rtl8139_open (struct net_device *dev) 1311static int rtl8139_open (struct net_device *dev)
1340{ 1312{
1341 struct rtl8139_private *tp = netdev_priv(dev); 1313 struct rtl8139_private *tp = netdev_priv(dev);
1342 int retval;
1343 void __iomem *ioaddr = tp->mmio_addr; 1314 void __iomem *ioaddr = tp->mmio_addr;
1315 const int irq = tp->pci_dev->irq;
1316 int retval;
1344 1317
1345 retval = request_irq (dev->irq, rtl8139_interrupt, IRQF_SHARED, dev->name, dev); 1318 retval = request_irq(irq, rtl8139_interrupt, IRQF_SHARED, dev->name, dev);
1346 if (retval) 1319 if (retval)
1347 return retval; 1320 return retval;
1348 1321
@@ -1351,7 +1324,7 @@ static int rtl8139_open (struct net_device *dev)
1351 tp->rx_ring = dma_alloc_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN, 1324 tp->rx_ring = dma_alloc_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN,
1352 &tp->rx_ring_dma, GFP_KERNEL); 1325 &tp->rx_ring_dma, GFP_KERNEL);
1353 if (tp->tx_bufs == NULL || tp->rx_ring == NULL) { 1326 if (tp->tx_bufs == NULL || tp->rx_ring == NULL) {
1354 free_irq(dev->irq, dev); 1327 free_irq(irq, dev);
1355 1328
1356 if (tp->tx_bufs) 1329 if (tp->tx_bufs)
1357 dma_free_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN, 1330 dma_free_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN,
@@ -1377,7 +1350,7 @@ static int rtl8139_open (struct net_device *dev)
1377 "%s() ioaddr %#llx IRQ %d GP Pins %02x %s-duplex\n", 1350 "%s() ioaddr %#llx IRQ %d GP Pins %02x %s-duplex\n",
1378 __func__, 1351 __func__,
1379 (unsigned long long)pci_resource_start (tp->pci_dev, 1), 1352 (unsigned long long)pci_resource_start (tp->pci_dev, 1),
1380 dev->irq, RTL_R8 (MediaStatus), 1353 irq, RTL_R8 (MediaStatus),
1381 tp->mii.full_duplex ? "full" : "half"); 1354 tp->mii.full_duplex ? "full" : "half");
1382 1355
1383 rtl8139_start_thread(tp); 1356 rtl8139_start_thread(tp);
@@ -2240,9 +2213,12 @@ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance)
2240 */ 2213 */
2241static void rtl8139_poll_controller(struct net_device *dev) 2214static void rtl8139_poll_controller(struct net_device *dev)
2242{ 2215{
2243 disable_irq(dev->irq); 2216 struct rtl8139_private *tp = netdev_priv(dev);
2244 rtl8139_interrupt(dev->irq, dev); 2217 const int irq = tp->pci_dev->irq;
2245 enable_irq(dev->irq); 2218
2219 disable_irq(irq);
2220 rtl8139_interrupt(irq, dev);
2221 enable_irq(irq);
2246} 2222}
2247#endif 2223#endif
2248 2224
@@ -2295,7 +2271,7 @@ static int rtl8139_close (struct net_device *dev)
2295 2271
2296 spin_unlock_irqrestore (&tp->lock, flags); 2272 spin_unlock_irqrestore (&tp->lock, flags);
2297 2273
2298 free_irq (dev->irq, dev); 2274 free_irq(tp->pci_dev->irq, dev);
2299 2275
2300 rtl8139_tx_clear (tp); 2276 rtl8139_tx_clear (tp);
2301 2277
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index f54509377efa..00628d84342f 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -44,6 +44,8 @@
44#define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw" 44#define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw"
45#define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw" 45#define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw"
46#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw" 46#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
47#define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw"
48#define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw"
47 49
48#ifdef RTL8169_DEBUG 50#ifdef RTL8169_DEBUG
49#define assert(expr) \ 51#define assert(expr) \
@@ -133,6 +135,8 @@ enum mac_version {
133 RTL_GIGA_MAC_VER_34, 135 RTL_GIGA_MAC_VER_34,
134 RTL_GIGA_MAC_VER_35, 136 RTL_GIGA_MAC_VER_35,
135 RTL_GIGA_MAC_VER_36, 137 RTL_GIGA_MAC_VER_36,
138 RTL_GIGA_MAC_VER_37,
139 RTL_GIGA_MAC_VER_38,
136 RTL_GIGA_MAC_NONE = 0xff, 140 RTL_GIGA_MAC_NONE = 0xff,
137}; 141};
138 142
@@ -245,6 +249,12 @@ static const struct {
245 [RTL_GIGA_MAC_VER_36] = 249 [RTL_GIGA_MAC_VER_36] =
246 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2, 250 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2,
247 JUMBO_9K, false), 251 JUMBO_9K, false),
252 [RTL_GIGA_MAC_VER_37] =
253 _R("RTL8402", RTL_TD_1, FIRMWARE_8402_1,
254 JUMBO_1K, true),
255 [RTL_GIGA_MAC_VER_38] =
256 _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1,
257 JUMBO_9K, false),
248}; 258};
249#undef _R 259#undef _R
250 260
@@ -315,6 +325,8 @@ enum rtl_registers {
315 Config0 = 0x51, 325 Config0 = 0x51,
316 Config1 = 0x52, 326 Config1 = 0x52,
317 Config2 = 0x53, 327 Config2 = 0x53,
328#define PME_SIGNAL (1 << 5) /* 8168c and later */
329
318 Config3 = 0x54, 330 Config3 = 0x54,
319 Config4 = 0x55, 331 Config4 = 0x55,
320 Config5 = 0x56, 332 Config5 = 0x56,
@@ -355,6 +367,9 @@ enum rtl8168_8101_registers {
355#define CSIAR_BYTE_ENABLE 0x0f 367#define CSIAR_BYTE_ENABLE 0x0f
356#define CSIAR_BYTE_ENABLE_SHIFT 12 368#define CSIAR_BYTE_ENABLE_SHIFT 12
357#define CSIAR_ADDR_MASK 0x0fff 369#define CSIAR_ADDR_MASK 0x0fff
370#define CSIAR_FUNC_CARD 0x00000000
371#define CSIAR_FUNC_SDIO 0x00010000
372#define CSIAR_FUNC_NIC 0x00020000
358 PMCH = 0x6f, 373 PMCH = 0x6f,
359 EPHYAR = 0x80, 374 EPHYAR = 0x80,
360#define EPHYAR_FLAG 0x80000000 375#define EPHYAR_FLAG 0x80000000
@@ -716,6 +731,11 @@ struct rtl8169_private {
716 void (*disable)(struct rtl8169_private *); 731 void (*disable)(struct rtl8169_private *);
717 } jumbo_ops; 732 } jumbo_ops;
718 733
734 struct csi_ops {
735 void (*write)(void __iomem *, int, int);
736 u32 (*read)(void __iomem *, int);
737 } csi_ops;
738
719 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv); 739 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
720 int (*get_settings)(struct net_device *, struct ethtool_cmd *); 740 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
721 void (*phy_reset_enable)(struct rtl8169_private *tp); 741 void (*phy_reset_enable)(struct rtl8169_private *tp);
@@ -768,6 +788,8 @@ MODULE_FIRMWARE(FIRMWARE_8168E_3);
768MODULE_FIRMWARE(FIRMWARE_8105E_1); 788MODULE_FIRMWARE(FIRMWARE_8105E_1);
769MODULE_FIRMWARE(FIRMWARE_8168F_1); 789MODULE_FIRMWARE(FIRMWARE_8168F_1);
770MODULE_FIRMWARE(FIRMWARE_8168F_2); 790MODULE_FIRMWARE(FIRMWARE_8168F_2);
791MODULE_FIRMWARE(FIRMWARE_8402_1);
792MODULE_FIRMWARE(FIRMWARE_8411_1);
771 793
772static void rtl_lock_work(struct rtl8169_private *tp) 794static void rtl_lock_work(struct rtl8169_private *tp)
773{ 795{
@@ -1078,40 +1100,6 @@ static u16 rtl_ephy_read(void __iomem *ioaddr, int reg_addr)
1078 return value; 1100 return value;
1079} 1101}
1080 1102
1081static void rtl_csi_write(void __iomem *ioaddr, int addr, int value)
1082{
1083 unsigned int i;
1084
1085 RTL_W32(CSIDR, value);
1086 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
1087 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
1088
1089 for (i = 0; i < 100; i++) {
1090 if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
1091 break;
1092 udelay(10);
1093 }
1094}
1095
1096static u32 rtl_csi_read(void __iomem *ioaddr, int addr)
1097{
1098 u32 value = ~0x00;
1099 unsigned int i;
1100
1101 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
1102 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
1103
1104 for (i = 0; i < 100; i++) {
1105 if (RTL_R32(CSIAR) & CSIAR_FLAG) {
1106 value = RTL_R32(CSIDR);
1107 break;
1108 }
1109 udelay(10);
1110 }
1111
1112 return value;
1113}
1114
1115static 1103static
1116void rtl_eri_write(void __iomem *ioaddr, int addr, u32 mask, u32 val, int type) 1104void rtl_eri_write(void __iomem *ioaddr, int addr, u32 mask, u32 val, int type)
1117{ 1105{
@@ -1281,7 +1269,8 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
1281 if (!netif_running(dev)) 1269 if (!netif_running(dev))
1282 return; 1270 return;
1283 1271
1284 if (tp->mac_version == RTL_GIGA_MAC_VER_34) { 1272 if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
1273 tp->mac_version == RTL_GIGA_MAC_VER_38) {
1285 if (RTL_R8(PHYstatus) & _1000bpsF) { 1274 if (RTL_R8(PHYstatus) & _1000bpsF) {
1286 rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111, 1275 rtl_eri_write(ioaddr, 0x1bc, ERIAR_MASK_1111,
1287 0x00000011, ERIAR_EXGMAC); 1276 0x00000011, ERIAR_EXGMAC);
@@ -1316,6 +1305,16 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
1316 rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111, 1305 rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_1111,
1317 0x0000003f, ERIAR_EXGMAC); 1306 0x0000003f, ERIAR_EXGMAC);
1318 } 1307 }
1308 } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
1309 if (RTL_R8(PHYstatus) & _10bps) {
1310 rtl_eri_write(ioaddr, 0x1d0, ERIAR_MASK_0011,
1311 0x4d02, ERIAR_EXGMAC);
1312 rtl_eri_write(ioaddr, 0x1dc, ERIAR_MASK_0011,
1313 0x0060, ERIAR_EXGMAC);
1314 } else {
1315 rtl_eri_write(ioaddr, 0x1d0, ERIAR_MASK_0011,
1316 0x0000, ERIAR_EXGMAC);
1317 }
1319 } 1318 }
1320} 1319}
1321 1320
@@ -1396,7 +1395,6 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
1396 u16 reg; 1395 u16 reg;
1397 u8 mask; 1396 u8 mask;
1398 } cfg[] = { 1397 } cfg[] = {
1399 { WAKE_ANY, Config1, PMEnable },
1400 { WAKE_PHY, Config3, LinkUp }, 1398 { WAKE_PHY, Config3, LinkUp },
1401 { WAKE_MAGIC, Config3, MagicPacket }, 1399 { WAKE_MAGIC, Config3, MagicPacket },
1402 { WAKE_UCAST, Config5, UWF }, 1400 { WAKE_UCAST, Config5, UWF },
@@ -1404,16 +1402,32 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
1404 { WAKE_MCAST, Config5, MWF }, 1402 { WAKE_MCAST, Config5, MWF },
1405 { WAKE_ANY, Config5, LanWake } 1403 { WAKE_ANY, Config5, LanWake }
1406 }; 1404 };
1405 u8 options;
1407 1406
1408 RTL_W8(Cfg9346, Cfg9346_Unlock); 1407 RTL_W8(Cfg9346, Cfg9346_Unlock);
1409 1408
1410 for (i = 0; i < ARRAY_SIZE(cfg); i++) { 1409 for (i = 0; i < ARRAY_SIZE(cfg); i++) {
1411 u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask; 1410 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
1412 if (wolopts & cfg[i].opt) 1411 if (wolopts & cfg[i].opt)
1413 options |= cfg[i].mask; 1412 options |= cfg[i].mask;
1414 RTL_W8(cfg[i].reg, options); 1413 RTL_W8(cfg[i].reg, options);
1415 } 1414 }
1416 1415
1416 switch (tp->mac_version) {
1417 case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
1418 options = RTL_R8(Config1) & ~PMEnable;
1419 if (wolopts)
1420 options |= PMEnable;
1421 RTL_W8(Config1, options);
1422 break;
1423 default:
1424 options = RTL_R8(Config2) & ~PME_SIGNAL;
1425 if (wolopts)
1426 options |= PME_SIGNAL;
1427 RTL_W8(Config2, options);
1428 break;
1429 }
1430
1417 RTL_W8(Cfg9346, Cfg9346_Lock); 1431 RTL_W8(Cfg9346, Cfg9346_Lock);
1418} 1432}
1419 1433
@@ -1853,6 +1867,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
1853 .get_strings = rtl8169_get_strings, 1867 .get_strings = rtl8169_get_strings,
1854 .get_sset_count = rtl8169_get_sset_count, 1868 .get_sset_count = rtl8169_get_sset_count,
1855 .get_ethtool_stats = rtl8169_get_ethtool_stats, 1869 .get_ethtool_stats = rtl8169_get_ethtool_stats,
1870 .get_ts_info = ethtool_op_get_ts_info,
1856}; 1871};
1857 1872
1858static void rtl8169_get_mac_version(struct rtl8169_private *tp, 1873static void rtl8169_get_mac_version(struct rtl8169_private *tp,
@@ -1876,6 +1891,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1876 int mac_version; 1891 int mac_version;
1877 } mac_info[] = { 1892 } mac_info[] = {
1878 /* 8168F family. */ 1893 /* 8168F family. */
1894 { 0x7c800000, 0x48800000, RTL_GIGA_MAC_VER_38 },
1879 { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 }, 1895 { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 },
1880 { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35 }, 1896 { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35 },
1881 1897
@@ -1913,6 +1929,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1913 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 }, 1929 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
1914 1930
1915 /* 8101 family. */ 1931 /* 8101 family. */
1932 { 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 },
1916 { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 }, 1933 { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 },
1917 { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 }, 1934 { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 },
1918 { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 }, 1935 { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 },
@@ -3013,6 +3030,28 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
3013 rtl_writephy(tp, 0x1f, 0x0000); 3030 rtl_writephy(tp, 0x1f, 0x0000);
3014} 3031}
3015 3032
3033static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
3034{
3035 /* For 4-corner performance improve */
3036 rtl_writephy(tp, 0x1f, 0x0005);
3037 rtl_writephy(tp, 0x05, 0x8b80);
3038 rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
3039 rtl_writephy(tp, 0x1f, 0x0000);
3040
3041 /* PHY auto speed down */
3042 rtl_writephy(tp, 0x1f, 0x0007);
3043 rtl_writephy(tp, 0x1e, 0x002d);
3044 rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
3045 rtl_writephy(tp, 0x1f, 0x0000);
3046 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3047
3048 /* Improve 10M EEE waveform */
3049 rtl_writephy(tp, 0x1f, 0x0005);
3050 rtl_writephy(tp, 0x05, 0x8b86);
3051 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3052 rtl_writephy(tp, 0x1f, 0x0000);
3053}
3054
3016static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp) 3055static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
3017{ 3056{
3018 static const struct phy_reg phy_reg_init[] = { 3057 static const struct phy_reg phy_reg_init[] = {
@@ -3054,24 +3093,7 @@ static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
3054 3093
3055 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 3094 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3056 3095
3057 /* For 4-corner performance improve */ 3096 rtl8168f_hw_phy_config(tp);
3058 rtl_writephy(tp, 0x1f, 0x0005);
3059 rtl_writephy(tp, 0x05, 0x8b80);
3060 rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
3061 rtl_writephy(tp, 0x1f, 0x0000);
3062
3063 /* PHY auto speed down */
3064 rtl_writephy(tp, 0x1f, 0x0007);
3065 rtl_writephy(tp, 0x1e, 0x002d);
3066 rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
3067 rtl_writephy(tp, 0x1f, 0x0000);
3068 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3069
3070 /* Improve 10M EEE waveform */
3071 rtl_writephy(tp, 0x1f, 0x0005);
3072 rtl_writephy(tp, 0x05, 0x8b86);
3073 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3074 rtl_writephy(tp, 0x1f, 0x0000);
3075 3097
3076 /* Improve 2-pair detection performance */ 3098 /* Improve 2-pair detection performance */
3077 rtl_writephy(tp, 0x1f, 0x0005); 3099 rtl_writephy(tp, 0x1f, 0x0005);
@@ -3084,23 +3106,104 @@ static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
3084{ 3106{
3085 rtl_apply_firmware(tp); 3107 rtl_apply_firmware(tp);
3086 3108
3087 /* For 4-corner performance improve */ 3109 rtl8168f_hw_phy_config(tp);
3110}
3111
3112static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
3113{
3114 void __iomem *ioaddr = tp->mmio_addr;
3115 static const struct phy_reg phy_reg_init[] = {
3116 /* Channel estimation fine tune */
3117 { 0x1f, 0x0003 },
3118 { 0x09, 0xa20f },
3119 { 0x1f, 0x0000 },
3120
3121 /* Modify green table for giga & fnet */
3122 { 0x1f, 0x0005 },
3123 { 0x05, 0x8b55 },
3124 { 0x06, 0x0000 },
3125 { 0x05, 0x8b5e },
3126 { 0x06, 0x0000 },
3127 { 0x05, 0x8b67 },
3128 { 0x06, 0x0000 },
3129 { 0x05, 0x8b70 },
3130 { 0x06, 0x0000 },
3131 { 0x1f, 0x0000 },
3132 { 0x1f, 0x0007 },
3133 { 0x1e, 0x0078 },
3134 { 0x17, 0x0000 },
3135 { 0x19, 0x00aa },
3136 { 0x1f, 0x0000 },
3137
3138 /* Modify green table for 10M */
3139 { 0x1f, 0x0005 },
3140 { 0x05, 0x8b79 },
3141 { 0x06, 0xaa00 },
3142 { 0x1f, 0x0000 },
3143
3144 /* Disable hiimpedance detection (RTCT) */
3145 { 0x1f, 0x0003 },
3146 { 0x01, 0x328a },
3147 { 0x1f, 0x0000 }
3148 };
3149
3150
3151 rtl_apply_firmware(tp);
3152
3153 rtl8168f_hw_phy_config(tp);
3154
3155 /* Improve 2-pair detection performance */
3088 rtl_writephy(tp, 0x1f, 0x0005); 3156 rtl_writephy(tp, 0x1f, 0x0005);
3089 rtl_writephy(tp, 0x05, 0x8b80); 3157 rtl_writephy(tp, 0x05, 0x8b85);
3090 rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000); 3158 rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3091 rtl_writephy(tp, 0x1f, 0x0000); 3159 rtl_writephy(tp, 0x1f, 0x0000);
3092 3160
3093 /* PHY auto speed down */ 3161 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3094 rtl_writephy(tp, 0x1f, 0x0007); 3162
3095 rtl_writephy(tp, 0x1e, 0x002d); 3163 /* Modify green table for giga */
3096 rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000); 3164 rtl_writephy(tp, 0x1f, 0x0005);
3165 rtl_writephy(tp, 0x05, 0x8b54);
3166 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
3167 rtl_writephy(tp, 0x05, 0x8b5d);
3168 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
3169 rtl_writephy(tp, 0x05, 0x8a7c);
3170 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3171 rtl_writephy(tp, 0x05, 0x8a7f);
3172 rtl_w1w0_phy(tp, 0x06, 0x0100, 0x0000);
3173 rtl_writephy(tp, 0x05, 0x8a82);
3174 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3175 rtl_writephy(tp, 0x05, 0x8a85);
3176 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3177 rtl_writephy(tp, 0x05, 0x8a88);
3178 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3097 rtl_writephy(tp, 0x1f, 0x0000); 3179 rtl_writephy(tp, 0x1f, 0x0000);
3098 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3099 3180
3100 /* Improve 10M EEE waveform */ 3181 /* uc same-seed solution */
3101 rtl_writephy(tp, 0x1f, 0x0005); 3182 rtl_writephy(tp, 0x1f, 0x0005);
3102 rtl_writephy(tp, 0x05, 0x8b86); 3183 rtl_writephy(tp, 0x05, 0x8b85);
3103 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000); 3184 rtl_w1w0_phy(tp, 0x06, 0x8000, 0x0000);
3185 rtl_writephy(tp, 0x1f, 0x0000);
3186
3187 /* eee setting */
3188 rtl_w1w0_eri(ioaddr, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
3189 rtl_writephy(tp, 0x1f, 0x0005);
3190 rtl_writephy(tp, 0x05, 0x8b85);
3191 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
3192 rtl_writephy(tp, 0x1f, 0x0004);
3193 rtl_writephy(tp, 0x1f, 0x0007);
3194 rtl_writephy(tp, 0x1e, 0x0020);
3195 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
3196 rtl_writephy(tp, 0x1f, 0x0000);
3197 rtl_writephy(tp, 0x0d, 0x0007);
3198 rtl_writephy(tp, 0x0e, 0x003c);
3199 rtl_writephy(tp, 0x0d, 0x4007);
3200 rtl_writephy(tp, 0x0e, 0x0000);
3201 rtl_writephy(tp, 0x0d, 0x0000);
3202
3203 /* Green feature */
3204 rtl_writephy(tp, 0x1f, 0x0003);
3205 rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
3206 rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
3104 rtl_writephy(tp, 0x1f, 0x0000); 3207 rtl_writephy(tp, 0x1f, 0x0000);
3105} 3208}
3106 3209
@@ -3147,6 +3250,25 @@ static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
3147 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 3250 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3148} 3251}
3149 3252
3253static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
3254{
3255 void __iomem *ioaddr = tp->mmio_addr;
3256
3257 /* Disable ALDPS before setting firmware */
3258 rtl_writephy(tp, 0x1f, 0x0000);
3259 rtl_writephy(tp, 0x18, 0x0310);
3260 msleep(20);
3261
3262 rtl_apply_firmware(tp);
3263
3264 /* EEE setting */
3265 rtl_eri_write(ioaddr, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
3266 rtl_writephy(tp, 0x1f, 0x0004);
3267 rtl_writephy(tp, 0x10, 0x401f);
3268 rtl_writephy(tp, 0x19, 0x7030);
3269 rtl_writephy(tp, 0x1f, 0x0000);
3270}
3271
3150static void rtl_hw_phy_config(struct net_device *dev) 3272static void rtl_hw_phy_config(struct net_device *dev)
3151{ 3273{
3152 struct rtl8169_private *tp = netdev_priv(dev); 3274 struct rtl8169_private *tp = netdev_priv(dev);
@@ -3235,6 +3357,14 @@ static void rtl_hw_phy_config(struct net_device *dev)
3235 rtl8168f_2_hw_phy_config(tp); 3357 rtl8168f_2_hw_phy_config(tp);
3236 break; 3358 break;
3237 3359
3360 case RTL_GIGA_MAC_VER_37:
3361 rtl8402_hw_phy_config(tp);
3362 break;
3363
3364 case RTL_GIGA_MAC_VER_38:
3365 rtl8411_hw_phy_config(tp);
3366 break;
3367
3238 default: 3368 default:
3239 break; 3369 break;
3240 } 3370 }
@@ -3472,6 +3602,8 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
3472 case RTL_GIGA_MAC_VER_32: 3602 case RTL_GIGA_MAC_VER_32:
3473 case RTL_GIGA_MAC_VER_33: 3603 case RTL_GIGA_MAC_VER_33:
3474 case RTL_GIGA_MAC_VER_34: 3604 case RTL_GIGA_MAC_VER_34:
3605 case RTL_GIGA_MAC_VER_37:
3606 case RTL_GIGA_MAC_VER_38:
3475 RTL_W32(RxConfig, RTL_R32(RxConfig) | 3607 RTL_W32(RxConfig, RTL_R32(RxConfig) |
3476 AcceptBroadcast | AcceptMulticast | AcceptMyPhys); 3608 AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
3477 break; 3609 break;
@@ -3507,15 +3639,45 @@ static void r810x_phy_power_up(struct rtl8169_private *tp)
3507 3639
3508static void r810x_pll_power_down(struct rtl8169_private *tp) 3640static void r810x_pll_power_down(struct rtl8169_private *tp)
3509{ 3641{
3642 void __iomem *ioaddr = tp->mmio_addr;
3643
3510 if (rtl_wol_pll_power_down(tp)) 3644 if (rtl_wol_pll_power_down(tp))
3511 return; 3645 return;
3512 3646
3513 r810x_phy_power_down(tp); 3647 r810x_phy_power_down(tp);
3648
3649 switch (tp->mac_version) {
3650 case RTL_GIGA_MAC_VER_07:
3651 case RTL_GIGA_MAC_VER_08:
3652 case RTL_GIGA_MAC_VER_09:
3653 case RTL_GIGA_MAC_VER_10:
3654 case RTL_GIGA_MAC_VER_13:
3655 case RTL_GIGA_MAC_VER_16:
3656 break;
3657 default:
3658 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
3659 break;
3660 }
3514} 3661}
3515 3662
3516static void r810x_pll_power_up(struct rtl8169_private *tp) 3663static void r810x_pll_power_up(struct rtl8169_private *tp)
3517{ 3664{
3665 void __iomem *ioaddr = tp->mmio_addr;
3666
3518 r810x_phy_power_up(tp); 3667 r810x_phy_power_up(tp);
3668
3669 switch (tp->mac_version) {
3670 case RTL_GIGA_MAC_VER_07:
3671 case RTL_GIGA_MAC_VER_08:
3672 case RTL_GIGA_MAC_VER_09:
3673 case RTL_GIGA_MAC_VER_10:
3674 case RTL_GIGA_MAC_VER_13:
3675 case RTL_GIGA_MAC_VER_16:
3676 break;
3677 default:
3678 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
3679 break;
3680 }
3519} 3681}
3520 3682
3521static void r8168_phy_power_up(struct rtl8169_private *tp) 3683static void r8168_phy_power_up(struct rtl8169_private *tp)
@@ -3619,13 +3781,6 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
3619{ 3781{
3620 void __iomem *ioaddr = tp->mmio_addr; 3782 void __iomem *ioaddr = tp->mmio_addr;
3621 3783
3622 if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
3623 tp->mac_version == RTL_GIGA_MAC_VER_28 ||
3624 tp->mac_version == RTL_GIGA_MAC_VER_31) &&
3625 r8168dp_check_dash(tp)) {
3626 return;
3627 }
3628
3629 switch (tp->mac_version) { 3784 switch (tp->mac_version) {
3630 case RTL_GIGA_MAC_VER_25: 3785 case RTL_GIGA_MAC_VER_25:
3631 case RTL_GIGA_MAC_VER_26: 3786 case RTL_GIGA_MAC_VER_26:
@@ -3670,6 +3825,7 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
3670 case RTL_GIGA_MAC_VER_16: 3825 case RTL_GIGA_MAC_VER_16:
3671 case RTL_GIGA_MAC_VER_29: 3826 case RTL_GIGA_MAC_VER_29:
3672 case RTL_GIGA_MAC_VER_30: 3827 case RTL_GIGA_MAC_VER_30:
3828 case RTL_GIGA_MAC_VER_37:
3673 ops->down = r810x_pll_power_down; 3829 ops->down = r810x_pll_power_down;
3674 ops->up = r810x_pll_power_up; 3830 ops->up = r810x_pll_power_up;
3675 break; 3831 break;
@@ -3694,6 +3850,7 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
3694 case RTL_GIGA_MAC_VER_34: 3850 case RTL_GIGA_MAC_VER_34:
3695 case RTL_GIGA_MAC_VER_35: 3851 case RTL_GIGA_MAC_VER_35:
3696 case RTL_GIGA_MAC_VER_36: 3852 case RTL_GIGA_MAC_VER_36:
3853 case RTL_GIGA_MAC_VER_38:
3697 ops->down = r8168_pll_power_down; 3854 ops->down = r8168_pll_power_down;
3698 ops->up = r8168_pll_power_up; 3855 ops->up = r8168_pll_power_up;
3699 break; 3856 break;
@@ -3979,7 +4136,9 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
3979 udelay(20); 4136 udelay(20);
3980 } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 || 4137 } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
3981 tp->mac_version == RTL_GIGA_MAC_VER_35 || 4138 tp->mac_version == RTL_GIGA_MAC_VER_35 ||
3982 tp->mac_version == RTL_GIGA_MAC_VER_36) { 4139 tp->mac_version == RTL_GIGA_MAC_VER_36 ||
4140 tp->mac_version == RTL_GIGA_MAC_VER_37 ||
4141 tp->mac_version == RTL_GIGA_MAC_VER_38) {
3983 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq); 4142 RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
3984 while (!(RTL_R32(TxConfig) & TXCFG_EMPTY)) 4143 while (!(RTL_R32(TxConfig) & TXCFG_EMPTY))
3985 udelay(100); 4144 udelay(100);
@@ -4185,22 +4344,141 @@ static void rtl_hw_start_8169(struct net_device *dev)
4185 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000); 4344 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
4186} 4345}
4187 4346
4188static void rtl_csi_access_enable(void __iomem *ioaddr, u32 bits) 4347static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
4348{
4349 if (tp->csi_ops.write)
4350 tp->csi_ops.write(tp->mmio_addr, addr, value);
4351}
4352
4353static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
4354{
4355 if (tp->csi_ops.read)
4356 return tp->csi_ops.read(tp->mmio_addr, addr);
4357 else
4358 return ~0;
4359}
4360
4361static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
4189{ 4362{
4190 u32 csi; 4363 u32 csi;
4191 4364
4192 csi = rtl_csi_read(ioaddr, 0x070c) & 0x00ffffff; 4365 csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
4193 rtl_csi_write(ioaddr, 0x070c, csi | bits); 4366 rtl_csi_write(tp, 0x070c, csi | bits);
4367}
4368
4369static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
4370{
4371 rtl_csi_access_enable(tp, 0x17000000);
4372}
4373
4374static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
4375{
4376 rtl_csi_access_enable(tp, 0x27000000);
4377}
4378
4379static void r8169_csi_write(void __iomem *ioaddr, int addr, int value)
4380{
4381 unsigned int i;
4382
4383 RTL_W32(CSIDR, value);
4384 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
4385 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4386
4387 for (i = 0; i < 100; i++) {
4388 if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
4389 break;
4390 udelay(10);
4391 }
4392}
4393
4394static u32 r8169_csi_read(void __iomem *ioaddr, int addr)
4395{
4396 u32 value = ~0x00;
4397 unsigned int i;
4398
4399 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
4400 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4401
4402 for (i = 0; i < 100; i++) {
4403 if (RTL_R32(CSIAR) & CSIAR_FLAG) {
4404 value = RTL_R32(CSIDR);
4405 break;
4406 }
4407 udelay(10);
4408 }
4409
4410 return value;
4411}
4412
4413static void r8402_csi_write(void __iomem *ioaddr, int addr, int value)
4414{
4415 unsigned int i;
4416
4417 RTL_W32(CSIDR, value);
4418 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
4419 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
4420 CSIAR_FUNC_NIC);
4421
4422 for (i = 0; i < 100; i++) {
4423 if (!(RTL_R32(CSIAR) & CSIAR_FLAG))
4424 break;
4425 udelay(10);
4426 }
4194} 4427}
4195 4428
4196static void rtl_csi_access_enable_1(void __iomem *ioaddr) 4429static u32 r8402_csi_read(void __iomem *ioaddr, int addr)
4197{ 4430{
4198 rtl_csi_access_enable(ioaddr, 0x17000000); 4431 u32 value = ~0x00;
4432 unsigned int i;
4433
4434 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
4435 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4436
4437 for (i = 0; i < 100; i++) {
4438 if (RTL_R32(CSIAR) & CSIAR_FLAG) {
4439 value = RTL_R32(CSIDR);
4440 break;
4441 }
4442 udelay(10);
4443 }
4444
4445 return value;
4199} 4446}
4200 4447
4201static void rtl_csi_access_enable_2(void __iomem *ioaddr) 4448static void __devinit rtl_init_csi_ops(struct rtl8169_private *tp)
4202{ 4449{
4203 rtl_csi_access_enable(ioaddr, 0x27000000); 4450 struct csi_ops *ops = &tp->csi_ops;
4451
4452 switch (tp->mac_version) {
4453 case RTL_GIGA_MAC_VER_01:
4454 case RTL_GIGA_MAC_VER_02:
4455 case RTL_GIGA_MAC_VER_03:
4456 case RTL_GIGA_MAC_VER_04:
4457 case RTL_GIGA_MAC_VER_05:
4458 case RTL_GIGA_MAC_VER_06:
4459 case RTL_GIGA_MAC_VER_10:
4460 case RTL_GIGA_MAC_VER_11:
4461 case RTL_GIGA_MAC_VER_12:
4462 case RTL_GIGA_MAC_VER_13:
4463 case RTL_GIGA_MAC_VER_14:
4464 case RTL_GIGA_MAC_VER_15:
4465 case RTL_GIGA_MAC_VER_16:
4466 case RTL_GIGA_MAC_VER_17:
4467 ops->write = NULL;
4468 ops->read = NULL;
4469 break;
4470
4471 case RTL_GIGA_MAC_VER_37:
4472 case RTL_GIGA_MAC_VER_38:
4473 ops->write = r8402_csi_write;
4474 ops->read = r8402_csi_read;
4475 break;
4476
4477 default:
4478 ops->write = r8169_csi_write;
4479 ops->read = r8169_csi_read;
4480 break;
4481 }
4204} 4482}
4205 4483
4206struct ephy_info { 4484struct ephy_info {
@@ -4257,8 +4535,11 @@ static void rtl_enable_clock_request(struct pci_dev *pdev)
4257 PktCntrDisable | \ 4535 PktCntrDisable | \
4258 Mac_dbgo_sel) 4536 Mac_dbgo_sel)
4259 4537
4260static void rtl_hw_start_8168bb(void __iomem *ioaddr, struct pci_dev *pdev) 4538static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
4261{ 4539{
4540 void __iomem *ioaddr = tp->mmio_addr;
4541 struct pci_dev *pdev = tp->pci_dev;
4542
4262 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); 4543 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4263 4544
4264 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); 4545 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
@@ -4267,17 +4548,22 @@ static void rtl_hw_start_8168bb(void __iomem *ioaddr, struct pci_dev *pdev)
4267 (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN); 4548 (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
4268} 4549}
4269 4550
4270static void rtl_hw_start_8168bef(void __iomem *ioaddr, struct pci_dev *pdev) 4551static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
4271{ 4552{
4272 rtl_hw_start_8168bb(ioaddr, pdev); 4553 void __iomem *ioaddr = tp->mmio_addr;
4554
4555 rtl_hw_start_8168bb(tp);
4273 4556
4274 RTL_W8(MaxTxPacketSize, TxPacketMax); 4557 RTL_W8(MaxTxPacketSize, TxPacketMax);
4275 4558
4276 RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0)); 4559 RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
4277} 4560}
4278 4561
4279static void __rtl_hw_start_8168cp(void __iomem *ioaddr, struct pci_dev *pdev) 4562static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
4280{ 4563{
4564 void __iomem *ioaddr = tp->mmio_addr;
4565 struct pci_dev *pdev = tp->pci_dev;
4566
4281 RTL_W8(Config1, RTL_R8(Config1) | Speed_down); 4567 RTL_W8(Config1, RTL_R8(Config1) | Speed_down);
4282 4568
4283 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); 4569 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
@@ -4289,8 +4575,9 @@ static void __rtl_hw_start_8168cp(void __iomem *ioaddr, struct pci_dev *pdev)
4289 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); 4575 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4290} 4576}
4291 4577
4292static void rtl_hw_start_8168cp_1(void __iomem *ioaddr, struct pci_dev *pdev) 4578static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
4293{ 4579{
4580 void __iomem *ioaddr = tp->mmio_addr;
4294 static const struct ephy_info e_info_8168cp[] = { 4581 static const struct ephy_info e_info_8168cp[] = {
4295 { 0x01, 0, 0x0001 }, 4582 { 0x01, 0, 0x0001 },
4296 { 0x02, 0x0800, 0x1000 }, 4583 { 0x02, 0x0800, 0x1000 },
@@ -4299,16 +4586,19 @@ static void rtl_hw_start_8168cp_1(void __iomem *ioaddr, struct pci_dev *pdev)
4299 { 0x07, 0, 0x2000 } 4586 { 0x07, 0, 0x2000 }
4300 }; 4587 };
4301 4588
4302 rtl_csi_access_enable_2(ioaddr); 4589 rtl_csi_access_enable_2(tp);
4303 4590
4304 rtl_ephy_init(ioaddr, e_info_8168cp, ARRAY_SIZE(e_info_8168cp)); 4591 rtl_ephy_init(ioaddr, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
4305 4592
4306 __rtl_hw_start_8168cp(ioaddr, pdev); 4593 __rtl_hw_start_8168cp(tp);
4307} 4594}
4308 4595
4309static void rtl_hw_start_8168cp_2(void __iomem *ioaddr, struct pci_dev *pdev) 4596static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
4310{ 4597{
4311 rtl_csi_access_enable_2(ioaddr); 4598 void __iomem *ioaddr = tp->mmio_addr;
4599 struct pci_dev *pdev = tp->pci_dev;
4600
4601 rtl_csi_access_enable_2(tp);
4312 4602
4313 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); 4603 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4314 4604
@@ -4317,9 +4607,12 @@ static void rtl_hw_start_8168cp_2(void __iomem *ioaddr, struct pci_dev *pdev)
4317 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); 4607 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4318} 4608}
4319 4609
4320static void rtl_hw_start_8168cp_3(void __iomem *ioaddr, struct pci_dev *pdev) 4610static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
4321{ 4611{
4322 rtl_csi_access_enable_2(ioaddr); 4612 void __iomem *ioaddr = tp->mmio_addr;
4613 struct pci_dev *pdev = tp->pci_dev;
4614
4615 rtl_csi_access_enable_2(tp);
4323 4616
4324 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); 4617 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4325 4618
@@ -4333,52 +4626,57 @@ static void rtl_hw_start_8168cp_3(void __iomem *ioaddr, struct pci_dev *pdev)
4333 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); 4626 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4334} 4627}
4335 4628
4336static void rtl_hw_start_8168c_1(void __iomem *ioaddr, struct pci_dev *pdev) 4629static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
4337{ 4630{
4631 void __iomem *ioaddr = tp->mmio_addr;
4338 static const struct ephy_info e_info_8168c_1[] = { 4632 static const struct ephy_info e_info_8168c_1[] = {
4339 { 0x02, 0x0800, 0x1000 }, 4633 { 0x02, 0x0800, 0x1000 },
4340 { 0x03, 0, 0x0002 }, 4634 { 0x03, 0, 0x0002 },
4341 { 0x06, 0x0080, 0x0000 } 4635 { 0x06, 0x0080, 0x0000 }
4342 }; 4636 };
4343 4637
4344 rtl_csi_access_enable_2(ioaddr); 4638 rtl_csi_access_enable_2(tp);
4345 4639
4346 RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2); 4640 RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
4347 4641
4348 rtl_ephy_init(ioaddr, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1)); 4642 rtl_ephy_init(ioaddr, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
4349 4643
4350 __rtl_hw_start_8168cp(ioaddr, pdev); 4644 __rtl_hw_start_8168cp(tp);
4351} 4645}
4352 4646
4353static void rtl_hw_start_8168c_2(void __iomem *ioaddr, struct pci_dev *pdev) 4647static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
4354{ 4648{
4649 void __iomem *ioaddr = tp->mmio_addr;
4355 static const struct ephy_info e_info_8168c_2[] = { 4650 static const struct ephy_info e_info_8168c_2[] = {
4356 { 0x01, 0, 0x0001 }, 4651 { 0x01, 0, 0x0001 },
4357 { 0x03, 0x0400, 0x0220 } 4652 { 0x03, 0x0400, 0x0220 }
4358 }; 4653 };
4359 4654
4360 rtl_csi_access_enable_2(ioaddr); 4655 rtl_csi_access_enable_2(tp);
4361 4656
4362 rtl_ephy_init(ioaddr, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2)); 4657 rtl_ephy_init(ioaddr, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
4363 4658
4364 __rtl_hw_start_8168cp(ioaddr, pdev); 4659 __rtl_hw_start_8168cp(tp);
4365} 4660}
4366 4661
4367static void rtl_hw_start_8168c_3(void __iomem *ioaddr, struct pci_dev *pdev) 4662static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
4368{ 4663{
4369 rtl_hw_start_8168c_2(ioaddr, pdev); 4664 rtl_hw_start_8168c_2(tp);
4370} 4665}
4371 4666
4372static void rtl_hw_start_8168c_4(void __iomem *ioaddr, struct pci_dev *pdev) 4667static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
4373{ 4668{
4374 rtl_csi_access_enable_2(ioaddr); 4669 rtl_csi_access_enable_2(tp);
4375 4670
4376 __rtl_hw_start_8168cp(ioaddr, pdev); 4671 __rtl_hw_start_8168cp(tp);
4377} 4672}
4378 4673
4379static void rtl_hw_start_8168d(void __iomem *ioaddr, struct pci_dev *pdev) 4674static void rtl_hw_start_8168d(struct rtl8169_private *tp)
4380{ 4675{
4381 rtl_csi_access_enable_2(ioaddr); 4676 void __iomem *ioaddr = tp->mmio_addr;
4677 struct pci_dev *pdev = tp->pci_dev;
4678
4679 rtl_csi_access_enable_2(tp);
4382 4680
4383 rtl_disable_clock_request(pdev); 4681 rtl_disable_clock_request(pdev);
4384 4682
@@ -4389,9 +4687,12 @@ static void rtl_hw_start_8168d(void __iomem *ioaddr, struct pci_dev *pdev)
4389 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); 4687 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
4390} 4688}
4391 4689
4392static void rtl_hw_start_8168dp(void __iomem *ioaddr, struct pci_dev *pdev) 4690static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
4393{ 4691{
4394 rtl_csi_access_enable_1(ioaddr); 4692 void __iomem *ioaddr = tp->mmio_addr;
4693 struct pci_dev *pdev = tp->pci_dev;
4694
4695 rtl_csi_access_enable_1(tp);
4395 4696
4396 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 4697 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4397 4698
@@ -4400,8 +4701,10 @@ static void rtl_hw_start_8168dp(void __iomem *ioaddr, struct pci_dev *pdev)
4400 rtl_disable_clock_request(pdev); 4701 rtl_disable_clock_request(pdev);
4401} 4702}
4402 4703
4403static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev) 4704static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
4404{ 4705{
4706 void __iomem *ioaddr = tp->mmio_addr;
4707 struct pci_dev *pdev = tp->pci_dev;
4405 static const struct ephy_info e_info_8168d_4[] = { 4708 static const struct ephy_info e_info_8168d_4[] = {
4406 { 0x0b, ~0, 0x48 }, 4709 { 0x0b, ~0, 0x48 },
4407 { 0x19, 0x20, 0x50 }, 4710 { 0x19, 0x20, 0x50 },
@@ -4409,7 +4712,7 @@ static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev)
4409 }; 4712 };
4410 int i; 4713 int i;
4411 4714
4412 rtl_csi_access_enable_1(ioaddr); 4715 rtl_csi_access_enable_1(tp);
4413 4716
4414 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 4717 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4415 4718
@@ -4426,8 +4729,10 @@ static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev)
4426 rtl_enable_clock_request(pdev); 4729 rtl_enable_clock_request(pdev);
4427} 4730}
4428 4731
4429static void rtl_hw_start_8168e_1(void __iomem *ioaddr, struct pci_dev *pdev) 4732static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
4430{ 4733{
4734 void __iomem *ioaddr = tp->mmio_addr;
4735 struct pci_dev *pdev = tp->pci_dev;
4431 static const struct ephy_info e_info_8168e_1[] = { 4736 static const struct ephy_info e_info_8168e_1[] = {
4432 { 0x00, 0x0200, 0x0100 }, 4737 { 0x00, 0x0200, 0x0100 },
4433 { 0x00, 0x0000, 0x0004 }, 4738 { 0x00, 0x0000, 0x0004 },
@@ -4444,7 +4749,7 @@ static void rtl_hw_start_8168e_1(void __iomem *ioaddr, struct pci_dev *pdev)
4444 { 0x0a, 0x0000, 0x0040 } 4749 { 0x0a, 0x0000, 0x0040 }
4445 }; 4750 };
4446 4751
4447 rtl_csi_access_enable_2(ioaddr); 4752 rtl_csi_access_enable_2(tp);
4448 4753
4449 rtl_ephy_init(ioaddr, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1)); 4754 rtl_ephy_init(ioaddr, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
4450 4755
@@ -4461,14 +4766,16 @@ static void rtl_hw_start_8168e_1(void __iomem *ioaddr, struct pci_dev *pdev)
4461 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); 4766 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
4462} 4767}
4463 4768
4464static void rtl_hw_start_8168e_2(void __iomem *ioaddr, struct pci_dev *pdev) 4769static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
4465{ 4770{
4771 void __iomem *ioaddr = tp->mmio_addr;
4772 struct pci_dev *pdev = tp->pci_dev;
4466 static const struct ephy_info e_info_8168e_2[] = { 4773 static const struct ephy_info e_info_8168e_2[] = {
4467 { 0x09, 0x0000, 0x0080 }, 4774 { 0x09, 0x0000, 0x0080 },
4468 { 0x19, 0x0000, 0x0224 } 4775 { 0x19, 0x0000, 0x0224 }
4469 }; 4776 };
4470 4777
4471 rtl_csi_access_enable_1(ioaddr); 4778 rtl_csi_access_enable_1(tp);
4472 4779
4473 rtl_ephy_init(ioaddr, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2)); 4780 rtl_ephy_init(ioaddr, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
4474 4781
@@ -4499,18 +4806,12 @@ static void rtl_hw_start_8168e_2(void __iomem *ioaddr, struct pci_dev *pdev)
4499 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); 4806 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
4500} 4807}
4501 4808
4502static void rtl_hw_start_8168f_1(void __iomem *ioaddr, struct pci_dev *pdev) 4809static void rtl_hw_start_8168f(struct rtl8169_private *tp)
4503{ 4810{
4504 static const struct ephy_info e_info_8168f_1[] = { 4811 void __iomem *ioaddr = tp->mmio_addr;
4505 { 0x06, 0x00c0, 0x0020 }, 4812 struct pci_dev *pdev = tp->pci_dev;
4506 { 0x08, 0x0001, 0x0002 },
4507 { 0x09, 0x0000, 0x0080 },
4508 { 0x19, 0x0000, 0x0224 }
4509 };
4510
4511 rtl_csi_access_enable_1(ioaddr);
4512 4813
4513 rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); 4814 rtl_csi_access_enable_2(tp);
4514 4815
4515 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 4816 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4516 4817
@@ -4524,8 +4825,6 @@ static void rtl_hw_start_8168f_1(void __iomem *ioaddr, struct pci_dev *pdev)
4524 rtl_w1w0_eri(ioaddr, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); 4825 rtl_w1w0_eri(ioaddr, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
4525 rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); 4826 rtl_eri_write(ioaddr, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
4526 rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC); 4827 rtl_eri_write(ioaddr, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
4527 rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
4528 ERIAR_EXGMAC);
4529 4828
4530 RTL_W8(MaxTxPacketSize, EarlySize); 4829 RTL_W8(MaxTxPacketSize, EarlySize);
4531 4830
@@ -4533,20 +4832,54 @@ static void rtl_hw_start_8168f_1(void __iomem *ioaddr, struct pci_dev *pdev)
4533 4832
4534 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); 4833 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
4535 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); 4834 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
4835 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
4836 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
4837 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
4838}
4839
4840static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
4841{
4842 void __iomem *ioaddr = tp->mmio_addr;
4843 static const struct ephy_info e_info_8168f_1[] = {
4844 { 0x06, 0x00c0, 0x0020 },
4845 { 0x08, 0x0001, 0x0002 },
4846 { 0x09, 0x0000, 0x0080 },
4847 { 0x19, 0x0000, 0x0224 }
4848 };
4849
4850 rtl_hw_start_8168f(tp);
4851
4852 rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
4853
4854 rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00,
4855 ERIAR_EXGMAC);
4536 4856
4537 /* Adjust EEE LED frequency */ 4857 /* Adjust EEE LED frequency */
4538 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); 4858 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
4859}
4539 4860
4540 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); 4861static void rtl_hw_start_8411(struct rtl8169_private *tp)
4541 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN); 4862{
4542 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); 4863 void __iomem *ioaddr = tp->mmio_addr;
4864 static const struct ephy_info e_info_8168f_1[] = {
4865 { 0x06, 0x00c0, 0x0020 },
4866 { 0x0f, 0xffff, 0x5200 },
4867 { 0x1e, 0x0000, 0x4000 },
4868 { 0x19, 0x0000, 0x0224 }
4869 };
4870
4871 rtl_hw_start_8168f(tp);
4872
4873 rtl_ephy_init(ioaddr, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
4874
4875 rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000,
4876 ERIAR_EXGMAC);
4543} 4877}
4544 4878
4545static void rtl_hw_start_8168(struct net_device *dev) 4879static void rtl_hw_start_8168(struct net_device *dev)
4546{ 4880{
4547 struct rtl8169_private *tp = netdev_priv(dev); 4881 struct rtl8169_private *tp = netdev_priv(dev);
4548 void __iomem *ioaddr = tp->mmio_addr; 4882 void __iomem *ioaddr = tp->mmio_addr;
4549 struct pci_dev *pdev = tp->pci_dev;
4550 4883
4551 RTL_W8(Cfg9346, Cfg9346_Unlock); 4884 RTL_W8(Cfg9346, Cfg9346_Unlock);
4552 4885
@@ -4577,67 +4910,71 @@ static void rtl_hw_start_8168(struct net_device *dev)
4577 4910
4578 switch (tp->mac_version) { 4911 switch (tp->mac_version) {
4579 case RTL_GIGA_MAC_VER_11: 4912 case RTL_GIGA_MAC_VER_11:
4580 rtl_hw_start_8168bb(ioaddr, pdev); 4913 rtl_hw_start_8168bb(tp);
4581 break; 4914 break;
4582 4915
4583 case RTL_GIGA_MAC_VER_12: 4916 case RTL_GIGA_MAC_VER_12:
4584 case RTL_GIGA_MAC_VER_17: 4917 case RTL_GIGA_MAC_VER_17:
4585 rtl_hw_start_8168bef(ioaddr, pdev); 4918 rtl_hw_start_8168bef(tp);
4586 break; 4919 break;
4587 4920
4588 case RTL_GIGA_MAC_VER_18: 4921 case RTL_GIGA_MAC_VER_18:
4589 rtl_hw_start_8168cp_1(ioaddr, pdev); 4922 rtl_hw_start_8168cp_1(tp);
4590 break; 4923 break;
4591 4924
4592 case RTL_GIGA_MAC_VER_19: 4925 case RTL_GIGA_MAC_VER_19:
4593 rtl_hw_start_8168c_1(ioaddr, pdev); 4926 rtl_hw_start_8168c_1(tp);
4594 break; 4927 break;
4595 4928
4596 case RTL_GIGA_MAC_VER_20: 4929 case RTL_GIGA_MAC_VER_20:
4597 rtl_hw_start_8168c_2(ioaddr, pdev); 4930 rtl_hw_start_8168c_2(tp);
4598 break; 4931 break;
4599 4932
4600 case RTL_GIGA_MAC_VER_21: 4933 case RTL_GIGA_MAC_VER_21:
4601 rtl_hw_start_8168c_3(ioaddr, pdev); 4934 rtl_hw_start_8168c_3(tp);
4602 break; 4935 break;
4603 4936
4604 case RTL_GIGA_MAC_VER_22: 4937 case RTL_GIGA_MAC_VER_22:
4605 rtl_hw_start_8168c_4(ioaddr, pdev); 4938 rtl_hw_start_8168c_4(tp);
4606 break; 4939 break;
4607 4940
4608 case RTL_GIGA_MAC_VER_23: 4941 case RTL_GIGA_MAC_VER_23:
4609 rtl_hw_start_8168cp_2(ioaddr, pdev); 4942 rtl_hw_start_8168cp_2(tp);
4610 break; 4943 break;
4611 4944
4612 case RTL_GIGA_MAC_VER_24: 4945 case RTL_GIGA_MAC_VER_24:
4613 rtl_hw_start_8168cp_3(ioaddr, pdev); 4946 rtl_hw_start_8168cp_3(tp);
4614 break; 4947 break;
4615 4948
4616 case RTL_GIGA_MAC_VER_25: 4949 case RTL_GIGA_MAC_VER_25:
4617 case RTL_GIGA_MAC_VER_26: 4950 case RTL_GIGA_MAC_VER_26:
4618 case RTL_GIGA_MAC_VER_27: 4951 case RTL_GIGA_MAC_VER_27:
4619 rtl_hw_start_8168d(ioaddr, pdev); 4952 rtl_hw_start_8168d(tp);
4620 break; 4953 break;
4621 4954
4622 case RTL_GIGA_MAC_VER_28: 4955 case RTL_GIGA_MAC_VER_28:
4623 rtl_hw_start_8168d_4(ioaddr, pdev); 4956 rtl_hw_start_8168d_4(tp);
4624 break; 4957 break;
4625 4958
4626 case RTL_GIGA_MAC_VER_31: 4959 case RTL_GIGA_MAC_VER_31:
4627 rtl_hw_start_8168dp(ioaddr, pdev); 4960 rtl_hw_start_8168dp(tp);
4628 break; 4961 break;
4629 4962
4630 case RTL_GIGA_MAC_VER_32: 4963 case RTL_GIGA_MAC_VER_32:
4631 case RTL_GIGA_MAC_VER_33: 4964 case RTL_GIGA_MAC_VER_33:
4632 rtl_hw_start_8168e_1(ioaddr, pdev); 4965 rtl_hw_start_8168e_1(tp);
4633 break; 4966 break;
4634 case RTL_GIGA_MAC_VER_34: 4967 case RTL_GIGA_MAC_VER_34:
4635 rtl_hw_start_8168e_2(ioaddr, pdev); 4968 rtl_hw_start_8168e_2(tp);
4636 break; 4969 break;
4637 4970
4638 case RTL_GIGA_MAC_VER_35: 4971 case RTL_GIGA_MAC_VER_35:
4639 case RTL_GIGA_MAC_VER_36: 4972 case RTL_GIGA_MAC_VER_36:
4640 rtl_hw_start_8168f_1(ioaddr, pdev); 4973 rtl_hw_start_8168f_1(tp);
4974 break;
4975
4976 case RTL_GIGA_MAC_VER_38:
4977 rtl_hw_start_8411(tp);
4641 break; 4978 break;
4642 4979
4643 default: 4980 default:
@@ -4664,8 +5001,10 @@ static void rtl_hw_start_8168(struct net_device *dev)
4664 PktCntrDisable | \ 5001 PktCntrDisable | \
4665 Mac_dbgo_sel) 5002 Mac_dbgo_sel)
4666 5003
4667static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev) 5004static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
4668{ 5005{
5006 void __iomem *ioaddr = tp->mmio_addr;
5007 struct pci_dev *pdev = tp->pci_dev;
4669 static const struct ephy_info e_info_8102e_1[] = { 5008 static const struct ephy_info e_info_8102e_1[] = {
4670 { 0x01, 0, 0x6e65 }, 5009 { 0x01, 0, 0x6e65 },
4671 { 0x02, 0, 0x091f }, 5010 { 0x02, 0, 0x091f },
@@ -4678,7 +5017,7 @@ static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
4678 }; 5017 };
4679 u8 cfg1; 5018 u8 cfg1;
4680 5019
4681 rtl_csi_access_enable_2(ioaddr); 5020 rtl_csi_access_enable_2(tp);
4682 5021
4683 RTL_W8(DBG_REG, FIX_NAK_1); 5022 RTL_W8(DBG_REG, FIX_NAK_1);
4684 5023
@@ -4695,9 +5034,12 @@ static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
4695 rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1)); 5034 rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
4696} 5035}
4697 5036
4698static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev) 5037static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
4699{ 5038{
4700 rtl_csi_access_enable_2(ioaddr); 5039 void __iomem *ioaddr = tp->mmio_addr;
5040 struct pci_dev *pdev = tp->pci_dev;
5041
5042 rtl_csi_access_enable_2(tp);
4701 5043
4702 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 5044 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4703 5045
@@ -4705,15 +5047,16 @@ static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev)
4705 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); 5047 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4706} 5048}
4707 5049
4708static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev) 5050static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
4709{ 5051{
4710 rtl_hw_start_8102e_2(ioaddr, pdev); 5052 rtl_hw_start_8102e_2(tp);
4711 5053
4712 rtl_ephy_write(ioaddr, 0x03, 0xc2f9); 5054 rtl_ephy_write(tp->mmio_addr, 0x03, 0xc2f9);
4713} 5055}
4714 5056
4715static void rtl_hw_start_8105e_1(void __iomem *ioaddr, struct pci_dev *pdev) 5057static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
4716{ 5058{
5059 void __iomem *ioaddr = tp->mmio_addr;
4717 static const struct ephy_info e_info_8105e_1[] = { 5060 static const struct ephy_info e_info_8105e_1[] = {
4718 { 0x07, 0, 0x4000 }, 5061 { 0x07, 0, 0x4000 },
4719 { 0x19, 0, 0x0200 }, 5062 { 0x19, 0, 0x0200 },
@@ -4737,12 +5080,44 @@ static void rtl_hw_start_8105e_1(void __iomem *ioaddr, struct pci_dev *pdev)
4737 rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1)); 5080 rtl_ephy_init(ioaddr, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
4738} 5081}
4739 5082
4740static void rtl_hw_start_8105e_2(void __iomem *ioaddr, struct pci_dev *pdev) 5083static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
4741{ 5084{
4742 rtl_hw_start_8105e_1(ioaddr, pdev); 5085 void __iomem *ioaddr = tp->mmio_addr;
5086
5087 rtl_hw_start_8105e_1(tp);
4743 rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000); 5088 rtl_ephy_write(ioaddr, 0x1e, rtl_ephy_read(ioaddr, 0x1e) | 0x8000);
4744} 5089}
4745 5090
5091static void rtl_hw_start_8402(struct rtl8169_private *tp)
5092{
5093 void __iomem *ioaddr = tp->mmio_addr;
5094 static const struct ephy_info e_info_8402[] = {
5095 { 0x19, 0xffff, 0xff64 },
5096 { 0x1e, 0, 0x4000 }
5097 };
5098
5099 rtl_csi_access_enable_2(tp);
5100
5101 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5102 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
5103
5104 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5105 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5106
5107 rtl_ephy_init(ioaddr, e_info_8402, ARRAY_SIZE(e_info_8402));
5108
5109 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
5110
5111 rtl_eri_write(ioaddr, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
5112 rtl_eri_write(ioaddr, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
5113 rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5114 rtl_w1w0_eri(ioaddr, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5115 rtl_eri_write(ioaddr, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5116 rtl_eri_write(ioaddr, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5117 rtl_w1w0_eri(ioaddr, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00,
5118 ERIAR_EXGMAC);
5119}
5120
4746static void rtl_hw_start_8101(struct net_device *dev) 5121static void rtl_hw_start_8101(struct net_device *dev)
4747{ 5122{
4748 struct rtl8169_private *tp = netdev_priv(dev); 5123 struct rtl8169_private *tp = netdev_priv(dev);
@@ -4766,22 +5141,26 @@ static void rtl_hw_start_8101(struct net_device *dev)
4766 5141
4767 switch (tp->mac_version) { 5142 switch (tp->mac_version) {
4768 case RTL_GIGA_MAC_VER_07: 5143 case RTL_GIGA_MAC_VER_07:
4769 rtl_hw_start_8102e_1(ioaddr, pdev); 5144 rtl_hw_start_8102e_1(tp);
4770 break; 5145 break;
4771 5146
4772 case RTL_GIGA_MAC_VER_08: 5147 case RTL_GIGA_MAC_VER_08:
4773 rtl_hw_start_8102e_3(ioaddr, pdev); 5148 rtl_hw_start_8102e_3(tp);
4774 break; 5149 break;
4775 5150
4776 case RTL_GIGA_MAC_VER_09: 5151 case RTL_GIGA_MAC_VER_09:
4777 rtl_hw_start_8102e_2(ioaddr, pdev); 5152 rtl_hw_start_8102e_2(tp);
4778 break; 5153 break;
4779 5154
4780 case RTL_GIGA_MAC_VER_29: 5155 case RTL_GIGA_MAC_VER_29:
4781 rtl_hw_start_8105e_1(ioaddr, pdev); 5156 rtl_hw_start_8105e_1(tp);
4782 break; 5157 break;
4783 case RTL_GIGA_MAC_VER_30: 5158 case RTL_GIGA_MAC_VER_30:
4784 rtl_hw_start_8105e_2(ioaddr, pdev); 5159 rtl_hw_start_8105e_2(tp);
5160 break;
5161
5162 case RTL_GIGA_MAC_VER_37:
5163 rtl_hw_start_8402(tp);
4785 break; 5164 break;
4786 } 5165 }
4787 5166
@@ -6178,6 +6557,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6178 rtl_init_mdio_ops(tp); 6557 rtl_init_mdio_ops(tp);
6179 rtl_init_pll_power_ops(tp); 6558 rtl_init_pll_power_ops(tp);
6180 rtl_init_jumbo_ops(tp); 6559 rtl_init_jumbo_ops(tp);
6560 rtl_init_csi_ops(tp);
6181 6561
6182 rtl8169_print_mac_version(tp); 6562 rtl8169_print_mac_version(tp);
6183 6563
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 3fb2355af37e..46df3a04030c 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -4,11 +4,11 @@
4 4
5config SH_ETH 5config SH_ETH
6 tristate "Renesas SuperH Ethernet support" 6 tristate "Renesas SuperH Ethernet support"
7 depends on SUPERH && \ 7 depends on (SUPERH || ARCH_SHMOBILE) && \
8 (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \ 8 (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \
9 CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \ 9 CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \
10 CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7734 || \ 10 CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7734 || \
11 CPU_SUBTYPE_SH7757) 11 CPU_SUBTYPE_SH7757 || ARCH_R8A7740)
12 select CRC32 12 select CRC32
13 select NET_CORE 13 select NET_CORE
14 select MII 14 select MII
@@ -17,4 +17,5 @@ config SH_ETH
17 ---help--- 17 ---help---
18 Renesas SuperH Ethernet device driver. 18 Renesas SuperH Ethernet device driver.
19 This driver supporting CPUs are: 19 This driver supporting CPUs are:
20 - SH7619, SH7710, SH7712, SH7724, SH7734, SH7763 and SH7757. 20 - SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757,
21 and R8A7740.
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index d63e09b29a96..be3c22179161 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -386,6 +386,114 @@ static void sh_eth_reset_hw_crc(struct net_device *ndev)
386 sh_eth_write(ndev, 0x0, CSMR); 386 sh_eth_write(ndev, 0x0, CSMR);
387} 387}
388 388
389#elif defined(CONFIG_ARCH_R8A7740)
390#define SH_ETH_HAS_TSU 1
391static void sh_eth_chip_reset(struct net_device *ndev)
392{
393 struct sh_eth_private *mdp = netdev_priv(ndev);
394 unsigned long mii;
395
396 /* reset device */
397 sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
398 mdelay(1);
399
400 switch (mdp->phy_interface) {
401 case PHY_INTERFACE_MODE_GMII:
402 mii = 2;
403 break;
404 case PHY_INTERFACE_MODE_MII:
405 mii = 1;
406 break;
407 case PHY_INTERFACE_MODE_RMII:
408 default:
409 mii = 0;
410 break;
411 }
412 sh_eth_write(ndev, mii, RMII_MII);
413}
414
415static void sh_eth_reset(struct net_device *ndev)
416{
417 int cnt = 100;
418
419 sh_eth_write(ndev, EDSR_ENALL, EDSR);
420 sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER, EDMR);
421 while (cnt > 0) {
422 if (!(sh_eth_read(ndev, EDMR) & 0x3))
423 break;
424 mdelay(1);
425 cnt--;
426 }
427 if (cnt == 0)
428 printk(KERN_ERR "Device reset fail\n");
429
430 /* Table Init */
431 sh_eth_write(ndev, 0x0, TDLAR);
432 sh_eth_write(ndev, 0x0, TDFAR);
433 sh_eth_write(ndev, 0x0, TDFXR);
434 sh_eth_write(ndev, 0x0, TDFFR);
435 sh_eth_write(ndev, 0x0, RDLAR);
436 sh_eth_write(ndev, 0x0, RDFAR);
437 sh_eth_write(ndev, 0x0, RDFXR);
438 sh_eth_write(ndev, 0x0, RDFFR);
439}
440
441static void sh_eth_set_duplex(struct net_device *ndev)
442{
443 struct sh_eth_private *mdp = netdev_priv(ndev);
444
445 if (mdp->duplex) /* Full */
446 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
447 else /* Half */
448 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
449}
450
451static void sh_eth_set_rate(struct net_device *ndev)
452{
453 struct sh_eth_private *mdp = netdev_priv(ndev);
454
455 switch (mdp->speed) {
456 case 10: /* 10BASE */
457 sh_eth_write(ndev, GECMR_10, GECMR);
458 break;
459 case 100:/* 100BASE */
460 sh_eth_write(ndev, GECMR_100, GECMR);
461 break;
462 case 1000: /* 1000BASE */
463 sh_eth_write(ndev, GECMR_1000, GECMR);
464 break;
465 default:
466 break;
467 }
468}
469
470/* R8A7740 */
471static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
472 .chip_reset = sh_eth_chip_reset,
473 .set_duplex = sh_eth_set_duplex,
474 .set_rate = sh_eth_set_rate,
475
476 .ecsr_value = ECSR_ICD | ECSR_MPD,
477 .ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
478 .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
479
480 .tx_check = EESR_TC1 | EESR_FTC,
481 .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
482 EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
483 EESR_ECI,
484 .tx_error_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
485 EESR_TFE,
486
487 .apr = 1,
488 .mpr = 1,
489 .tpauser = 1,
490 .bculr = 1,
491 .hw_swap = 1,
492 .no_trimd = 1,
493 .no_ade = 1,
494 .tsu = 1,
495};
496
389#elif defined(CONFIG_CPU_SUBTYPE_SH7619) 497#elif defined(CONFIG_CPU_SUBTYPE_SH7619)
390#define SH_ETH_RESET_DEFAULT 1 498#define SH_ETH_RESET_DEFAULT 1
391static struct sh_eth_cpu_data sh_eth_my_cpu_data = { 499static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
@@ -443,7 +551,7 @@ static void sh_eth_reset(struct net_device *ndev)
443} 551}
444#endif 552#endif
445 553
446#if defined(CONFIG_CPU_SH4) 554#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
447static void sh_eth_set_receive_align(struct sk_buff *skb) 555static void sh_eth_set_receive_align(struct sk_buff *skb)
448{ 556{
449 int reserve; 557 int reserve;
@@ -919,6 +1027,10 @@ static int sh_eth_rx(struct net_device *ndev)
919 desc_status = edmac_to_cpu(mdp, rxdesc->status); 1027 desc_status = edmac_to_cpu(mdp, rxdesc->status);
920 pkt_len = rxdesc->frame_length; 1028 pkt_len = rxdesc->frame_length;
921 1029
1030#if defined(CONFIG_ARCH_R8A7740)
1031 desc_status >>= 16;
1032#endif
1033
922 if (--boguscnt < 0) 1034 if (--boguscnt < 0)
923 break; 1035 break;
924 1036
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 0fa14afce23d..57b8e1fc5d15 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -372,7 +372,7 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
372}; 372};
373 373
374/* Driver's parameters */ 374/* Driver's parameters */
375#if defined(CONFIG_CPU_SH4) 375#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
376#define SH4_SKB_RX_ALIGN 32 376#define SH4_SKB_RX_ALIGN 32
377#else 377#else
378#define SH2_SH3_SKB_RX_ALIGN 2 378#define SH2_SH3_SKB_RX_ALIGN 2
@@ -381,7 +381,8 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
381/* 381/*
382 * Register's bits 382 * Register's bits
383 */ 383 */
384#if defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763) 384#if defined(CONFIG_CPU_SUBTYPE_SH7734) || defined(CONFIG_CPU_SUBTYPE_SH7763) ||\
385 defined(CONFIG_ARCH_R8A7740)
385/* EDSR */ 386/* EDSR */
386enum EDSR_BIT { 387enum EDSR_BIT {
387 EDSR_ENT = 0x01, EDSR_ENR = 0x02, 388 EDSR_ENT = 0x01, EDSR_ENR = 0x02,
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c
index 1895605abb35..8e9fda0c7aeb 100644
--- a/drivers/net/ethernet/s6gmac.c
+++ b/drivers/net/ethernet/s6gmac.c
@@ -937,7 +937,7 @@ static struct net_device_stats *s6gmac_stats(struct net_device *dev)
937 do { 937 do {
938 unsigned long flags; 938 unsigned long flags;
939 spin_lock_irqsave(&pd->lock, flags); 939 spin_lock_irqsave(&pd->lock, flags);
940 for (i = 0; i < sizeof(pd->stats) / sizeof(unsigned long); i++) 940 for (i = 0; i < ARRAY_SIZE(pd->stats); i++)
941 pd->stats[i] = 941 pd->stats[i] =
942 pd->carry[i] << (S6_GMAC_STAT_SIZE_MIN - 1); 942 pd->carry[i] << (S6_GMAC_STAT_SIZE_MIN - 1);
943 s6gmac_stats_collect(pd, &statinf[0][0]); 943 s6gmac_stats_collect(pd, &statinf[0][0]);
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index a284d6440538..32e55664df6e 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -39,9 +39,7 @@
39#define SC92031_NAME "sc92031" 39#define SC92031_NAME "sc92031"
40 40
41/* BAR 0 is MMIO, BAR 1 is PIO */ 41/* BAR 0 is MMIO, BAR 1 is PIO */
42#ifndef SC92031_USE_BAR 42#define SC92031_USE_PIO 0
43#define SC92031_USE_BAR 0
44#endif
45 43
46/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */ 44/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). */
47static int multicast_filter_limit = 64; 45static int multicast_filter_limit = 64;
@@ -366,7 +364,7 @@ static void sc92031_disable_interrupts(struct net_device *dev)
366 mmiowb(); 364 mmiowb();
367 365
368 /* wait for any concurrent interrupt/tasklet to finish */ 366 /* wait for any concurrent interrupt/tasklet to finish */
369 synchronize_irq(dev->irq); 367 synchronize_irq(priv->pdev->irq);
370 tasklet_disable(&priv->tasklet); 368 tasklet_disable(&priv->tasklet);
371} 369}
372 370
@@ -1114,10 +1112,13 @@ static void sc92031_tx_timeout(struct net_device *dev)
1114#ifdef CONFIG_NET_POLL_CONTROLLER 1112#ifdef CONFIG_NET_POLL_CONTROLLER
1115static void sc92031_poll_controller(struct net_device *dev) 1113static void sc92031_poll_controller(struct net_device *dev)
1116{ 1114{
1117 disable_irq(dev->irq); 1115 struct sc92031_priv *priv = netdev_priv(dev);
1118 if (sc92031_interrupt(dev->irq, dev) != IRQ_NONE) 1116 const int irq = priv->pdev->irq;
1117
1118 disable_irq(irq);
1119 if (sc92031_interrupt(irq, dev) != IRQ_NONE)
1119 sc92031_tasklet((unsigned long)dev); 1120 sc92031_tasklet((unsigned long)dev);
1120 enable_irq(dev->irq); 1121 enable_irq(irq);
1121} 1122}
1122#endif 1123#endif
1123 1124
@@ -1402,7 +1403,6 @@ static int __devinit sc92031_probe(struct pci_dev *pdev,
1402 struct net_device *dev; 1403 struct net_device *dev;
1403 struct sc92031_priv *priv; 1404 struct sc92031_priv *priv;
1404 u32 mac0, mac1; 1405 u32 mac0, mac1;
1405 unsigned long base_addr;
1406 1406
1407 err = pci_enable_device(pdev); 1407 err = pci_enable_device(pdev);
1408 if (unlikely(err < 0)) 1408 if (unlikely(err < 0))
@@ -1422,7 +1422,7 @@ static int __devinit sc92031_probe(struct pci_dev *pdev,
1422 if (unlikely(err < 0)) 1422 if (unlikely(err < 0))
1423 goto out_request_regions; 1423 goto out_request_regions;
1424 1424
1425 port_base = pci_iomap(pdev, SC92031_USE_BAR, 0); 1425 port_base = pci_iomap(pdev, SC92031_USE_PIO, 0);
1426 if (unlikely(!port_base)) { 1426 if (unlikely(!port_base)) {
1427 err = -EIO; 1427 err = -EIO;
1428 goto out_iomap; 1428 goto out_iomap;
@@ -1437,14 +1437,6 @@ static int __devinit sc92031_probe(struct pci_dev *pdev,
1437 pci_set_drvdata(pdev, dev); 1437 pci_set_drvdata(pdev, dev);
1438 SET_NETDEV_DEV(dev, &pdev->dev); 1438 SET_NETDEV_DEV(dev, &pdev->dev);
1439 1439
1440#if SC92031_USE_BAR == 0
1441 dev->mem_start = pci_resource_start(pdev, SC92031_USE_BAR);
1442 dev->mem_end = pci_resource_end(pdev, SC92031_USE_BAR);
1443#elif SC92031_USE_BAR == 1
1444 dev->base_addr = pci_resource_start(pdev, SC92031_USE_BAR);
1445#endif
1446 dev->irq = pdev->irq;
1447
1448 /* faked with skb_copy_and_csum_dev */ 1440 /* faked with skb_copy_and_csum_dev */
1449 dev->features = NETIF_F_SG | NETIF_F_HIGHDMA | 1441 dev->features = NETIF_F_SG | NETIF_F_HIGHDMA |
1450 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 1442 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
@@ -1478,13 +1470,9 @@ static int __devinit sc92031_probe(struct pci_dev *pdev,
1478 if (err < 0) 1470 if (err < 0)
1479 goto out_register_netdev; 1471 goto out_register_netdev;
1480 1472
1481#if SC92031_USE_BAR == 0
1482 base_addr = dev->mem_start;
1483#elif SC92031_USE_BAR == 1
1484 base_addr = dev->base_addr;
1485#endif
1486 printk(KERN_INFO "%s: SC92031 at 0x%lx, %pM, IRQ %d\n", dev->name, 1473 printk(KERN_INFO "%s: SC92031 at 0x%lx, %pM, IRQ %d\n", dev->name,
1487 base_addr, dev->dev_addr, dev->irq); 1474 (long)pci_resource_start(pdev, SC92031_USE_PIO), dev->dev_addr,
1475 pdev->irq);
1488 1476
1489 return 0; 1477 return 0;
1490 1478
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index a9deda8eaf63..4613591b43e7 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -729,7 +729,7 @@ static void sis190_tx_interrupt(struct net_device *dev,
729 * The interrupt handler does all of the Rx thread work and cleans up after 729 * The interrupt handler does all of the Rx thread work and cleans up after
730 * the Tx thread. 730 * the Tx thread.
731 */ 731 */
732static irqreturn_t sis190_interrupt(int irq, void *__dev) 732static irqreturn_t sis190_irq(int irq, void *__dev)
733{ 733{
734 struct net_device *dev = __dev; 734 struct net_device *dev = __dev;
735 struct sis190_private *tp = netdev_priv(dev); 735 struct sis190_private *tp = netdev_priv(dev);
@@ -772,11 +772,11 @@ out:
772static void sis190_netpoll(struct net_device *dev) 772static void sis190_netpoll(struct net_device *dev)
773{ 773{
774 struct sis190_private *tp = netdev_priv(dev); 774 struct sis190_private *tp = netdev_priv(dev);
775 struct pci_dev *pdev = tp->pci_dev; 775 const int irq = tp->pci_dev->irq;
776 776
777 disable_irq(pdev->irq); 777 disable_irq(irq);
778 sis190_interrupt(pdev->irq, dev); 778 sis190_irq(irq, dev);
779 enable_irq(pdev->irq); 779 enable_irq(irq);
780} 780}
781#endif 781#endif
782 782
@@ -1085,7 +1085,7 @@ static int sis190_open(struct net_device *dev)
1085 1085
1086 sis190_request_timer(dev); 1086 sis190_request_timer(dev);
1087 1087
1088 rc = request_irq(dev->irq, sis190_interrupt, IRQF_SHARED, dev->name, dev); 1088 rc = request_irq(pdev->irq, sis190_irq, IRQF_SHARED, dev->name, dev);
1089 if (rc < 0) 1089 if (rc < 0)
1090 goto err_release_timer_2; 1090 goto err_release_timer_2;
1091 1091
@@ -1097,11 +1097,9 @@ err_release_timer_2:
1097 sis190_delete_timer(dev); 1097 sis190_delete_timer(dev);
1098 sis190_rx_clear(tp); 1098 sis190_rx_clear(tp);
1099err_free_rx_1: 1099err_free_rx_1:
1100 pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing, 1100 pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
1101 tp->rx_dma);
1102err_free_tx_0: 1101err_free_tx_0:
1103 pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing, 1102 pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1104 tp->tx_dma);
1105 goto out; 1103 goto out;
1106} 1104}
1107 1105
@@ -1141,7 +1139,7 @@ static void sis190_down(struct net_device *dev)
1141 1139
1142 spin_unlock_irq(&tp->lock); 1140 spin_unlock_irq(&tp->lock);
1143 1141
1144 synchronize_irq(dev->irq); 1142 synchronize_irq(tp->pci_dev->irq);
1145 1143
1146 if (!poll_locked) 1144 if (!poll_locked)
1147 poll_locked++; 1145 poll_locked++;
@@ -1161,7 +1159,7 @@ static int sis190_close(struct net_device *dev)
1161 1159
1162 sis190_down(dev); 1160 sis190_down(dev);
1163 1161
1164 free_irq(dev->irq, dev); 1162 free_irq(pdev->irq, dev);
1165 1163
1166 pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma); 1164 pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
1167 pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma); 1165 pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
@@ -1884,8 +1882,6 @@ static int __devinit sis190_init_one(struct pci_dev *pdev,
1884 dev->netdev_ops = &sis190_netdev_ops; 1882 dev->netdev_ops = &sis190_netdev_ops;
1885 1883
1886 SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops); 1884 SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
1887 dev->irq = pdev->irq;
1888 dev->base_addr = (unsigned long) 0xdead;
1889 dev->watchdog_timeo = SIS190_TX_TIMEOUT; 1885 dev->watchdog_timeo = SIS190_TX_TIMEOUT;
1890 1886
1891 spin_lock_init(&tp->lock); 1887 spin_lock_init(&tp->lock);
@@ -1902,7 +1898,7 @@ static int __devinit sis190_init_one(struct pci_dev *pdev,
1902 netdev_info(dev, "%s: %s at %p (IRQ: %d), %pM\n", 1898 netdev_info(dev, "%s: %s at %p (IRQ: %d), %pM\n",
1903 pci_name(pdev), 1899 pci_name(pdev),
1904 sis_chip_info[ent->driver_data].name, 1900 sis_chip_info[ent->driver_data].name,
1905 ioaddr, dev->irq, dev->dev_addr); 1901 ioaddr, pdev->irq, dev->dev_addr);
1906 netdev_info(dev, "%s mode.\n", 1902 netdev_info(dev, "%s mode.\n",
1907 (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII"); 1903 (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
1908 } 1904 }
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 5ccf02e7e3ad..203d9c6ec23a 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -168,6 +168,8 @@ struct sis900_private {
168 unsigned int cur_phy; 168 unsigned int cur_phy;
169 struct mii_if_info mii_info; 169 struct mii_if_info mii_info;
170 170
171 void __iomem *ioaddr;
172
171 struct timer_list timer; /* Link status detection timer. */ 173 struct timer_list timer; /* Link status detection timer. */
172 u8 autong_complete; /* 1: auto-negotiate complete */ 174 u8 autong_complete; /* 1: auto-negotiate complete */
173 175
@@ -201,13 +203,18 @@ MODULE_PARM_DESC(multicast_filter_limit, "SiS 900/7016 maximum number of filtere
201MODULE_PARM_DESC(max_interrupt_work, "SiS 900/7016 maximum events handled per interrupt"); 203MODULE_PARM_DESC(max_interrupt_work, "SiS 900/7016 maximum events handled per interrupt");
202MODULE_PARM_DESC(sis900_debug, "SiS 900/7016 bitmapped debugging message level"); 204MODULE_PARM_DESC(sis900_debug, "SiS 900/7016 bitmapped debugging message level");
203 205
206#define sw32(reg, val) iowrite32(val, ioaddr + (reg))
207#define sw8(reg, val) iowrite8(val, ioaddr + (reg))
208#define sr32(reg) ioread32(ioaddr + (reg))
209#define sr16(reg) ioread16(ioaddr + (reg))
210
204#ifdef CONFIG_NET_POLL_CONTROLLER 211#ifdef CONFIG_NET_POLL_CONTROLLER
205static void sis900_poll(struct net_device *dev); 212static void sis900_poll(struct net_device *dev);
206#endif 213#endif
207static int sis900_open(struct net_device *net_dev); 214static int sis900_open(struct net_device *net_dev);
208static int sis900_mii_probe (struct net_device * net_dev); 215static int sis900_mii_probe (struct net_device * net_dev);
209static void sis900_init_rxfilter (struct net_device * net_dev); 216static void sis900_init_rxfilter (struct net_device * net_dev);
210static u16 read_eeprom(long ioaddr, int location); 217static u16 read_eeprom(void __iomem *ioaddr, int location);
211static int mdio_read(struct net_device *net_dev, int phy_id, int location); 218static int mdio_read(struct net_device *net_dev, int phy_id, int location);
212static void mdio_write(struct net_device *net_dev, int phy_id, int location, int val); 219static void mdio_write(struct net_device *net_dev, int phy_id, int location, int val);
213static void sis900_timer(unsigned long data); 220static void sis900_timer(unsigned long data);
@@ -231,7 +238,7 @@ static u16 sis900_default_phy(struct net_device * net_dev);
231static void sis900_set_capability( struct net_device *net_dev ,struct mii_phy *phy); 238static void sis900_set_capability( struct net_device *net_dev ,struct mii_phy *phy);
232static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr); 239static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr);
233static void sis900_auto_negotiate(struct net_device *net_dev, int phy_addr); 240static void sis900_auto_negotiate(struct net_device *net_dev, int phy_addr);
234static void sis900_set_mode (long ioaddr, int speed, int duplex); 241static void sis900_set_mode(struct sis900_private *, int speed, int duplex);
235static const struct ethtool_ops sis900_ethtool_ops; 242static const struct ethtool_ops sis900_ethtool_ops;
236 243
237/** 244/**
@@ -246,7 +253,8 @@ static const struct ethtool_ops sis900_ethtool_ops;
246 253
247static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_device *net_dev) 254static int __devinit sis900_get_mac_addr(struct pci_dev * pci_dev, struct net_device *net_dev)
248{ 255{
249 long ioaddr = pci_resource_start(pci_dev, 0); 256 struct sis900_private *sis_priv = netdev_priv(net_dev);
257 void __iomem *ioaddr = sis_priv->ioaddr;
250 u16 signature; 258 u16 signature;
251 int i; 259 int i;
252 260
@@ -325,29 +333,30 @@ static int __devinit sis630e_get_mac_addr(struct pci_dev * pci_dev,
325static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev, 333static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
326 struct net_device *net_dev) 334 struct net_device *net_dev)
327{ 335{
328 long ioaddr = net_dev->base_addr; 336 struct sis900_private *sis_priv = netdev_priv(net_dev);
337 void __iomem *ioaddr = sis_priv->ioaddr;
329 u32 rfcrSave; 338 u32 rfcrSave;
330 u32 i; 339 u32 i;
331 340
332 rfcrSave = inl(rfcr + ioaddr); 341 rfcrSave = sr32(rfcr);
333 342
334 outl(rfcrSave | RELOAD, ioaddr + cr); 343 sw32(cr, rfcrSave | RELOAD);
335 outl(0, ioaddr + cr); 344 sw32(cr, 0);
336 345
337 /* disable packet filtering before setting filter */ 346 /* disable packet filtering before setting filter */
338 outl(rfcrSave & ~RFEN, rfcr + ioaddr); 347 sw32(rfcr, rfcrSave & ~RFEN);
339 348
340 /* load MAC addr to filter data register */ 349 /* load MAC addr to filter data register */
341 for (i = 0 ; i < 3 ; i++) { 350 for (i = 0 ; i < 3 ; i++) {
342 outl((i << RFADDR_shift), ioaddr + rfcr); 351 sw32(rfcr, (i << RFADDR_shift));
343 *( ((u16 *)net_dev->dev_addr) + i) = inw(ioaddr + rfdr); 352 *( ((u16 *)net_dev->dev_addr) + i) = sr16(rfdr);
344 } 353 }
345 354
346 /* Store MAC Address in perm_addr */ 355 /* Store MAC Address in perm_addr */
347 memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN); 356 memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
348 357
349 /* enable packet filtering */ 358 /* enable packet filtering */
350 outl(rfcrSave | RFEN, rfcr + ioaddr); 359 sw32(rfcr, rfcrSave | RFEN);
351 360
352 return 1; 361 return 1;
353} 362}
@@ -371,31 +380,30 @@ static int __devinit sis635_get_mac_addr(struct pci_dev * pci_dev,
371static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev, 380static int __devinit sis96x_get_mac_addr(struct pci_dev * pci_dev,
372 struct net_device *net_dev) 381 struct net_device *net_dev)
373{ 382{
374 long ioaddr = net_dev->base_addr; 383 struct sis900_private *sis_priv = netdev_priv(net_dev);
375 long ee_addr = ioaddr + mear; 384 void __iomem *ioaddr = sis_priv->ioaddr;
376 u32 waittime = 0; 385 int wait, rc = 0;
377 int i;
378 386
379 outl(EEREQ, ee_addr); 387 sw32(mear, EEREQ);
380 while(waittime < 2000) { 388 for (wait = 0; wait < 2000; wait++) {
381 if(inl(ee_addr) & EEGNT) { 389 if (sr32(mear) & EEGNT) {
390 u16 *mac = (u16 *)net_dev->dev_addr;
391 int i;
382 392
383 /* get MAC address from EEPROM */ 393 /* get MAC address from EEPROM */
384 for (i = 0; i < 3; i++) 394 for (i = 0; i < 3; i++)
385 ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr); 395 mac[i] = read_eeprom(ioaddr, i + EEPROMMACAddr);
386 396
387 /* Store MAC Address in perm_addr */ 397 /* Store MAC Address in perm_addr */
388 memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN); 398 memcpy(net_dev->perm_addr, net_dev->dev_addr, ETH_ALEN);
389 399
390 outl(EEDONE, ee_addr); 400 rc = 1;
391 return 1; 401 break;
392 } else {
393 udelay(1);
394 waittime ++;
395 } 402 }
403 udelay(1);
396 } 404 }
397 outl(EEDONE, ee_addr); 405 sw32(mear, EEDONE);
398 return 0; 406 return rc;
399} 407}
400 408
401static const struct net_device_ops sis900_netdev_ops = { 409static const struct net_device_ops sis900_netdev_ops = {
@@ -433,7 +441,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
433 struct pci_dev *dev; 441 struct pci_dev *dev;
434 dma_addr_t ring_dma; 442 dma_addr_t ring_dma;
435 void *ring_space; 443 void *ring_space;
436 long ioaddr; 444 void __iomem *ioaddr;
437 int i, ret; 445 int i, ret;
438 const char *card_name = card_names[pci_id->driver_data]; 446 const char *card_name = card_names[pci_id->driver_data];
439 const char *dev_name = pci_name(pci_dev); 447 const char *dev_name = pci_name(pci_dev);
@@ -464,14 +472,17 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
464 SET_NETDEV_DEV(net_dev, &pci_dev->dev); 472 SET_NETDEV_DEV(net_dev, &pci_dev->dev);
465 473
466 /* We do a request_region() to register /proc/ioports info. */ 474 /* We do a request_region() to register /proc/ioports info. */
467 ioaddr = pci_resource_start(pci_dev, 0);
468 ret = pci_request_regions(pci_dev, "sis900"); 475 ret = pci_request_regions(pci_dev, "sis900");
469 if (ret) 476 if (ret)
470 goto err_out; 477 goto err_out;
471 478
479 /* IO region. */
480 ioaddr = pci_iomap(pci_dev, 0, 0);
481 if (!ioaddr)
482 goto err_out_cleardev;
483
472 sis_priv = netdev_priv(net_dev); 484 sis_priv = netdev_priv(net_dev);
473 net_dev->base_addr = ioaddr; 485 sis_priv->ioaddr = ioaddr;
474 net_dev->irq = pci_dev->irq;
475 sis_priv->pci_dev = pci_dev; 486 sis_priv->pci_dev = pci_dev;
476 spin_lock_init(&sis_priv->lock); 487 spin_lock_init(&sis_priv->lock);
477 488
@@ -480,7 +491,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
480 ring_space = pci_alloc_consistent(pci_dev, TX_TOTAL_SIZE, &ring_dma); 491 ring_space = pci_alloc_consistent(pci_dev, TX_TOTAL_SIZE, &ring_dma);
481 if (!ring_space) { 492 if (!ring_space) {
482 ret = -ENOMEM; 493 ret = -ENOMEM;
483 goto err_out_cleardev; 494 goto err_out_unmap;
484 } 495 }
485 sis_priv->tx_ring = ring_space; 496 sis_priv->tx_ring = ring_space;
486 sis_priv->tx_ring_dma = ring_dma; 497 sis_priv->tx_ring_dma = ring_dma;
@@ -534,7 +545,7 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
534 545
535 /* 630ET : set the mii access mode as software-mode */ 546 /* 630ET : set the mii access mode as software-mode */
536 if (sis_priv->chipset_rev == SIS630ET_900_REV) 547 if (sis_priv->chipset_rev == SIS630ET_900_REV)
537 outl(ACCESSMODE | inl(ioaddr + cr), ioaddr + cr); 548 sw32(cr, ACCESSMODE | sr32(cr));
538 549
539 /* probe for mii transceiver */ 550 /* probe for mii transceiver */
540 if (sis900_mii_probe(net_dev) == 0) { 551 if (sis900_mii_probe(net_dev) == 0) {
@@ -556,25 +567,27 @@ static int __devinit sis900_probe(struct pci_dev *pci_dev,
556 goto err_unmap_rx; 567 goto err_unmap_rx;
557 568
558 /* print some information about our NIC */ 569 /* print some information about our NIC */
559 printk(KERN_INFO "%s: %s at %#lx, IRQ %d, %pM\n", 570 printk(KERN_INFO "%s: %s at 0x%p, IRQ %d, %pM\n",
560 net_dev->name, card_name, ioaddr, net_dev->irq, 571 net_dev->name, card_name, ioaddr, pci_dev->irq,
561 net_dev->dev_addr); 572 net_dev->dev_addr);
562 573
563 /* Detect Wake on Lan support */ 574 /* Detect Wake on Lan support */
564 ret = (inl(net_dev->base_addr + CFGPMC) & PMESP) >> 27; 575 ret = (sr32(CFGPMC) & PMESP) >> 27;
565 if (netif_msg_probe(sis_priv) && (ret & PME_D3C) == 0) 576 if (netif_msg_probe(sis_priv) && (ret & PME_D3C) == 0)
566 printk(KERN_INFO "%s: Wake on LAN only available from suspend to RAM.", net_dev->name); 577 printk(KERN_INFO "%s: Wake on LAN only available from suspend to RAM.", net_dev->name);
567 578
568 return 0; 579 return 0;
569 580
570 err_unmap_rx: 581err_unmap_rx:
571 pci_free_consistent(pci_dev, RX_TOTAL_SIZE, sis_priv->rx_ring, 582 pci_free_consistent(pci_dev, RX_TOTAL_SIZE, sis_priv->rx_ring,
572 sis_priv->rx_ring_dma); 583 sis_priv->rx_ring_dma);
573 err_unmap_tx: 584err_unmap_tx:
574 pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring, 585 pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring,
575 sis_priv->tx_ring_dma); 586 sis_priv->tx_ring_dma);
576 err_out_cleardev: 587err_out_unmap:
577 pci_set_drvdata(pci_dev, NULL); 588 pci_iounmap(pci_dev, ioaddr);
589err_out_cleardev:
590 pci_set_drvdata(pci_dev, NULL);
578 pci_release_regions(pci_dev); 591 pci_release_regions(pci_dev);
579 err_out: 592 err_out:
580 free_netdev(net_dev); 593 free_netdev(net_dev);
@@ -798,7 +811,7 @@ static void sis900_set_capability(struct net_device *net_dev, struct mii_phy *ph
798 811
799 812
800/* Delay between EEPROM clock transitions. */ 813/* Delay between EEPROM clock transitions. */
801#define eeprom_delay() inl(ee_addr) 814#define eeprom_delay() sr32(mear)
802 815
803/** 816/**
804 * read_eeprom - Read Serial EEPROM 817 * read_eeprom - Read Serial EEPROM
@@ -809,41 +822,41 @@ static void sis900_set_capability(struct net_device *net_dev, struct mii_phy *ph
809 * Note that location is in word (16 bits) unit 822 * Note that location is in word (16 bits) unit
810 */ 823 */
811 824
812static u16 __devinit read_eeprom(long ioaddr, int location) 825static u16 __devinit read_eeprom(void __iomem *ioaddr, int location)
813{ 826{
827 u32 read_cmd = location | EEread;
814 int i; 828 int i;
815 u16 retval = 0; 829 u16 retval = 0;
816 long ee_addr = ioaddr + mear;
817 u32 read_cmd = location | EEread;
818 830
819 outl(0, ee_addr); 831 sw32(mear, 0);
820 eeprom_delay(); 832 eeprom_delay();
821 outl(EECS, ee_addr); 833 sw32(mear, EECS);
822 eeprom_delay(); 834 eeprom_delay();
823 835
824 /* Shift the read command (9) bits out. */ 836 /* Shift the read command (9) bits out. */
825 for (i = 8; i >= 0; i--) { 837 for (i = 8; i >= 0; i--) {
826 u32 dataval = (read_cmd & (1 << i)) ? EEDI | EECS : EECS; 838 u32 dataval = (read_cmd & (1 << i)) ? EEDI | EECS : EECS;
827 outl(dataval, ee_addr); 839
840 sw32(mear, dataval);
828 eeprom_delay(); 841 eeprom_delay();
829 outl(dataval | EECLK, ee_addr); 842 sw32(mear, dataval | EECLK);
830 eeprom_delay(); 843 eeprom_delay();
831 } 844 }
832 outl(EECS, ee_addr); 845 sw32(mear, EECS);
833 eeprom_delay(); 846 eeprom_delay();
834 847
835 /* read the 16-bits data in */ 848 /* read the 16-bits data in */
836 for (i = 16; i > 0; i--) { 849 for (i = 16; i > 0; i--) {
837 outl(EECS, ee_addr); 850 sw32(mear, EECS);
838 eeprom_delay(); 851 eeprom_delay();
839 outl(EECS | EECLK, ee_addr); 852 sw32(mear, EECS | EECLK);
840 eeprom_delay(); 853 eeprom_delay();
841 retval = (retval << 1) | ((inl(ee_addr) & EEDO) ? 1 : 0); 854 retval = (retval << 1) | ((sr32(mear) & EEDO) ? 1 : 0);
842 eeprom_delay(); 855 eeprom_delay();
843 } 856 }
844 857
845 /* Terminate the EEPROM access. */ 858 /* Terminate the EEPROM access. */
846 outl(0, ee_addr); 859 sw32(mear, 0);
847 eeprom_delay(); 860 eeprom_delay();
848 861
849 return retval; 862 return retval;
@@ -852,24 +865,27 @@ static u16 __devinit read_eeprom(long ioaddr, int location)
852/* Read and write the MII management registers using software-generated 865/* Read and write the MII management registers using software-generated
853 serial MDIO protocol. Note that the command bits and data bits are 866 serial MDIO protocol. Note that the command bits and data bits are
854 send out separately */ 867 send out separately */
855#define mdio_delay() inl(mdio_addr) 868#define mdio_delay() sr32(mear)
856 869
857static void mdio_idle(long mdio_addr) 870static void mdio_idle(struct sis900_private *sp)
858{ 871{
859 outl(MDIO | MDDIR, mdio_addr); 872 void __iomem *ioaddr = sp->ioaddr;
873
874 sw32(mear, MDIO | MDDIR);
860 mdio_delay(); 875 mdio_delay();
861 outl(MDIO | MDDIR | MDC, mdio_addr); 876 sw32(mear, MDIO | MDDIR | MDC);
862} 877}
863 878
864/* Syncronize the MII management interface by shifting 32 one bits out. */ 879/* Synchronize the MII management interface by shifting 32 one bits out. */
865static void mdio_reset(long mdio_addr) 880static void mdio_reset(struct sis900_private *sp)
866{ 881{
882 void __iomem *ioaddr = sp->ioaddr;
867 int i; 883 int i;
868 884
869 for (i = 31; i >= 0; i--) { 885 for (i = 31; i >= 0; i--) {
870 outl(MDDIR | MDIO, mdio_addr); 886 sw32(mear, MDDIR | MDIO);
871 mdio_delay(); 887 mdio_delay();
872 outl(MDDIR | MDIO | MDC, mdio_addr); 888 sw32(mear, MDDIR | MDIO | MDC);
873 mdio_delay(); 889 mdio_delay();
874 } 890 }
875} 891}
@@ -887,31 +903,33 @@ static void mdio_reset(long mdio_addr)
887 903
888static int mdio_read(struct net_device *net_dev, int phy_id, int location) 904static int mdio_read(struct net_device *net_dev, int phy_id, int location)
889{ 905{
890 long mdio_addr = net_dev->base_addr + mear;
891 int mii_cmd = MIIread|(phy_id<<MIIpmdShift)|(location<<MIIregShift); 906 int mii_cmd = MIIread|(phy_id<<MIIpmdShift)|(location<<MIIregShift);
907 struct sis900_private *sp = netdev_priv(net_dev);
908 void __iomem *ioaddr = sp->ioaddr;
892 u16 retval = 0; 909 u16 retval = 0;
893 int i; 910 int i;
894 911
895 mdio_reset(mdio_addr); 912 mdio_reset(sp);
896 mdio_idle(mdio_addr); 913 mdio_idle(sp);
897 914
898 for (i = 15; i >= 0; i--) { 915 for (i = 15; i >= 0; i--) {
899 int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR; 916 int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR;
900 outl(dataval, mdio_addr); 917
918 sw32(mear, dataval);
901 mdio_delay(); 919 mdio_delay();
902 outl(dataval | MDC, mdio_addr); 920 sw32(mear, dataval | MDC);
903 mdio_delay(); 921 mdio_delay();
904 } 922 }
905 923
906 /* Read the 16 data bits. */ 924 /* Read the 16 data bits. */
907 for (i = 16; i > 0; i--) { 925 for (i = 16; i > 0; i--) {
908 outl(0, mdio_addr); 926 sw32(mear, 0);
909 mdio_delay(); 927 mdio_delay();
910 retval = (retval << 1) | ((inl(mdio_addr) & MDIO) ? 1 : 0); 928 retval = (retval << 1) | ((sr32(mear) & MDIO) ? 1 : 0);
911 outl(MDC, mdio_addr); 929 sw32(mear, MDC);
912 mdio_delay(); 930 mdio_delay();
913 } 931 }
914 outl(0x00, mdio_addr); 932 sw32(mear, 0x00);
915 933
916 return retval; 934 return retval;
917} 935}
@@ -931,19 +949,21 @@ static int mdio_read(struct net_device *net_dev, int phy_id, int location)
931static void mdio_write(struct net_device *net_dev, int phy_id, int location, 949static void mdio_write(struct net_device *net_dev, int phy_id, int location,
932 int value) 950 int value)
933{ 951{
934 long mdio_addr = net_dev->base_addr + mear;
935 int mii_cmd = MIIwrite|(phy_id<<MIIpmdShift)|(location<<MIIregShift); 952 int mii_cmd = MIIwrite|(phy_id<<MIIpmdShift)|(location<<MIIregShift);
953 struct sis900_private *sp = netdev_priv(net_dev);
954 void __iomem *ioaddr = sp->ioaddr;
936 int i; 955 int i;
937 956
938 mdio_reset(mdio_addr); 957 mdio_reset(sp);
939 mdio_idle(mdio_addr); 958 mdio_idle(sp);
940 959
941 /* Shift the command bits out. */ 960 /* Shift the command bits out. */
942 for (i = 15; i >= 0; i--) { 961 for (i = 15; i >= 0; i--) {
943 int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR; 962 int dataval = (mii_cmd & (1 << i)) ? MDDIR | MDIO : MDDIR;
944 outb(dataval, mdio_addr); 963
964 sw8(mear, dataval);
945 mdio_delay(); 965 mdio_delay();
946 outb(dataval | MDC, mdio_addr); 966 sw8(mear, dataval | MDC);
947 mdio_delay(); 967 mdio_delay();
948 } 968 }
949 mdio_delay(); 969 mdio_delay();
@@ -951,21 +971,22 @@ static void mdio_write(struct net_device *net_dev, int phy_id, int location,
951 /* Shift the value bits out. */ 971 /* Shift the value bits out. */
952 for (i = 15; i >= 0; i--) { 972 for (i = 15; i >= 0; i--) {
953 int dataval = (value & (1 << i)) ? MDDIR | MDIO : MDDIR; 973 int dataval = (value & (1 << i)) ? MDDIR | MDIO : MDDIR;
954 outl(dataval, mdio_addr); 974
975 sw32(mear, dataval);
955 mdio_delay(); 976 mdio_delay();
956 outl(dataval | MDC, mdio_addr); 977 sw32(mear, dataval | MDC);
957 mdio_delay(); 978 mdio_delay();
958 } 979 }
959 mdio_delay(); 980 mdio_delay();
960 981
961 /* Clear out extra bits. */ 982 /* Clear out extra bits. */
962 for (i = 2; i > 0; i--) { 983 for (i = 2; i > 0; i--) {
963 outb(0, mdio_addr); 984 sw8(mear, 0);
964 mdio_delay(); 985 mdio_delay();
965 outb(MDC, mdio_addr); 986 sw8(mear, MDC);
966 mdio_delay(); 987 mdio_delay();
967 } 988 }
968 outl(0x00, mdio_addr); 989 sw32(mear, 0x00);
969} 990}
970 991
971 992
@@ -1000,9 +1021,12 @@ static u16 sis900_reset_phy(struct net_device *net_dev, int phy_addr)
1000*/ 1021*/
1001static void sis900_poll(struct net_device *dev) 1022static void sis900_poll(struct net_device *dev)
1002{ 1023{
1003 disable_irq(dev->irq); 1024 struct sis900_private *sp = netdev_priv(dev);
1004 sis900_interrupt(dev->irq, dev); 1025 const int irq = sp->pci_dev->irq;
1005 enable_irq(dev->irq); 1026
1027 disable_irq(irq);
1028 sis900_interrupt(irq, dev);
1029 enable_irq(irq);
1006} 1030}
1007#endif 1031#endif
1008 1032
@@ -1018,7 +1042,7 @@ static int
1018sis900_open(struct net_device *net_dev) 1042sis900_open(struct net_device *net_dev)
1019{ 1043{
1020 struct sis900_private *sis_priv = netdev_priv(net_dev); 1044 struct sis900_private *sis_priv = netdev_priv(net_dev);
1021 long ioaddr = net_dev->base_addr; 1045 void __iomem *ioaddr = sis_priv->ioaddr;
1022 int ret; 1046 int ret;
1023 1047
1024 /* Soft reset the chip. */ 1048 /* Soft reset the chip. */
@@ -1027,8 +1051,8 @@ sis900_open(struct net_device *net_dev)
1027 /* Equalizer workaround Rule */ 1051 /* Equalizer workaround Rule */
1028 sis630_set_eq(net_dev, sis_priv->chipset_rev); 1052 sis630_set_eq(net_dev, sis_priv->chipset_rev);
1029 1053
1030 ret = request_irq(net_dev->irq, sis900_interrupt, IRQF_SHARED, 1054 ret = request_irq(sis_priv->pci_dev->irq, sis900_interrupt, IRQF_SHARED,
1031 net_dev->name, net_dev); 1055 net_dev->name, net_dev);
1032 if (ret) 1056 if (ret)
1033 return ret; 1057 return ret;
1034 1058
@@ -1042,12 +1066,12 @@ sis900_open(struct net_device *net_dev)
1042 netif_start_queue(net_dev); 1066 netif_start_queue(net_dev);
1043 1067
1044 /* Workaround for EDB */ 1068 /* Workaround for EDB */
1045 sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED); 1069 sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
1046 1070
1047 /* Enable all known interrupts by setting the interrupt mask. */ 1071 /* Enable all known interrupts by setting the interrupt mask. */
1048 outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr); 1072 sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
1049 outl(RxENA | inl(ioaddr + cr), ioaddr + cr); 1073 sw32(cr, RxENA | sr32(cr));
1050 outl(IE, ioaddr + ier); 1074 sw32(ier, IE);
1051 1075
1052 sis900_check_mode(net_dev, sis_priv->mii); 1076 sis900_check_mode(net_dev, sis_priv->mii);
1053 1077
@@ -1074,31 +1098,30 @@ static void
1074sis900_init_rxfilter (struct net_device * net_dev) 1098sis900_init_rxfilter (struct net_device * net_dev)
1075{ 1099{
1076 struct sis900_private *sis_priv = netdev_priv(net_dev); 1100 struct sis900_private *sis_priv = netdev_priv(net_dev);
1077 long ioaddr = net_dev->base_addr; 1101 void __iomem *ioaddr = sis_priv->ioaddr;
1078 u32 rfcrSave; 1102 u32 rfcrSave;
1079 u32 i; 1103 u32 i;
1080 1104
1081 rfcrSave = inl(rfcr + ioaddr); 1105 rfcrSave = sr32(rfcr);
1082 1106
1083 /* disable packet filtering before setting filter */ 1107 /* disable packet filtering before setting filter */
1084 outl(rfcrSave & ~RFEN, rfcr + ioaddr); 1108 sw32(rfcr, rfcrSave & ~RFEN);
1085 1109
1086 /* load MAC addr to filter data register */ 1110 /* load MAC addr to filter data register */
1087 for (i = 0 ; i < 3 ; i++) { 1111 for (i = 0 ; i < 3 ; i++) {
1088 u32 w; 1112 u32 w = (u32) *((u16 *)(net_dev->dev_addr)+i);
1089 1113
1090 w = (u32) *((u16 *)(net_dev->dev_addr)+i); 1114 sw32(rfcr, i << RFADDR_shift);
1091 outl((i << RFADDR_shift), ioaddr + rfcr); 1115 sw32(rfdr, w);
1092 outl(w, ioaddr + rfdr);
1093 1116
1094 if (netif_msg_hw(sis_priv)) { 1117 if (netif_msg_hw(sis_priv)) {
1095 printk(KERN_DEBUG "%s: Receive Filter Addrss[%d]=%x\n", 1118 printk(KERN_DEBUG "%s: Receive Filter Addrss[%d]=%x\n",
1096 net_dev->name, i, inl(ioaddr + rfdr)); 1119 net_dev->name, i, sr32(rfdr));
1097 } 1120 }
1098 } 1121 }
1099 1122
1100 /* enable packet filtering */ 1123 /* enable packet filtering */
1101 outl(rfcrSave | RFEN, rfcr + ioaddr); 1124 sw32(rfcr, rfcrSave | RFEN);
1102} 1125}
1103 1126
1104/** 1127/**
@@ -1112,7 +1135,7 @@ static void
1112sis900_init_tx_ring(struct net_device *net_dev) 1135sis900_init_tx_ring(struct net_device *net_dev)
1113{ 1136{
1114 struct sis900_private *sis_priv = netdev_priv(net_dev); 1137 struct sis900_private *sis_priv = netdev_priv(net_dev);
1115 long ioaddr = net_dev->base_addr; 1138 void __iomem *ioaddr = sis_priv->ioaddr;
1116 int i; 1139 int i;
1117 1140
1118 sis_priv->tx_full = 0; 1141 sis_priv->tx_full = 0;
@@ -1128,10 +1151,10 @@ sis900_init_tx_ring(struct net_device *net_dev)
1128 } 1151 }
1129 1152
1130 /* load Transmit Descriptor Register */ 1153 /* load Transmit Descriptor Register */
1131 outl(sis_priv->tx_ring_dma, ioaddr + txdp); 1154 sw32(txdp, sis_priv->tx_ring_dma);
1132 if (netif_msg_hw(sis_priv)) 1155 if (netif_msg_hw(sis_priv))
1133 printk(KERN_DEBUG "%s: TX descriptor register loaded with: %8.8x\n", 1156 printk(KERN_DEBUG "%s: TX descriptor register loaded with: %8.8x\n",
1134 net_dev->name, inl(ioaddr + txdp)); 1157 net_dev->name, sr32(txdp));
1135} 1158}
1136 1159
1137/** 1160/**
@@ -1146,7 +1169,7 @@ static void
1146sis900_init_rx_ring(struct net_device *net_dev) 1169sis900_init_rx_ring(struct net_device *net_dev)
1147{ 1170{
1148 struct sis900_private *sis_priv = netdev_priv(net_dev); 1171 struct sis900_private *sis_priv = netdev_priv(net_dev);
1149 long ioaddr = net_dev->base_addr; 1172 void __iomem *ioaddr = sis_priv->ioaddr;
1150 int i; 1173 int i;
1151 1174
1152 sis_priv->cur_rx = 0; 1175 sis_priv->cur_rx = 0;
@@ -1181,10 +1204,10 @@ sis900_init_rx_ring(struct net_device *net_dev)
1181 sis_priv->dirty_rx = (unsigned int) (i - NUM_RX_DESC); 1204 sis_priv->dirty_rx = (unsigned int) (i - NUM_RX_DESC);
1182 1205
1183 /* load Receive Descriptor Register */ 1206 /* load Receive Descriptor Register */
1184 outl(sis_priv->rx_ring_dma, ioaddr + rxdp); 1207 sw32(rxdp, sis_priv->rx_ring_dma);
1185 if (netif_msg_hw(sis_priv)) 1208 if (netif_msg_hw(sis_priv))
1186 printk(KERN_DEBUG "%s: RX descriptor register loaded with: %8.8x\n", 1209 printk(KERN_DEBUG "%s: RX descriptor register loaded with: %8.8x\n",
1187 net_dev->name, inl(ioaddr + rxdp)); 1210 net_dev->name, sr32(rxdp));
1188} 1211}
1189 1212
1190/** 1213/**
@@ -1298,7 +1321,7 @@ static void sis900_timer(unsigned long data)
1298 1321
1299 sis900_read_mode(net_dev, &speed, &duplex); 1322 sis900_read_mode(net_dev, &speed, &duplex);
1300 if (duplex){ 1323 if (duplex){
1301 sis900_set_mode(net_dev->base_addr, speed, duplex); 1324 sis900_set_mode(sis_priv, speed, duplex);
1302 sis630_set_eq(net_dev, sis_priv->chipset_rev); 1325 sis630_set_eq(net_dev, sis_priv->chipset_rev);
1303 netif_start_queue(net_dev); 1326 netif_start_queue(net_dev);
1304 } 1327 }
@@ -1359,25 +1382,25 @@ static void sis900_timer(unsigned long data)
1359static void sis900_check_mode(struct net_device *net_dev, struct mii_phy *mii_phy) 1382static void sis900_check_mode(struct net_device *net_dev, struct mii_phy *mii_phy)
1360{ 1383{
1361 struct sis900_private *sis_priv = netdev_priv(net_dev); 1384 struct sis900_private *sis_priv = netdev_priv(net_dev);
1362 long ioaddr = net_dev->base_addr; 1385 void __iomem *ioaddr = sis_priv->ioaddr;
1363 int speed, duplex; 1386 int speed, duplex;
1364 1387
1365 if (mii_phy->phy_types == LAN) { 1388 if (mii_phy->phy_types == LAN) {
1366 outl(~EXD & inl(ioaddr + cfg), ioaddr + cfg); 1389 sw32(cfg, ~EXD & sr32(cfg));
1367 sis900_set_capability(net_dev , mii_phy); 1390 sis900_set_capability(net_dev , mii_phy);
1368 sis900_auto_negotiate(net_dev, sis_priv->cur_phy); 1391 sis900_auto_negotiate(net_dev, sis_priv->cur_phy);
1369 } else { 1392 } else {
1370 outl(EXD | inl(ioaddr + cfg), ioaddr + cfg); 1393 sw32(cfg, EXD | sr32(cfg));
1371 speed = HW_SPEED_HOME; 1394 speed = HW_SPEED_HOME;
1372 duplex = FDX_CAPABLE_HALF_SELECTED; 1395 duplex = FDX_CAPABLE_HALF_SELECTED;
1373 sis900_set_mode(ioaddr, speed, duplex); 1396 sis900_set_mode(sis_priv, speed, duplex);
1374 sis_priv->autong_complete = 1; 1397 sis_priv->autong_complete = 1;
1375 } 1398 }
1376} 1399}
1377 1400
1378/** 1401/**
1379 * sis900_set_mode - Set the media mode of mac register. 1402 * sis900_set_mode - Set the media mode of mac register.
1380 * @ioaddr: the address of the device 1403 * @sp: the device private data
1381 * @speed : the transmit speed to be determined 1404 * @speed : the transmit speed to be determined
1382 * @duplex: the duplex mode to be determined 1405 * @duplex: the duplex mode to be determined
1383 * 1406 *
@@ -1388,11 +1411,12 @@ static void sis900_check_mode(struct net_device *net_dev, struct mii_phy *mii_ph
1388 * double words. 1411 * double words.
1389 */ 1412 */
1390 1413
1391static void sis900_set_mode (long ioaddr, int speed, int duplex) 1414static void sis900_set_mode(struct sis900_private *sp, int speed, int duplex)
1392{ 1415{
1416 void __iomem *ioaddr = sp->ioaddr;
1393 u32 tx_flags = 0, rx_flags = 0; 1417 u32 tx_flags = 0, rx_flags = 0;
1394 1418
1395 if (inl(ioaddr + cfg) & EDB_MASTER_EN) { 1419 if (sr32( cfg) & EDB_MASTER_EN) {
1396 tx_flags = TxATP | (DMA_BURST_64 << TxMXDMA_shift) | 1420 tx_flags = TxATP | (DMA_BURST_64 << TxMXDMA_shift) |
1397 (TX_FILL_THRESH << TxFILLT_shift); 1421 (TX_FILL_THRESH << TxFILLT_shift);
1398 rx_flags = DMA_BURST_64 << RxMXDMA_shift; 1422 rx_flags = DMA_BURST_64 << RxMXDMA_shift;
@@ -1420,8 +1444,8 @@ static void sis900_set_mode (long ioaddr, int speed, int duplex)
1420 rx_flags |= RxAJAB; 1444 rx_flags |= RxAJAB;
1421#endif 1445#endif
1422 1446
1423 outl (tx_flags, ioaddr + txcfg); 1447 sw32(txcfg, tx_flags);
1424 outl (rx_flags, ioaddr + rxcfg); 1448 sw32(rxcfg, rx_flags);
1425} 1449}
1426 1450
1427/** 1451/**
@@ -1528,16 +1552,17 @@ static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex
1528static void sis900_tx_timeout(struct net_device *net_dev) 1552static void sis900_tx_timeout(struct net_device *net_dev)
1529{ 1553{
1530 struct sis900_private *sis_priv = netdev_priv(net_dev); 1554 struct sis900_private *sis_priv = netdev_priv(net_dev);
1531 long ioaddr = net_dev->base_addr; 1555 void __iomem *ioaddr = sis_priv->ioaddr;
1532 unsigned long flags; 1556 unsigned long flags;
1533 int i; 1557 int i;
1534 1558
1535 if(netif_msg_tx_err(sis_priv)) 1559 if (netif_msg_tx_err(sis_priv)) {
1536 printk(KERN_INFO "%s: Transmit timeout, status %8.8x %8.8x\n", 1560 printk(KERN_INFO "%s: Transmit timeout, status %8.8x %8.8x\n",
1537 net_dev->name, inl(ioaddr + cr), inl(ioaddr + isr)); 1561 net_dev->name, sr32(cr), sr32(isr));
1562 }
1538 1563
1539 /* Disable interrupts by clearing the interrupt mask. */ 1564 /* Disable interrupts by clearing the interrupt mask. */
1540 outl(0x0000, ioaddr + imr); 1565 sw32(imr, 0x0000);
1541 1566
1542 /* use spinlock to prevent interrupt handler accessing buffer ring */ 1567 /* use spinlock to prevent interrupt handler accessing buffer ring */
1543 spin_lock_irqsave(&sis_priv->lock, flags); 1568 spin_lock_irqsave(&sis_priv->lock, flags);
@@ -1566,10 +1591,10 @@ static void sis900_tx_timeout(struct net_device *net_dev)
1566 net_dev->trans_start = jiffies; /* prevent tx timeout */ 1591 net_dev->trans_start = jiffies; /* prevent tx timeout */
1567 1592
1568 /* load Transmit Descriptor Register */ 1593 /* load Transmit Descriptor Register */
1569 outl(sis_priv->tx_ring_dma, ioaddr + txdp); 1594 sw32(txdp, sis_priv->tx_ring_dma);
1570 1595
1571 /* Enable all known interrupts by setting the interrupt mask. */ 1596 /* Enable all known interrupts by setting the interrupt mask. */
1572 outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr); 1597 sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
1573} 1598}
1574 1599
1575/** 1600/**
@@ -1586,7 +1611,7 @@ static netdev_tx_t
1586sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev) 1611sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
1587{ 1612{
1588 struct sis900_private *sis_priv = netdev_priv(net_dev); 1613 struct sis900_private *sis_priv = netdev_priv(net_dev);
1589 long ioaddr = net_dev->base_addr; 1614 void __iomem *ioaddr = sis_priv->ioaddr;
1590 unsigned int entry; 1615 unsigned int entry;
1591 unsigned long flags; 1616 unsigned long flags;
1592 unsigned int index_cur_tx, index_dirty_tx; 1617 unsigned int index_cur_tx, index_dirty_tx;
@@ -1608,7 +1633,7 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
1608 sis_priv->tx_ring[entry].bufptr = pci_map_single(sis_priv->pci_dev, 1633 sis_priv->tx_ring[entry].bufptr = pci_map_single(sis_priv->pci_dev,
1609 skb->data, skb->len, PCI_DMA_TODEVICE); 1634 skb->data, skb->len, PCI_DMA_TODEVICE);
1610 sis_priv->tx_ring[entry].cmdsts = (OWN | skb->len); 1635 sis_priv->tx_ring[entry].cmdsts = (OWN | skb->len);
1611 outl(TxENA | inl(ioaddr + cr), ioaddr + cr); 1636 sw32(cr, TxENA | sr32(cr));
1612 1637
1613 sis_priv->cur_tx ++; 1638 sis_priv->cur_tx ++;
1614 index_cur_tx = sis_priv->cur_tx; 1639 index_cur_tx = sis_priv->cur_tx;
@@ -1654,14 +1679,14 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
1654 struct net_device *net_dev = dev_instance; 1679 struct net_device *net_dev = dev_instance;
1655 struct sis900_private *sis_priv = netdev_priv(net_dev); 1680 struct sis900_private *sis_priv = netdev_priv(net_dev);
1656 int boguscnt = max_interrupt_work; 1681 int boguscnt = max_interrupt_work;
1657 long ioaddr = net_dev->base_addr; 1682 void __iomem *ioaddr = sis_priv->ioaddr;
1658 u32 status; 1683 u32 status;
1659 unsigned int handled = 0; 1684 unsigned int handled = 0;
1660 1685
1661 spin_lock (&sis_priv->lock); 1686 spin_lock (&sis_priv->lock);
1662 1687
1663 do { 1688 do {
1664 status = inl(ioaddr + isr); 1689 status = sr32(isr);
1665 1690
1666 if ((status & (HIBERR|TxURN|TxERR|TxIDLE|RxORN|RxERR|RxOK)) == 0) 1691 if ((status & (HIBERR|TxURN|TxERR|TxIDLE|RxORN|RxERR|RxOK)) == 0)
1667 /* nothing intresting happened */ 1692 /* nothing intresting happened */
@@ -1696,7 +1721,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
1696 if(netif_msg_intr(sis_priv)) 1721 if(netif_msg_intr(sis_priv))
1697 printk(KERN_DEBUG "%s: exiting interrupt, " 1722 printk(KERN_DEBUG "%s: exiting interrupt, "
1698 "interrupt status = 0x%#8.8x.\n", 1723 "interrupt status = 0x%#8.8x.\n",
1699 net_dev->name, inl(ioaddr + isr)); 1724 net_dev->name, sr32(isr));
1700 1725
1701 spin_unlock (&sis_priv->lock); 1726 spin_unlock (&sis_priv->lock);
1702 return IRQ_RETVAL(handled); 1727 return IRQ_RETVAL(handled);
@@ -1715,7 +1740,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance)
1715static int sis900_rx(struct net_device *net_dev) 1740static int sis900_rx(struct net_device *net_dev)
1716{ 1741{
1717 struct sis900_private *sis_priv = netdev_priv(net_dev); 1742 struct sis900_private *sis_priv = netdev_priv(net_dev);
1718 long ioaddr = net_dev->base_addr; 1743 void __iomem *ioaddr = sis_priv->ioaddr;
1719 unsigned int entry = sis_priv->cur_rx % NUM_RX_DESC; 1744 unsigned int entry = sis_priv->cur_rx % NUM_RX_DESC;
1720 u32 rx_status = sis_priv->rx_ring[entry].cmdsts; 1745 u32 rx_status = sis_priv->rx_ring[entry].cmdsts;
1721 int rx_work_limit; 1746 int rx_work_limit;
@@ -1847,7 +1872,7 @@ refill_rx_ring:
1847 } 1872 }
1848 } 1873 }
1849 /* re-enable the potentially idle receive state matchine */ 1874 /* re-enable the potentially idle receive state matchine */
1850 outl(RxENA | inl(ioaddr + cr), ioaddr + cr ); 1875 sw32(cr , RxENA | sr32(cr));
1851 1876
1852 return 0; 1877 return 0;
1853} 1878}
@@ -1932,31 +1957,31 @@ static void sis900_finish_xmit (struct net_device *net_dev)
1932 1957
1933static int sis900_close(struct net_device *net_dev) 1958static int sis900_close(struct net_device *net_dev)
1934{ 1959{
1935 long ioaddr = net_dev->base_addr;
1936 struct sis900_private *sis_priv = netdev_priv(net_dev); 1960 struct sis900_private *sis_priv = netdev_priv(net_dev);
1961 struct pci_dev *pdev = sis_priv->pci_dev;
1962 void __iomem *ioaddr = sis_priv->ioaddr;
1937 struct sk_buff *skb; 1963 struct sk_buff *skb;
1938 int i; 1964 int i;
1939 1965
1940 netif_stop_queue(net_dev); 1966 netif_stop_queue(net_dev);
1941 1967
1942 /* Disable interrupts by clearing the interrupt mask. */ 1968 /* Disable interrupts by clearing the interrupt mask. */
1943 outl(0x0000, ioaddr + imr); 1969 sw32(imr, 0x0000);
1944 outl(0x0000, ioaddr + ier); 1970 sw32(ier, 0x0000);
1945 1971
1946 /* Stop the chip's Tx and Rx Status Machine */ 1972 /* Stop the chip's Tx and Rx Status Machine */
1947 outl(RxDIS | TxDIS | inl(ioaddr + cr), ioaddr + cr); 1973 sw32(cr, RxDIS | TxDIS | sr32(cr));
1948 1974
1949 del_timer(&sis_priv->timer); 1975 del_timer(&sis_priv->timer);
1950 1976
1951 free_irq(net_dev->irq, net_dev); 1977 free_irq(pdev->irq, net_dev);
1952 1978
1953 /* Free Tx and RX skbuff */ 1979 /* Free Tx and RX skbuff */
1954 for (i = 0; i < NUM_RX_DESC; i++) { 1980 for (i = 0; i < NUM_RX_DESC; i++) {
1955 skb = sis_priv->rx_skbuff[i]; 1981 skb = sis_priv->rx_skbuff[i];
1956 if (skb) { 1982 if (skb) {
1957 pci_unmap_single(sis_priv->pci_dev, 1983 pci_unmap_single(pdev, sis_priv->rx_ring[i].bufptr,
1958 sis_priv->rx_ring[i].bufptr, 1984 RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
1959 RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
1960 dev_kfree_skb(skb); 1985 dev_kfree_skb(skb);
1961 sis_priv->rx_skbuff[i] = NULL; 1986 sis_priv->rx_skbuff[i] = NULL;
1962 } 1987 }
@@ -1964,9 +1989,8 @@ static int sis900_close(struct net_device *net_dev)
1964 for (i = 0; i < NUM_TX_DESC; i++) { 1989 for (i = 0; i < NUM_TX_DESC; i++) {
1965 skb = sis_priv->tx_skbuff[i]; 1990 skb = sis_priv->tx_skbuff[i];
1966 if (skb) { 1991 if (skb) {
1967 pci_unmap_single(sis_priv->pci_dev, 1992 pci_unmap_single(pdev, sis_priv->tx_ring[i].bufptr,
1968 sis_priv->tx_ring[i].bufptr, skb->len, 1993 skb->len, PCI_DMA_TODEVICE);
1969 PCI_DMA_TODEVICE);
1970 dev_kfree_skb(skb); 1994 dev_kfree_skb(skb);
1971 sis_priv->tx_skbuff[i] = NULL; 1995 sis_priv->tx_skbuff[i] = NULL;
1972 } 1996 }
@@ -2055,14 +2079,14 @@ static int sis900_nway_reset(struct net_device *net_dev)
2055static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol) 2079static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
2056{ 2080{
2057 struct sis900_private *sis_priv = netdev_priv(net_dev); 2081 struct sis900_private *sis_priv = netdev_priv(net_dev);
2058 long pmctrl_addr = net_dev->base_addr + pmctrl; 2082 void __iomem *ioaddr = sis_priv->ioaddr;
2059 u32 cfgpmcsr = 0, pmctrl_bits = 0; 2083 u32 cfgpmcsr = 0, pmctrl_bits = 0;
2060 2084
2061 if (wol->wolopts == 0) { 2085 if (wol->wolopts == 0) {
2062 pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr); 2086 pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr);
2063 cfgpmcsr &= ~PME_EN; 2087 cfgpmcsr &= ~PME_EN;
2064 pci_write_config_dword(sis_priv->pci_dev, CFGPMCSR, cfgpmcsr); 2088 pci_write_config_dword(sis_priv->pci_dev, CFGPMCSR, cfgpmcsr);
2065 outl(pmctrl_bits, pmctrl_addr); 2089 sw32(pmctrl, pmctrl_bits);
2066 if (netif_msg_wol(sis_priv)) 2090 if (netif_msg_wol(sis_priv))
2067 printk(KERN_DEBUG "%s: Wake on LAN disabled\n", net_dev->name); 2091 printk(KERN_DEBUG "%s: Wake on LAN disabled\n", net_dev->name);
2068 return 0; 2092 return 0;
@@ -2077,7 +2101,7 @@ static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wo
2077 if (wol->wolopts & WAKE_PHY) 2101 if (wol->wolopts & WAKE_PHY)
2078 pmctrl_bits |= LINKON; 2102 pmctrl_bits |= LINKON;
2079 2103
2080 outl(pmctrl_bits, pmctrl_addr); 2104 sw32(pmctrl, pmctrl_bits);
2081 2105
2082 pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr); 2106 pci_read_config_dword(sis_priv->pci_dev, CFGPMCSR, &cfgpmcsr);
2083 cfgpmcsr |= PME_EN; 2107 cfgpmcsr |= PME_EN;
@@ -2090,10 +2114,11 @@ static int sis900_set_wol(struct net_device *net_dev, struct ethtool_wolinfo *wo
2090 2114
2091static void sis900_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol) 2115static void sis900_get_wol(struct net_device *net_dev, struct ethtool_wolinfo *wol)
2092{ 2116{
2093 long pmctrl_addr = net_dev->base_addr + pmctrl; 2117 struct sis900_private *sp = netdev_priv(net_dev);
2118 void __iomem *ioaddr = sp->ioaddr;
2094 u32 pmctrl_bits; 2119 u32 pmctrl_bits;
2095 2120
2096 pmctrl_bits = inl(pmctrl_addr); 2121 pmctrl_bits = sr32(pmctrl);
2097 if (pmctrl_bits & MAGICPKT) 2122 if (pmctrl_bits & MAGICPKT)
2098 wol->wolopts |= WAKE_MAGIC; 2123 wol->wolopts |= WAKE_MAGIC;
2099 if (pmctrl_bits & LINKON) 2124 if (pmctrl_bits & LINKON)
@@ -2279,8 +2304,8 @@ static inline u16 sis900_mcast_bitnr(u8 *addr, u8 revision)
2279 2304
2280static void set_rx_mode(struct net_device *net_dev) 2305static void set_rx_mode(struct net_device *net_dev)
2281{ 2306{
2282 long ioaddr = net_dev->base_addr;
2283 struct sis900_private *sis_priv = netdev_priv(net_dev); 2307 struct sis900_private *sis_priv = netdev_priv(net_dev);
2308 void __iomem *ioaddr = sis_priv->ioaddr;
2284 u16 mc_filter[16] = {0}; /* 256/128 bits multicast hash table */ 2309 u16 mc_filter[16] = {0}; /* 256/128 bits multicast hash table */
2285 int i, table_entries; 2310 int i, table_entries;
2286 u32 rx_mode; 2311 u32 rx_mode;
@@ -2322,24 +2347,24 @@ static void set_rx_mode(struct net_device *net_dev)
2322 /* update Multicast Hash Table in Receive Filter */ 2347 /* update Multicast Hash Table in Receive Filter */
2323 for (i = 0; i < table_entries; i++) { 2348 for (i = 0; i < table_entries; i++) {
2324 /* why plus 0x04 ??, That makes the correct value for hash table. */ 2349 /* why plus 0x04 ??, That makes the correct value for hash table. */
2325 outl((u32)(0x00000004+i) << RFADDR_shift, ioaddr + rfcr); 2350 sw32(rfcr, (u32)(0x00000004 + i) << RFADDR_shift);
2326 outl(mc_filter[i], ioaddr + rfdr); 2351 sw32(rfdr, mc_filter[i]);
2327 } 2352 }
2328 2353
2329 outl(RFEN | rx_mode, ioaddr + rfcr); 2354 sw32(rfcr, RFEN | rx_mode);
2330 2355
2331 /* sis900 is capable of looping back packets at MAC level for 2356 /* sis900 is capable of looping back packets at MAC level for
2332 * debugging purpose */ 2357 * debugging purpose */
2333 if (net_dev->flags & IFF_LOOPBACK) { 2358 if (net_dev->flags & IFF_LOOPBACK) {
2334 u32 cr_saved; 2359 u32 cr_saved;
2335 /* We must disable Tx/Rx before setting loopback mode */ 2360 /* We must disable Tx/Rx before setting loopback mode */
2336 cr_saved = inl(ioaddr + cr); 2361 cr_saved = sr32(cr);
2337 outl(cr_saved | TxDIS | RxDIS, ioaddr + cr); 2362 sw32(cr, cr_saved | TxDIS | RxDIS);
2338 /* enable loopback */ 2363 /* enable loopback */
2339 outl(inl(ioaddr + txcfg) | TxMLB, ioaddr + txcfg); 2364 sw32(txcfg, sr32(txcfg) | TxMLB);
2340 outl(inl(ioaddr + rxcfg) | RxATX, ioaddr + rxcfg); 2365 sw32(rxcfg, sr32(rxcfg) | RxATX);
2341 /* restore cr */ 2366 /* restore cr */
2342 outl(cr_saved, ioaddr + cr); 2367 sw32(cr, cr_saved);
2343 } 2368 }
2344} 2369}
2345 2370
@@ -2355,26 +2380,25 @@ static void set_rx_mode(struct net_device *net_dev)
2355static void sis900_reset(struct net_device *net_dev) 2380static void sis900_reset(struct net_device *net_dev)
2356{ 2381{
2357 struct sis900_private *sis_priv = netdev_priv(net_dev); 2382 struct sis900_private *sis_priv = netdev_priv(net_dev);
2358 long ioaddr = net_dev->base_addr; 2383 void __iomem *ioaddr = sis_priv->ioaddr;
2359 int i = 0;
2360 u32 status = TxRCMP | RxRCMP; 2384 u32 status = TxRCMP | RxRCMP;
2385 int i;
2361 2386
2362 outl(0, ioaddr + ier); 2387 sw32(ier, 0);
2363 outl(0, ioaddr + imr); 2388 sw32(imr, 0);
2364 outl(0, ioaddr + rfcr); 2389 sw32(rfcr, 0);
2365 2390
2366 outl(RxRESET | TxRESET | RESET | inl(ioaddr + cr), ioaddr + cr); 2391 sw32(cr, RxRESET | TxRESET | RESET | sr32(cr));
2367 2392
2368 /* Check that the chip has finished the reset. */ 2393 /* Check that the chip has finished the reset. */
2369 while (status && (i++ < 1000)) { 2394 for (i = 0; status && (i < 1000); i++)
2370 status ^= (inl(isr + ioaddr) & status); 2395 status ^= sr32(isr) & status;
2371 }
2372 2396
2373 if( (sis_priv->chipset_rev >= SIS635A_900_REV) || 2397 if (sis_priv->chipset_rev >= SIS635A_900_REV ||
2374 (sis_priv->chipset_rev == SIS900B_900_REV) ) 2398 sis_priv->chipset_rev == SIS900B_900_REV)
2375 outl(PESEL | RND_CNT, ioaddr + cfg); 2399 sw32(cfg, PESEL | RND_CNT);
2376 else 2400 else
2377 outl(PESEL, ioaddr + cfg); 2401 sw32(cfg, PESEL);
2378} 2402}
2379 2403
2380/** 2404/**
@@ -2388,10 +2412,12 @@ static void __devexit sis900_remove(struct pci_dev *pci_dev)
2388{ 2412{
2389 struct net_device *net_dev = pci_get_drvdata(pci_dev); 2413 struct net_device *net_dev = pci_get_drvdata(pci_dev);
2390 struct sis900_private *sis_priv = netdev_priv(net_dev); 2414 struct sis900_private *sis_priv = netdev_priv(net_dev);
2391 struct mii_phy *phy = NULL; 2415
2416 unregister_netdev(net_dev);
2392 2417
2393 while (sis_priv->first_mii) { 2418 while (sis_priv->first_mii) {
2394 phy = sis_priv->first_mii; 2419 struct mii_phy *phy = sis_priv->first_mii;
2420
2395 sis_priv->first_mii = phy->next; 2421 sis_priv->first_mii = phy->next;
2396 kfree(phy); 2422 kfree(phy);
2397 } 2423 }
@@ -2400,7 +2426,7 @@ static void __devexit sis900_remove(struct pci_dev *pci_dev)
2400 sis_priv->rx_ring_dma); 2426 sis_priv->rx_ring_dma);
2401 pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring, 2427 pci_free_consistent(pci_dev, TX_TOTAL_SIZE, sis_priv->tx_ring,
2402 sis_priv->tx_ring_dma); 2428 sis_priv->tx_ring_dma);
2403 unregister_netdev(net_dev); 2429 pci_iounmap(pci_dev, sis_priv->ioaddr);
2404 free_netdev(net_dev); 2430 free_netdev(net_dev);
2405 pci_release_regions(pci_dev); 2431 pci_release_regions(pci_dev);
2406 pci_set_drvdata(pci_dev, NULL); 2432 pci_set_drvdata(pci_dev, NULL);
@@ -2411,7 +2437,8 @@ static void __devexit sis900_remove(struct pci_dev *pci_dev)
2411static int sis900_suspend(struct pci_dev *pci_dev, pm_message_t state) 2437static int sis900_suspend(struct pci_dev *pci_dev, pm_message_t state)
2412{ 2438{
2413 struct net_device *net_dev = pci_get_drvdata(pci_dev); 2439 struct net_device *net_dev = pci_get_drvdata(pci_dev);
2414 long ioaddr = net_dev->base_addr; 2440 struct sis900_private *sis_priv = netdev_priv(net_dev);
2441 void __iomem *ioaddr = sis_priv->ioaddr;
2415 2442
2416 if(!netif_running(net_dev)) 2443 if(!netif_running(net_dev))
2417 return 0; 2444 return 0;
@@ -2420,7 +2447,7 @@ static int sis900_suspend(struct pci_dev *pci_dev, pm_message_t state)
2420 netif_device_detach(net_dev); 2447 netif_device_detach(net_dev);
2421 2448
2422 /* Stop the chip's Tx and Rx Status Machine */ 2449 /* Stop the chip's Tx and Rx Status Machine */
2423 outl(RxDIS | TxDIS | inl(ioaddr + cr), ioaddr + cr); 2450 sw32(cr, RxDIS | TxDIS | sr32(cr));
2424 2451
2425 pci_set_power_state(pci_dev, PCI_D3hot); 2452 pci_set_power_state(pci_dev, PCI_D3hot);
2426 pci_save_state(pci_dev); 2453 pci_save_state(pci_dev);
@@ -2432,7 +2459,7 @@ static int sis900_resume(struct pci_dev *pci_dev)
2432{ 2459{
2433 struct net_device *net_dev = pci_get_drvdata(pci_dev); 2460 struct net_device *net_dev = pci_get_drvdata(pci_dev);
2434 struct sis900_private *sis_priv = netdev_priv(net_dev); 2461 struct sis900_private *sis_priv = netdev_priv(net_dev);
2435 long ioaddr = net_dev->base_addr; 2462 void __iomem *ioaddr = sis_priv->ioaddr;
2436 2463
2437 if(!netif_running(net_dev)) 2464 if(!netif_running(net_dev))
2438 return 0; 2465 return 0;
@@ -2453,9 +2480,9 @@ static int sis900_resume(struct pci_dev *pci_dev)
2453 sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED); 2480 sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
2454 2481
2455 /* Enable all known interrupts by setting the interrupt mask. */ 2482 /* Enable all known interrupts by setting the interrupt mask. */
2456 outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr); 2483 sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
2457 outl(RxENA | inl(ioaddr + cr), ioaddr + cr); 2484 sw32(cr, RxENA | sr32(cr));
2458 outl(IE, ioaddr + ier); 2485 sw32(ier, IE);
2459 2486
2460 sis900_check_mode(net_dev, sis_priv->mii); 2487 sis900_check_mode(net_dev, sis_priv->mii);
2461 2488
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 2a662e6112e9..d01e59c348ad 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -146,6 +146,12 @@ enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };
146#define EPIC_TOTAL_SIZE 0x100 146#define EPIC_TOTAL_SIZE 0x100
147#define USE_IO_OPS 1 147#define USE_IO_OPS 1
148 148
149#ifdef USE_IO_OPS
150#define EPIC_BAR 0
151#else
152#define EPIC_BAR 1
153#endif
154
149typedef enum { 155typedef enum {
150 SMSC_83C170_0, 156 SMSC_83C170_0,
151 SMSC_83C170, 157 SMSC_83C170,
@@ -176,21 +182,11 @@ static DEFINE_PCI_DEVICE_TABLE(epic_pci_tbl) = {
176}; 182};
177MODULE_DEVICE_TABLE (pci, epic_pci_tbl); 183MODULE_DEVICE_TABLE (pci, epic_pci_tbl);
178 184
179 185#define ew16(reg, val) iowrite16(val, ioaddr + (reg))
180#ifndef USE_IO_OPS 186#define ew32(reg, val) iowrite32(val, ioaddr + (reg))
181#undef inb 187#define er8(reg) ioread8(ioaddr + (reg))
182#undef inw 188#define er16(reg) ioread16(ioaddr + (reg))
183#undef inl 189#define er32(reg) ioread32(ioaddr + (reg))
184#undef outb
185#undef outw
186#undef outl
187#define inb readb
188#define inw readw
189#define inl readl
190#define outb writeb
191#define outw writew
192#define outl writel
193#endif
194 190
195/* Offsets to registers, using the (ugh) SMC names. */ 191/* Offsets to registers, using the (ugh) SMC names. */
196enum epic_registers { 192enum epic_registers {
@@ -275,6 +271,7 @@ struct epic_private {
275 u32 irq_mask; 271 u32 irq_mask;
276 unsigned int rx_buf_sz; /* Based on MTU+slack. */ 272 unsigned int rx_buf_sz; /* Based on MTU+slack. */
277 273
274 void __iomem *ioaddr;
278 struct pci_dev *pci_dev; /* PCI bus location. */ 275 struct pci_dev *pci_dev; /* PCI bus location. */
279 int chip_id, chip_flags; 276 int chip_id, chip_flags;
280 277
@@ -290,7 +287,7 @@ struct epic_private {
290}; 287};
291 288
292static int epic_open(struct net_device *dev); 289static int epic_open(struct net_device *dev);
293static int read_eeprom(long ioaddr, int location); 290static int read_eeprom(struct epic_private *, int);
294static int mdio_read(struct net_device *dev, int phy_id, int location); 291static int mdio_read(struct net_device *dev, int phy_id, int location);
295static void mdio_write(struct net_device *dev, int phy_id, int loc, int val); 292static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
296static void epic_restart(struct net_device *dev); 293static void epic_restart(struct net_device *dev);
@@ -321,11 +318,11 @@ static const struct net_device_ops epic_netdev_ops = {
321 .ndo_validate_addr = eth_validate_addr, 318 .ndo_validate_addr = eth_validate_addr,
322}; 319};
323 320
324static int __devinit epic_init_one (struct pci_dev *pdev, 321static int __devinit epic_init_one(struct pci_dev *pdev,
325 const struct pci_device_id *ent) 322 const struct pci_device_id *ent)
326{ 323{
327 static int card_idx = -1; 324 static int card_idx = -1;
328 long ioaddr; 325 void __iomem *ioaddr;
329 int chip_idx = (int) ent->driver_data; 326 int chip_idx = (int) ent->driver_data;
330 int irq; 327 int irq;
331 struct net_device *dev; 328 struct net_device *dev;
@@ -368,19 +365,15 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
368 365
369 SET_NETDEV_DEV(dev, &pdev->dev); 366 SET_NETDEV_DEV(dev, &pdev->dev);
370 367
371#ifdef USE_IO_OPS 368 ioaddr = pci_iomap(pdev, EPIC_BAR, 0);
372 ioaddr = pci_resource_start (pdev, 0);
373#else
374 ioaddr = pci_resource_start (pdev, 1);
375 ioaddr = (long) pci_ioremap_bar(pdev, 1);
376 if (!ioaddr) { 369 if (!ioaddr) {
377 dev_err(&pdev->dev, "ioremap failed\n"); 370 dev_err(&pdev->dev, "ioremap failed\n");
378 goto err_out_free_netdev; 371 goto err_out_free_netdev;
379 } 372 }
380#endif
381 373
382 pci_set_drvdata(pdev, dev); 374 pci_set_drvdata(pdev, dev);
383 ep = netdev_priv(dev); 375 ep = netdev_priv(dev);
376 ep->ioaddr = ioaddr;
384 ep->mii.dev = dev; 377 ep->mii.dev = dev;
385 ep->mii.mdio_read = mdio_read; 378 ep->mii.mdio_read = mdio_read;
386 ep->mii.mdio_write = mdio_write; 379 ep->mii.mdio_write = mdio_write;
@@ -409,34 +402,31 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
409 duplex = full_duplex[card_idx]; 402 duplex = full_duplex[card_idx];
410 } 403 }
411 404
412 dev->base_addr = ioaddr;
413 dev->irq = irq;
414
415 spin_lock_init(&ep->lock); 405 spin_lock_init(&ep->lock);
416 spin_lock_init(&ep->napi_lock); 406 spin_lock_init(&ep->napi_lock);
417 ep->reschedule_in_poll = 0; 407 ep->reschedule_in_poll = 0;
418 408
419 /* Bring the chip out of low-power mode. */ 409 /* Bring the chip out of low-power mode. */
420 outl(0x4200, ioaddr + GENCTL); 410 ew32(GENCTL, 0x4200);
421 /* Magic?! If we don't set this bit the MII interface won't work. */ 411 /* Magic?! If we don't set this bit the MII interface won't work. */
422 /* This magic is documented in SMSC app note 7.15 */ 412 /* This magic is documented in SMSC app note 7.15 */
423 for (i = 16; i > 0; i--) 413 for (i = 16; i > 0; i--)
424 outl(0x0008, ioaddr + TEST1); 414 ew32(TEST1, 0x0008);
425 415
426 /* Turn on the MII transceiver. */ 416 /* Turn on the MII transceiver. */
427 outl(0x12, ioaddr + MIICfg); 417 ew32(MIICfg, 0x12);
428 if (chip_idx == 1) 418 if (chip_idx == 1)
429 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL); 419 ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
430 outl(0x0200, ioaddr + GENCTL); 420 ew32(GENCTL, 0x0200);
431 421
432 /* Note: the '175 does not have a serial EEPROM. */ 422 /* Note: the '175 does not have a serial EEPROM. */
433 for (i = 0; i < 3; i++) 423 for (i = 0; i < 3; i++)
434 ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(inw(ioaddr + LAN0 + i*4)); 424 ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(er16(LAN0 + i*4));
435 425
436 if (debug > 2) { 426 if (debug > 2) {
437 dev_printk(KERN_DEBUG, &pdev->dev, "EEPROM contents:\n"); 427 dev_printk(KERN_DEBUG, &pdev->dev, "EEPROM contents:\n");
438 for (i = 0; i < 64; i++) 428 for (i = 0; i < 64; i++)
439 printk(" %4.4x%s", read_eeprom(ioaddr, i), 429 printk(" %4.4x%s", read_eeprom(ep, i),
440 i % 16 == 15 ? "\n" : ""); 430 i % 16 == 15 ? "\n" : "");
441 } 431 }
442 432
@@ -481,8 +471,8 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
481 471
482 /* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */ 472 /* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
483 if (ep->chip_flags & MII_PWRDWN) 473 if (ep->chip_flags & MII_PWRDWN)
484 outl(inl(ioaddr + NVCTL) & ~0x483C, ioaddr + NVCTL); 474 ew32(NVCTL, er32(NVCTL) & ~0x483c);
485 outl(0x0008, ioaddr + GENCTL); 475 ew32(GENCTL, 0x0008);
486 476
487 /* The lower four bits are the media type. */ 477 /* The lower four bits are the media type. */
488 if (duplex) { 478 if (duplex) {
@@ -501,8 +491,9 @@ static int __devinit epic_init_one (struct pci_dev *pdev,
501 if (ret < 0) 491 if (ret < 0)
502 goto err_out_unmap_rx; 492 goto err_out_unmap_rx;
503 493
504 printk(KERN_INFO "%s: %s at %#lx, IRQ %d, %pM\n", 494 printk(KERN_INFO "%s: %s at %lx, IRQ %d, %pM\n",
505 dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq, 495 dev->name, pci_id_tbl[chip_idx].name,
496 (long)pci_resource_start(pdev, EPIC_BAR), pdev->irq,
506 dev->dev_addr); 497 dev->dev_addr);
507 498
508out: 499out:
@@ -513,10 +504,8 @@ err_out_unmap_rx:
513err_out_unmap_tx: 504err_out_unmap_tx:
514 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma); 505 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
515err_out_iounmap: 506err_out_iounmap:
516#ifndef USE_IO_OPS 507 pci_iounmap(pdev, ioaddr);
517 iounmap(ioaddr);
518err_out_free_netdev: 508err_out_free_netdev:
519#endif
520 free_netdev(dev); 509 free_netdev(dev);
521err_out_free_res: 510err_out_free_res:
522 pci_release_regions(pdev); 511 pci_release_regions(pdev);
@@ -540,7 +529,7 @@ err_out_disable:
540 This serves to flush the operation to the PCI bus. 529 This serves to flush the operation to the PCI bus.
541 */ 530 */
542 531
543#define eeprom_delay() inl(ee_addr) 532#define eeprom_delay() er32(EECTL)
544 533
545/* The EEPROM commands include the alway-set leading bit. */ 534/* The EEPROM commands include the alway-set leading bit. */
546#define EE_WRITE_CMD (5 << 6) 535#define EE_WRITE_CMD (5 << 6)
@@ -550,67 +539,67 @@ err_out_disable:
550 539
551static void epic_disable_int(struct net_device *dev, struct epic_private *ep) 540static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
552{ 541{
553 long ioaddr = dev->base_addr; 542 void __iomem *ioaddr = ep->ioaddr;
554 543
555 outl(0x00000000, ioaddr + INTMASK); 544 ew32(INTMASK, 0x00000000);
556} 545}
557 546
558static inline void __epic_pci_commit(long ioaddr) 547static inline void __epic_pci_commit(void __iomem *ioaddr)
559{ 548{
560#ifndef USE_IO_OPS 549#ifndef USE_IO_OPS
561 inl(ioaddr + INTMASK); 550 er32(INTMASK);
562#endif 551#endif
563} 552}
564 553
565static inline void epic_napi_irq_off(struct net_device *dev, 554static inline void epic_napi_irq_off(struct net_device *dev,
566 struct epic_private *ep) 555 struct epic_private *ep)
567{ 556{
568 long ioaddr = dev->base_addr; 557 void __iomem *ioaddr = ep->ioaddr;
569 558
570 outl(ep->irq_mask & ~EpicNapiEvent, ioaddr + INTMASK); 559 ew32(INTMASK, ep->irq_mask & ~EpicNapiEvent);
571 __epic_pci_commit(ioaddr); 560 __epic_pci_commit(ioaddr);
572} 561}
573 562
574static inline void epic_napi_irq_on(struct net_device *dev, 563static inline void epic_napi_irq_on(struct net_device *dev,
575 struct epic_private *ep) 564 struct epic_private *ep)
576{ 565{
577 long ioaddr = dev->base_addr; 566 void __iomem *ioaddr = ep->ioaddr;
578 567
579 /* No need to commit possible posted write */ 568 /* No need to commit possible posted write */
580 outl(ep->irq_mask | EpicNapiEvent, ioaddr + INTMASK); 569 ew32(INTMASK, ep->irq_mask | EpicNapiEvent);
581} 570}
582 571
583static int __devinit read_eeprom(long ioaddr, int location) 572static int __devinit read_eeprom(struct epic_private *ep, int location)
584{ 573{
574 void __iomem *ioaddr = ep->ioaddr;
585 int i; 575 int i;
586 int retval = 0; 576 int retval = 0;
587 long ee_addr = ioaddr + EECTL;
588 int read_cmd = location | 577 int read_cmd = location |
589 (inl(ee_addr) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD); 578 (er32(EECTL) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);
590 579
591 outl(EE_ENB & ~EE_CS, ee_addr); 580 ew32(EECTL, EE_ENB & ~EE_CS);
592 outl(EE_ENB, ee_addr); 581 ew32(EECTL, EE_ENB);
593 582
594 /* Shift the read command bits out. */ 583 /* Shift the read command bits out. */
595 for (i = 12; i >= 0; i--) { 584 for (i = 12; i >= 0; i--) {
596 short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0; 585 short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
597 outl(EE_ENB | dataval, ee_addr); 586 ew32(EECTL, EE_ENB | dataval);
598 eeprom_delay(); 587 eeprom_delay();
599 outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr); 588 ew32(EECTL, EE_ENB | dataval | EE_SHIFT_CLK);
600 eeprom_delay(); 589 eeprom_delay();
601 } 590 }
602 outl(EE_ENB, ee_addr); 591 ew32(EECTL, EE_ENB);
603 592
604 for (i = 16; i > 0; i--) { 593 for (i = 16; i > 0; i--) {
605 outl(EE_ENB | EE_SHIFT_CLK, ee_addr); 594 ew32(EECTL, EE_ENB | EE_SHIFT_CLK);
606 eeprom_delay(); 595 eeprom_delay();
607 retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0); 596 retval = (retval << 1) | ((er32(EECTL) & EE_DATA_READ) ? 1 : 0);
608 outl(EE_ENB, ee_addr); 597 ew32(EECTL, EE_ENB);
609 eeprom_delay(); 598 eeprom_delay();
610 } 599 }
611 600
612 /* Terminate the EEPROM access. */ 601 /* Terminate the EEPROM access. */
613 outl(EE_ENB & ~EE_CS, ee_addr); 602 ew32(EECTL, EE_ENB & ~EE_CS);
614 return retval; 603 return retval;
615} 604}
616 605
@@ -618,22 +607,23 @@ static int __devinit read_eeprom(long ioaddr, int location)
618#define MII_WRITEOP 2 607#define MII_WRITEOP 2
619static int mdio_read(struct net_device *dev, int phy_id, int location) 608static int mdio_read(struct net_device *dev, int phy_id, int location)
620{ 609{
621 long ioaddr = dev->base_addr; 610 struct epic_private *ep = netdev_priv(dev);
611 void __iomem *ioaddr = ep->ioaddr;
622 int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP; 612 int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
623 int i; 613 int i;
624 614
625 outl(read_cmd, ioaddr + MIICtrl); 615 ew32(MIICtrl, read_cmd);
626 /* Typical operation takes 25 loops. */ 616 /* Typical operation takes 25 loops. */
627 for (i = 400; i > 0; i--) { 617 for (i = 400; i > 0; i--) {
628 barrier(); 618 barrier();
629 if ((inl(ioaddr + MIICtrl) & MII_READOP) == 0) { 619 if ((er32(MIICtrl) & MII_READOP) == 0) {
630 /* Work around read failure bug. */ 620 /* Work around read failure bug. */
631 if (phy_id == 1 && location < 6 && 621 if (phy_id == 1 && location < 6 &&
632 inw(ioaddr + MIIData) == 0xffff) { 622 er16(MIIData) == 0xffff) {
633 outl(read_cmd, ioaddr + MIICtrl); 623 ew32(MIICtrl, read_cmd);
634 continue; 624 continue;
635 } 625 }
636 return inw(ioaddr + MIIData); 626 return er16(MIIData);
637 } 627 }
638 } 628 }
639 return 0xffff; 629 return 0xffff;
@@ -641,14 +631,15 @@ static int mdio_read(struct net_device *dev, int phy_id, int location)
641 631
642static void mdio_write(struct net_device *dev, int phy_id, int loc, int value) 632static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
643{ 633{
644 long ioaddr = dev->base_addr; 634 struct epic_private *ep = netdev_priv(dev);
635 void __iomem *ioaddr = ep->ioaddr;
645 int i; 636 int i;
646 637
647 outw(value, ioaddr + MIIData); 638 ew16(MIIData, value);
648 outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl); 639 ew32(MIICtrl, (phy_id << 9) | (loc << 4) | MII_WRITEOP);
649 for (i = 10000; i > 0; i--) { 640 for (i = 10000; i > 0; i--) {
650 barrier(); 641 barrier();
651 if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0) 642 if ((er32(MIICtrl) & MII_WRITEOP) == 0)
652 break; 643 break;
653 } 644 }
654} 645}
@@ -657,25 +648,26 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
657static int epic_open(struct net_device *dev) 648static int epic_open(struct net_device *dev)
658{ 649{
659 struct epic_private *ep = netdev_priv(dev); 650 struct epic_private *ep = netdev_priv(dev);
660 long ioaddr = dev->base_addr; 651 void __iomem *ioaddr = ep->ioaddr;
661 int i; 652 const int irq = ep->pci_dev->irq;
662 int retval; 653 int rc, i;
663 654
664 /* Soft reset the chip. */ 655 /* Soft reset the chip. */
665 outl(0x4001, ioaddr + GENCTL); 656 ew32(GENCTL, 0x4001);
666 657
667 napi_enable(&ep->napi); 658 napi_enable(&ep->napi);
668 if ((retval = request_irq(dev->irq, epic_interrupt, IRQF_SHARED, dev->name, dev))) { 659 rc = request_irq(irq, epic_interrupt, IRQF_SHARED, dev->name, dev);
660 if (rc) {
669 napi_disable(&ep->napi); 661 napi_disable(&ep->napi);
670 return retval; 662 return rc;
671 } 663 }
672 664
673 epic_init_ring(dev); 665 epic_init_ring(dev);
674 666
675 outl(0x4000, ioaddr + GENCTL); 667 ew32(GENCTL, 0x4000);
676 /* This magic is documented in SMSC app note 7.15 */ 668 /* This magic is documented in SMSC app note 7.15 */
677 for (i = 16; i > 0; i--) 669 for (i = 16; i > 0; i--)
678 outl(0x0008, ioaddr + TEST1); 670 ew32(TEST1, 0x0008);
679 671
680 /* Pull the chip out of low-power mode, enable interrupts, and set for 672 /* Pull the chip out of low-power mode, enable interrupts, and set for
681 PCI read multiple. The MIIcfg setting and strange write order are 673 PCI read multiple. The MIIcfg setting and strange write order are
@@ -683,29 +675,29 @@ static int epic_open(struct net_device *dev)
683 wiring on the Ositech CardBus card. 675 wiring on the Ositech CardBus card.
684 */ 676 */
685#if 0 677#if 0
686 outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg); 678 ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
687#endif 679#endif
688 if (ep->chip_flags & MII_PWRDWN) 680 if (ep->chip_flags & MII_PWRDWN)
689 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL); 681 ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
690 682
691 /* Tell the chip to byteswap descriptors on big-endian hosts */ 683 /* Tell the chip to byteswap descriptors on big-endian hosts */
692#ifdef __BIG_ENDIAN 684#ifdef __BIG_ENDIAN
693 outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); 685 ew32(GENCTL, 0x4432 | (RX_FIFO_THRESH << 8));
694 inl(ioaddr + GENCTL); 686 er32(GENCTL);
695 outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); 687 ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
696#else 688#else
697 outl(0x4412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); 689 ew32(GENCTL, 0x4412 | (RX_FIFO_THRESH << 8));
698 inl(ioaddr + GENCTL); 690 er32(GENCTL);
699 outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); 691 ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
700#endif 692#endif
701 693
702 udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */ 694 udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */
703 695
704 for (i = 0; i < 3; i++) 696 for (i = 0; i < 3; i++)
705 outl(le16_to_cpu(((__le16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4); 697 ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));
706 698
707 ep->tx_threshold = TX_FIFO_THRESH; 699 ep->tx_threshold = TX_FIFO_THRESH;
708 outl(ep->tx_threshold, ioaddr + TxThresh); 700 ew32(TxThresh, ep->tx_threshold);
709 701
710 if (media2miictl[dev->if_port & 15]) { 702 if (media2miictl[dev->if_port & 15]) {
711 if (ep->mii_phy_cnt) 703 if (ep->mii_phy_cnt)
@@ -731,26 +723,27 @@ static int epic_open(struct net_device *dev)
731 } 723 }
732 } 724 }
733 725
734 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl); 726 ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
735 outl(ep->rx_ring_dma, ioaddr + PRxCDAR); 727 ew32(PRxCDAR, ep->rx_ring_dma);
736 outl(ep->tx_ring_dma, ioaddr + PTxCDAR); 728 ew32(PTxCDAR, ep->tx_ring_dma);
737 729
738 /* Start the chip's Rx process. */ 730 /* Start the chip's Rx process. */
739 set_rx_mode(dev); 731 set_rx_mode(dev);
740 outl(StartRx | RxQueued, ioaddr + COMMAND); 732 ew32(COMMAND, StartRx | RxQueued);
741 733
742 netif_start_queue(dev); 734 netif_start_queue(dev);
743 735
744 /* Enable interrupts by setting the interrupt mask. */ 736 /* Enable interrupts by setting the interrupt mask. */
745 outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170) 737 ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
746 | CntFull | TxUnderrun 738 ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
747 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK); 739 TxUnderrun);
748 740
749 if (debug > 1) 741 if (debug > 1) {
750 printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x " 742 printk(KERN_DEBUG "%s: epic_open() ioaddr %p IRQ %d "
751 "%s-duplex.\n", 743 "status %4.4x %s-duplex.\n",
752 dev->name, ioaddr, dev->irq, (int)inl(ioaddr + GENCTL), 744 dev->name, ioaddr, irq, er32(GENCTL),
753 ep->mii.full_duplex ? "full" : "half"); 745 ep->mii.full_duplex ? "full" : "half");
746 }
754 747
755 /* Set the timer to switch to check for link beat and perhaps switch 748 /* Set the timer to switch to check for link beat and perhaps switch
756 to an alternate media type. */ 749 to an alternate media type. */
@@ -760,27 +753,29 @@ static int epic_open(struct net_device *dev)
760 ep->timer.function = epic_timer; /* timer handler */ 753 ep->timer.function = epic_timer; /* timer handler */
761 add_timer(&ep->timer); 754 add_timer(&ep->timer);
762 755
763 return 0; 756 return rc;
764} 757}
765 758
766/* Reset the chip to recover from a PCI transaction error. 759/* Reset the chip to recover from a PCI transaction error.
767 This may occur at interrupt time. */ 760 This may occur at interrupt time. */
768static void epic_pause(struct net_device *dev) 761static void epic_pause(struct net_device *dev)
769{ 762{
770 long ioaddr = dev->base_addr; 763 struct net_device_stats *stats = &dev->stats;
764 struct epic_private *ep = netdev_priv(dev);
765 void __iomem *ioaddr = ep->ioaddr;
771 766
772 netif_stop_queue (dev); 767 netif_stop_queue (dev);
773 768
774 /* Disable interrupts by clearing the interrupt mask. */ 769 /* Disable interrupts by clearing the interrupt mask. */
775 outl(0x00000000, ioaddr + INTMASK); 770 ew32(INTMASK, 0x00000000);
776 /* Stop the chip's Tx and Rx DMA processes. */ 771 /* Stop the chip's Tx and Rx DMA processes. */
777 outw(StopRx | StopTxDMA | StopRxDMA, ioaddr + COMMAND); 772 ew16(COMMAND, StopRx | StopTxDMA | StopRxDMA);
778 773
779 /* Update the error counts. */ 774 /* Update the error counts. */
780 if (inw(ioaddr + COMMAND) != 0xffff) { 775 if (er16(COMMAND) != 0xffff) {
781 dev->stats.rx_missed_errors += inb(ioaddr + MPCNT); 776 stats->rx_missed_errors += er8(MPCNT);
782 dev->stats.rx_frame_errors += inb(ioaddr + ALICNT); 777 stats->rx_frame_errors += er8(ALICNT);
783 dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT); 778 stats->rx_crc_errors += er8(CRCCNT);
784 } 779 }
785 780
786 /* Remove the packets on the Rx queue. */ 781 /* Remove the packets on the Rx queue. */
@@ -789,12 +784,12 @@ static void epic_pause(struct net_device *dev)
789 784
790static void epic_restart(struct net_device *dev) 785static void epic_restart(struct net_device *dev)
791{ 786{
792 long ioaddr = dev->base_addr;
793 struct epic_private *ep = netdev_priv(dev); 787 struct epic_private *ep = netdev_priv(dev);
788 void __iomem *ioaddr = ep->ioaddr;
794 int i; 789 int i;
795 790
796 /* Soft reset the chip. */ 791 /* Soft reset the chip. */
797 outl(0x4001, ioaddr + GENCTL); 792 ew32(GENCTL, 0x4001);
798 793
799 printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n", 794 printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
800 dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx); 795 dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
@@ -802,47 +797,46 @@ static void epic_restart(struct net_device *dev)
802 797
803 /* This magic is documented in SMSC app note 7.15 */ 798 /* This magic is documented in SMSC app note 7.15 */
804 for (i = 16; i > 0; i--) 799 for (i = 16; i > 0; i--)
805 outl(0x0008, ioaddr + TEST1); 800 ew32(TEST1, 0x0008);
806 801
807#ifdef __BIG_ENDIAN 802#ifdef __BIG_ENDIAN
808 outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); 803 ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
809#else 804#else
810 outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL); 805 ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
811#endif 806#endif
812 outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg); 807 ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
813 if (ep->chip_flags & MII_PWRDWN) 808 if (ep->chip_flags & MII_PWRDWN)
814 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL); 809 ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
815 810
816 for (i = 0; i < 3; i++) 811 for (i = 0; i < 3; i++)
817 outl(le16_to_cpu(((__le16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4); 812 ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));
818 813
819 ep->tx_threshold = TX_FIFO_THRESH; 814 ep->tx_threshold = TX_FIFO_THRESH;
820 outl(ep->tx_threshold, ioaddr + TxThresh); 815 ew32(TxThresh, ep->tx_threshold);
821 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl); 816 ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
822 outl(ep->rx_ring_dma + (ep->cur_rx%RX_RING_SIZE)* 817 ew32(PRxCDAR, ep->rx_ring_dma +
823 sizeof(struct epic_rx_desc), ioaddr + PRxCDAR); 818 (ep->cur_rx % RX_RING_SIZE) * sizeof(struct epic_rx_desc));
824 outl(ep->tx_ring_dma + (ep->dirty_tx%TX_RING_SIZE)* 819 ew32(PTxCDAR, ep->tx_ring_dma +
825 sizeof(struct epic_tx_desc), ioaddr + PTxCDAR); 820 (ep->dirty_tx % TX_RING_SIZE) * sizeof(struct epic_tx_desc));
826 821
827 /* Start the chip's Rx process. */ 822 /* Start the chip's Rx process. */
828 set_rx_mode(dev); 823 set_rx_mode(dev);
829 outl(StartRx | RxQueued, ioaddr + COMMAND); 824 ew32(COMMAND, StartRx | RxQueued);
830 825
831 /* Enable interrupts by setting the interrupt mask. */ 826 /* Enable interrupts by setting the interrupt mask. */
832 outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170) 827 ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
833 | CntFull | TxUnderrun 828 ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
834 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK); 829 TxUnderrun);
835 830
836 printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x" 831 printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
837 " interrupt %4.4x.\n", 832 " interrupt %4.4x.\n",
838 dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL), 833 dev->name, er32(COMMAND), er32(GENCTL), er32(INTSTAT));
839 (int)inl(ioaddr + INTSTAT));
840} 834}
841 835
842static void check_media(struct net_device *dev) 836static void check_media(struct net_device *dev)
843{ 837{
844 struct epic_private *ep = netdev_priv(dev); 838 struct epic_private *ep = netdev_priv(dev);
845 long ioaddr = dev->base_addr; 839 void __iomem *ioaddr = ep->ioaddr;
846 int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0; 840 int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
847 int negotiated = mii_lpa & ep->mii.advertising; 841 int negotiated = mii_lpa & ep->mii.advertising;
848 int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040; 842 int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
@@ -856,7 +850,7 @@ static void check_media(struct net_device *dev)
856 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link" 850 printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
857 " partner capability of %4.4x.\n", dev->name, 851 " partner capability of %4.4x.\n", dev->name,
858 ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa); 852 ep->mii.full_duplex ? "full" : "half", ep->phys[0], mii_lpa);
859 outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl); 853 ew32(TxCtrl, ep->mii.full_duplex ? 0x7F : 0x79);
860 } 854 }
861} 855}
862 856
@@ -864,16 +858,15 @@ static void epic_timer(unsigned long data)
864{ 858{
865 struct net_device *dev = (struct net_device *)data; 859 struct net_device *dev = (struct net_device *)data;
866 struct epic_private *ep = netdev_priv(dev); 860 struct epic_private *ep = netdev_priv(dev);
867 long ioaddr = dev->base_addr; 861 void __iomem *ioaddr = ep->ioaddr;
868 int next_tick = 5*HZ; 862 int next_tick = 5*HZ;
869 863
870 if (debug > 3) { 864 if (debug > 3) {
871 printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n", 865 printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
872 dev->name, (int)inl(ioaddr + TxSTAT)); 866 dev->name, er32(TxSTAT));
873 printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x " 867 printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
874 "IntStatus %4.4x RxStatus %4.4x.\n", 868 "IntStatus %4.4x RxStatus %4.4x.\n", dev->name,
875 dev->name, (int)inl(ioaddr + INTMASK), 869 er32(INTMASK), er32(INTSTAT), er32(RxSTAT));
876 (int)inl(ioaddr + INTSTAT), (int)inl(ioaddr + RxSTAT));
877 } 870 }
878 871
879 check_media(dev); 872 check_media(dev);
@@ -885,23 +878,22 @@ static void epic_timer(unsigned long data)
885static void epic_tx_timeout(struct net_device *dev) 878static void epic_tx_timeout(struct net_device *dev)
886{ 879{
887 struct epic_private *ep = netdev_priv(dev); 880 struct epic_private *ep = netdev_priv(dev);
888 long ioaddr = dev->base_addr; 881 void __iomem *ioaddr = ep->ioaddr;
889 882
890 if (debug > 0) { 883 if (debug > 0) {
891 printk(KERN_WARNING "%s: Transmit timeout using MII device, " 884 printk(KERN_WARNING "%s: Transmit timeout using MII device, "
892 "Tx status %4.4x.\n", 885 "Tx status %4.4x.\n", dev->name, er16(TxSTAT));
893 dev->name, (int)inw(ioaddr + TxSTAT));
894 if (debug > 1) { 886 if (debug > 1) {
895 printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n", 887 printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
896 dev->name, ep->dirty_tx, ep->cur_tx); 888 dev->name, ep->dirty_tx, ep->cur_tx);
897 } 889 }
898 } 890 }
899 if (inw(ioaddr + TxSTAT) & 0x10) { /* Tx FIFO underflow. */ 891 if (er16(TxSTAT) & 0x10) { /* Tx FIFO underflow. */
900 dev->stats.tx_fifo_errors++; 892 dev->stats.tx_fifo_errors++;
901 outl(RestartTx, ioaddr + COMMAND); 893 ew32(COMMAND, RestartTx);
902 } else { 894 } else {
903 epic_restart(dev); 895 epic_restart(dev);
904 outl(TxQueued, dev->base_addr + COMMAND); 896 ew32(COMMAND, TxQueued);
905 } 897 }
906 898
907 dev->trans_start = jiffies; /* prevent tx timeout */ 899 dev->trans_start = jiffies; /* prevent tx timeout */
@@ -959,6 +951,7 @@ static void epic_init_ring(struct net_device *dev)
959static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev) 951static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
960{ 952{
961 struct epic_private *ep = netdev_priv(dev); 953 struct epic_private *ep = netdev_priv(dev);
954 void __iomem *ioaddr = ep->ioaddr;
962 int entry, free_count; 955 int entry, free_count;
963 u32 ctrl_word; 956 u32 ctrl_word;
964 unsigned long flags; 957 unsigned long flags;
@@ -999,13 +992,12 @@ static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
999 992
1000 spin_unlock_irqrestore(&ep->lock, flags); 993 spin_unlock_irqrestore(&ep->lock, flags);
1001 /* Trigger an immediate transmit demand. */ 994 /* Trigger an immediate transmit demand. */
1002 outl(TxQueued, dev->base_addr + COMMAND); 995 ew32(COMMAND, TxQueued);
1003 996
1004 if (debug > 4) 997 if (debug > 4)
1005 printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, " 998 printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
1006 "flag %2.2x Tx status %8.8x.\n", 999 "flag %2.2x Tx status %8.8x.\n", dev->name, skb->len,
1007 dev->name, (int)skb->len, entry, ctrl_word, 1000 entry, ctrl_word, er32(TxSTAT));
1008 (int)inl(dev->base_addr + TxSTAT));
1009 1001
1010 return NETDEV_TX_OK; 1002 return NETDEV_TX_OK;
1011} 1003}
@@ -1086,18 +1078,17 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance)
1086{ 1078{
1087 struct net_device *dev = dev_instance; 1079 struct net_device *dev = dev_instance;
1088 struct epic_private *ep = netdev_priv(dev); 1080 struct epic_private *ep = netdev_priv(dev);
1089 long ioaddr = dev->base_addr; 1081 void __iomem *ioaddr = ep->ioaddr;
1090 unsigned int handled = 0; 1082 unsigned int handled = 0;
1091 int status; 1083 int status;
1092 1084
1093 status = inl(ioaddr + INTSTAT); 1085 status = er32(INTSTAT);
1094 /* Acknowledge all of the current interrupt sources ASAP. */ 1086 /* Acknowledge all of the current interrupt sources ASAP. */
1095 outl(status & EpicNormalEvent, ioaddr + INTSTAT); 1087 ew32(INTSTAT, status & EpicNormalEvent);
1096 1088
1097 if (debug > 4) { 1089 if (debug > 4) {
1098 printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new " 1090 printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
1099 "intstat=%#8.8x.\n", dev->name, status, 1091 "intstat=%#8.8x.\n", dev->name, status, er32(INTSTAT));
1100 (int)inl(ioaddr + INTSTAT));
1101 } 1092 }
1102 1093
1103 if ((status & IntrSummary) == 0) 1094 if ((status & IntrSummary) == 0)
@@ -1118,19 +1109,21 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance)
1118 1109
1119 /* Check uncommon events all at once. */ 1110 /* Check uncommon events all at once. */
1120 if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) { 1111 if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
1112 struct net_device_stats *stats = &dev->stats;
1113
1121 if (status == EpicRemoved) 1114 if (status == EpicRemoved)
1122 goto out; 1115 goto out;
1123 1116
1124 /* Always update the error counts to avoid overhead later. */ 1117 /* Always update the error counts to avoid overhead later. */
1125 dev->stats.rx_missed_errors += inb(ioaddr + MPCNT); 1118 stats->rx_missed_errors += er8(MPCNT);
1126 dev->stats.rx_frame_errors += inb(ioaddr + ALICNT); 1119 stats->rx_frame_errors += er8(ALICNT);
1127 dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT); 1120 stats->rx_crc_errors += er8(CRCCNT);
1128 1121
1129 if (status & TxUnderrun) { /* Tx FIFO underflow. */ 1122 if (status & TxUnderrun) { /* Tx FIFO underflow. */
1130 dev->stats.tx_fifo_errors++; 1123 stats->tx_fifo_errors++;
1131 outl(ep->tx_threshold += 128, ioaddr + TxThresh); 1124 ew32(TxThresh, ep->tx_threshold += 128);
1132 /* Restart the transmit process. */ 1125 /* Restart the transmit process. */
1133 outl(RestartTx, ioaddr + COMMAND); 1126 ew32(COMMAND, RestartTx);
1134 } 1127 }
1135 if (status & PCIBusErr170) { 1128 if (status & PCIBusErr170) {
1136 printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n", 1129 printk(KERN_ERR "%s: PCI Bus Error! status %4.4x.\n",
@@ -1139,7 +1132,7 @@ static irqreturn_t epic_interrupt(int irq, void *dev_instance)
1139 epic_restart(dev); 1132 epic_restart(dev);
1140 } 1133 }
1141 /* Clear all error sources. */ 1134 /* Clear all error sources. */
1142 outl(status & 0x7f18, ioaddr + INTSTAT); 1135 ew32(INTSTAT, status & 0x7f18);
1143 } 1136 }
1144 1137
1145out: 1138out:
@@ -1248,17 +1241,17 @@ static int epic_rx(struct net_device *dev, int budget)
1248 1241
1249static void epic_rx_err(struct net_device *dev, struct epic_private *ep) 1242static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
1250{ 1243{
1251 long ioaddr = dev->base_addr; 1244 void __iomem *ioaddr = ep->ioaddr;
1252 int status; 1245 int status;
1253 1246
1254 status = inl(ioaddr + INTSTAT); 1247 status = er32(INTSTAT);
1255 1248
1256 if (status == EpicRemoved) 1249 if (status == EpicRemoved)
1257 return; 1250 return;
1258 if (status & RxOverflow) /* Missed a Rx frame. */ 1251 if (status & RxOverflow) /* Missed a Rx frame. */
1259 dev->stats.rx_errors++; 1252 dev->stats.rx_errors++;
1260 if (status & (RxOverflow | RxFull)) 1253 if (status & (RxOverflow | RxFull))
1261 outw(RxQueued, ioaddr + COMMAND); 1254 ew16(COMMAND, RxQueued);
1262} 1255}
1263 1256
1264static int epic_poll(struct napi_struct *napi, int budget) 1257static int epic_poll(struct napi_struct *napi, int budget)
@@ -1266,7 +1259,7 @@ static int epic_poll(struct napi_struct *napi, int budget)
1266 struct epic_private *ep = container_of(napi, struct epic_private, napi); 1259 struct epic_private *ep = container_of(napi, struct epic_private, napi);
1267 struct net_device *dev = ep->mii.dev; 1260 struct net_device *dev = ep->mii.dev;
1268 int work_done = 0; 1261 int work_done = 0;
1269 long ioaddr = dev->base_addr; 1262 void __iomem *ioaddr = ep->ioaddr;
1270 1263
1271rx_action: 1264rx_action:
1272 1265
@@ -1287,7 +1280,7 @@ rx_action:
1287 more = ep->reschedule_in_poll; 1280 more = ep->reschedule_in_poll;
1288 if (!more) { 1281 if (!more) {
1289 __napi_complete(napi); 1282 __napi_complete(napi);
1290 outl(EpicNapiEvent, ioaddr + INTSTAT); 1283 ew32(INTSTAT, EpicNapiEvent);
1291 epic_napi_irq_on(dev, ep); 1284 epic_napi_irq_on(dev, ep);
1292 } else 1285 } else
1293 ep->reschedule_in_poll--; 1286 ep->reschedule_in_poll--;
@@ -1303,8 +1296,9 @@ rx_action:
1303 1296
1304static int epic_close(struct net_device *dev) 1297static int epic_close(struct net_device *dev)
1305{ 1298{
1306 long ioaddr = dev->base_addr;
1307 struct epic_private *ep = netdev_priv(dev); 1299 struct epic_private *ep = netdev_priv(dev);
1300 struct pci_dev *pdev = ep->pci_dev;
1301 void __iomem *ioaddr = ep->ioaddr;
1308 struct sk_buff *skb; 1302 struct sk_buff *skb;
1309 int i; 1303 int i;
1310 1304
@@ -1313,13 +1307,13 @@ static int epic_close(struct net_device *dev)
1313 1307
1314 if (debug > 1) 1308 if (debug > 1)
1315 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n", 1309 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
1316 dev->name, (int)inl(ioaddr + INTSTAT)); 1310 dev->name, er32(INTSTAT));
1317 1311
1318 del_timer_sync(&ep->timer); 1312 del_timer_sync(&ep->timer);
1319 1313
1320 epic_disable_int(dev, ep); 1314 epic_disable_int(dev, ep);
1321 1315
1322 free_irq(dev->irq, dev); 1316 free_irq(pdev->irq, dev);
1323 1317
1324 epic_pause(dev); 1318 epic_pause(dev);
1325 1319
@@ -1330,7 +1324,7 @@ static int epic_close(struct net_device *dev)
1330 ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */ 1324 ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */
1331 ep->rx_ring[i].buflength = 0; 1325 ep->rx_ring[i].buflength = 0;
1332 if (skb) { 1326 if (skb) {
1333 pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr, 1327 pci_unmap_single(pdev, ep->rx_ring[i].bufaddr,
1334 ep->rx_buf_sz, PCI_DMA_FROMDEVICE); 1328 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
1335 dev_kfree_skb(skb); 1329 dev_kfree_skb(skb);
1336 } 1330 }
@@ -1341,26 +1335,28 @@ static int epic_close(struct net_device *dev)
1341 ep->tx_skbuff[i] = NULL; 1335 ep->tx_skbuff[i] = NULL;
1342 if (!skb) 1336 if (!skb)
1343 continue; 1337 continue;
1344 pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr, 1338 pci_unmap_single(pdev, ep->tx_ring[i].bufaddr, skb->len,
1345 skb->len, PCI_DMA_TODEVICE); 1339 PCI_DMA_TODEVICE);
1346 dev_kfree_skb(skb); 1340 dev_kfree_skb(skb);
1347 } 1341 }
1348 1342
1349 /* Green! Leave the chip in low-power mode. */ 1343 /* Green! Leave the chip in low-power mode. */
1350 outl(0x0008, ioaddr + GENCTL); 1344 ew32(GENCTL, 0x0008);
1351 1345
1352 return 0; 1346 return 0;
1353} 1347}
1354 1348
1355static struct net_device_stats *epic_get_stats(struct net_device *dev) 1349static struct net_device_stats *epic_get_stats(struct net_device *dev)
1356{ 1350{
1357 long ioaddr = dev->base_addr; 1351 struct epic_private *ep = netdev_priv(dev);
1352 void __iomem *ioaddr = ep->ioaddr;
1358 1353
1359 if (netif_running(dev)) { 1354 if (netif_running(dev)) {
1360 /* Update the error counts. */ 1355 struct net_device_stats *stats = &dev->stats;
1361 dev->stats.rx_missed_errors += inb(ioaddr + MPCNT); 1356
1362 dev->stats.rx_frame_errors += inb(ioaddr + ALICNT); 1357 stats->rx_missed_errors += er8(MPCNT);
1363 dev->stats.rx_crc_errors += inb(ioaddr + CRCCNT); 1358 stats->rx_frame_errors += er8(ALICNT);
1359 stats->rx_crc_errors += er8(CRCCNT);
1364 } 1360 }
1365 1361
1366 return &dev->stats; 1362 return &dev->stats;
@@ -1373,13 +1369,13 @@ static struct net_device_stats *epic_get_stats(struct net_device *dev)
1373 1369
1374static void set_rx_mode(struct net_device *dev) 1370static void set_rx_mode(struct net_device *dev)
1375{ 1371{
1376 long ioaddr = dev->base_addr;
1377 struct epic_private *ep = netdev_priv(dev); 1372 struct epic_private *ep = netdev_priv(dev);
1373 void __iomem *ioaddr = ep->ioaddr;
1378 unsigned char mc_filter[8]; /* Multicast hash filter */ 1374 unsigned char mc_filter[8]; /* Multicast hash filter */
1379 int i; 1375 int i;
1380 1376
1381 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 1377 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1382 outl(0x002C, ioaddr + RxCtrl); 1378 ew32(RxCtrl, 0x002c);
1383 /* Unconditionally log net taps. */ 1379 /* Unconditionally log net taps. */
1384 memset(mc_filter, 0xff, sizeof(mc_filter)); 1380 memset(mc_filter, 0xff, sizeof(mc_filter));
1385 } else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) { 1381 } else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) {
@@ -1387,9 +1383,9 @@ static void set_rx_mode(struct net_device *dev)
1387 is never enabled. */ 1383 is never enabled. */
1388 /* Too many to filter perfectly -- accept all multicasts. */ 1384 /* Too many to filter perfectly -- accept all multicasts. */
1389 memset(mc_filter, 0xff, sizeof(mc_filter)); 1385 memset(mc_filter, 0xff, sizeof(mc_filter));
1390 outl(0x000C, ioaddr + RxCtrl); 1386 ew32(RxCtrl, 0x000c);
1391 } else if (netdev_mc_empty(dev)) { 1387 } else if (netdev_mc_empty(dev)) {
1392 outl(0x0004, ioaddr + RxCtrl); 1388 ew32(RxCtrl, 0x0004);
1393 return; 1389 return;
1394 } else { /* Never executed, for now. */ 1390 } else { /* Never executed, for now. */
1395 struct netdev_hw_addr *ha; 1391 struct netdev_hw_addr *ha;
@@ -1404,7 +1400,7 @@ static void set_rx_mode(struct net_device *dev)
1404 /* ToDo: perhaps we need to stop the Tx and Rx process here? */ 1400 /* ToDo: perhaps we need to stop the Tx and Rx process here? */
1405 if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) { 1401 if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
1406 for (i = 0; i < 4; i++) 1402 for (i = 0; i < 4; i++)
1407 outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4); 1403 ew16(MC0 + i*4, ((u16 *)mc_filter)[i]);
1408 memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter)); 1404 memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
1409 } 1405 }
1410} 1406}
@@ -1466,22 +1462,26 @@ static void netdev_set_msglevel(struct net_device *dev, u32 value)
1466 1462
1467static int ethtool_begin(struct net_device *dev) 1463static int ethtool_begin(struct net_device *dev)
1468{ 1464{
1469 unsigned long ioaddr = dev->base_addr; 1465 struct epic_private *ep = netdev_priv(dev);
1466 void __iomem *ioaddr = ep->ioaddr;
1467
1470 /* power-up, if interface is down */ 1468 /* power-up, if interface is down */
1471 if (! netif_running(dev)) { 1469 if (!netif_running(dev)) {
1472 outl(0x0200, ioaddr + GENCTL); 1470 ew32(GENCTL, 0x0200);
1473 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL); 1471 ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
1474 } 1472 }
1475 return 0; 1473 return 0;
1476} 1474}
1477 1475
1478static void ethtool_complete(struct net_device *dev) 1476static void ethtool_complete(struct net_device *dev)
1479{ 1477{
1480 unsigned long ioaddr = dev->base_addr; 1478 struct epic_private *ep = netdev_priv(dev);
1479 void __iomem *ioaddr = ep->ioaddr;
1480
1481 /* power-down, if interface is down */ 1481 /* power-down, if interface is down */
1482 if (! netif_running(dev)) { 1482 if (!netif_running(dev)) {
1483 outl(0x0008, ioaddr + GENCTL); 1483 ew32(GENCTL, 0x0008);
1484 outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL); 1484 ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
1485 } 1485 }
1486} 1486}
1487 1487
@@ -1500,14 +1500,14 @@ static const struct ethtool_ops netdev_ethtool_ops = {
1500static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1500static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1501{ 1501{
1502 struct epic_private *np = netdev_priv(dev); 1502 struct epic_private *np = netdev_priv(dev);
1503 long ioaddr = dev->base_addr; 1503 void __iomem *ioaddr = np->ioaddr;
1504 struct mii_ioctl_data *data = if_mii(rq); 1504 struct mii_ioctl_data *data = if_mii(rq);
1505 int rc; 1505 int rc;
1506 1506
1507 /* power-up, if interface is down */ 1507 /* power-up, if interface is down */
1508 if (! netif_running(dev)) { 1508 if (! netif_running(dev)) {
1509 outl(0x0200, ioaddr + GENCTL); 1509 ew32(GENCTL, 0x0200);
1510 outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL); 1510 ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
1511 } 1511 }
1512 1512
1513 /* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */ 1513 /* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
@@ -1517,14 +1517,14 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1517 1517
1518 /* power-down, if interface is down */ 1518 /* power-down, if interface is down */
1519 if (! netif_running(dev)) { 1519 if (! netif_running(dev)) {
1520 outl(0x0008, ioaddr + GENCTL); 1520 ew32(GENCTL, 0x0008);
1521 outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL); 1521 ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
1522 } 1522 }
1523 return rc; 1523 return rc;
1524} 1524}
1525 1525
1526 1526
1527static void __devexit epic_remove_one (struct pci_dev *pdev) 1527static void __devexit epic_remove_one(struct pci_dev *pdev)
1528{ 1528{
1529 struct net_device *dev = pci_get_drvdata(pdev); 1529 struct net_device *dev = pci_get_drvdata(pdev);
1530 struct epic_private *ep = netdev_priv(dev); 1530 struct epic_private *ep = netdev_priv(dev);
@@ -1532,9 +1532,7 @@ static void __devexit epic_remove_one (struct pci_dev *pdev)
1532 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma); 1532 pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
1533 pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma); 1533 pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
1534 unregister_netdev(dev); 1534 unregister_netdev(dev);
1535#ifndef USE_IO_OPS 1535 pci_iounmap(pdev, ep->ioaddr);
1536 iounmap((void*) dev->base_addr);
1537#endif
1538 pci_release_regions(pdev); 1536 pci_release_regions(pdev);
1539 free_netdev(dev); 1537 free_netdev(dev);
1540 pci_disable_device(pdev); 1538 pci_disable_device(pdev);
@@ -1548,13 +1546,14 @@ static void __devexit epic_remove_one (struct pci_dev *pdev)
1548static int epic_suspend (struct pci_dev *pdev, pm_message_t state) 1546static int epic_suspend (struct pci_dev *pdev, pm_message_t state)
1549{ 1547{
1550 struct net_device *dev = pci_get_drvdata(pdev); 1548 struct net_device *dev = pci_get_drvdata(pdev);
1551 long ioaddr = dev->base_addr; 1549 struct epic_private *ep = netdev_priv(dev);
1550 void __iomem *ioaddr = ep->ioaddr;
1552 1551
1553 if (!netif_running(dev)) 1552 if (!netif_running(dev))
1554 return 0; 1553 return 0;
1555 epic_pause(dev); 1554 epic_pause(dev);
1556 /* Put the chip into low-power mode. */ 1555 /* Put the chip into low-power mode. */
1557 outl(0x0008, ioaddr + GENCTL); 1556 ew32(GENCTL, 0x0008);
1558 /* pci_power_off(pdev, -1); */ 1557 /* pci_power_off(pdev, -1); */
1559 return 0; 1558 return 0;
1560} 1559}
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index cd3defb11ffb..dab9c6f671ec 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2066,6 +2066,7 @@ static const struct ethtool_ops smsc911x_ethtool_ops = {
2066 .get_eeprom_len = smsc911x_ethtool_get_eeprom_len, 2066 .get_eeprom_len = smsc911x_ethtool_get_eeprom_len,
2067 .get_eeprom = smsc911x_ethtool_get_eeprom, 2067 .get_eeprom = smsc911x_ethtool_get_eeprom,
2068 .set_eeprom = smsc911x_ethtool_set_eeprom, 2068 .set_eeprom = smsc911x_ethtool_set_eeprom,
2069 .get_ts_info = ethtool_op_get_ts_info,
2069}; 2070};
2070 2071
2071static const struct net_device_ops smsc911x_netdev_ops = { 2072static const struct net_device_ops smsc911x_netdev_ops = {
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index 38386478532b..fd33b21f6c96 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -54,7 +54,7 @@ struct smsc9420_ring_info {
54}; 54};
55 55
56struct smsc9420_pdata { 56struct smsc9420_pdata {
57 void __iomem *base_addr; 57 void __iomem *ioaddr;
58 struct pci_dev *pdev; 58 struct pci_dev *pdev;
59 struct net_device *dev; 59 struct net_device *dev;
60 60
@@ -114,13 +114,13 @@ do { if ((pd)->msg_enable & NETIF_MSG_##TYPE) \
114 114
115static inline u32 smsc9420_reg_read(struct smsc9420_pdata *pd, u32 offset) 115static inline u32 smsc9420_reg_read(struct smsc9420_pdata *pd, u32 offset)
116{ 116{
117 return ioread32(pd->base_addr + offset); 117 return ioread32(pd->ioaddr + offset);
118} 118}
119 119
120static inline void 120static inline void
121smsc9420_reg_write(struct smsc9420_pdata *pd, u32 offset, u32 value) 121smsc9420_reg_write(struct smsc9420_pdata *pd, u32 offset, u32 value)
122{ 122{
123 iowrite32(value, pd->base_addr + offset); 123 iowrite32(value, pd->ioaddr + offset);
124} 124}
125 125
126static inline void smsc9420_pci_flush_write(struct smsc9420_pdata *pd) 126static inline void smsc9420_pci_flush_write(struct smsc9420_pdata *pd)
@@ -469,6 +469,7 @@ static const struct ethtool_ops smsc9420_ethtool_ops = {
469 .set_eeprom = smsc9420_ethtool_set_eeprom, 469 .set_eeprom = smsc9420_ethtool_set_eeprom,
470 .get_regs_len = smsc9420_ethtool_getregslen, 470 .get_regs_len = smsc9420_ethtool_getregslen,
471 .get_regs = smsc9420_ethtool_getregs, 471 .get_regs = smsc9420_ethtool_getregs,
472 .get_ts_info = ethtool_op_get_ts_info,
472}; 473};
473 474
474/* Sets the device MAC address to dev_addr */ 475/* Sets the device MAC address to dev_addr */
@@ -659,7 +660,7 @@ static irqreturn_t smsc9420_isr(int irq, void *dev_id)
659 ulong flags; 660 ulong flags;
660 661
661 BUG_ON(!pd); 662 BUG_ON(!pd);
662 BUG_ON(!pd->base_addr); 663 BUG_ON(!pd->ioaddr);
663 664
664 int_cfg = smsc9420_reg_read(pd, INT_CFG); 665 int_cfg = smsc9420_reg_read(pd, INT_CFG);
665 666
@@ -720,9 +721,12 @@ static irqreturn_t smsc9420_isr(int irq, void *dev_id)
720#ifdef CONFIG_NET_POLL_CONTROLLER 721#ifdef CONFIG_NET_POLL_CONTROLLER
721static void smsc9420_poll_controller(struct net_device *dev) 722static void smsc9420_poll_controller(struct net_device *dev)
722{ 723{
723 disable_irq(dev->irq); 724 struct smsc9420_pdata *pd = netdev_priv(dev);
725 const int irq = pd->pdev->irq;
726
727 disable_irq(irq);
724 smsc9420_isr(0, dev); 728 smsc9420_isr(0, dev);
725 enable_irq(dev->irq); 729 enable_irq(irq);
726} 730}
727#endif /* CONFIG_NET_POLL_CONTROLLER */ 731#endif /* CONFIG_NET_POLL_CONTROLLER */
728 732
@@ -759,7 +763,7 @@ static int smsc9420_stop(struct net_device *dev)
759 smsc9420_stop_rx(pd); 763 smsc9420_stop_rx(pd);
760 smsc9420_free_rx_ring(pd); 764 smsc9420_free_rx_ring(pd);
761 765
762 free_irq(dev->irq, pd); 766 free_irq(pd->pdev->irq, pd);
763 767
764 smsc9420_dmac_soft_reset(pd); 768 smsc9420_dmac_soft_reset(pd);
765 769
@@ -1331,15 +1335,12 @@ out:
1331 1335
1332static int smsc9420_open(struct net_device *dev) 1336static int smsc9420_open(struct net_device *dev)
1333{ 1337{
1334 struct smsc9420_pdata *pd; 1338 struct smsc9420_pdata *pd = netdev_priv(dev);
1335 u32 bus_mode, mac_cr, dmac_control, int_cfg, dma_intr_ena, int_ctl; 1339 u32 bus_mode, mac_cr, dmac_control, int_cfg, dma_intr_ena, int_ctl;
1340 const int irq = pd->pdev->irq;
1336 unsigned long flags; 1341 unsigned long flags;
1337 int result = 0, timeout; 1342 int result = 0, timeout;
1338 1343
1339 BUG_ON(!dev);
1340 pd = netdev_priv(dev);
1341 BUG_ON(!pd);
1342
1343 if (!is_valid_ether_addr(dev->dev_addr)) { 1344 if (!is_valid_ether_addr(dev->dev_addr)) {
1344 smsc_warn(IFUP, "dev_addr is not a valid MAC address"); 1345 smsc_warn(IFUP, "dev_addr is not a valid MAC address");
1345 result = -EADDRNOTAVAIL; 1346 result = -EADDRNOTAVAIL;
@@ -1358,9 +1359,10 @@ static int smsc9420_open(struct net_device *dev)
1358 smsc9420_reg_write(pd, INT_STAT, 0xFFFFFFFF); 1359 smsc9420_reg_write(pd, INT_STAT, 0xFFFFFFFF);
1359 smsc9420_pci_flush_write(pd); 1360 smsc9420_pci_flush_write(pd);
1360 1361
1361 if (request_irq(dev->irq, smsc9420_isr, IRQF_SHARED | IRQF_DISABLED, 1362 result = request_irq(irq, smsc9420_isr, IRQF_SHARED | IRQF_DISABLED,
1362 DRV_NAME, pd)) { 1363 DRV_NAME, pd);
1363 smsc_warn(IFUP, "Unable to use IRQ = %d", dev->irq); 1364 if (result) {
1365 smsc_warn(IFUP, "Unable to use IRQ = %d", irq);
1364 result = -ENODEV; 1366 result = -ENODEV;
1365 goto out_0; 1367 goto out_0;
1366 } 1368 }
@@ -1395,7 +1397,7 @@ static int smsc9420_open(struct net_device *dev)
1395 smsc9420_pci_flush_write(pd); 1397 smsc9420_pci_flush_write(pd);
1396 1398
1397 /* test the IRQ connection to the ISR */ 1399 /* test the IRQ connection to the ISR */
1398 smsc_dbg(IFUP, "Testing ISR using IRQ %d", dev->irq); 1400 smsc_dbg(IFUP, "Testing ISR using IRQ %d", irq);
1399 pd->software_irq_signal = false; 1401 pd->software_irq_signal = false;
1400 1402
1401 spin_lock_irqsave(&pd->int_lock, flags); 1403 spin_lock_irqsave(&pd->int_lock, flags);
@@ -1430,7 +1432,7 @@ static int smsc9420_open(struct net_device *dev)
1430 goto out_free_irq_1; 1432 goto out_free_irq_1;
1431 } 1433 }
1432 1434
1433 smsc_dbg(IFUP, "ISR passed test using IRQ %d", dev->irq); 1435 smsc_dbg(IFUP, "ISR passed test using IRQ %d", irq);
1434 1436
1435 result = smsc9420_alloc_tx_ring(pd); 1437 result = smsc9420_alloc_tx_ring(pd);
1436 if (result) { 1438 if (result) {
@@ -1490,7 +1492,7 @@ out_free_rx_ring_3:
1490out_free_tx_ring_2: 1492out_free_tx_ring_2:
1491 smsc9420_free_tx_ring(pd); 1493 smsc9420_free_tx_ring(pd);
1492out_free_irq_1: 1494out_free_irq_1:
1493 free_irq(dev->irq, pd); 1495 free_irq(irq, pd);
1494out_0: 1496out_0:
1495 return result; 1497 return result;
1496} 1498}
@@ -1519,7 +1521,7 @@ static int smsc9420_suspend(struct pci_dev *pdev, pm_message_t state)
1519 smsc9420_stop_rx(pd); 1521 smsc9420_stop_rx(pd);
1520 smsc9420_free_rx_ring(pd); 1522 smsc9420_free_rx_ring(pd);
1521 1523
1522 free_irq(dev->irq, pd); 1524 free_irq(pd->pdev->irq, pd);
1523 1525
1524 netif_device_detach(dev); 1526 netif_device_detach(dev);
1525 } 1527 }
@@ -1552,6 +1554,7 @@ static int smsc9420_resume(struct pci_dev *pdev)
1552 smsc_warn(IFUP, "pci_enable_wake failed: %d", err); 1554 smsc_warn(IFUP, "pci_enable_wake failed: %d", err);
1553 1555
1554 if (netif_running(dev)) { 1556 if (netif_running(dev)) {
1557 /* FIXME: gross. It looks like ancient PM relic.*/
1555 err = smsc9420_open(dev); 1558 err = smsc9420_open(dev);
1556 netif_device_attach(dev); 1559 netif_device_attach(dev);
1557 } 1560 }
@@ -1625,8 +1628,6 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1625 /* registers are double mapped with 0 offset for LE and 0x200 for BE */ 1628 /* registers are double mapped with 0 offset for LE and 0x200 for BE */
1626 virt_addr += LAN9420_CPSR_ENDIAN_OFFSET; 1629 virt_addr += LAN9420_CPSR_ENDIAN_OFFSET;
1627 1630
1628 dev->base_addr = (ulong)virt_addr;
1629
1630 pd = netdev_priv(dev); 1631 pd = netdev_priv(dev);
1631 1632
1632 /* pci descriptors are created in the PCI consistent area */ 1633 /* pci descriptors are created in the PCI consistent area */
@@ -1646,7 +1647,7 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1646 1647
1647 pd->pdev = pdev; 1648 pd->pdev = pdev;
1648 pd->dev = dev; 1649 pd->dev = dev;
1649 pd->base_addr = virt_addr; 1650 pd->ioaddr = virt_addr;
1650 pd->msg_enable = smsc_debug; 1651 pd->msg_enable = smsc_debug;
1651 pd->rx_csum = true; 1652 pd->rx_csum = true;
1652 1653
@@ -1669,7 +1670,6 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1669 1670
1670 dev->netdev_ops = &smsc9420_netdev_ops; 1671 dev->netdev_ops = &smsc9420_netdev_ops;
1671 dev->ethtool_ops = &smsc9420_ethtool_ops; 1672 dev->ethtool_ops = &smsc9420_ethtool_ops;
1672 dev->irq = pdev->irq;
1673 1673
1674 netif_napi_add(dev, &pd->napi, smsc9420_rx_poll, NAPI_WEIGHT); 1674 netif_napi_add(dev, &pd->napi, smsc9420_rx_poll, NAPI_WEIGHT);
1675 1675
@@ -1727,7 +1727,7 @@ static void __devexit smsc9420_remove(struct pci_dev *pdev)
1727 pci_free_consistent(pdev, sizeof(struct smsc9420_dma_desc) * 1727 pci_free_consistent(pdev, sizeof(struct smsc9420_dma_desc) *
1728 (RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr); 1728 (RX_RING_SIZE + TX_RING_SIZE), pd->rx_ring, pd->rx_dma_addr);
1729 1729
1730 iounmap(pd->base_addr - LAN9420_CPSR_ENDIAN_OFFSET); 1730 iounmap(pd->ioaddr - LAN9420_CPSR_ENDIAN_OFFSET);
1731 pci_release_regions(pdev); 1731 pci_release_regions(pdev);
1732 free_netdev(dev); 1732 free_netdev(dev);
1733 pci_disable_device(pdev); 1733 pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 0319d640f728..f5dedcbf4651 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -97,6 +97,16 @@ struct stmmac_extra_stats {
97 unsigned long normal_irq_n; 97 unsigned long normal_irq_n;
98}; 98};
99 99
100/* CSR Frequency Access Defines*/
101#define CSR_F_35M 35000000
102#define CSR_F_60M 60000000
103#define CSR_F_100M 100000000
104#define CSR_F_150M 150000000
105#define CSR_F_250M 250000000
106#define CSR_F_300M 300000000
107
108#define MAC_CSR_H_FRQ_MASK 0x20
109
100#define HASH_TABLE_SIZE 64 110#define HASH_TABLE_SIZE 64
101#define PAUSE_TIME 0x200 111#define PAUSE_TIME 0x200
102 112
@@ -137,6 +147,7 @@ struct stmmac_extra_stats {
137#define DMA_HW_FEAT_FLEXIPPSEN 0x04000000 /* Flexible PPS Output */ 147#define DMA_HW_FEAT_FLEXIPPSEN 0x04000000 /* Flexible PPS Output */
138#define DMA_HW_FEAT_SAVLANINS 0x08000000 /* Source Addr or VLAN Insertion */ 148#define DMA_HW_FEAT_SAVLANINS 0x08000000 /* Source Addr or VLAN Insertion */
139#define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY interface */ 149#define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY interface */
150#define DEFAULT_DMA_PBL 8
140 151
141enum rx_frame_status { /* IPC status */ 152enum rx_frame_status { /* IPC status */
142 good_frame = 0, 153 good_frame = 0,
@@ -228,7 +239,7 @@ struct stmmac_desc_ops {
228 int (*get_rx_owner) (struct dma_desc *p); 239 int (*get_rx_owner) (struct dma_desc *p);
229 void (*set_rx_owner) (struct dma_desc *p); 240 void (*set_rx_owner) (struct dma_desc *p);
230 /* Get the receive frame size */ 241 /* Get the receive frame size */
231 int (*get_rx_frame_len) (struct dma_desc *p); 242 int (*get_rx_frame_len) (struct dma_desc *p, int rx_coe_type);
232 /* Return the reception status looking at the RDES1 */ 243 /* Return the reception status looking at the RDES1 */
233 int (*rx_status) (void *data, struct stmmac_extra_stats *x, 244 int (*rx_status) (void *data, struct stmmac_extra_stats *x,
234 struct dma_desc *p); 245 struct dma_desc *p);
@@ -236,7 +247,8 @@ struct stmmac_desc_ops {
236 247
237struct stmmac_dma_ops { 248struct stmmac_dma_ops {
238 /* DMA core initialization */ 249 /* DMA core initialization */
239 int (*init) (void __iomem *ioaddr, int pbl, u32 dma_tx, u32 dma_rx); 250 int (*init) (void __iomem *ioaddr, int pbl, int fb, int burst_len,
251 u32 dma_tx, u32 dma_rx);
240 /* Dump DMA registers */ 252 /* Dump DMA registers */
241 void (*dump_regs) (void __iomem *ioaddr); 253 void (*dump_regs) (void __iomem *ioaddr);
242 /* Set tx/rx threshold in the csr6 register 254 /* Set tx/rx threshold in the csr6 register
@@ -261,8 +273,8 @@ struct stmmac_dma_ops {
261struct stmmac_ops { 273struct stmmac_ops {
262 /* MAC core initialization */ 274 /* MAC core initialization */
263 void (*core_init) (void __iomem *ioaddr) ____cacheline_aligned; 275 void (*core_init) (void __iomem *ioaddr) ____cacheline_aligned;
264 /* Support checksum offload engine */ 276 /* Enable and verify that the IPC module is supported */
265 int (*rx_coe) (void __iomem *ioaddr); 277 int (*rx_ipc) (void __iomem *ioaddr);
266 /* Dump MAC registers */ 278 /* Dump MAC registers */
267 void (*dump_regs) (void __iomem *ioaddr); 279 void (*dump_regs) (void __iomem *ioaddr);
268 /* Handle extra events on specific interrupts hw dependent */ 280 /* Handle extra events on specific interrupts hw dependent */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index cfcef0ea0fa5..54339a78e358 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -142,7 +142,7 @@ enum rx_tx_priority_ratio {
142#define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */ 142#define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */
143#define DMA_BUS_MODE_RPBL_SHIFT 17 143#define DMA_BUS_MODE_RPBL_SHIFT 17
144#define DMA_BUS_MODE_USP 0x00800000 144#define DMA_BUS_MODE_USP 0x00800000
145#define DMA_BUS_MODE_4PBL 0x01000000 145#define DMA_BUS_MODE_PBL 0x01000000
146#define DMA_BUS_MODE_AAL 0x02000000 146#define DMA_BUS_MODE_AAL 0x02000000
147 147
148/* DMA CRS Control and Status Register Mapping */ 148/* DMA CRS Control and Status Register Mapping */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index b1c48b975945..e7cbcd99c2cb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -46,7 +46,7 @@ static void dwmac1000_core_init(void __iomem *ioaddr)
46#endif 46#endif
47} 47}
48 48
49static int dwmac1000_rx_coe_supported(void __iomem *ioaddr) 49static int dwmac1000_rx_ipc_enable(void __iomem *ioaddr)
50{ 50{
51 u32 value = readl(ioaddr + GMAC_CONTROL); 51 u32 value = readl(ioaddr + GMAC_CONTROL);
52 52
@@ -211,7 +211,7 @@ static void dwmac1000_irq_status(void __iomem *ioaddr)
211 211
212static const struct stmmac_ops dwmac1000_ops = { 212static const struct stmmac_ops dwmac1000_ops = {
213 .core_init = dwmac1000_core_init, 213 .core_init = dwmac1000_core_init,
214 .rx_coe = dwmac1000_rx_coe_supported, 214 .rx_ipc = dwmac1000_rx_ipc_enable,
215 .dump_regs = dwmac1000_dump_regs, 215 .dump_regs = dwmac1000_dump_regs,
216 .host_irq_status = dwmac1000_irq_status, 216 .host_irq_status = dwmac1000_irq_status,
217 .set_filter = dwmac1000_set_filter, 217 .set_filter = dwmac1000_set_filter,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index 4d5402a1d262..3675c5731565 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -30,8 +30,8 @@
30#include "dwmac1000.h" 30#include "dwmac1000.h"
31#include "dwmac_dma.h" 31#include "dwmac_dma.h"
32 32
33static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx, 33static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb,
34 u32 dma_rx) 34 int burst_len, u32 dma_tx, u32 dma_rx)
35{ 35{
36 u32 value = readl(ioaddr + DMA_BUS_MODE); 36 u32 value = readl(ioaddr + DMA_BUS_MODE);
37 int limit; 37 int limit;
@@ -48,15 +48,47 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
48 if (limit < 0) 48 if (limit < 0)
49 return -EBUSY; 49 return -EBUSY;
50 50
51 value = /* DMA_BUS_MODE_FB | */ DMA_BUS_MODE_4PBL | 51 /*
52 ((pbl << DMA_BUS_MODE_PBL_SHIFT) | 52 * Set the DMA PBL (Programmable Burst Length) mode
53 (pbl << DMA_BUS_MODE_RPBL_SHIFT)); 53 * Before stmmac core 3.50 this mode bit was 4xPBL, and
54 * post 3.5 mode bit acts as 8*PBL.
55 * For core rev < 3.5, when the core is set for 4xPBL mode, the
56 * DMA transfers the data in 4, 8, 16, 32, 64 & 128 beats
57 * depending on pbl value.
58 * For core rev > 3.5, when the core is set for 8xPBL mode, the
59 * DMA transfers the data in 8, 16, 32, 64, 128 & 256 beats
60 * depending on pbl value.
61 */
62 value = DMA_BUS_MODE_PBL | ((pbl << DMA_BUS_MODE_PBL_SHIFT) |
63 (pbl << DMA_BUS_MODE_RPBL_SHIFT));
64
65 /* Set the Fixed burst mode */
66 if (fb)
67 value |= DMA_BUS_MODE_FB;
54 68
55#ifdef CONFIG_STMMAC_DA 69#ifdef CONFIG_STMMAC_DA
56 value |= DMA_BUS_MODE_DA; /* Rx has priority over tx */ 70 value |= DMA_BUS_MODE_DA; /* Rx has priority over tx */
57#endif 71#endif
58 writel(value, ioaddr + DMA_BUS_MODE); 72 writel(value, ioaddr + DMA_BUS_MODE);
59 73
74 /* In case of GMAC AXI configuration, program the DMA_AXI_BUS_MODE
75 * for supported bursts.
76 *
77 * Note: This is applicable only for revision GMACv3.61a. For
78 * older version this register is reserved and shall have no
79 * effect.
80 *
81 * Note:
82 * For Fixed Burst Mode: if we directly write 0xFF to this
83 * register using the configurations pass from platform code,
84 * this would ensure that all bursts supported by core are set
85 * and those which are not supported would remain ineffective.
86 *
87 * For Non Fixed Burst Mode: provide the maximum value of the
88 * burst length. Any burst equal or below the provided burst
89 * length would be allowed to perform. */
90 writel(burst_len, ioaddr + DMA_AXI_BUS_MODE);
91
60 /* Mask interrupts by writing to CSR7 */ 92 /* Mask interrupts by writing to CSR7 */
61 writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); 93 writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
62 94
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 138fb8dd1e87..efde50ff03f8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -43,11 +43,6 @@ static void dwmac100_core_init(void __iomem *ioaddr)
43#endif 43#endif
44} 44}
45 45
46static int dwmac100_rx_coe_supported(void __iomem *ioaddr)
47{
48 return 0;
49}
50
51static void dwmac100_dump_mac_regs(void __iomem *ioaddr) 46static void dwmac100_dump_mac_regs(void __iomem *ioaddr)
52{ 47{
53 pr_info("\t----------------------------------------------\n" 48 pr_info("\t----------------------------------------------\n"
@@ -72,6 +67,11 @@ static void dwmac100_dump_mac_regs(void __iomem *ioaddr)
72 readl(ioaddr + MAC_VLAN2)); 67 readl(ioaddr + MAC_VLAN2));
73} 68}
74 69
70static int dwmac100_rx_ipc_enable(void __iomem *ioaddr)
71{
72 return 0;
73}
74
75static void dwmac100_irq_status(void __iomem *ioaddr) 75static void dwmac100_irq_status(void __iomem *ioaddr)
76{ 76{
77 return; 77 return;
@@ -160,7 +160,7 @@ static void dwmac100_pmt(void __iomem *ioaddr, unsigned long mode)
160 160
161static const struct stmmac_ops dwmac100_ops = { 161static const struct stmmac_ops dwmac100_ops = {
162 .core_init = dwmac100_core_init, 162 .core_init = dwmac100_core_init,
163 .rx_coe = dwmac100_rx_coe_supported, 163 .rx_ipc = dwmac100_rx_ipc_enable,
164 .dump_regs = dwmac100_dump_mac_regs, 164 .dump_regs = dwmac100_dump_mac_regs,
165 .host_irq_status = dwmac100_irq_status, 165 .host_irq_status = dwmac100_irq_status,
166 .set_filter = dwmac100_set_filter, 166 .set_filter = dwmac100_set_filter,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index bc17fd08b55d..92ed2e07609e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -32,8 +32,8 @@
32#include "dwmac100.h" 32#include "dwmac100.h"
33#include "dwmac_dma.h" 33#include "dwmac_dma.h"
34 34
35static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx, 35static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb,
36 u32 dma_rx) 36 int burst_len, u32 dma_tx, u32 dma_rx)
37{ 37{
38 u32 value = readl(ioaddr + DMA_BUS_MODE); 38 u32 value = readl(ioaddr + DMA_BUS_MODE);
39 int limit; 39 int limit;
@@ -52,7 +52,7 @@ static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, u32 dma_tx,
52 52
53 /* Enable Application Access by writing to DMA CSR0 */ 53 /* Enable Application Access by writing to DMA CSR0 */
54 writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT), 54 writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
55 ioaddr + DMA_BUS_MODE); 55 ioaddr + DMA_BUS_MODE);
56 56
57 /* Mask interrupts by writing to CSR7 */ 57 /* Mask interrupts by writing to CSR7 */
58 writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA); 58 writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 437edacd602e..6e0360f9cfde 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -32,6 +32,7 @@
32#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */ 32#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
33#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */ 33#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
34#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */ 34#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
35#define DMA_AXI_BUS_MODE 0x00001028 /* AXI Bus Mode */
35#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */ 36#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
36#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */ 37#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
37#define DMA_HW_FEATURE 0x00001058 /* HW Feature Register */ 38#define DMA_HW_FEATURE 0x00001058 /* HW Feature Register */
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index ad1b627f8ec2..2fc8ef95f97a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -22,6 +22,7 @@
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/ 23*******************************************************************************/
24 24
25#include <linux/stmmac.h>
25#include "common.h" 26#include "common.h"
26#include "descs_com.h" 27#include "descs_com.h"
27 28
@@ -309,9 +310,17 @@ static void enh_desc_close_tx_desc(struct dma_desc *p)
309 p->des01.etx.interrupt = 1; 310 p->des01.etx.interrupt = 1;
310} 311}
311 312
312static int enh_desc_get_rx_frame_len(struct dma_desc *p) 313static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
313{ 314{
314 return p->des01.erx.frame_length; 315 /* The type-1 checksum offload engines append the checksum at
316 * the end of frame and the two bytes of checksum are added in
317 * the length.
318 * Adjust for that in the framelen for type-1 checksum offload
319 * engines. */
320 if (rx_coe_type == STMMAC_RX_COE_TYPE1)
321 return p->des01.erx.frame_length - 2;
322 else
323 return p->des01.erx.frame_length;
315} 324}
316 325
317const struct stmmac_desc_ops enh_desc_ops = { 326const struct stmmac_desc_ops enh_desc_ops = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index 25953bb45a73..68962c549a2d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -22,6 +22,7 @@
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/ 23*******************************************************************************/
24 24
25#include <linux/stmmac.h>
25#include "common.h" 26#include "common.h"
26#include "descs_com.h" 27#include "descs_com.h"
27 28
@@ -201,9 +202,17 @@ static void ndesc_close_tx_desc(struct dma_desc *p)
201 p->des01.tx.interrupt = 1; 202 p->des01.tx.interrupt = 1;
202} 203}
203 204
204static int ndesc_get_rx_frame_len(struct dma_desc *p) 205static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
205{ 206{
206 return p->des01.rx.frame_length; 207 /* The type-1 checksum offload engines append the checksum at
208 * the end of frame and the two bytes of checksum are added in
209 * the length.
210 * Adjust for that in the framelen for type-1 checksum offload
211 * engines. */
212 if (rx_coe_type == STMMAC_RX_COE_TYPE1)
213 return p->des01.rx.frame_length - 2;
214 else
215 return p->des01.rx.frame_length;
207} 216}
208 217
209const struct stmmac_desc_ops ndesc_ops = { 218const struct stmmac_desc_ops ndesc_ops = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index b4b095fdcf29..db2de9a49952 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -21,7 +21,9 @@
21*******************************************************************************/ 21*******************************************************************************/
22 22
23#define STMMAC_RESOURCE_NAME "stmmaceth" 23#define STMMAC_RESOURCE_NAME "stmmaceth"
24#define DRV_MODULE_VERSION "Feb_2012" 24#define DRV_MODULE_VERSION "March_2012"
25
26#include <linux/clk.h>
25#include <linux/stmmac.h> 27#include <linux/stmmac.h>
26#include <linux/phy.h> 28#include <linux/phy.h>
27#include "common.h" 29#include "common.h"
@@ -56,8 +58,6 @@ struct stmmac_priv {
56 58
57 struct stmmac_extra_stats xstats; 59 struct stmmac_extra_stats xstats;
58 struct napi_struct napi; 60 struct napi_struct napi;
59
60 int rx_coe;
61 int no_csum_insertion; 61 int no_csum_insertion;
62 62
63 struct phy_device *phydev; 63 struct phy_device *phydev;
@@ -81,6 +81,10 @@ struct stmmac_priv {
81 struct stmmac_counters mmc; 81 struct stmmac_counters mmc;
82 struct dma_features dma_cap; 82 struct dma_features dma_cap;
83 int hw_cap_support; 83 int hw_cap_support;
84#ifdef CONFIG_HAVE_CLK
85 struct clk *stmmac_clk;
86#endif
87 int clk_csr;
84}; 88};
85 89
86extern int phyaddr; 90extern int phyaddr;
@@ -99,3 +103,42 @@ int stmmac_dvr_remove(struct net_device *ndev);
99struct stmmac_priv *stmmac_dvr_probe(struct device *device, 103struct stmmac_priv *stmmac_dvr_probe(struct device *device,
100 struct plat_stmmacenet_data *plat_dat, 104 struct plat_stmmacenet_data *plat_dat,
101 void __iomem *addr); 105 void __iomem *addr);
106
107#ifdef CONFIG_HAVE_CLK
108static inline int stmmac_clk_enable(struct stmmac_priv *priv)
109{
110 if (!IS_ERR(priv->stmmac_clk))
111 return clk_enable(priv->stmmac_clk);
112
113 return 0;
114}
115
116static inline void stmmac_clk_disable(struct stmmac_priv *priv)
117{
118 if (IS_ERR(priv->stmmac_clk))
119 return;
120
121 clk_disable(priv->stmmac_clk);
122}
123static inline int stmmac_clk_get(struct stmmac_priv *priv)
124{
125 priv->stmmac_clk = clk_get(priv->device, NULL);
126
127 if (IS_ERR(priv->stmmac_clk))
128 return PTR_ERR(priv->stmmac_clk);
129
130 return 0;
131}
132#else
133static inline int stmmac_clk_enable(struct stmmac_priv *priv)
134{
135 return 0;
136}
137static inline void stmmac_clk_disable(struct stmmac_priv *priv)
138{
139}
140static inline int stmmac_clk_get(struct stmmac_priv *priv)
141{
142 return 0;
143}
144#endif /* CONFIG_HAVE_CLK */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index f98e1511660f..ce431846fc6f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -481,6 +481,7 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
481 .get_wol = stmmac_get_wol, 481 .get_wol = stmmac_get_wol,
482 .set_wol = stmmac_set_wol, 482 .set_wol = stmmac_set_wol,
483 .get_sset_count = stmmac_get_sset_count, 483 .get_sset_count = stmmac_get_sset_count,
484 .get_ts_info = ethtool_op_get_ts_info,
484}; 485};
485 486
486void stmmac_set_ethtool_ops(struct net_device *netdev) 487void stmmac_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 48d56da62f08..1a4cf8128f91 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -163,6 +163,38 @@ static void stmmac_verify_args(void)
163 pause = PAUSE_TIME; 163 pause = PAUSE_TIME;
164} 164}
165 165
166static void stmmac_clk_csr_set(struct stmmac_priv *priv)
167{
168#ifdef CONFIG_HAVE_CLK
169 u32 clk_rate;
170
171 if (IS_ERR(priv->stmmac_clk))
172 return;
173
174 clk_rate = clk_get_rate(priv->stmmac_clk);
175
176 /* Platform provided default clk_csr would be assumed valid
177 * for all other cases except for the below mentioned ones. */
178 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
179 if (clk_rate < CSR_F_35M)
180 priv->clk_csr = STMMAC_CSR_20_35M;
181 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
182 priv->clk_csr = STMMAC_CSR_35_60M;
183 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
184 priv->clk_csr = STMMAC_CSR_60_100M;
185 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
186 priv->clk_csr = STMMAC_CSR_100_150M;
187 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
188 priv->clk_csr = STMMAC_CSR_150_250M;
189 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
190 priv->clk_csr = STMMAC_CSR_250_300M;
191 } /* For values higher than the IEEE 802.3 specified frequency
192 * we can not estimate the proper divider as it is not known
193 * the frequency of clk_csr_i. So we do not change the default
194 * divider. */
195#endif
196}
197
166#if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG) 198#if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG)
167static void print_pkt(unsigned char *buf, int len) 199static void print_pkt(unsigned char *buf, int len)
168{ 200{
@@ -307,7 +339,13 @@ static int stmmac_init_phy(struct net_device *dev)
307 priv->speed = 0; 339 priv->speed = 0;
308 priv->oldduplex = -1; 340 priv->oldduplex = -1;
309 341
310 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x", priv->plat->bus_id); 342 if (priv->plat->phy_bus_name)
343 snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
344 priv->plat->phy_bus_name, priv->plat->bus_id);
345 else
346 snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
347 priv->plat->bus_id);
348
311 snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id, 349 snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
312 priv->plat->phy_addr); 350 priv->plat->phy_addr);
313 pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id); 351 pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
@@ -884,6 +922,24 @@ static void stmmac_check_ether_addr(struct stmmac_priv *priv)
884 priv->dev->dev_addr); 922 priv->dev->dev_addr);
885} 923}
886 924
925static int stmmac_init_dma_engine(struct stmmac_priv *priv)
926{
927 int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_len = 0;
928
929 /* Some DMA parameters can be passed from the platform;
930 * in case of these are not passed we keep a default
931 * (good for all the chips) and init the DMA! */
932 if (priv->plat->dma_cfg) {
933 pbl = priv->plat->dma_cfg->pbl;
934 fixed_burst = priv->plat->dma_cfg->fixed_burst;
935 burst_len = priv->plat->dma_cfg->burst_len;
936 }
937
938 return priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst,
939 burst_len, priv->dma_tx_phy,
940 priv->dma_rx_phy);
941}
942
887/** 943/**
888 * stmmac_open - open entry point of the driver 944 * stmmac_open - open entry point of the driver
889 * @dev : pointer to the device structure. 945 * @dev : pointer to the device structure.
@@ -898,16 +954,6 @@ static int stmmac_open(struct net_device *dev)
898 struct stmmac_priv *priv = netdev_priv(dev); 954 struct stmmac_priv *priv = netdev_priv(dev);
899 int ret; 955 int ret;
900 956
901 stmmac_check_ether_addr(priv);
902
903 /* MDIO bus Registration */
904 ret = stmmac_mdio_register(dev);
905 if (ret < 0) {
906 pr_debug("%s: MDIO bus (id: %d) registration failed",
907 __func__, priv->plat->bus_id);
908 return ret;
909 }
910
911#ifdef CONFIG_STMMAC_TIMER 957#ifdef CONFIG_STMMAC_TIMER
912 priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL); 958 priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
913 if (unlikely(priv->tm == NULL)) 959 if (unlikely(priv->tm == NULL))
@@ -925,6 +971,10 @@ static int stmmac_open(struct net_device *dev)
925 } else 971 } else
926 priv->tm->enable = 1; 972 priv->tm->enable = 1;
927#endif 973#endif
974 stmmac_clk_enable(priv);
975
976 stmmac_check_ether_addr(priv);
977
928 ret = stmmac_init_phy(dev); 978 ret = stmmac_init_phy(dev);
929 if (unlikely(ret)) { 979 if (unlikely(ret)) {
930 pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret); 980 pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret);
@@ -938,8 +988,7 @@ static int stmmac_open(struct net_device *dev)
938 init_dma_desc_rings(dev); 988 init_dma_desc_rings(dev);
939 989
940 /* DMA initialization and SW reset */ 990 /* DMA initialization and SW reset */
941 ret = priv->hw->dma->init(priv->ioaddr, priv->plat->pbl, 991 ret = stmmac_init_dma_engine(priv);
942 priv->dma_tx_phy, priv->dma_rx_phy);
943 if (ret < 0) { 992 if (ret < 0) {
944 pr_err("%s: DMA initialization failed\n", __func__); 993 pr_err("%s: DMA initialization failed\n", __func__);
945 goto open_error; 994 goto open_error;
@@ -1026,6 +1075,8 @@ open_error:
1026 if (priv->phydev) 1075 if (priv->phydev)
1027 phy_disconnect(priv->phydev); 1076 phy_disconnect(priv->phydev);
1028 1077
1078 stmmac_clk_disable(priv);
1079
1029 return ret; 1080 return ret;
1030} 1081}
1031 1082
@@ -1077,7 +1128,7 @@ static int stmmac_release(struct net_device *dev)
1077#ifdef CONFIG_STMMAC_DEBUG_FS 1128#ifdef CONFIG_STMMAC_DEBUG_FS
1078 stmmac_exit_fs(); 1129 stmmac_exit_fs();
1079#endif 1130#endif
1080 stmmac_mdio_unregister(dev); 1131 stmmac_clk_disable(priv);
1081 1132
1082 return 0; 1133 return 0;
1083} 1134}
@@ -1276,7 +1327,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
1276 struct sk_buff *skb; 1327 struct sk_buff *skb;
1277 int frame_len; 1328 int frame_len;
1278 1329
1279 frame_len = priv->hw->desc->get_rx_frame_len(p); 1330 frame_len = priv->hw->desc->get_rx_frame_len(p,
1331 priv->plat->rx_coe);
1280 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 1332 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
1281 * Type frames (LLC/LLC-SNAP) */ 1333 * Type frames (LLC/LLC-SNAP) */
1282 if (unlikely(status != llc_snap)) 1334 if (unlikely(status != llc_snap))
@@ -1312,7 +1364,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
1312#endif 1364#endif
1313 skb->protocol = eth_type_trans(skb, priv->dev); 1365 skb->protocol = eth_type_trans(skb, priv->dev);
1314 1366
1315 if (unlikely(!priv->rx_coe)) { 1367 if (unlikely(!priv->plat->rx_coe)) {
1316 /* No RX COE for old mac10/100 devices */ 1368 /* No RX COE for old mac10/100 devices */
1317 skb_checksum_none_assert(skb); 1369 skb_checksum_none_assert(skb);
1318 netif_receive_skb(skb); 1370 netif_receive_skb(skb);
@@ -1459,8 +1511,10 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev,
1459{ 1511{
1460 struct stmmac_priv *priv = netdev_priv(dev); 1512 struct stmmac_priv *priv = netdev_priv(dev);
1461 1513
1462 if (!priv->rx_coe) 1514 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
1463 features &= ~NETIF_F_RXCSUM; 1515 features &= ~NETIF_F_RXCSUM;
1516 else if (priv->plat->rx_coe == STMMAC_RX_COE_TYPE1)
1517 features &= ~NETIF_F_IPV6_CSUM;
1464 if (!priv->plat->tx_coe) 1518 if (!priv->plat->tx_coe)
1465 features &= ~NETIF_F_ALL_CSUM; 1519 features &= ~NETIF_F_ALL_CSUM;
1466 1520
@@ -1765,17 +1819,32 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
1765 * register (if supported). 1819 * register (if supported).
1766 */ 1820 */
1767 priv->plat->enh_desc = priv->dma_cap.enh_desc; 1821 priv->plat->enh_desc = priv->dma_cap.enh_desc;
1768 priv->plat->tx_coe = priv->dma_cap.tx_coe;
1769 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up; 1822 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
1823
1824 priv->plat->tx_coe = priv->dma_cap.tx_coe;
1825
1826 if (priv->dma_cap.rx_coe_type2)
1827 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
1828 else if (priv->dma_cap.rx_coe_type1)
1829 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
1830
1770 } else 1831 } else
1771 pr_info(" No HW DMA feature register supported"); 1832 pr_info(" No HW DMA feature register supported");
1772 1833
1773 /* Select the enhnaced/normal descriptor structures */ 1834 /* Select the enhnaced/normal descriptor structures */
1774 stmmac_selec_desc_mode(priv); 1835 stmmac_selec_desc_mode(priv);
1775 1836
1776 priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr); 1837 /* Enable the IPC (Checksum Offload) and check if the feature has been
1777 if (priv->rx_coe) 1838 * enabled during the core configuration. */
1778 pr_info(" RX Checksum Offload Engine supported\n"); 1839 ret = priv->hw->mac->rx_ipc(priv->ioaddr);
1840 if (!ret) {
1841 pr_warning(" RX IPC Checksum Offload not configured.\n");
1842 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
1843 }
1844
1845 if (priv->plat->rx_coe)
1846 pr_info(" RX Checksum Offload Engine supported (type %d)\n",
1847 priv->plat->rx_coe);
1779 if (priv->plat->tx_coe) 1848 if (priv->plat->tx_coe)
1780 pr_info(" TX Checksum insertion supported\n"); 1849 pr_info(" TX Checksum insertion supported\n");
1781 1850
@@ -1856,6 +1925,28 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
1856 goto error; 1925 goto error;
1857 } 1926 }
1858 1927
1928 if (stmmac_clk_get(priv))
1929 pr_warning("%s: warning: cannot get CSR clock\n", __func__);
1930
1931 /* If a specific clk_csr value is passed from the platform
1932 * this means that the CSR Clock Range selection cannot be
1933 * changed at run-time and it is fixed. Viceversa the driver'll try to
1934 * set the MDC clock dynamically according to the csr actual
1935 * clock input.
1936 */
1937 if (!priv->plat->clk_csr)
1938 stmmac_clk_csr_set(priv);
1939 else
1940 priv->clk_csr = priv->plat->clk_csr;
1941
1942 /* MDIO bus Registration */
1943 ret = stmmac_mdio_register(ndev);
1944 if (ret < 0) {
1945 pr_debug("%s: MDIO bus (id: %d) registration failed",
1946 __func__, priv->plat->bus_id);
1947 goto error;
1948 }
1949
1859 return priv; 1950 return priv;
1860 1951
1861error: 1952error:
@@ -1883,6 +1974,7 @@ int stmmac_dvr_remove(struct net_device *ndev)
1883 priv->hw->dma->stop_tx(priv->ioaddr); 1974 priv->hw->dma->stop_tx(priv->ioaddr);
1884 1975
1885 stmmac_set_mac(priv->ioaddr, false); 1976 stmmac_set_mac(priv->ioaddr, false);
1977 stmmac_mdio_unregister(ndev);
1886 netif_carrier_off(ndev); 1978 netif_carrier_off(ndev);
1887 unregister_netdev(ndev); 1979 unregister_netdev(ndev);
1888 free_netdev(ndev); 1980 free_netdev(ndev);
@@ -1925,9 +2017,11 @@ int stmmac_suspend(struct net_device *ndev)
1925 /* Enable Power down mode by programming the PMT regs */ 2017 /* Enable Power down mode by programming the PMT regs */
1926 if (device_may_wakeup(priv->device)) 2018 if (device_may_wakeup(priv->device))
1927 priv->hw->mac->pmt(priv->ioaddr, priv->wolopts); 2019 priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
1928 else 2020 else {
1929 stmmac_set_mac(priv->ioaddr, false); 2021 stmmac_set_mac(priv->ioaddr, false);
1930 2022 /* Disable clock in case of PWM is off */
2023 stmmac_clk_disable(priv);
2024 }
1931 spin_unlock(&priv->lock); 2025 spin_unlock(&priv->lock);
1932 return 0; 2026 return 0;
1933} 2027}
@@ -1948,6 +2042,9 @@ int stmmac_resume(struct net_device *ndev)
1948 * from another devices (e.g. serial console). */ 2042 * from another devices (e.g. serial console). */
1949 if (device_may_wakeup(priv->device)) 2043 if (device_may_wakeup(priv->device))
1950 priv->hw->mac->pmt(priv->ioaddr, 0); 2044 priv->hw->mac->pmt(priv->ioaddr, 0);
2045 else
2046 /* enable the clk prevously disabled */
2047 stmmac_clk_enable(priv);
1951 2048
1952 netif_device_attach(ndev); 2049 netif_device_attach(ndev);
1953 2050
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 73195329aa46..ade108232048 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -34,6 +34,22 @@
34#define MII_BUSY 0x00000001 34#define MII_BUSY 0x00000001
35#define MII_WRITE 0x00000002 35#define MII_WRITE 0x00000002
36 36
37static int stmmac_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_addr)
38{
39 unsigned long curr;
40 unsigned long finish = jiffies + 3 * HZ;
41
42 do {
43 curr = jiffies;
44 if (readl(ioaddr + mii_addr) & MII_BUSY)
45 cpu_relax();
46 else
47 return 0;
48 } while (!time_after_eq(curr, finish));
49
50 return -EBUSY;
51}
52
37/** 53/**
38 * stmmac_mdio_read 54 * stmmac_mdio_read
39 * @bus: points to the mii_bus structure 55 * @bus: points to the mii_bus structure
@@ -54,11 +70,15 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
54 int data; 70 int data;
55 u16 regValue = (((phyaddr << 11) & (0x0000F800)) | 71 u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
56 ((phyreg << 6) & (0x000007C0))); 72 ((phyreg << 6) & (0x000007C0)));
57 regValue |= MII_BUSY | ((priv->plat->clk_csr & 7) << 2); 73 regValue |= MII_BUSY | ((priv->clk_csr & 0xF) << 2);
74
75 if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
76 return -EBUSY;
58 77
59 do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
60 writel(regValue, priv->ioaddr + mii_address); 78 writel(regValue, priv->ioaddr + mii_address);
61 do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1); 79
80 if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
81 return -EBUSY;
62 82
63 /* Read the data from the MII data register */ 83 /* Read the data from the MII data register */
64 data = (int)readl(priv->ioaddr + mii_data); 84 data = (int)readl(priv->ioaddr + mii_data);
@@ -86,20 +106,18 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
86 (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0))) 106 (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
87 | MII_WRITE; 107 | MII_WRITE;
88 108
89 value |= MII_BUSY | ((priv->plat->clk_csr & 7) << 2); 109 value |= MII_BUSY | ((priv->clk_csr & 0xF) << 2);
90
91 110
92 /* Wait until any existing MII operation is complete */ 111 /* Wait until any existing MII operation is complete */
93 do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1); 112 if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
113 return -EBUSY;
94 114
95 /* Set the MII address register to write */ 115 /* Set the MII address register to write */
96 writel(phydata, priv->ioaddr + mii_data); 116 writel(phydata, priv->ioaddr + mii_data);
97 writel(value, priv->ioaddr + mii_address); 117 writel(value, priv->ioaddr + mii_address);
98 118
99 /* Wait until any existing MII operation is complete */ 119 /* Wait until any existing MII operation is complete */
100 do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1); 120 return stmmac_mdio_busy_wait(priv->ioaddr, mii_address);
101
102 return 0;
103} 121}
104 122
105/** 123/**
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index da66ed7c3c5d..58fab5303e9c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -28,6 +28,7 @@
28 28
29struct plat_stmmacenet_data plat_dat; 29struct plat_stmmacenet_data plat_dat;
30struct stmmac_mdio_bus_data mdio_data; 30struct stmmac_mdio_bus_data mdio_data;
31struct stmmac_dma_cfg dma_cfg;
31 32
32static void stmmac_default_data(void) 33static void stmmac_default_data(void)
33{ 34{
@@ -35,7 +36,6 @@ static void stmmac_default_data(void)
35 plat_dat.bus_id = 1; 36 plat_dat.bus_id = 1;
36 plat_dat.phy_addr = 0; 37 plat_dat.phy_addr = 0;
37 plat_dat.interface = PHY_INTERFACE_MODE_GMII; 38 plat_dat.interface = PHY_INTERFACE_MODE_GMII;
38 plat_dat.pbl = 32;
39 plat_dat.clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */ 39 plat_dat.clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
40 plat_dat.has_gmac = 1; 40 plat_dat.has_gmac = 1;
41 plat_dat.force_sf_dma_mode = 1; 41 plat_dat.force_sf_dma_mode = 1;
@@ -44,6 +44,10 @@ static void stmmac_default_data(void)
44 mdio_data.phy_reset = NULL; 44 mdio_data.phy_reset = NULL;
45 mdio_data.phy_mask = 0; 45 mdio_data.phy_mask = 0;
46 plat_dat.mdio_bus_data = &mdio_data; 46 plat_dat.mdio_bus_data = &mdio_data;
47
48 dma_cfg.pbl = 32;
49 dma_cfg.burst_len = DMA_AXI_BLEN_256;
50 plat_dat.dma_cfg = &dma_cfg;
47} 51}
48 52
49/** 53/**
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 116529a366b2..3dd8f0803808 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -50,7 +50,6 @@ static int __devinit stmmac_probe_config_dt(struct platform_device *pdev,
50 * once needed on other platforms. 50 * once needed on other platforms.
51 */ 51 */
52 if (of_device_is_compatible(np, "st,spear600-gmac")) { 52 if (of_device_is_compatible(np, "st,spear600-gmac")) {
53 plat->pbl = 8;
54 plat->has_gmac = 1; 53 plat->has_gmac = 1;
55 plat->pmt = 1; 54 plat->pmt = 1;
56 } 55 }
@@ -189,9 +188,6 @@ static int stmmac_pltfr_remove(struct platform_device *pdev)
189 if (priv->plat->exit) 188 if (priv->plat->exit)
190 priv->plat->exit(pdev); 189 priv->plat->exit(pdev);
191 190
192 if (priv->plat->exit)
193 priv->plat->exit(pdev);
194
195 platform_set_drvdata(pdev, NULL); 191 platform_set_drvdata(pdev, NULL);
196 192
197 iounmap((void *)priv->ioaddr); 193 iounmap((void *)priv->ioaddr);
@@ -218,14 +214,26 @@ static int stmmac_pltfr_resume(struct device *dev)
218 214
219int stmmac_pltfr_freeze(struct device *dev) 215int stmmac_pltfr_freeze(struct device *dev)
220{ 216{
217 int ret;
218 struct plat_stmmacenet_data *plat_dat = dev_get_platdata(dev);
221 struct net_device *ndev = dev_get_drvdata(dev); 219 struct net_device *ndev = dev_get_drvdata(dev);
220 struct platform_device *pdev = to_platform_device(dev);
222 221
223 return stmmac_freeze(ndev); 222 ret = stmmac_freeze(ndev);
223 if (plat_dat->exit)
224 plat_dat->exit(pdev);
225
226 return ret;
224} 227}
225 228
226int stmmac_pltfr_restore(struct device *dev) 229int stmmac_pltfr_restore(struct device *dev)
227{ 230{
231 struct plat_stmmacenet_data *plat_dat = dev_get_platdata(dev);
228 struct net_device *ndev = dev_get_drvdata(dev); 232 struct net_device *ndev = dev_get_drvdata(dev);
233 struct platform_device *pdev = to_platform_device(dev);
234
235 if (plat_dat->init)
236 plat_dat->init(pdev);
229 237
230 return stmmac_restore(ndev); 238 return stmmac_restore(ndev);
231} 239}
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index c99b3b0e2eae..703c8cce2a2c 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9838,7 +9838,7 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
9838 goto err_out_release_parent; 9838 goto err_out_release_parent;
9839 } 9839 }
9840 } 9840 }
9841 if (err || dma_mask == DMA_BIT_MASK(32)) { 9841 if (err) {
9842 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 9842 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
9843 if (err) { 9843 if (err) {
9844 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); 9844 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 4ba969096717..3cf4ab755838 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -401,7 +401,7 @@ static int gem_rxmac_reset(struct gem *gp)
401 return 1; 401 return 1;
402 } 402 }
403 403
404 udelay(5000); 404 mdelay(5);
405 405
406 /* Execute RX reset command. */ 406 /* Execute RX reset command. */
407 writel(gp->swrst_base | GREG_SWRST_RXRST, 407 writel(gp->swrst_base | GREG_SWRST_RXRST,
@@ -2898,7 +2898,6 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
2898 } 2898 }
2899 2899
2900 gp->pdev = pdev; 2900 gp->pdev = pdev;
2901 dev->base_addr = (long) pdev;
2902 gp->dev = dev; 2901 gp->dev = dev;
2903 2902
2904 gp->msg_enable = DEFAULT_MSG; 2903 gp->msg_enable = DEFAULT_MSG;
@@ -2972,7 +2971,6 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
2972 netif_napi_add(dev, &gp->napi, gem_poll, 64); 2971 netif_napi_add(dev, &gp->napi, gem_poll, 64);
2973 dev->ethtool_ops = &gem_ethtool_ops; 2972 dev->ethtool_ops = &gem_ethtool_ops;
2974 dev->watchdog_timeo = 5 * HZ; 2973 dev->watchdog_timeo = 5 * HZ;
2975 dev->irq = pdev->irq;
2976 dev->dma = 0; 2974 dev->dma = 0;
2977 2975
2978 /* Set that now, in case PM kicks in now */ 2976 /* Set that now, in case PM kicks in now */
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index b95e7e681b38..dfc00c4683e5 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2182,11 +2182,12 @@ static int happy_meal_open(struct net_device *dev)
2182 * into a single source which we register handling at probe time. 2182 * into a single source which we register handling at probe time.
2183 */ 2183 */
2184 if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) { 2184 if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) {
2185 if (request_irq(dev->irq, happy_meal_interrupt, 2185 res = request_irq(hp->irq, happy_meal_interrupt, IRQF_SHARED,
2186 IRQF_SHARED, dev->name, (void *)dev)) { 2186 dev->name, dev);
2187 if (res) {
2187 HMD(("EAGAIN\n")); 2188 HMD(("EAGAIN\n"));
2188 printk(KERN_ERR "happy_meal(SBUS): Can't order irq %d to go.\n", 2189 printk(KERN_ERR "happy_meal(SBUS): Can't order irq %d to go.\n",
2189 dev->irq); 2190 hp->irq);
2190 2191
2191 return -EAGAIN; 2192 return -EAGAIN;
2192 } 2193 }
@@ -2199,7 +2200,7 @@ static int happy_meal_open(struct net_device *dev)
2199 spin_unlock_irq(&hp->happy_lock); 2200 spin_unlock_irq(&hp->happy_lock);
2200 2201
2201 if (res && ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO)) 2202 if (res && ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO))
2202 free_irq(dev->irq, dev); 2203 free_irq(hp->irq, dev);
2203 return res; 2204 return res;
2204} 2205}
2205 2206
@@ -2221,7 +2222,7 @@ static int happy_meal_close(struct net_device *dev)
2221 * time and never unregister. 2222 * time and never unregister.
2222 */ 2223 */
2223 if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) 2224 if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO)
2224 free_irq(dev->irq, dev); 2225 free_irq(hp->irq, dev);
2225 2226
2226 return 0; 2227 return 0;
2227} 2228}
@@ -2777,7 +2778,7 @@ static int __devinit happy_meal_sbus_probe_one(struct platform_device *op, int i
2777 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM; 2778 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
2778 dev->features |= dev->hw_features | NETIF_F_RXCSUM; 2779 dev->features |= dev->hw_features | NETIF_F_RXCSUM;
2779 2780
2780 dev->irq = op->archdata.irqs[0]; 2781 hp->irq = op->archdata.irqs[0];
2781 2782
2782#if defined(CONFIG_SBUS) && defined(CONFIG_PCI) 2783#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
2783 /* Hook up SBUS register/descriptor accessors. */ 2784 /* Hook up SBUS register/descriptor accessors. */
@@ -2981,8 +2982,6 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
2981 if (hme_version_printed++ == 0) 2982 if (hme_version_printed++ == 0)
2982 printk(KERN_INFO "%s", version); 2983 printk(KERN_INFO "%s", version);
2983 2984
2984 dev->base_addr = (long) pdev;
2985
2986 hp = netdev_priv(dev); 2985 hp = netdev_priv(dev);
2987 2986
2988 hp->happy_dev = pdev; 2987 hp->happy_dev = pdev;
@@ -3087,12 +3086,11 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
3087 3086
3088 init_timer(&hp->happy_timer); 3087 init_timer(&hp->happy_timer);
3089 3088
3089 hp->irq = pdev->irq;
3090 hp->dev = dev; 3090 hp->dev = dev;
3091 dev->netdev_ops = &hme_netdev_ops; 3091 dev->netdev_ops = &hme_netdev_ops;
3092 dev->watchdog_timeo = 5*HZ; 3092 dev->watchdog_timeo = 5*HZ;
3093 dev->ethtool_ops = &hme_ethtool_ops; 3093 dev->ethtool_ops = &hme_ethtool_ops;
3094 dev->irq = pdev->irq;
3095 dev->dma = 0;
3096 3094
3097 /* Happy Meal can do it all... */ 3095 /* Happy Meal can do it all... */
3098 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM; 3096 dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
diff --git a/drivers/net/ethernet/sun/sunhme.h b/drivers/net/ethernet/sun/sunhme.h
index 64f278360d89..f4307654e4ae 100644
--- a/drivers/net/ethernet/sun/sunhme.h
+++ b/drivers/net/ethernet/sun/sunhme.h
@@ -432,6 +432,7 @@ struct happy_meal {
432 432
433 dma_addr_t hblock_dvma; /* DVMA visible address happy block */ 433 dma_addr_t hblock_dvma; /* DVMA visible address happy block */
434 unsigned int happy_flags; /* Driver state flags */ 434 unsigned int happy_flags; /* Driver state flags */
435 int irq;
435 enum happy_transceiver tcvr_type; /* Kind of transceiver in use */ 436 enum happy_transceiver tcvr_type; /* Kind of transceiver in use */
436 unsigned int happy_bursts; /* Get your mind out of the gutter */ 437 unsigned int happy_bursts; /* Get your mind out of the gutter */
437 unsigned int paddr; /* PHY address for transceiver */ 438 unsigned int paddr; /* PHY address for transceiver */
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index ad973ffc9ff3..8846516678c3 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1317,7 +1317,7 @@ static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
1317 1317
1318static void print_rxfd(struct rxf_desc *rxfd) 1318static void print_rxfd(struct rxf_desc *rxfd)
1319{ 1319{
1320 DBG("=== RxF desc CHIP ORDER/ENDIANESS =============\n" 1320 DBG("=== RxF desc CHIP ORDER/ENDIANNESS =============\n"
1321 "info 0x%x va_lo %u pa_lo 0x%x pa_hi 0x%x len 0x%x\n", 1321 "info 0x%x va_lo %u pa_lo 0x%x pa_hi 0x%x len 0x%x\n",
1322 rxfd->info, rxfd->va_lo, rxfd->pa_lo, rxfd->pa_hi, rxfd->len); 1322 rxfd->info, rxfd->va_lo, rxfd->pa_lo, rxfd->pa_hi, rxfd->len);
1323} 1323}
@@ -1988,10 +1988,6 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1988 /* these fields are used for info purposes only 1988 /* these fields are used for info purposes only
1989 * so we can have them same for all ports of the board */ 1989 * so we can have them same for all ports of the board */
1990 ndev->if_port = port; 1990 ndev->if_port = port;
1991 ndev->base_addr = pciaddr;
1992 ndev->mem_start = pciaddr;
1993 ndev->mem_end = pciaddr + regionSize;
1994 ndev->irq = pdev->irq;
1995 ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO 1991 ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO
1996 | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | 1992 | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
1997 NETIF_F_HW_VLAN_FILTER | NETIF_F_RXCSUM 1993 NETIF_F_HW_VLAN_FILTER | NETIF_F_RXCSUM
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 34558766cbf0..d614c374ed9d 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -92,7 +92,7 @@ enum cpdma_state {
92 CPDMA_STATE_TEARDOWN, 92 CPDMA_STATE_TEARDOWN,
93}; 93};
94 94
95const char *cpdma_state_str[] = { "idle", "active", "teardown" }; 95static const char *cpdma_state_str[] = { "idle", "active", "teardown" };
96 96
97struct cpdma_ctlr { 97struct cpdma_ctlr {
98 enum cpdma_state state; 98 enum cpdma_state state;
@@ -276,6 +276,7 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
276 ctlr->num_chan = CPDMA_MAX_CHANNELS; 276 ctlr->num_chan = CPDMA_MAX_CHANNELS;
277 return ctlr; 277 return ctlr;
278} 278}
279EXPORT_SYMBOL_GPL(cpdma_ctlr_create);
279 280
280int cpdma_ctlr_start(struct cpdma_ctlr *ctlr) 281int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
281{ 282{
@@ -321,6 +322,7 @@ int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
321 spin_unlock_irqrestore(&ctlr->lock, flags); 322 spin_unlock_irqrestore(&ctlr->lock, flags);
322 return 0; 323 return 0;
323} 324}
325EXPORT_SYMBOL_GPL(cpdma_ctlr_start);
324 326
325int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr) 327int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
326{ 328{
@@ -351,6 +353,7 @@ int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
351 spin_unlock_irqrestore(&ctlr->lock, flags); 353 spin_unlock_irqrestore(&ctlr->lock, flags);
352 return 0; 354 return 0;
353} 355}
356EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);
354 357
355int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr) 358int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
356{ 359{
@@ -421,6 +424,7 @@ int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
421 spin_unlock_irqrestore(&ctlr->lock, flags); 424 spin_unlock_irqrestore(&ctlr->lock, flags);
422 return 0; 425 return 0;
423} 426}
427EXPORT_SYMBOL_GPL(cpdma_ctlr_dump);
424 428
425int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr) 429int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
426{ 430{
@@ -444,6 +448,7 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
444 kfree(ctlr); 448 kfree(ctlr);
445 return ret; 449 return ret;
446} 450}
451EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);
447 452
448int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable) 453int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
449{ 454{
@@ -528,6 +533,7 @@ err_chan_busy:
528err_chan_alloc: 533err_chan_alloc:
529 return ERR_PTR(ret); 534 return ERR_PTR(ret);
530} 535}
536EXPORT_SYMBOL_GPL(cpdma_chan_create);
531 537
532int cpdma_chan_destroy(struct cpdma_chan *chan) 538int cpdma_chan_destroy(struct cpdma_chan *chan)
533{ 539{
@@ -545,6 +551,7 @@ int cpdma_chan_destroy(struct cpdma_chan *chan)
545 kfree(chan); 551 kfree(chan);
546 return 0; 552 return 0;
547} 553}
554EXPORT_SYMBOL_GPL(cpdma_chan_destroy);
548 555
549int cpdma_chan_get_stats(struct cpdma_chan *chan, 556int cpdma_chan_get_stats(struct cpdma_chan *chan,
550 struct cpdma_chan_stats *stats) 557 struct cpdma_chan_stats *stats)
@@ -693,6 +700,7 @@ unlock_ret:
693 spin_unlock_irqrestore(&chan->lock, flags); 700 spin_unlock_irqrestore(&chan->lock, flags);
694 return ret; 701 return ret;
695} 702}
703EXPORT_SYMBOL_GPL(cpdma_chan_submit);
696 704
697static void __cpdma_chan_free(struct cpdma_chan *chan, 705static void __cpdma_chan_free(struct cpdma_chan *chan,
698 struct cpdma_desc __iomem *desc, 706 struct cpdma_desc __iomem *desc,
@@ -776,6 +784,7 @@ int cpdma_chan_process(struct cpdma_chan *chan, int quota)
776 } 784 }
777 return used; 785 return used;
778} 786}
787EXPORT_SYMBOL_GPL(cpdma_chan_process);
779 788
780int cpdma_chan_start(struct cpdma_chan *chan) 789int cpdma_chan_start(struct cpdma_chan *chan)
781{ 790{
@@ -803,6 +812,7 @@ int cpdma_chan_start(struct cpdma_chan *chan)
803 spin_unlock_irqrestore(&chan->lock, flags); 812 spin_unlock_irqrestore(&chan->lock, flags);
804 return 0; 813 return 0;
805} 814}
815EXPORT_SYMBOL_GPL(cpdma_chan_start);
806 816
807int cpdma_chan_stop(struct cpdma_chan *chan) 817int cpdma_chan_stop(struct cpdma_chan *chan)
808{ 818{
@@ -863,6 +873,7 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
863 spin_unlock_irqrestore(&chan->lock, flags); 873 spin_unlock_irqrestore(&chan->lock, flags);
864 return 0; 874 return 0;
865} 875}
876EXPORT_SYMBOL_GPL(cpdma_chan_stop);
866 877
867int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable) 878int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
868{ 879{
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 08aff1a2087c..4da93a5d7ec6 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -627,6 +627,7 @@ static const struct ethtool_ops ethtool_ops = {
627 .get_link = ethtool_op_get_link, 627 .get_link = ethtool_op_get_link,
628 .get_coalesce = emac_get_coalesce, 628 .get_coalesce = emac_get_coalesce,
629 .set_coalesce = emac_set_coalesce, 629 .set_coalesce = emac_set_coalesce,
630 .get_ts_info = ethtool_op_get_ts_info,
630}; 631};
631 632
632/** 633/**
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index efd36691ce54..3e6abf0f2771 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -2545,7 +2545,7 @@ static void tlan_phy_reset(struct net_device *dev)
2545 2545
2546 phy = priv->phy[priv->phy_num]; 2546 phy = priv->phy[priv->phy_num];
2547 2547
2548 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Reseting PHY.\n", dev->name); 2548 TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Resetting PHY.\n", dev->name);
2549 tlan_mii_sync(dev->base_addr); 2549 tlan_mii_sync(dev->base_addr);
2550 value = MII_GC_LOOPBK | MII_GC_RESET; 2550 value = MII_GC_LOOPBK | MII_GC_RESET;
2551 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value); 2551 tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value);
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index fcfa01f7ceb6..0459c096629f 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -689,9 +689,12 @@ static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
689#ifdef CONFIG_NET_POLL_CONTROLLER 689#ifdef CONFIG_NET_POLL_CONTROLLER
690static void rhine_poll(struct net_device *dev) 690static void rhine_poll(struct net_device *dev)
691{ 691{
692 disable_irq(dev->irq); 692 struct rhine_private *rp = netdev_priv(dev);
693 rhine_interrupt(dev->irq, (void *)dev); 693 const int irq = rp->pdev->irq;
694 enable_irq(dev->irq); 694
695 disable_irq(irq);
696 rhine_interrupt(irq, dev);
697 enable_irq(irq);
695} 698}
696#endif 699#endif
697 700
@@ -972,7 +975,6 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
972 } 975 }
973#endif /* USE_MMIO */ 976#endif /* USE_MMIO */
974 977
975 dev->base_addr = (unsigned long)ioaddr;
976 rp->base = ioaddr; 978 rp->base = ioaddr;
977 979
978 /* Get chip registers into a sane state */ 980 /* Get chip registers into a sane state */
@@ -995,8 +997,6 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
995 if (!phy_id) 997 if (!phy_id)
996 phy_id = ioread8(ioaddr + 0x6C); 998 phy_id = ioread8(ioaddr + 0x6C);
997 999
998 dev->irq = pdev->irq;
999
1000 spin_lock_init(&rp->lock); 1000 spin_lock_init(&rp->lock);
1001 mutex_init(&rp->task_lock); 1001 mutex_init(&rp->task_lock);
1002 INIT_WORK(&rp->reset_task, rhine_reset_task); 1002 INIT_WORK(&rp->reset_task, rhine_reset_task);
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 8a5d7c100a5e..ea3e0a21ba74 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2488,8 +2488,8 @@ static int velocity_close(struct net_device *dev)
2488 2488
2489 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED) 2489 if (vptr->flags & VELOCITY_FLAGS_WOL_ENABLED)
2490 velocity_get_ip(vptr); 2490 velocity_get_ip(vptr);
2491 if (dev->irq != 0) 2491
2492 free_irq(dev->irq, dev); 2492 free_irq(vptr->pdev->irq, dev);
2493 2493
2494 velocity_free_rings(vptr); 2494 velocity_free_rings(vptr);
2495 2495
@@ -2755,8 +2755,6 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
2755 if (ret < 0) 2755 if (ret < 0)
2756 goto err_free_dev; 2756 goto err_free_dev;
2757 2757
2758 dev->irq = pdev->irq;
2759
2760 ret = velocity_get_pci_info(vptr, pdev); 2758 ret = velocity_get_pci_info(vptr, pdev);
2761 if (ret < 0) { 2759 if (ret < 0) {
2762 /* error message already printed */ 2760 /* error message already printed */
@@ -2779,8 +2777,6 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
2779 2777
2780 mac_wol_reset(regs); 2778 mac_wol_reset(regs);
2781 2779
2782 dev->base_addr = vptr->ioaddr;
2783
2784 for (i = 0; i < 6; i++) 2780 for (i = 0; i < 6; i++)
2785 dev->dev_addr[i] = readb(&regs->PAR[i]); 2781 dev->dev_addr[i] = readb(&regs->PAR[i]);
2786 2782
@@ -2806,7 +2802,6 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
2806 2802
2807 vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs); 2803 vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);
2808 2804
2809 dev->irq = pdev->irq;
2810 dev->netdev_ops = &velocity_netdev_ops; 2805 dev->netdev_ops = &velocity_netdev_ops;
2811 dev->ethtool_ops = &velocity_ethtool_ops; 2806 dev->ethtool_ops = &velocity_ethtool_ops;
2812 netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT); 2807 netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
diff --git a/drivers/net/ethernet/wiznet/Kconfig b/drivers/net/ethernet/wiznet/Kconfig
new file mode 100644
index 000000000000..cb18043f5830
--- /dev/null
+++ b/drivers/net/ethernet/wiznet/Kconfig
@@ -0,0 +1,73 @@
1#
2# WIZnet devices configuration
3#
4
5config NET_VENDOR_WIZNET
6 bool "WIZnet devices"
7 default y
8 ---help---
9 If you have a network (Ethernet) card belonging to this class, say Y
10 and read the Ethernet-HOWTO, available from
11 <http://www.tldp.org/docs.html#howto>.
12
13 Note that the answer to this question doesn't directly affect the
14 kernel: saying N will just cause the configurator to skip all
15 the questions about WIZnet devices. If you say Y, you will be asked
16 for your specific card in the following questions.
17
18if NET_VENDOR_WIZNET
19
20config WIZNET_W5100
21 tristate "WIZnet W5100 Ethernet support"
22 depends on HAS_IOMEM
23 ---help---
24 Support for WIZnet W5100 chips.
25
26 W5100 is a single chip with integrated 10/100 Ethernet MAC,
27 PHY and hardware TCP/IP stack, but this driver is limited to
28 the MAC and PHY functions only, onchip TCP/IP is unused.
29
30 To compile this driver as a module, choose M here: the module
31 will be called w5100.
32
33config WIZNET_W5300
34 tristate "WIZnet W5300 Ethernet support"
35 depends on HAS_IOMEM
36 ---help---
37 Support for WIZnet W5300 chips.
38
39 W5300 is a single chip with integrated 10/100 Ethernet MAC,
40 PHY and hardware TCP/IP stack, but this driver is limited to
41 the MAC and PHY functions only, onchip TCP/IP is unused.
42
43 To compile this driver as a module, choose M here: the module
44 will be called w5300.
45
46choice
47 prompt "WIZnet interface mode"
48 depends on WIZNET_W5100 || WIZNET_W5300
49 default WIZNET_BUS_ANY
50
51config WIZNET_BUS_DIRECT
52 bool "Direct address bus mode"
53 ---help---
54 In direct address mode host system can directly access all registers
55 after mapping to Memory-Mapped I/O space.
56
57config WIZNET_BUS_INDIRECT
58 bool "Indirect address bus mode"
59 ---help---
60 In indirect address mode host system indirectly accesses registers
61 using Indirect Mode Address Register and Indirect Mode Data Register,
62 which are directly mapped to Memory-Mapped I/O space.
63
64config WIZNET_BUS_ANY
65 bool "Select interface mode in runtime"
66 ---help---
67 If interface mode is unknown in compile time, it can be selected
68 in runtime from board/platform resources configuration.
69
70 Performance may decrease compared to explicitly selected bus mode.
71endchoice
72
73endif # NET_VENDOR_WIZNET
diff --git a/drivers/net/ethernet/wiznet/Makefile b/drivers/net/ethernet/wiznet/Makefile
new file mode 100644
index 000000000000..c614535227e8
--- /dev/null
+++ b/drivers/net/ethernet/wiznet/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_WIZNET_W5100) += w5100.o
2obj-$(CONFIG_WIZNET_W5300) += w5300.o
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
new file mode 100644
index 000000000000..a75e9ef5a4ce
--- /dev/null
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -0,0 +1,808 @@
1/*
2 * Ethernet driver for the WIZnet W5100 chip.
3 *
4 * Copyright (C) 2006-2008 WIZnet Co.,Ltd.
5 * Copyright (C) 2012 Mike Sinkovsky <msink@permonline.ru>
6 *
7 * Licensed under the GPL-2 or later.
8 */
9
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <linux/kconfig.h>
13#include <linux/netdevice.h>
14#include <linux/etherdevice.h>
15#include <linux/platform_device.h>
16#include <linux/platform_data/wiznet.h>
17#include <linux/ethtool.h>
18#include <linux/skbuff.h>
19#include <linux/types.h>
20#include <linux/errno.h>
21#include <linux/delay.h>
22#include <linux/slab.h>
23#include <linux/spinlock.h>
24#include <linux/io.h>
25#include <linux/ioport.h>
26#include <linux/interrupt.h>
27#include <linux/irq.h>
28#include <linux/gpio.h>
29
30#define DRV_NAME "w5100"
31#define DRV_VERSION "2012-04-04"
32
33MODULE_DESCRIPTION("WIZnet W5100 Ethernet driver v"DRV_VERSION);
34MODULE_AUTHOR("Mike Sinkovsky <msink@permonline.ru>");
35MODULE_ALIAS("platform:"DRV_NAME);
36MODULE_LICENSE("GPL");
37
38/*
39 * Registers
40 */
41#define W5100_COMMON_REGS 0x0000
42#define W5100_MR 0x0000 /* Mode Register */
43#define MR_RST 0x80 /* S/W reset */
44#define MR_PB 0x10 /* Ping block */
45#define MR_AI 0x02 /* Address Auto-Increment */
46#define MR_IND 0x01 /* Indirect mode */
47#define W5100_SHAR 0x0009 /* Source MAC address */
48#define W5100_IR 0x0015 /* Interrupt Register */
49#define W5100_IMR 0x0016 /* Interrupt Mask Register */
50#define IR_S0 0x01 /* S0 interrupt */
51#define W5100_RTR 0x0017 /* Retry Time-value Register */
52#define RTR_DEFAULT 2000 /* =0x07d0 (2000) */
53#define W5100_RMSR 0x001a /* Receive Memory Size */
54#define W5100_TMSR 0x001b /* Transmit Memory Size */
55#define W5100_COMMON_REGS_LEN 0x0040
56
57#define W5100_S0_REGS 0x0400
58#define W5100_S0_MR 0x0400 /* S0 Mode Register */
#define   S0_MR_MACRAW	  0x04 /* MAC RAW mode (promiscuous) */
60#define S0_MR_MACRAW_MF 0x44 /* MAC RAW mode (filtered) */
61#define W5100_S0_CR 0x0401 /* S0 Command Register */
62#define S0_CR_OPEN 0x01 /* OPEN command */
63#define S0_CR_CLOSE 0x10 /* CLOSE command */
64#define S0_CR_SEND 0x20 /* SEND command */
65#define S0_CR_RECV 0x40 /* RECV command */
66#define W5100_S0_IR 0x0402 /* S0 Interrupt Register */
67#define S0_IR_SENDOK 0x10 /* complete sending */
68#define S0_IR_RECV 0x04 /* receiving data */
69#define W5100_S0_SR 0x0403 /* S0 Status Register */
70#define S0_SR_MACRAW 0x42 /* mac raw mode */
71#define W5100_S0_TX_FSR 0x0420 /* S0 Transmit free memory size */
72#define W5100_S0_TX_RD 0x0422 /* S0 Transmit memory read pointer */
73#define W5100_S0_TX_WR 0x0424 /* S0 Transmit memory write pointer */
74#define W5100_S0_RX_RSR 0x0426 /* S0 Receive free memory size */
75#define W5100_S0_RX_RD 0x0428 /* S0 Receive memory read pointer */
76#define W5100_S0_REGS_LEN 0x0040
77
78#define W5100_TX_MEM_START 0x4000
79#define W5100_TX_MEM_END 0x5fff
80#define W5100_TX_MEM_MASK 0x1fff
81#define W5100_RX_MEM_START 0x6000
82#define W5100_RX_MEM_END 0x7fff
83#define W5100_RX_MEM_MASK 0x1fff
84
85/*
86 * Device driver private data structure
87 */
struct w5100_priv {
	void __iomem *base;	/* mapped register window (direct or IDM) */
	spinlock_t reg_lock;	/* serializes IDM_AR/IDM_DR sequences in indirect mode */
	bool indirect;		/* true when operating in indirect bus mode */
	/* Bus accessors, filled in at probe time for CONFIG_WIZNET_BUS_ANY;
	 * compile-time selected builds bypass these via macros. */
	u8   (*read)(struct w5100_priv *priv, u16 addr);
	void (*write)(struct w5100_priv *priv, u16 addr, u8 data);
	u16  (*read16)(struct w5100_priv *priv, u16 addr);
	void (*write16)(struct w5100_priv *priv, u16 addr, u16 data);
	void (*readbuf)(struct w5100_priv *priv, u16 addr, u8 *buf, int len);
	void (*writebuf)(struct w5100_priv *priv, u16 addr, u8 *buf, int len);
	int irq;		/* main chip interrupt */
	int link_irq;		/* optional link-GPIO edge interrupt */
	int link_gpio;		/* optional link-detect GPIO (-EINVAL if absent) */

	struct napi_struct napi;
	struct net_device *ndev;
	bool promisc;		/* current MACRAW filter mode */
	u32 msg_enable;		/* netif message level bitmask */
};
107
108/************************************************************************
109 *
110 * Lowlevel I/O functions
111 *
112 ***********************************************************************/
113
114/*
115 * In direct address mode host system can directly access W5100 registers
116 * after mapping to Memory-Mapped I/O space.
117 *
118 * 0x8000 bytes are required for memory space.
119 */
120static inline u8 w5100_read_direct(struct w5100_priv *priv, u16 addr)
121{
122 return ioread8(priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT));
123}
124
125static inline void w5100_write_direct(struct w5100_priv *priv,
126 u16 addr, u8 data)
127{
128 iowrite8(data, priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT));
129}
130
131static u16 w5100_read16_direct(struct w5100_priv *priv, u16 addr)
132{
133 u16 data;
134 data = w5100_read_direct(priv, addr) << 8;
135 data |= w5100_read_direct(priv, addr + 1);
136 return data;
137}
138
139static void w5100_write16_direct(struct w5100_priv *priv, u16 addr, u16 data)
140{
141 w5100_write_direct(priv, addr, data >> 8);
142 w5100_write_direct(priv, addr + 1, data);
143}
144
145static void w5100_readbuf_direct(struct w5100_priv *priv,
146 u16 offset, u8 *buf, int len)
147{
148 u16 addr = W5100_RX_MEM_START + (offset & W5100_RX_MEM_MASK);
149 int i;
150
151 for (i = 0; i < len; i++, addr++) {
152 if (unlikely(addr > W5100_RX_MEM_END))
153 addr = W5100_RX_MEM_START;
154 *buf++ = w5100_read_direct(priv, addr);
155 }
156}
157
158static void w5100_writebuf_direct(struct w5100_priv *priv,
159 u16 offset, u8 *buf, int len)
160{
161 u16 addr = W5100_TX_MEM_START + (offset & W5100_TX_MEM_MASK);
162 int i;
163
164 for (i = 0; i < len; i++, addr++) {
165 if (unlikely(addr > W5100_TX_MEM_END))
166 addr = W5100_TX_MEM_START;
167 w5100_write_direct(priv, addr, *buf++);
168 }
169}
170
171/*
172 * In indirect address mode host system indirectly accesses registers by
173 * using Indirect Mode Address Register (IDM_AR) and Indirect Mode Data
174 * Register (IDM_DR), which are directly mapped to Memory-Mapped I/O space.
175 * Mode Register (MR) is directly accessible.
176 *
177 * Only 0x04 bytes are required for memory space.
178 */
179#define W5100_IDM_AR 0x01 /* Indirect Mode Address Register */
180#define W5100_IDM_DR 0x03 /* Indirect Mode Data Register */
181
/*
 * Read one register byte in indirect bus mode.
 *
 * IDM_AR/IDM_DR form a single shared address/data window, so the whole
 * sequence runs under reg_lock with interrupts disabled.  mmiowb()
 * makes sure the posted write to IDM_AR reaches the chip before
 * IDM_DR is read.
 */
static u8 w5100_read_indirect(struct w5100_priv *priv, u16 addr)
{
	unsigned long flags;
	u8 data;

	spin_lock_irqsave(&priv->reg_lock, flags);
	w5100_write16_direct(priv, W5100_IDM_AR, addr);
	mmiowb();
	data = w5100_read_direct(priv, W5100_IDM_DR);
	spin_unlock_irqrestore(&priv->reg_lock, flags);

	return data;
}
195
/*
 * Write one register byte in indirect bus mode.
 * Address set-up and data write are ordered by mmiowb() and protected
 * by reg_lock (see w5100_read_indirect).
 */
static void w5100_write_indirect(struct w5100_priv *priv, u16 addr, u8 data)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->reg_lock, flags);
	w5100_write16_direct(priv, W5100_IDM_AR, addr);
	mmiowb();
	w5100_write_direct(priv, W5100_IDM_DR, data);
	mmiowb();
	spin_unlock_irqrestore(&priv->reg_lock, flags);
}
207
/*
 * Read a big-endian 16-bit register in indirect bus mode.
 * Relies on the MR_AI address auto-increment mode configured in
 * w5100_hw_reset() for indirect operation: the second IDM_DR read
 * returns the byte at addr + 1 without re-writing IDM_AR.
 */
static u16 w5100_read16_indirect(struct w5100_priv *priv, u16 addr)
{
	unsigned long flags;
	u16 data;

	spin_lock_irqsave(&priv->reg_lock, flags);
	w5100_write16_direct(priv, W5100_IDM_AR, addr);
	mmiowb();
	data = w5100_read_direct(priv, W5100_IDM_DR) << 8;
	data |= w5100_read_direct(priv, W5100_IDM_DR);
	spin_unlock_irqrestore(&priv->reg_lock, flags);

	return data;
}
222
/*
 * Write a big-endian 16-bit register in indirect bus mode.
 * Uses MR_AI auto-increment (set in w5100_hw_reset) so both bytes go
 * through IDM_DR after a single IDM_AR set-up.
 */
static void w5100_write16_indirect(struct w5100_priv *priv, u16 addr, u16 data)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->reg_lock, flags);
	w5100_write16_direct(priv, W5100_IDM_AR, addr);
	mmiowb();
	w5100_write_direct(priv, W5100_IDM_DR, data >> 8);
	w5100_write_direct(priv, W5100_IDM_DR, data);
	mmiowb();
	spin_unlock_irqrestore(&priv->reg_lock, flags);
}
235
/*
 * Copy @len bytes out of the chip RX buffer in indirect bus mode.
 * IDM_AR is written once up front and then advances via MR_AI
 * auto-increment; it must be re-seeded whenever the ring wraps past
 * W5100_RX_MEM_END, because auto-increment would otherwise run out
 * of the RX memory window.
 */
static void w5100_readbuf_indirect(struct w5100_priv *priv,
		u16 offset, u8 *buf, int len)
{
	u16 addr = W5100_RX_MEM_START + (offset & W5100_RX_MEM_MASK);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&priv->reg_lock, flags);
	w5100_write16_direct(priv, W5100_IDM_AR, addr);
	mmiowb();

	for (i = 0; i < len; i++, addr++) {
		if (unlikely(addr > W5100_RX_MEM_END)) {
			addr = W5100_RX_MEM_START;
			w5100_write16_direct(priv, W5100_IDM_AR, addr);
			mmiowb();
		}
		*buf++ = w5100_read_direct(priv, W5100_IDM_DR);
	}
	mmiowb();
	spin_unlock_irqrestore(&priv->reg_lock, flags);
}
258
/*
 * Copy @len bytes into the chip TX buffer in indirect bus mode.
 * Mirror image of w5100_readbuf_indirect(): one IDM_AR set-up, then
 * auto-incremented IDM_DR writes, re-seeding the address register on
 * TX-window wrap-around.
 */
static void w5100_writebuf_indirect(struct w5100_priv *priv,
		u16 offset, u8 *buf, int len)
{
	u16 addr = W5100_TX_MEM_START + (offset & W5100_TX_MEM_MASK);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&priv->reg_lock, flags);
	w5100_write16_direct(priv, W5100_IDM_AR, addr);
	mmiowb();

	for (i = 0; i < len; i++, addr++) {
		if (unlikely(addr > W5100_TX_MEM_END)) {
			addr = W5100_TX_MEM_START;
			w5100_write16_direct(priv, W5100_IDM_AR, addr);
			mmiowb();
		}
		w5100_write_direct(priv, W5100_IDM_DR, *buf++);
	}
	mmiowb();
	spin_unlock_irqrestore(&priv->reg_lock, flags);
}
281
282#if defined(CONFIG_WIZNET_BUS_DIRECT)
283#define w5100_read w5100_read_direct
284#define w5100_write w5100_write_direct
285#define w5100_read16 w5100_read16_direct
286#define w5100_write16 w5100_write16_direct
287#define w5100_readbuf w5100_readbuf_direct
288#define w5100_writebuf w5100_writebuf_direct
289
290#elif defined(CONFIG_WIZNET_BUS_INDIRECT)
291#define w5100_read w5100_read_indirect
292#define w5100_write w5100_write_indirect
293#define w5100_read16 w5100_read16_indirect
294#define w5100_write16 w5100_write16_indirect
295#define w5100_readbuf w5100_readbuf_indirect
296#define w5100_writebuf w5100_writebuf_indirect
297
298#else /* CONFIG_WIZNET_BUS_ANY */
299#define w5100_read priv->read
300#define w5100_write priv->write
301#define w5100_read16 priv->read16
302#define w5100_write16 priv->write16
303#define w5100_readbuf priv->readbuf
304#define w5100_writebuf priv->writebuf
305#endif
306
/*
 * Issue a socket-0 command and busy-wait until the chip clears the
 * command register to signal completion.
 * Returns 0 on success, -EIO if the command does not complete within
 * 100 ms.
 */
static int w5100_command(struct w5100_priv *priv, u16 cmd)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(100);

	w5100_write(priv, W5100_S0_CR, cmd);
	mmiowb();

	while (w5100_read(priv, W5100_S0_CR) != 0) {
		if (time_after(jiffies, timeout))
			return -EIO;
		cpu_relax();
	}

	return 0;
}
322
/* Program the netdev MAC address into the chip's source MAC (SHAR) registers. */
static void w5100_write_macaddr(struct w5100_priv *priv)
{
	struct net_device *ndev = priv->ndev;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		w5100_write(priv, W5100_SHAR + i, ndev->dev_addr[i]);
	mmiowb();
}
332
/*
 * Full chip reset and base configuration: software reset, select the
 * bus mode, mask all interrupts, restore the MAC address and split the
 * internal packet memory between RX and TX.
 */
static void w5100_hw_reset(struct w5100_priv *priv)
{
	/* S/W reset; the MR register is reachable without IDM set-up,
	 * so use the direct accessor in both bus modes. */
	w5100_write_direct(priv, W5100_MR, MR_RST);
	mmiowb();
	mdelay(5);	/* let the reset settle before reconfiguring */
	/* Indirect mode additionally needs MR_AI (address auto-increment)
	 * and MR_IND; ping-block is enabled in both modes. */
	w5100_write_direct(priv, W5100_MR, priv->indirect ?
				  MR_PB | MR_AI | MR_IND :
				  MR_PB);
	mmiowb();
	w5100_write(priv, W5100_IMR, 0);
	w5100_write_macaddr(priv);

	/* Configure 16K of internal memory
	 * as 8K RX buffer and 8K TX buffer
	 */
	w5100_write(priv, W5100_RMSR, 0x03);
	w5100_write(priv, W5100_TMSR, 0x03);
	mmiowb();
}
352
/*
 * Open socket 0 in MAC RAW mode (unfiltered when promiscuous,
 * MAC-filtered otherwise) and unmask its interrupt.
 */
static void w5100_hw_start(struct w5100_priv *priv)
{
	w5100_write(priv, W5100_S0_MR, priv->promisc ?
			  S0_MR_MACRAW : S0_MR_MACRAW_MF);
	mmiowb();
	w5100_command(priv, S0_CR_OPEN);
	w5100_write(priv, W5100_IMR, IR_S0);
	mmiowb();
}
362
/* Mask all chip interrupts, then close socket 0. */
static void w5100_hw_close(struct w5100_priv *priv)
{
	w5100_write(priv, W5100_IMR, 0);
	mmiowb();
	w5100_command(priv, S0_CR_CLOSE);
}
369
370/***********************************************************************
371 *
372 * Device driver functions / callbacks
373 *
374 ***********************************************************************/
375
/* ethtool: report driver name, version and parent bus device. */
static void w5100_get_drvinfo(struct net_device *ndev,
			      struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, dev_name(ndev->dev.parent),
		sizeof(info->bus_info));
}
384
385static u32 w5100_get_link(struct net_device *ndev)
386{
387 struct w5100_priv *priv = netdev_priv(ndev);
388
389 if (gpio_is_valid(priv->link_gpio))
390 return !!gpio_get_value(priv->link_gpio);
391
392 return 1;
393}
394
/* ethtool: return the current netif message level bitmask. */
static u32 w5100_get_msglevel(struct net_device *ndev)
{
	struct w5100_priv *priv = netdev_priv(ndev);

	return priv->msg_enable;
}
401
/* ethtool: set the netif message level bitmask. */
static void w5100_set_msglevel(struct net_device *ndev, u32 value)
{
	struct w5100_priv *priv = netdev_priv(ndev);

	priv->msg_enable = value;
}
408
/* ethtool: size of the register dump (common + socket-0 blocks). */
static int w5100_get_regs_len(struct net_device *ndev)
{
	return W5100_COMMON_REGS_LEN + W5100_S0_REGS_LEN;
}
413
/* ethtool: dump the common and socket-0 register blocks byte by byte. */
static void w5100_get_regs(struct net_device *ndev,
			   struct ethtool_regs *regs, void *_buf)
{
	struct w5100_priv *priv = netdev_priv(ndev);
	u8 *buf = _buf;
	u16 i;

	regs->version = 1;
	for (i = 0; i < W5100_COMMON_REGS_LEN; i++)
		*buf++ = w5100_read(priv, W5100_COMMON_REGS + i);
	for (i = 0; i < W5100_S0_REGS_LEN; i++)
		*buf++ = w5100_read(priv, W5100_S0_REGS + i);
}
427
/*
 * Watchdog TX-timeout handler: the only recovery is a full hardware
 * reset and restart, after which the queue is woken again.
 */
static void w5100_tx_timeout(struct net_device *ndev)
{
	struct w5100_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	w5100_hw_reset(priv);
	w5100_hw_start(priv);
	ndev->stats.tx_errors++;
	ndev->trans_start = jiffies;	/* restart the watchdog interval */
	netif_wake_queue(ndev);
}
439
/*
 * Transmit one frame.  The queue is stopped for every frame and only
 * woken from the SENDOK interrupt (see w5100_interrupt), so a single
 * frame is in flight at a time.  Always consumes the skb and returns
 * NETDEV_TX_OK.
 */
static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
{
	struct w5100_priv *priv = netdev_priv(ndev);
	u16 offset;

	netif_stop_queue(ndev);

	/* Copy the frame at the chip's current TX write pointer and
	 * advance the pointer past it. */
	offset = w5100_read16(priv, W5100_S0_TX_WR);
	w5100_writebuf(priv, offset, skb->data, skb->len);
	w5100_write16(priv, W5100_S0_TX_WR, offset + skb->len);
	mmiowb();
	ndev->stats.tx_bytes += skb->len;
	ndev->stats.tx_packets++;
	dev_kfree_skb(skb);

	w5100_command(priv, S0_CR_SEND);

	return NETDEV_TX_OK;
}
459
460static int w5100_napi_poll(struct napi_struct *napi, int budget)
461{
462 struct w5100_priv *priv = container_of(napi, struct w5100_priv, napi);
463 struct net_device *ndev = priv->ndev;
464 struct sk_buff *skb;
465 int rx_count;
466 u16 rx_len;
467 u16 offset;
468 u8 header[2];
469
470 for (rx_count = 0; rx_count < budget; rx_count++) {
471 u16 rx_buf_len = w5100_read16(priv, W5100_S0_RX_RSR);
472 if (rx_buf_len == 0)
473 break;
474
475 offset = w5100_read16(priv, W5100_S0_RX_RD);
476 w5100_readbuf(priv, offset, header, 2);
477 rx_len = get_unaligned_be16(header) - 2;
478
479 skb = netdev_alloc_skb_ip_align(ndev, rx_len);
480 if (unlikely(!skb)) {
481 w5100_write16(priv, W5100_S0_RX_RD,
482 offset + rx_buf_len);
483 w5100_command(priv, S0_CR_RECV);
484 ndev->stats.rx_dropped++;
485 return -ENOMEM;
486 }
487
488 skb_put(skb, rx_len);
489 w5100_readbuf(priv, offset + 2, skb->data, rx_len);
490 w5100_write16(priv, W5100_S0_RX_RD, offset + 2 + rx_len);
491 mmiowb();
492 w5100_command(priv, S0_CR_RECV);
493 skb->protocol = eth_type_trans(skb, ndev);
494
495 netif_receive_skb(skb);
496 ndev->stats.rx_packets++;
497 ndev->stats.rx_bytes += rx_len;
498 }
499
500 if (rx_count < budget) {
501 w5100_write(priv, W5100_IMR, IR_S0);
502 mmiowb();
503 napi_complete(napi);
504 }
505
506 return rx_count;
507}
508
/*
 * Main chip interrupt handler.  Acknowledges the socket-0 events,
 * wakes the TX queue on SENDOK (single-frame-in-flight TX scheme, see
 * w5100_start_tx) and on RECV masks the chip interrupt and hands RX
 * processing to NAPI.
 */
static irqreturn_t w5100_interrupt(int irq, void *ndev_instance)
{
	struct net_device *ndev = ndev_instance;
	struct w5100_priv *priv = netdev_priv(ndev);

	int ir = w5100_read(priv, W5100_S0_IR);
	if (!ir)
		return IRQ_NONE;	/* not ours (possibly shared line) */
	w5100_write(priv, W5100_S0_IR, ir);	/* ack what we saw */
	mmiowb();

	if (ir & S0_IR_SENDOK) {
		netif_dbg(priv, tx_done, ndev, "tx done\n");
		netif_wake_queue(ndev);
	}

	if (ir & S0_IR_RECV) {
		if (napi_schedule_prep(&priv->napi)) {
			/* Mask the chip interrupt until NAPI poll
			 * re-enables it in w5100_napi_poll(). */
			w5100_write(priv, W5100_IMR, 0);
			mmiowb();
			__napi_schedule(&priv->napi);
		}
	}

	return IRQ_HANDLED;
}
535
/*
 * Edge-triggered interrupt handler for the optional link-detect GPIO:
 * mirrors the GPIO level into the netdev carrier state while the
 * interface is running.
 */
static irqreturn_t w5100_detect_link(int irq, void *ndev_instance)
{
	struct net_device *ndev = ndev_instance;
	struct w5100_priv *priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		if (gpio_get_value(priv->link_gpio) != 0) {
			netif_info(priv, link, ndev, "link is up\n");
			netif_carrier_on(ndev);
		} else {
			netif_info(priv, link, ndev, "link is down\n");
			netif_carrier_off(ndev);
		}
	}

	return IRQ_HANDLED;
}
553
554static void w5100_set_rx_mode(struct net_device *ndev)
555{
556 struct w5100_priv *priv = netdev_priv(ndev);
557 bool set_promisc = (ndev->flags & IFF_PROMISC) != 0;
558
559 if (priv->promisc != set_promisc) {
560 priv->promisc = set_promisc;
561 w5100_hw_start(priv);
562 }
563}
564
/*
 * ndo_set_mac_address: validate and store the new address, clear the
 * random-address flag and program it into the chip.
 * Returns 0 or -EADDRNOTAVAIL for an invalid Ethernet address.
 */
static int w5100_set_macaddr(struct net_device *ndev, void *addr)
{
	struct w5100_priv *priv = netdev_priv(ndev);
	struct sockaddr *sock_addr = addr;

	if (!is_valid_ether_addr(sock_addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN);
	ndev->addr_assign_type &= ~NET_ADDR_RANDOM;
	w5100_write_macaddr(priv);
	return 0;
}
577
/*
 * ndo_open: start the hardware, enable NAPI and the TX queue, and set
 * the initial carrier state from the link GPIO (or force it on when
 * no link GPIO is wired up).
 */
static int w5100_open(struct net_device *ndev)
{
	struct w5100_priv *priv = netdev_priv(ndev);

	netif_info(priv, ifup, ndev, "enabling\n");
	if (!is_valid_ether_addr(ndev->dev_addr))
		return -EINVAL;
	w5100_hw_start(priv);
	napi_enable(&priv->napi);
	netif_start_queue(ndev);
	if (!gpio_is_valid(priv->link_gpio) ||
	    gpio_get_value(priv->link_gpio) != 0)
		netif_carrier_on(ndev);
	return 0;
}
593
/* ndo_stop: close the hardware, then quiesce carrier, queue and NAPI. */
static int w5100_stop(struct net_device *ndev)
{
	struct w5100_priv *priv = netdev_priv(ndev);

	netif_info(priv, ifdown, ndev, "shutting down\n");
	w5100_hw_close(priv);
	netif_carrier_off(ndev);
	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	return 0;
}
605
/* ethtool operations implemented by this driver. */
static const struct ethtool_ops w5100_ethtool_ops = {
	.get_drvinfo		= w5100_get_drvinfo,
	.get_msglevel		= w5100_get_msglevel,
	.set_msglevel		= w5100_set_msglevel,
	.get_link		= w5100_get_link,
	.get_regs_len		= w5100_get_regs_len,
	.get_regs		= w5100_get_regs,
};
614
/* Net device operations implemented by this driver. */
static const struct net_device_ops w5100_netdev_ops = {
	.ndo_open		= w5100_open,
	.ndo_stop		= w5100_stop,
	.ndo_start_xmit		= w5100_start_tx,
	.ndo_tx_timeout		= w5100_tx_timeout,
	.ndo_set_rx_mode	= w5100_set_rx_mode,
	.ndo_set_mac_address	= w5100_set_macaddr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
};
625
/*
 * Probe the hardware behind the platform device: obtain a MAC
 * address, map the register resource, pick the bus mode from the
 * resource size, sanity-check the chip, and install the interrupt
 * handlers.  All resources are devm-managed or released in
 * w5100_remove().
 */
static int __devinit w5100_hw_probe(struct platform_device *pdev)
{
	struct wiznet_platform_data *data = pdev->dev.platform_data;
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct w5100_priv *priv = netdev_priv(ndev);
	const char *name = netdev_name(ndev);
	struct resource *mem;
	int mem_size;
	int irq;
	int ret;

	/* Use the platform-supplied MAC if valid, else a random one. */
	if (data && is_valid_ether_addr(data->mac_addr)) {
		memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
	} else {
		random_ether_addr(ndev->dev_addr);
		ndev->addr_assign_type |= NET_ADDR_RANDOM;
	}

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -ENXIO;
	mem_size = resource_size(mem);
	if (!devm_request_mem_region(&pdev->dev, mem->start, mem_size, name))
		return -EBUSY;
	priv->base = devm_ioremap(&pdev->dev, mem->start, mem_size);
	if (!priv->base)
		return -EBUSY;

	spin_lock_init(&priv->reg_lock);
	/* A register window smaller than the full direct-mode map means
	 * the board wired the chip in indirect bus mode. */
	priv->indirect = mem_size < W5100_BUS_DIRECT_SIZE;
	if (priv->indirect) {
		priv->read     = w5100_read_indirect;
		priv->write    = w5100_write_indirect;
		priv->read16   = w5100_read16_indirect;
		priv->write16  = w5100_write16_indirect;
		priv->readbuf  = w5100_readbuf_indirect;
		priv->writebuf = w5100_writebuf_indirect;
	} else {
		priv->read     = w5100_read_direct;
		priv->write    = w5100_write_direct;
		priv->read16   = w5100_read16_direct;
		priv->write16  = w5100_write16_direct;
		priv->readbuf  = w5100_readbuf_direct;
		priv->writebuf = w5100_writebuf_direct;
	}

	/* Presence check: after reset RTR must read back its documented
	 * default value, otherwise there is no W5100 at this address. */
	w5100_hw_reset(priv);
	if (w5100_read16(priv, W5100_RTR) != RTR_DEFAULT)
		return -ENODEV;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	ret = request_irq(irq, w5100_interrupt,
			  IRQ_TYPE_LEVEL_LOW, name, ndev);
	if (ret < 0)
		return ret;
	priv->irq = irq;

	/* The link-detect GPIO is optional; failure to claim its irq
	 * just disables link detection (carrier stays forced up). */
	priv->link_gpio = data ? data->link_gpio : -EINVAL;
	if (gpio_is_valid(priv->link_gpio)) {
		char *link_name = devm_kzalloc(&pdev->dev, 16, GFP_KERNEL);
		if (!link_name)
			return -ENOMEM;
		snprintf(link_name, 16, "%s-link", name);
		priv->link_irq = gpio_to_irq(priv->link_gpio);
		if (request_any_context_irq(priv->link_irq, w5100_detect_link,
				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
				link_name, priv->ndev) < 0)
			priv->link_gpio = -EINVAL;
	}

	netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, irq);
	return 0;
}
701
/*
 * Platform driver probe: allocate and register the net device, then
 * probe the hardware.  Note the netdev is registered before the
 * hardware is touched; w5100_hw_probe() failure unwinds the
 * registration.
 */
static int __devinit w5100_probe(struct platform_device *pdev)
{
	struct w5100_priv *priv;
	struct net_device *ndev;
	int err;

	ndev = alloc_etherdev(sizeof(*priv));
	if (!ndev)
		return -ENOMEM;
	SET_NETDEV_DEV(ndev, &pdev->dev);
	platform_set_drvdata(pdev, ndev);
	priv = netdev_priv(ndev);
	priv->ndev = ndev;

	ether_setup(ndev);
	ndev->netdev_ops = &w5100_netdev_ops;
	ndev->ethtool_ops = &w5100_ethtool_ops;
	ndev->watchdog_timeo = HZ;
	netif_napi_add(ndev, &priv->napi, w5100_napi_poll, 16);

	/* This chip doesn't support VLAN packets with normal MTU,
	 * so disable VLAN for this device.
	 */
	ndev->features |= NETIF_F_VLAN_CHALLENGED;

	err = register_netdev(ndev);
	if (err < 0)
		goto err_register;

	err = w5100_hw_probe(pdev);
	if (err < 0)
		goto err_hw_probe;

	return 0;

err_hw_probe:
	unregister_netdev(ndev);
err_register:
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);
	return err;
}
744
/*
 * Platform driver remove: reset the chip, release the interrupt(s)
 * and tear down the net device.
 *
 * NOTE(review): the IRQs are freed before unregister_netdev(); this
 * assumes the interface has been quiesced and no interrupt work races
 * with teardown -- confirm against the chip being held in reset by
 * w5100_hw_reset() above.
 */
static int __devexit w5100_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct w5100_priv *priv = netdev_priv(ndev);

	w5100_hw_reset(priv);
	free_irq(priv->irq, ndev);
	if (gpio_is_valid(priv->link_gpio))
		free_irq(priv->link_irq, ndev);

	unregister_netdev(ndev);
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);
	return 0;
}
760
761#ifdef CONFIG_PM
/* PM suspend: detach the running interface and close the hardware. */
static int w5100_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct w5100_priv *priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netif_carrier_off(ndev);
		netif_device_detach(ndev);

		w5100_hw_close(priv);
	}
	return 0;
}
776
/*
 * PM resume: the chip loses its state over suspend, so do a full
 * reset + start before re-attaching the interface and restoring the
 * carrier state from the link GPIO (or forcing it on without one).
 */
static int w5100_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct w5100_priv *priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		w5100_hw_reset(priv);
		w5100_hw_start(priv);

		netif_device_attach(ndev);
		if (!gpio_is_valid(priv->link_gpio) ||
		    gpio_get_value(priv->link_gpio) != 0)
			netif_carrier_on(ndev);
	}
	return 0;
}
794#endif /* CONFIG_PM */
795
796static SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume);
797
/* Platform driver glue; PM ops are no-ops unless CONFIG_PM is set. */
static struct platform_driver w5100_driver = {
	.driver		= {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
		.pm	= &w5100_pm_ops,
	},
	.probe		= w5100_probe,
	.remove		= __devexit_p(w5100_remove),
};
807
808module_platform_driver(w5100_driver);
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
new file mode 100644
index 000000000000..3306a20ec211
--- /dev/null
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -0,0 +1,720 @@
1/*
2 * Ethernet driver for the WIZnet W5300 chip.
3 *
4 * Copyright (C) 2008-2009 WIZnet Co.,Ltd.
5 * Copyright (C) 2011 Taehun Kim <kth3321 <at> gmail.com>
6 * Copyright (C) 2012 Mike Sinkovsky <msink@permonline.ru>
7 *
8 * Licensed under the GPL-2 or later.
9 */
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/kconfig.h>
14#include <linux/netdevice.h>
15#include <linux/etherdevice.h>
16#include <linux/platform_device.h>
17#include <linux/platform_data/wiznet.h>
18#include <linux/ethtool.h>
19#include <linux/skbuff.h>
20#include <linux/types.h>
21#include <linux/errno.h>
22#include <linux/delay.h>
23#include <linux/slab.h>
24#include <linux/spinlock.h>
25#include <linux/io.h>
26#include <linux/ioport.h>
27#include <linux/interrupt.h>
28#include <linux/irq.h>
29#include <linux/gpio.h>
30
31#define DRV_NAME "w5300"
32#define DRV_VERSION "2012-04-04"
33
34MODULE_DESCRIPTION("WIZnet W5300 Ethernet driver v"DRV_VERSION);
35MODULE_AUTHOR("Mike Sinkovsky <msink@permonline.ru>");
36MODULE_ALIAS("platform:"DRV_NAME);
37MODULE_LICENSE("GPL");
38
39/*
40 * Registers
41 */
42#define W5300_MR 0x0000 /* Mode Register */
43#define MR_DBW (1 << 15) /* Data bus width */
44#define MR_MPF (1 << 14) /* Mac layer pause frame */
45#define MR_WDF(n) (((n)&7)<<11) /* Write data fetch time */
46#define MR_RDH (1 << 10) /* Read data hold time */
47#define MR_FS (1 << 8) /* FIFO swap */
48#define MR_RST (1 << 7) /* S/W reset */
49#define MR_PB (1 << 4) /* Ping block */
50#define MR_DBS (1 << 2) /* Data bus swap */
51#define MR_IND (1 << 0) /* Indirect mode */
52#define W5300_IR 0x0002 /* Interrupt Register */
53#define W5300_IMR 0x0004 /* Interrupt Mask Register */
54#define IR_S0 0x0001 /* S0 interrupt */
55#define W5300_SHARL 0x0008 /* Source MAC address (0123) */
56#define W5300_SHARH 0x000c /* Source MAC address (45) */
57#define W5300_TMSRL 0x0020 /* Transmit Memory Size (0123) */
58#define W5300_TMSRH 0x0024 /* Transmit Memory Size (4567) */
59#define W5300_RMSRL 0x0028 /* Receive Memory Size (0123) */
60#define W5300_RMSRH 0x002c /* Receive Memory Size (4567) */
61#define W5300_MTYPE 0x0030 /* Memory Type */
62#define W5300_IDR 0x00fe /* Chip ID register */
63#define IDR_W5300 0x5300 /* =0x5300 for WIZnet W5300 */
64#define W5300_S0_MR 0x0200 /* S0 Mode Register */
65#define S0_MR_CLOSED 0x0000 /* Close mode */
#define   S0_MR_MACRAW	  0x0004 /* MAC RAW mode (promiscuous) */
67#define S0_MR_MACRAW_MF 0x0044 /* MAC RAW mode (filtered) */
68#define W5300_S0_CR 0x0202 /* S0 Command Register */
69#define S0_CR_OPEN 0x0001 /* OPEN command */
70#define S0_CR_CLOSE 0x0010 /* CLOSE command */
71#define S0_CR_SEND 0x0020 /* SEND command */
72#define S0_CR_RECV 0x0040 /* RECV command */
73#define W5300_S0_IMR 0x0204 /* S0 Interrupt Mask Register */
74#define W5300_S0_IR 0x0206 /* S0 Interrupt Register */
75#define S0_IR_RECV 0x0004 /* Receive interrupt */
76#define S0_IR_SENDOK 0x0010 /* Send OK interrupt */
77#define W5300_S0_SSR 0x0208 /* S0 Socket Status Register */
78#define W5300_S0_TX_WRSR 0x0220 /* S0 TX Write Size Register */
79#define W5300_S0_TX_FSR 0x0224 /* S0 TX Free Size Register */
80#define W5300_S0_RX_RSR 0x0228 /* S0 Received data Size */
81#define W5300_S0_TX_FIFO 0x022e /* S0 Transmit FIFO */
82#define W5300_S0_RX_FIFO 0x0230 /* S0 Receive FIFO */
83#define W5300_REGS_LEN 0x0400
84
85/*
86 * Device driver private data structure
87 */
struct w5300_priv {
	void __iomem *base;	/* mapped register window (direct or IDM) */
	spinlock_t reg_lock;	/* serializes IDM_AR/IDM_DR sequences in indirect mode */
	bool indirect;		/* true when operating in indirect bus mode */
	/* Bus accessors, filled in at probe time for CONFIG_WIZNET_BUS_ANY;
	 * compile-time selected builds bypass these via macros. */
	u16  (*read) (struct w5300_priv *priv, u16 addr);
	void (*write)(struct w5300_priv *priv, u16 addr, u16 data);
	int irq;		/* main chip interrupt */
	int link_irq;		/* optional link-GPIO edge interrupt */
	int link_gpio;		/* optional link-detect GPIO (-EINVAL if absent) */

	struct napi_struct napi;
	struct net_device *ndev;
	bool promisc;		/* current MACRAW filter mode */
	u32 msg_enable;		/* netif message level bitmask */
};
103
104/************************************************************************
105 *
106 * Lowlevel I/O functions
107 *
108 ***********************************************************************/
109
110/*
111 * In direct address mode host system can directly access W5300 registers
112 * after mapping to Memory-Mapped I/O space.
113 *
114 * 0x400 bytes are required for memory space.
115 */
116static inline u16 w5300_read_direct(struct w5300_priv *priv, u16 addr)
117{
118 return ioread16(priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT));
119}
120
121static inline void w5300_write_direct(struct w5300_priv *priv,
122 u16 addr, u16 data)
123{
124 iowrite16(data, priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT));
125}
126
127/*
128 * In indirect address mode host system indirectly accesses registers by
129 * using Indirect Mode Address Register (IDM_AR) and Indirect Mode Data
130 * Register (IDM_DR), which are directly mapped to Memory-Mapped I/O space.
131 * Mode Register (MR) is directly accessible.
132 *
133 * Only 0x06 bytes are required for memory space.
134 */
135#define W5300_IDM_AR 0x0002 /* Indirect Mode Address */
136#define W5300_IDM_DR 0x0004 /* Indirect Mode Data */
137
/*
 * Read one 16-bit register in indirect bus mode.
 * IDM_AR/IDM_DR form a single shared address/data window, so the
 * sequence runs under reg_lock; mmiowb() orders the posted IDM_AR
 * write before the IDM_DR access.
 */
static u16 w5300_read_indirect(struct w5300_priv *priv, u16 addr)
{
	unsigned long flags;
	u16 data;

	spin_lock_irqsave(&priv->reg_lock, flags);
	w5300_write_direct(priv, W5300_IDM_AR, addr);
	mmiowb();
	data = w5300_read_direct(priv, W5300_IDM_DR);
	spin_unlock_irqrestore(&priv->reg_lock, flags);

	return data;
}
151
/*
 * Write one 16-bit register in indirect bus mode
 * (see w5300_read_indirect for locking/ordering).
 */
static void w5300_write_indirect(struct w5300_priv *priv, u16 addr, u16 data)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->reg_lock, flags);
	w5300_write_direct(priv, W5300_IDM_AR, addr);
	mmiowb();
	w5300_write_direct(priv, W5300_IDM_DR, data);
	mmiowb();
	spin_unlock_irqrestore(&priv->reg_lock, flags);
}
163
164#if defined(CONFIG_WIZNET_BUS_DIRECT)
165#define w5300_read w5300_read_direct
166#define w5300_write w5300_write_direct
167
168#elif defined(CONFIG_WIZNET_BUS_INDIRECT)
169#define w5300_read w5300_read_indirect
170#define w5300_write w5300_write_indirect
171
172#else /* CONFIG_WIZNET_BUS_ANY */
173#define w5300_read priv->read
174#define w5300_write priv->write
175#endif
176
177static u32 w5300_read32(struct w5300_priv *priv, u16 addr)
178{
179 u32 data;
180 data = w5300_read(priv, addr) << 16;
181 data |= w5300_read(priv, addr + 2);
182 return data;
183}
184
185static void w5300_write32(struct w5300_priv *priv, u16 addr, u32 data)
186{
187 w5300_write(priv, addr, data >> 16);
188 w5300_write(priv, addr + 2, data);
189}
190
/*
 * Issue a socket-0 command and busy-wait until the chip clears the
 * command register to signal completion.
 * Returns 0 on success, -EIO if the command does not complete within
 * 100 ms.
 */
static int w5300_command(struct w5300_priv *priv, u16 cmd)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(100);

	w5300_write(priv, W5300_S0_CR, cmd);
	mmiowb();

	while (w5300_read(priv, W5300_S0_CR) != 0) {
		if (time_after(jiffies, timeout))
			return -EIO;
		cpu_relax();
	}

	return 0;
}
206
207static void w5300_read_frame(struct w5300_priv *priv, u8 *buf, int len)
208{
209 u16 fifo;
210 int i;
211
212 for (i = 0; i < len; i += 2) {
213 fifo = w5300_read(priv, W5300_S0_RX_FIFO);
214 *buf++ = fifo >> 8;
215 *buf++ = fifo;
216 }
217 fifo = w5300_read(priv, W5300_S0_RX_FIFO);
218 fifo = w5300_read(priv, W5300_S0_RX_FIFO);
219}
220
221static void w5300_write_frame(struct w5300_priv *priv, u8 *buf, int len)
222{
223 u16 fifo;
224 int i;
225
226 for (i = 0; i < len; i += 2) {
227 fifo = *buf++ << 8;
228 fifo |= *buf++;
229 w5300_write(priv, W5300_S0_TX_FIFO, fifo);
230 }
231 w5300_write32(priv, W5300_S0_TX_WRSR, len);
232}
233
/*
 * Program the netdev MAC address into the chip's source MAC registers:
 * bytes 0-3 into SHARL, bytes 4-5 into SHARH.
 */
static void w5300_write_macaddr(struct w5300_priv *priv)
{
	struct net_device *ndev = priv->ndev;
	w5300_write32(priv, W5300_SHARL,
		      ndev->dev_addr[0] << 24 |
		      ndev->dev_addr[1] << 16 |
		      ndev->dev_addr[2] << 8 |
		      ndev->dev_addr[3]);
	w5300_write(priv, W5300_SHARH,
		      ndev->dev_addr[4] << 8 |
		      ndev->dev_addr[5]);
	mmiowb();
}
247
/*
 * Full chip reset and base configuration: software reset, select the
 * bus mode, mask all interrupts, restore the MAC address and assign
 * the internal packet memory to socket 0.
 */
static void w5300_hw_reset(struct w5300_priv *priv)
{
	/* MR is reachable without IDM set-up, so use the direct
	 * accessor in both bus modes. */
	w5300_write_direct(priv, W5300_MR, MR_RST);
	mmiowb();
	mdelay(5);	/* let the reset settle before reconfiguring */
	w5300_write_direct(priv, W5300_MR, priv->indirect ?
				 MR_WDF(7) | MR_PB | MR_IND :
				 MR_WDF(7) | MR_PB);
	mmiowb();
	w5300_write(priv, W5300_IMR, 0);
	w5300_write_macaddr(priv);

	/* Configure 128K of internal memory
	 * as 64K RX fifo and 64K TX fifo
	 */
	w5300_write32(priv, W5300_RMSRL, 64 << 24);
	w5300_write32(priv, W5300_RMSRH, 0);
	w5300_write32(priv, W5300_TMSRL, 64 << 24);
	w5300_write32(priv, W5300_TMSRH, 0);
	w5300_write(priv, W5300_MTYPE, 0x00ff);
	mmiowb();
}
270
/* Open socket 0 in MACRAW mode and unmask its RECV/SENDOK interrupts.
 * S0_MR_MACRAW_MF enables MAC filtering; plain S0_MR_MACRAW is used
 * when the interface is promiscuous.
 */
static void w5300_hw_start(struct w5300_priv *priv)
{
	w5300_write(priv, W5300_S0_MR, priv->promisc ?
			  S0_MR_MACRAW : S0_MR_MACRAW_MF);
	mmiowb();
	w5300_command(priv, S0_CR_OPEN);
	w5300_write(priv, W5300_S0_IMR, S0_IR_RECV | S0_IR_SENDOK);
	w5300_write(priv, W5300_IMR, IR_S0);
	mmiowb();
}
281
/* Mask all chip interrupts and close socket 0. */
static void w5300_hw_close(struct w5300_priv *priv)
{
	w5300_write(priv, W5300_IMR, 0);
	mmiowb();
	w5300_command(priv, S0_CR_CLOSE);
}
288
289/***********************************************************************
290 *
291 * Device driver functions / callbacks
292 *
293 ***********************************************************************/
294
/* ethtool .get_drvinfo: report driver name/version and the platform
 * device name as the bus info.
 */
static void w5300_get_drvinfo(struct net_device *ndev,
			      struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, dev_name(ndev->dev.parent),
		sizeof(info->bus_info));
}
303
304static u32 w5300_get_link(struct net_device *ndev)
305{
306 struct w5300_priv *priv = netdev_priv(ndev);
307
308 if (gpio_is_valid(priv->link_gpio))
309 return !!gpio_get_value(priv->link_gpio);
310
311 return 1;
312}
313
314static u32 w5300_get_msglevel(struct net_device *ndev)
315{
316 struct w5300_priv *priv = netdev_priv(ndev);
317
318 return priv->msg_enable;
319}
320
321static void w5300_set_msglevel(struct net_device *ndev, u32 value)
322{
323 struct w5300_priv *priv = netdev_priv(ndev);
324
325 priv->msg_enable = value;
326}
327
/* ethtool .get_regs_len: size in bytes of the register dump produced
 * by w5300_get_regs().
 */
static int w5300_get_regs_len(struct net_device *ndev)
{
	return W5300_REGS_LEN;
}
332
333static void w5300_get_regs(struct net_device *ndev,
334 struct ethtool_regs *regs, void *_buf)
335{
336 struct w5300_priv *priv = netdev_priv(ndev);
337 u8 *buf = _buf;
338 u16 addr;
339 u16 data;
340
341 regs->version = 1;
342 for (addr = 0; addr < W5300_REGS_LEN; addr += 2) {
343 switch (addr & 0x23f) {
344 case W5300_S0_TX_FIFO: /* cannot read TX_FIFO */
345 case W5300_S0_RX_FIFO: /* cannot read RX_FIFO */
346 data = 0xffff;
347 break;
348 default:
349 data = w5300_read(priv, addr);
350 break;
351 }
352 *buf++ = data >> 8;
353 *buf++ = data;
354 }
355}
356
/* .ndo_tx_timeout: the stack saw no TX completion within
 * watchdog_timeo.  Recover by fully resetting and restarting the
 * chip, then let the stack resume transmitting.
 */
static void w5300_tx_timeout(struct net_device *ndev)
{
	struct w5300_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	w5300_hw_reset(priv);
	w5300_hw_start(priv);
	ndev->stats.tx_errors++;
	ndev->trans_start = jiffies;	/* restart the watchdog clock */
	netif_wake_queue(ndev);
}
368
/* .ndo_start_xmit: copy one skb into the TX FIFO and kick a SEND.
 * The queue is stopped here and only re-woken from the SENDOK
 * interrupt, so at most one frame is in flight at a time.  Stats are
 * counted and the skb freed before SEND completes, i.e. a frame is
 * accounted as transmitted once it is in the FIFO.
 */
static int w5300_start_tx(struct sk_buff *skb, struct net_device *ndev)
{
	struct w5300_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);

	w5300_write_frame(priv, skb->data, skb->len);
	mmiowb();
	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;
	dev_kfree_skb(skb);
	netif_dbg(priv, tx_queued, ndev, "tx queued\n");

	w5300_command(priv, S0_CR_SEND);

	return NETDEV_TX_OK;
}
386
387static int w5300_napi_poll(struct napi_struct *napi, int budget)
388{
389 struct w5300_priv *priv = container_of(napi, struct w5300_priv, napi);
390 struct net_device *ndev = priv->ndev;
391 struct sk_buff *skb;
392 int rx_count;
393 u16 rx_len;
394
395 for (rx_count = 0; rx_count < budget; rx_count++) {
396 u32 rx_fifo_len = w5300_read32(priv, W5300_S0_RX_RSR);
397 if (rx_fifo_len == 0)
398 break;
399
400 rx_len = w5300_read(priv, W5300_S0_RX_FIFO);
401
402 skb = netdev_alloc_skb_ip_align(ndev, roundup(rx_len, 2));
403 if (unlikely(!skb)) {
404 u32 i;
405 for (i = 0; i < rx_fifo_len; i += 2)
406 w5300_read(priv, W5300_S0_RX_FIFO);
407 ndev->stats.rx_dropped++;
408 return -ENOMEM;
409 }
410
411 skb_put(skb, rx_len);
412 w5300_read_frame(priv, skb->data, rx_len);
413 skb->protocol = eth_type_trans(skb, ndev);
414
415 netif_receive_skb(skb);
416 ndev->stats.rx_packets++;
417 ndev->stats.rx_bytes += rx_len;
418 }
419
420 if (rx_count < budget) {
421 w5300_write(priv, W5300_IMR, IR_S0);
422 mmiowb();
423 napi_complete(napi);
424 }
425
426 return rx_count;
427}
428
/* Main interrupt handler.  Reads and acks the socket-0 interrupt
 * status, wakes the TX queue on SENDOK, and on RECV masks further
 * chip interrupts and hands RX processing off to NAPI.
 */
static irqreturn_t w5300_interrupt(int irq, void *ndev_instance)
{
	struct net_device *ndev = ndev_instance;
	struct w5300_priv *priv = netdev_priv(ndev);

	int ir = w5300_read(priv, W5300_S0_IR);
	if (!ir)
		return IRQ_NONE;	/* not ours (shared line) */
	/* Writing the status back acks the pending bits. */
	w5300_write(priv, W5300_S0_IR, ir);
	mmiowb();

	if (ir & S0_IR_SENDOK) {
		netif_dbg(priv, tx_done, ndev, "tx done\n");
		netif_wake_queue(ndev);
	}

	if (ir & S0_IR_RECV) {
		if (napi_schedule_prep(&priv->napi)) {
			/* Mask chip interrupts until the poll re-enables
			 * them in w5300_napi_poll(). */
			w5300_write(priv, W5300_IMR, 0);
			mmiowb();
			__napi_schedule(&priv->napi);
		}
	}

	return IRQ_HANDLED;
}
455
456static irqreturn_t w5300_detect_link(int irq, void *ndev_instance)
457{
458 struct net_device *ndev = ndev_instance;
459 struct w5300_priv *priv = netdev_priv(ndev);
460
461 if (netif_running(ndev)) {
462 if (gpio_get_value(priv->link_gpio) != 0) {
463 netif_info(priv, link, ndev, "link is up\n");
464 netif_carrier_on(ndev);
465 } else {
466 netif_info(priv, link, ndev, "link is down\n");
467 netif_carrier_off(ndev);
468 }
469 }
470
471 return IRQ_HANDLED;
472}
473
474static void w5300_set_rx_mode(struct net_device *ndev)
475{
476 struct w5300_priv *priv = netdev_priv(ndev);
477 bool set_promisc = (ndev->flags & IFF_PROMISC) != 0;
478
479 if (priv->promisc != set_promisc) {
480 priv->promisc = set_promisc;
481 w5300_hw_start(priv);
482 }
483}
484
485static int w5300_set_macaddr(struct net_device *ndev, void *addr)
486{
487 struct w5300_priv *priv = netdev_priv(ndev);
488 struct sockaddr *sock_addr = addr;
489
490 if (!is_valid_ether_addr(sock_addr->sa_data))
491 return -EADDRNOTAVAIL;
492 memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN);
493 ndev->addr_assign_type &= ~NET_ADDR_RANDOM;
494 w5300_write_macaddr(priv);
495 return 0;
496}
497
/* .ndo_open: bring the interface up — start the hardware, enable NAPI
 * and the TX queue, and set the initial carrier state (assumed up when
 * no link-detect GPIO is wired).  Fails with -EINVAL if no valid MAC
 * address has been set.
 */
static int w5300_open(struct net_device *ndev)
{
	struct w5300_priv *priv = netdev_priv(ndev);

	netif_info(priv, ifup, ndev, "enabling\n");
	if (!is_valid_ether_addr(ndev->dev_addr))
		return -EINVAL;
	w5300_hw_start(priv);
	napi_enable(&priv->napi);
	netif_start_queue(ndev);
	if (!gpio_is_valid(priv->link_gpio) ||
	    gpio_get_value(priv->link_gpio) != 0)
		netif_carrier_on(ndev);
	return 0;
}
513
/* .ndo_stop: shut the hardware down, then quiesce the stack side
 * (carrier off, queue stopped, NAPI disabled).
 */
static int w5300_stop(struct net_device *ndev)
{
	struct w5300_priv *priv = netdev_priv(ndev);

	netif_info(priv, ifdown, ndev, "shutting down\n");
	w5300_hw_close(priv);
	netif_carrier_off(ndev);
	netif_stop_queue(ndev);
	napi_disable(&priv->napi);
	return 0;
}
525
/* ethtool entry points; link state comes from the optional GPIO. */
static const struct ethtool_ops w5300_ethtool_ops = {
	.get_drvinfo		= w5300_get_drvinfo,
	.get_msglevel		= w5300_get_msglevel,
	.set_msglevel		= w5300_set_msglevel,
	.get_link		= w5300_get_link,
	.get_regs_len		= w5300_get_regs_len,
	.get_regs		= w5300_get_regs,
};
534
/* Netdev callbacks; generic ethernet helpers cover address validation
 * and MTU changes.
 */
static const struct net_device_ops w5300_netdev_ops = {
	.ndo_open		= w5300_open,
	.ndo_stop		= w5300_stop,
	.ndo_start_xmit		= w5300_start_tx,
	.ndo_tx_timeout		= w5300_tx_timeout,
	.ndo_set_rx_mode	= w5300_set_rx_mode,
	.ndo_set_mac_address	= w5300_set_macaddr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
};
545
546static int __devinit w5300_hw_probe(struct platform_device *pdev)
547{
548 struct wiznet_platform_data *data = pdev->dev.platform_data;
549 struct net_device *ndev = platform_get_drvdata(pdev);
550 struct w5300_priv *priv = netdev_priv(ndev);
551 const char *name = netdev_name(ndev);
552 struct resource *mem;
553 int mem_size;
554 int irq;
555 int ret;
556
557 if (data && is_valid_ether_addr(data->mac_addr)) {
558 memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
559 } else {
560 random_ether_addr(ndev->dev_addr);
561 ndev->addr_assign_type |= NET_ADDR_RANDOM;
562 }
563
564 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
565 if (!mem)
566 return -ENXIO;
567 mem_size = resource_size(mem);
568 if (!devm_request_mem_region(&pdev->dev, mem->start, mem_size, name))
569 return -EBUSY;
570 priv->base = devm_ioremap(&pdev->dev, mem->start, mem_size);
571 if (!priv->base)
572 return -EBUSY;
573
574 spin_lock_init(&priv->reg_lock);
575 priv->indirect = mem_size < W5300_BUS_DIRECT_SIZE;
576 if (priv->indirect) {
577 priv->read = w5300_read_indirect;
578 priv->write = w5300_write_indirect;
579 } else {
580 priv->read = w5300_read_direct;
581 priv->write = w5300_write_direct;
582 }
583
584 w5300_hw_reset(priv);
585 if (w5300_read(priv, W5300_IDR) != IDR_W5300)
586 return -ENODEV;
587
588 irq = platform_get_irq(pdev, 0);
589 if (irq < 0)
590 return irq;
591 ret = request_irq(irq, w5300_interrupt,
592 IRQ_TYPE_LEVEL_LOW, name, ndev);
593 if (ret < 0)
594 return ret;
595 priv->irq = irq;
596
597 priv->link_gpio = data ? data->link_gpio : -EINVAL;
598 if (gpio_is_valid(priv->link_gpio)) {
599 char *link_name = devm_kzalloc(&pdev->dev, 16, GFP_KERNEL);
600 if (!link_name)
601 return -ENOMEM;
602 snprintf(link_name, 16, "%s-link", name);
603 priv->link_irq = gpio_to_irq(priv->link_gpio);
604 if (request_any_context_irq(priv->link_irq, w5300_detect_link,
605 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
606 link_name, priv->ndev) < 0)
607 priv->link_gpio = -EINVAL;
608 }
609
610 netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, irq);
611 return 0;
612}
613
/* Platform probe: allocate the netdev, hook up the ops and NAPI,
 * register it, then probe the hardware.  On failure the netdev is
 * unregistered/freed and drvdata cleared.
 * NOTE(review): register_netdev() runs before w5300_hw_probe(), so a
 * very early ndo_open could race hardware setup — verify whether the
 * rtnl ordering here is safe.
 */
static int __devinit w5300_probe(struct platform_device *pdev)
{
	struct w5300_priv *priv;
	struct net_device *ndev;
	int err;

	ndev = alloc_etherdev(sizeof(*priv));
	if (!ndev)
		return -ENOMEM;
	SET_NETDEV_DEV(ndev, &pdev->dev);
	platform_set_drvdata(pdev, ndev);
	priv = netdev_priv(ndev);
	priv->ndev = ndev;

	ether_setup(ndev);
	ndev->netdev_ops = &w5300_netdev_ops;
	ndev->ethtool_ops = &w5300_ethtool_ops;
	ndev->watchdog_timeo = HZ;
	netif_napi_add(ndev, &priv->napi, w5300_napi_poll, 16);

	/* This chip doesn't support VLAN packets with normal MTU,
	 * so disable VLAN for this device.
	 */
	ndev->features |= NETIF_F_VLAN_CHALLENGED;

	err = register_netdev(ndev);
	if (err < 0)
		goto err_register;

	err = w5300_hw_probe(pdev);
	if (err < 0)
		goto err_hw_probe;

	return 0;

err_hw_probe:
	unregister_netdev(ndev);
err_register:
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);
	return err;
}
656
/* Platform remove: reset the chip (masks all interrupts), release the
 * IRQs, then tear down the netdev.
 * NOTE(review): the IRQs are freed before unregister_netdev(); this
 * relies on the preceding w5300_hw_reset() having quiesced the chip —
 * confirm no ndo_stop path touches the freed IRQs.
 */
static int __devexit w5300_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct w5300_priv *priv = netdev_priv(ndev);

	w5300_hw_reset(priv);
	free_irq(priv->irq, ndev);
	if (gpio_is_valid(priv->link_gpio))
		free_irq(priv->link_irq, ndev);

	unregister_netdev(ndev);
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);
	return 0;
}
672
673#ifdef CONFIG_PM
/* PM suspend: if the interface is up, detach it from the stack and
 * shut the hardware down.  A down interface needs nothing done.
 */
static int w5300_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct w5300_priv *priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netif_carrier_off(ndev);
		netif_device_detach(ndev);

		w5300_hw_close(priv);
	}
	return 0;
}
688
689static int w5300_resume(struct device *dev)
690{
691 struct platform_device *pdev = to_platform_device(dev);
692 struct net_device *ndev = platform_get_drvdata(pdev);
693 struct w5300_priv *priv = netdev_priv(ndev);
694
695 if (!netif_running(ndev)) {
696 w5300_hw_reset(priv);
697 w5300_hw_start(priv);
698
699 netif_device_attach(ndev);
700 if (!gpio_is_valid(priv->link_gpio) ||
701 gpio_get_value(priv->link_gpio) != 0)
702 netif_carrier_on(ndev);
703 }
704 return 0;
705}
706#endif /* CONFIG_PM */
707
/* Suspend/resume ops; SIMPLE_DEV_PM_OPS compiles down to an empty set
 * when power management is disabled.
 */
static SIMPLE_DEV_PM_OPS(w5300_pm_ops, w5300_suspend, w5300_resume);

/* Platform driver glue; module init/exit supplied by the macro below. */
static struct platform_driver w5300_driver = {
	.driver		= {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
		.pm	= &w5300_pm_ops,
	},
	.probe		= w5300_probe,
	.remove		= __devexit_p(w5300_remove),
};

module_platform_driver(w5300_driver);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index d21591a2c593..1eaf7128afee 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1000,6 +1000,7 @@ static const struct ethtool_ops temac_ethtool_ops = {
1000 .set_settings = temac_set_settings, 1000 .set_settings = temac_set_settings,
1001 .nway_reset = temac_nway_reset, 1001 .nway_reset = temac_nway_reset,
1002 .get_link = ethtool_op_get_link, 1002 .get_link = ethtool_op_get_link,
1003 .get_ts_info = ethtool_op_get_ts_info,
1003}; 1004};
1004 1005
1005static int __devinit temac_of_probe(struct platform_device *op) 1006static int __devinit temac_of_probe(struct platform_device *op)
diff --git a/drivers/net/ethernet/xscale/Kconfig b/drivers/net/ethernet/xscale/Kconfig
index cf67352cea14..3f431019e615 100644
--- a/drivers/net/ethernet/xscale/Kconfig
+++ b/drivers/net/ethernet/xscale/Kconfig
@@ -5,8 +5,8 @@
5config NET_VENDOR_XSCALE 5config NET_VENDOR_XSCALE
6 bool "Intel XScale IXP devices" 6 bool "Intel XScale IXP devices"
7 default y 7 default y
8 depends on NET_VENDOR_INTEL && ((ARM && ARCH_IXP4XX && \ 8 depends on NET_VENDOR_INTEL && (ARM && ARCH_IXP4XX && \
9 IXP4XX_NPE && IXP4XX_QMGR) || ARCH_ENP2611) 9 IXP4XX_NPE && IXP4XX_QMGR)
10 ---help--- 10 ---help---
11 If you have a network (Ethernet) card belonging to this class, say Y 11 If you have a network (Ethernet) card belonging to this class, say Y
12 and read the Ethernet-HOWTO, available from 12 and read the Ethernet-HOWTO, available from
@@ -27,6 +27,4 @@ config IXP4XX_ETH
27 Say Y here if you want to use built-in Ethernet ports 27 Say Y here if you want to use built-in Ethernet ports
28 on IXP4xx processor. 28 on IXP4xx processor.
29 29
30source "drivers/net/ethernet/xscale/ixp2000/Kconfig"
31
32endif # NET_VENDOR_XSCALE 30endif # NET_VENDOR_XSCALE
diff --git a/drivers/net/ethernet/xscale/Makefile b/drivers/net/ethernet/xscale/Makefile
index b195b9d7fe81..abc3b031fba7 100644
--- a/drivers/net/ethernet/xscale/Makefile
+++ b/drivers/net/ethernet/xscale/Makefile
@@ -2,5 +2,4 @@
2# Makefile for the Intel XScale IXP device drivers. 2# Makefile for the Intel XScale IXP device drivers.
3# 3#
4 4
5obj-$(CONFIG_ENP2611_MSF_NET) += ixp2000/
6obj-$(CONFIG_IXP4XX_ETH) += ixp4xx_eth.o 5obj-$(CONFIG_IXP4XX_ETH) += ixp4xx_eth.o
diff --git a/drivers/net/ethernet/xscale/ixp2000/Kconfig b/drivers/net/ethernet/xscale/ixp2000/Kconfig
deleted file mode 100644
index 58dbc5b876bc..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/Kconfig
+++ /dev/null
@@ -1,6 +0,0 @@
1config ENP2611_MSF_NET
2 tristate "Radisys ENP2611 MSF network interface support"
3 depends on ARCH_ENP2611
4 ---help---
5 This is a driver for the MSF network interface unit in
6 the IXP2400 on the Radisys ENP2611 platform.
diff --git a/drivers/net/ethernet/xscale/ixp2000/Makefile b/drivers/net/ethernet/xscale/ixp2000/Makefile
deleted file mode 100644
index fd38351ceaa7..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
1obj-$(CONFIG_ENP2611_MSF_NET) += enp2611_mod.o
2
3enp2611_mod-objs := caleb.o enp2611.o ixp2400-msf.o ixpdev.o pm3386.o
diff --git a/drivers/net/ethernet/xscale/ixp2000/caleb.c b/drivers/net/ethernet/xscale/ixp2000/caleb.c
deleted file mode 100644
index 7dea5b95012c..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/caleb.c
+++ /dev/null
@@ -1,136 +0,0 @@
1/*
2 * Helper functions for the SPI-3 bridge FPGA on the Radisys ENP2611
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/delay.h>
14#include <asm/io.h>
15#include "caleb.h"
16
17#define CALEB_IDLO 0x00
18#define CALEB_IDHI 0x01
19#define CALEB_RID 0x02
20#define CALEB_RESET 0x03
21#define CALEB_INTREN0 0x04
22#define CALEB_INTREN1 0x05
23#define CALEB_INTRSTAT0 0x06
24#define CALEB_INTRSTAT1 0x07
25#define CALEB_PORTEN 0x08
26#define CALEB_BURST 0x09
27#define CALEB_PORTPAUS 0x0A
28#define CALEB_PORTPAUSD 0x0B
29#define CALEB_PHY0RX 0x10
30#define CALEB_PHY1RX 0x11
31#define CALEB_PHY0TX 0x12
32#define CALEB_PHY1TX 0x13
33#define CALEB_IXPRX_HI_CNTR 0x15
34#define CALEB_PHY0RX_HI_CNTR 0x16
35#define CALEB_PHY1RX_HI_CNTR 0x17
36#define CALEB_IXPRX_CNTR 0x18
37#define CALEB_PHY0RX_CNTR 0x19
38#define CALEB_PHY1RX_CNTR 0x1A
39#define CALEB_IXPTX_CNTR 0x1B
40#define CALEB_PHY0TX_CNTR 0x1C
41#define CALEB_PHY1TX_CNTR 0x1D
42#define CALEB_DEBUG0 0x1E
43#define CALEB_DEBUG1 0x1F
44
45
46static u8 caleb_reg_read(int reg)
47{
48 u8 value;
49
50 value = *((volatile u8 *)(ENP2611_CALEB_VIRT_BASE + reg));
51
52// printk(KERN_INFO "caleb_reg_read(%d) = %.2x\n", reg, value);
53
54 return value;
55}
56
57static void caleb_reg_write(int reg, u8 value)
58{
59 u8 dummy;
60
61// printk(KERN_INFO "caleb_reg_write(%d, %.2x)\n", reg, value);
62
63 *((volatile u8 *)(ENP2611_CALEB_VIRT_BASE + reg)) = value;
64
65 dummy = *((volatile u8 *)ENP2611_CALEB_VIRT_BASE);
66 __asm__ __volatile__("mov %0, %0" : "+r" (dummy));
67}
68
69
70void caleb_reset(void)
71{
72 /*
73 * Perform a chip reset.
74 */
75 caleb_reg_write(CALEB_RESET, 0x02);
76 udelay(1);
77
78 /*
79 * Enable all interrupt sources. This is needed to get
80 * meaningful results out of the status bits (register 6
81 * and 7.)
82 */
83 caleb_reg_write(CALEB_INTREN0, 0xff);
84 caleb_reg_write(CALEB_INTREN1, 0x07);
85
86 /*
87 * Set RX and TX FIFO thresholds to 1.5kb.
88 */
89 caleb_reg_write(CALEB_PHY0RX, 0x11);
90 caleb_reg_write(CALEB_PHY1RX, 0x11);
91 caleb_reg_write(CALEB_PHY0TX, 0x11);
92 caleb_reg_write(CALEB_PHY1TX, 0x11);
93
94 /*
95 * Program SPI-3 burst size.
96 */
97 caleb_reg_write(CALEB_BURST, 0); // 64-byte RBUF mpackets
98// caleb_reg_write(CALEB_BURST, 1); // 128-byte RBUF mpackets
99// caleb_reg_write(CALEB_BURST, 2); // 256-byte RBUF mpackets
100}
101
102void caleb_enable_rx(int port)
103{
104 u8 temp;
105
106 temp = caleb_reg_read(CALEB_PORTEN);
107 temp |= 1 << port;
108 caleb_reg_write(CALEB_PORTEN, temp);
109}
110
111void caleb_disable_rx(int port)
112{
113 u8 temp;
114
115 temp = caleb_reg_read(CALEB_PORTEN);
116 temp &= ~(1 << port);
117 caleb_reg_write(CALEB_PORTEN, temp);
118}
119
120void caleb_enable_tx(int port)
121{
122 u8 temp;
123
124 temp = caleb_reg_read(CALEB_PORTEN);
125 temp |= 1 << (port + 4);
126 caleb_reg_write(CALEB_PORTEN, temp);
127}
128
129void caleb_disable_tx(int port)
130{
131 u8 temp;
132
133 temp = caleb_reg_read(CALEB_PORTEN);
134 temp &= ~(1 << (port + 4));
135 caleb_reg_write(CALEB_PORTEN, temp);
136}
diff --git a/drivers/net/ethernet/xscale/ixp2000/caleb.h b/drivers/net/ethernet/xscale/ixp2000/caleb.h
deleted file mode 100644
index e93a1ef5b8a3..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/caleb.h
+++ /dev/null
@@ -1,22 +0,0 @@
1/*
2 * Helper functions for the SPI-3 bridge FPGA on the Radisys ENP2611
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef __CALEB_H
13#define __CALEB_H
14
15void caleb_reset(void);
16void caleb_enable_rx(int port);
17void caleb_disable_rx(int port);
18void caleb_enable_tx(int port);
19void caleb_disable_tx(int port);
20
21
22#endif
diff --git a/drivers/net/ethernet/xscale/ixp2000/enp2611.c b/drivers/net/ethernet/xscale/ixp2000/enp2611.c
deleted file mode 100644
index 34a6cfd17930..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/enp2611.c
+++ /dev/null
@@ -1,232 +0,0 @@
1/*
2 * IXP2400 MSF network device driver for the Radisys ENP2611
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/netdevice.h>
15#include <linux/etherdevice.h>
16#include <linux/init.h>
17#include <linux/moduleparam.h>
18#include <asm/hardware/uengine.h>
19#include <asm/mach-types.h>
20#include <asm/io.h>
21#include "ixpdev.h"
22#include "caleb.h"
23#include "ixp2400-msf.h"
24#include "pm3386.h"
25
26/***********************************************************************
27 * The Radisys ENP2611 is a PCI form factor board with three SFP GBIC
28 * slots, connected via two PMC/Sierra 3386s and an SPI-3 bridge FPGA
29 * to the IXP2400.
30 *
31 * +-------------+
32 * SFP GBIC #0 ---+ | +---------+
33 * | PM3386 #0 +-------+ |
34 * SFP GBIC #1 ---+ | | "Caleb" | +---------+
35 * +-------------+ | | | |
36 * | SPI-3 +---------+ IXP2400 |
37 * +-------------+ | bridge | | |
38 * SFP GBIC #2 ---+ | | FPGA | +---------+
39 * | PM3386 #1 +-------+ |
40 * | | +---------+
41 * +-------------+
42 * ^ ^ ^
43 * | 1.25Gbaud | 104MHz | 104MHz
44 * | SERDES ea. | SPI-3 ea. | SPI-3
45 *
46 ***********************************************************************/
47static struct ixp2400_msf_parameters enp2611_msf_parameters =
48{
49 .rx_mode = IXP2400_RX_MODE_UTOPIA_POS |
50 IXP2400_RX_MODE_1x32 |
51 IXP2400_RX_MODE_MPHY |
52 IXP2400_RX_MODE_MPHY_32 |
53 IXP2400_RX_MODE_MPHY_POLLED_STATUS |
54 IXP2400_RX_MODE_MPHY_LEVEL3 |
55 IXP2400_RX_MODE_RBUF_SIZE_64,
56
57 .rxclk01_multiplier = IXP2400_PLL_MULTIPLIER_16,
58
59 .rx_poll_ports = 3,
60
61 .rx_channel_mode = {
62 IXP2400_PORT_RX_MODE_MASTER |
63 IXP2400_PORT_RX_MODE_POS_PHY |
64 IXP2400_PORT_RX_MODE_POS_PHY_L3 |
65 IXP2400_PORT_RX_MODE_ODD_PARITY |
66 IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,
67
68 IXP2400_PORT_RX_MODE_MASTER |
69 IXP2400_PORT_RX_MODE_POS_PHY |
70 IXP2400_PORT_RX_MODE_POS_PHY_L3 |
71 IXP2400_PORT_RX_MODE_ODD_PARITY |
72 IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,
73
74 IXP2400_PORT_RX_MODE_MASTER |
75 IXP2400_PORT_RX_MODE_POS_PHY |
76 IXP2400_PORT_RX_MODE_POS_PHY_L3 |
77 IXP2400_PORT_RX_MODE_ODD_PARITY |
78 IXP2400_PORT_RX_MODE_2_CYCLE_DECODE,
79
80 IXP2400_PORT_RX_MODE_MASTER |
81 IXP2400_PORT_RX_MODE_POS_PHY |
82 IXP2400_PORT_RX_MODE_POS_PHY_L3 |
83 IXP2400_PORT_RX_MODE_ODD_PARITY |
84 IXP2400_PORT_RX_MODE_2_CYCLE_DECODE
85 },
86
87 .tx_mode = IXP2400_TX_MODE_UTOPIA_POS |
88 IXP2400_TX_MODE_1x32 |
89 IXP2400_TX_MODE_MPHY |
90 IXP2400_TX_MODE_MPHY_32 |
91 IXP2400_TX_MODE_MPHY_POLLED_STATUS |
92 IXP2400_TX_MODE_MPHY_LEVEL3 |
93 IXP2400_TX_MODE_TBUF_SIZE_64,
94
95 .txclk01_multiplier = IXP2400_PLL_MULTIPLIER_16,
96
97 .tx_poll_ports = 3,
98
99 .tx_channel_mode = {
100 IXP2400_PORT_TX_MODE_MASTER |
101 IXP2400_PORT_TX_MODE_POS_PHY |
102 IXP2400_PORT_TX_MODE_ODD_PARITY |
103 IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,
104
105 IXP2400_PORT_TX_MODE_MASTER |
106 IXP2400_PORT_TX_MODE_POS_PHY |
107 IXP2400_PORT_TX_MODE_ODD_PARITY |
108 IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,
109
110 IXP2400_PORT_TX_MODE_MASTER |
111 IXP2400_PORT_TX_MODE_POS_PHY |
112 IXP2400_PORT_TX_MODE_ODD_PARITY |
113 IXP2400_PORT_TX_MODE_2_CYCLE_DECODE,
114
115 IXP2400_PORT_TX_MODE_MASTER |
116 IXP2400_PORT_TX_MODE_POS_PHY |
117 IXP2400_PORT_TX_MODE_ODD_PARITY |
118 IXP2400_PORT_TX_MODE_2_CYCLE_DECODE
119 }
120};
121
122static struct net_device *nds[3];
123static struct timer_list link_check_timer;
124
125/* @@@ Poll the SFP moddef0 line too. */
126/* @@@ Try to use the pm3386 DOOL interrupt as well. */
127static void enp2611_check_link_status(unsigned long __dummy)
128{
129 int i;
130
131 for (i = 0; i < 3; i++) {
132 struct net_device *dev;
133 int status;
134
135 dev = nds[i];
136 if (dev == NULL)
137 continue;
138
139 status = pm3386_is_link_up(i);
140 if (status && !netif_carrier_ok(dev)) {
141 /* @@@ Should report autonegotiation status. */
142 printk(KERN_INFO "%s: NIC Link is Up\n", dev->name);
143
144 pm3386_enable_tx(i);
145 caleb_enable_tx(i);
146 netif_carrier_on(dev);
147 } else if (!status && netif_carrier_ok(dev)) {
148 printk(KERN_INFO "%s: NIC Link is Down\n", dev->name);
149
150 netif_carrier_off(dev);
151 caleb_disable_tx(i);
152 pm3386_disable_tx(i);
153 }
154 }
155
156 link_check_timer.expires = jiffies + HZ / 10;
157 add_timer(&link_check_timer);
158}
159
160static void enp2611_set_port_admin_status(int port, int up)
161{
162 if (up) {
163 caleb_enable_rx(port);
164
165 pm3386_set_carrier(port, 1);
166 pm3386_enable_rx(port);
167 } else {
168 caleb_disable_tx(port);
169 pm3386_disable_tx(port);
170 /* @@@ Flush out pending packets. */
171 pm3386_set_carrier(port, 0);
172
173 pm3386_disable_rx(port);
174 caleb_disable_rx(port);
175 }
176}
177
178static int __init enp2611_init_module(void)
179{
180 int ports;
181 int i;
182
183 if (!machine_is_enp2611())
184 return -ENODEV;
185
186 caleb_reset();
187 pm3386_reset();
188
189 ports = pm3386_port_count();
190 for (i = 0; i < ports; i++) {
191 nds[i] = ixpdev_alloc(i, sizeof(struct ixpdev_priv));
192 if (nds[i] == NULL) {
193 while (--i >= 0)
194 free_netdev(nds[i]);
195 return -ENOMEM;
196 }
197
198 pm3386_init_port(i);
199 pm3386_get_mac(i, nds[i]->dev_addr);
200 }
201
202 ixp2400_msf_init(&enp2611_msf_parameters);
203
204 if (ixpdev_init(ports, nds, enp2611_set_port_admin_status)) {
205 for (i = 0; i < ports; i++)
206 if (nds[i])
207 free_netdev(nds[i]);
208 return -EINVAL;
209 }
210
211 init_timer(&link_check_timer);
212 link_check_timer.function = enp2611_check_link_status;
213 link_check_timer.expires = jiffies;
214 add_timer(&link_check_timer);
215
216 return 0;
217}
218
219static void __exit enp2611_cleanup_module(void)
220{
221 int i;
222
223 del_timer_sync(&link_check_timer);
224
225 ixpdev_deinit();
226 for (i = 0; i < 3; i++)
227 free_netdev(nds[i]);
228}
229
230module_init(enp2611_init_module);
231module_exit(enp2611_cleanup_module);
232MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.c b/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.c
deleted file mode 100644
index f5ffd7e05d26..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.c
+++ /dev/null
@@ -1,212 +0,0 @@
1/*
2 * Generic library functions for the MSF (Media and Switch Fabric) unit
3 * found on the Intel IXP2400 network processor.
4 *
5 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
6 * Dedicated to Marija Kulikova.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU Lesser General Public License as
10 * published by the Free Software Foundation; either version 2.1 of the
11 * License, or (at your option) any later version.
12 */
13
14#include <linux/kernel.h>
15#include <linux/init.h>
16#include <mach/hardware.h>
17#include <mach/ixp2000-regs.h>
18#include <asm/delay.h>
19#include <asm/io.h>
20#include "ixp2400-msf.h"
21
22/*
23 * This is the Intel recommended PLL init procedure as described on
24 * page 340 of the IXP2400/IXP2800 Programmer's Reference Manual.
25 */
26static void ixp2400_pll_init(struct ixp2400_msf_parameters *mp)
27{
28 int rx_dual_clock;
29 int tx_dual_clock;
30 u32 value;
31
32 /*
33 * If the RX mode is not 1x32, we have to enable both RX PLLs
34 * (#0 and #1.) The same thing for the TX direction.
35 */
36 rx_dual_clock = !!(mp->rx_mode & IXP2400_RX_MODE_WIDTH_MASK);
37 tx_dual_clock = !!(mp->tx_mode & IXP2400_TX_MODE_WIDTH_MASK);
38
39 /*
40 * Read initial value.
41 */
42 value = ixp2000_reg_read(IXP2000_MSF_CLK_CNTRL);
43
44 /*
45 * Put PLLs in powerdown and bypass mode.
46 */
47 value |= 0x0000f0f0;
48 ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
49
50 /*
51 * Set single or dual clock mode bits.
52 */
53 value &= ~0x03000000;
54 value |= (rx_dual_clock << 24) | (tx_dual_clock << 25);
55
56 /*
57 * Set multipliers.
58 */
59 value &= ~0x00ff0000;
60 value |= mp->rxclk01_multiplier << 16;
61 value |= mp->rxclk23_multiplier << 18;
62 value |= mp->txclk01_multiplier << 20;
63 value |= mp->txclk23_multiplier << 22;
64
65 /*
66 * And write value.
67 */
68 ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
69
70 /*
71 * Disable PLL bypass mode.
72 */
73 value &= ~(0x00005000 | rx_dual_clock << 13 | tx_dual_clock << 15);
74 ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
75
76 /*
77 * Turn on PLLs.
78 */
79 value &= ~(0x00000050 | rx_dual_clock << 5 | tx_dual_clock << 7);
80 ixp2000_reg_write(IXP2000_MSF_CLK_CNTRL, value);
81
82 /*
83 * Wait for PLLs to lock. There are lock status bits, but IXP2400
84 * erratum #65 says that these lock bits should not be relied upon
85 * as they might not accurately reflect the true state of the PLLs.
86 */
87 udelay(100);
88}
89
90/*
91 * Needed according to p480 of Programmer's Reference Manual.
92 */
93static void ixp2400_msf_free_rbuf_entries(struct ixp2400_msf_parameters *mp)
94{
95 int size_bits;
96 int i;
97
98 /*
99 * Work around IXP2400 erratum #69 (silent RBUF-to-DRAM transfer
100 * corruption) in the Intel-recommended way: do not add the RBUF
101 * elements susceptible to corruption to the freelist.
102 */
103 size_bits = mp->rx_mode & IXP2400_RX_MODE_RBUF_SIZE_MASK;
104 if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_64) {
105 for (i = 1; i < 128; i++) {
106 if (i == 9 || i == 18 || i == 27)
107 continue;
108 ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i);
109 }
110 } else if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_128) {
111 for (i = 1; i < 64; i++) {
112 if (i == 4 || i == 9 || i == 13)
113 continue;
114 ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i);
115 }
116 } else if (size_bits == IXP2400_RX_MODE_RBUF_SIZE_256) {
117 for (i = 1; i < 32; i++) {
118 if (i == 2 || i == 4 || i == 6)
119 continue;
120 ixp2000_reg_write(IXP2000_MSF_RBUF_ELEMENT_DONE, i);
121 }
122 }
123}
124
125static u32 ixp2400_msf_valid_channels(u32 reg)
126{
127 u32 channels;
128
129 channels = 0;
130 switch (reg & IXP2400_RX_MODE_WIDTH_MASK) {
131 case IXP2400_RX_MODE_1x32:
132 channels = 0x1;
133 if (reg & IXP2400_RX_MODE_MPHY &&
134 !(reg & IXP2400_RX_MODE_MPHY_32))
135 channels = 0xf;
136 break;
137
138 case IXP2400_RX_MODE_2x16:
139 channels = 0x5;
140 break;
141
142 case IXP2400_RX_MODE_4x8:
143 channels = 0xf;
144 break;
145
146 case IXP2400_RX_MODE_1x16_2x8:
147 channels = 0xd;
148 break;
149 }
150
151 return channels;
152}
153
154static void ixp2400_msf_enable_rx(struct ixp2400_msf_parameters *mp)
155{
156 u32 value;
157
158 value = ixp2000_reg_read(IXP2000_MSF_RX_CONTROL) & 0x0fffffff;
159 value |= ixp2400_msf_valid_channels(mp->rx_mode) << 28;
160 ixp2000_reg_write(IXP2000_MSF_RX_CONTROL, value);
161}
162
163static void ixp2400_msf_enable_tx(struct ixp2400_msf_parameters *mp)
164{
165 u32 value;
166
167 value = ixp2000_reg_read(IXP2000_MSF_TX_CONTROL) & 0x0fffffff;
168 value |= ixp2400_msf_valid_channels(mp->tx_mode) << 28;
169 ixp2000_reg_write(IXP2000_MSF_TX_CONTROL, value);
170}
171
172
173void ixp2400_msf_init(struct ixp2400_msf_parameters *mp)
174{
175 u32 value;
176 int i;
177
178 /*
179 * Init the RX/TX PLLs based on the passed parameter block.
180 */
181 ixp2400_pll_init(mp);
182
183 /*
184 * Reset MSF. Bit 7 in IXP_RESET_0 resets the MSF.
185 */
186 value = ixp2000_reg_read(IXP2000_RESET0);
187 ixp2000_reg_write(IXP2000_RESET0, value | 0x80);
188 ixp2000_reg_write(IXP2000_RESET0, value & ~0x80);
189
190 /*
191 * Initialise the RX section.
192 */
193 ixp2000_reg_write(IXP2000_MSF_RX_MPHY_POLL_LIMIT, mp->rx_poll_ports - 1);
194 ixp2000_reg_write(IXP2000_MSF_RX_CONTROL, mp->rx_mode);
195 for (i = 0; i < 4; i++) {
196 ixp2000_reg_write(IXP2000_MSF_RX_UP_CONTROL_0 + i,
197 mp->rx_channel_mode[i]);
198 }
199 ixp2400_msf_free_rbuf_entries(mp);
200 ixp2400_msf_enable_rx(mp);
201
202 /*
203 * Initialise the TX section.
204 */
205 ixp2000_reg_write(IXP2000_MSF_TX_MPHY_POLL_LIMIT, mp->tx_poll_ports - 1);
206 ixp2000_reg_write(IXP2000_MSF_TX_CONTROL, mp->tx_mode);
207 for (i = 0; i < 4; i++) {
208 ixp2000_reg_write(IXP2000_MSF_TX_UP_CONTROL_0 + i,
209 mp->tx_channel_mode[i]);
210 }
211 ixp2400_msf_enable_tx(mp);
212}
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.h b/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.h
deleted file mode 100644
index 3ac1af2771da..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixp2400-msf.h
+++ /dev/null
@@ -1,115 +0,0 @@
1/*
2 * Generic library functions for the MSF (Media and Switch Fabric) unit
3 * found on the Intel IXP2400 network processor.
4 *
5 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
6 * Dedicated to Marija Kulikova.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU Lesser General Public License as
10 * published by the Free Software Foundation; either version 2.1 of the
11 * License, or (at your option) any later version.
12 */
13
14#ifndef __IXP2400_MSF_H
15#define __IXP2400_MSF_H
16
17struct ixp2400_msf_parameters
18{
19 u32 rx_mode;
20 unsigned rxclk01_multiplier:2;
21 unsigned rxclk23_multiplier:2;
22 unsigned rx_poll_ports:6;
23 u32 rx_channel_mode[4];
24
25 u32 tx_mode;
26 unsigned txclk01_multiplier:2;
27 unsigned txclk23_multiplier:2;
28 unsigned tx_poll_ports:6;
29 u32 tx_channel_mode[4];
30};
31
32void ixp2400_msf_init(struct ixp2400_msf_parameters *mp);
33
34#define IXP2400_PLL_MULTIPLIER_48 0x00
35#define IXP2400_PLL_MULTIPLIER_24 0x01
36#define IXP2400_PLL_MULTIPLIER_16 0x02
37#define IXP2400_PLL_MULTIPLIER_12 0x03
38
39#define IXP2400_RX_MODE_CSIX 0x00400000
40#define IXP2400_RX_MODE_UTOPIA_POS 0x00000000
41#define IXP2400_RX_MODE_WIDTH_MASK 0x00300000
42#define IXP2400_RX_MODE_1x16_2x8 0x00300000
43#define IXP2400_RX_MODE_4x8 0x00200000
44#define IXP2400_RX_MODE_2x16 0x00100000
45#define IXP2400_RX_MODE_1x32 0x00000000
46#define IXP2400_RX_MODE_MPHY 0x00080000
47#define IXP2400_RX_MODE_SPHY 0x00000000
48#define IXP2400_RX_MODE_MPHY_32 0x00040000
49#define IXP2400_RX_MODE_MPHY_4 0x00000000
50#define IXP2400_RX_MODE_MPHY_POLLED_STATUS 0x00020000
51#define IXP2400_RX_MODE_MPHY_DIRECT_STATUS 0x00000000
52#define IXP2400_RX_MODE_CBUS_FULL_DUPLEX 0x00010000
53#define IXP2400_RX_MODE_CBUS_SIMPLEX 0x00000000
54#define IXP2400_RX_MODE_MPHY_LEVEL2 0x00004000
55#define IXP2400_RX_MODE_MPHY_LEVEL3 0x00000000
56#define IXP2400_RX_MODE_CBUS_8BIT 0x00002000
57#define IXP2400_RX_MODE_CBUS_4BIT 0x00000000
58#define IXP2400_RX_MODE_CSIX_SINGLE_FREELIST 0x00000200
59#define IXP2400_RX_MODE_CSIX_SPLIT_FREELISTS 0x00000000
60#define IXP2400_RX_MODE_RBUF_SIZE_MASK 0x0000000c
61#define IXP2400_RX_MODE_RBUF_SIZE_256 0x00000008
62#define IXP2400_RX_MODE_RBUF_SIZE_128 0x00000004
63#define IXP2400_RX_MODE_RBUF_SIZE_64 0x00000000
64
65#define IXP2400_PORT_RX_MODE_SLAVE 0x00000040
66#define IXP2400_PORT_RX_MODE_MASTER 0x00000000
67#define IXP2400_PORT_RX_MODE_POS_PHY_L3 0x00000020
68#define IXP2400_PORT_RX_MODE_POS_PHY_L2 0x00000000
69#define IXP2400_PORT_RX_MODE_POS_PHY 0x00000010
70#define IXP2400_PORT_RX_MODE_UTOPIA 0x00000000
71#define IXP2400_PORT_RX_MODE_EVEN_PARITY 0x0000000c
72#define IXP2400_PORT_RX_MODE_ODD_PARITY 0x00000008
73#define IXP2400_PORT_RX_MODE_NO_PARITY 0x00000000
74#define IXP2400_PORT_RX_MODE_UTOPIA_BIG_CELLS 0x00000002
75#define IXP2400_PORT_RX_MODE_UTOPIA_NORMAL_CELLS 0x00000000
76#define IXP2400_PORT_RX_MODE_2_CYCLE_DECODE 0x00000001
77#define IXP2400_PORT_RX_MODE_1_CYCLE_DECODE 0x00000000
78
79#define IXP2400_TX_MODE_CSIX 0x00400000
80#define IXP2400_TX_MODE_UTOPIA_POS 0x00000000
81#define IXP2400_TX_MODE_WIDTH_MASK 0x00300000
82#define IXP2400_TX_MODE_1x16_2x8 0x00300000
83#define IXP2400_TX_MODE_4x8 0x00200000
84#define IXP2400_TX_MODE_2x16 0x00100000
85#define IXP2400_TX_MODE_1x32 0x00000000
86#define IXP2400_TX_MODE_MPHY 0x00080000
87#define IXP2400_TX_MODE_SPHY 0x00000000
88#define IXP2400_TX_MODE_MPHY_32 0x00040000
89#define IXP2400_TX_MODE_MPHY_4 0x00000000
90#define IXP2400_TX_MODE_MPHY_POLLED_STATUS 0x00020000
91#define IXP2400_TX_MODE_MPHY_DIRECT_STATUS 0x00000000
92#define IXP2400_TX_MODE_CBUS_FULL_DUPLEX 0x00010000
93#define IXP2400_TX_MODE_CBUS_SIMPLEX 0x00000000
94#define IXP2400_TX_MODE_MPHY_LEVEL2 0x00004000
95#define IXP2400_TX_MODE_MPHY_LEVEL3 0x00000000
96#define IXP2400_TX_MODE_CBUS_8BIT 0x00002000
97#define IXP2400_TX_MODE_CBUS_4BIT 0x00000000
98#define IXP2400_TX_MODE_TBUF_SIZE_MASK 0x0000000c
99#define IXP2400_TX_MODE_TBUF_SIZE_256 0x00000008
100#define IXP2400_TX_MODE_TBUF_SIZE_128 0x00000004
101#define IXP2400_TX_MODE_TBUF_SIZE_64 0x00000000
102
103#define IXP2400_PORT_TX_MODE_SLAVE 0x00000040
104#define IXP2400_PORT_TX_MODE_MASTER 0x00000000
105#define IXP2400_PORT_TX_MODE_POS_PHY 0x00000010
106#define IXP2400_PORT_TX_MODE_UTOPIA 0x00000000
107#define IXP2400_PORT_TX_MODE_EVEN_PARITY 0x0000000c
108#define IXP2400_PORT_TX_MODE_ODD_PARITY 0x00000008
109#define IXP2400_PORT_TX_MODE_NO_PARITY 0x00000000
110#define IXP2400_PORT_TX_MODE_UTOPIA_BIG_CELLS 0x00000002
111#define IXP2400_PORT_TX_MODE_2_CYCLE_DECODE 0x00000001
112#define IXP2400_PORT_TX_MODE_1_CYCLE_DECODE 0x00000000
113
114
115#endif
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.uc b/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.uc
deleted file mode 100644
index 42a73e357afa..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.uc
+++ /dev/null
@@ -1,408 +0,0 @@
1/*
2 * RX ucode for the Intel IXP2400 in POS-PHY mode.
3 * Copyright (C) 2004, 2005 Lennert Buytenhek
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * Assumptions made in this code:
12 * - The IXP2400 MSF is configured for POS-PHY mode, in a mode where
13 * only one full element list is used. This includes, for example,
14 * 1x32 SPHY and 1x32 MPHY32, but not 4x8 SPHY or 1x32 MPHY4. (This
15 * is not an exhaustive list.)
16 * - The RBUF uses 64-byte mpackets.
17 * - RX descriptors reside in SRAM, and have the following format:
18 * struct rx_desc
19 * {
20 * // to uengine
21 * u32 buf_phys_addr;
22 * u32 buf_length;
23 *
24 * // from uengine
25 * u32 channel;
26 * u32 pkt_length;
27 * };
28 * - Packet data resides in DRAM.
29 * - Packet buffer addresses are 8-byte aligned.
30 * - Scratch ring 0 is rx_pending.
31 * - Scratch ring 1 is rx_done, and has status condition 'full'.
32 * - The host triggers rx_done flush and rx_pending refill on seeing INTA.
33 * - This code is run on all eight threads of the microengine it runs on.
34 *
35 * Local memory is used for per-channel RX state.
36 */
37
38#define RX_THREAD_FREELIST_0 0x0030
39#define RBUF_ELEMENT_DONE 0x0044
40
41#define CHANNEL_FLAGS *l$index0[0]
42#define CHANNEL_FLAG_RECEIVING 1
43#define PACKET_LENGTH *l$index0[1]
44#define PACKET_CHECKSUM *l$index0[2]
45#define BUFFER_HANDLE *l$index0[3]
46#define BUFFER_START *l$index0[4]
47#define BUFFER_LENGTH *l$index0[5]
48
49#define CHANNEL_STATE_SIZE 24 // in bytes
50#define CHANNEL_STATE_SHIFT 5 // ceil(log2(state size))
51
52
53 .sig volatile sig1
54 .sig volatile sig2
55 .sig volatile sig3
56
57 .sig mpacket_arrived
58 .reg add_to_rx_freelist
59 .reg read $rsw0, $rsw1
60 .xfer_order $rsw0 $rsw1
61
62 .reg zero
63
64 /*
65 * Initialise add_to_rx_freelist.
66 */
67 .begin
68 .reg temp
69 .reg temp2
70
71 immed[add_to_rx_freelist, RX_THREAD_FREELIST_0]
72 immed_w1[add_to_rx_freelist, (&$rsw0 | (&mpacket_arrived << 12))]
73
74 local_csr_rd[ACTIVE_CTX_STS]
75 immed[temp, 0]
76 alu[temp2, temp, and, 0x1f]
77 alu_shf[add_to_rx_freelist, add_to_rx_freelist, or, temp2, <<20]
78 alu[temp2, temp, and, 0x80]
79 alu_shf[add_to_rx_freelist, add_to_rx_freelist, or, temp2, <<18]
80 .end
81
82 immed[zero, 0]
83
84 /*
85 * Skip context 0 initialisation?
86 */
87 .begin
88 br!=ctx[0, mpacket_receive_loop#]
89 .end
90
91 /*
92 * Initialise local memory.
93 */
94 .begin
95 .reg addr
96 .reg temp
97
98 immed[temp, 0]
99 init_local_mem_loop#:
100 alu_shf[addr, --, b, temp, <<CHANNEL_STATE_SHIFT]
101 local_csr_wr[ACTIVE_LM_ADDR_0, addr]
102 nop
103 nop
104 nop
105
106 immed[CHANNEL_FLAGS, 0]
107
108 alu[temp, temp, +, 1]
109 alu[--, temp, and, 0x20]
110 beq[init_local_mem_loop#]
111 .end
112
113 /*
114 * Initialise signal pipeline.
115 */
116 .begin
117 local_csr_wr[SAME_ME_SIGNAL, (&sig1 << 3)]
118 .set_sig sig1
119
120 local_csr_wr[SAME_ME_SIGNAL, (&sig2 << 3)]
121 .set_sig sig2
122
123 local_csr_wr[SAME_ME_SIGNAL, (&sig3 << 3)]
124 .set_sig sig3
125 .end
126
127mpacket_receive_loop#:
128 /*
129 * Synchronise and wait for mpacket.
130 */
131 .begin
132 ctx_arb[sig1]
133 local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig1 << 3))]
134
135 msf[fast_wr, --, add_to_rx_freelist, 0]
136 .set_sig mpacket_arrived
137 ctx_arb[mpacket_arrived]
138 .set $rsw0 $rsw1
139 .end
140
141 /*
142 * We halt if we see {inbparerr,parerr,null,soperror}.
143 */
144 .begin
145 alu_shf[--, 0x1b, and, $rsw0, >>8]
146 bne[abort_rswerr#]
147 .end
148
149 /*
150 * Point local memory pointer to this channel's state area.
151 */
152 .begin
153 .reg chanaddr
154
155 alu[chanaddr, $rsw0, and, 0x1f]
156 alu_shf[chanaddr, --, b, chanaddr, <<CHANNEL_STATE_SHIFT]
157 local_csr_wr[ACTIVE_LM_ADDR_0, chanaddr]
158 nop
159 nop
160 nop
161 .end
162
163 /*
164 * Check whether we received a SOP mpacket while we were already
165 * working on a packet, or a non-SOP mpacket while there was no
166 * packet pending. (SOP == RECEIVING -> abort) If everything's
167 * okay, update the RECEIVING flag to reflect our new state.
168 */
169 .begin
170 .reg temp
171 .reg eop
172
173 #if CHANNEL_FLAG_RECEIVING != 1
174 #error CHANNEL_FLAG_RECEIVING is not 1
175 #endif
176
177 alu_shf[temp, 1, and, $rsw0, >>15]
178 alu[temp, temp, xor, CHANNEL_FLAGS]
179 alu[--, temp, and, CHANNEL_FLAG_RECEIVING]
180 beq[abort_proterr#]
181
182 alu_shf[eop, 1, and, $rsw0, >>14]
183 alu[CHANNEL_FLAGS, temp, xor, eop]
184 .end
185
186 /*
187 * Copy the mpacket into the right spot, and in case of EOP,
188 * write back the descriptor and pass the packet on.
189 */
190 .begin
191 .reg buffer_offset
192 .reg _packet_length
193 .reg _packet_checksum
194 .reg _buffer_handle
195 .reg _buffer_start
196 .reg _buffer_length
197
198 /*
199 * Determine buffer_offset, _packet_length and
200 * _packet_checksum.
201 */
202 .begin
203 .reg temp
204
205 alu[--, 1, and, $rsw0, >>15]
206 beq[not_sop#]
207
208 immed[PACKET_LENGTH, 0]
209 immed[PACKET_CHECKSUM, 0]
210
211 not_sop#:
212 alu[buffer_offset, --, b, PACKET_LENGTH]
213 alu_shf[temp, 0xff, and, $rsw0, >>16]
214 alu[_packet_length, buffer_offset, +, temp]
215 alu[PACKET_LENGTH, --, b, _packet_length]
216
217 immed[temp, 0xffff]
218 alu[temp, $rsw1, and, temp]
219 alu[_packet_checksum, PACKET_CHECKSUM, +, temp]
220 alu[PACKET_CHECKSUM, --, b, _packet_checksum]
221 .end
222
223 /*
224 * Allocate buffer in case of SOP.
225 */
226 .begin
227 .reg temp
228
229 alu[temp, 1, and, $rsw0, >>15]
230 beq[skip_buffer_alloc#]
231
232 .begin
233 .sig zzz
234 .reg read $stemp $stemp2
235 .xfer_order $stemp $stemp2
236
237 rx_nobufs#:
238 scratch[get, $stemp, zero, 0, 1], ctx_swap[zzz]
239 alu[_buffer_handle, --, b, $stemp]
240 beq[rx_nobufs#]
241
242 sram[read, $stemp, _buffer_handle, 0, 2],
243 ctx_swap[zzz]
244 alu[_buffer_start, --, b, $stemp]
245 alu[_buffer_length, --, b, $stemp2]
246 .end
247
248 skip_buffer_alloc#:
249 .end
250
251 /*
252 * Resynchronise.
253 */
254 .begin
255 ctx_arb[sig2]
256 local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig2 << 3))]
257 .end
258
259 /*
260 * Synchronise buffer state.
261 */
262 .begin
263 .reg temp
264
265 alu[temp, 1, and, $rsw0, >>15]
266 beq[copy_from_local_mem#]
267
268 alu[BUFFER_HANDLE, --, b, _buffer_handle]
269 alu[BUFFER_START, --, b, _buffer_start]
270 alu[BUFFER_LENGTH, --, b, _buffer_length]
271 br[sync_state_done#]
272
273 copy_from_local_mem#:
274 alu[_buffer_handle, --, b, BUFFER_HANDLE]
275 alu[_buffer_start, --, b, BUFFER_START]
276 alu[_buffer_length, --, b, BUFFER_LENGTH]
277
278 sync_state_done#:
279 .end
280
281#if 0
282 /*
283 * Debug buffer state management.
284 */
285 .begin
286 .reg temp
287
288 alu[temp, 1, and, $rsw0, >>14]
289 beq[no_poison#]
290 immed[BUFFER_HANDLE, 0xdead]
291 immed[BUFFER_START, 0xdead]
292 immed[BUFFER_LENGTH, 0xdead]
293 no_poison#:
294
295 immed[temp, 0xdead]
296 alu[--, _buffer_handle, -, temp]
297 beq[state_corrupted#]
298 alu[--, _buffer_start, -, temp]
299 beq[state_corrupted#]
300 alu[--, _buffer_length, -, temp]
301 beq[state_corrupted#]
302 .end
303#endif
304
305 /*
306 * Check buffer length.
307 */
308 .begin
309 alu[--, _buffer_length, -, _packet_length]
310 blo[buffer_overflow#]
311 .end
312
313 /*
314 * Copy the mpacket and give back the RBUF element.
315 */
316 .begin
317 .reg element
318 .reg xfer_size
319 .reg temp
320 .sig copy_sig
321
322 alu_shf[element, 0x7f, and, $rsw0, >>24]
323 alu_shf[xfer_size, 0xff, and, $rsw0, >>16]
324
325 alu[xfer_size, xfer_size, -, 1]
326 alu_shf[xfer_size, 0x10, or, xfer_size, >>3]
327 alu_shf[temp, 0x10, or, xfer_size, <<21]
328 alu_shf[temp, temp, or, element, <<11]
329 alu_shf[--, temp, or, 1, <<18]
330
331 dram[rbuf_rd, --, _buffer_start, buffer_offset, max_8],
332 indirect_ref, sig_done[copy_sig]
333 ctx_arb[copy_sig]
334
335 alu[temp, RBUF_ELEMENT_DONE, or, element, <<16]
336 msf[fast_wr, --, temp, 0]
337 .end
338
339 /*
340 * If EOP, write back the packet descriptor.
341 */
342 .begin
343 .reg write $stemp $stemp2
344 .xfer_order $stemp $stemp2
345 .sig zzz
346
347 alu_shf[--, 1, and, $rsw0, >>14]
348 beq[no_writeback#]
349
350 alu[$stemp, $rsw0, and, 0x1f]
351 alu[$stemp2, --, b, _packet_length]
352 sram[write, $stemp, _buffer_handle, 8, 2], ctx_swap[zzz]
353
354 no_writeback#:
355 .end
356
357 /*
358 * Resynchronise.
359 */
360 .begin
361 ctx_arb[sig3]
362 local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig3 << 3))]
363 .end
364
365 /*
366 * If EOP, put the buffer back onto the scratch ring.
367 */
368 .begin
369 .reg write $stemp
370 .sig zzz
371
372 br_inp_state[SCR_Ring1_Status, rx_done_ring_overflow#]
373
374 alu_shf[--, 1, and, $rsw0, >>14]
375 beq[mpacket_receive_loop#]
376
377 alu[--, 1, and, $rsw0, >>10]
378 bne[rxerr#]
379
380 alu[$stemp, --, b, _buffer_handle]
381 scratch[put, $stemp, zero, 4, 1], ctx_swap[zzz]
382 cap[fast_wr, 0, XSCALE_INT_A]
383 br[mpacket_receive_loop#]
384
385 rxerr#:
386 alu[$stemp, --, b, _buffer_handle]
387 scratch[put, $stemp, zero, 0, 1], ctx_swap[zzz]
388 br[mpacket_receive_loop#]
389 .end
390 .end
391
392
393abort_rswerr#:
394 halt
395
396abort_proterr#:
397 halt
398
399state_corrupted#:
400 halt
401
402buffer_overflow#:
403 halt
404
405rx_done_ring_overflow#:
406 halt
407
408
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.ucode b/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.ucode
deleted file mode 100644
index e8aee2f81aad..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixp2400_rx.ucode
+++ /dev/null
@@ -1,130 +0,0 @@
1static struct ixp2000_uengine_code ixp2400_rx =
2{
3 .cpu_model_bitmask = 0x000003fe,
4 .cpu_min_revision = 0,
5 .cpu_max_revision = 255,
6
7 .uengine_parameters = IXP2000_UENGINE_8_CONTEXTS |
8 IXP2000_UENGINE_PRN_UPDATE_EVERY |
9 IXP2000_UENGINE_NN_FROM_PREVIOUS |
10 IXP2000_UENGINE_ASSERT_EMPTY_AT_0 |
11 IXP2000_UENGINE_LM_ADDR1_PER_CONTEXT |
12 IXP2000_UENGINE_LM_ADDR0_PER_CONTEXT,
13
14 .initial_reg_values = (struct ixp2000_reg_value []) {
15 { -1, -1 }
16 },
17
18 .num_insns = 109,
19 .insns = (u8 []) {
20 0xf0, 0x00, 0x0c, 0xc0, 0x05,
21 0xf4, 0x44, 0x0c, 0x00, 0x05,
22 0xfc, 0x04, 0x4c, 0x00, 0x00,
23 0xf0, 0x00, 0x00, 0x3b, 0x00,
24 0xb4, 0x40, 0xf0, 0x3b, 0x1f,
25 0x8a, 0xc0, 0x50, 0x3e, 0x05,
26 0xb4, 0x40, 0xf0, 0x3b, 0x80,
27 0x9a, 0xe0, 0x00, 0x3e, 0x05,
28 0xf0, 0x00, 0x00, 0x07, 0x00,
29 0xd8, 0x05, 0xc0, 0x00, 0x11,
30 0xf0, 0x00, 0x00, 0x0f, 0x00,
31 0x91, 0xb0, 0x20, 0x0e, 0x00,
32 0xfc, 0x06, 0x60, 0x0b, 0x00,
33 0xf0, 0x00, 0x0c, 0x03, 0x00,
34 0xf0, 0x00, 0x0c, 0x03, 0x00,
35 0xf0, 0x00, 0x0c, 0x03, 0x00,
36 0xf0, 0x00, 0x0c, 0x02, 0x00,
37 0xb0, 0xc0, 0x30, 0x0f, 0x01,
38 0xa4, 0x70, 0x00, 0x0f, 0x20,
39 0xd8, 0x02, 0xc0, 0x01, 0x00,
40 0xfc, 0x10, 0xac, 0x23, 0x08,
41 0xfc, 0x10, 0xac, 0x43, 0x10,
42 0xfc, 0x10, 0xac, 0x63, 0x18,
43 0xe0, 0x00, 0x00, 0x00, 0x02,
44 0xfc, 0x10, 0xae, 0x23, 0x88,
45 0x3d, 0x00, 0x04, 0x03, 0x20,
46 0xe0, 0x00, 0x00, 0x00, 0x10,
47 0x84, 0x82, 0x02, 0x01, 0x3b,
48 0xd8, 0x1a, 0x00, 0x01, 0x01,
49 0xb4, 0x00, 0x8c, 0x7d, 0x80,
50 0x91, 0xb0, 0x80, 0x22, 0x00,
51 0xfc, 0x06, 0x60, 0x23, 0x00,
52 0xf0, 0x00, 0x0c, 0x03, 0x00,
53 0xf0, 0x00, 0x0c, 0x03, 0x00,
54 0xf0, 0x00, 0x0c, 0x03, 0x00,
55 0x94, 0xf0, 0x92, 0x01, 0x21,
56 0xac, 0x40, 0x60, 0x26, 0x00,
57 0xa4, 0x30, 0x0c, 0x04, 0x06,
58 0xd8, 0x1a, 0x40, 0x01, 0x00,
59 0x94, 0xe0, 0xa2, 0x01, 0x21,
60 0xac, 0x20, 0x00, 0x28, 0x06,
61 0x84, 0xf2, 0x02, 0x01, 0x21,
62 0xd8, 0x0b, 0x40, 0x01, 0x00,
63 0xf0, 0x00, 0x0c, 0x02, 0x01,
64 0xf0, 0x00, 0x0c, 0x02, 0x02,
65 0xa0, 0x00, 0x08, 0x04, 0x00,
66 0x95, 0x00, 0xc6, 0x01, 0xff,
67 0xa0, 0x80, 0x10, 0x30, 0x00,
68 0xa0, 0x60, 0x1c, 0x00, 0x01,
69 0xf0, 0x0f, 0xf0, 0x33, 0xff,
70 0xb4, 0x00, 0xc0, 0x31, 0x81,
71 0xb0, 0x80, 0xb0, 0x32, 0x02,
72 0xa0, 0x20, 0x20, 0x2c, 0x00,
73 0x94, 0xf0, 0xd2, 0x01, 0x21,
74 0xd8, 0x0f, 0x40, 0x01, 0x00,
75 0x19, 0x40, 0x10, 0x04, 0x20,
76 0xa0, 0x00, 0x26, 0x04, 0x00,
77 0xd8, 0x0d, 0xc0, 0x01, 0x00,
78 0x00, 0x42, 0x10, 0x80, 0x02,
79 0xb0, 0x00, 0x46, 0x04, 0x00,
80 0xb0, 0x00, 0x56, 0x08, 0x00,
81 0xe0, 0x00, 0x00, 0x00, 0x04,
82 0xfc, 0x10, 0xae, 0x43, 0x90,
83 0x84, 0xf0, 0x32, 0x01, 0x21,
84 0xd8, 0x11, 0x40, 0x01, 0x00,
85 0xa0, 0x60, 0x3c, 0x00, 0x02,
86 0xa0, 0x20, 0x40, 0x10, 0x00,
87 0xa0, 0x20, 0x50, 0x14, 0x00,
88 0xd8, 0x12, 0x00, 0x00, 0x18,
89 0xa0, 0x00, 0x28, 0x0c, 0x00,
90 0xb0, 0x00, 0x48, 0x10, 0x00,
91 0xb0, 0x00, 0x58, 0x14, 0x00,
92 0xaa, 0xf0, 0x00, 0x14, 0x01,
93 0xd8, 0x1a, 0xc0, 0x01, 0x05,
94 0x85, 0x80, 0x42, 0x01, 0xff,
95 0x95, 0x00, 0x66, 0x01, 0xff,
96 0xba, 0xc0, 0x60, 0x1b, 0x01,
97 0x9a, 0x30, 0x60, 0x19, 0x30,
98 0x9a, 0xb0, 0x70, 0x1a, 0x30,
99 0x9b, 0x50, 0x78, 0x1e, 0x04,
100 0x8a, 0xe2, 0x08, 0x1e, 0x21,
101 0x6a, 0x4e, 0x00, 0x13, 0x00,
102 0xe0, 0x00, 0x00, 0x00, 0x30,
103 0x9b, 0x00, 0x7a, 0x92, 0x04,
104 0x3d, 0x00, 0x04, 0x1f, 0x20,
105 0x84, 0xe2, 0x02, 0x01, 0x21,
106 0xd8, 0x16, 0x80, 0x01, 0x00,
107 0xa4, 0x18, 0x0c, 0x7d, 0x80,
108 0xa0, 0x58, 0x1c, 0x00, 0x01,
109 0x01, 0x42, 0x00, 0xa0, 0x02,
110 0xe0, 0x00, 0x00, 0x00, 0x08,
111 0xfc, 0x10, 0xae, 0x63, 0x98,
112 0xd8, 0x1b, 0x00, 0xc2, 0x14,
113 0x84, 0xe2, 0x02, 0x01, 0x21,
114 0xd8, 0x05, 0xc0, 0x01, 0x00,
115 0x84, 0xa2, 0x02, 0x01, 0x21,
116 0xd8, 0x19, 0x40, 0x01, 0x01,
117 0xa0, 0x58, 0x0c, 0x00, 0x02,
118 0x1a, 0x40, 0x00, 0x04, 0x24,
119 0x33, 0x00, 0x01, 0x2f, 0x20,
120 0xd8, 0x05, 0xc0, 0x00, 0x18,
121 0xa0, 0x58, 0x0c, 0x00, 0x02,
122 0x1a, 0x40, 0x00, 0x04, 0x20,
123 0xd8, 0x05, 0xc0, 0x00, 0x18,
124 0xe0, 0x00, 0x02, 0x00, 0x00,
125 0xe0, 0x00, 0x02, 0x00, 0x00,
126 0xe0, 0x00, 0x02, 0x00, 0x00,
127 0xe0, 0x00, 0x02, 0x00, 0x00,
128 0xe0, 0x00, 0x02, 0x00, 0x00,
129 }
130};
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.uc b/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.uc
deleted file mode 100644
index d090d1884fb7..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.uc
+++ /dev/null
@@ -1,272 +0,0 @@
1/*
2 * TX ucode for the Intel IXP2400 in POS-PHY mode.
3 * Copyright (C) 2004, 2005 Lennert Buytenhek
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * Assumptions made in this code:
12 * - The IXP2400 MSF is configured for POS-PHY mode, in a mode where
13 * only one TBUF partition is used. This includes, for example,
14 * 1x32 SPHY and 1x32 MPHY32, but not 4x8 SPHY or 1x32 MPHY4. (This
15 * is not an exhaustive list.)
16 * - The TBUF uses 64-byte mpackets.
17 * - TX descriptors reside in SRAM, and have the following format:
18 * struct tx_desc
19 * {
20 * // to uengine
21 * u32 buf_phys_addr;
22 * u32 pkt_length;
23 * u32 channel;
24 * };
25 * - Packet data resides in DRAM.
26 * - Packet buffer addresses are 8-byte aligned.
27 * - Scratch ring 2 is tx_pending.
28 * - Scratch ring 3 is tx_done, and has status condition 'full'.
29 * - This code is run on all eight threads of the microengine it runs on.
30 */
31
32#define TX_SEQUENCE_0 0x0060
33#define TBUF_CTRL 0x1800
34
35#define PARTITION_SIZE 128
36#define PARTITION_THRESH 96
37
38
39 .sig volatile sig1
40 .sig volatile sig2
41 .sig volatile sig3
42
43 .reg @old_tx_seq_0
44 .reg @mpkts_in_flight
45 .reg @next_tbuf_mpacket
46
47 .reg @buffer_handle
48 .reg @buffer_start
49 .reg @packet_length
50 .reg @channel
51 .reg @packet_offset
52
53 .reg zero
54
55 immed[zero, 0]
56
57 /*
58 * Skip context 0 initialisation?
59 */
60 .begin
61 br!=ctx[0, mpacket_tx_loop#]
62 .end
63
64 /*
65 * Wait until all pending TBUF elements have been transmitted.
66 */
67 .begin
68 .reg read $tx
69 .sig zzz
70
71 loop_empty#:
72 msf[read, $tx, zero, TX_SEQUENCE_0, 1], ctx_swap[zzz]
73 alu_shf[--, --, b, $tx, >>31]
74 beq[loop_empty#]
75
76 alu[@old_tx_seq_0, --, b, $tx]
77 .end
78
79 immed[@mpkts_in_flight, 0]
80 alu[@next_tbuf_mpacket, @old_tx_seq_0, and, (PARTITION_SIZE - 1)]
81
82 immed[@buffer_handle, 0]
83
84 /*
85 * Initialise signal pipeline.
86 */
87 .begin
88 local_csr_wr[SAME_ME_SIGNAL, (&sig1 << 3)]
89 .set_sig sig1
90
91 local_csr_wr[SAME_ME_SIGNAL, (&sig2 << 3)]
92 .set_sig sig2
93
94 local_csr_wr[SAME_ME_SIGNAL, (&sig3 << 3)]
95 .set_sig sig3
96 .end
97
98mpacket_tx_loop#:
99 .begin
100 .reg tbuf_element_index
101 .reg buffer_handle
102 .reg sop_eop
103 .reg packet_data
104 .reg channel
105 .reg mpacket_size
106
107 /*
108 * If there is no packet currently being transmitted,
109 * dequeue the next TX descriptor, and fetch the buffer
110 * address, packet length and destination channel number.
111 */
112 .begin
113 .reg read $stemp $stemp2 $stemp3
114 .xfer_order $stemp $stemp2 $stemp3
115 .sig zzz
116
117 ctx_arb[sig1]
118
119 alu[--, --, b, @buffer_handle]
120 bne[already_got_packet#]
121
122 tx_nobufs#:
123 scratch[get, $stemp, zero, 8, 1], ctx_swap[zzz]
124 alu[@buffer_handle, --, b, $stemp]
125 beq[tx_nobufs#]
126
127 sram[read, $stemp, $stemp, 0, 3], ctx_swap[zzz]
128 alu[@buffer_start, --, b, $stemp]
129 alu[@packet_length, --, b, $stemp2]
130 beq[zero_byte_packet#]
131 alu[@channel, --, b, $stemp3]
132 immed[@packet_offset, 0]
133
134 already_got_packet#:
135 local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig1 << 3))]
136 .end
137
138 /*
139 * Determine tbuf element index, SOP/EOP flags, mpacket
140 * offset and mpacket size and cache buffer_handle and
141 * channel number.
142 */
143 .begin
144 alu[tbuf_element_index, --, b, @next_tbuf_mpacket]
145 alu[@next_tbuf_mpacket, @next_tbuf_mpacket, +, 1]
146 alu[@next_tbuf_mpacket, @next_tbuf_mpacket, and,
147 (PARTITION_SIZE - 1)]
148
149 alu[buffer_handle, --, b, @buffer_handle]
150 immed[@buffer_handle, 0]
151
152 immed[sop_eop, 1]
153
154 alu[packet_data, --, b, @packet_offset]
155 bne[no_sop#]
156 alu[sop_eop, sop_eop, or, 2]
157 no_sop#:
158 alu[packet_data, packet_data, +, @buffer_start]
159
160 alu[channel, --, b, @channel]
161
162 alu[mpacket_size, @packet_length, -, @packet_offset]
163 alu[--, 64, -, mpacket_size]
164 bhs[eop#]
165 alu[@buffer_handle, --, b, buffer_handle]
166 immed[mpacket_size, 64]
167 alu[sop_eop, sop_eop, and, 2]
168 eop#:
169
170 alu[@packet_offset, @packet_offset, +, mpacket_size]
171 .end
172
173 /*
174 * Wait until there's enough space in the TBUF.
175 */
176 .begin
177 .reg read $tx
178 .reg temp
179 .sig zzz
180
181 ctx_arb[sig2]
182
183 br[test_space#]
184
185 loop_space#:
186 msf[read, $tx, zero, TX_SEQUENCE_0, 1], ctx_swap[zzz]
187
188 alu[temp, $tx, -, @old_tx_seq_0]
189 alu[temp, temp, and, 0xff]
190 alu[@mpkts_in_flight, @mpkts_in_flight, -, temp]
191
192 alu[@old_tx_seq_0, --, b, $tx]
193
194 test_space#:
195 alu[--, PARTITION_THRESH, -, @mpkts_in_flight]
196 blo[loop_space#]
197
198 alu[@mpkts_in_flight, @mpkts_in_flight, +, 1]
199
200 local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig2 << 3))]
201 .end
202
203 /*
204 * Copy the packet data to the TBUF.
205 */
206 .begin
207 .reg temp
208 .sig copy_sig
209
210 alu[temp, mpacket_size, -, 1]
211 alu_shf[temp, 0x10, or, temp, >>3]
212 alu_shf[temp, 0x10, or, temp, <<21]
213 alu_shf[temp, temp, or, tbuf_element_index, <<11]
214 alu_shf[--, temp, or, 1, <<18]
215
216 dram[tbuf_wr, --, packet_data, 0, max_8],
217 indirect_ref, sig_done[copy_sig]
218 ctx_arb[copy_sig]
219 .end
220
221 /*
222 * Mark TBUF element as ready-to-be-transmitted.
223 */
224 .begin
225 .reg write $tsw $tsw2
226 .xfer_order $tsw $tsw2
227 .reg temp
228 .sig zzz
229
230 alu_shf[temp, channel, or, mpacket_size, <<24]
231 alu_shf[$tsw, temp, or, sop_eop, <<8]
232 immed[$tsw2, 0]
233
234 immed[temp, TBUF_CTRL]
235 alu_shf[temp, temp, or, tbuf_element_index, <<3]
236 msf[write, $tsw, temp, 0, 2], ctx_swap[zzz]
237 .end
238
239 /*
240 * Resynchronise.
241 */
242 .begin
243 ctx_arb[sig3]
244 local_csr_wr[SAME_ME_SIGNAL, (0x80 | (&sig3 << 3))]
245 .end
246
247 /*
248 * If this was an EOP mpacket, recycle the TX buffer
249 * and signal the host.
250 */
251 .begin
252 .reg write $stemp
253 .sig zzz
254
255 alu[--, sop_eop, and, 1]
256 beq[mpacket_tx_loop#]
257
258 tx_done_ring_full#:
259 br_inp_state[SCR_Ring3_Status, tx_done_ring_full#]
260
261 alu[$stemp, --, b, buffer_handle]
262 scratch[put, $stemp, zero, 12, 1], ctx_swap[zzz]
263 cap[fast_wr, 0, XSCALE_INT_A]
264 br[mpacket_tx_loop#]
265 .end
266 .end
267
268
269zero_byte_packet#:
270 halt
271
272
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.ucode b/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.ucode
deleted file mode 100644
index a433e24b0a51..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixp2400_tx.ucode
+++ /dev/null
@@ -1,98 +0,0 @@
1static struct ixp2000_uengine_code ixp2400_tx =
2{
3 .cpu_model_bitmask = 0x000003fe,
4 .cpu_min_revision = 0,
5 .cpu_max_revision = 255,
6
7 .uengine_parameters = IXP2000_UENGINE_8_CONTEXTS |
8 IXP2000_UENGINE_PRN_UPDATE_EVERY |
9 IXP2000_UENGINE_NN_FROM_PREVIOUS |
10 IXP2000_UENGINE_ASSERT_EMPTY_AT_0 |
11 IXP2000_UENGINE_LM_ADDR1_PER_CONTEXT |
12 IXP2000_UENGINE_LM_ADDR0_PER_CONTEXT,
13
14 .initial_reg_values = (struct ixp2000_reg_value []) {
15 { -1, -1 }
16 },
17
18 .num_insns = 77,
19 .insns = (u8 []) {
20 0xf0, 0x00, 0x00, 0x07, 0x00,
21 0xd8, 0x03, 0x00, 0x00, 0x11,
22 0x3c, 0x40, 0x00, 0x04, 0xe0,
23 0x81, 0xf2, 0x02, 0x01, 0x00,
24 0xd8, 0x00, 0x80, 0x01, 0x00,
25 0xb0, 0x08, 0x06, 0x00, 0x00,
26 0xf0, 0x00, 0x0c, 0x00, 0x80,
27 0xb4, 0x49, 0x02, 0x03, 0x7f,
28 0xf0, 0x00, 0x02, 0x83, 0x00,
29 0xfc, 0x10, 0xac, 0x23, 0x08,
30 0xfc, 0x10, 0xac, 0x43, 0x10,
31 0xfc, 0x10, 0xac, 0x63, 0x18,
32 0xe0, 0x00, 0x00, 0x00, 0x02,
33 0xa0, 0x30, 0x02, 0x80, 0x00,
34 0xd8, 0x06, 0x00, 0x01, 0x01,
35 0x19, 0x40, 0x00, 0x04, 0x28,
36 0xb0, 0x0a, 0x06, 0x00, 0x00,
37 0xd8, 0x03, 0xc0, 0x01, 0x00,
38 0x00, 0x44, 0x00, 0x80, 0x80,
39 0xa0, 0x09, 0x06, 0x00, 0x00,
40 0xb0, 0x0b, 0x06, 0x04, 0x00,
41 0xd8, 0x13, 0x00, 0x01, 0x00,
42 0xb0, 0x0c, 0x06, 0x08, 0x00,
43 0xf0, 0x00, 0x0c, 0x00, 0xa0,
44 0xfc, 0x10, 0xae, 0x23, 0x88,
45 0xa0, 0x00, 0x12, 0x40, 0x00,
46 0xb0, 0xc9, 0x02, 0x43, 0x01,
47 0xb4, 0x49, 0x02, 0x43, 0x7f,
48 0xb0, 0x00, 0x22, 0x80, 0x00,
49 0xf0, 0x00, 0x02, 0x83, 0x00,
50 0xf0, 0x00, 0x0c, 0x04, 0x02,
51 0xb0, 0x40, 0x6c, 0x00, 0xa0,
52 0xd8, 0x08, 0x80, 0x01, 0x01,
53 0xaa, 0x00, 0x2c, 0x08, 0x02,
54 0xa0, 0xc0, 0x30, 0x18, 0x90,
55 0xa0, 0x00, 0x43, 0x00, 0x00,
56 0xba, 0xc0, 0x32, 0xc0, 0xa0,
57 0xaa, 0xb0, 0x00, 0x0f, 0x40,
58 0xd8, 0x0a, 0x80, 0x01, 0x04,
59 0xb0, 0x0a, 0x00, 0x08, 0x00,
60 0xf0, 0x00, 0x00, 0x0f, 0x40,
61 0xa4, 0x00, 0x2c, 0x08, 0x02,
62 0xa0, 0x8a, 0x00, 0x0c, 0xa0,
63 0xe0, 0x00, 0x00, 0x00, 0x04,
64 0xd8, 0x0c, 0x80, 0x00, 0x18,
65 0x3c, 0x40, 0x00, 0x04, 0xe0,
66 0xba, 0x80, 0x42, 0x01, 0x80,
67 0xb4, 0x40, 0x40, 0x13, 0xff,
68 0xaa, 0x88, 0x00, 0x10, 0x80,
69 0xb0, 0x08, 0x06, 0x00, 0x00,
70 0xaa, 0xf0, 0x0d, 0x80, 0x80,
71 0xd8, 0x0b, 0x40, 0x01, 0x05,
72 0xa0, 0x88, 0x0c, 0x04, 0x80,
73 0xfc, 0x10, 0xae, 0x43, 0x90,
74 0xba, 0xc0, 0x50, 0x0f, 0x01,
75 0x9a, 0x30, 0x50, 0x15, 0x30,
76 0x9a, 0xb0, 0x50, 0x16, 0x30,
77 0x9b, 0x50, 0x58, 0x16, 0x01,
78 0x8a, 0xe2, 0x08, 0x16, 0x21,
79 0x6b, 0x4e, 0x00, 0x83, 0x03,
80 0xe0, 0x00, 0x00, 0x00, 0x30,
81 0x9a, 0x80, 0x70, 0x0e, 0x04,
82 0x8b, 0x88, 0x08, 0x1e, 0x02,
83 0xf0, 0x00, 0x0c, 0x01, 0x81,
84 0xf0, 0x01, 0x80, 0x1f, 0x00,
85 0x9b, 0xd0, 0x78, 0x1e, 0x01,
86 0x3d, 0x42, 0x00, 0x1c, 0x20,
87 0xe0, 0x00, 0x00, 0x00, 0x08,
88 0xfc, 0x10, 0xae, 0x63, 0x98,
89 0xa4, 0x30, 0x0c, 0x04, 0x02,
90 0xd8, 0x03, 0x00, 0x01, 0x00,
91 0xd8, 0x11, 0xc1, 0x42, 0x14,
92 0xa0, 0x18, 0x00, 0x08, 0x00,
93 0x1a, 0x40, 0x00, 0x04, 0x2c,
94 0x33, 0x00, 0x01, 0x2f, 0x20,
95 0xd8, 0x03, 0x00, 0x00, 0x18,
96 0xe0, 0x00, 0x02, 0x00, 0x00,
97 }
98};
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixpdev.c b/drivers/net/ethernet/xscale/ixp2000/ixpdev.c
deleted file mode 100644
index 45008377c8bf..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixpdev.c
+++ /dev/null
@@ -1,437 +0,0 @@
1/*
2 * IXP2000 MSF network device driver
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/kernel.h>
14#include <linux/netdevice.h>
15#include <linux/etherdevice.h>
16#include <linux/init.h>
17#include <linux/interrupt.h>
18#include <linux/moduleparam.h>
19#include <linux/gfp.h>
20#include <asm/hardware/uengine.h>
21#include <asm/io.h>
22#include "ixp2400_rx.ucode"
23#include "ixp2400_tx.ucode"
24#include "ixpdev_priv.h"
25#include "ixpdev.h"
26#include "pm3386.h"
27
28#define DRV_MODULE_VERSION "0.2"
29
30static int nds_count;
31static struct net_device **nds;
32static int nds_open;
33static void (*set_port_admin_status)(int port, int up);
34
35static struct ixpdev_rx_desc * const rx_desc =
36 (struct ixpdev_rx_desc *)(IXP2000_SRAM0_VIRT_BASE + RX_BUF_DESC_BASE);
37static struct ixpdev_tx_desc * const tx_desc =
38 (struct ixpdev_tx_desc *)(IXP2000_SRAM0_VIRT_BASE + TX_BUF_DESC_BASE);
39static int tx_pointer;
40
41
42static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev)
43{
44 struct ixpdev_priv *ip = netdev_priv(dev);
45 struct ixpdev_tx_desc *desc;
46 int entry;
47 unsigned long flags;
48
49 if (unlikely(skb->len > PAGE_SIZE)) {
50 /* @@@ Count drops. */
51 dev_kfree_skb(skb);
52 return NETDEV_TX_OK;
53 }
54
55 entry = tx_pointer;
56 tx_pointer = (tx_pointer + 1) % TX_BUF_COUNT;
57
58 desc = tx_desc + entry;
59 desc->pkt_length = skb->len;
60 desc->channel = ip->channel;
61
62 skb_copy_and_csum_dev(skb, phys_to_virt(desc->buf_addr));
63 dev_kfree_skb(skb);
64
65 ixp2000_reg_write(RING_TX_PENDING,
66 TX_BUF_DESC_BASE + (entry * sizeof(struct ixpdev_tx_desc)));
67
68 local_irq_save(flags);
69 ip->tx_queue_entries++;
70 if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN)
71 netif_stop_queue(dev);
72 local_irq_restore(flags);
73
74 return NETDEV_TX_OK;
75}
76
77
78static int ixpdev_rx(struct net_device *dev, int processed, int budget)
79{
80 while (processed < budget) {
81 struct ixpdev_rx_desc *desc;
82 struct sk_buff *skb;
83 void *buf;
84 u32 _desc;
85
86 _desc = ixp2000_reg_read(RING_RX_DONE);
87 if (_desc == 0)
88 return 0;
89
90 desc = rx_desc +
91 ((_desc - RX_BUF_DESC_BASE) / sizeof(struct ixpdev_rx_desc));
92 buf = phys_to_virt(desc->buf_addr);
93
94 if (desc->pkt_length < 4 || desc->pkt_length > PAGE_SIZE) {
95 printk(KERN_ERR "ixp2000: rx err, length %d\n",
96 desc->pkt_length);
97 goto err;
98 }
99
100 if (desc->channel < 0 || desc->channel >= nds_count) {
101 printk(KERN_ERR "ixp2000: rx err, channel %d\n",
102 desc->channel);
103 goto err;
104 }
105
106 /* @@@ Make FCS stripping configurable. */
107 desc->pkt_length -= 4;
108
109 if (unlikely(!netif_running(nds[desc->channel])))
110 goto err;
111
112 skb = netdev_alloc_skb_ip_align(dev, desc->pkt_length);
113 if (likely(skb != NULL)) {
114 skb_copy_to_linear_data(skb, buf, desc->pkt_length);
115 skb_put(skb, desc->pkt_length);
116 skb->protocol = eth_type_trans(skb, nds[desc->channel]);
117
118 netif_receive_skb(skb);
119 }
120
121err:
122 ixp2000_reg_write(RING_RX_PENDING, _desc);
123 processed++;
124 }
125
126 return processed;
127}
128
129/* dev always points to nds[0]. */
130static int ixpdev_poll(struct napi_struct *napi, int budget)
131{
132 struct ixpdev_priv *ip = container_of(napi, struct ixpdev_priv, napi);
133 struct net_device *dev = ip->dev;
134 int rx;
135
136 rx = 0;
137 do {
138 ixp2000_reg_write(IXP2000_IRQ_THD_RAW_STATUS_A_0, 0x00ff);
139
140 rx = ixpdev_rx(dev, rx, budget);
141 if (rx >= budget)
142 break;
143 } while (ixp2000_reg_read(IXP2000_IRQ_THD_RAW_STATUS_A_0) & 0x00ff);
144
145 napi_complete(napi);
146 ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0x00ff);
147
148 return rx;
149}
150
151static void ixpdev_tx_complete(void)
152{
153 int channel;
154 u32 wake;
155
156 wake = 0;
157 while (1) {
158 struct ixpdev_priv *ip;
159 u32 desc;
160 int entry;
161
162 desc = ixp2000_reg_read(RING_TX_DONE);
163 if (desc == 0)
164 break;
165
166 /* @@@ Check whether entries come back in order. */
167 entry = (desc - TX_BUF_DESC_BASE) / sizeof(struct ixpdev_tx_desc);
168 channel = tx_desc[entry].channel;
169
170 if (channel < 0 || channel >= nds_count) {
171 printk(KERN_ERR "ixp2000: txcomp channel index "
172 "out of bounds (%d, %.8i, %d)\n",
173 channel, (unsigned int)desc, entry);
174 continue;
175 }
176
177 ip = netdev_priv(nds[channel]);
178 if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN)
179 wake |= 1 << channel;
180 ip->tx_queue_entries--;
181 }
182
183 for (channel = 0; wake != 0; channel++) {
184 if (wake & (1 << channel)) {
185 netif_wake_queue(nds[channel]);
186 wake &= ~(1 << channel);
187 }
188 }
189}
190
191static irqreturn_t ixpdev_interrupt(int irq, void *dev_id)
192{
193 u32 status;
194
195 status = ixp2000_reg_read(IXP2000_IRQ_THD_STATUS_A_0);
196 if (status == 0)
197 return IRQ_NONE;
198
199 /*
200 * Any of the eight receive units signaled RX?
201 */
202 if (status & 0x00ff) {
203 struct net_device *dev = nds[0];
204 struct ixpdev_priv *ip = netdev_priv(dev);
205
206 ixp2000_reg_wrb(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0x00ff);
207 if (likely(napi_schedule_prep(&ip->napi))) {
208 __napi_schedule(&ip->napi);
209 } else {
210 printk(KERN_CRIT "ixp2000: irq while polling!!\n");
211 }
212 }
213
214 /*
215 * Any of the eight transmit units signaled TXdone?
216 */
217 if (status & 0xff00) {
218 ixp2000_reg_wrb(IXP2000_IRQ_THD_RAW_STATUS_A_0, 0xff00);
219 ixpdev_tx_complete();
220 }
221
222 return IRQ_HANDLED;
223}
224
225#ifdef CONFIG_NET_POLL_CONTROLLER
226static void ixpdev_poll_controller(struct net_device *dev)
227{
228 disable_irq(IRQ_IXP2000_THDA0);
229 ixpdev_interrupt(IRQ_IXP2000_THDA0, dev);
230 enable_irq(IRQ_IXP2000_THDA0);
231}
232#endif
233
234static int ixpdev_open(struct net_device *dev)
235{
236 struct ixpdev_priv *ip = netdev_priv(dev);
237 int err;
238
239 napi_enable(&ip->napi);
240 if (!nds_open++) {
241 err = request_irq(IRQ_IXP2000_THDA0, ixpdev_interrupt,
242 IRQF_SHARED, "ixp2000_eth", nds);
243 if (err) {
244 nds_open--;
245 napi_disable(&ip->napi);
246 return err;
247 }
248
249 ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0xffff);
250 }
251
252 set_port_admin_status(ip->channel, 1);
253 netif_start_queue(dev);
254
255 return 0;
256}
257
258static int ixpdev_close(struct net_device *dev)
259{
260 struct ixpdev_priv *ip = netdev_priv(dev);
261
262 netif_stop_queue(dev);
263 napi_disable(&ip->napi);
264 set_port_admin_status(ip->channel, 0);
265
266 if (!--nds_open) {
267 ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_CLEAR_A_0, 0xffff);
268 free_irq(IRQ_IXP2000_THDA0, nds);
269 }
270
271 return 0;
272}
273
274static struct net_device_stats *ixpdev_get_stats(struct net_device *dev)
275{
276 struct ixpdev_priv *ip = netdev_priv(dev);
277
278 pm3386_get_stats(ip->channel, &(dev->stats));
279
280 return &(dev->stats);
281}
282
283static const struct net_device_ops ixpdev_netdev_ops = {
284 .ndo_open = ixpdev_open,
285 .ndo_stop = ixpdev_close,
286 .ndo_start_xmit = ixpdev_xmit,
287 .ndo_change_mtu = eth_change_mtu,
288 .ndo_validate_addr = eth_validate_addr,
289 .ndo_set_mac_address = eth_mac_addr,
290 .ndo_get_stats = ixpdev_get_stats,
291#ifdef CONFIG_NET_POLL_CONTROLLER
292 .ndo_poll_controller = ixpdev_poll_controller,
293#endif
294};
295
296struct net_device *ixpdev_alloc(int channel, int sizeof_priv)
297{
298 struct net_device *dev;
299 struct ixpdev_priv *ip;
300
301 dev = alloc_etherdev(sizeof_priv);
302 if (dev == NULL)
303 return NULL;
304
305 dev->netdev_ops = &ixpdev_netdev_ops;
306
307 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
308
309 ip = netdev_priv(dev);
310 ip->dev = dev;
311 netif_napi_add(dev, &ip->napi, ixpdev_poll, 64);
312 ip->channel = channel;
313 ip->tx_queue_entries = 0;
314
315 return dev;
316}
317
318int ixpdev_init(int __nds_count, struct net_device **__nds,
319 void (*__set_port_admin_status)(int port, int up))
320{
321 int i;
322 int err;
323
324 BUILD_BUG_ON(RX_BUF_COUNT > 192 || TX_BUF_COUNT > 192);
325
326 printk(KERN_INFO "IXP2000 MSF ethernet driver %s\n", DRV_MODULE_VERSION);
327
328 nds_count = __nds_count;
329 nds = __nds;
330 set_port_admin_status = __set_port_admin_status;
331
332 for (i = 0; i < RX_BUF_COUNT; i++) {
333 void *buf;
334
335 buf = (void *)get_zeroed_page(GFP_KERNEL);
336 if (buf == NULL) {
337 err = -ENOMEM;
338 while (--i >= 0)
339 free_page((unsigned long)phys_to_virt(rx_desc[i].buf_addr));
340 goto err_out;
341 }
342 rx_desc[i].buf_addr = virt_to_phys(buf);
343 rx_desc[i].buf_length = PAGE_SIZE;
344 }
345
346 /* @@@ Maybe we shouldn't be preallocating TX buffers. */
347 for (i = 0; i < TX_BUF_COUNT; i++) {
348 void *buf;
349
350 buf = (void *)get_zeroed_page(GFP_KERNEL);
351 if (buf == NULL) {
352 err = -ENOMEM;
353 while (--i >= 0)
354 free_page((unsigned long)phys_to_virt(tx_desc[i].buf_addr));
355 goto err_free_rx;
356 }
357 tx_desc[i].buf_addr = virt_to_phys(buf);
358 }
359
360 /* 256 entries, ring status set means 'empty', base address 0x0000. */
361 ixp2000_reg_write(RING_RX_PENDING_BASE, 0x44000000);
362 ixp2000_reg_write(RING_RX_PENDING_HEAD, 0x00000000);
363 ixp2000_reg_write(RING_RX_PENDING_TAIL, 0x00000000);
364
365 /* 256 entries, ring status set means 'full', base address 0x0400. */
366 ixp2000_reg_write(RING_RX_DONE_BASE, 0x40000400);
367 ixp2000_reg_write(RING_RX_DONE_HEAD, 0x00000000);
368 ixp2000_reg_write(RING_RX_DONE_TAIL, 0x00000000);
369
370 for (i = 0; i < RX_BUF_COUNT; i++) {
371 ixp2000_reg_write(RING_RX_PENDING,
372 RX_BUF_DESC_BASE + (i * sizeof(struct ixpdev_rx_desc)));
373 }
374
375 ixp2000_uengine_load(0, &ixp2400_rx);
376 ixp2000_uengine_start_contexts(0, 0xff);
377
378 /* 256 entries, ring status set means 'empty', base address 0x0800. */
379 ixp2000_reg_write(RING_TX_PENDING_BASE, 0x44000800);
380 ixp2000_reg_write(RING_TX_PENDING_HEAD, 0x00000000);
381 ixp2000_reg_write(RING_TX_PENDING_TAIL, 0x00000000);
382
383 /* 256 entries, ring status set means 'full', base address 0x0c00. */
384 ixp2000_reg_write(RING_TX_DONE_BASE, 0x40000c00);
385 ixp2000_reg_write(RING_TX_DONE_HEAD, 0x00000000);
386 ixp2000_reg_write(RING_TX_DONE_TAIL, 0x00000000);
387
388 ixp2000_uengine_load(1, &ixp2400_tx);
389 ixp2000_uengine_start_contexts(1, 0xff);
390
391 for (i = 0; i < nds_count; i++) {
392 err = register_netdev(nds[i]);
393 if (err) {
394 while (--i >= 0)
395 unregister_netdev(nds[i]);
396 goto err_free_tx;
397 }
398 }
399
400 for (i = 0; i < nds_count; i++) {
401 printk(KERN_INFO "%s: IXP2000 MSF ethernet (port %d), %pM.\n",
402 nds[i]->name, i, nds[i]->dev_addr);
403 }
404
405 return 0;
406
407err_free_tx:
408 for (i = 0; i < TX_BUF_COUNT; i++)
409 free_page((unsigned long)phys_to_virt(tx_desc[i].buf_addr));
410
411err_free_rx:
412 for (i = 0; i < RX_BUF_COUNT; i++)
413 free_page((unsigned long)phys_to_virt(rx_desc[i].buf_addr));
414
415err_out:
416 return err;
417}
418
419void ixpdev_deinit(void)
420{
421 int i;
422
423 /* @@@ Flush out pending packets. */
424
425 for (i = 0; i < nds_count; i++)
426 unregister_netdev(nds[i]);
427
428 ixp2000_uengine_stop_contexts(1, 0xff);
429 ixp2000_uengine_stop_contexts(0, 0xff);
430 ixp2000_uengine_reset(0x3);
431
432 for (i = 0; i < TX_BUF_COUNT; i++)
433 free_page((unsigned long)phys_to_virt(tx_desc[i].buf_addr));
434
435 for (i = 0; i < RX_BUF_COUNT; i++)
436 free_page((unsigned long)phys_to_virt(rx_desc[i].buf_addr));
437}
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixpdev.h b/drivers/net/ethernet/xscale/ixp2000/ixpdev.h
deleted file mode 100644
index 391ece623243..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixpdev.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * IXP2000 MSF network device driver
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef __IXPDEV_H
13#define __IXPDEV_H
14
15struct ixpdev_priv
16{
17 struct net_device *dev;
18 struct napi_struct napi;
19 int channel;
20 int tx_queue_entries;
21};
22
23struct net_device *ixpdev_alloc(int channel, int sizeof_priv);
24int ixpdev_init(int num_ports, struct net_device **nds,
25 void (*set_port_admin_status)(int port, int up));
26void ixpdev_deinit(void);
27
28
29#endif
diff --git a/drivers/net/ethernet/xscale/ixp2000/ixpdev_priv.h b/drivers/net/ethernet/xscale/ixp2000/ixpdev_priv.h
deleted file mode 100644
index 86aa08ea0c33..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/ixpdev_priv.h
+++ /dev/null
@@ -1,57 +0,0 @@
1/*
2 * IXP2000 MSF network device driver
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef __IXPDEV_PRIV_H
13#define __IXPDEV_PRIV_H
14
15#define RX_BUF_DESC_BASE 0x00001000
16#define RX_BUF_COUNT ((3 * PAGE_SIZE) / (4 * sizeof(struct ixpdev_rx_desc)))
17#define TX_BUF_DESC_BASE 0x00002000
18#define TX_BUF_COUNT ((3 * PAGE_SIZE) / (4 * sizeof(struct ixpdev_tx_desc)))
19#define TX_BUF_COUNT_PER_CHAN (TX_BUF_COUNT / 4)
20
21#define RING_RX_PENDING ((u32 *)IXP2000_SCRATCH_RING_VIRT_BASE)
22#define RING_RX_DONE ((u32 *)(IXP2000_SCRATCH_RING_VIRT_BASE + 4))
23#define RING_TX_PENDING ((u32 *)(IXP2000_SCRATCH_RING_VIRT_BASE + 8))
24#define RING_TX_DONE ((u32 *)(IXP2000_SCRATCH_RING_VIRT_BASE + 12))
25
26#define SCRATCH_REG(x) ((u32 *)(IXP2000_GLOBAL_REG_VIRT_BASE | 0x0800 | (x)))
27#define RING_RX_PENDING_BASE SCRATCH_REG(0x00)
28#define RING_RX_PENDING_HEAD SCRATCH_REG(0x04)
29#define RING_RX_PENDING_TAIL SCRATCH_REG(0x08)
30#define RING_RX_DONE_BASE SCRATCH_REG(0x10)
31#define RING_RX_DONE_HEAD SCRATCH_REG(0x14)
32#define RING_RX_DONE_TAIL SCRATCH_REG(0x18)
33#define RING_TX_PENDING_BASE SCRATCH_REG(0x20)
34#define RING_TX_PENDING_HEAD SCRATCH_REG(0x24)
35#define RING_TX_PENDING_TAIL SCRATCH_REG(0x28)
36#define RING_TX_DONE_BASE SCRATCH_REG(0x30)
37#define RING_TX_DONE_HEAD SCRATCH_REG(0x34)
38#define RING_TX_DONE_TAIL SCRATCH_REG(0x38)
39
40struct ixpdev_rx_desc
41{
42 u32 buf_addr;
43 u32 buf_length;
44 u32 channel;
45 u32 pkt_length;
46};
47
48struct ixpdev_tx_desc
49{
50 u32 buf_addr;
51 u32 pkt_length;
52 u32 channel;
53 u32 unused;
54};
55
56
57#endif
diff --git a/drivers/net/ethernet/xscale/ixp2000/pm3386.c b/drivers/net/ethernet/xscale/ixp2000/pm3386.c
deleted file mode 100644
index e08d3f9863b8..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/pm3386.c
+++ /dev/null
@@ -1,351 +0,0 @@
1/*
2 * Helper functions for the PM3386s on the Radisys ENP2611
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/delay.h>
14#include <linux/netdevice.h>
15#include <asm/io.h>
16#include "pm3386.h"
17
18/*
19 * Read from register 'reg' of PM3386 device 'pm'.
20 */
21static u16 pm3386_reg_read(int pm, int reg)
22{
23 void *_reg;
24 u16 value;
25
26 _reg = (void *)ENP2611_PM3386_0_VIRT_BASE;
27 if (pm == 1)
28 _reg = (void *)ENP2611_PM3386_1_VIRT_BASE;
29
30 value = *((volatile u16 *)(_reg + (reg << 1)));
31
32// printk(KERN_INFO "pm3386_reg_read(%d, %.3x) = %.8x\n", pm, reg, value);
33
34 return value;
35}
36
37/*
38 * Write to register 'reg' of PM3386 device 'pm', and perform
39 * a readback from the identification register.
40 */
41static void pm3386_reg_write(int pm, int reg, u16 value)
42{
43 void *_reg;
44 u16 dummy;
45
46// printk(KERN_INFO "pm3386_reg_write(%d, %.3x, %.8x)\n", pm, reg, value);
47
48 _reg = (void *)ENP2611_PM3386_0_VIRT_BASE;
49 if (pm == 1)
50 _reg = (void *)ENP2611_PM3386_1_VIRT_BASE;
51
52 *((volatile u16 *)(_reg + (reg << 1))) = value;
53
54 dummy = *((volatile u16 *)_reg);
55 __asm__ __volatile__("mov %0, %0" : "+r" (dummy));
56}
57
58/*
59 * Read from port 'port' register 'reg', where the registers
60 * for the different ports are 'spacing' registers apart.
61 */
62static u16 pm3386_port_reg_read(int port, int _reg, int spacing)
63{
64 int reg;
65
66 reg = _reg;
67 if (port & 1)
68 reg += spacing;
69
70 return pm3386_reg_read(port >> 1, reg);
71}
72
73/*
74 * Write to port 'port' register 'reg', where the registers
75 * for the different ports are 'spacing' registers apart.
76 */
77static void pm3386_port_reg_write(int port, int _reg, int spacing, u16 value)
78{
79 int reg;
80
81 reg = _reg;
82 if (port & 1)
83 reg += spacing;
84
85 pm3386_reg_write(port >> 1, reg, value);
86}
87
88int pm3386_secondary_present(void)
89{
90 return pm3386_reg_read(1, 0) == 0x3386;
91}
92
93void pm3386_reset(void)
94{
95 u8 mac[3][6];
96 int secondary;
97
98 secondary = pm3386_secondary_present();
99
100 /* Save programmed MAC addresses. */
101 pm3386_get_mac(0, mac[0]);
102 pm3386_get_mac(1, mac[1]);
103 if (secondary)
104 pm3386_get_mac(2, mac[2]);
105
106 /* Assert analog and digital reset. */
107 pm3386_reg_write(0, 0x002, 0x0060);
108 if (secondary)
109 pm3386_reg_write(1, 0x002, 0x0060);
110 mdelay(1);
111
112 /* Deassert analog reset. */
113 pm3386_reg_write(0, 0x002, 0x0062);
114 if (secondary)
115 pm3386_reg_write(1, 0x002, 0x0062);
116 mdelay(10);
117
118 /* Deassert digital reset. */
119 pm3386_reg_write(0, 0x002, 0x0063);
120 if (secondary)
121 pm3386_reg_write(1, 0x002, 0x0063);
122 mdelay(10);
123
124 /* Restore programmed MAC addresses. */
125 pm3386_set_mac(0, mac[0]);
126 pm3386_set_mac(1, mac[1]);
127 if (secondary)
128 pm3386_set_mac(2, mac[2]);
129
130 /* Disable carrier on all ports. */
131 pm3386_set_carrier(0, 0);
132 pm3386_set_carrier(1, 0);
133 if (secondary)
134 pm3386_set_carrier(2, 0);
135}
136
137static u16 swaph(u16 x)
138{
139 return ((x << 8) | (x >> 8)) & 0xffff;
140}
141
142int pm3386_port_count(void)
143{
144 return 2 + pm3386_secondary_present();
145}
146
147void pm3386_init_port(int port)
148{
149 int pm = port >> 1;
150
151 /*
152 * Work around ENP2611 bootloader programming MAC address
153 * in reverse.
154 */
155 if (pm3386_port_reg_read(port, 0x30a, 0x100) == 0x0000 &&
156 (pm3386_port_reg_read(port, 0x309, 0x100) & 0xff00) == 0x5000) {
157 u16 temp[3];
158
159 temp[0] = pm3386_port_reg_read(port, 0x308, 0x100);
160 temp[1] = pm3386_port_reg_read(port, 0x309, 0x100);
161 temp[2] = pm3386_port_reg_read(port, 0x30a, 0x100);
162 pm3386_port_reg_write(port, 0x308, 0x100, swaph(temp[2]));
163 pm3386_port_reg_write(port, 0x309, 0x100, swaph(temp[1]));
164 pm3386_port_reg_write(port, 0x30a, 0x100, swaph(temp[0]));
165 }
166
167 /*
168 * Initialise narrowbanding mode. See application note 2010486
169 * for more information. (@@@ We also need to issue a reset
170 * when ROOL or DOOL are detected.)
171 */
172 pm3386_port_reg_write(port, 0x708, 0x10, 0xd055);
173 udelay(500);
174 pm3386_port_reg_write(port, 0x708, 0x10, 0x5055);
175
176 /*
177 * SPI-3 ingress block. Set 64 bytes SPI-3 burst size
178 * towards SPI-3 bridge.
179 */
180 pm3386_port_reg_write(port, 0x122, 0x20, 0x0002);
181
182 /*
183 * Enable ingress protocol checking, and soft reset the
184 * SPI-3 ingress block.
185 */
186 pm3386_reg_write(pm, 0x103, 0x0003);
187 while (!(pm3386_reg_read(pm, 0x103) & 0x80))
188 ;
189
190 /*
191 * SPI-3 egress block. Gather 12288 bytes of the current
192 * packet in the TX fifo before initiating transmit on the
193 * SERDES interface. (Prevents TX underflows.)
194 */
195 pm3386_port_reg_write(port, 0x221, 0x20, 0x0007);
196
197 /*
198 * Enforce odd parity from the SPI-3 bridge, and soft reset
199 * the SPI-3 egress block.
200 */
201 pm3386_reg_write(pm, 0x203, 0x000d & ~(4 << (port & 1)));
202 while ((pm3386_reg_read(pm, 0x203) & 0x000c) != 0x000c)
203 ;
204
205 /*
206 * EGMAC block. Set this channels to reject long preambles,
207 * not send or transmit PAUSE frames, enable preamble checking,
208 * disable frame length checking, enable FCS appending, enable
209 * TX frame padding.
210 */
211 pm3386_port_reg_write(port, 0x302, 0x100, 0x0113);
212
213 /*
214 * Soft reset the EGMAC block.
215 */
216 pm3386_port_reg_write(port, 0x301, 0x100, 0x8000);
217 pm3386_port_reg_write(port, 0x301, 0x100, 0x0000);
218
219 /*
220 * Auto-sense autonegotiation status.
221 */
222 pm3386_port_reg_write(port, 0x306, 0x100, 0x0100);
223
224 /*
225 * Allow reception of jumbo frames.
226 */
227 pm3386_port_reg_write(port, 0x310, 0x100, 9018);
228
229 /*
230 * Allow transmission of jumbo frames.
231 */
232 pm3386_port_reg_write(port, 0x336, 0x100, 9018);
233
234 /* @@@ Should set 0x337/0x437 (RX forwarding threshold.) */
235
236 /*
237 * Set autonegotiation parameters to 'no PAUSE, full duplex.'
238 */
239 pm3386_port_reg_write(port, 0x31c, 0x100, 0x0020);
240
241 /*
242 * Enable and restart autonegotiation.
243 */
244 pm3386_port_reg_write(port, 0x318, 0x100, 0x0003);
245 pm3386_port_reg_write(port, 0x318, 0x100, 0x0002);
246}
247
248void pm3386_get_mac(int port, u8 *mac)
249{
250 u16 temp;
251
252 temp = pm3386_port_reg_read(port, 0x308, 0x100);
253 mac[0] = temp & 0xff;
254 mac[1] = (temp >> 8) & 0xff;
255
256 temp = pm3386_port_reg_read(port, 0x309, 0x100);
257 mac[2] = temp & 0xff;
258 mac[3] = (temp >> 8) & 0xff;
259
260 temp = pm3386_port_reg_read(port, 0x30a, 0x100);
261 mac[4] = temp & 0xff;
262 mac[5] = (temp >> 8) & 0xff;
263}
264
265void pm3386_set_mac(int port, u8 *mac)
266{
267 pm3386_port_reg_write(port, 0x308, 0x100, (mac[1] << 8) | mac[0]);
268 pm3386_port_reg_write(port, 0x309, 0x100, (mac[3] << 8) | mac[2]);
269 pm3386_port_reg_write(port, 0x30a, 0x100, (mac[5] << 8) | mac[4]);
270}
271
272static u32 pm3386_get_stat(int port, u16 base)
273{
274 u32 value;
275
276 value = pm3386_port_reg_read(port, base, 0x100);
277 value |= pm3386_port_reg_read(port, base + 1, 0x100) << 16;
278
279 return value;
280}
281
282void pm3386_get_stats(int port, struct net_device_stats *stats)
283{
284 /*
285 * Snapshot statistics counters.
286 */
287 pm3386_port_reg_write(port, 0x500, 0x100, 0x0001);
288 while (pm3386_port_reg_read(port, 0x500, 0x100) & 0x0001)
289 ;
290
291 memset(stats, 0, sizeof(*stats));
292
293 stats->rx_packets = pm3386_get_stat(port, 0x510);
294 stats->tx_packets = pm3386_get_stat(port, 0x590);
295 stats->rx_bytes = pm3386_get_stat(port, 0x514);
296 stats->tx_bytes = pm3386_get_stat(port, 0x594);
297 /* @@@ Add other stats. */
298}
299
300void pm3386_set_carrier(int port, int state)
301{
302 pm3386_port_reg_write(port, 0x703, 0x10, state ? 0x1001 : 0x0000);
303}
304
305int pm3386_is_link_up(int port)
306{
307 u16 temp;
308
309 temp = pm3386_port_reg_read(port, 0x31a, 0x100);
310 temp = pm3386_port_reg_read(port, 0x31a, 0x100);
311
312 return !!(temp & 0x0002);
313}
314
315void pm3386_enable_rx(int port)
316{
317 u16 temp;
318
319 temp = pm3386_port_reg_read(port, 0x303, 0x100);
320 temp |= 0x1000;
321 pm3386_port_reg_write(port, 0x303, 0x100, temp);
322}
323
324void pm3386_disable_rx(int port)
325{
326 u16 temp;
327
328 temp = pm3386_port_reg_read(port, 0x303, 0x100);
329 temp &= 0xefff;
330 pm3386_port_reg_write(port, 0x303, 0x100, temp);
331}
332
333void pm3386_enable_tx(int port)
334{
335 u16 temp;
336
337 temp = pm3386_port_reg_read(port, 0x303, 0x100);
338 temp |= 0x4000;
339 pm3386_port_reg_write(port, 0x303, 0x100, temp);
340}
341
342void pm3386_disable_tx(int port)
343{
344 u16 temp;
345
346 temp = pm3386_port_reg_read(port, 0x303, 0x100);
347 temp &= 0xbfff;
348 pm3386_port_reg_write(port, 0x303, 0x100, temp);
349}
350
351MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/xscale/ixp2000/pm3386.h b/drivers/net/ethernet/xscale/ixp2000/pm3386.h
deleted file mode 100644
index cc4183dca911..000000000000
--- a/drivers/net/ethernet/xscale/ixp2000/pm3386.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * Helper functions for the PM3386s on the Radisys ENP2611
3 * Copyright (C) 2004, 2005 Lennert Buytenhek <buytenh@wantstofly.org>
4 * Dedicated to Marija Kulikova.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef __PM3386_H
13#define __PM3386_H
14
15void pm3386_reset(void);
16int pm3386_port_count(void);
17void pm3386_init_port(int port);
18void pm3386_get_mac(int port, u8 *mac);
19void pm3386_set_mac(int port, u8 *mac);
20void pm3386_get_stats(int port, struct net_device_stats *stats);
21void pm3386_set_carrier(int port, int state);
22int pm3386_is_link_up(int port);
23void pm3386_enable_rx(int port);
24void pm3386_disable_rx(int port);
25void pm3386_enable_tx(int port);
26void pm3386_disable_tx(int port);
27
28
29#endif
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 41a8b5a9849e..482648fcf0b6 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1002,12 +1002,41 @@ static int ixp4xx_nway_reset(struct net_device *dev)
1002 return phy_start_aneg(port->phydev); 1002 return phy_start_aneg(port->phydev);
1003} 1003}
1004 1004
1005int ixp46x_phc_index = -1;
1006
1007static int ixp4xx_get_ts_info(struct net_device *dev,
1008 struct ethtool_ts_info *info)
1009{
1010 if (!cpu_is_ixp46x()) {
1011 info->so_timestamping =
1012 SOF_TIMESTAMPING_TX_SOFTWARE |
1013 SOF_TIMESTAMPING_RX_SOFTWARE |
1014 SOF_TIMESTAMPING_SOFTWARE;
1015 info->phc_index = -1;
1016 return 0;
1017 }
1018 info->so_timestamping =
1019 SOF_TIMESTAMPING_TX_HARDWARE |
1020 SOF_TIMESTAMPING_RX_HARDWARE |
1021 SOF_TIMESTAMPING_RAW_HARDWARE;
1022 info->phc_index = ixp46x_phc_index;
1023 info->tx_types =
1024 (1 << HWTSTAMP_TX_OFF) |
1025 (1 << HWTSTAMP_TX_ON);
1026 info->rx_filters =
1027 (1 << HWTSTAMP_FILTER_NONE) |
1028 (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
1029 (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ);
1030 return 0;
1031}
1032
1005static const struct ethtool_ops ixp4xx_ethtool_ops = { 1033static const struct ethtool_ops ixp4xx_ethtool_ops = {
1006 .get_drvinfo = ixp4xx_get_drvinfo, 1034 .get_drvinfo = ixp4xx_get_drvinfo,
1007 .get_settings = ixp4xx_get_settings, 1035 .get_settings = ixp4xx_get_settings,
1008 .set_settings = ixp4xx_set_settings, 1036 .set_settings = ixp4xx_set_settings,
1009 .nway_reset = ixp4xx_nway_reset, 1037 .nway_reset = ixp4xx_nway_reset,
1010 .get_link = ethtool_op_get_link, 1038 .get_link = ethtool_op_get_link,
1039 .get_ts_info = ixp4xx_get_ts_info,
1011}; 1040};
1012 1041
1013 1042
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 168c8f41d09f..d4719632ffc6 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -113,10 +113,9 @@ static int __devinit rr_init_one(struct pci_dev *pdev,
113 113
114 SET_NETDEV_DEV(dev, &pdev->dev); 114 SET_NETDEV_DEV(dev, &pdev->dev);
115 115
116 if (pci_request_regions(pdev, "rrunner")) { 116 ret = pci_request_regions(pdev, "rrunner");
117 ret = -EIO; 117 if (ret < 0)
118 goto out; 118 goto out;
119 }
120 119
121 pci_set_drvdata(pdev, dev); 120 pci_set_drvdata(pdev, dev);
122 121
@@ -124,11 +123,8 @@ static int __devinit rr_init_one(struct pci_dev *pdev,
124 123
125 spin_lock_init(&rrpriv->lock); 124 spin_lock_init(&rrpriv->lock);
126 125
127 dev->irq = pdev->irq;
128 dev->netdev_ops = &rr_netdev_ops; 126 dev->netdev_ops = &rr_netdev_ops;
129 127
130 dev->base_addr = pci_resource_start(pdev, 0);
131
132 /* display version info if adapter is found */ 128 /* display version info if adapter is found */
133 if (!version_disp) { 129 if (!version_disp) {
134 /* set display flag to TRUE so that */ 130 /* set display flag to TRUE so that */
@@ -146,16 +142,15 @@ static int __devinit rr_init_one(struct pci_dev *pdev,
146 pci_set_master(pdev); 142 pci_set_master(pdev);
147 143
148 printk(KERN_INFO "%s: Essential RoadRunner serial HIPPI " 144 printk(KERN_INFO "%s: Essential RoadRunner serial HIPPI "
149 "at 0x%08lx, irq %i, PCI latency %i\n", dev->name, 145 "at 0x%llx, irq %i, PCI latency %i\n", dev->name,
150 dev->base_addr, dev->irq, pci_latency); 146 (unsigned long long)pci_resource_start(pdev, 0),
147 pdev->irq, pci_latency);
151 148
152 /* 149 /*
153 * Remap the regs into kernel space. 150 * Remap the MMIO regs into kernel space.
154 */ 151 */
155 152 rrpriv->regs = pci_iomap(pdev, 0, 0x1000);
156 rrpriv->regs = ioremap(dev->base_addr, 0x1000); 153 if (!rrpriv->regs) {
157
158 if (!rrpriv->regs){
159 printk(KERN_ERR "%s: Unable to map I/O register, " 154 printk(KERN_ERR "%s: Unable to map I/O register, "
160 "RoadRunner will be disabled.\n", dev->name); 155 "RoadRunner will be disabled.\n", dev->name);
161 ret = -EIO; 156 ret = -EIO;
@@ -202,8 +197,6 @@ static int __devinit rr_init_one(struct pci_dev *pdev,
202 197
203 rr_init(dev); 198 rr_init(dev);
204 199
205 dev->base_addr = 0;
206
207 ret = register_netdev(dev); 200 ret = register_netdev(dev);
208 if (ret) 201 if (ret)
209 goto out; 202 goto out;
@@ -217,7 +210,7 @@ static int __devinit rr_init_one(struct pci_dev *pdev,
217 pci_free_consistent(pdev, TX_TOTAL_SIZE, rrpriv->tx_ring, 210 pci_free_consistent(pdev, TX_TOTAL_SIZE, rrpriv->tx_ring,
218 rrpriv->tx_ring_dma); 211 rrpriv->tx_ring_dma);
219 if (rrpriv->regs) 212 if (rrpriv->regs)
220 iounmap(rrpriv->regs); 213 pci_iounmap(pdev, rrpriv->regs);
221 if (pdev) { 214 if (pdev) {
222 pci_release_regions(pdev); 215 pci_release_regions(pdev);
223 pci_set_drvdata(pdev, NULL); 216 pci_set_drvdata(pdev, NULL);
@@ -231,29 +224,26 @@ static int __devinit rr_init_one(struct pci_dev *pdev,
231static void __devexit rr_remove_one (struct pci_dev *pdev) 224static void __devexit rr_remove_one (struct pci_dev *pdev)
232{ 225{
233 struct net_device *dev = pci_get_drvdata(pdev); 226 struct net_device *dev = pci_get_drvdata(pdev);
227 struct rr_private *rr = netdev_priv(dev);
234 228
235 if (dev) { 229 if (!(readl(&rr->regs->HostCtrl) & NIC_HALTED)) {
236 struct rr_private *rr = netdev_priv(dev); 230 printk(KERN_ERR "%s: trying to unload running NIC\n",
237 231 dev->name);
238 if (!(readl(&rr->regs->HostCtrl) & NIC_HALTED)){ 232 writel(HALT_NIC, &rr->regs->HostCtrl);
239 printk(KERN_ERR "%s: trying to unload running NIC\n",
240 dev->name);
241 writel(HALT_NIC, &rr->regs->HostCtrl);
242 }
243
244 pci_free_consistent(pdev, EVT_RING_SIZE, rr->evt_ring,
245 rr->evt_ring_dma);
246 pci_free_consistent(pdev, RX_TOTAL_SIZE, rr->rx_ring,
247 rr->rx_ring_dma);
248 pci_free_consistent(pdev, TX_TOTAL_SIZE, rr->tx_ring,
249 rr->tx_ring_dma);
250 unregister_netdev(dev);
251 iounmap(rr->regs);
252 free_netdev(dev);
253 pci_release_regions(pdev);
254 pci_disable_device(pdev);
255 pci_set_drvdata(pdev, NULL);
256 } 233 }
234
235 unregister_netdev(dev);
236 pci_free_consistent(pdev, EVT_RING_SIZE, rr->evt_ring,
237 rr->evt_ring_dma);
238 pci_free_consistent(pdev, RX_TOTAL_SIZE, rr->rx_ring,
239 rr->rx_ring_dma);
240 pci_free_consistent(pdev, TX_TOTAL_SIZE, rr->tx_ring,
241 rr->tx_ring_dma);
242 pci_iounmap(pdev, rr->regs);
243 pci_release_regions(pdev);
244 pci_disable_device(pdev);
245 pci_set_drvdata(pdev, NULL);
246 free_netdev(dev);
257} 247}
258 248
259 249
@@ -1229,9 +1219,9 @@ static int rr_open(struct net_device *dev)
1229 readl(&regs->HostCtrl); 1219 readl(&regs->HostCtrl);
1230 spin_unlock_irqrestore(&rrpriv->lock, flags); 1220 spin_unlock_irqrestore(&rrpriv->lock, flags);
1231 1221
1232 if (request_irq(dev->irq, rr_interrupt, IRQF_SHARED, dev->name, dev)) { 1222 if (request_irq(pdev->irq, rr_interrupt, IRQF_SHARED, dev->name, dev)) {
1233 printk(KERN_WARNING "%s: Requested IRQ %d is busy\n", 1223 printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
1234 dev->name, dev->irq); 1224 dev->name, pdev->irq);
1235 ecode = -EAGAIN; 1225 ecode = -EAGAIN;
1236 goto error; 1226 goto error;
1237 } 1227 }
@@ -1338,16 +1328,15 @@ static void rr_dump(struct net_device *dev)
1338 1328
1339static int rr_close(struct net_device *dev) 1329static int rr_close(struct net_device *dev)
1340{ 1330{
1341 struct rr_private *rrpriv; 1331 struct rr_private *rrpriv = netdev_priv(dev);
1342 struct rr_regs __iomem *regs; 1332 struct rr_regs __iomem *regs = rrpriv->regs;
1333 struct pci_dev *pdev = rrpriv->pci_dev;
1343 unsigned long flags; 1334 unsigned long flags;
1344 u32 tmp; 1335 u32 tmp;
1345 short i; 1336 short i;
1346 1337
1347 netif_stop_queue(dev); 1338 netif_stop_queue(dev);
1348 1339
1349 rrpriv = netdev_priv(dev);
1350 regs = rrpriv->regs;
1351 1340
1352 /* 1341 /*
1353 * Lock to make sure we are not cleaning up while another CPU 1342 * Lock to make sure we are not cleaning up while another CPU
@@ -1386,15 +1375,15 @@ static int rr_close(struct net_device *dev)
1386 rr_raz_tx(rrpriv, dev); 1375 rr_raz_tx(rrpriv, dev);
1387 rr_raz_rx(rrpriv, dev); 1376 rr_raz_rx(rrpriv, dev);
1388 1377
1389 pci_free_consistent(rrpriv->pci_dev, 256 * sizeof(struct ring_ctrl), 1378 pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl),
1390 rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma); 1379 rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
1391 rrpriv->rx_ctrl = NULL; 1380 rrpriv->rx_ctrl = NULL;
1392 1381
1393 pci_free_consistent(rrpriv->pci_dev, sizeof(struct rr_info), 1382 pci_free_consistent(pdev, sizeof(struct rr_info), rrpriv->info,
1394 rrpriv->info, rrpriv->info_dma); 1383 rrpriv->info_dma);
1395 rrpriv->info = NULL; 1384 rrpriv->info = NULL;
1396 1385
1397 free_irq(dev->irq, dev); 1386 free_irq(pdev->irq, dev);
1398 spin_unlock_irqrestore(&rrpriv->lock, flags); 1387 spin_unlock_irqrestore(&rrpriv->lock, flags);
1399 1388
1400 return 0; 1389 return 0;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index d025c83cd12a..8b919471472f 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -428,6 +428,24 @@ int netvsc_device_remove(struct hv_device *device)
428 return 0; 428 return 0;
429} 429}
430 430
431
432#define RING_AVAIL_PERCENT_HIWATER 20
433#define RING_AVAIL_PERCENT_LOWATER 10
434
435/*
436 * Get the percentage of available bytes to write in the ring.
437 * The return value is in range from 0 to 100.
438 */
439static inline u32 hv_ringbuf_avail_percent(
440 struct hv_ring_buffer_info *ring_info)
441{
442 u32 avail_read, avail_write;
443
444 hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);
445
446 return avail_write * 100 / ring_info->ring_datasize;
447}
448
431static void netvsc_send_completion(struct hv_device *device, 449static void netvsc_send_completion(struct hv_device *device,
432 struct vmpacket_descriptor *packet) 450 struct vmpacket_descriptor *packet)
433{ 451{
@@ -455,6 +473,8 @@ static void netvsc_send_completion(struct hv_device *device,
455 complete(&net_device->channel_init_wait); 473 complete(&net_device->channel_init_wait);
456 } else if (nvsp_packet->hdr.msg_type == 474 } else if (nvsp_packet->hdr.msg_type ==
457 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) { 475 NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
476 int num_outstanding_sends;
477
458 /* Get the send context */ 478 /* Get the send context */
459 nvsc_packet = (struct hv_netvsc_packet *)(unsigned long) 479 nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
460 packet->trans_id; 480 packet->trans_id;
@@ -463,10 +483,14 @@ static void netvsc_send_completion(struct hv_device *device,
463 nvsc_packet->completion.send.send_completion( 483 nvsc_packet->completion.send.send_completion(
464 nvsc_packet->completion.send.send_completion_ctx); 484 nvsc_packet->completion.send.send_completion_ctx);
465 485
466 atomic_dec(&net_device->num_outstanding_sends); 486 num_outstanding_sends =
487 atomic_dec_return(&net_device->num_outstanding_sends);
467 488
468 if (netif_queue_stopped(ndev) && !net_device->start_remove) 489 if (netif_queue_stopped(ndev) && !net_device->start_remove &&
469 netif_wake_queue(ndev); 490 (hv_ringbuf_avail_percent(&device->channel->outbound)
491 > RING_AVAIL_PERCENT_HIWATER ||
492 num_outstanding_sends < 1))
493 netif_wake_queue(ndev);
470 } else { 494 } else {
471 netdev_err(ndev, "Unknown send completion packet type- " 495 netdev_err(ndev, "Unknown send completion packet type- "
472 "%d received!!\n", nvsp_packet->hdr.msg_type); 496 "%d received!!\n", nvsp_packet->hdr.msg_type);
@@ -519,10 +543,19 @@ int netvsc_send(struct hv_device *device,
519 543
520 if (ret == 0) { 544 if (ret == 0) {
521 atomic_inc(&net_device->num_outstanding_sends); 545 atomic_inc(&net_device->num_outstanding_sends);
546 if (hv_ringbuf_avail_percent(&device->channel->outbound) <
547 RING_AVAIL_PERCENT_LOWATER) {
548 netif_stop_queue(ndev);
549 if (atomic_read(&net_device->
550 num_outstanding_sends) < 1)
551 netif_wake_queue(ndev);
552 }
522 } else if (ret == -EAGAIN) { 553 } else if (ret == -EAGAIN) {
523 netif_stop_queue(ndev); 554 netif_stop_queue(ndev);
524 if (atomic_read(&net_device->num_outstanding_sends) < 1) 555 if (atomic_read(&net_device->num_outstanding_sends) < 1) {
525 netif_wake_queue(ndev); 556 netif_wake_queue(ndev);
557 ret = -ENOSPC;
558 }
526 } else { 559 } else {
527 netdev_err(ndev, "Unable to send packet %p ret %d\n", 560 netdev_err(ndev, "Unable to send packet %p ret %d\n",
528 packet, ret); 561 packet, ret);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 2d59138db7f3..8f8ed3320425 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -211,9 +211,13 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
211 net->stats.tx_packets++; 211 net->stats.tx_packets++;
212 } else { 212 } else {
213 kfree(packet); 213 kfree(packet);
214 if (ret != -EAGAIN) {
215 dev_kfree_skb_any(skb);
216 net->stats.tx_dropped++;
217 }
214 } 218 }
215 219
216 return ret ? NETDEV_TX_BUSY : NETDEV_TX_OK; 220 return (ret == -EAGAIN) ? NETDEV_TX_BUSY : NETDEV_TX_OK;
217} 221}
218 222
219/* 223/*
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index 468047866c8c..35758445297e 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -321,8 +321,8 @@ config AU1000_FIR
321 Say M to build a module; it will be called au1k_ir.ko 321 Say M to build a module; it will be called au1k_ir.ko
322 322
323config SMC_IRCC_FIR 323config SMC_IRCC_FIR
324 tristate "SMSC IrCC (EXPERIMENTAL)" 324 tristate "SMSC IrCC"
325 depends on EXPERIMENTAL && IRDA && ISA_DMA_API 325 depends on IRDA && ISA_DMA_API
326 help 326 help
327 Say Y here if you want to build support for the SMC Infrared 327 Say Y here if you want to build support for the SMC Infrared
328 Communications Controller. It is used in a wide variety of 328 Communications Controller. It is used in a wide variety of
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 4351296dde32..510b9c8d23a9 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1710,7 +1710,7 @@ toshoboe_gotosleep (struct pci_dev *pci_dev, pm_message_t crap)
1710 1710
1711/* Flush all packets */ 1711/* Flush all packets */
1712 while ((i--) && (self->txpending)) 1712 while ((i--) && (self->txpending))
1713 udelay (10000); 1713 msleep(10);
1714 1714
1715 spin_lock_irqsave(&self->spinlock, flags); 1715 spin_lock_irqsave(&self->spinlock, flags);
1716 1716
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index 725d6b367822..eb315b8d07a3 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -737,7 +737,7 @@ static int sh_irda_stop(struct net_device *ndev)
737 netif_stop_queue(ndev); 737 netif_stop_queue(ndev);
738 pm_runtime_put_sync(&self->pdev->dev); 738 pm_runtime_put_sync(&self->pdev->dev);
739 739
740 dev_info(&ndev->dev, "stoped\n"); 740 dev_info(&ndev->dev, "stopped\n");
741 741
742 return 0; 742 return 0;
743} 743}
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index e6661b5c1f83..256eddf1f75a 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -685,7 +685,7 @@ static int sh_sir_stop(struct net_device *ndev)
685 685
686 netif_stop_queue(ndev); 686 netif_stop_queue(ndev);
687 687
688 dev_info(&ndev->dev, "stoped\n"); 688 dev_info(&ndev->dev, "stopped\n");
689 689
690 return 0; 690 return 0;
691} 691}
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 6c95d4087b2d..a926813ee91d 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -1,7 +1,6 @@
1/********************************************************************* 1/*********************************************************************
2 * 2 *
3 * Description: Driver for the SMC Infrared Communications Controller 3 * Description: Driver for the SMC Infrared Communications Controller
4 * Status: Experimental.
5 * Author: Daniele Peri (peri@csai.unipa.it) 4 * Author: Daniele Peri (peri@csai.unipa.it)
6 * Created at: 5 * Created at:
7 * Modified at: 6 * Modified at:
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index f975afdc315c..9653ed6998fe 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -312,7 +312,8 @@ static int macvlan_open(struct net_device *dev)
312 int err; 312 int err;
313 313
314 if (vlan->port->passthru) { 314 if (vlan->port->passthru) {
315 dev_set_promiscuity(lowerdev, 1); 315 if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC))
316 dev_set_promiscuity(lowerdev, 1);
316 goto hash_add; 317 goto hash_add;
317 } 318 }
318 319
@@ -344,12 +345,15 @@ static int macvlan_stop(struct net_device *dev)
344 struct macvlan_dev *vlan = netdev_priv(dev); 345 struct macvlan_dev *vlan = netdev_priv(dev);
345 struct net_device *lowerdev = vlan->lowerdev; 346 struct net_device *lowerdev = vlan->lowerdev;
346 347
348 dev_uc_unsync(lowerdev, dev);
349 dev_mc_unsync(lowerdev, dev);
350
347 if (vlan->port->passthru) { 351 if (vlan->port->passthru) {
348 dev_set_promiscuity(lowerdev, -1); 352 if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC))
353 dev_set_promiscuity(lowerdev, -1);
349 goto hash_del; 354 goto hash_del;
350 } 355 }
351 356
352 dev_mc_unsync(lowerdev, dev);
353 if (dev->flags & IFF_ALLMULTI) 357 if (dev->flags & IFF_ALLMULTI)
354 dev_set_allmulti(lowerdev, -1); 358 dev_set_allmulti(lowerdev, -1);
355 359
@@ -399,10 +403,11 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
399 dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1); 403 dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
400} 404}
401 405
402static void macvlan_set_multicast_list(struct net_device *dev) 406static void macvlan_set_mac_lists(struct net_device *dev)
403{ 407{
404 struct macvlan_dev *vlan = netdev_priv(dev); 408 struct macvlan_dev *vlan = netdev_priv(dev);
405 409
410 dev_uc_sync(vlan->lowerdev, dev);
406 dev_mc_sync(vlan->lowerdev, dev); 411 dev_mc_sync(vlan->lowerdev, dev);
407} 412}
408 413
@@ -542,6 +547,43 @@ static int macvlan_vlan_rx_kill_vid(struct net_device *dev,
542 return 0; 547 return 0;
543} 548}
544 549
550static int macvlan_fdb_add(struct ndmsg *ndm,
551 struct net_device *dev,
552 unsigned char *addr,
553 u16 flags)
554{
555 struct macvlan_dev *vlan = netdev_priv(dev);
556 int err = -EINVAL;
557
558 if (!vlan->port->passthru)
559 return -EOPNOTSUPP;
560
561 if (is_unicast_ether_addr(addr))
562 err = dev_uc_add_excl(dev, addr);
563 else if (is_multicast_ether_addr(addr))
564 err = dev_mc_add_excl(dev, addr);
565
566 return err;
567}
568
569static int macvlan_fdb_del(struct ndmsg *ndm,
570 struct net_device *dev,
571 unsigned char *addr)
572{
573 struct macvlan_dev *vlan = netdev_priv(dev);
574 int err = -EINVAL;
575
576 if (!vlan->port->passthru)
577 return -EOPNOTSUPP;
578
579 if (is_unicast_ether_addr(addr))
580 err = dev_uc_del(dev, addr);
581 else if (is_multicast_ether_addr(addr))
582 err = dev_mc_del(dev, addr);
583
584 return err;
585}
586
545static void macvlan_ethtool_get_drvinfo(struct net_device *dev, 587static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
546 struct ethtool_drvinfo *drvinfo) 588 struct ethtool_drvinfo *drvinfo)
547{ 589{
@@ -572,11 +614,14 @@ static const struct net_device_ops macvlan_netdev_ops = {
572 .ndo_change_mtu = macvlan_change_mtu, 614 .ndo_change_mtu = macvlan_change_mtu,
573 .ndo_change_rx_flags = macvlan_change_rx_flags, 615 .ndo_change_rx_flags = macvlan_change_rx_flags,
574 .ndo_set_mac_address = macvlan_set_mac_address, 616 .ndo_set_mac_address = macvlan_set_mac_address,
575 .ndo_set_rx_mode = macvlan_set_multicast_list, 617 .ndo_set_rx_mode = macvlan_set_mac_lists,
576 .ndo_get_stats64 = macvlan_dev_get_stats64, 618 .ndo_get_stats64 = macvlan_dev_get_stats64,
577 .ndo_validate_addr = eth_validate_addr, 619 .ndo_validate_addr = eth_validate_addr,
578 .ndo_vlan_rx_add_vid = macvlan_vlan_rx_add_vid, 620 .ndo_vlan_rx_add_vid = macvlan_vlan_rx_add_vid,
579 .ndo_vlan_rx_kill_vid = macvlan_vlan_rx_kill_vid, 621 .ndo_vlan_rx_kill_vid = macvlan_vlan_rx_kill_vid,
622 .ndo_fdb_add = macvlan_fdb_add,
623 .ndo_fdb_del = macvlan_fdb_del,
624 .ndo_fdb_dump = ndo_dflt_fdb_dump,
580}; 625};
581 626
582void macvlan_common_setup(struct net_device *dev) 627void macvlan_common_setup(struct net_device *dev)
@@ -711,6 +756,9 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
711 if (data && data[IFLA_MACVLAN_MODE]) 756 if (data && data[IFLA_MACVLAN_MODE])
712 vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]); 757 vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
713 758
759 if (data && data[IFLA_MACVLAN_FLAGS])
760 vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
761
714 if (vlan->mode == MACVLAN_MODE_PASSTHRU) { 762 if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
715 if (port->count) 763 if (port->count)
716 return -EINVAL; 764 return -EINVAL;
@@ -760,6 +808,16 @@ static int macvlan_changelink(struct net_device *dev,
760 struct macvlan_dev *vlan = netdev_priv(dev); 808 struct macvlan_dev *vlan = netdev_priv(dev);
761 if (data && data[IFLA_MACVLAN_MODE]) 809 if (data && data[IFLA_MACVLAN_MODE])
762 vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]); 810 vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
811 if (data && data[IFLA_MACVLAN_FLAGS]) {
812 __u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
813 bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC;
814
815 if (promisc && (flags & MACVLAN_FLAG_NOPROMISC))
816 dev_set_promiscuity(vlan->lowerdev, -1);
817 else if (promisc && !(flags & MACVLAN_FLAG_NOPROMISC))
818 dev_set_promiscuity(vlan->lowerdev, 1);
819 vlan->flags = flags;
820 }
763 return 0; 821 return 0;
764} 822}
765 823
@@ -773,7 +831,10 @@ static int macvlan_fill_info(struct sk_buff *skb,
773{ 831{
774 struct macvlan_dev *vlan = netdev_priv(dev); 832 struct macvlan_dev *vlan = netdev_priv(dev);
775 833
776 NLA_PUT_U32(skb, IFLA_MACVLAN_MODE, vlan->mode); 834 if (nla_put_u32(skb, IFLA_MACVLAN_MODE, vlan->mode))
835 goto nla_put_failure;
836 if (nla_put_u16(skb, IFLA_MACVLAN_FLAGS, vlan->flags))
837 goto nla_put_failure;
777 return 0; 838 return 0;
778 839
779nla_put_failure: 840nla_put_failure:
@@ -781,7 +842,8 @@ nla_put_failure:
781} 842}
782 843
783static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = { 844static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
784 [IFLA_MACVLAN_MODE] = { .type = NLA_U32 }, 845 [IFLA_MACVLAN_MODE] = { .type = NLA_U32 },
846 [IFLA_MACVLAN_FLAGS] = { .type = NLA_U16 },
785}; 847};
786 848
787int macvlan_link_register(struct rtnl_link_ops *ops) 849int macvlan_link_register(struct rtnl_link_ops *ops)
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 0427c6561c84..163559c16988 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -505,10 +505,11 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
505 if (copy > size) { 505 if (copy > size) {
506 ++from; 506 ++from;
507 --count; 507 --count;
508 } 508 offset = 0;
509 } else
510 offset += size;
509 copy -= size; 511 copy -= size;
510 offset1 += size; 512 offset1 += size;
511 offset = 0;
512 } 513 }
513 514
514 if (len == offset1) 515 if (len == offset1)
@@ -518,24 +519,29 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
518 struct page *page[MAX_SKB_FRAGS]; 519 struct page *page[MAX_SKB_FRAGS];
519 int num_pages; 520 int num_pages;
520 unsigned long base; 521 unsigned long base;
522 unsigned long truesize;
521 523
522 len = from->iov_len - offset1; 524 len = from->iov_len - offset;
523 if (!len) { 525 if (!len) {
524 offset1 = 0; 526 offset = 0;
525 ++from; 527 ++from;
526 continue; 528 continue;
527 } 529 }
528 base = (unsigned long)from->iov_base + offset1; 530 base = (unsigned long)from->iov_base + offset;
529 size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT; 531 size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
532 if (i + size > MAX_SKB_FRAGS)
533 return -EMSGSIZE;
530 num_pages = get_user_pages_fast(base, size, 0, &page[i]); 534 num_pages = get_user_pages_fast(base, size, 0, &page[i]);
531 if ((num_pages != size) || 535 if (num_pages != size) {
532 (num_pages > MAX_SKB_FRAGS - skb_shinfo(skb)->nr_frags)) 536 for (i = 0; i < num_pages; i++)
533 /* put_page is in skb free */ 537 put_page(page[i]);
534 return -EFAULT; 538 return -EFAULT;
539 }
540 truesize = size * PAGE_SIZE;
535 skb->data_len += len; 541 skb->data_len += len;
536 skb->len += len; 542 skb->len += len;
537 skb->truesize += len; 543 skb->truesize += truesize;
538 atomic_add(len, &skb->sk->sk_wmem_alloc); 544 atomic_add(truesize, &skb->sk->sk_wmem_alloc);
539 while (len) { 545 while (len) {
540 int off = base & ~PAGE_MASK; 546 int off = base & ~PAGE_MASK;
541 int size = min_t(int, len, PAGE_SIZE - off); 547 int size = min_t(int, len, PAGE_SIZE - off);
@@ -546,7 +552,7 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
546 len -= size; 552 len -= size;
547 i++; 553 i++;
548 } 554 }
549 offset1 = 0; 555 offset = 0;
550 ++from; 556 ++from;
551 } 557 }
552 return 0; 558 return 0;
@@ -646,7 +652,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
646 int err; 652 int err;
647 struct virtio_net_hdr vnet_hdr = { 0 }; 653 struct virtio_net_hdr vnet_hdr = { 0 };
648 int vnet_hdr_len = 0; 654 int vnet_hdr_len = 0;
649 int copylen; 655 int copylen = 0;
650 bool zerocopy = false; 656 bool zerocopy = false;
651 657
652 if (q->flags & IFF_VNET_HDR) { 658 if (q->flags & IFF_VNET_HDR) {
@@ -675,15 +681,31 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
675 if (unlikely(len < ETH_HLEN)) 681 if (unlikely(len < ETH_HLEN))
676 goto err; 682 goto err;
677 683
684 err = -EMSGSIZE;
685 if (unlikely(count > UIO_MAXIOV))
686 goto err;
687
678 if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) 688 if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY))
679 zerocopy = true; 689 zerocopy = true;
680 690
681 if (zerocopy) { 691 if (zerocopy) {
692 /* Userspace may produce vectors with count greater than
693 * MAX_SKB_FRAGS, so we need to linearize parts of the skb
694 * to let the rest of data to be fit in the frags.
695 */
696 if (count > MAX_SKB_FRAGS) {
697 copylen = iov_length(iv, count - MAX_SKB_FRAGS);
698 if (copylen < vnet_hdr_len)
699 copylen = 0;
700 else
701 copylen -= vnet_hdr_len;
702 }
682 /* There are 256 bytes to be copied in skb, so there is enough 703 /* There are 256 bytes to be copied in skb, so there is enough
683 * room for skb expand head in case it is used. 704 * room for skb expand head in case it is used.
684 * The rest buffer is mapped from userspace. 705 * The rest buffer is mapped from userspace.
685 */ 706 */
686 copylen = vnet_hdr.hdr_len; 707 if (copylen < vnet_hdr.hdr_len)
708 copylen = vnet_hdr.hdr_len;
687 if (!copylen) 709 if (!copylen)
688 copylen = GOODCOPY_LEN; 710 copylen = GOODCOPY_LEN;
689 } else 711 } else
@@ -694,10 +716,9 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
694 if (!skb) 716 if (!skb)
695 goto err; 717 goto err;
696 718
697 if (zerocopy) { 719 if (zerocopy)
698 err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count); 720 err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
699 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; 721 else
700 } else
701 err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len, 722 err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
702 len); 723 len);
703 if (err) 724 if (err)
@@ -716,8 +737,10 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
716 rcu_read_lock_bh(); 737 rcu_read_lock_bh();
717 vlan = rcu_dereference_bh(q->vlan); 738 vlan = rcu_dereference_bh(q->vlan);
718 /* copy skb_ubuf_info for callback when skb has no error */ 739 /* copy skb_ubuf_info for callback when skb has no error */
719 if (zerocopy) 740 if (zerocopy) {
720 skb_shinfo(skb)->destructor_arg = m->msg_control; 741 skb_shinfo(skb)->destructor_arg = m->msg_control;
742 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
743 }
721 if (vlan) 744 if (vlan)
722 macvlan_start_xmit(skb, vlan->dev); 745 macvlan_start_xmit(skb, vlan->dev);
723 else 746 else
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 0e01f4e5cd64..944cdfb80fe4 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -135,6 +135,25 @@ config MDIO_OCTEON
135 135
136 If in doubt, say Y. 136 If in doubt, say Y.
137 137
138config MDIO_BUS_MUX
139 tristate
140 depends on OF_MDIO
141 help
142 This module provides a driver framework for MDIO bus
143 multiplexers which connect one of several child MDIO busses
144 to a parent bus. Switching between child busses is done by
145 device specific drivers.
146
147config MDIO_BUS_MUX_GPIO
148 tristate "Support for GPIO controlled MDIO bus multiplexers"
149 depends on OF_GPIO && OF_MDIO
150 select MDIO_BUS_MUX
151 help
152 This module provides a driver for MDIO bus multiplexers that
153 are controlled via GPIO lines. The multiplexer connects one of
154 several child MDIO busses to a parent bus. Child bus
155 selection is under the control of GPIO lines.
156
138endif # PHYLIB 157endif # PHYLIB
139 158
140config MICREL_KS8995MA 159config MICREL_KS8995MA
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index b7438b1b94b9..f51af688ef8b 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -25,3 +25,5 @@ obj-$(CONFIG_MICREL_PHY) += micrel.o
25obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o 25obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
26obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o 26obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
27obj-$(CONFIG_AMD_PHY) += amd.o 27obj-$(CONFIG_AMD_PHY) += amd.o
28obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o
29obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index e16f98cb4f04..cd802eb25fd2 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -39,10 +39,7 @@ static int bcm63xx_config_init(struct phy_device *phydev)
39 MII_BCM63XX_IR_SPEED | 39 MII_BCM63XX_IR_SPEED |
40 MII_BCM63XX_IR_LINK) | 40 MII_BCM63XX_IR_LINK) |
41 MII_BCM63XX_IR_EN; 41 MII_BCM63XX_IR_EN;
42 err = phy_write(phydev, MII_BCM63XX_IR, reg); 42 return phy_write(phydev, MII_BCM63XX_IR, reg);
43 if (err < 0)
44 return err;
45 return 0;
46} 43}
47 44
48static int bcm63xx_ack_interrupt(struct phy_device *phydev) 45static int bcm63xx_ack_interrupt(struct phy_device *phydev)
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index 2f774acdb551..5f59cc064778 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -134,12 +134,7 @@ static int dm9161_config_init(struct phy_device *phydev)
134 return err; 134 return err;
135 135
136 /* Reconnect the PHY, and enable Autonegotiation */ 136 /* Reconnect the PHY, and enable Autonegotiation */
137 err = phy_write(phydev, MII_BMCR, BMCR_ANENABLE); 137 return phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
138
139 if (err < 0)
140 return err;
141
142 return 0;
143} 138}
144 139
145static int dm9161_ack_interrupt(struct phy_device *phydev) 140static int dm9161_ack_interrupt(struct phy_device *phydev)
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index dd7ae19579d1..940b29022d0c 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1215,6 +1215,36 @@ static void dp83640_txtstamp(struct phy_device *phydev,
1215 } 1215 }
1216} 1216}
1217 1217
1218static int dp83640_ts_info(struct phy_device *dev, struct ethtool_ts_info *info)
1219{
1220 struct dp83640_private *dp83640 = dev->priv;
1221
1222 info->so_timestamping =
1223 SOF_TIMESTAMPING_TX_HARDWARE |
1224 SOF_TIMESTAMPING_RX_HARDWARE |
1225 SOF_TIMESTAMPING_RAW_HARDWARE;
1226 info->phc_index = ptp_clock_index(dp83640->clock->ptp_clock);
1227 info->tx_types =
1228 (1 << HWTSTAMP_TX_OFF) |
1229 (1 << HWTSTAMP_TX_ON) |
1230 (1 << HWTSTAMP_TX_ONESTEP_SYNC);
1231 info->rx_filters =
1232 (1 << HWTSTAMP_FILTER_NONE) |
1233 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
1234 (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
1235 (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
1236 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
1237 (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
1238 (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
1239 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
1240 (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
1241 (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
1242 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
1243 (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
1244 (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
1245 return 0;
1246}
1247
1218static struct phy_driver dp83640_driver = { 1248static struct phy_driver dp83640_driver = {
1219 .phy_id = DP83640_PHY_ID, 1249 .phy_id = DP83640_PHY_ID,
1220 .phy_id_mask = 0xfffffff0, 1250 .phy_id_mask = 0xfffffff0,
@@ -1225,6 +1255,7 @@ static struct phy_driver dp83640_driver = {
1225 .remove = dp83640_remove, 1255 .remove = dp83640_remove,
1226 .config_aneg = genphy_config_aneg, 1256 .config_aneg = genphy_config_aneg,
1227 .read_status = genphy_read_status, 1257 .read_status = genphy_read_status,
1258 .ts_info = dp83640_ts_info,
1228 .hwtstamp = dp83640_hwtstamp, 1259 .hwtstamp = dp83640_hwtstamp,
1229 .rxtstamp = dp83640_rxtstamp, 1260 .rxtstamp = dp83640_rxtstamp,
1230 .txtstamp = dp83640_txtstamp, 1261 .txtstamp = dp83640_txtstamp,
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index e8b9c53c304b..418928d644bf 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -455,11 +455,7 @@ static int m88e1111_config_init(struct phy_device *phydev)
455 if (err < 0) 455 if (err < 0)
456 return err; 456 return err;
457 457
458 err = phy_write(phydev, MII_BMCR, BMCR_RESET); 458 return phy_write(phydev, MII_BMCR, BMCR_RESET);
459 if (err < 0)
460 return err;
461
462 return 0;
463} 459}
464 460
465static int m88e1118_config_aneg(struct phy_device *phydev) 461static int m88e1118_config_aneg(struct phy_device *phydev)
@@ -515,11 +511,7 @@ static int m88e1118_config_init(struct phy_device *phydev)
515 if (err < 0) 511 if (err < 0)
516 return err; 512 return err;
517 513
518 err = phy_write(phydev, MII_BMCR, BMCR_RESET); 514 return phy_write(phydev, MII_BMCR, BMCR_RESET);
519 if (err < 0)
520 return err;
521
522 return 0;
523} 515}
524 516
525static int m88e1149_config_init(struct phy_device *phydev) 517static int m88e1149_config_init(struct phy_device *phydev)
@@ -545,11 +537,7 @@ static int m88e1149_config_init(struct phy_device *phydev)
545 if (err < 0) 537 if (err < 0)
546 return err; 538 return err;
547 539
548 err = phy_write(phydev, MII_BMCR, BMCR_RESET); 540 return phy_write(phydev, MII_BMCR, BMCR_RESET);
549 if (err < 0)
550 return err;
551
552 return 0;
553} 541}
554 542
555static int m88e1145_config_init(struct phy_device *phydev) 543static int m88e1145_config_init(struct phy_device *phydev)
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
new file mode 100644
index 000000000000..e0cc4ef33dee
--- /dev/null
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -0,0 +1,142 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2011, 2012 Cavium, Inc.
7 */
8
9#include <linux/platform_device.h>
10#include <linux/device.h>
11#include <linux/of_mdio.h>
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/phy.h>
15#include <linux/mdio-mux.h>
16#include <linux/of_gpio.h>
17
18#define DRV_VERSION "1.0"
19#define DRV_DESCRIPTION "GPIO controlled MDIO bus multiplexer driver"
20
21#define MDIO_MUX_GPIO_MAX_BITS 8
22
23struct mdio_mux_gpio_state {
24 int gpio[MDIO_MUX_GPIO_MAX_BITS];
25 unsigned int num_gpios;
26 void *mux_handle;
27};
28
29static int mdio_mux_gpio_switch_fn(int current_child, int desired_child,
30 void *data)
31{
32 int change;
33 unsigned int n;
34 struct mdio_mux_gpio_state *s = data;
35
36 if (current_child == desired_child)
37 return 0;
38
39 change = current_child == -1 ? -1 : current_child ^ desired_child;
40
41 for (n = 0; n < s->num_gpios; n++) {
42 if (change & 1)
43 gpio_set_value_cansleep(s->gpio[n],
44 (desired_child & 1) != 0);
45 change >>= 1;
46 desired_child >>= 1;
47 }
48
49 return 0;
50}
51
52static int __devinit mdio_mux_gpio_probe(struct platform_device *pdev)
53{
54 enum of_gpio_flags f;
55 struct mdio_mux_gpio_state *s;
56 unsigned int num_gpios;
57 unsigned int n;
58 int r;
59
60 if (!pdev->dev.of_node)
61 return -ENODEV;
62
63 num_gpios = of_gpio_count(pdev->dev.of_node);
64 if (num_gpios == 0 || num_gpios > MDIO_MUX_GPIO_MAX_BITS)
65 return -ENODEV;
66
67 s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
68 if (!s)
69 return -ENOMEM;
70
71 s->num_gpios = num_gpios;
72
73 for (n = 0; n < num_gpios; ) {
74 int gpio = of_get_gpio_flags(pdev->dev.of_node, n, &f);
75 if (gpio < 0) {
76 r = (gpio == -ENODEV) ? -EPROBE_DEFER : gpio;
77 goto err;
78 }
79 s->gpio[n] = gpio;
80
81 n++;
82
83 r = gpio_request(gpio, "mdio_mux_gpio");
84 if (r)
85 goto err;
86
87 r = gpio_direction_output(gpio, 0);
88 if (r)
89 goto err;
90 }
91
92 r = mdio_mux_init(&pdev->dev,
93 mdio_mux_gpio_switch_fn, &s->mux_handle, s);
94
95 if (r == 0) {
96 pdev->dev.platform_data = s;
97 return 0;
98 }
99err:
100 while (n) {
101 n--;
102 gpio_free(s->gpio[n]);
103 }
104 devm_kfree(&pdev->dev, s);
105 return r;
106}
107
108static int __devexit mdio_mux_gpio_remove(struct platform_device *pdev)
109{
110 struct mdio_mux_gpio_state *s = pdev->dev.platform_data;
111 mdio_mux_uninit(s->mux_handle);
112 return 0;
113}
114
115static struct of_device_id mdio_mux_gpio_match[] = {
116 {
117 .compatible = "mdio-mux-gpio",
118 },
119 {
120 /* Legacy compatible property. */
121 .compatible = "cavium,mdio-mux-sn74cbtlv3253",
122 },
123 {},
124};
125MODULE_DEVICE_TABLE(of, mdio_mux_gpio_match);
126
127static struct platform_driver mdio_mux_gpio_driver = {
128 .driver = {
129 .name = "mdio-mux-gpio",
130 .owner = THIS_MODULE,
131 .of_match_table = mdio_mux_gpio_match,
132 },
133 .probe = mdio_mux_gpio_probe,
134 .remove = __devexit_p(mdio_mux_gpio_remove),
135};
136
137module_platform_driver(mdio_mux_gpio_driver);
138
139MODULE_DESCRIPTION(DRV_DESCRIPTION);
140MODULE_VERSION(DRV_VERSION);
141MODULE_AUTHOR("David Daney");
142MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
new file mode 100644
index 000000000000..39ea0674dcde
--- /dev/null
+++ b/drivers/net/phy/mdio-mux.c
@@ -0,0 +1,192 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2011, 2012 Cavium, Inc.
7 */
8
9#include <linux/platform_device.h>
10#include <linux/mdio-mux.h>
11#include <linux/of_mdio.h>
12#include <linux/device.h>
13#include <linux/module.h>
14#include <linux/phy.h>
15
16#define DRV_VERSION "1.0"
17#define DRV_DESCRIPTION "MDIO bus multiplexer driver"
18
/* Forward declaration: parent and child structures reference each other. */
struct mdio_mux_child_bus;

/* Per-multiplexer state: wraps the one real underlying mii_bus. */
struct mdio_mux_parent_bus {
	struct mii_bus *mii_bus;	/* the real (parent) MDIO bus */
	int current_child;		/* bus_number currently routed, -1 if none */
	int parent_id;			/* unique id used to build child bus ids */
	void *switch_data;		/* opaque cookie handed back to switch_fn */
	int (*switch_fn)(int current_child, int desired_child, void *data);

	/* List of our children linked through their next fields. */
	struct mdio_mux_child_bus *children;
};

/* One virtual child bus hanging off a multiplexer. */
struct mdio_mux_child_bus {
	struct mii_bus *mii_bus;	/* the registered virtual bus */
	struct mdio_mux_parent_bus *parent;
	struct mdio_mux_child_bus *next;	/* sibling link in parent->children */
	int bus_number;			/* DT "reg" value selecting this child */
	int phy_irq[PHY_MAX_ADDR];	/* backing storage for mii_bus->irq */
};
39
40/*
41 * The parent bus' lock is used to order access to the switch_fn.
42 */
43static int mdio_mux_read(struct mii_bus *bus, int phy_id, int regnum)
44{
45 struct mdio_mux_child_bus *cb = bus->priv;
46 struct mdio_mux_parent_bus *pb = cb->parent;
47 int r;
48
49 mutex_lock(&pb->mii_bus->mdio_lock);
50 r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
51 if (r)
52 goto out;
53
54 pb->current_child = cb->bus_number;
55
56 r = pb->mii_bus->read(pb->mii_bus, phy_id, regnum);
57out:
58 mutex_unlock(&pb->mii_bus->mdio_lock);
59
60 return r;
61}
62
63/*
64 * The parent bus' lock is used to order access to the switch_fn.
65 */
66static int mdio_mux_write(struct mii_bus *bus, int phy_id,
67 int regnum, u16 val)
68{
69 struct mdio_mux_child_bus *cb = bus->priv;
70 struct mdio_mux_parent_bus *pb = cb->parent;
71
72 int r;
73
74 mutex_lock(&pb->mii_bus->mdio_lock);
75 r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
76 if (r)
77 goto out;
78
79 pb->current_child = cb->bus_number;
80
81 r = pb->mii_bus->write(pb->mii_bus, phy_id, regnum, val);
82out:
83 mutex_unlock(&pb->mii_bus->mdio_lock);
84
85 return r;
86}
87
88static int parent_count;
89
90int mdio_mux_init(struct device *dev,
91 int (*switch_fn)(int cur, int desired, void *data),
92 void **mux_handle,
93 void *data)
94{
95 struct device_node *parent_bus_node;
96 struct device_node *child_bus_node;
97 int r, ret_val;
98 struct mii_bus *parent_bus;
99 struct mdio_mux_parent_bus *pb;
100 struct mdio_mux_child_bus *cb;
101
102 if (!dev->of_node)
103 return -ENODEV;
104
105 parent_bus_node = of_parse_phandle(dev->of_node, "mdio-parent-bus", 0);
106
107 if (!parent_bus_node)
108 return -ENODEV;
109
110 parent_bus = of_mdio_find_bus(parent_bus_node);
111 if (parent_bus == NULL) {
112 ret_val = -EPROBE_DEFER;
113 goto err_parent_bus;
114 }
115
116 pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL);
117 if (pb == NULL) {
118 ret_val = -ENOMEM;
119 goto err_parent_bus;
120 }
121
122 pb->switch_data = data;
123 pb->switch_fn = switch_fn;
124 pb->current_child = -1;
125 pb->parent_id = parent_count++;
126 pb->mii_bus = parent_bus;
127
128 ret_val = -ENODEV;
129 for_each_child_of_node(dev->of_node, child_bus_node) {
130 u32 v;
131
132 r = of_property_read_u32(child_bus_node, "reg", &v);
133 if (r)
134 continue;
135
136 cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL);
137 if (cb == NULL) {
138 dev_err(dev,
139 "Error: Failed to allocate memory for child\n");
140 ret_val = -ENOMEM;
141 break;
142 }
143 cb->bus_number = v;
144 cb->parent = pb;
145 cb->mii_bus = mdiobus_alloc();
146 cb->mii_bus->priv = cb;
147
148 cb->mii_bus->irq = cb->phy_irq;
149 cb->mii_bus->name = "mdio_mux";
150 snprintf(cb->mii_bus->id, MII_BUS_ID_SIZE, "%x.%x",
151 pb->parent_id, v);
152 cb->mii_bus->parent = dev;
153 cb->mii_bus->read = mdio_mux_read;
154 cb->mii_bus->write = mdio_mux_write;
155 r = of_mdiobus_register(cb->mii_bus, child_bus_node);
156 if (r) {
157 mdiobus_free(cb->mii_bus);
158 devm_kfree(dev, cb);
159 } else {
160 of_node_get(child_bus_node);
161 cb->next = pb->children;
162 pb->children = cb;
163 }
164 }
165 if (pb->children) {
166 *mux_handle = pb;
167 dev_info(dev, "Version " DRV_VERSION "\n");
168 return 0;
169 }
170err_parent_bus:
171 of_node_put(parent_bus_node);
172 return ret_val;
173}
174EXPORT_SYMBOL_GPL(mdio_mux_init);
175
176void mdio_mux_uninit(void *mux_handle)
177{
178 struct mdio_mux_parent_bus *pb = mux_handle;
179 struct mdio_mux_child_bus *cb = pb->children;
180
181 while (cb) {
182 mdiobus_unregister(cb->mii_bus);
183 mdiobus_free(cb->mii_bus);
184 cb = cb->next;
185 }
186}
187EXPORT_SYMBOL_GPL(mdio_mux_uninit);
188
189MODULE_DESCRIPTION(DRV_DESCRIPTION);
190MODULE_VERSION(DRV_VERSION);
191MODULE_AUTHOR("David Daney");
192MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 8985cc62cf41..83d5c9f55686 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -88,6 +88,38 @@ static struct class mdio_bus_class = {
88 .dev_release = mdiobus_release, 88 .dev_release = mdiobus_release,
89}; 89};
90 90
91#ifdef CONFIG_OF_MDIO
92/* Helper function for of_mdio_find_bus */
93static int of_mdio_bus_match(struct device *dev, void *mdio_bus_np)
94{
95 return dev->of_node == mdio_bus_np;
96}
97/**
98 * of_mdio_find_bus - Given an mii_bus node, find the mii_bus.
99 * @mdio_np: Pointer to the mii_bus.
100 *
101 * Returns a pointer to the mii_bus, or NULL if none found.
102 *
103 * Because the association of a device_node and mii_bus is made via
104 * of_mdiobus_register(), the mii_bus cannot be found before it is
105 * registered with of_mdiobus_register().
106 *
107 */
108struct mii_bus *of_mdio_find_bus(struct device_node *mdio_bus_np)
109{
110 struct device *d;
111
112 if (!mdio_bus_np)
113 return NULL;
114
115 d = class_find_device(&mdio_bus_class, NULL, mdio_bus_np,
116 of_mdio_bus_match);
117
118 return d ? to_mii_bus(d) : NULL;
119}
120EXPORT_SYMBOL(of_mdio_find_bus);
121#endif
122
91/** 123/**
92 * mdiobus_register - bring up all the PHYs on a given bus and attach them to bus 124 * mdiobus_register - bring up all the PHYs on a given bus and attach them to bus
93 * @bus: target mii_bus 125 * @bus: target mii_bus
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 116a2dd7c879..4eb98bc52a0a 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -348,7 +348,6 @@ static int __devexit ks8995_remove(struct spi_device *spi)
348static struct spi_driver ks8995_driver = { 348static struct spi_driver ks8995_driver = {
349 .driver = { 349 .driver = {
350 .name = "spi-ks8995", 350 .name = "spi-ks8995",
351 .bus = &spi_bus_type,
352 .owner = THIS_MODULE, 351 .owner = THIS_MODULE,
353 }, 352 },
354 .probe = ks8995_probe, 353 .probe = ks8995_probe,
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 885dbdd9c39e..72b50f57e7b2 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -116,8 +116,8 @@ static int lookup_chan_dst(u16 call_id, __be32 d_addr)
116 int i; 116 int i;
117 117
118 rcu_read_lock(); 118 rcu_read_lock();
119 for (i = find_next_bit(callid_bitmap, MAX_CALLID, 1); i < MAX_CALLID; 119 i = 1;
120 i = find_next_bit(callid_bitmap, MAX_CALLID, i + 1)) { 120 for_each_set_bit_from(i, callid_bitmap, MAX_CALLID) {
121 sock = rcu_dereference(callid_sock[i]); 121 sock = rcu_dereference(callid_sock[i]);
122 if (!sock) 122 if (!sock)
123 continue; 123 continue;
diff --git a/drivers/net/team/Kconfig b/drivers/net/team/Kconfig
index 248a144033ca..89024d5fc33a 100644
--- a/drivers/net/team/Kconfig
+++ b/drivers/net/team/Kconfig
@@ -40,4 +40,15 @@ config NET_TEAM_MODE_ACTIVEBACKUP
40 To compile this team mode as a module, choose M here: the module 40 To compile this team mode as a module, choose M here: the module
41 will be called team_mode_activebackup. 41 will be called team_mode_activebackup.
42 42
43config NET_TEAM_MODE_LOADBALANCE
44 tristate "Load-balance mode support"
45 depends on NET_TEAM
46 ---help---
47 This mode provides load balancing functionality. Tx port selection
48 is done using BPF function set up from userspace (bpf_hash_func
49 option)
50
51 To compile this team mode as a module, choose M here: the module
52 will be called team_mode_loadbalance.
53
43endif # NET_TEAM 54endif # NET_TEAM
diff --git a/drivers/net/team/Makefile b/drivers/net/team/Makefile
index 85f2028a87af..fb9f4c1c51ff 100644
--- a/drivers/net/team/Makefile
+++ b/drivers/net/team/Makefile
@@ -5,3 +5,4 @@
5obj-$(CONFIG_NET_TEAM) += team.o 5obj-$(CONFIG_NET_TEAM) += team.o
6obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o 6obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o
7obj-$(CONFIG_NET_TEAM_MODE_ACTIVEBACKUP) += team_mode_activebackup.o 7obj-$(CONFIG_NET_TEAM_MODE_ACTIVEBACKUP) += team_mode_activebackup.o
8obj-$(CONFIG_NET_TEAM_MODE_LOADBALANCE) += team_mode_loadbalance.o
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 8f81805c6825..c61ae35a53ce 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -65,7 +65,7 @@ static int __set_port_mac(struct net_device *port_dev,
65 return dev_set_mac_address(port_dev, &addr); 65 return dev_set_mac_address(port_dev, &addr);
66} 66}
67 67
68int team_port_set_orig_mac(struct team_port *port) 68static int team_port_set_orig_mac(struct team_port *port)
69{ 69{
70 return __set_port_mac(port->dev, port->orig.dev_addr); 70 return __set_port_mac(port->dev, port->orig.dev_addr);
71} 71}
@@ -76,12 +76,26 @@ int team_port_set_team_mac(struct team_port *port)
76} 76}
77EXPORT_SYMBOL(team_port_set_team_mac); 77EXPORT_SYMBOL(team_port_set_team_mac);
78 78
79static void team_refresh_port_linkup(struct team_port *port)
80{
81 port->linkup = port->user.linkup_enabled ? port->user.linkup :
82 port->state.linkup;
83}
79 84
80/******************* 85/*******************
81 * Options handling 86 * Options handling
82 *******************/ 87 *******************/
83 88
84struct team_option *__team_find_option(struct team *team, const char *opt_name) 89struct team_option_inst { /* One for each option instance */
90 struct list_head list;
91 struct team_option *option;
92 struct team_port *port; /* != NULL if per-port */
93 bool changed;
94 bool removed;
95};
96
97static struct team_option *__team_find_option(struct team *team,
98 const char *opt_name)
85{ 99{
86 struct team_option *option; 100 struct team_option *option;
87 101
@@ -92,9 +106,121 @@ struct team_option *__team_find_option(struct team *team, const char *opt_name)
92 return NULL; 106 return NULL;
93} 107}
94 108
95int __team_options_register(struct team *team, 109static int __team_option_inst_add(struct team *team, struct team_option *option,
96 const struct team_option *option, 110 struct team_port *port)
97 size_t option_count) 111{
112 struct team_option_inst *opt_inst;
113
114 opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
115 if (!opt_inst)
116 return -ENOMEM;
117 opt_inst->option = option;
118 opt_inst->port = port;
119 opt_inst->changed = true;
120 opt_inst->removed = false;
121 list_add_tail(&opt_inst->list, &team->option_inst_list);
122 return 0;
123}
124
125static void __team_option_inst_del(struct team_option_inst *opt_inst)
126{
127 list_del(&opt_inst->list);
128 kfree(opt_inst);
129}
130
131static void __team_option_inst_del_option(struct team *team,
132 struct team_option *option)
133{
134 struct team_option_inst *opt_inst, *tmp;
135
136 list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
137 if (opt_inst->option == option)
138 __team_option_inst_del(opt_inst);
139 }
140}
141
142static int __team_option_inst_add_option(struct team *team,
143 struct team_option *option)
144{
145 struct team_port *port;
146 int err;
147
148 if (!option->per_port)
149 return __team_option_inst_add(team, option, 0);
150
151 list_for_each_entry(port, &team->port_list, list) {
152 err = __team_option_inst_add(team, option, port);
153 if (err)
154 goto inst_del_option;
155 }
156 return 0;
157
158inst_del_option:
159 __team_option_inst_del_option(team, option);
160 return err;
161}
162
163static void __team_option_inst_mark_removed_option(struct team *team,
164 struct team_option *option)
165{
166 struct team_option_inst *opt_inst;
167
168 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
169 if (opt_inst->option == option) {
170 opt_inst->changed = true;
171 opt_inst->removed = true;
172 }
173 }
174}
175
176static void __team_option_inst_del_port(struct team *team,
177 struct team_port *port)
178{
179 struct team_option_inst *opt_inst, *tmp;
180
181 list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
182 if (opt_inst->option->per_port &&
183 opt_inst->port == port)
184 __team_option_inst_del(opt_inst);
185 }
186}
187
188static int __team_option_inst_add_port(struct team *team,
189 struct team_port *port)
190{
191 struct team_option *option;
192 int err;
193
194 list_for_each_entry(option, &team->option_list, list) {
195 if (!option->per_port)
196 continue;
197 err = __team_option_inst_add(team, option, port);
198 if (err)
199 goto inst_del_port;
200 }
201 return 0;
202
203inst_del_port:
204 __team_option_inst_del_port(team, port);
205 return err;
206}
207
208static void __team_option_inst_mark_removed_port(struct team *team,
209 struct team_port *port)
210{
211 struct team_option_inst *opt_inst;
212
213 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
214 if (opt_inst->port == port) {
215 opt_inst->changed = true;
216 opt_inst->removed = true;
217 }
218 }
219}
220
221static int __team_options_register(struct team *team,
222 const struct team_option *option,
223 size_t option_count)
98{ 224{
99 int i; 225 int i;
100 struct team_option **dst_opts; 226 struct team_option **dst_opts;
@@ -107,26 +233,32 @@ int __team_options_register(struct team *team,
107 for (i = 0; i < option_count; i++, option++) { 233 for (i = 0; i < option_count; i++, option++) {
108 if (__team_find_option(team, option->name)) { 234 if (__team_find_option(team, option->name)) {
109 err = -EEXIST; 235 err = -EEXIST;
110 goto rollback; 236 goto alloc_rollback;
111 } 237 }
112 dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL); 238 dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
113 if (!dst_opts[i]) { 239 if (!dst_opts[i]) {
114 err = -ENOMEM; 240 err = -ENOMEM;
115 goto rollback; 241 goto alloc_rollback;
116 } 242 }
117 } 243 }
118 244
119 for (i = 0; i < option_count; i++) { 245 for (i = 0; i < option_count; i++) {
120 dst_opts[i]->changed = true; 246 err = __team_option_inst_add_option(team, dst_opts[i]);
121 dst_opts[i]->removed = false; 247 if (err)
248 goto inst_rollback;
122 list_add_tail(&dst_opts[i]->list, &team->option_list); 249 list_add_tail(&dst_opts[i]->list, &team->option_list);
123 } 250 }
124 251
125 kfree(dst_opts); 252 kfree(dst_opts);
126 return 0; 253 return 0;
127 254
128rollback: 255inst_rollback:
129 for (i = 0; i < option_count; i++) 256 for (i--; i >= 0; i--)
257 __team_option_inst_del_option(team, dst_opts[i]);
258
259 i = option_count - 1;
260alloc_rollback:
261 for (i--; i >= 0; i--)
130 kfree(dst_opts[i]); 262 kfree(dst_opts[i]);
131 263
132 kfree(dst_opts); 264 kfree(dst_opts);
@@ -143,10 +275,8 @@ static void __team_options_mark_removed(struct team *team,
143 struct team_option *del_opt; 275 struct team_option *del_opt;
144 276
145 del_opt = __team_find_option(team, option->name); 277 del_opt = __team_find_option(team, option->name);
146 if (del_opt) { 278 if (del_opt)
147 del_opt->changed = true; 279 __team_option_inst_mark_removed_option(team, del_opt);
148 del_opt->removed = true;
149 }
150 } 280 }
151} 281}
152 282
@@ -161,6 +291,7 @@ static void __team_options_unregister(struct team *team,
161 291
162 del_opt = __team_find_option(team, option->name); 292 del_opt = __team_find_option(team, option->name);
163 if (del_opt) { 293 if (del_opt) {
294 __team_option_inst_del_option(team, del_opt);
164 list_del(&del_opt->list); 295 list_del(&del_opt->list);
165 kfree(del_opt); 296 kfree(del_opt);
166 } 297 }
@@ -193,22 +324,42 @@ void team_options_unregister(struct team *team,
193} 324}
194EXPORT_SYMBOL(team_options_unregister); 325EXPORT_SYMBOL(team_options_unregister);
195 326
196static int team_option_get(struct team *team, struct team_option *option, 327static int team_option_port_add(struct team *team, struct team_port *port)
197 void *arg) 328{
329 int err;
330
331 err = __team_option_inst_add_port(team, port);
332 if (err)
333 return err;
334 __team_options_change_check(team);
335 return 0;
336}
337
338static void team_option_port_del(struct team *team, struct team_port *port)
339{
340 __team_option_inst_mark_removed_port(team, port);
341 __team_options_change_check(team);
342 __team_option_inst_del_port(team, port);
343}
344
345static int team_option_get(struct team *team,
346 struct team_option_inst *opt_inst,
347 struct team_gsetter_ctx *ctx)
198{ 348{
199 return option->getter(team, arg); 349 return opt_inst->option->getter(team, ctx);
200} 350}
201 351
202static int team_option_set(struct team *team, struct team_option *option, 352static int team_option_set(struct team *team,
203 void *arg) 353 struct team_option_inst *opt_inst,
354 struct team_gsetter_ctx *ctx)
204{ 355{
205 int err; 356 int err;
206 357
207 err = option->setter(team, arg); 358 err = opt_inst->option->setter(team, ctx);
208 if (err) 359 if (err)
209 return err; 360 return err;
210 361
211 option->changed = true; 362 opt_inst->changed = true;
212 __team_options_change_check(team); 363 __team_options_change_check(team);
213 return err; 364 return err;
214} 365}
@@ -408,6 +559,8 @@ static int team_change_mode(struct team *team, const char *kind)
408 * Rx path frame handler 559 * Rx path frame handler
409 ************************/ 560 ************************/
410 561
562static bool team_port_enabled(struct team_port *port);
563
411/* note: already called with rcu_read_lock */ 564/* note: already called with rcu_read_lock */
412static rx_handler_result_t team_handle_frame(struct sk_buff **pskb) 565static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
413{ 566{
@@ -424,8 +577,12 @@ static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
424 577
425 port = team_port_get_rcu(skb->dev); 578 port = team_port_get_rcu(skb->dev);
426 team = port->team; 579 team = port->team;
427 580 if (!team_port_enabled(port)) {
428 res = team->ops.receive(team, port, skb); 581 /* allow exact match delivery for disabled ports */
582 res = RX_HANDLER_EXACT;
583 } else {
584 res = team->ops.receive(team, port, skb);
585 }
429 if (res == RX_HANDLER_ANOTHER) { 586 if (res == RX_HANDLER_ANOTHER) {
430 struct team_pcpu_stats *pcpu_stats; 587 struct team_pcpu_stats *pcpu_stats;
431 588
@@ -461,17 +618,25 @@ static bool team_port_find(const struct team *team,
461 return false; 618 return false;
462} 619}
463 620
621static bool team_port_enabled(struct team_port *port)
622{
623 return port->index != -1;
624}
625
464/* 626/*
465 * Add/delete port to the team port list. Write guarded by rtnl_lock. 627 * Enable/disable port by adding to enabled port hashlist and setting
466 * Takes care of correct port->index setup (might be racy). 628 * port->index (Might be racy so reader could see incorrect ifindex when
629 * processing a flying packet, but that is not a problem). Write guarded
630 * by team->lock.
467 */ 631 */
468static void team_port_list_add_port(struct team *team, 632static void team_port_enable(struct team *team,
469 struct team_port *port) 633 struct team_port *port)
470{ 634{
471 port->index = team->port_count++; 635 if (team_port_enabled(port))
636 return;
637 port->index = team->en_port_count++;
472 hlist_add_head_rcu(&port->hlist, 638 hlist_add_head_rcu(&port->hlist,
473 team_port_index_hash(team, port->index)); 639 team_port_index_hash(team, port->index));
474 list_add_tail_rcu(&port->list, &team->port_list);
475} 640}
476 641
477static void __reconstruct_port_hlist(struct team *team, int rm_index) 642static void __reconstruct_port_hlist(struct team *team, int rm_index)
@@ -479,7 +644,7 @@ static void __reconstruct_port_hlist(struct team *team, int rm_index)
479 int i; 644 int i;
480 struct team_port *port; 645 struct team_port *port;
481 646
482 for (i = rm_index + 1; i < team->port_count; i++) { 647 for (i = rm_index + 1; i < team->en_port_count; i++) {
483 port = team_get_port_by_index(team, i); 648 port = team_get_port_by_index(team, i);
484 hlist_del_rcu(&port->hlist); 649 hlist_del_rcu(&port->hlist);
485 port->index--; 650 port->index--;
@@ -488,15 +653,17 @@ static void __reconstruct_port_hlist(struct team *team, int rm_index)
488 } 653 }
489} 654}
490 655
491static void team_port_list_del_port(struct team *team, 656static void team_port_disable(struct team *team,
492 struct team_port *port) 657 struct team_port *port)
493{ 658{
494 int rm_index = port->index; 659 int rm_index = port->index;
495 660
661 if (!team_port_enabled(port))
662 return;
496 hlist_del_rcu(&port->hlist); 663 hlist_del_rcu(&port->hlist);
497 list_del_rcu(&port->list);
498 __reconstruct_port_hlist(team, rm_index); 664 __reconstruct_port_hlist(team, rm_index);
499 team->port_count--; 665 team->en_port_count--;
666 port->index = -1;
500} 667}
501 668
502#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \ 669#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
@@ -642,7 +809,16 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
642 goto err_handler_register; 809 goto err_handler_register;
643 } 810 }
644 811
645 team_port_list_add_port(team, port); 812 err = team_option_port_add(team, port);
813 if (err) {
814 netdev_err(dev, "Device %s failed to add per-port options\n",
815 portname);
816 goto err_option_port_add;
817 }
818
819 port->index = -1;
820 team_port_enable(team, port);
821 list_add_tail_rcu(&port->list, &team->port_list);
646 team_adjust_ops(team); 822 team_adjust_ops(team);
647 __team_compute_features(team); 823 __team_compute_features(team);
648 __team_port_change_check(port, !!netif_carrier_ok(port_dev)); 824 __team_port_change_check(port, !!netif_carrier_ok(port_dev));
@@ -651,6 +827,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
651 827
652 return 0; 828 return 0;
653 829
830err_option_port_add:
831 netdev_rx_handler_unregister(port_dev);
832
654err_handler_register: 833err_handler_register:
655 netdev_set_master(port_dev, NULL); 834 netdev_set_master(port_dev, NULL);
656 835
@@ -688,8 +867,10 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
688 867
689 port->removed = true; 868 port->removed = true;
690 __team_port_change_check(port, false); 869 __team_port_change_check(port, false);
691 team_port_list_del_port(team, port); 870 team_port_disable(team, port);
871 list_del_rcu(&port->list);
692 team_adjust_ops(team); 872 team_adjust_ops(team);
873 team_option_port_del(team, port);
693 netdev_rx_handler_unregister(port_dev); 874 netdev_rx_handler_unregister(port_dev);
694 netdev_set_master(port_dev, NULL); 875 netdev_set_master(port_dev, NULL);
695 vlan_vids_del_by_dev(port_dev, dev); 876 vlan_vids_del_by_dev(port_dev, dev);
@@ -712,19 +893,66 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
712 893
713static const char team_no_mode_kind[] = "*NOMODE*"; 894static const char team_no_mode_kind[] = "*NOMODE*";
714 895
715static int team_mode_option_get(struct team *team, void *arg) 896static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
897{
898 ctx->data.str_val = team->mode ? team->mode->kind : team_no_mode_kind;
899 return 0;
900}
901
902static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
903{
904 return team_change_mode(team, ctx->data.str_val);
905}
906
907static int team_port_en_option_get(struct team *team,
908 struct team_gsetter_ctx *ctx)
909{
910 ctx->data.bool_val = team_port_enabled(ctx->port);
911 return 0;
912}
913
914static int team_port_en_option_set(struct team *team,
915 struct team_gsetter_ctx *ctx)
916{
917 if (ctx->data.bool_val)
918 team_port_enable(team, ctx->port);
919 else
920 team_port_disable(team, ctx->port);
921 return 0;
922}
923
924static int team_user_linkup_option_get(struct team *team,
925 struct team_gsetter_ctx *ctx)
926{
927 ctx->data.bool_val = ctx->port->user.linkup;
928 return 0;
929}
930
931static int team_user_linkup_option_set(struct team *team,
932 struct team_gsetter_ctx *ctx)
933{
934 ctx->port->user.linkup = ctx->data.bool_val;
935 team_refresh_port_linkup(ctx->port);
936 return 0;
937}
938
939static int team_user_linkup_en_option_get(struct team *team,
940 struct team_gsetter_ctx *ctx)
716{ 941{
717 const char **str = arg; 942 struct team_port *port = ctx->port;
718 943
719 *str = team->mode ? team->mode->kind : team_no_mode_kind; 944 ctx->data.bool_val = port->user.linkup_enabled;
720 return 0; 945 return 0;
721} 946}
722 947
723static int team_mode_option_set(struct team *team, void *arg) 948static int team_user_linkup_en_option_set(struct team *team,
949 struct team_gsetter_ctx *ctx)
724{ 950{
725 const char **str = arg; 951 struct team_port *port = ctx->port;
726 952
727 return team_change_mode(team, *str); 953 port->user.linkup_enabled = ctx->data.bool_val;
954 team_refresh_port_linkup(ctx->port);
955 return 0;
728} 956}
729 957
730static const struct team_option team_options[] = { 958static const struct team_option team_options[] = {
@@ -734,6 +962,27 @@ static const struct team_option team_options[] = {
734 .getter = team_mode_option_get, 962 .getter = team_mode_option_get,
735 .setter = team_mode_option_set, 963 .setter = team_mode_option_set,
736 }, 964 },
965 {
966 .name = "enabled",
967 .type = TEAM_OPTION_TYPE_BOOL,
968 .per_port = true,
969 .getter = team_port_en_option_get,
970 .setter = team_port_en_option_set,
971 },
972 {
973 .name = "user_linkup",
974 .type = TEAM_OPTION_TYPE_BOOL,
975 .per_port = true,
976 .getter = team_user_linkup_option_get,
977 .setter = team_user_linkup_option_set,
978 },
979 {
980 .name = "user_linkup_enabled",
981 .type = TEAM_OPTION_TYPE_BOOL,
982 .per_port = true,
983 .getter = team_user_linkup_en_option_get,
984 .setter = team_user_linkup_en_option_set,
985 },
737}; 986};
738 987
739static int team_init(struct net_device *dev) 988static int team_init(struct net_device *dev)
@@ -750,12 +999,13 @@ static int team_init(struct net_device *dev)
750 return -ENOMEM; 999 return -ENOMEM;
751 1000
752 for (i = 0; i < TEAM_PORT_HASHENTRIES; i++) 1001 for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
753 INIT_HLIST_HEAD(&team->port_hlist[i]); 1002 INIT_HLIST_HEAD(&team->en_port_hlist[i]);
754 INIT_LIST_HEAD(&team->port_list); 1003 INIT_LIST_HEAD(&team->port_list);
755 1004
756 team_adjust_ops(team); 1005 team_adjust_ops(team);
757 1006
758 INIT_LIST_HEAD(&team->option_list); 1007 INIT_LIST_HEAD(&team->option_list);
1008 INIT_LIST_HEAD(&team->option_inst_list);
759 err = team_options_register(team, team_options, ARRAY_SIZE(team_options)); 1009 err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
760 if (err) 1010 if (err)
761 goto err_options_register; 1011 goto err_options_register;
@@ -1145,10 +1395,7 @@ team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
1145 }, 1395 },
1146 [TEAM_ATTR_OPTION_CHANGED] = { .type = NLA_FLAG }, 1396 [TEAM_ATTR_OPTION_CHANGED] = { .type = NLA_FLAG },
1147 [TEAM_ATTR_OPTION_TYPE] = { .type = NLA_U8 }, 1397 [TEAM_ATTR_OPTION_TYPE] = { .type = NLA_U8 },
1148 [TEAM_ATTR_OPTION_DATA] = { 1398 [TEAM_ATTR_OPTION_DATA] = { .type = NLA_BINARY },
1149 .type = NLA_BINARY,
1150 .len = TEAM_STRING_MAX_LEN,
1151 },
1152}; 1399};
1153 1400
1154static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info) 1401static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
@@ -1241,46 +1488,86 @@ static int team_nl_fill_options_get(struct sk_buff *skb,
1241{ 1488{
1242 struct nlattr *option_list; 1489 struct nlattr *option_list;
1243 void *hdr; 1490 void *hdr;
1244 struct team_option *option; 1491 struct team_option_inst *opt_inst;
1492 int err;
1245 1493
1246 hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags, 1494 hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
1247 TEAM_CMD_OPTIONS_GET); 1495 TEAM_CMD_OPTIONS_GET);
1248 if (IS_ERR(hdr)) 1496 if (IS_ERR(hdr))
1249 return PTR_ERR(hdr); 1497 return PTR_ERR(hdr);
1250 1498
1251 NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex); 1499 if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
1500 goto nla_put_failure;
1252 option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION); 1501 option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
1253 if (!option_list) 1502 if (!option_list)
1254 return -EMSGSIZE; 1503 return -EMSGSIZE;
1255 1504
1256 list_for_each_entry(option, &team->option_list, list) { 1505 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
1257 struct nlattr *option_item; 1506 struct nlattr *option_item;
1258 long arg; 1507 struct team_option *option = opt_inst->option;
1508 struct team_gsetter_ctx ctx;
1259 1509
1260 /* Include only changed options if fill all mode is not on */ 1510 /* Include only changed options if fill all mode is not on */
1261 if (!fillall && !option->changed) 1511 if (!fillall && !opt_inst->changed)
1262 continue; 1512 continue;
1263 option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION); 1513 option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
1264 if (!option_item) 1514 if (!option_item)
1265 goto nla_put_failure; 1515 goto nla_put_failure;
1266 NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_NAME, option->name); 1516 if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
1267 if (option->changed) { 1517 goto nla_put_failure;
1268 NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_CHANGED); 1518 if (opt_inst->changed) {
1269 option->changed = false; 1519 if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
1520 goto nla_put_failure;
1521 opt_inst->changed = false;
1270 } 1522 }
1271 if (option->removed) 1523 if (opt_inst->removed &&
1272 NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_REMOVED); 1524 nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
1525 goto nla_put_failure;
1526 if (opt_inst->port &&
1527 nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
1528 opt_inst->port->dev->ifindex))
1529 goto nla_put_failure;
1530 ctx.port = opt_inst->port;
1273 switch (option->type) { 1531 switch (option->type) {
1274 case TEAM_OPTION_TYPE_U32: 1532 case TEAM_OPTION_TYPE_U32:
1275 NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32); 1533 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
1276 team_option_get(team, option, &arg); 1534 goto nla_put_failure;
1277 NLA_PUT_U32(skb, TEAM_ATTR_OPTION_DATA, arg); 1535 err = team_option_get(team, opt_inst, &ctx);
1536 if (err)
1537 goto errout;
1538 if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA,
1539 ctx.data.u32_val))
1540 goto nla_put_failure;
1278 break; 1541 break;
1279 case TEAM_OPTION_TYPE_STRING: 1542 case TEAM_OPTION_TYPE_STRING:
1280 NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING); 1543 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
1281 team_option_get(team, option, &arg); 1544 goto nla_put_failure;
1282 NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_DATA, 1545 err = team_option_get(team, opt_inst, &ctx);
1283 (char *) arg); 1546 if (err)
1547 goto errout;
1548 if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
1549 ctx.data.str_val))
1550 goto nla_put_failure;
1551 break;
1552 case TEAM_OPTION_TYPE_BINARY:
1553 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
1554 goto nla_put_failure;
1555 err = team_option_get(team, opt_inst, &ctx);
1556 if (err)
1557 goto errout;
1558 if (nla_put(skb, TEAM_ATTR_OPTION_DATA,
1559 ctx.data.bin_val.len, ctx.data.bin_val.ptr))
1560 goto nla_put_failure;
1561 break;
1562 case TEAM_OPTION_TYPE_BOOL:
1563 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
1564 goto nla_put_failure;
1565 err = team_option_get(team, opt_inst, &ctx);
1566 if (err)
1567 goto errout;
1568 if (ctx.data.bool_val &&
1569 nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
1570 goto nla_put_failure;
1284 break; 1571 break;
1285 default: 1572 default:
1286 BUG(); 1573 BUG();
@@ -1292,8 +1579,10 @@ static int team_nl_fill_options_get(struct sk_buff *skb,
1292 return genlmsg_end(skb, hdr); 1579 return genlmsg_end(skb, hdr);
1293 1580
1294nla_put_failure: 1581nla_put_failure:
1582 err = -EMSGSIZE;
1583errout:
1295 genlmsg_cancel(skb, hdr); 1584 genlmsg_cancel(skb, hdr);
1296 return -EMSGSIZE; 1585 return err;
1297} 1586}
1298 1587
1299static int team_nl_fill_options_get_all(struct sk_buff *skb, 1588static int team_nl_fill_options_get_all(struct sk_buff *skb,
@@ -1339,9 +1628,12 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
1339 } 1628 }
1340 1629
1341 nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) { 1630 nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
1342 struct nlattr *mode_attrs[TEAM_ATTR_OPTION_MAX + 1]; 1631 struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
1632 struct nlattr *attr_port_ifindex;
1633 struct nlattr *attr_data;
1343 enum team_option_type opt_type; 1634 enum team_option_type opt_type;
1344 struct team_option *option; 1635 int opt_port_ifindex = 0; /* != 0 for per-port options */
1636 struct team_option_inst *opt_inst;
1345 char *opt_name; 1637 char *opt_name;
1346 bool opt_found = false; 1638 bool opt_found = false;
1347 1639
@@ -1349,48 +1641,78 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
1349 err = -EINVAL; 1641 err = -EINVAL;
1350 goto team_put; 1642 goto team_put;
1351 } 1643 }
1352 err = nla_parse_nested(mode_attrs, TEAM_ATTR_OPTION_MAX, 1644 err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX,
1353 nl_option, team_nl_option_policy); 1645 nl_option, team_nl_option_policy);
1354 if (err) 1646 if (err)
1355 goto team_put; 1647 goto team_put;
1356 if (!mode_attrs[TEAM_ATTR_OPTION_NAME] || 1648 if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
1357 !mode_attrs[TEAM_ATTR_OPTION_TYPE] || 1649 !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
1358 !mode_attrs[TEAM_ATTR_OPTION_DATA]) {
1359 err = -EINVAL; 1650 err = -EINVAL;
1360 goto team_put; 1651 goto team_put;
1361 } 1652 }
1362 switch (nla_get_u8(mode_attrs[TEAM_ATTR_OPTION_TYPE])) { 1653 switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
1363 case NLA_U32: 1654 case NLA_U32:
1364 opt_type = TEAM_OPTION_TYPE_U32; 1655 opt_type = TEAM_OPTION_TYPE_U32;
1365 break; 1656 break;
1366 case NLA_STRING: 1657 case NLA_STRING:
1367 opt_type = TEAM_OPTION_TYPE_STRING; 1658 opt_type = TEAM_OPTION_TYPE_STRING;
1368 break; 1659 break;
1660 case NLA_BINARY:
1661 opt_type = TEAM_OPTION_TYPE_BINARY;
1662 break;
1663 case NLA_FLAG:
1664 opt_type = TEAM_OPTION_TYPE_BOOL;
1665 break;
1369 default: 1666 default:
1370 goto team_put; 1667 goto team_put;
1371 } 1668 }
1372 1669
1373 opt_name = nla_data(mode_attrs[TEAM_ATTR_OPTION_NAME]); 1670 attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
1374 list_for_each_entry(option, &team->option_list, list) { 1671 if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
1375 long arg; 1672 err = -EINVAL;
1376 struct nlattr *opt_data_attr; 1673 goto team_put;
1674 }
1675
1676 opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
1677 attr_port_ifindex = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
1678 if (attr_port_ifindex)
1679 opt_port_ifindex = nla_get_u32(attr_port_ifindex);
1680
1681 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
1682 struct team_option *option = opt_inst->option;
1683 struct team_gsetter_ctx ctx;
1684 int tmp_ifindex;
1377 1685
1686 tmp_ifindex = opt_inst->port ?
1687 opt_inst->port->dev->ifindex : 0;
1378 if (option->type != opt_type || 1688 if (option->type != opt_type ||
1379 strcmp(option->name, opt_name)) 1689 strcmp(option->name, opt_name) ||
1690 tmp_ifindex != opt_port_ifindex)
1380 continue; 1691 continue;
1381 opt_found = true; 1692 opt_found = true;
1382 opt_data_attr = mode_attrs[TEAM_ATTR_OPTION_DATA]; 1693 ctx.port = opt_inst->port;
1383 switch (opt_type) { 1694 switch (opt_type) {
1384 case TEAM_OPTION_TYPE_U32: 1695 case TEAM_OPTION_TYPE_U32:
1385 arg = nla_get_u32(opt_data_attr); 1696 ctx.data.u32_val = nla_get_u32(attr_data);
1386 break; 1697 break;
1387 case TEAM_OPTION_TYPE_STRING: 1698 case TEAM_OPTION_TYPE_STRING:
1388 arg = (long) nla_data(opt_data_attr); 1699 if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
1700 err = -EINVAL;
1701 goto team_put;
1702 }
1703 ctx.data.str_val = nla_data(attr_data);
1704 break;
1705 case TEAM_OPTION_TYPE_BINARY:
1706 ctx.data.bin_val.len = nla_len(attr_data);
1707 ctx.data.bin_val.ptr = nla_data(attr_data);
1708 break;
1709 case TEAM_OPTION_TYPE_BOOL:
1710 ctx.data.bool_val = attr_data ? true : false;
1389 break; 1711 break;
1390 default: 1712 default:
1391 BUG(); 1713 BUG();
1392 } 1714 }
1393 err = team_option_set(team, option, &arg); 1715 err = team_option_set(team, opt_inst, &ctx);
1394 if (err) 1716 if (err)
1395 goto team_put; 1717 goto team_put;
1396 } 1718 }
@@ -1420,7 +1742,8 @@ static int team_nl_fill_port_list_get(struct sk_buff *skb,
1420 if (IS_ERR(hdr)) 1742 if (IS_ERR(hdr))
1421 return PTR_ERR(hdr); 1743 return PTR_ERR(hdr);
1422 1744
1423 NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex); 1745 if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
1746 goto nla_put_failure;
1424 port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT); 1747 port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
1425 if (!port_list) 1748 if (!port_list)
1426 return -EMSGSIZE; 1749 return -EMSGSIZE;
@@ -1434,17 +1757,20 @@ static int team_nl_fill_port_list_get(struct sk_buff *skb,
1434 port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT); 1757 port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
1435 if (!port_item) 1758 if (!port_item)
1436 goto nla_put_failure; 1759 goto nla_put_failure;
1437 NLA_PUT_U32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex); 1760 if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
1761 goto nla_put_failure;
1438 if (port->changed) { 1762 if (port->changed) {
1439 NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_CHANGED); 1763 if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
1764 goto nla_put_failure;
1440 port->changed = false; 1765 port->changed = false;
1441 } 1766 }
1442 if (port->removed) 1767 if ((port->removed &&
1443 NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_REMOVED); 1768 nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
1444 if (port->linkup) 1769 (port->state.linkup &&
1445 NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_LINKUP); 1770 nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
1446 NLA_PUT_U32(skb, TEAM_ATTR_PORT_SPEED, port->speed); 1771 nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
1447 NLA_PUT_U8(skb, TEAM_ATTR_PORT_DUPLEX, port->duplex); 1772 nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
1773 goto nla_put_failure;
1448 nla_nest_end(skb, port_item); 1774 nla_nest_end(skb, port_item);
1449 } 1775 }
1450 1776
@@ -1603,23 +1929,24 @@ static void __team_port_change_check(struct team_port *port, bool linkup)
1603{ 1929{
1604 int err; 1930 int err;
1605 1931
1606 if (!port->removed && port->linkup == linkup) 1932 if (!port->removed && port->state.linkup == linkup)
1607 return; 1933 return;
1608 1934
1609 port->changed = true; 1935 port->changed = true;
1610 port->linkup = linkup; 1936 port->state.linkup = linkup;
1937 team_refresh_port_linkup(port);
1611 if (linkup) { 1938 if (linkup) {
1612 struct ethtool_cmd ecmd; 1939 struct ethtool_cmd ecmd;
1613 1940
1614 err = __ethtool_get_settings(port->dev, &ecmd); 1941 err = __ethtool_get_settings(port->dev, &ecmd);
1615 if (!err) { 1942 if (!err) {
1616 port->speed = ethtool_cmd_speed(&ecmd); 1943 port->state.speed = ethtool_cmd_speed(&ecmd);
1617 port->duplex = ecmd.duplex; 1944 port->state.duplex = ecmd.duplex;
1618 goto send_event; 1945 goto send_event;
1619 } 1946 }
1620 } 1947 }
1621 port->speed = 0; 1948 port->state.speed = 0;
1622 port->duplex = 0; 1949 port->state.duplex = 0;
1623 1950
1624send_event: 1951send_event:
1625 err = team_nl_send_event_port_list_get(port->team); 1952 err = team_nl_send_event_port_list_get(port->team);
diff --git a/drivers/net/team/team_mode_activebackup.c b/drivers/net/team/team_mode_activebackup.c
index f4d960e82e29..fd6bd03aaa89 100644
--- a/drivers/net/team/team_mode_activebackup.c
+++ b/drivers/net/team/team_mode_activebackup.c
@@ -59,23 +59,21 @@ static void ab_port_leave(struct team *team, struct team_port *port)
59 RCU_INIT_POINTER(ab_priv(team)->active_port, NULL); 59 RCU_INIT_POINTER(ab_priv(team)->active_port, NULL);
60} 60}
61 61
62static int ab_active_port_get(struct team *team, void *arg) 62static int ab_active_port_get(struct team *team, struct team_gsetter_ctx *ctx)
63{ 63{
64 u32 *ifindex = arg;
65
66 *ifindex = 0;
67 if (ab_priv(team)->active_port) 64 if (ab_priv(team)->active_port)
68 *ifindex = ab_priv(team)->active_port->dev->ifindex; 65 ctx->data.u32_val = ab_priv(team)->active_port->dev->ifindex;
66 else
67 ctx->data.u32_val = 0;
69 return 0; 68 return 0;
70} 69}
71 70
72static int ab_active_port_set(struct team *team, void *arg) 71static int ab_active_port_set(struct team *team, struct team_gsetter_ctx *ctx)
73{ 72{
74 u32 *ifindex = arg;
75 struct team_port *port; 73 struct team_port *port;
76 74
77 list_for_each_entry_rcu(port, &team->port_list, list) { 75 list_for_each_entry(port, &team->port_list, list) {
78 if (port->dev->ifindex == *ifindex) { 76 if (port->dev->ifindex == ctx->data.u32_val) {
79 rcu_assign_pointer(ab_priv(team)->active_port, port); 77 rcu_assign_pointer(ab_priv(team)->active_port, port);
80 return 0; 78 return 0;
81 } 79 }
@@ -92,12 +90,12 @@ static const struct team_option ab_options[] = {
92 }, 90 },
93}; 91};
94 92
95int ab_init(struct team *team) 93static int ab_init(struct team *team)
96{ 94{
97 return team_options_register(team, ab_options, ARRAY_SIZE(ab_options)); 95 return team_options_register(team, ab_options, ARRAY_SIZE(ab_options));
98} 96}
99 97
100void ab_exit(struct team *team) 98static void ab_exit(struct team *team)
101{ 99{
102 team_options_unregister(team, ab_options, ARRAY_SIZE(ab_options)); 100 team_options_unregister(team, ab_options, ARRAY_SIZE(ab_options));
103} 101}
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
new file mode 100644
index 000000000000..86e8183c8e3d
--- /dev/null
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -0,0 +1,174 @@
1/*
2 * drivers/net/team/team_mode_loadbalance.c - Load-balancing mode for team
3 * Copyright (c) 2012 Jiri Pirko <jpirko@redhat.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 */
10
11#include <linux/kernel.h>
12#include <linux/types.h>
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/errno.h>
16#include <linux/netdevice.h>
17#include <linux/filter.h>
18#include <linux/if_team.h>
19
20struct lb_priv {
21 struct sk_filter __rcu *fp;
22 struct sock_fprog *orig_fprog;
23};
24
25static struct lb_priv *lb_priv(struct team *team)
26{
27 return (struct lb_priv *) &team->mode_priv;
28}
29
30static bool lb_transmit(struct team *team, struct sk_buff *skb)
31{
32 struct sk_filter *fp;
33 struct team_port *port;
34 unsigned int hash;
35 int port_index;
36
37 fp = rcu_dereference(lb_priv(team)->fp);
38 if (unlikely(!fp))
39 goto drop;
40 hash = SK_RUN_FILTER(fp, skb);
41 port_index = hash % team->en_port_count;
42 port = team_get_port_by_index_rcu(team, port_index);
43 if (unlikely(!port))
44 goto drop;
45 skb->dev = port->dev;
46 if (dev_queue_xmit(skb))
47 return false;
48 return true;
49
50drop:
51 dev_kfree_skb_any(skb);
52 return false;
53}
54
55static int lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx)
56{
57 if (!lb_priv(team)->orig_fprog) {
58 ctx->data.bin_val.len = 0;
59 ctx->data.bin_val.ptr = NULL;
60 return 0;
61 }
62 ctx->data.bin_val.len = lb_priv(team)->orig_fprog->len *
63 sizeof(struct sock_filter);
64 ctx->data.bin_val.ptr = lb_priv(team)->orig_fprog->filter;
65 return 0;
66}
67
68static int __fprog_create(struct sock_fprog **pfprog, u32 data_len,
69 const void *data)
70{
71 struct sock_fprog *fprog;
72 struct sock_filter *filter = (struct sock_filter *) data;
73
74 if (data_len % sizeof(struct sock_filter))
75 return -EINVAL;
76 fprog = kmalloc(sizeof(struct sock_fprog), GFP_KERNEL);
77 if (!fprog)
78 return -ENOMEM;
79 fprog->filter = kmemdup(filter, data_len, GFP_KERNEL);
80 if (!fprog->filter) {
81 kfree(fprog);
82 return -ENOMEM;
83 }
84 fprog->len = data_len / sizeof(struct sock_filter);
85 *pfprog = fprog;
86 return 0;
87}
88
89static void __fprog_destroy(struct sock_fprog *fprog)
90{
91 kfree(fprog->filter);
92 kfree(fprog);
93}
94
95static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
96{
97 struct sk_filter *fp = NULL;
98 struct sock_fprog *fprog = NULL;
99 int err;
100
101 if (ctx->data.bin_val.len) {
102 err = __fprog_create(&fprog, ctx->data.bin_val.len,
103 ctx->data.bin_val.ptr);
104 if (err)
105 return err;
106 err = sk_unattached_filter_create(&fp, fprog);
107 if (err) {
108 __fprog_destroy(fprog);
109 return err;
110 }
111 }
112
113 if (lb_priv(team)->orig_fprog) {
114 /* Clear old filter data */
115 __fprog_destroy(lb_priv(team)->orig_fprog);
116 sk_unattached_filter_destroy(lb_priv(team)->fp);
117 }
118
119 rcu_assign_pointer(lb_priv(team)->fp, fp);
120 lb_priv(team)->orig_fprog = fprog;
121 return 0;
122}
123
124static const struct team_option lb_options[] = {
125 {
126 .name = "bpf_hash_func",
127 .type = TEAM_OPTION_TYPE_BINARY,
128 .getter = lb_bpf_func_get,
129 .setter = lb_bpf_func_set,
130 },
131};
132
133static int lb_init(struct team *team)
134{
135 return team_options_register(team, lb_options,
136 ARRAY_SIZE(lb_options));
137}
138
139static void lb_exit(struct team *team)
140{
141 team_options_unregister(team, lb_options,
142 ARRAY_SIZE(lb_options));
143}
144
145static const struct team_mode_ops lb_mode_ops = {
146 .init = lb_init,
147 .exit = lb_exit,
148 .transmit = lb_transmit,
149};
150
151static struct team_mode lb_mode = {
152 .kind = "loadbalance",
153 .owner = THIS_MODULE,
154 .priv_size = sizeof(struct lb_priv),
155 .ops = &lb_mode_ops,
156};
157
158static int __init lb_init_module(void)
159{
160 return team_mode_register(&lb_mode);
161}
162
163static void __exit lb_cleanup_module(void)
164{
165 team_mode_unregister(&lb_mode);
166}
167
168module_init(lb_init_module);
169module_exit(lb_cleanup_module);
170
171MODULE_LICENSE("GPL v2");
172MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
173MODULE_DESCRIPTION("Load-balancing mode for team");
174MODULE_ALIAS("team-mode-loadbalance");
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
index a0e8f806331a..6abfbdc96be5 100644
--- a/drivers/net/team/team_mode_roundrobin.c
+++ b/drivers/net/team/team_mode_roundrobin.c
@@ -50,7 +50,7 @@ static bool rr_transmit(struct team *team, struct sk_buff *skb)
50 struct team_port *port; 50 struct team_port *port;
51 int port_index; 51 int port_index;
52 52
53 port_index = rr_priv(team)->sent_packets++ % team->port_count; 53 port_index = rr_priv(team)->sent_packets++ % team->en_port_count;
54 port = team_get_port_by_index_rcu(team, port_index); 54 port = team_get_port_by_index_rcu(team, port_index);
55 port = __get_first_port_up(team, port); 55 port = __get_first_port_up(team, port);
56 if (unlikely(!port)) 56 if (unlikely(!port))
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index b15ac81d46fa..0924f572f59b 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -1826,18 +1826,6 @@ static struct pci_driver xl_3c359_driver = {
1826 .remove = __devexit_p(xl_remove_one), 1826 .remove = __devexit_p(xl_remove_one),
1827}; 1827};
1828 1828
1829static int __init xl_pci_init (void) 1829module_pci_driver(xl_3c359_driver);
1830{
1831 return pci_register_driver(&xl_3c359_driver);
1832}
1833
1834
1835static void __exit xl_pci_cleanup (void)
1836{
1837 pci_unregister_driver (&xl_3c359_driver);
1838}
1839
1840module_init(xl_pci_init);
1841module_exit(xl_pci_cleanup);
1842 1830
1843MODULE_LICENSE("GPL") ; 1831MODULE_LICENSE("GPL") ;
diff --git a/drivers/net/tokenring/Kconfig b/drivers/net/tokenring/Kconfig
index 45550d42b368..ef3bb1326e4f 100644
--- a/drivers/net/tokenring/Kconfig
+++ b/drivers/net/tokenring/Kconfig
@@ -98,7 +98,7 @@ config 3C359
98 98
99config TMS380TR 99config TMS380TR
100 tristate "Generic TMS380 Token Ring ISA/PCI adapter support" 100 tristate "Generic TMS380 Token Ring ISA/PCI adapter support"
101 depends on PCI || ISA && ISA_DMA_API || MCA 101 depends on PCI || ISA || MCA
102 select FW_LOADER 102 select FW_LOADER
103 ---help--- 103 ---help---
104 This driver provides generic support for token ring adapters 104 This driver provides generic support for token ring adapters
@@ -137,7 +137,7 @@ config TMSPCI
137 137
138config SKISA 138config SKISA
139 tristate "SysKonnect TR4/16 ISA support" 139 tristate "SysKonnect TR4/16 ISA support"
140 depends on TMS380TR && ISA 140 depends on TMS380TR && ISA && ISA_DMA_API
141 help 141 help
142 This tms380 module supports SysKonnect TR4/16 ISA cards. 142 This tms380 module supports SysKonnect TR4/16 ISA cards.
143 143
@@ -149,7 +149,7 @@ config SKISA
149 149
150config PROTEON 150config PROTEON
151 tristate "Proteon ISA support" 151 tristate "Proteon ISA support"
152 depends on TMS380TR && ISA 152 depends on TMS380TR && ISA && ISA_DMA_API
153 help 153 help
154 This tms380 module supports Proteon ISA cards. 154 This tms380 module supports Proteon ISA cards.
155 155
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index 3e4b4f091113..97e4c65c1e29 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -1904,14 +1904,6 @@ static struct pci_driver streamer_pci_driver = {
1904 .remove = __devexit_p(streamer_remove_one), 1904 .remove = __devexit_p(streamer_remove_one),
1905}; 1905};
1906 1906
1907static int __init streamer_init_module(void) { 1907module_pci_driver(streamer_pci_driver);
1908 return pci_register_driver(&streamer_pci_driver);
1909}
1910
1911static void __exit streamer_cleanup_module(void) {
1912 pci_unregister_driver(&streamer_pci_driver);
1913}
1914 1908
1915module_init(streamer_init_module);
1916module_exit(streamer_cleanup_module);
1917MODULE_LICENSE("GPL"); 1909MODULE_LICENSE("GPL");
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index 0e234741cc79..4d45fe8bd206 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -1732,18 +1732,6 @@ static struct pci_driver olympic_driver = {
1732 .remove = __devexit_p(olympic_remove_one), 1732 .remove = __devexit_p(olympic_remove_one),
1733}; 1733};
1734 1734
1735static int __init olympic_pci_init(void) 1735module_pci_driver(olympic_driver);
1736{
1737 return pci_register_driver(&olympic_driver) ;
1738}
1739
1740static void __exit olympic_pci_cleanup(void)
1741{
1742 pci_unregister_driver(&olympic_driver) ;
1743}
1744
1745
1746module_init(olympic_pci_init) ;
1747module_exit(olympic_pci_cleanup) ;
1748 1736
1749MODULE_LICENSE("GPL"); 1737MODULE_LICENSE("GPL");
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index be4813e0366c..b5e0855e4b39 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -254,7 +254,7 @@ int tms380tr_open(struct net_device *dev)
254 254
255 /* Reset the hardware here. Don't forget to set the station address. */ 255 /* Reset the hardware here. Don't forget to set the station address. */
256 256
257#ifdef CONFIG_ISA 257#if defined(CONFIG_ISA) && defined(CONFIG_ISA_DMA_API)
258 if(dev->dma > 0) 258 if(dev->dma > 0)
259 { 259 {
260 unsigned long flags=claim_dma_lock(); 260 unsigned long flags=claim_dma_lock();
@@ -1125,8 +1125,8 @@ int tms380tr_close(struct net_device *dev)
1125 1125
1126 del_timer(&tp->timer); 1126 del_timer(&tp->timer);
1127 tms380tr_disable_interrupts(dev); 1127 tms380tr_disable_interrupts(dev);
1128 1128
1129#ifdef CONFIG_ISA 1129#if defined(CONFIG_ISA) && defined(CONFIG_ISA_DMA_API)
1130 if(dev->dma > 0) 1130 if(dev->dma > 0)
1131 { 1131 {
1132 unsigned long flags=claim_dma_lock(); 1132 unsigned long flags=claim_dma_lock();
diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c
index fb9918da5792..90f3fa44a151 100644
--- a/drivers/net/tokenring/tmspci.c
+++ b/drivers/net/tokenring/tmspci.c
@@ -233,16 +233,4 @@ static struct pci_driver tms_pci_driver = {
233 .remove = __devexit_p(tms_pci_detach), 233 .remove = __devexit_p(tms_pci_detach),
234}; 234};
235 235
236static int __init tms_pci_init (void) 236module_pci_driver(tms_pci_driver);
237{
238 return pci_register_driver(&tms_pci_driver);
239}
240
241static void __exit tms_pci_rmmod (void)
242{
243 pci_unregister_driver (&tms_pci_driver);
244}
245
246module_init(tms_pci_init);
247module_exit(tms_pci_rmmod);
248
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 2d927fb4adf4..80b837c88f0d 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -885,6 +885,7 @@ static const struct ethtool_ops usbnet_ethtool_ops = {
885 .get_drvinfo = usbnet_get_drvinfo, 885 .get_drvinfo = usbnet_get_drvinfo,
886 .get_msglevel = usbnet_get_msglevel, 886 .get_msglevel = usbnet_get_msglevel,
887 .set_msglevel = usbnet_set_msglevel, 887 .set_msglevel = usbnet_set_msglevel,
888 .get_ts_info = ethtool_op_get_ts_info,
888}; 889};
889 890
890/*-------------------------------------------------------------------------*/ 891/*-------------------------------------------------------------------------*/
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index af8acc85f4bb..fa58c7869954 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -66,12 +66,21 @@ struct virtnet_info {
66 /* Host will merge rx buffers for big packets (shake it! shake it!) */ 66 /* Host will merge rx buffers for big packets (shake it! shake it!) */
67 bool mergeable_rx_bufs; 67 bool mergeable_rx_bufs;
68 68
69 /* enable config space updates */
70 bool config_enable;
71
69 /* Active statistics */ 72 /* Active statistics */
70 struct virtnet_stats __percpu *stats; 73 struct virtnet_stats __percpu *stats;
71 74
72 /* Work struct for refilling if we run low on memory. */ 75 /* Work struct for refilling if we run low on memory. */
73 struct delayed_work refill; 76 struct delayed_work refill;
74 77
78 /* Work struct for config space updates */
79 struct work_struct config_work;
80
81 /* Lock for config space updates */
82 struct mutex config_lock;
83
75 /* Chain pages by the private ptr. */ 84 /* Chain pages by the private ptr. */
76 struct page *pages; 85 struct page *pages;
77 86
@@ -780,6 +789,16 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
780 return status == VIRTIO_NET_OK; 789 return status == VIRTIO_NET_OK;
781} 790}
782 791
792static void virtnet_ack_link_announce(struct virtnet_info *vi)
793{
794 rtnl_lock();
795 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
796 VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL,
797 0, 0))
798 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
799 rtnl_unlock();
800}
801
783static int virtnet_close(struct net_device *dev) 802static int virtnet_close(struct net_device *dev)
784{ 803{
785 struct virtnet_info *vi = netdev_priv(dev); 804 struct virtnet_info *vi = netdev_priv(dev);
@@ -951,20 +970,31 @@ static const struct net_device_ops virtnet_netdev = {
951#endif 970#endif
952}; 971};
953 972
954static void virtnet_update_status(struct virtnet_info *vi) 973static void virtnet_config_changed_work(struct work_struct *work)
955{ 974{
975 struct virtnet_info *vi =
976 container_of(work, struct virtnet_info, config_work);
956 u16 v; 977 u16 v;
957 978
979 mutex_lock(&vi->config_lock);
980 if (!vi->config_enable)
981 goto done;
982
958 if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS, 983 if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS,
959 offsetof(struct virtio_net_config, status), 984 offsetof(struct virtio_net_config, status),
960 &v) < 0) 985 &v) < 0)
961 return; 986 goto done;
987
988 if (v & VIRTIO_NET_S_ANNOUNCE) {
989 netif_notify_peers(vi->dev);
990 virtnet_ack_link_announce(vi);
991 }
962 992
963 /* Ignore unknown (future) status bits */ 993 /* Ignore unknown (future) status bits */
964 v &= VIRTIO_NET_S_LINK_UP; 994 v &= VIRTIO_NET_S_LINK_UP;
965 995
966 if (vi->status == v) 996 if (vi->status == v)
967 return; 997 goto done;
968 998
969 vi->status = v; 999 vi->status = v;
970 1000
@@ -975,13 +1005,15 @@ static void virtnet_update_status(struct virtnet_info *vi)
975 netif_carrier_off(vi->dev); 1005 netif_carrier_off(vi->dev);
976 netif_stop_queue(vi->dev); 1006 netif_stop_queue(vi->dev);
977 } 1007 }
1008done:
1009 mutex_unlock(&vi->config_lock);
978} 1010}
979 1011
980static void virtnet_config_changed(struct virtio_device *vdev) 1012static void virtnet_config_changed(struct virtio_device *vdev)
981{ 1013{
982 struct virtnet_info *vi = vdev->priv; 1014 struct virtnet_info *vi = vdev->priv;
983 1015
984 virtnet_update_status(vi); 1016 queue_work(system_nrt_wq, &vi->config_work);
985} 1017}
986 1018
987static int init_vqs(struct virtnet_info *vi) 1019static int init_vqs(struct virtnet_info *vi)
@@ -1075,6 +1107,9 @@ static int virtnet_probe(struct virtio_device *vdev)
1075 goto free; 1107 goto free;
1076 1108
1077 INIT_DELAYED_WORK(&vi->refill, refill_work); 1109 INIT_DELAYED_WORK(&vi->refill, refill_work);
1110 mutex_init(&vi->config_lock);
1111 vi->config_enable = true;
1112 INIT_WORK(&vi->config_work, virtnet_config_changed_work);
1078 sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg)); 1113 sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg));
1079 sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg)); 1114 sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg));
1080 1115
@@ -1110,7 +1145,7 @@ static int virtnet_probe(struct virtio_device *vdev)
1110 otherwise get link status from config. */ 1145 otherwise get link status from config. */
1111 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { 1146 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
1112 netif_carrier_off(dev); 1147 netif_carrier_off(dev);
1113 virtnet_update_status(vi); 1148 queue_work(system_nrt_wq, &vi->config_work);
1114 } else { 1149 } else {
1115 vi->status = VIRTIO_NET_S_LINK_UP; 1150 vi->status = VIRTIO_NET_S_LINK_UP;
1116 netif_carrier_on(dev); 1151 netif_carrier_on(dev);
@@ -1169,10 +1204,17 @@ static void __devexit virtnet_remove(struct virtio_device *vdev)
1169{ 1204{
1170 struct virtnet_info *vi = vdev->priv; 1205 struct virtnet_info *vi = vdev->priv;
1171 1206
1207 /* Prevent config work handler from accessing the device. */
1208 mutex_lock(&vi->config_lock);
1209 vi->config_enable = false;
1210 mutex_unlock(&vi->config_lock);
1211
1172 unregister_netdev(vi->dev); 1212 unregister_netdev(vi->dev);
1173 1213
1174 remove_vq_common(vi); 1214 remove_vq_common(vi);
1175 1215
1216 flush_work(&vi->config_work);
1217
1176 free_percpu(vi->stats); 1218 free_percpu(vi->stats);
1177 free_netdev(vi->dev); 1219 free_netdev(vi->dev);
1178} 1220}
@@ -1182,6 +1224,11 @@ static int virtnet_freeze(struct virtio_device *vdev)
1182{ 1224{
1183 struct virtnet_info *vi = vdev->priv; 1225 struct virtnet_info *vi = vdev->priv;
1184 1226
1227 /* Prevent config work handler from accessing the device */
1228 mutex_lock(&vi->config_lock);
1229 vi->config_enable = false;
1230 mutex_unlock(&vi->config_lock);
1231
1185 virtqueue_disable_cb(vi->rvq); 1232 virtqueue_disable_cb(vi->rvq);
1186 virtqueue_disable_cb(vi->svq); 1233 virtqueue_disable_cb(vi->svq);
1187 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) 1234 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
@@ -1195,6 +1242,8 @@ static int virtnet_freeze(struct virtio_device *vdev)
1195 1242
1196 remove_vq_common(vi); 1243 remove_vq_common(vi);
1197 1244
1245 flush_work(&vi->config_work);
1246
1198 return 0; 1247 return 0;
1199} 1248}
1200 1249
@@ -1215,6 +1264,10 @@ static int virtnet_restore(struct virtio_device *vdev)
1215 if (!try_fill_recv(vi, GFP_KERNEL)) 1264 if (!try_fill_recv(vi, GFP_KERNEL))
1216 queue_delayed_work(system_nrt_wq, &vi->refill, 0); 1265 queue_delayed_work(system_nrt_wq, &vi->refill, 0);
1217 1266
1267 mutex_lock(&vi->config_lock);
1268 vi->config_enable = true;
1269 mutex_unlock(&vi->config_lock);
1270
1218 return 0; 1271 return 0;
1219} 1272}
1220#endif 1273#endif
@@ -1232,6 +1285,7 @@ static unsigned int features[] = {
1232 VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, 1285 VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
1233 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, 1286 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
1234 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, 1287 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
1288 VIRTIO_NET_F_GUEST_ANNOUNCE,
1235}; 1289};
1236 1290
1237static struct virtio_driver virtio_net_driver = { 1291static struct virtio_driver virtio_net_driver = {
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index c676de7de024..9eb6479306d6 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -2055,15 +2055,4 @@ static struct pci_driver dscc4_driver = {
2055 .remove = __devexit_p(dscc4_remove_one), 2055 .remove = __devexit_p(dscc4_remove_one),
2056}; 2056};
2057 2057
2058static int __init dscc4_init_module(void) 2058module_pci_driver(dscc4_driver);
2059{
2060 return pci_register_driver(&dscc4_driver);
2061}
2062
2063static void __exit dscc4_cleanup_module(void)
2064{
2065 pci_unregister_driver(&dscc4_driver);
2066}
2067
2068module_init(dscc4_init_module);
2069module_exit(dscc4_cleanup_module);
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 76a8a4a522e9..f5d533a706ea 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1120,7 +1120,7 @@ static void lmc_running_reset (struct net_device *dev) /*fold00*/
1120{ 1120{
1121 lmc_softc_t *sc = dev_to_sc(dev); 1121 lmc_softc_t *sc = dev_to_sc(dev);
1122 1122
1123 lmc_trace(dev, "lmc_runnig_reset in"); 1123 lmc_trace(dev, "lmc_running_reset in");
1124 1124
1125 /* stop interrupts */ 1125 /* stop interrupts */
1126 /* Clear the interrupt mask */ 1126 /* Clear the interrupt mask */
@@ -1736,18 +1736,7 @@ static struct pci_driver lmc_driver = {
1736 .remove = __devexit_p(lmc_remove_one), 1736 .remove = __devexit_p(lmc_remove_one),
1737}; 1737};
1738 1738
1739static int __init init_lmc(void) 1739module_pci_driver(lmc_driver);
1740{
1741 return pci_register_driver(&lmc_driver);
1742}
1743
1744static void __exit exit_lmc(void)
1745{
1746 pci_unregister_driver(&lmc_driver);
1747}
1748
1749module_init(init_lmc);
1750module_exit(exit_lmc);
1751 1740
1752unsigned lmc_mii_readreg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno) /*fold00*/ 1741unsigned lmc_mii_readreg (lmc_softc_t * const sc, unsigned devaddr, unsigned regno) /*fold00*/
1753{ 1742{
diff --git a/drivers/net/wimax/i2400m/Kconfig b/drivers/net/wimax/i2400m/Kconfig
index 3f703384295e..672de18a776c 100644
--- a/drivers/net/wimax/i2400m/Kconfig
+++ b/drivers/net/wimax/i2400m/Kconfig
@@ -32,8 +32,9 @@ config WIMAX_I2400M_SDIO
32 If unsure, it is safe to select M (module). 32 If unsure, it is safe to select M (module).
33 33
34config WIMAX_IWMC3200_SDIO 34config WIMAX_IWMC3200_SDIO
35 bool "Intel Wireless Multicom WiMAX Connection 3200 over SDIO" 35 bool "Intel Wireless Multicom WiMAX Connection 3200 over SDIO (EXPERIMENTAL)"
36 depends on WIMAX_I2400M_SDIO 36 depends on WIMAX_I2400M_SDIO
37 depends on EXPERIMENTAL
37 select IWMC3200TOP 38 select IWMC3200TOP
38 help 39 help
39 Select if you have a device based on the Intel Multicom WiMAX 40 Select if you have a device based on the Intel Multicom WiMAX
diff --git a/drivers/net/wimax/i2400m/usb-rx.c b/drivers/net/wimax/i2400m/usb-rx.c
index e3257681e360..b78ee676e102 100644
--- a/drivers/net/wimax/i2400m/usb-rx.c
+++ b/drivers/net/wimax/i2400m/usb-rx.c
@@ -277,7 +277,7 @@ retry:
277 d_printf(1, dev, "RX: size changed to %d, received %d, " 277 d_printf(1, dev, "RX: size changed to %d, received %d, "
278 "copied %d, capacity %ld\n", 278 "copied %d, capacity %ld\n",
279 rx_size, read_size, rx_skb->len, 279 rx_size, read_size, rx_skb->len,
280 (long) (skb_end_pointer(new_skb) - new_skb->head)); 280 (long) skb_end_offset(new_skb));
281 goto retry; 281 goto retry;
282 } 282 }
283 /* In most cases, it happens due to the hardware scheduling a 283 /* In most cases, it happens due to the hardware scheduling a
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 29b1e033a10b..713d033891e6 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -695,7 +695,7 @@ int i2400mu_resume(struct usb_interface *iface)
695 d_fnstart(3, dev, "(iface %p)\n", iface); 695 d_fnstart(3, dev, "(iface %p)\n", iface);
696 rmb(); /* see i2400m->updown's documentation */ 696 rmb(); /* see i2400m->updown's documentation */
697 if (i2400m->updown == 0) { 697 if (i2400m->updown == 0) {
698 d_printf(1, dev, "fw was down, no resume neeed\n"); 698 d_printf(1, dev, "fw was down, no resume needed\n");
699 goto out; 699 goto out;
700 } 700 }
701 d_printf(1, dev, "fw was up, resuming\n"); 701 d_printf(1, dev, "fw was up, resuming\n");
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index abd3b71cd4ab..5f58fa53238c 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -282,8 +282,7 @@ source "drivers/net/wireless/orinoco/Kconfig"
282source "drivers/net/wireless/p54/Kconfig" 282source "drivers/net/wireless/p54/Kconfig"
283source "drivers/net/wireless/rt2x00/Kconfig" 283source "drivers/net/wireless/rt2x00/Kconfig"
284source "drivers/net/wireless/rtlwifi/Kconfig" 284source "drivers/net/wireless/rtlwifi/Kconfig"
285source "drivers/net/wireless/wl1251/Kconfig" 285source "drivers/net/wireless/ti/Kconfig"
286source "drivers/net/wireless/wl12xx/Kconfig"
287source "drivers/net/wireless/zd1211rw/Kconfig" 286source "drivers/net/wireless/zd1211rw/Kconfig"
288source "drivers/net/wireless/mwifiex/Kconfig" 287source "drivers/net/wireless/mwifiex/Kconfig"
289 288
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 98db76196b59..0ce218b931d4 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -51,9 +51,7 @@ obj-$(CONFIG_ATH_COMMON) += ath/
51 51
52obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o 52obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o
53 53
54obj-$(CONFIG_WL1251) += wl1251/ 54obj-$(CONFIG_WL_TI) += ti/
55obj-$(CONFIG_WL12XX) += wl12xx/
56obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx/
57 55
58obj-$(CONFIG_IWM) += iwmc3200wifi/ 56obj-$(CONFIG_IWM) += iwmc3200wifi/
59 57
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index f5ce5623da99..0ac09a2bd144 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1991,19 +1991,4 @@ static struct pci_driver adm8211_driver = {
1991#endif /* CONFIG_PM */ 1991#endif /* CONFIG_PM */
1992}; 1992};
1993 1993
1994 1994module_pci_driver(adm8211_driver);
1995
1996static int __init adm8211_init(void)
1997{
1998 return pci_register_driver(&adm8211_driver);
1999}
2000
2001
2002static void __exit adm8211_exit(void)
2003{
2004 pci_unregister_driver(&adm8211_driver);
2005}
2006
2007
2008module_init(adm8211_init);
2009module_exit(adm8211_exit);
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 4045e5ab0555..faa8bcb4aac1 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -2512,10 +2512,8 @@ static void __exit at76_mod_exit(void)
2512 2512
2513 printk(KERN_INFO DRIVER_DESC " " DRIVER_VERSION " unloading\n"); 2513 printk(KERN_INFO DRIVER_DESC " " DRIVER_VERSION " unloading\n");
2514 usb_deregister(&at76_driver); 2514 usb_deregister(&at76_driver);
2515 for (i = 0; i < ARRAY_SIZE(firmwares); i++) { 2515 for (i = 0; i < ARRAY_SIZE(firmwares); i++)
2516 if (firmwares[i].fw) 2516 release_firmware(firmwares[i].fw);
2517 release_firmware(firmwares[i].fw);
2518 }
2519 led_trigger_unregister_simple(ledtrig_tx); 2517 led_trigger_unregister_simple(ledtrig_tx);
2520} 2518}
2521 2519
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
index 35e93704c4ef..5c008757662b 100644
--- a/drivers/net/wireless/ath/ath5k/ani.c
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -14,6 +14,8 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
17#include "ath5k.h" 19#include "ath5k.h"
18#include "reg.h" 20#include "reg.h"
19#include "debug.h" 21#include "debug.h"
@@ -728,33 +730,25 @@ void
728ath5k_ani_print_counters(struct ath5k_hw *ah) 730ath5k_ani_print_counters(struct ath5k_hw *ah)
729{ 731{
730 /* clears too */ 732 /* clears too */
731 printk(KERN_NOTICE "ACK fail\t%d\n", 733 pr_notice("ACK fail\t%d\n", ath5k_hw_reg_read(ah, AR5K_ACK_FAIL));
732 ath5k_hw_reg_read(ah, AR5K_ACK_FAIL)); 734 pr_notice("RTS fail\t%d\n", ath5k_hw_reg_read(ah, AR5K_RTS_FAIL));
733 printk(KERN_NOTICE "RTS fail\t%d\n", 735 pr_notice("RTS success\t%d\n", ath5k_hw_reg_read(ah, AR5K_RTS_OK));
734 ath5k_hw_reg_read(ah, AR5K_RTS_FAIL)); 736 pr_notice("FCS error\t%d\n", ath5k_hw_reg_read(ah, AR5K_FCS_FAIL));
735 printk(KERN_NOTICE "RTS success\t%d\n",
736 ath5k_hw_reg_read(ah, AR5K_RTS_OK));
737 printk(KERN_NOTICE "FCS error\t%d\n",
738 ath5k_hw_reg_read(ah, AR5K_FCS_FAIL));
739 737
740 /* no clear */ 738 /* no clear */
741 printk(KERN_NOTICE "tx\t%d\n", 739 pr_notice("tx\t%d\n", ath5k_hw_reg_read(ah, AR5K_PROFCNT_TX));
742 ath5k_hw_reg_read(ah, AR5K_PROFCNT_TX)); 740 pr_notice("rx\t%d\n", ath5k_hw_reg_read(ah, AR5K_PROFCNT_RX));
743 printk(KERN_NOTICE "rx\t%d\n", 741 pr_notice("busy\t%d\n", ath5k_hw_reg_read(ah, AR5K_PROFCNT_RXCLR));
744 ath5k_hw_reg_read(ah, AR5K_PROFCNT_RX)); 742 pr_notice("cycles\t%d\n", ath5k_hw_reg_read(ah, AR5K_PROFCNT_CYCLE));
745 printk(KERN_NOTICE "busy\t%d\n", 743
746 ath5k_hw_reg_read(ah, AR5K_PROFCNT_RXCLR)); 744 pr_notice("AR5K_PHYERR_CNT1\t%d\n",
747 printk(KERN_NOTICE "cycles\t%d\n", 745 ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1));
748 ath5k_hw_reg_read(ah, AR5K_PROFCNT_CYCLE)); 746 pr_notice("AR5K_PHYERR_CNT2\t%d\n",
749 747 ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2));
750 printk(KERN_NOTICE "AR5K_PHYERR_CNT1\t%d\n", 748 pr_notice("AR5K_OFDM_FIL_CNT\t%d\n",
751 ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1)); 749 ath5k_hw_reg_read(ah, AR5K_OFDM_FIL_CNT));
752 printk(KERN_NOTICE "AR5K_PHYERR_CNT2\t%d\n", 750 pr_notice("AR5K_CCK_FIL_CNT\t%d\n",
753 ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2)); 751 ath5k_hw_reg_read(ah, AR5K_CCK_FIL_CNT));
754 printk(KERN_NOTICE "AR5K_OFDM_FIL_CNT\t%d\n",
755 ath5k_hw_reg_read(ah, AR5K_OFDM_FIL_CNT));
756 printk(KERN_NOTICE "AR5K_CCK_FIL_CNT\t%d\n",
757 ath5k_hw_reg_read(ah, AR5K_CCK_FIL_CNT));
758} 752}
759 753
760#endif 754#endif
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 8d434b8f5855..64a453a6dfe4 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -76,26 +76,29 @@
76 GENERIC DRIVER DEFINITIONS 76 GENERIC DRIVER DEFINITIONS
77\****************************/ 77\****************************/
78 78
79#define ATH5K_PRINTF(fmt, ...) \ 79#define ATH5K_PRINTF(fmt, ...) \
80 printk(KERN_WARNING "%s: " fmt, __func__, ##__VA_ARGS__) 80 pr_warn("%s: " fmt, __func__, ##__VA_ARGS__)
81 81
82#define ATH5K_PRINTK(_sc, _level, _fmt, ...) \ 82void __printf(3, 4)
83 printk(_level "ath5k %s: " _fmt, \ 83_ath5k_printk(const struct ath5k_hw *ah, const char *level,
84 ((_sc) && (_sc)->hw) ? wiphy_name((_sc)->hw->wiphy) : "", \ 84 const char *fmt, ...);
85 ##__VA_ARGS__)
86 85
87#define ATH5K_PRINTK_LIMIT(_sc, _level, _fmt, ...) do { \ 86#define ATH5K_PRINTK(_sc, _level, _fmt, ...) \
88 if (net_ratelimit()) \ 87 _ath5k_printk(_sc, _level, _fmt, ##__VA_ARGS__)
89 ATH5K_PRINTK(_sc, _level, _fmt, ##__VA_ARGS__); \
90 } while (0)
91 88
92#define ATH5K_INFO(_sc, _fmt, ...) \ 89#define ATH5K_PRINTK_LIMIT(_sc, _level, _fmt, ...) \
90do { \
91 if (net_ratelimit()) \
92 ATH5K_PRINTK(_sc, _level, _fmt, ##__VA_ARGS__); \
93} while (0)
94
95#define ATH5K_INFO(_sc, _fmt, ...) \
93 ATH5K_PRINTK(_sc, KERN_INFO, _fmt, ##__VA_ARGS__) 96 ATH5K_PRINTK(_sc, KERN_INFO, _fmt, ##__VA_ARGS__)
94 97
95#define ATH5K_WARN(_sc, _fmt, ...) \ 98#define ATH5K_WARN(_sc, _fmt, ...) \
96 ATH5K_PRINTK_LIMIT(_sc, KERN_WARNING, _fmt, ##__VA_ARGS__) 99 ATH5K_PRINTK_LIMIT(_sc, KERN_WARNING, _fmt, ##__VA_ARGS__)
97 100
98#define ATH5K_ERR(_sc, _fmt, ...) \ 101#define ATH5K_ERR(_sc, _fmt, ...) \
99 ATH5K_PRINTK_LIMIT(_sc, KERN_ERR, _fmt, ##__VA_ARGS__) 102 ATH5K_PRINTK_LIMIT(_sc, KERN_ERR, _fmt, ##__VA_ARGS__)
100 103
101/* 104/*
@@ -1524,7 +1527,7 @@ void ath5k_eeprom_detach(struct ath5k_hw *ah);
1524 1527
1525/* Protocol Control Unit Functions */ 1528/* Protocol Control Unit Functions */
1526/* Helpers */ 1529/* Helpers */
1527int ath5k_hw_get_frame_duration(struct ath5k_hw *ah, 1530int ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum ieee80211_band band,
1528 int len, struct ieee80211_rate *rate, bool shortpre); 1531 int len, struct ieee80211_rate *rate, bool shortpre);
1529unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah); 1532unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah);
1530unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah); 1533unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah);
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index d7114c75fe9b..7106547a14dd 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -20,6 +20,8 @@
20* Attach/Detach Functions and helpers * 20* Attach/Detach Functions and helpers *
21\*************************************/ 21\*************************************/
22 22
23#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24
23#include <linux/pci.h> 25#include <linux/pci.h>
24#include <linux/slab.h> 26#include <linux/slab.h>
25#include "ath5k.h" 27#include "ath5k.h"
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 0e643b016b32..49e3b19cf781 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -40,6 +40,8 @@
40 * 40 *
41 */ 41 */
42 42
43#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44
43#include <linux/module.h> 45#include <linux/module.h>
44#include <linux/delay.h> 46#include <linux/delay.h>
45#include <linux/dma-mapping.h> 47#include <linux/dma-mapping.h>
@@ -1168,7 +1170,7 @@ ath5k_check_ibss_tsf(struct ath5k_hw *ah, struct sk_buff *skb,
1168 1170
1169 if (ieee80211_is_beacon(mgmt->frame_control) && 1171 if (ieee80211_is_beacon(mgmt->frame_control) &&
1170 le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS && 1172 le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS &&
1171 memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) == 0) { 1173 compare_ether_addr(mgmt->bssid, common->curbssid) == 0) {
1172 /* 1174 /*
1173 * Received an IBSS beacon with the same BSSID. Hardware *must* 1175 * Received an IBSS beacon with the same BSSID. Hardware *must*
1174 * have updated the local TSF. We have to work around various 1176 * have updated the local TSF. We have to work around various
@@ -1232,7 +1234,7 @@ ath5k_update_beacon_rssi(struct ath5k_hw *ah, struct sk_buff *skb, int rssi)
1232 1234
1233 /* only beacons from our BSSID */ 1235 /* only beacons from our BSSID */
1234 if (!ieee80211_is_beacon(mgmt->frame_control) || 1236 if (!ieee80211_is_beacon(mgmt->frame_control) ||
1235 memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) != 0) 1237 compare_ether_addr(mgmt->bssid, common->curbssid) != 0)
1236 return; 1238 return;
1237 1239
1238 ewma_add(&ah->ah_beacon_rssi_avg, rssi); 1240 ewma_add(&ah->ah_beacon_rssi_avg, rssi);
@@ -3038,3 +3040,23 @@ ath5k_set_beacon_filter(struct ieee80211_hw *hw, bool enable)
3038 ath5k_hw_set_rx_filter(ah, rfilt); 3040 ath5k_hw_set_rx_filter(ah, rfilt);
3039 ah->filter_flags = rfilt; 3041 ah->filter_flags = rfilt;
3040} 3042}
3043
3044void _ath5k_printk(const struct ath5k_hw *ah, const char *level,
3045 const char *fmt, ...)
3046{
3047 struct va_format vaf;
3048 va_list args;
3049
3050 va_start(args, fmt);
3051
3052 vaf.fmt = fmt;
3053 vaf.va = &args;
3054
3055 if (ah && ah->hw)
3056 printk("%s" pr_fmt("%s: %pV"),
3057 level, wiphy_name(ah->hw->wiphy), &vaf);
3058 else
3059 printk("%s" pr_fmt("%pV"), level, &vaf);
3060
3061 va_end(args);
3062}
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index e5e8f45d86ac..9d00dab666a8 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -57,6 +57,9 @@
57 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 57 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
58 * THE POSSIBILITY OF SUCH DAMAGES. 58 * THE POSSIBILITY OF SUCH DAMAGES.
59 */ 59 */
60
61#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
62
60#include <linux/export.h> 63#include <linux/export.h>
61#include <linux/moduleparam.h> 64#include <linux/moduleparam.h>
62 65
@@ -247,10 +250,10 @@ static ssize_t write_file_beacon(struct file *file,
247 250
248 if (strncmp(buf, "disable", 7) == 0) { 251 if (strncmp(buf, "disable", 7) == 0) {
249 AR5K_REG_DISABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE); 252 AR5K_REG_DISABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE);
250 printk(KERN_INFO "debugfs disable beacons\n"); 253 pr_info("debugfs disable beacons\n");
251 } else if (strncmp(buf, "enable", 6) == 0) { 254 } else if (strncmp(buf, "enable", 6) == 0) {
252 AR5K_REG_ENABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE); 255 AR5K_REG_ENABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE);
253 printk(KERN_INFO "debugfs enable beacons\n"); 256 pr_info("debugfs enable beacons\n");
254 } 257 }
255 return count; 258 return count;
256} 259}
@@ -450,19 +453,19 @@ static ssize_t write_file_antenna(struct file *file,
450 453
451 if (strncmp(buf, "diversity", 9) == 0) { 454 if (strncmp(buf, "diversity", 9) == 0) {
452 ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT); 455 ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT);
453 printk(KERN_INFO "ath5k debug: enable diversity\n"); 456 pr_info("debug: enable diversity\n");
454 } else if (strncmp(buf, "fixed-a", 7) == 0) { 457 } else if (strncmp(buf, "fixed-a", 7) == 0) {
455 ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_A); 458 ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_A);
456 printk(KERN_INFO "ath5k debugfs: fixed antenna A\n"); 459 pr_info("debug: fixed antenna A\n");
457 } else if (strncmp(buf, "fixed-b", 7) == 0) { 460 } else if (strncmp(buf, "fixed-b", 7) == 0) {
458 ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_B); 461 ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_B);
459 printk(KERN_INFO "ath5k debug: fixed antenna B\n"); 462 pr_info("debug: fixed antenna B\n");
460 } else if (strncmp(buf, "clear", 5) == 0) { 463 } else if (strncmp(buf, "clear", 5) == 0) {
461 for (i = 0; i < ARRAY_SIZE(ah->stats.antenna_rx); i++) { 464 for (i = 0; i < ARRAY_SIZE(ah->stats.antenna_rx); i++) {
462 ah->stats.antenna_rx[i] = 0; 465 ah->stats.antenna_rx[i] = 0;
463 ah->stats.antenna_tx[i] = 0; 466 ah->stats.antenna_tx[i] = 0;
464 } 467 }
465 printk(KERN_INFO "ath5k debug: cleared antenna stats\n"); 468 pr_info("debug: cleared antenna stats\n");
466 } 469 }
467 return count; 470 return count;
468} 471}
@@ -632,7 +635,7 @@ static ssize_t write_file_frameerrors(struct file *file,
632 st->txerr_fifo = 0; 635 st->txerr_fifo = 0;
633 st->txerr_filt = 0; 636 st->txerr_filt = 0;
634 st->tx_all_count = 0; 637 st->tx_all_count = 0;
635 printk(KERN_INFO "ath5k debug: cleared frameerrors stats\n"); 638 pr_info("debug: cleared frameerrors stats\n");
636 } 639 }
637 return count; 640 return count;
638} 641}
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c
index f8bfa3ac2af0..bd8d4392d68b 100644
--- a/drivers/net/wireless/ath/ath5k/desc.c
+++ b/drivers/net/wireless/ath/ath5k/desc.c
@@ -21,6 +21,8 @@
21 Hardware Descriptor Functions 21 Hardware Descriptor Functions
22\******************************/ 22\******************************/
23 23
24#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
25
24#include "ath5k.h" 26#include "ath5k.h"
25#include "reg.h" 27#include "reg.h"
26#include "debug.h" 28#include "debug.h"
@@ -441,10 +443,8 @@ ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
441 struct ath5k_desc *desc, 443 struct ath5k_desc *desc,
442 struct ath5k_tx_status *ts) 444 struct ath5k_tx_status *ts)
443{ 445{
444 struct ath5k_hw_2w_tx_ctl *tx_ctl;
445 struct ath5k_hw_tx_status *tx_status; 446 struct ath5k_hw_tx_status *tx_status;
446 447
447 tx_ctl = &desc->ud.ds_tx5210.tx_ctl;
448 tx_status = &desc->ud.ds_tx5210.tx_stat; 448 tx_status = &desc->ud.ds_tx5210.tx_stat;
449 449
450 /* No frame has been send or error */ 450 /* No frame has been send or error */
@@ -495,11 +495,9 @@ ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
495 struct ath5k_desc *desc, 495 struct ath5k_desc *desc,
496 struct ath5k_tx_status *ts) 496 struct ath5k_tx_status *ts)
497{ 497{
498 struct ath5k_hw_4w_tx_ctl *tx_ctl;
499 struct ath5k_hw_tx_status *tx_status; 498 struct ath5k_hw_tx_status *tx_status;
500 u32 txstat0, txstat1; 499 u32 txstat0, txstat1;
501 500
502 tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
503 tx_status = &desc->ud.ds_tx5212.tx_stat; 501 tx_status = &desc->ud.ds_tx5212.tx_stat;
504 502
505 txstat1 = ACCESS_ONCE(tx_status->tx_status_1); 503 txstat1 = ACCESS_ONCE(tx_status->tx_status_1);
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
index 5cc9aa814697..ce86f158423b 100644
--- a/drivers/net/wireless/ath/ath5k/dma.c
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -29,6 +29,8 @@
29 * status registers (ISR). 29 * status registers (ISR).
30 */ 30 */
31 31
32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
32#include "ath5k.h" 34#include "ath5k.h"
33#include "reg.h" 35#include "reg.h"
34#include "debug.h" 36#include "debug.h"
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index cd708c15b774..4026c906cc7b 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -21,6 +21,8 @@
21* EEPROM access functions and helpers * 21* EEPROM access functions and helpers *
22\*************************************/ 22\*************************************/
23 23
24#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
25
24#include <linux/slab.h> 26#include <linux/slab.h>
25 27
26#include "ath5k.h" 28#include "ath5k.h"
diff --git a/drivers/net/wireless/ath/ath5k/initvals.c b/drivers/net/wireless/ath/ath5k/initvals.c
index a1ea78e05b47..ee1c2fa8b591 100644
--- a/drivers/net/wireless/ath/ath5k/initvals.c
+++ b/drivers/net/wireless/ath/ath5k/initvals.c
@@ -19,6 +19,8 @@
19 * 19 *
20 */ 20 */
21 21
22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
23
22#include "ath5k.h" 24#include "ath5k.h"
23#include "reg.h" 25#include "reg.h"
24#include "debug.h" 26#include "debug.h"
@@ -1574,8 +1576,7 @@ ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu)
1574 1576
1575 /* AR5K_MODE_11B */ 1577 /* AR5K_MODE_11B */
1576 if (mode > 2) { 1578 if (mode > 2) {
1577 ATH5K_ERR(ah, 1579 ATH5K_ERR(ah, "unsupported channel mode: %d\n", mode);
1578 "unsupported channel mode: %d\n", mode);
1579 return -EINVAL; 1580 return -EINVAL;
1580 } 1581 }
1581 1582
diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
index c1151c723711..b9f708a45f4e 100644
--- a/drivers/net/wireless/ath/ath5k/led.c
+++ b/drivers/net/wireless/ath/ath5k/led.c
@@ -39,6 +39,8 @@
39 * 39 *
40 */ 40 */
41 41
42#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
43
42#include <linux/pci.h> 44#include <linux/pci.h>
43#include "ath5k.h" 45#include "ath5k.h"
44 46
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 5c5329955414..22b80af0f47c 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -41,6 +41,8 @@
41 * 41 *
42 */ 42 */
43 43
44#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
45
44#include <net/mac80211.h> 46#include <net/mac80211.h>
45#include <asm/unaligned.h> 47#include <asm/unaligned.h>
46 48
diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c
index 849fa060ebc4..dff48fbc63bf 100644
--- a/drivers/net/wireless/ath/ath5k/pci.c
+++ b/drivers/net/wireless/ath/ath5k/pci.c
@@ -14,6 +14,8 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
17#include <linux/nl80211.h> 19#include <linux/nl80211.h>
18#include <linux/pci.h> 20#include <linux/pci.h>
19#include <linux/pci-aspm.h> 21#include <linux/pci-aspm.h>
@@ -45,6 +47,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath5k_pci_id_table) = {
45 { PCI_VDEVICE(ATHEROS, 0x001b) }, /* 5413 Eagle */ 47 { PCI_VDEVICE(ATHEROS, 0x001b) }, /* 5413 Eagle */
46 { PCI_VDEVICE(ATHEROS, 0x001c) }, /* PCI-E cards */ 48 { PCI_VDEVICE(ATHEROS, 0x001c) }, /* PCI-E cards */
47 { PCI_VDEVICE(ATHEROS, 0x001d) }, /* 2417 Nala */ 49 { PCI_VDEVICE(ATHEROS, 0x001d) }, /* 2417 Nala */
50 { PCI_VDEVICE(ATHEROS, 0xff1b) }, /* AR5BXB63 */
48 { 0 } 51 { 0 }
49}; 52};
50MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table); 53MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table);
@@ -337,28 +340,4 @@ static struct pci_driver ath5k_pci_driver = {
337 .driver.pm = ATH5K_PM_OPS, 340 .driver.pm = ATH5K_PM_OPS,
338}; 341};
339 342
340/* 343module_pci_driver(ath5k_pci_driver);
341 * Module init/exit functions
342 */
343static int __init
344init_ath5k_pci(void)
345{
346 int ret;
347
348 ret = pci_register_driver(&ath5k_pci_driver);
349 if (ret) {
350 printk(KERN_ERR "ath5k_pci: can't register pci driver\n");
351 return ret;
352 }
353
354 return 0;
355}
356
357static void __exit
358exit_ath5k_pci(void)
359{
360 pci_unregister_driver(&ath5k_pci_driver);
361}
362
363module_init(init_ath5k_pci);
364module_exit(exit_ath5k_pci);
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index cebfd6fd31d3..1f16b4227d8f 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -110,7 +110,7 @@ static const unsigned int ack_rates_high[] =
110 * bwmodes. 110 * bwmodes.
111 */ 111 */
112int 112int
113ath5k_hw_get_frame_duration(struct ath5k_hw *ah, 113ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum ieee80211_band band,
114 int len, struct ieee80211_rate *rate, bool shortpre) 114 int len, struct ieee80211_rate *rate, bool shortpre)
115{ 115{
116 int sifs, preamble, plcp_bits, sym_time; 116 int sifs, preamble, plcp_bits, sym_time;
@@ -120,7 +120,7 @@ ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
120 /* Fallback */ 120 /* Fallback */
121 if (!ah->ah_bwmode) { 121 if (!ah->ah_bwmode) {
122 __le16 raw_dur = ieee80211_generic_frame_duration(ah->hw, 122 __le16 raw_dur = ieee80211_generic_frame_duration(ah->hw,
123 NULL, len, rate); 123 NULL, band, len, rate);
124 124
125 /* subtract difference between long and short preamble */ 125 /* subtract difference between long and short preamble */
126 dur = le16_to_cpu(raw_dur); 126 dur = le16_to_cpu(raw_dur);
@@ -302,14 +302,15 @@ ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
302 * actual rate for this rate. See mac80211 tx.c 302 * actual rate for this rate. See mac80211 tx.c
303 * ieee80211_duration() for a brief description of 303 * ieee80211_duration() for a brief description of
304 * what rate we should choose to TX ACKs. */ 304 * what rate we should choose to TX ACKs. */
305 tx_time = ath5k_hw_get_frame_duration(ah, 10, rate, false); 305 tx_time = ath5k_hw_get_frame_duration(ah, band, 10,
306 rate, false);
306 307
307 ath5k_hw_reg_write(ah, tx_time, reg); 308 ath5k_hw_reg_write(ah, tx_time, reg);
308 309
309 if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE)) 310 if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE))
310 continue; 311 continue;
311 312
312 tx_time = ath5k_hw_get_frame_duration(ah, 10, rate, true); 313 tx_time = ath5k_hw_get_frame_duration(ah, band, 10, rate, true);
313 ath5k_hw_reg_write(ah, tx_time, 314 ath5k_hw_reg_write(ah, tx_time,
314 reg + (AR5K_SET_SHORT_PREAMBLE << 2)); 315 reg + (AR5K_SET_SHORT_PREAMBLE << 2));
315 } 316 }
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 3a2845489a1b..8b71a2d947e0 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -22,6 +22,8 @@
22* PHY related functions * 22* PHY related functions *
23\***********************/ 23\***********************/
24 24
25#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
26
25#include <linux/delay.h> 27#include <linux/delay.h>
26#include <linux/slab.h> 28#include <linux/slab.h>
27#include <asm/unaligned.h> 29#include <asm/unaligned.h>
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index 30b50f934172..65fe929529a8 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -20,6 +20,8 @@
20Queue Control Unit, DCF Control Unit Functions 20Queue Control Unit, DCF Control Unit Functions
21\********************************************/ 21\********************************************/
22 22
23#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24
23#include "ath5k.h" 25#include "ath5k.h"
24#include "reg.h" 26#include "reg.h"
25#include "debug.h" 27#include "debug.h"
@@ -563,6 +565,7 @@ ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
563int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time) 565int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
564{ 566{
565 struct ieee80211_channel *channel = ah->ah_current_channel; 567 struct ieee80211_channel *channel = ah->ah_current_channel;
568 enum ieee80211_band band;
566 struct ieee80211_rate *rate; 569 struct ieee80211_rate *rate;
567 u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock; 570 u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock;
568 u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time); 571 u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
@@ -598,11 +601,12 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
598 * Also we have different lowest rate for 802.11a 601 * Also we have different lowest rate for 802.11a
599 */ 602 */
600 if (channel->band == IEEE80211_BAND_5GHZ) 603 if (channel->band == IEEE80211_BAND_5GHZ)
601 rate = &ah->sbands[IEEE80211_BAND_5GHZ].bitrates[0]; 604 band = IEEE80211_BAND_5GHZ;
602 else 605 else
603 rate = &ah->sbands[IEEE80211_BAND_2GHZ].bitrates[0]; 606 band = IEEE80211_BAND_2GHZ;
604 607
605 ack_tx_time = ath5k_hw_get_frame_duration(ah, 10, rate, false); 608 rate = &ah->sbands[band].bitrates[0];
609 ack_tx_time = ath5k_hw_get_frame_duration(ah, band, 10, rate, false);
606 610
607 /* ack_tx_time includes an SIFS already */ 611 /* ack_tx_time includes an SIFS already */
608 eifs = ack_tx_time + sifs + 2 * slot_time; 612 eifs = ack_tx_time + sifs + 2 * slot_time;
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 200f165c0c6d..0c2dd4771c36 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -23,6 +23,8 @@
23 Reset function and helpers 23 Reset function and helpers
24\****************************/ 24\****************************/
25 25
26#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
27
26#include <asm/unaligned.h> 28#include <asm/unaligned.h>
27 29
28#include <linux/pci.h> /* To determine if a card is pci-e */ 30#include <linux/pci.h> /* To determine if a card is pci-e */
diff --git a/drivers/net/wireless/ath/ath5k/sysfs.c b/drivers/net/wireless/ath/ath5k/sysfs.c
index 9364da7bd131..04cf0ca72610 100644
--- a/drivers/net/wireless/ath/ath5k/sysfs.c
+++ b/drivers/net/wireless/ath/ath5k/sysfs.c
@@ -1,3 +1,5 @@
1#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
2
1#include <linux/device.h> 3#include <linux/device.h>
2#include <linux/pci.h> 4#include <linux/pci.h>
3 5
diff --git a/drivers/net/wireless/ath/ath6kl/Makefile b/drivers/net/wireless/ath/ath6kl/Makefile
index 85746c3eb027..8cae8886f17d 100644
--- a/drivers/net/wireless/ath/ath6kl/Makefile
+++ b/drivers/net/wireless/ath/ath6kl/Makefile
@@ -25,7 +25,8 @@
25obj-$(CONFIG_ATH6KL) += ath6kl_core.o 25obj-$(CONFIG_ATH6KL) += ath6kl_core.o
26ath6kl_core-y += debug.o 26ath6kl_core-y += debug.o
27ath6kl_core-y += hif.o 27ath6kl_core-y += hif.o
28ath6kl_core-y += htc.o 28ath6kl_core-y += htc_mbox.o
29ath6kl_core-y += htc_pipe.o
29ath6kl_core-y += bmi.o 30ath6kl_core-y += bmi.o
30ath6kl_core-y += cfg80211.o 31ath6kl_core-y += cfg80211.o
31ath6kl_core-y += init.o 32ath6kl_core-y += init.o
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 00d38952b5fb..28a65d3a03d0 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -15,6 +15,8 @@
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */ 16 */
17 17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
18#include <linux/moduleparam.h> 20#include <linux/moduleparam.h>
19#include <linux/inetdevice.h> 21#include <linux/inetdevice.h>
20#include <linux/export.h> 22#include <linux/export.h>
@@ -49,6 +51,8 @@
49 .max_power = 30, \ 51 .max_power = 30, \
50} 52}
51 53
54#define DEFAULT_BG_SCAN_PERIOD 60
55
52static struct ieee80211_rate ath6kl_rates[] = { 56static struct ieee80211_rate ath6kl_rates[] = {
53 RATETAB_ENT(10, 0x1, 0), 57 RATETAB_ENT(10, 0x1, 0),
54 RATETAB_ENT(20, 0x2, 0), 58 RATETAB_ENT(20, 0x2, 0),
@@ -69,7 +73,8 @@ static struct ieee80211_rate ath6kl_rates[] = {
69#define ath6kl_g_rates (ath6kl_rates + 0) 73#define ath6kl_g_rates (ath6kl_rates + 0)
70#define ath6kl_g_rates_size 12 74#define ath6kl_g_rates_size 12
71 75
72#define ath6kl_g_htcap (IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \ 76#define ath6kl_g_htcap IEEE80211_HT_CAP_SGI_20
77#define ath6kl_a_htcap (IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \
73 IEEE80211_HT_CAP_SGI_20 | \ 78 IEEE80211_HT_CAP_SGI_20 | \
74 IEEE80211_HT_CAP_SGI_40) 79 IEEE80211_HT_CAP_SGI_40)
75 80
@@ -126,7 +131,7 @@ static struct ieee80211_supported_band ath6kl_band_5ghz = {
126 .channels = ath6kl_5ghz_a_channels, 131 .channels = ath6kl_5ghz_a_channels,
127 .n_bitrates = ath6kl_a_rates_size, 132 .n_bitrates = ath6kl_a_rates_size,
128 .bitrates = ath6kl_a_rates, 133 .bitrates = ath6kl_a_rates,
129 .ht_cap.cap = ath6kl_g_htcap, 134 .ht_cap.cap = ath6kl_a_htcap,
130 .ht_cap.ht_supported = true, 135 .ht_cap.ht_supported = true,
131}; 136};
132 137
@@ -607,6 +612,17 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
607 vif->req_bssid, vif->ch_hint, 612 vif->req_bssid, vif->ch_hint,
608 ar->connect_ctrl_flags, nw_subtype); 613 ar->connect_ctrl_flags, nw_subtype);
609 614
615 /* disable background scan if period is 0 */
616 if (sme->bg_scan_period == 0)
617 sme->bg_scan_period = 0xffff;
618
619 /* configure default value if not specified */
620 if (sme->bg_scan_period == -1)
621 sme->bg_scan_period = DEFAULT_BG_SCAN_PERIOD;
622
623 ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx, 0, 0,
624 sme->bg_scan_period, 0, 0, 0, 3, 0, 0, 0);
625
610 up(&ar->sem); 626 up(&ar->sem);
611 627
612 if (status == -EINVAL) { 628 if (status == -EINVAL) {
@@ -941,6 +957,8 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
941 if (test_bit(CONNECTED, &vif->flags)) 957 if (test_bit(CONNECTED, &vif->flags))
942 force_fg_scan = 1; 958 force_fg_scan = 1;
943 959
960 vif->scan_req = request;
961
944 if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX, 962 if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
945 ar->fw_capabilities)) { 963 ar->fw_capabilities)) {
946 /* 964 /*
@@ -963,10 +981,10 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
963 ATH6KL_FG_SCAN_INTERVAL, 981 ATH6KL_FG_SCAN_INTERVAL,
964 n_channels, channels); 982 n_channels, channels);
965 } 983 }
966 if (ret) 984 if (ret) {
967 ath6kl_err("wmi_startscan_cmd failed\n"); 985 ath6kl_err("wmi_startscan_cmd failed\n");
968 else 986 vif->scan_req = NULL;
969 vif->scan_req = request; 987 }
970 988
971 kfree(channels); 989 kfree(channels);
972 990
@@ -1436,9 +1454,38 @@ static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy,
1436 struct vif_params *params) 1454 struct vif_params *params)
1437{ 1455{
1438 struct ath6kl_vif *vif = netdev_priv(ndev); 1456 struct ath6kl_vif *vif = netdev_priv(ndev);
1457 int i;
1439 1458
1440 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type %u\n", __func__, type); 1459 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type %u\n", __func__, type);
1441 1460
1461 /*
1462 * Don't bring up p2p on an interface which is not initialized
1463 * for p2p operation where fw does not have capability to switch
1464 * dynamically between non-p2p and p2p type interface.
1465 */
1466 if (!test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
1467 vif->ar->fw_capabilities) &&
1468 (type == NL80211_IFTYPE_P2P_CLIENT ||
1469 type == NL80211_IFTYPE_P2P_GO)) {
1470 if (vif->ar->vif_max == 1) {
1471 if (vif->fw_vif_idx != 0)
1472 return -EINVAL;
1473 else
1474 goto set_iface_type;
1475 }
1476
1477 for (i = vif->ar->max_norm_iface; i < vif->ar->vif_max; i++) {
1478 if (i == vif->fw_vif_idx)
1479 break;
1480 }
1481
1482 if (i == vif->ar->vif_max) {
1483 ath6kl_err("Invalid interface to bring up P2P\n");
1484 return -EINVAL;
1485 }
1486 }
1487
1488set_iface_type:
1442 switch (type) { 1489 switch (type) {
1443 case NL80211_IFTYPE_STATION: 1490 case NL80211_IFTYPE_STATION:
1444 vif->next_mode = INFRA_NETWORK; 1491 vif->next_mode = INFRA_NETWORK;
@@ -1924,12 +1971,61 @@ static int ath6kl_wow_sta(struct ath6kl *ar, struct ath6kl_vif *vif)
1924 return 0; 1971 return 0;
1925} 1972}
1926 1973
1974static int is_hsleep_mode_procsed(struct ath6kl_vif *vif)
1975{
1976 return test_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags);
1977}
1978
1979static bool is_ctrl_ep_empty(struct ath6kl *ar)
1980{
1981 return !ar->tx_pending[ar->ctrl_ep];
1982}
1983
1984static int ath6kl_cfg80211_host_sleep(struct ath6kl *ar, struct ath6kl_vif *vif)
1985{
1986 int ret, left;
1987
1988 clear_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags);
1989
1990 ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
1991 ATH6KL_HOST_MODE_ASLEEP);
1992 if (ret)
1993 return ret;
1994
1995 left = wait_event_interruptible_timeout(ar->event_wq,
1996 is_hsleep_mode_procsed(vif),
1997 WMI_TIMEOUT);
1998 if (left == 0) {
1999 ath6kl_warn("timeout, didn't get host sleep cmd processed event\n");
2000 ret = -ETIMEDOUT;
2001 } else if (left < 0) {
2002 ath6kl_warn("error while waiting for host sleep cmd processed event %d\n",
2003 left);
2004 ret = left;
2005 }
2006
2007 if (ar->tx_pending[ar->ctrl_ep]) {
2008 left = wait_event_interruptible_timeout(ar->event_wq,
2009 is_ctrl_ep_empty(ar),
2010 WMI_TIMEOUT);
2011 if (left == 0) {
2012 ath6kl_warn("clear wmi ctrl data timeout\n");
2013 ret = -ETIMEDOUT;
2014 } else if (left < 0) {
2015 ath6kl_warn("clear wmi ctrl data failed: %d\n", left);
2016 ret = left;
2017 }
2018 }
2019
2020 return ret;
2021}
2022
1927static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow) 2023static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
1928{ 2024{
1929 struct in_device *in_dev; 2025 struct in_device *in_dev;
1930 struct in_ifaddr *ifa; 2026 struct in_ifaddr *ifa;
1931 struct ath6kl_vif *vif; 2027 struct ath6kl_vif *vif;
1932 int ret, left; 2028 int ret;
1933 u32 filter = 0; 2029 u32 filter = 0;
1934 u16 i, bmiss_time; 2030 u16 i, bmiss_time;
1935 u8 index = 0; 2031 u8 index = 0;
@@ -2030,39 +2126,11 @@ skip_arp:
2030 if (ret) 2126 if (ret)
2031 return ret; 2127 return ret;
2032 2128
2033 clear_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags); 2129 ret = ath6kl_cfg80211_host_sleep(ar, vif);
2034
2035 ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
2036 ATH6KL_HOST_MODE_ASLEEP);
2037 if (ret) 2130 if (ret)
2038 return ret; 2131 return ret;
2039 2132
2040 left = wait_event_interruptible_timeout(ar->event_wq, 2133 return 0;
2041 test_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags),
2042 WMI_TIMEOUT);
2043 if (left == 0) {
2044 ath6kl_warn("timeout, didn't get host sleep cmd "
2045 "processed event\n");
2046 ret = -ETIMEDOUT;
2047 } else if (left < 0) {
2048 ath6kl_warn("error while waiting for host sleep cmd "
2049 "processed event %d\n", left);
2050 ret = left;
2051 }
2052
2053 if (ar->tx_pending[ar->ctrl_ep]) {
2054 left = wait_event_interruptible_timeout(ar->event_wq,
2055 ar->tx_pending[ar->ctrl_ep] == 0, WMI_TIMEOUT);
2056 if (left == 0) {
2057 ath6kl_warn("clear wmi ctrl data timeout\n");
2058 ret = -ETIMEDOUT;
2059 } else if (left < 0) {
2060 ath6kl_warn("clear wmi ctrl data failed: %d\n", left);
2061 ret = left;
2062 }
2063 }
2064
2065 return ret;
2066} 2134}
2067 2135
2068static int ath6kl_wow_resume(struct ath6kl *ar) 2136static int ath6kl_wow_resume(struct ath6kl *ar)
@@ -2109,10 +2177,82 @@ static int ath6kl_wow_resume(struct ath6kl *ar)
2109 return 0; 2177 return 0;
2110} 2178}
2111 2179
2180static int ath6kl_cfg80211_deepsleep_suspend(struct ath6kl *ar)
2181{
2182 struct ath6kl_vif *vif;
2183 int ret;
2184
2185 vif = ath6kl_vif_first(ar);
2186 if (!vif)
2187 return -EIO;
2188
2189 if (!ath6kl_cfg80211_ready(vif))
2190 return -EIO;
2191
2192 ath6kl_cfg80211_stop_all(ar);
2193
2194 /* Save the current power mode before enabling power save */
2195 ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode;
2196
2197 ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER);
2198 if (ret)
2199 return ret;
2200
2201 /* Disable WOW mode */
2202 ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, vif->fw_vif_idx,
2203 ATH6KL_WOW_MODE_DISABLE,
2204 0, 0);
2205 if (ret)
2206 return ret;
2207
2208 /* Flush all non control pkts in TX path */
2209 ath6kl_tx_data_cleanup(ar);
2210
2211 ret = ath6kl_cfg80211_host_sleep(ar, vif);
2212 if (ret)
2213 return ret;
2214
2215 return 0;
2216}
2217
2218static int ath6kl_cfg80211_deepsleep_resume(struct ath6kl *ar)
2219{
2220 struct ath6kl_vif *vif;
2221 int ret;
2222
2223 vif = ath6kl_vif_first(ar);
2224
2225 if (!vif)
2226 return -EIO;
2227
2228 if (ar->wmi->pwr_mode != ar->wmi->saved_pwr_mode) {
2229 ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0,
2230 ar->wmi->saved_pwr_mode);
2231 if (ret)
2232 return ret;
2233 }
2234
2235 ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
2236 ATH6KL_HOST_MODE_AWAKE);
2237 if (ret)
2238 return ret;
2239
2240 ar->state = ATH6KL_STATE_ON;
2241
2242 /* Reset scan parameter to default values */
2243 ret = ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx,
2244 0, 0, 0, 0, 0, 0, 3, 0, 0, 0);
2245 if (ret)
2246 return ret;
2247
2248 return 0;
2249}
2250
2112int ath6kl_cfg80211_suspend(struct ath6kl *ar, 2251int ath6kl_cfg80211_suspend(struct ath6kl *ar,
2113 enum ath6kl_cfg_suspend_mode mode, 2252 enum ath6kl_cfg_suspend_mode mode,
2114 struct cfg80211_wowlan *wow) 2253 struct cfg80211_wowlan *wow)
2115{ 2254{
2255 struct ath6kl_vif *vif;
2116 enum ath6kl_state prev_state; 2256 enum ath6kl_state prev_state;
2117 int ret; 2257 int ret;
2118 2258
@@ -2137,15 +2277,12 @@ int ath6kl_cfg80211_suspend(struct ath6kl *ar,
2137 2277
2138 case ATH6KL_CFG_SUSPEND_DEEPSLEEP: 2278 case ATH6KL_CFG_SUSPEND_DEEPSLEEP:
2139 2279
2140 ath6kl_cfg80211_stop_all(ar); 2280 ath6kl_dbg(ATH6KL_DBG_SUSPEND, "deep sleep suspend\n");
2141
2142 /* save the current power mode before enabling power save */
2143 ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode;
2144 2281
2145 ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER); 2282 ret = ath6kl_cfg80211_deepsleep_suspend(ar);
2146 if (ret) { 2283 if (ret) {
2147 ath6kl_warn("wmi powermode command failed during suspend: %d\n", 2284 ath6kl_err("deepsleep suspend failed: %d\n", ret);
2148 ret); 2285 return ret;
2149 } 2286 }
2150 2287
2151 ar->state = ATH6KL_STATE_DEEPSLEEP; 2288 ar->state = ATH6KL_STATE_DEEPSLEEP;
@@ -2185,6 +2322,9 @@ int ath6kl_cfg80211_suspend(struct ath6kl *ar,
2185 break; 2322 break;
2186 } 2323 }
2187 2324
2325 list_for_each_entry(vif, &ar->vif_list, list)
2326 ath6kl_cfg80211_scan_complete_event(vif, true);
2327
2188 return 0; 2328 return 0;
2189} 2329}
2190EXPORT_SYMBOL(ath6kl_cfg80211_suspend); 2330EXPORT_SYMBOL(ath6kl_cfg80211_suspend);
@@ -2206,17 +2346,13 @@ int ath6kl_cfg80211_resume(struct ath6kl *ar)
2206 break; 2346 break;
2207 2347
2208 case ATH6KL_STATE_DEEPSLEEP: 2348 case ATH6KL_STATE_DEEPSLEEP:
2209 if (ar->wmi->pwr_mode != ar->wmi->saved_pwr_mode) { 2349 ath6kl_dbg(ATH6KL_DBG_SUSPEND, "deep sleep resume\n");
2210 ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0,
2211 ar->wmi->saved_pwr_mode);
2212 if (ret) {
2213 ath6kl_warn("wmi powermode command failed during resume: %d\n",
2214 ret);
2215 }
2216 }
2217
2218 ar->state = ATH6KL_STATE_ON;
2219 2350
2351 ret = ath6kl_cfg80211_deepsleep_resume(ar);
2352 if (ret) {
2353 ath6kl_warn("deep sleep resume failed: %d\n", ret);
2354 return ret;
2355 }
2220 break; 2356 break;
2221 2357
2222 case ATH6KL_STATE_CUTPOWER: 2358 case ATH6KL_STATE_CUTPOWER:
@@ -2290,31 +2426,25 @@ void ath6kl_check_wow_status(struct ath6kl *ar)
2290} 2426}
2291#endif 2427#endif
2292 2428
2293static int ath6kl_set_channel(struct wiphy *wiphy, struct net_device *dev, 2429static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum ieee80211_band band,
2294 struct ieee80211_channel *chan, 2430 bool ht_enable)
2295 enum nl80211_channel_type channel_type)
2296{ 2431{
2297 struct ath6kl_vif *vif; 2432 struct ath6kl_htcap *htcap = &vif->htcap;
2298
2299 /*
2300 * 'dev' could be NULL if a channel change is required for the hardware
2301 * device itself, instead of a particular VIF.
2302 *
2303 * FIXME: To be handled properly when monitor mode is supported.
2304 */
2305 if (!dev)
2306 return -EBUSY;
2307
2308 vif = netdev_priv(dev);
2309 2433
2310 if (!ath6kl_cfg80211_ready(vif)) 2434 if (htcap->ht_enable == ht_enable)
2311 return -EIO; 2435 return 0;
2312 2436
2313 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: center_freq=%u hw_value=%u\n", 2437 if (ht_enable) {
2314 __func__, chan->center_freq, chan->hw_value); 2438 /* Set default ht capabilities */
2315 vif->next_chan = chan->center_freq; 2439 htcap->ht_enable = true;
2440 htcap->cap_info = (band == IEEE80211_BAND_2GHZ) ?
2441 ath6kl_g_htcap : ath6kl_a_htcap;
2442 htcap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K;
2443 } else /* Disable ht */
2444 memset(htcap, 0, sizeof(*htcap));
2316 2445
2317 return 0; 2446 return ath6kl_wmi_set_htcap_cmd(vif->ar->wmi, vif->fw_vif_idx,
2447 band, htcap);
2318} 2448}
2319 2449
2320static bool ath6kl_is_p2p_ie(const u8 *pos) 2450static bool ath6kl_is_p2p_ie(const u8 *pos)
@@ -2391,6 +2521,81 @@ static int ath6kl_set_ies(struct ath6kl_vif *vif,
2391 return 0; 2521 return 0;
2392} 2522}
2393 2523
2524static int ath6kl_set_channel(struct wiphy *wiphy, struct net_device *dev,
2525 struct ieee80211_channel *chan,
2526 enum nl80211_channel_type channel_type)
2527{
2528 struct ath6kl_vif *vif;
2529
2530 /*
2531 * 'dev' could be NULL if a channel change is required for the hardware
2532 * device itself, instead of a particular VIF.
2533 *
2534 * FIXME: To be handled properly when monitor mode is supported.
2535 */
2536 if (!dev)
2537 return -EBUSY;
2538
2539 vif = netdev_priv(dev);
2540
2541 if (!ath6kl_cfg80211_ready(vif))
2542 return -EIO;
2543
2544 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: center_freq=%u hw_value=%u\n",
2545 __func__, chan->center_freq, chan->hw_value);
2546 vif->next_chan = chan->center_freq;
2547 vif->next_ch_type = channel_type;
2548 vif->next_ch_band = chan->band;
2549
2550 return 0;
2551}
2552
2553static int ath6kl_get_rsn_capab(struct cfg80211_beacon_data *beacon,
2554 u8 *rsn_capab)
2555{
2556 const u8 *rsn_ie;
2557 size_t rsn_ie_len;
2558 u16 cnt;
2559
2560 if (!beacon->tail)
2561 return -EINVAL;
2562
2563 rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, beacon->tail, beacon->tail_len);
2564 if (!rsn_ie)
2565 return -EINVAL;
2566
2567 rsn_ie_len = *(rsn_ie + 1);
2568 /* skip element id and length */
2569 rsn_ie += 2;
2570
2571 /* skip version, group cipher */
2572 if (rsn_ie_len < 6)
2573 return -EINVAL;
2574 rsn_ie += 6;
2575 rsn_ie_len -= 6;
2576
2577 /* skip pairwise cipher suite */
2578 if (rsn_ie_len < 2)
2579 return -EINVAL;
2580 cnt = *((u16 *) rsn_ie);
2581 rsn_ie += (2 + cnt * 4);
2582 rsn_ie_len -= (2 + cnt * 4);
2583
2584 /* skip akm suite */
2585 if (rsn_ie_len < 2)
2586 return -EINVAL;
2587 cnt = *((u16 *) rsn_ie);
2588 rsn_ie += (2 + cnt * 4);
2589 rsn_ie_len -= (2 + cnt * 4);
2590
2591 if (rsn_ie_len < 2)
2592 return -EINVAL;
2593
2594 memcpy(rsn_capab, rsn_ie, 2);
2595
2596 return 0;
2597}
2598
2394static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev, 2599static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
2395 struct cfg80211_ap_settings *info) 2600 struct cfg80211_ap_settings *info)
2396{ 2601{
@@ -2403,6 +2608,7 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
2403 struct wmi_connect_cmd p; 2608 struct wmi_connect_cmd p;
2404 int res; 2609 int res;
2405 int i, ret; 2610 int i, ret;
2611 u16 rsn_capab = 0;
2406 2612
2407 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s:\n", __func__); 2613 ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s:\n", __func__);
2408 2614
@@ -2532,6 +2738,34 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
2532 p.nw_subtype = SUBTYPE_NONE; 2738 p.nw_subtype = SUBTYPE_NONE;
2533 } 2739 }
2534 2740
2741 if (info->inactivity_timeout) {
2742 res = ath6kl_wmi_set_inact_period(ar->wmi, vif->fw_vif_idx,
2743 info->inactivity_timeout);
2744 if (res < 0)
2745 return res;
2746 }
2747
2748 if (ath6kl_set_htcap(vif, vif->next_ch_band,
2749 vif->next_ch_type != NL80211_CHAN_NO_HT))
2750 return -EIO;
2751
2752 /*
2753 * Get the PTKSA replay counter in the RSN IE. Supplicant
2754 * will use the RSN IE in M3 message and firmware has to
2755 * advertise the same in beacon/probe response. Send
2756 * the complete RSN IE capability field to firmware
2757 */
2758 if (!ath6kl_get_rsn_capab(&info->beacon, (u8 *) &rsn_capab) &&
2759 test_bit(ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE,
2760 ar->fw_capabilities)) {
2761 res = ath6kl_wmi_set_ie_cmd(ar->wmi, vif->fw_vif_idx,
2762 WLAN_EID_RSN, WMI_RSN_IE_CAPB,
2763 (const u8 *) &rsn_capab,
2764 sizeof(rsn_capab));
2765 if (res < 0)
2766 return res;
2767 }
2768
2535 res = ath6kl_wmi_ap_profile_commit(ar->wmi, vif->fw_vif_idx, &p); 2769 res = ath6kl_wmi_ap_profile_commit(ar->wmi, vif->fw_vif_idx, &p);
2536 if (res < 0) 2770 if (res < 0)
2537 return res; 2771 return res;
@@ -2566,6 +2800,13 @@ static int ath6kl_stop_ap(struct wiphy *wiphy, struct net_device *dev)
2566 ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx); 2800 ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx);
2567 clear_bit(CONNECTED, &vif->flags); 2801 clear_bit(CONNECTED, &vif->flags);
2568 2802
2803 /* Restore ht setting in firmware */
2804 if (ath6kl_set_htcap(vif, IEEE80211_BAND_2GHZ, true))
2805 return -EIO;
2806
2807 if (ath6kl_set_htcap(vif, IEEE80211_BAND_5GHZ, true))
2808 return -EIO;
2809
2569 return 0; 2810 return 0;
2570} 2811}
2571 2812
@@ -2747,6 +2988,21 @@ static bool ath6kl_mgmt_powersave_ap(struct ath6kl_vif *vif,
2747 return false; 2988 return false;
2748} 2989}
2749 2990
2991/* Check if SSID length is greater than DIRECT- */
2992static bool ath6kl_is_p2p_go_ssid(const u8 *buf, size_t len)
2993{
2994 const struct ieee80211_mgmt *mgmt;
2995 mgmt = (const struct ieee80211_mgmt *) buf;
2996
2997 /* variable[1] contains the SSID tag length */
2998 if (buf + len >= &mgmt->u.probe_resp.variable[1] &&
2999 (mgmt->u.probe_resp.variable[1] > P2P_WILDCARD_SSID_LEN)) {
3000 return true;
3001 }
3002
3003 return false;
3004}
3005
2750static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct net_device *dev, 3006static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
2751 struct ieee80211_channel *chan, bool offchan, 3007 struct ieee80211_channel *chan, bool offchan,
2752 enum nl80211_channel_type channel_type, 3008 enum nl80211_channel_type channel_type,
@@ -2761,11 +3017,11 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
2761 bool more_data, queued; 3017 bool more_data, queued;
2762 3018
2763 mgmt = (const struct ieee80211_mgmt *) buf; 3019 mgmt = (const struct ieee80211_mgmt *) buf;
2764 if (buf + len >= mgmt->u.probe_resp.variable && 3020 if (vif->nw_type == AP_NETWORK && test_bit(CONNECTED, &vif->flags) &&
2765 vif->nw_type == AP_NETWORK && test_bit(CONNECTED, &vif->flags) && 3021 ieee80211_is_probe_resp(mgmt->frame_control) &&
2766 ieee80211_is_probe_resp(mgmt->frame_control)) { 3022 ath6kl_is_p2p_go_ssid(buf, len)) {
2767 /* 3023 /*
2768 * Send Probe Response frame in AP mode using a separate WMI 3024 * Send Probe Response frame in GO mode using a separate WMI
2769 * command to allow the target to fill in the generic IEs. 3025 * command to allow the target to fill in the generic IEs.
2770 */ 3026 */
2771 *cookie = 0; /* TX status not supported */ 3027 *cookie = 0; /* TX status not supported */
@@ -2833,6 +3089,8 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
2833 if (vif->sme_state != SME_DISCONNECTED) 3089 if (vif->sme_state != SME_DISCONNECTED)
2834 return -EBUSY; 3090 return -EBUSY;
2835 3091
3092 ath6kl_cfg80211_scan_complete_event(vif, true);
3093
2836 for (i = 0; i < ar->wiphy->max_sched_scan_ssids; i++) { 3094 for (i = 0; i < ar->wiphy->max_sched_scan_ssids; i++) {
2837 ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, 3095 ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
2838 i, DISABLE_SSID_FLAG, 3096 i, DISABLE_SSID_FLAG,
@@ -3094,6 +3352,7 @@ struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
3094 vif->next_mode = nw_type; 3352 vif->next_mode = nw_type;
3095 vif->listen_intvl_t = ATH6KL_DEFAULT_LISTEN_INTVAL; 3353 vif->listen_intvl_t = ATH6KL_DEFAULT_LISTEN_INTVAL;
3096 vif->bmiss_time_t = ATH6KL_DEFAULT_BMISS_TIME; 3354 vif->bmiss_time_t = ATH6KL_DEFAULT_BMISS_TIME;
3355 vif->htcap.ht_enable = true;
3097 3356
3098 memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN); 3357 memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
3099 if (fw_vif_idx != 0) 3358 if (fw_vif_idx != 0)
@@ -3181,6 +3440,10 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
3181 if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN, ar->fw_capabilities)) 3440 if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN, ar->fw_capabilities))
3182 ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN; 3441 ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
3183 3442
3443 if (test_bit(ATH6KL_FW_CAPABILITY_INACTIVITY_TIMEOUT,
3444 ar->fw_capabilities))
3445 ar->wiphy->features = NL80211_FEATURE_INACTIVITY_TIMER;
3446
3184 ar->wiphy->probe_resp_offload = 3447 ar->wiphy->probe_resp_offload =
3185 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | 3448 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
3186 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | 3449 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
diff --git a/drivers/net/wireless/ath/ath6kl/common.h b/drivers/net/wireless/ath/ath6kl/common.h
index a60e78c0472f..98a886154d9c 100644
--- a/drivers/net/wireless/ath/ath6kl/common.h
+++ b/drivers/net/wireless/ath/ath6kl/common.h
@@ -22,7 +22,8 @@
22 22
23#define ATH6KL_MAX_IE 256 23#define ATH6KL_MAX_IE 256
24 24
25extern int ath6kl_printk(const char *level, const char *fmt, ...); 25extern __printf(2, 3)
26int ath6kl_printk(const char *level, const char *fmt, ...);
26 27
27/* 28/*
28 * Reflects the version of binary interface exposed by ATH6KL target 29 * Reflects the version of binary interface exposed by ATH6KL target
@@ -77,6 +78,7 @@ enum crypto_type {
77 78
78struct htc_endpoint_credit_dist; 79struct htc_endpoint_credit_dist;
79struct ath6kl; 80struct ath6kl;
81struct ath6kl_htcap;
80enum htc_credit_dist_reason; 82enum htc_credit_dist_reason;
81struct ath6kl_htc_credit_info; 83struct ath6kl_htc_credit_info;
82 84
diff --git a/drivers/net/wireless/ath/ath6kl/core.c b/drivers/net/wireless/ath/ath6kl/core.c
index 45e641f3a41b..fdb3b1decc76 100644
--- a/drivers/net/wireless/ath/ath6kl/core.c
+++ b/drivers/net/wireless/ath/ath6kl/core.c
@@ -20,9 +20,11 @@
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/moduleparam.h> 21#include <linux/moduleparam.h>
22#include <linux/export.h> 22#include <linux/export.h>
23#include <linux/vmalloc.h>
23 24
24#include "debug.h" 25#include "debug.h"
25#include "hif-ops.h" 26#include "hif-ops.h"
27#include "htc-ops.h"
26#include "cfg80211.h" 28#include "cfg80211.h"
27 29
28unsigned int debug_mask; 30unsigned int debug_mask;
@@ -39,12 +41,36 @@ module_param(uart_debug, uint, 0644);
39module_param(ath6kl_p2p, uint, 0644); 41module_param(ath6kl_p2p, uint, 0644);
40module_param(testmode, uint, 0644); 42module_param(testmode, uint, 0644);
41 43
42int ath6kl_core_init(struct ath6kl *ar) 44void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
45{
46 ath6kl_htc_tx_complete(ar, skb);
47}
48EXPORT_SYMBOL(ath6kl_core_tx_complete);
49
50void ath6kl_core_rx_complete(struct ath6kl *ar, struct sk_buff *skb, u8 pipe)
51{
52 ath6kl_htc_rx_complete(ar, skb, pipe);
53}
54EXPORT_SYMBOL(ath6kl_core_rx_complete);
55
56int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type)
43{ 57{
44 struct ath6kl_bmi_target_info targ_info; 58 struct ath6kl_bmi_target_info targ_info;
45 struct net_device *ndev; 59 struct net_device *ndev;
46 int ret = 0, i; 60 int ret = 0, i;
47 61
62 switch (htc_type) {
63 case ATH6KL_HTC_TYPE_MBOX:
64 ath6kl_htc_mbox_attach(ar);
65 break;
66 case ATH6KL_HTC_TYPE_PIPE:
67 ath6kl_htc_pipe_attach(ar);
68 break;
69 default:
70 WARN_ON(1);
71 return -ENOMEM;
72 }
73
48 ar->ath6kl_wq = create_singlethread_workqueue("ath6kl"); 74 ar->ath6kl_wq = create_singlethread_workqueue("ath6kl");
49 if (!ar->ath6kl_wq) 75 if (!ar->ath6kl_wq)
50 return -ENOMEM; 76 return -ENOMEM;
@@ -280,7 +306,7 @@ void ath6kl_core_cleanup(struct ath6kl *ar)
280 306
281 kfree(ar->fw_board); 307 kfree(ar->fw_board);
282 kfree(ar->fw_otp); 308 kfree(ar->fw_otp);
283 kfree(ar->fw); 309 vfree(ar->fw);
284 kfree(ar->fw_patch); 310 kfree(ar->fw_patch);
285 kfree(ar->fw_testscript); 311 kfree(ar->fw_testscript);
286 312
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index f1dd8906be45..9d67964a51dd 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -91,6 +91,15 @@ enum ath6kl_fw_capability {
91 */ 91 */
92 ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX, 92 ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
93 93
94 /*
95 * Firmware has support to cleanup inactive stations
96 * in AP mode.
97 */
98 ATH6KL_FW_CAPABILITY_INACTIVITY_TIMEOUT,
99
100 /* Firmware has support to override rsn cap of rsn ie */
101 ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE,
102
94 /* this needs to be last */ 103 /* this needs to be last */
95 ATH6KL_FW_CAPABILITY_MAX, 104 ATH6KL_FW_CAPABILITY_MAX,
96}; 105};
@@ -205,6 +214,8 @@ struct ath6kl_fw_ie {
205#define ATH6KL_CONF_ENABLE_TX_BURST BIT(3) 214#define ATH6KL_CONF_ENABLE_TX_BURST BIT(3)
206#define ATH6KL_CONF_UART_DEBUG BIT(4) 215#define ATH6KL_CONF_UART_DEBUG BIT(4)
207 216
217#define P2P_WILDCARD_SSID_LEN 7 /* DIRECT- */
218
208enum wlan_low_pwr_state { 219enum wlan_low_pwr_state {
209 WLAN_POWER_STATE_ON, 220 WLAN_POWER_STATE_ON,
210 WLAN_POWER_STATE_CUT_PWR, 221 WLAN_POWER_STATE_CUT_PWR,
@@ -454,6 +465,11 @@ enum ath6kl_hif_type {
454 ATH6KL_HIF_TYPE_USB, 465 ATH6KL_HIF_TYPE_USB,
455}; 466};
456 467
468enum ath6kl_htc_type {
469 ATH6KL_HTC_TYPE_MBOX,
470 ATH6KL_HTC_TYPE_PIPE,
471};
472
457/* Max number of filters that hw supports */ 473/* Max number of filters that hw supports */
458#define ATH6K_MAX_MC_FILTERS_PER_LIST 7 474#define ATH6K_MAX_MC_FILTERS_PER_LIST 7
459struct ath6kl_mc_filter { 475struct ath6kl_mc_filter {
@@ -461,6 +477,12 @@ struct ath6kl_mc_filter {
461 char hw_addr[ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE]; 477 char hw_addr[ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE];
462}; 478};
463 479
480struct ath6kl_htcap {
481 bool ht_enable;
482 u8 ampdu_factor;
483 unsigned short cap_info;
484};
485
464/* 486/*
465 * Driver's maximum limit, note that some firmwares support only one vif 487 * Driver's maximum limit, note that some firmwares support only one vif
466 * and the runtime (current) limit must be checked from ar->vif_max. 488 * and the runtime (current) limit must be checked from ar->vif_max.
@@ -509,6 +531,7 @@ struct ath6kl_vif {
509 struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1]; 531 struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1];
510 struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1]; 532 struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1];
511 struct aggr_info *aggr_cntxt; 533 struct aggr_info *aggr_cntxt;
534 struct ath6kl_htcap htcap;
512 535
513 struct timer_list disconnect_timer; 536 struct timer_list disconnect_timer;
514 struct timer_list sched_scan_timer; 537 struct timer_list sched_scan_timer;
@@ -521,6 +544,8 @@ struct ath6kl_vif {
521 u32 send_action_id; 544 u32 send_action_id;
522 bool probe_req_report; 545 bool probe_req_report;
523 u16 next_chan; 546 u16 next_chan;
547 enum nl80211_channel_type next_ch_type;
548 enum ieee80211_band next_ch_band;
524 u16 assoc_bss_beacon_int; 549 u16 assoc_bss_beacon_int;
525 u16 listen_intvl_t; 550 u16 listen_intvl_t;
526 u16 bmiss_time_t; 551 u16 bmiss_time_t;
@@ -568,6 +593,7 @@ struct ath6kl {
568 593
569 struct ath6kl_bmi bmi; 594 struct ath6kl_bmi bmi;
570 const struct ath6kl_hif_ops *hif_ops; 595 const struct ath6kl_hif_ops *hif_ops;
596 const struct ath6kl_htc_ops *htc_ops;
571 struct wmi *wmi; 597 struct wmi *wmi;
572 int tx_pending[ENDPOINT_MAX]; 598 int tx_pending[ENDPOINT_MAX];
573 int total_tx_data_pend; 599 int total_tx_data_pend;
@@ -746,7 +772,8 @@ void init_netdev(struct net_device *dev);
746void ath6kl_cookie_init(struct ath6kl *ar); 772void ath6kl_cookie_init(struct ath6kl *ar);
747void ath6kl_cookie_cleanup(struct ath6kl *ar); 773void ath6kl_cookie_cleanup(struct ath6kl *ar);
748void ath6kl_rx(struct htc_target *target, struct htc_packet *packet); 774void ath6kl_rx(struct htc_target *target, struct htc_packet *packet);
749void ath6kl_tx_complete(void *context, struct list_head *packet_queue); 775void ath6kl_tx_complete(struct htc_target *context,
776 struct list_head *packet_queue);
750enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target, 777enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
751 struct htc_packet *packet); 778 struct htc_packet *packet);
752void ath6kl_stop_txrx(struct ath6kl *ar); 779void ath6kl_stop_txrx(struct ath6kl *ar);
@@ -821,8 +848,11 @@ int ath6kl_init_hw_params(struct ath6kl *ar);
821 848
822void ath6kl_check_wow_status(struct ath6kl *ar); 849void ath6kl_check_wow_status(struct ath6kl *ar);
823 850
851void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb);
852void ath6kl_core_rx_complete(struct ath6kl *ar, struct sk_buff *skb, u8 pipe);
853
824struct ath6kl *ath6kl_core_create(struct device *dev); 854struct ath6kl *ath6kl_core_create(struct device *dev);
825int ath6kl_core_init(struct ath6kl *ar); 855int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type);
826void ath6kl_core_cleanup(struct ath6kl *ar); 856void ath6kl_core_cleanup(struct ath6kl *ar);
827void ath6kl_core_destroy(struct ath6kl *ar); 857void ath6kl_core_destroy(struct ath6kl *ar);
828 858
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index d01403a263ff..1b76aff78508 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -616,6 +616,12 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
616 "Num disconnects", tgt_stats->cs_discon_cnt); 616 "Num disconnects", tgt_stats->cs_discon_cnt);
617 len += scnprintf(buf + len, buf_len - len, "%20s %10d\n", 617 len += scnprintf(buf + len, buf_len - len, "%20s %10d\n",
618 "Beacon avg rssi", tgt_stats->cs_ave_beacon_rssi); 618 "Beacon avg rssi", tgt_stats->cs_ave_beacon_rssi);
619 len += scnprintf(buf + len, buf_len - len, "%20s %10d\n",
620 "ARP pkt received", tgt_stats->arp_received);
621 len += scnprintf(buf + len, buf_len - len, "%20s %10d\n",
622 "ARP pkt matched", tgt_stats->arp_matched);
623 len += scnprintf(buf + len, buf_len - len, "%20s %10d\n",
624 "ARP pkt replied", tgt_stats->arp_replied);
619 625
620 if (len > buf_len) 626 if (len > buf_len)
621 len = buf_len; 627 len = buf_len;
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
index 1803a0baae82..49639d8266c2 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.h
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -43,6 +43,7 @@ enum ATH6K_DEBUG_MASK {
43 ATH6KL_DBG_WMI_DUMP = BIT(19), 43 ATH6KL_DBG_WMI_DUMP = BIT(19),
44 ATH6KL_DBG_SUSPEND = BIT(20), 44 ATH6KL_DBG_SUSPEND = BIT(20),
45 ATH6KL_DBG_USB = BIT(21), 45 ATH6KL_DBG_USB = BIT(21),
46 ATH6KL_DBG_USB_BULK = BIT(22),
46 ATH6KL_DBG_ANY = 0xffffffff /* enable all logs */ 47 ATH6KL_DBG_ANY = 0xffffffff /* enable all logs */
47}; 48};
48 49
diff --git a/drivers/net/wireless/ath/ath6kl/hif-ops.h b/drivers/net/wireless/ath/ath6kl/hif-ops.h
index fd84086638e3..8c9e72d5250d 100644
--- a/drivers/net/wireless/ath/ath6kl/hif-ops.h
+++ b/drivers/net/wireless/ath/ath6kl/hif-ops.h
@@ -150,4 +150,38 @@ static inline void ath6kl_hif_stop(struct ath6kl *ar)
150 ar->hif_ops->stop(ar); 150 ar->hif_ops->stop(ar);
151} 151}
152 152
153static inline int ath6kl_hif_pipe_send(struct ath6kl *ar,
154 u8 pipe, struct sk_buff *hdr_buf,
155 struct sk_buff *buf)
156{
157 ath6kl_dbg(ATH6KL_DBG_HIF, "hif pipe send\n");
158
159 return ar->hif_ops->pipe_send(ar, pipe, hdr_buf, buf);
160}
161
162static inline void ath6kl_hif_pipe_get_default(struct ath6kl *ar,
163 u8 *ul_pipe, u8 *dl_pipe)
164{
165 ath6kl_dbg(ATH6KL_DBG_HIF, "hif pipe get default\n");
166
167 ar->hif_ops->pipe_get_default(ar, ul_pipe, dl_pipe);
168}
169
170static inline int ath6kl_hif_pipe_map_service(struct ath6kl *ar,
171 u16 service_id, u8 *ul_pipe,
172 u8 *dl_pipe)
173{
174 ath6kl_dbg(ATH6KL_DBG_HIF, "hif pipe get default\n");
175
176 return ar->hif_ops->pipe_map_service(ar, service_id, ul_pipe, dl_pipe);
177}
178
179static inline u16 ath6kl_hif_pipe_get_free_queue_number(struct ath6kl *ar,
180 u8 pipe)
181{
182 ath6kl_dbg(ATH6KL_DBG_HIF, "hif pipe get free queue number\n");
183
184 return ar->hif_ops->pipe_get_free_queue_number(ar, pipe);
185}
186
153#endif 187#endif
diff --git a/drivers/net/wireless/ath/ath6kl/hif.h b/drivers/net/wireless/ath/ath6kl/hif.h
index 20ed6b73517b..61f6b21fb0ae 100644
--- a/drivers/net/wireless/ath/ath6kl/hif.h
+++ b/drivers/net/wireless/ath/ath6kl/hif.h
@@ -256,6 +256,12 @@ struct ath6kl_hif_ops {
256 int (*power_on)(struct ath6kl *ar); 256 int (*power_on)(struct ath6kl *ar);
257 int (*power_off)(struct ath6kl *ar); 257 int (*power_off)(struct ath6kl *ar);
258 void (*stop)(struct ath6kl *ar); 258 void (*stop)(struct ath6kl *ar);
259 int (*pipe_send)(struct ath6kl *ar, u8 pipe, struct sk_buff *hdr_buf,
260 struct sk_buff *buf);
261 void (*pipe_get_default)(struct ath6kl *ar, u8 *pipe_ul, u8 *pipe_dl);
262 int (*pipe_map_service)(struct ath6kl *ar, u16 service_id, u8 *pipe_ul,
263 u8 *pipe_dl);
264 u16 (*pipe_get_free_queue_number)(struct ath6kl *ar, u8 pipe);
259}; 265};
260 266
261int ath6kl_hif_setup(struct ath6kl_device *dev); 267int ath6kl_hif_setup(struct ath6kl_device *dev);
diff --git a/drivers/net/wireless/ath/ath6kl/htc-ops.h b/drivers/net/wireless/ath/ath6kl/htc-ops.h
new file mode 100644
index 000000000000..2d4eed55cfd1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/htc-ops.h
@@ -0,0 +1,113 @@
1/*
2 * Copyright (c) 2004-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef HTC_OPS_H
18#define HTC_OPS_H
19
20#include "htc.h"
21#include "debug.h"
22
23static inline void *ath6kl_htc_create(struct ath6kl *ar)
24{
25 return ar->htc_ops->create(ar);
26}
27
28static inline int ath6kl_htc_wait_target(struct htc_target *target)
29{
30 return target->dev->ar->htc_ops->wait_target(target);
31}
32
33static inline int ath6kl_htc_start(struct htc_target *target)
34{
35 return target->dev->ar->htc_ops->start(target);
36}
37
38static inline int ath6kl_htc_conn_service(struct htc_target *target,
39 struct htc_service_connect_req *req,
40 struct htc_service_connect_resp *resp)
41{
42 return target->dev->ar->htc_ops->conn_service(target, req, resp);
43}
44
45static inline int ath6kl_htc_tx(struct htc_target *target,
46 struct htc_packet *packet)
47{
48 return target->dev->ar->htc_ops->tx(target, packet);
49}
50
51static inline void ath6kl_htc_stop(struct htc_target *target)
52{
53 return target->dev->ar->htc_ops->stop(target);
54}
55
56static inline void ath6kl_htc_cleanup(struct htc_target *target)
57{
58 return target->dev->ar->htc_ops->cleanup(target);
59}
60
61static inline void ath6kl_htc_flush_txep(struct htc_target *target,
62 enum htc_endpoint_id endpoint,
63 u16 tag)
64{
65 return target->dev->ar->htc_ops->flush_txep(target, endpoint, tag);
66}
67
68static inline void ath6kl_htc_flush_rx_buf(struct htc_target *target)
69{
70 return target->dev->ar->htc_ops->flush_rx_buf(target);
71}
72
73static inline void ath6kl_htc_activity_changed(struct htc_target *target,
74 enum htc_endpoint_id endpoint,
75 bool active)
76{
77 return target->dev->ar->htc_ops->activity_changed(target, endpoint,
78 active);
79}
80
81static inline int ath6kl_htc_get_rxbuf_num(struct htc_target *target,
82 enum htc_endpoint_id endpoint)
83{
84 return target->dev->ar->htc_ops->get_rxbuf_num(target, endpoint);
85}
86
87static inline int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
88 struct list_head *pktq)
89{
90 return target->dev->ar->htc_ops->add_rxbuf_multiple(target, pktq);
91}
92
93static inline int ath6kl_htc_credit_setup(struct htc_target *target,
94 struct ath6kl_htc_credit_info *info)
95{
96 return target->dev->ar->htc_ops->credit_setup(target, info);
97}
98
99static inline void ath6kl_htc_tx_complete(struct ath6kl *ar,
100 struct sk_buff *skb)
101{
102 ar->htc_ops->tx_complete(ar, skb);
103}
104
105
106static inline void ath6kl_htc_rx_complete(struct ath6kl *ar,
107 struct sk_buff *skb, u8 pipe)
108{
109 ar->htc_ops->rx_complete(ar, skb, pipe);
110}
111
112
113#endif
diff --git a/drivers/net/wireless/ath/ath6kl/htc.h b/drivers/net/wireless/ath/ath6kl/htc.h
index 5027ccc36b62..a2c8ff809793 100644
--- a/drivers/net/wireless/ath/ath6kl/htc.h
+++ b/drivers/net/wireless/ath/ath6kl/htc.h
@@ -25,6 +25,7 @@
25/* send direction */ 25/* send direction */
26#define HTC_FLAGS_NEED_CREDIT_UPDATE (1 << 0) 26#define HTC_FLAGS_NEED_CREDIT_UPDATE (1 << 0)
27#define HTC_FLAGS_SEND_BUNDLE (1 << 1) 27#define HTC_FLAGS_SEND_BUNDLE (1 << 1)
28#define HTC_FLAGS_TX_FIXUP_NETBUF (1 << 2)
28 29
29/* receive direction */ 30/* receive direction */
30#define HTC_FLG_RX_UNUSED (1 << 0) 31#define HTC_FLG_RX_UNUSED (1 << 0)
@@ -56,6 +57,10 @@
56#define HTC_CONN_FLGS_THRESH_LVL_THREE_QUAT 0x2 57#define HTC_CONN_FLGS_THRESH_LVL_THREE_QUAT 0x2
57#define HTC_CONN_FLGS_REDUCE_CRED_DRIB 0x4 58#define HTC_CONN_FLGS_REDUCE_CRED_DRIB 0x4
58#define HTC_CONN_FLGS_THRESH_MASK 0x3 59#define HTC_CONN_FLGS_THRESH_MASK 0x3
60/* disable credit flow control on a specific service */
61#define HTC_CONN_FLGS_DISABLE_CRED_FLOW_CTRL (1 << 3)
62#define HTC_CONN_FLGS_SET_RECV_ALLOC_SHIFT 8
63#define HTC_CONN_FLGS_SET_RECV_ALLOC_MASK 0xFF00
59 64
60/* connect response status codes */ 65/* connect response status codes */
61#define HTC_SERVICE_SUCCESS 0 66#define HTC_SERVICE_SUCCESS 0
@@ -75,6 +80,7 @@
75#define HTC_RECORD_LOOKAHEAD_BUNDLE 3 80#define HTC_RECORD_LOOKAHEAD_BUNDLE 3
76 81
77#define HTC_SETUP_COMP_FLG_RX_BNDL_EN (1 << 0) 82#define HTC_SETUP_COMP_FLG_RX_BNDL_EN (1 << 0)
83#define HTC_SETUP_COMP_FLG_DISABLE_TX_CREDIT_FLOW (1 << 1)
78 84
79#define MAKE_SERVICE_ID(group, index) \ 85#define MAKE_SERVICE_ID(group, index) \
80 (int)(((int)group << 8) | (int)(index)) 86 (int)(((int)group << 8) | (int)(index))
@@ -109,6 +115,8 @@
109 115
110/* HTC operational parameters */ 116/* HTC operational parameters */
111#define HTC_TARGET_RESPONSE_TIMEOUT 2000 /* in ms */ 117#define HTC_TARGET_RESPONSE_TIMEOUT 2000 /* in ms */
118#define HTC_TARGET_RESPONSE_POLL_WAIT 10
119#define HTC_TARGET_RESPONSE_POLL_COUNT 200
112#define HTC_TARGET_DEBUG_INTR_MASK 0x01 120#define HTC_TARGET_DEBUG_INTR_MASK 0x01
113#define HTC_TARGET_CREDIT_INTR_MASK 0xF0 121#define HTC_TARGET_CREDIT_INTR_MASK 0xF0
114 122
@@ -128,6 +136,7 @@
128 136
129#define HTC_RECV_WAIT_BUFFERS (1 << 0) 137#define HTC_RECV_WAIT_BUFFERS (1 << 0)
130#define HTC_OP_STATE_STOPPING (1 << 0) 138#define HTC_OP_STATE_STOPPING (1 << 0)
139#define HTC_OP_STATE_SETUP_COMPLETE (1 << 1)
131 140
132/* 141/*
133 * The frame header length and message formats defined herein were selected 142 * The frame header length and message formats defined herein were selected
@@ -311,6 +320,14 @@ struct htc_packet {
311 320
312 void (*completion) (struct htc_target *, struct htc_packet *); 321 void (*completion) (struct htc_target *, struct htc_packet *);
313 struct htc_target *context; 322 struct htc_target *context;
323
324 /*
325 * optimization for network-oriented data, the HTC packet
326 * can pass the network buffer corresponding to the HTC packet
327 * lower layers may optimized the transfer knowing this is
328 * a network buffer
329 */
330 struct sk_buff *skb;
314}; 331};
315 332
316enum htc_send_full_action { 333enum htc_send_full_action {
@@ -319,12 +336,14 @@ enum htc_send_full_action {
319}; 336};
320 337
321struct htc_ep_callbacks { 338struct htc_ep_callbacks {
339 void (*tx_complete) (struct htc_target *, struct htc_packet *);
322 void (*rx) (struct htc_target *, struct htc_packet *); 340 void (*rx) (struct htc_target *, struct htc_packet *);
323 void (*rx_refill) (struct htc_target *, enum htc_endpoint_id endpoint); 341 void (*rx_refill) (struct htc_target *, enum htc_endpoint_id endpoint);
324 enum htc_send_full_action (*tx_full) (struct htc_target *, 342 enum htc_send_full_action (*tx_full) (struct htc_target *,
325 struct htc_packet *); 343 struct htc_packet *);
326 struct htc_packet *(*rx_allocthresh) (struct htc_target *, 344 struct htc_packet *(*rx_allocthresh) (struct htc_target *,
327 enum htc_endpoint_id, int); 345 enum htc_endpoint_id, int);
346 void (*tx_comp_multi) (struct htc_target *, struct list_head *);
328 int rx_alloc_thresh; 347 int rx_alloc_thresh;
329 int rx_refill_thresh; 348 int rx_refill_thresh;
330}; 349};
@@ -502,6 +521,13 @@ struct htc_endpoint {
502 u32 conn_flags; 521 u32 conn_flags;
503 struct htc_endpoint_stats ep_st; 522 struct htc_endpoint_stats ep_st;
504 u16 tx_drop_packet_threshold; 523 u16 tx_drop_packet_threshold;
524
525 struct {
526 u8 pipeid_ul;
527 u8 pipeid_dl;
528 struct list_head tx_lookup_queue;
529 bool tx_credit_flow_enabled;
530 } pipe;
505}; 531};
506 532
507struct htc_control_buffer { 533struct htc_control_buffer {
@@ -509,6 +535,42 @@ struct htc_control_buffer {
509 u8 *buf; 535 u8 *buf;
510}; 536};
511 537
538struct htc_pipe_txcredit_alloc {
539 u16 service_id;
540 u8 credit_alloc;
541};
542
543enum htc_send_queue_result {
544 HTC_SEND_QUEUE_OK = 0, /* packet was queued */
545 HTC_SEND_QUEUE_DROP = 1, /* this packet should be dropped */
546};
547
548struct ath6kl_htc_ops {
549 void* (*create)(struct ath6kl *ar);
550 int (*wait_target)(struct htc_target *target);
551 int (*start)(struct htc_target *target);
552 int (*conn_service)(struct htc_target *target,
553 struct htc_service_connect_req *req,
554 struct htc_service_connect_resp *resp);
555 int (*tx)(struct htc_target *target, struct htc_packet *packet);
556 void (*stop)(struct htc_target *target);
557 void (*cleanup)(struct htc_target *target);
558 void (*flush_txep)(struct htc_target *target,
559 enum htc_endpoint_id endpoint, u16 tag);
560 void (*flush_rx_buf)(struct htc_target *target);
561 void (*activity_changed)(struct htc_target *target,
562 enum htc_endpoint_id endpoint,
563 bool active);
564 int (*get_rxbuf_num)(struct htc_target *target,
565 enum htc_endpoint_id endpoint);
566 int (*add_rxbuf_multiple)(struct htc_target *target,
567 struct list_head *pktq);
568 int (*credit_setup)(struct htc_target *target,
569 struct ath6kl_htc_credit_info *cred_info);
570 int (*tx_complete)(struct ath6kl *ar, struct sk_buff *skb);
571 int (*rx_complete)(struct ath6kl *ar, struct sk_buff *skb, u8 pipe);
572};
573
512struct ath6kl_device; 574struct ath6kl_device;
513 575
514/* our HTC target state */ 576/* our HTC target state */
@@ -557,36 +619,19 @@ struct htc_target {
557 619
558 /* counts the number of Tx without bundling continously per AC */ 620 /* counts the number of Tx without bundling continously per AC */
559 u32 ac_tx_count[WMM_NUM_AC]; 621 u32 ac_tx_count[WMM_NUM_AC];
622
623 struct {
624 struct htc_packet *htc_packet_pool;
625 u8 ctrl_response_buf[HTC_MAX_CTRL_MSG_LEN];
626 int ctrl_response_len;
627 bool ctrl_response_valid;
628 struct htc_pipe_txcredit_alloc txcredit_alloc[ENDPOINT_MAX];
629 } pipe;
560}; 630};
561 631
562void *ath6kl_htc_create(struct ath6kl *ar);
563void ath6kl_htc_set_credit_dist(struct htc_target *target,
564 struct ath6kl_htc_credit_info *cred_info,
565 u16 svc_pri_order[], int len);
566int ath6kl_htc_wait_target(struct htc_target *target);
567int ath6kl_htc_start(struct htc_target *target);
568int ath6kl_htc_conn_service(struct htc_target *target,
569 struct htc_service_connect_req *req,
570 struct htc_service_connect_resp *resp);
571int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet);
572void ath6kl_htc_stop(struct htc_target *target);
573void ath6kl_htc_cleanup(struct htc_target *target);
574void ath6kl_htc_flush_txep(struct htc_target *target,
575 enum htc_endpoint_id endpoint, u16 tag);
576void ath6kl_htc_flush_rx_buf(struct htc_target *target);
577void ath6kl_htc_indicate_activity_change(struct htc_target *target,
578 enum htc_endpoint_id endpoint,
579 bool active);
580int ath6kl_htc_get_rxbuf_num(struct htc_target *target,
581 enum htc_endpoint_id endpoint);
582int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
583 struct list_head *pktq);
584int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target, 632int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
585 u32 msg_look_ahead, int *n_pkts); 633 u32 msg_look_ahead, int *n_pkts);
586 634
587int ath6kl_credit_setup(void *htc_handle,
588 struct ath6kl_htc_credit_info *cred_info);
589
590static inline void set_htc_pkt_info(struct htc_packet *packet, void *context, 635static inline void set_htc_pkt_info(struct htc_packet *packet, void *context,
591 u8 *buf, unsigned int len, 636 u8 *buf, unsigned int len,
592 enum htc_endpoint_id eid, u16 tag) 637 enum htc_endpoint_id eid, u16 tag)
@@ -626,4 +671,7 @@ static inline int get_queue_depth(struct list_head *queue)
626 return depth; 671 return depth;
627} 672}
628 673
674void ath6kl_htc_pipe_attach(struct ath6kl *ar);
675void ath6kl_htc_mbox_attach(struct ath6kl *ar);
676
629#endif 677#endif
diff --git a/drivers/net/wireless/ath/ath6kl/htc.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
index 4849d99cce77..065e61516d7a 100644
--- a/drivers/net/wireless/ath/ath6kl/htc.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
@@ -23,6 +23,14 @@
23 23
24#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask)) 24#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask))
25 25
26static void ath6kl_htc_mbox_cleanup(struct htc_target *target);
27static void ath6kl_htc_mbox_stop(struct htc_target *target);
28static int ath6kl_htc_mbox_add_rxbuf_multiple(struct htc_target *target,
29 struct list_head *pkt_queue);
30static void ath6kl_htc_set_credit_dist(struct htc_target *target,
31 struct ath6kl_htc_credit_info *cred_info,
32 u16 svc_pri_order[], int len);
33
26/* threshold to re-enable Tx bundling for an AC*/ 34/* threshold to re-enable Tx bundling for an AC*/
27#define TX_RESUME_BUNDLE_THRESHOLD 1500 35#define TX_RESUME_BUNDLE_THRESHOLD 1500
28 36
@@ -130,8 +138,8 @@ static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
130} 138}
131 139
132/* initialize and setup credit distribution */ 140/* initialize and setup credit distribution */
133int ath6kl_credit_setup(void *htc_handle, 141static int ath6kl_htc_mbox_credit_setup(struct htc_target *htc_target,
134 struct ath6kl_htc_credit_info *cred_info) 142 struct ath6kl_htc_credit_info *cred_info)
135{ 143{
136 u16 servicepriority[5]; 144 u16 servicepriority[5];
137 145
@@ -144,7 +152,7 @@ int ath6kl_credit_setup(void *htc_handle,
144 servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */ 152 servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */
145 153
146 /* set priority list */ 154 /* set priority list */
147 ath6kl_htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5); 155 ath6kl_htc_set_credit_dist(htc_target, cred_info, servicepriority, 5);
148 156
149 return 0; 157 return 0;
150} 158}
@@ -432,7 +440,7 @@ static void htc_tx_complete(struct htc_endpoint *endpoint,
432 "htc tx complete ep %d pkts %d\n", 440 "htc tx complete ep %d pkts %d\n",
433 endpoint->eid, get_queue_depth(txq)); 441 endpoint->eid, get_queue_depth(txq));
434 442
435 ath6kl_tx_complete(endpoint->target->dev->ar, txq); 443 ath6kl_tx_complete(endpoint->target, txq);
436} 444}
437 445
438static void htc_tx_comp_handler(struct htc_target *target, 446static void htc_tx_comp_handler(struct htc_target *target,
@@ -1065,7 +1073,7 @@ static int htc_setup_tx_complete(struct htc_target *target)
1065 return status; 1073 return status;
1066} 1074}
1067 1075
1068void ath6kl_htc_set_credit_dist(struct htc_target *target, 1076static void ath6kl_htc_set_credit_dist(struct htc_target *target,
1069 struct ath6kl_htc_credit_info *credit_info, 1077 struct ath6kl_htc_credit_info *credit_info,
1070 u16 srvc_pri_order[], int list_len) 1078 u16 srvc_pri_order[], int list_len)
1071{ 1079{
@@ -1093,7 +1101,8 @@ void ath6kl_htc_set_credit_dist(struct htc_target *target,
1093 } 1101 }
1094} 1102}
1095 1103
1096int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet) 1104static int ath6kl_htc_mbox_tx(struct htc_target *target,
1105 struct htc_packet *packet)
1097{ 1106{
1098 struct htc_endpoint *endpoint; 1107 struct htc_endpoint *endpoint;
1099 struct list_head queue; 1108 struct list_head queue;
@@ -1121,7 +1130,7 @@ int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet)
1121} 1130}
1122 1131
1123/* flush endpoint TX queue */ 1132/* flush endpoint TX queue */
1124void ath6kl_htc_flush_txep(struct htc_target *target, 1133static void ath6kl_htc_mbox_flush_txep(struct htc_target *target,
1125 enum htc_endpoint_id eid, u16 tag) 1134 enum htc_endpoint_id eid, u16 tag)
1126{ 1135{
1127 struct htc_packet *packet, *tmp_pkt; 1136 struct htc_packet *packet, *tmp_pkt;
@@ -1173,12 +1182,13 @@ static void ath6kl_htc_flush_txep_all(struct htc_target *target)
1173 if (endpoint->svc_id == 0) 1182 if (endpoint->svc_id == 0)
1174 /* not in use.. */ 1183 /* not in use.. */
1175 continue; 1184 continue;
1176 ath6kl_htc_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL); 1185 ath6kl_htc_mbox_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
1177 } 1186 }
1178} 1187}
1179 1188
1180void ath6kl_htc_indicate_activity_change(struct htc_target *target, 1189static void ath6kl_htc_mbox_activity_changed(struct htc_target *target,
1181 enum htc_endpoint_id eid, bool active) 1190 enum htc_endpoint_id eid,
1191 bool active)
1182{ 1192{
1183 struct htc_endpoint *endpoint = &target->endpoint[eid]; 1193 struct htc_endpoint *endpoint = &target->endpoint[eid];
1184 bool dist = false; 1194 bool dist = false;
@@ -1246,7 +1256,7 @@ static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet)
1246 1256
1247 INIT_LIST_HEAD(&queue); 1257 INIT_LIST_HEAD(&queue);
1248 list_add_tail(&packet->list, &queue); 1258 list_add_tail(&packet->list, &queue);
1249 return ath6kl_htc_add_rxbuf_multiple(target, &queue); 1259 return ath6kl_htc_mbox_add_rxbuf_multiple(target, &queue);
1250} 1260}
1251 1261
1252static void htc_reclaim_rxbuf(struct htc_target *target, 1262static void htc_reclaim_rxbuf(struct htc_target *target,
@@ -1353,7 +1363,9 @@ static int ath6kl_htc_rx_setup(struct htc_target *target,
1353 sizeof(*htc_hdr)); 1363 sizeof(*htc_hdr));
1354 1364
1355 if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) { 1365 if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
1356 ath6kl_warn("Rx buffer requested with invalid length\n"); 1366 ath6kl_warn("Rx buffer requested with invalid length htc_hdr:eid %d, flags 0x%x, len %d\n",
1367 htc_hdr->eid, htc_hdr->flags,
1368 le16_to_cpu(htc_hdr->payld_len));
1357 return -EINVAL; 1369 return -EINVAL;
1358 } 1370 }
1359 1371
@@ -2288,7 +2300,7 @@ fail_ctrl_rx:
2288 return NULL; 2300 return NULL;
2289} 2301}
2290 2302
2291int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target, 2303static int ath6kl_htc_mbox_add_rxbuf_multiple(struct htc_target *target,
2292 struct list_head *pkt_queue) 2304 struct list_head *pkt_queue)
2293{ 2305{
2294 struct htc_endpoint *endpoint; 2306 struct htc_endpoint *endpoint;
@@ -2350,7 +2362,7 @@ int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
2350 return status; 2362 return status;
2351} 2363}
2352 2364
2353void ath6kl_htc_flush_rx_buf(struct htc_target *target) 2365static void ath6kl_htc_mbox_flush_rx_buf(struct htc_target *target)
2354{ 2366{
2355 struct htc_endpoint *endpoint; 2367 struct htc_endpoint *endpoint;
2356 struct htc_packet *packet, *tmp_pkt; 2368 struct htc_packet *packet, *tmp_pkt;
@@ -2392,7 +2404,7 @@ void ath6kl_htc_flush_rx_buf(struct htc_target *target)
2392 } 2404 }
2393} 2405}
2394 2406
2395int ath6kl_htc_conn_service(struct htc_target *target, 2407static int ath6kl_htc_mbox_conn_service(struct htc_target *target,
2396 struct htc_service_connect_req *conn_req, 2408 struct htc_service_connect_req *conn_req,
2397 struct htc_service_connect_resp *conn_resp) 2409 struct htc_service_connect_resp *conn_resp)
2398{ 2410{
@@ -2564,7 +2576,7 @@ static void reset_ep_state(struct htc_target *target)
2564 INIT_LIST_HEAD(&target->cred_dist_list); 2576 INIT_LIST_HEAD(&target->cred_dist_list);
2565} 2577}
2566 2578
2567int ath6kl_htc_get_rxbuf_num(struct htc_target *target, 2579static int ath6kl_htc_mbox_get_rxbuf_num(struct htc_target *target,
2568 enum htc_endpoint_id endpoint) 2580 enum htc_endpoint_id endpoint)
2569{ 2581{
2570 int num; 2582 int num;
@@ -2624,7 +2636,7 @@ static void htc_setup_msg_bndl(struct htc_target *target)
2624 } 2636 }
2625} 2637}
2626 2638
2627int ath6kl_htc_wait_target(struct htc_target *target) 2639static int ath6kl_htc_mbox_wait_target(struct htc_target *target)
2628{ 2640{
2629 struct htc_packet *packet = NULL; 2641 struct htc_packet *packet = NULL;
2630 struct htc_ready_ext_msg *rdy_msg; 2642 struct htc_ready_ext_msg *rdy_msg;
@@ -2693,12 +2705,12 @@ int ath6kl_htc_wait_target(struct htc_target *target)
2693 connect.svc_id = HTC_CTRL_RSVD_SVC; 2705 connect.svc_id = HTC_CTRL_RSVD_SVC;
2694 2706
2695 /* connect fake service */ 2707 /* connect fake service */
2696 status = ath6kl_htc_conn_service((void *)target, &connect, &resp); 2708 status = ath6kl_htc_mbox_conn_service((void *)target, &connect, &resp);
2697 2709
2698 if (status) 2710 if (status)
2699 /* 2711 /*
2700 * FIXME: this call doesn't make sense, the caller should 2712 * FIXME: this call doesn't make sense, the caller should
2701 * call ath6kl_htc_cleanup() when it wants remove htc 2713 * call ath6kl_htc_mbox_cleanup() when it wants remove htc
2702 */ 2714 */
2703 ath6kl_hif_cleanup_scatter(target->dev->ar); 2715 ath6kl_hif_cleanup_scatter(target->dev->ar);
2704 2716
@@ -2715,7 +2727,7 @@ fail_wait_target:
2715 * Start HTC, enable interrupts and let the target know 2727 * Start HTC, enable interrupts and let the target know
2716 * host has finished setup. 2728 * host has finished setup.
2717 */ 2729 */
2718int ath6kl_htc_start(struct htc_target *target) 2730static int ath6kl_htc_mbox_start(struct htc_target *target)
2719{ 2731{
2720 struct htc_packet *packet; 2732 struct htc_packet *packet;
2721 int status; 2733 int status;
@@ -2752,7 +2764,7 @@ int ath6kl_htc_start(struct htc_target *target)
2752 status = ath6kl_hif_unmask_intrs(target->dev); 2764 status = ath6kl_hif_unmask_intrs(target->dev);
2753 2765
2754 if (status) 2766 if (status)
2755 ath6kl_htc_stop(target); 2767 ath6kl_htc_mbox_stop(target);
2756 2768
2757 return status; 2769 return status;
2758} 2770}
@@ -2796,7 +2808,7 @@ static int ath6kl_htc_reset(struct htc_target *target)
2796} 2808}
2797 2809
2798/* htc_stop: stop interrupt reception, and flush all queued buffers */ 2810/* htc_stop: stop interrupt reception, and flush all queued buffers */
2799void ath6kl_htc_stop(struct htc_target *target) 2811static void ath6kl_htc_mbox_stop(struct htc_target *target)
2800{ 2812{
2801 spin_lock_bh(&target->htc_lock); 2813 spin_lock_bh(&target->htc_lock);
2802 target->htc_flags |= HTC_OP_STATE_STOPPING; 2814 target->htc_flags |= HTC_OP_STATE_STOPPING;
@@ -2811,12 +2823,12 @@ void ath6kl_htc_stop(struct htc_target *target)
2811 2823
2812 ath6kl_htc_flush_txep_all(target); 2824 ath6kl_htc_flush_txep_all(target);
2813 2825
2814 ath6kl_htc_flush_rx_buf(target); 2826 ath6kl_htc_mbox_flush_rx_buf(target);
2815 2827
2816 ath6kl_htc_reset(target); 2828 ath6kl_htc_reset(target);
2817} 2829}
2818 2830
2819void *ath6kl_htc_create(struct ath6kl *ar) 2831static void *ath6kl_htc_mbox_create(struct ath6kl *ar)
2820{ 2832{
2821 struct htc_target *target = NULL; 2833 struct htc_target *target = NULL;
2822 int status = 0; 2834 int status = 0;
@@ -2857,13 +2869,13 @@ void *ath6kl_htc_create(struct ath6kl *ar)
2857 return target; 2869 return target;
2858 2870
2859err_htc_cleanup: 2871err_htc_cleanup:
2860 ath6kl_htc_cleanup(target); 2872 ath6kl_htc_mbox_cleanup(target);
2861 2873
2862 return NULL; 2874 return NULL;
2863} 2875}
2864 2876
2865/* cleanup the HTC instance */ 2877/* cleanup the HTC instance */
2866void ath6kl_htc_cleanup(struct htc_target *target) 2878static void ath6kl_htc_mbox_cleanup(struct htc_target *target)
2867{ 2879{
2868 struct htc_packet *packet, *tmp_packet; 2880 struct htc_packet *packet, *tmp_packet;
2869 2881
@@ -2888,3 +2900,24 @@ void ath6kl_htc_cleanup(struct htc_target *target)
2888 kfree(target->dev); 2900 kfree(target->dev);
2889 kfree(target); 2901 kfree(target);
2890} 2902}
2903
2904static const struct ath6kl_htc_ops ath6kl_htc_mbox_ops = {
2905 .create = ath6kl_htc_mbox_create,
2906 .wait_target = ath6kl_htc_mbox_wait_target,
2907 .start = ath6kl_htc_mbox_start,
2908 .conn_service = ath6kl_htc_mbox_conn_service,
2909 .tx = ath6kl_htc_mbox_tx,
2910 .stop = ath6kl_htc_mbox_stop,
2911 .cleanup = ath6kl_htc_mbox_cleanup,
2912 .flush_txep = ath6kl_htc_mbox_flush_txep,
2913 .flush_rx_buf = ath6kl_htc_mbox_flush_rx_buf,
2914 .activity_changed = ath6kl_htc_mbox_activity_changed,
2915 .get_rxbuf_num = ath6kl_htc_mbox_get_rxbuf_num,
2916 .add_rxbuf_multiple = ath6kl_htc_mbox_add_rxbuf_multiple,
2917 .credit_setup = ath6kl_htc_mbox_credit_setup,
2918};
2919
2920void ath6kl_htc_mbox_attach(struct ath6kl *ar)
2921{
2922 ar->htc_ops = &ath6kl_htc_mbox_ops;
2923}
diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
new file mode 100644
index 000000000000..b277b3446882
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
@@ -0,0 +1,1713 @@
1/*
2 * Copyright (c) 2007-2011 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "core.h"
18#include "debug.h"
19#include "hif-ops.h"
20
21#define HTC_PACKET_CONTAINER_ALLOCATION 32
22#define HTC_CONTROL_BUFFER_SIZE (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH)
23
24static int ath6kl_htc_pipe_tx(struct htc_target *handle,
25 struct htc_packet *packet);
26static void ath6kl_htc_pipe_cleanup(struct htc_target *handle);
27
28/* htc pipe tx path */
29static inline void restore_tx_packet(struct htc_packet *packet)
30{
31 if (packet->info.tx.flags & HTC_FLAGS_TX_FIXUP_NETBUF) {
32 skb_pull(packet->skb, sizeof(struct htc_frame_hdr));
33 packet->info.tx.flags &= ~HTC_FLAGS_TX_FIXUP_NETBUF;
34 }
35}
36
37static void do_send_completion(struct htc_endpoint *ep,
38 struct list_head *queue_to_indicate)
39{
40 struct htc_packet *packet;
41
42 if (list_empty(queue_to_indicate)) {
43 /* nothing to indicate */
44 return;
45 }
46
47 if (ep->ep_cb.tx_comp_multi != NULL) {
48 ath6kl_dbg(ATH6KL_DBG_HTC,
49 "%s: calling ep %d, send complete multiple callback (%d pkts)\n",
50 __func__, ep->eid,
51 get_queue_depth(queue_to_indicate));
52 /*
53 * a multiple send complete handler is being used,
54 * pass the queue to the handler
55 */
56 ep->ep_cb.tx_comp_multi(ep->target, queue_to_indicate);
57 /*
58 * all packets are now owned by the callback,
59 * reset queue to be safe
60 */
61 INIT_LIST_HEAD(queue_to_indicate);
62 } else {
63 /* using legacy EpTxComplete */
64 do {
65 packet = list_first_entry(queue_to_indicate,
66 struct htc_packet, list);
67
68 list_del(&packet->list);
69 ath6kl_dbg(ATH6KL_DBG_HTC,
70 "%s: calling ep %d send complete callback on packet 0x%p\n",
71 __func__, ep->eid, packet);
72 ep->ep_cb.tx_complete(ep->target, packet);
73 } while (!list_empty(queue_to_indicate));
74 }
75}
76
77static void send_packet_completion(struct htc_target *target,
78 struct htc_packet *packet)
79{
80 struct htc_endpoint *ep = &target->endpoint[packet->endpoint];
81 struct list_head container;
82
83 restore_tx_packet(packet);
84 INIT_LIST_HEAD(&container);
85 list_add_tail(&packet->list, &container);
86
87 /* do completion */
88 do_send_completion(ep, &container);
89}
90
91static void get_htc_packet_credit_based(struct htc_target *target,
92 struct htc_endpoint *ep,
93 struct list_head *queue)
94{
95 int credits_required;
96 int remainder;
97 u8 send_flags;
98 struct htc_packet *packet;
99 unsigned int transfer_len;
100
101 /* NOTE : the TX lock is held when this function is called */
102
103 /* loop until we can grab as many packets out of the queue as we can */
104 while (true) {
105 send_flags = 0;
106 if (list_empty(&ep->txq))
107 break;
108
109 /* get packet at head, but don't remove it */
110 packet = list_first_entry(&ep->txq, struct htc_packet, list);
111 if (packet == NULL)
112 break;
113
114 ath6kl_dbg(ATH6KL_DBG_HTC,
115 "%s: got head packet:0x%p , queue depth: %d\n",
116 __func__, packet, get_queue_depth(&ep->txq));
117
118 transfer_len = packet->act_len + HTC_HDR_LENGTH;
119
120 if (transfer_len <= target->tgt_cred_sz) {
121 credits_required = 1;
122 } else {
123 /* figure out how many credits this message requires */
124 credits_required = transfer_len / target->tgt_cred_sz;
125 remainder = transfer_len % target->tgt_cred_sz;
126
127 if (remainder)
128 credits_required++;
129 }
130
131 ath6kl_dbg(ATH6KL_DBG_HTC, "%s: creds required:%d got:%d\n",
132 __func__, credits_required, ep->cred_dist.credits);
133
134 if (ep->eid == ENDPOINT_0) {
135 /*
136 * endpoint 0 is special, it always has a credit and
137 * does not require credit based flow control
138 */
139 credits_required = 0;
140
141 } else {
142
143 if (ep->cred_dist.credits < credits_required)
144 break;
145
146 ep->cred_dist.credits -= credits_required;
147 ep->ep_st.cred_cosumd += credits_required;
148
149 /* check if we need credits back from the target */
150 if (ep->cred_dist.credits <
151 ep->cred_dist.cred_per_msg) {
152 /* tell the target we need credits ASAP! */
153 send_flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
154 ep->ep_st.cred_low_indicate += 1;
155 ath6kl_dbg(ATH6KL_DBG_HTC,
156 "%s: host needs credits\n",
157 __func__);
158 }
159 }
160
161 /* now we can fully dequeue */
162 packet = list_first_entry(&ep->txq, struct htc_packet, list);
163
164 list_del(&packet->list);
165 /* save the number of credits this packet consumed */
166 packet->info.tx.cred_used = credits_required;
167 /* save send flags */
168 packet->info.tx.flags = send_flags;
169 packet->info.tx.seqno = ep->seqno;
170 ep->seqno++;
171 /* queue this packet into the caller's queue */
172 list_add_tail(&packet->list, queue);
173 }
174
175}
176
177static void get_htc_packet(struct htc_target *target,
178 struct htc_endpoint *ep,
179 struct list_head *queue, int resources)
180{
181 struct htc_packet *packet;
182
183 /* NOTE : the TX lock is held when this function is called */
184
185 /* loop until we can grab as many packets out of the queue as we can */
186 while (resources) {
187 if (list_empty(&ep->txq))
188 break;
189
190 packet = list_first_entry(&ep->txq, struct htc_packet, list);
191 list_del(&packet->list);
192
193 ath6kl_dbg(ATH6KL_DBG_HTC,
194 "%s: got packet:0x%p , new queue depth: %d\n",
195 __func__, packet, get_queue_depth(&ep->txq));
196 packet->info.tx.seqno = ep->seqno;
197 packet->info.tx.flags = 0;
198 packet->info.tx.cred_used = 0;
199 ep->seqno++;
200
201 /* queue this packet into the caller's queue */
202 list_add_tail(&packet->list, queue);
203 resources--;
204 }
205}
206
/*
 * Push every packet on @pkt_queue out through @ep's UL HIF pipe.
 *
 * For each packet: an HTC frame header is prepended to the skb, the
 * packet is added to the endpoint's tx_lookup_queue (so the HIF TX
 * completion can map the skb back to its htc_packet), and the skb is
 * handed to ath6kl_hif_pipe_send().  On a send failure the packet is
 * unlinked from the lookup queue, its credits reclaimed, and it is put
 * back on @pkt_queue; all remaining packets are then completed with the
 * failure status.  Called without tx_lock held (takes it internally).
 *
 * Returns 0 on success or the first HIF send error.
 */
static int htc_issue_packets(struct htc_target *target,
			     struct htc_endpoint *ep,
			     struct list_head *pkt_queue)
{
	int status = 0;
	u16 payload_len;
	struct sk_buff *skb;
	struct htc_frame_hdr *htc_hdr;
	struct htc_packet *packet;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "%s: queue: 0x%p, pkts %d\n", __func__,
		   pkt_queue, get_queue_depth(pkt_queue));

	while (!list_empty(pkt_queue)) {
		packet = list_first_entry(pkt_queue, struct htc_packet, list);
		list_del(&packet->list);

		skb = packet->skb;
		if (!skb) {
			WARN_ON_ONCE(1);
			status = -EINVAL;
			break;
		}

		payload_len = packet->act_len;

		/* setup HTC frame header */
		htc_hdr = (struct htc_frame_hdr *) skb_push(skb,
							    sizeof(*htc_hdr));
		if (!htc_hdr) {
			WARN_ON_ONCE(1);
			status = -EINVAL;
			break;
		}

		packet->info.tx.flags |= HTC_FLAGS_TX_FIXUP_NETBUF;

		/* Endianess? */
		put_unaligned((u16) payload_len, &htc_hdr->payld_len);
		htc_hdr->flags = packet->info.tx.flags;
		htc_hdr->eid = (u8) packet->endpoint;
		htc_hdr->ctrl[0] = 0;
		htc_hdr->ctrl[1] = (u8) packet->info.tx.seqno;

		spin_lock_bh(&target->tx_lock);

		/* store in look up queue to match completions */
		list_add_tail(&packet->list, &ep->pipe.tx_lookup_queue);
		ep->ep_st.tx_issued += 1;
		spin_unlock_bh(&target->tx_lock);

		status = ath6kl_hif_pipe_send(target->dev->ar,
					      ep->pipe.pipeid_ul, NULL, skb);

		if (status != 0) {
			if (status != -ENOMEM) {
				/* TODO: if more than 1 endpoint maps to the
				 * same PipeID, it is possible to run out of
				 * resources in the HIF layer.
				 * Don't emit the error
				 */
				ath6kl_dbg(ATH6KL_DBG_HTC,
					   "%s: failed status:%d\n",
					   __func__, status);
			}
			/* undo: remove from the lookup queue and give the
			 * consumed credits back to the endpoint */
			spin_lock_bh(&target->tx_lock);
			list_del(&packet->list);

			/* reclaim credits */
			ep->cred_dist.credits += packet->info.tx.cred_used;
			spin_unlock_bh(&target->tx_lock);

			/* put it back into the callers queue */
			list_add(&packet->list, pkt_queue);
			break;
		}

	}

	/* on failure, complete everything still queued with that status */
	if (status != 0) {
		while (!list_empty(pkt_queue)) {
			if (status != -ENOMEM) {
				ath6kl_dbg(ATH6KL_DBG_HTC,
					   "%s: failed pkt:0x%p status:%d\n",
					   __func__, packet, status);
			}

			packet = list_first_entry(pkt_queue,
						  struct htc_packet, list);
			list_del(&packet->list);
			packet->status = status;
			send_packet_completion(target, packet);
		}
	}

	return status;
}
305
/*
 * Queue and/or transmit packets for endpoint @ep.
 *
 * If @txq is non-NULL its packets are first moved to ep->txq, honouring
 * the endpoint's max_txq_depth: when the queue would overflow and the
 * caller registered a tx_full callback, each overflowing packet is
 * either left on @txq (dropped -- caller cleans up) or force-queued, as
 * the callback decides.  A NULL @txq just re-runs the drain step (used
 * from TX-completion and credit-report paths).
 *
 * Only one context at a time drains the endpoint; this is serialized by
 * ep->tx_proc_cnt under tx_lock.  Draining hands batches of packets to
 * htc_issue_packets() with tx_lock dropped.
 */
static enum htc_send_queue_result htc_try_send(struct htc_target *target,
					       struct htc_endpoint *ep,
					       struct list_head *txq)
{
	struct list_head send_queue;	/* temp queue to hold packets */
	struct htc_packet *packet, *tmp_pkt;
	struct ath6kl *ar = target->dev->ar;
	enum htc_send_full_action action;
	int tx_resources, overflow, txqueue_depth, i, good_pkts;
	u8 pipeid;

	ath6kl_dbg(ATH6KL_DBG_HTC, "%s: (queue:0x%p depth:%d)\n",
		   __func__, txq,
		   (txq == NULL) ? 0 : get_queue_depth(txq));

	/* init the local send queue */
	INIT_LIST_HEAD(&send_queue);

	/*
	 * txq equals to NULL means
	 * caller didn't provide a queue, just wants us to
	 * check queues and send
	 */
	if (txq != NULL) {
		if (list_empty(txq)) {
			/* empty queue */
			return HTC_SEND_QUEUE_DROP;
		}

		spin_lock_bh(&target->tx_lock);
		txqueue_depth = get_queue_depth(&ep->txq);
		spin_unlock_bh(&target->tx_lock);

		if (txqueue_depth >= ep->max_txq_depth) {
			/* we've already overflowed */
			overflow = get_queue_depth(txq);
		} else {
			/* get how much we will overflow by */
			overflow = txqueue_depth;
			overflow += get_queue_depth(txq);
			/* get how much we will overflow the TX queue by */
			overflow -= ep->max_txq_depth;
		}

		/* if overflow is negative or zero, we are okay */
		if (overflow > 0) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "%s: Endpoint %d, TX queue will overflow :%d, Tx Depth:%d, Max:%d\n",
				   __func__, ep->eid, overflow, txqueue_depth,
				   ep->max_txq_depth);
		}
		if ((overflow <= 0) ||
		    (ep->ep_cb.tx_full == NULL)) {
			/*
			 * all packets will fit or caller did not provide send
			 * full indication handler -- just move all of them
			 * to the local send_queue object
			 */
			list_splice_tail_init(txq, &send_queue);
		} else {
			good_pkts = get_queue_depth(txq) - overflow;
			if (good_pkts < 0) {
				WARN_ON_ONCE(1);
				return HTC_SEND_QUEUE_DROP;
			}

			/* we have overflowed, and a callback is provided */
			/* dequeue all non-overflow packets to the sendqueue */
			for (i = 0; i < good_pkts; i++) {
				/* pop off caller's queue */
				packet = list_first_entry(txq,
							  struct htc_packet,
							  list);
				list_del(&packet->list);
				/* insert into local queue */
				list_add_tail(&packet->list, &send_queue);
			}

			/*
			 * the caller's queue has all the packets that won't fit
			 * walk through the caller's queue and indicate each to
			 * the send full handler
			 */
			list_for_each_entry_safe(packet, tmp_pkt,
						 txq, list) {

				ath6kl_dbg(ATH6KL_DBG_HTC,
					   "%s: Indicat overflowed TX pkts: %p\n",
					   __func__, packet);
				action = ep->ep_cb.tx_full(ep->target, packet);
				if (action == HTC_SEND_FULL_DROP) {
					/* callback wants the packet dropped */
					ep->ep_st.tx_dropped += 1;

					/* leave this one in the caller's queue
					 * for cleanup */
				} else {
					/* callback wants to keep this packet,
					 * remove from caller's queue */
					list_del(&packet->list);
					/* put it in the send queue */
					list_add_tail(&packet->list,
						      &send_queue);
				}

			}

			if (list_empty(&send_queue)) {
				/* no packets made it in, caller will cleanup */
				return HTC_SEND_QUEUE_DROP;
			}
		}
	}

	/* without credit flow control, ask the HIF layer how many sends
	 * its queue can still absorb */
	if (!ep->pipe.tx_credit_flow_enabled) {
		tx_resources =
		    ath6kl_hif_pipe_get_free_queue_number(ar,
							  ep->pipe.pipeid_ul);
	} else {
		tx_resources = 0;
	}

	spin_lock_bh(&target->tx_lock);
	if (!list_empty(&send_queue)) {
		/* transfer packets to tail */
		list_splice_tail_init(&send_queue, &ep->txq);
		/* defensive: list_splice_tail_init leaves send_queue empty,
		 * so this branch should never be taken */
		if (!list_empty(&send_queue)) {
			WARN_ON_ONCE(1);
			spin_unlock_bh(&target->tx_lock);
			return HTC_SEND_QUEUE_DROP;
		}
		INIT_LIST_HEAD(&send_queue);
	}

	/* increment tx processing count on entry */
	ep->tx_proc_cnt++;

	if (ep->tx_proc_cnt > 1) {
		/*
		 * Another thread or task is draining the TX queues on this
		 * endpoint that thread will reset the tx processing count
		 * when the queue is drained.
		 */
		ep->tx_proc_cnt--;
		spin_unlock_bh(&target->tx_lock);
		return HTC_SEND_QUEUE_OK;
	}

	/***** beyond this point only 1 thread may enter ******/

	/*
	 * Now drain the endpoint TX queue for transmission as long as we have
	 * enough transmit resources.
	 */
	while (true) {

		if (get_queue_depth(&ep->txq) == 0)
			break;

		if (ep->pipe.tx_credit_flow_enabled) {
			/*
			 * Credit based mechanism provides flow control
			 * based on target transmit resource availability,
			 * we assume that the HIF layer will always have
			 * bus resources greater than target transmit
			 * resources.
			 */
			get_htc_packet_credit_based(target, ep, &send_queue);
		} else {
			/*
			 * Get all packets for this endpoint that we can
			 * for this pass.
			 */
			get_htc_packet(target, ep, &send_queue, tx_resources);
		}

		if (get_queue_depth(&send_queue) == 0) {
			/*
			 * Didn't get packets due to out of resources or TX
			 * queue was drained.
			 */
			break;
		}

		/* drop tx_lock across the actual send; htc_issue_packets()
		 * takes it itself where needed */
		spin_unlock_bh(&target->tx_lock);

		/* send what we can */
		htc_issue_packets(target, ep, &send_queue);

		if (!ep->pipe.tx_credit_flow_enabled) {
			pipeid = ep->pipe.pipeid_ul;
			tx_resources =
			    ath6kl_hif_pipe_get_free_queue_number(ar, pipeid);
		}

		spin_lock_bh(&target->tx_lock);

	}
	/* done with this endpoint, we can clear the count */
	ep->tx_proc_cnt = 0;
	spin_unlock_bh(&target->tx_lock);

	return HTC_SEND_QUEUE_OK;
}
510
511/* htc control packet manipulation */
512static void destroy_htc_txctrl_packet(struct htc_packet *packet)
513{
514 struct sk_buff *skb;
515 skb = packet->skb;
516 if (skb != NULL)
517 dev_kfree_skb(skb);
518
519 kfree(packet);
520}
521
522static struct htc_packet *build_htc_txctrl_packet(void)
523{
524 struct htc_packet *packet = NULL;
525 struct sk_buff *skb;
526
527 packet = kzalloc(sizeof(struct htc_packet), GFP_KERNEL);
528 if (packet == NULL)
529 return NULL;
530
531 skb = __dev_alloc_skb(HTC_CONTROL_BUFFER_SIZE, GFP_KERNEL);
532
533 if (skb == NULL) {
534 kfree(packet);
535 return NULL;
536 }
537 packet->skb = skb;
538
539 return packet;
540}
541
/* Free a TX control packet; @target is unused, kept for symmetry with
 * htc_alloc_txctrl_packet(). */
static void htc_free_txctrl_packet(struct htc_target *target,
				   struct htc_packet *packet)
{
	destroy_htc_txctrl_packet(packet);
}
547
/* Allocate a TX control packet; @target is unused, kept for symmetry
 * with htc_free_txctrl_packet().  Returns NULL on allocation failure. */
static struct htc_packet *htc_alloc_txctrl_packet(struct htc_target *target)
{
	return build_htc_txctrl_packet();
}
552
/* TX-completion callback for control packets: just free the packet. */
static void htc_txctrl_complete(struct htc_target *target,
				struct htc_packet *packet)
{
	htc_free_txctrl_packet(target, packet);
}
558
#define MAX_MESSAGE_SIZE 1536	/* bytes; used to size per-service credit allocations */
560
/*
 * Statically partition the target's TX credits among the WMI services
 * and record the result in target->pipe.txcredit_alloc[].
 *
 * Returns 0 on success, or -ENOMEM if the credit pool runs out before
 * every service received an allocation.  Note that @entry is advanced
 * through the table with entry++; the double increments deliberately
 * skip table slots.
 */
static int htc_setup_target_buffer_assignments(struct htc_target *target)
{
	int status, credits, credit_per_maxmsg, i;
	struct htc_pipe_txcredit_alloc *entry;
	unsigned int hif_usbaudioclass = 0;

	/* credits needed for one maximum-sized message, rounded up */
	credit_per_maxmsg = MAX_MESSAGE_SIZE / target->tgt_cred_sz;
	if (MAX_MESSAGE_SIZE % target->tgt_cred_sz)
		credit_per_maxmsg++;

	/* TODO, this should be configured by the caller! */

	credits = target->tgt_creds;
	entry = &target->pipe.txcredit_alloc[0];

	status = -ENOMEM;

	/* FIXME: hif_usbaudioclass is always zero */
	if (hif_usbaudioclass) {
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "%s: For USB Audio Class- Total:%d\n",
			   __func__, credits);
		entry++;
		entry++;
		/* Setup VO Service To have Max Credits */
		entry->service_id = WMI_DATA_VO_SVC;
		entry->credit_alloc = (credits - 6);
		if (entry->credit_alloc == 0)
			entry->credit_alloc++;

		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		entry++;
		entry->service_id = WMI_CONTROL_SVC;
		entry->credit_alloc = credit_per_maxmsg;
		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		/* leftovers go to best effort */
		entry++;
		entry++;
		entry->service_id = WMI_DATA_BE_SVC;
		entry->credit_alloc = (u8) credits;
		status = 0;
	} else {
		/* standard split: VI and VO each get a quarter (min 1),
		 * CONTROL and BK one max-message each, BE the remainder */
		entry++;
		entry->service_id = WMI_DATA_VI_SVC;
		entry->credit_alloc = credits / 4;
		if (entry->credit_alloc == 0)
			entry->credit_alloc++;

		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		entry++;
		entry->service_id = WMI_DATA_VO_SVC;
		entry->credit_alloc = credits / 4;
		if (entry->credit_alloc == 0)
			entry->credit_alloc++;

		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		entry++;
		entry->service_id = WMI_CONTROL_SVC;
		entry->credit_alloc = credit_per_maxmsg;
		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		entry++;
		entry->service_id = WMI_DATA_BK_SVC;
		entry->credit_alloc = credit_per_maxmsg;
		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		/* leftovers go to best effort */
		entry++;
		entry->service_id = WMI_DATA_BE_SVC;
		entry->credit_alloc = (u8) credits;
		status = 0;
	}

	if (status == 0) {
		for (i = 0; i < ENDPOINT_MAX; i++) {
			if (target->pipe.txcredit_alloc[i].service_id != 0) {
				ath6kl_dbg(ATH6KL_DBG_HTC,
					   "HTC Service Index : %d TX : 0x%2.2X : alloc:%d\n",
					   i,
					   target->pipe.txcredit_alloc[i].
					   service_id,
					   target->pipe.txcredit_alloc[i].
					   credit_alloc);
			}
		}
	}
	return status;
}
665
666/* process credit reports and call distribution function */
/*
 * Apply a credit report from the target: credit each listed endpoint
 * and kick htc_try_send() for any endpoint that now has both credits
 * and queued packets.  Called without tx_lock; takes it here and drops
 * it around the htc_try_send() calls (which take it themselves).
 */
static void htc_process_credit_report(struct htc_target *target,
				      struct htc_credit_report *rpt,
				      int num_entries,
				      enum htc_endpoint_id from_ep)
{
	int total_credits = 0, i;
	struct htc_endpoint *ep;

	/* lock out TX while we update credits */
	spin_lock_bh(&target->tx_lock);

	for (i = 0; i < num_entries; i++, rpt++) {
		if (rpt->eid >= ENDPOINT_MAX) {
			/* bogus endpoint id from the target: abort the
			 * whole report */
			WARN_ON_ONCE(1);
			spin_unlock_bh(&target->tx_lock);
			return;
		}

		ep = &target->endpoint[rpt->eid];
		ep->cred_dist.credits += rpt->credits;

		if (ep->cred_dist.credits && get_queue_depth(&ep->txq)) {
			spin_unlock_bh(&target->tx_lock);
			htc_try_send(target, ep, NULL);
			spin_lock_bh(&target->tx_lock);
		}

		total_credits += rpt->credits;
	}
	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "Report indicated %d credits to distribute\n",
		   total_credits);

	spin_unlock_bh(&target->tx_lock);
}
702
703/* flush endpoint TX queue */
704static void htc_flush_tx_endpoint(struct htc_target *target,
705 struct htc_endpoint *ep, u16 tag)
706{
707 struct htc_packet *packet;
708
709 spin_lock_bh(&target->tx_lock);
710 while (get_queue_depth(&ep->txq)) {
711 packet = list_first_entry(&ep->txq, struct htc_packet, list);
712 list_del(&packet->list);
713 packet->status = 0;
714 send_packet_completion(target, packet);
715 }
716 spin_unlock_bh(&target->tx_lock);
717}
718
719/*
720 * In the adapted HIF layer, struct sk_buff * are passed between HIF and HTC,
721 * since upper layers expects struct htc_packet containers we use the completed
722 * skb and lookup it's corresponding HTC packet buffer from a lookup list.
723 * This is extra overhead that can be fixed by re-aligning HIF interfaces with
724 * HTC.
725 */
726static struct htc_packet *htc_lookup_tx_packet(struct htc_target *target,
727 struct htc_endpoint *ep,
728 struct sk_buff *skb)
729{
730 struct htc_packet *packet, *tmp_pkt, *found_packet = NULL;
731
732 spin_lock_bh(&target->tx_lock);
733
734 /*
735 * interate from the front of tx lookup queue
736 * this lookup should be fast since lower layers completes in-order and
737 * so the completed packet should be at the head of the list generally
738 */
739 list_for_each_entry_safe(packet, tmp_pkt, &ep->pipe.tx_lookup_queue,
740 list) {
741 /* check for removal */
742 if (skb == packet->skb) {
743 /* found it */
744 list_del(&packet->list);
745 found_packet = packet;
746 break;
747 }
748 }
749
750 spin_unlock_bh(&target->tx_lock);
751
752 return found_packet;
753}
754
/*
 * HIF TX-completion callback: map the completed skb back to its
 * htc_packet via the endpoint's tx lookup queue and complete it.
 *
 * NOTE(review): htc_hdr->eid indexes target->endpoint[] without a
 * range check here.  The header was written by htc_issue_packets() on
 * the way out, so it should be valid -- confirm no other path can hand
 * this callback a foreign skb.
 */
static int ath6kl_htc_pipe_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
{
	struct htc_target *target = ar->htc_target;
	struct htc_frame_hdr *htc_hdr;
	struct htc_endpoint *ep;
	struct htc_packet *packet;
	u8 ep_id, *netdata;
	u32 netlen;

	netdata = skb->data;
	netlen = skb->len;

	/* the HTC header we prepended at send time is still in place */
	htc_hdr = (struct htc_frame_hdr *) netdata;

	ep_id = htc_hdr->eid;
	ep = &target->endpoint[ep_id];

	packet = htc_lookup_tx_packet(target, ep, skb);
	if (packet == NULL) {
		/* may have already been flushed and freed */
		ath6kl_err("HTC TX lookup failed!\n");
	} else {
		/* will be giving this buffer back to upper layers */
		packet->status = 0;
		send_packet_completion(target, packet);
	}
	skb = NULL;

	if (!ep->pipe.tx_credit_flow_enabled) {
		/*
		 * note: when using TX credit flow, the re-checking of queues
		 * happens when credits flow back from the target. in the
		 * non-TX credit case, we recheck after the packet completes
		 */
		htc_try_send(target, ep, NULL);
	}

	return 0;
}
794
795static int htc_send_packets_multiple(struct htc_target *target,
796 struct list_head *pkt_queue)
797{
798 struct htc_endpoint *ep;
799 struct htc_packet *packet, *tmp_pkt;
800
801 if (list_empty(pkt_queue))
802 return -EINVAL;
803
804 /* get first packet to find out which ep the packets will go into */
805 packet = list_first_entry(pkt_queue, struct htc_packet, list);
806 if (packet == NULL)
807 return -EINVAL;
808
809 if (packet->endpoint >= ENDPOINT_MAX) {
810 WARN_ON_ONCE(1);
811 return -EINVAL;
812 }
813 ep = &target->endpoint[packet->endpoint];
814
815 htc_try_send(target, ep, pkt_queue);
816
817 /* do completion on any packets that couldn't get in */
818 if (!list_empty(pkt_queue)) {
819 list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
820 packet->status = -ENOMEM;
821 }
822
823 do_send_completion(ep, pkt_queue);
824 }
825
826 return 0;
827}
828
829/* htc pipe rx path */
830static struct htc_packet *alloc_htc_packet_container(struct htc_target *target)
831{
832 struct htc_packet *packet;
833 spin_lock_bh(&target->rx_lock);
834
835 if (target->pipe.htc_packet_pool == NULL) {
836 spin_unlock_bh(&target->rx_lock);
837 return NULL;
838 }
839
840 packet = target->pipe.htc_packet_pool;
841 target->pipe.htc_packet_pool = (struct htc_packet *) packet->list.next;
842
843 spin_unlock_bh(&target->rx_lock);
844
845 packet->list.next = NULL;
846 return packet;
847}
848
849static void free_htc_packet_container(struct htc_target *target,
850 struct htc_packet *packet)
851{
852 struct list_head *lh;
853
854 spin_lock_bh(&target->rx_lock);
855
856 if (target->pipe.htc_packet_pool == NULL) {
857 target->pipe.htc_packet_pool = packet;
858 packet->list.next = NULL;
859 } else {
860 lh = (struct list_head *) target->pipe.htc_packet_pool;
861 packet->list.next = lh;
862 target->pipe.htc_packet_pool = packet;
863 }
864
865 spin_unlock_bh(&target->rx_lock);
866}
867
/*
 * Parse the trailer records appended to an RX frame; @buffer/@len cover
 * only the trailer area.  Only HTC_RECORD_CREDITS records are handled;
 * unknown record ids are logged and skipped.
 *
 * Returns 0 on success or -EINVAL for a malformed trailer.
 */
static int htc_process_trailer(struct htc_target *target, u8 *buffer,
			       int len, enum htc_endpoint_id from_ep)
{
	struct htc_credit_report *report;
	struct htc_record_hdr *record;
	u8 *record_buf, *orig_buf;
	int orig_len, status;

	/* orig_buf/orig_len are only kept as a debugging reference */
	orig_buf = buffer;
	orig_len = len;
	status = 0;

	while (len > 0) {
		if (len < sizeof(struct htc_record_hdr)) {
			status = -EINVAL;
			break;
		}

		/* these are byte aligned structs */
		record = (struct htc_record_hdr *) buffer;
		len -= sizeof(struct htc_record_hdr);
		buffer += sizeof(struct htc_record_hdr);

		if (record->len > len) {
			/* no room left in buffer for record */
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "invalid length: %d (id:%d) buffer has: %d bytes left\n",
				   record->len, record->rec_id, len);
			status = -EINVAL;
			break;
		}

		/* start of record follows the header */
		record_buf = buffer;

		switch (record->rec_id) {
		case HTC_RECORD_CREDITS:
			if (record->len < sizeof(struct htc_credit_report)) {
				WARN_ON_ONCE(1);
				/* NOTE(review): returns directly rather than
				 * "status = -EINVAL; break" like the other
				 * error paths -- same result, inconsistent
				 * style */
				return -EINVAL;
			}

			report = (struct htc_credit_report *) record_buf;
			htc_process_credit_report(target, report,
						  record->len / sizeof(*report),
						  from_ep);
			break;
		default:
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "unhandled record: id:%d length:%d\n",
				   record->rec_id, record->len);
			break;
		}

		if (status != 0)
			break;

		/* advance buffer past this record for next time around */
		buffer += record->len;
		len -= record->len;
	}

	return status;
}
932
933static void do_recv_completion(struct htc_endpoint *ep,
934 struct list_head *queue_to_indicate)
935{
936 struct htc_packet *packet;
937
938 if (list_empty(queue_to_indicate)) {
939 /* nothing to indicate */
940 return;
941 }
942
943 /* using legacy EpRecv */
944 while (!list_empty(queue_to_indicate)) {
945 packet = list_first_entry(queue_to_indicate,
946 struct htc_packet, list);
947 list_del(&packet->list);
948 ep->ep_cb.rx(ep->target, packet);
949 }
950
951 return;
952}
953
954static void recv_packet_completion(struct htc_target *target,
955 struct htc_endpoint *ep,
956 struct htc_packet *packet)
957{
958 struct list_head container;
959 INIT_LIST_HEAD(&container);
960 list_add_tail(&packet->list, &container);
961
962 /* do completion */
963 do_recv_completion(ep, &container);
964}
965
966static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
967 u8 pipeid)
968{
969 struct htc_target *target = ar->htc_target;
970 u8 *netdata, *trailer, hdr_info;
971 struct htc_frame_hdr *htc_hdr;
972 u32 netlen, trailerlen = 0;
973 struct htc_packet *packet;
974 struct htc_endpoint *ep;
975 u16 payload_len;
976 int status = 0;
977
978 netdata = skb->data;
979 netlen = skb->len;
980
981 htc_hdr = (struct htc_frame_hdr *) netdata;
982
983 ep = &target->endpoint[htc_hdr->eid];
984
985 if (htc_hdr->eid >= ENDPOINT_MAX) {
986 ath6kl_dbg(ATH6KL_DBG_HTC,
987 "HTC Rx: invalid EndpointID=%d\n",
988 htc_hdr->eid);
989 status = -EINVAL;
990 goto free_skb;
991 }
992
993 payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));
994
995 if (netlen < (payload_len + HTC_HDR_LENGTH)) {
996 ath6kl_dbg(ATH6KL_DBG_HTC,
997 "HTC Rx: insufficient length, got:%d expected =%u\n",
998 netlen, payload_len + HTC_HDR_LENGTH);
999 status = -EINVAL;
1000 goto free_skb;
1001 }
1002
1003 /* get flags to check for trailer */
1004 hdr_info = htc_hdr->flags;
1005 if (hdr_info & HTC_FLG_RX_TRAILER) {
1006 /* extract the trailer length */
1007 hdr_info = htc_hdr->ctrl[0];
1008 if ((hdr_info < sizeof(struct htc_record_hdr)) ||
1009 (hdr_info > payload_len)) {
1010 ath6kl_dbg(ATH6KL_DBG_HTC,
1011 "invalid header: payloadlen should be %d, CB[0]: %d\n",
1012 payload_len, hdr_info);
1013 status = -EINVAL;
1014 goto free_skb;
1015 }
1016
1017 trailerlen = hdr_info;
1018 /* process trailer after hdr/apps payload */
1019 trailer = (u8 *) htc_hdr + HTC_HDR_LENGTH +
1020 payload_len - hdr_info;
1021 status = htc_process_trailer(target, trailer, hdr_info,
1022 htc_hdr->eid);
1023 if (status != 0)
1024 goto free_skb;
1025 }
1026
1027 if (((int) payload_len - (int) trailerlen) <= 0) {
1028 /* zero length packet with trailer, just drop these */
1029 goto free_skb;
1030 }
1031
1032 if (htc_hdr->eid == ENDPOINT_0) {
1033 /* handle HTC control message */
1034 if (target->htc_flags & HTC_OP_STATE_SETUP_COMPLETE) {
1035 /*
1036 * fatal: target should not send unsolicited
1037 * messageson the endpoint 0
1038 */
1039 ath6kl_dbg(ATH6KL_DBG_HTC,
1040 "HTC ignores Rx Ctrl after setup complete\n");
1041 status = -EINVAL;
1042 goto free_skb;
1043 }
1044
1045 /* remove HTC header */
1046 skb_pull(skb, HTC_HDR_LENGTH);
1047
1048 netdata = skb->data;
1049 netlen = skb->len;
1050
1051 spin_lock_bh(&target->rx_lock);
1052
1053 target->pipe.ctrl_response_valid = true;
1054 target->pipe.ctrl_response_len = min_t(int, netlen,
1055 HTC_MAX_CTRL_MSG_LEN);
1056 memcpy(target->pipe.ctrl_response_buf, netdata,
1057 target->pipe.ctrl_response_len);
1058
1059 spin_unlock_bh(&target->rx_lock);
1060
1061 dev_kfree_skb(skb);
1062 skb = NULL;
1063 goto free_skb;
1064 }
1065
1066 /*
1067 * TODO: the message based HIF architecture allocates net bufs
1068 * for recv packets since it bridges that HIF to upper layers,
1069 * which expects HTC packets, we form the packets here
1070 */
1071 packet = alloc_htc_packet_container(target);
1072 if (packet == NULL) {
1073 status = -ENOMEM;
1074 goto free_skb;
1075 }
1076
1077 packet->status = 0;
1078 packet->endpoint = htc_hdr->eid;
1079 packet->pkt_cntxt = skb;
1080
1081 /* TODO: for backwards compatibility */
1082 packet->buf = skb_push(skb, 0) + HTC_HDR_LENGTH;
1083 packet->act_len = netlen - HTC_HDR_LENGTH - trailerlen;
1084
1085 /*
1086 * TODO: this is a hack because the driver layer will set the
1087 * actual len of the skb again which will just double the len
1088 */
1089 skb_trim(skb, 0);
1090
1091 recv_packet_completion(target, ep, packet);
1092
1093 /* recover the packet container */
1094 free_htc_packet_container(target, packet);
1095 skb = NULL;
1096
1097free_skb:
1098 if (skb != NULL)
1099 dev_kfree_skb(skb);
1100
1101 return status;
1102
1103}
1104
/*
 * Flush all receive buffers queued on @ep, completing each with
 * -ECANCELED.  rx_lock is dropped around each completion indication
 * because the RX callback may re-enter HTC.
 */
static void htc_flush_rx_queue(struct htc_target *target,
			       struct htc_endpoint *ep)
{
	struct list_head container;
	struct htc_packet *packet;

	spin_lock_bh(&target->rx_lock);

	while (1) {
		if (list_empty(&ep->rx_bufq))
			break;

		packet = list_first_entry(&ep->rx_bufq,
					  struct htc_packet, list);
		list_del(&packet->list);

		/* unlock across the callback; the packet is already
		 * unlinked so no one else can touch it */
		spin_unlock_bh(&target->rx_lock);
		packet->status = -ECANCELED;
		packet->act_len = 0;

		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "Flushing RX packet:0x%p, length:%d, ep:%d\n",
			   packet, packet->buf_len,
			   packet->endpoint);

		INIT_LIST_HEAD(&container);
		list_add_tail(&packet->list, &container);

		/* give the packet back */
		do_recv_completion(ep, &container);
		spin_lock_bh(&target->rx_lock);
	}

	spin_unlock_bh(&target->rx_lock);
}
1140
1141/* polling routine to wait for a control packet to be received */
1142static int htc_wait_recv_ctrl_message(struct htc_target *target)
1143{
1144 int count = HTC_TARGET_RESPONSE_POLL_COUNT;
1145
1146 while (count > 0) {
1147 spin_lock_bh(&target->rx_lock);
1148
1149 if (target->pipe.ctrl_response_valid) {
1150 target->pipe.ctrl_response_valid = false;
1151 spin_unlock_bh(&target->rx_lock);
1152 break;
1153 }
1154
1155 spin_unlock_bh(&target->rx_lock);
1156
1157 count--;
1158
1159 msleep_interruptible(HTC_TARGET_RESPONSE_POLL_WAIT);
1160 }
1161
1162 if (count <= 0) {
1163 ath6kl_dbg(ATH6KL_DBG_HTC, "%s: Timeout!\n", __func__);
1164 return -ECOMM;
1165 }
1166
1167 return 0;
1168}
1169
/* RX-completion stub for the control endpoint; not expected to run. */
static void htc_rxctrl_complete(struct htc_target *context,
				struct htc_packet *packet)
{
	/* TODO, can't really receive HTC control messages yet.... */
	ath6kl_dbg(ATH6KL_DBG_HTC, "%s: invalid call function\n", __func__);
}
1176
1177/* htc pipe initialization */
1178static void reset_endpoint_states(struct htc_target *target)
1179{
1180 struct htc_endpoint *ep;
1181 int i;
1182
1183 for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
1184 ep = &target->endpoint[i];
1185 ep->svc_id = 0;
1186 ep->len_max = 0;
1187 ep->max_txq_depth = 0;
1188 ep->eid = i;
1189 INIT_LIST_HEAD(&ep->txq);
1190 INIT_LIST_HEAD(&ep->pipe.tx_lookup_queue);
1191 INIT_LIST_HEAD(&ep->rx_bufq);
1192 ep->target = target;
1193 ep->pipe.tx_credit_flow_enabled = (bool) 1; /* FIXME */
1194 }
1195}
1196
1197/* start HTC, this is called after all services are connected */
/* Placeholder: no HIF-pipe configuration is needed at start; always 0. */
static int htc_config_target_hif_pipe(struct htc_target *target)
{
	return 0;
}
1202
1203/* htc service functions */
1204static u8 htc_get_credit_alloc(struct htc_target *target, u16 service_id)
1205{
1206 u8 allocation = 0;
1207 int i;
1208
1209 for (i = 0; i < ENDPOINT_MAX; i++) {
1210 if (target->pipe.txcredit_alloc[i].service_id == service_id)
1211 allocation =
1212 target->pipe.txcredit_alloc[i].credit_alloc;
1213 }
1214
1215 if (allocation == 0) {
1216 ath6kl_dbg(ATH6KL_DBG_HTC,
1217 "HTC Service TX : 0x%2.2X : allocation is zero!\n",
1218 service_id);
1219 }
1220
1221 return allocation;
1222}
1223
/*
 * Connect a service to an endpoint.
 *
 * For the pseudo control service (HTC_CTRL_RSVD_SVC) ENDPOINT_0 is
 * assigned directly.  For real services a CONNECT_SERVICE control
 * message is sent to the target on ENDPOINT_0 and the response (polled
 * via htc_wait_recv_ctrl_message()) supplies the endpoint id and max
 * message size.  On success the endpoint is initialised (credits, max
 * queue depth, callbacks) and mapped to its UL/DL HIF pipes.
 *
 * Returns 0 on success or a negative errno; @conn_resp carries the
 * assigned endpoint, max length, and the target's response code.
 */
static int ath6kl_htc_pipe_conn_service(struct htc_target *target,
		    struct htc_service_connect_req *conn_req,
		    struct htc_service_connect_resp *conn_resp)
{
	struct ath6kl *ar = target->dev->ar;
	struct htc_packet *packet = NULL;
	struct htc_conn_service_resp *resp_msg;
	struct htc_conn_service_msg *conn_msg;
	enum htc_endpoint_id assigned_epid = ENDPOINT_MAX;
	bool disable_credit_flowctrl = false;
	unsigned int max_msg_size = 0;
	struct htc_endpoint *ep;
	int length, status = 0;
	struct sk_buff *skb;
	u8 tx_alloc;
	u16 flags;

	if (conn_req->svc_id == 0) {
		WARN_ON_ONCE(1);
		status = -EINVAL;
		goto free_packet;
	}

	if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
		/* special case for pseudo control service */
		assigned_epid = ENDPOINT_0;
		max_msg_size = HTC_MAX_CTRL_MSG_LEN;
		tx_alloc = 0;

	} else {

		tx_alloc = htc_get_credit_alloc(target, conn_req->svc_id);
		if (tx_alloc == 0) {
			status = -ENOMEM;
			goto free_packet;
		}

		/* allocate a packet to send to the target */
		packet = htc_alloc_txctrl_packet(target);

		if (packet == NULL) {
			WARN_ON_ONCE(1);
			status = -ENOMEM;
			goto free_packet;
		}

		skb = packet->skb;
		length = sizeof(struct htc_conn_service_msg);

		/* assemble connect service message */
		conn_msg = (struct htc_conn_service_msg *) skb_put(skb,
								   length);
		if (conn_msg == NULL) {
			WARN_ON_ONCE(1);
			status = -EINVAL;
			goto free_packet;
		}

		memset(conn_msg, 0,
		       sizeof(struct htc_conn_service_msg));
		conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
		conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
		conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags &
					~HTC_CONN_FLGS_SET_RECV_ALLOC_MASK);

		/* tell target desired recv alloc for this ep */
		flags = tx_alloc << HTC_CONN_FLGS_SET_RECV_ALLOC_SHIFT;
		conn_msg->conn_flags |= cpu_to_le16(flags);

		if (conn_req->conn_flags &
		    HTC_CONN_FLGS_DISABLE_CRED_FLOW_CTRL) {
			disable_credit_flowctrl = true;
		}

		set_htc_pkt_info(packet, NULL, (u8 *) conn_msg,
				 length,
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

		status = ath6kl_htc_pipe_tx(target, packet);

		/* we don't own it anymore */
		packet = NULL;
		if (status != 0)
			goto free_packet;

		/* wait for response */
		status = htc_wait_recv_ctrl_message(target);
		if (status != 0)
			goto free_packet;

		/* we controlled the buffer creation so it has to be
		 * properly aligned
		 */
		resp_msg = (struct htc_conn_service_resp *)
		    target->pipe.ctrl_response_buf;

		if (resp_msg->msg_id != cpu_to_le16(HTC_MSG_CONN_SVC_RESP_ID) ||
		    (target->pipe.ctrl_response_len < sizeof(*resp_msg))) {
			/* this message is not valid */
			WARN_ON_ONCE(1);
			status = -EINVAL;
			goto free_packet;
		}

		ath6kl_dbg(ATH6KL_DBG_TRC,
			   "%s: service 0x%X conn resp: status: %d ep: %d\n",
			   __func__, resp_msg->svc_id, resp_msg->status,
			   resp_msg->eid);

		conn_resp->resp_code = resp_msg->status;
		/* check response status */
		if (resp_msg->status != HTC_SERVICE_SUCCESS) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "Target failed service 0x%X connect request (status:%d)\n",
				   resp_msg->svc_id, resp_msg->status);
			status = -EINVAL;
			goto free_packet;
		}

		assigned_epid = (enum htc_endpoint_id) resp_msg->eid;
		max_msg_size = le16_to_cpu(resp_msg->max_msg_sz);
	}

	/* the rest are parameter checks so set the error status */
	status = -EINVAL;

	if (assigned_epid >= ENDPOINT_MAX) {
		WARN_ON_ONCE(1);
		goto free_packet;
	}

	if (max_msg_size == 0) {
		WARN_ON_ONCE(1);
		goto free_packet;
	}

	ep = &target->endpoint[assigned_epid];
	ep->eid = assigned_epid;
	if (ep->svc_id != 0) {
		/* endpoint already in use! */
		WARN_ON_ONCE(1);
		goto free_packet;
	}

	/* return assigned endpoint to caller */
	conn_resp->endpoint = assigned_epid;
	conn_resp->len_max = max_msg_size;

	/* setup the endpoint */
	ep->svc_id = conn_req->svc_id; /* this marks ep in use */
	ep->max_txq_depth = conn_req->max_txq_depth;
	ep->len_max = max_msg_size;
	ep->cred_dist.credits = tx_alloc;
	ep->cred_dist.cred_sz = target->tgt_cred_sz;
	/* credits per message, rounded up */
	ep->cred_dist.cred_per_msg = max_msg_size / target->tgt_cred_sz;
	if (max_msg_size % target->tgt_cred_sz)
		ep->cred_dist.cred_per_msg++;

	/* copy all the callbacks */
	ep->ep_cb = conn_req->ep_cb;

	status = ath6kl_hif_pipe_map_service(ar, ep->svc_id,
					     &ep->pipe.pipeid_ul,
					     &ep->pipe.pipeid_dl);
	if (status != 0)
		goto free_packet;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "SVC Ready: 0x%4.4X: ULpipe:%d DLpipe:%d id:%d\n",
		   ep->svc_id, ep->pipe.pipeid_ul,
		   ep->pipe.pipeid_dl, ep->eid);

	if (disable_credit_flowctrl && ep->pipe.tx_credit_flow_enabled) {
		ep->pipe.tx_credit_flow_enabled = false;
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "SVC: 0x%4.4X ep:%d TX flow control off\n",
			   ep->svc_id, assigned_epid);
	}

free_packet:
	if (packet != NULL)
		htc_free_txctrl_packet(target, packet);
	return status;
}
1408
1409/* htc export functions */
/*
 * Allocate and initialise an HTC-pipe target instance: locks, endpoint
 * state, the packet-container pool, the device wrapper, and the default
 * pipe pair for ENDPOINT_0.
 *
 * Returns the target, or NULL on allocation failure (partial state is
 * torn down via ath6kl_htc_pipe_cleanup()).
 */
static void *ath6kl_htc_pipe_create(struct ath6kl *ar)
{
	int status = 0;
	struct htc_endpoint *ep = NULL;
	struct htc_target *target = NULL;
	struct htc_packet *packet;
	int i;

	target = kzalloc(sizeof(struct htc_target), GFP_KERNEL);
	if (target == NULL) {
		ath6kl_err("htc create unable to allocate memory\n");
		status = -ENOMEM;
		goto fail_htc_create;
	}

	spin_lock_init(&target->htc_lock);
	spin_lock_init(&target->rx_lock);
	spin_lock_init(&target->tx_lock);

	reset_endpoint_states(target);

	/* pre-fill the container pool; individual allocation failures are
	 * tolerated and just leave the pool smaller */
	for (i = 0; i < HTC_PACKET_CONTAINER_ALLOCATION; i++) {
		packet = kzalloc(sizeof(struct htc_packet), GFP_KERNEL);

		if (packet != NULL)
			free_htc_packet_container(target, packet);
	}

	target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
	if (!target->dev) {
		ath6kl_err("unable to allocate memory\n");
		status = -ENOMEM;
		goto fail_htc_create;
	}
	target->dev->ar = ar;
	target->dev->htc_cnxt = target;

	/* Get HIF default pipe for HTC message exchange */
	ep = &target->endpoint[ENDPOINT_0];

	ath6kl_hif_pipe_get_default(ar, &ep->pipe.pipeid_ul,
				    &ep->pipe.pipeid_dl);

	return target;

fail_htc_create:
	if (status != 0) {
		if (target != NULL)
			ath6kl_htc_pipe_cleanup(target);

		target = NULL;
	}
	return target;
}
1464
1465/* cleanup the HTC instance */
1466static void ath6kl_htc_pipe_cleanup(struct htc_target *target)
1467{
1468 struct htc_packet *packet;
1469
1470 while (true) {
1471 packet = alloc_htc_packet_container(target);
1472 if (packet == NULL)
1473 break;
1474 kfree(packet);
1475 }
1476
1477 kfree(target->dev);
1478
1479 /* kfree our instance */
1480 kfree(target);
1481}
1482
1483static int ath6kl_htc_pipe_start(struct htc_target *target)
1484{
1485 struct sk_buff *skb;
1486 struct htc_setup_comp_ext_msg *setup;
1487 struct htc_packet *packet;
1488
1489 htc_config_target_hif_pipe(target);
1490
1491 /* allocate a buffer to send */
1492 packet = htc_alloc_txctrl_packet(target);
1493 if (packet == NULL) {
1494 WARN_ON_ONCE(1);
1495 return -ENOMEM;
1496 }
1497
1498 skb = packet->skb;
1499
1500 /* assemble setup complete message */
1501 setup = (struct htc_setup_comp_ext_msg *) skb_put(skb,
1502 sizeof(*setup));
1503 memset(setup, 0, sizeof(struct htc_setup_comp_ext_msg));
1504 setup->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);
1505
1506 ath6kl_dbg(ATH6KL_DBG_HTC, "HTC using TX credit flow control\n");
1507
1508 set_htc_pkt_info(packet, NULL, (u8 *) setup,
1509 sizeof(struct htc_setup_comp_ext_msg),
1510 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
1511
1512 target->htc_flags |= HTC_OP_STATE_SETUP_COMPLETE;
1513
1514 return ath6kl_htc_pipe_tx(target, packet);
1515}
1516
1517static void ath6kl_htc_pipe_stop(struct htc_target *target)
1518{
1519 int i;
1520 struct htc_endpoint *ep;
1521
1522 /* cleanup endpoints */
1523 for (i = 0; i < ENDPOINT_MAX; i++) {
1524 ep = &target->endpoint[i];
1525 htc_flush_rx_queue(target, ep);
1526 htc_flush_tx_endpoint(target, ep, HTC_TX_PACKET_TAG_ALL);
1527 }
1528
1529 reset_endpoint_states(target);
1530 target->htc_flags &= ~HTC_OP_STATE_SETUP_COMPLETE;
1531}
1532
1533static int ath6kl_htc_pipe_get_rxbuf_num(struct htc_target *target,
1534 enum htc_endpoint_id endpoint)
1535{
1536 int num;
1537
1538 spin_lock_bh(&target->rx_lock);
1539 num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
1540 spin_unlock_bh(&target->rx_lock);
1541
1542 return num;
1543}
1544
1545static int ath6kl_htc_pipe_tx(struct htc_target *target,
1546 struct htc_packet *packet)
1547{
1548 struct list_head queue;
1549
1550 ath6kl_dbg(ATH6KL_DBG_HTC,
1551 "%s: endPointId: %d, buffer: 0x%p, length: %d\n",
1552 __func__, packet->endpoint, packet->buf,
1553 packet->act_len);
1554
1555 INIT_LIST_HEAD(&queue);
1556 list_add_tail(&packet->list, &queue);
1557
1558 return htc_send_packets_multiple(target, &queue);
1559}
1560
1561static int ath6kl_htc_pipe_wait_target(struct htc_target *target)
1562{
1563 struct htc_ready_ext_msg *ready_msg;
1564 struct htc_service_connect_req connect;
1565 struct htc_service_connect_resp resp;
1566 int status = 0;
1567
1568 status = htc_wait_recv_ctrl_message(target);
1569
1570 if (status != 0)
1571 return status;
1572
1573 if (target->pipe.ctrl_response_len < sizeof(*ready_msg)) {
1574 ath6kl_dbg(ATH6KL_DBG_HTC, "invalid htc ready msg len:%d!\n",
1575 target->pipe.ctrl_response_len);
1576 return -ECOMM;
1577 }
1578
1579 ready_msg = (struct htc_ready_ext_msg *) target->pipe.ctrl_response_buf;
1580
1581 if (ready_msg->ver2_0_info.msg_id != cpu_to_le16(HTC_MSG_READY_ID)) {
1582 ath6kl_dbg(ATH6KL_DBG_HTC, "invalid htc ready msg : 0x%X !\n",
1583 ready_msg->ver2_0_info.msg_id);
1584 return -ECOMM;
1585 }
1586
1587 ath6kl_dbg(ATH6KL_DBG_HTC,
1588 "Target Ready! : transmit resources : %d size:%d\n",
1589 ready_msg->ver2_0_info.cred_cnt,
1590 ready_msg->ver2_0_info.cred_sz);
1591
1592 target->tgt_creds = le16_to_cpu(ready_msg->ver2_0_info.cred_cnt);
1593 target->tgt_cred_sz = le16_to_cpu(ready_msg->ver2_0_info.cred_sz);
1594
1595 if ((target->tgt_creds == 0) || (target->tgt_cred_sz == 0))
1596 return -ECOMM;
1597
1598 htc_setup_target_buffer_assignments(target);
1599
1600 /* setup our pseudo HTC control endpoint connection */
1601 memset(&connect, 0, sizeof(connect));
1602 memset(&resp, 0, sizeof(resp));
1603 connect.ep_cb.tx_complete = htc_txctrl_complete;
1604 connect.ep_cb.rx = htc_rxctrl_complete;
1605 connect.max_txq_depth = NUM_CONTROL_TX_BUFFERS;
1606 connect.svc_id = HTC_CTRL_RSVD_SVC;
1607
1608 /* connect fake service */
1609 status = ath6kl_htc_pipe_conn_service(target, &connect, &resp);
1610
1611 return status;
1612}
1613
1614static void ath6kl_htc_pipe_flush_txep(struct htc_target *target,
1615 enum htc_endpoint_id endpoint, u16 tag)
1616{
1617 struct htc_endpoint *ep = &target->endpoint[endpoint];
1618
1619 if (ep->svc_id == 0) {
1620 WARN_ON_ONCE(1);
1621 /* not in use.. */
1622 return;
1623 }
1624
1625 htc_flush_tx_endpoint(target, ep, tag);
1626}
1627
1628static int ath6kl_htc_pipe_add_rxbuf_multiple(struct htc_target *target,
1629 struct list_head *pkt_queue)
1630{
1631 struct htc_packet *packet, *tmp_pkt, *first;
1632 struct htc_endpoint *ep;
1633 int status = 0;
1634
1635 if (list_empty(pkt_queue))
1636 return -EINVAL;
1637
1638 first = list_first_entry(pkt_queue, struct htc_packet, list);
1639 if (first == NULL) {
1640 WARN_ON_ONCE(1);
1641 return -EINVAL;
1642 }
1643
1644 if (first->endpoint >= ENDPOINT_MAX) {
1645 WARN_ON_ONCE(1);
1646 return -EINVAL;
1647 }
1648
1649 ath6kl_dbg(ATH6KL_DBG_HTC, "%s: epid: %d, cnt:%d, len: %d\n",
1650 __func__, first->endpoint, get_queue_depth(pkt_queue),
1651 first->buf_len);
1652
1653 ep = &target->endpoint[first->endpoint];
1654
1655 spin_lock_bh(&target->rx_lock);
1656
1657 /* store receive packets */
1658 list_splice_tail_init(pkt_queue, &ep->rx_bufq);
1659
1660 spin_unlock_bh(&target->rx_lock);
1661
1662 if (status != 0) {
1663 /* walk through queue and mark each one canceled */
1664 list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
1665 packet->status = -ECANCELED;
1666 }
1667
1668 do_recv_completion(ep, pkt_queue);
1669 }
1670
1671 return status;
1672}
1673
/* endpoint activity notifications are not acted on by the pipe HTC yet */
static void ath6kl_htc_pipe_activity_changed(struct htc_target *target,
					     enum htc_endpoint_id ep,
					     bool active)
{
	/* TODO */
}
1680
/* flushing of queued RX buffers is not implemented for the pipe HTC yet */
static void ath6kl_htc_pipe_flush_rx_buf(struct htc_target *target)
{
	/* TODO */
}
1685
static int ath6kl_htc_pipe_credit_setup(struct htc_target *target,
					struct ath6kl_htc_credit_info *info)
{
	/* no host-side credit setup is performed here; always succeeds */
	return 0;
}
1691
/* HTC ops vtable for the pipe (USB HIF) implementation */
static const struct ath6kl_htc_ops ath6kl_htc_pipe_ops = {
	.create = ath6kl_htc_pipe_create,
	.wait_target = ath6kl_htc_pipe_wait_target,
	.start = ath6kl_htc_pipe_start,
	.conn_service = ath6kl_htc_pipe_conn_service,
	.tx = ath6kl_htc_pipe_tx,
	.stop = ath6kl_htc_pipe_stop,
	.cleanup = ath6kl_htc_pipe_cleanup,
	.flush_txep = ath6kl_htc_pipe_flush_txep,
	.flush_rx_buf = ath6kl_htc_pipe_flush_rx_buf,
	.activity_changed = ath6kl_htc_pipe_activity_changed,
	.get_rxbuf_num = ath6kl_htc_pipe_get_rxbuf_num,
	.add_rxbuf_multiple = ath6kl_htc_pipe_add_rxbuf_multiple,
	.credit_setup = ath6kl_htc_pipe_credit_setup,
	.tx_complete = ath6kl_htc_pipe_tx_complete,
	.rx_complete = ath6kl_htc_pipe_rx_complete,
};
1709
/* install the pipe-based HTC ops on this device instance */
void ath6kl_htc_pipe_attach(struct ath6kl *ar)
{
	ar->htc_ops = &ath6kl_htc_pipe_ops;
}
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 03cae142f178..29ef50ea07d5 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -16,17 +16,21 @@
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */ 17 */
18 18
19#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
19#include <linux/moduleparam.h> 21#include <linux/moduleparam.h>
20#include <linux/errno.h> 22#include <linux/errno.h>
21#include <linux/export.h> 23#include <linux/export.h>
22#include <linux/of.h> 24#include <linux/of.h>
23#include <linux/mmc/sdio_func.h> 25#include <linux/mmc/sdio_func.h>
26#include <linux/vmalloc.h>
24 27
25#include "core.h" 28#include "core.h"
26#include "cfg80211.h" 29#include "cfg80211.h"
27#include "target.h" 30#include "target.h"
28#include "debug.h" 31#include "debug.h"
29#include "hif-ops.h" 32#include "hif-ops.h"
33#include "htc-ops.h"
30 34
31static const struct ath6kl_hw hw_list[] = { 35static const struct ath6kl_hw hw_list[] = {
32 { 36 {
@@ -256,6 +260,7 @@ static int ath6kl_init_service_ep(struct ath6kl *ar)
256 memset(&connect, 0, sizeof(connect)); 260 memset(&connect, 0, sizeof(connect));
257 261
258 /* these fields are the same for all service endpoints */ 262 /* these fields are the same for all service endpoints */
263 connect.ep_cb.tx_comp_multi = ath6kl_tx_complete;
259 connect.ep_cb.rx = ath6kl_rx; 264 connect.ep_cb.rx = ath6kl_rx;
260 connect.ep_cb.rx_refill = ath6kl_rx_refill; 265 connect.ep_cb.rx_refill = ath6kl_rx_refill;
261 connect.ep_cb.tx_full = ath6kl_tx_queue_full; 266 connect.ep_cb.tx_full = ath6kl_tx_queue_full;
@@ -485,22 +490,31 @@ int ath6kl_configure_target(struct ath6kl *ar)
485 fw_mode |= fw_iftype << (i * HI_OPTION_FW_MODE_BITS); 490 fw_mode |= fw_iftype << (i * HI_OPTION_FW_MODE_BITS);
486 491
487 /* 492 /*
488 * By default, submodes : 493 * Submodes when fw does not support dynamic interface
494 * switching:
489 * vif[0] - AP/STA/IBSS 495 * vif[0] - AP/STA/IBSS
490 * vif[1] - "P2P dev"/"P2P GO"/"P2P Client" 496 * vif[1] - "P2P dev"/"P2P GO"/"P2P Client"
491 * vif[2] - "P2P dev"/"P2P GO"/"P2P Client" 497 * vif[2] - "P2P dev"/"P2P GO"/"P2P Client"
498 * Otherwise, All the interface are initialized to p2p dev.
492 */ 499 */
493 500
494 for (i = 0; i < ar->max_norm_iface; i++) 501 if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
495 fw_submode |= HI_OPTION_FW_SUBMODE_NONE << 502 ar->fw_capabilities)) {
496 (i * HI_OPTION_FW_SUBMODE_BITS); 503 for (i = 0; i < ar->vif_max; i++)
504 fw_submode |= HI_OPTION_FW_SUBMODE_P2PDEV <<
505 (i * HI_OPTION_FW_SUBMODE_BITS);
506 } else {
507 for (i = 0; i < ar->max_norm_iface; i++)
508 fw_submode |= HI_OPTION_FW_SUBMODE_NONE <<
509 (i * HI_OPTION_FW_SUBMODE_BITS);
497 510
498 for (i = ar->max_norm_iface; i < ar->vif_max; i++) 511 for (i = ar->max_norm_iface; i < ar->vif_max; i++)
499 fw_submode |= HI_OPTION_FW_SUBMODE_P2PDEV << 512 fw_submode |= HI_OPTION_FW_SUBMODE_P2PDEV <<
500 (i * HI_OPTION_FW_SUBMODE_BITS); 513 (i * HI_OPTION_FW_SUBMODE_BITS);
501 514
502 if (ar->p2p && ar->vif_max == 1) 515 if (ar->p2p && ar->vif_max == 1)
503 fw_submode = HI_OPTION_FW_SUBMODE_P2PDEV; 516 fw_submode = HI_OPTION_FW_SUBMODE_P2PDEV;
517 }
504 518
505 if (ath6kl_bmi_write_hi32(ar, hi_app_host_interest, 519 if (ath6kl_bmi_write_hi32(ar, hi_app_host_interest,
506 HTC_PROTOCOL_VERSION) != 0) { 520 HTC_PROTOCOL_VERSION) != 0) {
@@ -539,18 +553,20 @@ int ath6kl_configure_target(struct ath6kl *ar)
539 * but possible in theory. 553 * but possible in theory.
540 */ 554 */
541 555
542 param = ar->hw.board_ext_data_addr; 556 if (ar->target_type == TARGET_TYPE_AR6003) {
543 ram_reserved_size = ar->hw.reserved_ram_size; 557 param = ar->hw.board_ext_data_addr;
558 ram_reserved_size = ar->hw.reserved_ram_size;
544 559
545 if (ath6kl_bmi_write_hi32(ar, hi_board_ext_data, param) != 0) { 560 if (ath6kl_bmi_write_hi32(ar, hi_board_ext_data, param) != 0) {
546 ath6kl_err("bmi_write_memory for hi_board_ext_data failed\n"); 561 ath6kl_err("bmi_write_memory for hi_board_ext_data failed\n");
547 return -EIO; 562 return -EIO;
548 } 563 }
549 564
550 if (ath6kl_bmi_write_hi32(ar, hi_end_ram_reserve_sz, 565 if (ath6kl_bmi_write_hi32(ar, hi_end_ram_reserve_sz,
551 ram_reserved_size) != 0) { 566 ram_reserved_size) != 0) {
552 ath6kl_err("bmi_write_memory for hi_end_ram_reserve_sz failed\n"); 567 ath6kl_err("bmi_write_memory for hi_end_ram_reserve_sz failed\n");
553 return -EIO; 568 return -EIO;
569 }
554 } 570 }
555 571
556 /* set the block size for the target */ 572 /* set the block size for the target */
@@ -924,13 +940,14 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name)
924 if (ar->fw != NULL) 940 if (ar->fw != NULL)
925 break; 941 break;
926 942
927 ar->fw = kmemdup(data, ie_len, GFP_KERNEL); 943 ar->fw = vmalloc(ie_len);
928 944
929 if (ar->fw == NULL) { 945 if (ar->fw == NULL) {
930 ret = -ENOMEM; 946 ret = -ENOMEM;
931 goto out; 947 goto out;
932 } 948 }
933 949
950 memcpy(ar->fw, data, ie_len);
934 ar->fw_len = ie_len; 951 ar->fw_len = ie_len;
935 break; 952 break;
936 case ATH6KL_FW_IE_PATCH_IMAGE: 953 case ATH6KL_FW_IE_PATCH_IMAGE:
@@ -1507,7 +1524,7 @@ int ath6kl_init_hw_start(struct ath6kl *ar)
1507 } 1524 }
1508 1525
1509 /* setup credit distribution */ 1526 /* setup credit distribution */
1510 ath6kl_credit_setup(ar->htc_target, &ar->credit_state_info); 1527 ath6kl_htc_credit_setup(ar->htc_target, &ar->credit_state_info);
1511 1528
1512 /* start HTC */ 1529 /* start HTC */
1513 ret = ath6kl_htc_start(ar->htc_target); 1530 ret = ath6kl_htc_start(ar->htc_target);
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index 229e1922ebe4..4d818f96c415 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -15,6 +15,8 @@
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */ 16 */
17 17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
18#include "core.h" 20#include "core.h"
19#include "hif-ops.h" 21#include "hif-ops.h"
20#include "cfg80211.h" 22#include "cfg80211.h"
@@ -756,6 +758,10 @@ static void ath6kl_update_target_stats(struct ath6kl_vif *vif, u8 *ptr, u32 len)
756 stats->wow_evt_discarded += 758 stats->wow_evt_discarded +=
757 le16_to_cpu(tgt_stats->wow_stats.wow_evt_discarded); 759 le16_to_cpu(tgt_stats->wow_stats.wow_evt_discarded);
758 760
761 stats->arp_received = le32_to_cpu(tgt_stats->arp_stats.arp_received);
762 stats->arp_replied = le32_to_cpu(tgt_stats->arp_stats.arp_replied);
763 stats->arp_matched = le32_to_cpu(tgt_stats->arp_stats.arp_matched);
764
759 if (test_bit(STATS_UPDATE_PEND, &vif->flags)) { 765 if (test_bit(STATS_UPDATE_PEND, &vif->flags)) {
760 clear_bit(STATS_UPDATE_PEND, &vif->flags); 766 clear_bit(STATS_UPDATE_PEND, &vif->flags);
761 wake_up(&ar->event_wq); 767 wake_up(&ar->event_wq);
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index 53528648b425..44ea7a742101 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -1362,7 +1362,7 @@ static int ath6kl_sdio_probe(struct sdio_func *func,
1362 goto err_core_alloc; 1362 goto err_core_alloc;
1363 } 1363 }
1364 1364
1365 ret = ath6kl_core_init(ar); 1365 ret = ath6kl_core_init(ar, ATH6KL_HTC_TYPE_MBOX);
1366 if (ret) { 1366 if (ret) {
1367 ath6kl_err("Failed to init ath6kl core\n"); 1367 ath6kl_err("Failed to init ath6kl core\n");
1368 goto err_core_alloc; 1368 goto err_core_alloc;
diff --git a/drivers/net/wireless/ath/ath6kl/testmode.c b/drivers/net/wireless/ath/ath6kl/testmode.c
index 6675c92b542b..acc9aa832f76 100644
--- a/drivers/net/wireless/ath/ath6kl/testmode.c
+++ b/drivers/net/wireless/ath/ath6kl/testmode.c
@@ -55,8 +55,9 @@ void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf, size_t buf_len)
55 ath6kl_warn("failed to allocate testmode rx skb!\n"); 55 ath6kl_warn("failed to allocate testmode rx skb!\n");
56 return; 56 return;
57 } 57 }
58 NLA_PUT_U32(skb, ATH6KL_TM_ATTR_CMD, ATH6KL_TM_CMD_TCMD); 58 if (nla_put_u32(skb, ATH6KL_TM_ATTR_CMD, ATH6KL_TM_CMD_TCMD) ||
59 NLA_PUT(skb, ATH6KL_TM_ATTR_DATA, buf_len, buf); 59 nla_put(skb, ATH6KL_TM_ATTR_DATA, buf_len, buf))
60 goto nla_put_failure;
60 cfg80211_testmode_event(skb, GFP_KERNEL); 61 cfg80211_testmode_event(skb, GFP_KERNEL);
61 return; 62 return;
62 63
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index f85353fd1792..82f2f5cb475b 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -15,8 +15,11 @@
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */ 16 */
17 17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
18#include "core.h" 20#include "core.h"
19#include "debug.h" 21#include "debug.h"
22#include "htc-ops.h"
20 23
21/* 24/*
22 * tid - tid_mux0..tid_mux3 25 * tid - tid_mux0..tid_mux3
@@ -322,6 +325,7 @@ int ath6kl_control_tx(void *devt, struct sk_buff *skb,
322 cookie->map_no = 0; 325 cookie->map_no = 0;
323 set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len, 326 set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
324 eid, ATH6KL_CONTROL_PKT_TAG); 327 eid, ATH6KL_CONTROL_PKT_TAG);
328 cookie->htc_pkt.skb = skb;
325 329
326 /* 330 /*
327 * This interface is asynchronous, if there is an error, cleanup 331 * This interface is asynchronous, if there is an error, cleanup
@@ -490,6 +494,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
490 cookie->map_no = map_no; 494 cookie->map_no = map_no;
491 set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len, 495 set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
492 eid, htc_tag); 496 eid, htc_tag);
497 cookie->htc_pkt.skb = skb;
493 498
494 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ", 499 ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ",
495 skb->data, skb->len); 500 skb->data, skb->len);
@@ -570,7 +575,7 @@ void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
570 575
571notify_htc: 576notify_htc:
572 /* notify HTC, this may cause credit distribution changes */ 577 /* notify HTC, this may cause credit distribution changes */
573 ath6kl_htc_indicate_activity_change(ar->htc_target, eid, active); 578 ath6kl_htc_activity_changed(ar->htc_target, eid, active);
574} 579}
575 580
576enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target, 581enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
@@ -666,9 +671,10 @@ static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif,
666 } 671 }
667} 672}
668 673
669void ath6kl_tx_complete(void *context, struct list_head *packet_queue) 674void ath6kl_tx_complete(struct htc_target *target,
675 struct list_head *packet_queue)
670{ 676{
671 struct ath6kl *ar = context; 677 struct ath6kl *ar = target->dev->ar;
672 struct sk_buff_head skb_queue; 678 struct sk_buff_head skb_queue;
673 struct htc_packet *packet; 679 struct htc_packet *packet;
674 struct sk_buff *skb; 680 struct sk_buff *skb;
@@ -887,6 +893,7 @@ void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
887 skb->data = PTR_ALIGN(skb->data - 4, 4); 893 skb->data = PTR_ALIGN(skb->data - 4, 4);
888 set_htc_rxpkt_info(packet, skb, skb->data, 894 set_htc_rxpkt_info(packet, skb, skb->data,
889 ATH6KL_BUFFER_SIZE, endpoint); 895 ATH6KL_BUFFER_SIZE, endpoint);
896 packet->skb = skb;
890 list_add_tail(&packet->list, &queue); 897 list_add_tail(&packet->list, &queue);
891 } 898 }
892 899
@@ -909,6 +916,8 @@ void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
909 skb->data = PTR_ALIGN(skb->data - 4, 4); 916 skb->data = PTR_ALIGN(skb->data - 4, 4);
910 set_htc_rxpkt_info(packet, skb, skb->data, 917 set_htc_rxpkt_info(packet, skb, skb->data,
911 ATH6KL_AMSDU_BUFFER_SIZE, 0); 918 ATH6KL_AMSDU_BUFFER_SIZE, 0);
919 packet->skb = skb;
920
912 spin_lock_bh(&ar->lock); 921 spin_lock_bh(&ar->lock);
913 list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue); 922 list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
914 spin_unlock_bh(&ar->lock); 923 spin_unlock_bh(&ar->lock);
@@ -1281,6 +1290,7 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
1281 struct wmi_data_hdr *dhdr; 1290 struct wmi_data_hdr *dhdr;
1282 int min_hdr_len; 1291 int min_hdr_len;
1283 u8 meta_type, dot11_hdr = 0; 1292 u8 meta_type, dot11_hdr = 0;
1293 u8 pad_before_data_start;
1284 int status = packet->status; 1294 int status = packet->status;
1285 enum htc_endpoint_id ept = packet->endpoint; 1295 enum htc_endpoint_id ept = packet->endpoint;
1286 bool is_amsdu, prev_ps, ps_state = false; 1296 bool is_amsdu, prev_ps, ps_state = false;
@@ -1492,6 +1502,10 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
1492 seq_no = wmi_data_hdr_get_seqno(dhdr); 1502 seq_no = wmi_data_hdr_get_seqno(dhdr);
1493 meta_type = wmi_data_hdr_get_meta(dhdr); 1503 meta_type = wmi_data_hdr_get_meta(dhdr);
1494 dot11_hdr = wmi_data_hdr_get_dot11(dhdr); 1504 dot11_hdr = wmi_data_hdr_get_dot11(dhdr);
1505 pad_before_data_start =
1506 (le16_to_cpu(dhdr->info3) >> WMI_DATA_HDR_PAD_BEFORE_DATA_SHIFT)
1507 & WMI_DATA_HDR_PAD_BEFORE_DATA_MASK;
1508
1495 skb_pull(skb, sizeof(struct wmi_data_hdr)); 1509 skb_pull(skb, sizeof(struct wmi_data_hdr));
1496 1510
1497 switch (meta_type) { 1511 switch (meta_type) {
@@ -1510,6 +1524,8 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
1510 break; 1524 break;
1511 } 1525 }
1512 1526
1527 skb_pull(skb, pad_before_data_start);
1528
1513 if (dot11_hdr) 1529 if (dot11_hdr)
1514 status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb); 1530 status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
1515 else if (!is_amsdu) 1531 else if (!is_amsdu)
@@ -1579,7 +1595,8 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
1579 /* aggregation code will handle the skb */ 1595 /* aggregation code will handle the skb */
1580 return; 1596 return;
1581 } 1597 }
1582 } 1598 } else if (!is_broadcast_ether_addr(datap->h_dest))
1599 vif->net_stats.multicast++;
1583 1600
1584 ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb); 1601 ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
1585} 1602}
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
index 325b1224c2b1..ec7f1f5fd1ca 100644
--- a/drivers/net/wireless/ath/ath6kl/usb.c
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -21,15 +21,77 @@
21#include "debug.h" 21#include "debug.h"
22#include "core.h" 22#include "core.h"
23 23
24/* constants */
25#define TX_URB_COUNT 32
26#define RX_URB_COUNT 32
27#define ATH6KL_USB_RX_BUFFER_SIZE 1700
28
29/* tx/rx pipes for usb */
30enum ATH6KL_USB_PIPE_ID {
31 ATH6KL_USB_PIPE_TX_CTRL = 0,
32 ATH6KL_USB_PIPE_TX_DATA_LP,
33 ATH6KL_USB_PIPE_TX_DATA_MP,
34 ATH6KL_USB_PIPE_TX_DATA_HP,
35 ATH6KL_USB_PIPE_RX_CTRL,
36 ATH6KL_USB_PIPE_RX_DATA,
37 ATH6KL_USB_PIPE_RX_DATA2,
38 ATH6KL_USB_PIPE_RX_INT,
39 ATH6KL_USB_PIPE_MAX
40};
41
42#define ATH6KL_USB_PIPE_INVALID ATH6KL_USB_PIPE_MAX
43
44struct ath6kl_usb_pipe {
45 struct list_head urb_list_head;
46 struct usb_anchor urb_submitted;
47 u32 urb_alloc;
48 u32 urb_cnt;
49 u32 urb_cnt_thresh;
50 unsigned int usb_pipe_handle;
51 u32 flags;
52 u8 ep_address;
53 u8 logical_pipe_num;
54 struct ath6kl_usb *ar_usb;
55 u16 max_packet_size;
56 struct work_struct io_complete_work;
57 struct sk_buff_head io_comp_queue;
58 struct usb_endpoint_descriptor *ep_desc;
59};
60
61#define ATH6KL_USB_PIPE_FLAG_TX (1 << 0)
62
24/* usb device object */ 63/* usb device object */
25struct ath6kl_usb { 64struct ath6kl_usb {
65 /* protects pipe->urb_list_head and pipe->urb_cnt */
66 spinlock_t cs_lock;
67
26 struct usb_device *udev; 68 struct usb_device *udev;
27 struct usb_interface *interface; 69 struct usb_interface *interface;
70 struct ath6kl_usb_pipe pipes[ATH6KL_USB_PIPE_MAX];
28 u8 *diag_cmd_buffer; 71 u8 *diag_cmd_buffer;
29 u8 *diag_resp_buffer; 72 u8 *diag_resp_buffer;
30 struct ath6kl *ar; 73 struct ath6kl *ar;
31}; 74};
32 75
76/* usb urb object */
77struct ath6kl_urb_context {
78 struct list_head link;
79 struct ath6kl_usb_pipe *pipe;
80 struct sk_buff *skb;
81 struct ath6kl *ar;
82};
83
84/* USB endpoint definitions */
85#define ATH6KL_USB_EP_ADDR_APP_CTRL_IN 0x81
86#define ATH6KL_USB_EP_ADDR_APP_DATA_IN 0x82
87#define ATH6KL_USB_EP_ADDR_APP_DATA2_IN 0x83
88#define ATH6KL_USB_EP_ADDR_APP_INT_IN 0x84
89
90#define ATH6KL_USB_EP_ADDR_APP_CTRL_OUT 0x01
91#define ATH6KL_USB_EP_ADDR_APP_DATA_LP_OUT 0x02
92#define ATH6KL_USB_EP_ADDR_APP_DATA_MP_OUT 0x03
93#define ATH6KL_USB_EP_ADDR_APP_DATA_HP_OUT 0x04
94
33/* diagnostic command defnitions */ 95/* diagnostic command defnitions */
34#define ATH6KL_USB_CONTROL_REQ_SEND_BMI_CMD 1 96#define ATH6KL_USB_CONTROL_REQ_SEND_BMI_CMD 1
35#define ATH6KL_USB_CONTROL_REQ_RECV_BMI_RESP 2 97#define ATH6KL_USB_CONTROL_REQ_RECV_BMI_RESP 2
@@ -55,11 +117,493 @@ struct ath6kl_usb_ctrl_diag_resp_read {
55 __le32 value; 117 __le32 value;
56} __packed; 118} __packed;
57 119
120/* function declarations */
121static void ath6kl_usb_recv_complete(struct urb *urb);
122
123#define ATH6KL_USB_IS_BULK_EP(attr) (((attr) & 3) == 0x02)
124#define ATH6KL_USB_IS_INT_EP(attr) (((attr) & 3) == 0x03)
125#define ATH6KL_USB_IS_ISOC_EP(attr) (((attr) & 3) == 0x01)
126#define ATH6KL_USB_IS_DIR_IN(addr) ((addr) & 0x80)
127
128/* pipe/urb operations */
129static struct ath6kl_urb_context *
130ath6kl_usb_alloc_urb_from_pipe(struct ath6kl_usb_pipe *pipe)
131{
132 struct ath6kl_urb_context *urb_context = NULL;
133 unsigned long flags;
134
135 spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
136 if (!list_empty(&pipe->urb_list_head)) {
137 urb_context =
138 list_first_entry(&pipe->urb_list_head,
139 struct ath6kl_urb_context, link);
140 list_del(&urb_context->link);
141 pipe->urb_cnt--;
142 }
143 spin_unlock_irqrestore(&pipe->ar_usb->cs_lock, flags);
144
145 return urb_context;
146}
147
148static void ath6kl_usb_free_urb_to_pipe(struct ath6kl_usb_pipe *pipe,
149 struct ath6kl_urb_context *urb_context)
150{
151 unsigned long flags;
152
153 spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);
154 pipe->urb_cnt++;
155
156 list_add(&urb_context->link, &pipe->urb_list_head);
157 spin_unlock_irqrestore(&pipe->ar_usb->cs_lock, flags);
158}
159
160static void ath6kl_usb_cleanup_recv_urb(struct ath6kl_urb_context *urb_context)
161{
162 if (urb_context->skb != NULL) {
163 dev_kfree_skb(urb_context->skb);
164 urb_context->skb = NULL;
165 }
166
167 ath6kl_usb_free_urb_to_pipe(urb_context->pipe, urb_context);
168}
169
170static inline struct ath6kl_usb *ath6kl_usb_priv(struct ath6kl *ar)
171{
172 return ar->hif_priv;
173}
174
175/* pipe resource allocation/cleanup */
176static int ath6kl_usb_alloc_pipe_resources(struct ath6kl_usb_pipe *pipe,
177 int urb_cnt)
178{
179 struct ath6kl_urb_context *urb_context;
180 int status = 0, i;
181
182 INIT_LIST_HEAD(&pipe->urb_list_head);
183 init_usb_anchor(&pipe->urb_submitted);
184
185 for (i = 0; i < urb_cnt; i++) {
186 urb_context = kzalloc(sizeof(struct ath6kl_urb_context),
187 GFP_KERNEL);
188 if (urb_context == NULL)
189 /* FIXME: set status to -ENOMEM */
190 break;
191
192 urb_context->pipe = pipe;
193
194 /*
195 * we are only allocate the urb contexts here, the actual URB
196 * is allocated from the kernel as needed to do a transaction
197 */
198 pipe->urb_alloc++;
199 ath6kl_usb_free_urb_to_pipe(pipe, urb_context);
200 }
201
202 ath6kl_dbg(ATH6KL_DBG_USB,
203 "ath6kl usb: alloc resources lpipe:%d hpipe:0x%X urbs:%d\n",
204 pipe->logical_pipe_num, pipe->usb_pipe_handle,
205 pipe->urb_alloc);
206
207 return status;
208}
209
210static void ath6kl_usb_free_pipe_resources(struct ath6kl_usb_pipe *pipe)
211{
212 struct ath6kl_urb_context *urb_context;
213
214 if (pipe->ar_usb == NULL) {
215 /* nothing allocated for this pipe */
216 return;
217 }
218
219 ath6kl_dbg(ATH6KL_DBG_USB,
220 "ath6kl usb: free resources lpipe:%d"
221 "hpipe:0x%X urbs:%d avail:%d\n",
222 pipe->logical_pipe_num, pipe->usb_pipe_handle,
223 pipe->urb_alloc, pipe->urb_cnt);
224
225 if (pipe->urb_alloc != pipe->urb_cnt) {
226 ath6kl_dbg(ATH6KL_DBG_USB,
227 "ath6kl usb: urb leak! lpipe:%d"
228 "hpipe:0x%X urbs:%d avail:%d\n",
229 pipe->logical_pipe_num, pipe->usb_pipe_handle,
230 pipe->urb_alloc, pipe->urb_cnt);
231 }
232
233 while (true) {
234 urb_context = ath6kl_usb_alloc_urb_from_pipe(pipe);
235 if (urb_context == NULL)
236 break;
237 kfree(urb_context);
238 }
239
240}
241
242static void ath6kl_usb_cleanup_pipe_resources(struct ath6kl_usb *ar_usb)
243{
244 int i;
245
246 for (i = 0; i < ATH6KL_USB_PIPE_MAX; i++)
247 ath6kl_usb_free_pipe_resources(&ar_usb->pipes[i]);
248
249}
250
251static u8 ath6kl_usb_get_logical_pipe_num(struct ath6kl_usb *ar_usb,
252 u8 ep_address, int *urb_count)
253{
254 u8 pipe_num = ATH6KL_USB_PIPE_INVALID;
255
256 switch (ep_address) {
257 case ATH6KL_USB_EP_ADDR_APP_CTRL_IN:
258 pipe_num = ATH6KL_USB_PIPE_RX_CTRL;
259 *urb_count = RX_URB_COUNT;
260 break;
261 case ATH6KL_USB_EP_ADDR_APP_DATA_IN:
262 pipe_num = ATH6KL_USB_PIPE_RX_DATA;
263 *urb_count = RX_URB_COUNT;
264 break;
265 case ATH6KL_USB_EP_ADDR_APP_INT_IN:
266 pipe_num = ATH6KL_USB_PIPE_RX_INT;
267 *urb_count = RX_URB_COUNT;
268 break;
269 case ATH6KL_USB_EP_ADDR_APP_DATA2_IN:
270 pipe_num = ATH6KL_USB_PIPE_RX_DATA2;
271 *urb_count = RX_URB_COUNT;
272 break;
273 case ATH6KL_USB_EP_ADDR_APP_CTRL_OUT:
274 pipe_num = ATH6KL_USB_PIPE_TX_CTRL;
275 *urb_count = TX_URB_COUNT;
276 break;
277 case ATH6KL_USB_EP_ADDR_APP_DATA_LP_OUT:
278 pipe_num = ATH6KL_USB_PIPE_TX_DATA_LP;
279 *urb_count = TX_URB_COUNT;
280 break;
281 case ATH6KL_USB_EP_ADDR_APP_DATA_MP_OUT:
282 pipe_num = ATH6KL_USB_PIPE_TX_DATA_MP;
283 *urb_count = TX_URB_COUNT;
284 break;
285 case ATH6KL_USB_EP_ADDR_APP_DATA_HP_OUT:
286 pipe_num = ATH6KL_USB_PIPE_TX_DATA_HP;
287 *urb_count = TX_URB_COUNT;
288 break;
289 default:
290 /* note: there may be endpoints not currently used */
291 break;
292 }
293
294 return pipe_num;
295}
296
297static int ath6kl_usb_setup_pipe_resources(struct ath6kl_usb *ar_usb)
298{
299 struct usb_interface *interface = ar_usb->interface;
300 struct usb_host_interface *iface_desc = interface->cur_altsetting;
301 struct usb_endpoint_descriptor *endpoint;
302 struct ath6kl_usb_pipe *pipe;
303 int i, urbcount, status = 0;
304 u8 pipe_num;
305
306 ath6kl_dbg(ATH6KL_DBG_USB, "setting up USB Pipes using interface\n");
307
308 /* walk decriptors and setup pipes */
309 for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
310 endpoint = &iface_desc->endpoint[i].desc;
311
312 if (ATH6KL_USB_IS_BULK_EP(endpoint->bmAttributes)) {
313 ath6kl_dbg(ATH6KL_DBG_USB,
314 "%s Bulk Ep:0x%2.2X maxpktsz:%d\n",
315 ATH6KL_USB_IS_DIR_IN
316 (endpoint->bEndpointAddress) ?
317 "RX" : "TX", endpoint->bEndpointAddress,
318 le16_to_cpu(endpoint->wMaxPacketSize));
319 } else if (ATH6KL_USB_IS_INT_EP(endpoint->bmAttributes)) {
320 ath6kl_dbg(ATH6KL_DBG_USB,
321 "%s Int Ep:0x%2.2X maxpktsz:%d interval:%d\n",
322 ATH6KL_USB_IS_DIR_IN
323 (endpoint->bEndpointAddress) ?
324 "RX" : "TX", endpoint->bEndpointAddress,
325 le16_to_cpu(endpoint->wMaxPacketSize),
326 endpoint->bInterval);
327 } else if (ATH6KL_USB_IS_ISOC_EP(endpoint->bmAttributes)) {
328 /* TODO for ISO */
329 ath6kl_dbg(ATH6KL_DBG_USB,
330 "%s ISOC Ep:0x%2.2X maxpktsz:%d interval:%d\n",
331 ATH6KL_USB_IS_DIR_IN
332 (endpoint->bEndpointAddress) ?
333 "RX" : "TX", endpoint->bEndpointAddress,
334 le16_to_cpu(endpoint->wMaxPacketSize),
335 endpoint->bInterval);
336 }
337 urbcount = 0;
338
339 pipe_num =
340 ath6kl_usb_get_logical_pipe_num(ar_usb,
341 endpoint->bEndpointAddress,
342 &urbcount);
343 if (pipe_num == ATH6KL_USB_PIPE_INVALID)
344 continue;
345
346 pipe = &ar_usb->pipes[pipe_num];
347 if (pipe->ar_usb != NULL) {
348 /* hmmm..pipe was already setup */
349 continue;
350 }
351
352 pipe->ar_usb = ar_usb;
353 pipe->logical_pipe_num = pipe_num;
354 pipe->ep_address = endpoint->bEndpointAddress;
355 pipe->max_packet_size = le16_to_cpu(endpoint->wMaxPacketSize);
356
357 if (ATH6KL_USB_IS_BULK_EP(endpoint->bmAttributes)) {
358 if (ATH6KL_USB_IS_DIR_IN(pipe->ep_address)) {
359 pipe->usb_pipe_handle =
360 usb_rcvbulkpipe(ar_usb->udev,
361 pipe->ep_address);
362 } else {
363 pipe->usb_pipe_handle =
364 usb_sndbulkpipe(ar_usb->udev,
365 pipe->ep_address);
366 }
367 } else if (ATH6KL_USB_IS_INT_EP(endpoint->bmAttributes)) {
368 if (ATH6KL_USB_IS_DIR_IN(pipe->ep_address)) {
369 pipe->usb_pipe_handle =
370 usb_rcvintpipe(ar_usb->udev,
371 pipe->ep_address);
372 } else {
373 pipe->usb_pipe_handle =
374 usb_sndintpipe(ar_usb->udev,
375 pipe->ep_address);
376 }
377 } else if (ATH6KL_USB_IS_ISOC_EP(endpoint->bmAttributes)) {
378 /* TODO for ISO */
379 if (ATH6KL_USB_IS_DIR_IN(pipe->ep_address)) {
380 pipe->usb_pipe_handle =
381 usb_rcvisocpipe(ar_usb->udev,
382 pipe->ep_address);
383 } else {
384 pipe->usb_pipe_handle =
385 usb_sndisocpipe(ar_usb->udev,
386 pipe->ep_address);
387 }
388 }
389
390 pipe->ep_desc = endpoint;
391
392 if (!ATH6KL_USB_IS_DIR_IN(pipe->ep_address))
393 pipe->flags |= ATH6KL_USB_PIPE_FLAG_TX;
394
395 status = ath6kl_usb_alloc_pipe_resources(pipe, urbcount);
396 if (status != 0)
397 break;
398 }
399
400 return status;
401}
402
403/* pipe operations */
404static void ath6kl_usb_post_recv_transfers(struct ath6kl_usb_pipe *recv_pipe,
405 int buffer_length)
406{
407 struct ath6kl_urb_context *urb_context;
408 struct urb *urb;
409 int usb_status;
410
411 while (true) {
412 urb_context = ath6kl_usb_alloc_urb_from_pipe(recv_pipe);
413 if (urb_context == NULL)
414 break;
415
416 urb_context->skb = dev_alloc_skb(buffer_length);
417 if (urb_context->skb == NULL)
418 goto err_cleanup_urb;
419
420 urb = usb_alloc_urb(0, GFP_ATOMIC);
421 if (urb == NULL)
422 goto err_cleanup_urb;
423
424 usb_fill_bulk_urb(urb,
425 recv_pipe->ar_usb->udev,
426 recv_pipe->usb_pipe_handle,
427 urb_context->skb->data,
428 buffer_length,
429 ath6kl_usb_recv_complete, urb_context);
430
431 ath6kl_dbg(ATH6KL_DBG_USB_BULK,
432 "ath6kl usb: bulk recv submit:%d, 0x%X (ep:0x%2.2X), %d bytes buf:0x%p\n",
433 recv_pipe->logical_pipe_num,
434 recv_pipe->usb_pipe_handle, recv_pipe->ep_address,
435 buffer_length, urb_context->skb);
436
437 usb_anchor_urb(urb, &recv_pipe->urb_submitted);
438 usb_status = usb_submit_urb(urb, GFP_ATOMIC);
439
440 if (usb_status) {
441 ath6kl_dbg(ATH6KL_DBG_USB_BULK,
442 "ath6kl usb : usb bulk recv failed %d\n",
443 usb_status);
444 usb_unanchor_urb(urb);
445 usb_free_urb(urb);
446 goto err_cleanup_urb;
447 }
448 usb_free_urb(urb);
449 }
450 return;
451
452err_cleanup_urb:
453 ath6kl_usb_cleanup_recv_urb(urb_context);
454 return;
455}
456
457static void ath6kl_usb_flush_all(struct ath6kl_usb *ar_usb)
458{
459 int i;
460
461 for (i = 0; i < ATH6KL_USB_PIPE_MAX; i++) {
462 if (ar_usb->pipes[i].ar_usb != NULL)
463 usb_kill_anchored_urbs(&ar_usb->pipes[i].urb_submitted);
464 }
465
466 /*
467 * Flushing any pending I/O may schedule work this call will block
468 * until all scheduled work runs to completion.
469 */
470 flush_scheduled_work();
471}
472
473static void ath6kl_usb_start_recv_pipes(struct ath6kl_usb *ar_usb)
474{
475 /*
476 * note: control pipe is no longer used
477 * ar_usb->pipes[ATH6KL_USB_PIPE_RX_CTRL].urb_cnt_thresh =
478 * ar_usb->pipes[ATH6KL_USB_PIPE_RX_CTRL].urb_alloc/2;
479 * ath6kl_usb_post_recv_transfers(&ar_usb->
480 * pipes[ATH6KL_USB_PIPE_RX_CTRL],
481 * ATH6KL_USB_RX_BUFFER_SIZE);
482 */
483
484 ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA].urb_cnt_thresh =
485 ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA].urb_alloc / 2;
486 ath6kl_usb_post_recv_transfers(&ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA],
487 ATH6KL_USB_RX_BUFFER_SIZE);
488}
489
490/* hif usb rx/tx completion functions */
491static void ath6kl_usb_recv_complete(struct urb *urb)
492{
493 struct ath6kl_urb_context *urb_context = urb->context;
494 struct ath6kl_usb_pipe *pipe = urb_context->pipe;
495 struct sk_buff *skb = NULL;
496 int status = 0;
497
498 ath6kl_dbg(ATH6KL_DBG_USB_BULK,
499 "%s: recv pipe: %d, stat:%d, len:%d urb:0x%p\n", __func__,
500 pipe->logical_pipe_num, urb->status, urb->actual_length,
501 urb);
502
503 if (urb->status != 0) {
504 status = -EIO;
505 switch (urb->status) {
506 case -ECONNRESET:
507 case -ENOENT:
508 case -ESHUTDOWN:
509 /*
510 * no need to spew these errors when device
511 * removed or urb killed due to driver shutdown
512 */
513 status = -ECANCELED;
514 break;
515 default:
516 ath6kl_dbg(ATH6KL_DBG_USB_BULK,
517 "%s recv pipe: %d (ep:0x%2.2X), failed:%d\n",
518 __func__, pipe->logical_pipe_num,
519 pipe->ep_address, urb->status);
520 break;
521 }
522 goto cleanup_recv_urb;
523 }
524
525 if (urb->actual_length == 0)
526 goto cleanup_recv_urb;
527
528 skb = urb_context->skb;
529
530 /* we are going to pass it up */
531 urb_context->skb = NULL;
532 skb_put(skb, urb->actual_length);
533
534 /* note: queue implements a lock */
535 skb_queue_tail(&pipe->io_comp_queue, skb);
536 schedule_work(&pipe->io_complete_work);
537
538cleanup_recv_urb:
539 ath6kl_usb_cleanup_recv_urb(urb_context);
540
541 if (status == 0 &&
542 pipe->urb_cnt >= pipe->urb_cnt_thresh) {
543 /* our free urbs are piling up, post more transfers */
544 ath6kl_usb_post_recv_transfers(pipe, ATH6KL_USB_RX_BUFFER_SIZE);
545 }
546}
547
548static void ath6kl_usb_usb_transmit_complete(struct urb *urb)
549{
550 struct ath6kl_urb_context *urb_context = urb->context;
551 struct ath6kl_usb_pipe *pipe = urb_context->pipe;
552 struct sk_buff *skb;
553
554 ath6kl_dbg(ATH6KL_DBG_USB_BULK,
555 "%s: pipe: %d, stat:%d, len:%d\n",
556 __func__, pipe->logical_pipe_num, urb->status,
557 urb->actual_length);
558
559 if (urb->status != 0) {
560 ath6kl_dbg(ATH6KL_DBG_USB_BULK,
561 "%s: pipe: %d, failed:%d\n",
562 __func__, pipe->logical_pipe_num, urb->status);
563 }
564
565 skb = urb_context->skb;
566 urb_context->skb = NULL;
567 ath6kl_usb_free_urb_to_pipe(urb_context->pipe, urb_context);
568
569 /* note: queue implements a lock */
570 skb_queue_tail(&pipe->io_comp_queue, skb);
571 schedule_work(&pipe->io_complete_work);
572}
573
574static void ath6kl_usb_io_comp_work(struct work_struct *work)
575{
576 struct ath6kl_usb_pipe *pipe = container_of(work,
577 struct ath6kl_usb_pipe,
578 io_complete_work);
579 struct ath6kl_usb *ar_usb;
580 struct sk_buff *skb;
581
582 ar_usb = pipe->ar_usb;
583
584 while ((skb = skb_dequeue(&pipe->io_comp_queue))) {
585 if (pipe->flags & ATH6KL_USB_PIPE_FLAG_TX) {
586 ath6kl_dbg(ATH6KL_DBG_USB_BULK,
587 "ath6kl usb xmit callback buf:0x%p\n", skb);
588 ath6kl_core_tx_complete(ar_usb->ar, skb);
589 } else {
590 ath6kl_dbg(ATH6KL_DBG_USB_BULK,
591 "ath6kl usb recv callback buf:0x%p\n", skb);
592 ath6kl_core_rx_complete(ar_usb->ar, skb,
593 pipe->logical_pipe_num);
594 }
595 }
596}
597
58#define ATH6KL_USB_MAX_DIAG_CMD (sizeof(struct ath6kl_usb_ctrl_diag_cmd_write)) 598#define ATH6KL_USB_MAX_DIAG_CMD (sizeof(struct ath6kl_usb_ctrl_diag_cmd_write))
59#define ATH6KL_USB_MAX_DIAG_RESP (sizeof(struct ath6kl_usb_ctrl_diag_resp_read)) 599#define ATH6KL_USB_MAX_DIAG_RESP (sizeof(struct ath6kl_usb_ctrl_diag_resp_read))
60 600
61static void ath6kl_usb_destroy(struct ath6kl_usb *ar_usb) 601static void ath6kl_usb_destroy(struct ath6kl_usb *ar_usb)
62{ 602{
603 ath6kl_usb_flush_all(ar_usb);
604
605 ath6kl_usb_cleanup_pipe_resources(ar_usb);
606
63 usb_set_intfdata(ar_usb->interface, NULL); 607 usb_set_intfdata(ar_usb->interface, NULL);
64 608
65 kfree(ar_usb->diag_cmd_buffer); 609 kfree(ar_usb->diag_cmd_buffer);
@@ -70,19 +614,28 @@ static void ath6kl_usb_destroy(struct ath6kl_usb *ar_usb)
70 614
71static struct ath6kl_usb *ath6kl_usb_create(struct usb_interface *interface) 615static struct ath6kl_usb *ath6kl_usb_create(struct usb_interface *interface)
72{ 616{
73 struct ath6kl_usb *ar_usb = NULL;
74 struct usb_device *dev = interface_to_usbdev(interface); 617 struct usb_device *dev = interface_to_usbdev(interface);
618 struct ath6kl_usb *ar_usb;
619 struct ath6kl_usb_pipe *pipe;
75 int status = 0; 620 int status = 0;
621 int i;
76 622
77 ar_usb = kzalloc(sizeof(struct ath6kl_usb), GFP_KERNEL); 623 ar_usb = kzalloc(sizeof(struct ath6kl_usb), GFP_KERNEL);
78 if (ar_usb == NULL) 624 if (ar_usb == NULL)
79 goto fail_ath6kl_usb_create; 625 goto fail_ath6kl_usb_create;
80 626
81 memset(ar_usb, 0, sizeof(struct ath6kl_usb));
82 usb_set_intfdata(interface, ar_usb); 627 usb_set_intfdata(interface, ar_usb);
628 spin_lock_init(&(ar_usb->cs_lock));
83 ar_usb->udev = dev; 629 ar_usb->udev = dev;
84 ar_usb->interface = interface; 630 ar_usb->interface = interface;
85 631
632 for (i = 0; i < ATH6KL_USB_PIPE_MAX; i++) {
633 pipe = &ar_usb->pipes[i];
634 INIT_WORK(&pipe->io_complete_work,
635 ath6kl_usb_io_comp_work);
636 skb_queue_head_init(&pipe->io_comp_queue);
637 }
638
86 ar_usb->diag_cmd_buffer = kzalloc(ATH6KL_USB_MAX_DIAG_CMD, GFP_KERNEL); 639 ar_usb->diag_cmd_buffer = kzalloc(ATH6KL_USB_MAX_DIAG_CMD, GFP_KERNEL);
87 if (ar_usb->diag_cmd_buffer == NULL) { 640 if (ar_usb->diag_cmd_buffer == NULL) {
88 status = -ENOMEM; 641 status = -ENOMEM;
@@ -96,6 +649,8 @@ static struct ath6kl_usb *ath6kl_usb_create(struct usb_interface *interface)
96 goto fail_ath6kl_usb_create; 649 goto fail_ath6kl_usb_create;
97 } 650 }
98 651
652 status = ath6kl_usb_setup_pipe_resources(ar_usb);
653
99fail_ath6kl_usb_create: 654fail_ath6kl_usb_create:
100 if (status != 0) { 655 if (status != 0) {
101 ath6kl_usb_destroy(ar_usb); 656 ath6kl_usb_destroy(ar_usb);
@@ -114,11 +669,177 @@ static void ath6kl_usb_device_detached(struct usb_interface *interface)
114 669
115 ath6kl_stop_txrx(ar_usb->ar); 670 ath6kl_stop_txrx(ar_usb->ar);
116 671
672 /* Delay to wait for the target to reboot */
673 mdelay(20);
117 ath6kl_core_cleanup(ar_usb->ar); 674 ath6kl_core_cleanup(ar_usb->ar);
118
119 ath6kl_usb_destroy(ar_usb); 675 ath6kl_usb_destroy(ar_usb);
120} 676}
121 677
678/* exported hif usb APIs for htc pipe */
679static void hif_start(struct ath6kl *ar)
680{
681 struct ath6kl_usb *device = ath6kl_usb_priv(ar);
682 int i;
683
684 ath6kl_usb_start_recv_pipes(device);
685
686 /* set the TX resource avail threshold for each TX pipe */
687 for (i = ATH6KL_USB_PIPE_TX_CTRL;
688 i <= ATH6KL_USB_PIPE_TX_DATA_HP; i++) {
689 device->pipes[i].urb_cnt_thresh =
690 device->pipes[i].urb_alloc / 2;
691 }
692}
693
694static int ath6kl_usb_send(struct ath6kl *ar, u8 PipeID,
695 struct sk_buff *hdr_skb, struct sk_buff *skb)
696{
697 struct ath6kl_usb *device = ath6kl_usb_priv(ar);
698 struct ath6kl_usb_pipe *pipe = &device->pipes[PipeID];
699 struct ath6kl_urb_context *urb_context;
700 int usb_status, status = 0;
701 struct urb *urb;
702 u8 *data;
703 u32 len;
704
705 ath6kl_dbg(ATH6KL_DBG_USB_BULK, "+%s pipe : %d, buf:0x%p\n",
706 __func__, PipeID, skb);
707
708 urb_context = ath6kl_usb_alloc_urb_from_pipe(pipe);
709
710 if (urb_context == NULL) {
711 /*
712 * TODO: it is possible to run out of urbs if
713 * 2 endpoints map to the same pipe ID
714 */
715 ath6kl_dbg(ATH6KL_DBG_USB_BULK,
716 "%s pipe:%d no urbs left. URB Cnt : %d\n",
717 __func__, PipeID, pipe->urb_cnt);
718 status = -ENOMEM;
719 goto fail_hif_send;
720 }
721
722 urb_context->skb = skb;
723
724 data = skb->data;
725 len = skb->len;
726
727 urb = usb_alloc_urb(0, GFP_ATOMIC);
728 if (urb == NULL) {
729 status = -ENOMEM;
730 ath6kl_usb_free_urb_to_pipe(urb_context->pipe,
731 urb_context);
732 goto fail_hif_send;
733 }
734
735 usb_fill_bulk_urb(urb,
736 device->udev,
737 pipe->usb_pipe_handle,
738 data,
739 len,
740 ath6kl_usb_usb_transmit_complete, urb_context);
741
742 if ((len % pipe->max_packet_size) == 0) {
743 /* hit a max packet boundary on this pipe */
744 urb->transfer_flags |= URB_ZERO_PACKET;
745 }
746
747 ath6kl_dbg(ATH6KL_DBG_USB_BULK,
748 "athusb bulk send submit:%d, 0x%X (ep:0x%2.2X), %d bytes\n",
749 pipe->logical_pipe_num, pipe->usb_pipe_handle,
750 pipe->ep_address, len);
751
752 usb_anchor_urb(urb, &pipe->urb_submitted);
753 usb_status = usb_submit_urb(urb, GFP_ATOMIC);
754
755 if (usb_status) {
756 ath6kl_dbg(ATH6KL_DBG_USB_BULK,
757 "ath6kl usb : usb bulk transmit failed %d\n",
758 usb_status);
759 usb_unanchor_urb(urb);
760 ath6kl_usb_free_urb_to_pipe(urb_context->pipe,
761 urb_context);
762 status = -EINVAL;
763 }
764 usb_free_urb(urb);
765
766fail_hif_send:
767 return status;
768}
769
770static void hif_stop(struct ath6kl *ar)
771{
772 struct ath6kl_usb *device = ath6kl_usb_priv(ar);
773
774 ath6kl_usb_flush_all(device);
775}
776
777static void ath6kl_usb_get_default_pipe(struct ath6kl *ar,
778 u8 *ul_pipe, u8 *dl_pipe)
779{
780 *ul_pipe = ATH6KL_USB_PIPE_TX_CTRL;
781 *dl_pipe = ATH6KL_USB_PIPE_RX_CTRL;
782}
783
784static int ath6kl_usb_map_service_pipe(struct ath6kl *ar, u16 svc_id,
785 u8 *ul_pipe, u8 *dl_pipe)
786{
787 int status = 0;
788
789 switch (svc_id) {
790 case HTC_CTRL_RSVD_SVC:
791 case WMI_CONTROL_SVC:
792 *ul_pipe = ATH6KL_USB_PIPE_TX_CTRL;
793 /* due to large control packets, shift to data pipe */
794 *dl_pipe = ATH6KL_USB_PIPE_RX_DATA;
795 break;
796 case WMI_DATA_BE_SVC:
797 case WMI_DATA_BK_SVC:
798 *ul_pipe = ATH6KL_USB_PIPE_TX_DATA_LP;
799 /*
800 * Disable rxdata2 directly, it will be enabled
801 * if FW enable rxdata2
802 */
803 *dl_pipe = ATH6KL_USB_PIPE_RX_DATA;
804 break;
805 case WMI_DATA_VI_SVC:
806 *ul_pipe = ATH6KL_USB_PIPE_TX_DATA_MP;
807 /*
808 * Disable rxdata2 directly, it will be enabled
809 * if FW enable rxdata2
810 */
811 *dl_pipe = ATH6KL_USB_PIPE_RX_DATA;
812 break;
813 case WMI_DATA_VO_SVC:
814 *ul_pipe = ATH6KL_USB_PIPE_TX_DATA_HP;
815 /*
816 * Disable rxdata2 directly, it will be enabled
817 * if FW enable rxdata2
818 */
819 *dl_pipe = ATH6KL_USB_PIPE_RX_DATA;
820 break;
821 default:
822 status = -EPERM;
823 break;
824 }
825
826 return status;
827}
828
829static u16 ath6kl_usb_get_free_queue_number(struct ath6kl *ar, u8 pipe_id)
830{
831 struct ath6kl_usb *device = ath6kl_usb_priv(ar);
832
833 return device->pipes[pipe_id].urb_cnt;
834}
835
836static void hif_detach_htc(struct ath6kl *ar)
837{
838 struct ath6kl_usb *device = ath6kl_usb_priv(ar);
839
840 ath6kl_usb_flush_all(device);
841}
842
122static int ath6kl_usb_submit_ctrl_out(struct ath6kl_usb *ar_usb, 843static int ath6kl_usb_submit_ctrl_out(struct ath6kl_usb *ar_usb,
123 u8 req, u16 value, u16 index, void *data, 844 u8 req, u16 value, u16 index, void *data,
124 u32 size) 845 u32 size)
@@ -301,14 +1022,21 @@ static int ath6kl_usb_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
301 1022
302static int ath6kl_usb_power_on(struct ath6kl *ar) 1023static int ath6kl_usb_power_on(struct ath6kl *ar)
303{ 1024{
1025 hif_start(ar);
304 return 0; 1026 return 0;
305} 1027}
306 1028
307static int ath6kl_usb_power_off(struct ath6kl *ar) 1029static int ath6kl_usb_power_off(struct ath6kl *ar)
308{ 1030{
1031 hif_detach_htc(ar);
309 return 0; 1032 return 0;
310} 1033}
311 1034
1035static void ath6kl_usb_stop(struct ath6kl *ar)
1036{
1037 hif_stop(ar);
1038}
1039
312static const struct ath6kl_hif_ops ath6kl_usb_ops = { 1040static const struct ath6kl_hif_ops ath6kl_usb_ops = {
313 .diag_read32 = ath6kl_usb_diag_read32, 1041 .diag_read32 = ath6kl_usb_diag_read32,
314 .diag_write32 = ath6kl_usb_diag_write32, 1042 .diag_write32 = ath6kl_usb_diag_write32,
@@ -316,6 +1044,11 @@ static const struct ath6kl_hif_ops ath6kl_usb_ops = {
316 .bmi_write = ath6kl_usb_bmi_write, 1044 .bmi_write = ath6kl_usb_bmi_write,
317 .power_on = ath6kl_usb_power_on, 1045 .power_on = ath6kl_usb_power_on,
318 .power_off = ath6kl_usb_power_off, 1046 .power_off = ath6kl_usb_power_off,
1047 .stop = ath6kl_usb_stop,
1048 .pipe_send = ath6kl_usb_send,
1049 .pipe_get_default = ath6kl_usb_get_default_pipe,
1050 .pipe_map_service = ath6kl_usb_map_service_pipe,
1051 .pipe_get_free_queue_number = ath6kl_usb_get_free_queue_number,
319}; 1052};
320 1053
321/* ath6kl usb driver registered functions */ 1054/* ath6kl usb driver registered functions */
@@ -368,7 +1101,7 @@ static int ath6kl_usb_probe(struct usb_interface *interface,
368 1101
369 ar_usb->ar = ar; 1102 ar_usb->ar = ar;
370 1103
371 ret = ath6kl_core_init(ar); 1104 ret = ath6kl_core_init(ar, ATH6KL_HTC_TYPE_PIPE);
372 if (ret) { 1105 if (ret) {
373 ath6kl_err("Failed to init ath6kl core: %d\n", ret); 1106 ath6kl_err("Failed to init ath6kl core: %d\n", ret);
374 goto err_core_free; 1107 goto err_core_free;
@@ -392,6 +1125,46 @@ static void ath6kl_usb_remove(struct usb_interface *interface)
392 ath6kl_usb_device_detached(interface); 1125 ath6kl_usb_device_detached(interface);
393} 1126}
394 1127
1128#ifdef CONFIG_PM
1129
1130static int ath6kl_usb_suspend(struct usb_interface *interface,
1131 pm_message_t message)
1132{
1133 struct ath6kl_usb *device;
1134 device = usb_get_intfdata(interface);
1135
1136 ath6kl_usb_flush_all(device);
1137 return 0;
1138}
1139
1140static int ath6kl_usb_resume(struct usb_interface *interface)
1141{
1142 struct ath6kl_usb *device;
1143 device = usb_get_intfdata(interface);
1144
1145 ath6kl_usb_post_recv_transfers(&device->pipes[ATH6KL_USB_PIPE_RX_DATA],
1146 ATH6KL_USB_RX_BUFFER_SIZE);
1147 ath6kl_usb_post_recv_transfers(&device->pipes[ATH6KL_USB_PIPE_RX_DATA2],
1148 ATH6KL_USB_RX_BUFFER_SIZE);
1149
1150 return 0;
1151}
1152
1153static int ath6kl_usb_reset_resume(struct usb_interface *intf)
1154{
1155 if (usb_get_intfdata(intf))
1156 ath6kl_usb_remove(intf);
1157 return 0;
1158}
1159
1160#else
1161
1162#define ath6kl_usb_suspend NULL
1163#define ath6kl_usb_resume NULL
1164#define ath6kl_usb_reset_resume NULL
1165
1166#endif
1167
395/* table of devices that work with this driver */ 1168/* table of devices that work with this driver */
396static struct usb_device_id ath6kl_usb_ids[] = { 1169static struct usb_device_id ath6kl_usb_ids[] = {
397 {USB_DEVICE(0x0cf3, 0x9374)}, 1170 {USB_DEVICE(0x0cf3, 0x9374)},
@@ -403,8 +1176,12 @@ MODULE_DEVICE_TABLE(usb, ath6kl_usb_ids);
403static struct usb_driver ath6kl_usb_driver = { 1176static struct usb_driver ath6kl_usb_driver = {
404 .name = "ath6kl_usb", 1177 .name = "ath6kl_usb",
405 .probe = ath6kl_usb_probe, 1178 .probe = ath6kl_usb_probe,
1179 .suspend = ath6kl_usb_suspend,
1180 .resume = ath6kl_usb_resume,
1181 .reset_resume = ath6kl_usb_reset_resume,
406 .disconnect = ath6kl_usb_remove, 1182 .disconnect = ath6kl_usb_remove,
407 .id_table = ath6kl_usb_ids, 1183 .id_table = ath6kl_usb_ids,
1184 .supports_autosuspend = true,
408}; 1185};
409 1186
410static int ath6kl_usb_init(void) 1187static int ath6kl_usb_init(void)
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 2b442332cd0f..7c8a9977faf5 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -2882,6 +2882,43 @@ int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx,
2882 return ret; 2882 return ret;
2883} 2883}
2884 2884
2885int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx,
2886 enum ieee80211_band band,
2887 struct ath6kl_htcap *htcap)
2888{
2889 struct sk_buff *skb;
2890 struct wmi_set_htcap_cmd *cmd;
2891
2892 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
2893 if (!skb)
2894 return -ENOMEM;
2895
2896 cmd = (struct wmi_set_htcap_cmd *) skb->data;
2897
2898 /*
2899 * NOTE: Band in firmware matches enum ieee80211_band, it is unlikely
2900 * this will be changed in firmware. If at all there is any change in
2901 * band value, the host needs to be fixed.
2902 */
2903 cmd->band = band;
2904 cmd->ht_enable = !!htcap->ht_enable;
2905 cmd->ht20_sgi = !!(htcap->cap_info & IEEE80211_HT_CAP_SGI_20);
2906 cmd->ht40_supported =
2907 !!(htcap->cap_info & IEEE80211_HT_CAP_SUP_WIDTH_20_40);
2908 cmd->ht40_sgi = !!(htcap->cap_info & IEEE80211_HT_CAP_SGI_40);
2909 cmd->intolerant_40mhz =
2910 !!(htcap->cap_info & IEEE80211_HT_CAP_40MHZ_INTOLERANT);
2911 cmd->max_ampdu_len_exp = htcap->ampdu_factor;
2912
2913 ath6kl_dbg(ATH6KL_DBG_WMI,
2914 "Set htcap: band:%d ht_enable:%d 40mhz:%d sgi_20mhz:%d sgi_40mhz:%d 40mhz_intolerant:%d ampdu_len_exp:%d\n",
2915 cmd->band, cmd->ht_enable, cmd->ht40_supported,
2916 cmd->ht20_sgi, cmd->ht40_sgi, cmd->intolerant_40mhz,
2917 cmd->max_ampdu_len_exp);
2918 return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_HT_CAP_CMDID,
2919 NO_SYNC_WMIFLAG);
2920}
2921
2885int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len) 2922int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len)
2886{ 2923{
2887 struct sk_buff *skb; 2924 struct sk_buff *skb;
@@ -3032,6 +3069,9 @@ int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 if_idx, u8 cmd, const u8 *mac,
3032 cm->reason = cpu_to_le16(reason); 3069 cm->reason = cpu_to_le16(reason);
3033 cm->cmd = cmd; 3070 cm->cmd = cmd;
3034 3071
3072 ath6kl_dbg(ATH6KL_DBG_WMI, "ap_set_mlme: cmd=%d reason=%d\n", cm->cmd,
3073 cm->reason);
3074
3035 return ath6kl_wmi_cmd_send(wmip, if_idx, skb, WMI_AP_SET_MLME_CMDID, 3075 return ath6kl_wmi_cmd_send(wmip, if_idx, skb, WMI_AP_SET_MLME_CMDID,
3036 NO_SYNC_WMIFLAG); 3076 NO_SYNC_WMIFLAG);
3037} 3077}
@@ -3181,6 +3221,29 @@ int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
3181 NO_SYNC_WMIFLAG); 3221 NO_SYNC_WMIFLAG);
3182} 3222}
3183 3223
3224int ath6kl_wmi_set_ie_cmd(struct wmi *wmi, u8 if_idx, u8 ie_id, u8 ie_field,
3225 const u8 *ie_info, u8 ie_len)
3226{
3227 struct sk_buff *skb;
3228 struct wmi_set_ie_cmd *p;
3229
3230 skb = ath6kl_wmi_get_new_buf(sizeof(*p) + ie_len);
3231 if (!skb)
3232 return -ENOMEM;
3233
3234 ath6kl_dbg(ATH6KL_DBG_WMI, "set_ie_cmd: ie_id=%u ie_ie_field=%u ie_len=%u\n",
3235 ie_id, ie_field, ie_len);
3236 p = (struct wmi_set_ie_cmd *) skb->data;
3237 p->ie_id = ie_id;
3238 p->ie_field = ie_field;
3239 p->ie_len = ie_len;
3240 if (ie_info && ie_len > 0)
3241 memcpy(p->ie_info, ie_info, ie_len);
3242
3243 return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_IE_CMDID,
3244 NO_SYNC_WMIFLAG);
3245}
3246
3184int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable) 3247int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable)
3185{ 3248{
3186 struct sk_buff *skb; 3249 struct sk_buff *skb;
@@ -3392,6 +3455,23 @@ int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx)
3392 WMI_CANCEL_REMAIN_ON_CHNL_CMDID); 3455 WMI_CANCEL_REMAIN_ON_CHNL_CMDID);
3393} 3456}
3394 3457
3458int ath6kl_wmi_set_inact_period(struct wmi *wmi, u8 if_idx, int inact_timeout)
3459{
3460 struct sk_buff *skb;
3461 struct wmi_set_inact_period_cmd *cmd;
3462
3463 skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
3464 if (!skb)
3465 return -ENOMEM;
3466
3467 cmd = (struct wmi_set_inact_period_cmd *) skb->data;
3468 cmd->inact_period = cpu_to_le32(inact_timeout);
3469 cmd->num_null_func = 0;
3470
3471 return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_AP_CONN_INACT_CMDID,
3472 NO_SYNC_WMIFLAG);
3473}
3474
3395static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb) 3475static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
3396{ 3476{
3397 struct wmix_cmd_hdr *cmd; 3477 struct wmix_cmd_hdr *cmd;
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index 4092e3e80790..d3d2ab5c1689 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -182,6 +182,9 @@ enum wmi_data_hdr_flags {
182#define WMI_DATA_HDR_META_MASK 0x7 182#define WMI_DATA_HDR_META_MASK 0x7
183#define WMI_DATA_HDR_META_SHIFT 13 183#define WMI_DATA_HDR_META_SHIFT 13
184 184
185#define WMI_DATA_HDR_PAD_BEFORE_DATA_MASK 0xFF
186#define WMI_DATA_HDR_PAD_BEFORE_DATA_SHIFT 0x8
187
185/* Macros for operating on WMI_DATA_HDR (info3) field */ 188/* Macros for operating on WMI_DATA_HDR (info3) field */
186#define WMI_DATA_HDR_IF_IDX_MASK 0xF 189#define WMI_DATA_HDR_IF_IDX_MASK 0xF
187 190
@@ -423,6 +426,7 @@ enum wmi_cmd_id {
423 WMI_SET_FRAMERATES_CMDID, 426 WMI_SET_FRAMERATES_CMDID,
424 WMI_SET_AP_PS_CMDID, 427 WMI_SET_AP_PS_CMDID,
425 WMI_SET_QOS_SUPP_CMDID, 428 WMI_SET_QOS_SUPP_CMDID,
429 WMI_SET_IE_CMDID,
426 430
427 /* WMI_THIN_RESERVED_... mark the start and end 431 /* WMI_THIN_RESERVED_... mark the start and end
428 * values for WMI_THIN_RESERVED command IDs. These 432 * values for WMI_THIN_RESERVED command IDs. These
@@ -629,6 +633,11 @@ enum wmi_mgmt_frame_type {
629 WMI_NUM_MGMT_FRAME 633 WMI_NUM_MGMT_FRAME
630}; 634};
631 635
636enum wmi_ie_field_type {
637 WMI_RSN_IE_CAPB = 0x1,
638 WMI_IE_FULL = 0xFF, /* indicats full IE */
639};
640
632/* WMI_CONNECT_CMDID */ 641/* WMI_CONNECT_CMDID */
633enum network_type { 642enum network_type {
634 INFRA_NETWORK = 0x01, 643 INFRA_NETWORK = 0x01,
@@ -1268,6 +1277,16 @@ struct wmi_mcast_filter_add_del_cmd {
1268 u8 mcast_mac[ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE]; 1277 u8 mcast_mac[ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE];
1269} __packed; 1278} __packed;
1270 1279
1280struct wmi_set_htcap_cmd {
1281 u8 band;
1282 u8 ht_enable;
1283 u8 ht40_supported;
1284 u8 ht20_sgi;
1285 u8 ht40_sgi;
1286 u8 intolerant_40mhz;
1287 u8 max_ampdu_len_exp;
1288} __packed;
1289
1271/* Command Replies */ 1290/* Command Replies */
1272 1291
1273/* WMI_GET_CHANNEL_LIST_CMDID reply */ 1292/* WMI_GET_CHANNEL_LIST_CMDID reply */
@@ -1913,6 +1932,14 @@ struct wmi_set_appie_cmd {
1913 u8 ie_info[0]; 1932 u8 ie_info[0];
1914} __packed; 1933} __packed;
1915 1934
1935struct wmi_set_ie_cmd {
1936 u8 ie_id;
1937 u8 ie_field; /* enum wmi_ie_field_type */
1938 u8 ie_len;
1939 u8 reserved;
1940 u8 ie_info[0];
1941} __packed;
1942
1916/* Notify the WSC registration status to the target */ 1943/* Notify the WSC registration status to the target */
1917#define WSC_REG_ACTIVE 1 1944#define WSC_REG_ACTIVE 1
1918#define WSC_REG_INACTIVE 0 1945#define WSC_REG_INACTIVE 0
@@ -2141,6 +2168,11 @@ struct wmi_ap_hidden_ssid_cmd {
2141 u8 hidden_ssid; 2168 u8 hidden_ssid;
2142} __packed; 2169} __packed;
2143 2170
2171struct wmi_set_inact_period_cmd {
2172 __le32 inact_period;
2173 u8 num_null_func;
2174} __packed;
2175
2144/* AP mode events */ 2176/* AP mode events */
2145struct wmi_ap_set_apsd_cmd { 2177struct wmi_ap_set_apsd_cmd {
2146 u8 enable; 2178 u8 enable;
@@ -2465,6 +2497,9 @@ int ath6kl_wmi_get_roam_tbl_cmd(struct wmi *wmi);
2465int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg); 2497int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg);
2466int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx, 2498int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx,
2467 u8 keep_alive_intvl); 2499 u8 keep_alive_intvl);
2500int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx,
2501 enum ieee80211_band band,
2502 struct ath6kl_htcap *htcap);
2468int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len); 2503int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len);
2469 2504
2470s32 ath6kl_wmi_get_rate(s8 rate_index); 2505s32 ath6kl_wmi_get_rate(s8 rate_index);
@@ -2515,6 +2550,9 @@ int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 if_idx,
2515int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type, 2550int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
2516 const u8 *ie, u8 ie_len); 2551 const u8 *ie, u8 ie_len);
2517 2552
2553int ath6kl_wmi_set_ie_cmd(struct wmi *wmi, u8 if_idx, u8 ie_id, u8 ie_field,
2554 const u8 *ie_info, u8 ie_len);
2555
2518/* P2P */ 2556/* P2P */
2519int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable); 2557int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable);
2520 2558
@@ -2538,6 +2576,8 @@ int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx);
2538int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type, 2576int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
2539 const u8 *ie, u8 ie_len); 2577 const u8 *ie, u8 ie_len);
2540 2578
2579int ath6kl_wmi_set_inact_period(struct wmi *wmi, u8 if_idx, int inact_timeout);
2580
2541void ath6kl_wmi_sscan_timer(unsigned long ptr); 2581void ath6kl_wmi_sscan_timer(unsigned long ptr);
2542 2582
2543struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx); 2583struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx);
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 27d95fe5ade0..3f0b84723789 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -11,7 +11,10 @@ ath9k-$(CONFIG_ATH9K_PCI) += pci.o
11ath9k-$(CONFIG_ATH9K_AHB) += ahb.o 11ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
12ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o 12ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
13ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o 13ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o
14ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += dfs.o 14ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += \
15 dfs.o \
16 dfs_pattern_detector.o \
17 dfs_pri_detector.o
15 18
16obj-$(CONFIG_ATH9K) += ath9k.o 19obj-$(CONFIG_ATH9K) += ath9k.o
17 20
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index 7e0ea4e98334..b4c77f9d7470 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -46,8 +46,8 @@ static const struct ani_ofdm_level_entry ofdm_level_table[] = {
46 { 5, 4, 1 }, /* lvl 5 */ 46 { 5, 4, 1 }, /* lvl 5 */
47 { 6, 5, 1 }, /* lvl 6 */ 47 { 6, 5, 1 }, /* lvl 6 */
48 { 7, 6, 1 }, /* lvl 7 */ 48 { 7, 6, 1 }, /* lvl 7 */
49 { 7, 7, 1 }, /* lvl 8 */ 49 { 7, 6, 0 }, /* lvl 8 */
50 { 7, 8, 0 } /* lvl 9 */ 50 { 7, 7, 0 } /* lvl 9 */
51}; 51};
52#define ATH9K_ANI_OFDM_NUM_LEVEL \ 52#define ATH9K_ANI_OFDM_NUM_LEVEL \
53 ARRAY_SIZE(ofdm_level_table) 53 ARRAY_SIZE(ofdm_level_table)
@@ -91,8 +91,8 @@ static const struct ani_cck_level_entry cck_level_table[] = {
91 { 4, 0 }, /* lvl 4 */ 91 { 4, 0 }, /* lvl 4 */
92 { 5, 0 }, /* lvl 5 */ 92 { 5, 0 }, /* lvl 5 */
93 { 6, 0 }, /* lvl 6 */ 93 { 6, 0 }, /* lvl 6 */
94 { 7, 0 }, /* lvl 7 (only for high rssi) */ 94 { 6, 0 }, /* lvl 7 (only for high rssi) */
95 { 8, 0 } /* lvl 8 (only for high rssi) */ 95 { 7, 0 } /* lvl 8 (only for high rssi) */
96}; 96};
97 97
98#define ATH9K_ANI_CCK_NUM_LEVEL \ 98#define ATH9K_ANI_CCK_NUM_LEVEL \
@@ -274,7 +274,9 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel)
274 aniState->rssiThrLow, aniState->rssiThrHigh); 274 aniState->rssiThrLow, aniState->rssiThrHigh);
275 275
276 if (aniState->update_ani) 276 if (aniState->update_ani)
277 aniState->ofdmNoiseImmunityLevel = immunityLevel; 277 aniState->ofdmNoiseImmunityLevel =
278 (immunityLevel > ATH9K_ANI_OFDM_DEF_LEVEL) ?
279 immunityLevel : ATH9K_ANI_OFDM_DEF_LEVEL;
278 280
279 entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel]; 281 entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel];
280 entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel]; 282 entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel];
@@ -290,16 +292,9 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel)
290 ATH9K_ANI_FIRSTEP_LEVEL, 292 ATH9K_ANI_FIRSTEP_LEVEL,
291 entry_ofdm->fir_step_level); 293 entry_ofdm->fir_step_level);
292 294
293 if ((ah->opmode != NL80211_IFTYPE_STATION && 295 if ((aniState->noiseFloor >= aniState->rssiThrHigh) &&
294 ah->opmode != NL80211_IFTYPE_ADHOC) || 296 (!aniState->ofdmWeakSigDetectOff !=
295 aniState->noiseFloor <= aniState->rssiThrHigh) { 297 entry_ofdm->ofdm_weak_signal_on)) {
296 if (aniState->ofdmWeakSigDetectOff)
297 /* force on ofdm weak sig detect */
298 ath9k_hw_ani_control(ah,
299 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
300 true);
301 else if (aniState->ofdmWeakSigDetectOff ==
302 entry_ofdm->ofdm_weak_signal_on)
303 ath9k_hw_ani_control(ah, 298 ath9k_hw_ani_control(ah,
304 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION, 299 ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION,
305 entry_ofdm->ofdm_weak_signal_on); 300 entry_ofdm->ofdm_weak_signal_on);
@@ -347,7 +342,9 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel)
347 immunityLevel = ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI; 342 immunityLevel = ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI;
348 343
349 if (aniState->update_ani) 344 if (aniState->update_ani)
350 aniState->cckNoiseImmunityLevel = immunityLevel; 345 aniState->cckNoiseImmunityLevel =
346 (immunityLevel > ATH9K_ANI_CCK_DEF_LEVEL) ?
347 immunityLevel : ATH9K_ANI_CCK_DEF_LEVEL;
351 348
352 entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel]; 349 entry_ofdm = &ofdm_level_table[aniState->ofdmNoiseImmunityLevel];
353 entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel]; 350 entry_cck = &cck_level_table[aniState->cckNoiseImmunityLevel];
@@ -717,26 +714,30 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan)
717 ofdmPhyErrRate, aniState->cckNoiseImmunityLevel, 714 ofdmPhyErrRate, aniState->cckNoiseImmunityLevel,
718 cckPhyErrRate, aniState->ofdmsTurn); 715 cckPhyErrRate, aniState->ofdmsTurn);
719 716
720 if (aniState->listenTime > 5 * ah->aniperiod) { 717 if (aniState->listenTime > ah->aniperiod) {
721 if (ofdmPhyErrRate <= ah->config.ofdm_trig_low && 718 if (cckPhyErrRate < ah->config.cck_trig_low &&
722 cckPhyErrRate <= ah->config.cck_trig_low) { 719 ((ofdmPhyErrRate < ah->config.ofdm_trig_low &&
720 aniState->ofdmNoiseImmunityLevel <
721 ATH9K_ANI_OFDM_DEF_LEVEL) ||
722 (ofdmPhyErrRate < ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI &&
723 aniState->ofdmNoiseImmunityLevel >=
724 ATH9K_ANI_OFDM_DEF_LEVEL))) {
723 ath9k_hw_ani_lower_immunity(ah); 725 ath9k_hw_ani_lower_immunity(ah);
724 aniState->ofdmsTurn = !aniState->ofdmsTurn; 726 aniState->ofdmsTurn = !aniState->ofdmsTurn;
725 } 727 } else if ((ofdmPhyErrRate > ah->config.ofdm_trig_high &&
726 ath9k_ani_restart(ah); 728 aniState->ofdmNoiseImmunityLevel >=
727 } else if (aniState->listenTime > ah->aniperiod) { 729 ATH9K_ANI_OFDM_DEF_LEVEL) ||
728 /* check to see if need to raise immunity */ 730 (ofdmPhyErrRate >
729 if (ofdmPhyErrRate > ah->config.ofdm_trig_high && 731 ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI &&
730 (cckPhyErrRate <= ah->config.cck_trig_high || 732 aniState->ofdmNoiseImmunityLevel <
731 aniState->ofdmsTurn)) { 733 ATH9K_ANI_OFDM_DEF_LEVEL)) {
732 ath9k_hw_ani_ofdm_err_trigger(ah); 734 ath9k_hw_ani_ofdm_err_trigger(ah);
733 ath9k_ani_restart(ah);
734 aniState->ofdmsTurn = false; 735 aniState->ofdmsTurn = false;
735 } else if (cckPhyErrRate > ah->config.cck_trig_high) { 736 } else if (cckPhyErrRate > ah->config.cck_trig_high) {
736 ath9k_hw_ani_cck_err_trigger(ah); 737 ath9k_hw_ani_cck_err_trigger(ah);
737 ath9k_ani_restart(ah);
738 aniState->ofdmsTurn = true; 738 aniState->ofdmsTurn = true;
739 } 739 }
740 ath9k_ani_restart(ah);
740 } 741 }
741} 742}
742EXPORT_SYMBOL(ath9k_hw_ani_monitor); 743EXPORT_SYMBOL(ath9k_hw_ani_monitor);
diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h
index 83029d6c7b22..72e2b874e179 100644
--- a/drivers/net/wireless/ath/ath9k/ani.h
+++ b/drivers/net/wireless/ath/ath9k/ani.h
@@ -25,11 +25,13 @@
25 25
26/* units are errors per second */ 26/* units are errors per second */
27#define ATH9K_ANI_OFDM_TRIG_HIGH_OLD 500 27#define ATH9K_ANI_OFDM_TRIG_HIGH_OLD 500
28#define ATH9K_ANI_OFDM_TRIG_HIGH_NEW 1000 28#define ATH9K_ANI_OFDM_TRIG_HIGH_NEW 3500
29#define ATH9K_ANI_OFDM_TRIG_HIGH_BELOW_INI 1000
29 30
30/* units are errors per second */ 31/* units are errors per second */
31#define ATH9K_ANI_OFDM_TRIG_LOW_OLD 200 32#define ATH9K_ANI_OFDM_TRIG_LOW_OLD 200
32#define ATH9K_ANI_OFDM_TRIG_LOW_NEW 400 33#define ATH9K_ANI_OFDM_TRIG_LOW_NEW 400
34#define ATH9K_ANI_OFDM_TRIG_LOW_ABOVE_INI 900
33 35
34/* units are errors per second */ 36/* units are errors per second */
35#define ATH9K_ANI_CCK_TRIG_HIGH_OLD 200 37#define ATH9K_ANI_CCK_TRIG_HIGH_OLD 200
@@ -53,7 +55,7 @@
53#define ATH9K_ANI_RSSI_THR_LOW 7 55#define ATH9K_ANI_RSSI_THR_LOW 7
54 56
55#define ATH9K_ANI_PERIOD_OLD 100 57#define ATH9K_ANI_PERIOD_OLD 100
56#define ATH9K_ANI_PERIOD_NEW 1000 58#define ATH9K_ANI_PERIOD_NEW 300
57 59
58/* in ms */ 60/* in ms */
59#define ATH9K_ANI_POLLINTERVAL_OLD 100 61#define ATH9K_ANI_POLLINTERVAL_OLD 100
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index aba088005b22..c7492c6a2519 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -245,7 +245,6 @@ static int ar5008_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
245 REG_WRITE(ah, AR_PHY(0x37), reg32); 245 REG_WRITE(ah, AR_PHY(0x37), reg32);
246 246
247 ah->curchan = chan; 247 ah->curchan = chan;
248 ah->curchan_rad_index = -1;
249 248
250 return 0; 249 return 0;
251} 250}
@@ -619,19 +618,10 @@ static void ar5008_hw_init_bb(struct ath_hw *ah,
619 u32 synthDelay; 618 u32 synthDelay;
620 619
621 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY; 620 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
622 if (IS_CHAN_B(chan))
623 synthDelay = (4 * synthDelay) / 22;
624 else
625 synthDelay /= 10;
626
627 if (IS_CHAN_HALF_RATE(chan))
628 synthDelay *= 2;
629 else if (IS_CHAN_QUARTER_RATE(chan))
630 synthDelay *= 4;
631 621
632 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN); 622 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
633 623
634 udelay(synthDelay + BASE_ACTIVATE_DELAY); 624 ath9k_hw_synth_delay(ah, chan, synthDelay);
635} 625}
636 626
637static void ar5008_hw_init_chain_masks(struct ath_hw *ah) 627static void ar5008_hw_init_chain_masks(struct ath_hw *ah)
@@ -949,12 +939,8 @@ static bool ar5008_hw_rfbus_req(struct ath_hw *ah)
949static void ar5008_hw_rfbus_done(struct ath_hw *ah) 939static void ar5008_hw_rfbus_done(struct ath_hw *ah)
950{ 940{
951 u32 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY; 941 u32 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
952 if (IS_CHAN_B(ah->curchan))
953 synthDelay = (4 * synthDelay) / 22;
954 else
955 synthDelay /= 10;
956 942
957 udelay(synthDelay + BASE_ACTIVATE_DELAY); 943 ath9k_hw_synth_delay(ah, ah->curchan, synthDelay);
958 944
959 REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0); 945 REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
960} 946}
@@ -1047,46 +1033,8 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
1047 break; 1033 break;
1048 } 1034 }
1049 case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{ 1035 case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
1050 static const int m1ThreshLow[] = { 127, 50 };
1051 static const int m2ThreshLow[] = { 127, 40 };
1052 static const int m1Thresh[] = { 127, 0x4d };
1053 static const int m2Thresh[] = { 127, 0x40 };
1054 static const int m2CountThr[] = { 31, 16 };
1055 static const int m2CountThrLow[] = { 63, 48 };
1056 u32 on = param ? 1 : 0; 1036 u32 on = param ? 1 : 0;
1057 1037
1058 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
1059 AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
1060 m1ThreshLow[on]);
1061 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
1062 AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
1063 m2ThreshLow[on]);
1064 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
1065 AR_PHY_SFCORR_M1_THRESH,
1066 m1Thresh[on]);
1067 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
1068 AR_PHY_SFCORR_M2_THRESH,
1069 m2Thresh[on]);
1070 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
1071 AR_PHY_SFCORR_M2COUNT_THR,
1072 m2CountThr[on]);
1073 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
1074 AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
1075 m2CountThrLow[on]);
1076
1077 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
1078 AR_PHY_SFCORR_EXT_M1_THRESH_LOW,
1079 m1ThreshLow[on]);
1080 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
1081 AR_PHY_SFCORR_EXT_M2_THRESH_LOW,
1082 m2ThreshLow[on]);
1083 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
1084 AR_PHY_SFCORR_EXT_M1_THRESH,
1085 m1Thresh[on]);
1086 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
1087 AR_PHY_SFCORR_EXT_M2_THRESH,
1088 m2Thresh[on]);
1089
1090 if (on) 1038 if (on)
1091 REG_SET_BIT(ah, AR_PHY_SFCORR_LOW, 1039 REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
1092 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW); 1040 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index aa2abaf31cba..8d78253c26ce 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -136,6 +136,7 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
136 } 136 }
137 137
138 if (sync_cause) { 138 if (sync_cause) {
139 ath9k_debug_sync_cause(common, sync_cause);
139 fatal_int = 140 fatal_int =
140 (sync_cause & 141 (sync_cause &
141 (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR)) 142 (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index 3cbbb033fcea..846dd7974eb8 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -152,7 +152,6 @@ static int ar9002_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
152 REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32); 152 REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32);
153 153
154 ah->curchan = chan; 154 ah->curchan = chan;
155 ah->curchan_rad_index = -1;
156 155
157 return 0; 156 return 0;
158} 157}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index 46c79a3d4737..952cb2b4656b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -777,11 +777,11 @@ static const u32 ar9300Common_rx_gain_table_2p2[][2] = {
777 {0x0000a074, 0x00000000}, 777 {0x0000a074, 0x00000000},
778 {0x0000a078, 0x00000000}, 778 {0x0000a078, 0x00000000},
779 {0x0000a07c, 0x00000000}, 779 {0x0000a07c, 0x00000000},
780 {0x0000a080, 0x22222229}, 780 {0x0000a080, 0x1a1a1a1a},
781 {0x0000a084, 0x1d1d1d1d}, 781 {0x0000a084, 0x1a1a1a1a},
782 {0x0000a088, 0x1d1d1d1d}, 782 {0x0000a088, 0x1a1a1a1a},
783 {0x0000a08c, 0x1d1d1d1d}, 783 {0x0000a08c, 0x1a1a1a1a},
784 {0x0000a090, 0x171d1d1d}, 784 {0x0000a090, 0x171a1a1a},
785 {0x0000a094, 0x11111717}, 785 {0x0000a094, 0x11111717},
786 {0x0000a098, 0x00030311}, 786 {0x0000a098, 0x00030311},
787 {0x0000a09c, 0x00000000}, 787 {0x0000a09c, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 63089cc1fafd..a0387a027db0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -1000,10 +1000,12 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
1000 if (mci && IS_CHAN_2GHZ(chan) && run_agc_cal) 1000 if (mci && IS_CHAN_2GHZ(chan) && run_agc_cal)
1001 ar9003_mci_init_cal_req(ah, &is_reusable); 1001 ar9003_mci_init_cal_req(ah, &is_reusable);
1002 1002
1003 txiqcal_done = ar9003_hw_tx_iq_cal_run(ah); 1003 if (!(IS_CHAN_HALF_RATE(chan) || IS_CHAN_QUARTER_RATE(chan))) {
1004 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS); 1004 txiqcal_done = ar9003_hw_tx_iq_cal_run(ah);
1005 udelay(5); 1005 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
1006 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN); 1006 udelay(5);
1007 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
1008 }
1007 1009
1008skip_tx_iqcal: 1010skip_tx_iqcal:
1009 if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) { 1011 if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 6bb4db052bb0..ac53d901801d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -30,11 +30,6 @@
30#define CTL_11A_EXT (CTL_11A | EXT_ADDITIVE) 30#define CTL_11A_EXT (CTL_11A | EXT_ADDITIVE)
31#define CTL_11G_EXT (CTL_11G | EXT_ADDITIVE) 31#define CTL_11G_EXT (CTL_11G | EXT_ADDITIVE)
32#define CTL_11B_EXT (CTL_11B | EXT_ADDITIVE) 32#define CTL_11B_EXT (CTL_11B | EXT_ADDITIVE)
33#define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6 /* 10*log10(2)*2 */
34#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 9 /* 10*log10(3)*2 */
35#define PWRINCR_3_TO_1_CHAIN 9 /* 10*log(3)*2 */
36#define PWRINCR_3_TO_2_CHAIN 3 /* floor(10*log(3/2)*2) */
37#define PWRINCR_2_TO_1_CHAIN 6 /* 10*log(2)*2 */
38 33
39#define SUB_NUM_CTL_MODES_AT_5G_40 2 /* excluding HT40, EXT-OFDM */ 34#define SUB_NUM_CTL_MODES_AT_5G_40 2 /* excluding HT40, EXT-OFDM */
40#define SUB_NUM_CTL_MODES_AT_2G_40 3 /* excluding HT40, EXT-OFDM, EXT-CCK */ 35#define SUB_NUM_CTL_MODES_AT_2G_40 3 /* excluding HT40, EXT-OFDM, EXT-CCK */
@@ -2936,15 +2931,6 @@ static const struct ar9300_eeprom *ar9003_eeprom_struct_find_by_id(int id)
2936#undef N_LOOP 2931#undef N_LOOP
2937} 2932}
2938 2933
2939
2940static u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz)
2941{
2942 if (fbin == AR5416_BCHAN_UNUSED)
2943 return fbin;
2944
2945 return (u16) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin));
2946}
2947
2948static int ath9k_hw_ar9300_check_eeprom(struct ath_hw *ah) 2934static int ath9k_hw_ar9300_check_eeprom(struct ath_hw *ah)
2949{ 2935{
2950 return 0; 2936 return 0;
@@ -4070,7 +4056,7 @@ static u8 ar9003_hw_eeprom_get_tgt_pwr(struct ath_hw *ah,
4070 * targetpower piers stored on eeprom 4056 * targetpower piers stored on eeprom
4071 */ 4057 */
4072 for (i = 0; i < numPiers; i++) { 4058 for (i = 0; i < numPiers; i++) {
4073 freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz); 4059 freqArray[i] = ath9k_hw_fbin2freq(pFreqBin[i], is2GHz);
4074 targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex]; 4060 targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
4075 } 4061 }
4076 4062
@@ -4106,7 +4092,7 @@ static u8 ar9003_hw_eeprom_get_ht20_tgt_pwr(struct ath_hw *ah,
4106 * from targetpower piers stored on eeprom 4092 * from targetpower piers stored on eeprom
4107 */ 4093 */
4108 for (i = 0; i < numPiers; i++) { 4094 for (i = 0; i < numPiers; i++) {
4109 freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz); 4095 freqArray[i] = ath9k_hw_fbin2freq(pFreqBin[i], is2GHz);
4110 targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex]; 4096 targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
4111 } 4097 }
4112 4098
@@ -4142,7 +4128,7 @@ static u8 ar9003_hw_eeprom_get_ht40_tgt_pwr(struct ath_hw *ah,
4142 * targetpower piers stored on eeprom 4128 * targetpower piers stored on eeprom
4143 */ 4129 */
4144 for (i = 0; i < numPiers; i++) { 4130 for (i = 0; i < numPiers; i++) {
4145 freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz); 4131 freqArray[i] = ath9k_hw_fbin2freq(pFreqBin[i], is2GHz);
4146 targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex]; 4132 targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
4147 } 4133 }
4148 4134
@@ -4167,7 +4153,7 @@ static u8 ar9003_hw_eeprom_get_cck_tgt_pwr(struct ath_hw *ah,
4167 * targetpower piers stored on eeprom 4153 * targetpower piers stored on eeprom
4168 */ 4154 */
4169 for (i = 0; i < numPiers; i++) { 4155 for (i = 0; i < numPiers; i++) {
4170 freqArray[i] = FBIN2FREQ(pFreqBin[i], 1); 4156 freqArray[i] = ath9k_hw_fbin2freq(pFreqBin[i], 1);
4171 targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex]; 4157 targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
4172 } 4158 }
4173 4159
@@ -4295,18 +4281,10 @@ static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
4295#undef POW_SM 4281#undef POW_SM
4296} 4282}
4297 4283
4298static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq, 4284static void ar9003_hw_get_legacy_target_powers(struct ath_hw *ah, u16 freq,
4299 u8 *targetPowerValT2) 4285 u8 *targetPowerValT2,
4286 bool is2GHz)
4300{ 4287{
4301 /* XXX: hard code for now, need to get from eeprom struct */
4302 u8 ht40PowerIncForPdadc = 0;
4303 bool is2GHz = false;
4304 unsigned int i = 0;
4305 struct ath_common *common = ath9k_hw_common(ah);
4306
4307 if (freq < 4000)
4308 is2GHz = true;
4309
4310 targetPowerValT2[ALL_TARGET_LEGACY_6_24] = 4288 targetPowerValT2[ALL_TARGET_LEGACY_6_24] =
4311 ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_6_24, freq, 4289 ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_6_24, freq,
4312 is2GHz); 4290 is2GHz);
@@ -4319,6 +4297,11 @@ static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq,
4319 targetPowerValT2[ALL_TARGET_LEGACY_54] = 4297 targetPowerValT2[ALL_TARGET_LEGACY_54] =
4320 ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_54, freq, 4298 ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_54, freq,
4321 is2GHz); 4299 is2GHz);
4300}
4301
4302static void ar9003_hw_get_cck_target_powers(struct ath_hw *ah, u16 freq,
4303 u8 *targetPowerValT2)
4304{
4322 targetPowerValT2[ALL_TARGET_LEGACY_1L_5L] = 4305 targetPowerValT2[ALL_TARGET_LEGACY_1L_5L] =
4323 ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_1L_5L, 4306 ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_1L_5L,
4324 freq); 4307 freq);
@@ -4328,6 +4311,11 @@ static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq,
4328 ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_11L, freq); 4311 ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_11L, freq);
4329 targetPowerValT2[ALL_TARGET_LEGACY_11S] = 4312 targetPowerValT2[ALL_TARGET_LEGACY_11S] =
4330 ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_11S, freq); 4313 ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_11S, freq);
4314}
4315
4316static void ar9003_hw_get_ht20_target_powers(struct ath_hw *ah, u16 freq,
4317 u8 *targetPowerValT2, bool is2GHz)
4318{
4331 targetPowerValT2[ALL_TARGET_HT20_0_8_16] = 4319 targetPowerValT2[ALL_TARGET_HT20_0_8_16] =
4332 ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_0_8_16, freq, 4320 ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_0_8_16, freq,
4333 is2GHz); 4321 is2GHz);
@@ -4370,6 +4358,16 @@ static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq,
4370 targetPowerValT2[ALL_TARGET_HT20_23] = 4358 targetPowerValT2[ALL_TARGET_HT20_23] =
4371 ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_23, freq, 4359 ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_23, freq,
4372 is2GHz); 4360 is2GHz);
4361}
4362
4363static void ar9003_hw_get_ht40_target_powers(struct ath_hw *ah,
4364 u16 freq,
4365 u8 *targetPowerValT2,
4366 bool is2GHz)
4367{
4368 /* XXX: hard code for now, need to get from eeprom struct */
4369 u8 ht40PowerIncForPdadc = 0;
4370
4373 targetPowerValT2[ALL_TARGET_HT40_0_8_16] = 4371 targetPowerValT2[ALL_TARGET_HT40_0_8_16] =
4374 ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_0_8_16, freq, 4372 ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_0_8_16, freq,
4375 is2GHz) + ht40PowerIncForPdadc; 4373 is2GHz) + ht40PowerIncForPdadc;
@@ -4413,6 +4411,26 @@ static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq,
4413 targetPowerValT2[ALL_TARGET_HT40_23] = 4411 targetPowerValT2[ALL_TARGET_HT40_23] =
4414 ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_23, freq, 4412 ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_23, freq,
4415 is2GHz) + ht40PowerIncForPdadc; 4413 is2GHz) + ht40PowerIncForPdadc;
4414}
4415
4416static void ar9003_hw_get_target_power_eeprom(struct ath_hw *ah,
4417 struct ath9k_channel *chan,
4418 u8 *targetPowerValT2)
4419{
4420 bool is2GHz = IS_CHAN_2GHZ(chan);
4421 unsigned int i = 0;
4422 struct ath_common *common = ath9k_hw_common(ah);
4423 u16 freq = chan->channel;
4424
4425 if (is2GHz)
4426 ar9003_hw_get_cck_target_powers(ah, freq, targetPowerValT2);
4427
4428 ar9003_hw_get_legacy_target_powers(ah, freq, targetPowerValT2, is2GHz);
4429 ar9003_hw_get_ht20_target_powers(ah, freq, targetPowerValT2, is2GHz);
4430
4431 if (IS_CHAN_HT40(chan))
4432 ar9003_hw_get_ht40_target_powers(ah, freq, targetPowerValT2,
4433 is2GHz);
4416 4434
4417 for (i = 0; i < ar9300RateSize; i++) { 4435 for (i = 0; i < ar9300RateSize; i++) {
4418 ath_dbg(common, EEPROM, "TPC[%02d] 0x%08x\n", 4436 ath_dbg(common, EEPROM, "TPC[%02d] 0x%08x\n",
@@ -4464,7 +4482,7 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
4464 is2GHz = 1; 4482 is2GHz = 1;
4465 } 4483 }
4466 4484
4467 *pfrequency = FBIN2FREQ(*pCalPier, is2GHz); 4485 *pfrequency = ath9k_hw_fbin2freq(*pCalPier, is2GHz);
4468 *pcorrection = pCalPierStruct->refPower; 4486 *pcorrection = pCalPierStruct->refPower;
4469 *ptemperature = pCalPierStruct->tempMeas; 4487 *ptemperature = pCalPierStruct->tempMeas;
4470 *pvoltage = pCalPierStruct->voltMeas; 4488 *pvoltage = pCalPierStruct->voltMeas;
@@ -4789,34 +4807,9 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
4789 bool is2ghz = IS_CHAN_2GHZ(chan); 4807 bool is2ghz = IS_CHAN_2GHZ(chan);
4790 4808
4791 ath9k_hw_get_channel_centers(ah, chan, &centers); 4809 ath9k_hw_get_channel_centers(ah, chan, &centers);
4792 scaledPower = powerLimit - antenna_reduction; 4810 scaledPower = ath9k_hw_get_scaled_power(ah, powerLimit,
4793 4811 antenna_reduction);
4794 /*
4795 * Reduce scaled Power by number of chains active to get
4796 * to per chain tx power level
4797 */
4798 switch (ar5416_get_ntxchains(ah->txchainmask)) {
4799 case 1:
4800 break;
4801 case 2:
4802 if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN)
4803 scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
4804 else
4805 scaledPower = 0;
4806 break;
4807 case 3:
4808 if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN)
4809 scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
4810 else
4811 scaledPower = 0;
4812 break;
4813 }
4814 4812
4815 scaledPower = max((u16)0, scaledPower);
4816
4817 /*
4818 * Get target powers from EEPROM - our baseline for TX Power
4819 */
4820 if (is2ghz) { 4813 if (is2ghz) {
4821 /* Setup for CTL modes */ 4814 /* Setup for CTL modes */
4822 /* CTL_11B, CTL_11G, CTL_2GHT20 */ 4815 /* CTL_11B, CTL_11G, CTL_2GHT20 */
@@ -4988,7 +4981,12 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
4988 unsigned int i = 0, paprd_scale_factor = 0; 4981 unsigned int i = 0, paprd_scale_factor = 0;
4989 u8 pwr_idx, min_pwridx = 0; 4982 u8 pwr_idx, min_pwridx = 0;
4990 4983
4991 ar9003_hw_set_target_power_eeprom(ah, chan->channel, targetPowerValT2); 4984 memset(targetPowerValT2, 0 , sizeof(targetPowerValT2));
4985
4986 /*
4987 * Get target powers from EEPROM - our baseline for TX Power
4988 */
4989 ar9003_hw_get_target_power_eeprom(ah, chan, targetPowerValT2);
4992 4990
4993 if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) { 4991 if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) {
4994 if (IS_CHAN_2GHZ(chan)) 4992 if (IS_CHAN_2GHZ(chan))
@@ -5060,8 +5058,6 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
5060 i, targetPowerValT2[i]); 5058 i, targetPowerValT2[i]);
5061 } 5059 }
5062 5060
5063 ah->txpower_limit = regulatory->max_power_level;
5064
5065 /* Write target power array to registers */ 5061 /* Write target power array to registers */
5066 ar9003_hw_tx_power_regwrite(ah, targetPowerValT2); 5062 ar9003_hw_tx_power_regwrite(ah, targetPowerValT2);
5067 ar9003_hw_calibration_apply(ah, chan->channel); 5063 ar9003_hw_calibration_apply(ah, chan->channel);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index bb223fe82816..2505ac44f0c1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -42,7 +42,6 @@
42#define AR9300_EEPMISC_WOW 0x02 42#define AR9300_EEPMISC_WOW 0x02
43#define AR9300_CUSTOMER_DATA_SIZE 20 43#define AR9300_CUSTOMER_DATA_SIZE 20
44 44
45#define FBIN2FREQ(x, y) ((y) ? (2300 + x) : (4800 + 5 * x))
46#define AR9300_MAX_CHAINS 3 45#define AR9300_MAX_CHAINS 3
47#define AR9300_ANT_16S 25 46#define AR9300_ANT_16S 25
48#define AR9300_FUTURE_MODAL_SZ 6 47#define AR9300_FUTURE_MODAL_SZ 6
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index 0f56e322dd3b..a0e3394b10dc 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -305,11 +305,6 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
305 ar9462_common_rx_gain_table_2p0, 305 ar9462_common_rx_gain_table_2p0,
306 ARRAY_SIZE(ar9462_common_rx_gain_table_2p0), 2); 306 ARRAY_SIZE(ar9462_common_rx_gain_table_2p0), 2);
307 307
308 INIT_INI_ARRAY(&ah->ini_BTCOEX_MAX_TXPWR,
309 ar9462_2p0_BTCOEX_MAX_TXPWR_table,
310 ARRAY_SIZE(ar9462_2p0_BTCOEX_MAX_TXPWR_table),
311 2);
312
313 /* Awake -> Sleep Setting */ 308 /* Awake -> Sleep Setting */
314 INIT_INI_ARRAY(&ah->iniPcieSerdes, 309 INIT_INI_ARRAY(&ah->iniPcieSerdes,
315 PCIE_PLL_ON_CREQ_DIS_L1_2P0, 310 PCIE_PLL_ON_CREQ_DIS_L1_2P0,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index a66a13b76848..d9e0824af093 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -306,6 +306,8 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
306 ar9003_mci_get_isr(ah, masked); 306 ar9003_mci_get_isr(ah, masked);
307 307
308 if (sync_cause) { 308 if (sync_cause) {
309 ath9k_debug_sync_cause(common, sync_cause);
310
309 if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) { 311 if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
310 REG_WRITE(ah, AR_RC, AR_RC_HOSTIF); 312 REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
311 REG_WRITE(ah, AR_RC, 0); 313 REG_WRITE(ah, AR_RC, 0);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 600aca9fe6b1..11abb972be1f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -152,7 +152,6 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
152 REG_WRITE(ah, AR_PHY_65NM_CH0_SYNTH7, reg32); 152 REG_WRITE(ah, AR_PHY_65NM_CH0_SYNTH7, reg32);
153 153
154 ah->curchan = chan; 154 ah->curchan = chan;
155 ah->curchan_rad_index = -1;
156 155
157 return 0; 156 return 0;
158} 157}
@@ -209,11 +208,12 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
209 continue; 208 continue;
210 negative = 0; 209 negative = 0;
211 if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah)) 210 if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah))
212 cur_bb_spur = FBIN2FREQ(spur_fbin_ptr[i], 211 cur_bb_spur = ath9k_hw_fbin2freq(spur_fbin_ptr[i],
213 IS_CHAN_2GHZ(chan)) - synth_freq; 212 IS_CHAN_2GHZ(chan));
214 else 213 else
215 cur_bb_spur = spur_freq[i] - synth_freq; 214 cur_bb_spur = spur_freq[i];
216 215
216 cur_bb_spur -= synth_freq;
217 if (cur_bb_spur < 0) { 217 if (cur_bb_spur < 0) {
218 negative = 1; 218 negative = 1;
219 cur_bb_spur = -cur_bb_spur; 219 cur_bb_spur = -cur_bb_spur;
@@ -443,7 +443,8 @@ static void ar9003_hw_spur_mitigate_ofdm(struct ath_hw *ah,
443 ar9003_hw_spur_ofdm_clear(ah); 443 ar9003_hw_spur_ofdm_clear(ah);
444 444
445 for (i = 0; i < AR_EEPROM_MODAL_SPURS && spurChansPtr[i]; i++) { 445 for (i = 0; i < AR_EEPROM_MODAL_SPURS && spurChansPtr[i]; i++) {
446 freq_offset = FBIN2FREQ(spurChansPtr[i], mode) - synth_freq; 446 freq_offset = ath9k_hw_fbin2freq(spurChansPtr[i], mode);
447 freq_offset -= synth_freq;
447 if (abs(freq_offset) < range) { 448 if (abs(freq_offset) < range) {
448 ar9003_hw_spur_ofdm_work(ah, chan, freq_offset); 449 ar9003_hw_spur_ofdm_work(ah, chan, freq_offset);
449 break; 450 break;
@@ -525,22 +526,10 @@ static void ar9003_hw_init_bb(struct ath_hw *ah,
525 * Value is in 100ns increments. 526 * Value is in 100ns increments.
526 */ 527 */
527 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY; 528 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
528 if (IS_CHAN_B(chan))
529 synthDelay = (4 * synthDelay) / 22;
530 else
531 synthDelay /= 10;
532 529
533 /* Activate the PHY (includes baseband activate + synthesizer on) */ 530 /* Activate the PHY (includes baseband activate + synthesizer on) */
534 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN); 531 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
535 532 ath9k_hw_synth_delay(ah, chan, synthDelay);
536 /*
537 * There is an issue if the AP starts the calibration before
538 * the base band timeout completes. This could result in the
539 * rx_clear false triggering. As a workaround we add delay an
540 * extra BASE_ACTIVATE_DELAY usecs to ensure this condition
541 * does not happen.
542 */
543 udelay(synthDelay + BASE_ACTIVATE_DELAY);
544} 533}
545 534
546static void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx) 535static void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
@@ -684,9 +673,6 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
684 673
685 REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites); 674 REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites);
686 675
687 if (AR_SREV_9462(ah))
688 ar9003_hw_prog_ini(ah, &ah->ini_BTCOEX_MAX_TXPWR, 1);
689
690 if (chan->channel == 2484) 676 if (chan->channel == 2484)
691 ar9003_hw_prog_ini(ah, &ah->ini_japan2484, 1); 677 ar9003_hw_prog_ini(ah, &ah->ini_japan2484, 1);
692 678
@@ -725,6 +711,14 @@ static void ar9003_hw_set_rfmode(struct ath_hw *ah,
725 711
726 if (IS_CHAN_A_FAST_CLOCK(ah, chan)) 712 if (IS_CHAN_A_FAST_CLOCK(ah, chan))
727 rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE); 713 rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
714 if (IS_CHAN_QUARTER_RATE(chan))
715 rfMode |= AR_PHY_MODE_QUARTER;
716 if (IS_CHAN_HALF_RATE(chan))
717 rfMode |= AR_PHY_MODE_HALF;
718
719 if (rfMode & (AR_PHY_MODE_QUARTER | AR_PHY_MODE_HALF))
720 REG_RMW_FIELD(ah, AR_PHY_FRAME_CTL,
721 AR_PHY_FRAME_CTL_CF_OVERLAP_WINDOW, 3);
728 722
729 REG_WRITE(ah, AR_PHY_MODE, rfMode); 723 REG_WRITE(ah, AR_PHY_MODE, rfMode);
730} 724}
@@ -795,12 +789,8 @@ static bool ar9003_hw_rfbus_req(struct ath_hw *ah)
795static void ar9003_hw_rfbus_done(struct ath_hw *ah) 789static void ar9003_hw_rfbus_done(struct ath_hw *ah)
796{ 790{
797 u32 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY; 791 u32 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
798 if (IS_CHAN_B(ah->curchan))
799 synthDelay = (4 * synthDelay) / 22;
800 else
801 synthDelay /= 10;
802 792
803 udelay(synthDelay + BASE_ACTIVATE_DELAY); 793 ath9k_hw_synth_delay(ah, ah->curchan, synthDelay);
804 794
805 REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0); 795 REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
806} 796}
@@ -823,55 +813,6 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
823 * on == 0 means more noise imm 813 * on == 0 means more noise imm
824 */ 814 */
825 u32 on = param ? 1 : 0; 815 u32 on = param ? 1 : 0;
826 /*
827 * make register setting for default
828 * (weak sig detect ON) come from INI file
829 */
830 int m1ThreshLow = on ?
831 aniState->iniDef.m1ThreshLow : m1ThreshLow_off;
832 int m2ThreshLow = on ?
833 aniState->iniDef.m2ThreshLow : m2ThreshLow_off;
834 int m1Thresh = on ?
835 aniState->iniDef.m1Thresh : m1Thresh_off;
836 int m2Thresh = on ?
837 aniState->iniDef.m2Thresh : m2Thresh_off;
838 int m2CountThr = on ?
839 aniState->iniDef.m2CountThr : m2CountThr_off;
840 int m2CountThrLow = on ?
841 aniState->iniDef.m2CountThrLow : m2CountThrLow_off;
842 int m1ThreshLowExt = on ?
843 aniState->iniDef.m1ThreshLowExt : m1ThreshLowExt_off;
844 int m2ThreshLowExt = on ?
845 aniState->iniDef.m2ThreshLowExt : m2ThreshLowExt_off;
846 int m1ThreshExt = on ?
847 aniState->iniDef.m1ThreshExt : m1ThreshExt_off;
848 int m2ThreshExt = on ?
849 aniState->iniDef.m2ThreshExt : m2ThreshExt_off;
850
851 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
852 AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
853 m1ThreshLow);
854 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
855 AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
856 m2ThreshLow);
857 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
858 AR_PHY_SFCORR_M1_THRESH, m1Thresh);
859 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
860 AR_PHY_SFCORR_M2_THRESH, m2Thresh);
861 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
862 AR_PHY_SFCORR_M2COUNT_THR, m2CountThr);
863 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
864 AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
865 m2CountThrLow);
866
867 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
868 AR_PHY_SFCORR_EXT_M1_THRESH_LOW, m1ThreshLowExt);
869 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
870 AR_PHY_SFCORR_EXT_M2_THRESH_LOW, m2ThreshLowExt);
871 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
872 AR_PHY_SFCORR_EXT_M1_THRESH, m1ThreshExt);
873 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
874 AR_PHY_SFCORR_EXT_M2_THRESH, m2ThreshExt);
875 816
876 if (on) 817 if (on)
877 REG_SET_BIT(ah, AR_PHY_SFCORR_LOW, 818 REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index d834d97fe727..7268a48a92a1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -468,6 +468,9 @@
468#define AR_PHY_ADDAC_PARA_CTL (AR_SM_BASE + 0x150) 468#define AR_PHY_ADDAC_PARA_CTL (AR_SM_BASE + 0x150)
469#define AR_PHY_XPA_CFG (AR_SM_BASE + 0x158) 469#define AR_PHY_XPA_CFG (AR_SM_BASE + 0x158)
470 470
471#define AR_PHY_FRAME_CTL_CF_OVERLAP_WINDOW 3
472#define AR_PHY_FRAME_CTL_CF_OVERLAP_WINDOW_S 0
473
471#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A 0x0001FC00 474#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A 0x0001FC00
472#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A_S 10 475#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A_S 10
473#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A 0x3FF 476#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A 0x3FF
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index b6ba1e8149be..1d6658e139b5 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -1115,9 +1115,9 @@ static const u32 ar9462_2p0_mac_core[][2] = {
1115 {0x000081f8, 0x00000000}, 1115 {0x000081f8, 0x00000000},
1116 {0x000081fc, 0x00000000}, 1116 {0x000081fc, 0x00000000},
1117 {0x00008240, 0x00100000}, 1117 {0x00008240, 0x00100000},
1118 {0x00008244, 0x0010f400}, 1118 {0x00008244, 0x0010f424},
1119 {0x00008248, 0x00000800}, 1119 {0x00008248, 0x00000800},
1120 {0x0000824c, 0x0001e800}, 1120 {0x0000824c, 0x0001e848},
1121 {0x00008250, 0x00000000}, 1121 {0x00008250, 0x00000000},
1122 {0x00008254, 0x00000000}, 1122 {0x00008254, 0x00000000},
1123 {0x00008258, 0x00000000}, 1123 {0x00008258, 0x00000000},
@@ -1448,16 +1448,4 @@ static const u32 ar9462_common_mixed_rx_gain_table_2p0[][2] = {
1448 {0x0000b1fc, 0x00000196}, 1448 {0x0000b1fc, 0x00000196},
1449}; 1449};
1450 1450
1451static const u32 ar9462_2p0_BTCOEX_MAX_TXPWR_table[][2] = {
1452 /* Addr allmodes */
1453 {0x000018c0, 0x10101010},
1454 {0x000018c4, 0x10101010},
1455 {0x000018c8, 0x10101010},
1456 {0x000018cc, 0x10101010},
1457 {0x000018d0, 0x10101010},
1458 {0x000018d4, 0x10101010},
1459 {0x000018d8, 0x10101010},
1460 {0x000018dc, 0x10101010},
1461};
1462
1463#endif /* INITVALS_9462_2P0_H */ 1451#endif /* INITVALS_9462_2P0_H */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 8c84049682ab..a277cf6f339d 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -26,6 +26,7 @@
26#include "debug.h" 26#include "debug.h"
27#include "common.h" 27#include "common.h"
28#include "mci.h" 28#include "mci.h"
29#include "dfs.h"
29 30
30/* 31/*
31 * Header for the ath9k.ko driver core *only* -- hw code nor any other driver 32 * Header for the ath9k.ko driver core *only* -- hw code nor any other driver
@@ -369,7 +370,7 @@ struct ath_vif {
369 * number of beacon intervals, the game's up. 370 * number of beacon intervals, the game's up.
370 */ 371 */
371#define BSTUCK_THRESH 9 372#define BSTUCK_THRESH 9
372#define ATH_BCBUF 4 373#define ATH_BCBUF 8
373#define ATH_DEFAULT_BINTVAL 100 /* TU */ 374#define ATH_DEFAULT_BINTVAL 100 /* TU */
374#define ATH_DEFAULT_BMISS_LIMIT 10 375#define ATH_DEFAULT_BMISS_LIMIT 10
375#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024) 376#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
@@ -430,6 +431,8 @@ void ath9k_set_beaconing_status(struct ath_softc *sc, bool status);
430void ath_reset_work(struct work_struct *work); 431void ath_reset_work(struct work_struct *work);
431void ath_hw_check(struct work_struct *work); 432void ath_hw_check(struct work_struct *work);
432void ath_hw_pll_work(struct work_struct *work); 433void ath_hw_pll_work(struct work_struct *work);
434void ath_rx_poll(unsigned long data);
435void ath_start_rx_poll(struct ath_softc *sc, u8 nbeacon);
433void ath_paprd_calibrate(struct work_struct *work); 436void ath_paprd_calibrate(struct work_struct *work);
434void ath_ani_calibrate(unsigned long data); 437void ath_ani_calibrate(unsigned long data);
435void ath_start_ani(struct ath_common *common); 438void ath_start_ani(struct ath_common *common);
@@ -670,6 +673,7 @@ struct ath_softc {
670 struct ath_beacon_config cur_beacon_conf; 673 struct ath_beacon_config cur_beacon_conf;
671 struct delayed_work tx_complete_work; 674 struct delayed_work tx_complete_work;
672 struct delayed_work hw_pll_work; 675 struct delayed_work hw_pll_work;
676 struct timer_list rx_poll_timer;
673 677
674#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT 678#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
675 struct ath_btcoex btcoex; 679 struct ath_btcoex btcoex;
@@ -680,6 +684,7 @@ struct ath_softc {
680 684
681 struct ath_ant_comb ant_comb; 685 struct ath_ant_comb ant_comb;
682 u8 ant_tx, ant_rx; 686 u8 ant_tx, ant_rx;
687 struct dfs_pattern_detector *dfs_detector;
683}; 688};
684 689
685void ath9k_tasklet(unsigned long data); 690void ath9k_tasklet(unsigned long data);
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 626418222c85..11bc55e3d697 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -91,7 +91,7 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ieee80211_vif *vif,
91 info.txpower = MAX_RATE_POWER; 91 info.txpower = MAX_RATE_POWER;
92 info.keyix = ATH9K_TXKEYIX_INVALID; 92 info.keyix = ATH9K_TXKEYIX_INVALID;
93 info.keytype = ATH9K_KEY_TYPE_CLEAR; 93 info.keytype = ATH9K_KEY_TYPE_CLEAR;
94 info.flags = ATH9K_TXDESC_NOACK | ATH9K_TXDESC_INTREQ; 94 info.flags = ATH9K_TXDESC_NOACK | ATH9K_TXDESC_CLRDMASK;
95 95
96 info.buf_addr[0] = bf->bf_buf_addr; 96 info.buf_addr[0] = bf->bf_buf_addr;
97 info.buf_len[0] = roundup(skb->len, 4); 97 info.buf_len[0] = roundup(skb->len, 4);
@@ -359,6 +359,11 @@ void ath_beacon_tasklet(unsigned long data)
359 int slot; 359 int slot;
360 u32 bfaddr, bc = 0; 360 u32 bfaddr, bc = 0;
361 361
362 if (work_pending(&sc->hw_reset_work)) {
363 ath_dbg(common, RESET,
364 "reset work is pending, skip beaconing now\n");
365 return;
366 }
362 /* 367 /*
363 * Check if the previous beacon has gone out. If 368 * Check if the previous beacon has gone out. If
364 * not don't try to post another, skip this period 369 * not don't try to post another, skip this period
@@ -369,6 +374,9 @@ void ath_beacon_tasklet(unsigned long data)
369 if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0) { 374 if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0) {
370 sc->beacon.bmisscnt++; 375 sc->beacon.bmisscnt++;
371 376
377 if (!ath9k_hw_check_alive(ah))
378 ieee80211_queue_work(sc->hw, &sc->hw_check_work);
379
372 if (sc->beacon.bmisscnt < BSTUCK_THRESH * sc->nbcnvifs) { 380 if (sc->beacon.bmisscnt < BSTUCK_THRESH * sc->nbcnvifs) {
373 ath_dbg(common, BSTUCK, 381 ath_dbg(common, BSTUCK,
374 "missed %u consecutive beacons\n", 382 "missed %u consecutive beacons\n",
@@ -378,6 +386,7 @@ void ath_beacon_tasklet(unsigned long data)
378 ath9k_hw_bstuck_nfcal(ah); 386 ath9k_hw_bstuck_nfcal(ah);
379 } else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) { 387 } else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) {
380 ath_dbg(common, BSTUCK, "beacon is officially stuck\n"); 388 ath_dbg(common, BSTUCK, "beacon is officially stuck\n");
389 sc->beacon.bmisscnt = 0;
381 sc->sc_flags |= SC_OP_TSF_RESET; 390 sc->sc_flags |= SC_OP_TSF_RESET;
382 ieee80211_queue_work(sc->hw, &sc->hw_reset_work); 391 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
383 } 392 }
@@ -650,6 +659,8 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
650 u32 tsf, intval, nexttbtt; 659 u32 tsf, intval, nexttbtt;
651 660
652 ath9k_reset_beacon_status(sc); 661 ath9k_reset_beacon_status(sc);
662 if (!(sc->sc_flags & SC_OP_BEACONS))
663 ath9k_hw_settsf64(ah, sc->beacon.bc_tstamp);
653 664
654 intval = TU_TO_USEC(conf->beacon_interval); 665 intval = TU_TO_USEC(conf->beacon_interval);
655 tsf = roundup(ath9k_hw_gettsf32(ah) + TU_TO_USEC(FUDGE), intval); 666 tsf = roundup(ath9k_hw_gettsf32(ah) + TU_TO_USEC(FUDGE), intval);
@@ -806,8 +817,10 @@ void ath9k_set_beaconing_status(struct ath_softc *sc, bool status)
806{ 817{
807 struct ath_hw *ah = sc->sc_ah; 818 struct ath_hw *ah = sc->sc_ah;
808 819
809 if (!ath_has_valid_bslot(sc)) 820 if (!ath_has_valid_bslot(sc)) {
821 sc->sc_flags &= ~SC_OP_BEACONS;
810 return; 822 return;
823 }
811 824
812 ath9k_ps_wakeup(sc); 825 ath9k_ps_wakeup(sc);
813 if (status) { 826 if (status) {
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index ec3271993411..1ca6da80d4ad 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -108,9 +108,7 @@ void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah)
108 return; 108 return;
109 } 109 }
110 110
111 if (AR_SREV_9462(ah)) { 111 if (AR_SREV_9300_20_OR_LATER(ah)) {
112 btcoex_hw->scheme = ATH_BTCOEX_CFG_MCI;
113 } else if (AR_SREV_9300_20_OR_LATER(ah)) {
114 btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE; 112 btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
115 btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300; 113 btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300;
116 btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300; 114 btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300;
@@ -284,11 +282,12 @@ void ath9k_hw_btcoex_enable(struct ath_hw *ah)
284 ath9k_hw_btcoex_enable_2wire(ah); 282 ath9k_hw_btcoex_enable_2wire(ah);
285 break; 283 break;
286 case ATH_BTCOEX_CFG_3WIRE: 284 case ATH_BTCOEX_CFG_3WIRE:
285 if (AR_SREV_9462(ah)) {
286 ath9k_hw_btcoex_enable_mci(ah);
287 return;
288 }
287 ath9k_hw_btcoex_enable_3wire(ah); 289 ath9k_hw_btcoex_enable_3wire(ah);
288 break; 290 break;
289 case ATH_BTCOEX_CFG_MCI:
290 ath9k_hw_btcoex_enable_mci(ah);
291 return;
292 } 291 }
293 292
294 REG_RMW(ah, AR_GPIO_PDPU, 293 REG_RMW(ah, AR_GPIO_PDPU,
@@ -305,11 +304,12 @@ void ath9k_hw_btcoex_disable(struct ath_hw *ah)
305 int i; 304 int i;
306 305
307 btcoex_hw->enabled = false; 306 btcoex_hw->enabled = false;
308 if (btcoex_hw->scheme == ATH_BTCOEX_CFG_MCI) { 307 if (AR_SREV_9462(ah)) {
309 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE); 308 ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
310 for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++) 309 for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
311 REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i), 310 REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i),
312 btcoex_hw->wlan_weight[i]); 311 btcoex_hw->wlan_weight[i]);
312 return;
313 } 313 }
314 ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0); 314 ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0);
315 315
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index 8f93aef4414f..3a1e1cfabd5e 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -51,7 +51,6 @@ enum ath_btcoex_scheme {
51 ATH_BTCOEX_CFG_NONE, 51 ATH_BTCOEX_CFG_NONE,
52 ATH_BTCOEX_CFG_2WIRE, 52 ATH_BTCOEX_CFG_2WIRE,
53 ATH_BTCOEX_CFG_3WIRE, 53 ATH_BTCOEX_CFG_3WIRE,
54 ATH_BTCOEX_CFG_MCI,
55}; 54};
56 55
57struct ath9k_hw_mci { 56struct ath9k_hw_mci {
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index ff47b32ecaf4..fde700c4e490 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -380,63 +380,75 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
380 size_t count, loff_t *ppos) 380 size_t count, loff_t *ppos)
381{ 381{
382 struct ath_softc *sc = file->private_data; 382 struct ath_softc *sc = file->private_data;
383 char buf[512];
384 unsigned int len = 0; 383 unsigned int len = 0;
384 int rv;
385 int mxlen = 4000;
386 char *buf = kmalloc(mxlen, GFP_KERNEL);
387 if (!buf)
388 return -ENOMEM;
389
390#define PR_IS(a, s) \
391 do { \
392 len += snprintf(buf + len, mxlen - len, \
393 "%21s: %10u\n", a, \
394 sc->debug.stats.istats.s); \
395 } while (0)
385 396
386 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 397 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
387 len += snprintf(buf + len, sizeof(buf) - len, 398 PR_IS("RXLP", rxlp);
388 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp); 399 PR_IS("RXHP", rxhp);
389 len += snprintf(buf + len, sizeof(buf) - len, 400 PR_IS("WATHDOG", bb_watchdog);
390 "%8s: %10u\n", "RXHP", sc->debug.stats.istats.rxhp);
391 len += snprintf(buf + len, sizeof(buf) - len,
392 "%8s: %10u\n", "WATCHDOG",
393 sc->debug.stats.istats.bb_watchdog);
394 } else { 401 } else {
395 len += snprintf(buf + len, sizeof(buf) - len, 402 PR_IS("RX", rxok);
396 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
397 } 403 }
398 len += snprintf(buf + len, sizeof(buf) - len, 404 PR_IS("RXEOL", rxeol);
399 "%8s: %10u\n", "RXEOL", sc->debug.stats.istats.rxeol); 405 PR_IS("RXORN", rxorn);
400 len += snprintf(buf + len, sizeof(buf) - len, 406 PR_IS("TX", txok);
401 "%8s: %10u\n", "RXORN", sc->debug.stats.istats.rxorn); 407 PR_IS("TXURN", txurn);
402 len += snprintf(buf + len, sizeof(buf) - len, 408 PR_IS("MIB", mib);
403 "%8s: %10u\n", "TX", sc->debug.stats.istats.txok); 409 PR_IS("RXPHY", rxphyerr);
404 len += snprintf(buf + len, sizeof(buf) - len, 410 PR_IS("RXKCM", rx_keycache_miss);
405 "%8s: %10u\n", "TXURN", sc->debug.stats.istats.txurn); 411 PR_IS("SWBA", swba);
406 len += snprintf(buf + len, sizeof(buf) - len, 412 PR_IS("BMISS", bmiss);
407 "%8s: %10u\n", "MIB", sc->debug.stats.istats.mib); 413 PR_IS("BNR", bnr);
408 len += snprintf(buf + len, sizeof(buf) - len, 414 PR_IS("CST", cst);
409 "%8s: %10u\n", "RXPHY", sc->debug.stats.istats.rxphyerr); 415 PR_IS("GTT", gtt);
410 len += snprintf(buf + len, sizeof(buf) - len, 416 PR_IS("TIM", tim);
411 "%8s: %10u\n", "RXKCM", sc->debug.stats.istats.rx_keycache_miss); 417 PR_IS("CABEND", cabend);
412 len += snprintf(buf + len, sizeof(buf) - len, 418 PR_IS("DTIMSYNC", dtimsync);
413 "%8s: %10u\n", "SWBA", sc->debug.stats.istats.swba); 419 PR_IS("DTIM", dtim);
414 len += snprintf(buf + len, sizeof(buf) - len, 420 PR_IS("TSFOOR", tsfoor);
415 "%8s: %10u\n", "BMISS", sc->debug.stats.istats.bmiss); 421 PR_IS("TOTAL", total);
416 len += snprintf(buf + len, sizeof(buf) - len, 422
417 "%8s: %10u\n", "BNR", sc->debug.stats.istats.bnr); 423 len += snprintf(buf + len, mxlen - len,
418 len += snprintf(buf + len, sizeof(buf) - len, 424 "SYNC_CAUSE stats:\n");
419 "%8s: %10u\n", "CST", sc->debug.stats.istats.cst); 425
420 len += snprintf(buf + len, sizeof(buf) - len, 426 PR_IS("Sync-All", sync_cause_all);
421 "%8s: %10u\n", "GTT", sc->debug.stats.istats.gtt); 427 PR_IS("RTC-IRQ", sync_rtc_irq);
422 len += snprintf(buf + len, sizeof(buf) - len, 428 PR_IS("MAC-IRQ", sync_mac_irq);
423 "%8s: %10u\n", "TIM", sc->debug.stats.istats.tim); 429 PR_IS("EEPROM-Illegal-Access", eeprom_illegal_access);
424 len += snprintf(buf + len, sizeof(buf) - len, 430 PR_IS("APB-Timeout", apb_timeout);
425 "%8s: %10u\n", "CABEND", sc->debug.stats.istats.cabend); 431 PR_IS("PCI-Mode-Conflict", pci_mode_conflict);
426 len += snprintf(buf + len, sizeof(buf) - len, 432 PR_IS("HOST1-Fatal", host1_fatal);
427 "%8s: %10u\n", "DTIMSYNC", sc->debug.stats.istats.dtimsync); 433 PR_IS("HOST1-Perr", host1_perr);
428 len += snprintf(buf + len, sizeof(buf) - len, 434 PR_IS("TRCV-FIFO-Perr", trcv_fifo_perr);
429 "%8s: %10u\n", "DTIM", sc->debug.stats.istats.dtim); 435 PR_IS("RADM-CPL-EP", radm_cpl_ep);
430 len += snprintf(buf + len, sizeof(buf) - len, 436 PR_IS("RADM-CPL-DLLP-Abort", radm_cpl_dllp_abort);
431 "%8s: %10u\n", "TSFOOR", sc->debug.stats.istats.tsfoor); 437 PR_IS("RADM-CPL-TLP-Abort", radm_cpl_tlp_abort);
432 len += snprintf(buf + len, sizeof(buf) - len, 438 PR_IS("RADM-CPL-ECRC-Err", radm_cpl_ecrc_err);
433 "%8s: %10u\n", "TOTAL", sc->debug.stats.istats.total); 439 PR_IS("RADM-CPL-Timeout", radm_cpl_timeout);
434 440 PR_IS("Local-Bus-Timeout", local_timeout);
435 441 PR_IS("PM-Access", pm_access);
436 if (len > sizeof(buf)) 442 PR_IS("MAC-Awake", mac_awake);
437 len = sizeof(buf); 443 PR_IS("MAC-Asleep", mac_asleep);
438 444 PR_IS("MAC-Sleep-Access", mac_sleep_access);
439 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 445
446 if (len > mxlen)
447 len = mxlen;
448
449 rv = simple_read_from_buffer(user_buf, count, ppos, buf, len);
450 kfree(buf);
451 return rv;
440} 452}
441 453
442static const struct file_operations fops_interrupt = { 454static const struct file_operations fops_interrupt = {
@@ -524,6 +536,7 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
524 PR("hw-put-tx-buf: ", puttxbuf); 536 PR("hw-put-tx-buf: ", puttxbuf);
525 PR("hw-tx-start: ", txstart); 537 PR("hw-tx-start: ", txstart);
526 PR("hw-tx-proc-desc: ", txprocdesc); 538 PR("hw-tx-proc-desc: ", txprocdesc);
539 PR("TX-Failed: ", txfailed);
527 len += snprintf(buf + len, size - len, 540 len += snprintf(buf + len, size - len,
528 "%s%11p%11p%10p%10p\n", "txq-memory-address:", 541 "%s%11p%11p%10p%10p\n", "txq-memory-address:",
529 sc->tx.txq_map[WME_AC_BE], 542 sc->tx.txq_map[WME_AC_BE],
@@ -880,6 +893,13 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
880 len += snprintf(buf + len, size - len, "%22s : %10u\n", s, \ 893 len += snprintf(buf + len, size - len, "%22s : %10u\n", s, \
881 sc->debug.stats.rxstats.phy_err_stats[p]); 894 sc->debug.stats.rxstats.phy_err_stats[p]);
882 895
896#define RXS_ERR(s, e) \
897 do { \
898 len += snprintf(buf + len, size - len, \
899 "%22s : %10u\n", s, \
900 sc->debug.stats.rxstats.e); \
901 } while (0)
902
883 struct ath_softc *sc = file->private_data; 903 struct ath_softc *sc = file->private_data;
884 char *buf; 904 char *buf;
885 unsigned int len = 0, size = 1600; 905 unsigned int len = 0, size = 1600;
@@ -889,27 +909,18 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
889 if (buf == NULL) 909 if (buf == NULL)
890 return -ENOMEM; 910 return -ENOMEM;
891 911
892 len += snprintf(buf + len, size - len, 912 RXS_ERR("CRC ERR", crc_err);
893 "%22s : %10u\n", "CRC ERR", 913 RXS_ERR("DECRYPT CRC ERR", decrypt_crc_err);
894 sc->debug.stats.rxstats.crc_err); 914 RXS_ERR("PHY ERR", phy_err);
895 len += snprintf(buf + len, size - len, 915 RXS_ERR("MIC ERR", mic_err);
896 "%22s : %10u\n", "DECRYPT CRC ERR", 916 RXS_ERR("PRE-DELIM CRC ERR", pre_delim_crc_err);
897 sc->debug.stats.rxstats.decrypt_crc_err); 917 RXS_ERR("POST-DELIM CRC ERR", post_delim_crc_err);
898 len += snprintf(buf + len, size - len, 918 RXS_ERR("DECRYPT BUSY ERR", decrypt_busy_err);
899 "%22s : %10u\n", "PHY ERR", 919 RXS_ERR("RX-LENGTH-ERR", rx_len_err);
900 sc->debug.stats.rxstats.phy_err); 920 RXS_ERR("RX-OOM-ERR", rx_oom_err);
901 len += snprintf(buf + len, size - len, 921 RXS_ERR("RX-RATE-ERR", rx_rate_err);
902 "%22s : %10u\n", "MIC ERR", 922 RXS_ERR("RX-DROP-RXFLUSH", rx_drop_rxflush);
903 sc->debug.stats.rxstats.mic_err); 923 RXS_ERR("RX-TOO-MANY-FRAGS", rx_too_many_frags_err);
904 len += snprintf(buf + len, size - len,
905 "%22s : %10u\n", "PRE-DELIM CRC ERR",
906 sc->debug.stats.rxstats.pre_delim_crc_err);
907 len += snprintf(buf + len, size - len,
908 "%22s : %10u\n", "POST-DELIM CRC ERR",
909 sc->debug.stats.rxstats.post_delim_crc_err);
910 len += snprintf(buf + len, size - len,
911 "%22s : %10u\n", "DECRYPT BUSY ERR",
912 sc->debug.stats.rxstats.decrypt_busy_err);
913 924
914 PHY_ERR("UNDERRUN ERR", ATH9K_PHYERR_UNDERRUN); 925 PHY_ERR("UNDERRUN ERR", ATH9K_PHYERR_UNDERRUN);
915 PHY_ERR("TIMING ERR", ATH9K_PHYERR_TIMING); 926 PHY_ERR("TIMING ERR", ATH9K_PHYERR_TIMING);
@@ -938,12 +949,10 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
938 PHY_ERR("HT-LENGTH ERR", ATH9K_PHYERR_HT_LENGTH_ILLEGAL); 949 PHY_ERR("HT-LENGTH ERR", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
939 PHY_ERR("HT-RATE ERR", ATH9K_PHYERR_HT_RATE_ILLEGAL); 950 PHY_ERR("HT-RATE ERR", ATH9K_PHYERR_HT_RATE_ILLEGAL);
940 951
941 len += snprintf(buf + len, size - len, 952 RXS_ERR("RX-Pkts-All", rx_pkts_all);
942 "%22s : %10u\n", "RX-Pkts-All", 953 RXS_ERR("RX-Bytes-All", rx_bytes_all);
943 sc->debug.stats.rxstats.rx_pkts_all); 954 RXS_ERR("RX-Beacons", rx_beacons);
944 len += snprintf(buf + len, size - len, 955 RXS_ERR("RX-Frags", rx_frags);
945 "%22s : %10u\n", "RX-Bytes-All",
946 sc->debug.stats.rxstats.rx_bytes_all);
947 956
948 if (len > size) 957 if (len > size)
949 len = size; 958 len = size;
@@ -953,12 +962,12 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
953 962
954 return retval; 963 return retval;
955 964
965#undef RXS_ERR
956#undef PHY_ERR 966#undef PHY_ERR
957} 967}
958 968
959void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs) 969void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
960{ 970{
961#define RX_STAT_INC(c) sc->debug.stats.rxstats.c++
962#define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++ 971#define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++
963#define RX_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].rs\ 972#define RX_SAMP_DBG(c) (sc->debug.bb_mac_samp[sc->debug.sampidx].rs\
964 [sc->debug.rsidx].c) 973 [sc->debug.rsidx].c)
@@ -1004,7 +1013,6 @@ void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
1004 1013
1005#endif 1014#endif
1006 1015
1007#undef RX_STAT_INC
1008#undef RX_PHY_ERR_INC 1016#undef RX_PHY_ERR_INC
1009#undef RX_SAMP_DBG 1017#undef RX_SAMP_DBG
1010} 1018}
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 64fcfad467bf..c34da09d9103 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -60,6 +60,7 @@ struct ath_buf;
60 * @tsfoor: TSF out of range, indicates that the corrected TSF received 60 * @tsfoor: TSF out of range, indicates that the corrected TSF received
61 * from a beacon differs from the PCU's internal TSF by more than a 61 * from a beacon differs from the PCU's internal TSF by more than a
62 * (programmable) threshold 62 * (programmable) threshold
63 * @local_timeout: Internal bus timeout.
63 */ 64 */
64struct ath_interrupt_stats { 65struct ath_interrupt_stats {
65 u32 total; 66 u32 total;
@@ -85,8 +86,30 @@ struct ath_interrupt_stats {
85 u32 dtim; 86 u32 dtim;
86 u32 bb_watchdog; 87 u32 bb_watchdog;
87 u32 tsfoor; 88 u32 tsfoor;
89
90 /* Sync-cause stats */
91 u32 sync_cause_all;
92 u32 sync_rtc_irq;
93 u32 sync_mac_irq;
94 u32 eeprom_illegal_access;
95 u32 apb_timeout;
96 u32 pci_mode_conflict;
97 u32 host1_fatal;
98 u32 host1_perr;
99 u32 trcv_fifo_perr;
100 u32 radm_cpl_ep;
101 u32 radm_cpl_dllp_abort;
102 u32 radm_cpl_tlp_abort;
103 u32 radm_cpl_ecrc_err;
104 u32 radm_cpl_timeout;
105 u32 local_timeout;
106 u32 pm_access;
107 u32 mac_awake;
108 u32 mac_asleep;
109 u32 mac_sleep_access;
88}; 110};
89 111
112
90/** 113/**
91 * struct ath_tx_stats - Statistics about TX 114 * struct ath_tx_stats - Statistics about TX
92 * @tx_pkts_all: No. of total frames transmitted, including ones that 115 * @tx_pkts_all: No. of total frames transmitted, including ones that
@@ -113,6 +136,7 @@ struct ath_interrupt_stats {
113 * @puttxbuf: Number of times hardware was given txbuf to write. 136 * @puttxbuf: Number of times hardware was given txbuf to write.
114 * @txstart: Number of times hardware was told to start tx. 137 * @txstart: Number of times hardware was told to start tx.
115 * @txprocdesc: Number of times tx descriptor was processed 138 * @txprocdesc: Number of times tx descriptor was processed
139 * @txfailed: Out-of-memory or other errors in xmit path.
116 */ 140 */
117struct ath_tx_stats { 141struct ath_tx_stats {
118 u32 tx_pkts_all; 142 u32 tx_pkts_all;
@@ -135,8 +159,11 @@ struct ath_tx_stats {
135 u32 puttxbuf; 159 u32 puttxbuf;
136 u32 txstart; 160 u32 txstart;
137 u32 txprocdesc; 161 u32 txprocdesc;
162 u32 txfailed;
138}; 163};
139 164
165#define RX_STAT_INC(c) (sc->debug.stats.rxstats.c++)
166
140/** 167/**
141 * struct ath_rx_stats - RX Statistics 168 * struct ath_rx_stats - RX Statistics
142 * @rx_pkts_all: No. of total frames received, including ones that 169 * @rx_pkts_all: No. of total frames received, including ones that
@@ -153,6 +180,13 @@ struct ath_tx_stats {
153 * @post_delim_crc_err: Post-Frame delimiter CRC error detections 180 * @post_delim_crc_err: Post-Frame delimiter CRC error detections
154 * @decrypt_busy_err: Decryption interruptions counter 181 * @decrypt_busy_err: Decryption interruptions counter
155 * @phy_err_stats: Individual PHY error statistics 182 * @phy_err_stats: Individual PHY error statistics
183 * @rx_len_err: No. of frames discarded due to bad length.
184 * @rx_oom_err: No. of frames dropped due to OOM issues.
185 * @rx_rate_err: No. of frames dropped due to rate errors.
186 * @rx_too_many_frags_err: Frames dropped due to too-many-frags received.
187 * @rx_drop_rxflush: No. of frames dropped due to RX-FLUSH.
188 * @rx_beacons: No. of beacons received.
189 * @rx_frags: No. of rx-fragements received.
156 */ 190 */
157struct ath_rx_stats { 191struct ath_rx_stats {
158 u32 rx_pkts_all; 192 u32 rx_pkts_all;
@@ -165,6 +199,13 @@ struct ath_rx_stats {
165 u32 post_delim_crc_err; 199 u32 post_delim_crc_err;
166 u32 decrypt_busy_err; 200 u32 decrypt_busy_err;
167 u32 phy_err_stats[ATH9K_PHYERR_MAX]; 201 u32 phy_err_stats[ATH9K_PHYERR_MAX];
202 u32 rx_len_err;
203 u32 rx_oom_err;
204 u32 rx_rate_err;
205 u32 rx_too_many_frags_err;
206 u32 rx_drop_rxflush;
207 u32 rx_beacons;
208 u32 rx_frags;
168}; 209};
169 210
170enum ath_reset_type { 211enum ath_reset_type {
@@ -174,6 +215,7 @@ enum ath_reset_type {
174 RESET_TYPE_TX_ERROR, 215 RESET_TYPE_TX_ERROR,
175 RESET_TYPE_TX_HANG, 216 RESET_TYPE_TX_HANG,
176 RESET_TYPE_PLL_HANG, 217 RESET_TYPE_PLL_HANG,
218 RESET_TYPE_MAC_HANG,
177 __RESET_TYPE_MAX 219 __RESET_TYPE_MAX
178}; 220};
179 221
@@ -247,6 +289,8 @@ void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs);
247 289
248#else 290#else
249 291
292#define RX_STAT_INC(c) /* NOP */
293
250static inline int ath9k_init_debug(struct ath_hw *ah) 294static inline int ath9k_init_debug(struct ath_hw *ah)
251{ 295{
252 return 0; 296 return 0;
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
index f4f56aff1e9d..ecc81792f2dc 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.c
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -21,17 +21,6 @@
21#include "dfs.h" 21#include "dfs.h"
22#include "dfs_debug.h" 22#include "dfs_debug.h"
23 23
24/*
25 * TODO: move into or synchronize this with generic header
26 * as soon as IF is defined
27 */
28struct dfs_radar_pulse {
29 u16 freq;
30 u64 ts;
31 u32 width;
32 u8 rssi;
33};
34
35/* internal struct to pass radar data */ 24/* internal struct to pass radar data */
36struct ath_radar_data { 25struct ath_radar_data {
37 u8 pulse_bw_info; 26 u8 pulse_bw_info;
@@ -60,44 +49,44 @@ static u32 dur_to_usecs(struct ath_hw *ah, u32 dur)
60#define EXT_CH_RADAR_FOUND 0x02 49#define EXT_CH_RADAR_FOUND 0x02
61static bool 50static bool
62ath9k_postprocess_radar_event(struct ath_softc *sc, 51ath9k_postprocess_radar_event(struct ath_softc *sc,
63 struct ath_radar_data *are, 52 struct ath_radar_data *ard,
64 struct dfs_radar_pulse *drp) 53 struct pulse_event *pe)
65{ 54{
66 u8 rssi; 55 u8 rssi;
67 u16 dur; 56 u16 dur;
68 57
69 ath_dbg(ath9k_hw_common(sc->sc_ah), DFS, 58 ath_dbg(ath9k_hw_common(sc->sc_ah), DFS,
70 "pulse_bw_info=0x%x, pri,ext len/rssi=(%u/%u, %u/%u)\n", 59 "pulse_bw_info=0x%x, pri,ext len/rssi=(%u/%u, %u/%u)\n",
71 are->pulse_bw_info, 60 ard->pulse_bw_info,
72 are->pulse_length_pri, are->rssi, 61 ard->pulse_length_pri, ard->rssi,
73 are->pulse_length_ext, are->ext_rssi); 62 ard->pulse_length_ext, ard->ext_rssi);
74 63
75 /* 64 /*
76 * Only the last 2 bits of the BW info are relevant, they indicate 65 * Only the last 2 bits of the BW info are relevant, they indicate
77 * which channel the radar was detected in. 66 * which channel the radar was detected in.
78 */ 67 */
79 are->pulse_bw_info &= 0x03; 68 ard->pulse_bw_info &= 0x03;
80 69
81 switch (are->pulse_bw_info) { 70 switch (ard->pulse_bw_info) {
82 case PRI_CH_RADAR_FOUND: 71 case PRI_CH_RADAR_FOUND:
83 /* radar in ctrl channel */ 72 /* radar in ctrl channel */
84 dur = are->pulse_length_pri; 73 dur = ard->pulse_length_pri;
85 DFS_STAT_INC(sc, pri_phy_errors); 74 DFS_STAT_INC(sc, pri_phy_errors);
86 /* 75 /*
87 * cannot use ctrl channel RSSI 76 * cannot use ctrl channel RSSI
88 * if extension channel is stronger 77 * if extension channel is stronger
89 */ 78 */
90 rssi = (are->ext_rssi >= (are->rssi + 3)) ? 0 : are->rssi; 79 rssi = (ard->ext_rssi >= (ard->rssi + 3)) ? 0 : ard->rssi;
91 break; 80 break;
92 case EXT_CH_RADAR_FOUND: 81 case EXT_CH_RADAR_FOUND:
93 /* radar in extension channel */ 82 /* radar in extension channel */
94 dur = are->pulse_length_ext; 83 dur = ard->pulse_length_ext;
95 DFS_STAT_INC(sc, ext_phy_errors); 84 DFS_STAT_INC(sc, ext_phy_errors);
96 /* 85 /*
97 * cannot use extension channel RSSI 86 * cannot use extension channel RSSI
98 * if control channel is stronger 87 * if control channel is stronger
99 */ 88 */
100 rssi = (are->rssi >= (are->ext_rssi + 12)) ? 0 : are->ext_rssi; 89 rssi = (ard->rssi >= (ard->ext_rssi + 12)) ? 0 : ard->ext_rssi;
101 break; 90 break;
102 case (PRI_CH_RADAR_FOUND | EXT_CH_RADAR_FOUND): 91 case (PRI_CH_RADAR_FOUND | EXT_CH_RADAR_FOUND):
103 /* 92 /*
@@ -107,14 +96,14 @@ ath9k_postprocess_radar_event(struct ath_softc *sc,
107 * Radiated testing, when pulse is on DC, different pri and 96 * Radiated testing, when pulse is on DC, different pri and
108 * ext durations are reported, so take the larger of the two 97 * ext durations are reported, so take the larger of the two
109 */ 98 */
110 if (are->pulse_length_ext >= are->pulse_length_pri) 99 if (ard->pulse_length_ext >= ard->pulse_length_pri)
111 dur = are->pulse_length_ext; 100 dur = ard->pulse_length_ext;
112 else 101 else
113 dur = are->pulse_length_pri; 102 dur = ard->pulse_length_pri;
114 DFS_STAT_INC(sc, dc_phy_errors); 103 DFS_STAT_INC(sc, dc_phy_errors);
115 104
116 /* when both are present use stronger one */ 105 /* when both are present use stronger one */
117 rssi = (are->rssi < are->ext_rssi) ? are->ext_rssi : are->rssi; 106 rssi = (ard->rssi < ard->ext_rssi) ? ard->ext_rssi : ard->rssi;
118 break; 107 break;
119 default: 108 default:
120 /* 109 /*
@@ -137,8 +126,8 @@ ath9k_postprocess_radar_event(struct ath_softc *sc,
137 */ 126 */
138 127
139 /* convert duration to usecs */ 128 /* convert duration to usecs */
140 drp->width = dur_to_usecs(sc->sc_ah, dur); 129 pe->width = dur_to_usecs(sc->sc_ah, dur);
141 drp->rssi = rssi; 130 pe->rssi = rssi;
142 131
143 DFS_STAT_INC(sc, pulses_detected); 132 DFS_STAT_INC(sc, pulses_detected);
144 return true; 133 return true;
@@ -155,15 +144,17 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
155 struct ath_radar_data ard; 144 struct ath_radar_data ard;
156 u16 datalen; 145 u16 datalen;
157 char *vdata_end; 146 char *vdata_end;
158 struct dfs_radar_pulse drp; 147 struct pulse_event pe;
159 struct ath_hw *ah = sc->sc_ah; 148 struct ath_hw *ah = sc->sc_ah;
160 struct ath_common *common = ath9k_hw_common(ah); 149 struct ath_common *common = ath9k_hw_common(ah);
161 150
162 if ((!(rs->rs_phyerr != ATH9K_PHYERR_RADAR)) && 151 DFS_STAT_INC(sc, pulses_total);
163 (!(rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT))) { 152 if ((rs->rs_phyerr != ATH9K_PHYERR_RADAR) &&
153 (rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT)) {
164 ath_dbg(common, DFS, 154 ath_dbg(common, DFS,
165 "Error: rs_phyer=0x%x not a radar error\n", 155 "Error: rs_phyer=0x%x not a radar error\n",
166 rs->rs_phyerr); 156 rs->rs_phyerr);
157 DFS_STAT_INC(sc, pulses_no_dfs);
167 return; 158 return;
168 } 159 }
169 160
@@ -189,27 +180,22 @@ void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
189 ard.pulse_bw_info = vdata_end[-1]; 180 ard.pulse_bw_info = vdata_end[-1];
190 ard.pulse_length_ext = vdata_end[-2]; 181 ard.pulse_length_ext = vdata_end[-2];
191 ard.pulse_length_pri = vdata_end[-3]; 182 ard.pulse_length_pri = vdata_end[-3];
192 183 pe.freq = ah->curchan->channel;
193 ath_dbg(common, DFS, 184 pe.ts = mactime;
194 "bw_info=%d, length_pri=%d, length_ext=%d, " 185 if (ath9k_postprocess_radar_event(sc, &ard, &pe)) {
195 "rssi_pri=%d, rssi_ext=%d\n", 186 struct dfs_pattern_detector *pd = sc->dfs_detector;
196 ard.pulse_bw_info, ard.pulse_length_pri, ard.pulse_length_ext,
197 ard.rssi, ard.ext_rssi);
198
199 drp.freq = ah->curchan->channel;
200 drp.ts = mactime;
201 if (ath9k_postprocess_radar_event(sc, &ard, &drp)) {
202 static u64 last_ts; 187 static u64 last_ts;
203 ath_dbg(common, DFS, 188 ath_dbg(common, DFS,
204 "ath9k_dfs_process_phyerr: channel=%d, ts=%llu, " 189 "ath9k_dfs_process_phyerr: channel=%d, ts=%llu, "
205 "width=%d, rssi=%d, delta_ts=%llu\n", 190 "width=%d, rssi=%d, delta_ts=%llu\n",
206 drp.freq, drp.ts, drp.width, drp.rssi, drp.ts-last_ts); 191 pe.freq, pe.ts, pe.width, pe.rssi, pe.ts-last_ts);
207 last_ts = drp.ts; 192 last_ts = pe.ts;
208 /* 193 DFS_STAT_INC(sc, pulses_processed);
209 * TODO: forward pulse to pattern detector 194 if (pd != NULL && pd->add_pulse(pd, &pe)) {
210 * 195 DFS_STAT_INC(sc, radar_detected);
211 * ieee80211_add_radar_pulse(drp.freq, drp.ts, 196 /*
212 * drp.width, drp.rssi); 197 * TODO: forward radar event to DFS management layer
213 */ 198 */
199 }
214 } 200 }
215} 201}
diff --git a/drivers/net/wireless/ath/ath9k/dfs.h b/drivers/net/wireless/ath/ath9k/dfs.h
index c2412857f122..3c839f06a06a 100644
--- a/drivers/net/wireless/ath/ath9k/dfs.h
+++ b/drivers/net/wireless/ath/ath9k/dfs.h
@@ -17,6 +17,7 @@
17 17
18#ifndef ATH9K_DFS_H 18#ifndef ATH9K_DFS_H
19#define ATH9K_DFS_H 19#define ATH9K_DFS_H
20#include "dfs_pattern_detector.h"
20 21
21#if defined(CONFIG_ATH9K_DFS_CERTIFIED) 22#if defined(CONFIG_ATH9K_DFS_CERTIFIED)
22/** 23/**
@@ -31,13 +32,14 @@
31 * 32 *
32 * The radar information provided as raw payload data is validated and 33 * The radar information provided as raw payload data is validated and
33 * filtered for false pulses. Events passing all tests are forwarded to 34 * filtered for false pulses. Events passing all tests are forwarded to
34 * the upper layer for pattern detection. 35 * the DFS detector for pattern detection.
35 */ 36 */
36void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data, 37void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
37 struct ath_rx_status *rs, u64 mactime); 38 struct ath_rx_status *rs, u64 mactime);
38#else 39#else
39static inline void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data, 40static inline void
40 struct ath_rx_status *rs, u64 mactime) { } 41ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
42 struct ath_rx_status *rs, u64 mactime) { }
41#endif 43#endif
42 44
43#endif /* ATH9K_DFS_H */ 45#endif /* ATH9K_DFS_H */
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c
index 4364c103ed33..55d28072adeb 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_debug.c
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c
@@ -21,9 +21,15 @@
21#include "ath9k.h" 21#include "ath9k.h"
22#include "dfs_debug.h" 22#include "dfs_debug.h"
23 23
24
25struct ath_dfs_pool_stats global_dfs_pool_stats = { 0 };
26
24#define ATH9K_DFS_STAT(s, p) \ 27#define ATH9K_DFS_STAT(s, p) \
25 len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \ 28 len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \
26 sc->debug.stats.dfs_stats.p); 29 sc->debug.stats.dfs_stats.p);
30#define ATH9K_DFS_POOL_STAT(s, p) \
31 len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \
32 global_dfs_pool_stats.p);
27 33
28static ssize_t read_file_dfs(struct file *file, char __user *user_buf, 34static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
29 size_t count, loff_t *ppos) 35 size_t count, loff_t *ppos)
@@ -43,6 +49,9 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
43 hw_ver->macVersion, hw_ver->macRev, 49 hw_ver->macVersion, hw_ver->macRev,
44 (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ? 50 (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ?
45 "enabled" : "disabled"); 51 "enabled" : "disabled");
52 len += snprintf(buf + len, size - len, "Pulse detector statistics:\n");
53 ATH9K_DFS_STAT("pulse events reported ", pulses_total);
54 ATH9K_DFS_STAT("invalid pulse events ", pulses_no_dfs);
46 ATH9K_DFS_STAT("DFS pulses detected ", pulses_detected); 55 ATH9K_DFS_STAT("DFS pulses detected ", pulses_detected);
47 ATH9K_DFS_STAT("Datalen discards ", datalen_discards); 56 ATH9K_DFS_STAT("Datalen discards ", datalen_discards);
48 ATH9K_DFS_STAT("RSSI discards ", rssi_discards); 57 ATH9K_DFS_STAT("RSSI discards ", rssi_discards);
@@ -50,6 +59,18 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
50 ATH9K_DFS_STAT("Primary channel pulses ", pri_phy_errors); 59 ATH9K_DFS_STAT("Primary channel pulses ", pri_phy_errors);
51 ATH9K_DFS_STAT("Secondary channel pulses", ext_phy_errors); 60 ATH9K_DFS_STAT("Secondary channel pulses", ext_phy_errors);
52 ATH9K_DFS_STAT("Dual channel pulses ", dc_phy_errors); 61 ATH9K_DFS_STAT("Dual channel pulses ", dc_phy_errors);
62 len += snprintf(buf + len, size - len, "Radar detector statistics "
63 "(current DFS region: %d)\n", sc->dfs_detector->region);
64 ATH9K_DFS_STAT("Pulse events processed ", pulses_processed);
65 ATH9K_DFS_STAT("Radars detected ", radar_detected);
66 len += snprintf(buf + len, size - len, "Global Pool statistics:\n");
67 ATH9K_DFS_POOL_STAT("Pool references ", pool_reference);
68 ATH9K_DFS_POOL_STAT("Pulses allocated ", pulse_allocated);
69 ATH9K_DFS_POOL_STAT("Pulses alloc error ", pulse_alloc_error);
70 ATH9K_DFS_POOL_STAT("Pulses in use ", pulse_used);
71 ATH9K_DFS_POOL_STAT("Seqs. allocated ", pseq_allocated);
72 ATH9K_DFS_POOL_STAT("Seqs. alloc error ", pseq_alloc_error);
73 ATH9K_DFS_POOL_STAT("Seqs. in use ", pseq_used);
53 74
54 if (len > size) 75 if (len > size)
55 len = size; 76 len = size;
@@ -60,8 +81,33 @@ static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
60 return retval; 81 return retval;
61} 82}
62 83
84/* magic number to prevent accidental reset of DFS statistics */
85#define DFS_STATS_RESET_MAGIC 0x80000000
86static ssize_t write_file_dfs(struct file *file, const char __user *user_buf,
87 size_t count, loff_t *ppos)
88{
89 struct ath_softc *sc = file->private_data;
90 unsigned long val;
91 char buf[32];
92 ssize_t len;
93
94 len = min(count, sizeof(buf) - 1);
95 if (copy_from_user(buf, user_buf, len))
96 return -EFAULT;
97
98 buf[len] = '\0';
99 if (strict_strtoul(buf, 0, &val))
100 return -EINVAL;
101
102 if (val == DFS_STATS_RESET_MAGIC)
103 memset(&sc->debug.stats.dfs_stats, 0,
104 sizeof(sc->debug.stats.dfs_stats));
105 return count;
106}
107
63static const struct file_operations fops_dfs_stats = { 108static const struct file_operations fops_dfs_stats = {
64 .read = read_file_dfs, 109 .read = read_file_dfs,
110 .write = write_file_dfs,
65 .open = simple_open, 111 .open = simple_open,
66 .owner = THIS_MODULE, 112 .owner = THIS_MODULE,
67 .llseek = default_llseek, 113 .llseek = default_llseek,
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.h b/drivers/net/wireless/ath/ath9k/dfs_debug.h
index 4911724cb445..e36810a4b585 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_debug.h
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.h
@@ -22,17 +22,23 @@
22#include "hw.h" 22#include "hw.h"
23 23
24/** 24/**
25 * struct ath_dfs_stats - DFS Statistics 25 * struct ath_dfs_stats - DFS Statistics per wiphy
26 * 26 * @pulses_total: pulses reported by HW
27 * @pulses_detected: No. of pulses detected so far 27 * @pulses_no_dfs: pulses wrongly reported as DFS
28 * @datalen_discards: No. of pulses discarded due to invalid datalen 28 * @pulses_detected: pulses detected so far
29 * @rssi_discards: No. of pulses discarded due to invalid RSSI 29 * @datalen_discards: pulses discarded due to invalid datalen
30 * @bwinfo_discards: No. of pulses discarded due to invalid BW info 30 * @rssi_discards: pulses discarded due to invalid RSSI
31 * @pri_phy_errors: No. of pulses reported for primary channel 31 * @bwinfo_discards: pulses discarded due to invalid BW info
32 * @ext_phy_errors: No. of pulses reported for extension channel 32 * @pri_phy_errors: pulses reported for primary channel
33 * @dc_phy_errors: No. of pulses reported for primary + extension channel 33 * @ext_phy_errors: pulses reported for extension channel
34 * @dc_phy_errors: pulses reported for primary + extension channel
35 * @pulses_processed: pulses forwarded to detector
36 * @radar_detected: radars detected
34 */ 37 */
35struct ath_dfs_stats { 38struct ath_dfs_stats {
39 /* pulse stats */
40 u32 pulses_total;
41 u32 pulses_no_dfs;
36 u32 pulses_detected; 42 u32 pulses_detected;
37 u32 datalen_discards; 43 u32 datalen_discards;
38 u32 rssi_discards; 44 u32 rssi_discards;
@@ -40,18 +46,39 @@ struct ath_dfs_stats {
40 u32 pri_phy_errors; 46 u32 pri_phy_errors;
41 u32 ext_phy_errors; 47 u32 ext_phy_errors;
42 u32 dc_phy_errors; 48 u32 dc_phy_errors;
49 /* pattern detection stats */
50 u32 pulses_processed;
51 u32 radar_detected;
43}; 52};
44 53
54/**
55 * struct ath_dfs_pool_stats - DFS Statistics for global pools
56 */
57struct ath_dfs_pool_stats {
58 u32 pool_reference;
59 u32 pulse_allocated;
60 u32 pulse_alloc_error;
61 u32 pulse_used;
62 u32 pseq_allocated;
63 u32 pseq_alloc_error;
64 u32 pseq_used;
65};
45#if defined(CONFIG_ATH9K_DFS_DEBUGFS) 66#if defined(CONFIG_ATH9K_DFS_DEBUGFS)
46 67
47#define DFS_STAT_INC(sc, c) (sc->debug.stats.dfs_stats.c++) 68#define DFS_STAT_INC(sc, c) (sc->debug.stats.dfs_stats.c++)
48void ath9k_dfs_init_debug(struct ath_softc *sc); 69void ath9k_dfs_init_debug(struct ath_softc *sc);
49 70
71#define DFS_POOL_STAT_INC(c) (global_dfs_pool_stats.c++)
72#define DFS_POOL_STAT_DEC(c) (global_dfs_pool_stats.c--)
73extern struct ath_dfs_pool_stats global_dfs_pool_stats;
74
50#else 75#else
51 76
52#define DFS_STAT_INC(sc, c) do { } while (0) 77#define DFS_STAT_INC(sc, c) do { } while (0)
53static inline void ath9k_dfs_init_debug(struct ath_softc *sc) { } 78static inline void ath9k_dfs_init_debug(struct ath_softc *sc) { }
54 79
80#define DFS_POOL_STAT_INC(c) do { } while (0)
81#define DFS_POOL_STAT_DEC(c) do { } while (0)
55#endif /* CONFIG_ATH9K_DFS_DEBUGFS */ 82#endif /* CONFIG_ATH9K_DFS_DEBUGFS */
56 83
57#endif /* ATH9K_DFS_DEBUG_H */ 84#endif /* ATH9K_DFS_DEBUG_H */
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
new file mode 100644
index 000000000000..ea2a6cf7ef23
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
@@ -0,0 +1,300 @@
1/*
2 * Copyright (c) 2012 Neratec Solutions AG
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/slab.h>
18#include <linux/export.h>
19
20#include "dfs_pattern_detector.h"
21#include "dfs_pri_detector.h"
22
23/*
24 * tolerated deviation of radar time stamp in usecs on both sides
25 * TODO: this might need to be HW-dependent
26 */
27#define PRI_TOLERANCE 16
28
29/**
30 * struct radar_types - contains array of patterns defined for one DFS domain
31 * @domain: DFS regulatory domain
32 * @num_radar_types: number of radar types to follow
33 * @radar_types: radar types array
34 */
35struct radar_types {
36 enum nl80211_dfs_regions region;
37 u32 num_radar_types;
38 const struct radar_detector_specs *radar_types;
39};
40
41/* percentage on ppb threshold to trigger detection */
42#define MIN_PPB_THRESH 50
43#define PPB_THRESH(PPB) ((PPB * MIN_PPB_THRESH + 50) / 100)
44#define PRF2PRI(PRF) ((1000000 + PRF / 2) / PRF)
45
46#define ETSI_PATTERN(ID, WMIN, WMAX, PMIN, PMAX, PRF, PPB) \
47{ \
48 ID, WMIN, WMAX, (PRF2PRI(PMAX) - PRI_TOLERANCE), \
49 (PRF2PRI(PMIN) * PRF + PRI_TOLERANCE), PRF, PPB * PRF, \
50 PPB_THRESH(PPB), PRI_TOLERANCE, \
51}
52
53/* radar types as defined by ETSI EN-301-893 v1.5.1 */
54static const struct radar_detector_specs etsi_radar_ref_types_v15[] = {
55 ETSI_PATTERN(0, 0, 1, 700, 700, 1, 18),
56 ETSI_PATTERN(1, 0, 5, 200, 1000, 1, 10),
57 ETSI_PATTERN(2, 0, 15, 200, 1600, 1, 15),
58 ETSI_PATTERN(3, 0, 15, 2300, 4000, 1, 25),
59 ETSI_PATTERN(4, 20, 30, 2000, 4000, 1, 20),
60 ETSI_PATTERN(5, 0, 2, 300, 400, 3, 10),
61 ETSI_PATTERN(6, 0, 2, 400, 1200, 3, 15),
62};
63
64static const struct radar_types etsi_radar_types_v15 = {
65 .region = NL80211_DFS_ETSI,
66 .num_radar_types = ARRAY_SIZE(etsi_radar_ref_types_v15),
67 .radar_types = etsi_radar_ref_types_v15,
68};
69
70/* for now, we support ETSI radar types, FCC and JP are TODO */
71static const struct radar_types *dfs_domains[] = {
72 &etsi_radar_types_v15,
73};
74
75/**
76 * get_dfs_domain_radar_types() - get radar types for a given DFS domain
77 * @param domain DFS domain
78 * @return radar_types ptr on success, NULL if DFS domain is not supported
79 */
80static const struct radar_types *
81get_dfs_domain_radar_types(enum nl80211_dfs_regions region)
82{
83 u32 i;
84 for (i = 0; i < ARRAY_SIZE(dfs_domains); i++) {
85 if (dfs_domains[i]->region == region)
86 return dfs_domains[i];
87 }
88 return NULL;
89}
90
91/**
92 * struct channel_detector - detector elements for a DFS channel
93 * @head: list_head
94 * @freq: frequency for this channel detector in MHz
95 * @detectors: array of dynamically created detector elements for this freq
96 *
97 * Channel detectors are required to provide multi-channel DFS detection, e.g.
98 * to support off-channel scanning. A pattern detector has a list of channels
99 * radar pulses have been reported for in the past.
100 */
101struct channel_detector {
102 struct list_head head;
103 u16 freq;
104 struct pri_detector **detectors;
105};
106
107/* channel_detector_reset() - reset detector lines for a given channel */
108static void channel_detector_reset(struct dfs_pattern_detector *dpd,
109 struct channel_detector *cd)
110{
111 u32 i;
112 if (cd == NULL)
113 return;
114 for (i = 0; i < dpd->num_radar_types; i++)
115 cd->detectors[i]->reset(cd->detectors[i], dpd->last_pulse_ts);
116}
117
118/* channel_detector_exit() - destructor */
119static void channel_detector_exit(struct dfs_pattern_detector *dpd,
120 struct channel_detector *cd)
121{
122 u32 i;
123 if (cd == NULL)
124 return;
125 list_del(&cd->head);
126 for (i = 0; i < dpd->num_radar_types; i++) {
127 struct pri_detector *de = cd->detectors[i];
128 if (de != NULL)
129 de->exit(de);
130 }
131 kfree(cd->detectors);
132 kfree(cd);
133}
134
135static struct channel_detector *
136channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
137{
138 u32 sz, i;
139 struct channel_detector *cd;
140
141 cd = kmalloc(sizeof(*cd), GFP_KERNEL);
142 if (cd == NULL)
143 goto fail;
144
145 INIT_LIST_HEAD(&cd->head);
146 cd->freq = freq;
147 sz = sizeof(cd->detectors) * dpd->num_radar_types;
148 cd->detectors = kzalloc(sz, GFP_KERNEL);
149 if (cd->detectors == NULL)
150 goto fail;
151
152 for (i = 0; i < dpd->num_radar_types; i++) {
153 const struct radar_detector_specs *rs = &dpd->radar_spec[i];
154 struct pri_detector *de = pri_detector_init(rs);
155 if (de == NULL)
156 goto fail;
157 cd->detectors[i] = de;
158 }
159 list_add(&cd->head, &dpd->channel_detectors);
160 return cd;
161
162fail:
163 pr_err("failed to allocate channel_detector for freq=%d\n", freq);
164 channel_detector_exit(dpd, cd);
165 return NULL;
166}
167
168/**
169 * channel_detector_get() - get channel detector for given frequency
170 * @param dpd instance pointer
171 * @param freq frequency in MHz
172 * @return pointer to channel detector on success, NULL otherwise
173 *
174 * Return existing channel detector for the given frequency or return a
175 * newly create one.
176 */
177static struct channel_detector *
178channel_detector_get(struct dfs_pattern_detector *dpd, u16 freq)
179{
180 struct channel_detector *cd;
181 list_for_each_entry(cd, &dpd->channel_detectors, head) {
182 if (cd->freq == freq)
183 return cd;
184 }
185 return channel_detector_create(dpd, freq);
186}
187
188/*
189 * DFS Pattern Detector
190 */
191
192/* dpd_reset(): reset all channel detectors */
193static void dpd_reset(struct dfs_pattern_detector *dpd)
194{
195 struct channel_detector *cd;
196 if (!list_empty(&dpd->channel_detectors))
197 list_for_each_entry(cd, &dpd->channel_detectors, head)
198 channel_detector_reset(dpd, cd);
199
200}
201static void dpd_exit(struct dfs_pattern_detector *dpd)
202{
203 struct channel_detector *cd, *cd0;
204 if (!list_empty(&dpd->channel_detectors))
205 list_for_each_entry_safe(cd, cd0, &dpd->channel_detectors, head)
206 channel_detector_exit(dpd, cd);
207 kfree(dpd);
208}
209
210static bool
211dpd_add_pulse(struct dfs_pattern_detector *dpd, struct pulse_event *event)
212{
213 u32 i;
214 bool ts_wraparound;
215 struct channel_detector *cd;
216
217 if (dpd->region == NL80211_DFS_UNSET) {
218 /*
219 * pulses received for a non-supported or un-initialized
220 * domain are treated as detected radars
221 */
222 return true;
223 }
224
225 cd = channel_detector_get(dpd, event->freq);
226 if (cd == NULL)
227 return false;
228
229 ts_wraparound = (event->ts < dpd->last_pulse_ts);
230 dpd->last_pulse_ts = event->ts;
231 if (ts_wraparound) {
232 /*
233 * reset detector on time stamp wraparound
234 * with monotonic time stamps, this should never happen
235 */
236 pr_warn("DFS: time stamp wraparound detected, resetting\n");
237 dpd_reset(dpd);
238 }
239 /* do type individual pattern matching */
240 for (i = 0; i < dpd->num_radar_types; i++) {
241 if (cd->detectors[i]->add_pulse(cd->detectors[i], event) != 0) {
242 channel_detector_reset(dpd, cd);
243 return true;
244 }
245 }
246 return false;
247}
248
249static bool dpd_set_domain(struct dfs_pattern_detector *dpd,
250 enum nl80211_dfs_regions region)
251{
252 const struct radar_types *rt;
253 struct channel_detector *cd, *cd0;
254
255 if (dpd->region == region)
256 return true;
257
258 dpd->region = NL80211_DFS_UNSET;
259
260 rt = get_dfs_domain_radar_types(region);
261 if (rt == NULL)
262 return false;
263
264 /* delete all channel detectors for previous DFS domain */
265 if (!list_empty(&dpd->channel_detectors))
266 list_for_each_entry_safe(cd, cd0, &dpd->channel_detectors, head)
267 channel_detector_exit(dpd, cd);
268 dpd->radar_spec = rt->radar_types;
269 dpd->num_radar_types = rt->num_radar_types;
270
271 dpd->region = region;
272 return true;
273}
274
275static struct dfs_pattern_detector default_dpd = {
276 .exit = dpd_exit,
277 .set_domain = dpd_set_domain,
278 .add_pulse = dpd_add_pulse,
279 .region = NL80211_DFS_UNSET,
280};
281
282struct dfs_pattern_detector *
283dfs_pattern_detector_init(enum nl80211_dfs_regions region)
284{
285 struct dfs_pattern_detector *dpd;
286 dpd = kmalloc(sizeof(*dpd), GFP_KERNEL);
287 if (dpd == NULL) {
288 pr_err("allocation of dfs_pattern_detector failed\n");
289 return NULL;
290 }
291 *dpd = default_dpd;
292 INIT_LIST_HEAD(&dpd->channel_detectors);
293
294 if (dpd->set_domain(dpd, region))
295 return dpd;
296
297 pr_err("Could not set DFS domain to %d. ", region);
298 return NULL;
299}
300EXPORT_SYMBOL(dfs_pattern_detector_init);
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h
new file mode 100644
index 000000000000..fd0328a30995
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.h
@@ -0,0 +1,104 @@
1/*
2 * Copyright (c) 2012 Neratec Solutions AG
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef DFS_PATTERN_DETECTOR_H
18#define DFS_PATTERN_DETECTOR_H
19
20#include <linux/types.h>
21#include <linux/list.h>
22#include <linux/nl80211.h>
23
24/**
25 * struct pulse_event - describing pulses reported by PHY
26 * @ts: pulse time stamp in us
27 * @freq: channel frequency in MHz
28 * @width: pulse duration in us
29 * @rssi: rssi of radar event
30 */
31struct pulse_event {
32 u64 ts;
33 u16 freq;
34 u8 width;
35 u8 rssi;
36};
37
38/**
39 * struct radar_detector_specs - detector specs for a radar pattern type
40 * @type_id: pattern type, as defined by regulatory
41 * @width_min: minimum radar pulse width in [us]
42 * @width_max: maximum radar pulse width in [us]
43 * @pri_min: minimum pulse repetition interval in [us] (including tolerance)
44 * @pri_max: minimum pri in [us] (including tolerance)
45 * @num_pri: maximum number of different pri for this type
46 * @ppb: pulses per bursts for this type
47 * @ppb_thresh: number of pulses required to trigger detection
48 * @max_pri_tolerance: pulse time stamp tolerance on both sides [us]
49 */
50struct radar_detector_specs {
51 u8 type_id;
52 u8 width_min;
53 u8 width_max;
54 u16 pri_min;
55 u16 pri_max;
56 u8 num_pri;
57 u8 ppb;
58 u8 ppb_thresh;
59 u8 max_pri_tolerance;
60};
61
62/**
63 * struct dfs_pattern_detector - DFS pattern detector
64 * @exit(): destructor
65 * @set_domain(): set DFS domain, resets detector lines upon domain changes
66 * @add_pulse(): add radar pulse to detector, returns true on detection
67 * @region: active DFS region, NL80211_DFS_UNSET until set
68 * @num_radar_types: number of different radar types
69 * @last_pulse_ts: time stamp of last valid pulse in usecs
70 * @radar_detector_specs: array of radar detection specs
71 * @channel_detectors: list connecting channel_detector elements
72 */
73struct dfs_pattern_detector {
74 void (*exit)(struct dfs_pattern_detector *dpd);
75 bool (*set_domain)(struct dfs_pattern_detector *dpd,
76 enum nl80211_dfs_regions region);
77 bool (*add_pulse)(struct dfs_pattern_detector *dpd,
78 struct pulse_event *pe);
79
80 enum nl80211_dfs_regions region;
81 u8 num_radar_types;
82 u64 last_pulse_ts;
83
84 const struct radar_detector_specs *radar_spec;
85 struct list_head channel_detectors;
86};
87
88/**
89 * dfs_pattern_detector_init() - constructor for pattern detector class
90 * @param region: DFS domain to be used, can be NL80211_DFS_UNSET at creation
91 * @return instance pointer on success, NULL otherwise
92 */
93#if defined(CONFIG_ATH9K_DFS_CERTIFIED)
94extern struct dfs_pattern_detector *
95dfs_pattern_detector_init(enum nl80211_dfs_regions region);
96#else
97static inline struct dfs_pattern_detector *
98dfs_pattern_detector_init(enum nl80211_dfs_regions region)
99{
100 return NULL;
101}
102#endif /* CONFIG_ATH9K_DFS_CERTIFIED */
103
104#endif /* DFS_PATTERN_DETECTOR_H */
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c
new file mode 100644
index 000000000000..91b8dceeadb1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c
@@ -0,0 +1,452 @@
1/*
2 * Copyright (c) 2012 Neratec Solutions AG
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/slab.h>
18#include <linux/spinlock.h>
19
20#include "ath9k.h"
21#include "dfs_pattern_detector.h"
22#include "dfs_pri_detector.h"
23#include "dfs_debug.h"
24
25/**
26 * struct pri_sequence - sequence of pulses matching one PRI
27 * @head: list_head
28 * @pri: pulse repetition interval (PRI) in usecs
29 * @dur: duration of sequence in usecs
30 * @count: number of pulses in this sequence
31 * @count_falses: number of not matching pulses in this sequence
32 * @first_ts: time stamp of first pulse in usecs
33 * @last_ts: time stamp of last pulse in usecs
34 * @deadline_ts: deadline when this sequence becomes invalid (first_ts + dur)
35 */
36struct pri_sequence {
37 struct list_head head;
38 u32 pri;
39 u32 dur;
40 u32 count;
41 u32 count_falses;
42 u64 first_ts;
43 u64 last_ts;
44 u64 deadline_ts;
45};
46
47/**
48 * struct pulse_elem - elements in pulse queue
49 * @ts: time stamp in usecs
50 */
51struct pulse_elem {
52 struct list_head head;
53 u64 ts;
54};
55
56/**
57 * pde_get_multiple() - get number of multiples considering a given tolerance
58 * @return factor if abs(val - factor*fraction) <= tolerance, 0 otherwise
59 */
60static u32 pde_get_multiple(u32 val, u32 fraction, u32 tolerance)
61{
62 u32 remainder;
63 u32 factor;
64 u32 delta;
65
66 if (fraction == 0)
67 return 0;
68
69 delta = (val < fraction) ? (fraction - val) : (val - fraction);
70
71 if (delta <= tolerance)
72 /* val and fraction are within tolerance */
73 return 1;
74
75 factor = val / fraction;
76 remainder = val % fraction;
77 if (remainder > tolerance) {
78 /* no exact match */
79 if ((fraction - remainder) <= tolerance)
80 /* remainder is within tolerance */
81 factor++;
82 else
83 factor = 0;
84 }
85 return factor;
86}
87
88/**
89 * DOC: Singleton Pulse and Sequence Pools
90 *
91 * Instances of pri_sequence and pulse_elem are kept in singleton pools to
92 * reduce the number of dynamic allocations. They are shared between all
93 * instances and grow up to the peak number of simultaneously used objects.
94 *
95 * Memory is freed after all references to the pools are released.
96 */
97static u32 singleton_pool_references;
98static LIST_HEAD(pulse_pool);
99static LIST_HEAD(pseq_pool);
100static DEFINE_SPINLOCK(pool_lock);
101
102static void pool_register_ref(void)
103{
104 spin_lock_bh(&pool_lock);
105 singleton_pool_references++;
106 DFS_POOL_STAT_INC(pool_reference);
107 spin_unlock_bh(&pool_lock);
108}
109
110static void pool_deregister_ref(void)
111{
112 spin_lock_bh(&pool_lock);
113 singleton_pool_references--;
114 DFS_POOL_STAT_DEC(pool_reference);
115 if (singleton_pool_references == 0) {
116 /* free singleton pools with no references left */
117 struct pri_sequence *ps, *ps0;
118 struct pulse_elem *p, *p0;
119
120 list_for_each_entry_safe(p, p0, &pulse_pool, head) {
121 list_del(&p->head);
122 DFS_POOL_STAT_DEC(pulse_allocated);
123 kfree(p);
124 }
125 list_for_each_entry_safe(ps, ps0, &pseq_pool, head) {
126 list_del(&ps->head);
127 DFS_POOL_STAT_DEC(pseq_allocated);
128 kfree(ps);
129 }
130 }
131 spin_unlock_bh(&pool_lock);
132}
133
134static void pool_put_pulse_elem(struct pulse_elem *pe)
135{
136 spin_lock_bh(&pool_lock);
137 list_add(&pe->head, &pulse_pool);
138 DFS_POOL_STAT_DEC(pulse_used);
139 spin_unlock_bh(&pool_lock);
140}
141
142static void pool_put_pseq_elem(struct pri_sequence *pse)
143{
144 spin_lock_bh(&pool_lock);
145 list_add(&pse->head, &pseq_pool);
146 DFS_POOL_STAT_DEC(pseq_used);
147 spin_unlock_bh(&pool_lock);
148}
149
150static struct pri_sequence *pool_get_pseq_elem(void)
151{
152 struct pri_sequence *pse = NULL;
153 spin_lock_bh(&pool_lock);
154 if (!list_empty(&pseq_pool)) {
155 pse = list_first_entry(&pseq_pool, struct pri_sequence, head);
156 list_del(&pse->head);
157 DFS_POOL_STAT_INC(pseq_used);
158 }
159 spin_unlock_bh(&pool_lock);
160 return pse;
161}
162
163static struct pulse_elem *pool_get_pulse_elem(void)
164{
165 struct pulse_elem *pe = NULL;
166 spin_lock_bh(&pool_lock);
167 if (!list_empty(&pulse_pool)) {
168 pe = list_first_entry(&pulse_pool, struct pulse_elem, head);
169 list_del(&pe->head);
170 DFS_POOL_STAT_INC(pulse_used);
171 }
172 spin_unlock_bh(&pool_lock);
173 return pe;
174}
175
176static struct pulse_elem *pulse_queue_get_tail(struct pri_detector *pde)
177{
178 struct list_head *l = &pde->pulses;
179 if (list_empty(l))
180 return NULL;
181 return list_entry(l->prev, struct pulse_elem, head);
182}
183
184static bool pulse_queue_dequeue(struct pri_detector *pde)
185{
186 struct pulse_elem *p = pulse_queue_get_tail(pde);
187 if (p != NULL) {
188 list_del_init(&p->head);
189 pde->count--;
190 /* give it back to pool */
191 pool_put_pulse_elem(p);
192 }
193 return (pde->count > 0);
194}
195
196/* remove pulses older than window */
197static void pulse_queue_check_window(struct pri_detector *pde)
198{
199 u64 min_valid_ts;
200 struct pulse_elem *p;
201
202 /* there is no delta time with less than 2 pulses */
203 if (pde->count < 2)
204 return;
205
206 if (pde->last_ts <= pde->window_size)
207 return;
208
209 min_valid_ts = pde->last_ts - pde->window_size;
210 while ((p = pulse_queue_get_tail(pde)) != NULL) {
211 if (p->ts >= min_valid_ts)
212 return;
213 pulse_queue_dequeue(pde);
214 }
215}
216
217static bool pulse_queue_enqueue(struct pri_detector *pde, u64 ts)
218{
219 struct pulse_elem *p = pool_get_pulse_elem();
220 if (p == NULL) {
221 p = kmalloc(sizeof(*p), GFP_KERNEL);
222 if (p == NULL) {
223 DFS_POOL_STAT_INC(pulse_alloc_error);
224 return false;
225 }
226 DFS_POOL_STAT_INC(pulse_allocated);
227 DFS_POOL_STAT_INC(pulse_used);
228 }
229 INIT_LIST_HEAD(&p->head);
230 p->ts = ts;
231 list_add(&p->head, &pde->pulses);
232 pde->count++;
233 pde->last_ts = ts;
234 pulse_queue_check_window(pde);
235 if (pde->count >= pde->max_count)
236 pulse_queue_dequeue(pde);
237 return true;
238}
239
240static bool pseq_handler_create_sequences(struct pri_detector *pde,
241 u64 ts, u32 min_count)
242{
243 struct pulse_elem *p;
244 list_for_each_entry(p, &pde->pulses, head) {
245 struct pri_sequence ps, *new_ps;
246 struct pulse_elem *p2;
247 u32 tmp_false_count;
248 u64 min_valid_ts;
249 u32 delta_ts = ts - p->ts;
250
251 if (delta_ts < pde->rs->pri_min)
252 /* ignore too small pri */
253 continue;
254
255 if (delta_ts > pde->rs->pri_max)
256 /* stop on too large pri (sorted list) */
257 break;
258
259 /* build a new sequence with new potential pri */
260 ps.count = 2;
261 ps.count_falses = 0;
262 ps.first_ts = p->ts;
263 ps.last_ts = ts;
264 ps.pri = ts - p->ts;
265 ps.dur = ps.pri * (pde->rs->ppb - 1)
266 + 2 * pde->rs->max_pri_tolerance;
267
268 p2 = p;
269 tmp_false_count = 0;
270 min_valid_ts = ts - ps.dur;
271 /* check which past pulses are candidates for new sequence */
272 list_for_each_entry_continue(p2, &pde->pulses, head) {
273 u32 factor;
274 if (p2->ts < min_valid_ts)
275 /* stop on crossing window border */
276 break;
277 /* check if pulse match (multi)PRI */
278 factor = pde_get_multiple(ps.last_ts - p2->ts, ps.pri,
279 pde->rs->max_pri_tolerance);
280 if (factor > 0) {
281 ps.count++;
282 ps.first_ts = p2->ts;
283 /*
284 * on match, add the intermediate falses
285 * and reset counter
286 */
287 ps.count_falses += tmp_false_count;
288 tmp_false_count = 0;
289 } else {
290 /* this is a potential false one */
291 tmp_false_count++;
292 }
293 }
294 if (ps.count < min_count)
295 /* did not reach minimum count, drop sequence */
296 continue;
297
298 /* this is a valid one, add it */
299 ps.deadline_ts = ps.first_ts + ps.dur;
300 new_ps = pool_get_pseq_elem();
301 if (new_ps == NULL) {
302 new_ps = kmalloc(sizeof(*new_ps), GFP_KERNEL);
303 if (new_ps == NULL) {
304 DFS_POOL_STAT_INC(pseq_alloc_error);
305 return false;
306 }
307 DFS_POOL_STAT_INC(pseq_allocated);
308 DFS_POOL_STAT_INC(pseq_used);
309 }
310 memcpy(new_ps, &ps, sizeof(ps));
311 INIT_LIST_HEAD(&new_ps->head);
312 list_add(&new_ps->head, &pde->sequences);
313 }
314 return true;
315}
316
317/* check new ts and add to all matching existing sequences */
318static u32
319pseq_handler_add_to_existing_seqs(struct pri_detector *pde, u64 ts)
320{
321 u32 max_count = 0;
322 struct pri_sequence *ps, *ps2;
323 list_for_each_entry_safe(ps, ps2, &pde->sequences, head) {
324 u32 delta_ts;
325 u32 factor;
326
327 /* first ensure that sequence is within window */
328 if (ts > ps->deadline_ts) {
329 list_del_init(&ps->head);
330 pool_put_pseq_elem(ps);
331 continue;
332 }
333
334 delta_ts = ts - ps->last_ts;
335 factor = pde_get_multiple(delta_ts, ps->pri,
336 pde->rs->max_pri_tolerance);
337 if (factor > 0) {
338 ps->last_ts = ts;
339 ps->count++;
340
341 if (max_count < ps->count)
342 max_count = ps->count;
343 } else {
344 ps->count_falses++;
345 }
346 }
347 return max_count;
348}
349
350static struct pri_sequence *
351pseq_handler_check_detection(struct pri_detector *pde)
352{
353 struct pri_sequence *ps;
354
355 if (list_empty(&pde->sequences))
356 return NULL;
357
358 list_for_each_entry(ps, &pde->sequences, head) {
359 /*
360 * we assume to have enough matching confidence if we
361 * 1) have enough pulses
362 * 2) have more matching than false pulses
363 */
364 if ((ps->count >= pde->rs->ppb_thresh) &&
365 (ps->count * pde->rs->num_pri >= ps->count_falses))
366 return ps;
367 }
368 return NULL;
369}
370
371
372/* free pulse queue and sequences list and give objects back to pools */
373static void pri_detector_reset(struct pri_detector *pde, u64 ts)
374{
375 struct pri_sequence *ps, *ps0;
376 struct pulse_elem *p, *p0;
377 list_for_each_entry_safe(ps, ps0, &pde->sequences, head) {
378 list_del_init(&ps->head);
379 pool_put_pseq_elem(ps);
380 }
381 list_for_each_entry_safe(p, p0, &pde->pulses, head) {
382 list_del_init(&p->head);
383 pool_put_pulse_elem(p);
384 }
385 pde->count = 0;
386 pde->last_ts = ts;
387}
388
389static void pri_detector_exit(struct pri_detector *de)
390{
391 pri_detector_reset(de, 0);
392 pool_deregister_ref();
393 kfree(de);
394}
395
396static bool pri_detector_add_pulse(struct pri_detector *de,
397 struct pulse_event *event)
398{
399 u32 max_updated_seq;
400 struct pri_sequence *ps;
401 u64 ts = event->ts;
402 const struct radar_detector_specs *rs = de->rs;
403
404 /* ignore pulses not within width range */
405 if ((rs->width_min > event->width) || (rs->width_max < event->width))
406 return false;
407
408 if ((ts - de->last_ts) < rs->max_pri_tolerance)
409 /* if delta to last pulse is too short, don't use this pulse */
410 return false;
411 de->last_ts = ts;
412
413 max_updated_seq = pseq_handler_add_to_existing_seqs(de, ts);
414
415 if (!pseq_handler_create_sequences(de, ts, max_updated_seq)) {
416 pr_err("failed to create pulse sequences\n");
417 pri_detector_reset(de, ts);
418 return false;
419 }
420
421 ps = pseq_handler_check_detection(de);
422
423 if (ps != NULL) {
424 pr_info("DFS: radar found: pri=%d, count=%d, count_false=%d\n",
425 ps->pri, ps->count, ps->count_falses);
426 pri_detector_reset(de, ts);
427 return true;
428 }
429 pulse_queue_enqueue(de, ts);
430 return false;
431}
432
433struct pri_detector *
434pri_detector_init(const struct radar_detector_specs *rs)
435{
436 struct pri_detector *de;
437 de = kzalloc(sizeof(*de), GFP_KERNEL);
438 if (de == NULL)
439 return NULL;
440 de->exit = pri_detector_exit;
441 de->add_pulse = pri_detector_add_pulse;
442 de->reset = pri_detector_reset;
443
444 INIT_LIST_HEAD(&de->sequences);
445 INIT_LIST_HEAD(&de->pulses);
446 de->window_size = rs->pri_max * rs->ppb * rs->num_pri;
447 de->max_count = rs->ppb * 2;
448 de->rs = rs;
449
450 pool_register_ref();
451 return de;
452}
diff --git a/drivers/net/wireless/ath/ath9k/dfs_pri_detector.h b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.h
new file mode 100644
index 000000000000..81cde9f28e44
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs_pri_detector.h
@@ -0,0 +1,52 @@
1/*
2 * Copyright (c) 2012 Neratec Solutions AG
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef DFS_PRI_DETECTOR_H
18#define DFS_PRI_DETECTOR_H
19
20#include <linux/list.h>
21
22/**
23 * struct pri_detector - PRI detector element for a dedicated radar type
24 * @exit(): destructor
25 * @add_pulse(): add pulse event, returns true if pattern was detected
26 * @reset(): clear states and reset to given time stamp
27 * @rs: detector specs for this detector element
28 * @last_ts: last pulse time stamp considered for this element in usecs
29 * @sequences: list_head holding potential pulse sequences
30 * @pulses: list connecting pulse_elem objects
31 * @count: number of pulses in queue
32 * @max_count: maximum number of pulses to be queued
33 * @window_size: window size back from newest pulse time stamp in usecs
34 */
35struct pri_detector {
36 void (*exit) (struct pri_detector *de);
37 bool (*add_pulse)(struct pri_detector *de, struct pulse_event *e);
38 void (*reset) (struct pri_detector *de, u64 ts);
39
40/* private: internal use only */
41 const struct radar_detector_specs *rs;
42 u64 last_ts;
43 struct list_head sequences;
44 struct list_head pulses;
45 u32 count;
46 u32 max_count;
47 u32 window_size;
48};
49
50struct pri_detector *pri_detector_init(const struct radar_detector_specs *rs);
51
52#endif /* DFS_PRI_DETECTOR_H */
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index c43523233319..0512397a293c 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -16,14 +16,6 @@
16 16
17#include "hw.h" 17#include "hw.h"
18 18
19static inline u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz)
20{
21 if (fbin == AR5416_BCHAN_UNUSED)
22 return fbin;
23
24 return (u16) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin));
25}
26
27void ath9k_hw_analog_shift_regwrite(struct ath_hw *ah, u32 reg, u32 val) 19void ath9k_hw_analog_shift_regwrite(struct ath_hw *ah, u32 reg, u32 val)
28{ 20{
29 REG_WRITE(ah, reg, val); 21 REG_WRITE(ah, reg, val);
@@ -290,6 +282,34 @@ u16 ath9k_hw_get_max_edge_power(u16 freq, struct cal_ctl_edges *pRdEdgesPower,
290 return twiceMaxEdgePower; 282 return twiceMaxEdgePower;
291} 283}
292 284
285u16 ath9k_hw_get_scaled_power(struct ath_hw *ah, u16 power_limit,
286 u8 antenna_reduction)
287{
288 u16 reduction = antenna_reduction;
289
290 /*
291 * Reduce scaled Power by number of chains active
292 * to get the per chain tx power level.
293 */
294 switch (ar5416_get_ntxchains(ah->txchainmask)) {
295 case 1:
296 break;
297 case 2:
298 reduction += POWER_CORRECTION_FOR_TWO_CHAIN;
299 break;
300 case 3:
301 reduction += POWER_CORRECTION_FOR_THREE_CHAIN;
302 break;
303 }
304
305 if (power_limit > reduction)
306 power_limit -= reduction;
307 else
308 power_limit = 0;
309
310 return power_limit;
311}
312
293void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah) 313void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah)
294{ 314{
295 struct ath_common *common = ath9k_hw_common(ah); 315 struct ath_common *common = ath9k_hw_common(ah);
@@ -299,10 +319,10 @@ void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah)
299 case 1: 319 case 1:
300 break; 320 break;
301 case 2: 321 case 2:
302 regulatory->max_power_level += INCREASE_MAXPOW_BY_TWO_CHAIN; 322 regulatory->max_power_level += POWER_CORRECTION_FOR_TWO_CHAIN;
303 break; 323 break;
304 case 3: 324 case 3:
305 regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN; 325 regulatory->max_power_level += POWER_CORRECTION_FOR_THREE_CHAIN;
306 break; 326 break;
307 default: 327 default:
308 ath_dbg(common, EEPROM, "Invalid chainmask configuration\n"); 328 ath_dbg(common, EEPROM, "Invalid chainmask configuration\n");
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 5ff7ab965120..33acb920ed3f 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -79,8 +79,8 @@
79#define SUB_NUM_CTL_MODES_AT_5G_40 2 79#define SUB_NUM_CTL_MODES_AT_5G_40 2
80#define SUB_NUM_CTL_MODES_AT_2G_40 3 80#define SUB_NUM_CTL_MODES_AT_2G_40 3
81 81
82#define INCREASE_MAXPOW_BY_TWO_CHAIN 6 /* 10*log10(2)*2 */ 82#define POWER_CORRECTION_FOR_TWO_CHAIN 6 /* 10*log10(2)*2 */
83#define INCREASE_MAXPOW_BY_THREE_CHAIN 10 /* 10*log10(3)*2 */ 83#define POWER_CORRECTION_FOR_THREE_CHAIN 10 /* 10*log10(3)*2 */
84 84
85/* 85/*
86 * For AR9285 and later chipsets, the following bits are not being programmed 86 * For AR9285 and later chipsets, the following bits are not being programmed
@@ -686,6 +686,8 @@ void ath9k_hw_get_target_powers(struct ath_hw *ah,
686 u16 numRates, bool isHt40Target); 686 u16 numRates, bool isHt40Target);
687u16 ath9k_hw_get_max_edge_power(u16 freq, struct cal_ctl_edges *pRdEdgesPower, 687u16 ath9k_hw_get_max_edge_power(u16 freq, struct cal_ctl_edges *pRdEdgesPower,
688 bool is2GHz, int num_band_edges); 688 bool is2GHz, int num_band_edges);
689u16 ath9k_hw_get_scaled_power(struct ath_hw *ah, u16 power_limit,
690 u8 antenna_reduction);
689void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah); 691void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah);
690int ath9k_hw_eeprom_init(struct ath_hw *ah); 692int ath9k_hw_eeprom_init(struct ath_hw *ah);
691 693
@@ -697,6 +699,14 @@ void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
697 u16 *pPdGainBoundaries, u8 *pPDADCValues, 699 u16 *pPdGainBoundaries, u8 *pPDADCValues,
698 u16 numXpdGains); 700 u16 numXpdGains);
699 701
702static inline u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz)
703{
704 if (fbin == AR5416_BCHAN_UNUSED)
705 return fbin;
706
707 return (u16) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin));
708}
709
700#define ar5416_get_ntxchains(_txchainmask) \ 710#define ar5416_get_ntxchains(_txchainmask) \
701 (((_txchainmask >> 2) & 1) + \ 711 (((_txchainmask >> 2) & 1) + \
702 ((_txchainmask >> 1) & 1) + (_txchainmask & 1)) 712 ((_txchainmask >> 1) & 1) + (_txchainmask & 1))
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index b34e8b2990b1..aa614767adff 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -564,9 +564,6 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
564 (((cfgCtl & ~CTL_MODE_M) | (pCtlMode[ctlMode] & CTL_MODE_M)) == \ 564 (((cfgCtl & ~CTL_MODE_M) | (pCtlMode[ctlMode] & CTL_MODE_M)) == \
565 ((pEepData->ctlIndex[i] & CTL_MODE_M) | SD_NO_CTL)) 565 ((pEepData->ctlIndex[i] & CTL_MODE_M) | SD_NO_CTL))
566 566
567#define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6
568#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 10
569
570 u16 twiceMaxEdgePower; 567 u16 twiceMaxEdgePower;
571 int i; 568 int i;
572 struct cal_ctl_data_ar9287 *rep; 569 struct cal_ctl_data_ar9287 *rep;
@@ -591,29 +588,8 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
591 tx_chainmask = ah->txchainmask; 588 tx_chainmask = ah->txchainmask;
592 589
593 ath9k_hw_get_channel_centers(ah, chan, &centers); 590 ath9k_hw_get_channel_centers(ah, chan, &centers);
594 scaledPower = powerLimit - antenna_reduction; 591 scaledPower = ath9k_hw_get_scaled_power(ah, powerLimit,
595 592 antenna_reduction);
596 /*
597 * Reduce scaled Power by number of chains active
598 * to get the per chain tx power level.
599 */
600 switch (ar5416_get_ntxchains(tx_chainmask)) {
601 case 1:
602 break;
603 case 2:
604 if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN)
605 scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
606 else
607 scaledPower = 0;
608 break;
609 case 3:
610 if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN)
611 scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
612 else
613 scaledPower = 0;
614 break;
615 }
616 scaledPower = max((u16)0, scaledPower);
617 593
618 /* 594 /*
619 * Get TX power from EEPROM. 595 * Get TX power from EEPROM.
@@ -786,8 +762,6 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
786 762
787#undef CMP_CTL 763#undef CMP_CTL
788#undef CMP_NO_CTL 764#undef CMP_NO_CTL
789#undef REDUCE_SCALED_POWER_BY_TWO_CHAIN
790#undef REDUCE_SCALED_POWER_BY_THREE_CHAIN
791} 765}
792 766
793static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah, 767static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 619b95d764ff..b5fba8b18b8b 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -991,9 +991,6 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
991 u16 antenna_reduction, 991 u16 antenna_reduction,
992 u16 powerLimit) 992 u16 powerLimit)
993{ 993{
994#define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6 /* 10*log10(2)*2 */
995#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 9 /* 10*log10(3)*2 */
996
997 struct ar5416_eeprom_def *pEepData = &ah->eeprom.def; 994 struct ar5416_eeprom_def *pEepData = &ah->eeprom.def;
998 u16 twiceMaxEdgePower; 995 u16 twiceMaxEdgePower;
999 int i; 996 int i;
@@ -1027,24 +1024,8 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
1027 1024
1028 ath9k_hw_get_channel_centers(ah, chan, &centers); 1025 ath9k_hw_get_channel_centers(ah, chan, &centers);
1029 1026
1030 scaledPower = powerLimit - antenna_reduction; 1027 scaledPower = ath9k_hw_get_scaled_power(ah, powerLimit,
1031 1028 antenna_reduction);
1032 switch (ar5416_get_ntxchains(tx_chainmask)) {
1033 case 1:
1034 break;
1035 case 2:
1036 if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN)
1037 scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
1038 else
1039 scaledPower = 0;
1040 break;
1041 case 3:
1042 if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN)
1043 scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
1044 else
1045 scaledPower = 0;
1046 break;
1047 }
1048 1029
1049 if (IS_CHAN_2GHZ(chan)) { 1030 if (IS_CHAN_2GHZ(chan)) {
1050 numCtlModes = ARRAY_SIZE(ctlModesFor11g) - 1031 numCtlModes = ARRAY_SIZE(ctlModesFor11g) -
@@ -1263,20 +1244,7 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
1263 regulatory->max_power_level = ratesArray[i]; 1244 regulatory->max_power_level = ratesArray[i];
1264 } 1245 }
1265 1246
1266 switch(ar5416_get_ntxchains(ah->txchainmask)) { 1247 ath9k_hw_update_regulatory_maxpower(ah);
1267 case 1:
1268 break;
1269 case 2:
1270 regulatory->max_power_level += INCREASE_MAXPOW_BY_TWO_CHAIN;
1271 break;
1272 case 3:
1273 regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN;
1274 break;
1275 default:
1276 ath_dbg(ath9k_hw_common(ah), EEPROM,
1277 "Invalid chainmask configuration\n");
1278 break;
1279 }
1280 1248
1281 if (test) 1249 if (test)
1282 return; 1250 return;
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index fbe23de1297f..281a9af0f1b6 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -41,6 +41,9 @@ void ath_init_leds(struct ath_softc *sc)
41{ 41{
42 int ret; 42 int ret;
43 43
44 if (AR_SREV_9100(sc->sc_ah))
45 return;
46
44 if (sc->sc_ah->led_pin < 0) { 47 if (sc->sc_ah->led_pin < 0) {
45 if (AR_SREV_9287(sc->sc_ah)) 48 if (AR_SREV_9287(sc->sc_ah))
46 sc->sc_ah->led_pin = ATH_LED_PIN_9287; 49 sc->sc_ah->led_pin = ATH_LED_PIN_9287;
@@ -362,7 +365,7 @@ void ath9k_stop_btcoex(struct ath_softc *sc)
362 ath9k_hw_btcoex_disable(ah); 365 ath9k_hw_btcoex_disable(ah);
363 if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE) 366 if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
364 ath9k_btcoex_timer_pause(sc); 367 ath9k_btcoex_timer_pause(sc);
365 if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_MCI) 368 if (AR_SREV_9462(ah))
366 ath_mci_flush_profile(&sc->btcoex.mci); 369 ath_mci_flush_profile(&sc->btcoex.mci);
367 } 370 }
368} 371}
@@ -373,7 +376,7 @@ void ath9k_deinit_btcoex(struct ath_softc *sc)
373 ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_3WIRE) 376 ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_3WIRE)
374 ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer); 377 ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
375 378
376 if (ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_MCI) 379 if (AR_SREV_9462(sc->sc_ah))
377 ath_mci_cleanup(sc); 380 ath_mci_cleanup(sc);
378} 381}
379 382
@@ -399,17 +402,16 @@ int ath9k_init_btcoex(struct ath_softc *sc)
399 txq = sc->tx.txq_map[WME_AC_BE]; 402 txq = sc->tx.txq_map[WME_AC_BE];
400 ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum); 403 ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
401 sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW; 404 sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
402 break; 405 if (AR_SREV_9462(ah)) {
403 case ATH_BTCOEX_CFG_MCI: 406 sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
404 sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW; 407 INIT_LIST_HEAD(&sc->btcoex.mci.info);
405 sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
406 INIT_LIST_HEAD(&sc->btcoex.mci.info);
407 408
408 r = ath_mci_setup(sc); 409 r = ath_mci_setup(sc);
409 if (r) 410 if (r)
410 return r; 411 return r;
411 412
412 ath9k_hw_btcoex_init_mci(ah); 413 ath9k_hw_btcoex_init_mci(ah);
414 }
413 415
414 break; 416 break;
415 default: 417 default:
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 424aabb2c730..f67cd952e741 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -53,6 +53,8 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
53 .driver_info = AR9280_USB }, /* SMC Networks */ 53 .driver_info = AR9280_USB }, /* SMC Networks */
54 { USB_DEVICE(0x0411, 0x017f), 54 { USB_DEVICE(0x0411, 0x017f),
55 .driver_info = AR9280_USB }, /* Sony UWA-BR100 */ 55 .driver_info = AR9280_USB }, /* Sony UWA-BR100 */
56 { USB_DEVICE(0x04da, 0x3904),
57 .driver_info = AR9280_USB },
56 58
57 { USB_DEVICE(0x0cf3, 0x20ff), 59 { USB_DEVICE(0x0cf3, 0x20ff),
58 .driver_info = STORAGE_DEVICE }, 60 .driver_info = STORAGE_DEVICE },
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index de5ee15ee639..25213d521bc2 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -14,6 +14,8 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
17#include "htc.h" 19#include "htc.h"
18 20
19MODULE_AUTHOR("Atheros Communications"); 21MODULE_AUTHOR("Atheros Communications");
@@ -711,7 +713,8 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
711 713
712 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; 714 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
713 715
714 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 716 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN |
717 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
715 718
716 hw->queues = 4; 719 hw->queues = 4;
717 hw->channel_change_time = 5000; 720 hw->channel_change_time = 5000;
@@ -966,9 +969,7 @@ int ath9k_htc_resume(struct htc_target *htc_handle)
966static int __init ath9k_htc_init(void) 969static int __init ath9k_htc_init(void)
967{ 970{
968 if (ath9k_hif_usb_init() < 0) { 971 if (ath9k_hif_usb_init() < 0) {
969 printk(KERN_ERR 972 pr_err("No USB devices found, driver not installed\n");
970 "ath9k_htc: No USB devices found,"
971 " driver not installed.\n");
972 return -ENODEV; 973 return -ENODEV;
973 } 974 }
974 975
@@ -979,6 +980,6 @@ module_init(ath9k_htc_init);
979static void __exit ath9k_htc_exit(void) 980static void __exit ath9k_htc_exit(void)
980{ 981{
981 ath9k_hif_usb_exit(); 982 ath9k_hif_usb_exit();
982 printk(KERN_INFO "ath9k_htc: Driver unloaded\n"); 983 pr_info("Driver unloaded\n");
983} 984}
984module_exit(ath9k_htc_exit); 985module_exit(ath9k_htc_exit);
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index c25226a32ddc..4a9570dfba72 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -14,6 +14,8 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
17#include "htc.h" 19#include "htc.h"
18 20
19static int htc_issue_send(struct htc_target *target, struct sk_buff* skb, 21static int htc_issue_send(struct htc_target *target, struct sk_buff* skb,
@@ -461,7 +463,7 @@ int ath9k_htc_hw_init(struct htc_target *target,
461 char *product, u32 drv_info) 463 char *product, u32 drv_info)
462{ 464{
463 if (ath9k_htc_probe_device(target, dev, devid, product, drv_info)) { 465 if (ath9k_htc_probe_device(target, dev, devid, product, drv_info)) {
464 printk(KERN_ERR "Failed to initialize the device\n"); 466 pr_err("Failed to initialize the device\n");
465 return -ENODEV; 467 return -ENODEV;
466 } 468 }
467 469
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index fa84e37bf091..f84477c5ebb1 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -24,6 +24,8 @@
24#include "rc.h" 24#include "rc.h"
25#include "ar9003_mac.h" 25#include "ar9003_mac.h"
26#include "ar9003_mci.h" 26#include "ar9003_mci.h"
27#include "debug.h"
28#include "ath9k.h"
27 29
28static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type); 30static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type);
29 31
@@ -83,6 +85,53 @@ static void ath9k_hw_ani_cache_ini_regs(struct ath_hw *ah)
83/* Helper Functions */ 85/* Helper Functions */
84/********************/ 86/********************/
85 87
88#ifdef CONFIG_ATH9K_DEBUGFS
89
90void ath9k_debug_sync_cause(struct ath_common *common, u32 sync_cause)
91{
92 struct ath_softc *sc = common->priv;
93 if (sync_cause)
94 sc->debug.stats.istats.sync_cause_all++;
95 if (sync_cause & AR_INTR_SYNC_RTC_IRQ)
96 sc->debug.stats.istats.sync_rtc_irq++;
97 if (sync_cause & AR_INTR_SYNC_MAC_IRQ)
98 sc->debug.stats.istats.sync_mac_irq++;
99 if (sync_cause & AR_INTR_SYNC_EEPROM_ILLEGAL_ACCESS)
100 sc->debug.stats.istats.eeprom_illegal_access++;
101 if (sync_cause & AR_INTR_SYNC_APB_TIMEOUT)
102 sc->debug.stats.istats.apb_timeout++;
103 if (sync_cause & AR_INTR_SYNC_PCI_MODE_CONFLICT)
104 sc->debug.stats.istats.pci_mode_conflict++;
105 if (sync_cause & AR_INTR_SYNC_HOST1_FATAL)
106 sc->debug.stats.istats.host1_fatal++;
107 if (sync_cause & AR_INTR_SYNC_HOST1_PERR)
108 sc->debug.stats.istats.host1_perr++;
109 if (sync_cause & AR_INTR_SYNC_TRCV_FIFO_PERR)
110 sc->debug.stats.istats.trcv_fifo_perr++;
111 if (sync_cause & AR_INTR_SYNC_RADM_CPL_EP)
112 sc->debug.stats.istats.radm_cpl_ep++;
113 if (sync_cause & AR_INTR_SYNC_RADM_CPL_DLLP_ABORT)
114 sc->debug.stats.istats.radm_cpl_dllp_abort++;
115 if (sync_cause & AR_INTR_SYNC_RADM_CPL_TLP_ABORT)
116 sc->debug.stats.istats.radm_cpl_tlp_abort++;
117 if (sync_cause & AR_INTR_SYNC_RADM_CPL_ECRC_ERR)
118 sc->debug.stats.istats.radm_cpl_ecrc_err++;
119 if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT)
120 sc->debug.stats.istats.radm_cpl_timeout++;
121 if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT)
122 sc->debug.stats.istats.local_timeout++;
123 if (sync_cause & AR_INTR_SYNC_PM_ACCESS)
124 sc->debug.stats.istats.pm_access++;
125 if (sync_cause & AR_INTR_SYNC_MAC_AWAKE)
126 sc->debug.stats.istats.mac_awake++;
127 if (sync_cause & AR_INTR_SYNC_MAC_ASLEEP)
128 sc->debug.stats.istats.mac_asleep++;
129 if (sync_cause & AR_INTR_SYNC_MAC_SLEEP_ACCESS)
130 sc->debug.stats.istats.mac_sleep_access++;
131}
132#endif
133
134
86static void ath9k_hw_set_clockrate(struct ath_hw *ah) 135static void ath9k_hw_set_clockrate(struct ath_hw *ah)
87{ 136{
88 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf; 137 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
@@ -142,6 +191,22 @@ bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout)
142} 191}
143EXPORT_SYMBOL(ath9k_hw_wait); 192EXPORT_SYMBOL(ath9k_hw_wait);
144 193
194void ath9k_hw_synth_delay(struct ath_hw *ah, struct ath9k_channel *chan,
195 int hw_delay)
196{
197 if (IS_CHAN_B(chan))
198 hw_delay = (4 * hw_delay) / 22;
199 else
200 hw_delay /= 10;
201
202 if (IS_CHAN_HALF_RATE(chan))
203 hw_delay *= 2;
204 else if (IS_CHAN_QUARTER_RATE(chan))
205 hw_delay *= 4;
206
207 udelay(hw_delay + BASE_ACTIVATE_DELAY);
208}
209
145void ath9k_hw_write_array(struct ath_hw *ah, struct ar5416IniArray *array, 210void ath9k_hw_write_array(struct ath_hw *ah, struct ar5416IniArray *array,
146 int column, unsigned int *writecnt) 211 int column, unsigned int *writecnt)
147{ 212{
@@ -388,8 +453,8 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
388{ 453{
389 int i; 454 int i;
390 455
391 ah->config.dma_beacon_response_time = 2; 456 ah->config.dma_beacon_response_time = 1;
392 ah->config.sw_beacon_response_time = 10; 457 ah->config.sw_beacon_response_time = 6;
393 ah->config.additional_swba_backoff = 0; 458 ah->config.additional_swba_backoff = 0;
394 ah->config.ack_6mb = 0x0; 459 ah->config.ack_6mb = 0x0;
395 ah->config.cwm_ignore_extcca = 0; 460 ah->config.cwm_ignore_extcca = 0;
@@ -445,7 +510,6 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
445 AR_STA_ID1_MCAST_KSRCH; 510 AR_STA_ID1_MCAST_KSRCH;
446 if (AR_SREV_9100(ah)) 511 if (AR_SREV_9100(ah))
447 ah->sta_id1_defaults |= AR_STA_ID1_AR9100_BA_FIX; 512 ah->sta_id1_defaults |= AR_STA_ID1_AR9100_BA_FIX;
448 ah->enable_32kHz_clock = DONT_USE_32KHZ;
449 ah->slottime = ATH9K_SLOT_TIME_9; 513 ah->slottime = ATH9K_SLOT_TIME_9;
450 ah->globaltxtimeout = (u32) -1; 514 ah->globaltxtimeout = (u32) -1;
451 ah->power_mode = ATH9K_PM_UNDEFINED; 515 ah->power_mode = ATH9K_PM_UNDEFINED;
@@ -972,7 +1036,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
972 struct ath_common *common = ath9k_hw_common(ah); 1036 struct ath_common *common = ath9k_hw_common(ah);
973 struct ieee80211_conf *conf = &common->hw->conf; 1037 struct ieee80211_conf *conf = &common->hw->conf;
974 const struct ath9k_channel *chan = ah->curchan; 1038 const struct ath9k_channel *chan = ah->curchan;
975 int acktimeout, ctstimeout; 1039 int acktimeout, ctstimeout, ack_offset = 0;
976 int slottime; 1040 int slottime;
977 int sifstime; 1041 int sifstime;
978 int rx_lat = 0, tx_lat = 0, eifs = 0; 1042 int rx_lat = 0, tx_lat = 0, eifs = 0;
@@ -993,6 +1057,11 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
993 rx_lat = 37; 1057 rx_lat = 37;
994 tx_lat = 54; 1058 tx_lat = 54;
995 1059
1060 if (IS_CHAN_5GHZ(chan))
1061 sifstime = 16;
1062 else
1063 sifstime = 10;
1064
996 if (IS_CHAN_HALF_RATE(chan)) { 1065 if (IS_CHAN_HALF_RATE(chan)) {
997 eifs = 175; 1066 eifs = 175;
998 rx_lat *= 2; 1067 rx_lat *= 2;
@@ -1000,8 +1069,9 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
1000 if (IS_CHAN_A_FAST_CLOCK(ah, chan)) 1069 if (IS_CHAN_A_FAST_CLOCK(ah, chan))
1001 tx_lat += 11; 1070 tx_lat += 11;
1002 1071
1072 sifstime *= 2;
1073 ack_offset = 16;
1003 slottime = 13; 1074 slottime = 13;
1004 sifstime = 32;
1005 } else if (IS_CHAN_QUARTER_RATE(chan)) { 1075 } else if (IS_CHAN_QUARTER_RATE(chan)) {
1006 eifs = 340; 1076 eifs = 340;
1007 rx_lat = (rx_lat * 4) - 1; 1077 rx_lat = (rx_lat * 4) - 1;
@@ -1009,8 +1079,9 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
1009 if (IS_CHAN_A_FAST_CLOCK(ah, chan)) 1079 if (IS_CHAN_A_FAST_CLOCK(ah, chan))
1010 tx_lat += 22; 1080 tx_lat += 22;
1011 1081
1082 sifstime *= 4;
1083 ack_offset = 32;
1012 slottime = 21; 1084 slottime = 21;
1013 sifstime = 64;
1014 } else { 1085 } else {
1015 if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah)) { 1086 if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah)) {
1016 eifs = AR_D_GBL_IFS_EIFS_ASYNC_FIFO; 1087 eifs = AR_D_GBL_IFS_EIFS_ASYNC_FIFO;
@@ -1024,14 +1095,10 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
1024 tx_lat = MS(reg, AR_USEC_TX_LAT); 1095 tx_lat = MS(reg, AR_USEC_TX_LAT);
1025 1096
1026 slottime = ah->slottime; 1097 slottime = ah->slottime;
1027 if (IS_CHAN_5GHZ(chan))
1028 sifstime = 16;
1029 else
1030 sifstime = 10;
1031 } 1098 }
1032 1099
1033 /* As defined by IEEE 802.11-2007 17.3.8.6 */ 1100 /* As defined by IEEE 802.11-2007 17.3.8.6 */
1034 acktimeout = slottime + sifstime + 3 * ah->coverage_class; 1101 acktimeout = slottime + sifstime + 3 * ah->coverage_class + ack_offset;
1035 ctstimeout = acktimeout; 1102 ctstimeout = acktimeout;
1036 1103
1037 /* 1104 /*
@@ -1041,7 +1108,8 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
1041 * BA frames in some implementations, but it has been found to fix ACK 1108 * BA frames in some implementations, but it has been found to fix ACK
1042 * timeout issues in other cases as well. 1109 * timeout issues in other cases as well.
1043 */ 1110 */
1044 if (conf->channel && conf->channel->band == IEEE80211_BAND_2GHZ) { 1111 if (conf->channel && conf->channel->band == IEEE80211_BAND_2GHZ &&
1112 !IS_CHAN_HALF_RATE(chan) && !IS_CHAN_QUARTER_RATE(chan)) {
1045 acktimeout += 64 - sifstime - ah->slottime; 1113 acktimeout += 64 - sifstime - ah->slottime;
1046 ctstimeout += 48 - sifstime - ah->slottime; 1114 ctstimeout += 48 - sifstime - ah->slottime;
1047 } 1115 }
@@ -1491,11 +1559,84 @@ static void ath9k_hw_apply_gpio_override(struct ath_hw *ah)
1491 } 1559 }
1492} 1560}
1493 1561
1562static bool ath9k_hw_check_dcs(u32 dma_dbg, u32 num_dcu_states,
1563 int *hang_state, int *hang_pos)
1564{
1565 static u32 dcu_chain_state[] = {5, 6, 9}; /* DCU chain stuck states */
1566 u32 chain_state, dcs_pos, i;
1567
1568 for (dcs_pos = 0; dcs_pos < num_dcu_states; dcs_pos++) {
1569 chain_state = (dma_dbg >> (5 * dcs_pos)) & 0x1f;
1570 for (i = 0; i < 3; i++) {
1571 if (chain_state == dcu_chain_state[i]) {
1572 *hang_state = chain_state;
1573 *hang_pos = dcs_pos;
1574 return true;
1575 }
1576 }
1577 }
1578 return false;
1579}
1580
1581#define DCU_COMPLETE_STATE 1
1582#define DCU_COMPLETE_STATE_MASK 0x3
1583#define NUM_STATUS_READS 50
1584static bool ath9k_hw_detect_mac_hang(struct ath_hw *ah)
1585{
1586 u32 chain_state, comp_state, dcs_reg = AR_DMADBG_4;
1587 u32 i, hang_pos, hang_state, num_state = 6;
1588
1589 comp_state = REG_READ(ah, AR_DMADBG_6);
1590
1591 if ((comp_state & DCU_COMPLETE_STATE_MASK) != DCU_COMPLETE_STATE) {
1592 ath_dbg(ath9k_hw_common(ah), RESET,
1593 "MAC Hang signature not found at DCU complete\n");
1594 return false;
1595 }
1596
1597 chain_state = REG_READ(ah, dcs_reg);
1598 if (ath9k_hw_check_dcs(chain_state, num_state, &hang_state, &hang_pos))
1599 goto hang_check_iter;
1600
1601 dcs_reg = AR_DMADBG_5;
1602 num_state = 4;
1603 chain_state = REG_READ(ah, dcs_reg);
1604 if (ath9k_hw_check_dcs(chain_state, num_state, &hang_state, &hang_pos))
1605 goto hang_check_iter;
1606
1607 ath_dbg(ath9k_hw_common(ah), RESET,
1608 "MAC Hang signature 1 not found\n");
1609 return false;
1610
1611hang_check_iter:
1612 ath_dbg(ath9k_hw_common(ah), RESET,
1613 "DCU registers: chain %08x complete %08x Hang: state %d pos %d\n",
1614 chain_state, comp_state, hang_state, hang_pos);
1615
1616 for (i = 0; i < NUM_STATUS_READS; i++) {
1617 chain_state = REG_READ(ah, dcs_reg);
1618 chain_state = (chain_state >> (5 * hang_pos)) & 0x1f;
1619 comp_state = REG_READ(ah, AR_DMADBG_6);
1620
1621 if (((comp_state & DCU_COMPLETE_STATE_MASK) !=
1622 DCU_COMPLETE_STATE) ||
1623 (chain_state != hang_state))
1624 return false;
1625 }
1626
1627 ath_dbg(ath9k_hw_common(ah), RESET, "MAC Hang signature 1 found\n");
1628
1629 return true;
1630}
1631
1494bool ath9k_hw_check_alive(struct ath_hw *ah) 1632bool ath9k_hw_check_alive(struct ath_hw *ah)
1495{ 1633{
1496 int count = 50; 1634 int count = 50;
1497 u32 reg; 1635 u32 reg;
1498 1636
1637 if (AR_SREV_9300(ah))
1638 return !ath9k_hw_detect_mac_hang(ah);
1639
1499 if (AR_SREV_9285_12_OR_LATER(ah)) 1640 if (AR_SREV_9285_12_OR_LATER(ah))
1500 return true; 1641 return true;
1501 1642
@@ -1546,6 +1687,10 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
1546 if (chan->channel == ah->curchan->channel) 1687 if (chan->channel == ah->curchan->channel)
1547 goto fail; 1688 goto fail;
1548 1689
1690 if ((ah->curchan->channelFlags | chan->channelFlags) &
1691 (CHANNEL_HALF | CHANNEL_QUARTER))
1692 goto fail;
1693
1549 if ((chan->channelFlags & CHANNEL_ALL) != 1694 if ((chan->channelFlags & CHANNEL_ALL) !=
1550 (ah->curchan->channelFlags & CHANNEL_ALL)) 1695 (ah->curchan->channelFlags & CHANNEL_ALL))
1551 goto fail; 1696 goto fail;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index e88f182ff45c..828b9bbc456d 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -708,7 +708,6 @@ struct ath_hw {
708 struct ar5416Stats stats; 708 struct ar5416Stats stats;
709 struct ath9k_tx_queue_info txq[ATH9K_NUM_TX_QUEUES]; 709 struct ath9k_tx_queue_info txq[ATH9K_NUM_TX_QUEUES];
710 710
711 int16_t curchan_rad_index;
712 enum ath9k_int imask; 711 enum ath9k_int imask;
713 u32 imrs2_reg; 712 u32 imrs2_reg;
714 u32 txok_interrupt_mask; 713 u32 txok_interrupt_mask;
@@ -762,11 +761,6 @@ struct ath_hw {
762 761
763 u32 sta_id1_defaults; 762 u32 sta_id1_defaults;
764 u32 misc_mode; 763 u32 misc_mode;
765 enum {
766 AUTO_32KHZ,
767 USE_32KHZ,
768 DONT_USE_32KHZ,
769 } enable_32kHz_clock;
770 764
771 /* Private to hardware code */ 765 /* Private to hardware code */
772 struct ath_hw_private_ops private_ops; 766 struct ath_hw_private_ops private_ops;
@@ -783,7 +777,6 @@ struct ath_hw {
783 u32 *analogBank7Data; 777 u32 *analogBank7Data;
784 u32 *bank6Temp; 778 u32 *bank6Temp;
785 779
786 u8 txpower_limit;
787 int coverage_class; 780 int coverage_class;
788 u32 slottime; 781 u32 slottime;
789 u32 globaltxtimeout; 782 u32 globaltxtimeout;
@@ -848,7 +841,6 @@ struct ath_hw {
848 struct ath_gen_timer_table hw_gen_timers; 841 struct ath_gen_timer_table hw_gen_timers;
849 842
850 struct ar9003_txs *ts_ring; 843 struct ar9003_txs *ts_ring;
851 void *ts_start;
852 u32 ts_paddr_start; 844 u32 ts_paddr_start;
853 u32 ts_paddr_end; 845 u32 ts_paddr_end;
854 u16 ts_tail; 846 u16 ts_tail;
@@ -915,7 +907,6 @@ static inline u8 get_streams(int mask)
915} 907}
916 908
917/* Initialization, Detach, Reset */ 909/* Initialization, Detach, Reset */
918const char *ath9k_hw_probe(u16 vendorid, u16 devid);
919void ath9k_hw_deinit(struct ath_hw *ah); 910void ath9k_hw_deinit(struct ath_hw *ah);
920int ath9k_hw_init(struct ath_hw *ah); 911int ath9k_hw_init(struct ath_hw *ah);
921int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, 912int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
@@ -932,6 +923,8 @@ void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val);
932void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna); 923void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna);
933 924
934/* General Operation */ 925/* General Operation */
926void ath9k_hw_synth_delay(struct ath_hw *ah, struct ath9k_channel *chan,
927 int hw_delay);
935bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout); 928bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout);
936void ath9k_hw_write_array(struct ath_hw *ah, struct ar5416IniArray *array, 929void ath9k_hw_write_array(struct ath_hw *ah, struct ar5416IniArray *array,
937 int column, unsigned int *writecnt); 930 int column, unsigned int *writecnt);
@@ -965,6 +958,13 @@ bool ath9k_hw_check_alive(struct ath_hw *ah);
965 958
966bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode); 959bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode);
967 960
961#ifdef CONFIG_ATH9K_DEBUGFS
962void ath9k_debug_sync_cause(struct ath_common *common, u32 sync_cause);
963#else
964static inline void ath9k_debug_sync_cause(struct ath_common *common,
965 u32 sync_cause) {}
966#endif
967
968/* Generic hw timer primitives */ 968/* Generic hw timer primitives */
969struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah, 969struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
970 void (*trigger)(void *), 970 void (*trigger)(void *),
@@ -1012,7 +1012,6 @@ int ar9003_paprd_create_curve(struct ath_hw *ah,
1012int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain); 1012int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain);
1013int ar9003_paprd_init_table(struct ath_hw *ah); 1013int ar9003_paprd_init_table(struct ath_hw *ah);
1014bool ar9003_paprd_is_done(struct ath_hw *ah); 1014bool ar9003_paprd_is_done(struct ath_hw *ah);
1015void ar9003_hw_set_paprd_txdesc(struct ath_hw *ah, void *ds, u8 chains);
1016 1015
1017/* Hardware family op attach helpers */ 1016/* Hardware family op attach helpers */
1018void ar5008_hw_attach_phy_ops(struct ath_hw *ah); 1017void ar5008_hw_attach_phy_ops(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index cb006458fc4b..dee9e092449a 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -14,6 +14,8 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
17#include <linux/dma-mapping.h> 19#include <linux/dma-mapping.h>
18#include <linux/slab.h> 20#include <linux/slab.h>
19#include <linux/ath9k_platform.h> 21#include <linux/ath9k_platform.h>
@@ -519,6 +521,8 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
519 atomic_set(&ah->intr_ref_cnt, -1); 521 atomic_set(&ah->intr_ref_cnt, -1);
520 sc->sc_ah = ah; 522 sc->sc_ah = ah;
521 523
524 sc->dfs_detector = dfs_pattern_detector_init(NL80211_DFS_UNSET);
525
522 if (!pdata) { 526 if (!pdata) {
523 ah->ah_flags |= AH_USE_EEPROM; 527 ah->ah_flags |= AH_USE_EEPROM;
524 sc->sc_ah->led_pin = -1; 528 sc->sc_ah->led_pin = -1;
@@ -642,6 +646,24 @@ void ath9k_reload_chainmask_settings(struct ath_softc *sc)
642 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap); 646 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
643} 647}
644 648
649static const struct ieee80211_iface_limit if_limits[] = {
650 { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) |
651 BIT(NL80211_IFTYPE_P2P_CLIENT) |
652 BIT(NL80211_IFTYPE_WDS) },
653 { .max = 8, .types =
654#ifdef CONFIG_MAC80211_MESH
655 BIT(NL80211_IFTYPE_MESH_POINT) |
656#endif
657 BIT(NL80211_IFTYPE_AP) |
658 BIT(NL80211_IFTYPE_P2P_GO) },
659};
660
661static const struct ieee80211_iface_combination if_comb = {
662 .limits = if_limits,
663 .n_limits = ARRAY_SIZE(if_limits),
664 .max_interfaces = 2048,
665 .num_different_channels = 1,
666};
645 667
646void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) 668void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
647{ 669{
@@ -671,11 +693,15 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
671 BIT(NL80211_IFTYPE_ADHOC) | 693 BIT(NL80211_IFTYPE_ADHOC) |
672 BIT(NL80211_IFTYPE_MESH_POINT); 694 BIT(NL80211_IFTYPE_MESH_POINT);
673 695
696 hw->wiphy->iface_combinations = &if_comb;
697 hw->wiphy->n_iface_combinations = 1;
698
674 if (AR_SREV_5416(sc->sc_ah)) 699 if (AR_SREV_5416(sc->sc_ah))
675 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; 700 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
676 701
677 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 702 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
678 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; 703 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
704 hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
679 705
680 hw->queues = 4; 706 hw->queues = 4;
681 hw->max_rates = 4; 707 hw->max_rates = 4;
@@ -779,6 +805,7 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
779 goto error_world; 805 goto error_world;
780 } 806 }
781 807
808 setup_timer(&sc->rx_poll_timer, ath_rx_poll, (unsigned long)sc);
782 sc->last_rssi = ATH_RSSI_DUMMY_MARKER; 809 sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
783 810
784 ath_init_leds(sc); 811 ath_init_leds(sc);
@@ -821,6 +848,8 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
821 ath_tx_cleanupq(sc, &sc->tx.txq[i]); 848 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
822 849
823 ath9k_hw_deinit(sc->sc_ah); 850 ath9k_hw_deinit(sc->sc_ah);
851 if (sc->dfs_detector != NULL)
852 sc->dfs_detector->exit(sc->dfs_detector);
824 853
825 kfree(sc->sc_ah); 854 kfree(sc->sc_ah);
826 sc->sc_ah = NULL; 855 sc->sc_ah = NULL;
@@ -866,17 +895,14 @@ static int __init ath9k_init(void)
866 /* Register rate control algorithm */ 895 /* Register rate control algorithm */
867 error = ath_rate_control_register(); 896 error = ath_rate_control_register();
868 if (error != 0) { 897 if (error != 0) {
869 printk(KERN_ERR 898 pr_err("Unable to register rate control algorithm: %d\n",
870 "ath9k: Unable to register rate control " 899 error);
871 "algorithm: %d\n",
872 error);
873 goto err_out; 900 goto err_out;
874 } 901 }
875 902
876 error = ath_pci_init(); 903 error = ath_pci_init();
877 if (error < 0) { 904 if (error < 0) {
878 printk(KERN_ERR 905 pr_err("No PCI devices found, driver not installed\n");
879 "ath9k: No PCI devices found, driver not installed.\n");
880 error = -ENODEV; 906 error = -ENODEV;
881 goto err_rate_unregister; 907 goto err_rate_unregister;
882 } 908 }
@@ -905,6 +931,6 @@ static void __exit ath9k_exit(void)
905 ath_ahb_exit(); 931 ath_ahb_exit();
906 ath_pci_exit(); 932 ath_pci_exit();
907 ath_rate_control_unregister(); 933 ath_rate_control_unregister();
908 printk(KERN_INFO "%s: Driver unloaded\n", dev_info); 934 pr_info("%s: Driver unloaded\n", dev_info);
909} 935}
910module_exit(ath9k_exit); 936module_exit(ath9k_exit);
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index f7bd2532269c..04ef775ccee1 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -133,8 +133,16 @@ EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel);
133 133
134void ath9k_hw_abort_tx_dma(struct ath_hw *ah) 134void ath9k_hw_abort_tx_dma(struct ath_hw *ah)
135{ 135{
136 int maxdelay = 1000;
136 int i, q; 137 int i, q;
137 138
139 if (ah->curchan) {
140 if (IS_CHAN_HALF_RATE(ah->curchan))
141 maxdelay *= 2;
142 else if (IS_CHAN_QUARTER_RATE(ah->curchan))
143 maxdelay *= 4;
144 }
145
138 REG_WRITE(ah, AR_Q_TXD, AR_Q_TXD_M); 146 REG_WRITE(ah, AR_Q_TXD, AR_Q_TXD_M);
139 147
140 REG_SET_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF); 148 REG_SET_BIT(ah, AR_PCU_MISC, AR_PCU_FORCE_QUIET_COLL | AR_PCU_CLEAR_VMF);
@@ -142,7 +150,7 @@ void ath9k_hw_abort_tx_dma(struct ath_hw *ah)
142 REG_SET_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF); 150 REG_SET_BIT(ah, AR_D_GBL_IFS_MISC, AR_D_GBL_IFS_MISC_IGNORE_BACKOFF);
143 151
144 for (q = 0; q < AR_NUM_QCU; q++) { 152 for (q = 0; q < AR_NUM_QCU; q++) {
145 for (i = 0; i < 1000; i++) { 153 for (i = 0; i < maxdelay; i++) {
146 if (i) 154 if (i)
147 udelay(5); 155 udelay(5);
148 156
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 798ea57252b4..dfa78e8b6470 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -113,21 +113,25 @@ void ath9k_ps_restore(struct ath_softc *sc)
113 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 113 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
114 enum ath9k_power_mode mode; 114 enum ath9k_power_mode mode;
115 unsigned long flags; 115 unsigned long flags;
116 bool reset;
116 117
117 spin_lock_irqsave(&sc->sc_pm_lock, flags); 118 spin_lock_irqsave(&sc->sc_pm_lock, flags);
118 if (--sc->ps_usecount != 0) 119 if (--sc->ps_usecount != 0)
119 goto unlock; 120 goto unlock;
120 121
121 if (sc->ps_idle && (sc->ps_flags & PS_WAIT_FOR_TX_ACK)) 122 if (sc->ps_idle) {
123 ath9k_hw_setrxabort(sc->sc_ah, 1);
124 ath9k_hw_stopdmarecv(sc->sc_ah, &reset);
122 mode = ATH9K_PM_FULL_SLEEP; 125 mode = ATH9K_PM_FULL_SLEEP;
123 else if (sc->ps_enabled && 126 } else if (sc->ps_enabled &&
124 !(sc->ps_flags & (PS_WAIT_FOR_BEACON | 127 !(sc->ps_flags & (PS_WAIT_FOR_BEACON |
125 PS_WAIT_FOR_CAB | 128 PS_WAIT_FOR_CAB |
126 PS_WAIT_FOR_PSPOLL_DATA | 129 PS_WAIT_FOR_PSPOLL_DATA |
127 PS_WAIT_FOR_TX_ACK))) 130 PS_WAIT_FOR_TX_ACK))) {
128 mode = ATH9K_PM_NETWORK_SLEEP; 131 mode = ATH9K_PM_NETWORK_SLEEP;
129 else 132 } else {
130 goto unlock; 133 goto unlock;
134 }
131 135
132 spin_lock(&common->cc_lock); 136 spin_lock(&common->cc_lock);
133 ath_hw_cycle_counters_update(common); 137 ath_hw_cycle_counters_update(common);
@@ -241,6 +245,7 @@ static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush)
241 245
242 sc->hw_busy_count = 0; 246 sc->hw_busy_count = 0;
243 del_timer_sync(&common->ani.timer); 247 del_timer_sync(&common->ani.timer);
248 del_timer_sync(&sc->rx_poll_timer);
244 249
245 ath9k_debug_samp_bb_mac(sc); 250 ath9k_debug_samp_bb_mac(sc);
246 ath9k_hw_disable_interrupts(ah); 251 ath9k_hw_disable_interrupts(ah);
@@ -282,6 +287,7 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
282 287
283 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0); 288 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
284 ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/2); 289 ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work, HZ/2);
290 ath_start_rx_poll(sc, 3);
285 if (!common->disable_ani) 291 if (!common->disable_ani)
286 ath_start_ani(common); 292 ath_start_ani(common);
287 } 293 }
@@ -690,17 +696,6 @@ void ath9k_tasklet(unsigned long data)
690 goto out; 696 goto out;
691 } 697 }
692 698
693 /*
694 * Only run the baseband hang check if beacons stop working in AP or
695 * IBSS mode, because it has a high false positive rate. For station
696 * mode it should not be necessary, since the upper layers will detect
697 * this through a beacon miss automatically and the following channel
698 * change will trigger a hardware reset anyway
699 */
700 if (ath9k_hw_numtxpending(ah, sc->beacon.beaconq) != 0 &&
701 !ath9k_hw_check_alive(ah))
702 ieee80211_queue_work(sc->hw, &sc->hw_check_work);
703
704 if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) { 699 if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) {
705 /* 700 /*
706 * TSF sync does not look correct; remain awake to sync with 701 * TSF sync does not look correct; remain awake to sync with
@@ -912,10 +907,19 @@ void ath_hw_check(struct work_struct *work)
912 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 907 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
913 unsigned long flags; 908 unsigned long flags;
914 int busy; 909 int busy;
910 u8 is_alive, nbeacon = 1;
915 911
916 ath9k_ps_wakeup(sc); 912 ath9k_ps_wakeup(sc);
917 if (ath9k_hw_check_alive(sc->sc_ah)) 913 is_alive = ath9k_hw_check_alive(sc->sc_ah);
914
915 if (is_alive && !AR_SREV_9300(sc->sc_ah))
918 goto out; 916 goto out;
917 else if (!is_alive && AR_SREV_9300(sc->sc_ah)) {
918 ath_dbg(common, RESET,
919 "DCU stuck is detected. Schedule chip reset\n");
920 RESET_STAT_INC(sc, RESET_TYPE_MAC_HANG);
921 goto sched_reset;
922 }
919 923
920 spin_lock_irqsave(&common->cc_lock, flags); 924 spin_lock_irqsave(&common->cc_lock, flags);
921 busy = ath_update_survey_stats(sc); 925 busy = ath_update_survey_stats(sc);
@@ -926,12 +930,18 @@ void ath_hw_check(struct work_struct *work)
926 if (busy >= 99) { 930 if (busy >= 99) {
927 if (++sc->hw_busy_count >= 3) { 931 if (++sc->hw_busy_count >= 3) {
928 RESET_STAT_INC(sc, RESET_TYPE_BB_HANG); 932 RESET_STAT_INC(sc, RESET_TYPE_BB_HANG);
929 ieee80211_queue_work(sc->hw, &sc->hw_reset_work); 933 goto sched_reset;
930 } 934 }
931 935 } else if (busy >= 0) {
932 } else if (busy >= 0)
933 sc->hw_busy_count = 0; 936 sc->hw_busy_count = 0;
937 nbeacon = 3;
938 }
934 939
940 ath_start_rx_poll(sc, nbeacon);
941 goto out;
942
943sched_reset:
944 ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
935out: 945out:
936 ath9k_ps_restore(sc); 946 ath9k_ps_restore(sc);
937} 947}
@@ -1094,14 +1104,7 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1094 } 1104 }
1095 } 1105 }
1096 1106
1097 /* 1107 if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_NETWORK_SLEEP)) {
1098 * Cannot tx while the hardware is in full sleep, it first needs a full
1099 * chip reset to recover from that
1100 */
1101 if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_FULL_SLEEP))
1102 goto exit;
1103
1104 if (unlikely(sc->sc_ah->power_mode != ATH9K_PM_AWAKE)) {
1105 /* 1108 /*
1106 * We are using PS-Poll and mac80211 can request TX while in 1109 * We are using PS-Poll and mac80211 can request TX while in
1107 * power save mode. Need to wake up hardware for the TX to be 1110 * power save mode. Need to wake up hardware for the TX to be
@@ -1120,12 +1123,21 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1120 } 1123 }
1121 /* 1124 /*
1122 * The actual restore operation will happen only after 1125 * The actual restore operation will happen only after
1123 * the sc_flags bit is cleared. We are just dropping 1126 * the ps_flags bit is cleared. We are just dropping
1124 * the ps_usecount here. 1127 * the ps_usecount here.
1125 */ 1128 */
1126 ath9k_ps_restore(sc); 1129 ath9k_ps_restore(sc);
1127 } 1130 }
1128 1131
1132 /*
1133 * Cannot tx while the hardware is in full sleep, it first needs a full
1134 * chip reset to recover from that
1135 */
1136 if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_FULL_SLEEP)) {
1137 ath_err(common, "TX while HW is in FULL_SLEEP mode\n");
1138 goto exit;
1139 }
1140
1129 memset(&txctl, 0, sizeof(struct ath_tx_control)); 1141 memset(&txctl, 0, sizeof(struct ath_tx_control));
1130 txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)]; 1142 txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)];
1131 1143
@@ -1133,6 +1145,7 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1133 1145
1134 if (ath_tx_start(hw, skb, &txctl) != 0) { 1146 if (ath_tx_start(hw, skb, &txctl) != 0) {
1135 ath_dbg(common, XMIT, "TX failed\n"); 1147 ath_dbg(common, XMIT, "TX failed\n");
1148 TX_STAT_INC(txctl.txq->axq_qnum, txfailed);
1136 goto exit; 1149 goto exit;
1137 } 1150 }
1138 1151
@@ -1151,6 +1164,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
1151 mutex_lock(&sc->mutex); 1164 mutex_lock(&sc->mutex);
1152 1165
1153 ath_cancel_work(sc); 1166 ath_cancel_work(sc);
1167 del_timer_sync(&sc->rx_poll_timer);
1154 1168
1155 if (sc->sc_flags & SC_OP_INVALID) { 1169 if (sc->sc_flags & SC_OP_INVALID) {
1156 ath_dbg(common, ANY, "Device not present\n"); 1170 ath_dbg(common, ANY, "Device not present\n");
@@ -1237,7 +1251,6 @@ static void ath9k_reclaim_beacon(struct ath_softc *sc,
1237 ath9k_set_beaconing_status(sc, false); 1251 ath9k_set_beaconing_status(sc, false);
1238 ath_beacon_return(sc, avp); 1252 ath_beacon_return(sc, avp);
1239 ath9k_set_beaconing_status(sc, true); 1253 ath9k_set_beaconing_status(sc, true);
1240 sc->sc_flags &= ~SC_OP_BEACONS;
1241} 1254}
1242 1255
1243static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif) 1256static void ath9k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
@@ -1368,21 +1381,31 @@ static void ath9k_do_vif_add_setup(struct ieee80211_hw *hw,
1368 ath9k_calculate_summary_state(hw, vif); 1381 ath9k_calculate_summary_state(hw, vif);
1369 1382
1370 if (ath9k_uses_beacons(vif->type)) { 1383 if (ath9k_uses_beacons(vif->type)) {
1371 int error; 1384 /* Reserve a beacon slot for the vif */
1372 /* This may fail because upper levels do not have beacons
1373 * properly configured yet. That's OK, we assume it
1374 * will be properly configured and then we will be notified
1375 * in the info_changed method and set up beacons properly
1376 * there.
1377 */
1378 ath9k_set_beaconing_status(sc, false); 1385 ath9k_set_beaconing_status(sc, false);
1379 error = ath_beacon_alloc(sc, vif); 1386 ath_beacon_alloc(sc, vif);
1380 if (!error)
1381 ath_beacon_config(sc, vif);
1382 ath9k_set_beaconing_status(sc, true); 1387 ath9k_set_beaconing_status(sc, true);
1383 } 1388 }
1384} 1389}
1385 1390
1391void ath_start_rx_poll(struct ath_softc *sc, u8 nbeacon)
1392{
1393 if (!AR_SREV_9300(sc->sc_ah))
1394 return;
1395
1396 if (!(sc->sc_flags & SC_OP_PRIM_STA_VIF))
1397 return;
1398
1399 mod_timer(&sc->rx_poll_timer, jiffies + msecs_to_jiffies
1400 (nbeacon * sc->cur_beacon_conf.beacon_interval));
1401}
1402
1403void ath_rx_poll(unsigned long data)
1404{
1405 struct ath_softc *sc = (struct ath_softc *)data;
1406
1407 ieee80211_queue_work(sc->hw, &sc->hw_check_work);
1408}
1386 1409
1387static int ath9k_add_interface(struct ieee80211_hw *hw, 1410static int ath9k_add_interface(struct ieee80211_hw *hw,
1388 struct ieee80211_vif *vif) 1411 struct ieee80211_vif *vif)
@@ -1511,6 +1534,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
1511static void ath9k_enable_ps(struct ath_softc *sc) 1534static void ath9k_enable_ps(struct ath_softc *sc)
1512{ 1535{
1513 struct ath_hw *ah = sc->sc_ah; 1536 struct ath_hw *ah = sc->sc_ah;
1537 struct ath_common *common = ath9k_hw_common(ah);
1514 1538
1515 sc->ps_enabled = true; 1539 sc->ps_enabled = true;
1516 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) { 1540 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
@@ -1520,11 +1544,13 @@ static void ath9k_enable_ps(struct ath_softc *sc)
1520 } 1544 }
1521 ath9k_hw_setrxabort(ah, 1); 1545 ath9k_hw_setrxabort(ah, 1);
1522 } 1546 }
1547 ath_dbg(common, PS, "PowerSave enabled\n");
1523} 1548}
1524 1549
1525static void ath9k_disable_ps(struct ath_softc *sc) 1550static void ath9k_disable_ps(struct ath_softc *sc)
1526{ 1551{
1527 struct ath_hw *ah = sc->sc_ah; 1552 struct ath_hw *ah = sc->sc_ah;
1553 struct ath_common *common = ath9k_hw_common(ah);
1528 1554
1529 sc->ps_enabled = false; 1555 sc->ps_enabled = false;
1530 ath9k_hw_setpower(ah, ATH9K_PM_AWAKE); 1556 ath9k_hw_setpower(ah, ATH9K_PM_AWAKE);
@@ -1539,7 +1565,7 @@ static void ath9k_disable_ps(struct ath_softc *sc)
1539 ath9k_hw_set_interrupts(ah); 1565 ath9k_hw_set_interrupts(ah);
1540 } 1566 }
1541 } 1567 }
1542 1568 ath_dbg(common, PS, "PowerSave disabled\n");
1543} 1569}
1544 1570
1545static int ath9k_config(struct ieee80211_hw *hw, u32 changed) 1571static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
@@ -1911,6 +1937,8 @@ static void ath9k_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
1911 sc->last_rssi = ATH_RSSI_DUMMY_MARKER; 1937 sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
1912 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER; 1938 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
1913 1939
1940 ath_start_rx_poll(sc, 3);
1941
1914 if (!common->disable_ani) { 1942 if (!common->disable_ani) {
1915 sc->sc_flags |= SC_OP_ANI_RUN; 1943 sc->sc_flags |= SC_OP_ANI_RUN;
1916 ath_start_ani(common); 1944 ath_start_ani(common);
@@ -1950,6 +1978,7 @@ static void ath9k_config_bss(struct ath_softc *sc, struct ieee80211_vif *vif)
1950 /* Stop ANI */ 1978 /* Stop ANI */
1951 sc->sc_flags &= ~SC_OP_ANI_RUN; 1979 sc->sc_flags &= ~SC_OP_ANI_RUN;
1952 del_timer_sync(&common->ani.timer); 1980 del_timer_sync(&common->ani.timer);
1981 del_timer_sync(&sc->rx_poll_timer);
1953 memset(&sc->caldata, 0, sizeof(sc->caldata)); 1982 memset(&sc->caldata, 0, sizeof(sc->caldata));
1954 } 1983 }
1955} 1984}
@@ -1964,7 +1993,6 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1964 struct ath_common *common = ath9k_hw_common(ah); 1993 struct ath_common *common = ath9k_hw_common(ah);
1965 struct ath_vif *avp = (void *)vif->drv_priv; 1994 struct ath_vif *avp = (void *)vif->drv_priv;
1966 int slottime; 1995 int slottime;
1967 int error;
1968 1996
1969 ath9k_ps_wakeup(sc); 1997 ath9k_ps_wakeup(sc);
1970 mutex_lock(&sc->mutex); 1998 mutex_lock(&sc->mutex);
@@ -1993,16 +2021,29 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1993 } else { 2021 } else {
1994 sc->sc_flags &= ~SC_OP_ANI_RUN; 2022 sc->sc_flags &= ~SC_OP_ANI_RUN;
1995 del_timer_sync(&common->ani.timer); 2023 del_timer_sync(&common->ani.timer);
2024 del_timer_sync(&sc->rx_poll_timer);
1996 } 2025 }
1997 } 2026 }
1998 2027
1999 /* Enable transmission of beacons (AP, IBSS, MESH) */ 2028 /*
2000 if ((changed & BSS_CHANGED_BEACON) || 2029 * In case of AP mode, the HW TSF has to be reset
2001 ((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon)) { 2030 * when the beacon interval changes.
2031 */
2032 if ((changed & BSS_CHANGED_BEACON_INT) &&
2033 (vif->type == NL80211_IFTYPE_AP))
2034 sc->sc_flags |= SC_OP_TSF_RESET;
2035
2036 /* Configure beaconing (AP, IBSS, MESH) */
2037 if (ath9k_uses_beacons(vif->type) &&
2038 ((changed & BSS_CHANGED_BEACON) ||
2039 (changed & BSS_CHANGED_BEACON_ENABLED) ||
2040 (changed & BSS_CHANGED_BEACON_INT))) {
2002 ath9k_set_beaconing_status(sc, false); 2041 ath9k_set_beaconing_status(sc, false);
2003 error = ath_beacon_alloc(sc, vif); 2042 if (bss_conf->enable_beacon)
2004 if (!error) 2043 ath_beacon_alloc(sc, vif);
2005 ath_beacon_config(sc, vif); 2044 else
2045 avp->is_bslot_active = false;
2046 ath_beacon_config(sc, vif);
2006 ath9k_set_beaconing_status(sc, true); 2047 ath9k_set_beaconing_status(sc, true);
2007 } 2048 }
2008 2049
@@ -2025,30 +2066,6 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
2025 } 2066 }
2026 } 2067 }
2027 2068
2028 /* Disable transmission of beacons */
2029 if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
2030 !bss_conf->enable_beacon) {
2031 ath9k_set_beaconing_status(sc, false);
2032 avp->is_bslot_active = false;
2033 ath9k_set_beaconing_status(sc, true);
2034 }
2035
2036 if (changed & BSS_CHANGED_BEACON_INT) {
2037 /*
2038 * In case of AP mode, the HW TSF has to be reset
2039 * when the beacon interval changes.
2040 */
2041 if (vif->type == NL80211_IFTYPE_AP) {
2042 sc->sc_flags |= SC_OP_TSF_RESET;
2043 ath9k_set_beaconing_status(sc, false);
2044 error = ath_beacon_alloc(sc, vif);
2045 if (!error)
2046 ath_beacon_config(sc, vif);
2047 ath9k_set_beaconing_status(sc, true);
2048 } else
2049 ath_beacon_config(sc, vif);
2050 }
2051
2052 mutex_unlock(&sc->mutex); 2069 mutex_unlock(&sc->mutex);
2053 ath9k_ps_restore(sc); 2070 ath9k_ps_restore(sc);
2054} 2071}
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 77dc327def8d..a856b51255f4 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -14,6 +14,8 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
17#include <linux/nl80211.h> 19#include <linux/nl80211.h>
18#include <linux/pci.h> 20#include <linux/pci.h>
19#include <linux/pci-aspm.h> 21#include <linux/pci-aspm.h>
@@ -171,14 +173,13 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
171 173
172 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 174 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
173 if (ret) { 175 if (ret) {
174 printk(KERN_ERR "ath9k: 32-bit DMA not available\n"); 176 pr_err("32-bit DMA not available\n");
175 goto err_dma; 177 goto err_dma;
176 } 178 }
177 179
178 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 180 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
179 if (ret) { 181 if (ret) {
180 printk(KERN_ERR "ath9k: 32-bit DMA consistent " 182 pr_err("32-bit DMA consistent DMA enable failed\n");
181 "DMA enable failed\n");
182 goto err_dma; 183 goto err_dma;
183 } 184 }
184 185
@@ -224,7 +225,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
224 225
225 mem = pci_iomap(pdev, 0, 0); 226 mem = pci_iomap(pdev, 0, 0);
226 if (!mem) { 227 if (!mem) {
227 printk(KERN_ERR "PCI memory map error\n") ; 228 pr_err("PCI memory map error\n") ;
228 ret = -EIO; 229 ret = -EIO;
229 goto err_iomap; 230 goto err_iomap;
230 } 231 }
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 08bb45532701..92a6c0a87f89 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1436,7 +1436,7 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
1436 1436
1437static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband, 1437static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
1438 struct ieee80211_sta *sta, void *priv_sta, 1438 struct ieee80211_sta *sta, void *priv_sta,
1439 u32 changed, enum nl80211_channel_type oper_chan_type) 1439 u32 changed)
1440{ 1440{
1441 struct ath_softc *sc = priv; 1441 struct ath_softc *sc = priv;
1442 struct ath_rate_priv *ath_rc_priv = priv_sta; 1442 struct ath_rate_priv *ath_rc_priv = priv_sta;
@@ -1447,12 +1447,11 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
1447 1447
1448 /* FIXME: Handle AP mode later when we support CWM */ 1448 /* FIXME: Handle AP mode later when we support CWM */
1449 1449
1450 if (changed & IEEE80211_RC_HT_CHANGED) { 1450 if (changed & IEEE80211_RC_BW_CHANGED) {
1451 if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION) 1451 if (sc->sc_ah->opmode != NL80211_IFTYPE_STATION)
1452 return; 1452 return;
1453 1453
1454 if (oper_chan_type == NL80211_CHAN_HT40MINUS || 1454 if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
1455 oper_chan_type == NL80211_CHAN_HT40PLUS)
1456 oper_cw40 = true; 1455 oper_cw40 = true;
1457 1456
1458 if (oper_cw40) 1457 if (oper_cw40)
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 1c4583c7ff7c..544e5490ca2e 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -812,6 +812,7 @@ static bool ath9k_rx_accept(struct ath_common *common,
812 is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID && 812 is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
813 test_bit(rx_stats->rs_keyix, common->tkip_keymap); 813 test_bit(rx_stats->rs_keyix, common->tkip_keymap);
814 strip_mic = is_valid_tkip && ieee80211_is_data(fc) && 814 strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
815 ieee80211_has_protected(fc) &&
815 !(rx_stats->rs_status & 816 !(rx_stats->rs_status &
816 (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC | 817 (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
817 ATH9K_RXERR_KEYMISS)); 818 ATH9K_RXERR_KEYMISS));
@@ -824,15 +825,20 @@ static bool ath9k_rx_accept(struct ath_common *common,
824 if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID) 825 if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID)
825 rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS; 826 rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
826 827
827 if (!rx_stats->rs_datalen) 828 if (!rx_stats->rs_datalen) {
829 RX_STAT_INC(rx_len_err);
828 return false; 830 return false;
831 }
832
829 /* 833 /*
830 * rs_status follows rs_datalen so if rs_datalen is too large 834 * rs_status follows rs_datalen so if rs_datalen is too large
831 * we can take a hint that hardware corrupted it, so ignore 835 * we can take a hint that hardware corrupted it, so ignore
832 * those frames. 836 * those frames.
833 */ 837 */
834 if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len)) 838 if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len)) {
839 RX_STAT_INC(rx_len_err);
835 return false; 840 return false;
841 }
836 842
837 /* Only use error bits from the last fragment */ 843 /* Only use error bits from the last fragment */
838 if (rx_stats->rs_more) 844 if (rx_stats->rs_more)
@@ -902,6 +908,7 @@ static int ath9k_process_rate(struct ath_common *common,
902 struct ieee80211_supported_band *sband; 908 struct ieee80211_supported_band *sband;
903 enum ieee80211_band band; 909 enum ieee80211_band band;
904 unsigned int i = 0; 910 unsigned int i = 0;
911 struct ath_softc __maybe_unused *sc = common->priv;
905 912
906 band = hw->conf.channel->band; 913 band = hw->conf.channel->band;
907 sband = hw->wiphy->bands[band]; 914 sband = hw->wiphy->bands[band];
@@ -936,7 +943,7 @@ static int ath9k_process_rate(struct ath_common *common,
936 ath_dbg(common, ANY, 943 ath_dbg(common, ANY,
937 "unsupported hw bitrate detected 0x%02x using 1 Mbit\n", 944 "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
938 rx_stats->rs_rate); 945 rx_stats->rs_rate);
939 946 RX_STAT_INC(rx_rate_err);
940 return -EINVAL; 947 return -EINVAL;
941} 948}
942 949
@@ -1823,10 +1830,14 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1823 1830
1824 hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len); 1831 hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
1825 rxs = IEEE80211_SKB_RXCB(hdr_skb); 1832 rxs = IEEE80211_SKB_RXCB(hdr_skb);
1826 if (ieee80211_is_beacon(hdr->frame_control) && 1833 if (ieee80211_is_beacon(hdr->frame_control)) {
1827 !is_zero_ether_addr(common->curbssid) && 1834 RX_STAT_INC(rx_beacons);
1828 !compare_ether_addr(hdr->addr3, common->curbssid)) 1835 if (!is_zero_ether_addr(common->curbssid) &&
1829 rs.is_mybeacon = true; 1836 !compare_ether_addr(hdr->addr3, common->curbssid))
1837 rs.is_mybeacon = true;
1838 else
1839 rs.is_mybeacon = false;
1840 }
1830 else 1841 else
1831 rs.is_mybeacon = false; 1842 rs.is_mybeacon = false;
1832 1843
@@ -1836,8 +1847,10 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1836 * If we're asked to flush receive queue, directly 1847 * If we're asked to flush receive queue, directly
1837 * chain it back at the queue without processing it. 1848 * chain it back at the queue without processing it.
1838 */ 1849 */
1839 if (sc->sc_flags & SC_OP_RXFLUSH) 1850 if (sc->sc_flags & SC_OP_RXFLUSH) {
1851 RX_STAT_INC(rx_drop_rxflush);
1840 goto requeue_drop_frag; 1852 goto requeue_drop_frag;
1853 }
1841 1854
1842 memset(rxs, 0, sizeof(struct ieee80211_rx_status)); 1855 memset(rxs, 0, sizeof(struct ieee80211_rx_status));
1843 1856
@@ -1855,6 +1868,10 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1855 if (retval) 1868 if (retval)
1856 goto requeue_drop_frag; 1869 goto requeue_drop_frag;
1857 1870
1871 if (rs.is_mybeacon) {
1872 sc->hw_busy_count = 0;
1873 ath_start_rx_poll(sc, 3);
1874 }
1858 /* Ensure we always have an skb to requeue once we are done 1875 /* Ensure we always have an skb to requeue once we are done
1859 * processing the current buffer's skb */ 1876 * processing the current buffer's skb */
1860 requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC); 1877 requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);
@@ -1863,8 +1880,10 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1863 * tell hardware it can give us a new frame using the old 1880 * tell hardware it can give us a new frame using the old
1864 * skb and put it at the tail of the sc->rx.rxbuf list for 1881 * skb and put it at the tail of the sc->rx.rxbuf list for
1865 * processing. */ 1882 * processing. */
1866 if (!requeue_skb) 1883 if (!requeue_skb) {
1884 RX_STAT_INC(rx_oom_err);
1867 goto requeue_drop_frag; 1885 goto requeue_drop_frag;
1886 }
1868 1887
1869 /* Unmap the frame */ 1888 /* Unmap the frame */
1870 dma_unmap_single(sc->dev, bf->bf_buf_addr, 1889 dma_unmap_single(sc->dev, bf->bf_buf_addr,
@@ -1895,6 +1914,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1895 } 1914 }
1896 1915
1897 if (rs.rs_more) { 1916 if (rs.rs_more) {
1917 RX_STAT_INC(rx_frags);
1898 /* 1918 /*
1899 * rs_more indicates chained descriptors which can be 1919 * rs_more indicates chained descriptors which can be
1900 * used to link buffers together for a sort of 1920 * used to link buffers together for a sort of
@@ -1904,6 +1924,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1904 /* too many fragments - cannot handle frame */ 1924 /* too many fragments - cannot handle frame */
1905 dev_kfree_skb_any(sc->rx.frag); 1925 dev_kfree_skb_any(sc->rx.frag);
1906 dev_kfree_skb_any(skb); 1926 dev_kfree_skb_any(skb);
1927 RX_STAT_INC(rx_too_many_frags_err);
1907 skb = NULL; 1928 skb = NULL;
1908 } 1929 }
1909 sc->rx.frag = skb; 1930 sc->rx.frag = skb;
@@ -1915,6 +1936,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1915 1936
1916 if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) { 1937 if (pskb_expand_head(hdr_skb, 0, space, GFP_ATOMIC) < 0) {
1917 dev_kfree_skb(skb); 1938 dev_kfree_skb(skb);
1939 RX_STAT_INC(rx_oom_err);
1918 goto requeue_drop_frag; 1940 goto requeue_drop_frag;
1919 } 1941 }
1920 1942
diff --git a/drivers/net/wireless/ath/carl9170/cmd.h b/drivers/net/wireless/ath/carl9170/cmd.h
index 885c42778b8b..65919c902f55 100644
--- a/drivers/net/wireless/ath/carl9170/cmd.h
+++ b/drivers/net/wireless/ath/carl9170/cmd.h
@@ -114,7 +114,7 @@ __regwrite_out : \
114 114
115#define carl9170_regwrite_result() \ 115#define carl9170_regwrite_result() \
116 __err; \ 116 __err; \
117} while (0); 117} while (0)
118 118
119 119
120#define carl9170_async_regwrite_get_buf() \ 120#define carl9170_async_regwrite_get_buf() \
@@ -126,7 +126,7 @@ do { \
126 __err = -ENOMEM; \ 126 __err = -ENOMEM; \
127 goto __async_regwrite_out; \ 127 goto __async_regwrite_out; \
128 } \ 128 } \
129} while (0); 129} while (0)
130 130
131#define carl9170_async_regwrite_begin(carl) \ 131#define carl9170_async_regwrite_begin(carl) \
132do { \ 132do { \
@@ -169,6 +169,6 @@ __async_regwrite_out: \
169 169
170#define carl9170_async_regwrite_result() \ 170#define carl9170_async_regwrite_result() \
171 __err; \ 171 __err; \
172} while (0); 172} while (0)
173 173
174#endif /* __CMD_H */ 174#endif /* __CMD_H */
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index cffde8d9a521..5c73c03872f3 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -355,6 +355,8 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
355 355
356 ar->hw->wiphy->interface_modes |= if_comb_types; 356 ar->hw->wiphy->interface_modes |= if_comb_types;
357 357
358 ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
359
358#undef SUPPORTED 360#undef SUPPORTED
359 return carl9170_fw_tx_sequence(ar); 361 return carl9170_fw_tx_sequence(ar);
360} 362}
diff --git a/drivers/net/wireless/ath/main.c b/drivers/net/wireless/ath/main.c
index ea2c737138d3..8e99540cd90e 100644
--- a/drivers/net/wireless/ath/main.c
+++ b/drivers/net/wireless/ath/main.c
@@ -14,6 +14,8 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
17#include <linux/kernel.h> 19#include <linux/kernel.h>
18#include <linux/module.h> 20#include <linux/module.h>
19 21
@@ -49,7 +51,7 @@ struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
49 if (off != 0) 51 if (off != 0)
50 skb_reserve(skb, common->cachelsz - off); 52 skb_reserve(skb, common->cachelsz - off);
51 } else { 53 } else {
52 printk(KERN_ERR "skbuff alloc of size %u failed\n", len); 54 pr_err("skbuff alloc of size %u failed\n", len);
53 return NULL; 55 return NULL;
54 } 56 }
55 57
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 10dea37431b3..d81698015bf7 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -14,6 +14,8 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
17#include <linux/kernel.h> 19#include <linux/kernel.h>
18#include <linux/export.h> 20#include <linux/export.h>
19#include <net/cfg80211.h> 21#include <net/cfg80211.h>
@@ -562,7 +564,7 @@ static int __ath_regd_init(struct ath_regulatory *reg)
562 printk(KERN_DEBUG "ath: EEPROM regdomain: 0x%0x\n", reg->current_rd); 564 printk(KERN_DEBUG "ath: EEPROM regdomain: 0x%0x\n", reg->current_rd);
563 565
564 if (!ath_regd_is_eeprom_valid(reg)) { 566 if (!ath_regd_is_eeprom_valid(reg)) {
565 printk(KERN_ERR "ath: Invalid EEPROM contents\n"); 567 pr_err("Invalid EEPROM contents\n");
566 return -EINVAL; 568 return -EINVAL;
567 } 569 }
568 570
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 6c87a823f5a9..d07c0301da6a 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -3989,8 +3989,7 @@ static int reset_atmel_card(struct net_device *dev)
3989 atmel_copy_to_card(priv->dev, 0x8000, &fw[0x6000], len - 0x6000); 3989 atmel_copy_to_card(priv->dev, 0x8000, &fw[0x6000], len - 0x6000);
3990 } 3990 }
3991 3991
3992 if (fw_entry) 3992 release_firmware(fw_entry);
3993 release_firmware(fw_entry);
3994 } 3993 }
3995 3994
3996 err = atmel_wakeup_firmware(priv); 3995 err = atmel_wakeup_firmware(priv);
diff --git a/drivers/net/wireless/atmel_pci.c b/drivers/net/wireless/atmel_pci.c
index 9ab1192004c0..51e33b53386e 100644
--- a/drivers/net/wireless/atmel_pci.c
+++ b/drivers/net/wireless/atmel_pci.c
@@ -74,15 +74,4 @@ static void __devexit atmel_pci_remove(struct pci_dev *pdev)
74 stop_atmel_card(pci_get_drvdata(pdev)); 74 stop_atmel_card(pci_get_drvdata(pdev));
75} 75}
76 76
77static int __init atmel_init_module(void) 77module_pci_driver(atmel_driver);
78{
79 return pci_register_driver(&atmel_driver);
80}
81
82static void __exit atmel_cleanup_module(void)
83{
84 pci_unregister_driver(&atmel_driver);
85}
86
87module_init(atmel_init_module);
88module_exit(atmel_cleanup_module);
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index e4d6dc2e37d1..617afc8211b2 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -4010,6 +4010,20 @@ static int b43_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
4010 if (modparam_nohwcrypt) 4010 if (modparam_nohwcrypt)
4011 return -ENOSPC; /* User disabled HW-crypto */ 4011 return -ENOSPC; /* User disabled HW-crypto */
4012 4012
4013 if ((vif->type == NL80211_IFTYPE_ADHOC ||
4014 vif->type == NL80211_IFTYPE_MESH_POINT) &&
4015 (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
4016 key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
4017 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
4018 /*
4019 * For now, disable hw crypto for the RSN IBSS group keys. This
4020 * could be optimized in the future, but until that gets
4021 * implemented, use of software crypto for group addressed
4022 * frames is a acceptable to allow RSN IBSS to be used.
4023 */
4024 return -EOPNOTSUPP;
4025 }
4026
4013 mutex_lock(&wl->mutex); 4027 mutex_lock(&wl->mutex);
4014 4028
4015 dev = wl->current_dev; 4029 dev = wl->current_dev;
@@ -5281,6 +5295,8 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
5281 BIT(NL80211_IFTYPE_WDS) | 5295 BIT(NL80211_IFTYPE_WDS) |
5282 BIT(NL80211_IFTYPE_ADHOC); 5296 BIT(NL80211_IFTYPE_ADHOC);
5283 5297
5298 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
5299
5284 hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1; 5300 hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1;
5285 wl->mac80211_initially_registered_queues = hw->queues; 5301 wl->mac80211_initially_registered_queues = hw->queues;
5286 hw->max_rates = 2; 5302 hw->max_rates = 2;
diff --git a/drivers/net/wireless/b43/sdio.c b/drivers/net/wireless/b43/sdio.c
index 80b0755ed3af..a54fb2d29089 100644
--- a/drivers/net/wireless/b43/sdio.c
+++ b/drivers/net/wireless/b43/sdio.c
@@ -193,7 +193,7 @@ static struct sdio_driver b43_sdio_driver = {
193 .name = "b43-sdio", 193 .name = "b43-sdio",
194 .id_table = b43_sdio_ids, 194 .id_table = b43_sdio_ids,
195 .probe = b43_sdio_probe, 195 .probe = b43_sdio_probe,
196 .remove = b43_sdio_remove, 196 .remove = __devexit_p(b43_sdio_remove),
197}; 197};
198 198
199int b43_sdio_init(void) 199int b43_sdio_init(void)
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 2c5367884b3f..b31ccc02fa21 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -290,7 +290,8 @@ int b43_generate_txhdr(struct b43_wldev *dev,
290 txhdr->dur_fb = wlhdr->duration_id; 290 txhdr->dur_fb = wlhdr->duration_id;
291 } else { 291 } else {
292 txhdr->dur_fb = ieee80211_generic_frame_duration( 292 txhdr->dur_fb = ieee80211_generic_frame_duration(
293 dev->wl->hw, info->control.vif, fragment_len, fbrate); 293 dev->wl->hw, info->control.vif, info->band,
294 fragment_len, fbrate);
294 } 295 }
295 296
296 plcp_fragment_len = fragment_len + FCS_LEN; 297 plcp_fragment_len = fragment_len + FCS_LEN;
@@ -378,7 +379,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
378 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) 379 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
379 phy_ctl |= B43_TXH_PHY_SHORTPRMBL; 380 phy_ctl |= B43_TXH_PHY_SHORTPRMBL;
380 381
381 switch (b43_ieee80211_antenna_sanitize(dev, info->antenna_sel_tx)) { 382 switch (b43_ieee80211_antenna_sanitize(dev, 0)) {
382 case 0: /* Default */ 383 case 0: /* Default */
383 phy_ctl |= B43_TXH_PHY_ANT01AUTO; 384 phy_ctl |= B43_TXH_PHY_ANT01AUTO;
384 break; 385 break;
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index df7e16dfb36c..1be214b815fb 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -1056,6 +1056,7 @@ static void b43legacy_write_probe_resp_plcp(struct b43legacy_wldev *dev,
1056 b43legacy_generate_plcp_hdr(&plcp, size + FCS_LEN, rate->hw_value); 1056 b43legacy_generate_plcp_hdr(&plcp, size + FCS_LEN, rate->hw_value);
1057 dur = ieee80211_generic_frame_duration(dev->wl->hw, 1057 dur = ieee80211_generic_frame_duration(dev->wl->hw,
1058 dev->wl->vif, 1058 dev->wl->vif,
1059 IEEE80211_BAND_2GHZ,
1059 size, 1060 size,
1060 rate); 1061 rate);
1061 /* Write PLCP in two parts and timing for packet transfer */ 1062 /* Write PLCP in two parts and timing for packet transfer */
@@ -1121,6 +1122,7 @@ static const u8 *b43legacy_generate_probe_resp(struct b43legacy_wldev *dev,
1121 IEEE80211_STYPE_PROBE_RESP); 1122 IEEE80211_STYPE_PROBE_RESP);
1122 dur = ieee80211_generic_frame_duration(dev->wl->hw, 1123 dur = ieee80211_generic_frame_duration(dev->wl->hw,
1123 dev->wl->vif, 1124 dev->wl->vif,
1125 IEEE80211_BAND_2GHZ,
1124 *dest_size, 1126 *dest_size,
1125 rate); 1127 rate);
1126 hdr->duration_id = dur; 1128 hdr->duration_id = dur;
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index 5188fab0b377..a8012f2749ee 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -228,6 +228,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
228 } else { 228 } else {
229 txhdr->dur_fb = ieee80211_generic_frame_duration(dev->wl->hw, 229 txhdr->dur_fb = ieee80211_generic_frame_duration(dev->wl->hw,
230 info->control.vif, 230 info->control.vif,
231 info->band,
231 fragment_len, 232 fragment_len,
232 rate_fb); 233 rate_fb);
233 } 234 }
@@ -277,19 +278,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
277 phy_ctl |= B43legacy_TX4_PHY_ENC_OFDM; 278 phy_ctl |= B43legacy_TX4_PHY_ENC_OFDM;
278 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) 279 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
279 phy_ctl |= B43legacy_TX4_PHY_SHORTPRMBL; 280 phy_ctl |= B43legacy_TX4_PHY_SHORTPRMBL;
280 switch (info->antenna_sel_tx) { 281 phy_ctl |= B43legacy_TX4_PHY_ANTLAST;
281 case 0:
282 phy_ctl |= B43legacy_TX4_PHY_ANTLAST;
283 break;
284 case 1:
285 phy_ctl |= B43legacy_TX4_PHY_ANT0;
286 break;
287 case 2:
288 phy_ctl |= B43legacy_TX4_PHY_ANT1;
289 break;
290 default:
291 B43legacy_BUG_ON(1);
292 }
293 282
294 /* MAC control */ 283 /* MAC control */
295 rates = info->control.rates; 284 rates = info->control.rates;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 07686a748d3c..9f637014486e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -632,7 +632,6 @@ extern const struct bcmevent_name bcmevent_names[];
632extern uint brcmf_c_mkiovar(char *name, char *data, uint datalen, 632extern uint brcmf_c_mkiovar(char *name, char *data, uint datalen,
633 char *buf, uint len); 633 char *buf, uint len);
634 634
635extern int brcmf_net_attach(struct brcmf_pub *drvr, int idx);
636extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev); 635extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
637 636
638extern s32 brcmf_exec_dcmd(struct net_device *dev, u32 cmd, void *arg, u32 len); 637extern s32 brcmf_exec_dcmd(struct net_device *dev, u32 cmd, void *arg, u32 len);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
index b3e3b7f25d82..a5c15cac5e7d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
@@ -421,6 +421,7 @@ int brcmf_proto_hdrpull(struct device *dev, int *ifidx,
421 pktbuf->priority = h->priority & BDC_PRIORITY_MASK; 421 pktbuf->priority = h->priority & BDC_PRIORITY_MASK;
422 422
423 skb_pull(pktbuf, BDC_HEADER_LEN); 423 skb_pull(pktbuf, BDC_HEADER_LEN);
424 skb_pull(pktbuf, h->data_offset << 2);
424 425
425 return 0; 426 return 0;
426} 427}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
index 4187435220f3..236cb9fa460c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -799,7 +799,6 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
799{ 799{
800 char iovbuf[BRCMF_EVENTING_MASK_LEN + 12]; /* Room for 800 char iovbuf[BRCMF_EVENTING_MASK_LEN + 12]; /* Room for
801 "event_msgs" + '\0' + bitvec */ 801 "event_msgs" + '\0' + bitvec */
802 uint up = 0;
803 char buf[128], *ptr; 802 char buf[128], *ptr;
804 u32 dongle_align = drvr->bus_if->align; 803 u32 dongle_align = drvr->bus_if->align;
805 u32 glom = 0; 804 u32 glom = 0;
@@ -853,9 +852,6 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
853 brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf, 852 brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf,
854 sizeof(iovbuf)); 853 sizeof(iovbuf));
855 854
856 /* Force STA UP */
857 brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_UP, (char *)&up, sizeof(up));
858
859 /* Setup event_msgs */ 855 /* Setup event_msgs */
860 brcmf_c_mkiovar("event_msgs", drvr->eventmask, BRCMF_EVENTING_MASK_LEN, 856 brcmf_c_mkiovar("event_msgs", drvr->eventmask, BRCMF_EVENTING_MASK_LEN,
861 iovbuf, sizeof(iovbuf)); 857 iovbuf, sizeof(iovbuf));
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 2a1e5ae0c402..8933f9b31a9a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -799,6 +799,7 @@ static int brcmf_netdev_open(struct net_device *ndev)
799 struct brcmf_bus *bus_if = drvr->bus_if; 799 struct brcmf_bus *bus_if = drvr->bus_if;
800 u32 toe_ol; 800 u32 toe_ol;
801 s32 ret = 0; 801 s32 ret = 0;
802 uint up = 0;
802 803
803 brcmf_dbg(TRACE, "ifidx %d\n", ifp->idx); 804 brcmf_dbg(TRACE, "ifidx %d\n", ifp->idx);
804 805
@@ -822,6 +823,10 @@ static int brcmf_netdev_open(struct net_device *ndev)
822 drvr->iflist[ifp->idx]->ndev->features &= 823 drvr->iflist[ifp->idx]->ndev->features &=
823 ~NETIF_F_IP_CSUM; 824 ~NETIF_F_IP_CSUM;
824 } 825 }
826
827 /* make sure RF is ready for work */
828 brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_UP, (char *)&up, sizeof(up));
829
825 /* Allow transmit calls */ 830 /* Allow transmit calls */
826 netif_start_queue(ndev); 831 netif_start_queue(ndev);
827 drvr->bus_if->drvr_up = true; 832 drvr->bus_if->drvr_up = true;
@@ -843,6 +848,63 @@ static const struct net_device_ops brcmf_netdev_ops_pri = {
843 .ndo_set_rx_mode = brcmf_netdev_set_multicast_list 848 .ndo_set_rx_mode = brcmf_netdev_set_multicast_list
844}; 849};
845 850
851static int brcmf_net_attach(struct brcmf_if *ifp)
852{
853 struct brcmf_pub *drvr = ifp->drvr;
854 struct net_device *ndev;
855 u8 temp_addr[ETH_ALEN];
856
857 brcmf_dbg(TRACE, "ifidx %d\n", ifp->idx);
858
859 ndev = drvr->iflist[ifp->idx]->ndev;
860 ndev->netdev_ops = &brcmf_netdev_ops_pri;
861
862 /*
863 * determine mac address to use
864 */
865 if (is_valid_ether_addr(ifp->mac_addr))
866 memcpy(temp_addr, ifp->mac_addr, ETH_ALEN);
867 else
868 memcpy(temp_addr, drvr->mac, ETH_ALEN);
869
870 if (ifp->idx == 1) {
871 brcmf_dbg(TRACE, "ACCESS POINT MAC:\n");
872 /* ACCESSPOINT INTERFACE CASE */
873 temp_addr[0] |= 0X02; /* set bit 2 ,
874 - Locally Administered address */
875
876 }
877 ndev->hard_header_len = ETH_HLEN + drvr->hdrlen;
878 ndev->ethtool_ops = &brcmf_ethtool_ops;
879
880 drvr->rxsz = ndev->mtu + ndev->hard_header_len +
881 drvr->hdrlen;
882
883 memcpy(ndev->dev_addr, temp_addr, ETH_ALEN);
884
885 /* attach to cfg80211 for primary interface */
886 if (!ifp->idx) {
887 drvr->config = brcmf_cfg80211_attach(ndev, drvr->dev, drvr);
888 if (drvr->config == NULL) {
889 brcmf_dbg(ERROR, "wl_cfg80211_attach failed\n");
890 goto fail;
891 }
892 }
893
894 if (register_netdev(ndev) != 0) {
895 brcmf_dbg(ERROR, "couldn't register the net device\n");
896 goto fail;
897 }
898
899 brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
900
901 return 0;
902
903fail:
904 ndev->netdev_ops = NULL;
905 return -EBADE;
906}
907
846int 908int
847brcmf_add_if(struct device *dev, int ifidx, char *name, u8 *mac_addr) 909brcmf_add_if(struct device *dev, int ifidx, char *name, u8 *mac_addr)
848{ 910{
@@ -882,7 +944,7 @@ brcmf_add_if(struct device *dev, int ifidx, char *name, u8 *mac_addr)
882 if (mac_addr != NULL) 944 if (mac_addr != NULL)
883 memcpy(&ifp->mac_addr, mac_addr, ETH_ALEN); 945 memcpy(&ifp->mac_addr, mac_addr, ETH_ALEN);
884 946
885 if (brcmf_net_attach(drvr, ifp->idx)) { 947 if (brcmf_net_attach(ifp)) {
886 brcmf_dbg(ERROR, "brcmf_net_attach failed"); 948 brcmf_dbg(ERROR, "brcmf_net_attach failed");
887 free_netdev(ifp->ndev); 949 free_netdev(ifp->ndev);
888 drvr->iflist[ifidx] = NULL; 950 drvr->iflist[ifidx] = NULL;
@@ -1016,69 +1078,16 @@ int brcmf_bus_start(struct device *dev)
1016 if (ret < 0) 1078 if (ret < 0)
1017 return ret; 1079 return ret;
1018 1080
1081 /* add primary networking interface */
1082 ret = brcmf_add_if(dev, 0, "wlan%d", drvr->mac);
1083 if (ret < 0)
1084 return ret;
1085
1019 /* signal bus ready */ 1086 /* signal bus ready */
1020 bus_if->state = BRCMF_BUS_DATA; 1087 bus_if->state = BRCMF_BUS_DATA;
1021 return 0; 1088 return 0;
1022} 1089}
1023 1090
1024int brcmf_net_attach(struct brcmf_pub *drvr, int ifidx)
1025{
1026 struct net_device *ndev;
1027 u8 temp_addr[ETH_ALEN] = {
1028 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33};
1029
1030 brcmf_dbg(TRACE, "ifidx %d\n", ifidx);
1031
1032 ndev = drvr->iflist[ifidx]->ndev;
1033 ndev->netdev_ops = &brcmf_netdev_ops_pri;
1034
1035 /*
1036 * We have to use the primary MAC for virtual interfaces
1037 */
1038 if (ifidx != 0) {
1039 /* for virtual interfaces use the primary MAC */
1040 memcpy(temp_addr, drvr->mac, ETH_ALEN);
1041
1042 }
1043
1044 if (ifidx == 1) {
1045 brcmf_dbg(TRACE, "ACCESS POINT MAC:\n");
1046 /* ACCESSPOINT INTERFACE CASE */
1047 temp_addr[0] |= 0X02; /* set bit 2 ,
1048 - Locally Administered address */
1049
1050 }
1051 ndev->hard_header_len = ETH_HLEN + drvr->hdrlen;
1052 ndev->ethtool_ops = &brcmf_ethtool_ops;
1053
1054 drvr->rxsz = ndev->mtu + ndev->hard_header_len +
1055 drvr->hdrlen;
1056
1057 memcpy(ndev->dev_addr, temp_addr, ETH_ALEN);
1058
1059 /* attach to cfg80211 for primary interface */
1060 if (!ifidx) {
1061 drvr->config = brcmf_cfg80211_attach(ndev, drvr->dev, drvr);
1062 if (drvr->config == NULL) {
1063 brcmf_dbg(ERROR, "wl_cfg80211_attach failed\n");
1064 goto fail;
1065 }
1066 }
1067
1068 if (register_netdev(ndev) != 0) {
1069 brcmf_dbg(ERROR, "couldn't register the net device\n");
1070 goto fail;
1071 }
1072
1073 brcmf_dbg(INFO, "%s: Broadcom Dongle Host Driver\n", ndev->name);
1074
1075 return 0;
1076
1077fail:
1078 ndev->netdev_ops = NULL;
1079 return -EBADE;
1080}
1081
1082static void brcmf_bus_detach(struct brcmf_pub *drvr) 1091static void brcmf_bus_detach(struct brcmf_pub *drvr)
1083{ 1092{
1084 brcmf_dbg(TRACE, "Enter\n"); 1093 brcmf_dbg(TRACE, "Enter\n");
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index e2b34e1563f4..a80b840051a7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -3988,12 +3988,6 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
3988 } 3988 }
3989 } 3989 }
3990 3990
3991 /* add interface and open for business */
3992 if (brcmf_add_if(bus->sdiodev->dev, 0, "wlan%d", NULL)) {
3993 brcmf_dbg(ERROR, "Add primary net device interface failed!!\n");
3994 goto fail;
3995 }
3996
3997 return bus; 3991 return bus;
3998 3992
3999fail: 3993fail:
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 82364223e817..1d67ecf681b7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -1383,14 +1383,6 @@ static int brcmf_usb_probe_cb(struct device *dev, const char *desc,
1383 goto fail; 1383 goto fail;
1384 } 1384 }
1385 1385
1386 /* add interface and open for business */
1387 ret = brcmf_add_if(dev, 0, "wlan%d", NULL);
1388 if (ret) {
1389 brcmf_dbg(ERROR, "Add primary net device interface failed!!\n");
1390 brcmf_detach(dev);
1391 goto fail;
1392 }
1393
1394 return 0; 1386 return 0;
1395fail: 1387fail:
1396 /* Release resources in reverse order */ 1388 /* Release resources in reverse order */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
index 55e9f45fce22..0efe88e25a9a 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
@@ -628,6 +628,40 @@ brcms_c_country_aggregate_map(struct brcms_cm_info *wlc_cm, const char *ccode,
628 return false; 628 return false;
629} 629}
630 630
631/*
632 * Indicates whether the country provided is valid to pass
633 * to cfg80211 or not.
634 *
635 * returns true if valid; false if not.
636 */
637static bool brcms_c_country_valid(const char *ccode)
638{
639 /*
640 * only allow ascii alpha uppercase for the first 2
641 * chars.
642 */
643 if (!((0x80 & ccode[0]) == 0 && ccode[0] >= 0x41 && ccode[0] <= 0x5A &&
644 (0x80 & ccode[1]) == 0 && ccode[1] >= 0x41 && ccode[1] <= 0x5A &&
645 ccode[2] == '\0'))
646 return false;
647
648 /*
649 * do not match ISO 3166-1 user assigned country codes
650 * that may be in the driver table
651 */
652 if (!strcmp("AA", ccode) || /* AA */
653 !strcmp("ZZ", ccode) || /* ZZ */
654 ccode[0] == 'X' || /* XA - XZ */
655 (ccode[0] == 'Q' && /* QM - QZ */
656 (ccode[1] >= 'M' && ccode[1] <= 'Z')))
657 return false;
658
659 if (!strcmp("NA", ccode))
660 return false;
661
662 return true;
663}
664
631/* Lookup a country info structure from a null terminated country 665/* Lookup a country info structure from a null terminated country
632 * abbreviation and regrev directly with no translation. 666 * abbreviation and regrev directly with no translation.
633 */ 667 */
@@ -1089,7 +1123,7 @@ struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc)
1089 1123
1090 /* store the country code for passing up as a regulatory hint */ 1124 /* store the country code for passing up as a regulatory hint */
1091 ccode = getvar(wlc->hw->sih, BRCMS_SROM_CCODE); 1125 ccode = getvar(wlc->hw->sih, BRCMS_SROM_CCODE);
1092 if (ccode) 1126 if (ccode && brcms_c_country_valid(ccode))
1093 strncpy(wlc->pub->srom_ccode, ccode, BRCM_CNTRY_BUF_SZ - 1); 1127 strncpy(wlc->pub->srom_ccode, ccode, BRCM_CNTRY_BUF_SZ - 1);
1094 1128
1095 /* 1129 /*
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/d11.h b/drivers/net/wireless/brcm80211/brcmsmac/d11.h
index 1948cb2771e9..3f659e09f1cc 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/d11.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/d11.h
@@ -733,7 +733,7 @@ struct cck_phy_hdr {
733 do { \ 733 do { \
734 plcp[1] = len & 0xff; \ 734 plcp[1] = len & 0xff; \
735 plcp[2] = ((len >> 8) & 0xff); \ 735 plcp[2] = ((len >> 8) & 0xff); \
736 } while (0); 736 } while (0)
737 737
738#define BRCMS_SET_MIMO_PLCP_AMPDU(plcp) (plcp[3] |= MIMO_PLCP_AMPDU) 738#define BRCMS_SET_MIMO_PLCP_AMPDU(plcp) (plcp[3] |= MIMO_PLCP_AMPDU)
739#define BRCMS_CLR_MIMO_PLCP_AMPDU(plcp) (plcp[3] &= ~MIMO_PLCP_AMPDU) 739#define BRCMS_CLR_MIMO_PLCP_AMPDU(plcp) (plcp[3] &= ~MIMO_PLCP_AMPDU)
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 569ab8abd2a1..aa15558f75c8 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -1069,11 +1069,7 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
1069 wiphy_err(wl->wiphy, "%s: ieee80211_register_hw failed, status" 1069 wiphy_err(wl->wiphy, "%s: ieee80211_register_hw failed, status"
1070 "%d\n", __func__, err); 1070 "%d\n", __func__, err);
1071 1071
1072 if (wl->pub->srom_ccode[0]) 1072 if (wl->pub->srom_ccode[0] && brcms_set_hint(wl, wl->pub->srom_ccode))
1073 err = brcms_set_hint(wl, wl->pub->srom_ccode);
1074 else
1075 err = brcms_set_hint(wl, "US");
1076 if (err)
1077 wiphy_err(wl->wiphy, "%s: regulatory_hint failed, status %d\n", 1073 wiphy_err(wl->wiphy, "%s: regulatory_hint failed, status %d\n",
1078 __func__, err); 1074 __func__, err);
1079 1075
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
index ce8562aa5db0..0fce56235f38 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -207,8 +207,7 @@ static const iqcal_gain_params_lcnphy *tbl_iqcal_gainparams_lcnphy[1] = {
207}; 207};
208 208
209static const u16 iqcal_gainparams_numgains_lcnphy[1] = { 209static const u16 iqcal_gainparams_numgains_lcnphy[1] = {
210 sizeof(tbl_iqcal_gainparams_lcnphy_2G) / 210 ARRAY_SIZE(tbl_iqcal_gainparams_lcnphy_2G),
211 sizeof(*tbl_iqcal_gainparams_lcnphy_2G),
212}; 211};
213 212
214static const struct lcnphy_sfo_cfg lcnphy_sfo_cfg[] = { 213static const struct lcnphy_sfo_cfg lcnphy_sfo_cfg[] = {
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
index 39095741fd05..812b6e38526e 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
@@ -16353,11 +16353,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
16353 wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_RX2TX, 16353 wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_RX2TX,
16354 rfseq_rx2tx_events_rev3_ipa, 16354 rfseq_rx2tx_events_rev3_ipa,
16355 rfseq_rx2tx_dlys_rev3_ipa, 16355 rfseq_rx2tx_dlys_rev3_ipa,
16356 sizeof 16356 ARRAY_SIZE(rfseq_rx2tx_events_rev3_ipa));
16357 (rfseq_rx2tx_events_rev3_ipa) /
16358 sizeof
16359 (rfseq_rx2tx_events_rev3_ipa
16360 [0]));
16361 16357
16362 mod_phy_reg(pi, 0x299, (0x3 << 14), (0x1 << 14)); 16358 mod_phy_reg(pi, 0x299, (0x3 << 14), (0x1 << 14));
16363 mod_phy_reg(pi, 0x29d, (0x3 << 14), (0x1 << 14)); 16359 mod_phy_reg(pi, 0x29d, (0x3 << 14), (0x1 << 14));
@@ -16858,18 +16854,13 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
16858 wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_TX2RX, 16854 wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_TX2RX,
16859 rfseq_tx2rx_events_rev3, 16855 rfseq_tx2rx_events_rev3,
16860 rfseq_tx2rx_dlys_rev3, 16856 rfseq_tx2rx_dlys_rev3,
16861 sizeof(rfseq_tx2rx_events_rev3) / 16857 ARRAY_SIZE(rfseq_tx2rx_events_rev3));
16862 sizeof(rfseq_tx2rx_events_rev3[0]));
16863 16858
16864 if (PHY_IPA(pi)) 16859 if (PHY_IPA(pi))
16865 wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_RX2TX, 16860 wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_RX2TX,
16866 rfseq_rx2tx_events_rev3_ipa, 16861 rfseq_rx2tx_events_rev3_ipa,
16867 rfseq_rx2tx_dlys_rev3_ipa, 16862 rfseq_rx2tx_dlys_rev3_ipa,
16868 sizeof 16863 ARRAY_SIZE(rfseq_rx2tx_events_rev3_ipa));
16869 (rfseq_rx2tx_events_rev3_ipa) /
16870 sizeof
16871 (rfseq_rx2tx_events_rev3_ipa
16872 [0]));
16873 16864
16874 if ((pi->sh->hw_phyrxchain != 0x3) && 16865 if ((pi->sh->hw_phyrxchain != 0x3) &&
16875 (pi->sh->hw_phyrxchain != pi->sh->hw_phytxchain)) { 16866 (pi->sh->hw_phyrxchain != pi->sh->hw_phytxchain)) {
@@ -16885,8 +16876,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
16885 pi, NPHY_RFSEQ_RX2TX, 16876 pi, NPHY_RFSEQ_RX2TX,
16886 rfseq_rx2tx_events_rev3, 16877 rfseq_rx2tx_events_rev3,
16887 rfseq_rx2tx_dlys_rev3, 16878 rfseq_rx2tx_dlys_rev3,
16888 sizeof(rfseq_rx2tx_events_rev3) / 16879 ARRAY_SIZE(rfseq_rx2tx_events_rev3));
16889 sizeof(rfseq_rx2tx_events_rev3[0]));
16890 } 16880 }
16891 16881
16892 if (CHSPEC_IS2G(pi->radio_chanspec)) 16882 if (CHSPEC_IS2G(pi->radio_chanspec))
@@ -17209,13 +17199,11 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi)
17209 17199
17210 wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_RX2TX, rfseq_rx2tx_events, 17200 wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_RX2TX, rfseq_rx2tx_events,
17211 rfseq_rx2tx_dlys, 17201 rfseq_rx2tx_dlys,
17212 sizeof(rfseq_rx2tx_events) / 17202 ARRAY_SIZE(rfseq_rx2tx_events));
17213 sizeof(rfseq_rx2tx_events[0]));
17214 17203
17215 wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_TX2RX, rfseq_tx2rx_events, 17204 wlc_phy_set_rfseq_nphy(pi, NPHY_RFSEQ_TX2RX, rfseq_tx2rx_events,
17216 rfseq_tx2rx_dlys, 17205 rfseq_tx2rx_dlys,
17217 sizeof(rfseq_tx2rx_events) / 17206 ARRAY_SIZE(rfseq_tx2rx_events));
17218 sizeof(rfseq_tx2rx_events[0]));
17219 17207
17220 wlc_phy_workarounds_nphy_gainctrl(pi); 17208 wlc_phy_workarounds_nphy_gainctrl(pi);
17221 17209
@@ -19357,8 +19345,7 @@ static void wlc_phy_spurwar_nphy(struct brcms_phy *pi)
19357 } 19345 }
19358 19346
19359 if (isAdjustNoiseVar) { 19347 if (isAdjustNoiseVar) {
19360 numTonesAdjust = sizeof(nphy_adj_tone_id_buf) / 19348 numTonesAdjust = ARRAY_SIZE(nphy_adj_tone_id_buf);
19361 sizeof(nphy_adj_tone_id_buf[0]);
19362 19349
19363 wlc_phy_adjust_min_noisevar_nphy( 19350 wlc_phy_adjust_min_noisevar_nphy(
19364 pi, 19351 pi,
@@ -25204,32 +25191,26 @@ static u8 wlc_phy_a3_nphy(struct brcms_phy *pi, u8 start_gain, u8 core)
25204 25191
25205 phy_a15 = pad_gain_codes_used_2057rev5; 25192 phy_a15 = pad_gain_codes_used_2057rev5;
25206 phy_a13 = 25193 phy_a13 =
25207 sizeof(pad_gain_codes_used_2057rev5) / 25194 ARRAY_SIZE(pad_gain_codes_used_2057rev5) - 1;
25208 sizeof(pad_gain_codes_used_2057rev5
25209 [0]) - 1;
25210 25195
25211 } else if ((pi->pubpi.radiorev == 7) 25196 } else if ((pi->pubpi.radiorev == 7)
25212 || (pi->pubpi.radiorev == 8)) { 25197 || (pi->pubpi.radiorev == 8)) {
25213 25198
25214 phy_a15 = pad_gain_codes_used_2057rev7; 25199 phy_a15 = pad_gain_codes_used_2057rev7;
25215 phy_a13 = 25200 phy_a13 =
25216 sizeof(pad_gain_codes_used_2057rev7) / 25201 ARRAY_SIZE(pad_gain_codes_used_2057rev7) - 1;
25217 sizeof(pad_gain_codes_used_2057rev7
25218 [0]) - 1;
25219 25202
25220 } else { 25203 } else {
25221 25204
25222 phy_a15 = pad_all_gain_codes_2057; 25205 phy_a15 = pad_all_gain_codes_2057;
25223 phy_a13 = sizeof(pad_all_gain_codes_2057) / 25206 phy_a13 = ARRAY_SIZE(pad_all_gain_codes_2057) -
25224 sizeof(pad_all_gain_codes_2057[0]) -
25225 1; 25207 1;
25226 } 25208 }
25227 25209
25228 } else { 25210 } else {
25229 25211
25230 phy_a15 = pga_all_gain_codes_2057; 25212 phy_a15 = pga_all_gain_codes_2057;
25231 phy_a13 = sizeof(pga_all_gain_codes_2057) / 25213 phy_a13 = ARRAY_SIZE(pga_all_gain_codes_2057) - 1;
25232 sizeof(pga_all_gain_codes_2057[0]) - 1;
25233 } 25214 }
25234 25215
25235 phy_a14 = 0; 25216 phy_a14 = 0;
diff --git a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
index 5fb17d53c9b2..333193f20e1c 100644
--- a/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
+++ b/drivers/net/wireless/brcm80211/include/brcm_hw_ids.h
@@ -17,17 +17,7 @@
17#ifndef _BRCM_HW_IDS_H_ 17#ifndef _BRCM_HW_IDS_H_
18#define _BRCM_HW_IDS_H_ 18#define _BRCM_HW_IDS_H_
19 19
20#define BCM4325_D11DUAL_ID 0x431b 20#define BCM4313_D11N2G_ID 0x4727 /* 4313 802.11n 2.4G device */
21#define BCM4325_D11G_ID 0x431c
22#define BCM4325_D11A_ID 0x431d
23
24#define BCM4329_D11N2G_ID 0x432f /* 4329 802.11n 2.4G device */
25#define BCM4329_D11N5G_ID 0x4330 /* 4329 802.11n 5G device */
26#define BCM4329_D11NDUAL_ID 0x432e
27
28#define BCM4319_D11N_ID 0x4337 /* 4319 802.11n dualband device */
29#define BCM4319_D11N2G_ID 0x4338 /* 4319 802.11n 2.4G device */
30#define BCM4319_D11N5G_ID 0x4339 /* 4319 802.11n 5G device */
31 21
32#define BCM43224_D11N_ID 0x4353 /* 43224 802.11n dualband device */ 22#define BCM43224_D11N_ID 0x4353 /* 43224 802.11n dualband device */
33#define BCM43224_D11N_ID_VEN1 0x0576 /* Vendor specific 43224 802.11n db */ 23#define BCM43224_D11N_ID_VEN1 0x0576 /* Vendor specific 43224 802.11n db */
@@ -37,23 +27,15 @@
37#define BCM43236_D11N_ID 0x4346 /* 43236 802.11n dualband device */ 27#define BCM43236_D11N_ID 0x4346 /* 43236 802.11n dualband device */
38#define BCM43236_D11N2G_ID 0x4347 /* 43236 802.11n 2.4GHz device */ 28#define BCM43236_D11N2G_ID 0x4347 /* 43236 802.11n 2.4GHz device */
39 29
40#define BCM4313_D11N2G_ID 0x4727 /* 4313 802.11n 2.4G device */ 30/* Chipcommon Core Chip IDs */
41 31#define BCM4313_CHIP_ID 0x4313
42/* Chip IDs */ 32#define BCM43224_CHIP_ID 43224
43#define BCM4313_CHIP_ID 0x4313 /* 4313 chip id */ 33#define BCM43225_CHIP_ID 43225
44#define BCM4319_CHIP_ID 0x4319 /* 4319 chip id */ 34#define BCM43235_CHIP_ID 43235
45 35#define BCM43236_CHIP_ID 43236
46#define BCM43224_CHIP_ID 43224 /* 43224 chipcommon chipid */ 36#define BCM43238_CHIP_ID 43238
47#define BCM43225_CHIP_ID 43225 /* 43225 chipcommon chipid */ 37#define BCM4329_CHIP_ID 0x4329
48#define BCM43421_CHIP_ID 43421 /* 43421 chipcommon chipid */ 38#define BCM4330_CHIP_ID 0x4330
49#define BCM43235_CHIP_ID 43235 /* 43235 chipcommon chipid */ 39#define BCM4331_CHIP_ID 0x4331
50#define BCM43236_CHIP_ID 43236 /* 43236 chipcommon chipid */
51#define BCM43238_CHIP_ID 43238 /* 43238 chipcommon chipid */
52#define BCM4329_CHIP_ID 0x4329 /* 4329 chipcommon chipid */
53#define BCM4325_CHIP_ID 0x4325 /* 4325 chipcommon chipid */
54#define BCM4331_CHIP_ID 0x4331 /* 4331 chipcommon chipid */
55#define BCM4336_CHIP_ID 0x4336 /* 4336 chipcommon chipid */
56#define BCM4330_CHIP_ID 0x4330 /* 4330 chipcommon chipid */
57#define BCM6362_CHIP_ID 0x6362 /* 6362 chipcommon chipid */
58 40
59#endif /* _BRCM_HW_IDS_H_ */ 41#endif /* _BRCM_HW_IDS_H_ */
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index bfa0d54221e8..627bc12074c7 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -244,8 +244,7 @@ u16 hostap_tx_callback_register(local_info_t *local,
244 unsigned long flags; 244 unsigned long flags;
245 struct hostap_tx_callback_info *entry; 245 struct hostap_tx_callback_info *entry;
246 246
247 entry = kmalloc(sizeof(*entry), 247 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
248 GFP_ATOMIC);
249 if (entry == NULL) 248 if (entry == NULL)
250 return 0; 249 return 0;
251 250
diff --git a/drivers/net/wireless/hostap/hostap_pci.c b/drivers/net/wireless/hostap/hostap_pci.c
index 972a9c3af39e..05ca3402dca7 100644
--- a/drivers/net/wireless/hostap/hostap_pci.c
+++ b/drivers/net/wireless/hostap/hostap_pci.c
@@ -457,18 +457,4 @@ static struct pci_driver prism2_pci_driver = {
457#endif /* CONFIG_PM */ 457#endif /* CONFIG_PM */
458}; 458};
459 459
460 460module_pci_driver(prism2_pci_driver);
461static int __init init_prism2_pci(void)
462{
463 return pci_register_driver(&prism2_pci_driver);
464}
465
466
467static void __exit exit_prism2_pci(void)
468{
469 pci_unregister_driver(&prism2_pci_driver);
470}
471
472
473module_init(init_prism2_pci);
474module_exit(exit_prism2_pci);
diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c
index 33e79037770b..c3d067ee4db9 100644
--- a/drivers/net/wireless/hostap/hostap_plx.c
+++ b/drivers/net/wireless/hostap/hostap_plx.c
@@ -616,18 +616,4 @@ static struct pci_driver prism2_plx_driver = {
616 .remove = prism2_plx_remove, 616 .remove = prism2_plx_remove,
617}; 617};
618 618
619 619module_pci_driver(prism2_plx_driver);
620static int __init init_prism2_plx(void)
621{
622 return pci_register_driver(&prism2_plx_driver);
623}
624
625
626static void __exit exit_prism2_plx(void)
627{
628 pci_unregister_driver(&prism2_plx_driver);
629}
630
631
632module_init(init_prism2_plx);
633module_exit(exit_prism2_plx);
diff --git a/drivers/net/wireless/ipw2x00/ipw.h b/drivers/net/wireless/ipw2x00/ipw.h
new file mode 100644
index 000000000000..4007bf5ed6f3
--- /dev/null
+++ b/drivers/net/wireless/ipw2x00/ipw.h
@@ -0,0 +1,23 @@
1/*
2 * Intel Pro/Wireless 2100, 2200BG, 2915ABG network connection driver
3 *
4 * Copyright 2012 Stanislav Yakovlev <stas.yakovlev@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef __IPW_H__
12#define __IPW_H__
13
14#include <linux/ieee80211.h>
15
16static const u32 ipw_cipher_suites[] = {
17 WLAN_CIPHER_SUITE_WEP40,
18 WLAN_CIPHER_SUITE_WEP104,
19 WLAN_CIPHER_SUITE_TKIP,
20 WLAN_CIPHER_SUITE_CCMP,
21};
22
23#endif
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index f0551f807f69..c72136c07774 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -166,6 +166,7 @@ that only one external action is invoked at a time.
166#include <net/lib80211.h> 166#include <net/lib80211.h>
167 167
168#include "ipw2100.h" 168#include "ipw2100.h"
169#include "ipw.h"
169 170
170#define IPW2100_VERSION "git-1.2.2" 171#define IPW2100_VERSION "git-1.2.2"
171 172
@@ -343,38 +344,50 @@ static struct iw_handler_def ipw2100_wx_handler_def;
343 344
344static inline void read_register(struct net_device *dev, u32 reg, u32 * val) 345static inline void read_register(struct net_device *dev, u32 reg, u32 * val)
345{ 346{
346 *val = readl((void __iomem *)(dev->base_addr + reg)); 347 struct ipw2100_priv *priv = libipw_priv(dev);
348
349 *val = ioread32(priv->ioaddr + reg);
347 IPW_DEBUG_IO("r: 0x%08X => 0x%08X\n", reg, *val); 350 IPW_DEBUG_IO("r: 0x%08X => 0x%08X\n", reg, *val);
348} 351}
349 352
350static inline void write_register(struct net_device *dev, u32 reg, u32 val) 353static inline void write_register(struct net_device *dev, u32 reg, u32 val)
351{ 354{
352 writel(val, (void __iomem *)(dev->base_addr + reg)); 355 struct ipw2100_priv *priv = libipw_priv(dev);
356
357 iowrite32(val, priv->ioaddr + reg);
353 IPW_DEBUG_IO("w: 0x%08X <= 0x%08X\n", reg, val); 358 IPW_DEBUG_IO("w: 0x%08X <= 0x%08X\n", reg, val);
354} 359}
355 360
356static inline void read_register_word(struct net_device *dev, u32 reg, 361static inline void read_register_word(struct net_device *dev, u32 reg,
357 u16 * val) 362 u16 * val)
358{ 363{
359 *val = readw((void __iomem *)(dev->base_addr + reg)); 364 struct ipw2100_priv *priv = libipw_priv(dev);
365
366 *val = ioread16(priv->ioaddr + reg);
360 IPW_DEBUG_IO("r: 0x%08X => %04X\n", reg, *val); 367 IPW_DEBUG_IO("r: 0x%08X => %04X\n", reg, *val);
361} 368}
362 369
363static inline void read_register_byte(struct net_device *dev, u32 reg, u8 * val) 370static inline void read_register_byte(struct net_device *dev, u32 reg, u8 * val)
364{ 371{
365 *val = readb((void __iomem *)(dev->base_addr + reg)); 372 struct ipw2100_priv *priv = libipw_priv(dev);
373
374 *val = ioread8(priv->ioaddr + reg);
366 IPW_DEBUG_IO("r: 0x%08X => %02X\n", reg, *val); 375 IPW_DEBUG_IO("r: 0x%08X => %02X\n", reg, *val);
367} 376}
368 377
369static inline void write_register_word(struct net_device *dev, u32 reg, u16 val) 378static inline void write_register_word(struct net_device *dev, u32 reg, u16 val)
370{ 379{
371 writew(val, (void __iomem *)(dev->base_addr + reg)); 380 struct ipw2100_priv *priv = libipw_priv(dev);
381
382 iowrite16(val, priv->ioaddr + reg);
372 IPW_DEBUG_IO("w: 0x%08X <= %04X\n", reg, val); 383 IPW_DEBUG_IO("w: 0x%08X <= %04X\n", reg, val);
373} 384}
374 385
375static inline void write_register_byte(struct net_device *dev, u32 reg, u8 val) 386static inline void write_register_byte(struct net_device *dev, u32 reg, u8 val)
376{ 387{
377 writeb(val, (void __iomem *)(dev->base_addr + reg)); 388 struct ipw2100_priv *priv = libipw_priv(dev);
389
390 iowrite8(val, priv->ioaddr + reg);
378 IPW_DEBUG_IO("w: 0x%08X =< %02X\n", reg, val); 391 IPW_DEBUG_IO("w: 0x%08X =< %02X\n", reg, val);
379} 392}
380 393
@@ -506,13 +519,13 @@ static void read_nic_memory(struct net_device *dev, u32 addr, u32 len,
506 read_register_byte(dev, IPW_REG_INDIRECT_ACCESS_DATA + i, buf); 519 read_register_byte(dev, IPW_REG_INDIRECT_ACCESS_DATA + i, buf);
507} 520}
508 521
509static inline int ipw2100_hw_is_adapter_in_system(struct net_device *dev) 522static bool ipw2100_hw_is_adapter_in_system(struct net_device *dev)
510{ 523{
511 return (dev->base_addr && 524 u32 dbg;
512 (readl 525
513 ((void __iomem *)(dev->base_addr + 526 read_register(dev, IPW_REG_DOA_DEBUG_AREA_START, &dbg);
514 IPW_REG_DOA_DEBUG_AREA_START)) 527
515 == IPW_DATA_DOA_DEBUG_VALUE)); 528 return dbg == IPW_DATA_DOA_DEBUG_VALUE;
516} 529}
517 530
518static int ipw2100_get_ordinal(struct ipw2100_priv *priv, u32 ord, 531static int ipw2100_get_ordinal(struct ipw2100_priv *priv, u32 ord,
@@ -1946,6 +1959,9 @@ static int ipw2100_wdev_init(struct net_device *dev)
1946 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band; 1959 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
1947 } 1960 }
1948 1961
1962 wdev->wiphy->cipher_suites = ipw_cipher_suites;
1963 wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites);
1964
1949 set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev); 1965 set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
1950 if (wiphy_register(wdev->wiphy)) { 1966 if (wiphy_register(wdev->wiphy)) {
1951 ipw2100_down(priv); 1967 ipw2100_down(priv);
@@ -3773,7 +3789,7 @@ IPW2100_ORD(STAT_TX_HOST_REQUESTS, "requested Host Tx's (MSDU)"),
3773 IPW2100_ORD(COUNTRY_CODE, 3789 IPW2100_ORD(COUNTRY_CODE,
3774 "IEEE country code as recv'd from beacon"), 3790 "IEEE country code as recv'd from beacon"),
3775 IPW2100_ORD(COUNTRY_CHANNELS, 3791 IPW2100_ORD(COUNTRY_CHANNELS,
3776 "channels suported by country"), 3792 "channels supported by country"),
3777 IPW2100_ORD(RESET_CNT, "adapter resets (warm)"), 3793 IPW2100_ORD(RESET_CNT, "adapter resets (warm)"),
3778 IPW2100_ORD(BEACON_INTERVAL, "Beacon interval"), 3794 IPW2100_ORD(BEACON_INTERVAL, "Beacon interval"),
3779 IPW2100_ORD(ANTENNA_DIVERSITY, 3795 IPW2100_ORD(ANTENNA_DIVERSITY,
@@ -4062,7 +4078,7 @@ static int ipw2100_switch_mode(struct ipw2100_priv *priv, u32 mode)
4062 ipw2100_firmware.version = 0; 4078 ipw2100_firmware.version = 0;
4063#endif 4079#endif
4064 4080
4065 printk(KERN_INFO "%s: Reseting on mode change.\n", priv->net_dev->name); 4081 printk(KERN_INFO "%s: Resetting on mode change.\n", priv->net_dev->name);
4066 priv->reset_backoff = 0; 4082 priv->reset_backoff = 0;
4067 schedule_reset(priv); 4083 schedule_reset(priv);
4068 4084
@@ -6082,9 +6098,7 @@ static const struct net_device_ops ipw2100_netdev_ops = {
6082/* Look into using netdev destructor to shutdown libipw? */ 6098/* Look into using netdev destructor to shutdown libipw? */
6083 6099
6084static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev, 6100static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
6085 void __iomem * base_addr, 6101 void __iomem * ioaddr)
6086 unsigned long mem_start,
6087 unsigned long mem_len)
6088{ 6102{
6089 struct ipw2100_priv *priv; 6103 struct ipw2100_priv *priv;
6090 struct net_device *dev; 6104 struct net_device *dev;
@@ -6096,6 +6110,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
6096 priv->ieee = netdev_priv(dev); 6110 priv->ieee = netdev_priv(dev);
6097 priv->pci_dev = pci_dev; 6111 priv->pci_dev = pci_dev;
6098 priv->net_dev = dev; 6112 priv->net_dev = dev;
6113 priv->ioaddr = ioaddr;
6099 6114
6100 priv->ieee->hard_start_xmit = ipw2100_tx; 6115 priv->ieee->hard_start_xmit = ipw2100_tx;
6101 priv->ieee->set_security = shim__set_security; 6116 priv->ieee->set_security = shim__set_security;
@@ -6111,10 +6126,6 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
6111 dev->watchdog_timeo = 3 * HZ; 6126 dev->watchdog_timeo = 3 * HZ;
6112 dev->irq = 0; 6127 dev->irq = 0;
6113 6128
6114 dev->base_addr = (unsigned long)base_addr;
6115 dev->mem_start = mem_start;
6116 dev->mem_end = dev->mem_start + mem_len - 1;
6117
6118 /* NOTE: We don't use the wireless_handlers hook 6129 /* NOTE: We don't use the wireless_handlers hook
6119 * in dev as the system will start throwing WX requests 6130 * in dev as the system will start throwing WX requests
6120 * to us before we're actually initialized and it just 6131 * to us before we're actually initialized and it just
@@ -6215,8 +6226,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
6215static int ipw2100_pci_init_one(struct pci_dev *pci_dev, 6226static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6216 const struct pci_device_id *ent) 6227 const struct pci_device_id *ent)
6217{ 6228{
6218 unsigned long mem_start, mem_len, mem_flags; 6229 void __iomem *ioaddr;
6219 void __iomem *base_addr = NULL;
6220 struct net_device *dev = NULL; 6230 struct net_device *dev = NULL;
6221 struct ipw2100_priv *priv = NULL; 6231 struct ipw2100_priv *priv = NULL;
6222 int err = 0; 6232 int err = 0;
@@ -6225,18 +6235,14 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6225 6235
6226 IPW_DEBUG_INFO("enter\n"); 6236 IPW_DEBUG_INFO("enter\n");
6227 6237
6228 mem_start = pci_resource_start(pci_dev, 0); 6238 if (!(pci_resource_flags(pci_dev, 0) & IORESOURCE_MEM)) {
6229 mem_len = pci_resource_len(pci_dev, 0);
6230 mem_flags = pci_resource_flags(pci_dev, 0);
6231
6232 if ((mem_flags & IORESOURCE_MEM) != IORESOURCE_MEM) {
6233 IPW_DEBUG_INFO("weird - resource type is not memory\n"); 6239 IPW_DEBUG_INFO("weird - resource type is not memory\n");
6234 err = -ENODEV; 6240 err = -ENODEV;
6235 goto fail; 6241 goto out;
6236 } 6242 }
6237 6243
6238 base_addr = ioremap_nocache(mem_start, mem_len); 6244 ioaddr = pci_iomap(pci_dev, 0, 0);
6239 if (!base_addr) { 6245 if (!ioaddr) {
6240 printk(KERN_WARNING DRV_NAME 6246 printk(KERN_WARNING DRV_NAME
6241 "Error calling ioremap_nocache.\n"); 6247 "Error calling ioremap_nocache.\n");
6242 err = -EIO; 6248 err = -EIO;
@@ -6244,7 +6250,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6244 } 6250 }
6245 6251
6246 /* allocate and initialize our net_device */ 6252 /* allocate and initialize our net_device */
6247 dev = ipw2100_alloc_device(pci_dev, base_addr, mem_start, mem_len); 6253 dev = ipw2100_alloc_device(pci_dev, ioaddr);
6248 if (!dev) { 6254 if (!dev) {
6249 printk(KERN_WARNING DRV_NAME 6255 printk(KERN_WARNING DRV_NAME
6250 "Error calling ipw2100_alloc_device.\n"); 6256 "Error calling ipw2100_alloc_device.\n");
@@ -6379,8 +6385,8 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6379 priv->status |= STATUS_INITIALIZED; 6385 priv->status |= STATUS_INITIALIZED;
6380 6386
6381 mutex_unlock(&priv->action_mutex); 6387 mutex_unlock(&priv->action_mutex);
6382 6388out:
6383 return 0; 6389 return err;
6384 6390
6385 fail_unlock: 6391 fail_unlock:
6386 mutex_unlock(&priv->action_mutex); 6392 mutex_unlock(&priv->action_mutex);
@@ -6409,63 +6415,56 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6409 pci_set_drvdata(pci_dev, NULL); 6415 pci_set_drvdata(pci_dev, NULL);
6410 } 6416 }
6411 6417
6412 if (base_addr) 6418 pci_iounmap(pci_dev, ioaddr);
6413 iounmap(base_addr);
6414 6419
6415 pci_release_regions(pci_dev); 6420 pci_release_regions(pci_dev);
6416 pci_disable_device(pci_dev); 6421 pci_disable_device(pci_dev);
6417 6422 goto out;
6418 return err;
6419} 6423}
6420 6424
6421static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev) 6425static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev)
6422{ 6426{
6423 struct ipw2100_priv *priv = pci_get_drvdata(pci_dev); 6427 struct ipw2100_priv *priv = pci_get_drvdata(pci_dev);
6424 struct net_device *dev; 6428 struct net_device *dev = priv->net_dev;
6425 6429
6426 if (priv) { 6430 mutex_lock(&priv->action_mutex);
6427 mutex_lock(&priv->action_mutex);
6428 6431
6429 priv->status &= ~STATUS_INITIALIZED; 6432 priv->status &= ~STATUS_INITIALIZED;
6430 6433
6431 dev = priv->net_dev; 6434 sysfs_remove_group(&pci_dev->dev.kobj, &ipw2100_attribute_group);
6432 sysfs_remove_group(&pci_dev->dev.kobj,
6433 &ipw2100_attribute_group);
6434 6435
6435#ifdef CONFIG_PM 6436#ifdef CONFIG_PM
6436 if (ipw2100_firmware.version) 6437 if (ipw2100_firmware.version)
6437 ipw2100_release_firmware(priv, &ipw2100_firmware); 6438 ipw2100_release_firmware(priv, &ipw2100_firmware);
6438#endif 6439#endif
6439 /* Take down the hardware */ 6440 /* Take down the hardware */
6440 ipw2100_down(priv); 6441 ipw2100_down(priv);
6441 6442
6442 /* Release the mutex so that the network subsystem can 6443 /* Release the mutex so that the network subsystem can
6443 * complete any needed calls into the driver... */ 6444 * complete any needed calls into the driver... */
6444 mutex_unlock(&priv->action_mutex); 6445 mutex_unlock(&priv->action_mutex);
6445 6446
6446 /* Unregister the device first - this results in close() 6447 /* Unregister the device first - this results in close()
6447 * being called if the device is open. If we free storage 6448 * being called if the device is open. If we free storage
6448 * first, then close() will crash. */ 6449 * first, then close() will crash.
6449 unregister_netdev(dev); 6450 * FIXME: remove the comment above. */
6451 unregister_netdev(dev);
6450 6452
6451 ipw2100_kill_works(priv); 6453 ipw2100_kill_works(priv);
6452 6454
6453 ipw2100_queues_free(priv); 6455 ipw2100_queues_free(priv);
6454 6456
6455 /* Free potential debugging firmware snapshot */ 6457 /* Free potential debugging firmware snapshot */
6456 ipw2100_snapshot_free(priv); 6458 ipw2100_snapshot_free(priv);
6457 6459
6458 if (dev->irq) 6460 free_irq(dev->irq, priv);
6459 free_irq(dev->irq, priv);
6460 6461
6461 if (dev->base_addr) 6462 pci_iounmap(pci_dev, priv->ioaddr);
6462 iounmap((void __iomem *)dev->base_addr);
6463 6463
6464 /* wiphy_unregister needs to be here, before free_libipw */ 6464 /* wiphy_unregister needs to be here, before free_libipw */
6465 wiphy_unregister(priv->ieee->wdev.wiphy); 6465 wiphy_unregister(priv->ieee->wdev.wiphy);
6466 kfree(priv->ieee->bg_band.channels); 6466 kfree(priv->ieee->bg_band.channels);
6467 free_libipw(dev, 0); 6467 free_libipw(dev, 0);
6468 }
6469 6468
6470 pci_release_regions(pci_dev); 6469 pci_release_regions(pci_dev);
6471 pci_disable_device(pci_dev); 6470 pci_disable_device(pci_dev);
@@ -8508,8 +8507,7 @@ static void ipw2100_release_firmware(struct ipw2100_priv *priv,
8508 struct ipw2100_fw *fw) 8507 struct ipw2100_fw *fw)
8509{ 8508{
8510 fw->version = 0; 8509 fw->version = 0;
8511 if (fw->fw_entry) 8510 release_firmware(fw->fw_entry);
8512 release_firmware(fw->fw_entry);
8513 fw->fw_entry = NULL; 8511 fw->fw_entry = NULL;
8514} 8512}
8515 8513
@@ -8609,7 +8607,7 @@ static int ipw2100_ucode_download(struct ipw2100_priv *priv,
8609 struct net_device *dev = priv->net_dev; 8607 struct net_device *dev = priv->net_dev;
8610 const unsigned char *microcode_data = fw->uc.data; 8608 const unsigned char *microcode_data = fw->uc.data;
8611 unsigned int microcode_data_left = fw->uc.size; 8609 unsigned int microcode_data_left = fw->uc.size;
8612 void __iomem *reg = (void __iomem *)dev->base_addr; 8610 void __iomem *reg = priv->ioaddr;
8613 8611
8614 struct symbol_alive_response response; 8612 struct symbol_alive_response response;
8615 int i, j; 8613 int i, j;
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.h b/drivers/net/wireless/ipw2x00/ipw2100.h
index 99cba968aa58..973125242490 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.h
+++ b/drivers/net/wireless/ipw2x00/ipw2100.h
@@ -135,15 +135,6 @@ enum {
135 IPW_HW_STATE_ENABLED = 0 135 IPW_HW_STATE_ENABLED = 0
136}; 136};
137 137
138struct ssid_context {
139 char ssid[IW_ESSID_MAX_SIZE + 1];
140 int ssid_len;
141 unsigned char bssid[ETH_ALEN];
142 int port_type;
143 int channel;
144
145};
146
147extern const char *port_type_str[]; 138extern const char *port_type_str[];
148extern const char *band_str[]; 139extern const char *band_str[];
149 140
@@ -488,6 +479,7 @@ enum {
488#define CAP_PRIVACY_ON (1<<1) /* Off = No privacy */ 479#define CAP_PRIVACY_ON (1<<1) /* Off = No privacy */
489 480
490struct ipw2100_priv { 481struct ipw2100_priv {
482 void __iomem *ioaddr;
491 483
492 int stop_hang_check; /* Set 1 when shutting down to kill hang_check */ 484 int stop_hang_check; /* Set 1 when shutting down to kill hang_check */
493 int stop_rf_kill; /* Set 1 when shutting down to kill rf_kill */ 485 int stop_rf_kill; /* Set 1 when shutting down to kill rf_kill */
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 1779db3aa2b0..b3707dadad15 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -34,6 +34,7 @@
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <net/cfg80211-wext.h> 35#include <net/cfg80211-wext.h>
36#include "ipw2200.h" 36#include "ipw2200.h"
37#include "ipw.h"
37 38
38 39
39#ifndef KBUILD_EXTMOD 40#ifndef KBUILD_EXTMOD
@@ -3668,8 +3669,7 @@ static int ipw_load(struct ipw_priv *priv)
3668 priv->rxq = NULL; 3669 priv->rxq = NULL;
3669 } 3670 }
3670 ipw_tx_queue_free(priv); 3671 ipw_tx_queue_free(priv);
3671 if (raw) 3672 release_firmware(raw);
3672 release_firmware(raw);
3673#ifdef CONFIG_PM 3673#ifdef CONFIG_PM
3674 fw_loaded = 0; 3674 fw_loaded = 0;
3675 raw = NULL; 3675 raw = NULL;
@@ -7035,7 +7035,7 @@ static int ipw_qos_activate(struct ipw_priv *priv,
7035 cpu_to_le16(burst_duration); 7035 cpu_to_le16(burst_duration);
7036 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) { 7036 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7037 if (type == IEEE_B) { 7037 if (type == IEEE_B) {
7038 IPW_DEBUG_QOS("QoS activate IBSS nework mode %d\n", 7038 IPW_DEBUG_QOS("QoS activate IBSS network mode %d\n",
7039 type); 7039 type);
7040 if (priv->qos_data.qos_enable == 0) 7040 if (priv->qos_data.qos_enable == 0)
7041 active_one = &def_parameters_CCK; 7041 active_one = &def_parameters_CCK;
@@ -11544,6 +11544,9 @@ static int ipw_wdev_init(struct net_device *dev)
11544 wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band; 11544 wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band;
11545 } 11545 }
11546 11546
11547 wdev->wiphy->cipher_suites = ipw_cipher_suites;
11548 wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites);
11549
11547 set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev); 11550 set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
11548 11551
11549 /* With that information in place, we can now register the wiphy... */ 11552 /* With that information in place, we can now register the wiphy... */
@@ -11837,10 +11840,6 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11837 net_dev->wireless_data = &priv->wireless_data; 11840 net_dev->wireless_data = &priv->wireless_data;
11838 net_dev->wireless_handlers = &ipw_wx_handler_def; 11841 net_dev->wireless_handlers = &ipw_wx_handler_def;
11839 net_dev->ethtool_ops = &ipw_ethtool_ops; 11842 net_dev->ethtool_ops = &ipw_ethtool_ops;
11840 net_dev->irq = pdev->irq;
11841 net_dev->base_addr = (unsigned long)priv->hw_base;
11842 net_dev->mem_start = pci_resource_start(pdev, 0);
11843 net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
11844 11843
11845 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group); 11844 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
11846 if (err) { 11845 if (err) {
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h
index 8874588fb929..0b22fb421735 100644
--- a/drivers/net/wireless/ipw2x00/libipw.h
+++ b/drivers/net/wireless/ipw2x00/libipw.h
@@ -584,61 +584,6 @@ struct libipw_tim_parameters {
584 584
585/*******************************************************/ 585/*******************************************************/
586 586
587enum { /* libipw_basic_report.map */
588 LIBIPW_BASIC_MAP_BSS = (1 << 0),
589 LIBIPW_BASIC_MAP_OFDM = (1 << 1),
590 LIBIPW_BASIC_MAP_UNIDENTIFIED = (1 << 2),
591 LIBIPW_BASIC_MAP_RADAR = (1 << 3),
592 LIBIPW_BASIC_MAP_UNMEASURED = (1 << 4),
593 /* Bits 5-7 are reserved */
594
595};
596struct libipw_basic_report {
597 u8 channel;
598 __le64 start_time;
599 __le16 duration;
600 u8 map;
601} __packed;
602
603enum { /* libipw_measurement_request.mode */
604 /* Bit 0 is reserved */
605 LIBIPW_MEASUREMENT_ENABLE = (1 << 1),
606 LIBIPW_MEASUREMENT_REQUEST = (1 << 2),
607 LIBIPW_MEASUREMENT_REPORT = (1 << 3),
608 /* Bits 4-7 are reserved */
609};
610
611enum {
612 LIBIPW_REPORT_BASIC = 0, /* required */
613 LIBIPW_REPORT_CCA = 1, /* optional */
614 LIBIPW_REPORT_RPI = 2, /* optional */
615 /* 3-255 reserved */
616};
617
618struct libipw_measurement_params {
619 u8 channel;
620 __le64 start_time;
621 __le16 duration;
622} __packed;
623
624struct libipw_measurement_request {
625 struct libipw_info_element ie;
626 u8 token;
627 u8 mode;
628 u8 type;
629 struct libipw_measurement_params params[0];
630} __packed;
631
632struct libipw_measurement_report {
633 struct libipw_info_element ie;
634 u8 token;
635 u8 mode;
636 u8 type;
637 union {
638 struct libipw_basic_report basic[0];
639 } u;
640} __packed;
641
642struct libipw_tpc_report { 587struct libipw_tpc_report {
643 u8 transmit_power; 588 u8 transmit_power;
644 u8 link_margin; 589 u8 link_margin;
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index c46275a92565..f2baf94f069c 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -2850,9 +2850,9 @@ void
2850il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags, 2850il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
2851 struct ieee80211_tx_info *info) 2851 struct ieee80211_tx_info *info)
2852{ 2852{
2853 struct ieee80211_tx_rate *r = &info->control.rates[0]; 2853 struct ieee80211_tx_rate *r = &info->status.rates[0];
2854 2854
2855 info->antenna_sel_tx = 2855 info->status.antenna =
2856 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS); 2856 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
2857 if (rate_n_flags & RATE_MCS_HT_MSK) 2857 if (rate_n_flags & RATE_MCS_HT_MSK)
2858 r->flags |= IEEE80211_TX_RC_MCS; 2858 r->flags |= IEEE80211_TX_RC_MCS;
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
index 11ab1247fae1..f3b8e91aa3dc 100644
--- a/drivers/net/wireless/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -873,7 +873,7 @@ il4965_rs_tx_status(void *il_r, struct ieee80211_supported_band *sband,
873 tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI) || 873 tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI) ||
874 tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH) || 874 tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ||
875 tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA) || 875 tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA) ||
876 tbl_type.ant_type != info->antenna_sel_tx || 876 tbl_type.ant_type != info->status.antenna ||
877 !!(tx_rate & RATE_MCS_HT_MSK) != !!(mac_flags & IEEE80211_TX_RC_MCS) 877 !!(tx_rate & RATE_MCS_HT_MSK) != !!(mac_flags & IEEE80211_TX_RC_MCS)
878 || !!(tx_rate & RATE_MCS_GF_MSK) != 878 || !!(tx_rate & RATE_MCS_GF_MSK) !=
879 !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD) || rs_idx != mac_idx) { 879 !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD) || rs_idx != mac_idx) {
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 2fe62730dddd..db6c6e528022 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -113,20 +113,21 @@ config IWLWIFI_DEVICE_TESTMODE
113 generic netlink message via NL80211_TESTMODE channel. 113 generic netlink message via NL80211_TESTMODE channel.
114 114
115config IWLWIFI_P2P 115config IWLWIFI_P2P
116 bool "iwlwifi experimental P2P support" 116 def_bool y
117 depends on IWLWIFI 117 bool "iwlwifi experimental P2P support"
118 help 118 depends on IWLWIFI
119 This option enables experimental P2P support for some devices 119 help
120 based on microcode support. Since P2P support is still under 120 This option enables experimental P2P support for some devices
121 development, this option may even enable it for some devices 121 based on microcode support. Since P2P support is still under
122 now that turn out to not support it in the future due to 122 development, this option may even enable it for some devices
123 microcode restrictions. 123 now that turn out to not support it in the future due to
124 microcode restrictions.
124 125
125 To determine if your microcode supports the experimental P2P 126 To determine if your microcode supports the experimental P2P
126 offered by this option, check if the driver advertises AP 127 offered by this option, check if the driver advertises AP
127 support when it is loaded. 128 support when it is loaded.
128 129
129 Say Y only if you want to experiment with P2P. 130 Say Y only if you want to experiment with P2P.
130 131
131config IWLWIFI_EXPERIMENTAL_MFP 132config IWLWIFI_EXPERIMENTAL_MFP
132 bool "support MFP (802.11w) even if uCode doesn't advertise" 133 bool "support MFP (802.11w) even if uCode doesn't advertise"
@@ -136,3 +137,11 @@ config IWLWIFI_EXPERIMENTAL_MFP
136 even if the microcode doesn't advertise it. 137 even if the microcode doesn't advertise it.
137 138
138 Say Y only if you want to experiment with MFP. 139 Say Y only if you want to experiment with MFP.
140
141config IWLWIFI_UCODE16
142 bool "support uCode 16.0"
143 depends on IWLWIFI
144 help
145 This option enables support for uCode version 16.0.
146
147 Say Y if you want to use 16.0 microcode.
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 85d163ed3db1..406f297a9a56 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -5,9 +5,9 @@ iwlwifi-objs += iwl-ucode.o iwl-agn-tx.o iwl-debug.o
5iwlwifi-objs += iwl-agn-lib.o iwl-agn-calib.o iwl-io.o 5iwlwifi-objs += iwl-agn-lib.o iwl-agn-calib.o iwl-io.o
6iwlwifi-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-rx.o 6iwlwifi-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-rx.o
7 7
8iwlwifi-objs += iwl-core.o iwl-eeprom.o iwl-power.o 8iwlwifi-objs += iwl-eeprom.o iwl-power.o
9iwlwifi-objs += iwl-scan.o iwl-led.o 9iwlwifi-objs += iwl-scan.o iwl-led.o
10iwlwifi-objs += iwl-agn-rxon.o 10iwlwifi-objs += iwl-agn-rxon.o iwl-agn-devices.o
11iwlwifi-objs += iwl-5000.o 11iwlwifi-objs += iwl-5000.o
12iwlwifi-objs += iwl-6000.o 12iwlwifi-objs += iwl-6000.o
13iwlwifi-objs += iwl-1000.o 13iwlwifi-objs += iwl-1000.o
@@ -17,6 +17,8 @@ iwlwifi-objs += iwl-drv.o
17iwlwifi-objs += iwl-notif-wait.o 17iwlwifi-objs += iwl-notif-wait.o
18iwlwifi-objs += iwl-trans-pcie.o iwl-trans-pcie-rx.o iwl-trans-pcie-tx.o 18iwlwifi-objs += iwl-trans-pcie.o iwl-trans-pcie-rx.o iwl-trans-pcie-tx.o
19 19
20
21iwlwifi-$(CONFIG_IWLWIFI_UCODE16) += iwl-phy-db.o
20iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o 22iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
21iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o 23iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
22iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-testmode.o 24iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-testmode.o
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 8d80e233bc7a..2629a6602dfa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -24,26 +24,12 @@
24 * 24 *
25 *****************************************************************************/ 25 *****************************************************************************/
26 26
27#include <linux/kernel.h>
28#include <linux/module.h> 27#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/delay.h>
31#include <linux/skbuff.h>
32#include <linux/netdevice.h>
33#include <net/mac80211.h>
34#include <linux/etherdevice.h>
35#include <asm/unaligned.h>
36#include <linux/stringify.h> 28#include <linux/stringify.h>
37 29#include "iwl-config.h"
38#include "iwl-eeprom.h"
39#include "iwl-dev.h"
40#include "iwl-core.h"
41#include "iwl-io.h"
42#include "iwl-agn.h"
43#include "iwl-agn-hw.h"
44#include "iwl-shared.h"
45#include "iwl-cfg.h" 30#include "iwl-cfg.h"
46#include "iwl-prph.h" 31#include "iwl-csr.h"
32#include "iwl-agn-hw.h"
47 33
48/* Highest firmware API version supported */ 34/* Highest firmware API version supported */
49#define IWL1000_UCODE_API_MAX 5 35#define IWL1000_UCODE_API_MAX 5
@@ -57,6 +43,10 @@
57#define IWL1000_UCODE_API_MIN 1 43#define IWL1000_UCODE_API_MIN 1
58#define IWL100_UCODE_API_MIN 5 44#define IWL100_UCODE_API_MIN 5
59 45
46/* EEPROM version */
47#define EEPROM_1000_TX_POWER_VERSION (4)
48#define EEPROM_1000_EEPROM_VERSION (0x15C)
49
60#define IWL1000_FW_PRE "iwlwifi-1000-" 50#define IWL1000_FW_PRE "iwlwifi-1000-"
61#define IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE __stringify(api) ".ucode" 51#define IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE __stringify(api) ".ucode"
62 52
@@ -64,100 +54,8 @@
64#define IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE __stringify(api) ".ucode" 54#define IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE __stringify(api) ".ucode"
65 55
66 56
67/*
68 * For 1000, use advance thermal throttling critical temperature threshold,
69 * but legacy thermal management implementation for now.
70 * This is for the reason of 1000 uCode using advance thermal throttling API
71 * but not implement ct_kill_exit based on ct_kill exit temperature
72 * so the thermal throttling will still based on legacy thermal throttling
73 * management.
74 * The code here need to be modified once 1000 uCode has the advanced thermal
75 * throttling algorithm in place
76 */
77static void iwl1000_set_ct_threshold(struct iwl_priv *priv)
78{
79 /* want Celsius */
80 hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
81 hw_params(priv).ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
82}
83
84/* NIC configuration for 1000 series */
85static void iwl1000_nic_config(struct iwl_priv *priv)
86{
87 /* set CSR_HW_CONFIG_REG for uCode use */
88 iwl_set_bit(trans(priv), CSR_HW_IF_CONFIG_REG,
89 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
90 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
91
92 /* Setting digital SVR for 1000 card to 1.32V */
93 /* locking is acquired in iwl_set_bits_mask_prph() function */
94 iwl_set_bits_mask_prph(trans(priv), APMG_DIGITAL_SVR_REG,
95 APMG_SVR_DIGITAL_VOLTAGE_1_32,
96 ~APMG_SVR_VOLTAGE_CONFIG_BIT_MSK);
97}
98
99static const struct iwl_sensitivity_ranges iwl1000_sensitivity = {
100 .min_nrg_cck = 95,
101 .auto_corr_min_ofdm = 90,
102 .auto_corr_min_ofdm_mrc = 170,
103 .auto_corr_min_ofdm_x1 = 120,
104 .auto_corr_min_ofdm_mrc_x1 = 240,
105
106 .auto_corr_max_ofdm = 120,
107 .auto_corr_max_ofdm_mrc = 210,
108 .auto_corr_max_ofdm_x1 = 155,
109 .auto_corr_max_ofdm_mrc_x1 = 290,
110
111 .auto_corr_min_cck = 125,
112 .auto_corr_max_cck = 200,
113 .auto_corr_min_cck_mrc = 170,
114 .auto_corr_max_cck_mrc = 400,
115 .nrg_th_cck = 95,
116 .nrg_th_ofdm = 95,
117
118 .barker_corr_th_min = 190,
119 .barker_corr_th_min_mrc = 390,
120 .nrg_th_cca = 62,
121};
122
123static void iwl1000_hw_set_hw_params(struct iwl_priv *priv)
124{
125 hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ);
126
127 hw_params(priv).tx_chains_num =
128 num_of_ant(hw_params(priv).valid_tx_ant);
129 if (cfg(priv)->rx_with_siso_diversity)
130 hw_params(priv).rx_chains_num = 1;
131 else
132 hw_params(priv).rx_chains_num =
133 num_of_ant(hw_params(priv).valid_rx_ant);
134
135 iwl1000_set_ct_threshold(priv);
136
137 /* Set initial sensitivity parameters */
138 hw_params(priv).sens = &iwl1000_sensitivity;
139}
140
141static struct iwl_lib_ops iwl1000_lib = {
142 .set_hw_params = iwl1000_hw_set_hw_params,
143 .nic_config = iwl1000_nic_config,
144 .eeprom_ops = {
145 .regulatory_bands = {
146 EEPROM_REG_BAND_1_CHANNELS,
147 EEPROM_REG_BAND_2_CHANNELS,
148 EEPROM_REG_BAND_3_CHANNELS,
149 EEPROM_REG_BAND_4_CHANNELS,
150 EEPROM_REG_BAND_5_CHANNELS,
151 EEPROM_REG_BAND_24_HT40_CHANNELS,
152 EEPROM_REGULATORY_BAND_NO_HT40,
153 },
154 },
155 .temperature = iwlagn_temperature,
156};
157
158static const struct iwl_base_params iwl1000_base_params = { 57static const struct iwl_base_params iwl1000_base_params = {
159 .num_of_queues = IWLAGN_NUM_QUEUES, 58 .num_of_queues = IWLAGN_NUM_QUEUES,
160 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
161 .eeprom_size = OTP_LOW_IMAGE_SIZE, 59 .eeprom_size = OTP_LOW_IMAGE_SIZE,
162 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, 60 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
163 .max_ll_items = OTP_MAX_LL_ITEMS_1000, 61 .max_ll_items = OTP_MAX_LL_ITEMS_1000,
@@ -166,15 +64,13 @@ static const struct iwl_base_params iwl1000_base_params = {
166 .support_ct_kill_exit = true, 64 .support_ct_kill_exit = true,
167 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, 65 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
168 .chain_noise_scale = 1000, 66 .chain_noise_scale = 1000,
169 .wd_timeout = IWL_DEF_WD_TIMEOUT, 67 .wd_timeout = IWL_WATCHHDOG_DISABLED,
170 .max_event_log_size = 128, 68 .max_event_log_size = 128,
171 .wd_disable = true,
172}; 69};
173 70
174static const struct iwl_ht_params iwl1000_ht_params = { 71static const struct iwl_ht_params iwl1000_ht_params = {
175 .ht_greenfield_support = true, 72 .ht_greenfield_support = true,
176 .use_rts_for_aggregation = true, /* use rts/cts protection */ 73 .use_rts_for_aggregation = true, /* use rts/cts protection */
177 .smps_mode = IEEE80211_SMPS_DYNAMIC,
178}; 74};
179 75
180#define IWL_DEVICE_1000 \ 76#define IWL_DEVICE_1000 \
@@ -182,11 +78,11 @@ static const struct iwl_ht_params iwl1000_ht_params = {
182 .ucode_api_max = IWL1000_UCODE_API_MAX, \ 78 .ucode_api_max = IWL1000_UCODE_API_MAX, \
183 .ucode_api_ok = IWL1000_UCODE_API_OK, \ 79 .ucode_api_ok = IWL1000_UCODE_API_OK, \
184 .ucode_api_min = IWL1000_UCODE_API_MIN, \ 80 .ucode_api_min = IWL1000_UCODE_API_MIN, \
81 .device_family = IWL_DEVICE_FAMILY_1000, \
185 .max_inst_size = IWLAGN_RTC_INST_SIZE, \ 82 .max_inst_size = IWLAGN_RTC_INST_SIZE, \
186 .max_data_size = IWLAGN_RTC_DATA_SIZE, \ 83 .max_data_size = IWLAGN_RTC_DATA_SIZE, \
187 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \ 84 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
188 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ 85 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
189 .lib = &iwl1000_lib, \
190 .base_params = &iwl1000_base_params, \ 86 .base_params = &iwl1000_base_params, \
191 .led_mode = IWL_LED_BLINK 87 .led_mode = IWL_LED_BLINK
192 88
@@ -206,11 +102,11 @@ const struct iwl_cfg iwl1000_bg_cfg = {
206 .ucode_api_max = IWL100_UCODE_API_MAX, \ 102 .ucode_api_max = IWL100_UCODE_API_MAX, \
207 .ucode_api_ok = IWL100_UCODE_API_OK, \ 103 .ucode_api_ok = IWL100_UCODE_API_OK, \
208 .ucode_api_min = IWL100_UCODE_API_MIN, \ 104 .ucode_api_min = IWL100_UCODE_API_MIN, \
105 .device_family = IWL_DEVICE_FAMILY_100, \
209 .max_inst_size = IWLAGN_RTC_INST_SIZE, \ 106 .max_inst_size = IWLAGN_RTC_INST_SIZE, \
210 .max_data_size = IWLAGN_RTC_DATA_SIZE, \ 107 .max_data_size = IWLAGN_RTC_DATA_SIZE, \
211 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \ 108 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
212 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \ 109 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
213 .lib = &iwl1000_lib, \
214 .base_params = &iwl1000_base_params, \ 110 .base_params = &iwl1000_base_params, \
215 .led_mode = IWL_LED_RF_STATE, \ 111 .led_mode = IWL_LED_RF_STATE, \
216 .rx_with_siso_diversity = true 112 .rx_with_siso_diversity = true
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index ea108622e0bd..7f793417c787 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -24,25 +24,12 @@
24 * 24 *
25 *****************************************************************************/ 25 *****************************************************************************/
26 26
27#include <linux/kernel.h>
28#include <linux/module.h> 27#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/delay.h>
31#include <linux/skbuff.h>
32#include <linux/netdevice.h>
33#include <net/mac80211.h>
34#include <linux/etherdevice.h>
35#include <asm/unaligned.h>
36#include <linux/stringify.h> 28#include <linux/stringify.h>
37 29#include "iwl-config.h"
38#include "iwl-eeprom.h"
39#include "iwl-dev.h"
40#include "iwl-core.h"
41#include "iwl-io.h"
42#include "iwl-agn.h"
43#include "iwl-agn-hw.h"
44#include "iwl-shared.h"
45#include "iwl-cfg.h" 30#include "iwl-cfg.h"
31#include "iwl-agn-hw.h"
32#include "iwl-commands.h" /* needed for BT for now */
46 33
47/* Highest firmware API version supported */ 34/* Highest firmware API version supported */
48#define IWL2030_UCODE_API_MAX 6 35#define IWL2030_UCODE_API_MAX 6
@@ -62,6 +49,11 @@
62#define IWL105_UCODE_API_MIN 5 49#define IWL105_UCODE_API_MIN 5
63#define IWL135_UCODE_API_MIN 5 50#define IWL135_UCODE_API_MIN 5
64 51
52/* EEPROM version */
53#define EEPROM_2000_TX_POWER_VERSION (6)
54#define EEPROM_2000_EEPROM_VERSION (0x805)
55
56
65#define IWL2030_FW_PRE "iwlwifi-2030-" 57#define IWL2030_FW_PRE "iwlwifi-2030-"
66#define IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE __stringify(api) ".ucode" 58#define IWL2030_MODULE_FIRMWARE(api) IWL2030_FW_PRE __stringify(api) ".ucode"
67 59
@@ -74,105 +66,9 @@
74#define IWL135_FW_PRE "iwlwifi-135-" 66#define IWL135_FW_PRE "iwlwifi-135-"
75#define IWL135_MODULE_FIRMWARE(api) IWL135_FW_PRE __stringify(api) ".ucode" 67#define IWL135_MODULE_FIRMWARE(api) IWL135_FW_PRE __stringify(api) ".ucode"
76 68
77static void iwl2000_set_ct_threshold(struct iwl_priv *priv)
78{
79 /* want Celsius */
80 hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD;
81 hw_params(priv).ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
82}
83
84/* NIC configuration for 2000 series */
85static void iwl2000_nic_config(struct iwl_priv *priv)
86{
87 iwl_rf_config(priv);
88
89 if (cfg(priv)->iq_invert)
90 iwl_set_bit(trans(priv), CSR_GP_DRIVER_REG,
91 CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
92}
93
94static const struct iwl_sensitivity_ranges iwl2000_sensitivity = {
95 .min_nrg_cck = 97,
96 .auto_corr_min_ofdm = 80,
97 .auto_corr_min_ofdm_mrc = 128,
98 .auto_corr_min_ofdm_x1 = 105,
99 .auto_corr_min_ofdm_mrc_x1 = 192,
100
101 .auto_corr_max_ofdm = 145,
102 .auto_corr_max_ofdm_mrc = 232,
103 .auto_corr_max_ofdm_x1 = 110,
104 .auto_corr_max_ofdm_mrc_x1 = 232,
105
106 .auto_corr_min_cck = 125,
107 .auto_corr_max_cck = 175,
108 .auto_corr_min_cck_mrc = 160,
109 .auto_corr_max_cck_mrc = 310,
110 .nrg_th_cck = 97,
111 .nrg_th_ofdm = 100,
112
113 .barker_corr_th_min = 190,
114 .barker_corr_th_min_mrc = 390,
115 .nrg_th_cca = 62,
116};
117
118static void iwl2000_hw_set_hw_params(struct iwl_priv *priv)
119{
120 hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ);
121
122 hw_params(priv).tx_chains_num =
123 num_of_ant(hw_params(priv).valid_tx_ant);
124 if (cfg(priv)->rx_with_siso_diversity)
125 hw_params(priv).rx_chains_num = 1;
126 else
127 hw_params(priv).rx_chains_num =
128 num_of_ant(hw_params(priv).valid_rx_ant);
129
130 iwl2000_set_ct_threshold(priv);
131
132 /* Set initial sensitivity parameters */
133 hw_params(priv).sens = &iwl2000_sensitivity;
134}
135
136static struct iwl_lib_ops iwl2000_lib = {
137 .set_hw_params = iwl2000_hw_set_hw_params,
138 .nic_config = iwl2000_nic_config,
139 .eeprom_ops = {
140 .regulatory_bands = {
141 EEPROM_REG_BAND_1_CHANNELS,
142 EEPROM_REG_BAND_2_CHANNELS,
143 EEPROM_REG_BAND_3_CHANNELS,
144 EEPROM_REG_BAND_4_CHANNELS,
145 EEPROM_REG_BAND_5_CHANNELS,
146 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
147 EEPROM_REGULATORY_BAND_NO_HT40,
148 },
149 .enhanced_txpower = true,
150 },
151 .temperature = iwlagn_temperature,
152};
153
154static struct iwl_lib_ops iwl2030_lib = {
155 .set_hw_params = iwl2000_hw_set_hw_params,
156 .nic_config = iwl2000_nic_config,
157 .eeprom_ops = {
158 .regulatory_bands = {
159 EEPROM_REG_BAND_1_CHANNELS,
160 EEPROM_REG_BAND_2_CHANNELS,
161 EEPROM_REG_BAND_3_CHANNELS,
162 EEPROM_REG_BAND_4_CHANNELS,
163 EEPROM_REG_BAND_5_CHANNELS,
164 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
165 EEPROM_REGULATORY_BAND_NO_HT40,
166 },
167 .enhanced_txpower = true,
168 },
169 .temperature = iwlagn_temperature,
170};
171
172static const struct iwl_base_params iwl2000_base_params = { 69static const struct iwl_base_params iwl2000_base_params = {
173 .eeprom_size = OTP_LOW_IMAGE_SIZE, 70 .eeprom_size = OTP_LOW_IMAGE_SIZE,
174 .num_of_queues = IWLAGN_NUM_QUEUES, 71 .num_of_queues = IWLAGN_NUM_QUEUES,
175 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
176 .pll_cfg_val = 0, 72 .pll_cfg_val = 0,
177 .max_ll_items = OTP_MAX_LL_ITEMS_2x00, 73 .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
178 .shadow_ram_support = true, 74 .shadow_ram_support = true,
@@ -191,7 +87,6 @@ static const struct iwl_base_params iwl2000_base_params = {
191static const struct iwl_base_params iwl2030_base_params = { 87static const struct iwl_base_params iwl2030_base_params = {
192 .eeprom_size = OTP_LOW_IMAGE_SIZE, 88 .eeprom_size = OTP_LOW_IMAGE_SIZE,
193 .num_of_queues = IWLAGN_NUM_QUEUES, 89 .num_of_queues = IWLAGN_NUM_QUEUES,
194 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
195 .pll_cfg_val = 0, 90 .pll_cfg_val = 0,
196 .max_ll_items = OTP_MAX_LL_ITEMS_2x00, 91 .max_ll_items = OTP_MAX_LL_ITEMS_2x00,
197 .shadow_ram_support = true, 92 .shadow_ram_support = true,
@@ -226,16 +121,15 @@ static const struct iwl_bt_params iwl2030_bt_params = {
226 .ucode_api_max = IWL2000_UCODE_API_MAX, \ 121 .ucode_api_max = IWL2000_UCODE_API_MAX, \
227 .ucode_api_ok = IWL2000_UCODE_API_OK, \ 122 .ucode_api_ok = IWL2000_UCODE_API_OK, \
228 .ucode_api_min = IWL2000_UCODE_API_MIN, \ 123 .ucode_api_min = IWL2000_UCODE_API_MIN, \
124 .device_family = IWL_DEVICE_FAMILY_2000, \
229 .max_inst_size = IWL60_RTC_INST_SIZE, \ 125 .max_inst_size = IWL60_RTC_INST_SIZE, \
230 .max_data_size = IWL60_RTC_DATA_SIZE, \ 126 .max_data_size = IWL60_RTC_DATA_SIZE, \
231 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ 127 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
232 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 128 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
233 .lib = &iwl2000_lib, \
234 .base_params = &iwl2000_base_params, \ 129 .base_params = &iwl2000_base_params, \
235 .need_temp_offset_calib = true, \ 130 .need_temp_offset_calib = true, \
236 .temp_offset_v2 = true, \ 131 .temp_offset_v2 = true, \
237 .led_mode = IWL_LED_RF_STATE, \ 132 .led_mode = IWL_LED_RF_STATE
238 .iq_invert = true \
239 133
240const struct iwl_cfg iwl2000_2bgn_cfg = { 134const struct iwl_cfg iwl2000_2bgn_cfg = {
241 .name = "Intel(R) Centrino(R) Wireless-N 2200 BGN", 135 .name = "Intel(R) Centrino(R) Wireless-N 2200 BGN",
@@ -254,18 +148,17 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = {
254 .ucode_api_max = IWL2030_UCODE_API_MAX, \ 148 .ucode_api_max = IWL2030_UCODE_API_MAX, \
255 .ucode_api_ok = IWL2030_UCODE_API_OK, \ 149 .ucode_api_ok = IWL2030_UCODE_API_OK, \
256 .ucode_api_min = IWL2030_UCODE_API_MIN, \ 150 .ucode_api_min = IWL2030_UCODE_API_MIN, \
151 .device_family = IWL_DEVICE_FAMILY_2030, \
257 .max_inst_size = IWL60_RTC_INST_SIZE, \ 152 .max_inst_size = IWL60_RTC_INST_SIZE, \
258 .max_data_size = IWL60_RTC_DATA_SIZE, \ 153 .max_data_size = IWL60_RTC_DATA_SIZE, \
259 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ 154 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
260 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 155 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
261 .lib = &iwl2030_lib, \
262 .base_params = &iwl2030_base_params, \ 156 .base_params = &iwl2030_base_params, \
263 .bt_params = &iwl2030_bt_params, \ 157 .bt_params = &iwl2030_bt_params, \
264 .need_temp_offset_calib = true, \ 158 .need_temp_offset_calib = true, \
265 .temp_offset_v2 = true, \ 159 .temp_offset_v2 = true, \
266 .led_mode = IWL_LED_RF_STATE, \ 160 .led_mode = IWL_LED_RF_STATE, \
267 .adv_pm = true, \ 161 .adv_pm = true
268 .iq_invert = true \
269 162
270const struct iwl_cfg iwl2030_2bgn_cfg = { 163const struct iwl_cfg iwl2030_2bgn_cfg = {
271 .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN", 164 .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
@@ -278,18 +171,17 @@ const struct iwl_cfg iwl2030_2bgn_cfg = {
278 .ucode_api_max = IWL105_UCODE_API_MAX, \ 171 .ucode_api_max = IWL105_UCODE_API_MAX, \
279 .ucode_api_ok = IWL105_UCODE_API_OK, \ 172 .ucode_api_ok = IWL105_UCODE_API_OK, \
280 .ucode_api_min = IWL105_UCODE_API_MIN, \ 173 .ucode_api_min = IWL105_UCODE_API_MIN, \
174 .device_family = IWL_DEVICE_FAMILY_105, \
281 .max_inst_size = IWL60_RTC_INST_SIZE, \ 175 .max_inst_size = IWL60_RTC_INST_SIZE, \
282 .max_data_size = IWL60_RTC_DATA_SIZE, \ 176 .max_data_size = IWL60_RTC_DATA_SIZE, \
283 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ 177 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
284 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 178 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
285 .lib = &iwl2000_lib, \
286 .base_params = &iwl2000_base_params, \ 179 .base_params = &iwl2000_base_params, \
287 .need_temp_offset_calib = true, \ 180 .need_temp_offset_calib = true, \
288 .temp_offset_v2 = true, \ 181 .temp_offset_v2 = true, \
289 .led_mode = IWL_LED_RF_STATE, \ 182 .led_mode = IWL_LED_RF_STATE, \
290 .adv_pm = true, \ 183 .adv_pm = true, \
291 .rx_with_siso_diversity = true, \ 184 .rx_with_siso_diversity = true
292 .iq_invert = true \
293 185
294const struct iwl_cfg iwl105_bgn_cfg = { 186const struct iwl_cfg iwl105_bgn_cfg = {
295 .name = "Intel(R) Centrino(R) Wireless-N 105 BGN", 187 .name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
@@ -308,19 +200,18 @@ const struct iwl_cfg iwl105_bgn_d_cfg = {
308 .ucode_api_max = IWL135_UCODE_API_MAX, \ 200 .ucode_api_max = IWL135_UCODE_API_MAX, \
309 .ucode_api_ok = IWL135_UCODE_API_OK, \ 201 .ucode_api_ok = IWL135_UCODE_API_OK, \
310 .ucode_api_min = IWL135_UCODE_API_MIN, \ 202 .ucode_api_min = IWL135_UCODE_API_MIN, \
203 .device_family = IWL_DEVICE_FAMILY_135, \
311 .max_inst_size = IWL60_RTC_INST_SIZE, \ 204 .max_inst_size = IWL60_RTC_INST_SIZE, \
312 .max_data_size = IWL60_RTC_DATA_SIZE, \ 205 .max_data_size = IWL60_RTC_DATA_SIZE, \
313 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \ 206 .eeprom_ver = EEPROM_2000_EEPROM_VERSION, \
314 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \ 207 .eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
315 .lib = &iwl2030_lib, \
316 .base_params = &iwl2030_base_params, \ 208 .base_params = &iwl2030_base_params, \
317 .bt_params = &iwl2030_bt_params, \ 209 .bt_params = &iwl2030_bt_params, \
318 .need_temp_offset_calib = true, \ 210 .need_temp_offset_calib = true, \
319 .temp_offset_v2 = true, \ 211 .temp_offset_v2 = true, \
320 .led_mode = IWL_LED_RF_STATE, \ 212 .led_mode = IWL_LED_RF_STATE, \
321 .adv_pm = true, \ 213 .adv_pm = true, \
322 .rx_with_siso_diversity = true, \ 214 .rx_with_siso_diversity = true
323 .iq_invert = true \
324 215
325const struct iwl_cfg iwl135_bgn_cfg = { 216const struct iwl_cfg iwl135_bgn_cfg = {
326 .name = "Intel(R) Centrino(R) Wireless-N 135 BGN", 217 .name = "Intel(R) Centrino(R) Wireless-N 135 BGN",
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index de0920c74cdd..8e26bc825f23 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -24,28 +24,12 @@
24 * 24 *
25 *****************************************************************************/ 25 *****************************************************************************/
26 26
27#include <linux/kernel.h>
28#include <linux/module.h> 27#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/delay.h>
31#include <linux/sched.h>
32#include <linux/skbuff.h>
33#include <linux/netdevice.h>
34#include <net/mac80211.h>
35#include <linux/etherdevice.h>
36#include <asm/unaligned.h>
37#include <linux/stringify.h> 28#include <linux/stringify.h>
38 29#include "iwl-config.h"
39#include "iwl-eeprom.h"
40#include "iwl-dev.h"
41#include "iwl-core.h"
42#include "iwl-io.h"
43#include "iwl-agn.h"
44#include "iwl-agn-hw.h"
45#include "iwl-trans.h"
46#include "iwl-shared.h"
47#include "iwl-cfg.h" 30#include "iwl-cfg.h"
48#include "iwl-prph.h" 31#include "iwl-agn-hw.h"
32#include "iwl-csr.h"
49 33
50/* Highest firmware API version supported */ 34/* Highest firmware API version supported */
51#define IWL5000_UCODE_API_MAX 5 35#define IWL5000_UCODE_API_MAX 5
@@ -59,268 +43,28 @@
59#define IWL5000_UCODE_API_MIN 1 43#define IWL5000_UCODE_API_MIN 1
60#define IWL5150_UCODE_API_MIN 1 44#define IWL5150_UCODE_API_MIN 1
61 45
46/* EEPROM versions */
47#define EEPROM_5000_TX_POWER_VERSION (4)
48#define EEPROM_5000_EEPROM_VERSION (0x11A)
49#define EEPROM_5050_TX_POWER_VERSION (4)
50#define EEPROM_5050_EEPROM_VERSION (0x21E)
51
62#define IWL5000_FW_PRE "iwlwifi-5000-" 52#define IWL5000_FW_PRE "iwlwifi-5000-"
63#define IWL5000_MODULE_FIRMWARE(api) IWL5000_FW_PRE __stringify(api) ".ucode" 53#define IWL5000_MODULE_FIRMWARE(api) IWL5000_FW_PRE __stringify(api) ".ucode"
64 54
65#define IWL5150_FW_PRE "iwlwifi-5150-" 55#define IWL5150_FW_PRE "iwlwifi-5150-"
66#define IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE __stringify(api) ".ucode" 56#define IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE __stringify(api) ".ucode"
67 57
68/* NIC configuration for 5000 series */
69static void iwl5000_nic_config(struct iwl_priv *priv)
70{
71 iwl_rf_config(priv);
72
73 /* W/A : NIC is stuck in a reset state after Early PCIe power off
74 * (PCIe power is lost before PERST# is asserted),
75 * causing ME FW to lose ownership and not being able to obtain it back.
76 */
77 iwl_set_bits_mask_prph(trans(priv), APMG_PS_CTRL_REG,
78 APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
79 ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
80}
81
82static const struct iwl_sensitivity_ranges iwl5000_sensitivity = {
83 .min_nrg_cck = 100,
84 .auto_corr_min_ofdm = 90,
85 .auto_corr_min_ofdm_mrc = 170,
86 .auto_corr_min_ofdm_x1 = 105,
87 .auto_corr_min_ofdm_mrc_x1 = 220,
88
89 .auto_corr_max_ofdm = 120,
90 .auto_corr_max_ofdm_mrc = 210,
91 .auto_corr_max_ofdm_x1 = 120,
92 .auto_corr_max_ofdm_mrc_x1 = 240,
93
94 .auto_corr_min_cck = 125,
95 .auto_corr_max_cck = 200,
96 .auto_corr_min_cck_mrc = 200,
97 .auto_corr_max_cck_mrc = 400,
98 .nrg_th_cck = 100,
99 .nrg_th_ofdm = 100,
100
101 .barker_corr_th_min = 190,
102 .barker_corr_th_min_mrc = 390,
103 .nrg_th_cca = 62,
104};
105
106static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
107 .min_nrg_cck = 95,
108 .auto_corr_min_ofdm = 90,
109 .auto_corr_min_ofdm_mrc = 170,
110 .auto_corr_min_ofdm_x1 = 105,
111 .auto_corr_min_ofdm_mrc_x1 = 220,
112
113 .auto_corr_max_ofdm = 120,
114 .auto_corr_max_ofdm_mrc = 210,
115 /* max = min for performance bug in 5150 DSP */
116 .auto_corr_max_ofdm_x1 = 105,
117 .auto_corr_max_ofdm_mrc_x1 = 220,
118
119 .auto_corr_min_cck = 125,
120 .auto_corr_max_cck = 200,
121 .auto_corr_min_cck_mrc = 170,
122 .auto_corr_max_cck_mrc = 400,
123 .nrg_th_cck = 95,
124 .nrg_th_ofdm = 95,
125
126 .barker_corr_th_min = 190,
127 .barker_corr_th_min_mrc = 390,
128 .nrg_th_cca = 62,
129};
130
131#define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF (-5)
132
133static s32 iwl_temp_calib_to_offset(struct iwl_shared *shrd)
134{
135 u16 temperature, voltage;
136 __le16 *temp_calib = (__le16 *)iwl_eeprom_query_addr(shrd,
137 EEPROM_KELVIN_TEMPERATURE);
138
139 temperature = le16_to_cpu(temp_calib[0]);
140 voltage = le16_to_cpu(temp_calib[1]);
141
142 /* offset = temp - volt / coeff */
143 return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
144}
145
146static void iwl5150_set_ct_threshold(struct iwl_priv *priv)
147{
148 const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF;
149 s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) -
150 iwl_temp_calib_to_offset(priv->shrd);
151
152 hw_params(priv).ct_kill_threshold = threshold * volt2temp_coef;
153}
154
155static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
156{
157 /* want Celsius */
158 hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
159}
160
161static void iwl5000_hw_set_hw_params(struct iwl_priv *priv)
162{
163 hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
164 BIT(IEEE80211_BAND_5GHZ);
165
166 hw_params(priv).tx_chains_num =
167 num_of_ant(hw_params(priv).valid_tx_ant);
168 hw_params(priv).rx_chains_num =
169 num_of_ant(hw_params(priv).valid_rx_ant);
170
171 iwl5000_set_ct_threshold(priv);
172
173 /* Set initial sensitivity parameters */
174 hw_params(priv).sens = &iwl5000_sensitivity;
175}
176
177static void iwl5150_hw_set_hw_params(struct iwl_priv *priv)
178{
179 hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
180 BIT(IEEE80211_BAND_5GHZ);
181
182 hw_params(priv).tx_chains_num =
183 num_of_ant(hw_params(priv).valid_tx_ant);
184 hw_params(priv).rx_chains_num =
185 num_of_ant(hw_params(priv).valid_rx_ant);
186
187 iwl5150_set_ct_threshold(priv);
188
189 /* Set initial sensitivity parameters */
190 hw_params(priv).sens = &iwl5150_sensitivity;
191}
192
193static void iwl5150_temperature(struct iwl_priv *priv)
194{
195 u32 vt = 0;
196 s32 offset = iwl_temp_calib_to_offset(priv->shrd);
197
198 vt = le32_to_cpu(priv->statistics.common.temperature);
199 vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset;
200 /* now vt hold the temperature in Kelvin */
201 priv->temperature = KELVIN_TO_CELSIUS(vt);
202 iwl_tt_handler(priv);
203}
204
205static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
206 struct ieee80211_channel_switch *ch_switch)
207{
208 /*
209 * MULTI-FIXME
210 * See iwlagn_mac_channel_switch.
211 */
212 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
213 struct iwl5000_channel_switch_cmd cmd;
214 const struct iwl_channel_info *ch_info;
215 u32 switch_time_in_usec, ucode_switch_time;
216 u16 ch;
217 u32 tsf_low;
218 u8 switch_count;
219 u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
220 struct ieee80211_vif *vif = ctx->vif;
221 struct iwl_host_cmd hcmd = {
222 .id = REPLY_CHANNEL_SWITCH,
223 .len = { sizeof(cmd), },
224 .flags = CMD_SYNC,
225 .data = { &cmd, },
226 };
227
228 cmd.band = priv->band == IEEE80211_BAND_2GHZ;
229 ch = ch_switch->channel->hw_value;
230 IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
231 ctx->active.channel, ch);
232 cmd.channel = cpu_to_le16(ch);
233 cmd.rxon_flags = ctx->staging.flags;
234 cmd.rxon_filter_flags = ctx->staging.filter_flags;
235 switch_count = ch_switch->count;
236 tsf_low = ch_switch->timestamp & 0x0ffffffff;
237 /*
238 * calculate the ucode channel switch time
239 * adding TSF as one of the factor for when to switch
240 */
241 if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
242 if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
243 beacon_interval)) {
244 switch_count -= (priv->ucode_beacon_time -
245 tsf_low) / beacon_interval;
246 } else
247 switch_count = 0;
248 }
249 if (switch_count <= 1)
250 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
251 else {
252 switch_time_in_usec =
253 vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
254 ucode_switch_time = iwl_usecs_to_beacons(priv,
255 switch_time_in_usec,
256 beacon_interval);
257 cmd.switch_time = iwl_add_beacon_time(priv,
258 priv->ucode_beacon_time,
259 ucode_switch_time,
260 beacon_interval);
261 }
262 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
263 cmd.switch_time);
264 ch_info = iwl_get_channel_info(priv, priv->band, ch);
265 if (ch_info)
266 cmd.expect_beacon = is_channel_radar(ch_info);
267 else {
268 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
269 ctx->active.channel, ch);
270 return -EFAULT;
271 }
272
273 return iwl_dvm_send_cmd(priv, &hcmd);
274}
275
276static struct iwl_lib_ops iwl5000_lib = {
277 .set_hw_params = iwl5000_hw_set_hw_params,
278 .set_channel_switch = iwl5000_hw_channel_switch,
279 .nic_config = iwl5000_nic_config,
280 .eeprom_ops = {
281 .regulatory_bands = {
282 EEPROM_REG_BAND_1_CHANNELS,
283 EEPROM_REG_BAND_2_CHANNELS,
284 EEPROM_REG_BAND_3_CHANNELS,
285 EEPROM_REG_BAND_4_CHANNELS,
286 EEPROM_REG_BAND_5_CHANNELS,
287 EEPROM_REG_BAND_24_HT40_CHANNELS,
288 EEPROM_REG_BAND_52_HT40_CHANNELS
289 },
290 },
291 .temperature = iwlagn_temperature,
292};
293
294static struct iwl_lib_ops iwl5150_lib = {
295 .set_hw_params = iwl5150_hw_set_hw_params,
296 .set_channel_switch = iwl5000_hw_channel_switch,
297 .nic_config = iwl5000_nic_config,
298 .eeprom_ops = {
299 .regulatory_bands = {
300 EEPROM_REG_BAND_1_CHANNELS,
301 EEPROM_REG_BAND_2_CHANNELS,
302 EEPROM_REG_BAND_3_CHANNELS,
303 EEPROM_REG_BAND_4_CHANNELS,
304 EEPROM_REG_BAND_5_CHANNELS,
305 EEPROM_REG_BAND_24_HT40_CHANNELS,
306 EEPROM_REG_BAND_52_HT40_CHANNELS
307 },
308 },
309 .temperature = iwl5150_temperature,
310};
311
312static const struct iwl_base_params iwl5000_base_params = { 58static const struct iwl_base_params iwl5000_base_params = {
313 .eeprom_size = IWLAGN_EEPROM_IMG_SIZE, 59 .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
314 .num_of_queues = IWLAGN_NUM_QUEUES, 60 .num_of_queues = IWLAGN_NUM_QUEUES,
315 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
316 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, 61 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
317 .led_compensation = 51, 62 .led_compensation = 51,
318 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 63 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
319 .chain_noise_scale = 1000, 64 .chain_noise_scale = 1000,
320 .wd_timeout = IWL_LONG_WD_TIMEOUT, 65 .wd_timeout = IWL_WATCHHDOG_DISABLED,
321 .max_event_log_size = 512, 66 .max_event_log_size = 512,
322 .no_idle_support = true, 67 .no_idle_support = true,
323 .wd_disable = true,
324}; 68};
325 69
326static const struct iwl_ht_params iwl5000_ht_params = { 70static const struct iwl_ht_params iwl5000_ht_params = {
@@ -332,11 +76,11 @@ static const struct iwl_ht_params iwl5000_ht_params = {
332 .ucode_api_max = IWL5000_UCODE_API_MAX, \ 76 .ucode_api_max = IWL5000_UCODE_API_MAX, \
333 .ucode_api_ok = IWL5000_UCODE_API_OK, \ 77 .ucode_api_ok = IWL5000_UCODE_API_OK, \
334 .ucode_api_min = IWL5000_UCODE_API_MIN, \ 78 .ucode_api_min = IWL5000_UCODE_API_MIN, \
79 .device_family = IWL_DEVICE_FAMILY_5000, \
335 .max_inst_size = IWLAGN_RTC_INST_SIZE, \ 80 .max_inst_size = IWLAGN_RTC_INST_SIZE, \
336 .max_data_size = IWLAGN_RTC_DATA_SIZE, \ 81 .max_data_size = IWLAGN_RTC_DATA_SIZE, \
337 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, \ 82 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, \
338 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, \ 83 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, \
339 .lib = &iwl5000_lib, \
340 .base_params = &iwl5000_base_params, \ 84 .base_params = &iwl5000_base_params, \
341 .led_mode = IWL_LED_BLINK 85 .led_mode = IWL_LED_BLINK
342 86
@@ -378,11 +122,11 @@ const struct iwl_cfg iwl5350_agn_cfg = {
378 .ucode_api_max = IWL5000_UCODE_API_MAX, 122 .ucode_api_max = IWL5000_UCODE_API_MAX,
379 .ucode_api_ok = IWL5000_UCODE_API_OK, 123 .ucode_api_ok = IWL5000_UCODE_API_OK,
380 .ucode_api_min = IWL5000_UCODE_API_MIN, 124 .ucode_api_min = IWL5000_UCODE_API_MIN,
125 .device_family = IWL_DEVICE_FAMILY_5000,
381 .max_inst_size = IWLAGN_RTC_INST_SIZE, 126 .max_inst_size = IWLAGN_RTC_INST_SIZE,
382 .max_data_size = IWLAGN_RTC_DATA_SIZE, 127 .max_data_size = IWLAGN_RTC_DATA_SIZE,
383 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, 128 .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
384 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, 129 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
385 .lib = &iwl5000_lib,
386 .base_params = &iwl5000_base_params, 130 .base_params = &iwl5000_base_params,
387 .ht_params = &iwl5000_ht_params, 131 .ht_params = &iwl5000_ht_params,
388 .led_mode = IWL_LED_BLINK, 132 .led_mode = IWL_LED_BLINK,
@@ -394,11 +138,11 @@ const struct iwl_cfg iwl5350_agn_cfg = {
394 .ucode_api_max = IWL5150_UCODE_API_MAX, \ 138 .ucode_api_max = IWL5150_UCODE_API_MAX, \
395 .ucode_api_ok = IWL5150_UCODE_API_OK, \ 139 .ucode_api_ok = IWL5150_UCODE_API_OK, \
396 .ucode_api_min = IWL5150_UCODE_API_MIN, \ 140 .ucode_api_min = IWL5150_UCODE_API_MIN, \
141 .device_family = IWL_DEVICE_FAMILY_5150, \
397 .max_inst_size = IWLAGN_RTC_INST_SIZE, \ 142 .max_inst_size = IWLAGN_RTC_INST_SIZE, \
398 .max_data_size = IWLAGN_RTC_DATA_SIZE, \ 143 .max_data_size = IWLAGN_RTC_DATA_SIZE, \
399 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, \ 144 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, \
400 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \ 145 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \
401 .lib = &iwl5150_lib, \
402 .base_params = &iwl5000_base_params, \ 146 .base_params = &iwl5000_base_params, \
403 .no_xtal_calib = true, \ 147 .no_xtal_calib = true, \
404 .led_mode = IWL_LED_BLINK, \ 148 .led_mode = IWL_LED_BLINK, \
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index f0c91505a7f7..381b02cf339c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -24,26 +24,12 @@
24 * 24 *
25 *****************************************************************************/ 25 *****************************************************************************/
26 26
27#include <linux/kernel.h>
28#include <linux/module.h> 27#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/delay.h>
31#include <linux/skbuff.h>
32#include <linux/netdevice.h>
33#include <net/mac80211.h>
34#include <linux/etherdevice.h>
35#include <asm/unaligned.h>
36#include <linux/stringify.h> 28#include <linux/stringify.h>
37 29#include "iwl-config.h"
38#include "iwl-eeprom.h"
39#include "iwl-dev.h"
40#include "iwl-core.h"
41#include "iwl-io.h"
42#include "iwl-agn.h"
43#include "iwl-agn-hw.h"
44#include "iwl-trans.h"
45#include "iwl-shared.h"
46#include "iwl-cfg.h" 30#include "iwl-cfg.h"
31#include "iwl-agn-hw.h"
32#include "iwl-commands.h" /* needed for BT for now */
47 33
48/* Highest firmware API version supported */ 34/* Highest firmware API version supported */
49#define IWL6000_UCODE_API_MAX 6 35#define IWL6000_UCODE_API_MAX 6
@@ -61,6 +47,20 @@
61#define IWL6050_UCODE_API_MIN 4 47#define IWL6050_UCODE_API_MIN 4
62#define IWL6000G2_UCODE_API_MIN 4 48#define IWL6000G2_UCODE_API_MIN 4
63 49
50/* EEPROM versions */
51#define EEPROM_6000_TX_POWER_VERSION (4)
52#define EEPROM_6000_EEPROM_VERSION (0x423)
53#define EEPROM_6050_TX_POWER_VERSION (4)
54#define EEPROM_6050_EEPROM_VERSION (0x532)
55#define EEPROM_6150_TX_POWER_VERSION (6)
56#define EEPROM_6150_EEPROM_VERSION (0x553)
57#define EEPROM_6005_TX_POWER_VERSION (6)
58#define EEPROM_6005_EEPROM_VERSION (0x709)
59#define EEPROM_6030_TX_POWER_VERSION (6)
60#define EEPROM_6030_EEPROM_VERSION (0x709)
61#define EEPROM_6035_TX_POWER_VERSION (6)
62#define EEPROM_6035_EEPROM_VERSION (0x753)
63
64#define IWL6000_FW_PRE "iwlwifi-6000-" 64#define IWL6000_FW_PRE "iwlwifi-6000-"
65#define IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE __stringify(api) ".ucode" 65#define IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE __stringify(api) ".ucode"
66 66
@@ -73,205 +73,9 @@
73#define IWL6030_FW_PRE "iwlwifi-6000g2b-" 73#define IWL6030_FW_PRE "iwlwifi-6000g2b-"
74#define IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE __stringify(api) ".ucode" 74#define IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE __stringify(api) ".ucode"
75 75
76static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
77{
78 /* want Celsius */
79 hw_params(priv).ct_kill_threshold = CT_KILL_THRESHOLD;
80 hw_params(priv).ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
81}
82
83static void iwl6050_additional_nic_config(struct iwl_priv *priv)
84{
85 /* Indicate calibration version to uCode. */
86 if (iwl_eeprom_calib_version(priv->shrd) >= 6)
87 iwl_set_bit(trans(priv), CSR_GP_DRIVER_REG,
88 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
89}
90
91static void iwl6150_additional_nic_config(struct iwl_priv *priv)
92{
93 /* Indicate calibration version to uCode. */
94 if (iwl_eeprom_calib_version(priv->shrd) >= 6)
95 iwl_set_bit(trans(priv), CSR_GP_DRIVER_REG,
96 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
97 iwl_set_bit(trans(priv), CSR_GP_DRIVER_REG,
98 CSR_GP_DRIVER_REG_BIT_6050_1x2);
99}
100
101static void iwl6000i_additional_nic_config(struct iwl_priv *priv)
102{
103 /* 2x2 IPA phy type */
104 iwl_write32(trans(priv), CSR_GP_DRIVER_REG,
105 CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
106}
107
108/* NIC configuration for 6000 series */
109static void iwl6000_nic_config(struct iwl_priv *priv)
110{
111 iwl_rf_config(priv);
112
113 /* do additional nic configuration if needed */
114 if (cfg(priv)->additional_nic_config)
115 cfg(priv)->additional_nic_config(priv);
116}
117
118static const struct iwl_sensitivity_ranges iwl6000_sensitivity = {
119 .min_nrg_cck = 110,
120 .auto_corr_min_ofdm = 80,
121 .auto_corr_min_ofdm_mrc = 128,
122 .auto_corr_min_ofdm_x1 = 105,
123 .auto_corr_min_ofdm_mrc_x1 = 192,
124
125 .auto_corr_max_ofdm = 145,
126 .auto_corr_max_ofdm_mrc = 232,
127 .auto_corr_max_ofdm_x1 = 110,
128 .auto_corr_max_ofdm_mrc_x1 = 232,
129
130 .auto_corr_min_cck = 125,
131 .auto_corr_max_cck = 175,
132 .auto_corr_min_cck_mrc = 160,
133 .auto_corr_max_cck_mrc = 310,
134 .nrg_th_cck = 110,
135 .nrg_th_ofdm = 110,
136
137 .barker_corr_th_min = 190,
138 .barker_corr_th_min_mrc = 336,
139 .nrg_th_cca = 62,
140};
141
142static void iwl6000_hw_set_hw_params(struct iwl_priv *priv)
143{
144 hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
145 BIT(IEEE80211_BAND_5GHZ);
146
147 hw_params(priv).tx_chains_num =
148 num_of_ant(hw_params(priv).valid_tx_ant);
149 if (cfg(priv)->rx_with_siso_diversity)
150 hw_params(priv).rx_chains_num = 1;
151 else
152 hw_params(priv).rx_chains_num =
153 num_of_ant(hw_params(priv).valid_rx_ant);
154
155 iwl6000_set_ct_threshold(priv);
156
157 /* Set initial sensitivity parameters */
158 hw_params(priv).sens = &iwl6000_sensitivity;
159
160}
161
162static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
163 struct ieee80211_channel_switch *ch_switch)
164{
165 /*
166 * MULTI-FIXME
167 * See iwlagn_mac_channel_switch.
168 */
169 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
170 struct iwl6000_channel_switch_cmd cmd;
171 const struct iwl_channel_info *ch_info;
172 u32 switch_time_in_usec, ucode_switch_time;
173 u16 ch;
174 u32 tsf_low;
175 u8 switch_count;
176 u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
177 struct ieee80211_vif *vif = ctx->vif;
178 struct iwl_host_cmd hcmd = {
179 .id = REPLY_CHANNEL_SWITCH,
180 .len = { sizeof(cmd), },
181 .flags = CMD_SYNC,
182 .data = { &cmd, },
183 };
184
185 cmd.band = priv->band == IEEE80211_BAND_2GHZ;
186 ch = ch_switch->channel->hw_value;
187 IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
188 ctx->active.channel, ch);
189 cmd.channel = cpu_to_le16(ch);
190 cmd.rxon_flags = ctx->staging.flags;
191 cmd.rxon_filter_flags = ctx->staging.filter_flags;
192 switch_count = ch_switch->count;
193 tsf_low = ch_switch->timestamp & 0x0ffffffff;
194 /*
195 * calculate the ucode channel switch time
196 * adding TSF as one of the factor for when to switch
197 */
198 if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
199 if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
200 beacon_interval)) {
201 switch_count -= (priv->ucode_beacon_time -
202 tsf_low) / beacon_interval;
203 } else
204 switch_count = 0;
205 }
206 if (switch_count <= 1)
207 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
208 else {
209 switch_time_in_usec =
210 vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
211 ucode_switch_time = iwl_usecs_to_beacons(priv,
212 switch_time_in_usec,
213 beacon_interval);
214 cmd.switch_time = iwl_add_beacon_time(priv,
215 priv->ucode_beacon_time,
216 ucode_switch_time,
217 beacon_interval);
218 }
219 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
220 cmd.switch_time);
221 ch_info = iwl_get_channel_info(priv, priv->band, ch);
222 if (ch_info)
223 cmd.expect_beacon = is_channel_radar(ch_info);
224 else {
225 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
226 ctx->active.channel, ch);
227 return -EFAULT;
228 }
229
230 return iwl_dvm_send_cmd(priv, &hcmd);
231}
232
233static struct iwl_lib_ops iwl6000_lib = {
234 .set_hw_params = iwl6000_hw_set_hw_params,
235 .set_channel_switch = iwl6000_hw_channel_switch,
236 .nic_config = iwl6000_nic_config,
237 .eeprom_ops = {
238 .regulatory_bands = {
239 EEPROM_REG_BAND_1_CHANNELS,
240 EEPROM_REG_BAND_2_CHANNELS,
241 EEPROM_REG_BAND_3_CHANNELS,
242 EEPROM_REG_BAND_4_CHANNELS,
243 EEPROM_REG_BAND_5_CHANNELS,
244 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
245 EEPROM_REG_BAND_52_HT40_CHANNELS
246 },
247 .enhanced_txpower = true,
248 },
249 .temperature = iwlagn_temperature,
250};
251
252static struct iwl_lib_ops iwl6030_lib = {
253 .set_hw_params = iwl6000_hw_set_hw_params,
254 .set_channel_switch = iwl6000_hw_channel_switch,
255 .nic_config = iwl6000_nic_config,
256 .eeprom_ops = {
257 .regulatory_bands = {
258 EEPROM_REG_BAND_1_CHANNELS,
259 EEPROM_REG_BAND_2_CHANNELS,
260 EEPROM_REG_BAND_3_CHANNELS,
261 EEPROM_REG_BAND_4_CHANNELS,
262 EEPROM_REG_BAND_5_CHANNELS,
263 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
264 EEPROM_REG_BAND_52_HT40_CHANNELS
265 },
266 .enhanced_txpower = true,
267 },
268 .temperature = iwlagn_temperature,
269};
270
271static const struct iwl_base_params iwl6000_base_params = { 76static const struct iwl_base_params iwl6000_base_params = {
272 .eeprom_size = OTP_LOW_IMAGE_SIZE, 77 .eeprom_size = OTP_LOW_IMAGE_SIZE,
273 .num_of_queues = IWLAGN_NUM_QUEUES, 78 .num_of_queues = IWLAGN_NUM_QUEUES,
274 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
275 .pll_cfg_val = 0, 79 .pll_cfg_val = 0,
276 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 80 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
277 .shadow_ram_support = true, 81 .shadow_ram_support = true,
@@ -288,7 +92,6 @@ static const struct iwl_base_params iwl6000_base_params = {
288static const struct iwl_base_params iwl6050_base_params = { 92static const struct iwl_base_params iwl6050_base_params = {
289 .eeprom_size = OTP_LOW_IMAGE_SIZE, 93 .eeprom_size = OTP_LOW_IMAGE_SIZE,
290 .num_of_queues = IWLAGN_NUM_QUEUES, 94 .num_of_queues = IWLAGN_NUM_QUEUES,
291 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
292 .pll_cfg_val = 0, 95 .pll_cfg_val = 0,
293 .max_ll_items = OTP_MAX_LL_ITEMS_6x50, 96 .max_ll_items = OTP_MAX_LL_ITEMS_6x50,
294 .shadow_ram_support = true, 97 .shadow_ram_support = true,
@@ -305,7 +108,6 @@ static const struct iwl_base_params iwl6050_base_params = {
305static const struct iwl_base_params iwl6000_g2_base_params = { 108static const struct iwl_base_params iwl6000_g2_base_params = {
306 .eeprom_size = OTP_LOW_IMAGE_SIZE, 109 .eeprom_size = OTP_LOW_IMAGE_SIZE,
307 .num_of_queues = IWLAGN_NUM_QUEUES, 110 .num_of_queues = IWLAGN_NUM_QUEUES,
308 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
309 .pll_cfg_val = 0, 111 .pll_cfg_val = 0,
310 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 112 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
311 .shadow_ram_support = true, 113 .shadow_ram_support = true,
@@ -338,11 +140,11 @@ static const struct iwl_bt_params iwl6000_bt_params = {
338 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \ 140 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
339 .ucode_api_ok = IWL6000G2_UCODE_API_OK, \ 141 .ucode_api_ok = IWL6000G2_UCODE_API_OK, \
340 .ucode_api_min = IWL6000G2_UCODE_API_MIN, \ 142 .ucode_api_min = IWL6000G2_UCODE_API_MIN, \
143 .device_family = IWL_DEVICE_FAMILY_6005, \
341 .max_inst_size = IWL60_RTC_INST_SIZE, \ 144 .max_inst_size = IWL60_RTC_INST_SIZE, \
342 .max_data_size = IWL60_RTC_DATA_SIZE, \ 145 .max_data_size = IWL60_RTC_DATA_SIZE, \
343 .eeprom_ver = EEPROM_6005_EEPROM_VERSION, \ 146 .eeprom_ver = EEPROM_6005_EEPROM_VERSION, \
344 .eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \ 147 .eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
345 .lib = &iwl6000_lib, \
346 .base_params = &iwl6000_g2_base_params, \ 148 .base_params = &iwl6000_g2_base_params, \
347 .need_temp_offset_calib = true, \ 149 .need_temp_offset_calib = true, \
348 .led_mode = IWL_LED_RF_STATE 150 .led_mode = IWL_LED_RF_STATE
@@ -392,11 +194,11 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
392 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \ 194 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
393 .ucode_api_ok = IWL6000G2B_UCODE_API_OK, \ 195 .ucode_api_ok = IWL6000G2B_UCODE_API_OK, \
394 .ucode_api_min = IWL6000G2_UCODE_API_MIN, \ 196 .ucode_api_min = IWL6000G2_UCODE_API_MIN, \
197 .device_family = IWL_DEVICE_FAMILY_6030, \
395 .max_inst_size = IWL60_RTC_INST_SIZE, \ 198 .max_inst_size = IWL60_RTC_INST_SIZE, \
396 .max_data_size = IWL60_RTC_DATA_SIZE, \ 199 .max_data_size = IWL60_RTC_DATA_SIZE, \
397 .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \ 200 .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \
398 .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ 201 .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
399 .lib = &iwl6030_lib, \
400 .base_params = &iwl6000_g2_base_params, \ 202 .base_params = &iwl6000_g2_base_params, \
401 .bt_params = &iwl6000_bt_params, \ 203 .bt_params = &iwl6000_bt_params, \
402 .need_temp_offset_calib = true, \ 204 .need_temp_offset_calib = true, \
@@ -463,14 +265,13 @@ const struct iwl_cfg iwl130_bg_cfg = {
463 .ucode_api_max = IWL6000_UCODE_API_MAX, \ 265 .ucode_api_max = IWL6000_UCODE_API_MAX, \
464 .ucode_api_ok = IWL6000_UCODE_API_OK, \ 266 .ucode_api_ok = IWL6000_UCODE_API_OK, \
465 .ucode_api_min = IWL6000_UCODE_API_MIN, \ 267 .ucode_api_min = IWL6000_UCODE_API_MIN, \
268 .device_family = IWL_DEVICE_FAMILY_6000i, \
466 .max_inst_size = IWL60_RTC_INST_SIZE, \ 269 .max_inst_size = IWL60_RTC_INST_SIZE, \
467 .max_data_size = IWL60_RTC_DATA_SIZE, \ 270 .max_data_size = IWL60_RTC_DATA_SIZE, \
468 .valid_tx_ant = ANT_BC, /* .cfg overwrite */ \ 271 .valid_tx_ant = ANT_BC, /* .cfg overwrite */ \
469 .valid_rx_ant = ANT_BC, /* .cfg overwrite */ \ 272 .valid_rx_ant = ANT_BC, /* .cfg overwrite */ \
470 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, \ 273 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, \
471 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, \ 274 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, \
472 .lib = &iwl6000_lib, \
473 .additional_nic_config = iwl6000i_additional_nic_config,\
474 .base_params = &iwl6000_base_params, \ 275 .base_params = &iwl6000_base_params, \
475 .led_mode = IWL_LED_BLINK 276 .led_mode = IWL_LED_BLINK
476 277
@@ -494,12 +295,11 @@ const struct iwl_cfg iwl6000i_2bg_cfg = {
494 .fw_name_pre = IWL6050_FW_PRE, \ 295 .fw_name_pre = IWL6050_FW_PRE, \
495 .ucode_api_max = IWL6050_UCODE_API_MAX, \ 296 .ucode_api_max = IWL6050_UCODE_API_MAX, \
496 .ucode_api_min = IWL6050_UCODE_API_MIN, \ 297 .ucode_api_min = IWL6050_UCODE_API_MIN, \
298 .device_family = IWL_DEVICE_FAMILY_6050, \
497 .max_inst_size = IWL60_RTC_INST_SIZE, \ 299 .max_inst_size = IWL60_RTC_INST_SIZE, \
498 .max_data_size = IWL60_RTC_DATA_SIZE, \ 300 .max_data_size = IWL60_RTC_DATA_SIZE, \
499 .valid_tx_ant = ANT_AB, /* .cfg overwrite */ \ 301 .valid_tx_ant = ANT_AB, /* .cfg overwrite */ \
500 .valid_rx_ant = ANT_AB, /* .cfg overwrite */ \ 302 .valid_rx_ant = ANT_AB, /* .cfg overwrite */ \
501 .lib = &iwl6000_lib, \
502 .additional_nic_config = iwl6050_additional_nic_config, \
503 .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \ 303 .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \
504 .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \ 304 .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \
505 .base_params = &iwl6050_base_params, \ 305 .base_params = &iwl6050_base_params, \
@@ -521,10 +321,9 @@ const struct iwl_cfg iwl6050_2abg_cfg = {
521 .fw_name_pre = IWL6050_FW_PRE, \ 321 .fw_name_pre = IWL6050_FW_PRE, \
522 .ucode_api_max = IWL6050_UCODE_API_MAX, \ 322 .ucode_api_max = IWL6050_UCODE_API_MAX, \
523 .ucode_api_min = IWL6050_UCODE_API_MIN, \ 323 .ucode_api_min = IWL6050_UCODE_API_MIN, \
324 .device_family = IWL_DEVICE_FAMILY_6150, \
524 .max_inst_size = IWL60_RTC_INST_SIZE, \ 325 .max_inst_size = IWL60_RTC_INST_SIZE, \
525 .max_data_size = IWL60_RTC_DATA_SIZE, \ 326 .max_data_size = IWL60_RTC_DATA_SIZE, \
526 .lib = &iwl6000_lib, \
527 .additional_nic_config = iwl6150_additional_nic_config, \
528 .eeprom_ver = EEPROM_6150_EEPROM_VERSION, \ 327 .eeprom_ver = EEPROM_6150_EEPROM_VERSION, \
529 .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \ 328 .eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \
530 .base_params = &iwl6050_base_params, \ 329 .base_params = &iwl6050_base_params, \
@@ -548,11 +347,11 @@ const struct iwl_cfg iwl6000_3agn_cfg = {
548 .ucode_api_max = IWL6000_UCODE_API_MAX, 347 .ucode_api_max = IWL6000_UCODE_API_MAX,
549 .ucode_api_ok = IWL6000_UCODE_API_OK, 348 .ucode_api_ok = IWL6000_UCODE_API_OK,
550 .ucode_api_min = IWL6000_UCODE_API_MIN, 349 .ucode_api_min = IWL6000_UCODE_API_MIN,
350 .device_family = IWL_DEVICE_FAMILY_6000,
551 .max_inst_size = IWL60_RTC_INST_SIZE, 351 .max_inst_size = IWL60_RTC_INST_SIZE,
552 .max_data_size = IWL60_RTC_DATA_SIZE, 352 .max_data_size = IWL60_RTC_DATA_SIZE,
553 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 353 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
554 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, 354 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
555 .lib = &iwl6000_lib,
556 .base_params = &iwl6000_base_params, 355 .base_params = &iwl6000_base_params,
557 .ht_params = &iwl6000_ht_params, 356 .ht_params = &iwl6000_ht_params,
558 .led_mode = IWL_LED_BLINK, 357 .led_mode = IWL_LED_BLINK,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
index 84cbe7bb504c..95f27f1a423b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
@@ -64,7 +64,6 @@
64#include <net/mac80211.h> 64#include <net/mac80211.h>
65 65
66#include "iwl-dev.h" 66#include "iwl-dev.h"
67#include "iwl-core.h"
68#include "iwl-agn-calib.h" 67#include "iwl-agn-calib.h"
69#include "iwl-trans.h" 68#include "iwl-trans.h"
70#include "iwl-agn.h" 69#include "iwl-agn.h"
@@ -190,7 +189,7 @@ static int iwl_sens_energy_cck(struct iwl_priv *priv,
190 u32 max_false_alarms = MAX_FA_CCK * rx_enable_time; 189 u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
191 u32 min_false_alarms = MIN_FA_CCK * rx_enable_time; 190 u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
192 struct iwl_sensitivity_data *data = NULL; 191 struct iwl_sensitivity_data *data = NULL;
193 const struct iwl_sensitivity_ranges *ranges = hw_params(priv).sens; 192 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
194 193
195 data = &(priv->sensitivity_data); 194 data = &(priv->sensitivity_data);
196 195
@@ -373,7 +372,7 @@ static int iwl_sens_auto_corr_ofdm(struct iwl_priv *priv,
373 u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time; 372 u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
374 u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time; 373 u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
375 struct iwl_sensitivity_data *data = NULL; 374 struct iwl_sensitivity_data *data = NULL;
376 const struct iwl_sensitivity_ranges *ranges = hw_params(priv).sens; 375 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
377 376
378 data = &(priv->sensitivity_data); 377 data = &(priv->sensitivity_data);
379 378
@@ -521,7 +520,7 @@ static int iwl_enhance_sensitivity_write(struct iwl_priv *priv)
521 520
522 iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.enhance_table[0]); 521 iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.enhance_table[0]);
523 522
524 if (cfg(priv)->base_params->hd_v2) { 523 if (priv->cfg->base_params->hd_v2) {
525 cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] = 524 cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] =
526 HD_INA_NON_SQUARE_DET_OFDM_DATA_V2; 525 HD_INA_NON_SQUARE_DET_OFDM_DATA_V2;
527 cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] = 526 cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] =
@@ -597,9 +596,9 @@ void iwl_init_sensitivity(struct iwl_priv *priv)
597 int ret = 0; 596 int ret = 0;
598 int i; 597 int i;
599 struct iwl_sensitivity_data *data = NULL; 598 struct iwl_sensitivity_data *data = NULL;
600 const struct iwl_sensitivity_ranges *ranges = hw_params(priv).sens; 599 const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
601 600
602 if (priv->disable_sens_cal) 601 if (priv->calib_disabled & IWL_SENSITIVITY_CALIB_DISABLED)
603 return; 602 return;
604 603
605 IWL_DEBUG_CALIB(priv, "Start iwl_init_sensitivity\n"); 604 IWL_DEBUG_CALIB(priv, "Start iwl_init_sensitivity\n");
@@ -663,7 +662,7 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv)
663 struct statistics_rx_phy *ofdm, *cck; 662 struct statistics_rx_phy *ofdm, *cck;
664 struct statistics_general_data statis; 663 struct statistics_general_data statis;
665 664
666 if (priv->disable_sens_cal) 665 if (priv->calib_disabled & IWL_SENSITIVITY_CALIB_DISABLED)
667 return; 666 return;
668 667
669 data = &(priv->sensitivity_data); 668 data = &(priv->sensitivity_data);
@@ -833,28 +832,28 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
833 * To be safe, simply mask out any chains that we know 832 * To be safe, simply mask out any chains that we know
834 * are not on the device. 833 * are not on the device.
835 */ 834 */
836 active_chains &= hw_params(priv).valid_rx_ant; 835 active_chains &= priv->hw_params.valid_rx_ant;
837 836
838 num_tx_chains = 0; 837 num_tx_chains = 0;
839 for (i = 0; i < NUM_RX_CHAINS; i++) { 838 for (i = 0; i < NUM_RX_CHAINS; i++) {
840 /* loops on all the bits of 839 /* loops on all the bits of
841 * priv->hw_setting.valid_tx_ant */ 840 * priv->hw_setting.valid_tx_ant */
842 u8 ant_msk = (1 << i); 841 u8 ant_msk = (1 << i);
843 if (!(hw_params(priv).valid_tx_ant & ant_msk)) 842 if (!(priv->hw_params.valid_tx_ant & ant_msk))
844 continue; 843 continue;
845 844
846 num_tx_chains++; 845 num_tx_chains++;
847 if (data->disconn_array[i] == 0) 846 if (data->disconn_array[i] == 0)
848 /* there is a Tx antenna connected */ 847 /* there is a Tx antenna connected */
849 break; 848 break;
850 if (num_tx_chains == hw_params(priv).tx_chains_num && 849 if (num_tx_chains == priv->hw_params.tx_chains_num &&
851 data->disconn_array[i]) { 850 data->disconn_array[i]) {
852 /* 851 /*
853 * If all chains are disconnected 852 * If all chains are disconnected
854 * connect the first valid tx chain 853 * connect the first valid tx chain
855 */ 854 */
856 first_chain = 855 first_chain =
857 find_first_chain(hw_params(priv).valid_tx_ant); 856 find_first_chain(priv->hw_params.valid_tx_ant);
858 data->disconn_array[first_chain] = 0; 857 data->disconn_array[first_chain] = 0;
859 active_chains |= BIT(first_chain); 858 active_chains |= BIT(first_chain);
860 IWL_DEBUG_CALIB(priv, 859 IWL_DEBUG_CALIB(priv,
@@ -864,13 +863,13 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
864 } 863 }
865 } 864 }
866 865
867 if (active_chains != hw_params(priv).valid_rx_ant && 866 if (active_chains != priv->hw_params.valid_rx_ant &&
868 active_chains != priv->chain_noise_data.active_chains) 867 active_chains != priv->chain_noise_data.active_chains)
869 IWL_DEBUG_CALIB(priv, 868 IWL_DEBUG_CALIB(priv,
870 "Detected that not all antennas are connected! " 869 "Detected that not all antennas are connected! "
871 "Connected: %#x, valid: %#x.\n", 870 "Connected: %#x, valid: %#x.\n",
872 active_chains, 871 active_chains,
873 hw_params(priv).valid_rx_ant); 872 priv->hw_params.valid_rx_ant);
874 873
875 /* Save for use within RXON, TX, SCAN commands, etc. */ 874 /* Save for use within RXON, TX, SCAN commands, etc. */
876 data->active_chains = active_chains; 875 data->active_chains = active_chains;
@@ -895,7 +894,7 @@ static void iwlagn_gain_computation(struct iwl_priv *priv,
895 continue; 894 continue;
896 } 895 }
897 896
898 delta_g = (cfg(priv)->base_params->chain_noise_scale * 897 delta_g = (priv->cfg->base_params->chain_noise_scale *
899 ((s32)average_noise[default_chain] - 898 ((s32)average_noise[default_chain] -
900 (s32)average_noise[i])) / 1500; 899 (s32)average_noise[i])) / 1500;
901 900
@@ -970,7 +969,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
970 */ 969 */
971 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 970 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
972 971
973 if (priv->disable_chain_noise_cal) 972 if (priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED)
974 return; 973 return;
975 974
976 data = &(priv->chain_noise_data); 975 data = &(priv->chain_noise_data);
@@ -1051,11 +1050,11 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
1051 return; 1050 return;
1052 1051
1053 /* Analyze signal for disconnected antenna */ 1052 /* Analyze signal for disconnected antenna */
1054 if (cfg(priv)->bt_params && 1053 if (priv->cfg->bt_params &&
1055 cfg(priv)->bt_params->advanced_bt_coexist) { 1054 priv->cfg->bt_params->advanced_bt_coexist) {
1056 /* Disable disconnected antenna algorithm for advanced 1055 /* Disable disconnected antenna algorithm for advanced
1057 bt coex, assuming valid antennas are connected */ 1056 bt coex, assuming valid antennas are connected */
1058 data->active_chains = hw_params(priv).valid_rx_ant; 1057 data->active_chains = priv->hw_params.valid_rx_ant;
1059 for (i = 0; i < NUM_RX_CHAINS; i++) 1058 for (i = 0; i < NUM_RX_CHAINS; i++)
1060 if (!(data->active_chains & (1<<i))) 1059 if (!(data->active_chains & (1<<i)))
1061 data->disconn_array[i] = 1; 1060 data->disconn_array[i] = 1;
@@ -1085,7 +1084,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
1085 min_average_noise, min_average_noise_antenna_i); 1084 min_average_noise, min_average_noise_antenna_i);
1086 1085
1087 iwlagn_gain_computation(priv, average_noise, 1086 iwlagn_gain_computation(priv, average_noise,
1088 find_first_chain(hw_params(priv).valid_rx_ant)); 1087 find_first_chain(priv->hw_params.valid_rx_ant));
1089 1088
1090 /* Some power changes may have been made during the calibration. 1089 /* Some power changes may have been made during the calibration.
1091 * Update and commit the RXON 1090 * Update and commit the RXON
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h b/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
index 9ed6683314a7..dbe13787f272 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
@@ -63,7 +63,6 @@
63#define __iwl_calib_h__ 63#define __iwl_calib_h__
64 64
65#include "iwl-dev.h" 65#include "iwl-dev.h"
66#include "iwl-core.h"
67#include "iwl-commands.h" 66#include "iwl-commands.h"
68 67
69void iwl_chain_noise_calibration(struct iwl_priv *priv); 68void iwl_chain_noise_calibration(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-devices.c b/drivers/net/wireless/iwlwifi/iwl-agn-devices.c
new file mode 100644
index 000000000000..48533b3a0f9a
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-devices.c
@@ -0,0 +1,755 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27/*
28 * DVM device-specific data & functions
29 */
30#include "iwl-agn.h"
31#include "iwl-dev.h"
32#include "iwl-commands.h"
33#include "iwl-io.h"
34#include "iwl-prph.h"
35
36/*
37 * 1000 series
38 * ===========
39 */
40
41/*
42 * For 1000, use advance thermal throttling critical temperature threshold,
43 * but legacy thermal management implementation for now.
44 * This is for the reason of 1000 uCode using advance thermal throttling API
45 * but not implement ct_kill_exit based on ct_kill exit temperature
46 * so the thermal throttling will still based on legacy thermal throttling
47 * management.
48 * The code here need to be modified once 1000 uCode has the advanced thermal
49 * throttling algorithm in place
50 */
51static void iwl1000_set_ct_threshold(struct iwl_priv *priv)
52{
53 /* want Celsius */
54 priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
55 priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
56}
57
58/* NIC configuration for 1000 series */
59static void iwl1000_nic_config(struct iwl_priv *priv)
60{
61 /* set CSR_HW_CONFIG_REG for uCode use */
62 iwl_set_bit(priv->trans, CSR_HW_IF_CONFIG_REG,
63 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
64 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
65
66 /* Setting digital SVR for 1000 card to 1.32V */
67 /* locking is acquired in iwl_set_bits_mask_prph() function */
68 iwl_set_bits_mask_prph(priv->trans, APMG_DIGITAL_SVR_REG,
69 APMG_SVR_DIGITAL_VOLTAGE_1_32,
70 ~APMG_SVR_VOLTAGE_CONFIG_BIT_MSK);
71}
72
73/**
74 * iwl_beacon_time_mask_low - mask of lower 32 bit of beacon time
75 * @priv -- pointer to iwl_priv data structure
76 * @tsf_bits -- number of bits need to shift for masking)
77 */
78static inline u32 iwl_beacon_time_mask_low(struct iwl_priv *priv,
79 u16 tsf_bits)
80{
81 return (1 << tsf_bits) - 1;
82}
83
84/**
85 * iwl_beacon_time_mask_high - mask of higher 32 bit of beacon time
86 * @priv -- pointer to iwl_priv data structure
87 * @tsf_bits -- number of bits need to shift for masking)
88 */
89static inline u32 iwl_beacon_time_mask_high(struct iwl_priv *priv,
90 u16 tsf_bits)
91{
92 return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
93}
94
95/*
96 * extended beacon time format
97 * time in usec will be changed into a 32-bit value in extended:internal format
98 * the extended part is the beacon counts
99 * the internal part is the time in usec within one beacon interval
100 */
101static u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec,
102 u32 beacon_interval)
103{
104 u32 quot;
105 u32 rem;
106 u32 interval = beacon_interval * TIME_UNIT;
107
108 if (!interval || !usec)
109 return 0;
110
111 quot = (usec / interval) &
112 (iwl_beacon_time_mask_high(priv, IWLAGN_EXT_BEACON_TIME_POS) >>
113 IWLAGN_EXT_BEACON_TIME_POS);
114 rem = (usec % interval) & iwl_beacon_time_mask_low(priv,
115 IWLAGN_EXT_BEACON_TIME_POS);
116
117 return (quot << IWLAGN_EXT_BEACON_TIME_POS) + rem;
118}
119
120/* base is usually what we get from ucode with each received frame,
121 * the same as HW timer counter counting down
122 */
123static __le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
124 u32 addon, u32 beacon_interval)
125{
126 u32 base_low = base & iwl_beacon_time_mask_low(priv,
127 IWLAGN_EXT_BEACON_TIME_POS);
128 u32 addon_low = addon & iwl_beacon_time_mask_low(priv,
129 IWLAGN_EXT_BEACON_TIME_POS);
130 u32 interval = beacon_interval * TIME_UNIT;
131 u32 res = (base & iwl_beacon_time_mask_high(priv,
132 IWLAGN_EXT_BEACON_TIME_POS)) +
133 (addon & iwl_beacon_time_mask_high(priv,
134 IWLAGN_EXT_BEACON_TIME_POS));
135
136 if (base_low > addon_low)
137 res += base_low - addon_low;
138 else if (base_low < addon_low) {
139 res += interval + base_low - addon_low;
140 res += (1 << IWLAGN_EXT_BEACON_TIME_POS);
141 } else
142 res += (1 << IWLAGN_EXT_BEACON_TIME_POS);
143
144 return cpu_to_le32(res);
145}
146
147static const struct iwl_sensitivity_ranges iwl1000_sensitivity = {
148 .min_nrg_cck = 95,
149 .auto_corr_min_ofdm = 90,
150 .auto_corr_min_ofdm_mrc = 170,
151 .auto_corr_min_ofdm_x1 = 120,
152 .auto_corr_min_ofdm_mrc_x1 = 240,
153
154 .auto_corr_max_ofdm = 120,
155 .auto_corr_max_ofdm_mrc = 210,
156 .auto_corr_max_ofdm_x1 = 155,
157 .auto_corr_max_ofdm_mrc_x1 = 290,
158
159 .auto_corr_min_cck = 125,
160 .auto_corr_max_cck = 200,
161 .auto_corr_min_cck_mrc = 170,
162 .auto_corr_max_cck_mrc = 400,
163 .nrg_th_cck = 95,
164 .nrg_th_ofdm = 95,
165
166 .barker_corr_th_min = 190,
167 .barker_corr_th_min_mrc = 390,
168 .nrg_th_cca = 62,
169};
170
171static void iwl1000_hw_set_hw_params(struct iwl_priv *priv)
172{
173 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ);
174
175 priv->hw_params.tx_chains_num =
176 num_of_ant(priv->hw_params.valid_tx_ant);
177 if (priv->cfg->rx_with_siso_diversity)
178 priv->hw_params.rx_chains_num = 1;
179 else
180 priv->hw_params.rx_chains_num =
181 num_of_ant(priv->hw_params.valid_rx_ant);
182
183 iwl1000_set_ct_threshold(priv);
184
185 /* Set initial sensitivity parameters */
186 priv->hw_params.sens = &iwl1000_sensitivity;
187}
188
189struct iwl_lib_ops iwl1000_lib = {
190 .set_hw_params = iwl1000_hw_set_hw_params,
191 .nic_config = iwl1000_nic_config,
192 .eeprom_ops = {
193 .regulatory_bands = {
194 EEPROM_REG_BAND_1_CHANNELS,
195 EEPROM_REG_BAND_2_CHANNELS,
196 EEPROM_REG_BAND_3_CHANNELS,
197 EEPROM_REG_BAND_4_CHANNELS,
198 EEPROM_REG_BAND_5_CHANNELS,
199 EEPROM_REG_BAND_24_HT40_CHANNELS,
200 EEPROM_REGULATORY_BAND_NO_HT40,
201 },
202 },
203 .temperature = iwlagn_temperature,
204};
205
206
207/*
208 * 2000 series
209 * ===========
210 */
211
212static void iwl2000_set_ct_threshold(struct iwl_priv *priv)
213{
214 /* want Celsius */
215 priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
216 priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
217}
218
219/* NIC configuration for 2000 series */
220static void iwl2000_nic_config(struct iwl_priv *priv)
221{
222 iwl_rf_config(priv);
223
224 iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
225 CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
226}
227
228static const struct iwl_sensitivity_ranges iwl2000_sensitivity = {
229 .min_nrg_cck = 97,
230 .auto_corr_min_ofdm = 80,
231 .auto_corr_min_ofdm_mrc = 128,
232 .auto_corr_min_ofdm_x1 = 105,
233 .auto_corr_min_ofdm_mrc_x1 = 192,
234
235 .auto_corr_max_ofdm = 145,
236 .auto_corr_max_ofdm_mrc = 232,
237 .auto_corr_max_ofdm_x1 = 110,
238 .auto_corr_max_ofdm_mrc_x1 = 232,
239
240 .auto_corr_min_cck = 125,
241 .auto_corr_max_cck = 175,
242 .auto_corr_min_cck_mrc = 160,
243 .auto_corr_max_cck_mrc = 310,
244 .nrg_th_cck = 97,
245 .nrg_th_ofdm = 100,
246
247 .barker_corr_th_min = 190,
248 .barker_corr_th_min_mrc = 390,
249 .nrg_th_cca = 62,
250};
251
252static void iwl2000_hw_set_hw_params(struct iwl_priv *priv)
253{
254 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ);
255
256 priv->hw_params.tx_chains_num =
257 num_of_ant(priv->hw_params.valid_tx_ant);
258 if (priv->cfg->rx_with_siso_diversity)
259 priv->hw_params.rx_chains_num = 1;
260 else
261 priv->hw_params.rx_chains_num =
262 num_of_ant(priv->hw_params.valid_rx_ant);
263
264 iwl2000_set_ct_threshold(priv);
265
266 /* Set initial sensitivity parameters */
267 priv->hw_params.sens = &iwl2000_sensitivity;
268}
269
270struct iwl_lib_ops iwl2000_lib = {
271 .set_hw_params = iwl2000_hw_set_hw_params,
272 .nic_config = iwl2000_nic_config,
273 .eeprom_ops = {
274 .regulatory_bands = {
275 EEPROM_REG_BAND_1_CHANNELS,
276 EEPROM_REG_BAND_2_CHANNELS,
277 EEPROM_REG_BAND_3_CHANNELS,
278 EEPROM_REG_BAND_4_CHANNELS,
279 EEPROM_REG_BAND_5_CHANNELS,
280 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
281 EEPROM_REGULATORY_BAND_NO_HT40,
282 },
283 .enhanced_txpower = true,
284 },
285 .temperature = iwlagn_temperature,
286};
287
288struct iwl_lib_ops iwl2030_lib = {
289 .set_hw_params = iwl2000_hw_set_hw_params,
290 .nic_config = iwl2000_nic_config,
291 .eeprom_ops = {
292 .regulatory_bands = {
293 EEPROM_REG_BAND_1_CHANNELS,
294 EEPROM_REG_BAND_2_CHANNELS,
295 EEPROM_REG_BAND_3_CHANNELS,
296 EEPROM_REG_BAND_4_CHANNELS,
297 EEPROM_REG_BAND_5_CHANNELS,
298 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
299 EEPROM_REGULATORY_BAND_NO_HT40,
300 },
301 .enhanced_txpower = true,
302 },
303 .temperature = iwlagn_temperature,
304};
305
306/*
307 * 5000 series
308 * ===========
309 */
310
311/* NIC configuration for 5000 series */
312static void iwl5000_nic_config(struct iwl_priv *priv)
313{
314 iwl_rf_config(priv);
315
316 /* W/A : NIC is stuck in a reset state after Early PCIe power off
317 * (PCIe power is lost before PERST# is asserted),
318 * causing ME FW to lose ownership and not being able to obtain it back.
319 */
320 iwl_set_bits_mask_prph(priv->trans, APMG_PS_CTRL_REG,
321 APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
322 ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
323}
324
325static const struct iwl_sensitivity_ranges iwl5000_sensitivity = {
326 .min_nrg_cck = 100,
327 .auto_corr_min_ofdm = 90,
328 .auto_corr_min_ofdm_mrc = 170,
329 .auto_corr_min_ofdm_x1 = 105,
330 .auto_corr_min_ofdm_mrc_x1 = 220,
331
332 .auto_corr_max_ofdm = 120,
333 .auto_corr_max_ofdm_mrc = 210,
334 .auto_corr_max_ofdm_x1 = 120,
335 .auto_corr_max_ofdm_mrc_x1 = 240,
336
337 .auto_corr_min_cck = 125,
338 .auto_corr_max_cck = 200,
339 .auto_corr_min_cck_mrc = 200,
340 .auto_corr_max_cck_mrc = 400,
341 .nrg_th_cck = 100,
342 .nrg_th_ofdm = 100,
343
344 .barker_corr_th_min = 190,
345 .barker_corr_th_min_mrc = 390,
346 .nrg_th_cca = 62,
347};
348
349static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
350 .min_nrg_cck = 95,
351 .auto_corr_min_ofdm = 90,
352 .auto_corr_min_ofdm_mrc = 170,
353 .auto_corr_min_ofdm_x1 = 105,
354 .auto_corr_min_ofdm_mrc_x1 = 220,
355
356 .auto_corr_max_ofdm = 120,
357 .auto_corr_max_ofdm_mrc = 210,
358 /* max = min for performance bug in 5150 DSP */
359 .auto_corr_max_ofdm_x1 = 105,
360 .auto_corr_max_ofdm_mrc_x1 = 220,
361
362 .auto_corr_min_cck = 125,
363 .auto_corr_max_cck = 200,
364 .auto_corr_min_cck_mrc = 170,
365 .auto_corr_max_cck_mrc = 400,
366 .nrg_th_cck = 95,
367 .nrg_th_ofdm = 95,
368
369 .barker_corr_th_min = 190,
370 .barker_corr_th_min_mrc = 390,
371 .nrg_th_cca = 62,
372};
373
374#define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF (-5)
375
376static s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
377{
378 u16 temperature, voltage;
379 __le16 *temp_calib = (__le16 *)iwl_eeprom_query_addr(priv,
380 EEPROM_KELVIN_TEMPERATURE);
381
382 temperature = le16_to_cpu(temp_calib[0]);
383 voltage = le16_to_cpu(temp_calib[1]);
384
385 /* offset = temp - volt / coeff */
386 return (s32)(temperature -
387 voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
388}
389
390static void iwl5150_set_ct_threshold(struct iwl_priv *priv)
391{
392 const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF;
393 s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) -
394 iwl_temp_calib_to_offset(priv);
395
396 priv->hw_params.ct_kill_threshold = threshold * volt2temp_coef;
397}
398
399static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
400{
401 /* want Celsius */
402 priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
403}
404
405static void iwl5000_hw_set_hw_params(struct iwl_priv *priv)
406{
407 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
408 BIT(IEEE80211_BAND_5GHZ);
409
410 priv->hw_params.tx_chains_num =
411 num_of_ant(priv->hw_params.valid_tx_ant);
412 priv->hw_params.rx_chains_num =
413 num_of_ant(priv->hw_params.valid_rx_ant);
414
415 iwl5000_set_ct_threshold(priv);
416
417 /* Set initial sensitivity parameters */
418 priv->hw_params.sens = &iwl5000_sensitivity;
419}
420
421static void iwl5150_hw_set_hw_params(struct iwl_priv *priv)
422{
423 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
424 BIT(IEEE80211_BAND_5GHZ);
425
426 priv->hw_params.tx_chains_num =
427 num_of_ant(priv->hw_params.valid_tx_ant);
428 priv->hw_params.rx_chains_num =
429 num_of_ant(priv->hw_params.valid_rx_ant);
430
431 iwl5150_set_ct_threshold(priv);
432
433 /* Set initial sensitivity parameters */
434 priv->hw_params.sens = &iwl5150_sensitivity;
435}
436
437static void iwl5150_temperature(struct iwl_priv *priv)
438{
439 u32 vt = 0;
440 s32 offset = iwl_temp_calib_to_offset(priv);
441
442 vt = le32_to_cpu(priv->statistics.common.temperature);
443 vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset;
444 /* now vt hold the temperature in Kelvin */
445 priv->temperature = KELVIN_TO_CELSIUS(vt);
446 iwl_tt_handler(priv);
447}
448
449static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
450 struct ieee80211_channel_switch *ch_switch)
451{
452 /*
453 * MULTI-FIXME
454 * See iwlagn_mac_channel_switch.
455 */
456 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
457 struct iwl5000_channel_switch_cmd cmd;
458 const struct iwl_channel_info *ch_info;
459 u32 switch_time_in_usec, ucode_switch_time;
460 u16 ch;
461 u32 tsf_low;
462 u8 switch_count;
463 u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
464 struct ieee80211_vif *vif = ctx->vif;
465 struct iwl_host_cmd hcmd = {
466 .id = REPLY_CHANNEL_SWITCH,
467 .len = { sizeof(cmd), },
468 .flags = CMD_SYNC,
469 .data = { &cmd, },
470 };
471
472 cmd.band = priv->band == IEEE80211_BAND_2GHZ;
473 ch = ch_switch->channel->hw_value;
474 IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
475 ctx->active.channel, ch);
476 cmd.channel = cpu_to_le16(ch);
477 cmd.rxon_flags = ctx->staging.flags;
478 cmd.rxon_filter_flags = ctx->staging.filter_flags;
479 switch_count = ch_switch->count;
480 tsf_low = ch_switch->timestamp & 0x0ffffffff;
481 /*
482 * calculate the ucode channel switch time
483 * adding TSF as one of the factor for when to switch
484 */
485 if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
486 if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
487 beacon_interval)) {
488 switch_count -= (priv->ucode_beacon_time -
489 tsf_low) / beacon_interval;
490 } else
491 switch_count = 0;
492 }
493 if (switch_count <= 1)
494 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
495 else {
496 switch_time_in_usec =
497 vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
498 ucode_switch_time = iwl_usecs_to_beacons(priv,
499 switch_time_in_usec,
500 beacon_interval);
501 cmd.switch_time = iwl_add_beacon_time(priv,
502 priv->ucode_beacon_time,
503 ucode_switch_time,
504 beacon_interval);
505 }
506 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
507 cmd.switch_time);
508 ch_info = iwl_get_channel_info(priv, priv->band, ch);
509 if (ch_info)
510 cmd.expect_beacon = is_channel_radar(ch_info);
511 else {
512 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
513 ctx->active.channel, ch);
514 return -EFAULT;
515 }
516
517 return iwl_dvm_send_cmd(priv, &hcmd);
518}
519
520struct iwl_lib_ops iwl5000_lib = {
521 .set_hw_params = iwl5000_hw_set_hw_params,
522 .set_channel_switch = iwl5000_hw_channel_switch,
523 .nic_config = iwl5000_nic_config,
524 .eeprom_ops = {
525 .regulatory_bands = {
526 EEPROM_REG_BAND_1_CHANNELS,
527 EEPROM_REG_BAND_2_CHANNELS,
528 EEPROM_REG_BAND_3_CHANNELS,
529 EEPROM_REG_BAND_4_CHANNELS,
530 EEPROM_REG_BAND_5_CHANNELS,
531 EEPROM_REG_BAND_24_HT40_CHANNELS,
532 EEPROM_REG_BAND_52_HT40_CHANNELS
533 },
534 },
535 .temperature = iwlagn_temperature,
536};
537
538struct iwl_lib_ops iwl5150_lib = {
539 .set_hw_params = iwl5150_hw_set_hw_params,
540 .set_channel_switch = iwl5000_hw_channel_switch,
541 .nic_config = iwl5000_nic_config,
542 .eeprom_ops = {
543 .regulatory_bands = {
544 EEPROM_REG_BAND_1_CHANNELS,
545 EEPROM_REG_BAND_2_CHANNELS,
546 EEPROM_REG_BAND_3_CHANNELS,
547 EEPROM_REG_BAND_4_CHANNELS,
548 EEPROM_REG_BAND_5_CHANNELS,
549 EEPROM_REG_BAND_24_HT40_CHANNELS,
550 EEPROM_REG_BAND_52_HT40_CHANNELS
551 },
552 },
553 .temperature = iwl5150_temperature,
554};
555
556
557
558/*
559 * 6000 series
560 * ===========
561 */
562
563static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
564{
565 /* want Celsius */
566 priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD;
567 priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
568}
569
570/* NIC configuration for 6000 series */
571static void iwl6000_nic_config(struct iwl_priv *priv)
572{
573 iwl_rf_config(priv);
574
575 switch (priv->cfg->device_family) {
576 case IWL_DEVICE_FAMILY_6005:
577 case IWL_DEVICE_FAMILY_6030:
578 case IWL_DEVICE_FAMILY_6000:
579 break;
580 case IWL_DEVICE_FAMILY_6000i:
581 /* 2x2 IPA phy type */
582 iwl_write32(priv->trans, CSR_GP_DRIVER_REG,
583 CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
584 break;
585 case IWL_DEVICE_FAMILY_6050:
586 /* Indicate calibration version to uCode. */
587 if (iwl_eeprom_calib_version(priv) >= 6)
588 iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
589 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
590 break;
591 case IWL_DEVICE_FAMILY_6150:
592 /* Indicate calibration version to uCode. */
593 if (iwl_eeprom_calib_version(priv) >= 6)
594 iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
595 CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
596 iwl_set_bit(priv->trans, CSR_GP_DRIVER_REG,
597 CSR_GP_DRIVER_REG_BIT_6050_1x2);
598 break;
599 default:
600 WARN_ON(1);
601 }
602}
603
604static const struct iwl_sensitivity_ranges iwl6000_sensitivity = {
605 .min_nrg_cck = 110,
606 .auto_corr_min_ofdm = 80,
607 .auto_corr_min_ofdm_mrc = 128,
608 .auto_corr_min_ofdm_x1 = 105,
609 .auto_corr_min_ofdm_mrc_x1 = 192,
610
611 .auto_corr_max_ofdm = 145,
612 .auto_corr_max_ofdm_mrc = 232,
613 .auto_corr_max_ofdm_x1 = 110,
614 .auto_corr_max_ofdm_mrc_x1 = 232,
615
616 .auto_corr_min_cck = 125,
617 .auto_corr_max_cck = 175,
618 .auto_corr_min_cck_mrc = 160,
619 .auto_corr_max_cck_mrc = 310,
620 .nrg_th_cck = 110,
621 .nrg_th_ofdm = 110,
622
623 .barker_corr_th_min = 190,
624 .barker_corr_th_min_mrc = 336,
625 .nrg_th_cca = 62,
626};
627
628static void iwl6000_hw_set_hw_params(struct iwl_priv *priv)
629{
630 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
631 BIT(IEEE80211_BAND_5GHZ);
632
633 priv->hw_params.tx_chains_num =
634 num_of_ant(priv->hw_params.valid_tx_ant);
635 if (priv->cfg->rx_with_siso_diversity)
636 priv->hw_params.rx_chains_num = 1;
637 else
638 priv->hw_params.rx_chains_num =
639 num_of_ant(priv->hw_params.valid_rx_ant);
640
641 iwl6000_set_ct_threshold(priv);
642
643 /* Set initial sensitivity parameters */
644 priv->hw_params.sens = &iwl6000_sensitivity;
645
646}
647
648static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
649 struct ieee80211_channel_switch *ch_switch)
650{
651 /*
652 * MULTI-FIXME
653 * See iwlagn_mac_channel_switch.
654 */
655 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
656 struct iwl6000_channel_switch_cmd cmd;
657 const struct iwl_channel_info *ch_info;
658 u32 switch_time_in_usec, ucode_switch_time;
659 u16 ch;
660 u32 tsf_low;
661 u8 switch_count;
662 u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
663 struct ieee80211_vif *vif = ctx->vif;
664 struct iwl_host_cmd hcmd = {
665 .id = REPLY_CHANNEL_SWITCH,
666 .len = { sizeof(cmd), },
667 .flags = CMD_SYNC,
668 .data = { &cmd, },
669 };
670
671 cmd.band = priv->band == IEEE80211_BAND_2GHZ;
672 ch = ch_switch->channel->hw_value;
673 IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
674 ctx->active.channel, ch);
675 cmd.channel = cpu_to_le16(ch);
676 cmd.rxon_flags = ctx->staging.flags;
677 cmd.rxon_filter_flags = ctx->staging.filter_flags;
678 switch_count = ch_switch->count;
679 tsf_low = ch_switch->timestamp & 0x0ffffffff;
680 /*
681 * calculate the ucode channel switch time
682 * adding TSF as one of the factor for when to switch
683 */
684 if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
685 if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
686 beacon_interval)) {
687 switch_count -= (priv->ucode_beacon_time -
688 tsf_low) / beacon_interval;
689 } else
690 switch_count = 0;
691 }
692 if (switch_count <= 1)
693 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
694 else {
695 switch_time_in_usec =
696 vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
697 ucode_switch_time = iwl_usecs_to_beacons(priv,
698 switch_time_in_usec,
699 beacon_interval);
700 cmd.switch_time = iwl_add_beacon_time(priv,
701 priv->ucode_beacon_time,
702 ucode_switch_time,
703 beacon_interval);
704 }
705 IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
706 cmd.switch_time);
707 ch_info = iwl_get_channel_info(priv, priv->band, ch);
708 if (ch_info)
709 cmd.expect_beacon = is_channel_radar(ch_info);
710 else {
711 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
712 ctx->active.channel, ch);
713 return -EFAULT;
714 }
715
716 return iwl_dvm_send_cmd(priv, &hcmd);
717}
718
719struct iwl_lib_ops iwl6000_lib = {
720 .set_hw_params = iwl6000_hw_set_hw_params,
721 .set_channel_switch = iwl6000_hw_channel_switch,
722 .nic_config = iwl6000_nic_config,
723 .eeprom_ops = {
724 .regulatory_bands = {
725 EEPROM_REG_BAND_1_CHANNELS,
726 EEPROM_REG_BAND_2_CHANNELS,
727 EEPROM_REG_BAND_3_CHANNELS,
728 EEPROM_REG_BAND_4_CHANNELS,
729 EEPROM_REG_BAND_5_CHANNELS,
730 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
731 EEPROM_REG_BAND_52_HT40_CHANNELS
732 },
733 .enhanced_txpower = true,
734 },
735 .temperature = iwlagn_temperature,
736};
737
738struct iwl_lib_ops iwl6030_lib = {
739 .set_hw_params = iwl6000_hw_set_hw_params,
740 .set_channel_switch = iwl6000_hw_channel_switch,
741 .nic_config = iwl6000_nic_config,
742 .eeprom_ops = {
743 .regulatory_bands = {
744 EEPROM_REG_BAND_1_CHANNELS,
745 EEPROM_REG_BAND_2_CHANNELS,
746 EEPROM_REG_BAND_3_CHANNELS,
747 EEPROM_REG_BAND_4_CHANNELS,
748 EEPROM_REG_BAND_5_CHANNELS,
749 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
750 EEPROM_REG_BAND_52_HT40_CHANNELS
751 },
752 .enhanced_txpower = true,
753 },
754 .temperature = iwlagn_temperature,
755};
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
index d0ec0abd3c89..7960a52f6ad4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
@@ -102,10 +102,18 @@
102 102
103/* EEPROM */ 103/* EEPROM */
104#define IWLAGN_EEPROM_IMG_SIZE 2048 104#define IWLAGN_EEPROM_IMG_SIZE 2048
105/* OTP */
106/* lower blocks contain EEPROM image and calibration data */
107#define OTP_LOW_IMAGE_SIZE (2 * 512 * sizeof(u16)) /* 2 KB */
108/* high blocks contain PAPD data */
109#define OTP_HIGH_IMAGE_SIZE_6x00 (6 * 512 * sizeof(u16)) /* 6 KB */
110#define OTP_HIGH_IMAGE_SIZE_1000 (0x200 * sizeof(u16)) /* 1024 bytes */
111#define OTP_MAX_LL_ITEMS_1000 (3) /* OTP blocks for 1000 */
112#define OTP_MAX_LL_ITEMS_6x00 (4) /* OTP blocks for 6x00 */
113#define OTP_MAX_LL_ITEMS_6x50 (7) /* OTP blocks for 6x50 */
114#define OTP_MAX_LL_ITEMS_2x00 (4) /* OTP blocks for 2x00 */
115
105 116
106#define IWLAGN_CMD_FIFO_NUM 7
107#define IWLAGN_NUM_QUEUES 20 117#define IWLAGN_NUM_QUEUES 20
108#define IWLAGN_NUM_AMPDU_QUEUES 9
109#define IWLAGN_FIRST_AMPDU_QUEUE 11
110 118
111#endif /* __iwl_agn_hw_h__ */ 119#endif /* __iwl_agn_hw_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 56f41c9409d1..e1c2bb802050 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -33,12 +33,11 @@
33#include <linux/sched.h> 33#include <linux/sched.h>
34 34
35#include "iwl-dev.h" 35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-io.h" 36#include "iwl-io.h"
38#include "iwl-agn-hw.h" 37#include "iwl-agn-hw.h"
39#include "iwl-agn.h" 38#include "iwl-agn.h"
40#include "iwl-trans.h" 39#include "iwl-trans.h"
41#include "iwl-shared.h" 40#include "iwl-modparams.h"
42 41
43int iwlagn_hw_valid_rtc_data_addr(u32 addr) 42int iwlagn_hw_valid_rtc_data_addr(u32 addr)
44{ 43{
@@ -94,81 +93,6 @@ void iwlagn_temperature(struct iwl_priv *priv)
94 iwl_tt_handler(priv); 93 iwl_tt_handler(priv);
95} 94}
96 95
97u16 iwl_eeprom_calib_version(struct iwl_shared *shrd)
98{
99 struct iwl_eeprom_calib_hdr *hdr;
100
101 hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(shrd,
102 EEPROM_CALIB_ALL);
103 return hdr->version;
104
105}
106
107/*
108 * EEPROM
109 */
110static u32 eeprom_indirect_address(const struct iwl_shared *shrd, u32 address)
111{
112 u16 offset = 0;
113
114 if ((address & INDIRECT_ADDRESS) == 0)
115 return address;
116
117 switch (address & INDIRECT_TYPE_MSK) {
118 case INDIRECT_HOST:
119 offset = iwl_eeprom_query16(shrd, EEPROM_LINK_HOST);
120 break;
121 case INDIRECT_GENERAL:
122 offset = iwl_eeprom_query16(shrd, EEPROM_LINK_GENERAL);
123 break;
124 case INDIRECT_REGULATORY:
125 offset = iwl_eeprom_query16(shrd, EEPROM_LINK_REGULATORY);
126 break;
127 case INDIRECT_TXP_LIMIT:
128 offset = iwl_eeprom_query16(shrd, EEPROM_LINK_TXP_LIMIT);
129 break;
130 case INDIRECT_TXP_LIMIT_SIZE:
131 offset = iwl_eeprom_query16(shrd, EEPROM_LINK_TXP_LIMIT_SIZE);
132 break;
133 case INDIRECT_CALIBRATION:
134 offset = iwl_eeprom_query16(shrd, EEPROM_LINK_CALIBRATION);
135 break;
136 case INDIRECT_PROCESS_ADJST:
137 offset = iwl_eeprom_query16(shrd, EEPROM_LINK_PROCESS_ADJST);
138 break;
139 case INDIRECT_OTHERS:
140 offset = iwl_eeprom_query16(shrd, EEPROM_LINK_OTHERS);
141 break;
142 default:
143 IWL_ERR(shrd->trans, "illegal indirect type: 0x%X\n",
144 address & INDIRECT_TYPE_MSK);
145 break;
146 }
147
148 /* translate the offset from words to byte */
149 return (address & ADDRESS_MSK) + (offset << 1);
150}
151
152const u8 *iwl_eeprom_query_addr(const struct iwl_shared *shrd, size_t offset)
153{
154 u32 address = eeprom_indirect_address(shrd, offset);
155 BUG_ON(address >= shrd->cfg->base_params->eeprom_size);
156 return &shrd->eeprom[address];
157}
158
159struct iwl_mod_params iwlagn_mod_params = {
160 .amsdu_size_8K = 1,
161 .restart_fw = 1,
162 .plcp_check = true,
163 .bt_coex_active = true,
164 .no_sleep_autoadjust = true,
165 .power_level = IWL_POWER_INDEX_1,
166 .bt_ch_announce = true,
167 .wanted_ucode_alternative = 1,
168 .auto_agg = true,
169 /* the rest are 0 by default */
170};
171
172int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band) 96int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
173{ 97{
174 int idx = 0; 98 int idx = 0;
@@ -228,13 +152,13 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
228 IWL_SCD_BE_MSK | IWL_SCD_BK_MSK | 152 IWL_SCD_BE_MSK | IWL_SCD_BK_MSK |
229 IWL_SCD_MGMT_MSK; 153 IWL_SCD_MGMT_MSK;
230 if ((flush_control & BIT(IWL_RXON_CTX_PAN)) && 154 if ((flush_control & BIT(IWL_RXON_CTX_PAN)) &&
231 (priv->shrd->valid_contexts != BIT(IWL_RXON_CTX_BSS))) 155 (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)))
232 flush_cmd.fifo_control |= IWL_PAN_SCD_VO_MSK | 156 flush_cmd.fifo_control |= IWL_PAN_SCD_VO_MSK |
233 IWL_PAN_SCD_VI_MSK | IWL_PAN_SCD_BE_MSK | 157 IWL_PAN_SCD_VI_MSK | IWL_PAN_SCD_BE_MSK |
234 IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK | 158 IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK |
235 IWL_PAN_SCD_MULTICAST_MSK; 159 IWL_PAN_SCD_MULTICAST_MSK;
236 160
237 if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE) 161 if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE)
238 flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK; 162 flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK;
239 163
240 IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n", 164 IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n",
@@ -253,7 +177,7 @@ void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
253 goto done; 177 goto done;
254 } 178 }
255 IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n"); 179 IWL_DEBUG_INFO(priv, "wait transmit/flush all frames\n");
256 iwl_trans_wait_tx_queue_empty(trans(priv)); 180 iwl_trans_wait_tx_queue_empty(priv->trans);
257done: 181done:
258 ieee80211_wake_queues(priv->hw); 182 ieee80211_wake_queues(priv->hw);
259 mutex_unlock(&priv->mutex); 183 mutex_unlock(&priv->mutex);
@@ -369,24 +293,30 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
369 .bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT, 293 .bt3_prio_sample_time = IWLAGN_BT3_PRIO_SAMPLE_DEFAULT,
370 .bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT, 294 .bt3_timer_t2_value = IWLAGN_BT3_T2_DEFAULT,
371 }; 295 };
372 struct iwl6000_bt_cmd bt_cmd_6000; 296 struct iwl_bt_cmd_v1 bt_cmd_v1;
373 struct iwl2000_bt_cmd bt_cmd_2000; 297 struct iwl_bt_cmd_v2 bt_cmd_v2;
374 int ret; 298 int ret;
375 299
376 BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) != 300 BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) !=
377 sizeof(basic.bt3_lookup_table)); 301 sizeof(basic.bt3_lookup_table));
378 302
379 if (cfg(priv)->bt_params) { 303 if (priv->cfg->bt_params) {
380 if (cfg(priv)->bt_params->bt_session_2) { 304 /*
381 bt_cmd_2000.prio_boost = cpu_to_le32( 305 * newer generation of devices (2000 series and newer)
382 cfg(priv)->bt_params->bt_prio_boost); 306 * use the version 2 of the bt command
383 bt_cmd_2000.tx_prio_boost = 0; 307 * we need to make sure sending the host command
384 bt_cmd_2000.rx_prio_boost = 0; 308 * with correct data structure to avoid uCode assert
309 */
310 if (priv->cfg->bt_params->bt_session_2) {
311 bt_cmd_v2.prio_boost = cpu_to_le32(
312 priv->cfg->bt_params->bt_prio_boost);
313 bt_cmd_v2.tx_prio_boost = 0;
314 bt_cmd_v2.rx_prio_boost = 0;
385 } else { 315 } else {
386 bt_cmd_6000.prio_boost = 316 bt_cmd_v1.prio_boost =
387 cfg(priv)->bt_params->bt_prio_boost; 317 priv->cfg->bt_params->bt_prio_boost;
388 bt_cmd_6000.tx_prio_boost = 0; 318 bt_cmd_v1.tx_prio_boost = 0;
389 bt_cmd_6000.rx_prio_boost = 0; 319 bt_cmd_v1.rx_prio_boost = 0;
390 } 320 }
391 } else { 321 } else {
392 IWL_ERR(priv, "failed to construct BT Coex Config\n"); 322 IWL_ERR(priv, "failed to construct BT Coex Config\n");
@@ -403,7 +333,7 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
403 * (might be in monitor mode), or the interface is in 333 * (might be in monitor mode), or the interface is in
404 * IBSS mode (no proper uCode support for coex then). 334 * IBSS mode (no proper uCode support for coex then).
405 */ 335 */
406 if (!iwlagn_mod_params.bt_coex_active || 336 if (!iwlwifi_mod_params.bt_coex_active ||
407 priv->iw_mode == NL80211_IFTYPE_ADHOC) { 337 priv->iw_mode == NL80211_IFTYPE_ADHOC) {
408 basic.flags = IWLAGN_BT_FLAG_COEX_MODE_DISABLED; 338 basic.flags = IWLAGN_BT_FLAG_COEX_MODE_DISABLED;
409 } else { 339 } else {
@@ -432,16 +362,16 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
432 priv->bt_full_concurrent ? 362 priv->bt_full_concurrent ?
433 "full concurrency" : "3-wire"); 363 "full concurrency" : "3-wire");
434 364
435 if (cfg(priv)->bt_params->bt_session_2) { 365 if (priv->cfg->bt_params->bt_session_2) {
436 memcpy(&bt_cmd_2000.basic, &basic, 366 memcpy(&bt_cmd_v2.basic, &basic,
437 sizeof(basic)); 367 sizeof(basic));
438 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG, 368 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
439 CMD_SYNC, sizeof(bt_cmd_2000), &bt_cmd_2000); 369 CMD_SYNC, sizeof(bt_cmd_v2), &bt_cmd_v2);
440 } else { 370 } else {
441 memcpy(&bt_cmd_6000.basic, &basic, 371 memcpy(&bt_cmd_v1.basic, &basic,
442 sizeof(basic)); 372 sizeof(basic));
443 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG, 373 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
444 CMD_SYNC, sizeof(bt_cmd_6000), &bt_cmd_6000); 374 CMD_SYNC, sizeof(bt_cmd_v1), &bt_cmd_v1);
445 } 375 }
446 if (ret) 376 if (ret)
447 IWL_ERR(priv, "failed to send BT Coex Config\n"); 377 IWL_ERR(priv, "failed to send BT Coex Config\n");
@@ -615,7 +545,7 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv,
615 struct iwl_bt_uart_msg *uart_msg) 545 struct iwl_bt_uart_msg *uart_msg)
616{ 546{
617 IWL_DEBUG_COEX(priv, "Message Type = 0x%X, SSN = 0x%X, " 547 IWL_DEBUG_COEX(priv, "Message Type = 0x%X, SSN = 0x%X, "
618 "Update Req = 0x%X", 548 "Update Req = 0x%X\n",
619 (BT_UART_MSG_FRAME1MSGTYPE_MSK & uart_msg->frame1) >> 549 (BT_UART_MSG_FRAME1MSGTYPE_MSK & uart_msg->frame1) >>
620 BT_UART_MSG_FRAME1MSGTYPE_POS, 550 BT_UART_MSG_FRAME1MSGTYPE_POS,
621 (BT_UART_MSG_FRAME1SSN_MSK & uart_msg->frame1) >> 551 (BT_UART_MSG_FRAME1SSN_MSK & uart_msg->frame1) >>
@@ -624,7 +554,7 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv,
624 BT_UART_MSG_FRAME1UPDATEREQ_POS); 554 BT_UART_MSG_FRAME1UPDATEREQ_POS);
625 555
626 IWL_DEBUG_COEX(priv, "Open connections = 0x%X, Traffic load = 0x%X, " 556 IWL_DEBUG_COEX(priv, "Open connections = 0x%X, Traffic load = 0x%X, "
627 "Chl_SeqN = 0x%X, In band = 0x%X", 557 "Chl_SeqN = 0x%X, In band = 0x%X\n",
628 (BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK & uart_msg->frame2) >> 558 (BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK & uart_msg->frame2) >>
629 BT_UART_MSG_FRAME2OPENCONNECTIONS_POS, 559 BT_UART_MSG_FRAME2OPENCONNECTIONS_POS,
630 (BT_UART_MSG_FRAME2TRAFFICLOAD_MSK & uart_msg->frame2) >> 560 (BT_UART_MSG_FRAME2TRAFFICLOAD_MSK & uart_msg->frame2) >>
@@ -635,7 +565,7 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv,
635 BT_UART_MSG_FRAME2INBAND_POS); 565 BT_UART_MSG_FRAME2INBAND_POS);
636 566
637 IWL_DEBUG_COEX(priv, "SCO/eSCO = 0x%X, Sniff = 0x%X, A2DP = 0x%X, " 567 IWL_DEBUG_COEX(priv, "SCO/eSCO = 0x%X, Sniff = 0x%X, A2DP = 0x%X, "
638 "ACL = 0x%X, Master = 0x%X, OBEX = 0x%X", 568 "ACL = 0x%X, Master = 0x%X, OBEX = 0x%X\n",
639 (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >> 569 (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >>
640 BT_UART_MSG_FRAME3SCOESCO_POS, 570 BT_UART_MSG_FRAME3SCOESCO_POS,
641 (BT_UART_MSG_FRAME3SNIFF_MSK & uart_msg->frame3) >> 571 (BT_UART_MSG_FRAME3SNIFF_MSK & uart_msg->frame3) >>
@@ -649,12 +579,12 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv,
649 (BT_UART_MSG_FRAME3OBEX_MSK & uart_msg->frame3) >> 579 (BT_UART_MSG_FRAME3OBEX_MSK & uart_msg->frame3) >>
650 BT_UART_MSG_FRAME3OBEX_POS); 580 BT_UART_MSG_FRAME3OBEX_POS);
651 581
652 IWL_DEBUG_COEX(priv, "Idle duration = 0x%X", 582 IWL_DEBUG_COEX(priv, "Idle duration = 0x%X\n",
653 (BT_UART_MSG_FRAME4IDLEDURATION_MSK & uart_msg->frame4) >> 583 (BT_UART_MSG_FRAME4IDLEDURATION_MSK & uart_msg->frame4) >>
654 BT_UART_MSG_FRAME4IDLEDURATION_POS); 584 BT_UART_MSG_FRAME4IDLEDURATION_POS);
655 585
656 IWL_DEBUG_COEX(priv, "Tx Activity = 0x%X, Rx Activity = 0x%X, " 586 IWL_DEBUG_COEX(priv, "Tx Activity = 0x%X, Rx Activity = 0x%X, "
657 "eSCO Retransmissions = 0x%X", 587 "eSCO Retransmissions = 0x%X\n",
658 (BT_UART_MSG_FRAME5TXACTIVITY_MSK & uart_msg->frame5) >> 588 (BT_UART_MSG_FRAME5TXACTIVITY_MSK & uart_msg->frame5) >>
659 BT_UART_MSG_FRAME5TXACTIVITY_POS, 589 BT_UART_MSG_FRAME5TXACTIVITY_POS,
660 (BT_UART_MSG_FRAME5RXACTIVITY_MSK & uart_msg->frame5) >> 590 (BT_UART_MSG_FRAME5RXACTIVITY_MSK & uart_msg->frame5) >>
@@ -662,14 +592,14 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv,
662 (BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK & uart_msg->frame5) >> 592 (BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK & uart_msg->frame5) >>
663 BT_UART_MSG_FRAME5ESCORETRANSMIT_POS); 593 BT_UART_MSG_FRAME5ESCORETRANSMIT_POS);
664 594
665 IWL_DEBUG_COEX(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X", 595 IWL_DEBUG_COEX(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X\n",
666 (BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK & uart_msg->frame6) >> 596 (BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK & uart_msg->frame6) >>
667 BT_UART_MSG_FRAME6SNIFFINTERVAL_POS, 597 BT_UART_MSG_FRAME6SNIFFINTERVAL_POS,
668 (BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >> 598 (BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >>
669 BT_UART_MSG_FRAME6DISCOVERABLE_POS); 599 BT_UART_MSG_FRAME6DISCOVERABLE_POS);
670 600
671 IWL_DEBUG_COEX(priv, "Sniff Activity = 0x%X, Page = " 601 IWL_DEBUG_COEX(priv, "Sniff Activity = 0x%X, Page = "
672 "0x%X, Inquiry = 0x%X, Connectable = 0x%X", 602 "0x%X, Inquiry = 0x%X, Connectable = 0x%X\n",
673 (BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >> 603 (BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >>
674 BT_UART_MSG_FRAME7SNIFFACTIVITY_POS, 604 BT_UART_MSG_FRAME7SNIFFACTIVITY_POS,
675 (BT_UART_MSG_FRAME7PAGE_MSK & uart_msg->frame7) >> 605 (BT_UART_MSG_FRAME7PAGE_MSK & uart_msg->frame7) >>
@@ -798,8 +728,8 @@ static bool is_single_rx_stream(struct iwl_priv *priv)
798 */ 728 */
799static int iwl_get_active_rx_chain_count(struct iwl_priv *priv) 729static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
800{ 730{
801 if (cfg(priv)->bt_params && 731 if (priv->cfg->bt_params &&
802 cfg(priv)->bt_params->advanced_bt_coexist && 732 priv->cfg->bt_params->advanced_bt_coexist &&
803 (priv->bt_full_concurrent || 733 (priv->bt_full_concurrent ||
804 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) { 734 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
805 /* 735 /*
@@ -856,7 +786,7 @@ static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
856void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx) 786void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
857{ 787{
858 bool is_single = is_single_rx_stream(priv); 788 bool is_single = is_single_rx_stream(priv);
859 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->shrd->status); 789 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
860 u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt; 790 u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
861 u32 active_chains; 791 u32 active_chains;
862 u16 rx_chain; 792 u16 rx_chain;
@@ -868,10 +798,10 @@ void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
868 if (priv->chain_noise_data.active_chains) 798 if (priv->chain_noise_data.active_chains)
869 active_chains = priv->chain_noise_data.active_chains; 799 active_chains = priv->chain_noise_data.active_chains;
870 else 800 else
871 active_chains = hw_params(priv).valid_rx_ant; 801 active_chains = priv->hw_params.valid_rx_ant;
872 802
873 if (cfg(priv)->bt_params && 803 if (priv->cfg->bt_params &&
874 cfg(priv)->bt_params->advanced_bt_coexist && 804 priv->cfg->bt_params->advanced_bt_coexist &&
875 (priv->bt_full_concurrent || 805 (priv->bt_full_concurrent ||
876 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) { 806 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
877 /* 807 /*
@@ -1190,7 +1120,7 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan)
1190 memcpy(&rxon, &ctx->active, sizeof(rxon)); 1120 memcpy(&rxon, &ctx->active, sizeof(rxon));
1191 1121
1192 priv->ucode_loaded = false; 1122 priv->ucode_loaded = false;
1193 iwl_trans_stop_device(trans(priv)); 1123 iwl_trans_stop_device(priv->trans);
1194 1124
1195 priv->wowlan = true; 1125 priv->wowlan = true;
1196 1126
@@ -1212,7 +1142,7 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan)
1212 if (ret) 1142 if (ret)
1213 goto out; 1143 goto out;
1214 1144
1215 if (!iwlagn_mod_params.sw_crypto) { 1145 if (!iwlwifi_mod_params.sw_crypto) {
1216 /* mark all keys clear */ 1146 /* mark all keys clear */
1217 priv->ucode_key_table = 0; 1147 priv->ucode_key_table = 0;
1218 ctx->key_mapping_keys = 0; 1148 ctx->key_mapping_keys = 0;
@@ -1298,6 +1228,12 @@ int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1298 return -EIO; 1228 return -EIO;
1299 } 1229 }
1300 1230
1231 if (test_bit(STATUS_FW_ERROR, &priv->status)) {
1232 IWL_ERR(priv, "Command %s failed: FW Error\n",
1233 iwl_dvm_get_cmd_string(cmd->id));
1234 return -EIO;
1235 }
1236
1301 /* 1237 /*
1302 * Synchronous commands from this op-mode must hold 1238 * Synchronous commands from this op-mode must hold
1303 * the mutex, this ensures we don't try to send two 1239 * the mutex, this ensures we don't try to send two
@@ -1312,7 +1248,7 @@ int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1312 return -EIO; 1248 return -EIO;
1313 } 1249 }
1314 1250
1315 return iwl_trans_send_cmd(trans(priv), cmd); 1251 return iwl_trans_send_cmd(priv->trans, cmd);
1316} 1252}
1317 1253
1318int iwl_dvm_send_cmd_pdu(struct iwl_priv *priv, u8 id, 1254int iwl_dvm_send_cmd_pdu(struct iwl_priv *priv, u8 id,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 7e590b349dd7..51e1a69ffdda 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -36,9 +36,9 @@
36#include <linux/workqueue.h> 36#include <linux/workqueue.h>
37 37
38#include "iwl-dev.h" 38#include "iwl-dev.h"
39#include "iwl-core.h"
40#include "iwl-agn.h" 39#include "iwl-agn.h"
41#include "iwl-op-mode.h" 40#include "iwl-op-mode.h"
41#include "iwl-modparams.h"
42 42
43#define RS_NAME "iwl-agn-rs" 43#define RS_NAME "iwl-agn-rs"
44 44
@@ -420,7 +420,7 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
420 420
421 load = rs_tl_get_load(lq_data, tid); 421 load = rs_tl_get_load(lq_data, tid);
422 422
423 if ((iwlagn_mod_params.auto_agg) || (load > IWL_AGG_LOAD_THRESHOLD)) { 423 if ((iwlwifi_mod_params.auto_agg) || (load > IWL_AGG_LOAD_THRESHOLD)) {
424 IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n", 424 IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
425 sta->addr, tid); 425 sta->addr, tid);
426 ret = ieee80211_start_tx_ba_session(sta, tid, 5000); 426 ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
@@ -819,7 +819,7 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
819 819
820 if (num_of_ant(tbl->ant_type) > 1) 820 if (num_of_ant(tbl->ant_type) > 1)
821 tbl->ant_type = 821 tbl->ant_type =
822 first_antenna(hw_params(priv).valid_tx_ant); 822 first_antenna(priv->hw_params.valid_tx_ant);
823 823
824 tbl->is_ht40 = 0; 824 tbl->is_ht40 = 0;
825 tbl->is_SGI = 0; 825 tbl->is_SGI = 0;
@@ -969,7 +969,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
969 (tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) || 969 (tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
970 (tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) || 970 (tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
971 (tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) || 971 (tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) ||
972 (tbl_type.ant_type != info->antenna_sel_tx) || 972 (tbl_type.ant_type != info->status.antenna) ||
973 (!!(tx_rate & RATE_MCS_HT_MSK) != !!(mac_flags & IEEE80211_TX_RC_MCS)) || 973 (!!(tx_rate & RATE_MCS_HT_MSK) != !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
974 (!!(tx_rate & RATE_MCS_GF_MSK) != !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) || 974 (!!(tx_rate & RATE_MCS_GF_MSK) != !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
975 (rs_index != mac_index)) { 975 (rs_index != mac_index)) {
@@ -1085,7 +1085,7 @@ done:
1085 (priv->tm_fixed_rate != lq_sta->dbg_fixed_rate)) 1085 (priv->tm_fixed_rate != lq_sta->dbg_fixed_rate))
1086 rs_program_fix_rate(priv, lq_sta); 1086 rs_program_fix_rate(priv, lq_sta);
1087#endif 1087#endif
1088 if (cfg(priv)->bt_params && cfg(priv)->bt_params->advanced_bt_coexist) 1088 if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist)
1089 rs_bt_update_lq(priv, ctx, lq_sta); 1089 rs_bt_update_lq(priv, ctx, lq_sta);
1090} 1090}
1091 1091
@@ -1291,7 +1291,7 @@ static int rs_switch_to_mimo2(struct iwl_priv *priv,
1291 return -1; 1291 return -1;
1292 1292
1293 /* Need both Tx chains/antennas to support MIMO */ 1293 /* Need both Tx chains/antennas to support MIMO */
1294 if (hw_params(priv).tx_chains_num < 2) 1294 if (priv->hw_params.tx_chains_num < 2)
1295 return -1; 1295 return -1;
1296 1296
1297 IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n"); 1297 IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n");
@@ -1347,7 +1347,7 @@ static int rs_switch_to_mimo3(struct iwl_priv *priv,
1347 return -1; 1347 return -1;
1348 1348
1349 /* Need both Tx chains/antennas to support MIMO */ 1349 /* Need both Tx chains/antennas to support MIMO */
1350 if (hw_params(priv).tx_chains_num < 3) 1350 if (priv->hw_params.tx_chains_num < 3)
1351 return -1; 1351 return -1;
1352 1352
1353 IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO3\n"); 1353 IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO3\n");
@@ -1446,8 +1446,8 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1446 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1446 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1447 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1447 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1448 u8 start_action; 1448 u8 start_action;
1449 u8 valid_tx_ant = hw_params(priv).valid_tx_ant; 1449 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1450 u8 tx_chains_num = hw_params(priv).tx_chains_num; 1450 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1451 int ret = 0; 1451 int ret = 0;
1452 u8 update_search_tbl_counter = 0; 1452 u8 update_search_tbl_counter = 0;
1453 1453
@@ -1464,7 +1464,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1464 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: 1464 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1465 /* avoid antenna B and MIMO */ 1465 /* avoid antenna B and MIMO */
1466 valid_tx_ant = 1466 valid_tx_ant =
1467 first_antenna(hw_params(priv).valid_tx_ant); 1467 first_antenna(priv->hw_params.valid_tx_ant);
1468 if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2 && 1468 if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2 &&
1469 tbl->action != IWL_LEGACY_SWITCH_SISO) 1469 tbl->action != IWL_LEGACY_SWITCH_SISO)
1470 tbl->action = IWL_LEGACY_SWITCH_SISO; 1470 tbl->action = IWL_LEGACY_SWITCH_SISO;
@@ -1488,7 +1488,7 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
1488 else if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2) 1488 else if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
1489 tbl->action = IWL_LEGACY_SWITCH_SISO; 1489 tbl->action = IWL_LEGACY_SWITCH_SISO;
1490 valid_tx_ant = 1490 valid_tx_ant =
1491 first_antenna(hw_params(priv).valid_tx_ant); 1491 first_antenna(priv->hw_params.valid_tx_ant);
1492 } 1492 }
1493 1493
1494 start_action = tbl->action; 1494 start_action = tbl->action;
@@ -1622,8 +1622,8 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1622 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1622 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1623 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1623 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1624 u8 start_action; 1624 u8 start_action;
1625 u8 valid_tx_ant = hw_params(priv).valid_tx_ant; 1625 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1626 u8 tx_chains_num = hw_params(priv).tx_chains_num; 1626 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1627 u8 update_search_tbl_counter = 0; 1627 u8 update_search_tbl_counter = 0;
1628 int ret; 1628 int ret;
1629 1629
@@ -1640,7 +1640,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1640 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS: 1640 case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
1641 /* avoid antenna B and MIMO */ 1641 /* avoid antenna B and MIMO */
1642 valid_tx_ant = 1642 valid_tx_ant =
1643 first_antenna(hw_params(priv).valid_tx_ant); 1643 first_antenna(priv->hw_params.valid_tx_ant);
1644 if (tbl->action != IWL_SISO_SWITCH_ANTENNA1) 1644 if (tbl->action != IWL_SISO_SWITCH_ANTENNA1)
1645 tbl->action = IWL_SISO_SWITCH_ANTENNA1; 1645 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1646 break; 1646 break;
@@ -1658,7 +1658,7 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
1658 /* configure as 1x1 if bt full concurrency */ 1658 /* configure as 1x1 if bt full concurrency */
1659 if (priv->bt_full_concurrent) { 1659 if (priv->bt_full_concurrent) {
1660 valid_tx_ant = 1660 valid_tx_ant =
1661 first_antenna(hw_params(priv).valid_tx_ant); 1661 first_antenna(priv->hw_params.valid_tx_ant);
1662 if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2) 1662 if (tbl->action >= IWL_LEGACY_SWITCH_ANTENNA2)
1663 tbl->action = IWL_SISO_SWITCH_ANTENNA1; 1663 tbl->action = IWL_SISO_SWITCH_ANTENNA1;
1664 } 1664 }
@@ -1794,8 +1794,8 @@ static int rs_move_mimo2_to_other(struct iwl_priv *priv,
1794 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1794 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1795 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1795 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1796 u8 start_action; 1796 u8 start_action;
1797 u8 valid_tx_ant = hw_params(priv).valid_tx_ant; 1797 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1798 u8 tx_chains_num = hw_params(priv).tx_chains_num; 1798 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1799 u8 update_search_tbl_counter = 0; 1799 u8 update_search_tbl_counter = 0;
1800 int ret; 1800 int ret;
1801 1801
@@ -1964,8 +1964,8 @@ static int rs_move_mimo3_to_other(struct iwl_priv *priv,
1964 u32 sz = (sizeof(struct iwl_scale_tbl_info) - 1964 u32 sz = (sizeof(struct iwl_scale_tbl_info) -
1965 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT)); 1965 (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
1966 u8 start_action; 1966 u8 start_action;
1967 u8 valid_tx_ant = hw_params(priv).valid_tx_ant; 1967 u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
1968 u8 tx_chains_num = hw_params(priv).tx_chains_num; 1968 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1969 int ret; 1969 int ret;
1970 u8 update_search_tbl_counter = 0; 1970 u8 update_search_tbl_counter = 0;
1971 1971
@@ -2166,7 +2166,7 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
2166 (lq_sta->total_success > lq_sta->max_success_limit) || 2166 (lq_sta->total_success > lq_sta->max_success_limit) ||
2167 ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer) 2167 ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer)
2168 && (flush_interval_passed))) { 2168 && (flush_interval_passed))) {
2169 IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n:", 2169 IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n",
2170 lq_sta->total_failed, 2170 lq_sta->total_failed,
2171 lq_sta->total_success, 2171 lq_sta->total_success,
2172 flush_interval_passed); 2172 flush_interval_passed);
@@ -2698,7 +2698,7 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2698 2698
2699 i = lq_sta->last_txrate_idx; 2699 i = lq_sta->last_txrate_idx;
2700 2700
2701 valid_tx_ant = hw_params(priv).valid_tx_ant; 2701 valid_tx_ant = priv->hw_params.valid_tx_ant;
2702 2702
2703 if (!lq_sta->search_better_tbl) 2703 if (!lq_sta->search_better_tbl)
2704 active_tbl = lq_sta->active_tbl; 2704 active_tbl = lq_sta->active_tbl;
@@ -2826,6 +2826,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
2826 struct iwl_station_priv *sta_priv; 2826 struct iwl_station_priv *sta_priv;
2827 struct iwl_lq_sta *lq_sta; 2827 struct iwl_lq_sta *lq_sta;
2828 struct ieee80211_supported_band *sband; 2828 struct ieee80211_supported_band *sband;
2829 unsigned long supp; /* must be unsigned long for for_each_set_bit */
2829 2830
2830 sta_priv = (struct iwl_station_priv *) sta->drv_priv; 2831 sta_priv = (struct iwl_station_priv *) sta->drv_priv;
2831 lq_sta = &sta_priv->lq_sta; 2832 lq_sta = &sta_priv->lq_sta;
@@ -2855,8 +2856,15 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
2855 lq_sta->max_rate_idx = -1; 2856 lq_sta->max_rate_idx = -1;
2856 lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX; 2857 lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
2857 lq_sta->is_green = rs_use_green(sta); 2858 lq_sta->is_green = rs_use_green(sta);
2858 lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000); 2859 lq_sta->band = sband->band;
2859 lq_sta->band = priv->band; 2860 /*
2861 * active legacy rates as per supported rates bitmap
2862 */
2863 supp = sta->supp_rates[sband->band];
2864 lq_sta->active_legacy_rate = 0;
2865 for_each_set_bit(i, &supp, BITS_PER_LONG)
2866 lq_sta->active_legacy_rate |= BIT(sband->bitrates[i].hw_value);
2867
2860 /* 2868 /*
2861 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3), 2869 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
2862 * supp_rates[] does not; shift to convert format, force 9 MBits off. 2870 * supp_rates[] does not; shift to convert format, force 9 MBits off.
@@ -2884,15 +2892,15 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
2884 2892
2885 /* These values will be overridden later */ 2893 /* These values will be overridden later */
2886 lq_sta->lq.general_params.single_stream_ant_msk = 2894 lq_sta->lq.general_params.single_stream_ant_msk =
2887 first_antenna(hw_params(priv).valid_tx_ant); 2895 first_antenna(priv->hw_params.valid_tx_ant);
2888 lq_sta->lq.general_params.dual_stream_ant_msk = 2896 lq_sta->lq.general_params.dual_stream_ant_msk =
2889 hw_params(priv).valid_tx_ant & 2897 priv->hw_params.valid_tx_ant &
2890 ~first_antenna(hw_params(priv).valid_tx_ant); 2898 ~first_antenna(priv->hw_params.valid_tx_ant);
2891 if (!lq_sta->lq.general_params.dual_stream_ant_msk) { 2899 if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
2892 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB; 2900 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
2893 } else if (num_of_ant(hw_params(priv).valid_tx_ant) == 2) { 2901 } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
2894 lq_sta->lq.general_params.dual_stream_ant_msk = 2902 lq_sta->lq.general_params.dual_stream_ant_msk =
2895 hw_params(priv).valid_tx_ant; 2903 priv->hw_params.valid_tx_ant;
2896 } 2904 }
2897 2905
2898 /* as default allow aggregation for all tids */ 2906 /* as default allow aggregation for all tids */
@@ -2938,7 +2946,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
2938 if (priv && priv->bt_full_concurrent) { 2946 if (priv && priv->bt_full_concurrent) {
2939 /* 1x1 only */ 2947 /* 1x1 only */
2940 tbl_type.ant_type = 2948 tbl_type.ant_type =
2941 first_antenna(hw_params(priv).valid_tx_ant); 2949 first_antenna(priv->hw_params.valid_tx_ant);
2942 } 2950 }
2943 2951
2944 /* How many times should we repeat the initial rate? */ 2952 /* How many times should we repeat the initial rate? */
@@ -2970,7 +2978,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
2970 if (priv->bt_full_concurrent) 2978 if (priv->bt_full_concurrent)
2971 valid_tx_ant = ANT_A; 2979 valid_tx_ant = ANT_A;
2972 else 2980 else
2973 valid_tx_ant = hw_params(priv).valid_tx_ant; 2981 valid_tx_ant = priv->hw_params.valid_tx_ant;
2974 } 2982 }
2975 2983
2976 /* Fill rest of rate table */ 2984 /* Fill rest of rate table */
@@ -3004,7 +3012,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
3004 if (priv && priv->bt_full_concurrent) { 3012 if (priv && priv->bt_full_concurrent) {
3005 /* 1x1 only */ 3013 /* 1x1 only */
3006 tbl_type.ant_type = 3014 tbl_type.ant_type =
3007 first_antenna(hw_params(priv).valid_tx_ant); 3015 first_antenna(priv->hw_params.valid_tx_ant);
3008 } 3016 }
3009 3017
3010 /* Indicate to uCode which entries might be MIMO. 3018 /* Indicate to uCode which entries might be MIMO.
@@ -3055,11 +3063,11 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
3055 * overwrite if needed, pass aggregation time limit 3063 * overwrite if needed, pass aggregation time limit
3056 * to uCode in uSec 3064 * to uCode in uSec
3057 */ 3065 */
3058 if (priv && cfg(priv)->bt_params && 3066 if (priv && priv->cfg->bt_params &&
3059 cfg(priv)->bt_params->agg_time_limit && 3067 priv->cfg->bt_params->agg_time_limit &&
3060 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH) 3068 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
3061 lq_cmd->agg_params.agg_time_limit = 3069 lq_cmd->agg_params.agg_time_limit =
3062 cpu_to_le16(cfg(priv)->bt_params->agg_time_limit); 3070 cpu_to_le16(priv->cfg->bt_params->agg_time_limit);
3063} 3071}
3064 3072
3065static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) 3073static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
@@ -3091,7 +3099,7 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
3091 u8 ant_sel_tx; 3099 u8 ant_sel_tx;
3092 3100
3093 priv = lq_sta->drv; 3101 priv = lq_sta->drv;
3094 valid_tx_ant = hw_params(priv).valid_tx_ant; 3102 valid_tx_ant = priv->hw_params.valid_tx_ant;
3095 if (lq_sta->dbg_fixed_rate) { 3103 if (lq_sta->dbg_fixed_rate) {
3096 ant_sel_tx = 3104 ant_sel_tx =
3097 ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) 3105 ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
@@ -3162,9 +3170,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
3162 desc += sprintf(buff+desc, "fixed rate 0x%X\n", 3170 desc += sprintf(buff+desc, "fixed rate 0x%X\n",
3163 lq_sta->dbg_fixed_rate); 3171 lq_sta->dbg_fixed_rate);
3164 desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n", 3172 desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
3165 (hw_params(priv).valid_tx_ant & ANT_A) ? "ANT_A," : "", 3173 (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
3166 (hw_params(priv).valid_tx_ant & ANT_B) ? "ANT_B," : "", 3174 (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
3167 (hw_params(priv).valid_tx_ant & ANT_C) ? "ANT_C" : ""); 3175 (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
3168 desc += sprintf(buff+desc, "lq type %s\n", 3176 desc += sprintf(buff+desc, "lq type %s\n",
3169 (is_legacy(tbl->lq_type)) ? "legacy" : "HT"); 3177 (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
3170 if (is_Ht(tbl->lq_type)) { 3178 if (is_Ht(tbl->lq_type)) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index 203b1c13c491..82d02e1ae89f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -30,6 +30,7 @@
30#include <net/mac80211.h> 30#include <net/mac80211.h>
31 31
32#include "iwl-commands.h" 32#include "iwl-commands.h"
33#include "iwl-config.h"
33 34
34struct iwl_rate_info { 35struct iwl_rate_info {
35 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */ 36 u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
@@ -174,32 +175,6 @@ enum {
174 IWL_RATE_11M_IEEE = 22, 175 IWL_RATE_11M_IEEE = 22,
175}; 176};
176 177
177#define IWL_CCK_BASIC_RATES_MASK \
178 (IWL_RATE_1M_MASK | \
179 IWL_RATE_2M_MASK)
180
181#define IWL_CCK_RATES_MASK \
182 (IWL_CCK_BASIC_RATES_MASK | \
183 IWL_RATE_5M_MASK | \
184 IWL_RATE_11M_MASK)
185
186#define IWL_OFDM_BASIC_RATES_MASK \
187 (IWL_RATE_6M_MASK | \
188 IWL_RATE_12M_MASK | \
189 IWL_RATE_24M_MASK)
190
191#define IWL_OFDM_RATES_MASK \
192 (IWL_OFDM_BASIC_RATES_MASK | \
193 IWL_RATE_9M_MASK | \
194 IWL_RATE_18M_MASK | \
195 IWL_RATE_36M_MASK | \
196 IWL_RATE_48M_MASK | \
197 IWL_RATE_54M_MASK)
198
199#define IWL_BASIC_RATES_MASK \
200 (IWL_OFDM_BASIC_RATES_MASK | \
201 IWL_CCK_BASIC_RATES_MASK)
202
203#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1) 178#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
204 179
205#define IWL_INVALID_VALUE -1 180#define IWL_INVALID_VALUE -1
@@ -306,15 +281,6 @@ enum iwl_table_type {
306#define is_a_band(tbl) ((tbl) == LQ_A) 281#define is_a_band(tbl) ((tbl) == LQ_A)
307#define is_g_and(tbl) ((tbl) == LQ_G) 282#define is_g_and(tbl) ((tbl) == LQ_G)
308 283
309#define ANT_NONE 0x0
310#define ANT_A BIT(0)
311#define ANT_B BIT(1)
312#define ANT_AB (ANT_A | ANT_B)
313#define ANT_C BIT(2)
314#define ANT_AC (ANT_A | ANT_C)
315#define ANT_BC (ANT_B | ANT_C)
316#define ANT_ABC (ANT_AB | ANT_C)
317
318#define IWL_MAX_MCS_DISPLAY_SIZE 12 284#define IWL_MAX_MCS_DISPLAY_SIZE 12
319 285
320struct iwl_rate_mcs_info { 286struct iwl_rate_mcs_info {
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
index 22474608a70b..0c252c5d8bf1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
@@ -34,95 +34,91 @@
34#include <asm/unaligned.h> 34#include <asm/unaligned.h>
35#include "iwl-eeprom.h" 35#include "iwl-eeprom.h"
36#include "iwl-dev.h" 36#include "iwl-dev.h"
37#include "iwl-core.h"
38#include "iwl-io.h" 37#include "iwl-io.h"
39#include "iwl-agn-calib.h" 38#include "iwl-agn-calib.h"
40#include "iwl-agn.h" 39#include "iwl-agn.h"
41#include "iwl-shared.h" 40#include "iwl-modparams.h"
42 41
43const char *get_cmd_string(u8 cmd) 42#define IWL_CMD_ENTRY(x) [x] = #x
44{ 43
45 switch (cmd) { 44const char *iwl_dvm_cmd_strings[REPLY_MAX] = {
46 IWL_CMD(REPLY_ALIVE); 45 IWL_CMD_ENTRY(REPLY_ALIVE),
47 IWL_CMD(REPLY_ERROR); 46 IWL_CMD_ENTRY(REPLY_ERROR),
48 IWL_CMD(REPLY_ECHO); 47 IWL_CMD_ENTRY(REPLY_ECHO),
49 IWL_CMD(REPLY_RXON); 48 IWL_CMD_ENTRY(REPLY_RXON),
50 IWL_CMD(REPLY_RXON_ASSOC); 49 IWL_CMD_ENTRY(REPLY_RXON_ASSOC),
51 IWL_CMD(REPLY_QOS_PARAM); 50 IWL_CMD_ENTRY(REPLY_QOS_PARAM),
52 IWL_CMD(REPLY_RXON_TIMING); 51 IWL_CMD_ENTRY(REPLY_RXON_TIMING),
53 IWL_CMD(REPLY_ADD_STA); 52 IWL_CMD_ENTRY(REPLY_ADD_STA),
54 IWL_CMD(REPLY_REMOVE_STA); 53 IWL_CMD_ENTRY(REPLY_REMOVE_STA),
55 IWL_CMD(REPLY_REMOVE_ALL_STA); 54 IWL_CMD_ENTRY(REPLY_REMOVE_ALL_STA),
56 IWL_CMD(REPLY_TXFIFO_FLUSH); 55 IWL_CMD_ENTRY(REPLY_TXFIFO_FLUSH),
57 IWL_CMD(REPLY_WEPKEY); 56 IWL_CMD_ENTRY(REPLY_WEPKEY),
58 IWL_CMD(REPLY_TX); 57 IWL_CMD_ENTRY(REPLY_TX),
59 IWL_CMD(REPLY_LEDS_CMD); 58 IWL_CMD_ENTRY(REPLY_LEDS_CMD),
60 IWL_CMD(REPLY_TX_LINK_QUALITY_CMD); 59 IWL_CMD_ENTRY(REPLY_TX_LINK_QUALITY_CMD),
61 IWL_CMD(COEX_PRIORITY_TABLE_CMD); 60 IWL_CMD_ENTRY(COEX_PRIORITY_TABLE_CMD),
62 IWL_CMD(COEX_MEDIUM_NOTIFICATION); 61 IWL_CMD_ENTRY(COEX_MEDIUM_NOTIFICATION),
63 IWL_CMD(COEX_EVENT_CMD); 62 IWL_CMD_ENTRY(COEX_EVENT_CMD),
64 IWL_CMD(REPLY_QUIET_CMD); 63 IWL_CMD_ENTRY(REPLY_QUIET_CMD),
65 IWL_CMD(REPLY_CHANNEL_SWITCH); 64 IWL_CMD_ENTRY(REPLY_CHANNEL_SWITCH),
66 IWL_CMD(CHANNEL_SWITCH_NOTIFICATION); 65 IWL_CMD_ENTRY(CHANNEL_SWITCH_NOTIFICATION),
67 IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD); 66 IWL_CMD_ENTRY(REPLY_SPECTRUM_MEASUREMENT_CMD),
68 IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION); 67 IWL_CMD_ENTRY(SPECTRUM_MEASURE_NOTIFICATION),
69 IWL_CMD(POWER_TABLE_CMD); 68 IWL_CMD_ENTRY(POWER_TABLE_CMD),
70 IWL_CMD(PM_SLEEP_NOTIFICATION); 69 IWL_CMD_ENTRY(PM_SLEEP_NOTIFICATION),
71 IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC); 70 IWL_CMD_ENTRY(PM_DEBUG_STATISTIC_NOTIFIC),
72 IWL_CMD(REPLY_SCAN_CMD); 71 IWL_CMD_ENTRY(REPLY_SCAN_CMD),
73 IWL_CMD(REPLY_SCAN_ABORT_CMD); 72 IWL_CMD_ENTRY(REPLY_SCAN_ABORT_CMD),
74 IWL_CMD(SCAN_START_NOTIFICATION); 73 IWL_CMD_ENTRY(SCAN_START_NOTIFICATION),
75 IWL_CMD(SCAN_RESULTS_NOTIFICATION); 74 IWL_CMD_ENTRY(SCAN_RESULTS_NOTIFICATION),
76 IWL_CMD(SCAN_COMPLETE_NOTIFICATION); 75 IWL_CMD_ENTRY(SCAN_COMPLETE_NOTIFICATION),
77 IWL_CMD(BEACON_NOTIFICATION); 76 IWL_CMD_ENTRY(BEACON_NOTIFICATION),
78 IWL_CMD(REPLY_TX_BEACON); 77 IWL_CMD_ENTRY(REPLY_TX_BEACON),
79 IWL_CMD(WHO_IS_AWAKE_NOTIFICATION); 78 IWL_CMD_ENTRY(WHO_IS_AWAKE_NOTIFICATION),
80 IWL_CMD(QUIET_NOTIFICATION); 79 IWL_CMD_ENTRY(QUIET_NOTIFICATION),
81 IWL_CMD(REPLY_TX_PWR_TABLE_CMD); 80 IWL_CMD_ENTRY(REPLY_TX_PWR_TABLE_CMD),
82 IWL_CMD(MEASURE_ABORT_NOTIFICATION); 81 IWL_CMD_ENTRY(MEASURE_ABORT_NOTIFICATION),
83 IWL_CMD(REPLY_BT_CONFIG); 82 IWL_CMD_ENTRY(REPLY_BT_CONFIG),
84 IWL_CMD(REPLY_STATISTICS_CMD); 83 IWL_CMD_ENTRY(REPLY_STATISTICS_CMD),
85 IWL_CMD(STATISTICS_NOTIFICATION); 84 IWL_CMD_ENTRY(STATISTICS_NOTIFICATION),
86 IWL_CMD(REPLY_CARD_STATE_CMD); 85 IWL_CMD_ENTRY(REPLY_CARD_STATE_CMD),
87 IWL_CMD(CARD_STATE_NOTIFICATION); 86 IWL_CMD_ENTRY(CARD_STATE_NOTIFICATION),
88 IWL_CMD(MISSED_BEACONS_NOTIFICATION); 87 IWL_CMD_ENTRY(MISSED_BEACONS_NOTIFICATION),
89 IWL_CMD(REPLY_CT_KILL_CONFIG_CMD); 88 IWL_CMD_ENTRY(REPLY_CT_KILL_CONFIG_CMD),
90 IWL_CMD(SENSITIVITY_CMD); 89 IWL_CMD_ENTRY(SENSITIVITY_CMD),
91 IWL_CMD(REPLY_PHY_CALIBRATION_CMD); 90 IWL_CMD_ENTRY(REPLY_PHY_CALIBRATION_CMD),
92 IWL_CMD(REPLY_RX_PHY_CMD); 91 IWL_CMD_ENTRY(REPLY_RX_PHY_CMD),
93 IWL_CMD(REPLY_RX_MPDU_CMD); 92 IWL_CMD_ENTRY(REPLY_RX_MPDU_CMD),
94 IWL_CMD(REPLY_RX); 93 IWL_CMD_ENTRY(REPLY_RX),
95 IWL_CMD(REPLY_COMPRESSED_BA); 94 IWL_CMD_ENTRY(REPLY_COMPRESSED_BA),
96 IWL_CMD(CALIBRATION_CFG_CMD); 95 IWL_CMD_ENTRY(CALIBRATION_CFG_CMD),
97 IWL_CMD(CALIBRATION_RES_NOTIFICATION); 96 IWL_CMD_ENTRY(CALIBRATION_RES_NOTIFICATION),
98 IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION); 97 IWL_CMD_ENTRY(CALIBRATION_COMPLETE_NOTIFICATION),
99 IWL_CMD(REPLY_TX_POWER_DBM_CMD); 98 IWL_CMD_ENTRY(REPLY_TX_POWER_DBM_CMD),
100 IWL_CMD(TEMPERATURE_NOTIFICATION); 99 IWL_CMD_ENTRY(TEMPERATURE_NOTIFICATION),
101 IWL_CMD(TX_ANT_CONFIGURATION_CMD); 100 IWL_CMD_ENTRY(TX_ANT_CONFIGURATION_CMD),
102 IWL_CMD(REPLY_BT_COEX_PROFILE_NOTIF); 101 IWL_CMD_ENTRY(REPLY_BT_COEX_PROFILE_NOTIF),
103 IWL_CMD(REPLY_BT_COEX_PRIO_TABLE); 102 IWL_CMD_ENTRY(REPLY_BT_COEX_PRIO_TABLE),
104 IWL_CMD(REPLY_BT_COEX_PROT_ENV); 103 IWL_CMD_ENTRY(REPLY_BT_COEX_PROT_ENV),
105 IWL_CMD(REPLY_WIPAN_PARAMS); 104 IWL_CMD_ENTRY(REPLY_WIPAN_PARAMS),
106 IWL_CMD(REPLY_WIPAN_RXON); 105 IWL_CMD_ENTRY(REPLY_WIPAN_RXON),
107 IWL_CMD(REPLY_WIPAN_RXON_TIMING); 106 IWL_CMD_ENTRY(REPLY_WIPAN_RXON_TIMING),
108 IWL_CMD(REPLY_WIPAN_RXON_ASSOC); 107 IWL_CMD_ENTRY(REPLY_WIPAN_RXON_ASSOC),
109 IWL_CMD(REPLY_WIPAN_QOS_PARAM); 108 IWL_CMD_ENTRY(REPLY_WIPAN_QOS_PARAM),
110 IWL_CMD(REPLY_WIPAN_WEPKEY); 109 IWL_CMD_ENTRY(REPLY_WIPAN_WEPKEY),
111 IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH); 110 IWL_CMD_ENTRY(REPLY_WIPAN_P2P_CHANNEL_SWITCH),
112 IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION); 111 IWL_CMD_ENTRY(REPLY_WIPAN_NOA_NOTIFICATION),
113 IWL_CMD(REPLY_WIPAN_DEACTIVATION_COMPLETE); 112 IWL_CMD_ENTRY(REPLY_WIPAN_DEACTIVATION_COMPLETE),
114 IWL_CMD(REPLY_WOWLAN_PATTERNS); 113 IWL_CMD_ENTRY(REPLY_WOWLAN_PATTERNS),
115 IWL_CMD(REPLY_WOWLAN_WAKEUP_FILTER); 114 IWL_CMD_ENTRY(REPLY_WOWLAN_WAKEUP_FILTER),
116 IWL_CMD(REPLY_WOWLAN_TSC_RSC_PARAMS); 115 IWL_CMD_ENTRY(REPLY_WOWLAN_TSC_RSC_PARAMS),
117 IWL_CMD(REPLY_WOWLAN_TKIP_PARAMS); 116 IWL_CMD_ENTRY(REPLY_WOWLAN_TKIP_PARAMS),
118 IWL_CMD(REPLY_WOWLAN_KEK_KCK_MATERIAL); 117 IWL_CMD_ENTRY(REPLY_WOWLAN_KEK_KCK_MATERIAL),
119 IWL_CMD(REPLY_WOWLAN_GET_STATUS); 118 IWL_CMD_ENTRY(REPLY_WOWLAN_GET_STATUS),
120 IWL_CMD(REPLY_D3_CONFIG); 119 IWL_CMD_ENTRY(REPLY_D3_CONFIG),
121 default: 120};
122 return "UNKNOWN"; 121#undef IWL_CMD_ENTRY
123
124 }
125}
126 122
127/****************************************************************************** 123/******************************************************************************
128 * 124 *
@@ -137,10 +133,9 @@ static int iwlagn_rx_reply_error(struct iwl_priv *priv,
137 struct iwl_rx_packet *pkt = rxb_addr(rxb); 133 struct iwl_rx_packet *pkt = rxb_addr(rxb);
138 struct iwl_error_resp *err_resp = (void *)pkt->data; 134 struct iwl_error_resp *err_resp = (void *)pkt->data;
139 135
140 IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) " 136 IWL_ERR(priv, "Error Reply type 0x%08X cmd REPLY_ERROR (0x%02X) "
141 "seq 0x%04X ser 0x%08X\n", 137 "seq 0x%04X ser 0x%08X\n",
142 le32_to_cpu(err_resp->error_type), 138 le32_to_cpu(err_resp->error_type),
143 get_cmd_string(err_resp->cmd_id),
144 err_resp->cmd_id, 139 err_resp->cmd_id,
145 le16_to_cpu(err_resp->bad_cmd_seq_num), 140 le16_to_cpu(err_resp->bad_cmd_seq_num),
146 le32_to_cpu(err_resp->error_info)); 141 le32_to_cpu(err_resp->error_info));
@@ -216,8 +211,7 @@ static int iwlagn_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
216 u32 __maybe_unused len = 211 u32 __maybe_unused len =
217 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; 212 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
218 IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled " 213 IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
219 "notification for %s:\n", len, 214 "notification for PM_DEBUG_STATISTIC_NOTIFIC:\n", len);
220 get_cmd_string(pkt->hdr.cmd));
221 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->data, len); 215 iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->data, len);
222 return 0; 216 return 0;
223} 217}
@@ -246,69 +240,6 @@ static int iwlagn_rx_beacon_notif(struct iwl_priv *priv,
246 return 0; 240 return 0;
247} 241}
248 242
249/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
250#define ACK_CNT_RATIO (50)
251#define BA_TIMEOUT_CNT (5)
252#define BA_TIMEOUT_MAX (16)
253
254/**
255 * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
256 *
257 * When the ACK count ratio is low and aggregated BA timeout retries exceeding
258 * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal
259 * operation state.
260 */
261static bool iwlagn_good_ack_health(struct iwl_priv *priv,
262 struct statistics_tx *cur)
263{
264 int actual_delta, expected_delta, ba_timeout_delta;
265 struct statistics_tx *old;
266
267 if (priv->agg_tids_count)
268 return true;
269
270 lockdep_assert_held(&priv->statistics.lock);
271
272 old = &priv->statistics.tx;
273
274 actual_delta = le32_to_cpu(cur->actual_ack_cnt) -
275 le32_to_cpu(old->actual_ack_cnt);
276 expected_delta = le32_to_cpu(cur->expected_ack_cnt) -
277 le32_to_cpu(old->expected_ack_cnt);
278
279 /* Values should not be negative, but we do not trust the firmware */
280 if (actual_delta <= 0 || expected_delta <= 0)
281 return true;
282
283 ba_timeout_delta = le32_to_cpu(cur->agg.ba_timeout) -
284 le32_to_cpu(old->agg.ba_timeout);
285
286 if ((actual_delta * 100 / expected_delta) < ACK_CNT_RATIO &&
287 ba_timeout_delta > BA_TIMEOUT_CNT) {
288 IWL_DEBUG_RADIO(priv,
289 "deltas: actual %d expected %d ba_timeout %d\n",
290 actual_delta, expected_delta, ba_timeout_delta);
291
292#ifdef CONFIG_IWLWIFI_DEBUGFS
293 /*
294 * This is ifdef'ed on DEBUGFS because otherwise the
295 * statistics aren't available. If DEBUGFS is set but
296 * DEBUG is not, these will just compile out.
297 */
298 IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta %d\n",
299 priv->delta_stats.tx.rx_detected_cnt);
300 IWL_DEBUG_RADIO(priv,
301 "ack_or_ba_timeout_collision delta %d\n",
302 priv->delta_stats.tx.ack_or_ba_timeout_collision);
303#endif
304
305 if (ba_timeout_delta >= BA_TIMEOUT_MAX)
306 return false;
307 }
308
309 return true;
310}
311
312/** 243/**
313 * iwl_good_plcp_health - checks for plcp error. 244 * iwl_good_plcp_health - checks for plcp error.
314 * 245 *
@@ -347,6 +278,45 @@ static bool iwlagn_good_plcp_health(struct iwl_priv *priv,
347 return true; 278 return true;
348} 279}
349 280
281int iwl_force_rf_reset(struct iwl_priv *priv, bool external)
282{
283 struct iwl_rf_reset *rf_reset;
284
285 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
286 return -EAGAIN;
287
288 if (!iwl_is_any_associated(priv)) {
289 IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
290 return -ENOLINK;
291 }
292
293 rf_reset = &priv->rf_reset;
294 rf_reset->reset_request_count++;
295 if (!external && rf_reset->last_reset_jiffies &&
296 time_after(rf_reset->last_reset_jiffies +
297 IWL_DELAY_NEXT_FORCE_RF_RESET, jiffies)) {
298 IWL_DEBUG_INFO(priv, "RF reset rejected\n");
299 rf_reset->reset_reject_count++;
300 return -EAGAIN;
301 }
302 rf_reset->reset_success_count++;
303 rf_reset->last_reset_jiffies = jiffies;
304
305 /*
306 * There is no easy and better way to force reset the radio,
307 * the only known method is switching channel which will force to
308 * reset and tune the radio.
309 * Use internal short scan (single channel) operation to should
310 * achieve this objective.
311 * Driver should reset the radio when number of consecutive missed
312 * beacon, or any other uCode error condition detected.
313 */
314 IWL_DEBUG_INFO(priv, "perform radio reset.\n");
315 iwl_internal_short_hw_scan(priv);
316 return 0;
317}
318
319
350static void iwlagn_recover_from_statistics(struct iwl_priv *priv, 320static void iwlagn_recover_from_statistics(struct iwl_priv *priv,
351 struct statistics_rx_phy *cur_ofdm, 321 struct statistics_rx_phy *cur_ofdm,
352 struct statistics_rx_ht_phy *cur_ofdm_ht, 322 struct statistics_rx_ht_phy *cur_ofdm_ht,
@@ -368,15 +338,9 @@ static void iwlagn_recover_from_statistics(struct iwl_priv *priv,
368 if (msecs < 99) 338 if (msecs < 99)
369 return; 339 return;
370 340
371 if (iwlagn_mod_params.ack_check && !iwlagn_good_ack_health(priv, tx)) { 341 if (iwlwifi_mod_params.plcp_check &&
372 IWL_ERR(priv, "low ack count detected, restart firmware\n");
373 if (!iwl_force_reset(priv, IWL_FW_RESET, false))
374 return;
375 }
376
377 if (iwlagn_mod_params.plcp_check &&
378 !iwlagn_good_plcp_health(priv, cur_ofdm, cur_ofdm_ht, msecs)) 342 !iwlagn_good_plcp_health(priv, cur_ofdm, cur_ofdm_ht, msecs))
379 iwl_force_reset(priv, IWL_RF_RESET, false); 343 iwl_force_rf_reset(priv, false);
380} 344}
381 345
382/* Calculate noise level, based on measurements during network silence just 346/* Calculate noise level, based on measurements during network silence just
@@ -589,8 +553,8 @@ static int iwlagn_rx_statistics(struct iwl_priv *priv,
589 iwlagn_rx_calc_noise(priv); 553 iwlagn_rx_calc_noise(priv);
590 queue_work(priv->workqueue, &priv->run_time_calib_work); 554 queue_work(priv->workqueue, &priv->run_time_calib_work);
591 } 555 }
592 if (cfg(priv)->lib->temperature && change) 556 if (priv->lib->temperature && change)
593 cfg(priv)->lib->temperature(priv); 557 priv->lib->temperature(priv);
594 558
595 spin_unlock(&priv->statistics.lock); 559 spin_unlock(&priv->statistics.lock);
596 560
@@ -639,16 +603,16 @@ static int iwlagn_rx_card_state_notif(struct iwl_priv *priv,
639 if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | 603 if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
640 CT_CARD_DISABLED)) { 604 CT_CARD_DISABLED)) {
641 605
642 iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_SET, 606 iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET,
643 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 607 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
644 608
645 iwl_write_direct32(trans(priv), HBUS_TARG_MBX_C, 609 iwl_write_direct32(priv->trans, HBUS_TARG_MBX_C,
646 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); 610 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
647 611
648 if (!(flags & RXON_CARD_DISABLED)) { 612 if (!(flags & RXON_CARD_DISABLED)) {
649 iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_CLR, 613 iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
650 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 614 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
651 iwl_write_direct32(trans(priv), HBUS_TARG_MBX_C, 615 iwl_write_direct32(priv->trans, HBUS_TARG_MBX_C,
652 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); 616 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
653 } 617 }
654 if (flags & CT_CARD_DISABLED) 618 if (flags & CT_CARD_DISABLED)
@@ -671,7 +635,7 @@ static int iwlagn_rx_card_state_notif(struct iwl_priv *priv,
671 wiphy_rfkill_set_hw_state(priv->hw->wiphy, 635 wiphy_rfkill_set_hw_state(priv->hw->wiphy,
672 test_bit(STATUS_RF_KILL_HW, &priv->status)); 636 test_bit(STATUS_RF_KILL_HW, &priv->status));
673 else 637 else
674 wake_up(&trans(priv)->wait_command_queue); 638 wake_up(&priv->trans->wait_command_queue);
675 return 0; 639 return 0;
676} 640}
677 641
@@ -783,7 +747,7 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
783 } 747 }
784 748
785 /* In case of HW accelerated crypto and bad decryption, drop */ 749 /* In case of HW accelerated crypto and bad decryption, drop */
786 if (!iwlagn_mod_params.sw_crypto && 750 if (!iwlwifi_mod_params.sw_crypto &&
787 iwlagn_set_decrypted_flag(priv, hdr, ampdu_status, stats)) 751 iwlagn_set_decrypted_flag(priv, hdr, ampdu_status, stats))
788 return; 752 return;
789 753
@@ -800,12 +764,11 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
800 fraglen = len - hdrlen; 764 fraglen = len - hdrlen;
801 765
802 if (fraglen) { 766 if (fraglen) {
803 int offset = (void *)hdr + hdrlen - rxb_addr(rxb); 767 int offset = (void *)hdr - rxb_addr(rxb) + rxb_offset(rxb);
804 768
805 skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset, 769 skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset,
806 fraglen, rxb->truesize); 770 fraglen, rxb->truesize);
807 } 771 }
808 iwl_update_stats(priv, false, fc, len);
809 772
810 /* 773 /*
811 * Wake any queues that were stopped due to a passive channel tx 774 * Wake any queues that were stopped due to a passive channel tx
@@ -977,7 +940,7 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
977 } 940 }
978 941
979 if ((unlikely(phy_res->cfg_phy_cnt > 20))) { 942 if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
980 IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n", 943 IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
981 phy_res->cfg_phy_cnt); 944 phy_res->cfg_phy_cnt);
982 return 0; 945 return 0;
983 } 946 }
@@ -1012,7 +975,6 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
1012 /* Find max signal strength (dBm) among 3 antenna/receiver chains */ 975 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
1013 rx_status.signal = iwlagn_calc_rssi(priv, phy_res); 976 rx_status.signal = iwlagn_calc_rssi(priv, phy_res);
1014 977
1015 iwl_dbg_log_rx_data_frame(priv, len, header);
1016 IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n", 978 IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
1017 rx_status.signal, (unsigned long long)rx_status.mactime); 979 rx_status.signal, (unsigned long long)rx_status.mactime);
1018 980
@@ -1141,16 +1103,13 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv)
1141 handlers[REPLY_COMPRESSED_BA] = 1103 handlers[REPLY_COMPRESSED_BA] =
1142 iwlagn_rx_reply_compressed_ba; 1104 iwlagn_rx_reply_compressed_ba;
1143 1105
1144 /* init calibration handlers */
1145 priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
1146 iwlagn_rx_calib_result;
1147 priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx; 1106 priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
1148 1107
1149 /* set up notification wait support */ 1108 /* set up notification wait support */
1150 iwl_notification_wait_init(&priv->notif_wait); 1109 iwl_notification_wait_init(&priv->notif_wait);
1151 1110
1152 /* Set up BT Rx handlers */ 1111 /* Set up BT Rx handlers */
1153 if (cfg(priv)->bt_params) 1112 if (priv->cfg->bt_params)
1154 iwlagn_bt_rx_handler_setup(priv); 1113 iwlagn_bt_rx_handler_setup(priv);
1155} 1114}
1156 1115
@@ -1192,9 +1151,9 @@ int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
1192 err = priv->rx_handlers[pkt->hdr.cmd] (priv, rxb, cmd); 1151 err = priv->rx_handlers[pkt->hdr.cmd] (priv, rxb, cmd);
1193 } else { 1152 } else {
1194 /* No handling needed */ 1153 /* No handling needed */
1195 IWL_DEBUG_RX(priv, 1154 IWL_DEBUG_RX(priv, "No handler needed for %s, 0x%02x\n",
1196 "No handler needed for %s, 0x%02x\n", 1155 iwl_dvm_get_cmd_string(pkt->hdr.cmd),
1197 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); 1156 pkt->hdr.cmd);
1198 } 1157 }
1199 } 1158 }
1200 return err; 1159 return err;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index 2e1a31797a9e..0f7c444f2440 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -24,12 +24,79 @@
24 * 24 *
25 *****************************************************************************/ 25 *****************************************************************************/
26 26
27#include <linux/etherdevice.h>
27#include "iwl-dev.h" 28#include "iwl-dev.h"
28#include "iwl-agn.h" 29#include "iwl-agn.h"
29#include "iwl-core.h"
30#include "iwl-agn-calib.h" 30#include "iwl-agn-calib.h"
31#include "iwl-trans.h" 31#include "iwl-trans.h"
32#include "iwl-shared.h" 32#include "iwl-modparams.h"
33
34/*
35 * initialize rxon structure with default values from eeprom
36 */
37void iwl_connection_init_rx_config(struct iwl_priv *priv,
38 struct iwl_rxon_context *ctx)
39{
40 const struct iwl_channel_info *ch_info;
41
42 memset(&ctx->staging, 0, sizeof(ctx->staging));
43
44 if (!ctx->vif) {
45 ctx->staging.dev_type = ctx->unused_devtype;
46 } else
47 switch (ctx->vif->type) {
48 case NL80211_IFTYPE_AP:
49 ctx->staging.dev_type = ctx->ap_devtype;
50 break;
51
52 case NL80211_IFTYPE_STATION:
53 ctx->staging.dev_type = ctx->station_devtype;
54 ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
55 break;
56
57 case NL80211_IFTYPE_ADHOC:
58 ctx->staging.dev_type = ctx->ibss_devtype;
59 ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
60 ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
61 RXON_FILTER_ACCEPT_GRP_MSK;
62 break;
63
64 default:
65 IWL_ERR(priv, "Unsupported interface type %d\n",
66 ctx->vif->type);
67 break;
68 }
69
70#if 0
71 /* TODO: Figure out when short_preamble would be set and cache from
72 * that */
73 if (!hw_to_local(priv->hw)->short_preamble)
74 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
75 else
76 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
77#endif
78
79 ch_info = iwl_get_channel_info(priv, priv->band,
80 le16_to_cpu(ctx->active.channel));
81
82 if (!ch_info)
83 ch_info = &priv->channel_info[0];
84
85 ctx->staging.channel = cpu_to_le16(ch_info->channel);
86 priv->band = ch_info->band;
87
88 iwl_set_flags_for_band(priv, ctx, priv->band, ctx->vif);
89
90 /* clear both MIX and PURE40 mode flag */
91 ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
92 RXON_FLG_CHANNEL_MODE_PURE_40);
93 if (ctx->vif)
94 memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
95
96 ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
97 ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
98 ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff;
99}
33 100
34static int iwlagn_disable_bss(struct iwl_priv *priv, 101static int iwlagn_disable_bss(struct iwl_priv *priv,
35 struct iwl_rxon_context *ctx, 102 struct iwl_rxon_context *ctx,
@@ -59,9 +126,12 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
59 __le32 old_filter = send->filter_flags; 126 __le32 old_filter = send->filter_flags;
60 u8 old_dev_type = send->dev_type; 127 u8 old_dev_type = send->dev_type;
61 int ret; 128 int ret;
129 static const u8 deactivate_cmd[] = {
130 REPLY_WIPAN_DEACTIVATION_COMPLETE
131 };
62 132
63 iwl_init_notification_wait(&priv->notif_wait, &disable_wait, 133 iwl_init_notification_wait(&priv->notif_wait, &disable_wait,
64 REPLY_WIPAN_DEACTIVATION_COMPLETE, 134 deactivate_cmd, ARRAY_SIZE(deactivate_cmd),
65 NULL, NULL); 135 NULL, NULL);
66 136
67 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 137 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
@@ -101,8 +171,7 @@ static int iwlagn_disconn_pan(struct iwl_priv *priv,
101 return ret; 171 return ret;
102} 172}
103 173
104static void iwlagn_update_qos(struct iwl_priv *priv, 174void iwlagn_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
105 struct iwl_rxon_context *ctx)
106{ 175{
107 int ret; 176 int ret;
108 177
@@ -129,8 +198,8 @@ static void iwlagn_update_qos(struct iwl_priv *priv,
129 IWL_DEBUG_QUIET_RFKILL(priv, "Failed to update QoS\n"); 198 IWL_DEBUG_QUIET_RFKILL(priv, "Failed to update QoS\n");
130} 199}
131 200
132static int iwlagn_update_beacon(struct iwl_priv *priv, 201int iwlagn_update_beacon(struct iwl_priv *priv,
133 struct ieee80211_vif *vif) 202 struct ieee80211_vif *vif)
134{ 203{
135 lockdep_assert_held(&priv->mutex); 204 lockdep_assert_held(&priv->mutex);
136 205
@@ -186,6 +255,109 @@ static int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
186 return ret; 255 return ret;
187} 256}
188 257
258static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
259{
260 u16 new_val;
261 u16 beacon_factor;
262
263 /*
264 * If mac80211 hasn't given us a beacon interval, program
265 * the default into the device (not checking this here
266 * would cause the adjustment below to return the maximum
267 * value, which may break PAN.)
268 */
269 if (!beacon_val)
270 return DEFAULT_BEACON_INTERVAL;
271
272 /*
273 * If the beacon interval we obtained from the peer
274 * is too large, we'll have to wake up more often
275 * (and in IBSS case, we'll beacon too much)
276 *
277 * For example, if max_beacon_val is 4096, and the
278 * requested beacon interval is 7000, we'll have to
279 * use 3500 to be able to wake up on the beacons.
280 *
281 * This could badly influence beacon detection stats.
282 */
283
284 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
285 new_val = beacon_val / beacon_factor;
286
287 if (!new_val)
288 new_val = max_beacon_val;
289
290 return new_val;
291}
292
293static int iwl_send_rxon_timing(struct iwl_priv *priv,
294 struct iwl_rxon_context *ctx)
295{
296 u64 tsf;
297 s32 interval_tm, rem;
298 struct ieee80211_conf *conf = NULL;
299 u16 beacon_int;
300 struct ieee80211_vif *vif = ctx->vif;
301
302 conf = &priv->hw->conf;
303
304 lockdep_assert_held(&priv->mutex);
305
306 memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));
307
308 ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
309 ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
310
311 beacon_int = vif ? vif->bss_conf.beacon_int : 0;
312
313 /*
314 * TODO: For IBSS we need to get atim_window from mac80211,
315 * for now just always use 0
316 */
317 ctx->timing.atim_window = 0;
318
319 if (ctx->ctxid == IWL_RXON_CTX_PAN &&
320 (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION) &&
321 iwl_is_associated(priv, IWL_RXON_CTX_BSS) &&
322 priv->contexts[IWL_RXON_CTX_BSS].vif &&
323 priv->contexts[IWL_RXON_CTX_BSS].vif->bss_conf.beacon_int) {
324 ctx->timing.beacon_interval =
325 priv->contexts[IWL_RXON_CTX_BSS].timing.beacon_interval;
326 beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
327 } else if (ctx->ctxid == IWL_RXON_CTX_BSS &&
328 iwl_is_associated(priv, IWL_RXON_CTX_PAN) &&
329 priv->contexts[IWL_RXON_CTX_PAN].vif &&
330 priv->contexts[IWL_RXON_CTX_PAN].vif->bss_conf.beacon_int &&
331 (!iwl_is_associated_ctx(ctx) || !ctx->vif ||
332 !ctx->vif->bss_conf.beacon_int)) {
333 ctx->timing.beacon_interval =
334 priv->contexts[IWL_RXON_CTX_PAN].timing.beacon_interval;
335 beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
336 } else {
337 beacon_int = iwl_adjust_beacon_interval(beacon_int,
338 IWL_MAX_UCODE_BEACON_INTERVAL * TIME_UNIT);
339 ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
340 }
341
342 ctx->beacon_int = beacon_int;
343
344 tsf = priv->timestamp; /* tsf is modifed by do_div: copy it */
345 interval_tm = beacon_int * TIME_UNIT;
346 rem = do_div(tsf, interval_tm);
347 ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
348
349 ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;
350
351 IWL_DEBUG_ASSOC(priv,
352 "beacon interval %d beacon timer %d beacon tim %d\n",
353 le16_to_cpu(ctx->timing.beacon_interval),
354 le32_to_cpu(ctx->timing.beacon_init_val),
355 le16_to_cpu(ctx->timing.atim_window));
356
357 return iwl_dvm_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
358 CMD_SYNC, sizeof(ctx->timing), &ctx->timing);
359}
360
189static int iwlagn_rxon_disconn(struct iwl_priv *priv, 361static int iwlagn_rxon_disconn(struct iwl_priv *priv,
190 struct iwl_rxon_context *ctx) 362 struct iwl_rxon_context *ctx)
191{ 363{
@@ -228,6 +400,64 @@ static int iwlagn_rxon_disconn(struct iwl_priv *priv,
228 return 0; 400 return 0;
229} 401}
230 402
403static int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
404{
405 int ret;
406 s8 prev_tx_power;
407 bool defer;
408 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
409
410 if (priv->calib_disabled & IWL_TX_POWER_CALIB_DISABLED)
411 return 0;
412
413 lockdep_assert_held(&priv->mutex);
414
415 if (priv->tx_power_user_lmt == tx_power && !force)
416 return 0;
417
418 if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) {
419 IWL_WARN(priv,
420 "Requested user TXPOWER %d below lower limit %d.\n",
421 tx_power,
422 IWLAGN_TX_POWER_TARGET_POWER_MIN);
423 return -EINVAL;
424 }
425
426 if (tx_power > priv->tx_power_device_lmt) {
427 IWL_WARN(priv,
428 "Requested user TXPOWER %d above upper limit %d.\n",
429 tx_power, priv->tx_power_device_lmt);
430 return -EINVAL;
431 }
432
433 if (!iwl_is_ready_rf(priv))
434 return -EIO;
435
436 /* scan complete and commit_rxon use tx_power_next value,
437 * it always need to be updated for newest request */
438 priv->tx_power_next = tx_power;
439
440 /* do not set tx power when scanning or channel changing */
441 defer = test_bit(STATUS_SCANNING, &priv->status) ||
442 memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
443 if (defer && !force) {
444 IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
445 return 0;
446 }
447
448 prev_tx_power = priv->tx_power_user_lmt;
449 priv->tx_power_user_lmt = tx_power;
450
451 ret = iwlagn_send_tx_power(priv);
452
453 /* if fail to set tx_power, restore the orig. tx power */
454 if (ret) {
455 priv->tx_power_user_lmt = prev_tx_power;
456 priv->tx_power_next = prev_tx_power;
457 }
458 return ret;
459}
460
231static int iwlagn_rxon_connect(struct iwl_priv *priv, 461static int iwlagn_rxon_connect(struct iwl_priv *priv,
232 struct iwl_rxon_context *ctx) 462 struct iwl_rxon_context *ctx)
233{ 463{
@@ -295,9 +525,9 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv,
295 } 525 }
296 526
297 if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION && 527 if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION &&
298 cfg(priv)->ht_params && cfg(priv)->ht_params->smps_mode) 528 priv->cfg->ht_params && priv->cfg->ht_params->smps_mode)
299 ieee80211_request_smps(ctx->vif, 529 ieee80211_request_smps(ctx->vif,
300 cfg(priv)->ht_params->smps_mode); 530 priv->cfg->ht_params->smps_mode);
301 531
302 return 0; 532 return 0;
303} 533}
@@ -309,7 +539,7 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
309 int slot0 = 300, slot1 = 0; 539 int slot0 = 300, slot1 = 0;
310 int ret; 540 int ret;
311 541
312 if (priv->shrd->valid_contexts == BIT(IWL_RXON_CTX_BSS)) 542 if (priv->valid_contexts == BIT(IWL_RXON_CTX_BSS))
313 return 0; 543 return 0;
314 544
315 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); 545 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
@@ -394,6 +624,414 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
394 return ret; 624 return ret;
395} 625}
396 626
/*
 * _iwl_set_rxon_ht - fill the HT-related fields of one context's staging RXON
 * @priv: driver private data
 * @ht_conf: driver HT configuration (currently unused here; kept for the
 *           iwl_set_rxon_ht() call signature)
 * @ctx: the RXON context whose &ctx->staging command is updated
 *
 * Encodes HT protection mode and channel width (20 MHz, 20/40 mixed or
 * pure 40 MHz) into ctx->staging.flags, then recomputes the RX chain
 * selection.  Only touches the staging copy; nothing is sent to the uCode.
 */
static void _iwl_set_rxon_ht(struct iwl_priv *priv,
			     struct iwl_ht_config *ht_conf,
			     struct iwl_rxon_context *ctx)
{
	struct iwl_rxon_cmd *rxon = &ctx->staging;

	if (!ctx->ht.enabled) {
		/* HT disabled: strip every HT-related flag and stop. */
		rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
			RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
			RXON_FLG_HT40_PROT_MSK |
			RXON_FLG_HT_PROT_MSK);
		return;
	}

	/* FIXME: if the definition of ht.protection changed, the "translation"
	 * will be needed for rxon->flags
	 */
	rxon->flags |= cpu_to_le32(ctx->ht.protection <<
				   RXON_FLG_HT_OPERATING_MODE_POS);

	/* Set up channel bandwidth:
	 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
	/* clear the HT channel mode before set the mode */
	rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
			 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
	if (iwl_is_ht40_tx_allowed(priv, ctx, NULL)) {
		/* pure ht40 */
		if (ctx->ht.protection ==
		    IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
			rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
			/*
			 * Note: control channel is opposite of extension
			 * channel
			 */
			switch (ctx->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
					~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |=
					RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			}
		} else {
			/*
			 * Note: control channel is opposite of extension
			 * channel
			 */
			switch (ctx->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &=
					~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_NONE:
			default:
				/*
				 * channel location only valid if in Mixed
				 * mode
				 */
				IWL_ERR(priv,
					"invalid extension channel offset\n");
				break;
			}
		}
	} else {
		/* HT40 not allowed here: fall back to legacy 20 MHz mode. */
		rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
	}

	iwlagn_set_rxon_chain(priv, ctx);

	IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
			"extension channel offset 0x%x\n",
			le32_to_cpu(rxon->flags), ctx->ht.protection,
			ctx->ht.extension_chan_offset);
}
708
709void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
710{
711 struct iwl_rxon_context *ctx;
712
713 for_each_context(priv, ctx)
714 _iwl_set_rxon_ht(priv, ht_conf, ctx);
715}
716
/**
 * iwl_set_rxon_channel - Set the band and channel values in staging RXON
 * @ch: requested channel as a pointer to struct ieee80211_channel
 *
 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
 * in the staging RXON flag structure based on the ch->band
 */
void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
			  struct iwl_rxon_context *ctx)
{
	enum ieee80211_band band = ch->band;
	u16 channel = ch->hw_value;

	/* Already tuned to this channel and band: nothing to update. */
	if ((le16_to_cpu(ctx->staging.channel) == channel) &&
	    (priv->band == band))
		return;

	ctx->staging.channel = cpu_to_le16(channel);
	if (band == IEEE80211_BAND_5GHZ)
		ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
	else
		ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;

	priv->band = band;

	IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);

}
745
746void iwl_set_flags_for_band(struct iwl_priv *priv,
747 struct iwl_rxon_context *ctx,
748 enum ieee80211_band band,
749 struct ieee80211_vif *vif)
750{
751 if (band == IEEE80211_BAND_5GHZ) {
752 ctx->staging.flags &=
753 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
754 | RXON_FLG_CCK_MSK);
755 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
756 } else {
757 /* Copied from iwl_post_associate() */
758 if (vif && vif->bss_conf.use_short_slot)
759 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
760 else
761 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
762
763 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
764 ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
765 ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
766 }
767}
768
769static void iwl_set_rxon_hwcrypto(struct iwl_priv *priv,
770 struct iwl_rxon_context *ctx, int hw_decrypt)
771{
772 struct iwl_rxon_cmd *rxon = &ctx->staging;
773
774 if (hw_decrypt)
775 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
776 else
777 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
778
779}
780
/*
 * iwl_check_rxon_cmd - sanity-check the staging RXON command
 *
 * Validates internal consistency of ctx->staging (band vs. flags, basic
 * rates, addresses, AID, channel).  Each failed check sets one bit in
 * @errors and logs a warning; a WARN with the collected bitmask fires if
 * anything is wrong.  Returns 0 if valid, -EINVAL otherwise.
 */
static int iwl_check_rxon_cmd(struct iwl_priv *priv,
			      struct iwl_rxon_context *ctx)
{
	struct iwl_rxon_cmd *rxon = &ctx->staging;
	u32 errors = 0;

	if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
		/* 2.4 GHz band checks */
		if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
			IWL_WARN(priv, "check 2.4G: wrong narrow\n");
			errors |= BIT(0);
		}
		if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
			IWL_WARN(priv, "check 2.4G: wrong radar\n");
			errors |= BIT(1);
		}
	} else {
		/* 5 GHz band checks: short slot mandatory, no CCK */
		if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
			IWL_WARN(priv, "check 5.2G: not short slot!\n");
			errors |= BIT(2);
		}
		if (rxon->flags & RXON_FLG_CCK_MSK) {
			IWL_WARN(priv, "check 5.2G: CCK!\n");
			errors |= BIT(3);
		}
	}
	/* low bit of the first address octet set == multicast address */
	if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
		IWL_WARN(priv, "mac/bssid mcast!\n");
		errors |= BIT(4);
	}

	/* make sure basic rates 6Mbps and 1Mbps are supported */
	if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
	    (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
		IWL_WARN(priv, "neither 1 nor 6 are basic\n");
		errors |= BIT(5);
	}

	/* 2007 is the largest association ID allowed (802.11) */
	if (le16_to_cpu(rxon->assoc_id) > 2007) {
		IWL_WARN(priv, "aid > 2007\n");
		errors |= BIT(6);
	}

	if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
			== (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
		IWL_WARN(priv, "CCK and short slot\n");
		errors |= BIT(7);
	}

	if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
			== (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
		IWL_WARN(priv, "CCK and auto detect");
		errors |= BIT(8);
	}

	/* TGg protection requires auto-detect to be enabled as well */
	if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
			    RXON_FLG_TGG_PROTECT_MSK)) ==
			    RXON_FLG_TGG_PROTECT_MSK) {
		IWL_WARN(priv, "TGg but no auto-detect\n");
		errors |= BIT(9);
	}

	if (rxon->channel == 0) {
		IWL_WARN(priv, "zero channel is invalid\n");
		errors |= BIT(10);
	}

	WARN(errors, "Invalid RXON (%#x), channel %d",
	     errors, le16_to_cpu(rxon->channel));

	return errors ? -EINVAL : 0;
}
853
/**
 * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
 * @priv: staging_rxon is compared to active_rxon
 *
 * If the RXON structure is changing enough to require a new tune,
 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
 */
int iwl_full_rxon_required(struct iwl_priv *priv,
			   struct iwl_rxon_context *ctx)
{
	const struct iwl_rxon_cmd *staging = &ctx->staging;
	const struct iwl_rxon_cmd *active = &ctx->active;

/* CHK: return 1 (and log which condition tripped) when @cond holds */
#define CHK(cond) \
	if ((cond)) { \
		IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n"); \
		return 1; \
	}

/* CHK_NEQ: return 1 (and log both values) when the two fields differ */
#define CHK_NEQ(c1, c2) \
	if ((c1) != (c2)) { \
		IWL_DEBUG_INFO(priv, "need full RXON - " \
			       #c1 " != " #c2 " - %d != %d\n", \
			       (c1), (c2)); \
		return 1; \
	}

	/* These items are only settable from the full RXON command */
	CHK(!iwl_is_associated_ctx(ctx));
	CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
	CHK(compare_ether_addr(staging->node_addr, active->node_addr));
	CHK(compare_ether_addr(staging->wlap_bssid_addr,
			       active->wlap_bssid_addr));
	CHK_NEQ(staging->dev_type, active->dev_type);
	CHK_NEQ(staging->channel, active->channel);
	CHK_NEQ(staging->air_propagation, active->air_propagation);
	CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
		active->ofdm_ht_single_stream_basic_rates);
	CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
		active->ofdm_ht_dual_stream_basic_rates);
	CHK_NEQ(staging->ofdm_ht_triple_stream_basic_rates,
		active->ofdm_ht_triple_stream_basic_rates);
	CHK_NEQ(staging->assoc_id, active->assoc_id);

	/* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
	 * be updated with the RXON_ASSOC command -- however only some
	 * flag transitions are allowed using RXON_ASSOC */

	/* Check if we are not switching bands */
	CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
		active->flags & RXON_FLG_BAND_24G_MSK);

	/* Check if we are switching association toggle */
	CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
		active->filter_flags & RXON_FILTER_ASSOC_MSK);

#undef CHK
#undef CHK_NEQ

	return 0;
}
916
#ifdef CONFIG_IWLWIFI_DEBUG
/*
 * iwl_print_rx_config_cmd - dump a context's staging RXON command
 * @ctxid: which RXON context to print
 *
 * Debug-only helper: hex-dumps the raw command and then prints the
 * interesting fields one by one at IWL_DL_RADIO level.
 */
void iwl_print_rx_config_cmd(struct iwl_priv *priv,
			     enum iwl_rxon_context_id ctxid)
{
	struct iwl_rxon_context *ctx = &priv->contexts[ctxid];
	struct iwl_rxon_cmd *rxon = &ctx->staging;

	IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
	iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
	IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n",
			le16_to_cpu(rxon->channel));
	IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n",
			le32_to_cpu(rxon->flags));
	IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
			le32_to_cpu(rxon->filter_flags));
	IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
	IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
			rxon->ofdm_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n",
			rxon->cck_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
	IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
	IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n",
			le16_to_cpu(rxon->assoc_id));
}
#endif
943
944static void iwl_calc_basic_rates(struct iwl_priv *priv,
945 struct iwl_rxon_context *ctx)
946{
947 int lowest_present_ofdm = 100;
948 int lowest_present_cck = 100;
949 u8 cck = 0;
950 u8 ofdm = 0;
951
952 if (ctx->vif) {
953 struct ieee80211_supported_band *sband;
954 unsigned long basic = ctx->vif->bss_conf.basic_rates;
955 int i;
956
957 sband = priv->hw->wiphy->bands[priv->hw->conf.channel->band];
958
959 for_each_set_bit(i, &basic, BITS_PER_LONG) {
960 int hw = sband->bitrates[i].hw_value;
961 if (hw >= IWL_FIRST_OFDM_RATE) {
962 ofdm |= BIT(hw - IWL_FIRST_OFDM_RATE);
963 if (lowest_present_ofdm > hw)
964 lowest_present_ofdm = hw;
965 } else {
966 BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
967
968 cck |= BIT(hw);
969 if (lowest_present_cck > hw)
970 lowest_present_cck = hw;
971 }
972 }
973 }
974
975 /*
976 * Now we've got the basic rates as bitmaps in the ofdm and cck
977 * variables. This isn't sufficient though, as there might not
978 * be all the right rates in the bitmap. E.g. if the only basic
979 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
980 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
981 *
982 * [...] a STA responding to a received frame shall transmit
983 * its Control Response frame [...] at the highest rate in the
984 * BSSBasicRateSet parameter that is less than or equal to the
985 * rate of the immediately previous frame in the frame exchange
986 * sequence ([...]) and that is of the same modulation class
987 * ([...]) as the received frame. If no rate contained in the
988 * BSSBasicRateSet parameter meets these conditions, then the
989 * control frame sent in response to a received frame shall be
990 * transmitted at the highest mandatory rate of the PHY that is
991 * less than or equal to the rate of the received frame, and
992 * that is of the same modulation class as the received frame.
993 *
994 * As a consequence, we need to add all mandatory rates that are
995 * lower than all of the basic rates to these bitmaps.
996 */
997
998 if (IWL_RATE_24M_INDEX < lowest_present_ofdm)
999 ofdm |= IWL_RATE_24M_MASK >> IWL_FIRST_OFDM_RATE;
1000 if (IWL_RATE_12M_INDEX < lowest_present_ofdm)
1001 ofdm |= IWL_RATE_12M_MASK >> IWL_FIRST_OFDM_RATE;
1002 /* 6M already there or needed so always add */
1003 ofdm |= IWL_RATE_6M_MASK >> IWL_FIRST_OFDM_RATE;
1004
1005 /*
1006 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
1007 * Note, however:
1008 * - if no CCK rates are basic, it must be ERP since there must
1009 * be some basic rates at all, so they're OFDM => ERP PHY
1010 * (or we're in 5 GHz, and the cck bitmap will never be used)
1011 * - if 11M is a basic rate, it must be ERP as well, so add 5.5M
1012 * - if 5.5M is basic, 1M and 2M are mandatory
1013 * - if 2M is basic, 1M is mandatory
1014 * - if 1M is basic, that's the only valid ACK rate.
1015 * As a consequence, it's not as complicated as it sounds, just add
1016 * any lower rates to the ACK rate bitmap.
1017 */
1018 if (IWL_RATE_11M_INDEX < lowest_present_ofdm)
1019 ofdm |= IWL_RATE_11M_MASK >> IWL_FIRST_CCK_RATE;
1020 if (IWL_RATE_5M_INDEX < lowest_present_ofdm)
1021 ofdm |= IWL_RATE_5M_MASK >> IWL_FIRST_CCK_RATE;
1022 if (IWL_RATE_2M_INDEX < lowest_present_ofdm)
1023 ofdm |= IWL_RATE_2M_MASK >> IWL_FIRST_CCK_RATE;
1024 /* 1M already there or needed so always add */
1025 cck |= IWL_RATE_1M_MASK >> IWL_FIRST_CCK_RATE;
1026
1027 IWL_DEBUG_RATE(priv, "Set basic rates cck:0x%.2x ofdm:0x%.2x\n",
1028 cck, ofdm);
1029
1030 /* "basic_rates" is a misnomer here -- should be called ACK rates */
1031 ctx->staging.cck_basic_rates = cck;
1032 ctx->staging.ofdm_basic_rates = ofdm;
1033}
1034
397/** 1035/**
398 * iwlagn_commit_rxon - commit staging_rxon to hardware 1036 * iwlagn_commit_rxon - commit staging_rxon to hardware
399 * 1037 *
@@ -433,11 +1071,14 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
433 /* always get timestamp with Rx frame */ 1071 /* always get timestamp with Rx frame */
434 ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK; 1072 ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
435 1073
1074 /* recalculate basic rates */
1075 iwl_calc_basic_rates(priv, ctx);
1076
436 /* 1077 /*
437 * force CTS-to-self frames protection if RTS-CTS is not preferred 1078 * force CTS-to-self frames protection if RTS-CTS is not preferred
438 * one aggregation protection method 1079 * one aggregation protection method
439 */ 1080 */
440 if (!hw_params(priv).use_rts_for_aggregation) 1081 if (!priv->hw_params.use_rts_for_aggregation)
441 ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; 1082 ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
442 1083
443 if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) || 1084 if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
@@ -489,7 +1130,7 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
489 return 0; 1130 return 0;
490 } 1131 }
491 1132
492 iwl_set_rxon_hwcrypto(priv, ctx, !iwlagn_mod_params.sw_crypto); 1133 iwl_set_rxon_hwcrypto(priv, ctx, !iwlwifi_mod_params.sw_crypto);
493 1134
494 IWL_DEBUG_INFO(priv, 1135 IWL_DEBUG_INFO(priv,
495 "Going to commit RXON\n" 1136 "Going to commit RXON\n"
@@ -547,7 +1188,7 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
547 const struct iwl_channel_info *ch_info; 1188 const struct iwl_channel_info *ch_info;
548 int ret = 0; 1189 int ret = 0;
549 1190
550 IWL_DEBUG_MAC80211(priv, "enter: changed %#x", changed); 1191 IWL_DEBUG_MAC80211(priv, "enter: changed %#x\n", changed);
551 1192
552 mutex_lock(&priv->mutex); 1193 mutex_lock(&priv->mutex);
553 1194
@@ -621,13 +1262,6 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
621 } 1262 }
622 1263
623 iwl_update_bcast_stations(priv); 1264 iwl_update_bcast_stations(priv);
624
625 /*
626 * The list of supported rates and rate mask can be different
627 * for each band; since the band may have changed, reset
628 * the rate mask to what mac80211 lists.
629 */
630 iwl_set_rate(priv);
631 } 1265 }
632 1266
633 if (changed & (IEEE80211_CONF_CHANGE_PS | 1267 if (changed & (IEEE80211_CONF_CHANGE_PS |
@@ -656,9 +1290,9 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
656 return ret; 1290 return ret;
657} 1291}
658 1292
659static void iwlagn_check_needed_chains(struct iwl_priv *priv, 1293void iwlagn_check_needed_chains(struct iwl_priv *priv,
660 struct iwl_rxon_context *ctx, 1294 struct iwl_rxon_context *ctx,
661 struct ieee80211_bss_conf *bss_conf) 1295 struct ieee80211_bss_conf *bss_conf)
662{ 1296{
663 struct ieee80211_vif *vif = ctx->vif; 1297 struct ieee80211_vif *vif = ctx->vif;
664 struct iwl_rxon_context *tmp; 1298 struct iwl_rxon_context *tmp;
@@ -750,11 +1384,14 @@ static void iwlagn_check_needed_chains(struct iwl_priv *priv,
750 ht_conf->single_chain_sufficient = !need_multiple; 1384 ht_conf->single_chain_sufficient = !need_multiple;
751} 1385}
752 1386
753static void iwlagn_chain_noise_reset(struct iwl_priv *priv) 1387void iwlagn_chain_noise_reset(struct iwl_priv *priv)
754{ 1388{
755 struct iwl_chain_noise_data *data = &priv->chain_noise_data; 1389 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
756 int ret; 1390 int ret;
757 1391
1392 if (!(priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED))
1393 return;
1394
758 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && 1395 if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
759 iwl_is_any_associated(priv)) { 1396 iwl_is_any_associated(priv)) {
760 struct iwl_calib_chain_noise_reset_cmd cmd; 1397 struct iwl_calib_chain_noise_reset_cmd cmd;
@@ -907,8 +1544,7 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
907 iwl_power_update_mode(priv, false); 1544 iwl_power_update_mode(priv, false);
908 1545
909 /* Enable RX differential gain and sensitivity calibrations */ 1546 /* Enable RX differential gain and sensitivity calibrations */
910 if (!priv->disable_chain_noise_cal) 1547 iwlagn_chain_noise_reset(priv);
911 iwlagn_chain_noise_reset(priv);
912 priv->start_calib = 1; 1548 priv->start_calib = 1;
913 } 1549 }
914 1550
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
index c4175603864b..67e6f1d2a08b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
@@ -30,10 +30,11 @@
30#include <net/mac80211.h> 30#include <net/mac80211.h>
31 31
32#include "iwl-dev.h" 32#include "iwl-dev.h"
33#include "iwl-core.h"
34#include "iwl-agn.h" 33#include "iwl-agn.h"
35#include "iwl-trans.h" 34#include "iwl-trans.h"
36 35
36const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
37
37static int iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id) 38static int iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
38{ 39{
39 lockdep_assert_held(&priv->sta_lock); 40 lockdep_assert_held(&priv->sta_lock);
@@ -170,6 +171,50 @@ int iwl_send_add_sta(struct iwl_priv *priv,
170 return cmd.handler_status; 171 return cmd.handler_status;
171} 172}
172 173
174static bool iwl_is_channel_extension(struct iwl_priv *priv,
175 enum ieee80211_band band,
176 u16 channel, u8 extension_chan_offset)
177{
178 const struct iwl_channel_info *ch_info;
179
180 ch_info = iwl_get_channel_info(priv, band, channel);
181 if (!is_channel_valid(ch_info))
182 return false;
183
184 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
185 return !(ch_info->ht40_extension_channel &
186 IEEE80211_CHAN_NO_HT40PLUS);
187 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
188 return !(ch_info->ht40_extension_channel &
189 IEEE80211_CHAN_NO_HT40MINUS);
190
191 return false;
192}
193
/*
 * iwl_is_ht40_tx_allowed - may we transmit HT40 on this context's channel?
 * @ht_cap: peer station HT capabilities, or NULL for the "pure 40 MHz" case
 *
 * Checks the context HT state, the (optional) peer HT support, the debugfs
 * HT40 override, and finally the regulatory extension-channel permission.
 */
bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
			    struct iwl_rxon_context *ctx,
			    struct ieee80211_sta_ht_cap *ht_cap)
{
	if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
		return false;

	/*
	 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
	 * the bit will not set if it is pure 40MHz case
	 */
	if (ht_cap && !ht_cap->ht_supported)
		return false;

#ifdef CONFIG_IWLWIFI_DEBUGFS
	/* debugfs knob to force-disable HT40 */
	if (priv->disable_ht40)
		return false;
#endif

	return iwl_is_channel_extension(priv, priv->band,
					le16_to_cpu(ctx->staging.channel),
					ctx->ht.extension_chan_offset);
}
217
173static void iwl_sta_calc_ht_flags(struct iwl_priv *priv, 218static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
174 struct ieee80211_sta *sta, 219 struct ieee80211_sta *sta,
175 struct iwl_rxon_context *ctx, 220 struct iwl_rxon_context *ctx,
@@ -581,6 +626,56 @@ void iwl_deactivate_station(struct iwl_priv *priv, const u8 sta_id,
581 spin_unlock_bh(&priv->sta_lock); 626 spin_unlock_bh(&priv->sta_lock);
582} 627}
583 628
/*
 * iwl_sta_fill_lq - build a default link quality (rate scaling) command
 * @sta_id: uCode station index the command applies to
 * @link_cmd: command buffer to fill (zeroed first)
 *
 * Fills @link_cmd with a conservative initial rate-scale table (one fixed
 * starting rate repeated for every retry slot) and the single/dual stream
 * antenna masks derived from the valid TX antennas.  Caller holds
 * priv->mutex.
 */
static void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
			    u8 sta_id, struct iwl_link_quality_cmd *link_cmd)
{
	int i, r;
	u32 rate_flags = 0;
	__le32 rate_n_flags;

	lockdep_assert_held(&priv->mutex);

	memset(link_cmd, 0, sizeof(*link_cmd));

	/* Set up the rate scaling to start at selected rate, fall back
	 * all the way down to 1M in IEEE order, and then spin on 1M */
	if (priv->band == IEEE80211_BAND_5GHZ)
		r = IWL_RATE_6M_INDEX;
	else if (ctx && ctx->vif && ctx->vif->p2p)
		r = IWL_RATE_6M_INDEX;
	else
		r = IWL_RATE_1M_INDEX;

	/* CCK starting rates need the CCK modulation flag */
	if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
		rate_flags |= RATE_MCS_CCK_MSK;

	rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) <<
				RATE_MCS_ANT_POS;
	rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
	/* same rate in every retry slot of the rate-scale table */
	for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
		link_cmd->rs_table[i].rate_n_flags = rate_n_flags;

	link_cmd->general_params.single_stream_ant_msk =
			first_antenna(priv->hw_params.valid_tx_ant);

	/* dual-stream mask: all valid TX antennas minus the first one;
	 * fall back to A+B if that leaves nothing, or use the full mask
	 * when exactly two antennas are valid */
	link_cmd->general_params.dual_stream_ant_msk =
		priv->hw_params.valid_tx_ant &
		~first_antenna(priv->hw_params.valid_tx_ant);
	if (!link_cmd->general_params.dual_stream_ant_msk) {
		link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
	} else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
		link_cmd->general_params.dual_stream_ant_msk =
			priv->hw_params.valid_tx_ant;
	}

	link_cmd->agg_params.agg_dis_start_th =
		LINK_QUAL_AGG_DISABLE_START_DEF;
	link_cmd->agg_params.agg_time_limit =
		cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);

	link_cmd->sta_id = sta_id;
}
678
584/** 679/**
585 * iwl_clear_ucode_stations - clear ucode station table bits 680 * iwl_clear_ucode_stations - clear ucode station table bits
586 * 681 *
@@ -841,56 +936,6 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
841} 936}
842 937
843 938
844void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
845 u8 sta_id, struct iwl_link_quality_cmd *link_cmd)
846{
847 int i, r;
848 u32 rate_flags = 0;
849 __le32 rate_n_flags;
850
851 lockdep_assert_held(&priv->mutex);
852
853 memset(link_cmd, 0, sizeof(*link_cmd));
854
855 /* Set up the rate scaling to start at selected rate, fall back
856 * all the way down to 1M in IEEE order, and then spin on 1M */
857 if (priv->band == IEEE80211_BAND_5GHZ)
858 r = IWL_RATE_6M_INDEX;
859 else if (ctx && ctx->vif && ctx->vif->p2p)
860 r = IWL_RATE_6M_INDEX;
861 else
862 r = IWL_RATE_1M_INDEX;
863
864 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
865 rate_flags |= RATE_MCS_CCK_MSK;
866
867 rate_flags |= first_antenna(hw_params(priv).valid_tx_ant) <<
868 RATE_MCS_ANT_POS;
869 rate_n_flags = iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
870 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
871 link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
872
873 link_cmd->general_params.single_stream_ant_msk =
874 first_antenna(hw_params(priv).valid_tx_ant);
875
876 link_cmd->general_params.dual_stream_ant_msk =
877 hw_params(priv).valid_tx_ant &
878 ~first_antenna(hw_params(priv).valid_tx_ant);
879 if (!link_cmd->general_params.dual_stream_ant_msk) {
880 link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
881 } else if (num_of_ant(hw_params(priv).valid_tx_ant) == 2) {
882 link_cmd->general_params.dual_stream_ant_msk =
883 hw_params(priv).valid_tx_ant;
884 }
885
886 link_cmd->agg_params.agg_dis_start_th =
887 LINK_QUAL_AGG_DISABLE_START_DEF;
888 link_cmd->agg_params.agg_time_limit =
889 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
890
891 link_cmd->sta_id = sta_id;
892}
893
894static struct iwl_link_quality_cmd * 939static struct iwl_link_quality_cmd *
895iwl_sta_alloc_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx, 940iwl_sta_alloc_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
896 u8 sta_id) 941 u8 sta_id)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
index baaf5ba2fc38..a5cfe0aceedb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
@@ -37,11 +37,11 @@
37#include "iwl-agn.h" 37#include "iwl-agn.h"
38#include "iwl-eeprom.h" 38#include "iwl-eeprom.h"
39#include "iwl-dev.h" 39#include "iwl-dev.h"
40#include "iwl-core.h"
41#include "iwl-io.h" 40#include "iwl-io.h"
42#include "iwl-commands.h" 41#include "iwl-commands.h"
43#include "iwl-debug.h" 42#include "iwl-debug.h"
44#include "iwl-agn-tt.h" 43#include "iwl-agn-tt.h"
44#include "iwl-modparams.h"
45 45
46/* default Thermal Throttling transaction table 46/* default Thermal Throttling transaction table
47 * Current state | Throttling Down | Throttling Up 47 * Current state | Throttling Down | Throttling Up
@@ -179,19 +179,19 @@ static void iwl_tt_check_exit_ct_kill(unsigned long data)
179 179
180 if (tt->state == IWL_TI_CT_KILL) { 180 if (tt->state == IWL_TI_CT_KILL) {
181 if (priv->thermal_throttle.ct_kill_toggle) { 181 if (priv->thermal_throttle.ct_kill_toggle) {
182 iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_CLR, 182 iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
183 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); 183 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
184 priv->thermal_throttle.ct_kill_toggle = false; 184 priv->thermal_throttle.ct_kill_toggle = false;
185 } else { 185 } else {
186 iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_SET, 186 iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_SET,
187 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); 187 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
188 priv->thermal_throttle.ct_kill_toggle = true; 188 priv->thermal_throttle.ct_kill_toggle = true;
189 } 189 }
190 iwl_read32(trans(priv), CSR_UCODE_DRV_GP1); 190 iwl_read32(priv->trans, CSR_UCODE_DRV_GP1);
191 spin_lock_irqsave(&trans(priv)->reg_lock, flags); 191 spin_lock_irqsave(&priv->trans->reg_lock, flags);
192 if (likely(iwl_grab_nic_access(trans(priv)))) 192 if (likely(iwl_grab_nic_access(priv->trans)))
193 iwl_release_nic_access(trans(priv)); 193 iwl_release_nic_access(priv->trans);
194 spin_unlock_irqrestore(&trans(priv)->reg_lock, flags); 194 spin_unlock_irqrestore(&priv->trans->reg_lock, flags);
195 195
196 /* Reschedule the ct_kill timer to occur in 196 /* Reschedule the ct_kill timer to occur in
197 * CT_KILL_EXIT_DURATION seconds to ensure we get a 197 * CT_KILL_EXIT_DURATION seconds to ensure we get a
@@ -632,7 +632,7 @@ void iwl_tt_initialize(struct iwl_priv *priv)
632 INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter); 632 INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
633 INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit); 633 INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit);
634 634
635 if (cfg(priv)->base_params->adv_thermal_throttle) { 635 if (priv->cfg->base_params->adv_thermal_throttle) {
636 IWL_DEBUG_TEMP(priv, "Advanced Thermal Throttling\n"); 636 IWL_DEBUG_TEMP(priv, "Advanced Thermal Throttling\n");
637 tt->restriction = kcalloc(IWL_TI_STATE_MAX, 637 tt->restriction = kcalloc(IWL_TI_STATE_MAX,
638 sizeof(struct iwl_tt_restriction), 638 sizeof(struct iwl_tt_restriction),
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 34adedc74d35..f2e9f298a947 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -34,12 +34,22 @@
34#include <linux/ieee80211.h> 34#include <linux/ieee80211.h>
35 35
36#include "iwl-dev.h" 36#include "iwl-dev.h"
37#include "iwl-core.h"
38#include "iwl-io.h" 37#include "iwl-io.h"
39#include "iwl-agn-hw.h" 38#include "iwl-agn-hw.h"
40#include "iwl-agn.h" 39#include "iwl-agn.h"
41#include "iwl-trans.h" 40#include "iwl-trans.h"
42 41
/*
 * TID -> access category lookup: index is the 802.11 TID (0..7);
 * TIDs 0/3 map to best effort, 1/2 to background, 4/5 to video,
 * 6/7 to voice.
 */
static const u8 tid_to_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
};
52
43static void iwlagn_tx_cmd_protection(struct iwl_priv *priv, 53static void iwlagn_tx_cmd_protection(struct iwl_priv *priv,
44 struct ieee80211_tx_info *info, 54 struct ieee80211_tx_info *info,
45 __le16 fc, __le32 *tx_flags) 55 __le16 fc, __le32 *tx_flags)
@@ -74,8 +84,8 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
74 else if (ieee80211_is_back_req(fc)) 84 else if (ieee80211_is_back_req(fc))
75 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK; 85 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
76 else if (info->band == IEEE80211_BAND_2GHZ && 86 else if (info->band == IEEE80211_BAND_2GHZ &&
77 cfg(priv)->bt_params && 87 priv->cfg->bt_params &&
78 cfg(priv)->bt_params->advanced_bt_coexist && 88 priv->cfg->bt_params->advanced_bt_coexist &&
79 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) || 89 (ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
80 ieee80211_is_reassoc_req(fc) || 90 ieee80211_is_reassoc_req(fc) ||
81 skb->protocol == cpu_to_be16(ETH_P_PAE))) 91 skb->protocol == cpu_to_be16(ETH_P_PAE)))
@@ -192,15 +202,15 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
192 rate_flags |= RATE_MCS_CCK_MSK; 202 rate_flags |= RATE_MCS_CCK_MSK;
193 203
194 /* Set up antennas */ 204 /* Set up antennas */
195 if (cfg(priv)->bt_params && 205 if (priv->cfg->bt_params &&
196 cfg(priv)->bt_params->advanced_bt_coexist && 206 priv->cfg->bt_params->advanced_bt_coexist &&
197 priv->bt_full_concurrent) { 207 priv->bt_full_concurrent) {
198 /* operated as 1x1 in full concurrency mode */ 208 /* operated as 1x1 in full concurrency mode */
199 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, 209 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
200 first_antenna(hw_params(priv).valid_tx_ant)); 210 first_antenna(priv->hw_params.valid_tx_ant));
201 } else 211 } else
202 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, 212 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
203 hw_params(priv).valid_tx_ant); 213 priv->hw_params.valid_tx_ant);
204 rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant); 214 rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
205 215
206 /* Set the rate in the TX cmd */ 216 /* Set the rate in the TX cmd */
@@ -293,6 +303,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
293 u16 len, seq_number = 0; 303 u16 len, seq_number = 0;
294 u8 sta_id, tid = IWL_MAX_TID_COUNT; 304 u8 sta_id, tid = IWL_MAX_TID_COUNT;
295 bool is_agg = false; 305 bool is_agg = false;
306 int txq_id;
296 307
297 if (info->control.vif) 308 if (info->control.vif)
298 ctx = iwl_rxon_ctx_from_vif(info->control.vif); 309 ctx = iwl_rxon_ctx_from_vif(info->control.vif);
@@ -384,12 +395,9 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
384 395
385 /* TODO need this for burst mode later on */ 396 /* TODO need this for burst mode later on */
386 iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id); 397 iwlagn_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
387 iwl_dbg_log_tx_data_frame(priv, len, hdr);
388 398
389 iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc); 399 iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);
390 400
391 iwl_update_stats(priv, true, fc, len);
392
393 memset(&info->status, 0, sizeof(info->status)); 401 memset(&info->status, 0, sizeof(info->status));
394 402
395 info->driver_data[0] = ctx; 403 info->driver_data[0] = ctx;
@@ -435,7 +443,31 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
435 /* Copy MAC header from skb into command buffer */ 443 /* Copy MAC header from skb into command buffer */
436 memcpy(tx_cmd->hdr, hdr, hdr_len); 444 memcpy(tx_cmd->hdr, hdr, hdr_len);
437 445
438 if (iwl_trans_tx(trans(priv), skb, dev_cmd, ctx->ctxid, sta_id, tid)) 446 if (is_agg)
447 txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
448 else if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
449 /*
450 * Send this frame after DTIM -- there's a special queue
451 * reserved for this for contexts that support AP mode.
452 */
453 txq_id = ctx->mcast_queue;
454
455 /*
456 * The microcode will clear the more data
457 * bit in the last frame it transmits.
458 */
459 hdr->frame_control |=
460 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
461 } else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
462 txq_id = IWL_AUX_QUEUE;
463 else
464 txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
465
466 WARN_ON_ONCE(!is_agg && txq_id != info->hw_queue);
467 WARN_ON_ONCE(is_agg &&
468 priv->queue_to_mac80211[txq_id] != info->hw_queue);
469
470 if (iwl_trans_tx(priv->trans, skb, dev_cmd, txq_id))
439 goto drop_unlock_sta; 471 goto drop_unlock_sta;
440 472
441 if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc) && 473 if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc) &&
@@ -464,11 +496,33 @@ drop_unlock_priv:
464 return -1; 496 return -1;
465} 497}
466 498
499static int iwlagn_alloc_agg_txq(struct iwl_priv *priv, int mq)
500{
501 int q;
502
503 for (q = IWLAGN_FIRST_AMPDU_QUEUE;
504 q < priv->cfg->base_params->num_of_queues; q++) {
505 if (!test_and_set_bit(q, priv->agg_q_alloc)) {
506 priv->queue_to_mac80211[q] = mq;
507 return q;
508 }
509 }
510
511 return -ENOSPC;
512}
513
514static void iwlagn_dealloc_agg_txq(struct iwl_priv *priv, int q)
515{
516 clear_bit(q, priv->agg_q_alloc);
517 priv->queue_to_mac80211[q] = IWL_INVALID_MAC80211_QUEUE;
518}
519
467int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif, 520int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
468 struct ieee80211_sta *sta, u16 tid) 521 struct ieee80211_sta *sta, u16 tid)
469{ 522{
470 struct iwl_tid_data *tid_data; 523 struct iwl_tid_data *tid_data;
471 int sta_id; 524 int sta_id, txq_id;
525 enum iwl_agg_state agg_state;
472 526
473 sta_id = iwl_sta_id(sta); 527 sta_id = iwl_sta_id(sta);
474 528
@@ -480,6 +534,7 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
480 spin_lock_bh(&priv->sta_lock); 534 spin_lock_bh(&priv->sta_lock);
481 535
482 tid_data = &priv->tid_data[sta_id][tid]; 536 tid_data = &priv->tid_data[sta_id][tid];
537 txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
483 538
484 switch (priv->tid_data[sta_id][tid].agg.state) { 539 switch (priv->tid_data[sta_id][tid].agg.state) {
485 case IWL_EMPTYING_HW_QUEUE_ADDBA: 540 case IWL_EMPTYING_HW_QUEUE_ADDBA:
@@ -491,6 +546,13 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
491 */ 546 */
492 IWL_DEBUG_HT(priv, "AGG stop before setup done\n"); 547 IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
493 goto turn_off; 548 goto turn_off;
549 case IWL_AGG_STARTING:
550 /*
551 * This can happen when the session is stopped before
552 * we receive ADDBA response
553 */
554 IWL_DEBUG_HT(priv, "AGG stop before AGG became operational\n");
555 goto turn_off;
494 case IWL_AGG_ON: 556 case IWL_AGG_ON:
495 break; 557 break;
496 default: 558 default:
@@ -504,9 +566,13 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
504 tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number); 566 tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);
505 567
506 /* There are still packets for this RA / TID in the HW */ 568 /* There are still packets for this RA / TID in the HW */
507 if (tid_data->agg.ssn != tid_data->next_reclaimed) { 569 if (!test_bit(txq_id, priv->agg_q_alloc)) {
570 IWL_DEBUG_TX_QUEUES(priv,
571 "stopping AGG on STA/TID %d/%d but hwq %d not used\n",
572 sta_id, tid, txq_id);
573 } else if (tid_data->agg.ssn != tid_data->next_reclaimed) {
508 IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, " 574 IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
509 "next_recl = %d", 575 "next_recl = %d\n",
510 tid_data->agg.ssn, 576 tid_data->agg.ssn,
511 tid_data->next_reclaimed); 577 tid_data->next_reclaimed);
512 priv->tid_data[sta_id][tid].agg.state = 578 priv->tid_data[sta_id][tid].agg.state =
@@ -515,14 +581,22 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
515 return 0; 581 return 0;
516 } 582 }
517 583
518 IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d", 584 IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
519 tid_data->agg.ssn); 585 tid_data->agg.ssn);
520turn_off: 586turn_off:
587 agg_state = priv->tid_data[sta_id][tid].agg.state;
521 priv->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF; 588 priv->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
522 589
523 spin_unlock_bh(&priv->sta_lock); 590 spin_unlock_bh(&priv->sta_lock);
524 591
525 iwl_trans_tx_agg_disable(trans(priv), sta_id, tid); 592 if (test_bit(txq_id, priv->agg_q_alloc)) {
593 /* If the transport didn't know that we wanted to start
594 * agreggation, don't tell it that we want to stop them
595 */
596 if (agg_state != IWL_AGG_STARTING)
597 iwl_trans_tx_agg_disable(priv->trans, txq_id);
598 iwlagn_dealloc_agg_txq(priv, txq_id);
599 }
526 600
527 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); 601 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
528 602
@@ -532,9 +606,9 @@ turn_off:
532int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif, 606int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
533 struct ieee80211_sta *sta, u16 tid, u16 *ssn) 607 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
534{ 608{
609 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
535 struct iwl_tid_data *tid_data; 610 struct iwl_tid_data *tid_data;
536 int sta_id; 611 int sta_id, txq_id, ret;
537 int ret;
538 612
539 IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n", 613 IWL_DEBUG_HT(priv, "TX AGG request on ra = %pM tid = %d\n",
540 sta->addr, tid); 614 sta->addr, tid);
@@ -552,36 +626,37 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
552 return -ENXIO; 626 return -ENXIO;
553 } 627 }
554 628
629 txq_id = iwlagn_alloc_agg_txq(priv, ctx->ac_to_queue[tid_to_ac[tid]]);
630 if (txq_id < 0) {
631 IWL_DEBUG_TX_QUEUES(priv,
632 "No free aggregation queue for %pM/%d\n",
633 sta->addr, tid);
634 return txq_id;
635 }
636
555 ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid); 637 ret = iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
556 if (ret) 638 if (ret)
557 return ret; 639 return ret;
558 640
559 spin_lock_bh(&priv->sta_lock); 641 spin_lock_bh(&priv->sta_lock);
560
561 tid_data = &priv->tid_data[sta_id][tid]; 642 tid_data = &priv->tid_data[sta_id][tid];
562 tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number); 643 tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);
644 tid_data->agg.txq_id = txq_id;
563 645
564 *ssn = tid_data->agg.ssn; 646 *ssn = tid_data->agg.ssn;
565 647
566 ret = iwl_trans_tx_agg_alloc(trans(priv), sta_id, tid);
567 if (ret) {
568 spin_unlock_bh(&priv->sta_lock);
569 return ret;
570 }
571
572 if (*ssn == tid_data->next_reclaimed) { 648 if (*ssn == tid_data->next_reclaimed) {
573 IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d", 649 IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
574 tid_data->agg.ssn); 650 tid_data->agg.ssn);
575 tid_data->agg.state = IWL_AGG_ON; 651 tid_data->agg.state = IWL_AGG_STARTING;
576 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); 652 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
577 } else { 653 } else {
578 IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, " 654 IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
579 "next_reclaimed = %d", 655 "next_reclaimed = %d\n",
580 tid_data->agg.ssn, 656 tid_data->agg.ssn,
581 tid_data->next_reclaimed); 657 tid_data->next_reclaimed);
582 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA; 658 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
583 } 659 }
584
585 spin_unlock_bh(&priv->sta_lock); 660 spin_unlock_bh(&priv->sta_lock);
586 661
587 return ret; 662 return ret;
@@ -592,15 +667,21 @@ int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
592{ 667{
593 struct iwl_station_priv *sta_priv = (void *) sta->drv_priv; 668 struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
594 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); 669 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
670 int q, fifo;
595 u16 ssn; 671 u16 ssn;
596 672
597 buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF); 673 buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
598 674
599 spin_lock_bh(&priv->sta_lock); 675 spin_lock_bh(&priv->sta_lock);
600 ssn = priv->tid_data[sta_priv->sta_id][tid].agg.ssn; 676 ssn = priv->tid_data[sta_priv->sta_id][tid].agg.ssn;
677 q = priv->tid_data[sta_priv->sta_id][tid].agg.txq_id;
678 priv->tid_data[sta_priv->sta_id][tid].agg.state = IWL_AGG_ON;
601 spin_unlock_bh(&priv->sta_lock); 679 spin_unlock_bh(&priv->sta_lock);
602 680
603 iwl_trans_tx_agg_setup(trans(priv), ctx->ctxid, sta_priv->sta_id, tid, 681 fifo = ctx->ac_to_fifo[tid_to_ac[tid]];
682
683 iwl_trans_tx_agg_setup(priv->trans, q, fifo,
684 sta_priv->sta_id, tid,
604 buf_size, ssn); 685 buf_size, ssn);
605 686
606 /* 687 /*
@@ -623,7 +704,7 @@ int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
623 sta_priv->max_agg_bufsize = 704 sta_priv->max_agg_bufsize =
624 min(sta_priv->max_agg_bufsize, buf_size); 705 min(sta_priv->max_agg_bufsize, buf_size);
625 706
626 if (hw_params(priv).use_rts_for_aggregation) { 707 if (priv->hw_params.use_rts_for_aggregation) {
627 /* 708 /*
628 * switch to RTS/CTS if it is the prefer protection 709 * switch to RTS/CTS if it is the prefer protection
629 * method for HT traffic 710 * method for HT traffic
@@ -666,7 +747,9 @@ static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
666 IWL_DEBUG_TX_QUEUES(priv, 747 IWL_DEBUG_TX_QUEUES(priv,
667 "Can continue DELBA flow ssn = next_recl =" 748 "Can continue DELBA flow ssn = next_recl ="
668 " %d", tid_data->next_reclaimed); 749 " %d", tid_data->next_reclaimed);
669 iwl_trans_tx_agg_disable(trans(priv), sta_id, tid); 750 iwl_trans_tx_agg_disable(priv->trans,
751 tid_data->agg.txq_id);
752 iwlagn_dealloc_agg_txq(priv, tid_data->agg.txq_id);
670 tid_data->agg.state = IWL_AGG_OFF; 753 tid_data->agg.state = IWL_AGG_OFF;
671 ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid); 754 ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
672 } 755 }
@@ -677,7 +760,7 @@ static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
677 IWL_DEBUG_TX_QUEUES(priv, 760 IWL_DEBUG_TX_QUEUES(priv,
678 "Can continue ADDBA flow ssn = next_recl =" 761 "Can continue ADDBA flow ssn = next_recl ="
679 " %d", tid_data->next_reclaimed); 762 " %d", tid_data->next_reclaimed);
680 tid_data->agg.state = IWL_AGG_ON; 763 tid_data->agg.state = IWL_AGG_STARTING;
681 ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid); 764 ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
682 } 765 }
683 break; 766 break;
@@ -711,9 +794,9 @@ static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
711static void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags, 794static void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
712 struct ieee80211_tx_info *info) 795 struct ieee80211_tx_info *info)
713{ 796{
714 struct ieee80211_tx_rate *r = &info->control.rates[0]; 797 struct ieee80211_tx_rate *r = &info->status.rates[0];
715 798
716 info->antenna_sel_tx = 799 info->status.antenna =
717 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS); 800 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
718 if (rate_n_flags & RATE_MCS_HT_MSK) 801 if (rate_n_flags & RATE_MCS_HT_MSK)
719 r->flags |= IEEE80211_TX_RC_MCS; 802 r->flags |= IEEE80211_TX_RC_MCS;
@@ -841,8 +924,8 @@ static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
841 * notification again. 924 * notification again.
842 */ 925 */
843 if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 && 926 if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
844 cfg(priv)->bt_params && 927 priv->cfg->bt_params &&
845 cfg(priv)->bt_params->advanced_bt_coexist) { 928 priv->cfg->bt_params->advanced_bt_coexist) {
846 IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n"); 929 IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n");
847 } 930 }
848 931
@@ -1005,6 +1088,29 @@ static void iwl_check_abort_status(struct iwl_priv *priv,
1005 } 1088 }
1006} 1089}
1007 1090
1091static int iwl_reclaim(struct iwl_priv *priv, int sta_id, int tid,
1092 int txq_id, int ssn, struct sk_buff_head *skbs)
1093{
1094 if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE &&
1095 tid != IWL_TID_NON_QOS &&
1096 txq_id != priv->tid_data[sta_id][tid].agg.txq_id)) {
1097 /*
1098 * FIXME: this is a uCode bug which need to be addressed,
1099 * log the information and return for now.
1100 * Since it is can possibly happen very often and in order
1101 * not to fill the syslog, don't use IWL_ERR or IWL_WARN
1102 */
1103 IWL_DEBUG_TX_QUEUES(priv,
1104 "Bad queue mapping txq_id=%d, agg_txq[sta:%d,tid:%d]=%d\n",
1105 txq_id, sta_id, tid,
1106 priv->tid_data[sta_id][tid].agg.txq_id);
1107 return 1;
1108 }
1109
1110 iwl_trans_reclaim(priv->trans, txq_id, ssn, skbs);
1111 return 0;
1112}
1113
1008int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, 1114int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
1009 struct iwl_device_cmd *cmd) 1115 struct iwl_device_cmd *cmd)
1010{ 1116{
@@ -1059,13 +1165,12 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
1059 if (tid != IWL_TID_NON_QOS) { 1165 if (tid != IWL_TID_NON_QOS) {
1060 priv->tid_data[sta_id][tid].next_reclaimed = 1166 priv->tid_data[sta_id][tid].next_reclaimed =
1061 next_reclaimed; 1167 next_reclaimed;
1062 IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d", 1168 IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
1063 next_reclaimed); 1169 next_reclaimed);
1064 } 1170 }
1065 1171
1066 /*we can free until ssn % q.n_bd not inclusive */ 1172 /*we can free until ssn % q.n_bd not inclusive */
1067 WARN_ON(iwl_trans_reclaim(trans(priv), sta_id, tid, 1173 WARN_ON(iwl_reclaim(priv, sta_id, tid, txq_id, ssn, &skbs));
1068 txq_id, ssn, &skbs));
1069 iwlagn_check_ratid_empty(priv, sta_id, tid); 1174 iwlagn_check_ratid_empty(priv, sta_id, tid);
1070 freed = 0; 1175 freed = 0;
1071 1176
@@ -1159,7 +1264,7 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
1159 * (in Tx queue's circular buffer) of first TFD/frame in window */ 1264 * (in Tx queue's circular buffer) of first TFD/frame in window */
1160 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); 1265 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
1161 1266
1162 if (scd_flow >= cfg(priv)->base_params->num_of_queues) { 1267 if (scd_flow >= priv->cfg->base_params->num_of_queues) {
1163 IWL_ERR(priv, 1268 IWL_ERR(priv,
1164 "BUG_ON scd_flow is bigger than number of queues\n"); 1269 "BUG_ON scd_flow is bigger than number of queues\n");
1165 return 0; 1270 return 0;
@@ -1183,8 +1288,8 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
1183 /* Release all TFDs before the SSN, i.e. all TFDs in front of 1288 /* Release all TFDs before the SSN, i.e. all TFDs in front of
1184 * block-ack window (we assume that they've been successfully 1289 * block-ack window (we assume that they've been successfully
1185 * transmitted ... if not, it's too late anyway). */ 1290 * transmitted ... if not, it's too late anyway). */
1186 if (iwl_trans_reclaim(trans(priv), sta_id, tid, scd_flow, 1291 if (iwl_reclaim(priv, sta_id, tid, scd_flow,
1187 ba_resp_scd_ssn, &reclaimed_skbs)) { 1292 ba_resp_scd_ssn, &reclaimed_skbs)) {
1188 spin_unlock(&priv->sta_lock); 1293 spin_unlock(&priv->sta_lock);
1189 return 0; 1294 return 0;
1190 } 1295 }
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 2a9a16f901c3..8bd9610a5d68 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -26,6 +26,9 @@
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 * 27 *
28 *****************************************************************************/ 28 *****************************************************************************/
29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
29#include <linux/kernel.h> 32#include <linux/kernel.h>
30#include <linux/module.h> 33#include <linux/module.h>
31#include <linux/init.h> 34#include <linux/init.h>
@@ -43,13 +46,13 @@
43 46
44#include "iwl-eeprom.h" 47#include "iwl-eeprom.h"
45#include "iwl-dev.h" 48#include "iwl-dev.h"
46#include "iwl-core.h"
47#include "iwl-io.h" 49#include "iwl-io.h"
48#include "iwl-agn-calib.h" 50#include "iwl-agn-calib.h"
49#include "iwl-agn.h" 51#include "iwl-agn.h"
50#include "iwl-shared.h"
51#include "iwl-trans.h" 52#include "iwl-trans.h"
52#include "iwl-op-mode.h" 53#include "iwl-op-mode.h"
54#include "iwl-drv.h"
55#include "iwl-modparams.h"
53 56
54/****************************************************************************** 57/******************************************************************************
55 * 58 *
@@ -177,7 +180,7 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
177 rate = info->control.rates[0].idx; 180 rate = info->control.rates[0].idx;
178 181
179 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant, 182 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
180 hw_params(priv).valid_tx_ant); 183 priv->hw_params.valid_tx_ant);
181 rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant); 184 rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
182 185
183 /* In mac80211, rates for 5 GHz start at 0 */ 186 /* In mac80211, rates for 5 GHz start at 0 */
@@ -286,6 +289,25 @@ out:
286 mutex_unlock(&priv->mutex); 289 mutex_unlock(&priv->mutex);
287} 290}
288 291
292int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
293{
294 struct iwl_statistics_cmd statistics_cmd = {
295 .configuration_flags =
296 clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
297 };
298
299 if (flags & CMD_ASYNC)
300 return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
301 CMD_ASYNC,
302 sizeof(struct iwl_statistics_cmd),
303 &statistics_cmd);
304 else
305 return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
306 CMD_SYNC,
307 sizeof(struct iwl_statistics_cmd),
308 &statistics_cmd);
309}
310
289/** 311/**
290 * iwl_bg_statistics_periodic - Timer callback to queue statistics 312 * iwl_bg_statistics_periodic - Timer callback to queue statistics
291 * 313 *
@@ -326,14 +348,14 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
326 ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32)); 348 ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
327 349
328 /* Make sure device is powered up for SRAM reads */ 350 /* Make sure device is powered up for SRAM reads */
329 spin_lock_irqsave(&trans(priv)->reg_lock, reg_flags); 351 spin_lock_irqsave(&priv->trans->reg_lock, reg_flags);
330 if (unlikely(!iwl_grab_nic_access(trans(priv)))) { 352 if (unlikely(!iwl_grab_nic_access(priv->trans))) {
331 spin_unlock_irqrestore(&trans(priv)->reg_lock, reg_flags); 353 spin_unlock_irqrestore(&priv->trans->reg_lock, reg_flags);
332 return; 354 return;
333 } 355 }
334 356
335 /* Set starting address; reads will auto-increment */ 357 /* Set starting address; reads will auto-increment */
336 iwl_write32(trans(priv), HBUS_TARG_MEM_RADDR, ptr); 358 iwl_write32(priv->trans, HBUS_TARG_MEM_RADDR, ptr);
337 359
338 /* 360 /*
339 * Refuse to read more than would have fit into the log from 361 * Refuse to read more than would have fit into the log from
@@ -349,20 +371,20 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
349 * place event id # at far right for easier visual parsing. 371 * place event id # at far right for easier visual parsing.
350 */ 372 */
351 for (i = 0; i < num_events; i++) { 373 for (i = 0; i < num_events; i++) {
352 ev = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT); 374 ev = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
353 time = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT); 375 time = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
354 if (mode == 0) { 376 if (mode == 0) {
355 trace_iwlwifi_dev_ucode_cont_event( 377 trace_iwlwifi_dev_ucode_cont_event(
356 trans(priv)->dev, 0, time, ev); 378 priv->trans->dev, 0, time, ev);
357 } else { 379 } else {
358 data = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT); 380 data = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
359 trace_iwlwifi_dev_ucode_cont_event( 381 trace_iwlwifi_dev_ucode_cont_event(
360 trans(priv)->dev, time, data, ev); 382 priv->trans->dev, time, data, ev);
361 } 383 }
362 } 384 }
363 /* Allow device to power down */ 385 /* Allow device to power down */
364 iwl_release_nic_access(trans(priv)); 386 iwl_release_nic_access(priv->trans);
365 spin_unlock_irqrestore(&trans(priv)->reg_lock, reg_flags); 387 spin_unlock_irqrestore(&priv->trans->reg_lock, reg_flags);
366} 388}
367 389
368static void iwl_continuous_event_trace(struct iwl_priv *priv) 390static void iwl_continuous_event_trace(struct iwl_priv *priv)
@@ -379,10 +401,9 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
379 u32 num_wraps; /* # times uCode wrapped to top of log */ 401 u32 num_wraps; /* # times uCode wrapped to top of log */
380 u32 next_entry; /* index of next entry to be written by uCode */ 402 u32 next_entry; /* index of next entry to be written by uCode */
381 403
382 base = priv->shrd->device_pointers.log_event_table; 404 base = priv->device_pointers.log_event_table;
383 if (iwlagn_hw_valid_rtc_data_addr(base)) { 405 if (iwlagn_hw_valid_rtc_data_addr(base)) {
384 iwl_read_targ_mem_words(trans(priv), base, &read, sizeof(read)); 406 iwl_read_targ_mem_words(priv->trans, base, &read, sizeof(read));
385
386 capacity = read.capacity; 407 capacity = read.capacity;
387 mode = read.mode; 408 mode = read.mode;
388 num_wraps = read.wrap_counter; 409 num_wraps = read.wrap_counter;
@@ -422,7 +443,7 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
422 else 443 else
423 priv->event_log.wraps_once_count++; 444 priv->event_log.wraps_once_count++;
424 445
425 trace_iwlwifi_dev_ucode_wrap_event(trans(priv)->dev, 446 trace_iwlwifi_dev_ucode_wrap_event(priv->trans->dev,
426 num_wraps - priv->event_log.num_wraps, 447 num_wraps - priv->event_log.num_wraps,
427 next_entry, priv->event_log.next_entry); 448 next_entry, priv->event_log.next_entry);
428 449
@@ -488,7 +509,76 @@ static void iwl_bg_tx_flush(struct work_struct *work)
488 iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL); 509 iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
489} 510}
490 511
491static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags) 512/*
513 * queue/FIFO/AC mapping definitions
514 */
515
516#define IWL_TX_FIFO_BK 0 /* shared */
517#define IWL_TX_FIFO_BE 1
518#define IWL_TX_FIFO_VI 2 /* shared */
519#define IWL_TX_FIFO_VO 3
520#define IWL_TX_FIFO_BK_IPAN IWL_TX_FIFO_BK
521#define IWL_TX_FIFO_BE_IPAN 4
522#define IWL_TX_FIFO_VI_IPAN IWL_TX_FIFO_VI
523#define IWL_TX_FIFO_VO_IPAN 5
524/* re-uses the VO FIFO, uCode will properly flush/schedule */
525#define IWL_TX_FIFO_AUX 5
526#define IWL_TX_FIFO_UNUSED -1
527
528#define IWLAGN_CMD_FIFO_NUM 7
529
530/*
531 * This queue number is required for proper operation
532 * because the ucode will stop/start the scheduler as
533 * required.
534 */
535#define IWL_IPAN_MCAST_QUEUE 8
536
537static const u8 iwlagn_default_queue_to_tx_fifo[] = {
538 IWL_TX_FIFO_VO,
539 IWL_TX_FIFO_VI,
540 IWL_TX_FIFO_BE,
541 IWL_TX_FIFO_BK,
542 IWLAGN_CMD_FIFO_NUM,
543};
544
545static const u8 iwlagn_ipan_queue_to_tx_fifo[] = {
546 IWL_TX_FIFO_VO,
547 IWL_TX_FIFO_VI,
548 IWL_TX_FIFO_BE,
549 IWL_TX_FIFO_BK,
550 IWL_TX_FIFO_BK_IPAN,
551 IWL_TX_FIFO_BE_IPAN,
552 IWL_TX_FIFO_VI_IPAN,
553 IWL_TX_FIFO_VO_IPAN,
554 IWL_TX_FIFO_BE_IPAN,
555 IWLAGN_CMD_FIFO_NUM,
556 IWL_TX_FIFO_AUX,
557};
558
559static const u8 iwlagn_bss_ac_to_fifo[] = {
560 IWL_TX_FIFO_VO,
561 IWL_TX_FIFO_VI,
562 IWL_TX_FIFO_BE,
563 IWL_TX_FIFO_BK,
564};
565
566static const u8 iwlagn_bss_ac_to_queue[] = {
567 0, 1, 2, 3,
568};
569
570static const u8 iwlagn_pan_ac_to_fifo[] = {
571 IWL_TX_FIFO_VO_IPAN,
572 IWL_TX_FIFO_VI_IPAN,
573 IWL_TX_FIFO_BE_IPAN,
574 IWL_TX_FIFO_BK_IPAN,
575};
576
577static const u8 iwlagn_pan_ac_to_queue[] = {
578 7, 6, 5, 4,
579};
580
581void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
492{ 582{
493 int i; 583 int i;
494 584
@@ -496,9 +586,9 @@ static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
496 * The default context is always valid, 586 * The default context is always valid,
497 * the PAN context depends on uCode. 587 * the PAN context depends on uCode.
498 */ 588 */
499 priv->shrd->valid_contexts = BIT(IWL_RXON_CTX_BSS); 589 priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
500 if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) 590 if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN)
501 priv->shrd->valid_contexts |= BIT(IWL_RXON_CTX_PAN); 591 priv->valid_contexts |= BIT(IWL_RXON_CTX_PAN);
502 592
503 for (i = 0; i < NUM_IWL_RXON_CTX; i++) 593 for (i = 0; i < NUM_IWL_RXON_CTX; i++)
504 priv->contexts[i].ctxid = i; 594 priv->contexts[i].ctxid = i;
@@ -520,6 +610,10 @@ static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
520 priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS; 610 priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
521 priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS; 611 priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
522 priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS; 612 priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
613 memcpy(priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue,
614 iwlagn_bss_ac_to_queue, sizeof(iwlagn_bss_ac_to_queue));
615 memcpy(priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo,
616 iwlagn_bss_ac_to_fifo, sizeof(iwlagn_bss_ac_to_fifo));
523 617
524 priv->contexts[IWL_RXON_CTX_PAN].rxon_cmd = REPLY_WIPAN_RXON; 618 priv->contexts[IWL_RXON_CTX_PAN].rxon_cmd = REPLY_WIPAN_RXON;
525 priv->contexts[IWL_RXON_CTX_PAN].rxon_timing_cmd = 619 priv->contexts[IWL_RXON_CTX_PAN].rxon_timing_cmd =
@@ -542,26 +636,31 @@ static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
542 priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP; 636 priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP;
543 priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA; 637 priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA;
544 priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P; 638 priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P;
639 memcpy(priv->contexts[IWL_RXON_CTX_PAN].ac_to_queue,
640 iwlagn_pan_ac_to_queue, sizeof(iwlagn_pan_ac_to_queue));
641 memcpy(priv->contexts[IWL_RXON_CTX_PAN].ac_to_fifo,
642 iwlagn_pan_ac_to_fifo, sizeof(iwlagn_pan_ac_to_fifo));
643 priv->contexts[IWL_RXON_CTX_PAN].mcast_queue = IWL_IPAN_MCAST_QUEUE;
545 644
546 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2); 645 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
547} 646}
548 647
549static void iwl_rf_kill_ct_config(struct iwl_priv *priv) 648void iwl_rf_kill_ct_config(struct iwl_priv *priv)
550{ 649{
551 struct iwl_ct_kill_config cmd; 650 struct iwl_ct_kill_config cmd;
552 struct iwl_ct_kill_throttling_config adv_cmd; 651 struct iwl_ct_kill_throttling_config adv_cmd;
553 int ret = 0; 652 int ret = 0;
554 653
555 iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_CLR, 654 iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
556 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); 655 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
557 656
558 priv->thermal_throttle.ct_kill_toggle = false; 657 priv->thermal_throttle.ct_kill_toggle = false;
559 658
560 if (cfg(priv)->base_params->support_ct_kill_exit) { 659 if (priv->cfg->base_params->support_ct_kill_exit) {
561 adv_cmd.critical_temperature_enter = 660 adv_cmd.critical_temperature_enter =
562 cpu_to_le32(hw_params(priv).ct_kill_threshold); 661 cpu_to_le32(priv->hw_params.ct_kill_threshold);
563 adv_cmd.critical_temperature_exit = 662 adv_cmd.critical_temperature_exit =
564 cpu_to_le32(hw_params(priv).ct_kill_exit_threshold); 663 cpu_to_le32(priv->hw_params.ct_kill_exit_threshold);
565 664
566 ret = iwl_dvm_send_cmd_pdu(priv, 665 ret = iwl_dvm_send_cmd_pdu(priv,
567 REPLY_CT_KILL_CONFIG_CMD, 666 REPLY_CT_KILL_CONFIG_CMD,
@@ -572,11 +671,11 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
572 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD " 671 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
573 "succeeded, critical temperature enter is %d," 672 "succeeded, critical temperature enter is %d,"
574 "exit is %d\n", 673 "exit is %d\n",
575 hw_params(priv).ct_kill_threshold, 674 priv->hw_params.ct_kill_threshold,
576 hw_params(priv).ct_kill_exit_threshold); 675 priv->hw_params.ct_kill_exit_threshold);
577 } else { 676 } else {
578 cmd.critical_temperature_R = 677 cmd.critical_temperature_R =
579 cpu_to_le32(hw_params(priv).ct_kill_threshold); 678 cpu_to_le32(priv->hw_params.ct_kill_threshold);
580 679
581 ret = iwl_dvm_send_cmd_pdu(priv, 680 ret = iwl_dvm_send_cmd_pdu(priv,
582 REPLY_CT_KILL_CONFIG_CMD, 681 REPLY_CT_KILL_CONFIG_CMD,
@@ -587,7 +686,7 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
587 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD " 686 IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
588 "succeeded, " 687 "succeeded, "
589 "critical temperature is %d\n", 688 "critical temperature is %d\n",
590 hw_params(priv).ct_kill_threshold); 689 priv->hw_params.ct_kill_threshold);
591 } 690 }
592} 691}
593 692
@@ -627,6 +726,29 @@ static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
627 } 726 }
628} 727}
629 728
729void iwl_send_bt_config(struct iwl_priv *priv)
730{
731 struct iwl_bt_cmd bt_cmd = {
732 .lead_time = BT_LEAD_TIME_DEF,
733 .max_kill = BT_MAX_KILL_DEF,
734 .kill_ack_mask = 0,
735 .kill_cts_mask = 0,
736 };
737
738 if (!iwlwifi_mod_params.bt_coex_active)
739 bt_cmd.flags = BT_COEX_DISABLE;
740 else
741 bt_cmd.flags = BT_COEX_ENABLE;
742
743 priv->bt_enable_flag = bt_cmd.flags;
744 IWL_DEBUG_INFO(priv, "BT coex %s\n",
745 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
746
747 if (iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
748 CMD_SYNC, sizeof(struct iwl_bt_cmd), &bt_cmd))
749 IWL_ERR(priv, "failed to send BT Coex Config\n");
750}
751
630/** 752/**
631 * iwl_alive_start - called after REPLY_ALIVE notification received 753 * iwl_alive_start - called after REPLY_ALIVE notification received
632 * from protocol/runtime uCode (initialization uCode's 754 * from protocol/runtime uCode (initialization uCode's
@@ -642,9 +764,6 @@ int iwl_alive_start(struct iwl_priv *priv)
642 /* After the ALIVE response, we can send host commands to the uCode */ 764 /* After the ALIVE response, we can send host commands to the uCode */
643 set_bit(STATUS_ALIVE, &priv->status); 765 set_bit(STATUS_ALIVE, &priv->status);
644 766
645 /* Enable watchdog to monitor the driver tx queues */
646 iwl_setup_watchdog(priv);
647
648 if (iwl_is_rfkill(priv)) 767 if (iwl_is_rfkill(priv))
649 return -ERFKILL; 768 return -ERFKILL;
650 769
@@ -654,10 +773,10 @@ int iwl_alive_start(struct iwl_priv *priv)
654 } 773 }
655 774
656 /* download priority table before any calibration request */ 775 /* download priority table before any calibration request */
657 if (cfg(priv)->bt_params && 776 if (priv->cfg->bt_params &&
658 cfg(priv)->bt_params->advanced_bt_coexist) { 777 priv->cfg->bt_params->advanced_bt_coexist) {
659 /* Configure Bluetooth device coexistence support */ 778 /* Configure Bluetooth device coexistence support */
660 if (cfg(priv)->bt_params->bt_sco_disable) 779 if (priv->cfg->bt_params->bt_sco_disable)
661 priv->bt_enable_pspoll = false; 780 priv->bt_enable_pspoll = false;
662 else 781 else
663 priv->bt_enable_pspoll = true; 782 priv->bt_enable_pspoll = true;
@@ -694,10 +813,8 @@ int iwl_alive_start(struct iwl_priv *priv)
694 813
695 ieee80211_wake_queues(priv->hw); 814 ieee80211_wake_queues(priv->hw);
696 815
697 priv->active_rate = IWL_RATES_MASK;
698
699 /* Configure Tx antenna selection based on H/W config */ 816 /* Configure Tx antenna selection based on H/W config */
700 iwlagn_send_tx_ant_config(priv, hw_params(priv).valid_tx_ant); 817 iwlagn_send_tx_ant_config(priv, priv->hw_params.valid_tx_ant);
701 818
702 if (iwl_is_associated_ctx(ctx) && !priv->wowlan) { 819 if (iwl_is_associated_ctx(ctx) && !priv->wowlan) {
703 struct iwl_rxon_cmd *active_rxon = 820 struct iwl_rxon_cmd *active_rxon =
@@ -788,10 +905,6 @@ void iwl_down(struct iwl_priv *priv)
788 exit_pending = 905 exit_pending =
789 test_and_set_bit(STATUS_EXIT_PENDING, &priv->status); 906 test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
790 907
791 /* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set
792 * to prevent rearm timer */
793 del_timer_sync(&priv->watchdog);
794
795 iwl_clear_ucode_stations(priv, NULL); 908 iwl_clear_ucode_stations(priv, NULL);
796 iwl_dealloc_bcast_stations(priv); 909 iwl_dealloc_bcast_stations(priv);
797 iwl_clear_driver_stations(priv); 910 iwl_clear_driver_stations(priv);
@@ -800,9 +913,9 @@ void iwl_down(struct iwl_priv *priv)
800 priv->bt_status = 0; 913 priv->bt_status = 0;
801 priv->cur_rssi_ctx = NULL; 914 priv->cur_rssi_ctx = NULL;
802 priv->bt_is_sco = 0; 915 priv->bt_is_sco = 0;
803 if (cfg(priv)->bt_params) 916 if (priv->cfg->bt_params)
804 priv->bt_traffic_load = 917 priv->bt_traffic_load =
805 cfg(priv)->bt_params->bt_init_traffic_load; 918 priv->cfg->bt_params->bt_init_traffic_load;
806 else 919 else
807 priv->bt_traffic_load = 0; 920 priv->bt_traffic_load = 0;
808 priv->bt_full_concurrent = false; 921 priv->bt_full_concurrent = false;
@@ -817,18 +930,17 @@ void iwl_down(struct iwl_priv *priv)
817 ieee80211_stop_queues(priv->hw); 930 ieee80211_stop_queues(priv->hw);
818 931
819 priv->ucode_loaded = false; 932 priv->ucode_loaded = false;
820 iwl_trans_stop_device(trans(priv)); 933 iwl_trans_stop_device(priv->trans);
821 934
822 /* Clear out all status bits but a few that are stable across reset */ 935 /* Clear out all status bits but a few that are stable across reset */
823 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) << 936 priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
824 STATUS_RF_KILL_HW | 937 STATUS_RF_KILL_HW |
825 test_bit(STATUS_GEO_CONFIGURED, &priv->status) << 938 test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
826 STATUS_GEO_CONFIGURED | 939 STATUS_GEO_CONFIGURED |
940 test_bit(STATUS_FW_ERROR, &priv->status) <<
941 STATUS_FW_ERROR |
827 test_bit(STATUS_EXIT_PENDING, &priv->status) << 942 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
828 STATUS_EXIT_PENDING; 943 STATUS_EXIT_PENDING;
829 priv->shrd->status &=
830 test_bit(STATUS_FW_ERROR, &priv->shrd->status) <<
831 STATUS_FW_ERROR;
832 944
833 dev_kfree_skb(priv->beacon_skb); 945 dev_kfree_skb(priv->beacon_skb);
834 priv->beacon_skb = NULL; 946 priv->beacon_skb = NULL;
@@ -868,6 +980,7 @@ void iwlagn_prepare_restart(struct iwl_priv *priv)
868 u8 bt_load; 980 u8 bt_load;
869 u8 bt_status; 981 u8 bt_status;
870 bool bt_is_sco; 982 bool bt_is_sco;
983 int i;
871 984
872 lockdep_assert_held(&priv->mutex); 985 lockdep_assert_held(&priv->mutex);
873 986
@@ -895,6 +1008,15 @@ void iwlagn_prepare_restart(struct iwl_priv *priv)
895 priv->bt_traffic_load = bt_load; 1008 priv->bt_traffic_load = bt_load;
896 priv->bt_status = bt_status; 1009 priv->bt_status = bt_status;
897 priv->bt_is_sco = bt_is_sco; 1010 priv->bt_is_sco = bt_is_sco;
1011
1012 /* reset aggregation queues */
1013 for (i = IWLAGN_FIRST_AMPDU_QUEUE; i < IWL_MAX_HW_QUEUES; i++)
1014 priv->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE;
1015 /* and stop counts */
1016 for (i = 0; i < IWL_MAX_HW_QUEUES; i++)
1017 atomic_set(&priv->queue_stop_count[i], 0);
1018
1019 memset(priv->agg_q_alloc, 0, sizeof(priv->agg_q_alloc));
898} 1020}
899 1021
900static void iwl_bg_restart(struct work_struct *data) 1022static void iwl_bg_restart(struct work_struct *data)
@@ -904,7 +1026,7 @@ static void iwl_bg_restart(struct work_struct *data)
904 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 1026 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
905 return; 1027 return;
906 1028
907 if (test_and_clear_bit(STATUS_FW_ERROR, &priv->shrd->status)) { 1029 if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
908 mutex_lock(&priv->mutex); 1030 mutex_lock(&priv->mutex);
909 iwlagn_prepare_restart(priv); 1031 iwlagn_prepare_restart(priv);
910 mutex_unlock(&priv->mutex); 1032 mutex_unlock(&priv->mutex);
@@ -956,7 +1078,7 @@ static void iwlagn_disable_roc_work(struct work_struct *work)
956 * 1078 *
957 *****************************************************************************/ 1079 *****************************************************************************/
958 1080
959static void iwl_setup_deferred_work(struct iwl_priv *priv) 1081void iwl_setup_deferred_work(struct iwl_priv *priv)
960{ 1082{
961 priv->workqueue = create_singlethread_workqueue(DRV_NAME); 1083 priv->workqueue = create_singlethread_workqueue(DRV_NAME);
962 1084
@@ -971,7 +1093,7 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
971 1093
972 iwl_setup_scan_deferred_work(priv); 1094 iwl_setup_scan_deferred_work(priv);
973 1095
974 if (cfg(priv)->bt_params) 1096 if (priv->cfg->bt_params)
975 iwlagn_bt_setup_deferred_work(priv); 1097 iwlagn_bt_setup_deferred_work(priv);
976 1098
977 init_timer(&priv->statistics_periodic); 1099 init_timer(&priv->statistics_periodic);
@@ -981,15 +1103,11 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
981 init_timer(&priv->ucode_trace); 1103 init_timer(&priv->ucode_trace);
982 priv->ucode_trace.data = (unsigned long)priv; 1104 priv->ucode_trace.data = (unsigned long)priv;
983 priv->ucode_trace.function = iwl_bg_ucode_trace; 1105 priv->ucode_trace.function = iwl_bg_ucode_trace;
984
985 init_timer(&priv->watchdog);
986 priv->watchdog.data = (unsigned long)priv;
987 priv->watchdog.function = iwl_bg_watchdog;
988} 1106}
989 1107
990void iwl_cancel_deferred_work(struct iwl_priv *priv) 1108void iwl_cancel_deferred_work(struct iwl_priv *priv)
991{ 1109{
992 if (cfg(priv)->bt_params) 1110 if (priv->cfg->bt_params)
993 iwlagn_bt_cancel_deferred_work(priv); 1111 iwlagn_bt_cancel_deferred_work(priv);
994 1112
995 cancel_work_sync(&priv->run_time_calib_work); 1113 cancel_work_sync(&priv->run_time_calib_work);
@@ -1025,7 +1143,190 @@ static void iwl_init_hw_rates(struct ieee80211_rate *rates)
1025 } 1143 }
1026} 1144}
1027 1145
1028static int iwl_init_drv(struct iwl_priv *priv) 1146#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
1147#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
1148static void iwl_init_ht_hw_capab(const struct iwl_priv *priv,
1149 struct ieee80211_sta_ht_cap *ht_info,
1150 enum ieee80211_band band)
1151{
1152 u16 max_bit_rate = 0;
1153 u8 rx_chains_num = priv->hw_params.rx_chains_num;
1154 u8 tx_chains_num = priv->hw_params.tx_chains_num;
1155
1156 ht_info->cap = 0;
1157 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
1158
1159 ht_info->ht_supported = true;
1160
1161 if (priv->cfg->ht_params &&
1162 priv->cfg->ht_params->ht_greenfield_support)
1163 ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
1164 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
1165 max_bit_rate = MAX_BIT_RATE_20_MHZ;
1166 if (priv->hw_params.ht40_channel & BIT(band)) {
1167 ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
1168 ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
1169 ht_info->mcs.rx_mask[4] = 0x01;
1170 max_bit_rate = MAX_BIT_RATE_40_MHZ;
1171 }
1172
1173 if (iwlwifi_mod_params.amsdu_size_8K)
1174 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
1175
1176 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
1177 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
1178
1179 ht_info->mcs.rx_mask[0] = 0xFF;
1180 if (rx_chains_num >= 2)
1181 ht_info->mcs.rx_mask[1] = 0xFF;
1182 if (rx_chains_num >= 3)
1183 ht_info->mcs.rx_mask[2] = 0xFF;
1184
1185 /* Highest supported Rx data rate */
1186 max_bit_rate *= rx_chains_num;
1187 WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
1188 ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
1189
1190 /* Tx MCS capabilities */
1191 ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
1192 if (tx_chains_num != rx_chains_num) {
1193 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
1194 ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
1195 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
1196 }
1197}
1198
1199/**
1200 * iwl_init_geos - Initialize mac80211's geo/channel info based from eeprom
1201 */
1202static int iwl_init_geos(struct iwl_priv *priv)
1203{
1204 struct iwl_channel_info *ch;
1205 struct ieee80211_supported_band *sband;
1206 struct ieee80211_channel *channels;
1207 struct ieee80211_channel *geo_ch;
1208 struct ieee80211_rate *rates;
1209 int i = 0;
1210 s8 max_tx_power = IWLAGN_TX_POWER_TARGET_POWER_MIN;
1211
1212 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
1213 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
1214 IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
1215 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
1216 return 0;
1217 }
1218
1219 channels = kcalloc(priv->channel_count,
1220 sizeof(struct ieee80211_channel), GFP_KERNEL);
1221 if (!channels)
1222 return -ENOMEM;
1223
1224 rates = kcalloc(IWL_RATE_COUNT_LEGACY, sizeof(struct ieee80211_rate),
1225 GFP_KERNEL);
1226 if (!rates) {
1227 kfree(channels);
1228 return -ENOMEM;
1229 }
1230
1231 /* 5.2GHz channels start after the 2.4GHz channels */
1232 sband = &priv->bands[IEEE80211_BAND_5GHZ];
1233 sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
1234 /* just OFDM */
1235 sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
1236 sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
1237
1238 if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE)
1239 iwl_init_ht_hw_capab(priv, &sband->ht_cap,
1240 IEEE80211_BAND_5GHZ);
1241
1242 sband = &priv->bands[IEEE80211_BAND_2GHZ];
1243 sband->channels = channels;
1244 /* OFDM & CCK */
1245 sband->bitrates = rates;
1246 sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
1247
1248 if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE)
1249 iwl_init_ht_hw_capab(priv, &sband->ht_cap,
1250 IEEE80211_BAND_2GHZ);
1251
1252 priv->ieee_channels = channels;
1253 priv->ieee_rates = rates;
1254
1255 for (i = 0; i < priv->channel_count; i++) {
1256 ch = &priv->channel_info[i];
1257
1258 /* FIXME: might be removed if scan is OK */
1259 if (!is_channel_valid(ch))
1260 continue;
1261
1262 sband = &priv->bands[ch->band];
1263
1264 geo_ch = &sband->channels[sband->n_channels++];
1265
1266 geo_ch->center_freq =
1267 ieee80211_channel_to_frequency(ch->channel, ch->band);
1268 geo_ch->max_power = ch->max_power_avg;
1269 geo_ch->max_antenna_gain = 0xff;
1270 geo_ch->hw_value = ch->channel;
1271
1272 if (is_channel_valid(ch)) {
1273 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
1274 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
1275
1276 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
1277 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
1278
1279 if (ch->flags & EEPROM_CHANNEL_RADAR)
1280 geo_ch->flags |= IEEE80211_CHAN_RADAR;
1281
1282 geo_ch->flags |= ch->ht40_extension_channel;
1283
1284 if (ch->max_power_avg > max_tx_power)
1285 max_tx_power = ch->max_power_avg;
1286 } else {
1287 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
1288 }
1289
1290 IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
1291 ch->channel, geo_ch->center_freq,
1292 is_channel_a_band(ch) ? "5.2" : "2.4",
1293 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
1294 "restricted" : "valid",
1295 geo_ch->flags);
1296 }
1297
1298 priv->tx_power_device_lmt = max_tx_power;
1299 priv->tx_power_user_lmt = max_tx_power;
1300 priv->tx_power_next = max_tx_power;
1301
1302 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
1303 priv->hw_params.sku & EEPROM_SKU_CAP_BAND_52GHZ) {
1304 IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
1305 "Please send your %s to maintainer.\n",
1306 priv->trans->hw_id_str);
1307 priv->hw_params.sku &= ~EEPROM_SKU_CAP_BAND_52GHZ;
1308 }
1309
1310 IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
1311 priv->bands[IEEE80211_BAND_2GHZ].n_channels,
1312 priv->bands[IEEE80211_BAND_5GHZ].n_channels);
1313
1314 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
1315
1316 return 0;
1317}
1318
1319/*
1320 * iwl_free_geos - undo allocations in iwl_init_geos
1321 */
1322static void iwl_free_geos(struct iwl_priv *priv)
1323{
1324 kfree(priv->ieee_channels);
1325 kfree(priv->ieee_rates);
1326 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
1327}
1328
1329int iwl_init_drv(struct iwl_priv *priv)
1029{ 1330{
1030 int ret; 1331 int ret;
1031 1332
@@ -1040,7 +1341,7 @@ static int iwl_init_drv(struct iwl_priv *priv)
1040 priv->band = IEEE80211_BAND_2GHZ; 1341 priv->band = IEEE80211_BAND_2GHZ;
1041 1342
1042 priv->plcp_delta_threshold = 1343 priv->plcp_delta_threshold =
1043 cfg(priv)->base_params->plcp_delta_threshold; 1344 priv->cfg->base_params->plcp_delta_threshold;
1044 1345
1045 priv->iw_mode = NL80211_IFTYPE_STATION; 1346 priv->iw_mode = NL80211_IFTYPE_STATION;
1046 priv->current_ht_config.smps = IEEE80211_SMPS_STATIC; 1347 priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
@@ -1049,12 +1350,6 @@ static int iwl_init_drv(struct iwl_priv *priv)
1049 1350
1050 priv->ucode_owner = IWL_OWNERSHIP_DRIVER; 1351 priv->ucode_owner = IWL_OWNERSHIP_DRIVER;
1051 1352
1052 /* initialize force reset */
1053 priv->force_reset[IWL_RF_RESET].reset_duration =
1054 IWL_DELAY_NEXT_FORCE_RF_RESET;
1055 priv->force_reset[IWL_FW_RESET].reset_duration =
1056 IWL_DELAY_NEXT_FORCE_FW_RELOAD;
1057
1058 priv->rx_statistics_jiffies = jiffies; 1353 priv->rx_statistics_jiffies = jiffies;
1059 1354
1060 /* Choose which receivers/antennas to use */ 1355 /* Choose which receivers/antennas to use */
@@ -1063,8 +1358,8 @@ static int iwl_init_drv(struct iwl_priv *priv)
1063 iwl_init_scan_params(priv); 1358 iwl_init_scan_params(priv);
1064 1359
1065 /* init bt coex */ 1360 /* init bt coex */
1066 if (cfg(priv)->bt_params && 1361 if (priv->cfg->bt_params &&
1067 cfg(priv)->bt_params->advanced_bt_coexist) { 1362 priv->cfg->bt_params->advanced_bt_coexist) {
1068 priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT; 1363 priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
1069 priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT; 1364 priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
1070 priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK; 1365 priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
@@ -1094,7 +1389,7 @@ err:
1094 return ret; 1389 return ret;
1095} 1390}
1096 1391
1097static void iwl_uninit_drv(struct iwl_priv *priv) 1392void iwl_uninit_drv(struct iwl_priv *priv)
1098{ 1393{
1099 iwl_free_geos(priv); 1394 iwl_free_geos(priv);
1100 iwl_free_channel_map(priv); 1395 iwl_free_channel_map(priv);
@@ -1107,64 +1402,49 @@ static void iwl_uninit_drv(struct iwl_priv *priv)
1107#endif 1402#endif
1108} 1403}
1109 1404
1110/* Size of one Rx buffer in host DRAM */ 1405void iwl_set_hw_params(struct iwl_priv *priv)
1111#define IWL_RX_BUF_SIZE_4K (4 * 1024)
1112#define IWL_RX_BUF_SIZE_8K (8 * 1024)
1113
1114static void iwl_set_hw_params(struct iwl_priv *priv)
1115{ 1406{
1116 if (cfg(priv)->ht_params) 1407 if (priv->cfg->ht_params)
1117 hw_params(priv).use_rts_for_aggregation = 1408 priv->hw_params.use_rts_for_aggregation =
1118 cfg(priv)->ht_params->use_rts_for_aggregation; 1409 priv->cfg->ht_params->use_rts_for_aggregation;
1119
1120 if (iwlagn_mod_params.amsdu_size_8K)
1121 hw_params(priv).rx_page_order =
1122 get_order(IWL_RX_BUF_SIZE_8K);
1123 else
1124 hw_params(priv).rx_page_order =
1125 get_order(IWL_RX_BUF_SIZE_4K);
1126
1127 if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
1128 hw_params(priv).sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
1129 1410
1130 hw_params(priv).num_ampdu_queues = 1411 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
1131 cfg(priv)->base_params->num_of_ampdu_queues; 1412 priv->hw_params.sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
1132 hw_params(priv).wd_timeout = cfg(priv)->base_params->wd_timeout;
1133 1413
1134 /* Device-specific setup */ 1414 /* Device-specific setup */
1135 cfg(priv)->lib->set_hw_params(priv); 1415 priv->lib->set_hw_params(priv);
1136} 1416}
1137 1417
1138 1418
1139 1419
1140static void iwl_debug_config(struct iwl_priv *priv) 1420void iwl_debug_config(struct iwl_priv *priv)
1141{ 1421{
1142 dev_printk(KERN_INFO, trans(priv)->dev, "CONFIG_IWLWIFI_DEBUG " 1422 dev_printk(KERN_INFO, priv->trans->dev, "CONFIG_IWLWIFI_DEBUG "
1143#ifdef CONFIG_IWLWIFI_DEBUG 1423#ifdef CONFIG_IWLWIFI_DEBUG
1144 "enabled\n"); 1424 "enabled\n");
1145#else 1425#else
1146 "disabled\n"); 1426 "disabled\n");
1147#endif 1427#endif
1148 dev_printk(KERN_INFO, trans(priv)->dev, "CONFIG_IWLWIFI_DEBUGFS " 1428 dev_printk(KERN_INFO, priv->trans->dev, "CONFIG_IWLWIFI_DEBUGFS "
1149#ifdef CONFIG_IWLWIFI_DEBUGFS 1429#ifdef CONFIG_IWLWIFI_DEBUGFS
1150 "enabled\n"); 1430 "enabled\n");
1151#else 1431#else
1152 "disabled\n"); 1432 "disabled\n");
1153#endif 1433#endif
1154 dev_printk(KERN_INFO, trans(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TRACING " 1434 dev_printk(KERN_INFO, priv->trans->dev, "CONFIG_IWLWIFI_DEVICE_TRACING "
1155#ifdef CONFIG_IWLWIFI_DEVICE_TRACING 1435#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
1156 "enabled\n"); 1436 "enabled\n");
1157#else 1437#else
1158 "disabled\n"); 1438 "disabled\n");
1159#endif 1439#endif
1160 1440
1161 dev_printk(KERN_INFO, trans(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TESTMODE " 1441 dev_printk(KERN_INFO, priv->trans->dev, "CONFIG_IWLWIFI_DEVICE_TESTMODE "
1162#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE 1442#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
1163 "enabled\n"); 1443 "enabled\n");
1164#else 1444#else
1165 "disabled\n"); 1445 "disabled\n");
1166#endif 1446#endif
1167 dev_printk(KERN_INFO, trans(priv)->dev, "CONFIG_IWLWIFI_P2P " 1447 dev_printk(KERN_INFO, priv->trans->dev, "CONFIG_IWLWIFI_P2P "
1168#ifdef CONFIG_IWLWIFI_P2P 1448#ifdef CONFIG_IWLWIFI_P2P
1169 "enabled\n"); 1449 "enabled\n");
1170#else 1450#else
@@ -1173,9 +1453,9 @@ static void iwl_debug_config(struct iwl_priv *priv)
1173} 1453}
1174 1454
1175static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, 1455static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1456 const struct iwl_cfg *cfg,
1176 const struct iwl_fw *fw) 1457 const struct iwl_fw *fw)
1177{ 1458{
1178 int err = 0;
1179 struct iwl_priv *priv; 1459 struct iwl_priv *priv;
1180 struct ieee80211_hw *hw; 1460 struct ieee80211_hw *hw;
1181 struct iwl_op_mode *op_mode; 1461 struct iwl_op_mode *op_mode;
@@ -1190,25 +1470,60 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1190 STATISTICS_NOTIFICATION, 1470 STATISTICS_NOTIFICATION,
1191 REPLY_TX, 1471 REPLY_TX,
1192 }; 1472 };
1473 int i;
1193 1474
1194 /************************ 1475 /************************
1195 * 1. Allocating HW data 1476 * 1. Allocating HW data
1196 ************************/ 1477 ************************/
1197 hw = iwl_alloc_all(); 1478 hw = iwl_alloc_all();
1198 if (!hw) { 1479 if (!hw) {
1199 pr_err("%s: Cannot allocate network device\n", 1480 pr_err("%s: Cannot allocate network device\n", cfg->name);
1200 cfg(trans)->name);
1201 err = -ENOMEM;
1202 goto out; 1481 goto out;
1203 } 1482 }
1204 1483
1205 op_mode = hw->priv; 1484 op_mode = hw->priv;
1206 op_mode->ops = &iwl_dvm_ops; 1485 op_mode->ops = &iwl_dvm_ops;
1207 priv = IWL_OP_MODE_GET_DVM(op_mode); 1486 priv = IWL_OP_MODE_GET_DVM(op_mode);
1208 priv->shrd = trans->shrd; 1487 priv->trans = trans;
1488 priv->dev = trans->dev;
1489 priv->cfg = cfg;
1209 priv->fw = fw; 1490 priv->fw = fw;
1210 /* TODO: remove fw from shared data later */ 1491
1211 priv->shrd->fw = fw; 1492 switch (priv->cfg->device_family) {
1493 case IWL_DEVICE_FAMILY_1000:
1494 case IWL_DEVICE_FAMILY_100:
1495 priv->lib = &iwl1000_lib;
1496 break;
1497 case IWL_DEVICE_FAMILY_2000:
1498 case IWL_DEVICE_FAMILY_105:
1499 priv->lib = &iwl2000_lib;
1500 break;
1501 case IWL_DEVICE_FAMILY_2030:
1502 case IWL_DEVICE_FAMILY_135:
1503 priv->lib = &iwl2030_lib;
1504 break;
1505 case IWL_DEVICE_FAMILY_5000:
1506 priv->lib = &iwl5000_lib;
1507 break;
1508 case IWL_DEVICE_FAMILY_5150:
1509 priv->lib = &iwl5150_lib;
1510 break;
1511 case IWL_DEVICE_FAMILY_6000:
1512 case IWL_DEVICE_FAMILY_6005:
1513 case IWL_DEVICE_FAMILY_6000i:
1514 case IWL_DEVICE_FAMILY_6050:
1515 case IWL_DEVICE_FAMILY_6150:
1516 priv->lib = &iwl6000_lib;
1517 break;
1518 case IWL_DEVICE_FAMILY_6030:
1519 priv->lib = &iwl6030_lib;
1520 break;
1521 default:
1522 break;
1523 }
1524
1525 if (WARN_ON(!priv->lib))
1526 goto out_free_hw;
1212 1527
1213 /* 1528 /*
1214 * Populate the state variables that the transport layer needs 1529 * Populate the state variables that the transport layer needs
@@ -1217,27 +1532,40 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1217 trans_cfg.op_mode = op_mode; 1532 trans_cfg.op_mode = op_mode;
1218 trans_cfg.no_reclaim_cmds = no_reclaim_cmds; 1533 trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
1219 trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds); 1534 trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
1535 trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K;
1536 if (!iwlwifi_mod_params.wd_disable)
1537 trans_cfg.queue_watchdog_timeout =
1538 priv->cfg->base_params->wd_timeout;
1539 else
1540 trans_cfg.queue_watchdog_timeout = IWL_WATCHHDOG_DISABLED;
1541 trans_cfg.command_names = iwl_dvm_cmd_strings;
1220 1542
1221 ucode_flags = fw->ucode_capa.flags; 1543 ucode_flags = fw->ucode_capa.flags;
1222 1544
1223#ifndef CONFIG_IWLWIFI_P2P 1545#ifndef CONFIG_IWLWIFI_P2P
1224 ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN; 1546 ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P;
1225#endif 1547#endif
1226 1548
1227 if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) { 1549 if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) {
1228 priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN; 1550 priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
1229 trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM; 1551 trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
1552 trans_cfg.queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
1553 trans_cfg.n_queue_to_fifo =
1554 ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo);
1230 } else { 1555 } else {
1231 priv->sta_key_max_num = STA_KEY_MAX_NUM; 1556 priv->sta_key_max_num = STA_KEY_MAX_NUM;
1232 trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM; 1557 trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
1558 trans_cfg.queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
1559 trans_cfg.n_queue_to_fifo =
1560 ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo);
1233 } 1561 }
1234 1562
1235 /* Configure transport layer */ 1563 /* Configure transport layer */
1236 iwl_trans_configure(trans(priv), &trans_cfg); 1564 iwl_trans_configure(priv->trans, &trans_cfg);
1237 1565
1238 /* At this point both hw and priv are allocated. */ 1566 /* At this point both hw and priv are allocated. */
1239 1567
1240 SET_IEEE80211_DEV(priv->hw, trans(priv)->dev); 1568 SET_IEEE80211_DEV(priv->hw, priv->trans->dev);
1241 1569
1242 /* show what debugging capabilities we have */ 1570 /* show what debugging capabilities we have */
1243 iwl_debug_config(priv); 1571 iwl_debug_config(priv);
@@ -1246,58 +1574,50 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1246 1574
1247 /* is antenna coupling more than 35dB ? */ 1575 /* is antenna coupling more than 35dB ? */
1248 priv->bt_ant_couple_ok = 1576 priv->bt_ant_couple_ok =
1249 (iwlagn_mod_params.ant_coupling > 1577 (iwlwifi_mod_params.ant_coupling >
1250 IWL_BT_ANTENNA_COUPLING_THRESHOLD) ? 1578 IWL_BT_ANTENNA_COUPLING_THRESHOLD) ?
1251 true : false; 1579 true : false;
1252 1580
1253 /* enable/disable bt channel inhibition */ 1581 /* enable/disable bt channel inhibition */
1254 priv->bt_ch_announce = iwlagn_mod_params.bt_ch_announce; 1582 priv->bt_ch_announce = iwlwifi_mod_params.bt_ch_announce;
1255 IWL_DEBUG_INFO(priv, "BT channel inhibition is %s\n", 1583 IWL_DEBUG_INFO(priv, "BT channel inhibition is %s\n",
1256 (priv->bt_ch_announce) ? "On" : "Off"); 1584 (priv->bt_ch_announce) ? "On" : "Off");
1257 1585
1258 if (iwl_alloc_traffic_mem(priv))
1259 IWL_ERR(priv, "Not enough memory to generate traffic log\n");
1260
1261 /* these spin locks will be used in apm_ops.init and EEPROM access 1586 /* these spin locks will be used in apm_ops.init and EEPROM access
1262 * we should init now 1587 * we should init now
1263 */ 1588 */
1264 spin_lock_init(&trans(priv)->reg_lock); 1589 spin_lock_init(&priv->trans->reg_lock);
1265 spin_lock_init(&priv->statistics.lock); 1590 spin_lock_init(&priv->statistics.lock);
1266 1591
1267 /*********************** 1592 /***********************
1268 * 2. Read REV register 1593 * 2. Read REV register
1269 ***********************/ 1594 ***********************/
1270 IWL_INFO(priv, "Detected %s, REV=0x%X\n", 1595 IWL_INFO(priv, "Detected %s, REV=0x%X\n",
1271 cfg(priv)->name, trans(priv)->hw_rev); 1596 priv->cfg->name, priv->trans->hw_rev);
1272 1597
1273 err = iwl_trans_start_hw(trans(priv)); 1598 if (iwl_trans_start_hw(priv->trans))
1274 if (err) 1599 goto out_free_hw;
1275 goto out_free_traffic_mem;
1276 1600
1277 /***************** 1601 /* Read the EEPROM */
1278 * 3. Read EEPROM 1602 if (iwl_eeprom_init(priv, priv->trans->hw_rev)) {
1279 *****************/
1280 err = iwl_eeprom_init(trans(priv), trans(priv)->hw_rev);
1281 /* Reset chip to save power until we load uCode during "up". */
1282 iwl_trans_stop_hw(trans(priv));
1283 if (err) {
1284 IWL_ERR(priv, "Unable to init EEPROM\n"); 1603 IWL_ERR(priv, "Unable to init EEPROM\n");
1285 goto out_free_traffic_mem; 1604 goto out_free_hw;
1286 } 1605 }
1287 err = iwl_eeprom_check_version(priv); 1606 /* Reset chip to save power until we load uCode during "up". */
1288 if (err) 1607 iwl_trans_stop_hw(priv->trans, false);
1608
1609 if (iwl_eeprom_check_version(priv))
1289 goto out_free_eeprom; 1610 goto out_free_eeprom;
1290 1611
1291 err = iwl_eeprom_init_hw_params(priv); 1612 if (iwl_eeprom_init_hw_params(priv))
1292 if (err)
1293 goto out_free_eeprom; 1613 goto out_free_eeprom;
1294 1614
1295 /* extract MAC Address */ 1615 /* extract MAC Address */
1296 iwl_eeprom_get_mac(priv->shrd, priv->addresses[0].addr); 1616 iwl_eeprom_get_mac(priv, priv->addresses[0].addr);
1297 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr); 1617 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
1298 priv->hw->wiphy->addresses = priv->addresses; 1618 priv->hw->wiphy->addresses = priv->addresses;
1299 priv->hw->wiphy->n_addresses = 1; 1619 priv->hw->wiphy->n_addresses = 1;
1300 num_mac = iwl_eeprom_query16(priv->shrd, EEPROM_NUM_MAC_ADDRESS); 1620 num_mac = iwl_eeprom_query16(priv, EEPROM_NUM_MAC_ADDRESS);
1301 if (num_mac > 1) { 1621 if (num_mac > 1) {
1302 memcpy(priv->addresses[1].addr, priv->addresses[0].addr, 1622 memcpy(priv->addresses[1].addr, priv->addresses[0].addr,
1303 ETH_ALEN); 1623 ETH_ALEN);
@@ -1310,7 +1630,7 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1310 ************************/ 1630 ************************/
1311 iwl_set_hw_params(priv); 1631 iwl_set_hw_params(priv);
1312 1632
1313 if (!(hw_params(priv).sku & EEPROM_SKU_CAP_IPAN_ENABLE)) { 1633 if (!(priv->hw_params.sku & EEPROM_SKU_CAP_IPAN_ENABLE)) {
1314 IWL_DEBUG_INFO(priv, "Your EEPROM disabled PAN"); 1634 IWL_DEBUG_INFO(priv, "Your EEPROM disabled PAN");
1315 ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN; 1635 ucode_flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
1316 /* 1636 /*
@@ -1320,18 +1640,32 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1320 ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P; 1640 ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P;
1321 priv->sta_key_max_num = STA_KEY_MAX_NUM; 1641 priv->sta_key_max_num = STA_KEY_MAX_NUM;
1322 trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM; 1642 trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
1643 trans_cfg.queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
1644 trans_cfg.n_queue_to_fifo =
1645 ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo);
1323 1646
1324 /* Configure transport layer again*/ 1647 /* Configure transport layer again*/
1325 iwl_trans_configure(trans(priv), &trans_cfg); 1648 iwl_trans_configure(priv->trans, &trans_cfg);
1326 } 1649 }
1327 1650
1328 /******************* 1651 /*******************
1329 * 5. Setup priv 1652 * 5. Setup priv
1330 *******************/ 1653 *******************/
1654 for (i = 0; i < IWL_MAX_HW_QUEUES; i++) {
1655 priv->queue_to_mac80211[i] = IWL_INVALID_MAC80211_QUEUE;
1656 if (i < IWLAGN_FIRST_AMPDU_QUEUE &&
1657 i != IWL_DEFAULT_CMD_QUEUE_NUM &&
1658 i != IWL_IPAN_CMD_QUEUE_NUM)
1659 priv->queue_to_mac80211[i] = i;
1660 atomic_set(&priv->queue_stop_count[i], 0);
1661 }
1662
1663 WARN_ON(trans_cfg.queue_to_fifo[trans_cfg.cmd_queue] !=
1664 IWLAGN_CMD_FIFO_NUM);
1331 1665
1332 err = iwl_init_drv(priv); 1666 if (iwl_init_drv(priv))
1333 if (err)
1334 goto out_free_eeprom; 1667 goto out_free_eeprom;
1668
1335 /* At this point both hw and priv are initialized. */ 1669 /* At this point both hw and priv are initialized. */
1336 1670
1337 /******************** 1671 /********************
@@ -1364,15 +1698,12 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
1364 * 1698 *
1365 * 7. Setup and register with mac80211 and debugfs 1699 * 7. Setup and register with mac80211 and debugfs
1366 **************************************************/ 1700 **************************************************/
1367 err = iwlagn_mac_setup_register(priv, &fw->ucode_capa); 1701 if (iwlagn_mac_setup_register(priv, &fw->ucode_capa))
1368 if (err)
1369 goto out_destroy_workqueue; 1702 goto out_destroy_workqueue;
1370 1703
1371 err = iwl_dbgfs_register(priv, DRV_NAME); 1704 if (iwl_dbgfs_register(priv, DRV_NAME))
1372 if (err)
1373 IWL_ERR(priv, 1705 IWL_ERR(priv,
1374 "failed to create debugfs files. Ignoring error: %d\n", 1706 "failed to create debugfs files. Ignoring error\n");
1375 err);
1376 1707
1377 return op_mode; 1708 return op_mode;
1378 1709
@@ -1381,16 +1712,15 @@ out_destroy_workqueue:
1381 priv->workqueue = NULL; 1712 priv->workqueue = NULL;
1382 iwl_uninit_drv(priv); 1713 iwl_uninit_drv(priv);
1383out_free_eeprom: 1714out_free_eeprom:
1384 iwl_eeprom_free(priv->shrd); 1715 iwl_eeprom_free(priv);
1385out_free_traffic_mem: 1716out_free_hw:
1386 iwl_free_traffic_mem(priv);
1387 ieee80211_free_hw(priv->hw); 1717 ieee80211_free_hw(priv->hw);
1388out: 1718out:
1389 op_mode = NULL; 1719 op_mode = NULL;
1390 return op_mode; 1720 return op_mode;
1391} 1721}
1392 1722
1393static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode) 1723void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
1394{ 1724{
1395 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); 1725 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
1396 1726
@@ -1405,9 +1735,9 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
1405 1735
1406 /*This will stop the queues, move the device to low power state */ 1736 /*This will stop the queues, move the device to low power state */
1407 priv->ucode_loaded = false; 1737 priv->ucode_loaded = false;
1408 iwl_trans_stop_device(trans(priv)); 1738 iwl_trans_stop_device(priv->trans);
1409 1739
1410 iwl_eeprom_free(priv->shrd); 1740 iwl_eeprom_free(priv);
1411 1741
1412 /*netif_stop_queue(dev); */ 1742 /*netif_stop_queue(dev); */
1413 flush_workqueue(priv->workqueue); 1743 flush_workqueue(priv->workqueue);
@@ -1417,69 +1747,562 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
1417 * until now... */ 1747 * until now... */
1418 destroy_workqueue(priv->workqueue); 1748 destroy_workqueue(priv->workqueue);
1419 priv->workqueue = NULL; 1749 priv->workqueue = NULL;
1420 iwl_free_traffic_mem(priv);
1421 1750
1422 iwl_uninit_drv(priv); 1751 iwl_uninit_drv(priv);
1423 1752
1424 dev_kfree_skb(priv->beacon_skb); 1753 dev_kfree_skb(priv->beacon_skb);
1425 1754
1755 iwl_trans_stop_hw(priv->trans, true);
1426 ieee80211_free_hw(priv->hw); 1756 ieee80211_free_hw(priv->hw);
1427} 1757}
1428 1758
1429static void iwl_cmd_queue_full(struct iwl_op_mode *op_mode) 1759static const char * const desc_lookup_text[] = {
1760 "OK",
1761 "FAIL",
1762 "BAD_PARAM",
1763 "BAD_CHECKSUM",
1764 "NMI_INTERRUPT_WDG",
1765 "SYSASSERT",
1766 "FATAL_ERROR",
1767 "BAD_COMMAND",
1768 "HW_ERROR_TUNE_LOCK",
1769 "HW_ERROR_TEMPERATURE",
1770 "ILLEGAL_CHAN_FREQ",
1771 "VCC_NOT_STABLE",
1772 "FH_ERROR",
1773 "NMI_INTERRUPT_HOST",
1774 "NMI_INTERRUPT_ACTION_PT",
1775 "NMI_INTERRUPT_UNKNOWN",
1776 "UCODE_VERSION_MISMATCH",
1777 "HW_ERROR_ABS_LOCK",
1778 "HW_ERROR_CAL_LOCK_FAIL",
1779 "NMI_INTERRUPT_INST_ACTION_PT",
1780 "NMI_INTERRUPT_DATA_ACTION_PT",
1781 "NMI_TRM_HW_ER",
1782 "NMI_INTERRUPT_TRM",
1783 "NMI_INTERRUPT_BREAK_POINT",
1784 "DEBUG_0",
1785 "DEBUG_1",
1786 "DEBUG_2",
1787 "DEBUG_3",
1788};
1789
1790static struct { char *name; u8 num; } advanced_lookup[] = {
1791 { "NMI_INTERRUPT_WDG", 0x34 },
1792 { "SYSASSERT", 0x35 },
1793 { "UCODE_VERSION_MISMATCH", 0x37 },
1794 { "BAD_COMMAND", 0x38 },
1795 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
1796 { "FATAL_ERROR", 0x3D },
1797 { "NMI_TRM_HW_ERR", 0x46 },
1798 { "NMI_INTERRUPT_TRM", 0x4C },
1799 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
1800 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
1801 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
1802 { "NMI_INTERRUPT_HOST", 0x66 },
1803 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
1804 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
1805 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
1806 { "ADVANCED_SYSASSERT", 0 },
1807};
1808
1809static const char *desc_lookup(u32 num)
1810{
1811 int i;
1812 int max = ARRAY_SIZE(desc_lookup_text);
1813
1814 if (num < max)
1815 return desc_lookup_text[num];
1816
1817 max = ARRAY_SIZE(advanced_lookup) - 1;
1818 for (i = 0; i < max; i++) {
1819 if (advanced_lookup[i].num == num)
1820 break;
1821 }
1822 return advanced_lookup[i].name;
1823}
1824
1825#define ERROR_START_OFFSET (1 * sizeof(u32))
1826#define ERROR_ELEM_SIZE (7 * sizeof(u32))
1827
1828static void iwl_dump_nic_error_log(struct iwl_priv *priv)
1829{
1830 struct iwl_trans *trans = priv->trans;
1831 u32 base;
1832 struct iwl_error_event_table table;
1833
1834 base = priv->device_pointers.error_event_table;
1835 if (priv->cur_ucode == IWL_UCODE_INIT) {
1836 if (!base)
1837 base = priv->fw->init_errlog_ptr;
1838 } else {
1839 if (!base)
1840 base = priv->fw->inst_errlog_ptr;
1841 }
1842
1843 if (!iwlagn_hw_valid_rtc_data_addr(base)) {
1844 IWL_ERR(priv,
1845 "Not valid error log pointer 0x%08X for %s uCode\n",
1846 base,
1847 (priv->cur_ucode == IWL_UCODE_INIT)
1848 ? "Init" : "RT");
1849 return;
1850 }
1851
1852 /*TODO: Update dbgfs with ISR error stats obtained below */
1853 iwl_read_targ_mem_words(trans, base, &table, sizeof(table));
1854
1855 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
1856 IWL_ERR(trans, "Start IWL Error Log Dump:\n");
1857 IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
1858 priv->status, table.valid);
1859 }
1860
1861 trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
1862 table.data1, table.data2, table.line,
1863 table.blink1, table.blink2, table.ilink1,
1864 table.ilink2, table.bcon_time, table.gp1,
1865 table.gp2, table.gp3, table.ucode_ver,
1866 table.hw_ver, table.brd_ver);
1867 IWL_ERR(priv, "0x%08X | %-28s\n", table.error_id,
1868 desc_lookup(table.error_id));
1869 IWL_ERR(priv, "0x%08X | uPc\n", table.pc);
1870 IWL_ERR(priv, "0x%08X | branchlink1\n", table.blink1);
1871 IWL_ERR(priv, "0x%08X | branchlink2\n", table.blink2);
1872 IWL_ERR(priv, "0x%08X | interruptlink1\n", table.ilink1);
1873 IWL_ERR(priv, "0x%08X | interruptlink2\n", table.ilink2);
1874 IWL_ERR(priv, "0x%08X | data1\n", table.data1);
1875 IWL_ERR(priv, "0x%08X | data2\n", table.data2);
1876 IWL_ERR(priv, "0x%08X | line\n", table.line);
1877 IWL_ERR(priv, "0x%08X | beacon time\n", table.bcon_time);
1878 IWL_ERR(priv, "0x%08X | tsf low\n", table.tsf_low);
1879 IWL_ERR(priv, "0x%08X | tsf hi\n", table.tsf_hi);
1880 IWL_ERR(priv, "0x%08X | time gp1\n", table.gp1);
1881 IWL_ERR(priv, "0x%08X | time gp2\n", table.gp2);
1882 IWL_ERR(priv, "0x%08X | time gp3\n", table.gp3);
1883 IWL_ERR(priv, "0x%08X | uCode version\n", table.ucode_ver);
1884 IWL_ERR(priv, "0x%08X | hw version\n", table.hw_ver);
1885 IWL_ERR(priv, "0x%08X | board version\n", table.brd_ver);
1886 IWL_ERR(priv, "0x%08X | hcmd\n", table.hcmd);
1887 IWL_ERR(priv, "0x%08X | isr0\n", table.isr0);
1888 IWL_ERR(priv, "0x%08X | isr1\n", table.isr1);
1889 IWL_ERR(priv, "0x%08X | isr2\n", table.isr2);
1890 IWL_ERR(priv, "0x%08X | isr3\n", table.isr3);
1891 IWL_ERR(priv, "0x%08X | isr4\n", table.isr4);
1892 IWL_ERR(priv, "0x%08X | isr_pref\n", table.isr_pref);
1893 IWL_ERR(priv, "0x%08X | wait_event\n", table.wait_event);
1894 IWL_ERR(priv, "0x%08X | l2p_control\n", table.l2p_control);
1895 IWL_ERR(priv, "0x%08X | l2p_duration\n", table.l2p_duration);
1896 IWL_ERR(priv, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
1897 IWL_ERR(priv, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
1898 IWL_ERR(priv, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
1899 IWL_ERR(priv, "0x%08X | timestamp\n", table.u_timestamp);
1900 IWL_ERR(priv, "0x%08X | flow_handler\n", table.flow_handler);
1901}
1902
1903#define EVENT_START_OFFSET (4 * sizeof(u32))
1904
1905/**
1906 * iwl_print_event_log - Dump error event log to syslog
1907 *
1908 */
1909static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
1910 u32 num_events, u32 mode,
1911 int pos, char **buf, size_t bufsz)
1912{
1913 u32 i;
1914 u32 base; /* SRAM byte address of event log header */
1915 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
1916 u32 ptr; /* SRAM byte address of log data */
1917 u32 ev, time, data; /* event log data */
1918 unsigned long reg_flags;
1919
1920 struct iwl_trans *trans = priv->trans;
1921
1922 if (num_events == 0)
1923 return pos;
1924
1925 base = priv->device_pointers.log_event_table;
1926 if (priv->cur_ucode == IWL_UCODE_INIT) {
1927 if (!base)
1928 base = priv->fw->init_evtlog_ptr;
1929 } else {
1930 if (!base)
1931 base = priv->fw->inst_evtlog_ptr;
1932 }
1933
1934 if (mode == 0)
1935 event_size = 2 * sizeof(u32);
1936 else
1937 event_size = 3 * sizeof(u32);
1938
1939 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
1940
1941 /* Make sure device is powered up for SRAM reads */
1942 spin_lock_irqsave(&trans->reg_lock, reg_flags);
1943 if (unlikely(!iwl_grab_nic_access(trans)))
1944 goto out_unlock;
1945
1946 /* Set starting address; reads will auto-increment */
1947 iwl_write32(trans, HBUS_TARG_MEM_RADDR, ptr);
1948
1949 /* "time" is actually "data" for mode 0 (no timestamp).
1950 * place event id # at far right for easier visual parsing. */
1951 for (i = 0; i < num_events; i++) {
1952 ev = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
1953 time = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
1954 if (mode == 0) {
1955 /* data, ev */
1956 if (bufsz) {
1957 pos += scnprintf(*buf + pos, bufsz - pos,
1958 "EVT_LOG:0x%08x:%04u\n",
1959 time, ev);
1960 } else {
1961 trace_iwlwifi_dev_ucode_event(trans->dev, 0,
1962 time, ev);
1963 IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n",
1964 time, ev);
1965 }
1966 } else {
1967 data = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
1968 if (bufsz) {
1969 pos += scnprintf(*buf + pos, bufsz - pos,
1970 "EVT_LOGT:%010u:0x%08x:%04u\n",
1971 time, data, ev);
1972 } else {
1973 IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
1974 time, data, ev);
1975 trace_iwlwifi_dev_ucode_event(trans->dev, time,
1976 data, ev);
1977 }
1978 }
1979 }
1980
1981 /* Allow device to power down */
1982 iwl_release_nic_access(trans);
1983out_unlock:
1984 spin_unlock_irqrestore(&trans->reg_lock, reg_flags);
1985 return pos;
1986}
1987
1988/**
1989 * iwl_print_last_event_logs - Dump the newest # of event log to syslog
1990 */
1991static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1992 u32 num_wraps, u32 next_entry,
1993 u32 size, u32 mode,
1994 int pos, char **buf, size_t bufsz)
1995{
1996 /*
1997 * display the newest DEFAULT_LOG_ENTRIES entries
1998 * i.e the entries just before the next ont that uCode would fill.
1999 */
2000 if (num_wraps) {
2001 if (next_entry < size) {
2002 pos = iwl_print_event_log(priv,
2003 capacity - (size - next_entry),
2004 size - next_entry, mode,
2005 pos, buf, bufsz);
2006 pos = iwl_print_event_log(priv, 0,
2007 next_entry, mode,
2008 pos, buf, bufsz);
2009 } else
2010 pos = iwl_print_event_log(priv, next_entry - size,
2011 size, mode, pos, buf, bufsz);
2012 } else {
2013 if (next_entry < size) {
2014 pos = iwl_print_event_log(priv, 0, next_entry,
2015 mode, pos, buf, bufsz);
2016 } else {
2017 pos = iwl_print_event_log(priv, next_entry - size,
2018 size, mode, pos, buf, bufsz);
2019 }
2020 }
2021 return pos;
2022}
2023
2024#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
2025
2026int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
2027 char **buf, bool display)
2028{
2029 u32 base; /* SRAM byte address of event log header */
2030 u32 capacity; /* event log capacity in # entries */
2031 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
2032 u32 num_wraps; /* # times uCode wrapped to top of log */
2033 u32 next_entry; /* index of next entry to be written by uCode */
2034 u32 size; /* # entries that we'll print */
2035 u32 logsize;
2036 int pos = 0;
2037 size_t bufsz = 0;
2038 struct iwl_trans *trans = priv->trans;
2039
2040 base = priv->device_pointers.log_event_table;
2041 if (priv->cur_ucode == IWL_UCODE_INIT) {
2042 logsize = priv->fw->init_evtlog_size;
2043 if (!base)
2044 base = priv->fw->init_evtlog_ptr;
2045 } else {
2046 logsize = priv->fw->inst_evtlog_size;
2047 if (!base)
2048 base = priv->fw->inst_evtlog_ptr;
2049 }
2050
2051 if (!iwlagn_hw_valid_rtc_data_addr(base)) {
2052 IWL_ERR(priv,
2053 "Invalid event log pointer 0x%08X for %s uCode\n",
2054 base,
2055 (priv->cur_ucode == IWL_UCODE_INIT)
2056 ? "Init" : "RT");
2057 return -EINVAL;
2058 }
2059
2060 /* event log header */
2061 capacity = iwl_read_targ_mem(trans, base);
2062 mode = iwl_read_targ_mem(trans, base + (1 * sizeof(u32)));
2063 num_wraps = iwl_read_targ_mem(trans, base + (2 * sizeof(u32)));
2064 next_entry = iwl_read_targ_mem(trans, base + (3 * sizeof(u32)));
2065
2066 if (capacity > logsize) {
2067 IWL_ERR(priv, "Log capacity %d is bogus, limit to %d "
2068 "entries\n", capacity, logsize);
2069 capacity = logsize;
2070 }
2071
2072 if (next_entry > logsize) {
2073 IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
2074 next_entry, logsize);
2075 next_entry = logsize;
2076 }
2077
2078 size = num_wraps ? capacity : next_entry;
2079
2080 /* bail out if nothing in log */
2081 if (size == 0) {
2082 IWL_ERR(trans, "Start IWL Event Log Dump: nothing in log\n");
2083 return pos;
2084 }
2085
2086#ifdef CONFIG_IWLWIFI_DEBUG
2087 if (!(iwl_have_debug_level(IWL_DL_FW_ERRORS)) && !full_log)
2088 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
2089 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
2090#else
2091 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
2092 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
2093#endif
2094 IWL_ERR(priv, "Start IWL Event Log Dump: display last %u entries\n",
2095 size);
2096
2097#ifdef CONFIG_IWLWIFI_DEBUG
2098 if (display) {
2099 if (full_log)
2100 bufsz = capacity * 48;
2101 else
2102 bufsz = size * 48;
2103 *buf = kmalloc(bufsz, GFP_KERNEL);
2104 if (!*buf)
2105 return -ENOMEM;
2106 }
2107 if (iwl_have_debug_level(IWL_DL_FW_ERRORS) || full_log) {
2108 /*
2109 * if uCode has wrapped back to top of log,
2110 * start at the oldest entry,
2111 * i.e the next one that uCode would fill.
2112 */
2113 if (num_wraps)
2114 pos = iwl_print_event_log(priv, next_entry,
2115 capacity - next_entry, mode,
2116 pos, buf, bufsz);
2117 /* (then/else) start at top of log */
2118 pos = iwl_print_event_log(priv, 0,
2119 next_entry, mode, pos, buf, bufsz);
2120 } else
2121 pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
2122 next_entry, size, mode,
2123 pos, buf, bufsz);
2124#else
2125 pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
2126 next_entry, size, mode,
2127 pos, buf, bufsz);
2128#endif
2129 return pos;
2130}
2131
2132static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
2133{
2134 unsigned int reload_msec;
2135 unsigned long reload_jiffies;
2136
2137#ifdef CONFIG_IWLWIFI_DEBUG
2138 if (iwl_have_debug_level(IWL_DL_FW_ERRORS))
2139 iwl_print_rx_config_cmd(priv, IWL_RXON_CTX_BSS);
2140#endif
2141
2142 /* uCode is no longer loaded. */
2143 priv->ucode_loaded = false;
2144
2145 /* Set the FW error flag -- cleared on iwl_down */
2146 set_bit(STATUS_FW_ERROR, &priv->status);
2147
2148 iwl_abort_notification_waits(&priv->notif_wait);
2149
2150 /* Keep the restart process from trying to send host
2151 * commands by clearing the ready bit */
2152 clear_bit(STATUS_READY, &priv->status);
2153
2154 wake_up(&priv->trans->wait_command_queue);
2155
2156 if (!ondemand) {
2157 /*
2158 * If firmware keep reloading, then it indicate something
2159 * serious wrong and firmware having problem to recover
2160 * from it. Instead of keep trying which will fill the syslog
2161 * and hang the system, let's just stop it
2162 */
2163 reload_jiffies = jiffies;
2164 reload_msec = jiffies_to_msecs((long) reload_jiffies -
2165 (long) priv->reload_jiffies);
2166 priv->reload_jiffies = reload_jiffies;
2167 if (reload_msec <= IWL_MIN_RELOAD_DURATION) {
2168 priv->reload_count++;
2169 if (priv->reload_count >= IWL_MAX_CONTINUE_RELOAD_CNT) {
2170 IWL_ERR(priv, "BUG_ON, Stop restarting\n");
2171 return;
2172 }
2173 } else
2174 priv->reload_count = 0;
2175 }
2176
2177 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
2178 if (iwlwifi_mod_params.restart_fw) {
2179 IWL_DEBUG_FW_ERRORS(priv,
2180 "Restarting adapter due to uCode error.\n");
2181 queue_work(priv->workqueue, &priv->restart);
2182 } else
2183 IWL_DEBUG_FW_ERRORS(priv,
2184 "Detected FW error, but not restarting\n");
2185 }
2186}
2187
2188void iwl_nic_error(struct iwl_op_mode *op_mode)
2189{
2190 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2191
2192 IWL_ERR(priv, "Loaded firmware version: %s\n",
2193 priv->fw->fw_version);
2194
2195 iwl_dump_nic_error_log(priv);
2196 iwl_dump_nic_event_log(priv, false, NULL, false);
2197
2198 iwlagn_fw_error(priv, false);
2199}
2200
2201void iwl_cmd_queue_full(struct iwl_op_mode *op_mode)
1430{ 2202{
1431 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); 2203 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
1432 2204
1433 if (!iwl_check_for_ct_kill(priv)) { 2205 if (!iwl_check_for_ct_kill(priv)) {
1434 IWL_ERR(priv, "Restarting adapter queue is full\n"); 2206 IWL_ERR(priv, "Restarting adapter queue is full\n");
1435 iwl_nic_error(op_mode); 2207 iwlagn_fw_error(priv, false);
1436 } 2208 }
1437} 2209}
1438 2210
1439static void iwl_nic_config(struct iwl_op_mode *op_mode) 2211void iwl_nic_config(struct iwl_op_mode *op_mode)
1440{ 2212{
1441 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); 2213 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
1442 2214
1443 cfg(priv)->lib->nic_config(priv); 2215 priv->lib->nic_config(priv);
1444} 2216}
1445 2217
1446static void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, u8 ac) 2218static void iwl_wimax_active(struct iwl_op_mode *op_mode)
1447{ 2219{
1448 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); 2220 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
1449 2221
1450 set_bit(ac, &priv->transport_queue_stop); 2222 clear_bit(STATUS_READY, &priv->status);
1451 ieee80211_stop_queue(priv->hw, ac); 2223 IWL_ERR(priv, "RF is used by WiMAX\n");
2224}
2225
2226void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, int queue)
2227{
2228 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2229 int mq = priv->queue_to_mac80211[queue];
2230
2231 if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
2232 return;
2233
2234 if (atomic_inc_return(&priv->queue_stop_count[mq]) > 1) {
2235 IWL_DEBUG_TX_QUEUES(priv,
2236 "queue %d (mac80211 %d) already stopped\n",
2237 queue, mq);
2238 return;
2239 }
2240
2241 set_bit(mq, &priv->transport_queue_stop);
2242 ieee80211_stop_queue(priv->hw, mq);
1452} 2243}
1453 2244
1454static void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, u8 ac) 2245void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, int queue)
1455{ 2246{
1456 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); 2247 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2248 int mq = priv->queue_to_mac80211[queue];
1457 2249
1458 clear_bit(ac, &priv->transport_queue_stop); 2250 if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
2251 return;
2252
2253 if (atomic_dec_return(&priv->queue_stop_count[mq]) > 0) {
2254 IWL_DEBUG_TX_QUEUES(priv,
2255 "queue %d (mac80211 %d) already awake\n",
2256 queue, mq);
2257 return;
2258 }
2259
2260 clear_bit(mq, &priv->transport_queue_stop);
1459 2261
1460 if (!priv->passive_no_rx) 2262 if (!priv->passive_no_rx)
1461 ieee80211_wake_queue(priv->hw, ac); 2263 ieee80211_wake_queue(priv->hw, mq);
1462} 2264}
1463 2265
1464void iwlagn_lift_passive_no_rx(struct iwl_priv *priv) 2266void iwlagn_lift_passive_no_rx(struct iwl_priv *priv)
1465{ 2267{
1466 int ac; 2268 int mq;
1467 2269
1468 if (!priv->passive_no_rx) 2270 if (!priv->passive_no_rx)
1469 return; 2271 return;
1470 2272
1471 for (ac = IEEE80211_AC_VO; ac < IEEE80211_NUM_ACS; ac++) { 2273 for (mq = 0; mq < IWLAGN_FIRST_AMPDU_QUEUE; mq++) {
1472 if (!test_bit(ac, &priv->transport_queue_stop)) { 2274 if (!test_bit(mq, &priv->transport_queue_stop)) {
1473 IWL_DEBUG_TX_QUEUES(priv, "Wake queue %d"); 2275 IWL_DEBUG_TX_QUEUES(priv, "Wake queue %d", mq);
1474 ieee80211_wake_queue(priv->hw, ac); 2276 ieee80211_wake_queue(priv->hw, mq);
1475 } else { 2277 } else {
1476 IWL_DEBUG_TX_QUEUES(priv, "Don't wake queue %d"); 2278 IWL_DEBUG_TX_QUEUES(priv, "Don't wake queue %d", mq);
1477 } 2279 }
1478 } 2280 }
1479 2281
1480 priv->passive_no_rx = false; 2282 priv->passive_no_rx = false;
1481} 2283}
1482 2284
2285void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
2286{
2287 struct ieee80211_tx_info *info;
2288
2289 info = IEEE80211_SKB_CB(skb);
2290 kmem_cache_free(iwl_tx_cmd_pool, (info->driver_data[1]));
2291 dev_kfree_skb_any(skb);
2292}
2293
2294void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
2295{
2296 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
2297
2298 if (state)
2299 set_bit(STATUS_RF_KILL_HW, &priv->status);
2300 else
2301 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2302
2303 wiphy_rfkill_set_hw_state(priv->hw->wiphy, state);
2304}
2305
1483const struct iwl_op_mode_ops iwl_dvm_ops = { 2306const struct iwl_op_mode_ops iwl_dvm_ops = {
1484 .start = iwl_op_mode_dvm_start, 2307 .start = iwl_op_mode_dvm_start,
1485 .stop = iwl_op_mode_dvm_stop, 2308 .stop = iwl_op_mode_dvm_stop,
@@ -1491,6 +2314,7 @@ const struct iwl_op_mode_ops iwl_dvm_ops = {
1491 .nic_error = iwl_nic_error, 2314 .nic_error = iwl_nic_error,
1492 .cmd_queue_full = iwl_cmd_queue_full, 2315 .cmd_queue_full = iwl_cmd_queue_full,
1493 .nic_config = iwl_nic_config, 2316 .nic_config = iwl_nic_config,
2317 .wimax_active = iwl_wimax_active,
1494}; 2318};
1495 2319
1496/***************************************************************************** 2320/*****************************************************************************
@@ -1541,96 +2365,3 @@ static void __exit iwl_exit(void)
1541 2365
1542module_exit(iwl_exit); 2366module_exit(iwl_exit);
1543module_init(iwl_init); 2367module_init(iwl_init);
1544
1545#ifdef CONFIG_IWLWIFI_DEBUG
1546module_param_named(debug, iwlagn_mod_params.debug_level, uint,
1547 S_IRUGO | S_IWUSR);
1548MODULE_PARM_DESC(debug, "debug output mask");
1549#endif
1550
1551module_param_named(swcrypto, iwlagn_mod_params.sw_crypto, int, S_IRUGO);
1552MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
1553module_param_named(11n_disable, iwlagn_mod_params.disable_11n, uint, S_IRUGO);
1554MODULE_PARM_DESC(11n_disable,
1555 "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX");
1556module_param_named(amsdu_size_8K, iwlagn_mod_params.amsdu_size_8K,
1557 int, S_IRUGO);
1558MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
1559module_param_named(fw_restart, iwlagn_mod_params.restart_fw, int, S_IRUGO);
1560MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
1561
1562module_param_named(ucode_alternative,
1563 iwlagn_mod_params.wanted_ucode_alternative,
1564 int, S_IRUGO);
1565MODULE_PARM_DESC(ucode_alternative,
1566 "specify ucode alternative to use from ucode file");
1567
1568module_param_named(antenna_coupling, iwlagn_mod_params.ant_coupling,
1569 int, S_IRUGO);
1570MODULE_PARM_DESC(antenna_coupling,
1571 "specify antenna coupling in dB (defualt: 0 dB)");
1572
1573module_param_named(bt_ch_inhibition, iwlagn_mod_params.bt_ch_announce,
1574 bool, S_IRUGO);
1575MODULE_PARM_DESC(bt_ch_inhibition,
1576 "Enable BT channel inhibition (default: enable)");
1577
1578module_param_named(plcp_check, iwlagn_mod_params.plcp_check, bool, S_IRUGO);
1579MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
1580
1581module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO);
1582MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])");
1583
1584module_param_named(wd_disable, iwlagn_mod_params.wd_disable, int, S_IRUGO);
1585MODULE_PARM_DESC(wd_disable,
1586 "Disable stuck queue watchdog timer 0=system default, "
1587 "1=disable, 2=enable (default: 0)");
1588
1589/*
1590 * set bt_coex_active to true, uCode will do kill/defer
1591 * every time the priority line is asserted (BT is sending signals on the
1592 * priority line in the PCIx).
1593 * set bt_coex_active to false, uCode will ignore the BT activity and
1594 * perform the normal operation
1595 *
1596 * User might experience transmit issue on some platform due to WiFi/BT
1597 * co-exist problem. The possible behaviors are:
1598 * Able to scan and finding all the available AP
1599 * Not able to associate with any AP
1600 * On those platforms, WiFi communication can be restored by set
1601 * "bt_coex_active" module parameter to "false"
1602 *
1603 * default: bt_coex_active = true (BT_COEX_ENABLE)
1604 */
1605module_param_named(bt_coex_active, iwlagn_mod_params.bt_coex_active,
1606 bool, S_IRUGO);
1607MODULE_PARM_DESC(bt_coex_active, "enable wifi/bt co-exist (default: enable)");
1608
1609module_param_named(led_mode, iwlagn_mod_params.led_mode, int, S_IRUGO);
1610MODULE_PARM_DESC(led_mode, "0=system default, "
1611 "1=On(RF On)/Off(RF Off), 2=blinking, 3=Off (default: 0)");
1612
1613module_param_named(power_save, iwlagn_mod_params.power_save,
1614 bool, S_IRUGO);
1615MODULE_PARM_DESC(power_save,
1616 "enable WiFi power management (default: disable)");
1617
1618module_param_named(power_level, iwlagn_mod_params.power_level,
1619 int, S_IRUGO);
1620MODULE_PARM_DESC(power_level,
1621 "default power save level (range from 1 - 5, default: 1)");
1622
1623module_param_named(auto_agg, iwlagn_mod_params.auto_agg,
1624 bool, S_IRUGO);
1625MODULE_PARM_DESC(auto_agg,
1626 "enable agg w/o check traffic load (default: enable)");
1627
1628/*
1629 * For now, keep using power level 1 instead of automatically
1630 * adjusting ...
1631 */
1632module_param_named(no_sleep_autoadjust, iwlagn_mod_params.no_sleep_autoadjust,
1633 bool, S_IRUGO);
1634MODULE_PARM_DESC(no_sleep_autoadjust,
1635 "don't automatically adjust sleep level "
1636 "according to maximum network latency (default: true)");
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index 3780a03f2716..34900e6523dd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -64,6 +64,43 @@
64#define __iwl_agn_h__ 64#define __iwl_agn_h__
65 65
66#include "iwl-dev.h" 66#include "iwl-dev.h"
67#include "iwl-config.h"
68
69/* The first 11 queues (0-10) are used otherwise */
70#define IWLAGN_FIRST_AMPDU_QUEUE 11
71
72/* AUX (TX during scan dwell) queue */
73#define IWL_AUX_QUEUE 10
74
75/* device operations */
76extern struct iwl_lib_ops iwl1000_lib;
77extern struct iwl_lib_ops iwl2000_lib;
78extern struct iwl_lib_ops iwl2030_lib;
79extern struct iwl_lib_ops iwl5000_lib;
80extern struct iwl_lib_ops iwl5150_lib;
81extern struct iwl_lib_ops iwl6000_lib;
82extern struct iwl_lib_ops iwl6030_lib;
83
84
85#define TIME_UNIT 1024
86
87/*****************************************************
88* DRIVER STATUS FUNCTIONS
89******************************************************/
90#define STATUS_RF_KILL_HW 0
91#define STATUS_CT_KILL 1
92#define STATUS_ALIVE 2
93#define STATUS_READY 3
94#define STATUS_GEO_CONFIGURED 4
95#define STATUS_EXIT_PENDING 5
96#define STATUS_STATISTICS 6
97#define STATUS_SCANNING 7
98#define STATUS_SCAN_ABORTING 8
99#define STATUS_SCAN_HW 9
100#define STATUS_FW_ERROR 10
101#define STATUS_CHANNEL_SWITCH_PENDING 11
102#define STATUS_SCAN_COMPLETE 12
103#define STATUS_POWER_PMI 13
67 104
68struct iwl_ucode_capabilities; 105struct iwl_ucode_capabilities;
69 106
@@ -80,12 +117,9 @@ static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
80void iwl_down(struct iwl_priv *priv); 117void iwl_down(struct iwl_priv *priv);
81void iwl_cancel_deferred_work(struct iwl_priv *priv); 118void iwl_cancel_deferred_work(struct iwl_priv *priv);
82void iwlagn_prepare_restart(struct iwl_priv *priv); 119void iwlagn_prepare_restart(struct iwl_priv *priv);
83void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb);
84int __must_check iwl_rx_dispatch(struct iwl_op_mode *op_mode, 120int __must_check iwl_rx_dispatch(struct iwl_op_mode *op_mode,
85 struct iwl_rx_cmd_buffer *rxb, 121 struct iwl_rx_cmd_buffer *rxb,
86 struct iwl_device_cmd *cmd); 122 struct iwl_device_cmd *cmd);
87void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state);
88void iwl_nic_error(struct iwl_op_mode *op_mode);
89 123
90bool iwl_check_for_ct_kill(struct iwl_priv *priv); 124bool iwl_check_for_ct_kill(struct iwl_priv *priv);
91 125
@@ -103,6 +137,8 @@ int iwl_dvm_send_cmd_pdu(struct iwl_priv *priv, u8 id,
103 u32 flags, u16 len, const void *data); 137 u32 flags, u16 len, const void *data);
104 138
105/* RXON */ 139/* RXON */
140void iwl_connection_init_rx_config(struct iwl_priv *priv,
141 struct iwl_rxon_context *ctx);
106int iwlagn_set_pan_params(struct iwl_priv *priv); 142int iwlagn_set_pan_params(struct iwl_priv *priv);
107int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx); 143int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
108void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx); 144void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
@@ -113,11 +149,15 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
113 u32 changes); 149 u32 changes);
114void iwlagn_config_ht40(struct ieee80211_conf *conf, 150void iwlagn_config_ht40(struct ieee80211_conf *conf,
115 struct iwl_rxon_context *ctx); 151 struct iwl_rxon_context *ctx);
152void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf);
153void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
154 struct iwl_rxon_context *ctx);
155void iwl_set_flags_for_band(struct iwl_priv *priv,
156 struct iwl_rxon_context *ctx,
157 enum ieee80211_band band,
158 struct ieee80211_vif *vif);
116 159
117/* uCode */ 160/* uCode */
118int iwlagn_rx_calib_result(struct iwl_priv *priv,
119 struct iwl_rx_cmd_buffer *rxb,
120 struct iwl_device_cmd *cmd);
121int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type); 161int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type);
122void iwl_send_prio_tbl(struct iwl_priv *priv); 162void iwl_send_prio_tbl(struct iwl_priv *priv);
123int iwl_init_alive_start(struct iwl_priv *priv); 163int iwl_init_alive_start(struct iwl_priv *priv);
@@ -128,14 +168,25 @@ int iwl_send_calib_results(struct iwl_priv *priv);
128int iwl_calib_set(struct iwl_priv *priv, 168int iwl_calib_set(struct iwl_priv *priv,
129 const struct iwl_calib_hdr *cmd, int len); 169 const struct iwl_calib_hdr *cmd, int len);
130void iwl_calib_free_results(struct iwl_priv *priv); 170void iwl_calib_free_results(struct iwl_priv *priv);
171int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
172 char **buf, bool display);
173int iwlagn_hw_valid_rtc_data_addr(u32 addr);
131 174
132/* lib */ 175/* lib */
133int iwlagn_send_tx_power(struct iwl_priv *priv); 176int iwlagn_send_tx_power(struct iwl_priv *priv);
134void iwlagn_temperature(struct iwl_priv *priv); 177void iwlagn_temperature(struct iwl_priv *priv);
135u16 iwl_eeprom_calib_version(struct iwl_shared *shrd);
136int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control); 178int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
137void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control); 179void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
138int iwlagn_send_beacon_cmd(struct iwl_priv *priv); 180int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
181int iwl_send_statistics_request(struct iwl_priv *priv,
182 u8 flags, bool clear);
183
184static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
185 struct iwl_priv *priv, enum ieee80211_band band)
186{
187 return priv->hw->wiphy->bands[band];
188}
189
139#ifdef CONFIG_PM_SLEEP 190#ifdef CONFIG_PM_SLEEP
140int iwlagn_send_patterns(struct iwl_priv *priv, 191int iwlagn_send_patterns(struct iwl_priv *priv,
141 struct cfg80211_wowlan *wowlan); 192 struct cfg80211_wowlan *wowlan);
@@ -145,6 +196,7 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan);
145/* rx */ 196/* rx */
146int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band); 197int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
147void iwl_setup_rx_handlers(struct iwl_priv *priv); 198void iwl_setup_rx_handlers(struct iwl_priv *priv);
199void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
148 200
149 201
150/* tx */ 202/* tx */
@@ -189,6 +241,31 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
189/* scan */ 241/* scan */
190void iwlagn_post_scan(struct iwl_priv *priv); 242void iwlagn_post_scan(struct iwl_priv *priv);
191void iwlagn_disable_roc(struct iwl_priv *priv); 243void iwlagn_disable_roc(struct iwl_priv *priv);
244int iwl_force_rf_reset(struct iwl_priv *priv, bool external);
245void iwl_init_scan_params(struct iwl_priv *priv);
246int iwl_scan_cancel(struct iwl_priv *priv);
247void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
248void iwl_force_scan_end(struct iwl_priv *priv);
249void iwl_internal_short_hw_scan(struct iwl_priv *priv);
250void iwl_setup_rx_scan_handlers(struct iwl_priv *priv);
251void iwl_setup_scan_deferred_work(struct iwl_priv *priv);
252void iwl_cancel_scan_deferred_work(struct iwl_priv *priv);
253int __must_check iwl_scan_initiate(struct iwl_priv *priv,
254 struct ieee80211_vif *vif,
255 enum iwl_scan_type scan_type,
256 enum ieee80211_band band);
257
258/* For faster active scanning, scan will move to the next channel if fewer than
259 * PLCP_QUIET_THRESH packets are heard on this channel within
260 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
261 * time if it's a quiet channel (nothing responded to our probe, and there's
262 * no other traffic).
263 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
264#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
265#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
266
267#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7)
268
192 269
193/* bt coex */ 270/* bt coex */
194void iwlagn_send_advance_bt_config(struct iwl_priv *priv); 271void iwlagn_send_advance_bt_config(struct iwl_priv *priv);
@@ -201,6 +278,12 @@ void iwlagn_bt_cancel_deferred_work(struct iwl_priv *priv);
201void iwlagn_bt_coex_rssi_monitor(struct iwl_priv *priv); 278void iwlagn_bt_coex_rssi_monitor(struct iwl_priv *priv);
202void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena); 279void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena);
203 280
281static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv)
282{
283 return priv->cfg->bt_params &&
284 priv->cfg->bt_params->advanced_bt_coexist;
285}
286
204#ifdef CONFIG_IWLWIFI_DEBUG 287#ifdef CONFIG_IWLWIFI_DEBUG
205const char *iwl_get_tx_fail_reason(u32 status); 288const char *iwl_get_tx_fail_reason(u32 status);
206const char *iwl_get_agg_tx_fail_reason(u16 status); 289const char *iwl_get_agg_tx_fail_reason(u16 status);
@@ -239,8 +322,6 @@ void iwl_deactivate_station(struct iwl_priv *priv, const u8 sta_id,
239u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx, 322u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
240 const u8 *addr, bool is_ap, struct ieee80211_sta *sta); 323 const u8 *addr, bool is_ap, struct ieee80211_sta *sta);
241 324
242void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
243 u8 sta_id, struct iwl_link_quality_cmd *link_cmd);
244int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx, 325int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
245 struct iwl_link_quality_cmd *lq, u8 flags, bool init); 326 struct iwl_link_quality_cmd *lq, u8 flags, bool init);
246int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, 327int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
@@ -248,6 +329,9 @@ int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
248int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx, 329int iwl_sta_update_ht(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
249 struct ieee80211_sta *sta); 330 struct ieee80211_sta *sta);
250 331
332bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
333 struct iwl_rxon_context *ctx,
334 struct ieee80211_sta_ht_cap *ht_cap);
251 335
252static inline int iwl_sta_id(struct ieee80211_sta *sta) 336static inline int iwl_sta_id(struct ieee80211_sta *sta)
253{ 337{
@@ -305,9 +389,6 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
305 return cpu_to_le32(flags|(u32)rate); 389 return cpu_to_le32(flags|(u32)rate);
306} 390}
307 391
308/* eeprom */
309void iwl_eeprom_get_mac(const struct iwl_shared *shrd, u8 *mac);
310
311extern int iwl_alive_start(struct iwl_priv *priv); 392extern int iwl_alive_start(struct iwl_priv *priv);
312/* svtool */ 393/* svtool */
313#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE 394#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
@@ -386,13 +467,35 @@ static inline int iwl_is_ready_rf(struct iwl_priv *priv)
386 return iwl_is_ready(priv); 467 return iwl_is_ready(priv);
387} 468}
388 469
470static inline void iwl_dvm_set_pmi(struct iwl_priv *priv, bool state)
471{
472 if (state)
473 set_bit(STATUS_POWER_PMI, &priv->status);
474 else
475 clear_bit(STATUS_POWER_PMI, &priv->status);
476 iwl_trans_set_pmi(priv->trans, state);
477}
478
479#ifdef CONFIG_IWLWIFI_DEBUGFS
480int iwl_dbgfs_register(struct iwl_priv *priv, const char *name);
481void iwl_dbgfs_unregister(struct iwl_priv *priv);
482#else
483static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
484{
485 return 0;
486}
487static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
488{
489}
490#endif /* CONFIG_IWLWIFI_DEBUGFS */
491
389#ifdef CONFIG_IWLWIFI_DEBUG 492#ifdef CONFIG_IWLWIFI_DEBUG
390#define IWL_DEBUG_QUIET_RFKILL(m, fmt, args...) \ 493#define IWL_DEBUG_QUIET_RFKILL(m, fmt, args...) \
391do { \ 494do { \
392 if (!iwl_is_rfkill((m))) \ 495 if (!iwl_is_rfkill((m))) \
393 IWL_ERR(m, fmt, ##args); \ 496 IWL_ERR(m, fmt, ##args); \
394 else \ 497 else \
395 __iwl_err(trans(m)->dev, true, \ 498 __iwl_err((m)->dev, true, \
396 !iwl_have_debug_level(IWL_DL_RADIO), \ 499 !iwl_have_debug_level(IWL_DL_RADIO), \
397 fmt, ##args); \ 500 fmt, ##args); \
398} while (0) 501} while (0)
@@ -402,8 +505,98 @@ do { \
402 if (!iwl_is_rfkill((m))) \ 505 if (!iwl_is_rfkill((m))) \
403 IWL_ERR(m, fmt, ##args); \ 506 IWL_ERR(m, fmt, ##args); \
404 else \ 507 else \
405 __iwl_err(trans(m)->dev, true, true, fmt, ##args); \ 508 __iwl_err((m)->dev, true, true, fmt, ##args); \
406} while (0) 509} while (0)
407#endif /* CONFIG_IWLWIFI_DEBUG */ 510#endif /* CONFIG_IWLWIFI_DEBUG */
408 511
512extern const char *iwl_dvm_cmd_strings[REPLY_MAX];
513
514static inline const char *iwl_dvm_get_cmd_string(u8 cmd)
515{
516 const char *s = iwl_dvm_cmd_strings[cmd];
517 if (s)
518 return s;
519 return "UNKNOWN";
520}
521
522/* API method exported for mvm hybrid state */
523void iwl_setup_deferred_work(struct iwl_priv *priv);
524int iwl_send_wimax_coex(struct iwl_priv *priv);
525int iwl_send_bt_env(struct iwl_priv *priv, u8 action, u8 type);
526void iwl_debug_config(struct iwl_priv *priv);
527void iwl_set_hw_params(struct iwl_priv *priv);
528void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags);
529int iwl_init_drv(struct iwl_priv *priv);
530void iwl_uninit_drv(struct iwl_priv *priv);
531void iwl_send_bt_config(struct iwl_priv *priv);
532void iwl_rf_kill_ct_config(struct iwl_priv *priv);
533int iwl_setup_interface(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
534void iwl_teardown_interface(struct iwl_priv *priv,
535 struct ieee80211_vif *vif,
536 bool mode_change);
537int iwl_full_rxon_required(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
538void iwlagn_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
539void iwlagn_check_needed_chains(struct iwl_priv *priv,
540 struct iwl_rxon_context *ctx,
541 struct ieee80211_bss_conf *bss_conf);
542void iwlagn_chain_noise_reset(struct iwl_priv *priv);
543int iwlagn_update_beacon(struct iwl_priv *priv,
544 struct ieee80211_vif *vif);
545void iwl_tt_handler(struct iwl_priv *priv);
546void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode);
547void iwl_stop_sw_queue(struct iwl_op_mode *op_mode, int queue);
548void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state);
549void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb);
550void iwl_nic_error(struct iwl_op_mode *op_mode);
551void iwl_cmd_queue_full(struct iwl_op_mode *op_mode);
552void iwl_nic_config(struct iwl_op_mode *op_mode);
553int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
554 struct ieee80211_sta *sta, bool set);
555void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
556 enum ieee80211_rssi_event rssi_event);
557int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw);
558int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw);
559void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop);
560void iwl_wake_sw_queue(struct iwl_op_mode *op_mode, int queue);
561void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
562 struct ieee80211_channel_switch *ch_switch);
563int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
564 struct ieee80211_vif *vif,
565 struct ieee80211_sta *sta,
566 enum ieee80211_sta_state old_state,
567 enum ieee80211_sta_state new_state);
568int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
569 struct ieee80211_vif *vif,
570 enum ieee80211_ampdu_mlme_action action,
571 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
572 u8 buf_size);
573int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
574 struct ieee80211_vif *vif,
575 struct cfg80211_scan_request *req);
576void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
577 struct ieee80211_vif *vif,
578 enum sta_notify_cmd cmd,
579 struct ieee80211_sta *sta);
580void iwlagn_configure_filter(struct ieee80211_hw *hw,
581 unsigned int changed_flags,
582 unsigned int *total_flags,
583 u64 multicast);
584int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
585 struct ieee80211_vif *vif, u16 queue,
586 const struct ieee80211_tx_queue_params *params);
587void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
588 struct ieee80211_vif *vif,
589 struct cfg80211_gtk_rekey_data *data);
590void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
591 struct ieee80211_vif *vif,
592 struct ieee80211_key_conf *keyconf,
593 struct ieee80211_sta *sta,
594 u32 iv32, u16 *phase1key);
595int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
596 struct ieee80211_vif *vif,
597 struct ieee80211_sta *sta,
598 struct ieee80211_key_conf *key);
599void iwlagn_mac_stop(struct ieee80211_hw *hw);
600void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
601int iwlagn_mac_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
409#endif /* __iwl_agn_h__ */ 602#endif /* __iwl_agn_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 9ed73e5154be..296347a8290f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -1918,7 +1918,7 @@ struct iwl_basic_bt_cmd {
1918 __le16 valid; 1918 __le16 valid;
1919}; 1919};
1920 1920
1921struct iwl6000_bt_cmd { 1921struct iwl_bt_cmd_v1 {
1922 struct iwl_basic_bt_cmd basic; 1922 struct iwl_basic_bt_cmd basic;
1923 u8 prio_boost; 1923 u8 prio_boost;
1924 /* 1924 /*
@@ -1929,7 +1929,7 @@ struct iwl6000_bt_cmd {
1929 __le16 rx_prio_boost; /* SW boost of WiFi rx priority */ 1929 __le16 rx_prio_boost; /* SW boost of WiFi rx priority */
1930}; 1930};
1931 1931
1932struct iwl2000_bt_cmd { 1932struct iwl_bt_cmd_v2 {
1933 struct iwl_basic_bt_cmd basic; 1933 struct iwl_basic_bt_cmd basic;
1934 __le32 prio_boost; 1934 __le32 prio_boost;
1935 /* 1935 /*
diff --git a/drivers/net/wireless/iwlwifi/iwl-shared.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index b515d657a0ad..67b28aa7f9be 100644
--- a/drivers/net/wireless/iwlwifi/iwl-shared.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -60,136 +60,29 @@
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 * 61 *
62 *****************************************************************************/ 62 *****************************************************************************/
63#ifndef __iwl_shared_h__ 63#ifndef __IWL_CONFIG_H__
64#define __iwl_shared_h__ 64#define __IWL_CONFIG_H__
65 65
66#include <linux/types.h> 66#include <linux/types.h>
67#include <linux/spinlock.h>
68#include <linux/gfp.h>
69#include <net/mac80211.h> 67#include <net/mac80211.h>
70 68
71#include "iwl-commands.h"
72#include "iwl-fw.h"
73 69
74/** 70enum iwl_device_family {
75 * DOC: shared area - role and goal 71 IWL_DEVICE_FAMILY_UNDEFINED,
76 * 72 IWL_DEVICE_FAMILY_1000,
77 * The shared area contains all the data exported by the upper layer to the 73 IWL_DEVICE_FAMILY_100,
78 * other layers. Since the bus and transport layer shouldn't dereference 74 IWL_DEVICE_FAMILY_2000,
79 * iwl_priv, all the data needed by the upper layer and the transport / bus 75 IWL_DEVICE_FAMILY_2030,
80 * layer must be here. 76 IWL_DEVICE_FAMILY_105,
81 * The shared area also holds pointer to all the other layers. This allows a 77 IWL_DEVICE_FAMILY_135,
82 * layer to call a function from another layer. 78 IWL_DEVICE_FAMILY_5000,
83 * 79 IWL_DEVICE_FAMILY_5150,
84 * NOTE: All the layers hold a pointer to the shared area which must be shrd. 80 IWL_DEVICE_FAMILY_6000,
85 * A few macros assume that (_m)->shrd points to the shared area no matter 81 IWL_DEVICE_FAMILY_6000i,
86 * what _m is. 82 IWL_DEVICE_FAMILY_6005,
87 * 83 IWL_DEVICE_FAMILY_6030,
88 * gets notifications about enumeration, suspend, resume. 84 IWL_DEVICE_FAMILY_6050,
89 * For the moment, the bus layer is not a linux kernel module as itself, and 85 IWL_DEVICE_FAMILY_6150,
90 * the module_init function of the driver must call the bus specific
91 * registration functions. These functions are listed at the end of this file.
92 * For the moment, there is only one implementation of this interface: PCI-e.
93 * This implementation is iwl-pci.c
94 */
95
96struct iwl_priv;
97struct iwl_trans;
98struct iwl_sensitivity_ranges;
99struct iwl_trans_ops;
100
101#define DRV_NAME "iwlwifi"
102#define IWLWIFI_VERSION "in-tree:"
103#define DRV_COPYRIGHT "Copyright(c) 2003-2012 Intel Corporation"
104#define DRV_AUTHOR "<ilw@linux.intel.com>"
105
106extern struct iwl_mod_params iwlagn_mod_params;
107
108#define IWL_DISABLE_HT_ALL BIT(0)
109#define IWL_DISABLE_HT_TXAGG BIT(1)
110#define IWL_DISABLE_HT_RXAGG BIT(2)
111
112/**
113 * struct iwl_mod_params
114 *
115 * Holds the module parameters
116 *
117 * @sw_crypto: using hardware encryption, default = 0
118 * @disable_11n: disable 11n capabilities, default = 0,
119 * use IWL_DISABLE_HT_* constants
120 * @amsdu_size_8K: enable 8K amsdu size, default = 1
121 * @antenna: both antennas (use diversity), default = 0
122 * @restart_fw: restart firmware, default = 1
123 * @plcp_check: enable plcp health check, default = true
124 * @ack_check: disable ack health check, default = false
125 * @wd_disable: enable stuck queue check, default = 0
126 * @bt_coex_active: enable bt coex, default = true
127 * @led_mode: system default, default = 0
128 * @no_sleep_autoadjust: disable autoadjust, default = true
129 * @power_save: disable power save, default = false
130 * @power_level: power level, default = 1
131 * @debug_level: levels are IWL_DL_*
132 * @ant_coupling: antenna coupling in dB, default = 0
133 * @bt_ch_announce: BT channel inhibition, default = enable
134 * @wanted_ucode_alternative: ucode alternative to use, default = 1
135 * @auto_agg: enable agg. without check, default = true
136 */
137struct iwl_mod_params {
138 int sw_crypto;
139 unsigned int disable_11n;
140 int amsdu_size_8K;
141 int antenna;
142 int restart_fw;
143 bool plcp_check;
144 bool ack_check;
145 int wd_disable;
146 bool bt_coex_active;
147 int led_mode;
148 bool no_sleep_autoadjust;
149 bool power_save;
150 int power_level;
151 u32 debug_level;
152 int ant_coupling;
153 bool bt_ch_announce;
154 int wanted_ucode_alternative;
155 bool auto_agg;
156};
157
158/**
159 * struct iwl_hw_params
160 *
161 * Holds the module parameters
162 *
163 * @num_ampdu_queues: num of ampdu queues
164 * @tx_chains_num: Number of TX chains
165 * @rx_chains_num: Number of RX chains
166 * @valid_tx_ant: usable antennas for TX
167 * @valid_rx_ant: usable antennas for RX
168 * @ht40_channel: is 40MHz width possible: BIT(IEEE80211_BAND_XXX)
169 * @sku: sku read from EEPROM
170 * @rx_page_order: Rx buffer page order
171 * @ct_kill_threshold: temperature threshold - in hw dependent unit
172 * @ct_kill_exit_threshold: when to reeable the device - in hw dependent unit
173 * relevant for 1000, 6000 and up
174 * @wd_timeout: TX queues watchdog timeout
175 * @struct iwl_sensitivity_ranges: range of sensitivity values
176 * @use_rts_for_aggregation: use rts/cts protection for HT traffic
177 */
178struct iwl_hw_params {
179 u8 num_ampdu_queues;
180 u8 tx_chains_num;
181 u8 rx_chains_num;
182 u8 valid_tx_ant;
183 u8 valid_rx_ant;
184 u8 ht40_channel;
185 bool use_rts_for_aggregation;
186 u16 sku;
187 u32 rx_page_order;
188 u32 ct_kill_threshold;
189 u32 ct_kill_exit_threshold;
190 unsigned int wd_timeout;
191
192 const struct iwl_sensitivity_ranges *sens;
193}; 86};
194 87
195/* 88/*
@@ -209,6 +102,34 @@ enum iwl_led_mode {
209}; 102};
210 103
211/* 104/*
105 * This is the threshold value of plcp error rate per 100mSecs. It is
106 * used to set and check for the validity of plcp_delta.
107 */
108#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN 1
109#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF 50
110#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF 100
111#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF 200
112#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX 255
113#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE 0
114
115/* TX queue watchdog timeouts in mSecs */
116#define IWL_WATCHHDOG_DISABLED 0
117#define IWL_DEF_WD_TIMEOUT 2000
118#define IWL_LONG_WD_TIMEOUT 10000
119#define IWL_MAX_WD_TIMEOUT 120000
120
121/* Antenna presence definitions */
122#define ANT_NONE 0x0
123#define ANT_A BIT(0)
124#define ANT_B BIT(1)
125#define ANT_C BIT(2)
126#define ANT_AB (ANT_A | ANT_B)
127#define ANT_AC (ANT_A | ANT_C)
128#define ANT_BC (ANT_B | ANT_C)
129#define ANT_ABC (ANT_A | ANT_B | ANT_C)
130
131
132/*
212 * @max_ll_items: max number of OTP blocks 133 * @max_ll_items: max number of OTP blocks
213 * @shadow_ram_support: shadow support for OTP memory 134 * @shadow_ram_support: shadow support for OTP memory
214 * @led_compensation: compensate on the led on/off time per HW according 135 * @led_compensation: compensate on the led on/off time per HW according
@@ -217,7 +138,6 @@ enum iwl_led_mode {
217 * @chain_noise_num_beacons: number of beacons used to compute chain noise 138 * @chain_noise_num_beacons: number of beacons used to compute chain noise
218 * @adv_thermal_throttle: support advance thermal throttle 139 * @adv_thermal_throttle: support advance thermal throttle
219 * @support_ct_kill_exit: support ct kill exit condition 140 * @support_ct_kill_exit: support ct kill exit condition
220 * @support_wimax_coexist: support wimax/wifi co-exist
221 * @plcp_delta_threshold: plcp error rate threshold used to trigger 141 * @plcp_delta_threshold: plcp error rate threshold used to trigger
222 * radio tuning when there is a high receiving plcp error rate 142 * radio tuning when there is a high receiving plcp error rate
223 * @chain_noise_scale: default chain noise scale used for gain computation 143 * @chain_noise_scale: default chain noise scale used for gain computation
@@ -226,12 +146,10 @@ enum iwl_led_mode {
226 * @shadow_reg_enable: HW shadhow register bit 146 * @shadow_reg_enable: HW shadhow register bit
227 * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up 147 * @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
228 * @no_idle_support: do not support idle mode 148 * @no_idle_support: do not support idle mode
229 * wd_disable: disable watchdog timer
230 */ 149 */
231struct iwl_base_params { 150struct iwl_base_params {
232 int eeprom_size; 151 int eeprom_size;
233 int num_of_queues; /* def: HW dependent */ 152 int num_of_queues; /* def: HW dependent */
234 int num_of_ampdu_queues;/* def: HW dependent */
235 /* for iwl_apm_init() */ 153 /* for iwl_apm_init() */
236 u32 pll_cfg_val; 154 u32 pll_cfg_val;
237 155
@@ -240,7 +158,6 @@ struct iwl_base_params {
240 u16 led_compensation; 158 u16 led_compensation;
241 bool adv_thermal_throttle; 159 bool adv_thermal_throttle;
242 bool support_ct_kill_exit; 160 bool support_ct_kill_exit;
243 const bool support_wimax_coexist;
244 u8 plcp_delta_threshold; 161 u8 plcp_delta_threshold;
245 s32 chain_noise_scale; 162 s32 chain_noise_scale;
246 unsigned int wd_timeout; 163 unsigned int wd_timeout;
@@ -248,7 +165,6 @@ struct iwl_base_params {
248 const bool shadow_reg_enable; 165 const bool shadow_reg_enable;
249 const bool hd_v2; 166 const bool hd_v2;
250 const bool no_idle_support; 167 const bool no_idle_support;
251 const bool wd_disable;
252}; 168};
253 169
254/* 170/*
@@ -292,28 +208,21 @@ struct iwl_ht_params {
292 * @eeprom_ver: EEPROM version 208 * @eeprom_ver: EEPROM version
293 * @eeprom_calib_ver: EEPROM calibration version 209 * @eeprom_calib_ver: EEPROM calibration version
294 * @lib: pointer to the lib ops 210 * @lib: pointer to the lib ops
295 * @additional_nic_config: additional nic configuration
296 * @base_params: pointer to basic parameters 211 * @base_params: pointer to basic parameters
297 * @ht_params: point to ht patameters 212 * @ht_params: point to ht patameters
298 * @bt_params: pointer to bt parameters 213 * @bt_params: pointer to bt parameters
299 * @need_temp_offset_calib: need to perform temperature offset calibration 214 * @need_temp_offset_calib: need to perform temperature offset calibration
300 * @no_xtal_calib: some devices do not need crystal calibration data, 215 * @no_xtal_calib: some devices do not need crystal calibration data,
301 * don't send it to those 216 * don't send it to those
302 * @scan_rx_antennas: available antenna for scan operation
303 * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off) 217 * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
304 * @adv_pm: advance power management 218 * @adv_pm: advance power management
305 * @rx_with_siso_diversity: 1x1 device with rx antenna diversity 219 * @rx_with_siso_diversity: 1x1 device with rx antenna diversity
306 * @internal_wimax_coex: internal wifi/wimax combo device 220 * @internal_wimax_coex: internal wifi/wimax combo device
307 * @iq_invert: I/Q inversion
308 * @temp_offset_v2: support v2 of temperature offset calibration 221 * @temp_offset_v2: support v2 of temperature offset calibration
309 * 222 *
310 * We enable the driver to be backward compatible wrt API version. The 223 * We enable the driver to be backward compatible wrt. hardware features.
311 * driver specifies which APIs it supports (with @ucode_api_max being the 224 * API differences in uCode shouldn't be handled here but through TLVs
312 * highest and @ucode_api_min the lowest). Firmware will only be loaded if 225 * and/or the uCode API version instead.
313 * it has a supported API version.
314 *
315 * The ideal usage of this infrastructure is to treat a new ucode API
316 * release as a new hardware revision.
317 */ 226 */
318struct iwl_cfg { 227struct iwl_cfg {
319 /* params specific to an individual device within a device family */ 228 /* params specific to an individual device within a device family */
@@ -322,14 +231,13 @@ struct iwl_cfg {
322 const unsigned int ucode_api_max; 231 const unsigned int ucode_api_max;
323 const unsigned int ucode_api_ok; 232 const unsigned int ucode_api_ok;
324 const unsigned int ucode_api_min; 233 const unsigned int ucode_api_min;
234 const enum iwl_device_family device_family;
325 const u32 max_data_size; 235 const u32 max_data_size;
326 const u32 max_inst_size; 236 const u32 max_inst_size;
327 u8 valid_tx_ant; 237 u8 valid_tx_ant;
328 u8 valid_rx_ant; 238 u8 valid_rx_ant;
329 u16 eeprom_ver; 239 u16 eeprom_ver;
330 u16 eeprom_calib_ver; 240 u16 eeprom_calib_ver;
331 const struct iwl_lib_ops *lib;
332 void (*additional_nic_config)(struct iwl_priv *priv);
333 /* params not likely to change within a device family */ 241 /* params not likely to change within a device family */
334 const struct iwl_base_params *base_params; 242 const struct iwl_base_params *base_params;
335 /* params likely to change within a device family */ 243 /* params likely to change within a device family */
@@ -337,99 +245,11 @@ struct iwl_cfg {
337 const struct iwl_bt_params *bt_params; 245 const struct iwl_bt_params *bt_params;
338 const bool need_temp_offset_calib; /* if used set to true */ 246 const bool need_temp_offset_calib; /* if used set to true */
339 const bool no_xtal_calib; 247 const bool no_xtal_calib;
340 u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
341 enum iwl_led_mode led_mode; 248 enum iwl_led_mode led_mode;
342 const bool adv_pm; 249 const bool adv_pm;
343 const bool rx_with_siso_diversity; 250 const bool rx_with_siso_diversity;
344 const bool internal_wimax_coex; 251 const bool internal_wimax_coex;
345 const bool iq_invert;
346 const bool temp_offset_v2; 252 const bool temp_offset_v2;
347}; 253};
348 254
349/** 255#endif /* __IWL_CONFIG_H__ */
350 * struct iwl_shared - shared fields for all the layers of the driver
351 *
352 * @status: STATUS_*
353 * @wowlan: are we running wowlan uCode
354 * @valid_contexts: microcode/device supports multiple contexts
355 * @bus: pointer to the bus layer data
356 * @cfg: see struct iwl_cfg
357 * @priv: pointer to the upper layer data
358 * @trans: pointer to the transport layer data
359 * @nic: pointer to the nic data
360 * @hw_params: see struct iwl_hw_params
361 * @lock: protect general shared data
362 * @eeprom: pointer to the eeprom/OTP image
363 * @ucode_type: indicator of loaded ucode image
364 * @device_pointers: pointers to ucode event tables
365 */
366struct iwl_shared {
367 unsigned long status;
368 u8 valid_contexts;
369
370 const struct iwl_cfg *cfg;
371 struct iwl_trans *trans;
372 void *drv;
373 struct iwl_hw_params hw_params;
374 const struct iwl_fw *fw;
375
376 /* eeprom -- this is in the card's little endian byte order */
377 u8 *eeprom;
378
379 /* ucode related variables */
380 enum iwl_ucode_type ucode_type;
381
382 struct {
383 u32 error_event_table;
384 u32 log_event_table;
385 } device_pointers;
386
387};
388
389/*Whatever _m is (iwl_trans, iwl_priv, these macros will work */
390#define cfg(_m) ((_m)->shrd->cfg)
391#define trans(_m) ((_m)->shrd->trans)
392#define hw_params(_m) ((_m)->shrd->hw_params)
393
394static inline bool iwl_have_debug_level(u32 level)
395{
396 return iwlagn_mod_params.debug_level & level;
397}
398
399enum iwl_rxon_context_id {
400 IWL_RXON_CTX_BSS,
401 IWL_RXON_CTX_PAN,
402
403 NUM_IWL_RXON_CTX
404};
405
406int iwlagn_hw_valid_rtc_data_addr(u32 addr);
407const char *get_cmd_string(u8 cmd);
408
409#define IWL_CMD(x) case x: return #x
410
411/*****************************************************
412* DRIVER STATUS FUNCTIONS
413******************************************************/
414#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
415/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */
416#define STATUS_INT_ENABLED 2
417#define STATUS_RF_KILL_HW 3
418#define STATUS_CT_KILL 4
419#define STATUS_INIT 5
420#define STATUS_ALIVE 6
421#define STATUS_READY 7
422#define STATUS_TEMPERATURE 8
423#define STATUS_GEO_CONFIGURED 9
424#define STATUS_EXIT_PENDING 10
425#define STATUS_STATISTICS 12
426#define STATUS_SCANNING 13
427#define STATUS_SCAN_ABORTING 14
428#define STATUS_SCAN_HW 15
429#define STATUS_POWER_PMI 16
430#define STATUS_FW_ERROR 17
431#define STATUS_DEVICE_ENABLED 18
432#define STATUS_CHANNEL_SWITCH_PENDING 19
433#define STATUS_SCAN_COMPLETE 20
434
435#endif /* #__iwl_shared_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
deleted file mode 100644
index 46490d3b95b9..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ /dev/null
@@ -1,1480 +0,0 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/etherdevice.h>
32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <net/mac80211.h>
35
36#include "iwl-eeprom.h"
37#include "iwl-debug.h"
38#include "iwl-core.h"
39#include "iwl-io.h"
40#include "iwl-power.h"
41#include "iwl-shared.h"
42#include "iwl-agn.h"
43#include "iwl-trans.h"
44
/* All-ones MAC address used as the broadcast BSSID/station address. */
const u8 iwl_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };

#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */

/*
 * iwl_init_ht_hw_capab - fill the HT capabilities advertised to mac80211
 * @priv: driver private data (read-only here)
 * @ht_info: mac80211 HT capability structure to fill in
 * @band: band the capabilities are built for
 *
 * Builds HT capability flags and the Rx/Tx MCS masks from hardware
 * parameters (rx/tx chain counts, HT40 channel support) and from the
 * iwlagn module parameters.
 */
static void iwl_init_ht_hw_capab(const struct iwl_priv *priv,
			struct ieee80211_sta_ht_cap *ht_info,
			enum ieee80211_band band)
{
	u16 max_bit_rate = 0;
	u8 rx_chains_num = hw_params(priv).rx_chains_num;
	u8 tx_chains_num = hw_params(priv).tx_chains_num;

	ht_info->cap = 0;
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));

	ht_info->ht_supported = true;

	if (cfg(priv)->ht_params &&
	    cfg(priv)->ht_params->ht_greenfield_support)
		ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
	ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
	max_bit_rate = MAX_BIT_RATE_20_MHZ;
	if (hw_params(priv).ht40_channel & BIT(band)) {
		ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
		ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
		/* bit 0 of rx_mask[4] advertises MCS 32 (HT40 duplicate) */
		ht_info->mcs.rx_mask[4] = 0x01;
		max_bit_rate = MAX_BIT_RATE_40_MHZ;
	}

	if (iwlagn_mod_params.amsdu_size_8K)
		ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;

	ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
	ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;

	/* One Rx MCS-mask byte (8 MCS indices) per available Rx chain. */
	ht_info->mcs.rx_mask[0] = 0xFF;
	if (rx_chains_num >= 2)
		ht_info->mcs.rx_mask[1] = 0xFF;
	if (rx_chains_num >= 3)
		ht_info->mcs.rx_mask[2] = 0xFF;

	/* Highest supported Rx data rate */
	max_bit_rate *= rx_chains_num;
	WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
	ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);

	/* Tx MCS capabilities */
	ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
	if (tx_chains_num != rx_chains_num) {
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}
}
99
/**
 * iwl_init_geos - Initialize mac80211's geo/channel info based from eeprom
 * @priv: driver private data
 *
 * Allocates the mac80211 channel and legacy-rate arrays, populates the
 * 2.4 GHz and 5.2 GHz supported-band structures from the EEPROM channel
 * table, and records the maximum device Tx power found.  No-op (returns 0)
 * if the bands were already initialized.
 *
 * Return: 0 on success, -ENOMEM if an allocation fails.
 */
int iwl_init_geos(struct iwl_priv *priv)
{
	struct iwl_channel_info *ch;
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *channels;
	struct ieee80211_channel *geo_ch;
	struct ieee80211_rate *rates;
	int i = 0;
	s8 max_tx_power = IWLAGN_TX_POWER_TARGET_POWER_MIN;

	/* Bands already set up (e.g. on restart): nothing to do. */
	if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
	    priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
		IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
		set_bit(STATUS_GEO_CONFIGURED, &priv->status);
		return 0;
	}

	/* One shared array holds the channels of both bands. */
	channels = kcalloc(priv->channel_count,
			   sizeof(struct ieee80211_channel), GFP_KERNEL);
	if (!channels)
		return -ENOMEM;

	rates = kcalloc(IWL_RATE_COUNT_LEGACY, sizeof(struct ieee80211_rate),
			GFP_KERNEL);
	if (!rates) {
		kfree(channels);
		return -ENOMEM;
	}

	/* 5.2GHz channels start after the 2.4GHz channels */
	sband = &priv->bands[IEEE80211_BAND_5GHZ];
	sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
	/* just OFDM */
	sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
	sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;

	if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE)
		iwl_init_ht_hw_capab(priv, &sband->ht_cap,
				     IEEE80211_BAND_5GHZ);

	sband = &priv->bands[IEEE80211_BAND_2GHZ];
	sband->channels = channels;
	/* OFDM & CCK */
	sband->bitrates = rates;
	sband->n_bitrates = IWL_RATE_COUNT_LEGACY;

	if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE)
		iwl_init_ht_hw_capab(priv, &sband->ht_cap,
				     IEEE80211_BAND_2GHZ);

	/* Remember the allocations so iwl_free_geos() can undo them. */
	priv->ieee_channels = channels;
	priv->ieee_rates = rates;

	/* Copy each valid EEPROM channel into its band's channel list. */
	for (i = 0; i < priv->channel_count; i++) {
		ch = &priv->channel_info[i];

		/* FIXME: might be removed if scan is OK */
		if (!is_channel_valid(ch))
			continue;

		sband = &priv->bands[ch->band];

		geo_ch = &sband->channels[sband->n_channels++];

		geo_ch->center_freq =
			ieee80211_channel_to_frequency(ch->channel, ch->band);
		geo_ch->max_power = ch->max_power_avg;
		geo_ch->max_antenna_gain = 0xff;
		geo_ch->hw_value = ch->channel;

		if (is_channel_valid(ch)) {
			if (!(ch->flags & EEPROM_CHANNEL_IBSS))
				geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;

			if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
				geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;

			if (ch->flags & EEPROM_CHANNEL_RADAR)
				geo_ch->flags |= IEEE80211_CHAN_RADAR;

			geo_ch->flags |= ch->ht40_extension_channel;

			/* track the highest per-channel Tx power limit */
			if (ch->max_power_avg > max_tx_power)
				max_tx_power = ch->max_power_avg;
		} else {
			geo_ch->flags |= IEEE80211_CHAN_DISABLED;
		}

		IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
				ch->channel, geo_ch->center_freq,
				is_channel_a_band(ch) ?  "5.2" : "2.4",
				geo_ch->flags & IEEE80211_CHAN_DISABLED ?
				"restricted" : "valid",
				geo_ch->flags);
	}

	priv->tx_power_device_lmt = max_tx_power;
	priv->tx_power_user_lmt = max_tx_power;
	priv->tx_power_next = max_tx_power;

	/* SKU claims 5.2 GHz support but no channel made it in: clear it. */
	if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
	     hw_params(priv).sku & EEPROM_SKU_CAP_BAND_52GHZ) {
		IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
			"Please send your %s to maintainer.\n",
			trans(priv)->hw_id_str);
		hw_params(priv).sku &= ~EEPROM_SKU_CAP_BAND_52GHZ;
	}

	IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
		   priv->bands[IEEE80211_BAND_2GHZ].n_channels,
		   priv->bands[IEEE80211_BAND_5GHZ].n_channels);

	set_bit(STATUS_GEO_CONFIGURED, &priv->status);

	return 0;
}
219
220/*
221 * iwl_free_geos - undo allocations in iwl_init_geos
222 */
223void iwl_free_geos(struct iwl_priv *priv)
224{
225 kfree(priv->ieee_channels);
226 kfree(priv->ieee_rates);
227 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
228}
229
230static bool iwl_is_channel_extension(struct iwl_priv *priv,
231 enum ieee80211_band band,
232 u16 channel, u8 extension_chan_offset)
233{
234 const struct iwl_channel_info *ch_info;
235
236 ch_info = iwl_get_channel_info(priv, band, channel);
237 if (!is_channel_valid(ch_info))
238 return false;
239
240 if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
241 return !(ch_info->ht40_extension_channel &
242 IEEE80211_CHAN_NO_HT40PLUS);
243 else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
244 return !(ch_info->ht40_extension_channel &
245 IEEE80211_CHAN_NO_HT40MINUS);
246
247 return false;
248}
249
250bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
251 struct iwl_rxon_context *ctx,
252 struct ieee80211_sta_ht_cap *ht_cap)
253{
254 if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
255 return false;
256
257 /*
258 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
259 * the bit will not set if it is pure 40MHz case
260 */
261 if (ht_cap && !ht_cap->ht_supported)
262 return false;
263
264#ifdef CONFIG_IWLWIFI_DEBUGFS
265 if (priv->disable_ht40)
266 return false;
267#endif
268
269 return iwl_is_channel_extension(priv, priv->band,
270 le16_to_cpu(ctx->staging.channel),
271 ctx->ht.extension_chan_offset);
272}
273
274static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
275{
276 u16 new_val;
277 u16 beacon_factor;
278
279 /*
280 * If mac80211 hasn't given us a beacon interval, program
281 * the default into the device (not checking this here
282 * would cause the adjustment below to return the maximum
283 * value, which may break PAN.)
284 */
285 if (!beacon_val)
286 return DEFAULT_BEACON_INTERVAL;
287
288 /*
289 * If the beacon interval we obtained from the peer
290 * is too large, we'll have to wake up more often
291 * (and in IBSS case, we'll beacon too much)
292 *
293 * For example, if max_beacon_val is 4096, and the
294 * requested beacon interval is 7000, we'll have to
295 * use 3500 to be able to wake up on the beacons.
296 *
297 * This could badly influence beacon detection stats.
298 */
299
300 beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
301 new_val = beacon_val / beacon_factor;
302
303 if (!new_val)
304 new_val = max_beacon_val;
305
306 return new_val;
307}
308
/*
 * iwl_send_rxon_timing - build and send the RXON timing command for @ctx
 * @priv: driver private data (priv->mutex must be held)
 * @ctx: context whose timing (beacon interval, DTIM, timestamp) is sent
 *
 * When both BSS and PAN contexts are in use, the two must share one
 * beacon interval, so the value may be inherited from the other context;
 * otherwise the (possibly adjusted) interval from the vif is used.
 *
 * Return: result of iwl_dvm_send_cmd_pdu() (0 on success).
 */
int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
{
	u64 tsf;
	s32 interval_tm, rem;
	struct ieee80211_conf *conf = NULL;
	u16 beacon_int;
	struct ieee80211_vif *vif = ctx->vif;

	conf = &priv->hw->conf;

	lockdep_assert_held(&priv->mutex);

	memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));

	ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
	ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);

	beacon_int = vif ? vif->bss_conf.beacon_int : 0;

	/*
	 * TODO: For IBSS we need to get atim_window from mac80211,
	 * for now just always use 0
	 */
	ctx->timing.atim_window = 0;

	/* PAN context with an associated BSS: inherit BSS beacon interval. */
	if (ctx->ctxid == IWL_RXON_CTX_PAN &&
	    (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION) &&
	    iwl_is_associated(priv, IWL_RXON_CTX_BSS) &&
	    priv->contexts[IWL_RXON_CTX_BSS].vif &&
	    priv->contexts[IWL_RXON_CTX_BSS].vif->bss_conf.beacon_int) {
		ctx->timing.beacon_interval =
			priv->contexts[IWL_RXON_CTX_BSS].timing.beacon_interval;
		beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
	/* BSS context without its own interval: inherit from PAN. */
	} else if (ctx->ctxid == IWL_RXON_CTX_BSS &&
		   iwl_is_associated(priv, IWL_RXON_CTX_PAN) &&
		   priv->contexts[IWL_RXON_CTX_PAN].vif &&
		   priv->contexts[IWL_RXON_CTX_PAN].vif->bss_conf.beacon_int &&
		   (!iwl_is_associated_ctx(ctx) || !ctx->vif ||
		    !ctx->vif->bss_conf.beacon_int)) {
		ctx->timing.beacon_interval =
			priv->contexts[IWL_RXON_CTX_PAN].timing.beacon_interval;
		beacon_int = le16_to_cpu(ctx->timing.beacon_interval);
	} else {
		beacon_int = iwl_adjust_beacon_interval(beacon_int,
			IWL_MAX_UCODE_BEACON_INTERVAL * TIME_UNIT);
		ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
	}

	ctx->beacon_int = beacon_int;

	/* Align the beacon timer to the next beacon boundary of the TSF. */
	tsf = priv->timestamp; /* tsf is modified by do_div: copy it */
	interval_tm = beacon_int * TIME_UNIT;
	rem = do_div(tsf, interval_tm);
	ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);

	/* DTIM period of 0 from mac80211 means "unknown": default to 1. */
	ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;

	IWL_DEBUG_ASSOC(priv,
			"beacon interval %d beacon timer %d beacon tim %d\n",
			le16_to_cpu(ctx->timing.beacon_interval),
			le32_to_cpu(ctx->timing.beacon_init_val),
			le16_to_cpu(ctx->timing.atim_window));

	return iwl_dvm_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
				CMD_SYNC, sizeof(ctx->timing), &ctx->timing);
}
375
376void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
377 int hw_decrypt)
378{
379 struct iwl_rxon_cmd *rxon = &ctx->staging;
380
381 if (hw_decrypt)
382 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
383 else
384 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
385
386}
387
388/* validate RXON structure is valid */
389int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
390{
391 struct iwl_rxon_cmd *rxon = &ctx->staging;
392 u32 errors = 0;
393
394 if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
395 if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
396 IWL_WARN(priv, "check 2.4G: wrong narrow\n");
397 errors |= BIT(0);
398 }
399 if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
400 IWL_WARN(priv, "check 2.4G: wrong radar\n");
401 errors |= BIT(1);
402 }
403 } else {
404 if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
405 IWL_WARN(priv, "check 5.2G: not short slot!\n");
406 errors |= BIT(2);
407 }
408 if (rxon->flags & RXON_FLG_CCK_MSK) {
409 IWL_WARN(priv, "check 5.2G: CCK!\n");
410 errors |= BIT(3);
411 }
412 }
413 if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
414 IWL_WARN(priv, "mac/bssid mcast!\n");
415 errors |= BIT(4);
416 }
417
418 /* make sure basic rates 6Mbps and 1Mbps are supported */
419 if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
420 (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
421 IWL_WARN(priv, "neither 1 nor 6 are basic\n");
422 errors |= BIT(5);
423 }
424
425 if (le16_to_cpu(rxon->assoc_id) > 2007) {
426 IWL_WARN(priv, "aid > 2007\n");
427 errors |= BIT(6);
428 }
429
430 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
431 == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
432 IWL_WARN(priv, "CCK and short slot\n");
433 errors |= BIT(7);
434 }
435
436 if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
437 == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
438 IWL_WARN(priv, "CCK and auto detect");
439 errors |= BIT(8);
440 }
441
442 if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
443 RXON_FLG_TGG_PROTECT_MSK)) ==
444 RXON_FLG_TGG_PROTECT_MSK) {
445 IWL_WARN(priv, "TGg but no auto-detect\n");
446 errors |= BIT(9);
447 }
448
449 if (rxon->channel == 0) {
450 IWL_WARN(priv, "zero channel is invalid\n");
451 errors |= BIT(10);
452 }
453
454 WARN(errors, "Invalid RXON (%#x), channel %d",
455 errors, le16_to_cpu(rxon->channel));
456
457 return errors ? -EINVAL : 0;
458}
459
/**
 * iwl_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
 * @priv: staging_rxon is compared to active_rxon
 * @ctx: context whose staging and active RXON commands are compared
 *
 * If the RXON structure is changing enough to require a new tune,
 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
 */
int iwl_full_rxon_required(struct iwl_priv *priv,
			   struct iwl_rxon_context *ctx)
{
	const struct iwl_rxon_cmd *staging = &ctx->staging;
	const struct iwl_rxon_cmd *active = &ctx->active;

	/* Each CHK/CHK_NEQ returns 1 (logging why) on the first difference. */
#define CHK(cond)							\
	if ((cond)) {							\
		IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n");	\
		return 1;						\
	}

#define CHK_NEQ(c1, c2)						\
	if ((c1) != (c2)) {					\
		IWL_DEBUG_INFO(priv, "need full RXON - "	\
			       #c1 " != " #c2 " - %d != %d\n",	\
			       (c1), (c2));			\
		return 1;					\
	}

	/* These items are only settable from the full RXON command */
	CHK(!iwl_is_associated_ctx(ctx));
	CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
	CHK(compare_ether_addr(staging->node_addr, active->node_addr));
	CHK(compare_ether_addr(staging->wlap_bssid_addr,
			       active->wlap_bssid_addr));
	CHK_NEQ(staging->dev_type, active->dev_type);
	CHK_NEQ(staging->channel, active->channel);
	CHK_NEQ(staging->air_propagation, active->air_propagation);
	CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
		active->ofdm_ht_single_stream_basic_rates);
	CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
		active->ofdm_ht_dual_stream_basic_rates);
	CHK_NEQ(staging->ofdm_ht_triple_stream_basic_rates,
		active->ofdm_ht_triple_stream_basic_rates);
	CHK_NEQ(staging->assoc_id, active->assoc_id);

	/* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
	 * be updated with the RXON_ASSOC command -- however only some
	 * flag transitions are allowed using RXON_ASSOC */

	/* Check if we are not switching bands */
	CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
		active->flags & RXON_FLG_BAND_24G_MSK);

	/* Check if we are switching association toggle */
	CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
		active->filter_flags & RXON_FILTER_ASSOC_MSK);

#undef CHK
#undef CHK_NEQ

	return 0;
}
522
/*
 * _iwl_set_rxon_ht - translate the HT configuration into staging RXON flags
 * @priv: driver private data
 * @ht_conf: driver HT configuration (currently unused here)
 * @ctx: context whose staging RXON flags are rewritten
 *
 * Clears all HT-related RXON flags when HT is disabled; otherwise encodes
 * the protection mode and the 20/40 MHz channel mode (legacy, mixed or
 * pure 40) plus the control-channel location, then updates the RX chain.
 */
static void _iwl_set_rxon_ht(struct iwl_priv *priv,
			     struct iwl_ht_config *ht_conf,
			     struct iwl_rxon_context *ctx)
{
	struct iwl_rxon_cmd *rxon = &ctx->staging;

	if (!ctx->ht.enabled) {
		rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
			RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
			RXON_FLG_HT40_PROT_MSK |
			RXON_FLG_HT_PROT_MSK);
		return;
	}

	/* FIXME: if the definition of ht.protection changed, the "translation"
	 * will be needed for rxon->flags
	 */
	rxon->flags |= cpu_to_le32(ctx->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS);

	/* Set up channel bandwidth:
	 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
	/* clear the HT channel mode before set the mode */
	rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
			 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
	if (iwl_is_ht40_tx_allowed(priv, ctx, NULL)) {
		/* pure ht40 */
		if (ctx->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
			rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
			/* Note: control channel is opposite of extension channel */
			switch (ctx->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &= ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				break;
			}
		} else {
			/* Note: control channel is opposite of extension channel */
			switch (ctx->ht.extension_chan_offset) {
			case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
				rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
				rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
				rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
				break;
			case IEEE80211_HT_PARAM_CHA_SEC_NONE:
			default:
				/* channel location only valid if in Mixed mode */
				IWL_ERR(priv, "invalid extension channel offset\n");
				break;
			}
		}
	} else {
		rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
	}

	iwlagn_set_rxon_chain(priv, ctx);

	IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
			"extension channel offset 0x%x\n",
			le32_to_cpu(rxon->flags), ctx->ht.protection,
			ctx->ht.extension_chan_offset);
}
589
590void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
591{
592 struct iwl_rxon_context *ctx;
593
594 for_each_context(priv, ctx)
595 _iwl_set_rxon_ht(priv, ht_conf, ctx);
596}
597
/* Return valid, unused, channel for a passive scan to reset the RF */
/*
 * @band selects which part of priv->channel_info is searched.  The loop
 * index is an index into channel_info[]; the code assumes the first 14
 * entries are the 2.4 GHz channels and everything after them is 5 GHz
 * (NOTE(review): confirm against the EEPROM channel table layout).
 * Returns 0 if no valid unused channel was found.
 */
u8 iwl_get_single_channel_number(struct iwl_priv *priv,
				 enum ieee80211_band band)
{
	const struct iwl_channel_info *ch_info;
	int i;
	u8 channel = 0;
	u8 min, max;
	struct iwl_rxon_context *ctx;

	if (band == IEEE80211_BAND_5GHZ) {
		min = 14;
		max = priv->channel_count;
	} else {
		min = 0;
		max = 14;
	}

	for (i = min; i < max; i++) {
		bool busy = false;

		/* skip channels currently configured in any context */
		for_each_context(priv, ctx) {
			busy = priv->channel_info[i].channel ==
				le16_to_cpu(ctx->staging.channel);
			if (busy)
				break;
		}

		if (busy)
			continue;

		channel = priv->channel_info[i].channel;
		ch_info = iwl_get_channel_info(priv, band, channel);
		if (is_channel_valid(ch_info))
			break;
	}

	return channel;
}
637
638/**
639 * iwl_set_rxon_channel - Set the band and channel values in staging RXON
640 * @ch: requested channel as a pointer to struct ieee80211_channel
641
642 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
643 * in the staging RXON flag structure based on the ch->band
644 */
645void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
646 struct iwl_rxon_context *ctx)
647{
648 enum ieee80211_band band = ch->band;
649 u16 channel = ch->hw_value;
650
651 if ((le16_to_cpu(ctx->staging.channel) == channel) &&
652 (priv->band == band))
653 return;
654
655 ctx->staging.channel = cpu_to_le16(channel);
656 if (band == IEEE80211_BAND_5GHZ)
657 ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
658 else
659 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
660
661 priv->band = band;
662
663 IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
664
665}
666
667void iwl_set_flags_for_band(struct iwl_priv *priv,
668 struct iwl_rxon_context *ctx,
669 enum ieee80211_band band,
670 struct ieee80211_vif *vif)
671{
672 if (band == IEEE80211_BAND_5GHZ) {
673 ctx->staging.flags &=
674 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
675 | RXON_FLG_CCK_MSK);
676 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
677 } else {
678 /* Copied from iwl_post_associate() */
679 if (vif && vif->bss_conf.use_short_slot)
680 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
681 else
682 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
683
684 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
685 ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
686 ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
687 }
688}
689
/*
 * initialize rxon structure with default values from eeprom
 */
void iwl_connection_init_rx_config(struct iwl_priv *priv,
				   struct iwl_rxon_context *ctx)
{
	const struct iwl_channel_info *ch_info;

	memset(&ctx->staging, 0, sizeof(ctx->staging));

	/* Pick the device type (and default flags) matching the
	 * interface type currently bound to this context. */
	if (!ctx->vif) {
		ctx->staging.dev_type = ctx->unused_devtype;
	} else switch (ctx->vif->type) {
	case NL80211_IFTYPE_AP:
		ctx->staging.dev_type = ctx->ap_devtype;
		break;

	case NL80211_IFTYPE_STATION:
		ctx->staging.dev_type = ctx->station_devtype;
		ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
		break;

	case NL80211_IFTYPE_ADHOC:
		ctx->staging.dev_type = ctx->ibss_devtype;
		ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
		ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
						  RXON_FILTER_ACCEPT_GRP_MSK;
		break;

	default:
		IWL_ERR(priv, "Unsupported interface type %d\n",
			ctx->vif->type);
		break;
	}

#if 0
	/* TODO: Figure out when short_preamble would be set and cache from
	 * that */
	if (!hw_to_local(priv->hw)->short_preamble)
		ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
	else
		ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
#endif

	/* Fall back to the first known channel if the active one
	 * is not in the channel table. */
	ch_info = iwl_get_channel_info(priv, priv->band,
				       le16_to_cpu(ctx->active.channel));

	if (!ch_info)
		ch_info = &priv->channel_info[0];

	ctx->staging.channel = cpu_to_le16(ch_info->channel);
	priv->band = ch_info->band;

	iwl_set_flags_for_band(priv, ctx, priv->band, ctx->vif);

	ctx->staging.ofdm_basic_rates =
	    (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
	ctx->staging.cck_basic_rates =
	    (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;

	/* clear both MIX and PURE40 mode flag */
	ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
					RXON_FLG_CHANNEL_MODE_PURE_40);
	if (ctx->vif)
		memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);

	ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
	ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
	ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff;
}
760
761void iwl_set_rate(struct iwl_priv *priv)
762{
763 const struct ieee80211_supported_band *hw = NULL;
764 struct ieee80211_rate *rate;
765 struct iwl_rxon_context *ctx;
766 int i;
767
768 hw = iwl_get_hw_mode(priv, priv->band);
769 if (!hw) {
770 IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
771 return;
772 }
773
774 priv->active_rate = 0;
775
776 for (i = 0; i < hw->n_bitrates; i++) {
777 rate = &(hw->bitrates[i]);
778 if (rate->hw_value < IWL_RATE_COUNT_LEGACY)
779 priv->active_rate |= (1 << rate->hw_value);
780 }
781
782 IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);
783
784 for_each_context(priv, ctx) {
785 ctx->staging.cck_basic_rates =
786 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
787
788 ctx->staging.ofdm_basic_rates =
789 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
790 }
791}
792
793void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
794{
795 /*
796 * MULTI-FIXME
797 * See iwlagn_mac_channel_switch.
798 */
799 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
800
801 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
802 return;
803
804 if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
805 ieee80211_chswitch_done(ctx->vif, is_success);
806}
807
#ifdef CONFIG_IWLWIFI_DEBUG
/*
 * iwl_print_rx_config_cmd - dump the staging RXON of @ctxid to the log
 * @priv: driver private data
 * @ctxid: which context's staging RXON to print
 *
 * Debug-only helper: hex-dumps the whole command and then prints each
 * field individually at IWL_DL_RADIO level.
 */
void iwl_print_rx_config_cmd(struct iwl_priv *priv,
			     enum iwl_rxon_context_id ctxid)
{
	struct iwl_rxon_context *ctx = &priv->contexts[ctxid];
	struct iwl_rxon_cmd *rxon = &ctx->staging;

	IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
	iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
	IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
	IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
	IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
			le32_to_cpu(rxon->filter_flags));
	IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
	IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
			rxon->ofdm_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
	IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
	IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
	IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
}
#endif
830
/*
 * iwlagn_fw_error - handle a firmware (uCode) error
 * @priv: driver private data
 * @ondemand: when true, the reload rate-limiting below is skipped
 *	(presumably a deliberately requested restart -- confirm with callers)
 *
 * Marks the uCode as unloaded, aborts the in-flight host command and all
 * notification waits, clears the ready bit so the restart path cannot
 * send commands, and -- unless restarts are disabled or rate-limited --
 * queues the restart work.
 */
static void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
{
	unsigned int reload_msec;
	unsigned long reload_jiffies;

#ifdef CONFIG_IWLWIFI_DEBUG
	if (iwl_have_debug_level(IWL_DL_FW_ERRORS))
		iwl_print_rx_config_cmd(priv, IWL_RXON_CTX_BSS);
#endif

	/* uCode is no longer loaded. */
	priv->ucode_loaded = false;

	/* Set the FW error flag -- cleared on iwl_down */
	set_bit(STATUS_FW_ERROR, &priv->shrd->status);

	/* Cancel currently queued command. */
	clear_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status);

	iwl_abort_notification_waits(&priv->notif_wait);

	/* Keep the restart process from trying to send host
	 * commands by clearing the ready bit */
	clear_bit(STATUS_READY, &priv->status);

	wake_up(&trans(priv)->wait_command_queue);

	if (!ondemand) {
		/*
		 * If the firmware keeps reloading, something is seriously
		 * wrong and it cannot recover.  Instead of retrying forever
		 * (filling the syslog and hanging the system), stop after
		 * too many reloads in quick succession.
		 */
		reload_jiffies = jiffies;
		reload_msec = jiffies_to_msecs((long) reload_jiffies -
					(long) priv->reload_jiffies);
		priv->reload_jiffies = reload_jiffies;
		if (reload_msec <= IWL_MIN_RELOAD_DURATION) {
			priv->reload_count++;
			if (priv->reload_count >= IWL_MAX_CONTINUE_RELOAD_CNT) {
				IWL_ERR(priv, "BUG_ON, Stop restarting\n");
				return;
			}
		} else
			priv->reload_count = 0;
	}

	if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
		if (iwlagn_mod_params.restart_fw) {
			IWL_DEBUG_FW_ERRORS(priv,
				  "Restarting adapter due to uCode error.\n");
			queue_work(priv->workqueue, &priv->restart);
		} else
			IWL_DEBUG_FW_ERRORS(priv,
				  "Detected FW error, but not restarting\n");
	}
}
889
/*
 * iwl_set_tx_power - set the user Tx power limit
 * @priv: driver private data (priv->mutex must be held)
 * @tx_power: requested limit in dBm (s8)
 * @force: apply even if unchanged, and even while scanning/retuning
 *
 * Validates the request against the device limits, records it in
 * tx_power_next (always, so a deferred request is picked up later by
 * scan-complete/commit_rxon), and sends it to the device unless the
 * change must be deferred.  On a send failure the previous limit is
 * restored.
 *
 * Return: 0 on success or deferral, -EINVAL for an out-of-range request,
 * -EIO when the RF is not ready, or the send error.
 */
int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
{
	int ret;
	s8 prev_tx_power;
	bool defer;
	struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];

	lockdep_assert_held(&priv->mutex);

	if (priv->tx_power_user_lmt == tx_power && !force)
		return 0;

	if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) {
		IWL_WARN(priv,
			 "Requested user TXPOWER %d below lower limit %d.\n",
			 tx_power,
			 IWLAGN_TX_POWER_TARGET_POWER_MIN);
		return -EINVAL;
	}

	if (tx_power > priv->tx_power_device_lmt) {
		IWL_WARN(priv,
			"Requested user TXPOWER %d above upper limit %d.\n",
			 tx_power, priv->tx_power_device_lmt);
		return -EINVAL;
	}

	if (!iwl_is_ready_rf(priv))
		return -EIO;

	/* scan complete and commit_rxon use tx_power_next value,
	 * it always need to be updated for newest request */
	priv->tx_power_next = tx_power;

	/* do not set tx power when scanning or channel changing */
	defer = test_bit(STATUS_SCANNING, &priv->status) ||
		memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
	if (defer && !force) {
		IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
		return 0;
	}

	prev_tx_power = priv->tx_power_user_lmt;
	priv->tx_power_user_lmt = tx_power;

	ret = iwlagn_send_tx_power(priv);

	/* if fail to set tx_power, restore the orig. tx power */
	if (ret) {
		priv->tx_power_user_lmt = prev_tx_power;
		priv->tx_power_next = prev_tx_power;
	}
	return ret;
}
944
945void iwl_send_bt_config(struct iwl_priv *priv)
946{
947 struct iwl_bt_cmd bt_cmd = {
948 .lead_time = BT_LEAD_TIME_DEF,
949 .max_kill = BT_MAX_KILL_DEF,
950 .kill_ack_mask = 0,
951 .kill_cts_mask = 0,
952 };
953
954 if (!iwlagn_mod_params.bt_coex_active)
955 bt_cmd.flags = BT_COEX_DISABLE;
956 else
957 bt_cmd.flags = BT_COEX_ENABLE;
958
959 priv->bt_enable_flag = bt_cmd.flags;
960 IWL_DEBUG_INFO(priv, "BT coex %s\n",
961 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
962
963 if (iwl_dvm_send_cmd_pdu(priv, REPLY_BT_CONFIG,
964 CMD_SYNC, sizeof(struct iwl_bt_cmd), &bt_cmd))
965 IWL_ERR(priv, "failed to send BT Coex Config\n");
966}
967
968int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
969{
970 struct iwl_statistics_cmd statistics_cmd = {
971 .configuration_flags =
972 clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
973 };
974
975 if (flags & CMD_ASYNC)
976 return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
977 CMD_ASYNC,
978 sizeof(struct iwl_statistics_cmd),
979 &statistics_cmd);
980 else
981 return iwl_dvm_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
982 CMD_SYNC,
983 sizeof(struct iwl_statistics_cmd),
984 &statistics_cmd);
985}
986
987
988
989
990#ifdef CONFIG_IWLWIFI_DEBUGFS
991
992#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
993
994void iwl_reset_traffic_log(struct iwl_priv *priv)
995{
996 priv->tx_traffic_idx = 0;
997 priv->rx_traffic_idx = 0;
998 if (priv->tx_traffic)
999 memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
1000 if (priv->rx_traffic)
1001 memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
1002}
1003
1004int iwl_alloc_traffic_mem(struct iwl_priv *priv)
1005{
1006 u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;
1007
1008 if (iwl_have_debug_level(IWL_DL_TX)) {
1009 if (!priv->tx_traffic) {
1010 priv->tx_traffic =
1011 kzalloc(traffic_size, GFP_KERNEL);
1012 if (!priv->tx_traffic)
1013 return -ENOMEM;
1014 }
1015 }
1016 if (iwl_have_debug_level(IWL_DL_RX)) {
1017 if (!priv->rx_traffic) {
1018 priv->rx_traffic =
1019 kzalloc(traffic_size, GFP_KERNEL);
1020 if (!priv->rx_traffic)
1021 return -ENOMEM;
1022 }
1023 }
1024 iwl_reset_traffic_log(priv);
1025 return 0;
1026}
1027
1028void iwl_free_traffic_mem(struct iwl_priv *priv)
1029{
1030 kfree(priv->tx_traffic);
1031 priv->tx_traffic = NULL;
1032
1033 kfree(priv->rx_traffic);
1034 priv->rx_traffic = NULL;
1035}
1036
1037void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
1038 u16 length, struct ieee80211_hdr *header)
1039{
1040 __le16 fc;
1041 u16 len;
1042
1043 if (likely(!iwl_have_debug_level(IWL_DL_TX)))
1044 return;
1045
1046 if (!priv->tx_traffic)
1047 return;
1048
1049 fc = header->frame_control;
1050 if (ieee80211_is_data(fc)) {
1051 len = (length > IWL_TRAFFIC_ENTRY_SIZE)
1052 ? IWL_TRAFFIC_ENTRY_SIZE : length;
1053 memcpy((priv->tx_traffic +
1054 (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
1055 header, len);
1056 priv->tx_traffic_idx =
1057 (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
1058 }
1059}
1060
1061void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
1062 u16 length, struct ieee80211_hdr *header)
1063{
1064 __le16 fc;
1065 u16 len;
1066
1067 if (likely(!iwl_have_debug_level(IWL_DL_RX)))
1068 return;
1069
1070 if (!priv->rx_traffic)
1071 return;
1072
1073 fc = header->frame_control;
1074 if (ieee80211_is_data(fc)) {
1075 len = (length > IWL_TRAFFIC_ENTRY_SIZE)
1076 ? IWL_TRAFFIC_ENTRY_SIZE : length;
1077 memcpy((priv->rx_traffic +
1078 (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
1079 header, len);
1080 priv->rx_traffic_idx =
1081 (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
1082 }
1083}
1084
1085const char *get_mgmt_string(int cmd)
1086{
1087 switch (cmd) {
1088 IWL_CMD(MANAGEMENT_ASSOC_REQ);
1089 IWL_CMD(MANAGEMENT_ASSOC_RESP);
1090 IWL_CMD(MANAGEMENT_REASSOC_REQ);
1091 IWL_CMD(MANAGEMENT_REASSOC_RESP);
1092 IWL_CMD(MANAGEMENT_PROBE_REQ);
1093 IWL_CMD(MANAGEMENT_PROBE_RESP);
1094 IWL_CMD(MANAGEMENT_BEACON);
1095 IWL_CMD(MANAGEMENT_ATIM);
1096 IWL_CMD(MANAGEMENT_DISASSOC);
1097 IWL_CMD(MANAGEMENT_AUTH);
1098 IWL_CMD(MANAGEMENT_DEAUTH);
1099 IWL_CMD(MANAGEMENT_ACTION);
1100 default:
1101 return "UNKNOWN";
1102
1103 }
1104}
1105
1106const char *get_ctrl_string(int cmd)
1107{
1108 switch (cmd) {
1109 IWL_CMD(CONTROL_BACK_REQ);
1110 IWL_CMD(CONTROL_BACK);
1111 IWL_CMD(CONTROL_PSPOLL);
1112 IWL_CMD(CONTROL_RTS);
1113 IWL_CMD(CONTROL_CTS);
1114 IWL_CMD(CONTROL_ACK);
1115 IWL_CMD(CONTROL_CFEND);
1116 IWL_CMD(CONTROL_CFENDACK);
1117 default:
1118 return "UNKNOWN";
1119
1120 }
1121}
1122
1123void iwl_clear_traffic_stats(struct iwl_priv *priv)
1124{
1125 memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
1126 memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
1127}
1128
/*
 * iwl_update_stats - accumulate per-frame-type traffic counters
 *
 * If CONFIG_IWLWIFI_DEBUGFS is defined, this records every MGMT, CTRL
 * and DATA packet for both the TX and RX paths; use debugfs to display
 * the tx/rx statistics.
 * If CONFIG_IWLWIFI_DEBUGFS is not defined, no MGMT or CTRL information
 * is recorded, but DATA packets are still counted (by the inline stub in
 * the header), because iwl-led.c controls LED blinking based on the
 * number of tx and rx data frames.
 */
void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
{
	struct traffic_stats *stats;

	/* Select the counter set for the direction of this frame. */
	if (is_tx)
		stats = &priv->tx_stats;
	else
		stats = &priv->rx_stats;

	if (ieee80211_is_mgmt(fc)) {
		/*
		 * fc stays in little-endian wire order, so compare against
		 * cpu_to_le16() constants instead of byte-swapping fc.
		 */
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
			stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
			stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
			stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
			stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
			stats->mgmt[MANAGEMENT_PROBE_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
			stats->mgmt[MANAGEMENT_PROBE_RESP]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_BEACON):
			stats->mgmt[MANAGEMENT_BEACON]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ATIM):
			stats->mgmt[MANAGEMENT_ATIM]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
			stats->mgmt[MANAGEMENT_DISASSOC]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_AUTH):
			stats->mgmt[MANAGEMENT_AUTH]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
			stats->mgmt[MANAGEMENT_DEAUTH]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ACTION):
			stats->mgmt[MANAGEMENT_ACTION]++;
			break;
		}
	} else if (ieee80211_is_ctl(fc)) {
		switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
		case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
			stats->ctrl[CONTROL_BACK_REQ]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_BACK):
			stats->ctrl[CONTROL_BACK]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
			stats->ctrl[CONTROL_PSPOLL]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_RTS):
			stats->ctrl[CONTROL_RTS]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CTS):
			stats->ctrl[CONTROL_CTS]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_ACK):
			stats->ctrl[CONTROL_ACK]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CFEND):
			stats->ctrl[CONTROL_CFEND]++;
			break;
		case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
			stats->ctrl[CONTROL_CFENDACK]++;
			break;
		}
	} else {
		/* data frame: count packets and total payload bytes */
		stats->data_cnt++;
		stats->data_bytes += len;
	}
}
1220#endif
1221
1222static void iwl_force_rf_reset(struct iwl_priv *priv)
1223{
1224 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1225 return;
1226
1227 if (!iwl_is_any_associated(priv)) {
1228 IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
1229 return;
1230 }
1231 /*
1232 * There is no easy and better way to force reset the radio,
1233 * the only known method is switching channel which will force to
1234 * reset and tune the radio.
1235 * Use internal short scan (single channel) operation to should
1236 * achieve this objective.
1237 * Driver should reset the radio when number of consecutive missed
1238 * beacon, or any other uCode error condition detected.
1239 */
1240 IWL_DEBUG_INFO(priv, "perform radio reset.\n");
1241 iwl_internal_short_hw_scan(priv);
1242}
1243
1244
1245int iwl_force_reset(struct iwl_priv *priv, int mode, bool external)
1246{
1247 struct iwl_force_reset *force_reset;
1248
1249 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1250 return -EINVAL;
1251
1252 if (mode >= IWL_MAX_FORCE_RESET) {
1253 IWL_DEBUG_INFO(priv, "invalid reset request.\n");
1254 return -EINVAL;
1255 }
1256 force_reset = &priv->force_reset[mode];
1257 force_reset->reset_request_count++;
1258 if (!external) {
1259 if (force_reset->last_force_reset_jiffies &&
1260 time_after(force_reset->last_force_reset_jiffies +
1261 force_reset->reset_duration, jiffies)) {
1262 IWL_DEBUG_INFO(priv, "force reset rejected\n");
1263 force_reset->reset_reject_count++;
1264 return -EAGAIN;
1265 }
1266 }
1267 force_reset->reset_success_count++;
1268 force_reset->last_force_reset_jiffies = jiffies;
1269 IWL_DEBUG_INFO(priv, "perform force reset (%d)\n", mode);
1270 switch (mode) {
1271 case IWL_RF_RESET:
1272 iwl_force_rf_reset(priv);
1273 break;
1274 case IWL_FW_RESET:
1275 /*
1276 * if the request is from external(ex: debugfs),
1277 * then always perform the request in regardless the module
1278 * parameter setting
1279 * if the request is from internal (uCode error or driver
1280 * detect failure), then fw_restart module parameter
1281 * need to be check before performing firmware reload
1282 */
1283 if (!external && !iwlagn_mod_params.restart_fw) {
1284 IWL_DEBUG_INFO(priv, "Cancel firmware reload based on "
1285 "module parameter setting\n");
1286 break;
1287 }
1288 IWL_ERR(priv, "On demand firmware reload\n");
1289 iwlagn_fw_error(priv, true);
1290 break;
1291 }
1292 return 0;
1293}
1294
1295
1296int iwl_cmd_echo_test(struct iwl_priv *priv)
1297{
1298 int ret;
1299 struct iwl_host_cmd cmd = {
1300 .id = REPLY_ECHO,
1301 .len = { 0 },
1302 .flags = CMD_SYNC,
1303 };
1304
1305 ret = iwl_dvm_send_cmd(priv, &cmd);
1306 if (ret)
1307 IWL_ERR(priv, "echo testing fail: 0X%x\n", ret);
1308 else
1309 IWL_DEBUG_INFO(priv, "echo testing pass\n");
1310 return ret;
1311}
1312
1313static inline int iwl_check_stuck_queue(struct iwl_priv *priv, int txq)
1314{
1315 if (iwl_trans_check_stuck_queue(trans(priv), txq)) {
1316 int ret;
1317 ret = iwl_force_reset(priv, IWL_FW_RESET, false);
1318 return (ret == -EAGAIN) ? 0 : 1;
1319 }
1320 return 0;
1321}
1322
/*
 * Making the watchdog tick a quarter of the timeout ensures we will
 * discover a hung queue between timeout and 1.25 * timeout
 */
1327#define IWL_WD_TICK(timeout) ((timeout) / 4)
1328
/*
 * Watchdog timer callback: we check each tx queue for a stall; if one is
 * hung we reset the firmware. If everything is fine we just rearm the timer.
 */
1333void iwl_bg_watchdog(unsigned long data)
1334{
1335 struct iwl_priv *priv = (struct iwl_priv *)data;
1336 int cnt;
1337 unsigned long timeout;
1338
1339 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1340 return;
1341
1342 if (iwl_is_rfkill(priv))
1343 return;
1344
1345 timeout = hw_params(priv).wd_timeout;
1346 if (timeout == 0)
1347 return;
1348
1349 /* monitor and check for stuck queues */
1350 for (cnt = 0; cnt < cfg(priv)->base_params->num_of_queues; cnt++)
1351 if (iwl_check_stuck_queue(priv, cnt))
1352 return;
1353
1354 mod_timer(&priv->watchdog, jiffies +
1355 msecs_to_jiffies(IWL_WD_TICK(timeout)));
1356}
1357
1358void iwl_setup_watchdog(struct iwl_priv *priv)
1359{
1360 unsigned int timeout = hw_params(priv).wd_timeout;
1361
1362 if (!iwlagn_mod_params.wd_disable) {
1363 /* use system default */
1364 if (timeout && !cfg(priv)->base_params->wd_disable)
1365 mod_timer(&priv->watchdog,
1366 jiffies +
1367 msecs_to_jiffies(IWL_WD_TICK(timeout)));
1368 else
1369 del_timer(&priv->watchdog);
1370 } else {
1371 /* module parameter overwrite default configuration */
1372 if (timeout && iwlagn_mod_params.wd_disable == 2)
1373 mod_timer(&priv->watchdog,
1374 jiffies +
1375 msecs_to_jiffies(IWL_WD_TICK(timeout)));
1376 else
1377 del_timer(&priv->watchdog);
1378 }
1379}
1380
/**
 * iwl_beacon_time_mask_low - mask of lower 32 bit of beacon time
 * @priv: pointer to iwl_priv data structure (unused, kept for symmetry
 *	with iwl_beacon_time_mask_high)
 * @tsf_bits: number of low-order bits that hold the usec part
 *
 * Returns a mask covering the low @tsf_bits bits of an extended beacon
 * time value, i.e. the usec-within-beacon-interval portion.
 */
static inline u32 iwl_beacon_time_mask_low(struct iwl_priv *priv,
					   u16 tsf_bits)
{
	return (1 << tsf_bits) - 1;
}
1391
/**
 * iwl_beacon_time_mask_high - mask of higher 32 bit of beacon time
 * @priv: pointer to iwl_priv data structure (unused, kept for symmetry
 *	with iwl_beacon_time_mask_low)
 * @tsf_bits: number of low-order bits occupied by the usec part
 *
 * Returns a mask covering the upper (32 - @tsf_bits) bits of an
 * extended beacon time value, i.e. the beacon-count portion.
 */
static inline u32 iwl_beacon_time_mask_high(struct iwl_priv *priv,
					    u16 tsf_bits)
{
	return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
}
1402
1403/*
1404 * extended beacon time format
1405 * time in usec will be changed into a 32-bit value in extended:internal format
1406 * the extended part is the beacon counts
1407 * the internal part is the time in usec within one beacon interval
1408 */
1409u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval)
1410{
1411 u32 quot;
1412 u32 rem;
1413 u32 interval = beacon_interval * TIME_UNIT;
1414
1415 if (!interval || !usec)
1416 return 0;
1417
1418 quot = (usec / interval) &
1419 (iwl_beacon_time_mask_high(priv, IWLAGN_EXT_BEACON_TIME_POS) >>
1420 IWLAGN_EXT_BEACON_TIME_POS);
1421 rem = (usec % interval) & iwl_beacon_time_mask_low(priv,
1422 IWLAGN_EXT_BEACON_TIME_POS);
1423
1424 return (quot << IWLAGN_EXT_BEACON_TIME_POS) + rem;
1425}
1426
1427/* base is usually what we get from ucode with each received frame,
1428 * the same as HW timer counter counting down
1429 */
1430__le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
1431 u32 addon, u32 beacon_interval)
1432{
1433 u32 base_low = base & iwl_beacon_time_mask_low(priv,
1434 IWLAGN_EXT_BEACON_TIME_POS);
1435 u32 addon_low = addon & iwl_beacon_time_mask_low(priv,
1436 IWLAGN_EXT_BEACON_TIME_POS);
1437 u32 interval = beacon_interval * TIME_UNIT;
1438 u32 res = (base & iwl_beacon_time_mask_high(priv,
1439 IWLAGN_EXT_BEACON_TIME_POS)) +
1440 (addon & iwl_beacon_time_mask_high(priv,
1441 IWLAGN_EXT_BEACON_TIME_POS));
1442
1443 if (base_low > addon_low)
1444 res += base_low - addon_low;
1445 else if (base_low < addon_low) {
1446 res += interval + base_low - addon_low;
1447 res += (1 << IWLAGN_EXT_BEACON_TIME_POS);
1448 } else
1449 res += (1 << IWLAGN_EXT_BEACON_TIME_POS);
1450
1451 return cpu_to_le32(res);
1452}
1453
1454void iwl_nic_error(struct iwl_op_mode *op_mode)
1455{
1456 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
1457
1458 iwlagn_fw_error(priv, false);
1459}
1460
1461void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
1462{
1463 struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
1464
1465 if (state)
1466 set_bit(STATUS_RF_KILL_HW, &priv->status);
1467 else
1468 clear_bit(STATUS_RF_KILL_HW, &priv->status);
1469
1470 wiphy_rfkill_set_hw_state(priv->hw->wiphy, state);
1471}
1472
1473void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
1474{
1475 struct ieee80211_tx_info *info;
1476
1477 info = IEEE80211_SKB_CB(skb);
1478 kmem_cache_free(iwl_tx_cmd_pool, (info->driver_data[1]));
1479 dev_kfree_skb_any(skb);
1480}
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
deleted file mode 100644
index 635eb685edeb..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ /dev/null
@@ -1,234 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_core_h__
64#define __iwl_core_h__
65
66#include "iwl-dev.h"
67#include "iwl-io.h"
68
69/************************
70 * forward declarations *
71 ************************/
72struct iwl_host_cmd;
73struct iwl_cmd;
74
75#define TIME_UNIT 1024
76
77struct iwl_lib_ops {
78 /* set hw dependent parameters */
79 void (*set_hw_params)(struct iwl_priv *priv);
80 int (*set_channel_switch)(struct iwl_priv *priv,
81 struct ieee80211_channel_switch *ch_switch);
82 /* device specific configuration */
83 void (*nic_config)(struct iwl_priv *priv);
84
85 /* eeprom operations (as defined in iwl-eeprom.h) */
86 struct iwl_eeprom_ops eeprom_ops;
87
88 /* temperature */
89 void (*temperature)(struct iwl_priv *priv);
90};
91
92/***************************
93 * L i b *
94 ***************************/
95
96void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
97 int hw_decrypt);
98int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
99int iwl_full_rxon_required(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
100void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
101 struct iwl_rxon_context *ctx);
102void iwl_set_flags_for_band(struct iwl_priv *priv,
103 struct iwl_rxon_context *ctx,
104 enum ieee80211_band band,
105 struct ieee80211_vif *vif);
106u8 iwl_get_single_channel_number(struct iwl_priv *priv,
107 enum ieee80211_band band);
108void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf);
109bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
110 struct iwl_rxon_context *ctx,
111 struct ieee80211_sta_ht_cap *ht_cap);
112void iwl_connection_init_rx_config(struct iwl_priv *priv,
113 struct iwl_rxon_context *ctx);
114void iwl_set_rate(struct iwl_priv *priv);
115int iwl_cmd_echo_test(struct iwl_priv *priv);
116#ifdef CONFIG_IWLWIFI_DEBUGFS
117int iwl_alloc_traffic_mem(struct iwl_priv *priv);
118void iwl_free_traffic_mem(struct iwl_priv *priv);
119void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
120 u16 length, struct ieee80211_hdr *header);
121void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
122 u16 length, struct ieee80211_hdr *header);
123const char *get_mgmt_string(int cmd);
124const char *get_ctrl_string(int cmd);
125void iwl_clear_traffic_stats(struct iwl_priv *priv);
126void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc,
127 u16 len);
128void iwl_reset_traffic_log(struct iwl_priv *priv);
129
130#else
131static inline int iwl_alloc_traffic_mem(struct iwl_priv *priv)
132{
133 return 0;
134}
135static inline void iwl_free_traffic_mem(struct iwl_priv *priv)
136{
137}
138static inline void iwl_reset_traffic_log(struct iwl_priv *priv)
139{
140}
141static inline void iwl_dbg_log_tx_data_frame(struct iwl_priv *priv,
142 u16 length, struct ieee80211_hdr *header)
143{
144}
145static inline void iwl_dbg_log_rx_data_frame(struct iwl_priv *priv,
146 u16 length, struct ieee80211_hdr *header)
147{
148}
149static inline void iwl_update_stats(struct iwl_priv *priv, bool is_tx,
150 __le16 fc, u16 len)
151{
152}
153#endif
154
155/*****************************************************
156* RX
157******************************************************/
158void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
159
160void iwl_setup_watchdog(struct iwl_priv *priv);
161/*****************************************************
162 * TX power
163 ****************************************************/
164int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
165
166/*******************************************************************************
167 * Scanning
168 ******************************************************************************/
169void iwl_init_scan_params(struct iwl_priv *priv);
170int iwl_scan_cancel(struct iwl_priv *priv);
171void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
172void iwl_force_scan_end(struct iwl_priv *priv);
173void iwl_internal_short_hw_scan(struct iwl_priv *priv);
174int iwl_force_reset(struct iwl_priv *priv, int mode, bool external);
175void iwl_setup_rx_scan_handlers(struct iwl_priv *priv);
176void iwl_setup_scan_deferred_work(struct iwl_priv *priv);
177void iwl_cancel_scan_deferred_work(struct iwl_priv *priv);
178int __must_check iwl_scan_initiate(struct iwl_priv *priv,
179 struct ieee80211_vif *vif,
180 enum iwl_scan_type scan_type,
181 enum ieee80211_band band);
182
183/* For faster active scanning, scan will move to the next channel if fewer than
184 * PLCP_QUIET_THRESH packets are heard on this channel within
185 * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
186 * time if it's a quiet channel (nothing responded to our probe, and there's
187 * no other traffic).
188 * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
189#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
190#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
191
192#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7)
193
194/* traffic log definitions */
195#define IWL_TRAFFIC_ENTRIES (256)
196#define IWL_TRAFFIC_ENTRY_SIZE (64)
197
198/*****************************************************
199 * S e n d i n g H o s t C o m m a n d s *
200 *****************************************************/
201
202void iwl_bg_watchdog(unsigned long data);
203u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval);
204__le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
205 u32 addon, u32 beacon_interval);
206
207
208/*****************************************************
209* GEOS
210******************************************************/
211int iwl_init_geos(struct iwl_priv *priv);
212void iwl_free_geos(struct iwl_priv *priv);
213
214extern void iwl_send_bt_config(struct iwl_priv *priv);
215extern int iwl_send_statistics_request(struct iwl_priv *priv,
216 u8 flags, bool clear);
217
218int iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
219
220static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
221 struct iwl_priv *priv, enum ieee80211_band band)
222{
223 return priv->hw->wiphy->bands[band];
224}
225
226static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv)
227{
228 return cfg(priv)->bt_params &&
229 cfg(priv)->bt_params->advanced_bt_coexist;
230}
231
232extern bool bt_siso_mode;
233
234#endif /* __iwl_core_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 5f96ce105f08..59750543fce7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -430,6 +430,9 @@
430#define HBUS_TARG_PRPH_WDAT (HBUS_BASE+0x04c) 430#define HBUS_TARG_PRPH_WDAT (HBUS_BASE+0x04c)
431#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050) 431#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050)
432 432
433/* Used to enable DBGM */
434#define HBUS_TARG_TEST_REG (HBUS_BASE+0x05c)
435
433/* 436/*
434 * Per-Tx-queue write pointer (index, really!) 437 * Per-Tx-queue write pointer (index, really!)
435 * Indicates index to next TFD that driver will fill (1 past latest filled). 438 * Indicates index to next TFD that driver will fill (1 past latest filled).
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.c b/drivers/net/wireless/iwlwifi/iwl-debug.c
index 059efabda184..2d1b42847b9b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.c
@@ -63,6 +63,7 @@
63 63
64#include <linux/interrupt.h> 64#include <linux/interrupt.h>
65#include "iwl-debug.h" 65#include "iwl-debug.h"
66#include "iwl-devtrace.h"
66 67
67#define __iwl_fn(fn) \ 68#define __iwl_fn(fn) \
68void __iwl_ ##fn(struct device *dev, const char *fmt, ...) \ 69void __iwl_ ##fn(struct device *dev, const char *fmt, ...) \
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index a6b32a11e103..8376b842bdba 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -29,10 +29,13 @@
29#ifndef __iwl_debug_h__ 29#ifndef __iwl_debug_h__
30#define __iwl_debug_h__ 30#define __iwl_debug_h__
31 31
32#include "iwl-shared.h" 32#include "iwl-modparams.h"
33#include "iwl-devtrace.h"
34 33
35struct iwl_priv; 34
35static inline bool iwl_have_debug_level(u32 level)
36{
37 return iwlwifi_mod_params.debug_level & level;
38}
36 39
37void __iwl_err(struct device *dev, bool rfkill_prefix, bool only_trace, 40void __iwl_err(struct device *dev, bool rfkill_prefix, bool only_trace,
38 const char *fmt, ...); 41 const char *fmt, ...);
@@ -41,10 +44,10 @@ void __iwl_info(struct device *dev, const char *fmt, ...);
41void __iwl_crit(struct device *dev, const char *fmt, ...); 44void __iwl_crit(struct device *dev, const char *fmt, ...);
42 45
43/* No matter what is m (priv, bus, trans), this will work */ 46/* No matter what is m (priv, bus, trans), this will work */
44#define IWL_ERR(m, f, a...) __iwl_err(trans(m)->dev, false, false, f, ## a) 47#define IWL_ERR(m, f, a...) __iwl_err((m)->dev, false, false, f, ## a)
45#define IWL_WARN(m, f, a...) __iwl_warn(trans(m)->dev, f, ## a) 48#define IWL_WARN(m, f, a...) __iwl_warn((m)->dev, f, ## a)
46#define IWL_INFO(m, f, a...) __iwl_info(trans(m)->dev, f, ## a) 49#define IWL_INFO(m, f, a...) __iwl_info((m)->dev, f, ## a)
47#define IWL_CRIT(m, f, a...) __iwl_crit(trans(m)->dev, f, ## a) 50#define IWL_CRIT(m, f, a...) __iwl_crit((m)->dev, f, ## a)
48 51
49#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING) 52#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING)
50void __iwl_dbg(struct device *dev, 53void __iwl_dbg(struct device *dev,
@@ -65,9 +68,9 @@ do { \
65} while (0) 68} while (0)
66 69
67#define IWL_DEBUG(m, level, fmt, args...) \ 70#define IWL_DEBUG(m, level, fmt, args...) \
68 __iwl_dbg(trans(m)->dev, level, false, __func__, fmt, ##args) 71 __iwl_dbg((m)->dev, level, false, __func__, fmt, ##args)
69#define IWL_DEBUG_LIMIT(m, level, fmt, args...) \ 72#define IWL_DEBUG_LIMIT(m, level, fmt, args...) \
70 __iwl_dbg(trans(m)->dev, level, true, __func__, fmt, ##args) 73 __iwl_dbg((m)->dev, level, true, __func__, fmt, ##args)
71 74
72#ifdef CONFIG_IWLWIFI_DEBUG 75#ifdef CONFIG_IWLWIFI_DEBUG
73#define iwl_print_hex_dump(m, level, p, len) \ 76#define iwl_print_hex_dump(m, level, p, len) \
@@ -80,19 +83,6 @@ do { \
80#define iwl_print_hex_dump(m, level, p, len) 83#define iwl_print_hex_dump(m, level, p, len)
81#endif /* CONFIG_IWLWIFI_DEBUG */ 84#endif /* CONFIG_IWLWIFI_DEBUG */
82 85
83#ifdef CONFIG_IWLWIFI_DEBUGFS
84int iwl_dbgfs_register(struct iwl_priv *priv, const char *name);
85void iwl_dbgfs_unregister(struct iwl_priv *priv);
86#else
87static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
88{
89 return 0;
90}
91static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
92{
93}
94#endif /* CONFIG_IWLWIFI_DEBUGFS */
95
96/* 86/*
97 * To use the debug system: 87 * To use the debug system:
98 * 88 *
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 2bbaebd99ad4..e7c157e5ebeb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -37,9 +37,9 @@
37 37
38#include "iwl-dev.h" 38#include "iwl-dev.h"
39#include "iwl-debug.h" 39#include "iwl-debug.h"
40#include "iwl-core.h"
41#include "iwl-io.h" 40#include "iwl-io.h"
42#include "iwl-agn.h" 41#include "iwl-agn.h"
42#include "iwl-modparams.h"
43 43
44/* create and remove of files */ 44/* create and remove of files */
45#define DEBUGFS_ADD_FILE(name, parent, mode) do { \ 45#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
@@ -111,105 +111,6 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
111 .llseek = generic_file_llseek, \ 111 .llseek = generic_file_llseek, \
112}; 112};
113 113
114static ssize_t iwl_dbgfs_tx_statistics_read(struct file *file,
115 char __user *user_buf,
116 size_t count, loff_t *ppos) {
117
118 struct iwl_priv *priv = file->private_data;
119 char *buf;
120 int pos = 0;
121
122 int cnt;
123 ssize_t ret;
124 const size_t bufsz = 100 +
125 sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
126 buf = kzalloc(bufsz, GFP_KERNEL);
127 if (!buf)
128 return -ENOMEM;
129 pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
130 for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
131 pos += scnprintf(buf + pos, bufsz - pos,
132 "\t%25s\t\t: %u\n",
133 get_mgmt_string(cnt),
134 priv->tx_stats.mgmt[cnt]);
135 }
136 pos += scnprintf(buf + pos, bufsz - pos, "Control\n");
137 for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
138 pos += scnprintf(buf + pos, bufsz - pos,
139 "\t%25s\t\t: %u\n",
140 get_ctrl_string(cnt),
141 priv->tx_stats.ctrl[cnt]);
142 }
143 pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
144 pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
145 priv->tx_stats.data_cnt);
146 pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
147 priv->tx_stats.data_bytes);
148 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
149 kfree(buf);
150 return ret;
151}
152
153static ssize_t iwl_dbgfs_clear_traffic_statistics_write(struct file *file,
154 const char __user *user_buf,
155 size_t count, loff_t *ppos)
156{
157 struct iwl_priv *priv = file->private_data;
158 u32 clear_flag;
159 char buf[8];
160 int buf_size;
161
162 memset(buf, 0, sizeof(buf));
163 buf_size = min(count, sizeof(buf) - 1);
164 if (copy_from_user(buf, user_buf, buf_size))
165 return -EFAULT;
166 if (sscanf(buf, "%x", &clear_flag) != 1)
167 return -EFAULT;
168 iwl_clear_traffic_stats(priv);
169
170 return count;
171}
172
173static ssize_t iwl_dbgfs_rx_statistics_read(struct file *file,
174 char __user *user_buf,
175 size_t count, loff_t *ppos) {
176
177 struct iwl_priv *priv = file->private_data;
178 char *buf;
179 int pos = 0;
180 int cnt;
181 ssize_t ret;
182 const size_t bufsz = 100 +
183 sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
184 buf = kzalloc(bufsz, GFP_KERNEL);
185 if (!buf)
186 return -ENOMEM;
187
188 pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
189 for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
190 pos += scnprintf(buf + pos, bufsz - pos,
191 "\t%25s\t\t: %u\n",
192 get_mgmt_string(cnt),
193 priv->rx_stats.mgmt[cnt]);
194 }
195 pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
196 for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
197 pos += scnprintf(buf + pos, bufsz - pos,
198 "\t%25s\t\t: %u\n",
199 get_ctrl_string(cnt),
200 priv->rx_stats.ctrl[cnt]);
201 }
202 pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
203 pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
204 priv->rx_stats.data_cnt);
205 pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
206 priv->rx_stats.data_bytes);
207
208 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
209 kfree(buf);
210 return ret;
211}
212
213static ssize_t iwl_dbgfs_sram_read(struct file *file, 114static ssize_t iwl_dbgfs_sram_read(struct file *file,
214 char __user *user_buf, 115 char __user *user_buf,
215 size_t count, loff_t *ppos) 116 size_t count, loff_t *ppos)
@@ -230,11 +131,9 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
230 /* default is to dump the entire data segment */ 131 /* default is to dump the entire data segment */
231 if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) { 132 if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
232 priv->dbgfs_sram_offset = 0x800000; 133 priv->dbgfs_sram_offset = 0x800000;
233 if (!priv->ucode_loaded) { 134 if (!priv->ucode_loaded)
234 IWL_ERR(priv, "No uCode has been loadded.\n");
235 return -EINVAL; 135 return -EINVAL;
236 } 136 img = &priv->fw->img[priv->cur_ucode];
237 img = &priv->fw->img[priv->shrd->ucode_type];
238 priv->dbgfs_sram_len = img->sec[IWL_UCODE_SECTION_DATA].len; 137 priv->dbgfs_sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
239 } 138 }
240 len = priv->dbgfs_sram_len; 139 len = priv->dbgfs_sram_len;
@@ -259,7 +158,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
259 sram = priv->dbgfs_sram_offset & ~0x3; 158 sram = priv->dbgfs_sram_offset & ~0x3;
260 159
261 /* read the first u32 from sram */ 160 /* read the first u32 from sram */
262 val = iwl_read_targ_mem(trans(priv), sram); 161 val = iwl_read_targ_mem(priv->trans, sram);
263 162
264 for (; len; len--) { 163 for (; len; len--) {
265 /* put the address at the start of every line */ 164 /* put the address at the start of every line */
@@ -278,7 +177,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
278 if (++offset == 4) { 177 if (++offset == 4) {
279 sram += 4; 178 sram += 4;
280 offset = 0; 179 offset = 0;
281 val = iwl_read_targ_mem(trans(priv), sram); 180 val = iwl_read_targ_mem(priv->trans, sram);
282 } 181 }
283 182
284 /* put in extra spaces and split lines for human readability */ 183 /* put in extra spaces and split lines for human readability */
@@ -369,14 +268,19 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
369 i, station->sta.sta.addr, 268 i, station->sta.sta.addr,
370 station->sta.station_flags_msk); 269 station->sta.station_flags_msk);
371 pos += scnprintf(buf + pos, bufsz - pos, 270 pos += scnprintf(buf + pos, bufsz - pos,
372 "TID\tseq_num\trate_n_flags\n"); 271 "TID seqno next_rclmd "
272 "rate_n_flags state txq\n");
373 273
374 for (j = 0; j < IWL_MAX_TID_COUNT; j++) { 274 for (j = 0; j < IWL_MAX_TID_COUNT; j++) {
375 tid_data = &priv->tid_data[i][j]; 275 tid_data = &priv->tid_data[i][j];
376 pos += scnprintf(buf + pos, bufsz - pos, 276 pos += scnprintf(buf + pos, bufsz - pos,
377 "%d:\t%#x\t%#x", 277 "%d: 0x%.4x 0x%.4x 0x%.8x "
278 "%d %.2d",
378 j, tid_data->seq_number, 279 j, tid_data->seq_number,
379 tid_data->agg.rate_n_flags); 280 tid_data->next_reclaimed,
281 tid_data->agg.rate_n_flags,
282 tid_data->agg.state,
283 tid_data->agg.txq_id);
380 284
381 if (tid_data->agg.wait_for_ba) 285 if (tid_data->agg.wait_for_ba)
382 pos += scnprintf(buf + pos, bufsz - pos, 286 pos += scnprintf(buf + pos, bufsz - pos,
@@ -403,30 +307,25 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
403 const u8 *ptr; 307 const u8 *ptr;
404 char *buf; 308 char *buf;
405 u16 eeprom_ver; 309 u16 eeprom_ver;
406 size_t eeprom_len = cfg(priv)->base_params->eeprom_size; 310 size_t eeprom_len = priv->cfg->base_params->eeprom_size;
407 buf_size = 4 * eeprom_len + 256; 311 buf_size = 4 * eeprom_len + 256;
408 312
409 if (eeprom_len % 16) { 313 if (eeprom_len % 16)
410 IWL_ERR(priv, "NVM size is not multiple of 16.\n");
411 return -ENODATA; 314 return -ENODATA;
412 }
413 315
414 ptr = priv->shrd->eeprom; 316 ptr = priv->eeprom;
415 if (!ptr) { 317 if (!ptr)
416 IWL_ERR(priv, "Invalid EEPROM/OTP memory\n");
417 return -ENOMEM; 318 return -ENOMEM;
418 }
419 319
420 /* 4 characters for byte 0xYY */ 320 /* 4 characters for byte 0xYY */
421 buf = kzalloc(buf_size, GFP_KERNEL); 321 buf = kzalloc(buf_size, GFP_KERNEL);
422 if (!buf) { 322 if (!buf)
423 IWL_ERR(priv, "Can not allocate Buffer\n");
424 return -ENOMEM; 323 return -ENOMEM;
425 } 324
426 eeprom_ver = iwl_eeprom_query16(priv->shrd, EEPROM_VERSION); 325 eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
427 pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s, " 326 pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s, "
428 "version: 0x%x\n", 327 "version: 0x%x\n",
429 (trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP) 328 (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
430 ? "OTP" : "EEPROM", eeprom_ver); 329 ? "OTP" : "EEPROM", eeprom_ver);
431 for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) { 330 for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
432 pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs); 331 pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
@@ -456,10 +355,8 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
456 return -EAGAIN; 355 return -EAGAIN;
457 356
458 buf = kzalloc(bufsz, GFP_KERNEL); 357 buf = kzalloc(bufsz, GFP_KERNEL);
459 if (!buf) { 358 if (!buf)
460 IWL_ERR(priv, "Can not allocate Buffer\n");
461 return -ENOMEM; 359 return -ENOMEM;
462 }
463 360
464 supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ); 361 supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ);
465 if (supp_band) { 362 if (supp_band) {
@@ -521,8 +418,6 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
521 int pos = 0; 418 int pos = 0;
522 const size_t bufsz = sizeof(buf); 419 const size_t bufsz = sizeof(buf);
523 420
524 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
525 test_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status));
526 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n", 421 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
527 test_bit(STATUS_RF_KILL_HW, &priv->status)); 422 test_bit(STATUS_RF_KILL_HW, &priv->status));
528 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n", 423 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n",
@@ -544,9 +439,9 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
544 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n", 439 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
545 test_bit(STATUS_SCAN_HW, &priv->status)); 440 test_bit(STATUS_SCAN_HW, &priv->status));
546 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n", 441 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
547 test_bit(STATUS_POWER_PMI, &priv->shrd->status)); 442 test_bit(STATUS_POWER_PMI, &priv->status));
548 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n", 443 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
549 test_bit(STATUS_FW_ERROR, &priv->shrd->status)); 444 test_bit(STATUS_FW_ERROR, &priv->status));
550 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 445 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
551} 446}
552 447
@@ -563,16 +458,14 @@ static ssize_t iwl_dbgfs_rx_handlers_read(struct file *file,
563 ssize_t ret; 458 ssize_t ret;
564 459
565 buf = kzalloc(bufsz, GFP_KERNEL); 460 buf = kzalloc(bufsz, GFP_KERNEL);
566 if (!buf) { 461 if (!buf)
567 IWL_ERR(priv, "Can not allocate Buffer\n");
568 return -ENOMEM; 462 return -ENOMEM;
569 }
570 463
571 for (cnt = 0; cnt < REPLY_MAX; cnt++) { 464 for (cnt = 0; cnt < REPLY_MAX; cnt++) {
572 if (priv->rx_handlers_stats[cnt] > 0) 465 if (priv->rx_handlers_stats[cnt] > 0)
573 pos += scnprintf(buf + pos, bufsz - pos, 466 pos += scnprintf(buf + pos, bufsz - pos,
574 "\tRx handler[%36s]:\t\t %u\n", 467 "\tRx handler[%36s]:\t\t %u\n",
575 get_cmd_string(cnt), 468 iwl_dvm_get_cmd_string(cnt),
576 priv->rx_handlers_stats[cnt]); 469 priv->rx_handlers_stats[cnt]);
577 } 470 }
578 471
@@ -680,11 +573,8 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
680 return -EFAULT; 573 return -EFAULT;
681 if (!iwl_is_any_associated(priv)) 574 if (!iwl_is_any_associated(priv))
682 priv->disable_ht40 = ht40 ? true : false; 575 priv->disable_ht40 = ht40 ? true : false;
683 else { 576 else
684 IWL_ERR(priv, "Sta associated with AP - "
685 "Change to 40MHz channel support is not allowed\n");
686 return -EINVAL; 577 return -EINVAL;
687 }
688 578
689 return count; 579 return count;
690} 580}
@@ -816,87 +706,6 @@ DEBUGFS_READ_FILE_OPS(temperature);
816DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override); 706DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
817DEBUGFS_READ_FILE_OPS(current_sleep_command); 707DEBUGFS_READ_FILE_OPS(current_sleep_command);
818 708
819static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
820 char __user *user_buf,
821 size_t count, loff_t *ppos)
822{
823 struct iwl_priv *priv = file->private_data;
824 int pos = 0, ofs = 0;
825 int cnt = 0, entry;
826
827 char *buf;
828 int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
829 (cfg(priv)->base_params->num_of_queues * 32 * 8) + 400;
830 const u8 *ptr;
831 ssize_t ret;
832
833 buf = kzalloc(bufsz, GFP_KERNEL);
834 if (!buf) {
835 IWL_ERR(priv, "Can not allocate buffer\n");
836 return -ENOMEM;
837 }
838 if (priv->tx_traffic && iwl_have_debug_level(IWL_DL_TX)) {
839 ptr = priv->tx_traffic;
840 pos += scnprintf(buf + pos, bufsz - pos,
841 "Tx Traffic idx: %u\n", priv->tx_traffic_idx);
842 for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
843 for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
844 entry++, ofs += 16) {
845 pos += scnprintf(buf + pos, bufsz - pos,
846 "0x%.4x ", ofs);
847 hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
848 buf + pos, bufsz - pos, 0);
849 pos += strlen(buf + pos);
850 if (bufsz - pos > 0)
851 buf[pos++] = '\n';
852 }
853 }
854 }
855
856 if (priv->rx_traffic && iwl_have_debug_level(IWL_DL_RX)) {
857 ptr = priv->rx_traffic;
858 pos += scnprintf(buf + pos, bufsz - pos,
859 "Rx Traffic idx: %u\n", priv->rx_traffic_idx);
860 for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
861 for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
862 entry++, ofs += 16) {
863 pos += scnprintf(buf + pos, bufsz - pos,
864 "0x%.4x ", ofs);
865 hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
866 buf + pos, bufsz - pos, 0);
867 pos += strlen(buf + pos);
868 if (bufsz - pos > 0)
869 buf[pos++] = '\n';
870 }
871 }
872 }
873
874 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
875 kfree(buf);
876 return ret;
877}
878
879static ssize_t iwl_dbgfs_traffic_log_write(struct file *file,
880 const char __user *user_buf,
881 size_t count, loff_t *ppos)
882{
883 struct iwl_priv *priv = file->private_data;
884 char buf[8];
885 int buf_size;
886 int traffic_log;
887
888 memset(buf, 0, sizeof(buf));
889 buf_size = min(count, sizeof(buf) - 1);
890 if (copy_from_user(buf, user_buf, buf_size))
891 return -EFAULT;
892 if (sscanf(buf, "%d", &traffic_log) != 1)
893 return -EFAULT;
894 if (traffic_log == 0)
895 iwl_reset_traffic_log(priv);
896
897 return count;
898}
899
900static const char *fmt_value = " %-30s %10u\n"; 709static const char *fmt_value = " %-30s %10u\n";
901static const char *fmt_hex = " %-30s 0x%02X\n"; 710static const char *fmt_hex = " %-30s 0x%02X\n";
902static const char *fmt_table = " %-30s %10u %10u %10u %10u\n"; 711static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
@@ -947,10 +756,8 @@ static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
947 return -EAGAIN; 756 return -EAGAIN;
948 757
949 buf = kzalloc(bufsz, GFP_KERNEL); 758 buf = kzalloc(bufsz, GFP_KERNEL);
950 if (!buf) { 759 if (!buf)
951 IWL_ERR(priv, "Can not allocate Buffer\n");
952 return -ENOMEM; 760 return -ENOMEM;
953 }
954 761
955 /* 762 /*
956 * the statistic information display here is based on 763 * the statistic information display here is based on
@@ -1376,10 +1183,8 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
1376 return -EAGAIN; 1183 return -EAGAIN;
1377 1184
1378 buf = kzalloc(bufsz, GFP_KERNEL); 1185 buf = kzalloc(bufsz, GFP_KERNEL);
1379 if (!buf) { 1186 if (!buf)
1380 IWL_ERR(priv, "Can not allocate Buffer\n");
1381 return -ENOMEM; 1187 return -ENOMEM;
1382 }
1383 1188
1384 /* the statistic information display here is based on 1189 /* the statistic information display here is based on
1385 * the last statistics notification from uCode 1190 * the last statistics notification from uCode
@@ -1536,17 +1341,17 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
1536 if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) { 1341 if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) {
1537 pos += scnprintf(buf + pos, bufsz - pos, 1342 pos += scnprintf(buf + pos, bufsz - pos,
1538 "tx power: (1/2 dB step)\n"); 1343 "tx power: (1/2 dB step)\n");
1539 if ((hw_params(priv).valid_tx_ant & ANT_A) && 1344 if ((priv->hw_params.valid_tx_ant & ANT_A) &&
1540 tx->tx_power.ant_a) 1345 tx->tx_power.ant_a)
1541 pos += scnprintf(buf + pos, bufsz - pos, 1346 pos += scnprintf(buf + pos, bufsz - pos,
1542 fmt_hex, "antenna A:", 1347 fmt_hex, "antenna A:",
1543 tx->tx_power.ant_a); 1348 tx->tx_power.ant_a);
1544 if ((hw_params(priv).valid_tx_ant & ANT_B) && 1349 if ((priv->hw_params.valid_tx_ant & ANT_B) &&
1545 tx->tx_power.ant_b) 1350 tx->tx_power.ant_b)
1546 pos += scnprintf(buf + pos, bufsz - pos, 1351 pos += scnprintf(buf + pos, bufsz - pos,
1547 fmt_hex, "antenna B:", 1352 fmt_hex, "antenna B:",
1548 tx->tx_power.ant_b); 1353 tx->tx_power.ant_b);
1549 if ((hw_params(priv).valid_tx_ant & ANT_C) && 1354 if ((priv->hw_params.valid_tx_ant & ANT_C) &&
1550 tx->tx_power.ant_c) 1355 tx->tx_power.ant_c)
1551 pos += scnprintf(buf + pos, bufsz - pos, 1356 pos += scnprintf(buf + pos, bufsz - pos,
1552 fmt_hex, "antenna C:", 1357 fmt_hex, "antenna C:",
@@ -1578,10 +1383,8 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
1578 return -EAGAIN; 1383 return -EAGAIN;
1579 1384
1580 buf = kzalloc(bufsz, GFP_KERNEL); 1385 buf = kzalloc(bufsz, GFP_KERNEL);
1581 if (!buf) { 1386 if (!buf)
1582 IWL_ERR(priv, "Can not allocate Buffer\n");
1583 return -ENOMEM; 1387 return -ENOMEM;
1584 }
1585 1388
1586 /* the statistic information display here is based on 1389 /* the statistic information display here is based on
1587 * the last statistics notification from uCode 1390 * the last statistics notification from uCode
@@ -1704,16 +1507,11 @@ static ssize_t iwl_dbgfs_ucode_bt_stats_read(struct file *file,
1704 ret = iwl_send_statistics_request(priv, CMD_SYNC, false); 1507 ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
1705 mutex_unlock(&priv->mutex); 1508 mutex_unlock(&priv->mutex);
1706 1509
1707 if (ret) { 1510 if (ret)
1708 IWL_ERR(priv,
1709 "Error sending statistics request: %zd\n", ret);
1710 return -EAGAIN; 1511 return -EAGAIN;
1711 }
1712 buf = kzalloc(bufsz, GFP_KERNEL); 1512 buf = kzalloc(bufsz, GFP_KERNEL);
1713 if (!buf) { 1513 if (!buf)
1714 IWL_ERR(priv, "Can not allocate Buffer\n");
1715 return -ENOMEM; 1514 return -ENOMEM;
1716 }
1717 1515
1718 /* 1516 /*
1719 * the statistic information display here is based on 1517 * the statistic information display here is based on
@@ -1790,10 +1588,8 @@ static ssize_t iwl_dbgfs_reply_tx_error_read(struct file *file,
1790 return -EAGAIN; 1588 return -EAGAIN;
1791 1589
1792 buf = kzalloc(bufsz, GFP_KERNEL); 1590 buf = kzalloc(bufsz, GFP_KERNEL);
1793 if (!buf) { 1591 if (!buf)
1794 IWL_ERR(priv, "Can not allocate Buffer\n");
1795 return -ENOMEM; 1592 return -ENOMEM;
1796 }
1797 1593
1798 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_TX_Error:\n"); 1594 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_TX_Error:\n");
1799 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t\t%u\n", 1595 pos += scnprintf(buf + pos, bufsz - pos, "%s:\t\t\t\t%u\n",
@@ -1933,10 +1729,8 @@ static ssize_t iwl_dbgfs_sensitivity_read(struct file *file,
1933 1729
1934 data = &priv->sensitivity_data; 1730 data = &priv->sensitivity_data;
1935 buf = kzalloc(bufsz, GFP_KERNEL); 1731 buf = kzalloc(bufsz, GFP_KERNEL);
1936 if (!buf) { 1732 if (!buf)
1937 IWL_ERR(priv, "Can not allocate Buffer\n");
1938 return -ENOMEM; 1733 return -ENOMEM;
1939 }
1940 1734
1941 pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n", 1735 pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
1942 data->auto_corr_ofdm); 1736 data->auto_corr_ofdm);
@@ -2014,10 +1808,8 @@ static ssize_t iwl_dbgfs_chain_noise_read(struct file *file,
2014 1808
2015 data = &priv->chain_noise_data; 1809 data = &priv->chain_noise_data;
2016 buf = kzalloc(bufsz, GFP_KERNEL); 1810 buf = kzalloc(bufsz, GFP_KERNEL);
2017 if (!buf) { 1811 if (!buf)
2018 IWL_ERR(priv, "Can not allocate Buffer\n");
2019 return -ENOMEM; 1812 return -ENOMEM;
2020 }
2021 1813
2022 pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n", 1814 pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
2023 data->active_chains); 1815 data->active_chains);
@@ -2068,7 +1860,7 @@ static ssize_t iwl_dbgfs_power_save_status_read(struct file *file,
2068 const size_t bufsz = sizeof(buf); 1860 const size_t bufsz = sizeof(buf);
2069 u32 pwrsave_status; 1861 u32 pwrsave_status;
2070 1862
2071 pwrsave_status = iwl_read32(trans(priv), CSR_GP_CNTRL) & 1863 pwrsave_status = iwl_read32(priv->trans, CSR_GP_CNTRL) &
2072 CSR_GP_REG_POWER_SAVE_STATUS_MSK; 1864 CSR_GP_REG_POWER_SAVE_STATUS_MSK;
2073 1865
2074 pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: "); 1866 pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
@@ -2262,59 +2054,39 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
2262 return count; 2054 return count;
2263} 2055}
2264 2056
2265static ssize_t iwl_dbgfs_force_reset_read(struct file *file, 2057static ssize_t iwl_dbgfs_rf_reset_read(struct file *file,
2266 char __user *user_buf, 2058 char __user *user_buf,
2267 size_t count, loff_t *ppos) 2059 size_t count, loff_t *ppos)
2268{ 2060{
2269 struct iwl_priv *priv = file->private_data; 2061 struct iwl_priv *priv = file->private_data;
2270 int i, pos = 0; 2062 int pos = 0;
2271 char buf[300]; 2063 char buf[300];
2272 const size_t bufsz = sizeof(buf); 2064 const size_t bufsz = sizeof(buf);
2273 struct iwl_force_reset *force_reset; 2065 struct iwl_rf_reset *rf_reset = &priv->rf_reset;
2066
2067 pos += scnprintf(buf + pos, bufsz - pos,
2068 "RF reset statistics\n");
2069 pos += scnprintf(buf + pos, bufsz - pos,
2070 "\tnumber of reset request: %d\n",
2071 rf_reset->reset_request_count);
2072 pos += scnprintf(buf + pos, bufsz - pos,
2073 "\tnumber of reset request success: %d\n",
2074 rf_reset->reset_success_count);
2075 pos += scnprintf(buf + pos, bufsz - pos,
2076 "\tnumber of reset request reject: %d\n",
2077 rf_reset->reset_reject_count);
2274 2078
2275 for (i = 0; i < IWL_MAX_FORCE_RESET; i++) {
2276 force_reset = &priv->force_reset[i];
2277 pos += scnprintf(buf + pos, bufsz - pos,
2278 "Force reset method %d\n", i);
2279 pos += scnprintf(buf + pos, bufsz - pos,
2280 "\tnumber of reset request: %d\n",
2281 force_reset->reset_request_count);
2282 pos += scnprintf(buf + pos, bufsz - pos,
2283 "\tnumber of reset request success: %d\n",
2284 force_reset->reset_success_count);
2285 pos += scnprintf(buf + pos, bufsz - pos,
2286 "\tnumber of reset request reject: %d\n",
2287 force_reset->reset_reject_count);
2288 pos += scnprintf(buf + pos, bufsz - pos,
2289 "\treset duration: %lu\n",
2290 force_reset->reset_duration);
2291 }
2292 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 2079 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2293} 2080}
2294 2081
2295static ssize_t iwl_dbgfs_force_reset_write(struct file *file, 2082static ssize_t iwl_dbgfs_rf_reset_write(struct file *file,
2296 const char __user *user_buf, 2083 const char __user *user_buf,
2297 size_t count, loff_t *ppos) { 2084 size_t count, loff_t *ppos) {
2298 2085
2299 struct iwl_priv *priv = file->private_data; 2086 struct iwl_priv *priv = file->private_data;
2300 char buf[8]; 2087 int ret;
2301 int buf_size;
2302 int reset, ret;
2303 2088
2304 memset(buf, 0, sizeof(buf)); 2089 ret = iwl_force_rf_reset(priv, true);
2305 buf_size = min(count, sizeof(buf) - 1);
2306 if (copy_from_user(buf, user_buf, buf_size))
2307 return -EFAULT;
2308 if (sscanf(buf, "%d", &reset) != 1)
2309 return -EINVAL;
2310 switch (reset) {
2311 case IWL_RF_RESET:
2312 case IWL_FW_RESET:
2313 ret = iwl_force_reset(priv, reset, true);
2314 break;
2315 default:
2316 return -EINVAL;
2317 }
2318 return ret ? ret : count; 2090 return ret ? ret : count;
2319} 2091}
2320 2092
@@ -2342,29 +2114,6 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
2342 return count; 2114 return count;
2343} 2115}
2344 2116
2345static ssize_t iwl_dbgfs_wd_timeout_write(struct file *file,
2346 const char __user *user_buf,
2347 size_t count, loff_t *ppos)
2348{
2349 struct iwl_priv *priv = file->private_data;
2350 char buf[8];
2351 int buf_size;
2352 int timeout;
2353
2354 memset(buf, 0, sizeof(buf));
2355 buf_size = min(count, sizeof(buf) - 1);
2356 if (copy_from_user(buf, user_buf, buf_size))
2357 return -EFAULT;
2358 if (sscanf(buf, "%d", &timeout) != 1)
2359 return -EINVAL;
2360 if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT)
2361 timeout = IWL_DEF_WD_TIMEOUT;
2362
2363 hw_params(priv).wd_timeout = timeout;
2364 iwl_setup_watchdog(priv);
2365 return count;
2366}
2367
2368static ssize_t iwl_dbgfs_bt_traffic_read(struct file *file, 2117static ssize_t iwl_dbgfs_bt_traffic_read(struct file *file,
2369 char __user *user_buf, 2118 char __user *user_buf,
2370 size_t count, loff_t *ppos) { 2119 size_t count, loff_t *ppos) {
@@ -2420,10 +2169,10 @@ static ssize_t iwl_dbgfs_protection_mode_read(struct file *file,
2420 char buf[40]; 2169 char buf[40];
2421 const size_t bufsz = sizeof(buf); 2170 const size_t bufsz = sizeof(buf);
2422 2171
2423 if (cfg(priv)->ht_params) 2172 if (priv->cfg->ht_params)
2424 pos += scnprintf(buf + pos, bufsz - pos, 2173 pos += scnprintf(buf + pos, bufsz - pos,
2425 "use %s for aggregation\n", 2174 "use %s for aggregation\n",
2426 (hw_params(priv).use_rts_for_aggregation) ? 2175 (priv->hw_params.use_rts_for_aggregation) ?
2427 "rts/cts" : "cts-to-self"); 2176 "rts/cts" : "cts-to-self");
2428 else 2177 else
2429 pos += scnprintf(buf + pos, bufsz - pos, "N/A"); 2178 pos += scnprintf(buf + pos, bufsz - pos, "N/A");
@@ -2440,7 +2189,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
2440 int buf_size; 2189 int buf_size;
2441 int rts; 2190 int rts;
2442 2191
2443 if (!cfg(priv)->ht_params) 2192 if (!priv->cfg->ht_params)
2444 return -EINVAL; 2193 return -EINVAL;
2445 2194
2446 memset(buf, 0, sizeof(buf)); 2195 memset(buf, 0, sizeof(buf));
@@ -2450,12 +2199,29 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
2450 if (sscanf(buf, "%d", &rts) != 1) 2199 if (sscanf(buf, "%d", &rts) != 1)
2451 return -EINVAL; 2200 return -EINVAL;
2452 if (rts) 2201 if (rts)
2453 hw_params(priv).use_rts_for_aggregation = true; 2202 priv->hw_params.use_rts_for_aggregation = true;
2454 else 2203 else
2455 hw_params(priv).use_rts_for_aggregation = false; 2204 priv->hw_params.use_rts_for_aggregation = false;
2456 return count; 2205 return count;
2457} 2206}
2458 2207
2208static int iwl_cmd_echo_test(struct iwl_priv *priv)
2209{
2210 int ret;
2211 struct iwl_host_cmd cmd = {
2212 .id = REPLY_ECHO,
2213 .len = { 0 },
2214 .flags = CMD_SYNC,
2215 };
2216
2217 ret = iwl_dvm_send_cmd(priv, &cmd);
2218 if (ret)
2219 IWL_ERR(priv, "echo testing fail: 0X%x\n", ret);
2220 else
2221 IWL_DEBUG_INFO(priv, "echo testing pass\n");
2222 return ret;
2223}
2224
2459static ssize_t iwl_dbgfs_echo_test_write(struct file *file, 2225static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
2460 const char __user *user_buf, 2226 const char __user *user_buf,
2461 size_t count, loff_t *ppos) 2227 size_t count, loff_t *ppos)
@@ -2473,9 +2239,93 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
2473 return count; 2239 return count;
2474} 2240}
2475 2241
2476DEBUGFS_READ_FILE_OPS(rx_statistics); 2242static ssize_t iwl_dbgfs_log_event_read(struct file *file,
2477DEBUGFS_READ_FILE_OPS(tx_statistics); 2243 char __user *user_buf,
2478DEBUGFS_READ_WRITE_FILE_OPS(traffic_log); 2244 size_t count, loff_t *ppos)
2245{
2246 struct iwl_priv *priv = file->private_data;
2247 char *buf;
2248 int pos = 0;
2249 ssize_t ret = -ENOMEM;
2250
2251 ret = pos = iwl_dump_nic_event_log(priv, true, &buf, true);
2252 if (buf) {
2253 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2254 kfree(buf);
2255 }
2256 return ret;
2257}
2258
2259static ssize_t iwl_dbgfs_log_event_write(struct file *file,
2260 const char __user *user_buf,
2261 size_t count, loff_t *ppos)
2262{
2263 struct iwl_priv *priv = file->private_data;
2264 u32 event_log_flag;
2265 char buf[8];
2266 int buf_size;
2267
2268 memset(buf, 0, sizeof(buf));
2269 buf_size = min(count, sizeof(buf) - 1);
2270 if (copy_from_user(buf, user_buf, buf_size))
2271 return -EFAULT;
2272 if (sscanf(buf, "%d", &event_log_flag) != 1)
2273 return -EFAULT;
2274 if (event_log_flag == 1)
2275 iwl_dump_nic_event_log(priv, true, NULL, false);
2276
2277 return count;
2278}
2279
2280static ssize_t iwl_dbgfs_calib_disabled_read(struct file *file,
2281 char __user *user_buf,
2282 size_t count, loff_t *ppos)
2283{
2284 struct iwl_priv *priv = file->private_data;
2285 char buf[120];
2286 int pos = 0;
2287 const size_t bufsz = sizeof(buf);
2288
2289 pos += scnprintf(buf + pos, bufsz - pos,
2290 "Sensitivity calibrations %s\n",
2291 (priv->calib_disabled &
2292 IWL_SENSITIVITY_CALIB_DISABLED) ?
2293 "DISABLED" : "ENABLED");
2294 pos += scnprintf(buf + pos, bufsz - pos,
2295 "Chain noise calibrations %s\n",
2296 (priv->calib_disabled &
2297 IWL_CHAIN_NOISE_CALIB_DISABLED) ?
2298 "DISABLED" : "ENABLED");
2299 pos += scnprintf(buf + pos, bufsz - pos,
2300 "Tx power calibrations %s\n",
2301 (priv->calib_disabled &
2302 IWL_TX_POWER_CALIB_DISABLED) ?
2303 "DISABLED" : "ENABLED");
2304
2305 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2306}
2307
2308static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
2309 const char __user *user_buf,
2310 size_t count, loff_t *ppos)
2311{
2312 struct iwl_priv *priv = file->private_data;
2313 char buf[8];
2314 u32 calib_disabled;
2315 int buf_size;
2316
2317 memset(buf, 0, sizeof(buf));
2318 buf_size = min(count, sizeof(buf) - 1);
2319 if (copy_from_user(buf, user_buf, buf_size))
2320 return -EFAULT;
2321 if (sscanf(buf, "%x", &calib_disabled) != 1)
2322 return -EFAULT;
2323
2324 priv->calib_disabled = calib_disabled;
2325
2326 return count;
2327}
2328
2479DEBUGFS_READ_FILE_OPS(ucode_rx_stats); 2329DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
2480DEBUGFS_READ_FILE_OPS(ucode_tx_stats); 2330DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
2481DEBUGFS_READ_FILE_OPS(ucode_general_stats); 2331DEBUGFS_READ_FILE_OPS(ucode_general_stats);
@@ -2483,20 +2333,20 @@ DEBUGFS_READ_FILE_OPS(sensitivity);
2483DEBUGFS_READ_FILE_OPS(chain_noise); 2333DEBUGFS_READ_FILE_OPS(chain_noise);
2484DEBUGFS_READ_FILE_OPS(power_save_status); 2334DEBUGFS_READ_FILE_OPS(power_save_status);
2485DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics); 2335DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
2486DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
2487DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing); 2336DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing);
2488DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon); 2337DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
2489DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta); 2338DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta);
2490DEBUGFS_READ_WRITE_FILE_OPS(force_reset); 2339DEBUGFS_READ_WRITE_FILE_OPS(rf_reset);
2491DEBUGFS_READ_FILE_OPS(rxon_flags); 2340DEBUGFS_READ_FILE_OPS(rxon_flags);
2492DEBUGFS_READ_FILE_OPS(rxon_filter_flags); 2341DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
2493DEBUGFS_WRITE_FILE_OPS(txfifo_flush); 2342DEBUGFS_WRITE_FILE_OPS(txfifo_flush);
2494DEBUGFS_READ_FILE_OPS(ucode_bt_stats); 2343DEBUGFS_READ_FILE_OPS(ucode_bt_stats);
2495DEBUGFS_WRITE_FILE_OPS(wd_timeout);
2496DEBUGFS_READ_FILE_OPS(bt_traffic); 2344DEBUGFS_READ_FILE_OPS(bt_traffic);
2497DEBUGFS_READ_WRITE_FILE_OPS(protection_mode); 2345DEBUGFS_READ_WRITE_FILE_OPS(protection_mode);
2498DEBUGFS_READ_FILE_OPS(reply_tx_error); 2346DEBUGFS_READ_FILE_OPS(reply_tx_error);
2499DEBUGFS_WRITE_FILE_OPS(echo_test); 2347DEBUGFS_WRITE_FILE_OPS(echo_test);
2348DEBUGFS_READ_WRITE_FILE_OPS(log_event);
2349DEBUGFS_READ_WRITE_FILE_OPS(calib_disabled);
2500 2350
2501/* 2351/*
2502 * Create the debugfs files and directories 2352 * Create the debugfs files and directories
@@ -2537,15 +2387,11 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
2537 DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR); 2387 DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
2538 DEBUGFS_ADD_FILE(temperature, dir_data, S_IRUSR); 2388 DEBUGFS_ADD_FILE(temperature, dir_data, S_IRUSR);
2539 2389
2540 DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
2541 DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR);
2542 DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
2543 DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR); 2390 DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
2544 DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR); 2391 DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
2545 DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
2546 DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR); 2392 DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
2547 DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR); 2393 DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR);
2548 DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR); 2394 DEBUGFS_ADD_FILE(rf_reset, dir_debug, S_IWUSR | S_IRUSR);
2549 DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR); 2395 DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
2550 DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR); 2396 DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
2551 DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR); 2397 DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
@@ -2558,17 +2404,16 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
2558 DEBUGFS_ADD_FILE(reply_tx_error, dir_debug, S_IRUSR); 2404 DEBUGFS_ADD_FILE(reply_tx_error, dir_debug, S_IRUSR);
2559 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR); 2405 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
2560 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR); 2406 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
2561 DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
2562 DEBUGFS_ADD_FILE(echo_test, dir_debug, S_IWUSR); 2407 DEBUGFS_ADD_FILE(echo_test, dir_debug, S_IWUSR);
2408 DEBUGFS_ADD_FILE(log_event, dir_debug, S_IWUSR | S_IRUSR);
2409
2563 if (iwl_advanced_bt_coexist(priv)) 2410 if (iwl_advanced_bt_coexist(priv))
2564 DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR); 2411 DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR);
2565 2412
2566 DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf, 2413 /* Calibrations disabled/enabled status*/
2567 &priv->disable_sens_cal); 2414 DEBUGFS_ADD_FILE(calib_disabled, dir_rf, S_IWUSR | S_IRUSR);
2568 DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
2569 &priv->disable_chain_noise_cal);
2570 2415
2571 if (iwl_trans_dbgfs_register(trans(priv), dir_debug)) 2416 if (iwl_trans_dbgfs_register(priv->trans, dir_debug))
2572 goto err; 2417 goto err;
2573 return 0; 2418 return 0;
2574 2419
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 16956b777f96..f70219c3b122 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -38,6 +38,7 @@
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/mutex.h> 39#include <linux/mutex.h>
40 40
41#include "iwl-fw.h"
41#include "iwl-eeprom.h" 42#include "iwl-eeprom.h"
42#include "iwl-csr.h" 43#include "iwl-csr.h"
43#include "iwl-debug.h" 44#include "iwl-debug.h"
@@ -47,12 +48,9 @@
47#include "iwl-agn-rs.h" 48#include "iwl-agn-rs.h"
48#include "iwl-agn-tt.h" 49#include "iwl-agn-tt.h"
49#include "iwl-trans.h" 50#include "iwl-trans.h"
50#include "iwl-shared.h"
51#include "iwl-op-mode.h" 51#include "iwl-op-mode.h"
52#include "iwl-notif-wait.h" 52#include "iwl-notif-wait.h"
53 53
54struct iwl_tx_queue;
55
56/* CT-KILL constants */ 54/* CT-KILL constants */
57#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */ 55#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
58#define CT_KILL_THRESHOLD 114 /* in Celsius */ 56#define CT_KILL_THRESHOLD 114 /* in Celsius */
@@ -196,6 +194,7 @@ struct iwl_qos_info {
196 * These states relate to a specific RA / TID. 194 * These states relate to a specific RA / TID.
197 * 195 *
198 * @IWL_AGG_OFF: aggregation is not used 196 * @IWL_AGG_OFF: aggregation is not used
197 * @IWL_AGG_STARTING: aggregation are starting (between start and oper)
199 * @IWL_AGG_ON: aggregation session is up 198 * @IWL_AGG_ON: aggregation session is up
200 * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the 199 * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the
201 * HW queue to be empty from packets for this RA /TID. 200 * HW queue to be empty from packets for this RA /TID.
@@ -204,6 +203,7 @@ struct iwl_qos_info {
204 */ 203 */
205enum iwl_agg_state { 204enum iwl_agg_state {
206 IWL_AGG_OFF = 0, 205 IWL_AGG_OFF = 0,
206 IWL_AGG_STARTING,
207 IWL_AGG_ON, 207 IWL_AGG_ON,
208 IWL_EMPTYING_HW_QUEUE_ADDBA, 208 IWL_EMPTYING_HW_QUEUE_ADDBA,
209 IWL_EMPTYING_HW_QUEUE_DELBA, 209 IWL_EMPTYING_HW_QUEUE_DELBA,
@@ -220,8 +220,7 @@ enum iwl_agg_state {
220 * Tx response (REPLY_TX), and the block ack notification 220 * Tx response (REPLY_TX), and the block ack notification
221 * (REPLY_COMPRESSED_BA). 221 * (REPLY_COMPRESSED_BA).
222 * @state: state of the BA agreement establishment / tear down. 222 * @state: state of the BA agreement establishment / tear down.
223 * @txq_id: Tx queue used by the BA session - used by the transport layer. 223 * @txq_id: Tx queue used by the BA session
224 * Needed by the upper layer for debugfs only.
225 * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or 224 * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
226 * the first packet to be sent in legacy HW queue in Tx AGG stop flow. 225 * the first packet to be sent in legacy HW queue in Tx AGG stop flow.
227 * Basically when next_reclaimed reaches ssn, we can tell mac80211 that 226 * Basically when next_reclaimed reaches ssn, we can tell mac80211 that
@@ -507,44 +506,6 @@ struct reply_agg_tx_error_statistics {
507 u32 unknown; 506 u32 unknown;
508}; 507};
509 508
510/* management statistics */
511enum iwl_mgmt_stats {
512 MANAGEMENT_ASSOC_REQ = 0,
513 MANAGEMENT_ASSOC_RESP,
514 MANAGEMENT_REASSOC_REQ,
515 MANAGEMENT_REASSOC_RESP,
516 MANAGEMENT_PROBE_REQ,
517 MANAGEMENT_PROBE_RESP,
518 MANAGEMENT_BEACON,
519 MANAGEMENT_ATIM,
520 MANAGEMENT_DISASSOC,
521 MANAGEMENT_AUTH,
522 MANAGEMENT_DEAUTH,
523 MANAGEMENT_ACTION,
524 MANAGEMENT_MAX,
525};
526/* control statistics */
527enum iwl_ctrl_stats {
528 CONTROL_BACK_REQ = 0,
529 CONTROL_BACK,
530 CONTROL_PSPOLL,
531 CONTROL_RTS,
532 CONTROL_CTS,
533 CONTROL_ACK,
534 CONTROL_CFEND,
535 CONTROL_CFENDACK,
536 CONTROL_MAX,
537};
538
539struct traffic_stats {
540#ifdef CONFIG_IWLWIFI_DEBUGFS
541 u32 mgmt[MANAGEMENT_MAX];
542 u32 ctrl[CONTROL_MAX];
543 u32 data_cnt;
544 u64 data_bytes;
545#endif
546};
547
548/* 509/*
549 * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds 510 * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
550 * to perform continuous uCode event logging operation if enabled 511 * to perform continuous uCode event logging operation if enabled
@@ -571,24 +532,7 @@ struct iwl_event_log {
571 int wraps_more_count; 532 int wraps_more_count;
572}; 533};
573 534
574/*
575 * This is the threshold value of plcp error rate per 100mSecs. It is
576 * used to set and check for the validity of plcp_delta.
577 */
578#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN (1)
579#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF (50)
580#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF (100)
581#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF (200)
582#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX (255)
583#define IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE (0)
584
585#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3) 535#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3)
586#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
587
588/* TX queue watchdog timeouts in mSecs */
589#define IWL_DEF_WD_TIMEOUT (2000)
590#define IWL_LONG_WD_TIMEOUT (10000)
591#define IWL_MAX_WD_TIMEOUT (120000)
592 536
593/* BT Antenna Coupling Threshold (dB) */ 537/* BT Antenna Coupling Threshold (dB) */
594#define IWL_BT_ANTENNA_COUPLING_THRESHOLD (35) 538#define IWL_BT_ANTENNA_COUPLING_THRESHOLD (35)
@@ -598,18 +542,18 @@ struct iwl_event_log {
598#define IWL_MAX_CONTINUE_RELOAD_CNT 4 542#define IWL_MAX_CONTINUE_RELOAD_CNT 4
599 543
600 544
601enum iwl_reset { 545struct iwl_rf_reset {
602 IWL_RF_RESET = 0,
603 IWL_FW_RESET,
604 IWL_MAX_FORCE_RESET,
605};
606
607struct iwl_force_reset {
608 int reset_request_count; 546 int reset_request_count;
609 int reset_success_count; 547 int reset_success_count;
610 int reset_reject_count; 548 int reset_reject_count;
611 unsigned long reset_duration; 549 unsigned long last_reset_jiffies;
612 unsigned long last_force_reset_jiffies; 550};
551
552enum iwl_rxon_context_id {
553 IWL_RXON_CTX_BSS,
554 IWL_RXON_CTX_PAN,
555
556 NUM_IWL_RXON_CTX
613}; 557};
614 558
615/* extend beacon time format bit shifting */ 559/* extend beacon time format bit shifting */
@@ -623,6 +567,10 @@ struct iwl_force_reset {
623struct iwl_rxon_context { 567struct iwl_rxon_context {
624 struct ieee80211_vif *vif; 568 struct ieee80211_vif *vif;
625 569
570 u8 mcast_queue;
571 u8 ac_to_queue[IEEE80211_NUM_ACS];
572 u8 ac_to_fifo[IEEE80211_NUM_ACS];
573
626 /* 574 /*
627 * We could use the vif to indicate active, but we 575 * We could use the vif to indicate active, but we
628 * also need it to be active during disabling when 576 * also need it to be active during disabling when
@@ -677,6 +625,52 @@ enum iwl_scan_type {
677 IWL_SCAN_ROC, 625 IWL_SCAN_ROC,
678}; 626};
679 627
628/**
629 * struct iwl_hw_params
630 *
631 * Holds the module parameters
632 *
633 * @tx_chains_num: Number of TX chains
634 * @rx_chains_num: Number of RX chains
635 * @valid_tx_ant: usable antennas for TX
636 * @valid_rx_ant: usable antennas for RX
637 * @ht40_channel: is 40MHz width possible: BIT(IEEE80211_BAND_XXX)
638 * @sku: sku read from EEPROM
639 * @ct_kill_threshold: temperature threshold - in hw dependent unit
640 * @ct_kill_exit_threshold: when to reeable the device - in hw dependent unit
641 * relevant for 1000, 6000 and up
642 * @struct iwl_sensitivity_ranges: range of sensitivity values
643 * @use_rts_for_aggregation: use rts/cts protection for HT traffic
644 */
645struct iwl_hw_params {
646 u8 tx_chains_num;
647 u8 rx_chains_num;
648 u8 valid_tx_ant;
649 u8 valid_rx_ant;
650 u8 ht40_channel;
651 bool use_rts_for_aggregation;
652 u16 sku;
653 u32 ct_kill_threshold;
654 u32 ct_kill_exit_threshold;
655
656 const struct iwl_sensitivity_ranges *sens;
657};
658
659struct iwl_lib_ops {
660 /* set hw dependent parameters */
661 void (*set_hw_params)(struct iwl_priv *priv);
662 int (*set_channel_switch)(struct iwl_priv *priv,
663 struct ieee80211_channel_switch *ch_switch);
664 /* device specific configuration */
665 void (*nic_config)(struct iwl_priv *priv);
666
667 /* eeprom operations (as defined in iwl-eeprom.h) */
668 struct iwl_eeprom_ops eeprom_ops;
669
670 /* temperature */
671 void (*temperature)(struct iwl_priv *priv);
672};
673
680#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE 674#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
681struct iwl_testmode_trace { 675struct iwl_testmode_trace {
682 u32 buff_size; 676 u32 buff_size;
@@ -701,6 +695,17 @@ struct iwl_wipan_noa_data {
701 u8 data[]; 695 u8 data[];
702}; 696};
703 697
698/* Calibration disabling bit mask */
699enum {
700 IWL_CALIB_ENABLE_ALL = 0,
701
702 IWL_SENSITIVITY_CALIB_DISABLED = BIT(0),
703 IWL_CHAIN_NOISE_CALIB_DISABLED = BIT(1),
704 IWL_TX_POWER_CALIB_DISABLED = BIT(2),
705
706 IWL_CALIB_DISABLE_ALL = 0xFFFFFFFF,
707};
708
704#define IWL_OP_MODE_GET_DVM(_iwl_op_mode) \ 709#define IWL_OP_MODE_GET_DVM(_iwl_op_mode) \
705 ((struct iwl_priv *) ((_iwl_op_mode)->op_mode_specific)) 710 ((struct iwl_priv *) ((_iwl_op_mode)->op_mode_specific))
706 711
@@ -710,9 +715,11 @@ struct iwl_wipan_noa_data {
710 715
711struct iwl_priv { 716struct iwl_priv {
712 717
713 /*data shared among all the driver's layers */ 718 struct iwl_trans *trans;
714 struct iwl_shared *shrd; 719 struct device *dev; /* for debug prints only */
720 const struct iwl_cfg *cfg;
715 const struct iwl_fw *fw; 721 const struct iwl_fw *fw;
722 const struct iwl_lib_ops *lib;
716 unsigned long status; 723 unsigned long status;
717 724
718 spinlock_t sta_lock; 725 spinlock_t sta_lock;
@@ -720,6 +727,11 @@ struct iwl_priv {
720 727
721 unsigned long transport_queue_stop; 728 unsigned long transport_queue_stop;
722 bool passive_no_rx; 729 bool passive_no_rx;
730#define IWL_INVALID_MAC80211_QUEUE 0xff
731 u8 queue_to_mac80211[IWL_MAX_HW_QUEUES];
732 atomic_t queue_stop_count[IWL_MAX_HW_QUEUES];
733
734 unsigned long agg_q_alloc[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
723 735
724 /* ieee device used by generic ieee processing code */ 736 /* ieee device used by generic ieee processing code */
725 struct ieee80211_hw *hw; 737 struct ieee80211_hw *hw;
@@ -730,7 +742,10 @@ struct iwl_priv {
730 742
731 struct workqueue_struct *workqueue; 743 struct workqueue_struct *workqueue;
732 744
745 struct iwl_hw_params hw_params;
746
733 enum ieee80211_band band; 747 enum ieee80211_band band;
748 u8 valid_contexts;
734 749
735 void (*pre_rx_handler)(struct iwl_priv *priv, 750 void (*pre_rx_handler)(struct iwl_priv *priv,
736 struct iwl_rx_cmd_buffer *rxb); 751 struct iwl_rx_cmd_buffer *rxb);
@@ -763,8 +778,8 @@ struct iwl_priv {
763 /*counters */ 778 /*counters */
764 u32 rx_handlers_stats[REPLY_MAX]; 779 u32 rx_handlers_stats[REPLY_MAX];
765 780
766 /* force reset */ 781 /* rf reset */
767 struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET]; 782 struct iwl_rf_reset rf_reset;
768 783
769 /* firmware reload counter and timestamp */ 784 /* firmware reload counter and timestamp */
770 unsigned long reload_jiffies; 785 unsigned long reload_jiffies;
@@ -810,8 +825,6 @@ struct iwl_priv {
810 825
811 __le16 switch_channel; 826 __le16 switch_channel;
812 827
813 u16 active_rate;
814
815 u8 start_calib; 828 u8 start_calib;
816 struct iwl_sensitivity_data sensitivity_data; 829 struct iwl_sensitivity_data sensitivity_data;
817 struct iwl_chain_noise_data chain_noise_data; 830 struct iwl_chain_noise_data chain_noise_data;
@@ -825,10 +838,6 @@ struct iwl_priv {
825 838
826 int activity_timer_active; 839 int activity_timer_active;
827 840
828 /* counts mgmt, ctl, and data packets */
829 struct traffic_stats tx_stats;
830 struct traffic_stats rx_stats;
831
832 struct iwl_power_mgr power_data; 841 struct iwl_power_mgr power_data;
833 struct iwl_tt_mgmt thermal_throttle; 842 struct iwl_tt_mgmt thermal_throttle;
834 843
@@ -948,23 +957,21 @@ struct iwl_priv {
948 957
949#ifdef CONFIG_IWLWIFI_DEBUGFS 958#ifdef CONFIG_IWLWIFI_DEBUGFS
950 /* debugfs */ 959 /* debugfs */
951 u16 tx_traffic_idx;
952 u16 rx_traffic_idx;
953 u8 *tx_traffic;
954 u8 *rx_traffic;
955 struct dentry *debugfs_dir; 960 struct dentry *debugfs_dir;
956 u32 dbgfs_sram_offset, dbgfs_sram_len; 961 u32 dbgfs_sram_offset, dbgfs_sram_len;
957 bool disable_ht40; 962 bool disable_ht40;
958 void *wowlan_sram; 963 void *wowlan_sram;
959#endif /* CONFIG_IWLWIFI_DEBUGFS */ 964#endif /* CONFIG_IWLWIFI_DEBUGFS */
960 965
966 /* eeprom -- this is in the card's little endian byte order */
967 u8 *eeprom;
968 enum iwl_nvm_type nvm_device_type;
969
961 struct work_struct txpower_work; 970 struct work_struct txpower_work;
962 u32 disable_sens_cal; 971 u32 calib_disabled;
963 u32 disable_chain_noise_cal;
964 struct work_struct run_time_calib_work; 972 struct work_struct run_time_calib_work;
965 struct timer_list statistics_periodic; 973 struct timer_list statistics_periodic;
966 struct timer_list ucode_trace; 974 struct timer_list ucode_trace;
967 struct timer_list watchdog;
968 975
969 struct iwl_event_log event_log; 976 struct iwl_event_log event_log;
970 977
@@ -982,10 +989,18 @@ struct iwl_priv {
982 __le64 replay_ctr; 989 __le64 replay_ctr;
983 __le16 last_seq_ctl; 990 __le16 last_seq_ctl;
984 bool have_rekey_data; 991 bool have_rekey_data;
992
993 /* device_pointers: pointers to ucode event tables */
994 struct {
995 u32 error_event_table;
996 u32 log_event_table;
997 } device_pointers;
998
999 /* indicator of loaded ucode image */
1000 enum iwl_ucode_type cur_ucode;
985}; /*iwl_priv */ 1001}; /*iwl_priv */
986 1002
987extern struct kmem_cache *iwl_tx_cmd_pool; 1003extern struct kmem_cache *iwl_tx_cmd_pool;
988extern struct iwl_mod_params iwlagn_mod_params;
989 1004
990static inline struct iwl_rxon_context * 1005static inline struct iwl_rxon_context *
991iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif) 1006iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif)
@@ -998,7 +1013,7 @@ iwl_rxon_ctx_from_vif(struct ieee80211_vif *vif)
998#define for_each_context(priv, ctx) \ 1013#define for_each_context(priv, ctx) \
999 for (ctx = &priv->contexts[IWL_RXON_CTX_BSS]; \ 1014 for (ctx = &priv->contexts[IWL_RXON_CTX_BSS]; \
1000 ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++) \ 1015 ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++) \
1001 if (priv->shrd->valid_contexts & BIT(ctx->ctxid)) 1016 if (priv->valid_contexts & BIT(ctx->ctxid))
1002 1017
1003static inline int iwl_is_associated_ctx(struct iwl_rxon_context *ctx) 1018static inline int iwl_is_associated_ctx(struct iwl_rxon_context *ctx)
1004{ 1019{
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 6f312c77af5e..05302d6f989f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -67,9 +67,11 @@
67 67
68#include "iwl-drv.h" 68#include "iwl-drv.h"
69#include "iwl-trans.h" 69#include "iwl-trans.h"
70#include "iwl-shared.h"
71#include "iwl-op-mode.h" 70#include "iwl-op-mode.h"
72#include "iwl-agn-hw.h" 71#include "iwl-agn-hw.h"
72#include "iwl-fw.h"
73#include "iwl-config.h"
74#include "iwl-modparams.h"
73 75
74/* private includes */ 76/* private includes */
75#include "iwl-fw-file.h" 77#include "iwl-fw-file.h"
@@ -77,8 +79,10 @@
77/** 79/**
78 * struct iwl_drv - drv common data 80 * struct iwl_drv - drv common data
79 * @fw: the iwl_fw structure 81 * @fw: the iwl_fw structure
80 * @shrd: pointer to common shared structure
81 * @op_mode: the running op_mode 82 * @op_mode: the running op_mode
83 * @trans: transport layer
84 * @dev: for debug prints only
85 * @cfg: configuration struct
82 * @fw_index: firmware revision to try loading 86 * @fw_index: firmware revision to try loading
83 * @firmware_name: composite filename of ucode file to load 87 * @firmware_name: composite filename of ucode file to load
84 * @request_firmware_complete: the firmware has been obtained from user space 88 * @request_firmware_complete: the firmware has been obtained from user space
@@ -86,8 +90,10 @@
86struct iwl_drv { 90struct iwl_drv {
87 struct iwl_fw fw; 91 struct iwl_fw fw;
88 92
89 struct iwl_shared *shrd;
90 struct iwl_op_mode *op_mode; 93 struct iwl_op_mode *op_mode;
94 struct iwl_trans *trans;
95 struct device *dev;
96 const struct iwl_cfg *cfg;
91 97
92 int fw_index; /* firmware we're trying to load */ 98 int fw_index; /* firmware we're trying to load */
93 char firmware_name[25]; /* name of firmware file to load */ 99 char firmware_name[25]; /* name of firmware file to load */
@@ -110,7 +116,7 @@ struct fw_sec {
110static void iwl_free_fw_desc(struct iwl_drv *drv, struct fw_desc *desc) 116static void iwl_free_fw_desc(struct iwl_drv *drv, struct fw_desc *desc)
111{ 117{
112 if (desc->v_addr) 118 if (desc->v_addr)
113 dma_free_coherent(trans(drv)->dev, desc->len, 119 dma_free_coherent(drv->trans->dev, desc->len,
114 desc->v_addr, desc->p_addr); 120 desc->v_addr, desc->p_addr);
115 desc->v_addr = NULL; 121 desc->v_addr = NULL;
116 desc->len = 0; 122 desc->len = 0;
@@ -138,7 +144,7 @@ static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc,
138 return -EINVAL; 144 return -EINVAL;
139 } 145 }
140 146
141 desc->v_addr = dma_alloc_coherent(trans(drv)->dev, sec->size, 147 desc->v_addr = dma_alloc_coherent(drv->trans->dev, sec->size,
142 &desc->p_addr, GFP_KERNEL); 148 &desc->p_addr, GFP_KERNEL);
143 if (!desc->v_addr) 149 if (!desc->v_addr)
144 return -ENOMEM; 150 return -ENOMEM;
@@ -156,8 +162,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
156 162
157static int iwl_request_firmware(struct iwl_drv *drv, bool first) 163static int iwl_request_firmware(struct iwl_drv *drv, bool first)
158{ 164{
159 const struct iwl_cfg *cfg = cfg(drv); 165 const char *name_pre = drv->cfg->fw_name_pre;
160 const char *name_pre = cfg->fw_name_pre;
161 char tag[8]; 166 char tag[8];
162 167
163 if (first) { 168 if (first) {
@@ -166,14 +171,14 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
166 strcpy(tag, UCODE_EXPERIMENTAL_TAG); 171 strcpy(tag, UCODE_EXPERIMENTAL_TAG);
167 } else if (drv->fw_index == UCODE_EXPERIMENTAL_INDEX) { 172 } else if (drv->fw_index == UCODE_EXPERIMENTAL_INDEX) {
168#endif 173#endif
169 drv->fw_index = cfg->ucode_api_max; 174 drv->fw_index = drv->cfg->ucode_api_max;
170 sprintf(tag, "%d", drv->fw_index); 175 sprintf(tag, "%d", drv->fw_index);
171 } else { 176 } else {
172 drv->fw_index--; 177 drv->fw_index--;
173 sprintf(tag, "%d", drv->fw_index); 178 sprintf(tag, "%d", drv->fw_index);
174 } 179 }
175 180
176 if (drv->fw_index < cfg->ucode_api_min) { 181 if (drv->fw_index < drv->cfg->ucode_api_min) {
177 IWL_ERR(drv, "no suitable firmware found!\n"); 182 IWL_ERR(drv, "no suitable firmware found!\n");
178 return -ENOENT; 183 return -ENOENT;
179 } 184 }
@@ -186,7 +191,7 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
186 drv->firmware_name); 191 drv->firmware_name);
187 192
188 return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name, 193 return request_firmware_nowait(THIS_MODULE, 1, drv->firmware_name,
189 trans(drv)->dev, 194 drv->trans->dev,
190 GFP_KERNEL, drv, iwl_ucode_callback); 195 GFP_KERNEL, drv, iwl_ucode_callback);
191} 196}
192 197
@@ -284,6 +289,7 @@ static int iwl_store_ucode_sec(struct iwl_firmware_pieces *pieces,
284 289
285 sec->offset = le32_to_cpu(sec_parse->offset); 290 sec->offset = le32_to_cpu(sec_parse->offset);
286 sec->data = sec_parse->data; 291 sec->data = sec_parse->data;
292 sec->size = size - sizeof(sec_parse->offset);
287 293
288 ++img->sec_counter; 294 ++img->sec_counter;
289 295
@@ -414,9 +420,6 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
414 struct iwl_ucode_tlv *tlv; 420 struct iwl_ucode_tlv *tlv;
415 size_t len = ucode_raw->size; 421 size_t len = ucode_raw->size;
416 const u8 *data; 422 const u8 *data;
417 int wanted_alternative = iwlagn_mod_params.wanted_ucode_alternative;
418 int tmp;
419 u64 alternatives;
420 u32 tlv_len; 423 u32 tlv_len;
421 enum iwl_ucode_tlv_type tlv_type; 424 enum iwl_ucode_tlv_type tlv_type;
422 const u8 *tlv_data; 425 const u8 *tlv_data;
@@ -434,23 +437,6 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
434 return -EINVAL; 437 return -EINVAL;
435 } 438 }
436 439
437 /*
438 * Check which alternatives are present, and "downgrade"
439 * when the chosen alternative is not present, warning
440 * the user when that happens. Some files may not have
441 * any alternatives, so don't warn in that case.
442 */
443 alternatives = le64_to_cpu(ucode->alternatives);
444 tmp = wanted_alternative;
445 if (wanted_alternative > 63)
446 wanted_alternative = 63;
447 while (wanted_alternative && !(alternatives & BIT(wanted_alternative)))
448 wanted_alternative--;
449 if (wanted_alternative && wanted_alternative != tmp)
450 IWL_WARN(drv,
451 "uCode alternative %d not available, choosing %d\n",
452 tmp, wanted_alternative);
453
454 drv->fw.ucode_ver = le32_to_cpu(ucode->ver); 440 drv->fw.ucode_ver = le32_to_cpu(ucode->ver);
455 build = le32_to_cpu(ucode->build); 441 build = le32_to_cpu(ucode->build);
456 442
@@ -475,14 +461,11 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
475 len -= sizeof(*ucode); 461 len -= sizeof(*ucode);
476 462
477 while (len >= sizeof(*tlv)) { 463 while (len >= sizeof(*tlv)) {
478 u16 tlv_alt;
479
480 len -= sizeof(*tlv); 464 len -= sizeof(*tlv);
481 tlv = (void *)data; 465 tlv = (void *)data;
482 466
483 tlv_len = le32_to_cpu(tlv->length); 467 tlv_len = le32_to_cpu(tlv->length);
484 tlv_type = le16_to_cpu(tlv->type); 468 tlv_type = le32_to_cpu(tlv->type);
485 tlv_alt = le16_to_cpu(tlv->alternative);
486 tlv_data = tlv->data; 469 tlv_data = tlv->data;
487 470
488 if (len < tlv_len) { 471 if (len < tlv_len) {
@@ -493,14 +476,6 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
493 len -= ALIGN(tlv_len, 4); 476 len -= ALIGN(tlv_len, 4);
494 data += sizeof(*tlv) + ALIGN(tlv_len, 4); 477 data += sizeof(*tlv) + ALIGN(tlv_len, 4);
495 478
496 /*
497 * Alternative 0 is always valid.
498 *
499 * Skip alternative TLVs that are not selected.
500 */
501 if (tlv_alt != 0 && tlv_alt != wanted_alternative)
502 continue;
503
504 switch (tlv_type) { 479 switch (tlv_type) {
505 case IWL_UCODE_TLV_INST: 480 case IWL_UCODE_TLV_INST:
506 set_sec_data(pieces, IWL_UCODE_REGULAR, 481 set_sec_data(pieces, IWL_UCODE_REGULAR,
@@ -755,14 +730,13 @@ static int validate_sec_sizes(struct iwl_drv *drv,
755static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) 730static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
756{ 731{
757 struct iwl_drv *drv = context; 732 struct iwl_drv *drv = context;
758 const struct iwl_cfg *cfg = cfg(drv);
759 struct iwl_fw *fw = &drv->fw; 733 struct iwl_fw *fw = &drv->fw;
760 struct iwl_ucode_header *ucode; 734 struct iwl_ucode_header *ucode;
761 int err; 735 int err;
762 struct iwl_firmware_pieces pieces; 736 struct iwl_firmware_pieces pieces;
763 const unsigned int api_max = cfg->ucode_api_max; 737 const unsigned int api_max = drv->cfg->ucode_api_max;
764 unsigned int api_ok = cfg->ucode_api_ok; 738 unsigned int api_ok = drv->cfg->ucode_api_ok;
765 const unsigned int api_min = cfg->ucode_api_min; 739 const unsigned int api_min = drv->cfg->ucode_api_min;
766 u32 api_ver; 740 u32 api_ver;
767 int i; 741 int i;
768 742
@@ -838,46 +812,10 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
838 IWL_INFO(drv, "loaded firmware version %s", drv->fw.fw_version); 812 IWL_INFO(drv, "loaded firmware version %s", drv->fw.fw_version);
839 813
840 /* 814 /*
841 * For any of the failures below (before allocating pci memory)
842 * we will try to load a version with a smaller API -- maybe the
843 * user just got a corrupted version of the latest API.
844 */
845
846 IWL_DEBUG_INFO(drv, "f/w package hdr ucode version raw = 0x%x\n",
847 drv->fw.ucode_ver);
848 IWL_DEBUG_INFO(drv, "f/w package hdr runtime inst size = %Zd\n",
849 get_sec_size(&pieces, IWL_UCODE_REGULAR,
850 IWL_UCODE_SECTION_INST));
851 IWL_DEBUG_INFO(drv, "f/w package hdr runtime data size = %Zd\n",
852 get_sec_size(&pieces, IWL_UCODE_REGULAR,
853 IWL_UCODE_SECTION_DATA));
854 IWL_DEBUG_INFO(drv, "f/w package hdr init inst size = %Zd\n",
855 get_sec_size(&pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_INST));
856 IWL_DEBUG_INFO(drv, "f/w package hdr init data size = %Zd\n",
857 get_sec_size(&pieces, IWL_UCODE_INIT, IWL_UCODE_SECTION_DATA));
858
859 /* Verify that uCode images will fit in card's SRAM */
860 if (get_sec_size(&pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_INST) >
861 cfg->max_inst_size) {
862 IWL_ERR(drv, "uCode instr len %Zd too large to fit in\n",
863 get_sec_size(&pieces, IWL_UCODE_REGULAR,
864 IWL_UCODE_SECTION_INST));
865 goto try_again;
866 }
867
868 if (get_sec_size(&pieces, IWL_UCODE_REGULAR, IWL_UCODE_SECTION_DATA) >
869 cfg->max_data_size) {
870 IWL_ERR(drv, "uCode data len %Zd too large to fit in\n",
871 get_sec_size(&pieces, IWL_UCODE_REGULAR,
872 IWL_UCODE_SECTION_DATA));
873 goto try_again;
874 }
875
876 /*
877 * In mvm uCode there is no difference between data and instructions 815 * In mvm uCode there is no difference between data and instructions
878 * sections. 816 * sections.
879 */ 817 */
880 if (!fw->mvm_fw && validate_sec_sizes(drv, &pieces, cfg)) 818 if (!fw->mvm_fw && validate_sec_sizes(drv, &pieces, drv->cfg))
881 goto try_again; 819 goto try_again;
882 820
883 /* Allocate ucode buffers for card's bus-master loading ... */ 821 /* Allocate ucode buffers for card's bus-master loading ... */
@@ -901,14 +839,14 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
901 fw->init_evtlog_size = (pieces.init_evtlog_size - 16)/12; 839 fw->init_evtlog_size = (pieces.init_evtlog_size - 16)/12;
902 else 840 else
903 fw->init_evtlog_size = 841 fw->init_evtlog_size =
904 cfg->base_params->max_event_log_size; 842 drv->cfg->base_params->max_event_log_size;
905 fw->init_errlog_ptr = pieces.init_errlog_ptr; 843 fw->init_errlog_ptr = pieces.init_errlog_ptr;
906 fw->inst_evtlog_ptr = pieces.inst_evtlog_ptr; 844 fw->inst_evtlog_ptr = pieces.inst_evtlog_ptr;
907 if (pieces.inst_evtlog_size) 845 if (pieces.inst_evtlog_size)
908 fw->inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12; 846 fw->inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12;
909 else 847 else
910 fw->inst_evtlog_size = 848 fw->inst_evtlog_size =
911 cfg->base_params->max_event_log_size; 849 drv->cfg->base_params->max_event_log_size;
912 fw->inst_errlog_ptr = pieces.inst_errlog_ptr; 850 fw->inst_errlog_ptr = pieces.inst_errlog_ptr;
913 851
914 /* 852 /*
@@ -924,7 +862,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
924 release_firmware(ucode_raw); 862 release_firmware(ucode_raw);
925 complete(&drv->request_firmware_complete); 863 complete(&drv->request_firmware_complete);
926 864
927 drv->op_mode = iwl_dvm_ops.start(drv->shrd->trans, &drv->fw); 865 drv->op_mode = iwl_dvm_ops.start(drv->trans, drv->cfg, &drv->fw);
928 866
929 if (!drv->op_mode) 867 if (!drv->op_mode)
930 goto out_unbind; 868 goto out_unbind;
@@ -944,24 +882,23 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
944 release_firmware(ucode_raw); 882 release_firmware(ucode_raw);
945 out_unbind: 883 out_unbind:
946 complete(&drv->request_firmware_complete); 884 complete(&drv->request_firmware_complete);
947 device_release_driver(trans(drv)->dev); 885 device_release_driver(drv->trans->dev);
948} 886}
949 887
950int iwl_drv_start(struct iwl_shared *shrd, 888struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
951 struct iwl_trans *trans, const struct iwl_cfg *cfg) 889 const struct iwl_cfg *cfg)
952{ 890{
953 struct iwl_drv *drv; 891 struct iwl_drv *drv;
954 int ret; 892 int ret;
955 893
956 shrd->cfg = cfg;
957
958 drv = kzalloc(sizeof(*drv), GFP_KERNEL); 894 drv = kzalloc(sizeof(*drv), GFP_KERNEL);
959 if (!drv) { 895 if (!drv) {
960 dev_printk(KERN_ERR, trans->dev, "Couldn't allocate iwl_drv"); 896 dev_printk(KERN_ERR, trans->dev, "Couldn't allocate iwl_drv");
961 return -ENOMEM; 897 return NULL;
962 } 898 }
963 drv->shrd = shrd; 899 drv->trans = trans;
964 shrd->drv = drv; 900 drv->dev = trans->dev;
901 drv->cfg = cfg;
965 902
966 init_completion(&drv->request_firmware_complete); 903 init_completion(&drv->request_firmware_complete);
967 904
@@ -970,16 +907,14 @@ int iwl_drv_start(struct iwl_shared *shrd,
970 if (ret) { 907 if (ret) {
971 dev_printk(KERN_ERR, trans->dev, "Couldn't request the fw"); 908 dev_printk(KERN_ERR, trans->dev, "Couldn't request the fw");
972 kfree(drv); 909 kfree(drv);
973 shrd->drv = NULL; 910 drv = NULL;
974 } 911 }
975 912
976 return ret; 913 return drv;
977} 914}
978 915
979void iwl_drv_stop(struct iwl_shared *shrd) 916void iwl_drv_stop(struct iwl_drv *drv)
980{ 917{
981 struct iwl_drv *drv = shrd->drv;
982
983 wait_for_completion(&drv->request_firmware_complete); 918 wait_for_completion(&drv->request_firmware_complete);
984 919
985 /* op_mode can be NULL if its start failed */ 920 /* op_mode can be NULL if its start failed */
@@ -989,5 +924,91 @@ void iwl_drv_stop(struct iwl_shared *shrd)
989 iwl_dealloc_ucode(drv); 924 iwl_dealloc_ucode(drv);
990 925
991 kfree(drv); 926 kfree(drv);
992 shrd->drv = NULL;
993} 927}
928
929
930/* shared module parameters */
931struct iwl_mod_params iwlwifi_mod_params = {
932 .amsdu_size_8K = 1,
933 .restart_fw = 1,
934 .plcp_check = true,
935 .bt_coex_active = true,
936 .power_level = IWL_POWER_INDEX_1,
937 .bt_ch_announce = true,
938 .auto_agg = true,
939 /* the rest are 0 by default */
940};
941
942#ifdef CONFIG_IWLWIFI_DEBUG
943module_param_named(debug, iwlwifi_mod_params.debug_level, uint,
944 S_IRUGO | S_IWUSR);
945MODULE_PARM_DESC(debug, "debug output mask");
946#endif
947
948module_param_named(swcrypto, iwlwifi_mod_params.sw_crypto, int, S_IRUGO);
949MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
950module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, S_IRUGO);
951MODULE_PARM_DESC(11n_disable,
952 "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX");
953module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K,
954 int, S_IRUGO);
955MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
956module_param_named(fw_restart, iwlwifi_mod_params.restart_fw, int, S_IRUGO);
957MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
958
959module_param_named(antenna_coupling, iwlwifi_mod_params.ant_coupling,
960 int, S_IRUGO);
961MODULE_PARM_DESC(antenna_coupling,
962 "specify antenna coupling in dB (defualt: 0 dB)");
963
964module_param_named(bt_ch_inhibition, iwlwifi_mod_params.bt_ch_announce,
965 bool, S_IRUGO);
966MODULE_PARM_DESC(bt_ch_inhibition,
967 "Enable BT channel inhibition (default: enable)");
968
969module_param_named(plcp_check, iwlwifi_mod_params.plcp_check, bool, S_IRUGO);
970MODULE_PARM_DESC(plcp_check, "Check plcp health (default: 1 [enabled])");
971
972module_param_named(wd_disable, iwlwifi_mod_params.wd_disable, int, S_IRUGO);
973MODULE_PARM_DESC(wd_disable,
974 "Disable stuck queue watchdog timer 0=system default, "
975 "1=disable, 2=enable (default: 0)");
976
977/*
978 * set bt_coex_active to true, uCode will do kill/defer
979 * every time the priority line is asserted (BT is sending signals on the
980 * priority line in the PCIx).
981 * set bt_coex_active to false, uCode will ignore the BT activity and
982 * perform the normal operation
983 *
984 * User might experience transmit issue on some platform due to WiFi/BT
985 * co-exist problem. The possible behaviors are:
986 * Able to scan and finding all the available AP
987 * Not able to associate with any AP
988 * On those platforms, WiFi communication can be restored by set
989 * "bt_coex_active" module parameter to "false"
990 *
991 * default: bt_coex_active = true (BT_COEX_ENABLE)
992 */
993module_param_named(bt_coex_active, iwlwifi_mod_params.bt_coex_active,
994 bool, S_IRUGO);
995MODULE_PARM_DESC(bt_coex_active, "enable wifi/bt co-exist (default: enable)");
996
997module_param_named(led_mode, iwlwifi_mod_params.led_mode, int, S_IRUGO);
998MODULE_PARM_DESC(led_mode, "0=system default, "
999 "1=On(RF On)/Off(RF Off), 2=blinking, 3=Off (default: 0)");
1000
1001module_param_named(power_save, iwlwifi_mod_params.power_save,
1002 bool, S_IRUGO);
1003MODULE_PARM_DESC(power_save,
1004 "enable WiFi power management (default: disable)");
1005
1006module_param_named(power_level, iwlwifi_mod_params.power_level,
1007 int, S_IRUGO);
1008MODULE_PARM_DESC(power_level,
1009 "default power save level (range from 1 - 5, default: 1)");
1010
1011module_param_named(auto_agg, iwlwifi_mod_params.auto_agg,
1012 bool, S_IRUGO);
1013MODULE_PARM_DESC(auto_agg,
1014 "enable agg w/o check traffic load (default: enable)");
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h
index 3b771c1d9096..2cbf137b25bf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.h
@@ -63,7 +63,12 @@
63#ifndef __iwl_drv_h__ 63#ifndef __iwl_drv_h__
64#define __iwl_drv_h__ 64#define __iwl_drv_h__
65 65
66#include "iwl-shared.h" 66/* for all modules */
67#define DRV_NAME "iwlwifi"
68#define IWLWIFI_VERSION "in-tree:"
69#define DRV_COPYRIGHT "Copyright(c) 2003-2012 Intel Corporation"
70#define DRV_AUTHOR "<ilw@linux.intel.com>"
71
67 72
68/** 73/**
69 * DOC: Driver system flows - drv component 74 * DOC: Driver system flows - drv component
@@ -90,34 +95,32 @@
90 * 8) iwl_ucode_callback starts the wifi implementation to matches the fw 95 * 8) iwl_ucode_callback starts the wifi implementation to matches the fw
91 */ 96 */
92 97
98struct iwl_drv;
99struct iwl_trans;
100struct iwl_cfg;
93/** 101/**
94 * iwl_drv_start - start the drv 102 * iwl_drv_start - start the drv
95 * 103 *
96 * @shrd: the shrd area
97 * @trans_ops: the ops of the transport 104 * @trans_ops: the ops of the transport
98 * @cfg: device specific constants / virtual functions 105 * @cfg: device specific constants / virtual functions
99 * 106 *
100 * TODO: review the parameters given to this function
101 *
102 * starts the driver: fetches the firmware. This should be called by bus 107 * starts the driver: fetches the firmware. This should be called by bus
103 * specific system flows implementations. For example, the bus specific probe 108 * specific system flows implementations. For example, the bus specific probe
104 * function should do bus related operations only, and then call to this 109 * function should do bus related operations only, and then call to this
105 * function. 110 * function. It returns the driver object or %NULL if an error occured.
106 */ 111 */
107int iwl_drv_start(struct iwl_shared *shrd, 112struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
108 struct iwl_trans *trans, const struct iwl_cfg *cfg); 113 const struct iwl_cfg *cfg);
109 114
110/** 115/**
111 * iwl_drv_stop - stop the drv 116 * iwl_drv_stop - stop the drv
112 * 117 *
113 * @shrd: the shrd area 118 * @drv:
114 *
115 * TODO: review the parameters given to this function
116 * 119 *
117 * Stop the driver. This should be called by bus specific system flows 120 * Stop the driver. This should be called by bus specific system flows
118 * implementations. For example, the bus specific remove function should first 121 * implementations. For example, the bus specific remove function should first
119 * call this function and then do the bus related operations only. 122 * call this function and then do the bus related operations only.
120 */ 123 */
121void iwl_drv_stop(struct iwl_shared *shrd); 124void iwl_drv_stop(struct iwl_drv *drv);
122 125
123#endif /* __iwl_drv_h__ */ 126#endif /* __iwl_drv_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 23cea42b9495..50c58911e718 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -68,9 +68,7 @@
68 68
69#include <net/mac80211.h> 69#include <net/mac80211.h>
70 70
71#include "iwl-commands.h"
72#include "iwl-dev.h" 71#include "iwl-dev.h"
73#include "iwl-core.h"
74#include "iwl-debug.h" 72#include "iwl-debug.h"
75#include "iwl-agn.h" 73#include "iwl-agn.h"
76#include "iwl-eeprom.h" 74#include "iwl-eeprom.h"
@@ -187,33 +185,33 @@ static void iwl_eeprom_release_semaphore(struct iwl_trans *trans)
187 185
188} 186}
189 187
190static int iwl_eeprom_verify_signature(struct iwl_trans *trans) 188static int iwl_eeprom_verify_signature(struct iwl_priv *priv)
191{ 189{
192 u32 gp = iwl_read32(trans, CSR_EEPROM_GP) & 190 u32 gp = iwl_read32(priv->trans, CSR_EEPROM_GP) &
193 CSR_EEPROM_GP_VALID_MSK; 191 CSR_EEPROM_GP_VALID_MSK;
194 int ret = 0; 192 int ret = 0;
195 193
196 IWL_DEBUG_EEPROM(trans, "EEPROM signature=0x%08x\n", gp); 194 IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
197 switch (gp) { 195 switch (gp) {
198 case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP: 196 case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP:
199 if (trans->nvm_device_type != NVM_DEVICE_TYPE_OTP) { 197 if (priv->nvm_device_type != NVM_DEVICE_TYPE_OTP) {
200 IWL_ERR(trans, "EEPROM with bad signature: 0x%08x\n", 198 IWL_ERR(priv, "EEPROM with bad signature: 0x%08x\n",
201 gp); 199 gp);
202 ret = -ENOENT; 200 ret = -ENOENT;
203 } 201 }
204 break; 202 break;
205 case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K: 203 case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
206 case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K: 204 case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
207 if (trans->nvm_device_type != NVM_DEVICE_TYPE_EEPROM) { 205 if (priv->nvm_device_type != NVM_DEVICE_TYPE_EEPROM) {
208 IWL_ERR(trans, "OTP with bad signature: 0x%08x\n", gp); 206 IWL_ERR(priv, "OTP with bad signature: 0x%08x\n", gp);
209 ret = -ENOENT; 207 ret = -ENOENT;
210 } 208 }
211 break; 209 break;
212 case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP: 210 case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP:
213 default: 211 default:
214 IWL_ERR(trans, "bad EEPROM/OTP signature, type=%s, " 212 IWL_ERR(priv, "bad EEPROM/OTP signature, type=%s, "
215 "EEPROM_GP=0x%08x\n", 213 "EEPROM_GP=0x%08x\n",
216 (trans->nvm_device_type == NVM_DEVICE_TYPE_OTP) 214 (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
217 ? "OTP" : "EEPROM", gp); 215 ? "OTP" : "EEPROM", gp);
218 ret = -ENOENT; 216 ret = -ENOENT;
219 break; 217 break;
@@ -221,11 +219,11 @@ static int iwl_eeprom_verify_signature(struct iwl_trans *trans)
221 return ret; 219 return ret;
222} 220}
223 221
224u16 iwl_eeprom_query16(const struct iwl_shared *shrd, size_t offset) 222u16 iwl_eeprom_query16(struct iwl_priv *priv, size_t offset)
225{ 223{
226 if (!shrd->eeprom) 224 if (!priv->eeprom)
227 return 0; 225 return 0;
228 return (u16)shrd->eeprom[offset] | ((u16)shrd->eeprom[offset + 1] << 8); 226 return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
229} 227}
230 228
231int iwl_eeprom_check_version(struct iwl_priv *priv) 229int iwl_eeprom_check_version(struct iwl_priv *priv)
@@ -233,11 +231,11 @@ int iwl_eeprom_check_version(struct iwl_priv *priv)
233 u16 eeprom_ver; 231 u16 eeprom_ver;
234 u16 calib_ver; 232 u16 calib_ver;
235 233
236 eeprom_ver = iwl_eeprom_query16(priv->shrd, EEPROM_VERSION); 234 eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
237 calib_ver = iwl_eeprom_calib_version(priv->shrd); 235 calib_ver = iwl_eeprom_calib_version(priv);
238 236
239 if (eeprom_ver < cfg(priv)->eeprom_ver || 237 if (eeprom_ver < priv->cfg->eeprom_ver ||
240 calib_ver < cfg(priv)->eeprom_calib_ver) 238 calib_ver < priv->cfg->eeprom_calib_ver)
241 goto err; 239 goto err;
242 240
243 IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n", 241 IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
@@ -247,58 +245,115 @@ int iwl_eeprom_check_version(struct iwl_priv *priv)
247err: 245err:
248 IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x " 246 IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
249 "CALIB=0x%x < 0x%x\n", 247 "CALIB=0x%x < 0x%x\n",
250 eeprom_ver, cfg(priv)->eeprom_ver, 248 eeprom_ver, priv->cfg->eeprom_ver,
251 calib_ver, cfg(priv)->eeprom_calib_ver); 249 calib_ver, priv->cfg->eeprom_calib_ver);
252 return -EINVAL; 250 return -EINVAL;
253 251
254} 252}
255 253
256int iwl_eeprom_init_hw_params(struct iwl_priv *priv) 254int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
257{ 255{
258 struct iwl_shared *shrd = priv->shrd;
259 u16 radio_cfg; 256 u16 radio_cfg;
260 257
261 hw_params(priv).sku = iwl_eeprom_query16(shrd, EEPROM_SKU_CAP); 258 priv->hw_params.sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP);
262 if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE && 259 if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE &&
263 !cfg(priv)->ht_params) { 260 !priv->cfg->ht_params) {
264 IWL_ERR(priv, "Invalid 11n configuration\n"); 261 IWL_ERR(priv, "Invalid 11n configuration\n");
265 return -EINVAL; 262 return -EINVAL;
266 } 263 }
267 264
268 if (!hw_params(priv).sku) { 265 if (!priv->hw_params.sku) {
269 IWL_ERR(priv, "Invalid device sku\n"); 266 IWL_ERR(priv, "Invalid device sku\n");
270 return -EINVAL; 267 return -EINVAL;
271 } 268 }
272 269
273 IWL_INFO(priv, "Device SKU: 0x%X\n", hw_params(priv).sku); 270 IWL_INFO(priv, "Device SKU: 0x%X\n", priv->hw_params.sku);
274 271
275 radio_cfg = iwl_eeprom_query16(shrd, EEPROM_RADIO_CONFIG); 272 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
276 273
277 hw_params(priv).valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg); 274 priv->hw_params.valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
278 hw_params(priv).valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg); 275 priv->hw_params.valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
279 276
280 /* check overrides (some devices have wrong EEPROM) */ 277 /* check overrides (some devices have wrong EEPROM) */
281 if (cfg(priv)->valid_tx_ant) 278 if (priv->cfg->valid_tx_ant)
282 hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant; 279 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
283 if (cfg(priv)->valid_rx_ant) 280 if (priv->cfg->valid_rx_ant)
284 hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant; 281 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
285 282
286 if (!hw_params(priv).valid_tx_ant || !hw_params(priv).valid_rx_ant) { 283 if (!priv->hw_params.valid_tx_ant || !priv->hw_params.valid_rx_ant) {
287 IWL_ERR(priv, "Invalid chain (0x%X, 0x%X)\n", 284 IWL_ERR(priv, "Invalid chain (0x%X, 0x%X)\n",
288 hw_params(priv).valid_tx_ant, 285 priv->hw_params.valid_tx_ant,
289 hw_params(priv).valid_rx_ant); 286 priv->hw_params.valid_rx_ant);
290 return -EINVAL; 287 return -EINVAL;
291 } 288 }
292 289
293 IWL_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n", 290 IWL_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n",
294 hw_params(priv).valid_tx_ant, hw_params(priv).valid_rx_ant); 291 priv->hw_params.valid_tx_ant, priv->hw_params.valid_rx_ant);
295 292
296 return 0; 293 return 0;
297} 294}
298 295
299void iwl_eeprom_get_mac(const struct iwl_shared *shrd, u8 *mac) 296u16 iwl_eeprom_calib_version(struct iwl_priv *priv)
300{ 297{
301 const u8 *addr = iwl_eeprom_query_addr(shrd, 298 struct iwl_eeprom_calib_hdr *hdr;
299
300 hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
301 EEPROM_CALIB_ALL);
302 return hdr->version;
303}
304
305static u32 eeprom_indirect_address(struct iwl_priv *priv, u32 address)
306{
307 u16 offset = 0;
308
309 if ((address & INDIRECT_ADDRESS) == 0)
310 return address;
311
312 switch (address & INDIRECT_TYPE_MSK) {
313 case INDIRECT_HOST:
314 offset = iwl_eeprom_query16(priv, EEPROM_LINK_HOST);
315 break;
316 case INDIRECT_GENERAL:
317 offset = iwl_eeprom_query16(priv, EEPROM_LINK_GENERAL);
318 break;
319 case INDIRECT_REGULATORY:
320 offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);
321 break;
322 case INDIRECT_TXP_LIMIT:
323 offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT);
324 break;
325 case INDIRECT_TXP_LIMIT_SIZE:
326 offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT_SIZE);
327 break;
328 case INDIRECT_CALIBRATION:
329 offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION);
330 break;
331 case INDIRECT_PROCESS_ADJST:
332 offset = iwl_eeprom_query16(priv, EEPROM_LINK_PROCESS_ADJST);
333 break;
334 case INDIRECT_OTHERS:
335 offset = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS);
336 break;
337 default:
338 IWL_ERR(priv, "illegal indirect type: 0x%X\n",
339 address & INDIRECT_TYPE_MSK);
340 break;
341 }
342
343 /* translate the offset from words to byte */
344 return (address & ADDRESS_MSK) + (offset << 1);
345}
346
347const u8 *iwl_eeprom_query_addr(struct iwl_priv *priv, size_t offset)
348{
349 u32 address = eeprom_indirect_address(priv, offset);
350 BUG_ON(address >= priv->cfg->base_params->eeprom_size);
351 return &priv->eeprom[address];
352}
353
354void iwl_eeprom_get_mac(struct iwl_priv *priv, u8 *mac)
355{
356 const u8 *addr = iwl_eeprom_query_addr(priv,
302 EEPROM_MAC_ADDRESS); 357 EEPROM_MAC_ADDRESS);
303 memcpy(mac, addr, ETH_ALEN); 358 memcpy(mac, addr, ETH_ALEN);
304} 359}
@@ -376,7 +431,7 @@ static int iwl_init_otp_access(struct iwl_trans *trans)
376 * CSR auto clock gate disable bit - 431 * CSR auto clock gate disable bit -
377 * this is only applicable for HW with OTP shadow RAM 432 * this is only applicable for HW with OTP shadow RAM
378 */ 433 */
379 if (cfg(trans)->base_params->shadow_ram_support) 434 if (trans->cfg->base_params->shadow_ram_support)
380 iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, 435 iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
381 CSR_RESET_LINK_PWR_MGMT_DISABLED); 436 CSR_RESET_LINK_PWR_MGMT_DISABLED);
382 } 437 }
@@ -497,7 +552,7 @@ static int iwl_find_otp_image(struct iwl_trans *trans,
497 } 552 }
498 /* more in the link list, continue */ 553 /* more in the link list, continue */
499 usedblocks++; 554 usedblocks++;
500 } while (usedblocks <= cfg(trans)->base_params->max_ll_items); 555 } while (usedblocks <= trans->cfg->base_params->max_ll_items);
501 556
502 /* OTP has no valid blocks */ 557 /* OTP has no valid blocks */
503 IWL_DEBUG_EEPROM(trans, "OTP has no valid blocks\n"); 558 IWL_DEBUG_EEPROM(trans, "OTP has no valid blocks\n");
@@ -591,7 +646,6 @@ iwl_eeprom_enh_txp_read_element(struct iwl_priv *priv,
591 646
592static void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv) 647static void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
593{ 648{
594 struct iwl_shared *shrd = priv->shrd;
595 struct iwl_eeprom_enhanced_txpwr *txp_array, *txp; 649 struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
596 int idx, entries; 650 int idx, entries;
597 __le16 *txp_len; 651 __le16 *txp_len;
@@ -600,10 +654,10 @@ static void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
600 BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8); 654 BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);
601 655
602 /* the length is in 16-bit words, but we want entries */ 656 /* the length is in 16-bit words, but we want entries */
603 txp_len = (__le16 *) iwl_eeprom_query_addr(shrd, EEPROM_TXP_SZ_OFFS); 657 txp_len = (__le16 *) iwl_eeprom_query_addr(priv, EEPROM_TXP_SZ_OFFS);
604 entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN; 658 entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;
605 659
606 txp_array = (void *) iwl_eeprom_query_addr(shrd, EEPROM_TXP_OFFS); 660 txp_array = (void *) iwl_eeprom_query_addr(priv, EEPROM_TXP_OFFS);
607 661
608 for (idx = 0; idx < entries; idx++) { 662 for (idx = 0; idx < entries; idx++) {
609 txp = &txp_array[idx]; 663 txp = &txp_array[idx];
@@ -637,7 +691,7 @@ static void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
637 ((txp->delta_20_in_40 & 0xf0) >> 4), 691 ((txp->delta_20_in_40 & 0xf0) >> 4),
638 (txp->delta_20_in_40 & 0x0f)); 692 (txp->delta_20_in_40 & 0x0f));
639 693
640 max_txp_avg = iwl_get_max_txpower_avg(cfg(priv), txp_array, idx, 694 max_txp_avg = iwl_get_max_txpower_avg(priv->cfg, txp_array, idx,
641 &max_txp_avg_halfdbm); 695 &max_txp_avg_halfdbm);
642 696
643 /* 697 /*
@@ -656,66 +710,66 @@ static void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
656/** 710/**
657 * iwl_eeprom_init - read EEPROM contents 711 * iwl_eeprom_init - read EEPROM contents
658 * 712 *
659 * Load the EEPROM contents from adapter into shrd->eeprom 713 * Load the EEPROM contents from adapter into priv->eeprom
660 * 714 *
661 * NOTE: This routine uses the non-debug IO access functions. 715 * NOTE: This routine uses the non-debug IO access functions.
662 */ 716 */
663int iwl_eeprom_init(struct iwl_trans *trans, u32 hw_rev) 717int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
664{ 718{
665 __le16 *e; 719 __le16 *e;
666 u32 gp = iwl_read32(trans, CSR_EEPROM_GP); 720 u32 gp = iwl_read32(priv->trans, CSR_EEPROM_GP);
667 int sz; 721 int sz;
668 int ret; 722 int ret;
669 u16 addr; 723 u16 addr;
670 u16 validblockaddr = 0; 724 u16 validblockaddr = 0;
671 u16 cache_addr = 0; 725 u16 cache_addr = 0;
672 726
673 trans->nvm_device_type = iwl_get_nvm_type(trans, hw_rev); 727 priv->nvm_device_type = iwl_get_nvm_type(priv->trans, hw_rev);
674 if (trans->nvm_device_type == -ENOENT) 728 if (priv->nvm_device_type == -ENOENT)
675 return -ENOENT; 729 return -ENOENT;
676 /* allocate eeprom */ 730 /* allocate eeprom */
677 sz = cfg(trans)->base_params->eeprom_size; 731 sz = priv->cfg->base_params->eeprom_size;
678 IWL_DEBUG_EEPROM(trans, "NVM size = %d\n", sz); 732 IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
679 trans->shrd->eeprom = kzalloc(sz, GFP_KERNEL); 733 priv->eeprom = kzalloc(sz, GFP_KERNEL);
680 if (!trans->shrd->eeprom) { 734 if (!priv->eeprom) {
681 ret = -ENOMEM; 735 ret = -ENOMEM;
682 goto alloc_err; 736 goto alloc_err;
683 } 737 }
684 e = (__le16 *)trans->shrd->eeprom; 738 e = (__le16 *)priv->eeprom;
685 739
686 ret = iwl_eeprom_verify_signature(trans); 740 ret = iwl_eeprom_verify_signature(priv);
687 if (ret < 0) { 741 if (ret < 0) {
688 IWL_ERR(trans, "EEPROM not found, EEPROM_GP=0x%08x\n", gp); 742 IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
689 ret = -ENOENT; 743 ret = -ENOENT;
690 goto err; 744 goto err;
691 } 745 }
692 746
693 /* Make sure driver (instead of uCode) is allowed to read EEPROM */ 747 /* Make sure driver (instead of uCode) is allowed to read EEPROM */
694 ret = iwl_eeprom_acquire_semaphore(trans); 748 ret = iwl_eeprom_acquire_semaphore(priv->trans);
695 if (ret < 0) { 749 if (ret < 0) {
696 IWL_ERR(trans, "Failed to acquire EEPROM semaphore.\n"); 750 IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
697 ret = -ENOENT; 751 ret = -ENOENT;
698 goto err; 752 goto err;
699 } 753 }
700 754
701 if (trans->nvm_device_type == NVM_DEVICE_TYPE_OTP) { 755 if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) {
702 756
703 ret = iwl_init_otp_access(trans); 757 ret = iwl_init_otp_access(priv->trans);
704 if (ret) { 758 if (ret) {
705 IWL_ERR(trans, "Failed to initialize OTP access.\n"); 759 IWL_ERR(priv, "Failed to initialize OTP access.\n");
706 ret = -ENOENT; 760 ret = -ENOENT;
707 goto done; 761 goto done;
708 } 762 }
709 iwl_write32(trans, CSR_EEPROM_GP, 763 iwl_write32(priv->trans, CSR_EEPROM_GP,
710 iwl_read32(trans, CSR_EEPROM_GP) & 764 iwl_read32(priv->trans, CSR_EEPROM_GP) &
711 ~CSR_EEPROM_GP_IF_OWNER_MSK); 765 ~CSR_EEPROM_GP_IF_OWNER_MSK);
712 766
713 iwl_set_bit(trans, CSR_OTP_GP_REG, 767 iwl_set_bit(priv->trans, CSR_OTP_GP_REG,
714 CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK | 768 CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
715 CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK); 769 CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
716 /* traversing the linked list if no shadow ram supported */ 770 /* traversing the linked list if no shadow ram supported */
717 if (!cfg(trans)->base_params->shadow_ram_support) { 771 if (!priv->cfg->base_params->shadow_ram_support) {
718 if (iwl_find_otp_image(trans, &validblockaddr)) { 772 if (iwl_find_otp_image(priv->trans, &validblockaddr)) {
719 ret = -ENOENT; 773 ret = -ENOENT;
720 goto done; 774 goto done;
721 } 775 }
@@ -724,7 +778,8 @@ int iwl_eeprom_init(struct iwl_trans *trans, u32 hw_rev)
724 addr += sizeof(u16)) { 778 addr += sizeof(u16)) {
725 __le16 eeprom_data; 779 __le16 eeprom_data;
726 780
727 ret = iwl_read_otp_word(trans, addr, &eeprom_data); 781 ret = iwl_read_otp_word(priv->trans, addr,
782 &eeprom_data);
728 if (ret) 783 if (ret)
729 goto done; 784 goto done;
730 e[cache_addr / 2] = eeprom_data; 785 e[cache_addr / 2] = eeprom_data;
@@ -735,94 +790,93 @@ int iwl_eeprom_init(struct iwl_trans *trans, u32 hw_rev)
735 for (addr = 0; addr < sz; addr += sizeof(u16)) { 790 for (addr = 0; addr < sz; addr += sizeof(u16)) {
736 u32 r; 791 u32 r;
737 792
738 iwl_write32(trans, CSR_EEPROM_REG, 793 iwl_write32(priv->trans, CSR_EEPROM_REG,
739 CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); 794 CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
740 795
741 ret = iwl_poll_bit(trans, CSR_EEPROM_REG, 796 ret = iwl_poll_bit(priv->trans, CSR_EEPROM_REG,
742 CSR_EEPROM_REG_READ_VALID_MSK, 797 CSR_EEPROM_REG_READ_VALID_MSK,
743 CSR_EEPROM_REG_READ_VALID_MSK, 798 CSR_EEPROM_REG_READ_VALID_MSK,
744 IWL_EEPROM_ACCESS_TIMEOUT); 799 IWL_EEPROM_ACCESS_TIMEOUT);
745 if (ret < 0) { 800 if (ret < 0) {
746 IWL_ERR(trans, 801 IWL_ERR(priv,
747 "Time out reading EEPROM[%d]\n", addr); 802 "Time out reading EEPROM[%d]\n", addr);
748 goto done; 803 goto done;
749 } 804 }
750 r = iwl_read32(trans, CSR_EEPROM_REG); 805 r = iwl_read32(priv->trans, CSR_EEPROM_REG);
751 e[addr / 2] = cpu_to_le16(r >> 16); 806 e[addr / 2] = cpu_to_le16(r >> 16);
752 } 807 }
753 } 808 }
754 809
755 IWL_DEBUG_EEPROM(trans, "NVM Type: %s, version: 0x%x\n", 810 IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
756 (trans->nvm_device_type == NVM_DEVICE_TYPE_OTP) 811 (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
757 ? "OTP" : "EEPROM", 812 ? "OTP" : "EEPROM",
758 iwl_eeprom_query16(trans->shrd, EEPROM_VERSION)); 813 iwl_eeprom_query16(priv, EEPROM_VERSION));
759 814
760 ret = 0; 815 ret = 0;
761done: 816done:
762 iwl_eeprom_release_semaphore(trans); 817 iwl_eeprom_release_semaphore(priv->trans);
763 818
764err: 819err:
765 if (ret) 820 if (ret)
766 iwl_eeprom_free(trans->shrd); 821 iwl_eeprom_free(priv);
767alloc_err: 822alloc_err:
768 return ret; 823 return ret;
769} 824}
770 825
771void iwl_eeprom_free(struct iwl_shared *shrd) 826void iwl_eeprom_free(struct iwl_priv *priv)
772{ 827{
773 kfree(shrd->eeprom); 828 kfree(priv->eeprom);
774 shrd->eeprom = NULL; 829 priv->eeprom = NULL;
775} 830}
776 831
777static void iwl_init_band_reference(const struct iwl_priv *priv, 832static void iwl_init_band_reference(struct iwl_priv *priv,
778 int eep_band, int *eeprom_ch_count, 833 int eep_band, int *eeprom_ch_count,
779 const struct iwl_eeprom_channel **eeprom_ch_info, 834 const struct iwl_eeprom_channel **eeprom_ch_info,
780 const u8 **eeprom_ch_index) 835 const u8 **eeprom_ch_index)
781{ 836{
782 struct iwl_shared *shrd = priv->shrd; 837 u32 offset = priv->lib->
783 u32 offset = cfg(priv)->lib->
784 eeprom_ops.regulatory_bands[eep_band - 1]; 838 eeprom_ops.regulatory_bands[eep_band - 1];
785 switch (eep_band) { 839 switch (eep_band) {
786 case 1: /* 2.4GHz band */ 840 case 1: /* 2.4GHz band */
787 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1); 841 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
788 *eeprom_ch_info = (struct iwl_eeprom_channel *) 842 *eeprom_ch_info = (struct iwl_eeprom_channel *)
789 iwl_eeprom_query_addr(shrd, offset); 843 iwl_eeprom_query_addr(priv, offset);
790 *eeprom_ch_index = iwl_eeprom_band_1; 844 *eeprom_ch_index = iwl_eeprom_band_1;
791 break; 845 break;
792 case 2: /* 4.9GHz band */ 846 case 2: /* 4.9GHz band */
793 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2); 847 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
794 *eeprom_ch_info = (struct iwl_eeprom_channel *) 848 *eeprom_ch_info = (struct iwl_eeprom_channel *)
795 iwl_eeprom_query_addr(shrd, offset); 849 iwl_eeprom_query_addr(priv, offset);
796 *eeprom_ch_index = iwl_eeprom_band_2; 850 *eeprom_ch_index = iwl_eeprom_band_2;
797 break; 851 break;
798 case 3: /* 5.2GHz band */ 852 case 3: /* 5.2GHz band */
799 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3); 853 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
800 *eeprom_ch_info = (struct iwl_eeprom_channel *) 854 *eeprom_ch_info = (struct iwl_eeprom_channel *)
801 iwl_eeprom_query_addr(shrd, offset); 855 iwl_eeprom_query_addr(priv, offset);
802 *eeprom_ch_index = iwl_eeprom_band_3; 856 *eeprom_ch_index = iwl_eeprom_band_3;
803 break; 857 break;
804 case 4: /* 5.5GHz band */ 858 case 4: /* 5.5GHz band */
805 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4); 859 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
806 *eeprom_ch_info = (struct iwl_eeprom_channel *) 860 *eeprom_ch_info = (struct iwl_eeprom_channel *)
807 iwl_eeprom_query_addr(shrd, offset); 861 iwl_eeprom_query_addr(priv, offset);
808 *eeprom_ch_index = iwl_eeprom_band_4; 862 *eeprom_ch_index = iwl_eeprom_band_4;
809 break; 863 break;
810 case 5: /* 5.7GHz band */ 864 case 5: /* 5.7GHz band */
811 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5); 865 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
812 *eeprom_ch_info = (struct iwl_eeprom_channel *) 866 *eeprom_ch_info = (struct iwl_eeprom_channel *)
813 iwl_eeprom_query_addr(shrd, offset); 867 iwl_eeprom_query_addr(priv, offset);
814 *eeprom_ch_index = iwl_eeprom_band_5; 868 *eeprom_ch_index = iwl_eeprom_band_5;
815 break; 869 break;
816 case 6: /* 2.4GHz ht40 channels */ 870 case 6: /* 2.4GHz ht40 channels */
817 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6); 871 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6);
818 *eeprom_ch_info = (struct iwl_eeprom_channel *) 872 *eeprom_ch_info = (struct iwl_eeprom_channel *)
819 iwl_eeprom_query_addr(shrd, offset); 873 iwl_eeprom_query_addr(priv, offset);
820 *eeprom_ch_index = iwl_eeprom_band_6; 874 *eeprom_ch_index = iwl_eeprom_band_6;
821 break; 875 break;
822 case 7: /* 5 GHz ht40 channels */ 876 case 7: /* 5 GHz ht40 channels */
823 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7); 877 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7);
824 *eeprom_ch_info = (struct iwl_eeprom_channel *) 878 *eeprom_ch_info = (struct iwl_eeprom_channel *)
825 iwl_eeprom_query_addr(shrd, offset); 879 iwl_eeprom_query_addr(priv, offset);
826 *eeprom_ch_index = iwl_eeprom_band_7; 880 *eeprom_ch_index = iwl_eeprom_band_7;
827 break; 881 break;
828 default: 882 default:
@@ -987,9 +1041,9 @@ int iwl_init_channel_map(struct iwl_priv *priv)
987 } 1041 }
988 1042
989 /* Check if we do have HT40 channels */ 1043 /* Check if we do have HT40 channels */
990 if (cfg(priv)->lib->eeprom_ops.regulatory_bands[5] == 1044 if (priv->lib->eeprom_ops.regulatory_bands[5] ==
991 EEPROM_REGULATORY_BAND_NO_HT40 && 1045 EEPROM_REGULATORY_BAND_NO_HT40 &&
992 cfg(priv)->lib->eeprom_ops.regulatory_bands[6] == 1046 priv->lib->eeprom_ops.regulatory_bands[6] ==
993 EEPROM_REGULATORY_BAND_NO_HT40) 1047 EEPROM_REGULATORY_BAND_NO_HT40)
994 return 0; 1048 return 0;
995 1049
@@ -1025,7 +1079,7 @@ int iwl_init_channel_map(struct iwl_priv *priv)
1025 * driver need to process addition information 1079 * driver need to process addition information
1026 * to determine the max channel tx power limits 1080 * to determine the max channel tx power limits
1027 */ 1081 */
1028 if (cfg(priv)->lib->eeprom_ops.enhanced_txpower) 1082 if (priv->lib->eeprom_ops.enhanced_txpower)
1029 iwl_eeprom_enhanced_txpower(priv); 1083 iwl_eeprom_enhanced_txpower(priv);
1030 1084
1031 return 0; 1085 return 0;
@@ -1072,11 +1126,11 @@ void iwl_rf_config(struct iwl_priv *priv)
1072{ 1126{
1073 u16 radio_cfg; 1127 u16 radio_cfg;
1074 1128
1075 radio_cfg = iwl_eeprom_query16(priv->shrd, EEPROM_RADIO_CONFIG); 1129 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
1076 1130
1077 /* write radio config values to register */ 1131 /* write radio config values to register */
1078 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) { 1132 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) {
1079 iwl_set_bit(trans(priv), CSR_HW_IF_CONFIG_REG, 1133 iwl_set_bit(priv->trans, CSR_HW_IF_CONFIG_REG,
1080 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) | 1134 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
1081 EEPROM_RF_CFG_STEP_MSK(radio_cfg) | 1135 EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
1082 EEPROM_RF_CFG_DASH_MSK(radio_cfg)); 1136 EEPROM_RF_CFG_DASH_MSK(radio_cfg));
@@ -1088,7 +1142,7 @@ void iwl_rf_config(struct iwl_priv *priv)
1088 WARN_ON(1); 1142 WARN_ON(1);
1089 1143
1090 /* set CSR_HW_CONFIG_REG for uCode use */ 1144 /* set CSR_HW_CONFIG_REG for uCode use */
1091 iwl_set_bit(trans(priv), CSR_HW_IF_CONFIG_REG, 1145 iwl_set_bit(priv->trans, CSR_HW_IF_CONFIG_REG,
1092 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | 1146 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
1093 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); 1147 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
1094} 1148}
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index e4a758340996..64bfd947caeb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -66,8 +66,6 @@
66#include <net/mac80211.h> 66#include <net/mac80211.h>
67 67
68struct iwl_priv; 68struct iwl_priv;
69struct iwl_shared;
70struct iwl_trans;
71 69
72/* 70/*
73 * EEPROM access time values: 71 * EEPROM access time values:
@@ -208,59 +206,6 @@ struct iwl_eeprom_calib_hdr {
208/* 6000 regulatory - indirect access */ 206/* 6000 regulatory - indirect access */
209#define EEPROM_6000_REG_BAND_24_HT40_CHANNELS ((0x80)\ 207#define EEPROM_6000_REG_BAND_24_HT40_CHANNELS ((0x80)\
210 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */ 208 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */
211
212/* 5000 Specific */
213#define EEPROM_5000_TX_POWER_VERSION (4)
214#define EEPROM_5000_EEPROM_VERSION (0x11A)
215
216/* 5050 Specific */
217#define EEPROM_5050_TX_POWER_VERSION (4)
218#define EEPROM_5050_EEPROM_VERSION (0x21E)
219
220/* 1000 Specific */
221#define EEPROM_1000_TX_POWER_VERSION (4)
222#define EEPROM_1000_EEPROM_VERSION (0x15C)
223
224/* 6x00 Specific */
225#define EEPROM_6000_TX_POWER_VERSION (4)
226#define EEPROM_6000_EEPROM_VERSION (0x423)
227
228/* 6x50 Specific */
229#define EEPROM_6050_TX_POWER_VERSION (4)
230#define EEPROM_6050_EEPROM_VERSION (0x532)
231
232/* 6150 Specific */
233#define EEPROM_6150_TX_POWER_VERSION (6)
234#define EEPROM_6150_EEPROM_VERSION (0x553)
235
236/* 6x05 Specific */
237#define EEPROM_6005_TX_POWER_VERSION (6)
238#define EEPROM_6005_EEPROM_VERSION (0x709)
239
240/* 6x30 Specific */
241#define EEPROM_6030_TX_POWER_VERSION (6)
242#define EEPROM_6030_EEPROM_VERSION (0x709)
243
244/* 2x00 Specific */
245#define EEPROM_2000_TX_POWER_VERSION (6)
246#define EEPROM_2000_EEPROM_VERSION (0x805)
247
248/* 6x35 Specific */
249#define EEPROM_6035_TX_POWER_VERSION (6)
250#define EEPROM_6035_EEPROM_VERSION (0x753)
251
252
253/* OTP */
254/* lower blocks contain EEPROM image and calibration data */
255#define OTP_LOW_IMAGE_SIZE (2 * 512 * sizeof(u16)) /* 2 KB */
256/* high blocks contain PAPD data */
257#define OTP_HIGH_IMAGE_SIZE_6x00 (6 * 512 * sizeof(u16)) /* 6 KB */
258#define OTP_HIGH_IMAGE_SIZE_1000 (0x200 * sizeof(u16)) /* 1024 bytes */
259#define OTP_MAX_LL_ITEMS_1000 (3) /* OTP blocks for 1000 */
260#define OTP_MAX_LL_ITEMS_6x00 (4) /* OTP blocks for 6x00 */
261#define OTP_MAX_LL_ITEMS_6x50 (7) /* OTP blocks for 6x50 */
262#define OTP_MAX_LL_ITEMS_2x00 (4) /* OTP blocks for 2x00 */
263
264/* 2.4 GHz */ 209/* 2.4 GHz */
265extern const u8 iwl_eeprom_band_1[14]; 210extern const u8 iwl_eeprom_band_1[14];
266 211
@@ -306,12 +251,14 @@ struct iwl_eeprom_ops {
306}; 251};
307 252
308 253
309int iwl_eeprom_init(struct iwl_trans *trans, u32 hw_rev); 254int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev);
310void iwl_eeprom_free(struct iwl_shared *shrd); 255void iwl_eeprom_free(struct iwl_priv *priv);
311int iwl_eeprom_check_version(struct iwl_priv *priv); 256int iwl_eeprom_check_version(struct iwl_priv *priv);
312int iwl_eeprom_init_hw_params(struct iwl_priv *priv); 257int iwl_eeprom_init_hw_params(struct iwl_priv *priv);
313const u8 *iwl_eeprom_query_addr(const struct iwl_shared *shrd, size_t offset); 258u16 iwl_eeprom_calib_version(struct iwl_priv *priv);
314u16 iwl_eeprom_query16(const struct iwl_shared *shrd, size_t offset); 259const u8 *iwl_eeprom_query_addr(struct iwl_priv *priv, size_t offset);
260u16 iwl_eeprom_query16(struct iwl_priv *priv, size_t offset);
261void iwl_eeprom_get_mac(struct iwl_priv *priv, u8 *mac);
315int iwl_init_channel_map(struct iwl_priv *priv); 262int iwl_init_channel_map(struct iwl_priv *priv);
316void iwl_free_channel_map(struct iwl_priv *priv); 263void iwl_free_channel_map(struct iwl_priv *priv);
317const struct iwl_channel_info *iwl_get_channel_info( 264const struct iwl_channel_info *iwl_get_channel_info(
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index c924ccb93c8c..e71564053e7f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -93,15 +93,7 @@ struct iwl_ucode_header {
93 * new TLV uCode file layout 93 * new TLV uCode file layout
94 * 94 *
95 * The new TLV file format contains TLVs, that each specify 95 * The new TLV file format contains TLVs, that each specify
96 * some piece of data. To facilitate "groups", for example 96 * some piece of data.
97 * different instruction image with different capabilities,
98 * bundled with the same init image, an alternative mechanism
99 * is provided:
100 * When the alternative field is 0, that means that the item
101 * is always valid. When it is non-zero, then it is only
102 * valid in conjunction with items of the same alternative,
103 * in which case the driver (user) selects one alternative
104 * to use.
105 */ 97 */
106 98
107enum iwl_ucode_tlv_type { 99enum iwl_ucode_tlv_type {
@@ -132,8 +124,7 @@ enum iwl_ucode_tlv_type {
132}; 124};
133 125
134struct iwl_ucode_tlv { 126struct iwl_ucode_tlv {
135 __le16 type; /* see above */ 127 __le32 type; /* see above */
136 __le16 alternative; /* see comment */
137 __le32 length; /* not including type/length fields */ 128 __le32 length; /* not including type/length fields */
138 u8 data[0]; 129 u8 data[0];
139}; 130};
@@ -152,7 +143,7 @@ struct iwl_tlv_ucode_header {
152 u8 human_readable[64]; 143 u8 human_readable[64];
153 __le32 ver; /* major/minor/API/serial */ 144 __le32 ver; /* major/minor/API/serial */
154 __le32 build; 145 __le32 build;
155 __le64 alternatives; /* bitmask of valid alternatives */ 146 __le64 ignore;
156 /* 147 /*
157 * The data contained herein has a TLV layout, 148 * The data contained herein has a TLV layout,
158 * see above for the TLV header and types. 149 * see above for the TLV header and types.
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index 8e36bdc1e522..2153e4cc5572 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -63,6 +63,7 @@
63#ifndef __iwl_fw_h__ 63#ifndef __iwl_fw_h__
64#define __iwl_fw_h__ 64#define __iwl_fw_h__
65#include <linux/types.h> 65#include <linux/types.h>
66#include <net/mac80211.h>
66 67
67/** 68/**
68 * enum iwl_ucode_tlv_flag - ucode API flags 69 * enum iwl_ucode_tlv_flag - ucode API flags
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 09b856768f62..abb3250164ba 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -30,7 +30,6 @@
30#define __iwl_io_h__ 30#define __iwl_io_h__
31 31
32#include "iwl-devtrace.h" 32#include "iwl-devtrace.h"
33#include "iwl-shared.h"
34#include "iwl-trans.h" 33#include "iwl-trans.h"
35 34
36static inline void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val) 35static inline void iwl_write8(struct iwl_trans *trans, u32 ofs, u8 val)
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index 1993a2b7ae63..47000419f916 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -36,11 +36,10 @@
36#include <asm/unaligned.h> 36#include <asm/unaligned.h>
37 37
38#include "iwl-dev.h" 38#include "iwl-dev.h"
39#include "iwl-core.h"
40#include "iwl-agn.h" 39#include "iwl-agn.h"
41#include "iwl-io.h" 40#include "iwl-io.h"
42#include "iwl-trans.h" 41#include "iwl-trans.h"
43#include "iwl-shared.h" 42#include "iwl-modparams.h"
44 43
45/* Throughput OFF time(ms) ON time (ms) 44/* Throughput OFF time(ms) ON time (ms)
46 * >300 25 25 45 * >300 25 25
@@ -71,7 +70,7 @@ static const struct ieee80211_tpt_blink iwl_blink[] = {
71/* Set led register off */ 70/* Set led register off */
72void iwlagn_led_enable(struct iwl_priv *priv) 71void iwlagn_led_enable(struct iwl_priv *priv)
73{ 72{
74 iwl_write32(trans(priv), CSR_LED_REG, CSR_LED_REG_TRUN_ON); 73 iwl_write32(priv->trans, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
75} 74}
76 75
77/* 76/*
@@ -107,9 +106,9 @@ static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
107 }; 106 };
108 u32 reg; 107 u32 reg;
109 108
110 reg = iwl_read32(trans(priv), CSR_LED_REG); 109 reg = iwl_read32(priv->trans, CSR_LED_REG);
111 if (reg != (reg & CSR_LED_BSM_CTRL_MSK)) 110 if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
112 iwl_write32(trans(priv), CSR_LED_REG, 111 iwl_write32(priv->trans, CSR_LED_REG,
113 reg & CSR_LED_BSM_CTRL_MSK); 112 reg & CSR_LED_BSM_CTRL_MSK);
114 113
115 return iwl_dvm_send_cmd(priv, &cmd); 114 return iwl_dvm_send_cmd(priv, &cmd);
@@ -138,11 +137,11 @@ static int iwl_led_cmd(struct iwl_priv *priv,
138 } 137 }
139 138
140 IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n", 139 IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
141 cfg(priv)->base_params->led_compensation); 140 priv->cfg->base_params->led_compensation);
142 led_cmd.on = iwl_blink_compensation(priv, on, 141 led_cmd.on = iwl_blink_compensation(priv, on,
143 cfg(priv)->base_params->led_compensation); 142 priv->cfg->base_params->led_compensation);
144 led_cmd.off = iwl_blink_compensation(priv, off, 143 led_cmd.off = iwl_blink_compensation(priv, off,
145 cfg(priv)->base_params->led_compensation); 144 priv->cfg->base_params->led_compensation);
146 145
147 ret = iwl_send_led_cmd(priv, &led_cmd); 146 ret = iwl_send_led_cmd(priv, &led_cmd);
148 if (!ret) { 147 if (!ret) {
@@ -175,7 +174,7 @@ static int iwl_led_blink_set(struct led_classdev *led_cdev,
175 174
176void iwl_leds_init(struct iwl_priv *priv) 175void iwl_leds_init(struct iwl_priv *priv)
177{ 176{
178 int mode = iwlagn_mod_params.led_mode; 177 int mode = iwlwifi_mod_params.led_mode;
179 int ret; 178 int ret;
180 179
181 if (mode == IWL_LED_DISABLE) { 180 if (mode == IWL_LED_DISABLE) {
@@ -183,7 +182,7 @@ void iwl_leds_init(struct iwl_priv *priv)
183 return; 182 return;
184 } 183 }
185 if (mode == IWL_LED_DEFAULT) 184 if (mode == IWL_LED_DEFAULT)
186 mode = cfg(priv)->led_mode; 185 mode = priv->cfg->led_mode;
187 186
188 priv->led.name = kasprintf(GFP_KERNEL, "%s-led", 187 priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
189 wiphy_name(priv->hw->wiphy)); 188 wiphy_name(priv->hw->wiphy));
@@ -207,7 +206,7 @@ void iwl_leds_init(struct iwl_priv *priv)
207 break; 206 break;
208 } 207 }
209 208
210 ret = led_classdev_register(trans(priv)->dev, &priv->led); 209 ret = led_classdev_register(priv->trans->dev, &priv->led);
211 if (ret) { 210 if (ret) {
212 kfree(priv->led.name); 211 kfree(priv->led.name);
213 return; 212 return;
diff --git a/drivers/net/wireless/iwlwifi/iwl-mac80211.c b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
index c24a7134a6f9..d33cc9cc7d3f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-mac80211.c
+++ b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
@@ -44,13 +44,12 @@
44 44
45#include "iwl-eeprom.h" 45#include "iwl-eeprom.h"
46#include "iwl-dev.h" 46#include "iwl-dev.h"
47#include "iwl-core.h"
48#include "iwl-io.h" 47#include "iwl-io.h"
49#include "iwl-agn-calib.h" 48#include "iwl-agn-calib.h"
50#include "iwl-agn.h" 49#include "iwl-agn.h"
51#include "iwl-shared.h"
52#include "iwl-trans.h" 50#include "iwl-trans.h"
53#include "iwl-op-mode.h" 51#include "iwl-op-mode.h"
52#include "iwl-modparams.h"
54 53
55/***************************************************************************** 54/*****************************************************************************
56 * 55 *
@@ -147,7 +146,13 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
147 IEEE80211_HW_AMPDU_AGGREGATION | 146 IEEE80211_HW_AMPDU_AGGREGATION |
148 IEEE80211_HW_NEED_DTIM_PERIOD | 147 IEEE80211_HW_NEED_DTIM_PERIOD |
149 IEEE80211_HW_SPECTRUM_MGMT | 148 IEEE80211_HW_SPECTRUM_MGMT |
150 IEEE80211_HW_REPORTS_TX_ACK_STATUS; 149 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
150 IEEE80211_HW_QUEUE_CONTROL |
151 IEEE80211_HW_SUPPORTS_PS |
152 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
153 IEEE80211_HW_SCAN_WHILE_IDLE;
154
155 hw->offchannel_tx_hw_queue = IWL_AUX_QUEUE;
151 156
152 /* 157 /*
153 * Including the following line will crash some AP's. This 158 * Including the following line will crash some AP's. This
@@ -156,10 +161,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
156 hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF; 161 hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
157 */ 162 */
158 163
159 hw->flags |= IEEE80211_HW_SUPPORTS_PS | 164 if (priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE)
160 IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
161
162 if (hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE)
163 hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | 165 hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
164 IEEE80211_HW_SUPPORTS_STATIC_SMPS; 166 IEEE80211_HW_SUPPORTS_STATIC_SMPS;
165 167
@@ -197,13 +199,13 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
197 WIPHY_FLAG_IBSS_RSN; 199 WIPHY_FLAG_IBSS_RSN;
198 200
199 if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len && 201 if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
200 trans(priv)->ops->wowlan_suspend && 202 priv->trans->ops->wowlan_suspend &&
201 device_can_wakeup(trans(priv)->dev)) { 203 device_can_wakeup(priv->trans->dev)) {
202 hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT | 204 hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
203 WIPHY_WOWLAN_DISCONNECT | 205 WIPHY_WOWLAN_DISCONNECT |
204 WIPHY_WOWLAN_EAP_IDENTITY_REQ | 206 WIPHY_WOWLAN_EAP_IDENTITY_REQ |
205 WIPHY_WOWLAN_RFKILL_RELEASE; 207 WIPHY_WOWLAN_RFKILL_RELEASE;
206 if (!iwlagn_mod_params.sw_crypto) 208 if (!iwlwifi_mod_params.sw_crypto)
207 hw->wiphy->wowlan.flags |= 209 hw->wiphy->wowlan.flags |=
208 WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | 210 WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
209 WIPHY_WOWLAN_GTK_REKEY_FAILURE; 211 WIPHY_WOWLAN_GTK_REKEY_FAILURE;
@@ -215,7 +217,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
215 IWLAGN_WOWLAN_MAX_PATTERN_LEN; 217 IWLAGN_WOWLAN_MAX_PATTERN_LEN;
216 } 218 }
217 219
218 if (iwlagn_mod_params.power_save) 220 if (iwlwifi_mod_params.power_save)
219 hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; 221 hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
220 else 222 else
221 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; 223 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
@@ -224,8 +226,11 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
224 /* we create the 802.11 header and a zero-length SSID element */ 226 /* we create the 802.11 header and a zero-length SSID element */
225 hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 2; 227 hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 2;
226 228
227 /* Default value; 4 EDCA QOS priorities */ 229 /*
228 hw->queues = 4; 230 * We don't use all queues: 4 and 9 are unused and any
231 * aggregation queue gets mapped down to the AC queue.
232 */
233 hw->queues = IWLAGN_FIRST_AMPDU_QUEUE;
229 234
230 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; 235 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
231 236
@@ -236,7 +241,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
236 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = 241 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
237 &priv->bands[IEEE80211_BAND_5GHZ]; 242 &priv->bands[IEEE80211_BAND_5GHZ];
238 243
239 hw->wiphy->hw_version = trans(priv)->hw_id; 244 hw->wiphy->hw_version = priv->trans->hw_id;
240 245
241 iwl_leds_init(priv); 246 iwl_leds_init(priv);
242 247
@@ -332,7 +337,7 @@ static int iwlagn_mac_start(struct ieee80211_hw *hw)
332 return 0; 337 return 0;
333} 338}
334 339
335static void iwlagn_mac_stop(struct ieee80211_hw *hw) 340void iwlagn_mac_stop(struct ieee80211_hw *hw)
336{ 341{
337 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 342 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
338 343
@@ -355,18 +360,18 @@ static void iwlagn_mac_stop(struct ieee80211_hw *hw)
355 * even if interface is down, trans->down will leave the RF 360 * even if interface is down, trans->down will leave the RF
356 * kill interrupt enabled 361 * kill interrupt enabled
357 */ 362 */
358 iwl_trans_stop_hw(trans(priv)); 363 iwl_trans_stop_hw(priv->trans, false);
359 364
360 IWL_DEBUG_MAC80211(priv, "leave\n"); 365 IWL_DEBUG_MAC80211(priv, "leave\n");
361} 366}
362 367
363static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw, 368void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
364 struct ieee80211_vif *vif, 369 struct ieee80211_vif *vif,
365 struct cfg80211_gtk_rekey_data *data) 370 struct cfg80211_gtk_rekey_data *data)
366{ 371{
367 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 372 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
368 373
369 if (iwlagn_mod_params.sw_crypto) 374 if (iwlwifi_mod_params.sw_crypto)
370 return; 375 return;
371 376
372 IWL_DEBUG_MAC80211(priv, "enter\n"); 377 IWL_DEBUG_MAC80211(priv, "enter\n");
@@ -388,8 +393,7 @@ static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
388 393
389#ifdef CONFIG_PM_SLEEP 394#ifdef CONFIG_PM_SLEEP
390 395
391static int iwlagn_mac_suspend(struct ieee80211_hw *hw, 396int iwlagn_mac_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
392 struct cfg80211_wowlan *wowlan)
393{ 397{
394 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 398 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
395 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 399 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
@@ -412,9 +416,9 @@ static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
412 if (ret) 416 if (ret)
413 goto error; 417 goto error;
414 418
415 device_set_wakeup_enable(trans(priv)->dev, true); 419 device_set_wakeup_enable(priv->trans->dev, true);
416 420
417 iwl_trans_wowlan_suspend(trans(priv)); 421 iwl_trans_wowlan_suspend(priv->trans);
418 422
419 goto out; 423 goto out;
420 424
@@ -437,27 +441,28 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
437 unsigned long flags; 441 unsigned long flags;
438 u32 base, status = 0xffffffff; 442 u32 base, status = 0xffffffff;
439 int ret = -EIO; 443 int ret = -EIO;
440 const struct fw_img *img;
441 444
442 IWL_DEBUG_MAC80211(priv, "enter\n"); 445 IWL_DEBUG_MAC80211(priv, "enter\n");
443 mutex_lock(&priv->mutex); 446 mutex_lock(&priv->mutex);
444 447
445 iwl_write32(trans(priv), CSR_UCODE_DRV_GP1_CLR, 448 iwl_write32(priv->trans, CSR_UCODE_DRV_GP1_CLR,
446 CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE); 449 CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
447 450
448 base = priv->shrd->device_pointers.error_event_table; 451 base = priv->device_pointers.error_event_table;
449 if (iwlagn_hw_valid_rtc_data_addr(base)) { 452 if (iwlagn_hw_valid_rtc_data_addr(base)) {
450 spin_lock_irqsave(&trans(priv)->reg_lock, flags); 453 spin_lock_irqsave(&priv->trans->reg_lock, flags);
451 ret = iwl_grab_nic_access_silent(trans(priv)); 454 ret = iwl_grab_nic_access_silent(priv->trans);
452 if (likely(ret == 0)) { 455 if (likely(ret == 0)) {
453 iwl_write32(trans(priv), HBUS_TARG_MEM_RADDR, base); 456 iwl_write32(priv->trans, HBUS_TARG_MEM_RADDR, base);
454 status = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT); 457 status = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
455 iwl_release_nic_access(trans(priv)); 458 iwl_release_nic_access(priv->trans);
456 } 459 }
457 spin_unlock_irqrestore(&trans(priv)->reg_lock, flags); 460 spin_unlock_irqrestore(&priv->trans->reg_lock, flags);
458 461
459#ifdef CONFIG_IWLWIFI_DEBUGFS 462#ifdef CONFIG_IWLWIFI_DEBUGFS
460 if (ret == 0) { 463 if (ret == 0) {
464 const struct fw_img *img;
465
461 img = &(priv->fw->img[IWL_UCODE_WOWLAN]); 466 img = &(priv->fw->img[IWL_UCODE_WOWLAN]);
462 if (!priv->wowlan_sram) { 467 if (!priv->wowlan_sram) {
463 priv->wowlan_sram = 468 priv->wowlan_sram =
@@ -467,7 +472,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
467 472
468 if (priv->wowlan_sram) 473 if (priv->wowlan_sram)
469 _iwl_read_targ_mem_words( 474 _iwl_read_targ_mem_words(
470 trans(priv), 0x800000, 475 priv->trans, 0x800000,
471 priv->wowlan_sram, 476 priv->wowlan_sram,
472 img->sec[IWL_UCODE_SECTION_DATA].len / 4); 477 img->sec[IWL_UCODE_SECTION_DATA].len / 4);
473 } 478 }
@@ -479,7 +484,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
479 484
480 priv->wowlan = false; 485 priv->wowlan = false;
481 486
482 device_set_wakeup_enable(trans(priv)->dev, false); 487 device_set_wakeup_enable(priv->trans->dev, false);
483 488
484 iwlagn_prepare_restart(priv); 489 iwlagn_prepare_restart(priv);
485 490
@@ -497,7 +502,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
497 502
498#endif 503#endif
499 504
500static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 505void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
501{ 506{
502 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 507 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
503 508
@@ -508,21 +513,21 @@ static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
508 dev_kfree_skb_any(skb); 513 dev_kfree_skb_any(skb);
509} 514}
510 515
511static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw, 516void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
512 struct ieee80211_vif *vif, 517 struct ieee80211_vif *vif,
513 struct ieee80211_key_conf *keyconf, 518 struct ieee80211_key_conf *keyconf,
514 struct ieee80211_sta *sta, 519 struct ieee80211_sta *sta,
515 u32 iv32, u16 *phase1key) 520 u32 iv32, u16 *phase1key)
516{ 521{
517 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 522 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
518 523
519 iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key); 524 iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key);
520} 525}
521 526
522static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 527int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
523 struct ieee80211_vif *vif, 528 struct ieee80211_vif *vif,
524 struct ieee80211_sta *sta, 529 struct ieee80211_sta *sta,
525 struct ieee80211_key_conf *key) 530 struct ieee80211_key_conf *key)
526{ 531{
527 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 532 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
528 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; 533 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
@@ -532,7 +537,7 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
532 537
533 IWL_DEBUG_MAC80211(priv, "enter\n"); 538 IWL_DEBUG_MAC80211(priv, "enter\n");
534 539
535 if (iwlagn_mod_params.sw_crypto) { 540 if (iwlwifi_mod_params.sw_crypto) {
536 IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n"); 541 IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
537 return -EOPNOTSUPP; 542 return -EOPNOTSUPP;
538 } 543 }
@@ -622,11 +627,11 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
622 return ret; 627 return ret;
623} 628}
624 629
625static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, 630int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
626 struct ieee80211_vif *vif, 631 struct ieee80211_vif *vif,
627 enum ieee80211_ampdu_mlme_action action, 632 enum ieee80211_ampdu_mlme_action action,
628 struct ieee80211_sta *sta, u16 tid, u16 *ssn, 633 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
629 u8 buf_size) 634 u8 buf_size)
630{ 635{
631 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 636 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
632 int ret = -EINVAL; 637 int ret = -EINVAL;
@@ -635,7 +640,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
635 IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n", 640 IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
636 sta->addr, tid); 641 sta->addr, tid);
637 642
638 if (!(hw_params(priv).sku & EEPROM_SKU_CAP_11N_ENABLE)) 643 if (!(priv->hw_params.sku & EEPROM_SKU_CAP_11N_ENABLE))
639 return -EACCES; 644 return -EACCES;
640 645
641 IWL_DEBUG_MAC80211(priv, "enter\n"); 646 IWL_DEBUG_MAC80211(priv, "enter\n");
@@ -643,7 +648,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
643 648
644 switch (action) { 649 switch (action) {
645 case IEEE80211_AMPDU_RX_START: 650 case IEEE80211_AMPDU_RX_START:
646 if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) 651 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
647 break; 652 break;
648 IWL_DEBUG_HT(priv, "start Rx\n"); 653 IWL_DEBUG_HT(priv, "start Rx\n");
649 ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn); 654 ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
@@ -653,7 +658,9 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
653 ret = iwl_sta_rx_agg_stop(priv, sta, tid); 658 ret = iwl_sta_rx_agg_stop(priv, sta, tid);
654 break; 659 break;
655 case IEEE80211_AMPDU_TX_START: 660 case IEEE80211_AMPDU_TX_START:
656 if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) 661 if (!priv->trans->ops->tx_agg_setup)
662 break;
663 if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
657 break; 664 break;
658 IWL_DEBUG_HT(priv, "start Tx\n"); 665 IWL_DEBUG_HT(priv, "start Tx\n");
659 ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn); 666 ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
@@ -667,7 +674,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
667 priv->agg_tids_count); 674 priv->agg_tids_count);
668 } 675 }
669 if (!priv->agg_tids_count && 676 if (!priv->agg_tids_count &&
670 hw_params(priv).use_rts_for_aggregation) { 677 priv->hw_params.use_rts_for_aggregation) {
671 /* 678 /*
672 * switch off RTS/CTS if it was previously enabled 679 * switch off RTS/CTS if it was previously enabled
673 */ 680 */
@@ -746,11 +753,11 @@ static int iwlagn_mac_sta_remove(struct ieee80211_hw *hw,
746 return ret; 753 return ret;
747} 754}
748 755
749static int iwlagn_mac_sta_state(struct ieee80211_hw *hw, 756int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
750 struct ieee80211_vif *vif, 757 struct ieee80211_vif *vif,
751 struct ieee80211_sta *sta, 758 struct ieee80211_sta *sta,
752 enum ieee80211_sta_state old_state, 759 enum ieee80211_sta_state old_state,
753 enum ieee80211_sta_state new_state) 760 enum ieee80211_sta_state new_state)
754{ 761{
755 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 762 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
756 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; 763 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
@@ -829,8 +836,8 @@ static int iwlagn_mac_sta_state(struct ieee80211_hw *hw,
829 return ret; 836 return ret;
830} 837}
831 838
832static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw, 839void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
833 struct ieee80211_channel_switch *ch_switch) 840 struct ieee80211_channel_switch *ch_switch)
834{ 841{
835 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 842 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
836 const struct iwl_channel_info *ch_info; 843 const struct iwl_channel_info *ch_info;
@@ -863,7 +870,7 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
863 if (!iwl_is_associated_ctx(ctx)) 870 if (!iwl_is_associated_ctx(ctx))
864 goto out; 871 goto out;
865 872
866 if (!cfg(priv)->lib->set_channel_switch) 873 if (!priv->lib->set_channel_switch)
867 goto out; 874 goto out;
868 875
869 ch = channel->hw_value; 876 ch = channel->hw_value;
@@ -892,14 +899,13 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
892 iwl_set_rxon_ht(priv, ht_conf); 899 iwl_set_rxon_ht(priv, ht_conf);
893 iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif); 900 iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif);
894 901
895 iwl_set_rate(priv);
896 /* 902 /*
897 * at this point, staging_rxon has the 903 * at this point, staging_rxon has the
898 * configuration for channel switch 904 * configuration for channel switch
899 */ 905 */
900 set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status); 906 set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
901 priv->switch_channel = cpu_to_le16(ch); 907 priv->switch_channel = cpu_to_le16(ch);
902 if (cfg(priv)->lib->set_channel_switch(priv, ch_switch)) { 908 if (priv->lib->set_channel_switch(priv, ch_switch)) {
903 clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status); 909 clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
904 priv->switch_channel = 0; 910 priv->switch_channel = 0;
905 ieee80211_chswitch_done(ctx->vif, false); 911 ieee80211_chswitch_done(ctx->vif, false);
@@ -910,10 +916,25 @@ out:
910 IWL_DEBUG_MAC80211(priv, "leave\n"); 916 IWL_DEBUG_MAC80211(priv, "leave\n");
911} 917}
912 918
913static void iwlagn_configure_filter(struct ieee80211_hw *hw, 919void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
914 unsigned int changed_flags, 920{
915 unsigned int *total_flags, 921 /*
916 u64 multicast) 922 * MULTI-FIXME
923 * See iwlagn_mac_channel_switch.
924 */
925 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
926
927 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
928 return;
929
930 if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
931 ieee80211_chswitch_done(ctx->vif, is_success);
932}
933
934void iwlagn_configure_filter(struct ieee80211_hw *hw,
935 unsigned int changed_flags,
936 unsigned int *total_flags,
937 u64 multicast)
917{ 938{
918 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 939 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
919 __le32 filter_or = 0, filter_nand = 0; 940 __le32 filter_or = 0, filter_nand = 0;
@@ -960,7 +981,7 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
960 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; 981 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
961} 982}
962 983
963static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop) 984void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
964{ 985{
965 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 986 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
966 987
@@ -988,7 +1009,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
988 } 1009 }
989 } 1010 }
990 IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n"); 1011 IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
991 iwl_trans_wait_tx_queue_empty(trans(priv)); 1012 iwl_trans_wait_tx_queue_empty(priv->trans);
992done: 1013done:
993 mutex_unlock(&priv->mutex); 1014 mutex_unlock(&priv->mutex);
994 IWL_DEBUG_MAC80211(priv, "leave\n"); 1015 IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -1003,7 +1024,7 @@ static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
1003 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN]; 1024 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
1004 int err = 0; 1025 int err = 0;
1005 1026
1006 if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN))) 1027 if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
1007 return -EOPNOTSUPP; 1028 return -EOPNOTSUPP;
1008 1029
1009 if (!(ctx->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT))) 1030 if (!(ctx->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)))
@@ -1087,11 +1108,11 @@ static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
1087 return err; 1108 return err;
1088} 1109}
1089 1110
1090static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw) 1111int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
1091{ 1112{
1092 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1113 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1093 1114
1094 if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN))) 1115 if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
1095 return -EOPNOTSUPP; 1116 return -EOPNOTSUPP;
1096 1117
1097 IWL_DEBUG_MAC80211(priv, "enter\n"); 1118 IWL_DEBUG_MAC80211(priv, "enter\n");
@@ -1104,16 +1125,16 @@ static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
1104 return 0; 1125 return 0;
1105} 1126}
1106 1127
1107static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw, 1128void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
1108 enum ieee80211_rssi_event rssi_event) 1129 enum ieee80211_rssi_event rssi_event)
1109{ 1130{
1110 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1131 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1111 1132
1112 IWL_DEBUG_MAC80211(priv, "enter\n"); 1133 IWL_DEBUG_MAC80211(priv, "enter\n");
1113 mutex_lock(&priv->mutex); 1134 mutex_lock(&priv->mutex);
1114 1135
1115 if (cfg(priv)->bt_params && 1136 if (priv->cfg->bt_params &&
1116 cfg(priv)->bt_params->advanced_bt_coexist) { 1137 priv->cfg->bt_params->advanced_bt_coexist) {
1117 if (rssi_event == RSSI_EVENT_LOW) 1138 if (rssi_event == RSSI_EVENT_LOW)
1118 priv->bt_enable_pspoll = true; 1139 priv->bt_enable_pspoll = true;
1119 else if (rssi_event == RSSI_EVENT_HIGH) 1140 else if (rssi_event == RSSI_EVENT_HIGH)
@@ -1129,8 +1150,8 @@ static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
1129 IWL_DEBUG_MAC80211(priv, "leave\n"); 1150 IWL_DEBUG_MAC80211(priv, "leave\n");
1130} 1151}
1131 1152
1132static int iwlagn_mac_set_tim(struct ieee80211_hw *hw, 1153int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
1133 struct ieee80211_sta *sta, bool set) 1154 struct ieee80211_sta *sta, bool set)
1134{ 1155{
1135 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1156 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1136 1157
@@ -1139,9 +1160,9 @@ static int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
1139 return 0; 1160 return 0;
1140} 1161}
1141 1162
1142static int iwlagn_mac_conf_tx(struct ieee80211_hw *hw, 1163int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
1143 struct ieee80211_vif *vif, u16 queue, 1164 struct ieee80211_vif *vif, u16 queue,
1144 const struct ieee80211_tx_queue_params *params) 1165 const struct ieee80211_tx_queue_params *params)
1145{ 1166{
1146 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1167 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1147 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; 1168 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
@@ -1183,7 +1204,7 @@ static int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
1183 return 0; 1204 return 0;
1184} 1205}
1185 1206
1186static int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw) 1207int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw)
1187{ 1208{
1188 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1209 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1189 1210
@@ -1199,11 +1220,10 @@ static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1199 return iwlagn_commit_rxon(priv, ctx); 1220 return iwlagn_commit_rxon(priv, ctx);
1200} 1221}
1201 1222
1202static int iwl_setup_interface(struct iwl_priv *priv, 1223int iwl_setup_interface(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1203 struct iwl_rxon_context *ctx)
1204{ 1224{
1205 struct ieee80211_vif *vif = ctx->vif; 1225 struct ieee80211_vif *vif = ctx->vif;
1206 int err; 1226 int err, ac;
1207 1227
1208 lockdep_assert_held(&priv->mutex); 1228 lockdep_assert_held(&priv->mutex);
1209 1229
@@ -1223,7 +1243,7 @@ static int iwl_setup_interface(struct iwl_priv *priv,
1223 return err; 1243 return err;
1224 } 1244 }
1225 1245
1226 if (cfg(priv)->bt_params && cfg(priv)->bt_params->advanced_bt_coexist && 1246 if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist &&
1227 vif->type == NL80211_IFTYPE_ADHOC) { 1247 vif->type == NL80211_IFTYPE_ADHOC) {
1228 /* 1248 /*
1229 * pretend to have high BT traffic as long as we 1249 * pretend to have high BT traffic as long as we
@@ -1233,11 +1253,20 @@ static int iwl_setup_interface(struct iwl_priv *priv,
1233 priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH; 1253 priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
1234 } 1254 }
1235 1255
1256 /* set up queue mappings */
1257 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
1258 vif->hw_queue[ac] = ctx->ac_to_queue[ac];
1259
1260 if (vif->type == NL80211_IFTYPE_AP)
1261 vif->cab_queue = ctx->mcast_queue;
1262 else
1263 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
1264
1236 return 0; 1265 return 0;
1237} 1266}
1238 1267
1239static int iwlagn_mac_add_interface(struct ieee80211_hw *hw, 1268static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
1240 struct ieee80211_vif *vif) 1269 struct ieee80211_vif *vif)
1241{ 1270{
1242 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1271 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1243 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; 1272 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
@@ -1311,9 +1340,9 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
1311 return err; 1340 return err;
1312} 1341}
1313 1342
1314static void iwl_teardown_interface(struct iwl_priv *priv, 1343void iwl_teardown_interface(struct iwl_priv *priv,
1315 struct ieee80211_vif *vif, 1344 struct ieee80211_vif *vif,
1316 bool mode_change) 1345 bool mode_change)
1317{ 1346{
1318 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); 1347 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
1319 1348
@@ -1454,9 +1483,9 @@ static int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
1454 return err; 1483 return err;
1455} 1484}
1456 1485
1457static int iwlagn_mac_hw_scan(struct ieee80211_hw *hw, 1486int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
1458 struct ieee80211_vif *vif, 1487 struct ieee80211_vif *vif,
1459 struct cfg80211_scan_request *req) 1488 struct cfg80211_scan_request *req)
1460{ 1489{
1461 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1490 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1462 int ret; 1491 int ret;
@@ -1511,7 +1540,7 @@ static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
1511 iwl_send_add_sta(priv, &cmd, CMD_ASYNC); 1540 iwl_send_add_sta(priv, &cmd, CMD_ASYNC);
1512} 1541}
1513 1542
1514static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw, 1543void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
1515 struct ieee80211_vif *vif, 1544 struct ieee80211_vif *vif,
1516 enum sta_notify_cmd cmd, 1545 enum sta_notify_cmd cmd,
1517 struct ieee80211_sta *sta) 1546 struct ieee80211_sta *sta)
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
new file mode 100644
index 000000000000..558b2e63c5cf
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -0,0 +1,124 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63#ifndef __iwl_shared_h__
64#define __iwl_shared_h__
65
66#include <linux/types.h>
67#include <linux/spinlock.h>
68#include <linux/gfp.h>
69#include <net/mac80211.h>
70
71extern struct iwl_mod_params iwlwifi_mod_params;
72
73enum iwl_power_level {
74 IWL_POWER_INDEX_1,
75 IWL_POWER_INDEX_2,
76 IWL_POWER_INDEX_3,
77 IWL_POWER_INDEX_4,
78 IWL_POWER_INDEX_5,
79 IWL_POWER_NUM
80};
81
82#define IWL_DISABLE_HT_ALL BIT(0)
83#define IWL_DISABLE_HT_TXAGG BIT(1)
84#define IWL_DISABLE_HT_RXAGG BIT(2)
85
86/**
87 * struct iwl_mod_params
88 *
89 * Holds the module parameters
90 *
91 * @sw_crypto: using hardware encryption, default = 0
92 * @disable_11n: disable 11n capabilities, default = 0,
93 * use IWL_DISABLE_HT_* constants
94 * @amsdu_size_8K: enable 8K amsdu size, default = 1
95 * @restart_fw: restart firmware, default = 1
96 * @plcp_check: enable plcp health check, default = true
97 * @wd_disable: enable stuck queue check, default = 0
98 * @bt_coex_active: enable bt coex, default = true
99 * @led_mode: system default, default = 0
100 * @power_save: disable power save, default = false
101 * @power_level: power level, default = 1
102 * @debug_level: levels are IWL_DL_*
103 * @ant_coupling: antenna coupling in dB, default = 0
104 * @bt_ch_announce: BT channel inhibition, default = enable
105 * @auto_agg: enable agg. without check, default = true
106 */
107struct iwl_mod_params {
108 int sw_crypto;
109 unsigned int disable_11n;
110 int amsdu_size_8K;
111 int restart_fw;
112 bool plcp_check;
113 int wd_disable;
114 bool bt_coex_active;
115 int led_mode;
116 bool power_save;
117 int power_level;
118 u32 debug_level;
119 int ant_coupling;
120 bool bt_ch_announce;
121 bool auto_agg;
122};
123
124#endif /* __iwl_shared_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
index 88dc4a0f96b4..0066b899fe5c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
@@ -75,21 +75,45 @@ void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_wait)
75void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait, 75void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
76 struct iwl_rx_packet *pkt) 76 struct iwl_rx_packet *pkt)
77{ 77{
78 bool triggered = false;
79
78 if (!list_empty(&notif_wait->notif_waits)) { 80 if (!list_empty(&notif_wait->notif_waits)) {
79 struct iwl_notification_wait *w; 81 struct iwl_notification_wait *w;
80 82
81 spin_lock(&notif_wait->notif_wait_lock); 83 spin_lock(&notif_wait->notif_wait_lock);
82 list_for_each_entry(w, &notif_wait->notif_waits, list) { 84 list_for_each_entry(w, &notif_wait->notif_waits, list) {
83 if (w->cmd != pkt->hdr.cmd) 85 int i;
86 bool found = false;
87
88 /*
89 * If it already finished (triggered) or has been
 90 * aborted then don't evaluate it again to avoid races.
 91 * Otherwise the function could be called again even
 92 * though it returned true before.
93 */
94 if (w->triggered || w->aborted)
95 continue;
96
97 for (i = 0; i < w->n_cmds; i++) {
98 if (w->cmds[i] == pkt->hdr.cmd) {
99 found = true;
100 break;
101 }
102 }
103 if (!found)
84 continue; 104 continue;
85 w->triggered = true; 105
86 if (w->fn) 106 if (!w->fn || w->fn(notif_wait, pkt, w->fn_data)) {
87 w->fn(notif_wait, pkt, w->fn_data); 107 w->triggered = true;
108 triggered = true;
109 }
88 } 110 }
89 spin_unlock(&notif_wait->notif_wait_lock); 111 spin_unlock(&notif_wait->notif_wait_lock);
90 112
91 wake_up_all(&notif_wait->notif_waitq);
92 } 113 }
114
115 if (triggered)
116 wake_up_all(&notif_wait->notif_waitq);
93} 117}
94 118
95void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait) 119void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
@@ -109,14 +133,18 @@ void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
109void 133void
110iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait, 134iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
111 struct iwl_notification_wait *wait_entry, 135 struct iwl_notification_wait *wait_entry,
112 u8 cmd, 136 const u8 *cmds, int n_cmds,
113 void (*fn)(struct iwl_notif_wait_data *notif_wait, 137 bool (*fn)(struct iwl_notif_wait_data *notif_wait,
114 struct iwl_rx_packet *pkt, void *data), 138 struct iwl_rx_packet *pkt, void *data),
115 void *fn_data) 139 void *fn_data)
116{ 140{
141 if (WARN_ON(n_cmds > MAX_NOTIF_CMDS))
142 n_cmds = MAX_NOTIF_CMDS;
143
117 wait_entry->fn = fn; 144 wait_entry->fn = fn;
118 wait_entry->fn_data = fn_data; 145 wait_entry->fn_data = fn_data;
119 wait_entry->cmd = cmd; 146 wait_entry->n_cmds = n_cmds;
147 memcpy(wait_entry->cmds, cmds, n_cmds);
120 wait_entry->triggered = false; 148 wait_entry->triggered = false;
121 wait_entry->aborted = false; 149 wait_entry->aborted = false;
122 150
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
index 5e8af957aa7b..821523100cf1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
@@ -72,11 +72,19 @@ struct iwl_notif_wait_data {
72 wait_queue_head_t notif_waitq; 72 wait_queue_head_t notif_waitq;
73}; 73};
74 74
75#define MAX_NOTIF_CMDS 5
76
75/** 77/**
76 * struct iwl_notification_wait - notification wait entry 78 * struct iwl_notification_wait - notification wait entry
77 * @list: list head for global list 79 * @list: list head for global list
78 * @fn: function called with the notification 80 * @fn: Function called with the notification. If the function
79 * @cmd: command ID 81 * returns true, the wait is over, if it returns false then
82 * the waiter stays blocked. If no function is given, any
83 * of the listed commands will unblock the waiter.
84 * @cmds: command IDs
85 * @n_cmds: number of command IDs
86 * @triggered: waiter should be woken up
87 * @aborted: wait was aborted
80 * 88 *
81 * This structure is not used directly, to wait for a 89 * This structure is not used directly, to wait for a
82 * notification declare it on the stack, and call 90 * notification declare it on the stack, and call
@@ -93,11 +101,12 @@ struct iwl_notif_wait_data {
93struct iwl_notification_wait { 101struct iwl_notification_wait {
94 struct list_head list; 102 struct list_head list;
95 103
96 void (*fn)(struct iwl_notif_wait_data *notif_data, 104 bool (*fn)(struct iwl_notif_wait_data *notif_data,
97 struct iwl_rx_packet *pkt, void *data); 105 struct iwl_rx_packet *pkt, void *data);
98 void *fn_data; 106 void *fn_data;
99 107
100 u8 cmd; 108 u8 cmds[MAX_NOTIF_CMDS];
109 u8 n_cmds;
101 bool triggered, aborted; 110 bool triggered, aborted;
102}; 111};
103 112
@@ -112,8 +121,8 @@ void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_data);
112void __acquires(wait_entry) 121void __acquires(wait_entry)
113iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data, 122iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data,
114 struct iwl_notification_wait *wait_entry, 123 struct iwl_notification_wait *wait_entry,
115 u8 cmd, 124 const u8 *cmds, int n_cmds,
116 void (*fn)(struct iwl_notif_wait_data *notif_data, 125 bool (*fn)(struct iwl_notif_wait_data *notif_data,
117 struct iwl_rx_packet *pkt, void *data), 126 struct iwl_rx_packet *pkt, void *data),
118 void *fn_data); 127 void *fn_data);
119 128
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index 6ea4163ff56a..4ef742b28e08 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -69,6 +69,7 @@ struct sk_buff;
69struct iwl_device_cmd; 69struct iwl_device_cmd;
70struct iwl_rx_cmd_buffer; 70struct iwl_rx_cmd_buffer;
71struct iwl_fw; 71struct iwl_fw;
72struct iwl_cfg;
72 73
73/** 74/**
74 * DOC: Operational mode - what is it ? 75 * DOC: Operational mode - what is it ?
@@ -111,10 +112,10 @@ struct iwl_fw;
111 * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the 112 * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
112 * HCMD the this Rx responds to. 113 * HCMD the this Rx responds to.
113 * Must be atomic. 114 * Must be atomic.
114 * @queue_full: notifies that a HW queue is full. Ac is the ac of the queue 115 * @queue_full: notifies that a HW queue is full.
115 * Must be atomic 116 * Must be atomic
116 * @queue_not_full: notifies that a HW queue is not full any more. 117 * @queue_not_full: notifies that a HW queue is not full any more.
117 * Ac is the ac of the queue. Must be atomic 118 * Must be atomic
118 * @hw_rf_kill:notifies of a change in the HW rf kill switch. True means that 119 * @hw_rf_kill:notifies of a change in the HW rf kill switch. True means that
119 * the radio is killed. Must be atomic. 120 * the radio is killed. Must be atomic.
120 * @free_skb: allows the transport layer to free skbs that haven't been 121 * @free_skb: allows the transport layer to free skbs that haven't been
@@ -125,20 +126,23 @@ struct iwl_fw;
125 * @cmd_queue_full: Called when the command queue gets full. Must be atomic. 126 * @cmd_queue_full: Called when the command queue gets full. Must be atomic.
126 * @nic_config: configure NIC, called before firmware is started. 127 * @nic_config: configure NIC, called before firmware is started.
127 * May sleep 128 * May sleep
129 * @wimax_active: invoked when WiMax becomes active. Must be atomic.
128 */ 130 */
129struct iwl_op_mode_ops { 131struct iwl_op_mode_ops {
130 struct iwl_op_mode *(*start)(struct iwl_trans *trans, 132 struct iwl_op_mode *(*start)(struct iwl_trans *trans,
133 const struct iwl_cfg *cfg,
131 const struct iwl_fw *fw); 134 const struct iwl_fw *fw);
132 void (*stop)(struct iwl_op_mode *op_mode); 135 void (*stop)(struct iwl_op_mode *op_mode);
133 int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb, 136 int (*rx)(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb,
134 struct iwl_device_cmd *cmd); 137 struct iwl_device_cmd *cmd);
135 void (*queue_full)(struct iwl_op_mode *op_mode, u8 ac); 138 void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
136 void (*queue_not_full)(struct iwl_op_mode *op_mode, u8 ac); 139 void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
137 void (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state); 140 void (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
138 void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb); 141 void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
139 void (*nic_error)(struct iwl_op_mode *op_mode); 142 void (*nic_error)(struct iwl_op_mode *op_mode);
140 void (*cmd_queue_full)(struct iwl_op_mode *op_mode); 143 void (*cmd_queue_full)(struct iwl_op_mode *op_mode);
141 void (*nic_config)(struct iwl_op_mode *op_mode); 144 void (*nic_config)(struct iwl_op_mode *op_mode);
145 void (*wimax_active)(struct iwl_op_mode *op_mode);
142}; 146};
143 147
144/** 148/**
@@ -169,15 +173,16 @@ static inline int iwl_op_mode_rx(struct iwl_op_mode *op_mode,
169 return op_mode->ops->rx(op_mode, rxb, cmd); 173 return op_mode->ops->rx(op_mode, rxb, cmd);
170} 174}
171 175
172static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode, u8 ac) 176static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode,
177 int queue)
173{ 178{
174 op_mode->ops->queue_full(op_mode, ac); 179 op_mode->ops->queue_full(op_mode, queue);
175} 180}
176 181
177static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode, 182static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode,
178 u8 ac) 183 int queue)
179{ 184{
180 op_mode->ops->queue_not_full(op_mode, ac); 185 op_mode->ops->queue_not_full(op_mode, queue);
181} 186}
182 187
183static inline void iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode, 188static inline void iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode,
@@ -208,6 +213,11 @@ static inline void iwl_op_mode_nic_config(struct iwl_op_mode *op_mode)
208 op_mode->ops->nic_config(op_mode); 213 op_mode->ops->nic_config(op_mode);
209} 214}
210 215
216static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
217{
218 op_mode->ops->wimax_active(op_mode);
219}
220
211/***************************************************** 221/*****************************************************
212* Op mode layers implementations 222* Op mode layers implementations
213******************************************************/ 223******************************************************/
diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c
index c5e339ee918b..0c8a1c2d8847 100644
--- a/drivers/net/wireless/iwlwifi/iwl-pci.c
+++ b/drivers/net/wireless/iwlwifi/iwl-pci.c
@@ -60,17 +60,18 @@
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 * 61 *
62 *****************************************************************************/ 62 *****************************************************************************/
63
64#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
65
63#include <linux/module.h> 66#include <linux/module.h>
64#include <linux/pci.h> 67#include <linux/pci.h>
65#include <linux/pci-aspm.h> 68#include <linux/pci-aspm.h>
66 69
67#include "iwl-io.h"
68#include "iwl-shared.h"
69#include "iwl-trans.h" 70#include "iwl-trans.h"
70#include "iwl-csr.h"
71#include "iwl-cfg.h" 71#include "iwl-cfg.h"
72#include "iwl-drv.h" 72#include "iwl-drv.h"
73#include "iwl-trans.h" 73#include "iwl-trans.h"
74#include "iwl-trans-pcie-int.h"
74 75
75#define IWL_PCI_DEVICE(dev, subdev, cfg) \ 76#define IWL_PCI_DEVICE(dev, subdev, cfg) \
76 .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \ 77 .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
@@ -261,61 +262,46 @@ MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
261/* PCI registers */ 262/* PCI registers */
262#define PCI_CFG_RETRY_TIMEOUT 0x041 263#define PCI_CFG_RETRY_TIMEOUT 0x041
263 264
265#ifndef CONFIG_IWLWIFI_IDI
266
264static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 267static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
265{ 268{
266 const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data); 269 const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
267 struct iwl_shared *shrd;
268 struct iwl_trans *iwl_trans; 270 struct iwl_trans *iwl_trans;
269 int err; 271 struct iwl_trans_pcie *trans_pcie;
270
271 shrd = kzalloc(sizeof(*iwl_trans->shrd), GFP_KERNEL);
272 if (!shrd) {
273 dev_printk(KERN_ERR, &pdev->dev,
274 "Couldn't allocate iwl_shared");
275 err = -ENOMEM;
276 goto out_free_bus;
277 }
278 272
279#ifdef CONFIG_IWLWIFI_IDI 273 iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg);
280 iwl_trans = iwl_trans_idi_alloc(shrd, pdev, ent); 274 if (iwl_trans == NULL)
281#else 275 return -ENOMEM;
282 iwl_trans = iwl_trans_pcie_alloc(shrd, pdev, ent);
283#endif
284 if (iwl_trans == NULL) {
285 err = -ENOMEM;
286 goto out_free_bus;
287 }
288 276
289 shrd->trans = iwl_trans;
290 pci_set_drvdata(pdev, iwl_trans); 277 pci_set_drvdata(pdev, iwl_trans);
291 278
292 err = iwl_drv_start(shrd, iwl_trans, cfg); 279 trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
293 if (err) 280 trans_pcie->drv = iwl_drv_start(iwl_trans, cfg);
281 if (!trans_pcie->drv)
294 goto out_free_trans; 282 goto out_free_trans;
295 283
296 return 0; 284 return 0;
297 285
298out_free_trans: 286out_free_trans:
299 iwl_trans_free(iwl_trans); 287 iwl_trans_pcie_free(iwl_trans);
300 pci_set_drvdata(pdev, NULL); 288 pci_set_drvdata(pdev, NULL);
301out_free_bus: 289 return -EFAULT;
302 kfree(shrd);
303 return err;
304} 290}
305 291
306static void __devexit iwl_pci_remove(struct pci_dev *pdev) 292static void __devexit iwl_pci_remove(struct pci_dev *pdev)
307{ 293{
308 struct iwl_trans *iwl_trans = pci_get_drvdata(pdev); 294 struct iwl_trans *trans = pci_get_drvdata(pdev);
309 struct iwl_shared *shrd = iwl_trans->shrd; 295 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
310 296
311 iwl_drv_stop(shrd); 297 iwl_drv_stop(trans_pcie->drv);
312 iwl_trans_free(shrd->trans); 298 iwl_trans_pcie_free(trans);
313 299
314 pci_set_drvdata(pdev, NULL); 300 pci_set_drvdata(pdev, NULL);
315
316 kfree(shrd);
317} 301}
318 302
303#endif /* CONFIG_IWLWIFI_IDI */
304
319#ifdef CONFIG_PM_SLEEP 305#ifdef CONFIG_PM_SLEEP
320 306
321static int iwl_pci_suspend(struct device *device) 307static int iwl_pci_suspend(struct device *device)
@@ -360,6 +346,15 @@ static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume);
360 346
361#endif 347#endif
362 348
349#ifdef CONFIG_IWLWIFI_IDI
350/*
351 * Defined externally in iwl-idi.c
352 */
353int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
354void __devexit iwl_pci_remove(struct pci_dev *pdev);
355
356#endif /* CONFIG_IWLWIFI_IDI */
357
363static struct pci_driver iwl_pci_driver = { 358static struct pci_driver iwl_pci_driver = {
364 .name = DRV_NAME, 359 .name = DRV_NAME,
365 .id_table = iwl_hw_card_ids, 360 .id_table = iwl_hw_card_ids,
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
new file mode 100644
index 000000000000..f166955340fe
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
@@ -0,0 +1,288 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#include <linux/slab.h>
65#include <linux/string.h>
66
67#include "iwl-debug.h"
68#include "iwl-dev.h"
69
70#include "iwl-phy-db.h"
71
72#define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */
73
74struct iwl_phy_db *iwl_phy_db_init(struct device *dev)
75{
76 struct iwl_phy_db *phy_db = kzalloc(sizeof(struct iwl_phy_db),
77 GFP_KERNEL);
78
79 if (!phy_db)
80 return phy_db;
81
82 phy_db->dev = dev;
83
84 /* TODO: add default values of the phy db. */
85 return phy_db;
86}
87
88/*
89 * get phy db section: returns a pointer to a phy db section specified by
90 * type and channel group id.
91 */
92static struct iwl_phy_db_entry *
93iwl_phy_db_get_section(struct iwl_phy_db *phy_db,
94 enum iwl_phy_db_section_type type,
95 u16 chg_id)
96{
97 if (!phy_db || type < 0 || type >= IWL_PHY_DB_MAX)
98 return NULL;
99
100 switch (type) {
101 case IWL_PHY_DB_CFG:
102 return &phy_db->cfg;
103 case IWL_PHY_DB_CALIB_NCH:
104 return &phy_db->calib_nch;
105 case IWL_PHY_DB_CALIB_CH:
106 return &phy_db->calib_ch;
107 case IWL_PHY_DB_CALIB_CHG_PAPD:
108 if (chg_id < 0 || chg_id >= IWL_NUM_PAPD_CH_GROUPS)
109 return NULL;
110 return &phy_db->calib_ch_group_papd[chg_id];
111 case IWL_PHY_DB_CALIB_CHG_TXP:
112 if (chg_id < 0 || chg_id >= IWL_NUM_TXP_CH_GROUPS)
113 return NULL;
114 return &phy_db->calib_ch_group_txp[chg_id];
115 default:
116 return NULL;
117 }
118 return NULL;
119}
120
121static void iwl_phy_db_free_section(struct iwl_phy_db *phy_db,
122 enum iwl_phy_db_section_type type,
123 u16 chg_id)
124{
125 struct iwl_phy_db_entry *entry =
126 iwl_phy_db_get_section(phy_db, type, chg_id);
127 if (!entry)
128 return;
129
130 kfree(entry->data);
131 entry->data = NULL;
132 entry->size = 0;
133}
134
135void iwl_phy_db_free(struct iwl_phy_db *phy_db)
136{
137 int i;
138
139 if (!phy_db)
140 return;
141
142 iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0);
143 iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0);
144 iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CH, 0);
145 for (i = 0; i < IWL_NUM_PAPD_CH_GROUPS; i++)
146 iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i);
147 for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++)
148 iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_TXP, i);
149
150 kfree(phy_db);
151}
152
153int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
154 enum iwl_phy_db_section_type type, u8 *data,
155 u16 size, gfp_t alloc_ctx)
156{
157 struct iwl_phy_db_entry *entry;
158 u16 chg_id = 0;
159
160 if (!phy_db)
161 return -EINVAL;
162
163 if (type == IWL_PHY_DB_CALIB_CHG_PAPD ||
164 type == IWL_PHY_DB_CALIB_CHG_TXP)
165 chg_id = le16_to_cpup((__le16 *)data);
166
167 entry = iwl_phy_db_get_section(phy_db, type, chg_id);
168 if (!entry)
169 return -EINVAL;
170
171 kfree(entry->data);
172 entry->data = kmemdup(data, size, alloc_ctx);
173 if (!entry->data) {
174 entry->size = 0;
175 return -ENOMEM;
176 }
177
178 entry->size = size;
179
180 if (type == IWL_PHY_DB_CALIB_CH) {
181 phy_db->channel_num = le32_to_cpup((__le32 *)data);
182 phy_db->channel_size =
183 (size - CHANNEL_NUM_SIZE) / phy_db->channel_num;
184 }
185
186 return 0;
187}
188
189static int is_valid_channel(u16 ch_id)
190{
191 if (ch_id <= 14 ||
192 (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
193 (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
194 (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
195 return 1;
196 return 0;
197}
198
199static u8 ch_id_to_ch_index(u16 ch_id)
200{
201 if (WARN_ON(!is_valid_channel(ch_id)))
202 return 0xff;
203
204 if (ch_id <= 14)
205 return ch_id - 1;
206 if (ch_id <= 64)
207 return (ch_id + 20) / 4;
208 if (ch_id <= 140)
209 return (ch_id - 12) / 4;
210 return (ch_id - 13) / 4;
211}
212
213
214static u16 channel_id_to_papd(u16 ch_id)
215{
216 if (WARN_ON(!is_valid_channel(ch_id)))
217 return 0xff;
218
219 if (1 <= ch_id && ch_id <= 14)
220 return 0;
221 if (36 <= ch_id && ch_id <= 64)
222 return 1;
223 if (100 <= ch_id && ch_id <= 140)
224 return 2;
225 return 3;
226}
227
228static u16 channel_id_to_txp(struct iwl_phy_db *phy_db, u16 ch_id)
229{
230 struct iwl_phy_db_chg_txp *txp_chg;
231 int i;
232 u8 ch_index = ch_id_to_ch_index(ch_id);
233 if (ch_index == 0xff)
234 return 0xff;
235
236 for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) {
237 txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
238 if (!txp_chg)
239 return 0xff;
240 /*
 241 * Look for the first channel group whose max channel is
 242 * higher than the wanted channel.
243 */
244 if (le16_to_cpu(txp_chg->max_channel_idx) >= ch_index)
245 return i;
246 }
247 return 0xff;
248}
249
250int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
251 enum iwl_phy_db_section_type type, u8 **data,
252 u16 *size, u16 ch_id)
253{
254 struct iwl_phy_db_entry *entry;
255 u32 channel_num;
256 u32 channel_size;
257 u16 ch_group_id = 0;
258 u16 index;
259
260 if (!phy_db)
261 return -EINVAL;
262
263 /* find wanted channel group */
264 if (type == IWL_PHY_DB_CALIB_CHG_PAPD)
265 ch_group_id = channel_id_to_papd(ch_id);
266 else if (type == IWL_PHY_DB_CALIB_CHG_TXP)
267 ch_group_id = channel_id_to_txp(phy_db, ch_id);
268
269 entry = iwl_phy_db_get_section(phy_db, type, ch_group_id);
270 if (!entry)
271 return -EINVAL;
272
273 if (type == IWL_PHY_DB_CALIB_CH) {
274 index = ch_id_to_ch_index(ch_id);
275 channel_num = phy_db->channel_num;
276 channel_size = phy_db->channel_size;
277 if (index >= channel_num) {
278 IWL_ERR(phy_db, "Wrong channel number %d", ch_id);
279 return -EINVAL;
280 }
281 *data = entry->data + CHANNEL_NUM_SIZE + index * channel_size;
282 *size = channel_size;
283 } else {
284 *data = entry->data;
285 *size = entry->size;
286 }
287 return 0;
288}
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/iwlwifi/iwl-phy-db.h
new file mode 100644
index 000000000000..c34c6a9303ab
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.h
@@ -0,0 +1,129 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#ifndef __IWL_PHYDB_H__
65#define __IWL_PHYDB_H__
66
67#include <linux/types.h>
68
69#define IWL_NUM_PAPD_CH_GROUPS 4
70#define IWL_NUM_TXP_CH_GROUPS 8
71
/* A single phy-db section: the raw data buffer and its size in bytes. */
72struct iwl_phy_db_entry {
73	u16	size;
74	u8	*data;
75};
76
77struct iwl_shared;
78
79/**
80 * struct iwl_phy_db - stores phy configuration and calibration data.
81 *
82 * @cfg: phy configuration.
83 * @calib_nch: non channel specific calibration data.
84 * @calib_ch: channel specific calibration data.
85 * @calib_ch_group_papd: calibration data related to papd channel group.
86 * @calib_ch_group_txp: calibration data related to tx power channel group.
 * @channel_num: number of per-channel entries stored in @calib_ch.
 * @channel_size: size in bytes of one per-channel entry in @calib_ch.
 * @dev: device pointer, kept for access to the logger.
87 */
88struct iwl_phy_db {
89	struct iwl_phy_db_entry	cfg;
90	struct iwl_phy_db_entry	calib_nch;
91	struct iwl_phy_db_entry	calib_ch;
92	struct iwl_phy_db_entry	calib_ch_group_papd[IWL_NUM_PAPD_CH_GROUPS];
93	struct iwl_phy_db_entry	calib_ch_group_txp[IWL_NUM_TXP_CH_GROUPS];
94
95	u32 channel_num;
96	u32 channel_size;
97
98	/* for an access to the logger */
99	struct device *dev;
100};
101
/*
 * Identifiers for the sections held in struct iwl_phy_db.  Note that
 * the values deliberately start at 1, not 0.
 */
102enum iwl_phy_db_section_type {
103	IWL_PHY_DB_CFG = 1,
104	IWL_PHY_DB_CALIB_NCH,
105	IWL_PHY_DB_CALIB_CH,
106	IWL_PHY_DB_CALIB_CHG_PAPD,
107	IWL_PHY_DB_CALIB_CHG_TXP,
108	IWL_PHY_DB_MAX
109};
110
111/* for parsing of tx power channel group data that comes from the firmware */
112struct iwl_phy_db_chg_txp {
	/* presumably reserved/padding -- TODO confirm against firmware API */
113	__le32 space;
	/* highest channel index covered by this tx-power group; compared
	 * against the wanted channel index in channel_id_to_txp() */
114	__le16 max_channel_idx;
115} __packed;
116
117struct iwl_phy_db *iwl_phy_db_init(struct device *dev);
118
119void iwl_phy_db_free(struct iwl_phy_db *phy_db);
120
121int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
122 enum iwl_phy_db_section_type type, u8 *data,
123 u16 size, gfp_t alloc_ctx);
124
125int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
126 enum iwl_phy_db_section_type type, u8 **data,
127 u16 *size, u16 ch_id);
128
129#endif /* __IWL_PHYDB_H__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 958d9d09aee3..8352265dbc4b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -37,13 +37,12 @@
37#include "iwl-eeprom.h" 37#include "iwl-eeprom.h"
38#include "iwl-dev.h" 38#include "iwl-dev.h"
39#include "iwl-agn.h" 39#include "iwl-agn.h"
40#include "iwl-core.h"
41#include "iwl-io.h" 40#include "iwl-io.h"
42#include "iwl-commands.h" 41#include "iwl-commands.h"
43#include "iwl-debug.h" 42#include "iwl-debug.h"
44#include "iwl-power.h" 43#include "iwl-power.h"
45#include "iwl-trans.h" 44#include "iwl-trans.h"
46#include "iwl-shared.h" 45#include "iwl-modparams.h"
47 46
48/* 47/*
49 * Setting power level allows the card to go to sleep when not busy. 48 * Setting power level allows the card to go to sleep when not busy.
@@ -167,7 +166,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
167 u8 skip; 166 u8 skip;
168 u32 slp_itrvl; 167 u32 slp_itrvl;
169 168
170 if (cfg(priv)->adv_pm) { 169 if (priv->cfg->adv_pm) {
171 table = apm_range_2; 170 table = apm_range_2;
172 if (period <= IWL_DTIM_RANGE_1_MAX) 171 if (period <= IWL_DTIM_RANGE_1_MAX)
173 table = apm_range_1; 172 table = apm_range_1;
@@ -215,13 +214,13 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
215 else 214 else
216 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK; 215 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
217 216
218 if (cfg(priv)->base_params->shadow_reg_enable) 217 if (priv->cfg->base_params->shadow_reg_enable)
219 cmd->flags |= IWL_POWER_SHADOW_REG_ENA; 218 cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
220 else 219 else
221 cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA; 220 cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
222 221
223 if (iwl_advanced_bt_coexist(priv)) { 222 if (iwl_advanced_bt_coexist(priv)) {
224 if (!cfg(priv)->bt_params->bt_sco_disable) 223 if (!priv->cfg->bt_params->bt_sco_disable)
225 cmd->flags |= IWL_POWER_BT_SCO_ENA; 224 cmd->flags |= IWL_POWER_BT_SCO_ENA;
226 else 225 else
227 cmd->flags &= ~IWL_POWER_BT_SCO_ENA; 226 cmd->flags &= ~IWL_POWER_BT_SCO_ENA;
@@ -268,61 +267,6 @@ static void iwl_power_sleep_cam_cmd(struct iwl_priv *priv,
268 IWL_DEBUG_POWER(priv, "Sleep command for CAM\n"); 267 IWL_DEBUG_POWER(priv, "Sleep command for CAM\n");
269} 268}
270 269
271static void iwl_power_fill_sleep_cmd(struct iwl_priv *priv,
272 struct iwl_powertable_cmd *cmd,
273 int dynps_ms, int wakeup_period)
274{
275 /*
276 * These are the original power level 3 sleep successions. The
277 * device may behave better with such succession and was also
278 * only tested with that. Just like the original sleep commands,
279 * also adjust the succession here to the wakeup_period below.
280 * The ranges are the same as for the sleep commands, 0-2, 3-9
281 * and >10, which is selected based on the DTIM interval for
282 * the sleep index but here we use the wakeup period since that
283 * is what we need to do for the latency requirements.
284 */
285 static const u8 slp_succ_r0[IWL_POWER_VEC_SIZE] = { 2, 2, 2, 2, 2 };
286 static const u8 slp_succ_r1[IWL_POWER_VEC_SIZE] = { 2, 4, 6, 7, 9 };
287 static const u8 slp_succ_r2[IWL_POWER_VEC_SIZE] = { 2, 7, 9, 9, 0xFF };
288 const u8 *slp_succ = slp_succ_r0;
289 int i;
290
291 if (wakeup_period > IWL_DTIM_RANGE_0_MAX)
292 slp_succ = slp_succ_r1;
293 if (wakeup_period > IWL_DTIM_RANGE_1_MAX)
294 slp_succ = slp_succ_r2;
295
296 memset(cmd, 0, sizeof(*cmd));
297
298 cmd->flags = IWL_POWER_DRIVER_ALLOW_SLEEP_MSK |
299 IWL_POWER_FAST_PD; /* no use seeing frames for others */
300
301 if (priv->power_data.bus_pm)
302 cmd->flags |= IWL_POWER_PCI_PM_MSK;
303
304 if (cfg(priv)->base_params->shadow_reg_enable)
305 cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
306 else
307 cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
308
309 if (iwl_advanced_bt_coexist(priv)) {
310 if (!cfg(priv)->bt_params->bt_sco_disable)
311 cmd->flags |= IWL_POWER_BT_SCO_ENA;
312 else
313 cmd->flags &= ~IWL_POWER_BT_SCO_ENA;
314 }
315
316 cmd->rx_data_timeout = cpu_to_le32(1000 * dynps_ms);
317 cmd->tx_data_timeout = cpu_to_le32(1000 * dynps_ms);
318
319 for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
320 cmd->sleep_interval[i] =
321 cpu_to_le32(min_t(int, slp_succ[i], wakeup_period));
322
323 IWL_DEBUG_POWER(priv, "Automatic sleep command\n");
324}
325
326static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd) 270static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
327{ 271{
328 IWL_DEBUG_POWER(priv, "Sending power/sleep command\n"); 272 IWL_DEBUG_POWER(priv, "Sending power/sleep command\n");
@@ -350,7 +294,7 @@ static void iwl_power_build_cmd(struct iwl_priv *priv,
350 294
351 if (priv->wowlan) 295 if (priv->wowlan)
352 iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper); 296 iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper);
353 else if (!cfg(priv)->base_params->no_idle_support && 297 else if (!priv->cfg->base_params->no_idle_support &&
354 priv->hw->conf.flags & IEEE80211_CONF_IDLE) 298 priv->hw->conf.flags & IEEE80211_CONF_IDLE)
355 iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20); 299 iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20);
356 else if (iwl_tt_is_low_power_state(priv)) { 300 else if (iwl_tt_is_low_power_state(priv)) {
@@ -363,18 +307,15 @@ static void iwl_power_build_cmd(struct iwl_priv *priv,
363 iwl_static_sleep_cmd(priv, cmd, 307 iwl_static_sleep_cmd(priv, cmd,
364 priv->power_data.debug_sleep_level_override, 308 priv->power_data.debug_sleep_level_override,
365 dtimper); 309 dtimper);
366 else if (iwlagn_mod_params.no_sleep_autoadjust) { 310 else {
367 if (iwlagn_mod_params.power_level > IWL_POWER_INDEX_1 && 311 if (iwlwifi_mod_params.power_level > IWL_POWER_INDEX_1 &&
368 iwlagn_mod_params.power_level <= IWL_POWER_INDEX_5) 312 iwlwifi_mod_params.power_level <= IWL_POWER_INDEX_5)
369 iwl_static_sleep_cmd(priv, cmd, 313 iwl_static_sleep_cmd(priv, cmd,
370 iwlagn_mod_params.power_level, dtimper); 314 iwlwifi_mod_params.power_level, dtimper);
371 else 315 else
372 iwl_static_sleep_cmd(priv, cmd, 316 iwl_static_sleep_cmd(priv, cmd,
373 IWL_POWER_INDEX_1, dtimper); 317 IWL_POWER_INDEX_1, dtimper);
374 } else 318 }
375 iwl_power_fill_sleep_cmd(priv, cmd,
376 priv->hw->conf.dynamic_ps_timeout,
377 priv->hw->conf.max_sleep_period);
378} 319}
379 320
380int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd, 321int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
@@ -403,12 +344,12 @@ int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
403 } 344 }
404 345
405 if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK) 346 if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
406 set_bit(STATUS_POWER_PMI, &priv->shrd->status); 347 iwl_dvm_set_pmi(priv, true);
407 348
408 ret = iwl_set_power(priv, cmd); 349 ret = iwl_set_power(priv, cmd);
409 if (!ret) { 350 if (!ret) {
410 if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)) 351 if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
411 clear_bit(STATUS_POWER_PMI, &priv->shrd->status); 352 iwl_dvm_set_pmi(priv, false);
412 353
413 if (update_chains) 354 if (update_chains)
414 iwl_update_chain_flags(priv); 355 iwl_update_chain_flags(priv);
@@ -436,7 +377,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
436/* initialize to default */ 377/* initialize to default */
437void iwl_power_initialize(struct iwl_priv *priv) 378void iwl_power_initialize(struct iwl_priv *priv)
438{ 379{
439 priv->power_data.bus_pm = trans(priv)->pm_support; 380 priv->power_data.bus_pm = priv->trans->pm_support;
440 381
441 priv->power_data.debug_sleep_level_override = -1; 382 priv->power_data.debug_sleep_level_override = -1;
442 383
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h
index 07a19fce5fdc..21afc92efacb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.h
+++ b/drivers/net/wireless/iwlwifi/iwl-power.h
@@ -30,15 +30,6 @@
30 30
31#include "iwl-commands.h" 31#include "iwl-commands.h"
32 32
33enum iwl_power_level {
34 IWL_POWER_INDEX_1,
35 IWL_POWER_INDEX_2,
36 IWL_POWER_INDEX_3,
37 IWL_POWER_INDEX_4,
38 IWL_POWER_INDEX_5,
39 IWL_POWER_NUM
40};
41
42struct iwl_power_mgr { 33struct iwl_power_mgr {
43 struct iwl_powertable_cmd sleep_cmd; 34 struct iwl_powertable_cmd sleep_cmd;
44 struct iwl_powertable_cmd sleep_cmd_next; 35 struct iwl_powertable_cmd sleep_cmd_next;
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 902efe4bc898..a8437a6bc18e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -32,7 +32,6 @@
32 32
33#include "iwl-eeprom.h" 33#include "iwl-eeprom.h"
34#include "iwl-dev.h" 34#include "iwl-dev.h"
35#include "iwl-core.h"
36#include "iwl-io.h" 35#include "iwl-io.h"
37#include "iwl-agn.h" 36#include "iwl-agn.h"
38#include "iwl-trans.h" 37#include "iwl-trans.h"
@@ -69,7 +68,7 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
69 if (!test_bit(STATUS_READY, &priv->status) || 68 if (!test_bit(STATUS_READY, &priv->status) ||
70 !test_bit(STATUS_GEO_CONFIGURED, &priv->status) || 69 !test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
71 !test_bit(STATUS_SCAN_HW, &priv->status) || 70 !test_bit(STATUS_SCAN_HW, &priv->status) ||
72 test_bit(STATUS_FW_ERROR, &priv->shrd->status)) 71 test_bit(STATUS_FW_ERROR, &priv->status))
73 return -EIO; 72 return -EIO;
74 73
75 ret = iwl_dvm_send_cmd(priv, &cmd); 74 ret = iwl_dvm_send_cmd(priv, &cmd);
@@ -451,6 +450,46 @@ static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
451 return iwl_limit_dwell(priv, passive); 450 return iwl_limit_dwell(priv, passive);
452} 451}
453 452
453/* Return valid, unused, channel for a passive scan to reset the RF */
454static u8 iwl_get_single_channel_number(struct iwl_priv *priv,
455 enum ieee80211_band band)
456{
457 const struct iwl_channel_info *ch_info;
458 int i;
459 u8 channel = 0;
460 u8 min, max;
461 struct iwl_rxon_context *ctx;
462
463 if (band == IEEE80211_BAND_5GHZ) {
464 min = 14;
465 max = priv->channel_count;
466 } else {
467 min = 0;
468 max = 14;
469 }
470
471 for (i = min; i < max; i++) {
472 bool busy = false;
473
474 for_each_context(priv, ctx) {
475 busy = priv->channel_info[i].channel ==
476 le16_to_cpu(ctx->staging.channel);
477 if (busy)
478 break;
479 }
480
481 if (busy)
482 continue;
483
484 channel = priv->channel_info[i].channel;
485 ch_info = iwl_get_channel_info(priv, band, channel);
486 if (is_channel_valid(ch_info))
487 break;
488 }
489
490 return channel;
491}
492
454static int iwl_get_single_channel_for_scan(struct iwl_priv *priv, 493static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
455 struct ieee80211_vif *vif, 494 struct ieee80211_vif *vif,
456 enum ieee80211_band band, 495 enum ieee80211_band band,
@@ -633,12 +672,12 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
633 u16 rx_chain = 0; 672 u16 rx_chain = 0;
634 enum ieee80211_band band; 673 enum ieee80211_band band;
635 u8 n_probes = 0; 674 u8 n_probes = 0;
636 u8 rx_ant = hw_params(priv).valid_rx_ant; 675 u8 rx_ant = priv->hw_params.valid_rx_ant;
637 u8 rate; 676 u8 rate;
638 bool is_active = false; 677 bool is_active = false;
639 int chan_mod; 678 int chan_mod;
640 u8 active_chains; 679 u8 active_chains;
641 u8 scan_tx_antennas = hw_params(priv).valid_tx_ant; 680 u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
642 int ret; 681 int ret;
643 682
644 lockdep_assert_held(&priv->mutex); 683 lockdep_assert_held(&priv->mutex);
@@ -751,8 +790,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
751 * Internal scans are passive, so we can indiscriminately set 790 * Internal scans are passive, so we can indiscriminately set
752 * the BT ignore flag on 2.4 GHz since it applies to TX only. 791 * the BT ignore flag on 2.4 GHz since it applies to TX only.
753 */ 792 */
754 if (cfg(priv)->bt_params && 793 if (priv->cfg->bt_params &&
755 cfg(priv)->bt_params->advanced_bt_coexist) 794 priv->cfg->bt_params->advanced_bt_coexist)
756 scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT; 795 scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT;
757 break; 796 break;
758 case IEEE80211_BAND_5GHZ: 797 case IEEE80211_BAND_5GHZ:
@@ -793,12 +832,9 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
793 832
794 band = priv->scan_band; 833 band = priv->scan_band;
795 834
796 if (cfg(priv)->scan_rx_antennas[band])
797 rx_ant = cfg(priv)->scan_rx_antennas[band];
798
799 if (band == IEEE80211_BAND_2GHZ && 835 if (band == IEEE80211_BAND_2GHZ &&
800 cfg(priv)->bt_params && 836 priv->cfg->bt_params &&
801 cfg(priv)->bt_params->advanced_bt_coexist) { 837 priv->cfg->bt_params->advanced_bt_coexist) {
802 /* transmit 2.4 GHz probes only on first antenna */ 838 /* transmit 2.4 GHz probes only on first antenna */
803 scan_tx_antennas = first_antenna(scan_tx_antennas); 839 scan_tx_antennas = first_antenna(scan_tx_antennas);
804 } 840 }
@@ -809,8 +845,12 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
809 rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]); 845 rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
810 scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags); 846 scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);
811 847
812 /* In power save mode use one chain, otherwise use all chains */ 848 /*
813 if (test_bit(STATUS_POWER_PMI, &priv->shrd->status)) { 849 * In power save mode while associated use one chain,
850 * otherwise use all chains
851 */
852 if (test_bit(STATUS_POWER_PMI, &priv->status) &&
853 !(priv->hw->conf.flags & IEEE80211_CONF_IDLE)) {
814 /* rx_ant has been set to all valid chains previously */ 854 /* rx_ant has been set to all valid chains previously */
815 active_chains = rx_ant & 855 active_chains = rx_ant &
816 ((u8)(priv->chain_noise_data.active_chains)); 856 ((u8)(priv->chain_noise_data.active_chains));
@@ -822,8 +862,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
822 862
823 rx_ant = first_antenna(active_chains); 863 rx_ant = first_antenna(active_chains);
824 } 864 }
825 if (cfg(priv)->bt_params && 865 if (priv->cfg->bt_params &&
826 cfg(priv)->bt_params->advanced_bt_coexist && 866 priv->cfg->bt_params->advanced_bt_coexist &&
827 priv->bt_full_concurrent) { 867 priv->bt_full_concurrent) {
828 /* operated as 1x1 in full concurrency mode */ 868 /* operated as 1x1 in full concurrency mode */
829 rx_ant = first_antenna(rx_ant); 869 rx_ant = first_antenna(rx_ant);
@@ -831,7 +871,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
831 871
832 /* MIMO is not used here, but value is required */ 872 /* MIMO is not used here, but value is required */
833 rx_chain |= 873 rx_chain |=
834 hw_params(priv).valid_rx_ant << RXON_RX_CHAIN_VALID_POS; 874 priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
835 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS; 875 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
836 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS; 876 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
837 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS; 877 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
@@ -944,7 +984,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
944 984
945void iwl_init_scan_params(struct iwl_priv *priv) 985void iwl_init_scan_params(struct iwl_priv *priv)
946{ 986{
947 u8 ant_idx = fls(hw_params(priv).valid_tx_ant) - 1; 987 u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1;
948 if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ]) 988 if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
949 priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx; 989 priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
950 if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ]) 990 if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.c b/drivers/net/wireless/iwlwifi/iwl-testmode.c
index 76f7f9251436..060aac3e22f1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-testmode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-testmode.c
@@ -71,7 +71,6 @@
71#include <net/netlink.h> 71#include <net/netlink.h>
72 72
73#include "iwl-dev.h" 73#include "iwl-dev.h"
74#include "iwl-core.h"
75#include "iwl-debug.h" 74#include "iwl-debug.h"
76#include "iwl-io.h" 75#include "iwl-io.h"
77#include "iwl-agn.h" 76#include "iwl-agn.h"
@@ -184,9 +183,10 @@ static void iwl_testmode_ucode_rx_pkt(struct iwl_priv *priv,
184 "Run out of memory for messages to user space ?\n"); 183 "Run out of memory for messages to user space ?\n");
185 return; 184 return;
186 } 185 }
187 NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT); 186 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
188 /* the length doesn't include len_n_flags field, so add it manually */ 187 /* the length doesn't include len_n_flags field, so add it manually */
189 NLA_PUT(skb, IWL_TM_ATTR_UCODE_RX_PKT, length + sizeof(__le32), data); 188 nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, length + sizeof(__le32), data))
189 goto nla_put_failure;
190 cfg80211_testmode_event(skb, GFP_ATOMIC); 190 cfg80211_testmode_event(skb, GFP_ATOMIC);
191 return; 191 return;
192 192
@@ -218,7 +218,7 @@ static void iwl_trace_cleanup(struct iwl_priv *priv)
218 if (priv->testmode_trace.trace_enabled) { 218 if (priv->testmode_trace.trace_enabled) {
219 if (priv->testmode_trace.cpu_addr && 219 if (priv->testmode_trace.cpu_addr &&
220 priv->testmode_trace.dma_addr) 220 priv->testmode_trace.dma_addr)
221 dma_free_coherent(trans(priv)->dev, 221 dma_free_coherent(priv->trans->dev,
222 priv->testmode_trace.total_size, 222 priv->testmode_trace.total_size,
223 priv->testmode_trace.cpu_addr, 223 priv->testmode_trace.cpu_addr,
224 priv->testmode_trace.dma_addr); 224 priv->testmode_trace.dma_addr);
@@ -314,8 +314,9 @@ static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb)
314 memcpy(reply_buf, &(pkt->hdr), reply_len); 314 memcpy(reply_buf, &(pkt->hdr), reply_len);
315 iwl_free_resp(&cmd); 315 iwl_free_resp(&cmd);
316 316
317 NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT); 317 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) ||
318 NLA_PUT(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf); 318 nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf))
319 goto nla_put_failure;
319 return cfg80211_testmode_reply(skb); 320 return cfg80211_testmode_reply(skb);
320 321
321nla_put_failure: 322nla_put_failure:
@@ -371,7 +372,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
371 372
372 switch (cmd) { 373 switch (cmd) {
373 case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32: 374 case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
374 val32 = iwl_read_direct32(trans(priv), ofs); 375 val32 = iwl_read_direct32(priv->trans, ofs);
375 IWL_INFO(priv, "32bit value to read 0x%x\n", val32); 376 IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
376 377
377 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20); 378 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
@@ -379,7 +380,8 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
379 IWL_ERR(priv, "Memory allocation fail\n"); 380 IWL_ERR(priv, "Memory allocation fail\n");
380 return -ENOMEM; 381 return -ENOMEM;
381 } 382 }
382 NLA_PUT_U32(skb, IWL_TM_ATTR_REG_VALUE32, val32); 383 if (nla_put_u32(skb, IWL_TM_ATTR_REG_VALUE32, val32))
384 goto nla_put_failure;
383 status = cfg80211_testmode_reply(skb); 385 status = cfg80211_testmode_reply(skb);
384 if (status < 0) 386 if (status < 0)
385 IWL_ERR(priv, "Error sending msg : %d\n", status); 387 IWL_ERR(priv, "Error sending msg : %d\n", status);
@@ -391,7 +393,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
391 } else { 393 } else {
392 val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]); 394 val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
393 IWL_INFO(priv, "32bit value to write 0x%x\n", val32); 395 IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
394 iwl_write_direct32(trans(priv), ofs, val32); 396 iwl_write_direct32(priv->trans, ofs, val32);
395 } 397 }
396 break; 398 break;
397 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8: 399 case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
@@ -401,7 +403,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
401 } else { 403 } else {
402 val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]); 404 val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]);
403 IWL_INFO(priv, "8bit value to write 0x%x\n", val8); 405 IWL_INFO(priv, "8bit value to write 0x%x\n", val8);
404 iwl_write8(trans(priv), ofs, val8); 406 iwl_write8(priv->trans, ofs, val8);
405 } 407 }
406 break; 408 break;
407 default: 409 default:
@@ -420,10 +422,13 @@ nla_put_failure:
420static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv) 422static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
421{ 423{
422 struct iwl_notification_wait calib_wait; 424 struct iwl_notification_wait calib_wait;
425 static const u8 calib_complete[] = {
426 CALIBRATION_COMPLETE_NOTIFICATION
427 };
423 int ret; 428 int ret;
424 429
425 iwl_init_notification_wait(&priv->notif_wait, &calib_wait, 430 iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
426 CALIBRATION_COMPLETE_NOTIFICATION, 431 calib_complete, ARRAY_SIZE(calib_complete),
427 NULL, NULL); 432 NULL, NULL);
428 ret = iwl_init_alive_start(priv); 433 ret = iwl_init_alive_start(priv);
429 if (ret) { 434 if (ret) {
@@ -461,7 +466,7 @@ cfg_init_calib_error:
461static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb) 466static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
462{ 467{
463 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 468 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
464 struct iwl_trans *trans = trans(priv); 469 struct iwl_trans *trans = priv->trans;
465 struct sk_buff *skb; 470 struct sk_buff *skb;
466 unsigned char *rsp_data_ptr = NULL; 471 unsigned char *rsp_data_ptr = NULL;
467 int status = 0, rsp_data_len = 0; 472 int status = 0, rsp_data_len = 0;
@@ -470,18 +475,19 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
470 475
471 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) { 476 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
472 case IWL_TM_CMD_APP2DEV_GET_DEVICENAME: 477 case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
473 rsp_data_ptr = (unsigned char *)cfg(priv)->name; 478 rsp_data_ptr = (unsigned char *)priv->cfg->name;
474 rsp_data_len = strlen(cfg(priv)->name); 479 rsp_data_len = strlen(priv->cfg->name);
475 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 480 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
476 rsp_data_len + 20); 481 rsp_data_len + 20);
477 if (!skb) { 482 if (!skb) {
478 IWL_ERR(priv, "Memory allocation fail\n"); 483 IWL_ERR(priv, "Memory allocation fail\n");
479 return -ENOMEM; 484 return -ENOMEM;
480 } 485 }
481 NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, 486 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
482 IWL_TM_CMD_DEV2APP_SYNC_RSP); 487 IWL_TM_CMD_DEV2APP_SYNC_RSP) ||
483 NLA_PUT(skb, IWL_TM_ATTR_SYNC_RSP, 488 nla_put(skb, IWL_TM_ATTR_SYNC_RSP,
484 rsp_data_len, rsp_data_ptr); 489 rsp_data_len, rsp_data_ptr))
490 goto nla_put_failure;
485 status = cfg80211_testmode_reply(skb); 491 status = cfg80211_testmode_reply(skb);
486 if (status < 0) 492 if (status < 0)
487 IWL_ERR(priv, "Error sending msg : %d\n", status); 493 IWL_ERR(priv, "Error sending msg : %d\n", status);
@@ -529,18 +535,19 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
529 break; 535 break;
530 536
531 case IWL_TM_CMD_APP2DEV_GET_EEPROM: 537 case IWL_TM_CMD_APP2DEV_GET_EEPROM:
532 if (priv->shrd->eeprom) { 538 if (priv->eeprom) {
533 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 539 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
534 cfg(priv)->base_params->eeprom_size + 20); 540 priv->cfg->base_params->eeprom_size + 20);
535 if (!skb) { 541 if (!skb) {
536 IWL_ERR(priv, "Memory allocation fail\n"); 542 IWL_ERR(priv, "Memory allocation fail\n");
537 return -ENOMEM; 543 return -ENOMEM;
538 } 544 }
539 NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND, 545 if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND,
540 IWL_TM_CMD_DEV2APP_EEPROM_RSP); 546 IWL_TM_CMD_DEV2APP_EEPROM_RSP) ||
541 NLA_PUT(skb, IWL_TM_ATTR_EEPROM, 547 nla_put(skb, IWL_TM_ATTR_EEPROM,
542 cfg(priv)->base_params->eeprom_size, 548 priv->cfg->base_params->eeprom_size,
543 priv->shrd->eeprom); 549 priv->eeprom))
550 goto nla_put_failure;
544 status = cfg80211_testmode_reply(skb); 551 status = cfg80211_testmode_reply(skb);
545 if (status < 0) 552 if (status < 0)
546 IWL_ERR(priv, "Error sending msg : %d\n", 553 IWL_ERR(priv, "Error sending msg : %d\n",
@@ -566,15 +573,16 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
566 IWL_ERR(priv, "Memory allocation fail\n"); 573 IWL_ERR(priv, "Memory allocation fail\n");
567 return -ENOMEM; 574 return -ENOMEM;
568 } 575 }
569 NLA_PUT_U32(skb, IWL_TM_ATTR_FW_VERSION, 576 if (nla_put_u32(skb, IWL_TM_ATTR_FW_VERSION,
570 priv->fw->ucode_ver); 577 priv->fw->ucode_ver))
578 goto nla_put_failure;
571 status = cfg80211_testmode_reply(skb); 579 status = cfg80211_testmode_reply(skb);
572 if (status < 0) 580 if (status < 0)
573 IWL_ERR(priv, "Error sending msg : %d\n", status); 581 IWL_ERR(priv, "Error sending msg : %d\n", status);
574 break; 582 break;
575 583
576 case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID: 584 case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
577 devid = trans(priv)->hw_id; 585 devid = priv->trans->hw_id;
578 IWL_INFO(priv, "hw version: 0x%x\n", devid); 586 IWL_INFO(priv, "hw version: 0x%x\n", devid);
579 587
580 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20); 588 skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
@@ -582,7 +590,8 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
582 IWL_ERR(priv, "Memory allocation fail\n"); 590 IWL_ERR(priv, "Memory allocation fail\n");
583 return -ENOMEM; 591 return -ENOMEM;
584 } 592 }
585 NLA_PUT_U32(skb, IWL_TM_ATTR_DEVICE_ID, devid); 593 if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid))
594 goto nla_put_failure;
586 status = cfg80211_testmode_reply(skb); 595 status = cfg80211_testmode_reply(skb);
587 if (status < 0) 596 if (status < 0)
588 IWL_ERR(priv, "Error sending msg : %d\n", status); 597 IWL_ERR(priv, "Error sending msg : %d\n", status);
@@ -598,13 +607,14 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
598 IWL_ERR(priv, "No uCode has not been loaded\n"); 607 IWL_ERR(priv, "No uCode has not been loaded\n");
599 return -EINVAL; 608 return -EINVAL;
600 } else { 609 } else {
601 img = &priv->fw->img[priv->shrd->ucode_type]; 610 img = &priv->fw->img[priv->cur_ucode];
602 inst_size = img->sec[IWL_UCODE_SECTION_INST].len; 611 inst_size = img->sec[IWL_UCODE_SECTION_INST].len;
603 data_size = img->sec[IWL_UCODE_SECTION_DATA].len; 612 data_size = img->sec[IWL_UCODE_SECTION_DATA].len;
604 } 613 }
605 NLA_PUT_U32(skb, IWL_TM_ATTR_FW_TYPE, priv->shrd->ucode_type); 614 if (nla_put_u32(skb, IWL_TM_ATTR_FW_TYPE, priv->cur_ucode) ||
606 NLA_PUT_U32(skb, IWL_TM_ATTR_FW_INST_SIZE, inst_size); 615 nla_put_u32(skb, IWL_TM_ATTR_FW_INST_SIZE, inst_size) ||
607 NLA_PUT_U32(skb, IWL_TM_ATTR_FW_DATA_SIZE, data_size); 616 nla_put_u32(skb, IWL_TM_ATTR_FW_DATA_SIZE, data_size))
617 goto nla_put_failure;
608 status = cfg80211_testmode_reply(skb); 618 status = cfg80211_testmode_reply(skb);
609 if (status < 0) 619 if (status < 0)
610 IWL_ERR(priv, "Error sending msg : %d\n", status); 620 IWL_ERR(priv, "Error sending msg : %d\n", status);
@@ -639,7 +649,7 @@ static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
639 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 649 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
640 struct sk_buff *skb; 650 struct sk_buff *skb;
641 int status = 0; 651 int status = 0;
642 struct device *dev = trans(priv)->dev; 652 struct device *dev = priv->trans->dev;
643 653
644 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) { 654 switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
645 case IWL_TM_CMD_APP2DEV_BEGIN_TRACE: 655 case IWL_TM_CMD_APP2DEV_BEGIN_TRACE:
@@ -678,9 +688,10 @@ static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
678 iwl_trace_cleanup(priv); 688 iwl_trace_cleanup(priv);
679 return -ENOMEM; 689 return -ENOMEM;
680 } 690 }
681 NLA_PUT(skb, IWL_TM_ATTR_TRACE_ADDR, 691 if (nla_put(skb, IWL_TM_ATTR_TRACE_ADDR,
682 sizeof(priv->testmode_trace.dma_addr), 692 sizeof(priv->testmode_trace.dma_addr),
683 (u64 *)&priv->testmode_trace.dma_addr); 693 (u64 *)&priv->testmode_trace.dma_addr))
694 goto nla_put_failure;
684 status = cfg80211_testmode_reply(skb); 695 status = cfg80211_testmode_reply(skb);
685 if (status < 0) { 696 if (status < 0) {
686 IWL_ERR(priv, "Error sending msg : %d\n", status); 697 IWL_ERR(priv, "Error sending msg : %d\n", status);
@@ -725,9 +736,10 @@ static int iwl_testmode_trace_dump(struct ieee80211_hw *hw,
725 length = priv->testmode_trace.buff_size % 736 length = priv->testmode_trace.buff_size %
726 DUMP_CHUNK_SIZE; 737 DUMP_CHUNK_SIZE;
727 738
728 NLA_PUT(skb, IWL_TM_ATTR_TRACE_DUMP, length, 739 if (nla_put(skb, IWL_TM_ATTR_TRACE_DUMP, length,
729 priv->testmode_trace.trace_addr + 740 priv->testmode_trace.trace_addr +
730 (DUMP_CHUNK_SIZE * idx)); 741 (DUMP_CHUNK_SIZE * idx)))
742 goto nla_put_failure;
731 idx++; 743 idx++;
732 cb->args[4] = idx; 744 cb->args[4] = idx;
733 return 0; 745 return 0;
@@ -779,7 +791,7 @@ static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
779 791
780static int iwl_testmode_indirect_read(struct iwl_priv *priv, u32 addr, u32 size) 792static int iwl_testmode_indirect_read(struct iwl_priv *priv, u32 addr, u32 size)
781{ 793{
782 struct iwl_trans *trans = trans(priv); 794 struct iwl_trans *trans = priv->trans;
783 unsigned long flags; 795 unsigned long flags;
784 int i; 796 int i;
785 797
@@ -819,7 +831,7 @@ static int iwl_testmode_indirect_read(struct iwl_priv *priv, u32 addr, u32 size)
819static int iwl_testmode_indirect_write(struct iwl_priv *priv, u32 addr, 831static int iwl_testmode_indirect_write(struct iwl_priv *priv, u32 addr,
820 u32 size, unsigned char *buf) 832 u32 size, unsigned char *buf)
821{ 833{
822 struct iwl_trans *trans = trans(priv); 834 struct iwl_trans *trans = priv->trans;
823 u32 val, i; 835 u32 val, i;
824 unsigned long flags; 836 unsigned long flags;
825 837
@@ -922,9 +934,10 @@ static int iwl_testmode_buffer_dump(struct ieee80211_hw *hw,
922 length = priv->testmode_mem.buff_size % 934 length = priv->testmode_mem.buff_size %
923 DUMP_CHUNK_SIZE; 935 DUMP_CHUNK_SIZE;
924 936
925 NLA_PUT(skb, IWL_TM_ATTR_BUFFER_DUMP, length, 937 if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, length,
926 priv->testmode_mem.buff_addr + 938 priv->testmode_mem.buff_addr +
927 (DUMP_CHUNK_SIZE * idx)); 939 (DUMP_CHUNK_SIZE * idx)))
940 goto nla_put_failure;
928 idx++; 941 idx++;
929 cb->args[4] = idx; 942 cb->args[4] = idx;
930 return 0; 943 return 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
index 1c2fe87bd7e2..6213c05a4b52 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
@@ -34,17 +34,15 @@
34#include <linux/skbuff.h> 34#include <linux/skbuff.h>
35#include <linux/wait.h> 35#include <linux/wait.h>
36#include <linux/pci.h> 36#include <linux/pci.h>
37#include <linux/timer.h>
37 38
38#include "iwl-fh.h" 39#include "iwl-fh.h"
39#include "iwl-csr.h" 40#include "iwl-csr.h"
40#include "iwl-shared.h"
41#include "iwl-trans.h" 41#include "iwl-trans.h"
42#include "iwl-debug.h" 42#include "iwl-debug.h"
43#include "iwl-io.h" 43#include "iwl-io.h"
44#include "iwl-op-mode.h" 44#include "iwl-op-mode.h"
45 45
46struct iwl_tx_queue;
47struct iwl_queue;
48struct iwl_host_cmd; 46struct iwl_host_cmd;
49 47
50/*This file includes the declaration that are internal to the 48/*This file includes the declaration that are internal to the
@@ -136,21 +134,14 @@ static inline int iwl_queue_dec_wrap(int index, int n_bd)
136 return --index & (n_bd - 1); 134 return --index & (n_bd - 1);
137} 135}
138 136
139/*
140 * This queue number is required for proper operation
141 * because the ucode will stop/start the scheduler as
142 * required.
143 */
144#define IWL_IPAN_MCAST_QUEUE 8
145
146struct iwl_cmd_meta { 137struct iwl_cmd_meta {
147 /* only for SYNC commands, iff the reply skb is wanted */ 138 /* only for SYNC commands, iff the reply skb is wanted */
148 struct iwl_host_cmd *source; 139 struct iwl_host_cmd *source;
149 140
150 u32 flags;
151
152 DEFINE_DMA_UNMAP_ADDR(mapping); 141 DEFINE_DMA_UNMAP_ADDR(mapping);
153 DEFINE_DMA_UNMAP_LEN(len); 142 DEFINE_DMA_UNMAP_LEN(len);
143
144 u32 flags;
154}; 145};
155 146
156/* 147/*
@@ -188,72 +179,66 @@ struct iwl_queue {
188 * space less than this */ 179 * space less than this */
189}; 180};
190 181
182#define TFD_TX_CMD_SLOTS 256
183#define TFD_CMD_SLOTS 32
184
185struct iwl_pcie_tx_queue_entry {
186 struct iwl_device_cmd *cmd;
187 struct sk_buff *skb;
188 struct iwl_cmd_meta meta;
189};
190
191/** 191/**
192 * struct iwl_tx_queue - Tx Queue for DMA 192 * struct iwl_tx_queue - Tx Queue for DMA
193 * @q: generic Rx/Tx queue descriptor 193 * @q: generic Rx/Tx queue descriptor
194 * @bd: base of circular buffer of TFDs 194 * @tfds: transmit frame descriptors (DMA memory)
195 * @cmd: array of command/TX buffer pointers 195 * @entries: transmit entries (driver state)
196 * @meta: array of meta data for each command/tx buffer 196 * @lock: queue lock
197 * @dma_addr_cmd: physical address of cmd/tx buffer array 197 * @stuck_timer: timer that fires if queue gets stuck
198 * @txb: array of per-TFD driver data 198 * @trans_pcie: pointer back to transport (for timer)
199 * lock: queue lock
200 * @time_stamp: time (in jiffies) of last read_ptr change
201 * @need_update: indicates need to update read/write index 199 * @need_update: indicates need to update read/write index
202 * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled 200 * @active: stores if queue is active
203 * @sta_id: valid if sched_retry is set
204 * @tid: valid if sched_retry is set
205 * 201 *
206 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame 202 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
207 * descriptors) and required locking structures. 203 * descriptors) and required locking structures.
208 */ 204 */
209#define TFD_TX_CMD_SLOTS 256
210#define TFD_CMD_SLOTS 32
211
212struct iwl_tx_queue { 205struct iwl_tx_queue {
213 struct iwl_queue q; 206 struct iwl_queue q;
214 struct iwl_tfd *tfds; 207 struct iwl_tfd *tfds;
215 struct iwl_device_cmd **cmd; 208 struct iwl_pcie_tx_queue_entry *entries;
216 struct iwl_cmd_meta *meta;
217 struct sk_buff **skbs;
218 spinlock_t lock; 209 spinlock_t lock;
219 unsigned long time_stamp; 210 struct timer_list stuck_timer;
211 struct iwl_trans_pcie *trans_pcie;
220 u8 need_update; 212 u8 need_update;
221 u8 sched_retry;
222 u8 active; 213 u8 active;
223 u8 swq_id;
224
225 u16 sta_id;
226 u16 tid;
227}; 214};
228 215
229/** 216/**
230 * struct iwl_trans_pcie - PCIe transport specific data 217 * struct iwl_trans_pcie - PCIe transport specific data
231 * @rxq: all the RX queue data 218 * @rxq: all the RX queue data
232 * @rx_replenish: work that will be called when buffers need to be allocated 219 * @rx_replenish: work that will be called when buffers need to be allocated
220 * @drv - pointer to iwl_drv
233 * @trans: pointer to the generic transport area 221 * @trans: pointer to the generic transport area
234 * @irq - the irq number for the device 222 * @irq - the irq number for the device
235 * @irq_requested: true when the irq has been requested 223 * @irq_requested: true when the irq has been requested
236 * @scd_base_addr: scheduler sram base address in SRAM 224 * @scd_base_addr: scheduler sram base address in SRAM
237 * @scd_bc_tbls: pointer to the byte count table of the scheduler 225 * @scd_bc_tbls: pointer to the byte count table of the scheduler
238 * @kw: keep warm address 226 * @kw: keep warm address
239 * @ac_to_fifo: to what fifo is a specifc AC mapped ?
240 * @ac_to_queue: to what tx queue is a specifc AC mapped ?
241 * @mcast_queue:
242 * @txq: Tx DMA processing queues
243 * @txq_ctx_active_msk: what queue is active
244 * queue_stopped: tracks what queue is stopped
245 * queue_stop_count: tracks what SW queue is stopped
246 * @pci_dev: basic pci-network driver stuff 227 * @pci_dev: basic pci-network driver stuff
247 * @hw_base: pci hardware address support 228 * @hw_base: pci hardware address support
248 * @ucode_write_complete: indicates that the ucode has been copied. 229 * @ucode_write_complete: indicates that the ucode has been copied.
249 * @ucode_write_waitq: wait queue for uCode load 230 * @ucode_write_waitq: wait queue for uCode load
250 * @status - transport specific status flags 231 * @status - transport specific status flags
251 * @cmd_queue - command queue number 232 * @cmd_queue - command queue number
233 * @rx_buf_size_8k: 8 kB RX buffer size
234 * @rx_page_order: page order for receive buffer size
235 * @wd_timeout: queue watchdog timeout (jiffies)
252 */ 236 */
253struct iwl_trans_pcie { 237struct iwl_trans_pcie {
254 struct iwl_rx_queue rxq; 238 struct iwl_rx_queue rxq;
255 struct work_struct rx_replenish; 239 struct work_struct rx_replenish;
256 struct iwl_trans *trans; 240 struct iwl_trans *trans;
241 struct iwl_drv *drv;
257 242
258 /* INT ICT Table */ 243 /* INT ICT Table */
259 __le32 *ict_tbl; 244 __le32 *ict_tbl;
@@ -272,16 +257,9 @@ struct iwl_trans_pcie {
272 struct iwl_dma_ptr scd_bc_tbls; 257 struct iwl_dma_ptr scd_bc_tbls;
273 struct iwl_dma_ptr kw; 258 struct iwl_dma_ptr kw;
274 259
275 const u8 *ac_to_fifo[NUM_IWL_RXON_CTX];
276 const u8 *ac_to_queue[NUM_IWL_RXON_CTX];
277 u8 mcast_queue[NUM_IWL_RXON_CTX];
278 u8 agg_txq[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];
279
280 struct iwl_tx_queue *txq; 260 struct iwl_tx_queue *txq;
281 unsigned long txq_ctx_active_msk; 261 unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
282#define IWL_MAX_HW_QUEUES 32
283 unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)]; 262 unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
284 atomic_t queue_stop_count[4];
285 263
286 /* PCI bus related data */ 264 /* PCI bus related data */
287 struct pci_dev *pci_dev; 265 struct pci_dev *pci_dev;
@@ -293,11 +271,41 @@ struct iwl_trans_pcie {
293 u8 cmd_queue; 271 u8 cmd_queue;
294 u8 n_no_reclaim_cmds; 272 u8 n_no_reclaim_cmds;
295 u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS]; 273 u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
274 u8 setup_q_to_fifo[IWL_MAX_HW_QUEUES];
275 u8 n_q_to_fifo;
276
277 bool rx_buf_size_8k;
278 u32 rx_page_order;
279
280 const char **command_names;
281
282 /* queue watchdog */
283 unsigned long wd_timeout;
296}; 284};
297 285
286/*****************************************************
287* DRIVER STATUS FUNCTIONS
288******************************************************/
289#define STATUS_HCMD_ACTIVE 0
290#define STATUS_DEVICE_ENABLED 1
291#define STATUS_TPOWER_PMI 2
292#define STATUS_INT_ENABLED 3
293
298#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \ 294#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
299 ((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific)) 295 ((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific))
300 296
297static inline struct iwl_trans *
298iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
299{
300 return container_of((void *)trans_pcie, struct iwl_trans,
301 trans_specific);
302}
303
304struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
305 const struct pci_device_id *ent,
306 const struct iwl_cfg *cfg);
307void iwl_trans_pcie_free(struct iwl_trans *trans);
308
301/***************************************************** 309/*****************************************************
302* RX 310* RX
303******************************************************/ 311******************************************************/
@@ -331,15 +339,12 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans,
331void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans, 339void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
332 struct iwl_tx_queue *txq, 340 struct iwl_tx_queue *txq,
333 u16 byte_cnt); 341 u16 byte_cnt);
334int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, 342void iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int queue);
335 int sta_id, int tid);
336void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index); 343void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index);
337void iwl_trans_tx_queue_set_status(struct iwl_trans *trans, 344void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
338 struct iwl_tx_queue *txq, 345 struct iwl_tx_queue *txq,
339 int tx_fifo_id, int scd_retry); 346 int tx_fifo_id, bool active);
340int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans, int sta_id, int tid); 347void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int queue, int fifo,
341void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
342 enum iwl_rxon_context_id ctx,
343 int sta_id, int tid, int frame_limit, u16 ssn); 348 int sta_id, int tid, int frame_limit, u16 ssn);
344void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq, 349void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
345 int index, enum dma_data_direction dma_dir); 350 int index, enum dma_data_direction dma_dir);
@@ -350,8 +355,6 @@ int iwl_queue_space(const struct iwl_queue *q);
350/***************************************************** 355/*****************************************************
351* Error handling 356* Error handling
352******************************************************/ 357******************************************************/
353int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
354 char **buf, bool display);
355int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display); 358int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display);
356void iwl_dump_csr(struct iwl_trans *trans); 359void iwl_dump_csr(struct iwl_trans *trans);
357 360
@@ -388,91 +391,28 @@ static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
388 iwl_write32(trans, CSR_INT_MASK, CSR_INT_BIT_RF_KILL); 391 iwl_write32(trans, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
389} 392}
390 393
391/*
392 * we have 8 bits used like this:
393 *
394 * 7 6 5 4 3 2 1 0
395 * | | | | | | | |
396 * | | | | | | +-+-------- AC queue (0-3)
397 * | | | | | |
398 * | +-+-+-+-+------------ HW queue ID
399 * |
400 * +---------------------- unused
401 */
402static inline void iwl_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
403{
404 BUG_ON(ac > 3); /* only have 2 bits */
405 BUG_ON(hwq > 31); /* only use 5 bits */
406
407 txq->swq_id = (hwq << 2) | ac;
408}
409
410static inline u8 iwl_get_queue_ac(struct iwl_tx_queue *txq)
411{
412 return txq->swq_id & 0x3;
413}
414
415static inline void iwl_wake_queue(struct iwl_trans *trans, 394static inline void iwl_wake_queue(struct iwl_trans *trans,
416 struct iwl_tx_queue *txq) 395 struct iwl_tx_queue *txq)
417{ 396{
418 u8 queue = txq->swq_id; 397 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
419 u8 ac = queue & 3; 398
420 u8 hwq = (queue >> 2) & 0x1f; 399 if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) {
421 struct iwl_trans_pcie *trans_pcie = 400 IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id);
422 IWL_TRANS_GET_PCIE_TRANS(trans); 401 iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id);
423
424 if (test_and_clear_bit(hwq, trans_pcie->queue_stopped)) {
425 if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0) {
426 iwl_op_mode_queue_not_full(trans->op_mode, ac);
427 IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d ac %d",
428 hwq, ac);
429 } else {
430 IWL_DEBUG_TX_QUEUES(trans,
431 "Don't wake hwq %d ac %d stop count %d",
432 hwq, ac,
433 atomic_read(&trans_pcie->queue_stop_count[ac]));
434 }
435 } 402 }
436} 403}
437 404
438static inline void iwl_stop_queue(struct iwl_trans *trans, 405static inline void iwl_stop_queue(struct iwl_trans *trans,
439 struct iwl_tx_queue *txq) 406 struct iwl_tx_queue *txq)
440{ 407{
441 u8 queue = txq->swq_id; 408 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
442 u8 ac = queue & 3;
443 u8 hwq = (queue >> 2) & 0x1f;
444 struct iwl_trans_pcie *trans_pcie =
445 IWL_TRANS_GET_PCIE_TRANS(trans);
446
447 if (!test_and_set_bit(hwq, trans_pcie->queue_stopped)) {
448 if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0) {
449 iwl_op_mode_queue_full(trans->op_mode, ac);
450 IWL_DEBUG_TX_QUEUES(trans,
451 "Stop hwq %d ac %d stop count %d",
452 hwq, ac,
453 atomic_read(&trans_pcie->queue_stop_count[ac]));
454 } else {
455 IWL_DEBUG_TX_QUEUES(trans,
456 "Don't stop hwq %d ac %d stop count %d",
457 hwq, ac,
458 atomic_read(&trans_pcie->queue_stop_count[ac]));
459 }
460 } else {
461 IWL_DEBUG_TX_QUEUES(trans, "stop hwq %d, but it is stopped",
462 hwq);
463 }
464}
465
466static inline void iwl_txq_ctx_activate(struct iwl_trans_pcie *trans_pcie,
467 int txq_id)
468{
469 set_bit(txq_id, &trans_pcie->txq_ctx_active_msk);
470}
471 409
472static inline void iwl_txq_ctx_deactivate(struct iwl_trans_pcie *trans_pcie, 410 if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) {
473 int txq_id) 411 iwl_op_mode_queue_full(trans->op_mode, txq->q.id);
474{ 412 IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id);
475 clear_bit(txq_id, &trans_pcie->txq_ctx_active_msk); 413 } else
414 IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
415 txq->q.id);
476} 416}
477 417
478static inline int iwl_queue_used(const struct iwl_queue *q, int i) 418static inline int iwl_queue_used(const struct iwl_queue *q, int i)
@@ -487,19 +427,18 @@ static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
487 return index & (q->n_window - 1); 427 return index & (q->n_window - 1);
488} 428}
489 429
490#define IWL_TX_FIFO_BK 0 /* shared */ 430static inline const char *
491#define IWL_TX_FIFO_BE 1 431trans_pcie_get_cmd_string(struct iwl_trans_pcie *trans_pcie, u8 cmd)
492#define IWL_TX_FIFO_VI 2 /* shared */ 432{
493#define IWL_TX_FIFO_VO 3 433 if (!trans_pcie->command_names || !trans_pcie->command_names[cmd])
494#define IWL_TX_FIFO_BK_IPAN IWL_TX_FIFO_BK 434 return "UNKNOWN";
495#define IWL_TX_FIFO_BE_IPAN 4 435 return trans_pcie->command_names[cmd];
496#define IWL_TX_FIFO_VI_IPAN IWL_TX_FIFO_VI 436}
497#define IWL_TX_FIFO_VO_IPAN 5 437
498/* re-uses the VO FIFO, uCode will properly flush/schedule */ 438static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
499#define IWL_TX_FIFO_AUX 5 439{
500#define IWL_TX_FIFO_UNUSED -1 440 return !(iwl_read32(trans, CSR_GP_CNTRL) &
501 441 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
502/* AUX (TX during scan dwell) queue */ 442}
503#define IWL_AUX_QUEUE 10
504 443
505#endif /* __iwl_trans_int_pcie_h__ */ 444#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
index aa7aea168138..08517d3c80bb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
@@ -140,14 +140,17 @@ void iwl_rx_queue_update_write_ptr(struct iwl_trans *trans,
140 if (q->need_update == 0) 140 if (q->need_update == 0)
141 goto exit_unlock; 141 goto exit_unlock;
142 142
143 if (cfg(trans)->base_params->shadow_reg_enable) { 143 if (trans->cfg->base_params->shadow_reg_enable) {
144 /* shadow register enabled */ 144 /* shadow register enabled */
145 /* Device expects a multiple of 8 */ 145 /* Device expects a multiple of 8 */
146 q->write_actual = (q->write & ~0x7); 146 q->write_actual = (q->write & ~0x7);
147 iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, q->write_actual); 147 iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, q->write_actual);
148 } else { 148 } else {
149 struct iwl_trans_pcie *trans_pcie =
150 IWL_TRANS_GET_PCIE_TRANS(trans);
151
149 /* If power-saving is in use, make sure device is awake */ 152 /* If power-saving is in use, make sure device is awake */
150 if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) { 153 if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
151 reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); 154 reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
152 155
153 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { 156 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
@@ -271,17 +274,17 @@ static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
271 if (rxq->free_count > RX_LOW_WATERMARK) 274 if (rxq->free_count > RX_LOW_WATERMARK)
272 gfp_mask |= __GFP_NOWARN; 275 gfp_mask |= __GFP_NOWARN;
273 276
274 if (hw_params(trans).rx_page_order > 0) 277 if (trans_pcie->rx_page_order > 0)
275 gfp_mask |= __GFP_COMP; 278 gfp_mask |= __GFP_COMP;
276 279
277 /* Alloc a new receive buffer */ 280 /* Alloc a new receive buffer */
278 page = alloc_pages(gfp_mask, 281 page = alloc_pages(gfp_mask,
279 hw_params(trans).rx_page_order); 282 trans_pcie->rx_page_order);
280 if (!page) { 283 if (!page) {
281 if (net_ratelimit()) 284 if (net_ratelimit())
282 IWL_DEBUG_INFO(trans, "alloc_pages failed, " 285 IWL_DEBUG_INFO(trans, "alloc_pages failed, "
283 "order: %d\n", 286 "order: %d\n",
284 hw_params(trans).rx_page_order); 287 trans_pcie->rx_page_order);
285 288
286 if ((rxq->free_count <= RX_LOW_WATERMARK) && 289 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
287 net_ratelimit()) 290 net_ratelimit())
@@ -300,7 +303,7 @@ static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
300 303
301 if (list_empty(&rxq->rx_used)) { 304 if (list_empty(&rxq->rx_used)) {
302 spin_unlock_irqrestore(&rxq->lock, flags); 305 spin_unlock_irqrestore(&rxq->lock, flags);
303 __free_pages(page, hw_params(trans).rx_page_order); 306 __free_pages(page, trans_pcie->rx_page_order);
304 return; 307 return;
305 } 308 }
306 element = rxq->rx_used.next; 309 element = rxq->rx_used.next;
@@ -313,7 +316,7 @@ static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
313 rxb->page = page; 316 rxb->page = page;
314 /* Get physical address of the RB */ 317 /* Get physical address of the RB */
315 rxb->page_dma = dma_map_page(trans->dev, page, 0, 318 rxb->page_dma = dma_map_page(trans->dev, page, 0,
316 PAGE_SIZE << hw_params(trans).rx_page_order, 319 PAGE_SIZE << trans_pcie->rx_page_order,
317 DMA_FROM_DEVICE); 320 DMA_FROM_DEVICE);
318 /* dma address must be no more than 36 bits */ 321 /* dma address must be no more than 36 bits */
319 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); 322 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
@@ -362,84 +365,98 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
362 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 365 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
363 struct iwl_rx_queue *rxq = &trans_pcie->rxq; 366 struct iwl_rx_queue *rxq = &trans_pcie->rxq;
364 struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue]; 367 struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
365 struct iwl_device_cmd *cmd;
366 unsigned long flags; 368 unsigned long flags;
367 int len, err; 369 bool page_stolen = false;
368 u16 sequence; 370 int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
369 struct iwl_rx_cmd_buffer rxcb; 371 u32 offset = 0;
370 struct iwl_rx_packet *pkt;
371 bool reclaim;
372 int index, cmd_index;
373 372
374 if (WARN_ON(!rxb)) 373 if (WARN_ON(!rxb))
375 return; 374 return;
376 375
377 rxcb.truesize = PAGE_SIZE << hw_params(trans).rx_page_order; 376 dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
378 dma_unmap_page(trans->dev, rxb->page_dma,
379 rxcb.truesize,
380 DMA_FROM_DEVICE);
381
382 rxcb._page = rxb->page;
383 pkt = rxb_addr(&rxcb);
384 377
385 IWL_DEBUG_RX(trans, "%s, 0x%02x\n", 378 while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
386 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); 379 struct iwl_rx_packet *pkt;
380 struct iwl_device_cmd *cmd;
381 u16 sequence;
382 bool reclaim;
383 int index, cmd_index, err, len;
384 struct iwl_rx_cmd_buffer rxcb = {
385 ._offset = offset,
386 ._page = rxb->page,
387 ._page_stolen = false,
388 .truesize = max_len,
389 };
387 390
391 pkt = rxb_addr(&rxcb);
388 392
389 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; 393 if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
390 len += sizeof(u32); /* account for status word */ 394 break;
391 trace_iwlwifi_dev_rx(trans->dev, pkt, len);
392
393 /* Reclaim a command buffer only if this packet is a response
394 * to a (driver-originated) command.
395 * If the packet (e.g. Rx frame) originated from uCode,
396 * there is no command buffer to reclaim.
397 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
398 * but apparently a few don't get set; catch them here. */
399 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
400 if (reclaim) {
401 int i;
402 395
403 for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) { 396 IWL_DEBUG_RX(trans, "cmd at offset %d: %s (0x%.2x)\n",
404 if (trans_pcie->no_reclaim_cmds[i] == pkt->hdr.cmd) { 397 rxcb._offset,
405 reclaim = false; 398 trans_pcie_get_cmd_string(trans_pcie, pkt->hdr.cmd),
406 break; 399 pkt->hdr.cmd);
400
401 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
402 len += sizeof(u32); /* account for status word */
403 trace_iwlwifi_dev_rx(trans->dev, pkt, len);
404
405 /* Reclaim a command buffer only if this packet is a response
406 * to a (driver-originated) command.
407 * If the packet (e.g. Rx frame) originated from uCode,
408 * there is no command buffer to reclaim.
409 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
410 * but apparently a few don't get set; catch them here. */
411 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
412 if (reclaim) {
413 int i;
414
415 for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
416 if (trans_pcie->no_reclaim_cmds[i] ==
417 pkt->hdr.cmd) {
418 reclaim = false;
419 break;
420 }
407 } 421 }
408 } 422 }
409 }
410 423
411 sequence = le16_to_cpu(pkt->hdr.sequence); 424 sequence = le16_to_cpu(pkt->hdr.sequence);
412 index = SEQ_TO_INDEX(sequence); 425 index = SEQ_TO_INDEX(sequence);
413 cmd_index = get_cmd_index(&txq->q, index); 426 cmd_index = get_cmd_index(&txq->q, index);
414 427
415 if (reclaim) 428 if (reclaim)
416 cmd = txq->cmd[cmd_index]; 429 cmd = txq->entries[cmd_index].cmd;
417 else 430 else
418 cmd = NULL; 431 cmd = NULL;
419 432
420 err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd); 433 err = iwl_op_mode_rx(trans->op_mode, &rxcb, cmd);
421 434
422 /* 435 /*
423 * XXX: After here, we should always check rxcb._page 436 * After here, we should always check rxcb._page_stolen,
424 * against NULL before touching it or its virtual 437 * if it is true then one of the handlers took the page.
425 * memory (pkt). Because some rx_handler might have 438 */
426 * already taken or freed the pages.
427 */
428 439
429 if (reclaim) { 440 if (reclaim) {
430 /* Invoke any callbacks, transfer the buffer to caller, 441 /* Invoke any callbacks, transfer the buffer to caller,
431 * and fire off the (possibly) blocking 442 * and fire off the (possibly) blocking
432 * iwl_trans_send_cmd() 443 * iwl_trans_send_cmd()
433 * as we reclaim the driver command queue */ 444 * as we reclaim the driver command queue */
434 if (rxcb._page) 445 if (!rxcb._page_stolen)
435 iwl_tx_cmd_complete(trans, &rxcb, err); 446 iwl_tx_cmd_complete(trans, &rxcb, err);
436 else 447 else
437 IWL_WARN(trans, "Claim null rxb?\n"); 448 IWL_WARN(trans, "Claim null rxb?\n");
449 }
450
451 page_stolen |= rxcb._page_stolen;
452 offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
438 } 453 }
439 454
440 /* page was stolen from us */ 455 /* page was stolen from us -- free our reference */
441 if (rxcb._page == NULL) 456 if (page_stolen) {
457 __free_pages(rxb->page, trans_pcie->rx_page_order);
442 rxb->page = NULL; 458 rxb->page = NULL;
459 }
443 460
444 /* Reuse the page if possible. For notification packets and 461 /* Reuse the page if possible. For notification packets and
445 * SKBs that fail to Rx correctly, add them back into the 462 * SKBs that fail to Rx correctly, add them back into the
@@ -448,7 +465,7 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
448 if (rxb->page != NULL) { 465 if (rxb->page != NULL) {
449 rxb->page_dma = 466 rxb->page_dma =
450 dma_map_page(trans->dev, rxb->page, 0, 467 dma_map_page(trans->dev, rxb->page, 0,
451 PAGE_SIZE << hw_params(trans).rx_page_order, 468 PAGE_SIZE << trans_pcie->rx_page_order,
452 DMA_FROM_DEVICE); 469 DMA_FROM_DEVICE);
453 list_add_tail(&rxb->list, &rxq->rx_free); 470 list_add_tail(&rxb->list, &rxq->rx_free);
454 rxq->free_count++; 471 rxq->free_count++;
@@ -521,412 +538,32 @@ static void iwl_rx_handle(struct iwl_trans *trans)
521 iwlagn_rx_queue_restock(trans); 538 iwlagn_rx_queue_restock(trans);
522} 539}
523 540
524static const char * const desc_lookup_text[] = {
525 "OK",
526 "FAIL",
527 "BAD_PARAM",
528 "BAD_CHECKSUM",
529 "NMI_INTERRUPT_WDG",
530 "SYSASSERT",
531 "FATAL_ERROR",
532 "BAD_COMMAND",
533 "HW_ERROR_TUNE_LOCK",
534 "HW_ERROR_TEMPERATURE",
535 "ILLEGAL_CHAN_FREQ",
536 "VCC_NOT_STABLE",
537 "FH_ERROR",
538 "NMI_INTERRUPT_HOST",
539 "NMI_INTERRUPT_ACTION_PT",
540 "NMI_INTERRUPT_UNKNOWN",
541 "UCODE_VERSION_MISMATCH",
542 "HW_ERROR_ABS_LOCK",
543 "HW_ERROR_CAL_LOCK_FAIL",
544 "NMI_INTERRUPT_INST_ACTION_PT",
545 "NMI_INTERRUPT_DATA_ACTION_PT",
546 "NMI_TRM_HW_ER",
547 "NMI_INTERRUPT_TRM",
548 "NMI_INTERRUPT_BREAK_POINT",
549 "DEBUG_0",
550 "DEBUG_1",
551 "DEBUG_2",
552 "DEBUG_3",
553};
554
555static struct { char *name; u8 num; } advanced_lookup[] = {
556 { "NMI_INTERRUPT_WDG", 0x34 },
557 { "SYSASSERT", 0x35 },
558 { "UCODE_VERSION_MISMATCH", 0x37 },
559 { "BAD_COMMAND", 0x38 },
560 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
561 { "FATAL_ERROR", 0x3D },
562 { "NMI_TRM_HW_ERR", 0x46 },
563 { "NMI_INTERRUPT_TRM", 0x4C },
564 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
565 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
566 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
567 { "NMI_INTERRUPT_HOST", 0x66 },
568 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
569 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
570 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
571 { "ADVANCED_SYSASSERT", 0 },
572};
573
574static const char *desc_lookup(u32 num)
575{
576 int i;
577 int max = ARRAY_SIZE(desc_lookup_text);
578
579 if (num < max)
580 return desc_lookup_text[num];
581
582 max = ARRAY_SIZE(advanced_lookup) - 1;
583 for (i = 0; i < max; i++) {
584 if (advanced_lookup[i].num == num)
585 break;
586 }
587 return advanced_lookup[i].name;
588}
589
590#define ERROR_START_OFFSET (1 * sizeof(u32))
591#define ERROR_ELEM_SIZE (7 * sizeof(u32))
592
593static void iwl_dump_nic_error_log(struct iwl_trans *trans)
594{
595 u32 base;
596 struct iwl_error_event_table table;
597 struct iwl_trans_pcie *trans_pcie =
598 IWL_TRANS_GET_PCIE_TRANS(trans);
599
600 base = trans->shrd->device_pointers.error_event_table;
601 if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
602 if (!base)
603 base = trans->shrd->fw->init_errlog_ptr;
604 } else {
605 if (!base)
606 base = trans->shrd->fw->inst_errlog_ptr;
607 }
608
609 if (!iwlagn_hw_valid_rtc_data_addr(base)) {
610 IWL_ERR(trans,
611 "Not valid error log pointer 0x%08X for %s uCode\n",
612 base,
613 (trans->shrd->ucode_type == IWL_UCODE_INIT)
614 ? "Init" : "RT");
615 return;
616 }
617
618 iwl_read_targ_mem_words(trans, base, &table, sizeof(table));
619
620 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
621 IWL_ERR(trans, "Start IWL Error Log Dump:\n");
622 IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
623 trans->shrd->status, table.valid);
624 }
625
626 trans_pcie->isr_stats.err_code = table.error_id;
627
628 trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
629 table.data1, table.data2, table.line,
630 table.blink1, table.blink2, table.ilink1,
631 table.ilink2, table.bcon_time, table.gp1,
632 table.gp2, table.gp3, table.ucode_ver,
633 table.hw_ver, table.brd_ver);
634 IWL_ERR(trans, "0x%08X | %-28s\n", table.error_id,
635 desc_lookup(table.error_id));
636 IWL_ERR(trans, "0x%08X | uPc\n", table.pc);
637 IWL_ERR(trans, "0x%08X | branchlink1\n", table.blink1);
638 IWL_ERR(trans, "0x%08X | branchlink2\n", table.blink2);
639 IWL_ERR(trans, "0x%08X | interruptlink1\n", table.ilink1);
640 IWL_ERR(trans, "0x%08X | interruptlink2\n", table.ilink2);
641 IWL_ERR(trans, "0x%08X | data1\n", table.data1);
642 IWL_ERR(trans, "0x%08X | data2\n", table.data2);
643 IWL_ERR(trans, "0x%08X | line\n", table.line);
644 IWL_ERR(trans, "0x%08X | beacon time\n", table.bcon_time);
645 IWL_ERR(trans, "0x%08X | tsf low\n", table.tsf_low);
646 IWL_ERR(trans, "0x%08X | tsf hi\n", table.tsf_hi);
647 IWL_ERR(trans, "0x%08X | time gp1\n", table.gp1);
648 IWL_ERR(trans, "0x%08X | time gp2\n", table.gp2);
649 IWL_ERR(trans, "0x%08X | time gp3\n", table.gp3);
650 IWL_ERR(trans, "0x%08X | uCode version\n", table.ucode_ver);
651 IWL_ERR(trans, "0x%08X | hw version\n", table.hw_ver);
652 IWL_ERR(trans, "0x%08X | board version\n", table.brd_ver);
653 IWL_ERR(trans, "0x%08X | hcmd\n", table.hcmd);
654
655 IWL_ERR(trans, "0x%08X | isr0\n", table.isr0);
656 IWL_ERR(trans, "0x%08X | isr1\n", table.isr1);
657 IWL_ERR(trans, "0x%08X | isr2\n", table.isr2);
658 IWL_ERR(trans, "0x%08X | isr3\n", table.isr3);
659 IWL_ERR(trans, "0x%08X | isr4\n", table.isr4);
660 IWL_ERR(trans, "0x%08X | isr_pref\n", table.isr_pref);
661 IWL_ERR(trans, "0x%08X | wait_event\n", table.wait_event);
662 IWL_ERR(trans, "0x%08X | l2p_control\n", table.l2p_control);
663 IWL_ERR(trans, "0x%08X | l2p_duration\n", table.l2p_duration);
664 IWL_ERR(trans, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
665 IWL_ERR(trans, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
666 IWL_ERR(trans, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
667 IWL_ERR(trans, "0x%08X | timestamp\n", table.u_timestamp);
668 IWL_ERR(trans, "0x%08X | flow_handler\n", table.flow_handler);
669}
670
671/** 541/**
672 * iwl_irq_handle_error - called for HW or SW error interrupt from card 542 * iwl_irq_handle_error - called for HW or SW error interrupt from card
673 */ 543 */
674static void iwl_irq_handle_error(struct iwl_trans *trans) 544static void iwl_irq_handle_error(struct iwl_trans *trans)
675{ 545{
676 /* W/A for WiFi/WiMAX coex and WiMAX own the RF */ 546 /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
677 if (cfg(trans)->internal_wimax_coex && 547 if (trans->cfg->internal_wimax_coex &&
678 (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) & 548 (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
679 APMS_CLK_VAL_MRB_FUNC_MODE) || 549 APMS_CLK_VAL_MRB_FUNC_MODE) ||
680 (iwl_read_prph(trans, APMG_PS_CTRL_REG) & 550 (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
681 APMG_PS_CTRL_VAL_RESET_REQ))) { 551 APMG_PS_CTRL_VAL_RESET_REQ))) {
682 /* 552 struct iwl_trans_pcie *trans_pcie;
683 * Keep the restart process from trying to send host 553
684 * commands by clearing the ready bit. 554 trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
685 */ 555 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
686 clear_bit(STATUS_READY, &trans->shrd->status); 556 iwl_op_mode_wimax_active(trans->op_mode);
687 clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
688 wake_up(&trans->wait_command_queue); 557 wake_up(&trans->wait_command_queue);
689 IWL_ERR(trans, "RF is used by WiMAX\n");
690 return; 558 return;
691 } 559 }
692 560
693 IWL_ERR(trans, "Loaded firmware version: %s\n",
694 trans->shrd->fw->fw_version);
695
696 iwl_dump_nic_error_log(trans);
697 iwl_dump_csr(trans); 561 iwl_dump_csr(trans);
698 iwl_dump_fh(trans, NULL, false); 562 iwl_dump_fh(trans, NULL, false);
699 iwl_dump_nic_event_log(trans, false, NULL, false);
700 563
701 iwl_op_mode_nic_error(trans->op_mode); 564 iwl_op_mode_nic_error(trans->op_mode);
702} 565}
703 566
704#define EVENT_START_OFFSET (4 * sizeof(u32))
705
706/**
707 * iwl_print_event_log - Dump error event log to syslog
708 *
709 */
710static int iwl_print_event_log(struct iwl_trans *trans, u32 start_idx,
711 u32 num_events, u32 mode,
712 int pos, char **buf, size_t bufsz)
713{
714 u32 i;
715 u32 base; /* SRAM byte address of event log header */
716 u32 event_size; /* 2 u32s, or 3 u32s if timestamp recorded */
717 u32 ptr; /* SRAM byte address of log data */
718 u32 ev, time, data; /* event log data */
719 unsigned long reg_flags;
720
721 if (num_events == 0)
722 return pos;
723
724 base = trans->shrd->device_pointers.log_event_table;
725 if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
726 if (!base)
727 base = trans->shrd->fw->init_evtlog_ptr;
728 } else {
729 if (!base)
730 base = trans->shrd->fw->inst_evtlog_ptr;
731 }
732
733 if (mode == 0)
734 event_size = 2 * sizeof(u32);
735 else
736 event_size = 3 * sizeof(u32);
737
738 ptr = base + EVENT_START_OFFSET + (start_idx * event_size);
739
740 /* Make sure device is powered up for SRAM reads */
741 spin_lock_irqsave(&trans->reg_lock, reg_flags);
742 if (unlikely(!iwl_grab_nic_access(trans)))
743 goto out_unlock;
744
745 /* Set starting address; reads will auto-increment */
746 iwl_write32(trans, HBUS_TARG_MEM_RADDR, ptr);
747
748 /* "time" is actually "data" for mode 0 (no timestamp).
749 * place event id # at far right for easier visual parsing. */
750 for (i = 0; i < num_events; i++) {
751 ev = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
752 time = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
753 if (mode == 0) {
754 /* data, ev */
755 if (bufsz) {
756 pos += scnprintf(*buf + pos, bufsz - pos,
757 "EVT_LOG:0x%08x:%04u\n",
758 time, ev);
759 } else {
760 trace_iwlwifi_dev_ucode_event(trans->dev, 0,
761 time, ev);
762 IWL_ERR(trans, "EVT_LOG:0x%08x:%04u\n",
763 time, ev);
764 }
765 } else {
766 data = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
767 if (bufsz) {
768 pos += scnprintf(*buf + pos, bufsz - pos,
769 "EVT_LOGT:%010u:0x%08x:%04u\n",
770 time, data, ev);
771 } else {
772 IWL_ERR(trans, "EVT_LOGT:%010u:0x%08x:%04u\n",
773 time, data, ev);
774 trace_iwlwifi_dev_ucode_event(trans->dev, time,
775 data, ev);
776 }
777 }
778 }
779
780 /* Allow device to power down */
781 iwl_release_nic_access(trans);
782out_unlock:
783 spin_unlock_irqrestore(&trans->reg_lock, reg_flags);
784 return pos;
785}
786
787/**
788 * iwl_print_last_event_logs - Dump the newest # of event log to syslog
789 */
790static int iwl_print_last_event_logs(struct iwl_trans *trans, u32 capacity,
791 u32 num_wraps, u32 next_entry,
792 u32 size, u32 mode,
793 int pos, char **buf, size_t bufsz)
794{
795 /*
796 * display the newest DEFAULT_LOG_ENTRIES entries
797 * i.e the entries just before the next ont that uCode would fill.
798 */
799 if (num_wraps) {
800 if (next_entry < size) {
801 pos = iwl_print_event_log(trans,
802 capacity - (size - next_entry),
803 size - next_entry, mode,
804 pos, buf, bufsz);
805 pos = iwl_print_event_log(trans, 0,
806 next_entry, mode,
807 pos, buf, bufsz);
808 } else
809 pos = iwl_print_event_log(trans, next_entry - size,
810 size, mode, pos, buf, bufsz);
811 } else {
812 if (next_entry < size) {
813 pos = iwl_print_event_log(trans, 0, next_entry,
814 mode, pos, buf, bufsz);
815 } else {
816 pos = iwl_print_event_log(trans, next_entry - size,
817 size, mode, pos, buf, bufsz);
818 }
819 }
820 return pos;
821}
822
823#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
824
825int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
826 char **buf, bool display)
827{
828 u32 base; /* SRAM byte address of event log header */
829 u32 capacity; /* event log capacity in # entries */
830 u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
831 u32 num_wraps; /* # times uCode wrapped to top of log */
832 u32 next_entry; /* index of next entry to be written by uCode */
833 u32 size; /* # entries that we'll print */
834 u32 logsize;
835 int pos = 0;
836 size_t bufsz = 0;
837
838 base = trans->shrd->device_pointers.log_event_table;
839 if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
840 logsize = trans->shrd->fw->init_evtlog_size;
841 if (!base)
842 base = trans->shrd->fw->init_evtlog_ptr;
843 } else {
844 logsize = trans->shrd->fw->inst_evtlog_size;
845 if (!base)
846 base = trans->shrd->fw->inst_evtlog_ptr;
847 }
848
849 if (!iwlagn_hw_valid_rtc_data_addr(base)) {
850 IWL_ERR(trans,
851 "Invalid event log pointer 0x%08X for %s uCode\n",
852 base,
853 (trans->shrd->ucode_type == IWL_UCODE_INIT)
854 ? "Init" : "RT");
855 return -EINVAL;
856 }
857
858 /* event log header */
859 capacity = iwl_read_targ_mem(trans, base);
860 mode = iwl_read_targ_mem(trans, base + (1 * sizeof(u32)));
861 num_wraps = iwl_read_targ_mem(trans, base + (2 * sizeof(u32)));
862 next_entry = iwl_read_targ_mem(trans, base + (3 * sizeof(u32)));
863
864 if (capacity > logsize) {
865 IWL_ERR(trans, "Log capacity %d is bogus, limit to %d "
866 "entries\n", capacity, logsize);
867 capacity = logsize;
868 }
869
870 if (next_entry > logsize) {
871 IWL_ERR(trans, "Log write index %d is bogus, limit to %d\n",
872 next_entry, logsize);
873 next_entry = logsize;
874 }
875
876 size = num_wraps ? capacity : next_entry;
877
878 /* bail out if nothing in log */
879 if (size == 0) {
880 IWL_ERR(trans, "Start IWL Event Log Dump: nothing in log\n");
881 return pos;
882 }
883
884#ifdef CONFIG_IWLWIFI_DEBUG
885 if (!(iwl_have_debug_level(IWL_DL_FW_ERRORS)) && !full_log)
886 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
887 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
888#else
889 size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
890 ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
891#endif
892 IWL_ERR(trans, "Start IWL Event Log Dump: display last %u entries\n",
893 size);
894
895#ifdef CONFIG_IWLWIFI_DEBUG
896 if (display) {
897 if (full_log)
898 bufsz = capacity * 48;
899 else
900 bufsz = size * 48;
901 *buf = kmalloc(bufsz, GFP_KERNEL);
902 if (!*buf)
903 return -ENOMEM;
904 }
905 if (iwl_have_debug_level(IWL_DL_FW_ERRORS) || full_log) {
906 /*
907 * if uCode has wrapped back to top of log,
908 * start at the oldest entry,
909 * i.e the next one that uCode would fill.
910 */
911 if (num_wraps)
912 pos = iwl_print_event_log(trans, next_entry,
913 capacity - next_entry, mode,
914 pos, buf, bufsz);
915 /* (then/else) start at top of log */
916 pos = iwl_print_event_log(trans, 0,
917 next_entry, mode, pos, buf, bufsz);
918 } else
919 pos = iwl_print_last_event_logs(trans, capacity, num_wraps,
920 next_entry, size, mode,
921 pos, buf, bufsz);
922#else
923 pos = iwl_print_last_event_logs(trans, capacity, num_wraps,
924 next_entry, size, mode,
925 pos, buf, bufsz);
926#endif
927 return pos;
928}
929
930/* tasklet for iwlagn interrupt */ 567/* tasklet for iwlagn interrupt */
931void iwl_irq_tasklet(struct iwl_trans *trans) 568void iwl_irq_tasklet(struct iwl_trans *trans)
932{ 569{
@@ -964,7 +601,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
964 if (iwl_have_debug_level(IWL_DL_ISR)) { 601 if (iwl_have_debug_level(IWL_DL_ISR)) {
965 /* just for debug */ 602 /* just for debug */
966 inta_mask = iwl_read32(trans, CSR_INT_MASK); 603 inta_mask = iwl_read32(trans, CSR_INT_MASK);
967 IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n ", 604 IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
968 inta, inta_mask); 605 inta, inta_mask);
969 } 606 }
970#endif 607#endif
@@ -1012,8 +649,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
1012 if (inta & CSR_INT_BIT_RF_KILL) { 649 if (inta & CSR_INT_BIT_RF_KILL) {
1013 bool hw_rfkill; 650 bool hw_rfkill;
1014 651
1015 hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) & 652 hw_rfkill = iwl_is_rfkill_set(trans);
1016 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
1017 IWL_WARN(trans, "RF_KILL bit toggled to %s.\n", 653 IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
1018 hw_rfkill ? "disable radio" : "enable radio"); 654 hw_rfkill ? "disable radio" : "enable radio");
1019 655
@@ -1044,7 +680,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
1044 if (inta & CSR_INT_BIT_WAKEUP) { 680 if (inta & CSR_INT_BIT_WAKEUP) {
1045 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); 681 IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
1046 iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq); 682 iwl_rx_queue_update_write_ptr(trans, &trans_pcie->rxq);
1047 for (i = 0; i < cfg(trans)->base_params->num_of_queues; i++) 683 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++)
1048 iwl_txq_update_write_ptr(trans, 684 iwl_txq_update_write_ptr(trans,
1049 &trans_pcie->txq[i]); 685 &trans_pcie->txq[i]);
1050 686
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
index e92972fd6ecf..21a8a672fbb2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
@@ -37,47 +37,12 @@
37#include "iwl-agn-hw.h" 37#include "iwl-agn-hw.h"
38#include "iwl-op-mode.h" 38#include "iwl-op-mode.h"
39#include "iwl-trans-pcie-int.h" 39#include "iwl-trans-pcie-int.h"
40/* FIXME: need to abstract out TX command (once we know what it looks like) */
41#include "iwl-commands.h"
40 42
41#define IWL_TX_CRC_SIZE 4 43#define IWL_TX_CRC_SIZE 4
42#define IWL_TX_DELIMITER_SIZE 4 44#define IWL_TX_DELIMITER_SIZE 4
43 45
44/*
45 * mac80211 queues, ACs, hardware queues, FIFOs.
46 *
47 * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
48 *
49 * Mac80211 uses the following numbers, which we get as from it
50 * by way of skb_get_queue_mapping(skb):
51 *
52 * VO 0
53 * VI 1
54 * BE 2
55 * BK 3
56 *
57 *
58 * Regular (not A-MPDU) frames are put into hardware queues corresponding
59 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
60 * own queue per aggregation session (RA/TID combination), such queues are
61 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
62 * order to map frames to the right queue, we also need an AC->hw queue
63 * mapping. This is implemented here.
64 *
65 * Due to the way hw queues are set up (by the hw specific code), the AC->hw
66 * queue mapping is the identity mapping.
67 */
68
69static const u8 tid_to_ac[] = {
70 IEEE80211_AC_BE,
71 IEEE80211_AC_BK,
72 IEEE80211_AC_BK,
73 IEEE80211_AC_BE,
74 IEEE80211_AC_VI,
75 IEEE80211_AC_VI,
76 IEEE80211_AC_VO,
77 IEEE80211_AC_VO
78};
79
80
81/** 46/**
82 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array 47 * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
83 */ 48 */
@@ -95,7 +60,7 @@ void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
95 u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; 60 u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
96 __le16 bc_ent; 61 __le16 bc_ent;
97 struct iwl_tx_cmd *tx_cmd = 62 struct iwl_tx_cmd *tx_cmd =
98 (struct iwl_tx_cmd *) txq->cmd[txq->q.write_ptr]->payload; 63 (void *) txq->entries[txq->q.write_ptr].cmd->payload;
99 64
100 scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; 65 scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
101 66
@@ -136,13 +101,15 @@ void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
136 if (txq->need_update == 0) 101 if (txq->need_update == 0)
137 return; 102 return;
138 103
139 if (cfg(trans)->base_params->shadow_reg_enable) { 104 if (trans->cfg->base_params->shadow_reg_enable) {
140 /* shadow register enabled */ 105 /* shadow register enabled */
141 iwl_write32(trans, HBUS_TARG_WRPTR, 106 iwl_write32(trans, HBUS_TARG_WRPTR,
142 txq->q.write_ptr | (txq_id << 8)); 107 txq->q.write_ptr | (txq_id << 8));
143 } else { 108 } else {
109 struct iwl_trans_pcie *trans_pcie =
110 IWL_TRANS_GET_PCIE_TRANS(trans);
144 /* if we're trying to save power */ 111 /* if we're trying to save power */
145 if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) { 112 if (test_bit(STATUS_TPOWER_PMI, &trans_pcie->status)) {
146 /* wake up nic if it's powered down ... 113 /* wake up nic if it's powered down ...
147 * uCode will wake up, and interrupt us again, so next 114 * uCode will wake up, and interrupt us again, so next
148 * time we'll skip this part. */ 115 * time we'll skip this part. */
@@ -256,13 +223,14 @@ void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
256 223
257 lockdep_assert_held(&txq->lock); 224 lockdep_assert_held(&txq->lock);
258 225
259 iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index], dma_dir); 226 iwlagn_unmap_tfd(trans, &txq->entries[index].meta,
227 &tfd_tmp[index], dma_dir);
260 228
261 /* free SKB */ 229 /* free SKB */
262 if (txq->skbs) { 230 if (txq->entries) {
263 struct sk_buff *skb; 231 struct sk_buff *skb;
264 232
265 skb = txq->skbs[index]; 233 skb = txq->entries[index].skb;
266 234
267 /* Can be called from irqs-disabled context 235 /* Can be called from irqs-disabled context
268 * If skb is not NULL, it means that the whole queue is being 236 * If skb is not NULL, it means that the whole queue is being
@@ -270,7 +238,7 @@ void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
270 */ 238 */
271 if (skb) { 239 if (skb) {
272 iwl_op_mode_free_skb(trans->op_mode, skb); 240 iwl_op_mode_free_skb(trans->op_mode, skb);
273 txq->skbs[index] = NULL; 241 txq->entries[index].skb = NULL;
274 } 242 }
275 } 243 }
276} 244}
@@ -393,7 +361,7 @@ static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
393 u8 sta_id = 0; 361 u8 sta_id = 0;
394 __le16 bc_ent; 362 __le16 bc_ent;
395 struct iwl_tx_cmd *tx_cmd = 363 struct iwl_tx_cmd *tx_cmd =
396 (struct iwl_tx_cmd *) txq->cmd[txq->q.read_ptr]->payload; 364 (void *)txq->entries[txq->q.read_ptr].cmd->payload;
397 365
398 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); 366 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
399 367
@@ -448,20 +416,17 @@ static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
448void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, 416void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
449 int txq_id, u32 index) 417 int txq_id, u32 index)
450{ 418{
451 IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d", txq_id, index & 0xff); 419 IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d\n", txq_id, index & 0xff);
452 iwl_write_direct32(trans, HBUS_TARG_WRPTR, 420 iwl_write_direct32(trans, HBUS_TARG_WRPTR,
453 (index & 0xff) | (txq_id << 8)); 421 (index & 0xff) | (txq_id << 8));
454 iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), index); 422 iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), index);
455} 423}
456 424
457void iwl_trans_tx_queue_set_status(struct iwl_trans *trans, 425void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
458 struct iwl_tx_queue *txq, 426 struct iwl_tx_queue *txq,
459 int tx_fifo_id, int scd_retry) 427 int tx_fifo_id, bool active)
460{ 428{
461 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
462 int txq_id = txq->q.id; 429 int txq_id = txq->q.id;
463 int active =
464 test_bit(txq_id, &trans_pcie->txq_ctx_active_msk) ? 1 : 0;
465 430
466 iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), 431 iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
467 (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) | 432 (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
@@ -469,77 +434,22 @@ void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
469 (1 << SCD_QUEUE_STTS_REG_POS_WSL) | 434 (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
470 SCD_QUEUE_STTS_REG_MSK); 435 SCD_QUEUE_STTS_REG_MSK);
471 436
472 txq->sched_retry = scd_retry;
473
474 if (active) 437 if (active)
475 IWL_DEBUG_TX_QUEUES(trans, "Activate %s Queue %d on FIFO %d\n", 438 IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d\n",
476 scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id); 439 txq_id, tx_fifo_id);
477 else 440 else
478 IWL_DEBUG_TX_QUEUES(trans, "Deactivate %s Queue %d\n", 441 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
479 scd_retry ? "BA" : "AC/CMD", txq_id);
480}
481
482static inline int get_ac_from_tid(u16 tid)
483{
484 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
485 return tid_to_ac[tid];
486
487 /* no support for TIDs 8-15 yet */
488 return -EINVAL;
489}
490
491static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie,
492 u8 ctx, u16 tid)
493{
494 const u8 *ac_to_fifo = trans_pcie->ac_to_fifo[ctx];
495 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
496 return ac_to_fifo[tid_to_ac[tid]];
497
498 /* no support for TIDs 8-15 yet */
499 return -EINVAL;
500} 442}
501 443
502static inline bool is_agg_txqid_valid(struct iwl_trans *trans, int txq_id) 444void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int txq_id, int fifo,
445 int sta_id, int tid, int frame_limit, u16 ssn)
503{ 446{
504 if (txq_id < IWLAGN_FIRST_AMPDU_QUEUE) 447 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
505 return false;
506 return txq_id < (IWLAGN_FIRST_AMPDU_QUEUE +
507 hw_params(trans).num_ampdu_queues);
508}
509
510void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
511 enum iwl_rxon_context_id ctx, int sta_id,
512 int tid, int frame_limit, u16 ssn)
513{
514 int tx_fifo, txq_id;
515 u16 ra_tid;
516 unsigned long flags; 448 unsigned long flags;
449 u16 ra_tid = BUILD_RAxTID(sta_id, tid);
517 450
518 struct iwl_trans_pcie *trans_pcie = 451 if (test_and_set_bit(txq_id, trans_pcie->queue_used))
519 IWL_TRANS_GET_PCIE_TRANS(trans); 452 WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
520
521 if (WARN_ON(sta_id == IWL_INVALID_STATION))
522 return;
523 if (WARN_ON(tid >= IWL_MAX_TID_COUNT))
524 return;
525
526 tx_fifo = get_fifo_from_tid(trans_pcie, ctx, tid);
527 if (WARN_ON(tx_fifo < 0)) {
528 IWL_ERR(trans, "txq_agg_setup, bad fifo: %d\n", tx_fifo);
529 return;
530 }
531
532 txq_id = trans_pcie->agg_txq[sta_id][tid];
533 if (WARN_ON_ONCE(!is_agg_txqid_valid(trans, txq_id))) {
534 IWL_ERR(trans,
535 "queue number out of range: %d, must be %d to %d\n",
536 txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
537 IWLAGN_FIRST_AMPDU_QUEUE +
538 hw_params(trans).num_ampdu_queues - 1);
539 return;
540 }
541
542 ra_tid = BUILD_RAxTID(sta_id, tid);
543 453
544 spin_lock_irqsave(&trans_pcie->irq_lock, flags); 454 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
545 455
@@ -550,10 +460,10 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
550 iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id); 460 iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);
551 461
552 /* Set this queue as a chain-building queue */ 462 /* Set this queue as a chain-building queue */
553 iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, (1<<txq_id)); 463 iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));
554 464
555 /* enable aggregations for the queue */ 465 /* enable aggregations for the queue */
556 iwl_set_bits_prph(trans, SCD_AGGR_SEL, (1<<txq_id)); 466 iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
557 467
558 /* Place first TFD at index corresponding to start sequence number. 468 /* Place first TFD at index corresponding to start sequence number.
559 * Assumes that ssn_idx is valid (!= 0xFFF) */ 469 * Assumes that ssn_idx is valid (!= 0xFFF) */
@@ -563,92 +473,42 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
563 473
564 /* Set up Tx window size and frame limit for this queue */ 474 /* Set up Tx window size and frame limit for this queue */
565 iwl_write_targ_mem(trans, trans_pcie->scd_base_addr + 475 iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
566 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + 476 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
567 sizeof(u32), 477 ((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
568 ((frame_limit << 478 SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
569 SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) & 479 ((frame_limit << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
570 SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) | 480 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
571 ((frame_limit <<
572 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
573 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
574 481
575 iwl_set_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id)); 482 iwl_set_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id));
576 483
577 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ 484 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
578 iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 485 iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
579 tx_fifo, 1); 486 fifo, true);
580
581 trans_pcie->txq[txq_id].sta_id = sta_id;
582 trans_pcie->txq[txq_id].tid = tid;
583 487
584 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); 488 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
585} 489}
586 490
587/* 491void iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int txq_id)
588 * Find first available (lowest unused) Tx Queue, mark it "active".
589 * Called only when finding queue for aggregation.
590 * Should never return anything < 7, because they should already
591 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
592 */
593static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
594{
595 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
596 int txq_id;
597
598 for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues;
599 txq_id++)
600 if (!test_and_set_bit(txq_id,
601 &trans_pcie->txq_ctx_active_msk))
602 return txq_id;
603 return -1;
604}
605
606int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
607 int sta_id, int tid)
608{
609 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
610 int txq_id;
611
612 txq_id = iwlagn_txq_ctx_activate_free(trans);
613 if (txq_id == -1) {
614 IWL_ERR(trans, "No free aggregation queue available\n");
615 return -ENXIO;
616 }
617
618 trans_pcie->agg_txq[sta_id][tid] = txq_id;
619 iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);
620
621 return 0;
622}
623
624int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid)
625{ 492{
626 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 493 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
627 u8 txq_id = trans_pcie->agg_txq[sta_id][tid];
628 494
629 if (WARN_ON_ONCE(!is_agg_txqid_valid(trans, txq_id))) { 495 if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
630 IWL_ERR(trans, 496 WARN_ONCE(1, "queue %d not used", txq_id);
631 "queue number out of range: %d, must be %d to %d\n", 497 return;
632 txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
633 IWLAGN_FIRST_AMPDU_QUEUE +
634 hw_params(trans).num_ampdu_queues - 1);
635 return -EINVAL;
636 } 498 }
637 499
638 iwlagn_tx_queue_stop_scheduler(trans, txq_id); 500 iwlagn_tx_queue_stop_scheduler(trans, txq_id);
639 501
640 iwl_clear_bits_prph(trans, SCD_AGGR_SEL, (1 << txq_id)); 502 iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
641 503
642 trans_pcie->agg_txq[sta_id][tid] = 0;
643 trans_pcie->txq[txq_id].q.read_ptr = 0; 504 trans_pcie->txq[txq_id].q.read_ptr = 0;
644 trans_pcie->txq[txq_id].q.write_ptr = 0; 505 trans_pcie->txq[txq_id].q.write_ptr = 0;
645 /* supposes that ssn_idx is valid (!= 0xFFF) */
646 iwl_trans_set_wr_ptrs(trans, txq_id, 0); 506 iwl_trans_set_wr_ptrs(trans, txq_id, 0);
647 507
648 iwl_clear_bits_prph(trans, SCD_INTERRUPT_MASK, (1 << txq_id)); 508 iwl_clear_bits_prph(trans, SCD_INTERRUPT_MASK, BIT(txq_id));
649 iwl_txq_ctx_deactivate(trans_pcie, txq_id); 509
650 iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0); 510 iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
651 return 0; 511 0, false);
652} 512}
653 513
654/*************** HOST COMMAND QUEUE FUNCTIONS *****/ 514/*************** HOST COMMAND QUEUE FUNCTIONS *****/
@@ -681,11 +541,6 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
681 int trace_idx; 541 int trace_idx;
682#endif 542#endif
683 543
684 if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
685 IWL_WARN(trans, "fw recovery, no hcmd send\n");
686 return -EIO;
687 }
688
689 copy_size = sizeof(out_cmd->hdr); 544 copy_size = sizeof(out_cmd->hdr);
690 cmd_size = sizeof(out_cmd->hdr); 545 cmd_size = sizeof(out_cmd->hdr);
691 546
@@ -726,8 +581,8 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
726 } 581 }
727 582
728 idx = get_cmd_index(q, q->write_ptr); 583 idx = get_cmd_index(q, q->write_ptr);
729 out_cmd = txq->cmd[idx]; 584 out_cmd = txq->entries[idx].cmd;
730 out_meta = &txq->meta[idx]; 585 out_meta = &txq->entries[idx].meta;
731 586
732 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ 587 memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
733 if (cmd->flags & CMD_WANT_SKB) 588 if (cmd->flags & CMD_WANT_SKB)
@@ -753,12 +608,11 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
753 cmd_dest += cmd->len[i]; 608 cmd_dest += cmd->len[i];
754 } 609 }
755 610
756 IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, " 611 IWL_DEBUG_HC(trans,
757 "%d bytes at %d[%d]:%d\n", 612 "Sending command %s (#%x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
758 get_cmd_string(out_cmd->hdr.cmd), 613 trans_pcie_get_cmd_string(trans_pcie, out_cmd->hdr.cmd),
759 out_cmd->hdr.cmd, 614 out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
760 le16_to_cpu(out_cmd->hdr.sequence), cmd_size, 615 q->write_ptr, idx, trans_pcie->cmd_queue);
761 q->write_ptr, idx, trans_pcie->cmd_queue);
762 616
763 phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size, 617 phys_addr = dma_map_single(trans->dev, &out_cmd->hdr, copy_size,
764 DMA_BIDIRECTIONAL); 618 DMA_BIDIRECTIONAL);
@@ -816,6 +670,10 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
816 trace_bufs[2], trace_lens[2]); 670 trace_bufs[2], trace_lens[2]);
817#endif 671#endif
818 672
673 /* start timer if queue currently empty */
674 if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
675 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
676
819 /* Increment and update queue's write index */ 677 /* Increment and update queue's write index */
820 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); 678 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
821 iwl_txq_update_write_ptr(trans, txq); 679 iwl_txq_update_write_ptr(trans, txq);
@@ -825,6 +683,22 @@ static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
825 return idx; 683 return idx;
826} 684}
827 685
686static inline void iwl_queue_progress(struct iwl_trans_pcie *trans_pcie,
687 struct iwl_tx_queue *txq)
688{
689 if (!trans_pcie->wd_timeout)
690 return;
691
692 /*
693 * if empty delete timer, otherwise move timer forward
694 * since we're making progress on this queue
695 */
696 if (txq->q.read_ptr == txq->q.write_ptr)
697 del_timer(&txq->stuck_timer);
698 else
699 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
700}
701
828/** 702/**
829 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd 703 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
830 * 704 *
@@ -859,6 +733,8 @@ static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
859 } 733 }
860 734
861 } 735 }
736
737 iwl_queue_progress(trans_pcie, txq);
862} 738}
863 739
864/** 740/**
@@ -899,10 +775,8 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
899 spin_lock(&txq->lock); 775 spin_lock(&txq->lock);
900 776
901 cmd_index = get_cmd_index(&txq->q, index); 777 cmd_index = get_cmd_index(&txq->q, index);
902 cmd = txq->cmd[cmd_index]; 778 cmd = txq->entries[cmd_index].cmd;
903 meta = &txq->meta[cmd_index]; 779 meta = &txq->entries[cmd_index].meta;
904
905 txq->time_stamp = jiffies;
906 780
907 iwlagn_unmap_tfd(trans, meta, &txq->tfds[index], 781 iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
908 DMA_BIDIRECTIONAL); 782 DMA_BIDIRECTIONAL);
@@ -913,21 +787,23 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
913 787
914 meta->source->resp_pkt = pkt; 788 meta->source->resp_pkt = pkt;
915 meta->source->_rx_page_addr = (unsigned long)page_address(p); 789 meta->source->_rx_page_addr = (unsigned long)page_address(p);
916 meta->source->_rx_page_order = hw_params(trans).rx_page_order; 790 meta->source->_rx_page_order = trans_pcie->rx_page_order;
917 meta->source->handler_status = handler_status; 791 meta->source->handler_status = handler_status;
918 } 792 }
919 793
920 iwl_hcmd_queue_reclaim(trans, txq_id, index); 794 iwl_hcmd_queue_reclaim(trans, txq_id, index);
921 795
922 if (!(meta->flags & CMD_ASYNC)) { 796 if (!(meta->flags & CMD_ASYNC)) {
923 if (!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) { 797 if (!test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
924 IWL_WARN(trans, 798 IWL_WARN(trans,
925 "HCMD_ACTIVE already clear for command %s\n", 799 "HCMD_ACTIVE already clear for command %s\n",
926 get_cmd_string(cmd->hdr.cmd)); 800 trans_pcie_get_cmd_string(trans_pcie,
801 cmd->hdr.cmd));
927 } 802 }
928 clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status); 803 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
929 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", 804 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
930 get_cmd_string(cmd->hdr.cmd)); 805 trans_pcie_get_cmd_string(trans_pcie,
806 cmd->hdr.cmd));
931 wake_up(&trans->wait_command_queue); 807 wake_up(&trans->wait_command_queue);
932 } 808 }
933 809
@@ -940,6 +816,7 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
940 816
941static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd) 817static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
942{ 818{
819 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
943 int ret; 820 int ret;
944 821
945 /* An asynchronous command can not expect an SKB to be set. */ 822 /* An asynchronous command can not expect an SKB to be set. */
@@ -951,7 +828,7 @@ static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
951 if (ret < 0) { 828 if (ret < 0) {
952 IWL_ERR(trans, 829 IWL_ERR(trans,
953 "Error sending %s: enqueue_hcmd failed: %d\n", 830 "Error sending %s: enqueue_hcmd failed: %d\n",
954 get_cmd_string(cmd->id), ret); 831 trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret);
955 return ret; 832 return ret;
956 } 833 }
957 return 0; 834 return 0;
@@ -964,55 +841,51 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
964 int ret; 841 int ret;
965 842
966 IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", 843 IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
967 get_cmd_string(cmd->id)); 844 trans_pcie_get_cmd_string(trans_pcie, cmd->id));
968
969 if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
970 IWL_ERR(trans, "Command %s failed: FW Error\n",
971 get_cmd_string(cmd->id));
972 return -EIO;
973 }
974 845
975 if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE, 846 if (WARN_ON(test_and_set_bit(STATUS_HCMD_ACTIVE,
976 &trans->shrd->status))) { 847 &trans_pcie->status))) {
977 IWL_ERR(trans, "Command %s: a command is already active!\n", 848 IWL_ERR(trans, "Command %s: a command is already active!\n",
978 get_cmd_string(cmd->id)); 849 trans_pcie_get_cmd_string(trans_pcie, cmd->id));
979 return -EIO; 850 return -EIO;
980 } 851 }
981 852
982 IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", 853 IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
983 get_cmd_string(cmd->id)); 854 trans_pcie_get_cmd_string(trans_pcie, cmd->id));
984 855
985 cmd_idx = iwl_enqueue_hcmd(trans, cmd); 856 cmd_idx = iwl_enqueue_hcmd(trans, cmd);
986 if (cmd_idx < 0) { 857 if (cmd_idx < 0) {
987 ret = cmd_idx; 858 ret = cmd_idx;
988 clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status); 859 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
989 IWL_ERR(trans, 860 IWL_ERR(trans,
990 "Error sending %s: enqueue_hcmd failed: %d\n", 861 "Error sending %s: enqueue_hcmd failed: %d\n",
991 get_cmd_string(cmd->id), ret); 862 trans_pcie_get_cmd_string(trans_pcie, cmd->id), ret);
992 return ret; 863 return ret;
993 } 864 }
994 865
995 ret = wait_event_timeout(trans->wait_command_queue, 866 ret = wait_event_timeout(trans->wait_command_queue,
996 !test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status), 867 !test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status),
997 HOST_COMPLETE_TIMEOUT); 868 HOST_COMPLETE_TIMEOUT);
998 if (!ret) { 869 if (!ret) {
999 if (test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) { 870 if (test_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status)) {
1000 struct iwl_tx_queue *txq = 871 struct iwl_tx_queue *txq =
1001 &trans_pcie->txq[trans_pcie->cmd_queue]; 872 &trans_pcie->txq[trans_pcie->cmd_queue];
1002 struct iwl_queue *q = &txq->q; 873 struct iwl_queue *q = &txq->q;
1003 874
1004 IWL_ERR(trans, 875 IWL_ERR(trans,
1005 "Error sending %s: time out after %dms.\n", 876 "Error sending %s: time out after %dms.\n",
1006 get_cmd_string(cmd->id), 877 trans_pcie_get_cmd_string(trans_pcie, cmd->id),
1007 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); 878 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
1008 879
1009 IWL_ERR(trans, 880 IWL_ERR(trans,
1010 "Current CMD queue read_ptr %d write_ptr %d\n", 881 "Current CMD queue read_ptr %d write_ptr %d\n",
1011 q->read_ptr, q->write_ptr); 882 q->read_ptr, q->write_ptr);
1012 883
1013 clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status); 884 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
1014 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command" 885 IWL_DEBUG_INFO(trans,
1015 "%s\n", get_cmd_string(cmd->id)); 886 "Clearing HCMD_ACTIVE for command %s\n",
887 trans_pcie_get_cmd_string(trans_pcie,
888 cmd->id));
1016 ret = -ETIMEDOUT; 889 ret = -ETIMEDOUT;
1017 goto cancel; 890 goto cancel;
1018 } 891 }
@@ -1020,7 +893,7 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
1020 893
1021 if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { 894 if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
1022 IWL_ERR(trans, "Error: Response NULL in '%s'\n", 895 IWL_ERR(trans, "Error: Response NULL in '%s'\n",
1023 get_cmd_string(cmd->id)); 896 trans_pcie_get_cmd_string(trans_pcie, cmd->id));
1024 ret = -EIO; 897 ret = -EIO;
1025 goto cancel; 898 goto cancel;
1026 } 899 }
@@ -1035,8 +908,8 @@ cancel:
1035 * in later, it will possibly set an invalid 908 * in later, it will possibly set an invalid
1036 * address (cmd->meta.source). 909 * address (cmd->meta.source).
1037 */ 910 */
1038 trans_pcie->txq[trans_pcie->cmd_queue].meta[cmd_idx].flags &= 911 trans_pcie->txq[trans_pcie->cmd_queue].
1039 ~CMD_WANT_SKB; 912 entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
1040 } 913 }
1041 914
1042 if (cmd->resp_pkt) { 915 if (cmd->resp_pkt) {
@@ -1091,17 +964,20 @@ int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
1091 q->read_ptr != index; 964 q->read_ptr != index;
1092 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 965 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
1093 966
1094 if (WARN_ON_ONCE(txq->skbs[txq->q.read_ptr] == NULL)) 967 if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
1095 continue; 968 continue;
1096 969
1097 __skb_queue_tail(skbs, txq->skbs[txq->q.read_ptr]); 970 __skb_queue_tail(skbs, txq->entries[txq->q.read_ptr].skb);
1098 971
1099 txq->skbs[txq->q.read_ptr] = NULL; 972 txq->entries[txq->q.read_ptr].skb = NULL;
1100 973
1101 iwlagn_txq_inval_byte_cnt_tbl(trans, txq); 974 iwlagn_txq_inval_byte_cnt_tbl(trans, txq);
1102 975
1103 iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr, DMA_TO_DEVICE); 976 iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr, DMA_TO_DEVICE);
1104 freed++; 977 freed++;
1105 } 978 }
979
980 iwl_queue_progress(trans_pcie, txq);
981
1106 return freed; 982 return freed;
1107} 983}
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index 4d7b30d3e648..63557bb85cda 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -68,18 +68,20 @@
68#include <linux/bitops.h> 68#include <linux/bitops.h>
69#include <linux/gfp.h> 69#include <linux/gfp.h>
70 70
71#include "iwl-drv.h"
71#include "iwl-trans.h" 72#include "iwl-trans.h"
72#include "iwl-trans-pcie-int.h" 73#include "iwl-trans-pcie-int.h"
73#include "iwl-csr.h" 74#include "iwl-csr.h"
74#include "iwl-prph.h" 75#include "iwl-prph.h"
75#include "iwl-shared.h"
76#include "iwl-eeprom.h" 76#include "iwl-eeprom.h"
77#include "iwl-agn-hw.h" 77#include "iwl-agn-hw.h"
78/* FIXME: need to abstract out TX command (once we know what it looks like) */
79#include "iwl-commands.h"
78 80
79#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo)))) 81#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
80 82
81#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie) \ 83#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie) \
82 (((1<<cfg(trans)->base_params->num_of_queues) - 1) &\ 84 (((1<<trans->cfg->base_params->num_of_queues) - 1) &\
83 (~(1<<(trans_pcie)->cmd_queue))) 85 (~(1<<(trans_pcie)->cmd_queue)))
84 86
85static int iwl_trans_rx_alloc(struct iwl_trans *trans) 87static int iwl_trans_rx_alloc(struct iwl_trans *trans)
@@ -132,10 +134,10 @@ static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
132 * to an SKB, so we need to unmap and free potential storage */ 134 * to an SKB, so we need to unmap and free potential storage */
133 if (rxq->pool[i].page != NULL) { 135 if (rxq->pool[i].page != NULL) {
134 dma_unmap_page(trans->dev, rxq->pool[i].page_dma, 136 dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
135 PAGE_SIZE << hw_params(trans).rx_page_order, 137 PAGE_SIZE << trans_pcie->rx_page_order,
136 DMA_FROM_DEVICE); 138 DMA_FROM_DEVICE);
137 __free_pages(rxq->pool[i].page, 139 __free_pages(rxq->pool[i].page,
138 hw_params(trans).rx_page_order); 140 trans_pcie->rx_page_order);
139 rxq->pool[i].page = NULL; 141 rxq->pool[i].page = NULL;
140 } 142 }
141 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 143 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
@@ -145,11 +147,12 @@ static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
145static void iwl_trans_rx_hw_init(struct iwl_trans *trans, 147static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
146 struct iwl_rx_queue *rxq) 148 struct iwl_rx_queue *rxq)
147{ 149{
150 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
148 u32 rb_size; 151 u32 rb_size;
149 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */ 152 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
150 u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */ 153 u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */
151 154
152 if (iwlagn_mod_params.amsdu_size_8K) 155 if (trans_pcie->rx_buf_size_8k)
153 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K; 156 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
154 else 157 else
155 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K; 158 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
@@ -180,7 +183,6 @@ static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
180 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL | 183 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
181 FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | 184 FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
182 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL | 185 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
183 FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
184 rb_size| 186 rb_size|
185 (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)| 187 (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
186 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS)); 188 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
@@ -299,6 +301,33 @@ static inline void iwlagn_free_dma_ptr(struct iwl_trans *trans,
299 memset(ptr, 0, sizeof(*ptr)); 301 memset(ptr, 0, sizeof(*ptr));
300} 302}
301 303
304static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
305{
306 struct iwl_tx_queue *txq = (void *)data;
307 struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
308 struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
309
310 spin_lock(&txq->lock);
311 /* check if triggered erroneously */
312 if (txq->q.read_ptr == txq->q.write_ptr) {
313 spin_unlock(&txq->lock);
314 return;
315 }
316 spin_unlock(&txq->lock);
317
318
319 IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
320 jiffies_to_msecs(trans_pcie->wd_timeout));
321 IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
322 txq->q.read_ptr, txq->q.write_ptr);
323 IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n",
324 iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq->q.id))
325 & (TFD_QUEUE_SIZE_MAX - 1),
326 iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq->q.id)));
327
328 iwl_op_mode_nic_error(trans->op_mode);
329}
330
302static int iwl_trans_txq_alloc(struct iwl_trans *trans, 331static int iwl_trans_txq_alloc(struct iwl_trans *trans,
303 struct iwl_tx_queue *txq, int slots_num, 332 struct iwl_tx_queue *txq, int slots_num,
304 u32 txq_id) 333 u32 txq_id)
@@ -307,40 +336,31 @@ static int iwl_trans_txq_alloc(struct iwl_trans *trans,
307 int i; 336 int i;
308 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 337 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
309 338
310 if (WARN_ON(txq->meta || txq->cmd || txq->skbs || txq->tfds)) 339 if (WARN_ON(txq->entries || txq->tfds))
311 return -EINVAL; 340 return -EINVAL;
312 341
342 setup_timer(&txq->stuck_timer, iwl_trans_pcie_queue_stuck_timer,
343 (unsigned long)txq);
344 txq->trans_pcie = trans_pcie;
345
313 txq->q.n_window = slots_num; 346 txq->q.n_window = slots_num;
314 347
315 txq->meta = kcalloc(slots_num, sizeof(txq->meta[0]), GFP_KERNEL); 348 txq->entries = kcalloc(slots_num,
316 txq->cmd = kcalloc(slots_num, sizeof(txq->cmd[0]), GFP_KERNEL); 349 sizeof(struct iwl_pcie_tx_queue_entry),
350 GFP_KERNEL);
317 351
318 if (!txq->meta || !txq->cmd) 352 if (!txq->entries)
319 goto error; 353 goto error;
320 354
321 if (txq_id == trans_pcie->cmd_queue) 355 if (txq_id == trans_pcie->cmd_queue)
322 for (i = 0; i < slots_num; i++) { 356 for (i = 0; i < slots_num; i++) {
323 txq->cmd[i] = kmalloc(sizeof(struct iwl_device_cmd), 357 txq->entries[i].cmd =
324 GFP_KERNEL); 358 kmalloc(sizeof(struct iwl_device_cmd),
325 if (!txq->cmd[i]) 359 GFP_KERNEL);
360 if (!txq->entries[i].cmd)
326 goto error; 361 goto error;
327 } 362 }
328 363
329 /* Alloc driver data array and TFD circular buffer */
330 /* Driver private data, only for Tx (not command) queues,
331 * not shared with device. */
332 if (txq_id != trans_pcie->cmd_queue) {
333 txq->skbs = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(txq->skbs[0]),
334 GFP_KERNEL);
335 if (!txq->skbs) {
336 IWL_ERR(trans, "kmalloc for auxiliary BD "
337 "structures failed\n");
338 goto error;
339 }
340 } else {
341 txq->skbs = NULL;
342 }
343
344 /* Circular buffer of transmit frame descriptors (TFDs), 364 /* Circular buffer of transmit frame descriptors (TFDs),
345 * shared with device */ 365 * shared with device */
346 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, 366 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
@@ -353,37 +373,22 @@ static int iwl_trans_txq_alloc(struct iwl_trans *trans,
353 373
354 return 0; 374 return 0;
355error: 375error:
356 kfree(txq->skbs); 376 if (txq->entries && txq_id == trans_pcie->cmd_queue)
357 txq->skbs = NULL;
358 /* since txq->cmd has been zeroed,
359 * all non allocated cmd[i] will be NULL */
360 if (txq->cmd && txq_id == trans_pcie->cmd_queue)
361 for (i = 0; i < slots_num; i++) 377 for (i = 0; i < slots_num; i++)
362 kfree(txq->cmd[i]); 378 kfree(txq->entries[i].cmd);
363 kfree(txq->meta); 379 kfree(txq->entries);
364 kfree(txq->cmd); 380 txq->entries = NULL;
365 txq->meta = NULL;
366 txq->cmd = NULL;
367 381
368 return -ENOMEM; 382 return -ENOMEM;
369 383
370} 384}
371 385
372static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq, 386static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
373 int slots_num, u32 txq_id) 387 int slots_num, u32 txq_id)
374{ 388{
375 int ret; 389 int ret;
376 390
377 txq->need_update = 0; 391 txq->need_update = 0;
378 memset(txq->meta, 0, sizeof(txq->meta[0]) * slots_num);
379
380 /*
381 * For the default queues 0-3, set up the swq_id
382 * already -- all others need to get one later
383 * (if they need one at all).
384 */
385 if (txq_id < 4)
386 iwl_set_swq_id(txq, txq_id, txq_id);
387 392
388 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise 393 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
389 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ 394 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
@@ -461,7 +466,7 @@ static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
461 466
462 if (txq_id == trans_pcie->cmd_queue) 467 if (txq_id == trans_pcie->cmd_queue)
463 for (i = 0; i < txq->q.n_window; i++) 468 for (i = 0; i < txq->q.n_window; i++)
464 kfree(txq->cmd[i]); 469 kfree(txq->entries[i].cmd);
465 470
466 /* De-alloc circular buffer of TFDs */ 471 /* De-alloc circular buffer of TFDs */
467 if (txq->q.n_bd) { 472 if (txq->q.n_bd) {
@@ -470,15 +475,10 @@ static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
470 memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr)); 475 memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
471 } 476 }
472 477
473 /* De-alloc array of per-TFD driver data */ 478 kfree(txq->entries);
474 kfree(txq->skbs); 479 txq->entries = NULL;
475 txq->skbs = NULL;
476 480
477 /* deallocate arrays */ 481 del_timer_sync(&txq->stuck_timer);
478 kfree(txq->cmd);
479 kfree(txq->meta);
480 txq->cmd = NULL;
481 txq->meta = NULL;
482 482
483 /* 0-fill queue descriptor structure */ 483 /* 0-fill queue descriptor structure */
484 memset(txq, 0, sizeof(*txq)); 484 memset(txq, 0, sizeof(*txq));
@@ -497,7 +497,7 @@ static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
497 /* Tx queues */ 497 /* Tx queues */
498 if (trans_pcie->txq) { 498 if (trans_pcie->txq) {
499 for (txq_id = 0; 499 for (txq_id = 0;
500 txq_id < cfg(trans)->base_params->num_of_queues; txq_id++) 500 txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
501 iwl_tx_queue_free(trans, txq_id); 501 iwl_tx_queue_free(trans, txq_id);
502 } 502 }
503 503
@@ -522,7 +522,7 @@ static int iwl_trans_tx_alloc(struct iwl_trans *trans)
522 int txq_id, slots_num; 522 int txq_id, slots_num;
523 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 523 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
524 524
525 u16 scd_bc_tbls_size = cfg(trans)->base_params->num_of_queues * 525 u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
526 sizeof(struct iwlagn_scd_bc_tbl); 526 sizeof(struct iwlagn_scd_bc_tbl);
527 527
528 /*It is not allowed to alloc twice, so warn when this happens. 528 /*It is not allowed to alloc twice, so warn when this happens.
@@ -546,7 +546,7 @@ static int iwl_trans_tx_alloc(struct iwl_trans *trans)
546 goto error; 546 goto error;
547 } 547 }
548 548
549 trans_pcie->txq = kcalloc(cfg(trans)->base_params->num_of_queues, 549 trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
550 sizeof(struct iwl_tx_queue), GFP_KERNEL); 550 sizeof(struct iwl_tx_queue), GFP_KERNEL);
551 if (!trans_pcie->txq) { 551 if (!trans_pcie->txq) {
552 IWL_ERR(trans, "Not enough memory for txq\n"); 552 IWL_ERR(trans, "Not enough memory for txq\n");
@@ -555,7 +555,7 @@ static int iwl_trans_tx_alloc(struct iwl_trans *trans)
555 } 555 }
556 556
557 /* Alloc and init all Tx queues, including the command queue (#4/#9) */ 557 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
558 for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues; 558 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
559 txq_id++) { 559 txq_id++) {
560 slots_num = (txq_id == trans_pcie->cmd_queue) ? 560 slots_num = (txq_id == trans_pcie->cmd_queue) ?
561 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 561 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
@@ -601,7 +601,7 @@ static int iwl_tx_init(struct iwl_trans *trans)
601 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); 601 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
602 602
603 /* Alloc and init all Tx queues, including the command queue (#4/#9) */ 603 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
604 for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues; 604 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
605 txq_id++) { 605 txq_id++) {
606 slots_num = (txq_id == trans_pcie->cmd_queue) ? 606 slots_num = (txq_id == trans_pcie->cmd_queue) ?
607 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 607 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
@@ -724,9 +724,9 @@ static int iwl_apm_init(struct iwl_trans *trans)
724 iwl_apm_config(trans); 724 iwl_apm_config(trans);
725 725
726 /* Configure analog phase-lock-loop before activating to D0A */ 726 /* Configure analog phase-lock-loop before activating to D0A */
727 if (cfg(trans)->base_params->pll_cfg_val) 727 if (trans->cfg->base_params->pll_cfg_val)
728 iwl_set_bit(trans, CSR_ANA_PLL_CFG, 728 iwl_set_bit(trans, CSR_ANA_PLL_CFG,
729 cfg(trans)->base_params->pll_cfg_val); 729 trans->cfg->base_params->pll_cfg_val);
730 730
731 /* 731 /*
732 * Set "initialization complete" bit to move adapter from 732 * Set "initialization complete" bit to move adapter from
@@ -836,7 +836,7 @@ static int iwl_nic_init(struct iwl_trans *trans)
836 if (iwl_tx_init(trans)) 836 if (iwl_tx_init(trans))
837 return -ENOMEM; 837 return -ENOMEM;
838 838
839 if (cfg(trans)->base_params->shadow_reg_enable) { 839 if (trans->cfg->base_params->shadow_reg_enable) {
840 /* enable shadow regs in HW */ 840 /* enable shadow regs in HW */
841 iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 841 iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
842 0x800FFFFF); 842 0x800FFFFF);
@@ -895,59 +895,6 @@ static int iwl_prepare_card_hw(struct iwl_trans *trans)
895 return ret; 895 return ret;
896} 896}
897 897
898#define IWL_AC_UNSET -1
899
900struct queue_to_fifo_ac {
901 s8 fifo, ac;
902};
903
904static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
905 { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
906 { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
907 { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
908 { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
909 { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
910 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
911 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
912 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
913 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
914 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
915 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
916};
917
918static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
919 { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
920 { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
921 { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
922 { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
923 { IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
924 { IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
925 { IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
926 { IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
927 { IWL_TX_FIFO_BE_IPAN, 2, },
928 { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
929 { IWL_TX_FIFO_AUX, IWL_AC_UNSET, },
930};
931
932static const u8 iwlagn_bss_ac_to_fifo[] = {
933 IWL_TX_FIFO_VO,
934 IWL_TX_FIFO_VI,
935 IWL_TX_FIFO_BE,
936 IWL_TX_FIFO_BK,
937};
938static const u8 iwlagn_bss_ac_to_queue[] = {
939 0, 1, 2, 3,
940};
941static const u8 iwlagn_pan_ac_to_fifo[] = {
942 IWL_TX_FIFO_VO_IPAN,
943 IWL_TX_FIFO_VI_IPAN,
944 IWL_TX_FIFO_BE_IPAN,
945 IWL_TX_FIFO_BK_IPAN,
946};
947static const u8 iwlagn_pan_ac_to_queue[] = {
948 7, 6, 5, 4,
949};
950
951/* 898/*
952 * ucode 899 * ucode
953 */ 900 */
@@ -1028,34 +975,21 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
1028 const struct fw_img *fw) 975 const struct fw_img *fw)
1029{ 976{
1030 int ret; 977 int ret;
1031 struct iwl_trans_pcie *trans_pcie =
1032 IWL_TRANS_GET_PCIE_TRANS(trans);
1033 bool hw_rfkill; 978 bool hw_rfkill;
1034 979
1035 trans_pcie->ac_to_queue[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_queue;
1036 trans_pcie->ac_to_queue[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_queue;
1037
1038 trans_pcie->ac_to_fifo[IWL_RXON_CTX_BSS] = iwlagn_bss_ac_to_fifo;
1039 trans_pcie->ac_to_fifo[IWL_RXON_CTX_PAN] = iwlagn_pan_ac_to_fifo;
1040
1041 trans_pcie->mcast_queue[IWL_RXON_CTX_BSS] = 0;
1042 trans_pcie->mcast_queue[IWL_RXON_CTX_PAN] = IWL_IPAN_MCAST_QUEUE;
1043
1044 /* This may fail if AMT took ownership of the device */ 980 /* This may fail if AMT took ownership of the device */
1045 if (iwl_prepare_card_hw(trans)) { 981 if (iwl_prepare_card_hw(trans)) {
1046 IWL_WARN(trans, "Exit HW not ready\n"); 982 IWL_WARN(trans, "Exit HW not ready\n");
1047 return -EIO; 983 return -EIO;
1048 } 984 }
1049 985
986 iwl_enable_rfkill_int(trans);
987
1050 /* If platform's RF_KILL switch is NOT set to KILL */ 988 /* If platform's RF_KILL switch is NOT set to KILL */
1051 hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) & 989 hw_rfkill = iwl_is_rfkill_set(trans);
1052 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
1053 iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); 990 iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
1054 991 if (hw_rfkill)
1055 if (hw_rfkill) {
1056 iwl_enable_rfkill_int(trans);
1057 return -ERFKILL; 992 return -ERFKILL;
1058 }
1059 993
1060 iwl_write32(trans, CSR_INT, 0xFFFFFFFF); 994 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1061 995
@@ -1098,9 +1032,7 @@ static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
1098 1032
1099static void iwl_tx_start(struct iwl_trans *trans) 1033static void iwl_tx_start(struct iwl_trans *trans)
1100{ 1034{
1101 const struct queue_to_fifo_ac *queue_to_fifo; 1035 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1102 struct iwl_trans_pcie *trans_pcie =
1103 IWL_TRANS_GET_PCIE_TRANS(trans);
1104 u32 a; 1036 u32 a;
1105 unsigned long flags; 1037 unsigned long flags;
1106 int i, chan; 1038 int i, chan;
@@ -1121,7 +1053,7 @@ static void iwl_tx_start(struct iwl_trans *trans)
1121 iwl_write_targ_mem(trans, a, 0); 1053 iwl_write_targ_mem(trans, a, 0);
1122 for (; a < trans_pcie->scd_base_addr + 1054 for (; a < trans_pcie->scd_base_addr +
1123 SCD_TRANS_TBL_OFFSET_QUEUE( 1055 SCD_TRANS_TBL_OFFSET_QUEUE(
1124 cfg(trans)->base_params->num_of_queues); 1056 trans->cfg->base_params->num_of_queues);
1125 a += 4) 1057 a += 4)
1126 iwl_write_targ_mem(trans, a, 0); 1058 iwl_write_targ_mem(trans, a, 0);
1127 1059
@@ -1144,7 +1076,7 @@ static void iwl_tx_start(struct iwl_trans *trans)
1144 iwl_write_prph(trans, SCD_AGGR_SEL, 0); 1076 iwl_write_prph(trans, SCD_AGGR_SEL, 0);
1145 1077
1146 /* initiate the queues */ 1078 /* initiate the queues */
1147 for (i = 0; i < cfg(trans)->base_params->num_of_queues; i++) { 1079 for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
1148 iwl_write_prph(trans, SCD_QUEUE_RDPTR(i), 0); 1080 iwl_write_prph(trans, SCD_QUEUE_RDPTR(i), 0);
1149 iwl_write_direct32(trans, HBUS_TARG_WRPTR, 0 | (i << 8)); 1081 iwl_write_direct32(trans, HBUS_TARG_WRPTR, 0 | (i << 8));
1150 iwl_write_targ_mem(trans, trans_pcie->scd_base_addr + 1082 iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
@@ -1161,46 +1093,24 @@ static void iwl_tx_start(struct iwl_trans *trans)
1161 } 1093 }
1162 1094
1163 iwl_write_prph(trans, SCD_INTERRUPT_MASK, 1095 iwl_write_prph(trans, SCD_INTERRUPT_MASK,
1164 IWL_MASK(0, cfg(trans)->base_params->num_of_queues)); 1096 IWL_MASK(0, trans->cfg->base_params->num_of_queues));
1165 1097
1166 /* Activate all Tx DMA/FIFO channels */ 1098 /* Activate all Tx DMA/FIFO channels */
1167 iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7)); 1099 iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
1168 1100
1169 /* map queues to FIFOs */
1170 if (trans->shrd->valid_contexts != BIT(IWL_RXON_CTX_BSS))
1171 queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
1172 else
1173 queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
1174
1175 iwl_trans_set_wr_ptrs(trans, trans_pcie->cmd_queue, 0); 1101 iwl_trans_set_wr_ptrs(trans, trans_pcie->cmd_queue, 0);
1176 1102
1177 /* make sure all queue are not stopped */ 1103 /* make sure all queue are not stopped/used */
1178 memset(&trans_pcie->queue_stopped[0], 0, 1104 memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
1179 sizeof(trans_pcie->queue_stopped)); 1105 memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
1180 for (i = 0; i < 4; i++)
1181 atomic_set(&trans_pcie->queue_stop_count[i], 0);
1182
1183 /* reset to 0 to enable all the queue first */
1184 trans_pcie->txq_ctx_active_msk = 0;
1185 1106
1186 BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) < 1107 for (i = 0; i < trans_pcie->n_q_to_fifo; i++) {
1187 IWLAGN_FIRST_AMPDU_QUEUE); 1108 int fifo = trans_pcie->setup_q_to_fifo[i];
1188 BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) <
1189 IWLAGN_FIRST_AMPDU_QUEUE);
1190 1109
1191 for (i = 0; i < IWLAGN_FIRST_AMPDU_QUEUE; i++) { 1110 set_bit(i, trans_pcie->queue_used);
1192 int fifo = queue_to_fifo[i].fifo;
1193 int ac = queue_to_fifo[i].ac;
1194 1111
1195 iwl_txq_ctx_activate(trans_pcie, i);
1196
1197 if (fifo == IWL_TX_FIFO_UNUSED)
1198 continue;
1199
1200 if (ac != IWL_AC_UNSET)
1201 iwl_set_swq_id(&trans_pcie->txq[i], ac, i);
1202 iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i], 1112 iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i],
1203 fifo, 0); 1113 fifo, true);
1204 } 1114 }
1205 1115
1206 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); 1116 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
@@ -1251,7 +1161,7 @@ static int iwl_trans_tx_stop(struct iwl_trans *trans)
1251 } 1161 }
1252 1162
1253 /* Unmap DMA from host system and free skb's */ 1163 /* Unmap DMA from host system and free skb's */
1254 for (txq_id = 0; txq_id < cfg(trans)->base_params->num_of_queues; 1164 for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
1255 txq_id++) 1165 txq_id++)
1256 iwl_tx_queue_unmap(trans, txq_id); 1166 iwl_tx_queue_unmap(trans, txq_id);
1257 1167
@@ -1303,6 +1213,8 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
1303 iwl_disable_interrupts(trans); 1213 iwl_disable_interrupts(trans);
1304 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); 1214 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
1305 1215
1216 iwl_enable_rfkill_int(trans);
1217
1306 /* wait to make sure we flush pending tasklet*/ 1218 /* wait to make sure we flush pending tasklet*/
1307 synchronize_irq(trans_pcie->irq); 1219 synchronize_irq(trans_pcie->irq);
1308 tasklet_kill(&trans_pcie->irq_tasklet); 1220 tasklet_kill(&trans_pcie->irq_tasklet);
@@ -1311,6 +1223,12 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
1311 1223
1312 /* stop and reset the on-board processor */ 1224 /* stop and reset the on-board processor */
1313 iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); 1225 iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
1226
1227 /* clear all status bits */
1228 clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
1229 clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
1230 clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
1231 clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
1314} 1232}
1315 1233
1316static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans) 1234static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
@@ -1325,81 +1243,43 @@ static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
1325} 1243}
1326 1244
1327static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, 1245static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1328 struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx, 1246 struct iwl_device_cmd *dev_cmd, int txq_id)
1329 u8 sta_id, u8 tid)
1330{ 1247{
1331 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1248 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1332 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1249 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1333 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1334 struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload; 1250 struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
1335 struct iwl_cmd_meta *out_meta; 1251 struct iwl_cmd_meta *out_meta;
1336 struct iwl_tx_queue *txq; 1252 struct iwl_tx_queue *txq;
1337 struct iwl_queue *q; 1253 struct iwl_queue *q;
1338
1339 dma_addr_t phys_addr = 0; 1254 dma_addr_t phys_addr = 0;
1340 dma_addr_t txcmd_phys; 1255 dma_addr_t txcmd_phys;
1341 dma_addr_t scratch_phys; 1256 dma_addr_t scratch_phys;
1342 u16 len, firstlen, secondlen; 1257 u16 len, firstlen, secondlen;
1343 u8 wait_write_ptr = 0; 1258 u8 wait_write_ptr = 0;
1344 u8 txq_id;
1345 bool is_agg = false;
1346 __le16 fc = hdr->frame_control; 1259 __le16 fc = hdr->frame_control;
1347 u8 hdr_len = ieee80211_hdrlen(fc); 1260 u8 hdr_len = ieee80211_hdrlen(fc);
1348 u16 __maybe_unused wifi_seq; 1261 u16 __maybe_unused wifi_seq;
1349 1262
1350 /*
1351 * Send this frame after DTIM -- there's a special queue
1352 * reserved for this for contexts that support AP mode.
1353 */
1354 if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
1355 txq_id = trans_pcie->mcast_queue[ctx];
1356
1357 /*
1358 * The microcode will clear the more data
1359 * bit in the last frame it transmits.
1360 */
1361 hdr->frame_control |=
1362 cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1363 } else if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
1364 txq_id = IWL_AUX_QUEUE;
1365 else
1366 txq_id =
1367 trans_pcie->ac_to_queue[ctx][skb_get_queue_mapping(skb)];
1368
1369 /* aggregation is on for this <sta,tid> */
1370 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
1371 WARN_ON(tid >= IWL_MAX_TID_COUNT);
1372 txq_id = trans_pcie->agg_txq[sta_id][tid];
1373 is_agg = true;
1374 }
1375
1376 txq = &trans_pcie->txq[txq_id]; 1263 txq = &trans_pcie->txq[txq_id];
1377 q = &txq->q; 1264 q = &txq->q;
1378 1265
1379 spin_lock(&txq->lock); 1266 if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
1267 WARN_ON_ONCE(1);
1268 return -EINVAL;
1269 }
1380 1270
1381 /* In AGG mode, the index in the ring must correspond to the WiFi 1271 spin_lock(&txq->lock);
1382 * sequence number. This is a HW requirements to help the SCD to parse
1383 * the BA.
1384 * Check here that the packets are in the right place on the ring.
1385 */
1386#ifdef CONFIG_IWLWIFI_DEBUG
1387 wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
1388 WARN_ONCE(is_agg && ((wifi_seq & 0xff) != q->write_ptr),
1389 "Q: %d WiFi Seq %d tfdNum %d",
1390 txq_id, wifi_seq, q->write_ptr);
1391#endif
1392 1272
1393 /* Set up driver data for this TFD */ 1273 /* Set up driver data for this TFD */
1394 txq->skbs[q->write_ptr] = skb; 1274 txq->entries[q->write_ptr].skb = skb;
1395 txq->cmd[q->write_ptr] = dev_cmd; 1275 txq->entries[q->write_ptr].cmd = dev_cmd;
1396 1276
1397 dev_cmd->hdr.cmd = REPLY_TX; 1277 dev_cmd->hdr.cmd = REPLY_TX;
1398 dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | 1278 dev_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
1399 INDEX_TO_SEQ(q->write_ptr))); 1279 INDEX_TO_SEQ(q->write_ptr)));
1400 1280
1401 /* Set up first empty entry in queue's array of Tx/cmd buffers */ 1281 /* Set up first empty entry in queue's array of Tx/cmd buffers */
1402 out_meta = &txq->meta[q->write_ptr]; 1282 out_meta = &txq->entries[q->write_ptr].meta;
1403 1283
1404 /* 1284 /*
1405 * Use the first empty entry in this queue's command buffer array 1285 * Use the first empty entry in this queue's command buffer array
@@ -1481,6 +1361,10 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1481 &dev_cmd->hdr, firstlen, 1361 &dev_cmd->hdr, firstlen,
1482 skb->data + hdr_len, secondlen); 1362 skb->data + hdr_len, secondlen);
1483 1363
1364 /* start timer if queue currently empty */
1365 if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
1366 mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
1367
1484 /* Tell device the write index *just past* this latest filled TFD */ 1368 /* Tell device the write index *just past* this latest filled TFD */
1485 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); 1369 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
1486 iwl_txq_update_write_ptr(trans, txq); 1370 iwl_txq_update_write_ptr(trans, txq);
@@ -1541,8 +1425,10 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
1541 1425
1542 iwl_apm_init(trans); 1426 iwl_apm_init(trans);
1543 1427
1544 hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) & 1428 /* From now on, the op_mode will be kept updated about RF kill state */
1545 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); 1429 iwl_enable_rfkill_int(trans);
1430
1431 hw_rfkill = iwl_is_rfkill_set(trans);
1546 iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); 1432 iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
1547 1433
1548 return err; 1434 return err;
@@ -1555,18 +1441,41 @@ error:
1555 return err; 1441 return err;
1556} 1442}
1557 1443
1558static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans) 1444static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
1445 bool op_mode_leaving)
1559{ 1446{
1447 bool hw_rfkill;
1448 unsigned long flags;
1449 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1450
1560 iwl_apm_stop(trans); 1451 iwl_apm_stop(trans);
1561 1452
1453 spin_lock_irqsave(&trans_pcie->irq_lock, flags);
1454 iwl_disable_interrupts(trans);
1455 spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
1456
1562 iwl_write32(trans, CSR_INT, 0xFFFFFFFF); 1457 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1563 1458
1564 /* Even if we stop the HW, we still want the RF kill interrupt */ 1459 if (!op_mode_leaving) {
1565 iwl_enable_rfkill_int(trans); 1460 /*
1461 * Even if we stop the HW, we still want the RF kill
1462 * interrupt
1463 */
1464 iwl_enable_rfkill_int(trans);
1465
1466 /*
1467 * Check again since the RF kill state may have changed while
1468 * all the interrupts were disabled, in this case we couldn't
1469 * receive the RF kill interrupt and update the state in the
1470 * op_mode.
1471 */
1472 hw_rfkill = iwl_is_rfkill_set(trans);
1473 iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
1474 }
1566} 1475}
1567 1476
1568static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid, 1477static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
1569 int txq_id, int ssn, struct sk_buff_head *skbs) 1478 struct sk_buff_head *skbs)
1570{ 1479{
1571 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1480 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1572 struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id]; 1481 struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
@@ -1576,35 +1485,15 @@ static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
1576 1485
1577 spin_lock(&txq->lock); 1486 spin_lock(&txq->lock);
1578 1487
1579 txq->time_stamp = jiffies;
1580
1581 if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE &&
1582 tid != IWL_TID_NON_QOS &&
1583 txq_id != trans_pcie->agg_txq[sta_id][tid])) {
1584 /*
1585 * FIXME: this is a uCode bug which need to be addressed,
1586 * log the information and return for now.
1587 * Since it is can possibly happen very often and in order
1588 * not to fill the syslog, don't use IWL_ERR or IWL_WARN
1589 */
1590 IWL_DEBUG_TX_QUEUES(trans, "Bad queue mapping txq_id %d, "
1591 "agg_txq[sta_id[tid] %d", txq_id,
1592 trans_pcie->agg_txq[sta_id][tid]);
1593 spin_unlock(&txq->lock);
1594 return 1;
1595 }
1596
1597 if (txq->q.read_ptr != tfd_num) { 1488 if (txq->q.read_ptr != tfd_num) {
1598 IWL_DEBUG_TX_REPLY(trans, "[Q %d | AC %d] %d -> %d (%d)\n", 1489 IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
1599 txq_id, iwl_get_queue_ac(txq), txq->q.read_ptr, 1490 txq_id, txq->q.read_ptr, tfd_num, ssn);
1600 tfd_num, ssn);
1601 freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs); 1491 freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
1602 if (iwl_queue_space(&txq->q) > txq->q.low_mark) 1492 if (iwl_queue_space(&txq->q) > txq->q.low_mark)
1603 iwl_wake_queue(trans, txq); 1493 iwl_wake_queue(trans, txq);
1604 } 1494 }
1605 1495
1606 spin_unlock(&txq->lock); 1496 spin_unlock(&txq->lock);
1607 return 0;
1608} 1497}
1609 1498
1610static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) 1499static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
@@ -1623,7 +1512,7 @@ static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
1623} 1512}
1624 1513
1625static void iwl_trans_pcie_configure(struct iwl_trans *trans, 1514static void iwl_trans_pcie_configure(struct iwl_trans *trans,
1626 const struct iwl_trans_config *trans_cfg) 1515 const struct iwl_trans_config *trans_cfg)
1627{ 1516{
1628 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1517 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1629 1518
@@ -1635,9 +1524,31 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
1635 if (trans_pcie->n_no_reclaim_cmds) 1524 if (trans_pcie->n_no_reclaim_cmds)
1636 memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds, 1525 memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
1637 trans_pcie->n_no_reclaim_cmds * sizeof(u8)); 1526 trans_pcie->n_no_reclaim_cmds * sizeof(u8));
1527
1528 trans_pcie->n_q_to_fifo = trans_cfg->n_queue_to_fifo;
1529
1530 if (WARN_ON(trans_pcie->n_q_to_fifo > IWL_MAX_HW_QUEUES))
1531 trans_pcie->n_q_to_fifo = IWL_MAX_HW_QUEUES;
1532
1533 /* at least the command queue must be mapped */
1534 WARN_ON(!trans_pcie->n_q_to_fifo);
1535
1536 memcpy(trans_pcie->setup_q_to_fifo, trans_cfg->queue_to_fifo,
1537 trans_pcie->n_q_to_fifo * sizeof(u8));
1538
1539 trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
1540 if (trans_pcie->rx_buf_size_8k)
1541 trans_pcie->rx_page_order = get_order(8 * 1024);
1542 else
1543 trans_pcie->rx_page_order = get_order(4 * 1024);
1544
1545 trans_pcie->wd_timeout =
1546 msecs_to_jiffies(trans_cfg->queue_watchdog_timeout);
1547
1548 trans_pcie->command_names = trans_cfg->command_names;
1638} 1549}
1639 1550
1640static void iwl_trans_pcie_free(struct iwl_trans *trans) 1551void iwl_trans_pcie_free(struct iwl_trans *trans)
1641{ 1552{
1642 struct iwl_trans_pcie *trans_pcie = 1553 struct iwl_trans_pcie *trans_pcie =
1643 IWL_TRANS_GET_PCIE_TRANS(trans); 1554 IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1656,10 +1567,19 @@ static void iwl_trans_pcie_free(struct iwl_trans *trans)
1656 pci_release_regions(trans_pcie->pci_dev); 1567 pci_release_regions(trans_pcie->pci_dev);
1657 pci_disable_device(trans_pcie->pci_dev); 1568 pci_disable_device(trans_pcie->pci_dev);
1658 1569
1659 trans->shrd->trans = NULL;
1660 kfree(trans); 1570 kfree(trans);
1661} 1571}
1662 1572
1573static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
1574{
1575 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1576
1577 if (state)
1578 set_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
1579 else
1580 clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
1581}
1582
1663#ifdef CONFIG_PM_SLEEP 1583#ifdef CONFIG_PM_SLEEP
1664static int iwl_trans_pcie_suspend(struct iwl_trans *trans) 1584static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
1665{ 1585{
@@ -1670,16 +1590,14 @@ static int iwl_trans_pcie_resume(struct iwl_trans *trans)
1670{ 1590{
1671 bool hw_rfkill; 1591 bool hw_rfkill;
1672 1592
1673 hw_rfkill = !(iwl_read32(trans, CSR_GP_CNTRL) & 1593 iwl_enable_rfkill_int(trans);
1674 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
1675
1676 if (hw_rfkill)
1677 iwl_enable_rfkill_int(trans);
1678 else
1679 iwl_enable_interrupts(trans);
1680 1594
1595 hw_rfkill = iwl_is_rfkill_set(trans);
1681 iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill); 1596 iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
1682 1597
1598 if (!hw_rfkill)
1599 iwl_enable_interrupts(trans);
1600
1683 return 0; 1601 return 0;
1684} 1602}
1685#endif /* CONFIG_PM_SLEEP */ 1603#endif /* CONFIG_PM_SLEEP */
@@ -1696,7 +1614,7 @@ static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
1696 int ret = 0; 1614 int ret = 0;
1697 1615
1698 /* waiting for all the tx frames complete might take a while */ 1616 /* waiting for all the tx frames complete might take a while */
1699 for (cnt = 0; cnt < cfg(trans)->base_params->num_of_queues; cnt++) { 1617 for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
1700 if (cnt == trans_pcie->cmd_queue) 1618 if (cnt == trans_pcie->cmd_queue)
1701 continue; 1619 continue;
1702 txq = &trans_pcie->txq[cnt]; 1620 txq = &trans_pcie->txq[cnt];
@@ -1714,42 +1632,9 @@ static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
1714 return ret; 1632 return ret;
1715} 1633}
1716 1634
1717/*
1718 * On every watchdog tick we check (latest) time stamp. If it does not
1719 * change during timeout period and queue is not empty we reset firmware.
1720 */
1721static int iwl_trans_pcie_check_stuck_queue(struct iwl_trans *trans, int cnt)
1722{
1723 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1724 struct iwl_tx_queue *txq = &trans_pcie->txq[cnt];
1725 struct iwl_queue *q = &txq->q;
1726 unsigned long timeout;
1727
1728 if (q->read_ptr == q->write_ptr) {
1729 txq->time_stamp = jiffies;
1730 return 0;
1731 }
1732
1733 timeout = txq->time_stamp +
1734 msecs_to_jiffies(hw_params(trans).wd_timeout);
1735
1736 if (time_after(jiffies, timeout)) {
1737 IWL_ERR(trans, "Queue %d stuck for %u ms.\n", q->id,
1738 hw_params(trans).wd_timeout);
1739 IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
1740 q->read_ptr, q->write_ptr);
1741 IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n",
1742 iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt))
1743 & (TFD_QUEUE_SIZE_MAX - 1),
1744 iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
1745 return 1;
1746 }
1747
1748 return 0;
1749}
1750
1751static const char *get_fh_string(int cmd) 1635static const char *get_fh_string(int cmd)
1752{ 1636{
1637#define IWL_CMD(x) case x: return #x
1753 switch (cmd) { 1638 switch (cmd) {
1754 IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG); 1639 IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
1755 IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG); 1640 IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
@@ -1763,6 +1648,7 @@ static const char *get_fh_string(int cmd)
1763 default: 1648 default:
1764 return "UNKNOWN"; 1649 return "UNKNOWN";
1765 } 1650 }
1651#undef IWL_CMD
1766} 1652}
1767 1653
1768int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display) 1654int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
@@ -1811,6 +1697,7 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
1811 1697
1812static const char *get_csr_string(int cmd) 1698static const char *get_csr_string(int cmd)
1813{ 1699{
1700#define IWL_CMD(x) case x: return #x
1814 switch (cmd) { 1701 switch (cmd) {
1815 IWL_CMD(CSR_HW_IF_CONFIG_REG); 1702 IWL_CMD(CSR_HW_IF_CONFIG_REG);
1816 IWL_CMD(CSR_INT_COALESCING); 1703 IWL_CMD(CSR_INT_COALESCING);
@@ -1838,6 +1725,7 @@ static const char *get_csr_string(int cmd)
1838 default: 1725 default:
1839 return "UNKNOWN"; 1726 return "UNKNOWN";
1840 } 1727 }
1728#undef IWL_CMD
1841} 1729}
1842 1730
1843void iwl_dump_csr(struct iwl_trans *trans) 1731void iwl_dump_csr(struct iwl_trans *trans)
@@ -1938,32 +1826,23 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
1938 int ret; 1826 int ret;
1939 size_t bufsz; 1827 size_t bufsz;
1940 1828
1941 bufsz = sizeof(char) * 64 * cfg(trans)->base_params->num_of_queues; 1829 bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues;
1942 1830
1943 if (!trans_pcie->txq) { 1831 if (!trans_pcie->txq)
1944 IWL_ERR(trans, "txq not ready\n");
1945 return -EAGAIN; 1832 return -EAGAIN;
1946 } 1833
1947 buf = kzalloc(bufsz, GFP_KERNEL); 1834 buf = kzalloc(bufsz, GFP_KERNEL);
1948 if (!buf) 1835 if (!buf)
1949 return -ENOMEM; 1836 return -ENOMEM;
1950 1837
1951 for (cnt = 0; cnt < cfg(trans)->base_params->num_of_queues; cnt++) { 1838 for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
1952 txq = &trans_pcie->txq[cnt]; 1839 txq = &trans_pcie->txq[cnt];
1953 q = &txq->q; 1840 q = &txq->q;
1954 pos += scnprintf(buf + pos, bufsz - pos, 1841 pos += scnprintf(buf + pos, bufsz - pos,
1955 "hwq %.2d: read=%u write=%u stop=%d" 1842 "hwq %.2d: read=%u write=%u use=%d stop=%d\n",
1956 " swq_id=%#.2x (ac %d/hwq %d)\n",
1957 cnt, q->read_ptr, q->write_ptr, 1843 cnt, q->read_ptr, q->write_ptr,
1958 !!test_bit(cnt, trans_pcie->queue_stopped), 1844 !!test_bit(cnt, trans_pcie->queue_used),
1959 txq->swq_id, txq->swq_id & 3, 1845 !!test_bit(cnt, trans_pcie->queue_stopped));
1960 (txq->swq_id >> 2) & 0x1f);
1961 if (cnt >= 4)
1962 continue;
1963 /* for the ACs, display the stop count too */
1964 pos += scnprintf(buf + pos, bufsz - pos,
1965 " stop-count: %d\n",
1966 atomic_read(&trans_pcie->queue_stop_count[cnt]));
1967 } 1846 }
1968 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1847 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1969 kfree(buf); 1848 kfree(buf);
@@ -1997,44 +1876,6 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
1997 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1876 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1998} 1877}
1999 1878
2000static ssize_t iwl_dbgfs_log_event_read(struct file *file,
2001 char __user *user_buf,
2002 size_t count, loff_t *ppos)
2003{
2004 struct iwl_trans *trans = file->private_data;
2005 char *buf;
2006 int pos = 0;
2007 ssize_t ret = -ENOMEM;
2008
2009 ret = pos = iwl_dump_nic_event_log(trans, true, &buf, true);
2010 if (buf) {
2011 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2012 kfree(buf);
2013 }
2014 return ret;
2015}
2016
2017static ssize_t iwl_dbgfs_log_event_write(struct file *file,
2018 const char __user *user_buf,
2019 size_t count, loff_t *ppos)
2020{
2021 struct iwl_trans *trans = file->private_data;
2022 u32 event_log_flag;
2023 char buf[8];
2024 int buf_size;
2025
2026 memset(buf, 0, sizeof(buf));
2027 buf_size = min(count, sizeof(buf) - 1);
2028 if (copy_from_user(buf, user_buf, buf_size))
2029 return -EFAULT;
2030 if (sscanf(buf, "%d", &event_log_flag) != 1)
2031 return -EFAULT;
2032 if (event_log_flag == 1)
2033 iwl_dump_nic_event_log(trans, true, NULL, false);
2034
2035 return count;
2036}
2037
2038static ssize_t iwl_dbgfs_interrupt_read(struct file *file, 1879static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
2039 char __user *user_buf, 1880 char __user *user_buf,
2040 size_t count, loff_t *ppos) { 1881 size_t count, loff_t *ppos) {
@@ -2050,10 +1891,8 @@ static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
2050 ssize_t ret; 1891 ssize_t ret;
2051 1892
2052 buf = kzalloc(bufsz, GFP_KERNEL); 1893 buf = kzalloc(bufsz, GFP_KERNEL);
2053 if (!buf) { 1894 if (!buf)
2054 IWL_ERR(trans, "Can not allocate Buffer\n");
2055 return -ENOMEM; 1895 return -ENOMEM;
2056 }
2057 1896
2058 pos += scnprintf(buf + pos, bufsz - pos, 1897 pos += scnprintf(buf + pos, bufsz - pos,
2059 "Interrupt Statistics Report:\n"); 1898 "Interrupt Statistics Report:\n");
@@ -2161,12 +2000,26 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
2161 return ret; 2000 return ret;
2162} 2001}
2163 2002
2164DEBUGFS_READ_WRITE_FILE_OPS(log_event); 2003static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
2004 const char __user *user_buf,
2005 size_t count, loff_t *ppos)
2006{
2007 struct iwl_trans *trans = file->private_data;
2008
2009 if (!trans->op_mode)
2010 return -EAGAIN;
2011
2012 iwl_op_mode_nic_error(trans->op_mode);
2013
2014 return count;
2015}
2016
2165DEBUGFS_READ_WRITE_FILE_OPS(interrupt); 2017DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
2166DEBUGFS_READ_FILE_OPS(fh_reg); 2018DEBUGFS_READ_FILE_OPS(fh_reg);
2167DEBUGFS_READ_FILE_OPS(rx_queue); 2019DEBUGFS_READ_FILE_OPS(rx_queue);
2168DEBUGFS_READ_FILE_OPS(tx_queue); 2020DEBUGFS_READ_FILE_OPS(tx_queue);
2169DEBUGFS_WRITE_FILE_OPS(csr); 2021DEBUGFS_WRITE_FILE_OPS(csr);
2022DEBUGFS_WRITE_FILE_OPS(fw_restart);
2170 2023
2171/* 2024/*
2172 * Create the debugfs files and directories 2025 * Create the debugfs files and directories
@@ -2177,10 +2030,10 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
2177{ 2030{
2178 DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR); 2031 DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
2179 DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR); 2032 DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
2180 DEBUGFS_ADD_FILE(log_event, dir, S_IWUSR | S_IRUSR);
2181 DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR); 2033 DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
2182 DEBUGFS_ADD_FILE(csr, dir, S_IWUSR); 2034 DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
2183 DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR); 2035 DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
2036 DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR);
2184 return 0; 2037 return 0;
2185} 2038}
2186#else 2039#else
@@ -2190,7 +2043,7 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
2190 2043
2191#endif /*CONFIG_IWLWIFI_DEBUGFS */ 2044#endif /*CONFIG_IWLWIFI_DEBUGFS */
2192 2045
2193const struct iwl_trans_ops trans_ops_pcie = { 2046static const struct iwl_trans_ops trans_ops_pcie = {
2194 .start_hw = iwl_trans_pcie_start_hw, 2047 .start_hw = iwl_trans_pcie_start_hw,
2195 .stop_hw = iwl_trans_pcie_stop_hw, 2048 .stop_hw = iwl_trans_pcie_stop_hw,
2196 .fw_alive = iwl_trans_pcie_fw_alive, 2049 .fw_alive = iwl_trans_pcie_fw_alive,
@@ -2205,15 +2058,11 @@ const struct iwl_trans_ops trans_ops_pcie = {
2205 .reclaim = iwl_trans_pcie_reclaim, 2058 .reclaim = iwl_trans_pcie_reclaim,
2206 2059
2207 .tx_agg_disable = iwl_trans_pcie_tx_agg_disable, 2060 .tx_agg_disable = iwl_trans_pcie_tx_agg_disable,
2208 .tx_agg_alloc = iwl_trans_pcie_tx_agg_alloc,
2209 .tx_agg_setup = iwl_trans_pcie_tx_agg_setup, 2061 .tx_agg_setup = iwl_trans_pcie_tx_agg_setup,
2210 2062
2211 .free = iwl_trans_pcie_free,
2212
2213 .dbgfs_register = iwl_trans_pcie_dbgfs_register, 2063 .dbgfs_register = iwl_trans_pcie_dbgfs_register,
2214 2064
2215 .wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty, 2065 .wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty,
2216 .check_stuck_queue = iwl_trans_pcie_check_stuck_queue,
2217 2066
2218#ifdef CONFIG_PM_SLEEP 2067#ifdef CONFIG_PM_SLEEP
2219 .suspend = iwl_trans_pcie_suspend, 2068 .suspend = iwl_trans_pcie_suspend,
@@ -2223,11 +2072,12 @@ const struct iwl_trans_ops trans_ops_pcie = {
2223 .write32 = iwl_trans_pcie_write32, 2072 .write32 = iwl_trans_pcie_write32,
2224 .read32 = iwl_trans_pcie_read32, 2073 .read32 = iwl_trans_pcie_read32,
2225 .configure = iwl_trans_pcie_configure, 2074 .configure = iwl_trans_pcie_configure,
2075 .set_pmi = iwl_trans_pcie_set_pmi,
2226}; 2076};
2227 2077
2228struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd, 2078struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2229 struct pci_dev *pdev, 2079 const struct pci_device_id *ent,
2230 const struct pci_device_id *ent) 2080 const struct iwl_cfg *cfg)
2231{ 2081{
2232 struct iwl_trans_pcie *trans_pcie; 2082 struct iwl_trans_pcie *trans_pcie;
2233 struct iwl_trans *trans; 2083 struct iwl_trans *trans;
@@ -2243,7 +2093,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd,
2243 trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2093 trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2244 2094
2245 trans->ops = &trans_ops_pcie; 2095 trans->ops = &trans_ops_pcie;
2246 trans->shrd = shrd; 2096 trans->cfg = cfg;
2247 trans_pcie->trans = trans; 2097 trans_pcie->trans = trans;
2248 spin_lock_init(&trans_pcie->irq_lock); 2098 spin_lock_init(&trans_pcie->irq_lock);
2249 init_waitqueue_head(&trans_pcie->ucode_write_waitq); 2099 init_waitqueue_head(&trans_pcie->ucode_write_waitq);
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index fdf97886a5e4..79a1e7ae4995 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -66,8 +66,9 @@
66#include <linux/ieee80211.h> 66#include <linux/ieee80211.h>
67#include <linux/mm.h> /* for page_address */ 67#include <linux/mm.h> /* for page_address */
68 68
69#include "iwl-shared.h"
70#include "iwl-debug.h" 69#include "iwl-debug.h"
70#include "iwl-config.h"
71#include "iwl-fw.h"
71 72
72/** 73/**
73 * DOC: Transport layer - what is it ? 74 * DOC: Transport layer - what is it ?
@@ -104,13 +105,6 @@
104 * 6) Eventually, the free function will be called. 105 * 6) Eventually, the free function will be called.
105 */ 106 */
106 107
107struct iwl_priv;
108struct iwl_shared;
109struct iwl_op_mode;
110struct fw_img;
111struct sk_buff;
112struct dentry;
113
114/** 108/**
115 * DOC: Host command section 109 * DOC: Host command section
116 * 110 *
@@ -162,6 +156,8 @@ struct iwl_cmd_header {
162 156
163 157
164#define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */ 158#define FH_RSCSR_FRAME_SIZE_MSK 0x00003FFF /* bits 0-13 */
159#define FH_RSCSR_FRAME_INVALID 0x55550000
160#define FH_RSCSR_FRAME_ALIGN 0x40
165 161
166struct iwl_rx_packet { 162struct iwl_rx_packet {
167 /* 163 /*
@@ -260,28 +256,43 @@ static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
260 256
261struct iwl_rx_cmd_buffer { 257struct iwl_rx_cmd_buffer {
262 struct page *_page; 258 struct page *_page;
259 int _offset;
260 bool _page_stolen;
263 unsigned int truesize; 261 unsigned int truesize;
264}; 262};
265 263
266static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r) 264static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
267{ 265{
268 return page_address(r->_page); 266 return (void *)((unsigned long)page_address(r->_page) + r->_offset);
267}
268
269static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
270{
271 return r->_offset;
269} 272}
270 273
271static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r) 274static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
272{ 275{
273 struct page *p = r->_page; 276 r->_page_stolen = true;
274 r->_page = NULL; 277 get_page(r->_page);
275 return p; 278 return r->_page;
276} 279}
277 280
278#define MAX_NO_RECLAIM_CMDS 6 281#define MAX_NO_RECLAIM_CMDS 6
279 282
283/*
284 * Maximum number of HW queues the transport layer
285 * currently supports
286 */
287#define IWL_MAX_HW_QUEUES 32
288
280/** 289/**
281 * struct iwl_trans_config - transport configuration 290 * struct iwl_trans_config - transport configuration
282 * 291 *
283 * @op_mode: pointer to the upper layer. 292 * @op_mode: pointer to the upper layer.
284 * Must be set before any other call. 293 * @queue_to_fifo: queue to FIFO mapping to set up by
294 * default
295 * @n_queue_to_fifo: number of queues to set up
285 * @cmd_queue: the index of the command queue. 296 * @cmd_queue: the index of the command queue.
286 * Must be set before start_fw. 297 * Must be set before start_fw.
287 * @no_reclaim_cmds: Some devices erroneously don't set the 298 * @no_reclaim_cmds: Some devices erroneously don't set the
@@ -289,14 +300,29 @@ static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
289 * list of such notifications to filter. Max length is 300 * list of such notifications to filter. Max length is
290 * %MAX_NO_RECLAIM_CMDS. 301 * %MAX_NO_RECLAIM_CMDS.
291 * @n_no_reclaim_cmds: # of commands in list 302 * @n_no_reclaim_cmds: # of commands in list
303 * @rx_buf_size_8k: 8 kB RX buffer size needed for A-MSDUs,
304 * if unset 4k will be the RX buffer size
305 * @queue_watchdog_timeout: time (in ms) after which queues
306 * are considered stuck and will trigger device restart
307 * @command_names: array of command names, must be 256 entries
308 * (one for each command); for debugging only
292 */ 309 */
293struct iwl_trans_config { 310struct iwl_trans_config {
294 struct iwl_op_mode *op_mode; 311 struct iwl_op_mode *op_mode;
312 const u8 *queue_to_fifo;
313 u8 n_queue_to_fifo;
314
295 u8 cmd_queue; 315 u8 cmd_queue;
296 const u8 *no_reclaim_cmds; 316 const u8 *no_reclaim_cmds;
297 int n_no_reclaim_cmds; 317 int n_no_reclaim_cmds;
318
319 bool rx_buf_size_8k;
320 unsigned int queue_watchdog_timeout;
321 const char **command_names;
298}; 322};
299 323
324struct iwl_trans;
325
300/** 326/**
301 * struct iwl_trans_ops - transport specific operations 327 * struct iwl_trans_ops - transport specific operations
302 * 328 *
@@ -305,7 +331,8 @@ struct iwl_trans_config {
305 * @start_hw: starts the HW- from that point on, the HW can send interrupts 331 * @start_hw: starts the HW- from that point on, the HW can send interrupts
306 * May sleep 332 * May sleep
307 * @stop_hw: stops the HW- from that point on, the HW will be in low power but 333 * @stop_hw: stops the HW- from that point on, the HW will be in low power but
308 * will still issue interrupt if the HW RF kill is triggered. 334 * will still issue interrupt if the HW RF kill is triggered unless
335 * op_mode_leaving is true.
309 * May sleep 336 * May sleep
310 * @start_fw: allocates and inits all the resources for the transport 337 * @start_fw: allocates and inits all the resources for the transport
311 * layer. Also kick a fw image. 338 * layer. Also kick a fw image.
@@ -323,18 +350,11 @@ struct iwl_trans_config {
323 * Must be atomic 350 * Must be atomic
324 * @reclaim: free packet until ssn. Returns a list of freed packets. 351 * @reclaim: free packet until ssn. Returns a list of freed packets.
325 * Must be atomic 352 * Must be atomic
326 * @tx_agg_alloc: allocate resources for a TX BA session
327 * Must be atomic
328 * @tx_agg_setup: setup a tx queue for AMPDU - will be called once the HW is 353 * @tx_agg_setup: setup a tx queue for AMPDU - will be called once the HW is
329 * ready and a successful ADDBA response has been received. 354 * ready and a successful ADDBA response has been received.
330 * May sleep 355 * May sleep
331 * @tx_agg_disable: de-configure a Tx queue to send AMPDUs 356 * @tx_agg_disable: de-configure a Tx queue to send AMPDUs
332 * Must be atomic 357 * Must be atomic
333 * @free: release all the ressource for the transport layer itself such as
334 * irq, tasklet etc... From this point on, the device may not issue
335 * any interrupt (incl. RFKILL).
336 * May sleep
337 * @check_stuck_queue: check if a specific queue is stuck
338 * @wait_tx_queue_empty: wait until all tx queues are empty 358 * @wait_tx_queue_empty: wait until all tx queues are empty
339 * May sleep 359 * May sleep
340 * @dbgfs_register: add the dbgfs files under this directory. Files will be 360 * @dbgfs_register: add the dbgfs files under this directory. Files will be
@@ -347,11 +367,12 @@ struct iwl_trans_config {
347 * @configure: configure parameters required by the transport layer from 367 * @configure: configure parameters required by the transport layer from
348 * the op_mode. May be called several times before start_fw, can't be 368 * the op_mode. May be called several times before start_fw, can't be
349 * called after that. 369 * called after that.
370 * @set_pmi: set the power pmi state
350 */ 371 */
351struct iwl_trans_ops { 372struct iwl_trans_ops {
352 373
353 int (*start_hw)(struct iwl_trans *iwl_trans); 374 int (*start_hw)(struct iwl_trans *iwl_trans);
354 void (*stop_hw)(struct iwl_trans *iwl_trans); 375 void (*stop_hw)(struct iwl_trans *iwl_trans, bool op_mode_leaving);
355 int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw); 376 int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw);
356 void (*fw_alive)(struct iwl_trans *trans); 377 void (*fw_alive)(struct iwl_trans *trans);
357 void (*stop_device)(struct iwl_trans *trans); 378 void (*stop_device)(struct iwl_trans *trans);
@@ -361,23 +382,15 @@ struct iwl_trans_ops {
361 int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd); 382 int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
362 383
363 int (*tx)(struct iwl_trans *trans, struct sk_buff *skb, 384 int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
364 struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx, 385 struct iwl_device_cmd *dev_cmd, int queue);
365 u8 sta_id, u8 tid); 386 void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
366 int (*reclaim)(struct iwl_trans *trans, int sta_id, int tid, 387 struct sk_buff_head *skbs);
367 int txq_id, int ssn, struct sk_buff_head *skbs);
368 388
369 int (*tx_agg_disable)(struct iwl_trans *trans, 389 void (*tx_agg_setup)(struct iwl_trans *trans, int queue, int fifo,
370 int sta_id, int tid); 390 int sta_id, int tid, int frame_limit, u16 ssn);
371 int (*tx_agg_alloc)(struct iwl_trans *trans, 391 void (*tx_agg_disable)(struct iwl_trans *trans, int queue);
372 int sta_id, int tid);
373 void (*tx_agg_setup)(struct iwl_trans *trans,
374 enum iwl_rxon_context_id ctx, int sta_id, int tid,
375 int frame_limit, u16 ssn);
376
377 void (*free)(struct iwl_trans *trans);
378 392
379 int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir); 393 int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
380 int (*check_stuck_queue)(struct iwl_trans *trans, int q);
381 int (*wait_tx_queue_empty)(struct iwl_trans *trans); 394 int (*wait_tx_queue_empty)(struct iwl_trans *trans);
382#ifdef CONFIG_PM_SLEEP 395#ifdef CONFIG_PM_SLEEP
383 int (*suspend)(struct iwl_trans *trans); 396 int (*suspend)(struct iwl_trans *trans);
@@ -388,6 +401,7 @@ struct iwl_trans_ops {
388 u32 (*read32)(struct iwl_trans *trans, u32 ofs); 401 u32 (*read32)(struct iwl_trans *trans, u32 ofs);
389 void (*configure)(struct iwl_trans *trans, 402 void (*configure)(struct iwl_trans *trans,
390 const struct iwl_trans_config *trans_cfg); 403 const struct iwl_trans_config *trans_cfg);
404 void (*set_pmi)(struct iwl_trans *trans, bool state);
391}; 405};
392 406
393/** 407/**
@@ -406,20 +420,19 @@ enum iwl_trans_state {
406 * 420 *
407 * @ops - pointer to iwl_trans_ops 421 * @ops - pointer to iwl_trans_ops
408 * @op_mode - pointer to the op_mode 422 * @op_mode - pointer to the op_mode
409 * @shrd - pointer to iwl_shared which holds shared data from the upper layer 423 * @cfg - pointer to the configuration
410 * @reg_lock - protect hw register access 424 * @reg_lock - protect hw register access
411 * @dev - pointer to struct device * that represents the device 425 * @dev - pointer to struct device * that represents the device
412 * @hw_id: a u32 with the ID of the device / subdevice. 426 * @hw_id: a u32 with the ID of the device / subdevice.
413 * Set during transport allocation. 427 * Set during transport allocation.
414 * @hw_id_str: a string with info about HW ID. Set during transport allocation. 428 * @hw_id_str: a string with info about HW ID. Set during transport allocation.
415 * @nvm_device_type: indicates OTP or eeprom
416 * @pm_support: set to true in start_hw if link pm is supported 429 * @pm_support: set to true in start_hw if link pm is supported
417 * @wait_command_queue: the wait_queue for SYNC host commands 430 * @wait_command_queue: the wait_queue for SYNC host commands
418 */ 431 */
419struct iwl_trans { 432struct iwl_trans {
420 const struct iwl_trans_ops *ops; 433 const struct iwl_trans_ops *ops;
421 struct iwl_op_mode *op_mode; 434 struct iwl_op_mode *op_mode;
422 struct iwl_shared *shrd; 435 const struct iwl_cfg *cfg;
423 enum iwl_trans_state state; 436 enum iwl_trans_state state;
424 spinlock_t reg_lock; 437 spinlock_t reg_lock;
425 438
@@ -428,7 +441,6 @@ struct iwl_trans {
428 u32 hw_id; 441 u32 hw_id;
429 char hw_id_str[52]; 442 char hw_id_str[52];
430 443
431 int nvm_device_type;
432 bool pm_support; 444 bool pm_support;
433 445
434 wait_queue_head_t wait_command_queue; 446 wait_queue_head_t wait_command_queue;
@@ -457,11 +469,12 @@ static inline int iwl_trans_start_hw(struct iwl_trans *trans)
457 return trans->ops->start_hw(trans); 469 return trans->ops->start_hw(trans);
458} 470}
459 471
460static inline void iwl_trans_stop_hw(struct iwl_trans *trans) 472static inline void iwl_trans_stop_hw(struct iwl_trans *trans,
473 bool op_mode_leaving)
461{ 474{
462 might_sleep(); 475 might_sleep();
463 476
464 trans->ops->stop_hw(trans); 477 trans->ops->stop_hw(trans, op_mode_leaving);
465 478
466 trans->state = IWL_TRANS_NO_FW; 479 trans->state = IWL_TRANS_NO_FW;
467} 480}
@@ -508,60 +521,42 @@ static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
508} 521}
509 522
510static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb, 523static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
511 struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx, 524 struct iwl_device_cmd *dev_cmd, int queue)
512 u8 sta_id, u8 tid)
513{
514 if (trans->state != IWL_TRANS_FW_ALIVE)
515 IWL_ERR(trans, "%s bad state = %d", __func__, trans->state);
516
517 return trans->ops->tx(trans, skb, dev_cmd, ctx, sta_id, tid);
518}
519
520static inline int iwl_trans_reclaim(struct iwl_trans *trans, int sta_id,
521 int tid, int txq_id, int ssn,
522 struct sk_buff_head *skbs)
523{ 525{
524 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, 526 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
525 "%s bad state = %d", __func__, trans->state); 527 "%s bad state = %d", __func__, trans->state);
526 528
527 return trans->ops->reclaim(trans, sta_id, tid, txq_id, ssn, skbs); 529 return trans->ops->tx(trans, skb, dev_cmd, queue);
528} 530}
529 531
530static inline int iwl_trans_tx_agg_disable(struct iwl_trans *trans, 532static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
531 int sta_id, int tid) 533 int ssn, struct sk_buff_head *skbs)
532{ 534{
533 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, 535 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
534 "%s bad state = %d", __func__, trans->state); 536 "%s bad state = %d", __func__, trans->state);
535 537
536 return trans->ops->tx_agg_disable(trans, sta_id, tid); 538 trans->ops->reclaim(trans, queue, ssn, skbs);
537} 539}
538 540
539static inline int iwl_trans_tx_agg_alloc(struct iwl_trans *trans, 541static inline void iwl_trans_tx_agg_disable(struct iwl_trans *trans, int queue)
540 int sta_id, int tid)
541{ 542{
542 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, 543 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
543 "%s bad state = %d", __func__, trans->state); 544 "%s bad state = %d", __func__, trans->state);
544 545
545 return trans->ops->tx_agg_alloc(trans, sta_id, tid); 546 trans->ops->tx_agg_disable(trans, queue);
546} 547}
547 548
548 549static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans, int queue,
549static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans, 550 int fifo, int sta_id, int tid,
550 enum iwl_rxon_context_id ctx, 551 int frame_limit, u16 ssn)
551 int sta_id, int tid,
552 int frame_limit, u16 ssn)
553{ 552{
554 might_sleep(); 553 might_sleep();
555 554
556 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, 555 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
557 "%s bad state = %d", __func__, trans->state); 556 "%s bad state = %d", __func__, trans->state);
558 557
559 trans->ops->tx_agg_setup(trans, ctx, sta_id, tid, frame_limit, ssn); 558 trans->ops->tx_agg_setup(trans, queue, fifo, sta_id, tid,
560} 559 frame_limit, ssn);
561
562static inline void iwl_trans_free(struct iwl_trans *trans)
563{
564 trans->ops->free(trans);
565} 560}
566 561
567static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans) 562static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
@@ -572,13 +567,6 @@ static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
572 return trans->ops->wait_tx_queue_empty(trans); 567 return trans->ops->wait_tx_queue_empty(trans);
573} 568}
574 569
575static inline int iwl_trans_check_stuck_queue(struct iwl_trans *trans, int q)
576{
577 WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
578 "%s bad state = %d", __func__, trans->state);
579
580 return trans->ops->check_stuck_queue(trans, q);
581}
582static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans, 570static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
583 struct dentry *dir) 571 struct dentry *dir)
584{ 572{
@@ -612,20 +600,15 @@ static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
612 return trans->ops->read32(trans, ofs); 600 return trans->ops->read32(trans, ofs);
613} 601}
614 602
603static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
604{
605 trans->ops->set_pmi(trans, state);
606}
607
615/***************************************************** 608/*****************************************************
616* Transport layers implementations + their allocation function 609* driver (transport) register/unregister functions
617******************************************************/ 610******************************************************/
618struct pci_dev;
619struct pci_device_id;
620extern const struct iwl_trans_ops trans_ops_pcie;
621struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd,
622 struct pci_dev *pdev,
623 const struct pci_device_id *ent);
624int __must_check iwl_pci_register_driver(void); 611int __must_check iwl_pci_register_driver(void);
625void iwl_pci_unregister_driver(void); 612void iwl_pci_unregister_driver(void);
626 613
627extern const struct iwl_trans_ops trans_ops_idi;
628struct iwl_trans *iwl_trans_idi_alloc(struct iwl_shared *shrd,
629 void *pdev_void,
630 const void *ent_void);
631#endif /* __iwl_trans_h__ */ 614#endif /* __iwl_trans_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-ucode.c b/drivers/net/wireless/iwlwifi/iwl-ucode.c
index 252828728837..bc40dc68b0f4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-ucode.c
@@ -31,7 +31,6 @@
31#include <linux/init.h> 31#include <linux/init.h>
32 32
33#include "iwl-dev.h" 33#include "iwl-dev.h"
34#include "iwl-core.h"
35#include "iwl-io.h" 34#include "iwl-io.h"
36#include "iwl-agn-hw.h" 35#include "iwl-agn-hw.h"
37#include "iwl-agn.h" 36#include "iwl-agn.h"
@@ -40,37 +39,6 @@
40#include "iwl-fh.h" 39#include "iwl-fh.h"
41#include "iwl-op-mode.h" 40#include "iwl-op-mode.h"
42 41
43static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
44 {COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
45 0, COEX_UNASSOC_IDLE_FLAGS},
46 {COEX_CU_UNASSOC_MANUAL_SCAN_RP, COEX_CU_UNASSOC_MANUAL_SCAN_WP,
47 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
48 {COEX_CU_UNASSOC_AUTO_SCAN_RP, COEX_CU_UNASSOC_AUTO_SCAN_WP,
49 0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
50 {COEX_CU_CALIBRATION_RP, COEX_CU_CALIBRATION_WP,
51 0, COEX_CALIBRATION_FLAGS},
52 {COEX_CU_PERIODIC_CALIBRATION_RP, COEX_CU_PERIODIC_CALIBRATION_WP,
53 0, COEX_PERIODIC_CALIBRATION_FLAGS},
54 {COEX_CU_CONNECTION_ESTAB_RP, COEX_CU_CONNECTION_ESTAB_WP,
55 0, COEX_CONNECTION_ESTAB_FLAGS},
56 {COEX_CU_ASSOCIATED_IDLE_RP, COEX_CU_ASSOCIATED_IDLE_WP,
57 0, COEX_ASSOCIATED_IDLE_FLAGS},
58 {COEX_CU_ASSOC_MANUAL_SCAN_RP, COEX_CU_ASSOC_MANUAL_SCAN_WP,
59 0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
60 {COEX_CU_ASSOC_AUTO_SCAN_RP, COEX_CU_ASSOC_AUTO_SCAN_WP,
61 0, COEX_ASSOC_AUTO_SCAN_FLAGS},
62 {COEX_CU_ASSOC_ACTIVE_LEVEL_RP, COEX_CU_ASSOC_ACTIVE_LEVEL_WP,
63 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
64 {COEX_CU_RF_ON_RP, COEX_CU_RF_ON_WP, 0, COEX_CU_RF_ON_FLAGS},
65 {COEX_CU_RF_OFF_RP, COEX_CU_RF_OFF_WP, 0, COEX_RF_OFF_FLAGS},
66 {COEX_CU_STAND_ALONE_DEBUG_RP, COEX_CU_STAND_ALONE_DEBUG_WP,
67 0, COEX_STAND_ALONE_DEBUG_FLAGS},
68 {COEX_CU_IPAN_ASSOC_LEVEL_RP, COEX_CU_IPAN_ASSOC_LEVEL_WP,
69 0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
70 {COEX_CU_RSRVD1_RP, COEX_CU_RSRVD1_WP, 0, COEX_RSRVD1_FLAGS},
71 {COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS}
72};
73
74/****************************************************************************** 42/******************************************************************************
75 * 43 *
76 * uCode download functions 44 * uCode download functions
@@ -93,7 +61,7 @@ static int iwl_set_Xtal_calib(struct iwl_priv *priv)
93{ 61{
94 struct iwl_calib_xtal_freq_cmd cmd; 62 struct iwl_calib_xtal_freq_cmd cmd;
95 __le16 *xtal_calib = 63 __le16 *xtal_calib =
96 (__le16 *)iwl_eeprom_query_addr(priv->shrd, EEPROM_XTAL); 64 (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_XTAL);
97 65
98 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD); 66 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD);
99 cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]); 67 cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
@@ -105,8 +73,7 @@ static int iwl_set_temperature_offset_calib(struct iwl_priv *priv)
105{ 73{
106 struct iwl_calib_temperature_offset_cmd cmd; 74 struct iwl_calib_temperature_offset_cmd cmd;
107 __le16 *offset_calib = 75 __le16 *offset_calib =
108 (__le16 *)iwl_eeprom_query_addr(priv->shrd, 76 (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE);
109 EEPROM_RAW_TEMPERATURE);
110 77
111 memset(&cmd, 0, sizeof(cmd)); 78 memset(&cmd, 0, sizeof(cmd));
112 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); 79 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
@@ -122,16 +89,15 @@ static int iwl_set_temperature_offset_calib(struct iwl_priv *priv)
122static int iwl_set_temperature_offset_calib_v2(struct iwl_priv *priv) 89static int iwl_set_temperature_offset_calib_v2(struct iwl_priv *priv)
123{ 90{
124 struct iwl_calib_temperature_offset_v2_cmd cmd; 91 struct iwl_calib_temperature_offset_v2_cmd cmd;
125 __le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(priv->shrd, 92 __le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(priv,
126 EEPROM_KELVIN_TEMPERATURE); 93 EEPROM_KELVIN_TEMPERATURE);
127 __le16 *offset_calib_low = 94 __le16 *offset_calib_low =
128 (__le16 *)iwl_eeprom_query_addr(priv->shrd, 95 (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE);
129 EEPROM_RAW_TEMPERATURE);
130 struct iwl_eeprom_calib_hdr *hdr; 96 struct iwl_eeprom_calib_hdr *hdr;
131 97
132 memset(&cmd, 0, sizeof(cmd)); 98 memset(&cmd, 0, sizeof(cmd));
133 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD); 99 iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
134 hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv->shrd, 100 hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
135 EEPROM_CALIB_ALL); 101 EEPROM_CALIB_ALL);
136 memcpy(&cmd.radio_sensor_offset_high, offset_calib_high, 102 memcpy(&cmd.radio_sensor_offset_high, offset_calib_high,
137 sizeof(*offset_calib_high)); 103 sizeof(*offset_calib_high));
@@ -174,30 +140,12 @@ static int iwl_send_calib_cfg(struct iwl_priv *priv)
174 return iwl_dvm_send_cmd(priv, &cmd); 140 return iwl_dvm_send_cmd(priv, &cmd);
175} 141}
176 142
177int iwlagn_rx_calib_result(struct iwl_priv *priv,
178 struct iwl_rx_cmd_buffer *rxb,
179 struct iwl_device_cmd *cmd)
180{
181 struct iwl_rx_packet *pkt = rxb_addr(rxb);
182 struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->data;
183 int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
184
185 /* reduce the size of the length field itself */
186 len -= 4;
187
188 if (iwl_calib_set(priv, hdr, len))
189 IWL_ERR(priv, "Failed to record calibration data %d\n",
190 hdr->op_code);
191
192 return 0;
193}
194
195int iwl_init_alive_start(struct iwl_priv *priv) 143int iwl_init_alive_start(struct iwl_priv *priv)
196{ 144{
197 int ret; 145 int ret;
198 146
199 if (cfg(priv)->bt_params && 147 if (priv->cfg->bt_params &&
200 cfg(priv)->bt_params->advanced_bt_coexist) { 148 priv->cfg->bt_params->advanced_bt_coexist) {
201 /* 149 /*
202 * Tell uCode we are ready to perform calibration 150 * Tell uCode we are ready to perform calibration
203 * need to perform this before any calibration 151 * need to perform this before any calibration
@@ -219,8 +167,8 @@ int iwl_init_alive_start(struct iwl_priv *priv)
219 * temperature offset calibration is only needed for runtime ucode, 167 * temperature offset calibration is only needed for runtime ucode,
220 * so prepare the value now. 168 * so prepare the value now.
221 */ 169 */
222 if (cfg(priv)->need_temp_offset_calib) { 170 if (priv->cfg->need_temp_offset_calib) {
223 if (cfg(priv)->temp_offset_v2) 171 if (priv->cfg->temp_offset_v2)
224 return iwl_set_temperature_offset_calib_v2(priv); 172 return iwl_set_temperature_offset_calib_v2(priv);
225 else 173 else
226 return iwl_set_temperature_offset_calib(priv); 174 return iwl_set_temperature_offset_calib(priv);
@@ -229,29 +177,13 @@ int iwl_init_alive_start(struct iwl_priv *priv)
229 return 0; 177 return 0;
230} 178}
231 179
232static int iwl_send_wimax_coex(struct iwl_priv *priv) 180int iwl_send_wimax_coex(struct iwl_priv *priv)
233{ 181{
234 struct iwl_wimax_coex_cmd coex_cmd; 182 struct iwl_wimax_coex_cmd coex_cmd;
235 183
236 if (cfg(priv)->base_params->support_wimax_coexist) { 184 /* coexistence is disabled */
237 /* UnMask wake up src at associated sleep */ 185 memset(&coex_cmd, 0, sizeof(coex_cmd));
238 coex_cmd.flags = COEX_FLAGS_ASSOC_WA_UNMASK_MSK;
239 186
240 /* UnMask wake up src at unassociated sleep */
241 coex_cmd.flags |= COEX_FLAGS_UNASSOC_WA_UNMASK_MSK;
242 memcpy(coex_cmd.sta_prio, cu_priorities,
243 sizeof(struct iwl_wimax_coex_event_entry) *
244 COEX_NUM_OF_EVENTS);
245
246 /* enabling the coexistence feature */
247 coex_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK;
248
249 /* enabling the priorities tables */
250 coex_cmd.flags |= COEX_FLAGS_STA_TABLE_VALID_MSK;
251 } else {
252 /* coexistence is disabled */
253 memset(&coex_cmd, 0, sizeof(coex_cmd));
254 }
255 return iwl_dvm_send_cmd_pdu(priv, 187 return iwl_dvm_send_cmd_pdu(priv,
256 COEX_PRIORITY_TABLE_CMD, CMD_SYNC, 188 COEX_PRIORITY_TABLE_CMD, CMD_SYNC,
257 sizeof(coex_cmd), &coex_cmd); 189 sizeof(coex_cmd), &coex_cmd);
@@ -311,7 +243,7 @@ static int iwl_alive_notify(struct iwl_priv *priv)
311{ 243{
312 int ret; 244 int ret;
313 245
314 iwl_trans_fw_alive(trans(priv)); 246 iwl_trans_fw_alive(priv->trans);
315 247
316 priv->passive_no_rx = false; 248 priv->passive_no_rx = false;
317 priv->transport_queue_stop = 0; 249 priv->transport_queue_stop = 0;
@@ -320,7 +252,7 @@ static int iwl_alive_notify(struct iwl_priv *priv)
320 if (ret) 252 if (ret)
321 return ret; 253 return ret;
322 254
323 if (!cfg(priv)->no_xtal_calib) { 255 if (!priv->cfg->no_xtal_calib) {
324 ret = iwl_set_Xtal_calib(priv); 256 ret = iwl_set_Xtal_calib(priv);
325 if (ret) 257 if (ret)
326 return ret; 258 return ret;
@@ -349,9 +281,9 @@ static int iwl_verify_sec_sparse(struct iwl_priv *priv,
349 /* read data comes through single port, auto-incr addr */ 281 /* read data comes through single port, auto-incr addr */
350 /* NOTE: Use the debugless read so we don't flood kernel log 282 /* NOTE: Use the debugless read so we don't flood kernel log
351 * if IWL_DL_IO is set */ 283 * if IWL_DL_IO is set */
352 iwl_write_direct32(trans(priv), HBUS_TARG_MEM_RADDR, 284 iwl_write_direct32(priv->trans, HBUS_TARG_MEM_RADDR,
353 i + fw_desc->offset); 285 i + fw_desc->offset);
354 val = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT); 286 val = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
355 if (val != le32_to_cpu(*image)) 287 if (val != le32_to_cpu(*image))
356 return -EIO; 288 return -EIO;
357 } 289 }
@@ -370,14 +302,14 @@ static void iwl_print_mismatch_sec(struct iwl_priv *priv,
370 302
371 IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len); 303 IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);
372 304
373 iwl_write_direct32(trans(priv), HBUS_TARG_MEM_RADDR, 305 iwl_write_direct32(priv->trans, HBUS_TARG_MEM_RADDR,
374 fw_desc->offset); 306 fw_desc->offset);
375 307
376 for (offs = 0; 308 for (offs = 0;
377 offs < len && errors < 20; 309 offs < len && errors < 20;
378 offs += sizeof(u32), image++) { 310 offs += sizeof(u32), image++) {
379 /* read data comes through single port, auto-incr addr */ 311 /* read data comes through single port, auto-incr addr */
380 val = iwl_read32(trans(priv), HBUS_TARG_MEM_RDAT); 312 val = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
381 if (val != le32_to_cpu(*image)) { 313 if (val != le32_to_cpu(*image)) {
382 IWL_ERR(priv, "uCode INST section at " 314 IWL_ERR(priv, "uCode INST section at "
383 "offset 0x%x, is 0x%x, s/b 0x%x\n", 315 "offset 0x%x, is 0x%x, s/b 0x%x\n",
@@ -417,9 +349,8 @@ struct iwl_alive_data {
417 u8 subtype; 349 u8 subtype;
418}; 350};
419 351
420static void iwl_alive_fn(struct iwl_notif_wait_data *notif_wait, 352static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
421 struct iwl_rx_packet *pkt, 353 struct iwl_rx_packet *pkt, void *data)
422 void *data)
423{ 354{
424 struct iwl_priv *priv = 355 struct iwl_priv *priv =
425 container_of(notif_wait, struct iwl_priv, notif_wait); 356 container_of(notif_wait, struct iwl_priv, notif_wait);
@@ -433,13 +364,15 @@ static void iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
433 palive->is_valid, palive->ver_type, 364 palive->is_valid, palive->ver_type,
434 palive->ver_subtype); 365 palive->ver_subtype);
435 366
436 priv->shrd->device_pointers.error_event_table = 367 priv->device_pointers.error_event_table =
437 le32_to_cpu(palive->error_event_table_ptr); 368 le32_to_cpu(palive->error_event_table_ptr);
438 priv->shrd->device_pointers.log_event_table = 369 priv->device_pointers.log_event_table =
439 le32_to_cpu(palive->log_event_table_ptr); 370 le32_to_cpu(palive->log_event_table_ptr);
440 371
441 alive_data->subtype = palive->ver_subtype; 372 alive_data->subtype = palive->ver_subtype;
442 alive_data->valid = palive->is_valid == UCODE_VALID_OK; 373 alive_data->valid = palive->is_valid == UCODE_VALID_OK;
374
375 return true;
443} 376}
444 377
445#define UCODE_ALIVE_TIMEOUT HZ 378#define UCODE_ALIVE_TIMEOUT HZ
@@ -453,9 +386,10 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
453 const struct fw_img *fw; 386 const struct fw_img *fw;
454 int ret; 387 int ret;
455 enum iwl_ucode_type old_type; 388 enum iwl_ucode_type old_type;
389 static const u8 alive_cmd[] = { REPLY_ALIVE };
456 390
457 old_type = priv->shrd->ucode_type; 391 old_type = priv->cur_ucode;
458 priv->shrd->ucode_type = ucode_type; 392 priv->cur_ucode = ucode_type;
459 fw = iwl_get_ucode_image(priv, ucode_type); 393 fw = iwl_get_ucode_image(priv, ucode_type);
460 394
461 priv->ucode_loaded = false; 395 priv->ucode_loaded = false;
@@ -463,12 +397,13 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
463 if (!fw) 397 if (!fw)
464 return -EINVAL; 398 return -EINVAL;
465 399
466 iwl_init_notification_wait(&priv->notif_wait, &alive_wait, REPLY_ALIVE, 400 iwl_init_notification_wait(&priv->notif_wait, &alive_wait,
467 iwl_alive_fn, &alive_data); 401 alive_cmd, ARRAY_SIZE(alive_cmd),
402 iwl_alive_fn, &alive_data);
468 403
469 ret = iwl_trans_start_fw(trans(priv), fw); 404 ret = iwl_trans_start_fw(priv->trans, fw);
470 if (ret) { 405 if (ret) {
471 priv->shrd->ucode_type = old_type; 406 priv->cur_ucode = old_type;
472 iwl_remove_notification(&priv->notif_wait, &alive_wait); 407 iwl_remove_notification(&priv->notif_wait, &alive_wait);
473 return ret; 408 return ret;
474 } 409 }
@@ -480,13 +415,13 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
480 ret = iwl_wait_notification(&priv->notif_wait, &alive_wait, 415 ret = iwl_wait_notification(&priv->notif_wait, &alive_wait,
481 UCODE_ALIVE_TIMEOUT); 416 UCODE_ALIVE_TIMEOUT);
482 if (ret) { 417 if (ret) {
483 priv->shrd->ucode_type = old_type; 418 priv->cur_ucode = old_type;
484 return ret; 419 return ret;
485 } 420 }
486 421
487 if (!alive_data.valid) { 422 if (!alive_data.valid) {
488 IWL_ERR(priv, "Loaded ucode is not valid!\n"); 423 IWL_ERR(priv, "Loaded ucode is not valid!\n");
489 priv->shrd->ucode_type = old_type; 424 priv->cur_ucode = old_type;
490 return -EIO; 425 return -EIO;
491 } 426 }
492 427
@@ -498,7 +433,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
498 if (ucode_type != IWL_UCODE_WOWLAN) { 433 if (ucode_type != IWL_UCODE_WOWLAN) {
499 ret = iwl_verify_ucode(priv, ucode_type); 434 ret = iwl_verify_ucode(priv, ucode_type);
500 if (ret) { 435 if (ret) {
501 priv->shrd->ucode_type = old_type; 436 priv->cur_ucode = old_type;
502 return ret; 437 return ret;
503 } 438 }
504 439
@@ -510,7 +445,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
510 if (ret) { 445 if (ret) {
511 IWL_WARN(priv, 446 IWL_WARN(priv,
512 "Could not complete ALIVE transition: %d\n", ret); 447 "Could not complete ALIVE transition: %d\n", ret);
513 priv->shrd->ucode_type = old_type; 448 priv->cur_ucode = old_type;
514 return ret; 449 return ret;
515 } 450 }
516 451
@@ -519,9 +454,38 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
519 return 0; 454 return 0;
520} 455}
521 456
457static bool iwlagn_wait_calib(struct iwl_notif_wait_data *notif_wait,
458 struct iwl_rx_packet *pkt, void *data)
459{
460 struct iwl_priv *priv = data;
461 struct iwl_calib_hdr *hdr;
462 int len;
463
464 if (pkt->hdr.cmd != CALIBRATION_RES_NOTIFICATION) {
465 WARN_ON(pkt->hdr.cmd != CALIBRATION_COMPLETE_NOTIFICATION);
466 return true;
467 }
468
469 hdr = (struct iwl_calib_hdr *)pkt->data;
470 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
471
472 /* reduce the size by the length field itself */
473 len -= sizeof(__le32);
474
475 if (iwl_calib_set(priv, hdr, len))
476 IWL_ERR(priv, "Failed to record calibration data %d\n",
477 hdr->op_code);
478
479 return false;
480}
481
522int iwl_run_init_ucode(struct iwl_priv *priv) 482int iwl_run_init_ucode(struct iwl_priv *priv)
523{ 483{
524 struct iwl_notification_wait calib_wait; 484 struct iwl_notification_wait calib_wait;
485 static const u8 calib_complete[] = {
486 CALIBRATION_RES_NOTIFICATION,
487 CALIBRATION_COMPLETE_NOTIFICATION
488 };
525 int ret; 489 int ret;
526 490
527 lockdep_assert_held(&priv->mutex); 491 lockdep_assert_held(&priv->mutex);
@@ -534,8 +498,8 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
534 return 0; 498 return 0;
535 499
536 iwl_init_notification_wait(&priv->notif_wait, &calib_wait, 500 iwl_init_notification_wait(&priv->notif_wait, &calib_wait,
537 CALIBRATION_COMPLETE_NOTIFICATION, 501 calib_complete, ARRAY_SIZE(calib_complete),
538 NULL, NULL); 502 iwlagn_wait_calib, priv);
539 503
540 /* Will also start the device */ 504 /* Will also start the device */
541 ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_INIT); 505 ret = iwl_load_ucode_wait_alive(priv, IWL_UCODE_INIT);
@@ -561,7 +525,7 @@ int iwl_run_init_ucode(struct iwl_priv *priv)
561 iwl_remove_notification(&priv->notif_wait, &calib_wait); 525 iwl_remove_notification(&priv->notif_wait, &calib_wait);
562 out: 526 out:
563 /* Whatever happened, stop the device */ 527 /* Whatever happened, stop the device */
564 iwl_trans_stop_device(trans(priv)); 528 iwl_trans_stop_device(priv->trans);
565 priv->ucode_loaded = false; 529 priv->ucode_loaded = false;
566 530
567 return ret; 531 return ret;
diff --git a/drivers/net/wireless/iwmc3200wifi/Kconfig b/drivers/net/wireless/iwmc3200wifi/Kconfig
index 03f998d098c5..7107ce53d4d4 100644
--- a/drivers/net/wireless/iwmc3200wifi/Kconfig
+++ b/drivers/net/wireless/iwmc3200wifi/Kconfig
@@ -1,5 +1,5 @@
1config IWM 1config IWM
2 tristate "Intel Wireless Multicomm 3200 WiFi driver" 2 tristate "Intel Wireless Multicomm 3200 WiFi driver (EXPERIMENTAL)"
3 depends on MMC && EXPERIMENTAL 3 depends on MMC && EXPERIMENTAL
4 depends on CFG80211 4 depends on CFG80211
5 select FW_LOADER 5 select FW_LOADER
diff --git a/drivers/net/wireless/libertas/Makefile b/drivers/net/wireless/libertas/Makefile
index f7d01bfa2e4a..eac72f7bd341 100644
--- a/drivers/net/wireless/libertas/Makefile
+++ b/drivers/net/wireless/libertas/Makefile
@@ -6,6 +6,7 @@ libertas-y += ethtool.o
6libertas-y += main.o 6libertas-y += main.o
7libertas-y += rx.o 7libertas-y += rx.o
8libertas-y += tx.o 8libertas-y += tx.o
9libertas-y += firmware.o
9libertas-$(CONFIG_LIBERTAS_MESH) += mesh.o 10libertas-$(CONFIG_LIBERTAS_MESH) += mesh.o
10 11
11usb8xxx-objs += if_usb.o 12usb8xxx-objs += if_usb.o
diff --git a/drivers/net/wireless/libertas/decl.h b/drivers/net/wireless/libertas/decl.h
index bc951ab4b681..84a3aa7ac570 100644
--- a/drivers/net/wireless/libertas/decl.h
+++ b/drivers/net/wireless/libertas/decl.h
@@ -19,6 +19,10 @@ struct lbs_fw_table {
19}; 19};
20 20
21struct lbs_private; 21struct lbs_private;
22typedef void (*lbs_fw_cb)(struct lbs_private *priv, int ret,
23 const struct firmware *helper, const struct firmware *mainfw);
24
25struct lbs_private;
22struct sk_buff; 26struct sk_buff;
23struct net_device; 27struct net_device;
24struct cmd_ds_command; 28struct cmd_ds_command;
@@ -66,10 +70,13 @@ int lbs_exit_auto_deep_sleep(struct lbs_private *priv);
66u32 lbs_fw_index_to_data_rate(u8 index); 70u32 lbs_fw_index_to_data_rate(u8 index);
67u8 lbs_data_rate_to_fw_index(u32 rate); 71u8 lbs_data_rate_to_fw_index(u32 rate);
68 72
69int lbs_get_firmware(struct device *dev, const char *user_helper, 73int lbs_get_firmware(struct device *dev, u32 card_model,
70 const char *user_mainfw, u32 card_model,
71 const struct lbs_fw_table *fw_table, 74 const struct lbs_fw_table *fw_table,
72 const struct firmware **helper, 75 const struct firmware **helper,
73 const struct firmware **mainfw); 76 const struct firmware **mainfw);
77int lbs_get_firmware_async(struct lbs_private *priv, struct device *device,
78 u32 card_model, const struct lbs_fw_table *fw_table,
79 lbs_fw_cb callback);
80void lbs_wait_for_firmware_load(struct lbs_private *priv);
74 81
75#endif 82#endif
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index f3fd447131c2..672005430aca 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -7,6 +7,7 @@
7#define _LBS_DEV_H_ 7#define _LBS_DEV_H_
8 8
9#include "defs.h" 9#include "defs.h"
10#include "decl.h"
10#include "host.h" 11#include "host.h"
11 12
12#include <linux/kfifo.h> 13#include <linux/kfifo.h>
@@ -180,6 +181,15 @@ struct lbs_private {
180 wait_queue_head_t scan_q; 181 wait_queue_head_t scan_q;
181 /* Whether the scan was initiated internally and not by cfg80211 */ 182 /* Whether the scan was initiated internally and not by cfg80211 */
182 bool internal_scan; 183 bool internal_scan;
184
185 /* Firmware load */
186 u32 fw_model;
187 wait_queue_head_t fw_waitq;
188 struct device *fw_device;
189 const struct firmware *helper_fw;
190 const struct lbs_fw_table *fw_table;
191 const struct lbs_fw_table *fw_iter;
192 lbs_fw_cb fw_callback;
183}; 193};
184 194
185extern struct cmd_confirm_sleep confirm_sleep; 195extern struct cmd_confirm_sleep confirm_sleep;
diff --git a/drivers/net/wireless/libertas/firmware.c b/drivers/net/wireless/libertas/firmware.c
new file mode 100644
index 000000000000..77f6504fd55d
--- /dev/null
+++ b/drivers/net/wireless/libertas/firmware.c
@@ -0,0 +1,223 @@
1/*
2 * Firmware loading and handling functions.
3 */
4
5#include <linux/firmware.h>
6#include <linux/firmware.h>
7#include <linux/module.h>
8#include <linux/sched.h>
9
10#include "dev.h"
11#include "decl.h"
12
13static void load_next_firmware_from_table(struct lbs_private *private);
14
15static void lbs_fw_loaded(struct lbs_private *priv, int ret,
16 const struct firmware *helper, const struct firmware *mainfw)
17{
18 unsigned long flags;
19
20 lbs_deb_fw("firmware load complete, code %d\n", ret);
21
22 /* User must free helper/mainfw */
23 priv->fw_callback(priv, ret, helper, mainfw);
24
25 spin_lock_irqsave(&priv->driver_lock, flags);
26 priv->fw_callback = NULL;
27 wake_up(&priv->fw_waitq);
28 spin_unlock_irqrestore(&priv->driver_lock, flags);
29}
30
31static void do_load_firmware(struct lbs_private *priv, const char *name,
32 void (*cb)(const struct firmware *fw, void *context))
33{
34 int ret;
35
36 lbs_deb_fw("Requesting %s\n", name);
37 ret = request_firmware_nowait(THIS_MODULE, true, name,
38 priv->fw_device, GFP_KERNEL, priv, cb);
39 if (ret) {
40 lbs_deb_fw("request_firmware_nowait error %d\n", ret);
41 lbs_fw_loaded(priv, ret, NULL, NULL);
42 }
43}
44
45static void main_firmware_cb(const struct firmware *firmware, void *context)
46{
47 struct lbs_private *priv = context;
48
49 if (!firmware) {
50 /* Failed to find firmware: try next table entry */
51 load_next_firmware_from_table(priv);
52 return;
53 }
54
55 /* Firmware found! */
56 lbs_fw_loaded(priv, 0, priv->helper_fw, firmware);
57}
58
59static void helper_firmware_cb(const struct firmware *firmware, void *context)
60{
61 struct lbs_private *priv = context;
62
63 if (!firmware) {
64 /* Failed to find firmware: try next table entry */
65 load_next_firmware_from_table(priv);
66 return;
67 }
68
69 /* Firmware found! */
70 if (priv->fw_iter->fwname) {
71 priv->helper_fw = firmware;
72 do_load_firmware(priv, priv->fw_iter->fwname, main_firmware_cb);
73 } else {
74 /* No main firmware needed for this helper --> success! */
75 lbs_fw_loaded(priv, 0, firmware, NULL);
76 }
77}
78
79static void load_next_firmware_from_table(struct lbs_private *priv)
80{
81 const struct lbs_fw_table *iter;
82
83 if (!priv->fw_iter)
84 iter = priv->fw_table;
85 else
86 iter = ++priv->fw_iter;
87
88 if (priv->helper_fw) {
89 release_firmware(priv->helper_fw);
90 priv->helper_fw = NULL;
91 }
92
93next:
94 if (!iter->helper) {
95 /* End of table hit. */
96 lbs_fw_loaded(priv, -ENOENT, NULL, NULL);
97 return;
98 }
99
100 if (iter->model != priv->fw_model) {
101 iter++;
102 goto next;
103 }
104
105 priv->fw_iter = iter;
106 do_load_firmware(priv, iter->helper, helper_firmware_cb);
107}
108
109void lbs_wait_for_firmware_load(struct lbs_private *priv)
110{
111 wait_event(priv->fw_waitq, priv->fw_callback == NULL);
112}
113
114/**
115 * lbs_get_firmware_async - Retrieves firmware asynchronously. Can load
116 * either a helper firmware and a main firmware (2-stage), or just the helper.
117 *
118 * @priv: Pointer to lbs_private instance
119 * @dev: A pointer to &device structure
120 * @card_model: Bus-specific card model ID used to filter firmware table
121 * elements
122 * @fw_table: Table of firmware file names and device model numbers
123 * terminated by an entry with a NULL helper name
124 * @callback: User callback to invoke when firmware load succeeds or fails.
125 */
126int lbs_get_firmware_async(struct lbs_private *priv, struct device *device,
127 u32 card_model, const struct lbs_fw_table *fw_table,
128 lbs_fw_cb callback)
129{
130 unsigned long flags;
131
132 spin_lock_irqsave(&priv->driver_lock, flags);
133 if (priv->fw_callback) {
134 lbs_deb_fw("firmware load already in progress\n");
135 spin_unlock_irqrestore(&priv->driver_lock, flags);
136 return -EBUSY;
137 }
138
139 priv->fw_device = device;
140 priv->fw_callback = callback;
141 priv->fw_table = fw_table;
142 priv->fw_iter = NULL;
143 priv->fw_model = card_model;
144 spin_unlock_irqrestore(&priv->driver_lock, flags);
145
146 lbs_deb_fw("Starting async firmware load\n");
147 load_next_firmware_from_table(priv);
148 return 0;
149}
150EXPORT_SYMBOL_GPL(lbs_get_firmware_async);
151
152/**
153 * lbs_get_firmware - Retrieves two-stage firmware
154 *
155 * @dev: A pointer to &device structure
156 * @card_model: Bus-specific card model ID used to filter firmware table
157 * elements
158 * @fw_table: Table of firmware file names and device model numbers
159 * terminated by an entry with a NULL helper name
160 * @helper: On success, the helper firmware; caller must free
161 * @mainfw: On success, the main firmware; caller must free
162 *
163 * Deprecated: use lbs_get_firmware_async() instead.
164 *
165 * returns: 0 on success, non-zero on failure
166 */
167int lbs_get_firmware(struct device *dev, u32 card_model,
168 const struct lbs_fw_table *fw_table,
169 const struct firmware **helper,
170 const struct firmware **mainfw)
171{
172 const struct lbs_fw_table *iter;
173 int ret;
174
175 BUG_ON(helper == NULL);
176 BUG_ON(mainfw == NULL);
177
178 /* Search for firmware to use from the table. */
179 iter = fw_table;
180 while (iter && iter->helper) {
181 if (iter->model != card_model)
182 goto next;
183
184 if (*helper == NULL) {
185 ret = request_firmware(helper, iter->helper, dev);
186 if (ret)
187 goto next;
188
189 /* If the device has one-stage firmware (ie cf8305) and
190 * we've got it then we don't need to bother with the
191 * main firmware.
192 */
193 if (iter->fwname == NULL)
194 return 0;
195 }
196
197 if (*mainfw == NULL) {
198 ret = request_firmware(mainfw, iter->fwname, dev);
199 if (ret) {
200 /* Clear the helper to ensure we don't have
201 * mismatched firmware pairs.
202 */
203 release_firmware(*helper);
204 *helper = NULL;
205 }
206 }
207
208 if (*helper && *mainfw)
209 return 0;
210
211 next:
212 iter++;
213 }
214
215 /* Failed */
216 release_firmware(*helper);
217 *helper = NULL;
218 release_firmware(*mainfw);
219 *mainfw = NULL;
220
221 return -ENOENT;
222}
223EXPORT_SYMBOL_GPL(lbs_get_firmware);
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index 234ee88dec95..16beaf39dc53 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -738,6 +738,50 @@ done:
738 return ret; 738 return ret;
739} 739}
740 740
741static void if_cs_prog_firmware(struct lbs_private *priv, int ret,
742 const struct firmware *helper,
743 const struct firmware *mainfw)
744{
745 struct if_cs_card *card = priv->card;
746
747 if (ret) {
748 pr_err("failed to find firmware (%d)\n", ret);
749 return;
750 }
751
752 /* Load the firmware */
753 ret = if_cs_prog_helper(card, helper);
754 if (ret == 0 && (card->model != MODEL_8305))
755 ret = if_cs_prog_real(card, mainfw);
756 if (ret)
757 goto out;
758
759 /* Now actually get the IRQ */
760 ret = request_irq(card->p_dev->irq, if_cs_interrupt,
761 IRQF_SHARED, DRV_NAME, card);
762 if (ret) {
763 pr_err("error in request_irq\n");
764 goto out;
765 }
766
767 /*
768 * Clear any interrupt cause that happened while sending
769 * firmware/initializing card
770 */
771 if_cs_write16(card, IF_CS_CARD_INT_CAUSE, IF_CS_BIT_MASK);
772 if_cs_enable_ints(card);
773
774 /* And finally bring the card up */
775 priv->fw_ready = 1;
776 if (lbs_start_card(priv) != 0) {
777 pr_err("could not activate card\n");
778 free_irq(card->p_dev->irq, card);
779 }
780
781out:
782 release_firmware(helper);
783 release_firmware(mainfw);
784}
741 785
742 786
743/********************************************************************/ 787/********************************************************************/
@@ -809,8 +853,6 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
809 unsigned int prod_id; 853 unsigned int prod_id;
810 struct lbs_private *priv; 854 struct lbs_private *priv;
811 struct if_cs_card *card; 855 struct if_cs_card *card;
812 const struct firmware *helper = NULL;
813 const struct firmware *mainfw = NULL;
814 856
815 lbs_deb_enter(LBS_DEB_CS); 857 lbs_deb_enter(LBS_DEB_CS);
816 858
@@ -890,20 +932,6 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
890 goto out2; 932 goto out2;
891 } 933 }
892 934
893 ret = lbs_get_firmware(&p_dev->dev, NULL, NULL, card->model,
894 &fw_table[0], &helper, &mainfw);
895 if (ret) {
896 pr_err("failed to find firmware (%d)\n", ret);
897 goto out2;
898 }
899
900 /* Load the firmware early, before calling into libertas.ko */
901 ret = if_cs_prog_helper(card, helper);
902 if (ret == 0 && (card->model != MODEL_8305))
903 ret = if_cs_prog_real(card, mainfw);
904 if (ret)
905 goto out2;
906
907 /* Make this card known to the libertas driver */ 935 /* Make this card known to the libertas driver */
908 priv = lbs_add_card(card, &p_dev->dev); 936 priv = lbs_add_card(card, &p_dev->dev);
909 if (!priv) { 937 if (!priv) {
@@ -911,37 +939,22 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
911 goto out2; 939 goto out2;
912 } 940 }
913 941
914 /* Finish setting up fields in lbs_private */ 942 /* Set up fields in lbs_private */
915 card->priv = priv; 943 card->priv = priv;
916 priv->card = card; 944 priv->card = card;
917 priv->hw_host_to_card = if_cs_host_to_card; 945 priv->hw_host_to_card = if_cs_host_to_card;
918 priv->enter_deep_sleep = NULL; 946 priv->enter_deep_sleep = NULL;
919 priv->exit_deep_sleep = NULL; 947 priv->exit_deep_sleep = NULL;
920 priv->reset_deep_sleep_wakeup = NULL; 948 priv->reset_deep_sleep_wakeup = NULL;
921 priv->fw_ready = 1;
922 949
923 /* Now actually get the IRQ */ 950 /* Get firmware */
924 ret = request_irq(p_dev->irq, if_cs_interrupt, 951 ret = lbs_get_firmware_async(priv, &p_dev->dev, card->model, fw_table,
925 IRQF_SHARED, DRV_NAME, card); 952 if_cs_prog_firmware);
926 if (ret) { 953 if (ret) {
927 pr_err("error in request_irq\n"); 954 pr_err("failed to find firmware (%d)\n", ret);
928 goto out3;
929 }
930
931 /*
932 * Clear any interrupt cause that happened while sending
933 * firmware/initializing card
934 */
935 if_cs_write16(card, IF_CS_CARD_INT_CAUSE, IF_CS_BIT_MASK);
936 if_cs_enable_ints(card);
937
938 /* And finally bring the card up */
939 if (lbs_start_card(priv) != 0) {
940 pr_err("could not activate card\n");
941 goto out3; 955 goto out3;
942 } 956 }
943 957
944 ret = 0;
945 goto out; 958 goto out;
946 959
947out3: 960out3:
@@ -951,11 +964,6 @@ out2:
951out1: 964out1:
952 pcmcia_disable_device(p_dev); 965 pcmcia_disable_device(p_dev);
953out: 966out:
954 if (helper)
955 release_firmware(helper);
956 if (mainfw)
957 release_firmware(mainfw);
958
959 lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret); 967 lbs_deb_leave_args(LBS_DEB_CS, "ret %d", ret);
960 return ret; 968 return ret;
961} 969}
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 9804ebc892d4..76caebaa4397 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -65,12 +65,6 @@ static void if_sdio_interrupt(struct sdio_func *func);
65 */ 65 */
66static u8 user_rmmod; 66static u8 user_rmmod;
67 67
68static char *lbs_helper_name = NULL;
69module_param_named(helper_name, lbs_helper_name, charp, 0644);
70
71static char *lbs_fw_name = NULL;
72module_param_named(fw_name, lbs_fw_name, charp, 0644);
73
74static const struct sdio_device_id if_sdio_ids[] = { 68static const struct sdio_device_id if_sdio_ids[] = {
75 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 69 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL,
76 SDIO_DEVICE_ID_MARVELL_LIBERTAS) }, 70 SDIO_DEVICE_ID_MARVELL_LIBERTAS) },
@@ -123,11 +117,8 @@ struct if_sdio_card {
123 int model; 117 int model;
124 unsigned long ioport; 118 unsigned long ioport;
125 unsigned int scratch_reg; 119 unsigned int scratch_reg;
126 120 bool started;
127 const char *helper; 121 wait_queue_head_t pwron_waitq;
128 const char *firmware;
129 bool helper_allocated;
130 bool firmware_allocated;
131 122
132 u8 buffer[65536] __attribute__((aligned(4))); 123 u8 buffer[65536] __attribute__((aligned(4)));
133 124
@@ -140,6 +131,9 @@ struct if_sdio_card {
140 u8 rx_unit; 131 u8 rx_unit;
141}; 132};
142 133
134static void if_sdio_finish_power_on(struct if_sdio_card *card);
135static int if_sdio_power_off(struct if_sdio_card *card);
136
143/********************************************************************/ 137/********************************************************************/
144/* I/O */ 138/* I/O */
145/********************************************************************/ 139/********************************************************************/
@@ -680,12 +674,39 @@ out:
680 return ret; 674 return ret;
681} 675}
682 676
677static void if_sdio_do_prog_firmware(struct lbs_private *priv, int ret,
678 const struct firmware *helper,
679 const struct firmware *mainfw)
680{
681 struct if_sdio_card *card = priv->card;
682
683 if (ret) {
684 pr_err("failed to find firmware (%d)\n", ret);
685 return;
686 }
687
688 ret = if_sdio_prog_helper(card, helper);
689 if (ret)
690 goto out;
691
692 lbs_deb_sdio("Helper firmware loaded\n");
693
694 ret = if_sdio_prog_real(card, mainfw);
695 if (ret)
696 goto out;
697
698 lbs_deb_sdio("Firmware loaded\n");
699 if_sdio_finish_power_on(card);
700
701out:
702 release_firmware(helper);
703 release_firmware(mainfw);
704}
705
683static int if_sdio_prog_firmware(struct if_sdio_card *card) 706static int if_sdio_prog_firmware(struct if_sdio_card *card)
684{ 707{
685 int ret; 708 int ret;
686 u16 scratch; 709 u16 scratch;
687 const struct firmware *helper = NULL;
688 const struct firmware *mainfw = NULL;
689 710
690 lbs_deb_enter(LBS_DEB_SDIO); 711 lbs_deb_enter(LBS_DEB_SDIO);
691 712
@@ -719,43 +740,18 @@ static int if_sdio_prog_firmware(struct if_sdio_card *card)
719 */ 740 */
720 if (scratch == IF_SDIO_FIRMWARE_OK) { 741 if (scratch == IF_SDIO_FIRMWARE_OK) {
721 lbs_deb_sdio("firmware already loaded\n"); 742 lbs_deb_sdio("firmware already loaded\n");
722 goto success; 743 if_sdio_finish_power_on(card);
744 return 0;
723 } else if ((card->model == MODEL_8686) && (scratch & 0x7fff)) { 745 } else if ((card->model == MODEL_8686) && (scratch & 0x7fff)) {
724 lbs_deb_sdio("firmware may be running\n"); 746 lbs_deb_sdio("firmware may be running\n");
725 goto success; 747 if_sdio_finish_power_on(card);
726 } 748 return 0;
727
728 ret = lbs_get_firmware(&card->func->dev, lbs_helper_name, lbs_fw_name,
729 card->model, &fw_table[0], &helper, &mainfw);
730 if (ret) {
731 pr_err("failed to find firmware (%d)\n", ret);
732 goto out;
733 } 749 }
734 750
735 ret = if_sdio_prog_helper(card, helper); 751 ret = lbs_get_firmware_async(card->priv, &card->func->dev, card->model,
736 if (ret) 752 fw_table, if_sdio_do_prog_firmware);
737 goto out;
738
739 lbs_deb_sdio("Helper firmware loaded\n");
740
741 ret = if_sdio_prog_real(card, mainfw);
742 if (ret)
743 goto out;
744
745 lbs_deb_sdio("Firmware loaded\n");
746
747success:
748 sdio_claim_host(card->func);
749 sdio_set_block_size(card->func, IF_SDIO_BLOCK_SIZE);
750 sdio_release_host(card->func);
751 ret = 0;
752 753
753out: 754out:
754 if (helper)
755 release_firmware(helper);
756 if (mainfw)
757 release_firmware(mainfw);
758
759 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret); 755 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
760 return ret; 756 return ret;
761} 757}
@@ -764,55 +760,15 @@ out:
764/* Power management */ 760/* Power management */
765/********************************************************************/ 761/********************************************************************/
766 762
767static int if_sdio_power_on(struct if_sdio_card *card) 763/* Finish power on sequence (after firmware is loaded) */
764static void if_sdio_finish_power_on(struct if_sdio_card *card)
768{ 765{
769 struct sdio_func *func = card->func; 766 struct sdio_func *func = card->func;
770 struct lbs_private *priv = card->priv; 767 struct lbs_private *priv = card->priv;
771 struct mmc_host *host = func->card->host;
772 int ret; 768 int ret;
773 769
774 sdio_claim_host(func); 770 sdio_claim_host(func);
775 771 sdio_set_block_size(card->func, IF_SDIO_BLOCK_SIZE);
776 ret = sdio_enable_func(func);
777 if (ret)
778 goto release;
779
780 /* For 1-bit transfers to the 8686 model, we need to enable the
781 * interrupt flag in the CCCR register. Set the MMC_QUIRK_LENIENT_FN0
782 * bit to allow access to non-vendor registers. */
783 if ((card->model == MODEL_8686) &&
784 (host->caps & MMC_CAP_SDIO_IRQ) &&
785 (host->ios.bus_width == MMC_BUS_WIDTH_1)) {
786 u8 reg;
787
788 func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
789 reg = sdio_f0_readb(func, SDIO_CCCR_IF, &ret);
790 if (ret)
791 goto disable;
792
793 reg |= SDIO_BUS_ECSI;
794 sdio_f0_writeb(func, reg, SDIO_CCCR_IF, &ret);
795 if (ret)
796 goto disable;
797 }
798
799 card->ioport = sdio_readb(func, IF_SDIO_IOPORT, &ret);
800 if (ret)
801 goto disable;
802
803 card->ioport |= sdio_readb(func, IF_SDIO_IOPORT + 1, &ret) << 8;
804 if (ret)
805 goto disable;
806
807 card->ioport |= sdio_readb(func, IF_SDIO_IOPORT + 2, &ret) << 16;
808 if (ret)
809 goto disable;
810
811 sdio_release_host(func);
812 ret = if_sdio_prog_firmware(card);
813 sdio_claim_host(func);
814 if (ret)
815 goto disable;
816 772
817 /* 773 /*
818 * Get rx_unit if the chip is SD8688 or newer. 774 * Get rx_unit if the chip is SD8688 or newer.
@@ -837,7 +793,7 @@ static int if_sdio_power_on(struct if_sdio_card *card)
837 */ 793 */
838 ret = sdio_claim_irq(func, if_sdio_interrupt); 794 ret = sdio_claim_irq(func, if_sdio_interrupt);
839 if (ret) 795 if (ret)
840 goto disable; 796 goto release;
841 797
842 /* 798 /*
843 * Enable interrupts now that everything is set up 799 * Enable interrupts now that everything is set up
@@ -863,11 +819,79 @@ static int if_sdio_power_on(struct if_sdio_card *card)
863 } 819 }
864 820
865 priv->fw_ready = 1; 821 priv->fw_ready = 1;
822 wake_up(&card->pwron_waitq);
866 823
867 return 0; 824 if (!card->started) {
825 ret = lbs_start_card(priv);
826 if_sdio_power_off(card);
827 if (ret == 0) {
828 card->started = true;
829 /* Tell PM core that we don't need the card to be
830 * powered now */
831 pm_runtime_put_noidle(&func->dev);
832 }
833 }
834
835 return;
868 836
869release_irq: 837release_irq:
870 sdio_release_irq(func); 838 sdio_release_irq(func);
839release:
840 sdio_release_host(func);
841}
842
843static int if_sdio_power_on(struct if_sdio_card *card)
844{
845 struct sdio_func *func = card->func;
846 struct mmc_host *host = func->card->host;
847 int ret;
848
849 sdio_claim_host(func);
850
851 ret = sdio_enable_func(func);
852 if (ret)
853 goto release;
854
855 /* For 1-bit transfers to the 8686 model, we need to enable the
856 * interrupt flag in the CCCR register. Set the MMC_QUIRK_LENIENT_FN0
857 * bit to allow access to non-vendor registers. */
858 if ((card->model == MODEL_8686) &&
859 (host->caps & MMC_CAP_SDIO_IRQ) &&
860 (host->ios.bus_width == MMC_BUS_WIDTH_1)) {
861 u8 reg;
862
863 func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
864 reg = sdio_f0_readb(func, SDIO_CCCR_IF, &ret);
865 if (ret)
866 goto disable;
867
868 reg |= SDIO_BUS_ECSI;
869 sdio_f0_writeb(func, reg, SDIO_CCCR_IF, &ret);
870 if (ret)
871 goto disable;
872 }
873
874 card->ioport = sdio_readb(func, IF_SDIO_IOPORT, &ret);
875 if (ret)
876 goto disable;
877
878 card->ioport |= sdio_readb(func, IF_SDIO_IOPORT + 1, &ret) << 8;
879 if (ret)
880 goto disable;
881
882 card->ioport |= sdio_readb(func, IF_SDIO_IOPORT + 2, &ret) << 16;
883 if (ret)
884 goto disable;
885
886 sdio_release_host(func);
887 ret = if_sdio_prog_firmware(card);
888 if (ret) {
889 sdio_disable_func(func);
890 return ret;
891 }
892
893 return 0;
894
871disable: 895disable:
872 sdio_disable_func(func); 896 sdio_disable_func(func);
873release: 897release:
@@ -1074,11 +1098,17 @@ static int if_sdio_power_save(struct lbs_private *priv)
1074static int if_sdio_power_restore(struct lbs_private *priv) 1098static int if_sdio_power_restore(struct lbs_private *priv)
1075{ 1099{
1076 struct if_sdio_card *card = priv->card; 1100 struct if_sdio_card *card = priv->card;
1101 int r;
1077 1102
1078 /* Make sure the card will not be powered off by runtime PM */ 1103 /* Make sure the card will not be powered off by runtime PM */
1079 pm_runtime_get_sync(&card->func->dev); 1104 pm_runtime_get_sync(&card->func->dev);
1080 1105
1081 return if_sdio_power_on(card); 1106 r = if_sdio_power_on(card);
1107 if (r)
1108 return r;
1109
1110 wait_event(card->pwron_waitq, priv->fw_ready);
1111 return 0;
1082} 1112}
1083 1113
1084 1114
@@ -1179,6 +1209,7 @@ static int if_sdio_probe(struct sdio_func *func,
1179 spin_lock_init(&card->lock); 1209 spin_lock_init(&card->lock);
1180 card->workqueue = create_workqueue("libertas_sdio"); 1210 card->workqueue = create_workqueue("libertas_sdio");
1181 INIT_WORK(&card->packet_worker, if_sdio_host_to_card_worker); 1211 INIT_WORK(&card->packet_worker, if_sdio_host_to_card_worker);
1212 init_waitqueue_head(&card->pwron_waitq);
1182 1213
1183 /* Check if we support this card */ 1214 /* Check if we support this card */
1184 for (i = 0; i < ARRAY_SIZE(fw_table); i++) { 1215 for (i = 0; i < ARRAY_SIZE(fw_table); i++) {
@@ -1220,14 +1251,6 @@ static int if_sdio_probe(struct sdio_func *func,
1220 if (ret) 1251 if (ret)
1221 goto err_activate_card; 1252 goto err_activate_card;
1222 1253
1223 ret = lbs_start_card(priv);
1224 if_sdio_power_off(card);
1225 if (ret)
1226 goto err_activate_card;
1227
1228 /* Tell PM core that we don't need the card to be powered now */
1229 pm_runtime_put_noidle(&func->dev);
1230
1231out: 1254out:
1232 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret); 1255 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
1233 1256
@@ -1244,10 +1267,6 @@ free:
1244 kfree(packet); 1267 kfree(packet);
1245 } 1268 }
1246 1269
1247 if (card->helper_allocated)
1248 kfree(card->helper);
1249 if (card->firmware_allocated)
1250 kfree(card->firmware);
1251 kfree(card); 1270 kfree(card);
1252 1271
1253 goto out; 1272 goto out;
@@ -1295,12 +1314,6 @@ static void if_sdio_remove(struct sdio_func *func)
1295 kfree(packet); 1314 kfree(packet);
1296 } 1315 }
1297 1316
1298 if (card->helper_allocated)
1299 kfree(card->helper);
1300 if (card->firmware_allocated)
1301 kfree(card->firmware);
1302 kfree(card);
1303
1304 lbs_deb_leave(LBS_DEB_SDIO); 1317 lbs_deb_leave(LBS_DEB_SDIO);
1305} 1318}
1306 1319
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 50b1ee7721e9..9604a1c4a74d 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -1064,9 +1064,8 @@ static int if_spi_init_card(struct if_spi_card *card)
1064 goto out; 1064 goto out;
1065 } 1065 }
1066 1066
1067 err = lbs_get_firmware(&card->spi->dev, NULL, NULL, 1067 err = lbs_get_firmware(&card->spi->dev, card->card_id,
1068 card->card_id, &fw_table[0], &helper, 1068 &fw_table[0], &helper, &mainfw);
1069 &mainfw);
1070 if (err) { 1069 if (err) {
1071 netdev_err(priv->dev, "failed to find firmware (%d)\n", 1070 netdev_err(priv->dev, "failed to find firmware (%d)\n",
1072 err); 1071 err);
@@ -1095,10 +1094,8 @@ static int if_spi_init_card(struct if_spi_card *card)
1095 goto out; 1094 goto out;
1096 1095
1097out: 1096out:
1098 if (helper) 1097 release_firmware(helper);
1099 release_firmware(helper); 1098 release_firmware(mainfw);
1100 if (mainfw)
1101 release_firmware(mainfw);
1102 1099
1103 lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err); 1100 lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err);
1104 1101
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index 74da5f1ea243..75403e6e3990 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -29,9 +29,6 @@
29 29
30#define MESSAGE_HEADER_LEN 4 30#define MESSAGE_HEADER_LEN 4
31 31
32static char *lbs_fw_name = NULL;
33module_param_named(fw_name, lbs_fw_name, charp, 0644);
34
35MODULE_FIRMWARE("libertas/usb8388_v9.bin"); 32MODULE_FIRMWARE("libertas/usb8388_v9.bin");
36MODULE_FIRMWARE("libertas/usb8388_v5.bin"); 33MODULE_FIRMWARE("libertas/usb8388_v5.bin");
37MODULE_FIRMWARE("libertas/usb8388.bin"); 34MODULE_FIRMWARE("libertas/usb8388.bin");
@@ -44,6 +41,16 @@ enum {
44 MODEL_8682 = 0x2 41 MODEL_8682 = 0x2
45}; 42};
46 43
44/* table of firmware file names */
45static const struct lbs_fw_table fw_table[] = {
46 { MODEL_8388, "libertas/usb8388_olpc.bin", NULL },
47 { MODEL_8388, "libertas/usb8388_v9.bin", NULL },
48 { MODEL_8388, "libertas/usb8388_v5.bin", NULL },
49 { MODEL_8388, "libertas/usb8388.bin", NULL },
50 { MODEL_8388, "usb8388.bin", NULL },
51 { MODEL_8682, "libertas/usb8682.bin", NULL }
52};
53
47static struct usb_device_id if_usb_table[] = { 54static struct usb_device_id if_usb_table[] = {
48 /* Enter the device signature inside */ 55 /* Enter the device signature inside */
49 { USB_DEVICE(0x1286, 0x2001), .driver_info = MODEL_8388 }, 56 { USB_DEVICE(0x1286, 0x2001), .driver_info = MODEL_8388 },
@@ -55,10 +62,9 @@ MODULE_DEVICE_TABLE(usb, if_usb_table);
55 62
56static void if_usb_receive(struct urb *urb); 63static void if_usb_receive(struct urb *urb);
57static void if_usb_receive_fwload(struct urb *urb); 64static void if_usb_receive_fwload(struct urb *urb);
58static int __if_usb_prog_firmware(struct if_usb_card *cardp, 65static void if_usb_prog_firmware(struct lbs_private *priv, int ret,
59 const char *fwname, int cmd); 66 const struct firmware *fw,
60static int if_usb_prog_firmware(struct if_usb_card *cardp, 67 const struct firmware *unused);
61 const char *fwname, int cmd);
62static int if_usb_host_to_card(struct lbs_private *priv, uint8_t type, 68static int if_usb_host_to_card(struct lbs_private *priv, uint8_t type,
63 uint8_t *payload, uint16_t nb); 69 uint8_t *payload, uint16_t nb);
64static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload, 70static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload,
@@ -67,69 +73,6 @@ static void if_usb_free(struct if_usb_card *cardp);
67static int if_usb_submit_rx_urb(struct if_usb_card *cardp); 73static int if_usb_submit_rx_urb(struct if_usb_card *cardp);
68static int if_usb_reset_device(struct if_usb_card *cardp); 74static int if_usb_reset_device(struct if_usb_card *cardp);
69 75
70/* sysfs hooks */
71
72/*
73 * Set function to write firmware to device's persistent memory
74 */
75static ssize_t if_usb_firmware_set(struct device *dev,
76 struct device_attribute *attr, const char *buf, size_t count)
77{
78 struct lbs_private *priv = to_net_dev(dev)->ml_priv;
79 struct if_usb_card *cardp = priv->card;
80 int ret;
81
82 BUG_ON(buf == NULL);
83
84 ret = if_usb_prog_firmware(cardp, buf, BOOT_CMD_UPDATE_FW);
85 if (ret == 0)
86 return count;
87
88 return ret;
89}
90
91/*
92 * lbs_flash_fw attribute to be exported per ethX interface through sysfs
93 * (/sys/class/net/ethX/lbs_flash_fw). Use this like so to write firmware to
94 * the device's persistent memory:
95 * echo usb8388-5.126.0.p5.bin > /sys/class/net/ethX/lbs_flash_fw
96 */
97static DEVICE_ATTR(lbs_flash_fw, 0200, NULL, if_usb_firmware_set);
98
99/**
100 * if_usb_boot2_set - write firmware to device's persistent memory
101 *
102 * @dev: target device
103 * @attr: device attributes
104 * @buf: firmware buffer to write
105 * @count: number of bytes to write
106 *
107 * returns: number of bytes written or negative error code
108 */
109static ssize_t if_usb_boot2_set(struct device *dev,
110 struct device_attribute *attr, const char *buf, size_t count)
111{
112 struct lbs_private *priv = to_net_dev(dev)->ml_priv;
113 struct if_usb_card *cardp = priv->card;
114 int ret;
115
116 BUG_ON(buf == NULL);
117
118 ret = if_usb_prog_firmware(cardp, buf, BOOT_CMD_UPDATE_BOOT2);
119 if (ret == 0)
120 return count;
121
122 return ret;
123}
124
125/*
126 * lbs_flash_boot2 attribute to be exported per ethX interface through sysfs
127 * (/sys/class/net/ethX/lbs_flash_boot2). Use this like so to write firmware
128 * to the device's persistent memory:
129 * echo usb8388-5.126.0.p5.bin > /sys/class/net/ethX/lbs_flash_boot2
130 */
131static DEVICE_ATTR(lbs_flash_boot2, 0200, NULL, if_usb_boot2_set);
132
133/** 76/**
134 * if_usb_write_bulk_callback - callback function to handle the status 77 * if_usb_write_bulk_callback - callback function to handle the status
135 * of the URB 78 * of the URB
@@ -256,6 +199,7 @@ static int if_usb_probe(struct usb_interface *intf,
256 struct usb_endpoint_descriptor *endpoint; 199 struct usb_endpoint_descriptor *endpoint;
257 struct lbs_private *priv; 200 struct lbs_private *priv;
258 struct if_usb_card *cardp; 201 struct if_usb_card *cardp;
202 int r = -ENOMEM;
259 int i; 203 int i;
260 204
261 udev = interface_to_usbdev(intf); 205 udev = interface_to_usbdev(intf);
@@ -313,20 +257,10 @@ static int if_usb_probe(struct usb_interface *intf,
313 goto dealloc; 257 goto dealloc;
314 } 258 }
315 259
316 /* Upload firmware */
317 kparam_block_sysfs_write(fw_name);
318 if (__if_usb_prog_firmware(cardp, lbs_fw_name, BOOT_CMD_FW_BY_USB)) {
319 kparam_unblock_sysfs_write(fw_name);
320 lbs_deb_usbd(&udev->dev, "FW upload failed\n");
321 goto err_prog_firmware;
322 }
323 kparam_unblock_sysfs_write(fw_name);
324
325 if (!(priv = lbs_add_card(cardp, &intf->dev))) 260 if (!(priv = lbs_add_card(cardp, &intf->dev)))
326 goto err_prog_firmware; 261 goto err_add_card;
327 262
328 cardp->priv = priv; 263 cardp->priv = priv;
329 cardp->priv->fw_ready = 1;
330 264
331 priv->hw_host_to_card = if_usb_host_to_card; 265 priv->hw_host_to_card = if_usb_host_to_card;
332 priv->enter_deep_sleep = NULL; 266 priv->enter_deep_sleep = NULL;
@@ -339,42 +273,25 @@ static int if_usb_probe(struct usb_interface *intf,
339 273
340 cardp->boot2_version = udev->descriptor.bcdDevice; 274 cardp->boot2_version = udev->descriptor.bcdDevice;
341 275
342 if_usb_submit_rx_urb(cardp);
343
344 if (lbs_start_card(priv))
345 goto err_start_card;
346
347 if_usb_setup_firmware(priv);
348
349 usb_get_dev(udev); 276 usb_get_dev(udev);
350 usb_set_intfdata(intf, cardp); 277 usb_set_intfdata(intf, cardp);
351 278
352 if (device_create_file(&priv->dev->dev, &dev_attr_lbs_flash_fw)) 279 r = lbs_get_firmware_async(priv, &udev->dev, cardp->model,
353 netdev_err(priv->dev, 280 fw_table, if_usb_prog_firmware);
354 "cannot register lbs_flash_fw attribute\n"); 281 if (r)
355 282 goto err_get_fw;
356 if (device_create_file(&priv->dev->dev, &dev_attr_lbs_flash_boot2))
357 netdev_err(priv->dev,
358 "cannot register lbs_flash_boot2 attribute\n");
359
360 /*
361 * EHS_REMOVE_WAKEUP is not supported on all versions of the firmware.
362 */
363 priv->wol_criteria = EHS_REMOVE_WAKEUP;
364 if (lbs_host_sleep_cfg(priv, priv->wol_criteria, NULL))
365 priv->ehs_remove_supported = false;
366 283
367 return 0; 284 return 0;
368 285
369err_start_card: 286err_get_fw:
370 lbs_remove_card(priv); 287 lbs_remove_card(priv);
371err_prog_firmware: 288err_add_card:
372 if_usb_reset_device(cardp); 289 if_usb_reset_device(cardp);
373dealloc: 290dealloc:
374 if_usb_free(cardp); 291 if_usb_free(cardp);
375 292
376error: 293error:
377 return -ENOMEM; 294 return r;
378} 295}
379 296
380/** 297/**
@@ -389,9 +306,6 @@ static void if_usb_disconnect(struct usb_interface *intf)
389 306
390 lbs_deb_enter(LBS_DEB_MAIN); 307 lbs_deb_enter(LBS_DEB_MAIN);
391 308
392 device_remove_file(&priv->dev->dev, &dev_attr_lbs_flash_boot2);
393 device_remove_file(&priv->dev->dev, &dev_attr_lbs_flash_fw);
394
395 cardp->surprise_removed = 1; 309 cardp->surprise_removed = 1;
396 310
397 if (priv) { 311 if (priv) {
@@ -912,121 +826,22 @@ static int check_fwfile_format(const uint8_t *data, uint32_t totlen)
912 return ret; 826 return ret;
913} 827}
914 828
915 829static void if_usb_prog_firmware(struct lbs_private *priv, int ret,
916/** 830 const struct firmware *fw,
917* if_usb_prog_firmware - programs the firmware subject to cmd 831 const struct firmware *unused)
918*
919* @cardp: the if_usb_card descriptor
920* @fwname: firmware or boot2 image file name
921* @cmd: either BOOT_CMD_FW_BY_USB, BOOT_CMD_UPDATE_FW,
922* or BOOT_CMD_UPDATE_BOOT2.
923* returns: 0 or error code
924*/
925static int if_usb_prog_firmware(struct if_usb_card *cardp,
926 const char *fwname, int cmd)
927{
928 struct lbs_private *priv = cardp->priv;
929 unsigned long flags, caps;
930 int ret;
931
932 caps = priv->fwcapinfo;
933 if (((cmd == BOOT_CMD_UPDATE_FW) && !(caps & FW_CAPINFO_FIRMWARE_UPGRADE)) ||
934 ((cmd == BOOT_CMD_UPDATE_BOOT2) && !(caps & FW_CAPINFO_BOOT2_UPGRADE)))
935 return -EOPNOTSUPP;
936
937 /* Ensure main thread is idle. */
938 spin_lock_irqsave(&priv->driver_lock, flags);
939 while (priv->cur_cmd != NULL || priv->dnld_sent != DNLD_RES_RECEIVED) {
940 spin_unlock_irqrestore(&priv->driver_lock, flags);
941 if (wait_event_interruptible(priv->waitq,
942 (priv->cur_cmd == NULL &&
943 priv->dnld_sent == DNLD_RES_RECEIVED))) {
944 return -ERESTARTSYS;
945 }
946 spin_lock_irqsave(&priv->driver_lock, flags);
947 }
948 priv->dnld_sent = DNLD_BOOTCMD_SENT;
949 spin_unlock_irqrestore(&priv->driver_lock, flags);
950
951 ret = __if_usb_prog_firmware(cardp, fwname, cmd);
952
953 spin_lock_irqsave(&priv->driver_lock, flags);
954 priv->dnld_sent = DNLD_RES_RECEIVED;
955 spin_unlock_irqrestore(&priv->driver_lock, flags);
956
957 wake_up(&priv->waitq);
958
959 return ret;
960}
961
962/* table of firmware file names */
963static const struct {
964 u32 model;
965 const char *fwname;
966} fw_table[] = {
967 { MODEL_8388, "libertas/usb8388_v9.bin" },
968 { MODEL_8388, "libertas/usb8388_v5.bin" },
969 { MODEL_8388, "libertas/usb8388.bin" },
970 { MODEL_8388, "usb8388.bin" },
971 { MODEL_8682, "libertas/usb8682.bin" }
972};
973
974#ifdef CONFIG_OLPC
975
976static int try_olpc_fw(struct if_usb_card *cardp)
977{
978 int retval = -ENOENT;
979
980 /* try the OLPC firmware first; fall back to fw_table list */
981 if (machine_is_olpc() && cardp->model == MODEL_8388)
982 retval = request_firmware(&cardp->fw,
983 "libertas/usb8388_olpc.bin", &cardp->udev->dev);
984 return retval;
985}
986
987#else
988static int try_olpc_fw(struct if_usb_card *cardp) { return -ENOENT; }
989#endif /* !CONFIG_OLPC */
990
991static int get_fw(struct if_usb_card *cardp, const char *fwname)
992{
993 int i;
994
995 /* Try user-specified firmware first */
996 if (fwname)
997 return request_firmware(&cardp->fw, fwname, &cardp->udev->dev);
998
999 /* Handle OLPC firmware */
1000 if (try_olpc_fw(cardp) == 0)
1001 return 0;
1002
1003 /* Otherwise search for firmware to use */
1004 for (i = 0; i < ARRAY_SIZE(fw_table); i++) {
1005 if (fw_table[i].model != cardp->model)
1006 continue;
1007 if (request_firmware(&cardp->fw, fw_table[i].fwname,
1008 &cardp->udev->dev) == 0)
1009 return 0;
1010 }
1011
1012 return -ENOENT;
1013}
1014
1015static int __if_usb_prog_firmware(struct if_usb_card *cardp,
1016 const char *fwname, int cmd)
1017{ 832{
833 struct if_usb_card *cardp = priv->card;
1018 int i = 0; 834 int i = 0;
1019 static int reset_count = 10; 835 static int reset_count = 10;
1020 int ret = 0;
1021 836
1022 lbs_deb_enter(LBS_DEB_USB); 837 lbs_deb_enter(LBS_DEB_USB);
1023 838
1024 ret = get_fw(cardp, fwname);
1025 if (ret) { 839 if (ret) {
1026 pr_err("failed to find firmware (%d)\n", ret); 840 pr_err("failed to find firmware (%d)\n", ret);
1027 goto done; 841 goto done;
1028 } 842 }
1029 843
844 cardp->fw = fw;
1030 if (check_fwfile_format(cardp->fw->data, cardp->fw->size)) { 845 if (check_fwfile_format(cardp->fw->data, cardp->fw->size)) {
1031 ret = -EINVAL; 846 ret = -EINVAL;
1032 goto release_fw; 847 goto release_fw;
@@ -1053,7 +868,7 @@ restart:
1053 do { 868 do {
1054 int j = 0; 869 int j = 0;
1055 i++; 870 i++;
1056 if_usb_issue_boot_command(cardp, cmd); 871 if_usb_issue_boot_command(cardp, BOOT_CMD_FW_BY_USB);
1057 /* wait for command response */ 872 /* wait for command response */
1058 do { 873 do {
1059 j++; 874 j++;
@@ -1109,13 +924,27 @@ restart:
1109 goto release_fw; 924 goto release_fw;
1110 } 925 }
1111 926
927 cardp->priv->fw_ready = 1;
928 if_usb_submit_rx_urb(cardp);
929
930 if (lbs_start_card(priv))
931 goto release_fw;
932
933 if_usb_setup_firmware(priv);
934
935 /*
936 * EHS_REMOVE_WAKEUP is not supported on all versions of the firmware.
937 */
938 priv->wol_criteria = EHS_REMOVE_WAKEUP;
939 if (lbs_host_sleep_cfg(priv, priv->wol_criteria, NULL))
940 priv->ehs_remove_supported = false;
941
1112 release_fw: 942 release_fw:
1113 release_firmware(cardp->fw); 943 release_firmware(cardp->fw);
1114 cardp->fw = NULL; 944 cardp->fw = NULL;
1115 945
1116 done: 946 done:
1117 lbs_deb_leave_args(LBS_DEB_USB, "ret %d", ret); 947 lbs_deb_leave(LBS_DEB_USB);
1118 return ret;
1119} 948}
1120 949
1121 950
@@ -1128,8 +957,10 @@ static int if_usb_suspend(struct usb_interface *intf, pm_message_t message)
1128 957
1129 lbs_deb_enter(LBS_DEB_USB); 958 lbs_deb_enter(LBS_DEB_USB);
1130 959
1131 if (priv->psstate != PS_STATE_FULL_POWER) 960 if (priv->psstate != PS_STATE_FULL_POWER) {
1132 return -1; 961 ret = -1;
962 goto out;
963 }
1133 964
1134#ifdef CONFIG_OLPC 965#ifdef CONFIG_OLPC
1135 if (machine_is_olpc()) { 966 if (machine_is_olpc()) {
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 957681dede17..e96ee0aa8439 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -878,6 +878,7 @@ static int lbs_init_adapter(struct lbs_private *priv)
878 priv->is_host_sleep_configured = 0; 878 priv->is_host_sleep_configured = 0;
879 priv->is_host_sleep_activated = 0; 879 priv->is_host_sleep_activated = 0;
880 init_waitqueue_head(&priv->host_sleep_q); 880 init_waitqueue_head(&priv->host_sleep_q);
881 init_waitqueue_head(&priv->fw_waitq);
881 mutex_init(&priv->lock); 882 mutex_init(&priv->lock);
882 883
883 setup_timer(&priv->command_timer, lbs_cmd_timeout_handler, 884 setup_timer(&priv->command_timer, lbs_cmd_timeout_handler,
@@ -1033,7 +1034,11 @@ void lbs_remove_card(struct lbs_private *priv)
1033 lbs_deb_enter(LBS_DEB_MAIN); 1034 lbs_deb_enter(LBS_DEB_MAIN);
1034 1035
1035 lbs_remove_mesh(priv); 1036 lbs_remove_mesh(priv);
1036 lbs_scan_deinit(priv); 1037
1038 if (priv->wiphy_registered)
1039 lbs_scan_deinit(priv);
1040
1041 lbs_wait_for_firmware_load(priv);
1037 1042
1038 /* worker thread destruction blocks on the in-flight command which 1043 /* worker thread destruction blocks on the in-flight command which
1039 * should have been cleared already in lbs_stop_card(). 1044 * should have been cleared already in lbs_stop_card().
@@ -1128,6 +1133,11 @@ void lbs_stop_card(struct lbs_private *priv)
1128 goto out; 1133 goto out;
1129 dev = priv->dev; 1134 dev = priv->dev;
1130 1135
1136 /* If the netdev isn't registered, it means that lbs_start_card() was
1137 * never called so we have nothing to do here. */
1138 if (dev->reg_state != NETREG_REGISTERED)
1139 goto out;
1140
1131 netif_stop_queue(dev); 1141 netif_stop_queue(dev);
1132 netif_carrier_off(dev); 1142 netif_carrier_off(dev);
1133 1143
@@ -1177,111 +1187,6 @@ void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx)
1177} 1187}
1178EXPORT_SYMBOL_GPL(lbs_notify_command_response); 1188EXPORT_SYMBOL_GPL(lbs_notify_command_response);
1179 1189
1180/**
1181 * lbs_get_firmware - Retrieves two-stage firmware
1182 *
1183 * @dev: A pointer to &device structure
1184 * @user_helper: User-defined helper firmware file
1185 * @user_mainfw: User-defined main firmware file
1186 * @card_model: Bus-specific card model ID used to filter firmware table
1187 * elements
1188 * @fw_table: Table of firmware file names and device model numbers
1189 * terminated by an entry with a NULL helper name
1190 * @helper: On success, the helper firmware; caller must free
1191 * @mainfw: On success, the main firmware; caller must free
1192 *
1193 * returns: 0 on success, non-zero on failure
1194 */
1195int lbs_get_firmware(struct device *dev, const char *user_helper,
1196 const char *user_mainfw, u32 card_model,
1197 const struct lbs_fw_table *fw_table,
1198 const struct firmware **helper,
1199 const struct firmware **mainfw)
1200{
1201 const struct lbs_fw_table *iter;
1202 int ret;
1203
1204 BUG_ON(helper == NULL);
1205 BUG_ON(mainfw == NULL);
1206
1207 /* Try user-specified firmware first */
1208 if (user_helper) {
1209 ret = request_firmware(helper, user_helper, dev);
1210 if (ret) {
1211 dev_err(dev, "couldn't find helper firmware %s\n",
1212 user_helper);
1213 goto fail;
1214 }
1215 }
1216 if (user_mainfw) {
1217 ret = request_firmware(mainfw, user_mainfw, dev);
1218 if (ret) {
1219 dev_err(dev, "couldn't find main firmware %s\n",
1220 user_mainfw);
1221 goto fail;
1222 }
1223 }
1224
1225 if (*helper && *mainfw)
1226 return 0;
1227
1228 /* Otherwise search for firmware to use. If neither the helper or
1229 * the main firmware were specified by the user, then we need to
1230 * make sure that found helper & main are from the same entry in
1231 * fw_table.
1232 */
1233 iter = fw_table;
1234 while (iter && iter->helper) {
1235 if (iter->model != card_model)
1236 goto next;
1237
1238 if (*helper == NULL) {
1239 ret = request_firmware(helper, iter->helper, dev);
1240 if (ret)
1241 goto next;
1242
1243 /* If the device has one-stage firmware (ie cf8305) and
1244 * we've got it then we don't need to bother with the
1245 * main firmware.
1246 */
1247 if (iter->fwname == NULL)
1248 return 0;
1249 }
1250
1251 if (*mainfw == NULL) {
1252 ret = request_firmware(mainfw, iter->fwname, dev);
1253 if (ret && !user_helper) {
1254 /* Clear the helper if it wasn't user-specified
1255 * and the main firmware load failed, to ensure
1256 * we don't have mismatched firmware pairs.
1257 */
1258 release_firmware(*helper);
1259 *helper = NULL;
1260 }
1261 }
1262
1263 if (*helper && *mainfw)
1264 return 0;
1265
1266 next:
1267 iter++;
1268 }
1269
1270 fail:
1271 /* Failed */
1272 if (*helper) {
1273 release_firmware(*helper);
1274 *helper = NULL;
1275 }
1276 if (*mainfw) {
1277 release_firmware(*mainfw);
1278 *mainfw = NULL;
1279 }
1280
1281 return -ENOENT;
1282}
1283EXPORT_SYMBOL_GPL(lbs_get_firmware);
1284
1285static int __init lbs_init_module(void) 1190static int __init lbs_init_module(void)
1286{ 1191{
1287 lbs_deb_enter(LBS_DEB_MAIN); 1192 lbs_deb_enter(LBS_DEB_MAIN);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index b7ce6a6e355f..03c0c6b1372c 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -582,11 +582,13 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
582 goto nla_put_failure; 582 goto nla_put_failure;
583 } 583 }
584 584
585 NLA_PUT(skb, HWSIM_ATTR_ADDR_TRANSMITTER, 585 if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER,
586 sizeof(struct mac_address), data->addresses[1].addr); 586 sizeof(struct mac_address), data->addresses[1].addr))
587 goto nla_put_failure;
587 588
588 /* We get the skb->data */ 589 /* We get the skb->data */
589 NLA_PUT(skb, HWSIM_ATTR_FRAME, my_skb->len, my_skb->data); 590 if (nla_put(skb, HWSIM_ATTR_FRAME, my_skb->len, my_skb->data))
591 goto nla_put_failure;
590 592
591 /* We get the flags for this transmission, and we translate them to 593 /* We get the flags for this transmission, and we translate them to
592 wmediumd flags */ 594 wmediumd flags */
@@ -597,7 +599,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
597 if (info->flags & IEEE80211_TX_CTL_NO_ACK) 599 if (info->flags & IEEE80211_TX_CTL_NO_ACK)
598 hwsim_flags |= HWSIM_TX_CTL_NO_ACK; 600 hwsim_flags |= HWSIM_TX_CTL_NO_ACK;
599 601
600 NLA_PUT_U32(skb, HWSIM_ATTR_FLAGS, hwsim_flags); 602 if (nla_put_u32(skb, HWSIM_ATTR_FLAGS, hwsim_flags))
603 goto nla_put_failure;
601 604
602 /* We get the tx control (rate and retries) info*/ 605 /* We get the tx control (rate and retries) info*/
603 606
@@ -606,12 +609,14 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
606 tx_attempts[i].count = info->status.rates[i].count; 609 tx_attempts[i].count = info->status.rates[i].count;
607 } 610 }
608 611
609 NLA_PUT(skb, HWSIM_ATTR_TX_INFO, 612 if (nla_put(skb, HWSIM_ATTR_TX_INFO,
610 sizeof(struct hwsim_tx_rate)*IEEE80211_TX_MAX_RATES, 613 sizeof(struct hwsim_tx_rate)*IEEE80211_TX_MAX_RATES,
611 tx_attempts); 614 tx_attempts))
615 goto nla_put_failure;
612 616
613 /* We create a cookie to identify this skb */ 617 /* We create a cookie to identify this skb */
614 NLA_PUT_U64(skb, HWSIM_ATTR_COOKIE, (unsigned long) my_skb); 618 if (nla_put_u64(skb, HWSIM_ATTR_COOKIE, (unsigned long) my_skb))
619 goto nla_put_failure;
615 620
616 genlmsg_end(skb, msg_head); 621 genlmsg_end(skb, msg_head);
617 genlmsg_unicast(&init_net, skb, dst_pid); 622 genlmsg_unicast(&init_net, skb, dst_pid);
@@ -632,6 +637,7 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
632 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 637 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
633 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 638 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
634 struct ieee80211_rx_status rx_status; 639 struct ieee80211_rx_status rx_status;
640 struct ieee80211_rate *txrate = ieee80211_get_tx_rate(hw, info);
635 641
636 if (data->idle) { 642 if (data->idle) {
637 wiphy_debug(hw->wiphy, "Trying to TX when idle - reject\n"); 643 wiphy_debug(hw->wiphy, "Trying to TX when idle - reject\n");
@@ -666,6 +672,7 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
666 spin_lock(&hwsim_radio_lock); 672 spin_lock(&hwsim_radio_lock);
667 list_for_each_entry(data2, &hwsim_radios, list) { 673 list_for_each_entry(data2, &hwsim_radios, list) {
668 struct sk_buff *nskb; 674 struct sk_buff *nskb;
675 struct ieee80211_mgmt *mgmt;
669 676
670 if (data == data2) 677 if (data == data2)
671 continue; 678 continue;
@@ -683,8 +690,18 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
683 690
684 if (mac80211_hwsim_addr_match(data2, hdr->addr1)) 691 if (mac80211_hwsim_addr_match(data2, hdr->addr1))
685 ack = true; 692 ack = true;
693
694 /* set bcn timestamp relative to receiver mactime */
686 rx_status.mactime = 695 rx_status.mactime =
687 le64_to_cpu(__mac80211_hwsim_get_tsf(data2)); 696 le64_to_cpu(__mac80211_hwsim_get_tsf(data2));
697 mgmt = (struct ieee80211_mgmt *) nskb->data;
698 if (ieee80211_is_beacon(mgmt->frame_control) ||
699 ieee80211_is_probe_resp(mgmt->frame_control))
700 mgmt->u.beacon.timestamp = cpu_to_le64(
701 rx_status.mactime +
702 (data->tsf_offset - data2->tsf_offset) +
703 24 * 8 * 10 / txrate->bitrate);
704
688 memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status)); 705 memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status));
689 ieee80211_rx_irqsafe(data2->hw, nskb); 706 ieee80211_rx_irqsafe(data2->hw, nskb);
690 } 707 }
@@ -698,12 +715,6 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
698 bool ack; 715 bool ack;
699 struct ieee80211_tx_info *txi; 716 struct ieee80211_tx_info *txi;
700 u32 _pid; 717 u32 _pid;
701 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) skb->data;
702 struct mac80211_hwsim_data *data = hw->priv;
703
704 if (ieee80211_is_beacon(mgmt->frame_control) ||
705 ieee80211_is_probe_resp(mgmt->frame_control))
706 mgmt->u.beacon.timestamp = __mac80211_hwsim_get_tsf(data);
707 718
708 mac80211_hwsim_monitor_rx(hw, skb); 719 mac80211_hwsim_monitor_rx(hw, skb);
709 720
@@ -800,11 +811,9 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
800 struct ieee80211_vif *vif) 811 struct ieee80211_vif *vif)
801{ 812{
802 struct ieee80211_hw *hw = arg; 813 struct ieee80211_hw *hw = arg;
803 struct mac80211_hwsim_data *data = hw->priv;
804 struct sk_buff *skb; 814 struct sk_buff *skb;
805 struct ieee80211_tx_info *info; 815 struct ieee80211_tx_info *info;
806 u32 _pid; 816 u32 _pid;
807 struct ieee80211_mgmt *mgmt;
808 817
809 hwsim_check_magic(vif); 818 hwsim_check_magic(vif);
810 819
@@ -818,9 +827,6 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
818 return; 827 return;
819 info = IEEE80211_SKB_CB(skb); 828 info = IEEE80211_SKB_CB(skb);
820 829
821 mgmt = (struct ieee80211_mgmt *) skb->data;
822 mgmt->u.beacon.timestamp = __mac80211_hwsim_get_tsf(data);
823
824 mac80211_hwsim_monitor_rx(hw, skb); 830 mac80211_hwsim_monitor_rx(hw, skb);
825 831
826 /* wmediumd mode check */ 832 /* wmediumd mode check */
@@ -1108,7 +1114,8 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
1108 nla_total_size(sizeof(u32))); 1114 nla_total_size(sizeof(u32)));
1109 if (!skb) 1115 if (!skb)
1110 return -ENOMEM; 1116 return -ENOMEM;
1111 NLA_PUT_U32(skb, HWSIM_TM_ATTR_PS, hwsim->ps); 1117 if (nla_put_u32(skb, HWSIM_TM_ATTR_PS, hwsim->ps))
1118 goto nla_put_failure;
1112 return cfg80211_testmode_reply(skb); 1119 return cfg80211_testmode_reply(skb);
1113 default: 1120 default:
1114 return -EOPNOTSUPP; 1121 return -EOPNOTSUPP;
@@ -1444,7 +1451,7 @@ DEFINE_SIMPLE_ATTRIBUTE(hwsim_fops_group,
1444 hwsim_fops_group_read, hwsim_fops_group_write, 1451 hwsim_fops_group_read, hwsim_fops_group_write,
1445 "%llx\n"); 1452 "%llx\n");
1446 1453
1447struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr( 1454static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(
1448 struct mac_address *addr) 1455 struct mac_address *addr)
1449{ 1456{
1450 struct mac80211_hwsim_data *data; 1457 struct mac80211_hwsim_data *data;
@@ -1789,9 +1796,11 @@ static int __init init_mac80211_hwsim(void)
1789 IEEE80211_HW_SIGNAL_DBM | 1796 IEEE80211_HW_SIGNAL_DBM |
1790 IEEE80211_HW_SUPPORTS_STATIC_SMPS | 1797 IEEE80211_HW_SUPPORTS_STATIC_SMPS |
1791 IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | 1798 IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
1792 IEEE80211_HW_AMPDU_AGGREGATION; 1799 IEEE80211_HW_AMPDU_AGGREGATION |
1800 IEEE80211_HW_WANT_MONITOR_VIF;
1793 1801
1794 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; 1802 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
1803 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
1795 1804
1796 /* ask mac80211 to reserve space for magic */ 1805 /* ask mac80211 to reserve space for magic */
1797 hw->vif_data_size = sizeof(struct hwsim_vif_priv); 1806 hw->vif_data_size = sizeof(struct hwsim_vif_priv);
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index a5e182b5e944..fe8ebfebcc0e 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -350,25 +350,26 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
350 ret_len += sizeof(struct mwifiex_ie_types_htcap); 350 ret_len += sizeof(struct mwifiex_ie_types_htcap);
351 } 351 }
352 352
353 if (bss_desc->bcn_ht_info) { 353 if (bss_desc->bcn_ht_oper) {
354 if (priv->bss_mode == NL80211_IFTYPE_ADHOC) { 354 if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
355 ht_info = (struct mwifiex_ie_types_htinfo *) *buffer; 355 ht_info = (struct mwifiex_ie_types_htinfo *) *buffer;
356 memset(ht_info, 0, 356 memset(ht_info, 0,
357 sizeof(struct mwifiex_ie_types_htinfo)); 357 sizeof(struct mwifiex_ie_types_htinfo));
358 ht_info->header.type = 358 ht_info->header.type =
359 cpu_to_le16(WLAN_EID_HT_INFORMATION); 359 cpu_to_le16(WLAN_EID_HT_OPERATION);
360 ht_info->header.len = 360 ht_info->header.len =
361 cpu_to_le16(sizeof(struct ieee80211_ht_info)); 361 cpu_to_le16(
362 sizeof(struct ieee80211_ht_operation));
362 363
363 memcpy((u8 *) ht_info + 364 memcpy((u8 *) ht_info +
364 sizeof(struct mwifiex_ie_types_header), 365 sizeof(struct mwifiex_ie_types_header),
365 (u8 *) bss_desc->bcn_ht_info + 366 (u8 *) bss_desc->bcn_ht_oper +
366 sizeof(struct ieee_types_header), 367 sizeof(struct ieee_types_header),
367 le16_to_cpu(ht_info->header.len)); 368 le16_to_cpu(ht_info->header.len));
368 369
369 if (!(sband->ht_cap.cap & 370 if (!(sband->ht_cap.cap &
370 IEEE80211_HT_CAP_SUP_WIDTH_20_40)) 371 IEEE80211_HT_CAP_SUP_WIDTH_20_40))
371 ht_info->ht_info.ht_param &= 372 ht_info->ht_oper.ht_param &=
372 ~(IEEE80211_HT_PARAM_CHAN_WIDTH_ANY | 373 ~(IEEE80211_HT_PARAM_CHAN_WIDTH_ANY |
373 IEEE80211_HT_PARAM_CHA_SEC_OFFSET); 374 IEEE80211_HT_PARAM_CHA_SEC_OFFSET);
374 375
@@ -385,16 +386,16 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
385 sizeof(struct mwifiex_ie_types_chan_list_param_set) - 386 sizeof(struct mwifiex_ie_types_chan_list_param_set) -
386 sizeof(struct mwifiex_ie_types_header)); 387 sizeof(struct mwifiex_ie_types_header));
387 chan_list->chan_scan_param[0].chan_number = 388 chan_list->chan_scan_param[0].chan_number =
388 bss_desc->bcn_ht_info->control_chan; 389 bss_desc->bcn_ht_oper->primary_chan;
389 chan_list->chan_scan_param[0].radio_type = 390 chan_list->chan_scan_param[0].radio_type =
390 mwifiex_band_to_radio_type((u8) bss_desc->bss_band); 391 mwifiex_band_to_radio_type((u8) bss_desc->bss_band);
391 392
392 if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 && 393 if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 &&
393 bss_desc->bcn_ht_info->ht_param & 394 bss_desc->bcn_ht_oper->ht_param &
394 IEEE80211_HT_PARAM_CHAN_WIDTH_ANY) 395 IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)
395 SET_SECONDARYCHAN(chan_list->chan_scan_param[0]. 396 SET_SECONDARYCHAN(chan_list->chan_scan_param[0].
396 radio_type, 397 radio_type,
397 (bss_desc->bcn_ht_info->ht_param & 398 (bss_desc->bcn_ht_oper->ht_param &
398 IEEE80211_HT_PARAM_CHA_SEC_OFFSET)); 399 IEEE80211_HT_PARAM_CHA_SEC_OFFSET));
399 400
400 *buffer += sizeof(struct mwifiex_ie_types_chan_list_param_set); 401 *buffer += sizeof(struct mwifiex_ie_types_chan_list_param_set);
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 9eefb2a0ce9f..ab84eb943749 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -233,21 +233,27 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
233 233
234 skb_push(skb_aggr, headroom); 234 skb_push(skb_aggr, headroom);
235 235
236 /* 236 if (adapter->iface_type == MWIFIEX_USB) {
237 * Padding per MSDU will affect the length of next 237 adapter->data_sent = true;
238 * packet and hence the exact length of next packet 238 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA,
239 * is uncertain here. 239 skb_aggr, NULL);
240 * 240 } else {
241 * Also, aggregation of transmission buffer, while 241 /*
242 * downloading the data to the card, wont gain much 242 * Padding per MSDU will affect the length of next
243 * on the AMSDU packets as the AMSDU packets utilizes 243 * packet and hence the exact length of next packet
244 * the transmission buffer space to the maximum 244 * is uncertain here.
245 * (adapter->tx_buf_size). 245 *
246 */ 246 * Also, aggregation of transmission buffer, while
247 tx_param.next_pkt_len = 0; 247 * downloading the data to the card, wont gain much
248 248 * on the AMSDU packets as the AMSDU packets utilizes
249 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA, 249 * the transmission buffer space to the maximum
250 skb_aggr, &tx_param); 250 * (adapter->tx_buf_size).
251 */
252 tx_param.next_pkt_len = 0;
253
254 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
255 skb_aggr, &tx_param);
256 }
251 switch (ret) { 257 switch (ret) {
252 case -EBUSY: 258 case -EBUSY:
253 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags); 259 spin_lock_irqsave(&priv->wmm.ra_list_spinlock, ra_list_flags);
diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig
index 2a078cea830a..7f91cc694787 100644
--- a/drivers/net/wireless/mwifiex/Kconfig
+++ b/drivers/net/wireless/mwifiex/Kconfig
@@ -30,3 +30,14 @@ config MWIFIEX_PCIE
30 30
31 If you choose to build it as a module, it will be called 31 If you choose to build it as a module, it will be called
32 mwifiex_pcie. 32 mwifiex_pcie.
33
34config MWIFIEX_USB
35 tristate "Marvell WiFi-Ex Driver for USB8797"
36 depends on MWIFIEX && USB
37 select FW_LOADER
38 ---help---
39 This adds support for wireless adapters based on Marvell
40 Avastar 88W8797 chipset with USB interface.
41
42 If you choose to build it as a module, it will be called
43 mwifiex_usb.
diff --git a/drivers/net/wireless/mwifiex/Makefile b/drivers/net/wireless/mwifiex/Makefile
index b0257ad1bbed..5c1a46bf1e11 100644
--- a/drivers/net/wireless/mwifiex/Makefile
+++ b/drivers/net/wireless/mwifiex/Makefile
@@ -42,3 +42,6 @@ obj-$(CONFIG_MWIFIEX_SDIO) += mwifiex_sdio.o
42 42
43mwifiex_pcie-y += pcie.o 43mwifiex_pcie-y += pcie.o
44obj-$(CONFIG_MWIFIEX_PCIE) += mwifiex_pcie.o 44obj-$(CONFIG_MWIFIEX_PCIE) += mwifiex_pcie.o
45
46mwifiex_usb-y += usb.o
47obj-$(CONFIG_MWIFIEX_USB) += mwifiex_usb.o
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 65050384c42b..c78ea873a63a 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -516,25 +516,23 @@ static int
516mwifiex_dump_station_info(struct mwifiex_private *priv, 516mwifiex_dump_station_info(struct mwifiex_private *priv,
517 struct station_info *sinfo) 517 struct station_info *sinfo)
518{ 518{
519 struct mwifiex_ds_get_signal signal;
520 struct mwifiex_rate_cfg rate; 519 struct mwifiex_rate_cfg rate;
521 int ret = 0;
522 520
523 sinfo->filled = STATION_INFO_RX_BYTES | STATION_INFO_TX_BYTES | 521 sinfo->filled = STATION_INFO_RX_BYTES | STATION_INFO_TX_BYTES |
524 STATION_INFO_RX_PACKETS | 522 STATION_INFO_RX_PACKETS | STATION_INFO_TX_PACKETS |
525 STATION_INFO_TX_PACKETS 523 STATION_INFO_TX_BITRATE |
526 | STATION_INFO_SIGNAL | STATION_INFO_TX_BITRATE; 524 STATION_INFO_SIGNAL | STATION_INFO_SIGNAL_AVG;
527 525
528 /* Get signal information from the firmware */ 526 /* Get signal information from the firmware */
529 memset(&signal, 0, sizeof(struct mwifiex_ds_get_signal)); 527 if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_RSSI_INFO,
530 if (mwifiex_get_signal_info(priv, &signal)) { 528 HostCmd_ACT_GEN_GET, 0, NULL)) {
531 dev_err(priv->adapter->dev, "getting signal information\n"); 529 dev_err(priv->adapter->dev, "failed to get signal information\n");
532 ret = -EFAULT; 530 return -EFAULT;
533 } 531 }
534 532
535 if (mwifiex_drv_get_data_rate(priv, &rate)) { 533 if (mwifiex_drv_get_data_rate(priv, &rate)) {
536 dev_err(priv->adapter->dev, "getting data rate\n"); 534 dev_err(priv->adapter->dev, "getting data rate\n");
537 ret = -EFAULT; 535 return -EFAULT;
538 } 536 }
539 537
540 /* Get DTIM period information from firmware */ 538 /* Get DTIM period information from firmware */
@@ -557,11 +555,12 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
557 sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; 555 sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
558 } 556 }
559 557
558 sinfo->signal_avg = priv->bcn_rssi_avg;
560 sinfo->rx_bytes = priv->stats.rx_bytes; 559 sinfo->rx_bytes = priv->stats.rx_bytes;
561 sinfo->tx_bytes = priv->stats.tx_bytes; 560 sinfo->tx_bytes = priv->stats.tx_bytes;
562 sinfo->rx_packets = priv->stats.rx_packets; 561 sinfo->rx_packets = priv->stats.rx_packets;
563 sinfo->tx_packets = priv->stats.tx_packets; 562 sinfo->tx_packets = priv->stats.tx_packets;
564 sinfo->signal = priv->qual_level; 563 sinfo->signal = priv->bcn_rssi_avg;
565 /* bit rate is in 500 kb/s units. Convert it to 100kb/s units */ 564 /* bit rate is in 500 kb/s units. Convert it to 100kb/s units */
566 sinfo->txrate.legacy = rate.rate * 5; 565 sinfo->txrate.legacy = rate.rate * 5;
567 566
@@ -581,7 +580,7 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
581 priv->curr_bss_params.bss_descriptor.beacon_period; 580 priv->curr_bss_params.bss_descriptor.beacon_period;
582 } 581 }
583 582
584 return ret; 583 return 0;
585} 584}
586 585
587/* 586/*
@@ -604,6 +603,23 @@ mwifiex_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev,
604 return mwifiex_dump_station_info(priv, sinfo); 603 return mwifiex_dump_station_info(priv, sinfo);
605} 604}
606 605
606/*
607 * CFG802.11 operation handler to dump station information.
608 */
609static int
610mwifiex_cfg80211_dump_station(struct wiphy *wiphy, struct net_device *dev,
611 int idx, u8 *mac, struct station_info *sinfo)
612{
613 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
614
615 if (!priv->media_connected || idx)
616 return -ENOENT;
617
618 memcpy(mac, priv->cfg_bssid, ETH_ALEN);
619
620 return mwifiex_dump_station_info(priv, sinfo);
621}
622
607/* Supported rates to be advertised to the cfg80211 */ 623/* Supported rates to be advertised to the cfg80211 */
608 624
609static struct ieee80211_rate mwifiex_rates[] = { 625static struct ieee80211_rate mwifiex_rates[] = {
@@ -750,6 +766,45 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
750} 766}
751 767
752/* 768/*
769 * CFG802.11 operation handler for connection quality monitoring.
770 *
771 * This function subscribes/unsubscribes HIGH_RSSI and LOW_RSSI
772 * events to FW.
773 */
774static int mwifiex_cfg80211_set_cqm_rssi_config(struct wiphy *wiphy,
775 struct net_device *dev,
776 s32 rssi_thold, u32 rssi_hyst)
777{
778 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
779 struct mwifiex_ds_misc_subsc_evt subsc_evt;
780
781 priv->cqm_rssi_thold = rssi_thold;
782 priv->cqm_rssi_hyst = rssi_hyst;
783
784 memset(&subsc_evt, 0x00, sizeof(struct mwifiex_ds_misc_subsc_evt));
785 subsc_evt.events = BITMASK_BCN_RSSI_LOW | BITMASK_BCN_RSSI_HIGH;
786
787 /* Subscribe/unsubscribe low and high rssi events */
788 if (rssi_thold && rssi_hyst) {
789 subsc_evt.action = HostCmd_ACT_BITWISE_SET;
790 subsc_evt.bcn_l_rssi_cfg.abs_value = abs(rssi_thold);
791 subsc_evt.bcn_h_rssi_cfg.abs_value = abs(rssi_thold);
792 subsc_evt.bcn_l_rssi_cfg.evt_freq = 1;
793 subsc_evt.bcn_h_rssi_cfg.evt_freq = 1;
794 return mwifiex_send_cmd_sync(priv,
795 HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
796 0, 0, &subsc_evt);
797 } else {
798 subsc_evt.action = HostCmd_ACT_BITWISE_CLR;
799 return mwifiex_send_cmd_sync(priv,
800 HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
801 0, 0, &subsc_evt);
802 }
803
804 return 0;
805}
806
807/*
753 * CFG802.11 operation handler for disconnection request. 808 * CFG802.11 operation handler for disconnection request.
754 * 809 *
755 * This function does not work when there is already a disconnection 810 * This function does not work when there is already a disconnection
@@ -1107,6 +1162,17 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, struct net_device *dev,
1107 priv->user_scan_cfg->num_ssids = request->n_ssids; 1162 priv->user_scan_cfg->num_ssids = request->n_ssids;
1108 priv->user_scan_cfg->ssid_list = request->ssids; 1163 priv->user_scan_cfg->ssid_list = request->ssids;
1109 1164
1165 if (request->ie && request->ie_len) {
1166 for (i = 0; i < MWIFIEX_MAX_VSIE_NUM; i++) {
1167 if (priv->vs_ie[i].mask != MWIFIEX_VSIE_MASK_CLEAR)
1168 continue;
1169 priv->vs_ie[i].mask = MWIFIEX_VSIE_MASK_SCAN;
1170 memcpy(&priv->vs_ie[i].ie, request->ie,
1171 request->ie_len);
1172 break;
1173 }
1174 }
1175
1110 for (i = 0; i < request->n_channels; i++) { 1176 for (i = 0; i < request->n_channels; i++) {
1111 chan = request->channels[i]; 1177 chan = request->channels[i];
1112 priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value; 1178 priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value;
@@ -1124,6 +1190,15 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, struct net_device *dev,
1124 if (mwifiex_set_user_scan_ioctl(priv, priv->user_scan_cfg)) 1190 if (mwifiex_set_user_scan_ioctl(priv, priv->user_scan_cfg))
1125 return -EFAULT; 1191 return -EFAULT;
1126 1192
1193 if (request->ie && request->ie_len) {
1194 for (i = 0; i < MWIFIEX_MAX_VSIE_NUM; i++) {
1195 if (priv->vs_ie[i].mask == MWIFIEX_VSIE_MASK_SCAN) {
1196 priv->vs_ie[i].mask = MWIFIEX_VSIE_MASK_CLEAR;
1197 memset(&priv->vs_ie[i].ie, 0,
1198 MWIFIEX_MAX_VSIE_LEN);
1199 }
1200 }
1201 }
1127 return 0; 1202 return 0;
1128} 1203}
1129 1204
@@ -1340,6 +1415,7 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
1340 .connect = mwifiex_cfg80211_connect, 1415 .connect = mwifiex_cfg80211_connect,
1341 .disconnect = mwifiex_cfg80211_disconnect, 1416 .disconnect = mwifiex_cfg80211_disconnect,
1342 .get_station = mwifiex_cfg80211_get_station, 1417 .get_station = mwifiex_cfg80211_get_station,
1418 .dump_station = mwifiex_cfg80211_dump_station,
1343 .set_wiphy_params = mwifiex_cfg80211_set_wiphy_params, 1419 .set_wiphy_params = mwifiex_cfg80211_set_wiphy_params,
1344 .set_channel = mwifiex_cfg80211_set_channel, 1420 .set_channel = mwifiex_cfg80211_set_channel,
1345 .join_ibss = mwifiex_cfg80211_join_ibss, 1421 .join_ibss = mwifiex_cfg80211_join_ibss,
@@ -1350,6 +1426,7 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
1350 .set_power_mgmt = mwifiex_cfg80211_set_power_mgmt, 1426 .set_power_mgmt = mwifiex_cfg80211_set_power_mgmt,
1351 .set_tx_power = mwifiex_cfg80211_set_tx_power, 1427 .set_tx_power = mwifiex_cfg80211_set_tx_power,
1352 .set_bitrate_mask = mwifiex_cfg80211_set_bitrate_mask, 1428 .set_bitrate_mask = mwifiex_cfg80211_set_bitrate_mask,
1429 .set_cqm_rssi_config = mwifiex_cfg80211_set_cqm_rssi_config,
1353}; 1430};
1354 1431
1355/* 1432/*
@@ -1365,6 +1442,7 @@ int mwifiex_register_cfg80211(struct mwifiex_private *priv)
1365 void *wdev_priv; 1442 void *wdev_priv;
1366 struct wireless_dev *wdev; 1443 struct wireless_dev *wdev;
1367 struct ieee80211_sta_ht_cap *ht_info; 1444 struct ieee80211_sta_ht_cap *ht_info;
1445 u8 *country_code;
1368 1446
1369 wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL); 1447 wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
1370 if (!wdev) { 1448 if (!wdev) {
@@ -1381,6 +1459,7 @@ int mwifiex_register_cfg80211(struct mwifiex_private *priv)
1381 } 1459 }
1382 wdev->iftype = NL80211_IFTYPE_STATION; 1460 wdev->iftype = NL80211_IFTYPE_STATION;
1383 wdev->wiphy->max_scan_ssids = 10; 1461 wdev->wiphy->max_scan_ssids = 10;
1462 wdev->wiphy->max_scan_ie_len = MWIFIEX_MAX_VSIE_LEN;
1384 wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | 1463 wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
1385 BIT(NL80211_IFTYPE_ADHOC); 1464 BIT(NL80211_IFTYPE_ADHOC);
1386 1465
@@ -1403,8 +1482,8 @@ int mwifiex_register_cfg80211(struct mwifiex_private *priv)
1403 memcpy(wdev->wiphy->perm_addr, priv->curr_addr, ETH_ALEN); 1482 memcpy(wdev->wiphy->perm_addr, priv->curr_addr, ETH_ALEN);
1404 wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; 1483 wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
1405 1484
1406 /* Reserve space for bss band information */ 1485 /* Reserve space for mwifiex specific private data for BSS */
1407 wdev->wiphy->bss_priv_size = sizeof(u8); 1486 wdev->wiphy->bss_priv_size = sizeof(struct mwifiex_bss_priv);
1408 1487
1409 wdev->wiphy->reg_notifier = mwifiex_reg_notifier; 1488 wdev->wiphy->reg_notifier = mwifiex_reg_notifier;
1410 1489
@@ -1427,6 +1506,11 @@ int mwifiex_register_cfg80211(struct mwifiex_private *priv)
1427 "info: successfully registered wiphy device\n"); 1506 "info: successfully registered wiphy device\n");
1428 } 1507 }
1429 1508
1509 country_code = mwifiex_11d_code_2_region(priv->adapter->region_code);
1510 if (country_code && regulatory_hint(wdev->wiphy, country_code))
1511 dev_err(priv->adapter->dev,
1512 "%s: regulatory_hint failed\n", __func__);
1513
1430 priv->wdev = wdev; 1514 priv->wdev = wdev;
1431 1515
1432 return ret; 1516 return ret;
diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c
index 2fe1c33765b8..560871b0e236 100644
--- a/drivers/net/wireless/mwifiex/cfp.c
+++ b/drivers/net/wireless/mwifiex/cfp.c
@@ -71,6 +71,37 @@ u16 region_code_index[MWIFIEX_MAX_REGION_CODE] = { 0x10, 0x20, 0x30,
71 71
72static u8 supported_rates_n[N_SUPPORTED_RATES] = { 0x02, 0x04, 0 }; 72static u8 supported_rates_n[N_SUPPORTED_RATES] = { 0x02, 0x04, 0 };
73 73
74struct region_code_mapping {
75 u8 code;
76 u8 region[IEEE80211_COUNTRY_STRING_LEN];
77};
78
79static struct region_code_mapping region_code_mapping_t[] = {
80 { 0x10, "US " }, /* US FCC */
81 { 0x20, "CA " }, /* IC Canada */
82 { 0x30, "EU " }, /* ETSI */
83 { 0x31, "ES " }, /* Spain */
84 { 0x32, "FR " }, /* France */
85 { 0x40, "JP " }, /* Japan */
86 { 0x41, "JP " }, /* Japan */
87 { 0x50, "CN " }, /* China */
88};
89
90/* This function converts integer code to region string */
91u8 *mwifiex_11d_code_2_region(u8 code)
92{
93 u8 i;
94 u8 size = sizeof(region_code_mapping_t)/
95 sizeof(struct region_code_mapping);
96
97 /* Look for code in mapping table */
98 for (i = 0; i < size; i++)
99 if (region_code_mapping_t[i].code == code)
100 return region_code_mapping_t[i].region;
101
102 return NULL;
103}
104
74/* 105/*
75 * This function maps an index in supported rates table into 106 * This function maps an index in supported rates table into
76 * the corresponding data rate. 107 * the corresponding data rate.
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 07f6e0092552..1710beffb93a 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -139,6 +139,7 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
139 uint16_t cmd_size; 139 uint16_t cmd_size;
140 struct timeval tstamp; 140 struct timeval tstamp;
141 unsigned long flags; 141 unsigned long flags;
142 __le32 tmp;
142 143
143 if (!adapter || !cmd_node) 144 if (!adapter || !cmd_node)
144 return -1; 145 return -1;
@@ -178,15 +179,28 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv,
178 le16_to_cpu(*(__le16 *) ((u8 *) host_cmd + S_DS_GEN)), cmd_size, 179 le16_to_cpu(*(__le16 *) ((u8 *) host_cmd + S_DS_GEN)), cmd_size,
179 le16_to_cpu(host_cmd->seq_num)); 180 le16_to_cpu(host_cmd->seq_num));
180 181
181 skb_push(cmd_node->cmd_skb, INTF_HEADER_LEN); 182 if (adapter->iface_type == MWIFIEX_USB) {
182 183 tmp = cpu_to_le32(MWIFIEX_USB_TYPE_CMD);
183 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_CMD, 184 skb_push(cmd_node->cmd_skb, MWIFIEX_TYPE_LEN);
184 cmd_node->cmd_skb, NULL); 185 memcpy(cmd_node->cmd_skb->data, &tmp, MWIFIEX_TYPE_LEN);
185 186 adapter->cmd_sent = true;
186 skb_pull(cmd_node->cmd_skb, INTF_HEADER_LEN); 187 ret = adapter->if_ops.host_to_card(adapter,
188 MWIFIEX_USB_EP_CMD_EVENT,
189 cmd_node->cmd_skb, NULL);
190 skb_pull(cmd_node->cmd_skb, MWIFIEX_TYPE_LEN);
191 if (ret == -EBUSY)
192 cmd_node->cmd_skb = NULL;
193 } else {
194 skb_push(cmd_node->cmd_skb, INTF_HEADER_LEN);
195 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_CMD,
196 cmd_node->cmd_skb, NULL);
197 skb_pull(cmd_node->cmd_skb, INTF_HEADER_LEN);
198 }
187 199
188 if (ret == -1) { 200 if (ret == -1) {
189 dev_err(adapter->dev, "DNLD_CMD: host to card failed\n"); 201 dev_err(adapter->dev, "DNLD_CMD: host to card failed\n");
202 if (adapter->iface_type == MWIFIEX_USB)
203 adapter->cmd_sent = false;
190 if (cmd_node->wait_q_enabled) 204 if (cmd_node->wait_q_enabled)
191 adapter->cmd_wait_q.status = -1; 205 adapter->cmd_wait_q.status = -1;
192 mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd); 206 mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd);
@@ -232,6 +246,9 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
232 struct mwifiex_opt_sleep_confirm *sleep_cfm_buf = 246 struct mwifiex_opt_sleep_confirm *sleep_cfm_buf =
233 (struct mwifiex_opt_sleep_confirm *) 247 (struct mwifiex_opt_sleep_confirm *)
234 adapter->sleep_cfm->data; 248 adapter->sleep_cfm->data;
249 struct sk_buff *sleep_cfm_tmp;
250 __le32 tmp;
251
235 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY); 252 priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY);
236 253
237 sleep_cfm_buf->seq_num = 254 sleep_cfm_buf->seq_num =
@@ -240,10 +257,28 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
240 priv->bss_type))); 257 priv->bss_type)));
241 adapter->seq_num++; 258 adapter->seq_num++;
242 259
243 skb_push(adapter->sleep_cfm, INTF_HEADER_LEN); 260 if (adapter->iface_type == MWIFIEX_USB) {
244 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_CMD, 261 sleep_cfm_tmp =
245 adapter->sleep_cfm, NULL); 262 dev_alloc_skb(sizeof(struct mwifiex_opt_sleep_confirm)
246 skb_pull(adapter->sleep_cfm, INTF_HEADER_LEN); 263 + MWIFIEX_TYPE_LEN);
264 skb_put(sleep_cfm_tmp, sizeof(struct mwifiex_opt_sleep_confirm)
265 + MWIFIEX_TYPE_LEN);
266 tmp = cpu_to_le32(MWIFIEX_USB_TYPE_CMD);
267 memcpy(sleep_cfm_tmp->data, &tmp, MWIFIEX_TYPE_LEN);
268 memcpy(sleep_cfm_tmp->data + MWIFIEX_TYPE_LEN,
269 adapter->sleep_cfm->data,
270 sizeof(struct mwifiex_opt_sleep_confirm));
271 ret = adapter->if_ops.host_to_card(adapter,
272 MWIFIEX_USB_EP_CMD_EVENT,
273 sleep_cfm_tmp, NULL);
274 if (ret != -EBUSY)
275 dev_kfree_skb_any(sleep_cfm_tmp);
276 } else {
277 skb_push(adapter->sleep_cfm, INTF_HEADER_LEN);
278 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_CMD,
279 adapter->sleep_cfm, NULL);
280 skb_pull(adapter->sleep_cfm, INTF_HEADER_LEN);
281 }
247 282
248 if (ret == -1) { 283 if (ret == -1) {
249 dev_err(adapter->dev, "SLEEP_CFM: failed\n"); 284 dev_err(adapter->dev, "SLEEP_CFM: failed\n");
@@ -343,7 +378,12 @@ int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter)
343 } 378 }
344 if (!cmd_array[i].resp_skb) 379 if (!cmd_array[i].resp_skb)
345 continue; 380 continue;
346 dev_kfree_skb_any(cmd_array[i].resp_skb); 381
382 if (adapter->iface_type == MWIFIEX_USB)
383 adapter->if_ops.cmdrsp_complete(adapter,
384 cmd_array[i].resp_skb);
385 else
386 dev_kfree_skb_any(cmd_array[i].resp_skb);
347 } 387 }
348 /* Release struct cmd_ctrl_node */ 388 /* Release struct cmd_ctrl_node */
349 if (adapter->cmd_pool) { 389 if (adapter->cmd_pool) {
@@ -1083,6 +1123,7 @@ mwifiex_process_hs_config(struct mwifiex_adapter *adapter)
1083 MWIFIEX_BSS_ROLE_ANY), 1123 MWIFIEX_BSS_ROLE_ANY),
1084 false); 1124 false);
1085} 1125}
1126EXPORT_SYMBOL_GPL(mwifiex_process_hs_config);
1086 1127
1087/* 1128/*
1088 * This function handles the command response of a sleep confirm command. 1129 * This function handles the command response of a sleep confirm command.
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
index 1a845074c52a..a870b5885c09 100644
--- a/drivers/net/wireless/mwifiex/debugfs.c
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -212,7 +212,7 @@ mwifiex_info_read(struct file *file, char __user *ubuf,
212 p += sprintf(p, "essid=\"%s\"\n", info.ssid.ssid); 212 p += sprintf(p, "essid=\"%s\"\n", info.ssid.ssid);
213 p += sprintf(p, "bssid=\"%pM\"\n", info.bssid); 213 p += sprintf(p, "bssid=\"%pM\"\n", info.bssid);
214 p += sprintf(p, "channel=\"%d\"\n", (int) info.bss_chan); 214 p += sprintf(p, "channel=\"%d\"\n", (int) info.bss_chan);
215 p += sprintf(p, "region_code = \"%02x\"\n", info.region_code); 215 p += sprintf(p, "country_code = \"%s\"\n", info.country_code);
216 216
217 netdev_for_each_mc_addr(ha, netdev) 217 netdev_for_each_mc_addr(ha, netdev)
218 p += sprintf(p, "multicast_address[%d]=\"%pM\"\n", 218 p += sprintf(p, "multicast_address[%d]=\"%pM\"\n",
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index be5fd1652e53..d04aba4131dc 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -53,6 +53,7 @@
53#define MWIFIEX_RATE_BITMAP_MCS127 159 53#define MWIFIEX_RATE_BITMAP_MCS127 159
54 54
55#define MWIFIEX_RX_DATA_BUF_SIZE (4 * 1024) 55#define MWIFIEX_RX_DATA_BUF_SIZE (4 * 1024)
56#define MWIFIEX_RX_CMD_BUF_SIZE (2 * 1024)
56 57
57#define MWIFIEX_RTS_MIN_VALUE (0) 58#define MWIFIEX_RTS_MIN_VALUE (0)
58#define MWIFIEX_RTS_MAX_VALUE (2347) 59#define MWIFIEX_RTS_MAX_VALUE (2347)
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index e98fc5af73dc..2a2cabadb9b5 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -81,6 +81,11 @@ enum KEY_TYPE_ID {
81#define FIRMWARE_READY_SDIO 0xfedc 81#define FIRMWARE_READY_SDIO 0xfedc
82#define FIRMWARE_READY_PCIE 0xfedcba00 82#define FIRMWARE_READY_PCIE 0xfedcba00
83 83
84enum mwifiex_usb_ep {
85 MWIFIEX_USB_EP_CMD_EVENT = 1,
86 MWIFIEX_USB_EP_DATA = 2,
87};
88
84enum MWIFIEX_802_11_PRIVACY_FILTER { 89enum MWIFIEX_802_11_PRIVACY_FILTER {
85 MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL, 90 MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL,
86 MWIFIEX_802_11_PRIV_FILTER_8021X_WEP 91 MWIFIEX_802_11_PRIV_FILTER_8021X_WEP
@@ -92,16 +97,19 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
92#define TLV_TYPE_KEY_MATERIAL (PROPRIETARY_TLV_BASE_ID + 0) 97#define TLV_TYPE_KEY_MATERIAL (PROPRIETARY_TLV_BASE_ID + 0)
93#define TLV_TYPE_CHANLIST (PROPRIETARY_TLV_BASE_ID + 1) 98#define TLV_TYPE_CHANLIST (PROPRIETARY_TLV_BASE_ID + 1)
94#define TLV_TYPE_NUMPROBES (PROPRIETARY_TLV_BASE_ID + 2) 99#define TLV_TYPE_NUMPROBES (PROPRIETARY_TLV_BASE_ID + 2)
100#define TLV_TYPE_RSSI_LOW (PROPRIETARY_TLV_BASE_ID + 4)
95#define TLV_TYPE_PASSTHROUGH (PROPRIETARY_TLV_BASE_ID + 10) 101#define TLV_TYPE_PASSTHROUGH (PROPRIETARY_TLV_BASE_ID + 10)
96#define TLV_TYPE_WMMQSTATUS (PROPRIETARY_TLV_BASE_ID + 16) 102#define TLV_TYPE_WMMQSTATUS (PROPRIETARY_TLV_BASE_ID + 16)
97#define TLV_TYPE_WILDCARDSSID (PROPRIETARY_TLV_BASE_ID + 18) 103#define TLV_TYPE_WILDCARDSSID (PROPRIETARY_TLV_BASE_ID + 18)
98#define TLV_TYPE_TSFTIMESTAMP (PROPRIETARY_TLV_BASE_ID + 19) 104#define TLV_TYPE_TSFTIMESTAMP (PROPRIETARY_TLV_BASE_ID + 19)
105#define TLV_TYPE_RSSI_HIGH (PROPRIETARY_TLV_BASE_ID + 22)
99#define TLV_TYPE_AUTH_TYPE (PROPRIETARY_TLV_BASE_ID + 31) 106#define TLV_TYPE_AUTH_TYPE (PROPRIETARY_TLV_BASE_ID + 31)
100#define TLV_TYPE_CHANNELBANDLIST (PROPRIETARY_TLV_BASE_ID + 42) 107#define TLV_TYPE_CHANNELBANDLIST (PROPRIETARY_TLV_BASE_ID + 42)
101#define TLV_TYPE_RATE_DROP_CONTROL (PROPRIETARY_TLV_BASE_ID + 82) 108#define TLV_TYPE_RATE_DROP_CONTROL (PROPRIETARY_TLV_BASE_ID + 82)
102#define TLV_TYPE_RATE_SCOPE (PROPRIETARY_TLV_BASE_ID + 83) 109#define TLV_TYPE_RATE_SCOPE (PROPRIETARY_TLV_BASE_ID + 83)
103#define TLV_TYPE_POWER_GROUP (PROPRIETARY_TLV_BASE_ID + 84) 110#define TLV_TYPE_POWER_GROUP (PROPRIETARY_TLV_BASE_ID + 84)
104#define TLV_TYPE_WAPI_IE (PROPRIETARY_TLV_BASE_ID + 94) 111#define TLV_TYPE_WAPI_IE (PROPRIETARY_TLV_BASE_ID + 94)
112#define TLV_TYPE_MGMT_IE (PROPRIETARY_TLV_BASE_ID + 105)
105#define TLV_TYPE_AUTO_DS_PARAM (PROPRIETARY_TLV_BASE_ID + 113) 113#define TLV_TYPE_AUTO_DS_PARAM (PROPRIETARY_TLV_BASE_ID + 113)
106#define TLV_TYPE_PS_PARAM (PROPRIETARY_TLV_BASE_ID + 114) 114#define TLV_TYPE_PS_PARAM (PROPRIETARY_TLV_BASE_ID + 114)
107 115
@@ -194,6 +202,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
194#define HostCmd_CMD_802_11_KEY_MATERIAL 0x005e 202#define HostCmd_CMD_802_11_KEY_MATERIAL 0x005e
195#define HostCmd_CMD_802_11_BG_SCAN_QUERY 0x006c 203#define HostCmd_CMD_802_11_BG_SCAN_QUERY 0x006c
196#define HostCmd_CMD_WMM_GET_STATUS 0x0071 204#define HostCmd_CMD_WMM_GET_STATUS 0x0071
205#define HostCmd_CMD_802_11_SUBSCRIBE_EVENT 0x0075
197#define HostCmd_CMD_802_11_TX_RATE_QUERY 0x007f 206#define HostCmd_CMD_802_11_TX_RATE_QUERY 0x007f
198#define HostCmd_CMD_802_11_IBSS_COALESCING_STATUS 0x0083 207#define HostCmd_CMD_802_11_IBSS_COALESCING_STATUS 0x0083
199#define HostCmd_CMD_VERSION_EXT 0x0097 208#define HostCmd_CMD_VERSION_EXT 0x0097
@@ -228,6 +237,8 @@ enum ENH_PS_MODES {
228#define HostCmd_RET_BIT 0x8000 237#define HostCmd_RET_BIT 0x8000
229#define HostCmd_ACT_GEN_GET 0x0000 238#define HostCmd_ACT_GEN_GET 0x0000
230#define HostCmd_ACT_GEN_SET 0x0001 239#define HostCmd_ACT_GEN_SET 0x0001
240#define HostCmd_ACT_BITWISE_SET 0x0002
241#define HostCmd_ACT_BITWISE_CLR 0x0003
231#define HostCmd_RESULT_OK 0x0000 242#define HostCmd_RESULT_OK 0x0000
232 243
233#define HostCmd_ACT_MAC_RX_ON 0x0001 244#define HostCmd_ACT_MAC_RX_ON 0x0001
@@ -813,7 +824,7 @@ struct host_cmd_ds_txpwr_cfg {
813struct mwifiex_bcn_param { 824struct mwifiex_bcn_param {
814 u8 bssid[ETH_ALEN]; 825 u8 bssid[ETH_ALEN];
815 u8 rssi; 826 u8 rssi;
816 __le32 timestamp[2]; 827 __le64 timestamp;
817 __le16 beacon_period; 828 __le16 beacon_period;
818 __le16 cap_info_bitmap; 829 __le16 cap_info_bitmap;
819} __packed; 830} __packed;
@@ -1007,7 +1018,7 @@ struct ieee_types_wmm_parameter {
1007 struct ieee_types_vendor_header vend_hdr; 1018 struct ieee_types_vendor_header vend_hdr;
1008 u8 qos_info_bitmap; 1019 u8 qos_info_bitmap;
1009 u8 reserved; 1020 u8 reserved;
1010 struct ieee_types_wmm_ac_parameters ac_params[IEEE80211_MAX_QUEUES]; 1021 struct ieee_types_wmm_ac_parameters ac_params[IEEE80211_NUM_ACS];
1011} __packed; 1022} __packed;
1012 1023
1013struct ieee_types_wmm_info { 1024struct ieee_types_wmm_info {
@@ -1028,7 +1039,7 @@ struct ieee_types_wmm_info {
1028 1039
1029struct host_cmd_ds_wmm_get_status { 1040struct host_cmd_ds_wmm_get_status {
1030 u8 queue_status_tlv[sizeof(struct mwifiex_ie_types_wmm_queue_status) * 1041 u8 queue_status_tlv[sizeof(struct mwifiex_ie_types_wmm_queue_status) *
1031 IEEE80211_MAX_QUEUES]; 1042 IEEE80211_NUM_ACS];
1032 u8 wmm_param_tlv[sizeof(struct ieee_types_wmm_parameter) + 2]; 1043 u8 wmm_param_tlv[sizeof(struct ieee_types_wmm_parameter) + 2];
1033} __packed; 1044} __packed;
1034 1045
@@ -1045,7 +1056,7 @@ struct mwifiex_ie_types_htcap {
1045 1056
1046struct mwifiex_ie_types_htinfo { 1057struct mwifiex_ie_types_htinfo {
1047 struct mwifiex_ie_types_header header; 1058 struct mwifiex_ie_types_header header;
1048 struct ieee80211_ht_info ht_info; 1059 struct ieee80211_ht_operation ht_oper;
1049} __packed; 1060} __packed;
1050 1061
1051struct mwifiex_ie_types_2040bssco { 1062struct mwifiex_ie_types_2040bssco {
@@ -1146,6 +1157,17 @@ struct host_cmd_ds_pcie_details {
1146 u32 sleep_cookie_addr_hi; 1157 u32 sleep_cookie_addr_hi;
1147} __packed; 1158} __packed;
1148 1159
1160struct mwifiex_ie_types_rssi_threshold {
1161 struct mwifiex_ie_types_header header;
1162 u8 abs_value;
1163 u8 evt_freq;
1164} __packed;
1165
1166struct host_cmd_ds_802_11_subsc_evt {
1167 __le16 action;
1168 __le16 events;
1169} __packed;
1170
1149struct host_cmd_ds_command { 1171struct host_cmd_ds_command {
1150 __le16 command; 1172 __le16 command;
1151 __le16 size; 1173 __le16 size;
@@ -1195,6 +1217,7 @@ struct host_cmd_ds_command {
1195 struct host_cmd_ds_set_bss_mode bss_mode; 1217 struct host_cmd_ds_set_bss_mode bss_mode;
1196 struct host_cmd_ds_pcie_details pcie_host_spec; 1218 struct host_cmd_ds_pcie_details pcie_host_spec;
1197 struct host_cmd_ds_802_11_eeprom_access eeprom; 1219 struct host_cmd_ds_802_11_eeprom_access eeprom;
1220 struct host_cmd_ds_802_11_subsc_evt subsc_evt;
1198 } params; 1221 } params;
1199} __packed; 1222} __packed;
1200 1223
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index 54bb4839b57c..d440c3eb640b 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -131,6 +131,8 @@ static int mwifiex_init_priv(struct mwifiex_private *priv)
131 priv->wmm_qosinfo = 0; 131 priv->wmm_qosinfo = 0;
132 priv->curr_bcn_buf = NULL; 132 priv->curr_bcn_buf = NULL;
133 priv->curr_bcn_size = 0; 133 priv->curr_bcn_size = 0;
134 priv->wps_ie = NULL;
135 priv->wps_ie_len = 0;
134 136
135 priv->scan_block = false; 137 priv->scan_block = false;
136 138
@@ -186,10 +188,10 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
186 188
187 adapter->cmd_sent = false; 189 adapter->cmd_sent = false;
188 190
189 if (adapter->iface_type == MWIFIEX_PCIE) 191 if (adapter->iface_type == MWIFIEX_SDIO)
190 adapter->data_sent = false;
191 else
192 adapter->data_sent = true; 192 adapter->data_sent = true;
193 else
194 adapter->data_sent = false;
193 195
194 adapter->cmd_resp_received = false; 196 adapter->cmd_resp_received = false;
195 adapter->event_received = false; 197 adapter->event_received = false;
@@ -377,7 +379,8 @@ mwifiex_free_adapter(struct mwifiex_adapter *adapter)
377 379
378 dev_dbg(adapter->dev, "info: free scan table\n"); 380 dev_dbg(adapter->dev, "info: free scan table\n");
379 381
380 adapter->if_ops.cleanup_if(adapter); 382 if (adapter->if_ops.cleanup_if)
383 adapter->if_ops.cleanup_if(adapter);
381 384
382 if (adapter->sleep_cfm) 385 if (adapter->sleep_cfm)
383 dev_kfree_skb_any(adapter->sleep_cfm); 386 dev_kfree_skb_any(adapter->sleep_cfm);
@@ -417,6 +420,8 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
417 spin_lock_init(&adapter->cmd_pending_q_lock); 420 spin_lock_init(&adapter->cmd_pending_q_lock);
418 spin_lock_init(&adapter->scan_pending_q_lock); 421 spin_lock_init(&adapter->scan_pending_q_lock);
419 422
423 skb_queue_head_init(&adapter->usb_rx_data_q);
424
420 for (i = 0; i < adapter->priv_num; ++i) { 425 for (i = 0; i < adapter->priv_num; ++i) {
421 INIT_LIST_HEAD(&adapter->bss_prio_tbl[i].bss_prio_head); 426 INIT_LIST_HEAD(&adapter->bss_prio_tbl[i].bss_prio_head);
422 adapter->bss_prio_tbl[i].bss_prio_cur = NULL; 427 adapter->bss_prio_tbl[i].bss_prio_cur = NULL;
@@ -572,6 +577,7 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
572 struct mwifiex_private *priv; 577 struct mwifiex_private *priv;
573 s32 i; 578 s32 i;
574 unsigned long flags; 579 unsigned long flags;
580 struct sk_buff *skb;
575 581
576 /* mwifiex already shutdown */ 582 /* mwifiex already shutdown */
577 if (adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY) 583 if (adapter->hw_status == MWIFIEX_HW_STATUS_NOT_READY)
@@ -599,6 +605,18 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter)
599 605
600 spin_lock_irqsave(&adapter->mwifiex_lock, flags); 606 spin_lock_irqsave(&adapter->mwifiex_lock, flags);
601 607
608 if (adapter->if_ops.data_complete) {
609 while ((skb = skb_dequeue(&adapter->usb_rx_data_q))) {
610 struct mwifiex_rxinfo *rx_info = MWIFIEX_SKB_RXCB(skb);
611
612 priv = adapter->priv[rx_info->bss_num];
613 if (priv)
614 priv->stats.rx_dropped++;
615
616 adapter->if_ops.data_complete(adapter, skb);
617 }
618 }
619
602 /* Free adapter structure */ 620 /* Free adapter structure */
603 mwifiex_free_adapter(adapter); 621 mwifiex_free_adapter(adapter);
604 622
@@ -628,24 +646,28 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
628 int ret; 646 int ret;
629 u32 poll_num = 1; 647 u32 poll_num = 1;
630 648
631 adapter->winner = 0; 649 if (adapter->if_ops.check_fw_status) {
650 adapter->winner = 0;
632 651
633 /* Check if firmware is already running */ 652 /* check if firmware is already running */
634 ret = adapter->if_ops.check_fw_status(adapter, poll_num); 653 ret = adapter->if_ops.check_fw_status(adapter, poll_num);
635 if (!ret) { 654 if (!ret) {
636 dev_notice(adapter->dev, 655 dev_notice(adapter->dev,
637 "WLAN FW already running! Skip FW download\n"); 656 "WLAN FW already running! Skip FW dnld\n");
638 goto done; 657 goto done;
639 } 658 }
640 poll_num = MAX_FIRMWARE_POLL_TRIES; 659
641 660 poll_num = MAX_FIRMWARE_POLL_TRIES;
642 /* Check if we are the winner for downloading FW */ 661
643 if (!adapter->winner) { 662 /* check if we are the winner for downloading FW */
644 dev_notice(adapter->dev, 663 if (!adapter->winner) {
645 "Other intf already running! Skip FW download\n"); 664 dev_notice(adapter->dev,
646 poll_num = MAX_MULTI_INTERFACE_POLL_TRIES; 665 "FW already running! Skip FW dnld\n");
647 goto poll_fw; 666 poll_num = MAX_MULTI_INTERFACE_POLL_TRIES;
667 goto poll_fw;
668 }
648 } 669 }
670
649 if (pmfw) { 671 if (pmfw) {
650 /* Download firmware with helper */ 672 /* Download firmware with helper */
651 ret = adapter->if_ops.prog_fw(adapter, pmfw); 673 ret = adapter->if_ops.prog_fw(adapter, pmfw);
@@ -664,6 +686,8 @@ poll_fw:
664 } 686 }
665done: 687done:
666 /* re-enable host interrupt for mwifiex after fw dnld is successful */ 688 /* re-enable host interrupt for mwifiex after fw dnld is successful */
667 adapter->if_ops.enable_int(adapter); 689 if (adapter->if_ops.enable_int)
690 adapter->if_ops.enable_int(adapter);
691
668 return ret; 692 return ret;
669} 693}
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index 7ca4e8234f3e..f0f95524e96b 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -85,34 +85,6 @@ struct mwifiex_ds_get_stats {
85 u32 wep_icv_error[4]; 85 u32 wep_icv_error[4];
86}; 86};
87 87
88#define BCN_RSSI_AVG_MASK 0x00000002
89#define BCN_NF_AVG_MASK 0x00000200
90#define ALL_RSSI_INFO_MASK 0x00000fff
91
92struct mwifiex_ds_get_signal {
93 /*
94 * Bit0: Last Beacon RSSI, Bit1: Average Beacon RSSI,
95 * Bit2: Last Data RSSI, Bit3: Average Data RSSI,
96 * Bit4: Last Beacon SNR, Bit5: Average Beacon SNR,
97 * Bit6: Last Data SNR, Bit7: Average Data SNR,
98 * Bit8: Last Beacon NF, Bit9: Average Beacon NF,
99 * Bit10: Last Data NF, Bit11: Average Data NF
100 */
101 u16 selector;
102 s16 bcn_rssi_last;
103 s16 bcn_rssi_avg;
104 s16 data_rssi_last;
105 s16 data_rssi_avg;
106 s16 bcn_snr_last;
107 s16 bcn_snr_avg;
108 s16 data_snr_last;
109 s16 data_snr_avg;
110 s16 bcn_nf_last;
111 s16 bcn_nf_avg;
112 s16 data_nf_last;
113 s16 data_nf_avg;
114};
115
116#define MWIFIEX_MAX_VER_STR_LEN 128 88#define MWIFIEX_MAX_VER_STR_LEN 128
117 89
118struct mwifiex_ver_ext { 90struct mwifiex_ver_ext {
@@ -124,7 +96,7 @@ struct mwifiex_bss_info {
124 u32 bss_mode; 96 u32 bss_mode;
125 struct cfg80211_ssid ssid; 97 struct cfg80211_ssid ssid;
126 u32 bss_chan; 98 u32 bss_chan;
127 u32 region_code; 99 u8 country_code[3];
128 u32 media_connected; 100 u32 media_connected;
129 u32 max_power_level; 101 u32 max_power_level;
130 u32 min_power_level; 102 u32 min_power_level;
@@ -308,8 +280,30 @@ struct mwifiex_ds_misc_cmd {
308 u8 cmd[MWIFIEX_SIZE_OF_CMD_BUFFER]; 280 u8 cmd[MWIFIEX_SIZE_OF_CMD_BUFFER];
309}; 281};
310 282
283#define BITMASK_BCN_RSSI_LOW BIT(0)
284#define BITMASK_BCN_RSSI_HIGH BIT(4)
285
286enum subsc_evt_rssi_state {
287 EVENT_HANDLED,
288 RSSI_LOW_RECVD,
289 RSSI_HIGH_RECVD
290};
291
292struct subsc_evt_cfg {
293 u8 abs_value;
294 u8 evt_freq;
295};
296
297struct mwifiex_ds_misc_subsc_evt {
298 u16 action;
299 u16 events;
300 struct subsc_evt_cfg bcn_l_rssi_cfg;
301 struct subsc_evt_cfg bcn_h_rssi_cfg;
302};
303
311#define MWIFIEX_MAX_VSIE_LEN (256) 304#define MWIFIEX_MAX_VSIE_LEN (256)
312#define MWIFIEX_MAX_VSIE_NUM (8) 305#define MWIFIEX_MAX_VSIE_NUM (8)
306#define MWIFIEX_VSIE_MASK_CLEAR 0x00
313#define MWIFIEX_VSIE_MASK_SCAN 0x01 307#define MWIFIEX_VSIE_MASK_SCAN 0x01
314#define MWIFIEX_VSIE_MASK_ASSOC 0x02 308#define MWIFIEX_VSIE_MASK_ASSOC 0x02
315#define MWIFIEX_VSIE_MASK_ADHOC 0x04 309#define MWIFIEX_VSIE_MASK_ADHOC 0x04
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 8f9382b9c3ca..8a390982463e 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -118,15 +118,15 @@ mwifiex_cmd_append_tsf_tlv(struct mwifiex_private *priv, u8 **buffer,
118 *buffer += sizeof(tsf_tlv.header); 118 *buffer += sizeof(tsf_tlv.header);
119 119
120 /* TSF at the time when beacon/probe_response was received */ 120 /* TSF at the time when beacon/probe_response was received */
121 tsf_val = cpu_to_le64(bss_desc->network_tsf); 121 tsf_val = cpu_to_le64(bss_desc->fw_tsf);
122 memcpy(*buffer, &tsf_val, sizeof(tsf_val)); 122 memcpy(*buffer, &tsf_val, sizeof(tsf_val));
123 *buffer += sizeof(tsf_val); 123 *buffer += sizeof(tsf_val);
124 124
125 memcpy(&tsf_val, bss_desc->time_stamp, sizeof(tsf_val)); 125 tsf_val = cpu_to_le64(bss_desc->timestamp);
126 126
127 dev_dbg(priv->adapter->dev, 127 dev_dbg(priv->adapter->dev,
128 "info: %s: TSF offset calc: %016llx - %016llx\n", 128 "info: %s: TSF offset calc: %016llx - %016llx\n",
129 __func__, tsf_val, bss_desc->network_tsf); 129 __func__, bss_desc->timestamp, bss_desc->fw_tsf);
130 130
131 memcpy(*buffer, &tsf_val, sizeof(tsf_val)); 131 memcpy(*buffer, &tsf_val, sizeof(tsf_val));
132 *buffer += sizeof(tsf_val); 132 *buffer += sizeof(tsf_val);
@@ -225,6 +225,48 @@ mwifiex_setup_rates_from_bssdesc(struct mwifiex_private *priv,
225} 225}
226 226
227/* 227/*
228 * This function appends a WPS IE. It is called from the network join command
229 * preparation routine.
230 *
231 * If the IE buffer has been setup by the application, this routine appends
232 * the buffer as a WPS TLV type to the request.
233 */
234static int
235mwifiex_cmd_append_wps_ie(struct mwifiex_private *priv, u8 **buffer)
236{
237 int retLen = 0;
238 struct mwifiex_ie_types_header ie_header;
239
240 if (!buffer || !*buffer)
241 return 0;
242
243 /*
244 * If there is a wps ie buffer setup, append it to the return
245 * parameter buffer pointer.
246 */
247 if (priv->wps_ie_len) {
248 dev_dbg(priv->adapter->dev, "cmd: append wps ie %d to %p\n",
249 priv->wps_ie_len, *buffer);
250
251 /* Wrap the generic IE buffer with a pass through TLV type */
252 ie_header.type = cpu_to_le16(TLV_TYPE_MGMT_IE);
253 ie_header.len = cpu_to_le16(priv->wps_ie_len);
254 memcpy(*buffer, &ie_header, sizeof(ie_header));
255 *buffer += sizeof(ie_header);
256 retLen += sizeof(ie_header);
257
258 memcpy(*buffer, priv->wps_ie, priv->wps_ie_len);
259 *buffer += priv->wps_ie_len;
260 retLen += priv->wps_ie_len;
261
262 }
263
264 kfree(priv->wps_ie);
265 priv->wps_ie_len = 0;
266 return retLen;
267}
268
269/*
228 * This function appends a WAPI IE. 270 * This function appends a WAPI IE.
229 * 271 *
230 * This function is called from the network join command preparation routine. 272 * This function is called from the network join command preparation routine.
@@ -480,6 +522,8 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
480 if (priv->sec_info.wapi_enabled && priv->wapi_ie_len) 522 if (priv->sec_info.wapi_enabled && priv->wapi_ie_len)
481 mwifiex_cmd_append_wapi_ie(priv, &pos); 523 mwifiex_cmd_append_wapi_ie(priv, &pos);
482 524
525 if (priv->wps.session_enable && priv->wps_ie_len)
526 mwifiex_cmd_append_wps_ie(priv, &pos);
483 527
484 mwifiex_cmd_append_generic_ie(priv, &pos); 528 mwifiex_cmd_append_generic_ie(priv, &pos);
485 529
@@ -932,20 +976,20 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
932 /* Fill HT INFORMATION */ 976 /* Fill HT INFORMATION */
933 ht_info = (struct mwifiex_ie_types_htinfo *) pos; 977 ht_info = (struct mwifiex_ie_types_htinfo *) pos;
934 memset(ht_info, 0, sizeof(struct mwifiex_ie_types_htinfo)); 978 memset(ht_info, 0, sizeof(struct mwifiex_ie_types_htinfo));
935 ht_info->header.type = cpu_to_le16(WLAN_EID_HT_INFORMATION); 979 ht_info->header.type = cpu_to_le16(WLAN_EID_HT_OPERATION);
936 ht_info->header.len = 980 ht_info->header.len =
937 cpu_to_le16(sizeof(struct ieee80211_ht_info)); 981 cpu_to_le16(sizeof(struct ieee80211_ht_operation));
938 982
939 ht_info->ht_info.control_chan = 983 ht_info->ht_oper.primary_chan =
940 (u8) priv->curr_bss_params.bss_descriptor.channel; 984 (u8) priv->curr_bss_params.bss_descriptor.channel;
941 if (adapter->sec_chan_offset) { 985 if (adapter->sec_chan_offset) {
942 ht_info->ht_info.ht_param = adapter->sec_chan_offset; 986 ht_info->ht_oper.ht_param = adapter->sec_chan_offset;
943 ht_info->ht_info.ht_param |= 987 ht_info->ht_oper.ht_param |=
944 IEEE80211_HT_PARAM_CHAN_WIDTH_ANY; 988 IEEE80211_HT_PARAM_CHAN_WIDTH_ANY;
945 } 989 }
946 ht_info->ht_info.operation_mode = 990 ht_info->ht_oper.operation_mode =
947 cpu_to_le16(IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); 991 cpu_to_le16(IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
948 ht_info->ht_info.basic_set[0] = 0xff; 992 ht_info->ht_oper.basic_set[0] = 0xff;
949 pos += sizeof(struct mwifiex_ie_types_htinfo); 993 pos += sizeof(struct mwifiex_ie_types_htinfo);
950 cmd_append_size += 994 cmd_append_size +=
951 sizeof(struct mwifiex_ie_types_htinfo); 995 sizeof(struct mwifiex_ie_types_htinfo);
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 9d1b3ca6334b..be0f0e583f75 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -58,8 +58,9 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops,
58 memmove(&adapter->if_ops, if_ops, sizeof(struct mwifiex_if_ops)); 58 memmove(&adapter->if_ops, if_ops, sizeof(struct mwifiex_if_ops));
59 59
60 /* card specific initialization has been deferred until now .. */ 60 /* card specific initialization has been deferred until now .. */
61 if (adapter->if_ops.init_if(adapter)) 61 if (adapter->if_ops.init_if)
62 goto error; 62 if (adapter->if_ops.init_if(adapter))
63 goto error;
63 64
64 adapter->priv_num = 0; 65 adapter->priv_num = 0;
65 66
@@ -140,6 +141,7 @@ int mwifiex_main_process(struct mwifiex_adapter *adapter)
140{ 141{
141 int ret = 0; 142 int ret = 0;
142 unsigned long flags; 143 unsigned long flags;
144 struct sk_buff *skb;
143 145
144 spin_lock_irqsave(&adapter->main_proc_lock, flags); 146 spin_lock_irqsave(&adapter->main_proc_lock, flags);
145 147
@@ -161,7 +163,8 @@ process_start:
161 if (adapter->int_status) { 163 if (adapter->int_status) {
162 if (adapter->hs_activated) 164 if (adapter->hs_activated)
163 mwifiex_process_hs_config(adapter); 165 mwifiex_process_hs_config(adapter);
164 adapter->if_ops.process_int_status(adapter); 166 if (adapter->if_ops.process_int_status)
167 adapter->if_ops.process_int_status(adapter);
165 } 168 }
166 169
167 /* Need to wake up the card ? */ 170 /* Need to wake up the card ? */
@@ -174,6 +177,7 @@ process_start:
174 adapter->if_ops.wakeup(adapter); 177 adapter->if_ops.wakeup(adapter);
175 continue; 178 continue;
176 } 179 }
180
177 if (IS_CARD_RX_RCVD(adapter)) { 181 if (IS_CARD_RX_RCVD(adapter)) {
178 adapter->pm_wakeup_fw_try = false; 182 adapter->pm_wakeup_fw_try = false;
179 if (adapter->ps_state == PS_STATE_SLEEP) 183 if (adapter->ps_state == PS_STATE_SLEEP)
@@ -194,6 +198,11 @@ process_start:
194 } 198 }
195 } 199 }
196 200
201 /* Check Rx data for USB */
202 if (adapter->iface_type == MWIFIEX_USB)
203 while ((skb = skb_dequeue(&adapter->usb_rx_data_q)))
204 mwifiex_handle_rx_packet(adapter, skb);
205
197 /* Check for Cmd Resp */ 206 /* Check for Cmd Resp */
198 if (adapter->cmd_resp_received) { 207 if (adapter->cmd_resp_received) {
199 adapter->cmd_resp_received = false; 208 adapter->cmd_resp_received = false;
@@ -292,33 +301,35 @@ static void mwifiex_free_adapter(struct mwifiex_adapter *adapter)
292} 301}
293 302
294/* 303/*
295 * This function initializes the hardware and firmware. 304 * This function gets firmware and initializes it.
296 * 305 *
297 * The main initialization steps followed are - 306 * The main initialization steps followed are -
298 * - Download the correct firmware to card 307 * - Download the correct firmware to card
299 * - Allocate and initialize the adapter structure
300 * - Initialize the private structures
301 * - Issue the init commands to firmware 308 * - Issue the init commands to firmware
302 */ 309 */
303static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter) 310static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
304{ 311{
305 int ret, err; 312 int ret;
313 char fmt[64];
314 struct mwifiex_private *priv;
315 struct mwifiex_adapter *adapter = context;
306 struct mwifiex_fw_image fw; 316 struct mwifiex_fw_image fw;
307 317
308 memset(&fw, 0, sizeof(struct mwifiex_fw_image)); 318 if (!firmware) {
309 319 dev_err(adapter->dev,
310 err = request_firmware(&adapter->firmware, adapter->fw_name, 320 "Failed to get firmware %s\n", adapter->fw_name);
311 adapter->dev);
312 if (err < 0) {
313 dev_err(adapter->dev, "request_firmware() returned"
314 " error code %#x\n", err);
315 ret = -1;
316 goto done; 321 goto done;
317 } 322 }
323
324 memset(&fw, 0, sizeof(struct mwifiex_fw_image));
325 adapter->firmware = firmware;
318 fw.fw_buf = (u8 *) adapter->firmware->data; 326 fw.fw_buf = (u8 *) adapter->firmware->data;
319 fw.fw_len = adapter->firmware->size; 327 fw.fw_len = adapter->firmware->size;
320 328
321 ret = mwifiex_dnld_fw(adapter, &fw); 329 if (adapter->if_ops.dnld_fw)
330 ret = adapter->if_ops.dnld_fw(adapter, &fw);
331 else
332 ret = mwifiex_dnld_fw(adapter, &fw);
322 if (ret == -1) 333 if (ret == -1)
323 goto done; 334 goto done;
324 335
@@ -335,17 +346,54 @@ static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter)
335 /* Wait for mwifiex_init to complete */ 346 /* Wait for mwifiex_init to complete */
336 wait_event_interruptible(adapter->init_wait_q, 347 wait_event_interruptible(adapter->init_wait_q,
337 adapter->init_wait_q_woken); 348 adapter->init_wait_q_woken);
338 if (adapter->hw_status != MWIFIEX_HW_STATUS_READY) { 349 if (adapter->hw_status != MWIFIEX_HW_STATUS_READY)
339 ret = -1;
340 goto done; 350 goto done;
351
352 priv = adapter->priv[0];
353 if (mwifiex_register_cfg80211(priv) != 0) {
354 dev_err(adapter->dev, "cannot register with cfg80211\n");
355 goto err_init_fw;
341 } 356 }
342 ret = 0;
343 357
358 rtnl_lock();
359 /* Create station interface by default */
360 if (!mwifiex_add_virtual_intf(priv->wdev->wiphy, "mlan%d",
361 NL80211_IFTYPE_STATION, NULL, NULL)) {
362 dev_err(adapter->dev, "cannot create default STA interface\n");
363 goto err_add_intf;
364 }
365 rtnl_unlock();
366
367 mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1);
368 dev_notice(adapter->dev, "driver_version = %s\n", fmt);
369 goto done;
370
371err_add_intf:
372 mwifiex_del_virtual_intf(priv->wdev->wiphy, priv->netdev);
373 rtnl_unlock();
374err_init_fw:
375 pr_debug("info: %s: unregister device\n", __func__);
376 adapter->if_ops.unregister_dev(adapter);
344done: 377done:
345 if (adapter->firmware) 378 release_firmware(adapter->firmware);
346 release_firmware(adapter->firmware); 379 complete(&adapter->fw_load);
347 if (ret) 380 return;
348 ret = -1; 381}
382
383/*
384 * This function initializes the hardware and gets firmware.
385 */
386static int mwifiex_init_hw_fw(struct mwifiex_adapter *adapter)
387{
388 int ret;
389
390 init_completion(&adapter->fw_load);
391 ret = request_firmware_nowait(THIS_MODULE, 1, adapter->fw_name,
392 adapter->dev, GFP_KERNEL, adapter,
393 mwifiex_fw_dpc);
394 if (ret < 0)
395 dev_err(adapter->dev,
396 "request_firmware_nowait() returned error %d\n", ret);
349 return ret; 397 return ret;
350} 398}
351 399
@@ -650,8 +698,6 @@ mwifiex_add_card(void *card, struct semaphore *sem,
650 struct mwifiex_if_ops *if_ops, u8 iface_type) 698 struct mwifiex_if_ops *if_ops, u8 iface_type)
651{ 699{
652 struct mwifiex_adapter *adapter; 700 struct mwifiex_adapter *adapter;
653 char fmt[64];
654 struct mwifiex_private *priv;
655 701
656 if (down_interruptible(sem)) 702 if (down_interruptible(sem))
657 goto exit_sem_err; 703 goto exit_sem_err;
@@ -692,40 +738,13 @@ mwifiex_add_card(void *card, struct semaphore *sem,
692 goto err_init_fw; 738 goto err_init_fw;
693 } 739 }
694 740
695 priv = adapter->priv[0];
696
697 if (mwifiex_register_cfg80211(priv) != 0) {
698 dev_err(adapter->dev, "cannot register netdevice"
699 " with cfg80211\n");
700 goto err_init_fw;
701 }
702
703 rtnl_lock();
704 /* Create station interface by default */
705 if (!mwifiex_add_virtual_intf(priv->wdev->wiphy, "mlan%d",
706 NL80211_IFTYPE_STATION, NULL, NULL)) {
707 rtnl_unlock();
708 dev_err(adapter->dev, "cannot create default station"
709 " interface\n");
710 goto err_add_intf;
711 }
712
713 rtnl_unlock();
714
715 up(sem); 741 up(sem);
716
717 mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1);
718 dev_notice(adapter->dev, "driver_version = %s\n", fmt);
719
720 return 0; 742 return 0;
721 743
722err_add_intf:
723 rtnl_lock();
724 mwifiex_del_virtual_intf(priv->wdev->wiphy, priv->netdev);
725 rtnl_unlock();
726err_init_fw: 744err_init_fw:
727 pr_debug("info: %s: unregister device\n", __func__); 745 pr_debug("info: %s: unregister device\n", __func__);
728 adapter->if_ops.unregister_dev(adapter); 746 if (adapter->if_ops.unregister_dev)
747 adapter->if_ops.unregister_dev(adapter);
729err_registerdev: 748err_registerdev:
730 adapter->surprise_removed = true; 749 adapter->surprise_removed = true;
731 mwifiex_terminate_workqueue(adapter); 750 mwifiex_terminate_workqueue(adapter);
@@ -830,7 +849,8 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
830 849
831 /* Unregister device */ 850 /* Unregister device */
832 dev_dbg(adapter->dev, "info: unregister device\n"); 851 dev_dbg(adapter->dev, "info: unregister device\n");
833 adapter->if_ops.unregister_dev(adapter); 852 if (adapter->if_ops.unregister_dev)
853 adapter->if_ops.unregister_dev(adapter);
834 /* Free adapter structure */ 854 /* Free adapter structure */
835 dev_dbg(adapter->dev, "info: free adapter\n"); 855 dev_dbg(adapter->dev, "info: free adapter\n");
836 mwifiex_free_adapter(adapter); 856 mwifiex_free_adapter(adapter);
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 35225e9b1080..324ad390cacd 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -92,9 +92,16 @@ enum {
92#define MWIFIEX_OUI_NOT_PRESENT 0 92#define MWIFIEX_OUI_NOT_PRESENT 0
93#define MWIFIEX_OUI_PRESENT 1 93#define MWIFIEX_OUI_PRESENT 1
94 94
95/*
96 * Do not check for data_received for USB, as data_received
97 * is handled in mwifiex_usb_recv for USB
98 */
95#define IS_CARD_RX_RCVD(adapter) (adapter->cmd_resp_received || \ 99#define IS_CARD_RX_RCVD(adapter) (adapter->cmd_resp_received || \
96 adapter->event_received || \ 100 adapter->event_received || \
97 adapter->data_received) 101 ((adapter->iface_type != MWIFIEX_USB) && \
102 adapter->data_received) || \
103 ((adapter->iface_type == MWIFIEX_USB) && \
104 !skb_queue_empty(&adapter->usb_rx_data_q)))
98 105
99#define MWIFIEX_TYPE_CMD 1 106#define MWIFIEX_TYPE_CMD 1
100#define MWIFIEX_TYPE_DATA 0 107#define MWIFIEX_TYPE_DATA 0
@@ -110,6 +117,11 @@ enum {
110 117
111#define MWIFIEX_EVENT_HEADER_LEN 4 118#define MWIFIEX_EVENT_HEADER_LEN 4
112 119
120#define MWIFIEX_TYPE_LEN 4
121#define MWIFIEX_USB_TYPE_CMD 0xF00DFACE
122#define MWIFIEX_USB_TYPE_DATA 0xBEADC0DE
123#define MWIFIEX_USB_TYPE_EVENT 0xBEEFFACE
124
113struct mwifiex_dbg { 125struct mwifiex_dbg {
114 u32 num_cmd_host_to_card_failure; 126 u32 num_cmd_host_to_card_failure;
115 u32 num_cmd_sleep_cfm_host_to_card_failure; 127 u32 num_cmd_sleep_cfm_host_to_card_failure;
@@ -162,6 +174,7 @@ enum MWIFIEX_PS_STATE {
162enum mwifiex_iface_type { 174enum mwifiex_iface_type {
163 MWIFIEX_SDIO, 175 MWIFIEX_SDIO,
164 MWIFIEX_PCIE, 176 MWIFIEX_PCIE,
177 MWIFIEX_USB
165}; 178};
166 179
167struct mwifiex_add_ba_param { 180struct mwifiex_add_ba_param {
@@ -201,10 +214,10 @@ struct mwifiex_wmm_desc {
201 u32 packets_out[MAX_NUM_TID]; 214 u32 packets_out[MAX_NUM_TID];
202 /* spin lock to protect ra_list */ 215 /* spin lock to protect ra_list */
203 spinlock_t ra_list_spinlock; 216 spinlock_t ra_list_spinlock;
204 struct mwifiex_wmm_ac_status ac_status[IEEE80211_MAX_QUEUES]; 217 struct mwifiex_wmm_ac_status ac_status[IEEE80211_NUM_ACS];
205 enum mwifiex_wmm_ac_e ac_down_graded_vals[IEEE80211_MAX_QUEUES]; 218 enum mwifiex_wmm_ac_e ac_down_graded_vals[IEEE80211_NUM_ACS];
206 u32 drv_pkt_delay_max; 219 u32 drv_pkt_delay_max;
207 u8 queue_priority[IEEE80211_MAX_QUEUES]; 220 u8 queue_priority[IEEE80211_NUM_ACS];
208 u32 user_pri_pkt_tx_ctrl[WMM_HIGHEST_PRIORITY + 1]; /* UP: 0 to 7 */ 221 u32 user_pri_pkt_tx_ctrl[WMM_HIGHEST_PRIORITY + 1]; /* UP: 0 to 7 */
209 /* Number of transmit packets queued */ 222 /* Number of transmit packets queued */
210 atomic_t tx_pkts_queued; 223 atomic_t tx_pkts_queued;
@@ -260,8 +273,8 @@ struct mwifiex_bssdescriptor {
260 * BAND_A(0X04): 'a' band 273 * BAND_A(0X04): 'a' band
261 */ 274 */
262 u16 bss_band; 275 u16 bss_band;
263 u64 network_tsf; 276 u64 fw_tsf;
264 u8 time_stamp[8]; 277 u64 timestamp;
265 union ieee_types_phy_param_set phy_param_set; 278 union ieee_types_phy_param_set phy_param_set;
266 union ieee_types_ss_param_set ss_param_set; 279 union ieee_types_ss_param_set ss_param_set;
267 u16 cap_info_bitmap; 280 u16 cap_info_bitmap;
@@ -269,7 +282,7 @@ struct mwifiex_bssdescriptor {
269 u8 disable_11n; 282 u8 disable_11n;
270 struct ieee80211_ht_cap *bcn_ht_cap; 283 struct ieee80211_ht_cap *bcn_ht_cap;
271 u16 ht_cap_offset; 284 u16 ht_cap_offset;
272 struct ieee80211_ht_info *bcn_ht_info; 285 struct ieee80211_ht_operation *bcn_ht_oper;
273 u16 ht_info_offset; 286 u16 ht_info_offset;
274 u8 *bcn_bss_co_2040; 287 u8 *bcn_bss_co_2040;
275 u16 bss_co_2040_offset; 288 u16 bss_co_2040_offset;
@@ -407,6 +420,8 @@ struct mwifiex_private {
407 struct host_cmd_ds_802_11_key_material aes_key; 420 struct host_cmd_ds_802_11_key_material aes_key;
408 u8 wapi_ie[256]; 421 u8 wapi_ie[256];
409 u8 wapi_ie_len; 422 u8 wapi_ie_len;
423 u8 *wps_ie;
424 u8 wps_ie_len;
410 u8 wmm_required; 425 u8 wmm_required;
411 u8 wmm_enabled; 426 u8 wmm_enabled;
412 u8 wmm_qosinfo; 427 u8 wmm_qosinfo;
@@ -448,7 +463,6 @@ struct mwifiex_private {
448 struct dentry *dfs_dev_dir; 463 struct dentry *dfs_dev_dir;
449#endif 464#endif
450 u8 nick_name[16]; 465 u8 nick_name[16];
451 u8 qual_level, qual_noise;
452 u16 current_key_index; 466 u16 current_key_index;
453 struct semaphore async_sem; 467 struct semaphore async_sem;
454 u8 scan_pending_on_block; 468 u8 scan_pending_on_block;
@@ -459,6 +473,9 @@ struct mwifiex_private {
459 u8 country_code[IEEE80211_COUNTRY_STRING_LEN]; 473 u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
460 struct wps wps; 474 struct wps wps;
461 u8 scan_block; 475 u8 scan_block;
476 s32 cqm_rssi_thold;
477 u32 cqm_rssi_hyst;
478 u8 subsc_evt_rssi_state;
462}; 479};
463 480
464enum mwifiex_ba_status { 481enum mwifiex_ba_status {
@@ -518,6 +535,11 @@ struct cmd_ctrl_node {
518 u8 cmd_wait_q_woken; 535 u8 cmd_wait_q_woken;
519}; 536};
520 537
538struct mwifiex_bss_priv {
539 u8 band;
540 u64 fw_tsf;
541};
542
521struct mwifiex_if_ops { 543struct mwifiex_if_ops {
522 int (*init_if) (struct mwifiex_adapter *); 544 int (*init_if) (struct mwifiex_adapter *);
523 void (*cleanup_if) (struct mwifiex_adapter *); 545 void (*cleanup_if) (struct mwifiex_adapter *);
@@ -537,6 +559,8 @@ struct mwifiex_if_ops {
537 void (*cleanup_mpa_buf) (struct mwifiex_adapter *); 559 void (*cleanup_mpa_buf) (struct mwifiex_adapter *);
538 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *); 560 int (*cmdrsp_complete) (struct mwifiex_adapter *, struct sk_buff *);
539 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *); 561 int (*event_complete) (struct mwifiex_adapter *, struct sk_buff *);
562 int (*data_complete) (struct mwifiex_adapter *, struct sk_buff *);
563 int (*dnld_fw) (struct mwifiex_adapter *, struct mwifiex_fw_image *);
540}; 564};
541 565
542struct mwifiex_adapter { 566struct mwifiex_adapter {
@@ -599,6 +623,7 @@ struct mwifiex_adapter {
599 struct list_head scan_pending_q; 623 struct list_head scan_pending_q;
600 /* spin lock for scan_pending_q */ 624 /* spin lock for scan_pending_q */
601 spinlock_t scan_pending_q_lock; 625 spinlock_t scan_pending_q_lock;
626 struct sk_buff_head usb_rx_data_q;
602 u32 scan_processing; 627 u32 scan_processing;
603 u16 region_code; 628 u16 region_code;
604 struct mwifiex_802_11d_domain_reg domain_reg; 629 struct mwifiex_802_11d_domain_reg domain_reg;
@@ -651,6 +676,7 @@ struct mwifiex_adapter {
651 u8 scan_wait_q_woken; 676 u8 scan_wait_q_woken;
652 struct cmd_ctrl_node *cmd_queued; 677 struct cmd_ctrl_node *cmd_queued;
653 spinlock_t queue_lock; /* lock for tx queues */ 678 spinlock_t queue_lock; /* lock for tx queues */
679 struct completion fw_load;
654}; 680};
655 681
656int mwifiex_init_lock_list(struct mwifiex_adapter *adapter); 682int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
@@ -896,8 +922,6 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
896int mwifiex_cancel_hs(struct mwifiex_private *priv, int cmd_type); 922int mwifiex_cancel_hs(struct mwifiex_private *priv, int cmd_type);
897int mwifiex_enable_hs(struct mwifiex_adapter *adapter); 923int mwifiex_enable_hs(struct mwifiex_adapter *adapter);
898int mwifiex_disable_auto_ds(struct mwifiex_private *priv); 924int mwifiex_disable_auto_ds(struct mwifiex_private *priv);
899int mwifiex_get_signal_info(struct mwifiex_private *priv,
900 struct mwifiex_ds_get_signal *signal);
901int mwifiex_drv_get_data_rate(struct mwifiex_private *priv, 925int mwifiex_drv_get_data_rate(struct mwifiex_private *priv,
902 struct mwifiex_rate_cfg *rate); 926 struct mwifiex_rate_cfg *rate);
903int mwifiex_request_scan(struct mwifiex_private *priv, 927int mwifiex_request_scan(struct mwifiex_private *priv,
@@ -950,13 +974,10 @@ int mwifiex_bss_set_channel(struct mwifiex_private *,
950int mwifiex_get_bss_info(struct mwifiex_private *, 974int mwifiex_get_bss_info(struct mwifiex_private *,
951 struct mwifiex_bss_info *); 975 struct mwifiex_bss_info *);
952int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv, 976int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
953 u8 *bssid, s32 rssi, u8 *ie_buf, 977 struct cfg80211_bss *bss,
954 size_t ie_len, u16 beacon_period,
955 u16 cap_info_bitmap, u8 band,
956 struct mwifiex_bssdescriptor *bss_desc); 978 struct mwifiex_bssdescriptor *bss_desc);
957int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, 979int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
958 struct mwifiex_bssdescriptor *bss_entry, 980 struct mwifiex_bssdescriptor *bss_entry);
959 u8 *ie_buf, u32 ie_len);
960int mwifiex_check_network_compatibility(struct mwifiex_private *priv, 981int mwifiex_check_network_compatibility(struct mwifiex_private *priv,
961 struct mwifiex_bssdescriptor *bss_desc); 982 struct mwifiex_bssdescriptor *bss_desc);
962 983
@@ -965,6 +986,7 @@ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy,
965 u32 *flags, struct vif_params *params); 986 u32 *flags, struct vif_params *params);
966int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct net_device *dev); 987int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct net_device *dev);
967 988
989u8 *mwifiex_11d_code_2_region(u8 code);
968 990
969#ifdef CONFIG_DEBUG_FS 991#ifdef CONFIG_DEBUG_FS
970void mwifiex_debugfs_init(void); 992void mwifiex_debugfs_init(void);
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 5867facd415d..13fbc4eb1595 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -119,6 +119,9 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
119 if (!adapter || !adapter->priv_num) 119 if (!adapter || !adapter->priv_num)
120 return; 120 return;
121 121
122 /* In case driver is removed when asynchronous FW load is in progress */
123 wait_for_completion(&adapter->fw_load);
124
122 if (user_rmmod) { 125 if (user_rmmod) {
123#ifdef CONFIG_PM 126#ifdef CONFIG_PM
124 if (adapter->is_suspended) 127 if (adapter->is_suspended)
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index aff9cd763f2b..74f045715723 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -1048,10 +1048,8 @@ mwifiex_ret_802_11_scan_get_tlv_ptrs(struct mwifiex_adapter *adapter,
1048 * This function parses provided beacon buffer and updates 1048 * This function parses provided beacon buffer and updates
1049 * respective fields in bss descriptor structure. 1049 * respective fields in bss descriptor structure.
1050 */ 1050 */
1051int 1051int mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
1052mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter, 1052 struct mwifiex_bssdescriptor *bss_entry)
1053 struct mwifiex_bssdescriptor *bss_entry,
1054 u8 *ie_buf, u32 ie_len)
1055{ 1053{
1056 int ret = 0; 1054 int ret = 0;
1057 u8 element_id; 1055 u8 element_id;
@@ -1073,10 +1071,8 @@ mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
1073 1071
1074 found_data_rate_ie = false; 1072 found_data_rate_ie = false;
1075 rate_size = 0; 1073 rate_size = 0;
1076 current_ptr = ie_buf; 1074 current_ptr = bss_entry->beacon_buf;
1077 bytes_left = ie_len; 1075 bytes_left = bss_entry->beacon_buf_size;
1078 bss_entry->beacon_buf = ie_buf;
1079 bss_entry->beacon_buf_size = ie_len;
1080 1076
1081 /* Process variable IE */ 1077 /* Process variable IE */
1082 while (bytes_left >= 2) { 1078 while (bytes_left >= 2) {
@@ -1221,9 +1217,9 @@ mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
1221 sizeof(struct ieee_types_header) - 1217 sizeof(struct ieee_types_header) -
1222 bss_entry->beacon_buf); 1218 bss_entry->beacon_buf);
1223 break; 1219 break;
1224 case WLAN_EID_HT_INFORMATION: 1220 case WLAN_EID_HT_OPERATION:
1225 bss_entry->bcn_ht_info = (struct ieee80211_ht_info *) 1221 bss_entry->bcn_ht_oper =
1226 (current_ptr + 1222 (struct ieee80211_ht_operation *)(current_ptr +
1227 sizeof(struct ieee_types_header)); 1223 sizeof(struct ieee_types_header));
1228 bss_entry->ht_info_offset = (u16) (current_ptr + 1224 bss_entry->ht_info_offset = (u16) (current_ptr +
1229 sizeof(struct ieee_types_header) - 1225 sizeof(struct ieee_types_header) -
@@ -1447,15 +1443,12 @@ int mwifiex_check_network_compatibility(struct mwifiex_private *priv,
1447 return ret; 1443 return ret;
1448} 1444}
1449 1445
1450static int 1446static int mwifiex_update_curr_bss_params(struct mwifiex_private *priv,
1451mwifiex_update_curr_bss_params(struct mwifiex_private *priv, u8 *bssid, 1447 struct cfg80211_bss *bss)
1452 s32 rssi, const u8 *ie_buf, size_t ie_len,
1453 u16 beacon_period, u16 cap_info_bitmap, u8 band)
1454{ 1448{
1455 struct mwifiex_bssdescriptor *bss_desc; 1449 struct mwifiex_bssdescriptor *bss_desc;
1456 int ret; 1450 int ret;
1457 unsigned long flags; 1451 unsigned long flags;
1458 u8 *beacon_ie;
1459 1452
1460 /* Allocate and fill new bss descriptor */ 1453 /* Allocate and fill new bss descriptor */
1461 bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor), 1454 bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor),
@@ -1465,16 +1458,7 @@ mwifiex_update_curr_bss_params(struct mwifiex_private *priv, u8 *bssid,
1465 return -ENOMEM; 1458 return -ENOMEM;
1466 } 1459 }
1467 1460
1468 beacon_ie = kmemdup(ie_buf, ie_len, GFP_KERNEL); 1461 ret = mwifiex_fill_new_bss_desc(priv, bss, bss_desc);
1469 if (!beacon_ie) {
1470 kfree(bss_desc);
1471 dev_err(priv->adapter->dev, " failed to alloc beacon_ie\n");
1472 return -ENOMEM;
1473 }
1474
1475 ret = mwifiex_fill_new_bss_desc(priv, bssid, rssi, beacon_ie,
1476 ie_len, beacon_period,
1477 cap_info_bitmap, band, bss_desc);
1478 if (ret) 1462 if (ret)
1479 goto done; 1463 goto done;
1480 1464
@@ -1493,7 +1477,7 @@ mwifiex_update_curr_bss_params(struct mwifiex_private *priv, u8 *bssid,
1493 priv->curr_bss_params.bss_descriptor.bcn_ht_cap = NULL; 1477 priv->curr_bss_params.bss_descriptor.bcn_ht_cap = NULL;
1494 priv->curr_bss_params.bss_descriptor.ht_cap_offset = 1478 priv->curr_bss_params.bss_descriptor.ht_cap_offset =
1495 0; 1479 0;
1496 priv->curr_bss_params.bss_descriptor.bcn_ht_info = NULL; 1480 priv->curr_bss_params.bss_descriptor.bcn_ht_oper = NULL;
1497 priv->curr_bss_params.bss_descriptor.ht_info_offset = 1481 priv->curr_bss_params.bss_descriptor.ht_info_offset =
1498 0; 1482 0;
1499 priv->curr_bss_params.bss_descriptor.bcn_bss_co_2040 = 1483 priv->curr_bss_params.bss_descriptor.bcn_bss_co_2040 =
@@ -1514,7 +1498,6 @@ mwifiex_update_curr_bss_params(struct mwifiex_private *priv, u8 *bssid,
1514 1498
1515done: 1499done:
1516 kfree(bss_desc); 1500 kfree(bss_desc);
1517 kfree(beacon_ie);
1518 return 0; 1501 return 0;
1519} 1502}
1520 1503
@@ -1620,14 +1603,16 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
1620 const u8 *ie_buf; 1603 const u8 *ie_buf;
1621 size_t ie_len; 1604 size_t ie_len;
1622 u16 channel = 0; 1605 u16 channel = 0;
1623 u64 network_tsf = 0; 1606 u64 fw_tsf = 0;
1624 u16 beacon_size = 0; 1607 u16 beacon_size = 0;
1625 u32 curr_bcn_bytes; 1608 u32 curr_bcn_bytes;
1626 u32 freq; 1609 u32 freq;
1627 u16 beacon_period; 1610 u16 beacon_period;
1628 u16 cap_info_bitmap; 1611 u16 cap_info_bitmap;
1629 u8 *current_ptr; 1612 u8 *current_ptr;
1613 u64 timestamp;
1630 struct mwifiex_bcn_param *bcn_param; 1614 struct mwifiex_bcn_param *bcn_param;
1615 struct mwifiex_bss_priv *bss_priv;
1631 1616
1632 if (bytes_left >= sizeof(beacon_size)) { 1617 if (bytes_left >= sizeof(beacon_size)) {
1633 /* Extract & convert beacon size from command buffer */ 1618 /* Extract & convert beacon size from command buffer */
@@ -1667,9 +1652,11 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
1667 1652
1668 memcpy(bssid, bcn_param->bssid, ETH_ALEN); 1653 memcpy(bssid, bcn_param->bssid, ETH_ALEN);
1669 1654
1670 rssi = (s32) (bcn_param->rssi); 1655 rssi = (s32) bcn_param->rssi;
1671 dev_dbg(adapter->dev, "info: InterpretIE: RSSI=%02X\n", rssi); 1656 rssi = (-rssi) * 100; /* Convert dBm to mBm */
1657 dev_dbg(adapter->dev, "info: InterpretIE: RSSI=%d\n", rssi);
1672 1658
1659 timestamp = le64_to_cpu(bcn_param->timestamp);
1673 beacon_period = le16_to_cpu(bcn_param->beacon_period); 1660 beacon_period = le16_to_cpu(bcn_param->beacon_period);
1674 1661
1675 cap_info_bitmap = le16_to_cpu(bcn_param->cap_info_bitmap); 1662 cap_info_bitmap = le16_to_cpu(bcn_param->cap_info_bitmap);
@@ -1709,14 +1696,13 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
1709 1696
1710 /* 1697 /*
1711 * If the TSF TLV was appended to the scan results, save this 1698 * If the TSF TLV was appended to the scan results, save this
1712 * entry's TSF value in the networkTSF field.The networkTSF is 1699 * entry's TSF value in the fw_tsf field. It is the firmware's
1713 * the firmware's TSF value at the time the beacon or probe 1700 * TSF value at the time the beacon or probe response was
1714 * response was received. 1701 * received.
1715 */ 1702 */
1716 if (tsf_tlv) 1703 if (tsf_tlv)
1717 memcpy(&network_tsf, 1704 memcpy(&fw_tsf, &tsf_tlv->tsf_data[idx * TSF_DATA_SIZE],
1718 &tsf_tlv->tsf_data[idx * TSF_DATA_SIZE], 1705 sizeof(fw_tsf));
1719 sizeof(network_tsf));
1720 1706
1721 if (channel) { 1707 if (channel) {
1722 struct ieee80211_channel *chan; 1708 struct ieee80211_channel *chan;
@@ -1739,21 +1725,19 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
1739 1725
1740 if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) { 1726 if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) {
1741 bss = cfg80211_inform_bss(priv->wdev->wiphy, 1727 bss = cfg80211_inform_bss(priv->wdev->wiphy,
1742 chan, bssid, network_tsf, 1728 chan, bssid, timestamp,
1743 cap_info_bitmap, beacon_period, 1729 cap_info_bitmap, beacon_period,
1744 ie_buf, ie_len, rssi, GFP_KERNEL); 1730 ie_buf, ie_len, rssi, GFP_KERNEL);
1745 *(u8 *)bss->priv = band; 1731 bss_priv = (struct mwifiex_bss_priv *)bss->priv;
1746 cfg80211_put_bss(bss); 1732 bss_priv->band = band;
1747 1733 bss_priv->fw_tsf = fw_tsf;
1748 if (priv->media_connected && 1734 if (priv->media_connected &&
1749 !memcmp(bssid, 1735 !memcmp(bssid,
1750 priv->curr_bss_params.bss_descriptor 1736 priv->curr_bss_params.bss_descriptor
1751 .mac_address, ETH_ALEN)) 1737 .mac_address, ETH_ALEN))
1752 mwifiex_update_curr_bss_params 1738 mwifiex_update_curr_bss_params(priv,
1753 (priv, bssid, rssi, 1739 bss);
1754 ie_buf, ie_len, 1740 cfg80211_put_bss(bss);
1755 beacon_period,
1756 cap_info_bitmap, band);
1757 } 1741 }
1758 } else { 1742 } else {
1759 dev_dbg(adapter->dev, "missing BSS channel IE\n"); 1743 dev_dbg(adapter->dev, "missing BSS channel IE\n");
@@ -2019,8 +2003,8 @@ mwifiex_save_curr_bcn(struct mwifiex_private *priv)
2019 (curr_bss->beacon_buf + 2003 (curr_bss->beacon_buf +
2020 curr_bss->ht_cap_offset); 2004 curr_bss->ht_cap_offset);
2021 2005
2022 if (curr_bss->bcn_ht_info) 2006 if (curr_bss->bcn_ht_oper)
2023 curr_bss->bcn_ht_info = (struct ieee80211_ht_info *) 2007 curr_bss->bcn_ht_oper = (struct ieee80211_ht_operation *)
2024 (curr_bss->beacon_buf + 2008 (curr_bss->beacon_buf +
2025 curr_bss->ht_info_offset); 2009 curr_bss->ht_info_offset);
2026 2010
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index f8012e2b7f7c..1aa45c4295bb 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -123,6 +123,9 @@ mwifiex_sdio_remove(struct sdio_func *func)
123 if (!adapter || !adapter->priv_num) 123 if (!adapter || !adapter->priv_num)
124 return; 124 return;
125 125
126 /* In case driver is removed when asynchronous FW load is in progress */
127 wait_for_completion(&adapter->fw_load);
128
126 if (user_rmmod) { 129 if (user_rmmod) {
127 if (adapter->is_suspended) 130 if (adapter->is_suspended)
128 mwifiex_sdio_resume(adapter->dev); 131 mwifiex_sdio_resume(adapter->dev);
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index a3fb322205b0..0ead152e3d1e 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -193,7 +193,7 @@
193 a->mpa_tx.ports |= (1<<(a->mpa_tx.pkt_cnt+1+(MAX_PORT - \ 193 a->mpa_tx.ports |= (1<<(a->mpa_tx.pkt_cnt+1+(MAX_PORT - \
194 a->mp_end_port))); \ 194 a->mp_end_port))); \
195 a->mpa_tx.pkt_cnt++; \ 195 a->mpa_tx.pkt_cnt++; \
196} while (0); 196} while (0)
197 197
198/* SDIO Tx aggregation limit ? */ 198/* SDIO Tx aggregation limit ? */
199#define MP_TX_AGGR_PKT_LIMIT_REACHED(a) \ 199#define MP_TX_AGGR_PKT_LIMIT_REACHED(a) \
@@ -211,7 +211,7 @@
211 a->mpa_tx.buf_len = 0; \ 211 a->mpa_tx.buf_len = 0; \
212 a->mpa_tx.ports = 0; \ 212 a->mpa_tx.ports = 0; \
213 a->mpa_tx.start_port = 0; \ 213 a->mpa_tx.start_port = 0; \
214} while (0); 214} while (0)
215 215
216/* SDIO Rx aggregation limit ? */ 216/* SDIO Rx aggregation limit ? */
217#define MP_RX_AGGR_PKT_LIMIT_REACHED(a) \ 217#define MP_RX_AGGR_PKT_LIMIT_REACHED(a) \
@@ -242,7 +242,7 @@
242 a->mpa_rx.skb_arr[a->mpa_rx.pkt_cnt] = skb; \ 242 a->mpa_rx.skb_arr[a->mpa_rx.pkt_cnt] = skb; \
243 a->mpa_rx.len_arr[a->mpa_rx.pkt_cnt] = skb->len; \ 243 a->mpa_rx.len_arr[a->mpa_rx.pkt_cnt] = skb->len; \
244 a->mpa_rx.pkt_cnt++; \ 244 a->mpa_rx.pkt_cnt++; \
245} while (0); 245} while (0)
246 246
247/* Reset SDIO Rx aggregation buffer parameters */ 247/* Reset SDIO Rx aggregation buffer parameters */
248#define MP_RX_AGGR_BUF_RESET(a) do { \ 248#define MP_RX_AGGR_BUF_RESET(a) do { \
@@ -250,7 +250,7 @@
250 a->mpa_rx.buf_len = 0; \ 250 a->mpa_rx.buf_len = 0; \
251 a->mpa_rx.ports = 0; \ 251 a->mpa_rx.ports = 0; \
252 a->mpa_rx.start_port = 0; \ 252 a->mpa_rx.start_port = 0; \
253} while (0); 253} while (0)
254 254
255 255
256/* data structure for SDIO MPA TX */ 256/* data structure for SDIO MPA TX */
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 6c8e4594b48b..87ed2a1f6cd9 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -907,6 +907,101 @@ mwifiex_cmd_pcie_host_spec(struct mwifiex_private *priv,
907} 907}
908 908
909/* 909/*
910 * This function prepares command for event subscription, configuration
911 * and query. Events can be subscribed or unsubscribed. Current subscribed
912 * events can be queried. Also, current subscribed events are reported in
913 * every FW response.
914 */
915static int
916mwifiex_cmd_802_11_subsc_evt(struct mwifiex_private *priv,
917 struct host_cmd_ds_command *cmd,
918 struct mwifiex_ds_misc_subsc_evt *subsc_evt_cfg)
919{
920 struct host_cmd_ds_802_11_subsc_evt *subsc_evt = &cmd->params.subsc_evt;
921 struct mwifiex_ie_types_rssi_threshold *rssi_tlv;
922 u16 event_bitmap;
923 u8 *pos;
924
925 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_SUBSCRIBE_EVENT);
926 cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_802_11_subsc_evt) +
927 S_DS_GEN);
928
929 subsc_evt->action = cpu_to_le16(subsc_evt_cfg->action);
930 dev_dbg(priv->adapter->dev, "cmd: action: %d\n", subsc_evt_cfg->action);
931
932 /*For query requests, no configuration TLV structures are to be added.*/
933 if (subsc_evt_cfg->action == HostCmd_ACT_GEN_GET)
934 return 0;
935
936 subsc_evt->events = cpu_to_le16(subsc_evt_cfg->events);
937
938 event_bitmap = subsc_evt_cfg->events;
939 dev_dbg(priv->adapter->dev, "cmd: event bitmap : %16x\n",
940 event_bitmap);
941
942 if (((subsc_evt_cfg->action == HostCmd_ACT_BITWISE_CLR) ||
943 (subsc_evt_cfg->action == HostCmd_ACT_BITWISE_SET)) &&
944 (event_bitmap == 0)) {
945 dev_dbg(priv->adapter->dev, "Error: No event specified "
946 "for bitwise action type\n");
947 return -EINVAL;
948 }
949
950 /*
951 * Append TLV structures for each of the specified events for
952 * subscribing or re-configuring. This is not required for
953 * bitwise unsubscribing request.
954 */
955 if (subsc_evt_cfg->action == HostCmd_ACT_BITWISE_CLR)
956 return 0;
957
958 pos = ((u8 *)subsc_evt) +
959 sizeof(struct host_cmd_ds_802_11_subsc_evt);
960
961 if (event_bitmap & BITMASK_BCN_RSSI_LOW) {
962 rssi_tlv = (struct mwifiex_ie_types_rssi_threshold *) pos;
963
964 rssi_tlv->header.type = cpu_to_le16(TLV_TYPE_RSSI_LOW);
965 rssi_tlv->header.len =
966 cpu_to_le16(sizeof(struct mwifiex_ie_types_rssi_threshold) -
967 sizeof(struct mwifiex_ie_types_header));
968 rssi_tlv->abs_value = subsc_evt_cfg->bcn_l_rssi_cfg.abs_value;
969 rssi_tlv->evt_freq = subsc_evt_cfg->bcn_l_rssi_cfg.evt_freq;
970
971 dev_dbg(priv->adapter->dev, "Cfg Beacon Low Rssi event, "
972 "RSSI:-%d dBm, Freq:%d\n",
973 subsc_evt_cfg->bcn_l_rssi_cfg.abs_value,
974 subsc_evt_cfg->bcn_l_rssi_cfg.evt_freq);
975
976 pos += sizeof(struct mwifiex_ie_types_rssi_threshold);
977 le16_add_cpu(&cmd->size,
978 sizeof(struct mwifiex_ie_types_rssi_threshold));
979 }
980
981 if (event_bitmap & BITMASK_BCN_RSSI_HIGH) {
982 rssi_tlv = (struct mwifiex_ie_types_rssi_threshold *) pos;
983
984 rssi_tlv->header.type = cpu_to_le16(TLV_TYPE_RSSI_HIGH);
985 rssi_tlv->header.len =
986 cpu_to_le16(sizeof(struct mwifiex_ie_types_rssi_threshold) -
987 sizeof(struct mwifiex_ie_types_header));
988 rssi_tlv->abs_value = subsc_evt_cfg->bcn_h_rssi_cfg.abs_value;
989 rssi_tlv->evt_freq = subsc_evt_cfg->bcn_h_rssi_cfg.evt_freq;
990
991 dev_dbg(priv->adapter->dev, "Cfg Beacon High Rssi event, "
992 "RSSI:-%d dBm, Freq:%d\n",
993 subsc_evt_cfg->bcn_h_rssi_cfg.abs_value,
994 subsc_evt_cfg->bcn_h_rssi_cfg.evt_freq);
995
996 pos += sizeof(struct mwifiex_ie_types_rssi_threshold);
997 le16_add_cpu(&cmd->size,
998 sizeof(struct mwifiex_ie_types_rssi_threshold));
999 }
1000
1001 return 0;
1002}
1003
1004/*
910 * This function prepares the commands before sending them to the firmware. 1005 * This function prepares the commands before sending them to the firmware.
911 * 1006 *
912 * This is a generic function which calls specific command preparation 1007 * This is a generic function which calls specific command preparation
@@ -1086,6 +1181,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
1086 case HostCmd_CMD_PCIE_DESC_DETAILS: 1181 case HostCmd_CMD_PCIE_DESC_DETAILS:
1087 ret = mwifiex_cmd_pcie_host_spec(priv, cmd_ptr, cmd_action); 1182 ret = mwifiex_cmd_pcie_host_spec(priv, cmd_ptr, cmd_action);
1088 break; 1183 break;
1184 case HostCmd_CMD_802_11_SUBSCRIBE_EVENT:
1185 ret = mwifiex_cmd_802_11_subsc_evt(priv, cmd_ptr, data_buf);
1186 break;
1089 default: 1187 default:
1090 dev_err(priv->adapter->dev, 1188 dev_err(priv->adapter->dev,
1091 "PREP_CMD: unknown cmd- %#x\n", cmd_no); 1189 "PREP_CMD: unknown cmd- %#x\n", cmd_no);
@@ -1195,7 +1293,7 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta)
1195 if (ret) 1293 if (ret)
1196 return -1; 1294 return -1;
1197 1295
1198 if (first_sta) { 1296 if (first_sta && (priv->adapter->iface_type != MWIFIEX_USB)) {
1199 /* Enable auto deep sleep */ 1297 /* Enable auto deep sleep */
1200 auto_ds.auto_ds = DEEP_SLEEP_ON; 1298 auto_ds.auto_ds = DEEP_SLEEP_ON;
1201 auto_ds.idle_time = DEEP_SLEEP_IDLE_TIME; 1299 auto_ds.idle_time = DEEP_SLEEP_IDLE_TIME;
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 4da19ed0f078..3aa54243dea9 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -119,11 +119,11 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
119 * calculated SNR values. 119 * calculated SNR values.
120 */ 120 */
121static int mwifiex_ret_802_11_rssi_info(struct mwifiex_private *priv, 121static int mwifiex_ret_802_11_rssi_info(struct mwifiex_private *priv,
122 struct host_cmd_ds_command *resp, 122 struct host_cmd_ds_command *resp)
123 struct mwifiex_ds_get_signal *signal)
124{ 123{
125 struct host_cmd_ds_802_11_rssi_info_rsp *rssi_info_rsp = 124 struct host_cmd_ds_802_11_rssi_info_rsp *rssi_info_rsp =
126 &resp->params.rssi_info_rsp; 125 &resp->params.rssi_info_rsp;
126 struct mwifiex_ds_misc_subsc_evt subsc_evt;
127 127
128 priv->data_rssi_last = le16_to_cpu(rssi_info_rsp->data_rssi_last); 128 priv->data_rssi_last = le16_to_cpu(rssi_info_rsp->data_rssi_last);
129 priv->data_nf_last = le16_to_cpu(rssi_info_rsp->data_nf_last); 129 priv->data_nf_last = le16_to_cpu(rssi_info_rsp->data_nf_last);
@@ -137,34 +137,29 @@ static int mwifiex_ret_802_11_rssi_info(struct mwifiex_private *priv,
137 priv->bcn_rssi_avg = le16_to_cpu(rssi_info_rsp->bcn_rssi_avg); 137 priv->bcn_rssi_avg = le16_to_cpu(rssi_info_rsp->bcn_rssi_avg);
138 priv->bcn_nf_avg = le16_to_cpu(rssi_info_rsp->bcn_nf_avg); 138 priv->bcn_nf_avg = le16_to_cpu(rssi_info_rsp->bcn_nf_avg);
139 139
140 /* Need to indicate IOCTL complete */ 140 if (priv->subsc_evt_rssi_state == EVENT_HANDLED)
141 if (signal) { 141 return 0;
142 memset(signal, 0, sizeof(*signal)); 142
143 143 /* Resubscribe low and high rssi events with new thresholds */
144 signal->selector = ALL_RSSI_INFO_MASK; 144 memset(&subsc_evt, 0x00, sizeof(struct mwifiex_ds_misc_subsc_evt));
145 145 subsc_evt.events = BITMASK_BCN_RSSI_LOW | BITMASK_BCN_RSSI_HIGH;
146 /* RSSI */ 146 subsc_evt.action = HostCmd_ACT_BITWISE_SET;
147 signal->bcn_rssi_last = priv->bcn_rssi_last; 147 if (priv->subsc_evt_rssi_state == RSSI_LOW_RECVD) {
148 signal->bcn_rssi_avg = priv->bcn_rssi_avg; 148 subsc_evt.bcn_l_rssi_cfg.abs_value = abs(priv->bcn_rssi_avg -
149 signal->data_rssi_last = priv->data_rssi_last; 149 priv->cqm_rssi_hyst);
150 signal->data_rssi_avg = priv->data_rssi_avg; 150 subsc_evt.bcn_h_rssi_cfg.abs_value = abs(priv->cqm_rssi_thold);
151 151 } else if (priv->subsc_evt_rssi_state == RSSI_HIGH_RECVD) {
152 /* SNR */ 152 subsc_evt.bcn_l_rssi_cfg.abs_value = abs(priv->cqm_rssi_thold);
153 signal->bcn_snr_last = 153 subsc_evt.bcn_h_rssi_cfg.abs_value = abs(priv->bcn_rssi_avg +
154 CAL_SNR(priv->bcn_rssi_last, priv->bcn_nf_last); 154 priv->cqm_rssi_hyst);
155 signal->bcn_snr_avg =
156 CAL_SNR(priv->bcn_rssi_avg, priv->bcn_nf_avg);
157 signal->data_snr_last =
158 CAL_SNR(priv->data_rssi_last, priv->data_nf_last);
159 signal->data_snr_avg =
160 CAL_SNR(priv->data_rssi_avg, priv->data_nf_avg);
161
162 /* NF */
163 signal->bcn_nf_last = priv->bcn_nf_last;
164 signal->bcn_nf_avg = priv->bcn_nf_avg;
165 signal->data_nf_last = priv->data_nf_last;
166 signal->data_nf_avg = priv->data_nf_avg;
167 } 155 }
156 subsc_evt.bcn_l_rssi_cfg.evt_freq = 1;
157 subsc_evt.bcn_h_rssi_cfg.evt_freq = 1;
158
159 priv->subsc_evt_rssi_state = EVENT_HANDLED;
160
161 mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SUBSCRIBE_EVENT,
162 0, 0, &subsc_evt);
168 163
169 return 0; 164 return 0;
170} 165}
@@ -785,6 +780,28 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv,
785} 780}
786 781
787/* 782/*
783 * This function handles the command response for subscribe event command.
784 */
785static int mwifiex_ret_subsc_evt(struct mwifiex_private *priv,
786 struct host_cmd_ds_command *resp,
787 struct mwifiex_ds_misc_subsc_evt *sub_event)
788{
789 struct host_cmd_ds_802_11_subsc_evt *cmd_sub_event =
790 (struct host_cmd_ds_802_11_subsc_evt *)&resp->params.subsc_evt;
791
792 /* For every subscribe event command (Get/Set/Clear), FW reports the
793 * current set of subscribed events*/
794 dev_dbg(priv->adapter->dev, "Bitmap of currently subscribed events: %16x\n",
795 le16_to_cpu(cmd_sub_event->events));
796
797 /*Return the subscribed event info for a Get request*/
798 if (sub_event)
799 sub_event->events = le16_to_cpu(cmd_sub_event->events);
800
801 return 0;
802}
803
804/*
788 * This function handles the command responses. 805 * This function handles the command responses.
789 * 806 *
790 * This is a generic function, which calls command specific 807 * This is a generic function, which calls command specific
@@ -853,7 +870,7 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
853 ret = mwifiex_ret_get_log(priv, resp, data_buf); 870 ret = mwifiex_ret_get_log(priv, resp, data_buf);
854 break; 871 break;
855 case HostCmd_CMD_RSSI_INFO: 872 case HostCmd_CMD_RSSI_INFO:
856 ret = mwifiex_ret_802_11_rssi_info(priv, resp, data_buf); 873 ret = mwifiex_ret_802_11_rssi_info(priv, resp);
857 break; 874 break;
858 case HostCmd_CMD_802_11_SNMP_MIB: 875 case HostCmd_CMD_802_11_SNMP_MIB:
859 ret = mwifiex_ret_802_11_snmp_mib(priv, resp, data_buf); 876 ret = mwifiex_ret_802_11_snmp_mib(priv, resp, data_buf);
@@ -924,6 +941,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
924 break; 941 break;
925 case HostCmd_CMD_PCIE_DESC_DETAILS: 942 case HostCmd_CMD_PCIE_DESC_DETAILS:
926 break; 943 break;
944 case HostCmd_CMD_802_11_SUBSCRIBE_EVENT:
945 ret = mwifiex_ret_subsc_evt(priv, resp, data_buf);
946 break;
927 default: 947 default:
928 dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n", 948 dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n",
929 resp->command); 949 resp->command);
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index cc531b536a56..f6bbb9307f86 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -128,9 +128,6 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv)
128 mwifiex_stop_net_dev_queue(priv->netdev, adapter); 128 mwifiex_stop_net_dev_queue(priv->netdev, adapter);
129 if (netif_carrier_ok(priv->netdev)) 129 if (netif_carrier_ok(priv->netdev))
130 netif_carrier_off(priv->netdev); 130 netif_carrier_off(priv->netdev);
131 /* Reset wireless stats signal info */
132 priv->qual_level = 0;
133 priv->qual_noise = 0;
134} 131}
135 132
136/* 133/*
@@ -317,6 +314,12 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
317 break; 314 break;
318 315
319 case EVENT_RSSI_LOW: 316 case EVENT_RSSI_LOW:
317 cfg80211_cqm_rssi_notify(priv->netdev,
318 NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
319 GFP_KERNEL);
320 mwifiex_send_cmd_async(priv, HostCmd_CMD_RSSI_INFO,
321 HostCmd_ACT_GEN_GET, 0, NULL);
322 priv->subsc_evt_rssi_state = RSSI_LOW_RECVD;
320 dev_dbg(adapter->dev, "event: Beacon RSSI_LOW\n"); 323 dev_dbg(adapter->dev, "event: Beacon RSSI_LOW\n");
321 break; 324 break;
322 case EVENT_SNR_LOW: 325 case EVENT_SNR_LOW:
@@ -326,6 +329,12 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
326 dev_dbg(adapter->dev, "event: MAX_FAIL\n"); 329 dev_dbg(adapter->dev, "event: MAX_FAIL\n");
327 break; 330 break;
328 case EVENT_RSSI_HIGH: 331 case EVENT_RSSI_HIGH:
332 cfg80211_cqm_rssi_notify(priv->netdev,
333 NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
334 GFP_KERNEL);
335 mwifiex_send_cmd_async(priv, HostCmd_CMD_RSSI_INFO,
336 HostCmd_ACT_GEN_GET, 0, NULL);
337 priv->subsc_evt_rssi_state = RSSI_HIGH_RECVD;
329 dev_dbg(adapter->dev, "event: Beacon RSSI_HIGH\n"); 338 dev_dbg(adapter->dev, "event: Beacon RSSI_HIGH\n");
330 break; 339 break;
331 case EVENT_SNR_HIGH: 340 case EVENT_SNR_HIGH:
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index d7b11defafe0..58970e0f7d13 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -155,20 +155,29 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
155 * information. 155 * information.
156 */ 156 */
157int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv, 157int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
158 u8 *bssid, s32 rssi, u8 *ie_buf, 158 struct cfg80211_bss *bss,
159 size_t ie_len, u16 beacon_period,
160 u16 cap_info_bitmap, u8 band,
161 struct mwifiex_bssdescriptor *bss_desc) 159 struct mwifiex_bssdescriptor *bss_desc)
162{ 160{
163 int ret; 161 int ret;
162 u8 *beacon_ie;
163 struct mwifiex_bss_priv *bss_priv = (void *)bss->priv;
164 164
165 memcpy(bss_desc->mac_address, bssid, ETH_ALEN); 165 beacon_ie = kmemdup(bss->information_elements, bss->len_beacon_ies,
166 bss_desc->rssi = rssi; 166 GFP_KERNEL);
167 bss_desc->beacon_buf = ie_buf; 167 if (!beacon_ie) {
168 bss_desc->beacon_buf_size = ie_len; 168 dev_err(priv->adapter->dev, " failed to alloc beacon_ie\n");
169 bss_desc->beacon_period = beacon_period; 169 return -ENOMEM;
170 bss_desc->cap_info_bitmap = cap_info_bitmap; 170 }
171 bss_desc->bss_band = band; 171
172 memcpy(bss_desc->mac_address, bss->bssid, ETH_ALEN);
173 bss_desc->rssi = bss->signal;
174 bss_desc->beacon_buf = beacon_ie;
175 bss_desc->beacon_buf_size = bss->len_beacon_ies;
176 bss_desc->beacon_period = bss->beacon_interval;
177 bss_desc->cap_info_bitmap = bss->capability;
178 bss_desc->bss_band = bss_priv->band;
179 bss_desc->fw_tsf = bss_priv->fw_tsf;
180 bss_desc->timestamp = bss->tsf;
172 if (bss_desc->cap_info_bitmap & WLAN_CAPABILITY_PRIVACY) { 181 if (bss_desc->cap_info_bitmap & WLAN_CAPABILITY_PRIVACY) {
173 dev_dbg(priv->adapter->dev, "info: InterpretIE: AP WEP enabled\n"); 182 dev_dbg(priv->adapter->dev, "info: InterpretIE: AP WEP enabled\n");
174 bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_8021X_WEP; 183 bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_8021X_WEP;
@@ -180,9 +189,9 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
180 else 189 else
181 bss_desc->bss_mode = NL80211_IFTYPE_STATION; 190 bss_desc->bss_mode = NL80211_IFTYPE_STATION;
182 191
183 ret = mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc, 192 ret = mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc);
184 ie_buf, ie_len);
185 193
194 kfree(beacon_ie);
186 return ret; 195 return ret;
187} 196}
188 197
@@ -197,7 +206,6 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
197 int ret; 206 int ret;
198 struct mwifiex_adapter *adapter = priv->adapter; 207 struct mwifiex_adapter *adapter = priv->adapter;
199 struct mwifiex_bssdescriptor *bss_desc = NULL; 208 struct mwifiex_bssdescriptor *bss_desc = NULL;
200 u8 *beacon_ie = NULL;
201 209
202 priv->scan_block = false; 210 priv->scan_block = false;
203 211
@@ -210,19 +218,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
210 return -ENOMEM; 218 return -ENOMEM;
211 } 219 }
212 220
213 beacon_ie = kmemdup(bss->information_elements, 221 ret = mwifiex_fill_new_bss_desc(priv, bss, bss_desc);
214 bss->len_beacon_ies, GFP_KERNEL);
215 if (!beacon_ie) {
216 kfree(bss_desc);
217 dev_err(priv->adapter->dev, " failed to alloc beacon_ie\n");
218 return -ENOMEM;
219 }
220
221 ret = mwifiex_fill_new_bss_desc(priv, bss->bssid, bss->signal,
222 beacon_ie, bss->len_beacon_ies,
223 bss->beacon_interval,
224 bss->capability,
225 *(u8 *)bss->priv, bss_desc);
226 if (ret) 222 if (ret)
227 goto done; 223 goto done;
228 } 224 }
@@ -269,7 +265,6 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
269 (!mwifiex_ssid_cmp(&priv->curr_bss_params.bss_descriptor. 265 (!mwifiex_ssid_cmp(&priv->curr_bss_params.bss_descriptor.
270 ssid, &bss_desc->ssid))) { 266 ssid, &bss_desc->ssid))) {
271 kfree(bss_desc); 267 kfree(bss_desc);
272 kfree(beacon_ie);
273 return 0; 268 return 0;
274 } 269 }
275 270
@@ -304,7 +299,6 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
304 299
305done: 300done:
306 kfree(bss_desc); 301 kfree(bss_desc);
307 kfree(beacon_ie);
308 return ret; 302 return ret;
309} 303}
310 304
@@ -468,7 +462,8 @@ int mwifiex_get_bss_info(struct mwifiex_private *priv,
468 462
469 info->bss_chan = bss_desc->channel; 463 info->bss_chan = bss_desc->channel;
470 464
471 info->region_code = adapter->region_code; 465 memcpy(info->country_code, priv->country_code,
466 IEEE80211_COUNTRY_STRING_LEN);
472 467
473 info->media_connected = priv->media_connected; 468 info->media_connected = priv->media_connected;
474 469
@@ -996,6 +991,39 @@ static int mwifiex_set_wapi_ie(struct mwifiex_private *priv,
996} 991}
997 992
998/* 993/*
994 * IOCTL request handler to set/reset WPS IE.
995 *
996 * The supplied WPS IE is treated as a opaque buffer. Only the first field
997 * is checked to internally enable WPS. If buffer length is zero, the existing
998 * WPS IE is reset.
999 */
1000static int mwifiex_set_wps_ie(struct mwifiex_private *priv,
1001 u8 *ie_data_ptr, u16 ie_len)
1002{
1003 if (ie_len) {
1004 priv->wps_ie = kzalloc(MWIFIEX_MAX_VSIE_LEN, GFP_KERNEL);
1005 if (!priv->wps_ie)
1006 return -ENOMEM;
1007 if (ie_len > sizeof(priv->wps_ie)) {
1008 dev_dbg(priv->adapter->dev,
1009 "info: failed to copy WPS IE, too big\n");
1010 kfree(priv->wps_ie);
1011 return -1;
1012 }
1013 memcpy(priv->wps_ie, ie_data_ptr, ie_len);
1014 priv->wps_ie_len = ie_len;
1015 dev_dbg(priv->adapter->dev, "cmd: Set wps_ie_len=%d IE=%#x\n",
1016 priv->wps_ie_len, priv->wps_ie[0]);
1017 } else {
1018 kfree(priv->wps_ie);
1019 priv->wps_ie_len = ie_len;
1020 dev_dbg(priv->adapter->dev,
1021 "info: Reset wps_ie_len=%d\n", priv->wps_ie_len);
1022 }
1023 return 0;
1024}
1025
1026/*
999 * IOCTL request handler to set WAPI key. 1027 * IOCTL request handler to set WAPI key.
1000 * 1028 *
1001 * This function prepares the correct firmware command and 1029 * This function prepares the correct firmware command and
@@ -1185,39 +1213,6 @@ mwifiex_drv_get_driver_version(struct mwifiex_adapter *adapter, char *version,
1185} 1213}
1186 1214
1187/* 1215/*
1188 * Sends IOCTL request to get signal information.
1189 *
1190 * This function allocates the IOCTL request buffer, fills it
1191 * with requisite parameters and calls the IOCTL handler.
1192 */
1193int mwifiex_get_signal_info(struct mwifiex_private *priv,
1194 struct mwifiex_ds_get_signal *signal)
1195{
1196 int status;
1197
1198 signal->selector = ALL_RSSI_INFO_MASK;
1199
1200 /* Signal info can be obtained only if connected */
1201 if (!priv->media_connected) {
1202 dev_dbg(priv->adapter->dev,
1203 "info: Can not get signal in disconnected state\n");
1204 return -1;
1205 }
1206
1207 status = mwifiex_send_cmd_sync(priv, HostCmd_CMD_RSSI_INFO,
1208 HostCmd_ACT_GEN_GET, 0, signal);
1209
1210 if (!status) {
1211 if (signal->selector & BCN_RSSI_AVG_MASK)
1212 priv->qual_level = signal->bcn_rssi_avg;
1213 if (signal->selector & BCN_NF_AVG_MASK)
1214 priv->qual_noise = signal->bcn_nf_avg;
1215 }
1216
1217 return status;
1218}
1219
1220/*
1221 * Sends IOCTL request to set encoding parameters. 1216 * Sends IOCTL request to set encoding parameters.
1222 * 1217 *
1223 * This function allocates the IOCTL request buffer, fills it 1218 * This function allocates the IOCTL request buffer, fills it
@@ -1441,6 +1436,7 @@ mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr,
1441 priv->wps.session_enable = true; 1436 priv->wps.session_enable = true;
1442 dev_dbg(priv->adapter->dev, 1437 dev_dbg(priv->adapter->dev,
1443 "info: WPS Session Enabled.\n"); 1438 "info: WPS Session Enabled.\n");
1439 ret = mwifiex_set_wps_ie(priv, ie_data_ptr, ie_len);
1444 } 1440 }
1445 1441
1446 /* Append the passed data to the end of the 1442 /* Append the passed data to the end of the
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index 750b695aca12..02ce3b77d3e7 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -145,7 +145,12 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
145 " rx_pkt_offset=%d, rx_pkt_length=%d\n", skb->len, 145 " rx_pkt_offset=%d, rx_pkt_length=%d\n", skb->len,
146 local_rx_pd->rx_pkt_offset, local_rx_pd->rx_pkt_length); 146 local_rx_pd->rx_pkt_offset, local_rx_pd->rx_pkt_length);
147 priv->stats.rx_dropped++; 147 priv->stats.rx_dropped++;
148 dev_kfree_skb_any(skb); 148
149 if (adapter->if_ops.data_complete)
150 adapter->if_ops.data_complete(adapter, skb);
151 else
152 dev_kfree_skb_any(skb);
153
149 return ret; 154 return ret;
150 } 155 }
151 156
@@ -196,8 +201,12 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
196 (u8) local_rx_pd->rx_pkt_type, 201 (u8) local_rx_pd->rx_pkt_type,
197 skb); 202 skb);
198 203
199 if (ret || (rx_pkt_type == PKT_TYPE_BAR)) 204 if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
200 dev_kfree_skb_any(skb); 205 if (adapter->if_ops.data_complete)
206 adapter->if_ops.data_complete(adapter, skb);
207 else
208 dev_kfree_skb_any(skb);
209 }
201 210
202 if (ret) 211 if (ret)
203 priv->stats.rx_dropped++; 212 priv->stats.rx_dropped++;
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
index 7af534feb420..0a046d3a0c16 100644
--- a/drivers/net/wireless/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -149,10 +149,14 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
149 local_tx_pd->bss_num = priv->bss_num; 149 local_tx_pd->bss_num = priv->bss_num;
150 local_tx_pd->bss_type = priv->bss_type; 150 local_tx_pd->bss_type = priv->bss_type;
151 151
152 skb_push(skb, INTF_HEADER_LEN); 152 if (adapter->iface_type == MWIFIEX_USB) {
153 153 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA,
154 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA, 154 skb, NULL);
155 skb, NULL); 155 } else {
156 skb_push(skb, INTF_HEADER_LEN);
157 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
158 skb, NULL);
159 }
156 switch (ret) { 160 switch (ret) {
157 case -EBUSY: 161 case -EBUSY:
158 adapter->data_sent = true; 162 adapter->data_sent = true;
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index d2af8cb98541..e2faec4db108 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -77,12 +77,23 @@ int mwifiex_process_tx(struct mwifiex_private *priv, struct sk_buff *skb,
77 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) 77 if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA)
78 local_tx_pd = 78 local_tx_pd =
79 (struct txpd *) (head_ptr + INTF_HEADER_LEN); 79 (struct txpd *) (head_ptr + INTF_HEADER_LEN);
80 80 if (adapter->iface_type == MWIFIEX_USB) {
81 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA, 81 adapter->data_sent = true;
82 skb, tx_param); 82 skb_pull(skb, INTF_HEADER_LEN);
83 ret = adapter->if_ops.host_to_card(adapter,
84 MWIFIEX_USB_EP_DATA,
85 skb, NULL);
86 } else {
87 ret = adapter->if_ops.host_to_card(adapter,
88 MWIFIEX_TYPE_DATA,
89 skb, tx_param);
90 }
83 } 91 }
84 92
85 switch (ret) { 93 switch (ret) {
94 case -ENOSR:
95 dev_err(adapter->dev, "data: -ENOSR is returned\n");
96 break;
86 case -EBUSY: 97 case -EBUSY:
87 if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) && 98 if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
88 (adapter->pps_uapsd_mode) && (adapter->tx_lock_flag)) { 99 (adapter->pps_uapsd_mode) && (adapter->tx_lock_flag)) {
@@ -135,6 +146,9 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
135 if (!priv) 146 if (!priv)
136 goto done; 147 goto done;
137 148
149 if (adapter->iface_type == MWIFIEX_USB)
150 adapter->data_sent = false;
151
138 mwifiex_set_trans_start(priv->netdev); 152 mwifiex_set_trans_start(priv->netdev);
139 if (!status) { 153 if (!status) {
140 priv->stats.tx_packets++; 154 priv->stats.tx_packets++;
@@ -162,4 +176,5 @@ done:
162 176
163 return 0; 177 return 0;
164} 178}
179EXPORT_SYMBOL_GPL(mwifiex_write_data_complete);
165 180
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
new file mode 100644
index 000000000000..49ebf20c56eb
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -0,0 +1,1052 @@
1/*
2 * Marvell Wireless LAN device driver: USB specific handling
3 *
4 * Copyright (C) 2012, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "main.h"
21#include "usb.h"
22
23#define USB_VERSION "1.0"
24
25static const char usbdriver_name[] = "usb8797";
26
27static u8 user_rmmod;
28static struct mwifiex_if_ops usb_ops;
29static struct semaphore add_remove_card_sem;
30
31static struct usb_device_id mwifiex_usb_table[] = {
32 {USB_DEVICE(USB8797_VID, USB8797_PID_1)},
33 {USB_DEVICE_AND_INTERFACE_INFO(USB8797_VID, USB8797_PID_2,
34 USB_CLASS_VENDOR_SPEC,
35 USB_SUBCLASS_VENDOR_SPEC, 0xff)},
36 { } /* Terminating entry */
37};
38
39MODULE_DEVICE_TABLE(usb, mwifiex_usb_table);
40
41static int mwifiex_usb_submit_rx_urb(struct urb_context *ctx, int size);
42
43/* This function handles received packet. Necessary action is taken based on
44 * cmd/event/data.
45 */
46static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
47 struct sk_buff *skb, u8 ep)
48{
49 struct device *dev = adapter->dev;
50 u32 recv_type;
51 __le32 tmp;
52
53 if (adapter->hs_activated)
54 mwifiex_process_hs_config(adapter);
55
56 if (skb->len < INTF_HEADER_LEN) {
57 dev_err(dev, "%s: invalid skb->len\n", __func__);
58 return -1;
59 }
60
61 switch (ep) {
62 case MWIFIEX_USB_EP_CMD_EVENT:
63 dev_dbg(dev, "%s: EP_CMD_EVENT\n", __func__);
64 skb_copy_from_linear_data(skb, &tmp, INTF_HEADER_LEN);
65 recv_type = le32_to_cpu(tmp);
66 skb_pull(skb, INTF_HEADER_LEN);
67
68 switch (recv_type) {
69 case MWIFIEX_USB_TYPE_CMD:
70 if (skb->len > MWIFIEX_SIZE_OF_CMD_BUFFER) {
71 dev_err(dev, "CMD: skb->len too large\n");
72 return -1;
73 } else if (!adapter->curr_cmd) {
74 dev_dbg(dev, "CMD: no curr_cmd\n");
75 if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
76 mwifiex_process_sleep_confirm_resp(
77 adapter, skb->data,
78 skb->len);
79 return 0;
80 }
81 return -1;
82 }
83
84 adapter->curr_cmd->resp_skb = skb;
85 adapter->cmd_resp_received = true;
86 break;
87 case MWIFIEX_USB_TYPE_EVENT:
88 if (skb->len < sizeof(u32)) {
89 dev_err(dev, "EVENT: skb->len too small\n");
90 return -1;
91 }
92 skb_copy_from_linear_data(skb, &tmp, sizeof(u32));
93 adapter->event_cause = le32_to_cpu(tmp);
94 skb_pull(skb, sizeof(u32));
95 dev_dbg(dev, "event_cause %#x\n", adapter->event_cause);
96
97 if (skb->len > MAX_EVENT_SIZE) {
98 dev_err(dev, "EVENT: event body too large\n");
99 return -1;
100 }
101
102 skb_copy_from_linear_data(skb, adapter->event_body,
103 skb->len);
104 adapter->event_received = true;
105 adapter->event_skb = skb;
106 break;
107 default:
108 dev_err(dev, "unknown recv_type %#x\n", recv_type);
109 return -1;
110 }
111 break;
112 case MWIFIEX_USB_EP_DATA:
113 dev_dbg(dev, "%s: EP_DATA\n", __func__);
114 if (skb->len > MWIFIEX_RX_DATA_BUF_SIZE) {
115 dev_err(dev, "DATA: skb->len too large\n");
116 return -1;
117 }
118 skb_queue_tail(&adapter->usb_rx_data_q, skb);
119 adapter->data_received = true;
120 break;
121 default:
122 dev_err(dev, "%s: unknown endport %#x\n", __func__, ep);
123 return -1;
124 }
125
126 return -EINPROGRESS;
127}
128
129static void mwifiex_usb_rx_complete(struct urb *urb)
130{
131 struct urb_context *context = (struct urb_context *)urb->context;
132 struct mwifiex_adapter *adapter = context->adapter;
133 struct sk_buff *skb = context->skb;
134 struct usb_card_rec *card;
135 int recv_length = urb->actual_length;
136 int size, status;
137
138 if (!adapter || !adapter->card) {
139 pr_err("mwifiex adapter or card structure is not valid\n");
140 return;
141 }
142
143 card = (struct usb_card_rec *)adapter->card;
144 if (card->rx_cmd_ep == context->ep)
145 atomic_dec(&card->rx_cmd_urb_pending);
146 else
147 atomic_dec(&card->rx_data_urb_pending);
148
149 if (recv_length) {
150 if (urb->status || (adapter->surprise_removed)) {
151 dev_err(adapter->dev,
152 "URB status is failed: %d\n", urb->status);
153 /* Do not free skb in case of command ep */
154 if (card->rx_cmd_ep != context->ep)
155 dev_kfree_skb_any(skb);
156 goto setup_for_next;
157 }
158 if (skb->len > recv_length)
159 skb_trim(skb, recv_length);
160 else
161 skb_put(skb, recv_length - skb->len);
162
163 atomic_inc(&adapter->rx_pending);
164 status = mwifiex_usb_recv(adapter, skb, context->ep);
165
166 dev_dbg(adapter->dev, "info: recv_length=%d, status=%d\n",
167 recv_length, status);
168 if (status == -EINPROGRESS) {
169 queue_work(adapter->workqueue, &adapter->main_work);
170
171 /* urb for data_ep is re-submitted now;
172 * urb for cmd_ep will be re-submitted in callback
173 * mwifiex_usb_recv_complete
174 */
175 if (card->rx_cmd_ep == context->ep)
176 return;
177 } else {
178 atomic_dec(&adapter->rx_pending);
179 if (status == -1)
180 dev_err(adapter->dev,
181 "received data processing failed!\n");
182
183 /* Do not free skb in case of command ep */
184 if (card->rx_cmd_ep != context->ep)
185 dev_kfree_skb_any(skb);
186 }
187 } else if (urb->status) {
188 if (!adapter->is_suspended) {
189 dev_warn(adapter->dev,
190 "Card is removed: %d\n", urb->status);
191 adapter->surprise_removed = true;
192 }
193 dev_kfree_skb_any(skb);
194 return;
195 } else {
196 /* Do not free skb in case of command ep */
197 if (card->rx_cmd_ep != context->ep)
198 dev_kfree_skb_any(skb);
199
200 /* fall through setup_for_next */
201 }
202
203setup_for_next:
204 if (card->rx_cmd_ep == context->ep)
205 size = MWIFIEX_RX_CMD_BUF_SIZE;
206 else
207 size = MWIFIEX_RX_DATA_BUF_SIZE;
208
209 mwifiex_usb_submit_rx_urb(context, size);
210
211 return;
212}
213
214static void mwifiex_usb_tx_complete(struct urb *urb)
215{
216 struct urb_context *context = (struct urb_context *)(urb->context);
217 struct mwifiex_adapter *adapter = context->adapter;
218 struct usb_card_rec *card = adapter->card;
219
220 dev_dbg(adapter->dev, "%s: status: %d\n", __func__, urb->status);
221
222 if (context->ep == card->tx_cmd_ep) {
223 dev_dbg(adapter->dev, "%s: CMD\n", __func__);
224 atomic_dec(&card->tx_cmd_urb_pending);
225 adapter->cmd_sent = false;
226 } else {
227 dev_dbg(adapter->dev, "%s: DATA\n", __func__);
228 atomic_dec(&card->tx_data_urb_pending);
229 mwifiex_write_data_complete(adapter, context->skb,
230 urb->status ? -1 : 0);
231 }
232
233 queue_work(adapter->workqueue, &adapter->main_work);
234
235 return;
236}
237
238static int mwifiex_usb_submit_rx_urb(struct urb_context *ctx, int size)
239{
240 struct mwifiex_adapter *adapter = ctx->adapter;
241 struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
242
243 if (card->rx_cmd_ep != ctx->ep) {
244 ctx->skb = dev_alloc_skb(size);
245 if (!ctx->skb) {
246 dev_err(adapter->dev,
247 "%s: dev_alloc_skb failed\n", __func__);
248 return -ENOMEM;
249 }
250 }
251
252 usb_fill_bulk_urb(ctx->urb, card->udev,
253 usb_rcvbulkpipe(card->udev, ctx->ep), ctx->skb->data,
254 size, mwifiex_usb_rx_complete, (void *)ctx);
255
256 if (card->rx_cmd_ep == ctx->ep)
257 atomic_inc(&card->rx_cmd_urb_pending);
258 else
259 atomic_inc(&card->rx_data_urb_pending);
260
261 if (usb_submit_urb(ctx->urb, GFP_ATOMIC)) {
262 dev_err(adapter->dev, "usb_submit_urb failed\n");
263 dev_kfree_skb_any(ctx->skb);
264 ctx->skb = NULL;
265
266 if (card->rx_cmd_ep == ctx->ep)
267 atomic_dec(&card->rx_cmd_urb_pending);
268 else
269 atomic_dec(&card->rx_data_urb_pending);
270
271 return -1;
272 }
273
274 return 0;
275}
276
277static void mwifiex_usb_free(struct usb_card_rec *card)
278{
279 int i;
280
281 if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb)
282 usb_kill_urb(card->rx_cmd.urb);
283
284 usb_free_urb(card->rx_cmd.urb);
285 card->rx_cmd.urb = NULL;
286
287 if (atomic_read(&card->rx_data_urb_pending))
288 for (i = 0; i < MWIFIEX_RX_DATA_URB; i++)
289 if (card->rx_data_list[i].urb)
290 usb_kill_urb(card->rx_data_list[i].urb);
291
292 for (i = 0; i < MWIFIEX_RX_DATA_URB; i++) {
293 usb_free_urb(card->rx_data_list[i].urb);
294 card->rx_data_list[i].urb = NULL;
295 }
296
297 for (i = 0; i < MWIFIEX_TX_DATA_URB; i++) {
298 usb_free_urb(card->tx_data_list[i].urb);
299 card->tx_data_list[i].urb = NULL;
300 }
301
302 usb_free_urb(card->tx_cmd.urb);
303 card->tx_cmd.urb = NULL;
304
305 return;
306}
307
308/* This function probes an mwifiex device and registers it. It allocates
309 * the card structure, initiates the device registration and initialization
310 * procedure by adding a logical interface.
311 */
312static int mwifiex_usb_probe(struct usb_interface *intf,
313 const struct usb_device_id *id)
314{
315 struct usb_device *udev = interface_to_usbdev(intf);
316 struct usb_host_interface *iface_desc = intf->cur_altsetting;
317 struct usb_endpoint_descriptor *epd;
318 int ret, i;
319 struct usb_card_rec *card;
320 u16 id_vendor, id_product, bcd_device, bcd_usb;
321
322 card = kzalloc(sizeof(struct usb_card_rec), GFP_KERNEL);
323 if (!card)
324 return -ENOMEM;
325
326 id_vendor = le16_to_cpu(udev->descriptor.idVendor);
327 id_product = le16_to_cpu(udev->descriptor.idProduct);
328 bcd_device = le16_to_cpu(udev->descriptor.bcdDevice);
329 bcd_usb = le16_to_cpu(udev->descriptor.bcdUSB);
330 pr_debug("info: VID/PID = %X/%X, Boot2 version = %X\n",
331 id_vendor, id_product, bcd_device);
332
333 /* PID_1 is used for firmware downloading only */
334 if (id_product == USB8797_PID_1)
335 card->usb_boot_state = USB8797_FW_DNLD;
336 else
337 card->usb_boot_state = USB8797_FW_READY;
338
339 card->udev = udev;
340 card->intf = intf;
341
342 pr_debug("info: bcdUSB=%#x Device Class=%#x SubClass=%#x Protocl=%#x\n",
343 udev->descriptor.bcdUSB, udev->descriptor.bDeviceClass,
344 udev->descriptor.bDeviceSubClass,
345 udev->descriptor.bDeviceProtocol);
346
347 for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
348 epd = &iface_desc->endpoint[i].desc;
349 if (usb_endpoint_dir_in(epd) &&
350 usb_endpoint_num(epd) == MWIFIEX_USB_EP_CMD_EVENT &&
351 usb_endpoint_xfer_bulk(epd)) {
352 pr_debug("info: bulk IN: max pkt size: %d, addr: %d\n",
353 le16_to_cpu(epd->wMaxPacketSize),
354 epd->bEndpointAddress);
355 card->rx_cmd_ep = usb_endpoint_num(epd);
356 atomic_set(&card->rx_cmd_urb_pending, 0);
357 }
358 if (usb_endpoint_dir_in(epd) &&
359 usb_endpoint_num(epd) == MWIFIEX_USB_EP_DATA &&
360 usb_endpoint_xfer_bulk(epd)) {
361 pr_debug("info: bulk IN: max pkt size: %d, addr: %d\n",
362 le16_to_cpu(epd->wMaxPacketSize),
363 epd->bEndpointAddress);
364 card->rx_data_ep = usb_endpoint_num(epd);
365 atomic_set(&card->rx_data_urb_pending, 0);
366 }
367 if (usb_endpoint_dir_out(epd) &&
368 usb_endpoint_num(epd) == MWIFIEX_USB_EP_DATA &&
369 usb_endpoint_xfer_bulk(epd)) {
370 pr_debug("info: bulk OUT: max pkt size: %d, addr: %d\n",
371 le16_to_cpu(epd->wMaxPacketSize),
372 epd->bEndpointAddress);
373 card->tx_data_ep = usb_endpoint_num(epd);
374 atomic_set(&card->tx_data_urb_pending, 0);
375 }
376 if (usb_endpoint_dir_out(epd) &&
377 usb_endpoint_num(epd) == MWIFIEX_USB_EP_CMD_EVENT &&
378 usb_endpoint_xfer_bulk(epd)) {
379 pr_debug("info: bulk OUT: max pkt size: %d, addr: %d\n",
380 le16_to_cpu(epd->wMaxPacketSize),
381 epd->bEndpointAddress);
382 card->tx_cmd_ep = usb_endpoint_num(epd);
383 atomic_set(&card->tx_cmd_urb_pending, 0);
384 card->bulk_out_maxpktsize =
385 le16_to_cpu(epd->wMaxPacketSize);
386 }
387 }
388
389 usb_set_intfdata(intf, card);
390
391 ret = mwifiex_add_card(card, &add_remove_card_sem, &usb_ops,
392 MWIFIEX_USB);
393 if (ret) {
394 pr_err("%s: mwifiex_add_card failed: %d\n", __func__, ret);
395 usb_reset_device(udev);
396 kfree(card);
397 return ret;
398 }
399
400 usb_get_dev(udev);
401
402 return 0;
403}
404
405/* Kernel needs to suspend all functions separately. Therefore all
406 * registered functions must have drivers with suspend and resume
407 * methods. Failing that the kernel simply removes the whole card.
408 *
409 * If already not suspended, this function allocates and sends a
410 * 'host sleep activate' request to the firmware and turns off the traffic.
411 */
412static int mwifiex_usb_suspend(struct usb_interface *intf, pm_message_t message)
413{
414 struct usb_card_rec *card = usb_get_intfdata(intf);
415 struct mwifiex_adapter *adapter;
416 int i;
417
418 if (!card || !card->adapter) {
419 pr_err("%s: card or card->adapter is NULL\n", __func__);
420 return 0;
421 }
422 adapter = card->adapter;
423
424 if (unlikely(adapter->is_suspended))
425 dev_warn(adapter->dev, "Device already suspended\n");
426
427 mwifiex_enable_hs(adapter);
428
429 /* 'is_suspended' flag indicates device is suspended.
430 * It must be set here before the usb_kill_urb() calls. Reason
431 * is in the complete handlers, urb->status(= -ENOENT) and
432 * this flag is used in combination to distinguish between a
433 * 'suspended' state and a 'disconnect' one.
434 */
435 adapter->is_suspended = true;
436
437 for (i = 0; i < adapter->priv_num; i++)
438 netif_carrier_off(adapter->priv[i]->netdev);
439
440 if (atomic_read(&card->rx_cmd_urb_pending) && card->rx_cmd.urb)
441 usb_kill_urb(card->rx_cmd.urb);
442
443 if (atomic_read(&card->rx_data_urb_pending))
444 for (i = 0; i < MWIFIEX_RX_DATA_URB; i++)
445 if (card->rx_data_list[i].urb)
446 usb_kill_urb(card->rx_data_list[i].urb);
447
448 for (i = 0; i < MWIFIEX_TX_DATA_URB; i++)
449 if (card->tx_data_list[i].urb)
450 usb_kill_urb(card->tx_data_list[i].urb);
451
452 if (card->tx_cmd.urb)
453 usb_kill_urb(card->tx_cmd.urb);
454
455 return 0;
456}
457
458/* Kernel needs to suspend all functions separately. Therefore all
459 * registered functions must have drivers with suspend and resume
460 * methods. Failing that the kernel simply removes the whole card.
461 *
462 * If already not resumed, this function turns on the traffic and
463 * sends a 'host sleep cancel' request to the firmware.
464 */
465static int mwifiex_usb_resume(struct usb_interface *intf)
466{
467 struct usb_card_rec *card = usb_get_intfdata(intf);
468 struct mwifiex_adapter *adapter;
469 int i;
470
471 if (!card || !card->adapter) {
472 pr_err("%s: card or card->adapter is NULL\n", __func__);
473 return 0;
474 }
475 adapter = card->adapter;
476
477 if (unlikely(!adapter->is_suspended)) {
478 dev_warn(adapter->dev, "Device already resumed\n");
479 return 0;
480 }
481
482 /* Indicate device resumed. The netdev queue will be resumed only
483 * after the urbs have been re-submitted
484 */
485 adapter->is_suspended = false;
486
487 if (!atomic_read(&card->rx_data_urb_pending))
488 for (i = 0; i < MWIFIEX_RX_DATA_URB; i++)
489 mwifiex_usb_submit_rx_urb(&card->rx_data_list[i],
490 MWIFIEX_RX_DATA_BUF_SIZE);
491
492 if (!atomic_read(&card->rx_cmd_urb_pending)) {
493 card->rx_cmd.skb = dev_alloc_skb(MWIFIEX_RX_CMD_BUF_SIZE);
494 if (card->rx_cmd.skb)
495 mwifiex_usb_submit_rx_urb(&card->rx_cmd,
496 MWIFIEX_RX_CMD_BUF_SIZE);
497 }
498
499 for (i = 0; i < adapter->priv_num; i++)
500 if (adapter->priv[i]->media_connected)
501 netif_carrier_on(adapter->priv[i]->netdev);
502
503 /* Disable Host Sleep */
504 if (adapter->hs_activated)
505 mwifiex_cancel_hs(mwifiex_get_priv(adapter,
506 MWIFIEX_BSS_ROLE_ANY),
507 MWIFIEX_ASYNC_CMD);
508
509#ifdef CONFIG_PM
510 /* Resume handler may be called due to remote wakeup,
511 * force to exit suspend anyway
512 */
513 usb_disable_autosuspend(card->udev);
514#endif /* CONFIG_PM */
515
516 return 0;
517}
518
519static void mwifiex_usb_disconnect(struct usb_interface *intf)
520{
521 struct usb_card_rec *card = usb_get_intfdata(intf);
522 struct mwifiex_adapter *adapter;
523 int i;
524
525 if (!card || !card->adapter) {
526 pr_err("%s: card or card->adapter is NULL\n", __func__);
527 return;
528 }
529
530 adapter = card->adapter;
531 if (!adapter->priv_num)
532 return;
533
534 /* In case driver is removed when asynchronous FW downloading is
535 * in progress
536 */
537 wait_for_completion(&adapter->fw_load);
538
539 if (user_rmmod) {
540#ifdef CONFIG_PM
541 if (adapter->is_suspended)
542 mwifiex_usb_resume(intf);
543#endif
544 for (i = 0; i < adapter->priv_num; i++)
545 if ((GET_BSS_ROLE(adapter->priv[i]) ==
546 MWIFIEX_BSS_ROLE_STA) &&
547 adapter->priv[i]->media_connected)
548 mwifiex_deauthenticate(adapter->priv[i], NULL);
549
550 mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
551 MWIFIEX_BSS_ROLE_ANY),
552 MWIFIEX_FUNC_SHUTDOWN);
553 }
554
555 mwifiex_usb_free(card);
556
557 dev_dbg(adapter->dev, "%s: removing card\n", __func__);
558 mwifiex_remove_card(adapter, &add_remove_card_sem);
559
560 usb_set_intfdata(intf, NULL);
561 usb_put_dev(interface_to_usbdev(intf));
562 kfree(card);
563
564 return;
565}
566
567static struct usb_driver mwifiex_usb_driver = {
568 .name = usbdriver_name,
569 .probe = mwifiex_usb_probe,
570 .disconnect = mwifiex_usb_disconnect,
571 .id_table = mwifiex_usb_table,
572 .suspend = mwifiex_usb_suspend,
573 .resume = mwifiex_usb_resume,
574 .supports_autosuspend = 1,
575};
576
577static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
578{
579 struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
580 int i;
581
582 card->tx_cmd.adapter = adapter;
583 card->tx_cmd.ep = card->tx_cmd_ep;
584
585 card->tx_cmd.urb = usb_alloc_urb(0, GFP_KERNEL);
586 if (!card->tx_cmd.urb) {
587 dev_err(adapter->dev, "tx_cmd.urb allocation failed\n");
588 return -ENOMEM;
589 }
590
591 card->tx_data_ix = 0;
592
593 for (i = 0; i < MWIFIEX_TX_DATA_URB; i++) {
594 card->tx_data_list[i].adapter = adapter;
595 card->tx_data_list[i].ep = card->tx_data_ep;
596
597 card->tx_data_list[i].urb = usb_alloc_urb(0, GFP_KERNEL);
598 if (!card->tx_data_list[i].urb) {
599 dev_err(adapter->dev,
600 "tx_data_list[] urb allocation failed\n");
601 return -ENOMEM;
602 }
603 }
604
605 return 0;
606}
607
608static int mwifiex_usb_rx_init(struct mwifiex_adapter *adapter)
609{
610 struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
611 int i;
612
613 card->rx_cmd.adapter = adapter;
614 card->rx_cmd.ep = card->rx_cmd_ep;
615
616 card->rx_cmd.urb = usb_alloc_urb(0, GFP_KERNEL);
617 if (!card->rx_cmd.urb) {
618 dev_err(adapter->dev, "rx_cmd.urb allocation failed\n");
619 return -ENOMEM;
620 }
621
622 card->rx_cmd.skb = dev_alloc_skb(MWIFIEX_RX_CMD_BUF_SIZE);
623 if (!card->rx_cmd.skb) {
624 dev_err(adapter->dev, "rx_cmd.skb allocation failed\n");
625 return -ENOMEM;
626 }
627
628 if (mwifiex_usb_submit_rx_urb(&card->rx_cmd, MWIFIEX_RX_CMD_BUF_SIZE))
629 return -1;
630
631 for (i = 0; i < MWIFIEX_RX_DATA_URB; i++) {
632 card->rx_data_list[i].adapter = adapter;
633 card->rx_data_list[i].ep = card->rx_data_ep;
634
635 card->rx_data_list[i].urb = usb_alloc_urb(0, GFP_KERNEL);
636 if (!card->rx_data_list[i].urb) {
637 dev_err(adapter->dev,
638 "rx_data_list[] urb allocation failed\n");
639 return -1;
640 }
641 if (mwifiex_usb_submit_rx_urb(&card->rx_data_list[i],
642 MWIFIEX_RX_DATA_BUF_SIZE))
643 return -1;
644 }
645
646 return 0;
647}
648
649static int mwifiex_write_data_sync(struct mwifiex_adapter *adapter, u8 *pbuf,
650 u32 *len, u8 ep, u32 timeout)
651{
652 struct usb_card_rec *card = adapter->card;
653 int actual_length, ret;
654
655 if (!(*len % card->bulk_out_maxpktsize))
656 (*len)++;
657
658 /* Send the data block */
659 ret = usb_bulk_msg(card->udev, usb_sndbulkpipe(card->udev, ep), pbuf,
660 *len, &actual_length, timeout);
661 if (ret) {
662 dev_err(adapter->dev, "usb_bulk_msg for tx failed: %d\n", ret);
663 ret = -1;
664 }
665
666 *len = actual_length;
667
668 return ret;
669}
670
671static int mwifiex_read_data_sync(struct mwifiex_adapter *adapter, u8 *pbuf,
672 u32 *len, u8 ep, u32 timeout)
673{
674 struct usb_card_rec *card = adapter->card;
675 int actual_length, ret;
676
677 /* Receive the data response */
678 ret = usb_bulk_msg(card->udev, usb_rcvbulkpipe(card->udev, ep), pbuf,
679 *len, &actual_length, timeout);
680 if (ret) {
681 dev_err(adapter->dev, "usb_bulk_msg for rx failed: %d\n", ret);
682 ret = -1;
683 }
684
685 *len = actual_length;
686
687 return ret;
688}
689
690/* This function write a command/data packet to card. */
691static int mwifiex_usb_host_to_card(struct mwifiex_adapter *adapter, u8 ep,
692 struct sk_buff *skb,
693 struct mwifiex_tx_param *tx_param)
694{
695 struct usb_card_rec *card = adapter->card;
696 struct urb_context *context;
697 u8 *data = (u8 *)skb->data;
698 struct urb *tx_urb;
699
700 if (adapter->is_suspended) {
701 dev_err(adapter->dev,
702 "%s: not allowed while suspended\n", __func__);
703 return -1;
704 }
705
706 if (adapter->surprise_removed) {
707 dev_err(adapter->dev, "%s: device removed\n", __func__);
708 return -1;
709 }
710
711 if (ep == card->tx_data_ep &&
712 atomic_read(&card->tx_data_urb_pending) >= MWIFIEX_TX_DATA_URB) {
713 return -EBUSY;
714 }
715
716 dev_dbg(adapter->dev, "%s: ep=%d\n", __func__, ep);
717
718 if (ep == card->tx_cmd_ep) {
719 context = &card->tx_cmd;
720 } else {
721 if (card->tx_data_ix >= MWIFIEX_TX_DATA_URB)
722 card->tx_data_ix = 0;
723 context = &card->tx_data_list[card->tx_data_ix++];
724 }
725
726 context->adapter = adapter;
727 context->ep = ep;
728 context->skb = skb;
729 tx_urb = context->urb;
730
731 usb_fill_bulk_urb(tx_urb, card->udev, usb_sndbulkpipe(card->udev, ep),
732 data, skb->len, mwifiex_usb_tx_complete,
733 (void *)context);
734
735 tx_urb->transfer_flags |= URB_ZERO_PACKET;
736
737 if (ep == card->tx_cmd_ep)
738 atomic_inc(&card->tx_cmd_urb_pending);
739 else
740 atomic_inc(&card->tx_data_urb_pending);
741
742 if (usb_submit_urb(tx_urb, GFP_ATOMIC)) {
743 dev_err(adapter->dev, "%s: usb_submit_urb failed\n", __func__);
744 if (ep == card->tx_cmd_ep) {
745 atomic_dec(&card->tx_cmd_urb_pending);
746 } else {
747 atomic_dec(&card->tx_data_urb_pending);
748 if (card->tx_data_ix)
749 card->tx_data_ix--;
750 else
751 card->tx_data_ix = MWIFIEX_TX_DATA_URB;
752 }
753
754 return -1;
755 } else {
756 if (ep == card->tx_data_ep &&
757 atomic_read(&card->tx_data_urb_pending) ==
758 MWIFIEX_TX_DATA_URB)
759 return -ENOSR;
760 }
761
762 return -EINPROGRESS;
763}
764
765/* This function register usb device and initialize parameter. */
766static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
767{
768 struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
769
770 card->adapter = adapter;
771 adapter->dev = &card->udev->dev;
772 strcpy(adapter->fw_name, USB8797_DEFAULT_FW_NAME);
773
774 return 0;
775}
776
777/* This function reads one block of firmware data. */
778static int mwifiex_get_fw_data(struct mwifiex_adapter *adapter,
779 u32 offset, u32 len, u8 *buf)
780{
781 if (!buf || !len)
782 return -1;
783
784 if (offset + len > adapter->firmware->size)
785 return -1;
786
787 memcpy(buf, adapter->firmware->data + offset, len);
788
789 return 0;
790}
791
792static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
793 struct mwifiex_fw_image *fw)
794{
795 int ret = 0;
796 u8 *firmware = fw->fw_buf, *recv_buff;
797 u32 retries = USB8797_FW_MAX_RETRY, dlen;
798 u32 fw_seqnum = 0, tlen = 0, dnld_cmd = 0;
799 struct fw_data *fwdata;
800 struct fw_sync_header sync_fw;
801 u8 check_winner = 1;
802
803 if (!firmware) {
804 dev_err(adapter->dev,
805 "No firmware image found! Terminating download\n");
806 ret = -1;
807 goto fw_exit;
808 }
809
810 /* Allocate memory for transmit */
811 fwdata = kzalloc(FW_DNLD_TX_BUF_SIZE, GFP_KERNEL);
812 if (!fwdata)
813 goto fw_exit;
814
815 /* Allocate memory for receive */
816 recv_buff = kzalloc(FW_DNLD_RX_BUF_SIZE, GFP_KERNEL);
817 if (!recv_buff)
818 goto cleanup;
819
820 do {
821 /* Send pseudo data to check winner status first */
822 if (check_winner) {
823 memset(&fwdata->fw_hdr, 0, sizeof(struct fw_header));
824 dlen = 0;
825 } else {
826 /* copy the header of the fw_data to get the length */
827 if (firmware)
828 memcpy(&fwdata->fw_hdr, &firmware[tlen],
829 sizeof(struct fw_header));
830 else
831 mwifiex_get_fw_data(adapter, tlen,
832 sizeof(struct fw_header),
833 (u8 *)&fwdata->fw_hdr);
834
835 dlen = le32_to_cpu(fwdata->fw_hdr.data_len);
836 dnld_cmd = le32_to_cpu(fwdata->fw_hdr.dnld_cmd);
837 tlen += sizeof(struct fw_header);
838
839 if (firmware)
840 memcpy(fwdata->data, &firmware[tlen], dlen);
841 else
842 mwifiex_get_fw_data(adapter, tlen, dlen,
843 (u8 *)fwdata->data);
844
845 fwdata->seq_num = cpu_to_le32(fw_seqnum);
846 tlen += dlen;
847 }
848
849 /* If the send/receive fails or CRC occurs then retry */
850 while (retries--) {
851 u8 *buf = (u8 *)fwdata;
852 u32 len = FW_DATA_XMIT_SIZE;
853
854 /* send the firmware block */
855 ret = mwifiex_write_data_sync(adapter, buf, &len,
856 MWIFIEX_USB_EP_CMD_EVENT,
857 MWIFIEX_USB_TIMEOUT);
858 if (ret) {
859 dev_err(adapter->dev,
860 "write_data_sync: failed: %d\n", ret);
861 continue;
862 }
863
864 buf = recv_buff;
865 len = FW_DNLD_RX_BUF_SIZE;
866
867 /* Receive the firmware block response */
868 ret = mwifiex_read_data_sync(adapter, buf, &len,
869 MWIFIEX_USB_EP_CMD_EVENT,
870 MWIFIEX_USB_TIMEOUT);
871 if (ret) {
872 dev_err(adapter->dev,
873 "read_data_sync: failed: %d\n", ret);
874 continue;
875 }
876
877 memcpy(&sync_fw, recv_buff,
878 sizeof(struct fw_sync_header));
879
880 /* check 1st firmware block resp for highest bit set */
881 if (check_winner) {
882 if (le32_to_cpu(sync_fw.cmd) & 0x80000000) {
883 dev_warn(adapter->dev,
884 "USB is not the winner %#x\n",
885 sync_fw.cmd);
886
887 /* returning success */
888 ret = 0;
889 goto cleanup;
890 }
891
892 dev_dbg(adapter->dev,
893 "USB is the winner, start to download FW\n");
894
895 check_winner = 0;
896 break;
897 }
898
899 /* check the firmware block response for CRC errors */
900 if (sync_fw.cmd) {
901 dev_err(adapter->dev,
902 "FW received block with CRC %#x\n",
903 sync_fw.cmd);
904 ret = -1;
905 continue;
906 }
907
908 retries = USB8797_FW_MAX_RETRY;
909 break;
910 }
911 fw_seqnum++;
912 } while ((dnld_cmd != FW_HAS_LAST_BLOCK) && retries);
913
914cleanup:
915 dev_dbg(adapter->dev, "%s: %d bytes downloaded\n", __func__, tlen);
916
917 kfree(recv_buff);
918 kfree(fwdata);
919
920 if (retries)
921 ret = 0;
922fw_exit:
923 return ret;
924}
925
926static int mwifiex_usb_dnld_fw(struct mwifiex_adapter *adapter,
927 struct mwifiex_fw_image *fw)
928{
929 int ret;
930 struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
931
932 if (card->usb_boot_state == USB8797_FW_DNLD) {
933 ret = mwifiex_prog_fw_w_helper(adapter, fw);
934 if (ret)
935 return -1;
936
937 /* Boot state changes after successful firmware download */
938 if (card->usb_boot_state == USB8797_FW_DNLD)
939 return -1;
940 }
941
942 ret = mwifiex_usb_rx_init(adapter);
943 if (!ret)
944 ret = mwifiex_usb_tx_init(adapter);
945
946 return ret;
947}
948
949static void mwifiex_submit_rx_urb(struct mwifiex_adapter *adapter, u8 ep)
950{
951 struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
952
953 skb_push(card->rx_cmd.skb, INTF_HEADER_LEN);
954 if ((ep == card->rx_cmd_ep) &&
955 (!atomic_read(&card->rx_cmd_urb_pending)))
956 mwifiex_usb_submit_rx_urb(&card->rx_cmd,
957 MWIFIEX_RX_CMD_BUF_SIZE);
958
959 return;
960}
961
962static int mwifiex_usb_cmd_event_complete(struct mwifiex_adapter *adapter,
963 struct sk_buff *skb)
964{
965 atomic_dec(&adapter->rx_pending);
966 mwifiex_submit_rx_urb(adapter, MWIFIEX_USB_EP_CMD_EVENT);
967
968 return 0;
969}
970
971static int mwifiex_usb_data_complete(struct mwifiex_adapter *adapter,
972 struct sk_buff *skb)
973{
974 atomic_dec(&adapter->rx_pending);
975 dev_kfree_skb_any(skb);
976
977 return 0;
978}
979
980/* This function wakes up the card. */
981static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
982{
983 /* Simulation of HS_AWAKE event */
984 adapter->pm_wakeup_fw_try = false;
985 adapter->pm_wakeup_card_req = false;
986 adapter->ps_state = PS_STATE_AWAKE;
987
988 return 0;
989}
990
991static struct mwifiex_if_ops usb_ops = {
992 .register_dev = mwifiex_register_dev,
993 .wakeup = mwifiex_pm_wakeup_card,
994 .wakeup_complete = mwifiex_pm_wakeup_card_complete,
995
996 /* USB specific */
997 .dnld_fw = mwifiex_usb_dnld_fw,
998 .cmdrsp_complete = mwifiex_usb_cmd_event_complete,
999 .event_complete = mwifiex_usb_cmd_event_complete,
1000 .data_complete = mwifiex_usb_data_complete,
1001 .host_to_card = mwifiex_usb_host_to_card,
1002};
1003
1004/* This function initializes the USB driver module.
1005 *
1006 * This initiates the semaphore and registers the device with
1007 * USB bus.
1008 */
1009static int mwifiex_usb_init_module(void)
1010{
1011 int ret;
1012
1013 pr_debug("Marvell USB8797 Driver\n");
1014
1015 sema_init(&add_remove_card_sem, 1);
1016
1017 ret = usb_register(&mwifiex_usb_driver);
1018 if (ret)
1019 pr_err("Driver register failed!\n");
1020 else
1021 pr_debug("info: Driver registered successfully!\n");
1022
1023 return ret;
1024}
1025
1026/* This function cleans up the USB driver.
1027 *
1028 * The following major steps are followed in .disconnect for cleanup:
1029 * - Resume the device if its suspended
1030 * - Disconnect the device if connected
1031 * - Shutdown the firmware
1032 * - Unregister the device from USB bus.
1033 */
1034static void mwifiex_usb_cleanup_module(void)
1035{
1036 if (!down_interruptible(&add_remove_card_sem))
1037 up(&add_remove_card_sem);
1038
1039 /* set the flag as user is removing this module */
1040 user_rmmod = 1;
1041
1042 usb_deregister(&mwifiex_usb_driver);
1043}
1044
1045module_init(mwifiex_usb_init_module);
1046module_exit(mwifiex_usb_cleanup_module);
1047
1048MODULE_AUTHOR("Marvell International Ltd.");
1049MODULE_DESCRIPTION("Marvell WiFi-Ex USB Driver version" USB_VERSION);
1050MODULE_VERSION(USB_VERSION);
1051MODULE_LICENSE("GPL v2");
1052MODULE_FIRMWARE("mrvl/usb8797_uapsta.bin");
diff --git a/drivers/net/wireless/mwifiex/usb.h b/drivers/net/wireless/mwifiex/usb.h
new file mode 100644
index 000000000000..98c4316cd1a9
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/usb.h
@@ -0,0 +1,99 @@
1/*
2 * This file contains definitions for mwifiex USB interface driver.
3 *
4 * Copyright (C) 2012, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#ifndef _MWIFIEX_USB_H
21#define _MWIFIEX_USB_H
22
23#include <linux/usb.h>
24
25#define USB8797_VID 0x1286
26#define USB8797_PID_1 0x2043
27#define USB8797_PID_2 0x2044
28
29#define USB8797_FW_DNLD 1
30#define USB8797_FW_READY 2
31#define USB8797_FW_MAX_RETRY 3
32
33#define MWIFIEX_TX_DATA_URB 6
34#define MWIFIEX_RX_DATA_URB 6
35#define MWIFIEX_USB_TIMEOUT 100
36
37#define USB8797_DEFAULT_FW_NAME "mrvl/usb8797_uapsta.bin"
38
39#define FW_DNLD_TX_BUF_SIZE 620
40#define FW_DNLD_RX_BUF_SIZE 2048
41#define FW_HAS_LAST_BLOCK 0x00000004
42
43#define FW_DATA_XMIT_SIZE \
44 (sizeof(struct fw_header) + dlen + sizeof(u32))
45
46struct urb_context {
47 struct mwifiex_adapter *adapter;
48 struct sk_buff *skb;
49 struct urb *urb;
50 u8 ep;
51};
52
53struct usb_card_rec {
54 struct mwifiex_adapter *adapter;
55 struct usb_device *udev;
56 struct usb_interface *intf;
57 u8 rx_cmd_ep;
58 struct urb_context rx_cmd;
59 atomic_t rx_cmd_urb_pending;
60 struct urb_context rx_data_list[MWIFIEX_RX_DATA_URB];
61 u8 usb_boot_state;
62 u8 rx_data_ep;
63 atomic_t rx_data_urb_pending;
64 u8 tx_data_ep;
65 u8 tx_cmd_ep;
66 atomic_t tx_data_urb_pending;
67 atomic_t tx_cmd_urb_pending;
68 int bulk_out_maxpktsize;
69 struct urb_context tx_cmd;
70 int tx_data_ix;
71 struct urb_context tx_data_list[MWIFIEX_TX_DATA_URB];
72};
73
74struct fw_header {
75 __le32 dnld_cmd;
76 __le32 base_addr;
77 __le32 data_len;
78 __le32 crc;
79};
80
81struct fw_sync_header {
82 __le32 cmd;
83 __le32 seq_num;
84};
85
86struct fw_data {
87 struct fw_header fw_hdr;
88 __le32 seq_num;
89 u8 data[1];
90};
91
92/* This function is called after the card has woken up. */
93static inline int
94mwifiex_pm_wakeup_card_complete(struct mwifiex_adapter *adapter)
95{
96 return 0;
97}
98
99#endif /*_MWIFIEX_USB_H */
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index 6b399976d6c8..2864c74bdb6f 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -167,6 +167,28 @@ int mwifiex_recv_packet(struct mwifiex_adapter *adapter, struct sk_buff *skb)
167 skb->dev = priv->netdev; 167 skb->dev = priv->netdev;
168 skb->protocol = eth_type_trans(skb, priv->netdev); 168 skb->protocol = eth_type_trans(skb, priv->netdev);
169 skb->ip_summed = CHECKSUM_NONE; 169 skb->ip_summed = CHECKSUM_NONE;
170
171 /* This is required only in case of 11n and USB as we alloc
172 * a buffer of 4K only if its 11N (to be able to receive 4K
173 * AMSDU packets). In case of SD we allocate buffers based
174 * on the size of packet and hence this is not needed.
175 *
176 * Modifying the truesize here as our allocation for each
177 * skb is 4K but we only receive 2K packets and this cause
178 * the kernel to start dropping packets in case where
179 * application has allocated buffer based on 2K size i.e.
180 * if there a 64K packet received (in IP fragments and
181 * application allocates 64K to receive this packet but
182 * this packet would almost double up because we allocate
183 * each 1.5K fragment in 4K and pass it up. As soon as the
184 * 64K limit hits kernel will start to drop rest of the
185 * fragments. Currently we fail the Filesndl-ht.scr script
186 * for UDP, hence this fix
187 */
188 if ((adapter->iface_type == MWIFIEX_USB) &&
189 (skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE))
190 skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
191
170 priv->stats.rx_bytes += skb->len; 192 priv->stats.rx_bytes += skb->len;
171 priv->stats.rx_packets++; 193 priv->stats.rx_packets++;
172 if (in_interrupt()) 194 if (in_interrupt())
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 5a7316c6f125..429a1dee2d26 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -1120,11 +1120,19 @@ mwifiex_send_processed_packet(struct mwifiex_private *priv,
1120 tx_info = MWIFIEX_SKB_TXCB(skb); 1120 tx_info = MWIFIEX_SKB_TXCB(skb);
1121 1121
1122 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags); 1122 spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, ra_list_flags);
1123 tx_param.next_pkt_len = 1123
1124 ((skb_next) ? skb_next->len + 1124 if (adapter->iface_type == MWIFIEX_USB) {
1125 sizeof(struct txpd) : 0); 1125 adapter->data_sent = true;
1126 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA, skb, 1126 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_USB_EP_DATA,
1127 &tx_param); 1127 skb, NULL);
1128 } else {
1129 tx_param.next_pkt_len =
1130 ((skb_next) ? skb_next->len +
1131 sizeof(struct txpd) : 0);
1132 ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
1133 skb, &tx_param);
1134 }
1135
1128 switch (ret) { 1136 switch (ret) {
1129 case -EBUSY: 1137 case -EBUSY:
1130 dev_dbg(adapter->dev, "data: -EBUSY is returned\n"); 1138 dev_dbg(adapter->dev, "data: -EBUSY is returned\n");
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index b48674b577e6..e30cc32f8279 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -5893,18 +5893,7 @@ static struct pci_driver mwl8k_driver = {
5893 .shutdown = __devexit_p(mwl8k_shutdown), 5893 .shutdown = __devexit_p(mwl8k_shutdown),
5894}; 5894};
5895 5895
5896static int __init mwl8k_init(void) 5896module_pci_driver(mwl8k_driver);
5897{
5898 return pci_register_driver(&mwl8k_driver);
5899}
5900
5901static void __exit mwl8k_exit(void)
5902{
5903 pci_unregister_driver(&mwl8k_driver);
5904}
5905
5906module_init(mwl8k_init);
5907module_exit(mwl8k_exit);
5908 5897
5909MODULE_DESCRIPTION(MWL8K_DESC); 5898MODULE_DESCRIPTION(MWL8K_DESC);
5910MODULE_VERSION(MWL8K_VERSION); 5899MODULE_VERSION(MWL8K_VERSION);
diff --git a/drivers/net/wireless/orinoco/fw.c b/drivers/net/wireless/orinoco/fw.c
index 4df8cf64b56c..400a35217644 100644
--- a/drivers/net/wireless/orinoco/fw.c
+++ b/drivers/net/wireless/orinoco/fw.c
@@ -379,11 +379,8 @@ void orinoco_cache_fw(struct orinoco_private *priv, int ap)
379 379
380void orinoco_uncache_fw(struct orinoco_private *priv) 380void orinoco_uncache_fw(struct orinoco_private *priv)
381{ 381{
382 if (priv->cached_pri_fw) 382 release_firmware(priv->cached_pri_fw);
383 release_firmware(priv->cached_pri_fw); 383 release_firmware(priv->cached_fw);
384 if (priv->cached_fw)
385 release_firmware(priv->cached_fw);
386
387 priv->cached_pri_fw = NULL; 384 priv->cached_pri_fw = NULL;
388 priv->cached_fw = NULL; 385 priv->cached_fw = NULL;
389} 386}
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index ee8af1f047c8..7cffea795ad2 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -796,11 +796,14 @@ int p54_register_common(struct ieee80211_hw *dev, struct device *pdev)
796 dev_err(pdev, "Cannot register device (%d).\n", err); 796 dev_err(pdev, "Cannot register device (%d).\n", err);
797 return err; 797 return err;
798 } 798 }
799 priv->registered = true;
799 800
800#ifdef CONFIG_P54_LEDS 801#ifdef CONFIG_P54_LEDS
801 err = p54_init_leds(priv); 802 err = p54_init_leds(priv);
802 if (err) 803 if (err) {
804 p54_unregister_common(dev);
803 return err; 805 return err;
806 }
804#endif /* CONFIG_P54_LEDS */ 807#endif /* CONFIG_P54_LEDS */
805 808
806 dev_info(pdev, "is registered as '%s'\n", wiphy_name(dev->wiphy)); 809 dev_info(pdev, "is registered as '%s'\n", wiphy_name(dev->wiphy));
@@ -840,7 +843,11 @@ void p54_unregister_common(struct ieee80211_hw *dev)
840 p54_unregister_leds(priv); 843 p54_unregister_leds(priv);
841#endif /* CONFIG_P54_LEDS */ 844#endif /* CONFIG_P54_LEDS */
842 845
843 ieee80211_unregister_hw(dev); 846 if (priv->registered) {
847 priv->registered = false;
848 ieee80211_unregister_hw(dev);
849 }
850
844 mutex_destroy(&priv->conf_mutex); 851 mutex_destroy(&priv->conf_mutex);
845 mutex_destroy(&priv->eeprom_mutex); 852 mutex_destroy(&priv->eeprom_mutex);
846} 853}
diff --git a/drivers/net/wireless/p54/p54.h b/drivers/net/wireless/p54/p54.h
index 452fa3a64aa1..40b401ed6845 100644
--- a/drivers/net/wireless/p54/p54.h
+++ b/drivers/net/wireless/p54/p54.h
@@ -173,6 +173,7 @@ struct p54_common {
173 struct sk_buff_head tx_pending; 173 struct sk_buff_head tx_pending;
174 struct sk_buff_head tx_queue; 174 struct sk_buff_head tx_queue;
175 struct mutex conf_mutex; 175 struct mutex conf_mutex;
176 bool registered;
176 177
177 /* memory management (as seen by the firmware) */ 178 /* memory management (as seen by the firmware) */
178 u32 rx_start; 179 u32 rx_start;
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 45df728183fd..89318adc8c7f 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -667,15 +667,4 @@ static struct pci_driver p54p_driver = {
667 .driver.pm = P54P_PM_OPS, 667 .driver.pm = P54P_PM_OPS,
668}; 668};
669 669
670static int __init p54p_init(void) 670module_pci_driver(p54p_driver);
671{
672 return pci_register_driver(&p54p_driver);
673}
674
675static void __exit p54p_exit(void)
676{
677 pci_unregister_driver(&p54p_driver);
678}
679
680module_init(p54p_init);
681module_exit(p54p_exit);
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index f4d28c39aac7..e1eac830e2fc 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -117,21 +117,18 @@ static const struct {
117 u32 intf; 117 u32 intf;
118 enum p54u_hw_type type; 118 enum p54u_hw_type type;
119 const char *fw; 119 const char *fw;
120 const char *fw_legacy;
121 char hw[20]; 120 char hw[20];
122} p54u_fwlist[__NUM_P54U_HWTYPES] = { 121} p54u_fwlist[__NUM_P54U_HWTYPES] = {
123 { 122 {
124 .type = P54U_NET2280, 123 .type = P54U_NET2280,
125 .intf = FW_LM86, 124 .intf = FW_LM86,
126 .fw = "isl3886usb", 125 .fw = "isl3886usb",
127 .fw_legacy = "isl3890usb",
128 .hw = "ISL3886 + net2280", 126 .hw = "ISL3886 + net2280",
129 }, 127 },
130 { 128 {
131 .type = P54U_3887, 129 .type = P54U_3887,
132 .intf = FW_LM87, 130 .intf = FW_LM87,
133 .fw = "isl3887usb", 131 .fw = "isl3887usb",
134 .fw_legacy = "isl3887usb_bare",
135 .hw = "ISL3887", 132 .hw = "ISL3887",
136 }, 133 },
137}; 134};
@@ -208,6 +205,16 @@ static void p54u_free_urbs(struct ieee80211_hw *dev)
208 usb_kill_anchored_urbs(&priv->submitted); 205 usb_kill_anchored_urbs(&priv->submitted);
209} 206}
210 207
208static void p54u_stop(struct ieee80211_hw *dev)
209{
210 /*
211 * TODO: figure out how to reliably stop the 3887 and net2280 so
212 * the hardware is still usable next time we want to start it.
213 * until then, we just stop listening to the hardware..
214 */
215 p54u_free_urbs(dev);
216}
217
211static int p54u_init_urbs(struct ieee80211_hw *dev) 218static int p54u_init_urbs(struct ieee80211_hw *dev)
212{ 219{
213 struct p54u_priv *priv = dev->priv; 220 struct p54u_priv *priv = dev->priv;
@@ -257,6 +264,16 @@ static int p54u_init_urbs(struct ieee80211_hw *dev)
257 return ret; 264 return ret;
258} 265}
259 266
267static int p54u_open(struct ieee80211_hw *dev)
268{
269 /*
270 * TODO: Because we don't know how to reliably stop the 3887 and
271 * the isl3886+net2280, other than brutally cut off all
272 * communications. We have to reinitialize the urbs on every start.
273 */
274 return p54u_init_urbs(dev);
275}
276
260static __le32 p54u_lm87_chksum(const __le32 *data, size_t length) 277static __le32 p54u_lm87_chksum(const __le32 *data, size_t length)
261{ 278{
262 u32 chk = 0; 279 u32 chk = 0;
@@ -836,70 +853,137 @@ fail:
836 return err; 853 return err;
837} 854}
838 855
839static int p54u_load_firmware(struct ieee80211_hw *dev) 856static int p54_find_type(struct p54u_priv *priv)
840{ 857{
841 struct p54u_priv *priv = dev->priv; 858 int i;
842 int err, i;
843
844 BUILD_BUG_ON(ARRAY_SIZE(p54u_fwlist) != __NUM_P54U_HWTYPES);
845 859
846 for (i = 0; i < __NUM_P54U_HWTYPES; i++) 860 for (i = 0; i < __NUM_P54U_HWTYPES; i++)
847 if (p54u_fwlist[i].type == priv->hw_type) 861 if (p54u_fwlist[i].type == priv->hw_type)
848 break; 862 break;
849
850 if (i == __NUM_P54U_HWTYPES) 863 if (i == __NUM_P54U_HWTYPES)
851 return -EOPNOTSUPP; 864 return -EOPNOTSUPP;
852 865
853 err = request_firmware(&priv->fw, p54u_fwlist[i].fw, &priv->udev->dev); 866 return i;
854 if (err) { 867}
855 dev_err(&priv->udev->dev, "(p54usb) cannot load firmware %s "
856 "(%d)!\n", p54u_fwlist[i].fw, err);
857 868
858 err = request_firmware(&priv->fw, p54u_fwlist[i].fw_legacy, 869static int p54u_start_ops(struct p54u_priv *priv)
859 &priv->udev->dev); 870{
860 if (err) 871 struct ieee80211_hw *dev = priv->common.hw;
861 return err; 872 int ret;
862 }
863 873
864 err = p54_parse_firmware(dev, priv->fw); 874 ret = p54_parse_firmware(dev, priv->fw);
865 if (err) 875 if (ret)
866 goto out; 876 goto err_out;
877
878 ret = p54_find_type(priv);
879 if (ret < 0)
880 goto err_out;
867 881
868 if (priv->common.fw_interface != p54u_fwlist[i].intf) { 882 if (priv->common.fw_interface != p54u_fwlist[ret].intf) {
869 dev_err(&priv->udev->dev, "wrong firmware, please get " 883 dev_err(&priv->udev->dev, "wrong firmware, please get "
870 "a firmware for \"%s\" and try again.\n", 884 "a firmware for \"%s\" and try again.\n",
871 p54u_fwlist[i].hw); 885 p54u_fwlist[ret].hw);
872 err = -EINVAL; 886 ret = -ENODEV;
887 goto err_out;
873 } 888 }
874 889
875out: 890 ret = priv->upload_fw(dev);
876 if (err) 891 if (ret)
877 release_firmware(priv->fw); 892 goto err_out;
878 893
879 return err; 894 ret = p54u_open(dev);
895 if (ret)
896 goto err_out;
897
898 ret = p54_read_eeprom(dev);
899 if (ret)
900 goto err_stop;
901
902 p54u_stop(dev);
903
904 ret = p54_register_common(dev, &priv->udev->dev);
905 if (ret)
906 goto err_stop;
907
908 return 0;
909
910err_stop:
911 p54u_stop(dev);
912
913err_out:
914 /*
915 * p54u_disconnect will do the rest of the
916 * cleanup
917 */
918 return ret;
880} 919}
881 920
882static int p54u_open(struct ieee80211_hw *dev) 921static void p54u_load_firmware_cb(const struct firmware *firmware,
922 void *context)
883{ 923{
884 struct p54u_priv *priv = dev->priv; 924 struct p54u_priv *priv = context;
925 struct usb_device *udev = priv->udev;
885 int err; 926 int err;
886 927
887 err = p54u_init_urbs(dev); 928 complete(&priv->fw_wait_load);
888 if (err) { 929 if (firmware) {
889 return err; 930 priv->fw = firmware;
931 err = p54u_start_ops(priv);
932 } else {
933 err = -ENOENT;
934 dev_err(&udev->dev, "Firmware not found.\n");
890 } 935 }
891 936
892 priv->common.open = p54u_init_urbs; 937 if (err) {
938 struct device *parent = priv->udev->dev.parent;
893 939
894 return 0; 940 dev_err(&udev->dev, "failed to initialize device (%d)\n", err);
941
942 if (parent)
943 device_lock(parent);
944
945 device_release_driver(&udev->dev);
946 /*
947 * At this point p54u_disconnect has already freed
948 * the "priv" context. Do not use it anymore!
949 */
950 priv = NULL;
951
952 if (parent)
953 device_unlock(parent);
954 }
955
956 usb_put_dev(udev);
895} 957}
896 958
897static void p54u_stop(struct ieee80211_hw *dev) 959static int p54u_load_firmware(struct ieee80211_hw *dev,
960 struct usb_interface *intf)
898{ 961{
899 /* TODO: figure out how to reliably stop the 3887 and net2280 so 962 struct usb_device *udev = interface_to_usbdev(intf);
900 the hardware is still usable next time we want to start it. 963 struct p54u_priv *priv = dev->priv;
901 until then, we just stop listening to the hardware.. */ 964 struct device *device = &udev->dev;
902 p54u_free_urbs(dev); 965 int err, i;
966
967 BUILD_BUG_ON(ARRAY_SIZE(p54u_fwlist) != __NUM_P54U_HWTYPES);
968
969 init_completion(&priv->fw_wait_load);
970 i = p54_find_type(priv);
971 if (i < 0)
972 return i;
973
974 dev_info(&priv->udev->dev, "Loading firmware file %s\n",
975 p54u_fwlist[i].fw);
976
977 usb_get_dev(udev);
978 err = request_firmware_nowait(THIS_MODULE, 1, p54u_fwlist[i].fw,
979 device, GFP_KERNEL, priv,
980 p54u_load_firmware_cb);
981 if (err) {
982 dev_err(&priv->udev->dev, "(p54usb) cannot load firmware %s "
983 "(%d)!\n", p54u_fwlist[i].fw, err);
984 }
985
986 return err;
903} 987}
904 988
905static int __devinit p54u_probe(struct usb_interface *intf, 989static int __devinit p54u_probe(struct usb_interface *intf,
@@ -969,33 +1053,7 @@ static int __devinit p54u_probe(struct usb_interface *intf,
969 priv->common.tx = p54u_tx_net2280; 1053 priv->common.tx = p54u_tx_net2280;
970 priv->upload_fw = p54u_upload_firmware_net2280; 1054 priv->upload_fw = p54u_upload_firmware_net2280;
971 } 1055 }
972 err = p54u_load_firmware(dev); 1056 err = p54u_load_firmware(dev, intf);
973 if (err)
974 goto err_free_dev;
975
976 err = priv->upload_fw(dev);
977 if (err)
978 goto err_free_fw;
979
980 p54u_open(dev);
981 err = p54_read_eeprom(dev);
982 p54u_stop(dev);
983 if (err)
984 goto err_free_fw;
985
986 err = p54_register_common(dev, &udev->dev);
987 if (err)
988 goto err_free_fw;
989
990 return 0;
991
992err_free_fw:
993 release_firmware(priv->fw);
994
995err_free_dev:
996 p54_free_common(dev);
997 usb_set_intfdata(intf, NULL);
998 usb_put_dev(udev);
999 return err; 1057 return err;
1000} 1058}
1001 1059
@@ -1007,9 +1065,10 @@ static void __devexit p54u_disconnect(struct usb_interface *intf)
1007 if (!dev) 1065 if (!dev)
1008 return; 1066 return;
1009 1067
1068 priv = dev->priv;
1069 wait_for_completion(&priv->fw_wait_load);
1010 p54_unregister_common(dev); 1070 p54_unregister_common(dev);
1011 1071
1012 priv = dev->priv;
1013 usb_put_dev(interface_to_usbdev(intf)); 1072 usb_put_dev(interface_to_usbdev(intf));
1014 release_firmware(priv->fw); 1073 release_firmware(priv->fw);
1015 p54_free_common(dev); 1074 p54_free_common(dev);
@@ -1072,7 +1131,7 @@ static struct usb_driver p54u_driver = {
1072 .name = "p54usb", 1131 .name = "p54usb",
1073 .id_table = p54u_table, 1132 .id_table = p54u_table,
1074 .probe = p54u_probe, 1133 .probe = p54u_probe,
1075 .disconnect = p54u_disconnect, 1134 .disconnect = __devexit_p(p54u_disconnect),
1076 .pre_reset = p54u_pre_reset, 1135 .pre_reset = p54u_pre_reset,
1077 .post_reset = p54u_post_reset, 1136 .post_reset = p54u_post_reset,
1078#ifdef CONFIG_PM 1137#ifdef CONFIG_PM
diff --git a/drivers/net/wireless/p54/p54usb.h b/drivers/net/wireless/p54/p54usb.h
index ed4034ade59a..d273be7272b9 100644
--- a/drivers/net/wireless/p54/p54usb.h
+++ b/drivers/net/wireless/p54/p54usb.h
@@ -143,6 +143,9 @@ struct p54u_priv {
143 struct sk_buff_head rx_queue; 143 struct sk_buff_head rx_queue;
144 struct usb_anchor submitted; 144 struct usb_anchor submitted;
145 const struct firmware *fw; 145 const struct firmware *fw;
146
147 /* asynchronous firmware callback */
148 struct completion fw_wait_load;
146}; 149};
147 150
148#endif /* P54USB_H */ 151#endif /* P54USB_H */
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index a08a6f0e4dd1..7c8f118c2b09 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -914,8 +914,7 @@ void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
914 txhdr->hw_queue = queue; 914 txhdr->hw_queue = queue;
915 txhdr->backlog = priv->tx_stats[queue].len - 1; 915 txhdr->backlog = priv->tx_stats[queue].len - 1;
916 memset(txhdr->durations, 0, sizeof(txhdr->durations)); 916 memset(txhdr->durations, 0, sizeof(txhdr->durations));
917 txhdr->tx_antenna = ((info->antenna_sel_tx == 0) ? 917 txhdr->tx_antenna = 2 & priv->tx_diversity_mask;
918 2 : info->antenna_sel_tx - 1) & priv->tx_diversity_mask;
919 if (priv->rxhw == 5) { 918 if (priv->rxhw == 5) {
920 txhdr->longbow.cts_rate = cts_rate; 919 txhdr->longbow.cts_rate = cts_rate;
921 txhdr->longbow.output_power = cpu_to_le16(priv->output_power); 920 txhdr->longbow.output_power = cpu_to_le16(priv->output_power);
diff --git a/drivers/net/wireless/prism54/oid_mgt.c b/drivers/net/wireless/prism54/oid_mgt.c
index 9b796cae4afe..a01606b36e03 100644
--- a/drivers/net/wireless/prism54/oid_mgt.c
+++ b/drivers/net/wireless/prism54/oid_mgt.c
@@ -693,8 +693,6 @@ mgt_update_addr(islpci_private *priv)
693 return ret; 693 return ret;
694} 694}
695 695
696#define VEC_SIZE(a) ARRAY_SIZE(a)
697
698int 696int
699mgt_commit(islpci_private *priv) 697mgt_commit(islpci_private *priv)
700{ 698{
@@ -704,10 +702,10 @@ mgt_commit(islpci_private *priv)
704 if (islpci_get_state(priv) < PRV_STATE_INIT) 702 if (islpci_get_state(priv) < PRV_STATE_INIT)
705 return 0; 703 return 0;
706 704
707 rvalue = mgt_commit_list(priv, commit_part1, VEC_SIZE(commit_part1)); 705 rvalue = mgt_commit_list(priv, commit_part1, ARRAY_SIZE(commit_part1));
708 706
709 if (priv->iw_mode != IW_MODE_MONITOR) 707 if (priv->iw_mode != IW_MODE_MONITOR)
710 rvalue |= mgt_commit_list(priv, commit_part2, VEC_SIZE(commit_part2)); 708 rvalue |= mgt_commit_list(priv, commit_part2, ARRAY_SIZE(commit_part2));
711 709
712 u = OID_INL_MODE; 710 u = OID_INL_MODE;
713 rvalue |= mgt_commit_list(priv, &u, 1); 711 rvalue |= mgt_commit_list(priv, &u, 1);
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 3a6b40239bc1..5e6b50143165 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -1828,15 +1828,4 @@ static struct pci_driver rt2400pci_driver = {
1828 .resume = rt2x00pci_resume, 1828 .resume = rt2x00pci_resume,
1829}; 1829};
1830 1830
1831static int __init rt2400pci_init(void) 1831module_pci_driver(rt2400pci_driver);
1832{
1833 return pci_register_driver(&rt2400pci_driver);
1834}
1835
1836static void __exit rt2400pci_exit(void)
1837{
1838 pci_unregister_driver(&rt2400pci_driver);
1839}
1840
1841module_init(rt2400pci_init);
1842module_exit(rt2400pci_exit);
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index dcc0e1fcca77..136b849f11b5 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -2119,15 +2119,4 @@ static struct pci_driver rt2500pci_driver = {
2119 .resume = rt2x00pci_resume, 2119 .resume = rt2x00pci_resume,
2120}; 2120};
2121 2121
2122static int __init rt2500pci_init(void) 2122module_pci_driver(rt2500pci_driver);
2123{
2124 return pci_register_driver(&rt2500pci_driver);
2125}
2126
2127static void __exit rt2500pci_exit(void)
2128{
2129 pci_unregister_driver(&rt2500pci_driver);
2130}
2131
2132module_init(rt2500pci_init);
2133module_exit(rt2500pci_exit);
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 1de9c752c88b..c88fd3e61090 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -1912,7 +1912,7 @@ static struct usb_device_id rt2500usb_device_table[] = {
1912 { USB_DEVICE(0x0b05, 0x1706) }, 1912 { USB_DEVICE(0x0b05, 0x1706) },
1913 { USB_DEVICE(0x0b05, 0x1707) }, 1913 { USB_DEVICE(0x0b05, 0x1707) },
1914 /* Belkin */ 1914 /* Belkin */
1915 { USB_DEVICE(0x050d, 0x7050) }, 1915 { USB_DEVICE(0x050d, 0x7050) }, /* FCC ID: K7SF5D7050A ver. 2.x */
1916 { USB_DEVICE(0x050d, 0x7051) }, 1916 { USB_DEVICE(0x050d, 0x7051) },
1917 /* Cisco Systems */ 1917 /* Cisco Systems */
1918 { USB_DEVICE(0x13b1, 0x000d) }, 1918 { USB_DEVICE(0x13b1, 0x000d) },
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 063bfa8b91f4..d91f4f628f65 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -83,6 +83,7 @@
83#define REV_RT3090E 0x0211 83#define REV_RT3090E 0x0211
84#define REV_RT3390E 0x0211 84#define REV_RT3390E 0x0211
85#define REV_RT5390F 0x0502 85#define REV_RT5390F 0x0502
86#define REV_RT5390R 0x1502
86 87
87/* 88/*
88 * Signal information. 89 * Signal information.
@@ -98,7 +99,7 @@
98#define EEPROM_BASE 0x0000 99#define EEPROM_BASE 0x0000
99#define EEPROM_SIZE 0x0110 100#define EEPROM_SIZE 0x0110
100#define BBP_BASE 0x0000 101#define BBP_BASE 0x0000
101#define BBP_SIZE 0x0080 102#define BBP_SIZE 0x00ff
102#define RF_BASE 0x0004 103#define RF_BASE 0x0004
103#define RF_SIZE 0x0010 104#define RF_SIZE 0x0010
104 105
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 6c0a12ea6a15..1cd16b416024 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -290,11 +290,25 @@ int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
290 msleep(10); 290 msleep(10);
291 } 291 }
292 292
293 ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n"); 293 ERROR(rt2x00dev, "WPDMA TX/RX busy [0x%08x].\n", reg);
294 return -EACCES; 294 return -EACCES;
295} 295}
296EXPORT_SYMBOL_GPL(rt2800_wait_wpdma_ready); 296EXPORT_SYMBOL_GPL(rt2800_wait_wpdma_ready);
297 297
298void rt2800_disable_wpdma(struct rt2x00_dev *rt2x00dev)
299{
300 u32 reg;
301
302 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
303 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
304 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
305 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
306 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
307 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
308 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
309}
310EXPORT_SYMBOL_GPL(rt2800_disable_wpdma);
311
298static bool rt2800_check_firmware_crc(const u8 *data, const size_t len) 312static bool rt2800_check_firmware_crc(const u8 *data, const size_t len)
299{ 313{
300 u16 fw_crc; 314 u16 fw_crc;
@@ -412,6 +426,8 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
412 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000002); 426 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000002);
413 } 427 }
414 428
429 rt2800_disable_wpdma(rt2x00dev);
430
415 /* 431 /*
416 * Write firmware to the device. 432 * Write firmware to the device.
417 */ 433 */
@@ -436,10 +452,7 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
436 * Disable DMA, will be reenabled later when enabling 452 * Disable DMA, will be reenabled later when enabling
437 * the radio. 453 * the radio.
438 */ 454 */
439 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg); 455 rt2800_disable_wpdma(rt2x00dev);
440 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
441 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
442 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
443 456
444 /* 457 /*
445 * Initialize firmware. 458 * Initialize firmware.
@@ -2717,13 +2730,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
2717 unsigned int i; 2730 unsigned int i;
2718 int ret; 2731 int ret;
2719 2732
2720 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg); 2733 rt2800_disable_wpdma(rt2x00dev);
2721 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
2722 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
2723 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
2724 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
2725 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
2726 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
2727 2734
2728 ret = rt2800_drv_init_registers(rt2x00dev); 2735 ret = rt2800_drv_init_registers(rt2x00dev);
2729 if (ret) 2736 if (ret)
@@ -3349,6 +3356,13 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3349 rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg); 3356 rt2800_register_write(rt2x00dev, GPIO_CTRL_CFG, reg);
3350 } 3357 }
3351 3358
3359 /* This chip has hardware antenna diversity*/
3360 if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R)) {
3361 rt2800_bbp_write(rt2x00dev, 150, 0); /* Disable Antenna Software OFDM */
3362 rt2800_bbp_write(rt2x00dev, 151, 0); /* Disable Antenna Software CCK */
3363 rt2800_bbp_write(rt2x00dev, 154, 0); /* Clear previously selected antenna */
3364 }
3365
3352 rt2800_bbp_read(rt2x00dev, 152, &value); 3366 rt2800_bbp_read(rt2x00dev, 152, &value);
3353 if (ant == 0) 3367 if (ant == 0)
3354 rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1); 3368 rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1);
@@ -3997,10 +4011,7 @@ void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev)
3997{ 4011{
3998 u32 reg; 4012 u32 reg;
3999 4013
4000 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg); 4014 rt2800_disable_wpdma(rt2x00dev);
4001 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
4002 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
4003 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
4004 4015
4005 /* Wait for DMA, ignore error */ 4016 /* Wait for DMA, ignore error */
4006 rt2800_wait_wpdma_ready(rt2x00dev); 4017 rt2800_wait_wpdma_ready(rt2x00dev);
@@ -4287,6 +4298,11 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
4287 rt2x00dev->default_ant.rx = ANTENNA_A; 4298 rt2x00dev->default_ant.rx = ANTENNA_A;
4288 } 4299 }
4289 4300
4301 if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R)) {
4302 rt2x00dev->default_ant.tx = ANTENNA_HW_DIVERSITY; /* Unused */
4303 rt2x00dev->default_ant.rx = ANTENNA_HW_DIVERSITY; /* Unused */
4304 }
4305
4290 /* 4306 /*
4291 * Determine external LNA informations. 4307 * Determine external LNA informations.
4292 */ 4308 */
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index 419e36cb06be..18a0b67b4c68 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -208,5 +208,6 @@ int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
208 u8 buf_size); 208 u8 buf_size);
209int rt2800_get_survey(struct ieee80211_hw *hw, int idx, 209int rt2800_get_survey(struct ieee80211_hw *hw, int idx,
210 struct survey_info *survey); 210 struct survey_info *survey);
211void rt2800_disable_wpdma(struct rt2x00_dev *rt2x00dev);
211 212
212#endif /* RT2800LIB_H */ 213#endif /* RT2800LIB_H */
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 0397bbf0ce01..931331d95217 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -361,7 +361,6 @@ static void rt2800pci_clear_entry(struct queue_entry *entry)
361static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev) 361static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
362{ 362{
363 struct queue_entry_priv_pci *entry_priv; 363 struct queue_entry_priv_pci *entry_priv;
364 u32 reg;
365 364
366 /* 365 /*
367 * Initialize registers. 366 * Initialize registers.
@@ -394,6 +393,16 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
394 rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX3, 0); 393 rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX3, 0);
395 rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX3, 0); 394 rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX3, 0);
396 395
396 rt2x00pci_register_write(rt2x00dev, TX_BASE_PTR4, 0);
397 rt2x00pci_register_write(rt2x00dev, TX_MAX_CNT4, 0);
398 rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX4, 0);
399 rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX4, 0);
400
401 rt2x00pci_register_write(rt2x00dev, TX_BASE_PTR5, 0);
402 rt2x00pci_register_write(rt2x00dev, TX_MAX_CNT5, 0);
403 rt2x00pci_register_write(rt2x00dev, TX_CTX_IDX5, 0);
404 rt2x00pci_register_write(rt2x00dev, TX_DTX_IDX5, 0);
405
397 entry_priv = rt2x00dev->rx->entries[0].priv_data; 406 entry_priv = rt2x00dev->rx->entries[0].priv_data;
398 rt2x00pci_register_write(rt2x00dev, RX_BASE_PTR, entry_priv->desc_dma); 407 rt2x00pci_register_write(rt2x00dev, RX_BASE_PTR, entry_priv->desc_dma);
399 rt2x00pci_register_write(rt2x00dev, RX_MAX_CNT, 408 rt2x00pci_register_write(rt2x00dev, RX_MAX_CNT,
@@ -402,14 +411,7 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
402 rt2x00dev->rx[0].limit - 1); 411 rt2x00dev->rx[0].limit - 1);
403 rt2x00pci_register_write(rt2x00dev, RX_DRX_IDX, 0); 412 rt2x00pci_register_write(rt2x00dev, RX_DRX_IDX, 0);
404 413
405 /* 414 rt2800_disable_wpdma(rt2x00dev);
406 * Enable global DMA configuration
407 */
408 rt2x00pci_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
409 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
410 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
411 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
412 rt2x00pci_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
413 415
414 rt2x00pci_register_write(rt2x00dev, DELAY_INT_CFG, 0); 416 rt2x00pci_register_write(rt2x00dev, DELAY_INT_CFG, 0);
415 417
@@ -504,8 +506,10 @@ static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
504{ 506{
505 int retval; 507 int retval;
506 508
507 if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) || 509 /* Wait for DMA, ignore error until we initialize queues. */
508 rt2800pci_init_queues(rt2x00dev))) 510 rt2800_wait_wpdma_ready(rt2x00dev);
511
512 if (unlikely(rt2800pci_init_queues(rt2x00dev)))
509 return -EIO; 513 return -EIO;
510 514
511 retval = rt2800_enable_radio(rt2x00dev); 515 retval = rt2800_enable_radio(rt2x00dev);
@@ -1184,7 +1188,9 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
1184 { PCI_DEVICE(0x1814, 0x3593) }, 1188 { PCI_DEVICE(0x1814, 0x3593) },
1185#endif 1189#endif
1186#ifdef CONFIG_RT2800PCI_RT53XX 1190#ifdef CONFIG_RT2800PCI_RT53XX
1191 { PCI_DEVICE(0x1814, 0x5362) },
1187 { PCI_DEVICE(0x1814, 0x5390) }, 1192 { PCI_DEVICE(0x1814, 0x5390) },
1193 { PCI_DEVICE(0x1814, 0x5392) },
1188 { PCI_DEVICE(0x1814, 0x539a) }, 1194 { PCI_DEVICE(0x1814, 0x539a) },
1189 { PCI_DEVICE(0x1814, 0x539f) }, 1195 { PCI_DEVICE(0x1814, 0x539f) },
1190#endif 1196#endif
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 001735f7a661..5601302d09ad 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -922,6 +922,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
922 { USB_DEVICE(0x1482, 0x3c09) }, 922 { USB_DEVICE(0x1482, 0x3c09) },
923 /* AirTies */ 923 /* AirTies */
924 { USB_DEVICE(0x1eda, 0x2012) }, 924 { USB_DEVICE(0x1eda, 0x2012) },
925 { USB_DEVICE(0x1eda, 0x2210) },
925 { USB_DEVICE(0x1eda, 0x2310) }, 926 { USB_DEVICE(0x1eda, 0x2310) },
926 /* Allwin */ 927 /* Allwin */
927 { USB_DEVICE(0x8516, 0x2070) }, 928 { USB_DEVICE(0x8516, 0x2070) },
@@ -991,6 +992,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
991 /* DVICO */ 992 /* DVICO */
992 { USB_DEVICE(0x0fe9, 0xb307) }, 993 { USB_DEVICE(0x0fe9, 0xb307) },
993 /* Edimax */ 994 /* Edimax */
995 { USB_DEVICE(0x7392, 0x4085) },
994 { USB_DEVICE(0x7392, 0x7711) }, 996 { USB_DEVICE(0x7392, 0x7711) },
995 { USB_DEVICE(0x7392, 0x7717) }, 997 { USB_DEVICE(0x7392, 0x7717) },
996 { USB_DEVICE(0x7392, 0x7718) }, 998 { USB_DEVICE(0x7392, 0x7718) },
@@ -1066,6 +1068,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
1066 /* Philips */ 1068 /* Philips */
1067 { USB_DEVICE(0x0471, 0x200f) }, 1069 { USB_DEVICE(0x0471, 0x200f) },
1068 /* Planex */ 1070 /* Planex */
1071 { USB_DEVICE(0x2019, 0x5201) },
1069 { USB_DEVICE(0x2019, 0xab25) }, 1072 { USB_DEVICE(0x2019, 0xab25) },
1070 { USB_DEVICE(0x2019, 0xed06) }, 1073 { USB_DEVICE(0x2019, 0xed06) },
1071 /* Quanta */ 1074 /* Quanta */
@@ -1134,6 +1137,10 @@ static struct usb_device_id rt2800usb_device_table[] = {
1134#ifdef CONFIG_RT2800USB_RT33XX 1137#ifdef CONFIG_RT2800USB_RT33XX
1135 /* Belkin */ 1138 /* Belkin */
1136 { USB_DEVICE(0x050d, 0x945b) }, 1139 { USB_DEVICE(0x050d, 0x945b) },
1140 /* Panasonic */
1141 { USB_DEVICE(0x083a, 0xb511) },
1142 /* Philips */
1143 { USB_DEVICE(0x0471, 0x20dd) },
1137 /* Ralink */ 1144 /* Ralink */
1138 { USB_DEVICE(0x148f, 0x3370) }, 1145 { USB_DEVICE(0x148f, 0x3370) },
1139 { USB_DEVICE(0x148f, 0x8070) }, 1146 { USB_DEVICE(0x148f, 0x8070) },
@@ -1145,6 +1152,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
1145 { USB_DEVICE(0x8516, 0x3572) }, 1152 { USB_DEVICE(0x8516, 0x3572) },
1146 /* Askey */ 1153 /* Askey */
1147 { USB_DEVICE(0x1690, 0x0744) }, 1154 { USB_DEVICE(0x1690, 0x0744) },
1155 { USB_DEVICE(0x1690, 0x0761) },
1156 { USB_DEVICE(0x1690, 0x0764) },
1148 /* Cisco */ 1157 /* Cisco */
1149 { USB_DEVICE(0x167b, 0x4001) }, 1158 { USB_DEVICE(0x167b, 0x4001) },
1150 /* EnGenius */ 1159 /* EnGenius */
@@ -1159,20 +1168,25 @@ static struct usb_device_id rt2800usb_device_table[] = {
1159 /* Sitecom */ 1168 /* Sitecom */
1160 { USB_DEVICE(0x0df6, 0x0041) }, 1169 { USB_DEVICE(0x0df6, 0x0041) },
1161 { USB_DEVICE(0x0df6, 0x0062) }, 1170 { USB_DEVICE(0x0df6, 0x0062) },
1171 { USB_DEVICE(0x0df6, 0x0065) },
1172 { USB_DEVICE(0x0df6, 0x0066) },
1173 { USB_DEVICE(0x0df6, 0x0068) },
1162 /* Toshiba */ 1174 /* Toshiba */
1163 { USB_DEVICE(0x0930, 0x0a07) }, 1175 { USB_DEVICE(0x0930, 0x0a07) },
1164 /* Zinwell */ 1176 /* Zinwell */
1165 { USB_DEVICE(0x5a57, 0x0284) }, 1177 { USB_DEVICE(0x5a57, 0x0284) },
1166#endif 1178#endif
1167#ifdef CONFIG_RT2800USB_RT53XX 1179#ifdef CONFIG_RT2800USB_RT53XX
1168 /* Alpha */
1169 { USB_DEVICE(0x2001, 0x3c15) },
1170 { USB_DEVICE(0x2001, 0x3c19) },
1171 /* Arcadyan */ 1180 /* Arcadyan */
1172 { USB_DEVICE(0x043e, 0x7a12) }, 1181 { USB_DEVICE(0x043e, 0x7a12) },
1173 /* Azurewave */ 1182 /* Azurewave */
1174 { USB_DEVICE(0x13d3, 0x3329) }, 1183 { USB_DEVICE(0x13d3, 0x3329) },
1175 { USB_DEVICE(0x13d3, 0x3365) }, 1184 { USB_DEVICE(0x13d3, 0x3365) },
1185 /* D-Link */
1186 { USB_DEVICE(0x2001, 0x3c15) },
1187 { USB_DEVICE(0x2001, 0x3c19) },
1188 { USB_DEVICE(0x2001, 0x3c1c) },
1189 { USB_DEVICE(0x2001, 0x3c1d) },
1176 /* LG innotek */ 1190 /* LG innotek */
1177 { USB_DEVICE(0x043e, 0x7a22) }, 1191 { USB_DEVICE(0x043e, 0x7a22) },
1178 /* Panasonic */ 1192 /* Panasonic */
@@ -1224,12 +1238,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
1224 { USB_DEVICE(0x07d1, 0x3c0b) }, 1238 { USB_DEVICE(0x07d1, 0x3c0b) },
1225 { USB_DEVICE(0x07d1, 0x3c17) }, 1239 { USB_DEVICE(0x07d1, 0x3c17) },
1226 { USB_DEVICE(0x2001, 0x3c17) }, 1240 { USB_DEVICE(0x2001, 0x3c17) },
1227 /* Edimax */
1228 { USB_DEVICE(0x7392, 0x4085) },
1229 /* Encore */ 1241 /* Encore */
1230 { USB_DEVICE(0x203d, 0x14a1) }, 1242 { USB_DEVICE(0x203d, 0x14a1) },
1231 /* Fujitsu Stylistic 550 */
1232 { USB_DEVICE(0x1690, 0x0761) },
1233 /* Gemtek */ 1243 /* Gemtek */
1234 { USB_DEVICE(0x15a9, 0x0010) }, 1244 { USB_DEVICE(0x15a9, 0x0010) },
1235 /* Gigabyte */ 1245 /* Gigabyte */
@@ -1250,7 +1260,6 @@ static struct usb_device_id rt2800usb_device_table[] = {
1250 { USB_DEVICE(0x05a6, 0x0101) }, 1260 { USB_DEVICE(0x05a6, 0x0101) },
1251 { USB_DEVICE(0x1d4d, 0x0010) }, 1261 { USB_DEVICE(0x1d4d, 0x0010) },
1252 /* Planex */ 1262 /* Planex */
1253 { USB_DEVICE(0x2019, 0x5201) },
1254 { USB_DEVICE(0x2019, 0xab24) }, 1263 { USB_DEVICE(0x2019, 0xab24) },
1255 /* Qcom */ 1264 /* Qcom */
1256 { USB_DEVICE(0x18e8, 0x6259) }, 1265 { USB_DEVICE(0x18e8, 0x6259) },
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 471f87cab4ab..ca36cccaba31 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -692,6 +692,8 @@ enum rt2x00_state_flags {
692 */ 692 */
693 CONFIG_CHANNEL_HT40, 693 CONFIG_CHANNEL_HT40,
694 CONFIG_POWERSAVING, 694 CONFIG_POWERSAVING,
695 CONFIG_HT_DISABLED,
696 CONFIG_QOS_DISABLED,
695 697
696 /* 698 /*
697 * Mark we currently are sequentially reading TX_STA_FIFO register 699 * Mark we currently are sequentially reading TX_STA_FIFO register
@@ -1280,7 +1282,7 @@ void rt2x00lib_dmadone(struct queue_entry *entry);
1280void rt2x00lib_txdone(struct queue_entry *entry, 1282void rt2x00lib_txdone(struct queue_entry *entry,
1281 struct txdone_entry_desc *txdesc); 1283 struct txdone_entry_desc *txdesc);
1282void rt2x00lib_txdone_noinfo(struct queue_entry *entry, u32 status); 1284void rt2x00lib_txdone_noinfo(struct queue_entry *entry, u32 status);
1283void rt2x00lib_rxdone(struct queue_entry *entry); 1285void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp);
1284 1286
1285/* 1287/*
1286 * mac80211 handlers. 1288 * mac80211 handlers.
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index 293676bfa571..e7361d913e8e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -217,6 +217,11 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
217 libconf.conf = conf; 217 libconf.conf = conf;
218 218
219 if (ieee80211_flags & IEEE80211_CONF_CHANGE_CHANNEL) { 219 if (ieee80211_flags & IEEE80211_CONF_CHANGE_CHANNEL) {
220 if (!conf_is_ht(conf))
221 set_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags);
222 else
223 clear_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags);
224
220 if (conf_is_ht40(conf)) { 225 if (conf_is_ht40(conf)) {
221 set_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags); 226 set_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags);
222 hw_value = rt2x00ht_center_channel(rt2x00dev, conf); 227 hw_value = rt2x00ht_center_channel(rt2x00dev, conf);
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 90cc5e772650..e5404e576251 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -391,9 +391,10 @@ void rt2x00lib_txdone(struct queue_entry *entry,
391 tx_info->flags |= IEEE80211_TX_STAT_AMPDU; 391 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
392 tx_info->status.ampdu_len = 1; 392 tx_info->status.ampdu_len = 1;
393 tx_info->status.ampdu_ack_len = success ? 1 : 0; 393 tx_info->status.ampdu_ack_len = success ? 1 : 0;
394 394 /*
395 if (!success) 395 * TODO: Need to tear down BA session here
396 tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK; 396 * if not successful.
397 */
397 } 398 }
398 399
399 if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) { 400 if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
@@ -587,7 +588,7 @@ static int rt2x00lib_rxdone_read_signal(struct rt2x00_dev *rt2x00dev,
587 return 0; 588 return 0;
588} 589}
589 590
590void rt2x00lib_rxdone(struct queue_entry *entry) 591void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp)
591{ 592{
592 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 593 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
593 struct rxdone_entry_desc rxdesc; 594 struct rxdone_entry_desc rxdesc;
@@ -607,7 +608,7 @@ void rt2x00lib_rxdone(struct queue_entry *entry)
607 * Allocate a new sk_buffer. If no new buffer available, drop the 608 * Allocate a new sk_buffer. If no new buffer available, drop the
608 * received frame and reuse the existing buffer. 609 * received frame and reuse the existing buffer.
609 */ 610 */
610 skb = rt2x00queue_alloc_rxskb(entry); 611 skb = rt2x00queue_alloc_rxskb(entry, gfp);
611 if (!skb) 612 if (!skb)
612 goto submit_entry; 613 goto submit_entry;
613 614
diff --git a/drivers/net/wireless/rt2x00/rt2x00leds.c b/drivers/net/wireless/rt2x00/rt2x00leds.c
index ca585e34d00e..8679d781a264 100644
--- a/drivers/net/wireless/rt2x00/rt2x00leds.c
+++ b/drivers/net/wireless/rt2x00/rt2x00leds.c
@@ -124,17 +124,15 @@ static int rt2x00leds_register_led(struct rt2x00_dev *rt2x00dev,
124 124
125void rt2x00leds_register(struct rt2x00_dev *rt2x00dev) 125void rt2x00leds_register(struct rt2x00_dev *rt2x00dev)
126{ 126{
127 char dev_name[16]; 127 char name[36];
128 char name[32];
129 int retval; 128 int retval;
130 unsigned long on_period; 129 unsigned long on_period;
131 unsigned long off_period; 130 unsigned long off_period;
132 131 const char *phy_name = wiphy_name(rt2x00dev->hw->wiphy);
133 snprintf(dev_name, sizeof(dev_name), "%s-%s",
134 rt2x00dev->ops->name, wiphy_name(rt2x00dev->hw->wiphy));
135 132
136 if (rt2x00dev->led_radio.flags & LED_INITIALIZED) { 133 if (rt2x00dev->led_radio.flags & LED_INITIALIZED) {
137 snprintf(name, sizeof(name), "%s::radio", dev_name); 134 snprintf(name, sizeof(name), "%s-%s::radio",
135 rt2x00dev->ops->name, phy_name);
138 136
139 retval = rt2x00leds_register_led(rt2x00dev, 137 retval = rt2x00leds_register_led(rt2x00dev,
140 &rt2x00dev->led_radio, 138 &rt2x00dev->led_radio,
@@ -144,7 +142,8 @@ void rt2x00leds_register(struct rt2x00_dev *rt2x00dev)
144 } 142 }
145 143
146 if (rt2x00dev->led_assoc.flags & LED_INITIALIZED) { 144 if (rt2x00dev->led_assoc.flags & LED_INITIALIZED) {
147 snprintf(name, sizeof(name), "%s::assoc", dev_name); 145 snprintf(name, sizeof(name), "%s-%s::assoc",
146 rt2x00dev->ops->name, phy_name);
148 147
149 retval = rt2x00leds_register_led(rt2x00dev, 148 retval = rt2x00leds_register_led(rt2x00dev,
150 &rt2x00dev->led_assoc, 149 &rt2x00dev->led_assoc,
@@ -154,7 +153,8 @@ void rt2x00leds_register(struct rt2x00_dev *rt2x00dev)
154 } 153 }
155 154
156 if (rt2x00dev->led_qual.flags & LED_INITIALIZED) { 155 if (rt2x00dev->led_qual.flags & LED_INITIALIZED) {
157 snprintf(name, sizeof(name), "%s::quality", dev_name); 156 snprintf(name, sizeof(name), "%s-%s::quality",
157 rt2x00dev->ops->name, phy_name);
158 158
159 retval = rt2x00leds_register_led(rt2x00dev, 159 retval = rt2x00leds_register_led(rt2x00dev,
160 &rt2x00dev->led_qual, 160 &rt2x00dev->led_qual,
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index 78bd43b8961f..a0935987fa3a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -103,7 +103,7 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
103 * rt2x00queue_alloc_rxskb - allocate a skb for RX purposes. 103 * rt2x00queue_alloc_rxskb - allocate a skb for RX purposes.
104 * @entry: The entry for which the skb will be applicable. 104 * @entry: The entry for which the skb will be applicable.
105 */ 105 */
106struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry); 106struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp);
107 107
108/** 108/**
109 * rt2x00queue_free_skb - free a skb 109 * rt2x00queue_free_skb - free a skb
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 2df2eb6d3e06..b49773ef72f2 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -709,9 +709,19 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
709 rt2x00dev->intf_associated--; 709 rt2x00dev->intf_associated--;
710 710
711 rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated); 711 rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated);
712
713 clear_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
712 } 714 }
713 715
714 /* 716 /*
717 * Check for access point which do not support 802.11e . We have to
718 * generate data frames sequence number in S/W for such AP, because
719 * of H/W bug.
720 */
721 if (changes & BSS_CHANGED_QOS && !bss_conf->qos)
722 set_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
723
724 /*
715 * When the erp information has changed, we should perform 725 * When the erp information has changed, we should perform
716 * additional configuration steps. For all other changes we are done. 726 * additional configuration steps. For all other changes we are done.
717 */ 727 */
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 17148bb24426..0a4653a92cab 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -92,7 +92,7 @@ bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
92 /* 92 /*
93 * Send the frame to rt2x00lib for further processing. 93 * Send the frame to rt2x00lib for further processing.
94 */ 94 */
95 rt2x00lib_rxdone(entry); 95 rt2x00lib_rxdone(entry, GFP_ATOMIC);
96 } 96 }
97 97
98 return !max_rx; 98 return !max_rx;
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 9b1b2b7a7807..4c662eccf53c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -33,7 +33,7 @@
33#include "rt2x00.h" 33#include "rt2x00.h"
34#include "rt2x00lib.h" 34#include "rt2x00lib.h"
35 35
36struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry) 36struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp)
37{ 37{
38 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 38 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
39 struct sk_buff *skb; 39 struct sk_buff *skb;
@@ -68,7 +68,7 @@ struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry)
68 /* 68 /*
69 * Allocate skbuffer. 69 * Allocate skbuffer.
70 */ 70 */
71 skb = dev_alloc_skb(frame_size + head_size + tail_size); 71 skb = __dev_alloc_skb(frame_size + head_size + tail_size, gfp);
72 if (!skb) 72 if (!skb)
73 return NULL; 73 return NULL;
74 74
@@ -213,8 +213,19 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
213 213
214 __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); 214 __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
215 215
216 if (!test_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags)) 216 if (!test_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags)) {
217 return; 217 /*
218 * rt2800 has a H/W (or F/W) bug, device incorrectly increase
219 * seqno on retransmited data (non-QOS) frames. To workaround
220 * the problem let's generate seqno in software if QOS is
221 * disabled.
222 */
223 if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags))
224 __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
225 else
226 /* H/W will generate sequence number */
227 return;
228 }
218 229
219 /* 230 /*
220 * The hardware is not able to insert a sequence number. Assign a 231 * The hardware is not able to insert a sequence number. Assign a
@@ -320,14 +331,6 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
320 txdesc->u.ht.wcid = sta_priv->wcid; 331 txdesc->u.ht.wcid = sta_priv->wcid;
321 } 332 }
322 333
323 txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */
324
325 /*
326 * Only one STBC stream is supported for now.
327 */
328 if (tx_info->flags & IEEE80211_TX_CTL_STBC)
329 txdesc->u.ht.stbc = 1;
330
331 /* 334 /*
332 * If IEEE80211_TX_RC_MCS is set txrate->idx just contains the 335 * If IEEE80211_TX_RC_MCS is set txrate->idx just contains the
333 * mcs rate to be used 336 * mcs rate to be used
@@ -351,6 +354,24 @@ static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev,
351 txdesc->u.ht.mcs |= 0x08; 354 txdesc->u.ht.mcs |= 0x08;
352 } 355 }
353 356
357 if (test_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags)) {
358 if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
359 txdesc->u.ht.txop = TXOP_SIFS;
360 else
361 txdesc->u.ht.txop = TXOP_BACKOFF;
362
363 /* Left zero on all other settings. */
364 return;
365 }
366
367 txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */
368
369 /*
370 * Only one STBC stream is supported for now.
371 */
372 if (tx_info->flags & IEEE80211_TX_CTL_STBC)
373 txdesc->u.ht.stbc = 1;
374
354 /* 375 /*
355 * This frame is eligible for an AMPDU, however, don't aggregate 376 * This frame is eligible for an AMPDU, however, don't aggregate
356 * frames that are intended to probe a specific tx rate. 377 * frames that are intended to probe a specific tx rate.
@@ -1142,7 +1163,7 @@ static int rt2x00queue_alloc_rxskbs(struct data_queue *queue)
1142 struct sk_buff *skb; 1163 struct sk_buff *skb;
1143 1164
1144 for (i = 0; i < queue->limit; i++) { 1165 for (i = 0; i < queue->limit; i++) {
1145 skb = rt2x00queue_alloc_rxskb(&queue->entries[i]); 1166 skb = rt2x00queue_alloc_rxskb(&queue->entries[i], GFP_KERNEL);
1146 if (!skb) 1167 if (!skb)
1147 return -ENOMEM; 1168 return -ENOMEM;
1148 queue->entries[i].skb = skb; 1169 queue->entries[i].skb = skb;
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 66094eb21b61..d357d1ed92f6 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -358,7 +358,7 @@ static void rt2x00usb_work_rxdone(struct work_struct *work)
358 /* 358 /*
359 * Send the frame to rt2x00lib for further processing. 359 * Send the frame to rt2x00lib for further processing.
360 */ 360 */
361 rt2x00lib_rxdone(entry); 361 rt2x00lib_rxdone(entry, GFP_KERNEL);
362 } 362 }
363} 363}
364 364
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index e0c6d117429d..ee22bd74579d 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -3092,15 +3092,4 @@ static struct pci_driver rt61pci_driver = {
3092 .resume = rt2x00pci_resume, 3092 .resume = rt2x00pci_resume,
3093}; 3093};
3094 3094
3095static int __init rt61pci_init(void) 3095module_pci_driver(rt61pci_driver);
3096{
3097 return pci_register_driver(&rt61pci_driver);
3098}
3099
3100static void __exit rt61pci_exit(void)
3101{
3102 pci_unregister_driver(&rt61pci_driver);
3103}
3104
3105module_init(rt61pci_init);
3106module_exit(rt61pci_exit);
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index e477a964081d..155136691a38 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2412,6 +2412,7 @@ static struct usb_device_id rt73usb_device_table[] = {
2412 { USB_DEVICE(0x0b05, 0x1723) }, 2412 { USB_DEVICE(0x0b05, 0x1723) },
2413 { USB_DEVICE(0x0b05, 0x1724) }, 2413 { USB_DEVICE(0x0b05, 0x1724) },
2414 /* Belkin */ 2414 /* Belkin */
2415 { USB_DEVICE(0x050d, 0x7050) }, /* FCC ID: K7SF5D7050B ver. 3.x */
2415 { USB_DEVICE(0x050d, 0x705a) }, 2416 { USB_DEVICE(0x050d, 0x705a) },
2416 { USB_DEVICE(0x050d, 0x905b) }, 2417 { USB_DEVICE(0x050d, 0x905b) },
2417 { USB_DEVICE(0x050d, 0x905c) }, 2418 { USB_DEVICE(0x050d, 0x905c) },
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 2f14a5fb0cbb..2bebcb71a1e9 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -1173,15 +1173,4 @@ static struct pci_driver rtl8180_driver = {
1173#endif /* CONFIG_PM */ 1173#endif /* CONFIG_PM */
1174}; 1174};
1175 1175
1176static int __init rtl8180_init(void) 1176module_pci_driver(rtl8180_driver);
1177{
1178 return pci_register_driver(&rtl8180_driver);
1179}
1180
1181static void __exit rtl8180_exit(void)
1182{
1183 pci_unregister_driver(&rtl8180_driver);
1184}
1185
1186module_init(rtl8180_init);
1187module_exit(rtl8180_exit);
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index cf53ac9d6f23..d8114962b0c9 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -294,6 +294,7 @@ static void rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
294 hdr->retry = cpu_to_le32((info->control.rates[0].count - 1) << 8); 294 hdr->retry = cpu_to_le32((info->control.rates[0].count - 1) << 8);
295 hdr->tx_duration = 295 hdr->tx_duration =
296 ieee80211_generic_frame_duration(dev, priv->vif, 296 ieee80211_generic_frame_duration(dev, priv->vif,
297 info->band,
297 skb->len, txrate); 298 skb->len, txrate);
298 buf = hdr; 299 buf = hdr;
299 300
diff --git a/drivers/net/wireless/rtlwifi/cam.c b/drivers/net/wireless/rtlwifi/cam.c
index 5c7d57947d23..3d8cc4a0c86d 100644
--- a/drivers/net/wireless/rtlwifi/cam.c
+++ b/drivers/net/wireless/rtlwifi/cam.c
@@ -328,10 +328,9 @@ void rtl_cam_del_entry(struct ieee80211_hw *hw, u8 *sta_addr)
328 RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG, "sta_addr is NULL\n"); 328 RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG, "sta_addr is NULL\n");
329 } 329 }
330 330
331 if ((sta_addr[0]|sta_addr[1]|sta_addr[2]|sta_addr[3]|\ 331 if (is_zero_ether_addr(sta_addr)) {
332 sta_addr[4]|sta_addr[5]) == 0) {
333 RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG, 332 RT_TRACE(rtlpriv, COMP_SEC, DBG_EMERG,
334 "sta_addr is 00:00:00:00:00:00\n"); 333 "sta_addr is %pM\n", sta_addr);
335 return; 334 return;
336 } 335 }
337 /* Does STA already exist? */ 336 /* Does STA already exist? */
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index cc15fdb36060..f7868c0d79ed 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -34,6 +34,7 @@
34#include "ps.h" 34#include "ps.h"
35#include "efuse.h" 35#include "efuse.h"
36#include <linux/export.h> 36#include <linux/export.h>
37#include <linux/kmemleak.h>
37 38
38static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = { 39static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = {
39 PCI_VENDOR_ID_INTEL, 40 PCI_VENDOR_ID_INTEL,
@@ -1099,6 +1100,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
1099 u32 bufferaddress; 1100 u32 bufferaddress;
1100 if (!skb) 1101 if (!skb)
1101 return 0; 1102 return 0;
1103 kmemleak_not_leak(skb);
1102 entry = &rtlpci->rx_ring[rx_queue_idx].desc[i]; 1104 entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
1103 1105
1104 /*skb->dev = dev; */ 1106 /*skb->dev = dev; */
diff --git a/drivers/net/wireless/rtlwifi/rc.c b/drivers/net/wireless/rtlwifi/rc.c
index c66f08a0524a..d5cbf01da8ac 100644
--- a/drivers/net/wireless/rtlwifi/rc.c
+++ b/drivers/net/wireless/rtlwifi/rc.c
@@ -225,8 +225,7 @@ static void rtl_rate_init(void *ppriv,
225static void rtl_rate_update(void *ppriv, 225static void rtl_rate_update(void *ppriv,
226 struct ieee80211_supported_band *sband, 226 struct ieee80211_supported_band *sband,
227 struct ieee80211_sta *sta, void *priv_sta, 227 struct ieee80211_sta *sta, void *priv_sta,
228 u32 changed, 228 u32 changed)
229 enum nl80211_channel_type oper_chan_type)
230{ 229{
231} 230}
232 231
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
index 1208b753f62f..f7f48c7ac854 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c
@@ -33,9 +33,6 @@
33#include "../pci.h" 33#include "../pci.h"
34#include "../base.h" 34#include "../base.h"
35 35
36struct dig_t dm_digtable;
37static struct ps_t dm_pstable;
38
39#define BT_RSSI_STATE_NORMAL_POWER BIT_OFFSET_LEN_MASK_32(0, 1) 36#define BT_RSSI_STATE_NORMAL_POWER BIT_OFFSET_LEN_MASK_32(0, 1)
40#define BT_RSSI_STATE_AMDPU_OFF BIT_OFFSET_LEN_MASK_32(1, 1) 37#define BT_RSSI_STATE_AMDPU_OFF BIT_OFFSET_LEN_MASK_32(1, 1)
41#define BT_RSSI_STATE_SPECIAL_LOW BIT_OFFSET_LEN_MASK_32(2, 1) 38#define BT_RSSI_STATE_SPECIAL_LOW BIT_OFFSET_LEN_MASK_32(2, 1)
@@ -163,33 +160,37 @@ static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
163 160
164static void rtl92c_dm_diginit(struct ieee80211_hw *hw) 161static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
165{ 162{
166 dm_digtable.dig_enable_flag = true; 163 struct rtl_priv *rtlpriv = rtl_priv(hw);
167 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; 164 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
168 dm_digtable.cur_igvalue = 0x20; 165
169 dm_digtable.pre_igvalue = 0x0; 166 dm_digtable->dig_enable_flag = true;
170 dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT; 167 dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
171 dm_digtable.presta_connectstate = DIG_STA_DISCONNECT; 168 dm_digtable->cur_igvalue = 0x20;
172 dm_digtable.curmultista_connectstate = DIG_MULTISTA_DISCONNECT; 169 dm_digtable->pre_igvalue = 0x0;
173 dm_digtable.rssi_lowthresh = DM_DIG_THRESH_LOW; 170 dm_digtable->cursta_connectctate = DIG_STA_DISCONNECT;
174 dm_digtable.rssi_highthresh = DM_DIG_THRESH_HIGH; 171 dm_digtable->presta_connectstate = DIG_STA_DISCONNECT;
175 dm_digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW; 172 dm_digtable->curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
176 dm_digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH; 173 dm_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
177 dm_digtable.rx_gain_range_max = DM_DIG_MAX; 174 dm_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
178 dm_digtable.rx_gain_range_min = DM_DIG_MIN; 175 dm_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
179 dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT; 176 dm_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
180 dm_digtable.backoff_val_range_max = DM_DIG_BACKOFF_MAX; 177 dm_digtable->rx_gain_range_max = DM_DIG_MAX;
181 dm_digtable.backoff_val_range_min = DM_DIG_BACKOFF_MIN; 178 dm_digtable->rx_gain_range_min = DM_DIG_MIN;
182 dm_digtable.pre_cck_pd_state = CCK_PD_STAGE_MAX; 179 dm_digtable->backoff_val = DM_DIG_BACKOFF_DEFAULT;
183 dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX; 180 dm_digtable->backoff_val_range_max = DM_DIG_BACKOFF_MAX;
181 dm_digtable->backoff_val_range_min = DM_DIG_BACKOFF_MIN;
182 dm_digtable->pre_cck_pd_state = CCK_PD_STAGE_MAX;
183 dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX;
184} 184}
185 185
186static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw) 186static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
187{ 187{
188 struct rtl_priv *rtlpriv = rtl_priv(hw); 188 struct rtl_priv *rtlpriv = rtl_priv(hw);
189 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
189 long rssi_val_min = 0; 190 long rssi_val_min = 0;
190 191
191 if ((dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) && 192 if ((dm_digtable->curmultista_connectstate == DIG_MULTISTA_CONNECT) &&
192 (dm_digtable.cursta_connectctate == DIG_STA_CONNECT)) { 193 (dm_digtable->cursta_connectctate == DIG_STA_CONNECT)) {
193 if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb != 0) 194 if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb != 0)
194 rssi_val_min = 195 rssi_val_min =
195 (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb > 196 (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb >
@@ -198,10 +199,10 @@ static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
198 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; 199 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
199 else 200 else
200 rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb; 201 rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
201 } else if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT || 202 } else if (dm_digtable->cursta_connectctate == DIG_STA_CONNECT ||
202 dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT) { 203 dm_digtable->cursta_connectctate == DIG_STA_BEFORE_CONNECT) {
203 rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb; 204 rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
204 } else if (dm_digtable.curmultista_connectstate == 205 } else if (dm_digtable->curmultista_connectstate ==
205 DIG_MULTISTA_CONNECT) { 206 DIG_MULTISTA_CONNECT) {
206 rssi_val_min = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; 207 rssi_val_min = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
207 } 208 }
@@ -260,7 +261,8 @@ static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
260static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw) 261static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw)
261{ 262{
262 struct rtl_priv *rtlpriv = rtl_priv(hw); 263 struct rtl_priv *rtlpriv = rtl_priv(hw);
263 u8 value_igi = dm_digtable.cur_igvalue; 264 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
265 u8 value_igi = dm_digtable->cur_igvalue;
264 266
265 if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0) 267 if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0)
266 value_igi--; 268 value_igi--;
@@ -277,43 +279,44 @@ static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw)
277 if (rtlpriv->falsealm_cnt.cnt_all > 10000) 279 if (rtlpriv->falsealm_cnt.cnt_all > 10000)
278 value_igi = 0x32; 280 value_igi = 0x32;
279 281
280 dm_digtable.cur_igvalue = value_igi; 282 dm_digtable->cur_igvalue = value_igi;
281 rtl92c_dm_write_dig(hw); 283 rtl92c_dm_write_dig(hw);
282} 284}
283 285
284static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw) 286static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
285{ 287{
286 struct rtl_priv *rtlpriv = rtl_priv(hw); 288 struct rtl_priv *rtlpriv = rtl_priv(hw);
289 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
287 290
288 if (rtlpriv->falsealm_cnt.cnt_all > dm_digtable.fa_highthresh) { 291 if (rtlpriv->falsealm_cnt.cnt_all > dm_digtable->fa_highthresh) {
289 if ((dm_digtable.backoff_val - 2) < 292 if ((dm_digtable->backoff_val - 2) <
290 dm_digtable.backoff_val_range_min) 293 dm_digtable->backoff_val_range_min)
291 dm_digtable.backoff_val = 294 dm_digtable->backoff_val =
292 dm_digtable.backoff_val_range_min; 295 dm_digtable->backoff_val_range_min;
293 else 296 else
294 dm_digtable.backoff_val -= 2; 297 dm_digtable->backoff_val -= 2;
295 } else if (rtlpriv->falsealm_cnt.cnt_all < dm_digtable.fa_lowthresh) { 298 } else if (rtlpriv->falsealm_cnt.cnt_all < dm_digtable->fa_lowthresh) {
296 if ((dm_digtable.backoff_val + 2) > 299 if ((dm_digtable->backoff_val + 2) >
297 dm_digtable.backoff_val_range_max) 300 dm_digtable->backoff_val_range_max)
298 dm_digtable.backoff_val = 301 dm_digtable->backoff_val =
299 dm_digtable.backoff_val_range_max; 302 dm_digtable->backoff_val_range_max;
300 else 303 else
301 dm_digtable.backoff_val += 2; 304 dm_digtable->backoff_val += 2;
302 } 305 }
303 306
304 if ((dm_digtable.rssi_val_min + 10 - dm_digtable.backoff_val) > 307 if ((dm_digtable->rssi_val_min + 10 - dm_digtable->backoff_val) >
305 dm_digtable.rx_gain_range_max) 308 dm_digtable->rx_gain_range_max)
306 dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_max; 309 dm_digtable->cur_igvalue = dm_digtable->rx_gain_range_max;
307 else if ((dm_digtable.rssi_val_min + 10 - 310 else if ((dm_digtable->rssi_val_min + 10 -
308 dm_digtable.backoff_val) < dm_digtable.rx_gain_range_min) 311 dm_digtable->backoff_val) < dm_digtable->rx_gain_range_min)
309 dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_min; 312 dm_digtable->cur_igvalue = dm_digtable->rx_gain_range_min;
310 else 313 else
311 dm_digtable.cur_igvalue = dm_digtable.rssi_val_min + 10 - 314 dm_digtable->cur_igvalue = dm_digtable->rssi_val_min + 10 -
312 dm_digtable.backoff_val; 315 dm_digtable->backoff_val;
313 316
314 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, 317 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
315 "rssi_val_min = %x backoff_val %x\n", 318 "rssi_val_min = %x backoff_val %x\n",
316 dm_digtable.rssi_val_min, dm_digtable.backoff_val); 319 dm_digtable->rssi_val_min, dm_digtable->backoff_val);
317 320
318 rtl92c_dm_write_dig(hw); 321 rtl92c_dm_write_dig(hw);
319} 322}
@@ -322,6 +325,7 @@ static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
322{ 325{
323 static u8 initialized; /* initialized to false */ 326 static u8 initialized; /* initialized to false */
324 struct rtl_priv *rtlpriv = rtl_priv(hw); 327 struct rtl_priv *rtlpriv = rtl_priv(hw);
328 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
325 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 329 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
326 long rssi_strength = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; 330 long rssi_strength = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
327 bool multi_sta = false; 331 bool multi_sta = false;
@@ -330,68 +334,69 @@ static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
330 multi_sta = true; 334 multi_sta = true;
331 335
332 if (!multi_sta || 336 if (!multi_sta ||
333 dm_digtable.cursta_connectctate != DIG_STA_DISCONNECT) { 337 dm_digtable->cursta_connectctate != DIG_STA_DISCONNECT) {
334 initialized = false; 338 initialized = false;
335 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; 339 dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
336 return; 340 return;
337 } else if (initialized == false) { 341 } else if (initialized == false) {
338 initialized = true; 342 initialized = true;
339 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0; 343 dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
340 dm_digtable.cur_igvalue = 0x20; 344 dm_digtable->cur_igvalue = 0x20;
341 rtl92c_dm_write_dig(hw); 345 rtl92c_dm_write_dig(hw);
342 } 346 }
343 347
344 if (dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) { 348 if (dm_digtable->curmultista_connectstate == DIG_MULTISTA_CONNECT) {
345 if ((rssi_strength < dm_digtable.rssi_lowthresh) && 349 if ((rssi_strength < dm_digtable->rssi_lowthresh) &&
346 (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_1)) { 350 (dm_digtable->dig_ext_port_stage != DIG_EXT_PORT_STAGE_1)) {
347 351
348 if (dm_digtable.dig_ext_port_stage == 352 if (dm_digtable->dig_ext_port_stage ==
349 DIG_EXT_PORT_STAGE_2) { 353 DIG_EXT_PORT_STAGE_2) {
350 dm_digtable.cur_igvalue = 0x20; 354 dm_digtable->cur_igvalue = 0x20;
351 rtl92c_dm_write_dig(hw); 355 rtl92c_dm_write_dig(hw);
352 } 356 }
353 357
354 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_1; 358 dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_1;
355 } else if (rssi_strength > dm_digtable.rssi_highthresh) { 359 } else if (rssi_strength > dm_digtable->rssi_highthresh) {
356 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_2; 360 dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_2;
357 rtl92c_dm_ctrl_initgain_by_fa(hw); 361 rtl92c_dm_ctrl_initgain_by_fa(hw);
358 } 362 }
359 } else if (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_0) { 363 } else if (dm_digtable->dig_ext_port_stage != DIG_EXT_PORT_STAGE_0) {
360 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0; 364 dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
361 dm_digtable.cur_igvalue = 0x20; 365 dm_digtable->cur_igvalue = 0x20;
362 rtl92c_dm_write_dig(hw); 366 rtl92c_dm_write_dig(hw);
363 } 367 }
364 368
365 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, 369 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
366 "curmultista_connectstate = %x dig_ext_port_stage %x\n", 370 "curmultista_connectstate = %x dig_ext_port_stage %x\n",
367 dm_digtable.curmultista_connectstate, 371 dm_digtable->curmultista_connectstate,
368 dm_digtable.dig_ext_port_stage); 372 dm_digtable->dig_ext_port_stage);
369} 373}
370 374
371static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw) 375static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
372{ 376{
373 struct rtl_priv *rtlpriv = rtl_priv(hw); 377 struct rtl_priv *rtlpriv = rtl_priv(hw);
378 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
374 379
375 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, 380 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
376 "presta_connectstate = %x, cursta_connectctate = %x\n", 381 "presta_connectstate = %x, cursta_connectctate = %x\n",
377 dm_digtable.presta_connectstate, 382 dm_digtable->presta_connectstate,
378 dm_digtable.cursta_connectctate); 383 dm_digtable->cursta_connectctate);
379 384
380 if (dm_digtable.presta_connectstate == dm_digtable.cursta_connectctate 385 if (dm_digtable->presta_connectstate == dm_digtable->cursta_connectctate
381 || dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT 386 || dm_digtable->cursta_connectctate == DIG_STA_BEFORE_CONNECT
382 || dm_digtable.cursta_connectctate == DIG_STA_CONNECT) { 387 || dm_digtable->cursta_connectctate == DIG_STA_CONNECT) {
383 388
384 if (dm_digtable.cursta_connectctate != DIG_STA_DISCONNECT) { 389 if (dm_digtable->cursta_connectctate != DIG_STA_DISCONNECT) {
385 dm_digtable.rssi_val_min = 390 dm_digtable->rssi_val_min =
386 rtl92c_dm_initial_gain_min_pwdb(hw); 391 rtl92c_dm_initial_gain_min_pwdb(hw);
387 rtl92c_dm_ctrl_initgain_by_rssi(hw); 392 rtl92c_dm_ctrl_initgain_by_rssi(hw);
388 } 393 }
389 } else { 394 } else {
390 dm_digtable.rssi_val_min = 0; 395 dm_digtable->rssi_val_min = 0;
391 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; 396 dm_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
392 dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT; 397 dm_digtable->backoff_val = DM_DIG_BACKOFF_DEFAULT;
393 dm_digtable.cur_igvalue = 0x20; 398 dm_digtable->cur_igvalue = 0x20;
394 dm_digtable.pre_igvalue = 0; 399 dm_digtable->pre_igvalue = 0;
395 rtl92c_dm_write_dig(hw); 400 rtl92c_dm_write_dig(hw);
396 } 401 }
397} 402}
@@ -400,40 +405,41 @@ static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
400{ 405{
401 struct rtl_priv *rtlpriv = rtl_priv(hw); 406 struct rtl_priv *rtlpriv = rtl_priv(hw);
402 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 407 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
408 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
403 409
404 if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT) { 410 if (dm_digtable->cursta_connectctate == DIG_STA_CONNECT) {
405 dm_digtable.rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw); 411 dm_digtable->rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw);
406 412
407 if (dm_digtable.pre_cck_pd_state == CCK_PD_STAGE_LowRssi) { 413 if (dm_digtable->pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
408 if (dm_digtable.rssi_val_min <= 25) 414 if (dm_digtable->rssi_val_min <= 25)
409 dm_digtable.cur_cck_pd_state = 415 dm_digtable->cur_cck_pd_state =
410 CCK_PD_STAGE_LowRssi; 416 CCK_PD_STAGE_LowRssi;
411 else 417 else
412 dm_digtable.cur_cck_pd_state = 418 dm_digtable->cur_cck_pd_state =
413 CCK_PD_STAGE_HighRssi; 419 CCK_PD_STAGE_HighRssi;
414 } else { 420 } else {
415 if (dm_digtable.rssi_val_min <= 20) 421 if (dm_digtable->rssi_val_min <= 20)
416 dm_digtable.cur_cck_pd_state = 422 dm_digtable->cur_cck_pd_state =
417 CCK_PD_STAGE_LowRssi; 423 CCK_PD_STAGE_LowRssi;
418 else 424 else
419 dm_digtable.cur_cck_pd_state = 425 dm_digtable->cur_cck_pd_state =
420 CCK_PD_STAGE_HighRssi; 426 CCK_PD_STAGE_HighRssi;
421 } 427 }
422 } else { 428 } else {
423 dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX; 429 dm_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX;
424 } 430 }
425 431
426 if (dm_digtable.pre_cck_pd_state != dm_digtable.cur_cck_pd_state) { 432 if (dm_digtable->pre_cck_pd_state != dm_digtable->cur_cck_pd_state) {
427 if (dm_digtable.cur_cck_pd_state == CCK_PD_STAGE_LowRssi) { 433 if (dm_digtable->cur_cck_pd_state == CCK_PD_STAGE_LowRssi) {
428 if (rtlpriv->falsealm_cnt.cnt_cck_fail > 800) 434 if (rtlpriv->falsealm_cnt.cnt_cck_fail > 800)
429 dm_digtable.cur_cck_fa_state = 435 dm_digtable->cur_cck_fa_state =
430 CCK_FA_STAGE_High; 436 CCK_FA_STAGE_High;
431 else 437 else
432 dm_digtable.cur_cck_fa_state = CCK_FA_STAGE_Low; 438 dm_digtable->cur_cck_fa_state = CCK_FA_STAGE_Low;
433 439
434 if (dm_digtable.pre_cck_fa_state != 440 if (dm_digtable->pre_cck_fa_state !=
435 dm_digtable.cur_cck_fa_state) { 441 dm_digtable->cur_cck_fa_state) {
436 if (dm_digtable.cur_cck_fa_state == 442 if (dm_digtable->cur_cck_fa_state ==
437 CCK_FA_STAGE_Low) 443 CCK_FA_STAGE_Low)
438 rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 444 rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
439 0x83); 445 0x83);
@@ -441,8 +447,8 @@ static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
441 rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 447 rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
442 0xcd); 448 0xcd);
443 449
444 dm_digtable.pre_cck_fa_state = 450 dm_digtable->pre_cck_fa_state =
445 dm_digtable.cur_cck_fa_state; 451 dm_digtable->cur_cck_fa_state;
446 } 452 }
447 453
448 rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x40); 454 rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x40);
@@ -458,11 +464,11 @@ static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
458 rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 464 rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
459 MASKBYTE2, 0xd3); 465 MASKBYTE2, 0xd3);
460 } 466 }
461 dm_digtable.pre_cck_pd_state = dm_digtable.cur_cck_pd_state; 467 dm_digtable->pre_cck_pd_state = dm_digtable->cur_cck_pd_state;
462 } 468 }
463 469
464 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "CCKPDStage=%x\n", 470 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "CCKPDStage=%x\n",
465 dm_digtable.cur_cck_pd_state); 471 dm_digtable->cur_cck_pd_state);
466 472
467 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "is92C=%x\n", 473 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE, "is92C=%x\n",
468 IS_92C_SERIAL(rtlhal->version)); 474 IS_92C_SERIAL(rtlhal->version));
@@ -470,31 +476,34 @@ static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
470 476
471static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw) 477static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
472{ 478{
479 struct rtl_priv *rtlpriv = rtl_priv(hw);
480 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
473 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 481 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
474 482
475 if (mac->act_scanning) 483 if (mac->act_scanning)
476 return; 484 return;
477 485
478 if (mac->link_state >= MAC80211_LINKED) 486 if (mac->link_state >= MAC80211_LINKED)
479 dm_digtable.cursta_connectctate = DIG_STA_CONNECT; 487 dm_digtable->cursta_connectctate = DIG_STA_CONNECT;
480 else 488 else
481 dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT; 489 dm_digtable->cursta_connectctate = DIG_STA_DISCONNECT;
482 490
483 rtl92c_dm_initial_gain_sta(hw); 491 rtl92c_dm_initial_gain_sta(hw);
484 rtl92c_dm_initial_gain_multi_sta(hw); 492 rtl92c_dm_initial_gain_multi_sta(hw);
485 rtl92c_dm_cck_packet_detection_thresh(hw); 493 rtl92c_dm_cck_packet_detection_thresh(hw);
486 494
487 dm_digtable.presta_connectstate = dm_digtable.cursta_connectctate; 495 dm_digtable->presta_connectstate = dm_digtable->cursta_connectctate;
488 496
489} 497}
490 498
491static void rtl92c_dm_dig(struct ieee80211_hw *hw) 499static void rtl92c_dm_dig(struct ieee80211_hw *hw)
492{ 500{
493 struct rtl_priv *rtlpriv = rtl_priv(hw); 501 struct rtl_priv *rtlpriv = rtl_priv(hw);
502 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
494 503
495 if (rtlpriv->dm.dm_initialgain_enable == false) 504 if (rtlpriv->dm.dm_initialgain_enable == false)
496 return; 505 return;
497 if (dm_digtable.dig_enable_flag == false) 506 if (dm_digtable->dig_enable_flag == false)
498 return; 507 return;
499 508
500 rtl92c_dm_ctrl_initgain_by_twoport(hw); 509 rtl92c_dm_ctrl_initgain_by_twoport(hw);
@@ -514,23 +523,24 @@ static void rtl92c_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
514void rtl92c_dm_write_dig(struct ieee80211_hw *hw) 523void rtl92c_dm_write_dig(struct ieee80211_hw *hw)
515{ 524{
516 struct rtl_priv *rtlpriv = rtl_priv(hw); 525 struct rtl_priv *rtlpriv = rtl_priv(hw);
526 struct dig_t *dm_digtable = &rtlpriv->dm_digtable;
517 527
518 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, 528 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
519 "cur_igvalue = 0x%x, pre_igvalue = 0x%x, backoff_val = %d\n", 529 "cur_igvalue = 0x%x, pre_igvalue = 0x%x, backoff_val = %d\n",
520 dm_digtable.cur_igvalue, dm_digtable.pre_igvalue, 530 dm_digtable->cur_igvalue, dm_digtable->pre_igvalue,
521 dm_digtable.backoff_val); 531 dm_digtable->backoff_val);
522 532
523 dm_digtable.cur_igvalue += 2; 533 dm_digtable->cur_igvalue += 2;
524 if (dm_digtable.cur_igvalue > 0x3f) 534 if (dm_digtable->cur_igvalue > 0x3f)
525 dm_digtable.cur_igvalue = 0x3f; 535 dm_digtable->cur_igvalue = 0x3f;
526 536
527 if (dm_digtable.pre_igvalue != dm_digtable.cur_igvalue) { 537 if (dm_digtable->pre_igvalue != dm_digtable->cur_igvalue) {
528 rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f, 538 rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
529 dm_digtable.cur_igvalue); 539 dm_digtable->cur_igvalue);
530 rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f, 540 rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f,
531 dm_digtable.cur_igvalue); 541 dm_digtable->cur_igvalue);
532 542
533 dm_digtable.pre_igvalue = dm_digtable.cur_igvalue; 543 dm_digtable->pre_igvalue = dm_digtable->cur_igvalue;
534 } 544 }
535} 545}
536EXPORT_SYMBOL(rtl92c_dm_write_dig); 546EXPORT_SYMBOL(rtl92c_dm_write_dig);
@@ -1223,15 +1233,20 @@ static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
1223 1233
1224static void rtl92c_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw) 1234static void rtl92c_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw)
1225{ 1235{
1226 dm_pstable.pre_ccastate = CCA_MAX; 1236 struct rtl_priv *rtlpriv = rtl_priv(hw);
1227 dm_pstable.cur_ccasate = CCA_MAX; 1237 struct ps_t *dm_pstable = &rtlpriv->dm_pstable;
1228 dm_pstable.pre_rfstate = RF_MAX; 1238
1229 dm_pstable.cur_rfstate = RF_MAX; 1239 dm_pstable->pre_ccastate = CCA_MAX;
1230 dm_pstable.rssi_val_min = 0; 1240 dm_pstable->cur_ccasate = CCA_MAX;
1241 dm_pstable->pre_rfstate = RF_MAX;
1242 dm_pstable->cur_rfstate = RF_MAX;
1243 dm_pstable->rssi_val_min = 0;
1231} 1244}
1232 1245
1233void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal) 1246void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal)
1234{ 1247{
1248 struct rtl_priv *rtlpriv = rtl_priv(hw);
1249 struct ps_t *dm_pstable = &rtlpriv->dm_pstable;
1235 static u8 initialize; 1250 static u8 initialize;
1236 static u32 reg_874, reg_c70, reg_85c, reg_a74; 1251 static u32 reg_874, reg_c70, reg_85c, reg_a74;
1237 1252
@@ -1251,27 +1266,27 @@ void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal)
1251 } 1266 }
1252 1267
1253 if (!bforce_in_normal) { 1268 if (!bforce_in_normal) {
1254 if (dm_pstable.rssi_val_min != 0) { 1269 if (dm_pstable->rssi_val_min != 0) {
1255 if (dm_pstable.pre_rfstate == RF_NORMAL) { 1270 if (dm_pstable->pre_rfstate == RF_NORMAL) {
1256 if (dm_pstable.rssi_val_min >= 30) 1271 if (dm_pstable->rssi_val_min >= 30)
1257 dm_pstable.cur_rfstate = RF_SAVE; 1272 dm_pstable->cur_rfstate = RF_SAVE;
1258 else 1273 else
1259 dm_pstable.cur_rfstate = RF_NORMAL; 1274 dm_pstable->cur_rfstate = RF_NORMAL;
1260 } else { 1275 } else {
1261 if (dm_pstable.rssi_val_min <= 25) 1276 if (dm_pstable->rssi_val_min <= 25)
1262 dm_pstable.cur_rfstate = RF_NORMAL; 1277 dm_pstable->cur_rfstate = RF_NORMAL;
1263 else 1278 else
1264 dm_pstable.cur_rfstate = RF_SAVE; 1279 dm_pstable->cur_rfstate = RF_SAVE;
1265 } 1280 }
1266 } else { 1281 } else {
1267 dm_pstable.cur_rfstate = RF_MAX; 1282 dm_pstable->cur_rfstate = RF_MAX;
1268 } 1283 }
1269 } else { 1284 } else {
1270 dm_pstable.cur_rfstate = RF_NORMAL; 1285 dm_pstable->cur_rfstate = RF_NORMAL;
1271 } 1286 }
1272 1287
1273 if (dm_pstable.pre_rfstate != dm_pstable.cur_rfstate) { 1288 if (dm_pstable->pre_rfstate != dm_pstable->cur_rfstate) {
1274 if (dm_pstable.cur_rfstate == RF_SAVE) { 1289 if (dm_pstable->cur_rfstate == RF_SAVE) {
1275 rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, 1290 rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
1276 0x1C0000, 0x2); 1291 0x1C0000, 0x2);
1277 rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3), 0); 1292 rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3), 0);
@@ -1293,7 +1308,7 @@ void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal)
1293 rtl_set_bbreg(hw, 0x818, BIT(28), 0x0); 1308 rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
1294 } 1309 }
1295 1310
1296 dm_pstable.pre_rfstate = dm_pstable.cur_rfstate; 1311 dm_pstable->pre_rfstate = dm_pstable->cur_rfstate;
1297 } 1312 }
1298} 1313}
1299EXPORT_SYMBOL(rtl92c_dm_rf_saving); 1314EXPORT_SYMBOL(rtl92c_dm_rf_saving);
@@ -1301,36 +1316,37 @@ EXPORT_SYMBOL(rtl92c_dm_rf_saving);
1301static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw) 1316static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
1302{ 1317{
1303 struct rtl_priv *rtlpriv = rtl_priv(hw); 1318 struct rtl_priv *rtlpriv = rtl_priv(hw);
1319 struct ps_t *dm_pstable = &rtlpriv->dm_pstable;
1304 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 1320 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1305 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 1321 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1306 1322
1307 if (((mac->link_state == MAC80211_NOLINK)) && 1323 if (((mac->link_state == MAC80211_NOLINK)) &&
1308 (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) { 1324 (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
1309 dm_pstable.rssi_val_min = 0; 1325 dm_pstable->rssi_val_min = 0;
1310 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, "Not connected to any\n"); 1326 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, "Not connected to any\n");
1311 } 1327 }
1312 1328
1313 if (mac->link_state == MAC80211_LINKED) { 1329 if (mac->link_state == MAC80211_LINKED) {
1314 if (mac->opmode == NL80211_IFTYPE_ADHOC) { 1330 if (mac->opmode == NL80211_IFTYPE_ADHOC) {
1315 dm_pstable.rssi_val_min = 1331 dm_pstable->rssi_val_min =
1316 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; 1332 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
1317 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, 1333 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
1318 "AP Client PWDB = 0x%lx\n", 1334 "AP Client PWDB = 0x%lx\n",
1319 dm_pstable.rssi_val_min); 1335 dm_pstable->rssi_val_min);
1320 } else { 1336 } else {
1321 dm_pstable.rssi_val_min = 1337 dm_pstable->rssi_val_min =
1322 rtlpriv->dm.undecorated_smoothed_pwdb; 1338 rtlpriv->dm.undecorated_smoothed_pwdb;
1323 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, 1339 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
1324 "STA Default Port PWDB = 0x%lx\n", 1340 "STA Default Port PWDB = 0x%lx\n",
1325 dm_pstable.rssi_val_min); 1341 dm_pstable->rssi_val_min);
1326 } 1342 }
1327 } else { 1343 } else {
1328 dm_pstable.rssi_val_min = 1344 dm_pstable->rssi_val_min =
1329 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb; 1345 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
1330 1346
1331 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, 1347 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
1332 "AP Ext Port PWDB = 0x%lx\n", 1348 "AP Ext Port PWDB = 0x%lx\n",
1333 dm_pstable.rssi_val_min); 1349 dm_pstable->rssi_val_min);
1334 } 1350 }
1335 1351
1336 if (IS_92C_SERIAL(rtlhal->version)) 1352 if (IS_92C_SERIAL(rtlhal->version))
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
index 2178e3761883..518e208c0180 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.h
@@ -91,40 +91,6 @@
91#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74 91#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
92#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67 92#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
93 93
94struct ps_t {
95 u8 pre_ccastate;
96 u8 cur_ccasate;
97 u8 pre_rfstate;
98 u8 cur_rfstate;
99 long rssi_val_min;
100};
101
102struct dig_t {
103 u8 dig_enable_flag;
104 u8 dig_ext_port_stage;
105 u32 rssi_lowthresh;
106 u32 rssi_highthresh;
107 u32 fa_lowthresh;
108 u32 fa_highthresh;
109 u8 cursta_connectctate;
110 u8 presta_connectstate;
111 u8 curmultista_connectstate;
112 u8 pre_igvalue;
113 u8 cur_igvalue;
114 char backoff_val;
115 char backoff_val_range_max;
116 char backoff_val_range_min;
117 u8 rx_gain_range_max;
118 u8 rx_gain_range_min;
119 u8 rssi_val_min;
120 u8 pre_cck_pd_state;
121 u8 cur_cck_pd_state;
122 u8 pre_cck_fa_state;
123 u8 cur_cck_fa_state;
124 u8 pre_ccastate;
125 u8 cur_ccasate;
126};
127
128struct swat_t { 94struct swat_t {
129 u8 failure_cnt; 95 u8 failure_cnt;
130 u8 try_flag; 96 u8 try_flag;
@@ -189,7 +155,6 @@ enum dm_dig_connect_e {
189 DIG_CONNECT_MAX 155 DIG_CONNECT_MAX
190}; 156};
191 157
192extern struct dig_t dm_digtable;
193void rtl92c_dm_init(struct ieee80211_hw *hw); 158void rtl92c_dm_init(struct ieee80211_hw *hw);
194void rtl92c_dm_watchdog(struct ieee80211_hw *hw); 159void rtl92c_dm_watchdog(struct ieee80211_hw *hw);
195void rtl92c_dm_write_dig(struct ieee80211_hw *hw); 160void rtl92c_dm_write_dig(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index c20b3c30f62e..692c8ef5ee89 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -34,6 +34,7 @@
34#include "../rtl8192ce/def.h" 34#include "../rtl8192ce/def.h"
35#include "fw_common.h" 35#include "fw_common.h"
36#include <linux/export.h> 36#include <linux/export.h>
37#include <linux/kmemleak.h>
37 38
38static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable) 39static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable)
39{ 40{
@@ -776,6 +777,8 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished)
776 skb = dev_alloc_skb(totalpacketlen); 777 skb = dev_alloc_skb(totalpacketlen);
777 if (!skb) 778 if (!skb)
778 return; 779 return;
780 kmemleak_not_leak(skb);
781
779 memcpy((u8 *) skb_put(skb, totalpacketlen), 782 memcpy((u8 *) skb_put(skb, totalpacketlen),
780 &reserved_page_packet, totalpacketlen); 783 &reserved_page_packet, totalpacketlen);
781 784
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
index 4c016241f340..cdcad7d9f15e 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
@@ -1881,6 +1881,7 @@ void rtl92c_phy_set_io(struct ieee80211_hw *hw)
1881{ 1881{
1882 struct rtl_priv *rtlpriv = rtl_priv(hw); 1882 struct rtl_priv *rtlpriv = rtl_priv(hw);
1883 struct rtl_phy *rtlphy = &(rtlpriv->phy); 1883 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1884 struct dig_t dm_digtable = rtlpriv->dm_digtable;
1884 1885
1885 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, 1886 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
1886 "--->Cmd(%#x), set_io_inprogress(%d)\n", 1887 "--->Cmd(%#x), set_io_inprogress(%d)\n",
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
index 26747fa86005..d4a3d032c7bf 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
@@ -86,40 +86,6 @@
86#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74 86#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
87#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67 87#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
88 88
89struct ps_t {
90 u8 pre_ccastate;
91 u8 cur_ccasate;
92 u8 pre_rfstate;
93 u8 cur_rfstate;
94 long rssi_val_min;
95};
96
97struct dig_t {
98 u8 dig_enable_flag;
99 u8 dig_ext_port_stage;
100 u32 rssi_lowthresh;
101 u32 rssi_highthresh;
102 u32 fa_lowthresh;
103 u32 fa_highthresh;
104 u8 cursta_connectctate;
105 u8 presta_connectstate;
106 u8 curmultista_connectstate;
107 u8 pre_igvalue;
108 u8 cur_igvalue;
109 char backoff_val;
110 char backoff_val_range_max;
111 char backoff_val_range_min;
112 u8 rx_gain_range_max;
113 u8 rx_gain_range_min;
114 u8 rssi_val_min;
115 u8 pre_cck_pd_state;
116 u8 cur_cck_pd_state;
117 u8 pre_cck_fa_state;
118 u8 cur_cck_fa_state;
119 u8 pre_ccastate;
120 u8 cur_ccasate;
121};
122
123struct swat_t { 89struct swat_t {
124 u8 failure_cnt; 90 u8 failure_cnt;
125 u8 try_flag; 91 u8 try_flag;
@@ -184,7 +150,6 @@ enum dm_dig_connect_e {
184 DIG_CONNECT_MAX 150 DIG_CONNECT_MAX
185}; 151};
186 152
187extern struct dig_t dm_digtable;
188void rtl92c_dm_init(struct ieee80211_hw *hw); 153void rtl92c_dm_init(struct ieee80211_hw *hw);
189void rtl92c_dm_watchdog(struct ieee80211_hw *hw); 154void rtl92c_dm_watchdog(struct ieee80211_hw *hw);
190void rtl92c_dm_write_dig(struct ieee80211_hw *hw); 155void rtl92c_dm_write_dig(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index 2c3b73366cd2..3aa927f8b9b9 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -389,21 +389,4 @@ static struct pci_driver rtl92ce_driver = {
389 .driver.pm = &rtlwifi_pm_ops, 389 .driver.pm = &rtlwifi_pm_ops,
390}; 390};
391 391
392static int __init rtl92ce_module_init(void) 392module_pci_driver(rtl92ce_driver);
393{
394 int ret;
395
396 ret = pci_register_driver(&rtl92ce_driver);
397 if (ret)
398 RT_ASSERT(false, "No device found\n");
399
400 return ret;
401}
402
403static void __exit rtl92ce_module_exit(void)
404{
405 pci_unregister_driver(&rtl92ce_driver);
406}
407
408module_init(rtl92ce_module_init);
409module_exit(rtl92ce_module_exit);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
index efb9ab270403..c4adb9777365 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
@@ -530,12 +530,7 @@
530 SET_BITS_OFFSET_LE(__pdesc+28, 0, 32, __val) 530 SET_BITS_OFFSET_LE(__pdesc+28, 0, 32, __val)
531 531
532#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \ 532#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \
533do { \ 533 memset(__pdesc, 0, min_t(size_t, _size, TX_DESC_NEXT_DESC_OFFSET))
534 if (_size > TX_DESC_NEXT_DESC_OFFSET) \
535 memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); \
536 else \
537 memset(__pdesc, 0, _size); \
538} while (0);
539 534
540struct rx_fwinfo_92c { 535struct rx_fwinfo_92c {
541 u8 gain_trsw[4]; 536 u8 gain_trsw[4];
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 82c85286ab2e..7737fb0c6661 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -338,6 +338,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
338 {RTL_USB_DEVICE(0x2019, 0x1201, rtl92cu_hal_cfg)}, /*Planex-Vencer*/ 338 {RTL_USB_DEVICE(0x2019, 0x1201, rtl92cu_hal_cfg)}, /*Planex-Vencer*/
339 339
340 /****** 8192CU ********/ 340 /****** 8192CU ********/
341 {RTL_USB_DEVICE(0x050d, 0x1004, rtl92cu_hal_cfg)}, /*Belcom-SurfN300*/
341 {RTL_USB_DEVICE(0x050d, 0x2102, rtl92cu_hal_cfg)}, /*Belcom-Sercomm*/ 342 {RTL_USB_DEVICE(0x050d, 0x2102, rtl92cu_hal_cfg)}, /*Belcom-Sercomm*/
342 {RTL_USB_DEVICE(0x050d, 0x2103, rtl92cu_hal_cfg)}, /*Belcom-Edimax*/ 343 {RTL_USB_DEVICE(0x050d, 0x2103, rtl92cu_hal_cfg)}, /*Belcom-Edimax*/
343 {RTL_USB_DEVICE(0x0586, 0x341f, rtl92cu_hal_cfg)}, /*Zyxel -Abocom*/ 344 {RTL_USB_DEVICE(0x0586, 0x341f, rtl92cu_hal_cfg)}, /*Zyxel -Abocom*/
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/def.h b/drivers/net/wireless/rtlwifi/rtl8192de/def.h
index eafdf76ed64d..939c905f547f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/def.h
@@ -151,9 +151,6 @@ enum version_8192d {
151 151
152/* for 92D */ 152/* for 92D */
153#define CHIP_92D_SINGLEPHY BIT(9) 153#define CHIP_92D_SINGLEPHY BIT(9)
154#define C_CUT_VERSION BIT(13)
155#define D_CUT_VERSION ((BIT(12)|BIT(13)))
156#define E_CUT_VERSION BIT(14)
157 154
158/* Chip specific */ 155/* Chip specific */
159#define CHIP_BONDING_IDENTIFIER(_value) (((_value)>>22)&0x3) 156#define CHIP_BONDING_IDENTIFIER(_value) (((_value)>>22)&0x3)
@@ -173,7 +170,10 @@ enum version_8192d {
173#define RF_TYPE_1T2R BIT(4) 170#define RF_TYPE_1T2R BIT(4)
174#define RF_TYPE_2T2R BIT(5) 171#define RF_TYPE_2T2R BIT(5)
175#define CHIP_VENDOR_UMC BIT(7) 172#define CHIP_VENDOR_UMC BIT(7)
176#define B_CUT_VERSION BIT(12) 173#define CHIP_92D_B_CUT BIT(12)
174#define CHIP_92D_C_CUT BIT(13)
175#define CHIP_92D_D_CUT (BIT(13)|BIT(12))
176#define CHIP_92D_E_CUT BIT(14)
177 177
178/* MASK */ 178/* MASK */
179#define IC_TYPE_MASK (BIT(0)|BIT(1)|BIT(2)) 179#define IC_TYPE_MASK (BIT(0)|BIT(1)|BIT(2))
@@ -205,15 +205,13 @@ enum version_8192d {
205 CHIP_92D) ? true : false) 205 CHIP_92D) ? true : false)
206#define IS_92D_C_CUT(version) ((IS_92D(version)) ? \ 206#define IS_92D_C_CUT(version) ((IS_92D(version)) ? \
207 ((GET_CVID_CUT_VERSION(version) == \ 207 ((GET_CVID_CUT_VERSION(version) == \
208 0x2000) ? true : false) : false) 208 CHIP_92D_C_CUT) ? true : false) : false)
209#define IS_92D_D_CUT(version) ((IS_92D(version)) ? \ 209#define IS_92D_D_CUT(version) ((IS_92D(version)) ? \
210 ((GET_CVID_CUT_VERSION(version) == \ 210 ((GET_CVID_CUT_VERSION(version) == \
211 0x3000) ? true : false) : false) 211 CHIP_92D_D_CUT) ? true : false) : false)
212#define IS_92D_E_CUT(version) ((IS_92D(version)) ? \ 212#define IS_92D_E_CUT(version) ((IS_92D(version)) ? \
213 ((GET_CVID_CUT_VERSION(version) == \ 213 ((GET_CVID_CUT_VERSION(version) == \
214 0x4000) ? true : false) : false) 214 CHIP_92D_E_CUT) ? true : false) : false)
215#define CHIP_92D_C_CUT BIT(10)
216#define CHIP_92D_D_CUT BIT(11)
217 215
218enum rf_optype { 216enum rf_optype {
219 RF_OP_BY_SW_3WIRE = 0, 217 RF_OP_BY_SW_3WIRE = 0,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
index 4737018c9daa..a7d63a84551a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.c
@@ -37,8 +37,6 @@
37 37
38#define UNDEC_SM_PWDB entry_min_undecoratedsmoothed_pwdb 38#define UNDEC_SM_PWDB entry_min_undecoratedsmoothed_pwdb
39 39
40struct dig_t de_digtable;
41
42static const u32 ofdmswing_table[OFDM_TABLE_SIZE_92D] = { 40static const u32 ofdmswing_table[OFDM_TABLE_SIZE_92D] = {
43 0x7f8001fe, /* 0, +6.0dB */ 41 0x7f8001fe, /* 0, +6.0dB */
44 0x788001e2, /* 1, +5.5dB */ 42 0x788001e2, /* 1, +5.5dB */
@@ -159,27 +157,30 @@ static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
159 157
160static void rtl92d_dm_diginit(struct ieee80211_hw *hw) 158static void rtl92d_dm_diginit(struct ieee80211_hw *hw)
161{ 159{
162 de_digtable.dig_enable_flag = true; 160 struct rtl_priv *rtlpriv = rtl_priv(hw);
163 de_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; 161 struct dig_t *de_digtable = &rtlpriv->dm_digtable;
164 de_digtable.cur_igvalue = 0x20; 162
165 de_digtable.pre_igvalue = 0x0; 163 de_digtable->dig_enable_flag = true;
166 de_digtable.cursta_connectctate = DIG_STA_DISCONNECT; 164 de_digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
167 de_digtable.presta_connectstate = DIG_STA_DISCONNECT; 165 de_digtable->cur_igvalue = 0x20;
168 de_digtable.curmultista_connectstate = DIG_MULTISTA_DISCONNECT; 166 de_digtable->pre_igvalue = 0x0;
169 de_digtable.rssi_lowthresh = DM_DIG_THRESH_LOW; 167 de_digtable->cursta_connectctate = DIG_STA_DISCONNECT;
170 de_digtable.rssi_highthresh = DM_DIG_THRESH_HIGH; 168 de_digtable->presta_connectstate = DIG_STA_DISCONNECT;
171 de_digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW; 169 de_digtable->curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
172 de_digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH; 170 de_digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
173 de_digtable.rx_gain_range_max = DM_DIG_FA_UPPER; 171 de_digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
174 de_digtable.rx_gain_range_min = DM_DIG_FA_LOWER; 172 de_digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
175 de_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT; 173 de_digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
176 de_digtable.backoff_val_range_max = DM_DIG_BACKOFF_MAX; 174 de_digtable->rx_gain_range_max = DM_DIG_FA_UPPER;
177 de_digtable.backoff_val_range_min = DM_DIG_BACKOFF_MIN; 175 de_digtable->rx_gain_range_min = DM_DIG_FA_LOWER;
178 de_digtable.pre_cck_pd_state = CCK_PD_STAGE_LOWRSSI; 176 de_digtable->backoff_val = DM_DIG_BACKOFF_DEFAULT;
179 de_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX; 177 de_digtable->backoff_val_range_max = DM_DIG_BACKOFF_MAX;
180 de_digtable.large_fa_hit = 0; 178 de_digtable->backoff_val_range_min = DM_DIG_BACKOFF_MIN;
181 de_digtable.recover_cnt = 0; 179 de_digtable->pre_cck_pd_state = CCK_PD_STAGE_LOWRSSI;
182 de_digtable.forbidden_igi = DM_DIG_FA_LOWER; 180 de_digtable->cur_cck_pd_state = CCK_PD_STAGE_MAX;
181 de_digtable->large_fa_hit = 0;
182 de_digtable->recover_cnt = 0;
183 de_digtable->forbidden_igi = DM_DIG_FA_LOWER;
183} 184}
184 185
185static void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw) 186static void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
@@ -266,68 +267,70 @@ static void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
266static void rtl92d_dm_find_minimum_rssi(struct ieee80211_hw *hw) 267static void rtl92d_dm_find_minimum_rssi(struct ieee80211_hw *hw)
267{ 268{
268 struct rtl_priv *rtlpriv = rtl_priv(hw); 269 struct rtl_priv *rtlpriv = rtl_priv(hw);
270 struct dig_t *de_digtable = &rtlpriv->dm_digtable;
269 struct rtl_mac *mac = rtl_mac(rtlpriv); 271 struct rtl_mac *mac = rtl_mac(rtlpriv);
270 272
271 /* Determine the minimum RSSI */ 273 /* Determine the minimum RSSI */
272 if ((mac->link_state < MAC80211_LINKED) && 274 if ((mac->link_state < MAC80211_LINKED) &&
273 (rtlpriv->dm.UNDEC_SM_PWDB == 0)) { 275 (rtlpriv->dm.UNDEC_SM_PWDB == 0)) {
274 de_digtable.min_undecorated_pwdb_for_dm = 0; 276 de_digtable->min_undecorated_pwdb_for_dm = 0;
275 RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, 277 RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
276 "Not connected to any\n"); 278 "Not connected to any\n");
277 } 279 }
278 if (mac->link_state >= MAC80211_LINKED) { 280 if (mac->link_state >= MAC80211_LINKED) {
279 if (mac->opmode == NL80211_IFTYPE_AP || 281 if (mac->opmode == NL80211_IFTYPE_AP ||
280 mac->opmode == NL80211_IFTYPE_ADHOC) { 282 mac->opmode == NL80211_IFTYPE_ADHOC) {
281 de_digtable.min_undecorated_pwdb_for_dm = 283 de_digtable->min_undecorated_pwdb_for_dm =
282 rtlpriv->dm.UNDEC_SM_PWDB; 284 rtlpriv->dm.UNDEC_SM_PWDB;
283 RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, 285 RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
284 "AP Client PWDB = 0x%lx\n", 286 "AP Client PWDB = 0x%lx\n",
285 rtlpriv->dm.UNDEC_SM_PWDB); 287 rtlpriv->dm.UNDEC_SM_PWDB);
286 } else { 288 } else {
287 de_digtable.min_undecorated_pwdb_for_dm = 289 de_digtable->min_undecorated_pwdb_for_dm =
288 rtlpriv->dm.undecorated_smoothed_pwdb; 290 rtlpriv->dm.undecorated_smoothed_pwdb;
289 RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, 291 RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
290 "STA Default Port PWDB = 0x%x\n", 292 "STA Default Port PWDB = 0x%x\n",
291 de_digtable.min_undecorated_pwdb_for_dm); 293 de_digtable->min_undecorated_pwdb_for_dm);
292 } 294 }
293 } else { 295 } else {
294 de_digtable.min_undecorated_pwdb_for_dm = 296 de_digtable->min_undecorated_pwdb_for_dm =
295 rtlpriv->dm.UNDEC_SM_PWDB; 297 rtlpriv->dm.UNDEC_SM_PWDB;
296 RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD, 298 RT_TRACE(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
297 "AP Ext Port or disconnect PWDB = 0x%x\n", 299 "AP Ext Port or disconnect PWDB = 0x%x\n",
298 de_digtable.min_undecorated_pwdb_for_dm); 300 de_digtable->min_undecorated_pwdb_for_dm);
299 } 301 }
300 302
301 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n", 303 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n",
302 de_digtable.min_undecorated_pwdb_for_dm); 304 de_digtable->min_undecorated_pwdb_for_dm);
303} 305}
304 306
305static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw) 307static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
306{ 308{
307 struct rtl_priv *rtlpriv = rtl_priv(hw); 309 struct rtl_priv *rtlpriv = rtl_priv(hw);
310 struct dig_t *de_digtable = &rtlpriv->dm_digtable;
308 unsigned long flag = 0; 311 unsigned long flag = 0;
309 312
310 if (de_digtable.cursta_connectctate == DIG_STA_CONNECT) { 313 if (de_digtable->cursta_connectctate == DIG_STA_CONNECT) {
311 if (de_digtable.pre_cck_pd_state == CCK_PD_STAGE_LOWRSSI) { 314 if (de_digtable->pre_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
312 if (de_digtable.min_undecorated_pwdb_for_dm <= 25) 315 if (de_digtable->min_undecorated_pwdb_for_dm <= 25)
313 de_digtable.cur_cck_pd_state = 316 de_digtable->cur_cck_pd_state =
314 CCK_PD_STAGE_LOWRSSI; 317 CCK_PD_STAGE_LOWRSSI;
315 else 318 else
316 de_digtable.cur_cck_pd_state = 319 de_digtable->cur_cck_pd_state =
317 CCK_PD_STAGE_HIGHRSSI; 320 CCK_PD_STAGE_HIGHRSSI;
318 } else { 321 } else {
319 if (de_digtable.min_undecorated_pwdb_for_dm <= 20) 322 if (de_digtable->min_undecorated_pwdb_for_dm <= 20)
320 de_digtable.cur_cck_pd_state = 323 de_digtable->cur_cck_pd_state =
321 CCK_PD_STAGE_LOWRSSI; 324 CCK_PD_STAGE_LOWRSSI;
322 else 325 else
323 de_digtable.cur_cck_pd_state = 326 de_digtable->cur_cck_pd_state =
324 CCK_PD_STAGE_HIGHRSSI; 327 CCK_PD_STAGE_HIGHRSSI;
325 } 328 }
326 } else { 329 } else {
327 de_digtable.cur_cck_pd_state = CCK_PD_STAGE_LOWRSSI; 330 de_digtable->cur_cck_pd_state = CCK_PD_STAGE_LOWRSSI;
328 } 331 }
329 if (de_digtable.pre_cck_pd_state != de_digtable.cur_cck_pd_state) { 332 if (de_digtable->pre_cck_pd_state != de_digtable->cur_cck_pd_state) {
330 if (de_digtable.cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI) { 333 if (de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
331 rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag); 334 rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
332 rtl_set_bbreg(hw, RCCK0_CCA, BMASKBYTE2, 0x83); 335 rtl_set_bbreg(hw, RCCK0_CCA, BMASKBYTE2, 0x83);
333 rtl92d_release_cckandrw_pagea_ctl(hw, &flag); 336 rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
@@ -336,13 +339,13 @@ static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
336 rtl_set_bbreg(hw, RCCK0_CCA, BMASKBYTE2, 0xcd); 339 rtl_set_bbreg(hw, RCCK0_CCA, BMASKBYTE2, 0xcd);
337 rtl92d_release_cckandrw_pagea_ctl(hw, &flag); 340 rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
338 } 341 }
339 de_digtable.pre_cck_pd_state = de_digtable.cur_cck_pd_state; 342 de_digtable->pre_cck_pd_state = de_digtable->cur_cck_pd_state;
340 } 343 }
341 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CurSTAConnectState=%s\n", 344 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CurSTAConnectState=%s\n",
342 de_digtable.cursta_connectctate == DIG_STA_CONNECT ? 345 de_digtable->cursta_connectctate == DIG_STA_CONNECT ?
343 "DIG_STA_CONNECT " : "DIG_STA_DISCONNECT"); 346 "DIG_STA_CONNECT " : "DIG_STA_DISCONNECT");
344 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CCKPDStage=%s\n", 347 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "CCKPDStage=%s\n",
345 de_digtable.cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ? 348 de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ?
346 "Low RSSI " : "High RSSI "); 349 "Low RSSI " : "High RSSI ");
347 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "is92d single phy =%x\n", 350 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "is92d single phy =%x\n",
348 IS_92D_SINGLEPHY(rtlpriv->rtlhal.version)); 351 IS_92D_SINGLEPHY(rtlpriv->rtlhal.version));
@@ -352,37 +355,40 @@ static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
352void rtl92d_dm_write_dig(struct ieee80211_hw *hw) 355void rtl92d_dm_write_dig(struct ieee80211_hw *hw)
353{ 356{
354 struct rtl_priv *rtlpriv = rtl_priv(hw); 357 struct rtl_priv *rtlpriv = rtl_priv(hw);
358 struct dig_t *de_digtable = &rtlpriv->dm_digtable;
355 359
356 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, 360 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
357 "cur_igvalue = 0x%x, pre_igvalue = 0x%x, backoff_val = %d\n", 361 "cur_igvalue = 0x%x, pre_igvalue = 0x%x, backoff_val = %d\n",
358 de_digtable.cur_igvalue, de_digtable.pre_igvalue, 362 de_digtable->cur_igvalue, de_digtable->pre_igvalue,
359 de_digtable.backoff_val); 363 de_digtable->backoff_val);
360 if (de_digtable.dig_enable_flag == false) { 364 if (de_digtable->dig_enable_flag == false) {
361 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "DIG is disabled\n"); 365 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "DIG is disabled\n");
362 de_digtable.pre_igvalue = 0x17; 366 de_digtable->pre_igvalue = 0x17;
363 return; 367 return;
364 } 368 }
365 if (de_digtable.pre_igvalue != de_digtable.cur_igvalue) { 369 if (de_digtable->pre_igvalue != de_digtable->cur_igvalue) {
366 rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f, 370 rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
367 de_digtable.cur_igvalue); 371 de_digtable->cur_igvalue);
368 rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f, 372 rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f,
369 de_digtable.cur_igvalue); 373 de_digtable->cur_igvalue);
370 de_digtable.pre_igvalue = de_digtable.cur_igvalue; 374 de_digtable->pre_igvalue = de_digtable->cur_igvalue;
371 } 375 }
372} 376}
373 377
374static void rtl92d_early_mode_enabled(struct rtl_priv *rtlpriv) 378static void rtl92d_early_mode_enabled(struct rtl_priv *rtlpriv)
375{ 379{
380 struct dig_t *de_digtable = &rtlpriv->dm_digtable;
381
376 if ((rtlpriv->mac80211.link_state >= MAC80211_LINKED) && 382 if ((rtlpriv->mac80211.link_state >= MAC80211_LINKED) &&
377 (rtlpriv->mac80211.vendor == PEER_CISCO)) { 383 (rtlpriv->mac80211.vendor == PEER_CISCO)) {
378 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "IOT_PEER = CISCO\n"); 384 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "IOT_PEER = CISCO\n");
379 if (de_digtable.last_min_undecorated_pwdb_for_dm >= 50 385 if (de_digtable->last_min_undecorated_pwdb_for_dm >= 50
380 && de_digtable.min_undecorated_pwdb_for_dm < 50) { 386 && de_digtable->min_undecorated_pwdb_for_dm < 50) {
381 rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x00); 387 rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x00);
382 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, 388 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
383 "Early Mode Off\n"); 389 "Early Mode Off\n");
384 } else if (de_digtable.last_min_undecorated_pwdb_for_dm <= 55 && 390 } else if (de_digtable->last_min_undecorated_pwdb_for_dm <= 55 &&
385 de_digtable.min_undecorated_pwdb_for_dm > 55) { 391 de_digtable->min_undecorated_pwdb_for_dm > 55) {
386 rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f); 392 rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f);
387 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, 393 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
388 "Early Mode On\n"); 394 "Early Mode On\n");
@@ -396,14 +402,15 @@ static void rtl92d_early_mode_enabled(struct rtl_priv *rtlpriv)
396static void rtl92d_dm_dig(struct ieee80211_hw *hw) 402static void rtl92d_dm_dig(struct ieee80211_hw *hw)
397{ 403{
398 struct rtl_priv *rtlpriv = rtl_priv(hw); 404 struct rtl_priv *rtlpriv = rtl_priv(hw);
399 u8 value_igi = de_digtable.cur_igvalue; 405 struct dig_t *de_digtable = &rtlpriv->dm_digtable;
406 u8 value_igi = de_digtable->cur_igvalue;
400 struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt); 407 struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
401 408
402 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "==>\n"); 409 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "==>\n");
403 if (rtlpriv->rtlhal.earlymode_enable) { 410 if (rtlpriv->rtlhal.earlymode_enable) {
404 rtl92d_early_mode_enabled(rtlpriv); 411 rtl92d_early_mode_enabled(rtlpriv);
405 de_digtable.last_min_undecorated_pwdb_for_dm = 412 de_digtable->last_min_undecorated_pwdb_for_dm =
406 de_digtable.min_undecorated_pwdb_for_dm; 413 de_digtable->min_undecorated_pwdb_for_dm;
407 } 414 }
408 if (!rtlpriv->dm.dm_initialgain_enable) 415 if (!rtlpriv->dm.dm_initialgain_enable)
409 return; 416 return;
@@ -421,9 +428,9 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
421 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "progress\n"); 428 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, "progress\n");
422 /* Decide the current status and if modify initial gain or not */ 429 /* Decide the current status and if modify initial gain or not */
423 if (rtlpriv->mac80211.link_state >= MAC80211_LINKED) 430 if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
424 de_digtable.cursta_connectctate = DIG_STA_CONNECT; 431 de_digtable->cursta_connectctate = DIG_STA_CONNECT;
425 else 432 else
426 de_digtable.cursta_connectctate = DIG_STA_DISCONNECT; 433 de_digtable->cursta_connectctate = DIG_STA_DISCONNECT;
427 434
428 /* adjust initial gain according to false alarm counter */ 435 /* adjust initial gain according to false alarm counter */
429 if (falsealm_cnt->cnt_all < DM_DIG_FA_TH0) 436 if (falsealm_cnt->cnt_all < DM_DIG_FA_TH0)
@@ -436,64 +443,64 @@ static void rtl92d_dm_dig(struct ieee80211_hw *hw)
436 value_igi += 2; 443 value_igi += 2;
437 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, 444 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
438 "dm_DIG() Before: large_fa_hit=%d, forbidden_igi=%x\n", 445 "dm_DIG() Before: large_fa_hit=%d, forbidden_igi=%x\n",
439 de_digtable.large_fa_hit, de_digtable.forbidden_igi); 446 de_digtable->large_fa_hit, de_digtable->forbidden_igi);
440 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, 447 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
441 "dm_DIG() Before: Recover_cnt=%d, rx_gain_range_min=%x\n", 448 "dm_DIG() Before: Recover_cnt=%d, rx_gain_range_min=%x\n",
442 de_digtable.recover_cnt, de_digtable.rx_gain_range_min); 449 de_digtable->recover_cnt, de_digtable->rx_gain_range_min);
443 450
444 /* deal with abnorally large false alarm */ 451 /* deal with abnorally large false alarm */
445 if (falsealm_cnt->cnt_all > 10000) { 452 if (falsealm_cnt->cnt_all > 10000) {
446 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, 453 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
447 "dm_DIG(): Abnormally false alarm case\n"); 454 "dm_DIG(): Abnormally false alarm case\n");
448 455
449 de_digtable.large_fa_hit++; 456 de_digtable->large_fa_hit++;
450 if (de_digtable.forbidden_igi < de_digtable.cur_igvalue) { 457 if (de_digtable->forbidden_igi < de_digtable->cur_igvalue) {
451 de_digtable.forbidden_igi = de_digtable.cur_igvalue; 458 de_digtable->forbidden_igi = de_digtable->cur_igvalue;
452 de_digtable.large_fa_hit = 1; 459 de_digtable->large_fa_hit = 1;
453 } 460 }
454 if (de_digtable.large_fa_hit >= 3) { 461 if (de_digtable->large_fa_hit >= 3) {
455 if ((de_digtable.forbidden_igi + 1) > DM_DIG_MAX) 462 if ((de_digtable->forbidden_igi + 1) > DM_DIG_MAX)
456 de_digtable.rx_gain_range_min = DM_DIG_MAX; 463 de_digtable->rx_gain_range_min = DM_DIG_MAX;
457 else 464 else
458 de_digtable.rx_gain_range_min = 465 de_digtable->rx_gain_range_min =
459 (de_digtable.forbidden_igi + 1); 466 (de_digtable->forbidden_igi + 1);
460 de_digtable.recover_cnt = 3600; /* 3600=2hr */ 467 de_digtable->recover_cnt = 3600; /* 3600=2hr */
461 } 468 }
462 } else { 469 } else {
463 /* Recovery mechanism for IGI lower bound */ 470 /* Recovery mechanism for IGI lower bound */
464 if (de_digtable.recover_cnt != 0) { 471 if (de_digtable->recover_cnt != 0) {
465 de_digtable.recover_cnt--; 472 de_digtable->recover_cnt--;
466 } else { 473 } else {
467 if (de_digtable.large_fa_hit == 0) { 474 if (de_digtable->large_fa_hit == 0) {
468 if ((de_digtable.forbidden_igi - 1) < 475 if ((de_digtable->forbidden_igi - 1) <
469 DM_DIG_FA_LOWER) { 476 DM_DIG_FA_LOWER) {
470 de_digtable.forbidden_igi = 477 de_digtable->forbidden_igi =
471 DM_DIG_FA_LOWER; 478 DM_DIG_FA_LOWER;
472 de_digtable.rx_gain_range_min = 479 de_digtable->rx_gain_range_min =
473 DM_DIG_FA_LOWER; 480 DM_DIG_FA_LOWER;
474 481
475 } else { 482 } else {
476 de_digtable.forbidden_igi--; 483 de_digtable->forbidden_igi--;
477 de_digtable.rx_gain_range_min = 484 de_digtable->rx_gain_range_min =
478 (de_digtable.forbidden_igi + 1); 485 (de_digtable->forbidden_igi + 1);
479 } 486 }
480 } else if (de_digtable.large_fa_hit == 3) { 487 } else if (de_digtable->large_fa_hit == 3) {
481 de_digtable.large_fa_hit = 0; 488 de_digtable->large_fa_hit = 0;
482 } 489 }
483 } 490 }
484 } 491 }
485 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, 492 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
486 "dm_DIG() After: large_fa_hit=%d, forbidden_igi=%x\n", 493 "dm_DIG() After: large_fa_hit=%d, forbidden_igi=%x\n",
487 de_digtable.large_fa_hit, de_digtable.forbidden_igi); 494 de_digtable->large_fa_hit, de_digtable->forbidden_igi);
488 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD, 495 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
489 "dm_DIG() After: recover_cnt=%d, rx_gain_range_min=%x\n", 496 "dm_DIG() After: recover_cnt=%d, rx_gain_range_min=%x\n",
490 de_digtable.recover_cnt, de_digtable.rx_gain_range_min); 497 de_digtable->recover_cnt, de_digtable->rx_gain_range_min);
491 498
492 if (value_igi > DM_DIG_MAX) 499 if (value_igi > DM_DIG_MAX)
493 value_igi = DM_DIG_MAX; 500 value_igi = DM_DIG_MAX;
494 else if (value_igi < de_digtable.rx_gain_range_min) 501 else if (value_igi < de_digtable->rx_gain_range_min)
495 value_igi = de_digtable.rx_gain_range_min; 502 value_igi = de_digtable->rx_gain_range_min;
496 de_digtable.cur_igvalue = value_igi; 503 de_digtable->cur_igvalue = value_igi;
497 rtl92d_dm_write_dig(hw); 504 rtl92d_dm_write_dig(hw);
498 if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G) 505 if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G)
499 rtl92d_dm_cck_packet_detection_thresh(hw); 506 rtl92d_dm_cck_packet_detection_thresh(hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/dm.h b/drivers/net/wireless/rtlwifi/rtl8192de/dm.h
index 91030ec8ac3e..3fea0c11c24a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/dm.h
@@ -87,55 +87,6 @@
87#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67 87#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
88#define INDEX_MAPPING_NUM 13 88#define INDEX_MAPPING_NUM 13
89 89
90struct ps_t {
91 u8 pre_ccastate;
92 u8 cur_ccasate;
93
94 u8 pre_rfstate;
95 u8 cur_rfstate;
96
97 long rssi_val_min;
98};
99
100struct dig_t {
101 u8 dig_enable_flag;
102 u8 dig_ext_port_stage;
103
104 u32 rssi_lowthresh;
105 u32 rssi_highthresh;
106
107 u32 fa_lowthresh;
108 u32 fa_highthresh;
109
110 u8 cursta_connectctate;
111 u8 presta_connectstate;
112 u8 curmultista_connectstate;
113
114 u8 pre_igvalue;
115 u8 cur_igvalue;
116
117 char backoff_val;
118 char backoff_val_range_max;
119 char backoff_val_range_min;
120 u8 rx_gain_range_max;
121 u8 rx_gain_range_min;
122 u8 min_undecorated_pwdb_for_dm;
123 long last_min_undecorated_pwdb_for_dm;
124
125 u8 pre_cck_pd_state;
126 u8 cur_cck_pd_state;
127
128 u8 pre_cck_fa_state;
129 u8 cur_cck_fa_state;
130
131 u8 pre_ccastate;
132 u8 cur_ccasate;
133
134 u8 large_fa_hit;
135 u8 forbidden_igi;
136 u32 recover_cnt;
137};
138
139struct swat { 90struct swat {
140 u8 failure_cnt; 91 u8 failure_cnt;
141 u8 try_flag; 92 u8 try_flag;
@@ -200,8 +151,6 @@ enum dm_dig_connect {
200 DIG_CONNECT_MAX 151 DIG_CONNECT_MAX
201}; 152};
202 153
203extern struct dig_t de_digtable;
204
205void rtl92d_dm_init(struct ieee80211_hw *hw); 154void rtl92d_dm_init(struct ieee80211_hw *hw);
206void rtl92d_dm_watchdog(struct ieee80211_hw *hw); 155void rtl92d_dm_watchdog(struct ieee80211_hw *hw);
207void rtl92d_dm_init_edca_turbo(struct ieee80211_hw *hw); 156void rtl92d_dm_init_edca_turbo(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
index 509f5af38adf..b338d526c422 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
@@ -1743,9 +1743,13 @@ static void _rtl92de_efuse_update_chip_version(struct ieee80211_hw *hw)
1743 chipver |= CHIP_92D_D_CUT; 1743 chipver |= CHIP_92D_D_CUT;
1744 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "D-CUT!!!\n"); 1744 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "D-CUT!!!\n");
1745 break; 1745 break;
1746 case 0xCC33:
1747 chipver |= CHIP_92D_E_CUT;
1748 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "E-CUT!!!\n");
1749 break;
1746 default: 1750 default:
1747 chipver |= CHIP_92D_D_CUT; 1751 chipver |= CHIP_92D_D_CUT;
1748 RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Unkown CUT!\n"); 1752 RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Unknown CUT!\n");
1749 break; 1753 break;
1750 } 1754 }
1751 rtlpriv->rtlhal.version = chipver; 1755 rtlpriv->rtlhal.version = chipver;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
index 28fc5fb8057b..18380a7829f1 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
@@ -3064,6 +3064,7 @@ u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw)
3064static void rtl92d_phy_set_io(struct ieee80211_hw *hw) 3064static void rtl92d_phy_set_io(struct ieee80211_hw *hw)
3065{ 3065{
3066 struct rtl_priv *rtlpriv = rtl_priv(hw); 3066 struct rtl_priv *rtlpriv = rtl_priv(hw);
3067 struct dig_t *de_digtable = &rtlpriv->dm_digtable;
3067 struct rtl_phy *rtlphy = &(rtlpriv->phy); 3068 struct rtl_phy *rtlphy = &(rtlpriv->phy);
3068 3069
3069 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, 3070 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
@@ -3071,13 +3072,13 @@ static void rtl92d_phy_set_io(struct ieee80211_hw *hw)
3071 rtlphy->current_io_type, rtlphy->set_io_inprogress); 3072 rtlphy->current_io_type, rtlphy->set_io_inprogress);
3072 switch (rtlphy->current_io_type) { 3073 switch (rtlphy->current_io_type) {
3073 case IO_CMD_RESUME_DM_BY_SCAN: 3074 case IO_CMD_RESUME_DM_BY_SCAN:
3074 de_digtable.cur_igvalue = rtlphy->initgain_backup.xaagccore1; 3075 de_digtable->cur_igvalue = rtlphy->initgain_backup.xaagccore1;
3075 rtl92d_dm_write_dig(hw); 3076 rtl92d_dm_write_dig(hw);
3076 rtl92d_phy_set_txpower_level(hw, rtlphy->current_channel); 3077 rtl92d_phy_set_txpower_level(hw, rtlphy->current_channel);
3077 break; 3078 break;
3078 case IO_CMD_PAUSE_DM_BY_SCAN: 3079 case IO_CMD_PAUSE_DM_BY_SCAN:
3079 rtlphy->initgain_backup.xaagccore1 = de_digtable.cur_igvalue; 3080 rtlphy->initgain_backup.xaagccore1 = de_digtable->cur_igvalue;
3080 de_digtable.cur_igvalue = 0x37; 3081 de_digtable->cur_igvalue = 0x37;
3081 rtl92d_dm_write_dig(hw); 3082 rtl92d_dm_write_dig(hw);
3082 break; 3083 break;
3083 default: 3084 default:
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
index 0dc736c2723b..057a52431b00 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/trx.h
@@ -530,12 +530,8 @@
530 SET_BITS_OFFSET_LE(__pdesc+28, 0, 32, __val) 530 SET_BITS_OFFSET_LE(__pdesc+28, 0, 32, __val)
531 531
532#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \ 532#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \
533do { \ 533 memset((void *)__pdesc, 0, \
534 if (_size > TX_DESC_NEXT_DESC_OFFSET) \ 534 min_t(size_t, _size, TX_DESC_NEXT_DESC_OFFSET))
535 memset((void *)__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); \
536 else \
537 memset((void *)__pdesc, 0, _size); \
538} while (0);
539 535
540/* For 92D early mode */ 536/* For 92D early mode */
541#define SET_EARLYMODE_PKTNUM(__paddr, __value) \ 537#define SET_EARLYMODE_PKTNUM(__paddr, __value) \
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/def.h b/drivers/net/wireless/rtlwifi/rtl8192se/def.h
index d1b0a1e14971..20afec62ce05 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/def.h
@@ -252,12 +252,7 @@
252 * the desc is cleared. */ 252 * the desc is cleared. */
253#define TX_DESC_NEXT_DESC_OFFSET 36 253#define TX_DESC_NEXT_DESC_OFFSET 36
254#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \ 254#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \
255do { \ 255 memset(__pdesc, 0, min_t(size_t, _size, TX_DESC_NEXT_DESC_OFFSET))
256 if (_size > TX_DESC_NEXT_DESC_OFFSET) \
257 memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); \
258 else \
259 memset(__pdesc, 0, _size); \
260} while (0);
261 256
262/* Rx Desc */ 257/* Rx Desc */
263#define RX_STATUS_DESC_SIZE 24 258#define RX_STATUS_DESC_SIZE 24
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
index fbabae17259e..2e1158026fb7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/dm.c
@@ -35,7 +35,6 @@
35#include "dm.h" 35#include "dm.h"
36#include "fw.h" 36#include "fw.h"
37 37
38struct dig_t digtable;
39static const u32 edca_setting_dl[PEER_MAX] = { 38static const u32 edca_setting_dl[PEER_MAX] = {
40 0xa44f, /* 0 UNKNOWN */ 39 0xa44f, /* 0 UNKNOWN */
41 0x5ea44f, /* 1 REALTEK_90 */ 40 0x5ea44f, /* 1 REALTEK_90 */
@@ -421,62 +420,64 @@ static void _rtl92s_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
421static void rtl92s_backoff_enable_flag(struct ieee80211_hw *hw) 420static void rtl92s_backoff_enable_flag(struct ieee80211_hw *hw)
422{ 421{
423 struct rtl_priv *rtlpriv = rtl_priv(hw); 422 struct rtl_priv *rtlpriv = rtl_priv(hw);
423 struct dig_t *digtable = &rtlpriv->dm_digtable;
424 struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt); 424 struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
425 425
426 if (falsealm_cnt->cnt_all > digtable.fa_highthresh) { 426 if (falsealm_cnt->cnt_all > digtable->fa_highthresh) {
427 if ((digtable.backoff_val - 6) < 427 if ((digtable->backoff_val - 6) <
428 digtable.backoffval_range_min) 428 digtable->backoffval_range_min)
429 digtable.backoff_val = digtable.backoffval_range_min; 429 digtable->backoff_val = digtable->backoffval_range_min;
430 else 430 else
431 digtable.backoff_val -= 6; 431 digtable->backoff_val -= 6;
432 } else if (falsealm_cnt->cnt_all < digtable.fa_lowthresh) { 432 } else if (falsealm_cnt->cnt_all < digtable->fa_lowthresh) {
433 if ((digtable.backoff_val + 6) > 433 if ((digtable->backoff_val + 6) >
434 digtable.backoffval_range_max) 434 digtable->backoffval_range_max)
435 digtable.backoff_val = 435 digtable->backoff_val =
436 digtable.backoffval_range_max; 436 digtable->backoffval_range_max;
437 else 437 else
438 digtable.backoff_val += 6; 438 digtable->backoff_val += 6;
439 } 439 }
440} 440}
441 441
442static void _rtl92s_dm_initial_gain_sta_beforeconnect(struct ieee80211_hw *hw) 442static void _rtl92s_dm_initial_gain_sta_beforeconnect(struct ieee80211_hw *hw)
443{ 443{
444 struct rtl_priv *rtlpriv = rtl_priv(hw); 444 struct rtl_priv *rtlpriv = rtl_priv(hw);
445 struct dig_t *digtable = &rtlpriv->dm_digtable;
445 struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt); 446 struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
446 static u8 initialized, force_write; 447 static u8 initialized, force_write;
447 u8 initial_gain = 0; 448 u8 initial_gain = 0;
448 449
449 if ((digtable.pre_sta_connectstate == digtable.cur_sta_connectstate) || 450 if ((digtable->pre_sta_connectstate == digtable->cur_sta_connectstate) ||
450 (digtable.cur_sta_connectstate == DIG_STA_BEFORE_CONNECT)) { 451 (digtable->cur_sta_connectstate == DIG_STA_BEFORE_CONNECT)) {
451 if (digtable.cur_sta_connectstate == DIG_STA_BEFORE_CONNECT) { 452 if (digtable->cur_sta_connectstate == DIG_STA_BEFORE_CONNECT) {
452 if (rtlpriv->psc.rfpwr_state != ERFON) 453 if (rtlpriv->psc.rfpwr_state != ERFON)
453 return; 454 return;
454 455
455 if (digtable.backoff_enable_flag) 456 if (digtable->backoff_enable_flag)
456 rtl92s_backoff_enable_flag(hw); 457 rtl92s_backoff_enable_flag(hw);
457 else 458 else
458 digtable.backoff_val = DM_DIG_BACKOFF; 459 digtable->backoff_val = DM_DIG_BACKOFF;
459 460
460 if ((digtable.rssi_val + 10 - digtable.backoff_val) > 461 if ((digtable->rssi_val + 10 - digtable->backoff_val) >
461 digtable.rx_gain_range_max) 462 digtable->rx_gain_range_max)
462 digtable.cur_igvalue = 463 digtable->cur_igvalue =
463 digtable.rx_gain_range_max; 464 digtable->rx_gain_range_max;
464 else if ((digtable.rssi_val + 10 - digtable.backoff_val) 465 else if ((digtable->rssi_val + 10 - digtable->backoff_val)
465 < digtable.rx_gain_range_min) 466 < digtable->rx_gain_range_min)
466 digtable.cur_igvalue = 467 digtable->cur_igvalue =
467 digtable.rx_gain_range_min; 468 digtable->rx_gain_range_min;
468 else 469 else
469 digtable.cur_igvalue = digtable.rssi_val + 10 - 470 digtable->cur_igvalue = digtable->rssi_val + 10 -
470 digtable.backoff_val; 471 digtable->backoff_val;
471 472
472 if (falsealm_cnt->cnt_all > 10000) 473 if (falsealm_cnt->cnt_all > 10000)
473 digtable.cur_igvalue = 474 digtable->cur_igvalue =
474 (digtable.cur_igvalue > 0x33) ? 475 (digtable->cur_igvalue > 0x33) ?
475 digtable.cur_igvalue : 0x33; 476 digtable->cur_igvalue : 0x33;
476 477
477 if (falsealm_cnt->cnt_all > 16000) 478 if (falsealm_cnt->cnt_all > 16000)
478 digtable.cur_igvalue = 479 digtable->cur_igvalue =
479 digtable.rx_gain_range_max; 480 digtable->rx_gain_range_max;
480 /* connected -> connected or disconnected -> disconnected */ 481 /* connected -> connected or disconnected -> disconnected */
481 } else { 482 } else {
482 /* Firmware control DIG, do nothing in driver dm */ 483 /* Firmware control DIG, do nothing in driver dm */
@@ -486,31 +487,31 @@ static void _rtl92s_dm_initial_gain_sta_beforeconnect(struct ieee80211_hw *hw)
486 * disconnected or beforeconnect->(dis)connected */ 487 * disconnected or beforeconnect->(dis)connected */
487 } else { 488 } else {
488 /* Enable FW DIG */ 489 /* Enable FW DIG */
489 digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; 490 digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
490 rtl92s_phy_set_fw_cmd(hw, FW_CMD_DIG_ENABLE); 491 rtl92s_phy_set_fw_cmd(hw, FW_CMD_DIG_ENABLE);
491 492
492 digtable.backoff_val = DM_DIG_BACKOFF; 493 digtable->backoff_val = DM_DIG_BACKOFF;
493 digtable.cur_igvalue = rtlpriv->phy.default_initialgain[0]; 494 digtable->cur_igvalue = rtlpriv->phy.default_initialgain[0];
494 digtable.pre_igvalue = 0; 495 digtable->pre_igvalue = 0;
495 return; 496 return;
496 } 497 }
497 498
498 /* Forced writing to prevent from fw-dig overwriting. */ 499 /* Forced writing to prevent from fw-dig overwriting. */
499 if (digtable.pre_igvalue != rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, 500 if (digtable->pre_igvalue != rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1,
500 MASKBYTE0)) 501 MASKBYTE0))
501 force_write = 1; 502 force_write = 1;
502 503
503 if ((digtable.pre_igvalue != digtable.cur_igvalue) || 504 if ((digtable->pre_igvalue != digtable->cur_igvalue) ||
504 !initialized || force_write) { 505 !initialized || force_write) {
505 /* Disable FW DIG */ 506 /* Disable FW DIG */
506 rtl92s_phy_set_fw_cmd(hw, FW_CMD_DIG_DISABLE); 507 rtl92s_phy_set_fw_cmd(hw, FW_CMD_DIG_DISABLE);
507 508
508 initial_gain = (u8)digtable.cur_igvalue; 509 initial_gain = (u8)digtable->cur_igvalue;
509 510
510 /* Set initial gain. */ 511 /* Set initial gain. */
511 rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0, initial_gain); 512 rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0, initial_gain);
512 rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0, initial_gain); 513 rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0, initial_gain);
513 digtable.pre_igvalue = digtable.cur_igvalue; 514 digtable->pre_igvalue = digtable->cur_igvalue;
514 initialized = 1; 515 initialized = 1;
515 force_write = 0; 516 force_write = 0;
516 } 517 }
@@ -519,6 +520,7 @@ static void _rtl92s_dm_initial_gain_sta_beforeconnect(struct ieee80211_hw *hw)
519static void _rtl92s_dm_ctrl_initgain_bytwoport(struct ieee80211_hw *hw) 520static void _rtl92s_dm_ctrl_initgain_bytwoport(struct ieee80211_hw *hw)
520{ 521{
521 struct rtl_priv *rtlpriv = rtl_priv(hw); 522 struct rtl_priv *rtlpriv = rtl_priv(hw);
523 struct dig_t *digtable = &rtlpriv->dm_digtable;
522 524
523 if (rtlpriv->mac80211.act_scanning) 525 if (rtlpriv->mac80211.act_scanning)
524 return; 526 return;
@@ -526,17 +528,17 @@ static void _rtl92s_dm_ctrl_initgain_bytwoport(struct ieee80211_hw *hw)
526 /* Decide the current status and if modify initial gain or not */ 528 /* Decide the current status and if modify initial gain or not */
527 if (rtlpriv->mac80211.link_state >= MAC80211_LINKED || 529 if (rtlpriv->mac80211.link_state >= MAC80211_LINKED ||
528 rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC) 530 rtlpriv->mac80211.opmode == NL80211_IFTYPE_ADHOC)
529 digtable.cur_sta_connectstate = DIG_STA_CONNECT; 531 digtable->cur_sta_connectstate = DIG_STA_CONNECT;
530 else 532 else
531 digtable.cur_sta_connectstate = DIG_STA_DISCONNECT; 533 digtable->cur_sta_connectstate = DIG_STA_DISCONNECT;
532 534
533 digtable.rssi_val = rtlpriv->dm.undecorated_smoothed_pwdb; 535 digtable->rssi_val = rtlpriv->dm.undecorated_smoothed_pwdb;
534 536
535 /* Change dig mode to rssi */ 537 /* Change dig mode to rssi */
536 if (digtable.cur_sta_connectstate != DIG_STA_DISCONNECT) { 538 if (digtable->cur_sta_connectstate != DIG_STA_DISCONNECT) {
537 if (digtable.dig_twoport_algorithm == 539 if (digtable->dig_twoport_algorithm ==
538 DIG_TWO_PORT_ALGO_FALSE_ALARM) { 540 DIG_TWO_PORT_ALGO_FALSE_ALARM) {
539 digtable.dig_twoport_algorithm = DIG_TWO_PORT_ALGO_RSSI; 541 digtable->dig_twoport_algorithm = DIG_TWO_PORT_ALGO_RSSI;
540 rtl92s_phy_set_fw_cmd(hw, FW_CMD_DIG_MODE_SS); 542 rtl92s_phy_set_fw_cmd(hw, FW_CMD_DIG_MODE_SS);
541 } 543 }
542 } 544 }
@@ -544,13 +546,14 @@ static void _rtl92s_dm_ctrl_initgain_bytwoport(struct ieee80211_hw *hw)
544 _rtl92s_dm_false_alarm_counter_statistics(hw); 546 _rtl92s_dm_false_alarm_counter_statistics(hw);
545 _rtl92s_dm_initial_gain_sta_beforeconnect(hw); 547 _rtl92s_dm_initial_gain_sta_beforeconnect(hw);
546 548
547 digtable.pre_sta_connectstate = digtable.cur_sta_connectstate; 549 digtable->pre_sta_connectstate = digtable->cur_sta_connectstate;
548} 550}
549 551
550static void _rtl92s_dm_ctrl_initgain_byrssi(struct ieee80211_hw *hw) 552static void _rtl92s_dm_ctrl_initgain_byrssi(struct ieee80211_hw *hw)
551{ 553{
552 struct rtl_priv *rtlpriv = rtl_priv(hw); 554 struct rtl_priv *rtlpriv = rtl_priv(hw);
553 struct rtl_phy *rtlphy = &(rtlpriv->phy); 555 struct rtl_phy *rtlphy = &(rtlpriv->phy);
556 struct dig_t *digtable = &rtlpriv->dm_digtable;
554 557
555 /* 2T2R TP issue */ 558 /* 2T2R TP issue */
556 if (rtlphy->rf_type == RF_2T2R) 559 if (rtlphy->rf_type == RF_2T2R)
@@ -559,7 +562,7 @@ static void _rtl92s_dm_ctrl_initgain_byrssi(struct ieee80211_hw *hw)
559 if (!rtlpriv->dm.dm_initialgain_enable) 562 if (!rtlpriv->dm.dm_initialgain_enable)
560 return; 563 return;
561 564
562 if (digtable.dig_enable_flag == false) 565 if (digtable->dig_enable_flag == false)
563 return; 566 return;
564 567
565 _rtl92s_dm_ctrl_initgain_bytwoport(hw); 568 _rtl92s_dm_ctrl_initgain_bytwoport(hw);
@@ -639,51 +642,52 @@ static void _rtl92s_dm_dynamic_txpower(struct ieee80211_hw *hw)
639static void _rtl92s_dm_init_dig(struct ieee80211_hw *hw) 642static void _rtl92s_dm_init_dig(struct ieee80211_hw *hw)
640{ 643{
641 struct rtl_priv *rtlpriv = rtl_priv(hw); 644 struct rtl_priv *rtlpriv = rtl_priv(hw);
645 struct dig_t *digtable = &rtlpriv->dm_digtable;
642 646
643 /* Disable DIG scheme now.*/ 647 /* Disable DIG scheme now.*/
644 digtable.dig_enable_flag = true; 648 digtable->dig_enable_flag = true;
645 digtable.backoff_enable_flag = true; 649 digtable->backoff_enable_flag = true;
646 650
647 if ((rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER) && 651 if ((rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER) &&
648 (hal_get_firmwareversion(rtlpriv) >= 0x3c)) 652 (hal_get_firmwareversion(rtlpriv) >= 0x3c))
649 digtable.dig_algorithm = DIG_ALGO_BY_TOW_PORT; 653 digtable->dig_algorithm = DIG_ALGO_BY_TOW_PORT;
650 else 654 else
651 digtable.dig_algorithm = 655 digtable->dig_algorithm =
652 DIG_ALGO_BEFORE_CONNECT_BY_RSSI_AND_ALARM; 656 DIG_ALGO_BEFORE_CONNECT_BY_RSSI_AND_ALARM;
653 657
654 digtable.dig_twoport_algorithm = DIG_TWO_PORT_ALGO_RSSI; 658 digtable->dig_twoport_algorithm = DIG_TWO_PORT_ALGO_RSSI;
655 digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX; 659 digtable->dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
656 /* off=by real rssi value, on=by digtable.rssi_val for new dig */ 660 /* off=by real rssi value, on=by digtable->rssi_val for new dig */
657 digtable.dig_dbgmode = DM_DBG_OFF; 661 digtable->dig_dbgmode = DM_DBG_OFF;
658 digtable.dig_slgorithm_switch = 0; 662 digtable->dig_slgorithm_switch = 0;
659 663
660 /* 2007/10/04 MH Define init gain threshol. */ 664 /* 2007/10/04 MH Define init gain threshol. */
661 digtable.dig_state = DM_STA_DIG_MAX; 665 digtable->dig_state = DM_STA_DIG_MAX;
662 digtable.dig_highpwrstate = DM_STA_DIG_MAX; 666 digtable->dig_highpwrstate = DM_STA_DIG_MAX;
663 667
664 digtable.cur_sta_connectstate = DIG_STA_DISCONNECT; 668 digtable->cur_sta_connectstate = DIG_STA_DISCONNECT;
665 digtable.pre_sta_connectstate = DIG_STA_DISCONNECT; 669 digtable->pre_sta_connectstate = DIG_STA_DISCONNECT;
666 digtable.cur_ap_connectstate = DIG_AP_DISCONNECT; 670 digtable->cur_ap_connectstate = DIG_AP_DISCONNECT;
667 digtable.pre_ap_connectstate = DIG_AP_DISCONNECT; 671 digtable->pre_ap_connectstate = DIG_AP_DISCONNECT;
668 672
669 digtable.rssi_lowthresh = DM_DIG_THRESH_LOW; 673 digtable->rssi_lowthresh = DM_DIG_THRESH_LOW;
670 digtable.rssi_highthresh = DM_DIG_THRESH_HIGH; 674 digtable->rssi_highthresh = DM_DIG_THRESH_HIGH;
671 675
672 digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW; 676 digtable->fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
673 digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH; 677 digtable->fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
674 678
675 digtable.rssi_highpower_lowthresh = DM_DIG_HIGH_PWR_THRESH_LOW; 679 digtable->rssi_highpower_lowthresh = DM_DIG_HIGH_PWR_THRESH_LOW;
676 digtable.rssi_highpower_highthresh = DM_DIG_HIGH_PWR_THRESH_HIGH; 680 digtable->rssi_highpower_highthresh = DM_DIG_HIGH_PWR_THRESH_HIGH;
677 681
678 /* for dig debug rssi value */ 682 /* for dig debug rssi value */
679 digtable.rssi_val = 50; 683 digtable->rssi_val = 50;
680 digtable.backoff_val = DM_DIG_BACKOFF; 684 digtable->backoff_val = DM_DIG_BACKOFF;
681 digtable.rx_gain_range_max = DM_DIG_MAX; 685 digtable->rx_gain_range_max = DM_DIG_MAX;
682 686
683 digtable.rx_gain_range_min = DM_DIG_MIN; 687 digtable->rx_gain_range_min = DM_DIG_MIN;
684 688
685 digtable.backoffval_range_max = DM_DIG_BACKOFF_MAX; 689 digtable->backoffval_range_max = DM_DIG_BACKOFF_MAX;
686 digtable.backoffval_range_min = DM_DIG_BACKOFF_MIN; 690 digtable->backoffval_range_min = DM_DIG_BACKOFF_MIN;
687} 691}
688 692
689static void _rtl92s_dm_init_dynamic_txpower(struct ieee80211_hw *hw) 693static void _rtl92s_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/dm.h b/drivers/net/wireless/rtlwifi/rtl8192se/dm.h
index e1b19a641765..2e9052c8fe4b 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/dm.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/dm.h
@@ -29,48 +29,6 @@
29#ifndef __RTL_92S_DM_H__ 29#ifndef __RTL_92S_DM_H__
30#define __RTL_92S_DM_H__ 30#define __RTL_92S_DM_H__
31 31
32struct dig_t {
33 u8 dig_enable_flag;
34 u8 dig_algorithm;
35 u8 dig_twoport_algorithm;
36 u8 dig_ext_port_stage;
37 u8 dig_dbgmode;
38 u8 dig_slgorithm_switch;
39
40 long rssi_lowthresh;
41 long rssi_highthresh;
42
43 u32 fa_lowthresh;
44 u32 fa_highthresh;
45
46 long rssi_highpower_lowthresh;
47 long rssi_highpower_highthresh;
48
49 u8 dig_state;
50 u8 dig_highpwrstate;
51 u8 cur_sta_connectstate;
52 u8 pre_sta_connectstate;
53 u8 cur_ap_connectstate;
54 u8 pre_ap_connectstate;
55
56 u8 cur_pd_thstate;
57 u8 pre_pd_thstate;
58 u8 cur_cs_ratiostate;
59 u8 pre_cs_ratiostate;
60
61 u32 pre_igvalue;
62 u32 cur_igvalue;
63
64 u8 backoff_enable_flag;
65 char backoff_val;
66 char backoffval_range_max;
67 char backoffval_range_min;
68 u8 rx_gain_range_max;
69 u8 rx_gain_range_min;
70
71 long rssi_val;
72};
73
74enum dm_dig_alg { 32enum dm_dig_alg {
75 DIG_ALGO_BY_FALSE_ALARM = 0, 33 DIG_ALGO_BY_FALSE_ALARM = 0,
76 DIG_ALGO_BY_RSSI = 1, 34 DIG_ALGO_BY_RSSI = 1,
@@ -154,8 +112,6 @@ enum dm_ratr_sta {
154#define DM_DIG_BACKOFF_MAX 12 112#define DM_DIG_BACKOFF_MAX 12
155#define DM_DIG_BACKOFF_MIN -4 113#define DM_DIG_BACKOFF_MIN -4
156 114
157extern struct dig_t digtable;
158
159void rtl92s_dm_watchdog(struct ieee80211_hw *hw); 115void rtl92s_dm_watchdog(struct ieee80211_hw *hw);
160void rtl92s_dm_init(struct ieee80211_hw *hw); 116void rtl92s_dm_init(struct ieee80211_hw *hw);
161void rtl92s_dm_init_edca_turbo(struct ieee80211_hw *hw); 117void rtl92s_dm_init_edca_turbo(struct ieee80211_hw *hw);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/fw.h b/drivers/net/wireless/rtlwifi/rtl8192se/fw.h
index b4afff626437..d53f4332464d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/fw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/fw.h
@@ -345,7 +345,7 @@ enum fw_h2c_cmd {
345 do { \ 345 do { \
346 udelay(1000); \ 346 udelay(1000); \
347 rtlpriv->rtlhal.fwcmd_iomap &= (~_Bit); \ 347 rtlpriv->rtlhal.fwcmd_iomap &= (~_Bit); \
348 } while (0); 348 } while (0)
349 349
350#define FW_CMD_IO_UPDATE(rtlpriv, _val) \ 350#define FW_CMD_IO_UPDATE(rtlpriv, _val) \
351 rtlpriv->rtlhal.fwcmd_iomap = _val; 351 rtlpriv->rtlhal.fwcmd_iomap = _val;
@@ -354,13 +354,13 @@ enum fw_h2c_cmd {
354 do { \ 354 do { \
355 rtl_write_word(rtlpriv, LBUS_MON_ADDR, (u16)_val); \ 355 rtl_write_word(rtlpriv, LBUS_MON_ADDR, (u16)_val); \
356 FW_CMD_IO_UPDATE(rtlpriv, _val); \ 356 FW_CMD_IO_UPDATE(rtlpriv, _val); \
357 } while (0); 357 } while (0)
358 358
359#define FW_CMD_PARA_SET(rtlpriv, _val) \ 359#define FW_CMD_PARA_SET(rtlpriv, _val) \
360 do { \ 360 do { \
361 rtl_write_dword(rtlpriv, LBUS_ADDR_MASK, _val); \ 361 rtl_write_dword(rtlpriv, LBUS_ADDR_MASK, _val); \
362 rtlpriv->rtlhal.fwcmd_ioparam = _val; \ 362 rtlpriv->rtlhal.fwcmd_ioparam = _val; \
363 } while (0); 363 } while (0)
364 364
365#define FW_CMD_IO_QUERY(rtlpriv) \ 365#define FW_CMD_IO_QUERY(rtlpriv) \
366 (u16)(rtlpriv->rtlhal.fwcmd_iomap) 366 (u16)(rtlpriv->rtlhal.fwcmd_iomap)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
index 4a499928e4c6..8d7099bc472c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
@@ -1450,6 +1450,7 @@ static void _rtl92s_phy_set_fwcmd_io(struct ieee80211_hw *hw)
1450bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fw_cmdio) 1450bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fw_cmdio)
1451{ 1451{
1452 struct rtl_priv *rtlpriv = rtl_priv(hw); 1452 struct rtl_priv *rtlpriv = rtl_priv(hw);
1453 struct dig_t *digtable = &rtlpriv->dm_digtable;
1453 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); 1454 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1454 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); 1455 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1455 u32 fw_param = FW_CMD_IO_PARA_QUERY(rtlpriv); 1456 u32 fw_param = FW_CMD_IO_PARA_QUERY(rtlpriv);
@@ -1588,16 +1589,16 @@ bool rtl92s_phy_set_fw_cmd(struct ieee80211_hw *hw, enum fwcmd_iotype fw_cmdio)
1588 FW_SS_CTL); 1589 FW_SS_CTL);
1589 1590
1590 if (rtlpriv->dm.dm_flag & HAL_DM_DIG_DISABLE || 1591 if (rtlpriv->dm.dm_flag & HAL_DM_DIG_DISABLE ||
1591 !digtable.dig_enable_flag) 1592 !digtable->dig_enable_flag)
1592 fw_cmdmap &= ~FW_DIG_ENABLE_CTL; 1593 fw_cmdmap &= ~FW_DIG_ENABLE_CTL;
1593 1594
1594 if ((rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) || 1595 if ((rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) ||
1595 rtlpriv->dm.dynamic_txpower_enable) 1596 rtlpriv->dm.dynamic_txpower_enable)
1596 fw_cmdmap &= ~FW_HIGH_PWR_ENABLE_CTL; 1597 fw_cmdmap &= ~FW_HIGH_PWR_ENABLE_CTL;
1597 1598
1598 if ((digtable.dig_ext_port_stage == 1599 if ((digtable->dig_ext_port_stage ==
1599 DIG_EXT_PORT_STAGE_0) || 1600 DIG_EXT_PORT_STAGE_0) ||
1600 (digtable.dig_ext_port_stage == 1601 (digtable->dig_ext_port_stage ==
1601 DIG_EXT_PORT_STAGE_1)) 1602 DIG_EXT_PORT_STAGE_1))
1602 fw_cmdmap &= ~FW_DIG_ENABLE_CTL; 1603 fw_cmdmap &= ~FW_DIG_ENABLE_CTL;
1603 1604
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
index f1b36005c6a2..730bcc919529 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
@@ -450,21 +450,4 @@ static struct pci_driver rtl92se_driver = {
450 .driver.pm = &rtlwifi_pm_ops, 450 .driver.pm = &rtlwifi_pm_ops,
451}; 451};
452 452
453static int __init rtl92se_module_init(void) 453module_pci_driver(rtl92se_driver);
454{
455 int ret = 0;
456
457 ret = pci_register_driver(&rtl92se_driver);
458 if (ret)
459 RT_ASSERT(false, "No device found\n");
460
461 return ret;
462}
463
464static void __exit rtl92se_module_exit(void)
465{
466 pci_unregister_driver(&rtl92se_driver);
467}
468
469module_init(rtl92se_module_init);
470module_exit(rtl92se_module_exit);
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 28ebc69218a3..bd816aef26dc 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -1592,6 +1592,65 @@ struct rtl_debug {
1592 char proc_name[20]; 1592 char proc_name[20];
1593}; 1593};
1594 1594
1595struct ps_t {
1596 u8 pre_ccastate;
1597 u8 cur_ccasate;
1598 u8 pre_rfstate;
1599 u8 cur_rfstate;
1600 long rssi_val_min;
1601};
1602
1603struct dig_t {
1604 u32 rssi_lowthresh;
1605 u32 rssi_highthresh;
1606 u32 fa_lowthresh;
1607 u32 fa_highthresh;
1608 long last_min_undecorated_pwdb_for_dm;
1609 long rssi_highpower_lowthresh;
1610 long rssi_highpower_highthresh;
1611 u32 recover_cnt;
1612 u32 pre_igvalue;
1613 u32 cur_igvalue;
1614 long rssi_val;
1615 u8 dig_enable_flag;
1616 u8 dig_ext_port_stage;
1617 u8 dig_algorithm;
1618 u8 dig_twoport_algorithm;
1619 u8 dig_dbgmode;
1620 u8 dig_slgorithm_switch;
1621 u8 cursta_connectctate;
1622 u8 presta_connectstate;
1623 u8 curmultista_connectstate;
1624 char backoff_val;
1625 char backoff_val_range_max;
1626 char backoff_val_range_min;
1627 u8 rx_gain_range_max;
1628 u8 rx_gain_range_min;
1629 u8 min_undecorated_pwdb_for_dm;
1630 u8 rssi_val_min;
1631 u8 pre_cck_pd_state;
1632 u8 cur_cck_pd_state;
1633 u8 pre_cck_fa_state;
1634 u8 cur_cck_fa_state;
1635 u8 pre_ccastate;
1636 u8 cur_ccasate;
1637 u8 large_fa_hit;
1638 u8 forbidden_igi;
1639 u8 dig_state;
1640 u8 dig_highpwrstate;
1641 u8 cur_sta_connectstate;
1642 u8 pre_sta_connectstate;
1643 u8 cur_ap_connectstate;
1644 u8 pre_ap_connectstate;
1645 u8 cur_pd_thstate;
1646 u8 pre_pd_thstate;
1647 u8 cur_cs_ratiostate;
1648 u8 pre_cs_ratiostate;
1649 u8 backoff_enable_flag;
1650 char backoffval_range_max;
1651 char backoffval_range_min;
1652};
1653
1595struct rtl_priv { 1654struct rtl_priv {
1596 struct completion firmware_loading_complete; 1655 struct completion firmware_loading_complete;
1597 struct rtl_locks locks; 1656 struct rtl_locks locks;
@@ -1629,6 +1688,10 @@ struct rtl_priv {
1629 interface or hardware */ 1688 interface or hardware */
1630 unsigned long status; 1689 unsigned long status;
1631 1690
1691 /* tables for dm */
1692 struct dig_t dm_digtable;
1693 struct ps_t dm_pstable;
1694
1632 /* data buffer pointer for USB reads */ 1695 /* data buffer pointer for USB reads */
1633 __le32 *usb_data; 1696 __le32 *usb_data;
1634 int usb_data_index; 1697 int usb_data_index;
@@ -1958,37 +2021,35 @@ static inline void rtl_write_dword(struct rtl_priv *rtlpriv,
1958static inline u32 rtl_get_bbreg(struct ieee80211_hw *hw, 2021static inline u32 rtl_get_bbreg(struct ieee80211_hw *hw,
1959 u32 regaddr, u32 bitmask) 2022 u32 regaddr, u32 bitmask)
1960{ 2023{
1961 return ((struct rtl_priv *)(hw)->priv)->cfg->ops->get_bbreg(hw, 2024 struct rtl_priv *rtlpriv = hw->priv;
1962 regaddr, 2025
1963 bitmask); 2026 return rtlpriv->cfg->ops->get_bbreg(hw, regaddr, bitmask);
1964} 2027}
1965 2028
1966static inline void rtl_set_bbreg(struct ieee80211_hw *hw, u32 regaddr, 2029static inline void rtl_set_bbreg(struct ieee80211_hw *hw, u32 regaddr,
1967 u32 bitmask, u32 data) 2030 u32 bitmask, u32 data)
1968{ 2031{
1969 ((struct rtl_priv *)(hw)->priv)->cfg->ops->set_bbreg(hw, 2032 struct rtl_priv *rtlpriv = hw->priv;
1970 regaddr, bitmask,
1971 data);
1972 2033
2034 rtlpriv->cfg->ops->set_bbreg(hw, regaddr, bitmask, data);
1973} 2035}
1974 2036
1975static inline u32 rtl_get_rfreg(struct ieee80211_hw *hw, 2037static inline u32 rtl_get_rfreg(struct ieee80211_hw *hw,
1976 enum radio_path rfpath, u32 regaddr, 2038 enum radio_path rfpath, u32 regaddr,
1977 u32 bitmask) 2039 u32 bitmask)
1978{ 2040{
1979 return ((struct rtl_priv *)(hw)->priv)->cfg->ops->get_rfreg(hw, 2041 struct rtl_priv *rtlpriv = hw->priv;
1980 rfpath, 2042
1981 regaddr, 2043 return rtlpriv->cfg->ops->get_rfreg(hw, rfpath, regaddr, bitmask);
1982 bitmask);
1983} 2044}
1984 2045
1985static inline void rtl_set_rfreg(struct ieee80211_hw *hw, 2046static inline void rtl_set_rfreg(struct ieee80211_hw *hw,
1986 enum radio_path rfpath, u32 regaddr, 2047 enum radio_path rfpath, u32 regaddr,
1987 u32 bitmask, u32 data) 2048 u32 bitmask, u32 data)
1988{ 2049{
1989 ((struct rtl_priv *)(hw)->priv)->cfg->ops->set_rfreg(hw, 2050 struct rtl_priv *rtlpriv = hw->priv;
1990 rfpath, regaddr, 2051
1991 bitmask, data); 2052 rtlpriv->cfg->ops->set_rfreg(hw, rfpath, regaddr, bitmask, data);
1992} 2053}
1993 2054
1994static inline bool is_hal_stop(struct rtl_hal *rtlhal) 2055static inline bool is_hal_stop(struct rtl_hal *rtlhal)
diff --git a/drivers/net/wireless/ti/Kconfig b/drivers/net/wireless/ti/Kconfig
new file mode 100644
index 000000000000..1a72932e2213
--- /dev/null
+++ b/drivers/net/wireless/ti/Kconfig
@@ -0,0 +1,14 @@
1menuconfig WL_TI
2 bool "TI Wireless LAN support"
3 ---help---
4 This section contains support for all the wireless drivers
5 for Texas Instruments WLAN chips, such as wl1251 and the wl12xx
6 family.
7
8if WL_TI
9source "drivers/net/wireless/ti/wl1251/Kconfig"
10source "drivers/net/wireless/ti/wl12xx/Kconfig"
11
12# keep last for automatic dependencies
13source "drivers/net/wireless/ti/wlcore/Kconfig"
14endif # WL_TI
diff --git a/drivers/net/wireless/ti/Makefile b/drivers/net/wireless/ti/Makefile
new file mode 100644
index 000000000000..0a565622d4a4
--- /dev/null
+++ b/drivers/net/wireless/ti/Makefile
@@ -0,0 +1,4 @@
1obj-$(CONFIG_WLCORE) += wlcore/
2obj-$(CONFIG_WL12XX) += wl12xx/
3obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wlcore/
4obj-$(CONFIG_WL1251) += wl1251/
diff --git a/drivers/net/wireless/wl1251/Kconfig b/drivers/net/wireless/ti/wl1251/Kconfig
index 1fb65849414f..1fb65849414f 100644
--- a/drivers/net/wireless/wl1251/Kconfig
+++ b/drivers/net/wireless/ti/wl1251/Kconfig
diff --git a/drivers/net/wireless/wl1251/Makefile b/drivers/net/wireless/ti/wl1251/Makefile
index a5c6328b5f72..a5c6328b5f72 100644
--- a/drivers/net/wireless/wl1251/Makefile
+++ b/drivers/net/wireless/ti/wl1251/Makefile
diff --git a/drivers/net/wireless/wl1251/acx.c b/drivers/net/wireless/ti/wl1251/acx.c
index ad87a1ac6462..ad87a1ac6462 100644
--- a/drivers/net/wireless/wl1251/acx.c
+++ b/drivers/net/wireless/ti/wl1251/acx.c
diff --git a/drivers/net/wireless/wl1251/acx.h b/drivers/net/wireless/ti/wl1251/acx.h
index c2ba100f9b1a..c2ba100f9b1a 100644
--- a/drivers/net/wireless/wl1251/acx.h
+++ b/drivers/net/wireless/ti/wl1251/acx.h
diff --git a/drivers/net/wireless/wl1251/boot.c b/drivers/net/wireless/ti/wl1251/boot.c
index a2e5241382da..a2e5241382da 100644
--- a/drivers/net/wireless/wl1251/boot.c
+++ b/drivers/net/wireless/ti/wl1251/boot.c
diff --git a/drivers/net/wireless/wl1251/boot.h b/drivers/net/wireless/ti/wl1251/boot.h
index 7661bc5e4662..7661bc5e4662 100644
--- a/drivers/net/wireless/wl1251/boot.h
+++ b/drivers/net/wireless/ti/wl1251/boot.h
diff --git a/drivers/net/wireless/wl1251/cmd.c b/drivers/net/wireless/ti/wl1251/cmd.c
index d14d69d733a0..d14d69d733a0 100644
--- a/drivers/net/wireless/wl1251/cmd.c
+++ b/drivers/net/wireless/ti/wl1251/cmd.c
diff --git a/drivers/net/wireless/wl1251/cmd.h b/drivers/net/wireless/ti/wl1251/cmd.h
index ee4f2b391822..ee4f2b391822 100644
--- a/drivers/net/wireless/wl1251/cmd.h
+++ b/drivers/net/wireless/ti/wl1251/cmd.h
diff --git a/drivers/net/wireless/wl1251/debugfs.c b/drivers/net/wireless/ti/wl1251/debugfs.c
index 448da1f8c22f..448da1f8c22f 100644
--- a/drivers/net/wireless/wl1251/debugfs.c
+++ b/drivers/net/wireless/ti/wl1251/debugfs.c
diff --git a/drivers/net/wireless/wl1251/debugfs.h b/drivers/net/wireless/ti/wl1251/debugfs.h
index b3417c02a218..b3417c02a218 100644
--- a/drivers/net/wireless/wl1251/debugfs.h
+++ b/drivers/net/wireless/ti/wl1251/debugfs.h
diff --git a/drivers/net/wireless/wl1251/event.c b/drivers/net/wireless/ti/wl1251/event.c
index 9f15ccaf8f05..9f15ccaf8f05 100644
--- a/drivers/net/wireless/wl1251/event.c
+++ b/drivers/net/wireless/ti/wl1251/event.c
diff --git a/drivers/net/wireless/wl1251/event.h b/drivers/net/wireless/ti/wl1251/event.h
index 30eb5d150bf7..30eb5d150bf7 100644
--- a/drivers/net/wireless/wl1251/event.h
+++ b/drivers/net/wireless/ti/wl1251/event.h
diff --git a/drivers/net/wireless/wl1251/init.c b/drivers/net/wireless/ti/wl1251/init.c
index 89b43d35473c..89b43d35473c 100644
--- a/drivers/net/wireless/wl1251/init.c
+++ b/drivers/net/wireless/ti/wl1251/init.c
diff --git a/drivers/net/wireless/wl1251/init.h b/drivers/net/wireless/ti/wl1251/init.h
index 543f17582ead..543f17582ead 100644
--- a/drivers/net/wireless/wl1251/init.h
+++ b/drivers/net/wireless/ti/wl1251/init.h
diff --git a/drivers/net/wireless/wl1251/io.c b/drivers/net/wireless/ti/wl1251/io.c
index cdcadbf6ac2c..cdcadbf6ac2c 100644
--- a/drivers/net/wireless/wl1251/io.c
+++ b/drivers/net/wireless/ti/wl1251/io.c
diff --git a/drivers/net/wireless/wl1251/io.h b/drivers/net/wireless/ti/wl1251/io.h
index d382877c34cc..d382877c34cc 100644
--- a/drivers/net/wireless/wl1251/io.h
+++ b/drivers/net/wireless/ti/wl1251/io.h
diff --git a/drivers/net/wireless/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index d1afb8e3b2ef..d1afb8e3b2ef 100644
--- a/drivers/net/wireless/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
diff --git a/drivers/net/wireless/wl1251/ps.c b/drivers/net/wireless/ti/wl1251/ps.c
index db719f7d2692..db719f7d2692 100644
--- a/drivers/net/wireless/wl1251/ps.c
+++ b/drivers/net/wireless/ti/wl1251/ps.c
diff --git a/drivers/net/wireless/wl1251/ps.h b/drivers/net/wireless/ti/wl1251/ps.h
index 75efad246d67..75efad246d67 100644
--- a/drivers/net/wireless/wl1251/ps.h
+++ b/drivers/net/wireless/ti/wl1251/ps.h
diff --git a/drivers/net/wireless/wl1251/reg.h b/drivers/net/wireless/ti/wl1251/reg.h
index a5809019c5c1..a5809019c5c1 100644
--- a/drivers/net/wireless/wl1251/reg.h
+++ b/drivers/net/wireless/ti/wl1251/reg.h
diff --git a/drivers/net/wireless/wl1251/rx.c b/drivers/net/wireless/ti/wl1251/rx.c
index 6af35265c900..6af35265c900 100644
--- a/drivers/net/wireless/wl1251/rx.c
+++ b/drivers/net/wireless/ti/wl1251/rx.c
diff --git a/drivers/net/wireless/wl1251/rx.h b/drivers/net/wireless/ti/wl1251/rx.h
index 4448f635a4d8..4448f635a4d8 100644
--- a/drivers/net/wireless/wl1251/rx.h
+++ b/drivers/net/wireless/ti/wl1251/rx.h
diff --git a/drivers/net/wireless/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
index 1b851f650e07..1b851f650e07 100644
--- a/drivers/net/wireless/wl1251/sdio.c
+++ b/drivers/net/wireless/ti/wl1251/sdio.c
diff --git a/drivers/net/wireless/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index 6248c354fc5c..6248c354fc5c 100644
--- a/drivers/net/wireless/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
diff --git a/drivers/net/wireless/wl1251/spi.h b/drivers/net/wireless/ti/wl1251/spi.h
index 16d506955cc0..16d506955cc0 100644
--- a/drivers/net/wireless/wl1251/spi.h
+++ b/drivers/net/wireless/ti/wl1251/spi.h
diff --git a/drivers/net/wireless/wl1251/tx.c b/drivers/net/wireless/ti/wl1251/tx.c
index 28121c590a2b..28121c590a2b 100644
--- a/drivers/net/wireless/wl1251/tx.c
+++ b/drivers/net/wireless/ti/wl1251/tx.c
diff --git a/drivers/net/wireless/wl1251/tx.h b/drivers/net/wireless/ti/wl1251/tx.h
index 81338d39b43e..81338d39b43e 100644
--- a/drivers/net/wireless/wl1251/tx.h
+++ b/drivers/net/wireless/ti/wl1251/tx.h
diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/ti/wl1251/wl1251.h
index 9d8f5816c6f9..9d8f5816c6f9 100644
--- a/drivers/net/wireless/wl1251/wl1251.h
+++ b/drivers/net/wireless/ti/wl1251/wl1251.h
diff --git a/drivers/net/wireless/wl1251/wl12xx_80211.h b/drivers/net/wireless/ti/wl1251/wl12xx_80211.h
index 04ed51495772..04ed51495772 100644
--- a/drivers/net/wireless/wl1251/wl12xx_80211.h
+++ b/drivers/net/wireless/ti/wl1251/wl12xx_80211.h
diff --git a/drivers/net/wireless/ti/wl12xx/Kconfig b/drivers/net/wireless/ti/wl12xx/Kconfig
new file mode 100644
index 000000000000..5b92329122c4
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/Kconfig
@@ -0,0 +1,8 @@
1config WL12XX
2 tristate "TI wl12xx support"
3 select WLCORE
4 ---help---
5 This module adds support for wireless adapters based on TI wl1271,
6 wl1273, wl1281 and wl1283 chipsets. This module does *not* include
7 support for wl1251. For wl1251 support, use the separate homonymous
8 driver instead.
diff --git a/drivers/net/wireless/ti/wl12xx/Makefile b/drivers/net/wireless/ti/wl12xx/Makefile
new file mode 100644
index 000000000000..87f64b14db35
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/Makefile
@@ -0,0 +1,3 @@
1wl12xx-objs = main.o cmd.o acx.o
2
3obj-$(CONFIG_WL12XX) += wl12xx.o
diff --git a/drivers/net/wireless/ti/wl12xx/acx.c b/drivers/net/wireless/ti/wl12xx/acx.c
new file mode 100644
index 000000000000..bea06b2d7bf4
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/acx.c
@@ -0,0 +1,53 @@
1/*
2 * This file is part of wl12xx
3 *
4 * Copyright (C) 2008-2009 Nokia Corporation
5 * Copyright (C) 2011 Texas Instruments Inc.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
19 * 02110-1301 USA
20 *
21 */
22
23#include "../wlcore/cmd.h"
24#include "../wlcore/debug.h"
25#include "../wlcore/acx.h"
26
27#include "acx.h"
28
29int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap)
30{
31 struct wl1271_acx_host_config_bitmap *bitmap_conf;
32 int ret;
33
34 bitmap_conf = kzalloc(sizeof(*bitmap_conf), GFP_KERNEL);
35 if (!bitmap_conf) {
36 ret = -ENOMEM;
37 goto out;
38 }
39
40 bitmap_conf->host_cfg_bitmap = cpu_to_le32(host_cfg_bitmap);
41
42 ret = wl1271_cmd_configure(wl, ACX_HOST_IF_CFG_BITMAP,
43 bitmap_conf, sizeof(*bitmap_conf));
44 if (ret < 0) {
45 wl1271_warning("wl1271 bitmap config opt failed: %d", ret);
46 goto out;
47 }
48
49out:
50 kfree(bitmap_conf);
51
52 return ret;
53}
diff --git a/drivers/net/wireless/ti/wl12xx/acx.h b/drivers/net/wireless/ti/wl12xx/acx.h
new file mode 100644
index 000000000000..d1f5aba0afce
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/acx.h
@@ -0,0 +1,36 @@
1/*
2 * This file is part of wl12xx
3 *
4 * Copyright (C) 1998-2009, 2011 Texas Instruments. All rights reserved.
5 * Copyright (C) 2008-2010 Nokia Corporation
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
19 * 02110-1301 USA
20 *
21 */
22
23#ifndef __WL12XX_ACX_H__
24#define __WL12XX_ACX_H__
25
26#include "../wlcore/wlcore.h"
27
28struct wl1271_acx_host_config_bitmap {
29 struct acx_header header;
30
31 __le32 host_cfg_bitmap;
32} __packed;
33
34int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap);
35
36#endif /* __WL12XX_ACX_H__ */
diff --git a/drivers/net/wireless/ti/wl12xx/cmd.c b/drivers/net/wireless/ti/wl12xx/cmd.c
new file mode 100644
index 000000000000..8ffaeb5f2147
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/cmd.c
@@ -0,0 +1,254 @@
1/*
2 * This file is part of wl12xx
3 *
4 * Copyright (C) 2009-2010 Nokia Corporation
5 * Copyright (C) 2011 Texas Instruments Inc.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
19 * 02110-1301 USA
20 *
21 */
22
23#include "../wlcore/cmd.h"
24#include "../wlcore/debug.h"
25
26#include "wl12xx.h"
27#include "cmd.h"
28
29int wl1271_cmd_ext_radio_parms(struct wl1271 *wl)
30{
31 struct wl1271_ext_radio_parms_cmd *ext_radio_parms;
32 struct wl12xx_priv *priv = wl->priv;
33 struct wl12xx_conf_rf *rf = &priv->conf.rf;
34 int ret;
35
36 if (!wl->nvs)
37 return -ENODEV;
38
39 ext_radio_parms = kzalloc(sizeof(*ext_radio_parms), GFP_KERNEL);
40 if (!ext_radio_parms)
41 return -ENOMEM;
42
43 ext_radio_parms->test.id = TEST_CMD_INI_FILE_RF_EXTENDED_PARAM;
44
45 memcpy(ext_radio_parms->tx_per_channel_power_compensation_2,
46 rf->tx_per_channel_power_compensation_2,
47 CONF_TX_PWR_COMPENSATION_LEN_2);
48 memcpy(ext_radio_parms->tx_per_channel_power_compensation_5,
49 rf->tx_per_channel_power_compensation_5,
50 CONF_TX_PWR_COMPENSATION_LEN_5);
51
52 wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_EXT_RADIO_PARAM: ",
53 ext_radio_parms, sizeof(*ext_radio_parms));
54
55 ret = wl1271_cmd_test(wl, ext_radio_parms, sizeof(*ext_radio_parms), 0);
56 if (ret < 0)
57 wl1271_warning("TEST_CMD_INI_FILE_RF_EXTENDED_PARAM failed");
58
59 kfree(ext_radio_parms);
60 return ret;
61}
62
63int wl1271_cmd_general_parms(struct wl1271 *wl)
64{
65 struct wl1271_general_parms_cmd *gen_parms;
66 struct wl1271_ini_general_params *gp =
67 &((struct wl1271_nvs_file *)wl->nvs)->general_params;
68 bool answer = false;
69 int ret;
70
71 if (!wl->nvs)
72 return -ENODEV;
73
74 if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
75 wl1271_warning("FEM index from INI out of bounds");
76 return -EINVAL;
77 }
78
79 gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
80 if (!gen_parms)
81 return -ENOMEM;
82
83 gen_parms->test.id = TEST_CMD_INI_FILE_GENERAL_PARAM;
84
85 memcpy(&gen_parms->general_params, gp, sizeof(*gp));
86
87 if (gp->tx_bip_fem_auto_detect)
88 answer = true;
89
90 /* Override the REF CLK from the NVS with the one from platform data */
91 gen_parms->general_params.ref_clock = wl->ref_clock;
92
93 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer);
94 if (ret < 0) {
95 wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed");
96 goto out;
97 }
98
99 gp->tx_bip_fem_manufacturer =
100 gen_parms->general_params.tx_bip_fem_manufacturer;
101
102 if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
103 wl1271_warning("FEM index from FW out of bounds");
104 ret = -EINVAL;
105 goto out;
106 }
107
108 wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
109 answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
110
111out:
112 kfree(gen_parms);
113 return ret;
114}
115
116int wl128x_cmd_general_parms(struct wl1271 *wl)
117{
118 struct wl128x_general_parms_cmd *gen_parms;
119 struct wl128x_ini_general_params *gp =
120 &((struct wl128x_nvs_file *)wl->nvs)->general_params;
121 bool answer = false;
122 int ret;
123
124 if (!wl->nvs)
125 return -ENODEV;
126
127 if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
128 wl1271_warning("FEM index from ini out of bounds");
129 return -EINVAL;
130 }
131
132 gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
133 if (!gen_parms)
134 return -ENOMEM;
135
136 gen_parms->test.id = TEST_CMD_INI_FILE_GENERAL_PARAM;
137
138 memcpy(&gen_parms->general_params, gp, sizeof(*gp));
139
140 if (gp->tx_bip_fem_auto_detect)
141 answer = true;
142
143 /* Replace REF and TCXO CLKs with the ones from platform data */
144 gen_parms->general_params.ref_clock = wl->ref_clock;
145 gen_parms->general_params.tcxo_ref_clock = wl->tcxo_clock;
146
147 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer);
148 if (ret < 0) {
149 wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed");
150 goto out;
151 }
152
153 gp->tx_bip_fem_manufacturer =
154 gen_parms->general_params.tx_bip_fem_manufacturer;
155
156 if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
157 wl1271_warning("FEM index from FW out of bounds");
158 ret = -EINVAL;
159 goto out;
160 }
161
162 wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
163 answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
164
165out:
166 kfree(gen_parms);
167 return ret;
168}
169
170int wl1271_cmd_radio_parms(struct wl1271 *wl)
171{
172 struct wl1271_nvs_file *nvs = (struct wl1271_nvs_file *)wl->nvs;
173 struct wl1271_radio_parms_cmd *radio_parms;
174 struct wl1271_ini_general_params *gp = &nvs->general_params;
175 int ret;
176
177 if (!wl->nvs)
178 return -ENODEV;
179
180 radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL);
181 if (!radio_parms)
182 return -ENOMEM;
183
184 radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM;
185
186 /* 2.4GHz parameters */
187 memcpy(&radio_parms->static_params_2, &nvs->stat_radio_params_2,
188 sizeof(struct wl1271_ini_band_params_2));
189 memcpy(&radio_parms->dyn_params_2,
190 &nvs->dyn_radio_params_2[gp->tx_bip_fem_manufacturer].params,
191 sizeof(struct wl1271_ini_fem_params_2));
192
193 /* 5GHz parameters */
194 memcpy(&radio_parms->static_params_5,
195 &nvs->stat_radio_params_5,
196 sizeof(struct wl1271_ini_band_params_5));
197 memcpy(&radio_parms->dyn_params_5,
198 &nvs->dyn_radio_params_5[gp->tx_bip_fem_manufacturer].params,
199 sizeof(struct wl1271_ini_fem_params_5));
200
201 wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ",
202 radio_parms, sizeof(*radio_parms));
203
204 ret = wl1271_cmd_test(wl, radio_parms, sizeof(*radio_parms), 0);
205 if (ret < 0)
206 wl1271_warning("CMD_INI_FILE_RADIO_PARAM failed");
207
208 kfree(radio_parms);
209 return ret;
210}
211
212int wl128x_cmd_radio_parms(struct wl1271 *wl)
213{
214 struct wl128x_nvs_file *nvs = (struct wl128x_nvs_file *)wl->nvs;
215 struct wl128x_radio_parms_cmd *radio_parms;
216 struct wl128x_ini_general_params *gp = &nvs->general_params;
217 int ret;
218
219 if (!wl->nvs)
220 return -ENODEV;
221
222 radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL);
223 if (!radio_parms)
224 return -ENOMEM;
225
226 radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM;
227
228 /* 2.4GHz parameters */
229 memcpy(&radio_parms->static_params_2, &nvs->stat_radio_params_2,
230 sizeof(struct wl128x_ini_band_params_2));
231 memcpy(&radio_parms->dyn_params_2,
232 &nvs->dyn_radio_params_2[gp->tx_bip_fem_manufacturer].params,
233 sizeof(struct wl128x_ini_fem_params_2));
234
235 /* 5GHz parameters */
236 memcpy(&radio_parms->static_params_5,
237 &nvs->stat_radio_params_5,
238 sizeof(struct wl128x_ini_band_params_5));
239 memcpy(&radio_parms->dyn_params_5,
240 &nvs->dyn_radio_params_5[gp->tx_bip_fem_manufacturer].params,
241 sizeof(struct wl128x_ini_fem_params_5));
242
243 radio_parms->fem_vendor_and_options = nvs->fem_vendor_and_options;
244
245 wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ",
246 radio_parms, sizeof(*radio_parms));
247
248 ret = wl1271_cmd_test(wl, radio_parms, sizeof(*radio_parms), 0);
249 if (ret < 0)
250 wl1271_warning("CMD_INI_FILE_RADIO_PARAM failed");
251
252 kfree(radio_parms);
253 return ret;
254}
diff --git a/drivers/net/wireless/ti/wl12xx/cmd.h b/drivers/net/wireless/ti/wl12xx/cmd.h
new file mode 100644
index 000000000000..140a0e8829d5
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/cmd.h
@@ -0,0 +1,112 @@
1/*
2 * This file is part of wl12xx
3 *
4 * Copyright (C) 1998-2009, 2011 Texas Instruments. All rights reserved.
5 * Copyright (C) 2009 Nokia Corporation
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
19 * 02110-1301 USA
20 *
21 */
22
23#ifndef __WL12XX_CMD_H__
24#define __WL12XX_CMD_H__
25
26#include "conf.h"
27
28#define TEST_CMD_INI_FILE_RADIO_PARAM 0x19
29#define TEST_CMD_INI_FILE_GENERAL_PARAM 0x1E
30
31struct wl1271_general_parms_cmd {
32 struct wl1271_cmd_header header;
33
34 struct wl1271_cmd_test_header test;
35
36 struct wl1271_ini_general_params general_params;
37
38 u8 sr_debug_table[WL1271_INI_MAX_SMART_REFLEX_PARAM];
39 u8 sr_sen_n_p;
40 u8 sr_sen_n_p_gain;
41 u8 sr_sen_nrn;
42 u8 sr_sen_prn;
43 u8 padding[3];
44} __packed;
45
46struct wl128x_general_parms_cmd {
47 struct wl1271_cmd_header header;
48
49 struct wl1271_cmd_test_header test;
50
51 struct wl128x_ini_general_params general_params;
52
53 u8 sr_debug_table[WL1271_INI_MAX_SMART_REFLEX_PARAM];
54 u8 sr_sen_n_p;
55 u8 sr_sen_n_p_gain;
56 u8 sr_sen_nrn;
57 u8 sr_sen_prn;
58 u8 padding[3];
59} __packed;
60
61struct wl1271_radio_parms_cmd {
62 struct wl1271_cmd_header header;
63
64 struct wl1271_cmd_test_header test;
65
66 /* Static radio parameters */
67 struct wl1271_ini_band_params_2 static_params_2;
68 struct wl1271_ini_band_params_5 static_params_5;
69
70 /* Dynamic radio parameters */
71 struct wl1271_ini_fem_params_2 dyn_params_2;
72 u8 padding2;
73 struct wl1271_ini_fem_params_5 dyn_params_5;
74 u8 padding3[2];
75} __packed;
76
77struct wl128x_radio_parms_cmd {
78 struct wl1271_cmd_header header;
79
80 struct wl1271_cmd_test_header test;
81
82 /* Static radio parameters */
83 struct wl128x_ini_band_params_2 static_params_2;
84 struct wl128x_ini_band_params_5 static_params_5;
85
86 u8 fem_vendor_and_options;
87
88 /* Dynamic radio parameters */
89 struct wl128x_ini_fem_params_2 dyn_params_2;
90 u8 padding2;
91 struct wl128x_ini_fem_params_5 dyn_params_5;
92} __packed;
93
94#define TEST_CMD_INI_FILE_RF_EXTENDED_PARAM 0x26
95
96struct wl1271_ext_radio_parms_cmd {
97 struct wl1271_cmd_header header;
98
99 struct wl1271_cmd_test_header test;
100
101 u8 tx_per_channel_power_compensation_2[CONF_TX_PWR_COMPENSATION_LEN_2];
102 u8 tx_per_channel_power_compensation_5[CONF_TX_PWR_COMPENSATION_LEN_5];
103 u8 padding[3];
104} __packed;
105
106int wl1271_cmd_general_parms(struct wl1271 *wl);
107int wl128x_cmd_general_parms(struct wl1271 *wl);
108int wl1271_cmd_radio_parms(struct wl1271 *wl);
109int wl128x_cmd_radio_parms(struct wl1271 *wl);
110int wl1271_cmd_ext_radio_parms(struct wl1271 *wl);
111
112#endif /* __WL12XX_CMD_H__ */
diff --git a/drivers/net/wireless/ti/wl12xx/conf.h b/drivers/net/wireless/ti/wl12xx/conf.h
new file mode 100644
index 000000000000..75e29897a0f5
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/conf.h
@@ -0,0 +1,50 @@
1/*
2 * This file is part of wl12xx
3 *
4 * Copyright (C) 2011 Texas Instruments Inc.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#ifndef __WL12XX_CONF_H__
23#define __WL12XX_CONF_H__
24
/* these are number of channels on the band divided by two, rounded up */
#define CONF_TX_PWR_COMPENSATION_LEN_2 7
#define CONF_TX_PWR_COMPENSATION_LEN_5 18

/* wl12xx-specific RF configuration (per-channel TX power compensation) */
struct wl12xx_conf_rf {
	/*
	 * Per channel power compensation for 2.4GHz
	 *
	 * Range: s8
	 */
	/* NOTE(review): fields are declared u8 although the comments say
	 * "Range: s8" — presumably the bytes are interpreted as signed by
	 * the FW; confirm before relying on the sign. */
	u8 tx_per_channel_power_compensation_2[CONF_TX_PWR_COMPENSATION_LEN_2];

	/*
	 * Per channel power compensation for 5GHz
	 *
	 * Range: s8
	 */
	u8 tx_per_channel_power_compensation_5[CONF_TX_PWR_COMPENSATION_LEN_5];
};

/* Private (family-specific) configuration for wl12xx chips */
struct wl12xx_priv_conf {
	struct wl12xx_conf_rf rf;
	/* memory settings used on wl127x chips instead of the generic ones */
	struct conf_memory_settings mem_wl127x;
};
49
50#endif /* __WL12XX_CONF_H__ */
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
new file mode 100644
index 000000000000..d7dd3def07b5
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -0,0 +1,1388 @@
1/*
2 * This file is part of wl1271
3 *
4 * Copyright (C) 2008-2010 Nokia Corporation
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
#include <linux/module.h>
#include <linux/platform_device.h>

#include <linux/err.h>

#include <linux/wl12xx.h>

#include "../wlcore/wlcore.h"
#include "../wlcore/debug.h"
#include "../wlcore/io.h"
#include "../wlcore/acx.h"
#include "../wlcore/tx.h"
#include "../wlcore/rx.h"
#include "../wlcore/boot.h"

#include "wl12xx.h"
#include "reg.h"
#include "cmd.h"
#include "acx.h"
42
/*
 * Default wlcore configuration for the wl12xx family.  Copied into
 * wl->conf at probe time; individual sections may later be overridden
 * (e.g. .mem is replaced for wl127x chips, see the comment before .mem).
 */
static struct wlcore_conf wl12xx_conf = {
	.sg = {
		.params = {
			[CONF_SG_ACL_BT_MASTER_MIN_BR] = 10,
			[CONF_SG_ACL_BT_MASTER_MAX_BR] = 180,
			[CONF_SG_ACL_BT_SLAVE_MIN_BR] = 10,
			[CONF_SG_ACL_BT_SLAVE_MAX_BR] = 180,
			[CONF_SG_ACL_BT_MASTER_MIN_EDR] = 10,
			[CONF_SG_ACL_BT_MASTER_MAX_EDR] = 80,
			[CONF_SG_ACL_BT_SLAVE_MIN_EDR] = 10,
			[CONF_SG_ACL_BT_SLAVE_MAX_EDR] = 80,
			[CONF_SG_ACL_WLAN_PS_MASTER_BR] = 8,
			[CONF_SG_ACL_WLAN_PS_SLAVE_BR] = 8,
			[CONF_SG_ACL_WLAN_PS_MASTER_EDR] = 20,
			[CONF_SG_ACL_WLAN_PS_SLAVE_EDR] = 20,
			[CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_BR] = 20,
			[CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_BR] = 35,
			[CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_BR] = 16,
			[CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_BR] = 35,
			[CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_EDR] = 32,
			[CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_EDR] = 50,
			[CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_EDR] = 28,
			[CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_EDR] = 50,
			[CONF_SG_ACL_ACTIVE_SCAN_WLAN_BR] = 10,
			[CONF_SG_ACL_ACTIVE_SCAN_WLAN_EDR] = 20,
			[CONF_SG_ACL_PASSIVE_SCAN_BT_BR] = 75,
			[CONF_SG_ACL_PASSIVE_SCAN_WLAN_BR] = 15,
			[CONF_SG_ACL_PASSIVE_SCAN_BT_EDR] = 27,
			[CONF_SG_ACL_PASSIVE_SCAN_WLAN_EDR] = 17,
			/* active scan params */
			[CONF_SG_AUTO_SCAN_PROBE_REQ] = 170,
			[CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50,
			[CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100,
			/* passive scan params */
			[CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_BR] = 800,
			[CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_EDR] = 200,
			[CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3] = 200,
			/* passive scan in dual antenna params */
			[CONF_SG_CONSECUTIVE_HV3_IN_PASSIVE_SCAN] = 0,
			[CONF_SG_BCN_HV3_COLLISION_THRESH_IN_PASSIVE_SCAN] = 0,
			[CONF_SG_TX_RX_PROTECTION_BWIDTH_IN_PASSIVE_SCAN] = 0,
			/* general params */
			[CONF_SG_STA_FORCE_PS_IN_BT_SCO] = 1,
			[CONF_SG_ANTENNA_CONFIGURATION] = 0,
			[CONF_SG_BEACON_MISS_PERCENT] = 60,
			[CONF_SG_DHCP_TIME] = 5000,
			[CONF_SG_RXT] = 1200,
			[CONF_SG_TXT] = 1000,
			[CONF_SG_ADAPTIVE_RXT_TXT] = 1,
			[CONF_SG_GENERAL_USAGE_BIT_MAP] = 3,
			[CONF_SG_HV3_MAX_SERVED] = 6,
			[CONF_SG_PS_POLL_TIMEOUT] = 10,
			[CONF_SG_UPSD_TIMEOUT] = 10,
			[CONF_SG_CONSECUTIVE_CTS_THRESHOLD] = 2,
			[CONF_SG_STA_RX_WINDOW_AFTER_DTIM] = 5,
			[CONF_SG_STA_CONNECTION_PROTECTION_TIME] = 30,
			/* AP params */
			[CONF_AP_BEACON_MISS_TX] = 3,
			[CONF_AP_RX_WINDOW_AFTER_BEACON] = 10,
			[CONF_AP_BEACON_WINDOW_INTERVAL] = 2,
			[CONF_AP_CONNECTION_PROTECTION_TIME] = 0,
			[CONF_AP_BT_ACL_VAL_BT_SERVE_TIME] = 25,
			[CONF_AP_BT_ACL_VAL_WL_SERVE_TIME] = 25,
			/* CTS Diluting params */
			[CONF_SG_CTS_DILUTED_BAD_RX_PACKETS_TH] = 0,
			[CONF_SG_CTS_CHOP_IN_DUAL_ANT_SCO_MASTER] = 0,
		},
		.state = CONF_SG_PROTECTIVE,
	},
	.rx = {
		.rx_msdu_life_time = 512000,
		.packet_detection_threshold = 0,
		.ps_poll_timeout = 15,
		.upsd_timeout = 15,
		.rts_threshold = IEEE80211_MAX_RTS_THRESHOLD,
		.rx_cca_threshold = 0,
		.irq_blk_threshold = 0xFFFF,
		.irq_pkt_threshold = 0,
		.irq_timeout = 600,
		.queue_type = CONF_RX_QUEUE_TYPE_LOW_PRIORITY,
	},
	.tx = {
		.tx_energy_detection = 0,
		.sta_rc_conf = {
			.enabled_rates = 0,
			.short_retry_limit = 10,
			.long_retry_limit = 10,
			.aflags = 0,
		},
		.ac_conf_count = 4,
		.ac_conf = {
			[CONF_TX_AC_BE] = {
				.ac = CONF_TX_AC_BE,
				.cw_min = 15,
				.cw_max = 63,
				.aifsn = 3,
				.tx_op_limit = 0,
			},
			[CONF_TX_AC_BK] = {
				.ac = CONF_TX_AC_BK,
				.cw_min = 15,
				.cw_max = 63,
				.aifsn = 7,
				.tx_op_limit = 0,
			},
			[CONF_TX_AC_VI] = {
				.ac = CONF_TX_AC_VI,
				.cw_min = 15,
				.cw_max = 63,
				.aifsn = CONF_TX_AIFS_PIFS,
				.tx_op_limit = 3008,
			},
			[CONF_TX_AC_VO] = {
				.ac = CONF_TX_AC_VO,
				.cw_min = 15,
				.cw_max = 63,
				.aifsn = CONF_TX_AIFS_PIFS,
				.tx_op_limit = 1504,
			},
		},
		.max_tx_retries = 100,
		.ap_aging_period = 300,
		.tid_conf_count = 4,
		.tid_conf = {
			[CONF_TX_AC_BE] = {
				.queue_id = CONF_TX_AC_BE,
				.channel_type = CONF_CHANNEL_TYPE_EDCF,
				.tsid = CONF_TX_AC_BE,
				.ps_scheme = CONF_PS_SCHEME_LEGACY,
				.ack_policy = CONF_ACK_POLICY_LEGACY,
				.apsd_conf = {0, 0},
			},
			[CONF_TX_AC_BK] = {
				.queue_id = CONF_TX_AC_BK,
				.channel_type = CONF_CHANNEL_TYPE_EDCF,
				.tsid = CONF_TX_AC_BK,
				.ps_scheme = CONF_PS_SCHEME_LEGACY,
				.ack_policy = CONF_ACK_POLICY_LEGACY,
				.apsd_conf = {0, 0},
			},
			[CONF_TX_AC_VI] = {
				.queue_id = CONF_TX_AC_VI,
				.channel_type = CONF_CHANNEL_TYPE_EDCF,
				.tsid = CONF_TX_AC_VI,
				.ps_scheme = CONF_PS_SCHEME_LEGACY,
				.ack_policy = CONF_ACK_POLICY_LEGACY,
				.apsd_conf = {0, 0},
			},
			[CONF_TX_AC_VO] = {
				.queue_id = CONF_TX_AC_VO,
				.channel_type = CONF_CHANNEL_TYPE_EDCF,
				.tsid = CONF_TX_AC_VO,
				.ps_scheme = CONF_PS_SCHEME_LEGACY,
				.ack_policy = CONF_ACK_POLICY_LEGACY,
				.apsd_conf = {0, 0},
			},
		},
		.frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD,
		.tx_compl_timeout = 700,
		.tx_compl_threshold = 4,
		.basic_rate = CONF_HW_BIT_RATE_1MBPS,
		.basic_rate_5 = CONF_HW_BIT_RATE_6MBPS,
		.tmpl_short_retry_limit = 10,
		.tmpl_long_retry_limit = 10,
		.tx_watchdog_timeout = 5000,
	},
	.conn = {
		.wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
		.listen_interval = 1,
		.suspend_wake_up_event = CONF_WAKE_UP_EVENT_N_DTIM,
		.suspend_listen_interval = 3,
		.bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED,
		.bcn_filt_ie_count = 2,
		.bcn_filt_ie = {
			[0] = {
				.ie = WLAN_EID_CHANNEL_SWITCH,
				.rule = CONF_BCN_RULE_PASS_ON_APPEARANCE,
			},
			[1] = {
				.ie = WLAN_EID_HT_OPERATION,
				.rule = CONF_BCN_RULE_PASS_ON_CHANGE,
			},
		},
		.synch_fail_thold = 10,
		.bss_lose_timeout = 100,
		.beacon_rx_timeout = 10000,
		.broadcast_timeout = 20000,
		.rx_broadcast_in_ps = 1,
		.ps_poll_threshold = 10,
		.bet_enable = CONF_BET_MODE_ENABLE,
		.bet_max_consecutive = 50,
		.psm_entry_retries = 8,
		.psm_exit_retries = 16,
		.psm_entry_nullfunc_retries = 3,
		.dynamic_ps_timeout = 40,
		.forced_ps = false,
		.keep_alive_interval = 55000,
		.max_listen_interval = 20,
	},
	.itrim = {
		.enable = false,
		.timeout = 50000,
	},
	.pm_config = {
		.host_clk_settling_time = 5000,
		.host_fast_wakeup_support = false
	},
	.roam_trigger = {
		.trigger_pacing = 1,
		.avg_weight_rssi_beacon = 20,
		.avg_weight_rssi_data = 10,
		.avg_weight_snr_beacon = 20,
		.avg_weight_snr_data = 10,
	},
	.scan = {
		.min_dwell_time_active = 7500,
		.max_dwell_time_active = 30000,
		.min_dwell_time_passive = 100000,
		.max_dwell_time_passive = 100000,
		.num_probe_reqs = 2,
		.split_scan_timeout = 50000,
	},
	.sched_scan = {
		/*
		 * Values are in TU/1000 but since sched scan FW command
		 * params are in TUs rounding up may occur.
		 */
		.base_dwell_time = 7500,
		.max_dwell_time_delta = 22500,
		/* based on 250bits per probe @1Mbps */
		.dwell_time_delta_per_probe = 2000,
		/* based on 250bits per probe @6Mbps (plus a bit more) */
		.dwell_time_delta_per_probe_5 = 350,
		.dwell_time_passive = 100000,
		.dwell_time_dfs = 150000,
		.num_probe_reqs = 2,
		.rssi_threshold = -90,
		.snr_threshold = 0,
	},
	.ht = {
		.rx_ba_win_size = 8,
		.tx_ba_win_size = 64,
		.inactivity_timeout = 10000,
		.tx_ba_tid_bitmap = CONF_TX_BA_ENABLED_TID_BITMAP,
	},
	/*
	 * Memory config for wl127x chips is given in the
	 * wl12xx_default_priv_conf struct. The below configuration is
	 * for wl128x chips.
	 */
	.mem = {
		.num_stations = 1,
		.ssid_profiles = 1,
		.rx_block_num = 40,
		.tx_min_block_num = 40,
		.dynamic_memory = 1,
		.min_req_tx_blocks = 45,
		.min_req_rx_blocks = 22,
		.tx_min = 27,
	},
	.fm_coex = {
		.enable = true,
		.swallow_period = 5,
		.n_divider_fref_set_1 = 0xff, /* default */
		.n_divider_fref_set_2 = 12,
		.m_divider_fref_set_1 = 148,
		.m_divider_fref_set_2 = 0xffff, /* default */
		.coex_pll_stabilization_time = 0xffffffff, /* default */
		.ldo_stabilization_time = 0xffff, /* default */
		.fm_disturbed_band_margin = 0xff, /* default */
		.swallow_clk_diff = 0xff, /* default */
	},
	.rx_streaming = {
		.duration = 150,
		.queues = 0x1,
		.interval = 20,
		.always = 0,
	},
	.fwlog = {
		.mode = WL12XX_FWLOG_ON_DEMAND,
		.mem_blocks = 2,
		.severity = 0,
		.timestamp = WL12XX_FWLOG_TIMESTAMP_DISABLED,
		.output = WL12XX_FWLOG_OUTPUT_HOST,
		.threshold = 0,
	},
	.rate = {
		.rate_retry_score = 32000,
		.per_add = 8192,
		.per_th1 = 2048,
		.per_th2 = 4096,
		.max_per = 8100,
		.inverse_curiosity_factor = 5,
		.tx_fail_low_th = 4,
		.tx_fail_high_th = 10,
		.per_alpha_shift = 4,
		.per_add_shift = 13,
		.per_beta1_shift = 10,
		.per_beta2_shift = 8,
		.rate_check_up = 2,
		.rate_check_down = 12,
		.rate_retry_policy = {
			0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00,
		},
	},
	.hangover = {
		.recover_time = 0,
		.hangover_period = 20,
		.dynamic_mode = 1,
		.early_termination_mode = 1,
		.max_period = 20,
		.min_period = 1,
		.increase_delta = 1,
		.decrease_delta = 2,
		.quiet_time = 4,
		.increase_time = 1,
		.window_size = 16,
	},
};
364
/*
 * Default wl12xx private configuration: zeroed per-channel TX power
 * compensation plus the wl127x memory settings (copied over wl->conf.mem
 * for wl127x chips in wl12xx_identify_chip()).
 */
static struct wl12xx_priv_conf wl12xx_default_priv_conf = {
	.rf = {
		.tx_per_channel_power_compensation_2 = {
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		},
		.tx_per_channel_power_compensation_5 = {
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		},
	},
	.mem_wl127x = {
		.num_stations = 1,
		.ssid_profiles = 1,
		.rx_block_num = 70,
		.tx_min_block_num = 40,
		.dynamic_memory = 1,
		.min_req_tx_blocks = 100,
		.min_req_rx_blocks = 22,
		.tx_min = 27,
	},

};
388
/* Spare TX HW memory blocks reserved per packet; the GEM value is
 * presumably for the GEM cipher's larger overhead — TODO confirm */
#define WL12XX_TX_HW_BLOCK_SPARE_DEFAULT 1
#define WL12XX_TX_HW_BLOCK_GEM_SPARE 2
/* TX HW memory block size in bytes (see wl12xx_calc_tx_blocks()) */
#define WL12XX_TX_HW_BLOCK_SIZE 252
392
/*
 * Map FW rate indices (enum wl12xx_hw_rates order) to mac80211 rate-table
 * indices for the 2.4GHz band.
 */
static const u8 wl12xx_rate_to_idx_2ghz[] = {
	/* MCS rates are used only with 11n */
	7,                            /* WL12XX_CONF_HW_RXTX_RATE_MCS7_SGI */
	7,                            /* WL12XX_CONF_HW_RXTX_RATE_MCS7 */
	6,                            /* WL12XX_CONF_HW_RXTX_RATE_MCS6 */
	5,                            /* WL12XX_CONF_HW_RXTX_RATE_MCS5 */
	4,                            /* WL12XX_CONF_HW_RXTX_RATE_MCS4 */
	3,                            /* WL12XX_CONF_HW_RXTX_RATE_MCS3 */
	2,                            /* WL12XX_CONF_HW_RXTX_RATE_MCS2 */
	1,                            /* WL12XX_CONF_HW_RXTX_RATE_MCS1 */
	0,                            /* WL12XX_CONF_HW_RXTX_RATE_MCS0 */

	11,                            /* WL12XX_CONF_HW_RXTX_RATE_54   */
	10,                            /* WL12XX_CONF_HW_RXTX_RATE_48   */
	9,                             /* WL12XX_CONF_HW_RXTX_RATE_36   */
	8,                             /* WL12XX_CONF_HW_RXTX_RATE_24   */

	/* TI-specific rate */
	CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL12XX_CONF_HW_RXTX_RATE_22   */

	7,                             /* WL12XX_CONF_HW_RXTX_RATE_18   */
	6,                             /* WL12XX_CONF_HW_RXTX_RATE_12   */
	3,                             /* WL12XX_CONF_HW_RXTX_RATE_11   */
	5,                             /* WL12XX_CONF_HW_RXTX_RATE_9    */
	4,                             /* WL12XX_CONF_HW_RXTX_RATE_6    */
	2,                             /* WL12XX_CONF_HW_RXTX_RATE_5_5  */
	1,                             /* WL12XX_CONF_HW_RXTX_RATE_2    */
	0                              /* WL12XX_CONF_HW_RXTX_RATE_1    */
};
422
/*
 * Map FW rate indices (enum wl12xx_hw_rates order) to mac80211 rate-table
 * indices for the 5GHz band; CCK rates do not exist on 5GHz and are
 * marked unsupported.
 */
static const u8 wl12xx_rate_to_idx_5ghz[] = {
	/* MCS rates are used only with 11n */
	7,                            /* WL12XX_CONF_HW_RXTX_RATE_MCS7_SGI */
	7,                            /* WL12XX_CONF_HW_RXTX_RATE_MCS7 */
	6,                            /* WL12XX_CONF_HW_RXTX_RATE_MCS6 */
	5,                            /* WL12XX_CONF_HW_RXTX_RATE_MCS5 */
	4,                            /* WL12XX_CONF_HW_RXTX_RATE_MCS4 */
	3,                            /* WL12XX_CONF_HW_RXTX_RATE_MCS3 */
	2,                            /* WL12XX_CONF_HW_RXTX_RATE_MCS2 */
	1,                            /* WL12XX_CONF_HW_RXTX_RATE_MCS1 */
	0,                            /* WL12XX_CONF_HW_RXTX_RATE_MCS0 */

	7,                             /* WL12XX_CONF_HW_RXTX_RATE_54   */
	6,                             /* WL12XX_CONF_HW_RXTX_RATE_48   */
	5,                             /* WL12XX_CONF_HW_RXTX_RATE_36   */
	4,                             /* WL12XX_CONF_HW_RXTX_RATE_24   */

	/* TI-specific rate */
	CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL12XX_CONF_HW_RXTX_RATE_22   */

	3,                             /* WL12XX_CONF_HW_RXTX_RATE_18   */
	2,                             /* WL12XX_CONF_HW_RXTX_RATE_12   */
	CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL12XX_CONF_HW_RXTX_RATE_11   */
	1,                             /* WL12XX_CONF_HW_RXTX_RATE_9    */
	0,                             /* WL12XX_CONF_HW_RXTX_RATE_6    */
	CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL12XX_CONF_HW_RXTX_RATE_5_5  */
	CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL12XX_CONF_HW_RXTX_RATE_2    */
	CONF_HW_RXTX_RATE_UNSUPPORTED  /* WL12XX_CONF_HW_RXTX_RATE_1    */
};
452
/* Per-band lookup: selects the FW-rate-to-mac80211-index table above */
static const u8 *wl12xx_band_rate_to_idx[] = {
	[IEEE80211_BAND_2GHZ] = wl12xx_rate_to_idx_2ghz,
	[IEEE80211_BAND_5GHZ] = wl12xx_rate_to_idx_5ghz
};
457
/*
 * FW rate indices for the wl12xx family.  The order here defines the
 * index layout of the wl12xx_rate_to_idx_* tables above, so the two
 * must stay in sync.
 */
enum wl12xx_hw_rates {
	WL12XX_CONF_HW_RXTX_RATE_MCS7_SGI = 0,
	WL12XX_CONF_HW_RXTX_RATE_MCS7,
	WL12XX_CONF_HW_RXTX_RATE_MCS6,
	WL12XX_CONF_HW_RXTX_RATE_MCS5,
	WL12XX_CONF_HW_RXTX_RATE_MCS4,
	WL12XX_CONF_HW_RXTX_RATE_MCS3,
	WL12XX_CONF_HW_RXTX_RATE_MCS2,
	WL12XX_CONF_HW_RXTX_RATE_MCS1,
	WL12XX_CONF_HW_RXTX_RATE_MCS0,
	WL12XX_CONF_HW_RXTX_RATE_54,
	WL12XX_CONF_HW_RXTX_RATE_48,
	WL12XX_CONF_HW_RXTX_RATE_36,
	WL12XX_CONF_HW_RXTX_RATE_24,
	WL12XX_CONF_HW_RXTX_RATE_22,
	WL12XX_CONF_HW_RXTX_RATE_18,
	WL12XX_CONF_HW_RXTX_RATE_12,
	WL12XX_CONF_HW_RXTX_RATE_11,
	WL12XX_CONF_HW_RXTX_RATE_9,
	WL12XX_CONF_HW_RXTX_RATE_6,
	WL12XX_CONF_HW_RXTX_RATE_5_5,
	WL12XX_CONF_HW_RXTX_RATE_2,
	WL12XX_CONF_HW_RXTX_RATE_1,
	WL12XX_CONF_HW_RXTX_RATE_MAX,
};
483
/*
 * Memory/register partition table: each entry defines the address windows
 * the host maps when accessing chip memory in the corresponding phase
 * (download, boot, normal work, DRPw access).  Used via
 * wlcore_set_partition().
 */
static struct wlcore_partition_set wl12xx_ptable[PART_TABLE_LEN] = {
	[PART_DOWN] = {
		.mem = {
			.start = 0x00000000,
			.size  = 0x000177c0
		},
		.reg = {
			.start = REGISTERS_BASE,
			.size  = 0x00008800
		},
		.mem2 = {
			.start = 0x00000000,
			.size  = 0x00000000
		},
		.mem3 = {
			.start = 0x00000000,
			.size  = 0x00000000
		},
	},

	[PART_BOOT] = { /* in wl12xx we can use a mix of work and down
			 * partition here */
		.mem = {
			.start = 0x00040000,
			.size  = 0x00014fc0
		},
		.reg = {
			.start = REGISTERS_BASE,
			.size  = 0x00008800
		},
		.mem2 = {
			.start = 0x00000000,
			.size  = 0x00000000
		},
		.mem3 = {
			.start = 0x00000000,
			.size  = 0x00000000
		},
	},

	[PART_WORK] = {
		.mem = {
			.start = 0x00040000,
			.size  = 0x00014fc0
		},
		.reg = {
			.start = REGISTERS_BASE,
			.size  = 0x0000a000
		},
		.mem2 = {
			.start = 0x003004f8,
			.size  = 0x00000004
		},
		.mem3 = {
			.start = 0x00040404,
			.size  = 0x00000000
		},
	},

	[PART_DRPW] = {
		.mem = {
			.start = 0x00040000,
			.size  = 0x00014fc0
		},
		.reg = {
			.start = DRPW_BASE,
			.size  = 0x00006000
		},
		.mem2 = {
			.start = 0x00000000,
			.size  = 0x00000000
		},
		.mem3 = {
			.start = 0x00000000,
			.size  = 0x00000000
		}
	}
};
562
/*
 * Translation table from generic wlcore register ids (REG_*) to the
 * wl12xx-specific register addresses, consumed by wlcore_read_reg() /
 * wlcore_write_reg().
 */
static const int wl12xx_rtable[REG_TABLE_LEN] = {
	[REG_ECPU_CONTROL]		= WL12XX_REG_ECPU_CONTROL,
	[REG_INTERRUPT_NO_CLEAR]	= WL12XX_REG_INTERRUPT_NO_CLEAR,
	[REG_INTERRUPT_ACK]		= WL12XX_REG_INTERRUPT_ACK,
	[REG_COMMAND_MAILBOX_PTR]	= WL12XX_REG_COMMAND_MAILBOX_PTR,
	[REG_EVENT_MAILBOX_PTR]		= WL12XX_REG_EVENT_MAILBOX_PTR,
	[REG_INTERRUPT_TRIG]		= WL12XX_REG_INTERRUPT_TRIG,
	[REG_INTERRUPT_MASK]		= WL12XX_REG_INTERRUPT_MASK,
	[REG_PC_ON_RECOVERY]		= WL12XX_SCR_PAD4,
	[REG_CHIP_ID_B]			= WL12XX_CHIP_ID_B,
	[REG_CMD_MBOX_ADDRESS]		= WL12XX_CMD_MBOX_ADDRESS,

	/* data access memory addresses, used with partition translation */
	[REG_SLV_MEM_DATA]		= WL1271_SLV_MEM_DATA,
	[REG_SLV_REG_DATA]		= WL1271_SLV_REG_DATA,

	/* raw data access memory addresses */
	[REG_RAW_FW_STATUS_ADDR]	= FW_STATUS_ADDR,
};
582
/* Firmware image names per chip generation (single-role, multi-role and
 * PLT/production-line-test builds, selected in wl12xx_identify_chip()). */
/* TODO: maybe move to a new header file? */
#define WL127X_FW_NAME_MULTI	"ti-connectivity/wl127x-fw-4-mr.bin"
#define WL127X_FW_NAME_SINGLE	"ti-connectivity/wl127x-fw-4-sr.bin"
#define WL127X_PLT_FW_NAME	"ti-connectivity/wl127x-fw-4-plt.bin"

#define WL128X_FW_NAME_MULTI	"ti-connectivity/wl128x-fw-4-mr.bin"
#define WL128X_FW_NAME_SINGLE	"ti-connectivity/wl128x-fw-4-sr.bin"
#define WL128X_PLT_FW_NAME	"ti-connectivity/wl128x-fw-4-plt.bin"
591
/*
 * Tell the wl127x FW which RX memory block to map before the host reads a
 * packet.  A no-op on wl128x (CHIP_ID_1283_PG20).  @len is unused here;
 * the signature matches the wlcore prepare_read operation.
 */
static void wl127x_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len)
{
	if (wl->chip.id != CHIP_ID_1283_PG20) {
		struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
		struct wl1271_rx_mem_pool_addr rx_mem_addr;

		/*
		 * Choose the block we want to read
		 * For aggregated packets, only the first memory block
		 * should be retrieved. The FW takes care of the rest.
		 */
		u32 mem_block = rx_desc & RX_MEM_BLOCK_MASK;

		/* block index scaled by 256 bytes into the packet memory pool */
		rx_mem_addr.addr = (mem_block << 8) +
			le32_to_cpu(wl_mem_map->packet_memory_pool_start);

		rx_mem_addr.addr_extra = rx_mem_addr.addr + 4;

		wl1271_write(wl, WL1271_SLV_REG_DATA,
			     &rx_mem_addr, sizeof(rx_mem_addr), false);
	}
}
614
/*
 * Identify the wl12xx chip revision and set revision-specific state:
 * firmware image names, quirks, the wl127x memory configuration and the
 * prepare_read operation.  Returns 0 on success, -ENODEV for unsupported
 * chip ids.
 */
static int wl12xx_identify_chip(struct wl1271 *wl)
{
	int ret = 0;

	switch (wl->chip.id) {
	case CHIP_ID_1271_PG10:
		wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete",
			       wl->chip.id);

		/* clear the alignment quirk, since we don't support it */
		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;

		wl->quirks |= WLCORE_QUIRK_LEGACY_NVS;
		/* NOTE(review): no plt_fw_name is set for PG10 —
		 * presumably PLT is not supported on this obsolete
		 * revision; confirm */
		wl->sr_fw_name = WL127X_FW_NAME_SINGLE;
		wl->mr_fw_name = WL127X_FW_NAME_MULTI;
		/* wl127x uses its own memory settings, not the wl128x
		 * defaults in wl12xx_conf.mem */
		memcpy(&wl->conf.mem, &wl12xx_default_priv_conf.mem_wl127x,
		       sizeof(wl->conf.mem));

		/* read data preparation is only needed by wl127x */
		wl->ops->prepare_read = wl127x_prepare_read;

		break;

	case CHIP_ID_1271_PG20:
		wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
			     wl->chip.id);

		/* clear the alignment quirk, since we don't support it */
		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;

		wl->quirks |= WLCORE_QUIRK_LEGACY_NVS;
		wl->plt_fw_name = WL127X_PLT_FW_NAME;
		wl->sr_fw_name = WL127X_FW_NAME_SINGLE;
		wl->mr_fw_name = WL127X_FW_NAME_MULTI;
		memcpy(&wl->conf.mem, &wl12xx_default_priv_conf.mem_wl127x,
		       sizeof(wl->conf.mem));

		/* read data preparation is only needed by wl127x */
		wl->ops->prepare_read = wl127x_prepare_read;

		break;

	case CHIP_ID_1283_PG20:
		wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1283 PG20)",
			     wl->chip.id);
		wl->plt_fw_name = WL128X_PLT_FW_NAME;
		wl->sr_fw_name = WL128X_FW_NAME_SINGLE;
		wl->mr_fw_name = WL128X_FW_NAME_MULTI;
		break;
	case CHIP_ID_1283_PG10:
	default:
		wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
		ret = -ENODEV;
		goto out;
	}

out:
	return ret;
}
674
/*
 * Write a 16-bit value to a "top" (OCP) register: program the target
 * address, the data, then issue the OCP write command.  Fire-and-forget;
 * completion is not polled here (compare wl12xx_top_reg_read()).
 */
static void wl12xx_top_reg_write(struct wl1271 *wl, int addr, u16 val)
{
	/* write address >> 1 + 0x30000 to OCP_POR_CTR */
	addr = (addr >> 1) + 0x30000;
	wl1271_write32(wl, WL12XX_OCP_POR_CTR, addr);

	/* write value to OCP_POR_WDATA */
	wl1271_write32(wl, WL12XX_OCP_DATA_WRITE, val);

	/* write 1 to OCP_CMD */
	wl1271_write32(wl, WL12XX_OCP_CMD, OCP_CMD_WRITE);
}
687
/*
 * Read a 16-bit value from a "top" (OCP) register.  Polls until the HW
 * reports the data ready.  Returns the register value, or the sentinel
 * 0xffff on timeout or OCP error status — callers must treat 0xffff as
 * failure.
 */
static u16 wl12xx_top_reg_read(struct wl1271 *wl, int addr)
{
	u32 val;
	int timeout = OCP_CMD_LOOP;

	/* write address >> 1 + 0x30000 to OCP_POR_CTR */
	addr = (addr >> 1) + 0x30000;
	wl1271_write32(wl, WL12XX_OCP_POR_CTR, addr);

	/* write 2 to OCP_CMD */
	wl1271_write32(wl, WL12XX_OCP_CMD, OCP_CMD_READ);

	/* poll for data ready */
	do {
		val = wl1271_read32(wl, WL12XX_OCP_DATA_READ);
	} while (!(val & OCP_READY_MASK) && --timeout);

	if (!timeout) {
		wl1271_warning("Top register access timed out.");
		return 0xffff;
	}

	/* check data status and return if OK */
	if ((val & OCP_STATUS_MASK) == OCP_STATUS_OK)
		return val & 0xffff;
	else {
		wl1271_warning("Top register access returned error.");
		return 0xffff;
	}
}
718
/*
 * Switch the wl128x main clock source from TCXO to FREF.
 * Returns 0 on success, -EFAULT if the spare register could not be read —
 * note the 0-on-success convention when checking the result.
 */
static int wl128x_switch_tcxo_to_fref(struct wl1271 *wl)
{
	u16 spare_reg;

	/* Mask bits [2] & [8:4] in the sys_clk_cfg register */
	/* NOTE(review): the code sets bits 3, 5 and 6, which does not match
	 * the "[2] & [8:4]" comment above — verify against the datasheet */
	spare_reg = wl12xx_top_reg_read(wl, WL_SPARE_REG);
	if (spare_reg == 0xFFFF)
		return -EFAULT;
	spare_reg |= (BIT(3) | BIT(5) | BIT(6));
	wl12xx_top_reg_write(wl, WL_SPARE_REG, spare_reg);

	/* Enable FREF_CLK_REQ & mux MCS and coex PLLs to FREF */
	wl12xx_top_reg_write(wl, SYS_CLK_CFG_REG,
			     WL_CLK_REQ_TYPE_PG2 | MCS_PLL_CLK_SEL_FREF);

	/* Delay execution for 15msec, to let the HW settle */
	mdelay(15);

	return 0;
}
739
740static bool wl128x_is_tcxo_valid(struct wl1271 *wl)
741{
742 u16 tcxo_detection;
743
744 tcxo_detection = wl12xx_top_reg_read(wl, TCXO_CLK_DETECT_REG);
745 if (tcxo_detection & TCXO_DET_FAILED)
746 return false;
747
748 return true;
749}
750
751static bool wl128x_is_fref_valid(struct wl1271 *wl)
752{
753 u16 fref_detection;
754
755 fref_detection = wl12xx_top_reg_read(wl, FREF_CLK_DETECT_REG);
756 if (fref_detection & FREF_CLK_DETECT_FAIL)
757 return false;
758
759 return true;
760}
761
/*
 * Program the MCS PLL M/N dividers and configuration with fixed values.
 * Used for TCXO frequencies (16.8/33.6 MHz) the automatic configuration
 * path in wl128x_configure_mcs_pll() does not handle.  Always returns 0.
 */
static int wl128x_manually_configure_mcs_pll(struct wl1271 *wl)
{
	wl12xx_top_reg_write(wl, MCS_PLL_M_REG, MCS_PLL_M_REG_VAL);
	wl12xx_top_reg_write(wl, MCS_PLL_N_REG, MCS_PLL_N_REG_VAL);
	wl12xx_top_reg_write(wl, MCS_PLL_CONFIG_REG, MCS_PLL_CONFIG_REG_VAL);

	return 0;
}
770
/*
 * Configure the wl128x MCS PLL for the selected clock source @clk.
 * Falls back to manual M/N programming for the 16.8/33.6 MHz TCXO cases.
 * Returns 0 on success, -EFAULT when a top-register read fails.
 */
static int wl128x_configure_mcs_pll(struct wl1271 *wl, int clk)
{
	u16 spare_reg;
	u16 pll_config;
	u8 input_freq;

	/* Mask bits [3:1] in the sys_clk_cfg register */
	/* NOTE(review): only BIT(2) is set here, which does not match the
	 * "[3:1]" comment above — verify against the datasheet */
	spare_reg = wl12xx_top_reg_read(wl, WL_SPARE_REG);
	if (spare_reg == 0xFFFF)
		return -EFAULT;
	spare_reg |= BIT(2);
	wl12xx_top_reg_write(wl, WL_SPARE_REG, spare_reg);

	/* Handle special cases of the TCXO clock */
	if (wl->tcxo_clock == WL12XX_TCXOCLOCK_16_8 ||
	    wl->tcxo_clock == WL12XX_TCXOCLOCK_33_6)
		return wl128x_manually_configure_mcs_pll(wl);

	/* Set the input frequency according to the selected clock source */
	input_freq = (clk & 1) + 1;

	pll_config = wl12xx_top_reg_read(wl, MCS_PLL_CONFIG_REG);
	if (pll_config == 0xFFFF)
		return -EFAULT;
	pll_config |= (input_freq << MCS_SEL_IN_FREQ_SHIFT);
	pll_config |= MCS_PLL_ENABLE_HP;
	wl12xx_top_reg_write(wl, MCS_PLL_CONFIG_REG, pll_config);

	return 0;
}
801
802/*
803 * WL128x has two clocks input - TCXO and FREF.
804 * TCXO is the main clock of the device, while FREF is used to sync
805 * between the GPS and the cellular modem.
806 * In cases where TCXO is 32.736MHz or 16.368MHz, the FREF will be used
807 * as the WLAN/BT main clock.
808 */
809static int wl128x_boot_clk(struct wl1271 *wl, int *selected_clock)
810{
811 u16 sys_clk_cfg;
812
813 /* For XTAL-only modes, FREF will be used after switching from TCXO */
814 if (wl->ref_clock == WL12XX_REFCLOCK_26_XTAL ||
815 wl->ref_clock == WL12XX_REFCLOCK_38_XTAL) {
816 if (!wl128x_switch_tcxo_to_fref(wl))
817 return -EINVAL;
818 goto fref_clk;
819 }
820
821 /* Query the HW, to determine which clock source we should use */
822 sys_clk_cfg = wl12xx_top_reg_read(wl, SYS_CLK_CFG_REG);
823 if (sys_clk_cfg == 0xFFFF)
824 return -EINVAL;
825 if (sys_clk_cfg & PRCM_CM_EN_MUX_WLAN_FREF)
826 goto fref_clk;
827
828 /* If TCXO is either 32.736MHz or 16.368MHz, switch to FREF */
829 if (wl->tcxo_clock == WL12XX_TCXOCLOCK_16_368 ||
830 wl->tcxo_clock == WL12XX_TCXOCLOCK_32_736) {
831 if (!wl128x_switch_tcxo_to_fref(wl))
832 return -EINVAL;
833 goto fref_clk;
834 }
835
836 /* TCXO clock is selected */
837 if (!wl128x_is_tcxo_valid(wl))
838 return -EINVAL;
839 *selected_clock = wl->tcxo_clock;
840 goto config_mcs_pll;
841
842fref_clk:
843 /* FREF clock is selected */
844 if (!wl128x_is_fref_valid(wl))
845 return -EINVAL;
846 *selected_clock = wl->ref_clock;
847
848config_mcs_pll:
849 return wl128x_configure_mcs_pll(wl, *selected_clock);
850}
851
/*
 * Configure the wl127x boot clock: pick the PLL parameter value from the
 * configured reference clock, set the clock pad type/pull/polarity and
 * pause the wake-up counter.  Returns 0 on success, -EINVAL for an
 * unknown ref_clock value.
 */
static int wl127x_boot_clk(struct wl1271 *wl)
{
	u32 pause;
	u32 clk;

	/* older (pre-PG3) wl127x parts need the end-of-transaction quirk */
	if (WL127X_PG_GET_MAJOR(wl->hw_pg_ver) < 3)
		wl->quirks |= WLCORE_QUIRK_END_OF_TRANSACTION;

	if (wl->ref_clock == CONF_REF_CLK_19_2_E ||
	    wl->ref_clock == CONF_REF_CLK_38_4_E ||
	    wl->ref_clock == CONF_REF_CLK_38_4_M_XTAL)
		/* ref clk: 19.2/38.4/38.4-XTAL */
		clk = 0x3;
	else if (wl->ref_clock == CONF_REF_CLK_26_E ||
		 wl->ref_clock == CONF_REF_CLK_52_E)
		/* ref clk: 26/52 */
		clk = 0x5;
	else
		return -EINVAL;

	if (wl->ref_clock != CONF_REF_CLK_19_2_E) {
		u16 val;
		/* Set clock type (open drain) */
		val = wl12xx_top_reg_read(wl, OCP_REG_CLK_TYPE);
		val &= FREF_CLK_TYPE_BITS;
		wl12xx_top_reg_write(wl, OCP_REG_CLK_TYPE, val);

		/* Set clock pull mode (no pull) */
		val = wl12xx_top_reg_read(wl, OCP_REG_CLK_PULL);
		val |= NO_PULL;
		wl12xx_top_reg_write(wl, OCP_REG_CLK_PULL, val);
	} else {
		u16 val;
		/* Set clock polarity */
		val = wl12xx_top_reg_read(wl, OCP_REG_CLK_POLARITY);
		val &= FREF_CLK_POLARITY_BITS;
		val |= CLK_REQ_OUTN_SEL;
		wl12xx_top_reg_write(wl, OCP_REG_CLK_POLARITY, val);
	}

	wl1271_write32(wl, WL12XX_PLL_PARAMETERS, clk);

	pause = wl1271_read32(wl, WL12XX_PLL_PARAMETERS);

	wl1271_debug(DEBUG_BOOT, "pause1 0x%x", pause);

	/* NOTE(review): clearing then immediately setting the same bits is
	 * equivalent to just setting them; kept as-is from the reference
	 * driver */
	pause &= ~(WU_COUNTER_PAUSE_VAL);
	pause |= WU_COUNTER_PAUSE_VAL;
	wl1271_write32(wl, WL12XX_WU_COUNTER_PAUSE, pause);

	return 0;
}
904
/*
 * Soft-reset the chip: set the self-clearing reset bit and poll until the
 * HW clears it (bounded by SOFT_RESET_MAX_TIME), then disable Rx/Tx and
 * auto calibration.  Returns 0 on success, -1 on reset timeout.
 */
static int wl1271_boot_soft_reset(struct wl1271 *wl)
{
	unsigned long timeout;
	u32 boot_data;

	/* perform soft reset */
	wl1271_write32(wl, WL12XX_SLV_SOFT_RESET, ACX_SLV_SOFT_RESET_BIT);

	/* SOFT_RESET is self clearing */
	timeout = jiffies + usecs_to_jiffies(SOFT_RESET_MAX_TIME);
	while (1) {
		boot_data = wl1271_read32(wl, WL12XX_SLV_SOFT_RESET);
		wl1271_debug(DEBUG_BOOT, "soft reset bootdata 0x%x", boot_data);
		if ((boot_data & ACX_SLV_SOFT_RESET_BIT) == 0)
			break;

		if (time_after(jiffies, timeout)) {
			/* 1.2 check pWhalBus->uSelfClearTime if the
			 * timeout was reached */
			wl1271_error("soft reset timeout");
			return -1;
		}

		udelay(SOFT_RESET_STALL_TIME);
	}

	/* disable Rx/Tx */
	wl1271_write32(wl, WL12XX_ENABLE, 0x0);

	/* disable auto calibration on start*/
	wl1271_write32(wl, WL12XX_SPARE_A2, 0xffff);

	return 0;
}
939
/*
 * Pre-boot initialization: set up the boot clock for the detected chip
 * generation, run the ELP wake-up sequence, program the DRPw scratch
 * register with the selected clock, then mask interrupts and soft-reset
 * the chip.  Returns 0 on success or a negative errno.
 */
static int wl12xx_pre_boot(struct wl1271 *wl)
{
	int ret = 0;
	u32 clk;
	int selected_clock = -1;

	if (wl->chip.id == CHIP_ID_1283_PG20) {
		ret = wl128x_boot_clk(wl, &selected_clock);
		if (ret < 0)
			goto out;
	} else {
		ret = wl127x_boot_clk(wl);
		if (ret < 0)
			goto out;
	}

	/* Continue the ELP wake up sequence */
	wl1271_write32(wl, WL12XX_WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
	udelay(500);

	wlcore_set_partition(wl, &wl->ptable[PART_DRPW]);

	/* Read-modify-write DRPW_SCRATCH_START register (see next state)
	   to be used by DRPw FW. The RTRIM value will be added by the FW
	   before taking DRPw out of reset */

	clk = wl1271_read32(wl, WL12XX_DRPW_SCRATCH_START);

	wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk);

	/* encode the clock selection into bits [7:5] of the scratch value */
	if (wl->chip.id == CHIP_ID_1283_PG20)
		clk |= ((selected_clock & 0x3) << 1) << 4;
	else
		clk |= (wl->ref_clock << 1) << 4;

	wl1271_write32(wl, WL12XX_DRPW_SCRATCH_START, clk);

	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);

	/* Disable interrupts */
	wlcore_write_reg(wl, REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);

	ret = wl1271_boot_soft_reset(wl);
	if (ret < 0)
		goto out;

out:
	return ret;
}
989
/*
 * Prepare the chip for firmware upload: signal EEPROM-less operation,
 * log the chip id, read the EEPROM parameters and (on wl128x) raise the
 * SDIO drive strength.
 */
static void wl12xx_pre_upload(struct wl1271 *wl)
{
	u32 tmp;

	/* write firmware's last address (ie. it's length) to
	 * ACX_EEPROMLESS_IND_REG */
	wl1271_debug(DEBUG_BOOT, "ACX_EEPROMLESS_IND_REG");

	/* NOTE(review): the register's own address is written as the value;
	 * this matches the reference driver but not the comment above —
	 * confirm intended */
	wl1271_write32(wl, WL12XX_EEPROMLESS_IND, WL12XX_EEPROMLESS_IND);

	tmp = wlcore_read_reg(wl, REG_CHIP_ID_B);

	wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp);

	/* 6. read the EEPROM parameters */
	tmp = wl1271_read32(wl, WL12XX_SCR_PAD2);

	/* WL1271: The reference driver skips steps 7 to 10 (jumps directly
	 * to upload_fw) */

	if (wl->chip.id == CHIP_ID_1283_PG20)
		wl12xx_top_reg_write(wl, SDIO_IO_DS, HCI_IO_DS_6MA);
}
1013
/*
 * Switch the chip's interrupt line to active-high polarity, unmask the
 * event vector, enable host-side interrupt handling and program the
 * final interrupt mask and host interface configuration.
 */
static void wl12xx_enable_interrupts(struct wl1271 *wl)
{
	u32 polarity;

	polarity = wl12xx_top_reg_read(wl, OCP_REG_POLARITY);

	/* We use HIGH polarity, so unset the LOW bit */
	polarity &= ~POLARITY_LOW;
	wl12xx_top_reg_write(wl, OCP_REG_POLARITY, polarity);

	wlcore_write_reg(wl, REG_INTERRUPT_MASK, WL1271_ACX_ALL_EVENTS_VECTOR);

	wlcore_enable_interrupts(wl);
	wlcore_write_reg(wl, REG_INTERRUPT_MASK,
			 WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));

	wl1271_write32(wl, WL12XX_HI_CFG, HI_CFG_DEF_VAL);
}
1032
/*
 * Full wl12xx boot sequence: pre-boot setup, NVS and firmware upload,
 * firmware start, then interrupt enablement.  Stops and returns the
 * error of the first failing step; otherwise returns the result of
 * wlcore_boot_run_firmware().
 */
static int wl12xx_boot(struct wl1271 *wl)
{
	int ret;

	ret = wl12xx_pre_boot(wl);
	if (ret < 0)
		return ret;

	ret = wlcore_boot_upload_nvs(wl);
	if (ret < 0)
		return ret;

	wl12xx_pre_upload(wl);

	ret = wlcore_boot_upload_firmware(wl);
	if (ret < 0)
		return ret;

	ret = wlcore_boot_run_firmware(wl);
	if (ret < 0)
		return ret;

	wl12xx_enable_interrupts(wl);

	return ret;
}
1060
/*
 * Copy a command buffer into the device command mailbox and raise the
 * "command" trigger interrupt to notify the firmware.
 */
static void wl12xx_trigger_cmd(struct wl1271 *wl, int cmd_box_addr,
			       void *buf, size_t len)
{
	wl1271_write(wl, cmd_box_addr, buf, len, false);
	wlcore_write_reg(wl, REG_INTERRUPT_TRIG, WL12XX_INTR_TRIG_CMD);
}
1067
/* Acknowledge an event mailbox message back to the firmware. */
static void wl12xx_ack_event(struct wl1271 *wl)
{
	wlcore_write_reg(wl, REG_INTERRUPT_TRIG, WL12XX_INTR_TRIG_EVENT_ACK);
}
1072
1073static u32 wl12xx_calc_tx_blocks(struct wl1271 *wl, u32 len, u32 spare_blks)
1074{
1075 u32 blk_size = WL12XX_TX_HW_BLOCK_SIZE;
1076 u32 align_len = wlcore_calc_packet_alignment(wl, len);
1077
1078 return (align_len + blk_size - 1) / blk_size + spare_blks;
1079}
1080
1081static void
1082wl12xx_set_tx_desc_blocks(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc,
1083 u32 blks, u32 spare_blks)
1084{
1085 if (wl->chip.id == CHIP_ID_1283_PG20) {
1086 desc->wl128x_mem.total_mem_blocks = blks;
1087 } else {
1088 desc->wl127x_mem.extra_blocks = spare_blks;
1089 desc->wl127x_mem.total_mem_blocks = blks;
1090 }
1091}
1092
1093static void
1094wl12xx_set_tx_desc_data_len(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc,
1095 struct sk_buff *skb)
1096{
1097 u32 aligned_len = wlcore_calc_packet_alignment(wl, skb->len);
1098
1099 if (wl->chip.id == CHIP_ID_1283_PG20) {
1100 desc->wl128x_mem.extra_bytes = aligned_len - skb->len;
1101 desc->length = cpu_to_le16(aligned_len >> 2);
1102
1103 wl1271_debug(DEBUG_TX,
1104 "tx_fill_hdr: hlid: %d len: %d life: %d mem: %d extra: %d",
1105 desc->hlid,
1106 le16_to_cpu(desc->length),
1107 le16_to_cpu(desc->life_time),
1108 desc->wl128x_mem.total_mem_blocks,
1109 desc->wl128x_mem.extra_bytes);
1110 } else {
1111 /* calculate number of padding bytes */
1112 int pad = aligned_len - skb->len;
1113 desc->tx_attr |=
1114 cpu_to_le16(pad << TX_HW_ATTR_OFST_LAST_WORD_PAD);
1115
1116 /* Store the aligned length in terms of words */
1117 desc->length = cpu_to_le16(aligned_len >> 2);
1118
1119 wl1271_debug(DEBUG_TX,
1120 "tx_fill_hdr: pad: %d hlid: %d len: %d life: %d mem: %d",
1121 pad, desc->hlid,
1122 le16_to_cpu(desc->length),
1123 le16_to_cpu(desc->life_time),
1124 desc->wl127x_mem.total_mem_blocks);
1125 }
1126}
1127
1128static enum wl_rx_buf_align
1129wl12xx_get_rx_buf_align(struct wl1271 *wl, u32 rx_desc)
1130{
1131 if (rx_desc & RX_BUF_UNALIGNED_PAYLOAD)
1132 return WLCORE_RX_BUF_UNALIGNED;
1133
1134 return WLCORE_RX_BUF_ALIGNED;
1135}
1136
1137static u32 wl12xx_get_rx_packet_len(struct wl1271 *wl, void *rx_data,
1138 u32 data_len)
1139{
1140 struct wl1271_rx_descriptor *desc = rx_data;
1141
1142 /* invalid packet */
1143 if (data_len < sizeof(*desc) ||
1144 data_len < sizeof(*desc) + desc->pad_len)
1145 return 0;
1146
1147 return data_len - sizeof(*desc) - desc->pad_len;
1148}
1149
1150static void wl12xx_tx_delayed_compl(struct wl1271 *wl)
1151{
1152 if (wl->fw_status->tx_results_counter == (wl->tx_results_count & 0xff))
1153 return;
1154
1155 wl1271_tx_complete(wl);
1156}
1157
1158static int wl12xx_hw_init(struct wl1271 *wl)
1159{
1160 int ret;
1161
1162 if (wl->chip.id == CHIP_ID_1283_PG20) {
1163 u32 host_cfg_bitmap = HOST_IF_CFG_RX_FIFO_ENABLE;
1164
1165 ret = wl128x_cmd_general_parms(wl);
1166 if (ret < 0)
1167 goto out;
1168 ret = wl128x_cmd_radio_parms(wl);
1169 if (ret < 0)
1170 goto out;
1171
1172 if (wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN)
1173 /* Enable SDIO padding */
1174 host_cfg_bitmap |= HOST_IF_CFG_TX_PAD_TO_SDIO_BLK;
1175
1176 /* Must be before wl1271_acx_init_mem_config() */
1177 ret = wl1271_acx_host_if_cfg_bitmap(wl, host_cfg_bitmap);
1178 if (ret < 0)
1179 goto out;
1180 } else {
1181 ret = wl1271_cmd_general_parms(wl);
1182 if (ret < 0)
1183 goto out;
1184 ret = wl1271_cmd_radio_parms(wl);
1185 if (ret < 0)
1186 goto out;
1187 ret = wl1271_cmd_ext_radio_parms(wl);
1188 if (ret < 0)
1189 goto out;
1190 }
1191out:
1192 return ret;
1193}
1194
/* Return the rate mask advertised by the AP for this STA interface. */
static u32 wl12xx_sta_get_ap_rate_mask(struct wl1271 *wl,
				       struct wl12xx_vif *wlvif)
{
	return wlvif->rate_set;
}
1200
1201static int wl12xx_identify_fw(struct wl1271 *wl)
1202{
1203 unsigned int *fw_ver = wl->chip.fw_ver;
1204
1205 /* Only new station firmwares support routing fw logs to the host */
1206 if ((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_STA) &&
1207 (fw_ver[FW_VER_MINOR] < FW_VER_MINOR_FWLOG_STA_MIN))
1208 wl->quirks |= WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED;
1209
1210 /* This feature is not yet supported for AP mode */
1211 if (fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_AP)
1212 wl->quirks |= WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED;
1213
1214 return 0;
1215}
1216
/*
 * Install the driver's default configuration: the shared wlcore conf
 * and the wl12xx-private conf.
 */
static void wl12xx_conf_init(struct wl1271 *wl)
{
	struct wl12xx_priv *priv = wl->priv;

	/* apply driver default configuration */
	memcpy(&wl->conf, &wl12xx_conf, sizeof(wl12xx_conf));

	/* apply default private configuration */
	memcpy(&priv->conf, &wl12xx_default_priv_conf, sizeof(priv->conf));
}
1227
1228static bool wl12xx_mac_in_fuse(struct wl1271 *wl)
1229{
1230 bool supported = false;
1231 u8 major, minor;
1232
1233 if (wl->chip.id == CHIP_ID_1283_PG20) {
1234 major = WL128X_PG_GET_MAJOR(wl->hw_pg_ver);
1235 minor = WL128X_PG_GET_MINOR(wl->hw_pg_ver);
1236
1237 /* in wl128x we have the MAC address if the PG is >= (2, 1) */
1238 if (major > 2 || (major == 2 && minor >= 1))
1239 supported = true;
1240 } else {
1241 major = WL127X_PG_GET_MAJOR(wl->hw_pg_ver);
1242 minor = WL127X_PG_GET_MINOR(wl->hw_pg_ver);
1243
1244 /* in wl127x we have the MAC address if the PG is >= (3, 1) */
1245 if (major == 3 && minor >= 1)
1246 supported = true;
1247 }
1248
1249 wl1271_debug(DEBUG_PROBE,
1250 "PG Ver major = %d minor = %d, MAC %s present",
1251 major, minor, supported ? "is" : "is not");
1252
1253 return supported;
1254}
1255
1256static void wl12xx_get_fuse_mac(struct wl1271 *wl)
1257{
1258 u32 mac1, mac2;
1259
1260 wlcore_set_partition(wl, &wl->ptable[PART_DRPW]);
1261
1262 mac1 = wl1271_read32(wl, WL12XX_REG_FUSE_BD_ADDR_1);
1263 mac2 = wl1271_read32(wl, WL12XX_REG_FUSE_BD_ADDR_2);
1264
1265 /* these are the two parts of the BD_ADDR */
1266 wl->fuse_oui_addr = ((mac2 & 0xffff) << 8) +
1267 ((mac1 & 0xff000000) >> 24);
1268 wl->fuse_nic_addr = mac1 & 0xffffff;
1269
1270 wlcore_set_partition(wl, &wl->ptable[PART_DOWN]);
1271}
1272
1273static s8 wl12xx_get_pg_ver(struct wl1271 *wl)
1274{
1275 u32 die_info;
1276
1277 if (wl->chip.id == CHIP_ID_1283_PG20)
1278 die_info = wl12xx_top_reg_read(wl, WL128X_REG_FUSE_DATA_2_1);
1279 else
1280 die_info = wl12xx_top_reg_read(wl, WL127X_REG_FUSE_DATA_2_1);
1281
1282 return (s8) (die_info & PG_VER_MASK) >> PG_VER_OFFSET;
1283}
1284
/* Fetch the MAC address from fuses, if this chip revision has one. */
static void wl12xx_get_mac(struct wl1271 *wl)
{
	if (wl12xx_mac_in_fuse(wl))
		wl12xx_get_fuse_mac(wl);
}
1290
/*
 * wlcore operations implemented by the wl12xx lower-level driver.
 * Entries left NULL are hooks this chip family does not use.
 */
static struct wlcore_ops wl12xx_ops = {
	.identify_chip		= wl12xx_identify_chip,
	.identify_fw		= wl12xx_identify_fw,
	.boot			= wl12xx_boot,
	.trigger_cmd		= wl12xx_trigger_cmd,
	.ack_event		= wl12xx_ack_event,
	.calc_tx_blocks		= wl12xx_calc_tx_blocks,
	.set_tx_desc_blocks	= wl12xx_set_tx_desc_blocks,
	.set_tx_desc_data_len	= wl12xx_set_tx_desc_data_len,
	.get_rx_buf_align	= wl12xx_get_rx_buf_align,
	.get_rx_packet_len	= wl12xx_get_rx_packet_len,
	.tx_immediate_compl	= NULL,
	.tx_delayed_compl	= wl12xx_tx_delayed_compl,
	.hw_init		= wl12xx_hw_init,
	.init_vif		= NULL,
	.sta_get_ap_rate_mask	= wl12xx_sta_get_ap_rate_mask,
	.get_pg_ver		= wl12xx_get_pg_ver,
	.get_mac		= wl12xx_get_mac,
};
1310
/* HT capabilities advertised to mac80211 for wl12xx chips. */
static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
	.cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 |
	       (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT),
	.ht_supported = true,
	.ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K,
	.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8,
	.mcs = {
		/* single spatial stream: MCS 0-7 only */
		.rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
		.rx_highest = cpu_to_le16(72),
		.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
		},
};
1323
1324static int __devinit wl12xx_probe(struct platform_device *pdev)
1325{
1326 struct wl1271 *wl;
1327 struct ieee80211_hw *hw;
1328 struct wl12xx_priv *priv;
1329
1330 hw = wlcore_alloc_hw(sizeof(*priv));
1331 if (IS_ERR(hw)) {
1332 wl1271_error("can't allocate hw");
1333 return PTR_ERR(hw);
1334 }
1335
1336 wl = hw->priv;
1337 wl->ops = &wl12xx_ops;
1338 wl->ptable = wl12xx_ptable;
1339 wl->rtable = wl12xx_rtable;
1340 wl->num_tx_desc = 16;
1341 wl->normal_tx_spare = WL12XX_TX_HW_BLOCK_SPARE_DEFAULT;
1342 wl->gem_tx_spare = WL12XX_TX_HW_BLOCK_GEM_SPARE;
1343 wl->band_rate_to_idx = wl12xx_band_rate_to_idx;
1344 wl->hw_tx_rate_tbl_size = WL12XX_CONF_HW_RXTX_RATE_MAX;
1345 wl->hw_min_ht_rate = WL12XX_CONF_HW_RXTX_RATE_MCS0;
1346 wl->fw_status_priv_len = 0;
1347 memcpy(&wl->ht_cap, &wl12xx_ht_cap, sizeof(wl12xx_ht_cap));
1348 wl12xx_conf_init(wl);
1349
1350 return wlcore_probe(wl, pdev);
1351}
1352
/* Platform device ids this driver binds to. */
static const struct platform_device_id wl12xx_id_table[] __devinitconst = {
	{ "wl12xx", 0 },
	{  } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(platform, wl12xx_id_table);
1358
/* Platform driver glue; removal is handled by the common wlcore code. */
static struct platform_driver wl12xx_driver = {
	.probe		= wl12xx_probe,
	.remove		= __devexit_p(wlcore_remove),
	.id_table	= wl12xx_id_table,
	.driver = {
		.name	= "wl12xx_driver",
		.owner	= THIS_MODULE,
	}
};
1368
/* Module entry point: register the platform driver. */
static int __init wl12xx_init(void)
{
	return platform_driver_register(&wl12xx_driver);
}
module_init(wl12xx_init);
1374
/* Module exit point: unregister the platform driver. */
static void __exit wl12xx_exit(void)
{
	platform_driver_unregister(&wl12xx_driver);
}
module_exit(wl12xx_exit);
1380
/* Module metadata and the firmware images this driver may request. */
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_FIRMWARE(WL127X_FW_NAME_SINGLE);
MODULE_FIRMWARE(WL127X_FW_NAME_MULTI);
MODULE_FIRMWARE(WL127X_PLT_FW_NAME);
MODULE_FIRMWARE(WL128X_FW_NAME_SINGLE);
MODULE_FIRMWARE(WL128X_FW_NAME_MULTI);
MODULE_FIRMWARE(WL128X_PLT_FW_NAME);
diff --git a/drivers/net/wireless/wl12xx/reg.h b/drivers/net/wireless/ti/wl12xx/reg.h
index 340db324bc26..79ede02e2587 100644
--- a/drivers/net/wireless/wl12xx/reg.h
+++ b/drivers/net/wireless/ti/wl12xx/reg.h
@@ -33,16 +33,8 @@
33#define REGISTERS_DOWN_SIZE 0x00008800 33#define REGISTERS_DOWN_SIZE 0x00008800
34#define REGISTERS_WORK_SIZE 0x0000b000 34#define REGISTERS_WORK_SIZE 0x0000b000
35 35
36#define HW_ACCESS_ELP_CTRL_REG_ADDR 0x1FFFC
37#define FW_STATUS_ADDR (0x14FC0 + 0xA000) 36#define FW_STATUS_ADDR (0x14FC0 + 0xA000)
38 37
39/* ELP register commands */
40#define ELPCTRL_WAKE_UP 0x1
41#define ELPCTRL_WAKE_UP_WLAN_READY 0x5
42#define ELPCTRL_SLEEP 0x0
43/* ELP WLAN_READY bit */
44#define ELPCTRL_WLAN_READY 0x2
45
46/*=============================================== 38/*===============================================
47 Host Software Reset - 32bit RW 39 Host Software Reset - 32bit RW
48 ------------------------------------------ 40 ------------------------------------------
@@ -57,14 +49,14 @@
57 (not self-clearing), the Wlan hardware 49 (not self-clearing), the Wlan hardware
58 exits the software reset state. 50 exits the software reset state.
59===============================================*/ 51===============================================*/
60#define ACX_REG_SLV_SOFT_RESET (REGISTERS_BASE + 0x0000) 52#define WL12XX_SLV_SOFT_RESET (REGISTERS_BASE + 0x0000)
61 53
62#define WL1271_SLV_REG_DATA (REGISTERS_BASE + 0x0008) 54#define WL1271_SLV_REG_DATA (REGISTERS_BASE + 0x0008)
63#define WL1271_SLV_REG_ADATA (REGISTERS_BASE + 0x000c) 55#define WL1271_SLV_REG_ADATA (REGISTERS_BASE + 0x000c)
64#define WL1271_SLV_MEM_DATA (REGISTERS_BASE + 0x0018) 56#define WL1271_SLV_MEM_DATA (REGISTERS_BASE + 0x0018)
65 57
66#define ACX_REG_INTERRUPT_TRIG (REGISTERS_BASE + 0x0474) 58#define WL12XX_REG_INTERRUPT_TRIG (REGISTERS_BASE + 0x0474)
67#define ACX_REG_INTERRUPT_TRIG_H (REGISTERS_BASE + 0x0478) 59#define WL12XX_REG_INTERRUPT_TRIG_H (REGISTERS_BASE + 0x0478)
68 60
69/*============================================= 61/*=============================================
70 Host Interrupt Mask Register - 32bit (RW) 62 Host Interrupt Mask Register - 32bit (RW)
@@ -94,7 +86,7 @@
94 21- - 86 21- -
95 Default: 0x0001 87 Default: 0x0001
96*==============================================*/ 88*==============================================*/
97#define ACX_REG_INTERRUPT_MASK (REGISTERS_BASE + 0x04DC) 89#define WL12XX_REG_INTERRUPT_MASK (REGISTERS_BASE + 0x04DC)
98 90
99/*============================================= 91/*=============================================
100 Host Interrupt Mask Set 16bit, (Write only) 92 Host Interrupt Mask Set 16bit, (Write only)
@@ -125,7 +117,7 @@
125 Reading this register doesn't 117 Reading this register doesn't
126 effect its content. 118 effect its content.
127=============================================*/ 119=============================================*/
128#define ACX_REG_INTERRUPT_NO_CLEAR (REGISTERS_BASE + 0x04E8) 120#define WL12XX_REG_INTERRUPT_NO_CLEAR (REGISTERS_BASE + 0x04E8)
129 121
130/*============================================= 122/*=============================================
131 Host Interrupt Status Clear on Read Register 123 Host Interrupt Status Clear on Read Register
@@ -148,9 +140,9 @@
148 HINT_STS_ND registers, thus making the 140 HINT_STS_ND registers, thus making the
149 assotiated interrupt inactive. (0-no effect) 141 assotiated interrupt inactive. (0-no effect)
150==============================================*/ 142==============================================*/
151#define ACX_REG_INTERRUPT_ACK (REGISTERS_BASE + 0x04F0) 143#define WL12XX_REG_INTERRUPT_ACK (REGISTERS_BASE + 0x04F0)
152 144
153#define RX_DRIVER_COUNTER_ADDRESS (REGISTERS_BASE + 0x0538) 145#define WL12XX_REG_RX_DRIVER_COUNTER (REGISTERS_BASE + 0x0538)
154 146
155/* Device Configuration registers*/ 147/* Device Configuration registers*/
156#define SOR_CFG (REGISTERS_BASE + 0x0800) 148#define SOR_CFG (REGISTERS_BASE + 0x0800)
@@ -175,9 +167,9 @@
175 1 halt eCPU 167 1 halt eCPU
176 0 enable eCPU 168 0 enable eCPU
177 ===============================================*/ 169 ===============================================*/
178#define ACX_REG_ECPU_CONTROL (REGISTERS_BASE + 0x0804) 170#define WL12XX_REG_ECPU_CONTROL (REGISTERS_BASE + 0x0804)
179 171
180#define HI_CFG (REGISTERS_BASE + 0x0808) 172#define WL12XX_HI_CFG (REGISTERS_BASE + 0x0808)
181 173
182/*=============================================== 174/*===============================================
183 EEPROM Burst Read Start - 32bit RW 175 EEPROM Burst Read Start - 32bit RW
@@ -196,72 +188,67 @@
196*================================================*/ 188*================================================*/
197#define ACX_REG_EE_START (REGISTERS_BASE + 0x080C) 189#define ACX_REG_EE_START (REGISTERS_BASE + 0x080C)
198 190
199#define OCP_POR_CTR (REGISTERS_BASE + 0x09B4) 191#define WL12XX_OCP_POR_CTR (REGISTERS_BASE + 0x09B4)
200#define OCP_DATA_WRITE (REGISTERS_BASE + 0x09B8) 192#define WL12XX_OCP_DATA_WRITE (REGISTERS_BASE + 0x09B8)
201#define OCP_DATA_READ (REGISTERS_BASE + 0x09BC) 193#define WL12XX_OCP_DATA_READ (REGISTERS_BASE + 0x09BC)
202#define OCP_CMD (REGISTERS_BASE + 0x09C0) 194#define WL12XX_OCP_CMD (REGISTERS_BASE + 0x09C0)
203
204#define WL1271_HOST_WR_ACCESS (REGISTERS_BASE + 0x09F8)
205 195
206#define CHIP_ID_B (REGISTERS_BASE + 0x5674) 196#define WL12XX_HOST_WR_ACCESS (REGISTERS_BASE + 0x09F8)
207 197
208#define CHIP_ID_1271_PG10 (0x4030101) 198#define WL12XX_CHIP_ID_B (REGISTERS_BASE + 0x5674)
209#define CHIP_ID_1271_PG20 (0x4030111)
210#define CHIP_ID_1283_PG10 (0x05030101)
211#define CHIP_ID_1283_PG20 (0x05030111)
212 199
213#define ENABLE (REGISTERS_BASE + 0x5450) 200#define WL12XX_ENABLE (REGISTERS_BASE + 0x5450)
214 201
215/* Power Management registers */ 202/* Power Management registers */
216#define ELP_CFG_MODE (REGISTERS_BASE + 0x5804) 203#define WL12XX_ELP_CFG_MODE (REGISTERS_BASE + 0x5804)
217#define ELP_CMD (REGISTERS_BASE + 0x5808) 204#define WL12XX_ELP_CMD (REGISTERS_BASE + 0x5808)
218#define PLL_CAL_TIME (REGISTERS_BASE + 0x5810) 205#define WL12XX_PLL_CAL_TIME (REGISTERS_BASE + 0x5810)
219#define CLK_REQ_TIME (REGISTERS_BASE + 0x5814) 206#define WL12XX_CLK_REQ_TIME (REGISTERS_BASE + 0x5814)
220#define CLK_BUF_TIME (REGISTERS_BASE + 0x5818) 207#define WL12XX_CLK_BUF_TIME (REGISTERS_BASE + 0x5818)
221 208
222#define CFG_PLL_SYNC_CNT (REGISTERS_BASE + 0x5820) 209#define WL12XX_CFG_PLL_SYNC_CNT (REGISTERS_BASE + 0x5820)
223 210
224/* Scratch Pad registers*/ 211/* Scratch Pad registers*/
225#define SCR_PAD0 (REGISTERS_BASE + 0x5608) 212#define WL12XX_SCR_PAD0 (REGISTERS_BASE + 0x5608)
226#define SCR_PAD1 (REGISTERS_BASE + 0x560C) 213#define WL12XX_SCR_PAD1 (REGISTERS_BASE + 0x560C)
227#define SCR_PAD2 (REGISTERS_BASE + 0x5610) 214#define WL12XX_SCR_PAD2 (REGISTERS_BASE + 0x5610)
228#define SCR_PAD3 (REGISTERS_BASE + 0x5614) 215#define WL12XX_SCR_PAD3 (REGISTERS_BASE + 0x5614)
229#define SCR_PAD4 (REGISTERS_BASE + 0x5618) 216#define WL12XX_SCR_PAD4 (REGISTERS_BASE + 0x5618)
230#define SCR_PAD4_SET (REGISTERS_BASE + 0x561C) 217#define WL12XX_SCR_PAD4_SET (REGISTERS_BASE + 0x561C)
231#define SCR_PAD4_CLR (REGISTERS_BASE + 0x5620) 218#define WL12XX_SCR_PAD4_CLR (REGISTERS_BASE + 0x5620)
232#define SCR_PAD5 (REGISTERS_BASE + 0x5624) 219#define WL12XX_SCR_PAD5 (REGISTERS_BASE + 0x5624)
233#define SCR_PAD5_SET (REGISTERS_BASE + 0x5628) 220#define WL12XX_SCR_PAD5_SET (REGISTERS_BASE + 0x5628)
234#define SCR_PAD5_CLR (REGISTERS_BASE + 0x562C) 221#define WL12XX_SCR_PAD5_CLR (REGISTERS_BASE + 0x562C)
235#define SCR_PAD6 (REGISTERS_BASE + 0x5630) 222#define WL12XX_SCR_PAD6 (REGISTERS_BASE + 0x5630)
236#define SCR_PAD7 (REGISTERS_BASE + 0x5634) 223#define WL12XX_SCR_PAD7 (REGISTERS_BASE + 0x5634)
237#define SCR_PAD8 (REGISTERS_BASE + 0x5638) 224#define WL12XX_SCR_PAD8 (REGISTERS_BASE + 0x5638)
238#define SCR_PAD9 (REGISTERS_BASE + 0x563C) 225#define WL12XX_SCR_PAD9 (REGISTERS_BASE + 0x563C)
239 226
240/* Spare registers*/ 227/* Spare registers*/
241#define SPARE_A1 (REGISTERS_BASE + 0x0994) 228#define WL12XX_SPARE_A1 (REGISTERS_BASE + 0x0994)
242#define SPARE_A2 (REGISTERS_BASE + 0x0998) 229#define WL12XX_SPARE_A2 (REGISTERS_BASE + 0x0998)
243#define SPARE_A3 (REGISTERS_BASE + 0x099C) 230#define WL12XX_SPARE_A3 (REGISTERS_BASE + 0x099C)
244#define SPARE_A4 (REGISTERS_BASE + 0x09A0) 231#define WL12XX_SPARE_A4 (REGISTERS_BASE + 0x09A0)
245#define SPARE_A5 (REGISTERS_BASE + 0x09A4) 232#define WL12XX_SPARE_A5 (REGISTERS_BASE + 0x09A4)
246#define SPARE_A6 (REGISTERS_BASE + 0x09A8) 233#define WL12XX_SPARE_A6 (REGISTERS_BASE + 0x09A8)
247#define SPARE_A7 (REGISTERS_BASE + 0x09AC) 234#define WL12XX_SPARE_A7 (REGISTERS_BASE + 0x09AC)
248#define SPARE_A8 (REGISTERS_BASE + 0x09B0) 235#define WL12XX_SPARE_A8 (REGISTERS_BASE + 0x09B0)
249#define SPARE_B1 (REGISTERS_BASE + 0x5420) 236#define WL12XX_SPARE_B1 (REGISTERS_BASE + 0x5420)
250#define SPARE_B2 (REGISTERS_BASE + 0x5424) 237#define WL12XX_SPARE_B2 (REGISTERS_BASE + 0x5424)
251#define SPARE_B3 (REGISTERS_BASE + 0x5428) 238#define WL12XX_SPARE_B3 (REGISTERS_BASE + 0x5428)
252#define SPARE_B4 (REGISTERS_BASE + 0x542C) 239#define WL12XX_SPARE_B4 (REGISTERS_BASE + 0x542C)
253#define SPARE_B5 (REGISTERS_BASE + 0x5430) 240#define WL12XX_SPARE_B5 (REGISTERS_BASE + 0x5430)
254#define SPARE_B6 (REGISTERS_BASE + 0x5434) 241#define WL12XX_SPARE_B6 (REGISTERS_BASE + 0x5434)
255#define SPARE_B7 (REGISTERS_BASE + 0x5438) 242#define WL12XX_SPARE_B7 (REGISTERS_BASE + 0x5438)
256#define SPARE_B8 (REGISTERS_BASE + 0x543C) 243#define WL12XX_SPARE_B8 (REGISTERS_BASE + 0x543C)
257 244
258#define PLL_PARAMETERS (REGISTERS_BASE + 0x6040) 245#define WL12XX_PLL_PARAMETERS (REGISTERS_BASE + 0x6040)
259#define WU_COUNTER_PAUSE (REGISTERS_BASE + 0x6008) 246#define WL12XX_WU_COUNTER_PAUSE (REGISTERS_BASE + 0x6008)
260#define WELP_ARM_COMMAND (REGISTERS_BASE + 0x6100) 247#define WL12XX_WELP_ARM_COMMAND (REGISTERS_BASE + 0x6100)
261#define DRPW_SCRATCH_START (DRPW_BASE + 0x002C) 248#define WL12XX_DRPW_SCRATCH_START (DRPW_BASE + 0x002C)
262 249
263 250#define WL12XX_CMD_MBOX_ADDRESS 0x407B4
264#define ACX_SLV_SOFT_RESET_BIT BIT(1) 251
265#define ACX_REG_EEPROM_START_BIT BIT(1) 252#define ACX_REG_EEPROM_START_BIT BIT(1)
266 253
267/* Command/Information Mailbox Pointers */ 254/* Command/Information Mailbox Pointers */
@@ -279,7 +266,7 @@
279 the host receives the Init Complete interrupt from 266 the host receives the Init Complete interrupt from
280 the Wlan hardware. 267 the Wlan hardware.
281 ===============================================*/ 268 ===============================================*/
282#define REG_COMMAND_MAILBOX_PTR (SCR_PAD0) 269#define WL12XX_REG_COMMAND_MAILBOX_PTR (WL12XX_SCR_PAD0)
283 270
284/*=============================================== 271/*===============================================
285 Information Mailbox Pointer - 32bit RW 272 Information Mailbox Pointer - 32bit RW
@@ -294,7 +281,7 @@
294 until after the host receives the Init Complete interrupt from 281 until after the host receives the Init Complete interrupt from
295 the Wlan hardware. 282 the Wlan hardware.
296 ===============================================*/ 283 ===============================================*/
297#define REG_EVENT_MAILBOX_PTR (SCR_PAD1) 284#define WL12XX_REG_EVENT_MAILBOX_PTR (WL12XX_SCR_PAD1)
298 285
299/*=============================================== 286/*===============================================
300 EEPROM Read/Write Request 32bit RW 287 EEPROM Read/Write Request 32bit RW
@@ -365,26 +352,6 @@
365#define ACX_CONT_WIND_MIN_MASK 0x0000007f 352#define ACX_CONT_WIND_MIN_MASK 0x0000007f
366#define ACX_CONT_WIND_MAX 0x03ff0000 353#define ACX_CONT_WIND_MAX 0x03ff0000
367 354
368/*===============================================
369 HI_CFG Interface Configuration Register Values
370 ------------------------------------------
371 ===============================================*/
372#define HI_CFG_UART_ENABLE 0x00000004
373#define HI_CFG_RST232_ENABLE 0x00000008
374#define HI_CFG_CLOCK_REQ_SELECT 0x00000010
375#define HI_CFG_HOST_INT_ENABLE 0x00000020
376#define HI_CFG_VLYNQ_OUTPUT_ENABLE 0x00000040
377#define HI_CFG_HOST_INT_ACTIVE_LOW 0x00000080
378#define HI_CFG_UART_TX_OUT_GPIO_15 0x00000100
379#define HI_CFG_UART_TX_OUT_GPIO_14 0x00000200
380#define HI_CFG_UART_TX_OUT_GPIO_7 0x00000400
381
382#define HI_CFG_DEF_VAL \
383 (HI_CFG_UART_ENABLE | \
384 HI_CFG_RST232_ENABLE | \
385 HI_CFG_CLOCK_REQ_SELECT | \
386 HI_CFG_HOST_INT_ENABLE)
387
388#define REF_FREQ_19_2 0 355#define REF_FREQ_19_2 0
389#define REF_FREQ_26_0 1 356#define REF_FREQ_26_0 1
390#define REF_FREQ_38_4 2 357#define REF_FREQ_38_4 2
@@ -400,38 +367,19 @@
400#define LUT_PARAM_BB_PLL_LOOP_FILTER 5 367#define LUT_PARAM_BB_PLL_LOOP_FILTER 5
401#define LUT_PARAM_NUM 6 368#define LUT_PARAM_NUM 6
402 369
403#define ACX_EEPROMLESS_IND_REG (SCR_PAD4) 370#define WL12XX_EEPROMLESS_IND (WL12XX_SCR_PAD4)
404#define USE_EEPROM 0 371#define USE_EEPROM 0
405#define SOFT_RESET_MAX_TIME 1000000
406#define SOFT_RESET_STALL_TIME 1000
407#define NVS_DATA_BUNDARY_ALIGNMENT 4 372#define NVS_DATA_BUNDARY_ALIGNMENT 4
408 373
409
410/* Firmware image load chunk size */
411#define CHUNK_SIZE 16384
412
413/* Firmware image header size */ 374/* Firmware image header size */
414#define FW_HDR_SIZE 8 375#define FW_HDR_SIZE 8
415 376
416#define ECPU_CONTROL_HALT 0x00000101
417
418
419/****************************************************************************** 377/******************************************************************************
420 378
421 CHANNELS, BAND & REG DOMAINS definitions 379 CHANNELS, BAND & REG DOMAINS definitions
422 380
423******************************************************************************/ 381******************************************************************************/
424 382
425
426enum {
427 RADIO_BAND_2_4GHZ = 0, /* 2.4 Ghz band */
428 RADIO_BAND_5GHZ = 1, /* 5 Ghz band */
429 RADIO_BAND_JAPAN_4_9_GHZ = 2,
430 DEFAULT_BAND = RADIO_BAND_2_4GHZ,
431 INVALID_BAND = 0xFE,
432 MAX_RADIO_BANDS = 0xFF
433};
434
435#define SHORT_PREAMBLE_BIT BIT(0) /* CCK or Barker depending on the rate */ 383#define SHORT_PREAMBLE_BIT BIT(0) /* CCK or Barker depending on the rate */
436#define OFDM_RATE_BIT BIT(6) 384#define OFDM_RATE_BIT BIT(6)
437#define PBCC_RATE_BIT BIT(7) 385#define PBCC_RATE_BIT BIT(7)
@@ -465,14 +413,82 @@ b12-b0 - Supported Rate indicator bits as defined below.
465 413
466******************************************************************************/ 414******************************************************************************/
467 415
416#define OCP_CMD_LOOP 32
417#define OCP_CMD_WRITE 0x1
418#define OCP_CMD_READ 0x2
419#define OCP_READY_MASK BIT(18)
420#define OCP_STATUS_MASK (BIT(16) | BIT(17))
421#define OCP_STATUS_NO_RESP 0x00000
422#define OCP_STATUS_OK 0x10000
423#define OCP_STATUS_REQ_FAILED 0x20000
424#define OCP_STATUS_RESP_ERROR 0x30000
425
426#define OCP_REG_POLARITY 0x0064
427#define OCP_REG_CLK_TYPE 0x0448
428#define OCP_REG_CLK_POLARITY 0x0cb2
429#define OCP_REG_CLK_PULL 0x0cb4
430
431#define POLARITY_LOW BIT(1)
432#define NO_PULL (BIT(14) | BIT(15))
433
434#define FREF_CLK_TYPE_BITS 0xfffffe7f
435#define CLK_REQ_PRCM 0x100
436#define FREF_CLK_POLARITY_BITS 0xfffff8ff
437#define CLK_REQ_OUTN_SEL 0x700
438
439#define WU_COUNTER_PAUSE_VAL 0x3FF
440
441/* PLL configuration algorithm for wl128x */
442#define SYS_CLK_CFG_REG 0x2200
443/* Bit[0] - 0-TCXO, 1-FREF */
444#define MCS_PLL_CLK_SEL_FREF BIT(0)
445/* Bit[3:2] - 01-TCXO, 10-FREF */
446#define WL_CLK_REQ_TYPE_FREF BIT(3)
447#define WL_CLK_REQ_TYPE_PG2 (BIT(3) | BIT(2))
448/* Bit[4] - 0-TCXO, 1-FREF */
449#define PRCM_CM_EN_MUX_WLAN_FREF BIT(4)
450
451#define TCXO_ILOAD_INT_REG 0x2264
452#define TCXO_CLK_DETECT_REG 0x2266
453
454#define TCXO_DET_FAILED BIT(4)
455
456#define FREF_ILOAD_INT_REG 0x2084
457#define FREF_CLK_DETECT_REG 0x2086
458#define FREF_CLK_DETECT_FAIL BIT(4)
459
460/* Use this reg for masking during driver access */
461#define WL_SPARE_REG 0x2320
462#define WL_SPARE_VAL BIT(2)
463/* Bit[6:5:3] - mask wl write SYS_CLK_CFG[8:5:2:4] */
464#define WL_SPARE_MASK_8526 (BIT(6) | BIT(5) | BIT(3))
465
466#define PLL_LOCK_COUNTERS_REG 0xD8C
467#define PLL_LOCK_COUNTERS_COEX 0x0F
468#define PLL_LOCK_COUNTERS_MCS 0xF0
469#define MCS_PLL_OVERRIDE_REG 0xD90
470#define MCS_PLL_CONFIG_REG 0xD92
471#define MCS_SEL_IN_FREQ_MASK 0x0070
472#define MCS_SEL_IN_FREQ_SHIFT 4
473#define MCS_PLL_CONFIG_REG_VAL 0x73
474#define MCS_PLL_ENABLE_HP (BIT(0) | BIT(1))
475
476#define MCS_PLL_M_REG 0xD94
477#define MCS_PLL_N_REG 0xD96
478#define MCS_PLL_M_REG_VAL 0xC8
479#define MCS_PLL_N_REG_VAL 0x07
480
481#define SDIO_IO_DS 0xd14
482
483/* SDIO/wSPI DS configuration values */
484enum {
485 HCI_IO_DS_8MA = 0,
486 HCI_IO_DS_4MA = 1, /* default */
487 HCI_IO_DS_6MA = 2,
488 HCI_IO_DS_2MA = 3,
489};
468 490
469/************************************************************************* 491/* end PLL configuration algorithm for wl128x */
470
471 Interrupt Trigger Register (Host -> WiLink)
472
473**************************************************************************/
474
475/* Hardware to Embedded CPU Interrupts - first 32-bit register set */
476 492
477/* 493/*
478 * Host Command Interrupt. Setting this bit masks 494 * Host Command Interrupt. Setting this bit masks
@@ -480,7 +496,7 @@ b12-b0 - Supported Rate indicator bits as defined below.
480 * the FW that it has sent a command 496 * the FW that it has sent a command
481 * to the Wlan hardware Command Mailbox. 497 * to the Wlan hardware Command Mailbox.
482 */ 498 */
483#define INTR_TRIG_CMD BIT(0) 499#define WL12XX_INTR_TRIG_CMD BIT(0)
484 500
485/* 501/*
486 * Host Event Acknowlegde Interrupt. The host 502 * Host Event Acknowlegde Interrupt. The host
@@ -488,42 +504,27 @@ b12-b0 - Supported Rate indicator bits as defined below.
488 * the unsolicited information from the event 504 * the unsolicited information from the event
489 * mailbox. 505 * mailbox.
490 */ 506 */
491#define INTR_TRIG_EVENT_ACK BIT(1) 507#define WL12XX_INTR_TRIG_EVENT_ACK BIT(1)
492
493/*
494 * The host sets this bit to inform the Wlan
495 * FW that a TX packet is in the XFER
496 * Buffer #0.
497 */
498#define INTR_TRIG_TX_PROC0 BIT(2)
499
500/*
501 * The host sets this bit to inform the FW
502 * that it read a packet from RX XFER
503 * Buffer #0.
504 */
505#define INTR_TRIG_RX_PROC0 BIT(3)
506
507#define INTR_TRIG_DEBUG_ACK BIT(4)
508 508
509#define INTR_TRIG_STATE_CHANGED BIT(5) 509/*===============================================
510 510 HI_CFG Interface Configuration Register Values
511 511 ------------------------------------------
512/* Hardware to Embedded CPU Interrupts - second 32-bit register set */ 512 ===============================================*/
513 513#define HI_CFG_UART_ENABLE 0x00000004
514/* 514#define HI_CFG_RST232_ENABLE 0x00000008
515 * The host sets this bit to inform the FW 515#define HI_CFG_CLOCK_REQ_SELECT 0x00000010
516 * that it read a packet from RX XFER 516#define HI_CFG_HOST_INT_ENABLE 0x00000020
517 * Buffer #1. 517#define HI_CFG_VLYNQ_OUTPUT_ENABLE 0x00000040
518 */ 518#define HI_CFG_HOST_INT_ACTIVE_LOW 0x00000080
519#define INTR_TRIG_RX_PROC1 BIT(17) 519#define HI_CFG_UART_TX_OUT_GPIO_15 0x00000100
520#define HI_CFG_UART_TX_OUT_GPIO_14 0x00000200
521#define HI_CFG_UART_TX_OUT_GPIO_7 0x00000400
520 522
521/* 523#define HI_CFG_DEF_VAL \
522 * The host sets this bit to inform the Wlan 524 (HI_CFG_UART_ENABLE | \
523 * hardware that a TX packet is in the XFER 525 HI_CFG_RST232_ENABLE | \
524 * Buffer #1. 526 HI_CFG_CLOCK_REQ_SELECT | \
525 */ 527 HI_CFG_HOST_INT_ENABLE)
526#define INTR_TRIG_TX_PROC1 BIT(18)
527 528
528#define WL127X_REG_FUSE_DATA_2_1 0x050a 529#define WL127X_REG_FUSE_DATA_2_1 0x050a
529#define WL128X_REG_FUSE_DATA_2_1 0x2152 530#define WL128X_REG_FUSE_DATA_2_1 0x2152
diff --git a/drivers/net/wireless/ti/wl12xx/wl12xx.h b/drivers/net/wireless/ti/wl12xx/wl12xx.h
new file mode 100644
index 000000000000..74cd332e23ef
--- /dev/null
+++ b/drivers/net/wireless/ti/wl12xx/wl12xx.h
@@ -0,0 +1,31 @@
1/*
2 * This file is part of wl12xx
3 *
4 * Copyright (C) 2011 Texas Instruments Inc.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#ifndef __WL12XX_PRIV_H__
23#define __WL12XX_PRIV_H__
24
25#include "conf.h"
26
27struct wl12xx_priv {
28 struct wl12xx_priv_conf conf;
29};
30
31#endif /* __WL12XX_PRIV_H__ */
diff --git a/drivers/net/wireless/ti/wlcore/Kconfig b/drivers/net/wireless/ti/wlcore/Kconfig
new file mode 100644
index 000000000000..9d04c38938bc
--- /dev/null
+++ b/drivers/net/wireless/ti/wlcore/Kconfig
@@ -0,0 +1,41 @@
1config WLCORE
2 tristate "TI wlcore support"
3 depends on WL_TI && GENERIC_HARDIRQS
4 depends on INET
5 select FW_LOADER
6 ---help---
7 This module contains the main code for TI WLAN chips. It abstracts
8 hardware-specific differences among different chipset families.
9 Each chipset family needs to implement its own lower-level module
10 that will depend on this module for the common code.
11
12 If you choose to build a module, it will be called wlcore. Say N if
13 unsure.
14
15config WLCORE_SPI
16 tristate "TI wlcore SPI support"
17 depends on WLCORE && SPI_MASTER
18 select CRC7
19 ---help---
20 This module adds support for the SPI interface of adapters using
21 TI WLAN chipsets. Select this if your platform is using
22 the SPI bus.
23
24 If you choose to build a module, it'll be called wlcore_spi.
25 Say N if unsure.
26
27config WLCORE_SDIO
28 tristate "TI wlcore SDIO support"
29 depends on WLCORE && MMC
30 ---help---
31 This module adds support for the SDIO interface of adapters using
32 TI WLAN chipsets. Select this if your platform is using
33 the SDIO bus.
34
35 If you choose to build a module, it'll be called wlcore_sdio.
36 Say N if unsure.
37
38config WL12XX_PLATFORM_DATA
39 bool
40 depends on WLCORE_SDIO != n || WL1251_SDIO != n
41 default y
diff --git a/drivers/net/wireless/ti/wlcore/Makefile b/drivers/net/wireless/ti/wlcore/Makefile
new file mode 100644
index 000000000000..d9fba9e32130
--- /dev/null
+++ b/drivers/net/wireless/ti/wlcore/Makefile
@@ -0,0 +1,15 @@
1wlcore-objs = main.o cmd.o io.o event.o tx.o rx.o ps.o acx.o \
2 boot.o init.o debugfs.o scan.o
3
4wlcore_spi-objs = spi.o
5wlcore_sdio-objs = sdio.o
6
7wlcore-$(CONFIG_NL80211_TESTMODE) += testmode.o
8obj-$(CONFIG_WLCORE) += wlcore.o
9obj-$(CONFIG_WLCORE_SPI) += wlcore_spi.o
10obj-$(CONFIG_WLCORE_SDIO) += wlcore_sdio.o
11
12# small builtin driver bit
13obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx_platform_data.o
14
15ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/wl12xx/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index bc96db0683a5..5912541a925e 100644
--- a/drivers/net/wireless/wl12xx/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -28,11 +28,11 @@
28#include <linux/spi/spi.h> 28#include <linux/spi/spi.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30 30
31#include "wl12xx.h" 31#include "wlcore.h"
32#include "debug.h" 32#include "debug.h"
33#include "wl12xx_80211.h" 33#include "wl12xx_80211.h"
34#include "reg.h"
35#include "ps.h" 34#include "ps.h"
35#include "hw_ops.h"
36 36
37int wl1271_acx_wake_up_conditions(struct wl1271 *wl, struct wl12xx_vif *wlvif, 37int wl1271_acx_wake_up_conditions(struct wl1271 *wl, struct wl12xx_vif *wlvif,
38 u8 wake_up_event, u8 listen_interval) 38 u8 wake_up_event, u8 listen_interval)
@@ -757,7 +757,10 @@ int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif)
757 757
758 /* configure one AP supported rate class */ 758 /* configure one AP supported rate class */
759 acx->rate_policy_idx = cpu_to_le32(wlvif->sta.ap_rate_idx); 759 acx->rate_policy_idx = cpu_to_le32(wlvif->sta.ap_rate_idx);
760 acx->rate_policy.enabled_rates = cpu_to_le32(wlvif->rate_set); 760
761 /* the AP policy is HW specific */
762 acx->rate_policy.enabled_rates =
763 cpu_to_le32(wlcore_hw_sta_get_ap_rate_mask(wl, wlvif));
761 acx->rate_policy.short_retry_limit = c->short_retry_limit; 764 acx->rate_policy.short_retry_limit = c->short_retry_limit;
762 acx->rate_policy.long_retry_limit = c->long_retry_limit; 765 acx->rate_policy.long_retry_limit = c->long_retry_limit;
763 acx->rate_policy.aflags = c->aflags; 766 acx->rate_policy.aflags = c->aflags;
@@ -969,17 +972,14 @@ int wl12xx_acx_mem_cfg(struct wl1271 *wl)
969 goto out; 972 goto out;
970 } 973 }
971 974
972 if (wl->chip.id == CHIP_ID_1283_PG20) 975 mem = &wl->conf.mem;
973 mem = &wl->conf.mem_wl128x;
974 else
975 mem = &wl->conf.mem_wl127x;
976 976
977 /* memory config */ 977 /* memory config */
978 mem_conf->num_stations = mem->num_stations; 978 mem_conf->num_stations = mem->num_stations;
979 mem_conf->rx_mem_block_num = mem->rx_block_num; 979 mem_conf->rx_mem_block_num = mem->rx_block_num;
980 mem_conf->tx_min_mem_block_num = mem->tx_min_block_num; 980 mem_conf->tx_min_mem_block_num = mem->tx_min_block_num;
981 mem_conf->num_ssid_profiles = mem->ssid_profiles; 981 mem_conf->num_ssid_profiles = mem->ssid_profiles;
982 mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS); 982 mem_conf->total_tx_descriptors = cpu_to_le32(wl->num_tx_desc);
983 mem_conf->dyn_mem_enable = mem->dynamic_memory; 983 mem_conf->dyn_mem_enable = mem->dynamic_memory;
984 mem_conf->tx_free_req = mem->min_req_tx_blocks; 984 mem_conf->tx_free_req = mem->min_req_tx_blocks;
985 mem_conf->rx_free_req = mem->min_req_rx_blocks; 985 mem_conf->rx_free_req = mem->min_req_rx_blocks;
@@ -998,32 +998,6 @@ out:
998 return ret; 998 return ret;
999} 999}
1000 1000
1001int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap)
1002{
1003 struct wl1271_acx_host_config_bitmap *bitmap_conf;
1004 int ret;
1005
1006 bitmap_conf = kzalloc(sizeof(*bitmap_conf), GFP_KERNEL);
1007 if (!bitmap_conf) {
1008 ret = -ENOMEM;
1009 goto out;
1010 }
1011
1012 bitmap_conf->host_cfg_bitmap = cpu_to_le32(host_cfg_bitmap);
1013
1014 ret = wl1271_cmd_configure(wl, ACX_HOST_IF_CFG_BITMAP,
1015 bitmap_conf, sizeof(*bitmap_conf));
1016 if (ret < 0) {
1017 wl1271_warning("wl1271 bitmap config opt failed: %d", ret);
1018 goto out;
1019 }
1020
1021out:
1022 kfree(bitmap_conf);
1023
1024 return ret;
1025}
1026
1027int wl1271_acx_init_mem_config(struct wl1271 *wl) 1001int wl1271_acx_init_mem_config(struct wl1271 *wl)
1028{ 1002{
1029 int ret; 1003 int ret;
diff --git a/drivers/net/wireless/wl12xx/acx.h b/drivers/net/wireless/ti/wlcore/acx.h
index a28fc044034c..b2f88831b7a9 100644
--- a/drivers/net/wireless/wl12xx/acx.h
+++ b/drivers/net/wireless/ti/wlcore/acx.h
@@ -25,7 +25,7 @@
25#ifndef __ACX_H__ 25#ifndef __ACX_H__
26#define __ACX_H__ 26#define __ACX_H__
27 27
28#include "wl12xx.h" 28#include "wlcore.h"
29#include "cmd.h" 29#include "cmd.h"
30 30
31/************************************************************************* 31/*************************************************************************
@@ -824,16 +824,11 @@ struct wl1271_acx_keep_alive_config {
824 __le32 period; 824 __le32 period;
825} __packed; 825} __packed;
826 826
827/* TODO: maybe this needs to be moved somewhere else? */
827#define HOST_IF_CFG_RX_FIFO_ENABLE BIT(0) 828#define HOST_IF_CFG_RX_FIFO_ENABLE BIT(0)
828#define HOST_IF_CFG_TX_EXTRA_BLKS_SWAP BIT(1) 829#define HOST_IF_CFG_TX_EXTRA_BLKS_SWAP BIT(1)
829#define HOST_IF_CFG_TX_PAD_TO_SDIO_BLK BIT(3) 830#define HOST_IF_CFG_TX_PAD_TO_SDIO_BLK BIT(3)
830 831
831struct wl1271_acx_host_config_bitmap {
832 struct acx_header header;
833
834 __le32 host_cfg_bitmap;
835} __packed;
836
837enum { 832enum {
838 WL1271_ACX_TRIG_TYPE_LEVEL = 0, 833 WL1271_ACX_TRIG_TYPE_LEVEL = 0,
839 WL1271_ACX_TRIG_TYPE_EDGE, 834 WL1271_ACX_TRIG_TYPE_EDGE,
@@ -1274,7 +1269,6 @@ int wl1271_acx_frag_threshold(struct wl1271 *wl, u32 frag_threshold);
1274int wl1271_acx_tx_config_options(struct wl1271 *wl); 1269int wl1271_acx_tx_config_options(struct wl1271 *wl);
1275int wl12xx_acx_mem_cfg(struct wl1271 *wl); 1270int wl12xx_acx_mem_cfg(struct wl1271 *wl);
1276int wl1271_acx_init_mem_config(struct wl1271 *wl); 1271int wl1271_acx_init_mem_config(struct wl1271 *wl);
1277int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap);
1278int wl1271_acx_init_rx_interrupt(struct wl1271 *wl); 1272int wl1271_acx_init_rx_interrupt(struct wl1271 *wl);
1279int wl1271_acx_smart_reflex(struct wl1271 *wl); 1273int wl1271_acx_smart_reflex(struct wl1271 *wl);
1280int wl1271_acx_bet_enable(struct wl1271 *wl, struct wl12xx_vif *wlvif, 1274int wl1271_acx_bet_enable(struct wl1271 *wl, struct wl12xx_vif *wlvif,
diff --git a/drivers/net/wireless/ti/wlcore/boot.c b/drivers/net/wireless/ti/wlcore/boot.c
new file mode 100644
index 000000000000..3a2207db5405
--- /dev/null
+++ b/drivers/net/wireless/ti/wlcore/boot.c
@@ -0,0 +1,443 @@
1/*
2 * This file is part of wl1271
3 *
4 * Copyright (C) 2008-2010 Nokia Corporation
5 *
6 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#include <linux/slab.h>
25#include <linux/wl12xx.h>
26#include <linux/export.h>
27
28#include "debug.h"
29#include "acx.h"
30#include "boot.h"
31#include "io.h"
32#include "event.h"
33#include "rx.h"
34#include "hw_ops.h"
35
36static void wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag)
37{
38 u32 cpu_ctrl;
39
40 /* 10.5.0 run the firmware (I) */
41 cpu_ctrl = wlcore_read_reg(wl, REG_ECPU_CONTROL);
42
43 /* 10.5.1 run the firmware (II) */
44 cpu_ctrl |= flag;
45 wlcore_write_reg(wl, REG_ECPU_CONTROL, cpu_ctrl);
46}
47
48static int wlcore_parse_fw_ver(struct wl1271 *wl)
49{
50 int ret;
51
52 ret = sscanf(wl->chip.fw_ver_str + 4, "%u.%u.%u.%u.%u",
53 &wl->chip.fw_ver[0], &wl->chip.fw_ver[1],
54 &wl->chip.fw_ver[2], &wl->chip.fw_ver[3],
55 &wl->chip.fw_ver[4]);
56
57 if (ret != 5) {
58 wl1271_warning("fw version incorrect value");
59 memset(wl->chip.fw_ver, 0, sizeof(wl->chip.fw_ver));
60 return -EINVAL;
61 }
62
63 ret = wlcore_identify_fw(wl);
64 if (ret < 0)
65 return ret;
66
67 return 0;
68}
69
70static int wlcore_boot_fw_version(struct wl1271 *wl)
71{
72 struct wl1271_static_data *static_data;
73 int ret;
74
75 static_data = kmalloc(sizeof(*static_data), GFP_DMA);
76 if (!static_data) {
77 wl1271_error("Couldn't allocate memory for static data!");
78 return -ENOMEM;
79 }
80
81 wl1271_read(wl, wl->cmd_box_addr, static_data, sizeof(*static_data),
82 false);
83
84 strncpy(wl->chip.fw_ver_str, static_data->fw_version,
85 sizeof(wl->chip.fw_ver_str));
86
87 kfree(static_data);
88
89 /* make sure the string is NULL-terminated */
90 wl->chip.fw_ver_str[sizeof(wl->chip.fw_ver_str) - 1] = '\0';
91
92 ret = wlcore_parse_fw_ver(wl);
93 if (ret < 0)
94 return ret;
95
96 return 0;
97}
98
99static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
100 size_t fw_data_len, u32 dest)
101{
102 struct wlcore_partition_set partition;
103 int addr, chunk_num, partition_limit;
104 u8 *p, *chunk;
105
106 /* whal_FwCtrl_LoadFwImageSm() */
107
108 wl1271_debug(DEBUG_BOOT, "starting firmware upload");
109
110 wl1271_debug(DEBUG_BOOT, "fw_data_len %zd chunk_size %d",
111 fw_data_len, CHUNK_SIZE);
112
113 if ((fw_data_len % 4) != 0) {
114 wl1271_error("firmware length not multiple of four");
115 return -EIO;
116 }
117
118 chunk = kmalloc(CHUNK_SIZE, GFP_KERNEL);
119 if (!chunk) {
120 wl1271_error("allocation for firmware upload chunk failed");
121 return -ENOMEM;
122 }
123
124 memcpy(&partition, &wl->ptable[PART_DOWN], sizeof(partition));
125 partition.mem.start = dest;
126 wlcore_set_partition(wl, &partition);
127
128 /* 10.1 set partition limit and chunk num */
129 chunk_num = 0;
130 partition_limit = wl->ptable[PART_DOWN].mem.size;
131
132 while (chunk_num < fw_data_len / CHUNK_SIZE) {
133 /* 10.2 update partition, if needed */
134 addr = dest + (chunk_num + 2) * CHUNK_SIZE;
135 if (addr > partition_limit) {
136 addr = dest + chunk_num * CHUNK_SIZE;
137 partition_limit = chunk_num * CHUNK_SIZE +
138 wl->ptable[PART_DOWN].mem.size;
139 partition.mem.start = addr;
140 wlcore_set_partition(wl, &partition);
141 }
142
143 /* 10.3 upload the chunk */
144 addr = dest + chunk_num * CHUNK_SIZE;
145 p = buf + chunk_num * CHUNK_SIZE;
146 memcpy(chunk, p, CHUNK_SIZE);
147 wl1271_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x",
148 p, addr);
149 wl1271_write(wl, addr, chunk, CHUNK_SIZE, false);
150
151 chunk_num++;
152 }
153
154 /* 10.4 upload the last chunk */
155 addr = dest + chunk_num * CHUNK_SIZE;
156 p = buf + chunk_num * CHUNK_SIZE;
157 memcpy(chunk, p, fw_data_len % CHUNK_SIZE);
158 wl1271_debug(DEBUG_BOOT, "uploading fw last chunk (%zd B) 0x%p to 0x%x",
159 fw_data_len % CHUNK_SIZE, p, addr);
160 wl1271_write(wl, addr, chunk, fw_data_len % CHUNK_SIZE, false);
161
162 kfree(chunk);
163 return 0;
164}
165
166int wlcore_boot_upload_firmware(struct wl1271 *wl)
167{
168 u32 chunks, addr, len;
169 int ret = 0;
170 u8 *fw;
171
172 fw = wl->fw;
173 chunks = be32_to_cpup((__be32 *) fw);
174 fw += sizeof(u32);
175
176 wl1271_debug(DEBUG_BOOT, "firmware chunks to be uploaded: %u", chunks);
177
178 while (chunks--) {
179 addr = be32_to_cpup((__be32 *) fw);
180 fw += sizeof(u32);
181 len = be32_to_cpup((__be32 *) fw);
182 fw += sizeof(u32);
183
184 if (len > 300000) {
185 wl1271_info("firmware chunk too long: %u", len);
186 return -EINVAL;
187 }
188 wl1271_debug(DEBUG_BOOT, "chunk %d addr 0x%x len %u",
189 chunks, addr, len);
190 ret = wl1271_boot_upload_firmware_chunk(wl, fw, len, addr);
191 if (ret != 0)
192 break;
193 fw += len;
194 }
195
196 return ret;
197}
198EXPORT_SYMBOL_GPL(wlcore_boot_upload_firmware);
199
200int wlcore_boot_upload_nvs(struct wl1271 *wl)
201{
202 size_t nvs_len, burst_len;
203 int i;
204 u32 dest_addr, val;
205 u8 *nvs_ptr, *nvs_aligned;
206
207 if (wl->nvs == NULL)
208 return -ENODEV;
209
210 if (wl->quirks & WLCORE_QUIRK_LEGACY_NVS) {
211 struct wl1271_nvs_file *nvs =
212 (struct wl1271_nvs_file *)wl->nvs;
213 /*
214 * FIXME: the LEGACY NVS image support (NVS's missing the 5GHz
215 * band configurations) can be removed when those NVS files stop
216 * floating around.
217 */
218 if (wl->nvs_len == sizeof(struct wl1271_nvs_file) ||
219 wl->nvs_len == WL1271_INI_LEGACY_NVS_FILE_SIZE) {
220 if (nvs->general_params.dual_mode_select)
221 wl->enable_11a = true;
222 }
223
224 if (wl->nvs_len != sizeof(struct wl1271_nvs_file) &&
225 (wl->nvs_len != WL1271_INI_LEGACY_NVS_FILE_SIZE ||
226 wl->enable_11a)) {
227 wl1271_error("nvs size is not as expected: %zu != %zu",
228 wl->nvs_len, sizeof(struct wl1271_nvs_file));
229 kfree(wl->nvs);
230 wl->nvs = NULL;
231 wl->nvs_len = 0;
232 return -EILSEQ;
233 }
234
235 /* only the first part of the NVS needs to be uploaded */
236 nvs_len = sizeof(nvs->nvs);
237 nvs_ptr = (u8 *) nvs->nvs;
238 } else {
239 struct wl128x_nvs_file *nvs = (struct wl128x_nvs_file *)wl->nvs;
240
241 if (wl->nvs_len == sizeof(struct wl128x_nvs_file)) {
242 if (nvs->general_params.dual_mode_select)
243 wl->enable_11a = true;
244 } else {
245 wl1271_error("nvs size is not as expected: %zu != %zu",
246 wl->nvs_len,
247 sizeof(struct wl128x_nvs_file));
248 kfree(wl->nvs);
249 wl->nvs = NULL;
250 wl->nvs_len = 0;
251 return -EILSEQ;
252 }
253
254 /* only the first part of the NVS needs to be uploaded */
255 nvs_len = sizeof(nvs->nvs);
256 nvs_ptr = (u8 *)nvs->nvs;
257 }
258
259 /* update current MAC address to NVS */
260 nvs_ptr[11] = wl->addresses[0].addr[0];
261 nvs_ptr[10] = wl->addresses[0].addr[1];
262 nvs_ptr[6] = wl->addresses[0].addr[2];
263 nvs_ptr[5] = wl->addresses[0].addr[3];
264 nvs_ptr[4] = wl->addresses[0].addr[4];
265 nvs_ptr[3] = wl->addresses[0].addr[5];
266
267 /*
268 * Layout before the actual NVS tables:
269 * 1 byte : burst length.
270 * 2 bytes: destination address.
271 * n bytes: data to burst copy.
272 *
273 * This is ended by a 0 length, then the NVS tables.
274 */
275
276 /* FIXME: Do we need to check here whether the LSB is 1? */
277 while (nvs_ptr[0]) {
278 burst_len = nvs_ptr[0];
279 dest_addr = (nvs_ptr[1] & 0xfe) | ((u32)(nvs_ptr[2] << 8));
280
281 /*
282 * Due to our new wl1271_translate_reg_addr function,
283 * we need to add the register partition start address
284 * to the destination
285 */
286 dest_addr += wl->curr_part.reg.start;
287
288 /* We move our pointer to the data */
289 nvs_ptr += 3;
290
291 for (i = 0; i < burst_len; i++) {
292 if (nvs_ptr + 3 >= (u8 *) wl->nvs + nvs_len)
293 goto out_badnvs;
294
295 val = (nvs_ptr[0] | (nvs_ptr[1] << 8)
296 | (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24));
297
298 wl1271_debug(DEBUG_BOOT,
299 "nvs burst write 0x%x: 0x%x",
300 dest_addr, val);
301 wl1271_write32(wl, dest_addr, val);
302
303 nvs_ptr += 4;
304 dest_addr += 4;
305 }
306
307 if (nvs_ptr >= (u8 *) wl->nvs + nvs_len)
308 goto out_badnvs;
309 }
310
311 /*
312 * We've reached the first zero length, the first NVS table
313 * is located at an aligned offset which is at least 7 bytes further.
314 * NOTE: The wl->nvs->nvs element must be first, in order to
315 * simplify the casting, we assume it is at the beginning of
316 * the wl->nvs structure.
317 */
318 nvs_ptr = (u8 *)wl->nvs +
319 ALIGN(nvs_ptr - (u8 *)wl->nvs + 7, 4);
320
321 if (nvs_ptr >= (u8 *) wl->nvs + nvs_len)
322 goto out_badnvs;
323
324 nvs_len -= nvs_ptr - (u8 *)wl->nvs;
325
326 /* Now we must set the partition correctly */
327 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
328
329 /* Copy the NVS tables to a new block to ensure alignment */
330 nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL);
331 if (!nvs_aligned)
332 return -ENOMEM;
333
334 /* And finally we upload the NVS tables */
335 wlcore_write_data(wl, REG_CMD_MBOX_ADDRESS,
336 nvs_aligned, nvs_len, false);
337
338 kfree(nvs_aligned);
339 return 0;
340
341out_badnvs:
342 wl1271_error("nvs data is malformed");
343 return -EILSEQ;
344}
345EXPORT_SYMBOL_GPL(wlcore_boot_upload_nvs);
346
347int wlcore_boot_run_firmware(struct wl1271 *wl)
348{
349 int loop, ret;
350 u32 chip_id, intr;
351
352 /* Make sure we have the boot partition */
353 wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
354
355 wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT);
356
357 chip_id = wlcore_read_reg(wl, REG_CHIP_ID_B);
358
359 wl1271_debug(DEBUG_BOOT, "chip id after firmware boot: 0x%x", chip_id);
360
361 if (chip_id != wl->chip.id) {
362 wl1271_error("chip id doesn't match after firmware boot");
363 return -EIO;
364 }
365
366 /* wait for init to complete */
367 loop = 0;
368 while (loop++ < INIT_LOOP) {
369 udelay(INIT_LOOP_DELAY);
370 intr = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR);
371
372 if (intr == 0xffffffff) {
373 wl1271_error("error reading hardware complete "
374 "init indication");
375 return -EIO;
376 }
377 /* check that ACX_INTR_INIT_COMPLETE is enabled */
378 else if (intr & WL1271_ACX_INTR_INIT_COMPLETE) {
379 wlcore_write_reg(wl, REG_INTERRUPT_ACK,
380 WL1271_ACX_INTR_INIT_COMPLETE);
381 break;
382 }
383 }
384
385 if (loop > INIT_LOOP) {
386 wl1271_error("timeout waiting for the hardware to "
387 "complete initialization");
388 return -EIO;
389 }
390
391 /* get hardware config command mail box */
392 wl->cmd_box_addr = wlcore_read_reg(wl, REG_COMMAND_MAILBOX_PTR);
393
394 wl1271_debug(DEBUG_MAILBOX, "cmd_box_addr 0x%x", wl->cmd_box_addr);
395
396 /* get hardware config event mail box */
397 wl->mbox_ptr[0] = wlcore_read_reg(wl, REG_EVENT_MAILBOX_PTR);
398 wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox);
399
400 wl1271_debug(DEBUG_MAILBOX, "MBOX ptrs: 0x%x 0x%x",
401 wl->mbox_ptr[0], wl->mbox_ptr[1]);
402
403 ret = wlcore_boot_fw_version(wl);
404 if (ret < 0) {
405 wl1271_error("couldn't boot firmware");
406 return ret;
407 }
408
409 /*
410 * in case of full asynchronous mode the firmware event must be
411 * ready to receive event from the command mailbox
412 */
413
414 /* unmask required mbox events */
415 wl->event_mask = BSS_LOSE_EVENT_ID |
416 SCAN_COMPLETE_EVENT_ID |
417 ROLE_STOP_COMPLETE_EVENT_ID |
418 RSSI_SNR_TRIGGER_0_EVENT_ID |
419 PSPOLL_DELIVERY_FAILURE_EVENT_ID |
420 SOFT_GEMINI_SENSE_EVENT_ID |
421 PERIODIC_SCAN_REPORT_EVENT_ID |
422 PERIODIC_SCAN_COMPLETE_EVENT_ID |
423 DUMMY_PACKET_EVENT_ID |
424 PEER_REMOVE_COMPLETE_EVENT_ID |
425 BA_SESSION_RX_CONSTRAINT_EVENT_ID |
426 REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID |
427 INACTIVE_STA_EVENT_ID |
428 MAX_TX_RETRY_EVENT_ID |
429 CHANNEL_SWITCH_COMPLETE_EVENT_ID;
430
431 ret = wl1271_event_unmask(wl);
432 if (ret < 0) {
433 wl1271_error("EVENT mask setting failed");
434 return ret;
435 }
436
437 /* set the working partition to its "running" mode offset */
438 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
439
440 /* firmware startup completed */
441 return 0;
442}
443EXPORT_SYMBOL_GPL(wlcore_boot_run_firmware);
diff --git a/drivers/net/wireless/ti/wlcore/boot.h b/drivers/net/wireless/ti/wlcore/boot.h
new file mode 100644
index 000000000000..094981dd2227
--- /dev/null
+++ b/drivers/net/wireless/ti/wlcore/boot.h
@@ -0,0 +1,54 @@
1/*
2 * This file is part of wl1271
3 *
4 * Copyright (C) 2008-2009 Nokia Corporation
5 *
6 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#ifndef __BOOT_H__
25#define __BOOT_H__
26
27#include "wlcore.h"
28
29int wlcore_boot_upload_firmware(struct wl1271 *wl);
30int wlcore_boot_upload_nvs(struct wl1271 *wl);
31int wlcore_boot_run_firmware(struct wl1271 *wl);
32
33#define WL1271_NO_SUBBANDS 8
34#define WL1271_NO_POWER_LEVELS 4
35#define WL1271_FW_VERSION_MAX_LEN 20
36
37struct wl1271_static_data {
38 u8 mac_address[ETH_ALEN];
39 u8 padding[2];
40 u8 fw_version[WL1271_FW_VERSION_MAX_LEN];
41 u32 hw_version;
42 u8 tx_power_table[WL1271_NO_SUBBANDS][WL1271_NO_POWER_LEVELS];
43};
44
45/* number of times we try to read the INIT interrupt */
46#define INIT_LOOP 20000
47
48/* delay between retries */
49#define INIT_LOOP_DELAY 50
50
51#define WU_COUNTER_PAUSE_VAL 0x3FF
52#define WELP_ARM_COMMAND_VAL 0x4
53
54#endif
diff --git a/drivers/net/wireless/wl12xx/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 3414fc11e9ba..5c4716c6f040 100644
--- a/drivers/net/wireless/wl12xx/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -28,9 +28,8 @@
28#include <linux/ieee80211.h> 28#include <linux/ieee80211.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30 30
31#include "wl12xx.h" 31#include "wlcore.h"
32#include "debug.h" 32#include "debug.h"
33#include "reg.h"
34#include "io.h" 33#include "io.h"
35#include "acx.h" 34#include "acx.h"
36#include "wl12xx_80211.h" 35#include "wl12xx_80211.h"
@@ -67,11 +66,15 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
67 66
68 wl1271_write(wl, wl->cmd_box_addr, buf, len, false); 67 wl1271_write(wl, wl->cmd_box_addr, buf, len, false);
69 68
70 wl1271_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_CMD); 69 /*
70 * TODO: we just need this because one bit is in a different
71 * place. Is there any better way?
72 */
73 wl->ops->trigger_cmd(wl, wl->cmd_box_addr, buf, len);
71 74
72 timeout = jiffies + msecs_to_jiffies(WL1271_COMMAND_TIMEOUT); 75 timeout = jiffies + msecs_to_jiffies(WL1271_COMMAND_TIMEOUT);
73 76
74 intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); 77 intr = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR);
75 while (!(intr & WL1271_ACX_INTR_CMD_COMPLETE)) { 78 while (!(intr & WL1271_ACX_INTR_CMD_COMPLETE)) {
76 if (time_after(jiffies, timeout)) { 79 if (time_after(jiffies, timeout)) {
77 wl1271_error("command complete timeout"); 80 wl1271_error("command complete timeout");
@@ -85,7 +88,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
85 else 88 else
86 msleep(1); 89 msleep(1);
87 90
88 intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); 91 intr = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR);
89 } 92 }
90 93
91 /* read back the status code of the command */ 94 /* read back the status code of the command */
@@ -100,8 +103,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
100 goto fail; 103 goto fail;
101 } 104 }
102 105
103 wl1271_write32(wl, ACX_REG_INTERRUPT_ACK, 106 wlcore_write_reg(wl, REG_INTERRUPT_ACK, WL1271_ACX_INTR_CMD_COMPLETE);
104 WL1271_ACX_INTR_CMD_COMPLETE);
105 return 0; 107 return 0;
106 108
107fail: 109fail:
@@ -110,240 +112,18 @@ fail:
110 return ret; 112 return ret;
111} 113}
112 114
113int wl1271_cmd_general_parms(struct wl1271 *wl)
114{
115 struct wl1271_general_parms_cmd *gen_parms;
116 struct wl1271_ini_general_params *gp =
117 &((struct wl1271_nvs_file *)wl->nvs)->general_params;
118 bool answer = false;
119 int ret;
120
121 if (!wl->nvs)
122 return -ENODEV;
123
124 if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
125 wl1271_warning("FEM index from INI out of bounds");
126 return -EINVAL;
127 }
128
129 gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
130 if (!gen_parms)
131 return -ENOMEM;
132
133 gen_parms->test.id = TEST_CMD_INI_FILE_GENERAL_PARAM;
134
135 memcpy(&gen_parms->general_params, gp, sizeof(*gp));
136
137 if (gp->tx_bip_fem_auto_detect)
138 answer = true;
139
140 /* Override the REF CLK from the NVS with the one from platform data */
141 gen_parms->general_params.ref_clock = wl->ref_clock;
142
143 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer);
144 if (ret < 0) {
145 wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed");
146 goto out;
147 }
148
149 gp->tx_bip_fem_manufacturer =
150 gen_parms->general_params.tx_bip_fem_manufacturer;
151
152 if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
153 wl1271_warning("FEM index from FW out of bounds");
154 ret = -EINVAL;
155 goto out;
156 }
157
158 wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
159 answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
160
161out:
162 kfree(gen_parms);
163 return ret;
164}
165
166int wl128x_cmd_general_parms(struct wl1271 *wl)
167{
168 struct wl128x_general_parms_cmd *gen_parms;
169 struct wl128x_ini_general_params *gp =
170 &((struct wl128x_nvs_file *)wl->nvs)->general_params;
171 bool answer = false;
172 int ret;
173
174 if (!wl->nvs)
175 return -ENODEV;
176
177 if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
178 wl1271_warning("FEM index from ini out of bounds");
179 return -EINVAL;
180 }
181
182 gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
183 if (!gen_parms)
184 return -ENOMEM;
185
186 gen_parms->test.id = TEST_CMD_INI_FILE_GENERAL_PARAM;
187
188 memcpy(&gen_parms->general_params, gp, sizeof(*gp));
189
190 if (gp->tx_bip_fem_auto_detect)
191 answer = true;
192
193 /* Replace REF and TCXO CLKs with the ones from platform data */
194 gen_parms->general_params.ref_clock = wl->ref_clock;
195 gen_parms->general_params.tcxo_ref_clock = wl->tcxo_clock;
196
197 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer);
198 if (ret < 0) {
199 wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed");
200 goto out;
201 }
202
203 gp->tx_bip_fem_manufacturer =
204 gen_parms->general_params.tx_bip_fem_manufacturer;
205
206 if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
207 wl1271_warning("FEM index from FW out of bounds");
208 ret = -EINVAL;
209 goto out;
210 }
211
212 wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
213 answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
214
215out:
216 kfree(gen_parms);
217 return ret;
218}
219
220int wl1271_cmd_radio_parms(struct wl1271 *wl)
221{
222 struct wl1271_nvs_file *nvs = (struct wl1271_nvs_file *)wl->nvs;
223 struct wl1271_radio_parms_cmd *radio_parms;
224 struct wl1271_ini_general_params *gp = &nvs->general_params;
225 int ret;
226
227 if (!wl->nvs)
228 return -ENODEV;
229
230 radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL);
231 if (!radio_parms)
232 return -ENOMEM;
233
234 radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM;
235
236 /* 2.4GHz parameters */
237 memcpy(&radio_parms->static_params_2, &nvs->stat_radio_params_2,
238 sizeof(struct wl1271_ini_band_params_2));
239 memcpy(&radio_parms->dyn_params_2,
240 &nvs->dyn_radio_params_2[gp->tx_bip_fem_manufacturer].params,
241 sizeof(struct wl1271_ini_fem_params_2));
242
243 /* 5GHz parameters */
244 memcpy(&radio_parms->static_params_5,
245 &nvs->stat_radio_params_5,
246 sizeof(struct wl1271_ini_band_params_5));
247 memcpy(&radio_parms->dyn_params_5,
248 &nvs->dyn_radio_params_5[gp->tx_bip_fem_manufacturer].params,
249 sizeof(struct wl1271_ini_fem_params_5));
250
251 wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ",
252 radio_parms, sizeof(*radio_parms));
253
254 ret = wl1271_cmd_test(wl, radio_parms, sizeof(*radio_parms), 0);
255 if (ret < 0)
256 wl1271_warning("CMD_INI_FILE_RADIO_PARAM failed");
257
258 kfree(radio_parms);
259 return ret;
260}
261
262int wl128x_cmd_radio_parms(struct wl1271 *wl)
263{
264 struct wl128x_nvs_file *nvs = (struct wl128x_nvs_file *)wl->nvs;
265 struct wl128x_radio_parms_cmd *radio_parms;
266 struct wl128x_ini_general_params *gp = &nvs->general_params;
267 int ret;
268
269 if (!wl->nvs)
270 return -ENODEV;
271
272 radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL);
273 if (!radio_parms)
274 return -ENOMEM;
275
276 radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM;
277
278 /* 2.4GHz parameters */
279 memcpy(&radio_parms->static_params_2, &nvs->stat_radio_params_2,
280 sizeof(struct wl128x_ini_band_params_2));
281 memcpy(&radio_parms->dyn_params_2,
282 &nvs->dyn_radio_params_2[gp->tx_bip_fem_manufacturer].params,
283 sizeof(struct wl128x_ini_fem_params_2));
284
285 /* 5GHz parameters */
286 memcpy(&radio_parms->static_params_5,
287 &nvs->stat_radio_params_5,
288 sizeof(struct wl128x_ini_band_params_5));
289 memcpy(&radio_parms->dyn_params_5,
290 &nvs->dyn_radio_params_5[gp->tx_bip_fem_manufacturer].params,
291 sizeof(struct wl128x_ini_fem_params_5));
292
293 radio_parms->fem_vendor_and_options = nvs->fem_vendor_and_options;
294
295 wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ",
296 radio_parms, sizeof(*radio_parms));
297
298 ret = wl1271_cmd_test(wl, radio_parms, sizeof(*radio_parms), 0);
299 if (ret < 0)
300 wl1271_warning("CMD_INI_FILE_RADIO_PARAM failed");
301
302 kfree(radio_parms);
303 return ret;
304}
305
306int wl1271_cmd_ext_radio_parms(struct wl1271 *wl)
307{
308 struct wl1271_ext_radio_parms_cmd *ext_radio_parms;
309 struct conf_rf_settings *rf = &wl->conf.rf;
310 int ret;
311
312 if (!wl->nvs)
313 return -ENODEV;
314
315 ext_radio_parms = kzalloc(sizeof(*ext_radio_parms), GFP_KERNEL);
316 if (!ext_radio_parms)
317 return -ENOMEM;
318
319 ext_radio_parms->test.id = TEST_CMD_INI_FILE_RF_EXTENDED_PARAM;
320
321 memcpy(ext_radio_parms->tx_per_channel_power_compensation_2,
322 rf->tx_per_channel_power_compensation_2,
323 CONF_TX_PWR_COMPENSATION_LEN_2);
324 memcpy(ext_radio_parms->tx_per_channel_power_compensation_5,
325 rf->tx_per_channel_power_compensation_5,
326 CONF_TX_PWR_COMPENSATION_LEN_5);
327
328 wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_EXT_RADIO_PARAM: ",
329 ext_radio_parms, sizeof(*ext_radio_parms));
330
331 ret = wl1271_cmd_test(wl, ext_radio_parms, sizeof(*ext_radio_parms), 0);
332 if (ret < 0)
333 wl1271_warning("TEST_CMD_INI_FILE_RF_EXTENDED_PARAM failed");
334
335 kfree(ext_radio_parms);
336 return ret;
337}
338
339/* 115/*
340 * Poll the mailbox event field until any of the bits in the mask is set or a 116 * Poll the mailbox event field until any of the bits in the mask is set or a
341 * timeout occurs (WL1271_EVENT_TIMEOUT in msecs) 117 * timeout occurs (WL1271_EVENT_TIMEOUT in msecs)
342 */ 118 */
343static int wl1271_cmd_wait_for_event_or_timeout(struct wl1271 *wl, u32 mask) 119static int wl1271_cmd_wait_for_event_or_timeout(struct wl1271 *wl, u32 mask)
344{ 120{
345 u32 events_vector, event; 121 u32 *events_vector;
122 u32 event;
346 unsigned long timeout; 123 unsigned long timeout;
124 int ret = 0;
125
126 events_vector = kmalloc(sizeof(*events_vector), GFP_DMA);
347 127
348 timeout = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT); 128 timeout = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);
349 129
@@ -351,21 +131,24 @@ static int wl1271_cmd_wait_for_event_or_timeout(struct wl1271 *wl, u32 mask)
351 if (time_after(jiffies, timeout)) { 131 if (time_after(jiffies, timeout)) {
352 wl1271_debug(DEBUG_CMD, "timeout waiting for event %d", 132 wl1271_debug(DEBUG_CMD, "timeout waiting for event %d",
353 (int)mask); 133 (int)mask);
354 return -ETIMEDOUT; 134 ret = -ETIMEDOUT;
135 goto out;
355 } 136 }
356 137
357 msleep(1); 138 msleep(1);
358 139
359 /* read from both event fields */ 140 /* read from both event fields */
360 wl1271_read(wl, wl->mbox_ptr[0], &events_vector, 141 wl1271_read(wl, wl->mbox_ptr[0], events_vector,
361 sizeof(events_vector), false); 142 sizeof(*events_vector), false);
362 event = events_vector & mask; 143 event = *events_vector & mask;
363 wl1271_read(wl, wl->mbox_ptr[1], &events_vector, 144 wl1271_read(wl, wl->mbox_ptr[1], events_vector,
364 sizeof(events_vector), false); 145 sizeof(*events_vector), false);
365 event |= events_vector & mask; 146 event |= *events_vector & mask;
366 } while (!event); 147 } while (!event);
367 148
368 return 0; 149out:
150 kfree(events_vector);
151 return ret;
369} 152}
370 153
371static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask) 154static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
@@ -522,7 +305,7 @@ static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
522 305
523 cmd->role_id = wlvif->dev_role_id; 306 cmd->role_id = wlvif->dev_role_id;
524 if (wlvif->band == IEEE80211_BAND_5GHZ) 307 if (wlvif->band == IEEE80211_BAND_5GHZ)
525 cmd->band = WL12XX_BAND_5GHZ; 308 cmd->band = WLCORE_BAND_5GHZ;
526 cmd->channel = wlvif->channel; 309 cmd->channel = wlvif->channel;
527 310
528 if (wlvif->dev_hlid == WL12XX_INVALID_LINK_ID) { 311 if (wlvif->dev_hlid == WL12XX_INVALID_LINK_ID) {
@@ -613,7 +396,7 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
613 396
614 cmd->role_id = wlvif->role_id; 397 cmd->role_id = wlvif->role_id;
615 if (wlvif->band == IEEE80211_BAND_5GHZ) 398 if (wlvif->band == IEEE80211_BAND_5GHZ)
616 cmd->band = WL12XX_BAND_5GHZ; 399 cmd->band = WLCORE_BAND_5GHZ;
617 cmd->channel = wlvif->channel; 400 cmd->channel = wlvif->channel;
618 cmd->sta.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set); 401 cmd->sta.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
619 cmd->sta.beacon_interval = cpu_to_le16(wlvif->beacon_int); 402 cmd->sta.beacon_interval = cpu_to_le16(wlvif->beacon_int);
@@ -750,14 +533,14 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
750 533
751 switch (wlvif->band) { 534 switch (wlvif->band) {
752 case IEEE80211_BAND_2GHZ: 535 case IEEE80211_BAND_2GHZ:
753 cmd->band = RADIO_BAND_2_4GHZ; 536 cmd->band = WLCORE_BAND_2_4GHZ;
754 break; 537 break;
755 case IEEE80211_BAND_5GHZ: 538 case IEEE80211_BAND_5GHZ:
756 cmd->band = RADIO_BAND_5GHZ; 539 cmd->band = WLCORE_BAND_5GHZ;
757 break; 540 break;
758 default: 541 default:
759 wl1271_warning("ap start - unknown band: %d", (int)wlvif->band); 542 wl1271_warning("ap start - unknown band: %d", (int)wlvif->band);
760 cmd->band = RADIO_BAND_2_4GHZ; 543 cmd->band = WLCORE_BAND_2_4GHZ;
761 break; 544 break;
762 } 545 }
763 546
@@ -830,7 +613,7 @@ int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif)
830 613
831 cmd->role_id = wlvif->role_id; 614 cmd->role_id = wlvif->role_id;
832 if (wlvif->band == IEEE80211_BAND_5GHZ) 615 if (wlvif->band == IEEE80211_BAND_5GHZ)
833 cmd->band = WL12XX_BAND_5GHZ; 616 cmd->band = WLCORE_BAND_5GHZ;
834 cmd->channel = wlvif->channel; 617 cmd->channel = wlvif->channel;
835 cmd->ibss.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set); 618 cmd->ibss.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
836 cmd->ibss.beacon_interval = cpu_to_le16(wlvif->beacon_int); 619 cmd->ibss.beacon_interval = cpu_to_le16(wlvif->beacon_int);
@@ -904,6 +687,7 @@ int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer)
904 687
905 return ret; 688 return ret;
906} 689}
690EXPORT_SYMBOL_GPL(wl1271_cmd_test);
907 691
908/** 692/**
909 * read acx from firmware 693 * read acx from firmware
@@ -960,6 +744,7 @@ int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len)
960 744
961 return 0; 745 return 0;
962} 746}
747EXPORT_SYMBOL_GPL(wl1271_cmd_configure);
963 748
964int wl1271_cmd_data_path(struct wl1271 *wl, bool enable) 749int wl1271_cmd_data_path(struct wl1271 *wl, bool enable)
965{ 750{
@@ -1730,10 +1515,10 @@ static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
1730 cmd->channel = wlvif->channel; 1515 cmd->channel = wlvif->channel;
1731 switch (wlvif->band) { 1516 switch (wlvif->band) {
1732 case IEEE80211_BAND_2GHZ: 1517 case IEEE80211_BAND_2GHZ:
1733 cmd->band = RADIO_BAND_2_4GHZ; 1518 cmd->band = WLCORE_BAND_2_4GHZ;
1734 break; 1519 break;
1735 case IEEE80211_BAND_5GHZ: 1520 case IEEE80211_BAND_5GHZ:
1736 cmd->band = RADIO_BAND_5GHZ; 1521 cmd->band = WLCORE_BAND_5GHZ;
1737 break; 1522 break;
1738 default: 1523 default:
1739 wl1271_error("roc - unknown band: %d", (int)wlvif->band); 1524 wl1271_error("roc - unknown band: %d", (int)wlvif->band);
diff --git a/drivers/net/wireless/wl12xx/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index de217d92516b..a46ae07cb77e 100644
--- a/drivers/net/wireless/wl12xx/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -25,17 +25,12 @@
25#ifndef __CMD_H__ 25#ifndef __CMD_H__
26#define __CMD_H__ 26#define __CMD_H__
27 27
28#include "wl12xx.h" 28#include "wlcore.h"
29 29
30struct acx_header; 30struct acx_header;
31 31
32int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len, 32int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
33 size_t res_len); 33 size_t res_len);
34int wl1271_cmd_general_parms(struct wl1271 *wl);
35int wl128x_cmd_general_parms(struct wl1271 *wl);
36int wl1271_cmd_radio_parms(struct wl1271 *wl);
37int wl128x_cmd_radio_parms(struct wl1271 *wl);
38int wl1271_cmd_ext_radio_parms(struct wl1271 *wl);
39int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type, 34int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type,
40 u8 *role_id); 35 u8 *role_id);
41int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id); 36int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id);
@@ -262,13 +257,13 @@ struct wl12xx_cmd_role_disable {
262 u8 padding[3]; 257 u8 padding[3];
263} __packed; 258} __packed;
264 259
265enum wl12xx_band { 260enum wlcore_band {
266 WL12XX_BAND_2_4GHZ = 0, 261 WLCORE_BAND_2_4GHZ = 0,
267 WL12XX_BAND_5GHZ = 1, 262 WLCORE_BAND_5GHZ = 1,
268 WL12XX_BAND_JAPAN_4_9_GHZ = 2, 263 WLCORE_BAND_JAPAN_4_9_GHZ = 2,
269 WL12XX_BAND_DEFAULT = WL12XX_BAND_2_4GHZ, 264 WLCORE_BAND_DEFAULT = WLCORE_BAND_2_4GHZ,
270 WL12XX_BAND_INVALID = 0x7E, 265 WLCORE_BAND_INVALID = 0x7E,
271 WL12XX_BAND_MAX_RADIO = 0x7F, 266 WLCORE_BAND_MAX_RADIO = 0x7F,
272}; 267};
273 268
274struct wl12xx_cmd_role_start { 269struct wl12xx_cmd_role_start {
@@ -494,83 +489,6 @@ enum wl1271_channel_tune_bands {
494 489
495#define WL1271_PD_REFERENCE_POINT_BAND_B_G 0 490#define WL1271_PD_REFERENCE_POINT_BAND_B_G 0
496 491
497#define TEST_CMD_INI_FILE_RADIO_PARAM 0x19
498#define TEST_CMD_INI_FILE_GENERAL_PARAM 0x1E
499#define TEST_CMD_INI_FILE_RF_EXTENDED_PARAM 0x26
500
501struct wl1271_general_parms_cmd {
502 struct wl1271_cmd_header header;
503
504 struct wl1271_cmd_test_header test;
505
506 struct wl1271_ini_general_params general_params;
507
508 u8 sr_debug_table[WL1271_INI_MAX_SMART_REFLEX_PARAM];
509 u8 sr_sen_n_p;
510 u8 sr_sen_n_p_gain;
511 u8 sr_sen_nrn;
512 u8 sr_sen_prn;
513 u8 padding[3];
514} __packed;
515
516struct wl128x_general_parms_cmd {
517 struct wl1271_cmd_header header;
518
519 struct wl1271_cmd_test_header test;
520
521 struct wl128x_ini_general_params general_params;
522
523 u8 sr_debug_table[WL1271_INI_MAX_SMART_REFLEX_PARAM];
524 u8 sr_sen_n_p;
525 u8 sr_sen_n_p_gain;
526 u8 sr_sen_nrn;
527 u8 sr_sen_prn;
528 u8 padding[3];
529} __packed;
530
531struct wl1271_radio_parms_cmd {
532 struct wl1271_cmd_header header;
533
534 struct wl1271_cmd_test_header test;
535
536 /* Static radio parameters */
537 struct wl1271_ini_band_params_2 static_params_2;
538 struct wl1271_ini_band_params_5 static_params_5;
539
540 /* Dynamic radio parameters */
541 struct wl1271_ini_fem_params_2 dyn_params_2;
542 u8 padding2;
543 struct wl1271_ini_fem_params_5 dyn_params_5;
544 u8 padding3[2];
545} __packed;
546
547struct wl128x_radio_parms_cmd {
548 struct wl1271_cmd_header header;
549
550 struct wl1271_cmd_test_header test;
551
552 /* Static radio parameters */
553 struct wl128x_ini_band_params_2 static_params_2;
554 struct wl128x_ini_band_params_5 static_params_5;
555
556 u8 fem_vendor_and_options;
557
558 /* Dynamic radio parameters */
559 struct wl128x_ini_fem_params_2 dyn_params_2;
560 u8 padding2;
561 struct wl128x_ini_fem_params_5 dyn_params_5;
562} __packed;
563
564struct wl1271_ext_radio_parms_cmd {
565 struct wl1271_cmd_header header;
566
567 struct wl1271_cmd_test_header test;
568
569 u8 tx_per_channel_power_compensation_2[CONF_TX_PWR_COMPENSATION_LEN_2];
570 u8 tx_per_channel_power_compensation_5[CONF_TX_PWR_COMPENSATION_LEN_5];
571 u8 padding[3];
572} __packed;
573
574/* 492/*
575 * There are three types of disconnections: 493 * There are three types of disconnections:
576 * 494 *
diff --git a/drivers/net/wireless/wl12xx/conf.h b/drivers/net/wireless/ti/wlcore/conf.h
index 3e581e19424c..fef0db4213bc 100644
--- a/drivers/net/wireless/wl12xx/conf.h
+++ b/drivers/net/wireless/ti/wlcore/conf.h
@@ -65,36 +65,7 @@ enum {
65 CONF_HW_RATE_INDEX_MAX = CONF_HW_RATE_INDEX_54MBPS, 65 CONF_HW_RATE_INDEX_MAX = CONF_HW_RATE_INDEX_54MBPS,
66}; 66};
67 67
68enum { 68#define CONF_HW_RXTX_RATE_UNSUPPORTED 0xff
69 CONF_HW_RXTX_RATE_MCS7_SGI = 0,
70 CONF_HW_RXTX_RATE_MCS7,
71 CONF_HW_RXTX_RATE_MCS6,
72 CONF_HW_RXTX_RATE_MCS5,
73 CONF_HW_RXTX_RATE_MCS4,
74 CONF_HW_RXTX_RATE_MCS3,
75 CONF_HW_RXTX_RATE_MCS2,
76 CONF_HW_RXTX_RATE_MCS1,
77 CONF_HW_RXTX_RATE_MCS0,
78 CONF_HW_RXTX_RATE_54,
79 CONF_HW_RXTX_RATE_48,
80 CONF_HW_RXTX_RATE_36,
81 CONF_HW_RXTX_RATE_24,
82 CONF_HW_RXTX_RATE_22,
83 CONF_HW_RXTX_RATE_18,
84 CONF_HW_RXTX_RATE_12,
85 CONF_HW_RXTX_RATE_11,
86 CONF_HW_RXTX_RATE_9,
87 CONF_HW_RXTX_RATE_6,
88 CONF_HW_RXTX_RATE_5_5,
89 CONF_HW_RXTX_RATE_2,
90 CONF_HW_RXTX_RATE_1,
91 CONF_HW_RXTX_RATE_MAX,
92 CONF_HW_RXTX_RATE_UNSUPPORTED = 0xff
93};
94
95/* Rates between and including these are MCS rates */
96#define CONF_HW_RXTX_RATE_MCS_MIN CONF_HW_RXTX_RATE_MCS7_SGI
97#define CONF_HW_RXTX_RATE_MCS_MAX CONF_HW_RXTX_RATE_MCS0
98 69
99enum { 70enum {
100 CONF_SG_DISABLE = 0, 71 CONF_SG_DISABLE = 0,
@@ -1096,16 +1067,31 @@ struct conf_scan_settings {
1096}; 1067};
1097 1068
1098struct conf_sched_scan_settings { 1069struct conf_sched_scan_settings {
1099 /* minimum time to wait on the channel for active scans (in TUs) */ 1070 /*
1100 u16 min_dwell_time_active; 1071 * The base time to wait on the channel for active scans (in TU/1000).
1072 * The minimum dwell time is calculated according to this:
1073 * min_dwell_time = base + num_of_probes_to_be_sent * delta_per_probe
1074 * The maximum dwell time is calculated according to this:
1075 * max_dwell_time = min_dwell_time + max_dwell_time_delta
1076 */
1077 u32 base_dwell_time;
1101 1078
1102 /* maximum time to wait on the channel for active scans (in TUs) */ 1079 /* The delta between the min dwell time and max dwell time for
1103 u16 max_dwell_time_active; 1080 * active scans (in TU/1000s). The max dwell time is used by the FW once
1081 * traffic is detected on the channel.
1082 */
1083 u32 max_dwell_time_delta;
1084
1085 /* Delta added to min dwell time per each probe in 2.4 GHz (TU/1000) */
1086 u32 dwell_time_delta_per_probe;
1104 1087
1105 /* time to wait on the channel for passive scans (in TUs) */ 1088 /* Delta added to min dwell time per each probe in 5 GHz (TU/1000) */
1089 u32 dwell_time_delta_per_probe_5;
1090
1091 /* time to wait on the channel for passive scans (in TU/1000) */
1106 u32 dwell_time_passive; 1092 u32 dwell_time_passive;
1107 1093
1108 /* time to wait on the channel for DFS scans (in TUs) */ 1094 /* time to wait on the channel for DFS scans (in TU/1000) */
1109 u32 dwell_time_dfs; 1095 u32 dwell_time_dfs;
1110 1096
1111 /* number of probe requests to send on each channel in active scans */ 1097 /* number of probe requests to send on each channel in active scans */
@@ -1118,26 +1104,6 @@ struct conf_sched_scan_settings {
1118 s8 snr_threshold; 1104 s8 snr_threshold;
1119}; 1105};
1120 1106
1121/* these are number of channels on the band divided by two, rounded up */
1122#define CONF_TX_PWR_COMPENSATION_LEN_2 7
1123#define CONF_TX_PWR_COMPENSATION_LEN_5 18
1124
1125struct conf_rf_settings {
1126 /*
1127 * Per channel power compensation for 2.4GHz
1128 *
1129 * Range: s8
1130 */
1131 u8 tx_per_channel_power_compensation_2[CONF_TX_PWR_COMPENSATION_LEN_2];
1132
1133 /*
1134 * Per channel power compensation for 5GHz
1135 *
1136 * Range: s8
1137 */
1138 u8 tx_per_channel_power_compensation_5[CONF_TX_PWR_COMPENSATION_LEN_5];
1139};
1140
1141struct conf_ht_setting { 1107struct conf_ht_setting {
1142 u8 rx_ba_win_size; 1108 u8 rx_ba_win_size;
1143 u8 tx_ba_win_size; 1109 u8 tx_ba_win_size;
@@ -1286,7 +1252,7 @@ struct conf_hangover_settings {
1286 u8 window_size; 1252 u8 window_size;
1287}; 1253};
1288 1254
1289struct conf_drv_settings { 1255struct wlcore_conf {
1290 struct conf_sg_settings sg; 1256 struct conf_sg_settings sg;
1291 struct conf_rx_settings rx; 1257 struct conf_rx_settings rx;
1292 struct conf_tx_settings tx; 1258 struct conf_tx_settings tx;
@@ -1296,16 +1262,13 @@ struct conf_drv_settings {
1296 struct conf_roam_trigger_settings roam_trigger; 1262 struct conf_roam_trigger_settings roam_trigger;
1297 struct conf_scan_settings scan; 1263 struct conf_scan_settings scan;
1298 struct conf_sched_scan_settings sched_scan; 1264 struct conf_sched_scan_settings sched_scan;
1299 struct conf_rf_settings rf;
1300 struct conf_ht_setting ht; 1265 struct conf_ht_setting ht;
1301 struct conf_memory_settings mem_wl127x; 1266 struct conf_memory_settings mem;
1302 struct conf_memory_settings mem_wl128x;
1303 struct conf_fm_coex fm_coex; 1267 struct conf_fm_coex fm_coex;
1304 struct conf_rx_streaming_settings rx_streaming; 1268 struct conf_rx_streaming_settings rx_streaming;
1305 struct conf_fwlog fwlog; 1269 struct conf_fwlog fwlog;
1306 struct conf_rate_policy_settings rate; 1270 struct conf_rate_policy_settings rate;
1307 struct conf_hangover_settings hangover; 1271 struct conf_hangover_settings hangover;
1308 u8 hci_io_ds;
1309}; 1272};
1310 1273
1311#endif 1274#endif
diff --git a/drivers/net/wireless/wl12xx/debug.h b/drivers/net/wireless/ti/wlcore/debug.h
index ec0fdc25b280..6b800b3cbea5 100644
--- a/drivers/net/wireless/wl12xx/debug.h
+++ b/drivers/net/wireless/ti/wlcore/debug.h
@@ -52,6 +52,7 @@ enum {
52 DEBUG_ADHOC = BIT(16), 52 DEBUG_ADHOC = BIT(16),
53 DEBUG_AP = BIT(17), 53 DEBUG_AP = BIT(17),
54 DEBUG_PROBE = BIT(18), 54 DEBUG_PROBE = BIT(18),
55 DEBUG_IO = BIT(19),
55 DEBUG_MASTER = (DEBUG_ADHOC | DEBUG_AP), 56 DEBUG_MASTER = (DEBUG_ADHOC | DEBUG_AP),
56 DEBUG_ALL = ~0, 57 DEBUG_ALL = ~0,
57}; 58};
diff --git a/drivers/net/wireless/wl12xx/debugfs.c b/drivers/net/wireless/ti/wlcore/debugfs.c
index 564d49575c94..d5aea1ff5ad1 100644
--- a/drivers/net/wireless/wl12xx/debugfs.c
+++ b/drivers/net/wireless/ti/wlcore/debugfs.c
@@ -26,7 +26,7 @@
26#include <linux/skbuff.h> 26#include <linux/skbuff.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28 28
29#include "wl12xx.h" 29#include "wlcore.h"
30#include "debug.h" 30#include "debug.h"
31#include "acx.h" 31#include "acx.h"
32#include "ps.h" 32#include "ps.h"
@@ -647,6 +647,7 @@ static ssize_t vifs_state_read(struct file *file, char __user *user_buf,
647 VIF_STATE_PRINT_INT(last_rssi_event); 647 VIF_STATE_PRINT_INT(last_rssi_event);
648 VIF_STATE_PRINT_INT(ba_support); 648 VIF_STATE_PRINT_INT(ba_support);
649 VIF_STATE_PRINT_INT(ba_allowed); 649 VIF_STATE_PRINT_INT(ba_allowed);
650 VIF_STATE_PRINT_INT(is_gem);
650 VIF_STATE_PRINT_LLHEX(tx_security_seq); 651 VIF_STATE_PRINT_LLHEX(tx_security_seq);
651 VIF_STATE_PRINT_INT(tx_security_last_seq_lsb); 652 VIF_STATE_PRINT_INT(tx_security_last_seq_lsb);
652 } 653 }
diff --git a/drivers/net/wireless/wl12xx/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
index 254c5b292cf6..a8d3aef011ff 100644
--- a/drivers/net/wireless/wl12xx/debugfs.h
+++ b/drivers/net/wireless/ti/wlcore/debugfs.h
@@ -24,7 +24,7 @@
24#ifndef __DEBUGFS_H__ 24#ifndef __DEBUGFS_H__
25#define __DEBUGFS_H__ 25#define __DEBUGFS_H__
26 26
27#include "wl12xx.h" 27#include "wlcore.h"
28 28
29int wl1271_debugfs_init(struct wl1271 *wl); 29int wl1271_debugfs_init(struct wl1271 *wl);
30void wl1271_debugfs_exit(struct wl1271 *wl); 30void wl1271_debugfs_exit(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/event.c b/drivers/net/wireless/ti/wlcore/event.c
index c953717f38eb..292632ddf890 100644
--- a/drivers/net/wireless/wl12xx/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -21,9 +21,8 @@
21 * 21 *
22 */ 22 */
23 23
24#include "wl12xx.h" 24#include "wlcore.h"
25#include "debug.h" 25#include "debug.h"
26#include "reg.h"
27#include "io.h" 26#include "io.h"
28#include "event.h" 27#include "event.h"
29#include "ps.h" 28#include "ps.h"
@@ -98,8 +97,9 @@ static void wl1271_event_mbox_dump(struct event_mailbox *mbox)
98 wl1271_debug(DEBUG_EVENT, "\tmask: 0x%x", mbox->events_mask); 97 wl1271_debug(DEBUG_EVENT, "\tmask: 0x%x", mbox->events_mask);
99} 98}
100 99
101static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox) 100static int wl1271_event_process(struct wl1271 *wl)
102{ 101{
102 struct event_mailbox *mbox = wl->mbox;
103 struct ieee80211_vif *vif; 103 struct ieee80211_vif *vif;
104 struct wl12xx_vif *wlvif; 104 struct wl12xx_vif *wlvif;
105 u32 vector; 105 u32 vector;
@@ -196,7 +196,7 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
196 bool success; 196 bool success;
197 197
198 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, 198 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS,
199 &wl->flags)) 199 &wlvif->flags))
200 continue; 200 continue;
201 201
202 success = mbox->channel_switch_status ? false : true; 202 success = mbox->channel_switch_status ? false : true;
@@ -278,18 +278,8 @@ int wl1271_event_unmask(struct wl1271 *wl)
278 return 0; 278 return 0;
279} 279}
280 280
281void wl1271_event_mbox_config(struct wl1271 *wl)
282{
283 wl->mbox_ptr[0] = wl1271_read32(wl, REG_EVENT_MAILBOX_PTR);
284 wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox);
285
286 wl1271_debug(DEBUG_EVENT, "MBOX ptrs: 0x%x 0x%x",
287 wl->mbox_ptr[0], wl->mbox_ptr[1]);
288}
289
290int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num) 281int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
291{ 282{
292 struct event_mailbox mbox;
293 int ret; 283 int ret;
294 284
295 wl1271_debug(DEBUG_EVENT, "EVENT on mbox %d", mbox_num); 285 wl1271_debug(DEBUG_EVENT, "EVENT on mbox %d", mbox_num);
@@ -298,16 +288,19 @@ int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
298 return -EINVAL; 288 return -EINVAL;
299 289
300 /* first we read the mbox descriptor */ 290 /* first we read the mbox descriptor */
301 wl1271_read(wl, wl->mbox_ptr[mbox_num], &mbox, 291 wl1271_read(wl, wl->mbox_ptr[mbox_num], wl->mbox,
302 sizeof(struct event_mailbox), false); 292 sizeof(*wl->mbox), false);
303 293
304 /* process the descriptor */ 294 /* process the descriptor */
305 ret = wl1271_event_process(wl, &mbox); 295 ret = wl1271_event_process(wl);
306 if (ret < 0) 296 if (ret < 0)
307 return ret; 297 return ret;
308 298
309 /* then we let the firmware know it can go on...*/ 299 /*
310 wl1271_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_EVENT_ACK); 300 * TODO: we just need this because one bit is in a different
301 * place. Is there any better way?
302 */
303 wl->ops->ack_event(wl);
311 304
312 return 0; 305 return 0;
313} 306}
diff --git a/drivers/net/wireless/wl12xx/event.h b/drivers/net/wireless/ti/wlcore/event.h
index 057d193d3525..8adf18d6c58f 100644
--- a/drivers/net/wireless/wl12xx/event.h
+++ b/drivers/net/wireless/ti/wlcore/event.h
@@ -132,8 +132,9 @@ struct event_mailbox {
132 u8 reserved_8[9]; 132 u8 reserved_8[9];
133} __packed; 133} __packed;
134 134
135struct wl1271;
136
135int wl1271_event_unmask(struct wl1271 *wl); 137int wl1271_event_unmask(struct wl1271 *wl);
136void wl1271_event_mbox_config(struct wl1271 *wl);
137int wl1271_event_handle(struct wl1271 *wl, u8 mbox); 138int wl1271_event_handle(struct wl1271 *wl, u8 mbox);
138 139
139#endif 140#endif
diff --git a/drivers/net/wireless/ti/wlcore/hw_ops.h b/drivers/net/wireless/ti/wlcore/hw_ops.h
new file mode 100644
index 000000000000..9384b4d56c24
--- /dev/null
+++ b/drivers/net/wireless/ti/wlcore/hw_ops.h
@@ -0,0 +1,122 @@
1/*
2 * This file is part of wlcore
3 *
4 * Copyright (C) 2011 Texas Instruments Inc.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#ifndef __WLCORE_HW_OPS_H__
23#define __WLCORE_HW_OPS_H__
24
25#include "wlcore.h"
26#include "rx.h"
27
28static inline u32
29wlcore_hw_calc_tx_blocks(struct wl1271 *wl, u32 len, u32 spare_blks)
30{
31 if (!wl->ops->calc_tx_blocks)
32 BUG_ON(1);
33
34 return wl->ops->calc_tx_blocks(wl, len, spare_blks);
35}
36
37static inline void
38wlcore_hw_set_tx_desc_blocks(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc,
39 u32 blks, u32 spare_blks)
40{
41 if (!wl->ops->set_tx_desc_blocks)
42 BUG_ON(1);
43
44 return wl->ops->set_tx_desc_blocks(wl, desc, blks, spare_blks);
45}
46
47static inline void
48wlcore_hw_set_tx_desc_data_len(struct wl1271 *wl,
49 struct wl1271_tx_hw_descr *desc,
50 struct sk_buff *skb)
51{
52 if (!wl->ops->set_tx_desc_data_len)
53 BUG_ON(1);
54
55 wl->ops->set_tx_desc_data_len(wl, desc, skb);
56}
57
58static inline enum wl_rx_buf_align
59wlcore_hw_get_rx_buf_align(struct wl1271 *wl, u32 rx_desc)
60{
61
62 if (!wl->ops->get_rx_buf_align)
63 BUG_ON(1);
64
65 return wl->ops->get_rx_buf_align(wl, rx_desc);
66}
67
68static inline void
69wlcore_hw_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len)
70{
71 if (wl->ops->prepare_read)
72 wl->ops->prepare_read(wl, rx_desc, len);
73}
74
75static inline u32
76wlcore_hw_get_rx_packet_len(struct wl1271 *wl, void *rx_data, u32 data_len)
77{
78 if (!wl->ops->get_rx_packet_len)
79 BUG_ON(1);
80
81 return wl->ops->get_rx_packet_len(wl, rx_data, data_len);
82}
83
84static inline void wlcore_hw_tx_delayed_compl(struct wl1271 *wl)
85{
86 if (wl->ops->tx_delayed_compl)
87 wl->ops->tx_delayed_compl(wl);
88}
89
90static inline void wlcore_hw_tx_immediate_compl(struct wl1271 *wl)
91{
92 if (wl->ops->tx_immediate_compl)
93 wl->ops->tx_immediate_compl(wl);
94}
95
96static inline int
97wlcore_hw_init_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
98{
99 if (wl->ops->init_vif)
100 return wl->ops->init_vif(wl, wlvif);
101
102 return 0;
103}
104
105static inline u32
106wlcore_hw_sta_get_ap_rate_mask(struct wl1271 *wl, struct wl12xx_vif *wlvif)
107{
108 if (!wl->ops->sta_get_ap_rate_mask)
109 BUG_ON(1);
110
111 return wl->ops->sta_get_ap_rate_mask(wl, wlvif);
112}
113
114static inline int wlcore_identify_fw(struct wl1271 *wl)
115{
116 if (wl->ops->identify_fw)
117 return wl->ops->identify_fw(wl);
118
119 return 0;
120}
121
122#endif
diff --git a/drivers/net/wireless/wl12xx/ini.h b/drivers/net/wireless/ti/wlcore/ini.h
index 4cf9ecc56212..4cf9ecc56212 100644
--- a/drivers/net/wireless/wl12xx/ini.h
+++ b/drivers/net/wireless/ti/wlcore/ini.h
diff --git a/drivers/net/wireless/wl12xx/init.c b/drivers/net/wireless/ti/wlcore/init.c
index 203fbebf09eb..9f89255eb6e6 100644
--- a/drivers/net/wireless/wl12xx/init.c
+++ b/drivers/net/wireless/ti/wlcore/init.c
@@ -30,9 +30,9 @@
30#include "wl12xx_80211.h" 30#include "wl12xx_80211.h"
31#include "acx.h" 31#include "acx.h"
32#include "cmd.h" 32#include "cmd.h"
33#include "reg.h"
34#include "tx.h" 33#include "tx.h"
35#include "io.h" 34#include "io.h"
35#include "hw_ops.h"
36 36
37int wl1271_init_templates_config(struct wl1271 *wl) 37int wl1271_init_templates_config(struct wl1271 *wl)
38{ 38{
@@ -319,7 +319,7 @@ static int wl12xx_init_fwlog(struct wl1271 *wl)
319{ 319{
320 int ret; 320 int ret;
321 321
322 if (wl->quirks & WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED) 322 if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
323 return 0; 323 return 0;
324 324
325 ret = wl12xx_cmd_config_fwlog(wl); 325 ret = wl12xx_cmd_config_fwlog(wl);
@@ -494,26 +494,6 @@ static int wl1271_set_ba_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif)
494 return wl12xx_acx_set_ba_initiator_policy(wl, wlvif); 494 return wl12xx_acx_set_ba_initiator_policy(wl, wlvif);
495} 495}
496 496
497int wl1271_chip_specific_init(struct wl1271 *wl)
498{
499 int ret = 0;
500
501 if (wl->chip.id == CHIP_ID_1283_PG20) {
502 u32 host_cfg_bitmap = HOST_IF_CFG_RX_FIFO_ENABLE;
503
504 if (!(wl->quirks & WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT))
505 /* Enable SDIO padding */
506 host_cfg_bitmap |= HOST_IF_CFG_TX_PAD_TO_SDIO_BLK;
507
508 /* Must be before wl1271_acx_init_mem_config() */
509 ret = wl1271_acx_host_if_cfg_bitmap(wl, host_cfg_bitmap);
510 if (ret < 0)
511 goto out;
512 }
513out:
514 return ret;
515}
516
517/* vif-specifc initialization */ 497/* vif-specifc initialization */
518static int wl12xx_init_sta_role(struct wl1271 *wl, struct wl12xx_vif *wlvif) 498static int wl12xx_init_sta_role(struct wl1271 *wl, struct wl12xx_vif *wlvif)
519{ 499{
@@ -582,10 +562,17 @@ int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif)
582 if (ret < 0) 562 if (ret < 0)
583 return ret; 563 return ret;
584 } else if (!wl->sta_count) { 564 } else if (!wl->sta_count) {
585 /* Configure for ELP power saving */ 565 if (wl->quirks & WLCORE_QUIRK_NO_ELP) {
586 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP); 566 /* Configure for power always on */
587 if (ret < 0) 567 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
588 return ret; 568 if (ret < 0)
569 return ret;
570 } else {
571 /* Configure for ELP power saving */
572 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
573 if (ret < 0)
574 return ret;
575 }
589 } 576 }
590 } 577 }
591 578
@@ -652,6 +639,10 @@ int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif)
652 if (ret < 0) 639 if (ret < 0)
653 return ret; 640 return ret;
654 641
642 ret = wlcore_hw_init_vif(wl, wlvif);
643 if (ret < 0)
644 return ret;
645
655 return 0; 646 return 0;
656} 647}
657 648
@@ -659,27 +650,8 @@ int wl1271_hw_init(struct wl1271 *wl)
659{ 650{
660 int ret; 651 int ret;
661 652
662 if (wl->chip.id == CHIP_ID_1283_PG20) { 653 /* Chip-specific hw init */
663 ret = wl128x_cmd_general_parms(wl); 654 ret = wl->ops->hw_init(wl);
664 if (ret < 0)
665 return ret;
666 ret = wl128x_cmd_radio_parms(wl);
667 if (ret < 0)
668 return ret;
669 } else {
670 ret = wl1271_cmd_general_parms(wl);
671 if (ret < 0)
672 return ret;
673 ret = wl1271_cmd_radio_parms(wl);
674 if (ret < 0)
675 return ret;
676 ret = wl1271_cmd_ext_radio_parms(wl);
677 if (ret < 0)
678 return ret;
679 }
680
681 /* Chip-specific init */
682 ret = wl1271_chip_specific_init(wl);
683 if (ret < 0) 655 if (ret < 0)
684 return ret; 656 return ret;
685 657
diff --git a/drivers/net/wireless/wl12xx/init.h b/drivers/net/wireless/ti/wlcore/init.h
index 2da0f404ef6e..a45fbfddec19 100644
--- a/drivers/net/wireless/wl12xx/init.h
+++ b/drivers/net/wireless/ti/wlcore/init.h
@@ -24,7 +24,7 @@
24#ifndef __INIT_H__ 24#ifndef __INIT_H__
25#define __INIT_H__ 25#define __INIT_H__
26 26
27#include "wl12xx.h" 27#include "wlcore.h"
28 28
29int wl1271_hw_init_power_auth(struct wl1271 *wl); 29int wl1271_hw_init_power_auth(struct wl1271 *wl);
30int wl1271_init_templates_config(struct wl1271 *wl); 30int wl1271_init_templates_config(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/io.c b/drivers/net/wireless/ti/wlcore/io.c
index c574a3b31e31..7cd0081aede5 100644
--- a/drivers/net/wireless/wl12xx/io.c
+++ b/drivers/net/wireless/ti/wlcore/io.c
@@ -26,84 +26,12 @@
26#include <linux/spi/spi.h> 26#include <linux/spi/spi.h>
27#include <linux/interrupt.h> 27#include <linux/interrupt.h>
28 28
29#include "wl12xx.h" 29#include "wlcore.h"
30#include "debug.h" 30#include "debug.h"
31#include "wl12xx_80211.h" 31#include "wl12xx_80211.h"
32#include "io.h" 32#include "io.h"
33#include "tx.h" 33#include "tx.h"
34 34
35#define OCP_CMD_LOOP 32
36
37#define OCP_CMD_WRITE 0x1
38#define OCP_CMD_READ 0x2
39
40#define OCP_READY_MASK BIT(18)
41#define OCP_STATUS_MASK (BIT(16) | BIT(17))
42
43#define OCP_STATUS_NO_RESP 0x00000
44#define OCP_STATUS_OK 0x10000
45#define OCP_STATUS_REQ_FAILED 0x20000
46#define OCP_STATUS_RESP_ERROR 0x30000
47
48struct wl1271_partition_set wl12xx_part_table[PART_TABLE_LEN] = {
49 [PART_DOWN] = {
50 .mem = {
51 .start = 0x00000000,
52 .size = 0x000177c0
53 },
54 .reg = {
55 .start = REGISTERS_BASE,
56 .size = 0x00008800
57 },
58 .mem2 = {
59 .start = 0x00000000,
60 .size = 0x00000000
61 },
62 .mem3 = {
63 .start = 0x00000000,
64 .size = 0x00000000
65 },
66 },
67
68 [PART_WORK] = {
69 .mem = {
70 .start = 0x00040000,
71 .size = 0x00014fc0
72 },
73 .reg = {
74 .start = REGISTERS_BASE,
75 .size = 0x0000a000
76 },
77 .mem2 = {
78 .start = 0x003004f8,
79 .size = 0x00000004
80 },
81 .mem3 = {
82 .start = 0x00040404,
83 .size = 0x00000000
84 },
85 },
86
87 [PART_DRPW] = {
88 .mem = {
89 .start = 0x00040000,
90 .size = 0x00014fc0
91 },
92 .reg = {
93 .start = DRPW_BASE,
94 .size = 0x00006000
95 },
96 .mem2 = {
97 .start = 0x00000000,
98 .size = 0x00000000
99 },
100 .mem3 = {
101 .start = 0x00000000,
102 .size = 0x00000000
103 }
104 }
105};
106
107bool wl1271_set_block_size(struct wl1271 *wl) 35bool wl1271_set_block_size(struct wl1271 *wl)
108{ 36{
109 if (wl->if_ops->set_block_size) { 37 if (wl->if_ops->set_block_size) {
@@ -114,17 +42,53 @@ bool wl1271_set_block_size(struct wl1271 *wl)
114 return false; 42 return false;
115} 43}
116 44
117void wl1271_disable_interrupts(struct wl1271 *wl) 45void wlcore_disable_interrupts(struct wl1271 *wl)
118{ 46{
119 disable_irq(wl->irq); 47 disable_irq(wl->irq);
120} 48}
49EXPORT_SYMBOL_GPL(wlcore_disable_interrupts);
121 50
122void wl1271_enable_interrupts(struct wl1271 *wl) 51void wlcore_enable_interrupts(struct wl1271 *wl)
123{ 52{
124 enable_irq(wl->irq); 53 enable_irq(wl->irq);
125} 54}
55EXPORT_SYMBOL_GPL(wlcore_enable_interrupts);
126 56
127/* Set the SPI partitions to access the chip addresses 57int wlcore_translate_addr(struct wl1271 *wl, int addr)
58{
59 struct wlcore_partition_set *part = &wl->curr_part;
60
61 /*
62 * To translate, first check to which window of addresses the
63 * particular address belongs. Then subtract the starting address
64 * of that window from the address. Then, add offset of the
65 * translated region.
66 *
67 * The translated regions occur next to each other in physical device
68 * memory, so just add the sizes of the preceding address regions to
69 * get the offset to the new region.
70 */
71 if ((addr >= part->mem.start) &&
72 (addr < part->mem.start + part->mem.size))
73 return addr - part->mem.start;
74 else if ((addr >= part->reg.start) &&
75 (addr < part->reg.start + part->reg.size))
76 return addr - part->reg.start + part->mem.size;
77 else if ((addr >= part->mem2.start) &&
78 (addr < part->mem2.start + part->mem2.size))
79 return addr - part->mem2.start + part->mem.size +
80 part->reg.size;
81 else if ((addr >= part->mem3.start) &&
82 (addr < part->mem3.start + part->mem3.size))
83 return addr - part->mem3.start + part->mem.size +
84 part->reg.size + part->mem2.size;
85
86 WARN(1, "HW address 0x%x out of range", addr);
87 return 0;
88}
89EXPORT_SYMBOL_GPL(wlcore_translate_addr);
90
91/* Set the partitions to access the chip addresses
128 * 92 *
129 * To simplify driver code, a fixed (virtual) memory map is defined for 93 * To simplify driver code, a fixed (virtual) memory map is defined for
130 * register and memory addresses. Because in the chipset, in different stages 94 * register and memory addresses. Because in the chipset, in different stages
@@ -158,33 +122,43 @@ void wl1271_enable_interrupts(struct wl1271 *wl)
158 * | | 122 * | |
159 * 123 *
160 */ 124 */
161int wl1271_set_partition(struct wl1271 *wl, 125void wlcore_set_partition(struct wl1271 *wl,
162 struct wl1271_partition_set *p) 126 const struct wlcore_partition_set *p)
163{ 127{
164 /* copy partition info */ 128 /* copy partition info */
165 memcpy(&wl->part, p, sizeof(*p)); 129 memcpy(&wl->curr_part, p, sizeof(*p));
166 130
167 wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X", 131 wl1271_debug(DEBUG_IO, "mem_start %08X mem_size %08X",
168 p->mem.start, p->mem.size); 132 p->mem.start, p->mem.size);
169 wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X", 133 wl1271_debug(DEBUG_IO, "reg_start %08X reg_size %08X",
170 p->reg.start, p->reg.size); 134 p->reg.start, p->reg.size);
171 wl1271_debug(DEBUG_SPI, "mem2_start %08X mem2_size %08X", 135 wl1271_debug(DEBUG_IO, "mem2_start %08X mem2_size %08X",
172 p->mem2.start, p->mem2.size); 136 p->mem2.start, p->mem2.size);
173 wl1271_debug(DEBUG_SPI, "mem3_start %08X mem3_size %08X", 137 wl1271_debug(DEBUG_IO, "mem3_start %08X mem3_size %08X",
174 p->mem3.start, p->mem3.size); 138 p->mem3.start, p->mem3.size);
175 139
176 /* write partition info to the chipset */
177 wl1271_raw_write32(wl, HW_PART0_START_ADDR, p->mem.start); 140 wl1271_raw_write32(wl, HW_PART0_START_ADDR, p->mem.start);
178 wl1271_raw_write32(wl, HW_PART0_SIZE_ADDR, p->mem.size); 141 wl1271_raw_write32(wl, HW_PART0_SIZE_ADDR, p->mem.size);
179 wl1271_raw_write32(wl, HW_PART1_START_ADDR, p->reg.start); 142 wl1271_raw_write32(wl, HW_PART1_START_ADDR, p->reg.start);
180 wl1271_raw_write32(wl, HW_PART1_SIZE_ADDR, p->reg.size); 143 wl1271_raw_write32(wl, HW_PART1_SIZE_ADDR, p->reg.size);
181 wl1271_raw_write32(wl, HW_PART2_START_ADDR, p->mem2.start); 144 wl1271_raw_write32(wl, HW_PART2_START_ADDR, p->mem2.start);
182 wl1271_raw_write32(wl, HW_PART2_SIZE_ADDR, p->mem2.size); 145 wl1271_raw_write32(wl, HW_PART2_SIZE_ADDR, p->mem2.size);
146 /*
147 * We don't need the size of the last partition, as it is
148 * automatically calculated based on the total memory size and
149 * the sizes of the previous partitions.
150 */
183 wl1271_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start); 151 wl1271_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start);
152}
153EXPORT_SYMBOL_GPL(wlcore_set_partition);
184 154
185 return 0; 155void wlcore_select_partition(struct wl1271 *wl, u8 part)
156{
157 wl1271_debug(DEBUG_IO, "setting partition %d", part);
158
159 wlcore_set_partition(wl, &wl->ptable[part]);
186} 160}
187EXPORT_SYMBOL_GPL(wl1271_set_partition); 161EXPORT_SYMBOL_GPL(wlcore_select_partition);
188 162
189void wl1271_io_reset(struct wl1271 *wl) 163void wl1271_io_reset(struct wl1271 *wl)
190{ 164{
@@ -197,48 +171,3 @@ void wl1271_io_init(struct wl1271 *wl)
197 if (wl->if_ops->init) 171 if (wl->if_ops->init)
198 wl->if_ops->init(wl->dev); 172 wl->if_ops->init(wl->dev);
199} 173}
200
201void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val)
202{
203 /* write address >> 1 + 0x30000 to OCP_POR_CTR */
204 addr = (addr >> 1) + 0x30000;
205 wl1271_write32(wl, OCP_POR_CTR, addr);
206
207 /* write value to OCP_POR_WDATA */
208 wl1271_write32(wl, OCP_DATA_WRITE, val);
209
210 /* write 1 to OCP_CMD */
211 wl1271_write32(wl, OCP_CMD, OCP_CMD_WRITE);
212}
213
214u16 wl1271_top_reg_read(struct wl1271 *wl, int addr)
215{
216 u32 val;
217 int timeout = OCP_CMD_LOOP;
218
219 /* write address >> 1 + 0x30000 to OCP_POR_CTR */
220 addr = (addr >> 1) + 0x30000;
221 wl1271_write32(wl, OCP_POR_CTR, addr);
222
223 /* write 2 to OCP_CMD */
224 wl1271_write32(wl, OCP_CMD, OCP_CMD_READ);
225
226 /* poll for data ready */
227 do {
228 val = wl1271_read32(wl, OCP_DATA_READ);
229 } while (!(val & OCP_READY_MASK) && --timeout);
230
231 if (!timeout) {
232 wl1271_warning("Top register access timed out.");
233 return 0xffff;
234 }
235
236 /* check data status and return if OK */
237 if ((val & OCP_STATUS_MASK) == OCP_STATUS_OK)
238 return val & 0xffff;
239 else {
240 wl1271_warning("Top register access returned error.");
241 return 0xffff;
242 }
243}
244
diff --git a/drivers/net/wireless/wl12xx/io.h b/drivers/net/wireless/ti/wlcore/io.h
index 4fb3dab8c3b2..8942954b56a0 100644
--- a/drivers/net/wireless/wl12xx/io.h
+++ b/drivers/net/wireless/ti/wlcore/io.h
@@ -26,7 +26,6 @@
26#define __IO_H__ 26#define __IO_H__
27 27
28#include <linux/irqreturn.h> 28#include <linux/irqreturn.h>
29#include "reg.h"
30 29
31#define HW_ACCESS_MEMORY_MAX_RANGE 0x1FFC0 30#define HW_ACCESS_MEMORY_MAX_RANGE 0x1FFC0
32 31
@@ -43,15 +42,14 @@
43 42
44#define HW_ACCESS_PRAM_MAX_RANGE 0x3c000 43#define HW_ACCESS_PRAM_MAX_RANGE 0x3c000
45 44
46extern struct wl1271_partition_set wl12xx_part_table[PART_TABLE_LEN];
47
48struct wl1271; 45struct wl1271;
49 46
50void wl1271_disable_interrupts(struct wl1271 *wl); 47void wlcore_disable_interrupts(struct wl1271 *wl);
51void wl1271_enable_interrupts(struct wl1271 *wl); 48void wlcore_enable_interrupts(struct wl1271 *wl);
52 49
53void wl1271_io_reset(struct wl1271 *wl); 50void wl1271_io_reset(struct wl1271 *wl);
54void wl1271_io_init(struct wl1271 *wl); 51void wl1271_io_init(struct wl1271 *wl);
52int wlcore_translate_addr(struct wl1271 *wl, int addr);
55 53
56/* Raw target IO, address is not translated */ 54/* Raw target IO, address is not translated */
57static inline void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf, 55static inline void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf,
@@ -66,6 +64,18 @@ static inline void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf,
66 wl->if_ops->read(wl->dev, addr, buf, len, fixed); 64 wl->if_ops->read(wl->dev, addr, buf, len, fixed);
67} 65}
68 66
67static inline void wlcore_raw_read_data(struct wl1271 *wl, int reg, void *buf,
68 size_t len, bool fixed)
69{
70 wl1271_raw_read(wl, wl->rtable[reg], buf, len, fixed);
71}
72
73static inline void wlcore_raw_write_data(struct wl1271 *wl, int reg, void *buf,
74 size_t len, bool fixed)
75{
76 wl1271_raw_write(wl, wl->rtable[reg], buf, len, fixed);
77}
78
69static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr) 79static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr)
70{ 80{
71 wl1271_raw_read(wl, addr, &wl->buffer_32, 81 wl1271_raw_read(wl, addr, &wl->buffer_32,
@@ -81,36 +91,12 @@ static inline void wl1271_raw_write32(struct wl1271 *wl, int addr, u32 val)
81 sizeof(wl->buffer_32), false); 91 sizeof(wl->buffer_32), false);
82} 92}
83 93
84/* Translated target IO */
85static inline int wl1271_translate_addr(struct wl1271 *wl, int addr)
86{
87 /*
88 * To translate, first check to which window of addresses the
89 * particular address belongs. Then subtract the starting address
90 * of that window from the address. Then, add offset of the
91 * translated region.
92 *
93 * The translated regions occur next to each other in physical device
94 * memory, so just add the sizes of the preceding address regions to
95 * get the offset to the new region.
96 *
97 * Currently, only the two first regions are addressed, and the
98 * assumption is that all addresses will fall into either of those
99 * two.
100 */
101 if ((addr >= wl->part.reg.start) &&
102 (addr < wl->part.reg.start + wl->part.reg.size))
103 return addr - wl->part.reg.start + wl->part.mem.size;
104 else
105 return addr - wl->part.mem.start;
106}
107
108static inline void wl1271_read(struct wl1271 *wl, int addr, void *buf, 94static inline void wl1271_read(struct wl1271 *wl, int addr, void *buf,
109 size_t len, bool fixed) 95 size_t len, bool fixed)
110{ 96{
111 int physical; 97 int physical;
112 98
113 physical = wl1271_translate_addr(wl, addr); 99 physical = wlcore_translate_addr(wl, addr);
114 100
115 wl1271_raw_read(wl, physical, buf, len, fixed); 101 wl1271_raw_read(wl, physical, buf, len, fixed);
116} 102}
@@ -120,11 +106,23 @@ static inline void wl1271_write(struct wl1271 *wl, int addr, void *buf,
120{ 106{
121 int physical; 107 int physical;
122 108
123 physical = wl1271_translate_addr(wl, addr); 109 physical = wlcore_translate_addr(wl, addr);
124 110
125 wl1271_raw_write(wl, physical, buf, len, fixed); 111 wl1271_raw_write(wl, physical, buf, len, fixed);
126} 112}
127 113
114static inline void wlcore_write_data(struct wl1271 *wl, int reg, void *buf,
115 size_t len, bool fixed)
116{
117 wl1271_write(wl, wl->rtable[reg], buf, len, fixed);
118}
119
120static inline void wlcore_read_data(struct wl1271 *wl, int reg, void *buf,
121 size_t len, bool fixed)
122{
123 wl1271_read(wl, wl->rtable[reg], buf, len, fixed);
124}
125
128static inline void wl1271_read_hwaddr(struct wl1271 *wl, int hwaddr, 126static inline void wl1271_read_hwaddr(struct wl1271 *wl, int hwaddr,
129 void *buf, size_t len, bool fixed) 127 void *buf, size_t len, bool fixed)
130{ 128{
@@ -134,19 +132,30 @@ static inline void wl1271_read_hwaddr(struct wl1271 *wl, int hwaddr,
134 /* Addresses are stored internally as addresses to 32 bytes blocks */ 132 /* Addresses are stored internally as addresses to 32 bytes blocks */
135 addr = hwaddr << 5; 133 addr = hwaddr << 5;
136 134
137 physical = wl1271_translate_addr(wl, addr); 135 physical = wlcore_translate_addr(wl, addr);
138 136
139 wl1271_raw_read(wl, physical, buf, len, fixed); 137 wl1271_raw_read(wl, physical, buf, len, fixed);
140} 138}
141 139
142static inline u32 wl1271_read32(struct wl1271 *wl, int addr) 140static inline u32 wl1271_read32(struct wl1271 *wl, int addr)
143{ 141{
144 return wl1271_raw_read32(wl, wl1271_translate_addr(wl, addr)); 142 return wl1271_raw_read32(wl, wlcore_translate_addr(wl, addr));
145} 143}
146 144
147static inline void wl1271_write32(struct wl1271 *wl, int addr, u32 val) 145static inline void wl1271_write32(struct wl1271 *wl, int addr, u32 val)
148{ 146{
149 wl1271_raw_write32(wl, wl1271_translate_addr(wl, addr), val); 147 wl1271_raw_write32(wl, wlcore_translate_addr(wl, addr), val);
148}
149
150static inline u32 wlcore_read_reg(struct wl1271 *wl, int reg)
151{
152 return wl1271_raw_read32(wl,
153 wlcore_translate_addr(wl, wl->rtable[reg]));
154}
155
156static inline void wlcore_write_reg(struct wl1271 *wl, int reg, u32 val)
157{
158 wl1271_raw_write32(wl, wlcore_translate_addr(wl, wl->rtable[reg]), val);
150} 159}
151 160
152static inline void wl1271_power_off(struct wl1271 *wl) 161static inline void wl1271_power_off(struct wl1271 *wl)
@@ -164,13 +173,8 @@ static inline int wl1271_power_on(struct wl1271 *wl)
164 return ret; 173 return ret;
165} 174}
166 175
167 176void wlcore_set_partition(struct wl1271 *wl,
168/* Top Register IO */ 177 const struct wlcore_partition_set *p);
169void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val);
170u16 wl1271_top_reg_read(struct wl1271 *wl, int addr);
171
172int wl1271_set_partition(struct wl1271 *wl,
173 struct wl1271_partition_set *p);
174 178
175bool wl1271_set_block_size(struct wl1271 *wl); 179bool wl1271_set_block_size(struct wl1271 *wl);
176 180
@@ -178,4 +182,6 @@ bool wl1271_set_block_size(struct wl1271 *wl);
178 182
179int wl1271_tx_dummy_packet(struct wl1271 *wl); 183int wl1271_tx_dummy_packet(struct wl1271 *wl);
180 184
185void wlcore_select_partition(struct wl1271 *wl, u8 part);
186
181#endif 187#endif
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 39002363611e..2b0f987660c6 100644
--- a/drivers/net/wireless/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -35,10 +35,9 @@
35#include <linux/sched.h> 35#include <linux/sched.h>
36#include <linux/interrupt.h> 36#include <linux/interrupt.h>
37 37
38#include "wl12xx.h" 38#include "wlcore.h"
39#include "debug.h" 39#include "debug.h"
40#include "wl12xx_80211.h" 40#include "wl12xx_80211.h"
41#include "reg.h"
42#include "io.h" 41#include "io.h"
43#include "event.h" 42#include "event.h"
44#include "tx.h" 43#include "tx.h"
@@ -50,342 +49,15 @@
50#include "boot.h" 49#include "boot.h"
51#include "testmode.h" 50#include "testmode.h"
52#include "scan.h" 51#include "scan.h"
52#include "hw_ops.h"
53 53
54#define WL1271_BOOT_RETRIES 3 54#define WL1271_BOOT_RETRIES 3
55 55
56static struct conf_drv_settings default_conf = { 56#define WL1271_BOOT_RETRIES 3
57 .sg = {
58 .params = {
59 [CONF_SG_ACL_BT_MASTER_MIN_BR] = 10,
60 [CONF_SG_ACL_BT_MASTER_MAX_BR] = 180,
61 [CONF_SG_ACL_BT_SLAVE_MIN_BR] = 10,
62 [CONF_SG_ACL_BT_SLAVE_MAX_BR] = 180,
63 [CONF_SG_ACL_BT_MASTER_MIN_EDR] = 10,
64 [CONF_SG_ACL_BT_MASTER_MAX_EDR] = 80,
65 [CONF_SG_ACL_BT_SLAVE_MIN_EDR] = 10,
66 [CONF_SG_ACL_BT_SLAVE_MAX_EDR] = 80,
67 [CONF_SG_ACL_WLAN_PS_MASTER_BR] = 8,
68 [CONF_SG_ACL_WLAN_PS_SLAVE_BR] = 8,
69 [CONF_SG_ACL_WLAN_PS_MASTER_EDR] = 20,
70 [CONF_SG_ACL_WLAN_PS_SLAVE_EDR] = 20,
71 [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_BR] = 20,
72 [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_BR] = 35,
73 [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_BR] = 16,
74 [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_BR] = 35,
75 [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_EDR] = 32,
76 [CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_EDR] = 50,
77 [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_EDR] = 28,
78 [CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_EDR] = 50,
79 [CONF_SG_ACL_ACTIVE_SCAN_WLAN_BR] = 10,
80 [CONF_SG_ACL_ACTIVE_SCAN_WLAN_EDR] = 20,
81 [CONF_SG_ACL_PASSIVE_SCAN_BT_BR] = 75,
82 [CONF_SG_ACL_PASSIVE_SCAN_WLAN_BR] = 15,
83 [CONF_SG_ACL_PASSIVE_SCAN_BT_EDR] = 27,
84 [CONF_SG_ACL_PASSIVE_SCAN_WLAN_EDR] = 17,
85 /* active scan params */
86 [CONF_SG_AUTO_SCAN_PROBE_REQ] = 170,
87 [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50,
88 [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100,
89 /* passive scan params */
90 [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_BR] = 800,
91 [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_EDR] = 200,
92 [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3] = 200,
93 /* passive scan in dual antenna params */
94 [CONF_SG_CONSECUTIVE_HV3_IN_PASSIVE_SCAN] = 0,
95 [CONF_SG_BCN_HV3_COLLISION_THRESH_IN_PASSIVE_SCAN] = 0,
96 [CONF_SG_TX_RX_PROTECTION_BWIDTH_IN_PASSIVE_SCAN] = 0,
97 /* general params */
98 [CONF_SG_STA_FORCE_PS_IN_BT_SCO] = 1,
99 [CONF_SG_ANTENNA_CONFIGURATION] = 0,
100 [CONF_SG_BEACON_MISS_PERCENT] = 60,
101 [CONF_SG_DHCP_TIME] = 5000,
102 [CONF_SG_RXT] = 1200,
103 [CONF_SG_TXT] = 1000,
104 [CONF_SG_ADAPTIVE_RXT_TXT] = 1,
105 [CONF_SG_GENERAL_USAGE_BIT_MAP] = 3,
106 [CONF_SG_HV3_MAX_SERVED] = 6,
107 [CONF_SG_PS_POLL_TIMEOUT] = 10,
108 [CONF_SG_UPSD_TIMEOUT] = 10,
109 [CONF_SG_CONSECUTIVE_CTS_THRESHOLD] = 2,
110 [CONF_SG_STA_RX_WINDOW_AFTER_DTIM] = 5,
111 [CONF_SG_STA_CONNECTION_PROTECTION_TIME] = 30,
112 /* AP params */
113 [CONF_AP_BEACON_MISS_TX] = 3,
114 [CONF_AP_RX_WINDOW_AFTER_BEACON] = 10,
115 [CONF_AP_BEACON_WINDOW_INTERVAL] = 2,
116 [CONF_AP_CONNECTION_PROTECTION_TIME] = 0,
117 [CONF_AP_BT_ACL_VAL_BT_SERVE_TIME] = 25,
118 [CONF_AP_BT_ACL_VAL_WL_SERVE_TIME] = 25,
119 /* CTS Diluting params */
120 [CONF_SG_CTS_DILUTED_BAD_RX_PACKETS_TH] = 0,
121 [CONF_SG_CTS_CHOP_IN_DUAL_ANT_SCO_MASTER] = 0,
122 },
123 .state = CONF_SG_PROTECTIVE,
124 },
125 .rx = {
126 .rx_msdu_life_time = 512000,
127 .packet_detection_threshold = 0,
128 .ps_poll_timeout = 15,
129 .upsd_timeout = 15,
130 .rts_threshold = IEEE80211_MAX_RTS_THRESHOLD,
131 .rx_cca_threshold = 0,
132 .irq_blk_threshold = 0xFFFF,
133 .irq_pkt_threshold = 0,
134 .irq_timeout = 600,
135 .queue_type = CONF_RX_QUEUE_TYPE_LOW_PRIORITY,
136 },
137 .tx = {
138 .tx_energy_detection = 0,
139 .sta_rc_conf = {
140 .enabled_rates = 0,
141 .short_retry_limit = 10,
142 .long_retry_limit = 10,
143 .aflags = 0,
144 },
145 .ac_conf_count = 4,
146 .ac_conf = {
147 [CONF_TX_AC_BE] = {
148 .ac = CONF_TX_AC_BE,
149 .cw_min = 15,
150 .cw_max = 63,
151 .aifsn = 3,
152 .tx_op_limit = 0,
153 },
154 [CONF_TX_AC_BK] = {
155 .ac = CONF_TX_AC_BK,
156 .cw_min = 15,
157 .cw_max = 63,
158 .aifsn = 7,
159 .tx_op_limit = 0,
160 },
161 [CONF_TX_AC_VI] = {
162 .ac = CONF_TX_AC_VI,
163 .cw_min = 15,
164 .cw_max = 63,
165 .aifsn = CONF_TX_AIFS_PIFS,
166 .tx_op_limit = 3008,
167 },
168 [CONF_TX_AC_VO] = {
169 .ac = CONF_TX_AC_VO,
170 .cw_min = 15,
171 .cw_max = 63,
172 .aifsn = CONF_TX_AIFS_PIFS,
173 .tx_op_limit = 1504,
174 },
175 },
176 .max_tx_retries = 100,
177 .ap_aging_period = 300,
178 .tid_conf_count = 4,
179 .tid_conf = {
180 [CONF_TX_AC_BE] = {
181 .queue_id = CONF_TX_AC_BE,
182 .channel_type = CONF_CHANNEL_TYPE_EDCF,
183 .tsid = CONF_TX_AC_BE,
184 .ps_scheme = CONF_PS_SCHEME_LEGACY,
185 .ack_policy = CONF_ACK_POLICY_LEGACY,
186 .apsd_conf = {0, 0},
187 },
188 [CONF_TX_AC_BK] = {
189 .queue_id = CONF_TX_AC_BK,
190 .channel_type = CONF_CHANNEL_TYPE_EDCF,
191 .tsid = CONF_TX_AC_BK,
192 .ps_scheme = CONF_PS_SCHEME_LEGACY,
193 .ack_policy = CONF_ACK_POLICY_LEGACY,
194 .apsd_conf = {0, 0},
195 },
196 [CONF_TX_AC_VI] = {
197 .queue_id = CONF_TX_AC_VI,
198 .channel_type = CONF_CHANNEL_TYPE_EDCF,
199 .tsid = CONF_TX_AC_VI,
200 .ps_scheme = CONF_PS_SCHEME_LEGACY,
201 .ack_policy = CONF_ACK_POLICY_LEGACY,
202 .apsd_conf = {0, 0},
203 },
204 [CONF_TX_AC_VO] = {
205 .queue_id = CONF_TX_AC_VO,
206 .channel_type = CONF_CHANNEL_TYPE_EDCF,
207 .tsid = CONF_TX_AC_VO,
208 .ps_scheme = CONF_PS_SCHEME_LEGACY,
209 .ack_policy = CONF_ACK_POLICY_LEGACY,
210 .apsd_conf = {0, 0},
211 },
212 },
213 .frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD,
214 .tx_compl_timeout = 700,
215 .tx_compl_threshold = 4,
216 .basic_rate = CONF_HW_BIT_RATE_1MBPS,
217 .basic_rate_5 = CONF_HW_BIT_RATE_6MBPS,
218 .tmpl_short_retry_limit = 10,
219 .tmpl_long_retry_limit = 10,
220 .tx_watchdog_timeout = 5000,
221 },
222 .conn = {
223 .wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
224 .listen_interval = 1,
225 .suspend_wake_up_event = CONF_WAKE_UP_EVENT_N_DTIM,
226 .suspend_listen_interval = 3,
227 .bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED,
228 .bcn_filt_ie_count = 2,
229 .bcn_filt_ie = {
230 [0] = {
231 .ie = WLAN_EID_CHANNEL_SWITCH,
232 .rule = CONF_BCN_RULE_PASS_ON_APPEARANCE,
233 },
234 [1] = {
235 .ie = WLAN_EID_HT_INFORMATION,
236 .rule = CONF_BCN_RULE_PASS_ON_CHANGE,
237 },
238 },
239 .synch_fail_thold = 10,
240 .bss_lose_timeout = 100,
241 .beacon_rx_timeout = 10000,
242 .broadcast_timeout = 20000,
243 .rx_broadcast_in_ps = 1,
244 .ps_poll_threshold = 10,
245 .bet_enable = CONF_BET_MODE_ENABLE,
246 .bet_max_consecutive = 50,
247 .psm_entry_retries = 8,
248 .psm_exit_retries = 16,
249 .psm_entry_nullfunc_retries = 3,
250 .dynamic_ps_timeout = 200,
251 .forced_ps = false,
252 .keep_alive_interval = 55000,
253 .max_listen_interval = 20,
254 },
255 .itrim = {
256 .enable = false,
257 .timeout = 50000,
258 },
259 .pm_config = {
260 .host_clk_settling_time = 5000,
261 .host_fast_wakeup_support = false
262 },
263 .roam_trigger = {
264 .trigger_pacing = 1,
265 .avg_weight_rssi_beacon = 20,
266 .avg_weight_rssi_data = 10,
267 .avg_weight_snr_beacon = 20,
268 .avg_weight_snr_data = 10,
269 },
270 .scan = {
271 .min_dwell_time_active = 7500,
272 .max_dwell_time_active = 30000,
273 .min_dwell_time_passive = 100000,
274 .max_dwell_time_passive = 100000,
275 .num_probe_reqs = 2,
276 .split_scan_timeout = 50000,
277 },
278 .sched_scan = {
279 /* sched_scan requires dwell times in TU instead of TU/1000 */
280 .min_dwell_time_active = 30,
281 .max_dwell_time_active = 60,
282 .dwell_time_passive = 100,
283 .dwell_time_dfs = 150,
284 .num_probe_reqs = 2,
285 .rssi_threshold = -90,
286 .snr_threshold = 0,
287 },
288 .rf = {
289 .tx_per_channel_power_compensation_2 = {
290 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
291 },
292 .tx_per_channel_power_compensation_5 = {
293 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
294 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
295 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
296 },
297 },
298 .ht = {
299 .rx_ba_win_size = 8,
300 .tx_ba_win_size = 64,
301 .inactivity_timeout = 10000,
302 .tx_ba_tid_bitmap = CONF_TX_BA_ENABLED_TID_BITMAP,
303 },
304 .mem_wl127x = {
305 .num_stations = 1,
306 .ssid_profiles = 1,
307 .rx_block_num = 70,
308 .tx_min_block_num = 40,
309 .dynamic_memory = 1,
310 .min_req_tx_blocks = 100,
311 .min_req_rx_blocks = 22,
312 .tx_min = 27,
313 },
314 .mem_wl128x = {
315 .num_stations = 1,
316 .ssid_profiles = 1,
317 .rx_block_num = 40,
318 .tx_min_block_num = 40,
319 .dynamic_memory = 1,
320 .min_req_tx_blocks = 45,
321 .min_req_rx_blocks = 22,
322 .tx_min = 27,
323 },
324 .fm_coex = {
325 .enable = true,
326 .swallow_period = 5,
327 .n_divider_fref_set_1 = 0xff, /* default */
328 .n_divider_fref_set_2 = 12,
329 .m_divider_fref_set_1 = 148,
330 .m_divider_fref_set_2 = 0xffff, /* default */
331 .coex_pll_stabilization_time = 0xffffffff, /* default */
332 .ldo_stabilization_time = 0xffff, /* default */
333 .fm_disturbed_band_margin = 0xff, /* default */
334 .swallow_clk_diff = 0xff, /* default */
335 },
336 .rx_streaming = {
337 .duration = 150,
338 .queues = 0x1,
339 .interval = 20,
340 .always = 0,
341 },
342 .fwlog = {
343 .mode = WL12XX_FWLOG_ON_DEMAND,
344 .mem_blocks = 2,
345 .severity = 0,
346 .timestamp = WL12XX_FWLOG_TIMESTAMP_DISABLED,
347 .output = WL12XX_FWLOG_OUTPUT_HOST,
348 .threshold = 0,
349 },
350 .hci_io_ds = HCI_IO_DS_6MA,
351 .rate = {
352 .rate_retry_score = 32000,
353 .per_add = 8192,
354 .per_th1 = 2048,
355 .per_th2 = 4096,
356 .max_per = 8100,
357 .inverse_curiosity_factor = 5,
358 .tx_fail_low_th = 4,
359 .tx_fail_high_th = 10,
360 .per_alpha_shift = 4,
361 .per_add_shift = 13,
362 .per_beta1_shift = 10,
363 .per_beta2_shift = 8,
364 .rate_check_up = 2,
365 .rate_check_down = 12,
366 .rate_retry_policy = {
367 0x00, 0x00, 0x00, 0x00, 0x00,
368 0x00, 0x00, 0x00, 0x00, 0x00,
369 0x00, 0x00, 0x00,
370 },
371 },
372 .hangover = {
373 .recover_time = 0,
374 .hangover_period = 20,
375 .dynamic_mode = 1,
376 .early_termination_mode = 1,
377 .max_period = 20,
378 .min_period = 1,
379 .increase_delta = 1,
380 .decrease_delta = 2,
381 .quiet_time = 4,
382 .increase_time = 1,
383 .window_size = 16,
384 },
385};
386 57
387static char *fwlog_param; 58static char *fwlog_param;
388static bool bug_on_recovery; 59static bool bug_on_recovery;
60static bool no_recovery;
389 61
390static void __wl1271_op_remove_interface(struct wl1271 *wl, 62static void __wl1271_op_remove_interface(struct wl1271 *wl,
391 struct ieee80211_vif *vif, 63 struct ieee80211_vif *vif,
@@ -628,22 +300,8 @@ out:
628 mutex_unlock(&wl->mutex); 300 mutex_unlock(&wl->mutex);
629} 301}
630 302
631static void wl1271_conf_init(struct wl1271 *wl) 303static void wlcore_adjust_conf(struct wl1271 *wl)
632{ 304{
633
634 /*
635 * This function applies the default configuration to the driver. This
636 * function is invoked upon driver load (spi probe.)
637 *
638 * The configuration is stored in a run-time structure in order to
639 * facilitate for run-time adjustment of any of the parameters. Making
640 * changes to the configuration structure will apply the new values on
641 * the next interface up (wl1271_op_start.)
642 */
643
644 /* apply driver default configuration */
645 memcpy(&wl->conf, &default_conf, sizeof(default_conf));
646
647 /* Adjust settings according to optional module parameters */ 305 /* Adjust settings according to optional module parameters */
648 if (fwlog_param) { 306 if (fwlog_param) {
649 if (!strcmp(fwlog_param, "continuous")) { 307 if (!strcmp(fwlog_param, "continuous")) {
@@ -666,28 +324,7 @@ static int wl1271_plt_init(struct wl1271 *wl)
666{ 324{
667 int ret; 325 int ret;
668 326
669 if (wl->chip.id == CHIP_ID_1283_PG20) 327 ret = wl->ops->hw_init(wl);
670 ret = wl128x_cmd_general_parms(wl);
671 else
672 ret = wl1271_cmd_general_parms(wl);
673 if (ret < 0)
674 return ret;
675
676 if (wl->chip.id == CHIP_ID_1283_PG20)
677 ret = wl128x_cmd_radio_parms(wl);
678 else
679 ret = wl1271_cmd_radio_parms(wl);
680 if (ret < 0)
681 return ret;
682
683 if (wl->chip.id != CHIP_ID_1283_PG20) {
684 ret = wl1271_cmd_ext_radio_parms(wl);
685 if (ret < 0)
686 return ret;
687 }
688
689 /* Chip-specific initializations */
690 ret = wl1271_chip_specific_init(wl);
691 if (ret < 0) 328 if (ret < 0)
692 return ret; 329 return ret;
693 330
@@ -750,7 +387,7 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
750 387
751static void wl12xx_irq_update_links_status(struct wl1271 *wl, 388static void wl12xx_irq_update_links_status(struct wl1271 *wl,
752 struct wl12xx_vif *wlvif, 389 struct wl12xx_vif *wlvif,
753 struct wl12xx_fw_status *status) 390 struct wl_fw_status *status)
754{ 391{
755 struct wl1271_link *lnk; 392 struct wl1271_link *lnk;
756 u32 cur_fw_ps_map; 393 u32 cur_fw_ps_map;
@@ -770,9 +407,10 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl,
770 407
771 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) { 408 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) {
772 lnk = &wl->links[hlid]; 409 lnk = &wl->links[hlid];
773 cnt = status->tx_lnk_free_pkts[hlid] - lnk->prev_freed_pkts; 410 cnt = status->counters.tx_lnk_free_pkts[hlid] -
411 lnk->prev_freed_pkts;
774 412
775 lnk->prev_freed_pkts = status->tx_lnk_free_pkts[hlid]; 413 lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[hlid];
776 lnk->allocated_pkts -= cnt; 414 lnk->allocated_pkts -= cnt;
777 415
778 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid, 416 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
@@ -781,15 +419,19 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl,
781} 419}
782 420
783static void wl12xx_fw_status(struct wl1271 *wl, 421static void wl12xx_fw_status(struct wl1271 *wl,
784 struct wl12xx_fw_status *status) 422 struct wl_fw_status *status)
785{ 423{
786 struct wl12xx_vif *wlvif; 424 struct wl12xx_vif *wlvif;
787 struct timespec ts; 425 struct timespec ts;
788 u32 old_tx_blk_count = wl->tx_blocks_available; 426 u32 old_tx_blk_count = wl->tx_blocks_available;
789 int avail, freed_blocks; 427 int avail, freed_blocks;
790 int i; 428 int i;
429 size_t status_len;
430
431 status_len = sizeof(*status) + wl->fw_status_priv_len;
791 432
792 wl1271_raw_read(wl, FW_STATUS_ADDR, status, sizeof(*status), false); 433 wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status,
434 status_len, false);
793 435
794 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, " 436 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
795 "drv_rx_counter = %d, tx_results_counter = %d)", 437 "drv_rx_counter = %d, tx_results_counter = %d)",
@@ -801,10 +443,10 @@ static void wl12xx_fw_status(struct wl1271 *wl,
801 for (i = 0; i < NUM_TX_QUEUES; i++) { 443 for (i = 0; i < NUM_TX_QUEUES; i++) {
802 /* prevent wrap-around in freed-packets counter */ 444 /* prevent wrap-around in freed-packets counter */
803 wl->tx_allocated_pkts[i] -= 445 wl->tx_allocated_pkts[i] -=
804 (status->tx_released_pkts[i] - 446 (status->counters.tx_released_pkts[i] -
805 wl->tx_pkts_freed[i]) & 0xff; 447 wl->tx_pkts_freed[i]) & 0xff;
806 448
807 wl->tx_pkts_freed[i] = status->tx_released_pkts[i]; 449 wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
808 } 450 }
809 451
810 /* prevent wrap-around in total blocks counter */ 452 /* prevent wrap-around in total blocks counter */
@@ -927,6 +569,9 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
927 smp_mb__after_clear_bit(); 569 smp_mb__after_clear_bit();
928 570
929 wl12xx_fw_status(wl, wl->fw_status); 571 wl12xx_fw_status(wl, wl->fw_status);
572
573 wlcore_hw_tx_immediate_compl(wl);
574
930 intr = le32_to_cpu(wl->fw_status->intr); 575 intr = le32_to_cpu(wl->fw_status->intr);
931 intr &= WL1271_INTR_MASK; 576 intr &= WL1271_INTR_MASK;
932 if (!intr) { 577 if (!intr) {
@@ -963,9 +608,7 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
963 } 608 }
964 609
965 /* check for tx results */ 610 /* check for tx results */
966 if (wl->fw_status->tx_results_counter != 611 wlcore_hw_tx_delayed_compl(wl);
967 (wl->tx_results_count & 0xff))
968 wl1271_tx_complete(wl);
969 612
970 /* Make sure the deferred queues don't get too long */ 613 /* Make sure the deferred queues don't get too long */
971 defer_count = skb_queue_len(&wl->deferred_tx_queue) + 614 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
@@ -1046,10 +689,7 @@ static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
1046 689
1047 if (plt) { 690 if (plt) {
1048 fw_type = WL12XX_FW_TYPE_PLT; 691 fw_type = WL12XX_FW_TYPE_PLT;
1049 if (wl->chip.id == CHIP_ID_1283_PG20) 692 fw_name = wl->plt_fw_name;
1050 fw_name = WL128X_PLT_FW_NAME;
1051 else
1052 fw_name = WL127X_PLT_FW_NAME;
1053 } else { 693 } else {
1054 /* 694 /*
1055 * we can't call wl12xx_get_vif_count() here because 695 * we can't call wl12xx_get_vif_count() here because
@@ -1057,16 +697,10 @@ static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
1057 */ 697 */
1058 if (wl->last_vif_count > 1) { 698 if (wl->last_vif_count > 1) {
1059 fw_type = WL12XX_FW_TYPE_MULTI; 699 fw_type = WL12XX_FW_TYPE_MULTI;
1060 if (wl->chip.id == CHIP_ID_1283_PG20) 700 fw_name = wl->mr_fw_name;
1061 fw_name = WL128X_FW_NAME_MULTI;
1062 else
1063 fw_name = WL127X_FW_NAME_MULTI;
1064 } else { 701 } else {
1065 fw_type = WL12XX_FW_TYPE_NORMAL; 702 fw_type = WL12XX_FW_TYPE_NORMAL;
1066 if (wl->chip.id == CHIP_ID_1283_PG20) 703 fw_name = wl->sr_fw_name;
1067 fw_name = WL128X_FW_NAME_SINGLE;
1068 else
1069 fw_name = WL127X_FW_NAME_SINGLE;
1070 } 704 }
1071 } 705 }
1072 706
@@ -1173,7 +807,7 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
1173 u32 first_addr; 807 u32 first_addr;
1174 u8 *block; 808 u8 *block;
1175 809
1176 if ((wl->quirks & WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED) || 810 if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
1177 (wl->conf.fwlog.mode != WL12XX_FWLOG_ON_DEMAND) || 811 (wl->conf.fwlog.mode != WL12XX_FWLOG_ON_DEMAND) ||
1178 (wl->conf.fwlog.mem_blocks == 0)) 812 (wl->conf.fwlog.mem_blocks == 0))
1179 return; 813 return;
@@ -1239,11 +873,20 @@ static void wl1271_recovery_work(struct work_struct *work)
1239 wl12xx_read_fwlog_panic(wl); 873 wl12xx_read_fwlog_panic(wl);
1240 874
1241 wl1271_info("Hardware recovery in progress. FW ver: %s pc: 0x%x", 875 wl1271_info("Hardware recovery in progress. FW ver: %s pc: 0x%x",
1242 wl->chip.fw_ver_str, wl1271_read32(wl, SCR_PAD4)); 876 wl->chip.fw_ver_str,
877 wlcore_read_reg(wl, REG_PC_ON_RECOVERY));
1243 878
1244 BUG_ON(bug_on_recovery && 879 BUG_ON(bug_on_recovery &&
1245 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)); 880 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
1246 881
882 if (no_recovery) {
883 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
884 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
885 goto out_unlock;
886 }
887
888 BUG_ON(bug_on_recovery);
889
1247 /* 890 /*
1248 * Advance security sequence number to overcome potential progress 891 * Advance security sequence number to overcome potential progress
1249 * in the firmware during recovery. This doens't hurt if the network is 892 * in the firmware during recovery. This doens't hurt if the network is
@@ -1290,10 +933,7 @@ out_unlock:
1290 933
1291static void wl1271_fw_wakeup(struct wl1271 *wl) 934static void wl1271_fw_wakeup(struct wl1271 *wl)
1292{ 935{
1293 u32 elp_reg; 936 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
1294
1295 elp_reg = ELPCTRL_WAKE_UP;
1296 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
1297} 937}
1298 938
1299static int wl1271_setup(struct wl1271 *wl) 939static int wl1271_setup(struct wl1271 *wl)
@@ -1323,7 +963,7 @@ static int wl12xx_set_power_on(struct wl1271 *wl)
1323 wl1271_io_reset(wl); 963 wl1271_io_reset(wl);
1324 wl1271_io_init(wl); 964 wl1271_io_init(wl);
1325 965
1326 wl1271_set_partition(wl, &wl12xx_part_table[PART_DOWN]); 966 wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1327 967
1328 /* ELP module wake up */ 968 /* ELP module wake up */
1329 wl1271_fw_wakeup(wl); 969 wl1271_fw_wakeup(wl);
@@ -1348,44 +988,18 @@ static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1348 * negligible, we use the same block size for all different 988 * negligible, we use the same block size for all different
1349 * chip types. 989 * chip types.
1350 */ 990 */
1351 if (!wl1271_set_block_size(wl)) 991 if (wl1271_set_block_size(wl))
1352 wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT; 992 wl->quirks |= WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1353
1354 switch (wl->chip.id) {
1355 case CHIP_ID_1271_PG10:
1356 wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete",
1357 wl->chip.id);
1358 993
1359 ret = wl1271_setup(wl); 994 ret = wl->ops->identify_chip(wl);
1360 if (ret < 0) 995 if (ret < 0)
1361 goto out; 996 goto out;
1362 wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
1363 break;
1364
1365 case CHIP_ID_1271_PG20:
1366 wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
1367 wl->chip.id);
1368
1369 ret = wl1271_setup(wl);
1370 if (ret < 0)
1371 goto out;
1372 wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
1373 break;
1374 997
1375 case CHIP_ID_1283_PG20: 998 /* TODO: make sure the lower driver has set things up correctly */
1376 wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1283 PG20)",
1377 wl->chip.id);
1378 999
1379 ret = wl1271_setup(wl); 1000 ret = wl1271_setup(wl);
1380 if (ret < 0) 1001 if (ret < 0)
1381 goto out;
1382 break;
1383 case CHIP_ID_1283_PG10:
1384 default:
1385 wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
1386 ret = -ENODEV;
1387 goto out; 1002 goto out;
1388 }
1389 1003
1390 ret = wl12xx_fetch_firmware(wl, plt); 1004 ret = wl12xx_fetch_firmware(wl, plt);
1391 if (ret < 0) 1005 if (ret < 0)
@@ -1425,7 +1039,7 @@ int wl1271_plt_start(struct wl1271 *wl)
1425 if (ret < 0) 1039 if (ret < 0)
1426 goto power_off; 1040 goto power_off;
1427 1041
1428 ret = wl1271_boot(wl); 1042 ret = wl->ops->boot(wl);
1429 if (ret < 0) 1043 if (ret < 0)
1430 goto power_off; 1044 goto power_off;
1431 1045
@@ -1454,7 +1068,7 @@ irq_disable:
1454 work function will not do anything.) Also, any other 1068 work function will not do anything.) Also, any other
1455 possible concurrent operations will fail due to the 1069 possible concurrent operations will fail due to the
1456 current state, hence the wl1271 struct should be safe. */ 1070 current state, hence the wl1271 struct should be safe. */
1457 wl1271_disable_interrupts(wl); 1071 wlcore_disable_interrupts(wl);
1458 wl1271_flush_deferred_work(wl); 1072 wl1271_flush_deferred_work(wl);
1459 cancel_work_sync(&wl->netstack_work); 1073 cancel_work_sync(&wl->netstack_work);
1460 mutex_lock(&wl->mutex); 1074 mutex_lock(&wl->mutex);
@@ -1481,7 +1095,7 @@ int wl1271_plt_stop(struct wl1271 *wl)
1481 * Otherwise, the interrupt handler might be called and exit without 1095 * Otherwise, the interrupt handler might be called and exit without
1482 * reading the interrupt status. 1096 * reading the interrupt status.
1483 */ 1097 */
1484 wl1271_disable_interrupts(wl); 1098 wlcore_disable_interrupts(wl);
1485 mutex_lock(&wl->mutex); 1099 mutex_lock(&wl->mutex);
1486 if (!wl->plt) { 1100 if (!wl->plt) {
1487 mutex_unlock(&wl->mutex); 1101 mutex_unlock(&wl->mutex);
@@ -1491,7 +1105,7 @@ int wl1271_plt_stop(struct wl1271 *wl)
1491 * may have been disabled when op_stop was called. It will, 1105 * may have been disabled when op_stop was called. It will,
1492 * however, balance the above call to disable_interrupts(). 1106 * however, balance the above call to disable_interrupts().
1493 */ 1107 */
1494 wl1271_enable_interrupts(wl); 1108 wlcore_enable_interrupts(wl);
1495 1109
1496 wl1271_error("cannot power down because not in PLT " 1110 wl1271_error("cannot power down because not in PLT "
1497 "state: %d", wl->state); 1111 "state: %d", wl->state);
@@ -1652,14 +1266,12 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1652{ 1266{
1653 int ret = 0; 1267 int ret = 0;
1654 1268
1655 mutex_lock(&wl->mutex);
1656
1657 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) 1269 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1658 goto out_unlock; 1270 goto out;
1659 1271
1660 ret = wl1271_ps_elp_wakeup(wl); 1272 ret = wl1271_ps_elp_wakeup(wl);
1661 if (ret < 0) 1273 if (ret < 0)
1662 goto out_unlock; 1274 goto out;
1663 1275
1664 ret = wl1271_acx_wake_up_conditions(wl, wlvif, 1276 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1665 wl->conf.conn.suspend_wake_up_event, 1277 wl->conf.conn.suspend_wake_up_event,
@@ -1668,11 +1280,9 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1668 if (ret < 0) 1280 if (ret < 0)
1669 wl1271_error("suspend: set wake up conditions failed: %d", ret); 1281 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1670 1282
1671
1672 wl1271_ps_elp_sleep(wl); 1283 wl1271_ps_elp_sleep(wl);
1673 1284
1674out_unlock: 1285out:
1675 mutex_unlock(&wl->mutex);
1676 return ret; 1286 return ret;
1677 1287
1678} 1288}
@@ -1682,20 +1292,17 @@ static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1682{ 1292{
1683 int ret = 0; 1293 int ret = 0;
1684 1294
1685 mutex_lock(&wl->mutex);
1686
1687 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) 1295 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1688 goto out_unlock; 1296 goto out;
1689 1297
1690 ret = wl1271_ps_elp_wakeup(wl); 1298 ret = wl1271_ps_elp_wakeup(wl);
1691 if (ret < 0) 1299 if (ret < 0)
1692 goto out_unlock; 1300 goto out;
1693 1301
1694 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true); 1302 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1695 1303
1696 wl1271_ps_elp_sleep(wl); 1304 wl1271_ps_elp_sleep(wl);
1697out_unlock: 1305out:
1698 mutex_unlock(&wl->mutex);
1699 return ret; 1306 return ret;
1700 1307
1701} 1308}
@@ -1720,10 +1327,9 @@ static void wl1271_configure_resume(struct wl1271 *wl,
1720 if ((!is_ap) && (!is_sta)) 1327 if ((!is_ap) && (!is_sta))
1721 return; 1328 return;
1722 1329
1723 mutex_lock(&wl->mutex);
1724 ret = wl1271_ps_elp_wakeup(wl); 1330 ret = wl1271_ps_elp_wakeup(wl);
1725 if (ret < 0) 1331 if (ret < 0)
1726 goto out; 1332 return;
1727 1333
1728 if (is_sta) { 1334 if (is_sta) {
1729 ret = wl1271_acx_wake_up_conditions(wl, wlvif, 1335 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
@@ -1739,8 +1345,6 @@ static void wl1271_configure_resume(struct wl1271 *wl,
1739 } 1345 }
1740 1346
1741 wl1271_ps_elp_sleep(wl); 1347 wl1271_ps_elp_sleep(wl);
1742out:
1743 mutex_unlock(&wl->mutex);
1744} 1348}
1745 1349
1746static int wl1271_op_suspend(struct ieee80211_hw *hw, 1350static int wl1271_op_suspend(struct ieee80211_hw *hw,
@@ -1755,6 +1359,7 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
1755 1359
1756 wl1271_tx_flush(wl); 1360 wl1271_tx_flush(wl);
1757 1361
1362 mutex_lock(&wl->mutex);
1758 wl->wow_enabled = true; 1363 wl->wow_enabled = true;
1759 wl12xx_for_each_wlvif(wl, wlvif) { 1364 wl12xx_for_each_wlvif(wl, wlvif) {
1760 ret = wl1271_configure_suspend(wl, wlvif); 1365 ret = wl1271_configure_suspend(wl, wlvif);
@@ -1763,6 +1368,7 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
1763 return ret; 1368 return ret;
1764 } 1369 }
1765 } 1370 }
1371 mutex_unlock(&wl->mutex);
1766 /* flush any remaining work */ 1372 /* flush any remaining work */
1767 wl1271_debug(DEBUG_MAC80211, "flushing remaining works"); 1373 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1768 1374
@@ -1770,7 +1376,7 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
1770 * disable and re-enable interrupts in order to flush 1376 * disable and re-enable interrupts in order to flush
1771 * the threaded_irq 1377 * the threaded_irq
1772 */ 1378 */
1773 wl1271_disable_interrupts(wl); 1379 wlcore_disable_interrupts(wl);
1774 1380
1775 /* 1381 /*
1776 * set suspended flag to avoid triggering a new threaded_irq 1382 * set suspended flag to avoid triggering a new threaded_irq
@@ -1778,7 +1384,7 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
1778 */ 1384 */
1779 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags); 1385 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1780 1386
1781 wl1271_enable_interrupts(wl); 1387 wlcore_enable_interrupts(wl);
1782 flush_work(&wl->tx_work); 1388 flush_work(&wl->tx_work);
1783 flush_delayed_work(&wl->elp_work); 1389 flush_delayed_work(&wl->elp_work);
1784 1390
@@ -1810,12 +1416,15 @@ static int wl1271_op_resume(struct ieee80211_hw *hw)
1810 wl1271_debug(DEBUG_MAC80211, 1416 wl1271_debug(DEBUG_MAC80211,
1811 "run postponed irq_work directly"); 1417 "run postponed irq_work directly");
1812 wl1271_irq(0, wl); 1418 wl1271_irq(0, wl);
1813 wl1271_enable_interrupts(wl); 1419 wlcore_enable_interrupts(wl);
1814 } 1420 }
1421
1422 mutex_lock(&wl->mutex);
1815 wl12xx_for_each_wlvif(wl, wlvif) { 1423 wl12xx_for_each_wlvif(wl, wlvif) {
1816 wl1271_configure_resume(wl, wlvif); 1424 wl1271_configure_resume(wl, wlvif);
1817 } 1425 }
1818 wl->wow_enabled = false; 1426 wl->wow_enabled = false;
1427 mutex_unlock(&wl->mutex);
1819 1428
1820 return 0; 1429 return 0;
1821} 1430}
@@ -1851,7 +1460,7 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
1851 * Otherwise, the interrupt handler might be called and exit without 1460 * Otherwise, the interrupt handler might be called and exit without
1852 * reading the interrupt status. 1461 * reading the interrupt status.
1853 */ 1462 */
1854 wl1271_disable_interrupts(wl); 1463 wlcore_disable_interrupts(wl);
1855 mutex_lock(&wl->mutex); 1464 mutex_lock(&wl->mutex);
1856 if (wl->state == WL1271_STATE_OFF) { 1465 if (wl->state == WL1271_STATE_OFF) {
1857 mutex_unlock(&wl->mutex); 1466 mutex_unlock(&wl->mutex);
@@ -1861,7 +1470,7 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
1861 * may have been disabled when op_stop was called. It will, 1470 * may have been disabled when op_stop was called. It will,
1862 * however, balance the above call to disable_interrupts(). 1471 * however, balance the above call to disable_interrupts().
1863 */ 1472 */
1864 wl1271_enable_interrupts(wl); 1473 wlcore_enable_interrupts(wl);
1865 return; 1474 return;
1866 } 1475 }
1867 1476
@@ -1894,7 +1503,6 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
1894 wl->tx_results_count = 0; 1503 wl->tx_results_count = 0;
1895 wl->tx_packets_count = 0; 1504 wl->tx_packets_count = 0;
1896 wl->time_offset = 0; 1505 wl->time_offset = 0;
1897 wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
1898 wl->ap_fw_ps_map = 0; 1506 wl->ap_fw_ps_map = 0;
1899 wl->ap_ps_map = 0; 1507 wl->ap_ps_map = 0;
1900 wl->sched_scanning = false; 1508 wl->sched_scanning = false;
@@ -2067,7 +1675,7 @@ static bool wl12xx_init_fw(struct wl1271 *wl)
2067 if (ret < 0) 1675 if (ret < 0)
2068 goto power_off; 1676 goto power_off;
2069 1677
2070 ret = wl1271_boot(wl); 1678 ret = wl->ops->boot(wl);
2071 if (ret < 0) 1679 if (ret < 0)
2072 goto power_off; 1680 goto power_off;
2073 1681
@@ -2087,7 +1695,7 @@ irq_disable:
2087 work function will not do anything.) Also, any other 1695 work function will not do anything.) Also, any other
2088 possible concurrent operations will fail due to the 1696 possible concurrent operations will fail due to the
2089 current state, hence the wl1271 struct should be safe. */ 1697 current state, hence the wl1271 struct should be safe. */
2090 wl1271_disable_interrupts(wl); 1698 wlcore_disable_interrupts(wl);
2091 wl1271_flush_deferred_work(wl); 1699 wl1271_flush_deferred_work(wl);
2092 cancel_work_sync(&wl->netstack_work); 1700 cancel_work_sync(&wl->netstack_work);
2093 mutex_lock(&wl->mutex); 1701 mutex_lock(&wl->mutex);
@@ -2360,10 +1968,12 @@ deinit:
2360 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++) 1968 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2361 wl12xx_free_rate_policy(wl, 1969 wl12xx_free_rate_policy(wl,
2362 &wlvif->ap.ucast_rate_idx[i]); 1970 &wlvif->ap.ucast_rate_idx[i]);
1971 wl1271_free_ap_keys(wl, wlvif);
2363 } 1972 }
2364 1973
1974 dev_kfree_skb(wlvif->probereq);
1975 wlvif->probereq = NULL;
2365 wl12xx_tx_reset_wlvif(wl, wlvif); 1976 wl12xx_tx_reset_wlvif(wl, wlvif);
2366 wl1271_free_ap_keys(wl, wlvif);
2367 if (wl->last_wlvif == wlvif) 1977 if (wl->last_wlvif == wlvif)
2368 wl->last_wlvif = NULL; 1978 wl->last_wlvif = NULL;
2369 list_del(&wlvif->list); 1979 list_del(&wlvif->list);
@@ -2946,6 +2556,17 @@ static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2946 int ret; 2556 int ret;
2947 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS); 2557 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2948 2558
2559 /*
2560 * A role set to GEM cipher requires different Tx settings (namely
2561 * spare blocks). Note when we are in this mode so the HW can adjust.
2562 */
2563 if (key_type == KEY_GEM) {
2564 if (action == KEY_ADD_OR_REPLACE)
2565 wlvif->is_gem = true;
2566 else if (action == KEY_REMOVE)
2567 wlvif->is_gem = false;
2568 }
2569
2949 if (is_ap) { 2570 if (is_ap) {
2950 struct wl1271_station *wl_sta; 2571 struct wl1271_station *wl_sta;
2951 u8 hlid; 2572 u8 hlid;
@@ -2984,17 +2605,6 @@ static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2984 0xff, 0xff, 0xff, 0xff, 0xff, 0xff 2605 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2985 }; 2606 };
2986 2607
2987 /*
2988 * A STA set to GEM cipher requires 2 tx spare blocks.
2989 * Return to default value when GEM cipher key is removed
2990 */
2991 if (key_type == KEY_GEM) {
2992 if (action == KEY_ADD_OR_REPLACE)
2993 wl->tx_spare_blocks = 2;
2994 else if (action == KEY_REMOVE)
2995 wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
2996 }
2997
2998 addr = sta ? sta->addr : bcast_addr; 2608 addr = sta ? sta->addr : bcast_addr;
2999 2609
3000 if (is_zero_ether_addr(addr)) { 2610 if (is_zero_ether_addr(addr)) {
@@ -3791,8 +3401,7 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
3791 wlvif->rssi_thold = bss_conf->cqm_rssi_thold; 3401 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
3792 } 3402 }
3793 3403
3794 if (changed & BSS_CHANGED_BSSID && 3404 if (changed & BSS_CHANGED_BSSID)
3795 (is_ibss || bss_conf->assoc))
3796 if (!is_zero_ether_addr(bss_conf->bssid)) { 3405 if (!is_zero_ether_addr(bss_conf->bssid)) {
3797 ret = wl12xx_cmd_build_null_data(wl, wlvif); 3406 ret = wl12xx_cmd_build_null_data(wl, wlvif);
3798 if (ret < 0) 3407 if (ret < 0)
@@ -3801,9 +3410,6 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
3801 ret = wl1271_build_qos_null_data(wl, vif); 3410 ret = wl1271_build_qos_null_data(wl, vif);
3802 if (ret < 0) 3411 if (ret < 0)
3803 goto out; 3412 goto out;
3804
3805 /* Need to update the BSSID (for filtering etc) */
3806 do_join = true;
3807 } 3413 }
3808 3414
3809 if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) { 3415 if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) {
@@ -3830,6 +3436,7 @@ sta_not_found:
3830 int ieoffset; 3436 int ieoffset;
3831 wlvif->aid = bss_conf->aid; 3437 wlvif->aid = bss_conf->aid;
3832 wlvif->beacon_int = bss_conf->beacon_int; 3438 wlvif->beacon_int = bss_conf->beacon_int;
3439 do_join = true;
3833 set_assoc = true; 3440 set_assoc = true;
3834 3441
3835 /* 3442 /*
@@ -4662,60 +4269,12 @@ static struct ieee80211_channel wl1271_channels[] = {
4662 { .hw_value = 14, .center_freq = 2484, .max_power = 25 }, 4269 { .hw_value = 14, .center_freq = 2484, .max_power = 25 },
4663}; 4270};
4664 4271
4665/* mapping to indexes for wl1271_rates */
4666static const u8 wl1271_rate_to_idx_2ghz[] = {
4667 /* MCS rates are used only with 11n */
4668 7, /* CONF_HW_RXTX_RATE_MCS7_SGI */
4669 7, /* CONF_HW_RXTX_RATE_MCS7 */
4670 6, /* CONF_HW_RXTX_RATE_MCS6 */
4671 5, /* CONF_HW_RXTX_RATE_MCS5 */
4672 4, /* CONF_HW_RXTX_RATE_MCS4 */
4673 3, /* CONF_HW_RXTX_RATE_MCS3 */
4674 2, /* CONF_HW_RXTX_RATE_MCS2 */
4675 1, /* CONF_HW_RXTX_RATE_MCS1 */
4676 0, /* CONF_HW_RXTX_RATE_MCS0 */
4677
4678 11, /* CONF_HW_RXTX_RATE_54 */
4679 10, /* CONF_HW_RXTX_RATE_48 */
4680 9, /* CONF_HW_RXTX_RATE_36 */
4681 8, /* CONF_HW_RXTX_RATE_24 */
4682
4683 /* TI-specific rate */
4684 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_22 */
4685
4686 7, /* CONF_HW_RXTX_RATE_18 */
4687 6, /* CONF_HW_RXTX_RATE_12 */
4688 3, /* CONF_HW_RXTX_RATE_11 */
4689 5, /* CONF_HW_RXTX_RATE_9 */
4690 4, /* CONF_HW_RXTX_RATE_6 */
4691 2, /* CONF_HW_RXTX_RATE_5_5 */
4692 1, /* CONF_HW_RXTX_RATE_2 */
4693 0 /* CONF_HW_RXTX_RATE_1 */
4694};
4695
4696/* 11n STA capabilities */
4697#define HW_RX_HIGHEST_RATE 72
4698
4699#define WL12XX_HT_CAP { \
4700 .cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 | \
4701 (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT), \
4702 .ht_supported = true, \
4703 .ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K, \
4704 .ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, \
4705 .mcs = { \
4706 .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, \
4707 .rx_highest = cpu_to_le16(HW_RX_HIGHEST_RATE), \
4708 .tx_params = IEEE80211_HT_MCS_TX_DEFINED, \
4709 }, \
4710}
4711
4712/* can't be const, mac80211 writes to this */ 4272/* can't be const, mac80211 writes to this */
4713static struct ieee80211_supported_band wl1271_band_2ghz = { 4273static struct ieee80211_supported_band wl1271_band_2ghz = {
4714 .channels = wl1271_channels, 4274 .channels = wl1271_channels,
4715 .n_channels = ARRAY_SIZE(wl1271_channels), 4275 .n_channels = ARRAY_SIZE(wl1271_channels),
4716 .bitrates = wl1271_rates, 4276 .bitrates = wl1271_rates,
4717 .n_bitrates = ARRAY_SIZE(wl1271_rates), 4277 .n_bitrates = ARRAY_SIZE(wl1271_rates),
4718 .ht_cap = WL12XX_HT_CAP,
4719}; 4278};
4720 4279
4721/* 5 GHz data rates for WL1273 */ 4280/* 5 GHz data rates for WL1273 */
@@ -4784,48 +4343,11 @@ static struct ieee80211_channel wl1271_channels_5ghz[] = {
4784 { .hw_value = 165, .center_freq = 5825, .max_power = 25 }, 4343 { .hw_value = 165, .center_freq = 5825, .max_power = 25 },
4785}; 4344};
4786 4345
4787/* mapping to indexes for wl1271_rates_5ghz */
4788static const u8 wl1271_rate_to_idx_5ghz[] = {
4789 /* MCS rates are used only with 11n */
4790 7, /* CONF_HW_RXTX_RATE_MCS7_SGI */
4791 7, /* CONF_HW_RXTX_RATE_MCS7 */
4792 6, /* CONF_HW_RXTX_RATE_MCS6 */
4793 5, /* CONF_HW_RXTX_RATE_MCS5 */
4794 4, /* CONF_HW_RXTX_RATE_MCS4 */
4795 3, /* CONF_HW_RXTX_RATE_MCS3 */
4796 2, /* CONF_HW_RXTX_RATE_MCS2 */
4797 1, /* CONF_HW_RXTX_RATE_MCS1 */
4798 0, /* CONF_HW_RXTX_RATE_MCS0 */
4799
4800 7, /* CONF_HW_RXTX_RATE_54 */
4801 6, /* CONF_HW_RXTX_RATE_48 */
4802 5, /* CONF_HW_RXTX_RATE_36 */
4803 4, /* CONF_HW_RXTX_RATE_24 */
4804
4805 /* TI-specific rate */
4806 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_22 */
4807
4808 3, /* CONF_HW_RXTX_RATE_18 */
4809 2, /* CONF_HW_RXTX_RATE_12 */
4810 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_11 */
4811 1, /* CONF_HW_RXTX_RATE_9 */
4812 0, /* CONF_HW_RXTX_RATE_6 */
4813 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_5_5 */
4814 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_2 */
4815 CONF_HW_RXTX_RATE_UNSUPPORTED /* CONF_HW_RXTX_RATE_1 */
4816};
4817
4818static struct ieee80211_supported_band wl1271_band_5ghz = { 4346static struct ieee80211_supported_band wl1271_band_5ghz = {
4819 .channels = wl1271_channels_5ghz, 4347 .channels = wl1271_channels_5ghz,
4820 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz), 4348 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
4821 .bitrates = wl1271_rates_5ghz, 4349 .bitrates = wl1271_rates_5ghz,
4822 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz), 4350 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
4823 .ht_cap = WL12XX_HT_CAP,
4824};
4825
4826static const u8 *wl1271_band_rate_to_idx[] = {
4827 [IEEE80211_BAND_2GHZ] = wl1271_rate_to_idx_2ghz,
4828 [IEEE80211_BAND_5GHZ] = wl1271_rate_to_idx_5ghz
4829}; 4351};
4830 4352
4831static const struct ieee80211_ops wl1271_ops = { 4353static const struct ieee80211_ops wl1271_ops = {
@@ -4862,18 +4384,18 @@ static const struct ieee80211_ops wl1271_ops = {
4862}; 4384};
4863 4385
4864 4386
4865u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band) 4387u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
4866{ 4388{
4867 u8 idx; 4389 u8 idx;
4868 4390
4869 BUG_ON(band >= sizeof(wl1271_band_rate_to_idx)/sizeof(u8 *)); 4391 BUG_ON(band >= 2);
4870 4392
4871 if (unlikely(rate >= CONF_HW_RXTX_RATE_MAX)) { 4393 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
4872 wl1271_error("Illegal RX rate from HW: %d", rate); 4394 wl1271_error("Illegal RX rate from HW: %d", rate);
4873 return 0; 4395 return 0;
4874 } 4396 }
4875 4397
4876 idx = wl1271_band_rate_to_idx[band][rate]; 4398 idx = wl->band_rate_to_idx[band][rate];
4877 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) { 4399 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
4878 wl1271_error("Unsupported RX rate from HW: %d", rate); 4400 wl1271_error("Unsupported RX rate from HW: %d", rate);
4879 return 0; 4401 return 0;
@@ -5027,34 +4549,6 @@ static struct bin_attribute fwlog_attr = {
5027 .read = wl1271_sysfs_read_fwlog, 4549 .read = wl1271_sysfs_read_fwlog,
5028}; 4550};
5029 4551
5030static bool wl12xx_mac_in_fuse(struct wl1271 *wl)
5031{
5032 bool supported = false;
5033 u8 major, minor;
5034
5035 if (wl->chip.id == CHIP_ID_1283_PG20) {
5036 major = WL128X_PG_GET_MAJOR(wl->hw_pg_ver);
5037 minor = WL128X_PG_GET_MINOR(wl->hw_pg_ver);
5038
5039 /* in wl128x we have the MAC address if the PG is >= (2, 1) */
5040 if (major > 2 || (major == 2 && minor >= 1))
5041 supported = true;
5042 } else {
5043 major = WL127X_PG_GET_MAJOR(wl->hw_pg_ver);
5044 minor = WL127X_PG_GET_MINOR(wl->hw_pg_ver);
5045
5046 /* in wl127x we have the MAC address if the PG is >= (3, 1) */
5047 if (major == 3 && minor >= 1)
5048 supported = true;
5049 }
5050
5051 wl1271_debug(DEBUG_PROBE,
5052 "PG Ver major = %d minor = %d, MAC %s present",
5053 major, minor, supported ? "is" : "is not");
5054
5055 return supported;
5056}
5057
5058static void wl12xx_derive_mac_addresses(struct wl1271 *wl, 4552static void wl12xx_derive_mac_addresses(struct wl1271 *wl,
5059 u32 oui, u32 nic, int n) 4553 u32 oui, u32 nic, int n)
5060{ 4554{
@@ -5080,47 +4574,23 @@ static void wl12xx_derive_mac_addresses(struct wl1271 *wl,
5080 wl->hw->wiphy->addresses = wl->addresses; 4574 wl->hw->wiphy->addresses = wl->addresses;
5081} 4575}
5082 4576
5083static void wl12xx_get_fuse_mac(struct wl1271 *wl)
5084{
5085 u32 mac1, mac2;
5086
5087 wl1271_set_partition(wl, &wl12xx_part_table[PART_DRPW]);
5088
5089 mac1 = wl1271_read32(wl, WL12XX_REG_FUSE_BD_ADDR_1);
5090 mac2 = wl1271_read32(wl, WL12XX_REG_FUSE_BD_ADDR_2);
5091
5092 /* these are the two parts of the BD_ADDR */
5093 wl->fuse_oui_addr = ((mac2 & 0xffff) << 8) +
5094 ((mac1 & 0xff000000) >> 24);
5095 wl->fuse_nic_addr = mac1 & 0xffffff;
5096
5097 wl1271_set_partition(wl, &wl12xx_part_table[PART_DOWN]);
5098}
5099
5100static int wl12xx_get_hw_info(struct wl1271 *wl) 4577static int wl12xx_get_hw_info(struct wl1271 *wl)
5101{ 4578{
5102 int ret; 4579 int ret;
5103 u32 die_info;
5104 4580
5105 ret = wl12xx_set_power_on(wl); 4581 ret = wl12xx_set_power_on(wl);
5106 if (ret < 0) 4582 if (ret < 0)
5107 goto out; 4583 goto out;
5108 4584
5109 wl->chip.id = wl1271_read32(wl, CHIP_ID_B); 4585 wl->chip.id = wlcore_read_reg(wl, REG_CHIP_ID_B);
5110 4586
5111 if (wl->chip.id == CHIP_ID_1283_PG20) 4587 wl->fuse_oui_addr = 0;
5112 die_info = wl1271_top_reg_read(wl, WL128X_REG_FUSE_DATA_2_1); 4588 wl->fuse_nic_addr = 0;
5113 else
5114 die_info = wl1271_top_reg_read(wl, WL127X_REG_FUSE_DATA_2_1);
5115 4589
5116 wl->hw_pg_ver = (s8) (die_info & PG_VER_MASK) >> PG_VER_OFFSET; 4590 wl->hw_pg_ver = wl->ops->get_pg_ver(wl);
5117 4591
5118 if (!wl12xx_mac_in_fuse(wl)) { 4592 if (wl->ops->get_mac)
5119 wl->fuse_oui_addr = 0; 4593 wl->ops->get_mac(wl);
5120 wl->fuse_nic_addr = 0;
5121 } else {
5122 wl12xx_get_fuse_mac(wl);
5123 }
5124 4594
5125 wl1271_power_off(wl); 4595 wl1271_power_off(wl);
5126out: 4596out:
@@ -5242,7 +4712,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
5242 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE - 4712 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5243 sizeof(struct ieee80211_header); 4713 sizeof(struct ieee80211_header);
5244 4714
5245 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; 4715 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
4716 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
5246 4717
5247 /* make sure all our channels fit in the scanned_ch bitmask */ 4718 /* make sure all our channels fit in the scanned_ch bitmask */
5248 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) + 4719 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
@@ -5254,8 +4725,12 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
5254 */ 4725 */
5255 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz, 4726 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
5256 sizeof(wl1271_band_2ghz)); 4727 sizeof(wl1271_band_2ghz));
4728 memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap, &wl->ht_cap,
4729 sizeof(wl->ht_cap));
5257 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz, 4730 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
5258 sizeof(wl1271_band_5ghz)); 4731 sizeof(wl1271_band_5ghz));
4732 memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap, &wl->ht_cap,
4733 sizeof(wl->ht_cap));
5259 4734
5260 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = 4735 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5261 &wl->bands[IEEE80211_BAND_2GHZ]; 4736 &wl->bands[IEEE80211_BAND_2GHZ];
@@ -5279,14 +4754,14 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
5279 wl->hw->sta_data_size = sizeof(struct wl1271_station); 4754 wl->hw->sta_data_size = sizeof(struct wl1271_station);
5280 wl->hw->vif_data_size = sizeof(struct wl12xx_vif); 4755 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
5281 4756
5282 wl->hw->max_rx_aggregation_subframes = 8; 4757 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
5283 4758
5284 return 0; 4759 return 0;
5285} 4760}
5286 4761
5287#define WL1271_DEFAULT_CHANNEL 0 4762#define WL1271_DEFAULT_CHANNEL 0
5288 4763
5289static struct ieee80211_hw *wl1271_alloc_hw(void) 4764struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size)
5290{ 4765{
5291 struct ieee80211_hw *hw; 4766 struct ieee80211_hw *hw;
5292 struct wl1271 *wl; 4767 struct wl1271 *wl;
@@ -5305,6 +4780,13 @@ static struct ieee80211_hw *wl1271_alloc_hw(void)
5305 wl = hw->priv; 4780 wl = hw->priv;
5306 memset(wl, 0, sizeof(*wl)); 4781 memset(wl, 0, sizeof(*wl));
5307 4782
4783 wl->priv = kzalloc(priv_size, GFP_KERNEL);
4784 if (!wl->priv) {
4785 wl1271_error("could not alloc wl priv");
4786 ret = -ENOMEM;
4787 goto err_priv_alloc;
4788 }
4789
5308 INIT_LIST_HEAD(&wl->wlvif_list); 4790 INIT_LIST_HEAD(&wl->wlvif_list);
5309 4791
5310 wl->hw = hw; 4792 wl->hw = hw;
@@ -5341,7 +4823,6 @@ static struct ieee80211_hw *wl1271_alloc_hw(void)
5341 wl->quirks = 0; 4823 wl->quirks = 0;
5342 wl->platform_quirks = 0; 4824 wl->platform_quirks = 0;
5343 wl->sched_scanning = false; 4825 wl->sched_scanning = false;
5344 wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
5345 wl->system_hlid = WL12XX_SYSTEM_HLID; 4826 wl->system_hlid = WL12XX_SYSTEM_HLID;
5346 wl->active_sta_count = 0; 4827 wl->active_sta_count = 0;
5347 wl->fwlog_size = 0; 4828 wl->fwlog_size = 0;
@@ -5351,7 +4832,7 @@ static struct ieee80211_hw *wl1271_alloc_hw(void)
5351 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map); 4832 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
5352 4833
5353 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map)); 4834 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
5354 for (i = 0; i < ACX_TX_DESCRIPTORS; i++) 4835 for (i = 0; i < wl->num_tx_desc; i++)
5355 wl->tx_frames[i] = NULL; 4836 wl->tx_frames[i] = NULL;
5356 4837
5357 spin_lock_init(&wl->wl_lock); 4838 spin_lock_init(&wl->wl_lock);
@@ -5360,9 +4841,6 @@ static struct ieee80211_hw *wl1271_alloc_hw(void)
5360 wl->fw_type = WL12XX_FW_TYPE_NONE; 4841 wl->fw_type = WL12XX_FW_TYPE_NONE;
5361 mutex_init(&wl->mutex); 4842 mutex_init(&wl->mutex);
5362 4843
5363 /* Apply default driver configuration. */
5364 wl1271_conf_init(wl);
5365
5366 order = get_order(WL1271_AGGR_BUFFER_SIZE); 4844 order = get_order(WL1271_AGGR_BUFFER_SIZE);
5367 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order); 4845 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
5368 if (!wl->aggr_buf) { 4846 if (!wl->aggr_buf) {
@@ -5383,8 +4861,17 @@ static struct ieee80211_hw *wl1271_alloc_hw(void)
5383 goto err_dummy_packet; 4861 goto err_dummy_packet;
5384 } 4862 }
5385 4863
4864 wl->mbox = kmalloc(sizeof(*wl->mbox), GFP_DMA);
4865 if (!wl->mbox) {
4866 ret = -ENOMEM;
4867 goto err_fwlog;
4868 }
4869
5386 return hw; 4870 return hw;
5387 4871
4872err_fwlog:
4873 free_page((unsigned long)wl->fwlog);
4874
5388err_dummy_packet: 4875err_dummy_packet:
5389 dev_kfree_skb(wl->dummy_packet); 4876 dev_kfree_skb(wl->dummy_packet);
5390 4877
@@ -5396,14 +4883,18 @@ err_wq:
5396 4883
5397err_hw: 4884err_hw:
5398 wl1271_debugfs_exit(wl); 4885 wl1271_debugfs_exit(wl);
4886 kfree(wl->priv);
4887
4888err_priv_alloc:
5399 ieee80211_free_hw(hw); 4889 ieee80211_free_hw(hw);
5400 4890
5401err_hw_alloc: 4891err_hw_alloc:
5402 4892
5403 return ERR_PTR(ret); 4893 return ERR_PTR(ret);
5404} 4894}
4895EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
5405 4896
5406static int wl1271_free_hw(struct wl1271 *wl) 4897int wlcore_free_hw(struct wl1271 *wl)
5407{ 4898{
5408 /* Unblock any fwlog readers */ 4899 /* Unblock any fwlog readers */
5409 mutex_lock(&wl->mutex); 4900 mutex_lock(&wl->mutex);
@@ -5433,10 +4924,12 @@ static int wl1271_free_hw(struct wl1271 *wl)
5433 kfree(wl->tx_res_if); 4924 kfree(wl->tx_res_if);
5434 destroy_workqueue(wl->freezable_wq); 4925 destroy_workqueue(wl->freezable_wq);
5435 4926
4927 kfree(wl->priv);
5436 ieee80211_free_hw(wl->hw); 4928 ieee80211_free_hw(wl->hw);
5437 4929
5438 return 0; 4930 return 0;
5439} 4931}
4932EXPORT_SYMBOL_GPL(wlcore_free_hw);
5440 4933
5441static irqreturn_t wl12xx_hardirq(int irq, void *cookie) 4934static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
5442{ 4935{
@@ -5467,22 +4960,22 @@ static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
5467 return IRQ_WAKE_THREAD; 4960 return IRQ_WAKE_THREAD;
5468} 4961}
5469 4962
5470static int __devinit wl12xx_probe(struct platform_device *pdev) 4963int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
5471{ 4964{
5472 struct wl12xx_platform_data *pdata = pdev->dev.platform_data; 4965 struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
5473 struct ieee80211_hw *hw;
5474 struct wl1271 *wl;
5475 unsigned long irqflags; 4966 unsigned long irqflags;
5476 int ret = -ENODEV; 4967 int ret;
5477 4968
5478 hw = wl1271_alloc_hw(); 4969 if (!wl->ops || !wl->ptable) {
5479 if (IS_ERR(hw)) { 4970 ret = -EINVAL;
5480 wl1271_error("can't allocate hw"); 4971 goto out_free_hw;
5481 ret = PTR_ERR(hw);
5482 goto out;
5483 } 4972 }
5484 4973
5485 wl = hw->priv; 4974 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
4975
4976 /* adjust some runtime configuration parameters */
4977 wlcore_adjust_conf(wl);
4978
5486 wl->irq = platform_get_irq(pdev, 0); 4979 wl->irq = platform_get_irq(pdev, 0);
5487 wl->ref_clock = pdata->board_ref_clock; 4980 wl->ref_clock = pdata->board_ref_clock;
5488 wl->tcxo_clock = pdata->board_tcxo_clock; 4981 wl->tcxo_clock = pdata->board_tcxo_clock;
@@ -5511,7 +5004,7 @@ static int __devinit wl12xx_probe(struct platform_device *pdev)
5511 wl->irq_wake_enabled = true; 5004 wl->irq_wake_enabled = true;
5512 device_init_wakeup(wl->dev, 1); 5005 device_init_wakeup(wl->dev, 1);
5513 if (pdata->pwr_in_suspend) 5006 if (pdata->pwr_in_suspend)
5514 hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY; 5007 wl->hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
5515 5008
5516 } 5009 }
5517 disable_irq(wl->irq); 5010 disable_irq(wl->irq);
@@ -5545,7 +5038,7 @@ static int __devinit wl12xx_probe(struct platform_device *pdev)
5545 goto out_hw_pg_ver; 5038 goto out_hw_pg_ver;
5546 } 5039 }
5547 5040
5548 return 0; 5041 goto out;
5549 5042
5550out_hw_pg_ver: 5043out_hw_pg_ver:
5551 device_remove_file(wl->dev, &dev_attr_hw_pg_ver); 5044 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
@@ -5557,13 +5050,14 @@ out_irq:
5557 free_irq(wl->irq, wl); 5050 free_irq(wl->irq, wl);
5558 5051
5559out_free_hw: 5052out_free_hw:
5560 wl1271_free_hw(wl); 5053 wlcore_free_hw(wl);
5561 5054
5562out: 5055out:
5563 return ret; 5056 return ret;
5564} 5057}
5058EXPORT_SYMBOL_GPL(wlcore_probe);
5565 5059
5566static int __devexit wl12xx_remove(struct platform_device *pdev) 5060int __devexit wlcore_remove(struct platform_device *pdev)
5567{ 5061{
5568 struct wl1271 *wl = platform_get_drvdata(pdev); 5062 struct wl1271 *wl = platform_get_drvdata(pdev);
5569 5063
@@ -5573,38 +5067,11 @@ static int __devexit wl12xx_remove(struct platform_device *pdev)
5573 } 5067 }
5574 wl1271_unregister_hw(wl); 5068 wl1271_unregister_hw(wl);
5575 free_irq(wl->irq, wl); 5069 free_irq(wl->irq, wl);
5576 wl1271_free_hw(wl); 5070 wlcore_free_hw(wl);
5577 5071
5578 return 0; 5072 return 0;
5579} 5073}
5580 5074EXPORT_SYMBOL_GPL(wlcore_remove);
5581static const struct platform_device_id wl12xx_id_table[] __devinitconst = {
5582 { "wl12xx", 0 },
5583 { } /* Terminating Entry */
5584};
5585MODULE_DEVICE_TABLE(platform, wl12xx_id_table);
5586
5587static struct platform_driver wl12xx_driver = {
5588 .probe = wl12xx_probe,
5589 .remove = __devexit_p(wl12xx_remove),
5590 .id_table = wl12xx_id_table,
5591 .driver = {
5592 .name = "wl12xx_driver",
5593 .owner = THIS_MODULE,
5594 }
5595};
5596
5597static int __init wl12xx_init(void)
5598{
5599 return platform_driver_register(&wl12xx_driver);
5600}
5601module_init(wl12xx_init);
5602
5603static void __exit wl12xx_exit(void)
5604{
5605 platform_driver_unregister(&wl12xx_driver);
5606}
5607module_exit(wl12xx_exit);
5608 5075
5609u32 wl12xx_debug_level = DEBUG_NONE; 5076u32 wl12xx_debug_level = DEBUG_NONE;
5610EXPORT_SYMBOL_GPL(wl12xx_debug_level); 5077EXPORT_SYMBOL_GPL(wl12xx_debug_level);
@@ -5618,6 +5085,9 @@ MODULE_PARM_DESC(fwlog,
5618module_param(bug_on_recovery, bool, S_IRUSR | S_IWUSR); 5085module_param(bug_on_recovery, bool, S_IRUSR | S_IWUSR);
5619MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery"); 5086MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
5620 5087
5088module_param(no_recovery, bool, S_IRUSR | S_IWUSR);
5089MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
5090
5621MODULE_LICENSE("GPL"); 5091MODULE_LICENSE("GPL");
5622MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>"); 5092MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
5623MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); 5093MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
diff --git a/drivers/net/wireless/wl12xx/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index 78f598b4f97b..756eee2257b4 100644
--- a/drivers/net/wireless/wl12xx/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -21,7 +21,6 @@
21 * 21 *
22 */ 22 */
23 23
24#include "reg.h"
25#include "ps.h" 24#include "ps.h"
26#include "io.h" 25#include "io.h"
27#include "tx.h" 26#include "tx.h"
@@ -62,7 +61,7 @@ void wl1271_elp_work(struct work_struct *work)
62 } 61 }
63 62
64 wl1271_debug(DEBUG_PSM, "chip to elp"); 63 wl1271_debug(DEBUG_PSM, "chip to elp");
65 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP); 64 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
66 set_bit(WL1271_FLAG_IN_ELP, &wl->flags); 65 set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
67 66
68out: 67out:
@@ -74,6 +73,9 @@ void wl1271_ps_elp_sleep(struct wl1271 *wl)
74{ 73{
75 struct wl12xx_vif *wlvif; 74 struct wl12xx_vif *wlvif;
76 75
76 if (wl->quirks & WLCORE_QUIRK_NO_ELP)
77 return;
78
77 /* we shouldn't get consecutive sleep requests */ 79 /* we shouldn't get consecutive sleep requests */
78 if (WARN_ON(test_and_set_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags))) 80 if (WARN_ON(test_and_set_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
79 return; 81 return;
@@ -125,7 +127,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl)
125 wl->elp_compl = &compl; 127 wl->elp_compl = &compl;
126 spin_unlock_irqrestore(&wl->wl_lock, flags); 128 spin_unlock_irqrestore(&wl->wl_lock, flags);
127 129
128 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP); 130 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
129 131
130 if (!pending) { 132 if (!pending) {
131 ret = wait_for_completion_timeout( 133 ret = wait_for_completion_timeout(
diff --git a/drivers/net/wireless/wl12xx/ps.h b/drivers/net/wireless/ti/wlcore/ps.h
index 5f19d4fbbf27..de4f9da8ed26 100644
--- a/drivers/net/wireless/wl12xx/ps.h
+++ b/drivers/net/wireless/ti/wlcore/ps.h
@@ -24,7 +24,7 @@
24#ifndef __PS_H__ 24#ifndef __PS_H__
25#define __PS_H__ 25#define __PS_H__
26 26
27#include "wl12xx.h" 27#include "wlcore.h"
28#include "acx.h" 28#include "acx.h"
29 29
30int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif, 30int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
diff --git a/drivers/net/wireless/wl12xx/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index cfa6071704c5..89bd9385e90b 100644
--- a/drivers/net/wireless/wl12xx/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -24,34 +24,36 @@
24#include <linux/gfp.h> 24#include <linux/gfp.h>
25#include <linux/sched.h> 25#include <linux/sched.h>
26 26
27#include "wl12xx.h" 27#include "wlcore.h"
28#include "debug.h" 28#include "debug.h"
29#include "acx.h" 29#include "acx.h"
30#include "reg.h"
31#include "rx.h" 30#include "rx.h"
32#include "tx.h" 31#include "tx.h"
33#include "io.h" 32#include "io.h"
33#include "hw_ops.h"
34 34
35static u8 wl12xx_rx_get_mem_block(struct wl12xx_fw_status *status, 35/*
36 u32 drv_rx_counter) 36 * TODO: this is here just for now, it must be removed when the data
37{ 37 * operations are in place.
38 return le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) & 38 */
39 RX_MEM_BLOCK_MASK; 39#include "../wl12xx/reg.h"
40}
41 40
42static u32 wl12xx_rx_get_buf_size(struct wl12xx_fw_status *status, 41static u32 wlcore_rx_get_buf_size(struct wl1271 *wl,
43 u32 drv_rx_counter) 42 u32 rx_pkt_desc)
44{ 43{
45 return (le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) & 44 if (wl->quirks & WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN)
46 RX_BUF_SIZE_MASK) >> RX_BUF_SIZE_SHIFT_DIV; 45 return (rx_pkt_desc & ALIGNED_RX_BUF_SIZE_MASK) >>
46 ALIGNED_RX_BUF_SIZE_SHIFT;
47
48 return (rx_pkt_desc & RX_BUF_SIZE_MASK) >> RX_BUF_SIZE_SHIFT_DIV;
47} 49}
48 50
49static bool wl12xx_rx_get_unaligned(struct wl12xx_fw_status *status, 51static u32 wlcore_rx_get_align_buf_size(struct wl1271 *wl, u32 pkt_len)
50 u32 drv_rx_counter)
51{ 52{
52 /* Convert the value to bool */ 53 if (wl->quirks & WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN)
53 return !!(le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) & 54 return ALIGN(pkt_len, WL12XX_BUS_BLOCK_SIZE);
54 RX_BUF_UNALIGNED_PAYLOAD); 55
56 return pkt_len;
55} 57}
56 58
57static void wl1271_rx_status(struct wl1271 *wl, 59static void wl1271_rx_status(struct wl1271 *wl,
@@ -66,10 +68,10 @@ static void wl1271_rx_status(struct wl1271 *wl,
66 else 68 else
67 status->band = IEEE80211_BAND_5GHZ; 69 status->band = IEEE80211_BAND_5GHZ;
68 70
69 status->rate_idx = wl1271_rate_to_idx(desc->rate, status->band); 71 status->rate_idx = wlcore_rate_to_idx(wl, desc->rate, status->band);
70 72
71 /* 11n support */ 73 /* 11n support */
72 if (desc->rate <= CONF_HW_RXTX_RATE_MCS0) 74 if (desc->rate <= wl->hw_min_ht_rate)
73 status->flag |= RX_FLAG_HT; 75 status->flag |= RX_FLAG_HT;
74 76
75 status->signal = desc->rssi; 77 status->signal = desc->rssi;
@@ -98,7 +100,7 @@ static void wl1271_rx_status(struct wl1271 *wl,
98} 100}
99 101
100static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length, 102static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
101 bool unaligned, u8 *hlid) 103 enum wl_rx_buf_align rx_align, u8 *hlid)
102{ 104{
103 struct wl1271_rx_descriptor *desc; 105 struct wl1271_rx_descriptor *desc;
104 struct sk_buff *skb; 106 struct sk_buff *skb;
@@ -106,8 +108,9 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
106 u8 *buf; 108 u8 *buf;
107 u8 beacon = 0; 109 u8 beacon = 0;
108 u8 is_data = 0; 110 u8 is_data = 0;
109 u8 reserved = unaligned ? NET_IP_ALIGN : 0; 111 u8 reserved = 0;
110 u16 seq_num; 112 u16 seq_num;
113 u32 pkt_data_len;
111 114
112 /* 115 /*
113 * In PLT mode we seem to get frames and mac80211 warns about them, 116 * In PLT mode we seem to get frames and mac80211 warns about them,
@@ -116,6 +119,16 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
116 if (unlikely(wl->plt)) 119 if (unlikely(wl->plt))
117 return -EINVAL; 120 return -EINVAL;
118 121
122 pkt_data_len = wlcore_hw_get_rx_packet_len(wl, data, length);
123 if (!pkt_data_len) {
124 wl1271_error("Invalid packet arrived from HW. length %d",
125 length);
126 return -EINVAL;
127 }
128
129 if (rx_align == WLCORE_RX_BUF_UNALIGNED)
130 reserved = NET_IP_ALIGN;
131
119 /* the data read starts with the descriptor */ 132 /* the data read starts with the descriptor */
120 desc = (struct wl1271_rx_descriptor *) data; 133 desc = (struct wl1271_rx_descriptor *) data;
121 134
@@ -142,8 +155,8 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
142 return -EINVAL; 155 return -EINVAL;
143 } 156 }
144 157
145 /* skb length not included rx descriptor */ 158 /* skb length not including rx descriptor */
146 skb = __dev_alloc_skb(length + reserved - sizeof(*desc), GFP_KERNEL); 159 skb = __dev_alloc_skb(pkt_data_len + reserved, GFP_KERNEL);
147 if (!skb) { 160 if (!skb) {
148 wl1271_error("Couldn't allocate RX frame"); 161 wl1271_error("Couldn't allocate RX frame");
149 return -ENOMEM; 162 return -ENOMEM;
@@ -152,7 +165,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
152 /* reserve the unaligned payload(if any) */ 165 /* reserve the unaligned payload(if any) */
153 skb_reserve(skb, reserved); 166 skb_reserve(skb, reserved);
154 167
155 buf = skb_put(skb, length - sizeof(*desc)); 168 buf = skb_put(skb, pkt_data_len);
156 169
157 /* 170 /*
158 * Copy packets from aggregation buffer to the skbs without rx 171 * Copy packets from aggregation buffer to the skbs without rx
@@ -160,7 +173,10 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
160 * packets copy the packets in offset of 2 bytes guarantee IP header 173 * packets copy the packets in offset of 2 bytes guarantee IP header
161 * payload aligned to 4 bytes. 174 * payload aligned to 4 bytes.
162 */ 175 */
163 memcpy(buf, data + sizeof(*desc), length - sizeof(*desc)); 176 memcpy(buf, data + sizeof(*desc), pkt_data_len);
177 if (rx_align == WLCORE_RX_BUF_PADDED)
178 skb_pull(skb, NET_IP_ALIGN);
179
164 *hlid = desc->hlid; 180 *hlid = desc->hlid;
165 181
166 hdr = (struct ieee80211_hdr *)skb->data; 182 hdr = (struct ieee80211_hdr *)skb->data;
@@ -177,36 +193,35 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
177 beacon ? "beacon" : "", 193 beacon ? "beacon" : "",
178 seq_num, *hlid); 194 seq_num, *hlid);
179 195
180 skb_trim(skb, skb->len - desc->pad_len);
181
182 skb_queue_tail(&wl->deferred_rx_queue, skb); 196 skb_queue_tail(&wl->deferred_rx_queue, skb);
183 queue_work(wl->freezable_wq, &wl->netstack_work); 197 queue_work(wl->freezable_wq, &wl->netstack_work);
184 198
185 return is_data; 199 return is_data;
186} 200}
187 201
188void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status) 202void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status)
189{ 203{
190 struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
191 unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0}; 204 unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
192 u32 buf_size; 205 u32 buf_size;
193 u32 fw_rx_counter = status->fw_rx_counter & NUM_RX_PKT_DESC_MOD_MASK; 206 u32 fw_rx_counter = status->fw_rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
194 u32 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK; 207 u32 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
195 u32 rx_counter; 208 u32 rx_counter;
196 u32 mem_block; 209 u32 pkt_len, align_pkt_len;
197 u32 pkt_length; 210 u32 pkt_offset, des;
198 u32 pkt_offset;
199 u8 hlid; 211 u8 hlid;
200 bool unaligned = false; 212 enum wl_rx_buf_align rx_align;
201 213
202 while (drv_rx_counter != fw_rx_counter) { 214 while (drv_rx_counter != fw_rx_counter) {
203 buf_size = 0; 215 buf_size = 0;
204 rx_counter = drv_rx_counter; 216 rx_counter = drv_rx_counter;
205 while (rx_counter != fw_rx_counter) { 217 while (rx_counter != fw_rx_counter) {
206 pkt_length = wl12xx_rx_get_buf_size(status, rx_counter); 218 des = le32_to_cpu(status->rx_pkt_descs[rx_counter]);
207 if (buf_size + pkt_length > WL1271_AGGR_BUFFER_SIZE) 219 pkt_len = wlcore_rx_get_buf_size(wl, des);
220 align_pkt_len = wlcore_rx_get_align_buf_size(wl,
221 pkt_len);
222 if (buf_size + align_pkt_len > WL1271_AGGR_BUFFER_SIZE)
208 break; 223 break;
209 buf_size += pkt_length; 224 buf_size += align_pkt_len;
210 rx_counter++; 225 rx_counter++;
211 rx_counter &= NUM_RX_PKT_DESC_MOD_MASK; 226 rx_counter &= NUM_RX_PKT_DESC_MOD_MASK;
212 } 227 }
@@ -216,38 +231,18 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
216 break; 231 break;
217 } 232 }
218 233
219 if (wl->chip.id != CHIP_ID_1283_PG20) {
220 /*
221 * Choose the block we want to read
222 * For aggregated packets, only the first memory block
223 * should be retrieved. The FW takes care of the rest.
224 */
225 mem_block = wl12xx_rx_get_mem_block(status,
226 drv_rx_counter);
227
228 wl->rx_mem_pool_addr.addr = (mem_block << 8) +
229 le32_to_cpu(wl_mem_map->packet_memory_pool_start);
230
231 wl->rx_mem_pool_addr.addr_extra =
232 wl->rx_mem_pool_addr.addr + 4;
233
234 wl1271_write(wl, WL1271_SLV_REG_DATA,
235 &wl->rx_mem_pool_addr,
236 sizeof(wl->rx_mem_pool_addr), false);
237 }
238
239 /* Read all available packets at once */ 234 /* Read all available packets at once */
240 wl1271_read(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf, 235 des = le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]);
241 buf_size, true); 236 wlcore_hw_prepare_read(wl, des, buf_size);
237 wlcore_read_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
238 buf_size, true);
242 239
243 /* Split data into separate packets */ 240 /* Split data into separate packets */
244 pkt_offset = 0; 241 pkt_offset = 0;
245 while (pkt_offset < buf_size) { 242 while (pkt_offset < buf_size) {
246 pkt_length = wl12xx_rx_get_buf_size(status, 243 des = le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]);
247 drv_rx_counter); 244 pkt_len = wlcore_rx_get_buf_size(wl, des);
248 245 rx_align = wlcore_hw_get_rx_buf_align(wl, des);
249 unaligned = wl12xx_rx_get_unaligned(status,
250 drv_rx_counter);
251 246
252 /* 247 /*
253 * the handle data call can only fail in memory-outage 248 * the handle data call can only fail in memory-outage
@@ -256,7 +251,7 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
256 */ 251 */
257 if (wl1271_rx_handle_data(wl, 252 if (wl1271_rx_handle_data(wl,
258 wl->aggr_buf + pkt_offset, 253 wl->aggr_buf + pkt_offset,
259 pkt_length, unaligned, 254 pkt_len, rx_align,
260 &hlid) == 1) { 255 &hlid) == 1) {
261 if (hlid < WL12XX_MAX_LINKS) 256 if (hlid < WL12XX_MAX_LINKS)
262 __set_bit(hlid, active_hlids); 257 __set_bit(hlid, active_hlids);
@@ -269,7 +264,7 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
269 wl->rx_counter++; 264 wl->rx_counter++;
270 drv_rx_counter++; 265 drv_rx_counter++;
271 drv_rx_counter &= NUM_RX_PKT_DESC_MOD_MASK; 266 drv_rx_counter &= NUM_RX_PKT_DESC_MOD_MASK;
272 pkt_offset += pkt_length; 267 pkt_offset += wlcore_rx_get_align_buf_size(wl, pkt_len);
273 } 268 }
274 } 269 }
275 270
@@ -277,8 +272,9 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
277 * Write the driver's packet counter to the FW. This is only required 272 * Write the driver's packet counter to the FW. This is only required
278 * for older hardware revisions 273 * for older hardware revisions
279 */ 274 */
280 if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION) 275 if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION)
281 wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter); 276 wl1271_write32(wl, WL12XX_REG_RX_DRIVER_COUNTER,
277 wl->rx_counter);
282 278
283 wl12xx_rearm_rx_streaming(wl, active_hlids); 279 wl12xx_rearm_rx_streaming(wl, active_hlids);
284} 280}
diff --git a/drivers/net/wireless/wl12xx/rx.h b/drivers/net/wireless/ti/wlcore/rx.h
index 86ba6b1d0cdc..6e129e2a8546 100644
--- a/drivers/net/wireless/wl12xx/rx.h
+++ b/drivers/net/wireless/ti/wlcore/rx.h
@@ -96,9 +96,19 @@
96#define RX_MEM_BLOCK_MASK 0xFF 96#define RX_MEM_BLOCK_MASK 0xFF
97#define RX_BUF_SIZE_MASK 0xFFF00 97#define RX_BUF_SIZE_MASK 0xFFF00
98#define RX_BUF_SIZE_SHIFT_DIV 6 98#define RX_BUF_SIZE_SHIFT_DIV 6
99#define ALIGNED_RX_BUF_SIZE_MASK 0xFFFF00
100#define ALIGNED_RX_BUF_SIZE_SHIFT 8
101
99/* If set, the start of IP payload is not 4 bytes aligned */ 102/* If set, the start of IP payload is not 4 bytes aligned */
100#define RX_BUF_UNALIGNED_PAYLOAD BIT(20) 103#define RX_BUF_UNALIGNED_PAYLOAD BIT(20)
101 104
105/* Describes the alignment state of a Rx buffer */
106enum wl_rx_buf_align {
107 WLCORE_RX_BUF_ALIGNED,
108 WLCORE_RX_BUF_UNALIGNED,
109 WLCORE_RX_BUF_PADDED,
110};
111
102enum { 112enum {
103 WL12XX_RX_CLASS_UNKNOWN, 113 WL12XX_RX_CLASS_UNKNOWN,
104 WL12XX_RX_CLASS_MANAGEMENT, 114 WL12XX_RX_CLASS_MANAGEMENT,
@@ -126,7 +136,7 @@ struct wl1271_rx_descriptor {
126 u8 reserved; 136 u8 reserved;
127} __packed; 137} __packed;
128 138
129void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status); 139void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status);
130u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band); 140u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
131 141
132#endif 142#endif
diff --git a/drivers/net/wireless/wl12xx/scan.c b/drivers/net/wireless/ti/wlcore/scan.c
index fcba055ef196..ade21a011c45 100644
--- a/drivers/net/wireless/wl12xx/scan.c
+++ b/drivers/net/wireless/ti/wlcore/scan.c
@@ -23,7 +23,7 @@
23 23
24#include <linux/ieee80211.h> 24#include <linux/ieee80211.h>
25 25
26#include "wl12xx.h" 26#include "wlcore.h"
27#include "debug.h" 27#include "debug.h"
28#include "cmd.h" 28#include "cmd.h"
29#include "scan.h" 29#include "scan.h"
@@ -417,6 +417,23 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
417 int i, j; 417 int i, j;
418 u32 flags; 418 u32 flags;
419 bool force_passive = !req->n_ssids; 419 bool force_passive = !req->n_ssids;
420 u32 min_dwell_time_active, max_dwell_time_active, delta_per_probe;
421 u32 dwell_time_passive, dwell_time_dfs;
422
423 if (band == IEEE80211_BAND_5GHZ)
424 delta_per_probe = c->dwell_time_delta_per_probe_5;
425 else
426 delta_per_probe = c->dwell_time_delta_per_probe;
427
428 min_dwell_time_active = c->base_dwell_time +
429 req->n_ssids * c->num_probe_reqs * delta_per_probe;
430
431 max_dwell_time_active = min_dwell_time_active + c->max_dwell_time_delta;
432
433 min_dwell_time_active = DIV_ROUND_UP(min_dwell_time_active, 1000);
434 max_dwell_time_active = DIV_ROUND_UP(max_dwell_time_active, 1000);
435 dwell_time_passive = DIV_ROUND_UP(c->dwell_time_passive, 1000);
436 dwell_time_dfs = DIV_ROUND_UP(c->dwell_time_dfs, 1000);
420 437
421 for (i = 0, j = start; 438 for (i = 0, j = start;
422 i < req->n_channels && j < max_channels; 439 i < req->n_channels && j < max_channels;
@@ -440,21 +457,24 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
440 req->channels[i]->flags); 457 req->channels[i]->flags);
441 wl1271_debug(DEBUG_SCAN, "max_power %d", 458 wl1271_debug(DEBUG_SCAN, "max_power %d",
442 req->channels[i]->max_power); 459 req->channels[i]->max_power);
460 wl1271_debug(DEBUG_SCAN, "min_dwell_time %d max dwell time %d",
461 min_dwell_time_active,
462 max_dwell_time_active);
443 463
444 if (flags & IEEE80211_CHAN_RADAR) { 464 if (flags & IEEE80211_CHAN_RADAR) {
445 channels[j].flags |= SCAN_CHANNEL_FLAGS_DFS; 465 channels[j].flags |= SCAN_CHANNEL_FLAGS_DFS;
446 466
447 channels[j].passive_duration = 467 channels[j].passive_duration =
448 cpu_to_le16(c->dwell_time_dfs); 468 cpu_to_le16(dwell_time_dfs);
449 } else { 469 } else {
450 channels[j].passive_duration = 470 channels[j].passive_duration =
451 cpu_to_le16(c->dwell_time_passive); 471 cpu_to_le16(dwell_time_passive);
452 } 472 }
453 473
454 channels[j].min_duration = 474 channels[j].min_duration =
455 cpu_to_le16(c->min_dwell_time_active); 475 cpu_to_le16(min_dwell_time_active);
456 channels[j].max_duration = 476 channels[j].max_duration =
457 cpu_to_le16(c->max_dwell_time_active); 477 cpu_to_le16(max_dwell_time_active);
458 478
459 channels[j].tx_power_att = req->channels[i]->max_power; 479 channels[j].tx_power_att = req->channels[i]->max_power;
460 channels[j].channel = req->channels[i]->hw_value; 480 channels[j].channel = req->channels[i]->hw_value;
diff --git a/drivers/net/wireless/wl12xx/scan.h b/drivers/net/wireless/ti/wlcore/scan.h
index 96ff457a3a0b..81ee36ac2078 100644
--- a/drivers/net/wireless/wl12xx/scan.h
+++ b/drivers/net/wireless/ti/wlcore/scan.h
@@ -24,7 +24,7 @@
24#ifndef __SCAN_H__ 24#ifndef __SCAN_H__
25#define __SCAN_H__ 25#define __SCAN_H__
26 26
27#include "wl12xx.h" 27#include "wlcore.h"
28 28
29int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif, 29int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif,
30 const u8 *ssid, size_t ssid_len, 30 const u8 *ssid, size_t ssid_len,
@@ -55,7 +55,7 @@ void wl1271_scan_sched_scan_results(struct wl1271 *wl);
55#define WL1271_SCAN_BAND_2_4_GHZ 0 55#define WL1271_SCAN_BAND_2_4_GHZ 0
56#define WL1271_SCAN_BAND_5_GHZ 1 56#define WL1271_SCAN_BAND_5_GHZ 1
57 57
58#define WL1271_SCAN_TIMEOUT 10000 /* msec */ 58#define WL1271_SCAN_TIMEOUT 30000 /* msec */
59 59
60enum { 60enum {
61 WL1271_SCAN_STATE_IDLE, 61 WL1271_SCAN_STATE_IDLE,
diff --git a/drivers/net/wireless/wl12xx/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 4b3c32774bae..0a72347cfc4c 100644
--- a/drivers/net/wireless/wl12xx/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -33,7 +33,7 @@
33#include <linux/wl12xx.h> 33#include <linux/wl12xx.h>
34#include <linux/pm_runtime.h> 34#include <linux/pm_runtime.h>
35 35
36#include "wl12xx.h" 36#include "wlcore.h"
37#include "wl12xx_80211.h" 37#include "wl12xx_80211.h"
38#include "io.h" 38#include "io.h"
39 39
@@ -76,7 +76,7 @@ static void wl12xx_sdio_raw_read(struct device *child, int addr, void *buf,
76 76
77 sdio_claim_host(func); 77 sdio_claim_host(func);
78 78
79 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) { 79 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG)) {
80 ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret); 80 ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
81 dev_dbg(child->parent, "sdio read 52 addr 0x%x, byte 0x%02x\n", 81 dev_dbg(child->parent, "sdio read 52 addr 0x%x, byte 0x%02x\n",
82 addr, ((u8 *)buf)[0]); 82 addr, ((u8 *)buf)[0]);
@@ -105,7 +105,7 @@ static void wl12xx_sdio_raw_write(struct device *child, int addr, void *buf,
105 105
106 sdio_claim_host(func); 106 sdio_claim_host(func);
107 107
108 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) { 108 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG)) {
109 sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret); 109 sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
110 dev_dbg(child->parent, "sdio write 52 addr 0x%x, byte 0x%02x\n", 110 dev_dbg(child->parent, "sdio write 52 addr 0x%x, byte 0x%02x\n",
111 addr, ((u8 *)buf)[0]); 111 addr, ((u8 *)buf)[0]);
diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 2fc18a8dcce8..553cd3cbb98c 100644
--- a/drivers/net/wireless/wl12xx/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -30,12 +30,10 @@
30#include <linux/platform_device.h> 30#include <linux/platform_device.h>
31#include <linux/slab.h> 31#include <linux/slab.h>
32 32
33#include "wl12xx.h" 33#include "wlcore.h"
34#include "wl12xx_80211.h" 34#include "wl12xx_80211.h"
35#include "io.h" 35#include "io.h"
36 36
37#include "reg.h"
38
39#define WSPI_CMD_READ 0x40000000 37#define WSPI_CMD_READ 0x40000000
40#define WSPI_CMD_WRITE 0x00000000 38#define WSPI_CMD_WRITE 0x00000000
41#define WSPI_CMD_FIXED 0x20000000 39#define WSPI_CMD_FIXED 0x20000000
diff --git a/drivers/net/wireless/wl12xx/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c
index 1e93bb9c0246..0e59ea2cdd39 100644
--- a/drivers/net/wireless/wl12xx/testmode.c
+++ b/drivers/net/wireless/ti/wlcore/testmode.c
@@ -25,10 +25,9 @@
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <net/genetlink.h> 26#include <net/genetlink.h>
27 27
28#include "wl12xx.h" 28#include "wlcore.h"
29#include "debug.h" 29#include "debug.h"
30#include "acx.h" 30#include "acx.h"
31#include "reg.h"
32#include "ps.h" 31#include "ps.h"
33#include "io.h" 32#include "io.h"
34 33
@@ -116,7 +115,8 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
116 goto out_sleep; 115 goto out_sleep;
117 } 116 }
118 117
119 NLA_PUT(skb, WL1271_TM_ATTR_DATA, buf_len, buf); 118 if (nla_put(skb, WL1271_TM_ATTR_DATA, buf_len, buf))
119 goto nla_put_failure;
120 ret = cfg80211_testmode_reply(skb); 120 ret = cfg80211_testmode_reply(skb);
121 if (ret < 0) 121 if (ret < 0)
122 goto out_sleep; 122 goto out_sleep;
@@ -178,7 +178,8 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
178 goto out_free; 178 goto out_free;
179 } 179 }
180 180
181 NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd); 181 if (nla_put(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd))
182 goto nla_put_failure;
182 ret = cfg80211_testmode_reply(skb); 183 ret = cfg80211_testmode_reply(skb);
183 if (ret < 0) 184 if (ret < 0)
184 goto out_free; 185 goto out_free;
@@ -297,7 +298,8 @@ static int wl12xx_tm_cmd_get_mac(struct wl1271 *wl, struct nlattr *tb[])
297 goto out; 298 goto out;
298 } 299 }
299 300
300 NLA_PUT(skb, WL1271_TM_ATTR_DATA, ETH_ALEN, mac_addr); 301 if (nla_put(skb, WL1271_TM_ATTR_DATA, ETH_ALEN, mac_addr))
302 goto nla_put_failure;
301 ret = cfg80211_testmode_reply(skb); 303 ret = cfg80211_testmode_reply(skb);
302 if (ret < 0) 304 if (ret < 0)
303 goto out; 305 goto out;
diff --git a/drivers/net/wireless/wl12xx/testmode.h b/drivers/net/wireless/ti/wlcore/testmode.h
index 8071654259ea..8071654259ea 100644
--- a/drivers/net/wireless/wl12xx/testmode.h
+++ b/drivers/net/wireless/ti/wlcore/testmode.h
diff --git a/drivers/net/wireless/wl12xx/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index 43ae49143d68..6893bc207994 100644
--- a/drivers/net/wireless/wl12xx/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -25,13 +25,19 @@
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/etherdevice.h> 26#include <linux/etherdevice.h>
27 27
28#include "wl12xx.h" 28#include "wlcore.h"
29#include "debug.h" 29#include "debug.h"
30#include "io.h" 30#include "io.h"
31#include "reg.h"
32#include "ps.h" 31#include "ps.h"
33#include "tx.h" 32#include "tx.h"
34#include "event.h" 33#include "event.h"
34#include "hw_ops.h"
35
36/*
37 * TODO: this is here just for now, it must be removed when the data
38 * operations are in place.
39 */
40#include "../wl12xx/reg.h"
35 41
36static int wl1271_set_default_wep_key(struct wl1271 *wl, 42static int wl1271_set_default_wep_key(struct wl1271 *wl,
37 struct wl12xx_vif *wlvif, u8 id) 43 struct wl12xx_vif *wlvif, u8 id)
@@ -56,8 +62,8 @@ static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
56{ 62{
57 int id; 63 int id;
58 64
59 id = find_first_zero_bit(wl->tx_frames_map, ACX_TX_DESCRIPTORS); 65 id = find_first_zero_bit(wl->tx_frames_map, wl->num_tx_desc);
60 if (id >= ACX_TX_DESCRIPTORS) 66 if (id >= wl->num_tx_desc)
61 return -EBUSY; 67 return -EBUSY;
62 68
63 __set_bit(id, wl->tx_frames_map); 69 __set_bit(id, wl->tx_frames_map);
@@ -69,7 +75,7 @@ static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
69static void wl1271_free_tx_id(struct wl1271 *wl, int id) 75static void wl1271_free_tx_id(struct wl1271 *wl, int id)
70{ 76{
71 if (__test_and_clear_bit(id, wl->tx_frames_map)) { 77 if (__test_and_clear_bit(id, wl->tx_frames_map)) {
72 if (unlikely(wl->tx_frames_cnt == ACX_TX_DESCRIPTORS)) 78 if (unlikely(wl->tx_frames_cnt == wl->num_tx_desc))
73 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags); 79 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
74 80
75 wl->tx_frames[id] = NULL; 81 wl->tx_frames[id] = NULL;
@@ -167,14 +173,15 @@ u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
167 return wlvif->dev_hlid; 173 return wlvif->dev_hlid;
168} 174}
169 175
170static unsigned int wl12xx_calc_packet_alignment(struct wl1271 *wl, 176unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
171 unsigned int packet_length) 177 unsigned int packet_length)
172{ 178{
173 if (wl->quirks & WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT) 179 if (wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN)
174 return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
175 else
176 return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE); 180 return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
181 else
182 return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
177} 183}
184EXPORT_SYMBOL(wlcore_calc_packet_alignment);
178 185
179static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif, 186static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
180 struct sk_buff *skb, u32 extra, u32 buf_offset, 187 struct sk_buff *skb, u32 extra, u32 buf_offset,
@@ -182,10 +189,9 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
182{ 189{
183 struct wl1271_tx_hw_descr *desc; 190 struct wl1271_tx_hw_descr *desc;
184 u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra; 191 u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
185 u32 len;
186 u32 total_blocks; 192 u32 total_blocks;
187 int id, ret = -EBUSY, ac; 193 int id, ret = -EBUSY, ac;
188 u32 spare_blocks = wl->tx_spare_blocks; 194 u32 spare_blocks = wl->normal_tx_spare;
189 bool is_dummy = false; 195 bool is_dummy = false;
190 196
191 if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE) 197 if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
@@ -196,30 +202,19 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
196 if (id < 0) 202 if (id < 0)
197 return id; 203 return id;
198 204
199 /* approximate the number of blocks required for this packet 205 if (unlikely(wl12xx_is_dummy_packet(wl, skb)))
200 in the firmware */
201 len = wl12xx_calc_packet_alignment(wl, total_len);
202
203 /* in case of a dummy packet, use default amount of spare mem blocks */
204 if (unlikely(wl12xx_is_dummy_packet(wl, skb))) {
205 is_dummy = true; 206 is_dummy = true;
206 spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT; 207 else if (wlvif->is_gem)
207 } 208 spare_blocks = wl->gem_tx_spare;
208 209
209 total_blocks = (len + TX_HW_BLOCK_SIZE - 1) / TX_HW_BLOCK_SIZE + 210 total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks);
210 spare_blocks;
211 211
212 if (total_blocks <= wl->tx_blocks_available) { 212 if (total_blocks <= wl->tx_blocks_available) {
213 desc = (struct wl1271_tx_hw_descr *)skb_push( 213 desc = (struct wl1271_tx_hw_descr *)skb_push(
214 skb, total_len - skb->len); 214 skb, total_len - skb->len);
215 215
216 /* HW descriptor fields change between wl127x and wl128x */ 216 wlcore_hw_set_tx_desc_blocks(wl, desc, total_blocks,
217 if (wl->chip.id == CHIP_ID_1283_PG20) { 217 spare_blocks);
218 desc->wl128x_mem.total_mem_blocks = total_blocks;
219 } else {
220 desc->wl127x_mem.extra_blocks = spare_blocks;
221 desc->wl127x_mem.total_mem_blocks = total_blocks;
222 }
223 218
224 desc->id = id; 219 desc->id = id;
225 220
@@ -256,7 +251,7 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
256{ 251{
257 struct timespec ts; 252 struct timespec ts;
258 struct wl1271_tx_hw_descr *desc; 253 struct wl1271_tx_hw_descr *desc;
259 int aligned_len, ac, rate_idx; 254 int ac, rate_idx;
260 s64 hosttime; 255 s64 hosttime;
261 u16 tx_attr = 0; 256 u16 tx_attr = 0;
262 __le16 frame_control; 257 __le16 frame_control;
@@ -329,44 +324,16 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
329 } 324 }
330 325
331 tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY; 326 tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;
332 desc->reserved = 0;
333
334 aligned_len = wl12xx_calc_packet_alignment(wl, skb->len);
335
336 if (wl->chip.id == CHIP_ID_1283_PG20) {
337 desc->wl128x_mem.extra_bytes = aligned_len - skb->len;
338 desc->length = cpu_to_le16(aligned_len >> 2);
339
340 wl1271_debug(DEBUG_TX, "tx_fill_hdr: hlid: %d "
341 "tx_attr: 0x%x len: %d life: %d mem: %d",
342 desc->hlid, tx_attr,
343 le16_to_cpu(desc->length),
344 le16_to_cpu(desc->life_time),
345 desc->wl128x_mem.total_mem_blocks);
346 } else {
347 int pad;
348
349 /* Store the aligned length in terms of words */
350 desc->length = cpu_to_le16(aligned_len >> 2);
351
352 /* calculate number of padding bytes */
353 pad = aligned_len - skb->len;
354 tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;
355
356 wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d hlid: %d "
357 "tx_attr: 0x%x len: %d life: %d mem: %d", pad,
358 desc->hlid, tx_attr,
359 le16_to_cpu(desc->length),
360 le16_to_cpu(desc->life_time),
361 desc->wl127x_mem.total_mem_blocks);
362 }
363 327
364 /* for WEP shared auth - no fw encryption is needed */ 328 /* for WEP shared auth - no fw encryption is needed */
365 if (ieee80211_is_auth(frame_control) && 329 if (ieee80211_is_auth(frame_control) &&
366 ieee80211_has_protected(frame_control)) 330 ieee80211_has_protected(frame_control))
367 tx_attr |= TX_HW_ATTR_HOST_ENCRYPT; 331 tx_attr |= TX_HW_ATTR_HOST_ENCRYPT;
368 332
333 desc->reserved = 0;
369 desc->tx_attr = cpu_to_le16(tx_attr); 334 desc->tx_attr = cpu_to_le16(tx_attr);
335
336 wlcore_hw_set_tx_desc_data_len(wl, desc, skb);
370} 337}
371 338
372/* caller must hold wl->mutex */ 339/* caller must hold wl->mutex */
@@ -432,7 +399,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
432 * In special cases, we want to align to a specific block size 399 * In special cases, we want to align to a specific block size
433 * (eg. for wl128x with SDIO we align to 256). 400 * (eg. for wl128x with SDIO we align to 256).
434 */ 401 */
435 total_len = wl12xx_calc_packet_alignment(wl, skb->len); 402 total_len = wlcore_calc_packet_alignment(wl, skb->len);
436 403
437 memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len); 404 memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
438 memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len); 405 memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);
@@ -718,8 +685,8 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
718 * Flush buffer and try again. 685 * Flush buffer and try again.
719 */ 686 */
720 wl1271_skb_queue_head(wl, wlvif, skb); 687 wl1271_skb_queue_head(wl, wlvif, skb);
721 wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf, 688 wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
722 buf_offset, true); 689 buf_offset, true);
723 sent_packets = true; 690 sent_packets = true;
724 buf_offset = 0; 691 buf_offset = 0;
725 continue; 692 continue;
@@ -753,8 +720,8 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
753 720
754out_ack: 721out_ack:
755 if (buf_offset) { 722 if (buf_offset) {
756 wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf, 723 wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
757 buf_offset, true); 724 buf_offset, true);
758 sent_packets = true; 725 sent_packets = true;
759 } 726 }
760 if (sent_packets) { 727 if (sent_packets) {
@@ -762,8 +729,8 @@ out_ack:
762 * Interrupt the firmware with the new packets. This is only 729 * Interrupt the firmware with the new packets. This is only
763 * required for older hardware revisions 730 * required for older hardware revisions
764 */ 731 */
765 if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION) 732 if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION)
766 wl1271_write32(wl, WL1271_HOST_WR_ACCESS, 733 wl1271_write32(wl, WL12XX_HOST_WR_ACCESS,
767 wl->tx_packets_count); 734 wl->tx_packets_count);
768 735
769 wl1271_handle_tx_low_watermark(wl); 736 wl1271_handle_tx_low_watermark(wl);
@@ -792,11 +759,20 @@ static u8 wl1271_tx_get_rate_flags(u8 rate_class_index)
792{ 759{
793 u8 flags = 0; 760 u8 flags = 0;
794 761
795 if (rate_class_index >= CONF_HW_RXTX_RATE_MCS_MIN && 762 /*
796 rate_class_index <= CONF_HW_RXTX_RATE_MCS_MAX) 763 * TODO: use wl12xx constants when this code is moved to wl12xx, as
764 * only it uses Tx-completion.
765 */
766 if (rate_class_index <= 8)
797 flags |= IEEE80211_TX_RC_MCS; 767 flags |= IEEE80211_TX_RC_MCS;
798 if (rate_class_index == CONF_HW_RXTX_RATE_MCS7_SGI) 768
769 /*
770 * TODO: use wl12xx constants when this code is moved to wl12xx, as
771 * only it uses Tx-completion.
772 */
773 if (rate_class_index == 0)
799 flags |= IEEE80211_TX_RC_SHORT_GI; 774 flags |= IEEE80211_TX_RC_SHORT_GI;
775
800 return flags; 776 return flags;
801} 777}
802 778
@@ -813,7 +789,7 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
813 u8 retries = 0; 789 u8 retries = 0;
814 790
815 /* check for id legality */ 791 /* check for id legality */
816 if (unlikely(id >= ACX_TX_DESCRIPTORS || wl->tx_frames[id] == NULL)) { 792 if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) {
817 wl1271_warning("TX result illegal id: %d", id); 793 wl1271_warning("TX result illegal id: %d", id);
818 return; 794 return;
819 } 795 }
@@ -834,7 +810,7 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
834 if (result->status == TX_SUCCESS) { 810 if (result->status == TX_SUCCESS) {
835 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) 811 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
836 info->flags |= IEEE80211_TX_STAT_ACK; 812 info->flags |= IEEE80211_TX_STAT_ACK;
837 rate = wl1271_rate_to_idx(result->rate_class_index, 813 rate = wlcore_rate_to_idx(wl, result->rate_class_index,
838 wlvif->band); 814 wlvif->band);
839 rate_flags = wl1271_tx_get_rate_flags(result->rate_class_index); 815 rate_flags = wl1271_tx_get_rate_flags(result->rate_class_index);
840 retries = result->ack_failures; 816 retries = result->ack_failures;
@@ -929,6 +905,7 @@ void wl1271_tx_complete(struct wl1271 *wl)
929 wl->tx_results_count++; 905 wl->tx_results_count++;
930 } 906 }
931} 907}
908EXPORT_SYMBOL(wl1271_tx_complete);
932 909
933void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid) 910void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
934{ 911{
@@ -1006,7 +983,7 @@ void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
1006 if (reset_tx_queues) 983 if (reset_tx_queues)
1007 wl1271_handle_tx_low_watermark(wl); 984 wl1271_handle_tx_low_watermark(wl);
1008 985
1009 for (i = 0; i < ACX_TX_DESCRIPTORS; i++) { 986 for (i = 0; i < wl->num_tx_desc; i++) {
1010 if (wl->tx_frames[i] == NULL) 987 if (wl->tx_frames[i] == NULL)
1011 continue; 988 continue;
1012 989
diff --git a/drivers/net/wireless/wl12xx/tx.h b/drivers/net/wireless/ti/wlcore/tx.h
index 5cf8c32d40d1..2fd6e5dc6f75 100644
--- a/drivers/net/wireless/wl12xx/tx.h
+++ b/drivers/net/wireless/ti/wlcore/tx.h
@@ -25,9 +25,6 @@
25#ifndef __TX_H__ 25#ifndef __TX_H__
26#define __TX_H__ 26#define __TX_H__
27 27
28#define TX_HW_BLOCK_SPARE_DEFAULT 1
29#define TX_HW_BLOCK_SIZE 252
30
31#define TX_HW_MGMT_PKT_LIFETIME_TU 2000 28#define TX_HW_MGMT_PKT_LIFETIME_TU 2000
32#define TX_HW_AP_MODE_PKT_LIFETIME_TU 8000 29#define TX_HW_AP_MODE_PKT_LIFETIME_TU 8000
33 30
@@ -212,7 +209,7 @@ void wl1271_tx_complete(struct wl1271 *wl);
212void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif); 209void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif);
213void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues); 210void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues);
214void wl1271_tx_flush(struct wl1271 *wl); 211void wl1271_tx_flush(struct wl1271 *wl);
215u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band); 212u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band);
216u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set, 213u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
217 enum ieee80211_band rate_band); 214 enum ieee80211_band rate_band);
218u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set); 215u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set);
@@ -224,6 +221,8 @@ void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid);
224void wl1271_handle_tx_low_watermark(struct wl1271 *wl); 221void wl1271_handle_tx_low_watermark(struct wl1271 *wl);
225bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb); 222bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb);
226void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids); 223void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids);
224unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
225 unsigned int packet_length);
227 226
228/* from main.c */ 227/* from main.c */
229void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid); 228void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
diff --git a/drivers/net/wireless/wl12xx/wl12xx.h b/drivers/net/wireless/ti/wlcore/wl12xx.h
index 749a15a75d38..a9b220c43e54 100644
--- a/drivers/net/wireless/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/ti/wlcore/wl12xx.h
@@ -89,8 +89,6 @@
89#define WL1271_AP_BSS_INDEX 0 89#define WL1271_AP_BSS_INDEX 0
90#define WL1271_AP_DEF_BEACON_EXP 20 90#define WL1271_AP_DEF_BEACON_EXP 20
91 91
92#define ACX_TX_DESCRIPTORS 16
93
94#define WL1271_AGGR_BUFFER_SIZE (4 * PAGE_SIZE) 92#define WL1271_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
95 93
96enum wl1271_state { 94enum wl1271_state {
@@ -105,26 +103,6 @@ enum wl12xx_fw_type {
105 WL12XX_FW_TYPE_PLT, 103 WL12XX_FW_TYPE_PLT,
106}; 104};
107 105
108enum wl1271_partition_type {
109 PART_DOWN,
110 PART_WORK,
111 PART_DRPW,
112
113 PART_TABLE_LEN
114};
115
116struct wl1271_partition {
117 u32 size;
118 u32 start;
119};
120
121struct wl1271_partition_set {
122 struct wl1271_partition mem;
123 struct wl1271_partition reg;
124 struct wl1271_partition mem2;
125 struct wl1271_partition mem3;
126};
127
128struct wl1271; 106struct wl1271;
129 107
130enum { 108enum {
@@ -167,8 +145,21 @@ struct wl1271_stats {
167 145
168#define AP_MAX_STATIONS 8 146#define AP_MAX_STATIONS 8
169 147
148struct wl_fw_packet_counters {
149 /* Cumulative counter of released packets per AC */
150 u8 tx_released_pkts[NUM_TX_QUEUES];
151
152 /* Cumulative counter of freed packets per HLID */
153 u8 tx_lnk_free_pkts[WL12XX_MAX_LINKS];
154
155 /* Cumulative counter of released Voice memory blocks */
156 u8 tx_voice_released_blks;
157
158 u8 padding[3];
159} __packed;
160
170/* FW status registers */ 161/* FW status registers */
171struct wl12xx_fw_status { 162struct wl_fw_status {
172 __le32 intr; 163 __le32 intr;
173 u8 fw_rx_counter; 164 u8 fw_rx_counter;
174 u8 drv_rx_counter; 165 u8 drv_rx_counter;
@@ -195,16 +186,12 @@ struct wl12xx_fw_status {
195 /* Size (in Memory Blocks) of TX pool */ 186 /* Size (in Memory Blocks) of TX pool */
196 __le32 tx_total; 187 __le32 tx_total;
197 188
198 /* Cumulative counter of released packets per AC */ 189 struct wl_fw_packet_counters counters;
199 u8 tx_released_pkts[NUM_TX_QUEUES];
200 190
201 /* Cumulative counter of freed packets per HLID */
202 u8 tx_lnk_free_pkts[WL12XX_MAX_LINKS];
203
204 /* Cumulative counter of released Voice memory blocks */
205 u8 tx_voice_released_blks;
206 u8 padding_1[3];
207 __le32 log_start_addr; 191 __le32 log_start_addr;
192
193 /* Private status to be used by the lower drivers */
194 u8 priv[0];
208} __packed; 195} __packed;
209 196
210struct wl1271_rx_mem_pool_addr { 197struct wl1271_rx_mem_pool_addr {
@@ -292,214 +279,6 @@ struct wl1271_link {
292 u8 ba_bitmap; 279 u8 ba_bitmap;
293}; 280};
294 281
295struct wl1271 {
296 struct ieee80211_hw *hw;
297 bool mac80211_registered;
298
299 struct device *dev;
300
301 void *if_priv;
302
303 struct wl1271_if_operations *if_ops;
304
305 void (*set_power)(bool enable);
306 int irq;
307 int ref_clock;
308
309 spinlock_t wl_lock;
310
311 enum wl1271_state state;
312 enum wl12xx_fw_type fw_type;
313 bool plt;
314 u8 last_vif_count;
315 struct mutex mutex;
316
317 unsigned long flags;
318
319 struct wl1271_partition_set part;
320
321 struct wl1271_chip chip;
322
323 int cmd_box_addr;
324 int event_box_addr;
325
326 u8 *fw;
327 size_t fw_len;
328 void *nvs;
329 size_t nvs_len;
330
331 s8 hw_pg_ver;
332
333 /* address read from the fuse ROM */
334 u32 fuse_oui_addr;
335 u32 fuse_nic_addr;
336
337 /* we have up to 2 MAC addresses */
338 struct mac_address addresses[2];
339 int channel;
340 u8 system_hlid;
341
342 unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
343 unsigned long roles_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
344 unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
345 unsigned long rate_policies_map[
346 BITS_TO_LONGS(WL12XX_MAX_RATE_POLICIES)];
347
348 struct list_head wlvif_list;
349
350 u8 sta_count;
351 u8 ap_count;
352
353 struct wl1271_acx_mem_map *target_mem_map;
354
355 /* Accounting for allocated / available TX blocks on HW */
356 u32 tx_blocks_freed;
357 u32 tx_blocks_available;
358 u32 tx_allocated_blocks;
359 u32 tx_results_count;
360
361 /* amount of spare TX blocks to use */
362 u32 tx_spare_blocks;
363
364 /* Accounting for allocated / available Tx packets in HW */
365 u32 tx_pkts_freed[NUM_TX_QUEUES];
366 u32 tx_allocated_pkts[NUM_TX_QUEUES];
367
368 /* Transmitted TX packets counter for chipset interface */
369 u32 tx_packets_count;
370
371 /* Time-offset between host and chipset clocks */
372 s64 time_offset;
373
374 /* Frames scheduled for transmission, not handled yet */
375 int tx_queue_count[NUM_TX_QUEUES];
376 long stopped_queues_map;
377
378 /* Frames received, not handled yet by mac80211 */
379 struct sk_buff_head deferred_rx_queue;
380
381 /* Frames sent, not returned yet to mac80211 */
382 struct sk_buff_head deferred_tx_queue;
383
384 struct work_struct tx_work;
385 struct workqueue_struct *freezable_wq;
386
387 /* Pending TX frames */
388 unsigned long tx_frames_map[BITS_TO_LONGS(ACX_TX_DESCRIPTORS)];
389 struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS];
390 int tx_frames_cnt;
391
392 /* FW Rx counter */
393 u32 rx_counter;
394
395 /* Rx memory pool address */
396 struct wl1271_rx_mem_pool_addr rx_mem_pool_addr;
397
398 /* Intermediate buffer, used for packet aggregation */
399 u8 *aggr_buf;
400
401 /* Reusable dummy packet template */
402 struct sk_buff *dummy_packet;
403
404 /* Network stack work */
405 struct work_struct netstack_work;
406
407 /* FW log buffer */
408 u8 *fwlog;
409
410 /* Number of valid bytes in the FW log buffer */
411 ssize_t fwlog_size;
412
413 /* Sysfs FW log entry readers wait queue */
414 wait_queue_head_t fwlog_waitq;
415
416 /* Hardware recovery work */
417 struct work_struct recovery_work;
418
419 /* The mbox event mask */
420 u32 event_mask;
421
422 /* Mailbox pointers */
423 u32 mbox_ptr[2];
424
425 /* Are we currently scanning */
426 struct ieee80211_vif *scan_vif;
427 struct wl1271_scan scan;
428 struct delayed_work scan_complete_work;
429
430 bool sched_scanning;
431
432 /* The current band */
433 enum ieee80211_band band;
434
435 struct completion *elp_compl;
436 struct delayed_work elp_work;
437
438 /* in dBm */
439 int power_level;
440
441 struct wl1271_stats stats;
442
443 __le32 buffer_32;
444 u32 buffer_cmd;
445 u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
446
447 struct wl12xx_fw_status *fw_status;
448 struct wl1271_tx_hw_res_if *tx_res_if;
449
450 /* Current chipset configuration */
451 struct conf_drv_settings conf;
452
453 bool sg_enabled;
454
455 bool enable_11a;
456
457 /* Most recently reported noise in dBm */
458 s8 noise;
459
460 /* bands supported by this instance of wl12xx */
461 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
462
463 int tcxo_clock;
464
465 /*
466 * wowlan trigger was configured during suspend.
467 * (currently, only "ANY" trigger is supported)
468 */
469 bool wow_enabled;
470 bool irq_wake_enabled;
471
472 /*
473 * AP-mode - links indexed by HLID. The global and broadcast links
474 * are always active.
475 */
476 struct wl1271_link links[WL12XX_MAX_LINKS];
477
478 /* AP-mode - a bitmap of links currently in PS mode according to FW */
479 u32 ap_fw_ps_map;
480
481 /* AP-mode - a bitmap of links currently in PS mode in mac80211 */
482 unsigned long ap_ps_map;
483
484 /* Quirks of specific hardware revisions */
485 unsigned int quirks;
486
487 /* Platform limitations */
488 unsigned int platform_quirks;
489
490 /* number of currently active RX BA sessions */
491 int ba_rx_session_count;
492
493 /* AP-mode - number of currently connected stations */
494 int active_sta_count;
495
496 /* last wlvif we transmitted from */
497 struct wl12xx_vif *last_wlvif;
498
499 /* work to fire when Tx is stuck */
500 struct delayed_work tx_watchdog_work;
501};
502
503struct wl1271_station { 282struct wl1271_station {
504 u8 hlid; 283 u8 hlid;
505}; 284};
@@ -605,6 +384,9 @@ struct wl12xx_vif {
605 struct work_struct rx_streaming_disable_work; 384 struct work_struct rx_streaming_disable_work;
606 struct timer_list rx_streaming_timer; 385 struct timer_list rx_streaming_timer;
607 386
387 /* does the current role use GEM for encryption (AP or STA) */
388 bool is_gem;
389
608 /* 390 /*
609 * This struct must be last! 391 * This struct must be last!
610 * data that has to be saved acrossed reconfigs (e.g. recovery) 392 * data that has to be saved acrossed reconfigs (e.g. recovery)
@@ -679,17 +461,6 @@ size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen);
679#define HW_BG_RATES_MASK 0xffff 461#define HW_BG_RATES_MASK 0xffff
680#define HW_HT_RATES_OFFSET 16 462#define HW_HT_RATES_OFFSET 16
681 463
682/* Quirks */
683
684/* Each RX/TX transaction requires an end-of-transaction transfer */
685#define WL12XX_QUIRK_END_OF_TRANSACTION BIT(0)
686
687/* wl127x and SPI don't support SDIO block size alignment */
688#define WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT BIT(2)
689
690/* Older firmwares did not implement the FW logger over bus feature */
691#define WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED BIT(4)
692
693#define WL12XX_HW_BLOCK_SIZE 256 464#define WL12XX_HW_BLOCK_SIZE 256
694 465
695#endif 466#endif
diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/ti/wlcore/wl12xx_80211.h
index 22b0bc98d7b5..22b0bc98d7b5 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_80211.h
+++ b/drivers/net/wireless/ti/wlcore/wl12xx_80211.h
diff --git a/drivers/net/wireless/wl12xx/wl12xx_platform_data.c b/drivers/net/wireless/ti/wlcore/wl12xx_platform_data.c
index 998e95895f9d..998e95895f9d 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_platform_data.c
+++ b/drivers/net/wireless/ti/wlcore/wl12xx_platform_data.c
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
new file mode 100644
index 000000000000..39f9fadfebd9
--- /dev/null
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -0,0 +1,448 @@
1/*
2 * This file is part of wlcore
3 *
4 * Copyright (C) 2011 Texas Instruments Inc.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#ifndef __WLCORE_H__
23#define __WLCORE_H__
24
25#include <linux/platform_device.h>
26
27#include "wl12xx.h"
28#include "event.h"
29
30/* The maximum number of Tx descriptors in all chip families */
31#define WLCORE_MAX_TX_DESCRIPTORS 32
32
33/* forward declaration */
34struct wl1271_tx_hw_descr;
35enum wl_rx_buf_align;
36
37struct wlcore_ops {
38 int (*identify_chip)(struct wl1271 *wl);
39 int (*identify_fw)(struct wl1271 *wl);
40 int (*boot)(struct wl1271 *wl);
41 void (*trigger_cmd)(struct wl1271 *wl, int cmd_box_addr,
42 void *buf, size_t len);
43 void (*ack_event)(struct wl1271 *wl);
44 u32 (*calc_tx_blocks)(struct wl1271 *wl, u32 len, u32 spare_blks);
45 void (*set_tx_desc_blocks)(struct wl1271 *wl,
46 struct wl1271_tx_hw_descr *desc,
47 u32 blks, u32 spare_blks);
48 void (*set_tx_desc_data_len)(struct wl1271 *wl,
49 struct wl1271_tx_hw_descr *desc,
50 struct sk_buff *skb);
51 enum wl_rx_buf_align (*get_rx_buf_align)(struct wl1271 *wl,
52 u32 rx_desc);
53 void (*prepare_read)(struct wl1271 *wl, u32 rx_desc, u32 len);
54 u32 (*get_rx_packet_len)(struct wl1271 *wl, void *rx_data,
55 u32 data_len);
56 void (*tx_delayed_compl)(struct wl1271 *wl);
57 void (*tx_immediate_compl)(struct wl1271 *wl);
58 int (*hw_init)(struct wl1271 *wl);
59 int (*init_vif)(struct wl1271 *wl, struct wl12xx_vif *wlvif);
60 u32 (*sta_get_ap_rate_mask)(struct wl1271 *wl,
61 struct wl12xx_vif *wlvif);
62 s8 (*get_pg_ver)(struct wl1271 *wl);
63 void (*get_mac)(struct wl1271 *wl);
64};
65
66enum wlcore_partitions {
67 PART_DOWN,
68 PART_WORK,
69 PART_BOOT,
70 PART_DRPW,
71 PART_TOP_PRCM_ELP_SOC,
72 PART_PHY_INIT,
73
74 PART_TABLE_LEN,
75};
76
77struct wlcore_partition {
78 u32 size;
79 u32 start;
80};
81
82struct wlcore_partition_set {
83 struct wlcore_partition mem;
84 struct wlcore_partition reg;
85 struct wlcore_partition mem2;
86 struct wlcore_partition mem3;
87};
88
89enum wlcore_registers {
90 /* register addresses, used with partition translation */
91 REG_ECPU_CONTROL,
92 REG_INTERRUPT_NO_CLEAR,
93 REG_INTERRUPT_ACK,
94 REG_COMMAND_MAILBOX_PTR,
95 REG_EVENT_MAILBOX_PTR,
96 REG_INTERRUPT_TRIG,
97 REG_INTERRUPT_MASK,
98 REG_PC_ON_RECOVERY,
99 REG_CHIP_ID_B,
100 REG_CMD_MBOX_ADDRESS,
101
102 /* data access memory addresses, used with partition translation */
103 REG_SLV_MEM_DATA,
104 REG_SLV_REG_DATA,
105
106 /* raw data access memory addresses */
107 REG_RAW_FW_STATUS_ADDR,
108
109 REG_TABLE_LEN,
110};
111
112struct wl1271 {
113 struct ieee80211_hw *hw;
114 bool mac80211_registered;
115
116 struct device *dev;
117
118 void *if_priv;
119
120 struct wl1271_if_operations *if_ops;
121
122 void (*set_power)(bool enable);
123 int irq;
124 int ref_clock;
125
126 spinlock_t wl_lock;
127
128 enum wl1271_state state;
129 enum wl12xx_fw_type fw_type;
130 bool plt;
131 u8 last_vif_count;
132 struct mutex mutex;
133
134 unsigned long flags;
135
136 struct wlcore_partition_set curr_part;
137
138 struct wl1271_chip chip;
139
140 int cmd_box_addr;
141
142 u8 *fw;
143 size_t fw_len;
144 void *nvs;
145 size_t nvs_len;
146
147 s8 hw_pg_ver;
148
149 /* address read from the fuse ROM */
150 u32 fuse_oui_addr;
151 u32 fuse_nic_addr;
152
153 /* we have up to 2 MAC addresses */
154 struct mac_address addresses[2];
155 int channel;
156 u8 system_hlid;
157
158 unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
159 unsigned long roles_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
160 unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
161 unsigned long rate_policies_map[
162 BITS_TO_LONGS(WL12XX_MAX_RATE_POLICIES)];
163
164 struct list_head wlvif_list;
165
166 u8 sta_count;
167 u8 ap_count;
168
169 struct wl1271_acx_mem_map *target_mem_map;
170
171 /* Accounting for allocated / available TX blocks on HW */
172 u32 tx_blocks_freed;
173 u32 tx_blocks_available;
174 u32 tx_allocated_blocks;
175 u32 tx_results_count;
176
177 /* Accounting for allocated / available Tx packets in HW */
178 u32 tx_pkts_freed[NUM_TX_QUEUES];
179 u32 tx_allocated_pkts[NUM_TX_QUEUES];
180
181 /* Transmitted TX packets counter for chipset interface */
182 u32 tx_packets_count;
183
184 /* Time-offset between host and chipset clocks */
185 s64 time_offset;
186
187 /* Frames scheduled for transmission, not handled yet */
188 int tx_queue_count[NUM_TX_QUEUES];
189 long stopped_queues_map;
190
191 /* Frames received, not handled yet by mac80211 */
192 struct sk_buff_head deferred_rx_queue;
193
194 /* Frames sent, not returned yet to mac80211 */
195 struct sk_buff_head deferred_tx_queue;
196
197 struct work_struct tx_work;
198 struct workqueue_struct *freezable_wq;
199
200 /* Pending TX frames */
201 unsigned long tx_frames_map[BITS_TO_LONGS(WLCORE_MAX_TX_DESCRIPTORS)];
202 struct sk_buff *tx_frames[WLCORE_MAX_TX_DESCRIPTORS];
203 int tx_frames_cnt;
204
205 /* FW Rx counter */
206 u32 rx_counter;
207
208 /* Rx memory pool address */
209 struct wl1271_rx_mem_pool_addr rx_mem_pool_addr;
210
211 /* Intermediate buffer, used for packet aggregation */
212 u8 *aggr_buf;
213
214 /* Reusable dummy packet template */
215 struct sk_buff *dummy_packet;
216
217 /* Network stack work */
218 struct work_struct netstack_work;
219
220 /* FW log buffer */
221 u8 *fwlog;
222
223 /* Number of valid bytes in the FW log buffer */
224 ssize_t fwlog_size;
225
226 /* Sysfs FW log entry readers wait queue */
227 wait_queue_head_t fwlog_waitq;
228
229 /* Hardware recovery work */
230 struct work_struct recovery_work;
231
232 /* Pointer that holds DMA-friendly block for the mailbox */
233 struct event_mailbox *mbox;
234
235 /* The mbox event mask */
236 u32 event_mask;
237
238 /* Mailbox pointers */
239 u32 mbox_ptr[2];
240
241 /* Are we currently scanning */
242 struct ieee80211_vif *scan_vif;
243 struct wl1271_scan scan;
244 struct delayed_work scan_complete_work;
245
246 bool sched_scanning;
247
248 /* The current band */
249 enum ieee80211_band band;
250
251 struct completion *elp_compl;
252 struct delayed_work elp_work;
253
254 /* in dBm */
255 int power_level;
256
257 struct wl1271_stats stats;
258
259 __le32 buffer_32;
260 u32 buffer_cmd;
261 u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
262
263 struct wl_fw_status *fw_status;
264 struct wl1271_tx_hw_res_if *tx_res_if;
265
266 /* Current chipset configuration */
267 struct wlcore_conf conf;
268
269 bool sg_enabled;
270
271 bool enable_11a;
272
273 /* Most recently reported noise in dBm */
274 s8 noise;
275
276 /* bands supported by this instance of wl12xx */
277 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
278
279 int tcxo_clock;
280
281 /*
282 * wowlan trigger was configured during suspend.
283 * (currently, only "ANY" trigger is supported)
284 */
285 bool wow_enabled;
286 bool irq_wake_enabled;
287
288 /*
289 * AP-mode - links indexed by HLID. The global and broadcast links
290 * are always active.
291 */
292 struct wl1271_link links[WL12XX_MAX_LINKS];
293
294 /* AP-mode - a bitmap of links currently in PS mode according to FW */
295 u32 ap_fw_ps_map;
296
297 /* AP-mode - a bitmap of links currently in PS mode in mac80211 */
298 unsigned long ap_ps_map;
299
300 /* Quirks of specific hardware revisions */
301 unsigned int quirks;
302
303 /* Platform limitations */
304 unsigned int platform_quirks;
305
306 /* number of currently active RX BA sessions */
307 int ba_rx_session_count;
308
309 /* AP-mode - number of currently connected stations */
310 int active_sta_count;
311
312 /* last wlvif we transmitted from */
313 struct wl12xx_vif *last_wlvif;
314
315 /* work to fire when Tx is stuck */
316 struct delayed_work tx_watchdog_work;
317
318 struct wlcore_ops *ops;
319 /* pointer to the lower driver partition table */
320 const struct wlcore_partition_set *ptable;
321 /* pointer to the lower driver register table */
322 const int *rtable;
323 /* name of the firmwares to load - for PLT, single role, multi-role */
324 const char *plt_fw_name;
325 const char *sr_fw_name;
326 const char *mr_fw_name;
327
328 /* per-chip-family private structure */
329 void *priv;
330
331 /* number of TX descriptors the HW supports. */
332 u32 num_tx_desc;
333
334 /* spare Tx blocks for normal/GEM operating modes */
335 u32 normal_tx_spare;
336 u32 gem_tx_spare;
337
338 /* translate HW Tx rates to standard rate-indices */
339 const u8 **band_rate_to_idx;
340
341 /* size of table for HW rates that can be received from chip */
342 u8 hw_tx_rate_tbl_size;
343
344 /* this HW rate and below are considered HT rates for this chip */
345 u8 hw_min_ht_rate;
346
347 /* HW HT (11n) capabilities */
348 struct ieee80211_sta_ht_cap ht_cap;
349
350 /* size of the private FW status data */
351 size_t fw_status_priv_len;
352};
353
354int __devinit wlcore_probe(struct wl1271 *wl, struct platform_device *pdev);
355int __devexit wlcore_remove(struct platform_device *pdev);
356struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size);
357int wlcore_free_hw(struct wl1271 *wl);
358
359/* Firmware image load chunk size */
360#define CHUNK_SIZE 16384
361
362/* Quirks */
363
364/* Each RX/TX transaction requires an end-of-transaction transfer */
365#define WLCORE_QUIRK_END_OF_TRANSACTION BIT(0)
366
367/* wl127x and SPI don't support SDIO block size alignment */
368#define WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN BIT(2)
369
370/* means aggregated Rx packets are aligned to a SDIO block */
371#define WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN BIT(3)
372
373/* Older firmwares did not implement the FW logger over bus feature */
374#define WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED BIT(4)
375
376/* Older firmwares use an old NVS format */
377#define WLCORE_QUIRK_LEGACY_NVS BIT(5)
378
379/* Some firmwares may not support ELP */
380#define WLCORE_QUIRK_NO_ELP BIT(6)
381
382/* TODO: move to the lower drivers when all usages are abstracted */
383#define CHIP_ID_1271_PG10 (0x4030101)
384#define CHIP_ID_1271_PG20 (0x4030111)
385#define CHIP_ID_1283_PG10 (0x05030101)
386#define CHIP_ID_1283_PG20 (0x05030111)
387
388/* TODO: move all these common registers and values elsewhere */
389#define HW_ACCESS_ELP_CTRL_REG 0x1FFFC
390
391/* ELP register commands */
392#define ELPCTRL_WAKE_UP 0x1
393#define ELPCTRL_WAKE_UP_WLAN_READY 0x5
394#define ELPCTRL_SLEEP 0x0
395/* ELP WLAN_READY bit */
396#define ELPCTRL_WLAN_READY 0x2
397
398/*************************************************************************
399
400 Interrupt Trigger Register (Host -> WiLink)
401
402**************************************************************************/
403
404/* Hardware to Embedded CPU Interrupts - first 32-bit register set */
405
406/*
407 * The host sets this bit to inform the Wlan
408 * FW that a TX packet is in the XFER
409 * Buffer #0.
410 */
411#define INTR_TRIG_TX_PROC0 BIT(2)
412
413/*
414 * The host sets this bit to inform the FW
415 * that it read a packet from RX XFER
416 * Buffer #0.
417 */
418#define INTR_TRIG_RX_PROC0 BIT(3)
419
420#define INTR_TRIG_DEBUG_ACK BIT(4)
421
422#define INTR_TRIG_STATE_CHANGED BIT(5)
423
424/* Hardware to Embedded CPU Interrupts - second 32-bit register set */
425
426/*
427 * The host sets this bit to inform the FW
428 * that it read a packet from RX XFER
429 * Buffer #1.
430 */
431#define INTR_TRIG_RX_PROC1 BIT(17)
432
433/*
434 * The host sets this bit to inform the Wlan
435 * hardware that a TX packet is in the XFER
436 * Buffer #1.
437 */
438#define INTR_TRIG_TX_PROC1 BIT(18)
439
440#define ACX_SLV_SOFT_RESET_BIT BIT(1)
441#define SOFT_RESET_MAX_TIME 1000000
442#define SOFT_RESET_STALL_TIME 1000
443
444#define ECPU_CONTROL_HALT 0x00000101
445
446#define WELP_ARM_COMMAND_VAL 0x4
447
448#endif /* __WLCORE_H__ */
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
deleted file mode 100644
index af08c8609c63..000000000000
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ /dev/null
@@ -1,48 +0,0 @@
1menuconfig WL12XX_MENU
2 tristate "TI wl12xx driver support"
3 depends on MAC80211 && EXPERIMENTAL
4 ---help---
5 This will enable TI wl12xx driver support for the following chips:
6 wl1271, wl1273, wl1281 and wl1283.
7 The drivers make use of the mac80211 stack.
8
9config WL12XX
10 tristate "TI wl12xx support"
11 depends on WL12XX_MENU && GENERIC_HARDIRQS
12 depends on INET
13 select FW_LOADER
14 ---help---
15 This module adds support for wireless adapters based on TI wl1271 and
16 TI wl1273 chipsets. This module does *not* include support for wl1251.
17 For wl1251 support, use the separate homonymous driver instead.
18
19 If you choose to build a module, it will be called wl12xx. Say N if
20 unsure.
21
22config WL12XX_SPI
23 tristate "TI wl12xx SPI support"
24 depends on WL12XX && SPI_MASTER
25 select CRC7
26 ---help---
27 This module adds support for the SPI interface of adapters using
28 TI wl12xx chipsets. Select this if your platform is using
29 the SPI bus.
30
31 If you choose to build a module, it'll be called wl12xx_spi.
32 Say N if unsure.
33
34config WL12XX_SDIO
35 tristate "TI wl12xx SDIO support"
36 depends on WL12XX && MMC
37 ---help---
38 This module adds support for the SDIO interface of adapters using
39 TI wl12xx chipsets. Select this if your platform is using
40 the SDIO bus.
41
42 If you choose to build a module, it'll be called wl12xx_sdio.
43 Say N if unsure.
44
45config WL12XX_PLATFORM_DATA
46 bool
47 depends on WL12XX_SDIO != n || WL1251_SDIO != n
48 default y
diff --git a/drivers/net/wireless/wl12xx/Makefile b/drivers/net/wireless/wl12xx/Makefile
deleted file mode 100644
index 98f289c907a9..000000000000
--- a/drivers/net/wireless/wl12xx/Makefile
+++ /dev/null
@@ -1,15 +0,0 @@
1wl12xx-objs = main.o cmd.o io.o event.o tx.o rx.o ps.o acx.o \
2 boot.o init.o debugfs.o scan.o
3
4wl12xx_spi-objs = spi.o
5wl12xx_sdio-objs = sdio.o
6
7wl12xx-$(CONFIG_NL80211_TESTMODE) += testmode.o
8obj-$(CONFIG_WL12XX) += wl12xx.o
9obj-$(CONFIG_WL12XX_SPI) += wl12xx_spi.o
10obj-$(CONFIG_WL12XX_SDIO) += wl12xx_sdio.o
11
12# small builtin driver bit
13obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx_platform_data.o
14
15ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/wl12xx/boot.c b/drivers/net/wireless/wl12xx/boot.c
deleted file mode 100644
index 954101d03f06..000000000000
--- a/drivers/net/wireless/wl12xx/boot.c
+++ /dev/null
@@ -1,786 +0,0 @@
1/*
2 * This file is part of wl1271
3 *
4 * Copyright (C) 2008-2010 Nokia Corporation
5 *
6 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#include <linux/slab.h>
25#include <linux/wl12xx.h>
26#include <linux/export.h>
27
28#include "debug.h"
29#include "acx.h"
30#include "reg.h"
31#include "boot.h"
32#include "io.h"
33#include "event.h"
34#include "rx.h"
35
/*
 * OR @flag (e.g. ECPU_CONTROL_HALT) into the embedded-CPU control
 * register via read-modify-write, preserving any bits already set.
 */
36static void wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag)
37{
38	u32 cpu_ctrl;
39
40	/* 10.5.0 run the firmware (I) */
41	cpu_ctrl = wl1271_read32(wl, ACX_REG_ECPU_CONTROL);
42
43	/* 10.5.1 run the firmware (II) */
44	cpu_ctrl |= flag;
45	wl1271_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl);
46}
47
/*
 * Derive quirk flags from the parsed firmware version in wl->chip.fw_ver.
 * Currently only decides whether fw-log routing to the host is available:
 * disabled for old STA firmwares and for all AP firmwares.
 */
48static unsigned int wl12xx_get_fw_ver_quirks(struct wl1271 *wl)
49{
50	unsigned int quirks = 0;
51	unsigned int *fw_ver = wl->chip.fw_ver;
52
53	/* Only new station firmwares support routing fw logs to the host */
54	if ((fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_STA) &&
55	    (fw_ver[FW_VER_MINOR] < FW_VER_MINOR_FWLOG_STA_MIN))
56		quirks |= WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED;
57
58	/* This feature is not yet supported for AP mode */
59	if (fw_ver[FW_VER_IF_TYPE] == FW_VER_IF_TYPE_AP)
60		quirks |= WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED;
61
62	return quirks;
63}
64
/*
 * Parse the numeric fields of wl->chip.fw_ver_str into wl->chip.fw_ver[].
 * On a malformed string the array is zeroed and no quirks are applied.
 */
65static void wl1271_parse_fw_ver(struct wl1271 *wl)
66{
67	int ret;
68
	/*
	 * NOTE(review): the "+ 4" skips a fixed 4-character prefix of the
	 * version string (presumably "Rev ") — confirm against the firmware
	 * version format before relying on this offset.
	 */
69	ret = sscanf(wl->chip.fw_ver_str + 4, "%u.%u.%u.%u.%u",
70		     &wl->chip.fw_ver[0], &wl->chip.fw_ver[1],
71		     &wl->chip.fw_ver[2], &wl->chip.fw_ver[3],
72		     &wl->chip.fw_ver[4]);
73
74	if (ret != 5) {
75		wl1271_warning("fw version incorrect value");
76		memset(wl->chip.fw_ver, 0, sizeof(wl->chip.fw_ver));
77		return;
78	}
79
80	/* Check if any quirks are needed with older fw versions */
81	wl->quirks |= wl12xx_get_fw_ver_quirks(wl);
82}
83
/*
 * Read the firmware's static data block from the command mailbox, copy
 * out the version string (always NUL-terminated locally) and parse it.
 * Must run after wl->cmd_box_addr has been read from the chip.
 */
84static void wl1271_boot_fw_version(struct wl1271 *wl)
85{
86	struct wl1271_static_data static_data;
87
88	wl1271_read(wl, wl->cmd_box_addr, &static_data, sizeof(static_data),
89		    false);
90
91	strncpy(wl->chip.fw_ver_str, static_data.fw_version,
92		sizeof(wl->chip.fw_ver_str));
93
94	/* make sure the string is NULL-terminated */
95	wl->chip.fw_ver_str[sizeof(wl->chip.fw_ver_str) - 1] = '\0';
96
97	wl1271_parse_fw_ver(wl);
98}
99
/*
 * Upload one firmware image section of @fw_data_len bytes from @buf to
 * chip address @dest, in CHUNK_SIZE pieces, moving the download memory
 * partition window along as needed. Returns 0 or a negative errno.
 */
100static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
101					     size_t fw_data_len, u32 dest)
102{
103	struct wl1271_partition_set partition;
104	int addr, chunk_num, partition_limit;
105	u8 *p, *chunk;
106
107	/* whal_FwCtrl_LoadFwImageSm() */
108
109	wl1271_debug(DEBUG_BOOT, "starting firmware upload");
110
111	wl1271_debug(DEBUG_BOOT, "fw_data_len %zd chunk_size %d",
112		     fw_data_len, CHUNK_SIZE);
113
114	if ((fw_data_len % 4) != 0) {
115		wl1271_error("firmware length not multiple of four");
116		return -EIO;
117	}
118
	/*
	 * Copy through a kmalloc'd bounce buffer rather than writing @buf
	 * directly — presumably so the bus write sees a DMA-safe buffer;
	 * TODO confirm against the wl1271_write() requirements.
	 */
119	chunk = kmalloc(CHUNK_SIZE, GFP_KERNEL);
120	if (!chunk) {
121		wl1271_error("allocation for firmware upload chunk failed");
122		return -ENOMEM;
123	}
124
125	memcpy(&partition, &wl12xx_part_table[PART_DOWN], sizeof(partition));
126	partition.mem.start = dest;
127	wl1271_set_partition(wl, &partition);
128
129	/* 10.1 set partition limit and chunk num */
130	chunk_num = 0;
131	partition_limit = wl12xx_part_table[PART_DOWN].mem.size;
132
133	while (chunk_num < fw_data_len / CHUNK_SIZE) {
134		/* 10.2 update partition, if needed */
135		addr = dest + (chunk_num + 2) * CHUNK_SIZE;
136		if (addr > partition_limit) {
137			addr = dest + chunk_num * CHUNK_SIZE;
138			partition_limit = chunk_num * CHUNK_SIZE +
139				wl12xx_part_table[PART_DOWN].mem.size;
140			partition.mem.start = addr;
141			wl1271_set_partition(wl, &partition);
142		}
143
144		/* 10.3 upload the chunk */
145		addr = dest + chunk_num * CHUNK_SIZE;
146		p = buf + chunk_num * CHUNK_SIZE;
147		memcpy(chunk, p, CHUNK_SIZE);
148		wl1271_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x",
149			     p, addr);
150		wl1271_write(wl, addr, chunk, CHUNK_SIZE, false);
151
152		chunk_num++;
153	}
154
155	/* 10.4 upload the last chunk */
156	addr = dest + chunk_num * CHUNK_SIZE;
157	p = buf + chunk_num * CHUNK_SIZE;
158	memcpy(chunk, p, fw_data_len % CHUNK_SIZE);
159	wl1271_debug(DEBUG_BOOT, "uploading fw last chunk (%zd B) 0x%p to 0x%x",
160		     fw_data_len % CHUNK_SIZE, p, addr);
161	wl1271_write(wl, addr, chunk, fw_data_len % CHUNK_SIZE, false);
162
163	kfree(chunk);
164	return 0;
165}
166
/*
 * Walk the firmware image in wl->fw. Layout (all fields big-endian u32):
 * a chunk count, then for each chunk a destination address, a length,
 * and the payload bytes. Each chunk is uploaded via
 * wl1271_boot_upload_firmware_chunk(). Returns 0 or a negative errno.
 */
167static int wl1271_boot_upload_firmware(struct wl1271 *wl)
168{
169	u32 chunks, addr, len;
170	int ret = 0;
171	u8 *fw;
172
173	fw = wl->fw;
174	chunks = be32_to_cpup((__be32 *) fw);
175	fw += sizeof(u32);
176
177	wl1271_debug(DEBUG_BOOT, "firmware chunks to be uploaded: %u", chunks);
178
179	while (chunks--) {
180		addr = be32_to_cpup((__be32 *) fw);
181		fw += sizeof(u32);
182		len = be32_to_cpup((__be32 *) fw);
183		fw += sizeof(u32);
184
		/* sanity bound on a single section — guards a corrupt image */
185		if (len > 300000) {
186			wl1271_info("firmware chunk too long: %u", len);
187			return -EINVAL;
188		}
189		wl1271_debug(DEBUG_BOOT, "chunk %d addr 0x%x len %u",
190			     chunks, addr, len);
191		ret = wl1271_boot_upload_firmware_chunk(wl, fw, len, addr);
192		if (ret != 0)
193			break;
194		fw += len;
195	}
196
197	return ret;
198}
199
/*
 * Validate and upload the NVS (calibration/configuration) data in wl->nvs.
 * The wl128x and wl127x NVS file layouts differ; on a size mismatch the
 * buffer is freed and -EILSEQ returned. The current MAC address is patched
 * into the NVS before upload. The leading burst-write section is replayed
 * as register writes; the aligned table section is then copied to the
 * chip's command mailbox. Returns 0 or a negative errno.
 */
200static int wl1271_boot_upload_nvs(struct wl1271 *wl)
201{
202	size_t nvs_len, burst_len;
203	int i;
204	u32 dest_addr, val;
205	u8 *nvs_ptr, *nvs_aligned;
206
207	if (wl->nvs == NULL)
208		return -ENODEV;
209
210	if (wl->chip.id == CHIP_ID_1283_PG20) {
211		struct wl128x_nvs_file *nvs = (struct wl128x_nvs_file *)wl->nvs;
212
213		if (wl->nvs_len == sizeof(struct wl128x_nvs_file)) {
214			if (nvs->general_params.dual_mode_select)
215				wl->enable_11a = true;
216		} else {
217			wl1271_error("nvs size is not as expected: %zu != %zu",
218				     wl->nvs_len,
219				     sizeof(struct wl128x_nvs_file));
220			kfree(wl->nvs);
221			wl->nvs = NULL;
222			wl->nvs_len = 0;
223			return -EILSEQ;
224		}
225
226		/* only the first part of the NVS needs to be uploaded */
227		nvs_len = sizeof(nvs->nvs);
228		nvs_ptr = (u8 *)nvs->nvs;
229
230	} else {
231		struct wl1271_nvs_file *nvs =
232			(struct wl1271_nvs_file *)wl->nvs;
233		/*
234		 * FIXME: the LEGACY NVS image support (NVS's missing the 5GHz
235		 * band configurations) can be removed when those NVS files stop
236		 * floating around.
237		 */
238		if (wl->nvs_len == sizeof(struct wl1271_nvs_file) ||
239		    wl->nvs_len == WL1271_INI_LEGACY_NVS_FILE_SIZE) {
240			if (nvs->general_params.dual_mode_select)
241				wl->enable_11a = true;
242		}
243
		/* legacy-sized files are only accepted for 2.4GHz-only use */
244		if (wl->nvs_len != sizeof(struct wl1271_nvs_file) &&
245		    (wl->nvs_len != WL1271_INI_LEGACY_NVS_FILE_SIZE ||
246		     wl->enable_11a)) {
247			wl1271_error("nvs size is not as expected: %zu != %zu",
248				     wl->nvs_len, sizeof(struct wl1271_nvs_file));
249			kfree(wl->nvs);
250			wl->nvs = NULL;
251			wl->nvs_len = 0;
252			return -EILSEQ;
253		}
254
255		/* only the first part of the NVS needs to be uploaded */
256		nvs_len = sizeof(nvs->nvs);
257		nvs_ptr = (u8 *) nvs->nvs;
258	}
259
260	/* update current MAC address to NVS */
261	nvs_ptr[11] = wl->addresses[0].addr[0];
262	nvs_ptr[10] = wl->addresses[0].addr[1];
263	nvs_ptr[6] = wl->addresses[0].addr[2];
264	nvs_ptr[5] = wl->addresses[0].addr[3];
265	nvs_ptr[4] = wl->addresses[0].addr[4];
266	nvs_ptr[3] = wl->addresses[0].addr[5];
267
268	/*
269	 * Layout before the actual NVS tables:
270	 * 1 byte : burst length.
271	 * 2 bytes: destination address.
272	 * n bytes: data to burst copy.
273	 *
274	 * This is ended by a 0 length, then the NVS tables.
275	 */
276
277	/* FIXME: Do we need to check here whether the LSB is 1? */
278	while (nvs_ptr[0]) {
279		burst_len = nvs_ptr[0];
280		dest_addr = (nvs_ptr[1] & 0xfe) | ((u32)(nvs_ptr[2] << 8));
281
282		/*
283		 * Due to our new wl1271_translate_reg_addr function,
284		 * we need to add the REGISTER_BASE to the destination
285		 */
286		dest_addr += REGISTERS_BASE;
287
288		/* We move our pointer to the data */
289		nvs_ptr += 3;
290
291		for (i = 0; i < burst_len; i++) {
			/* bounds check before reading 4 bytes (little-endian) */
292			if (nvs_ptr + 3 >= (u8 *) wl->nvs + nvs_len)
293				goto out_badnvs;
294
295			val = (nvs_ptr[0] | (nvs_ptr[1] << 8)
296			       | (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24));
297
298			wl1271_debug(DEBUG_BOOT,
299				     "nvs burst write 0x%x: 0x%x",
300				     dest_addr, val);
301			wl1271_write32(wl, dest_addr, val);
302
303			nvs_ptr += 4;
304			dest_addr += 4;
305		}
306
307		if (nvs_ptr >= (u8 *) wl->nvs + nvs_len)
308			goto out_badnvs;
309	}
310
311	/*
312	 * We've reached the first zero length, the first NVS table
313	 * is located at an aligned offset which is at least 7 bytes further.
314	 * NOTE: The wl->nvs->nvs element must be first, in order to
315	 * simplify the casting, we assume it is at the beginning of
316	 * the wl->nvs structure.
317	 */
318	nvs_ptr = (u8 *)wl->nvs +
319			ALIGN(nvs_ptr - (u8 *)wl->nvs + 7, 4);
320
321	if (nvs_ptr >= (u8 *) wl->nvs + nvs_len)
322		goto out_badnvs;
323
324	nvs_len -= nvs_ptr - (u8 *)wl->nvs;
325
326	/* Now we must set the partition correctly */
327	wl1271_set_partition(wl, &wl12xx_part_table[PART_WORK]);
328
329	/* Copy the NVS tables to a new block to ensure alignment */
330	nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL);
331	if (!nvs_aligned)
332		return -ENOMEM;
333
334	/* And finally we upload the NVS tables */
335	wl1271_write(wl, CMD_MBOX_ADDRESS, nvs_aligned, nvs_len, false);
336
337	kfree(nvs_aligned);
338	return 0;
339
340out_badnvs:
341	wl1271_error("nvs data is malformed");
342	return -EILSEQ;
343}
344
/*
 * Enable host-side interrupt handling, then unmask the firmware's
 * interrupt sources (all except WL1271_INTR_MASK) and program HI_CFG
 * with its default value.
 */
345static void wl1271_boot_enable_interrupts(struct wl1271 *wl)
346{
347	wl1271_enable_interrupts(wl);
348	wl1271_write32(wl, ACX_REG_INTERRUPT_MASK,
349		       WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));
350	wl1271_write32(wl, HI_CFG, HI_CFG_DEF_VAL);
351}
352
/*
 * Soft-reset the chip: set the self-clearing reset bit, poll until the
 * hardware clears it (up to SOFT_RESET_MAX_TIME us, sleeping
 * SOFT_RESET_STALL_TIME us per iteration), then disable Rx/Tx and
 * auto-calibration. Returns 0 on success, -1 on timeout.
 */
353static int wl1271_boot_soft_reset(struct wl1271 *wl)
354{
355	unsigned long timeout;
356	u32 boot_data;
357
358	/* perform soft reset */
359	wl1271_write32(wl, ACX_REG_SLV_SOFT_RESET, ACX_SLV_SOFT_RESET_BIT);
360
361	/* SOFT_RESET is self clearing */
362	timeout = jiffies + usecs_to_jiffies(SOFT_RESET_MAX_TIME);
363	while (1) {
364		boot_data = wl1271_read32(wl, ACX_REG_SLV_SOFT_RESET);
365		wl1271_debug(DEBUG_BOOT, "soft reset bootdata 0x%x", boot_data);
366		if ((boot_data & ACX_SLV_SOFT_RESET_BIT) == 0)
367			break;
368
369		if (time_after(jiffies, timeout)) {
370			/* 1.2 check pWhalBus->uSelfClearTime if the
371			 * timeout was reached */
372			wl1271_error("soft reset timeout");
373			return -1;
374		}
375
376		udelay(SOFT_RESET_STALL_TIME);
377	}
378
379	/* disable Rx/Tx */
380	wl1271_write32(wl, ENABLE, 0x0);
381
382	/* disable auto calibration on start*/
383	wl1271_write32(wl, SPARE_A2, 0xffff);
384
385	return 0;
386}
387
/*
 * Start the just-uploaded firmware: release the embedded CPU, verify the
 * chip id, poll for the INIT_COMPLETE interrupt (INIT_LOOP tries with
 * INIT_LOOP_DELAY us between them), read the command/event mailbox
 * addresses, parse the firmware version and configure the event mailbox
 * mask. Returns 0 on success or a negative errno.
 */
388static int wl1271_boot_run_firmware(struct wl1271 *wl)
389{
390	int loop, ret;
391	u32 chip_id, intr;
392
393	wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT);
394
395	chip_id = wl1271_read32(wl, CHIP_ID_B);
396
397	wl1271_debug(DEBUG_BOOT, "chip id after firmware boot: 0x%x", chip_id);
398
399	if (chip_id != wl->chip.id) {
400		wl1271_error("chip id doesn't match after firmware boot");
401		return -EIO;
402	}
403
404	/* wait for init to complete */
405	loop = 0;
406	while (loop++ < INIT_LOOP) {
407		udelay(INIT_LOOP_DELAY);
408		intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
409
		/* all-ones reads back when the bus/chip is unreachable */
410		if (intr == 0xffffffff) {
411			wl1271_error("error reading hardware complete "
412				     "init indication");
413			return -EIO;
414		}
415		/* check that ACX_INTR_INIT_COMPLETE is enabled */
416		else if (intr & WL1271_ACX_INTR_INIT_COMPLETE) {
417			wl1271_write32(wl, ACX_REG_INTERRUPT_ACK,
418				       WL1271_ACX_INTR_INIT_COMPLETE);
419			break;
420		}
421	}
422
423	if (loop > INIT_LOOP) {
424		wl1271_error("timeout waiting for the hardware to "
425			     "complete initialization");
426		return -EIO;
427	}
428
429	/* get hardware config command mail box */
430	wl->cmd_box_addr = wl1271_read32(wl, REG_COMMAND_MAILBOX_PTR);
431
432	/* get hardware config event mail box */
433	wl->event_box_addr = wl1271_read32(wl, REG_EVENT_MAILBOX_PTR);
434
435	/* set the working partition to its "running" mode offset */
436	wl1271_set_partition(wl, &wl12xx_part_table[PART_WORK]);
437
438	wl1271_debug(DEBUG_MAILBOX, "cmd_box_addr 0x%x event_box_addr 0x%x",
439		     wl->cmd_box_addr, wl->event_box_addr);
440
441	wl1271_boot_fw_version(wl);
442
443	/*
444	 * in case of full asynchronous mode the firmware event must be
445	 * ready to receive event from the command mailbox
446	 */
447
448	/* unmask required mbox events */
449	wl->event_mask = BSS_LOSE_EVENT_ID |
450		SCAN_COMPLETE_EVENT_ID |
451		ROLE_STOP_COMPLETE_EVENT_ID |
452		RSSI_SNR_TRIGGER_0_EVENT_ID |
453		PSPOLL_DELIVERY_FAILURE_EVENT_ID |
454		SOFT_GEMINI_SENSE_EVENT_ID |
455		PERIODIC_SCAN_REPORT_EVENT_ID |
456		PERIODIC_SCAN_COMPLETE_EVENT_ID |
457		DUMMY_PACKET_EVENT_ID |
458		PEER_REMOVE_COMPLETE_EVENT_ID |
459		BA_SESSION_RX_CONSTRAINT_EVENT_ID |
460		REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID |
461		INACTIVE_STA_EVENT_ID |
462		MAX_TX_RETRY_EVENT_ID |
463		CHANNEL_SWITCH_COMPLETE_EVENT_ID;
464
465	ret = wl1271_event_unmask(wl);
466	if (ret < 0) {
467		wl1271_error("EVENT mask setting failed");
468		return ret;
469	}
470
471	wl1271_event_mbox_config(wl);
472
473	/* firmware startup completed */
474	return 0;
475}
476
/*
 * Configure the chip's interrupt line for active-high polarity by
 * clearing the POLARITY_LOW bit in the top-level OCP polarity register.
 * Always returns 0.
 */
477static int wl1271_boot_write_irq_polarity(struct wl1271 *wl)
478{
479	u32 polarity;
480
481	polarity = wl1271_top_reg_read(wl, OCP_REG_POLARITY);
482
483	/* We use HIGH polarity, so unset the LOW bit */
484	polarity &= ~POLARITY_LOW;
485	wl1271_top_reg_write(wl, OCP_REG_POLARITY, polarity);
486
487	return 0;
488}
489
/*
 * Switch the wl128x main clock source from TCXO to FREF: mask the
 * relevant sys_clk_cfg bits via the spare register, request FREF and mux
 * the MCS/coex PLLs onto it, then wait 15 ms for the hardware to settle.
 * Returns 0 on success, -EFAULT if the spare register reads back as
 * all-ones (bus error).
 */
490static int wl128x_switch_tcxo_to_fref(struct wl1271 *wl)
491{
492	u16 spare_reg;
493
494	/* Mask bits [2] & [8:4] in the sys_clk_cfg register */
495	spare_reg = wl1271_top_reg_read(wl, WL_SPARE_REG);
496	if (spare_reg == 0xFFFF)
497		return -EFAULT;
498	spare_reg |= (BIT(3) | BIT(5) | BIT(6));
499	wl1271_top_reg_write(wl, WL_SPARE_REG, spare_reg);
500
501	/* Enable FREF_CLK_REQ & mux MCS and coex PLLs to FREF */
502	wl1271_top_reg_write(wl, SYS_CLK_CFG_REG,
503			     WL_CLK_REQ_TYPE_PG2 | MCS_PLL_CLK_SEL_FREF);
504
505	/* Delay execution for 15msec, to let the HW settle */
506	mdelay(15);
507
508	return 0;
509}
510
/* Return true unless the hardware reports TCXO clock detection failure. */
511static bool wl128x_is_tcxo_valid(struct wl1271 *wl)
512{
513	u16 tcxo_detection;
514
515	tcxo_detection = wl1271_top_reg_read(wl, TCXO_CLK_DETECT_REG);
516	if (tcxo_detection & TCXO_DET_FAILED)
517		return false;
518
519	return true;
520}
521
/* Return true unless the hardware reports FREF clock detection failure. */
522static bool wl128x_is_fref_valid(struct wl1271 *wl)
523{
524	u16 fref_detection;
525
526	fref_detection = wl1271_top_reg_read(wl, FREF_CLK_DETECT_REG);
527	if (fref_detection & FREF_CLK_DETECT_FAIL)
528		return false;
529
530	return true;
531}
532
/*
 * Program the MCS PLL M/N dividers and config register with fixed values.
 * Used for TCXO rates (16.8/33.6 MHz) that the automatic input-frequency
 * selection in wl128x_configure_mcs_pll() cannot express. Always returns 0.
 */
533static int wl128x_manually_configure_mcs_pll(struct wl1271 *wl)
534{
535	wl1271_top_reg_write(wl, MCS_PLL_M_REG, MCS_PLL_M_REG_VAL);
536	wl1271_top_reg_write(wl, MCS_PLL_N_REG, MCS_PLL_N_REG_VAL);
537	wl1271_top_reg_write(wl, MCS_PLL_CONFIG_REG, MCS_PLL_CONFIG_REG_VAL);
538
539	return 0;
540}
541
/*
 * Configure the wl128x MCS PLL for the selected clock source @clk.
 * Special-case TCXO rates go through the manual M/N path; otherwise the
 * input frequency field and enable bits are set in MCS_PLL_CONFIG_REG.
 * Returns 0 on success, -EFAULT on an all-ones (failed) register read.
 */
542static int wl128x_configure_mcs_pll(struct wl1271 *wl, int clk)
543{
544	u16 spare_reg;
545	u16 pll_config;
546	u8 input_freq;
547
548	/* Mask bits [3:1] in the sys_clk_cfg register */
549	spare_reg = wl1271_top_reg_read(wl, WL_SPARE_REG);
550	if (spare_reg == 0xFFFF)
551		return -EFAULT;
552	spare_reg |= BIT(2);
553	wl1271_top_reg_write(wl, WL_SPARE_REG, spare_reg);
554
555	/* Handle special cases of the TCXO clock */
556	if (wl->tcxo_clock == WL12XX_TCXOCLOCK_16_8 ||
557	    wl->tcxo_clock == WL12XX_TCXOCLOCK_33_6)
558		return wl128x_manually_configure_mcs_pll(wl);
559
560	/* Set the input frequency according to the selected clock source */
561	input_freq = (clk & 1) + 1;
562
563	pll_config = wl1271_top_reg_read(wl, MCS_PLL_CONFIG_REG);
564	if (pll_config == 0xFFFF)
565		return -EFAULT;
566	pll_config |= (input_freq << MCS_SEL_IN_FREQ_SHIFT);
567	pll_config |= MCS_PLL_ENABLE_HP;
568	wl1271_top_reg_write(wl, MCS_PLL_CONFIG_REG, pll_config);
569
570	return 0;
571}
572
573/*
574 * WL128x has two clocks input - TCXO and FREF.
575 * TCXO is the main clock of the device, while FREF is used to sync
576 * between the GPS and the cellular modem.
577 * In cases where TCXO is 32.736MHz or 16.368MHz, the FREF will be used
578 * as the WLAN/BT main clock.
579 */
580static int wl128x_boot_clk(struct wl1271 *wl, int *selected_clock)
581{
582 u16 sys_clk_cfg;
583
584 /* For XTAL-only modes, FREF will be used after switching from TCXO */
585 if (wl->ref_clock == WL12XX_REFCLOCK_26_XTAL ||
586 wl->ref_clock == WL12XX_REFCLOCK_38_XTAL) {
587 if (!wl128x_switch_tcxo_to_fref(wl))
588 return -EINVAL;
589 goto fref_clk;
590 }
591
592 /* Query the HW, to determine which clock source we should use */
593 sys_clk_cfg = wl1271_top_reg_read(wl, SYS_CLK_CFG_REG);
594 if (sys_clk_cfg == 0xFFFF)
595 return -EINVAL;
596 if (sys_clk_cfg & PRCM_CM_EN_MUX_WLAN_FREF)
597 goto fref_clk;
598
599 /* If TCXO is either 32.736MHz or 16.368MHz, switch to FREF */
600 if (wl->tcxo_clock == WL12XX_TCXOCLOCK_16_368 ||
601 wl->tcxo_clock == WL12XX_TCXOCLOCK_32_736) {
602 if (!wl128x_switch_tcxo_to_fref(wl))
603 return -EINVAL;
604 goto fref_clk;
605 }
606
607 /* TCXO clock is selected */
608 if (!wl128x_is_tcxo_valid(wl))
609 return -EINVAL;
610 *selected_clock = wl->tcxo_clock;
611 goto config_mcs_pll;
612
613fref_clk:
614 /* FREF clock is selected */
615 if (!wl128x_is_fref_valid(wl))
616 return -EINVAL;
617 *selected_clock = wl->ref_clock;
618
619config_mcs_pll:
620 return wl128x_configure_mcs_pll(wl, *selected_clock);
621}
622
/*
 * Configure the wl127x boot clock from wl->ref_clock: select the PLL
 * divider value, set the reference clock pad type/pull (or polarity for
 * the 19.2 MHz case) and force the wakeup-counter pause bits on.
 * Also applies the end-of-transaction quirk for pre-PG3 hardware.
 * Returns 0 on success, -EINVAL for an unsupported reference clock.
 */
623static int wl127x_boot_clk(struct wl1271 *wl)
624{
625	u32 pause;
626	u32 clk;
627
628	if (WL127X_PG_GET_MAJOR(wl->hw_pg_ver) < 3)
629		wl->quirks |= WL12XX_QUIRK_END_OF_TRANSACTION;
630
631	if (wl->ref_clock == CONF_REF_CLK_19_2_E ||
632	    wl->ref_clock == CONF_REF_CLK_38_4_E ||
633	    wl->ref_clock == CONF_REF_CLK_38_4_M_XTAL)
634		/* ref clk: 19.2/38.4/38.4-XTAL */
635		clk = 0x3;
636	else if (wl->ref_clock == CONF_REF_CLK_26_E ||
637		 wl->ref_clock == CONF_REF_CLK_52_E)
638		/* ref clk: 26/52 */
639		clk = 0x5;
640	else
641		return -EINVAL;
642
643	if (wl->ref_clock != CONF_REF_CLK_19_2_E) {
644		u16 val;
645		/* Set clock type (open drain) */
646		val = wl1271_top_reg_read(wl, OCP_REG_CLK_TYPE);
647		val &= FREF_CLK_TYPE_BITS;
648		wl1271_top_reg_write(wl, OCP_REG_CLK_TYPE, val);
649
650		/* Set clock pull mode (no pull) */
651		val = wl1271_top_reg_read(wl, OCP_REG_CLK_PULL);
652		val |= NO_PULL;
653		wl1271_top_reg_write(wl, OCP_REG_CLK_PULL, val);
654	} else {
655		u16 val;
656		/* Set clock polarity */
657		val = wl1271_top_reg_read(wl, OCP_REG_CLK_POLARITY);
658		val &= FREF_CLK_POLARITY_BITS;
659		val |= CLK_REQ_OUTN_SEL;
660		wl1271_top_reg_write(wl, OCP_REG_CLK_POLARITY, val);
661	}
662
663	wl1271_write32(wl, PLL_PARAMETERS, clk);
664
665	pause = wl1271_read32(wl, PLL_PARAMETERS);
666
667	wl1271_debug(DEBUG_BOOT, "pause1 0x%x", pause);
668
	/*
	 * NOTE(review): clearing the bits and immediately setting them again
	 * is equivalent to just OR-ing WU_COUNTER_PAUSE_VAL in; kept as-is
	 * because the net effect (bits forced on) is clearly intended.
	 */
669	pause &= ~(WU_COUNTER_PAUSE_VAL);
670	pause |= WU_COUNTER_PAUSE_VAL;
671	wl1271_write32(wl, WU_COUNTER_PAUSE, pause);
672
673	return 0;
674}
675
/*
 * uploads NVS and firmware
 *
 * Full load sequence: configure the boot clock (chip-dependent), finish
 * the ELP wakeup, program the DRPw scratch register with the selected
 * clock, soft-reset, upload the NVS, then upload the firmware image.
 * The firmware is NOT started here — wl1271_boot_run_firmware() does that.
 * Returns 0 on success or a negative errno.
 */
677int wl1271_load_firmware(struct wl1271 *wl)
678{
679	int ret = 0;
680	u32 tmp, clk;
681	int selected_clock = -1;
682
683	if (wl->chip.id == CHIP_ID_1283_PG20) {
684		ret = wl128x_boot_clk(wl, &selected_clock);
685		if (ret < 0)
686			goto out;
687	} else {
688		ret = wl127x_boot_clk(wl);
689		if (ret < 0)
690			goto out;
691	}
692
693	/* Continue the ELP wake up sequence */
694	wl1271_write32(wl, WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
695	udelay(500);
696
697	wl1271_set_partition(wl, &wl12xx_part_table[PART_DRPW]);
698
699	/* Read-modify-write DRPW_SCRATCH_START register (see next state)
700	   to be used by DRPw FW. The RTRIM value will be added by the FW
701	   before taking DRPw out of reset */
702
703	wl1271_debug(DEBUG_BOOT, "DRPW_SCRATCH_START %08x", DRPW_SCRATCH_START);
704	clk = wl1271_read32(wl, DRPW_SCRATCH_START);
705
706	wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk);
707
708	if (wl->chip.id == CHIP_ID_1283_PG20) {
709		clk |= ((selected_clock & 0x3) << 1) << 4;
710	} else {
711		clk |= (wl->ref_clock << 1) << 4;
712	}
713
714	wl1271_write32(wl, DRPW_SCRATCH_START, clk);
715
716	wl1271_set_partition(wl, &wl12xx_part_table[PART_WORK]);
717
718	/* Disable interrupts */
719	wl1271_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
720
721	ret = wl1271_boot_soft_reset(wl);
722	if (ret < 0)
723		goto out;
724
725	/* 2. start processing NVS file */
726	ret = wl1271_boot_upload_nvs(wl);
727	if (ret < 0)
728		goto out;
729
730	/* write firmware's last address (ie. it's length) to
731	 * ACX_EEPROMLESS_IND_REG */
732	wl1271_debug(DEBUG_BOOT, "ACX_EEPROMLESS_IND_REG");
733
734	wl1271_write32(wl, ACX_EEPROMLESS_IND_REG, ACX_EEPROMLESS_IND_REG);
735
736	tmp = wl1271_read32(wl, CHIP_ID_B);
737
738	wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp);
739
740	/* 6. read the EEPROM parameters */
741	tmp = wl1271_read32(wl, SCR_PAD2);
742
743	/* WL1271: The reference driver skips steps 7 to 10 (jumps directly
744	 * to upload_fw) */
745
746	if (wl->chip.id == CHIP_ID_1283_PG20)
747		wl1271_top_reg_write(wl, SDIO_IO_DS, wl->conf.hci_io_ds);
748
749	ret = wl1271_boot_upload_firmware(wl);
750	if (ret < 0)
751		goto out;
752
753out:
754	return ret;
755}
756EXPORT_SYMBOL_GPL(wl1271_load_firmware);
757
/*
 * Top-level boot entry point: load NVS + firmware, start the firmware,
 * set the IRQ polarity, unmask the event vector and enable interrupts,
 * then (re)configure the event mailbox. Returns 0 or a negative errno.
 */
758int wl1271_boot(struct wl1271 *wl)
759{
760	int ret;
761
762	/* upload NVS and firmware */
763	ret = wl1271_load_firmware(wl);
764	if (ret)
765		return ret;
766
767	/* 10.5 start firmware */
768	ret = wl1271_boot_run_firmware(wl);
769	if (ret < 0)
770		goto out;
771
772	ret = wl1271_boot_write_irq_polarity(wl);
773	if (ret < 0)
774		goto out;
775
776	wl1271_write32(wl, ACX_REG_INTERRUPT_MASK,
777		       WL1271_ACX_ALL_EVENTS_VECTOR);
778
779	/* Enable firmware interrupts now */
780	wl1271_boot_enable_interrupts(wl);
781
	/*
	 * NOTE(review): the event mailbox was already configured inside
	 * wl1271_boot_run_firmware(); this second call looks redundant —
	 * confirm whether it is required after interrupts are enabled.
	 */
782	wl1271_event_mbox_config(wl);
783
784out:
785	return ret;
786}
diff --git a/drivers/net/wireless/wl12xx/boot.h b/drivers/net/wireless/wl12xx/boot.h
deleted file mode 100644
index c3adc09f403d..000000000000
--- a/drivers/net/wireless/wl12xx/boot.h
+++ /dev/null
@@ -1,120 +0,0 @@
1/*
2 * This file is part of wl1271
3 *
4 * Copyright (C) 2008-2009 Nokia Corporation
5 *
6 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#ifndef __BOOT_H__
25#define __BOOT_H__
26
27#include "wl12xx.h"
28
29int wl1271_boot(struct wl1271 *wl);
30int wl1271_load_firmware(struct wl1271 *wl);
31
32#define WL1271_NO_SUBBANDS 8
33#define WL1271_NO_POWER_LEVELS 4
34#define WL1271_FW_VERSION_MAX_LEN 20
35
/*
 * Firmware static-data block as read verbatim from the command mailbox
 * by wl1271_boot_fw_version(); field layout must match the firmware's
 * on-chip structure exactly — do not reorder or resize.
 */
36struct wl1271_static_data {
37	u8 mac_address[ETH_ALEN];
38	u8 padding[2];
	/* NUL-padded version string, copied to wl->chip.fw_ver_str */
39	u8 fw_version[WL1271_FW_VERSION_MAX_LEN];
40	u32 hw_version;
41	u8 tx_power_table[WL1271_NO_SUBBANDS][WL1271_NO_POWER_LEVELS];
42};
43
44/* number of times we try to read the INIT interrupt */
45#define INIT_LOOP 20000
46
47/* delay between retries */
48#define INIT_LOOP_DELAY 50
49
50#define WU_COUNTER_PAUSE_VAL 0x3FF
51#define WELP_ARM_COMMAND_VAL 0x4
52
53#define OCP_REG_POLARITY 0x0064
54#define OCP_REG_CLK_TYPE 0x0448
55#define OCP_REG_CLK_POLARITY 0x0cb2
56#define OCP_REG_CLK_PULL 0x0cb4
57
58#define CMD_MBOX_ADDRESS 0x407B4
59
60#define POLARITY_LOW BIT(1)
61#define NO_PULL (BIT(14) | BIT(15))
62
63#define FREF_CLK_TYPE_BITS 0xfffffe7f
64#define CLK_REQ_PRCM 0x100
65#define FREF_CLK_POLARITY_BITS 0xfffff8ff
66#define CLK_REQ_OUTN_SEL 0x700
67
68/* PLL configuration algorithm for wl128x */
69#define SYS_CLK_CFG_REG 0x2200
70/* Bit[0] - 0-TCXO, 1-FREF */
71#define MCS_PLL_CLK_SEL_FREF BIT(0)
72/* Bit[3:2] - 01-TCXO, 10-FREF */
73#define WL_CLK_REQ_TYPE_FREF BIT(3)
74#define WL_CLK_REQ_TYPE_PG2 (BIT(3) | BIT(2))
75/* Bit[4] - 0-TCXO, 1-FREF */
76#define PRCM_CM_EN_MUX_WLAN_FREF BIT(4)
77
78#define TCXO_ILOAD_INT_REG 0x2264
79#define TCXO_CLK_DETECT_REG 0x2266
80
81#define TCXO_DET_FAILED BIT(4)
82
83#define FREF_ILOAD_INT_REG 0x2084
84#define FREF_CLK_DETECT_REG 0x2086
85#define FREF_CLK_DETECT_FAIL BIT(4)
86
87/* Use this reg for masking during driver access */
88#define WL_SPARE_REG 0x2320
89#define WL_SPARE_VAL BIT(2)
90/* Bit[6:5:3] - mask wl write SYS_CLK_CFG[8:5:2:4] */
91#define WL_SPARE_MASK_8526 (BIT(6) | BIT(5) | BIT(3))
92
93#define PLL_LOCK_COUNTERS_REG 0xD8C
94#define PLL_LOCK_COUNTERS_COEX 0x0F
95#define PLL_LOCK_COUNTERS_MCS 0xF0
96#define MCS_PLL_OVERRIDE_REG 0xD90
97#define MCS_PLL_CONFIG_REG 0xD92
98#define MCS_SEL_IN_FREQ_MASK 0x0070
99#define MCS_SEL_IN_FREQ_SHIFT 4
100#define MCS_PLL_CONFIG_REG_VAL 0x73
101#define MCS_PLL_ENABLE_HP (BIT(0) | BIT(1))
102
103#define MCS_PLL_M_REG 0xD94
104#define MCS_PLL_N_REG 0xD96
105#define MCS_PLL_M_REG_VAL 0xC8
106#define MCS_PLL_N_REG_VAL 0x07
107
108#define SDIO_IO_DS 0xd14
109
110/* SDIO/wSPI DS configuration values */
111enum {
112 HCI_IO_DS_8MA = 0,
113 HCI_IO_DS_4MA = 1, /* default */
114 HCI_IO_DS_6MA = 2,
115 HCI_IO_DS_2MA = 3,
116};
117
118/* end PLL configuration algorithm for wl128x */
119
120#endif
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index cb6204f78300..e6ec16d92e65 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -266,9 +266,13 @@ struct pn533 {
266 int in_maxlen; 266 int in_maxlen;
267 struct pn533_frame *in_frame; 267 struct pn533_frame *in_frame;
268 268
269 struct tasklet_struct tasklet; 269 struct sk_buff_head resp_q;
270 struct pn533_frame *tklt_in_frame; 270
271 int tklt_in_error; 271 struct workqueue_struct *wq;
272 struct work_struct cmd_work;
273 struct work_struct mi_work;
274 struct pn533_frame *wq_in_frame;
275 int wq_in_error;
272 276
273 pn533_cmd_complete_t cmd_complete; 277 pn533_cmd_complete_t cmd_complete;
274 void *cmd_complete_arg; 278 void *cmd_complete_arg;
@@ -383,15 +387,21 @@ static bool pn533_rx_frame_is_cmd_response(struct pn533_frame *frame, u8 cmd)
383 return (PN533_FRAME_CMD(frame) == PN533_CMD_RESPONSE(cmd)); 387 return (PN533_FRAME_CMD(frame) == PN533_CMD_RESPONSE(cmd));
384} 388}
385 389
386static void pn533_tasklet_cmd_complete(unsigned long arg) 390
391static void pn533_wq_cmd_complete(struct work_struct *work)
387{ 392{
388 struct pn533 *dev = (struct pn533 *) arg; 393 struct pn533 *dev = container_of(work, struct pn533, cmd_work);
389 struct pn533_frame *in_frame = dev->tklt_in_frame; 394 struct pn533_frame *in_frame;
390 int rc; 395 int rc;
391 396
392 if (dev->tklt_in_error) 397 if (dev == NULL)
398 return;
399
400 in_frame = dev->wq_in_frame;
401
402 if (dev->wq_in_error)
393 rc = dev->cmd_complete(dev, dev->cmd_complete_arg, NULL, 403 rc = dev->cmd_complete(dev, dev->cmd_complete_arg, NULL,
394 dev->tklt_in_error); 404 dev->wq_in_error);
395 else 405 else
396 rc = dev->cmd_complete(dev, dev->cmd_complete_arg, 406 rc = dev->cmd_complete(dev, dev->cmd_complete_arg,
397 PN533_FRAME_CMD_PARAMS_PTR(in_frame), 407 PN533_FRAME_CMD_PARAMS_PTR(in_frame),
@@ -406,7 +416,7 @@ static void pn533_recv_response(struct urb *urb)
406 struct pn533 *dev = urb->context; 416 struct pn533 *dev = urb->context;
407 struct pn533_frame *in_frame; 417 struct pn533_frame *in_frame;
408 418
409 dev->tklt_in_frame = NULL; 419 dev->wq_in_frame = NULL;
410 420
411 switch (urb->status) { 421 switch (urb->status) {
412 case 0: 422 case 0:
@@ -417,36 +427,36 @@ static void pn533_recv_response(struct urb *urb)
417 case -ESHUTDOWN: 427 case -ESHUTDOWN:
418 nfc_dev_dbg(&dev->interface->dev, "Urb shutting down with" 428 nfc_dev_dbg(&dev->interface->dev, "Urb shutting down with"
419 " status: %d", urb->status); 429 " status: %d", urb->status);
420 dev->tklt_in_error = urb->status; 430 dev->wq_in_error = urb->status;
421 goto sched_tasklet; 431 goto sched_wq;
422 default: 432 default:
423 nfc_dev_err(&dev->interface->dev, "Nonzero urb status received:" 433 nfc_dev_err(&dev->interface->dev, "Nonzero urb status received:"
424 " %d", urb->status); 434 " %d", urb->status);
425 dev->tklt_in_error = urb->status; 435 dev->wq_in_error = urb->status;
426 goto sched_tasklet; 436 goto sched_wq;
427 } 437 }
428 438
429 in_frame = dev->in_urb->transfer_buffer; 439 in_frame = dev->in_urb->transfer_buffer;
430 440
431 if (!pn533_rx_frame_is_valid(in_frame)) { 441 if (!pn533_rx_frame_is_valid(in_frame)) {
432 nfc_dev_err(&dev->interface->dev, "Received an invalid frame"); 442 nfc_dev_err(&dev->interface->dev, "Received an invalid frame");
433 dev->tklt_in_error = -EIO; 443 dev->wq_in_error = -EIO;
434 goto sched_tasklet; 444 goto sched_wq;
435 } 445 }
436 446
437 if (!pn533_rx_frame_is_cmd_response(in_frame, dev->cmd)) { 447 if (!pn533_rx_frame_is_cmd_response(in_frame, dev->cmd)) {
438 nfc_dev_err(&dev->interface->dev, "The received frame is not " 448 nfc_dev_err(&dev->interface->dev, "The received frame is not "
439 "response to the last command"); 449 "response to the last command");
440 dev->tklt_in_error = -EIO; 450 dev->wq_in_error = -EIO;
441 goto sched_tasklet; 451 goto sched_wq;
442 } 452 }
443 453
444 nfc_dev_dbg(&dev->interface->dev, "Received a valid frame"); 454 nfc_dev_dbg(&dev->interface->dev, "Received a valid frame");
445 dev->tklt_in_error = 0; 455 dev->wq_in_error = 0;
446 dev->tklt_in_frame = in_frame; 456 dev->wq_in_frame = in_frame;
447 457
448sched_tasklet: 458sched_wq:
449 tasklet_schedule(&dev->tasklet); 459 queue_work(dev->wq, &dev->cmd_work);
450} 460}
451 461
452static int pn533_submit_urb_for_response(struct pn533 *dev, gfp_t flags) 462static int pn533_submit_urb_for_response(struct pn533 *dev, gfp_t flags)
@@ -471,21 +481,21 @@ static void pn533_recv_ack(struct urb *urb)
471 case -ESHUTDOWN: 481 case -ESHUTDOWN:
472 nfc_dev_dbg(&dev->interface->dev, "Urb shutting down with" 482 nfc_dev_dbg(&dev->interface->dev, "Urb shutting down with"
473 " status: %d", urb->status); 483 " status: %d", urb->status);
474 dev->tklt_in_error = urb->status; 484 dev->wq_in_error = urb->status;
475 goto sched_tasklet; 485 goto sched_wq;
476 default: 486 default:
477 nfc_dev_err(&dev->interface->dev, "Nonzero urb status received:" 487 nfc_dev_err(&dev->interface->dev, "Nonzero urb status received:"
478 " %d", urb->status); 488 " %d", urb->status);
479 dev->tklt_in_error = urb->status; 489 dev->wq_in_error = urb->status;
480 goto sched_tasklet; 490 goto sched_wq;
481 } 491 }
482 492
483 in_frame = dev->in_urb->transfer_buffer; 493 in_frame = dev->in_urb->transfer_buffer;
484 494
485 if (!pn533_rx_frame_is_ack(in_frame)) { 495 if (!pn533_rx_frame_is_ack(in_frame)) {
486 nfc_dev_err(&dev->interface->dev, "Received an invalid ack"); 496 nfc_dev_err(&dev->interface->dev, "Received an invalid ack");
487 dev->tklt_in_error = -EIO; 497 dev->wq_in_error = -EIO;
488 goto sched_tasklet; 498 goto sched_wq;
489 } 499 }
490 500
491 nfc_dev_dbg(&dev->interface->dev, "Received a valid ack"); 501 nfc_dev_dbg(&dev->interface->dev, "Received a valid ack");
@@ -494,15 +504,15 @@ static void pn533_recv_ack(struct urb *urb)
494 if (rc) { 504 if (rc) {
495 nfc_dev_err(&dev->interface->dev, "usb_submit_urb failed with" 505 nfc_dev_err(&dev->interface->dev, "usb_submit_urb failed with"
496 " result %d", rc); 506 " result %d", rc);
497 dev->tklt_in_error = rc; 507 dev->wq_in_error = rc;
498 goto sched_tasklet; 508 goto sched_wq;
499 } 509 }
500 510
501 return; 511 return;
502 512
503sched_tasklet: 513sched_wq:
504 dev->tklt_in_frame = NULL; 514 dev->wq_in_frame = NULL;
505 tasklet_schedule(&dev->tasklet); 515 queue_work(dev->wq, &dev->cmd_work);
506} 516}
507 517
508static int pn533_submit_urb_for_ack(struct pn533 *dev, gfp_t flags) 518static int pn533_submit_urb_for_ack(struct pn533 *dev, gfp_t flags)
@@ -1249,6 +1259,8 @@ static void pn533_deactivate_target(struct nfc_dev *nfc_dev, u32 target_idx)
1249 1259
1250 dev->tgt_active_prot = 0; 1260 dev->tgt_active_prot = 0;
1251 1261
1262 skb_queue_purge(&dev->resp_q);
1263
1252 pn533_tx_frame_init(dev->out_frame, PN533_CMD_IN_RELEASE); 1264 pn533_tx_frame_init(dev->out_frame, PN533_CMD_IN_RELEASE);
1253 1265
1254 tg = 1; 1266 tg = 1;
@@ -1447,11 +1459,49 @@ struct pn533_data_exchange_arg {
1447 void *cb_context; 1459 void *cb_context;
1448}; 1460};
1449 1461
1462static struct sk_buff *pn533_build_response(struct pn533 *dev)
1463{
1464 struct sk_buff *skb, *tmp, *t;
1465 unsigned int skb_len = 0, tmp_len = 0;
1466
1467 nfc_dev_dbg(&dev->interface->dev, "%s\n", __func__);
1468
1469 if (skb_queue_empty(&dev->resp_q))
1470 return NULL;
1471
1472 if (skb_queue_len(&dev->resp_q) == 1) {
1473 skb = skb_dequeue(&dev->resp_q);
1474 goto out;
1475 }
1476
1477 skb_queue_walk_safe(&dev->resp_q, tmp, t)
1478 skb_len += tmp->len;
1479
1480 nfc_dev_dbg(&dev->interface->dev, "%s total length %d\n",
1481 __func__, skb_len);
1482
1483 skb = alloc_skb(skb_len, GFP_KERNEL);
1484 if (skb == NULL)
1485 goto out;
1486
1487 skb_put(skb, skb_len);
1488
1489 skb_queue_walk_safe(&dev->resp_q, tmp, t) {
1490 memcpy(skb->data + tmp_len, tmp->data, tmp->len);
1491 tmp_len += tmp->len;
1492 }
1493
1494out:
1495 skb_queue_purge(&dev->resp_q);
1496
1497 return skb;
1498}
1499
1450static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg, 1500static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg,
1451 u8 *params, int params_len) 1501 u8 *params, int params_len)
1452{ 1502{
1453 struct pn533_data_exchange_arg *arg = _arg; 1503 struct pn533_data_exchange_arg *arg = _arg;
1454 struct sk_buff *skb_resp = arg->skb_resp; 1504 struct sk_buff *skb = NULL, *skb_resp = arg->skb_resp;
1455 struct pn533_frame *in_frame = (struct pn533_frame *) skb_resp->data; 1505 struct pn533_frame *in_frame = (struct pn533_frame *) skb_resp->data;
1456 int err = 0; 1506 int err = 0;
1457 u8 status; 1507 u8 status;
@@ -1459,15 +1509,13 @@ static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg,
1459 1509
1460 nfc_dev_dbg(&dev->interface->dev, "%s", __func__); 1510 nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
1461 1511
1462 dev_kfree_skb_irq(arg->skb_out); 1512 dev_kfree_skb(arg->skb_out);
1463 1513
1464 if (params_len < 0) { /* error */ 1514 if (params_len < 0) { /* error */
1465 err = params_len; 1515 err = params_len;
1466 goto error; 1516 goto error;
1467 } 1517 }
1468 1518
1469 skb_put(skb_resp, PN533_FRAME_SIZE(in_frame));
1470
1471 status = params[0]; 1519 status = params[0];
1472 1520
1473 cmd_ret = status & PN533_CMD_RET_MASK; 1521 cmd_ret = status & PN533_CMD_RET_MASK;
@@ -1478,25 +1526,27 @@ static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg,
1478 goto error; 1526 goto error;
1479 } 1527 }
1480 1528
1529 skb_put(skb_resp, PN533_FRAME_SIZE(in_frame));
1530 skb_pull(skb_resp, PN533_CMD_DATAEXCH_HEAD_LEN);
1531 skb_trim(skb_resp, skb_resp->len - PN533_FRAME_TAIL_SIZE);
1532 skb_queue_tail(&dev->resp_q, skb_resp);
1533
1481 if (status & PN533_CMD_MI_MASK) { 1534 if (status & PN533_CMD_MI_MASK) {
1482 /* TODO: Implement support to multi-part data exchange */ 1535 queue_work(dev->wq, &dev->mi_work);
1483 nfc_dev_err(&dev->interface->dev, "Multi-part message not yet" 1536 return -EINPROGRESS;
1484 " supported");
1485 /* Prevent the other messages from controller */
1486 pn533_send_ack(dev, GFP_ATOMIC);
1487 err = -ENOSYS;
1488 goto error;
1489 } 1537 }
1490 1538
1491 skb_pull(skb_resp, PN533_CMD_DATAEXCH_HEAD_LEN); 1539 skb = pn533_build_response(dev);
1492 skb_trim(skb_resp, skb_resp->len - PN533_FRAME_TAIL_SIZE); 1540 if (skb == NULL)
1541 goto error;
1493 1542
1494 arg->cb(arg->cb_context, skb_resp, 0); 1543 arg->cb(arg->cb_context, skb, 0);
1495 kfree(arg); 1544 kfree(arg);
1496 return 0; 1545 return 0;
1497 1546
1498error: 1547error:
1499 dev_kfree_skb_irq(skb_resp); 1548 skb_queue_purge(&dev->resp_q);
1549 dev_kfree_skb(skb_resp);
1500 arg->cb(arg->cb_context, NULL, err); 1550 arg->cb(arg->cb_context, NULL, err);
1501 kfree(arg); 1551 kfree(arg);
1502 return 0; 1552 return 0;
@@ -1571,6 +1621,68 @@ error:
1571 return rc; 1621 return rc;
1572} 1622}
1573 1623
1624static void pn533_wq_mi_recv(struct work_struct *work)
1625{
1626 struct pn533 *dev = container_of(work, struct pn533, mi_work);
1627 struct sk_buff *skb_cmd;
1628 struct pn533_data_exchange_arg *arg = dev->cmd_complete_arg;
1629 struct pn533_frame *out_frame, *in_frame;
1630 struct sk_buff *skb_resp;
1631 int skb_resp_len;
1632 int rc;
1633
1634 nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
1635
1636 /* This is a zero payload size skb */
1637 skb_cmd = alloc_skb(PN533_CMD_DATAEXCH_HEAD_LEN + PN533_FRAME_TAIL_SIZE,
1638 GFP_KERNEL);
1639 if (skb_cmd == NULL)
1640 goto error_cmd;
1641
1642 skb_reserve(skb_cmd, PN533_CMD_DATAEXCH_HEAD_LEN);
1643
1644 rc = pn533_data_exchange_tx_frame(dev, skb_cmd);
1645 if (rc)
1646 goto error_frame;
1647
1648 skb_resp_len = PN533_CMD_DATAEXCH_HEAD_LEN +
1649 PN533_CMD_DATAEXCH_DATA_MAXLEN +
1650 PN533_FRAME_TAIL_SIZE;
1651 skb_resp = alloc_skb(skb_resp_len, GFP_KERNEL);
1652 if (!skb_resp) {
1653 rc = -ENOMEM;
1654 goto error_frame;
1655 }
1656
1657 in_frame = (struct pn533_frame *) skb_resp->data;
1658 out_frame = (struct pn533_frame *) skb_cmd->data;
1659
1660 arg->skb_resp = skb_resp;
1661 arg->skb_out = skb_cmd;
1662
1663 rc = __pn533_send_cmd_frame_async(dev, out_frame, in_frame,
1664 skb_resp_len,
1665 pn533_data_exchange_complete,
1666 dev->cmd_complete_arg, GFP_KERNEL);
1667 if (!rc)
1668 return;
1669
1670 nfc_dev_err(&dev->interface->dev, "Error %d when trying to"
1671 " perform data_exchange", rc);
1672
1673 kfree_skb(skb_resp);
1674
1675error_frame:
1676 kfree_skb(skb_cmd);
1677
1678error_cmd:
1679 pn533_send_ack(dev, GFP_KERNEL);
1680
1681 kfree(arg);
1682
1683 up(&dev->cmd_lock);
1684}
1685
1574static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata, 1686static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata,
1575 u8 cfgdata_len) 1687 u8 cfgdata_len)
1576{ 1688{
@@ -1668,7 +1780,15 @@ static int pn533_probe(struct usb_interface *interface,
1668 NULL, 0, 1780 NULL, 0,
1669 pn533_send_complete, dev); 1781 pn533_send_complete, dev);
1670 1782
1671 tasklet_init(&dev->tasklet, pn533_tasklet_cmd_complete, (ulong)dev); 1783 INIT_WORK(&dev->cmd_work, pn533_wq_cmd_complete);
1784 INIT_WORK(&dev->mi_work, pn533_wq_mi_recv);
1785 dev->wq = alloc_workqueue("pn533",
1786 WQ_NON_REENTRANT | WQ_UNBOUND | WQ_MEM_RECLAIM,
1787 1);
1788 if (dev->wq == NULL)
1789 goto error;
1790
1791 skb_queue_head_init(&dev->resp_q);
1672 1792
1673 usb_set_intfdata(interface, dev); 1793 usb_set_intfdata(interface, dev);
1674 1794
@@ -1678,7 +1798,7 @@ static int pn533_probe(struct usb_interface *interface,
1678 rc = pn533_send_cmd_frame_sync(dev, dev->out_frame, dev->in_frame, 1798 rc = pn533_send_cmd_frame_sync(dev, dev->out_frame, dev->in_frame,
1679 dev->in_maxlen); 1799 dev->in_maxlen);
1680 if (rc) 1800 if (rc)
1681 goto kill_tasklet; 1801 goto destroy_wq;
1682 1802
1683 fw_ver = (struct pn533_fw_version *) 1803 fw_ver = (struct pn533_fw_version *)
1684 PN533_FRAME_CMD_PARAMS_PTR(dev->in_frame); 1804 PN533_FRAME_CMD_PARAMS_PTR(dev->in_frame);
@@ -1694,7 +1814,7 @@ static int pn533_probe(struct usb_interface *interface,
1694 PN533_CMD_DATAEXCH_HEAD_LEN, 1814 PN533_CMD_DATAEXCH_HEAD_LEN,
1695 PN533_FRAME_TAIL_SIZE); 1815 PN533_FRAME_TAIL_SIZE);
1696 if (!dev->nfc_dev) 1816 if (!dev->nfc_dev)
1697 goto kill_tasklet; 1817 goto destroy_wq;
1698 1818
1699 nfc_set_parent_dev(dev->nfc_dev, &interface->dev); 1819 nfc_set_parent_dev(dev->nfc_dev, &interface->dev);
1700 nfc_set_drvdata(dev->nfc_dev, dev); 1820 nfc_set_drvdata(dev->nfc_dev, dev);
@@ -1720,8 +1840,8 @@ static int pn533_probe(struct usb_interface *interface,
1720 1840
1721free_nfc_dev: 1841free_nfc_dev:
1722 nfc_free_device(dev->nfc_dev); 1842 nfc_free_device(dev->nfc_dev);
1723kill_tasklet: 1843destroy_wq:
1724 tasklet_kill(&dev->tasklet); 1844 destroy_workqueue(dev->wq);
1725error: 1845error:
1726 kfree(dev->in_frame); 1846 kfree(dev->in_frame);
1727 usb_free_urb(dev->in_urb); 1847 usb_free_urb(dev->in_urb);
@@ -1744,7 +1864,9 @@ static void pn533_disconnect(struct usb_interface *interface)
1744 usb_kill_urb(dev->in_urb); 1864 usb_kill_urb(dev->in_urb);
1745 usb_kill_urb(dev->out_urb); 1865 usb_kill_urb(dev->out_urb);
1746 1866
1747 tasklet_kill(&dev->tasklet); 1867 destroy_workqueue(dev->wq);
1868
1869 skb_queue_purge(&dev->resp_q);
1748 1870
1749 kfree(dev->in_frame); 1871 kfree(dev->in_frame);
1750 usb_free_urb(dev->in_urb); 1872 usb_free_urb(dev->in_urb);
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 483c0adcad87..2574abde8d99 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -45,6 +45,8 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
45 for (i=0; i<PHY_MAX_ADDR; i++) 45 for (i=0; i<PHY_MAX_ADDR; i++)
46 mdio->irq[i] = PHY_POLL; 46 mdio->irq[i] = PHY_POLL;
47 47
48 mdio->dev.of_node = np;
49
48 /* Register the MDIO bus */ 50 /* Register the MDIO bus */
49 rc = mdiobus_register(mdio); 51 rc = mdiobus_register(mdio);
50 if (rc) 52 if (rc)
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 4bf71028556b..953ec3f08470 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2626,6 +2626,18 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4374,
2626DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375, 2626DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375,
2627 quirk_msi_intx_disable_bug); 2627 quirk_msi_intx_disable_bug);
2628 2628
2629DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1062,
2630 quirk_msi_intx_disable_bug);
2631DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1063,
2632 quirk_msi_intx_disable_bug);
2633DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2060,
2634 quirk_msi_intx_disable_bug);
2635DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2062,
2636 quirk_msi_intx_disable_bug);
2637DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1073,
2638 quirk_msi_intx_disable_bug);
2639DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1083,
2640 quirk_msi_intx_disable_bug);
2629#endif /* CONFIG_PCI_MSI */ 2641#endif /* CONFIG_PCI_MSI */
2630 2642
2631/* Allow manual resource allocation for PCI hotplug bridges 2643/* Allow manual resource allocation for PCI hotplug bridges
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index cd9bc3b129bc..5648dad71fb3 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -78,9 +78,13 @@ config PTP_1588_CLOCK_PCH
78 depends on PCH_GBE 78 depends on PCH_GBE
79 help 79 help
80 This driver adds support for using the PCH EG20T as a PTP 80 This driver adds support for using the PCH EG20T as a PTP
81 clock. This clock is only useful if your PTP programs are 81 clock. The hardware supports time stamping of PTP packets
82 getting hardware time stamps on the PTP Ethernet packets 82 when using the end-to-end delay (E2E) mechansim. The peer
83 using the SO_TIMESTAMPING API. 83 delay mechansim (P2P) is not supported.
84
85 This clock is only useful if your PTP programs are getting
86 hardware time stamps on the PTP Ethernet packets using the
87 SO_TIMESTAMPING API.
84 88
85 To compile this driver as a module, choose M here: the module 89 To compile this driver as a module, choose M here: the module
86 will be called ptp_pch. 90 will be called ptp_pch.
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index f519a131238d..1e528b539a07 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -304,6 +304,12 @@ void ptp_clock_event(struct ptp_clock *ptp, struct ptp_clock_event *event)
304} 304}
305EXPORT_SYMBOL(ptp_clock_event); 305EXPORT_SYMBOL(ptp_clock_event);
306 306
307int ptp_clock_index(struct ptp_clock *ptp)
308{
309 return ptp->index;
310}
311EXPORT_SYMBOL(ptp_clock_index);
312
307/* module operations */ 313/* module operations */
308 314
309static void __exit ptp_exit(void) 315static void __exit ptp_exit(void)
diff --git a/drivers/ptp/ptp_ixp46x.c b/drivers/ptp/ptp_ixp46x.c
index 6f2782bb5f41..e03c40692b00 100644
--- a/drivers/ptp/ptp_ixp46x.c
+++ b/drivers/ptp/ptp_ixp46x.c
@@ -284,6 +284,7 @@ static void __exit ptp_ixp_exit(void)
284{ 284{
285 free_irq(MASTER_IRQ, &ixp_clock); 285 free_irq(MASTER_IRQ, &ixp_clock);
286 free_irq(SLAVE_IRQ, &ixp_clock); 286 free_irq(SLAVE_IRQ, &ixp_clock);
287 ixp46x_phc_index = -1;
287 ptp_clock_unregister(ixp_clock.ptp_clock); 288 ptp_clock_unregister(ixp_clock.ptp_clock);
288} 289}
289 290
@@ -302,6 +303,8 @@ static int __init ptp_ixp_init(void)
302 if (IS_ERR(ixp_clock.ptp_clock)) 303 if (IS_ERR(ixp_clock.ptp_clock))
303 return PTR_ERR(ixp_clock.ptp_clock); 304 return PTR_ERR(ixp_clock.ptp_clock);
304 305
306 ixp46x_phc_index = ptp_clock_index(ixp_clock.ptp_clock);
307
305 __raw_writel(DEFAULT_ADDEND, &ixp_clock.regs->addend); 308 __raw_writel(DEFAULT_ADDEND, &ixp_clock.regs->addend);
306 __raw_writel(1, &ixp_clock.regs->trgt_lo); 309 __raw_writel(1, &ixp_clock.regs->trgt_lo);
307 __raw_writel(0, &ixp_clock.regs->trgt_hi); 310 __raw_writel(0, &ixp_clock.regs->trgt_hi);
diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c
index 375eb04c16ea..08c331130d88 100644
--- a/drivers/ptp/ptp_pch.c
+++ b/drivers/ptp/ptp_pch.c
@@ -261,6 +261,7 @@ u64 pch_rx_snap_read(struct pci_dev *pdev)
261 261
262 ns = ((u64) hi) << 32; 262 ns = ((u64) hi) << 32;
263 ns |= lo; 263 ns |= lo;
264 ns <<= TICKS_NS_SHIFT;
264 265
265 return ns; 266 return ns;
266} 267}
@@ -277,6 +278,7 @@ u64 pch_tx_snap_read(struct pci_dev *pdev)
277 278
278 ns = ((u64) hi) << 32; 279 ns = ((u64) hi) << 32;
279 ns |= lo; 280 ns |= lo;
281 ns <<= TICKS_NS_SHIFT;
280 282
281 return ns; 283 return ns;
282} 284}
@@ -306,7 +308,7 @@ static void pch_reset(struct pch_dev *chip)
306 * traffic on the ethernet interface 308 * traffic on the ethernet interface
307 * @addr: dress which contain the column separated address to be used. 309 * @addr: dress which contain the column separated address to be used.
308 */ 310 */
309static int pch_set_station_address(u8 *addr, struct pci_dev *pdev) 311int pch_set_station_address(u8 *addr, struct pci_dev *pdev)
310{ 312{
311 s32 i; 313 s32 i;
312 struct pch_dev *chip = pci_get_drvdata(pdev); 314 struct pch_dev *chip = pci_get_drvdata(pdev);
@@ -350,6 +352,7 @@ static int pch_set_station_address(u8 *addr, struct pci_dev *pdev)
350 } 352 }
351 return 0; 353 return 0;
352} 354}
355EXPORT_SYMBOL(pch_set_station_address);
353 356
354/* 357/*
355 * Interrupt service routine 358 * Interrupt service routine
@@ -649,8 +652,6 @@ pch_probe(struct pci_dev *pdev, const struct pci_device_id *id)
649 iowrite32(1, &chip->regs->trgt_lo); 652 iowrite32(1, &chip->regs->trgt_lo);
650 iowrite32(0, &chip->regs->trgt_hi); 653 iowrite32(0, &chip->regs->trgt_hi);
651 iowrite32(PCH_TSE_TTIPEND, &chip->regs->event); 654 iowrite32(PCH_TSE_TTIPEND, &chip->regs->event);
652 /* Version: IEEE1588 v1 and IEEE1588-2008, Mode: All Evwnt, Locked */
653 iowrite32(0x80020000, &chip->regs->ch_control);
654 655
655 pch_eth_enable_set(chip); 656 pch_eth_enable_set(chip);
656 657
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 453a740fa68e..922086105b4b 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -662,7 +662,7 @@ iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
662 662
663 /* setup Socket parameters */ 663 /* setup Socket parameters */
664 sk = sock->sk; 664 sk = sock->sk;
665 sk->sk_reuse = 1; 665 sk->sk_reuse = SK_CAN_REUSE;
666 sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */ 666 sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
667 sk->sk_allocation = GFP_ATOMIC; 667 sk->sk_allocation = GFP_ATOMIC;
668 668
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 91a97b3e45c6..5877b2c64e2a 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -345,7 +345,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
345 } 345 }
346 if (unlikely 346 if (unlikely
347 (skb->truesize != 347 (skb->truesize !=
348 sizeof(*skb) + skb_end_pointer(skb) - skb->head)) { 348 sizeof(*skb) + skb_end_offset(skb))) {
349 /* 349 /*
350 printk("TX buffer truesize has been changed\n"); 350 printk("TX buffer truesize has been changed\n");
351 */ 351 */
diff --git a/drivers/staging/ramster/cluster/tcp.c b/drivers/staging/ramster/cluster/tcp.c
index 3af1b2c51b78..b9721c1055b1 100644
--- a/drivers/staging/ramster/cluster/tcp.c
+++ b/drivers/staging/ramster/cluster/tcp.c
@@ -2106,7 +2106,7 @@ static int r2net_open_listening_sock(__be32 addr, __be16 port)
2106 r2net_listen_sock = sock; 2106 r2net_listen_sock = sock;
2107 INIT_WORK(&r2net_listen_work, r2net_accept_many); 2107 INIT_WORK(&r2net_listen_work, r2net_accept_many);
2108 2108
2109 sock->sk->sk_reuse = 1; 2109 sock->sk->sk_reuse = SK_CAN_REUSE;
2110 ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin)); 2110 ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
2111 if (ret < 0) { 2111 if (ret < 0) {
2112 printk(KERN_ERR "ramster: Error %d while binding socket at " 2112 printk(KERN_ERR "ramster: Error %d while binding socket at "
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 1f21d2a1e528..853db7a08a26 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -166,7 +166,7 @@ static void handle_tx(struct vhost_net *net)
166 if (wmem < sock->sk->sk_sndbuf / 2) 166 if (wmem < sock->sk->sk_sndbuf / 2)
167 tx_poll_stop(net); 167 tx_poll_stop(net);
168 hdr_size = vq->vhost_hlen; 168 hdr_size = vq->vhost_hlen;
169 zcopy = vhost_sock_zcopy(sock); 169 zcopy = vq->ubufs;
170 170
171 for (;;) { 171 for (;;) {
172 /* Release DMAs done buffers first */ 172 /* Release DMAs done buffers first */
@@ -257,7 +257,8 @@ static void handle_tx(struct vhost_net *net)
257 UIO_MAXIOV; 257 UIO_MAXIOV;
258 } 258 }
259 vhost_discard_vq_desc(vq, 1); 259 vhost_discard_vq_desc(vq, 1);
260 tx_poll_start(net, sock); 260 if (err == -EAGAIN || err == -ENOBUFS)
261 tx_poll_start(net, sock);
261 break; 262 break;
262 } 263 }
263 if (err != len) 264 if (err != len)
@@ -265,6 +266,8 @@ static void handle_tx(struct vhost_net *net)
265 " len %d != %zd\n", err, len); 266 " len %d != %zd\n", err, len);
266 if (!zcopy) 267 if (!zcopy)
267 vhost_add_used_and_signal(&net->dev, vq, head, 0); 268 vhost_add_used_and_signal(&net->dev, vq, head, 0);
269 else
270 vhost_zerocopy_signal_used(vq);
268 total_len += len; 271 total_len += len;
269 if (unlikely(total_len >= VHOST_NET_WEIGHT)) { 272 if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
270 vhost_poll_queue(&vq->poll); 273 vhost_poll_queue(&vq->poll);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 51e4c1eeec4f..94dbd25caa30 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1603,6 +1603,7 @@ void vhost_zerocopy_callback(struct ubuf_info *ubuf)
1603 struct vhost_ubuf_ref *ubufs = ubuf->ctx; 1603 struct vhost_ubuf_ref *ubufs = ubuf->ctx;
1604 struct vhost_virtqueue *vq = ubufs->vq; 1604 struct vhost_virtqueue *vq = ubufs->vq;
1605 1605
1606 vhost_poll_queue(&vq->poll);
1606 /* set len = 1 to mark this desc buffers done DMA */ 1607 /* set len = 1 to mark this desc buffers done DMA */
1607 vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN; 1608 vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN;
1608 kref_put(&ubufs->kref, vhost_zerocopy_done_signal); 1609 kref_put(&ubufs->kref, vhost_zerocopy_done_signal);
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 044e7b58d31c..1bfe8802cc1e 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -2005,7 +2005,7 @@ static int o2net_open_listening_sock(__be32 addr, __be16 port)
2005 o2net_listen_sock = sock; 2005 o2net_listen_sock = sock;
2006 INIT_WORK(&o2net_listen_work, o2net_accept_many); 2006 INIT_WORK(&o2net_listen_work, o2net_accept_many);
2007 2007
2008 sock->sk->sk_reuse = 1; 2008 sock->sk->sk_reuse = SK_CAN_REUSE;
2009 ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin)); 2009 ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
2010 if (ret < 0) { 2010 if (ret < 0) {
2011 printk(KERN_ERR "o2net: Error %d while binding socket at " 2011 printk(KERN_ERR "o2net: Error %d while binding socket at "
diff --git a/include/linux/dcbnl.h b/include/linux/dcbnl.h
index 65a2562f66b4..6bb43382f3f3 100644
--- a/include/linux/dcbnl.h
+++ b/include/linux/dcbnl.h
@@ -67,6 +67,17 @@ struct ieee_ets {
67 __u8 reco_prio_tc[IEEE_8021QAZ_MAX_TCS]; 67 __u8 reco_prio_tc[IEEE_8021QAZ_MAX_TCS];
68}; 68};
69 69
70/* This structure contains rate limit extension to the IEEE 802.1Qaz ETS
71 * managed object.
72 * Values are 64 bits long and specified in Kbps to enable usage over both
73 * slow and very fast networks.
74 *
75 * @tc_maxrate: maximal tc tx bandwidth indexed by traffic class
76 */
77struct ieee_maxrate {
78 __u64 tc_maxrate[IEEE_8021QAZ_MAX_TCS];
79};
80
70/* This structure contains the IEEE 802.1Qaz PFC managed object 81/* This structure contains the IEEE 802.1Qaz PFC managed object
71 * 82 *
72 * @pfc_cap: Indicates the number of traffic classes on the local device 83 * @pfc_cap: Indicates the number of traffic classes on the local device
@@ -321,6 +332,7 @@ enum ieee_attrs {
321 DCB_ATTR_IEEE_PEER_ETS, 332 DCB_ATTR_IEEE_PEER_ETS,
322 DCB_ATTR_IEEE_PEER_PFC, 333 DCB_ATTR_IEEE_PEER_PFC,
323 DCB_ATTR_IEEE_PEER_APP, 334 DCB_ATTR_IEEE_PEER_APP,
335 DCB_ATTR_IEEE_MAXRATE,
324 __DCB_ATTR_IEEE_MAX 336 __DCB_ATTR_IEEE_MAX
325}; 337};
326#define DCB_ATTR_IEEE_MAX (__DCB_ATTR_IEEE_MAX - 1) 338#define DCB_ATTR_IEEE_MAX (__DCB_ATTR_IEEE_MAX - 1)
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index eaf95a023af4..d16294e2a118 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -549,6 +549,8 @@ static inline const char *dccp_role(const struct sock *sk)
549 return NULL; 549 return NULL;
550} 550}
551 551
552extern void dccp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
553
552#endif /* __KERNEL__ */ 554#endif /* __KERNEL__ */
553 555
554#endif /* _LINUX_DCCP_H */ 556#endif /* _LINUX_DCCP_H */
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index f5647b59a90e..89d68d837b6e 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -726,6 +726,29 @@ struct ethtool_sfeatures {
726 struct ethtool_set_features_block features[0]; 726 struct ethtool_set_features_block features[0];
727}; 727};
728 728
729/**
730 * struct ethtool_ts_info - holds a device's timestamping and PHC association
731 * @cmd: command number = %ETHTOOL_GET_TS_INFO
732 * @so_timestamping: bit mask of the sum of the supported SO_TIMESTAMPING flags
733 * @phc_index: device index of the associated PHC, or -1 if there is none
734 * @tx_types: bit mask of the supported hwtstamp_tx_types enumeration values
735 * @rx_filters: bit mask of the supported hwtstamp_rx_filters enumeration values
736 *
737 * The bits in the 'tx_types' and 'rx_filters' fields correspond to
738 * the 'hwtstamp_tx_types' and 'hwtstamp_rx_filters' enumeration values,
739 * respectively. For example, if the device supports HWTSTAMP_TX_ON,
740 * then (1 << HWTSTAMP_TX_ON) in 'tx_types' will be set.
741 */
742struct ethtool_ts_info {
743 __u32 cmd;
744 __u32 so_timestamping;
745 __s32 phc_index;
746 __u32 tx_types;
747 __u32 tx_reserved[3];
748 __u32 rx_filters;
749 __u32 rx_reserved[3];
750};
751
729/* 752/*
730 * %ETHTOOL_SFEATURES changes features present in features[].valid to the 753 * %ETHTOOL_SFEATURES changes features present in features[].valid to the
731 * values of corresponding bits in features[].requested. Bits in .requested 754 * values of corresponding bits in features[].requested. Bits in .requested
@@ -788,6 +811,7 @@ struct net_device;
788 811
789/* Some generic methods drivers may use in their ethtool_ops */ 812/* Some generic methods drivers may use in their ethtool_ops */
790u32 ethtool_op_get_link(struct net_device *dev); 813u32 ethtool_op_get_link(struct net_device *dev);
814int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti);
791 815
792/** 816/**
793 * ethtool_rxfh_indir_default - get default value for RX flow hash indirection 817 * ethtool_rxfh_indir_default - get default value for RX flow hash indirection
@@ -893,6 +917,9 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
893 * and flag of the device. 917 * and flag of the device.
894 * @get_dump_data: Get dump data. 918 * @get_dump_data: Get dump data.
895 * @set_dump: Set dump specific flags to the device. 919 * @set_dump: Set dump specific flags to the device.
920 * @get_ts_info: Get the time stamping and PTP hardware clock capabilities.
921 * Drivers supporting transmit time stamps in software should set this to
922 * ethtool_op_get_ts_info().
896 * 923 *
897 * All operations are optional (i.e. the function pointer may be set 924 * All operations are optional (i.e. the function pointer may be set
898 * to %NULL) and callers must take this into account. Callers must 925 * to %NULL) and callers must take this into account. Callers must
@@ -954,6 +981,7 @@ struct ethtool_ops {
954 int (*get_dump_data)(struct net_device *, 981 int (*get_dump_data)(struct net_device *,
955 struct ethtool_dump *, void *); 982 struct ethtool_dump *, void *);
956 int (*set_dump)(struct net_device *, struct ethtool_dump *); 983 int (*set_dump)(struct net_device *, struct ethtool_dump *);
984 int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *);
957 985
958}; 986};
959#endif /* __KERNEL__ */ 987#endif /* __KERNEL__ */
@@ -1028,6 +1056,7 @@ struct ethtool_ops {
1028#define ETHTOOL_SET_DUMP 0x0000003e /* Set dump settings */ 1056#define ETHTOOL_SET_DUMP 0x0000003e /* Set dump settings */
1029#define ETHTOOL_GET_DUMP_FLAG 0x0000003f /* Get dump settings */ 1057#define ETHTOOL_GET_DUMP_FLAG 0x0000003f /* Get dump settings */
1030#define ETHTOOL_GET_DUMP_DATA 0x00000040 /* Get dump data */ 1058#define ETHTOOL_GET_DUMP_DATA 0x00000040 /* Get dump data */
1059#define ETHTOOL_GET_TS_INFO 0x00000041 /* Get time stamping and PHC info */
1031 1060
1032/* compatibility with older code */ 1061/* compatibility with older code */
1033#define SPARC_ETH_GSET ETHTOOL_GSET 1062#define SPARC_ETH_GSET ETHTOOL_GSET
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 8eeb205f298b..72090994d789 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -126,7 +126,8 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
126#define SKF_AD_HATYPE 28 126#define SKF_AD_HATYPE 28
127#define SKF_AD_RXHASH 32 127#define SKF_AD_RXHASH 32
128#define SKF_AD_CPU 36 128#define SKF_AD_CPU 36
129#define SKF_AD_MAX 40 129#define SKF_AD_ALU_XOR_X 40
130#define SKF_AD_MAX 44
130#define SKF_NET_OFF (-0x100000) 131#define SKF_NET_OFF (-0x100000)
131#define SKF_LL_OFF (-0x200000) 132#define SKF_LL_OFF (-0x200000)
132 133
@@ -153,6 +154,9 @@ static inline unsigned int sk_filter_len(const struct sk_filter *fp)
153extern int sk_filter(struct sock *sk, struct sk_buff *skb); 154extern int sk_filter(struct sock *sk, struct sk_buff *skb);
154extern unsigned int sk_run_filter(const struct sk_buff *skb, 155extern unsigned int sk_run_filter(const struct sk_buff *skb,
155 const struct sock_filter *filter); 156 const struct sock_filter *filter);
157extern int sk_unattached_filter_create(struct sk_filter **pfp,
158 struct sock_fprog *fprog);
159extern void sk_unattached_filter_destroy(struct sk_filter *fp);
156extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); 160extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
157extern int sk_detach_filter(struct sock *sk); 161extern int sk_detach_filter(struct sock *sk);
158extern int sk_chk_filter(struct sock_filter *filter, unsigned int flen); 162extern int sk_chk_filter(struct sock_filter *filter, unsigned int flen);
@@ -228,6 +232,7 @@ enum {
228 BPF_S_ANC_HATYPE, 232 BPF_S_ANC_HATYPE,
229 BPF_S_ANC_RXHASH, 233 BPF_S_ANC_RXHASH,
230 BPF_S_ANC_CPU, 234 BPF_S_ANC_CPU,
235 BPF_S_ANC_ALU_XOR_X,
231}; 236};
232 237
233#endif /* __KERNEL__ */ 238#endif /* __KERNEL__ */
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 5852545e6bba..6af8738ae7e9 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -274,6 +274,33 @@ struct hv_ring_buffer_debug_info {
274 u32 bytes_avail_towrite; 274 u32 bytes_avail_towrite;
275}; 275};
276 276
277
278/*
279 *
280 * hv_get_ringbuffer_availbytes()
281 *
282 * Get number of bytes available to read and to write to
283 * for the specified ring buffer
284 */
285static inline void
286hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
287 u32 *read, u32 *write)
288{
289 u32 read_loc, write_loc, dsize;
290
291 smp_read_barrier_depends();
292
293 /* Capture the read/write indices before they changed */
294 read_loc = rbi->ring_buffer->read_index;
295 write_loc = rbi->ring_buffer->write_index;
296 dsize = rbi->ring_datasize;
297
298 *write = write_loc >= read_loc ? dsize - (write_loc - read_loc) :
299 read_loc - write_loc;
300 *read = dsize - *write;
301}
302
303
277/* 304/*
278 * We use the same version numbering for all Hyper-V modules. 305 * We use the same version numbering for all Hyper-V modules.
279 * 306 *
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 210e2c325534..ce9af8918514 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -640,9 +640,9 @@ struct ieee80211_rann_ie {
640 u8 rann_hopcount; 640 u8 rann_hopcount;
641 u8 rann_ttl; 641 u8 rann_ttl;
642 u8 rann_addr[6]; 642 u8 rann_addr[6];
643 u32 rann_seq; 643 __le32 rann_seq;
644 u32 rann_interval; 644 __le32 rann_interval;
645 u32 rann_metric; 645 __le32 rann_metric;
646} __attribute__ ((packed)); 646} __attribute__ ((packed));
647 647
648enum ieee80211_rann_flags { 648enum ieee80211_rann_flags {
@@ -1007,13 +1007,13 @@ enum ieee80211_min_mpdu_spacing {
1007}; 1007};
1008 1008
1009/** 1009/**
1010 * struct ieee80211_ht_info - HT information 1010 * struct ieee80211_ht_operation - HT operation IE
1011 * 1011 *
1012 * This structure is the "HT information element" as 1012 * This structure is the "HT operation element" as
1013 * described in 802.11n D5.0 7.3.2.58 1013 * described in 802.11n-2009 7.3.2.57
1014 */ 1014 */
1015struct ieee80211_ht_info { 1015struct ieee80211_ht_operation {
1016 u8 control_chan; 1016 u8 primary_chan;
1017 u8 ht_param; 1017 u8 ht_param;
1018 __le16 operation_mode; 1018 __le16 operation_mode;
1019 __le16 stbc_param; 1019 __le16 stbc_param;
@@ -1027,8 +1027,6 @@ struct ieee80211_ht_info {
1027#define IEEE80211_HT_PARAM_CHA_SEC_BELOW 0x03 1027#define IEEE80211_HT_PARAM_CHA_SEC_BELOW 0x03
1028#define IEEE80211_HT_PARAM_CHAN_WIDTH_ANY 0x04 1028#define IEEE80211_HT_PARAM_CHAN_WIDTH_ANY 0x04
1029#define IEEE80211_HT_PARAM_RIFS_MODE 0x08 1029#define IEEE80211_HT_PARAM_RIFS_MODE 0x08
1030#define IEEE80211_HT_PARAM_SPSMP_SUPPORT 0x10
1031#define IEEE80211_HT_PARAM_SERV_INTERVAL_GRAN 0xE0
1032 1030
1033/* for operation_mode */ 1031/* for operation_mode */
1034#define IEEE80211_HT_OP_MODE_PROTECTION 0x0003 1032#define IEEE80211_HT_OP_MODE_PROTECTION 0x0003
@@ -1301,7 +1299,7 @@ enum ieee80211_eid {
1301 WLAN_EID_EXT_SUPP_RATES = 50, 1299 WLAN_EID_EXT_SUPP_RATES = 50,
1302 1300
1303 WLAN_EID_HT_CAPABILITY = 45, 1301 WLAN_EID_HT_CAPABILITY = 45,
1304 WLAN_EID_HT_INFORMATION = 61, 1302 WLAN_EID_HT_OPERATION = 61,
1305 1303
1306 WLAN_EID_RSN = 48, 1304 WLAN_EID_RSN = 48,
1307 WLAN_EID_MMIE = 76, 1305 WLAN_EID_MMIE = 76,
@@ -1441,6 +1439,18 @@ enum ieee80211_tdls_actioncode {
1441#define WLAN_TDLS_SNAP_RFTYPE 0x2 1439#define WLAN_TDLS_SNAP_RFTYPE 0x2
1442 1440
1443/** 1441/**
1442 * enum - mesh synchronization method identifier
1443 *
1444 * @IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET: the default synchronization method
1445 * @IEEE80211_SYNC_METHOD_VENDOR: a vendor specific synchronization method
1446 * that will be specified in a vendor specific information element
1447 */
1448enum {
1449 IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET = 1,
1450 IEEE80211_SYNC_METHOD_VENDOR = 255,
1451};
1452
1453/**
1444 * enum - mesh path selection protocol identifier 1454 * enum - mesh path selection protocol identifier
1445 * 1455 *
1446 * @IEEE80211_PATH_PROTOCOL_HWMP: the default path selection protocol 1456 * @IEEE80211_PATH_PROTOCOL_HWMP: the default path selection protocol
@@ -1448,7 +1458,7 @@ enum ieee80211_tdls_actioncode {
1448 * be specified in a vendor specific information element 1458 * be specified in a vendor specific information element
1449 */ 1459 */
1450enum { 1460enum {
1451 IEEE80211_PATH_PROTOCOL_HWMP = 0, 1461 IEEE80211_PATH_PROTOCOL_HWMP = 1,
1452 IEEE80211_PATH_PROTOCOL_VENDOR = 255, 1462 IEEE80211_PATH_PROTOCOL_VENDOR = 255,
1453}; 1463};
1454 1464
@@ -1460,7 +1470,7 @@ enum {
1460 * specified in a vendor specific information element 1470 * specified in a vendor specific information element
1461 */ 1471 */
1462enum { 1472enum {
1463 IEEE80211_PATH_METRIC_AIRTIME = 0, 1473 IEEE80211_PATH_METRIC_AIRTIME = 1,
1464 IEEE80211_PATH_METRIC_VENDOR = 255, 1474 IEEE80211_PATH_METRIC_VENDOR = 255,
1465}; 1475};
1466 1476
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index 4b24ff453aee..f715750d0b87 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -138,6 +138,8 @@ enum {
138 IFLA_GROUP, /* Group the device belongs to */ 138 IFLA_GROUP, /* Group the device belongs to */
139 IFLA_NET_NS_FD, 139 IFLA_NET_NS_FD,
140 IFLA_EXT_MASK, /* Extended info mask, VFs, etc */ 140 IFLA_EXT_MASK, /* Extended info mask, VFs, etc */
141 IFLA_PROMISCUITY, /* Promiscuity count: > 0 means acts PROMISC */
142#define IFLA_PROMISCUITY IFLA_PROMISCUITY
141 __IFLA_MAX 143 __IFLA_MAX
142}; 144};
143 145
@@ -253,6 +255,7 @@ struct ifla_vlan_qos_mapping {
253enum { 255enum {
254 IFLA_MACVLAN_UNSPEC, 256 IFLA_MACVLAN_UNSPEC,
255 IFLA_MACVLAN_MODE, 257 IFLA_MACVLAN_MODE,
258 IFLA_MACVLAN_FLAGS,
256 __IFLA_MACVLAN_MAX, 259 __IFLA_MACVLAN_MAX,
257}; 260};
258 261
@@ -265,6 +268,8 @@ enum macvlan_mode {
265 MACVLAN_MODE_PASSTHRU = 8,/* take over the underlying device */ 268 MACVLAN_MODE_PASSTHRU = 8,/* take over the underlying device */
266}; 269};
267 270
271#define MACVLAN_FLAG_NOPROMISC 1
272
268/* SR-IOV virtual function management section */ 273/* SR-IOV virtual function management section */
269 274
270enum { 275enum {
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index d103dca5c563..f65e8d250f7e 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -60,6 +60,7 @@ struct macvlan_dev {
60 struct net_device *lowerdev; 60 struct net_device *lowerdev;
61 struct macvlan_pcpu_stats __percpu *pcpu_stats; 61 struct macvlan_pcpu_stats __percpu *pcpu_stats;
62 enum macvlan_mode mode; 62 enum macvlan_mode mode;
63 u16 flags;
63 int (*receive)(struct sk_buff *skb); 64 int (*receive)(struct sk_buff *skb);
64 int (*forward)(struct net_device *dev, struct sk_buff *skb); 65 int (*forward)(struct net_device *dev, struct sk_buff *skb);
65 struct macvtap_queue *taps[MAX_MACVTAP_QUEUES]; 66 struct macvtap_queue *taps[MAX_MACVTAP_QUEUES];
diff --git a/include/linux/if_pppol2tp.h b/include/linux/if_pppol2tp.h
index 23cefa1111bf..b4775418d525 100644
--- a/include/linux/if_pppol2tp.h
+++ b/include/linux/if_pppol2tp.h
@@ -19,10 +19,11 @@
19 19
20#ifdef __KERNEL__ 20#ifdef __KERNEL__
21#include <linux/in.h> 21#include <linux/in.h>
22#include <linux/in6.h>
22#endif 23#endif
23 24
24/* Structure used to connect() the socket to a particular tunnel UDP 25/* Structure used to connect() the socket to a particular tunnel UDP
25 * socket. 26 * socket over IPv4.
26 */ 27 */
27struct pppol2tp_addr { 28struct pppol2tp_addr {
28 __kernel_pid_t pid; /* pid that owns the fd. 29 __kernel_pid_t pid; /* pid that owns the fd.
@@ -35,6 +36,20 @@ struct pppol2tp_addr {
35 __u16 d_tunnel, d_session; /* For sending outgoing packets */ 36 __u16 d_tunnel, d_session; /* For sending outgoing packets */
36}; 37};
37 38
39/* Structure used to connect() the socket to a particular tunnel UDP
40 * socket over IPv6.
41 */
42struct pppol2tpin6_addr {
43 __kernel_pid_t pid; /* pid that owns the fd.
44 * 0 => current */
45 int fd; /* FD of UDP socket to use */
46
47 __u16 s_tunnel, s_session; /* For matching incoming packets */
48 __u16 d_tunnel, d_session; /* For sending outgoing packets */
49
50 struct sockaddr_in6 addr; /* IP address and port to send to */
51};
52
38/* The L2TPv3 protocol changes tunnel and session ids from 16 to 32 53/* The L2TPv3 protocol changes tunnel and session ids from 16 to 32
39 * bits. So we need a different sockaddr structure. 54 * bits. So we need a different sockaddr structure.
40 */ 55 */
@@ -49,6 +64,17 @@ struct pppol2tpv3_addr {
49 __u32 d_tunnel, d_session; /* For sending outgoing packets */ 64 __u32 d_tunnel, d_session; /* For sending outgoing packets */
50}; 65};
51 66
67struct pppol2tpv3in6_addr {
68 __kernel_pid_t pid; /* pid that owns the fd.
69 * 0 => current */
70 int fd; /* FD of UDP or IP socket to use */
71
72 __u32 s_tunnel, s_session; /* For matching incoming packets */
73 __u32 d_tunnel, d_session; /* For sending outgoing packets */
74
75 struct sockaddr_in6 addr; /* IP address and port to send to */
76};
77
52/* Socket options: 78/* Socket options:
53 * DEBUG - bitmask of debug message categories 79 * DEBUG - bitmask of debug message categories
54 * SENDSEQ - 0 => don't send packets with sequence numbers 80 * SENDSEQ - 0 => don't send packets with sequence numbers
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index b5f927f59f26..09c474c480cd 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -70,7 +70,7 @@ struct sockaddr_pppox {
70 struct pppoe_addr pppoe; 70 struct pppoe_addr pppoe;
71 struct pptp_addr pptp; 71 struct pptp_addr pptp;
72 } sa_addr; 72 } sa_addr;
73} __attribute__((packed)); 73} __packed;
74 74
75/* The use of the above union isn't viable because the size of this 75/* The use of the above union isn't viable because the size of this
76 * struct must stay fixed over time -- applications use sizeof(struct 76 * struct must stay fixed over time -- applications use sizeof(struct
@@ -81,7 +81,13 @@ struct sockaddr_pppol2tp {
81 __kernel_sa_family_t sa_family; /* address family, AF_PPPOX */ 81 __kernel_sa_family_t sa_family; /* address family, AF_PPPOX */
82 unsigned int sa_protocol; /* protocol identifier */ 82 unsigned int sa_protocol; /* protocol identifier */
83 struct pppol2tp_addr pppol2tp; 83 struct pppol2tp_addr pppol2tp;
84} __attribute__((packed)); 84} __packed;
85
86struct sockaddr_pppol2tpin6 {
87 __kernel_sa_family_t sa_family; /* address family, AF_PPPOX */
88 unsigned int sa_protocol; /* protocol identifier */
89 struct pppol2tpin6_addr pppol2tp;
90} __packed;
85 91
86/* The L2TPv3 protocol changes tunnel and session ids from 16 to 32 92/* The L2TPv3 protocol changes tunnel and session ids from 16 to 32
87 * bits. So we need a different sockaddr structure. 93 * bits. So we need a different sockaddr structure.
@@ -90,7 +96,13 @@ struct sockaddr_pppol2tpv3 {
90 __kernel_sa_family_t sa_family; /* address family, AF_PPPOX */ 96 __kernel_sa_family_t sa_family; /* address family, AF_PPPOX */
91 unsigned int sa_protocol; /* protocol identifier */ 97 unsigned int sa_protocol; /* protocol identifier */
92 struct pppol2tpv3_addr pppol2tp; 98 struct pppol2tpv3_addr pppol2tp;
93} __attribute__((packed)); 99} __packed;
100
101struct sockaddr_pppol2tpv3in6 {
102 __kernel_sa_family_t sa_family; /* address family, AF_PPPOX */
103 unsigned int sa_protocol; /* protocol identifier */
104 struct pppol2tpv3in6_addr pppol2tp;
105} __packed;
94 106
95/********************************************************************* 107/*********************************************************************
96 * 108 *
@@ -140,7 +152,7 @@ struct pppoe_hdr {
140 __be16 sid; 152 __be16 sid;
141 __be16 length; 153 __be16 length;
142 struct pppoe_tag tag[0]; 154 struct pppoe_tag tag[0];
143} __attribute__((packed)); 155} __packed;
144 156
145/* Length of entire PPPoE + PPP header */ 157/* Length of entire PPPoE + PPP header */
146#define PPPOE_SES_HLEN 8 158#define PPPOE_SES_HLEN 8
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index 58404b0c5010..8185f57a9c7f 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -28,10 +28,28 @@ struct team;
28 28
29struct team_port { 29struct team_port {
30 struct net_device *dev; 30 struct net_device *dev;
31 struct hlist_node hlist; /* node in hash list */ 31 struct hlist_node hlist; /* node in enabled ports hash list */
32 struct list_head list; /* node in ordinary list */ 32 struct list_head list; /* node in ordinary list */
33 struct team *team; 33 struct team *team;
34 int index; 34 int index; /* index of enabled port. If disabled, it's set to -1 */
35
36 bool linkup; /* either state.linkup or user.linkup */
37
38 struct {
39 bool linkup;
40 u32 speed;
41 u8 duplex;
42 } state;
43
44 /* Values set by userspace */
45 struct {
46 bool linkup;
47 bool linkup_enabled;
48 } user;
49
50 /* Custom gennetlink interface related flags */
51 bool changed;
52 bool removed;
35 53
36 /* 54 /*
37 * A place for storing original values of the device before it 55 * A place for storing original values of the device before it
@@ -42,14 +60,6 @@ struct team_port {
42 unsigned int mtu; 60 unsigned int mtu;
43 } orig; 61 } orig;
44 62
45 bool linkup;
46 u32 speed;
47 u8 duplex;
48
49 /* Custom gennetlink interface related flags */
50 bool changed;
51 bool removed;
52
53 struct rcu_head rcu; 63 struct rcu_head rcu;
54}; 64};
55 65
@@ -68,18 +78,30 @@ struct team_mode_ops {
68enum team_option_type { 78enum team_option_type {
69 TEAM_OPTION_TYPE_U32, 79 TEAM_OPTION_TYPE_U32,
70 TEAM_OPTION_TYPE_STRING, 80 TEAM_OPTION_TYPE_STRING,
81 TEAM_OPTION_TYPE_BINARY,
82 TEAM_OPTION_TYPE_BOOL,
83};
84
85struct team_gsetter_ctx {
86 union {
87 u32 u32_val;
88 const char *str_val;
89 struct {
90 const void *ptr;
91 u32 len;
92 } bin_val;
93 bool bool_val;
94 } data;
95 struct team_port *port;
71}; 96};
72 97
73struct team_option { 98struct team_option {
74 struct list_head list; 99 struct list_head list;
75 const char *name; 100 const char *name;
101 bool per_port;
76 enum team_option_type type; 102 enum team_option_type type;
77 int (*getter)(struct team *team, void *arg); 103 int (*getter)(struct team *team, struct team_gsetter_ctx *ctx);
78 int (*setter)(struct team *team, void *arg); 104 int (*setter)(struct team *team, struct team_gsetter_ctx *ctx);
79
80 /* Custom gennetlink interface related flags */
81 bool changed;
82 bool removed;
83}; 105};
84 106
85struct team_mode { 107struct team_mode {
@@ -103,13 +125,15 @@ struct team {
103 struct mutex lock; /* used for overall locking, e.g. port lists write */ 125 struct mutex lock; /* used for overall locking, e.g. port lists write */
104 126
105 /* 127 /*
106 * port lists with port count 128 * List of enabled ports and their count
107 */ 129 */
108 int port_count; 130 int en_port_count;
109 struct hlist_head port_hlist[TEAM_PORT_HASHENTRIES]; 131 struct hlist_head en_port_hlist[TEAM_PORT_HASHENTRIES];
110 struct list_head port_list; 132
133 struct list_head port_list; /* list of all ports */
111 134
112 struct list_head option_list; 135 struct list_head option_list;
136 struct list_head option_inst_list; /* list of option instances */
113 137
114 const struct team_mode *mode; 138 const struct team_mode *mode;
115 struct team_mode_ops ops; 139 struct team_mode_ops ops;
@@ -119,7 +143,7 @@ struct team {
119static inline struct hlist_head *team_port_index_hash(struct team *team, 143static inline struct hlist_head *team_port_index_hash(struct team *team,
120 int port_index) 144 int port_index)
121{ 145{
122 return &team->port_hlist[port_index & (TEAM_PORT_HASHENTRIES - 1)]; 146 return &team->en_port_hlist[port_index & (TEAM_PORT_HASHENTRIES - 1)];
123} 147}
124 148
125static inline struct team_port *team_get_port_by_index(struct team *team, 149static inline struct team_port *team_get_port_by_index(struct team *team,
@@ -216,6 +240,7 @@ enum {
216 TEAM_ATTR_OPTION_TYPE, /* u8 */ 240 TEAM_ATTR_OPTION_TYPE, /* u8 */
217 TEAM_ATTR_OPTION_DATA, /* dynamic */ 241 TEAM_ATTR_OPTION_DATA, /* dynamic */
218 TEAM_ATTR_OPTION_REMOVED, /* flag */ 242 TEAM_ATTR_OPTION_REMOVED, /* flag */
243 TEAM_ATTR_OPTION_PORT_IFINDEX, /* u32 */ /* for per-port options */
219 244
220 __TEAM_ATTR_OPTION_MAX, 245 __TEAM_ATTR_OPTION_MAX,
221 TEAM_ATTR_OPTION_MAX = __TEAM_ATTR_OPTION_MAX - 1, 246 TEAM_ATTR_OPTION_MAX = __TEAM_ATTR_OPTION_MAX - 1,
diff --git a/include/linux/ip_vs.h b/include/linux/ip_vs.h
index 4deb3834d62c..be0ef3df4acb 100644
--- a/include/linux/ip_vs.h
+++ b/include/linux/ip_vs.h
@@ -125,8 +125,8 @@ struct ip_vs_service_user {
125 125
126 /* virtual service options */ 126 /* virtual service options */
127 char sched_name[IP_VS_SCHEDNAME_MAXLEN]; 127 char sched_name[IP_VS_SCHEDNAME_MAXLEN];
128 unsigned flags; /* virtual service flags */ 128 unsigned int flags; /* virtual service flags */
129 unsigned timeout; /* persistent timeout in sec */ 129 unsigned int timeout; /* persistent timeout in sec */
130 __be32 netmask; /* persistent netmask */ 130 __be32 netmask; /* persistent netmask */
131}; 131};
132 132
@@ -137,7 +137,7 @@ struct ip_vs_dest_user {
137 __be16 port; 137 __be16 port;
138 138
139 /* real server options */ 139 /* real server options */
140 unsigned conn_flags; /* connection flags */ 140 unsigned int conn_flags; /* connection flags */
141 int weight; /* destination weight */ 141 int weight; /* destination weight */
142 142
143 /* thresholds for active connections */ 143 /* thresholds for active connections */
@@ -187,8 +187,8 @@ struct ip_vs_service_entry {
187 187
188 /* service options */ 188 /* service options */
189 char sched_name[IP_VS_SCHEDNAME_MAXLEN]; 189 char sched_name[IP_VS_SCHEDNAME_MAXLEN];
190 unsigned flags; /* virtual service flags */ 190 unsigned int flags; /* virtual service flags */
191 unsigned timeout; /* persistent timeout */ 191 unsigned int timeout; /* persistent timeout */
192 __be32 netmask; /* persistent netmask */ 192 __be32 netmask; /* persistent netmask */
193 193
194 /* number of real servers */ 194 /* number of real servers */
@@ -202,7 +202,7 @@ struct ip_vs_service_entry {
202struct ip_vs_dest_entry { 202struct ip_vs_dest_entry {
203 __be32 addr; /* destination address */ 203 __be32 addr; /* destination address */
204 __be16 port; 204 __be16 port;
205 unsigned conn_flags; /* connection flags */ 205 unsigned int conn_flags; /* connection flags */
206 int weight; /* destination weight */ 206 int weight; /* destination weight */
207 207
208 __u32 u_threshold; /* upper threshold */ 208 __u32 u_threshold; /* upper threshold */
diff --git a/include/linux/l2tp.h b/include/linux/l2tp.h
index e77d7f9bb246..7eab668f60f3 100644
--- a/include/linux/l2tp.h
+++ b/include/linux/l2tp.h
@@ -11,6 +11,7 @@
11#include <linux/socket.h> 11#include <linux/socket.h>
12#ifdef __KERNEL__ 12#ifdef __KERNEL__
13#include <linux/in.h> 13#include <linux/in.h>
14#include <linux/in6.h>
14#else 15#else
15#include <netinet/in.h> 16#include <netinet/in.h>
16#endif 17#endif
@@ -39,6 +40,22 @@ struct sockaddr_l2tpip {
39 sizeof(__u32)]; 40 sizeof(__u32)];
40}; 41};
41 42
43/**
44 * struct sockaddr_l2tpip6 - the sockaddr structure for L2TP-over-IPv6 sockets
45 * @l2tp_family: address family number AF_L2TPIP.
46 * @l2tp_addr: protocol specific address information
47 * @l2tp_conn_id: connection id of tunnel
48 */
49struct sockaddr_l2tpip6 {
50 /* The first fields must match struct sockaddr_in6 */
51 __kernel_sa_family_t l2tp_family; /* AF_INET6 */
52 __be16 l2tp_unused; /* INET port number (unused) */
53 __be32 l2tp_flowinfo; /* IPv6 flow information */
54 struct in6_addr l2tp_addr; /* IPv6 address */
55 __u32 l2tp_scope_id; /* scope id (new in RFC2553) */
56 __u32 l2tp_conn_id; /* Connection ID of tunnel */
57};
58
42/***************************************************************************** 59/*****************************************************************************
43 * NETLINK_GENERIC netlink family. 60 * NETLINK_GENERIC netlink family.
44 *****************************************************************************/ 61 *****************************************************************************/
@@ -108,6 +125,8 @@ enum {
108 L2TP_ATTR_MTU, /* u16 */ 125 L2TP_ATTR_MTU, /* u16 */
109 L2TP_ATTR_MRU, /* u16 */ 126 L2TP_ATTR_MRU, /* u16 */
110 L2TP_ATTR_STATS, /* nested */ 127 L2TP_ATTR_STATS, /* nested */
128 L2TP_ATTR_IP6_SADDR, /* struct in6_addr */
129 L2TP_ATTR_IP6_DADDR, /* struct in6_addr */
111 __L2TP_ATTR_MAX, 130 __L2TP_ATTR_MAX,
112}; 131};
113 132
diff --git a/include/linux/mISDNhw.h b/include/linux/mISDNhw.h
index 4af841408fb5..de165b54237b 100644
--- a/include/linux/mISDNhw.h
+++ b/include/linux/mISDNhw.h
@@ -135,6 +135,9 @@ extern int create_l1(struct dchannel *, dchannel_l1callback *);
135#define HW_TESTRX_RAW 0x9602 135#define HW_TESTRX_RAW 0x9602
136#define HW_TESTRX_HDLC 0x9702 136#define HW_TESTRX_HDLC 0x9702
137#define HW_TESTRX_OFF 0x9802 137#define HW_TESTRX_OFF 0x9802
138#define HW_TIMER3_IND 0x9902
139#define HW_TIMER3_VALUE 0x9a00
140#define HW_TIMER3_VMASK 0x00FF
138 141
139struct layer1; 142struct layer1;
140extern int l1_event(struct layer1 *, u_int); 143extern int l1_event(struct layer1 *, u_int);
diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h
index b5e7f2202484..ce6e613dff4c 100644
--- a/include/linux/mISDNif.h
+++ b/include/linux/mISDNif.h
@@ -37,7 +37,7 @@
37 */ 37 */
38#define MISDN_MAJOR_VERSION 1 38#define MISDN_MAJOR_VERSION 1
39#define MISDN_MINOR_VERSION 1 39#define MISDN_MINOR_VERSION 1
40#define MISDN_RELEASE 21 40#define MISDN_RELEASE 28
41 41
42/* primitives for information exchange 42/* primitives for information exchange
43 * generell format 43 * generell format
@@ -115,6 +115,11 @@
115#define MDL_ERROR_IND 0x1F04 115#define MDL_ERROR_IND 0x1F04
116#define MDL_ERROR_RSP 0x5F04 116#define MDL_ERROR_RSP 0x5F04
117 117
118/* intern layer 2 */
119#define DL_TIMER200_IND 0x7004
120#define DL_TIMER203_IND 0x7304
121#define DL_INTERN_MSG 0x7804
122
118/* DL_INFORMATION_IND types */ 123/* DL_INFORMATION_IND types */
119#define DL_INFO_L2_CONNECT 0x0001 124#define DL_INFO_L2_CONNECT 0x0001
120#define DL_INFO_L2_REMOVED 0x0002 125#define DL_INFO_L2_REMOVED 0x0002
@@ -367,6 +372,7 @@ clear_channelmap(u_int nr, u_char *map)
367#define MISDN_CTRL_RX_OFF 0x0100 372#define MISDN_CTRL_RX_OFF 0x0100
368#define MISDN_CTRL_FILL_EMPTY 0x0200 373#define MISDN_CTRL_FILL_EMPTY 0x0200
369#define MISDN_CTRL_GETPEER 0x0400 374#define MISDN_CTRL_GETPEER 0x0400
375#define MISDN_CTRL_L1_TIMER3 0x0800
370#define MISDN_CTRL_HW_FEATURES_OP 0x2000 376#define MISDN_CTRL_HW_FEATURES_OP 0x2000
371#define MISDN_CTRL_HW_FEATURES 0x2001 377#define MISDN_CTRL_HW_FEATURES 0x2001
372#define MISDN_CTRL_HFC_OP 0x4000 378#define MISDN_CTRL_HFC_OP 0x4000
@@ -585,6 +591,7 @@ static inline struct mISDNdevice *dev_to_mISDN(struct device *dev)
585extern void set_channel_address(struct mISDNchannel *, u_int, u_int); 591extern void set_channel_address(struct mISDNchannel *, u_int, u_int);
586extern void mISDN_clock_update(struct mISDNclock *, int, struct timeval *); 592extern void mISDN_clock_update(struct mISDNclock *, int, struct timeval *);
587extern unsigned short mISDN_clock_get(void); 593extern unsigned short mISDN_clock_get(void);
594extern const char *mISDNDevName4ch(struct mISDNchannel *);
588 595
589#endif /* __KERNEL__ */ 596#endif /* __KERNEL__ */
590#endif /* mISDNIF_H */ 597#endif /* mISDNIF_H */
diff --git a/include/linux/mdio-mux.h b/include/linux/mdio-mux.h
new file mode 100644
index 000000000000..a243dbba8659
--- /dev/null
+++ b/include/linux/mdio-mux.h
@@ -0,0 +1,21 @@
1/*
2 * MDIO bus multiplexer framwork.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2011, 2012 Cavium, Inc.
9 */
10#ifndef __LINUX_MDIO_MUX_H
11#define __LINUX_MDIO_MUX_H
12#include <linux/device.h>
13
14int mdio_mux_init(struct device *dev,
15 int (*switch_fn) (int cur, int desired, void *data),
16 void **mux_handle,
17 void *data);
18
19void mdio_mux_uninit(void *mux_handle);
20
21#endif /* __LINUX_MDIO_MUX_H */
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 9958ff2cad3c..1f3860a8a109 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -150,6 +150,10 @@ enum {
150 /* statistics commands */ 150 /* statistics commands */
151 MLX4_CMD_QUERY_IF_STAT = 0X54, 151 MLX4_CMD_QUERY_IF_STAT = 0X54,
152 MLX4_CMD_SET_IF_STAT = 0X55, 152 MLX4_CMD_SET_IF_STAT = 0X55,
153
154 /* set port opcode modifiers */
155 MLX4_SET_PORT_PRIO2TC = 0x8,
156 MLX4_SET_PORT_SCHEDULER = 0x9,
153}; 157};
154 158
155enum { 159enum {
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 834c96c5d879..6d028247f79d 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -628,6 +628,9 @@ int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
628 u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx); 628 u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
629int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, 629int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
630 u8 promisc); 630 u8 promisc);
631int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc);
632int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
633 u8 *pg, u16 *ratelimit);
631int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx); 634int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
632int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index); 635int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
633void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index); 636void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index);
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 091f9e7dc8b9..96005d75893c 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -139,7 +139,8 @@ struct mlx4_qp_path {
139 u8 rgid[16]; 139 u8 rgid[16];
140 u8 sched_queue; 140 u8 sched_queue;
141 u8 vlan_index; 141 u8 vlan_index;
142 u8 reserved3[2]; 142 u8 feup;
143 u8 reserved3;
143 u8 reserved4[2]; 144 u8 reserved4[2];
144 u8 dmac[6]; 145 u8 dmac[6];
145}; 146};
diff --git a/include/linux/neighbour.h b/include/linux/neighbour.h
index b188f68a08c9..275e5d65dcb2 100644
--- a/include/linux/neighbour.h
+++ b/include/linux/neighbour.h
@@ -33,6 +33,9 @@ enum {
33#define NTF_PROXY 0x08 /* == ATF_PUBL */ 33#define NTF_PROXY 0x08 /* == ATF_PUBL */
34#define NTF_ROUTER 0x80 34#define NTF_ROUTER 0x80
35 35
36#define NTF_SELF 0x02
37#define NTF_MASTER 0x04
38
36/* 39/*
37 * Neighbor Cache Entry States. 40 * Neighbor Cache Entry States.
38 */ 41 */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 5cbaa20f1659..7f377fb8b527 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -54,6 +54,7 @@
54#include <net/netprio_cgroup.h> 54#include <net/netprio_cgroup.h>
55 55
56#include <linux/netdev_features.h> 56#include <linux/netdev_features.h>
57#include <linux/neighbour.h>
57 58
58struct netpoll_info; 59struct netpoll_info;
59struct device; 60struct device;
@@ -288,7 +289,7 @@ struct hh_cache {
288struct header_ops { 289struct header_ops {
289 int (*create) (struct sk_buff *skb, struct net_device *dev, 290 int (*create) (struct sk_buff *skb, struct net_device *dev,
290 unsigned short type, const void *daddr, 291 unsigned short type, const void *daddr,
291 const void *saddr, unsigned len); 292 const void *saddr, unsigned int len);
292 int (*parse)(const struct sk_buff *skb, unsigned char *haddr); 293 int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
293 int (*rebuild)(struct sk_buff *skb); 294 int (*rebuild)(struct sk_buff *skb);
294 int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type); 295 int (*cache)(const struct neighbour *neigh, struct hh_cache *hh, __be16 type);
@@ -905,6 +906,16 @@ struct netdev_fcoe_hbainfo {
905 * feature set might be less than what was returned by ndo_fix_features()). 906 * feature set might be less than what was returned by ndo_fix_features()).
906 * Must return >0 or -errno if it changed dev->features itself. 907 * Must return >0 or -errno if it changed dev->features itself.
907 * 908 *
909 * int (*ndo_fdb_add)(struct ndmsg *ndm, struct net_device *dev,
910 * unsigned char *addr, u16 flags)
911 * Adds an FDB entry to dev for addr.
912 * int (*ndo_fdb_del)(struct ndmsg *ndm, struct net_device *dev,
913 * unsigned char *addr)
914 * Deletes the FDB entry from dev coresponding to addr.
915 * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
916 * struct net_device *dev, int idx)
917 * Used to add FDB entries to dump requests. Implementers should add
918 * entries to skb and update idx with the number of entries.
908 */ 919 */
909struct net_device_ops { 920struct net_device_ops {
910 int (*ndo_init)(struct net_device *dev); 921 int (*ndo_init)(struct net_device *dev);
@@ -1002,6 +1013,18 @@ struct net_device_ops {
1002 netdev_features_t features); 1013 netdev_features_t features);
1003 int (*ndo_neigh_construct)(struct neighbour *n); 1014 int (*ndo_neigh_construct)(struct neighbour *n);
1004 void (*ndo_neigh_destroy)(struct neighbour *n); 1015 void (*ndo_neigh_destroy)(struct neighbour *n);
1016
1017 int (*ndo_fdb_add)(struct ndmsg *ndm,
1018 struct net_device *dev,
1019 unsigned char *addr,
1020 u16 flags);
1021 int (*ndo_fdb_del)(struct ndmsg *ndm,
1022 struct net_device *dev,
1023 unsigned char *addr);
1024 int (*ndo_fdb_dump)(struct sk_buff *skb,
1025 struct netlink_callback *cb,
1026 struct net_device *dev,
1027 int idx);
1005}; 1028};
1006 1029
1007/* 1030/*
@@ -1486,6 +1509,8 @@ struct napi_gro_cb {
1486 1509
1487 /* Free the skb? */ 1510 /* Free the skb? */
1488 int free; 1511 int free;
1512#define NAPI_GRO_FREE 1
1513#define NAPI_GRO_FREE_STOLEN_HEAD 2
1489}; 1514};
1490 1515
1491#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb) 1516#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
@@ -1689,7 +1714,7 @@ static inline void *skb_gro_network_header(struct sk_buff *skb)
1689static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, 1714static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
1690 unsigned short type, 1715 unsigned short type,
1691 const void *daddr, const void *saddr, 1716 const void *daddr, const void *saddr,
1692 unsigned len) 1717 unsigned int len)
1693{ 1718{
1694 if (!dev->header_ops || !dev->header_ops->create) 1719 if (!dev->header_ops || !dev->header_ops->create)
1695 return 0; 1720 return 0;
@@ -1740,7 +1765,7 @@ struct softnet_data {
1740 unsigned int input_queue_head; 1765 unsigned int input_queue_head;
1741 unsigned int input_queue_tail; 1766 unsigned int input_queue_tail;
1742#endif 1767#endif
1743 unsigned dropped; 1768 unsigned int dropped;
1744 struct sk_buff_head input_pkt_queue; 1769 struct sk_buff_head input_pkt_queue;
1745 struct napi_struct backlog; 1770 struct napi_struct backlog;
1746}; 1771};
@@ -1925,7 +1950,7 @@ static inline void netdev_sent_queue(struct net_device *dev, unsigned int bytes)
1925} 1950}
1926 1951
1927static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, 1952static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
1928 unsigned pkts, unsigned bytes) 1953 unsigned int pkts, unsigned int bytes)
1929{ 1954{
1930#ifdef CONFIG_BQL 1955#ifdef CONFIG_BQL
1931 if (unlikely(!bytes)) 1956 if (unlikely(!bytes))
@@ -1949,7 +1974,7 @@ static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
1949} 1974}
1950 1975
1951static inline void netdev_completed_queue(struct net_device *dev, 1976static inline void netdev_completed_queue(struct net_device *dev,
1952 unsigned pkts, unsigned bytes) 1977 unsigned int pkts, unsigned int bytes)
1953{ 1978{
1954 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes); 1979 netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
1955} 1980}
@@ -2144,9 +2169,9 @@ extern void netdev_rx_handler_unregister(struct net_device *dev);
2144extern bool dev_valid_name(const char *name); 2169extern bool dev_valid_name(const char *name);
2145extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *); 2170extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
2146extern int dev_ethtool(struct net *net, struct ifreq *); 2171extern int dev_ethtool(struct net *net, struct ifreq *);
2147extern unsigned dev_get_flags(const struct net_device *); 2172extern unsigned int dev_get_flags(const struct net_device *);
2148extern int __dev_change_flags(struct net_device *, unsigned int flags); 2173extern int __dev_change_flags(struct net_device *, unsigned int flags);
2149extern int dev_change_flags(struct net_device *, unsigned); 2174extern int dev_change_flags(struct net_device *, unsigned int);
2150extern void __dev_notify_flags(struct net_device *, unsigned int old_flags); 2175extern void __dev_notify_flags(struct net_device *, unsigned int old_flags);
2151extern int dev_change_name(struct net_device *, const char *); 2176extern int dev_change_name(struct net_device *, const char *);
2152extern int dev_set_alias(struct net_device *, const char *, size_t); 2177extern int dev_set_alias(struct net_device *, const char *, size_t);
@@ -2546,6 +2571,7 @@ extern int dev_addr_init(struct net_device *dev);
2546 2571
2547/* Functions used for unicast addresses handling */ 2572/* Functions used for unicast addresses handling */
2548extern int dev_uc_add(struct net_device *dev, unsigned char *addr); 2573extern int dev_uc_add(struct net_device *dev, unsigned char *addr);
2574extern int dev_uc_add_excl(struct net_device *dev, unsigned char *addr);
2549extern int dev_uc_del(struct net_device *dev, unsigned char *addr); 2575extern int dev_uc_del(struct net_device *dev, unsigned char *addr);
2550extern int dev_uc_sync(struct net_device *to, struct net_device *from); 2576extern int dev_uc_sync(struct net_device *to, struct net_device *from);
2551extern void dev_uc_unsync(struct net_device *to, struct net_device *from); 2577extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
@@ -2555,6 +2581,7 @@ extern void dev_uc_init(struct net_device *dev);
2555/* Functions used for multicast addresses handling */ 2581/* Functions used for multicast addresses handling */
2556extern int dev_mc_add(struct net_device *dev, unsigned char *addr); 2582extern int dev_mc_add(struct net_device *dev, unsigned char *addr);
2557extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr); 2583extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr);
2584extern int dev_mc_add_excl(struct net_device *dev, unsigned char *addr);
2558extern int dev_mc_del(struct net_device *dev, unsigned char *addr); 2585extern int dev_mc_del(struct net_device *dev, unsigned char *addr);
2559extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr); 2586extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr);
2560extern int dev_mc_sync(struct net_device *to, struct net_device *from); 2587extern int dev_mc_sync(struct net_device *to, struct net_device *from);
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 29734be334c1..ff9c84c29b28 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -154,12 +154,6 @@ void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
154int nf_register_sockopt(struct nf_sockopt_ops *reg); 154int nf_register_sockopt(struct nf_sockopt_ops *reg);
155void nf_unregister_sockopt(struct nf_sockopt_ops *reg); 155void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
156 156
157#ifdef CONFIG_SYSCTL
158/* Sysctl registration */
159extern struct ctl_path nf_net_netfilter_sysctl_path[];
160extern struct ctl_path nf_net_ipv4_netfilter_sysctl_path[];
161#endif /* CONFIG_SYSCTL */
162
163extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; 157extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
164 158
165#if defined(CONFIG_JUMP_LABEL) 159#if defined(CONFIG_JUMP_LABEL)
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 2f8e18a23227..2edc64cab739 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -411,26 +411,32 @@ ip_set_get_h16(const struct nlattr *attr)
411#define ipset_nest_start(skb, attr) nla_nest_start(skb, attr | NLA_F_NESTED) 411#define ipset_nest_start(skb, attr) nla_nest_start(skb, attr | NLA_F_NESTED)
412#define ipset_nest_end(skb, start) nla_nest_end(skb, start) 412#define ipset_nest_end(skb, start) nla_nest_end(skb, start)
413 413
414#define NLA_PUT_IPADDR4(skb, type, ipaddr) \ 414static inline int nla_put_ipaddr4(struct sk_buff *skb, int type, __be32 ipaddr)
415do { \ 415{
416 struct nlattr *__nested = ipset_nest_start(skb, type); \ 416 struct nlattr *__nested = ipset_nest_start(skb, type);
417 \ 417 int ret;
418 if (!__nested) \ 418
419 goto nla_put_failure; \ 419 if (!__nested)
420 NLA_PUT_NET32(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr); \ 420 return -EMSGSIZE;
421 ipset_nest_end(skb, __nested); \ 421 ret = nla_put_net32(skb, IPSET_ATTR_IPADDR_IPV4, ipaddr);
422} while (0) 422 if (!ret)
423 423 ipset_nest_end(skb, __nested);
424#define NLA_PUT_IPADDR6(skb, type, ipaddrptr) \ 424 return ret;
425do { \ 425}
426 struct nlattr *__nested = ipset_nest_start(skb, type); \ 426
427 \ 427static inline int nla_put_ipaddr6(struct sk_buff *skb, int type, const struct in6_addr *ipaddrptr)
428 if (!__nested) \ 428{
429 goto nla_put_failure; \ 429 struct nlattr *__nested = ipset_nest_start(skb, type);
430 NLA_PUT(skb, IPSET_ATTR_IPADDR_IPV6, \ 430 int ret;
431 sizeof(struct in6_addr), ipaddrptr); \ 431
432 ipset_nest_end(skb, __nested); \ 432 if (!__nested)
433} while (0) 433 return -EMSGSIZE;
434 ret = nla_put(skb, IPSET_ATTR_IPADDR_IPV6,
435 sizeof(struct in6_addr), ipaddrptr);
436 if (!ret)
437 ipset_nest_end(skb, __nested);
438 return ret;
439}
434 440
435/* Get address from skbuff */ 441/* Get address from skbuff */
436static inline __be32 442static inline __be32
@@ -472,8 +478,8 @@ union ip_set_name_index {
472 478
473#define IP_SET_OP_GET_BYNAME 0x00000006 /* Get set index by name */ 479#define IP_SET_OP_GET_BYNAME 0x00000006 /* Get set index by name */
474struct ip_set_req_get_set { 480struct ip_set_req_get_set {
475 unsigned op; 481 unsigned int op;
476 unsigned version; 482 unsigned int version;
477 union ip_set_name_index set; 483 union ip_set_name_index set;
478}; 484};
479 485
@@ -482,8 +488,8 @@ struct ip_set_req_get_set {
482 488
483#define IP_SET_OP_VERSION 0x00000100 /* Ask kernel version */ 489#define IP_SET_OP_VERSION 0x00000100 /* Ask kernel version */
484struct ip_set_req_version { 490struct ip_set_req_version {
485 unsigned op; 491 unsigned int op;
486 unsigned version; 492 unsigned int version;
487}; 493};
488 494
489#endif /*_IP_SET_H */ 495#endif /*_IP_SET_H */
diff --git a/include/linux/netfilter/ipset/ip_set_ahash.h b/include/linux/netfilter/ipset/ip_set_ahash.h
index 05a5d72680be..289b62d9dd1f 100644
--- a/include/linux/netfilter/ipset/ip_set_ahash.h
+++ b/include/linux/netfilter/ipset/ip_set_ahash.h
@@ -594,17 +594,20 @@ type_pf_head(struct ip_set *set, struct sk_buff *skb)
594 nested = ipset_nest_start(skb, IPSET_ATTR_DATA); 594 nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
595 if (!nested) 595 if (!nested)
596 goto nla_put_failure; 596 goto nla_put_failure;
597 NLA_PUT_NET32(skb, IPSET_ATTR_HASHSIZE, 597 if (nla_put_net32(skb, IPSET_ATTR_HASHSIZE,
598 htonl(jhash_size(h->table->htable_bits))); 598 htonl(jhash_size(h->table->htable_bits))) ||
599 NLA_PUT_NET32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem)); 599 nla_put_net32(skb, IPSET_ATTR_MAXELEM, htonl(h->maxelem)))
600 goto nla_put_failure;
600#ifdef IP_SET_HASH_WITH_NETMASK 601#ifdef IP_SET_HASH_WITH_NETMASK
601 if (h->netmask != HOST_MASK) 602 if (h->netmask != HOST_MASK &&
602 NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, h->netmask); 603 nla_put_u8(skb, IPSET_ATTR_NETMASK, h->netmask))
604 goto nla_put_failure;
603#endif 605#endif
604 NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)); 606 if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
605 NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)); 607 nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) ||
606 if (with_timeout(h->timeout)) 608 (with_timeout(h->timeout) &&
607 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout)); 609 nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(h->timeout))))
610 goto nla_put_failure;
608 ipset_nest_end(skb, nested); 611 ipset_nest_end(skb, nested);
609 612
610 return 0; 613 return 0;
diff --git a/include/linux/netfilter/nf_conntrack_h323_types.h b/include/linux/netfilter/nf_conntrack_h323_types.h
index f35b6b4801e7..b0821f45fbe4 100644
--- a/include/linux/netfilter/nf_conntrack_h323_types.h
+++ b/include/linux/netfilter/nf_conntrack_h323_types.h
@@ -7,12 +7,12 @@
7 7
8typedef struct TransportAddress_ipAddress { /* SEQUENCE */ 8typedef struct TransportAddress_ipAddress { /* SEQUENCE */
9 int options; /* No use */ 9 int options; /* No use */
10 unsigned ip; 10 unsigned int ip;
11} TransportAddress_ipAddress; 11} TransportAddress_ipAddress;
12 12
13typedef struct TransportAddress_ip6Address { /* SEQUENCE */ 13typedef struct TransportAddress_ip6Address { /* SEQUENCE */
14 int options; /* No use */ 14 int options; /* No use */
15 unsigned ip; 15 unsigned int ip;
16} TransportAddress_ip6Address; 16} TransportAddress_ip6Address;
17 17
18typedef struct TransportAddress { /* CHOICE */ 18typedef struct TransportAddress { /* CHOICE */
@@ -96,12 +96,12 @@ typedef struct DataType { /* CHOICE */
96 96
97typedef struct UnicastAddress_iPAddress { /* SEQUENCE */ 97typedef struct UnicastAddress_iPAddress { /* SEQUENCE */
98 int options; /* No use */ 98 int options; /* No use */
99 unsigned network; 99 unsigned int network;
100} UnicastAddress_iPAddress; 100} UnicastAddress_iPAddress;
101 101
102typedef struct UnicastAddress_iP6Address { /* SEQUENCE */ 102typedef struct UnicastAddress_iP6Address { /* SEQUENCE */
103 int options; /* No use */ 103 int options; /* No use */
104 unsigned network; 104 unsigned int network;
105} UnicastAddress_iP6Address; 105} UnicastAddress_iP6Address;
106 106
107typedef struct UnicastAddress { /* CHOICE */ 107typedef struct UnicastAddress { /* CHOICE */
@@ -698,7 +698,7 @@ typedef struct RegistrationRequest { /* SEQUENCE */
698 } options; 698 } options;
699 RegistrationRequest_callSignalAddress callSignalAddress; 699 RegistrationRequest_callSignalAddress callSignalAddress;
700 RegistrationRequest_rasAddress rasAddress; 700 RegistrationRequest_rasAddress rasAddress;
701 unsigned timeToLive; 701 unsigned int timeToLive;
702} RegistrationRequest; 702} RegistrationRequest;
703 703
704typedef struct RegistrationConfirm_callSignalAddress { /* SEQUENCE OF */ 704typedef struct RegistrationConfirm_callSignalAddress { /* SEQUENCE OF */
@@ -730,7 +730,7 @@ typedef struct RegistrationConfirm { /* SEQUENCE */
730 eRegistrationConfirm_genericData = (1 << 12), 730 eRegistrationConfirm_genericData = (1 << 12),
731 } options; 731 } options;
732 RegistrationConfirm_callSignalAddress callSignalAddress; 732 RegistrationConfirm_callSignalAddress callSignalAddress;
733 unsigned timeToLive; 733 unsigned int timeToLive;
734} RegistrationConfirm; 734} RegistrationConfirm;
735 735
736typedef struct UnregistrationRequest_callSignalAddress { /* SEQUENCE OF */ 736typedef struct UnregistrationRequest_callSignalAddress { /* SEQUENCE OF */
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 6fd1f0d07e64..a1048c1587d1 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -80,7 +80,7 @@ extern int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n);
80extern int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n); 80extern int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n);
81 81
82extern int nfnetlink_has_listeners(struct net *net, unsigned int group); 82extern int nfnetlink_has_listeners(struct net *net, unsigned int group);
83extern int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned group, 83extern int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group,
84 int echo, gfp_t flags); 84 int echo, gfp_t flags);
85extern int nfnetlink_set_err(struct net *net, u32 pid, u32 group, int error); 85extern int nfnetlink_set_err(struct net *net, u32 pid, u32 group, int error);
86extern int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u_int32_t pid, int flags); 86extern int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u_int32_t pid, int flags);
diff --git a/include/linux/nfc.h b/include/linux/nfc.h
index 39c1fcf089c0..0ae9b5857c83 100644
--- a/include/linux/nfc.h
+++ b/include/linux/nfc.h
@@ -70,6 +70,7 @@ enum nfc_commands {
70 NFC_EVENT_TARGETS_FOUND, 70 NFC_EVENT_TARGETS_FOUND,
71 NFC_EVENT_DEVICE_ADDED, 71 NFC_EVENT_DEVICE_ADDED,
72 NFC_EVENT_DEVICE_REMOVED, 72 NFC_EVENT_DEVICE_REMOVED,
73 NFC_EVENT_TARGET_LOST,
73/* private: internal use only */ 74/* private: internal use only */
74 __NFC_CMD_AFTER_LAST 75 __NFC_CMD_AFTER_LAST
75}; 76};
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index e474f6e780cc..1335084b1c69 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -548,6 +548,11 @@
548 * @NL80211_CMD_SET_NOACK_MAP: sets a bitmap for the individual TIDs whether 548 * @NL80211_CMD_SET_NOACK_MAP: sets a bitmap for the individual TIDs whether
549 * No Acknowledgement Policy should be applied. 549 * No Acknowledgement Policy should be applied.
550 * 550 *
551 * @NL80211_CMD_CH_SWITCH_NOTIFY: An AP or GO may decide to switch channels
552 * independently of the userspace SME, send this event indicating
553 * %NL80211_ATTR_IFINDEX is now on %NL80211_ATTR_WIPHY_FREQ with
554 * %NL80211_ATTR_WIPHY_CHANNEL_TYPE.
555 *
551 * @NL80211_CMD_MAX: highest used command number 556 * @NL80211_CMD_MAX: highest used command number
552 * @__NL80211_CMD_AFTER_LAST: internal use 557 * @__NL80211_CMD_AFTER_LAST: internal use
553 */ 558 */
@@ -689,6 +694,8 @@ enum nl80211_commands {
689 694
690 NL80211_CMD_SET_NOACK_MAP, 695 NL80211_CMD_SET_NOACK_MAP,
691 696
697 NL80211_CMD_CH_SWITCH_NOTIFY,
698
692 /* add new commands above here */ 699 /* add new commands above here */
693 700
694 /* used to define NL80211_CMD_MAX below */ 701 /* used to define NL80211_CMD_MAX below */
@@ -1685,6 +1692,7 @@ enum nl80211_sta_bss_param {
1685 * @NL80211_STA_INFO_CONNECTED_TIME: time since the station is last connected 1692 * @NL80211_STA_INFO_CONNECTED_TIME: time since the station is last connected
1686 * @NL80211_STA_INFO_STA_FLAGS: Contains a struct nl80211_sta_flag_update. 1693 * @NL80211_STA_INFO_STA_FLAGS: Contains a struct nl80211_sta_flag_update.
1687 * @NL80211_STA_INFO_BEACON_LOSS: count of times beacon loss was detected (u32) 1694 * @NL80211_STA_INFO_BEACON_LOSS: count of times beacon loss was detected (u32)
1695 * @NL80211_STA_INFO_T_OFFSET: timing offset with respect to this STA (s64)
1688 * @__NL80211_STA_INFO_AFTER_LAST: internal 1696 * @__NL80211_STA_INFO_AFTER_LAST: internal
1689 * @NL80211_STA_INFO_MAX: highest possible station info attribute 1697 * @NL80211_STA_INFO_MAX: highest possible station info attribute
1690 */ 1698 */
@@ -1708,6 +1716,7 @@ enum nl80211_sta_info {
1708 NL80211_STA_INFO_CONNECTED_TIME, 1716 NL80211_STA_INFO_CONNECTED_TIME,
1709 NL80211_STA_INFO_STA_FLAGS, 1717 NL80211_STA_INFO_STA_FLAGS,
1710 NL80211_STA_INFO_BEACON_LOSS, 1718 NL80211_STA_INFO_BEACON_LOSS,
1719 NL80211_STA_INFO_T_OFFSET,
1711 1720
1712 /* keep last */ 1721 /* keep last */
1713 __NL80211_STA_INFO_AFTER_LAST, 1722 __NL80211_STA_INFO_AFTER_LAST,
@@ -2142,6 +2151,9 @@ enum nl80211_mntr_flags {
2142 * 2151 *
2143 * @NL80211_MESHCONF_ATTR_MAX: highest possible mesh configuration attribute 2152 * @NL80211_MESHCONF_ATTR_MAX: highest possible mesh configuration attribute
2144 * 2153 *
2154 * @NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR: maximum number of neighbors
2155 * to synchronize to for 11s default synchronization method (see 11C.12.2.2)
2156 *
2145 * @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use 2157 * @__NL80211_MESHCONF_ATTR_AFTER_LAST: internal use
2146 */ 2158 */
2147enum nl80211_meshconf_params { 2159enum nl80211_meshconf_params {
@@ -2166,6 +2178,7 @@ enum nl80211_meshconf_params {
2166 NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL, 2178 NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
2167 NL80211_MESHCONF_FORWARDING, 2179 NL80211_MESHCONF_FORWARDING,
2168 NL80211_MESHCONF_RSSI_THRESHOLD, 2180 NL80211_MESHCONF_RSSI_THRESHOLD,
2181 NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR,
2169 2182
2170 /* keep last */ 2183 /* keep last */
2171 __NL80211_MESHCONF_ATTR_AFTER_LAST, 2184 __NL80211_MESHCONF_ATTR_AFTER_LAST,
@@ -2205,6 +2218,11 @@ enum nl80211_meshconf_params {
2205 * complete (unsecured) mesh peering without the need of a userspace daemon. 2218 * complete (unsecured) mesh peering without the need of a userspace daemon.
2206 * 2219 *
2207 * @NL80211_MESH_SETUP_ATTR_MAX: highest possible mesh setup attribute number 2220 * @NL80211_MESH_SETUP_ATTR_MAX: highest possible mesh setup attribute number
2221 *
2222 * @NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC: Enable this option to use a
2223 * vendor specific synchronization method or disable it to use the default
2224 * neighbor offset synchronization
2225 *
2208 * @__NL80211_MESH_SETUP_ATTR_AFTER_LAST: Internal use 2226 * @__NL80211_MESH_SETUP_ATTR_AFTER_LAST: Internal use
2209 */ 2227 */
2210enum nl80211_mesh_setup_params { 2228enum nl80211_mesh_setup_params {
@@ -2214,6 +2232,7 @@ enum nl80211_mesh_setup_params {
2214 NL80211_MESH_SETUP_IE, 2232 NL80211_MESH_SETUP_IE,
2215 NL80211_MESH_SETUP_USERSPACE_AUTH, 2233 NL80211_MESH_SETUP_USERSPACE_AUTH,
2216 NL80211_MESH_SETUP_USERSPACE_AMPE, 2234 NL80211_MESH_SETUP_USERSPACE_AMPE,
2235 NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC,
2217 2236
2218 /* keep last */ 2237 /* keep last */
2219 __NL80211_MESH_SETUP_ATTR_AFTER_LAST, 2238 __NL80211_MESH_SETUP_ATTR_AFTER_LAST,
@@ -2223,7 +2242,7 @@ enum nl80211_mesh_setup_params {
2223/** 2242/**
2224 * enum nl80211_txq_attr - TX queue parameter attributes 2243 * enum nl80211_txq_attr - TX queue parameter attributes
2225 * @__NL80211_TXQ_ATTR_INVALID: Attribute number 0 is reserved 2244 * @__NL80211_TXQ_ATTR_INVALID: Attribute number 0 is reserved
2226 * @NL80211_TXQ_ATTR_QUEUE: TX queue identifier (NL80211_TXQ_Q_*) 2245 * @NL80211_TXQ_ATTR_AC: AC identifier (NL80211_AC_*)
2227 * @NL80211_TXQ_ATTR_TXOP: Maximum burst time in units of 32 usecs, 0 meaning 2246 * @NL80211_TXQ_ATTR_TXOP: Maximum burst time in units of 32 usecs, 0 meaning
2228 * disabled 2247 * disabled
2229 * @NL80211_TXQ_ATTR_CWMIN: Minimum contention window [a value of the form 2248 * @NL80211_TXQ_ATTR_CWMIN: Minimum contention window [a value of the form
@@ -2236,7 +2255,7 @@ enum nl80211_mesh_setup_params {
2236 */ 2255 */
2237enum nl80211_txq_attr { 2256enum nl80211_txq_attr {
2238 __NL80211_TXQ_ATTR_INVALID, 2257 __NL80211_TXQ_ATTR_INVALID,
2239 NL80211_TXQ_ATTR_QUEUE, 2258 NL80211_TXQ_ATTR_AC,
2240 NL80211_TXQ_ATTR_TXOP, 2259 NL80211_TXQ_ATTR_TXOP,
2241 NL80211_TXQ_ATTR_CWMIN, 2260 NL80211_TXQ_ATTR_CWMIN,
2242 NL80211_TXQ_ATTR_CWMAX, 2261 NL80211_TXQ_ATTR_CWMAX,
@@ -2247,13 +2266,21 @@ enum nl80211_txq_attr {
2247 NL80211_TXQ_ATTR_MAX = __NL80211_TXQ_ATTR_AFTER_LAST - 1 2266 NL80211_TXQ_ATTR_MAX = __NL80211_TXQ_ATTR_AFTER_LAST - 1
2248}; 2267};
2249 2268
2250enum nl80211_txq_q { 2269enum nl80211_ac {
2251 NL80211_TXQ_Q_VO, 2270 NL80211_AC_VO,
2252 NL80211_TXQ_Q_VI, 2271 NL80211_AC_VI,
2253 NL80211_TXQ_Q_BE, 2272 NL80211_AC_BE,
2254 NL80211_TXQ_Q_BK 2273 NL80211_AC_BK,
2274 NL80211_NUM_ACS
2255}; 2275};
2256 2276
2277/* backward compat */
2278#define NL80211_TXQ_ATTR_QUEUE NL80211_TXQ_ATTR_AC
2279#define NL80211_TXQ_Q_VO NL80211_AC_VO
2280#define NL80211_TXQ_Q_VI NL80211_AC_VI
2281#define NL80211_TXQ_Q_BE NL80211_AC_BE
2282#define NL80211_TXQ_Q_BK NL80211_AC_BK
2283
2257enum nl80211_channel_type { 2284enum nl80211_channel_type {
2258 NL80211_CHAN_NO_HT, 2285 NL80211_CHAN_NO_HT,
2259 NL80211_CHAN_HT20, 2286 NL80211_CHAN_HT20,
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index 53b94e025c7c..912c27a0f7ee 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -22,4 +22,6 @@ extern struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
22 void (*hndlr)(struct net_device *), 22 void (*hndlr)(struct net_device *),
23 phy_interface_t iface); 23 phy_interface_t iface);
24 24
25extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
26
25#endif /* __LINUX_OF_MDIO_H */ 27#endif /* __LINUX_OF_MDIO_H */
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 6fe0a37d4abf..f092032f1c98 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -412,6 +412,9 @@ struct phy_driver {
412 /* Clears up any memory if needed */ 412 /* Clears up any memory if needed */
413 void (*remove)(struct phy_device *phydev); 413 void (*remove)(struct phy_device *phydev);
414 414
415 /* Handles ethtool queries for hardware time stamping. */
416 int (*ts_info)(struct phy_device *phydev, struct ethtool_ts_info *ti);
417
415 /* Handles SIOCSHWTSTAMP ioctl for hardware time stamping. */ 418 /* Handles SIOCSHWTSTAMP ioctl for hardware time stamping. */
416 int (*hwtstamp)(struct phy_device *phydev, struct ifreq *ifr); 419 int (*hwtstamp)(struct phy_device *phydev, struct ifreq *ifr);
417 420
diff --git a/include/linux/pkt_sched.h b/include/linux/pkt_sched.h
index 410b33d014d2..ffe975c3f1d8 100644
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -509,6 +509,7 @@ enum {
509 TCA_NETEM_CORRUPT, 509 TCA_NETEM_CORRUPT,
510 TCA_NETEM_LOSS, 510 TCA_NETEM_LOSS,
511 TCA_NETEM_RATE, 511 TCA_NETEM_RATE,
512 TCA_NETEM_ECN,
512 __TCA_NETEM_MAX, 513 __TCA_NETEM_MAX,
513}; 514};
514 515
diff --git a/include/linux/platform_data/wiznet.h b/include/linux/platform_data/wiznet.h
new file mode 100644
index 000000000000..b5d8c192d84d
--- /dev/null
+++ b/include/linux/platform_data/wiznet.h
@@ -0,0 +1,24 @@
1/*
2 * Ethernet driver for the WIZnet W5x00 chip.
3 *
4 * Licensed under the GPL-2 or later.
5 */
6
7#ifndef PLATFORM_DATA_WIZNET_H
8#define PLATFORM_DATA_WIZNET_H
9
10#include <linux/if_ether.h>
11
12struct wiznet_platform_data {
13 int link_gpio;
14 u8 mac_addr[ETH_ALEN];
15};
16
17#ifndef CONFIG_WIZNET_BUS_SHIFT
18#define CONFIG_WIZNET_BUS_SHIFT 0
19#endif
20
21#define W5100_BUS_DIRECT_SIZE (0x8000 << CONFIG_WIZNET_BUS_SHIFT)
22#define W5300_BUS_DIRECT_SIZE (0x0400 << CONFIG_WIZNET_BUS_SHIFT)
23
24#endif /* PLATFORM_DATA_WIZNET_H */
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index dd2e44fba63e..945704c2ed65 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -136,4 +136,12 @@ struct ptp_clock_event {
136extern void ptp_clock_event(struct ptp_clock *ptp, 136extern void ptp_clock_event(struct ptp_clock *ptp,
137 struct ptp_clock_event *event); 137 struct ptp_clock_event *event);
138 138
139/**
140 * ptp_clock_index() - obtain the device index of a PTP clock
141 *
142 * @ptp: The clock obtained from ptp_clock_register().
143 */
144
145extern int ptp_clock_index(struct ptp_clock *ptp);
146
139#endif 147#endif
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 577592ea0ea0..2c1de8982c85 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -801,6 +801,10 @@ rtattr_failure:
801 return table; 801 return table;
802} 802}
803 803
804extern int ndo_dflt_fdb_dump(struct sk_buff *skb,
805 struct netlink_callback *cb,
806 struct net_device *dev,
807 int idx);
804#endif /* __KERNEL__ */ 808#endif /* __KERNEL__ */
805 809
806 810
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 111f26b6e28b..bb47314c7179 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -117,11 +117,11 @@ struct nf_conntrack {
117 117
118#ifdef CONFIG_BRIDGE_NETFILTER 118#ifdef CONFIG_BRIDGE_NETFILTER
119struct nf_bridge_info { 119struct nf_bridge_info {
120 atomic_t use; 120 atomic_t use;
121 struct net_device *physindev; 121 unsigned int mask;
122 struct net_device *physoutdev; 122 struct net_device *physindev;
123 unsigned int mask; 123 struct net_device *physoutdev;
124 unsigned long data[32 / sizeof(unsigned long)]; 124 unsigned long data[32 / sizeof(unsigned long)];
125}; 125};
126#endif 126#endif
127 127
@@ -470,7 +470,8 @@ struct sk_buff {
470 __u8 wifi_acked_valid:1; 470 __u8 wifi_acked_valid:1;
471 __u8 wifi_acked:1; 471 __u8 wifi_acked:1;
472 __u8 no_fcs:1; 472 __u8 no_fcs:1;
473 /* 9/11 bit hole (depending on ndisc_nodetype presence) */ 473 __u8 head_frag:1;
474 /* 8/10 bit hole (depending on ndisc_nodetype presence) */
474 kmemcheck_bitfield_end(flags2); 475 kmemcheck_bitfield_end(flags2);
475 476
476#ifdef CONFIG_NET_DMA 477#ifdef CONFIG_NET_DMA
@@ -560,9 +561,10 @@ static inline struct rtable *skb_rtable(const struct sk_buff *skb)
560extern void kfree_skb(struct sk_buff *skb); 561extern void kfree_skb(struct sk_buff *skb);
561extern void consume_skb(struct sk_buff *skb); 562extern void consume_skb(struct sk_buff *skb);
562extern void __kfree_skb(struct sk_buff *skb); 563extern void __kfree_skb(struct sk_buff *skb);
564extern struct kmem_cache *skbuff_head_cache;
563extern struct sk_buff *__alloc_skb(unsigned int size, 565extern struct sk_buff *__alloc_skb(unsigned int size,
564 gfp_t priority, int fclone, int node); 566 gfp_t priority, int fclone, int node);
565extern struct sk_buff *build_skb(void *data); 567extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
566static inline struct sk_buff *alloc_skb(unsigned int size, 568static inline struct sk_buff *alloc_skb(unsigned int size,
567 gfp_t priority) 569 gfp_t priority)
568{ 570{
@@ -643,11 +645,21 @@ static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
643{ 645{
644 return skb->head + skb->end; 646 return skb->head + skb->end;
645} 647}
648
649static inline unsigned int skb_end_offset(const struct sk_buff *skb)
650{
651 return skb->end;
652}
646#else 653#else
647static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) 654static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
648{ 655{
649 return skb->end; 656 return skb->end;
650} 657}
658
659static inline unsigned int skb_end_offset(const struct sk_buff *skb)
660{
661 return skb->end - skb->head;
662}
651#endif 663#endif
652 664
653/* Internal */ 665/* Internal */
@@ -881,10 +893,11 @@ static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
881 */ 893 */
882static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_) 894static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
883{ 895{
884 struct sk_buff *list = ((const struct sk_buff *)list_)->next; 896 struct sk_buff *skb = list_->next;
885 if (list == (struct sk_buff *)list_) 897
886 list = NULL; 898 if (skb == (struct sk_buff *)list_)
887 return list; 899 skb = NULL;
900 return skb;
888} 901}
889 902
890/** 903/**
@@ -900,6 +913,7 @@ static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
900 const struct sk_buff_head *list_) 913 const struct sk_buff_head *list_)
901{ 914{
902 struct sk_buff *next = skb->next; 915 struct sk_buff *next = skb->next;
916
903 if (next == (struct sk_buff *)list_) 917 if (next == (struct sk_buff *)list_)
904 next = NULL; 918 next = NULL;
905 return next; 919 return next;
@@ -920,10 +934,12 @@ static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
920 */ 934 */
921static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_) 935static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
922{ 936{
923 struct sk_buff *list = ((const struct sk_buff *)list_)->prev; 937 struct sk_buff *skb = list_->prev;
924 if (list == (struct sk_buff *)list_) 938
925 list = NULL; 939 if (skb == (struct sk_buff *)list_)
926 return list; 940 skb = NULL;
941 return skb;
942
927} 943}
928 944
929/** 945/**
@@ -1963,8 +1979,8 @@ static inline int skb_add_data(struct sk_buff *skb,
1963 return -EFAULT; 1979 return -EFAULT;
1964} 1980}
1965 1981
1966static inline int skb_can_coalesce(struct sk_buff *skb, int i, 1982static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
1967 const struct page *page, int off) 1983 const struct page *page, int off)
1968{ 1984{
1969 if (i) { 1985 if (i) {
1970 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; 1986 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
@@ -1972,7 +1988,7 @@ static inline int skb_can_coalesce(struct sk_buff *skb, int i,
1972 return page == skb_frag_page(frag) && 1988 return page == skb_frag_page(frag) &&
1973 off == frag->page_offset + skb_frag_size(frag); 1989 off == frag->page_offset + skb_frag_size(frag);
1974 } 1990 }
1975 return 0; 1991 return false;
1976} 1992}
1977 1993
1978static inline int __skb_linearize(struct sk_buff *skb) 1994static inline int __skb_linearize(struct sk_buff *skb)
@@ -2552,7 +2568,7 @@ static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
2552 return false; 2568 return false;
2553 2569
2554 skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD); 2570 skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
2555 if (skb_end_pointer(skb) - skb->head < skb_size) 2571 if (skb_end_offset(skb) < skb_size)
2556 return false; 2572 return false;
2557 2573
2558 if (skb_shared(skb) || skb_cloned(skb)) 2574 if (skb_shared(skb) || skb_cloned(skb))
@@ -2560,5 +2576,19 @@ static inline bool skb_is_recycleable(const struct sk_buff *skb, int skb_size)
2560 2576
2561 return true; 2577 return true;
2562} 2578}
2579
2580/**
2581 * skb_head_is_locked - Determine if the skb->head is locked down
2582 * @skb: skb to check
2583 *
2584 * The head on skbs build around a head frag can be removed if they are
2585 * not cloned. This function returns true if the skb head is locked down
2586 * due to either being allocated via kmalloc, or by being a clone with
2587 * multiple references to the head.
2588 */
2589static inline bool skb_head_is_locked(const struct sk_buff *skb)
2590{
2591 return !skb->head_frag || skb_cloned(skb);
2592}
2563#endif /* __KERNEL__ */ 2593#endif /* __KERNEL__ */
2564#endif /* _LINUX_SKBUFF_H */ 2594#endif /* _LINUX_SKBUFF_H */
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
index 251729a47880..db4bae78bda9 100644
--- a/include/linux/sock_diag.h
+++ b/include/linux/sock_diag.h
@@ -32,8 +32,8 @@ struct sock_diag_handler {
32 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh); 32 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
33}; 33};
34 34
35int sock_diag_register(struct sock_diag_handler *h); 35int sock_diag_register(const struct sock_diag_handler *h);
36void sock_diag_unregister(struct sock_diag_handler *h); 36void sock_diag_unregister(const struct sock_diag_handler *h);
37 37
38void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)); 38void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
39void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh)); 39void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
diff --git a/include/linux/socket.h b/include/linux/socket.h
index b84bbd48b874..25d6322fb635 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -68,13 +68,13 @@ struct msghdr {
68 __kernel_size_t msg_iovlen; /* Number of blocks */ 68 __kernel_size_t msg_iovlen; /* Number of blocks */
69 void * msg_control; /* Per protocol magic (eg BSD file descriptor passing) */ 69 void * msg_control; /* Per protocol magic (eg BSD file descriptor passing) */
70 __kernel_size_t msg_controllen; /* Length of cmsg list */ 70 __kernel_size_t msg_controllen; /* Length of cmsg list */
71 unsigned msg_flags; 71 unsigned int msg_flags;
72}; 72};
73 73
74/* For recvmmsg/sendmmsg */ 74/* For recvmmsg/sendmmsg */
75struct mmsghdr { 75struct mmsghdr {
76 struct msghdr msg_hdr; 76 struct msghdr msg_hdr;
77 unsigned msg_len; 77 unsigned int msg_len;
78}; 78};
79 79
80/* 80/*
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 0dddc9e42b6b..f85c93d6e6da 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -28,6 +28,51 @@
28 28
29#include <linux/platform_device.h> 29#include <linux/platform_device.h>
30 30
31#define STMMAC_RX_COE_NONE 0
32#define STMMAC_RX_COE_TYPE1 1
33#define STMMAC_RX_COE_TYPE2 2
34
35/* Define the macros for CSR clock range parameters to be passed by
36 * platform code.
37 * This could also be configured at run time using CPU freq framework. */
38
39/* MDC Clock Selection define*/
40#define STMMAC_CSR_60_100M 0x0 /* MDC = clk_scr_i/42 */
41#define STMMAC_CSR_100_150M 0x1 /* MDC = clk_scr_i/62 */
42#define STMMAC_CSR_20_35M 0x2 /* MDC = clk_scr_i/16 */
43#define STMMAC_CSR_35_60M 0x3 /* MDC = clk_scr_i/26 */
44#define STMMAC_CSR_150_250M 0x4 /* MDC = clk_scr_i/102 */
45#define STMMAC_CSR_250_300M 0x5 /* MDC = clk_scr_i/122 */
46
47/* The MDC clock could be set higher than the IEEE 802.3
48 * specified frequency limit 0f 2.5 MHz, by programming a clock divider
49 * of value different than the above defined values. The resultant MDIO
50 * clock frequency of 12.5 MHz is applicable for the interfacing chips
51 * supporting higher MDC clocks.
52 * The MDC clock selection macros need to be defined for MDC clock rate
53 * of 12.5 MHz, corresponding to the following selection.
54 */
55#define STMMAC_CSR_I_4 0x8 /* clk_csr_i/4 */
56#define STMMAC_CSR_I_6 0x9 /* clk_csr_i/6 */
57#define STMMAC_CSR_I_8 0xA /* clk_csr_i/8 */
58#define STMMAC_CSR_I_10 0xB /* clk_csr_i/10 */
59#define STMMAC_CSR_I_12 0xC /* clk_csr_i/12 */
60#define STMMAC_CSR_I_14 0xD /* clk_csr_i/14 */
61#define STMMAC_CSR_I_16 0xE /* clk_csr_i/16 */
62#define STMMAC_CSR_I_18 0xF /* clk_csr_i/18 */
63
64/* AXI DMA Burst length suported */
65#define DMA_AXI_BLEN_4 (1 << 1)
66#define DMA_AXI_BLEN_8 (1 << 2)
67#define DMA_AXI_BLEN_16 (1 << 3)
68#define DMA_AXI_BLEN_32 (1 << 4)
69#define DMA_AXI_BLEN_64 (1 << 5)
70#define DMA_AXI_BLEN_128 (1 << 6)
71#define DMA_AXI_BLEN_256 (1 << 7)
72#define DMA_AXI_BLEN_ALL (DMA_AXI_BLEN_4 | DMA_AXI_BLEN_8 | DMA_AXI_BLEN_16 \
73 | DMA_AXI_BLEN_32 | DMA_AXI_BLEN_64 \
74 | DMA_AXI_BLEN_128 | DMA_AXI_BLEN_256)
75
31/* Platfrom data for platform device structure's platform_data field */ 76/* Platfrom data for platform device structure's platform_data field */
32 77
33struct stmmac_mdio_bus_data { 78struct stmmac_mdio_bus_data {
@@ -38,16 +83,24 @@ struct stmmac_mdio_bus_data {
38 int probed_phy_irq; 83 int probed_phy_irq;
39}; 84};
40 85
86struct stmmac_dma_cfg {
87 int pbl;
88 int fixed_burst;
89 int burst_len;
90};
91
41struct plat_stmmacenet_data { 92struct plat_stmmacenet_data {
93 char *phy_bus_name;
42 int bus_id; 94 int bus_id;
43 int phy_addr; 95 int phy_addr;
44 int interface; 96 int interface;
45 struct stmmac_mdio_bus_data *mdio_bus_data; 97 struct stmmac_mdio_bus_data *mdio_bus_data;
46 int pbl; 98 struct stmmac_dma_cfg *dma_cfg;
47 int clk_csr; 99 int clk_csr;
48 int has_gmac; 100 int has_gmac;
49 int enh_desc; 101 int enh_desc;
50 int tx_coe; 102 int tx_coe;
103 int rx_coe;
51 int bugged_jumbo; 104 int bugged_jumbo;
52 int pmt; 105 int pmt;
53 int force_sf_dma_mode; 106 int force_sf_dma_mode;
@@ -56,6 +109,7 @@ struct plat_stmmacenet_data {
56 int (*init)(struct platform_device *pdev); 109 int (*init)(struct platform_device *pdev);
57 void (*exit)(struct platform_device *pdev); 110 void (*exit)(struct platform_device *pdev);
58 void *custom_cfg; 111 void *custom_cfg;
112 void *custom_data;
59 void *bsp_priv; 113 void *bsp_priv;
60}; 114};
61#endif 115#endif
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index b6c62d294380..d9b42c5be088 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -106,6 +106,22 @@ enum {
106#define TCP_THIN_LINEAR_TIMEOUTS 16 /* Use linear timeouts for thin streams*/ 106#define TCP_THIN_LINEAR_TIMEOUTS 16 /* Use linear timeouts for thin streams*/
107#define TCP_THIN_DUPACK 17 /* Fast retrans. after 1 dupack */ 107#define TCP_THIN_DUPACK 17 /* Fast retrans. after 1 dupack */
108#define TCP_USER_TIMEOUT 18 /* How long for loss retry before timeout */ 108#define TCP_USER_TIMEOUT 18 /* How long for loss retry before timeout */
109#define TCP_REPAIR 19 /* TCP sock is under repair right now */
110#define TCP_REPAIR_QUEUE 20
111#define TCP_QUEUE_SEQ 21
112#define TCP_REPAIR_OPTIONS 22
113
114struct tcp_repair_opt {
115 __u32 opt_code;
116 __u32 opt_val;
117};
118
119enum {
120 TCP_NO_QUEUE,
121 TCP_RECV_QUEUE,
122 TCP_SEND_QUEUE,
123 TCP_QUEUES_NR,
124};
109 125
110/* for TCP_INFO socket option */ 126/* for TCP_INFO socket option */
111#define TCPI_OPT_TIMESTAMPS 1 127#define TCPI_OPT_TIMESTAMPS 1
@@ -353,7 +369,11 @@ struct tcp_sock {
353 u8 nonagle : 4,/* Disable Nagle algorithm? */ 369 u8 nonagle : 4,/* Disable Nagle algorithm? */
354 thin_lto : 1,/* Use linear timeouts for thin streams */ 370 thin_lto : 1,/* Use linear timeouts for thin streams */
355 thin_dupack : 1,/* Fast retransmit on first dupack */ 371 thin_dupack : 1,/* Fast retransmit on first dupack */
356 unused : 2; 372 repair : 1,
373 unused : 1;
374 u8 repair_queue;
375 u8 do_early_retrans:1,/* Enable RFC5827 early-retransmit */
376 early_retrans_delayed:1; /* Delayed ER timer installed */
357 377
358/* RTT measurement */ 378/* RTT measurement */
359 u32 srtt; /* smoothed round trip time << 3 */ 379 u32 srtt; /* smoothed round trip time << 3 */
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 970d5a2a9047..2470f541af50 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -49,8 +49,11 @@
49#define VIRTIO_NET_F_CTRL_RX 18 /* Control channel RX mode support */ 49#define VIRTIO_NET_F_CTRL_RX 18 /* Control channel RX mode support */
50#define VIRTIO_NET_F_CTRL_VLAN 19 /* Control channel VLAN filtering */ 50#define VIRTIO_NET_F_CTRL_VLAN 19 /* Control channel VLAN filtering */
51#define VIRTIO_NET_F_CTRL_RX_EXTRA 20 /* Extra RX mode control support */ 51#define VIRTIO_NET_F_CTRL_RX_EXTRA 20 /* Extra RX mode control support */
52#define VIRTIO_NET_F_GUEST_ANNOUNCE 21 /* Guest can announce device on the
53 * network */
52 54
53#define VIRTIO_NET_S_LINK_UP 1 /* Link is up */ 55#define VIRTIO_NET_S_LINK_UP 1 /* Link is up */
56#define VIRTIO_NET_S_ANNOUNCE 2 /* Announcement is needed */
54 57
55struct virtio_net_config { 58struct virtio_net_config {
56 /* The config defining mac address (if VIRTIO_NET_F_MAC) */ 59 /* The config defining mac address (if VIRTIO_NET_F_MAC) */
@@ -152,4 +155,15 @@ struct virtio_net_ctrl_mac {
152 #define VIRTIO_NET_CTRL_VLAN_ADD 0 155 #define VIRTIO_NET_CTRL_VLAN_ADD 0
153 #define VIRTIO_NET_CTRL_VLAN_DEL 1 156 #define VIRTIO_NET_CTRL_VLAN_DEL 1
154 157
158/*
159 * Control link announce acknowledgement
160 *
161 * The command VIRTIO_NET_CTRL_ANNOUNCE_ACK is used to indicate that
162 * driver has recevied the notification; device would clear the
163 * VIRTIO_NET_S_ANNOUNCE bit in the status field after it receives
164 * this command.
165 */
166#define VIRTIO_NET_CTRL_ANNOUNCE 3
167 #define VIRTIO_NET_CTRL_ANNOUNCE_ACK 0
168
155#endif /* _LINUX_VIRTIO_NET_H */ 169#endif /* _LINUX_VIRTIO_NET_H */
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 757a17638b1b..27f450ba9514 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -92,7 +92,7 @@ extern void addrconf_leave_solict(struct inet6_dev *idev,
92 const struct in6_addr *addr); 92 const struct in6_addr *addr);
93 93
94static inline unsigned long addrconf_timeout_fixup(u32 timeout, 94static inline unsigned long addrconf_timeout_fixup(u32 timeout,
95 unsigned unit) 95 unsigned int unit)
96{ 96{
97 if (timeout == 0xffffffff) 97 if (timeout == 0xffffffff)
98 return ~0UL; 98 return ~0UL;
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index ca68e2cef230..2ee33da36a7a 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -22,7 +22,7 @@ extern struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
22struct unix_address { 22struct unix_address {
23 atomic_t refcnt; 23 atomic_t refcnt;
24 int len; 24 int len;
25 unsigned hash; 25 unsigned int hash;
26 struct sockaddr_un name[0]; 26 struct sockaddr_un name[0];
27}; 27};
28 28
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 94e09d361bb1..5d2352154cf6 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -215,7 +215,7 @@ typedef struct ax25_dev {
215 struct ax25_dev *next; 215 struct ax25_dev *next;
216 struct net_device *dev; 216 struct net_device *dev;
217 struct net_device *forward; 217 struct net_device *forward;
218 struct ctl_table *systable; 218 struct ctl_table_header *sysheader;
219 int values[AX25_MAX_VALUES]; 219 int values[AX25_MAX_VALUES];
220#if defined(CONFIG_AX25_DAMA_SLAVE) || defined(CONFIG_AX25_DAMA_MASTER) 220#if defined(CONFIG_AX25_DAMA_SLAVE) || defined(CONFIG_AX25_DAMA_MASTER)
221 ax25_dama_info dama; 221 ax25_dama_info dama;
@@ -441,11 +441,11 @@ extern void ax25_uid_free(void);
441 441
442/* sysctl_net_ax25.c */ 442/* sysctl_net_ax25.c */
443#ifdef CONFIG_SYSCTL 443#ifdef CONFIG_SYSCTL
444extern void ax25_register_sysctl(void); 444extern int ax25_register_dev_sysctl(ax25_dev *ax25_dev);
445extern void ax25_unregister_sysctl(void); 445extern void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev);
446#else 446#else
447static inline void ax25_register_sysctl(void) {}; 447static inline int ax25_register_dev_sysctl(ax25_dev *ax25_dev) { return 0; }
448static inline void ax25_unregister_sysctl(void) {}; 448static inline void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev) {}
449#endif /* CONFIG_SYSCTL */ 449#endif /* CONFIG_SYSCTL */
450 450
451#endif 451#endif
diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h
index 6db8ecf52aa2..439dadc8102f 100644
--- a/include/net/caif/caif_hsi.h
+++ b/include/net/caif/caif_hsi.h
@@ -123,12 +123,21 @@ struct cfhsi_rx_state {
123 bool piggy_desc; 123 bool piggy_desc;
124}; 124};
125 125
126/* Priority mapping */
127enum {
128 CFHSI_PRIO_CTL = 0,
129 CFHSI_PRIO_VI,
130 CFHSI_PRIO_VO,
131 CFHSI_PRIO_BEBK,
132 CFHSI_PRIO_LAST,
133};
134
126/* Structure implemented by CAIF HSI drivers. */ 135/* Structure implemented by CAIF HSI drivers. */
127struct cfhsi { 136struct cfhsi {
128 struct caif_dev_common cfdev; 137 struct caif_dev_common cfdev;
129 struct net_device *ndev; 138 struct net_device *ndev;
130 struct platform_device *pdev; 139 struct platform_device *pdev;
131 struct sk_buff_head qhead; 140 struct sk_buff_head qhead[CFHSI_PRIO_LAST];
132 struct cfhsi_drv drv; 141 struct cfhsi_drv drv;
133 struct cfhsi_dev *dev; 142 struct cfhsi_dev *dev;
134 int tx_state; 143 int tx_state;
@@ -151,8 +160,14 @@ struct cfhsi {
151 wait_queue_head_t wake_up_wait; 160 wait_queue_head_t wake_up_wait;
152 wait_queue_head_t wake_down_wait; 161 wait_queue_head_t wake_down_wait;
153 wait_queue_head_t flush_fifo_wait; 162 wait_queue_head_t flush_fifo_wait;
154 struct timer_list timer; 163 struct timer_list inactivity_timer;
155 struct timer_list rx_slowpath_timer; 164 struct timer_list rx_slowpath_timer;
165
166 /* TX aggregation */
167 unsigned long aggregation_timeout;
168 int aggregation_len;
169 struct timer_list aggregation_timer;
170
156 unsigned long bits; 171 unsigned long bits;
157}; 172};
158 173
diff --git a/include/net/caif/cfpkt.h b/include/net/caif/cfpkt.h
index 6bd200a4754a..83a89ba3005b 100644
--- a/include/net/caif/cfpkt.h
+++ b/include/net/caif/cfpkt.h
@@ -188,11 +188,18 @@ struct cfpkt *cfpkt_fromnative(enum caif_direction dir, void *nativepkt);
188 */ 188 */
189void *cfpkt_tonative(struct cfpkt *pkt); 189void *cfpkt_tonative(struct cfpkt *pkt);
190 190
191
192/* 191/*
193 * Returns packet information for a packet. 192 * Returns packet information for a packet.
194 * pkt Packet to get info from; 193 * pkt Packet to get info from;
195 * @return Packet information 194 * @return Packet information
196 */ 195 */
197struct caif_payload_info *cfpkt_info(struct cfpkt *pkt); 196struct caif_payload_info *cfpkt_info(struct cfpkt *pkt);
197
198/** cfpkt_set_prio - set priority for a CAIF packet.
199 *
200 * @pkt: The CAIF packet to be adjusted.
201 * @prio: one of TC_PRIO_ constants.
202 */
203void cfpkt_set_prio(struct cfpkt *pkt, int prio);
204
198#endif /* CFPKT_H_ */ 205#endif /* CFPKT_H_ */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 83d800c31e3c..815dc3f37e2b 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -521,6 +521,7 @@ struct station_parameters {
521 * @STATION_INFO_ASSOC_REQ_IES: @assoc_req_ies filled 521 * @STATION_INFO_ASSOC_REQ_IES: @assoc_req_ies filled
522 * @STATION_INFO_STA_FLAGS: @sta_flags filled 522 * @STATION_INFO_STA_FLAGS: @sta_flags filled
523 * @STATION_INFO_BEACON_LOSS_COUNT: @beacon_loss_count filled 523 * @STATION_INFO_BEACON_LOSS_COUNT: @beacon_loss_count filled
524 * @STATION_INFO_T_OFFSET: @t_offset filled
524 */ 525 */
525enum station_info_flags { 526enum station_info_flags {
526 STATION_INFO_INACTIVE_TIME = 1<<0, 527 STATION_INFO_INACTIVE_TIME = 1<<0,
@@ -542,7 +543,8 @@ enum station_info_flags {
542 STATION_INFO_CONNECTED_TIME = 1<<16, 543 STATION_INFO_CONNECTED_TIME = 1<<16,
543 STATION_INFO_ASSOC_REQ_IES = 1<<17, 544 STATION_INFO_ASSOC_REQ_IES = 1<<17,
544 STATION_INFO_STA_FLAGS = 1<<18, 545 STATION_INFO_STA_FLAGS = 1<<18,
545 STATION_INFO_BEACON_LOSS_COUNT = 1<<19 546 STATION_INFO_BEACON_LOSS_COUNT = 1<<19,
547 STATION_INFO_T_OFFSET = 1<<20,
546}; 548};
547 549
548/** 550/**
@@ -643,6 +645,7 @@ struct sta_bss_parameters {
643 * @assoc_req_ies_len: Length of assoc_req_ies buffer in octets. 645 * @assoc_req_ies_len: Length of assoc_req_ies buffer in octets.
644 * @sta_flags: station flags mask & values 646 * @sta_flags: station flags mask & values
645 * @beacon_loss_count: Number of times beacon loss event has triggered. 647 * @beacon_loss_count: Number of times beacon loss event has triggered.
648 * @t_offset: Time offset of the station relative to this host.
646 */ 649 */
647struct station_info { 650struct station_info {
648 u32 filled; 651 u32 filled;
@@ -671,6 +674,7 @@ struct station_info {
671 size_t assoc_req_ies_len; 674 size_t assoc_req_ies_len;
672 675
673 u32 beacon_loss_count; 676 u32 beacon_loss_count;
677 s64 t_offset;
674 678
675 /* 679 /*
676 * Note: Add a new enum station_info_flags value for each new field and 680 * Note: Add a new enum station_info_flags value for each new field and
@@ -798,6 +802,8 @@ struct mesh_config {
798 /* ttl used in path selection information elements */ 802 /* ttl used in path selection information elements */
799 u8 element_ttl; 803 u8 element_ttl;
800 bool auto_open_plinks; 804 bool auto_open_plinks;
805 /* neighbor offset synchronization */
806 u32 dot11MeshNbrOffsetMaxNeighbor;
801 /* HWMP parameters */ 807 /* HWMP parameters */
802 u8 dot11MeshHWMPmaxPREQretries; 808 u8 dot11MeshHWMPmaxPREQretries;
803 u32 path_refresh_time; 809 u32 path_refresh_time;
@@ -821,6 +827,7 @@ struct mesh_config {
821 * struct mesh_setup - 802.11s mesh setup configuration 827 * struct mesh_setup - 802.11s mesh setup configuration
822 * @mesh_id: the mesh ID 828 * @mesh_id: the mesh ID
823 * @mesh_id_len: length of the mesh ID, at least 1 and at most 32 bytes 829 * @mesh_id_len: length of the mesh ID, at least 1 and at most 32 bytes
830 * @sync_method: which synchronization method to use
824 * @path_sel_proto: which path selection protocol to use 831 * @path_sel_proto: which path selection protocol to use
825 * @path_metric: which metric to use 832 * @path_metric: which metric to use
826 * @ie: vendor information elements (optional) 833 * @ie: vendor information elements (optional)
@@ -834,8 +841,9 @@ struct mesh_config {
834struct mesh_setup { 841struct mesh_setup {
835 const u8 *mesh_id; 842 const u8 *mesh_id;
836 u8 mesh_id_len; 843 u8 mesh_id_len;
837 u8 path_sel_proto; 844 u8 sync_method;
838 u8 path_metric; 845 u8 path_sel_proto;
846 u8 path_metric;
839 const u8 *ie; 847 const u8 *ie;
840 u8 ie_len; 848 u8 ie_len;
841 bool is_authenticated; 849 bool is_authenticated;
@@ -845,7 +853,7 @@ struct mesh_setup {
845 853
846/** 854/**
847 * struct ieee80211_txq_params - TX queue parameters 855 * struct ieee80211_txq_params - TX queue parameters
848 * @queue: TX queue identifier (NL80211_TXQ_Q_*) 856 * @ac: AC identifier
849 * @txop: Maximum burst time in units of 32 usecs, 0 meaning disabled 857 * @txop: Maximum burst time in units of 32 usecs, 0 meaning disabled
850 * @cwmin: Minimum contention window [a value of the form 2^n-1 in the range 858 * @cwmin: Minimum contention window [a value of the form 2^n-1 in the range
851 * 1..32767] 859 * 1..32767]
@@ -854,7 +862,7 @@ struct mesh_setup {
854 * @aifs: Arbitration interframe space [0..255] 862 * @aifs: Arbitration interframe space [0..255]
855 */ 863 */
856struct ieee80211_txq_params { 864struct ieee80211_txq_params {
857 enum nl80211_txq_q queue; 865 enum nl80211_ac ac;
858 u16 txop; 866 u16 txop;
859 u16 cwmin; 867 u16 cwmin;
860 u16 cwmax; 868 u16 cwmax;
@@ -1336,6 +1344,9 @@ struct cfg80211_gtk_rekey_data {
1336 * be %NULL or contain the enabled Wake-on-Wireless triggers that are 1344 * be %NULL or contain the enabled Wake-on-Wireless triggers that are
1337 * configured for the device. 1345 * configured for the device.
1338 * @resume: wiphy device needs to be resumed 1346 * @resume: wiphy device needs to be resumed
1347 * @set_wakeup: Called when WoWLAN is enabled/disabled, use this callback
1348 * to call device_set_wakeup_enable() to enable/disable wakeup from
1349 * the device.
1339 * 1350 *
1340 * @add_virtual_intf: create a new virtual interface with the given name, 1351 * @add_virtual_intf: create a new virtual interface with the given name,
1341 * must set the struct wireless_dev's iftype. Beware: You must create 1352 * must set the struct wireless_dev's iftype. Beware: You must create
@@ -1507,6 +1518,7 @@ struct cfg80211_gtk_rekey_data {
1507struct cfg80211_ops { 1518struct cfg80211_ops {
1508 int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow); 1519 int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
1509 int (*resume)(struct wiphy *wiphy); 1520 int (*resume)(struct wiphy *wiphy);
1521 void (*set_wakeup)(struct wiphy *wiphy, bool enabled);
1510 1522
1511 struct net_device * (*add_virtual_intf)(struct wiphy *wiphy, 1523 struct net_device * (*add_virtual_intf)(struct wiphy *wiphy,
1512 char *name, 1524 char *name,
@@ -1698,7 +1710,8 @@ struct cfg80211_ops {
1698 struct net_device *dev, 1710 struct net_device *dev,
1699 u16 noack_map); 1711 u16 noack_map);
1700 1712
1701 struct ieee80211_channel *(*get_channel)(struct wiphy *wiphy); 1713 struct ieee80211_channel *(*get_channel)(struct wiphy *wiphy,
1714 enum nl80211_channel_type *type);
1702}; 1715};
1703 1716
1704/* 1717/*
@@ -1732,10 +1745,6 @@ struct cfg80211_ops {
1732 * hints read the documenation for regulatory_hint_found_beacon() 1745 * hints read the documenation for regulatory_hint_found_beacon()
1733 * @WIPHY_FLAG_NETNS_OK: if not set, do not allow changing the netns of this 1746 * @WIPHY_FLAG_NETNS_OK: if not set, do not allow changing the netns of this
1734 * wiphy at all 1747 * wiphy at all
1735 * @WIPHY_FLAG_ENFORCE_COMBINATIONS: Set this flag to enforce interface
1736 * combinations for this device. This flag is used for backward
1737 * compatibility only until all drivers advertise combinations and
1738 * they will always be enforced.
1739 * @WIPHY_FLAG_PS_ON_BY_DEFAULT: if set to true, powersave will be enabled 1748 * @WIPHY_FLAG_PS_ON_BY_DEFAULT: if set to true, powersave will be enabled
1740 * by default -- this flag will be set depending on the kernel's default 1749 * by default -- this flag will be set depending on the kernel's default
1741 * on wiphy_new(), but can be changed by the driver if it has a good 1750 * on wiphy_new(), but can be changed by the driver if it has a good
@@ -1780,7 +1789,7 @@ enum wiphy_flags {
1780 WIPHY_FLAG_IBSS_RSN = BIT(8), 1789 WIPHY_FLAG_IBSS_RSN = BIT(8),
1781 WIPHY_FLAG_MESH_AUTH = BIT(10), 1790 WIPHY_FLAG_MESH_AUTH = BIT(10),
1782 WIPHY_FLAG_SUPPORTS_SCHED_SCAN = BIT(11), 1791 WIPHY_FLAG_SUPPORTS_SCHED_SCAN = BIT(11),
1783 WIPHY_FLAG_ENFORCE_COMBINATIONS = BIT(12), 1792 /* use hole at 12 */
1784 WIPHY_FLAG_SUPPORTS_FW_ROAM = BIT(13), 1793 WIPHY_FLAG_SUPPORTS_FW_ROAM = BIT(13),
1785 WIPHY_FLAG_AP_UAPSD = BIT(14), 1794 WIPHY_FLAG_AP_UAPSD = BIT(14),
1786 WIPHY_FLAG_SUPPORTS_TDLS = BIT(15), 1795 WIPHY_FLAG_SUPPORTS_TDLS = BIT(15),
@@ -3343,6 +3352,17 @@ int cfg80211_can_beacon_sec_chan(struct wiphy *wiphy,
3343 enum nl80211_channel_type channel_type); 3352 enum nl80211_channel_type channel_type);
3344 3353
3345/* 3354/*
3355 * cfg80211_ch_switch_notify - update wdev channel and notify userspace
3356 * @dev: the device which switched channels
3357 * @freq: new channel frequency (in MHz)
3358 * @type: channel type
3359 *
3360 * Acquires wdev_lock, so must only be called from sleepable driver context!
3361 */
3362void cfg80211_ch_switch_notify(struct net_device *dev, int freq,
3363 enum nl80211_channel_type type);
3364
3365/*
3346 * cfg80211_calculate_bitrate - calculate actual bitrate (in 100Kbps units) 3366 * cfg80211_calculate_bitrate - calculate actual bitrate (in 100Kbps units)
3347 * @rate: given rate_info to calculate bitrate from 3367 * @rate: given rate_info to calculate bitrate from
3348 * 3368 *
diff --git a/include/net/compat.h b/include/net/compat.h
index a974ae92d182..6e9565324989 100644
--- a/include/net/compat.h
+++ b/include/net/compat.h
@@ -42,12 +42,12 @@ extern int compat_sock_get_timestampns(struct sock *, struct timespec __user *);
42 42
43extern int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *); 43extern int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *);
44extern int verify_compat_iovec(struct msghdr *, struct iovec *, struct sockaddr_storage *, int); 44extern int verify_compat_iovec(struct msghdr *, struct iovec *, struct sockaddr_storage *, int);
45extern asmlinkage long compat_sys_sendmsg(int,struct compat_msghdr __user *,unsigned); 45extern asmlinkage long compat_sys_sendmsg(int,struct compat_msghdr __user *,unsigned int);
46extern asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *, 46extern asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *,
47 unsigned, unsigned); 47 unsigned int, unsigned int);
48extern asmlinkage long compat_sys_recvmsg(int,struct compat_msghdr __user *,unsigned); 48extern asmlinkage long compat_sys_recvmsg(int,struct compat_msghdr __user *,unsigned int);
49extern asmlinkage long compat_sys_recvmmsg(int, struct compat_mmsghdr __user *, 49extern asmlinkage long compat_sys_recvmmsg(int, struct compat_mmsghdr __user *,
50 unsigned, unsigned, 50 unsigned int, unsigned int,
51 struct compat_timespec __user *); 51 struct compat_timespec __user *);
52extern asmlinkage long compat_sys_getsockopt(int, int, int, char __user *, int __user *); 52extern asmlinkage long compat_sys_getsockopt(int, int, int, char __user *, int __user *);
53extern int put_cmsg_compat(struct msghdr*, int, int, int, void *); 53extern int put_cmsg_compat(struct msghdr*, int, int, int, void *);
diff --git a/include/net/dcbnl.h b/include/net/dcbnl.h
index f55c980d8e23..fc5d5dcebb00 100644
--- a/include/net/dcbnl.h
+++ b/include/net/dcbnl.h
@@ -48,6 +48,8 @@ struct dcbnl_rtnl_ops {
48 /* IEEE 802.1Qaz std */ 48 /* IEEE 802.1Qaz std */
49 int (*ieee_getets) (struct net_device *, struct ieee_ets *); 49 int (*ieee_getets) (struct net_device *, struct ieee_ets *);
50 int (*ieee_setets) (struct net_device *, struct ieee_ets *); 50 int (*ieee_setets) (struct net_device *, struct ieee_ets *);
51 int (*ieee_getmaxrate) (struct net_device *, struct ieee_maxrate *);
52 int (*ieee_setmaxrate) (struct net_device *, struct ieee_maxrate *);
51 int (*ieee_getpfc) (struct net_device *, struct ieee_pfc *); 53 int (*ieee_getpfc) (struct net_device *, struct ieee_pfc *);
52 int (*ieee_setpfc) (struct net_device *, struct ieee_pfc *); 54 int (*ieee_setpfc) (struct net_device *, struct ieee_pfc *);
53 int (*ieee_getapp) (struct net_device *, struct dcb_app *); 55 int (*ieee_getapp) (struct net_device *, struct dcb_app *);
diff --git a/include/net/dn.h b/include/net/dn.h
index 814af0b9387d..c88bf4ebd330 100644
--- a/include/net/dn.h
+++ b/include/net/dn.h
@@ -199,7 +199,7 @@ static inline void dn_sk_ports_copy(struct flowidn *fld, struct dn_scp *scp)
199 fld->fld_dport = scp->addrrem; 199 fld->fld_dport = scp->addrrem;
200} 200}
201 201
202extern unsigned dn_mss_from_pmtu(struct net_device *dev, int mtu); 202extern unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu);
203 203
204#define DN_MENUVER_ACC 0x01 204#define DN_MENUVER_ACC 0x01
205#define DN_MENUVER_USR 0x02 205#define DN_MENUVER_USR 0x02
diff --git a/include/net/dn_fib.h b/include/net/dn_fib.h
index 782ef7cb4930..1ee9d4bda30d 100644
--- a/include/net/dn_fib.h
+++ b/include/net/dn_fib.h
@@ -31,7 +31,7 @@ struct dn_fib_res {
31 31
32struct dn_fib_nh { 32struct dn_fib_nh {
33 struct net_device *nh_dev; 33 struct net_device *nh_dev;
34 unsigned nh_flags; 34 unsigned int nh_flags;
35 unsigned char nh_scope; 35 unsigned char nh_scope;
36 int nh_weight; 36 int nh_weight;
37 int nh_power; 37 int nh_power;
@@ -45,7 +45,7 @@ struct dn_fib_info {
45 int fib_treeref; 45 int fib_treeref;
46 atomic_t fib_clntref; 46 atomic_t fib_clntref;
47 int fib_dead; 47 int fib_dead;
48 unsigned fib_flags; 48 unsigned int fib_flags;
49 int fib_protocol; 49 int fib_protocol;
50 __le16 fib_prefsrc; 50 __le16 fib_prefsrc;
51 __u32 fib_priority; 51 __u32 fib_priority;
@@ -140,7 +140,7 @@ extern void dn_fib_table_cleanup(void);
140 */ 140 */
141extern void dn_fib_rules_init(void); 141extern void dn_fib_rules_init(void);
142extern void dn_fib_rules_cleanup(void); 142extern void dn_fib_rules_cleanup(void);
143extern unsigned dnet_addr_type(__le16 addr); 143extern unsigned int dnet_addr_type(__le16 addr);
144extern int dn_fib_lookup(struct flowidn *fld, struct dn_fib_res *res); 144extern int dn_fib_lookup(struct flowidn *fld, struct dn_fib_res *res);
145 145
146extern int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb); 146extern int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb);
diff --git a/include/net/dn_route.h b/include/net/dn_route.h
index 81712cfa1ddf..c507e05d172f 100644
--- a/include/net/dn_route.h
+++ b/include/net/dn_route.h
@@ -76,8 +76,8 @@ struct dn_route {
76 __le16 rt_src_map; 76 __le16 rt_src_map;
77 __le16 rt_dst_map; 77 __le16 rt_dst_map;
78 78
79 unsigned rt_flags; 79 unsigned int rt_flags;
80 unsigned rt_type; 80 unsigned int rt_type;
81}; 81};
82 82
83static inline bool dn_is_input_route(struct dn_route *rt) 83static inline bool dn_is_input_route(struct dn_route *rt)
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index e1c2ee0eef47..3682a0a076c1 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -12,7 +12,7 @@ struct sk_buff;
12struct dst_ops { 12struct dst_ops {
13 unsigned short family; 13 unsigned short family;
14 __be16 protocol; 14 __be16 protocol;
15 unsigned gc_thresh; 15 unsigned int gc_thresh;
16 16
17 int (*gc)(struct dst_ops *ops); 17 int (*gc)(struct dst_ops *ops);
18 struct dst_entry * (*check)(struct dst_entry *, __u32 cookie); 18 struct dst_entry * (*check)(struct dst_entry *, __u32 cookie);
diff --git a/include/net/icmp.h b/include/net/icmp.h
index 75d615649071..9ac2524d1402 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -25,7 +25,7 @@
25 25
26struct icmp_err { 26struct icmp_err {
27 int errno; 27 int errno;
28 unsigned fatal:1; 28 unsigned int fatal:1;
29}; 29};
30 30
31extern const struct icmp_err icmp_err_convert[]; 31extern const struct icmp_err icmp_err_convert[];
@@ -41,7 +41,6 @@ struct net;
41 41
42extern void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info); 42extern void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info);
43extern int icmp_rcv(struct sk_buff *skb); 43extern int icmp_rcv(struct sk_buff *skb);
44extern int icmp_ioctl(struct sock *sk, int cmd, unsigned long arg);
45extern int icmp_init(void); 44extern int icmp_init(void);
46extern void icmp_out_count(struct net *net, unsigned char type); 45extern void icmp_out_count(struct net *net, unsigned char type);
47 46
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 51a7031b4aa3..50f325fd0691 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -120,7 +120,7 @@ struct ifmcaddr6 {
120 unsigned char mca_crcount; 120 unsigned char mca_crcount;
121 unsigned long mca_sfcount[2]; 121 unsigned long mca_sfcount[2];
122 struct timer_list mca_timer; 122 struct timer_list mca_timer;
123 unsigned mca_flags; 123 unsigned int mca_flags;
124 int mca_users; 124 int mca_users;
125 atomic_t mca_refcnt; 125 atomic_t mca_refcnt;
126 spinlock_t mca_lock; 126 spinlock_t mca_lock;
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index 3207e58ee019..1866a676c810 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -23,7 +23,7 @@ struct sock;
23struct sockaddr; 23struct sockaddr;
24 24
25extern int inet6_csk_bind_conflict(const struct sock *sk, 25extern int inet6_csk_bind_conflict(const struct sock *sk,
26 const struct inet_bind_bucket *tb); 26 const struct inet_bind_bucket *tb, bool relax);
27 27
28extern struct dst_entry* inet6_csk_route_req(struct sock *sk, 28extern struct dst_entry* inet6_csk_route_req(struct sock *sk,
29 const struct request_sock *req); 29 const struct request_sock *req);
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index dbf9aab34c82..7d83f90f203f 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -45,6 +45,7 @@ struct inet_connection_sock_af_ops {
45 struct dst_entry *dst); 45 struct dst_entry *dst);
46 struct inet_peer *(*get_peer)(struct sock *sk, bool *release_it); 46 struct inet_peer *(*get_peer)(struct sock *sk, bool *release_it);
47 u16 net_header_len; 47 u16 net_header_len;
48 u16 net_frag_header_len;
48 u16 sockaddr_len; 49 u16 sockaddr_len;
49 int (*setsockopt)(struct sock *sk, int level, int optname, 50 int (*setsockopt)(struct sock *sk, int level, int optname,
50 char __user *optval, unsigned int optlen); 51 char __user *optval, unsigned int optlen);
@@ -60,7 +61,7 @@ struct inet_connection_sock_af_ops {
60#endif 61#endif
61 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *); 62 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
62 int (*bind_conflict)(const struct sock *sk, 63 int (*bind_conflict)(const struct sock *sk,
63 const struct inet_bind_bucket *tb); 64 const struct inet_bind_bucket *tb, bool relax);
64}; 65};
65 66
66/** inet_connection_sock - INET connection oriented sock 67/** inet_connection_sock - INET connection oriented sock
@@ -245,7 +246,7 @@ extern struct request_sock *inet_csk_search_req(const struct sock *sk,
245 const __be32 raddr, 246 const __be32 raddr,
246 const __be32 laddr); 247 const __be32 laddr);
247extern int inet_csk_bind_conflict(const struct sock *sk, 248extern int inet_csk_bind_conflict(const struct sock *sk,
248 const struct inet_bind_bucket *tb); 249 const struct inet_bind_bucket *tb, bool relax);
249extern int inet_csk_get_port(struct sock *sk, unsigned short snum); 250extern int inet_csk_get_port(struct sock *sk, unsigned short snum);
250 251
251extern struct dst_entry* inet_csk_route_req(struct sock *sk, 252extern struct dst_entry* inet_csk_route_req(struct sock *sk,
diff --git a/include/net/ip.h b/include/net/ip.h
index b53d65f24f7b..94ddb69cc0f3 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -222,9 +222,6 @@ static inline int inet_is_reserved_local_port(int port)
222 222
223extern int sysctl_ip_nonlocal_bind; 223extern int sysctl_ip_nonlocal_bind;
224 224
225extern struct ctl_path net_core_path[];
226extern struct ctl_path net_ipv4_ctl_path[];
227
228/* From inetpeer.c */ 225/* From inetpeer.c */
229extern int inet_peer_threshold; 226extern int inet_peer_threshold;
230extern int inet_peer_minttl; 227extern int inet_peer_minttl;
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 2ad92ca4e6f3..c062b6773cc7 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -146,7 +146,7 @@ struct rt6_rtnl_dump_arg {
146 146
147extern int rt6_dump_route(struct rt6_info *rt, void *p_arg); 147extern int rt6_dump_route(struct rt6_info *rt, void *p_arg);
148extern void rt6_ifdown(struct net *net, struct net_device *dev); 148extern void rt6_ifdown(struct net *net, struct net_device *dev);
149extern void rt6_mtu_change(struct net_device *dev, unsigned mtu); 149extern void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
150extern void rt6_remove_prefsrc(struct inet6_ifaddr *ifp); 150extern void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
151 151
152 152
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 10422ef14e28..78df0866cc38 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -49,7 +49,7 @@ struct fib_nh {
49 struct net_device *nh_dev; 49 struct net_device *nh_dev;
50 struct hlist_node nh_hash; 50 struct hlist_node nh_hash;
51 struct fib_info *nh_parent; 51 struct fib_info *nh_parent;
52 unsigned nh_flags; 52 unsigned int nh_flags;
53 unsigned char nh_scope; 53 unsigned char nh_scope;
54#ifdef CONFIG_IP_ROUTE_MULTIPATH 54#ifdef CONFIG_IP_ROUTE_MULTIPATH
55 int nh_weight; 55 int nh_weight;
@@ -74,7 +74,7 @@ struct fib_info {
74 struct net *fib_net; 74 struct net *fib_net;
75 int fib_treeref; 75 int fib_treeref;
76 atomic_t fib_clntref; 76 atomic_t fib_clntref;
77 unsigned fib_flags; 77 unsigned int fib_flags;
78 unsigned char fib_dead; 78 unsigned char fib_dead;
79 unsigned char fib_protocol; 79 unsigned char fib_protocol;
80 unsigned char fib_scope; 80 unsigned char fib_scope;
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 72522f087375..93b81aa73429 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -10,7 +10,6 @@
10 10
11#include <asm/types.h> /* for __uXX types */ 11#include <asm/types.h> /* for __uXX types */
12 12
13#include <linux/sysctl.h> /* for ctl_path */
14#include <linux/list.h> /* for struct list_head */ 13#include <linux/list.h> /* for struct list_head */
15#include <linux/spinlock.h> /* for struct rwlock_t */ 14#include <linux/spinlock.h> /* for struct rwlock_t */
16#include <linux/atomic.h> /* for struct atomic_t */ 15#include <linux/atomic.h> /* for struct atomic_t */
@@ -580,8 +579,8 @@ struct ip_vs_service_user_kern {
580 /* virtual service options */ 579 /* virtual service options */
581 char *sched_name; 580 char *sched_name;
582 char *pe_name; 581 char *pe_name;
583 unsigned flags; /* virtual service flags */ 582 unsigned int flags; /* virtual service flags */
584 unsigned timeout; /* persistent timeout in sec */ 583 unsigned int timeout; /* persistent timeout in sec */
585 u32 netmask; /* persistent netmask */ 584 u32 netmask; /* persistent netmask */
586}; 585};
587 586
@@ -592,7 +591,7 @@ struct ip_vs_dest_user_kern {
592 u16 port; 591 u16 port;
593 592
594 /* real server options */ 593 /* real server options */
595 unsigned conn_flags; /* connection flags */ 594 unsigned int conn_flags; /* connection flags */
596 int weight; /* destination weight */ 595 int weight; /* destination weight */
597 596
598 /* thresholds for active connections */ 597 /* thresholds for active connections */
@@ -616,8 +615,8 @@ struct ip_vs_service {
616 union nf_inet_addr addr; /* IP address for virtual service */ 615 union nf_inet_addr addr; /* IP address for virtual service */
617 __be16 port; /* port number for the service */ 616 __be16 port; /* port number for the service */
618 __u32 fwmark; /* firewall mark of the service */ 617 __u32 fwmark; /* firewall mark of the service */
619 unsigned flags; /* service status flags */ 618 unsigned int flags; /* service status flags */
620 unsigned timeout; /* persistent timeout in ticks */ 619 unsigned int timeout; /* persistent timeout in ticks */
621 __be32 netmask; /* grouping granularity */ 620 __be32 netmask; /* grouping granularity */
622 struct net *net; 621 struct net *net;
623 622
@@ -647,7 +646,7 @@ struct ip_vs_dest {
647 u16 af; /* address family */ 646 u16 af; /* address family */
648 __be16 port; /* port number of the server */ 647 __be16 port; /* port number of the server */
649 union nf_inet_addr addr; /* IP address of the server */ 648 union nf_inet_addr addr; /* IP address of the server */
650 volatile unsigned flags; /* dest status flags */ 649 volatile unsigned int flags; /* dest status flags */
651 atomic_t conn_flags; /* flags to copy to conn */ 650 atomic_t conn_flags; /* flags to copy to conn */
652 atomic_t weight; /* server weight */ 651 atomic_t weight; /* server weight */
653 652
@@ -953,7 +952,7 @@ static inline int sysctl_sync_ver(struct netns_ipvs *ipvs)
953 * IPVS core functions 952 * IPVS core functions
954 * (from ip_vs_core.c) 953 * (from ip_vs_core.c)
955 */ 954 */
956extern const char *ip_vs_proto_name(unsigned proto); 955extern const char *ip_vs_proto_name(unsigned int proto);
957extern void ip_vs_init_hash_table(struct list_head *table, int rows); 956extern void ip_vs_init_hash_table(struct list_head *table, int rows);
958#define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table((t), ARRAY_SIZE((t))) 957#define IP_VS_INIT_HASH_TABLE(t) ip_vs_init_hash_table((t), ARRAY_SIZE((t)))
959 958
@@ -1014,7 +1013,7 @@ extern void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);
1014 1013
1015struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p, 1014struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p,
1016 const union nf_inet_addr *daddr, 1015 const union nf_inet_addr *daddr,
1017 __be16 dport, unsigned flags, 1016 __be16 dport, unsigned int flags,
1018 struct ip_vs_dest *dest, __u32 fwmark); 1017 struct ip_vs_dest *dest, __u32 fwmark);
1019extern void ip_vs_conn_expire_now(struct ip_vs_conn *cp); 1018extern void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
1020 1019
@@ -1184,7 +1183,6 @@ extern void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg);
1184 * IPVS control data and functions (from ip_vs_ctl.c) 1183 * IPVS control data and functions (from ip_vs_ctl.c)
1185 */ 1184 */
1186extern struct ip_vs_stats ip_vs_stats; 1185extern struct ip_vs_stats ip_vs_stats;
1187extern const struct ctl_path net_vs_ctl_path[];
1188extern int sysctl_ip_vs_sync_ver; 1186extern int sysctl_ip_vs_sync_ver;
1189 1187
1190extern void ip_vs_sync_switch_mode(struct net *net, int mode); 1188extern void ip_vs_sync_switch_mode(struct net *net, int mode);
diff --git a/include/net/ipip.h b/include/net/ipip.h
index a32654d52730..a93cf6d7e94b 100644
--- a/include/net/ipip.h
+++ b/include/net/ipip.h
@@ -54,8 +54,10 @@ struct ip_tunnel_prl_entry {
54 \ 54 \
55 err = ip_local_out(skb); \ 55 err = ip_local_out(skb); \
56 if (likely(net_xmit_eval(err) == 0)) { \ 56 if (likely(net_xmit_eval(err) == 0)) { \
57 u64_stats_update_begin(&(stats1)->syncp); \
57 (stats1)->tx_bytes += pkt_len; \ 58 (stats1)->tx_bytes += pkt_len; \
58 (stats1)->tx_packets++; \ 59 (stats1)->tx_packets++; \
60 u64_stats_update_end(&(stats1)->syncp); \
59 } else { \ 61 } else { \
60 (stats2)->tx_errors++; \ 62 (stats2)->tx_errors++; \
61 (stats2)->tx_aborted_errors++; \ 63 (stats2)->tx_aborted_errors++; \
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index e4170a22fc6f..4332e9aad853 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -113,7 +113,6 @@ struct frag_hdr {
113 113
114/* sysctls */ 114/* sysctls */
115extern int sysctl_mld_max_msf; 115extern int sysctl_mld_max_msf;
116extern struct ctl_path net_ipv6_ctl_path[];
117 116
118#define _DEVINC(net, statname, modifier, idev, field) \ 117#define _DEVINC(net, statname, modifier, idev, field) \
119({ \ 118({ \
@@ -345,7 +344,7 @@ static inline int ipv6_addr_equal(const struct in6_addr *a1,
345static inline int __ipv6_prefix_equal(const __be32 *a1, const __be32 *a2, 344static inline int __ipv6_prefix_equal(const __be32 *a1, const __be32 *a2,
346 unsigned int prefixlen) 345 unsigned int prefixlen)
347{ 346{
348 unsigned pdw, pbi; 347 unsigned int pdw, pbi;
349 348
350 /* check complete u32 in prefix */ 349 /* check complete u32 in prefix */
351 pdw = prefixlen >> 5; 350 pdw = prefixlen >> 5;
@@ -661,8 +660,6 @@ extern struct ctl_table *ipv6_icmp_sysctl_init(struct net *net);
661extern struct ctl_table *ipv6_route_sysctl_init(struct net *net); 660extern struct ctl_table *ipv6_route_sysctl_init(struct net *net);
662extern int ipv6_sysctl_register(void); 661extern int ipv6_sysctl_register(void);
663extern void ipv6_sysctl_unregister(void); 662extern void ipv6_sysctl_unregister(void);
664extern int ipv6_static_sysctl_register(void);
665extern void ipv6_static_sysctl_unregister(void);
666#endif 663#endif
667 664
668#endif /* _NET_IPV6_H */ 665#endif /* _NET_IPV6_H */
diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
index 23a409381fa9..6ca3113df39e 100644
--- a/include/net/llc_c_ev.h
+++ b/include/net/llc_c_ev.h
@@ -264,6 +264,6 @@ extern int llc_conn_ev_qlfy_set_status_rst_done(struct sock *sk,
264static __inline__ int llc_conn_space(struct sock *sk, struct sk_buff *skb) 264static __inline__ int llc_conn_space(struct sock *sk, struct sk_buff *skb)
265{ 265{
266 return atomic_read(&sk->sk_rmem_alloc) + skb->truesize < 266 return atomic_read(&sk->sk_rmem_alloc) + skb->truesize <
267 (unsigned)sk->sk_rcvbuf; 267 (unsigned int)sk->sk_rcvbuf;
268} 268}
269#endif /* LLC_C_EV_H */ 269#endif /* LLC_C_EV_H */
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 9210bdc7bd8d..da3658177997 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -95,9 +95,11 @@ struct device;
95 * @IEEE80211_MAX_QUEUES: Maximum number of regular device queues. 95 * @IEEE80211_MAX_QUEUES: Maximum number of regular device queues.
96 */ 96 */
97enum ieee80211_max_queues { 97enum ieee80211_max_queues {
98 IEEE80211_MAX_QUEUES = 4, 98 IEEE80211_MAX_QUEUES = 16,
99}; 99};
100 100
101#define IEEE80211_INVAL_HW_QUEUE 0xff
102
101/** 103/**
102 * enum ieee80211_ac_numbers - AC numbers as used in mac80211 104 * enum ieee80211_ac_numbers - AC numbers as used in mac80211
103 * @IEEE80211_AC_VO: voice 105 * @IEEE80211_AC_VO: voice
@@ -244,7 +246,7 @@ enum ieee80211_rssi_event {
244 * @channel_type: Channel type for this BSS -- the hardware might be 246 * @channel_type: Channel type for this BSS -- the hardware might be
245 * configured for HT40+ while this BSS only uses no-HT, for 247 * configured for HT40+ while this BSS only uses no-HT, for
246 * example. 248 * example.
247 * @ht_operation_mode: HT operation mode (like in &struct ieee80211_ht_info). 249 * @ht_operation_mode: HT operation mode like in &struct ieee80211_ht_operation.
248 * This field is only valid when the channel type is one of the HT types. 250 * This field is only valid when the channel type is one of the HT types.
249 * @cqm_rssi_thold: Connection quality monitor RSSI threshold, a zero value 251 * @cqm_rssi_thold: Connection quality monitor RSSI threshold, a zero value
250 * implies disabled 252 * implies disabled
@@ -522,7 +524,7 @@ struct ieee80211_tx_rate {
522 * 524 *
523 * @flags: transmit info flags, defined above 525 * @flags: transmit info flags, defined above
524 * @band: the band to transmit on (use for checking for races) 526 * @band: the band to transmit on (use for checking for races)
525 * @antenna_sel_tx: antenna to use, 0 for automatic diversity 527 * @hw_queue: HW queue to put the frame on, skb_get_queue_mapping() gives the AC
526 * @ack_frame_id: internal frame ID for TX status, used internally 528 * @ack_frame_id: internal frame ID for TX status, used internally
527 * @control: union for control data 529 * @control: union for control data
528 * @status: union for status data 530 * @status: union for status data
@@ -538,7 +540,7 @@ struct ieee80211_tx_info {
538 u32 flags; 540 u32 flags;
539 u8 band; 541 u8 band;
540 542
541 u8 antenna_sel_tx; 543 u8 hw_queue;
542 544
543 u16 ack_frame_id; 545 u16 ack_frame_id;
544 546
@@ -564,7 +566,8 @@ struct ieee80211_tx_info {
564 u8 ampdu_ack_len; 566 u8 ampdu_ack_len;
565 int ack_signal; 567 int ack_signal;
566 u8 ampdu_len; 568 u8 ampdu_len;
567 /* 15 bytes free */ 569 u8 antenna;
570 /* 14 bytes free */
568 } status; 571 } status;
569 struct { 572 struct {
570 struct ieee80211_tx_rate driver_rates[ 573 struct ieee80211_tx_rate driver_rates[
@@ -888,6 +891,8 @@ enum ieee80211_vif_flags {
888 * these need to be set (or cleared) when the interface is added 891 * these need to be set (or cleared) when the interface is added
889 * or, if supported by the driver, the interface type is changed 892 * or, if supported by the driver, the interface type is changed
890 * at runtime, mac80211 will never touch this field 893 * at runtime, mac80211 will never touch this field
894 * @hw_queue: hardware queue for each AC
895 * @cab_queue: content-after-beacon (DTIM beacon really) queue, AP mode only
891 * @drv_priv: data area for driver use, will always be aligned to 896 * @drv_priv: data area for driver use, will always be aligned to
892 * sizeof(void *). 897 * sizeof(void *).
893 */ 898 */
@@ -896,7 +901,12 @@ struct ieee80211_vif {
896 struct ieee80211_bss_conf bss_conf; 901 struct ieee80211_bss_conf bss_conf;
897 u8 addr[ETH_ALEN]; 902 u8 addr[ETH_ALEN];
898 bool p2p; 903 bool p2p;
904
905 u8 cab_queue;
906 u8 hw_queue[IEEE80211_NUM_ACS];
907
899 u32 driver_flags; 908 u32 driver_flags;
909
900 /* must be last */ 910 /* must be last */
901 u8 drv_priv[0] __attribute__((__aligned__(sizeof(void *)))); 911 u8 drv_priv[0] __attribute__((__aligned__(sizeof(void *))));
902}; 912};
@@ -1174,6 +1184,15 @@ enum sta_notify_cmd {
1174 * @IEEE80211_HW_SCAN_WHILE_IDLE: The device can do hw scan while 1184 * @IEEE80211_HW_SCAN_WHILE_IDLE: The device can do hw scan while
1175 * being idle (i.e. mac80211 doesn't have to go idle-off during the 1185 * being idle (i.e. mac80211 doesn't have to go idle-off during the
1176 * the scan). 1186 * the scan).
1187 *
1188 * @IEEE80211_HW_WANT_MONITOR_VIF: The driver would like to be informed of
1189 * a virtual monitor interface when monitor interfaces are the only
1190 * active interfaces.
1191 *
1192 * @IEEE80211_HW_QUEUE_CONTROL: The driver wants to control per-interface
1193 * queue mapping in order to use different queues (not just one per AC)
1194 * for different virtual interfaces. See the doc section on HW queue
1195 * control for more details.
1177 */ 1196 */
1178enum ieee80211_hw_flags { 1197enum ieee80211_hw_flags {
1179 IEEE80211_HW_HAS_RATE_CONTROL = 1<<0, 1198 IEEE80211_HW_HAS_RATE_CONTROL = 1<<0,
@@ -1190,13 +1209,13 @@ enum ieee80211_hw_flags {
1190 IEEE80211_HW_PS_NULLFUNC_STACK = 1<<11, 1209 IEEE80211_HW_PS_NULLFUNC_STACK = 1<<11,
1191 IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 1<<12, 1210 IEEE80211_HW_SUPPORTS_DYNAMIC_PS = 1<<12,
1192 IEEE80211_HW_MFP_CAPABLE = 1<<13, 1211 IEEE80211_HW_MFP_CAPABLE = 1<<13,
1193 /* reuse bit 14 */ 1212 IEEE80211_HW_WANT_MONITOR_VIF = 1<<14,
1194 IEEE80211_HW_SUPPORTS_STATIC_SMPS = 1<<15, 1213 IEEE80211_HW_SUPPORTS_STATIC_SMPS = 1<<15,
1195 IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS = 1<<16, 1214 IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS = 1<<16,
1196 IEEE80211_HW_SUPPORTS_UAPSD = 1<<17, 1215 IEEE80211_HW_SUPPORTS_UAPSD = 1<<17,
1197 IEEE80211_HW_REPORTS_TX_ACK_STATUS = 1<<18, 1216 IEEE80211_HW_REPORTS_TX_ACK_STATUS = 1<<18,
1198 IEEE80211_HW_CONNECTION_MONITOR = 1<<19, 1217 IEEE80211_HW_CONNECTION_MONITOR = 1<<19,
1199 /* reuse bit 20 */ 1218 IEEE80211_HW_QUEUE_CONTROL = 1<<20,
1200 IEEE80211_HW_SUPPORTS_PER_STA_GTK = 1<<21, 1219 IEEE80211_HW_SUPPORTS_PER_STA_GTK = 1<<21,
1201 IEEE80211_HW_AP_LINK_PS = 1<<22, 1220 IEEE80211_HW_AP_LINK_PS = 1<<22,
1202 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW = 1<<23, 1221 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW = 1<<23,
@@ -1266,6 +1285,9 @@ enum ieee80211_hw_flags {
1266 * @max_tx_aggregation_subframes: maximum number of subframes in an 1285 * @max_tx_aggregation_subframes: maximum number of subframes in an
1267 * aggregate an HT driver will transmit, used by the peer as a 1286 * aggregate an HT driver will transmit, used by the peer as a
1268 * hint to size its reorder buffer. 1287 * hint to size its reorder buffer.
1288 *
1289 * @offchannel_tx_hw_queue: HW queue ID to use for offchannel TX
1290 * (if %IEEE80211_HW_QUEUE_CONTROL is set)
1269 */ 1291 */
1270struct ieee80211_hw { 1292struct ieee80211_hw {
1271 struct ieee80211_conf conf; 1293 struct ieee80211_conf conf;
@@ -1286,6 +1308,7 @@ struct ieee80211_hw {
1286 u8 max_rate_tries; 1308 u8 max_rate_tries;
1287 u8 max_rx_aggregation_subframes; 1309 u8 max_rx_aggregation_subframes;
1288 u8 max_tx_aggregation_subframes; 1310 u8 max_tx_aggregation_subframes;
1311 u8 offchannel_tx_hw_queue;
1289}; 1312};
1290 1313
1291/** 1314/**
@@ -1694,6 +1717,61 @@ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
1694 */ 1717 */
1695 1718
1696/** 1719/**
1720 * DOC: HW queue control
1721 *
1722 * Before HW queue control was introduced, mac80211 only had a single static
1723 * assignment of per-interface AC software queues to hardware queues. This
1724 * was problematic for a few reasons:
1725 * 1) off-channel transmissions might get stuck behind other frames
1726 * 2) multiple virtual interfaces couldn't be handled correctly
1727 * 3) after-DTIM frames could get stuck behind other frames
1728 *
1729 * To solve this, hardware typically uses multiple different queues for all
1730 * the different usages, and this needs to be propagated into mac80211 so it
1731 * won't have the same problem with the software queues.
1732 *
1733 * Therefore, mac80211 now offers the %IEEE80211_HW_QUEUE_CONTROL capability
1734 * flag that tells it that the driver implements its own queue control. To do
1735 * so, the driver will set up the various queues in each &struct ieee80211_vif
1736 * and the offchannel queue in &struct ieee80211_hw. In response, mac80211 will
1737 * use those queue IDs in the hw_queue field of &struct ieee80211_tx_info and
1738 * if necessary will queue the frame on the right software queue that mirrors
1739 * the hardware queue.
1740 * Additionally, the driver has to then use these HW queue IDs for the queue
1741 * management functions (ieee80211_stop_queue() et al.)
1742 *
1743 * The driver is free to set up the queue mappings as needed, multiple virtual
1744 * interfaces may map to the same hardware queues if needed. The setup has to
1745 * happen during add_interface or change_interface callbacks. For example, a
1746 * driver supporting station+station and station+AP modes might decide to have
1747 * 10 hardware queues to handle different scenarios:
1748 *
1749 * 4 AC HW queues for 1st vif: 0, 1, 2, 3
1750 * 4 AC HW queues for 2nd vif: 4, 5, 6, 7
1751 * after-DTIM queue for AP: 8
1752 * off-channel queue: 9
1753 *
1754 * It would then set up the hardware like this:
1755 * hw.offchannel_tx_hw_queue = 9
1756 *
1757 * and the first virtual interface that is added as follows:
1758 * vif.hw_queue[IEEE80211_AC_VO] = 0
1759 * vif.hw_queue[IEEE80211_AC_VI] = 1
1760 * vif.hw_queue[IEEE80211_AC_BE] = 2
1761 * vif.hw_queue[IEEE80211_AC_BK] = 3
1762 * vif.cab_queue = 8 // if AP mode, otherwise %IEEE80211_INVAL_HW_QUEUE
1763 * and the second virtual interface with 4-7.
1764 *
1765 * If queue 6 gets full, for example, mac80211 would only stop the second
1766 * virtual interface's BE queue since virtual interface queues are per AC.
1767 *
1768 * Note that the vif.cab_queue value should be set to %IEEE80211_INVAL_HW_QUEUE
1769 * whenever the queue is not used (i.e. the interface is not in AP mode) if the
1770 * queue could potentially be shared since mac80211 will look at cab_queue when
1771 * a queue is stopped/woken even if the interface is not in AP mode.
1772 */
1773
1774/**
1697 * enum ieee80211_filter_flags - hardware filter flags 1775 * enum ieee80211_filter_flags - hardware filter flags
1698 * 1776 *
1699 * These flags determine what the filter in hardware should be 1777 * These flags determine what the filter in hardware should be
@@ -1780,6 +1858,18 @@ enum ieee80211_frame_release_type {
1780}; 1858};
1781 1859
1782/** 1860/**
1861 * enum ieee80211_rate_control_changed - flags to indicate what changed
1862 *
1863 * @IEEE80211_RC_BW_CHANGED: The bandwidth that can be used to transmit
1864 * to this station changed.
1865 * @IEEE80211_RC_SMPS_CHANGED: The SMPS state of the station changed.
1866 */
1867enum ieee80211_rate_control_changed {
1868 IEEE80211_RC_BW_CHANGED = BIT(0),
1869 IEEE80211_RC_SMPS_CHANGED = BIT(1),
1870};
1871
1872/**
1783 * struct ieee80211_ops - callbacks from mac80211 to the driver 1873 * struct ieee80211_ops - callbacks from mac80211 to the driver
1784 * 1874 *
1785 * This structure contains various callbacks that the driver may 1875 * This structure contains various callbacks that the driver may
@@ -1980,6 +2070,14 @@ enum ieee80211_frame_release_type {
1980 * up the list of states. 2070 * up the list of states.
1981 * The callback can sleep. 2071 * The callback can sleep.
1982 * 2072 *
2073 * @sta_rc_update: Notifies the driver of changes to the bitrates that can be
2074 * used to transmit to the station. The changes are advertised with bits
2075 * from &enum ieee80211_rate_control_changed and the values are reflected
2076 * in the station data. This callback should only be used when the driver
2077 * uses hardware rate control (%IEEE80211_HW_HAS_RATE_CONTROL) since
2078 * otherwise the rate control algorithm is notified directly.
2079 * Must be atomic.
2080 *
1983 * @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max), 2081 * @conf_tx: Configure TX queue parameters (EDCF (aifs, cw_min, cw_max),
1984 * bursting) for a hardware TX queue. 2082 * bursting) for a hardware TX queue.
1985 * Returns a negative error code on failure. 2083 * Returns a negative error code on failure.
@@ -2135,6 +2233,7 @@ struct ieee80211_ops {
2135#ifdef CONFIG_PM 2233#ifdef CONFIG_PM
2136 int (*suspend)(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan); 2234 int (*suspend)(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
2137 int (*resume)(struct ieee80211_hw *hw); 2235 int (*resume)(struct ieee80211_hw *hw);
2236 void (*set_wakeup)(struct ieee80211_hw *hw, bool enabled);
2138#endif 2237#endif
2139 int (*add_interface)(struct ieee80211_hw *hw, 2238 int (*add_interface)(struct ieee80211_hw *hw,
2140 struct ieee80211_vif *vif); 2239 struct ieee80211_vif *vif);
@@ -2196,8 +2295,12 @@ struct ieee80211_ops {
2196 struct ieee80211_sta *sta, 2295 struct ieee80211_sta *sta,
2197 enum ieee80211_sta_state old_state, 2296 enum ieee80211_sta_state old_state,
2198 enum ieee80211_sta_state new_state); 2297 enum ieee80211_sta_state new_state);
2298 void (*sta_rc_update)(struct ieee80211_hw *hw,
2299 struct ieee80211_vif *vif,
2300 struct ieee80211_sta *sta,
2301 u32 changed);
2199 int (*conf_tx)(struct ieee80211_hw *hw, 2302 int (*conf_tx)(struct ieee80211_hw *hw,
2200 struct ieee80211_vif *vif, u16 queue, 2303 struct ieee80211_vif *vif, u16 ac,
2201 const struct ieee80211_tx_queue_params *params); 2304 const struct ieee80211_tx_queue_params *params);
2202 u64 (*get_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif); 2305 u64 (*get_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
2203 void (*set_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 2306 void (*set_tsf)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -2844,6 +2947,7 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
2844 */ 2947 */
2845__le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw, 2948__le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
2846 struct ieee80211_vif *vif, 2949 struct ieee80211_vif *vif,
2950 enum ieee80211_band band,
2847 size_t frame_len, 2951 size_t frame_len,
2848 struct ieee80211_rate *rate); 2952 struct ieee80211_rate *rate);
2849 2953
@@ -3512,19 +3616,6 @@ void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn);
3512/* Rate control API */ 3616/* Rate control API */
3513 3617
3514/** 3618/**
3515 * enum rate_control_changed - flags to indicate which parameter changed
3516 *
3517 * @IEEE80211_RC_HT_CHANGED: The HT parameters of the operating channel have
3518 * changed, rate control algorithm can update its internal state if needed.
3519 * @IEEE80211_RC_SMPS_CHANGED: The SMPS state of the station changed, the rate
3520 * control algorithm needs to adjust accordingly.
3521 */
3522enum rate_control_changed {
3523 IEEE80211_RC_HT_CHANGED = BIT(0),
3524 IEEE80211_RC_SMPS_CHANGED = BIT(1),
3525};
3526
3527/**
3528 * struct ieee80211_tx_rate_control - rate control information for/from RC algo 3619 * struct ieee80211_tx_rate_control - rate control information for/from RC algo
3529 * 3620 *
3530 * @hw: The hardware the algorithm is invoked for. 3621 * @hw: The hardware the algorithm is invoked for.
@@ -3569,9 +3660,8 @@ struct rate_control_ops {
3569 void (*rate_init)(void *priv, struct ieee80211_supported_band *sband, 3660 void (*rate_init)(void *priv, struct ieee80211_supported_band *sband,
3570 struct ieee80211_sta *sta, void *priv_sta); 3661 struct ieee80211_sta *sta, void *priv_sta);
3571 void (*rate_update)(void *priv, struct ieee80211_supported_band *sband, 3662 void (*rate_update)(void *priv, struct ieee80211_supported_band *sband,
3572 struct ieee80211_sta *sta, 3663 struct ieee80211_sta *sta, void *priv_sta,
3573 void *priv_sta, u32 changed, 3664 u32 changed);
3574 enum nl80211_channel_type oper_chan_type);
3575 void (*free_sta)(void *priv, struct ieee80211_sta *sta, 3665 void (*free_sta)(void *priv, struct ieee80211_sta *sta,
3576 void *priv_sta); 3666 void *priv_sta);
3577 3667
@@ -3706,8 +3796,20 @@ void ieee80211_enable_rssi_reports(struct ieee80211_vif *vif,
3706 3796
3707void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif); 3797void ieee80211_disable_rssi_reports(struct ieee80211_vif *vif);
3708 3798
3709int ieee80211_add_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb); 3799int ieee80211_add_srates_ie(struct ieee80211_vif *vif,
3800 struct sk_buff *skb, bool need_basic);
3710 3801
3711int ieee80211_add_ext_srates_ie(struct ieee80211_vif *vif, 3802int ieee80211_add_ext_srates_ie(struct ieee80211_vif *vif,
3712 struct sk_buff *skb); 3803 struct sk_buff *skb, bool need_basic);
3804
3805/**
3806 * ieee80211_ave_rssi - report the average rssi for the specified interface
3807 *
3808 * @vif: the specified virtual interface
3809 *
3810 * This function return the average rssi value for the requested interface.
3811 * It assumes that the given vif is valid.
3812 */
3813int ieee80211_ave_rssi(struct ieee80211_vif *vif);
3814
3713#endif /* MAC80211_H */ 3815#endif /* MAC80211_H */
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 6f9c25a76cd1..c02b6ad3f6c5 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -34,6 +34,7 @@ enum {
34 __ND_OPT_ARRAY_MAX, 34 __ND_OPT_ARRAY_MAX,
35 ND_OPT_ROUTE_INFO = 24, /* RFC4191 */ 35 ND_OPT_ROUTE_INFO = 24, /* RFC4191 */
36 ND_OPT_RDNSS = 25, /* RFC5006 */ 36 ND_OPT_RDNSS = 25, /* RFC5006 */
37 ND_OPT_DNSSL = 31, /* RFC6106 */
37 __ND_OPT_MAX 38 __ND_OPT_MAX
38}; 39};
39 40
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 34c996f46181..6cdfeedb650b 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -195,7 +195,6 @@ static inline void *neighbour_priv(const struct neighbour *n)
195#define NEIGH_UPDATE_F_ADMIN 0x80000000 195#define NEIGH_UPDATE_F_ADMIN 0x80000000
196 196
197extern void neigh_table_init(struct neigh_table *tbl); 197extern void neigh_table_init(struct neigh_table *tbl);
198extern void neigh_table_init_no_netlink(struct neigh_table *tbl);
199extern int neigh_table_clear(struct neigh_table *tbl); 198extern int neigh_table_clear(struct neigh_table *tbl);
200extern struct neighbour * neigh_lookup(struct neigh_table *tbl, 199extern struct neighbour * neigh_lookup(struct neigh_table *tbl,
201 const void *pkey, 200 const void *pkey,
@@ -323,7 +322,7 @@ static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
323#ifdef CONFIG_BRIDGE_NETFILTER 322#ifdef CONFIG_BRIDGE_NETFILTER
324static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb) 323static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
325{ 324{
326 unsigned seq, hh_alen; 325 unsigned int seq, hh_alen;
327 326
328 do { 327 do {
329 seq = read_seqbegin(&hh->hh_lock); 328 seq = read_seqbegin(&hh->hh_lock);
@@ -336,7 +335,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
336 335
337static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb) 336static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb)
338{ 337{
339 unsigned seq; 338 unsigned int seq;
340 int hh_len; 339 int hh_len;
341 340
342 do { 341 do {
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index ee547c149810..ac9195e6a062 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -279,14 +279,25 @@ extern void unregister_pernet_subsys(struct pernet_operations *);
279extern int register_pernet_device(struct pernet_operations *); 279extern int register_pernet_device(struct pernet_operations *);
280extern void unregister_pernet_device(struct pernet_operations *); 280extern void unregister_pernet_device(struct pernet_operations *);
281 281
282struct ctl_path;
283struct ctl_table; 282struct ctl_table;
284struct ctl_table_header; 283struct ctl_table_header;
285 284
286extern struct ctl_table_header *register_net_sysctl_table(struct net *net, 285#ifdef CONFIG_SYSCTL
287 const struct ctl_path *path, struct ctl_table *table); 286extern int net_sysctl_init(void);
288extern struct ctl_table_header *register_net_sysctl_rotable( 287extern struct ctl_table_header *register_net_sysctl(struct net *net,
289 const struct ctl_path *path, struct ctl_table *table); 288 const char *path, struct ctl_table *table);
290extern void unregister_net_sysctl_table(struct ctl_table_header *header); 289extern void unregister_net_sysctl_table(struct ctl_table_header *header);
290#else
291static inline int net_sysctl_init(void) { return 0; }
292static inline struct ctl_table_header *register_net_sysctl(struct net *net,
293 const char *path, struct ctl_table *table)
294{
295 return NULL;
296}
297static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
298{
299}
300#endif
301
291 302
292#endif /* __NET_NET_NAMESPACE_H */ 303#endif /* __NET_NET_NAMESPACE_H */
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
index e8010f445ae1..9699c028b74b 100644
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ b/include/net/netfilter/nf_conntrack_l3proto.h
@@ -65,7 +65,7 @@ struct nf_conntrack_l3proto {
65 65
66#ifdef CONFIG_SYSCTL 66#ifdef CONFIG_SYSCTL
67 struct ctl_table_header *ctl_table_header; 67 struct ctl_table_header *ctl_table_header;
68 struct ctl_path *ctl_table_path; 68 const char *ctl_table_path;
69 struct ctl_table *ctl_table; 69 struct ctl_table *ctl_table;
70#endif /* CONFIG_SYSCTL */ 70#endif /* CONFIG_SYSCTL */
71 71
diff --git a/include/net/netlink.h b/include/net/netlink.h
index f394fe5d7641..785f37a3b44e 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -102,20 +102,6 @@
102 * nla_put_flag(skb, type) add flag attribute to skb 102 * nla_put_flag(skb, type) add flag attribute to skb
103 * nla_put_msecs(skb, type, jiffies) add msecs attribute to skb 103 * nla_put_msecs(skb, type, jiffies) add msecs attribute to skb
104 * 104 *
105 * Exceptions Based Attribute Construction:
106 * NLA_PUT(skb, type, len, data) add attribute to skb
107 * NLA_PUT_U8(skb, type, value) add u8 attribute to skb
108 * NLA_PUT_U16(skb, type, value) add u16 attribute to skb
109 * NLA_PUT_U32(skb, type, value) add u32 attribute to skb
110 * NLA_PUT_U64(skb, type, value) add u64 attribute to skb
111 * NLA_PUT_STRING(skb, type, str) add string attribute to skb
112 * NLA_PUT_FLAG(skb, type) add flag attribute to skb
113 * NLA_PUT_MSECS(skb, type, jiffies) add msecs attribute to skb
114 *
115 * The meaning of these functions is equal to their lower case
116 * variants but they jump to the label nla_put_failure in case
117 * of a failure.
118 *
119 * Nested Attributes Construction: 105 * Nested Attributes Construction:
120 * nla_nest_start(skb, type) start a nested attribute 106 * nla_nest_start(skb, type) start a nested attribute
121 * nla_nest_end(skb, nla) finalize a nested attribute 107 * nla_nest_end(skb, nla) finalize a nested attribute
@@ -772,6 +758,39 @@ static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
772} 758}
773 759
774/** 760/**
761 * nla_put_be16 - Add a __be16 netlink attribute to a socket buffer
762 * @skb: socket buffer to add attribute to
763 * @attrtype: attribute type
764 * @value: numeric value
765 */
766static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value)
767{
768 return nla_put(skb, attrtype, sizeof(__be16), &value);
769}
770
771/**
772 * nla_put_net16 - Add 16-bit network byte order netlink attribute to a socket buffer
773 * @skb: socket buffer to add attribute to
774 * @attrtype: attribute type
775 * @value: numeric value
776 */
777static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value)
778{
779 return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, value);
780}
781
782/**
783 * nla_put_le16 - Add a __le16 netlink attribute to a socket buffer
784 * @skb: socket buffer to add attribute to
785 * @attrtype: attribute type
786 * @value: numeric value
787 */
788static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value)
789{
790 return nla_put(skb, attrtype, sizeof(__le16), &value);
791}
792
793/**
775 * nla_put_u32 - Add a u32 netlink attribute to a socket buffer 794 * nla_put_u32 - Add a u32 netlink attribute to a socket buffer
776 * @skb: socket buffer to add attribute to 795 * @skb: socket buffer to add attribute to
777 * @attrtype: attribute type 796 * @attrtype: attribute type
@@ -783,7 +802,40 @@ static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
783} 802}
784 803
785/** 804/**
786 * nla_put_64 - Add a u64 netlink attribute to a socket buffer 805 * nla_put_be32 - Add a __be32 netlink attribute to a socket buffer
806 * @skb: socket buffer to add attribute to
807 * @attrtype: attribute type
808 * @value: numeric value
809 */
810static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value)
811{
812 return nla_put(skb, attrtype, sizeof(__be32), &value);
813}
814
815/**
816 * nla_put_net32 - Add 32-bit network byte order netlink attribute to a socket buffer
817 * @skb: socket buffer to add attribute to
818 * @attrtype: attribute type
819 * @value: numeric value
820 */
821static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value)
822{
823 return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, value);
824}
825
826/**
827 * nla_put_le32 - Add a __le32 netlink attribute to a socket buffer
828 * @skb: socket buffer to add attribute to
829 * @attrtype: attribute type
830 * @value: numeric value
831 */
832static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value)
833{
834 return nla_put(skb, attrtype, sizeof(__le32), &value);
835}
836
837/**
838 * nla_put_u64 - Add a u64 netlink attribute to a socket buffer
787 * @skb: socket buffer to add attribute to 839 * @skb: socket buffer to add attribute to
788 * @attrtype: attribute type 840 * @attrtype: attribute type
789 * @value: numeric value 841 * @value: numeric value
@@ -794,6 +846,39 @@ static inline int nla_put_u64(struct sk_buff *skb, int attrtype, u64 value)
794} 846}
795 847
796/** 848/**
849 * nla_put_be64 - Add a __be64 netlink attribute to a socket buffer
850 * @skb: socket buffer to add attribute to
851 * @attrtype: attribute type
852 * @value: numeric value
853 */
854static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value)
855{
856 return nla_put(skb, attrtype, sizeof(__be64), &value);
857}
858
859/**
860 * nla_put_net64 - Add 64-bit network byte order netlink attribute to a socket buffer
861 * @skb: socket buffer to add attribute to
862 * @attrtype: attribute type
863 * @value: numeric value
864 */
865static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value)
866{
867 return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, value);
868}
869
870/**
871 * nla_put_le64 - Add a __le64 netlink attribute to a socket buffer
872 * @skb: socket buffer to add attribute to
873 * @attrtype: attribute type
874 * @value: numeric value
875 */
876static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value)
877{
878 return nla_put(skb, attrtype, sizeof(__le64), &value);
879}
880
881/**
797 * nla_put_string - Add a string netlink attribute to a socket buffer 882 * nla_put_string - Add a string netlink attribute to a socket buffer
798 * @skb: socket buffer to add attribute to 883 * @skb: socket buffer to add attribute to
799 * @attrtype: attribute type 884 * @attrtype: attribute type
@@ -828,60 +913,6 @@ static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
828 return nla_put(skb, attrtype, sizeof(u64), &tmp); 913 return nla_put(skb, attrtype, sizeof(u64), &tmp);
829} 914}
830 915
831#define NLA_PUT(skb, attrtype, attrlen, data) \
832 do { \
833 if (unlikely(nla_put(skb, attrtype, attrlen, data) < 0)) \
834 goto nla_put_failure; \
835 } while(0)
836
837#define NLA_PUT_TYPE(skb, type, attrtype, value) \
838 do { \
839 type __tmp = value; \
840 NLA_PUT(skb, attrtype, sizeof(type), &__tmp); \
841 } while(0)
842
843#define NLA_PUT_U8(skb, attrtype, value) \
844 NLA_PUT_TYPE(skb, u8, attrtype, value)
845
846#define NLA_PUT_U16(skb, attrtype, value) \
847 NLA_PUT_TYPE(skb, u16, attrtype, value)
848
849#define NLA_PUT_LE16(skb, attrtype, value) \
850 NLA_PUT_TYPE(skb, __le16, attrtype, value)
851
852#define NLA_PUT_BE16(skb, attrtype, value) \
853 NLA_PUT_TYPE(skb, __be16, attrtype, value)
854
855#define NLA_PUT_NET16(skb, attrtype, value) \
856 NLA_PUT_BE16(skb, attrtype | NLA_F_NET_BYTEORDER, value)
857
858#define NLA_PUT_U32(skb, attrtype, value) \
859 NLA_PUT_TYPE(skb, u32, attrtype, value)
860
861#define NLA_PUT_BE32(skb, attrtype, value) \
862 NLA_PUT_TYPE(skb, __be32, attrtype, value)
863
864#define NLA_PUT_NET32(skb, attrtype, value) \
865 NLA_PUT_BE32(skb, attrtype | NLA_F_NET_BYTEORDER, value)
866
867#define NLA_PUT_U64(skb, attrtype, value) \
868 NLA_PUT_TYPE(skb, u64, attrtype, value)
869
870#define NLA_PUT_BE64(skb, attrtype, value) \
871 NLA_PUT_TYPE(skb, __be64, attrtype, value)
872
873#define NLA_PUT_NET64(skb, attrtype, value) \
874 NLA_PUT_BE64(skb, attrtype | NLA_F_NET_BYTEORDER, value)
875
876#define NLA_PUT_STRING(skb, attrtype, value) \
877 NLA_PUT(skb, attrtype, strlen(value) + 1, value)
878
879#define NLA_PUT_FLAG(skb, attrtype) \
880 NLA_PUT(skb, attrtype, 0, NULL)
881
882#define NLA_PUT_MSECS(skb, attrtype, jiffies) \
883 NLA_PUT_U64(skb, attrtype, jiffies_to_msecs(jiffies))
884
885/** 916/**
886 * nla_get_u32 - return payload of u32 attribute 917 * nla_get_u32 - return payload of u32 attribute
887 * @nla: u32 netlink attribute 918 * @nla: u32 netlink attribute
diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h
index 548d78f2cc47..c06ac58ca107 100644
--- a/include/net/netns/hash.h
+++ b/include/net/netns/hash.h
@@ -5,7 +5,7 @@
5 5
6struct net; 6struct net;
7 7
8static inline unsigned net_hash_mix(struct net *net) 8static inline unsigned int net_hash_mix(struct net *net)
9{ 9{
10#ifdef CONFIG_NET_NS 10#ifdef CONFIG_NET_NS
11 /* 11 /*
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 81abfcb2eb4e..b42be53587ba 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -12,7 +12,9 @@ struct ctl_table_header;
12 12
13struct netns_sysctl_ipv6 { 13struct netns_sysctl_ipv6 {
14#ifdef CONFIG_SYSCTL 14#ifdef CONFIG_SYSCTL
15 struct ctl_table_header *table; 15 struct ctl_table_header *hdr;
16 struct ctl_table_header *route_hdr;
17 struct ctl_table_header *icmp_hdr;
16 struct ctl_table_header *frags_hdr; 18 struct ctl_table_header *frags_hdr;
17#endif 19#endif
18 int bindv6only; 20 int bindv6only;
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
new file mode 100644
index 000000000000..aca65a5a9d0d
--- /dev/null
+++ b/include/net/nfc/hci.h
@@ -0,0 +1,198 @@
1/*
2 * Copyright (C) 2011 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the
16 * Free Software Foundation, Inc.,
17 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 */
19
20#ifndef __NET_HCI_H
21#define __NET_HCI_H
22
23#include <linux/skbuff.h>
24
25#include <net/nfc/nfc.h>
26
27struct nfc_hci_dev;
28
29struct nfc_hci_ops {
30 int (*open) (struct nfc_hci_dev *hdev);
31 void (*close) (struct nfc_hci_dev *hdev);
32 int (*hci_ready) (struct nfc_hci_dev *hdev);
33 int (*xmit) (struct nfc_hci_dev *hdev, struct sk_buff *skb);
34 int (*start_poll) (struct nfc_hci_dev *hdev, u32 protocols);
35 int (*target_from_gate) (struct nfc_hci_dev *hdev, u8 gate,
36 struct nfc_target *target);
37 int (*complete_target_discovered) (struct nfc_hci_dev *hdev, u8 gate,
38 struct nfc_target *target);
39 int (*data_exchange) (struct nfc_hci_dev *hdev,
40 struct nfc_target *target,
41 struct sk_buff *skb, struct sk_buff **res_skb);
42};
43
44#define NFC_HCI_MAX_CUSTOM_GATES 15
45struct nfc_hci_init_data {
46 u8 gate_count;
47 u8 gates[NFC_HCI_MAX_CUSTOM_GATES];
48 char session_id[9];
49};
50
51typedef int (*xmit) (struct sk_buff *skb, void *cb_data);
52
53#define NFC_HCI_MAX_GATES 256
54
55struct nfc_hci_dev {
56 struct nfc_dev *ndev;
57
58 u32 max_data_link_payload;
59
60 struct mutex msg_tx_mutex;
61
62 struct list_head msg_tx_queue;
63
64 struct workqueue_struct *msg_tx_wq;
65 struct work_struct msg_tx_work;
66
67 struct timer_list cmd_timer;
68 struct hci_msg *cmd_pending_msg;
69
70 struct sk_buff_head rx_hcp_frags;
71
72 struct workqueue_struct *msg_rx_wq;
73 struct work_struct msg_rx_work;
74
75 struct sk_buff_head msg_rx_queue;
76
77 struct nfc_hci_ops *ops;
78
79 struct nfc_hci_init_data init_data;
80
81 void *clientdata;
82
83 u8 gate2pipe[NFC_HCI_MAX_GATES];
84
85 bool poll_started;
86 struct nfc_target *targets;
87 int target_count;
88
89 u8 sw_romlib;
90 u8 sw_patch;
91 u8 sw_flashlib_major;
92 u8 sw_flashlib_minor;
93
94 u8 hw_derivative;
95 u8 hw_version;
96 u8 hw_mpw;
97 u8 hw_software;
98 u8 hw_bsid;
99};
100
101/* hci device allocation */
102struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops,
103 struct nfc_hci_init_data *init_data,
104 u32 protocols,
105 int tx_headroom,
106 int tx_tailroom,
107 int max_link_payload);
108void nfc_hci_free_device(struct nfc_hci_dev *hdev);
109
110int nfc_hci_register_device(struct nfc_hci_dev *hdev);
111void nfc_hci_unregister_device(struct nfc_hci_dev *hdev);
112
113void nfc_hci_set_clientdata(struct nfc_hci_dev *hdev, void *clientdata);
114void *nfc_hci_get_clientdata(struct nfc_hci_dev *hdev);
115
116/* Host IDs */
117#define NFC_HCI_HOST_CONTROLLER_ID 0x00
118#define NFC_HCI_TERMINAL_HOST_ID 0x01
119#define NFC_HCI_UICC_HOST_ID 0x02
120
121/* Host Controller Gates and registry indexes */
122#define NFC_HCI_ADMIN_GATE 0x00
123#define NFC_HCI_ADMIN_SESSION_IDENTITY 0x01
124#define NFC_HCI_ADMIN_MAX_PIPE 0x02
125#define NFC_HCI_ADMIN_WHITELIST 0x03
126#define NFC_HCI_ADMIN_HOST_LIST 0x04
127
128#define NFC_HCI_LOOPBACK_GATE 0x04
129
130#define NFC_HCI_ID_MGMT_GATE 0x05
131#define NFC_HCI_ID_MGMT_VERSION_SW 0x01
132#define NFC_HCI_ID_MGMT_VERSION_HW 0x03
133#define NFC_HCI_ID_MGMT_VENDOR_NAME 0x04
134#define NFC_HCI_ID_MGMT_MODEL_ID 0x05
135#define NFC_HCI_ID_MGMT_HCI_VERSION 0x02
136#define NFC_HCI_ID_MGMT_GATES_LIST 0x06
137
138#define NFC_HCI_LINK_MGMT_GATE 0x06
139#define NFC_HCI_LINK_MGMT_REC_ERROR 0x01
140
141#define NFC_HCI_RF_READER_B_GATE 0x11
142#define NFC_HCI_RF_READER_B_PUPI 0x03
143#define NFC_HCI_RF_READER_B_APPLICATION_DATA 0x04
144#define NFC_HCI_RF_READER_B_AFI 0x02
145#define NFC_HCI_RF_READER_B_HIGHER_LAYER_RESPONSE 0x01
146#define NFC_HCI_RF_READER_B_HIGHER_LAYER_DATA 0x05
147
148#define NFC_HCI_RF_READER_A_GATE 0x13
149#define NFC_HCI_RF_READER_A_UID 0x02
150#define NFC_HCI_RF_READER_A_ATQA 0x04
151#define NFC_HCI_RF_READER_A_APPLICATION_DATA 0x05
152#define NFC_HCI_RF_READER_A_SAK 0x03
153#define NFC_HCI_RF_READER_A_FWI_SFGT 0x06
154#define NFC_HCI_RF_READER_A_DATARATE_MAX 0x01
155
156#define NFC_HCI_TYPE_A_SEL_PROT(x) (((x) & 0x60) >> 5)
157#define NFC_HCI_TYPE_A_SEL_PROT_MIFARE 0
158#define NFC_HCI_TYPE_A_SEL_PROT_ISO14443 1
159#define NFC_HCI_TYPE_A_SEL_PROT_DEP 2
160#define NFC_HCI_TYPE_A_SEL_PROT_ISO14443_DEP 3
161
162/* Generic events */
163#define NFC_HCI_EVT_HCI_END_OF_OPERATION 0x01
164#define NFC_HCI_EVT_POST_DATA 0x02
165#define NFC_HCI_EVT_HOT_PLUG 0x03
166
167/* Reader RF gates events */
168#define NFC_HCI_EVT_READER_REQUESTED 0x10
169#define NFC_HCI_EVT_END_OPERATION 0x11
170
171/* Reader Application gate events */
172#define NFC_HCI_EVT_TARGET_DISCOVERED 0x10
173
174/* receiving messages from lower layer */
175void nfc_hci_resp_received(struct nfc_hci_dev *hdev, u8 result,
176 struct sk_buff *skb);
177void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
178 struct sk_buff *skb);
179void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event,
180 struct sk_buff *skb);
181void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb);
182
183/* connecting to gates and sending hci instructions */
184int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate);
185int nfc_hci_disconnect_gate(struct nfc_hci_dev *hdev, u8 gate);
186int nfc_hci_disconnect_all_gates(struct nfc_hci_dev *hdev);
187int nfc_hci_get_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx,
188 struct sk_buff **skb);
189int nfc_hci_set_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx,
190 const u8 *param, size_t param_len);
191int nfc_hci_send_cmd(struct nfc_hci_dev *hdev, u8 gate, u8 cmd,
192 const u8 *param, size_t param_len, struct sk_buff **skb);
193int nfc_hci_send_response(struct nfc_hci_dev *hdev, u8 gate, u8 response,
194 const u8 *param, size_t param_len);
195int nfc_hci_send_event(struct nfc_hci_dev *hdev, u8 gate, u8 event,
196 const u8 *param, size_t param_len);
197
198#endif /* __NET_HCI_H */
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index bac070bf3514..9a2505a5b8de 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -62,10 +62,12 @@ struct nfc_ops {
62 int (*data_exchange)(struct nfc_dev *dev, u32 target_idx, 62 int (*data_exchange)(struct nfc_dev *dev, u32 target_idx,
63 struct sk_buff *skb, data_exchange_cb_t cb, 63 struct sk_buff *skb, data_exchange_cb_t cb,
64 void *cb_context); 64 void *cb_context);
65 int (*check_presence)(struct nfc_dev *dev, u32 target_idx);
65}; 66};
66 67
67#define NFC_TARGET_IDX_ANY -1 68#define NFC_TARGET_IDX_ANY -1
68#define NFC_MAX_GT_LEN 48 69#define NFC_MAX_GT_LEN 48
70#define NFC_TARGET_IDX_NONE 0xffffffff
69 71
70struct nfc_target { 72struct nfc_target {
71 u32 idx; 73 u32 idx;
@@ -78,6 +80,8 @@ struct nfc_target {
78 u8 sensb_res[NFC_SENSB_RES_MAXSIZE]; 80 u8 sensb_res[NFC_SENSB_RES_MAXSIZE];
79 u8 sensf_res_len; 81 u8 sensf_res_len;
80 u8 sensf_res[NFC_SENSF_RES_MAXSIZE]; 82 u8 sensf_res[NFC_SENSF_RES_MAXSIZE];
83 u8 hci_reader_gate;
84 u8 logical_idx;
81}; 85};
82 86
83struct nfc_genl_data { 87struct nfc_genl_data {
@@ -86,7 +90,8 @@ struct nfc_genl_data {
86}; 90};
87 91
88struct nfc_dev { 92struct nfc_dev {
89 unsigned idx; 93 unsigned int idx;
94 u32 target_next_idx;
90 struct nfc_target *targets; 95 struct nfc_target *targets;
91 int n_targets; 96 int n_targets;
92 int targets_generation; 97 int targets_generation;
@@ -94,7 +99,7 @@ struct nfc_dev {
94 struct device dev; 99 struct device dev;
95 bool dev_up; 100 bool dev_up;
96 bool polling; 101 bool polling;
97 bool remote_activated; 102 u32 activated_target_idx;
98 bool dep_link_up; 103 bool dep_link_up;
99 u32 dep_rf_mode; 104 u32 dep_rf_mode;
100 struct nfc_genl_data genl_data; 105 struct nfc_genl_data genl_data;
@@ -103,6 +108,10 @@ struct nfc_dev {
103 int tx_headroom; 108 int tx_headroom;
104 int tx_tailroom; 109 int tx_tailroom;
105 110
111 struct timer_list check_pres_timer;
112 struct workqueue_struct *check_pres_wq;
113 struct work_struct check_pres_work;
114
106 struct nfc_ops *ops; 115 struct nfc_ops *ops;
107}; 116};
108#define to_nfc_dev(_dev) container_of(_dev, struct nfc_dev, dev) 117#define to_nfc_dev(_dev) container_of(_dev, struct nfc_dev, dev)
@@ -181,6 +190,7 @@ int nfc_set_remote_general_bytes(struct nfc_dev *dev,
181 190
182int nfc_targets_found(struct nfc_dev *dev, 191int nfc_targets_found(struct nfc_dev *dev,
183 struct nfc_target *targets, int ntargets); 192 struct nfc_target *targets, int ntargets);
193int nfc_target_lost(struct nfc_dev *dev, u32 target_idx);
184 194
185int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx, 195int nfc_dep_link_is_up(struct nfc_dev *dev, u32 target_idx,
186 u8 comm_mode, u8 rf_mode); 196 u8 comm_mode, u8 rf_mode);
diff --git a/include/net/nfc/shdlc.h b/include/net/nfc/shdlc.h
new file mode 100644
index 000000000000..1071987d0408
--- /dev/null
+++ b/include/net/nfc/shdlc.h
@@ -0,0 +1,104 @@
1/*
2 * Copyright (C) 2012 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the
16 * Free Software Foundation, Inc.,
17 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 */
19
20#ifndef __NFC_SHDLC_H
21#define __NFC_SHDLC_H
22
23struct nfc_shdlc;
24
25struct nfc_shdlc_ops {
26 int (*open) (struct nfc_shdlc *shdlc);
27 void (*close) (struct nfc_shdlc *shdlc);
28 int (*hci_ready) (struct nfc_shdlc *shdlc);
29 int (*xmit) (struct nfc_shdlc *shdlc, struct sk_buff *skb);
30 int (*start_poll) (struct nfc_shdlc *shdlc, u32 protocols);
31 int (*target_from_gate) (struct nfc_shdlc *shdlc, u8 gate,
32 struct nfc_target *target);
33 int (*complete_target_discovered) (struct nfc_shdlc *shdlc, u8 gate,
34 struct nfc_target *target);
35 int (*data_exchange) (struct nfc_shdlc *shdlc,
36 struct nfc_target *target,
37 struct sk_buff *skb, struct sk_buff **res_skb);
38};
39
40enum shdlc_state {
41 SHDLC_DISCONNECTED = 0,
42 SHDLC_CONNECTING = 1,
43 SHDLC_NEGOCIATING = 2,
44 SHDLC_CONNECTED = 3
45};
46
47struct nfc_shdlc {
48 struct mutex state_mutex;
49 enum shdlc_state state;
50 int hard_fault;
51
52 struct nfc_hci_dev *hdev;
53
54 wait_queue_head_t *connect_wq;
55 int connect_tries;
56 int connect_result;
57 struct timer_list connect_timer;/* aka T3 in spec 10.6.1 */
58
59 u8 w; /* window size */
60 bool srej_support;
61
62 struct timer_list t1_timer; /* send ack timeout */
63 bool t1_active;
64
65 struct timer_list t2_timer; /* guard/retransmit timeout */
66 bool t2_active;
67
68 int ns; /* next seq num for send */
69 int nr; /* next expected seq num for receive */
70 int dnr; /* oldest sent unacked seq num */
71
72 struct sk_buff_head rcv_q;
73
74 struct sk_buff_head send_q;
75 bool rnr; /* other side is not ready to receive */
76
77 struct sk_buff_head ack_pending_q;
78
79 struct workqueue_struct *sm_wq;
80 struct work_struct sm_work;
81
82 struct nfc_shdlc_ops *ops;
83
84 int client_headroom;
85 int client_tailroom;
86
87 void *clientdata;
88};
89
90void nfc_shdlc_recv_frame(struct nfc_shdlc *shdlc, struct sk_buff *skb);
91
92struct nfc_shdlc *nfc_shdlc_allocate(struct nfc_shdlc_ops *ops,
93 struct nfc_hci_init_data *init_data,
94 u32 protocols,
95 int tx_headroom, int tx_tailroom,
96 int max_link_payload, const char *devname);
97
98void nfc_shdlc_free(struct nfc_shdlc *shdlc);
99
100void nfc_shdlc_set_clientdata(struct nfc_shdlc *shdlc, void *clientdata);
101void *nfc_shdlc_get_clientdata(struct nfc_shdlc *shdlc);
102struct nfc_hci_dev *nfc_shdlc_get_hci_dev(struct nfc_shdlc *shdlc);
103
104#endif /* __NFC_SHDLC_H */
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index fffdc603f4c8..66f5ac370f92 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -107,7 +107,7 @@ extern int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
107/* Calculate maximal size of packet seen by hard_start_xmit 107/* Calculate maximal size of packet seen by hard_start_xmit
108 routine of this device. 108 routine of this device.
109 */ 109 */
110static inline unsigned psched_mtu(const struct net_device *dev) 110static inline unsigned int psched_mtu(const struct net_device *dev)
111{ 111{
112 return dev->mtu + dev->hard_header_len; 112 return dev->mtu + dev->hard_header_len;
113} 113}
diff --git a/include/net/route.h b/include/net/route.h
index b1c0d5b564c2..ed2b78e2375d 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -50,7 +50,7 @@ struct rtable {
50 __be32 rt_key_src; 50 __be32 rt_key_src;
51 51
52 int rt_genid; 52 int rt_genid;
53 unsigned rt_flags; 53 unsigned int rt_flags;
54 __u16 rt_type; 54 __u16 rt_type;
55 __u8 rt_key_tos; 55 __u8 rt_key_tos;
56 56
@@ -185,8 +185,8 @@ extern unsigned short ip_rt_frag_needed(struct net *net, const struct iphdr *iph
185 unsigned short new_mtu, struct net_device *dev); 185 unsigned short new_mtu, struct net_device *dev);
186extern void ip_rt_send_redirect(struct sk_buff *skb); 186extern void ip_rt_send_redirect(struct sk_buff *skb);
187 187
188extern unsigned inet_addr_type(struct net *net, __be32 addr); 188extern unsigned int inet_addr_type(struct net *net, __be32 addr);
189extern unsigned inet_dev_addr_type(struct net *net, const struct net_device *dev, __be32 addr); 189extern unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev, __be32 addr);
190extern void ip_rt_multicast_event(struct in_device *); 190extern void ip_rt_multicast_event(struct in_device *);
191extern int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg); 191extern int ip_rt_ioctl(struct net *, unsigned int cmd, void __user *arg);
192extern void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt); 192extern void ip_rt_get_source(u8 *src, struct sk_buff *skb, struct rtable *rt);
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 370293901971..bbcfd0993432 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -41,9 +41,11 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
41 * @get_size: Function to calculate required room for dumping device 41 * @get_size: Function to calculate required room for dumping device
42 * specific netlink attributes 42 * specific netlink attributes
43 * @fill_info: Function to dump device specific netlink attributes 43 * @fill_info: Function to dump device specific netlink attributes
44 * @get_xstats_size: Function to calculate required room for dumping devic 44 * @get_xstats_size: Function to calculate required room for dumping device
45 * specific statistics 45 * specific statistics
46 * @fill_xstats: Function to dump device specific statistics 46 * @fill_xstats: Function to dump device specific statistics
47 * @get_tx_queues: Function to determine number of transmit queues to create when
48 * creating a new device.
47 */ 49 */
48struct rtnl_link_ops { 50struct rtnl_link_ops {
49 struct list_head list; 51 struct list_head list;
@@ -75,9 +77,8 @@ struct rtnl_link_ops {
75 size_t (*get_xstats_size)(const struct net_device *dev); 77 size_t (*get_xstats_size)(const struct net_device *dev);
76 int (*fill_xstats)(struct sk_buff *skb, 78 int (*fill_xstats)(struct sk_buff *skb,
77 const struct net_device *dev); 79 const struct net_device *dev);
78 int (*get_tx_queues)(struct net *net, struct nlattr *tb[], 80 int (*get_tx_queues)(struct net *net,
79 unsigned int *tx_queues, 81 struct nlattr *tb[]);
80 unsigned int *real_tx_queues);
81}; 82};
82 83
83extern int __rtnl_link_register(struct rtnl_link_ops *ops); 84extern int __rtnl_link_register(struct rtnl_link_ops *ops);
@@ -94,7 +95,7 @@ extern void rtnl_link_unregister(struct rtnl_link_ops *ops);
94 * @fill_link_af: Function to fill IFLA_AF_SPEC with address family 95 * @fill_link_af: Function to fill IFLA_AF_SPEC with address family
95 * specific netlink attributes. 96 * specific netlink attributes.
96 * @get_link_af_size: Function to calculate size of address family specific 97 * @get_link_af_size: Function to calculate size of address family specific
97 * netlink attributes exlusive the container attribute. 98 * netlink attributes.
98 * @validate_link_af: Validate a IFLA_AF_SPEC attribute, must check attr 99 * @validate_link_af: Validate a IFLA_AF_SPEC attribute, must check attr
99 * for invalid configuration settings. 100 * for invalid configuration settings.
100 * @set_link_af: Function to parse a IFLA_AF_SPEC attribute and modify 101 * @set_link_af: Function to parse a IFLA_AF_SPEC attribute and modify
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 88949a994538..e4652fe58958 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1145,10 +1145,10 @@ struct sctp_outq {
1145 /* Data pending that has never been transmitted. */ 1145 /* Data pending that has never been transmitted. */
1146 struct list_head out_chunk_list; 1146 struct list_head out_chunk_list;
1147 1147
1148 unsigned out_qlen; /* Total length of queued data chunks. */ 1148 unsigned int out_qlen; /* Total length of queued data chunks. */
1149 1149
1150 /* Error of send failed, may used in SCTP_SEND_FAILED event. */ 1150 /* Error of send failed, may used in SCTP_SEND_FAILED event. */
1151 unsigned error; 1151 unsigned int error;
1152 1152
1153 /* These are control chunks we want to send. */ 1153 /* These are control chunks we want to send. */
1154 struct list_head control_chunk_list; 1154 struct list_head control_chunk_list;
@@ -2000,8 +2000,8 @@ void sctp_assoc_update(struct sctp_association *old,
2000__u32 sctp_association_get_next_tsn(struct sctp_association *); 2000__u32 sctp_association_get_next_tsn(struct sctp_association *);
2001 2001
2002void sctp_assoc_sync_pmtu(struct sctp_association *); 2002void sctp_assoc_sync_pmtu(struct sctp_association *);
2003void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned); 2003void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int);
2004void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned); 2004void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int);
2005void sctp_assoc_set_primary(struct sctp_association *, 2005void sctp_assoc_set_primary(struct sctp_association *,
2006 struct sctp_transport *); 2006 struct sctp_transport *);
2007void sctp_assoc_del_nonprimary_peers(struct sctp_association *, 2007void sctp_assoc_del_nonprimary_peers(struct sctp_association *,
diff --git a/include/net/sock.h b/include/net/sock.h
index 5a0a58ac4126..e613704e9d1c 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -377,6 +377,17 @@ struct sock {
377 void (*sk_destruct)(struct sock *sk); 377 void (*sk_destruct)(struct sock *sk);
378}; 378};
379 379
380/*
381 * SK_CAN_REUSE and SK_NO_REUSE on a socket mean that the socket is OK
382 * or not whether his port will be reused by someone else. SK_FORCE_REUSE
383 * on a socket means that the socket will reuse everybody else's port
384 * without looking at the other's sk_reuse value.
385 */
386
387#define SK_NO_REUSE 0
388#define SK_CAN_REUSE 1
389#define SK_FORCE_REUSE 2
390
380static inline int sk_peek_offset(struct sock *sk, int flags) 391static inline int sk_peek_offset(struct sock *sk, int flags)
381{ 392{
382 if ((flags & MSG_PEEK) && (sk->sk_peek_off >= 0)) 393 if ((flags & MSG_PEEK) && (sk->sk_peek_off >= 0))
@@ -699,17 +710,19 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
699 * Do not take into account this skb truesize, 710 * Do not take into account this skb truesize,
700 * to allow even a single big packet to come. 711 * to allow even a single big packet to come.
701 */ 712 */
702static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb) 713static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb,
714 unsigned int limit)
703{ 715{
704 unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc); 716 unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
705 717
706 return qsize > sk->sk_rcvbuf; 718 return qsize > limit;
707} 719}
708 720
709/* The per-socket spinlock must be held here. */ 721/* The per-socket spinlock must be held here. */
710static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb) 722static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
723 unsigned int limit)
711{ 724{
712 if (sk_rcvqueues_full(sk, skb)) 725 if (sk_rcvqueues_full(sk, skb, limit))
713 return -ENOBUFS; 726 return -ENOBUFS;
714 727
715 __sk_add_backlog(sk, skb); 728 __sk_add_backlog(sk, skb);
diff --git a/include/net/tcp.h b/include/net/tcp.h
index f75a04d752cb..92faa6a7ea97 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -123,7 +123,7 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
123#endif 123#endif
124#define TCP_RTO_MAX ((unsigned)(120*HZ)) 124#define TCP_RTO_MAX ((unsigned)(120*HZ))
125#define TCP_RTO_MIN ((unsigned)(HZ/5)) 125#define TCP_RTO_MIN ((unsigned)(HZ/5))
126#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC2988bis initial RTO value */ 126#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC6298 2.1 initial RTO value */
127#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value, now 127#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value, now
128 * used as a fallback RTO for the 128 * used as a fallback RTO for the
129 * initial data transmission if no 129 * initial data transmission if no
@@ -252,6 +252,7 @@ extern int sysctl_tcp_max_ssthresh;
252extern int sysctl_tcp_cookie_size; 252extern int sysctl_tcp_cookie_size;
253extern int sysctl_tcp_thin_linear_timeouts; 253extern int sysctl_tcp_thin_linear_timeouts;
254extern int sysctl_tcp_thin_dupack; 254extern int sysctl_tcp_thin_dupack;
255extern int sysctl_tcp_early_retrans;
255 256
256extern atomic_long_t tcp_memory_allocated; 257extern atomic_long_t tcp_memory_allocated;
257extern struct percpu_counter tcp_sockets_allocated; 258extern struct percpu_counter tcp_sockets_allocated;
@@ -366,13 +367,6 @@ static inline void tcp_dec_quickack_mode(struct sock *sk,
366#define TCP_ECN_DEMAND_CWR 4 367#define TCP_ECN_DEMAND_CWR 4
367#define TCP_ECN_SEEN 8 368#define TCP_ECN_SEEN 8
368 369
369static __inline__ void
370TCP_ECN_create_request(struct request_sock *req, struct tcphdr *th)
371{
372 if (sysctl_tcp_ecn && th->ece && th->cwr)
373 inet_rsk(req)->ecn_ok = 1;
374}
375
376enum tcp_tw_status { 370enum tcp_tw_status {
377 TCP_TW_SUCCESS = 0, 371 TCP_TW_SUCCESS = 0,
378 TCP_TW_RST = 1, 372 TCP_TW_RST = 1,
@@ -395,6 +389,7 @@ extern void tcp_enter_loss(struct sock *sk, int how);
395extern void tcp_clear_retrans(struct tcp_sock *tp); 389extern void tcp_clear_retrans(struct tcp_sock *tp);
396extern void tcp_update_metrics(struct sock *sk); 390extern void tcp_update_metrics(struct sock *sk);
397extern void tcp_close(struct sock *sk, long timeout); 391extern void tcp_close(struct sock *sk, long timeout);
392extern void tcp_init_sock(struct sock *sk);
398extern unsigned int tcp_poll(struct file * file, struct socket *sock, 393extern unsigned int tcp_poll(struct file * file, struct socket *sock,
399 struct poll_table_struct *wait); 394 struct poll_table_struct *wait);
400extern int tcp_getsockopt(struct sock *sk, int level, int optname, 395extern int tcp_getsockopt(struct sock *sk, int level, int optname,
@@ -435,6 +430,10 @@ extern struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
435 struct request_values *rvp); 430 struct request_values *rvp);
436extern int tcp_disconnect(struct sock *sk, int flags); 431extern int tcp_disconnect(struct sock *sk, int flags);
437 432
433void tcp_connect_init(struct sock *sk);
434void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
435int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb,
436 int hdrlen, bool *fragstolen);
438 437
439/* From syncookies.c */ 438/* From syncookies.c */
440extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS]; 439extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
@@ -495,6 +494,8 @@ extern void tcp_send_delayed_ack(struct sock *sk);
495 494
496/* tcp_input.c */ 495/* tcp_input.c */
497extern void tcp_cwnd_application_limited(struct sock *sk); 496extern void tcp_cwnd_application_limited(struct sock *sk);
497extern void tcp_resume_early_retransmit(struct sock *sk);
498extern void tcp_rearm_rto(struct sock *sk);
498 499
499/* tcp_timer.c */ 500/* tcp_timer.c */
500extern void tcp_init_xmit_timers(struct sock *); 501extern void tcp_init_xmit_timers(struct sock *);
@@ -540,8 +541,8 @@ extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
540 541
541extern void tcp_initialize_rcv_mss(struct sock *sk); 542extern void tcp_initialize_rcv_mss(struct sock *sk);
542 543
543extern int tcp_mtu_to_mss(const struct sock *sk, int pmtu); 544extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
544extern int tcp_mss_to_mtu(const struct sock *sk, int mss); 545extern int tcp_mss_to_mtu(struct sock *sk, int mss);
545extern void tcp_mtup_init(struct sock *sk); 546extern void tcp_mtup_init(struct sock *sk);
546extern void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt); 547extern void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt);
547 548
@@ -609,6 +610,8 @@ static inline u32 tcp_receive_window(const struct tcp_sock *tp)
609 */ 610 */
610extern u32 __tcp_select_window(struct sock *sk); 611extern u32 __tcp_select_window(struct sock *sk);
611 612
613void tcp_send_window_probe(struct sock *sk);
614
612/* TCP timestamps are only 32-bits, this causes a slight 615/* TCP timestamps are only 32-bits, this causes a slight
613 * complication on 64-bit systems since we store a snapshot 616 * complication on 64-bit systems since we store a snapshot
614 * of jiffies in the buffer control blocks below. We decided 617 * of jiffies in the buffer control blocks below. We decided
@@ -645,21 +648,38 @@ struct tcp_skb_cb {
645 __u32 end_seq; /* SEQ + FIN + SYN + datalen */ 648 __u32 end_seq; /* SEQ + FIN + SYN + datalen */
646 __u32 when; /* used to compute rtt's */ 649 __u32 when; /* used to compute rtt's */
647 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */ 650 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
651
648 __u8 sacked; /* State flags for SACK/FACK. */ 652 __u8 sacked; /* State flags for SACK/FACK. */
649#define TCPCB_SACKED_ACKED 0x01 /* SKB ACK'd by a SACK block */ 653#define TCPCB_SACKED_ACKED 0x01 /* SKB ACK'd by a SACK block */
650#define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */ 654#define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */
651#define TCPCB_LOST 0x04 /* SKB is lost */ 655#define TCPCB_LOST 0x04 /* SKB is lost */
652#define TCPCB_TAGBITS 0x07 /* All tag bits */ 656#define TCPCB_TAGBITS 0x07 /* All tag bits */
653 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
654 /* 1 byte hole */
655#define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */ 657#define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */
656#define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS) 658#define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)
657 659
660 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
661 /* 1 byte hole */
658 __u32 ack_seq; /* Sequence number ACK'd */ 662 __u32 ack_seq; /* Sequence number ACK'd */
659}; 663};
660 664
661#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0])) 665#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
662 666
667/* RFC3168 : 6.1.1 SYN packets must not have ECT/ECN bits set
668 *
669 * If we receive a SYN packet with these bits set, it means a network is
670 * playing bad games with TOS bits. In order to avoid possible false congestion
671 * notifications, we disable TCP ECN negociation.
672 */
673static inline void
674TCP_ECN_create_request(struct request_sock *req, const struct sk_buff *skb)
675{
676 const struct tcphdr *th = tcp_hdr(skb);
677
678 if (sysctl_tcp_ecn && th->ece && th->cwr &&
679 INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield))
680 inet_rsk(req)->ecn_ok = 1;
681}
682
663/* Due to TSO, an SKB can be composed of multiple actual 683/* Due to TSO, an SKB can be composed of multiple actual
664 * packets. To keep these tracked properly, we use this. 684 * packets. To keep these tracked properly, we use this.
665 */ 685 */
@@ -790,6 +810,21 @@ static inline void tcp_enable_fack(struct tcp_sock *tp)
790 tp->rx_opt.sack_ok |= TCP_FACK_ENABLED; 810 tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
791} 811}
792 812
813/* TCP early-retransmit (ER) is similar to but more conservative than
814 * the thin-dupack feature. Enable ER only if thin-dupack is disabled.
815 */
816static inline void tcp_enable_early_retrans(struct tcp_sock *tp)
817{
818 tp->do_early_retrans = sysctl_tcp_early_retrans &&
819 !sysctl_tcp_thin_dupack && sysctl_tcp_reordering == 3;
820 tp->early_retrans_delayed = 0;
821}
822
823static inline void tcp_disable_early_retrans(struct tcp_sock *tp)
824{
825 tp->do_early_retrans = 0;
826}
827
793static inline unsigned int tcp_left_out(const struct tcp_sock *tp) 828static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
794{ 829{
795 return tp->sacked_out + tp->lost_out; 830 return tp->sacked_out + tp->lost_out;
@@ -1226,7 +1261,7 @@ extern void tcp_put_md5sig_pool(void);
1226 1261
1227extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *); 1262extern int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *);
1228extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *, 1263extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
1229 unsigned header_len); 1264 unsigned int header_len);
1230extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, 1265extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
1231 const struct tcp_md5sig_key *key); 1266 const struct tcp_md5sig_key *key);
1232 1267
diff --git a/include/net/udp.h b/include/net/udp.h
index 5d606d9da9e5..065f379c6503 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -81,7 +81,7 @@ struct udp_table {
81extern struct udp_table udp_table; 81extern struct udp_table udp_table;
82extern void udp_table_init(struct udp_table *, const char *); 82extern void udp_table_init(struct udp_table *, const char *);
83static inline struct udp_hslot *udp_hashslot(struct udp_table *table, 83static inline struct udp_hslot *udp_hashslot(struct udp_table *table,
84 struct net *net, unsigned num) 84 struct net *net, unsigned int num)
85{ 85{
86 return &table->hash[udp_hashfn(net, num, table->mask)]; 86 return &table->hash[udp_hashfn(net, num, table->mask)];
87} 87}
@@ -267,4 +267,8 @@ extern void udp_init(void);
267extern int udp4_ufo_send_check(struct sk_buff *skb); 267extern int udp4_ufo_send_check(struct sk_buff *skb);
268extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, 268extern struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
269 netdev_features_t features); 269 netdev_features_t features);
270extern void udp_encap_enable(void);
271#if IS_ENABLED(CONFIG_IPV6)
272extern void udpv6_encap_enable(void);
273#endif
270#endif /* _UDP_H */ 274#endif /* _UDP_H */
diff --git a/include/net/wimax.h b/include/net/wimax.h
index 322ff4fbdb4a..bbb74f990cab 100644
--- a/include/net/wimax.h
+++ b/include/net/wimax.h
@@ -423,8 +423,8 @@ struct wimax_dev {
423 int (*op_reset)(struct wimax_dev *wimax_dev); 423 int (*op_reset)(struct wimax_dev *wimax_dev);
424 424
425 struct rfkill *rfkill; 425 struct rfkill *rfkill;
426 unsigned rf_hw; 426 unsigned int rf_hw;
427 unsigned rf_sw; 427 unsigned int rf_sw;
428 char name[32]; 428 char name[32];
429 429
430 struct dentry *debugfs_dentry; 430 struct dentry *debugfs_dentry;
diff --git a/include/net/x25.h b/include/net/x25.h
index a06119a05129..b4a8a8923128 100644
--- a/include/net/x25.h
+++ b/include/net/x25.h
@@ -305,7 +305,7 @@ static inline void x25_unregister_sysctl(void) {};
305#endif /* CONFIG_SYSCTL */ 305#endif /* CONFIG_SYSCTL */
306 306
307struct x25_skb_cb { 307struct x25_skb_cb {
308 unsigned flags; 308 unsigned int flags;
309}; 309};
310#define X25_SKB_CB(s) ((struct x25_skb_cb *) ((s)->cb)) 310#define X25_SKB_CB(s) ((struct x25_skb_cb *) ((s)->cb))
311 311
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 96239e78e621..1cb32bf107de 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1682,8 +1682,9 @@ static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m)
1682 1682
1683static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m) 1683static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
1684{ 1684{
1685 if (m->m | m->v) 1685 if ((m->m | m->v) &&
1686 NLA_PUT(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m); 1686 nla_put(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m))
1687 goto nla_put_failure;
1687 return 0; 1688 return 0;
1688 1689
1689nla_put_failure: 1690nla_put_failure:
diff --git a/net/802/fc.c b/net/802/fc.c
index b324e31401a9..05eea6b98bb8 100644
--- a/net/802/fc.c
+++ b/net/802/fc.c
@@ -35,7 +35,7 @@
35 35
36static int fc_header(struct sk_buff *skb, struct net_device *dev, 36static int fc_header(struct sk_buff *skb, struct net_device *dev,
37 unsigned short type, 37 unsigned short type,
38 const void *daddr, const void *saddr, unsigned len) 38 const void *daddr, const void *saddr, unsigned int len)
39{ 39{
40 struct fch_hdr *fch; 40 struct fch_hdr *fch;
41 int hdr_len; 41 int hdr_len;
diff --git a/net/802/fddi.c b/net/802/fddi.c
index 5ab25cd4314b..9cda40661e0d 100644
--- a/net/802/fddi.c
+++ b/net/802/fddi.c
@@ -51,7 +51,7 @@
51 51
52static int fddi_header(struct sk_buff *skb, struct net_device *dev, 52static int fddi_header(struct sk_buff *skb, struct net_device *dev,
53 unsigned short type, 53 unsigned short type,
54 const void *daddr, const void *saddr, unsigned len) 54 const void *daddr, const void *saddr, unsigned int len)
55{ 55{
56 int hl = FDDI_K_SNAP_HLEN; 56 int hl = FDDI_K_SNAP_HLEN;
57 struct fddihdr *fddi; 57 struct fddihdr *fddi;
diff --git a/net/802/garp.c b/net/802/garp.c
index a5c224830439..8456f5d98b85 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -157,9 +157,9 @@ static struct garp_attr *garp_attr_lookup(const struct garp_applicant *app,
157 while (parent) { 157 while (parent) {
158 attr = rb_entry(parent, struct garp_attr, node); 158 attr = rb_entry(parent, struct garp_attr, node);
159 d = garp_attr_cmp(attr, data, len, type); 159 d = garp_attr_cmp(attr, data, len, type);
160 if (d < 0) 160 if (d > 0)
161 parent = parent->rb_left; 161 parent = parent->rb_left;
162 else if (d > 0) 162 else if (d < 0)
163 parent = parent->rb_right; 163 parent = parent->rb_right;
164 else 164 else
165 return attr; 165 return attr;
@@ -178,9 +178,9 @@ static struct garp_attr *garp_attr_create(struct garp_applicant *app,
178 parent = *p; 178 parent = *p;
179 attr = rb_entry(parent, struct garp_attr, node); 179 attr = rb_entry(parent, struct garp_attr, node);
180 d = garp_attr_cmp(attr, data, len, type); 180 d = garp_attr_cmp(attr, data, len, type);
181 if (d < 0) 181 if (d > 0)
182 p = &parent->rb_left; 182 p = &parent->rb_left;
183 else if (d > 0) 183 else if (d < 0)
184 p = &parent->rb_right; 184 p = &parent->rb_right;
185 else { 185 else {
186 /* The attribute already exists; re-use it. */ 186 /* The attribute already exists; re-use it. */
diff --git a/net/802/hippi.c b/net/802/hippi.c
index 056794e66375..51a1f530417d 100644
--- a/net/802/hippi.c
+++ b/net/802/hippi.c
@@ -45,7 +45,7 @@
45 45
46static int hippi_header(struct sk_buff *skb, struct net_device *dev, 46static int hippi_header(struct sk_buff *skb, struct net_device *dev,
47 unsigned short type, 47 unsigned short type,
48 const void *daddr, const void *saddr, unsigned len) 48 const void *daddr, const void *saddr, unsigned int len)
49{ 49{
50 struct hippi_hdr *hip = (struct hippi_hdr *)skb_push(skb, HIPPI_HLEN); 50 struct hippi_hdr *hip = (struct hippi_hdr *)skb_push(skb, HIPPI_HLEN);
51 struct hippi_cb *hcb = (struct hippi_cb *) skb->cb; 51 struct hippi_cb *hcb = (struct hippi_cb *) skb->cb;
diff --git a/net/802/tr.c b/net/802/tr.c
index b9a3a145e348..30a352ed09b1 100644
--- a/net/802/tr.c
+++ b/net/802/tr.c
@@ -101,7 +101,7 @@ static inline unsigned long rif_hash(const unsigned char *addr)
101 101
102static int tr_header(struct sk_buff *skb, struct net_device *dev, 102static int tr_header(struct sk_buff *skb, struct net_device *dev,
103 unsigned short type, 103 unsigned short type,
104 const void *daddr, const void *saddr, unsigned len) 104 const void *daddr, const void *saddr, unsigned int len)
105{ 105{
106 struct trh_hdr *trh; 106 struct trh_hdr *trh;
107 int hdr_len; 107 int hdr_len;
@@ -193,7 +193,7 @@ __be16 tr_type_trans(struct sk_buff *skb, struct net_device *dev)
193 193
194 struct trh_hdr *trh; 194 struct trh_hdr *trh;
195 struct trllc *trllc; 195 struct trllc *trllc;
196 unsigned riflen=0; 196 unsigned int riflen=0;
197 197
198 skb->dev = dev; 198 skb->dev = dev;
199 skb_reset_mac_header(skb); 199 skb_reset_mac_header(skb);
@@ -643,12 +643,6 @@ static struct ctl_table tr_table[] = {
643 }, 643 },
644 { }, 644 { },
645}; 645};
646
647static __initdata struct ctl_path tr_path[] = {
648 { .procname = "net", },
649 { .procname = "token-ring", },
650 { }
651};
652#endif 646#endif
653 647
654/* 648/*
@@ -662,7 +656,7 @@ static int __init rif_init(void)
662 setup_timer(&rif_timer, rif_check_expire, 0); 656 setup_timer(&rif_timer, rif_check_expire, 0);
663 add_timer(&rif_timer); 657 add_timer(&rif_timer);
664#ifdef CONFIG_SYSCTL 658#ifdef CONFIG_SYSCTL
665 register_sysctl_paths(tr_path, tr_table); 659 register_net_sysctl(&init_net, "net/token-ring", tr_table);
666#endif 660#endif
667 proc_net_fops_create(&init_net, "tr_rif", S_IRUGO, &rif_seq_fops); 661 proc_net_fops_create(&init_net, "tr_rif", S_IRUGO, &rif_seq_fops);
668 return 0; 662 return 0;
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index 50711368ad6a..708c80ea1874 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -166,11 +166,13 @@ static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
166 struct nlattr *nest; 166 struct nlattr *nest;
167 unsigned int i; 167 unsigned int i;
168 168
169 NLA_PUT_U16(skb, IFLA_VLAN_ID, vlan_dev_priv(dev)->vlan_id); 169 if (nla_put_u16(skb, IFLA_VLAN_ID, vlan_dev_priv(dev)->vlan_id))
170 goto nla_put_failure;
170 if (vlan->flags) { 171 if (vlan->flags) {
171 f.flags = vlan->flags; 172 f.flags = vlan->flags;
172 f.mask = ~0; 173 f.mask = ~0;
173 NLA_PUT(skb, IFLA_VLAN_FLAGS, sizeof(f), &f); 174 if (nla_put(skb, IFLA_VLAN_FLAGS, sizeof(f), &f))
175 goto nla_put_failure;
174 } 176 }
175 if (vlan->nr_ingress_mappings) { 177 if (vlan->nr_ingress_mappings) {
176 nest = nla_nest_start(skb, IFLA_VLAN_INGRESS_QOS); 178 nest = nla_nest_start(skb, IFLA_VLAN_INGRESS_QOS);
@@ -183,8 +185,9 @@ static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
183 185
184 m.from = i; 186 m.from = i;
185 m.to = vlan->ingress_priority_map[i]; 187 m.to = vlan->ingress_priority_map[i];
186 NLA_PUT(skb, IFLA_VLAN_QOS_MAPPING, 188 if (nla_put(skb, IFLA_VLAN_QOS_MAPPING,
187 sizeof(m), &m); 189 sizeof(m), &m))
190 goto nla_put_failure;
188 } 191 }
189 nla_nest_end(skb, nest); 192 nla_nest_end(skb, nest);
190 } 193 }
@@ -202,8 +205,9 @@ static int vlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
202 205
203 m.from = pm->priority; 206 m.from = pm->priority;
204 m.to = (pm->vlan_qos >> 13) & 0x7; 207 m.to = (pm->vlan_qos >> 13) & 0x7;
205 NLA_PUT(skb, IFLA_VLAN_QOS_MAPPING, 208 if (nla_put(skb, IFLA_VLAN_QOS_MAPPING,
206 sizeof(m), &m); 209 sizeof(m), &m))
210 goto nla_put_failure;
207 } 211 }
208 } 212 }
209 nla_nest_end(skb, nest); 213 nla_nest_end(skb, nest);
diff --git a/net/9p/client.c b/net/9p/client.c
index b23a17c431c8..a170893d70e0 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -1530,7 +1530,7 @@ p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset,
1530 1530
1531 1531
1532 p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n", 1532 p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n",
1533 fid->fid, (long long unsigned) offset, count); 1533 fid->fid, (unsigned long long) offset, count);
1534 err = 0; 1534 err = 0;
1535 clnt = fid->clnt; 1535 clnt = fid->clnt;
1536 1536
@@ -1605,7 +1605,7 @@ p9_client_write(struct p9_fid *fid, char *data, const char __user *udata,
1605 struct p9_req_t *req; 1605 struct p9_req_t *req;
1606 1606
1607 p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %d\n", 1607 p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %d\n",
1608 fid->fid, (long long unsigned) offset, count); 1608 fid->fid, (unsigned long long) offset, count);
1609 err = 0; 1609 err = 0;
1610 clnt = fid->clnt; 1610 clnt = fid->clnt;
1611 1611
@@ -2040,7 +2040,7 @@ int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset)
2040 char *dataptr; 2040 char *dataptr;
2041 2041
2042 p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n", 2042 p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n",
2043 fid->fid, (long long unsigned) offset, count); 2043 fid->fid, (unsigned long long) offset, count);
2044 2044
2045 err = 0; 2045 err = 0;
2046 clnt = fid->clnt; 2046 clnt = fid->clnt;
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index fccae26fa674..6449bae15702 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -513,7 +513,7 @@ error:
513 clear_bit(Wworksched, &m->wsched); 513 clear_bit(Wworksched, &m->wsched);
514} 514}
515 515
516static int p9_pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key) 516static int p9_pollwake(wait_queue_t *wait, unsigned int mode, int sync, void *key)
517{ 517{
518 struct p9_poll_wait *pwait = 518 struct p9_poll_wait *pwait =
519 container_of(wait, struct p9_poll_wait, wait); 519 container_of(wait, struct p9_poll_wait, wait);
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index bfa9ab93eda5..0301b328cf0f 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -63,7 +63,7 @@
63#include <net/tcp_states.h> 63#include <net/tcp_states.h>
64#include <net/route.h> 64#include <net/route.h>
65#include <linux/atalk.h> 65#include <linux/atalk.h>
66#include "../core/kmap_skb.h" 66#include <linux/highmem.h>
67 67
68struct datalink_proto *ddp_dl, *aarp_dl; 68struct datalink_proto *ddp_dl, *aarp_dl;
69static const struct proto_ops atalk_dgram_ops; 69static const struct proto_ops atalk_dgram_ops;
@@ -960,10 +960,10 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
960 960
961 if (copy > len) 961 if (copy > len)
962 copy = len; 962 copy = len;
963 vaddr = kmap_skb_frag(frag); 963 vaddr = kmap_atomic(skb_frag_page(frag));
964 sum = atalk_sum_partial(vaddr + frag->page_offset + 964 sum = atalk_sum_partial(vaddr + frag->page_offset +
965 offset - start, copy, sum); 965 offset - start, copy, sum);
966 kunmap_skb_frag(vaddr); 966 kunmap_atomic(vaddr);
967 967
968 if (!(len -= copy)) 968 if (!(len -= copy))
969 return sum; 969 return sum;
diff --git a/net/appletalk/sysctl_net_atalk.c b/net/appletalk/sysctl_net_atalk.c
index 04e9c0da7aa9..ebb864361f7a 100644
--- a/net/appletalk/sysctl_net_atalk.c
+++ b/net/appletalk/sysctl_net_atalk.c
@@ -42,20 +42,14 @@ static struct ctl_table atalk_table[] = {
42 { }, 42 { },
43}; 43};
44 44
45static struct ctl_path atalk_path[] = {
46 { .procname = "net", },
47 { .procname = "appletalk", },
48 { }
49};
50
51static struct ctl_table_header *atalk_table_header; 45static struct ctl_table_header *atalk_table_header;
52 46
53void atalk_register_sysctl(void) 47void atalk_register_sysctl(void)
54{ 48{
55 atalk_table_header = register_sysctl_paths(atalk_path, atalk_table); 49 atalk_table_header = register_net_sysctl(&init_net, "net/appletalk", atalk_table);
56} 50}
57 51
58void atalk_unregister_sysctl(void) 52void atalk_unregister_sysctl(void)
59{ 53{
60 unregister_sysctl_table(atalk_table_header); 54 unregister_net_sysctl_table(atalk_table_header);
61} 55}
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 353fccf1cde3..4819d31533e0 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -73,7 +73,7 @@ struct br2684_vcc {
73#ifdef CONFIG_ATM_BR2684_IPFILTER 73#ifdef CONFIG_ATM_BR2684_IPFILTER
74 struct br2684_filter filter; 74 struct br2684_filter filter;
75#endif /* CONFIG_ATM_BR2684_IPFILTER */ 75#endif /* CONFIG_ATM_BR2684_IPFILTER */
76 unsigned copies_needed, copies_failed; 76 unsigned int copies_needed, copies_failed;
77}; 77};
78 78
79struct br2684_dev { 79struct br2684_dev {
diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c
index 53e500292271..5bdd300db0f7 100644
--- a/net/atm/mpoa_proc.c
+++ b/net/atm/mpoa_proc.c
@@ -207,7 +207,7 @@ static ssize_t proc_mpc_write(struct file *file, const char __user *buff,
207 size_t nbytes, loff_t *ppos) 207 size_t nbytes, loff_t *ppos)
208{ 208{
209 char *page, *p; 209 char *page, *p;
210 unsigned len; 210 unsigned int len;
211 211
212 if (nbytes == 0) 212 if (nbytes == 0)
213 return 0; 213 return 0;
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index 614d3fc47ede..ce1e59fdae7b 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -62,12 +62,25 @@ struct pppoatm_vcc {
62 void (*old_pop)(struct atm_vcc *, struct sk_buff *); 62 void (*old_pop)(struct atm_vcc *, struct sk_buff *);
63 /* keep old push/pop for detaching */ 63 /* keep old push/pop for detaching */
64 enum pppoatm_encaps encaps; 64 enum pppoatm_encaps encaps;
65 atomic_t inflight;
66 unsigned long blocked;
65 int flags; /* SC_COMP_PROT - compress protocol */ 67 int flags; /* SC_COMP_PROT - compress protocol */
66 struct ppp_channel chan; /* interface to generic ppp layer */ 68 struct ppp_channel chan; /* interface to generic ppp layer */
67 struct tasklet_struct wakeup_tasklet; 69 struct tasklet_struct wakeup_tasklet;
68}; 70};
69 71
70/* 72/*
73 * We want to allow two packets in the queue. The one that's currently in
74 * flight, and *one* queued up ready for the ATM device to send immediately
75 * from its TX done IRQ. We want to be able to use atomic_inc_not_zero(), so
76 * inflight == -2 represents an empty queue, -1 one packet, and zero means
77 * there are two packets in the queue.
78 */
79#define NONE_INFLIGHT -2
80
81#define BLOCKED 0
82
83/*
71 * Header used for LLC Encapsulated PPP (4 bytes) followed by the LCP protocol 84 * Header used for LLC Encapsulated PPP (4 bytes) followed by the LCP protocol
72 * ID (0xC021) used in autodetection 85 * ID (0xC021) used in autodetection
73 */ 86 */
@@ -102,16 +115,30 @@ static void pppoatm_wakeup_sender(unsigned long arg)
102static void pppoatm_pop(struct atm_vcc *atmvcc, struct sk_buff *skb) 115static void pppoatm_pop(struct atm_vcc *atmvcc, struct sk_buff *skb)
103{ 116{
104 struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc); 117 struct pppoatm_vcc *pvcc = atmvcc_to_pvcc(atmvcc);
118
105 pvcc->old_pop(atmvcc, skb); 119 pvcc->old_pop(atmvcc, skb);
120 atomic_dec(&pvcc->inflight);
121
106 /* 122 /*
107 * We don't really always want to do this since it's 123 * We always used to run the wakeup tasklet unconditionally here, for
108 * really inefficient - it would be much better if we could 124 * fear of race conditions where we clear the BLOCKED flag just as we
109 * test if we had actually throttled the generic layer. 125 * refuse another packet in pppoatm_send(). This was quite inefficient.
110 * Unfortunately then there would be a nasty SMP race where 126 *
111 * we could clear that flag just as we refuse another packet. 127 * In fact it's OK. The PPP core will only ever call pppoatm_send()
112 * For now we do the safe thing. 128 * while holding the channel->downl lock. And ppp_output_wakeup() as
129 * called by the tasklet will *also* grab that lock. So even if another
130 * CPU is in pppoatm_send() right now, the tasklet isn't going to race
131 * with it. The wakeup *will* happen after the other CPU is safely out
132 * of pppoatm_send() again.
133 *
134 * So if the CPU in pppoatm_send() has already set the BLOCKED bit and
135 * it about to return, that's fine. We trigger a wakeup which will
136 * happen later. And if the CPU in pppoatm_send() *hasn't* set the
137 * BLOCKED bit yet, that's fine too because of the double check in
138 * pppoatm_may_send() which is commented there.
113 */ 139 */
114 tasklet_schedule(&pvcc->wakeup_tasklet); 140 if (test_and_clear_bit(BLOCKED, &pvcc->blocked))
141 tasklet_schedule(&pvcc->wakeup_tasklet);
115} 142}
116 143
117/* 144/*
@@ -184,6 +211,51 @@ error:
184 ppp_input_error(&pvcc->chan, 0); 211 ppp_input_error(&pvcc->chan, 0);
185} 212}
186 213
214static inline int pppoatm_may_send(struct pppoatm_vcc *pvcc, int size)
215{
216 /*
217 * It's not clear that we need to bother with using atm_may_send()
218 * to check we don't exceed sk->sk_sndbuf. If userspace sets a
219 * value of sk_sndbuf which is lower than the MTU, we're going to
220 * block for ever. But the code always did that before we introduced
221 * the packet count limit, so...
222 */
223 if (atm_may_send(pvcc->atmvcc, size) &&
224 atomic_inc_not_zero_hint(&pvcc->inflight, NONE_INFLIGHT))
225 return 1;
226
227 /*
228 * We use test_and_set_bit() rather than set_bit() here because
229 * we need to ensure there's a memory barrier after it. The bit
230 * *must* be set before we do the atomic_inc() on pvcc->inflight.
231 * There's no smp_mb__after_set_bit(), so it's this or abuse
232 * smp_mb__after_clear_bit().
233 */
234 test_and_set_bit(BLOCKED, &pvcc->blocked);
235
236 /*
237 * We may have raced with pppoatm_pop(). If it ran for the
238 * last packet in the queue, *just* before we set the BLOCKED
239 * bit, then it might never run again and the channel could
240 * remain permanently blocked. Cope with that race by checking
241 * *again*. If it did run in that window, we'll have space on
242 * the queue now and can return success. It's harmless to leave
243 * the BLOCKED flag set, since it's only used as a trigger to
244 * run the wakeup tasklet. Another wakeup will never hurt.
245 * If pppoatm_pop() is running but hasn't got as far as making
246 * space on the queue yet, then it hasn't checked the BLOCKED
247 * flag yet either, so we're safe in that case too. It'll issue
248 * an "immediate" wakeup... where "immediate" actually involves
249 * taking the PPP channel's ->downl lock, which is held by the
250 * code path that calls pppoatm_send(), and is thus going to
251 * wait for us to finish.
252 */
253 if (atm_may_send(pvcc->atmvcc, size) &&
254 atomic_inc_not_zero(&pvcc->inflight))
255 return 1;
256
257 return 0;
258}
187/* 259/*
188 * Called by the ppp_generic.c to send a packet - returns true if packet 260 * Called by the ppp_generic.c to send a packet - returns true if packet
189 * was accepted. If we return false, then it's our job to call 261 * was accepted. If we return false, then it's our job to call
@@ -207,7 +279,7 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
207 struct sk_buff *n; 279 struct sk_buff *n;
208 n = skb_realloc_headroom(skb, LLC_LEN); 280 n = skb_realloc_headroom(skb, LLC_LEN);
209 if (n != NULL && 281 if (n != NULL &&
210 !atm_may_send(pvcc->atmvcc, n->truesize)) { 282 !pppoatm_may_send(pvcc, n->truesize)) {
211 kfree_skb(n); 283 kfree_skb(n);
212 goto nospace; 284 goto nospace;
213 } 285 }
@@ -215,12 +287,12 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
215 skb = n; 287 skb = n;
216 if (skb == NULL) 288 if (skb == NULL)
217 return DROP_PACKET; 289 return DROP_PACKET;
218 } else if (!atm_may_send(pvcc->atmvcc, skb->truesize)) 290 } else if (!pppoatm_may_send(pvcc, skb->truesize))
219 goto nospace; 291 goto nospace;
220 memcpy(skb_push(skb, LLC_LEN), pppllc, LLC_LEN); 292 memcpy(skb_push(skb, LLC_LEN), pppllc, LLC_LEN);
221 break; 293 break;
222 case e_vc: 294 case e_vc:
223 if (!atm_may_send(pvcc->atmvcc, skb->truesize)) 295 if (!pppoatm_may_send(pvcc, skb->truesize))
224 goto nospace; 296 goto nospace;
225 break; 297 break;
226 case e_autodetect: 298 case e_autodetect:
@@ -285,6 +357,9 @@ static int pppoatm_assign_vcc(struct atm_vcc *atmvcc, void __user *arg)
285 if (pvcc == NULL) 357 if (pvcc == NULL)
286 return -ENOMEM; 358 return -ENOMEM;
287 pvcc->atmvcc = atmvcc; 359 pvcc->atmvcc = atmvcc;
360
361 /* Maximum is zero, so that we can use atomic_inc_not_zero() */
362 atomic_set(&pvcc->inflight, NONE_INFLIGHT);
288 pvcc->old_push = atmvcc->push; 363 pvcc->old_push = atmvcc->push;
289 pvcc->old_pop = atmvcc->pop; 364 pvcc->old_pop = atmvcc->pop;
290 pvcc->encaps = (enum pppoatm_encaps) be.encaps; 365 pvcc->encaps = (enum pppoatm_encaps) be.encaps;
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index 509c8ac02b63..86767ca908a3 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -166,7 +166,7 @@ void sigd_enq2(struct atm_vcc *vcc, enum atmsvc_msg_type type,
166{ 166{
167 struct sk_buff *skb; 167 struct sk_buff *skb;
168 struct atmsvc_msg *msg; 168 struct atmsvc_msg *msg;
169 static unsigned session = 0; 169 static unsigned int session = 0;
170 170
171 pr_debug("%d (0x%p)\n", (int)type, vcc); 171 pr_debug("%d (0x%p)\n", (int)type, vcc);
172 while (!(skb = alloc_skb(sizeof(struct atmsvc_msg), GFP_KERNEL))) 172 while (!(skb = alloc_skb(sizeof(struct atmsvc_msg), GFP_KERNEL)))
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 9d9a6a3edbd5..051f7abae66d 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1990,7 +1990,6 @@ static int __init ax25_init(void)
1990 sock_register(&ax25_family_ops); 1990 sock_register(&ax25_family_ops);
1991 dev_add_pack(&ax25_packet_type); 1991 dev_add_pack(&ax25_packet_type);
1992 register_netdevice_notifier(&ax25_dev_notifier); 1992 register_netdevice_notifier(&ax25_dev_notifier);
1993 ax25_register_sysctl();
1994 1993
1995 proc_net_fops_create(&init_net, "ax25_route", S_IRUGO, &ax25_route_fops); 1994 proc_net_fops_create(&init_net, "ax25_route", S_IRUGO, &ax25_route_fops);
1996 proc_net_fops_create(&init_net, "ax25", S_IRUGO, &ax25_info_fops); 1995 proc_net_fops_create(&init_net, "ax25", S_IRUGO, &ax25_info_fops);
@@ -2013,7 +2012,6 @@ static void __exit ax25_exit(void)
2013 proc_net_remove(&init_net, "ax25_calls"); 2012 proc_net_remove(&init_net, "ax25_calls");
2014 2013
2015 unregister_netdevice_notifier(&ax25_dev_notifier); 2014 unregister_netdevice_notifier(&ax25_dev_notifier);
2016 ax25_unregister_sysctl();
2017 2015
2018 dev_remove_pack(&ax25_packet_type); 2016 dev_remove_pack(&ax25_packet_type);
2019 2017
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
index d0de30e89591..3d106767b272 100644
--- a/net/ax25/ax25_dev.c
+++ b/net/ax25/ax25_dev.c
@@ -59,8 +59,6 @@ void ax25_dev_device_up(struct net_device *dev)
59 return; 59 return;
60 } 60 }
61 61
62 ax25_unregister_sysctl();
63
64 dev->ax25_ptr = ax25_dev; 62 dev->ax25_ptr = ax25_dev;
65 ax25_dev->dev = dev; 63 ax25_dev->dev = dev;
66 dev_hold(dev); 64 dev_hold(dev);
@@ -90,7 +88,7 @@ void ax25_dev_device_up(struct net_device *dev)
90 ax25_dev_list = ax25_dev; 88 ax25_dev_list = ax25_dev;
91 spin_unlock_bh(&ax25_dev_lock); 89 spin_unlock_bh(&ax25_dev_lock);
92 90
93 ax25_register_sysctl(); 91 ax25_register_dev_sysctl(ax25_dev);
94} 92}
95 93
96void ax25_dev_device_down(struct net_device *dev) 94void ax25_dev_device_down(struct net_device *dev)
@@ -100,7 +98,7 @@ void ax25_dev_device_down(struct net_device *dev)
100 if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) 98 if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
101 return; 99 return;
102 100
103 ax25_unregister_sysctl(); 101 ax25_unregister_dev_sysctl(ax25_dev);
104 102
105 spin_lock_bh(&ax25_dev_lock); 103 spin_lock_bh(&ax25_dev_lock);
106 104
@@ -120,7 +118,6 @@ void ax25_dev_device_down(struct net_device *dev)
120 spin_unlock_bh(&ax25_dev_lock); 118 spin_unlock_bh(&ax25_dev_lock);
121 dev_put(dev); 119 dev_put(dev);
122 kfree(ax25_dev); 120 kfree(ax25_dev);
123 ax25_register_sysctl();
124 return; 121 return;
125 } 122 }
126 123
@@ -130,7 +127,6 @@ void ax25_dev_device_down(struct net_device *dev)
130 spin_unlock_bh(&ax25_dev_lock); 127 spin_unlock_bh(&ax25_dev_lock);
131 dev_put(dev); 128 dev_put(dev);
132 kfree(ax25_dev); 129 kfree(ax25_dev);
133 ax25_register_sysctl();
134 return; 130 return;
135 } 131 }
136 132
@@ -138,8 +134,6 @@ void ax25_dev_device_down(struct net_device *dev)
138 } 134 }
139 spin_unlock_bh(&ax25_dev_lock); 135 spin_unlock_bh(&ax25_dev_lock);
140 dev->ax25_ptr = NULL; 136 dev->ax25_ptr = NULL;
141
142 ax25_register_sysctl();
143} 137}
144 138
145int ax25_fwd_ioctl(unsigned int cmd, struct ax25_fwd_struct *fwd) 139int ax25_fwd_ioctl(unsigned int cmd, struct ax25_fwd_struct *fwd)
diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
index 846ae4e2b115..67de6b33f2c3 100644
--- a/net/ax25/ax25_ip.c
+++ b/net/ax25/ax25_ip.c
@@ -48,7 +48,7 @@
48 48
49int ax25_hard_header(struct sk_buff *skb, struct net_device *dev, 49int ax25_hard_header(struct sk_buff *skb, struct net_device *dev,
50 unsigned short type, const void *daddr, 50 unsigned short type, const void *daddr,
51 const void *saddr, unsigned len) 51 const void *saddr, unsigned int len)
52{ 52{
53 unsigned char *buff; 53 unsigned char *buff;
54 54
@@ -219,7 +219,7 @@ put:
219 219
220int ax25_hard_header(struct sk_buff *skb, struct net_device *dev, 220int ax25_hard_header(struct sk_buff *skb, struct net_device *dev,
221 unsigned short type, const void *daddr, 221 unsigned short type, const void *daddr,
222 const void *saddr, unsigned len) 222 const void *saddr, unsigned int len)
223{ 223{
224 return -AX25_HEADER_LEN; 224 return -AX25_HEADER_LEN;
225} 225}
diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
index ebe0ef3f1d83..d5744b752511 100644
--- a/net/ax25/sysctl_net_ax25.c
+++ b/net/ax25/sysctl_net_ax25.c
@@ -29,17 +29,6 @@ static int min_proto[1], max_proto[] = { AX25_PROTO_MAX };
29static int min_ds_timeout[1], max_ds_timeout[] = {65535000}; 29static int min_ds_timeout[1], max_ds_timeout[] = {65535000};
30#endif 30#endif
31 31
32static struct ctl_table_header *ax25_table_header;
33
34static ctl_table *ax25_table;
35static int ax25_table_size;
36
37static struct ctl_path ax25_path[] = {
38 { .procname = "net", },
39 { .procname = "ax25", },
40 { }
41};
42
43static const ctl_table ax25_param_table[] = { 32static const ctl_table ax25_param_table[] = {
44 { 33 {
45 .procname = "ip_default_mode", 34 .procname = "ip_default_mode",
@@ -159,52 +148,37 @@ static const ctl_table ax25_param_table[] = {
159 { } /* that's all, folks! */ 148 { } /* that's all, folks! */
160}; 149};
161 150
162void ax25_register_sysctl(void) 151int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
163{ 152{
164 ax25_dev *ax25_dev; 153 char path[sizeof("net/ax25/") + IFNAMSIZ];
165 int n, k; 154 int k;
166 155 struct ctl_table *table;
167 spin_lock_bh(&ax25_dev_lock); 156
168 for (ax25_table_size = sizeof(ctl_table), ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) 157 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
169 ax25_table_size += sizeof(ctl_table); 158 if (!table)
170 159 return -ENOMEM;
171 if ((ax25_table = kzalloc(ax25_table_size, GFP_ATOMIC)) == NULL) { 160
172 spin_unlock_bh(&ax25_dev_lock); 161 for (k = 0; k < AX25_MAX_VALUES; k++)
173 return; 162 table[k].data = &ax25_dev->values[k];
174 } 163
175 164 snprintf(path, sizeof(path), "net/ax25/%s", ax25_dev->dev->name);
176 for (n = 0, ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) { 165 ax25_dev->sysheader = register_net_sysctl(&init_net, path, table);
177 struct ctl_table *child = kmemdup(ax25_param_table, 166 if (!ax25_dev->sysheader) {
178 sizeof(ax25_param_table), 167 kfree(table);
179 GFP_ATOMIC); 168 return -ENOMEM;
180 if (!child) {
181 while (n--)
182 kfree(ax25_table[n].child);
183 kfree(ax25_table);
184 spin_unlock_bh(&ax25_dev_lock);
185 return;
186 }
187 ax25_table[n].child = ax25_dev->systable = child;
188 ax25_table[n].procname = ax25_dev->dev->name;
189 ax25_table[n].mode = 0555;
190
191
192 for (k = 0; k < AX25_MAX_VALUES; k++)
193 child[k].data = &ax25_dev->values[k];
194
195 n++;
196 } 169 }
197 spin_unlock_bh(&ax25_dev_lock); 170 return 0;
198
199 ax25_table_header = register_sysctl_paths(ax25_path, ax25_table);
200} 171}
201 172
202void ax25_unregister_sysctl(void) 173void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev)
203{ 174{
204 ctl_table *p; 175 struct ctl_table_header *header = ax25_dev->sysheader;
205 unregister_sysctl_table(ax25_table_header); 176 struct ctl_table *table;
206 177
207 for (p = ax25_table; p->procname; p++) 178 if (header) {
208 kfree(p->child); 179 ax25_dev->sysheader = NULL;
209 kfree(ax25_table); 180 table = header->ctl_table_arg;
181 unregister_net_sysctl_table(header);
182 kfree(table);
183 }
210} 184}
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig
index 2b68d068eaf3..53f5244e28f8 100644
--- a/net/batman-adv/Kconfig
+++ b/net/batman-adv/Kconfig
@@ -7,19 +7,28 @@ config BATMAN_ADV
7 depends on NET 7 depends on NET
8 select CRC16 8 select CRC16
9 default n 9 default n
10 ---help--- 10 help
11 B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is
12 a routing protocol for multi-hop ad-hoc mesh networks. The
13 networks may be wired or wireless. See
14 http://www.open-mesh.org/ for more information and user space
15 tools.
11 16
12 B.A.T.M.A.N. (better approach to mobile ad-hoc networking) is 17config BATMAN_ADV_BLA
13 a routing protocol for multi-hop ad-hoc mesh networks. The 18 bool "Bridge Loop Avoidance"
14 networks may be wired or wireless. See 19 depends on BATMAN_ADV && INET
15 http://www.open-mesh.org/ for more information and user space 20 default y
16 tools. 21 help
22 This option enables BLA (Bridge Loop Avoidance), a mechanism
23 to avoid Ethernet frames looping when mesh nodes are connected
24 to both the same LAN and the same mesh. If you will never use
25 more than one mesh node in the same LAN, you can safely remove
26 this feature and save some space.
17 27
18config BATMAN_ADV_DEBUG 28config BATMAN_ADV_DEBUG
19 bool "B.A.T.M.A.N. debugging" 29 bool "B.A.T.M.A.N. debugging"
20 depends on BATMAN_ADV != n 30 depends on BATMAN_ADV
21 ---help--- 31 help
22
23 This is an option for use by developers; most people should 32 This is an option for use by developers; most people should
24 say N here. This enables compilation of support for 33 say N here. This enables compilation of support for
25 outputting debugging information to the kernel log. The 34 outputting debugging information to the kernel log. The
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index 4e392ebedb64..6d5c1940667d 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -23,6 +23,7 @@ batman-adv-y += bat_debugfs.o
23batman-adv-y += bat_iv_ogm.o 23batman-adv-y += bat_iv_ogm.o
24batman-adv-y += bat_sysfs.o 24batman-adv-y += bat_sysfs.o
25batman-adv-y += bitarray.o 25batman-adv-y += bitarray.o
26batman-adv-$(CONFIG_BATMAN_ADV_BLA) += bridge_loop_avoidance.o
26batman-adv-y += gateway_client.o 27batman-adv-y += gateway_client.o
27batman-adv-y += gateway_common.o 28batman-adv-y += gateway_common.o
28batman-adv-y += hard-interface.o 29batman-adv-y += hard-interface.o
diff --git a/net/batman-adv/bat_debugfs.c b/net/batman-adv/bat_debugfs.c
index c3b0548b175d..916380c73ab7 100644
--- a/net/batman-adv/bat_debugfs.c
+++ b/net/batman-adv/bat_debugfs.c
@@ -32,6 +32,7 @@
32#include "soft-interface.h" 32#include "soft-interface.h"
33#include "vis.h" 33#include "vis.h"
34#include "icmp_socket.h" 34#include "icmp_socket.h"
35#include "bridge_loop_avoidance.h"
35 36
36static struct dentry *bat_debugfs; 37static struct dentry *bat_debugfs;
37 38
@@ -238,17 +239,19 @@ static int gateways_open(struct inode *inode, struct file *file)
238 return single_open(file, gw_client_seq_print_text, net_dev); 239 return single_open(file, gw_client_seq_print_text, net_dev);
239} 240}
240 241
241static int softif_neigh_open(struct inode *inode, struct file *file) 242static int transtable_global_open(struct inode *inode, struct file *file)
242{ 243{
243 struct net_device *net_dev = (struct net_device *)inode->i_private; 244 struct net_device *net_dev = (struct net_device *)inode->i_private;
244 return single_open(file, softif_neigh_seq_print_text, net_dev); 245 return single_open(file, tt_global_seq_print_text, net_dev);
245} 246}
246 247
247static int transtable_global_open(struct inode *inode, struct file *file) 248#ifdef CONFIG_BATMAN_ADV_BLA
249static int bla_claim_table_open(struct inode *inode, struct file *file)
248{ 250{
249 struct net_device *net_dev = (struct net_device *)inode->i_private; 251 struct net_device *net_dev = (struct net_device *)inode->i_private;
250 return single_open(file, tt_global_seq_print_text, net_dev); 252 return single_open(file, bla_claim_table_seq_print_text, net_dev);
251} 253}
254#endif
252 255
253static int transtable_local_open(struct inode *inode, struct file *file) 256static int transtable_local_open(struct inode *inode, struct file *file)
254{ 257{
@@ -282,16 +285,20 @@ struct bat_debuginfo bat_debuginfo_##_name = { \
282static BAT_DEBUGINFO(routing_algos, S_IRUGO, bat_algorithms_open); 285static BAT_DEBUGINFO(routing_algos, S_IRUGO, bat_algorithms_open);
283static BAT_DEBUGINFO(originators, S_IRUGO, originators_open); 286static BAT_DEBUGINFO(originators, S_IRUGO, originators_open);
284static BAT_DEBUGINFO(gateways, S_IRUGO, gateways_open); 287static BAT_DEBUGINFO(gateways, S_IRUGO, gateways_open);
285static BAT_DEBUGINFO(softif_neigh, S_IRUGO, softif_neigh_open);
286static BAT_DEBUGINFO(transtable_global, S_IRUGO, transtable_global_open); 288static BAT_DEBUGINFO(transtable_global, S_IRUGO, transtable_global_open);
289#ifdef CONFIG_BATMAN_ADV_BLA
290static BAT_DEBUGINFO(bla_claim_table, S_IRUGO, bla_claim_table_open);
291#endif
287static BAT_DEBUGINFO(transtable_local, S_IRUGO, transtable_local_open); 292static BAT_DEBUGINFO(transtable_local, S_IRUGO, transtable_local_open);
288static BAT_DEBUGINFO(vis_data, S_IRUGO, vis_data_open); 293static BAT_DEBUGINFO(vis_data, S_IRUGO, vis_data_open);
289 294
290static struct bat_debuginfo *mesh_debuginfos[] = { 295static struct bat_debuginfo *mesh_debuginfos[] = {
291 &bat_debuginfo_originators, 296 &bat_debuginfo_originators,
292 &bat_debuginfo_gateways, 297 &bat_debuginfo_gateways,
293 &bat_debuginfo_softif_neigh,
294 &bat_debuginfo_transtable_global, 298 &bat_debuginfo_transtable_global,
299#ifdef CONFIG_BATMAN_ADV_BLA
300 &bat_debuginfo_bla_claim_table,
301#endif
295 &bat_debuginfo_transtable_local, 302 &bat_debuginfo_transtable_local,
296 &bat_debuginfo_vis_data, 303 &bat_debuginfo_vis_data,
297 NULL, 304 NULL,
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index a6d5d63fb6ad..8b2db2e76c7e 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -30,24 +30,44 @@
30#include "send.h" 30#include "send.h"
31#include "bat_algo.h" 31#include "bat_algo.h"
32 32
33static void bat_iv_ogm_init(struct hard_iface *hard_iface) 33static int bat_iv_ogm_iface_enable(struct hard_iface *hard_iface)
34{ 34{
35 struct batman_ogm_packet *batman_ogm_packet; 35 struct batman_ogm_packet *batman_ogm_packet;
36 uint32_t random_seqno;
37 int res = -1;
36 38
37 hard_iface->packet_len = BATMAN_OGM_LEN; 39 /* randomize initial seqno to avoid collision */
40 get_random_bytes(&random_seqno, sizeof(random_seqno));
41 atomic_set(&hard_iface->seqno, random_seqno);
42
43 hard_iface->packet_len = BATMAN_OGM_HLEN;
38 hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC); 44 hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);
39 45
46 if (!hard_iface->packet_buff)
47 goto out;
48
40 batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff; 49 batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
41 batman_ogm_packet->header.packet_type = BAT_OGM; 50 batman_ogm_packet->header.packet_type = BAT_IV_OGM;
42 batman_ogm_packet->header.version = COMPAT_VERSION; 51 batman_ogm_packet->header.version = COMPAT_VERSION;
43 batman_ogm_packet->header.ttl = 2; 52 batman_ogm_packet->header.ttl = 2;
44 batman_ogm_packet->flags = NO_FLAGS; 53 batman_ogm_packet->flags = NO_FLAGS;
45 batman_ogm_packet->tq = TQ_MAX_VALUE; 54 batman_ogm_packet->tq = TQ_MAX_VALUE;
46 batman_ogm_packet->tt_num_changes = 0; 55 batman_ogm_packet->tt_num_changes = 0;
47 batman_ogm_packet->ttvn = 0; 56 batman_ogm_packet->ttvn = 0;
57
58 res = 0;
59
60out:
61 return res;
62}
63
64static void bat_iv_ogm_iface_disable(struct hard_iface *hard_iface)
65{
66 kfree(hard_iface->packet_buff);
67 hard_iface->packet_buff = NULL;
48} 68}
49 69
50static void bat_iv_ogm_init_primary(struct hard_iface *hard_iface) 70static void bat_iv_ogm_primary_iface_set(struct hard_iface *hard_iface)
51{ 71{
52 struct batman_ogm_packet *batman_ogm_packet; 72 struct batman_ogm_packet *batman_ogm_packet;
53 73
@@ -92,7 +112,7 @@ static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv)
92static int bat_iv_ogm_aggr_packet(int buff_pos, int packet_len, 112static int bat_iv_ogm_aggr_packet(int buff_pos, int packet_len,
93 int tt_num_changes) 113 int tt_num_changes)
94{ 114{
95 int next_buff_pos = buff_pos + BATMAN_OGM_LEN + tt_len(tt_num_changes); 115 int next_buff_pos = buff_pos + BATMAN_OGM_HLEN + tt_len(tt_num_changes);
96 116
97 return (next_buff_pos <= packet_len) && 117 return (next_buff_pos <= packet_len) &&
98 (next_buff_pos <= MAX_AGGREGATION_BYTES); 118 (next_buff_pos <= MAX_AGGREGATION_BYTES);
@@ -132,7 +152,7 @@ static void bat_iv_ogm_send_to_if(struct forw_packet *forw_packet,
132 "Sending own" : 152 "Sending own" :
133 "Forwarding")); 153 "Forwarding"));
134 bat_dbg(DBG_BATMAN, bat_priv, 154 bat_dbg(DBG_BATMAN, bat_priv,
135 "%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n", 155 "%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n",
136 fwd_str, (packet_num > 0 ? "aggregated " : ""), 156 fwd_str, (packet_num > 0 ? "aggregated " : ""),
137 batman_ogm_packet->orig, 157 batman_ogm_packet->orig,
138 ntohl(batman_ogm_packet->seqno), 158 ntohl(batman_ogm_packet->seqno),
@@ -142,7 +162,7 @@ static void bat_iv_ogm_send_to_if(struct forw_packet *forw_packet,
142 batman_ogm_packet->ttvn, hard_iface->net_dev->name, 162 batman_ogm_packet->ttvn, hard_iface->net_dev->name,
143 hard_iface->net_dev->dev_addr); 163 hard_iface->net_dev->dev_addr);
144 164
145 buff_pos += BATMAN_OGM_LEN + 165 buff_pos += BATMAN_OGM_HLEN +
146 tt_len(batman_ogm_packet->tt_num_changes); 166 tt_len(batman_ogm_packet->tt_num_changes);
147 packet_num++; 167 packet_num++;
148 batman_ogm_packet = (struct batman_ogm_packet *) 168 batman_ogm_packet = (struct batman_ogm_packet *)
@@ -191,7 +211,7 @@ static void bat_iv_ogm_emit(struct forw_packet *forw_packet)
191 211
192 /* FIXME: what about aggregated packets ? */ 212 /* FIXME: what about aggregated packets ? */
193 bat_dbg(DBG_BATMAN, bat_priv, 213 bat_dbg(DBG_BATMAN, bat_priv,
194 "%s packet (originator %pM, seqno %d, TTL %d) on interface %s [%pM]\n", 214 "%s packet (originator %pM, seqno %u, TTL %d) on interface %s [%pM]\n",
195 (forw_packet->own ? "Sending own" : "Forwarding"), 215 (forw_packet->own ? "Sending own" : "Forwarding"),
196 batman_ogm_packet->orig, 216 batman_ogm_packet->orig,
197 ntohl(batman_ogm_packet->seqno), 217 ntohl(batman_ogm_packet->seqno),
@@ -335,10 +355,9 @@ static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff,
335 if ((atomic_read(&bat_priv->aggregated_ogms)) && 355 if ((atomic_read(&bat_priv->aggregated_ogms)) &&
336 (packet_len < MAX_AGGREGATION_BYTES)) 356 (packet_len < MAX_AGGREGATION_BYTES))
337 forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES + 357 forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES +
338 sizeof(struct ethhdr)); 358 ETH_HLEN);
339 else 359 else
340 forw_packet_aggr->skb = dev_alloc_skb(packet_len + 360 forw_packet_aggr->skb = dev_alloc_skb(packet_len + ETH_HLEN);
341 sizeof(struct ethhdr));
342 361
343 if (!forw_packet_aggr->skb) { 362 if (!forw_packet_aggr->skb) {
344 if (!own_packet) 363 if (!own_packet)
@@ -346,7 +365,7 @@ static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff,
346 kfree(forw_packet_aggr); 365 kfree(forw_packet_aggr);
347 goto out; 366 goto out;
348 } 367 }
349 skb_reserve(forw_packet_aggr->skb, sizeof(struct ethhdr)); 368 skb_reserve(forw_packet_aggr->skb, ETH_HLEN);
350 369
351 INIT_HLIST_NODE(&forw_packet_aggr->list); 370 INIT_HLIST_NODE(&forw_packet_aggr->list);
352 371
@@ -520,7 +539,7 @@ static void bat_iv_ogm_forward(struct orig_node *orig_node,
520 batman_ogm_packet->flags &= ~DIRECTLINK; 539 batman_ogm_packet->flags &= ~DIRECTLINK;
521 540
522 bat_iv_ogm_queue_add(bat_priv, (unsigned char *)batman_ogm_packet, 541 bat_iv_ogm_queue_add(bat_priv, (unsigned char *)batman_ogm_packet,
523 BATMAN_OGM_LEN + tt_len(tt_num_changes), 542 BATMAN_OGM_HLEN + tt_len(tt_num_changes),
524 if_incoming, 0, bat_iv_ogm_fwd_send_time()); 543 if_incoming, 0, bat_iv_ogm_fwd_send_time());
525} 544}
526 545
@@ -842,7 +861,8 @@ static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
842 seq_diff = batman_ogm_packet->seqno - orig_node->last_real_seqno; 861 seq_diff = batman_ogm_packet->seqno - orig_node->last_real_seqno;
843 862
844 /* signalize caller that the packet is to be dropped. */ 863 /* signalize caller that the packet is to be dropped. */
845 if (window_protected(bat_priv, seq_diff, 864 if (!hlist_empty(&orig_node->neigh_list) &&
865 window_protected(bat_priv, seq_diff,
846 &orig_node->batman_seqno_reset)) 866 &orig_node->batman_seqno_reset))
847 goto out; 867 goto out;
848 868
@@ -850,9 +870,9 @@ static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
850 hlist_for_each_entry_rcu(tmp_neigh_node, node, 870 hlist_for_each_entry_rcu(tmp_neigh_node, node,
851 &orig_node->neigh_list, list) { 871 &orig_node->neigh_list, list) {
852 872
853 is_duplicate |= get_bit_status(tmp_neigh_node->real_bits, 873 is_duplicate |= bat_test_bit(tmp_neigh_node->real_bits,
854 orig_node->last_real_seqno, 874 orig_node->last_real_seqno,
855 batman_ogm_packet->seqno); 875 batman_ogm_packet->seqno);
856 876
857 if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) && 877 if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
858 (tmp_neigh_node->if_incoming == if_incoming)) 878 (tmp_neigh_node->if_incoming == if_incoming))
@@ -866,13 +886,14 @@ static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
866 seq_diff, set_mark); 886 seq_diff, set_mark);
867 887
868 tmp_neigh_node->real_packet_count = 888 tmp_neigh_node->real_packet_count =
869 bit_packet_count(tmp_neigh_node->real_bits); 889 bitmap_weight(tmp_neigh_node->real_bits,
890 TQ_LOCAL_WINDOW_SIZE);
870 } 891 }
871 rcu_read_unlock(); 892 rcu_read_unlock();
872 893
873 if (need_update) { 894 if (need_update) {
874 bat_dbg(DBG_BATMAN, bat_priv, 895 bat_dbg(DBG_BATMAN, bat_priv,
875 "updating last_seqno: old %d, new %d\n", 896 "updating last_seqno: old %u, new %u\n",
876 orig_node->last_real_seqno, batman_ogm_packet->seqno); 897 orig_node->last_real_seqno, batman_ogm_packet->seqno);
877 orig_node->last_real_seqno = batman_ogm_packet->seqno; 898 orig_node->last_real_seqno = batman_ogm_packet->seqno;
878 } 899 }
@@ -913,7 +934,7 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
913 * packet in an aggregation. Here we expect that the padding 934 * packet in an aggregation. Here we expect that the padding
914 * is always zero (or not 0x01) 935 * is always zero (or not 0x01)
915 */ 936 */
916 if (batman_ogm_packet->header.packet_type != BAT_OGM) 937 if (batman_ogm_packet->header.packet_type != BAT_IV_OGM)
917 return; 938 return;
918 939
919 /* could be changed by schedule_own_packet() */ 940 /* could be changed by schedule_own_packet() */
@@ -925,7 +946,7 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
925 batman_ogm_packet->orig) ? 1 : 0); 946 batman_ogm_packet->orig) ? 1 : 0);
926 947
927 bat_dbg(DBG_BATMAN, bat_priv, 948 bat_dbg(DBG_BATMAN, bat_priv,
928 "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %d, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n", 949 "Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
929 ethhdr->h_source, if_incoming->net_dev->name, 950 ethhdr->h_source, if_incoming->net_dev->name,
930 if_incoming->net_dev->dev_addr, batman_ogm_packet->orig, 951 if_incoming->net_dev->dev_addr, batman_ogm_packet->orig,
931 batman_ogm_packet->prev_sender, batman_ogm_packet->seqno, 952 batman_ogm_packet->prev_sender, batman_ogm_packet->seqno,
@@ -998,11 +1019,11 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
998 1019
999 spin_lock_bh(&orig_neigh_node->ogm_cnt_lock); 1020 spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
1000 word = &(orig_neigh_node->bcast_own[offset]); 1021 word = &(orig_neigh_node->bcast_own[offset]);
1001 bit_mark(word, 1022 bat_set_bit(word,
1002 if_incoming_seqno - 1023 if_incoming_seqno -
1003 batman_ogm_packet->seqno - 2); 1024 batman_ogm_packet->seqno - 2);
1004 orig_neigh_node->bcast_own_sum[if_incoming->if_num] = 1025 orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
1005 bit_packet_count(word); 1026 bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE);
1006 spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock); 1027 spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
1007 } 1028 }
1008 1029
@@ -1152,12 +1173,12 @@ static void bat_iv_ogm_receive(struct hard_iface *if_incoming,
1152 batman_ogm_packet->seqno = ntohl(batman_ogm_packet->seqno); 1173 batman_ogm_packet->seqno = ntohl(batman_ogm_packet->seqno);
1153 batman_ogm_packet->tt_crc = ntohs(batman_ogm_packet->tt_crc); 1174 batman_ogm_packet->tt_crc = ntohs(batman_ogm_packet->tt_crc);
1154 1175
1155 tt_buff = packet_buff + buff_pos + BATMAN_OGM_LEN; 1176 tt_buff = packet_buff + buff_pos + BATMAN_OGM_HLEN;
1156 1177
1157 bat_iv_ogm_process(ethhdr, batman_ogm_packet, 1178 bat_iv_ogm_process(ethhdr, batman_ogm_packet,
1158 tt_buff, if_incoming); 1179 tt_buff, if_incoming);
1159 1180
1160 buff_pos += BATMAN_OGM_LEN + 1181 buff_pos += BATMAN_OGM_HLEN +
1161 tt_len(batman_ogm_packet->tt_num_changes); 1182 tt_len(batman_ogm_packet->tt_num_changes);
1162 1183
1163 batman_ogm_packet = (struct batman_ogm_packet *) 1184 batman_ogm_packet = (struct batman_ogm_packet *)
@@ -1168,8 +1189,9 @@ static void bat_iv_ogm_receive(struct hard_iface *if_incoming,
1168 1189
1169static struct bat_algo_ops batman_iv __read_mostly = { 1190static struct bat_algo_ops batman_iv __read_mostly = {
1170 .name = "BATMAN IV", 1191 .name = "BATMAN IV",
1171 .bat_ogm_init = bat_iv_ogm_init, 1192 .bat_iface_enable = bat_iv_ogm_iface_enable,
1172 .bat_ogm_init_primary = bat_iv_ogm_init_primary, 1193 .bat_iface_disable = bat_iv_ogm_iface_disable,
1194 .bat_primary_iface_set = bat_iv_ogm_primary_iface_set,
1173 .bat_ogm_update_mac = bat_iv_ogm_update_mac, 1195 .bat_ogm_update_mac = bat_iv_ogm_update_mac,
1174 .bat_ogm_schedule = bat_iv_ogm_schedule, 1196 .bat_ogm_schedule = bat_iv_ogm_schedule,
1175 .bat_ogm_emit = bat_iv_ogm_emit, 1197 .bat_ogm_emit = bat_iv_ogm_emit,
diff --git a/net/batman-adv/bat_sysfs.c b/net/batman-adv/bat_sysfs.c
index 68ff759fc304..2c816883ca13 100644
--- a/net/batman-adv/bat_sysfs.c
+++ b/net/batman-adv/bat_sysfs.c
@@ -149,7 +149,7 @@ static int store_bool_attr(char *buff, size_t count,
149 atomic_read(attr) == 1 ? "enabled" : "disabled", 149 atomic_read(attr) == 1 ? "enabled" : "disabled",
150 enabled == 1 ? "enabled" : "disabled"); 150 enabled == 1 ? "enabled" : "disabled");
151 151
152 atomic_set(attr, (unsigned)enabled); 152 atomic_set(attr, (unsigned int)enabled);
153 return count; 153 return count;
154} 154}
155 155
@@ -268,7 +268,7 @@ static ssize_t store_vis_mode(struct kobject *kobj, struct attribute *attr,
268 "client" : "server", vis_mode_tmp == VIS_TYPE_CLIENT_UPDATE ? 268 "client" : "server", vis_mode_tmp == VIS_TYPE_CLIENT_UPDATE ?
269 "client" : "server"); 269 "client" : "server");
270 270
271 atomic_set(&bat_priv->vis_mode, (unsigned)vis_mode_tmp); 271 atomic_set(&bat_priv->vis_mode, (unsigned int)vis_mode_tmp);
272 return count; 272 return count;
273} 273}
274 274
@@ -354,7 +354,7 @@ static ssize_t store_gw_mode(struct kobject *kobj, struct attribute *attr,
354 curr_gw_mode_str, buff); 354 curr_gw_mode_str, buff);
355 355
356 gw_deselect(bat_priv); 356 gw_deselect(bat_priv);
357 atomic_set(&bat_priv->gw_mode, (unsigned)gw_mode_tmp); 357 atomic_set(&bat_priv->gw_mode, (unsigned int)gw_mode_tmp);
358 return count; 358 return count;
359} 359}
360 360
@@ -386,6 +386,9 @@ static ssize_t store_gw_bwidth(struct kobject *kobj, struct attribute *attr,
386 386
387BAT_ATTR_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL); 387BAT_ATTR_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL);
388BAT_ATTR_BOOL(bonding, S_IRUGO | S_IWUSR, NULL); 388BAT_ATTR_BOOL(bonding, S_IRUGO | S_IWUSR, NULL);
389#ifdef CONFIG_BATMAN_ADV_BLA
390BAT_ATTR_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL);
391#endif
389BAT_ATTR_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu); 392BAT_ATTR_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu);
390BAT_ATTR_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL); 393BAT_ATTR_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL);
391static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode); 394static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode);
@@ -398,12 +401,15 @@ BAT_ATTR_UINT(gw_sel_class, S_IRUGO | S_IWUSR, 1, TQ_MAX_VALUE,
398static BAT_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, show_gw_bwidth, 401static BAT_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, show_gw_bwidth,
399 store_gw_bwidth); 402 store_gw_bwidth);
400#ifdef CONFIG_BATMAN_ADV_DEBUG 403#ifdef CONFIG_BATMAN_ADV_DEBUG
401BAT_ATTR_UINT(log_level, S_IRUGO | S_IWUSR, 0, 7, NULL); 404BAT_ATTR_UINT(log_level, S_IRUGO | S_IWUSR, 0, 15, NULL);
402#endif 405#endif
403 406
404static struct bat_attribute *mesh_attrs[] = { 407static struct bat_attribute *mesh_attrs[] = {
405 &bat_attr_aggregated_ogms, 408 &bat_attr_aggregated_ogms,
406 &bat_attr_bonding, 409 &bat_attr_bonding,
410#ifdef CONFIG_BATMAN_ADV_BLA
411 &bat_attr_bridge_loop_avoidance,
412#endif
407 &bat_attr_fragmentation, 413 &bat_attr_fragmentation,
408 &bat_attr_ap_isolation, 414 &bat_attr_ap_isolation,
409 &bat_attr_vis_mode, 415 &bat_attr_vis_mode,
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index 6d0aa216b232..07ae6e1b8aca 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -24,100 +24,13 @@
24 24
25#include <linux/bitops.h> 25#include <linux/bitops.h>
26 26
27/* returns true if the corresponding bit in the given seq_bits indicates true
28 * and curr_seqno is within range of last_seqno */
29int get_bit_status(const unsigned long *seq_bits, uint32_t last_seqno,
30 uint32_t curr_seqno)
31{
32 int32_t diff, word_offset, word_num;
33
34 diff = last_seqno - curr_seqno;
35 if (diff < 0 || diff >= TQ_LOCAL_WINDOW_SIZE) {
36 return 0;
37 } else {
38 /* which word */
39 word_num = (last_seqno - curr_seqno) / WORD_BIT_SIZE;
40 /* which position in the selected word */
41 word_offset = (last_seqno - curr_seqno) % WORD_BIT_SIZE;
42
43 if (test_bit(word_offset, &seq_bits[word_num]))
44 return 1;
45 else
46 return 0;
47 }
48}
49
50/* turn corresponding bit on, so we can remember that we got the packet */
51void bit_mark(unsigned long *seq_bits, int32_t n)
52{
53 int32_t word_offset, word_num;
54
55 /* if too old, just drop it */
56 if (n < 0 || n >= TQ_LOCAL_WINDOW_SIZE)
57 return;
58
59 /* which word */
60 word_num = n / WORD_BIT_SIZE;
61 /* which position in the selected word */
62 word_offset = n % WORD_BIT_SIZE;
63
64 set_bit(word_offset, &seq_bits[word_num]); /* turn the position on */
65}
66
67/* shift the packet array by n places. */ 27/* shift the packet array by n places. */
68static void bit_shift(unsigned long *seq_bits, int32_t n) 28static void bat_bitmap_shift_left(unsigned long *seq_bits, int32_t n)
69{ 29{
70 int32_t word_offset, word_num;
71 int32_t i;
72
73 if (n <= 0 || n >= TQ_LOCAL_WINDOW_SIZE) 30 if (n <= 0 || n >= TQ_LOCAL_WINDOW_SIZE)
74 return; 31 return;
75 32
76 word_offset = n % WORD_BIT_SIZE;/* shift how much inside each word */ 33 bitmap_shift_left(seq_bits, seq_bits, n, TQ_LOCAL_WINDOW_SIZE);
77 word_num = n / WORD_BIT_SIZE; /* shift over how much (full) words */
78
79 for (i = NUM_WORDS - 1; i > word_num; i--) {
80 /* going from old to new, so we don't overwrite the data we copy
81 * from.
82 *
83 * left is high, right is low: FEDC BA98 7654 3210
84 * ^^ ^^
85 * vvvv
86 * ^^^^ = from, vvvvv =to, we'd have word_num==1 and
87 * word_offset==WORD_BIT_SIZE/2 ????? in this example.
88 * (=24 bits)
89 *
90 * our desired output would be: 9876 5432 1000 0000
91 * */
92
93 seq_bits[i] =
94 (seq_bits[i - word_num] << word_offset) +
95 /* take the lower port from the left half, shift it left
96 * to its final position */
97 (seq_bits[i - word_num - 1] >>
98 (WORD_BIT_SIZE-word_offset));
99 /* and the upper part of the right half and shift it left to
100 * its position */
101 /* for our example that would be: word[0] = 9800 + 0076 =
102 * 9876 */
103 }
104 /* now for our last word, i==word_num, we only have its "left" half.
105 * that's the 1000 word in our example.*/
106
107 seq_bits[i] = (seq_bits[i - word_num] << word_offset);
108
109 /* pad the rest with 0, if there is anything */
110 i--;
111
112 for (; i >= 0; i--)
113 seq_bits[i] = 0;
114}
115
116static void bit_reset_window(unsigned long *seq_bits)
117{
118 int i;
119 for (i = 0; i < NUM_WORDS; i++)
120 seq_bits[i] = 0;
121} 34}
122 35
123 36
@@ -137,7 +50,7 @@ int bit_get_packet(void *priv, unsigned long *seq_bits,
137 50
138 if ((seq_num_diff <= 0) && (seq_num_diff > -TQ_LOCAL_WINDOW_SIZE)) { 51 if ((seq_num_diff <= 0) && (seq_num_diff > -TQ_LOCAL_WINDOW_SIZE)) {
139 if (set_mark) 52 if (set_mark)
140 bit_mark(seq_bits, -seq_num_diff); 53 bat_set_bit(seq_bits, -seq_num_diff);
141 return 0; 54 return 0;
142 } 55 }
143 56
@@ -145,10 +58,10 @@ int bit_get_packet(void *priv, unsigned long *seq_bits,
145 * set the mark if required */ 58 * set the mark if required */
146 59
147 if ((seq_num_diff > 0) && (seq_num_diff < TQ_LOCAL_WINDOW_SIZE)) { 60 if ((seq_num_diff > 0) && (seq_num_diff < TQ_LOCAL_WINDOW_SIZE)) {
148 bit_shift(seq_bits, seq_num_diff); 61 bat_bitmap_shift_left(seq_bits, seq_num_diff);
149 62
150 if (set_mark) 63 if (set_mark)
151 bit_mark(seq_bits, 0); 64 bat_set_bit(seq_bits, 0);
152 return 1; 65 return 1;
153 } 66 }
154 67
@@ -159,9 +72,9 @@ int bit_get_packet(void *priv, unsigned long *seq_bits,
159 bat_dbg(DBG_BATMAN, bat_priv, 72 bat_dbg(DBG_BATMAN, bat_priv,
160 "We missed a lot of packets (%i) !\n", 73 "We missed a lot of packets (%i) !\n",
161 seq_num_diff - 1); 74 seq_num_diff - 1);
162 bit_reset_window(seq_bits); 75 bitmap_zero(seq_bits, TQ_LOCAL_WINDOW_SIZE);
163 if (set_mark) 76 if (set_mark)
164 bit_mark(seq_bits, 0); 77 bat_set_bit(seq_bits, 0);
165 return 1; 78 return 1;
166 } 79 }
167 80
@@ -176,9 +89,9 @@ int bit_get_packet(void *priv, unsigned long *seq_bits,
176 bat_dbg(DBG_BATMAN, bat_priv, 89 bat_dbg(DBG_BATMAN, bat_priv,
177 "Other host probably restarted!\n"); 90 "Other host probably restarted!\n");
178 91
179 bit_reset_window(seq_bits); 92 bitmap_zero(seq_bits, TQ_LOCAL_WINDOW_SIZE);
180 if (set_mark) 93 if (set_mark)
181 bit_mark(seq_bits, 0); 94 bat_set_bit(seq_bits, 0);
182 95
183 return 1; 96 return 1;
184 } 97 }
@@ -186,16 +99,3 @@ int bit_get_packet(void *priv, unsigned long *seq_bits,
186 /* never reached */ 99 /* never reached */
187 return 0; 100 return 0;
188} 101}
189
/* count the hamming weight, how many good packets did we receive? just count
 * the 1's.
 */
int bit_packet_count(const unsigned long *seq_bits)
{
	int i, hamming = 0;

	/* sum the population count of every word of the window bitfield */
	for (i = 0; i < NUM_WORDS; i++)
		hamming += hweight_long(seq_bits[i]);

	return hamming;
}
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
index c6135728a680..1835c15cda41 100644
--- a/net/batman-adv/bitarray.h
+++ b/net/batman-adv/bitarray.h
@@ -22,23 +22,33 @@
22#ifndef _NET_BATMAN_ADV_BITARRAY_H_ 22#ifndef _NET_BATMAN_ADV_BITARRAY_H_
23#define _NET_BATMAN_ADV_BITARRAY_H_ 23#define _NET_BATMAN_ADV_BITARRAY_H_
24 24
25#define WORD_BIT_SIZE (sizeof(unsigned long) * 8)
26
27/* returns true if the corresponding bit in the given seq_bits indicates true 25/* returns true if the corresponding bit in the given seq_bits indicates true
28 * and curr_seqno is within range of last_seqno */ 26 * and curr_seqno is within range of last_seqno */
29int get_bit_status(const unsigned long *seq_bits, uint32_t last_seqno, 27static inline int bat_test_bit(const unsigned long *seq_bits,
30 uint32_t curr_seqno); 28 uint32_t last_seqno, uint32_t curr_seqno)
29{
30 int32_t diff;
31
32 diff = last_seqno - curr_seqno;
33 if (diff < 0 || diff >= TQ_LOCAL_WINDOW_SIZE)
34 return 0;
35 else
36 return test_bit(diff, seq_bits);
37}
31 38
32/* turn corresponding bit on, so we can remember that we got the packet */ 39/* turn corresponding bit on, so we can remember that we got the packet */
33void bit_mark(unsigned long *seq_bits, int32_t n); 40static inline void bat_set_bit(unsigned long *seq_bits, int32_t n)
41{
42 /* if too old, just drop it */
43 if (n < 0 || n >= TQ_LOCAL_WINDOW_SIZE)
44 return;
34 45
46 set_bit(n, seq_bits); /* turn the position on */
47}
35 48
36/* receive and process one packet, returns 1 if received seq_num is considered 49/* receive and process one packet, returns 1 if received seq_num is considered
37 * new, 0 if old */ 50 * new, 0 if old */
38int bit_get_packet(void *priv, unsigned long *seq_bits, 51int bit_get_packet(void *priv, unsigned long *seq_bits,
39 int32_t seq_num_diff, int set_mark); 52 int32_t seq_num_diff, int set_mark);
40 53
41/* count the hamming weight, how many good packets did we receive? */
42int bit_packet_count(const unsigned long *seq_bits);
43
44#endif /* _NET_BATMAN_ADV_BITARRAY_H_ */ 54#endif /* _NET_BATMAN_ADV_BITARRAY_H_ */
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
new file mode 100644
index 000000000000..ad394c6496cc
--- /dev/null
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -0,0 +1,1580 @@
1/*
2 * Copyright (C) 2011 B.A.T.M.A.N. contributors:
3 *
4 * Simon Wunderlich
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA
19 *
20 */
21
22#include "main.h"
23#include "hash.h"
24#include "hard-interface.h"
25#include "originator.h"
26#include "bridge_loop_avoidance.h"
27#include "translation-table.h"
28#include "send.h"
29
30#include <linux/etherdevice.h>
31#include <linux/crc16.h>
32#include <linux/if_arp.h>
33#include <net/arp.h>
34#include <linux/if_vlan.h>
35
36static const uint8_t announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
37
38static void bla_periodic_work(struct work_struct *work);
39static void bla_send_announce(struct bat_priv *bat_priv,
40 struct backbone_gw *backbone_gw);
41
/* return the index of the claim: maps the {addr, vid} key prefix of
 * *data to a bucket in [0, size) via the Jenkins one-at-a-time hash
 */
static inline uint32_t choose_claim(const void *data, uint32_t size)
{
	const unsigned char *bytes = data;
	const size_t keylen = ETH_ALEN + sizeof(short);
	uint32_t state = 0;
	size_t pos;

	/* mixing phase: fold each key byte into the accumulator */
	for (pos = 0; pos < keylen; pos++) {
		state += bytes[pos];
		state += state << 10;
		state ^= state >> 6;
	}

	/* final avalanche */
	state += state << 3;
	state ^= state >> 11;
	state += state << 15;

	return state % size;
}
61
/* return the index of the backbone gateway: same Jenkins one-at-a-time
 * hash over the {orig, vid} key prefix, reduced to [0, size)
 */
static inline uint32_t choose_backbone_gw(const void *data, uint32_t size)
{
	const unsigned char *p = data;
	const unsigned char *end = p + ETH_ALEN + sizeof(short);
	uint32_t h = 0;

	while (p < end) {
		h += *p++;
		h += h << 10;
		h ^= h >> 6;
	}

	/* finalization mix */
	h += h << 3;
	h ^= h >> 11;
	h += h << 15;

	return h % size;
}
81
82
83/* compares address and vid of two backbone gws */
84static int compare_backbone_gw(const struct hlist_node *node, const void *data2)
85{
86 const void *data1 = container_of(node, struct backbone_gw,
87 hash_entry);
88
89 return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
90}
91
92/* compares address and vid of two claims */
93static int compare_claim(const struct hlist_node *node, const void *data2)
94{
95 const void *data1 = container_of(node, struct claim,
96 hash_entry);
97
98 return (memcmp(data1, data2, ETH_ALEN + sizeof(short)) == 0 ? 1 : 0);
99}
100
/* drop a reference to a backbone gw; the structure is freed after an
 * RCU grace period once the last reference is gone
 */
static void backbone_gw_free_ref(struct backbone_gw *backbone_gw)
{
	if (atomic_dec_and_test(&backbone_gw->refcount))
		kfree_rcu(backbone_gw, rcu);
}
107
/* RCU callback: finally deinitialize the claim once no readers remain */
static void claim_free_rcu(struct rcu_head *rcu)
{
	struct claim *claim;

	claim = container_of(rcu, struct claim, rcu);

	/* the claim held a reference on its backbone gw; release it now */
	backbone_gw_free_ref(claim->backbone_gw);
	kfree(claim);
}
118
/* drop a reference to a claim, call claim_free_rcu if its the last
 * reference (the actual free is deferred to an RCU grace period)
 */
static void claim_free_ref(struct claim *claim)
{
	if (atomic_dec_and_test(&claim->refcount))
		call_rcu(&claim->rcu, claim_free_rcu);
}
125
/**
 * @bat_priv: the bat priv with all the soft interface information
 * @data: search key with addr and vid filled in (may be local/static data)
 *
 * looks for a claim in the hash, and returns it with an incremented
 * refcount if found, or NULL otherwise. The caller must release the
 * reference with claim_free_ref().
 */
static struct claim *claim_hash_find(struct bat_priv *bat_priv,
				     struct claim *data)
{
	struct hashtable_t *hash = bat_priv->claim_hash;
	struct hlist_head *head;
	struct hlist_node *node;
	struct claim *claim;
	struct claim *claim_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	index = choose_claim(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
		if (!compare_claim(&claim->hash_entry, data))
			continue;

		/* skip entries whose last reference is already dropping */
		if (!atomic_inc_not_zero(&claim->refcount))
			continue;

		claim_tmp = claim;
		break;
	}
	rcu_read_unlock();

	return claim_tmp;
}
164
/**
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the address of the originator
 * @vid: the VLAN ID
 *
 * looks for a backbone gateway in the hash, and returns it with an
 * incremented refcount if found, or NULL otherwise. The caller must
 * release the reference with backbone_gw_free_ref().
 */
static struct backbone_gw *backbone_hash_find(struct bat_priv *bat_priv,
					      uint8_t *addr, short vid)
{
	struct hashtable_t *hash = bat_priv->backbone_hash;
	struct hlist_head *head;
	struct hlist_node *node;
	struct backbone_gw search_entry, *backbone_gw;
	struct backbone_gw *backbone_gw_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	/* build a stack-local search key; only orig and vid are compared */
	memcpy(search_entry.orig, addr, ETH_ALEN);
	search_entry.vid = vid;

	index = choose_backbone_gw(&search_entry, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
		if (!compare_backbone_gw(&backbone_gw->hash_entry,
					 &search_entry))
			continue;

		/* skip entries whose last reference is already dropping */
		if (!atomic_inc_not_zero(&backbone_gw->refcount))
			continue;

		backbone_gw_tmp = backbone_gw;
		break;
	}
	rcu_read_unlock();

	return backbone_gw_tmp;
}
208
/* delete all claims for a backbone gateway and reset its checksum */
static void bla_del_backbone_claims(struct backbone_gw *backbone_gw)
{
	struct hashtable_t *hash;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	struct claim *claim;
	int i;
	spinlock_t *list_lock; /* protects write access to the hash lists */

	hash = backbone_gw->bat_priv->claim_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(claim, node, node_tmp,
					  head, hash_entry) {

			if (claim->backbone_gw != backbone_gw)
				continue;

			/* drop the hash's reference; the claim itself is only
			 * freed after an RCU grace period (claim_free_rcu)
			 */
			claim_free_ref(claim);
			hlist_del_rcu(node);
		}
		spin_unlock_bh(list_lock);
	}

	/* all claims gone, initialize CRC */
	backbone_gw->crc = BLA_CRC_INIT;
}
243
244/**
245 * @bat_priv: the bat priv with all the soft interface information
246 * @orig: the mac address to be announced within the claim
247 * @vid: the VLAN ID
248 * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
249 *
250 * sends a claim frame according to the provided info.
251 */
252static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac,
253 short vid, int claimtype)
254{
255 struct sk_buff *skb;
256 struct ethhdr *ethhdr;
257 struct hard_iface *primary_if;
258 struct net_device *soft_iface;
259 uint8_t *hw_src;
260 struct bla_claim_dst local_claim_dest;
261 uint32_t zeroip = 0;
262
263 primary_if = primary_if_get_selected(bat_priv);
264 if (!primary_if)
265 return;
266
267 memcpy(&local_claim_dest, &bat_priv->claim_dest,
268 sizeof(local_claim_dest));
269 local_claim_dest.type = claimtype;
270
271 soft_iface = primary_if->soft_iface;
272
273 skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
274 /* IP DST: 0.0.0.0 */
275 zeroip,
276 primary_if->soft_iface,
277 /* IP SRC: 0.0.0.0 */
278 zeroip,
279 /* Ethernet DST: Broadcast */
280 NULL,
281 /* Ethernet SRC/HW SRC: originator mac */
282 primary_if->net_dev->dev_addr,
283 /* HW DST: FF:43:05:XX:00:00
284 * with XX = claim type
285 * and YY:YY = group id
286 */
287 (uint8_t *)&local_claim_dest);
288
289 if (!skb)
290 goto out;
291
292 ethhdr = (struct ethhdr *)skb->data;
293 hw_src = (uint8_t *)ethhdr + ETH_HLEN + sizeof(struct arphdr);
294
295 /* now we pretend that the client would have sent this ... */
296 switch (claimtype) {
297 case CLAIM_TYPE_ADD:
298 /* normal claim frame
299 * set Ethernet SRC to the clients mac
300 */
301 memcpy(ethhdr->h_source, mac, ETH_ALEN);
302 bat_dbg(DBG_BLA, bat_priv,
303 "bla_send_claim(): CLAIM %pM on vid %d\n", mac, vid);
304 break;
305 case CLAIM_TYPE_DEL:
306 /* unclaim frame
307 * set HW SRC to the clients mac
308 */
309 memcpy(hw_src, mac, ETH_ALEN);
310 bat_dbg(DBG_BLA, bat_priv,
311 "bla_send_claim(): UNCLAIM %pM on vid %d\n", mac, vid);
312 break;
313 case CLAIM_TYPE_ANNOUNCE:
314 /* announcement frame
315 * set HW SRC to the special mac containg the crc
316 */
317 memcpy(hw_src, mac, ETH_ALEN);
318 bat_dbg(DBG_BLA, bat_priv,
319 "bla_send_claim(): ANNOUNCE of %pM on vid %d\n",
320 ethhdr->h_source, vid);
321 break;
322 case CLAIM_TYPE_REQUEST:
323 /* request frame
324 * set HW SRC to the special mac containg the crc
325 */
326 memcpy(hw_src, mac, ETH_ALEN);
327 memcpy(ethhdr->h_dest, mac, ETH_ALEN);
328 bat_dbg(DBG_BLA, bat_priv,
329 "bla_send_claim(): REQUEST of %pM to %pMon vid %d\n",
330 ethhdr->h_source, ethhdr->h_dest, vid);
331 break;
332
333 }
334
335 if (vid != -1)
336 skb = vlan_insert_tag(skb, vid);
337
338 skb_reset_mac_header(skb);
339 skb->protocol = eth_type_trans(skb, soft_iface);
340 bat_priv->stats.rx_packets++;
341 bat_priv->stats.rx_bytes += skb->len + ETH_HLEN;
342 soft_iface->last_rx = jiffies;
343
344 netif_rx(skb);
345out:
346 if (primary_if)
347 hardif_free_ref(primary_if);
348}
349
/**
 * @bat_priv: the bat priv with all the soft interface information
 * @orig: the mac address of the originator
 * @vid: the VLAN ID
 *
 * searches for the backbone gw or creates a new one if it could not
 * be found. Returns NULL on allocation or hash failure; on success the
 * caller holds a reference and must drop it with backbone_gw_free_ref().
 */
static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv,
					       uint8_t *orig, short vid)
{
	struct backbone_gw *entry;
	struct orig_node *orig_node;
	int hash_added;

	entry = backbone_hash_find(bat_priv, orig, vid);

	if (entry)
		return entry;

	bat_dbg(DBG_BLA, bat_priv,
		"bla_get_backbone_gw(): not found (%pM, %d), creating new entry\n",
		orig, vid);

	/* GFP_ATOMIC: may be called from softirq (frame rx) context */
	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return NULL;

	entry->vid = vid;
	entry->lasttime = jiffies;
	entry->crc = BLA_CRC_INIT;
	entry->bat_priv = bat_priv;
	atomic_set(&entry->request_sent, 0);
	memcpy(entry->orig, orig, ETH_ALEN);

	/* one for the hash, one for returning */
	atomic_set(&entry->refcount, 2);

	hash_added = hash_add(bat_priv->backbone_hash, compare_backbone_gw,
			      choose_backbone_gw, entry, &entry->hash_entry);

	if (unlikely(hash_added != 0)) {
		/* hash failed, free the structure */
		kfree(entry);
		return NULL;
	}

	/* this is a gateway now, remove any tt entries */
	orig_node = orig_hash_find(bat_priv, orig);
	if (orig_node) {
		tt_global_del_orig(bat_priv, orig_node,
				   "became a backbone gateway");
		orig_node_free_ref(orig_node);
	}
	return entry;
}
406
/* update or add the own backbone gw to make sure we announce
 * where we receive other backbone gws
 */
static void bla_update_own_backbone_gw(struct bat_priv *bat_priv,
				       struct hard_iface *primary_if,
				       short vid)
{
	struct backbone_gw *backbone_gw;

	/* creates the entry for this vlan if it does not exist yet */
	backbone_gw = bla_get_backbone_gw(bat_priv,
					  primary_if->net_dev->dev_addr, vid);
	if (unlikely(!backbone_gw))
		return;

	/* refresh the timeout and drop the reference from _get_ */
	backbone_gw->lasttime = jiffies;
	backbone_gw_free_ref(backbone_gw);
}
424
/**
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface
 * @vid: the vid where the request came on
 *
 * Repeat all of our own claims, and finally send an ANNOUNCE frame
 * to allow the requester another check if the CRC is correct now.
 */
static void bla_answer_request(struct bat_priv *bat_priv,
			       struct hard_iface *primary_if, short vid)
{
	struct hlist_node *node;
	struct hlist_head *head;
	struct hashtable_t *hash;
	struct claim *claim;
	struct backbone_gw *backbone_gw;
	int i;

	bat_dbg(DBG_BLA, bat_priv,
		"bla_answer_request(): received a claim request, send all of our own claims again\n");

	/* we only answer for our own backbone gw entry on this vlan */
	backbone_gw = backbone_hash_find(bat_priv,
					 primary_if->net_dev->dev_addr, vid);
	if (!backbone_gw)
		return;

	/* walk the whole claim hash and resend our own claims */
	hash = bat_priv->claim_hash;
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
			/* only own claims are interesting */
			if (claim->backbone_gw != backbone_gw)
				continue;

			bla_send_claim(bat_priv, claim->addr, claim->vid,
				       CLAIM_TYPE_ADD);
		}
		rcu_read_unlock();
	}

	/* finally, send an announcement frame */
	bla_send_announce(bat_priv, backbone_gw);
	backbone_gw_free_ref(backbone_gw);
}
470
/**
 * @backbone_gw: the backbone gateway from whom we are out of sync
 *
 * When the crc is wrong, ask the backbone gateway for a full table update.
 * After the request, it will repeat all of his own claims and finally
 * send an announcement claim with which we can check again.
 */
static void bla_send_request(struct backbone_gw *backbone_gw)
{
	/* first, remove all old entries */
	bla_del_backbone_claims(backbone_gw);

	bat_dbg(DBG_BLA, backbone_gw->bat_priv,
		"Sending REQUEST to %pM\n",
		backbone_gw->orig);

	/* send request */
	bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
		       backbone_gw->vid, CLAIM_TYPE_REQUEST);

	/* no local broadcasts should be sent or received, for now.
	 * bla_num_requests > 0 signals the rest of the module to hold
	 * broadcast traffic until the answer arrives.
	 */
	if (!atomic_read(&backbone_gw->request_sent)) {
		atomic_inc(&backbone_gw->bat_priv->bla_num_requests);
		atomic_set(&backbone_gw->request_sent, 1);
	}
}
497
/**
 * @bat_priv: the bat priv with all the soft interface information
 * @backbone_gw: our backbone gateway which should be announced
 *
 * This function sends an announcement. It is called from multiple
 * places.
 */
static void bla_send_announce(struct bat_priv *bat_priv,
			      struct backbone_gw *backbone_gw)
{
	uint8_t mac[ETH_ALEN];
	uint16_t crc;

	/* special announcement mac: 43:05:43:05 prefix + 16 bit crc
	 * in network byte order in the last two bytes
	 */
	memcpy(mac, announce_mac, 4);
	crc = htons(backbone_gw->crc);
	memcpy(&mac[4], (uint8_t *)&crc, 2);

	bla_send_claim(bat_priv, mac, backbone_gw->vid, CLAIM_TYPE_ANNOUNCE);

}
518
/**
 * @bat_priv: the bat priv with all the soft interface information
 * @mac: the mac address of the claim
 * @vid: the VLAN ID of the frame
 * @backbone_gw: the backbone gateway which claims it
 *
 * Adds a claim in the claim hash (or moves an existing claim to a new
 * backbone gateway) and keeps the backbone CRCs in sync.
 */
static void bla_add_claim(struct bat_priv *bat_priv, const uint8_t *mac,
			  const short vid, struct backbone_gw *backbone_gw)
{
	struct claim *claim;
	struct claim search_claim;
	int hash_added;

	memcpy(search_claim.addr, mac, ETH_ALEN);
	search_claim.vid = vid;
	claim = claim_hash_find(bat_priv, &search_claim);

	/* create a new claim entry if it does not exist yet. */
	if (!claim) {
		claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
		if (!claim)
			return;

		memcpy(claim->addr, mac, ETH_ALEN);
		claim->vid = vid;
		claim->lasttime = jiffies;
		claim->backbone_gw = backbone_gw;

		/* one reference for the hash, one for this function */
		atomic_set(&claim->refcount, 2);
		bat_dbg(DBG_BLA, bat_priv,
			"bla_add_claim(): adding new entry %pM, vid %d to hash ...\n",
			mac, vid);
		hash_added = hash_add(bat_priv->claim_hash, compare_claim,
				      choose_claim, claim, &claim->hash_entry);

		if (unlikely(hash_added != 0)) {
			/* only local changes happened. */
			kfree(claim);
			return;
		}
	} else {
		claim->lasttime = jiffies;
		if (claim->backbone_gw == backbone_gw)
			/* no need to register a new backbone */
			goto claim_free_ref;

		bat_dbg(DBG_BLA, bat_priv,
			"bla_add_claim(): changing ownership for %pM, vid %d\n",
			mac, vid);

		/* remove the claim from the old backbone's checksum and
		 * drop the reference the claim held on it
		 */
		claim->backbone_gw->crc ^=
			crc16(0, claim->addr, ETH_ALEN);
		backbone_gw_free_ref(claim->backbone_gw);

	}
	/* set (new) backbone gw */
	atomic_inc(&backbone_gw->refcount);
	claim->backbone_gw = backbone_gw;

	/* fold the claim into the new backbone's checksum */
	backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
	backbone_gw->lasttime = jiffies;

claim_free_ref:
	claim_free_ref(claim);
}
586
/* Delete a claim from the claim hash which has the
 * given mac address and vid.
 */
static void bla_del_claim(struct bat_priv *bat_priv, const uint8_t *mac,
			  const short vid)
{
	struct claim search_claim, *claim;

	memcpy(search_claim.addr, mac, ETH_ALEN);
	search_claim.vid = vid;
	claim = claim_hash_find(bat_priv, &search_claim);
	if (!claim)
		return;

	bat_dbg(DBG_BLA, bat_priv, "bla_del_claim(): %pM, vid %d\n", mac, vid);

	hash_remove(bat_priv->claim_hash, compare_claim, choose_claim, claim);
	claim_free_ref(claim); /* reference from the hash is gone */

	/* still safe to access: we hold the claim_hash_find() reference */
	claim->backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);

	/* don't need the reference from hash_find() anymore */
	claim_free_ref(claim);
}
611
/* check for ANNOUNCE frame, return 1 if handled */
static int handle_announce(struct bat_priv *bat_priv,
			   uint8_t *an_addr, uint8_t *backbone_addr, short vid)
{
	struct backbone_gw *backbone_gw;
	uint16_t crc;

	/* announcement frames carry the 43:05:43:05 prefix in hw_src */
	if (memcmp(an_addr, announce_mac, 4) != 0)
		return 0;

	backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid);

	if (unlikely(!backbone_gw))
		return 1;


	/* handle as ANNOUNCE frame */
	backbone_gw->lasttime = jiffies;
	/* the crc lives in bytes 4-5 of the announcement address
	 * (NOTE(review): uint16_t load may be unaligned - verify this is
	 * safe on the supported architectures)
	 */
	crc = ntohs(*((uint16_t *)(&an_addr[4])));

	bat_dbg(DBG_BLA, bat_priv,
		"handle_announce(): ANNOUNCE vid %d (sent by %pM)... CRC = %04x\n",
		vid, backbone_gw->orig, crc);

	if (backbone_gw->crc != crc) {
		bat_dbg(DBG_BLA, backbone_gw->bat_priv,
			"handle_announce(): CRC FAILED for %pM/%d (my = %04x, sent = %04x)\n",
			backbone_gw->orig, backbone_gw->vid, backbone_gw->crc,
			crc);

		/* checksums differ: ask the gateway to resend its claims */
		bla_send_request(backbone_gw);
	} else {
		/* if we have sent a request and the crc was OK,
		 * we can allow traffic again.
		 */
		if (atomic_read(&backbone_gw->request_sent)) {
			atomic_dec(&backbone_gw->bat_priv->bla_num_requests);
			atomic_set(&backbone_gw->request_sent, 0);
		}
	}

	backbone_gw_free_ref(backbone_gw);
	return 1;
}
656
/* check for REQUEST frame, return 1 if handled */
static int handle_request(struct bat_priv *bat_priv,
			  struct hard_iface *primary_if,
			  uint8_t *backbone_addr,
			  struct ethhdr *ethhdr, short vid)
{
	/* check for REQUEST frame: hw_src must match the Ethernet dest */
	if (!compare_eth(backbone_addr, ethhdr->h_dest))
		return 0;

	/* sanity check, this should not happen on a normal switch,
	 * we ignore it in this case.
	 */
	if (!compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
		return 1;

	bat_dbg(DBG_BLA, bat_priv,
		"handle_request(): REQUEST vid %d (sent by %pM)...\n",
		vid, ethhdr->h_source);

	/* resend all of our claims plus an ANNOUNCE for this vlan */
	bla_answer_request(bat_priv, primary_if, vid);
	return 1;
}
680
/* check for UNCLAIM frame, return 1 if handled */
static int handle_unclaim(struct bat_priv *bat_priv,
			  struct hard_iface *primary_if,
			  uint8_t *backbone_addr,
			  uint8_t *claim_addr, short vid)
{
	struct backbone_gw *backbone_gw;

	/* unclaim in any case if it is our own; primary_if may be NULL
	 * when called from the purge path with now set
	 */
	if (primary_if && compare_eth(backbone_addr,
				      primary_if->net_dev->dev_addr))
		bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_DEL);

	backbone_gw = backbone_hash_find(bat_priv, backbone_addr, vid);

	if (!backbone_gw)
		return 1;

	/* this must be an UNCLAIM frame */
	bat_dbg(DBG_BLA, bat_priv,
		"handle_unclaim(): UNCLAIM %pM on vid %d (sent by %pM)...\n",
		claim_addr, vid, backbone_gw->orig);

	bla_del_claim(bat_priv, claim_addr, vid);
	backbone_gw_free_ref(backbone_gw);
	return 1;
}
708
/* check for CLAIM frame, return 1 if handled */
static int handle_claim(struct bat_priv *bat_priv,
			struct hard_iface *primary_if, uint8_t *backbone_addr,
			uint8_t *claim_addr, short vid)
{
	struct backbone_gw *backbone_gw;

	/* register the gateway if not yet available, and add the claim. */

	backbone_gw = bla_get_backbone_gw(bat_priv, backbone_addr, vid);

	if (unlikely(!backbone_gw))
		return 1;

	/* this must be a CLAIM frame */
	bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
	/* if the claim is our own, propagate it to the LAN segment */
	if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
		bla_send_claim(bat_priv, claim_addr, vid, CLAIM_TYPE_ADD);

	/* TODO: we could call something like tt_local_del() here. */

	backbone_gw_free_ref(backbone_gw);
	return 1;
}
733
/**
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary interface of this batman interface
 * @hw_src: the Hardware source in the ARP Header
 * @hw_dst: the Hardware destination in the ARP Header
 * @ethhdr: pointer to the Ethernet header of the claim frame
 *
 * checks if it is a claim packet and if its on the same group.
 * This function also applies the group ID of the sender
 * if it is in the same mesh.
 *
 * returns:
 *	2 - if it is a claim packet and on the same group
 *	1 - if is a claim packet from another group
 *	0 - if it is not a claim packet
 */
static int check_claim_group(struct bat_priv *bat_priv,
			     struct hard_iface *primary_if,
			     uint8_t *hw_src, uint8_t *hw_dst,
			     struct ethhdr *ethhdr)
{
	uint8_t *backbone_addr;
	struct orig_node *orig_node;
	struct bla_claim_dst *bla_dst, *bla_dst_own;

	bla_dst = (struct bla_claim_dst *)hw_dst;
	bla_dst_own = &bat_priv->claim_dest;

	/* check if it is a claim packet in general */
	if (memcmp(bla_dst->magic, bla_dst_own->magic,
		   sizeof(bla_dst->magic)) != 0)
		return 0;

	/* if announcement packet, use the source,
	 * otherwise assume it is in the hw_src
	 */
	switch (bla_dst->type) {
	case CLAIM_TYPE_ADD:
		backbone_addr = hw_src;
		break;
	case CLAIM_TYPE_REQUEST:
	case CLAIM_TYPE_ANNOUNCE:
	case CLAIM_TYPE_DEL:
		backbone_addr = ethhdr->h_source;
		break;
	default:
		/* unknown claim type: treat as not a claim packet */
		return 0;
	}

	/* don't accept claim frames from ourselves */
	if (compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
		return 0;

	/* if its already the same group, it is fine. */
	if (bla_dst->group == bla_dst_own->group)
		return 2;

	/* lets see if this originator is in our mesh */
	orig_node = orig_hash_find(bat_priv, backbone_addr);

	/* dont accept claims from gateways which are not in
	 * the same mesh or group.
	 */
	if (!orig_node)
		return 1;

	/* if our mesh friends mac is bigger, use it for ourselves. */
	if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
		bat_dbg(DBG_BLA, bat_priv,
			"taking other backbones claim group: %04x\n",
			ntohs(bla_dst->group));
		bla_dst_own->group = bla_dst->group;
	}

	orig_node_free_ref(orig_node);

	return 2;
}
811
812
/**
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @skb: the frame to be checked
 *
 * Check if this is a claim frame, and process it accordingly.
 *
 * returns 1 if it was a claim frame, otherwise return 0 to
 * tell the callee that it can use the frame on its own.
 */
static int bla_process_claim(struct bat_priv *bat_priv,
			     struct hard_iface *primary_if,
			     struct sk_buff *skb)
{
	struct ethhdr *ethhdr;
	struct vlan_ethhdr *vhdr;
	struct arphdr *arphdr;
	uint8_t *hw_src, *hw_dst;
	struct bla_claim_dst *bla_dst;
	uint16_t proto;
	int headlen;
	short vid = -1;	/* -1 means untagged frame */
	int ret;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* strip an optional 802.1Q header to find the real protocol */
	if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
		vhdr = (struct vlan_ethhdr *)ethhdr;
		vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
		proto = ntohs(vhdr->h_vlan_encapsulated_proto);
		headlen = sizeof(*vhdr);
	} else {
		proto = ntohs(ethhdr->h_proto);
		headlen = ETH_HLEN;
	}

	if (proto != ETH_P_ARP)
		return 0; /* not a claim frame */

	/* this must be a ARP frame. check if it is a claim. */

	if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
		return 0;

	/* pskb_may_pull() may have modified the pointers, get ethhdr again */
	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	arphdr = (struct arphdr *)((uint8_t *)ethhdr + headlen);

	/* Check whether the ARP frame carries a valid
	 * IP information
	 */

	if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
		return 0;
	if (arphdr->ar_pro != htons(ETH_P_IP))
		return 0;
	if (arphdr->ar_hln != ETH_ALEN)
		return 0;
	if (arphdr->ar_pln != 4)
		return 0;

	/* ARP payload layout: hw_src(6) ip_src(4) hw_dst(6) ip_dst(4) */
	hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
	hw_dst = hw_src + ETH_ALEN + 4;
	bla_dst = (struct bla_claim_dst *)hw_dst;

	/* check if it is a claim frame. */
	ret = check_claim_group(bat_priv, primary_if, hw_src, hw_dst, ethhdr);
	if (ret == 1)
		bat_dbg(DBG_BLA, bat_priv,
			"bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
			ethhdr->h_source, vid, hw_src, hw_dst);

	if (ret < 2)
		return ret;

	/* become a backbone gw ourselves on this vlan if not happened yet */
	bla_update_own_backbone_gw(bat_priv, primary_if, vid);

	/* check for the different types of claim frames ... */
	switch (bla_dst->type) {
	case CLAIM_TYPE_ADD:
		if (handle_claim(bat_priv, primary_if, hw_src,
				 ethhdr->h_source, vid))
			return 1;
		break;
	case CLAIM_TYPE_DEL:
		if (handle_unclaim(bat_priv, primary_if,
				   ethhdr->h_source, hw_src, vid))
			return 1;
		break;

	case CLAIM_TYPE_ANNOUNCE:
		if (handle_announce(bat_priv, hw_src, ethhdr->h_source, vid))
			return 1;
		break;
	case CLAIM_TYPE_REQUEST:
		if (handle_request(bat_priv, primary_if, hw_src, ethhdr, vid))
			return 1;
		break;
	}

	bat_dbg(DBG_BLA, bat_priv,
		"bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
		ethhdr->h_source, vid, hw_src, hw_dst);
	return 1;
}
918
/* Check when we last heard from other nodes, and remove them in case of
 * a time out, or clean all backbone gws if now is set.
 */
static void bla_purge_backbone_gw(struct bat_priv *bat_priv, int now)
{
	struct backbone_gw *backbone_gw;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	struct hashtable_t *hash;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	int i;

	hash = bat_priv->backbone_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(backbone_gw, node, node_tmp,
					  head, hash_entry) {
			if (now)
				goto purge_now;
			if (!has_timed_out(backbone_gw->lasttime,
					   BLA_BACKBONE_TIMEOUT))
				continue;

			bat_dbg(DBG_BLA, backbone_gw->bat_priv,
				"bla_purge_backbone_gw(): backbone gw %pM timed out\n",
				backbone_gw->orig);

purge_now:
			/* don't wait for the pending request anymore */
			if (atomic_read(&backbone_gw->request_sent))
				atomic_dec(&bat_priv->bla_num_requests);

			/* remove all claims owned by this gateway */
			bla_del_backbone_claims(backbone_gw);

			hlist_del_rcu(node);
			backbone_gw_free_ref(backbone_gw);
		}
		spin_unlock_bh(list_lock);
	}
}
965
/**
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the selected primary interface, may be NULL if now is set
 * @now: whether the whole hash shall be wiped now
 *
 * Check when we heard last time from our own claims, and remove them in case of
 * a time out, or clean all claims if now is set
 */
static void bla_purge_claims(struct bat_priv *bat_priv,
			     struct hard_iface *primary_if, int now)
{
	struct claim *claim;
	struct hlist_node *node;
	struct hlist_head *head;
	struct hashtable_t *hash;
	int i;

	hash = bat_priv->claim_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
			/* when now is set, purge unconditionally - this path
			 * must not dereference primary_if (may be NULL)
			 */
			if (now)
				goto purge_now;
			/* only our own claims are subject to the timeout */
			if (!compare_eth(claim->backbone_gw->orig,
					 primary_if->net_dev->dev_addr))
				continue;
			if (!has_timed_out(claim->lasttime,
					   BLA_CLAIM_TIMEOUT))
				continue;

			bat_dbg(DBG_BLA, bat_priv,
				"bla_purge_claims(): %pM, vid %d, time out\n",
				claim->addr, claim->vid);

purge_now:
			handle_unclaim(bat_priv, primary_if,
				       claim->backbone_gw->orig,
				       claim->addr, claim->vid);
		}
		rcu_read_unlock();
	}
}
1013
 1014/**
 * bla_update_orig_address - update backbone gateway entries after the
 *	primary interface address changed
 1015 * @bat_priv: the bat priv with all the soft interface information
 1016 * @primary_if: the new selected primary_if
 1017 * @oldif: the old primary interface, may be NULL
 1018 *
 1019 * Update the backbone gateways when the own orig address changes.
 1020 *
 1021 */
 1022void bla_update_orig_address(struct bat_priv *bat_priv,
 1023			     struct hard_iface *primary_if,
 1024			     struct hard_iface *oldif)
 1025{
 1026	struct backbone_gw *backbone_gw;
 1027	struct hlist_node *node;
 1028	struct hlist_head *head;
 1029	struct hashtable_t *hash;
 1030	int i;
 1031
 1032	/* reset bridge loop avoidance group id */
 1033	bat_priv->claim_dest.group =
 1034		htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
 1035
	/* without an old interface there is no old address to translate
	 * from; wipe all claims and backbone gateways instead
	 */
 1036	if (!oldif) {
 1037		bla_purge_claims(bat_priv, NULL, 1);
 1038		bla_purge_backbone_gw(bat_priv, 1);
 1039		return;
 1040	}
 1041
 1042	hash = bat_priv->backbone_hash;
 1043	if (!hash)
 1044		return;
 1045
 1046	for (i = 0; i < hash->size; i++) {
 1047		head = &hash->table[i];
 1048
 1049		rcu_read_lock();
 1050		hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
 1051			/* own orig still holds the old value. */
 1052			if (!compare_eth(backbone_gw->orig,
 1053					 oldif->net_dev->dev_addr))
 1054				continue;
 1055
			/* NOTE(review): orig is rewritten in place under
			 * rcu_read_lock() only and the entry is not
			 * repositioned in the hash — confirm the hash choose
			 * function copes with a changed orig.
			 */
 1056			memcpy(backbone_gw->orig,
 1057			       primary_if->net_dev->dev_addr, ETH_ALEN);
 1058			/* send an announce frame so others will ask for our
 1059			 * claims and update their tables.
 1060			 */
 1061			bla_send_announce(bat_priv, backbone_gw);
 1062		}
 1063		rcu_read_unlock();
 1064	}
 1065}
1066
1067
1068
1069/* (re)start the timer */
1070static void bla_start_timer(struct bat_priv *bat_priv)
1071{
1072 INIT_DELAYED_WORK(&bat_priv->bla_work, bla_periodic_work);
1073 queue_delayed_work(bat_event_workqueue, &bat_priv->bla_work,
1074 msecs_to_jiffies(BLA_PERIOD_LENGTH));
1075}
1076
 1077/* periodic work to do:
 1078 *  * purge structures when they are too old
 1079 *  * send announcements
 1080 */
static void bla_periodic_work(struct work_struct *work)
 1082{
 1083	struct delayed_work *delayed_work =
 1084		container_of(work, struct delayed_work, work);
 1085	struct bat_priv *bat_priv =
 1086		container_of(delayed_work, struct bat_priv, bla_work);
 1087	struct hlist_node *node;
 1088	struct hlist_head *head;
 1089	struct backbone_gw *backbone_gw;
 1090	struct hashtable_t *hash;
 1091	struct hard_iface *primary_if;
 1092	int i;
 1093
 1094	primary_if = primary_if_get_selected(bat_priv);
 1095	if (!primary_if)
 1096		goto out;
 1097
	/* age out claims and backbone gateway entries first */
 1098	bla_purge_claims(bat_priv, primary_if, 0);
 1099	bla_purge_backbone_gw(bat_priv, 0);
 1100
	/* purging happens even while bla is disabled, but announcing
	 * our own entries only makes sense when it is enabled
	 */
 1101	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
 1102		goto out;
 1103
 1104	hash = bat_priv->backbone_hash;
 1105	if (!hash)
 1106		goto out;
 1107
 1108	for (i = 0; i < hash->size; i++) {
 1109		head = &hash->table[i];
 1110
 1111		rcu_read_lock();
 1112		hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
			/* only announce entries we own ourselves */
 1113			if (!compare_eth(backbone_gw->orig,
 1114					 primary_if->net_dev->dev_addr))
 1115				continue;
 1116
			/* refresh our own timestamp so the purge above does
			 * not remove our own entry
			 */
 1117			backbone_gw->lasttime = jiffies;
 1118
 1119			bla_send_announce(bat_priv, backbone_gw);
 1120		}
 1121		rcu_read_unlock();
 1122	}
 1123out:
 1124	if (primary_if)
 1125		hardif_free_ref(primary_if);
 1126
	/* re-arm the timer even when bailing out early above */
 1127	bla_start_timer(bat_priv);
 1128}
1129
 1130/* initialize all bla structures */
 1131int bla_init(struct bat_priv *bat_priv)
 1132{
 1133	int i;
	/* fixed destination for claim frames: only the first three magic
	 * bytes are copied below; the group id is filled in afterwards
	 */
 1134	uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
 1135	struct hard_iface *primary_if;
 1136
 1137	bat_dbg(DBG_BLA, bat_priv, "bla hash registering\n");
 1138
 1139	/* setting claim destination address */
 1140	memcpy(&bat_priv->claim_dest.magic, claim_dest, 3);
 1141	bat_priv->claim_dest.type = 0;
 1142	primary_if = primary_if_get_selected(bat_priv);
 1143	if (primary_if) {
		/* group id is the crc16 of the primary interface address */
 1144		bat_priv->claim_dest.group =
 1145			htons(crc16(0, primary_if->net_dev->dev_addr,
 1146				    ETH_ALEN));
 1147		hardif_free_ref(primary_if);
 1148	} else {
 1149		bat_priv->claim_dest.group = 0; /* will be set later */
 1150	}
 1151
	/* initialize the duplicate list: mark every entry as already
	 * timed out so it cannot match an incoming broadcast
	 */
 1152	/* initialize the duplicate list */
 1153	for (i = 0; i < DUPLIST_SIZE; i++)
 1154		bat_priv->bcast_duplist[i].entrytime =
 1155			jiffies - msecs_to_jiffies(DUPLIST_TIMEOUT);
 1156	bat_priv->bcast_duplist_curr = 0;
 1157
	/* the hashes survive across calls; don't allocate them twice */
 1158	if (bat_priv->claim_hash)
 1159		return 1;
 1160
 1161	bat_priv->claim_hash = hash_new(128);
 1162	bat_priv->backbone_hash = hash_new(32);
 1163
	/* NOTE(review): a hash that was allocated before the other one
	 * failed is not freed here — presumably the caller tears down via
	 * bla_free() on error; confirm at the call site.
	 */
 1164	if (!bat_priv->claim_hash || !bat_priv->backbone_hash)
 1165		return -1;
 1166
 1167	bat_dbg(DBG_BLA, bat_priv, "bla hashes initialized\n");
 1168
 1169	bla_start_timer(bat_priv);
	/* return convention: 1 on success, -1 on error */
 1170	return 1;
 1171}
1172
 1173/**
 * bla_check_bcast_duplist - check a broadcast against recently seen ones
 1174 * @bat_priv: the bat priv with all the soft interface information
 1175 * @bcast_packet: the broadcast packet to be checked
 1176 * @hdr_size: total length of the frame; the payload length is derived
 *	from it below (NOTE(review): the name suggests a header size —
 *	confirm what callers actually pass)
 1177 *
 1178 * check if it is on our broadcast list. Another gateway might
 1179 * have sent the same packet because it is connected to the same backbone,
 1180 * so we have to remove this duplicate.
 1181 *
 1182 * This is performed by checking the CRC, which will tell us
 1183 * with a good chance that it is the same packet. If it is furthermore
 1184 * sent by another host, drop it. We allow equal packets from
 1185 * the same host however as this might be intended.
 1186 *
 1187 **/
 1188
 1189int bla_check_bcast_duplist(struct bat_priv *bat_priv,
 1190			    struct bcast_packet *bcast_packet,
 1191			    int hdr_size)
 1192{
 1193	int i, length, curr;
 1194	uint8_t *content;
 1195	uint16_t crc;
 1196	struct bcast_duplist_entry *entry;
 1197
	/* the payload starts right behind the bcast_packet header */
 1198	length = hdr_size - sizeof(*bcast_packet);
 1199	content = (uint8_t *)bcast_packet;
 1200	content += sizeof(*bcast_packet);
 1201
 1202	/* calculate the crc ... */
 1203	crc = crc16(0, content, length);
 1204
	/* walk the ring buffer starting at the most recent entry */
 1205	for (i = 0 ; i < DUPLIST_SIZE; i++) {
 1206		curr = (bat_priv->bcast_duplist_curr + i) % DUPLIST_SIZE;
 1207		entry = &bat_priv->bcast_duplist[curr];
 1208
 1209		/* we can stop searching if the entry is too old ;
 1210		 * later entries will be even older
 1211		 */
 1212		if (has_timed_out(entry->entrytime, DUPLIST_TIMEOUT))
 1213			break;
 1214
 1215		if (entry->crc != crc)
 1216			continue;
 1217
		/* same originator: retransmission by the same host is
		 * allowed, keep searching
		 */
 1218		if (compare_eth(entry->orig, bcast_packet->orig))
 1219			continue;
 1220
 1221		/* this entry seems to match: same crc, not too old,
 1222		 * and from another gw. therefore return 1 to forbid it.
 1223		 */
 1224		return 1;
 1225	}
 1226	/* not found, add a new entry (overwrite the oldest entry) */
 1227	curr = (bat_priv->bcast_duplist_curr + DUPLIST_SIZE - 1) % DUPLIST_SIZE;
 1228	entry = &bat_priv->bcast_duplist[curr];
 1229	entry->crc = crc;
 1230	entry->entrytime = jiffies;
 1231	memcpy(entry->orig, bcast_packet->orig, ETH_ALEN);
 1232	bat_priv->bcast_duplist_curr = curr;
 1233
 1234	/* allow it, it's the first occurrence. */
 1235	return 0;
 1236}
1237
1238
1239
1240/**
1241 * @bat_priv: the bat priv with all the soft interface information
1242 * @orig: originator mac address
1243 *
1244 * check if the originator is a gateway for any VLAN ID.
1245 *
1246 * returns 1 if it is found, 0 otherwise
1247 *
1248 */
1249
1250int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig)
1251{
1252 struct hashtable_t *hash = bat_priv->backbone_hash;
1253 struct hlist_head *head;
1254 struct hlist_node *node;
1255 struct backbone_gw *backbone_gw;
1256 int i;
1257
1258 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1259 return 0;
1260
1261 if (!hash)
1262 return 0;
1263
1264 for (i = 0; i < hash->size; i++) {
1265 head = &hash->table[i];
1266
1267 rcu_read_lock();
1268 hlist_for_each_entry_rcu(backbone_gw, node, head, hash_entry) {
1269 if (compare_eth(backbone_gw->orig, orig)) {
1270 rcu_read_unlock();
1271 return 1;
1272 }
1273 }
1274 rcu_read_unlock();
1275 }
1276
1277 return 0;
1278}
1279
1280
 1281/**
 * bla_is_backbone_gw - check if the frame's sender is a backbone gateway
 *	for the frame's VLAN
 1282 * @skb: the frame to be checked
 1283 * @orig_node: the orig_node of the frame
 1284 * @hdr_size: maximum length of the frame
 1285 *
 1286 * bla_is_backbone_gw inspects the skb for the VLAN ID and returns 1
 1287 * if the orig_node is also a gateway on the soft interface, otherwise it
 1288 * returns 0.
 1289 *
 1290 */
 1291int bla_is_backbone_gw(struct sk_buff *skb,
 1292		       struct orig_node *orig_node, int hdr_size)
 1293{
 1294	struct ethhdr *ethhdr;
 1295	struct vlan_ethhdr *vhdr;
 1296	struct backbone_gw *backbone_gw;
	/* -1 denotes "no VLAN tag present" */
 1297	short vid = -1;
 1298
 1299	if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
 1300		return 0;
 1301
 1302	/* first, find out the vid. */
	/* make sure the inner ethernet header is in the linear skb area */
 1303	if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
 1304		return 0;
 1305
	/* the inner ethernet header sits behind the batman header */
 1306	ethhdr = (struct ethhdr *)(((uint8_t *)skb->data) + hdr_size);
 1307
 1308	if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) {
		/* re-check linearity for the longer VLAN header */
 1309		if (!pskb_may_pull(skb, hdr_size + sizeof(struct vlan_ethhdr)))
 1310			return 0;
 1311
 1312		vhdr = (struct vlan_ethhdr *)(((uint8_t *)skb->data) +
 1313					      hdr_size);
 1314		vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
 1315	}
 1316
 1317	/* see if this originator is a backbone gw for this VLAN */
 1318
 1319	backbone_gw = backbone_hash_find(orig_node->bat_priv,
 1320					 orig_node->orig, vid);
 1321	if (!backbone_gw)
 1322		return 0;
 1323
	/* only existence matters here; drop the reference right away */
 1324	backbone_gw_free_ref(backbone_gw);
 1325	return 1;
 1326}
1327
 1328/* free all bla structures (for softinterface free or module unload) */
 1329void bla_free(struct bat_priv *bat_priv)
 1330{
 1331	struct hard_iface *primary_if;
 1332
	/* stop the periodic worker before tearing down the hashes */
 1333	cancel_delayed_work_sync(&bat_priv->bla_work);
 1334	primary_if = primary_if_get_selected(bat_priv);
 1335
 1336	if (bat_priv->claim_hash) {
		/* release all claims (now=1) before destroying the hash */
 1337		bla_purge_claims(bat_priv, primary_if, 1);
 1338		hash_destroy(bat_priv->claim_hash);
 1339		bat_priv->claim_hash = NULL;
 1340	}
 1341	if (bat_priv->backbone_hash) {
		/* same for the backbone gateway entries */
 1342		bla_purge_backbone_gw(bat_priv, 1);
 1343		hash_destroy(bat_priv->backbone_hash);
 1344		bat_priv->backbone_hash = NULL;
 1345	}
 1346	if (primary_if)
 1347		hardif_free_ref(primary_if);
 1348}
1349
 1350/**
 * bla_rx - check a frame received from the mesh against the claim table
 1351 * @bat_priv: the bat priv with all the soft interface information
 1352 * @skb: the frame to be checked
 1353 * @vid: the VLAN ID of the frame
 1354 *
 1355 * bla_rx avoidance checks if:
 1356 *  * we have to race for a claim
 1357 *  * if the frame is allowed on the LAN
 1358 *
 1359 * in these cases, the skb is further handled by this function and
 1360 * returns 1, otherwise it returns 0 and the caller shall further
 1361 * process the skb.
 1362 *
 * When 1 is returned, the skb has been freed here via kfree_skb().
 1363 */
 1364int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
 1365{
 1366	struct ethhdr *ethhdr;
 1367	struct claim search_claim, *claim = NULL;
 1368	struct hard_iface *primary_if;
 1369	int ret;
 1370
 1371	ethhdr = (struct ethhdr *)skb_mac_header(skb);
 1372
 1373	primary_if = primary_if_get_selected(bat_priv);
	/* without a primary interface, the frame is dropped */
 1374	if (!primary_if)
 1375		goto handled;
 1376
	/* bla disabled: let everything through untouched */
 1377	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
 1378		goto allow;
 1379
 1380
 1381	if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
 1382		/* don't allow broadcasts while requests are in flight */
 1383		if (is_multicast_ether_addr(ethhdr->h_dest))
 1384			goto handled;
 1385
	/* look up a claim for the frame's source client on this vlan */
 1386	memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
 1387	search_claim.vid = vid;
 1388	claim = claim_hash_find(bat_priv, &search_claim);
 1389
 1390	if (!claim) {
 1391		/* possible optimization: race for a claim */
 1392		/* No claim exists yet, claim it for us!
 1393		 */
 1394		handle_claim(bat_priv, primary_if,
 1395			     primary_if->net_dev->dev_addr,
 1396			     ethhdr->h_source, vid);
 1397		goto allow;
 1398	}
 1399
 1400	/* if it is our own claim ... */
 1401	if (compare_eth(claim->backbone_gw->orig,
 1402			primary_if->net_dev->dev_addr)) {
 1403		/* ... allow it in any case */
 1404		claim->lasttime = jiffies;
 1405		goto allow;
 1406	}
 1407
 1408	/* if it is a broadcast ... */
 1409	if (is_multicast_ether_addr(ethhdr->h_dest)) {
 1410		/* ... drop it. the responsible gateway is in charge. */
 1411		goto handled;
 1412	} else {
 1413		/* seems the client considers us as its best gateway.
 1414		 * send a claim and update the claim table
 1415		 * immediately.
 1416		 */
 1417		handle_claim(bat_priv, primary_if,
 1418			     primary_if->net_dev->dev_addr,
 1419			     ethhdr->h_source, vid);
 1420		goto allow;
 1421	}
 1422allow:
 1423	bla_update_own_backbone_gw(bat_priv, primary_if, vid);
 1424	ret = 0;
 1425	goto out;
 1426
 1427handled:
	/* the frame is not forwarded any further; free it here */
 1428	kfree_skb(skb);
 1429	ret = 1;
 1430
 1431out:
 1432	if (primary_if)
 1433		hardif_free_ref(primary_if);
 1434	if (claim)
 1435		claim_free_ref(claim);
 1436	return ret;
 1437}
1438
 1439/**
 * bla_tx - check a frame to be sent towards the mesh
 1440 * @bat_priv: the bat priv with all the soft interface information
 1441 * @skb: the frame to be checked
 1442 * @vid: the VLAN ID of the frame
 1443 *
 1444 * bla_tx checks if:
 1445 *  * a claim was received which has to be processed
 1446 *  * the frame is allowed on the mesh
 1447 *
 1448 * in these cases, the skb is further handled by this function and
 1449 * returns 1, otherwise it returns 0 and the caller shall further
 1450 * process the skb.
 1451 *
 * Unlike bla_rx(), the handled path does not free the skb here; the
 * caller is expected to drop it when 1 is returned.
 1452 */
 1453int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid)
 1454{
 1455	struct ethhdr *ethhdr;
 1456	struct claim search_claim, *claim = NULL;
 1457	struct hard_iface *primary_if;
 1458	int ret = 0;
 1459
 1460	primary_if = primary_if_get_selected(bat_priv);
 1461	if (!primary_if)
 1462		goto out;
 1463
 1464	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
 1465		goto allow;
 1466
 1467	/* in VLAN case, the mac header might not be set. */
 1468	skb_reset_mac_header(skb);
 1469
	/* bla claim frames are consumed by the protocol itself and are
	 * never forwarded into the mesh
	 */
 1470	if (bla_process_claim(bat_priv, primary_if, skb))
 1471		goto handled;
 1472
 1473	ethhdr = (struct ethhdr *)skb_mac_header(skb);
 1474
 1475	if (unlikely(atomic_read(&bat_priv->bla_num_requests)))
 1476		/* don't allow broadcasts while requests are in flight */
 1477		if (is_multicast_ether_addr(ethhdr->h_dest))
 1478			goto handled;
 1479
 1480	memcpy(search_claim.addr, ethhdr->h_source, ETH_ALEN);
 1481	search_claim.vid = vid;
 1482
 1483	claim = claim_hash_find(bat_priv, &search_claim);
 1484
 1485	/* if no claim exists, allow it. */
 1486	if (!claim)
 1487		goto allow;
 1488
 1489	/* check if we are responsible. */
 1490	if (compare_eth(claim->backbone_gw->orig,
 1491			primary_if->net_dev->dev_addr)) {
 1492		/* if yes, the client has roamed and we have
 1493		 * to unclaim it.
 1494		 */
 1495		handle_unclaim(bat_priv, primary_if,
 1496			       primary_if->net_dev->dev_addr,
 1497			       ethhdr->h_source, vid);
 1498		goto allow;
 1499	}
 1500
 1501	/* check if it is a multicast/broadcast frame */
 1502	if (is_multicast_ether_addr(ethhdr->h_dest)) {
 1503		/* drop it. the responsible gateway has forwarded it into
 1504		 * the backbone network.
 1505		 */
 1506		goto handled;
 1507	} else {
 1508		/* we must allow it. at least if we are
 1509		 * responsible for the DESTINATION.
 1510		 */
 1511		goto allow;
 1512	}
 1513allow:
 1514	bla_update_own_backbone_gw(bat_priv, primary_if, vid);
 1515	ret = 0;
 1516	goto out;
 1517handled:
 1518	ret = 1;
 1519out:
 1520	if (primary_if)
 1521		hardif_free_ref(primary_if);
 1522	if (claim)
 1523		claim_free_ref(claim);
 1524	return ret;
 1525}
1526
/**
 * bla_claim_table_seq_print_text - seq_file handler printing the local
 *	claim table
 * @seq: seq file to print on
 * @offset: unused, required by the seq_file interface
 *
 * Lists each claim with its VLAN ID, the announcing backbone gateway and
 * the gateway's CRC; our own claims are marked with 'x'.
 */
 1527int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
 1528{
 1529	struct net_device *net_dev = (struct net_device *)seq->private;
 1530	struct bat_priv *bat_priv = netdev_priv(net_dev);
 1531	struct hashtable_t *hash = bat_priv->claim_hash;
 1532	struct claim *claim;
 1533	struct hard_iface *primary_if;
 1534	struct hlist_node *node;
 1535	struct hlist_head *head;
 1536	uint32_t i;
 1537	bool is_own;
 1538	int ret = 0;
 1539
 1540	primary_if = primary_if_get_selected(bat_priv);
 1541	if (!primary_if) {
 1542		ret = seq_printf(seq,
 1543				 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
 1544				 net_dev->name);
 1545		goto out;
 1546	}
 1547
 1548	if (primary_if->if_status != IF_ACTIVE) {
 1549		ret = seq_printf(seq,
 1550				 "BATMAN mesh %s disabled - primary interface not active\n",
 1551				 net_dev->name);
 1552		goto out;
 1553	}
 1554
 1555	seq_printf(seq,
 1556		   "Claims announced for the mesh %s (orig %pM, group id %04x)\n",
 1557		   net_dev->name, primary_if->net_dev->dev_addr,
 1558		   ntohs(bat_priv->claim_dest.group));
 1559	seq_printf(seq, "   %-17s    %-5s %-17s [o] (%-4s)\n",
 1560		   "Client", "VID", "Originator", "CRC");
 1561	for (i = 0; i < hash->size; i++) {
 1562		head = &hash->table[i];
 1563
 1564		rcu_read_lock();
 1565		hlist_for_each_entry_rcu(claim, node, head, hash_entry) {
 1566			is_own = compare_eth(claim->backbone_gw->orig,
 1567					     primary_if->net_dev->dev_addr);
 1568			seq_printf(seq, " * %pM on % 5d by %pM [%c] (%04x)\n",
 1569				   claim->addr, claim->vid,
 1570				   claim->backbone_gw->orig,
 1571				   (is_own ? 'x' : ' '),
 1572				   claim->backbone_gw->crc);
 1573		}
 1574		rcu_read_unlock();
 1575	}
 1576out:
 1577	if (primary_if)
 1578		hardif_free_ref(primary_if);
 1579	return ret;
 1580}
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
new file mode 100644
index 000000000000..4a8e4fc766bc
--- /dev/null
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -0,0 +1,98 @@
1/*
2 * Copyright (C) 2011 B.A.T.M.A.N. contributors:
3 *
4 * Simon Wunderlich
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of version 2 of the GNU General Public
8 * License as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18 * 02110-1301, USA
19 *
20 */
21
22#ifndef _NET_BATMAN_ADV_BLA_H_
23#define _NET_BATMAN_ADV_BLA_H_
24
25#ifdef CONFIG_BATMAN_ADV_BLA
26int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
27int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid);
28int bla_is_backbone_gw(struct sk_buff *skb,
29 struct orig_node *orig_node, int hdr_size);
30int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset);
31int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig);
32int bla_check_bcast_duplist(struct bat_priv *bat_priv,
33 struct bcast_packet *bcast_packet, int hdr_size);
34void bla_update_orig_address(struct bat_priv *bat_priv,
35 struct hard_iface *primary_if,
36 struct hard_iface *oldif);
37int bla_init(struct bat_priv *bat_priv);
38void bla_free(struct bat_priv *bat_priv);
39
40#define BLA_CRC_INIT 0
41#else /* ifdef CONFIG_BATMAN_ADV_BLA */
42
43static inline int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb,
44 short vid)
45{
46 return 0;
47}
48
49static inline int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb,
50 short vid)
51{
52 return 0;
53}
54
55static inline int bla_is_backbone_gw(struct sk_buff *skb,
56 struct orig_node *orig_node,
57 int hdr_size)
58{
59 return 0;
60}
61
62static inline int bla_claim_table_seq_print_text(struct seq_file *seq,
63 void *offset)
64{
65 return 0;
66}
67
68static inline int bla_is_backbone_gw_orig(struct bat_priv *bat_priv,
69 uint8_t *orig)
70{
71 return 0;
72}
73
74static inline int bla_check_bcast_duplist(struct bat_priv *bat_priv,
75 struct bcast_packet *bcast_packet,
76 int hdr_size)
77{
78 return 0;
79}
80
81static inline void bla_update_orig_address(struct bat_priv *bat_priv,
82 struct hard_iface *primary_if,
83 struct hard_iface *oldif)
84{
85}
86
87static inline int bla_init(struct bat_priv *bat_priv)
88{
89 return 1;
90}
91
92static inline void bla_free(struct bat_priv *bat_priv)
93{
94}
95
96#endif /* ifdef CONFIG_BATMAN_ADV_BLA */
97
98#endif /* ifndef _NET_BATMAN_ADV_BLA_H_ */
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 377897701a85..47c79d724ba3 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -28,6 +28,7 @@
28#include "bat_sysfs.h" 28#include "bat_sysfs.h"
29#include "originator.h" 29#include "originator.h"
30#include "hash.h" 30#include "hash.h"
31#include "bridge_loop_avoidance.h"
31 32
32#include <linux/if_arp.h> 33#include <linux/if_arp.h>
33 34
@@ -107,7 +108,8 @@ out:
107 return hard_iface; 108 return hard_iface;
108} 109}
109 110
110static void primary_if_update_addr(struct bat_priv *bat_priv) 111static void primary_if_update_addr(struct bat_priv *bat_priv,
112 struct hard_iface *oldif)
111{ 113{
112 struct vis_packet *vis_packet; 114 struct vis_packet *vis_packet;
113 struct hard_iface *primary_if; 115 struct hard_iface *primary_if;
@@ -122,6 +124,7 @@ static void primary_if_update_addr(struct bat_priv *bat_priv)
122 memcpy(vis_packet->sender_orig, 124 memcpy(vis_packet->sender_orig,
123 primary_if->net_dev->dev_addr, ETH_ALEN); 125 primary_if->net_dev->dev_addr, ETH_ALEN);
124 126
127 bla_update_orig_address(bat_priv, primary_if, oldif);
125out: 128out:
126 if (primary_if) 129 if (primary_if)
127 hardif_free_ref(primary_if); 130 hardif_free_ref(primary_if);
@@ -140,14 +143,15 @@ static void primary_if_select(struct bat_priv *bat_priv,
140 curr_hard_iface = rcu_dereference_protected(bat_priv->primary_if, 1); 143 curr_hard_iface = rcu_dereference_protected(bat_priv->primary_if, 1);
141 rcu_assign_pointer(bat_priv->primary_if, new_hard_iface); 144 rcu_assign_pointer(bat_priv->primary_if, new_hard_iface);
142 145
143 if (curr_hard_iface)
144 hardif_free_ref(curr_hard_iface);
145
146 if (!new_hard_iface) 146 if (!new_hard_iface)
147 return; 147 goto out;
148 148
149 bat_priv->bat_algo_ops->bat_ogm_init_primary(new_hard_iface); 149 bat_priv->bat_algo_ops->bat_primary_iface_set(new_hard_iface);
150 primary_if_update_addr(bat_priv); 150 primary_if_update_addr(bat_priv, curr_hard_iface);
151
152out:
153 if (curr_hard_iface)
154 hardif_free_ref(curr_hard_iface);
151} 155}
152 156
153static bool hardif_is_iface_up(const struct hard_iface *hard_iface) 157static bool hardif_is_iface_up(const struct hard_iface *hard_iface)
@@ -300,22 +304,17 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
300 if (!softif_is_valid(soft_iface)) { 304 if (!softif_is_valid(soft_iface)) {
301 pr_err("Can't create batman mesh interface %s: already exists as regular interface\n", 305 pr_err("Can't create batman mesh interface %s: already exists as regular interface\n",
302 soft_iface->name); 306 soft_iface->name);
303 dev_put(soft_iface);
304 ret = -EINVAL; 307 ret = -EINVAL;
305 goto err; 308 goto err_dev;
306 } 309 }
307 310
308 hard_iface->soft_iface = soft_iface; 311 hard_iface->soft_iface = soft_iface;
309 bat_priv = netdev_priv(hard_iface->soft_iface); 312 bat_priv = netdev_priv(hard_iface->soft_iface);
310 313
311 bat_priv->bat_algo_ops->bat_ogm_init(hard_iface); 314 ret = bat_priv->bat_algo_ops->bat_iface_enable(hard_iface);
312 315 if (ret < 0) {
313 if (!hard_iface->packet_buff) {
314 bat_err(hard_iface->soft_iface,
315 "Can't add interface packet (%s): out of memory\n",
316 hard_iface->net_dev->name);
317 ret = -ENOMEM; 316 ret = -ENOMEM;
318 goto err; 317 goto err_dev;
319 } 318 }
320 319
321 hard_iface->if_num = bat_priv->num_ifaces; 320 hard_iface->if_num = bat_priv->num_ifaces;
@@ -328,7 +327,6 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
328 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev; 327 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
329 dev_add_pack(&hard_iface->batman_adv_ptype); 328 dev_add_pack(&hard_iface->batman_adv_ptype);
330 329
331 atomic_set(&hard_iface->seqno, 1);
332 atomic_set(&hard_iface->frag_seqno, 1); 330 atomic_set(&hard_iface->frag_seqno, 1);
333 bat_info(hard_iface->soft_iface, "Adding interface: %s\n", 331 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
334 hard_iface->net_dev->name); 332 hard_iface->net_dev->name);
@@ -360,6 +358,8 @@ int hardif_enable_interface(struct hard_iface *hard_iface,
360out: 358out:
361 return 0; 359 return 0;
362 360
361err_dev:
362 dev_put(soft_iface);
363err: 363err:
364 hardif_free_ref(hard_iface); 364 hardif_free_ref(hard_iface);
365 return ret; 365 return ret;
@@ -394,8 +394,7 @@ void hardif_disable_interface(struct hard_iface *hard_iface)
394 hardif_free_ref(new_if); 394 hardif_free_ref(new_if);
395 } 395 }
396 396
397 kfree(hard_iface->packet_buff); 397 bat_priv->bat_algo_ops->bat_iface_disable(hard_iface);
398 hard_iface->packet_buff = NULL;
399 hard_iface->if_status = IF_NOT_IN_USE; 398 hard_iface->if_status = IF_NOT_IN_USE;
400 399
401 /* delete all references to this hard_iface */ 400 /* delete all references to this hard_iface */
@@ -447,6 +446,13 @@ static struct hard_iface *hardif_add_interface(struct net_device *net_dev)
447 check_known_mac_addr(hard_iface->net_dev); 446 check_known_mac_addr(hard_iface->net_dev);
448 list_add_tail_rcu(&hard_iface->list, &hardif_list); 447 list_add_tail_rcu(&hard_iface->list, &hardif_list);
449 448
449 /**
450 * This can't be called via a bat_priv callback because
451 * we have no bat_priv yet.
452 */
453 atomic_set(&hard_iface->seqno, 1);
454 hard_iface->packet_buff = NULL;
455
450 return hard_iface; 456 return hard_iface;
451 457
452free_if: 458free_if:
@@ -531,7 +537,7 @@ static int hard_if_event(struct notifier_block *this,
531 goto hardif_put; 537 goto hardif_put;
532 538
533 if (hard_iface == primary_if) 539 if (hard_iface == primary_if)
534 primary_if_update_addr(bat_priv); 540 primary_if_update_addr(bat_priv, NULL);
535 break; 541 break;
536 default: 542 default:
537 break; 543 break;
@@ -568,8 +574,7 @@ static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
568 goto err_free; 574 goto err_free;
569 575
570 /* expect a valid ethernet header here. */ 576 /* expect a valid ethernet header here. */
571 if (unlikely(skb->mac_len != sizeof(struct ethhdr) || 577 if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
572 !skb_mac_header(skb)))
573 goto err_free; 578 goto err_free;
574 579
575 if (!hard_iface->soft_iface) 580 if (!hard_iface->soft_iface)
@@ -598,7 +603,7 @@ static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
598 603
599 switch (batman_ogm_packet->header.packet_type) { 604 switch (batman_ogm_packet->header.packet_type) {
600 /* batman originator packet */ 605 /* batman originator packet */
601 case BAT_OGM: 606 case BAT_IV_OGM:
602 ret = recv_bat_ogm_packet(skb, hard_iface); 607 ret = recv_bat_ogm_packet(skb, hard_iface);
603 break; 608 break;
604 609
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index b87518edcef9..2e98a57f3407 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -175,13 +175,13 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff,
175 if (len >= sizeof(struct icmp_packet_rr)) 175 if (len >= sizeof(struct icmp_packet_rr))
176 packet_len = sizeof(struct icmp_packet_rr); 176 packet_len = sizeof(struct icmp_packet_rr);
177 177
178 skb = dev_alloc_skb(packet_len + sizeof(struct ethhdr)); 178 skb = dev_alloc_skb(packet_len + ETH_HLEN);
179 if (!skb) { 179 if (!skb) {
180 len = -ENOMEM; 180 len = -ENOMEM;
181 goto out; 181 goto out;
182 } 182 }
183 183
184 skb_reserve(skb, sizeof(struct ethhdr)); 184 skb_reserve(skb, ETH_HLEN);
185 icmp_packet = (struct icmp_packet_rr *)skb_put(skb, packet_len); 185 icmp_packet = (struct icmp_packet_rr *)skb_put(skb, packet_len);
186 186
187 if (copy_from_user(icmp_packet, buff, packet_len)) { 187 if (copy_from_user(icmp_packet, buff, packet_len)) {
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 6d51caaf8cec..791327219531 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -30,6 +30,7 @@
30#include "translation-table.h" 30#include "translation-table.h"
31#include "hard-interface.h" 31#include "hard-interface.h"
32#include "gateway_client.h" 32#include "gateway_client.h"
33#include "bridge_loop_avoidance.h"
33#include "vis.h" 34#include "vis.h"
34#include "hash.h" 35#include "hash.h"
35#include "bat_algo.h" 36#include "bat_algo.h"
@@ -96,13 +97,10 @@ int mesh_init(struct net_device *soft_iface)
96 spin_lock_init(&bat_priv->gw_list_lock); 97 spin_lock_init(&bat_priv->gw_list_lock);
97 spin_lock_init(&bat_priv->vis_hash_lock); 98 spin_lock_init(&bat_priv->vis_hash_lock);
98 spin_lock_init(&bat_priv->vis_list_lock); 99 spin_lock_init(&bat_priv->vis_list_lock);
99 spin_lock_init(&bat_priv->softif_neigh_lock);
100 spin_lock_init(&bat_priv->softif_neigh_vid_lock);
101 100
102 INIT_HLIST_HEAD(&bat_priv->forw_bat_list); 101 INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
103 INIT_HLIST_HEAD(&bat_priv->forw_bcast_list); 102 INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
104 INIT_HLIST_HEAD(&bat_priv->gw_list); 103 INIT_HLIST_HEAD(&bat_priv->gw_list);
105 INIT_HLIST_HEAD(&bat_priv->softif_neigh_vids);
106 INIT_LIST_HEAD(&bat_priv->tt_changes_list); 104 INIT_LIST_HEAD(&bat_priv->tt_changes_list);
107 INIT_LIST_HEAD(&bat_priv->tt_req_list); 105 INIT_LIST_HEAD(&bat_priv->tt_req_list);
108 INIT_LIST_HEAD(&bat_priv->tt_roam_list); 106 INIT_LIST_HEAD(&bat_priv->tt_roam_list);
@@ -118,6 +116,9 @@ int mesh_init(struct net_device *soft_iface)
118 if (vis_init(bat_priv) < 1) 116 if (vis_init(bat_priv) < 1)
119 goto err; 117 goto err;
120 118
119 if (bla_init(bat_priv) < 1)
120 goto err;
121
121 atomic_set(&bat_priv->gw_reselect, 0); 122 atomic_set(&bat_priv->gw_reselect, 0);
122 atomic_set(&bat_priv->mesh_state, MESH_ACTIVE); 123 atomic_set(&bat_priv->mesh_state, MESH_ACTIVE);
123 goto end; 124 goto end;
@@ -145,7 +146,7 @@ void mesh_free(struct net_device *soft_iface)
145 146
146 tt_free(bat_priv); 147 tt_free(bat_priv);
147 148
148 softif_neigh_purge(bat_priv); 149 bla_free(bat_priv);
149 150
150 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE); 151 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
151} 152}
@@ -207,8 +208,9 @@ int bat_algo_register(struct bat_algo_ops *bat_algo_ops)
207 } 208 }
208 209
209 /* all algorithms must implement all ops (for now) */ 210 /* all algorithms must implement all ops (for now) */
210 if (!bat_algo_ops->bat_ogm_init || 211 if (!bat_algo_ops->bat_iface_enable ||
211 !bat_algo_ops->bat_ogm_init_primary || 212 !bat_algo_ops->bat_iface_disable ||
213 !bat_algo_ops->bat_primary_iface_set ||
212 !bat_algo_ops->bat_ogm_update_mac || 214 !bat_algo_ops->bat_ogm_update_mac ||
213 !bat_algo_ops->bat_ogm_schedule || 215 !bat_algo_ops->bat_ogm_schedule ||
214 !bat_algo_ops->bat_ogm_emit || 216 !bat_algo_ops->bat_ogm_emit ||
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 94fa1c2393a6..d9832acf558d 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -65,7 +65,7 @@
65 65
66#define NULL_IFINDEX 0 /* dummy ifindex used to avoid iface checks */ 66#define NULL_IFINDEX 0 /* dummy ifindex used to avoid iface checks */
67 67
68#define NUM_WORDS (TQ_LOCAL_WINDOW_SIZE / WORD_BIT_SIZE) 68#define NUM_WORDS BITS_TO_LONGS(TQ_LOCAL_WINDOW_SIZE)
69 69
70#define LOG_BUF_LEN 8192 /* has to be a power of 2 */ 70#define LOG_BUF_LEN 8192 /* has to be a power of 2 */
71 71
@@ -80,8 +80,12 @@
80#define MAX_AGGREGATION_BYTES 512 80#define MAX_AGGREGATION_BYTES 512
81#define MAX_AGGREGATION_MS 100 81#define MAX_AGGREGATION_MS 100
82 82
83#define SOFTIF_NEIGH_TIMEOUT 180000 /* 3 minutes */ 83#define BLA_PERIOD_LENGTH 10000 /* 10 seconds */
84#define BLA_BACKBONE_TIMEOUT (BLA_PERIOD_LENGTH * 3)
85#define BLA_CLAIM_TIMEOUT (BLA_PERIOD_LENGTH * 10)
84 86
87#define DUPLIST_SIZE 16
88#define DUPLIST_TIMEOUT 500 /* 500 ms */
85/* don't reset again within 30 seconds */ 89/* don't reset again within 30 seconds */
86#define RESET_PROTECTION_MS 30000 90#define RESET_PROTECTION_MS 30000
87#define EXPECTED_SEQNO_RANGE 65536 91#define EXPECTED_SEQNO_RANGE 65536
@@ -119,7 +123,8 @@ enum dbg_level {
119 DBG_BATMAN = 1 << 0, 123 DBG_BATMAN = 1 << 0,
120 DBG_ROUTES = 1 << 1, /* route added / changed / deleted */ 124 DBG_ROUTES = 1 << 1, /* route added / changed / deleted */
121 DBG_TT = 1 << 2, /* translation table operations */ 125 DBG_TT = 1 << 2, /* translation table operations */
122 DBG_ALL = 7 126 DBG_BLA = 1 << 3, /* bridge loop avoidance */
127 DBG_ALL = 15
123}; 128};
124 129
125/* Kernel headers */ 130/* Kernel headers */
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 43c0a4f1399e..ce4969885894 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -28,6 +28,7 @@
28#include "hard-interface.h" 28#include "hard-interface.h"
29#include "unicast.h" 29#include "unicast.h"
30#include "soft-interface.h" 30#include "soft-interface.h"
31#include "bridge_loop_avoidance.h"
31 32
32static void purge_orig(struct work_struct *work); 33static void purge_orig(struct work_struct *work);
33 34
@@ -375,8 +376,6 @@ static void _purge_orig(struct bat_priv *bat_priv)
375 376
376 gw_node_purge(bat_priv); 377 gw_node_purge(bat_priv);
377 gw_election(bat_priv); 378 gw_election(bat_priv);
378
379 softif_neigh_purge(bat_priv);
380} 379}
381 380
382static void purge_orig(struct work_struct *work) 381static void purge_orig(struct work_struct *work)
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 441f3db1bd91..f54969c61a1e 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -25,7 +25,7 @@
25#define ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */ 25#define ETH_P_BATMAN 0x4305 /* unofficial/not registered Ethertype */
26 26
27enum bat_packettype { 27enum bat_packettype {
28 BAT_OGM = 0x01, 28 BAT_IV_OGM = 0x01,
29 BAT_ICMP = 0x02, 29 BAT_ICMP = 0x02,
30 BAT_UNICAST = 0x03, 30 BAT_UNICAST = 0x03,
31 BAT_BCAST = 0x04, 31 BAT_BCAST = 0x04,
@@ -38,7 +38,7 @@ enum bat_packettype {
38/* this file is included by batctl which needs these defines */ 38/* this file is included by batctl which needs these defines */
39#define COMPAT_VERSION 14 39#define COMPAT_VERSION 14
40 40
41enum batman_flags { 41enum batman_iv_flags {
42 PRIMARIES_FIRST_HOP = 1 << 4, 42 PRIMARIES_FIRST_HOP = 1 << 4,
43 VIS_SERVER = 1 << 5, 43 VIS_SERVER = 1 << 5,
44 DIRECTLINK = 1 << 6 44 DIRECTLINK = 1 << 6
@@ -90,6 +90,23 @@ enum tt_client_flags {
90 TT_CLIENT_PENDING = 1 << 10 90 TT_CLIENT_PENDING = 1 << 10
91}; 91};
92 92
93/* claim frame types for the bridge loop avoidance */
94enum bla_claimframe {
95 CLAIM_TYPE_ADD = 0x00,
96 CLAIM_TYPE_DEL = 0x01,
97 CLAIM_TYPE_ANNOUNCE = 0x02,
98 CLAIM_TYPE_REQUEST = 0x03
99};
100
101/* the destination hardware field in the ARP frame is used to
102 * transport the claim type and the group id
103 */
104struct bla_claim_dst {
105 uint8_t magic[3]; /* FF:43:05 */
106 uint8_t type; /* bla_claimframe */
107 uint16_t group; /* group id */
108} __packed;
109
93struct batman_header { 110struct batman_header {
94 uint8_t packet_type; 111 uint8_t packet_type;
95 uint8_t version; /* batman version field */ 112 uint8_t version; /* batman version field */
@@ -100,8 +117,8 @@ struct batman_ogm_packet {
100 struct batman_header header; 117 struct batman_header header;
101 uint8_t flags; /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */ 118 uint8_t flags; /* 0x40: DIRECTLINK flag, 0x20 VIS_SERVER flag... */
102 uint32_t seqno; 119 uint32_t seqno;
103 uint8_t orig[6]; 120 uint8_t orig[ETH_ALEN];
104 uint8_t prev_sender[6]; 121 uint8_t prev_sender[ETH_ALEN];
105 uint8_t gw_flags; /* flags related to gateway class */ 122 uint8_t gw_flags; /* flags related to gateway class */
106 uint8_t tq; 123 uint8_t tq;
107 uint8_t tt_num_changes; 124 uint8_t tt_num_changes;
@@ -109,13 +126,13 @@ struct batman_ogm_packet {
109 uint16_t tt_crc; 126 uint16_t tt_crc;
110} __packed; 127} __packed;
111 128
112#define BATMAN_OGM_LEN sizeof(struct batman_ogm_packet) 129#define BATMAN_OGM_HLEN sizeof(struct batman_ogm_packet)
113 130
114struct icmp_packet { 131struct icmp_packet {
115 struct batman_header header; 132 struct batman_header header;
116 uint8_t msg_type; /* see ICMP message types above */ 133 uint8_t msg_type; /* see ICMP message types above */
117 uint8_t dst[6]; 134 uint8_t dst[ETH_ALEN];
118 uint8_t orig[6]; 135 uint8_t orig[ETH_ALEN];
119 uint16_t seqno; 136 uint16_t seqno;
120 uint8_t uid; 137 uint8_t uid;
121 uint8_t reserved; 138 uint8_t reserved;
@@ -128,8 +145,8 @@ struct icmp_packet {
128struct icmp_packet_rr { 145struct icmp_packet_rr {
129 struct batman_header header; 146 struct batman_header header;
130 uint8_t msg_type; /* see ICMP message types above */ 147 uint8_t msg_type; /* see ICMP message types above */
131 uint8_t dst[6]; 148 uint8_t dst[ETH_ALEN];
132 uint8_t orig[6]; 149 uint8_t orig[ETH_ALEN];
133 uint16_t seqno; 150 uint16_t seqno;
134 uint8_t uid; 151 uint8_t uid;
135 uint8_t rr_cur; 152 uint8_t rr_cur;
@@ -139,16 +156,16 @@ struct icmp_packet_rr {
139struct unicast_packet { 156struct unicast_packet {
140 struct batman_header header; 157 struct batman_header header;
141 uint8_t ttvn; /* destination translation table version number */ 158 uint8_t ttvn; /* destination translation table version number */
142 uint8_t dest[6]; 159 uint8_t dest[ETH_ALEN];
143} __packed; 160} __packed;
144 161
145struct unicast_frag_packet { 162struct unicast_frag_packet {
146 struct batman_header header; 163 struct batman_header header;
147 uint8_t ttvn; /* destination translation table version number */ 164 uint8_t ttvn; /* destination translation table version number */
148 uint8_t dest[6]; 165 uint8_t dest[ETH_ALEN];
149 uint8_t flags; 166 uint8_t flags;
150 uint8_t align; 167 uint8_t align;
151 uint8_t orig[6]; 168 uint8_t orig[ETH_ALEN];
152 uint16_t seqno; 169 uint16_t seqno;
153} __packed; 170} __packed;
154 171
@@ -156,7 +173,7 @@ struct bcast_packet {
156 struct batman_header header; 173 struct batman_header header;
157 uint8_t reserved; 174 uint8_t reserved;
158 uint32_t seqno; 175 uint32_t seqno;
159 uint8_t orig[6]; 176 uint8_t orig[ETH_ALEN];
160} __packed; 177} __packed;
161 178
162struct vis_packet { 179struct vis_packet {
@@ -165,9 +182,9 @@ struct vis_packet {
165 uint32_t seqno; /* sequence number */ 182 uint32_t seqno; /* sequence number */
166 uint8_t entries; /* number of entries behind this struct */ 183 uint8_t entries; /* number of entries behind this struct */
167 uint8_t reserved; 184 uint8_t reserved;
168 uint8_t vis_orig[6]; /* originator that announces its neighbors */ 185 uint8_t vis_orig[ETH_ALEN]; /* originator reporting its neighbors */
169 uint8_t target_orig[6]; /* who should receive this packet */ 186 uint8_t target_orig[ETH_ALEN]; /* who should receive this packet */
170 uint8_t sender_orig[6]; /* who sent or rebroadcasted this packet */ 187 uint8_t sender_orig[ETH_ALEN]; /* who sent or forwarded this packet */
171} __packed; 188} __packed;
172 189
173struct tt_query_packet { 190struct tt_query_packet {
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 7f8e15899417..ff560863bc74 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -29,6 +29,10 @@
29#include "originator.h" 29#include "originator.h"
30#include "vis.h" 30#include "vis.h"
31#include "unicast.h" 31#include "unicast.h"
32#include "bridge_loop_avoidance.h"
33
34static int route_unicast_packet(struct sk_buff *skb,
35 struct hard_iface *recv_if);
32 36
33void slide_own_bcast_window(struct hard_iface *hard_iface) 37void slide_own_bcast_window(struct hard_iface *hard_iface)
34{ 38{
@@ -52,7 +56,7 @@ void slide_own_bcast_window(struct hard_iface *hard_iface)
52 56
53 bit_get_packet(bat_priv, word, 1, 0); 57 bit_get_packet(bat_priv, word, 1, 0);
54 orig_node->bcast_own_sum[hard_iface->if_num] = 58 orig_node->bcast_own_sum[hard_iface->if_num] =
55 bit_packet_count(word); 59 bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE);
56 spin_unlock_bh(&orig_node->ogm_cnt_lock); 60 spin_unlock_bh(&orig_node->ogm_cnt_lock);
57 } 61 }
58 rcu_read_unlock(); 62 rcu_read_unlock();
@@ -250,7 +254,7 @@ int recv_bat_ogm_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
250 struct ethhdr *ethhdr; 254 struct ethhdr *ethhdr;
251 255
252 /* drop packet if it has not necessary minimum size */ 256 /* drop packet if it has not necessary minimum size */
253 if (unlikely(!pskb_may_pull(skb, BATMAN_OGM_LEN))) 257 if (unlikely(!pskb_may_pull(skb, BATMAN_OGM_HLEN)))
254 return NET_RX_DROP; 258 return NET_RX_DROP;
255 259
256 ethhdr = (struct ethhdr *)skb_mac_header(skb); 260 ethhdr = (struct ethhdr *)skb_mac_header(skb);
@@ -309,7 +313,7 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv,
309 goto out; 313 goto out;
310 314
311 /* create a copy of the skb, if needed, to modify it. */ 315 /* create a copy of the skb, if needed, to modify it. */
312 if (skb_cow(skb, sizeof(struct ethhdr)) < 0) 316 if (skb_cow(skb, ETH_HLEN) < 0)
313 goto out; 317 goto out;
314 318
315 icmp_packet = (struct icmp_packet_rr *)skb->data; 319 icmp_packet = (struct icmp_packet_rr *)skb->data;
@@ -364,7 +368,7 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
364 goto out; 368 goto out;
365 369
366 /* create a copy of the skb, if needed, to modify it. */ 370 /* create a copy of the skb, if needed, to modify it. */
367 if (skb_cow(skb, sizeof(struct ethhdr)) < 0) 371 if (skb_cow(skb, ETH_HLEN) < 0)
368 goto out; 372 goto out;
369 373
370 icmp_packet = (struct icmp_packet *)skb->data; 374 icmp_packet = (struct icmp_packet *)skb->data;
@@ -450,7 +454,7 @@ int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
450 goto out; 454 goto out;
451 455
452 /* create a copy of the skb, if needed, to modify it. */ 456 /* create a copy of the skb, if needed, to modify it. */
453 if (skb_cow(skb, sizeof(struct ethhdr)) < 0) 457 if (skb_cow(skb, ETH_HLEN) < 0)
454 goto out; 458 goto out;
455 459
456 icmp_packet = (struct icmp_packet_rr *)skb->data; 460 icmp_packet = (struct icmp_packet_rr *)skb->data;
@@ -669,6 +673,13 @@ int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if)
669 if (!is_my_mac(roam_adv_packet->dst)) 673 if (!is_my_mac(roam_adv_packet->dst))
670 return route_unicast_packet(skb, recv_if); 674 return route_unicast_packet(skb, recv_if);
671 675
676 /* check if it is a backbone gateway. we don't accept
677 * roaming advertisement from it, as it has the same
678 * entries as we have.
679 */
680 if (bla_is_backbone_gw_orig(bat_priv, roam_adv_packet->src))
681 goto out;
682
672 orig_node = orig_hash_find(bat_priv, roam_adv_packet->src); 683 orig_node = orig_hash_find(bat_priv, roam_adv_packet->src);
673 if (!orig_node) 684 if (!orig_node)
674 goto out; 685 goto out;
@@ -798,7 +809,7 @@ static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
798 return 0; 809 return 0;
799} 810}
800 811
801int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if) 812static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
802{ 813{
803 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); 814 struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
804 struct orig_node *orig_node = NULL; 815 struct orig_node *orig_node = NULL;
@@ -830,7 +841,7 @@ int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
830 goto out; 841 goto out;
831 842
832 /* create a copy of the skb, if needed, to modify it. */ 843 /* create a copy of the skb, if needed, to modify it. */
833 if (skb_cow(skb, sizeof(struct ethhdr)) < 0) 844 if (skb_cow(skb, ETH_HLEN) < 0)
834 goto out; 845 goto out;
835 846
836 unicast_packet = (struct unicast_packet *)skb->data; 847 unicast_packet = (struct unicast_packet *)skb->data;
@@ -1047,8 +1058,8 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1047 spin_lock_bh(&orig_node->bcast_seqno_lock); 1058 spin_lock_bh(&orig_node->bcast_seqno_lock);
1048 1059
1049 /* check whether the packet is a duplicate */ 1060 /* check whether the packet is a duplicate */
1050 if (get_bit_status(orig_node->bcast_bits, orig_node->last_bcast_seqno, 1061 if (bat_test_bit(orig_node->bcast_bits, orig_node->last_bcast_seqno,
1051 ntohl(bcast_packet->seqno))) 1062 ntohl(bcast_packet->seqno)))
1052 goto spin_unlock; 1063 goto spin_unlock;
1053 1064
1054 seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno; 1065 seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;
@@ -1065,9 +1076,19 @@ int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
1065 1076
1066 spin_unlock_bh(&orig_node->bcast_seqno_lock); 1077 spin_unlock_bh(&orig_node->bcast_seqno_lock);
1067 1078
1079 /* check whether this has been sent by another originator before */
1080 if (bla_check_bcast_duplist(bat_priv, bcast_packet, hdr_size))
1081 goto out;
1082
1068 /* rebroadcast packet */ 1083 /* rebroadcast packet */
1069 add_bcast_packet_to_list(bat_priv, skb, 1); 1084 add_bcast_packet_to_list(bat_priv, skb, 1);
1070 1085
1086 /* don't hand the broadcast up if it is from an originator
1087 * from the same backbone.
1088 */
1089 if (bla_is_backbone_gw(skb, orig_node, hdr_size))
1090 goto out;
1091
1071 /* broadcast for me */ 1092 /* broadcast for me */
1072 interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size); 1093 interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
1073 ret = NET_RX_SUCCESS; 1094 ret = NET_RX_SUCCESS;
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index 92ac100d83da..3d729cb17113 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -25,7 +25,6 @@
25void slide_own_bcast_window(struct hard_iface *hard_iface); 25void slide_own_bcast_window(struct hard_iface *hard_iface);
26void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node, 26void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node,
27 struct neigh_node *neigh_node); 27 struct neigh_node *neigh_node);
28int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
29int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if); 28int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if);
30int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); 29int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if);
31int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if); 30int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if);
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index af7a6741a685..7c66b6121fa6 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -51,7 +51,7 @@ int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
51 } 51 }
52 52
53 /* push to the ethernet header. */ 53 /* push to the ethernet header. */
54 if (my_skb_head_push(skb, sizeof(*ethhdr)) < 0) 54 if (my_skb_head_push(skb, ETH_HLEN) < 0)
55 goto send_skb_err; 55 goto send_skb_err;
56 56
57 skb_reset_mac_header(skb); 57 skb_reset_mac_header(skb);
@@ -87,7 +87,7 @@ static void realloc_packet_buffer(struct hard_iface *hard_iface,
87 /* keep old buffer if kmalloc should fail */ 87 /* keep old buffer if kmalloc should fail */
88 if (new_buff) { 88 if (new_buff) {
89 memcpy(new_buff, hard_iface->packet_buff, 89 memcpy(new_buff, hard_iface->packet_buff,
90 BATMAN_OGM_LEN); 90 BATMAN_OGM_HLEN);
91 91
92 kfree(hard_iface->packet_buff); 92 kfree(hard_iface->packet_buff);
93 hard_iface->packet_buff = new_buff; 93 hard_iface->packet_buff = new_buff;
@@ -101,13 +101,13 @@ static int prepare_packet_buffer(struct bat_priv *bat_priv,
101{ 101{
102 int new_len; 102 int new_len;
103 103
104 new_len = BATMAN_OGM_LEN + 104 new_len = BATMAN_OGM_HLEN +
105 tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes)); 105 tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));
106 106
107 /* if we have too many changes for one packet don't send any 107 /* if we have too many changes for one packet don't send any
108 * and wait for the tt table request which will be fragmented */ 108 * and wait for the tt table request which will be fragmented */
109 if (new_len > hard_iface->soft_iface->mtu) 109 if (new_len > hard_iface->soft_iface->mtu)
110 new_len = BATMAN_OGM_LEN; 110 new_len = BATMAN_OGM_HLEN;
111 111
112 realloc_packet_buffer(hard_iface, new_len); 112 realloc_packet_buffer(hard_iface, new_len);
113 113
@@ -117,14 +117,14 @@ static int prepare_packet_buffer(struct bat_priv *bat_priv,
117 atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX); 117 atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);
118 118
119 return tt_changes_fill_buffer(bat_priv, 119 return tt_changes_fill_buffer(bat_priv,
120 hard_iface->packet_buff + BATMAN_OGM_LEN, 120 hard_iface->packet_buff + BATMAN_OGM_HLEN,
121 hard_iface->packet_len - BATMAN_OGM_LEN); 121 hard_iface->packet_len - BATMAN_OGM_HLEN);
122} 122}
123 123
124static int reset_packet_buffer(struct bat_priv *bat_priv, 124static int reset_packet_buffer(struct bat_priv *bat_priv,
125 struct hard_iface *hard_iface) 125 struct hard_iface *hard_iface)
126{ 126{
127 realloc_packet_buffer(hard_iface, BATMAN_OGM_LEN); 127 realloc_packet_buffer(hard_iface, BATMAN_OGM_HLEN);
128 return 0; 128 return 0;
129} 129}
130 130
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index a5590f4193f1..6e2530b02043 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -36,6 +36,7 @@
36#include <linux/etherdevice.h> 36#include <linux/etherdevice.h>
37#include <linux/if_vlan.h> 37#include <linux/if_vlan.h>
38#include "unicast.h" 38#include "unicast.h"
39#include "bridge_loop_avoidance.h"
39 40
40 41
41static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd); 42static int bat_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
@@ -73,439 +74,6 @@ int my_skb_head_push(struct sk_buff *skb, unsigned int len)
73 return 0; 74 return 0;
74} 75}
75 76
76static void softif_neigh_free_ref(struct softif_neigh *softif_neigh)
77{
78 if (atomic_dec_and_test(&softif_neigh->refcount))
79 kfree_rcu(softif_neigh, rcu);
80}
81
82static void softif_neigh_vid_free_rcu(struct rcu_head *rcu)
83{
84 struct softif_neigh_vid *softif_neigh_vid;
85 struct softif_neigh *softif_neigh;
86 struct hlist_node *node, *node_tmp;
87 struct bat_priv *bat_priv;
88
89 softif_neigh_vid = container_of(rcu, struct softif_neigh_vid, rcu);
90 bat_priv = softif_neigh_vid->bat_priv;
91
92 spin_lock_bh(&bat_priv->softif_neigh_lock);
93 hlist_for_each_entry_safe(softif_neigh, node, node_tmp,
94 &softif_neigh_vid->softif_neigh_list, list) {
95 hlist_del_rcu(&softif_neigh->list);
96 softif_neigh_free_ref(softif_neigh);
97 }
98 spin_unlock_bh(&bat_priv->softif_neigh_lock);
99
100 kfree(softif_neigh_vid);
101}
102
103static void softif_neigh_vid_free_ref(struct softif_neigh_vid *softif_neigh_vid)
104{
105 if (atomic_dec_and_test(&softif_neigh_vid->refcount))
106 call_rcu(&softif_neigh_vid->rcu, softif_neigh_vid_free_rcu);
107}
108
109static struct softif_neigh_vid *softif_neigh_vid_get(struct bat_priv *bat_priv,
110 short vid)
111{
112 struct softif_neigh_vid *softif_neigh_vid;
113 struct hlist_node *node;
114
115 rcu_read_lock();
116 hlist_for_each_entry_rcu(softif_neigh_vid, node,
117 &bat_priv->softif_neigh_vids, list) {
118 if (softif_neigh_vid->vid != vid)
119 continue;
120
121 if (!atomic_inc_not_zero(&softif_neigh_vid->refcount))
122 continue;
123
124 goto out;
125 }
126
127 softif_neigh_vid = kzalloc(sizeof(*softif_neigh_vid), GFP_ATOMIC);
128 if (!softif_neigh_vid)
129 goto out;
130
131 softif_neigh_vid->vid = vid;
132 softif_neigh_vid->bat_priv = bat_priv;
133
134 /* initialize with 2 - caller decrements counter by one */
135 atomic_set(&softif_neigh_vid->refcount, 2);
136 INIT_HLIST_HEAD(&softif_neigh_vid->softif_neigh_list);
137 INIT_HLIST_NODE(&softif_neigh_vid->list);
138 spin_lock_bh(&bat_priv->softif_neigh_vid_lock);
139 hlist_add_head_rcu(&softif_neigh_vid->list,
140 &bat_priv->softif_neigh_vids);
141 spin_unlock_bh(&bat_priv->softif_neigh_vid_lock);
142
143out:
144 rcu_read_unlock();
145 return softif_neigh_vid;
146}
147
148static struct softif_neigh *softif_neigh_get(struct bat_priv *bat_priv,
149 const uint8_t *addr, short vid)
150{
151 struct softif_neigh_vid *softif_neigh_vid;
152 struct softif_neigh *softif_neigh = NULL;
153 struct hlist_node *node;
154
155 softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid);
156 if (!softif_neigh_vid)
157 goto out;
158
159 rcu_read_lock();
160 hlist_for_each_entry_rcu(softif_neigh, node,
161 &softif_neigh_vid->softif_neigh_list,
162 list) {
163 if (!compare_eth(softif_neigh->addr, addr))
164 continue;
165
166 if (!atomic_inc_not_zero(&softif_neigh->refcount))
167 continue;
168
169 softif_neigh->last_seen = jiffies;
170 goto unlock;
171 }
172
173 softif_neigh = kzalloc(sizeof(*softif_neigh), GFP_ATOMIC);
174 if (!softif_neigh)
175 goto unlock;
176
177 memcpy(softif_neigh->addr, addr, ETH_ALEN);
178 softif_neigh->last_seen = jiffies;
179 /* initialize with 2 - caller decrements counter by one */
180 atomic_set(&softif_neigh->refcount, 2);
181
182 INIT_HLIST_NODE(&softif_neigh->list);
183 spin_lock_bh(&bat_priv->softif_neigh_lock);
184 hlist_add_head_rcu(&softif_neigh->list,
185 &softif_neigh_vid->softif_neigh_list);
186 spin_unlock_bh(&bat_priv->softif_neigh_lock);
187
188unlock:
189 rcu_read_unlock();
190out:
191 if (softif_neigh_vid)
192 softif_neigh_vid_free_ref(softif_neigh_vid);
193 return softif_neigh;
194}
195
196static struct softif_neigh *softif_neigh_get_selected(
197 struct softif_neigh_vid *softif_neigh_vid)
198{
199 struct softif_neigh *softif_neigh;
200
201 rcu_read_lock();
202 softif_neigh = rcu_dereference(softif_neigh_vid->softif_neigh);
203
204 if (softif_neigh && !atomic_inc_not_zero(&softif_neigh->refcount))
205 softif_neigh = NULL;
206
207 rcu_read_unlock();
208 return softif_neigh;
209}
210
211static struct softif_neigh *softif_neigh_vid_get_selected(
212 struct bat_priv *bat_priv,
213 short vid)
214{
215 struct softif_neigh_vid *softif_neigh_vid;
216 struct softif_neigh *softif_neigh = NULL;
217
218 softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid);
219 if (!softif_neigh_vid)
220 goto out;
221
222 softif_neigh = softif_neigh_get_selected(softif_neigh_vid);
223out:
224 if (softif_neigh_vid)
225 softif_neigh_vid_free_ref(softif_neigh_vid);
226 return softif_neigh;
227}
228
229static void softif_neigh_vid_select(struct bat_priv *bat_priv,
230 struct softif_neigh *new_neigh,
231 short vid)
232{
233 struct softif_neigh_vid *softif_neigh_vid;
234 struct softif_neigh *curr_neigh;
235
236 softif_neigh_vid = softif_neigh_vid_get(bat_priv, vid);
237 if (!softif_neigh_vid)
238 goto out;
239
240 spin_lock_bh(&bat_priv->softif_neigh_lock);
241
242 if (new_neigh && !atomic_inc_not_zero(&new_neigh->refcount))
243 new_neigh = NULL;
244
245 curr_neigh = rcu_dereference_protected(softif_neigh_vid->softif_neigh,
246 1);
247 rcu_assign_pointer(softif_neigh_vid->softif_neigh, new_neigh);
248
249 if ((curr_neigh) && (!new_neigh))
250 bat_dbg(DBG_ROUTES, bat_priv,
251 "Removing mesh exit point on vid: %d (prev: %pM).\n",
252 vid, curr_neigh->addr);
253 else if ((curr_neigh) && (new_neigh))
254 bat_dbg(DBG_ROUTES, bat_priv,
255 "Changing mesh exit point on vid: %d from %pM to %pM.\n",
256 vid, curr_neigh->addr, new_neigh->addr);
257 else if ((!curr_neigh) && (new_neigh))
258 bat_dbg(DBG_ROUTES, bat_priv,
259 "Setting mesh exit point on vid: %d to %pM.\n",
260 vid, new_neigh->addr);
261
262 if (curr_neigh)
263 softif_neigh_free_ref(curr_neigh);
264
265 spin_unlock_bh(&bat_priv->softif_neigh_lock);
266
267out:
268 if (softif_neigh_vid)
269 softif_neigh_vid_free_ref(softif_neigh_vid);
270}
271
272static void softif_neigh_vid_deselect(struct bat_priv *bat_priv,
273 struct softif_neigh_vid *softif_neigh_vid)
274{
275 struct softif_neigh *curr_neigh;
276 struct softif_neigh *softif_neigh = NULL, *softif_neigh_tmp;
277 struct hard_iface *primary_if = NULL;
278 struct hlist_node *node;
279
280 primary_if = primary_if_get_selected(bat_priv);
281 if (!primary_if)
282 goto out;
283
284 /* find new softif_neigh immediately to avoid temporary loops */
285 rcu_read_lock();
286 curr_neigh = rcu_dereference(softif_neigh_vid->softif_neigh);
287
288 hlist_for_each_entry_rcu(softif_neigh_tmp, node,
289 &softif_neigh_vid->softif_neigh_list,
290 list) {
291 if (softif_neigh_tmp == curr_neigh)
292 continue;
293
294 /* we got a neighbor but its mac is 'bigger' than ours */
295 if (memcmp(primary_if->net_dev->dev_addr,
296 softif_neigh_tmp->addr, ETH_ALEN) < 0)
297 continue;
298
299 if (!atomic_inc_not_zero(&softif_neigh_tmp->refcount))
300 continue;
301
302 softif_neigh = softif_neigh_tmp;
303 goto unlock;
304 }
305
306unlock:
307 rcu_read_unlock();
308out:
309 softif_neigh_vid_select(bat_priv, softif_neigh, softif_neigh_vid->vid);
310
311 if (primary_if)
312 hardif_free_ref(primary_if);
313 if (softif_neigh)
314 softif_neigh_free_ref(softif_neigh);
315}
316
317int softif_neigh_seq_print_text(struct seq_file *seq, void *offset)
318{
319 struct net_device *net_dev = (struct net_device *)seq->private;
320 struct bat_priv *bat_priv = netdev_priv(net_dev);
321 struct softif_neigh_vid *softif_neigh_vid;
322 struct softif_neigh *softif_neigh;
323 struct hard_iface *primary_if;
324 struct hlist_node *node, *node_tmp;
325 struct softif_neigh *curr_softif_neigh;
326 int ret = 0, last_seen_secs, last_seen_msecs;
327
328 primary_if = primary_if_get_selected(bat_priv);
329 if (!primary_if) {
330 ret = seq_printf(seq,
331 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
332 net_dev->name);
333 goto out;
334 }
335
336 if (primary_if->if_status != IF_ACTIVE) {
337 ret = seq_printf(seq,
338 "BATMAN mesh %s disabled - primary interface not active\n",
339 net_dev->name);
340 goto out;
341 }
342
343 seq_printf(seq, "Softif neighbor list (%s)\n", net_dev->name);
344
345 rcu_read_lock();
346 hlist_for_each_entry_rcu(softif_neigh_vid, node,
347 &bat_priv->softif_neigh_vids, list) {
348 seq_printf(seq, " %-15s %s on vid: %d\n",
349 "Originator", "last-seen", softif_neigh_vid->vid);
350
351 curr_softif_neigh = softif_neigh_get_selected(softif_neigh_vid);
352
353 hlist_for_each_entry_rcu(softif_neigh, node_tmp,
354 &softif_neigh_vid->softif_neigh_list,
355 list) {
356 last_seen_secs = jiffies_to_msecs(jiffies -
357 softif_neigh->last_seen) / 1000;
358 last_seen_msecs = jiffies_to_msecs(jiffies -
359 softif_neigh->last_seen) % 1000;
360 seq_printf(seq, "%s %pM %3i.%03is\n",
361 curr_softif_neigh == softif_neigh
362 ? "=>" : " ", softif_neigh->addr,
363 last_seen_secs, last_seen_msecs);
364 }
365
366 if (curr_softif_neigh)
367 softif_neigh_free_ref(curr_softif_neigh);
368
369 seq_printf(seq, "\n");
370 }
371 rcu_read_unlock();
372
373out:
374 if (primary_if)
375 hardif_free_ref(primary_if);
376 return ret;
377}
378
379void softif_neigh_purge(struct bat_priv *bat_priv)
380{
381 struct softif_neigh *softif_neigh, *curr_softif_neigh;
382 struct softif_neigh_vid *softif_neigh_vid;
383 struct hlist_node *node, *node_tmp, *node_tmp2;
384 int do_deselect;
385
386 rcu_read_lock();
387 hlist_for_each_entry_rcu(softif_neigh_vid, node,
388 &bat_priv->softif_neigh_vids, list) {
389 if (!atomic_inc_not_zero(&softif_neigh_vid->refcount))
390 continue;
391
392 curr_softif_neigh = softif_neigh_get_selected(softif_neigh_vid);
393 do_deselect = 0;
394
395 spin_lock_bh(&bat_priv->softif_neigh_lock);
396 hlist_for_each_entry_safe(softif_neigh, node_tmp, node_tmp2,
397 &softif_neigh_vid->softif_neigh_list,
398 list) {
399 if ((!has_timed_out(softif_neigh->last_seen,
400 SOFTIF_NEIGH_TIMEOUT)) &&
401 (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE))
402 continue;
403
404 if (curr_softif_neigh == softif_neigh) {
405 bat_dbg(DBG_ROUTES, bat_priv,
406 "Current mesh exit point on vid: %d '%pM' vanished.\n",
407 softif_neigh_vid->vid,
408 softif_neigh->addr);
409 do_deselect = 1;
410 }
411
412 hlist_del_rcu(&softif_neigh->list);
413 softif_neigh_free_ref(softif_neigh);
414 }
415 spin_unlock_bh(&bat_priv->softif_neigh_lock);
416
417 /* soft_neigh_vid_deselect() needs to acquire the
418 * softif_neigh_lock */
419 if (do_deselect)
420 softif_neigh_vid_deselect(bat_priv, softif_neigh_vid);
421
422 if (curr_softif_neigh)
423 softif_neigh_free_ref(curr_softif_neigh);
424
425 softif_neigh_vid_free_ref(softif_neigh_vid);
426 }
427 rcu_read_unlock();
428
429 spin_lock_bh(&bat_priv->softif_neigh_vid_lock);
430 hlist_for_each_entry_safe(softif_neigh_vid, node, node_tmp,
431 &bat_priv->softif_neigh_vids, list) {
432 if (!hlist_empty(&softif_neigh_vid->softif_neigh_list))
433 continue;
434
435 hlist_del_rcu(&softif_neigh_vid->list);
436 softif_neigh_vid_free_ref(softif_neigh_vid);
437 }
438 spin_unlock_bh(&bat_priv->softif_neigh_vid_lock);
439
440}
441
442static void softif_batman_recv(struct sk_buff *skb, struct net_device *dev,
443 short vid)
444{
445 struct bat_priv *bat_priv = netdev_priv(dev);
446 struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
447 struct batman_ogm_packet *batman_ogm_packet;
448 struct softif_neigh *softif_neigh = NULL;
449 struct hard_iface *primary_if = NULL;
450 struct softif_neigh *curr_softif_neigh = NULL;
451
452 if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
453 batman_ogm_packet = (struct batman_ogm_packet *)
454 (skb->data + ETH_HLEN + VLAN_HLEN);
455 else
456 batman_ogm_packet = (struct batman_ogm_packet *)
457 (skb->data + ETH_HLEN);
458
459 if (batman_ogm_packet->header.version != COMPAT_VERSION)
460 goto out;
461
462 if (batman_ogm_packet->header.packet_type != BAT_OGM)
463 goto out;
464
465 if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
466 goto out;
467
468 if (is_my_mac(batman_ogm_packet->orig))
469 goto out;
470
471 softif_neigh = softif_neigh_get(bat_priv, batman_ogm_packet->orig, vid);
472 if (!softif_neigh)
473 goto out;
474
475 curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid);
476 if (curr_softif_neigh == softif_neigh)
477 goto out;
478
479 primary_if = primary_if_get_selected(bat_priv);
480 if (!primary_if)
481 goto out;
482
483 /* we got a neighbor but its mac is 'bigger' than ours */
484 if (memcmp(primary_if->net_dev->dev_addr,
485 softif_neigh->addr, ETH_ALEN) < 0)
486 goto out;
487
488 /* close own batX device and use softif_neigh as exit node */
489 if (!curr_softif_neigh) {
490 softif_neigh_vid_select(bat_priv, softif_neigh, vid);
491 goto out;
492 }
493
494 /* switch to new 'smallest neighbor' */
495 if (memcmp(softif_neigh->addr, curr_softif_neigh->addr, ETH_ALEN) < 0)
496 softif_neigh_vid_select(bat_priv, softif_neigh, vid);
497
498out:
499 kfree_skb(skb);
500 if (softif_neigh)
501 softif_neigh_free_ref(softif_neigh);
502 if (curr_softif_neigh)
503 softif_neigh_free_ref(curr_softif_neigh);
504 if (primary_if)
505 hardif_free_ref(primary_if);
506 return;
507}
508
509static int interface_open(struct net_device *dev) 77static int interface_open(struct net_device *dev)
510{ 78{
511 netif_start_queue(dev); 79 netif_start_queue(dev);
@@ -562,10 +130,11 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
562 struct hard_iface *primary_if = NULL; 130 struct hard_iface *primary_if = NULL;
563 struct bcast_packet *bcast_packet; 131 struct bcast_packet *bcast_packet;
564 struct vlan_ethhdr *vhdr; 132 struct vlan_ethhdr *vhdr;
565 struct softif_neigh *curr_softif_neigh = NULL; 133 static const uint8_t stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00,
134 0x00};
566 unsigned int header_len = 0; 135 unsigned int header_len = 0;
567 int data_len = skb->len, ret; 136 int data_len = skb->len, ret;
568 short vid = -1; 137 short vid __maybe_unused = -1;
569 bool do_bcast = false; 138 bool do_bcast = false;
570 139
571 if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) 140 if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE)
@@ -583,21 +152,21 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface)
583 152
584 /* fall through */ 153 /* fall through */
585 case ETH_P_BATMAN: 154 case ETH_P_BATMAN:
586 softif_batman_recv(skb, soft_iface, vid); 155 goto dropped;
587 goto end;
588 } 156 }
589 157
590 /** 158 if (bla_tx(bat_priv, skb, vid))
591 * if we have a another chosen mesh exit node in range
592 * it will transport the packets to the mesh
593 */
594 curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid);
595 if (curr_softif_neigh)
596 goto dropped; 159 goto dropped;
597 160
598 /* Register the client MAC in the transtable */ 161 /* Register the client MAC in the transtable */
599 tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif); 162 tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
600 163
164 /* don't accept stp packets. STP does not help in meshes.
165 * better use the bridge loop avoidance ...
166 */
167 if (compare_eth(ethhdr->h_dest, stp_addr))
168 goto dropped;
169
601 if (is_multicast_ether_addr(ethhdr->h_dest)) { 170 if (is_multicast_ether_addr(ethhdr->h_dest)) {
602 do_bcast = true; 171 do_bcast = true;
603 172
@@ -675,8 +244,6 @@ dropped:
675dropped_freed: 244dropped_freed:
676 bat_priv->stats.tx_dropped++; 245 bat_priv->stats.tx_dropped++;
677end: 246end:
678 if (curr_softif_neigh)
679 softif_neigh_free_ref(curr_softif_neigh);
680 if (primary_if) 247 if (primary_if)
681 hardif_free_ref(primary_if); 248 hardif_free_ref(primary_if);
682 return NETDEV_TX_OK; 249 return NETDEV_TX_OK;
@@ -687,12 +254,9 @@ void interface_rx(struct net_device *soft_iface,
687 int hdr_size) 254 int hdr_size)
688{ 255{
689 struct bat_priv *bat_priv = netdev_priv(soft_iface); 256 struct bat_priv *bat_priv = netdev_priv(soft_iface);
690 struct unicast_packet *unicast_packet;
691 struct ethhdr *ethhdr; 257 struct ethhdr *ethhdr;
692 struct vlan_ethhdr *vhdr; 258 struct vlan_ethhdr *vhdr;
693 struct softif_neigh *curr_softif_neigh = NULL; 259 short vid __maybe_unused = -1;
694 short vid = -1;
695 int ret;
696 260
697 /* check if enough space is available for pulling, and pull */ 261 /* check if enough space is available for pulling, and pull */
698 if (!pskb_may_pull(skb, hdr_size)) 262 if (!pskb_may_pull(skb, hdr_size))
@@ -716,30 +280,6 @@ void interface_rx(struct net_device *soft_iface,
716 goto dropped; 280 goto dropped;
717 } 281 }
718 282
719 /**
720 * if we have a another chosen mesh exit node in range
721 * it will transport the packets to the non-mesh network
722 */
723 curr_softif_neigh = softif_neigh_vid_get_selected(bat_priv, vid);
724 if (curr_softif_neigh) {
725 skb_push(skb, hdr_size);
726 unicast_packet = (struct unicast_packet *)skb->data;
727
728 if ((unicast_packet->header.packet_type != BAT_UNICAST) &&
729 (unicast_packet->header.packet_type != BAT_UNICAST_FRAG))
730 goto dropped;
731
732 skb_reset_mac_header(skb);
733
734 memcpy(unicast_packet->dest,
735 curr_softif_neigh->addr, ETH_ALEN);
736 ret = route_unicast_packet(skb, recv_if);
737 if (ret == NET_RX_DROP)
738 goto dropped;
739
740 goto out;
741 }
742
743 /* skb->dev & skb->pkt_type are set here */ 283 /* skb->dev & skb->pkt_type are set here */
744 if (unlikely(!pskb_may_pull(skb, ETH_HLEN))) 284 if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
745 goto dropped; 285 goto dropped;
@@ -752,21 +292,25 @@ void interface_rx(struct net_device *soft_iface,
752/* skb->ip_summed = CHECKSUM_UNNECESSARY;*/ 292/* skb->ip_summed = CHECKSUM_UNNECESSARY;*/
753 293
754 bat_priv->stats.rx_packets++; 294 bat_priv->stats.rx_packets++;
755 bat_priv->stats.rx_bytes += skb->len + sizeof(struct ethhdr); 295 bat_priv->stats.rx_bytes += skb->len + ETH_HLEN;
756 296
757 soft_iface->last_rx = jiffies; 297 soft_iface->last_rx = jiffies;
758 298
759 if (is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest)) 299 if (is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
760 goto dropped; 300 goto dropped;
761 301
302 /* Let the bridge loop avoidance check the packet. If will
303 * not handle it, we can safely push it up.
304 */
305 if (bla_rx(bat_priv, skb, vid))
306 goto out;
307
762 netif_rx(skb); 308 netif_rx(skb);
763 goto out; 309 goto out;
764 310
765dropped: 311dropped:
766 kfree_skb(skb); 312 kfree_skb(skb);
767out: 313out:
768 if (curr_softif_neigh)
769 softif_neigh_free_ref(curr_softif_neigh);
770 return; 314 return;
771} 315}
772 316
@@ -828,13 +372,14 @@ struct net_device *softif_create(const char *name)
828 372
829 atomic_set(&bat_priv->aggregated_ogms, 1); 373 atomic_set(&bat_priv->aggregated_ogms, 1);
830 atomic_set(&bat_priv->bonding, 0); 374 atomic_set(&bat_priv->bonding, 0);
375 atomic_set(&bat_priv->bridge_loop_avoidance, 0);
831 atomic_set(&bat_priv->ap_isolation, 0); 376 atomic_set(&bat_priv->ap_isolation, 0);
832 atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE); 377 atomic_set(&bat_priv->vis_mode, VIS_TYPE_CLIENT_UPDATE);
833 atomic_set(&bat_priv->gw_mode, GW_MODE_OFF); 378 atomic_set(&bat_priv->gw_mode, GW_MODE_OFF);
834 atomic_set(&bat_priv->gw_sel_class, 20); 379 atomic_set(&bat_priv->gw_sel_class, 20);
835 atomic_set(&bat_priv->gw_bandwidth, 41); 380 atomic_set(&bat_priv->gw_bandwidth, 41);
836 atomic_set(&bat_priv->orig_interval, 1000); 381 atomic_set(&bat_priv->orig_interval, 1000);
837 atomic_set(&bat_priv->hop_penalty, 10); 382 atomic_set(&bat_priv->hop_penalty, 30);
838 atomic_set(&bat_priv->log_level, 0); 383 atomic_set(&bat_priv->log_level, 0);
839 atomic_set(&bat_priv->fragmentation, 1); 384 atomic_set(&bat_priv->fragmentation, 1);
840 atomic_set(&bat_priv->bcast_queue_left, BCAST_QUEUE_LEN); 385 atomic_set(&bat_priv->bcast_queue_left, BCAST_QUEUE_LEN);
@@ -845,6 +390,7 @@ struct net_device *softif_create(const char *name)
845 atomic_set(&bat_priv->ttvn, 0); 390 atomic_set(&bat_priv->ttvn, 0);
846 atomic_set(&bat_priv->tt_local_changes, 0); 391 atomic_set(&bat_priv->tt_local_changes, 0);
847 atomic_set(&bat_priv->tt_ogm_append_cnt, 0); 392 atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
393 atomic_set(&bat_priv->bla_num_requests, 0);
848 394
849 bat_priv->tt_buff = NULL; 395 bat_priv->tt_buff = NULL;
850 bat_priv->tt_buff_len = 0; 396 bat_priv->tt_buff_len = 0;
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index 756eab5b8dd4..020300673884 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -23,8 +23,6 @@
23#define _NET_BATMAN_ADV_SOFT_INTERFACE_H_ 23#define _NET_BATMAN_ADV_SOFT_INTERFACE_H_
24 24
25int my_skb_head_push(struct sk_buff *skb, unsigned int len); 25int my_skb_head_push(struct sk_buff *skb, unsigned int len);
26int softif_neigh_seq_print_text(struct seq_file *seq, void *offset);
27void softif_neigh_purge(struct bat_priv *bat_priv);
28void interface_rx(struct net_device *soft_iface, 26void interface_rx(struct net_device *soft_iface,
29 struct sk_buff *skb, struct hard_iface *recv_if, 27 struct sk_buff *skb, struct hard_iface *recv_if,
30 int hdr_size); 28 int hdr_size);
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 1f8692127840..a38d315d3cd6 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -27,13 +27,14 @@
27#include "hash.h" 27#include "hash.h"
28#include "originator.h" 28#include "originator.h"
29#include "routing.h" 29#include "routing.h"
30#include "bridge_loop_avoidance.h"
30 31
31#include <linux/crc16.h> 32#include <linux/crc16.h>
32 33
33static void _tt_global_del(struct bat_priv *bat_priv, 34static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
34 struct tt_global_entry *tt_global_entry, 35 struct orig_node *orig_node);
35 const char *message);
36static void tt_purge(struct work_struct *work); 36static void tt_purge(struct work_struct *work);
37static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry);
37 38
38/* returns 1 if they are the same mac addr */ 39/* returns 1 if they are the same mac addr */
39static int compare_tt(const struct hlist_node *node, const void *data2) 40static int compare_tt(const struct hlist_node *node, const void *data2)
@@ -123,17 +124,31 @@ static void tt_global_entry_free_rcu(struct rcu_head *rcu)
123 tt_global_entry = container_of(tt_common_entry, struct tt_global_entry, 124 tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
124 common); 125 common);
125 126
126 if (tt_global_entry->orig_node)
127 orig_node_free_ref(tt_global_entry->orig_node);
128
129 kfree(tt_global_entry); 127 kfree(tt_global_entry);
130} 128}
131 129
132static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry) 130static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
133{ 131{
134 if (atomic_dec_and_test(&tt_global_entry->common.refcount)) 132 if (atomic_dec_and_test(&tt_global_entry->common.refcount)) {
133 tt_global_del_orig_list(tt_global_entry);
135 call_rcu(&tt_global_entry->common.rcu, 134 call_rcu(&tt_global_entry->common.rcu,
136 tt_global_entry_free_rcu); 135 tt_global_entry_free_rcu);
136 }
137}
138
139static void tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
140{
141 struct tt_orig_list_entry *orig_entry;
142
143 orig_entry = container_of(rcu, struct tt_orig_list_entry, rcu);
144 atomic_dec(&orig_entry->orig_node->tt_size);
145 orig_node_free_ref(orig_entry->orig_node);
146 kfree(orig_entry);
147}
148
149static void tt_orig_list_entry_free_ref(struct tt_orig_list_entry *orig_entry)
150{
151 call_rcu(&orig_entry->rcu, tt_orig_list_entry_free_rcu);
137} 152}
138 153
139static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr, 154static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
@@ -182,6 +197,9 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
182 struct bat_priv *bat_priv = netdev_priv(soft_iface); 197 struct bat_priv *bat_priv = netdev_priv(soft_iface);
183 struct tt_local_entry *tt_local_entry = NULL; 198 struct tt_local_entry *tt_local_entry = NULL;
184 struct tt_global_entry *tt_global_entry = NULL; 199 struct tt_global_entry *tt_global_entry = NULL;
200 struct hlist_head *head;
201 struct hlist_node *node;
202 struct tt_orig_list_entry *orig_entry;
185 int hash_added; 203 int hash_added;
186 204
187 tt_local_entry = tt_local_hash_find(bat_priv, addr); 205 tt_local_entry = tt_local_hash_find(bat_priv, addr);
@@ -232,14 +250,21 @@ void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
232 250
233 /* Check whether it is a roaming! */ 251 /* Check whether it is a roaming! */
234 if (tt_global_entry) { 252 if (tt_global_entry) {
235 /* This node is probably going to update its tt table */ 253 /* These node are probably going to update their tt table */
236 tt_global_entry->orig_node->tt_poss_change = true; 254 head = &tt_global_entry->orig_list;
237 /* The global entry has to be marked as ROAMING and has to be 255 rcu_read_lock();
238 * kept for consistency purpose */ 256 hlist_for_each_entry_rcu(orig_entry, node, head, list) {
257 orig_entry->orig_node->tt_poss_change = true;
258
259 send_roam_adv(bat_priv, tt_global_entry->common.addr,
260 orig_entry->orig_node);
261 }
262 rcu_read_unlock();
263 /* The global entry has to be marked as ROAMING and
264 * has to be kept for consistency purpose
265 */
239 tt_global_entry->common.flags |= TT_CLIENT_ROAM; 266 tt_global_entry->common.flags |= TT_CLIENT_ROAM;
240 tt_global_entry->roam_at = jiffies; 267 tt_global_entry->roam_at = jiffies;
241 send_roam_adv(bat_priv, tt_global_entry->common.addr,
242 tt_global_entry->orig_node);
243 } 268 }
244out: 269out:
245 if (tt_local_entry) 270 if (tt_local_entry)
@@ -490,33 +515,76 @@ static void tt_changes_list_free(struct bat_priv *bat_priv)
490 spin_unlock_bh(&bat_priv->tt_changes_list_lock); 515 spin_unlock_bh(&bat_priv->tt_changes_list_lock);
491} 516}
492 517
518/* find out if an orig_node is already in the list of a tt_global_entry.
519 * returns 1 if found, 0 otherwise
520 */
521static bool tt_global_entry_has_orig(const struct tt_global_entry *entry,
522 const struct orig_node *orig_node)
523{
524 struct tt_orig_list_entry *tmp_orig_entry;
525 const struct hlist_head *head;
526 struct hlist_node *node;
527 bool found = false;
528
529 rcu_read_lock();
530 head = &entry->orig_list;
531 hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) {
532 if (tmp_orig_entry->orig_node == orig_node) {
533 found = true;
534 break;
535 }
536 }
537 rcu_read_unlock();
538 return found;
539}
540
541static void tt_global_add_orig_entry(struct tt_global_entry *tt_global_entry,
542 struct orig_node *orig_node,
543 int ttvn)
544{
545 struct tt_orig_list_entry *orig_entry;
546
547 orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
548 if (!orig_entry)
549 return;
550
551 INIT_HLIST_NODE(&orig_entry->list);
552 atomic_inc(&orig_node->refcount);
553 atomic_inc(&orig_node->tt_size);
554 orig_entry->orig_node = orig_node;
555 orig_entry->ttvn = ttvn;
556
557 spin_lock_bh(&tt_global_entry->list_lock);
558 hlist_add_head_rcu(&orig_entry->list,
559 &tt_global_entry->orig_list);
560 spin_unlock_bh(&tt_global_entry->list_lock);
561}
562
493/* caller must hold orig_node refcount */ 563/* caller must hold orig_node refcount */
494int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node, 564int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
495 const unsigned char *tt_addr, uint8_t ttvn, bool roaming, 565 const unsigned char *tt_addr, uint8_t ttvn, bool roaming,
496 bool wifi) 566 bool wifi)
497{ 567{
498 struct tt_global_entry *tt_global_entry; 568 struct tt_global_entry *tt_global_entry = NULL;
499 struct orig_node *orig_node_tmp;
500 int ret = 0; 569 int ret = 0;
501 int hash_added; 570 int hash_added;
502 571
503 tt_global_entry = tt_global_hash_find(bat_priv, tt_addr); 572 tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);
504 573
505 if (!tt_global_entry) { 574 if (!tt_global_entry) {
506 tt_global_entry = 575 tt_global_entry = kzalloc(sizeof(*tt_global_entry),
507 kmalloc(sizeof(*tt_global_entry), 576 GFP_ATOMIC);
508 GFP_ATOMIC);
509 if (!tt_global_entry) 577 if (!tt_global_entry)
510 goto out; 578 goto out;
511 579
512 memcpy(tt_global_entry->common.addr, tt_addr, ETH_ALEN); 580 memcpy(tt_global_entry->common.addr, tt_addr, ETH_ALEN);
581
513 tt_global_entry->common.flags = NO_FLAGS; 582 tt_global_entry->common.flags = NO_FLAGS;
514 atomic_set(&tt_global_entry->common.refcount, 2);
515 /* Assign the new orig_node */
516 atomic_inc(&orig_node->refcount);
517 tt_global_entry->orig_node = orig_node;
518 tt_global_entry->ttvn = ttvn;
519 tt_global_entry->roam_at = 0; 583 tt_global_entry->roam_at = 0;
584 atomic_set(&tt_global_entry->common.refcount, 2);
585
586 INIT_HLIST_HEAD(&tt_global_entry->orig_list);
587 spin_lock_init(&tt_global_entry->list_lock);
520 588
521 hash_added = hash_add(bat_priv->tt_global_hash, compare_tt, 589 hash_added = hash_add(bat_priv->tt_global_hash, compare_tt,
522 choose_orig, &tt_global_entry->common, 590 choose_orig, &tt_global_entry->common,
@@ -527,19 +595,27 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
527 tt_global_entry_free_ref(tt_global_entry); 595 tt_global_entry_free_ref(tt_global_entry);
528 goto out_remove; 596 goto out_remove;
529 } 597 }
530 atomic_inc(&orig_node->tt_size); 598
599 tt_global_add_orig_entry(tt_global_entry, orig_node, ttvn);
531 } else { 600 } else {
532 if (tt_global_entry->orig_node != orig_node) { 601 /* there is already a global entry, use this one. */
533 atomic_dec(&tt_global_entry->orig_node->tt_size); 602
534 orig_node_tmp = tt_global_entry->orig_node; 603 /* If there is the TT_CLIENT_ROAM flag set, there is only one
535 atomic_inc(&orig_node->refcount); 604 * originator left in the list and we previously received a
536 tt_global_entry->orig_node = orig_node; 605 * delete + roaming change for this originator.
537 orig_node_free_ref(orig_node_tmp); 606 *
538 atomic_inc(&orig_node->tt_size); 607 * We should first delete the old originator before adding the
608 * new one.
609 */
610 if (tt_global_entry->common.flags & TT_CLIENT_ROAM) {
611 tt_global_del_orig_list(tt_global_entry);
612 tt_global_entry->common.flags &= ~TT_CLIENT_ROAM;
613 tt_global_entry->roam_at = 0;
539 } 614 }
540 tt_global_entry->common.flags = NO_FLAGS; 615
541 tt_global_entry->ttvn = ttvn; 616 if (!tt_global_entry_has_orig(tt_global_entry, orig_node))
542 tt_global_entry->roam_at = 0; 617 tt_global_add_orig_entry(tt_global_entry, orig_node,
618 ttvn);
543 } 619 }
544 620
545 if (wifi) 621 if (wifi)
@@ -560,6 +636,34 @@ out:
560 return ret; 636 return ret;
561} 637}
562 638
639/* print all orig nodes who announce the address for this global entry.
640 * it is assumed that the caller holds rcu_read_lock();
641 */
642static void tt_global_print_entry(struct tt_global_entry *tt_global_entry,
643 struct seq_file *seq)
644{
645 struct hlist_head *head;
646 struct hlist_node *node;
647 struct tt_orig_list_entry *orig_entry;
648 struct tt_common_entry *tt_common_entry;
649 uint16_t flags;
650 uint8_t last_ttvn;
651
652 tt_common_entry = &tt_global_entry->common;
653
654 head = &tt_global_entry->orig_list;
655
656 hlist_for_each_entry_rcu(orig_entry, node, head, list) {
657 flags = tt_common_entry->flags;
658 last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
659 seq_printf(seq, " * %pM (%3u) via %pM (%3u) [%c%c]\n",
660 tt_global_entry->common.addr, orig_entry->ttvn,
661 orig_entry->orig_node->orig, last_ttvn,
662 (flags & TT_CLIENT_ROAM ? 'R' : '.'),
663 (flags & TT_CLIENT_WIFI ? 'W' : '.'));
664 }
665}
666
563int tt_global_seq_print_text(struct seq_file *seq, void *offset) 667int tt_global_seq_print_text(struct seq_file *seq, void *offset)
564{ 668{
565 struct net_device *net_dev = (struct net_device *)seq->private; 669 struct net_device *net_dev = (struct net_device *)seq->private;
@@ -603,18 +707,7 @@ int tt_global_seq_print_text(struct seq_file *seq, void *offset)
603 tt_global_entry = container_of(tt_common_entry, 707 tt_global_entry = container_of(tt_common_entry,
604 struct tt_global_entry, 708 struct tt_global_entry,
605 common); 709 common);
606 seq_printf(seq, 710 tt_global_print_entry(tt_global_entry, seq);
607 " * %pM (%3u) via %pM (%3u) [%c%c]\n",
608 tt_global_entry->common.addr,
609 tt_global_entry->ttvn,
610 tt_global_entry->orig_node->orig,
611 (uint8_t) atomic_read(
612 &tt_global_entry->orig_node->
613 last_ttvn),
614 (tt_global_entry->common.flags &
615 TT_CLIENT_ROAM ? 'R' : '.'),
616 (tt_global_entry->common.flags &
617 TT_CLIENT_WIFI ? 'W' : '.'));
618 } 711 }
619 rcu_read_unlock(); 712 rcu_read_unlock();
620 } 713 }
@@ -624,59 +717,150 @@ out:
624 return ret; 717 return ret;
625} 718}
626 719
627static void _tt_global_del(struct bat_priv *bat_priv, 720/* deletes the orig list of a tt_global_entry */
628 struct tt_global_entry *tt_global_entry, 721static void tt_global_del_orig_list(struct tt_global_entry *tt_global_entry)
629 const char *message)
630{ 722{
631 if (!tt_global_entry) 723 struct hlist_head *head;
632 goto out; 724 struct hlist_node *node, *safe;
725 struct tt_orig_list_entry *orig_entry;
633 726
634 bat_dbg(DBG_TT, bat_priv, 727 spin_lock_bh(&tt_global_entry->list_lock);
635 "Deleting global tt entry %pM (via %pM): %s\n", 728 head = &tt_global_entry->orig_list;
636 tt_global_entry->common.addr, tt_global_entry->orig_node->orig, 729 hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
637 message); 730 hlist_del_rcu(node);
731 tt_orig_list_entry_free_ref(orig_entry);
732 }
733 spin_unlock_bh(&tt_global_entry->list_lock);
638 734
639 atomic_dec(&tt_global_entry->orig_node->tt_size); 735}
736
737static void tt_global_del_orig_entry(struct bat_priv *bat_priv,
738 struct tt_global_entry *tt_global_entry,
739 struct orig_node *orig_node,
740 const char *message)
741{
742 struct hlist_head *head;
743 struct hlist_node *node, *safe;
744 struct tt_orig_list_entry *orig_entry;
745
746 spin_lock_bh(&tt_global_entry->list_lock);
747 head = &tt_global_entry->orig_list;
748 hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
749 if (orig_entry->orig_node == orig_node) {
750 bat_dbg(DBG_TT, bat_priv,
751 "Deleting %pM from global tt entry %pM: %s\n",
752 orig_node->orig, tt_global_entry->common.addr,
753 message);
754 hlist_del_rcu(node);
755 tt_orig_list_entry_free_ref(orig_entry);
756 }
757 }
758 spin_unlock_bh(&tt_global_entry->list_lock);
759}
760
761static void tt_global_del_struct(struct bat_priv *bat_priv,
762 struct tt_global_entry *tt_global_entry,
763 const char *message)
764{
765 bat_dbg(DBG_TT, bat_priv,
766 "Deleting global tt entry %pM: %s\n",
767 tt_global_entry->common.addr, message);
640 768
641 hash_remove(bat_priv->tt_global_hash, compare_tt, choose_orig, 769 hash_remove(bat_priv->tt_global_hash, compare_tt, choose_orig,
642 tt_global_entry->common.addr); 770 tt_global_entry->common.addr);
643out: 771 tt_global_entry_free_ref(tt_global_entry);
644 if (tt_global_entry) 772
645 tt_global_entry_free_ref(tt_global_entry);
646} 773}
647 774
648void tt_global_del(struct bat_priv *bat_priv, 775/* If the client is to be deleted, we check if it is the last origantor entry
649 struct orig_node *orig_node, const unsigned char *addr, 776 * within tt_global entry. If yes, we set the TT_CLIENT_ROAM flag and the timer,
650 const char *message, bool roaming) 777 * otherwise we simply remove the originator scheduled for deletion.
778 */
779static void tt_global_del_roaming(struct bat_priv *bat_priv,
780 struct tt_global_entry *tt_global_entry,
781 struct orig_node *orig_node,
782 const char *message)
783{
784 bool last_entry = true;
785 struct hlist_head *head;
786 struct hlist_node *node;
787 struct tt_orig_list_entry *orig_entry;
788
789 /* no local entry exists, case 1:
790 * Check if this is the last one or if other entries exist.
791 */
792
793 rcu_read_lock();
794 head = &tt_global_entry->orig_list;
795 hlist_for_each_entry_rcu(orig_entry, node, head, list) {
796 if (orig_entry->orig_node != orig_node) {
797 last_entry = false;
798 break;
799 }
800 }
801 rcu_read_unlock();
802
803 if (last_entry) {
804 /* its the last one, mark for roaming. */
805 tt_global_entry->common.flags |= TT_CLIENT_ROAM;
806 tt_global_entry->roam_at = jiffies;
807 } else
808 /* there is another entry, we can simply delete this
809 * one and can still use the other one.
810 */
811 tt_global_del_orig_entry(bat_priv, tt_global_entry,
812 orig_node, message);
813}
814
815
816
817static void tt_global_del(struct bat_priv *bat_priv,
818 struct orig_node *orig_node,
819 const unsigned char *addr,
820 const char *message, bool roaming)
651{ 821{
652 struct tt_global_entry *tt_global_entry = NULL; 822 struct tt_global_entry *tt_global_entry = NULL;
653 struct tt_local_entry *tt_local_entry = NULL; 823 struct tt_local_entry *tt_local_entry = NULL;
654 824
655 tt_global_entry = tt_global_hash_find(bat_priv, addr); 825 tt_global_entry = tt_global_hash_find(bat_priv, addr);
656 if (!tt_global_entry || tt_global_entry->orig_node != orig_node) 826 if (!tt_global_entry)
657 goto out; 827 goto out;
658 828
659 if (!roaming) 829 if (!roaming) {
660 goto out_del; 830 tt_global_del_orig_entry(bat_priv, tt_global_entry, orig_node,
831 message);
832
833 if (hlist_empty(&tt_global_entry->orig_list))
834 tt_global_del_struct(bat_priv, tt_global_entry,
835 message);
836
837 goto out;
838 }
661 839
662 /* if we are deleting a global entry due to a roam 840 /* if we are deleting a global entry due to a roam
663 * event, there are two possibilities: 841 * event, there are two possibilities:
664 * 1) the client roamed from node A to node B => we mark 842 * 1) the client roamed from node A to node B => if there
843 * is only one originator left for this client, we mark
665 * it with TT_CLIENT_ROAM, we start a timer and we 844 * it with TT_CLIENT_ROAM, we start a timer and we
666 * wait for node B to claim it. In case of timeout 845 * wait for node B to claim it. In case of timeout
667 * the entry is purged. 846 * the entry is purged.
847 *
848 * If there are other originators left, we directly delete
849 * the originator.
668 * 2) the client roamed to us => we can directly delete 850 * 2) the client roamed to us => we can directly delete
669 * the global entry, since it is useless now. */ 851 * the global entry, since it is useless now. */
852
670 tt_local_entry = tt_local_hash_find(bat_priv, 853 tt_local_entry = tt_local_hash_find(bat_priv,
671 tt_global_entry->common.addr); 854 tt_global_entry->common.addr);
672 if (!tt_local_entry) { 855 if (tt_local_entry) {
673 tt_global_entry->common.flags |= TT_CLIENT_ROAM; 856 /* local entry exists, case 2: client roamed to us. */
674 tt_global_entry->roam_at = jiffies; 857 tt_global_del_orig_list(tt_global_entry);
675 goto out; 858 tt_global_del_struct(bat_priv, tt_global_entry, message);
676 } 859 } else
860 /* no local entry exists, case 1: check for roaming */
861 tt_global_del_roaming(bat_priv, tt_global_entry, orig_node,
862 message);
677 863
678out_del:
679 _tt_global_del(bat_priv, tt_global_entry, message);
680 864
681out: 865out:
682 if (tt_global_entry) 866 if (tt_global_entry)
@@ -709,11 +893,14 @@ void tt_global_del_orig(struct bat_priv *bat_priv,
709 tt_global_entry = container_of(tt_common_entry, 893 tt_global_entry = container_of(tt_common_entry,
710 struct tt_global_entry, 894 struct tt_global_entry,
711 common); 895 common);
712 if (tt_global_entry->orig_node == orig_node) { 896
897 tt_global_del_orig_entry(bat_priv, tt_global_entry,
898 orig_node, message);
899
900 if (hlist_empty(&tt_global_entry->orig_list)) {
713 bat_dbg(DBG_TT, bat_priv, 901 bat_dbg(DBG_TT, bat_priv,
714 "Deleting global tt entry %pM (via %pM): %s\n", 902 "Deleting global tt entry %pM: %s\n",
715 tt_global_entry->common.addr, 903 tt_global_entry->common.addr,
716 tt_global_entry->orig_node->orig,
717 message); 904 message);
718 hlist_del_rcu(node); 905 hlist_del_rcu(node);
719 tt_global_entry_free_ref(tt_global_entry); 906 tt_global_entry_free_ref(tt_global_entry);
@@ -754,7 +941,7 @@ static void tt_global_roam_purge(struct bat_priv *bat_priv)
754 bat_dbg(DBG_TT, bat_priv, 941 bat_dbg(DBG_TT, bat_priv,
755 "Deleting global tt entry (%pM): Roaming timeout\n", 942 "Deleting global tt entry (%pM): Roaming timeout\n",
756 tt_global_entry->common.addr); 943 tt_global_entry->common.addr);
757 atomic_dec(&tt_global_entry->orig_node->tt_size); 944
758 hlist_del_rcu(node); 945 hlist_del_rcu(node);
759 tt_global_entry_free_ref(tt_global_entry); 946 tt_global_entry_free_ref(tt_global_entry);
760 } 947 }
@@ -817,6 +1004,11 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv,
817 struct tt_local_entry *tt_local_entry = NULL; 1004 struct tt_local_entry *tt_local_entry = NULL;
818 struct tt_global_entry *tt_global_entry = NULL; 1005 struct tt_global_entry *tt_global_entry = NULL;
819 struct orig_node *orig_node = NULL; 1006 struct orig_node *orig_node = NULL;
1007 struct neigh_node *router = NULL;
1008 struct hlist_head *head;
1009 struct hlist_node *node;
1010 struct tt_orig_list_entry *orig_entry;
1011 int best_tq;
820 1012
821 if (src && atomic_read(&bat_priv->ap_isolation)) { 1013 if (src && atomic_read(&bat_priv->ap_isolation)) {
822 tt_local_entry = tt_local_hash_find(bat_priv, src); 1014 tt_local_entry = tt_local_hash_find(bat_priv, src);
@@ -833,11 +1025,25 @@ struct orig_node *transtable_search(struct bat_priv *bat_priv,
833 if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry)) 1025 if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry))
834 goto out; 1026 goto out;
835 1027
836 if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount)) 1028 best_tq = 0;
837 goto out;
838 1029
839 orig_node = tt_global_entry->orig_node; 1030 rcu_read_lock();
1031 head = &tt_global_entry->orig_list;
1032 hlist_for_each_entry_rcu(orig_entry, node, head, list) {
1033 router = orig_node_get_router(orig_entry->orig_node);
1034 if (!router)
1035 continue;
840 1036
1037 if (router->tq_avg > best_tq) {
1038 orig_node = orig_entry->orig_node;
1039 best_tq = router->tq_avg;
1040 }
1041 neigh_node_free_ref(router);
1042 }
1043 /* found anything? */
1044 if (orig_node && !atomic_inc_not_zero(&orig_node->refcount))
1045 orig_node = NULL;
1046 rcu_read_unlock();
841out: 1047out:
842 if (tt_global_entry) 1048 if (tt_global_entry)
843 tt_global_entry_free_ref(tt_global_entry); 1049 tt_global_entry_free_ref(tt_global_entry);
@@ -848,7 +1054,8 @@ out:
848} 1054}
849 1055
850/* Calculates the checksum of the local table of a given orig_node */ 1056/* Calculates the checksum of the local table of a given orig_node */
851uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node) 1057static uint16_t tt_global_crc(struct bat_priv *bat_priv,
1058 struct orig_node *orig_node)
852{ 1059{
853 uint16_t total = 0, total_one; 1060 uint16_t total = 0, total_one;
854 struct hashtable_t *hash = bat_priv->tt_global_hash; 1061 struct hashtable_t *hash = bat_priv->tt_global_hash;
@@ -868,20 +1075,26 @@ uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node)
868 tt_global_entry = container_of(tt_common_entry, 1075 tt_global_entry = container_of(tt_common_entry,
869 struct tt_global_entry, 1076 struct tt_global_entry,
870 common); 1077 common);
871 if (compare_eth(tt_global_entry->orig_node, 1078 /* Roaming clients are in the global table for
872 orig_node)) { 1079 * consistency only. They don't have to be
873 /* Roaming clients are in the global table for 1080 * taken into account while computing the
874 * consistency only. They don't have to be 1081 * global crc
875 * taken into account while computing the 1082 */
876 * global crc */ 1083 if (tt_global_entry->common.flags & TT_CLIENT_ROAM)
877 if (tt_common_entry->flags & TT_CLIENT_ROAM) 1084 continue;
878 continue; 1085
879 total_one = 0; 1086 /* find out if this global entry is announced by this
880 for (j = 0; j < ETH_ALEN; j++) 1087 * originator
881 total_one = crc16_byte(total_one, 1088 */
882 tt_common_entry->addr[j]); 1089 if (!tt_global_entry_has_orig(tt_global_entry,
883 total ^= total_one; 1090 orig_node))
884 } 1091 continue;
1092
1093 total_one = 0;
1094 for (j = 0; j < ETH_ALEN; j++)
1095 total_one = crc16_byte(total_one,
1096 tt_global_entry->common.addr[j]);
1097 total ^= total_one;
885 } 1098 }
886 rcu_read_unlock(); 1099 rcu_read_unlock();
887 } 1100 }
@@ -936,8 +1149,10 @@ static void tt_req_list_free(struct bat_priv *bat_priv)
936 spin_unlock_bh(&bat_priv->tt_req_list_lock); 1149 spin_unlock_bh(&bat_priv->tt_req_list_lock);
937} 1150}
938 1151
939void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node, 1152static void tt_save_orig_buffer(struct bat_priv *bat_priv,
940 const unsigned char *tt_buff, uint8_t tt_num_changes) 1153 struct orig_node *orig_node,
1154 const unsigned char *tt_buff,
1155 uint8_t tt_num_changes)
941{ 1156{
942 uint16_t tt_buff_len = tt_len(tt_num_changes); 1157 uint16_t tt_buff_len = tt_len(tt_num_changes);
943 1158
@@ -1020,7 +1235,7 @@ static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr)
1020 tt_global_entry = container_of(tt_common_entry, struct tt_global_entry, 1235 tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
1021 common); 1236 common);
1022 1237
1023 return (tt_global_entry->orig_node == orig_node); 1238 return tt_global_entry_has_orig(tt_global_entry, orig_node);
1024} 1239}
1025 1240
1026static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn, 1241static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
@@ -1124,7 +1339,7 @@ static int send_tt_request(struct bat_priv *bat_priv,
1124 memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN); 1339 memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
1125 tt_request->header.ttl = TTL; 1340 tt_request->header.ttl = TTL;
1126 tt_request->ttvn = ttvn; 1341 tt_request->ttvn = ttvn;
1127 tt_request->tt_data = tt_crc; 1342 tt_request->tt_data = htons(tt_crc);
1128 tt_request->flags = TT_REQUEST; 1343 tt_request->flags = TT_REQUEST;
1129 1344
1130 if (full_table) 1345 if (full_table)
@@ -1401,10 +1616,15 @@ out:
1401bool send_tt_response(struct bat_priv *bat_priv, 1616bool send_tt_response(struct bat_priv *bat_priv,
1402 struct tt_query_packet *tt_request) 1617 struct tt_query_packet *tt_request)
1403{ 1618{
1404 if (is_my_mac(tt_request->dst)) 1619 if (is_my_mac(tt_request->dst)) {
1620 /* don't answer backbone gws! */
1621 if (bla_is_backbone_gw_orig(bat_priv, tt_request->src))
1622 return true;
1623
1405 return send_my_tt_response(bat_priv, tt_request); 1624 return send_my_tt_response(bat_priv, tt_request);
1406 else 1625 } else {
1407 return send_other_tt_response(bat_priv, tt_request); 1626 return send_other_tt_response(bat_priv, tt_request);
1627 }
1408} 1628}
1409 1629
1410static void _tt_update_changes(struct bat_priv *bat_priv, 1630static void _tt_update_changes(struct bat_priv *bat_priv,
@@ -1508,6 +1728,10 @@ void handle_tt_response(struct bat_priv *bat_priv,
1508 tt_response->src, tt_response->ttvn, tt_response->tt_data, 1728 tt_response->src, tt_response->ttvn, tt_response->tt_data,
1509 (tt_response->flags & TT_FULL_TABLE ? 'F' : '.')); 1729 (tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));
1510 1730
1731 /* we should have never asked a backbone gw */
1732 if (bla_is_backbone_gw_orig(bat_priv, tt_response->src))
1733 goto out;
1734
1511 orig_node = orig_hash_find(bat_priv, tt_response->src); 1735 orig_node = orig_hash_find(bat_priv, tt_response->src);
1512 if (!orig_node) 1736 if (!orig_node)
1513 goto out; 1737 goto out;
@@ -1627,8 +1851,8 @@ unlock:
1627 return ret; 1851 return ret;
1628} 1852}
1629 1853
1630void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client, 1854static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
1631 struct orig_node *orig_node) 1855 struct orig_node *orig_node)
1632{ 1856{
1633 struct neigh_node *neigh_node = NULL; 1857 struct neigh_node *neigh_node = NULL;
1634 struct sk_buff *skb = NULL; 1858 struct sk_buff *skb = NULL;
@@ -1796,6 +2020,8 @@ void tt_commit_changes(struct bat_priv *bat_priv)
1796 2020
1797 /* Increment the TTVN only once per OGM interval */ 2021 /* Increment the TTVN only once per OGM interval */
1798 atomic_inc(&bat_priv->ttvn); 2022 atomic_inc(&bat_priv->ttvn);
2023 bat_dbg(DBG_TT, bat_priv, "Local changes committed, updating to ttvn %u\n",
2024 (uint8_t)atomic_read(&bat_priv->ttvn));
1799 bat_priv->tt_poss_change = false; 2025 bat_priv->tt_poss_change = false;
1800} 2026}
1801 2027
@@ -1836,6 +2062,10 @@ void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
1836 uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn); 2062 uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
1837 bool full_table = true; 2063 bool full_table = true;
1838 2064
2065 /* don't care about a backbone gateways updates. */
2066 if (bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
2067 return;
2068
1839 /* orig table not initialised AND first diff is in the OGM OR the ttvn 2069 /* orig table not initialised AND first diff is in the OGM OR the ttvn
1840 * increased by one -> we can apply the attached changes */ 2070 * increased by one -> we can apply the attached changes */
1841 if ((!orig_node->tt_initialised && ttvn == 1) || 2071 if ((!orig_node->tt_initialised && ttvn == 1) ||
@@ -1873,6 +2103,7 @@ void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
1873 } else { 2103 } else {
1874 /* if we missed more than one change or our tables are not 2104 /* if we missed more than one change or our tables are not
1875 * in sync anymore -> request fresh tt data */ 2105 * in sync anymore -> request fresh tt data */
2106
1876 if (!orig_node->tt_initialised || ttvn != orig_ttvn || 2107 if (!orig_node->tt_initialised || ttvn != orig_ttvn ||
1877 orig_node->tt_crc != tt_crc) { 2108 orig_node->tt_crc != tt_crc) {
1878request_table: 2109request_table:
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index c753633b1da1..bfebe26edd8e 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -39,23 +39,15 @@ int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
39int tt_global_seq_print_text(struct seq_file *seq, void *offset); 39int tt_global_seq_print_text(struct seq_file *seq, void *offset);
40void tt_global_del_orig(struct bat_priv *bat_priv, 40void tt_global_del_orig(struct bat_priv *bat_priv,
41 struct orig_node *orig_node, const char *message); 41 struct orig_node *orig_node, const char *message);
42void tt_global_del(struct bat_priv *bat_priv,
43 struct orig_node *orig_node, const unsigned char *addr,
44 const char *message, bool roaming);
45struct orig_node *transtable_search(struct bat_priv *bat_priv, 42struct orig_node *transtable_search(struct bat_priv *bat_priv,
46 const uint8_t *src, const uint8_t *addr); 43 const uint8_t *src, const uint8_t *addr);
47void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node,
48 const unsigned char *tt_buff, uint8_t tt_num_changes);
49uint16_t tt_local_crc(struct bat_priv *bat_priv); 44uint16_t tt_local_crc(struct bat_priv *bat_priv);
50uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node);
51void tt_free(struct bat_priv *bat_priv); 45void tt_free(struct bat_priv *bat_priv);
52bool send_tt_response(struct bat_priv *bat_priv, 46bool send_tt_response(struct bat_priv *bat_priv,
53 struct tt_query_packet *tt_request); 47 struct tt_query_packet *tt_request);
54bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr); 48bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr);
55void handle_tt_response(struct bat_priv *bat_priv, 49void handle_tt_response(struct bat_priv *bat_priv,
56 struct tt_query_packet *tt_response); 50 struct tt_query_packet *tt_response);
57void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
58 struct orig_node *orig_node);
59void tt_commit_changes(struct bat_priv *bat_priv); 51void tt_commit_changes(struct bat_priv *bat_priv);
60bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst); 52bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst);
61void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, 53void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 302efb523475..2f4848b776a7 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -27,7 +27,7 @@
27#include "packet.h" 27#include "packet.h"
28#include "bitarray.h" 28#include "bitarray.h"
29 29
30#define BAT_HEADER_LEN (sizeof(struct ethhdr) + \ 30#define BAT_HEADER_LEN (ETH_HLEN + \
31 ((sizeof(struct unicast_packet) > sizeof(struct bcast_packet) ? \ 31 ((sizeof(struct unicast_packet) > sizeof(struct bcast_packet) ? \
32 sizeof(struct unicast_packet) : \ 32 sizeof(struct unicast_packet) : \
33 sizeof(struct bcast_packet)))) 33 sizeof(struct bcast_packet))))
@@ -90,7 +90,7 @@ struct orig_node {
90 bool tt_poss_change; 90 bool tt_poss_change;
91 uint32_t last_real_seqno; 91 uint32_t last_real_seqno;
92 uint8_t last_ttl; 92 uint8_t last_ttl;
93 unsigned long bcast_bits[NUM_WORDS]; 93 DECLARE_BITMAP(bcast_bits, TQ_LOCAL_WINDOW_SIZE);
94 uint32_t last_bcast_seqno; 94 uint32_t last_bcast_seqno;
95 struct hlist_head neigh_list; 95 struct hlist_head neigh_list;
96 struct list_head frag_list; 96 struct list_head frag_list;
@@ -132,7 +132,7 @@ struct neigh_node {
132 uint8_t last_ttl; 132 uint8_t last_ttl;
133 struct list_head bonding_list; 133 struct list_head bonding_list;
134 unsigned long last_valid; 134 unsigned long last_valid;
135 unsigned long real_bits[NUM_WORDS]; 135 DECLARE_BITMAP(real_bits, TQ_LOCAL_WINDOW_SIZE);
136 atomic_t refcount; 136 atomic_t refcount;
137 struct rcu_head rcu; 137 struct rcu_head rcu;
138 struct orig_node *orig_node; 138 struct orig_node *orig_node;
@@ -140,6 +140,13 @@ struct neigh_node {
140 spinlock_t tq_lock; /* protects: tq_recv, tq_index */ 140 spinlock_t tq_lock; /* protects: tq_recv, tq_index */
141}; 141};
142 142
143#ifdef CONFIG_BATMAN_ADV_BLA
144struct bcast_duplist_entry {
145 uint8_t orig[ETH_ALEN];
146 uint16_t crc;
147 unsigned long entrytime;
148};
149#endif
143 150
144struct bat_priv { 151struct bat_priv {
145 atomic_t mesh_state; 152 atomic_t mesh_state;
@@ -148,6 +155,7 @@ struct bat_priv {
148 atomic_t bonding; /* boolean */ 155 atomic_t bonding; /* boolean */
149 atomic_t fragmentation; /* boolean */ 156 atomic_t fragmentation; /* boolean */
150 atomic_t ap_isolation; /* boolean */ 157 atomic_t ap_isolation; /* boolean */
158 atomic_t bridge_loop_avoidance; /* boolean */
151 atomic_t vis_mode; /* VIS_TYPE_* */ 159 atomic_t vis_mode; /* VIS_TYPE_* */
152 atomic_t gw_mode; /* GW_MODE_* */ 160 atomic_t gw_mode; /* GW_MODE_* */
153 atomic_t gw_sel_class; /* uint */ 161 atomic_t gw_sel_class; /* uint */
@@ -161,6 +169,7 @@ struct bat_priv {
161 atomic_t ttvn; /* translation table version number */ 169 atomic_t ttvn; /* translation table version number */
162 atomic_t tt_ogm_append_cnt; 170 atomic_t tt_ogm_append_cnt;
163 atomic_t tt_local_changes; /* changes registered in a OGM interval */ 171 atomic_t tt_local_changes; /* changes registered in a OGM interval */
172 atomic_t bla_num_requests; /* number of bla requests in flight */
164 /* The tt_poss_change flag is used to detect an ongoing roaming phase. 173 /* The tt_poss_change flag is used to detect an ongoing roaming phase.
165 * If true, then I received a Roaming_adv and I have to inspect every 174 * If true, then I received a Roaming_adv and I have to inspect every
166 * packet directed to me to check whether I am still the true 175 * packet directed to me to check whether I am still the true
@@ -174,15 +183,23 @@ struct bat_priv {
174 struct hlist_head forw_bat_list; 183 struct hlist_head forw_bat_list;
175 struct hlist_head forw_bcast_list; 184 struct hlist_head forw_bcast_list;
176 struct hlist_head gw_list; 185 struct hlist_head gw_list;
177 struct hlist_head softif_neigh_vids;
178 struct list_head tt_changes_list; /* tracks changes in a OGM int */ 186 struct list_head tt_changes_list; /* tracks changes in a OGM int */
179 struct list_head vis_send_list; 187 struct list_head vis_send_list;
180 struct hashtable_t *orig_hash; 188 struct hashtable_t *orig_hash;
181 struct hashtable_t *tt_local_hash; 189 struct hashtable_t *tt_local_hash;
182 struct hashtable_t *tt_global_hash; 190 struct hashtable_t *tt_global_hash;
191#ifdef CONFIG_BATMAN_ADV_BLA
192 struct hashtable_t *claim_hash;
193 struct hashtable_t *backbone_hash;
194#endif
183 struct list_head tt_req_list; /* list of pending tt_requests */ 195 struct list_head tt_req_list; /* list of pending tt_requests */
184 struct list_head tt_roam_list; 196 struct list_head tt_roam_list;
185 struct hashtable_t *vis_hash; 197 struct hashtable_t *vis_hash;
198#ifdef CONFIG_BATMAN_ADV_BLA
199 struct bcast_duplist_entry bcast_duplist[DUPLIST_SIZE];
200 int bcast_duplist_curr;
201 struct bla_claim_dst claim_dest;
202#endif
186 spinlock_t forw_bat_list_lock; /* protects forw_bat_list */ 203 spinlock_t forw_bat_list_lock; /* protects forw_bat_list */
187 spinlock_t forw_bcast_list_lock; /* protects */ 204 spinlock_t forw_bcast_list_lock; /* protects */
188 spinlock_t tt_changes_list_lock; /* protects tt_changes */ 205 spinlock_t tt_changes_list_lock; /* protects tt_changes */
@@ -191,8 +208,6 @@ struct bat_priv {
191 spinlock_t gw_list_lock; /* protects gw_list and curr_gw */ 208 spinlock_t gw_list_lock; /* protects gw_list and curr_gw */
192 spinlock_t vis_hash_lock; /* protects vis_hash */ 209 spinlock_t vis_hash_lock; /* protects vis_hash */
193 spinlock_t vis_list_lock; /* protects vis_info::recv_list */ 210 spinlock_t vis_list_lock; /* protects vis_info::recv_list */
194 spinlock_t softif_neigh_lock; /* protects soft-interface neigh list */
195 spinlock_t softif_neigh_vid_lock; /* protects soft-interface vid list */
196 atomic_t num_local_tt; 211 atomic_t num_local_tt;
197 /* Checksum of the local table, recomputed before sending a new OGM */ 212 /* Checksum of the local table, recomputed before sending a new OGM */
198 atomic_t tt_crc; 213 atomic_t tt_crc;
@@ -202,6 +217,7 @@ struct bat_priv {
202 struct delayed_work tt_work; 217 struct delayed_work tt_work;
203 struct delayed_work orig_work; 218 struct delayed_work orig_work;
204 struct delayed_work vis_work; 219 struct delayed_work vis_work;
220 struct delayed_work bla_work;
205 struct gw_node __rcu *curr_gw; /* rcu protected pointer */ 221 struct gw_node __rcu *curr_gw; /* rcu protected pointer */
206 atomic_t gw_reselect; 222 atomic_t gw_reselect;
207 struct hard_iface __rcu *primary_if; /* rcu protected pointer */ 223 struct hard_iface __rcu *primary_if; /* rcu protected pointer */
@@ -239,10 +255,41 @@ struct tt_local_entry {
239 255
240struct tt_global_entry { 256struct tt_global_entry {
241 struct tt_common_entry common; 257 struct tt_common_entry common;
258 struct hlist_head orig_list;
259 spinlock_t list_lock; /* protects the list */
260 unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */
261};
262
263struct tt_orig_list_entry {
242 struct orig_node *orig_node; 264 struct orig_node *orig_node;
243 uint8_t ttvn; 265 uint8_t ttvn;
244 unsigned long roam_at; /* time at which TT_GLOBAL_ROAM was set */ 266 struct rcu_head rcu;
267 struct hlist_node list;
268};
269
270#ifdef CONFIG_BATMAN_ADV_BLA
271struct backbone_gw {
272 uint8_t orig[ETH_ALEN];
273 short vid; /* used VLAN ID */
274 struct hlist_node hash_entry;
275 struct bat_priv *bat_priv;
276 unsigned long lasttime; /* last time we heard of this backbone gw */
277 atomic_t request_sent;
278 atomic_t refcount;
279 struct rcu_head rcu;
280 uint16_t crc; /* crc checksum over all claims */
281};
282
283struct claim {
284 uint8_t addr[ETH_ALEN];
285 short vid;
286 struct backbone_gw *backbone_gw;
287 unsigned long lasttime; /* last time we heard of claim (locals only) */
288 struct rcu_head rcu;
289 atomic_t refcount;
290 struct hlist_node hash_entry;
245}; 291};
292#endif
246 293
247struct tt_change_node { 294struct tt_change_node {
248 struct list_head list; 295 struct list_head list;
@@ -327,31 +374,15 @@ struct recvlist_node {
327 uint8_t mac[ETH_ALEN]; 374 uint8_t mac[ETH_ALEN];
328}; 375};
329 376
330struct softif_neigh_vid {
331 struct hlist_node list;
332 struct bat_priv *bat_priv;
333 short vid;
334 atomic_t refcount;
335 struct softif_neigh __rcu *softif_neigh;
336 struct rcu_head rcu;
337 struct hlist_head softif_neigh_list;
338};
339
340struct softif_neigh {
341 struct hlist_node list;
342 uint8_t addr[ETH_ALEN];
343 unsigned long last_seen;
344 atomic_t refcount;
345 struct rcu_head rcu;
346};
347
348struct bat_algo_ops { 377struct bat_algo_ops {
349 struct hlist_node list; 378 struct hlist_node list;
350 char *name; 379 char *name;
351 /* init OGM when hard-interface is enabled */ 380 /* init routing info when hard-interface is enabled */
352 void (*bat_ogm_init)(struct hard_iface *hard_iface); 381 int (*bat_iface_enable)(struct hard_iface *hard_iface);
353 /* init primary OGM when primary interface is selected */ 382 /* de-init routing info when hard-interface is disabled */
354 void (*bat_ogm_init_primary)(struct hard_iface *hard_iface); 383 void (*bat_iface_disable)(struct hard_iface *hard_iface);
384 /* called when primary interface is selected / changed */
385 void (*bat_primary_iface_set)(struct hard_iface *hard_iface);
355 /* init mac addresses of the OGM belonging to this hard-interface */ 386 /* init mac addresses of the OGM belonging to this hard-interface */
356 void (*bat_ogm_update_mac)(struct hard_iface *hard_iface); 387 void (*bat_ogm_update_mac)(struct hard_iface *hard_iface);
357 /* prepare a new outgoing OGM for the send queue */ 388 /* prepare a new outgoing OGM for the send queue */
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index c4a5b8cafada..cec216fb77c7 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -434,12 +434,12 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv,
434 return NULL; 434 return NULL;
435 435
436 info->skb_packet = dev_alloc_skb(sizeof(*packet) + vis_info_len + 436 info->skb_packet = dev_alloc_skb(sizeof(*packet) + vis_info_len +
437 sizeof(struct ethhdr)); 437 ETH_HLEN);
438 if (!info->skb_packet) { 438 if (!info->skb_packet) {
439 kfree(info); 439 kfree(info);
440 return NULL; 440 return NULL;
441 } 441 }
442 skb_reserve(info->skb_packet, sizeof(struct ethhdr)); 442 skb_reserve(info->skb_packet, ETH_HLEN);
443 packet = (struct vis_packet *)skb_put(info->skb_packet, sizeof(*packet) 443 packet = (struct vis_packet *)skb_put(info->skb_packet, sizeof(*packet)
444 + vis_info_len); 444 + vis_info_len);
445 445
@@ -894,11 +894,11 @@ int vis_init(struct bat_priv *bat_priv)
894 894
895 bat_priv->my_vis_info->skb_packet = dev_alloc_skb(sizeof(*packet) + 895 bat_priv->my_vis_info->skb_packet = dev_alloc_skb(sizeof(*packet) +
896 MAX_VIS_PACKET_SIZE + 896 MAX_VIS_PACKET_SIZE +
897 sizeof(struct ethhdr)); 897 ETH_HLEN);
898 if (!bat_priv->my_vis_info->skb_packet) 898 if (!bat_priv->my_vis_info->skb_packet)
899 goto free_info; 899 goto free_info;
900 900
901 skb_reserve(bat_priv->my_vis_info->skb_packet, sizeof(struct ethhdr)); 901 skb_reserve(bat_priv->my_vis_info->skb_packet, ETH_HLEN);
902 packet = (struct vis_packet *)skb_put(bat_priv->my_vis_info->skb_packet, 902 packet = (struct vis_packet *)skb_put(bat_priv->my_vis_info->skb_packet,
903 sizeof(*packet)); 903 sizeof(*packet));
904 904
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index ba829de84423..d6e5929458b1 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -317,6 +317,9 @@ static const struct net_device_ops br_netdev_ops = {
317 .ndo_add_slave = br_add_slave, 317 .ndo_add_slave = br_add_slave,
318 .ndo_del_slave = br_del_slave, 318 .ndo_del_slave = br_del_slave,
319 .ndo_fix_features = br_fix_features, 319 .ndo_fix_features = br_fix_features,
320 .ndo_fdb_add = br_fdb_add,
321 .ndo_fdb_del = br_fdb_delete,
322 .ndo_fdb_dump = br_fdb_dump,
320}; 323};
321 324
322static void br_dev_free(struct net_device *dev) 325static void br_dev_free(struct net_device *dev)
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 5ba0c844d508..5945c54bc2de 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -487,14 +487,14 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
487 ndm->ndm_ifindex = fdb->dst ? fdb->dst->dev->ifindex : br->dev->ifindex; 487 ndm->ndm_ifindex = fdb->dst ? fdb->dst->dev->ifindex : br->dev->ifindex;
488 ndm->ndm_state = fdb_to_nud(fdb); 488 ndm->ndm_state = fdb_to_nud(fdb);
489 489
490 NLA_PUT(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr); 490 if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr))
491 491 goto nla_put_failure;
492 ci.ndm_used = jiffies_to_clock_t(now - fdb->used); 492 ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
493 ci.ndm_confirmed = 0; 493 ci.ndm_confirmed = 0;
494 ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated); 494 ci.ndm_updated = jiffies_to_clock_t(now - fdb->updated);
495 ci.ndm_refcnt = 0; 495 ci.ndm_refcnt = 0;
496 NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci); 496 if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
497 497 goto nla_put_failure;
498 return nlmsg_end(skb, nlh); 498 return nlmsg_end(skb, nlh);
499 499
500nla_put_failure: 500nla_put_failure:
@@ -535,44 +535,38 @@ errout:
535} 535}
536 536
537/* Dump information about entries, in response to GETNEIGH */ 537/* Dump information about entries, in response to GETNEIGH */
538int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb) 538int br_fdb_dump(struct sk_buff *skb,
539 struct netlink_callback *cb,
540 struct net_device *dev,
541 int idx)
539{ 542{
540 struct net *net = sock_net(skb->sk); 543 struct net_bridge *br = netdev_priv(dev);
541 struct net_device *dev; 544 int i;
542 int idx = 0;
543
544 rcu_read_lock();
545 for_each_netdev_rcu(net, dev) {
546 struct net_bridge *br = netdev_priv(dev);
547 int i;
548 545
549 if (!(dev->priv_flags & IFF_EBRIDGE)) 546 if (!(dev->priv_flags & IFF_EBRIDGE))
550 continue; 547 goto out;
551 548
552 for (i = 0; i < BR_HASH_SIZE; i++) { 549 for (i = 0; i < BR_HASH_SIZE; i++) {
553 struct hlist_node *h; 550 struct hlist_node *h;
554 struct net_bridge_fdb_entry *f; 551 struct net_bridge_fdb_entry *f;
555
556 hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) {
557 if (idx < cb->args[0])
558 goto skip;
559 552
560 if (fdb_fill_info(skb, br, f, 553 hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) {
561 NETLINK_CB(cb->skb).pid, 554 if (idx < cb->args[0])
562 cb->nlh->nlmsg_seq, 555 goto skip;
563 RTM_NEWNEIGH, 556
564 NLM_F_MULTI) < 0) 557 if (fdb_fill_info(skb, br, f,
565 break; 558 NETLINK_CB(cb->skb).pid,
559 cb->nlh->nlmsg_seq,
560 RTM_NEWNEIGH,
561 NLM_F_MULTI) < 0)
562 break;
566skip: 563skip:
567 ++idx; 564 ++idx;
568 }
569 } 565 }
570 } 566 }
571 rcu_read_unlock();
572 567
573 cb->args[0] = idx; 568out:
574 569 return idx;
575 return skb->len;
576} 570}
577 571
578/* Update (create or replace) forwarding database entry */ 572/* Update (create or replace) forwarding database entry */
@@ -614,43 +608,11 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
614} 608}
615 609
616/* Add new permanent fdb entry with RTM_NEWNEIGH */ 610/* Add new permanent fdb entry with RTM_NEWNEIGH */
617int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 611int br_fdb_add(struct ndmsg *ndm, struct net_device *dev,
612 unsigned char *addr, u16 nlh_flags)
618{ 613{
619 struct net *net = sock_net(skb->sk);
620 struct ndmsg *ndm;
621 struct nlattr *tb[NDA_MAX+1];
622 struct net_device *dev;
623 struct net_bridge_port *p; 614 struct net_bridge_port *p;
624 const __u8 *addr; 615 int err = 0;
625 int err;
626
627 ASSERT_RTNL();
628 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
629 if (err < 0)
630 return err;
631
632 ndm = nlmsg_data(nlh);
633 if (ndm->ndm_ifindex == 0) {
634 pr_info("bridge: RTM_NEWNEIGH with invalid ifindex\n");
635 return -EINVAL;
636 }
637
638 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
639 if (dev == NULL) {
640 pr_info("bridge: RTM_NEWNEIGH with unknown ifindex\n");
641 return -ENODEV;
642 }
643
644 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
645 pr_info("bridge: RTM_NEWNEIGH with invalid address\n");
646 return -EINVAL;
647 }
648
649 addr = nla_data(tb[NDA_LLADDR]);
650 if (!is_valid_ether_addr(addr)) {
651 pr_info("bridge: RTM_NEWNEIGH with invalid ether address\n");
652 return -EINVAL;
653 }
654 616
655 if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) { 617 if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
656 pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state); 618 pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state);
@@ -670,14 +632,14 @@ int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
670 rcu_read_unlock(); 632 rcu_read_unlock();
671 } else { 633 } else {
672 spin_lock_bh(&p->br->hash_lock); 634 spin_lock_bh(&p->br->hash_lock);
673 err = fdb_add_entry(p, addr, ndm->ndm_state, nlh->nlmsg_flags); 635 err = fdb_add_entry(p, addr, ndm->ndm_state, nlh_flags);
674 spin_unlock_bh(&p->br->hash_lock); 636 spin_unlock_bh(&p->br->hash_lock);
675 } 637 }
676 638
677 return err; 639 return err;
678} 640}
679 641
680static int fdb_delete_by_addr(struct net_bridge_port *p, const u8 *addr) 642static int fdb_delete_by_addr(struct net_bridge_port *p, u8 *addr)
681{ 643{
682 struct net_bridge *br = p->br; 644 struct net_bridge *br = p->br;
683 struct hlist_head *head = &br->hash[br_mac_hash(addr)]; 645 struct hlist_head *head = &br->hash[br_mac_hash(addr)];
@@ -692,40 +654,12 @@ static int fdb_delete_by_addr(struct net_bridge_port *p, const u8 *addr)
692} 654}
693 655
694/* Remove neighbor entry with RTM_DELNEIGH */ 656/* Remove neighbor entry with RTM_DELNEIGH */
695int br_fdb_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 657int br_fdb_delete(struct ndmsg *ndm, struct net_device *dev,
658 unsigned char *addr)
696{ 659{
697 struct net *net = sock_net(skb->sk);
698 struct ndmsg *ndm;
699 struct net_bridge_port *p; 660 struct net_bridge_port *p;
700 struct nlattr *llattr;
701 const __u8 *addr;
702 struct net_device *dev;
703 int err; 661 int err;
704 662
705 ASSERT_RTNL();
706 if (nlmsg_len(nlh) < sizeof(*ndm))
707 return -EINVAL;
708
709 ndm = nlmsg_data(nlh);
710 if (ndm->ndm_ifindex == 0) {
711 pr_info("bridge: RTM_DELNEIGH with invalid ifindex\n");
712 return -EINVAL;
713 }
714
715 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
716 if (dev == NULL) {
717 pr_info("bridge: RTM_DELNEIGH with unknown ifindex\n");
718 return -ENODEV;
719 }
720
721 llattr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_LLADDR);
722 if (llattr == NULL || nla_len(llattr) != ETH_ALEN) {
723 pr_info("bridge: RTM_DELNEIGH with invalid address\n");
724 return -EINVAL;
725 }
726
727 addr = nla_data(llattr);
728
729 p = br_port_get_rtnl(dev); 663 p = br_port_get_rtnl(dev);
730 if (p == NULL) { 664 if (p == NULL) {
731 pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n", 665 pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index a2098e3de500..e9466d412707 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -34,7 +34,7 @@ static inline int should_deliver(const struct net_bridge_port *p,
34 p->state == BR_STATE_FORWARDING); 34 p->state == BR_STATE_FORWARDING);
35} 35}
36 36
37static inline unsigned packet_length(const struct sk_buff *skb) 37static inline unsigned int packet_length(const struct sk_buff *skb)
38{ 38{
39 return skb->len - (skb->protocol == htons(ETH_P_8021Q) ? VLAN_HLEN : 0); 39 return skb->len - (skb->protocol == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
40} 40}
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 27ca25ed7021..5ca4c50ea233 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -36,6 +36,8 @@
36#define mlock_dereference(X, br) \ 36#define mlock_dereference(X, br) \
37 rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock)) 37 rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
38 38
39static void br_multicast_start_querier(struct net_bridge *br);
40
39#if IS_ENABLED(CONFIG_IPV6) 41#if IS_ENABLED(CONFIG_IPV6)
40static inline int ipv6_is_transient_multicast(const struct in6_addr *addr) 42static inline int ipv6_is_transient_multicast(const struct in6_addr *addr)
41{ 43{
@@ -512,8 +514,8 @@ static struct net_bridge_mdb_entry *br_multicast_get_group(
512 struct net_bridge_mdb_htable *mdb; 514 struct net_bridge_mdb_htable *mdb;
513 struct net_bridge_mdb_entry *mp; 515 struct net_bridge_mdb_entry *mp;
514 struct hlist_node *p; 516 struct hlist_node *p;
515 unsigned count = 0; 517 unsigned int count = 0;
516 unsigned max; 518 unsigned int max;
517 int elasticity; 519 int elasticity;
518 int err; 520 int err;
519 521
@@ -740,6 +742,20 @@ static void br_multicast_local_router_expired(unsigned long data)
740{ 742{
741} 743}
742 744
745static void br_multicast_querier_expired(unsigned long data)
746{
747 struct net_bridge *br = (void *)data;
748
749 spin_lock(&br->multicast_lock);
750 if (!netif_running(br->dev) || br->multicast_disabled)
751 goto out;
752
753 br_multicast_start_querier(br);
754
755out:
756 spin_unlock(&br->multicast_lock);
757}
758
743static void __br_multicast_send_query(struct net_bridge *br, 759static void __br_multicast_send_query(struct net_bridge *br,
744 struct net_bridge_port *port, 760 struct net_bridge_port *port,
745 struct br_ip *ip) 761 struct br_ip *ip)
@@ -766,6 +782,7 @@ static void br_multicast_send_query(struct net_bridge *br,
766 struct br_ip br_group; 782 struct br_ip br_group;
767 783
768 if (!netif_running(br->dev) || br->multicast_disabled || 784 if (!netif_running(br->dev) || br->multicast_disabled ||
785 !br->multicast_querier ||
769 timer_pending(&br->multicast_querier_timer)) 786 timer_pending(&br->multicast_querier_timer))
770 return; 787 return;
771 788
@@ -1281,8 +1298,8 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1281 struct sk_buff *skb2 = skb; 1298 struct sk_buff *skb2 = skb;
1282 const struct iphdr *iph; 1299 const struct iphdr *iph;
1283 struct igmphdr *ih; 1300 struct igmphdr *ih;
1284 unsigned len; 1301 unsigned int len;
1285 unsigned offset; 1302 unsigned int offset;
1286 int err; 1303 int err;
1287 1304
1288 /* We treat OOM as packet loss for now. */ 1305 /* We treat OOM as packet loss for now. */
@@ -1382,7 +1399,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1382 u8 icmp6_type; 1399 u8 icmp6_type;
1383 u8 nexthdr; 1400 u8 nexthdr;
1384 __be16 frag_off; 1401 __be16 frag_off;
1385 unsigned len; 1402 unsigned int len;
1386 int offset; 1403 int offset;
1387 int err; 1404 int err;
1388 1405
@@ -1548,6 +1565,7 @@ void br_multicast_init(struct net_bridge *br)
1548 br->hash_max = 512; 1565 br->hash_max = 512;
1549 1566
1550 br->multicast_router = 1; 1567 br->multicast_router = 1;
1568 br->multicast_querier = 0;
1551 br->multicast_last_member_count = 2; 1569 br->multicast_last_member_count = 2;
1552 br->multicast_startup_query_count = 2; 1570 br->multicast_startup_query_count = 2;
1553 1571
@@ -1562,7 +1580,7 @@ void br_multicast_init(struct net_bridge *br)
1562 setup_timer(&br->multicast_router_timer, 1580 setup_timer(&br->multicast_router_timer,
1563 br_multicast_local_router_expired, 0); 1581 br_multicast_local_router_expired, 0);
1564 setup_timer(&br->multicast_querier_timer, 1582 setup_timer(&br->multicast_querier_timer,
1565 br_multicast_local_router_expired, 0); 1583 br_multicast_querier_expired, (unsigned long)br);
1566 setup_timer(&br->multicast_query_timer, br_multicast_query_expired, 1584 setup_timer(&br->multicast_query_timer, br_multicast_query_expired,
1567 (unsigned long)br); 1585 (unsigned long)br);
1568} 1586}
@@ -1689,9 +1707,23 @@ unlock:
1689 return err; 1707 return err;
1690} 1708}
1691 1709
1692int br_multicast_toggle(struct net_bridge *br, unsigned long val) 1710static void br_multicast_start_querier(struct net_bridge *br)
1693{ 1711{
1694 struct net_bridge_port *port; 1712 struct net_bridge_port *port;
1713
1714 br_multicast_open(br);
1715
1716 list_for_each_entry(port, &br->port_list, list) {
1717 if (port->state == BR_STATE_DISABLED ||
1718 port->state == BR_STATE_BLOCKING)
1719 continue;
1720
1721 __br_multicast_enable_port(port);
1722 }
1723}
1724
1725int br_multicast_toggle(struct net_bridge *br, unsigned long val)
1726{
1695 int err = 0; 1727 int err = 0;
1696 struct net_bridge_mdb_htable *mdb; 1728 struct net_bridge_mdb_htable *mdb;
1697 1729
@@ -1721,14 +1753,7 @@ rollback:
1721 goto rollback; 1753 goto rollback;
1722 } 1754 }
1723 1755
1724 br_multicast_open(br); 1756 br_multicast_start_querier(br);
1725 list_for_each_entry(port, &br->port_list, list) {
1726 if (port->state == BR_STATE_DISABLED ||
1727 port->state == BR_STATE_BLOCKING)
1728 continue;
1729
1730 __br_multicast_enable_port(port);
1731 }
1732 1757
1733unlock: 1758unlock:
1734 spin_unlock_bh(&br->multicast_lock); 1759 spin_unlock_bh(&br->multicast_lock);
@@ -1736,6 +1761,24 @@ unlock:
1736 return err; 1761 return err;
1737} 1762}
1738 1763
1764int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
1765{
1766 val = !!val;
1767
1768 spin_lock_bh(&br->multicast_lock);
1769 if (br->multicast_querier == val)
1770 goto unlock;
1771
1772 br->multicast_querier = val;
1773 if (val)
1774 br_multicast_start_querier(br);
1775
1776unlock:
1777 spin_unlock_bh(&br->multicast_lock);
1778
1779 return 0;
1780}
1781
1739int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val) 1782int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
1740{ 1783{
1741 int err = -ENOENT; 1784 int err = -ENOENT;
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index d7f49b63ab0f..53f083686ae4 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -1004,12 +1004,6 @@ static ctl_table brnf_table[] = {
1004 }, 1004 },
1005 { } 1005 { }
1006}; 1006};
1007
1008static struct ctl_path brnf_path[] = {
1009 { .procname = "net", },
1010 { .procname = "bridge", },
1011 { }
1012};
1013#endif 1007#endif
1014 1008
1015int __init br_netfilter_init(void) 1009int __init br_netfilter_init(void)
@@ -1026,7 +1020,7 @@ int __init br_netfilter_init(void)
1026 return ret; 1020 return ret;
1027 } 1021 }
1028#ifdef CONFIG_SYSCTL 1022#ifdef CONFIG_SYSCTL
1029 brnf_sysctl_header = register_sysctl_paths(brnf_path, brnf_table); 1023 brnf_sysctl_header = register_net_sysctl(&init_net, "net/bridge", brnf_table);
1030 if (brnf_sysctl_header == NULL) { 1024 if (brnf_sysctl_header == NULL) {
1031 printk(KERN_WARNING 1025 printk(KERN_WARNING
1032 "br_netfilter: can't register to sysctl.\n"); 1026 "br_netfilter: can't register to sysctl.\n");
@@ -1043,7 +1037,7 @@ void br_netfilter_fini(void)
1043{ 1037{
1044 nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops)); 1038 nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
1045#ifdef CONFIG_SYSCTL 1039#ifdef CONFIG_SYSCTL
1046 unregister_sysctl_table(brnf_sysctl_header); 1040 unregister_net_sysctl_table(brnf_sysctl_header);
1047#endif 1041#endif
1048 dst_entries_destroy(&fake_dst_ops); 1042 dst_entries_destroy(&fake_dst_ops);
1049} 1043}
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index a1daf8227ed1..2080485515f1 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -60,20 +60,17 @@ static int br_fill_ifinfo(struct sk_buff *skb, const struct net_bridge_port *por
60 hdr->ifi_flags = dev_get_flags(dev); 60 hdr->ifi_flags = dev_get_flags(dev);
61 hdr->ifi_change = 0; 61 hdr->ifi_change = 0;
62 62
63 NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name); 63 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
64 NLA_PUT_U32(skb, IFLA_MASTER, br->dev->ifindex); 64 nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) ||
65 NLA_PUT_U32(skb, IFLA_MTU, dev->mtu); 65 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
66 NLA_PUT_U8(skb, IFLA_OPERSTATE, operstate); 66 nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
67 67 (dev->addr_len &&
68 if (dev->addr_len) 68 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
69 NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr); 69 (dev->ifindex != dev->iflink &&
70 70 nla_put_u32(skb, IFLA_LINK, dev->iflink)) ||
71 if (dev->ifindex != dev->iflink) 71 (event == RTM_NEWLINK &&
72 NLA_PUT_U32(skb, IFLA_LINK, dev->iflink); 72 nla_put_u8(skb, IFLA_PROTINFO, port->state)))
73 73 goto nla_put_failure;
74 if (event == RTM_NEWLINK)
75 NLA_PUT_U8(skb, IFLA_PROTINFO, port->state);
76
77 return nlmsg_end(skb, nlh); 74 return nlmsg_end(skb, nlh);
78 75
79nla_put_failure: 76nla_put_failure:
@@ -91,7 +88,7 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port)
91 int err = -ENOBUFS; 88 int err = -ENOBUFS;
92 89
93 br_debug(port->br, "port %u(%s) event %d\n", 90 br_debug(port->br, "port %u(%s) event %d\n",
94 (unsigned)port->port_no, port->dev->name, event); 91 (unsigned int)port->port_no, port->dev->name, event);
95 92
96 skb = nlmsg_new(br_nlmsg_size(), GFP_ATOMIC); 93 skb = nlmsg_new(br_nlmsg_size(), GFP_ATOMIC);
97 if (skb == NULL) 94 if (skb == NULL)
@@ -235,18 +232,6 @@ int __init br_netlink_init(void)
235 br_rtm_setlink, NULL, NULL); 232 br_rtm_setlink, NULL, NULL);
236 if (err) 233 if (err)
237 goto err3; 234 goto err3;
238 err = __rtnl_register(PF_BRIDGE, RTM_NEWNEIGH,
239 br_fdb_add, NULL, NULL);
240 if (err)
241 goto err3;
242 err = __rtnl_register(PF_BRIDGE, RTM_DELNEIGH,
243 br_fdb_delete, NULL, NULL);
244 if (err)
245 goto err3;
246 err = __rtnl_register(PF_BRIDGE, RTM_GETNEIGH,
247 NULL, br_fdb_dump, NULL);
248 if (err)
249 goto err3;
250 235
251 return 0; 236 return 0;
252 237
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index e1d882257877..1a8ad4fb9a6b 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -224,6 +224,7 @@ struct net_bridge
224 unsigned char multicast_router; 224 unsigned char multicast_router;
225 225
226 u8 multicast_disabled:1; 226 u8 multicast_disabled:1;
227 u8 multicast_querier:1;
227 228
228 u32 hash_elasticity; 229 u32 hash_elasticity;
229 u32 hash_max; 230 u32 hash_max;
@@ -359,9 +360,18 @@ extern int br_fdb_insert(struct net_bridge *br,
359extern void br_fdb_update(struct net_bridge *br, 360extern void br_fdb_update(struct net_bridge *br,
360 struct net_bridge_port *source, 361 struct net_bridge_port *source,
361 const unsigned char *addr); 362 const unsigned char *addr);
362extern int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb); 363
363extern int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg); 364extern int br_fdb_delete(struct ndmsg *ndm,
364extern int br_fdb_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg); 365 struct net_device *dev,
366 unsigned char *addr);
367extern int br_fdb_add(struct ndmsg *nlh,
368 struct net_device *dev,
369 unsigned char *addr,
370 u16 nlh_flags);
371extern int br_fdb_dump(struct sk_buff *skb,
372 struct netlink_callback *cb,
373 struct net_device *dev,
374 int idx);
365 375
366/* br_forward.c */ 376/* br_forward.c */
367extern void br_deliver(const struct net_bridge_port *to, 377extern void br_deliver(const struct net_bridge_port *to,
@@ -417,6 +427,7 @@ extern int br_multicast_set_router(struct net_bridge *br, unsigned long val);
417extern int br_multicast_set_port_router(struct net_bridge_port *p, 427extern int br_multicast_set_port_router(struct net_bridge_port *p,
418 unsigned long val); 428 unsigned long val);
419extern int br_multicast_toggle(struct net_bridge *br, unsigned long val); 429extern int br_multicast_toggle(struct net_bridge *br, unsigned long val);
430extern int br_multicast_set_querier(struct net_bridge *br, unsigned long val);
420extern int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val); 431extern int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val);
421 432
422static inline bool br_multicast_is_router(struct net_bridge *br) 433static inline bool br_multicast_is_router(struct net_bridge *br)
diff --git a/net/bridge/br_private_stp.h b/net/bridge/br_private_stp.h
index 05ed9bc7e426..0c0fe36e7aa9 100644
--- a/net/bridge/br_private_stp.h
+++ b/net/bridge/br_private_stp.h
@@ -29,10 +29,9 @@
29#define BR_MIN_PATH_COST 1 29#define BR_MIN_PATH_COST 1
30#define BR_MAX_PATH_COST 65535 30#define BR_MAX_PATH_COST 65535
31 31
32struct br_config_bpdu 32struct br_config_bpdu {
33{ 33 unsigned int topology_change:1;
34 unsigned topology_change:1; 34 unsigned int topology_change_ack:1;
35 unsigned topology_change_ack:1;
36 bridge_id root; 35 bridge_id root;
37 int root_path_cost; 36 int root_path_cost;
38 bridge_id bridge_id; 37 bridge_id bridge_id;
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 8c836d96ba76..af9a12099ba4 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -32,7 +32,7 @@ static const char *const br_port_state_names[] = {
32void br_log_state(const struct net_bridge_port *p) 32void br_log_state(const struct net_bridge_port *p)
33{ 33{
34 br_info(p->br, "port %u(%s) entered %s state\n", 34 br_info(p->br, "port %u(%s) entered %s state\n",
35 (unsigned) p->port_no, p->dev->name, 35 (unsigned int) p->port_no, p->dev->name,
36 br_port_state_names[p->state]); 36 br_port_state_names[p->state]);
37} 37}
38 38
@@ -478,7 +478,7 @@ void br_received_tcn_bpdu(struct net_bridge_port *p)
478{ 478{
479 if (br_is_designated_port(p)) { 479 if (br_is_designated_port(p)) {
480 br_info(p->br, "port %u(%s) received tcn bpdu\n", 480 br_info(p->br, "port %u(%s) received tcn bpdu\n",
481 (unsigned) p->port_no, p->dev->name); 481 (unsigned int) p->port_no, p->dev->name);
482 482
483 br_topology_change_detection(p->br); 483 br_topology_change_detection(p->br);
484 br_topology_change_acknowledge(p); 484 br_topology_change_acknowledge(p);
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index 58de2a0f9975..a6747e673426 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -56,7 +56,7 @@ static void br_message_age_timer_expired(unsigned long arg)
56 return; 56 return;
57 57
58 br_info(br, "port %u(%s) neighbor %.2x%.2x.%pM lost\n", 58 br_info(br, "port %u(%s) neighbor %.2x%.2x.%pM lost\n",
59 (unsigned) p->port_no, p->dev->name, 59 (unsigned int) p->port_no, p->dev->name,
60 id->prio[0], id->prio[1], &id->addr); 60 id->prio[0], id->prio[1], &id->addr);
61 61
62 /* 62 /*
@@ -84,7 +84,7 @@ static void br_forward_delay_timer_expired(unsigned long arg)
84 struct net_bridge *br = p->br; 84 struct net_bridge *br = p->br;
85 85
86 br_debug(br, "port %u(%s) forward delay timer\n", 86 br_debug(br, "port %u(%s) forward delay timer\n",
87 (unsigned) p->port_no, p->dev->name); 87 (unsigned int) p->port_no, p->dev->name);
88 spin_lock(&br->lock); 88 spin_lock(&br->lock);
89 if (p->state == BR_STATE_LISTENING) { 89 if (p->state == BR_STATE_LISTENING) {
90 p->state = BR_STATE_LEARNING; 90 p->state = BR_STATE_LEARNING;
@@ -131,7 +131,7 @@ static void br_hold_timer_expired(unsigned long arg)
131 struct net_bridge_port *p = (struct net_bridge_port *) arg; 131 struct net_bridge_port *p = (struct net_bridge_port *) arg;
132 132
133 br_debug(p->br, "port %u(%s) hold timer expired\n", 133 br_debug(p->br, "port %u(%s) hold timer expired\n",
134 (unsigned) p->port_no, p->dev->name); 134 (unsigned int) p->port_no, p->dev->name);
135 135
136 spin_lock(&p->br->lock); 136 spin_lock(&p->br->lock);
137 if (p->config_pending) 137 if (p->config_pending)
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index c236c0e43984..c5c059333eab 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -297,7 +297,7 @@ static ssize_t store_group_addr(struct device *d,
297 const char *buf, size_t len) 297 const char *buf, size_t len)
298{ 298{
299 struct net_bridge *br = to_bridge(d); 299 struct net_bridge *br = to_bridge(d);
300 unsigned new_addr[6]; 300 unsigned int new_addr[6];
301 int i; 301 int i;
302 302
303 if (!capable(CAP_NET_ADMIN)) 303 if (!capable(CAP_NET_ADMIN))
@@ -379,6 +379,23 @@ static ssize_t store_multicast_snooping(struct device *d,
379static DEVICE_ATTR(multicast_snooping, S_IRUGO | S_IWUSR, 379static DEVICE_ATTR(multicast_snooping, S_IRUGO | S_IWUSR,
380 show_multicast_snooping, store_multicast_snooping); 380 show_multicast_snooping, store_multicast_snooping);
381 381
382static ssize_t show_multicast_querier(struct device *d,
383 struct device_attribute *attr,
384 char *buf)
385{
386 struct net_bridge *br = to_bridge(d);
387 return sprintf(buf, "%d\n", br->multicast_querier);
388}
389
390static ssize_t store_multicast_querier(struct device *d,
391 struct device_attribute *attr,
392 const char *buf, size_t len)
393{
394 return store_bridge_parm(d, buf, len, br_multicast_set_querier);
395}
396static DEVICE_ATTR(multicast_querier, S_IRUGO | S_IWUSR,
397 show_multicast_querier, store_multicast_querier);
398
382static ssize_t show_hash_elasticity(struct device *d, 399static ssize_t show_hash_elasticity(struct device *d,
383 struct device_attribute *attr, char *buf) 400 struct device_attribute *attr, char *buf)
384{ 401{
@@ -702,6 +719,7 @@ static struct attribute *bridge_attrs[] = {
702#ifdef CONFIG_BRIDGE_IGMP_SNOOPING 719#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
703 &dev_attr_multicast_router.attr, 720 &dev_attr_multicast_router.attr,
704 &dev_attr_multicast_snooping.attr, 721 &dev_attr_multicast_snooping.attr,
722 &dev_attr_multicast_querier.attr,
705 &dev_attr_hash_elasticity.attr, 723 &dev_attr_hash_elasticity.attr,
706 &dev_attr_hash_max.attr, 724 &dev_attr_hash_max.attr,
707 &dev_attr_multicast_last_member_count.attr, 725 &dev_attr_multicast_last_member_count.attr,
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 5016fa57b623..0dccdb3c7d26 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -19,7 +19,7 @@
19#include <linux/uaccess.h> 19#include <linux/uaccess.h>
20#include <linux/debugfs.h> 20#include <linux/debugfs.h>
21#include <linux/caif/caif_socket.h> 21#include <linux/caif/caif_socket.h>
22#include <linux/atomic.h> 22#include <linux/pkt_sched.h>
23#include <net/sock.h> 23#include <net/sock.h>
24#include <net/tcp_states.h> 24#include <net/tcp_states.h>
25#include <net/caif/caif_layer.h> 25#include <net/caif/caif_layer.h>
@@ -130,7 +130,7 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
130 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); 130 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
131 131
132 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 132 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
133 (unsigned)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) { 133 (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
134 if (net_ratelimit()) 134 if (net_ratelimit())
135 pr_debug("sending flow OFF (queue len = %d %d)\n", 135 pr_debug("sending flow OFF (queue len = %d %d)\n",
136 atomic_read(&cf_sk->sk.sk_rmem_alloc), 136 atomic_read(&cf_sk->sk.sk_rmem_alloc),
@@ -505,6 +505,7 @@ static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
505 505
506 pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb); 506 pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
507 memset(skb->cb, 0, sizeof(struct caif_payload_info)); 507 memset(skb->cb, 0, sizeof(struct caif_payload_info));
508 cfpkt_set_prio(pkt, cf_sk->sk.sk_priority);
508 509
509 if (cf_sk->layer.dn == NULL) { 510 if (cf_sk->layer.dn == NULL) {
510 kfree_skb(skb); 511 kfree_skb(skb);
@@ -1062,6 +1063,18 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
1062 /* Store the protocol */ 1063 /* Store the protocol */
1063 sk->sk_protocol = (unsigned char) protocol; 1064 sk->sk_protocol = (unsigned char) protocol;
1064 1065
1066 /* Initialize default priority for well-known cases */
1067 switch (protocol) {
1068 case CAIFPROTO_AT:
1069 sk->sk_priority = TC_PRIO_CONTROL;
1070 break;
1071 case CAIFPROTO_RFM:
1072 sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
1073 break;
1074 default:
1075 sk->sk_priority = TC_PRIO_BESTEFFORT;
1076 }
1077
1065 /* 1078 /*
1066 * Lock in order to try to stop someone from opening the socket 1079 * Lock in order to try to stop someone from opening the socket
1067 * too early. 1080 * too early.
@@ -1081,7 +1094,6 @@ static int caif_create(struct net *net, struct socket *sock, int protocol,
1081 set_rx_flow_on(cf_sk); 1094 set_rx_flow_on(cf_sk);
1082 1095
1083 /* Set default options on configuration */ 1096 /* Set default options on configuration */
1084 cf_sk->sk.sk_priority = CAIF_PRIO_NORMAL;
1085 cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY; 1097 cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
1086 cf_sk->conn_req.protocol = protocol; 1098 cf_sk->conn_req.protocol = protocol;
1087 release_sock(&cf_sk->sk); 1099 release_sock(&cf_sk->sk);
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 5cf52225692e..047cd0eec022 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -9,6 +9,7 @@
9#include <linux/stddef.h> 9#include <linux/stddef.h>
10#include <linux/spinlock.h> 10#include <linux/spinlock.h>
11#include <linux/slab.h> 11#include <linux/slab.h>
12#include <linux/pkt_sched.h>
12#include <net/caif/caif_layer.h> 13#include <net/caif/caif_layer.h>
13#include <net/caif/cfpkt.h> 14#include <net/caif/cfpkt.h>
14#include <net/caif/cfctrl.h> 15#include <net/caif/cfctrl.h>
@@ -189,6 +190,7 @@ void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
189 cfctrl->serv.dev_info.id = physlinkid; 190 cfctrl->serv.dev_info.id = physlinkid;
190 cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM); 191 cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM);
191 cfpkt_addbdy(pkt, physlinkid); 192 cfpkt_addbdy(pkt, physlinkid);
193 cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
192 dn->transmit(dn, pkt); 194 dn->transmit(dn, pkt);
193} 195}
194 196
@@ -281,6 +283,7 @@ int cfctrl_linkup_request(struct cflayer *layer,
281 * might arrive with the newly allocated channel ID. 283 * might arrive with the newly allocated channel ID.
282 */ 284 */
283 cfpkt_info(pkt)->dev_info->id = param->phyid; 285 cfpkt_info(pkt)->dev_info->id = param->phyid;
286 cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
284 ret = 287 ret =
285 dn->transmit(dn, pkt); 288 dn->transmit(dn, pkt);
286 if (ret < 0) { 289 if (ret < 0) {
@@ -314,6 +317,7 @@ int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
314 cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY); 317 cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY);
315 cfpkt_addbdy(pkt, channelid); 318 cfpkt_addbdy(pkt, channelid);
316 init_info(cfpkt_info(pkt), cfctrl); 319 init_info(cfpkt_info(pkt), cfctrl);
320 cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
317 ret = 321 ret =
318 dn->transmit(dn, pkt); 322 dn->transmit(dn, pkt);
319#ifndef CAIF_NO_LOOP 323#ifndef CAIF_NO_LOOP
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index e335ba859b97..863dedd91bb6 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -381,6 +381,7 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos)
381 memcpy(skb2->data, split, len2nd); 381 memcpy(skb2->data, split, len2nd);
382 skb2->tail += len2nd; 382 skb2->tail += len2nd;
383 skb2->len += len2nd; 383 skb2->len += len2nd;
384 skb2->priority = skb->priority;
384 return skb_to_pkt(skb2); 385 return skb_to_pkt(skb2);
385} 386}
386 387
@@ -394,3 +395,9 @@ struct caif_payload_info *cfpkt_info(struct cfpkt *pkt)
394 return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb; 395 return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb;
395} 396}
396EXPORT_SYMBOL(cfpkt_info); 397EXPORT_SYMBOL(cfpkt_info);
398
399void cfpkt_set_prio(struct cfpkt *pkt, int prio)
400{
401 pkt_to_skb(pkt)->priority = prio;
402}
403EXPORT_SYMBOL(cfpkt_set_prio);
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index 4aa33d4496b6..dd485f6128e8 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -11,6 +11,7 @@
11#include <linux/errno.h> 11#include <linux/errno.h>
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/pkt_sched.h>
14#include <net/caif/caif_layer.h> 15#include <net/caif/caif_layer.h>
15#include <net/caif/cfsrvl.h> 16#include <net/caif/cfsrvl.h>
16#include <net/caif/cfpkt.h> 17#include <net/caif/cfpkt.h>
@@ -120,6 +121,7 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
120 info->channel_id = service->layer.id; 121 info->channel_id = service->layer.id;
121 info->hdr_len = 1; 122 info->hdr_len = 1;
122 info->dev_info = &service->dev_info; 123 info->dev_info = &service->dev_info;
124 cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
123 return layr->dn->transmit(layr->dn, pkt); 125 return layr->dn->transmit(layr->dn, pkt);
124 } 126 }
125 case CAIF_MODEMCMD_FLOW_OFF_REQ: 127 case CAIF_MODEMCMD_FLOW_OFF_REQ:
@@ -140,6 +142,7 @@ static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
140 info->channel_id = service->layer.id; 142 info->channel_id = service->layer.id;
141 info->hdr_len = 1; 143 info->hdr_len = 1;
142 info->dev_info = &service->dev_info; 144 info->dev_info = &service->dev_info;
145 cfpkt_set_prio(pkt, TC_PRIO_CONTROL);
143 return layr->dn->transmit(layr->dn, pkt); 146 return layr->dn->transmit(layr->dn, pkt);
144 } 147 }
145 default: 148 default:
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index d09340e1523f..69771c04ba8f 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -424,14 +424,14 @@ static int ipcaif_fill_info(struct sk_buff *skb, const struct net_device *dev)
424 struct chnl_net *priv; 424 struct chnl_net *priv;
425 u8 loop; 425 u8 loop;
426 priv = netdev_priv(dev); 426 priv = netdev_priv(dev);
427 NLA_PUT_U32(skb, IFLA_CAIF_IPV4_CONNID, 427 if (nla_put_u32(skb, IFLA_CAIF_IPV4_CONNID,
428 priv->conn_req.sockaddr.u.dgm.connection_id); 428 priv->conn_req.sockaddr.u.dgm.connection_id) ||
429 NLA_PUT_U32(skb, IFLA_CAIF_IPV6_CONNID, 429 nla_put_u32(skb, IFLA_CAIF_IPV6_CONNID,
430 priv->conn_req.sockaddr.u.dgm.connection_id); 430 priv->conn_req.sockaddr.u.dgm.connection_id))
431 goto nla_put_failure;
431 loop = priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP; 432 loop = priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP;
432 NLA_PUT_U8(skb, IFLA_CAIF_LOOPBACK, loop); 433 if (nla_put_u8(skb, IFLA_CAIF_LOOPBACK, loop))
433 434 goto nla_put_failure;
434
435 return 0; 435 return 0;
436nla_put_failure: 436nla_put_failure:
437 return -EMSGSIZE; 437 return -EMSGSIZE;
diff --git a/net/can/gw.c b/net/can/gw.c
index 3d79b127881e..b41acf25668f 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -66,7 +66,7 @@ MODULE_LICENSE("Dual BSD/GPL");
66MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>"); 66MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
67MODULE_ALIAS("can-gw"); 67MODULE_ALIAS("can-gw");
68 68
69HLIST_HEAD(cgw_list); 69static HLIST_HEAD(cgw_list);
70static struct notifier_block notifier; 70static struct notifier_block notifier;
71 71
72static struct kmem_cache *cgw_cache __read_mostly; 72static struct kmem_cache *cgw_cache __read_mostly;
diff --git a/net/ceph/auth_x.h b/net/ceph/auth_x.h
index e02da7a5c5a1..f459e93b774f 100644
--- a/net/ceph/auth_x.h
+++ b/net/ceph/auth_x.h
@@ -13,7 +13,7 @@
13 */ 13 */
14struct ceph_x_ticket_handler { 14struct ceph_x_ticket_handler {
15 struct rb_node node; 15 struct rb_node node;
16 unsigned service; 16 unsigned int service;
17 17
18 struct ceph_crypto_key session_key; 18 struct ceph_crypto_key session_key;
19 struct ceph_timespec validity; 19 struct ceph_timespec validity;
@@ -27,7 +27,7 @@ struct ceph_x_ticket_handler {
27 27
28struct ceph_x_authorizer { 28struct ceph_x_authorizer {
29 struct ceph_buffer *buf; 29 struct ceph_buffer *buf;
30 unsigned service; 30 unsigned int service;
31 u64 nonce; 31 u64 nonce;
32 char reply_buf[128]; /* big enough for encrypted blob */ 32 char reply_buf[128]; /* big enough for encrypted blob */
33}; 33};
@@ -38,7 +38,7 @@ struct ceph_x_info {
38 bool starting; 38 bool starting;
39 u64 server_challenge; 39 u64 server_challenge;
40 40
41 unsigned have_keys; 41 unsigned int have_keys;
42 struct rb_root ticket_handlers; 42 struct rb_root ticket_handlers;
43 43
44 struct ceph_x_authorizer auth_authorizer; 44 struct ceph_x_authorizer auth_authorizer;
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index cc913193d992..a776f751edbf 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -441,8 +441,8 @@ EXPORT_SYMBOL(ceph_client_id);
441 * create a fresh client instance 441 * create a fresh client instance
442 */ 442 */
443struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private, 443struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private,
444 unsigned supported_features, 444 unsigned int supported_features,
445 unsigned required_features) 445 unsigned int required_features)
446{ 446{
447 struct ceph_client *client; 447 struct ceph_client *client;
448 struct ceph_entity_addr *myaddr = NULL; 448 struct ceph_entity_addr *myaddr = NULL;
diff --git a/net/ceph/ceph_hash.c b/net/ceph/ceph_hash.c
index 0a1b53bce76d..67bb1f11e613 100644
--- a/net/ceph/ceph_hash.c
+++ b/net/ceph/ceph_hash.c
@@ -20,7 +20,7 @@
20 c = c - a; c = c - b; c = c ^ (b >> 15); \ 20 c = c - a; c = c - b; c = c ^ (b >> 15); \
21 } while (0) 21 } while (0)
22 22
23unsigned ceph_str_hash_rjenkins(const char *str, unsigned length) 23unsigned int ceph_str_hash_rjenkins(const char *str, unsigned int length)
24{ 24{
25 const unsigned char *k = (const unsigned char *)str; 25 const unsigned char *k = (const unsigned char *)str;
26 __u32 a, b, c; /* the internal state */ 26 __u32 a, b, c; /* the internal state */
@@ -81,7 +81,7 @@ unsigned ceph_str_hash_rjenkins(const char *str, unsigned length)
81/* 81/*
82 * linux dcache hash 82 * linux dcache hash
83 */ 83 */
84unsigned ceph_str_hash_linux(const char *str, unsigned length) 84unsigned int ceph_str_hash_linux(const char *str, unsigned int length)
85{ 85{
86 unsigned long hash = 0; 86 unsigned long hash = 0;
87 unsigned char c; 87 unsigned char c;
@@ -94,7 +94,7 @@ unsigned ceph_str_hash_linux(const char *str, unsigned length)
94} 94}
95 95
96 96
97unsigned ceph_str_hash(int type, const char *s, unsigned len) 97unsigned int ceph_str_hash(int type, const char *s, unsigned int len)
98{ 98{
99 switch (type) { 99 switch (type) {
100 case CEPH_STR_HASH_LINUX: 100 case CEPH_STR_HASH_LINUX:
diff --git a/net/ceph/crush/mapper.c b/net/ceph/crush/mapper.c
index b79747c4b645..363f8f7e6c3c 100644
--- a/net/ceph/crush/mapper.c
+++ b/net/ceph/crush/mapper.c
@@ -20,6 +20,7 @@
20 20
21#include <linux/crush/crush.h> 21#include <linux/crush/crush.h>
22#include <linux/crush/hash.h> 22#include <linux/crush/hash.h>
23#include <linux/crush/mapper.h>
23 24
24/* 25/*
25 * Implement the core CRUSH mapping algorithm. 26 * Implement the core CRUSH mapping algorithm.
@@ -68,8 +69,8 @@ int crush_find_rule(struct crush_map *map, int ruleset, int type, int size)
68static int bucket_perm_choose(struct crush_bucket *bucket, 69static int bucket_perm_choose(struct crush_bucket *bucket,
69 int x, int r) 70 int x, int r)
70{ 71{
71 unsigned pr = r % bucket->size; 72 unsigned int pr = r % bucket->size;
72 unsigned i, s; 73 unsigned int i, s;
73 74
74 /* start a new permutation if @x has changed */ 75 /* start a new permutation if @x has changed */
75 if (bucket->perm_x != x || bucket->perm_n == 0) { 76 if (bucket->perm_x != x || bucket->perm_n == 0) {
@@ -100,13 +101,13 @@ static int bucket_perm_choose(struct crush_bucket *bucket,
100 for (i = 0; i < bucket->perm_n; i++) 101 for (i = 0; i < bucket->perm_n; i++)
101 dprintk(" perm_choose have %d: %d\n", i, bucket->perm[i]); 102 dprintk(" perm_choose have %d: %d\n", i, bucket->perm[i]);
102 while (bucket->perm_n <= pr) { 103 while (bucket->perm_n <= pr) {
103 unsigned p = bucket->perm_n; 104 unsigned int p = bucket->perm_n;
104 /* no point in swapping the final entry */ 105 /* no point in swapping the final entry */
105 if (p < bucket->size - 1) { 106 if (p < bucket->size - 1) {
106 i = crush_hash32_3(bucket->hash, x, bucket->id, p) % 107 i = crush_hash32_3(bucket->hash, x, bucket->id, p) %
107 (bucket->size - p); 108 (bucket->size - p);
108 if (i) { 109 if (i) {
109 unsigned t = bucket->perm[p + i]; 110 unsigned int t = bucket->perm[p + i];
110 bucket->perm[p + i] = bucket->perm[p]; 111 bucket->perm[p + i] = bucket->perm[p];
111 bucket->perm[p] = t; 112 bucket->perm[p] = t;
112 } 113 }
diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c
index 27d4ea315d12..54b531a01121 100644
--- a/net/ceph/debugfs.c
+++ b/net/ceph/debugfs.c
@@ -94,9 +94,9 @@ static int monc_show(struct seq_file *s, void *p)
94 mutex_lock(&monc->mutex); 94 mutex_lock(&monc->mutex);
95 95
96 if (monc->have_mdsmap) 96 if (monc->have_mdsmap)
97 seq_printf(s, "have mdsmap %u\n", (unsigned)monc->have_mdsmap); 97 seq_printf(s, "have mdsmap %u\n", (unsigned int)monc->have_mdsmap);
98 if (monc->have_osdmap) 98 if (monc->have_osdmap)
99 seq_printf(s, "have osdmap %u\n", (unsigned)monc->have_osdmap); 99 seq_printf(s, "have osdmap %u\n", (unsigned int)monc->have_osdmap);
100 if (monc->want_next_osdmap) 100 if (monc->want_next_osdmap)
101 seq_printf(s, "want next osdmap\n"); 101 seq_printf(s, "want next osdmap\n");
102 102
@@ -146,7 +146,7 @@ static int osdc_show(struct seq_file *s, void *pp)
146 146
147 if (req->r_reassert_version.epoch) 147 if (req->r_reassert_version.epoch)
148 seq_printf(s, "\t%u'%llu", 148 seq_printf(s, "\t%u'%llu",
149 (unsigned)le32_to_cpu(req->r_reassert_version.epoch), 149 (unsigned int)le32_to_cpu(req->r_reassert_version.epoch),
150 le64_to_cpu(req->r_reassert_version.version)); 150 le64_to_cpu(req->r_reassert_version.version));
151 else 151 else
152 seq_printf(s, "\t"); 152 seq_printf(s, "\t");
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index f0993af2ae4d..36fa6bf68498 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -699,7 +699,7 @@ static int prepare_write_connect(struct ceph_messenger *msgr,
699 struct ceph_connection *con, 699 struct ceph_connection *con,
700 int include_banner) 700 int include_banner)
701{ 701{
702 unsigned global_seq = get_global_seq(con->msgr, 0); 702 unsigned int global_seq = get_global_seq(con->msgr, 0);
703 int proto; 703 int proto;
704 704
705 switch (con->peer_name.type) { 705 switch (con->peer_name.type) {
@@ -816,7 +816,7 @@ static void iter_bio_next(struct bio **bio_iter, int *seg)
816static int write_partial_msg_pages(struct ceph_connection *con) 816static int write_partial_msg_pages(struct ceph_connection *con)
817{ 817{
818 struct ceph_msg *msg = con->out_msg; 818 struct ceph_msg *msg = con->out_msg;
819 unsigned data_len = le32_to_cpu(msg->hdr.data_len); 819 unsigned int data_len = le32_to_cpu(msg->hdr.data_len);
820 size_t len; 820 size_t len;
821 bool do_datacrc = !con->msgr->nocrc; 821 bool do_datacrc = !con->msgr->nocrc;
822 int ret; 822 int ret;
@@ -1554,7 +1554,7 @@ static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
1554 1554
1555static int read_partial_message_pages(struct ceph_connection *con, 1555static int read_partial_message_pages(struct ceph_connection *con,
1556 struct page **pages, 1556 struct page **pages,
1557 unsigned data_len, bool do_datacrc) 1557 unsigned int data_len, bool do_datacrc)
1558{ 1558{
1559 void *p; 1559 void *p;
1560 int ret; 1560 int ret;
@@ -1587,7 +1587,7 @@ static int read_partial_message_pages(struct ceph_connection *con,
1587#ifdef CONFIG_BLOCK 1587#ifdef CONFIG_BLOCK
1588static int read_partial_message_bio(struct ceph_connection *con, 1588static int read_partial_message_bio(struct ceph_connection *con,
1589 struct bio **bio_iter, int *bio_seg, 1589 struct bio **bio_iter, int *bio_seg,
1590 unsigned data_len, bool do_datacrc) 1590 unsigned int data_len, bool do_datacrc)
1591{ 1591{
1592 struct bio_vec *bv = bio_iovec_idx(*bio_iter, *bio_seg); 1592 struct bio_vec *bv = bio_iovec_idx(*bio_iter, *bio_seg);
1593 void *p; 1593 void *p;
@@ -1629,7 +1629,7 @@ static int read_partial_message(struct ceph_connection *con)
1629 struct ceph_msg *m = con->in_msg; 1629 struct ceph_msg *m = con->in_msg;
1630 int ret; 1630 int ret;
1631 int to, left; 1631 int to, left;
1632 unsigned front_len, middle_len, data_len; 1632 unsigned int front_len, middle_len, data_len;
1633 bool do_datacrc = !con->msgr->nocrc; 1633 bool do_datacrc = !con->msgr->nocrc;
1634 int skip; 1634 int skip;
1635 u64 seq; 1635 u64 seq;
@@ -2345,9 +2345,9 @@ void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg)
2345{ 2345{
2346 mutex_lock(&con->mutex); 2346 mutex_lock(&con->mutex);
2347 if (con->in_msg && con->in_msg == msg) { 2347 if (con->in_msg && con->in_msg == msg) {
2348 unsigned front_len = le32_to_cpu(con->in_hdr.front_len); 2348 unsigned int front_len = le32_to_cpu(con->in_hdr.front_len);
2349 unsigned middle_len = le32_to_cpu(con->in_hdr.middle_len); 2349 unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len);
2350 unsigned data_len = le32_to_cpu(con->in_hdr.data_len); 2350 unsigned int data_len = le32_to_cpu(con->in_hdr.data_len);
2351 2351
2352 /* skip rest of message */ 2352 /* skip rest of message */
2353 dout("con_revoke_pages %p msg %p revoked\n", con, msg); 2353 dout("con_revoke_pages %p msg %p revoked\n", con, msg);
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 1845cde26227..10d6008d31f2 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -168,7 +168,7 @@ static bool __sub_expired(struct ceph_mon_client *monc)
168 */ 168 */
169static void __schedule_delayed(struct ceph_mon_client *monc) 169static void __schedule_delayed(struct ceph_mon_client *monc)
170{ 170{
171 unsigned delay; 171 unsigned int delay;
172 172
173 if (monc->cur_mon < 0 || __sub_expired(monc)) 173 if (monc->cur_mon < 0 || __sub_expired(monc))
174 delay = 10 * HZ; 174 delay = 10 * HZ;
@@ -184,7 +184,7 @@ static void __schedule_delayed(struct ceph_mon_client *monc)
184static void __send_subscribe(struct ceph_mon_client *monc) 184static void __send_subscribe(struct ceph_mon_client *monc)
185{ 185{
186 dout("__send_subscribe sub_sent=%u exp=%u want_osd=%d\n", 186 dout("__send_subscribe sub_sent=%u exp=%u want_osd=%d\n",
187 (unsigned)monc->sub_sent, __sub_expired(monc), 187 (unsigned int)monc->sub_sent, __sub_expired(monc),
188 monc->want_next_osdmap); 188 monc->want_next_osdmap);
189 if ((__sub_expired(monc) && !monc->sub_sent) || 189 if ((__sub_expired(monc) && !monc->sub_sent) ||
190 monc->want_next_osdmap == 1) { 190 monc->want_next_osdmap == 1) {
@@ -201,7 +201,7 @@ static void __send_subscribe(struct ceph_mon_client *monc)
201 201
202 if (monc->want_next_osdmap) { 202 if (monc->want_next_osdmap) {
203 dout("__send_subscribe to 'osdmap' %u\n", 203 dout("__send_subscribe to 'osdmap' %u\n",
204 (unsigned)monc->have_osdmap); 204 (unsigned int)monc->have_osdmap);
205 ceph_encode_string(&p, end, "osdmap", 6); 205 ceph_encode_string(&p, end, "osdmap", 6);
206 i = p; 206 i = p;
207 i->have = cpu_to_le64(monc->have_osdmap); 207 i->have = cpu_to_le64(monc->have_osdmap);
@@ -211,7 +211,7 @@ static void __send_subscribe(struct ceph_mon_client *monc)
211 } 211 }
212 if (monc->want_mdsmap) { 212 if (monc->want_mdsmap) {
213 dout("__send_subscribe to 'mdsmap' %u+\n", 213 dout("__send_subscribe to 'mdsmap' %u+\n",
214 (unsigned)monc->have_mdsmap); 214 (unsigned int)monc->have_mdsmap);
215 ceph_encode_string(&p, end, "mdsmap", 6); 215 ceph_encode_string(&p, end, "mdsmap", 6);
216 i = p; 216 i = p;
217 i->have = cpu_to_le64(monc->have_mdsmap); 217 i->have = cpu_to_le64(monc->have_mdsmap);
@@ -236,7 +236,7 @@ static void __send_subscribe(struct ceph_mon_client *monc)
236static void handle_subscribe_ack(struct ceph_mon_client *monc, 236static void handle_subscribe_ack(struct ceph_mon_client *monc,
237 struct ceph_msg *msg) 237 struct ceph_msg *msg)
238{ 238{
239 unsigned seconds; 239 unsigned int seconds;
240 struct ceph_mon_subscribe_ack *h = msg->front.iov_base; 240 struct ceph_mon_subscribe_ack *h = msg->front.iov_base;
241 241
242 if (msg->front.iov_len < sizeof(*h)) 242 if (msg->front.iov_len < sizeof(*h))
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 5e254055c910..1b0ef3c4d393 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1214,7 +1214,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
1214 } 1214 }
1215 1215
1216 if (!req->r_got_reply) { 1216 if (!req->r_got_reply) {
1217 unsigned bytes; 1217 unsigned int bytes;
1218 1218
1219 req->r_result = le32_to_cpu(rhead->result); 1219 req->r_result = le32_to_cpu(rhead->result);
1220 bytes = le32_to_cpu(msg->hdr.data_len); 1220 bytes = le32_to_cpu(msg->hdr.data_len);
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 29ad46ec9dcf..56e561a69004 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -38,7 +38,7 @@ done:
38 38
39/* maps */ 39/* maps */
40 40
41static int calc_bits_of(unsigned t) 41static int calc_bits_of(unsigned int t)
42{ 42{
43 int b = 0; 43 int b = 0;
44 while (t) { 44 while (t) {
@@ -154,7 +154,7 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
154 magic = ceph_decode_32(p); 154 magic = ceph_decode_32(p);
155 if (magic != CRUSH_MAGIC) { 155 if (magic != CRUSH_MAGIC) {
156 pr_err("crush_decode magic %x != current %x\n", 156 pr_err("crush_decode magic %x != current %x\n",
157 (unsigned)magic, (unsigned)CRUSH_MAGIC); 157 (unsigned int)magic, (unsigned int)CRUSH_MAGIC);
158 goto bad; 158 goto bad;
159 } 159 }
160 c->max_buckets = ceph_decode_32(p); 160 c->max_buckets = ceph_decode_32(p);
@@ -460,7 +460,7 @@ static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
460 460
461static int __decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi) 461static int __decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
462{ 462{
463 unsigned n, m; 463 unsigned int n, m;
464 464
465 ceph_decode_copy(p, &pi->v, sizeof(pi->v)); 465 ceph_decode_copy(p, &pi->v, sizeof(pi->v));
466 calc_pg_masks(pi); 466 calc_pg_masks(pi);
@@ -970,7 +970,7 @@ void ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
970 objsetno = stripeno / su_per_object; 970 objsetno = stripeno / su_per_object;
971 971
972 *ono = objsetno * sc + stripepos; 972 *ono = objsetno * sc + stripepos;
973 dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned)*ono); 973 dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned int)*ono);
974 974
975 /* *oxoff = *off % layout->fl_stripe_unit; # offset in su */ 975 /* *oxoff = *off % layout->fl_stripe_unit; # offset in su */
976 t = off; 976 t = off;
@@ -998,12 +998,12 @@ int ceph_calc_object_layout(struct ceph_object_layout *ol,
998 struct ceph_file_layout *fl, 998 struct ceph_file_layout *fl,
999 struct ceph_osdmap *osdmap) 999 struct ceph_osdmap *osdmap)
1000{ 1000{
1001 unsigned num, num_mask; 1001 unsigned int num, num_mask;
1002 struct ceph_pg pgid; 1002 struct ceph_pg pgid;
1003 s32 preferred = (s32)le32_to_cpu(fl->fl_pg_preferred); 1003 s32 preferred = (s32)le32_to_cpu(fl->fl_pg_preferred);
1004 int poolid = le32_to_cpu(fl->fl_pg_pool); 1004 int poolid = le32_to_cpu(fl->fl_pg_pool);
1005 struct ceph_pg_pool_info *pool; 1005 struct ceph_pg_pool_info *pool;
1006 unsigned ps; 1006 unsigned int ps;
1007 1007
1008 BUG_ON(!osdmap); 1008 BUG_ON(!osdmap);
1009 1009
@@ -1045,7 +1045,7 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
1045 struct ceph_pg_mapping *pg; 1045 struct ceph_pg_mapping *pg;
1046 struct ceph_pg_pool_info *pool; 1046 struct ceph_pg_pool_info *pool;
1047 int ruleno; 1047 int ruleno;
1048 unsigned poolid, ps, pps, t; 1048 unsigned int poolid, ps, pps, t;
1049 int preferred; 1049 int preferred;
1050 1050
1051 poolid = le32_to_cpu(pgid.pool); 1051 poolid = le32_to_cpu(pgid.pool);
diff --git a/net/compat.c b/net/compat.c
index e055708b8ec9..e240441a2317 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -741,13 +741,13 @@ static unsigned char nas[21] = {
741}; 741};
742#undef AL 742#undef AL
743 743
744asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned flags) 744asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags)
745{ 745{
746 return sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); 746 return sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
747} 747}
748 748
749asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg, 749asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg,
750 unsigned vlen, unsigned int flags) 750 unsigned int vlen, unsigned int flags)
751{ 751{
752 return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, 752 return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
753 flags | MSG_CMSG_COMPAT); 753 flags | MSG_CMSG_COMPAT);
@@ -758,20 +758,20 @@ asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, uns
758 return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); 758 return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
759} 759}
760 760
761asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, unsigned flags) 761asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, unsigned int flags)
762{ 762{
763 return sys_recv(fd, buf, len, flags | MSG_CMSG_COMPAT); 763 return sys_recv(fd, buf, len, flags | MSG_CMSG_COMPAT);
764} 764}
765 765
766asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, size_t len, 766asmlinkage long compat_sys_recvfrom(int fd, void __user *buf, size_t len,
767 unsigned flags, struct sockaddr __user *addr, 767 unsigned int flags, struct sockaddr __user *addr,
768 int __user *addrlen) 768 int __user *addrlen)
769{ 769{
770 return sys_recvfrom(fd, buf, len, flags | MSG_CMSG_COMPAT, addr, addrlen); 770 return sys_recvfrom(fd, buf, len, flags | MSG_CMSG_COMPAT, addr, addrlen);
771} 771}
772 772
773asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg, 773asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg,
774 unsigned vlen, unsigned int flags, 774 unsigned int vlen, unsigned int flags,
775 struct compat_timespec __user *timeout) 775 struct compat_timespec __user *timeout)
776{ 776{
777 int datagrams; 777 int datagrams;
diff --git a/net/core/datagram.c b/net/core/datagram.c
index e4fbfd6e2bd4..ae6acf6a3dea 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -65,7 +65,7 @@ static inline int connection_based(struct sock *sk)
65 return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM; 65 return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
66} 66}
67 67
68static int receiver_wake_function(wait_queue_t *wait, unsigned mode, int sync, 68static int receiver_wake_function(wait_queue_t *wait, unsigned int mode, int sync,
69 void *key) 69 void *key)
70{ 70{
71 unsigned long bits = (unsigned long)key; 71 unsigned long bits = (unsigned long)key;
@@ -158,7 +158,7 @@ out_noerr:
158 * quite explicitly by POSIX 1003.1g, don't change them without having 158 * quite explicitly by POSIX 1003.1g, don't change them without having
159 * the standard around please. 159 * the standard around please.
160 */ 160 */
161struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags, 161struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
162 int *peeked, int *off, int *err) 162 int *peeked, int *off, int *err)
163{ 163{
164 struct sk_buff *skb; 164 struct sk_buff *skb;
@@ -216,7 +216,7 @@ no_packet:
216} 216}
217EXPORT_SYMBOL(__skb_recv_datagram); 217EXPORT_SYMBOL(__skb_recv_datagram);
218 218
219struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, 219struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags,
220 int noblock, int *err) 220 int noblock, int *err)
221{ 221{
222 int peeked, off = 0; 222 int peeked, off = 0;
diff --git a/net/core/dev.c b/net/core/dev.c
index 9bb8f87c4cda..a2be59fe6ab8 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -208,7 +208,8 @@ static inline void dev_base_seq_inc(struct net *net)
208 208
209static inline struct hlist_head *dev_name_hash(struct net *net, const char *name) 209static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
210{ 210{
211 unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ)); 211 unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
212
212 return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)]; 213 return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
213} 214}
214 215
@@ -3541,10 +3542,16 @@ gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
3541 break; 3542 break;
3542 3543
3543 case GRO_DROP: 3544 case GRO_DROP:
3544 case GRO_MERGED_FREE:
3545 kfree_skb(skb); 3545 kfree_skb(skb);
3546 break; 3546 break;
3547 3547
3548 case GRO_MERGED_FREE:
3549 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
3550 kmem_cache_free(skbuff_head_cache, skb);
3551 else
3552 __kfree_skb(skb);
3553 break;
3554
3548 case GRO_HELD: 3555 case GRO_HELD:
3549 case GRO_MERGED: 3556 case GRO_MERGED:
3550 break; 3557 break;
@@ -4618,9 +4625,9 @@ void dev_set_rx_mode(struct net_device *dev)
4618 * 4625 *
4619 * Get the combination of flag bits exported through APIs to userspace. 4626 * Get the combination of flag bits exported through APIs to userspace.
4620 */ 4627 */
4621unsigned dev_get_flags(const struct net_device *dev) 4628unsigned int dev_get_flags(const struct net_device *dev)
4622{ 4629{
4623 unsigned flags; 4630 unsigned int flags;
4624 4631
4625 flags = (dev->flags & ~(IFF_PROMISC | 4632 flags = (dev->flags & ~(IFF_PROMISC |
4626 IFF_ALLMULTI | 4633 IFF_ALLMULTI |
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 626698f0db8b..c4cc2bc49f06 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -21,12 +21,35 @@
21 * General list handling functions 21 * General list handling functions
22 */ 22 */
23 23
24static int __hw_addr_create_ex(struct netdev_hw_addr_list *list,
25 unsigned char *addr, int addr_len,
26 unsigned char addr_type, bool global)
27{
28 struct netdev_hw_addr *ha;
29 int alloc_size;
30
31 alloc_size = sizeof(*ha);
32 if (alloc_size < L1_CACHE_BYTES)
33 alloc_size = L1_CACHE_BYTES;
34 ha = kmalloc(alloc_size, GFP_ATOMIC);
35 if (!ha)
36 return -ENOMEM;
37 memcpy(ha->addr, addr, addr_len);
38 ha->type = addr_type;
39 ha->refcount = 1;
40 ha->global_use = global;
41 ha->synced = false;
42 list_add_tail_rcu(&ha->list, &list->list);
43 list->count++;
44
45 return 0;
46}
47
24static int __hw_addr_add_ex(struct netdev_hw_addr_list *list, 48static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
25 unsigned char *addr, int addr_len, 49 unsigned char *addr, int addr_len,
26 unsigned char addr_type, bool global) 50 unsigned char addr_type, bool global)
27{ 51{
28 struct netdev_hw_addr *ha; 52 struct netdev_hw_addr *ha;
29 int alloc_size;
30 53
31 if (addr_len > MAX_ADDR_LEN) 54 if (addr_len > MAX_ADDR_LEN)
32 return -EINVAL; 55 return -EINVAL;
@@ -46,21 +69,7 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
46 } 69 }
47 } 70 }
48 71
49 72 return __hw_addr_create_ex(list, addr, addr_len, addr_type, global);
50 alloc_size = sizeof(*ha);
51 if (alloc_size < L1_CACHE_BYTES)
52 alloc_size = L1_CACHE_BYTES;
53 ha = kmalloc(alloc_size, GFP_ATOMIC);
54 if (!ha)
55 return -ENOMEM;
56 memcpy(ha->addr, addr, addr_len);
57 ha->type = addr_type;
58 ha->refcount = 1;
59 ha->global_use = global;
60 ha->synced = false;
61 list_add_tail_rcu(&ha->list, &list->list);
62 list->count++;
63 return 0;
64} 73}
65 74
66static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr, 75static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
@@ -377,6 +386,34 @@ EXPORT_SYMBOL(dev_addr_del_multiple);
377 */ 386 */
378 387
379/** 388/**
389 * dev_uc_add_excl - Add a global secondary unicast address
390 * @dev: device
391 * @addr: address to add
392 */
393int dev_uc_add_excl(struct net_device *dev, unsigned char *addr)
394{
395 struct netdev_hw_addr *ha;
396 int err;
397
398 netif_addr_lock_bh(dev);
399 list_for_each_entry(ha, &dev->uc.list, list) {
400 if (!memcmp(ha->addr, addr, dev->addr_len) &&
401 ha->type == NETDEV_HW_ADDR_T_UNICAST) {
402 err = -EEXIST;
403 goto out;
404 }
405 }
406 err = __hw_addr_create_ex(&dev->uc, addr, dev->addr_len,
407 NETDEV_HW_ADDR_T_UNICAST, true);
408 if (!err)
409 __dev_set_rx_mode(dev);
410out:
411 netif_addr_unlock_bh(dev);
412 return err;
413}
414EXPORT_SYMBOL(dev_uc_add_excl);
415
416/**
380 * dev_uc_add - Add a secondary unicast address 417 * dev_uc_add - Add a secondary unicast address
381 * @dev: device 418 * @dev: device
382 * @addr: address to add 419 * @addr: address to add
@@ -501,6 +538,34 @@ EXPORT_SYMBOL(dev_uc_init);
501 * Multicast list handling functions 538 * Multicast list handling functions
502 */ 539 */
503 540
541/**
542 * dev_mc_add_excl - Add a global secondary multicast address
543 * @dev: device
544 * @addr: address to add
545 */
546int dev_mc_add_excl(struct net_device *dev, unsigned char *addr)
547{
548 struct netdev_hw_addr *ha;
549 int err;
550
551 netif_addr_lock_bh(dev);
552 list_for_each_entry(ha, &dev->mc.list, list) {
553 if (!memcmp(ha->addr, addr, dev->addr_len) &&
554 ha->type == NETDEV_HW_ADDR_T_MULTICAST) {
555 err = -EEXIST;
556 goto out;
557 }
558 }
559 err = __hw_addr_create_ex(&dev->mc, addr, dev->addr_len,
560 NETDEV_HW_ADDR_T_MULTICAST, true);
561 if (!err)
562 __dev_set_rx_mode(dev);
563out:
564 netif_addr_unlock_bh(dev);
565 return err;
566}
567EXPORT_SYMBOL(dev_mc_add_excl);
568
504static int __dev_mc_add(struct net_device *dev, unsigned char *addr, 569static int __dev_mc_add(struct net_device *dev, unsigned char *addr,
505 bool global) 570 bool global)
506{ 571{
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 6d6d7d25caaa..beacdd93cd8f 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -17,6 +17,8 @@
17#include <linux/errno.h> 17#include <linux/errno.h>
18#include <linux/ethtool.h> 18#include <linux/ethtool.h>
19#include <linux/netdevice.h> 19#include <linux/netdevice.h>
20#include <linux/net_tstamp.h>
21#include <linux/phy.h>
20#include <linux/bitops.h> 22#include <linux/bitops.h>
21#include <linux/uaccess.h> 23#include <linux/uaccess.h>
22#include <linux/vmalloc.h> 24#include <linux/vmalloc.h>
@@ -36,6 +38,17 @@ u32 ethtool_op_get_link(struct net_device *dev)
36} 38}
37EXPORT_SYMBOL(ethtool_op_get_link); 39EXPORT_SYMBOL(ethtool_op_get_link);
38 40
41int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
42{
43 info->so_timestamping =
44 SOF_TIMESTAMPING_TX_SOFTWARE |
45 SOF_TIMESTAMPING_RX_SOFTWARE |
46 SOF_TIMESTAMPING_SOFTWARE;
47 info->phc_index = -1;
48 return 0;
49}
50EXPORT_SYMBOL(ethtool_op_get_ts_info);
51
39/* Handlers for each ethtool command */ 52/* Handlers for each ethtool command */
40 53
41#define ETHTOOL_DEV_FEATURE_WORDS ((NETDEV_FEATURE_COUNT + 31) / 32) 54#define ETHTOOL_DEV_FEATURE_WORDS ((NETDEV_FEATURE_COUNT + 31) / 32)
@@ -1278,6 +1291,40 @@ out:
1278 return ret; 1291 return ret;
1279} 1292}
1280 1293
1294static int ethtool_get_ts_info(struct net_device *dev, void __user *useraddr)
1295{
1296 int err = 0;
1297 struct ethtool_ts_info info;
1298 const struct ethtool_ops *ops = dev->ethtool_ops;
1299 struct phy_device *phydev = dev->phydev;
1300
1301 memset(&info, 0, sizeof(info));
1302 info.cmd = ETHTOOL_GET_TS_INFO;
1303
1304 if (phydev && phydev->drv && phydev->drv->ts_info) {
1305
1306 err = phydev->drv->ts_info(phydev, &info);
1307
1308 } else if (dev->ethtool_ops && dev->ethtool_ops->get_ts_info) {
1309
1310 err = ops->get_ts_info(dev, &info);
1311
1312 } else {
1313 info.so_timestamping =
1314 SOF_TIMESTAMPING_RX_SOFTWARE |
1315 SOF_TIMESTAMPING_SOFTWARE;
1316 info.phc_index = -1;
1317 }
1318
1319 if (err)
1320 return err;
1321
1322 if (copy_to_user(useraddr, &info, sizeof(info)))
1323 err = -EFAULT;
1324
1325 return err;
1326}
1327
1281/* The main entry point in this file. Called from net/core/dev.c */ 1328/* The main entry point in this file. Called from net/core/dev.c */
1282 1329
1283int dev_ethtool(struct net *net, struct ifreq *ifr) 1330int dev_ethtool(struct net *net, struct ifreq *ifr)
@@ -1295,11 +1342,13 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1295 return -EFAULT; 1342 return -EFAULT;
1296 1343
1297 if (!dev->ethtool_ops) { 1344 if (!dev->ethtool_ops) {
1298 /* ETHTOOL_GDRVINFO does not require any driver support. 1345 /* A few commands do not require any driver support,
1299 * It is also unprivileged and does not change anything, 1346 * are unprivileged, and do not change anything, so we
1300 * so we can take a shortcut to it. */ 1347 * can take a shortcut to them. */
1301 if (ethcmd == ETHTOOL_GDRVINFO) 1348 if (ethcmd == ETHTOOL_GDRVINFO)
1302 return ethtool_get_drvinfo(dev, useraddr); 1349 return ethtool_get_drvinfo(dev, useraddr);
1350 else if (ethcmd == ETHTOOL_GET_TS_INFO)
1351 return ethtool_get_ts_info(dev, useraddr);
1303 else 1352 else
1304 return -EOPNOTSUPP; 1353 return -EOPNOTSUPP;
1305 } 1354 }
@@ -1330,6 +1379,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1330 case ETHTOOL_GRXCLSRULE: 1379 case ETHTOOL_GRXCLSRULE:
1331 case ETHTOOL_GRXCLSRLALL: 1380 case ETHTOOL_GRXCLSRLALL:
1332 case ETHTOOL_GFEATURES: 1381 case ETHTOOL_GFEATURES:
1382 case ETHTOOL_GET_TS_INFO:
1333 break; 1383 break;
1334 default: 1384 default:
1335 if (!capable(CAP_NET_ADMIN)) 1385 if (!capable(CAP_NET_ADMIN))
@@ -1496,6 +1546,9 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1496 case ETHTOOL_GET_DUMP_DATA: 1546 case ETHTOOL_GET_DUMP_DATA:
1497 rc = ethtool_get_dump_data(dev, useraddr); 1547 rc = ethtool_get_dump_data(dev, useraddr);
1498 break; 1548 break;
1549 case ETHTOOL_GET_TS_INFO:
1550 rc = ethtool_get_ts_info(dev, useraddr);
1551 break;
1499 default: 1552 default:
1500 rc = -EOPNOTSUPP; 1553 rc = -EOPNOTSUPP;
1501 } 1554 }
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index c02e63c908da..72cceb79d0d4 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -542,7 +542,8 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
542 frh = nlmsg_data(nlh); 542 frh = nlmsg_data(nlh);
543 frh->family = ops->family; 543 frh->family = ops->family;
544 frh->table = rule->table; 544 frh->table = rule->table;
545 NLA_PUT_U32(skb, FRA_TABLE, rule->table); 545 if (nla_put_u32(skb, FRA_TABLE, rule->table))
546 goto nla_put_failure;
546 frh->res1 = 0; 547 frh->res1 = 0;
547 frh->res2 = 0; 548 frh->res2 = 0;
548 frh->action = rule->action; 549 frh->action = rule->action;
@@ -553,31 +554,28 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
553 frh->flags |= FIB_RULE_UNRESOLVED; 554 frh->flags |= FIB_RULE_UNRESOLVED;
554 555
555 if (rule->iifname[0]) { 556 if (rule->iifname[0]) {
556 NLA_PUT_STRING(skb, FRA_IIFNAME, rule->iifname); 557 if (nla_put_string(skb, FRA_IIFNAME, rule->iifname))
557 558 goto nla_put_failure;
558 if (rule->iifindex == -1) 559 if (rule->iifindex == -1)
559 frh->flags |= FIB_RULE_IIF_DETACHED; 560 frh->flags |= FIB_RULE_IIF_DETACHED;
560 } 561 }
561 562
562 if (rule->oifname[0]) { 563 if (rule->oifname[0]) {
563 NLA_PUT_STRING(skb, FRA_OIFNAME, rule->oifname); 564 if (nla_put_string(skb, FRA_OIFNAME, rule->oifname))
564 565 goto nla_put_failure;
565 if (rule->oifindex == -1) 566 if (rule->oifindex == -1)
566 frh->flags |= FIB_RULE_OIF_DETACHED; 567 frh->flags |= FIB_RULE_OIF_DETACHED;
567 } 568 }
568 569
569 if (rule->pref) 570 if ((rule->pref &&
570 NLA_PUT_U32(skb, FRA_PRIORITY, rule->pref); 571 nla_put_u32(skb, FRA_PRIORITY, rule->pref)) ||
571 572 (rule->mark &&
572 if (rule->mark) 573 nla_put_u32(skb, FRA_FWMARK, rule->mark)) ||
573 NLA_PUT_U32(skb, FRA_FWMARK, rule->mark); 574 ((rule->mark_mask || rule->mark) &&
574 575 nla_put_u32(skb, FRA_FWMASK, rule->mark_mask)) ||
575 if (rule->mark_mask || rule->mark) 576 (rule->target &&
576 NLA_PUT_U32(skb, FRA_FWMASK, rule->mark_mask); 577 nla_put_u32(skb, FRA_GOTO, rule->target)))
577 578 goto nla_put_failure;
578 if (rule->target)
579 NLA_PUT_U32(skb, FRA_GOTO, rule->target);
580
581 if (ops->fill(rule, skb, frh) < 0) 579 if (ops->fill(rule, skb, frh) < 0)
582 goto nla_put_failure; 580 goto nla_put_failure;
583 581
diff --git a/net/core/filter.c b/net/core/filter.c
index 6f755cca4520..47a5f055e7f3 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -317,6 +317,9 @@ load_b:
317 case BPF_S_ANC_CPU: 317 case BPF_S_ANC_CPU:
318 A = raw_smp_processor_id(); 318 A = raw_smp_processor_id();
319 continue; 319 continue;
320 case BPF_S_ANC_ALU_XOR_X:
321 A ^= X;
322 continue;
320 case BPF_S_ANC_NLATTR: { 323 case BPF_S_ANC_NLATTR: {
321 struct nlattr *nla; 324 struct nlattr *nla;
322 325
@@ -528,7 +531,7 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
528 * Compare this with conditional jumps below, 531 * Compare this with conditional jumps below,
529 * where offsets are limited. --ANK (981016) 532 * where offsets are limited. --ANK (981016)
530 */ 533 */
531 if (ftest->k >= (unsigned)(flen-pc-1)) 534 if (ftest->k >= (unsigned int)(flen-pc-1))
532 return -EINVAL; 535 return -EINVAL;
533 break; 536 break;
534 case BPF_S_JMP_JEQ_K: 537 case BPF_S_JMP_JEQ_K:
@@ -561,6 +564,7 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
561 ANCILLARY(HATYPE); 564 ANCILLARY(HATYPE);
562 ANCILLARY(RXHASH); 565 ANCILLARY(RXHASH);
563 ANCILLARY(CPU); 566 ANCILLARY(CPU);
567 ANCILLARY(ALU_XOR_X);
564 } 568 }
565 } 569 }
566 ftest->code = code; 570 ftest->code = code;
@@ -589,6 +593,67 @@ void sk_filter_release_rcu(struct rcu_head *rcu)
589} 593}
590EXPORT_SYMBOL(sk_filter_release_rcu); 594EXPORT_SYMBOL(sk_filter_release_rcu);
591 595
596static int __sk_prepare_filter(struct sk_filter *fp)
597{
598 int err;
599
600 fp->bpf_func = sk_run_filter;
601
602 err = sk_chk_filter(fp->insns, fp->len);
603 if (err)
604 return err;
605
606 bpf_jit_compile(fp);
607 return 0;
608}
609
610/**
611 * sk_unattached_filter_create - create an unattached filter
612 * @fprog: the filter program
613 * @sk: the socket to use
614 *
615 * Create a filter independent ofr any socket. We first run some
616 * sanity checks on it to make sure it does not explode on us later.
617 * If an error occurs or there is insufficient memory for the filter
618 * a negative errno code is returned. On success the return is zero.
619 */
620int sk_unattached_filter_create(struct sk_filter **pfp,
621 struct sock_fprog *fprog)
622{
623 struct sk_filter *fp;
624 unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
625 int err;
626
627 /* Make sure new filter is there and in the right amounts. */
628 if (fprog->filter == NULL)
629 return -EINVAL;
630
631 fp = kmalloc(fsize + sizeof(*fp), GFP_KERNEL);
632 if (!fp)
633 return -ENOMEM;
634 memcpy(fp->insns, fprog->filter, fsize);
635
636 atomic_set(&fp->refcnt, 1);
637 fp->len = fprog->len;
638
639 err = __sk_prepare_filter(fp);
640 if (err)
641 goto free_mem;
642
643 *pfp = fp;
644 return 0;
645free_mem:
646 kfree(fp);
647 return err;
648}
649EXPORT_SYMBOL_GPL(sk_unattached_filter_create);
650
651void sk_unattached_filter_destroy(struct sk_filter *fp)
652{
653 sk_filter_release(fp);
654}
655EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy);
656
592/** 657/**
593 * sk_attach_filter - attach a socket filter 658 * sk_attach_filter - attach a socket filter
594 * @fprog: the filter program 659 * @fprog: the filter program
@@ -619,16 +684,13 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
619 684
620 atomic_set(&fp->refcnt, 1); 685 atomic_set(&fp->refcnt, 1);
621 fp->len = fprog->len; 686 fp->len = fprog->len;
622 fp->bpf_func = sk_run_filter;
623 687
624 err = sk_chk_filter(fp->insns, fp->len); 688 err = __sk_prepare_filter(fp);
625 if (err) { 689 if (err) {
626 sk_filter_uncharge(sk, fp); 690 sk_filter_uncharge(sk, fp);
627 return err; 691 return err;
628 } 692 }
629 693
630 bpf_jit_compile(fp);
631
632 old_fp = rcu_dereference_protected(sk->sk_filter, 694 old_fp = rcu_dereference_protected(sk->sk_filter,
633 sock_owned_by_user(sk)); 695 sock_owned_by_user(sk));
634 rcu_assign_pointer(sk->sk_filter, fp); 696 rcu_assign_pointer(sk->sk_filter, fp);
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index 0452eb27a272..ddedf211e588 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -27,7 +27,8 @@
27static inline int 27static inline int
28gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size) 28gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size)
29{ 29{
30 NLA_PUT(d->skb, type, size, buf); 30 if (nla_put(d->skb, type, size, buf))
31 goto nla_put_failure;
31 return 0; 32 return 0;
32 33
33nla_put_failure: 34nla_put_failure:
diff --git a/net/core/kmap_skb.h b/net/core/kmap_skb.h
deleted file mode 100644
index 52d0a4459041..000000000000
--- a/net/core/kmap_skb.h
+++ /dev/null
@@ -1,19 +0,0 @@
1#include <linux/highmem.h>
2
3static inline void *kmap_skb_frag(const skb_frag_t *frag)
4{
5#ifdef CONFIG_HIGHMEM
6 BUG_ON(in_irq());
7
8 local_bh_disable();
9#endif
10 return kmap_atomic(skb_frag_page(frag));
11}
12
13static inline void kunmap_skb_frag(void *vaddr)
14{
15 kunmap_atomic(vaddr);
16#ifdef CONFIG_HIGHMEM
17 local_bh_enable();
18#endif
19}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 0a68045782d1..fadaa819b854 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -890,7 +890,7 @@ static void neigh_timer_handler(unsigned long arg)
890{ 890{
891 unsigned long now, next; 891 unsigned long now, next;
892 struct neighbour *neigh = (struct neighbour *)arg; 892 struct neighbour *neigh = (struct neighbour *)arg;
893 unsigned state; 893 unsigned int state;
894 int notify = 0; 894 int notify = 0;
895 895
896 write_lock(&neigh->lock); 896 write_lock(&neigh->lock);
@@ -1500,7 +1500,7 @@ static void neigh_parms_destroy(struct neigh_parms *parms)
1500 1500
1501static struct lock_class_key neigh_table_proxy_queue_class; 1501static struct lock_class_key neigh_table_proxy_queue_class;
1502 1502
1503void neigh_table_init_no_netlink(struct neigh_table *tbl) 1503static void neigh_table_init_no_netlink(struct neigh_table *tbl)
1504{ 1504{
1505 unsigned long now = jiffies; 1505 unsigned long now = jiffies;
1506 unsigned long phsize; 1506 unsigned long phsize;
@@ -1538,7 +1538,6 @@ void neigh_table_init_no_netlink(struct neigh_table *tbl)
1538 tbl->last_flush = now; 1538 tbl->last_flush = now;
1539 tbl->last_rand = now + tbl->parms.reachable_time * 20; 1539 tbl->last_rand = now + tbl->parms.reachable_time * 20;
1540} 1540}
1541EXPORT_SYMBOL(neigh_table_init_no_netlink);
1542 1541
1543void neigh_table_init(struct neigh_table *tbl) 1542void neigh_table_init(struct neigh_table *tbl)
1544{ 1543{
@@ -1768,29 +1767,29 @@ static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1768 if (nest == NULL) 1767 if (nest == NULL)
1769 return -ENOBUFS; 1768 return -ENOBUFS;
1770 1769
1771 if (parms->dev) 1770 if ((parms->dev &&
1772 NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex); 1771 nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
1773 1772 nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) ||
1774 NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)); 1773 nla_put_u32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes) ||
1775 NLA_PUT_U32(skb, NDTPA_QUEUE_LENBYTES, parms->queue_len_bytes); 1774 /* approximative value for deprecated QUEUE_LEN (in packets) */
1776 /* approximative value for deprecated QUEUE_LEN (in packets) */ 1775 nla_put_u32(skb, NDTPA_QUEUE_LEN,
1777 NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, 1776 DIV_ROUND_UP(parms->queue_len_bytes,
1778 DIV_ROUND_UP(parms->queue_len_bytes, 1777 SKB_TRUESIZE(ETH_FRAME_LEN))) ||
1779 SKB_TRUESIZE(ETH_FRAME_LEN))); 1778 nla_put_u32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen) ||
1780 NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen); 1779 nla_put_u32(skb, NDTPA_APP_PROBES, parms->app_probes) ||
1781 NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes); 1780 nla_put_u32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes) ||
1782 NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes); 1781 nla_put_u32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes) ||
1783 NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes); 1782 nla_put_msecs(skb, NDTPA_REACHABLE_TIME, parms->reachable_time) ||
1784 NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time); 1783 nla_put_msecs(skb, NDTPA_BASE_REACHABLE_TIME,
1785 NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME, 1784 parms->base_reachable_time) ||
1786 parms->base_reachable_time); 1785 nla_put_msecs(skb, NDTPA_GC_STALETIME, parms->gc_staletime) ||
1787 NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime); 1786 nla_put_msecs(skb, NDTPA_DELAY_PROBE_TIME,
1788 NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time); 1787 parms->delay_probe_time) ||
1789 NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time); 1788 nla_put_msecs(skb, NDTPA_RETRANS_TIME, parms->retrans_time) ||
1790 NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay); 1789 nla_put_msecs(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay) ||
1791 NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay); 1790 nla_put_msecs(skb, NDTPA_PROXY_DELAY, parms->proxy_delay) ||
1792 NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime); 1791 nla_put_msecs(skb, NDTPA_LOCKTIME, parms->locktime))
1793 1792 goto nla_put_failure;
1794 return nla_nest_end(skb, nest); 1793 return nla_nest_end(skb, nest);
1795 1794
1796nla_put_failure: 1795nla_put_failure:
@@ -1815,12 +1814,12 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1815 ndtmsg->ndtm_pad1 = 0; 1814 ndtmsg->ndtm_pad1 = 0;
1816 ndtmsg->ndtm_pad2 = 0; 1815 ndtmsg->ndtm_pad2 = 0;
1817 1816
1818 NLA_PUT_STRING(skb, NDTA_NAME, tbl->id); 1817 if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
1819 NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval); 1818 nla_put_msecs(skb, NDTA_GC_INTERVAL, tbl->gc_interval) ||
1820 NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1); 1819 nla_put_u32(skb, NDTA_THRESH1, tbl->gc_thresh1) ||
1821 NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2); 1820 nla_put_u32(skb, NDTA_THRESH2, tbl->gc_thresh2) ||
1822 NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3); 1821 nla_put_u32(skb, NDTA_THRESH3, tbl->gc_thresh3))
1823 1822 goto nla_put_failure;
1824 { 1823 {
1825 unsigned long now = jiffies; 1824 unsigned long now = jiffies;
1826 unsigned int flush_delta = now - tbl->last_flush; 1825 unsigned int flush_delta = now - tbl->last_flush;
@@ -1841,7 +1840,8 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1841 ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1); 1840 ndc.ndtc_hash_mask = ((1 << nht->hash_shift) - 1);
1842 rcu_read_unlock_bh(); 1841 rcu_read_unlock_bh();
1843 1842
1844 NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc); 1843 if (nla_put(skb, NDTA_CONFIG, sizeof(ndc), &ndc))
1844 goto nla_put_failure;
1845 } 1845 }
1846 1846
1847 { 1847 {
@@ -1866,7 +1866,8 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1866 ndst.ndts_forced_gc_runs += st->forced_gc_runs; 1866 ndst.ndts_forced_gc_runs += st->forced_gc_runs;
1867 } 1867 }
1868 1868
1869 NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst); 1869 if (nla_put(skb, NDTA_STATS, sizeof(ndst), &ndst))
1870 goto nla_put_failure;
1870 } 1871 }
1871 1872
1872 BUG_ON(tbl->parms.dev); 1873 BUG_ON(tbl->parms.dev);
@@ -2137,7 +2138,8 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2137 ndm->ndm_type = neigh->type; 2138 ndm->ndm_type = neigh->type;
2138 ndm->ndm_ifindex = neigh->dev->ifindex; 2139 ndm->ndm_ifindex = neigh->dev->ifindex;
2139 2140
2140 NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key); 2141 if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2142 goto nla_put_failure;
2141 2143
2142 read_lock_bh(&neigh->lock); 2144 read_lock_bh(&neigh->lock);
2143 ndm->ndm_state = neigh->nud_state; 2145 ndm->ndm_state = neigh->nud_state;
@@ -2157,8 +2159,9 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2157 ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1; 2159 ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
2158 read_unlock_bh(&neigh->lock); 2160 read_unlock_bh(&neigh->lock);
2159 2161
2160 NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes)); 2162 if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2161 NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci); 2163 nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
2164 goto nla_put_failure;
2162 2165
2163 return nlmsg_end(skb, nlh); 2166 return nlmsg_end(skb, nlh);
2164 2167
@@ -2187,7 +2190,8 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
2187 ndm->ndm_ifindex = pn->dev->ifindex; 2190 ndm->ndm_ifindex = pn->dev->ifindex;
2188 ndm->ndm_state = NUD_NONE; 2191 ndm->ndm_state = NUD_NONE;
2189 2192
2190 NLA_PUT(skb, NDA_DST, tbl->key_len, pn->key); 2193 if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2194 goto nla_put_failure;
2191 2195
2192 return nlmsg_end(skb, nlh); 2196 return nlmsg_end(skb, nlh);
2193 2197
@@ -2795,7 +2799,6 @@ enum {
2795static struct neigh_sysctl_table { 2799static struct neigh_sysctl_table {
2796 struct ctl_table_header *sysctl_header; 2800 struct ctl_table_header *sysctl_header;
2797 struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1]; 2801 struct ctl_table neigh_vars[NEIGH_VAR_MAX + 1];
2798 char *dev_name;
2799} neigh_sysctl_template __read_mostly = { 2802} neigh_sysctl_template __read_mostly = {
2800 .neigh_vars = { 2803 .neigh_vars = {
2801 [NEIGH_VAR_MCAST_PROBE] = { 2804 [NEIGH_VAR_MCAST_PROBE] = {
@@ -2921,19 +2924,7 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2921{ 2924{
2922 struct neigh_sysctl_table *t; 2925 struct neigh_sysctl_table *t;
2923 const char *dev_name_source = NULL; 2926 const char *dev_name_source = NULL;
2924 2927 char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
2925#define NEIGH_CTL_PATH_ROOT 0
2926#define NEIGH_CTL_PATH_PROTO 1
2927#define NEIGH_CTL_PATH_NEIGH 2
2928#define NEIGH_CTL_PATH_DEV 3
2929
2930 struct ctl_path neigh_path[] = {
2931 { .procname = "net", },
2932 { .procname = "proto", },
2933 { .procname = "neigh", },
2934 { .procname = "default", },
2935 { },
2936 };
2937 2928
2938 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL); 2929 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
2939 if (!t) 2930 if (!t)
@@ -2961,7 +2952,7 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2961 memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0, 2952 memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
2962 sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL])); 2953 sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
2963 } else { 2954 } else {
2964 dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname; 2955 dev_name_source = "default";
2965 t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1); 2956 t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1);
2966 t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1; 2957 t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1;
2967 t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2; 2958 t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2;
@@ -2984,23 +2975,16 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2984 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].extra1 = dev; 2975 t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].extra1 = dev;
2985 } 2976 }
2986 2977
2987 t->dev_name = kstrdup(dev_name_source, GFP_KERNEL); 2978 snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",
2988 if (!t->dev_name) 2979 p_name, dev_name_source);
2989 goto free;
2990
2991 neigh_path[NEIGH_CTL_PATH_DEV].procname = t->dev_name;
2992 neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name;
2993
2994 t->sysctl_header = 2980 t->sysctl_header =
2995 register_net_sysctl_table(neigh_parms_net(p), neigh_path, t->neigh_vars); 2981 register_net_sysctl(neigh_parms_net(p), neigh_path, t->neigh_vars);
2996 if (!t->sysctl_header) 2982 if (!t->sysctl_header)
2997 goto free_procname; 2983 goto free;
2998 2984
2999 p->sysctl_table = t; 2985 p->sysctl_table = t;
3000 return 0; 2986 return 0;
3001 2987
3002free_procname:
3003 kfree(t->dev_name);
3004free: 2988free:
3005 kfree(t); 2989 kfree(t);
3006err: 2990err:
@@ -3013,8 +2997,7 @@ void neigh_sysctl_unregister(struct neigh_parms *p)
3013 if (p->sysctl_table) { 2997 if (p->sysctl_table) {
3014 struct neigh_sysctl_table *t = p->sysctl_table; 2998 struct neigh_sysctl_table *t = p->sysctl_table;
3015 p->sysctl_table = NULL; 2999 p->sysctl_table = NULL;
3016 unregister_sysctl_table(t->sysctl_header); 3000 unregister_net_sysctl_table(t->sysctl_header);
3017 kfree(t->dev_name);
3018 kfree(t); 3001 kfree(t);
3019 } 3002 }
3020} 3003}
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 495586232aa1..fdf9e61d0651 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -74,15 +74,14 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
74 int (*set)(struct net_device *, unsigned long)) 74 int (*set)(struct net_device *, unsigned long))
75{ 75{
76 struct net_device *net = to_net_dev(dev); 76 struct net_device *net = to_net_dev(dev);
77 char *endp;
78 unsigned long new; 77 unsigned long new;
79 int ret = -EINVAL; 78 int ret = -EINVAL;
80 79
81 if (!capable(CAP_NET_ADMIN)) 80 if (!capable(CAP_NET_ADMIN))
82 return -EPERM; 81 return -EPERM;
83 82
84 new = simple_strtoul(buf, &endp, 0); 83 ret = kstrtoul(buf, 0, &new);
85 if (endp == buf) 84 if (ret)
86 goto err; 85 goto err;
87 86
88 if (!rtnl_trylock()) 87 if (!rtnl_trylock())
@@ -232,7 +231,7 @@ NETDEVICE_SHOW(flags, fmt_hex);
232 231
233static int change_flags(struct net_device *net, unsigned long new_flags) 232static int change_flags(struct net_device *net, unsigned long new_flags)
234{ 233{
235 return dev_change_flags(net, (unsigned) new_flags); 234 return dev_change_flags(net, (unsigned int) new_flags);
236} 235}
237 236
238static ssize_t store_flags(struct device *dev, struct device_attribute *attr, 237static ssize_t store_flags(struct device *dev, struct device_attribute *attr,
@@ -582,7 +581,7 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
582 return err; 581 return err;
583 } 582 }
584 583
585 map = kzalloc(max_t(unsigned, 584 map = kzalloc(max_t(unsigned int,
586 RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES), 585 RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
587 GFP_KERNEL); 586 GFP_KERNEL);
588 if (!map) { 587 if (!map) {
@@ -903,7 +902,7 @@ static ssize_t bql_set_hold_time(struct netdev_queue *queue,
903 const char *buf, size_t len) 902 const char *buf, size_t len)
904{ 903{
905 struct dql *dql = &queue->dql; 904 struct dql *dql = &queue->dql;
906 unsigned value; 905 unsigned int value;
907 int err; 906 int err;
908 907
909 err = kstrtouint(buf, 10, &value); 908 err = kstrtouint(buf, 10, &value);
@@ -1107,7 +1106,7 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
1107 return err; 1106 return err;
1108 } 1107 }
1109 1108
1110 new_dev_maps = kzalloc(max_t(unsigned, 1109 new_dev_maps = kzalloc(max_t(unsigned int,
1111 XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES), GFP_KERNEL); 1110 XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES), GFP_KERNEL);
1112 if (!new_dev_maps) { 1111 if (!new_dev_maps) {
1113 free_cpumask_var(mask); 1112 free_cpumask_var(mask);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 4d8ce93cd503..ffb5d382f241 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -320,7 +320,7 @@ struct pktgen_dev {
320 (see RFC 3260, sec. 4) */ 320 (see RFC 3260, sec. 4) */
321 321
322 /* MPLS */ 322 /* MPLS */
323 unsigned nr_labels; /* Depth of stack, 0 = no MPLS */ 323 unsigned int nr_labels; /* Depth of stack, 0 = no MPLS */
324 __be32 labels[MAX_MPLS_LABELS]; 324 __be32 labels[MAX_MPLS_LABELS];
325 325
326 /* VLAN/SVLAN (802.1Q/Q-in-Q) */ 326 /* VLAN/SVLAN (802.1Q/Q-in-Q) */
@@ -373,10 +373,10 @@ struct pktgen_dev {
373 */ 373 */
374 char odevname[32]; 374 char odevname[32];
375 struct flow_state *flows; 375 struct flow_state *flows;
376 unsigned cflows; /* Concurrent flows (config) */ 376 unsigned int cflows; /* Concurrent flows (config) */
377 unsigned lflow; /* Flow length (config) */ 377 unsigned int lflow; /* Flow length (config) */
378 unsigned nflows; /* accumulated flows (stats) */ 378 unsigned int nflows; /* accumulated flows (stats) */
379 unsigned curfl; /* current sequenced flow (state)*/ 379 unsigned int curfl; /* current sequenced flow (state)*/
380 380
381 u16 queue_map_min; 381 u16 queue_map_min;
382 u16 queue_map_max; 382 u16 queue_map_max;
@@ -592,7 +592,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
592 pkt_dev->src_mac_count, pkt_dev->dst_mac_count); 592 pkt_dev->src_mac_count, pkt_dev->dst_mac_count);
593 593
594 if (pkt_dev->nr_labels) { 594 if (pkt_dev->nr_labels) {
595 unsigned i; 595 unsigned int i;
596 seq_printf(seq, " mpls: "); 596 seq_printf(seq, " mpls: ");
597 for (i = 0; i < pkt_dev->nr_labels; i++) 597 for (i = 0; i < pkt_dev->nr_labels; i++)
598 seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]), 598 seq_printf(seq, "%08x%s", ntohl(pkt_dev->labels[i]),
@@ -812,7 +812,7 @@ done_str:
812 812
813static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev) 813static ssize_t get_labels(const char __user *buffer, struct pktgen_dev *pkt_dev)
814{ 814{
815 unsigned n = 0; 815 unsigned int n = 0;
816 char c; 816 char c;
817 ssize_t i = 0; 817 ssize_t i = 0;
818 int len; 818 int len;
@@ -1510,7 +1510,7 @@ static ssize_t pktgen_if_write(struct file *file,
1510 } 1510 }
1511 1511
1512 if (!strcmp(name, "mpls")) { 1512 if (!strcmp(name, "mpls")) {
1513 unsigned n, cnt; 1513 unsigned int n, cnt;
1514 1514
1515 len = get_labels(&user_buffer[i], pkt_dev); 1515 len = get_labels(&user_buffer[i], pkt_dev);
1516 if (len < 0) 1516 if (len < 0)
@@ -2324,7 +2324,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev)
2324 } 2324 }
2325 2325
2326 if (pkt_dev->flags & F_MPLS_RND) { 2326 if (pkt_dev->flags & F_MPLS_RND) {
2327 unsigned i; 2327 unsigned int i;
2328 for (i = 0; i < pkt_dev->nr_labels; i++) 2328 for (i = 0; i < pkt_dev->nr_labels; i++)
2329 if (pkt_dev->labels[i] & MPLS_STACK_BOTTOM) 2329 if (pkt_dev->labels[i] & MPLS_STACK_BOTTOM)
2330 pkt_dev->labels[i] = MPLS_STACK_BOTTOM | 2330 pkt_dev->labels[i] = MPLS_STACK_BOTTOM |
@@ -2550,7 +2550,7 @@ err:
2550 2550
2551static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev) 2551static void mpls_push(__be32 *mpls, struct pktgen_dev *pkt_dev)
2552{ 2552{
2553 unsigned i; 2553 unsigned int i;
2554 for (i = 0; i < pkt_dev->nr_labels; i++) 2554 for (i = 0; i < pkt_dev->nr_labels; i++)
2555 *mpls++ = pkt_dev->labels[i] & ~MPLS_STACK_BOTTOM; 2555 *mpls++ = pkt_dev->labels[i] & ~MPLS_STACK_BOTTOM;
2556 2556
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 90430b776ece..b442d35bbc8b 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -35,7 +35,9 @@
35#include <linux/security.h> 35#include <linux/security.h>
36#include <linux/mutex.h> 36#include <linux/mutex.h>
37#include <linux/if_addr.h> 37#include <linux/if_addr.h>
38#include <linux/if_bridge.h>
38#include <linux/pci.h> 39#include <linux/pci.h>
40#include <linux/etherdevice.h>
39 41
40#include <asm/uaccess.h> 42#include <asm/uaccess.h>
41 43
@@ -552,7 +554,7 @@ void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data
552} 554}
553EXPORT_SYMBOL(__rta_fill); 555EXPORT_SYMBOL(__rta_fill);
554 556
555int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned group, int echo) 557int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
556{ 558{
557 struct sock *rtnl = net->rtnl; 559 struct sock *rtnl = net->rtnl;
558 int err = 0; 560 int err = 0;
@@ -607,7 +609,8 @@ int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
607 for (i = 0; i < RTAX_MAX; i++) { 609 for (i = 0; i < RTAX_MAX; i++) {
608 if (metrics[i]) { 610 if (metrics[i]) {
609 valid++; 611 valid++;
610 NLA_PUT_U32(skb, i+1, metrics[i]); 612 if (nla_put_u32(skb, i+1, metrics[i]))
613 goto nla_put_failure;
611 } 614 }
612 } 615 }
613 616
@@ -782,6 +785,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
782 + nla_total_size(4) /* IFLA_MTU */ 785 + nla_total_size(4) /* IFLA_MTU */
783 + nla_total_size(4) /* IFLA_LINK */ 786 + nla_total_size(4) /* IFLA_LINK */
784 + nla_total_size(4) /* IFLA_MASTER */ 787 + nla_total_size(4) /* IFLA_MASTER */
788 + nla_total_size(4) /* IFLA_PROMISCUITY */
785 + nla_total_size(1) /* IFLA_OPERSTATE */ 789 + nla_total_size(1) /* IFLA_OPERSTATE */
786 + nla_total_size(1) /* IFLA_LINKMODE */ 790 + nla_total_size(1) /* IFLA_LINKMODE */
787 + nla_total_size(ext_filter_mask 791 + nla_total_size(ext_filter_mask
@@ -807,7 +811,8 @@ static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
807 vf_port = nla_nest_start(skb, IFLA_VF_PORT); 811 vf_port = nla_nest_start(skb, IFLA_VF_PORT);
808 if (!vf_port) 812 if (!vf_port)
809 goto nla_put_failure; 813 goto nla_put_failure;
810 NLA_PUT_U32(skb, IFLA_PORT_VF, vf); 814 if (nla_put_u32(skb, IFLA_PORT_VF, vf))
815 goto nla_put_failure;
811 err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb); 816 err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
812 if (err == -EMSGSIZE) 817 if (err == -EMSGSIZE)
813 goto nla_put_failure; 818 goto nla_put_failure;
@@ -891,25 +896,23 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
891 ifm->ifi_flags = dev_get_flags(dev); 896 ifm->ifi_flags = dev_get_flags(dev);
892 ifm->ifi_change = change; 897 ifm->ifi_change = change;
893 898
894 NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name); 899 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
895 NLA_PUT_U32(skb, IFLA_TXQLEN, dev->tx_queue_len); 900 nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
896 NLA_PUT_U8(skb, IFLA_OPERSTATE, 901 nla_put_u8(skb, IFLA_OPERSTATE,
897 netif_running(dev) ? dev->operstate : IF_OPER_DOWN); 902 netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
898 NLA_PUT_U8(skb, IFLA_LINKMODE, dev->link_mode); 903 nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
899 NLA_PUT_U32(skb, IFLA_MTU, dev->mtu); 904 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
900 NLA_PUT_U32(skb, IFLA_GROUP, dev->group); 905 nla_put_u32(skb, IFLA_GROUP, dev->group) ||
901 906 nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
902 if (dev->ifindex != dev->iflink) 907 (dev->ifindex != dev->iflink &&
903 NLA_PUT_U32(skb, IFLA_LINK, dev->iflink); 908 nla_put_u32(skb, IFLA_LINK, dev->iflink)) ||
904 909 (dev->master &&
905 if (dev->master) 910 nla_put_u32(skb, IFLA_MASTER, dev->master->ifindex)) ||
906 NLA_PUT_U32(skb, IFLA_MASTER, dev->master->ifindex); 911 (dev->qdisc &&
907 912 nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
908 if (dev->qdisc) 913 (dev->ifalias &&
909 NLA_PUT_STRING(skb, IFLA_QDISC, dev->qdisc->ops->id); 914 nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)))
910 915 goto nla_put_failure;
911 if (dev->ifalias)
912 NLA_PUT_STRING(skb, IFLA_IFALIAS, dev->ifalias);
913 916
914 if (1) { 917 if (1) {
915 struct rtnl_link_ifmap map = { 918 struct rtnl_link_ifmap map = {
@@ -920,12 +923,14 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
920 .dma = dev->dma, 923 .dma = dev->dma,
921 .port = dev->if_port, 924 .port = dev->if_port,
922 }; 925 };
923 NLA_PUT(skb, IFLA_MAP, sizeof(map), &map); 926 if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
927 goto nla_put_failure;
924 } 928 }
925 929
926 if (dev->addr_len) { 930 if (dev->addr_len) {
927 NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr); 931 if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
928 NLA_PUT(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast); 932 nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
933 goto nla_put_failure;
929 } 934 }
930 935
931 attr = nla_reserve(skb, IFLA_STATS, 936 attr = nla_reserve(skb, IFLA_STATS,
@@ -942,8 +947,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
942 goto nla_put_failure; 947 goto nla_put_failure;
943 copy_rtnl_link_stats64(nla_data(attr), stats); 948 copy_rtnl_link_stats64(nla_data(attr), stats);
944 949
945 if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) 950 if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF) &&
946 NLA_PUT_U32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)); 951 nla_put_u32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)))
952 goto nla_put_failure;
947 953
948 if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent 954 if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent
949 && (ext_filter_mask & RTEXT_FILTER_VF)) { 955 && (ext_filter_mask & RTEXT_FILTER_VF)) {
@@ -986,12 +992,13 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
986 nla_nest_cancel(skb, vfinfo); 992 nla_nest_cancel(skb, vfinfo);
987 goto nla_put_failure; 993 goto nla_put_failure;
988 } 994 }
989 NLA_PUT(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac); 995 if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
990 NLA_PUT(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan); 996 nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
991 NLA_PUT(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate), 997 nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
992 &vf_tx_rate); 998 &vf_tx_rate) ||
993 NLA_PUT(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk), 999 nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
994 &vf_spoofchk); 1000 &vf_spoofchk))
1001 goto nla_put_failure;
995 nla_nest_end(skb, vf); 1002 nla_nest_end(skb, vf);
996 } 1003 }
997 nla_nest_end(skb, vfinfo); 1004 nla_nest_end(skb, vfinfo);
@@ -1113,6 +1120,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
1113 [IFLA_PORT_SELF] = { .type = NLA_NESTED }, 1120 [IFLA_PORT_SELF] = { .type = NLA_NESTED },
1114 [IFLA_AF_SPEC] = { .type = NLA_NESTED }, 1121 [IFLA_AF_SPEC] = { .type = NLA_NESTED },
1115 [IFLA_EXT_MASK] = { .type = NLA_U32 }, 1122 [IFLA_EXT_MASK] = { .type = NLA_U32 },
1123 [IFLA_PROMISCUITY] = { .type = NLA_U32 },
1116}; 1124};
1117EXPORT_SYMBOL(ifla_policy); 1125EXPORT_SYMBOL(ifla_policy);
1118 1126
@@ -1634,14 +1642,14 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
1634 int err; 1642 int err;
1635 struct net_device *dev; 1643 struct net_device *dev;
1636 unsigned int num_queues = 1; 1644 unsigned int num_queues = 1;
1637 unsigned int real_num_queues = 1;
1638 1645
1639 if (ops->get_tx_queues) { 1646 if (ops->get_tx_queues) {
1640 err = ops->get_tx_queues(src_net, tb, &num_queues, 1647 err = ops->get_tx_queues(src_net, tb);
1641 &real_num_queues); 1648 if (err < 0)
1642 if (err)
1643 goto err; 1649 goto err;
1650 num_queues = err;
1644 } 1651 }
1652
1645 err = -ENOMEM; 1653 err = -ENOMEM;
1646 dev = alloc_netdev_mq(ops->priv_size, ifname, ops->setup, num_queues); 1654 dev = alloc_netdev_mq(ops->priv_size, ifname, ops->setup, num_queues);
1647 if (!dev) 1655 if (!dev)
@@ -1947,7 +1955,7 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
1947 return skb->len; 1955 return skb->len;
1948} 1956}
1949 1957
1950void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change) 1958void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change)
1951{ 1959{
1952 struct net *net = dev_net(dev); 1960 struct net *net = dev_net(dev);
1953 struct sk_buff *skb; 1961 struct sk_buff *skb;
@@ -1972,6 +1980,267 @@ errout:
1972 rtnl_set_sk_err(net, RTNLGRP_LINK, err); 1980 rtnl_set_sk_err(net, RTNLGRP_LINK, err);
1973} 1981}
1974 1982
1983static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
1984 struct net_device *dev,
1985 u8 *addr, u32 pid, u32 seq,
1986 int type, unsigned int flags)
1987{
1988 struct nlmsghdr *nlh;
1989 struct ndmsg *ndm;
1990
1991 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), NLM_F_MULTI);
1992 if (!nlh)
1993 return -EMSGSIZE;
1994
1995 ndm = nlmsg_data(nlh);
1996 ndm->ndm_family = AF_BRIDGE;
1997 ndm->ndm_pad1 = 0;
1998 ndm->ndm_pad2 = 0;
1999 ndm->ndm_flags = flags;
2000 ndm->ndm_type = 0;
2001 ndm->ndm_ifindex = dev->ifindex;
2002 ndm->ndm_state = NUD_PERMANENT;
2003
2004 if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
2005 goto nla_put_failure;
2006
2007 return nlmsg_end(skb, nlh);
2008
2009nla_put_failure:
2010 nlmsg_cancel(skb, nlh);
2011 return -EMSGSIZE;
2012}
2013
2014static inline size_t rtnl_fdb_nlmsg_size(void)
2015{
2016 return NLMSG_ALIGN(sizeof(struct ndmsg)) + nla_total_size(ETH_ALEN);
2017}
2018
2019static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, int type)
2020{
2021 struct net *net = dev_net(dev);
2022 struct sk_buff *skb;
2023 int err = -ENOBUFS;
2024
2025 skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
2026 if (!skb)
2027 goto errout;
2028
2029 err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF);
2030 if (err < 0) {
2031 kfree_skb(skb);
2032 goto errout;
2033 }
2034
2035 rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2036 return;
2037errout:
2038 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2039}
2040
2041static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
2042{
2043 struct net *net = sock_net(skb->sk);
2044 struct net_device *master = NULL;
2045 struct ndmsg *ndm;
2046 struct nlattr *tb[NDA_MAX+1];
2047 struct net_device *dev;
2048 u8 *addr;
2049 int err;
2050
2051 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
2052 if (err < 0)
2053 return err;
2054
2055 ndm = nlmsg_data(nlh);
2056 if (ndm->ndm_ifindex == 0) {
2057 pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid ifindex\n");
2058 return -EINVAL;
2059 }
2060
2061 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
2062 if (dev == NULL) {
2063 pr_info("PF_BRIDGE: RTM_NEWNEIGH with unknown ifindex\n");
2064 return -ENODEV;
2065 }
2066
2067 if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
2068 pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid address\n");
2069 return -EINVAL;
2070 }
2071
2072 addr = nla_data(tb[NDA_LLADDR]);
2073 if (!is_valid_ether_addr(addr)) {
2074 pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid ether address\n");
2075 return -EINVAL;
2076 }
2077
2078 err = -EOPNOTSUPP;
2079
2080 /* Support fdb on master device the net/bridge default case */
2081 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
2082 (dev->priv_flags & IFF_BRIDGE_PORT)) {
2083 master = dev->master;
2084 err = master->netdev_ops->ndo_fdb_add(ndm, dev, addr,
2085 nlh->nlmsg_flags);
2086 if (err)
2087 goto out;
2088 else
2089 ndm->ndm_flags &= ~NTF_MASTER;
2090 }
2091
2092 /* Embedded bridge, macvlan, and any other device support */
2093 if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_add) {
2094 err = dev->netdev_ops->ndo_fdb_add(ndm, dev, addr,
2095 nlh->nlmsg_flags);
2096
2097 if (!err) {
2098 rtnl_fdb_notify(dev, addr, RTM_NEWNEIGH);
2099 ndm->ndm_flags &= ~NTF_SELF;
2100 }
2101 }
2102out:
2103 return err;
2104}
2105
2106static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
2107{
2108 struct net *net = sock_net(skb->sk);
2109 struct ndmsg *ndm;
2110 struct nlattr *llattr;
2111 struct net_device *dev;
2112 int err = -EINVAL;
2113 __u8 *addr;
2114
2115 if (nlmsg_len(nlh) < sizeof(*ndm))
2116 return -EINVAL;
2117
2118 ndm = nlmsg_data(nlh);
2119 if (ndm->ndm_ifindex == 0) {
2120 pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid ifindex\n");
2121 return -EINVAL;
2122 }
2123
2124 dev = __dev_get_by_index(net, ndm->ndm_ifindex);
2125 if (dev == NULL) {
2126 pr_info("PF_BRIDGE: RTM_DELNEIGH with unknown ifindex\n");
2127 return -ENODEV;
2128 }
2129
2130 llattr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_LLADDR);
2131 if (llattr == NULL || nla_len(llattr) != ETH_ALEN) {
2132 pr_info("PF_BRIGDE: RTM_DELNEIGH with invalid address\n");
2133 return -EINVAL;
2134 }
2135
2136 addr = nla_data(llattr);
2137 err = -EOPNOTSUPP;
2138
2139 /* Support fdb on master device the net/bridge default case */
2140 if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
2141 (dev->priv_flags & IFF_BRIDGE_PORT)) {
2142 struct net_device *master = dev->master;
2143
2144 if (master->netdev_ops->ndo_fdb_del)
2145 err = master->netdev_ops->ndo_fdb_del(ndm, dev, addr);
2146
2147 if (err)
2148 goto out;
2149 else
2150 ndm->ndm_flags &= ~NTF_MASTER;
2151 }
2152
2153 /* Embedded bridge, macvlan, and any other device support */
2154 if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_del) {
2155 err = dev->netdev_ops->ndo_fdb_del(ndm, dev, addr);
2156
2157 if (!err) {
2158 rtnl_fdb_notify(dev, addr, RTM_DELNEIGH);
2159 ndm->ndm_flags &= ~NTF_SELF;
2160 }
2161 }
2162out:
2163 return err;
2164}
2165
2166static int nlmsg_populate_fdb(struct sk_buff *skb,
2167 struct netlink_callback *cb,
2168 struct net_device *dev,
2169 int *idx,
2170 struct netdev_hw_addr_list *list)
2171{
2172 struct netdev_hw_addr *ha;
2173 int err;
2174 u32 pid, seq;
2175
2176 pid = NETLINK_CB(cb->skb).pid;
2177 seq = cb->nlh->nlmsg_seq;
2178
2179 list_for_each_entry(ha, &list->list, list) {
2180 if (*idx < cb->args[0])
2181 goto skip;
2182
2183 err = nlmsg_populate_fdb_fill(skb, dev, ha->addr,
2184 pid, seq, 0, NTF_SELF);
2185 if (err < 0)
2186 return err;
2187skip:
2188 *idx += 1;
2189 }
2190 return 0;
2191}
2192
2193/**
2194 * ndo_dflt_fdb_dump: default netdevice operation to dump an FDB table.
2195 * @nlh: netlink message header
2196 * @dev: netdevice
2197 *
2198 * Default netdevice operation to dump the existing unicast address list.
2199 * Returns zero on success.
2200 */
2201int ndo_dflt_fdb_dump(struct sk_buff *skb,
2202 struct netlink_callback *cb,
2203 struct net_device *dev,
2204 int idx)
2205{
2206 int err;
2207
2208 netif_addr_lock_bh(dev);
2209 err = nlmsg_populate_fdb(skb, cb, dev, &idx, &dev->uc);
2210 if (err)
2211 goto out;
2212 nlmsg_populate_fdb(skb, cb, dev, &idx, &dev->mc);
2213out:
2214 netif_addr_unlock_bh(dev);
2215 return idx;
2216}
2217EXPORT_SYMBOL(ndo_dflt_fdb_dump);
2218
2219static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
2220{
2221 int idx = 0;
2222 struct net *net = sock_net(skb->sk);
2223 struct net_device *dev;
2224
2225 rcu_read_lock();
2226 for_each_netdev_rcu(net, dev) {
2227 if (dev->priv_flags & IFF_BRIDGE_PORT) {
2228 struct net_device *master = dev->master;
2229 const struct net_device_ops *ops = master->netdev_ops;
2230
2231 if (ops->ndo_fdb_dump)
2232 idx = ops->ndo_fdb_dump(skb, cb, dev, idx);
2233 }
2234
2235 if (dev->netdev_ops->ndo_fdb_dump)
2236 idx = dev->netdev_ops->ndo_fdb_dump(skb, cb, dev, idx);
2237 }
2238 rcu_read_unlock();
2239
2240 cb->args[0] = idx;
2241 return skb->len;
2242}
2243
1975/* Protected by RTNL sempahore. */ 2244/* Protected by RTNL sempahore. */
1976static struct rtattr **rta_buf; 2245static struct rtattr **rta_buf;
1977static int rtattr_max; 2246static int rtattr_max;
@@ -2042,7 +2311,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2042 struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len); 2311 struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len);
2043 2312
2044 while (RTA_OK(attr, attrlen)) { 2313 while (RTA_OK(attr, attrlen)) {
2045 unsigned flavor = attr->rta_type; 2314 unsigned int flavor = attr->rta_type;
2046 if (flavor) { 2315 if (flavor) {
2047 if (flavor > rta_max[sz_idx]) 2316 if (flavor > rta_max[sz_idx])
2048 return -EINVAL; 2317 return -EINVAL;
@@ -2144,5 +2413,9 @@ void __init rtnetlink_init(void)
2144 2413
2145 rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, NULL); 2414 rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, NULL);
2146 rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, NULL); 2415 rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, NULL);
2416
2417 rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, NULL);
2418 rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, NULL);
2419 rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, NULL);
2147} 2420}
2148 2421
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index e59840010d45..2c35da818ef9 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -67,10 +67,9 @@
67 67
68#include <asm/uaccess.h> 68#include <asm/uaccess.h>
69#include <trace/events/skb.h> 69#include <trace/events/skb.h>
70#include <linux/highmem.h>
70 71
71#include "kmap_skb.h" 72struct kmem_cache *skbuff_head_cache __read_mostly;
72
73static struct kmem_cache *skbuff_head_cache __read_mostly;
74static struct kmem_cache *skbuff_fclone_cache __read_mostly; 73static struct kmem_cache *skbuff_fclone_cache __read_mostly;
75 74
76static void sock_pipe_buf_release(struct pipe_inode_info *pipe, 75static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
@@ -246,6 +245,7 @@ EXPORT_SYMBOL(__alloc_skb);
246/** 245/**
247 * build_skb - build a network buffer 246 * build_skb - build a network buffer
248 * @data: data buffer provided by caller 247 * @data: data buffer provided by caller
248 * @frag_size: size of fragment, or 0 if head was kmalloced
249 * 249 *
250 * Allocate a new &sk_buff. Caller provides space holding head and 250 * Allocate a new &sk_buff. Caller provides space holding head and
251 * skb_shared_info. @data must have been allocated by kmalloc() 251 * skb_shared_info. @data must have been allocated by kmalloc()
@@ -259,20 +259,21 @@ EXPORT_SYMBOL(__alloc_skb);
259 * before giving packet to stack. 259 * before giving packet to stack.
260 * RX rings only contains data buffers, not full skbs. 260 * RX rings only contains data buffers, not full skbs.
261 */ 261 */
262struct sk_buff *build_skb(void *data) 262struct sk_buff *build_skb(void *data, unsigned int frag_size)
263{ 263{
264 struct skb_shared_info *shinfo; 264 struct skb_shared_info *shinfo;
265 struct sk_buff *skb; 265 struct sk_buff *skb;
266 unsigned int size; 266 unsigned int size = frag_size ? : ksize(data);
267 267
268 skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC); 268 skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
269 if (!skb) 269 if (!skb)
270 return NULL; 270 return NULL;
271 271
272 size = ksize(data) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 272 size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
273 273
274 memset(skb, 0, offsetof(struct sk_buff, tail)); 274 memset(skb, 0, offsetof(struct sk_buff, tail));
275 skb->truesize = SKB_TRUESIZE(size); 275 skb->truesize = SKB_TRUESIZE(size);
276 skb->head_frag = frag_size != 0;
276 atomic_set(&skb->users, 1); 277 atomic_set(&skb->users, 1);
277 skb->head = data; 278 skb->head = data;
278 skb->data = data; 279 skb->data = data;
@@ -377,6 +378,14 @@ static void skb_clone_fraglist(struct sk_buff *skb)
377 skb_get(list); 378 skb_get(list);
378} 379}
379 380
381static void skb_free_head(struct sk_buff *skb)
382{
383 if (skb->head_frag)
384 put_page(virt_to_head_page(skb->head));
385 else
386 kfree(skb->head);
387}
388
380static void skb_release_data(struct sk_buff *skb) 389static void skb_release_data(struct sk_buff *skb)
381{ 390{
382 if (!skb->cloned || 391 if (!skb->cloned ||
@@ -403,7 +412,7 @@ static void skb_release_data(struct sk_buff *skb)
403 if (skb_has_frag_list(skb)) 412 if (skb_has_frag_list(skb))
404 skb_drop_fraglist(skb); 413 skb_drop_fraglist(skb);
405 414
406 kfree(skb->head); 415 skb_free_head(skb);
407 } 416 }
408} 417}
409 418
@@ -645,6 +654,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
645 C(tail); 654 C(tail);
646 C(end); 655 C(end);
647 C(head); 656 C(head);
657 C(head_frag);
648 C(data); 658 C(data);
649 C(truesize); 659 C(truesize);
650 atomic_set(&n->users, 1); 660 atomic_set(&n->users, 1);
@@ -707,10 +717,10 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
707 } 717 }
708 return -ENOMEM; 718 return -ENOMEM;
709 } 719 }
710 vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]); 720 vaddr = kmap_atomic(skb_frag_page(f));
711 memcpy(page_address(page), 721 memcpy(page_address(page),
712 vaddr + f->page_offset, skb_frag_size(f)); 722 vaddr + f->page_offset, skb_frag_size(f));
713 kunmap_skb_frag(vaddr); 723 kunmap_atomic(vaddr);
714 page->private = (unsigned long)head; 724 page->private = (unsigned long)head;
715 head = page; 725 head = page;
716 } 726 }
@@ -819,7 +829,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
819struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) 829struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
820{ 830{
821 int headerlen = skb_headroom(skb); 831 int headerlen = skb_headroom(skb);
822 unsigned int size = (skb_end_pointer(skb) - skb->head) + skb->data_len; 832 unsigned int size = skb_end_offset(skb) + skb->data_len;
823 struct sk_buff *n = alloc_skb(size, gfp_mask); 833 struct sk_buff *n = alloc_skb(size, gfp_mask);
824 834
825 if (!n) 835 if (!n)
@@ -920,9 +930,8 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
920{ 930{
921 int i; 931 int i;
922 u8 *data; 932 u8 *data;
923 int size = nhead + (skb_end_pointer(skb) - skb->head) + ntail; 933 int size = nhead + skb_end_offset(skb) + ntail;
924 long off; 934 long off;
925 bool fastpath;
926 935
927 BUG_ON(nhead < 0); 936 BUG_ON(nhead < 0);
928 937
@@ -931,27 +940,6 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
931 940
932 size = SKB_DATA_ALIGN(size); 941 size = SKB_DATA_ALIGN(size);
933 942
934 /* Check if we can avoid taking references on fragments if we own
935 * the last reference on skb->head. (see skb_release_data())
936 */
937 if (!skb->cloned)
938 fastpath = true;
939 else {
940 int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
941 fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
942 }
943
944 if (fastpath &&
945 size + sizeof(struct skb_shared_info) <= ksize(skb->head)) {
946 memmove(skb->head + size, skb_shinfo(skb),
947 offsetof(struct skb_shared_info,
948 frags[skb_shinfo(skb)->nr_frags]));
949 memmove(skb->head + nhead, skb->head,
950 skb_tail_pointer(skb) - skb->head);
951 off = nhead;
952 goto adjust_others;
953 }
954
955 data = kmalloc(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), 943 data = kmalloc(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
956 gfp_mask); 944 gfp_mask);
957 if (!data) 945 if (!data)
@@ -967,9 +955,12 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
967 skb_shinfo(skb), 955 skb_shinfo(skb),
968 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); 956 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
969 957
970 if (fastpath) { 958 /*
971 kfree(skb->head); 959 * if shinfo is shared we must drop the old head gracefully, but if it
972 } else { 960 * is not we can just drop the old head and let the existing refcount
961 * be since all we did is relocate the values
962 */
963 if (skb_cloned(skb)) {
973 /* copy this zero copy skb frags */ 964 /* copy this zero copy skb frags */
974 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { 965 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
975 if (skb_copy_ubufs(skb, gfp_mask)) 966 if (skb_copy_ubufs(skb, gfp_mask))
@@ -982,11 +973,13 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
982 skb_clone_fraglist(skb); 973 skb_clone_fraglist(skb);
983 974
984 skb_release_data(skb); 975 skb_release_data(skb);
976 } else {
977 skb_free_head(skb);
985 } 978 }
986 off = (data + nhead) - skb->head; 979 off = (data + nhead) - skb->head;
987 980
988 skb->head = data; 981 skb->head = data;
989adjust_others: 982 skb->head_frag = 0;
990 skb->data += off; 983 skb->data += off;
991#ifdef NET_SKBUFF_DATA_USES_OFFSET 984#ifdef NET_SKBUFF_DATA_USES_OFFSET
992 skb->end = size; 985 skb->end = size;
@@ -1275,7 +1268,7 @@ drop_pages:
1275 return -ENOMEM; 1268 return -ENOMEM;
1276 1269
1277 nfrag->next = frag->next; 1270 nfrag->next = frag->next;
1278 kfree_skb(frag); 1271 consume_skb(frag);
1279 frag = nfrag; 1272 frag = nfrag;
1280 *fragp = frag; 1273 *fragp = frag;
1281 } 1274 }
@@ -1487,21 +1480,22 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
1487 1480
1488 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1481 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1489 int end; 1482 int end;
1483 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
1490 1484
1491 WARN_ON(start > offset + len); 1485 WARN_ON(start > offset + len);
1492 1486
1493 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 1487 end = start + skb_frag_size(f);
1494 if ((copy = end - offset) > 0) { 1488 if ((copy = end - offset) > 0) {
1495 u8 *vaddr; 1489 u8 *vaddr;
1496 1490
1497 if (copy > len) 1491 if (copy > len)
1498 copy = len; 1492 copy = len;
1499 1493
1500 vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]); 1494 vaddr = kmap_atomic(skb_frag_page(f));
1501 memcpy(to, 1495 memcpy(to,
1502 vaddr + skb_shinfo(skb)->frags[i].page_offset+ 1496 vaddr + f->page_offset + offset - start,
1503 offset - start, copy); 1497 copy);
1504 kunmap_skb_frag(vaddr); 1498 kunmap_atomic(vaddr);
1505 1499
1506 if ((len -= copy) == 0) 1500 if ((len -= copy) == 0)
1507 return 0; 1501 return 0;
@@ -1547,9 +1541,9 @@ static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
1547 put_page(spd->pages[i]); 1541 put_page(spd->pages[i]);
1548} 1542}
1549 1543
1550static inline struct page *linear_to_page(struct page *page, unsigned int *len, 1544static struct page *linear_to_page(struct page *page, unsigned int *len,
1551 unsigned int *offset, 1545 unsigned int *offset,
1552 struct sk_buff *skb, struct sock *sk) 1546 struct sk_buff *skb, struct sock *sk)
1553{ 1547{
1554 struct page *p = sk->sk_sndmsg_page; 1548 struct page *p = sk->sk_sndmsg_page;
1555 unsigned int off; 1549 unsigned int off;
@@ -1565,6 +1559,9 @@ new_page:
1565 } else { 1559 } else {
1566 unsigned int mlen; 1560 unsigned int mlen;
1567 1561
1562 /* If we are the only user of the page, we can reset offset */
1563 if (page_count(p) == 1)
1564 sk->sk_sndmsg_off = 0;
1568 off = sk->sk_sndmsg_off; 1565 off = sk->sk_sndmsg_off;
1569 mlen = PAGE_SIZE - off; 1566 mlen = PAGE_SIZE - off;
1570 if (mlen < 64 && mlen < *len) { 1567 if (mlen < 64 && mlen < *len) {
@@ -1578,36 +1575,48 @@ new_page:
1578 memcpy(page_address(p) + off, page_address(page) + *offset, *len); 1575 memcpy(page_address(p) + off, page_address(page) + *offset, *len);
1579 sk->sk_sndmsg_off += *len; 1576 sk->sk_sndmsg_off += *len;
1580 *offset = off; 1577 *offset = off;
1581 get_page(p);
1582 1578
1583 return p; 1579 return p;
1584} 1580}
1585 1581
1582static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
1583 struct page *page,
1584 unsigned int offset)
1585{
1586 return spd->nr_pages &&
1587 spd->pages[spd->nr_pages - 1] == page &&
1588 (spd->partial[spd->nr_pages - 1].offset +
1589 spd->partial[spd->nr_pages - 1].len == offset);
1590}
1591
1586/* 1592/*
1587 * Fill page/offset/length into spd, if it can hold more pages. 1593 * Fill page/offset/length into spd, if it can hold more pages.
1588 */ 1594 */
1589static inline int spd_fill_page(struct splice_pipe_desc *spd, 1595static bool spd_fill_page(struct splice_pipe_desc *spd,
1590 struct pipe_inode_info *pipe, struct page *page, 1596 struct pipe_inode_info *pipe, struct page *page,
1591 unsigned int *len, unsigned int offset, 1597 unsigned int *len, unsigned int offset,
1592 struct sk_buff *skb, int linear, 1598 struct sk_buff *skb, bool linear,
1593 struct sock *sk) 1599 struct sock *sk)
1594{ 1600{
1595 if (unlikely(spd->nr_pages == pipe->buffers)) 1601 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
1596 return 1; 1602 return true;
1597 1603
1598 if (linear) { 1604 if (linear) {
1599 page = linear_to_page(page, len, &offset, skb, sk); 1605 page = linear_to_page(page, len, &offset, skb, sk);
1600 if (!page) 1606 if (!page)
1601 return 1; 1607 return true;
1602 } else 1608 }
1603 get_page(page); 1609 if (spd_can_coalesce(spd, page, offset)) {
1604 1610 spd->partial[spd->nr_pages - 1].len += *len;
1611 return false;
1612 }
1613 get_page(page);
1605 spd->pages[spd->nr_pages] = page; 1614 spd->pages[spd->nr_pages] = page;
1606 spd->partial[spd->nr_pages].len = *len; 1615 spd->partial[spd->nr_pages].len = *len;
1607 spd->partial[spd->nr_pages].offset = offset; 1616 spd->partial[spd->nr_pages].offset = offset;
1608 spd->nr_pages++; 1617 spd->nr_pages++;
1609 1618
1610 return 0; 1619 return false;
1611} 1620}
1612 1621
1613static inline void __segment_seek(struct page **page, unsigned int *poff, 1622static inline void __segment_seek(struct page **page, unsigned int *poff,
@@ -1624,20 +1633,20 @@ static inline void __segment_seek(struct page **page, unsigned int *poff,
1624 *plen -= off; 1633 *plen -= off;
1625} 1634}
1626 1635
1627static inline int __splice_segment(struct page *page, unsigned int poff, 1636static bool __splice_segment(struct page *page, unsigned int poff,
1628 unsigned int plen, unsigned int *off, 1637 unsigned int plen, unsigned int *off,
1629 unsigned int *len, struct sk_buff *skb, 1638 unsigned int *len, struct sk_buff *skb,
1630 struct splice_pipe_desc *spd, int linear, 1639 struct splice_pipe_desc *spd, bool linear,
1631 struct sock *sk, 1640 struct sock *sk,
1632 struct pipe_inode_info *pipe) 1641 struct pipe_inode_info *pipe)
1633{ 1642{
1634 if (!*len) 1643 if (!*len)
1635 return 1; 1644 return true;
1636 1645
1637 /* skip this segment if already processed */ 1646 /* skip this segment if already processed */
1638 if (*off >= plen) { 1647 if (*off >= plen) {
1639 *off -= plen; 1648 *off -= plen;
1640 return 0; 1649 return false;
1641 } 1650 }
1642 1651
1643 /* ignore any bits we already processed */ 1652 /* ignore any bits we already processed */
@@ -1653,34 +1662,38 @@ static inline int __splice_segment(struct page *page, unsigned int poff,
1653 flen = min_t(unsigned int, flen, PAGE_SIZE - poff); 1662 flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
1654 1663
1655 if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk)) 1664 if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
1656 return 1; 1665 return true;
1657 1666
1658 __segment_seek(&page, &poff, &plen, flen); 1667 __segment_seek(&page, &poff, &plen, flen);
1659 *len -= flen; 1668 *len -= flen;
1660 1669
1661 } while (*len && plen); 1670 } while (*len && plen);
1662 1671
1663 return 0; 1672 return false;
1664} 1673}
1665 1674
1666/* 1675/*
1667 * Map linear and fragment data from the skb to spd. It reports failure if the 1676 * Map linear and fragment data from the skb to spd. It reports true if the
1668 * pipe is full or if we already spliced the requested length. 1677 * pipe is full or if we already spliced the requested length.
1669 */ 1678 */
1670static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, 1679static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
1671 unsigned int *offset, unsigned int *len, 1680 unsigned int *offset, unsigned int *len,
1672 struct splice_pipe_desc *spd, struct sock *sk) 1681 struct splice_pipe_desc *spd, struct sock *sk)
1673{ 1682{
1674 int seg; 1683 int seg;
1675 1684
1676 /* 1685 /* map the linear part :
1677 * map the linear part 1686 * If skb->head_frag is set, this 'linear' part is backed by a
1687 * fragment, and if the head is not shared with any clones then
1688 * we can avoid a copy since we own the head portion of this page.
1678 */ 1689 */
1679 if (__splice_segment(virt_to_page(skb->data), 1690 if (__splice_segment(virt_to_page(skb->data),
1680 (unsigned long) skb->data & (PAGE_SIZE - 1), 1691 (unsigned long) skb->data & (PAGE_SIZE - 1),
1681 skb_headlen(skb), 1692 skb_headlen(skb),
1682 offset, len, skb, spd, 1, sk, pipe)) 1693 offset, len, skb, spd,
1683 return 1; 1694 skb_head_is_locked(skb),
1695 sk, pipe))
1696 return true;
1684 1697
1685 /* 1698 /*
1686 * then map the fragments 1699 * then map the fragments
@@ -1690,11 +1703,11 @@ static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
1690 1703
1691 if (__splice_segment(skb_frag_page(f), 1704 if (__splice_segment(skb_frag_page(f),
1692 f->page_offset, skb_frag_size(f), 1705 f->page_offset, skb_frag_size(f),
1693 offset, len, skb, spd, 0, sk, pipe)) 1706 offset, len, skb, spd, false, sk, pipe))
1694 return 1; 1707 return true;
1695 } 1708 }
1696 1709
1697 return 0; 1710 return false;
1698} 1711}
1699 1712
1700/* 1713/*
@@ -1707,8 +1720,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
1707 struct pipe_inode_info *pipe, unsigned int tlen, 1720 struct pipe_inode_info *pipe, unsigned int tlen,
1708 unsigned int flags) 1721 unsigned int flags)
1709{ 1722{
1710 struct partial_page partial[PIPE_DEF_BUFFERS]; 1723 struct partial_page partial[MAX_SKB_FRAGS];
1711 struct page *pages[PIPE_DEF_BUFFERS]; 1724 struct page *pages[MAX_SKB_FRAGS];
1712 struct splice_pipe_desc spd = { 1725 struct splice_pipe_desc spd = {
1713 .pages = pages, 1726 .pages = pages,
1714 .partial = partial, 1727 .partial = partial,
@@ -1720,9 +1733,6 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
1720 struct sock *sk = skb->sk; 1733 struct sock *sk = skb->sk;
1721 int ret = 0; 1734 int ret = 0;
1722 1735
1723 if (splice_grow_spd(pipe, &spd))
1724 return -ENOMEM;
1725
1726 /* 1736 /*
1727 * __skb_splice_bits() only fails if the output has no room left, 1737 * __skb_splice_bits() only fails if the output has no room left,
1728 * so no point in going over the frag_list for the error case. 1738 * so no point in going over the frag_list for the error case.
@@ -1758,7 +1768,6 @@ done:
1758 lock_sock(sk); 1768 lock_sock(sk);
1759 } 1769 }
1760 1770
1761 splice_shrink_spd(pipe, &spd);
1762 return ret; 1771 return ret;
1763} 1772}
1764 1773
@@ -1806,10 +1815,10 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
1806 if (copy > len) 1815 if (copy > len)
1807 copy = len; 1816 copy = len;
1808 1817
1809 vaddr = kmap_skb_frag(frag); 1818 vaddr = kmap_atomic(skb_frag_page(frag));
1810 memcpy(vaddr + frag->page_offset + offset - start, 1819 memcpy(vaddr + frag->page_offset + offset - start,
1811 from, copy); 1820 from, copy);
1812 kunmap_skb_frag(vaddr); 1821 kunmap_atomic(vaddr);
1813 1822
1814 if ((len -= copy) == 0) 1823 if ((len -= copy) == 0)
1815 return 0; 1824 return 0;
@@ -1869,21 +1878,21 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset,
1869 1878
1870 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1879 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1871 int end; 1880 int end;
1881 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1872 1882
1873 WARN_ON(start > offset + len); 1883 WARN_ON(start > offset + len);
1874 1884
1875 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 1885 end = start + skb_frag_size(frag);
1876 if ((copy = end - offset) > 0) { 1886 if ((copy = end - offset) > 0) {
1877 __wsum csum2; 1887 __wsum csum2;
1878 u8 *vaddr; 1888 u8 *vaddr;
1879 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1880 1889
1881 if (copy > len) 1890 if (copy > len)
1882 copy = len; 1891 copy = len;
1883 vaddr = kmap_skb_frag(frag); 1892 vaddr = kmap_atomic(skb_frag_page(frag));
1884 csum2 = csum_partial(vaddr + frag->page_offset + 1893 csum2 = csum_partial(vaddr + frag->page_offset +
1885 offset - start, copy, 0); 1894 offset - start, copy, 0);
1886 kunmap_skb_frag(vaddr); 1895 kunmap_atomic(vaddr);
1887 csum = csum_block_add(csum, csum2, pos); 1896 csum = csum_block_add(csum, csum2, pos);
1888 if (!(len -= copy)) 1897 if (!(len -= copy))
1889 return csum; 1898 return csum;
@@ -1955,12 +1964,12 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
1955 1964
1956 if (copy > len) 1965 if (copy > len)
1957 copy = len; 1966 copy = len;
1958 vaddr = kmap_skb_frag(frag); 1967 vaddr = kmap_atomic(skb_frag_page(frag));
1959 csum2 = csum_partial_copy_nocheck(vaddr + 1968 csum2 = csum_partial_copy_nocheck(vaddr +
1960 frag->page_offset + 1969 frag->page_offset +
1961 offset - start, to, 1970 offset - start, to,
1962 copy, 0); 1971 copy, 0);
1963 kunmap_skb_frag(vaddr); 1972 kunmap_atomic(vaddr);
1964 csum = csum_block_add(csum, csum2, pos); 1973 csum = csum_block_add(csum, csum2, pos);
1965 if (!(len -= copy)) 1974 if (!(len -= copy))
1966 return csum; 1975 return csum;
@@ -2480,7 +2489,7 @@ next_skb:
2480 2489
2481 if (abs_offset < block_limit) { 2490 if (abs_offset < block_limit) {
2482 if (!st->frag_data) 2491 if (!st->frag_data)
2483 st->frag_data = kmap_skb_frag(frag); 2492 st->frag_data = kmap_atomic(skb_frag_page(frag));
2484 2493
2485 *data = (u8 *) st->frag_data + frag->page_offset + 2494 *data = (u8 *) st->frag_data + frag->page_offset +
2486 (abs_offset - st->stepped_offset); 2495 (abs_offset - st->stepped_offset);
@@ -2489,7 +2498,7 @@ next_skb:
2489 } 2498 }
2490 2499
2491 if (st->frag_data) { 2500 if (st->frag_data) {
2492 kunmap_skb_frag(st->frag_data); 2501 kunmap_atomic(st->frag_data);
2493 st->frag_data = NULL; 2502 st->frag_data = NULL;
2494 } 2503 }
2495 2504
@@ -2498,7 +2507,7 @@ next_skb:
2498 } 2507 }
2499 2508
2500 if (st->frag_data) { 2509 if (st->frag_data) {
2501 kunmap_skb_frag(st->frag_data); 2510 kunmap_atomic(st->frag_data);
2502 st->frag_data = NULL; 2511 st->frag_data = NULL;
2503 } 2512 }
2504 2513
@@ -2526,7 +2535,7 @@ EXPORT_SYMBOL(skb_seq_read);
2526void skb_abort_seq_read(struct skb_seq_state *st) 2535void skb_abort_seq_read(struct skb_seq_state *st)
2527{ 2536{
2528 if (st->frag_data) 2537 if (st->frag_data)
2529 kunmap_skb_frag(st->frag_data); 2538 kunmap_atomic(st->frag_data);
2530} 2539}
2531EXPORT_SYMBOL(skb_abort_seq_read); 2540EXPORT_SYMBOL(skb_abort_seq_read);
2532 2541
@@ -2718,14 +2727,13 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
2718 if (unlikely(!nskb)) 2727 if (unlikely(!nskb))
2719 goto err; 2728 goto err;
2720 2729
2721 hsize = skb_end_pointer(nskb) - nskb->head; 2730 hsize = skb_end_offset(nskb);
2722 if (skb_cow_head(nskb, doffset + headroom)) { 2731 if (skb_cow_head(nskb, doffset + headroom)) {
2723 kfree_skb(nskb); 2732 kfree_skb(nskb);
2724 goto err; 2733 goto err;
2725 } 2734 }
2726 2735
2727 nskb->truesize += skb_end_pointer(nskb) - nskb->head - 2736 nskb->truesize += skb_end_offset(nskb) - hsize;
2728 hsize;
2729 skb_release_head_state(nskb); 2737 skb_release_head_state(nskb);
2730 __skb_push(nskb, doffset); 2738 __skb_push(nskb, doffset);
2731 } else { 2739 } else {
@@ -2843,6 +2851,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2843 unsigned int len = skb_gro_len(skb); 2851 unsigned int len = skb_gro_len(skb);
2844 unsigned int offset = skb_gro_offset(skb); 2852 unsigned int offset = skb_gro_offset(skb);
2845 unsigned int headlen = skb_headlen(skb); 2853 unsigned int headlen = skb_headlen(skb);
2854 unsigned int delta_truesize;
2846 2855
2847 if (p->len + len >= 65536) 2856 if (p->len + len >= 65536)
2848 return -E2BIG; 2857 return -E2BIG;
@@ -2872,11 +2881,41 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2872 frag->page_offset += offset; 2881 frag->page_offset += offset;
2873 skb_frag_size_sub(frag, offset); 2882 skb_frag_size_sub(frag, offset);
2874 2883
2884 /* all fragments truesize : remove (head size + sk_buff) */
2885 delta_truesize = skb->truesize -
2886 SKB_TRUESIZE(skb_end_offset(skb));
2887
2875 skb->truesize -= skb->data_len; 2888 skb->truesize -= skb->data_len;
2876 skb->len -= skb->data_len; 2889 skb->len -= skb->data_len;
2877 skb->data_len = 0; 2890 skb->data_len = 0;
2878 2891
2879 NAPI_GRO_CB(skb)->free = 1; 2892 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
2893 goto done;
2894 } else if (skb->head_frag) {
2895 int nr_frags = pinfo->nr_frags;
2896 skb_frag_t *frag = pinfo->frags + nr_frags;
2897 struct page *page = virt_to_head_page(skb->head);
2898 unsigned int first_size = headlen - offset;
2899 unsigned int first_offset;
2900
2901 if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
2902 return -E2BIG;
2903
2904 first_offset = skb->data -
2905 (unsigned char *)page_address(page) +
2906 offset;
2907
2908 pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
2909
2910 frag->page.p = page;
2911 frag->page_offset = first_offset;
2912 skb_frag_size_set(frag, first_size);
2913
2914 memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
2915 /* We dont need to clear skbinfo->nr_frags here */
2916
2917 delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
2918 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
2880 goto done; 2919 goto done;
2881 } else if (skb_gro_len(p) != pinfo->gso_size) 2920 } else if (skb_gro_len(p) != pinfo->gso_size)
2882 return -E2BIG; 2921 return -E2BIG;
@@ -2918,7 +2957,7 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2918 p = nskb; 2957 p = nskb;
2919 2958
2920merge: 2959merge:
2921 p->truesize += skb->truesize - len; 2960 delta_truesize = skb->truesize;
2922 if (offset > headlen) { 2961 if (offset > headlen) {
2923 unsigned int eat = offset - headlen; 2962 unsigned int eat = offset - headlen;
2924 2963
@@ -2938,7 +2977,7 @@ merge:
2938done: 2977done:
2939 NAPI_GRO_CB(p)->count++; 2978 NAPI_GRO_CB(p)->count++;
2940 p->data_len += len; 2979 p->data_len += len;
2941 p->truesize += len; 2980 p->truesize += delta_truesize;
2942 p->len += len; 2981 p->len += len;
2943 2982
2944 NAPI_GRO_CB(skb)->same_flow = 1; 2983 NAPI_GRO_CB(skb)->same_flow = 1;
@@ -3166,7 +3205,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
3166 int len = skb->len; 3205 int len = skb->len;
3167 3206
3168 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 3207 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
3169 (unsigned)sk->sk_rcvbuf) 3208 (unsigned int)sk->sk_rcvbuf)
3170 return -ENOMEM; 3209 return -ENOMEM;
3171 3210
3172 skb_orphan(skb); 3211 skb_orphan(skb);
diff --git a/net/core/sock.c b/net/core/sock.c
index b2e14c07d920..b8c818e69c23 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -113,6 +113,7 @@
113#include <linux/user_namespace.h> 113#include <linux/user_namespace.h>
114#include <linux/static_key.h> 114#include <linux/static_key.h>
115#include <linux/memcontrol.h> 115#include <linux/memcontrol.h>
116#include <linux/prefetch.h>
116 117
117#include <asm/uaccess.h> 118#include <asm/uaccess.h>
118 119
@@ -389,7 +390,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
389 390
390 skb->dev = NULL; 391 skb->dev = NULL;
391 392
392 if (sk_rcvqueues_full(sk, skb)) { 393 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
393 atomic_inc(&sk->sk_drops); 394 atomic_inc(&sk->sk_drops);
394 goto discard_and_relse; 395 goto discard_and_relse;
395 } 396 }
@@ -406,7 +407,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
406 rc = sk_backlog_rcv(sk, skb); 407 rc = sk_backlog_rcv(sk, skb);
407 408
408 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_); 409 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
409 } else if (sk_add_backlog(sk, skb)) { 410 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
410 bh_unlock_sock(sk); 411 bh_unlock_sock(sk);
411 atomic_inc(&sk->sk_drops); 412 atomic_inc(&sk->sk_drops);
412 goto discard_and_relse; 413 goto discard_and_relse;
@@ -561,7 +562,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
561 sock_valbool_flag(sk, SOCK_DBG, valbool); 562 sock_valbool_flag(sk, SOCK_DBG, valbool);
562 break; 563 break;
563 case SO_REUSEADDR: 564 case SO_REUSEADDR:
564 sk->sk_reuse = valbool; 565 sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
565 break; 566 break;
566 case SO_TYPE: 567 case SO_TYPE:
567 case SO_PROTOCOL: 568 case SO_PROTOCOL:
@@ -577,23 +578,15 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
577 break; 578 break;
578 case SO_SNDBUF: 579 case SO_SNDBUF:
579 /* Don't error on this BSD doesn't and if you think 580 /* Don't error on this BSD doesn't and if you think
580 about it this is right. Otherwise apps have to 581 * about it this is right. Otherwise apps have to
581 play 'guess the biggest size' games. RCVBUF/SNDBUF 582 * play 'guess the biggest size' games. RCVBUF/SNDBUF
582 are treated in BSD as hints */ 583 * are treated in BSD as hints
583 584 */
584 if (val > sysctl_wmem_max) 585 val = min_t(u32, val, sysctl_wmem_max);
585 val = sysctl_wmem_max;
586set_sndbuf: 586set_sndbuf:
587 sk->sk_userlocks |= SOCK_SNDBUF_LOCK; 587 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
588 if ((val * 2) < SOCK_MIN_SNDBUF) 588 sk->sk_sndbuf = max_t(u32, val * 2, SOCK_MIN_SNDBUF);
589 sk->sk_sndbuf = SOCK_MIN_SNDBUF; 589 /* Wake up sending tasks if we upped the value. */
590 else
591 sk->sk_sndbuf = val * 2;
592
593 /*
594 * Wake up sending tasks if we
595 * upped the value.
596 */
597 sk->sk_write_space(sk); 590 sk->sk_write_space(sk);
598 break; 591 break;
599 592
@@ -606,12 +599,11 @@ set_sndbuf:
606 599
607 case SO_RCVBUF: 600 case SO_RCVBUF:
608 /* Don't error on this BSD doesn't and if you think 601 /* Don't error on this BSD doesn't and if you think
609 about it this is right. Otherwise apps have to 602 * about it this is right. Otherwise apps have to
610 play 'guess the biggest size' games. RCVBUF/SNDBUF 603 * play 'guess the biggest size' games. RCVBUF/SNDBUF
611 are treated in BSD as hints */ 604 * are treated in BSD as hints
612 605 */
613 if (val > sysctl_rmem_max) 606 val = min_t(u32, val, sysctl_rmem_max);
614 val = sysctl_rmem_max;
615set_rcvbuf: 607set_rcvbuf:
616 sk->sk_userlocks |= SOCK_RCVBUF_LOCK; 608 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
617 /* 609 /*
@@ -629,10 +621,7 @@ set_rcvbuf:
629 * returning the value we actually used in getsockopt 621 * returning the value we actually used in getsockopt
630 * is the most desirable behavior. 622 * is the most desirable behavior.
631 */ 623 */
632 if ((val * 2) < SOCK_MIN_RCVBUF) 624 sk->sk_rcvbuf = max_t(u32, val * 2, SOCK_MIN_RCVBUF);
633 sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
634 else
635 sk->sk_rcvbuf = val * 2;
636 break; 625 break;
637 626
638 case SO_RCVBUFFORCE: 627 case SO_RCVBUFFORCE:
@@ -975,7 +964,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
975 break; 964 break;
976 965
977 case SO_PASSCRED: 966 case SO_PASSCRED:
978 v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0; 967 v.val = !!test_bit(SOCK_PASSCRED, &sock->flags);
979 break; 968 break;
980 969
981 case SO_PEERCRED: 970 case SO_PEERCRED:
@@ -1010,7 +999,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
1010 break; 999 break;
1011 1000
1012 case SO_PASSSEC: 1001 case SO_PASSSEC:
1013 v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0; 1002 v.val = !!test_bit(SOCK_PASSSEC, &sock->flags);
1014 break; 1003 break;
1015 1004
1016 case SO_PEERSEC: 1005 case SO_PEERSEC:
@@ -1534,7 +1523,7 @@ struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
1534 */ 1523 */
1535void *sock_kmalloc(struct sock *sk, int size, gfp_t priority) 1524void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
1536{ 1525{
1537 if ((unsigned)size <= sysctl_optmem_max && 1526 if ((unsigned int)size <= sysctl_optmem_max &&
1538 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) { 1527 atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
1539 void *mem; 1528 void *mem;
1540 /* First do the add, to avoid the race if kmalloc 1529 /* First do the add, to avoid the race if kmalloc
@@ -1712,6 +1701,7 @@ static void __release_sock(struct sock *sk)
1712 do { 1701 do {
1713 struct sk_buff *next = skb->next; 1702 struct sk_buff *next = skb->next;
1714 1703
1704 prefetch(next);
1715 WARN_ON_ONCE(skb_dst_is_noref(skb)); 1705 WARN_ON_ONCE(skb_dst_is_noref(skb));
1716 skb->next = NULL; 1706 skb->next = NULL;
1717 sk_backlog_rcv(sk, skb); 1707 sk_backlog_rcv(sk, skb);
@@ -2576,7 +2566,7 @@ static char proto_method_implemented(const void *method)
2576} 2566}
2577static long sock_prot_memory_allocated(struct proto *proto) 2567static long sock_prot_memory_allocated(struct proto *proto)
2578{ 2568{
2579 return proto->memory_allocated != NULL ? proto_memory_allocated(proto): -1L; 2569 return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
2580} 2570}
2581 2571
2582static char *sock_prot_memory_pressure(struct proto *proto) 2572static char *sock_prot_memory_pressure(struct proto *proto)
diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
index b9868e1fd62c..5fd146720f39 100644
--- a/net/core/sock_diag.c
+++ b/net/core/sock_diag.c
@@ -10,7 +10,7 @@
10#include <linux/inet_diag.h> 10#include <linux/inet_diag.h>
11#include <linux/sock_diag.h> 11#include <linux/sock_diag.h>
12 12
13static struct sock_diag_handler *sock_diag_handlers[AF_MAX]; 13static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
14static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh); 14static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
15static DEFINE_MUTEX(sock_diag_table_mutex); 15static DEFINE_MUTEX(sock_diag_table_mutex);
16 16
@@ -70,7 +70,7 @@ void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlms
70} 70}
71EXPORT_SYMBOL_GPL(sock_diag_unregister_inet_compat); 71EXPORT_SYMBOL_GPL(sock_diag_unregister_inet_compat);
72 72
73int sock_diag_register(struct sock_diag_handler *hndl) 73int sock_diag_register(const struct sock_diag_handler *hndl)
74{ 74{
75 int err = 0; 75 int err = 0;
76 76
@@ -88,7 +88,7 @@ int sock_diag_register(struct sock_diag_handler *hndl)
88} 88}
89EXPORT_SYMBOL_GPL(sock_diag_register); 89EXPORT_SYMBOL_GPL(sock_diag_register);
90 90
91void sock_diag_unregister(struct sock_diag_handler *hnld) 91void sock_diag_unregister(const struct sock_diag_handler *hnld)
92{ 92{
93 int family = hnld->family; 93 int family = hnld->family;
94 94
@@ -102,7 +102,7 @@ void sock_diag_unregister(struct sock_diag_handler *hnld)
102} 102}
103EXPORT_SYMBOL_GPL(sock_diag_unregister); 103EXPORT_SYMBOL_GPL(sock_diag_unregister);
104 104
105static inline struct sock_diag_handler *sock_diag_lock_handler(int family) 105static const inline struct sock_diag_handler *sock_diag_lock_handler(int family)
106{ 106{
107 if (sock_diag_handlers[family] == NULL) 107 if (sock_diag_handlers[family] == NULL)
108 request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, 108 request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
@@ -112,7 +112,7 @@ static inline struct sock_diag_handler *sock_diag_lock_handler(int family)
112 return sock_diag_handlers[family]; 112 return sock_diag_handlers[family];
113} 113}
114 114
115static inline void sock_diag_unlock_handler(struct sock_diag_handler *h) 115static inline void sock_diag_unlock_handler(const struct sock_diag_handler *h)
116{ 116{
117 mutex_unlock(&sock_diag_table_mutex); 117 mutex_unlock(&sock_diag_table_mutex);
118} 118}
@@ -121,7 +121,7 @@ static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
121{ 121{
122 int err; 122 int err;
123 struct sock_diag_req *req = NLMSG_DATA(nlh); 123 struct sock_diag_req *req = NLMSG_DATA(nlh);
124 struct sock_diag_handler *hndl; 124 const struct sock_diag_handler *hndl;
125 125
126 if (nlmsg_len(nlh) < sizeof(*req)) 126 if (nlmsg_len(nlh) < sizeof(*req))
127 return -EINVAL; 127 return -EINVAL;
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 0c2850874254..a7c36845b123 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -14,6 +14,7 @@
14#include <linux/vmalloc.h> 14#include <linux/vmalloc.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/slab.h> 16#include <linux/slab.h>
17#include <linux/kmemleak.h>
17 18
18#include <net/ip.h> 19#include <net/ip.h>
19#include <net/sock.h> 20#include <net/sock.h>
@@ -202,12 +203,6 @@ static struct ctl_table netns_core_table[] = {
202 { } 203 { }
203}; 204};
204 205
205__net_initdata struct ctl_path net_core_path[] = {
206 { .procname = "net", },
207 { .procname = "core", },
208 { },
209};
210
211static __net_init int sysctl_core_net_init(struct net *net) 206static __net_init int sysctl_core_net_init(struct net *net)
212{ 207{
213 struct ctl_table *tbl; 208 struct ctl_table *tbl;
@@ -223,8 +218,7 @@ static __net_init int sysctl_core_net_init(struct net *net)
223 tbl[0].data = &net->core.sysctl_somaxconn; 218 tbl[0].data = &net->core.sysctl_somaxconn;
224 } 219 }
225 220
226 net->core.sysctl_hdr = register_net_sysctl_table(net, 221 net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
227 net_core_path, tbl);
228 if (net->core.sysctl_hdr == NULL) 222 if (net->core.sysctl_hdr == NULL)
229 goto err_reg; 223 goto err_reg;
230 224
@@ -254,10 +248,7 @@ static __net_initdata struct pernet_operations sysctl_core_ops = {
254 248
255static __init int sysctl_core_init(void) 249static __init int sysctl_core_init(void)
256{ 250{
257 static struct ctl_table empty[1]; 251 register_net_sysctl(&init_net, "net/core", net_core_table);
258
259 register_sysctl_paths(net_core_path, empty);
260 register_net_sysctl_rotable(net_core_path, net_core_table);
261 return register_pernet_subsys(&sysctl_core_ops); 252 return register_pernet_subsys(&sysctl_core_ops);
262} 253}
263 254
diff --git a/net/core/utils.c b/net/core/utils.c
index dc3c3faff2f4..39895a65e54a 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -58,14 +58,11 @@ __be32 in_aton(const char *str)
58 int i; 58 int i;
59 59
60 l = 0; 60 l = 0;
61 for (i = 0; i < 4; i++) 61 for (i = 0; i < 4; i++) {
62 {
63 l <<= 8; 62 l <<= 8;
64 if (*str != '\0') 63 if (*str != '\0') {
65 {
66 val = 0; 64 val = 0;
67 while (*str != '\0' && *str != '.' && *str != '\n') 65 while (*str != '\0' && *str != '.' && *str != '\n') {
68 {
69 val *= 10; 66 val *= 10;
70 val += *str - '0'; 67 val += *str - '0';
71 str++; 68 str++;
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index d86053002c16..656c7c75b192 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -178,6 +178,7 @@ static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
178 [DCB_ATTR_IEEE_ETS] = {.len = sizeof(struct ieee_ets)}, 178 [DCB_ATTR_IEEE_ETS] = {.len = sizeof(struct ieee_ets)},
179 [DCB_ATTR_IEEE_PFC] = {.len = sizeof(struct ieee_pfc)}, 179 [DCB_ATTR_IEEE_PFC] = {.len = sizeof(struct ieee_pfc)},
180 [DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED}, 180 [DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED},
181 [DCB_ATTR_IEEE_MAXRATE] = {.len = sizeof(struct ieee_maxrate)},
181}; 182};
182 183
183static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = { 184static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = {
@@ -703,6 +704,7 @@ static int dcbnl_setapp(struct net_device *netdev, struct nlattr **tb,
703 704
704 ret = dcbnl_reply(err, RTM_SETDCB, DCB_CMD_SAPP, DCB_ATTR_APP, 705 ret = dcbnl_reply(err, RTM_SETDCB, DCB_CMD_SAPP, DCB_ATTR_APP,
705 pid, seq, flags); 706 pid, seq, flags);
707 dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0);
706out: 708out:
707 return ret; 709 return ret;
708} 710}
@@ -935,6 +937,7 @@ static int dcbnl_setall(struct net_device *netdev, struct nlattr **tb,
935 937
936 ret = dcbnl_reply(netdev->dcbnl_ops->setall(netdev), RTM_SETDCB, 938 ret = dcbnl_reply(netdev->dcbnl_ops->setall(netdev), RTM_SETDCB,
937 DCB_CMD_SET_ALL, DCB_ATTR_SET_ALL, pid, seq, flags); 939 DCB_CMD_SET_ALL, DCB_ATTR_SET_ALL, pid, seq, flags);
940 dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0);
938 941
939 return ret; 942 return ret;
940} 943}
@@ -1205,13 +1208,15 @@ static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb,
1205 if (!app) 1208 if (!app)
1206 goto nla_put_failure; 1209 goto nla_put_failure;
1207 1210
1208 if (app_info_type) 1211 if (app_info_type &&
1209 NLA_PUT(skb, app_info_type, sizeof(info), &info); 1212 nla_put(skb, app_info_type, sizeof(info), &info))
1210 1213 goto nla_put_failure;
1211 for (i = 0; i < app_count; i++)
1212 NLA_PUT(skb, app_entry_type, sizeof(struct dcb_app),
1213 &table[i]);
1214 1214
1215 for (i = 0; i < app_count; i++) {
1216 if (nla_put(skb, app_entry_type, sizeof(struct dcb_app),
1217 &table[i]))
1218 goto nla_put_failure;
1219 }
1215 nla_nest_end(skb, app); 1220 nla_nest_end(skb, app);
1216 } 1221 }
1217 err = 0; 1222 err = 0;
@@ -1230,8 +1235,8 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
1230 int dcbx; 1235 int dcbx;
1231 int err = -EMSGSIZE; 1236 int err = -EMSGSIZE;
1232 1237
1233 NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name); 1238 if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
1234 1239 goto nla_put_failure;
1235 ieee = nla_nest_start(skb, DCB_ATTR_IEEE); 1240 ieee = nla_nest_start(skb, DCB_ATTR_IEEE);
1236 if (!ieee) 1241 if (!ieee)
1237 goto nla_put_failure; 1242 goto nla_put_failure;
@@ -1239,15 +1244,28 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
1239 if (ops->ieee_getets) { 1244 if (ops->ieee_getets) {
1240 struct ieee_ets ets; 1245 struct ieee_ets ets;
1241 err = ops->ieee_getets(netdev, &ets); 1246 err = ops->ieee_getets(netdev, &ets);
1242 if (!err) 1247 if (!err &&
1243 NLA_PUT(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets); 1248 nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
1249 goto nla_put_failure;
1250 }
1251
1252 if (ops->ieee_getmaxrate) {
1253 struct ieee_maxrate maxrate;
1254 err = ops->ieee_getmaxrate(netdev, &maxrate);
1255 if (!err) {
1256 err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
1257 sizeof(maxrate), &maxrate);
1258 if (err)
1259 goto nla_put_failure;
1260 }
1244 } 1261 }
1245 1262
1246 if (ops->ieee_getpfc) { 1263 if (ops->ieee_getpfc) {
1247 struct ieee_pfc pfc; 1264 struct ieee_pfc pfc;
1248 err = ops->ieee_getpfc(netdev, &pfc); 1265 err = ops->ieee_getpfc(netdev, &pfc);
1249 if (!err) 1266 if (!err &&
1250 NLA_PUT(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc); 1267 nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
1268 goto nla_put_failure;
1251 } 1269 }
1252 1270
1253 app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE); 1271 app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE);
@@ -1278,15 +1296,17 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
1278 if (ops->ieee_peer_getets) { 1296 if (ops->ieee_peer_getets) {
1279 struct ieee_ets ets; 1297 struct ieee_ets ets;
1280 err = ops->ieee_peer_getets(netdev, &ets); 1298 err = ops->ieee_peer_getets(netdev, &ets);
1281 if (!err) 1299 if (!err &&
1282 NLA_PUT(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets); 1300 nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
1301 goto nla_put_failure;
1283 } 1302 }
1284 1303
1285 if (ops->ieee_peer_getpfc) { 1304 if (ops->ieee_peer_getpfc) {
1286 struct ieee_pfc pfc; 1305 struct ieee_pfc pfc;
1287 err = ops->ieee_peer_getpfc(netdev, &pfc); 1306 err = ops->ieee_peer_getpfc(netdev, &pfc);
1288 if (!err) 1307 if (!err &&
1289 NLA_PUT(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc); 1308 nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
1309 goto nla_put_failure;
1290 } 1310 }
1291 1311
1292 if (ops->peer_getappinfo && ops->peer_getapptable) { 1312 if (ops->peer_getappinfo && ops->peer_getapptable) {
@@ -1340,10 +1360,11 @@ static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
1340 ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0, 1360 ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
1341 &prio, &pgid, &tc_pct, &up_map); 1361 &prio, &pgid, &tc_pct, &up_map);
1342 1362
1343 NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_PGID, pgid); 1363 if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) ||
1344 NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map); 1364 nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) ||
1345 NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio); 1365 nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) ||
1346 NLA_PUT_U8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct); 1366 nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct))
1367 goto nla_put_failure;
1347 nla_nest_end(skb, tc_nest); 1368 nla_nest_end(skb, tc_nest);
1348 } 1369 }
1349 1370
@@ -1356,7 +1377,8 @@ static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
1356 else 1377 else
1357 ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0, 1378 ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
1358 &tc_pct); 1379 &tc_pct);
1359 NLA_PUT_U8(skb, i, tc_pct); 1380 if (nla_put_u8(skb, i, tc_pct))
1381 goto nla_put_failure;
1360 } 1382 }
1361 nla_nest_end(skb, pg); 1383 nla_nest_end(skb, pg);
1362 return 0; 1384 return 0;
@@ -1373,8 +1395,8 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
1373 int dcbx, i, err = -EMSGSIZE; 1395 int dcbx, i, err = -EMSGSIZE;
1374 u8 value; 1396 u8 value;
1375 1397
1376 NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name); 1398 if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
1377 1399 goto nla_put_failure;
1378 cee = nla_nest_start(skb, DCB_ATTR_CEE); 1400 cee = nla_nest_start(skb, DCB_ATTR_CEE);
1379 if (!cee) 1401 if (!cee)
1380 goto nla_put_failure; 1402 goto nla_put_failure;
@@ -1401,7 +1423,8 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
1401 1423
1402 for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) { 1424 for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
1403 ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value); 1425 ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
1404 NLA_PUT_U8(skb, i, value); 1426 if (nla_put_u8(skb, i, value))
1427 goto nla_put_failure;
1405 } 1428 }
1406 nla_nest_end(skb, pfc_nest); 1429 nla_nest_end(skb, pfc_nest);
1407 } 1430 }
@@ -1454,8 +1477,9 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
1454 1477
1455 for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX; 1478 for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
1456 i++) 1479 i++)
1457 if (!ops->getfeatcfg(netdev, i, &value)) 1480 if (!ops->getfeatcfg(netdev, i, &value) &&
1458 NLA_PUT_U8(skb, i, value); 1481 nla_put_u8(skb, i, value))
1482 goto nla_put_failure;
1459 1483
1460 nla_nest_end(skb, feat); 1484 nla_nest_end(skb, feat);
1461 } 1485 }
@@ -1464,15 +1488,17 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
1464 if (ops->cee_peer_getpg) { 1488 if (ops->cee_peer_getpg) {
1465 struct cee_pg pg; 1489 struct cee_pg pg;
1466 err = ops->cee_peer_getpg(netdev, &pg); 1490 err = ops->cee_peer_getpg(netdev, &pg);
1467 if (!err) 1491 if (!err &&
1468 NLA_PUT(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg); 1492 nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
1493 goto nla_put_failure;
1469 } 1494 }
1470 1495
1471 if (ops->cee_peer_getpfc) { 1496 if (ops->cee_peer_getpfc) {
1472 struct cee_pfc pfc; 1497 struct cee_pfc pfc;
1473 err = ops->cee_peer_getpfc(netdev, &pfc); 1498 err = ops->cee_peer_getpfc(netdev, &pfc);
1474 if (!err) 1499 if (!err &&
1475 NLA_PUT(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc); 1500 nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc))
1501 goto nla_put_failure;
1476 } 1502 }
1477 1503
1478 if (ops->peer_getappinfo && ops->peer_getapptable) { 1504 if (ops->peer_getappinfo && ops->peer_getapptable) {
@@ -1589,6 +1615,14 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
1589 goto err; 1615 goto err;
1590 } 1616 }
1591 1617
1618 if (ieee[DCB_ATTR_IEEE_MAXRATE] && ops->ieee_setmaxrate) {
1619 struct ieee_maxrate *maxrate =
1620 nla_data(ieee[DCB_ATTR_IEEE_MAXRATE]);
1621 err = ops->ieee_setmaxrate(netdev, maxrate);
1622 if (err)
1623 goto err;
1624 }
1625
1592 if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) { 1626 if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
1593 struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]); 1627 struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
1594 err = ops->ieee_setpfc(netdev, pfc); 1628 err = ops->ieee_setpfc(netdev, pfc);
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 70bfaf2d1965..8c67bedf85b0 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -100,7 +100,7 @@ static void ccid3_update_send_interval(struct ccid3_hc_tx_sock *hc)
100 100
101 DCCP_BUG_ON(hc->tx_t_ipi == 0); 101 DCCP_BUG_ON(hc->tx_t_ipi == 0);
102 ccid3_pr_debug("t_ipi=%u, s=%u, X=%u\n", hc->tx_t_ipi, 102 ccid3_pr_debug("t_ipi=%u, s=%u, X=%u\n", hc->tx_t_ipi,
103 hc->tx_s, (unsigned)(hc->tx_x >> 6)); 103 hc->tx_s, (unsigned int)(hc->tx_x >> 6));
104} 104}
105 105
106static u32 ccid3_hc_tx_idle_rtt(struct ccid3_hc_tx_sock *hc, ktime_t now) 106static u32 ccid3_hc_tx_idle_rtt(struct ccid3_hc_tx_sock *hc, ktime_t now)
@@ -153,9 +153,9 @@ static void ccid3_hc_tx_update_x(struct sock *sk, ktime_t *stamp)
153 153
154 if (hc->tx_x != old_x) { 154 if (hc->tx_x != old_x) {
155 ccid3_pr_debug("X_prev=%u, X_now=%u, X_calc=%u, " 155 ccid3_pr_debug("X_prev=%u, X_now=%u, X_calc=%u, "
156 "X_recv=%u\n", (unsigned)(old_x >> 6), 156 "X_recv=%u\n", (unsigned int)(old_x >> 6),
157 (unsigned)(hc->tx_x >> 6), hc->tx_x_calc, 157 (unsigned int)(hc->tx_x >> 6), hc->tx_x_calc,
158 (unsigned)(hc->tx_x_recv >> 6)); 158 (unsigned int)(hc->tx_x_recv >> 6));
159 159
160 ccid3_update_send_interval(hc); 160 ccid3_update_send_interval(hc);
161 } 161 }
@@ -425,8 +425,8 @@ done_computing_x:
425 "p=%u, X_calc=%u, X_recv=%u, X=%u\n", 425 "p=%u, X_calc=%u, X_recv=%u, X=%u\n",
426 dccp_role(sk), sk, hc->tx_rtt, r_sample, 426 dccp_role(sk), sk, hc->tx_rtt, r_sample,
427 hc->tx_s, hc->tx_p, hc->tx_x_calc, 427 hc->tx_s, hc->tx_p, hc->tx_x_calc,
428 (unsigned)(hc->tx_x_recv >> 6), 428 (unsigned int)(hc->tx_x_recv >> 6),
429 (unsigned)(hc->tx_x >> 6)); 429 (unsigned int)(hc->tx_x >> 6));
430 430
431 /* unschedule no feedback timer */ 431 /* unschedule no feedback timer */
432 sk_stop_timer(sk, &hc->tx_no_feedback_timer); 432 sk_stop_timer(sk, &hc->tx_no_feedback_timer);
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 29d6bb629a6c..9040be049d8c 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -75,7 +75,7 @@ extern void dccp_time_wait(struct sock *sk, int state, int timeo);
75 * state, about 60 seconds */ 75 * state, about 60 seconds */
76 76
77/* RFC 1122, 4.2.3.1 initial RTO value */ 77/* RFC 1122, 4.2.3.1 initial RTO value */
78#define DCCP_TIMEOUT_INIT ((unsigned)(3 * HZ)) 78#define DCCP_TIMEOUT_INIT ((unsigned int)(3 * HZ))
79 79
80/* 80/*
81 * The maximum back-off value for retransmissions. This is needed for 81 * The maximum back-off value for retransmissions. This is needed for
@@ -84,7 +84,7 @@ extern void dccp_time_wait(struct sock *sk, int state, int timeo);
84 * - feature-negotiation retransmission (sec. 6.6.3), 84 * - feature-negotiation retransmission (sec. 6.6.3),
85 * - Acks in client-PARTOPEN state (sec. 8.1.5). 85 * - Acks in client-PARTOPEN state (sec. 8.1.5).
86 */ 86 */
87#define DCCP_RTO_MAX ((unsigned)(64 * HZ)) 87#define DCCP_RTO_MAX ((unsigned int)(64 * HZ))
88 88
89/* 89/*
90 * RTT sampling: sanity bounds and fallback RTT value from RFC 4340, section 3.4 90 * RTT sampling: sanity bounds and fallback RTT value from RFC 4340, section 3.4
@@ -287,9 +287,9 @@ extern struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
287extern int dccp_child_process(struct sock *parent, struct sock *child, 287extern int dccp_child_process(struct sock *parent, struct sock *child,
288 struct sk_buff *skb); 288 struct sk_buff *skb);
289extern int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, 289extern int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
290 struct dccp_hdr *dh, unsigned len); 290 struct dccp_hdr *dh, unsigned int len);
291extern int dccp_rcv_established(struct sock *sk, struct sk_buff *skb, 291extern int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
292 const struct dccp_hdr *dh, const unsigned len); 292 const struct dccp_hdr *dh, const unsigned int len);
293 293
294extern int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized); 294extern int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized);
295extern void dccp_destroy_sock(struct sock *sk); 295extern void dccp_destroy_sock(struct sock *sk);
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 51d5fe5fffba..bc93a333931e 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -285,7 +285,7 @@ static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
285} 285}
286 286
287static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb, 287static int __dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
288 const struct dccp_hdr *dh, const unsigned len) 288 const struct dccp_hdr *dh, const unsigned int len)
289{ 289{
290 struct dccp_sock *dp = dccp_sk(sk); 290 struct dccp_sock *dp = dccp_sk(sk);
291 291
@@ -366,7 +366,7 @@ discard:
366} 366}
367 367
368int dccp_rcv_established(struct sock *sk, struct sk_buff *skb, 368int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
369 const struct dccp_hdr *dh, const unsigned len) 369 const struct dccp_hdr *dh, const unsigned int len)
370{ 370{
371 if (dccp_check_seqno(sk, skb)) 371 if (dccp_check_seqno(sk, skb))
372 goto discard; 372 goto discard;
@@ -388,7 +388,7 @@ EXPORT_SYMBOL_GPL(dccp_rcv_established);
388static int dccp_rcv_request_sent_state_process(struct sock *sk, 388static int dccp_rcv_request_sent_state_process(struct sock *sk,
389 struct sk_buff *skb, 389 struct sk_buff *skb,
390 const struct dccp_hdr *dh, 390 const struct dccp_hdr *dh,
391 const unsigned len) 391 const unsigned int len)
392{ 392{
393 /* 393 /*
394 * Step 4: Prepare sequence numbers in REQUEST 394 * Step 4: Prepare sequence numbers in REQUEST
@@ -521,7 +521,7 @@ unable_to_proceed:
521static int dccp_rcv_respond_partopen_state_process(struct sock *sk, 521static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
522 struct sk_buff *skb, 522 struct sk_buff *skb,
523 const struct dccp_hdr *dh, 523 const struct dccp_hdr *dh,
524 const unsigned len) 524 const unsigned int len)
525{ 525{
526 struct dccp_sock *dp = dccp_sk(sk); 526 struct dccp_sock *dp = dccp_sk(sk);
527 u32 sample = dp->dccps_options_received.dccpor_timestamp_echo; 527 u32 sample = dp->dccps_options_received.dccpor_timestamp_echo;
@@ -572,7 +572,7 @@ static int dccp_rcv_respond_partopen_state_process(struct sock *sk,
572} 572}
573 573
574int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, 574int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
575 struct dccp_hdr *dh, unsigned len) 575 struct dccp_hdr *dh, unsigned int len)
576{ 576{
577 struct dccp_sock *dp = dccp_sk(sk); 577 struct dccp_sock *dp = dccp_sk(sk);
578 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb); 578 struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index caf6e1734b62..07f5579ca756 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -574,6 +574,11 @@ static void dccp_v4_reqsk_destructor(struct request_sock *req)
574 kfree(inet_rsk(req)->opt); 574 kfree(inet_rsk(req)->opt);
575} 575}
576 576
577void dccp_syn_ack_timeout(struct sock *sk, struct request_sock *req)
578{
579}
580EXPORT_SYMBOL(dccp_syn_ack_timeout);
581
577static struct request_sock_ops dccp_request_sock_ops __read_mostly = { 582static struct request_sock_ops dccp_request_sock_ops __read_mostly = {
578 .family = PF_INET, 583 .family = PF_INET,
579 .obj_size = sizeof(struct dccp_request_sock), 584 .obj_size = sizeof(struct dccp_request_sock),
@@ -581,6 +586,7 @@ static struct request_sock_ops dccp_request_sock_ops __read_mostly = {
581 .send_ack = dccp_reqsk_send_ack, 586 .send_ack = dccp_reqsk_send_ack,
582 .destructor = dccp_v4_reqsk_destructor, 587 .destructor = dccp_v4_reqsk_destructor,
583 .send_reset = dccp_v4_ctl_send_reset, 588 .send_reset = dccp_v4_ctl_send_reset,
589 .syn_ack_timeout = dccp_syn_ack_timeout,
584}; 590};
585 591
586int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) 592int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 4dc588f520e0..fa9512d86f3b 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -343,6 +343,7 @@ static struct request_sock_ops dccp6_request_sock_ops = {
343 .send_ack = dccp_reqsk_send_ack, 343 .send_ack = dccp_reqsk_send_ack,
344 .destructor = dccp_v6_reqsk_destructor, 344 .destructor = dccp_v6_reqsk_destructor,
345 .send_reset = dccp_v6_ctl_send_reset, 345 .send_reset = dccp_v6_ctl_send_reset,
346 .syn_ack_timeout = dccp_syn_ack_timeout,
346}; 347};
347 348
348static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb) 349static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
@@ -579,7 +580,7 @@ static struct sock *dccp_v6_request_recv_sock(struct sock *sk,
579 newnp->pktoptions = NULL; 580 newnp->pktoptions = NULL;
580 if (ireq6->pktopts != NULL) { 581 if (ireq6->pktopts != NULL) {
581 newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC); 582 newnp->pktoptions = skb_clone(ireq6->pktopts, GFP_ATOMIC);
582 kfree_skb(ireq6->pktopts); 583 consume_skb(ireq6->pktopts);
583 ireq6->pktopts = NULL; 584 ireq6->pktopts = NULL;
584 if (newnp->pktoptions) 585 if (newnp->pktoptions)
585 skb_set_owner_r(newnp->pktoptions, newsk); 586 skb_set_owner_r(newnp->pktoptions, newsk);
diff --git a/net/dccp/sysctl.c b/net/dccp/sysctl.c
index 42348824ee31..607ab71b5a0c 100644
--- a/net/dccp/sysctl.c
+++ b/net/dccp/sysctl.c
@@ -98,18 +98,11 @@ static struct ctl_table dccp_default_table[] = {
98 { } 98 { }
99}; 99};
100 100
101static struct ctl_path dccp_path[] = {
102 { .procname = "net", },
103 { .procname = "dccp", },
104 { .procname = "default", },
105 { }
106};
107
108static struct ctl_table_header *dccp_table_header; 101static struct ctl_table_header *dccp_table_header;
109 102
110int __init dccp_sysctl_init(void) 103int __init dccp_sysctl_init(void)
111{ 104{
112 dccp_table_header = register_sysctl_paths(dccp_path, 105 dccp_table_header = register_net_sysctl(&init_net, "net/dccp/default",
113 dccp_default_table); 106 dccp_default_table);
114 107
115 return dccp_table_header != NULL ? 0 : -ENOMEM; 108 return dccp_table_header != NULL ? 0 : -ENOMEM;
@@ -118,7 +111,7 @@ int __init dccp_sysctl_init(void)
118void dccp_sysctl_exit(void) 111void dccp_sysctl_exit(void)
119{ 112{
120 if (dccp_table_header != NULL) { 113 if (dccp_table_header != NULL) {
121 unregister_sysctl_table(dccp_table_header); 114 unregister_net_sysctl_table(dccp_table_header);
122 dccp_table_header = NULL; 115 dccp_table_header = NULL;
123 } 116 }
124} 117}
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 4136987d94da..2ba1a2814c24 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -250,7 +250,7 @@ static void dn_unhash_sock_bh(struct sock *sk)
250static struct hlist_head *listen_hash(struct sockaddr_dn *addr) 250static struct hlist_head *listen_hash(struct sockaddr_dn *addr)
251{ 251{
252 int i; 252 int i;
253 unsigned hash = addr->sdn_objnum; 253 unsigned int hash = addr->sdn_objnum;
254 254
255 if (hash == 0) { 255 if (hash == 0) {
256 hash = addr->sdn_objnamel; 256 hash = addr->sdn_objnamel;
@@ -1844,9 +1844,9 @@ static inline int dn_queue_too_long(struct dn_scp *scp, struct sk_buff_head *que
1844 * inclusion (or not) of the two 16 bit acknowledgement fields so it doesn't 1844 * inclusion (or not) of the two 16 bit acknowledgement fields so it doesn't
1845 * make much practical difference. 1845 * make much practical difference.
1846 */ 1846 */
1847unsigned dn_mss_from_pmtu(struct net_device *dev, int mtu) 1847unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu)
1848{ 1848{
1849 unsigned mss = 230 - DN_MAX_NSP_DATA_HEADER; 1849 unsigned int mss = 230 - DN_MAX_NSP_DATA_HEADER;
1850 if (dev) { 1850 if (dev) {
1851 struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr); 1851 struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
1852 mtu -= LL_RESERVED_SPACE(dev); 1852 mtu -= LL_RESERVED_SPACE(dev);
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index c00e3077988c..f3924ab1e019 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -209,15 +209,7 @@ static void dn_dev_sysctl_register(struct net_device *dev, struct dn_dev_parms *
209 struct dn_dev_sysctl_table *t; 209 struct dn_dev_sysctl_table *t;
210 int i; 210 int i;
211 211
212#define DN_CTL_PATH_DEV 3 212 char path[sizeof("net/decnet/conf/") + IFNAMSIZ];
213
214 struct ctl_path dn_ctl_path[] = {
215 { .procname = "net", },
216 { .procname = "decnet", },
217 { .procname = "conf", },
218 { /* to be set */ },
219 { },
220 };
221 213
222 t = kmemdup(&dn_dev_sysctl, sizeof(*t), GFP_KERNEL); 214 t = kmemdup(&dn_dev_sysctl, sizeof(*t), GFP_KERNEL);
223 if (t == NULL) 215 if (t == NULL)
@@ -228,15 +220,12 @@ static void dn_dev_sysctl_register(struct net_device *dev, struct dn_dev_parms *
228 t->dn_dev_vars[i].data = ((char *)parms) + offset; 220 t->dn_dev_vars[i].data = ((char *)parms) + offset;
229 } 221 }
230 222
231 if (dev) { 223 snprintf(path, sizeof(path), "net/decnet/conf/%s",
232 dn_ctl_path[DN_CTL_PATH_DEV].procname = dev->name; 224 dev? dev->name : parms->name);
233 } else {
234 dn_ctl_path[DN_CTL_PATH_DEV].procname = parms->name;
235 }
236 225
237 t->dn_dev_vars[0].extra1 = (void *)dev; 226 t->dn_dev_vars[0].extra1 = (void *)dev;
238 227
239 t->sysctl_header = register_sysctl_paths(dn_ctl_path, t->dn_dev_vars); 228 t->sysctl_header = register_net_sysctl(&init_net, path, t->dn_dev_vars);
240 if (t->sysctl_header == NULL) 229 if (t->sysctl_header == NULL)
241 kfree(t); 230 kfree(t);
242 else 231 else
@@ -248,7 +237,7 @@ static void dn_dev_sysctl_unregister(struct dn_dev_parms *parms)
248 if (parms->sysctl) { 237 if (parms->sysctl) {
249 struct dn_dev_sysctl_table *t = parms->sysctl; 238 struct dn_dev_sysctl_table *t = parms->sysctl;
250 parms->sysctl = NULL; 239 parms->sysctl = NULL;
251 unregister_sysctl_table(t->sysctl_header); 240 unregister_net_sysctl_table(t->sysctl_header);
252 kfree(t); 241 kfree(t);
253 } 242 }
254} 243}
@@ -694,13 +683,13 @@ static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa,
694 ifm->ifa_scope = ifa->ifa_scope; 683 ifm->ifa_scope = ifa->ifa_scope;
695 ifm->ifa_index = ifa->ifa_dev->dev->ifindex; 684 ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
696 685
697 if (ifa->ifa_address) 686 if ((ifa->ifa_address &&
698 NLA_PUT_LE16(skb, IFA_ADDRESS, ifa->ifa_address); 687 nla_put_le16(skb, IFA_ADDRESS, ifa->ifa_address)) ||
699 if (ifa->ifa_local) 688 (ifa->ifa_local &&
700 NLA_PUT_LE16(skb, IFA_LOCAL, ifa->ifa_local); 689 nla_put_le16(skb, IFA_LOCAL, ifa->ifa_local)) ||
701 if (ifa->ifa_label[0]) 690 (ifa->ifa_label[0] &&
702 NLA_PUT_STRING(skb, IFA_LABEL, ifa->ifa_label); 691 nla_put_string(skb, IFA_LABEL, ifa->ifa_label)))
703 692 goto nla_put_failure;
704 return nlmsg_end(skb, nlh); 693 return nlmsg_end(skb, nlh);
705 694
706nla_put_failure: 695nla_put_failure:
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index 9e885f180b60..65a8cd7891fe 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -302,11 +302,12 @@ struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r, struct dn_kern_rta
302 struct rtattr *attr = RTA_DATA(rta->rta_mx); 302 struct rtattr *attr = RTA_DATA(rta->rta_mx);
303 303
304 while(RTA_OK(attr, attrlen)) { 304 while(RTA_OK(attr, attrlen)) {
305 unsigned flavour = attr->rta_type; 305 unsigned int flavour = attr->rta_type;
306
306 if (flavour) { 307 if (flavour) {
307 if (flavour > RTAX_MAX) 308 if (flavour > RTAX_MAX)
308 goto err_inval; 309 goto err_inval;
309 fi->fib_metrics[flavour-1] = *(unsigned*)RTA_DATA(attr); 310 fi->fib_metrics[flavour-1] = *(unsigned int *)RTA_DATA(attr);
310 } 311 }
311 attr = RTA_NEXT(attr, attrlen); 312 attr = RTA_NEXT(attr, attrlen);
312 } 313 }
diff --git a/net/decnet/dn_nsp_in.c b/net/decnet/dn_nsp_in.c
index f6544b2c91b0..58084f37151e 100644
--- a/net/decnet/dn_nsp_in.c
+++ b/net/decnet/dn_nsp_in.c
@@ -588,7 +588,7 @@ static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig
588 number of warnings when compiling with -W --ANK 588 number of warnings when compiling with -W --ANK
589 */ 589 */
590 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 590 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
591 (unsigned)sk->sk_rcvbuf) { 591 (unsigned int)sk->sk_rcvbuf) {
592 err = -ENOMEM; 592 err = -ENOMEM;
593 goto out; 593 goto out;
594 } 594 }
diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c
index e446e85e64a6..b952f88d9c1f 100644
--- a/net/decnet/dn_nsp_out.c
+++ b/net/decnet/dn_nsp_out.c
@@ -209,7 +209,7 @@ static void dn_nsp_rtt(struct sock *sk, long rtt)
209 * 209 *
210 * Returns: The number of times the packet has been sent previously 210 * Returns: The number of times the packet has been sent previously
211 */ 211 */
212static inline unsigned dn_nsp_clone_and_send(struct sk_buff *skb, 212static inline unsigned int dn_nsp_clone_and_send(struct sk_buff *skb,
213 gfp_t gfp) 213 gfp_t gfp)
214{ 214{
215 struct dn_skb_cb *cb = DN_SKB_CB(skb); 215 struct dn_skb_cb *cb = DN_SKB_CB(skb);
@@ -240,7 +240,7 @@ void dn_nsp_output(struct sock *sk)
240{ 240{
241 struct dn_scp *scp = DN_SK(sk); 241 struct dn_scp *scp = DN_SK(sk);
242 struct sk_buff *skb; 242 struct sk_buff *skb;
243 unsigned reduce_win = 0; 243 unsigned int reduce_win = 0;
244 244
245 /* 245 /*
246 * First we check for otherdata/linkservice messages 246 * First we check for otherdata/linkservice messages
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 80a3de4906d3..7e1f8788da19 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -122,7 +122,7 @@ static int dn_route_input(struct sk_buff *);
122static void dn_run_flush(unsigned long dummy); 122static void dn_run_flush(unsigned long dummy);
123 123
124static struct dn_rt_hash_bucket *dn_rt_hash_table; 124static struct dn_rt_hash_bucket *dn_rt_hash_table;
125static unsigned dn_rt_hash_mask; 125static unsigned int dn_rt_hash_mask;
126 126
127static struct timer_list dn_route_timer; 127static struct timer_list dn_route_timer;
128static DEFINE_TIMER(dn_rt_flush_timer, dn_run_flush, 0, 0); 128static DEFINE_TIMER(dn_rt_flush_timer, dn_run_flush, 0, 0);
@@ -149,13 +149,13 @@ static void dn_dst_destroy(struct dst_entry *dst)
149 dst_destroy_metrics_generic(dst); 149 dst_destroy_metrics_generic(dst);
150} 150}
151 151
152static __inline__ unsigned dn_hash(__le16 src, __le16 dst) 152static __inline__ unsigned int dn_hash(__le16 src, __le16 dst)
153{ 153{
154 __u16 tmp = (__u16 __force)(src ^ dst); 154 __u16 tmp = (__u16 __force)(src ^ dst);
155 tmp ^= (tmp >> 3); 155 tmp ^= (tmp >> 3);
156 tmp ^= (tmp >> 5); 156 tmp ^= (tmp >> 5);
157 tmp ^= (tmp >> 10); 157 tmp ^= (tmp >> 10);
158 return dn_rt_hash_mask & (unsigned)tmp; 158 return dn_rt_hash_mask & (unsigned int)tmp;
159} 159}
160 160
161static inline void dnrt_free(struct dn_route *rt) 161static inline void dnrt_free(struct dn_route *rt)
@@ -297,7 +297,7 @@ static inline int compare_keys(struct flowidn *fl1, struct flowidn *fl2)
297 (fl1->flowidn_iif ^ fl2->flowidn_iif)) == 0; 297 (fl1->flowidn_iif ^ fl2->flowidn_iif)) == 0;
298} 298}
299 299
300static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp) 300static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_route **rp)
301{ 301{
302 struct dn_route *rth; 302 struct dn_route *rth;
303 struct dn_route __rcu **rthp; 303 struct dn_route __rcu **rthp;
@@ -934,8 +934,8 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowidn *o
934 struct dn_route *rt = NULL; 934 struct dn_route *rt = NULL;
935 struct net_device *dev_out = NULL, *dev; 935 struct net_device *dev_out = NULL, *dev;
936 struct neighbour *neigh = NULL; 936 struct neighbour *neigh = NULL;
937 unsigned hash; 937 unsigned int hash;
938 unsigned flags = 0; 938 unsigned int flags = 0;
939 struct dn_fib_res res = { .fi = NULL, .type = RTN_UNICAST }; 939 struct dn_fib_res res = { .fi = NULL, .type = RTN_UNICAST };
940 int err; 940 int err;
941 int free_res = 0; 941 int free_res = 0;
@@ -1209,7 +1209,7 @@ e_neighbour:
1209 */ 1209 */
1210static int __dn_route_output_key(struct dst_entry **pprt, const struct flowidn *flp, int flags) 1210static int __dn_route_output_key(struct dst_entry **pprt, const struct flowidn *flp, int flags)
1211{ 1211{
1212 unsigned hash = dn_hash(flp->saddr, flp->daddr); 1212 unsigned int hash = dn_hash(flp->saddr, flp->daddr);
1213 struct dn_route *rt = NULL; 1213 struct dn_route *rt = NULL;
1214 1214
1215 if (!(flags & MSG_TRYHARD)) { 1215 if (!(flags & MSG_TRYHARD)) {
@@ -1275,7 +1275,7 @@ static int dn_route_input_slow(struct sk_buff *skb)
1275 struct net_device *out_dev = NULL; 1275 struct net_device *out_dev = NULL;
1276 struct dn_dev *dn_db; 1276 struct dn_dev *dn_db;
1277 struct neighbour *neigh = NULL; 1277 struct neighbour *neigh = NULL;
1278 unsigned hash; 1278 unsigned int hash;
1279 int flags = 0; 1279 int flags = 0;
1280 __le16 gateway = 0; 1280 __le16 gateway = 0;
1281 __le16 local_src = 0; 1281 __le16 local_src = 0;
@@ -1490,7 +1490,7 @@ static int dn_route_input(struct sk_buff *skb)
1490{ 1490{
1491 struct dn_route *rt; 1491 struct dn_route *rt;
1492 struct dn_skb_cb *cb = DN_SKB_CB(skb); 1492 struct dn_skb_cb *cb = DN_SKB_CB(skb);
1493 unsigned hash = dn_hash(cb->src, cb->dst); 1493 unsigned int hash = dn_hash(cb->src, cb->dst);
1494 1494
1495 if (skb_dst(skb)) 1495 if (skb_dst(skb))
1496 return 0; 1496 return 0;
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index f65c9ddaee41..e65f2c856e06 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -177,11 +177,11 @@ static int dn_fib_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
177 return 1; 177 return 1;
178} 178}
179 179
180unsigned dnet_addr_type(__le16 addr) 180unsigned int dnet_addr_type(__le16 addr)
181{ 181{
182 struct flowidn fld = { .daddr = addr }; 182 struct flowidn fld = { .daddr = addr };
183 struct dn_fib_res res; 183 struct dn_fib_res res;
184 unsigned ret = RTN_UNICAST; 184 unsigned int ret = RTN_UNICAST;
185 struct dn_fib_table *tb = dn_fib_get_table(RT_TABLE_LOCAL, 0); 185 struct dn_fib_table *tb = dn_fib_get_table(RT_TABLE_LOCAL, 0);
186 186
187 res.r = NULL; 187 res.r = NULL;
@@ -204,11 +204,11 @@ static int dn_fib_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
204 frh->src_len = r->src_len; 204 frh->src_len = r->src_len;
205 frh->tos = 0; 205 frh->tos = 0;
206 206
207 if (r->dst_len) 207 if ((r->dst_len &&
208 NLA_PUT_LE16(skb, FRA_DST, r->dst); 208 nla_put_le16(skb, FRA_DST, r->dst)) ||
209 if (r->src_len) 209 (r->src_len &&
210 NLA_PUT_LE16(skb, FRA_SRC, r->src); 210 nla_put_le16(skb, FRA_SRC, r->src)))
211 211 goto nla_put_failure;
212 return 0; 212 return 0;
213 213
214nla_put_failure: 214nla_put_failure:
diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
index 02e75d11cfbb..a55eeccaa72f 100644
--- a/net/decnet/sysctl_net_decnet.c
+++ b/net/decnet/sysctl_net_decnet.c
@@ -351,20 +351,14 @@ static ctl_table dn_table[] = {
351 { } 351 { }
352}; 352};
353 353
354static struct ctl_path dn_path[] = {
355 { .procname = "net", },
356 { .procname = "decnet", },
357 { }
358};
359
360void dn_register_sysctl(void) 354void dn_register_sysctl(void)
361{ 355{
362 dn_table_header = register_sysctl_paths(dn_path, dn_table); 356 dn_table_header = register_net_sysctl(&init_net, "net/decnet", dn_table);
363} 357}
364 358
365void dn_unregister_sysctl(void) 359void dn_unregister_sysctl(void)
366{ 360{
367 unregister_sysctl_table(dn_table_header); 361 unregister_net_sysctl_table(dn_table_header);
368} 362}
369 363
370#else /* CONFIG_SYSCTL */ 364#else /* CONFIG_SYSCTL */
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index c73bba326d70..6f70ea935b0b 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -38,7 +38,7 @@ MODULE_DESCRIPTION("DNS Resolver");
38MODULE_AUTHOR("Wang Lei"); 38MODULE_AUTHOR("Wang Lei");
39MODULE_LICENSE("GPL"); 39MODULE_LICENSE("GPL");
40 40
41unsigned dns_resolver_debug; 41unsigned int dns_resolver_debug;
42module_param_named(debug, dns_resolver_debug, uint, S_IWUSR | S_IRUGO); 42module_param_named(debug, dns_resolver_debug, uint, S_IWUSR | S_IRUGO);
43MODULE_PARM_DESC(debug, "DNS Resolver debugging mask"); 43MODULE_PARM_DESC(debug, "DNS Resolver debugging mask");
44 44
diff --git a/net/dns_resolver/internal.h b/net/dns_resolver/internal.h
index 189ca9e9b785..17c7886b5b3a 100644
--- a/net/dns_resolver/internal.h
+++ b/net/dns_resolver/internal.h
@@ -31,7 +31,7 @@ extern const struct cred *dns_resolver_cache;
31/* 31/*
32 * debug tracing 32 * debug tracing
33 */ 33 */
34extern unsigned dns_resolver_debug; 34extern unsigned int dns_resolver_debug;
35 35
36#define kdebug(FMT, ...) \ 36#define kdebug(FMT, ...) \
37do { \ 37do { \
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 71b5edcee401..fa14ca76b77b 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -617,7 +617,7 @@ static int econet_create(struct net *net, struct socket *sock, int protocol,
617 if (sk == NULL) 617 if (sk == NULL)
618 goto out; 618 goto out;
619 619
620 sk->sk_reuse = 1; 620 sk->sk_reuse = SK_CAN_REUSE;
621 sock->ops = &econet_ops; 621 sock->ops = &econet_ops;
622 sock_init_data(sock, sk); 622 sock_init_data(sock, sk);
623 623
@@ -1012,7 +1012,7 @@ static int __init aun_udp_initialise(void)
1012 return error; 1012 return error;
1013 } 1013 }
1014 1014
1015 udpsock->sk->sk_reuse = 1; 1015 udpsock->sk->sk_reuse = SK_CAN_REUSE;
1016 udpsock->sk->sk_allocation = GFP_ATOMIC; /* we're going to call it 1016 udpsock->sk->sk_allocation = GFP_ATOMIC; /* we're going to call it
1017 from interrupts */ 1017 from interrupts */
1018 1018
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index bf10a311cf1c..5889a6c38a10 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -77,7 +77,7 @@ __setup("ether=", netdev_boot_setup);
77 */ 77 */
78int eth_header(struct sk_buff *skb, struct net_device *dev, 78int eth_header(struct sk_buff *skb, struct net_device *dev,
79 unsigned short type, 79 unsigned short type,
80 const void *daddr, const void *saddr, unsigned len) 80 const void *daddr, const void *saddr, unsigned int len)
81{ 81{
82 struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN); 82 struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
83 83
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 840821b90bcd..32eb4179e8fa 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -196,7 +196,7 @@ lowpan_compress_addr_64(u8 **hc06_ptr, u8 shift, const struct in6_addr *ipaddr,
196static void 196static void
197lowpan_uip_ds6_set_addr_iid(struct in6_addr *ipaddr, unsigned char *lladdr) 197lowpan_uip_ds6_set_addr_iid(struct in6_addr *ipaddr, unsigned char *lladdr)
198{ 198{
199 memcpy(&ipaddr->s6_addr[8], lladdr, IEEE802154_ALEN); 199 memcpy(&ipaddr->s6_addr[8], lladdr, IEEE802154_ADDR_LEN);
200 /* second bit-flip (Universe/Local) is done according RFC2464 */ 200 /* second bit-flip (Universe/Local) is done according RFC2464 */
201 ipaddr->s6_addr[8] ^= 0x02; 201 ipaddr->s6_addr[8] ^= 0x02;
202} 202}
@@ -221,7 +221,7 @@ lowpan_uncompress_addr(struct sk_buff *skb, struct in6_addr *ipaddr,
221 221
222 if (lladdr) 222 if (lladdr)
223 lowpan_raw_dump_inline(__func__, "linklocal address", 223 lowpan_raw_dump_inline(__func__, "linklocal address",
224 lladdr, IEEE802154_ALEN); 224 lladdr, IEEE802154_ADDR_LEN);
225 if (prefcount > 0) 225 if (prefcount > 0)
226 memcpy(ipaddr, prefix, prefcount); 226 memcpy(ipaddr, prefix, prefcount);
227 227
@@ -371,7 +371,7 @@ err:
371static int lowpan_header_create(struct sk_buff *skb, 371static int lowpan_header_create(struct sk_buff *skb,
372 struct net_device *dev, 372 struct net_device *dev,
373 unsigned short type, const void *_daddr, 373 unsigned short type, const void *_daddr,
374 const void *_saddr, unsigned len) 374 const void *_saddr, unsigned int len)
375{ 375{
376 u8 tmp, iphc0, iphc1, *hc06_ptr; 376 u8 tmp, iphc0, iphc1, *hc06_ptr;
377 struct ipv6hdr *hdr; 377 struct ipv6hdr *hdr;
@@ -650,6 +650,53 @@ static void lowpan_fragment_timer_expired(unsigned long entry_addr)
650 kfree(entry); 650 kfree(entry);
651} 651}
652 652
653static struct lowpan_fragment *
654lowpan_alloc_new_frame(struct sk_buff *skb, u8 iphc0, u8 len, u8 tag)
655{
656 struct lowpan_fragment *frame;
657
658 frame = kzalloc(sizeof(struct lowpan_fragment),
659 GFP_ATOMIC);
660 if (!frame)
661 goto frame_err;
662
663 INIT_LIST_HEAD(&frame->list);
664
665 frame->length = (iphc0 & 7) | (len << 3);
666 frame->tag = tag;
667
668 /* allocate buffer for frame assembling */
669 frame->skb = alloc_skb(frame->length +
670 sizeof(struct ipv6hdr), GFP_ATOMIC);
671
672 if (!frame->skb)
673 goto skb_err;
674
675 frame->skb->priority = skb->priority;
676 frame->skb->dev = skb->dev;
677
678 /* reserve headroom for uncompressed ipv6 header */
679 skb_reserve(frame->skb, sizeof(struct ipv6hdr));
680 skb_put(frame->skb, frame->length);
681
682 init_timer(&frame->timer);
683 /* time out is the same as for ipv6 - 60 sec */
684 frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT;
685 frame->timer.data = (unsigned long)frame;
686 frame->timer.function = lowpan_fragment_timer_expired;
687
688 add_timer(&frame->timer);
689
690 list_add_tail(&frame->list, &lowpan_fragments);
691
692 return frame;
693
694skb_err:
695 kfree(frame);
696frame_err:
697 return NULL;
698}
699
653static int 700static int
654lowpan_process_data(struct sk_buff *skb) 701lowpan_process_data(struct sk_buff *skb)
655{ 702{
@@ -692,41 +739,9 @@ lowpan_process_data(struct sk_buff *skb)
692 739
693 /* alloc new frame structure */ 740 /* alloc new frame structure */
694 if (!found) { 741 if (!found) {
695 frame = kzalloc(sizeof(struct lowpan_fragment), 742 frame = lowpan_alloc_new_frame(skb, iphc0, len, tag);
696 GFP_ATOMIC);
697 if (!frame) 743 if (!frame)
698 goto unlock_and_drop; 744 goto unlock_and_drop;
699
700 INIT_LIST_HEAD(&frame->list);
701
702 frame->length = (iphc0 & 7) | (len << 3);
703 frame->tag = tag;
704
705 /* allocate buffer for frame assembling */
706 frame->skb = alloc_skb(frame->length +
707 sizeof(struct ipv6hdr), GFP_ATOMIC);
708
709 if (!frame->skb) {
710 kfree(frame);
711 goto unlock_and_drop;
712 }
713
714 frame->skb->priority = skb->priority;
715 frame->skb->dev = skb->dev;
716
717 /* reserve headroom for uncompressed ipv6 header */
718 skb_reserve(frame->skb, sizeof(struct ipv6hdr));
719 skb_put(frame->skb, frame->length);
720
721 init_timer(&frame->timer);
722 /* time out is the same as for ipv6 - 60 sec */
723 frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT;
724 frame->timer.data = (unsigned long)frame;
725 frame->timer.function = lowpan_fragment_timer_expired;
726
727 add_timer(&frame->timer);
728
729 list_add_tail(&frame->list, &lowpan_fragments);
730 } 745 }
731 746
732 if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1) 747 if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1)
diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h
index aeff3f310482..8c2251fb0a3f 100644
--- a/net/ieee802154/6lowpan.h
+++ b/net/ieee802154/6lowpan.h
@@ -53,9 +53,6 @@
53#ifndef __6LOWPAN_H__ 53#ifndef __6LOWPAN_H__
54#define __6LOWPAN_H__ 54#define __6LOWPAN_H__
55 55
56/* need to know address length to manipulate with it */
57#define IEEE802154_ALEN 8
58
59#define UIP_802154_SHORTADDR_LEN 2 /* compressed ipv6 address length */ 56#define UIP_802154_SHORTADDR_LEN 2 /* compressed ipv6 address length */
60#define UIP_IPH_LEN 40 /* ipv6 fixed header size */ 57#define UIP_IPH_LEN 40 /* ipv6 fixed header size */
61#define UIP_PROTO_UDP 17 /* ipv6 next header value for UDP */ 58#define UIP_PROTO_UDP 17 /* ipv6 next header value for UDP */
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index 1b09eaabaac1..6fbb2ad7bb6d 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -44,8 +44,8 @@ struct dgram_sock {
44 struct ieee802154_addr src_addr; 44 struct ieee802154_addr src_addr;
45 struct ieee802154_addr dst_addr; 45 struct ieee802154_addr dst_addr;
46 46
47 unsigned bound:1; 47 unsigned int bound:1;
48 unsigned want_ack:1; 48 unsigned int want_ack:1;
49}; 49};
50 50
51static inline struct dgram_sock *dgram_sk(const struct sock *sk) 51static inline struct dgram_sock *dgram_sk(const struct sock *sk)
@@ -206,7 +206,7 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
206 struct msghdr *msg, size_t size) 206 struct msghdr *msg, size_t size)
207{ 207{
208 struct net_device *dev; 208 struct net_device *dev;
209 unsigned mtu; 209 unsigned int mtu;
210 struct sk_buff *skb; 210 struct sk_buff *skb;
211 struct dgram_sock *ro = dgram_sk(sk); 211 struct dgram_sock *ro = dgram_sk(sk);
212 int hlen, tlen; 212 int hlen, tlen;
diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
index adaf46214905..ca92587720f4 100644
--- a/net/ieee802154/nl-mac.c
+++ b/net/ieee802154/nl-mac.c
@@ -63,15 +63,14 @@ int ieee802154_nl_assoc_indic(struct net_device *dev,
63 if (!msg) 63 if (!msg)
64 return -ENOBUFS; 64 return -ENOBUFS;
65 65
66 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); 66 if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
67 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); 67 nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
68 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, 68 nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
69 dev->dev_addr); 69 dev->dev_addr) ||
70 70 nla_put(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
71 NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN, 71 addr->hwaddr) ||
72 addr->hwaddr); 72 nla_put_u8(msg, IEEE802154_ATTR_CAPABILITY, cap))
73 73 goto nla_put_failure;
74 NLA_PUT_U8(msg, IEEE802154_ATTR_CAPABILITY, cap);
75 74
76 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); 75 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
77 76
@@ -92,14 +91,13 @@ int ieee802154_nl_assoc_confirm(struct net_device *dev, u16 short_addr,
92 if (!msg) 91 if (!msg)
93 return -ENOBUFS; 92 return -ENOBUFS;
94 93
95 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); 94 if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
96 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); 95 nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
97 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, 96 nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
98 dev->dev_addr); 97 dev->dev_addr) ||
99 98 nla_put_u16(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr) ||
100 NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr); 99 nla_put_u8(msg, IEEE802154_ATTR_STATUS, status))
101 NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); 100 goto nla_put_failure;
102
103 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); 101 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
104 102
105nla_put_failure: 103nla_put_failure:
@@ -119,20 +117,22 @@ int ieee802154_nl_disassoc_indic(struct net_device *dev,
119 if (!msg) 117 if (!msg)
120 return -ENOBUFS; 118 return -ENOBUFS;
121 119
122 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); 120 if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
123 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); 121 nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
124 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, 122 nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
125 dev->dev_addr); 123 dev->dev_addr))
126 124 goto nla_put_failure;
127 if (addr->addr_type == IEEE802154_ADDR_LONG) 125 if (addr->addr_type == IEEE802154_ADDR_LONG) {
128 NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN, 126 if (nla_put(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN,
129 addr->hwaddr); 127 addr->hwaddr))
130 else 128 goto nla_put_failure;
131 NLA_PUT_U16(msg, IEEE802154_ATTR_SRC_SHORT_ADDR, 129 } else {
132 addr->short_addr); 130 if (nla_put_u16(msg, IEEE802154_ATTR_SRC_SHORT_ADDR,
133 131 addr->short_addr))
134 NLA_PUT_U8(msg, IEEE802154_ATTR_REASON, reason); 132 goto nla_put_failure;
135 133 }
134 if (nla_put_u8(msg, IEEE802154_ATTR_REASON, reason))
135 goto nla_put_failure;
136 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); 136 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
137 137
138nla_put_failure: 138nla_put_failure:
@@ -151,13 +151,12 @@ int ieee802154_nl_disassoc_confirm(struct net_device *dev, u8 status)
151 if (!msg) 151 if (!msg)
152 return -ENOBUFS; 152 return -ENOBUFS;
153 153
154 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); 154 if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
155 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); 155 nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
156 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, 156 nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
157 dev->dev_addr); 157 dev->dev_addr) ||
158 158 nla_put_u8(msg, IEEE802154_ATTR_STATUS, status))
159 NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); 159 goto nla_put_failure;
160
161 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); 160 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
162 161
163nla_put_failure: 162nla_put_failure:
@@ -177,13 +176,13 @@ int ieee802154_nl_beacon_indic(struct net_device *dev,
177 if (!msg) 176 if (!msg)
178 return -ENOBUFS; 177 return -ENOBUFS;
179 178
180 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); 179 if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
181 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); 180 nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
182 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, 181 nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
183 dev->dev_addr); 182 dev->dev_addr) ||
184 NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_SHORT_ADDR, coord_addr); 183 nla_put_u16(msg, IEEE802154_ATTR_COORD_SHORT_ADDR, coord_addr) ||
185 NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_PAN_ID, panid); 184 nla_put_u16(msg, IEEE802154_ATTR_COORD_PAN_ID, panid))
186 185 goto nla_put_failure;
187 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); 186 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
188 187
189nla_put_failure: 188nla_put_failure:
@@ -204,19 +203,17 @@ int ieee802154_nl_scan_confirm(struct net_device *dev,
204 if (!msg) 203 if (!msg)
205 return -ENOBUFS; 204 return -ENOBUFS;
206 205
207 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); 206 if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
208 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); 207 nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
209 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, 208 nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
210 dev->dev_addr); 209 dev->dev_addr) ||
211 210 nla_put_u8(msg, IEEE802154_ATTR_STATUS, status) ||
212 NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); 211 nla_put_u8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type) ||
213 NLA_PUT_U8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type); 212 nla_put_u32(msg, IEEE802154_ATTR_CHANNELS, unscanned) ||
214 NLA_PUT_U32(msg, IEEE802154_ATTR_CHANNELS, unscanned); 213 nla_put_u8(msg, IEEE802154_ATTR_PAGE, page) ||
215 NLA_PUT_U8(msg, IEEE802154_ATTR_PAGE, page); 214 (edl &&
216 215 nla_put(msg, IEEE802154_ATTR_ED_LIST, 27, edl)))
217 if (edl) 216 goto nla_put_failure;
218 NLA_PUT(msg, IEEE802154_ATTR_ED_LIST, 27, edl);
219
220 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); 217 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
221 218
222nla_put_failure: 219nla_put_failure:
@@ -235,13 +232,12 @@ int ieee802154_nl_start_confirm(struct net_device *dev, u8 status)
235 if (!msg) 232 if (!msg)
236 return -ENOBUFS; 233 return -ENOBUFS;
237 234
238 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); 235 if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
239 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); 236 nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
240 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, 237 nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
241 dev->dev_addr); 238 dev->dev_addr) ||
242 239 nla_put_u8(msg, IEEE802154_ATTR_STATUS, status))
243 NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); 240 goto nla_put_failure;
244
245 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); 241 return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id);
246 242
247nla_put_failure: 243nla_put_failure:
@@ -266,16 +262,16 @@ static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 pid,
266 phy = ieee802154_mlme_ops(dev)->get_phy(dev); 262 phy = ieee802154_mlme_ops(dev)->get_phy(dev);
267 BUG_ON(!phy); 263 BUG_ON(!phy);
268 264
269 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); 265 if (nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name) ||
270 NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)); 266 nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
271 NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); 267 nla_put_u32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex) ||
272 268 nla_put(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN,
273 NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, 269 dev->dev_addr) ||
274 dev->dev_addr); 270 nla_put_u16(msg, IEEE802154_ATTR_SHORT_ADDR,
275 NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR, 271 ieee802154_mlme_ops(dev)->get_short_addr(dev)) ||
276 ieee802154_mlme_ops(dev)->get_short_addr(dev)); 272 nla_put_u16(msg, IEEE802154_ATTR_PAN_ID,
277 NLA_PUT_U16(msg, IEEE802154_ATTR_PAN_ID, 273 ieee802154_mlme_ops(dev)->get_pan_id(dev)))
278 ieee802154_mlme_ops(dev)->get_pan_id(dev)); 274 goto nla_put_failure;
279 wpan_phy_put(phy); 275 wpan_phy_put(phy);
280 return genlmsg_end(msg, hdr); 276 return genlmsg_end(msg, hdr);
281 277
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index c64a38d57aa3..3bdc4303c339 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -53,18 +53,18 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 pid,
53 goto out; 53 goto out;
54 54
55 mutex_lock(&phy->pib_lock); 55 mutex_lock(&phy->pib_lock);
56 NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)); 56 if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
57 57 nla_put_u8(msg, IEEE802154_ATTR_PAGE, phy->current_page) ||
58 NLA_PUT_U8(msg, IEEE802154_ATTR_PAGE, phy->current_page); 58 nla_put_u8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel))
59 NLA_PUT_U8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel); 59 goto nla_put_failure;
60 for (i = 0; i < 32; i++) { 60 for (i = 0; i < 32; i++) {
61 if (phy->channels_supported[i]) 61 if (phy->channels_supported[i])
62 buf[pages++] = phy->channels_supported[i] | (i << 27); 62 buf[pages++] = phy->channels_supported[i] | (i << 27);
63 } 63 }
64 if (pages) 64 if (pages &&
65 NLA_PUT(msg, IEEE802154_ATTR_CHANNEL_PAGE_LIST, 65 nla_put(msg, IEEE802154_ATTR_CHANNEL_PAGE_LIST,
66 pages * sizeof(uint32_t), buf); 66 pages * sizeof(uint32_t), buf))
67 67 goto nla_put_failure;
68 mutex_unlock(&phy->pib_lock); 68 mutex_unlock(&phy->pib_lock);
69 kfree(buf); 69 kfree(buf);
70 return genlmsg_end(msg, hdr); 70 return genlmsg_end(msg, hdr);
@@ -245,9 +245,9 @@ static int ieee802154_add_iface(struct sk_buff *skb,
245 goto dev_unregister; 245 goto dev_unregister;
246 } 246 }
247 247
248 NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)); 248 if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
249 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); 249 nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name))
250 250 goto nla_put_failure;
251 dev_put(dev); 251 dev_put(dev);
252 252
253 wpan_phy_put(phy); 253 wpan_phy_put(phy);
@@ -333,10 +333,9 @@ static int ieee802154_del_iface(struct sk_buff *skb,
333 333
334 rtnl_unlock(); 334 rtnl_unlock();
335 335
336 336 if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
337 NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)); 337 nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, name))
338 NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, name); 338 goto nla_put_failure;
339
340 wpan_phy_put(phy); 339 wpan_phy_put(phy);
341 340
342 return ieee802154_nl_reply(msg, info); 341 return ieee802154_nl_reply(msg, info);
diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
index f96bae8fd330..50e823927d49 100644
--- a/net/ieee802154/raw.c
+++ b/net/ieee802154/raw.c
@@ -106,7 +106,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
106 size_t size) 106 size_t size)
107{ 107{
108 struct net_device *dev; 108 struct net_device *dev;
109 unsigned mtu; 109 unsigned int mtu;
110 struct sk_buff *skb; 110 struct sk_buff *skb;
111 int hlen, tlen; 111 int hlen, tlen;
112 int err; 112 int err;
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 10e3751466b5..c8f7aee587d1 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -350,7 +350,7 @@ lookup_protocol:
350 err = 0; 350 err = 0;
351 sk->sk_no_check = answer_no_check; 351 sk->sk_no_check = answer_no_check;
352 if (INET_PROTOSW_REUSE & answer_flags) 352 if (INET_PROTOSW_REUSE & answer_flags)
353 sk->sk_reuse = 1; 353 sk->sk_reuse = SK_CAN_REUSE;
354 354
355 inet = inet_sk(sk); 355 inet = inet_sk(sk);
356 inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0; 356 inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0;
@@ -541,7 +541,7 @@ out:
541} 541}
542EXPORT_SYMBOL(inet_bind); 542EXPORT_SYMBOL(inet_bind);
543 543
544int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr, 544int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
545 int addr_len, int flags) 545 int addr_len, int flags)
546{ 546{
547 struct sock *sk = sock->sk; 547 struct sock *sk = sock->sk;
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index fd508b526014..3a280756dd73 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -77,7 +77,7 @@ static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
77 77
78static int ip_clear_mutable_options(const struct iphdr *iph, __be32 *daddr) 78static int ip_clear_mutable_options(const struct iphdr *iph, __be32 *daddr)
79{ 79{
80 unsigned char * optptr = (unsigned char*)(iph+1); 80 unsigned char *optptr = (unsigned char *)(iph+1);
81 int l = iph->ihl*4 - sizeof(struct iphdr); 81 int l = iph->ihl*4 - sizeof(struct iphdr);
82 int optlen; 82 int optlen;
83 83
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 18d9b81ecb1a..373b56bf8f49 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1059,7 +1059,7 @@ static int arp_req_set(struct net *net, struct arpreq *r,
1059 neigh = __neigh_lookup_errno(&arp_tbl, &ip, dev); 1059 neigh = __neigh_lookup_errno(&arp_tbl, &ip, dev);
1060 err = PTR_ERR(neigh); 1060 err = PTR_ERR(neigh);
1061 if (!IS_ERR(neigh)) { 1061 if (!IS_ERR(neigh)) {
1062 unsigned state = NUD_STALE; 1062 unsigned int state = NUD_STALE;
1063 if (r->arp_flags & ATF_PERM) 1063 if (r->arp_flags & ATF_PERM)
1064 state = NUD_PERMANENT; 1064 state = NUD_PERMANENT;
1065 err = neigh_update(neigh, (r->arp_flags & ATF_COM) ? 1065 err = neigh_update(neigh, (r->arp_flags & ATF_COM) ?
@@ -1071,7 +1071,7 @@ static int arp_req_set(struct net *net, struct arpreq *r,
1071 return err; 1071 return err;
1072} 1072}
1073 1073
1074static unsigned arp_state_to_flags(struct neighbour *neigh) 1074static unsigned int arp_state_to_flags(struct neighbour *neigh)
1075{ 1075{
1076 if (neigh->nud_state&NUD_PERMANENT) 1076 if (neigh->nud_state&NUD_PERMANENT)
1077 return ATF_PERM | ATF_COM; 1077 return ATF_PERM | ATF_COM;
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 6e447ff94dfa..88c9e3f68c78 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1125,7 +1125,7 @@ skip:
1125 } 1125 }
1126} 1126}
1127 1127
1128static inline bool inetdev_valid_mtu(unsigned mtu) 1128static inline bool inetdev_valid_mtu(unsigned int mtu)
1129{ 1129{
1130 return mtu >= 68; 1130 return mtu >= 68;
1131} 1131}
@@ -1266,17 +1266,15 @@ static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
1266 ifm->ifa_scope = ifa->ifa_scope; 1266 ifm->ifa_scope = ifa->ifa_scope;
1267 ifm->ifa_index = ifa->ifa_dev->dev->ifindex; 1267 ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
1268 1268
1269 if (ifa->ifa_address) 1269 if ((ifa->ifa_address &&
1270 NLA_PUT_BE32(skb, IFA_ADDRESS, ifa->ifa_address); 1270 nla_put_be32(skb, IFA_ADDRESS, ifa->ifa_address)) ||
1271 1271 (ifa->ifa_local &&
1272 if (ifa->ifa_local) 1272 nla_put_be32(skb, IFA_LOCAL, ifa->ifa_local)) ||
1273 NLA_PUT_BE32(skb, IFA_LOCAL, ifa->ifa_local); 1273 (ifa->ifa_broadcast &&
1274 1274 nla_put_be32(skb, IFA_BROADCAST, ifa->ifa_broadcast)) ||
1275 if (ifa->ifa_broadcast) 1275 (ifa->ifa_label[0] &&
1276 NLA_PUT_BE32(skb, IFA_BROADCAST, ifa->ifa_broadcast); 1276 nla_put_string(skb, IFA_LABEL, ifa->ifa_label)))
1277 1277 goto nla_put_failure;
1278 if (ifa->ifa_label[0])
1279 NLA_PUT_STRING(skb, IFA_LABEL, ifa->ifa_label);
1280 1278
1281 return nlmsg_end(skb, nlh); 1279 return nlmsg_end(skb, nlh);
1282 1280
@@ -1587,7 +1585,6 @@ static int ipv4_doint_and_flush(ctl_table *ctl, int write,
1587static struct devinet_sysctl_table { 1585static struct devinet_sysctl_table {
1588 struct ctl_table_header *sysctl_header; 1586 struct ctl_table_header *sysctl_header;
1589 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX]; 1587 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
1590 char *dev_name;
1591} devinet_sysctl = { 1588} devinet_sysctl = {
1592 .devinet_vars = { 1589 .devinet_vars = {
1593 DEVINET_SYSCTL_COMPLEX_ENTRY(FORWARDING, "forwarding", 1590 DEVINET_SYSCTL_COMPLEX_ENTRY(FORWARDING, "forwarding",
@@ -1629,16 +1626,7 @@ static int __devinet_sysctl_register(struct net *net, char *dev_name,
1629{ 1626{
1630 int i; 1627 int i;
1631 struct devinet_sysctl_table *t; 1628 struct devinet_sysctl_table *t;
1632 1629 char path[sizeof("net/ipv4/conf/") + IFNAMSIZ];
1633#define DEVINET_CTL_PATH_DEV 3
1634
1635 struct ctl_path devinet_ctl_path[] = {
1636 { .procname = "net", },
1637 { .procname = "ipv4", },
1638 { .procname = "conf", },
1639 { /* to be set */ },
1640 { },
1641 };
1642 1630
1643 t = kmemdup(&devinet_sysctl, sizeof(*t), GFP_KERNEL); 1631 t = kmemdup(&devinet_sysctl, sizeof(*t), GFP_KERNEL);
1644 if (!t) 1632 if (!t)
@@ -1650,27 +1638,15 @@ static int __devinet_sysctl_register(struct net *net, char *dev_name,
1650 t->devinet_vars[i].extra2 = net; 1638 t->devinet_vars[i].extra2 = net;
1651 } 1639 }
1652 1640
1653 /* 1641 snprintf(path, sizeof(path), "net/ipv4/conf/%s", dev_name);
1654 * Make a copy of dev_name, because '.procname' is regarded as const
1655 * by sysctl and we wouldn't want anyone to change it under our feet
1656 * (see SIOCSIFNAME).
1657 */
1658 t->dev_name = kstrdup(dev_name, GFP_KERNEL);
1659 if (!t->dev_name)
1660 goto free;
1661
1662 devinet_ctl_path[DEVINET_CTL_PATH_DEV].procname = t->dev_name;
1663 1642
1664 t->sysctl_header = register_net_sysctl_table(net, devinet_ctl_path, 1643 t->sysctl_header = register_net_sysctl(net, path, t->devinet_vars);
1665 t->devinet_vars);
1666 if (!t->sysctl_header) 1644 if (!t->sysctl_header)
1667 goto free_procname; 1645 goto free;
1668 1646
1669 p->sysctl = t; 1647 p->sysctl = t;
1670 return 0; 1648 return 0;
1671 1649
1672free_procname:
1673 kfree(t->dev_name);
1674free: 1650free:
1675 kfree(t); 1651 kfree(t);
1676out: 1652out:
@@ -1686,7 +1662,6 @@ static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf)
1686 1662
1687 cnf->sysctl = NULL; 1663 cnf->sysctl = NULL;
1688 unregister_net_sysctl_table(t->sysctl_header); 1664 unregister_net_sysctl_table(t->sysctl_header);
1689 kfree(t->dev_name);
1690 kfree(t); 1665 kfree(t);
1691} 1666}
1692 1667
@@ -1716,12 +1691,6 @@ static struct ctl_table ctl_forward_entry[] = {
1716 }, 1691 },
1717 { }, 1692 { },
1718}; 1693};
1719
1720static __net_initdata struct ctl_path net_ipv4_path[] = {
1721 { .procname = "net", },
1722 { .procname = "ipv4", },
1723 { },
1724};
1725#endif 1694#endif
1726 1695
1727static __net_init int devinet_init_net(struct net *net) 1696static __net_init int devinet_init_net(struct net *net)
@@ -1767,7 +1736,7 @@ static __net_init int devinet_init_net(struct net *net)
1767 goto err_reg_dflt; 1736 goto err_reg_dflt;
1768 1737
1769 err = -ENOMEM; 1738 err = -ENOMEM;
1770 forw_hdr = register_net_sysctl_table(net, net_ipv4_path, tbl); 1739 forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
1771 if (forw_hdr == NULL) 1740 if (forw_hdr == NULL)
1772 goto err_reg_ctl; 1741 goto err_reg_ctl;
1773 net->ipv4.forw_hdr = forw_hdr; 1742 net->ipv4.forw_hdr = forw_hdr;
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index cbe3a68507cf..3854411fa37c 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -136,13 +136,13 @@ static void fib_flush(struct net *net)
136 * Find address type as if only "dev" was present in the system. If 136 * Find address type as if only "dev" was present in the system. If
137 * on_dev is NULL then all interfaces are taken into consideration. 137 * on_dev is NULL then all interfaces are taken into consideration.
138 */ 138 */
139static inline unsigned __inet_dev_addr_type(struct net *net, 139static inline unsigned int __inet_dev_addr_type(struct net *net,
140 const struct net_device *dev, 140 const struct net_device *dev,
141 __be32 addr) 141 __be32 addr)
142{ 142{
143 struct flowi4 fl4 = { .daddr = addr }; 143 struct flowi4 fl4 = { .daddr = addr };
144 struct fib_result res; 144 struct fib_result res;
145 unsigned ret = RTN_BROADCAST; 145 unsigned int ret = RTN_BROADCAST;
146 struct fib_table *local_table; 146 struct fib_table *local_table;
147 147
148 if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr)) 148 if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr))
@@ -740,7 +740,7 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
740#define BRD_OK 2 740#define BRD_OK 2
741#define BRD0_OK 4 741#define BRD0_OK 4
742#define BRD1_OK 8 742#define BRD1_OK 8
743 unsigned ok = 0; 743 unsigned int ok = 0;
744 int subnet = 0; /* Primary network */ 744 int subnet = 0; /* Primary network */
745 int gone = 1; /* Address is missing */ 745 int gone = 1; /* Address is missing */
746 int same_prefsrc = 0; /* Another primary with same IP */ 746 int same_prefsrc = 0; /* Another primary with same IP */
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 799fc790b3cf..2d043f71ef70 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -221,15 +221,15 @@ static int fib4_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
221 frh->src_len = rule4->src_len; 221 frh->src_len = rule4->src_len;
222 frh->tos = rule4->tos; 222 frh->tos = rule4->tos;
223 223
224 if (rule4->dst_len) 224 if ((rule4->dst_len &&
225 NLA_PUT_BE32(skb, FRA_DST, rule4->dst); 225 nla_put_be32(skb, FRA_DST, rule4->dst)) ||
226 226 (rule4->src_len &&
227 if (rule4->src_len) 227 nla_put_be32(skb, FRA_SRC, rule4->src)))
228 NLA_PUT_BE32(skb, FRA_SRC, rule4->src); 228 goto nla_put_failure;
229
230#ifdef CONFIG_IP_ROUTE_CLASSID 229#ifdef CONFIG_IP_ROUTE_CLASSID
231 if (rule4->tclassid) 230 if (rule4->tclassid &&
232 NLA_PUT_U32(skb, FRA_FLOW, rule4->tclassid); 231 nla_put_u32(skb, FRA_FLOW, rule4->tclassid))
232 goto nla_put_failure;
233#endif 233#endif
234 return 0; 234 return 0;
235 235
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 5063fa38ac7b..a8bdf7405433 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -931,33 +931,36 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
931 rtm->rtm_table = tb_id; 931 rtm->rtm_table = tb_id;
932 else 932 else
933 rtm->rtm_table = RT_TABLE_COMPAT; 933 rtm->rtm_table = RT_TABLE_COMPAT;
934 NLA_PUT_U32(skb, RTA_TABLE, tb_id); 934 if (nla_put_u32(skb, RTA_TABLE, tb_id))
935 goto nla_put_failure;
935 rtm->rtm_type = type; 936 rtm->rtm_type = type;
936 rtm->rtm_flags = fi->fib_flags; 937 rtm->rtm_flags = fi->fib_flags;
937 rtm->rtm_scope = fi->fib_scope; 938 rtm->rtm_scope = fi->fib_scope;
938 rtm->rtm_protocol = fi->fib_protocol; 939 rtm->rtm_protocol = fi->fib_protocol;
939 940
940 if (rtm->rtm_dst_len) 941 if (rtm->rtm_dst_len &&
941 NLA_PUT_BE32(skb, RTA_DST, dst); 942 nla_put_be32(skb, RTA_DST, dst))
942 943 goto nla_put_failure;
943 if (fi->fib_priority) 944 if (fi->fib_priority &&
944 NLA_PUT_U32(skb, RTA_PRIORITY, fi->fib_priority); 945 nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
945 946 goto nla_put_failure;
946 if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0) 947 if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
947 goto nla_put_failure; 948 goto nla_put_failure;
948 949
949 if (fi->fib_prefsrc) 950 if (fi->fib_prefsrc &&
950 NLA_PUT_BE32(skb, RTA_PREFSRC, fi->fib_prefsrc); 951 nla_put_be32(skb, RTA_PREFSRC, fi->fib_prefsrc))
951 952 goto nla_put_failure;
952 if (fi->fib_nhs == 1) { 953 if (fi->fib_nhs == 1) {
953 if (fi->fib_nh->nh_gw) 954 if (fi->fib_nh->nh_gw &&
954 NLA_PUT_BE32(skb, RTA_GATEWAY, fi->fib_nh->nh_gw); 955 nla_put_be32(skb, RTA_GATEWAY, fi->fib_nh->nh_gw))
955 956 goto nla_put_failure;
956 if (fi->fib_nh->nh_oif) 957 if (fi->fib_nh->nh_oif &&
957 NLA_PUT_U32(skb, RTA_OIF, fi->fib_nh->nh_oif); 958 nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif))
959 goto nla_put_failure;
958#ifdef CONFIG_IP_ROUTE_CLASSID 960#ifdef CONFIG_IP_ROUTE_CLASSID
959 if (fi->fib_nh[0].nh_tclassid) 961 if (fi->fib_nh[0].nh_tclassid &&
960 NLA_PUT_U32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid); 962 nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
963 goto nla_put_failure;
961#endif 964#endif
962 } 965 }
963#ifdef CONFIG_IP_ROUTE_MULTIPATH 966#ifdef CONFIG_IP_ROUTE_MULTIPATH
@@ -978,11 +981,13 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
978 rtnh->rtnh_hops = nh->nh_weight - 1; 981 rtnh->rtnh_hops = nh->nh_weight - 1;
979 rtnh->rtnh_ifindex = nh->nh_oif; 982 rtnh->rtnh_ifindex = nh->nh_oif;
980 983
981 if (nh->nh_gw) 984 if (nh->nh_gw &&
982 NLA_PUT_BE32(skb, RTA_GATEWAY, nh->nh_gw); 985 nla_put_be32(skb, RTA_GATEWAY, nh->nh_gw))
986 goto nla_put_failure;
983#ifdef CONFIG_IP_ROUTE_CLASSID 987#ifdef CONFIG_IP_ROUTE_CLASSID
984 if (nh->nh_tclassid) 988 if (nh->nh_tclassid &&
985 NLA_PUT_U32(skb, RTA_FLOW, nh->nh_tclassid); 989 nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
990 goto nla_put_failure;
986#endif 991#endif
987 /* length of rtnetlink header + attributes */ 992 /* length of rtnetlink header + attributes */
988 rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh; 993 rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 5dfecfd7d5e9..6699f23e6f55 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -344,10 +344,10 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
344 pip->protocol = IPPROTO_IGMP; 344 pip->protocol = IPPROTO_IGMP;
345 pip->tot_len = 0; /* filled in later */ 345 pip->tot_len = 0; /* filled in later */
346 ip_select_ident(pip, &rt->dst, NULL); 346 ip_select_ident(pip, &rt->dst, NULL);
347 ((u8*)&pip[1])[0] = IPOPT_RA; 347 ((u8 *)&pip[1])[0] = IPOPT_RA;
348 ((u8*)&pip[1])[1] = 4; 348 ((u8 *)&pip[1])[1] = 4;
349 ((u8*)&pip[1])[2] = 0; 349 ((u8 *)&pip[1])[2] = 0;
350 ((u8*)&pip[1])[3] = 0; 350 ((u8 *)&pip[1])[3] = 0;
351 351
352 skb->transport_header = skb->network_header + sizeof(struct iphdr) + 4; 352 skb->transport_header = skb->network_header + sizeof(struct iphdr) + 4;
353 skb_put(skb, sizeof(*pig)); 353 skb_put(skb, sizeof(*pig));
@@ -688,10 +688,10 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
688 iph->saddr = fl4.saddr; 688 iph->saddr = fl4.saddr;
689 iph->protocol = IPPROTO_IGMP; 689 iph->protocol = IPPROTO_IGMP;
690 ip_select_ident(iph, &rt->dst, NULL); 690 ip_select_ident(iph, &rt->dst, NULL);
691 ((u8*)&iph[1])[0] = IPOPT_RA; 691 ((u8 *)&iph[1])[0] = IPOPT_RA;
692 ((u8*)&iph[1])[1] = 4; 692 ((u8 *)&iph[1])[1] = 4;
693 ((u8*)&iph[1])[2] = 0; 693 ((u8 *)&iph[1])[2] = 0;
694 ((u8*)&iph[1])[3] = 0; 694 ((u8 *)&iph[1])[3] = 0;
695 695
696 ih = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr)); 696 ih = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
697 ih->type = type; 697 ih->type = type;
@@ -774,7 +774,7 @@ static int igmp_xmarksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
774 if (psf->sf_count[MCAST_INCLUDE] || 774 if (psf->sf_count[MCAST_INCLUDE] ||
775 pmc->sfcount[MCAST_EXCLUDE] != 775 pmc->sfcount[MCAST_EXCLUDE] !=
776 psf->sf_count[MCAST_EXCLUDE]) 776 psf->sf_count[MCAST_EXCLUDE])
777 continue; 777 break;
778 if (srcs[i] == psf->sf_inaddr) { 778 if (srcs[i] == psf->sf_inaddr) {
779 scount++; 779 scount++;
780 break; 780 break;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 19d66cefd7d3..95e61596e605 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -42,7 +42,8 @@ EXPORT_SYMBOL(sysctl_local_reserved_ports);
42 42
43void inet_get_local_port_range(int *low, int *high) 43void inet_get_local_port_range(int *low, int *high)
44{ 44{
45 unsigned seq; 45 unsigned int seq;
46
46 do { 47 do {
47 seq = read_seqbegin(&sysctl_local_ports.lock); 48 seq = read_seqbegin(&sysctl_local_ports.lock);
48 49
@@ -53,7 +54,7 @@ void inet_get_local_port_range(int *low, int *high)
53EXPORT_SYMBOL(inet_get_local_port_range); 54EXPORT_SYMBOL(inet_get_local_port_range);
54 55
55int inet_csk_bind_conflict(const struct sock *sk, 56int inet_csk_bind_conflict(const struct sock *sk,
56 const struct inet_bind_bucket *tb) 57 const struct inet_bind_bucket *tb, bool relax)
57{ 58{
58 struct sock *sk2; 59 struct sock *sk2;
59 struct hlist_node *node; 60 struct hlist_node *node;
@@ -79,6 +80,14 @@ int inet_csk_bind_conflict(const struct sock *sk,
79 sk2_rcv_saddr == sk_rcv_saddr(sk)) 80 sk2_rcv_saddr == sk_rcv_saddr(sk))
80 break; 81 break;
81 } 82 }
83 if (!relax && reuse && sk2->sk_reuse &&
84 sk2->sk_state != TCP_LISTEN) {
85 const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
86
87 if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
88 sk2_rcv_saddr == sk_rcv_saddr(sk))
89 break;
90 }
82 } 91 }
83 } 92 }
84 return node != NULL; 93 return node != NULL;
@@ -122,12 +131,13 @@ again:
122 (tb->num_owners < smallest_size || smallest_size == -1)) { 131 (tb->num_owners < smallest_size || smallest_size == -1)) {
123 smallest_size = tb->num_owners; 132 smallest_size = tb->num_owners;
124 smallest_rover = rover; 133 smallest_rover = rover;
125 if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) { 134 if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
135 !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
126 snum = smallest_rover; 136 snum = smallest_rover;
127 goto tb_found; 137 goto tb_found;
128 } 138 }
129 } 139 }
130 if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) { 140 if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
131 snum = rover; 141 snum = rover;
132 goto tb_found; 142 goto tb_found;
133 } 143 }
@@ -172,18 +182,22 @@ have_snum:
172 goto tb_not_found; 182 goto tb_not_found;
173tb_found: 183tb_found:
174 if (!hlist_empty(&tb->owners)) { 184 if (!hlist_empty(&tb->owners)) {
185 if (sk->sk_reuse == SK_FORCE_REUSE)
186 goto success;
187
175 if (tb->fastreuse > 0 && 188 if (tb->fastreuse > 0 &&
176 sk->sk_reuse && sk->sk_state != TCP_LISTEN && 189 sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
177 smallest_size == -1) { 190 smallest_size == -1) {
178 goto success; 191 goto success;
179 } else { 192 } else {
180 ret = 1; 193 ret = 1;
181 if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) { 194 if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true)) {
182 if (sk->sk_reuse && sk->sk_state != TCP_LISTEN && 195 if (sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
183 smallest_size != -1 && --attempts >= 0) { 196 smallest_size != -1 && --attempts >= 0) {
184 spin_unlock(&head->lock); 197 spin_unlock(&head->lock);
185 goto again; 198 goto again;
186 } 199 }
200
187 goto fail_unlock; 201 goto fail_unlock;
188 } 202 }
189 } 203 }
@@ -514,7 +528,7 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
514 528
515 /* Normally all the openreqs are young and become mature 529 /* Normally all the openreqs are young and become mature
516 * (i.e. converted to established socket) for first timeout. 530 * (i.e. converted to established socket) for first timeout.
517 * If synack was not acknowledged for 3 seconds, it means 531 * If synack was not acknowledged for 1 second, it means
518 * one of the following things: synack was lost, ack was lost, 532 * one of the following things: synack was lost, ack was lost,
519 * rtt is high or nobody planned to ack (i.e. synflood). 533 * rtt is high or nobody planned to ack (i.e. synflood).
520 * When server is a bit loaded, queue is populated with old 534 * When server is a bit loaded, queue is populated with old
@@ -555,8 +569,7 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
555 syn_ack_recalc(req, thresh, max_retries, 569 syn_ack_recalc(req, thresh, max_retries,
556 queue->rskq_defer_accept, 570 queue->rskq_defer_accept,
557 &expire, &resend); 571 &expire, &resend);
558 if (req->rsk_ops->syn_ack_timeout) 572 req->rsk_ops->syn_ack_timeout(parent, req);
559 req->rsk_ops->syn_ack_timeout(parent, req);
560 if (!expire && 573 if (!expire &&
561 (!resend || 574 (!resend ||
562 !req->rsk_ops->rtx_syn_ack(parent, req, NULL) || 575 !req->rsk_ops->rtx_syn_ack(parent, req, NULL) ||
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 8f8db724bfaf..46d1e7199a8c 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -999,12 +999,12 @@ static int inet_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
999 return inet_diag_get_exact(skb, h, (struct inet_diag_req_v2 *)NLMSG_DATA(h)); 999 return inet_diag_get_exact(skb, h, (struct inet_diag_req_v2 *)NLMSG_DATA(h));
1000} 1000}
1001 1001
1002static struct sock_diag_handler inet_diag_handler = { 1002static const struct sock_diag_handler inet_diag_handler = {
1003 .family = AF_INET, 1003 .family = AF_INET,
1004 .dump = inet_diag_handler_dump, 1004 .dump = inet_diag_handler_dump,
1005}; 1005};
1006 1006
1007static struct sock_diag_handler inet6_diag_handler = { 1007static const struct sock_diag_handler inet6_diag_handler = {
1008 .family = AF_INET6, 1008 .family = AF_INET6,
1009 .dump = inet_diag_handler_dump, 1009 .dump = inet_diag_handler_dump,
1010}; 1010};
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 984ec656b03b..7880af970208 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -217,7 +217,7 @@ begin:
217} 217}
218EXPORT_SYMBOL_GPL(__inet_lookup_listener); 218EXPORT_SYMBOL_GPL(__inet_lookup_listener);
219 219
220struct sock * __inet_lookup_established(struct net *net, 220struct sock *__inet_lookup_established(struct net *net,
221 struct inet_hashinfo *hashinfo, 221 struct inet_hashinfo *hashinfo,
222 const __be32 saddr, const __be16 sport, 222 const __be32 saddr, const __be16 sport,
223 const __be32 daddr, const u16 hnum, 223 const __be32 daddr, const u16 hnum,
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 89168c6351ff..543ef6225458 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -263,7 +263,7 @@ rescan:
263void inet_twdr_hangman(unsigned long data) 263void inet_twdr_hangman(unsigned long data)
264{ 264{
265 struct inet_timewait_death_row *twdr; 265 struct inet_timewait_death_row *twdr;
266 int unsigned need_timer; 266 unsigned int need_timer;
267 267
268 twdr = (struct inet_timewait_death_row *)data; 268 twdr = (struct inet_timewait_death_row *)data;
269 spin_lock(&twdr->death_lock); 269 spin_lock(&twdr->death_lock);
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 29a07b6c7168..e5c44fc586ab 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -41,7 +41,7 @@
41 41
42static int ip_forward_finish(struct sk_buff *skb) 42static int ip_forward_finish(struct sk_buff *skb)
43{ 43{
44 struct ip_options * opt = &(IPCB(skb)->opt); 44 struct ip_options *opt = &(IPCB(skb)->opt);
45 45
46 IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); 46 IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
47 47
@@ -55,7 +55,7 @@ int ip_forward(struct sk_buff *skb)
55{ 55{
56 struct iphdr *iph; /* Our header */ 56 struct iphdr *iph; /* Our header */
57 struct rtable *rt; /* Route we use */ 57 struct rtable *rt; /* Route we use */
58 struct ip_options * opt = &(IPCB(skb)->opt); 58 struct ip_options *opt = &(IPCB(skb)->opt);
59 59
60 if (skb_warn_if_lro(skb)) 60 if (skb_warn_if_lro(skb))
61 goto drop; 61 goto drop;
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 3727e234c884..71e5c328176c 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -569,7 +569,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
569 skb_morph(head, qp->q.fragments); 569 skb_morph(head, qp->q.fragments);
570 head->next = qp->q.fragments->next; 570 head->next = qp->q.fragments->next;
571 571
572 kfree_skb(qp->q.fragments); 572 consume_skb(qp->q.fragments);
573 qp->q.fragments = head; 573 qp->q.fragments = head;
574 } 574 }
575 575
@@ -782,7 +782,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
782 table[2].data = &net->ipv4.frags.timeout; 782 table[2].data = &net->ipv4.frags.timeout;
783 } 783 }
784 784
785 hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table); 785 hdr = register_net_sysctl(net, "net/ipv4", table);
786 if (hdr == NULL) 786 if (hdr == NULL)
787 goto err_reg; 787 goto err_reg;
788 788
@@ -807,7 +807,7 @@ static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
807 807
808static void ip4_frags_ctl_register(void) 808static void ip4_frags_ctl_register(void)
809{ 809{
810 register_net_sysctl_rotable(net_ipv4_ctl_path, ip4_frags_ctl_table); 810 register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table);
811} 811}
812#else 812#else
813static inline int ip4_frags_ns_ctl_register(struct net *net) 813static inline int ip4_frags_ns_ctl_register(struct net *net)
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index b57532d4742c..f49047b79609 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -169,37 +169,56 @@ struct ipgre_net {
169 169
170/* often modified stats are per cpu, other are shared (netdev->stats) */ 170/* often modified stats are per cpu, other are shared (netdev->stats) */
171struct pcpu_tstats { 171struct pcpu_tstats {
172 unsigned long rx_packets; 172 u64 rx_packets;
173 unsigned long rx_bytes; 173 u64 rx_bytes;
174 unsigned long tx_packets; 174 u64 tx_packets;
175 unsigned long tx_bytes; 175 u64 tx_bytes;
176} __attribute__((aligned(4*sizeof(unsigned long)))); 176 struct u64_stats_sync syncp;
177};
177 178
178static struct net_device_stats *ipgre_get_stats(struct net_device *dev) 179static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev,
180 struct rtnl_link_stats64 *tot)
179{ 181{
180 struct pcpu_tstats sum = { 0 };
181 int i; 182 int i;
182 183
183 for_each_possible_cpu(i) { 184 for_each_possible_cpu(i) {
184 const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i); 185 const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
185 186 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
186 sum.rx_packets += tstats->rx_packets; 187 unsigned int start;
187 sum.rx_bytes += tstats->rx_bytes; 188
188 sum.tx_packets += tstats->tx_packets; 189 do {
189 sum.tx_bytes += tstats->tx_bytes; 190 start = u64_stats_fetch_begin_bh(&tstats->syncp);
191 rx_packets = tstats->rx_packets;
192 tx_packets = tstats->tx_packets;
193 rx_bytes = tstats->rx_bytes;
194 tx_bytes = tstats->tx_bytes;
195 } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
196
197 tot->rx_packets += rx_packets;
198 tot->tx_packets += tx_packets;
199 tot->rx_bytes += rx_bytes;
200 tot->tx_bytes += tx_bytes;
190 } 201 }
191 dev->stats.rx_packets = sum.rx_packets; 202
192 dev->stats.rx_bytes = sum.rx_bytes; 203 tot->multicast = dev->stats.multicast;
193 dev->stats.tx_packets = sum.tx_packets; 204 tot->rx_crc_errors = dev->stats.rx_crc_errors;
194 dev->stats.tx_bytes = sum.tx_bytes; 205 tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
195 return &dev->stats; 206 tot->rx_length_errors = dev->stats.rx_length_errors;
207 tot->rx_errors = dev->stats.rx_errors;
208 tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
209 tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
210 tot->tx_dropped = dev->stats.tx_dropped;
211 tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
212 tot->tx_errors = dev->stats.tx_errors;
213
214 return tot;
196} 215}
197 216
198/* Given src, dst and key, find appropriate for input tunnel. */ 217/* Given src, dst and key, find appropriate for input tunnel. */
199 218
200static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev, 219static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev,
201 __be32 remote, __be32 local, 220 __be32 remote, __be32 local,
202 __be32 key, __be16 gre_proto) 221 __be32 key, __be16 gre_proto)
203{ 222{
204 struct net *net = dev_net(dev); 223 struct net *net = dev_net(dev);
205 int link = dev->ifindex; 224 int link = dev->ifindex;
@@ -464,7 +483,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
464 */ 483 */
465 484
466 const struct iphdr *iph = (const struct iphdr *)skb->data; 485 const struct iphdr *iph = (const struct iphdr *)skb->data;
467 __be16 *p = (__be16*)(skb->data+(iph->ihl<<2)); 486 __be16 *p = (__be16 *)(skb->data+(iph->ihl<<2));
468 int grehlen = (iph->ihl<<2) + 4; 487 int grehlen = (iph->ihl<<2) + 4;
469 const int type = icmp_hdr(skb)->type; 488 const int type = icmp_hdr(skb)->type;
470 const int code = icmp_hdr(skb)->code; 489 const int code = icmp_hdr(skb)->code;
@@ -574,7 +593,7 @@ static int ipgre_rcv(struct sk_buff *skb)
574 593
575 iph = ip_hdr(skb); 594 iph = ip_hdr(skb);
576 h = skb->data; 595 h = skb->data;
577 flags = *(__be16*)h; 596 flags = *(__be16 *)h;
578 597
579 if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) { 598 if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
580 /* - Version must be 0. 599 /* - Version must be 0.
@@ -598,11 +617,11 @@ static int ipgre_rcv(struct sk_buff *skb)
598 offset += 4; 617 offset += 4;
599 } 618 }
600 if (flags&GRE_KEY) { 619 if (flags&GRE_KEY) {
601 key = *(__be32*)(h + offset); 620 key = *(__be32 *)(h + offset);
602 offset += 4; 621 offset += 4;
603 } 622 }
604 if (flags&GRE_SEQ) { 623 if (flags&GRE_SEQ) {
605 seqno = ntohl(*(__be32*)(h + offset)); 624 seqno = ntohl(*(__be32 *)(h + offset));
606 offset += 4; 625 offset += 4;
607 } 626 }
608 } 627 }
@@ -672,8 +691,10 @@ static int ipgre_rcv(struct sk_buff *skb)
672 } 691 }
673 692
674 tstats = this_cpu_ptr(tunnel->dev->tstats); 693 tstats = this_cpu_ptr(tunnel->dev->tstats);
694 u64_stats_update_begin(&tstats->syncp);
675 tstats->rx_packets++; 695 tstats->rx_packets++;
676 tstats->rx_bytes += skb->len; 696 tstats->rx_bytes += skb->len;
697 u64_stats_update_end(&tstats->syncp);
677 698
678 __skb_tunnel_rx(skb, tunnel->dev); 699 __skb_tunnel_rx(skb, tunnel->dev);
679 700
@@ -900,7 +921,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
900 htons(ETH_P_TEB) : skb->protocol; 921 htons(ETH_P_TEB) : skb->protocol;
901 922
902 if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) { 923 if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
903 __be32 *ptr = (__be32*)(((u8*)iph) + tunnel->hlen - 4); 924 __be32 *ptr = (__be32 *)(((u8 *)iph) + tunnel->hlen - 4);
904 925
905 if (tunnel->parms.o_flags&GRE_SEQ) { 926 if (tunnel->parms.o_flags&GRE_SEQ) {
906 ++tunnel->o_seqno; 927 ++tunnel->o_seqno;
@@ -913,7 +934,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
913 } 934 }
914 if (tunnel->parms.o_flags&GRE_CSUM) { 935 if (tunnel->parms.o_flags&GRE_CSUM) {
915 *ptr = 0; 936 *ptr = 0;
916 *(__sum16*)ptr = ip_compute_csum((void*)(iph+1), skb->len - sizeof(struct iphdr)); 937 *(__sum16 *)ptr = ip_compute_csum((void *)(iph+1), skb->len - sizeof(struct iphdr));
917 } 938 }
918 } 939 }
919 940
@@ -1169,7 +1190,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
1169{ 1190{
1170 struct ip_tunnel *t = netdev_priv(dev); 1191 struct ip_tunnel *t = netdev_priv(dev);
1171 struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen); 1192 struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen);
1172 __be16 *p = (__be16*)(iph+1); 1193 __be16 *p = (__be16 *)(iph+1);
1173 1194
1174 memcpy(iph, &t->parms.iph, sizeof(struct iphdr)); 1195 memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
1175 p[0] = t->parms.o_flags; 1196 p[0] = t->parms.o_flags;
@@ -1253,7 +1274,7 @@ static const struct net_device_ops ipgre_netdev_ops = {
1253 .ndo_start_xmit = ipgre_tunnel_xmit, 1274 .ndo_start_xmit = ipgre_tunnel_xmit,
1254 .ndo_do_ioctl = ipgre_tunnel_ioctl, 1275 .ndo_do_ioctl = ipgre_tunnel_ioctl,
1255 .ndo_change_mtu = ipgre_tunnel_change_mtu, 1276 .ndo_change_mtu = ipgre_tunnel_change_mtu,
1256 .ndo_get_stats = ipgre_get_stats, 1277 .ndo_get_stats64 = ipgre_get_stats64,
1257}; 1278};
1258 1279
1259static void ipgre_dev_free(struct net_device *dev) 1280static void ipgre_dev_free(struct net_device *dev)
@@ -1507,7 +1528,7 @@ static const struct net_device_ops ipgre_tap_netdev_ops = {
1507 .ndo_set_mac_address = eth_mac_addr, 1528 .ndo_set_mac_address = eth_mac_addr,
1508 .ndo_validate_addr = eth_validate_addr, 1529 .ndo_validate_addr = eth_validate_addr,
1509 .ndo_change_mtu = ipgre_tunnel_change_mtu, 1530 .ndo_change_mtu = ipgre_tunnel_change_mtu,
1510 .ndo_get_stats = ipgre_get_stats, 1531 .ndo_get_stats64 = ipgre_get_stats64,
1511}; 1532};
1512 1533
1513static void ipgre_tap_setup(struct net_device *dev) 1534static void ipgre_tap_setup(struct net_device *dev)
@@ -1654,17 +1675,18 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1654 struct ip_tunnel *t = netdev_priv(dev); 1675 struct ip_tunnel *t = netdev_priv(dev);
1655 struct ip_tunnel_parm *p = &t->parms; 1676 struct ip_tunnel_parm *p = &t->parms;
1656 1677
1657 NLA_PUT_U32(skb, IFLA_GRE_LINK, p->link); 1678 if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
1658 NLA_PUT_BE16(skb, IFLA_GRE_IFLAGS, p->i_flags); 1679 nla_put_be16(skb, IFLA_GRE_IFLAGS, p->i_flags) ||
1659 NLA_PUT_BE16(skb, IFLA_GRE_OFLAGS, p->o_flags); 1680 nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) ||
1660 NLA_PUT_BE32(skb, IFLA_GRE_IKEY, p->i_key); 1681 nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
1661 NLA_PUT_BE32(skb, IFLA_GRE_OKEY, p->o_key); 1682 nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
1662 NLA_PUT_BE32(skb, IFLA_GRE_LOCAL, p->iph.saddr); 1683 nla_put_be32(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
1663 NLA_PUT_BE32(skb, IFLA_GRE_REMOTE, p->iph.daddr); 1684 nla_put_be32(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
1664 NLA_PUT_U8(skb, IFLA_GRE_TTL, p->iph.ttl); 1685 nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
1665 NLA_PUT_U8(skb, IFLA_GRE_TOS, p->iph.tos); 1686 nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
1666 NLA_PUT_U8(skb, IFLA_GRE_PMTUDISC, !!(p->iph.frag_off & htons(IP_DF))); 1687 nla_put_u8(skb, IFLA_GRE_PMTUDISC,
1667 1688 !!(p->iph.frag_off & htons(IP_DF))))
1689 goto nla_put_failure;
1668 return 0; 1690 return 0;
1669 1691
1670nla_put_failure: 1692nla_put_failure:
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index a0d0d9d9b870..95722ed0e5bb 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -210,10 +210,10 @@ int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb)
210 * Simple and stupid 8), but the most efficient way. 210 * Simple and stupid 8), but the most efficient way.
211 */ 211 */
212 212
213void ip_options_fragment(struct sk_buff * skb) 213void ip_options_fragment(struct sk_buff *skb)
214{ 214{
215 unsigned char *optptr = skb_network_header(skb) + sizeof(struct iphdr); 215 unsigned char *optptr = skb_network_header(skb) + sizeof(struct iphdr);
216 struct ip_options * opt = &(IPCB(skb)->opt); 216 struct ip_options *opt = &(IPCB(skb)->opt);
217 int l = opt->optlen; 217 int l = opt->optlen;
218 int optlen; 218 int optlen;
219 219
@@ -248,13 +248,13 @@ void ip_options_fragment(struct sk_buff * skb)
248 */ 248 */
249 249
250int ip_options_compile(struct net *net, 250int ip_options_compile(struct net *net,
251 struct ip_options * opt, struct sk_buff * skb) 251 struct ip_options *opt, struct sk_buff *skb)
252{ 252{
253 int l; 253 int l;
254 unsigned char * iph; 254 unsigned char *iph;
255 unsigned char * optptr; 255 unsigned char *optptr;
256 int optlen; 256 int optlen;
257 unsigned char * pp_ptr = NULL; 257 unsigned char *pp_ptr = NULL;
258 struct rtable *rt = NULL; 258 struct rtable *rt = NULL;
259 259
260 if (skb != NULL) { 260 if (skb != NULL) {
@@ -413,7 +413,7 @@ int ip_options_compile(struct net *net,
413 opt->is_changed = 1; 413 opt->is_changed = 1;
414 } 414 }
415 } else { 415 } else {
416 unsigned overflow = optptr[3]>>4; 416 unsigned int overflow = optptr[3]>>4;
417 if (overflow == 15) { 417 if (overflow == 15) {
418 pp_ptr = optptr + 3; 418 pp_ptr = optptr + 3;
419 goto error; 419 goto error;
@@ -473,20 +473,20 @@ EXPORT_SYMBOL(ip_options_compile);
473 * Undo all the changes done by ip_options_compile(). 473 * Undo all the changes done by ip_options_compile().
474 */ 474 */
475 475
476void ip_options_undo(struct ip_options * opt) 476void ip_options_undo(struct ip_options *opt)
477{ 477{
478 if (opt->srr) { 478 if (opt->srr) {
479 unsigned char * optptr = opt->__data+opt->srr-sizeof(struct iphdr); 479 unsigned char *optptr = opt->__data+opt->srr-sizeof(struct iphdr);
480 memmove(optptr+7, optptr+3, optptr[1]-7); 480 memmove(optptr+7, optptr+3, optptr[1]-7);
481 memcpy(optptr+3, &opt->faddr, 4); 481 memcpy(optptr+3, &opt->faddr, 4);
482 } 482 }
483 if (opt->rr_needaddr) { 483 if (opt->rr_needaddr) {
484 unsigned char * optptr = opt->__data+opt->rr-sizeof(struct iphdr); 484 unsigned char *optptr = opt->__data+opt->rr-sizeof(struct iphdr);
485 optptr[2] -= 4; 485 optptr[2] -= 4;
486 memset(&optptr[optptr[2]-1], 0, 4); 486 memset(&optptr[optptr[2]-1], 0, 4);
487 } 487 }
488 if (opt->ts) { 488 if (opt->ts) {
489 unsigned char * optptr = opt->__data+opt->ts-sizeof(struct iphdr); 489 unsigned char *optptr = opt->__data+opt->ts-sizeof(struct iphdr);
490 if (opt->ts_needtime) { 490 if (opt->ts_needtime) {
491 optptr[2] -= 4; 491 optptr[2] -= 4;
492 memset(&optptr[optptr[2]-1], 0, 4); 492 memset(&optptr[optptr[2]-1], 0, 4);
@@ -549,8 +549,8 @@ int ip_options_get(struct net *net, struct ip_options_rcu **optp,
549 549
550void ip_forward_options(struct sk_buff *skb) 550void ip_forward_options(struct sk_buff *skb)
551{ 551{
552 struct ip_options * opt = &(IPCB(skb)->opt); 552 struct ip_options *opt = &(IPCB(skb)->opt);
553 unsigned char * optptr; 553 unsigned char *optptr;
554 struct rtable *rt = skb_rtable(skb); 554 struct rtable *rt = skb_rtable(skb);
555 unsigned char *raw = skb_network_header(skb); 555 unsigned char *raw = skb_network_header(skb);
556 556
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 2fd0fba77124..0d11f234d615 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -90,7 +90,7 @@ static void ip_cmsg_recv_opts(struct msghdr *msg, struct sk_buff *skb)
90static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb) 90static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
91{ 91{
92 unsigned char optbuf[sizeof(struct ip_options) + 40]; 92 unsigned char optbuf[sizeof(struct ip_options) + 40];
93 struct ip_options * opt = (struct ip_options *)optbuf; 93 struct ip_options *opt = (struct ip_options *)optbuf;
94 94
95 if (IPCB(skb)->opt.optlen == 0) 95 if (IPCB(skb)->opt.optlen == 0)
96 return; 96 return;
@@ -147,7 +147,7 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
147void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb) 147void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
148{ 148{
149 struct inet_sock *inet = inet_sk(skb->sk); 149 struct inet_sock *inet = inet_sk(skb->sk);
150 unsigned flags = inet->cmsg_flags; 150 unsigned int flags = inet->cmsg_flags;
151 151
152 /* Ordered by supposed usage frequency */ 152 /* Ordered by supposed usage frequency */
153 if (flags & 1) 153 if (flags & 1)
@@ -673,10 +673,15 @@ static int do_ip_setsockopt(struct sock *sk, int level,
673 break; 673 break;
674 } else { 674 } else {
675 memset(&mreq, 0, sizeof(mreq)); 675 memset(&mreq, 0, sizeof(mreq));
676 if (optlen >= sizeof(struct in_addr) && 676 if (optlen >= sizeof(struct ip_mreq)) {
677 copy_from_user(&mreq.imr_address, optval, 677 if (copy_from_user(&mreq, optval,
678 sizeof(struct in_addr))) 678 sizeof(struct ip_mreq)))
679 break; 679 break;
680 } else if (optlen >= sizeof(struct in_addr)) {
681 if (copy_from_user(&mreq.imr_address, optval,
682 sizeof(struct in_addr)))
683 break;
684 }
680 } 685 }
681 686
682 if (!mreq.imr_ifindex) { 687 if (!mreq.imr_ifindex) {
@@ -1094,7 +1099,7 @@ EXPORT_SYMBOL(compat_ip_setsockopt);
1094 */ 1099 */
1095 1100
1096static int do_ip_getsockopt(struct sock *sk, int level, int optname, 1101static int do_ip_getsockopt(struct sock *sk, int level, int optname,
1097 char __user *optval, int __user *optlen, unsigned flags) 1102 char __user *optval, int __user *optlen, unsigned int flags)
1098{ 1103{
1099 struct inet_sock *inet = inet_sk(sk); 1104 struct inet_sock *inet = inet_sk(sk);
1100 int val; 1105 int val;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 92ac7e7363a0..f267280d8709 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1198,7 +1198,7 @@ static int __init ic_dynamic(void)
1198 d = ic_first_dev; 1198 d = ic_first_dev;
1199 retries = CONF_SEND_RETRIES; 1199 retries = CONF_SEND_RETRIES;
1200 get_random_bytes(&timeout, sizeof(timeout)); 1200 get_random_bytes(&timeout, sizeof(timeout));
1201 timeout = CONF_BASE_TIMEOUT + (timeout % (unsigned) CONF_TIMEOUT_RANDOM); 1201 timeout = CONF_BASE_TIMEOUT + (timeout % (unsigned int) CONF_TIMEOUT_RANDOM);
1202 for (;;) { 1202 for (;;) {
1203 /* Track the device we are configuring */ 1203 /* Track the device we are configuring */
1204 ic_dev_xid = d->xid; 1204 ic_dev_xid = d->xid;
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index ae1413e3f2f8..2d0f99bf61b3 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -144,33 +144,48 @@ static void ipip_dev_free(struct net_device *dev);
144 144
145/* often modified stats are per cpu, other are shared (netdev->stats) */ 145/* often modified stats are per cpu, other are shared (netdev->stats) */
146struct pcpu_tstats { 146struct pcpu_tstats {
147 unsigned long rx_packets; 147 u64 rx_packets;
148 unsigned long rx_bytes; 148 u64 rx_bytes;
149 unsigned long tx_packets; 149 u64 tx_packets;
150 unsigned long tx_bytes; 150 u64 tx_bytes;
151} __attribute__((aligned(4*sizeof(unsigned long)))); 151 struct u64_stats_sync syncp;
152};
152 153
153static struct net_device_stats *ipip_get_stats(struct net_device *dev) 154static struct rtnl_link_stats64 *ipip_get_stats64(struct net_device *dev,
155 struct rtnl_link_stats64 *tot)
154{ 156{
155 struct pcpu_tstats sum = { 0 };
156 int i; 157 int i;
157 158
158 for_each_possible_cpu(i) { 159 for_each_possible_cpu(i) {
159 const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i); 160 const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
160 161 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
161 sum.rx_packets += tstats->rx_packets; 162 unsigned int start;
162 sum.rx_bytes += tstats->rx_bytes; 163
163 sum.tx_packets += tstats->tx_packets; 164 do {
164 sum.tx_bytes += tstats->tx_bytes; 165 start = u64_stats_fetch_begin_bh(&tstats->syncp);
166 rx_packets = tstats->rx_packets;
167 tx_packets = tstats->tx_packets;
168 rx_bytes = tstats->rx_bytes;
169 tx_bytes = tstats->tx_bytes;
170 } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
171
172 tot->rx_packets += rx_packets;
173 tot->tx_packets += tx_packets;
174 tot->rx_bytes += rx_bytes;
175 tot->tx_bytes += tx_bytes;
165 } 176 }
166 dev->stats.rx_packets = sum.rx_packets; 177
167 dev->stats.rx_bytes = sum.rx_bytes; 178 tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
168 dev->stats.tx_packets = sum.tx_packets; 179 tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
169 dev->stats.tx_bytes = sum.tx_bytes; 180 tot->tx_dropped = dev->stats.tx_dropped;
170 return &dev->stats; 181 tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
182 tot->tx_errors = dev->stats.tx_errors;
183 tot->collisions = dev->stats.collisions;
184
185 return tot;
171} 186}
172 187
173static struct ip_tunnel * ipip_tunnel_lookup(struct net *net, 188static struct ip_tunnel *ipip_tunnel_lookup(struct net *net,
174 __be32 remote, __be32 local) 189 __be32 remote, __be32 local)
175{ 190{
176 unsigned int h0 = HASH(remote); 191 unsigned int h0 = HASH(remote);
@@ -245,7 +260,7 @@ static void ipip_tunnel_link(struct ipip_net *ipn, struct ip_tunnel *t)
245 rcu_assign_pointer(*tp, t); 260 rcu_assign_pointer(*tp, t);
246} 261}
247 262
248static struct ip_tunnel * ipip_tunnel_locate(struct net *net, 263static struct ip_tunnel *ipip_tunnel_locate(struct net *net,
249 struct ip_tunnel_parm *parms, int create) 264 struct ip_tunnel_parm *parms, int create)
250{ 265{
251 __be32 remote = parms->iph.daddr; 266 __be32 remote = parms->iph.daddr;
@@ -404,8 +419,10 @@ static int ipip_rcv(struct sk_buff *skb)
404 skb->pkt_type = PACKET_HOST; 419 skb->pkt_type = PACKET_HOST;
405 420
406 tstats = this_cpu_ptr(tunnel->dev->tstats); 421 tstats = this_cpu_ptr(tunnel->dev->tstats);
422 u64_stats_update_begin(&tstats->syncp);
407 tstats->rx_packets++; 423 tstats->rx_packets++;
408 tstats->rx_bytes += skb->len; 424 tstats->rx_bytes += skb->len;
425 u64_stats_update_end(&tstats->syncp);
409 426
410 __skb_tunnel_rx(skb, tunnel->dev); 427 __skb_tunnel_rx(skb, tunnel->dev);
411 428
@@ -730,7 +747,7 @@ static const struct net_device_ops ipip_netdev_ops = {
730 .ndo_start_xmit = ipip_tunnel_xmit, 747 .ndo_start_xmit = ipip_tunnel_xmit,
731 .ndo_do_ioctl = ipip_tunnel_ioctl, 748 .ndo_do_ioctl = ipip_tunnel_ioctl,
732 .ndo_change_mtu = ipip_tunnel_change_mtu, 749 .ndo_change_mtu = ipip_tunnel_change_mtu,
733 .ndo_get_stats = ipip_get_stats, 750 .ndo_get_stats64 = ipip_get_stats64,
734}; 751};
735 752
736static void ipip_dev_free(struct net_device *dev) 753static void ipip_dev_free(struct net_device *dev)
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 960fbfc3e976..5bef604ac0fa 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -2119,15 +2119,16 @@ static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
2119 rtm->rtm_src_len = 32; 2119 rtm->rtm_src_len = 32;
2120 rtm->rtm_tos = 0; 2120 rtm->rtm_tos = 0;
2121 rtm->rtm_table = mrt->id; 2121 rtm->rtm_table = mrt->id;
2122 NLA_PUT_U32(skb, RTA_TABLE, mrt->id); 2122 if (nla_put_u32(skb, RTA_TABLE, mrt->id))
2123 goto nla_put_failure;
2123 rtm->rtm_type = RTN_MULTICAST; 2124 rtm->rtm_type = RTN_MULTICAST;
2124 rtm->rtm_scope = RT_SCOPE_UNIVERSE; 2125 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2125 rtm->rtm_protocol = RTPROT_UNSPEC; 2126 rtm->rtm_protocol = RTPROT_UNSPEC;
2126 rtm->rtm_flags = 0; 2127 rtm->rtm_flags = 0;
2127 2128
2128 NLA_PUT_BE32(skb, RTA_SRC, c->mfc_origin); 2129 if (nla_put_be32(skb, RTA_SRC, c->mfc_origin) ||
2129 NLA_PUT_BE32(skb, RTA_DST, c->mfc_mcastgrp); 2130 nla_put_be32(skb, RTA_DST, c->mfc_mcastgrp))
2130 2131 goto nla_put_failure;
2131 if (__ipmr_fill_mroute(mrt, skb, c, rtm) < 0) 2132 if (__ipmr_fill_mroute(mrt, skb, c, rtm) < 0)
2132 goto nla_put_failure; 2133 goto nla_put_failure;
2133 2134
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index 4f47e064e262..ed1b36783192 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -12,7 +12,7 @@
12#include <net/netfilter/nf_queue.h> 12#include <net/netfilter/nf_queue.h>
13 13
14/* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */ 14/* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */
15int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type) 15int ip_route_me_harder(struct sk_buff *skb, unsigned int addr_type)
16{ 16{
17 struct net *net = dev_net(skb_dst(skb)->dev); 17 struct net *net = dev_net(skb_dst(skb)->dev);
18 const struct iphdr *iph = ip_hdr(skb); 18 const struct iphdr *iph = ip_hdr(skb);
@@ -237,13 +237,3 @@ static void ipv4_netfilter_fini(void)
237 237
238module_init(ipv4_netfilter_init); 238module_init(ipv4_netfilter_init);
239module_exit(ipv4_netfilter_fini); 239module_exit(ipv4_netfilter_fini);
240
241#ifdef CONFIG_SYSCTL
242struct ctl_path nf_net_ipv4_netfilter_sysctl_path[] = {
243 { .procname = "net", },
244 { .procname = "ipv4", },
245 { .procname = "netfilter", },
246 { }
247};
248EXPORT_SYMBOL_GPL(nf_net_ipv4_netfilter_sysctl_path);
249#endif /* CONFIG_SYSCTL */
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index fd7a3f68917f..a3935273869f 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -303,7 +303,7 @@ unsigned int arpt_do_table(struct sk_buff *skb,
303 if (v < 0) { 303 if (v < 0) {
304 /* Pop from stack? */ 304 /* Pop from stack? */
305 if (v != XT_RETURN) { 305 if (v != XT_RETURN) {
306 verdict = (unsigned)(-v) - 1; 306 verdict = (unsigned int)(-v) - 1;
307 break; 307 break;
308 } 308 }
309 e = back; 309 e = back;
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index 94d45e1f8882..09775a1e1348 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -586,7 +586,7 @@ static int __init ip_queue_init(void)
586#endif 586#endif
587 register_netdevice_notifier(&ipq_dev_notifier); 587 register_netdevice_notifier(&ipq_dev_notifier);
588#ifdef CONFIG_SYSCTL 588#ifdef CONFIG_SYSCTL
589 ipq_sysctl_header = register_sysctl_paths(net_ipv4_ctl_path, ipq_table); 589 ipq_sysctl_header = register_net_sysctl(&init_net, "net/ipv4", ipq_table);
590#endif 590#endif
591 status = nf_register_queue_handler(NFPROTO_IPV4, &nfqh); 591 status = nf_register_queue_handler(NFPROTO_IPV4, &nfqh);
592 if (status < 0) { 592 if (status < 0) {
@@ -597,7 +597,7 @@ static int __init ip_queue_init(void)
597 597
598cleanup_sysctl: 598cleanup_sysctl:
599#ifdef CONFIG_SYSCTL 599#ifdef CONFIG_SYSCTL
600 unregister_sysctl_table(ipq_sysctl_header); 600 unregister_net_sysctl_table(ipq_sysctl_header);
601#endif 601#endif
602 unregister_netdevice_notifier(&ipq_dev_notifier); 602 unregister_netdevice_notifier(&ipq_dev_notifier);
603 proc_net_remove(&init_net, IPQ_PROC_FS_NAME); 603 proc_net_remove(&init_net, IPQ_PROC_FS_NAME);
@@ -618,7 +618,7 @@ static void __exit ip_queue_fini(void)
618 ipq_flush(NULL, 0); 618 ipq_flush(NULL, 0);
619 619
620#ifdef CONFIG_SYSCTL 620#ifdef CONFIG_SYSCTL
621 unregister_sysctl_table(ipq_sysctl_header); 621 unregister_net_sysctl_table(ipq_sysctl_header);
622#endif 622#endif
623 unregister_netdevice_notifier(&ipq_dev_notifier); 623 unregister_netdevice_notifier(&ipq_dev_notifier);
624 proc_net_remove(&init_net, IPQ_PROC_FS_NAME); 624 proc_net_remove(&init_net, IPQ_PROC_FS_NAME);
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 24e556e83a3b..585b80f3cc68 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -377,7 +377,7 @@ ipt_do_table(struct sk_buff *skb,
377 if (v < 0) { 377 if (v < 0) {
378 /* Pop from stack? */ 378 /* Pop from stack? */
379 if (v != XT_RETURN) { 379 if (v != XT_RETURN) {
380 verdict = (unsigned)(-v) - 1; 380 verdict = (unsigned int)(-v) - 1;
381 break; 381 break;
382 } 382 }
383 if (*stackptr <= origptr) { 383 if (*stackptr <= origptr) {
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index cf73cc70ed2d..91747d4ebc26 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -311,8 +311,9 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
311static int ipv4_tuple_to_nlattr(struct sk_buff *skb, 311static int ipv4_tuple_to_nlattr(struct sk_buff *skb,
312 const struct nf_conntrack_tuple *tuple) 312 const struct nf_conntrack_tuple *tuple)
313{ 313{
314 NLA_PUT_BE32(skb, CTA_IP_V4_SRC, tuple->src.u3.ip); 314 if (nla_put_be32(skb, CTA_IP_V4_SRC, tuple->src.u3.ip) ||
315 NLA_PUT_BE32(skb, CTA_IP_V4_DST, tuple->dst.u3.ip); 315 nla_put_be32(skb, CTA_IP_V4_DST, tuple->dst.u3.ip))
316 goto nla_put_failure;
316 return 0; 317 return 0;
317 318
318nla_put_failure: 319nla_put_failure:
@@ -364,7 +365,7 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = {
364 .nla_policy = ipv4_nla_policy, 365 .nla_policy = ipv4_nla_policy,
365#endif 366#endif
366#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT) 367#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
367 .ctl_table_path = nf_net_ipv4_netfilter_sysctl_path, 368 .ctl_table_path = "net/ipv4/netfilter",
368 .ctl_table = ip_ct_sysctl_table, 369 .ctl_table = ip_ct_sysctl_table,
369#endif 370#endif
370 .me = THIS_MODULE, 371 .me = THIS_MODULE,
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 7cbe9cb261c2..0847e373d33c 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -228,10 +228,10 @@ icmp_error(struct net *net, struct nf_conn *tmpl,
228static int icmp_tuple_to_nlattr(struct sk_buff *skb, 228static int icmp_tuple_to_nlattr(struct sk_buff *skb,
229 const struct nf_conntrack_tuple *t) 229 const struct nf_conntrack_tuple *t)
230{ 230{
231 NLA_PUT_BE16(skb, CTA_PROTO_ICMP_ID, t->src.u.icmp.id); 231 if (nla_put_be16(skb, CTA_PROTO_ICMP_ID, t->src.u.icmp.id) ||
232 NLA_PUT_U8(skb, CTA_PROTO_ICMP_TYPE, t->dst.u.icmp.type); 232 nla_put_u8(skb, CTA_PROTO_ICMP_TYPE, t->dst.u.icmp.type) ||
233 NLA_PUT_U8(skb, CTA_PROTO_ICMP_CODE, t->dst.u.icmp.code); 233 nla_put_u8(skb, CTA_PROTO_ICMP_CODE, t->dst.u.icmp.code))
234 234 goto nla_put_failure;
235 return 0; 235 return 0;
236 236
237nla_put_failure: 237nla_put_failure:
@@ -293,8 +293,8 @@ icmp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
293{ 293{
294 const unsigned int *timeout = data; 294 const unsigned int *timeout = data;
295 295
296 NLA_PUT_BE32(skb, CTA_TIMEOUT_ICMP_TIMEOUT, htonl(*timeout / HZ)); 296 if (nla_put_be32(skb, CTA_TIMEOUT_ICMP_TIMEOUT, htonl(*timeout / HZ)))
297 297 goto nla_put_failure;
298 return 0; 298 return 0;
299 299
300nla_put_failure: 300nla_put_failure:
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index 57932c43960e..ea4a23813d26 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -283,7 +283,7 @@ static unsigned int ip_nat_sip_expect(struct sk_buff *skb, unsigned int dataoff,
283 __be32 newip; 283 __be32 newip;
284 u_int16_t port; 284 u_int16_t port;
285 char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")]; 285 char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")];
286 unsigned buflen; 286 unsigned int buflen;
287 287
288 /* Connection will come from reply */ 288 /* Connection will come from reply */
289 if (ct->tuplehash[dir].tuple.src.u3.ip == ct->tuplehash[!dir].tuple.dst.u3.ip) 289 if (ct->tuplehash[dir].tuple.src.u3.ip == ct->tuplehash[!dir].tuple.dst.u3.ip)
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 50009c787bcd..6e930c7174dd 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -51,15 +51,16 @@ static struct ping_table ping_table;
51 51
52static u16 ping_port_rover; 52static u16 ping_port_rover;
53 53
54static inline int ping_hashfn(struct net *net, unsigned num, unsigned mask) 54static inline int ping_hashfn(struct net *net, unsigned int num, unsigned int mask)
55{ 55{
56 int res = (num + net_hash_mix(net)) & mask; 56 int res = (num + net_hash_mix(net)) & mask;
57
57 pr_debug("hash(%d) = %d\n", num, res); 58 pr_debug("hash(%d) = %d\n", num, res);
58 return res; 59 return res;
59} 60}
60 61
61static inline struct hlist_nulls_head *ping_hashslot(struct ping_table *table, 62static inline struct hlist_nulls_head *ping_hashslot(struct ping_table *table,
62 struct net *net, unsigned num) 63 struct net *net, unsigned int num)
63{ 64{
64 return &table->hash[ping_hashfn(net, num, PING_HTABLE_MASK)]; 65 return &table->hash[ping_hashfn(net, num, PING_HTABLE_MASK)];
65} 66}
@@ -188,7 +189,8 @@ static void inet_get_ping_group_range_net(struct net *net, gid_t *low,
188 gid_t *high) 189 gid_t *high)
189{ 190{
190 gid_t *data = net->ipv4.sysctl_ping_group_range; 191 gid_t *data = net->ipv4.sysctl_ping_group_range;
191 unsigned seq; 192 unsigned int seq;
193
192 do { 194 do {
193 seq = read_seqbegin(&sysctl_local_ports.lock); 195 seq = read_seqbegin(&sysctl_local_ports.lock);
194 196
@@ -410,7 +412,7 @@ struct pingfakehdr {
410 __wsum wcheck; 412 __wsum wcheck;
411}; 413};
412 414
413static int ping_getfrag(void *from, char * to, 415static int ping_getfrag(void *from, char *to,
414 int offset, int fraglen, int odd, struct sk_buff *skb) 416 int offset, int fraglen, int odd, struct sk_buff *skb)
415{ 417{
416 struct pingfakehdr *pfh = (struct pingfakehdr *)from; 418 struct pingfakehdr *pfh = (struct pingfakehdr *)from;
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index bbd604c68e68..4032b818f3e4 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -288,7 +288,7 @@ void raw_icmp_error(struct sk_buff *skb, int protocol, u32 info)
288 read_unlock(&raw_v4_hashinfo.lock); 288 read_unlock(&raw_v4_hashinfo.lock);
289} 289}
290 290
291static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb) 291static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
292{ 292{
293 /* Charge it to the socket. */ 293 /* Charge it to the socket. */
294 294
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 167ea10b521a..5773f5d9e213 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -109,6 +109,7 @@
109#include <net/rtnetlink.h> 109#include <net/rtnetlink.h>
110#ifdef CONFIG_SYSCTL 110#ifdef CONFIG_SYSCTL
111#include <linux/sysctl.h> 111#include <linux/sysctl.h>
112#include <linux/kmemleak.h>
112#endif 113#endif
113#include <net/secure_seq.h> 114#include <net/secure_seq.h>
114 115
@@ -229,7 +230,7 @@ const __u8 ip_tos2prio[16] = {
229 TC_PRIO_INTERACTIVE_BULK, 230 TC_PRIO_INTERACTIVE_BULK,
230 ECN_OR_COST(INTERACTIVE_BULK) 231 ECN_OR_COST(INTERACTIVE_BULK)
231}; 232};
232 233EXPORT_SYMBOL(ip_tos2prio);
233 234
234/* 235/*
235 * Route cache. 236 * Route cache.
@@ -296,7 +297,7 @@ static inline void rt_hash_lock_init(void)
296#endif 297#endif
297 298
298static struct rt_hash_bucket *rt_hash_table __read_mostly; 299static struct rt_hash_bucket *rt_hash_table __read_mostly;
299static unsigned rt_hash_mask __read_mostly; 300static unsigned int rt_hash_mask __read_mostly;
300static unsigned int rt_hash_log __read_mostly; 301static unsigned int rt_hash_log __read_mostly;
301 302
302static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat); 303static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
@@ -1143,7 +1144,7 @@ static int rt_bind_neighbour(struct rtable *rt)
1143 return 0; 1144 return 0;
1144} 1145}
1145 1146
1146static struct rtable *rt_intern_hash(unsigned hash, struct rtable *rt, 1147static struct rtable *rt_intern_hash(unsigned int hash, struct rtable *rt,
1147 struct sk_buff *skb, int ifindex) 1148 struct sk_buff *skb, int ifindex)
1148{ 1149{
1149 struct rtable *rth, *cand; 1150 struct rtable *rth, *cand;
@@ -1384,7 +1385,7 @@ void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
1384} 1385}
1385EXPORT_SYMBOL(__ip_select_ident); 1386EXPORT_SYMBOL(__ip_select_ident);
1386 1387
1387static void rt_del(unsigned hash, struct rtable *rt) 1388static void rt_del(unsigned int hash, struct rtable *rt)
1388{ 1389{
1389 struct rtable __rcu **rthp; 1390 struct rtable __rcu **rthp;
1390 struct rtable *aux; 1391 struct rtable *aux;
@@ -1538,7 +1539,7 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1538 ip_rt_put(rt); 1539 ip_rt_put(rt);
1539 ret = NULL; 1540 ret = NULL;
1540 } else if (rt->rt_flags & RTCF_REDIRECTED) { 1541 } else if (rt->rt_flags & RTCF_REDIRECTED) {
1541 unsigned hash = rt_hash(rt->rt_key_dst, rt->rt_key_src, 1542 unsigned int hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
1542 rt->rt_oif, 1543 rt->rt_oif,
1543 rt_genid(dev_net(dst->dev))); 1544 rt_genid(dev_net(dst->dev)));
1544 rt_del(hash, rt); 1545 rt_del(hash, rt);
@@ -2215,9 +2216,9 @@ static int ip_mkroute_input(struct sk_buff *skb,
2215 struct in_device *in_dev, 2216 struct in_device *in_dev,
2216 __be32 daddr, __be32 saddr, u32 tos) 2217 __be32 daddr, __be32 saddr, u32 tos)
2217{ 2218{
2218 struct rtable* rth = NULL; 2219 struct rtable *rth = NULL;
2219 int err; 2220 int err;
2220 unsigned hash; 2221 unsigned int hash;
2221 2222
2222#ifdef CONFIG_IP_ROUTE_MULTIPATH 2223#ifdef CONFIG_IP_ROUTE_MULTIPATH
2223 if (res->fi && res->fi->fib_nhs > 1) 2224 if (res->fi && res->fi->fib_nhs > 1)
@@ -2255,13 +2256,13 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2255 struct fib_result res; 2256 struct fib_result res;
2256 struct in_device *in_dev = __in_dev_get_rcu(dev); 2257 struct in_device *in_dev = __in_dev_get_rcu(dev);
2257 struct flowi4 fl4; 2258 struct flowi4 fl4;
2258 unsigned flags = 0; 2259 unsigned int flags = 0;
2259 u32 itag = 0; 2260 u32 itag = 0;
2260 struct rtable * rth; 2261 struct rtable *rth;
2261 unsigned hash; 2262 unsigned int hash;
2262 __be32 spec_dst; 2263 __be32 spec_dst;
2263 int err = -EINVAL; 2264 int err = -EINVAL;
2264 struct net * net = dev_net(dev); 2265 struct net *net = dev_net(dev);
2265 2266
2266 /* IP on this device is disabled. */ 2267 /* IP on this device is disabled. */
2267 2268
@@ -2433,8 +2434,8 @@ martian_source_keep_err:
2433int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr, 2434int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2434 u8 tos, struct net_device *dev, bool noref) 2435 u8 tos, struct net_device *dev, bool noref)
2435{ 2436{
2436 struct rtable * rth; 2437 struct rtable *rth;
2437 unsigned hash; 2438 unsigned int hash;
2438 int iif = dev->ifindex; 2439 int iif = dev->ifindex;
2439 struct net *net; 2440 struct net *net;
2440 int res; 2441 int res;
@@ -2972,7 +2973,8 @@ static int rt_fill_info(struct net *net,
2972 r->rtm_src_len = 0; 2973 r->rtm_src_len = 0;
2973 r->rtm_tos = rt->rt_key_tos; 2974 r->rtm_tos = rt->rt_key_tos;
2974 r->rtm_table = RT_TABLE_MAIN; 2975 r->rtm_table = RT_TABLE_MAIN;
2975 NLA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN); 2976 if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN))
2977 goto nla_put_failure;
2976 r->rtm_type = rt->rt_type; 2978 r->rtm_type = rt->rt_type;
2977 r->rtm_scope = RT_SCOPE_UNIVERSE; 2979 r->rtm_scope = RT_SCOPE_UNIVERSE;
2978 r->rtm_protocol = RTPROT_UNSPEC; 2980 r->rtm_protocol = RTPROT_UNSPEC;
@@ -2980,31 +2982,38 @@ static int rt_fill_info(struct net *net,
2980 if (rt->rt_flags & RTCF_NOTIFY) 2982 if (rt->rt_flags & RTCF_NOTIFY)
2981 r->rtm_flags |= RTM_F_NOTIFY; 2983 r->rtm_flags |= RTM_F_NOTIFY;
2982 2984
2983 NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst); 2985 if (nla_put_be32(skb, RTA_DST, rt->rt_dst))
2984 2986 goto nla_put_failure;
2985 if (rt->rt_key_src) { 2987 if (rt->rt_key_src) {
2986 r->rtm_src_len = 32; 2988 r->rtm_src_len = 32;
2987 NLA_PUT_BE32(skb, RTA_SRC, rt->rt_key_src); 2989 if (nla_put_be32(skb, RTA_SRC, rt->rt_key_src))
2990 goto nla_put_failure;
2988 } 2991 }
2989 if (rt->dst.dev) 2992 if (rt->dst.dev &&
2990 NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex); 2993 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2994 goto nla_put_failure;
2991#ifdef CONFIG_IP_ROUTE_CLASSID 2995#ifdef CONFIG_IP_ROUTE_CLASSID
2992 if (rt->dst.tclassid) 2996 if (rt->dst.tclassid &&
2993 NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid); 2997 nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
2998 goto nla_put_failure;
2994#endif 2999#endif
2995 if (rt_is_input_route(rt)) 3000 if (rt_is_input_route(rt)) {
2996 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst); 3001 if (nla_put_be32(skb, RTA_PREFSRC, rt->rt_spec_dst))
2997 else if (rt->rt_src != rt->rt_key_src) 3002 goto nla_put_failure;
2998 NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src); 3003 } else if (rt->rt_src != rt->rt_key_src) {
2999 3004 if (nla_put_be32(skb, RTA_PREFSRC, rt->rt_src))
3000 if (rt->rt_dst != rt->rt_gateway) 3005 goto nla_put_failure;
3001 NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway); 3006 }
3007 if (rt->rt_dst != rt->rt_gateway &&
3008 nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway))
3009 goto nla_put_failure;
3002 3010
3003 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0) 3011 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
3004 goto nla_put_failure; 3012 goto nla_put_failure;
3005 3013
3006 if (rt->rt_mark) 3014 if (rt->rt_mark &&
3007 NLA_PUT_BE32(skb, RTA_MARK, rt->rt_mark); 3015 nla_put_be32(skb, RTA_MARK, rt->rt_mark))
3016 goto nla_put_failure;
3008 3017
3009 error = rt->dst.error; 3018 error = rt->dst.error;
3010 if (peer) { 3019 if (peer) {
@@ -3045,7 +3054,8 @@ static int rt_fill_info(struct net *net,
3045 } 3054 }
3046 } else 3055 } else
3047#endif 3056#endif
3048 NLA_PUT_U32(skb, RTA_IIF, rt->rt_iif); 3057 if (nla_put_u32(skb, RTA_IIF, rt->rt_iif))
3058 goto nla_put_failure;
3049 } 3059 }
3050 3060
3051 if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage, 3061 if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
@@ -3059,7 +3069,7 @@ nla_put_failure:
3059 return -EMSGSIZE; 3069 return -EMSGSIZE;
3060} 3070}
3061 3071
3062static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg) 3072static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
3063{ 3073{
3064 struct net *net = sock_net(in_skb->sk); 3074 struct net *net = sock_net(in_skb->sk);
3065 struct rtmsg *rtm; 3075 struct rtmsg *rtm;
@@ -3334,23 +3344,6 @@ static ctl_table ipv4_route_table[] = {
3334 { } 3344 { }
3335}; 3345};
3336 3346
3337static struct ctl_table empty[1];
3338
3339static struct ctl_table ipv4_skeleton[] =
3340{
3341 { .procname = "route",
3342 .mode = 0555, .child = ipv4_route_table},
3343 { .procname = "neigh",
3344 .mode = 0555, .child = empty},
3345 { }
3346};
3347
3348static __net_initdata struct ctl_path ipv4_path[] = {
3349 { .procname = "net", },
3350 { .procname = "ipv4", },
3351 { },
3352};
3353
3354static struct ctl_table ipv4_route_flush_table[] = { 3347static struct ctl_table ipv4_route_flush_table[] = {
3355 { 3348 {
3356 .procname = "flush", 3349 .procname = "flush",
@@ -3361,13 +3354,6 @@ static struct ctl_table ipv4_route_flush_table[] = {
3361 { }, 3354 { },
3362}; 3355};
3363 3356
3364static __net_initdata struct ctl_path ipv4_route_path[] = {
3365 { .procname = "net", },
3366 { .procname = "ipv4", },
3367 { .procname = "route", },
3368 { },
3369};
3370
3371static __net_init int sysctl_route_net_init(struct net *net) 3357static __net_init int sysctl_route_net_init(struct net *net)
3372{ 3358{
3373 struct ctl_table *tbl; 3359 struct ctl_table *tbl;
@@ -3380,8 +3366,7 @@ static __net_init int sysctl_route_net_init(struct net *net)
3380 } 3366 }
3381 tbl[0].extra1 = net; 3367 tbl[0].extra1 = net;
3382 3368
3383 net->ipv4.route_hdr = 3369 net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
3384 register_net_sysctl_table(net, ipv4_route_path, tbl);
3385 if (net->ipv4.route_hdr == NULL) 3370 if (net->ipv4.route_hdr == NULL)
3386 goto err_reg; 3371 goto err_reg;
3387 return 0; 3372 return 0;
@@ -3505,6 +3490,6 @@ int __init ip_rt_init(void)
3505 */ 3490 */
3506void __init ip_static_sysctl_init(void) 3491void __init ip_static_sysctl_init(void)
3507{ 3492{
3508 register_sysctl_paths(ipv4_path, ipv4_skeleton); 3493 register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
3509} 3494}
3510#endif 3495#endif
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 7a7724da9bff..ef32956ed655 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -27,6 +27,7 @@
27#include <net/tcp_memcontrol.h> 27#include <net/tcp_memcontrol.h>
28 28
29static int zero; 29static int zero;
30static int two = 2;
30static int tcp_retr1_max = 255; 31static int tcp_retr1_max = 255;
31static int ip_local_port_range_min[] = { 1, 1 }; 32static int ip_local_port_range_min[] = { 1, 1 };
32static int ip_local_port_range_max[] = { 65535, 65535 }; 33static int ip_local_port_range_max[] = { 65535, 65535 };
@@ -78,7 +79,7 @@ static int ipv4_local_port_range(ctl_table *table, int write,
78static void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, gid_t *high) 79static void inet_get_ping_group_range_table(struct ctl_table *table, gid_t *low, gid_t *high)
79{ 80{
80 gid_t *data = table->data; 81 gid_t *data = table->data;
81 unsigned seq; 82 unsigned int seq;
82 do { 83 do {
83 seq = read_seqbegin(&sysctl_local_ports.lock); 84 seq = read_seqbegin(&sysctl_local_ports.lock);
84 85
@@ -677,6 +678,15 @@ static struct ctl_table ipv4_table[] = {
677 .proc_handler = proc_dointvec 678 .proc_handler = proc_dointvec
678 }, 679 },
679 { 680 {
681 .procname = "tcp_early_retrans",
682 .data = &sysctl_tcp_early_retrans,
683 .maxlen = sizeof(int),
684 .mode = 0644,
685 .proc_handler = proc_dointvec_minmax,
686 .extra1 = &zero,
687 .extra2 = &two,
688 },
689 {
680 .procname = "udp_mem", 690 .procname = "udp_mem",
681 .data = &sysctl_udp_mem, 691 .data = &sysctl_udp_mem,
682 .maxlen = sizeof(sysctl_udp_mem), 692 .maxlen = sizeof(sysctl_udp_mem),
@@ -768,13 +778,6 @@ static struct ctl_table ipv4_net_table[] = {
768 { } 778 { }
769}; 779};
770 780
771struct ctl_path net_ipv4_ctl_path[] = {
772 { .procname = "net", },
773 { .procname = "ipv4", },
774 { },
775};
776EXPORT_SYMBOL_GPL(net_ipv4_ctl_path);
777
778static __net_init int ipv4_sysctl_init_net(struct net *net) 781static __net_init int ipv4_sysctl_init_net(struct net *net)
779{ 782{
780 struct ctl_table *table; 783 struct ctl_table *table;
@@ -815,8 +818,7 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
815 818
816 tcp_init_mem(net); 819 tcp_init_mem(net);
817 820
818 net->ipv4.ipv4_hdr = register_net_sysctl_table(net, 821 net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
819 net_ipv4_ctl_path, table);
820 if (net->ipv4.ipv4_hdr == NULL) 822 if (net->ipv4.ipv4_hdr == NULL)
821 goto err_reg; 823 goto err_reg;
822 824
@@ -857,12 +859,12 @@ static __init int sysctl_ipv4_init(void)
857 if (!i->procname) 859 if (!i->procname)
858 return -EINVAL; 860 return -EINVAL;
859 861
860 hdr = register_sysctl_paths(net_ipv4_ctl_path, ipv4_table); 862 hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
861 if (hdr == NULL) 863 if (hdr == NULL)
862 return -ENOMEM; 864 return -ENOMEM;
863 865
864 if (register_pernet_subsys(&ipv4_sysctl_ops)) { 866 if (register_pernet_subsys(&ipv4_sysctl_ops)) {
865 unregister_sysctl_table(hdr); 867 unregister_net_sysctl_table(hdr);
866 return -ENOMEM; 868 return -ENOMEM;
867 } 869 }
868 870
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1272a88c2a63..565406287f6f 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -363,6 +363,71 @@ static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
363 return period; 363 return period;
364} 364}
365 365
366/* Address-family independent initialization for a tcp_sock.
367 *
368 * NOTE: A lot of things set to zero explicitly by call to
369 * sk_alloc() so need not be done here.
370 */
371void tcp_init_sock(struct sock *sk)
372{
373 struct inet_connection_sock *icsk = inet_csk(sk);
374 struct tcp_sock *tp = tcp_sk(sk);
375
376 skb_queue_head_init(&tp->out_of_order_queue);
377 tcp_init_xmit_timers(sk);
378 tcp_prequeue_init(tp);
379
380 icsk->icsk_rto = TCP_TIMEOUT_INIT;
381 tp->mdev = TCP_TIMEOUT_INIT;
382
383 /* So many TCP implementations out there (incorrectly) count the
384 * initial SYN frame in their delayed-ACK and congestion control
385 * algorithms that we must have the following bandaid to talk
386 * efficiently to them. -DaveM
387 */
388 tp->snd_cwnd = TCP_INIT_CWND;
389
390 /* See draft-stevens-tcpca-spec-01 for discussion of the
391 * initialization of these values.
392 */
393 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
394 tp->snd_cwnd_clamp = ~0;
395 tp->mss_cache = TCP_MSS_DEFAULT;
396
397 tp->reordering = sysctl_tcp_reordering;
398 tcp_enable_early_retrans(tp);
399 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
400
401 sk->sk_state = TCP_CLOSE;
402
403 sk->sk_write_space = sk_stream_write_space;
404 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
405
406 icsk->icsk_sync_mss = tcp_sync_mss;
407
408 /* TCP Cookie Transactions */
409 if (sysctl_tcp_cookie_size > 0) {
410 /* Default, cookies without s_data_payload. */
411 tp->cookie_values =
412 kzalloc(sizeof(*tp->cookie_values),
413 sk->sk_allocation);
414 if (tp->cookie_values != NULL)
415 kref_init(&tp->cookie_values->kref);
416 }
417 /* Presumed zeroed, in order of appearance:
418 * cookie_in_always, cookie_out_never,
419 * s_data_constant, s_data_in, s_data_out
420 */
421 sk->sk_sndbuf = sysctl_tcp_wmem[1];
422 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
423
424 local_bh_disable();
425 sock_update_memcg(sk);
426 sk_sockets_allocated_inc(sk);
427 local_bh_enable();
428}
429EXPORT_SYMBOL(tcp_init_sock);
430
366/* 431/*
367 * Wait for a TCP event. 432 * Wait for a TCP event.
368 * 433 *
@@ -784,9 +849,10 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
784 while (psize > 0) { 849 while (psize > 0) {
785 struct sk_buff *skb = tcp_write_queue_tail(sk); 850 struct sk_buff *skb = tcp_write_queue_tail(sk);
786 struct page *page = pages[poffset / PAGE_SIZE]; 851 struct page *page = pages[poffset / PAGE_SIZE];
787 int copy, i, can_coalesce; 852 int copy, i;
788 int offset = poffset % PAGE_SIZE; 853 int offset = poffset % PAGE_SIZE;
789 int size = min_t(size_t, psize, PAGE_SIZE - offset); 854 int size = min_t(size_t, psize, PAGE_SIZE - offset);
855 bool can_coalesce;
790 856
791 if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) { 857 if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
792new_segment: 858new_segment:
@@ -912,6 +978,39 @@ static inline int select_size(const struct sock *sk, bool sg)
912 return tmp; 978 return tmp;
913} 979}
914 980
981static int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
982{
983 struct sk_buff *skb;
984 struct tcphdr *th;
985 bool fragstolen;
986
987 skb = alloc_skb(size + sizeof(*th), sk->sk_allocation);
988 if (!skb)
989 goto err;
990
991 th = (struct tcphdr *)skb_put(skb, sizeof(*th));
992 skb_reset_transport_header(skb);
993 memset(th, 0, sizeof(*th));
994
995 if (memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size))
996 goto err_free;
997
998 TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt;
999 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size;
1000 TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1;
1001
1002 if (tcp_queue_rcv(sk, skb, sizeof(*th), &fragstolen)) {
1003 WARN_ON_ONCE(fragstolen); /* should not happen */
1004 __kfree_skb(skb);
1005 }
1006 return size;
1007
1008err_free:
1009 kfree_skb(skb);
1010err:
1011 return -ENOMEM;
1012}
1013
915int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, 1014int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
916 size_t size) 1015 size_t size)
917{ 1016{
@@ -919,7 +1018,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
919 struct tcp_sock *tp = tcp_sk(sk); 1018 struct tcp_sock *tp = tcp_sk(sk);
920 struct sk_buff *skb; 1019 struct sk_buff *skb;
921 int iovlen, flags, err, copied; 1020 int iovlen, flags, err, copied;
922 int mss_now, size_goal; 1021 int mss_now = 0, size_goal;
923 bool sg; 1022 bool sg;
924 long timeo; 1023 long timeo;
925 1024
@@ -933,6 +1032,19 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
933 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0) 1032 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
934 goto out_err; 1033 goto out_err;
935 1034
1035 if (unlikely(tp->repair)) {
1036 if (tp->repair_queue == TCP_RECV_QUEUE) {
1037 copied = tcp_send_rcvq(sk, msg, size);
1038 goto out;
1039 }
1040
1041 err = -EINVAL;
1042 if (tp->repair_queue == TCP_NO_QUEUE)
1043 goto out_err;
1044
1045 /* 'common' sending to sendq */
1046 }
1047
936 /* This should be in poll */ 1048 /* This should be in poll */
937 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 1049 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
938 1050
@@ -1089,7 +1201,7 @@ new_segment:
1089 if ((seglen -= copy) == 0 && iovlen == 0) 1201 if ((seglen -= copy) == 0 && iovlen == 0)
1090 goto out; 1202 goto out;
1091 1203
1092 if (skb->len < max || (flags & MSG_OOB)) 1204 if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair))
1093 continue; 1205 continue;
1094 1206
1095 if (forced_push(tp)) { 1207 if (forced_push(tp)) {
@@ -1102,7 +1214,7 @@ new_segment:
1102wait_for_sndbuf: 1214wait_for_sndbuf:
1103 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1215 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1104wait_for_memory: 1216wait_for_memory:
1105 if (copied) 1217 if (copied && likely(!tp->repair))
1106 tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH); 1218 tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
1107 1219
1108 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) 1220 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
@@ -1113,7 +1225,7 @@ wait_for_memory:
1113 } 1225 }
1114 1226
1115out: 1227out:
1116 if (copied) 1228 if (copied && likely(!tp->repair))
1117 tcp_push(sk, flags, mss_now, tp->nonagle); 1229 tcp_push(sk, flags, mss_now, tp->nonagle);
1118 release_sock(sk); 1230 release_sock(sk);
1119 return copied; 1231 return copied;
@@ -1187,6 +1299,24 @@ static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
1187 return -EAGAIN; 1299 return -EAGAIN;
1188} 1300}
1189 1301
1302static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
1303{
1304 struct sk_buff *skb;
1305 int copied = 0, err = 0;
1306
1307 /* XXX -- need to support SO_PEEK_OFF */
1308
1309 skb_queue_walk(&sk->sk_write_queue, skb) {
1310 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, skb->len);
1311 if (err)
1312 break;
1313
1314 copied += skb->len;
1315 }
1316
1317 return err ?: copied;
1318}
1319
1190/* Clean up the receive buffer for full frames taken by the user, 1320/* Clean up the receive buffer for full frames taken by the user,
1191 * then send an ACK if necessary. COPIED is the number of bytes 1321 * then send an ACK if necessary. COPIED is the number of bytes
1192 * tcp_recvmsg has given to the user so far, it speeds up the 1322 * tcp_recvmsg has given to the user so far, it speeds up the
@@ -1432,6 +1562,21 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1432 if (flags & MSG_OOB) 1562 if (flags & MSG_OOB)
1433 goto recv_urg; 1563 goto recv_urg;
1434 1564
1565 if (unlikely(tp->repair)) {
1566 err = -EPERM;
1567 if (!(flags & MSG_PEEK))
1568 goto out;
1569
1570 if (tp->repair_queue == TCP_SEND_QUEUE)
1571 goto recv_sndq;
1572
1573 err = -EINVAL;
1574 if (tp->repair_queue == TCP_NO_QUEUE)
1575 goto out;
1576
1577 /* 'common' recv queue MSG_PEEK-ing */
1578 }
1579
1435 seq = &tp->copied_seq; 1580 seq = &tp->copied_seq;
1436 if (flags & MSG_PEEK) { 1581 if (flags & MSG_PEEK) {
1437 peek_seq = tp->copied_seq; 1582 peek_seq = tp->copied_seq;
@@ -1783,6 +1928,10 @@ out:
1783recv_urg: 1928recv_urg:
1784 err = tcp_recv_urg(sk, msg, len, flags); 1929 err = tcp_recv_urg(sk, msg, len, flags);
1785 goto out; 1930 goto out;
1931
1932recv_sndq:
1933 err = tcp_peek_sndq(sk, msg, len);
1934 goto out;
1786} 1935}
1787EXPORT_SYMBOL(tcp_recvmsg); 1936EXPORT_SYMBOL(tcp_recvmsg);
1788 1937
@@ -1935,7 +2084,9 @@ void tcp_close(struct sock *sk, long timeout)
1935 * advertise a zero window, then kill -9 the FTP client, wheee... 2084 * advertise a zero window, then kill -9 the FTP client, wheee...
1936 * Note: timeout is always zero in such a case. 2085 * Note: timeout is always zero in such a case.
1937 */ 2086 */
1938 if (data_was_unread) { 2087 if (unlikely(tcp_sk(sk)->repair)) {
2088 sk->sk_prot->disconnect(sk, 0);
2089 } else if (data_was_unread) {
1939 /* Unread data was tossed, zap the connection. */ 2090 /* Unread data was tossed, zap the connection. */
1940 NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); 2091 NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
1941 tcp_set_state(sk, TCP_CLOSE); 2092 tcp_set_state(sk, TCP_CLOSE);
@@ -2074,6 +2225,8 @@ int tcp_disconnect(struct sock *sk, int flags)
2074 /* ABORT function of RFC793 */ 2225 /* ABORT function of RFC793 */
2075 if (old_state == TCP_LISTEN) { 2226 if (old_state == TCP_LISTEN) {
2076 inet_csk_listen_stop(sk); 2227 inet_csk_listen_stop(sk);
2228 } else if (unlikely(tp->repair)) {
2229 sk->sk_err = ECONNABORTED;
2077 } else if (tcp_need_reset(old_state) || 2230 } else if (tcp_need_reset(old_state) ||
2078 (tp->snd_nxt != tp->write_seq && 2231 (tp->snd_nxt != tp->write_seq &&
2079 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) { 2232 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
@@ -2125,6 +2278,54 @@ int tcp_disconnect(struct sock *sk, int flags)
2125} 2278}
2126EXPORT_SYMBOL(tcp_disconnect); 2279EXPORT_SYMBOL(tcp_disconnect);
2127 2280
2281static inline int tcp_can_repair_sock(struct sock *sk)
2282{
2283 return capable(CAP_NET_ADMIN) &&
2284 ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED));
2285}
2286
2287static int tcp_repair_options_est(struct tcp_sock *tp,
2288 struct tcp_repair_opt __user *optbuf, unsigned int len)
2289{
2290 struct tcp_repair_opt opt;
2291
2292 while (len >= sizeof(opt)) {
2293 if (copy_from_user(&opt, optbuf, sizeof(opt)))
2294 return -EFAULT;
2295
2296 optbuf++;
2297 len -= sizeof(opt);
2298
2299 switch (opt.opt_code) {
2300 case TCPOPT_MSS:
2301 tp->rx_opt.mss_clamp = opt.opt_val;
2302 break;
2303 case TCPOPT_WINDOW:
2304 if (opt.opt_val > 14)
2305 return -EFBIG;
2306
2307 tp->rx_opt.snd_wscale = opt.opt_val;
2308 break;
2309 case TCPOPT_SACK_PERM:
2310 if (opt.opt_val != 0)
2311 return -EINVAL;
2312
2313 tp->rx_opt.sack_ok |= TCP_SACK_SEEN;
2314 if (sysctl_tcp_fack)
2315 tcp_enable_fack(tp);
2316 break;
2317 case TCPOPT_TIMESTAMP:
2318 if (opt.opt_val != 0)
2319 return -EINVAL;
2320
2321 tp->rx_opt.tstamp_ok = 1;
2322 break;
2323 }
2324 }
2325
2326 return 0;
2327}
2328
2128/* 2329/*
2129 * Socket option code for TCP. 2330 * Socket option code for TCP.
2130 */ 2331 */
@@ -2295,6 +2496,55 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2295 err = -EINVAL; 2496 err = -EINVAL;
2296 else 2497 else
2297 tp->thin_dupack = val; 2498 tp->thin_dupack = val;
2499 if (tp->thin_dupack)
2500 tcp_disable_early_retrans(tp);
2501 break;
2502
2503 case TCP_REPAIR:
2504 if (!tcp_can_repair_sock(sk))
2505 err = -EPERM;
2506 else if (val == 1) {
2507 tp->repair = 1;
2508 sk->sk_reuse = SK_FORCE_REUSE;
2509 tp->repair_queue = TCP_NO_QUEUE;
2510 } else if (val == 0) {
2511 tp->repair = 0;
2512 sk->sk_reuse = SK_NO_REUSE;
2513 tcp_send_window_probe(sk);
2514 } else
2515 err = -EINVAL;
2516
2517 break;
2518
2519 case TCP_REPAIR_QUEUE:
2520 if (!tp->repair)
2521 err = -EPERM;
2522 else if (val < TCP_QUEUES_NR)
2523 tp->repair_queue = val;
2524 else
2525 err = -EINVAL;
2526 break;
2527
2528 case TCP_QUEUE_SEQ:
2529 if (sk->sk_state != TCP_CLOSE)
2530 err = -EPERM;
2531 else if (tp->repair_queue == TCP_SEND_QUEUE)
2532 tp->write_seq = val;
2533 else if (tp->repair_queue == TCP_RECV_QUEUE)
2534 tp->rcv_nxt = val;
2535 else
2536 err = -EINVAL;
2537 break;
2538
2539 case TCP_REPAIR_OPTIONS:
2540 if (!tp->repair)
2541 err = -EINVAL;
2542 else if (sk->sk_state == TCP_ESTABLISHED)
2543 err = tcp_repair_options_est(tp,
2544 (struct tcp_repair_opt __user *)optval,
2545 optlen);
2546 else
2547 err = -EPERM;
2298 break; 2548 break;
2299 2549
2300 case TCP_CORK: 2550 case TCP_CORK:
@@ -2530,6 +2780,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
2530 val = tp->mss_cache; 2780 val = tp->mss_cache;
2531 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) 2781 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
2532 val = tp->rx_opt.user_mss; 2782 val = tp->rx_opt.user_mss;
2783 if (tp->repair)
2784 val = tp->rx_opt.mss_clamp;
2533 break; 2785 break;
2534 case TCP_NODELAY: 2786 case TCP_NODELAY:
2535 val = !!(tp->nonagle&TCP_NAGLE_OFF); 2787 val = !!(tp->nonagle&TCP_NAGLE_OFF);
@@ -2632,6 +2884,26 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
2632 val = tp->thin_dupack; 2884 val = tp->thin_dupack;
2633 break; 2885 break;
2634 2886
2887 case TCP_REPAIR:
2888 val = tp->repair;
2889 break;
2890
2891 case TCP_REPAIR_QUEUE:
2892 if (tp->repair)
2893 val = tp->repair_queue;
2894 else
2895 return -EINVAL;
2896 break;
2897
2898 case TCP_QUEUE_SEQ:
2899 if (tp->repair_queue == TCP_SEND_QUEUE)
2900 val = tp->write_seq;
2901 else if (tp->repair_queue == TCP_RECV_QUEUE)
2902 val = tp->rcv_nxt;
2903 else
2904 return -EINVAL;
2905 break;
2906
2635 case TCP_USER_TIMEOUT: 2907 case TCP_USER_TIMEOUT:
2636 val = jiffies_to_msecs(icsk->icsk_user_timeout); 2908 val = jiffies_to_msecs(icsk->icsk_user_timeout);
2637 break; 2909 break;
@@ -2675,7 +2947,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
2675{ 2947{
2676 struct sk_buff *segs = ERR_PTR(-EINVAL); 2948 struct sk_buff *segs = ERR_PTR(-EINVAL);
2677 struct tcphdr *th; 2949 struct tcphdr *th;
2678 unsigned thlen; 2950 unsigned int thlen;
2679 unsigned int seq; 2951 unsigned int seq;
2680 __be32 delta; 2952 __be32 delta;
2681 unsigned int oldlen; 2953 unsigned int oldlen;
@@ -3033,9 +3305,9 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
3033 struct scatterlist sg; 3305 struct scatterlist sg;
3034 const struct tcphdr *tp = tcp_hdr(skb); 3306 const struct tcphdr *tp = tcp_hdr(skb);
3035 struct hash_desc *desc = &hp->md5_desc; 3307 struct hash_desc *desc = &hp->md5_desc;
3036 unsigned i; 3308 unsigned int i;
3037 const unsigned head_data_len = skb_headlen(skb) > header_len ? 3309 const unsigned int head_data_len = skb_headlen(skb) > header_len ?
3038 skb_headlen(skb) - header_len : 0; 3310 skb_headlen(skb) - header_len : 0;
3039 const struct skb_shared_info *shi = skb_shinfo(skb); 3311 const struct skb_shared_info *shi = skb_shinfo(skb);
3040 struct sk_buff *frag_iter; 3312 struct sk_buff *frag_iter;
3041 3313
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 257b61789eeb..eb58b94301ec 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -99,6 +99,7 @@ int sysctl_tcp_thin_dupack __read_mostly;
99 99
100int sysctl_tcp_moderate_rcvbuf __read_mostly = 1; 100int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
101int sysctl_tcp_abc __read_mostly; 101int sysctl_tcp_abc __read_mostly;
102int sysctl_tcp_early_retrans __read_mostly = 2;
102 103
103#define FLAG_DATA 0x01 /* Incoming frame contained data. */ 104#define FLAG_DATA 0x01 /* Incoming frame contained data. */
104#define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */ 105#define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */
@@ -175,7 +176,7 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
175static void tcp_incr_quickack(struct sock *sk) 176static void tcp_incr_quickack(struct sock *sk)
176{ 177{
177 struct inet_connection_sock *icsk = inet_csk(sk); 178 struct inet_connection_sock *icsk = inet_csk(sk);
178 unsigned quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss); 179 unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
179 180
180 if (quickacks == 0) 181 if (quickacks == 0)
181 quickacks = 2; 182 quickacks = 2;
@@ -906,6 +907,7 @@ static void tcp_init_metrics(struct sock *sk)
906 if (dst_metric(dst, RTAX_REORDERING) && 907 if (dst_metric(dst, RTAX_REORDERING) &&
907 tp->reordering != dst_metric(dst, RTAX_REORDERING)) { 908 tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
908 tcp_disable_fack(tp); 909 tcp_disable_fack(tp);
910 tcp_disable_early_retrans(tp);
909 tp->reordering = dst_metric(dst, RTAX_REORDERING); 911 tp->reordering = dst_metric(dst, RTAX_REORDERING);
910 } 912 }
911 913
@@ -937,7 +939,7 @@ static void tcp_init_metrics(struct sock *sk)
937 tcp_set_rto(sk); 939 tcp_set_rto(sk);
938reset: 940reset:
939 if (tp->srtt == 0) { 941 if (tp->srtt == 0) {
940 /* RFC2988bis: We've failed to get a valid RTT sample from 942 /* RFC6298: 5.7 We've failed to get a valid RTT sample from
941 * 3WHS. This is most likely due to retransmission, 943 * 3WHS. This is most likely due to retransmission,
942 * including spurious one. Reset the RTO back to 3secs 944 * including spurious one. Reset the RTO back to 3secs
943 * from the more aggressive 1sec to avoid more spurious 945 * from the more aggressive 1sec to avoid more spurious
@@ -947,7 +949,7 @@ reset:
947 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK; 949 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
948 } 950 }
949 /* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been 951 /* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
950 * retransmitted. In light of RFC2988bis' more aggressive 1sec 952 * retransmitted. In light of RFC6298 more aggressive 1sec
951 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK 953 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
952 * retransmission has occurred. 954 * retransmission has occurred.
953 */ 955 */
@@ -988,6 +990,9 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
988#endif 990#endif
989 tcp_disable_fack(tp); 991 tcp_disable_fack(tp);
990 } 992 }
993
994 if (metric > 0)
995 tcp_disable_early_retrans(tp);
991} 996}
992 997
993/* This must be called before lost_out is incremented */ 998/* This must be called before lost_out is incremented */
@@ -2339,6 +2344,27 @@ static inline int tcp_dupack_heuristics(const struct tcp_sock *tp)
2339 return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1; 2344 return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1;
2340} 2345}
2341 2346
2347static bool tcp_pause_early_retransmit(struct sock *sk, int flag)
2348{
2349 struct tcp_sock *tp = tcp_sk(sk);
2350 unsigned long delay;
2351
2352 /* Delay early retransmit and entering fast recovery for
2353 * max(RTT/4, 2msec) unless ack has ECE mark, no RTT samples
2354 * available, or RTO is scheduled to fire first.
2355 */
2356 if (sysctl_tcp_early_retrans < 2 || (flag & FLAG_ECE) || !tp->srtt)
2357 return false;
2358
2359 delay = max_t(unsigned long, (tp->srtt >> 5), msecs_to_jiffies(2));
2360 if (!time_after(inet_csk(sk)->icsk_timeout, (jiffies + delay)))
2361 return false;
2362
2363 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, delay, TCP_RTO_MAX);
2364 tp->early_retrans_delayed = 1;
2365 return true;
2366}
2367
2342static inline int tcp_skb_timedout(const struct sock *sk, 2368static inline int tcp_skb_timedout(const struct sock *sk,
2343 const struct sk_buff *skb) 2369 const struct sk_buff *skb)
2344{ 2370{
@@ -2446,7 +2472,7 @@ static inline int tcp_head_timedout(const struct sock *sk)
2446 * Main question: may we further continue forward transmission 2472 * Main question: may we further continue forward transmission
2447 * with the same cwnd? 2473 * with the same cwnd?
2448 */ 2474 */
2449static int tcp_time_to_recover(struct sock *sk) 2475static int tcp_time_to_recover(struct sock *sk, int flag)
2450{ 2476{
2451 struct tcp_sock *tp = tcp_sk(sk); 2477 struct tcp_sock *tp = tcp_sk(sk);
2452 __u32 packets_out; 2478 __u32 packets_out;
@@ -2492,6 +2518,16 @@ static int tcp_time_to_recover(struct sock *sk)
2492 tcp_is_sack(tp) && !tcp_send_head(sk)) 2518 tcp_is_sack(tp) && !tcp_send_head(sk))
2493 return 1; 2519 return 1;
2494 2520
2521 /* Trick#6: TCP early retransmit, per RFC5827. To avoid spurious
2522 * retransmissions due to small network reorderings, we implement
2523 * Mitigation A.3 in the RFC and delay the retransmission for a short
2524 * interval if appropriate.
2525 */
2526 if (tp->do_early_retrans && !tp->retrans_out && tp->sacked_out &&
2527 (tp->packets_out == (tp->sacked_out + 1) && tp->packets_out < 4) &&
2528 !tcp_may_send_now(sk))
2529 return !tcp_pause_early_retransmit(sk, flag);
2530
2495 return 0; 2531 return 0;
2496} 2532}
2497 2533
@@ -3025,6 +3061,38 @@ static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked,
3025 tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt; 3061 tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
3026} 3062}
3027 3063
3064static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
3065{
3066 struct tcp_sock *tp = tcp_sk(sk);
3067 int mib_idx;
3068
3069 if (tcp_is_reno(tp))
3070 mib_idx = LINUX_MIB_TCPRENORECOVERY;
3071 else
3072 mib_idx = LINUX_MIB_TCPSACKRECOVERY;
3073
3074 NET_INC_STATS_BH(sock_net(sk), mib_idx);
3075
3076 tp->high_seq = tp->snd_nxt;
3077 tp->prior_ssthresh = 0;
3078 tp->undo_marker = tp->snd_una;
3079 tp->undo_retrans = tp->retrans_out;
3080
3081 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
3082 if (!ece_ack)
3083 tp->prior_ssthresh = tcp_current_ssthresh(sk);
3084 tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
3085 TCP_ECN_queue_cwr(tp);
3086 }
3087
3088 tp->bytes_acked = 0;
3089 tp->snd_cwnd_cnt = 0;
3090 tp->prior_cwnd = tp->snd_cwnd;
3091 tp->prr_delivered = 0;
3092 tp->prr_out = 0;
3093 tcp_set_ca_state(sk, TCP_CA_Recovery);
3094}
3095
3028/* Process an event, which can update packets-in-flight not trivially. 3096/* Process an event, which can update packets-in-flight not trivially.
3029 * Main goal of this function is to calculate new estimate for left_out, 3097 * Main goal of this function is to calculate new estimate for left_out,
3030 * taking into account both packets sitting in receiver's buffer and 3098 * taking into account both packets sitting in receiver's buffer and
@@ -3044,7 +3112,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
3044 struct tcp_sock *tp = tcp_sk(sk); 3112 struct tcp_sock *tp = tcp_sk(sk);
3045 int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) && 3113 int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
3046 (tcp_fackets_out(tp) > tp->reordering)); 3114 (tcp_fackets_out(tp) > tp->reordering));
3047 int fast_rexmit = 0, mib_idx; 3115 int fast_rexmit = 0;
3048 3116
3049 if (WARN_ON(!tp->packets_out && tp->sacked_out)) 3117 if (WARN_ON(!tp->packets_out && tp->sacked_out))
3050 tp->sacked_out = 0; 3118 tp->sacked_out = 0;
@@ -3128,7 +3196,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
3128 if (icsk->icsk_ca_state <= TCP_CA_Disorder) 3196 if (icsk->icsk_ca_state <= TCP_CA_Disorder)
3129 tcp_try_undo_dsack(sk); 3197 tcp_try_undo_dsack(sk);
3130 3198
3131 if (!tcp_time_to_recover(sk)) { 3199 if (!tcp_time_to_recover(sk, flag)) {
3132 tcp_try_to_open(sk, flag); 3200 tcp_try_to_open(sk, flag);
3133 return; 3201 return;
3134 } 3202 }
@@ -3145,32 +3213,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
3145 } 3213 }
3146 3214
3147 /* Otherwise enter Recovery state */ 3215 /* Otherwise enter Recovery state */
3148 3216 tcp_enter_recovery(sk, (flag & FLAG_ECE));
3149 if (tcp_is_reno(tp))
3150 mib_idx = LINUX_MIB_TCPRENORECOVERY;
3151 else
3152 mib_idx = LINUX_MIB_TCPSACKRECOVERY;
3153
3154 NET_INC_STATS_BH(sock_net(sk), mib_idx);
3155
3156 tp->high_seq = tp->snd_nxt;
3157 tp->prior_ssthresh = 0;
3158 tp->undo_marker = tp->snd_una;
3159 tp->undo_retrans = tp->retrans_out;
3160
3161 if (icsk->icsk_ca_state < TCP_CA_CWR) {
3162 if (!(flag & FLAG_ECE))
3163 tp->prior_ssthresh = tcp_current_ssthresh(sk);
3164 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
3165 TCP_ECN_queue_cwr(tp);
3166 }
3167
3168 tp->bytes_acked = 0;
3169 tp->snd_cwnd_cnt = 0;
3170 tp->prior_cwnd = tp->snd_cwnd;
3171 tp->prr_delivered = 0;
3172 tp->prr_out = 0;
3173 tcp_set_ca_state(sk, TCP_CA_Recovery);
3174 fast_rexmit = 1; 3217 fast_rexmit = 1;
3175 } 3218 }
3176 3219
@@ -3252,16 +3295,47 @@ static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
3252/* Restart timer after forward progress on connection. 3295/* Restart timer after forward progress on connection.
3253 * RFC2988 recommends to restart timer to now+rto. 3296 * RFC2988 recommends to restart timer to now+rto.
3254 */ 3297 */
3255static void tcp_rearm_rto(struct sock *sk) 3298void tcp_rearm_rto(struct sock *sk)
3256{ 3299{
3257 const struct tcp_sock *tp = tcp_sk(sk); 3300 struct tcp_sock *tp = tcp_sk(sk);
3258 3301
3259 if (!tp->packets_out) { 3302 if (!tp->packets_out) {
3260 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); 3303 inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
3261 } else { 3304 } else {
3262 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 3305 u32 rto = inet_csk(sk)->icsk_rto;
3263 inet_csk(sk)->icsk_rto, TCP_RTO_MAX); 3306 /* Offset the time elapsed after installing regular RTO */
3307 if (tp->early_retrans_delayed) {
3308 struct sk_buff *skb = tcp_write_queue_head(sk);
3309 const u32 rto_time_stamp = TCP_SKB_CB(skb)->when + rto;
3310 s32 delta = (s32)(rto_time_stamp - tcp_time_stamp);
3311 /* delta may not be positive if the socket is locked
3312 * when the delayed ER timer fires and is rescheduled.
3313 */
3314 if (delta > 0)
3315 rto = delta;
3316 }
3317 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
3318 TCP_RTO_MAX);
3264 } 3319 }
3320 tp->early_retrans_delayed = 0;
3321}
3322
3323/* This function is called when the delayed ER timer fires. TCP enters
3324 * fast recovery and performs fast-retransmit.
3325 */
3326void tcp_resume_early_retransmit(struct sock *sk)
3327{
3328 struct tcp_sock *tp = tcp_sk(sk);
3329
3330 tcp_rearm_rto(sk);
3331
3332 /* Stop if ER is disabled after the delayed ER timer is scheduled */
3333 if (!tp->do_early_retrans)
3334 return;
3335
3336 tcp_enter_recovery(sk, false);
3337 tcp_update_scoreboard(sk, 1);
3338 tcp_xmit_retransmit_queue(sk);
3265} 3339}
3266 3340
3267/* If we get here, the whole TSO packet has not been acked. */ 3341/* If we get here, the whole TSO packet has not been acked. */
@@ -3710,6 +3784,9 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3710 if (after(ack, tp->snd_nxt)) 3784 if (after(ack, tp->snd_nxt))
3711 goto invalid_ack; 3785 goto invalid_ack;
3712 3786
3787 if (tp->early_retrans_delayed)
3788 tcp_rearm_rto(sk);
3789
3713 if (after(ack, prior_snd_una)) 3790 if (after(ack, prior_snd_una))
3714 flag |= FLAG_SND_UNA_ADVANCED; 3791 flag |= FLAG_SND_UNA_ADVANCED;
3715 3792
@@ -4453,6 +4530,102 @@ static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
4453 return 0; 4530 return 0;
4454} 4531}
4455 4532
4533/**
4534 * tcp_try_coalesce - try to merge skb to prior one
4535 * @sk: socket
4536 * @to: prior buffer
4537 * @from: buffer to add in queue
4538 * @fragstolen: pointer to boolean
4539 *
4540 * Before queueing skb @from after @to, try to merge them
4541 * to reduce overall memory use and queue lengths, if cost is small.
4542 * Packets in ofo or receive queues can stay a long time.
4543 * Better try to coalesce them right now to avoid future collapses.
4544 * Returns true if caller should free @from instead of queueing it
4545 */
4546static bool tcp_try_coalesce(struct sock *sk,
4547 struct sk_buff *to,
4548 struct sk_buff *from,
4549 bool *fragstolen)
4550{
4551 int i, delta, len = from->len;
4552
4553 *fragstolen = false;
4554
4555 if (tcp_hdr(from)->fin || skb_cloned(to))
4556 return false;
4557
4558 if (len <= skb_tailroom(to)) {
4559 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
4560 goto merge;
4561 }
4562
4563 if (skb_has_frag_list(to) || skb_has_frag_list(from))
4564 return false;
4565
4566 if (skb_headlen(from) != 0) {
4567 struct page *page;
4568 unsigned int offset;
4569
4570 if (skb_shinfo(to)->nr_frags +
4571 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
4572 return false;
4573
4574 if (skb_head_is_locked(from))
4575 return false;
4576
4577 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
4578
4579 page = virt_to_head_page(from->head);
4580 offset = from->data - (unsigned char *)page_address(page);
4581
4582 skb_fill_page_desc(to, skb_shinfo(to)->nr_frags,
4583 page, offset, skb_headlen(from));
4584 *fragstolen = true;
4585 } else {
4586 if (skb_shinfo(to)->nr_frags +
4587 skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS)
4588 return false;
4589
4590 delta = from->truesize -
4591 SKB_TRUESIZE(skb_end_pointer(from) - from->head);
4592 }
4593
4594 WARN_ON_ONCE(delta < len);
4595
4596 memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
4597 skb_shinfo(from)->frags,
4598 skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
4599 skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;
4600
4601 if (!skb_cloned(from))
4602 skb_shinfo(from)->nr_frags = 0;
4603
4604 /* if the skb is cloned this does nothing since we set nr_frags to 0 */
4605 for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
4606 skb_frag_ref(from, i);
4607
4608 to->truesize += delta;
4609 atomic_add(delta, &sk->sk_rmem_alloc);
4610 sk_mem_charge(sk, delta);
4611 to->len += len;
4612 to->data_len += len;
4613
4614merge:
4615 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
4616 TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq;
4617 TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq;
4618 return true;
4619}
4620
4621static void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
4622{
4623 if (head_stolen)
4624 kmem_cache_free(skbuff_head_cache, skb);
4625 else
4626 __kfree_skb(skb);
4627}
4628
4456static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) 4629static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
4457{ 4630{
4458 struct tcp_sock *tp = tcp_sk(sk); 4631 struct tcp_sock *tp = tcp_sk(sk);
@@ -4491,23 +4664,13 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
4491 end_seq = TCP_SKB_CB(skb)->end_seq; 4664 end_seq = TCP_SKB_CB(skb)->end_seq;
4492 4665
4493 if (seq == TCP_SKB_CB(skb1)->end_seq) { 4666 if (seq == TCP_SKB_CB(skb1)->end_seq) {
4494 /* Packets in ofo can stay in queue a long time. 4667 bool fragstolen;
4495 * Better try to coalesce them right now 4668
4496 * to avoid future tcp_collapse_ofo_queue(), 4669 if (!tcp_try_coalesce(sk, skb1, skb, &fragstolen)) {
4497 * probably the most expensive function in tcp stack.
4498 */
4499 if (skb->len <= skb_tailroom(skb1) && !tcp_hdr(skb)->fin) {
4500 NET_INC_STATS_BH(sock_net(sk),
4501 LINUX_MIB_TCPRCVCOALESCE);
4502 BUG_ON(skb_copy_bits(skb, 0,
4503 skb_put(skb1, skb->len),
4504 skb->len));
4505 TCP_SKB_CB(skb1)->end_seq = end_seq;
4506 TCP_SKB_CB(skb1)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
4507 __kfree_skb(skb);
4508 skb = NULL;
4509 } else {
4510 __skb_queue_after(&tp->out_of_order_queue, skb1, skb); 4670 __skb_queue_after(&tp->out_of_order_queue, skb1, skb);
4671 } else {
4672 kfree_skb_partial(skb, fragstolen);
4673 skb = NULL;
4511 } 4674 }
4512 4675
4513 if (!tp->rx_opt.num_sacks || 4676 if (!tp->rx_opt.num_sacks ||
@@ -4583,12 +4746,29 @@ end:
4583 skb_set_owner_r(skb, sk); 4746 skb_set_owner_r(skb, sk);
4584} 4747}
4585 4748
4749int tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int hdrlen,
4750 bool *fragstolen)
4751{
4752 int eaten;
4753 struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue);
4754
4755 __skb_pull(skb, hdrlen);
4756 eaten = (tail &&
4757 tcp_try_coalesce(sk, tail, skb, fragstolen)) ? 1 : 0;
4758 tcp_sk(sk)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
4759 if (!eaten) {
4760 __skb_queue_tail(&sk->sk_receive_queue, skb);
4761 skb_set_owner_r(skb, sk);
4762 }
4763 return eaten;
4764}
4586 4765
4587static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) 4766static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
4588{ 4767{
4589 const struct tcphdr *th = tcp_hdr(skb); 4768 const struct tcphdr *th = tcp_hdr(skb);
4590 struct tcp_sock *tp = tcp_sk(sk); 4769 struct tcp_sock *tp = tcp_sk(sk);
4591 int eaten = -1; 4770 int eaten = -1;
4771 bool fragstolen = false;
4592 4772
4593 if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) 4773 if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq)
4594 goto drop; 4774 goto drop;
@@ -4633,8 +4813,7 @@ queue_and_out:
4633 tcp_try_rmem_schedule(sk, skb->truesize)) 4813 tcp_try_rmem_schedule(sk, skb->truesize))
4634 goto drop; 4814 goto drop;
4635 4815
4636 skb_set_owner_r(skb, sk); 4816 eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen);
4637 __skb_queue_tail(&sk->sk_receive_queue, skb);
4638 } 4817 }
4639 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 4818 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
4640 if (skb->len) 4819 if (skb->len)
@@ -4658,7 +4837,7 @@ queue_and_out:
4658 tcp_fast_path_check(sk); 4837 tcp_fast_path_check(sk);
4659 4838
4660 if (eaten > 0) 4839 if (eaten > 0)
4661 __kfree_skb(skb); 4840 kfree_skb_partial(skb, fragstolen);
4662 else if (!sock_flag(sk, SOCK_DEAD)) 4841 else if (!sock_flag(sk, SOCK_DEAD))
4663 sk->sk_data_ready(sk, 0); 4842 sk->sk_data_ready(sk, 0);
4664 return; 4843 return;
@@ -5437,6 +5616,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
5437 } else { 5616 } else {
5438 int eaten = 0; 5617 int eaten = 0;
5439 int copied_early = 0; 5618 int copied_early = 0;
5619 bool fragstolen = false;
5440 5620
5441 if (tp->copied_seq == tp->rcv_nxt && 5621 if (tp->copied_seq == tp->rcv_nxt &&
5442 len - tcp_header_len <= tp->ucopy.len) { 5622 len - tcp_header_len <= tp->ucopy.len) {
@@ -5494,10 +5674,8 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
5494 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS); 5674 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
5495 5675
5496 /* Bulk data transfer: receiver */ 5676 /* Bulk data transfer: receiver */
5497 __skb_pull(skb, tcp_header_len); 5677 eaten = tcp_queue_rcv(sk, skb, tcp_header_len,
5498 __skb_queue_tail(&sk->sk_receive_queue, skb); 5678 &fragstolen);
5499 skb_set_owner_r(skb, sk);
5500 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
5501 } 5679 }
5502 5680
5503 tcp_event_data_recv(sk, skb); 5681 tcp_event_data_recv(sk, skb);
@@ -5519,7 +5697,7 @@ no_ack:
5519 else 5697 else
5520#endif 5698#endif
5521 if (eaten) 5699 if (eaten)
5522 __kfree_skb(skb); 5700 kfree_skb_partial(skb, fragstolen);
5523 else 5701 else
5524 sk->sk_data_ready(sk, 0); 5702 sk->sk_data_ready(sk, 0);
5525 return 0; 5703 return 0;
@@ -5563,6 +5741,44 @@ discard:
5563} 5741}
5564EXPORT_SYMBOL(tcp_rcv_established); 5742EXPORT_SYMBOL(tcp_rcv_established);
5565 5743
5744void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
5745{
5746 struct tcp_sock *tp = tcp_sk(sk);
5747 struct inet_connection_sock *icsk = inet_csk(sk);
5748
5749 tcp_set_state(sk, TCP_ESTABLISHED);
5750
5751 if (skb != NULL)
5752 security_inet_conn_established(sk, skb);
5753
5754 /* Make sure socket is routed, for correct metrics. */
5755 icsk->icsk_af_ops->rebuild_header(sk);
5756
5757 tcp_init_metrics(sk);
5758
5759 tcp_init_congestion_control(sk);
5760
5761 /* Prevent spurious tcp_cwnd_restart() on first data
5762 * packet.
5763 */
5764 tp->lsndtime = tcp_time_stamp;
5765
5766 tcp_init_buffer_space(sk);
5767
5768 if (sock_flag(sk, SOCK_KEEPOPEN))
5769 inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp));
5770
5771 if (!tp->rx_opt.snd_wscale)
5772 __tcp_fast_path_on(tp, tp->snd_wnd);
5773 else
5774 tp->pred_flags = 0;
5775
5776 if (!sock_flag(sk, SOCK_DEAD)) {
5777 sk->sk_state_change(sk);
5778 sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
5779 }
5780}
5781
5566static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, 5782static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
5567 const struct tcphdr *th, unsigned int len) 5783 const struct tcphdr *th, unsigned int len)
5568{ 5784{
@@ -5695,36 +5911,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
5695 } 5911 }
5696 5912
5697 smp_mb(); 5913 smp_mb();
5698 tcp_set_state(sk, TCP_ESTABLISHED);
5699
5700 security_inet_conn_established(sk, skb);
5701 5914
5702 /* Make sure socket is routed, for correct metrics. */ 5915 tcp_finish_connect(sk, skb);
5703 icsk->icsk_af_ops->rebuild_header(sk);
5704
5705 tcp_init_metrics(sk);
5706
5707 tcp_init_congestion_control(sk);
5708
5709 /* Prevent spurious tcp_cwnd_restart() on first data
5710 * packet.
5711 */
5712 tp->lsndtime = tcp_time_stamp;
5713
5714 tcp_init_buffer_space(sk);
5715
5716 if (sock_flag(sk, SOCK_KEEPOPEN))
5717 inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp));
5718
5719 if (!tp->rx_opt.snd_wscale)
5720 __tcp_fast_path_on(tp, tp->snd_wnd);
5721 else
5722 tp->pred_flags = 0;
5723
5724 if (!sock_flag(sk, SOCK_DEAD)) {
5725 sk->sk_state_change(sk);
5726 sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
5727 }
5728 5916
5729 if (sk->sk_write_pending || 5917 if (sk->sk_write_pending ||
5730 icsk->icsk_accept_queue.rskq_defer_accept || 5918 icsk->icsk_accept_queue.rskq_defer_accept ||
@@ -5738,8 +5926,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
5738 */ 5926 */
5739 inet_csk_schedule_ack(sk); 5927 inet_csk_schedule_ack(sk);
5740 icsk->icsk_ack.lrcvtime = tcp_time_stamp; 5928 icsk->icsk_ack.lrcvtime = tcp_time_stamp;
5741 icsk->icsk_ack.ato = TCP_ATO_MIN;
5742 tcp_incr_quickack(sk);
5743 tcp_enter_quickack_mode(sk); 5929 tcp_enter_quickack_mode(sk);
5744 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 5930 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
5745 TCP_DELACK_MAX, TCP_RTO_MAX); 5931 TCP_DELACK_MAX, TCP_RTO_MAX);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 0cb86ceb652f..4ff5e1f70d16 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -138,6 +138,14 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
138} 138}
139EXPORT_SYMBOL_GPL(tcp_twsk_unique); 139EXPORT_SYMBOL_GPL(tcp_twsk_unique);
140 140
141static int tcp_repair_connect(struct sock *sk)
142{
143 tcp_connect_init(sk);
144 tcp_finish_connect(sk, NULL);
145
146 return 0;
147}
148
141/* This will initiate an outgoing connection. */ 149/* This will initiate an outgoing connection. */
142int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 150int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
143{ 151{
@@ -196,7 +204,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
196 /* Reset inherited state */ 204 /* Reset inherited state */
197 tp->rx_opt.ts_recent = 0; 205 tp->rx_opt.ts_recent = 0;
198 tp->rx_opt.ts_recent_stamp = 0; 206 tp->rx_opt.ts_recent_stamp = 0;
199 tp->write_seq = 0; 207 if (likely(!tp->repair))
208 tp->write_seq = 0;
200 } 209 }
201 210
202 if (tcp_death_row.sysctl_tw_recycle && 211 if (tcp_death_row.sysctl_tw_recycle &&
@@ -247,7 +256,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
247 sk->sk_gso_type = SKB_GSO_TCPV4; 256 sk->sk_gso_type = SKB_GSO_TCPV4;
248 sk_setup_caps(sk, &rt->dst); 257 sk_setup_caps(sk, &rt->dst);
249 258
250 if (!tp->write_seq) 259 if (!tp->write_seq && likely(!tp->repair))
251 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr, 260 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
252 inet->inet_daddr, 261 inet->inet_daddr,
253 inet->inet_sport, 262 inet->inet_sport,
@@ -255,7 +264,11 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
255 264
256 inet->inet_id = tp->write_seq ^ jiffies; 265 inet->inet_id = tp->write_seq ^ jiffies;
257 266
258 err = tcp_connect(sk); 267 if (likely(!tp->repair))
268 err = tcp_connect(sk);
269 else
270 err = tcp_repair_connect(sk);
271
259 rt = NULL; 272 rt = NULL;
260 if (err) 273 if (err)
261 goto failure; 274 goto failure;
@@ -1355,7 +1368,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1355 goto drop_and_free; 1368 goto drop_and_free;
1356 1369
1357 if (!want_cookie || tmp_opt.tstamp_ok) 1370 if (!want_cookie || tmp_opt.tstamp_ok)
1358 TCP_ECN_create_request(req, tcp_hdr(skb)); 1371 TCP_ECN_create_request(req, skb);
1359 1372
1360 if (want_cookie) { 1373 if (want_cookie) {
1361 isn = cookie_v4_init_sequence(sk, skb, &req->mss); 1374 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
@@ -1739,7 +1752,8 @@ process:
1739 if (!tcp_prequeue(sk, skb)) 1752 if (!tcp_prequeue(sk, skb))
1740 ret = tcp_v4_do_rcv(sk, skb); 1753 ret = tcp_v4_do_rcv(sk, skb);
1741 } 1754 }
1742 } else if (unlikely(sk_add_backlog(sk, skb))) { 1755 } else if (unlikely(sk_add_backlog(sk, skb,
1756 sk->sk_rcvbuf + sk->sk_sndbuf))) {
1743 bh_unlock_sock(sk); 1757 bh_unlock_sock(sk);
1744 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP); 1758 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1745 goto discard_and_relse; 1759 goto discard_and_relse;
@@ -1875,64 +1889,15 @@ static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1875static int tcp_v4_init_sock(struct sock *sk) 1889static int tcp_v4_init_sock(struct sock *sk)
1876{ 1890{
1877 struct inet_connection_sock *icsk = inet_csk(sk); 1891 struct inet_connection_sock *icsk = inet_csk(sk);
1878 struct tcp_sock *tp = tcp_sk(sk);
1879
1880 skb_queue_head_init(&tp->out_of_order_queue);
1881 tcp_init_xmit_timers(sk);
1882 tcp_prequeue_init(tp);
1883 1892
1884 icsk->icsk_rto = TCP_TIMEOUT_INIT; 1893 tcp_init_sock(sk);
1885 tp->mdev = TCP_TIMEOUT_INIT;
1886
1887 /* So many TCP implementations out there (incorrectly) count the
1888 * initial SYN frame in their delayed-ACK and congestion control
1889 * algorithms that we must have the following bandaid to talk
1890 * efficiently to them. -DaveM
1891 */
1892 tp->snd_cwnd = TCP_INIT_CWND;
1893
1894 /* See draft-stevens-tcpca-spec-01 for discussion of the
1895 * initialization of these values.
1896 */
1897 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1898 tp->snd_cwnd_clamp = ~0;
1899 tp->mss_cache = TCP_MSS_DEFAULT;
1900
1901 tp->reordering = sysctl_tcp_reordering;
1902 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1903
1904 sk->sk_state = TCP_CLOSE;
1905
1906 sk->sk_write_space = sk_stream_write_space;
1907 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1908 1894
1909 icsk->icsk_af_ops = &ipv4_specific; 1895 icsk->icsk_af_ops = &ipv4_specific;
1910 icsk->icsk_sync_mss = tcp_sync_mss; 1896
1911#ifdef CONFIG_TCP_MD5SIG 1897#ifdef CONFIG_TCP_MD5SIG
1912 tp->af_specific = &tcp_sock_ipv4_specific; 1898 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1913#endif 1899#endif
1914 1900
1915 /* TCP Cookie Transactions */
1916 if (sysctl_tcp_cookie_size > 0) {
1917 /* Default, cookies without s_data_payload. */
1918 tp->cookie_values =
1919 kzalloc(sizeof(*tp->cookie_values),
1920 sk->sk_allocation);
1921 if (tp->cookie_values != NULL)
1922 kref_init(&tp->cookie_values->kref);
1923 }
1924 /* Presumed zeroed, in order of appearance:
1925 * cookie_in_always, cookie_out_never,
1926 * s_data_constant, s_data_in, s_data_out
1927 */
1928 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1929 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1930
1931 local_bh_disable();
1932 sock_update_memcg(sk);
1933 sk_sockets_allocated_inc(sk);
1934 local_bh_enable();
1935
1936 return 0; 1901 return 0;
1937} 1902}
1938 1903
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 3cabafb5cdd1..6f6a91832826 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -482,6 +482,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
482 newtp->sacked_out = 0; 482 newtp->sacked_out = 0;
483 newtp->fackets_out = 0; 483 newtp->fackets_out = 0;
484 newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 484 newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
485 tcp_enable_early_retrans(newtp);
485 486
486 /* So many TCP implementations out there (incorrectly) count the 487 /* So many TCP implementations out there (incorrectly) count the
487 * initial SYN frame in their delayed-ACK and congestion control 488 * initial SYN frame in their delayed-ACK and congestion control
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 7ac6423117ad..d94733009923 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -78,9 +78,8 @@ static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
78 tp->frto_counter = 3; 78 tp->frto_counter = 3;
79 79
80 tp->packets_out += tcp_skb_pcount(skb); 80 tp->packets_out += tcp_skb_pcount(skb);
81 if (!prior_packets) 81 if (!prior_packets || tp->early_retrans_delayed)
82 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, 82 tcp_rearm_rto(sk);
83 inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
84} 83}
85 84
86/* SND.NXT, if window was not shrunk. 85/* SND.NXT, if window was not shrunk.
@@ -563,13 +562,13 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
563/* Compute TCP options for SYN packets. This is not the final 562/* Compute TCP options for SYN packets. This is not the final
564 * network wire format yet. 563 * network wire format yet.
565 */ 564 */
566static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb, 565static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
567 struct tcp_out_options *opts, 566 struct tcp_out_options *opts,
568 struct tcp_md5sig_key **md5) 567 struct tcp_md5sig_key **md5)
569{ 568{
570 struct tcp_sock *tp = tcp_sk(sk); 569 struct tcp_sock *tp = tcp_sk(sk);
571 struct tcp_cookie_values *cvp = tp->cookie_values; 570 struct tcp_cookie_values *cvp = tp->cookie_values;
572 unsigned remaining = MAX_TCP_OPTION_SPACE; 571 unsigned int remaining = MAX_TCP_OPTION_SPACE;
573 u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ? 572 u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ?
574 tcp_cookie_size_check(cvp->cookie_desired) : 573 tcp_cookie_size_check(cvp->cookie_desired) :
575 0; 574 0;
@@ -663,15 +662,15 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
663} 662}
664 663
665/* Set up TCP options for SYN-ACKs. */ 664/* Set up TCP options for SYN-ACKs. */
666static unsigned tcp_synack_options(struct sock *sk, 665static unsigned int tcp_synack_options(struct sock *sk,
667 struct request_sock *req, 666 struct request_sock *req,
668 unsigned mss, struct sk_buff *skb, 667 unsigned int mss, struct sk_buff *skb,
669 struct tcp_out_options *opts, 668 struct tcp_out_options *opts,
670 struct tcp_md5sig_key **md5, 669 struct tcp_md5sig_key **md5,
671 struct tcp_extend_values *xvp) 670 struct tcp_extend_values *xvp)
672{ 671{
673 struct inet_request_sock *ireq = inet_rsk(req); 672 struct inet_request_sock *ireq = inet_rsk(req);
674 unsigned remaining = MAX_TCP_OPTION_SPACE; 673 unsigned int remaining = MAX_TCP_OPTION_SPACE;
675 u8 cookie_plus = (xvp != NULL && !xvp->cookie_out_never) ? 674 u8 cookie_plus = (xvp != NULL && !xvp->cookie_out_never) ?
676 xvp->cookie_plus : 675 xvp->cookie_plus :
677 0; 676 0;
@@ -742,13 +741,13 @@ static unsigned tcp_synack_options(struct sock *sk,
742/* Compute TCP options for ESTABLISHED sockets. This is not the 741/* Compute TCP options for ESTABLISHED sockets. This is not the
743 * final wire format yet. 742 * final wire format yet.
744 */ 743 */
745static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb, 744static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb,
746 struct tcp_out_options *opts, 745 struct tcp_out_options *opts,
747 struct tcp_md5sig_key **md5) 746 struct tcp_md5sig_key **md5)
748{ 747{
749 struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL; 748 struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
750 struct tcp_sock *tp = tcp_sk(sk); 749 struct tcp_sock *tp = tcp_sk(sk);
751 unsigned size = 0; 750 unsigned int size = 0;
752 unsigned int eff_sacks; 751 unsigned int eff_sacks;
753 752
754#ifdef CONFIG_TCP_MD5SIG 753#ifdef CONFIG_TCP_MD5SIG
@@ -770,9 +769,9 @@ static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb,
770 769
771 eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack; 770 eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
772 if (unlikely(eff_sacks)) { 771 if (unlikely(eff_sacks)) {
773 const unsigned remaining = MAX_TCP_OPTION_SPACE - size; 772 const unsigned int remaining = MAX_TCP_OPTION_SPACE - size;
774 opts->num_sack_blocks = 773 opts->num_sack_blocks =
775 min_t(unsigned, eff_sacks, 774 min_t(unsigned int, eff_sacks,
776 (remaining - TCPOLEN_SACK_BASE_ALIGNED) / 775 (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
777 TCPOLEN_SACK_PERBLOCK); 776 TCPOLEN_SACK_PERBLOCK);
778 size += TCPOLEN_SACK_BASE_ALIGNED + 777 size += TCPOLEN_SACK_BASE_ALIGNED +
@@ -801,7 +800,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
801 struct tcp_sock *tp; 800 struct tcp_sock *tp;
802 struct tcp_skb_cb *tcb; 801 struct tcp_skb_cb *tcb;
803 struct tcp_out_options opts; 802 struct tcp_out_options opts;
804 unsigned tcp_options_size, tcp_header_size; 803 unsigned int tcp_options_size, tcp_header_size;
805 struct tcp_md5sig_key *md5; 804 struct tcp_md5sig_key *md5;
806 struct tcphdr *th; 805 struct tcphdr *th;
807 int err; 806 int err;
@@ -1150,7 +1149,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
1150} 1149}
1151 1150
1152/* Calculate MSS. Not accounting for SACKs here. */ 1151/* Calculate MSS. Not accounting for SACKs here. */
1153int tcp_mtu_to_mss(const struct sock *sk, int pmtu) 1152int tcp_mtu_to_mss(struct sock *sk, int pmtu)
1154{ 1153{
1155 const struct tcp_sock *tp = tcp_sk(sk); 1154 const struct tcp_sock *tp = tcp_sk(sk);
1156 const struct inet_connection_sock *icsk = inet_csk(sk); 1155 const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1161,6 +1160,14 @@ int tcp_mtu_to_mss(const struct sock *sk, int pmtu)
1161 */ 1160 */
1162 mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr); 1161 mss_now = pmtu - icsk->icsk_af_ops->net_header_len - sizeof(struct tcphdr);
1163 1162
1163 /* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
1164 if (icsk->icsk_af_ops->net_frag_header_len) {
1165 const struct dst_entry *dst = __sk_dst_get(sk);
1166
1167 if (dst && dst_allfrag(dst))
1168 mss_now -= icsk->icsk_af_ops->net_frag_header_len;
1169 }
1170
1164 /* Clamp it (mss_clamp does not include tcp options) */ 1171 /* Clamp it (mss_clamp does not include tcp options) */
1165 if (mss_now > tp->rx_opt.mss_clamp) 1172 if (mss_now > tp->rx_opt.mss_clamp)
1166 mss_now = tp->rx_opt.mss_clamp; 1173 mss_now = tp->rx_opt.mss_clamp;
@@ -1179,7 +1186,7 @@ int tcp_mtu_to_mss(const struct sock *sk, int pmtu)
1179} 1186}
1180 1187
1181/* Inverse of above */ 1188/* Inverse of above */
1182int tcp_mss_to_mtu(const struct sock *sk, int mss) 1189int tcp_mss_to_mtu(struct sock *sk, int mss)
1183{ 1190{
1184 const struct tcp_sock *tp = tcp_sk(sk); 1191 const struct tcp_sock *tp = tcp_sk(sk);
1185 const struct inet_connection_sock *icsk = inet_csk(sk); 1192 const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1190,6 +1197,13 @@ int tcp_mss_to_mtu(const struct sock *sk, int mss)
1190 icsk->icsk_ext_hdr_len + 1197 icsk->icsk_ext_hdr_len +
1191 icsk->icsk_af_ops->net_header_len; 1198 icsk->icsk_af_ops->net_header_len;
1192 1199
1200 /* IPv6 adds a frag_hdr in case RTAX_FEATURE_ALLFRAG is set */
1201 if (icsk->icsk_af_ops->net_frag_header_len) {
1202 const struct dst_entry *dst = __sk_dst_get(sk);
1203
1204 if (dst && dst_allfrag(dst))
1205 mtu += icsk->icsk_af_ops->net_frag_header_len;
1206 }
1193 return mtu; 1207 return mtu;
1194} 1208}
1195 1209
@@ -1259,7 +1273,7 @@ unsigned int tcp_current_mss(struct sock *sk)
1259 const struct tcp_sock *tp = tcp_sk(sk); 1273 const struct tcp_sock *tp = tcp_sk(sk);
1260 const struct dst_entry *dst = __sk_dst_get(sk); 1274 const struct dst_entry *dst = __sk_dst_get(sk);
1261 u32 mss_now; 1275 u32 mss_now;
1262 unsigned header_len; 1276 unsigned int header_len;
1263 struct tcp_out_options opts; 1277 struct tcp_out_options opts;
1264 struct tcp_md5sig_key *md5; 1278 struct tcp_md5sig_key *md5;
1265 1279
@@ -1390,7 +1404,7 @@ static inline int tcp_minshall_check(const struct tcp_sock *tp)
1390 */ 1404 */
1391static inline int tcp_nagle_check(const struct tcp_sock *tp, 1405static inline int tcp_nagle_check(const struct tcp_sock *tp,
1392 const struct sk_buff *skb, 1406 const struct sk_buff *skb,
1393 unsigned mss_now, int nonagle) 1407 unsigned int mss_now, int nonagle)
1394{ 1408{
1395 return skb->len < mss_now && 1409 return skb->len < mss_now &&
1396 ((nonagle & TCP_NAGLE_CORK) || 1410 ((nonagle & TCP_NAGLE_CORK) ||
@@ -2562,7 +2576,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2562EXPORT_SYMBOL(tcp_make_synack); 2576EXPORT_SYMBOL(tcp_make_synack);
2563 2577
2564/* Do all connect socket setups that can be done AF independent. */ 2578/* Do all connect socket setups that can be done AF independent. */
2565static void tcp_connect_init(struct sock *sk) 2579void tcp_connect_init(struct sock *sk)
2566{ 2580{
2567 const struct dst_entry *dst = __sk_dst_get(sk); 2581 const struct dst_entry *dst = __sk_dst_get(sk);
2568 struct tcp_sock *tp = tcp_sk(sk); 2582 struct tcp_sock *tp = tcp_sk(sk);
@@ -2617,9 +2631,12 @@ static void tcp_connect_init(struct sock *sk)
2617 tp->snd_una = tp->write_seq; 2631 tp->snd_una = tp->write_seq;
2618 tp->snd_sml = tp->write_seq; 2632 tp->snd_sml = tp->write_seq;
2619 tp->snd_up = tp->write_seq; 2633 tp->snd_up = tp->write_seq;
2620 tp->rcv_nxt = 0; 2634 tp->snd_nxt = tp->write_seq;
2621 tp->rcv_wup = 0; 2635
2622 tp->copied_seq = 0; 2636 if (likely(!tp->repair))
2637 tp->rcv_nxt = 0;
2638 tp->rcv_wup = tp->rcv_nxt;
2639 tp->copied_seq = tp->rcv_nxt;
2623 2640
2624 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT; 2641 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
2625 inet_csk(sk)->icsk_retransmits = 0; 2642 inet_csk(sk)->icsk_retransmits = 0;
@@ -2642,7 +2659,6 @@ int tcp_connect(struct sock *sk)
2642 /* Reserve space for headers. */ 2659 /* Reserve space for headers. */
2643 skb_reserve(buff, MAX_TCP_HEADER); 2660 skb_reserve(buff, MAX_TCP_HEADER);
2644 2661
2645 tp->snd_nxt = tp->write_seq;
2646 tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN); 2662 tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
2647 TCP_ECN_send_syn(sk, buff); 2663 TCP_ECN_send_syn(sk, buff);
2648 2664
@@ -2791,6 +2807,15 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
2791 return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC); 2807 return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
2792} 2808}
2793 2809
2810void tcp_send_window_probe(struct sock *sk)
2811{
2812 if (sk->sk_state == TCP_ESTABLISHED) {
2813 tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
2814 tcp_sk(sk)->snd_nxt = tcp_sk(sk)->write_seq;
2815 tcp_xmit_probe_skb(sk, 0);
2816 }
2817}
2818
2794/* Initiate keepalive or window probe from timer. */ 2819/* Initiate keepalive or window probe from timer. */
2795int tcp_write_wakeup(struct sock *sk) 2820int tcp_write_wakeup(struct sock *sk)
2796{ 2821{
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index a981cdc0a6e9..4526fe68e60e 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -91,7 +91,7 @@ static inline int tcp_probe_avail(void)
91 * Note: arguments must match tcp_rcv_established()! 91 * Note: arguments must match tcp_rcv_established()!
92 */ 92 */
93static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb, 93static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
94 struct tcphdr *th, unsigned len) 94 struct tcphdr *th, unsigned int len)
95{ 95{
96 const struct tcp_sock *tp = tcp_sk(sk); 96 const struct tcp_sock *tp = tcp_sk(sk);
97 const struct inet_sock *inet = inet_sk(sk); 97 const struct inet_sock *inet = inet_sk(sk);
@@ -138,7 +138,7 @@ static struct jprobe tcp_jprobe = {
138 .entry = jtcp_rcv_established, 138 .entry = jtcp_rcv_established,
139}; 139};
140 140
141static int tcpprobe_open(struct inode * inode, struct file * file) 141static int tcpprobe_open(struct inode *inode, struct file *file)
142{ 142{
143 /* Reset (empty) log */ 143 /* Reset (empty) log */
144 spin_lock_bh(&tcp_probe.lock); 144 spin_lock_bh(&tcp_probe.lock);
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 34d4a02c2f16..e911e6c523ec 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -319,6 +319,11 @@ void tcp_retransmit_timer(struct sock *sk)
319 struct tcp_sock *tp = tcp_sk(sk); 319 struct tcp_sock *tp = tcp_sk(sk);
320 struct inet_connection_sock *icsk = inet_csk(sk); 320 struct inet_connection_sock *icsk = inet_csk(sk);
321 321
322 if (tp->early_retrans_delayed) {
323 tcp_resume_early_retransmit(sk);
324 return;
325 }
326
322 if (!tp->packets_out) 327 if (!tp->packets_out)
323 goto out; 328 goto out;
324 329
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index fe141052a1be..279fd0846302 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -107,6 +107,7 @@
107#include <net/checksum.h> 107#include <net/checksum.h>
108#include <net/xfrm.h> 108#include <net/xfrm.h>
109#include <trace/events/udp.h> 109#include <trace/events/udp.h>
110#include <linux/static_key.h>
110#include "udp_impl.h" 111#include "udp_impl.h"
111 112
112struct udp_table udp_table __read_mostly; 113struct udp_table udp_table __read_mostly;
@@ -206,7 +207,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
206 207
207 if (!snum) { 208 if (!snum) {
208 int low, high, remaining; 209 int low, high, remaining;
209 unsigned rand; 210 unsigned int rand;
210 unsigned short first, last; 211 unsigned short first, last;
211 DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN); 212 DECLARE_BITMAP(bitmap, PORTS_PER_CHAIN);
212 213
@@ -846,7 +847,7 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
846 * Get and verify the address. 847 * Get and verify the address.
847 */ 848 */
848 if (msg->msg_name) { 849 if (msg->msg_name) {
849 struct sockaddr_in * usin = (struct sockaddr_in *)msg->msg_name; 850 struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
850 if (msg->msg_namelen < sizeof(*usin)) 851 if (msg->msg_namelen < sizeof(*usin))
851 return -EINVAL; 852 return -EINVAL;
852 if (usin->sin_family != AF_INET) { 853 if (usin->sin_family != AF_INET) {
@@ -1379,6 +1380,14 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1379 1380
1380} 1381}
1381 1382
1383static struct static_key udp_encap_needed __read_mostly;
1384void udp_encap_enable(void)
1385{
1386 if (!static_key_enabled(&udp_encap_needed))
1387 static_key_slow_inc(&udp_encap_needed);
1388}
1389EXPORT_SYMBOL(udp_encap_enable);
1390
1382/* returns: 1391/* returns:
1383 * -1: error 1392 * -1: error
1384 * 0: success 1393 * 0: success
@@ -1400,7 +1409,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1400 goto drop; 1409 goto drop;
1401 nf_reset(skb); 1410 nf_reset(skb);
1402 1411
1403 if (up->encap_type) { 1412 if (static_key_false(&udp_encap_needed) && up->encap_type) {
1404 int (*encap_rcv)(struct sock *sk, struct sk_buff *skb); 1413 int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
1405 1414
1406 /* 1415 /*
@@ -1470,7 +1479,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1470 goto drop; 1479 goto drop;
1471 1480
1472 1481
1473 if (sk_rcvqueues_full(sk, skb)) 1482 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
1474 goto drop; 1483 goto drop;
1475 1484
1476 rc = 0; 1485 rc = 0;
@@ -1479,7 +1488,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1479 bh_lock_sock(sk); 1488 bh_lock_sock(sk);
1480 if (!sock_owned_by_user(sk)) 1489 if (!sock_owned_by_user(sk))
1481 rc = __udp_queue_rcv_skb(sk, skb); 1490 rc = __udp_queue_rcv_skb(sk, skb);
1482 else if (sk_add_backlog(sk, skb)) { 1491 else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
1483 bh_unlock_sock(sk); 1492 bh_unlock_sock(sk);
1484 goto drop; 1493 goto drop;
1485 } 1494 }
@@ -1760,6 +1769,7 @@ int udp_lib_setsockopt(struct sock *sk, int level, int optname,
1760 /* FALLTHROUGH */ 1769 /* FALLTHROUGH */
1761 case UDP_ENCAP_L2TPINUDP: 1770 case UDP_ENCAP_L2TPINUDP:
1762 up->encap_type = val; 1771 up->encap_type = val;
1772 udp_encap_enable();
1763 break; 1773 break;
1764 default: 1774 default:
1765 err = -ENOPROTOOPT; 1775 err = -ENOPROTOOPT;
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h
index aaad650d47d9..5a681e298b90 100644
--- a/net/ipv4/udp_impl.h
+++ b/net/ipv4/udp_impl.h
@@ -25,7 +25,7 @@ extern int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
25 size_t len, int noblock, int flags, int *addr_len); 25 size_t len, int noblock, int flags, int *addr_len);
26extern int udp_sendpage(struct sock *sk, struct page *page, int offset, 26extern int udp_sendpage(struct sock *sk, struct page *page, int offset,
27 size_t size, int flags); 27 size_t size, int flags);
28extern int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb); 28extern int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
29extern void udp_destroy_sock(struct sock *sk); 29extern void udp_destroy_sock(struct sock *sk);
30 30
31#ifdef CONFIG_PROC_FS 31#ifdef CONFIG_PROC_FS
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index a0b4c5da8d43..0d3426cb5c4f 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -152,7 +152,7 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
152 152
153 case IPPROTO_AH: 153 case IPPROTO_AH:
154 if (pskb_may_pull(skb, xprth + 8 - skb->data)) { 154 if (pskb_may_pull(skb, xprth + 8 - skb->data)) {
155 __be32 *ah_hdr = (__be32*)xprth; 155 __be32 *ah_hdr = (__be32 *)xprth;
156 156
157 fl4->fl4_ipsec_spi = ah_hdr[1]; 157 fl4->fl4_ipsec_spi = ah_hdr[1];
158 } 158 }
@@ -298,8 +298,8 @@ void __init xfrm4_init(int rt_max_size)
298 xfrm4_state_init(); 298 xfrm4_state_init();
299 xfrm4_policy_init(); 299 xfrm4_policy_init();
300#ifdef CONFIG_SYSCTL 300#ifdef CONFIG_SYSCTL
301 sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv4_ctl_path, 301 sysctl_hdr = register_net_sysctl(&init_net, "net/ipv4",
302 xfrm4_policy_table); 302 xfrm4_policy_table);
303#endif 303#endif
304} 304}
305 305
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 7d5cb975cc6f..e3b3421f8dad 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -149,7 +149,7 @@ static void addrconf_type_change(struct net_device *dev,
149 unsigned long event); 149 unsigned long event);
150static int addrconf_ifdown(struct net_device *dev, int how); 150static int addrconf_ifdown(struct net_device *dev, int how);
151 151
152static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags); 152static void addrconf_dad_start(struct inet6_ifaddr *ifp);
153static void addrconf_dad_timer(unsigned long data); 153static void addrconf_dad_timer(unsigned long data);
154static void addrconf_dad_completed(struct inet6_ifaddr *ifp); 154static void addrconf_dad_completed(struct inet6_ifaddr *ifp);
155static void addrconf_dad_run(struct inet6_dev *idev); 155static void addrconf_dad_run(struct inet6_dev *idev);
@@ -336,10 +336,9 @@ void in6_dev_finish_destroy(struct inet6_dev *idev)
336 snmp6_free_dev(idev); 336 snmp6_free_dev(idev);
337 kfree_rcu(idev, rcu); 337 kfree_rcu(idev, rcu);
338} 338}
339
340EXPORT_SYMBOL(in6_dev_finish_destroy); 339EXPORT_SYMBOL(in6_dev_finish_destroy);
341 340
342static struct inet6_dev * ipv6_add_dev(struct net_device *dev) 341static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
343{ 342{
344 struct inet6_dev *ndev; 343 struct inet6_dev *ndev;
345 344
@@ -441,7 +440,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
441 return ndev; 440 return ndev;
442} 441}
443 442
444static struct inet6_dev * ipv6_find_idev(struct net_device *dev) 443static struct inet6_dev *ipv6_find_idev(struct net_device *dev)
445{ 444{
446 struct inet6_dev *idev; 445 struct inet6_dev *idev;
447 446
@@ -929,7 +928,7 @@ retry:
929 ift->tstamp = tmp_tstamp; 928 ift->tstamp = tmp_tstamp;
930 spin_unlock_bh(&ift->lock); 929 spin_unlock_bh(&ift->lock);
931 930
932 addrconf_dad_start(ift, 0); 931 addrconf_dad_start(ift);
933 in6_ifa_put(ift); 932 in6_ifa_put(ift);
934 in6_dev_put(idev); 933 in6_dev_put(idev);
935out: 934out:
@@ -1332,7 +1331,6 @@ int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev)
1332 rcu_read_unlock(); 1331 rcu_read_unlock();
1333 return onlink; 1332 return onlink;
1334} 1333}
1335
1336EXPORT_SYMBOL(ipv6_chk_prefix); 1334EXPORT_SYMBOL(ipv6_chk_prefix);
1337 1335
1338struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr, 1336struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr,
@@ -1522,7 +1520,7 @@ static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev)
1522 if (dev->addr_len != ARCNET_ALEN) 1520 if (dev->addr_len != ARCNET_ALEN)
1523 return -1; 1521 return -1;
1524 memset(eui, 0, 7); 1522 memset(eui, 0, 7);
1525 eui[7] = *(u8*)dev->dev_addr; 1523 eui[7] = *(u8 *)dev->dev_addr;
1526 return 0; 1524 return 0;
1527} 1525}
1528 1526
@@ -1667,7 +1665,8 @@ out:
1667 in6_dev_put(idev); 1665 in6_dev_put(idev);
1668} 1666}
1669 1667
1670static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr) { 1668static int __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
1669{
1671 int ret = 0; 1670 int ret = 0;
1672 1671
1673 if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0) 1672 if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0)
@@ -1908,7 +1907,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
1908 /* Try to figure out our local address for this prefix */ 1907 /* Try to figure out our local address for this prefix */
1909 1908
1910 if (pinfo->autoconf && in6_dev->cnf.autoconf) { 1909 if (pinfo->autoconf && in6_dev->cnf.autoconf) {
1911 struct inet6_ifaddr * ifp; 1910 struct inet6_ifaddr *ifp;
1912 struct in6_addr addr; 1911 struct in6_addr addr;
1913 int create = 0, update_lft = 0; 1912 int create = 0, update_lft = 0;
1914 1913
@@ -1957,7 +1956,7 @@ ok:
1957 1956
1958 update_lft = create = 1; 1957 update_lft = create = 1;
1959 ifp->cstamp = jiffies; 1958 ifp->cstamp = jiffies;
1960 addrconf_dad_start(ifp, RTF_ADDRCONF|RTF_PREFIX_RT); 1959 addrconf_dad_start(ifp);
1961 } 1960 }
1962 1961
1963 if (ifp) { 1962 if (ifp) {
@@ -2236,7 +2235,7 @@ static int inet6_addr_add(struct net *net, int ifindex, const struct in6_addr *p
2236 * that the Optimistic flag should not be set for 2235 * that the Optimistic flag should not be set for
2237 * manually configured addresses 2236 * manually configured addresses
2238 */ 2237 */
2239 addrconf_dad_start(ifp, 0); 2238 addrconf_dad_start(ifp);
2240 in6_ifa_put(ifp); 2239 in6_ifa_put(ifp);
2241 addrconf_verify(0); 2240 addrconf_verify(0);
2242 return 0; 2241 return 0;
@@ -2362,9 +2361,9 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)
2362 } 2361 }
2363 2362
2364 for_each_netdev(net, dev) { 2363 for_each_netdev(net, dev) {
2365 struct in_device * in_dev = __in_dev_get_rtnl(dev); 2364 struct in_device *in_dev = __in_dev_get_rtnl(dev);
2366 if (in_dev && (dev->flags & IFF_UP)) { 2365 if (in_dev && (dev->flags & IFF_UP)) {
2367 struct in_ifaddr * ifa; 2366 struct in_ifaddr *ifa;
2368 2367
2369 int flag = scope; 2368 int flag = scope;
2370 2369
@@ -2410,7 +2409,7 @@ static void init_loopback(struct net_device *dev)
2410 2409
2411static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr) 2410static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr)
2412{ 2411{
2413 struct inet6_ifaddr * ifp; 2412 struct inet6_ifaddr *ifp;
2414 u32 addr_flags = IFA_F_PERMANENT; 2413 u32 addr_flags = IFA_F_PERMANENT;
2415 2414
2416#ifdef CONFIG_IPV6_OPTIMISTIC_DAD 2415#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
@@ -2423,7 +2422,7 @@ static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr
2423 ifp = ipv6_add_addr(idev, addr, 64, IFA_LINK, addr_flags); 2422 ifp = ipv6_add_addr(idev, addr, 64, IFA_LINK, addr_flags);
2424 if (!IS_ERR(ifp)) { 2423 if (!IS_ERR(ifp)) {
2425 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0); 2424 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0);
2426 addrconf_dad_start(ifp, 0); 2425 addrconf_dad_start(ifp);
2427 in6_ifa_put(ifp); 2426 in6_ifa_put(ifp);
2428 } 2427 }
2429} 2428}
@@ -2431,7 +2430,7 @@ static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr
2431static void addrconf_dev_config(struct net_device *dev) 2430static void addrconf_dev_config(struct net_device *dev)
2432{ 2431{
2433 struct in6_addr addr; 2432 struct in6_addr addr;
2434 struct inet6_dev * idev; 2433 struct inet6_dev *idev;
2435 2434
2436 ASSERT_RTNL(); 2435 ASSERT_RTNL();
2437 2436
@@ -2567,7 +2566,7 @@ static void addrconf_ip6_tnl_config(struct net_device *dev)
2567} 2566}
2568 2567
2569static int addrconf_notify(struct notifier_block *this, unsigned long event, 2568static int addrconf_notify(struct notifier_block *this, unsigned long event,
2570 void * data) 2569 void *data)
2571{ 2570{
2572 struct net_device *dev = (struct net_device *) data; 2571 struct net_device *dev = (struct net_device *) data;
2573 struct inet6_dev *idev = __in6_dev_get(dev); 2572 struct inet6_dev *idev = __in6_dev_get(dev);
@@ -2918,7 +2917,7 @@ static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
2918 addrconf_mod_timer(ifp, AC_DAD, rand_num); 2917 addrconf_mod_timer(ifp, AC_DAD, rand_num);
2919} 2918}
2920 2919
2921static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags) 2920static void addrconf_dad_start(struct inet6_ifaddr *ifp)
2922{ 2921{
2923 struct inet6_dev *idev = ifp->idev; 2922 struct inet6_dev *idev = ifp->idev;
2924 struct net_device *dev = idev->dev; 2923 struct net_device *dev = idev->dev;
@@ -3791,7 +3790,7 @@ static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb)
3791 return inet6_dump_addr(skb, cb, type); 3790 return inet6_dump_addr(skb, cb, type);
3792} 3791}
3793 3792
3794static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr* nlh, 3793static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3795 void *arg) 3794 void *arg)
3796{ 3795{
3797 struct net *net = sock_net(in_skb->sk); 3796 struct net *net = sock_net(in_skb->sk);
@@ -3986,14 +3985,14 @@ static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev)
3986 struct nlattr *nla; 3985 struct nlattr *nla;
3987 struct ifla_cacheinfo ci; 3986 struct ifla_cacheinfo ci;
3988 3987
3989 NLA_PUT_U32(skb, IFLA_INET6_FLAGS, idev->if_flags); 3988 if (nla_put_u32(skb, IFLA_INET6_FLAGS, idev->if_flags))
3990 3989 goto nla_put_failure;
3991 ci.max_reasm_len = IPV6_MAXPLEN; 3990 ci.max_reasm_len = IPV6_MAXPLEN;
3992 ci.tstamp = cstamp_delta(idev->tstamp); 3991 ci.tstamp = cstamp_delta(idev->tstamp);
3993 ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time); 3992 ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
3994 ci.retrans_time = jiffies_to_msecs(idev->nd_parms->retrans_time); 3993 ci.retrans_time = jiffies_to_msecs(idev->nd_parms->retrans_time);
3995 NLA_PUT(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci); 3994 if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci))
3996 3995 goto nla_put_failure;
3997 nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32)); 3996 nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
3998 if (nla == NULL) 3997 if (nla == NULL)
3999 goto nla_put_failure; 3998 goto nla_put_failure;
@@ -4058,15 +4057,13 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
4058 hdr->ifi_flags = dev_get_flags(dev); 4057 hdr->ifi_flags = dev_get_flags(dev);
4059 hdr->ifi_change = 0; 4058 hdr->ifi_change = 0;
4060 4059
4061 NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name); 4060 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
4062 4061 (dev->addr_len &&
4063 if (dev->addr_len) 4062 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
4064 NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr); 4063 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
4065 4064 (dev->ifindex != dev->iflink &&
4066 NLA_PUT_U32(skb, IFLA_MTU, dev->mtu); 4065 nla_put_u32(skb, IFLA_LINK, dev->iflink)))
4067 if (dev->ifindex != dev->iflink) 4066 goto nla_put_failure;
4068 NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);
4069
4070 protoinfo = nla_nest_start(skb, IFLA_PROTINFO); 4067 protoinfo = nla_nest_start(skb, IFLA_PROTINFO);
4071 if (protoinfo == NULL) 4068 if (protoinfo == NULL)
4072 goto nla_put_failure; 4069 goto nla_put_failure;
@@ -4179,12 +4176,12 @@ static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
4179 if (pinfo->autoconf) 4176 if (pinfo->autoconf)
4180 pmsg->prefix_flags |= IF_PREFIX_AUTOCONF; 4177 pmsg->prefix_flags |= IF_PREFIX_AUTOCONF;
4181 4178
4182 NLA_PUT(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix); 4179 if (nla_put(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix))
4183 4180 goto nla_put_failure;
4184 ci.preferred_time = ntohl(pinfo->prefered); 4181 ci.preferred_time = ntohl(pinfo->prefered);
4185 ci.valid_time = ntohl(pinfo->valid); 4182 ci.valid_time = ntohl(pinfo->valid);
4186 NLA_PUT(skb, PREFIX_CACHEINFO, sizeof(ci), &ci); 4183 if (nla_put(skb, PREFIX_CACHEINFO, sizeof(ci), &ci))
4187 4184 goto nla_put_failure;
4188 return nlmsg_end(skb, nlh); 4185 return nlmsg_end(skb, nlh);
4189 4186
4190nla_put_failure: 4187nla_put_failure:
@@ -4368,7 +4365,6 @@ static struct addrconf_sysctl_table
4368{ 4365{
4369 struct ctl_table_header *sysctl_header; 4366 struct ctl_table_header *sysctl_header;
4370 ctl_table addrconf_vars[DEVCONF_MAX+1]; 4367 ctl_table addrconf_vars[DEVCONF_MAX+1];
4371 char *dev_name;
4372} addrconf_sysctl __read_mostly = { 4368} addrconf_sysctl __read_mostly = {
4373 .sysctl_header = NULL, 4369 .sysctl_header = NULL,
4374 .addrconf_vars = { 4370 .addrconf_vars = {
@@ -4597,17 +4593,7 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name,
4597{ 4593{
4598 int i; 4594 int i;
4599 struct addrconf_sysctl_table *t; 4595 struct addrconf_sysctl_table *t;
4600 4596 char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
4601#define ADDRCONF_CTL_PATH_DEV 3
4602
4603 struct ctl_path addrconf_ctl_path[] = {
4604 { .procname = "net", },
4605 { .procname = "ipv6", },
4606 { .procname = "conf", },
4607 { /* to be set */ },
4608 { },
4609 };
4610
4611 4597
4612 t = kmemdup(&addrconf_sysctl, sizeof(*t), GFP_KERNEL); 4598 t = kmemdup(&addrconf_sysctl, sizeof(*t), GFP_KERNEL);
4613 if (t == NULL) 4599 if (t == NULL)
@@ -4619,27 +4605,15 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name,
4619 t->addrconf_vars[i].extra2 = net; 4605 t->addrconf_vars[i].extra2 = net;
4620 } 4606 }
4621 4607
4622 /* 4608 snprintf(path, sizeof(path), "net/ipv6/conf/%s", dev_name);
4623 * Make a copy of dev_name, because '.procname' is regarded as const
4624 * by sysctl and we wouldn't want anyone to change it under our feet
4625 * (see SIOCSIFNAME).
4626 */
4627 t->dev_name = kstrdup(dev_name, GFP_KERNEL);
4628 if (!t->dev_name)
4629 goto free;
4630
4631 addrconf_ctl_path[ADDRCONF_CTL_PATH_DEV].procname = t->dev_name;
4632 4609
4633 t->sysctl_header = register_net_sysctl_table(net, addrconf_ctl_path, 4610 t->sysctl_header = register_net_sysctl(net, path, t->addrconf_vars);
4634 t->addrconf_vars);
4635 if (t->sysctl_header == NULL) 4611 if (t->sysctl_header == NULL)
4636 goto free_procname; 4612 goto free;
4637 4613
4638 p->sysctl = t; 4614 p->sysctl = t;
4639 return 0; 4615 return 0;
4640 4616
4641free_procname:
4642 kfree(t->dev_name);
4643free: 4617free:
4644 kfree(t); 4618 kfree(t);
4645out: 4619out:
@@ -4656,7 +4630,6 @@ static void __addrconf_sysctl_unregister(struct ipv6_devconf *p)
4656 t = p->sysctl; 4630 t = p->sysctl;
4657 p->sysctl = NULL; 4631 p->sysctl = NULL;
4658 unregister_net_sysctl_table(t->sysctl_header); 4632 unregister_net_sysctl_table(t->sysctl_header);
4659 kfree(t->dev_name);
4660 kfree(t); 4633 kfree(t);
4661} 4634}
4662 4635
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index 399287e595d7..d051e5f4bf34 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -8,9 +8,9 @@
8 8
9#define IPV6_ADDR_SCOPE_TYPE(scope) ((scope) << 16) 9#define IPV6_ADDR_SCOPE_TYPE(scope) ((scope) << 16)
10 10
11static inline unsigned ipv6_addr_scope2type(unsigned scope) 11static inline unsigned int ipv6_addr_scope2type(unsigned int scope)
12{ 12{
13 switch(scope) { 13 switch (scope) {
14 case IPV6_ADDR_SCOPE_NODELOCAL: 14 case IPV6_ADDR_SCOPE_NODELOCAL:
15 return (IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_NODELOCAL) | 15 return (IPV6_ADDR_SCOPE_TYPE(IPV6_ADDR_SCOPE_NODELOCAL) |
16 IPV6_ADDR_LOOPBACK); 16 IPV6_ADDR_LOOPBACK);
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 8ed1b930e75f..0ad046c7ae95 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -180,7 +180,7 @@ lookup_protocol:
180 err = 0; 180 err = 0;
181 sk->sk_no_check = answer_no_check; 181 sk->sk_no_check = answer_no_check;
182 if (INET_PROTOSW_REUSE & answer_flags) 182 if (INET_PROTOSW_REUSE & answer_flags)
183 sk->sk_reuse = 1; 183 sk->sk_reuse = SK_CAN_REUSE;
184 184
185 inet = inet_sk(sk); 185 inet = inet_sk(sk);
186 inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0; 186 inet->is_icsk = (INET_PROTOSW_ICSK & answer_flags) != 0;
@@ -1111,11 +1111,6 @@ static int __init inet6_init(void)
1111 if (err) 1111 if (err)
1112 goto out_sock_register_fail; 1112 goto out_sock_register_fail;
1113 1113
1114#ifdef CONFIG_SYSCTL
1115 err = ipv6_static_sysctl_register();
1116 if (err)
1117 goto static_sysctl_fail;
1118#endif
1119 tcpv6_prot.sysctl_mem = init_net.ipv4.sysctl_tcp_mem; 1114 tcpv6_prot.sysctl_mem = init_net.ipv4.sysctl_tcp_mem;
1120 1115
1121 /* 1116 /*
@@ -1242,10 +1237,6 @@ ipmr_fail:
1242icmp_fail: 1237icmp_fail:
1243 unregister_pernet_subsys(&inet6_net_ops); 1238 unregister_pernet_subsys(&inet6_net_ops);
1244register_pernet_fail: 1239register_pernet_fail:
1245#ifdef CONFIG_SYSCTL
1246 ipv6_static_sysctl_unregister();
1247static_sysctl_fail:
1248#endif
1249 sock_unregister(PF_INET6); 1240 sock_unregister(PF_INET6);
1250 rtnl_unregister_all(PF_INET6); 1241 rtnl_unregister_all(PF_INET6);
1251out_sock_register_fail: 1242out_sock_register_fail:
@@ -1272,9 +1263,6 @@ static void __exit inet6_exit(void)
1272 /* Disallow any further netlink messages */ 1263 /* Disallow any further netlink messages */
1273 rtnl_unregister_all(PF_INET6); 1264 rtnl_unregister_all(PF_INET6);
1274 1265
1275#ifdef CONFIG_SYSCTL
1276 ipv6_sysctl_unregister();
1277#endif
1278 udpv6_exit(); 1266 udpv6_exit();
1279 udplitev6_exit(); 1267 udplitev6_exit();
1280 tcpv6_exit(); 1268 tcpv6_exit();
@@ -1302,9 +1290,6 @@ static void __exit inet6_exit(void)
1302 rawv6_exit(); 1290 rawv6_exit();
1303 1291
1304 unregister_pernet_subsys(&inet6_net_ops); 1292 unregister_pernet_subsys(&inet6_net_ops);
1305#ifdef CONFIG_SYSCTL
1306 ipv6_static_sysctl_unregister();
1307#endif
1308 proto_unregister(&rawv6_prot); 1293 proto_unregister(&rawv6_prot);
1309 proto_unregister(&udplitev6_prot); 1294 proto_unregister(&udplitev6_prot);
1310 proto_unregister(&udpv6_prot); 1295 proto_unregister(&udpv6_prot);
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 76832c8dc89d..b8b61ac88bc2 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -22,6 +22,7 @@
22#include <linux/ipv6.h> 22#include <linux/ipv6.h>
23#include <linux/route.h> 23#include <linux/route.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/export.h>
25 26
26#include <net/ipv6.h> 27#include <net/ipv6.h>
27#include <net/ndisc.h> 28#include <net/ndisc.h>
@@ -98,7 +99,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
98 sin.sin_port = usin->sin6_port; 99 sin.sin_port = usin->sin6_port;
99 100
100 err = ip4_datagram_connect(sk, 101 err = ip4_datagram_connect(sk,
101 (struct sockaddr*) &sin, 102 (struct sockaddr *) &sin,
102 sizeof(sin)); 103 sizeof(sin));
103 104
104ipv4_connected: 105ipv4_connected:
@@ -202,6 +203,7 @@ out:
202 fl6_sock_release(flowlabel); 203 fl6_sock_release(flowlabel);
203 return err; 204 return err;
204} 205}
206EXPORT_SYMBOL_GPL(ip6_datagram_connect);
205 207
206void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, 208void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
207 __be16 port, u32 info, u8 *payload) 209 __be16 port, u32 info, u8 *payload)
@@ -414,6 +416,7 @@ out_free_skb:
414out: 416out:
415 return err; 417 return err;
416} 418}
419EXPORT_SYMBOL_GPL(ipv6_recv_error);
417 420
418/* 421/*
419 * Handle IPV6_RECVPATHMTU 422 * Handle IPV6_RECVPATHMTU
@@ -515,10 +518,10 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
515 u8 nexthdr = ipv6_hdr(skb)->nexthdr; 518 u8 nexthdr = ipv6_hdr(skb)->nexthdr;
516 519
517 while (off <= opt->lastopt) { 520 while (off <= opt->lastopt) {
518 unsigned len; 521 unsigned int len;
519 u8 *ptr = nh + off; 522 u8 *ptr = nh + off;
520 523
521 switch(nexthdr) { 524 switch (nexthdr) {
522 case IPPROTO_DSTOPTS: 525 case IPPROTO_DSTOPTS:
523 nexthdr = ptr[0]; 526 nexthdr = ptr[0];
524 len = (ptr[1] + 1) << 3; 527 len = (ptr[1] + 1) << 3;
@@ -827,9 +830,8 @@ int datagram_send_ctl(struct net *net, struct sock *sk,
827 int tc; 830 int tc;
828 831
829 err = -EINVAL; 832 err = -EINVAL;
830 if (cmsg->cmsg_len != CMSG_LEN(sizeof(int))) { 833 if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
831 goto exit_f; 834 goto exit_f;
832 }
833 835
834 tc = *(int *)CMSG_DATA(cmsg); 836 tc = *(int *)CMSG_DATA(cmsg);
835 if (tc < -1 || tc > 0xff) 837 if (tc < -1 || tc > 0xff)
@@ -846,9 +848,8 @@ int datagram_send_ctl(struct net *net, struct sock *sk,
846 int df; 848 int df;
847 849
848 err = -EINVAL; 850 err = -EINVAL;
849 if (cmsg->cmsg_len != CMSG_LEN(sizeof(int))) { 851 if (cmsg->cmsg_len != CMSG_LEN(sizeof(int)))
850 goto exit_f; 852 goto exit_f;
851 }
852 853
853 df = *(int *)CMSG_DATA(cmsg); 854 df = *(int *)CMSG_DATA(cmsg);
854 if (df < 0 || df > 1) 855 if (df < 0 || df > 1)
@@ -870,3 +871,4 @@ int datagram_send_ctl(struct net *net, struct sock *sk,
870exit_f: 871exit_f:
871 return err; 872 return err;
872} 873}
874EXPORT_SYMBOL_GPL(datagram_send_ctl);
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 3d641b6e9b09..a93bd231eca1 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -153,6 +153,7 @@ static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff *skb)
153 153
154 while (len > 0) { 154 while (len > 0) {
155 int optlen = nh[off + 1] + 2; 155 int optlen = nh[off + 1] + 2;
156 int i;
156 157
157 switch (nh[off]) { 158 switch (nh[off]) {
158 case IPV6_TLV_PAD0: 159 case IPV6_TLV_PAD0:
@@ -160,6 +161,21 @@ static int ip6_parse_tlv(struct tlvtype_proc *procs, struct sk_buff *skb)
160 break; 161 break;
161 162
162 case IPV6_TLV_PADN: 163 case IPV6_TLV_PADN:
164 /* RFC 2460 states that the purpose of PadN is
165 * to align the containing header to multiples
166 * of 8. 7 is therefore the highest valid value.
167 * See also RFC 4942, Section 2.1.9.5.
168 */
169 if (optlen > 7)
170 goto bad;
171 /* RFC 4942 recommends receiving hosts to
172 * actively check PadN payload to contain
173 * only zeroes.
174 */
175 for (i = 2; i < optlen; i++) {
176 if (nh[off + i] != 0)
177 goto bad;
178 }
163 break; 179 break;
164 180
165 default: /* Other TLV code so scan list */ 181 default: /* Other TLV code so scan list */
@@ -722,7 +738,6 @@ void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt,
722 if (opt->hopopt) 738 if (opt->hopopt)
723 ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt); 739 ipv6_push_exthdr(skb, proto, NEXTHDR_HOP, opt->hopopt);
724} 740}
725
726EXPORT_SYMBOL(ipv6_push_nfrag_opts); 741EXPORT_SYMBOL(ipv6_push_nfrag_opts);
727 742
728void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto) 743void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, u8 *proto)
@@ -738,20 +753,19 @@ ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
738 753
739 opt2 = sock_kmalloc(sk, opt->tot_len, GFP_ATOMIC); 754 opt2 = sock_kmalloc(sk, opt->tot_len, GFP_ATOMIC);
740 if (opt2) { 755 if (opt2) {
741 long dif = (char*)opt2 - (char*)opt; 756 long dif = (char *)opt2 - (char *)opt;
742 memcpy(opt2, opt, opt->tot_len); 757 memcpy(opt2, opt, opt->tot_len);
743 if (opt2->hopopt) 758 if (opt2->hopopt)
744 *((char**)&opt2->hopopt) += dif; 759 *((char **)&opt2->hopopt) += dif;
745 if (opt2->dst0opt) 760 if (opt2->dst0opt)
746 *((char**)&opt2->dst0opt) += dif; 761 *((char **)&opt2->dst0opt) += dif;
747 if (opt2->dst1opt) 762 if (opt2->dst1opt)
748 *((char**)&opt2->dst1opt) += dif; 763 *((char **)&opt2->dst1opt) += dif;
749 if (opt2->srcrt) 764 if (opt2->srcrt)
750 *((char**)&opt2->srcrt) += dif; 765 *((char **)&opt2->srcrt) += dif;
751 } 766 }
752 return opt2; 767 return opt2;
753} 768}
754
755EXPORT_SYMBOL_GPL(ipv6_dup_options); 769EXPORT_SYMBOL_GPL(ipv6_dup_options);
756 770
757static int ipv6_renew_option(void *ohdr, 771static int ipv6_renew_option(void *ohdr,
@@ -869,6 +883,7 @@ struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
869 883
870 return opt; 884 return opt;
871} 885}
886EXPORT_SYMBOL_GPL(ipv6_fixup_options);
872 887
873/** 888/**
874 * fl6_update_dst - update flowi destination address with info given 889 * fl6_update_dst - update flowi destination address with info given
@@ -892,5 +907,4 @@ struct in6_addr *fl6_update_dst(struct flowi6 *fl6,
892 fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr; 907 fl6->daddr = *((struct rt0_hdr *)opt->srcrt)->addr;
893 return orig; 908 return orig;
894} 909}
895
896EXPORT_SYMBOL_GPL(fl6_update_dst); 910EXPORT_SYMBOL_GPL(fl6_update_dst);
diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
index 72957f4a7c6c..7b1a884634d5 100644
--- a/net/ipv6/exthdrs_core.c
+++ b/net/ipv6/exthdrs_core.c
@@ -21,6 +21,7 @@ int ipv6_ext_hdr(u8 nexthdr)
21 (nexthdr == NEXTHDR_NONE) || 21 (nexthdr == NEXTHDR_NONE) ||
22 (nexthdr == NEXTHDR_DEST); 22 (nexthdr == NEXTHDR_DEST);
23} 23}
24EXPORT_SYMBOL(ipv6_ext_hdr);
24 25
25/* 26/*
26 * Skip any extension headers. This is used by the ICMP module. 27 * Skip any extension headers. This is used by the ICMP module.
@@ -109,6 +110,4 @@ int ipv6_skip_exthdr(const struct sk_buff *skb, int start, u8 *nexthdrp,
109 *nexthdrp = nexthdr; 110 *nexthdrp = nexthdr;
110 return start; 111 return start;
111} 112}
112
113EXPORT_SYMBOL(ipv6_ext_hdr);
114EXPORT_SYMBOL(ipv6_skip_exthdr); 113EXPORT_SYMBOL(ipv6_skip_exthdr);
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index b6c573152067..0ff1cfd55bc4 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -22,8 +22,7 @@
22#include <net/ip6_route.h> 22#include <net/ip6_route.h>
23#include <net/netlink.h> 23#include <net/netlink.h>
24 24
25struct fib6_rule 25struct fib6_rule {
26{
27 struct fib_rule common; 26 struct fib_rule common;
28 struct rt6key src; 27 struct rt6key src;
29 struct rt6key dst; 28 struct rt6key dst;
@@ -215,14 +214,13 @@ static int fib6_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
215 frh->src_len = rule6->src.plen; 214 frh->src_len = rule6->src.plen;
216 frh->tos = rule6->tclass; 215 frh->tos = rule6->tclass;
217 216
218 if (rule6->dst.plen) 217 if ((rule6->dst.plen &&
219 NLA_PUT(skb, FRA_DST, sizeof(struct in6_addr), 218 nla_put(skb, FRA_DST, sizeof(struct in6_addr),
220 &rule6->dst.addr); 219 &rule6->dst.addr)) ||
221 220 (rule6->src.plen &&
222 if (rule6->src.plen) 221 nla_put(skb, FRA_SRC, sizeof(struct in6_addr),
223 NLA_PUT(skb, FRA_SRC, sizeof(struct in6_addr), 222 &rule6->src.addr)))
224 &rule6->src.addr); 223 goto nla_put_failure;
225
226 return 0; 224 return 0;
227 225
228nla_put_failure: 226nla_put_failure:
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 27ac95a63429..cc079d8d4681 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -498,7 +498,7 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
498 err = ip6_append_data(sk, icmpv6_getfrag, &msg, 498 err = ip6_append_data(sk, icmpv6_getfrag, &msg,
499 len + sizeof(struct icmp6hdr), 499 len + sizeof(struct icmp6hdr),
500 sizeof(struct icmp6hdr), hlimit, 500 sizeof(struct icmp6hdr), hlimit,
501 np->tclass, NULL, &fl6, (struct rt6_info*)dst, 501 np->tclass, NULL, &fl6, (struct rt6_info *)dst,
502 MSG_DONTWAIT, np->dontfrag); 502 MSG_DONTWAIT, np->dontfrag);
503 if (err) { 503 if (err) {
504 ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS); 504 ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
@@ -579,7 +579,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
579 579
580 err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr), 580 err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr),
581 sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl6, 581 sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl6,
582 (struct rt6_info*)dst, MSG_DONTWAIT, 582 (struct rt6_info *)dst, MSG_DONTWAIT,
583 np->dontfrag); 583 np->dontfrag);
584 584
585 if (err) { 585 if (err) {
@@ -950,7 +950,6 @@ int icmpv6_err_convert(u8 type, u8 code, int *err)
950 950
951 return fatal; 951 return fatal;
952} 952}
953
954EXPORT_SYMBOL(icmpv6_err_convert); 953EXPORT_SYMBOL(icmpv6_err_convert);
955 954
956#ifdef CONFIG_SYSCTL 955#ifdef CONFIG_SYSCTL
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 02dd203d9eac..e6cee5292a0b 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -28,7 +28,7 @@
28#include <net/inet6_connection_sock.h> 28#include <net/inet6_connection_sock.h>
29 29
30int inet6_csk_bind_conflict(const struct sock *sk, 30int inet6_csk_bind_conflict(const struct sock *sk,
31 const struct inet_bind_bucket *tb) 31 const struct inet_bind_bucket *tb, bool relax)
32{ 32{
33 const struct sock *sk2; 33 const struct sock *sk2;
34 const struct hlist_node *node; 34 const struct hlist_node *node;
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index b7867a1215b1..cb43df690210 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -294,6 +294,7 @@ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions * opt_space,
294 opt_space->opt_flen = fopt->opt_flen; 294 opt_space->opt_flen = fopt->opt_flen;
295 return opt_space; 295 return opt_space;
296} 296}
297EXPORT_SYMBOL_GPL(fl6_merge_options);
297 298
298static unsigned long check_linger(unsigned long ttl) 299static unsigned long check_linger(unsigned long ttl)
299{ 300{
@@ -705,9 +706,9 @@ static int ip6fl_seq_show(struct seq_file *seq, void *v)
705 struct ip6_flowlabel *fl = v; 706 struct ip6_flowlabel *fl = v;
706 seq_printf(seq, 707 seq_printf(seq,
707 "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n", 708 "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
708 (unsigned)ntohl(fl->label), 709 (unsigned int)ntohl(fl->label),
709 fl->share, 710 fl->share,
710 (unsigned)fl->owner, 711 (int)fl->owner,
711 atomic_read(&fl->users), 712 atomic_read(&fl->users),
712 fl->linger/HZ, 713 fl->linger/HZ,
713 (long)(fl->expires - jiffies)/HZ, 714 (long)(fl->expires - jiffies)/HZ,
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index b7ca46161cb9..d8e05af2c4bb 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -210,7 +210,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
210 kfree_skb(skb); 210 kfree_skb(skb);
211 return -ENOBUFS; 211 return -ENOBUFS;
212 } 212 }
213 kfree_skb(skb); 213 consume_skb(skb);
214 skb = skb2; 214 skb = skb2;
215 skb_set_owner_w(skb, sk); 215 skb_set_owner_w(skb, sk);
216 } 216 }
@@ -889,7 +889,7 @@ slow_path:
889 } 889 }
890 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), 890 IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
891 IPSTATS_MIB_FRAGOKS); 891 IPSTATS_MIB_FRAGOKS);
892 kfree_skb(skb); 892 consume_skb(skb);
893 return err; 893 return err;
894 894
895fail: 895fail:
@@ -1535,6 +1535,7 @@ error:
1535 IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); 1535 IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
1536 return err; 1536 return err;
1537} 1537}
1538EXPORT_SYMBOL_GPL(ip6_append_data);
1538 1539
1539static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np) 1540static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
1540{ 1541{
@@ -1638,6 +1639,7 @@ error:
1638 IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS); 1639 IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
1639 goto out; 1640 goto out;
1640} 1641}
1642EXPORT_SYMBOL_GPL(ip6_push_pending_frames);
1641 1643
1642void ip6_flush_pending_frames(struct sock *sk) 1644void ip6_flush_pending_frames(struct sock *sk)
1643{ 1645{
@@ -1652,3 +1654,4 @@ void ip6_flush_pending_frames(struct sock *sk)
1652 1654
1653 ip6_cork_release(inet_sk(sk), inet6_sk(sk)); 1655 ip6_cork_release(inet_sk(sk), inet6_sk(sk));
1654} 1656}
1657EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index aa21da6a09cd..5df487c81ed9 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -198,7 +198,7 @@ ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct ip6_tnl_parm *p)
198{ 198{
199 const struct in6_addr *remote = &p->raddr; 199 const struct in6_addr *remote = &p->raddr;
200 const struct in6_addr *local = &p->laddr; 200 const struct in6_addr *local = &p->laddr;
201 unsigned h = 0; 201 unsigned int h = 0;
202 int prio = 0; 202 int prio = 0;
203 203
204 if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) { 204 if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
@@ -954,7 +954,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
954 954
955 if (skb->sk) 955 if (skb->sk)
956 skb_set_owner_w(new_skb, skb->sk); 956 skb_set_owner_w(new_skb, skb->sk);
957 kfree_skb(skb); 957 consume_skb(skb);
958 skb = new_skb; 958 skb = new_skb;
959 } 959 }
960 skb_dst_drop(skb); 960 skb_dst_drop(skb);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 8110362e0af5..efc0098b59dd 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -2215,14 +2215,15 @@ static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
2215 rtm->rtm_src_len = 128; 2215 rtm->rtm_src_len = 128;
2216 rtm->rtm_tos = 0; 2216 rtm->rtm_tos = 0;
2217 rtm->rtm_table = mrt->id; 2217 rtm->rtm_table = mrt->id;
2218 NLA_PUT_U32(skb, RTA_TABLE, mrt->id); 2218 if (nla_put_u32(skb, RTA_TABLE, mrt->id))
2219 goto nla_put_failure;
2219 rtm->rtm_scope = RT_SCOPE_UNIVERSE; 2220 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
2220 rtm->rtm_protocol = RTPROT_UNSPEC; 2221 rtm->rtm_protocol = RTPROT_UNSPEC;
2221 rtm->rtm_flags = 0; 2222 rtm->rtm_flags = 0;
2222 2223
2223 NLA_PUT(skb, RTA_SRC, 16, &c->mf6c_origin); 2224 if (nla_put(skb, RTA_SRC, 16, &c->mf6c_origin) ||
2224 NLA_PUT(skb, RTA_DST, 16, &c->mf6c_mcastgrp); 2225 nla_put(skb, RTA_DST, 16, &c->mf6c_mcastgrp))
2225 2226 goto nla_put_failure;
2226 if (__ip6mr_fill_mroute(mrt, skb, c, rtm) < 0) 2227 if (__ip6mr_fill_mroute(mrt, skb, c, rtm) < 0)
2227 goto nla_put_failure; 2228 goto nla_put_failure;
2228 2229
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 63dd1f89ed7d..ba6d13d1f1e1 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -678,7 +678,6 @@ done:
678 } 678 }
679 case MCAST_MSFILTER: 679 case MCAST_MSFILTER:
680 { 680 {
681 extern int sysctl_mld_max_msf;
682 struct group_filter *gsf; 681 struct group_filter *gsf;
683 682
684 if (optlen < GROUP_FILTER_SIZE(0)) 683 if (optlen < GROUP_FILTER_SIZE(0))
@@ -943,7 +942,7 @@ static int ipv6_getsockopt_sticky(struct sock *sk, struct ipv6_txoptions *opt,
943} 942}
944 943
945static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, 944static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
946 char __user *optval, int __user *optlen, unsigned flags) 945 char __user *optval, int __user *optlen, unsigned int flags)
947{ 946{
948 struct ipv6_pinfo *np = inet6_sk(sk); 947 struct ipv6_pinfo *np = inet6_sk(sk);
949 int len; 948 int len;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index b2869cab2092..7dfb89f2bae5 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1061,7 +1061,7 @@ static int mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
1061 if (psf->sf_count[MCAST_INCLUDE] || 1061 if (psf->sf_count[MCAST_INCLUDE] ||
1062 pmc->mca_sfcount[MCAST_EXCLUDE] != 1062 pmc->mca_sfcount[MCAST_EXCLUDE] !=
1063 psf->sf_count[MCAST_EXCLUDE]) 1063 psf->sf_count[MCAST_EXCLUDE])
1064 continue; 1064 break;
1065 if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) { 1065 if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
1066 scount++; 1066 scount++;
1067 break; 1067 break;
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 176b469322ac..35615c6358b8 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -15,6 +15,7 @@
15/* 15/*
16 * Changes: 16 * Changes:
17 * 17 *
18 * Alexey I. Froloff : RFC6106 (DNSSL) support
18 * Pierre Ynard : export userland ND options 19 * Pierre Ynard : export userland ND options
19 * through netlink (RDNSS support) 20 * through netlink (RDNSS support)
20 * Lars Fenneberg : fixed MTU setting on receipt 21 * Lars Fenneberg : fixed MTU setting on receipt
@@ -228,7 +229,8 @@ static struct nd_opt_hdr *ndisc_next_option(struct nd_opt_hdr *cur,
228 229
229static inline int ndisc_is_useropt(struct nd_opt_hdr *opt) 230static inline int ndisc_is_useropt(struct nd_opt_hdr *opt)
230{ 231{
231 return opt->nd_opt_type == ND_OPT_RDNSS; 232 return opt->nd_opt_type == ND_OPT_RDNSS ||
233 opt->nd_opt_type == ND_OPT_DNSSL;
232} 234}
233 235
234static struct nd_opt_hdr *ndisc_next_useropt(struct nd_opt_hdr *cur, 236static struct nd_opt_hdr *ndisc_next_useropt(struct nd_opt_hdr *cur,
@@ -1099,8 +1101,9 @@ static void ndisc_ra_useropt(struct sk_buff *ra, struct nd_opt_hdr *opt)
1099 1101
1100 memcpy(ndmsg + 1, opt, opt->nd_opt_len << 3); 1102 memcpy(ndmsg + 1, opt, opt->nd_opt_len << 3);
1101 1103
1102 NLA_PUT(skb, NDUSEROPT_SRCADDR, sizeof(struct in6_addr), 1104 if (nla_put(skb, NDUSEROPT_SRCADDR, sizeof(struct in6_addr),
1103 &ipv6_hdr(ra)->saddr); 1105 &ipv6_hdr(ra)->saddr))
1106 goto nla_put_failure;
1104 nlmsg_end(skb, nlh); 1107 nlmsg_end(skb, nlh);
1105 1108
1106 rtnl_notify(skb, net, 0, RTNLGRP_ND_USEROPT, NULL, GFP_ATOMIC); 1109 rtnl_notify(skb, net, 0, RTNLGRP_ND_USEROPT, NULL, GFP_ATOMIC);
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index a34c9e4c792c..3ca9303b3a19 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -588,7 +588,7 @@ static int __init ip6_queue_init(void)
588#endif 588#endif
589 register_netdevice_notifier(&ipq_dev_notifier); 589 register_netdevice_notifier(&ipq_dev_notifier);
590#ifdef CONFIG_SYSCTL 590#ifdef CONFIG_SYSCTL
591 ipq_sysctl_header = register_sysctl_paths(net_ipv6_ctl_path, ipq_table); 591 ipq_sysctl_header = register_net_sysctl(&init_net, "net/ipv6", ipq_table);
592#endif 592#endif
593 status = nf_register_queue_handler(NFPROTO_IPV6, &nfqh); 593 status = nf_register_queue_handler(NFPROTO_IPV6, &nfqh);
594 if (status < 0) { 594 if (status < 0) {
@@ -599,7 +599,7 @@ static int __init ip6_queue_init(void)
599 599
600cleanup_sysctl: 600cleanup_sysctl:
601#ifdef CONFIG_SYSCTL 601#ifdef CONFIG_SYSCTL
602 unregister_sysctl_table(ipq_sysctl_header); 602 unregister_net_sysctl_table(ipq_sysctl_header);
603#endif 603#endif
604 unregister_netdevice_notifier(&ipq_dev_notifier); 604 unregister_netdevice_notifier(&ipq_dev_notifier);
605 proc_net_remove(&init_net, IPQ_PROC_FS_NAME); 605 proc_net_remove(&init_net, IPQ_PROC_FS_NAME);
@@ -621,7 +621,7 @@ static void __exit ip6_queue_fini(void)
621 ipq_flush(NULL, 0); 621 ipq_flush(NULL, 0);
622 622
623#ifdef CONFIG_SYSCTL 623#ifdef CONFIG_SYSCTL
624 unregister_sysctl_table(ipq_sysctl_header); 624 unregister_net_sysctl_table(ipq_sysctl_header);
625#endif 625#endif
626 unregister_netdevice_notifier(&ipq_dev_notifier); 626 unregister_netdevice_notifier(&ipq_dev_notifier);
627 proc_net_remove(&init_net, IPQ_PROC_FS_NAME); 627 proc_net_remove(&init_net, IPQ_PROC_FS_NAME);
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 9d4e15559319..d4e350f72bbb 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -396,7 +396,7 @@ ip6t_do_table(struct sk_buff *skb,
396 if (v < 0) { 396 if (v < 0) {
397 /* Pop from stack? */ 397 /* Pop from stack? */
398 if (v != XT_RETURN) { 398 if (v != XT_RETURN) {
399 verdict = (unsigned)(-v) - 1; 399 verdict = (unsigned int)(-v) - 1;
400 break; 400 break;
401 } 401 }
402 if (*stackptr <= origptr) 402 if (*stackptr <= origptr)
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 4111050a9fc5..fe925e492520 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -278,10 +278,11 @@ static struct nf_hook_ops ipv6_conntrack_ops[] __read_mostly = {
278static int ipv6_tuple_to_nlattr(struct sk_buff *skb, 278static int ipv6_tuple_to_nlattr(struct sk_buff *skb,
279 const struct nf_conntrack_tuple *tuple) 279 const struct nf_conntrack_tuple *tuple)
280{ 280{
281 NLA_PUT(skb, CTA_IP_V6_SRC, sizeof(u_int32_t) * 4, 281 if (nla_put(skb, CTA_IP_V6_SRC, sizeof(u_int32_t) * 4,
282 &tuple->src.u3.ip6); 282 &tuple->src.u3.ip6) ||
283 NLA_PUT(skb, CTA_IP_V6_DST, sizeof(u_int32_t) * 4, 283 nla_put(skb, CTA_IP_V6_DST, sizeof(u_int32_t) * 4,
284 &tuple->dst.u3.ip6); 284 &tuple->dst.u3.ip6))
285 goto nla_put_failure;
285 return 0; 286 return 0;
286 287
287nla_put_failure: 288nla_put_failure:
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 92cc9f2931ae..3e81904fbbcd 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -234,10 +234,10 @@ icmpv6_error(struct net *net, struct nf_conn *tmpl,
234static int icmpv6_tuple_to_nlattr(struct sk_buff *skb, 234static int icmpv6_tuple_to_nlattr(struct sk_buff *skb,
235 const struct nf_conntrack_tuple *t) 235 const struct nf_conntrack_tuple *t)
236{ 236{
237 NLA_PUT_BE16(skb, CTA_PROTO_ICMPV6_ID, t->src.u.icmp.id); 237 if (nla_put_be16(skb, CTA_PROTO_ICMPV6_ID, t->src.u.icmp.id) ||
238 NLA_PUT_U8(skb, CTA_PROTO_ICMPV6_TYPE, t->dst.u.icmp.type); 238 nla_put_u8(skb, CTA_PROTO_ICMPV6_TYPE, t->dst.u.icmp.type) ||
239 NLA_PUT_U8(skb, CTA_PROTO_ICMPV6_CODE, t->dst.u.icmp.code); 239 nla_put_u8(skb, CTA_PROTO_ICMPV6_CODE, t->dst.u.icmp.code))
240 240 goto nla_put_failure;
241 return 0; 241 return 0;
242 242
243nla_put_failure: 243nla_put_failure:
@@ -300,8 +300,8 @@ icmpv6_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
300{ 300{
301 const unsigned int *timeout = data; 301 const unsigned int *timeout = data;
302 302
303 NLA_PUT_BE32(skb, CTA_TIMEOUT_ICMPV6_TIMEOUT, htonl(*timeout / HZ)); 303 if (nla_put_be32(skb, CTA_TIMEOUT_ICMPV6_TIMEOUT, htonl(*timeout / HZ)))
304 304 goto nla_put_failure;
305 return 0; 305 return 0;
306 306
307nla_put_failure: 307nla_put_failure:
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 38f00b0298d3..48a2be1b7c70 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -626,8 +626,8 @@ int nf_ct_frag6_init(void)
626 inet_frags_init(&nf_frags); 626 inet_frags_init(&nf_frags);
627 627
628#ifdef CONFIG_SYSCTL 628#ifdef CONFIG_SYSCTL
629 nf_ct_frag6_sysctl_header = register_sysctl_paths(nf_net_netfilter_sysctl_path, 629 nf_ct_frag6_sysctl_header = register_net_sysctl(&init_net, "net/netfilter",
630 nf_ct_frag6_sysctl_table); 630 nf_ct_frag6_sysctl_table);
631 if (!nf_ct_frag6_sysctl_header) { 631 if (!nf_ct_frag6_sysctl_header) {
632 inet_frags_fini(&nf_frags); 632 inet_frags_fini(&nf_frags);
633 return -ENOMEM; 633 return -ENOMEM;
@@ -640,7 +640,7 @@ int nf_ct_frag6_init(void)
640void nf_ct_frag6_cleanup(void) 640void nf_ct_frag6_cleanup(void)
641{ 641{
642#ifdef CONFIG_SYSCTL 642#ifdef CONFIG_SYSCTL
643 unregister_sysctl_table(nf_ct_frag6_sysctl_header); 643 unregister_net_sysctl_table(nf_ct_frag6_sysctl_header);
644 nf_ct_frag6_sysctl_header = NULL; 644 nf_ct_frag6_sysctl_header = NULL;
645#endif 645#endif
646 inet_frags_fini(&nf_frags); 646 inet_frags_fini(&nf_frags);
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 9447bd69873a..54c5d2b704df 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -433,7 +433,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
433 skb_morph(head, fq->q.fragments); 433 skb_morph(head, fq->q.fragments);
434 head->next = fq->q.fragments->next; 434 head->next = fq->q.fragments->next;
435 435
436 kfree_skb(fq->q.fragments); 436 consume_skb(fq->q.fragments);
437 fq->q.fragments = head; 437 fq->q.fragments = head;
438 } 438 }
439 439
@@ -646,7 +646,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
646 table[2].data = &net->ipv6.frags.timeout; 646 table[2].data = &net->ipv6.frags.timeout;
647 } 647 }
648 648
649 hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table); 649 hdr = register_net_sysctl(net, "net/ipv6", table);
650 if (hdr == NULL) 650 if (hdr == NULL)
651 goto err_reg; 651 goto err_reg;
652 652
@@ -674,7 +674,7 @@ static struct ctl_table_header *ip6_ctl_header;
674 674
675static int ip6_frags_sysctl_register(void) 675static int ip6_frags_sysctl_register(void)
676{ 676{
677 ip6_ctl_header = register_net_sysctl_rotable(net_ipv6_ctl_path, 677 ip6_ctl_header = register_net_sysctl(&init_net, "net/ipv6",
678 ip6_frags_ctl_table); 678 ip6_frags_ctl_table);
679 return ip6_ctl_header == NULL ? -ENOMEM : 0; 679 return ip6_ctl_header == NULL ? -ENOMEM : 0;
680} 680}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index bc4888d902b2..0aefc36f74c7 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -82,7 +82,7 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
82static struct rt6_info *rt6_add_route_info(struct net *net, 82static struct rt6_info *rt6_add_route_info(struct net *net,
83 const struct in6_addr *prefix, int prefixlen, 83 const struct in6_addr *prefix, int prefixlen,
84 const struct in6_addr *gwaddr, int ifindex, 84 const struct in6_addr *gwaddr, int ifindex,
85 unsigned pref); 85 unsigned int pref);
86static struct rt6_info *rt6_get_route_info(struct net *net, 86static struct rt6_info *rt6_get_route_info(struct net *net,
87 const struct in6_addr *prefix, int prefixlen, 87 const struct in6_addr *prefix, int prefixlen,
88 const struct in6_addr *gwaddr, int ifindex); 88 const struct in6_addr *gwaddr, int ifindex);
@@ -1887,7 +1887,7 @@ out:
1887static struct rt6_info *rt6_add_route_info(struct net *net, 1887static struct rt6_info *rt6_add_route_info(struct net *net,
1888 const struct in6_addr *prefix, int prefixlen, 1888 const struct in6_addr *prefix, int prefixlen,
1889 const struct in6_addr *gwaddr, int ifindex, 1889 const struct in6_addr *gwaddr, int ifindex,
1890 unsigned pref) 1890 unsigned int pref)
1891{ 1891{
1892 struct fib6_config cfg = { 1892 struct fib6_config cfg = {
1893 .fc_table = RT6_TABLE_INFO, 1893 .fc_table = RT6_TABLE_INFO,
@@ -2217,10 +2217,9 @@ void rt6_ifdown(struct net *net, struct net_device *dev)
2217 icmp6_clean_all(fib6_ifdown, &adn); 2217 icmp6_clean_all(fib6_ifdown, &adn);
2218} 2218}
2219 2219
2220struct rt6_mtu_change_arg 2220struct rt6_mtu_change_arg {
2221{
2222 struct net_device *dev; 2221 struct net_device *dev;
2223 unsigned mtu; 2222 unsigned int mtu;
2224}; 2223};
2225 2224
2226static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg) 2225static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
@@ -2262,7 +2261,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
2262 return 0; 2261 return 0;
2263} 2262}
2264 2263
2265void rt6_mtu_change(struct net_device *dev, unsigned mtu) 2264void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
2266{ 2265{
2267 struct rt6_mtu_change_arg arg = { 2266 struct rt6_mtu_change_arg arg = {
2268 .dev = dev, 2267 .dev = dev,
@@ -2430,7 +2429,8 @@ static int rt6_fill_node(struct net *net,
2430 else 2429 else
2431 table = RT6_TABLE_UNSPEC; 2430 table = RT6_TABLE_UNSPEC;
2432 rtm->rtm_table = table; 2431 rtm->rtm_table = table;
2433 NLA_PUT_U32(skb, RTA_TABLE, table); 2432 if (nla_put_u32(skb, RTA_TABLE, table))
2433 goto nla_put_failure;
2434 if (rt->rt6i_flags & RTF_REJECT) 2434 if (rt->rt6i_flags & RTF_REJECT)
2435 rtm->rtm_type = RTN_UNREACHABLE; 2435 rtm->rtm_type = RTN_UNREACHABLE;
2436 else if (rt->rt6i_flags & RTF_LOCAL) 2436 else if (rt->rt6i_flags & RTF_LOCAL)
@@ -2453,16 +2453,20 @@ static int rt6_fill_node(struct net *net,
2453 rtm->rtm_flags |= RTM_F_CLONED; 2453 rtm->rtm_flags |= RTM_F_CLONED;
2454 2454
2455 if (dst) { 2455 if (dst) {
2456 NLA_PUT(skb, RTA_DST, 16, dst); 2456 if (nla_put(skb, RTA_DST, 16, dst))
2457 goto nla_put_failure;
2457 rtm->rtm_dst_len = 128; 2458 rtm->rtm_dst_len = 128;
2458 } else if (rtm->rtm_dst_len) 2459 } else if (rtm->rtm_dst_len)
2459 NLA_PUT(skb, RTA_DST, 16, &rt->rt6i_dst.addr); 2460 if (nla_put(skb, RTA_DST, 16, &rt->rt6i_dst.addr))
2461 goto nla_put_failure;
2460#ifdef CONFIG_IPV6_SUBTREES 2462#ifdef CONFIG_IPV6_SUBTREES
2461 if (src) { 2463 if (src) {
2462 NLA_PUT(skb, RTA_SRC, 16, src); 2464 if (nla_put(skb, RTA_SRC, 16, src))
2465 goto nla_put_failure;
2463 rtm->rtm_src_len = 128; 2466 rtm->rtm_src_len = 128;
2464 } else if (rtm->rtm_src_len) 2467 } else if (rtm->rtm_src_len &&
2465 NLA_PUT(skb, RTA_SRC, 16, &rt->rt6i_src.addr); 2468 nla_put(skb, RTA_SRC, 16, &rt->rt6i_src.addr))
2469 goto nla_put_failure;
2466#endif 2470#endif
2467 if (iif) { 2471 if (iif) {
2468#ifdef CONFIG_IPV6_MROUTE 2472#ifdef CONFIG_IPV6_MROUTE
@@ -2480,17 +2484,20 @@ static int rt6_fill_node(struct net *net,
2480 } 2484 }
2481 } else 2485 } else
2482#endif 2486#endif
2483 NLA_PUT_U32(skb, RTA_IIF, iif); 2487 if (nla_put_u32(skb, RTA_IIF, iif))
2488 goto nla_put_failure;
2484 } else if (dst) { 2489 } else if (dst) {
2485 struct in6_addr saddr_buf; 2490 struct in6_addr saddr_buf;
2486 if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0) 2491 if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
2487 NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf); 2492 nla_put(skb, RTA_PREFSRC, 16, &saddr_buf))
2493 goto nla_put_failure;
2488 } 2494 }
2489 2495
2490 if (rt->rt6i_prefsrc.plen) { 2496 if (rt->rt6i_prefsrc.plen) {
2491 struct in6_addr saddr_buf; 2497 struct in6_addr saddr_buf;
2492 saddr_buf = rt->rt6i_prefsrc.addr; 2498 saddr_buf = rt->rt6i_prefsrc.addr;
2493 NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf); 2499 if (nla_put(skb, RTA_PREFSRC, 16, &saddr_buf))
2500 goto nla_put_failure;
2494 } 2501 }
2495 2502
2496 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0) 2503 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
@@ -2506,11 +2513,11 @@ static int rt6_fill_node(struct net *net,
2506 } 2513 }
2507 rcu_read_unlock(); 2514 rcu_read_unlock();
2508 2515
2509 if (rt->dst.dev) 2516 if (rt->dst.dev &&
2510 NLA_PUT_U32(skb, RTA_OIF, rt->dst.dev->ifindex); 2517 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2511 2518 goto nla_put_failure;
2512 NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric); 2519 if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
2513 2520 goto nla_put_failure;
2514 if (!(rt->rt6i_flags & RTF_EXPIRES)) 2521 if (!(rt->rt6i_flags & RTF_EXPIRES))
2515 expires = 0; 2522 expires = 0;
2516 else if (rt->dst.expires - jiffies < INT_MAX) 2523 else if (rt->dst.expires - jiffies < INT_MAX)
@@ -2615,6 +2622,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
2615 2622
2616 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 2623 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2617 if (!skb) { 2624 if (!skb) {
2625 dst_release(&rt->dst);
2618 err = -ENOBUFS; 2626 err = -ENOBUFS;
2619 goto errout; 2627 goto errout;
2620 } 2628 }
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index c4ffd1743528..e5fef943e30a 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -87,35 +87,51 @@ struct sit_net {
87 87
88/* often modified stats are per cpu, other are shared (netdev->stats) */ 88/* often modified stats are per cpu, other are shared (netdev->stats) */
89struct pcpu_tstats { 89struct pcpu_tstats {
90 unsigned long rx_packets; 90 u64 rx_packets;
91 unsigned long rx_bytes; 91 u64 rx_bytes;
92 unsigned long tx_packets; 92 u64 tx_packets;
93 unsigned long tx_bytes; 93 u64 tx_bytes;
94} __attribute__((aligned(4*sizeof(unsigned long)))); 94 struct u64_stats_sync syncp;
95};
95 96
96static struct net_device_stats *ipip6_get_stats(struct net_device *dev) 97static struct rtnl_link_stats64 *ipip6_get_stats64(struct net_device *dev,
98 struct rtnl_link_stats64 *tot)
97{ 99{
98 struct pcpu_tstats sum = { 0 };
99 int i; 100 int i;
100 101
101 for_each_possible_cpu(i) { 102 for_each_possible_cpu(i) {
102 const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i); 103 const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
103 104 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
104 sum.rx_packets += tstats->rx_packets; 105 unsigned int start;
105 sum.rx_bytes += tstats->rx_bytes; 106
106 sum.tx_packets += tstats->tx_packets; 107 do {
107 sum.tx_bytes += tstats->tx_bytes; 108 start = u64_stats_fetch_begin_bh(&tstats->syncp);
109 rx_packets = tstats->rx_packets;
110 tx_packets = tstats->tx_packets;
111 rx_bytes = tstats->rx_bytes;
112 tx_bytes = tstats->tx_bytes;
113 } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
114
115 tot->rx_packets += rx_packets;
116 tot->tx_packets += tx_packets;
117 tot->rx_bytes += rx_bytes;
118 tot->tx_bytes += tx_bytes;
108 } 119 }
109 dev->stats.rx_packets = sum.rx_packets; 120
110 dev->stats.rx_bytes = sum.rx_bytes; 121 tot->rx_errors = dev->stats.rx_errors;
111 dev->stats.tx_packets = sum.tx_packets; 122 tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
112 dev->stats.tx_bytes = sum.tx_bytes; 123 tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
113 return &dev->stats; 124 tot->tx_dropped = dev->stats.tx_dropped;
125 tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
126 tot->tx_errors = dev->stats.tx_errors;
127
128 return tot;
114} 129}
130
115/* 131/*
116 * Must be invoked with rcu_read_lock 132 * Must be invoked with rcu_read_lock
117 */ 133 */
118static struct ip_tunnel * ipip6_tunnel_lookup(struct net *net, 134static struct ip_tunnel *ipip6_tunnel_lookup(struct net *net,
119 struct net_device *dev, __be32 remote, __be32 local) 135 struct net_device *dev, __be32 remote, __be32 local)
120{ 136{
121 unsigned int h0 = HASH(remote); 137 unsigned int h0 = HASH(remote);
@@ -691,7 +707,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
691 goto tx_error; 707 goto tx_error;
692 } 708 }
693 709
694 addr6 = (const struct in6_addr*)&neigh->primary_key; 710 addr6 = (const struct in6_addr *)&neigh->primary_key;
695 addr_type = ipv6_addr_type(addr6); 711 addr_type = ipv6_addr_type(addr6);
696 712
697 if ((addr_type & IPV6_ADDR_UNICAST) && 713 if ((addr_type & IPV6_ADDR_UNICAST) &&
@@ -721,7 +737,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
721 goto tx_error; 737 goto tx_error;
722 } 738 }
723 739
724 addr6 = (const struct in6_addr*)&neigh->primary_key; 740 addr6 = (const struct in6_addr *)&neigh->primary_key;
725 addr_type = ipv6_addr_type(addr6); 741 addr_type = ipv6_addr_type(addr6);
726 742
727 if (addr_type == IPV6_ADDR_ANY) { 743 if (addr_type == IPV6_ADDR_ANY) {
@@ -1126,7 +1142,7 @@ static const struct net_device_ops ipip6_netdev_ops = {
1126 .ndo_start_xmit = ipip6_tunnel_xmit, 1142 .ndo_start_xmit = ipip6_tunnel_xmit,
1127 .ndo_do_ioctl = ipip6_tunnel_ioctl, 1143 .ndo_do_ioctl = ipip6_tunnel_ioctl,
1128 .ndo_change_mtu = ipip6_tunnel_change_mtu, 1144 .ndo_change_mtu = ipip6_tunnel_change_mtu,
1129 .ndo_get_stats = ipip6_get_stats, 1145 .ndo_get_stats64= ipip6_get_stats64,
1130}; 1146};
1131 1147
1132static void ipip6_dev_free(struct net_device *dev) 1148static void ipip6_dev_free(struct net_device *dev)
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index 166a57c47d39..e85c48bd404f 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -16,32 +16,8 @@
16#include <net/addrconf.h> 16#include <net/addrconf.h>
17#include <net/inet_frag.h> 17#include <net/inet_frag.h>
18 18
19static struct ctl_table empty[1];
20
21static ctl_table ipv6_static_skeleton[] = {
22 {
23 .procname = "neigh",
24 .maxlen = 0,
25 .mode = 0555,
26 .child = empty,
27 },
28 { }
29};
30
31static ctl_table ipv6_table_template[] = { 19static ctl_table ipv6_table_template[] = {
32 { 20 {
33 .procname = "route",
34 .maxlen = 0,
35 .mode = 0555,
36 .child = ipv6_route_table_template
37 },
38 {
39 .procname = "icmp",
40 .maxlen = 0,
41 .mode = 0555,
42 .child = ipv6_icmp_table_template
43 },
44 {
45 .procname = "bindv6only", 21 .procname = "bindv6only",
46 .data = &init_net.ipv6.sysctl.bindv6only, 22 .data = &init_net.ipv6.sysctl.bindv6only,
47 .maxlen = sizeof(int), 23 .maxlen = sizeof(int),
@@ -62,13 +38,6 @@ static ctl_table ipv6_rotable[] = {
62 { } 38 { }
63}; 39};
64 40
65struct ctl_path net_ipv6_ctl_path[] = {
66 { .procname = "net", },
67 { .procname = "ipv6", },
68 { },
69};
70EXPORT_SYMBOL_GPL(net_ipv6_ctl_path);
71
72static int __net_init ipv6_sysctl_net_init(struct net *net) 41static int __net_init ipv6_sysctl_net_init(struct net *net)
73{ 42{
74 struct ctl_table *ipv6_table; 43 struct ctl_table *ipv6_table;
@@ -81,28 +50,37 @@ static int __net_init ipv6_sysctl_net_init(struct net *net)
81 GFP_KERNEL); 50 GFP_KERNEL);
82 if (!ipv6_table) 51 if (!ipv6_table)
83 goto out; 52 goto out;
53 ipv6_table[0].data = &net->ipv6.sysctl.bindv6only;
84 54
85 ipv6_route_table = ipv6_route_sysctl_init(net); 55 ipv6_route_table = ipv6_route_sysctl_init(net);
86 if (!ipv6_route_table) 56 if (!ipv6_route_table)
87 goto out_ipv6_table; 57 goto out_ipv6_table;
88 ipv6_table[0].child = ipv6_route_table;
89 58
90 ipv6_icmp_table = ipv6_icmp_sysctl_init(net); 59 ipv6_icmp_table = ipv6_icmp_sysctl_init(net);
91 if (!ipv6_icmp_table) 60 if (!ipv6_icmp_table)
92 goto out_ipv6_route_table; 61 goto out_ipv6_route_table;
93 ipv6_table[1].child = ipv6_icmp_table;
94 62
95 ipv6_table[2].data = &net->ipv6.sysctl.bindv6only; 63 net->ipv6.sysctl.hdr = register_net_sysctl(net, "net/ipv6", ipv6_table);
96 64 if (!net->ipv6.sysctl.hdr)
97 net->ipv6.sysctl.table = register_net_sysctl_table(net, net_ipv6_ctl_path,
98 ipv6_table);
99 if (!net->ipv6.sysctl.table)
100 goto out_ipv6_icmp_table; 65 goto out_ipv6_icmp_table;
101 66
67 net->ipv6.sysctl.route_hdr =
68 register_net_sysctl(net, "net/ipv6/route", ipv6_route_table);
69 if (!net->ipv6.sysctl.route_hdr)
70 goto out_unregister_ipv6_table;
71
72 net->ipv6.sysctl.icmp_hdr =
73 register_net_sysctl(net, "net/ipv6/icmp", ipv6_icmp_table);
74 if (!net->ipv6.sysctl.icmp_hdr)
75 goto out_unregister_route_table;
76
102 err = 0; 77 err = 0;
103out: 78out:
104 return err; 79 return err;
105 80out_unregister_route_table:
81 unregister_net_sysctl_table(net->ipv6.sysctl.route_hdr);
82out_unregister_ipv6_table:
83 unregister_net_sysctl_table(net->ipv6.sysctl.hdr);
106out_ipv6_icmp_table: 84out_ipv6_icmp_table:
107 kfree(ipv6_icmp_table); 85 kfree(ipv6_icmp_table);
108out_ipv6_route_table: 86out_ipv6_route_table:
@@ -118,11 +96,13 @@ static void __net_exit ipv6_sysctl_net_exit(struct net *net)
118 struct ctl_table *ipv6_route_table; 96 struct ctl_table *ipv6_route_table;
119 struct ctl_table *ipv6_icmp_table; 97 struct ctl_table *ipv6_icmp_table;
120 98
121 ipv6_table = net->ipv6.sysctl.table->ctl_table_arg; 99 ipv6_table = net->ipv6.sysctl.hdr->ctl_table_arg;
122 ipv6_route_table = ipv6_table[0].child; 100 ipv6_route_table = net->ipv6.sysctl.route_hdr->ctl_table_arg;
123 ipv6_icmp_table = ipv6_table[1].child; 101 ipv6_icmp_table = net->ipv6.sysctl.icmp_hdr->ctl_table_arg;
124 102
125 unregister_net_sysctl_table(net->ipv6.sysctl.table); 103 unregister_net_sysctl_table(net->ipv6.sysctl.icmp_hdr);
104 unregister_net_sysctl_table(net->ipv6.sysctl.route_hdr);
105 unregister_net_sysctl_table(net->ipv6.sysctl.hdr);
126 106
127 kfree(ipv6_table); 107 kfree(ipv6_table);
128 kfree(ipv6_route_table); 108 kfree(ipv6_route_table);
@@ -140,7 +120,7 @@ int ipv6_sysctl_register(void)
140{ 120{
141 int err = -ENOMEM; 121 int err = -ENOMEM;
142 122
143 ip6_header = register_net_sysctl_rotable(net_ipv6_ctl_path, ipv6_rotable); 123 ip6_header = register_net_sysctl(&init_net, "net/ipv6", ipv6_rotable);
144 if (ip6_header == NULL) 124 if (ip6_header == NULL)
145 goto out; 125 goto out;
146 126
@@ -160,18 +140,3 @@ void ipv6_sysctl_unregister(void)
160 unregister_net_sysctl_table(ip6_header); 140 unregister_net_sysctl_table(ip6_header);
161 unregister_pernet_subsys(&ipv6_sysctl_net_ops); 141 unregister_pernet_subsys(&ipv6_sysctl_net_ops);
162} 142}
163
164static struct ctl_table_header *ip6_base;
165
166int ipv6_static_sysctl_register(void)
167{
168 ip6_base = register_sysctl_paths(net_ipv6_ctl_path, ipv6_static_skeleton);
169 if (ip6_base == NULL)
170 return -ENOMEM;
171 return 0;
172}
173
174void ipv6_static_sysctl_unregister(void)
175{
176 unregister_net_sysctl_table(ip6_base);
177}
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 98256cf72f9d..078d039e8fd2 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1140,7 +1140,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1140 treq->rmt_addr = ipv6_hdr(skb)->saddr; 1140 treq->rmt_addr = ipv6_hdr(skb)->saddr;
1141 treq->loc_addr = ipv6_hdr(skb)->daddr; 1141 treq->loc_addr = ipv6_hdr(skb)->daddr;
1142 if (!want_cookie || tmp_opt.tstamp_ok) 1142 if (!want_cookie || tmp_opt.tstamp_ok)
1143 TCP_ECN_create_request(req, tcp_hdr(skb)); 1143 TCP_ECN_create_request(req, skb);
1144 1144
1145 treq->iif = sk->sk_bound_dev_if; 1145 treq->iif = sk->sk_bound_dev_if;
1146 1146
@@ -1353,7 +1353,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1353 newnp->pktoptions = NULL; 1353 newnp->pktoptions = NULL;
1354 if (treq->pktopts != NULL) { 1354 if (treq->pktopts != NULL) {
1355 newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC); 1355 newnp->pktoptions = skb_clone(treq->pktopts, GFP_ATOMIC);
1356 kfree_skb(treq->pktopts); 1356 consume_skb(treq->pktopts);
1357 treq->pktopts = NULL; 1357 treq->pktopts = NULL;
1358 if (newnp->pktoptions) 1358 if (newnp->pktoptions)
1359 skb_set_owner_r(newnp->pktoptions, newsk); 1359 skb_set_owner_r(newnp->pktoptions, newsk);
@@ -1658,7 +1658,8 @@ process:
1658 if (!tcp_prequeue(sk, skb)) 1658 if (!tcp_prequeue(sk, skb))
1659 ret = tcp_v6_do_rcv(sk, skb); 1659 ret = tcp_v6_do_rcv(sk, skb);
1660 } 1660 }
1661 } else if (unlikely(sk_add_backlog(sk, skb))) { 1661 } else if (unlikely(sk_add_backlog(sk, skb,
1662 sk->sk_rcvbuf + sk->sk_sndbuf))) {
1662 bh_unlock_sock(sk); 1663 bh_unlock_sock(sk);
1663 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP); 1664 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1664 goto discard_and_relse; 1665 goto discard_and_relse;
@@ -1777,6 +1778,7 @@ static const struct inet_connection_sock_af_ops ipv6_specific = {
1777 .syn_recv_sock = tcp_v6_syn_recv_sock, 1778 .syn_recv_sock = tcp_v6_syn_recv_sock,
1778 .get_peer = tcp_v6_get_peer, 1779 .get_peer = tcp_v6_get_peer,
1779 .net_header_len = sizeof(struct ipv6hdr), 1780 .net_header_len = sizeof(struct ipv6hdr),
1781 .net_frag_header_len = sizeof(struct frag_hdr),
1780 .setsockopt = ipv6_setsockopt, 1782 .setsockopt = ipv6_setsockopt,
1781 .getsockopt = ipv6_getsockopt, 1783 .getsockopt = ipv6_getsockopt,
1782 .addr2sockaddr = inet6_csk_addr2sockaddr, 1784 .addr2sockaddr = inet6_csk_addr2sockaddr,
@@ -1833,64 +1835,15 @@ static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
1833static int tcp_v6_init_sock(struct sock *sk) 1835static int tcp_v6_init_sock(struct sock *sk)
1834{ 1836{
1835 struct inet_connection_sock *icsk = inet_csk(sk); 1837 struct inet_connection_sock *icsk = inet_csk(sk);
1836 struct tcp_sock *tp = tcp_sk(sk);
1837
1838 skb_queue_head_init(&tp->out_of_order_queue);
1839 tcp_init_xmit_timers(sk);
1840 tcp_prequeue_init(tp);
1841
1842 icsk->icsk_rto = TCP_TIMEOUT_INIT;
1843 tp->mdev = TCP_TIMEOUT_INIT;
1844
1845 /* So many TCP implementations out there (incorrectly) count the
1846 * initial SYN frame in their delayed-ACK and congestion control
1847 * algorithms that we must have the following bandaid to talk
1848 * efficiently to them. -DaveM
1849 */
1850 tp->snd_cwnd = 2;
1851
1852 /* See draft-stevens-tcpca-spec-01 for discussion of the
1853 * initialization of these values.
1854 */
1855 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
1856 tp->snd_cwnd_clamp = ~0;
1857 tp->mss_cache = TCP_MSS_DEFAULT;
1858
1859 tp->reordering = sysctl_tcp_reordering;
1860 1838
1861 sk->sk_state = TCP_CLOSE; 1839 tcp_init_sock(sk);
1862 1840
1863 icsk->icsk_af_ops = &ipv6_specific; 1841 icsk->icsk_af_ops = &ipv6_specific;
1864 icsk->icsk_ca_ops = &tcp_init_congestion_ops;
1865 icsk->icsk_sync_mss = tcp_sync_mss;
1866 sk->sk_write_space = sk_stream_write_space;
1867 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
1868 1842
1869#ifdef CONFIG_TCP_MD5SIG 1843#ifdef CONFIG_TCP_MD5SIG
1870 tp->af_specific = &tcp_sock_ipv6_specific; 1844 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
1871#endif 1845#endif
1872 1846
1873 /* TCP Cookie Transactions */
1874 if (sysctl_tcp_cookie_size > 0) {
1875 /* Default, cookies without s_data_payload. */
1876 tp->cookie_values =
1877 kzalloc(sizeof(*tp->cookie_values),
1878 sk->sk_allocation);
1879 if (tp->cookie_values != NULL)
1880 kref_init(&tp->cookie_values->kref);
1881 }
1882 /* Presumed zeroed, in order of appearance:
1883 * cookie_in_always, cookie_out_never,
1884 * s_data_constant, s_data_in, s_data_out
1885 */
1886 sk->sk_sndbuf = sysctl_tcp_wmem[1];
1887 sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1888
1889 local_bh_disable();
1890 sock_update_memcg(sk);
1891 sk_sockets_allocated_inc(sk);
1892 local_bh_enable();
1893
1894 return 0; 1847 return 0;
1895} 1848}
1896 1849
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 37b0699e95e5..c1d91a713e8e 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -496,6 +496,28 @@ out:
496 sock_put(sk); 496 sock_put(sk);
497} 497}
498 498
499static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
500{
501 int rc;
502
503 if (!ipv6_addr_any(&inet6_sk(sk)->daddr))
504 sock_rps_save_rxhash(sk, skb);
505
506 rc = sock_queue_rcv_skb(sk, skb);
507 if (rc < 0) {
508 int is_udplite = IS_UDPLITE(sk);
509
510 /* Note that an ENOMEM error is charged twice */
511 if (rc == -ENOMEM)
512 UDP6_INC_STATS_BH(sock_net(sk),
513 UDP_MIB_RCVBUFERRORS, is_udplite);
514 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
515 kfree_skb(skb);
516 return -1;
517 }
518 return 0;
519}
520
499static __inline__ void udpv6_err(struct sk_buff *skb, 521static __inline__ void udpv6_err(struct sk_buff *skb,
500 struct inet6_skb_parm *opt, u8 type, 522 struct inet6_skb_parm *opt, u8 type,
501 u8 code, int offset, __be32 info ) 523 u8 code, int offset, __be32 info )
@@ -503,18 +525,54 @@ static __inline__ void udpv6_err(struct sk_buff *skb,
503 __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table); 525 __udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
504} 526}
505 527
506int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) 528static struct static_key udpv6_encap_needed __read_mostly;
529void udpv6_encap_enable(void)
530{
531 if (!static_key_enabled(&udpv6_encap_needed))
532 static_key_slow_inc(&udpv6_encap_needed);
533}
534EXPORT_SYMBOL(udpv6_encap_enable);
535
536int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
507{ 537{
508 struct udp_sock *up = udp_sk(sk); 538 struct udp_sock *up = udp_sk(sk);
509 int rc; 539 int rc;
510 int is_udplite = IS_UDPLITE(sk); 540 int is_udplite = IS_UDPLITE(sk);
511 541
512 if (!ipv6_addr_any(&inet6_sk(sk)->daddr))
513 sock_rps_save_rxhash(sk, skb);
514
515 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) 542 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
516 goto drop; 543 goto drop;
517 544
545 if (static_key_false(&udpv6_encap_needed) && up->encap_type) {
546 int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
547
548 /*
549 * This is an encapsulation socket so pass the skb to
550 * the socket's udp_encap_rcv() hook. Otherwise, just
551 * fall through and pass this up the UDP socket.
552 * up->encap_rcv() returns the following value:
553 * =0 if skb was successfully passed to the encap
554 * handler or was discarded by it.
555 * >0 if skb should be passed on to UDP.
556 * <0 if skb should be resubmitted as proto -N
557 */
558
559 /* if we're overly short, let UDP handle it */
560 encap_rcv = ACCESS_ONCE(up->encap_rcv);
561 if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) {
562 int ret;
563
564 ret = encap_rcv(sk, skb);
565 if (ret <= 0) {
566 UDP_INC_STATS_BH(sock_net(sk),
567 UDP_MIB_INDATAGRAMS,
568 is_udplite);
569 return -ret;
570 }
571 }
572
573 /* FALLTHROUGH -- it's a UDP Packet */
574 }
575
518 /* 576 /*
519 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c). 577 * UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
520 */ 578 */
@@ -539,21 +597,25 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
539 goto drop; 597 goto drop;
540 } 598 }
541 599
600 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
601 goto drop;
602
542 skb_dst_drop(skb); 603 skb_dst_drop(skb);
543 rc = sock_queue_rcv_skb(sk, skb); 604
544 if (rc < 0) { 605 bh_lock_sock(sk);
545 /* Note that an ENOMEM error is charged twice */ 606 rc = 0;
546 if (rc == -ENOMEM) 607 if (!sock_owned_by_user(sk))
547 UDP6_INC_STATS_BH(sock_net(sk), 608 rc = __udpv6_queue_rcv_skb(sk, skb);
548 UDP_MIB_RCVBUFERRORS, is_udplite); 609 else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
549 goto drop_no_sk_drops_inc; 610 bh_unlock_sock(sk);
611 goto drop;
550 } 612 }
613 bh_unlock_sock(sk);
551 614
552 return 0; 615 return rc;
553drop: 616drop:
554 atomic_inc(&sk->sk_drops);
555drop_no_sk_drops_inc:
556 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite); 617 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
618 atomic_inc(&sk->sk_drops);
557 kfree_skb(skb); 619 kfree_skb(skb);
558 return -1; 620 return -1;
559} 621}
@@ -602,37 +664,27 @@ static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
602static void flush_stack(struct sock **stack, unsigned int count, 664static void flush_stack(struct sock **stack, unsigned int count,
603 struct sk_buff *skb, unsigned int final) 665 struct sk_buff *skb, unsigned int final)
604{ 666{
605 unsigned int i; 667 struct sk_buff *skb1 = NULL;
606 struct sock *sk; 668 struct sock *sk;
607 struct sk_buff *skb1; 669 unsigned int i;
608 670
609 for (i = 0; i < count; i++) { 671 for (i = 0; i < count; i++) {
610 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
611
612 sk = stack[i]; 672 sk = stack[i];
613 if (skb1) { 673 if (likely(skb1 == NULL))
614 if (sk_rcvqueues_full(sk, skb1)) { 674 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
615 kfree_skb(skb1); 675 if (!skb1) {
616 goto drop; 676 atomic_inc(&sk->sk_drops);
617 } 677 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
618 bh_lock_sock(sk); 678 IS_UDPLITE(sk));
619 if (!sock_owned_by_user(sk)) 679 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
620 udpv6_queue_rcv_skb(sk, skb1); 680 IS_UDPLITE(sk));
621 else if (sk_add_backlog(sk, skb1)) {
622 kfree_skb(skb1);
623 bh_unlock_sock(sk);
624 goto drop;
625 }
626 bh_unlock_sock(sk);
627 continue;
628 } 681 }
629drop: 682
630 atomic_inc(&sk->sk_drops); 683 if (skb1 && udpv6_queue_rcv_skb(sk, skb1) <= 0)
631 UDP6_INC_STATS_BH(sock_net(sk), 684 skb1 = NULL;
632 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
633 UDP6_INC_STATS_BH(sock_net(sk),
634 UDP_MIB_INERRORS, IS_UDPLITE(sk));
635 } 685 }
686 if (unlikely(skb1))
687 kfree_skb(skb1);
636} 688}
637/* 689/*
638 * Note: called only from the BH handler context, 690 * Note: called only from the BH handler context,
@@ -772,39 +824,29 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
772 * for sock caches... i'll skip this for now. 824 * for sock caches... i'll skip this for now.
773 */ 825 */
774 sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable); 826 sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
827 if (sk != NULL) {
828 int ret = udpv6_queue_rcv_skb(sk, skb);
829 sock_put(sk);
775 830
776 if (sk == NULL) { 831 /* a return value > 0 means to resubmit the input, but
777 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) 832 * it wants the return to be -protocol, or 0
778 goto discard; 833 */
779 834 if (ret > 0)
780 if (udp_lib_checksum_complete(skb)) 835 return -ret;
781 goto discard;
782 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
783 proto == IPPROTO_UDPLITE);
784
785 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
786 836
787 kfree_skb(skb);
788 return 0; 837 return 0;
789 } 838 }
790 839
791 /* deliver */ 840 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
792
793 if (sk_rcvqueues_full(sk, skb)) {
794 sock_put(sk);
795 goto discard; 841 goto discard;
796 } 842
797 bh_lock_sock(sk); 843 if (udp_lib_checksum_complete(skb))
798 if (!sock_owned_by_user(sk))
799 udpv6_queue_rcv_skb(sk, skb);
800 else if (sk_add_backlog(sk, skb)) {
801 atomic_inc(&sk->sk_drops);
802 bh_unlock_sock(sk);
803 sock_put(sk);
804 goto discard; 844 goto discard;
805 } 845
806 bh_unlock_sock(sk); 846 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
807 sock_put(sk); 847 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
848
849 kfree_skb(skb);
808 return 0; 850 return 0;
809 851
810short_packet: 852short_packet:
@@ -1471,7 +1513,7 @@ struct proto udpv6_prot = {
1471 .getsockopt = udpv6_getsockopt, 1513 .getsockopt = udpv6_getsockopt,
1472 .sendmsg = udpv6_sendmsg, 1514 .sendmsg = udpv6_sendmsg,
1473 .recvmsg = udpv6_recvmsg, 1515 .recvmsg = udpv6_recvmsg,
1474 .backlog_rcv = udpv6_queue_rcv_skb, 1516 .backlog_rcv = __udpv6_queue_rcv_skb,
1475 .hash = udp_lib_hash, 1517 .hash = udp_lib_hash,
1476 .unhash = udp_lib_unhash, 1518 .unhash = udp_lib_unhash,
1477 .rehash = udp_v6_rehash, 1519 .rehash = udp_v6_rehash,
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 8ea65e032733..8625fba96db9 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -334,8 +334,8 @@ int __init xfrm6_init(void)
334 goto out_policy; 334 goto out_policy;
335 335
336#ifdef CONFIG_SYSCTL 336#ifdef CONFIG_SYSCTL
337 sysctl_hdr = register_net_sysctl_table(&init_net, net_ipv6_ctl_path, 337 sysctl_hdr = register_net_sysctl(&init_net, "net/ipv6",
338 xfrm6_policy_table); 338 xfrm6_policy_table);
339#endif 339#endif
340out: 340out:
341 return ret; 341 return ret;
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 4fe1db12d2a3..ee5a7065aacc 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -68,9 +68,9 @@ static DEFINE_SPINLOCK(xfrm6_tunnel_spi_lock);
68 68
69static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly; 69static struct kmem_cache *xfrm6_tunnel_spi_kmem __read_mostly;
70 70
71static inline unsigned xfrm6_tunnel_spi_hash_byaddr(const xfrm_address_t *addr) 71static inline unsigned int xfrm6_tunnel_spi_hash_byaddr(const xfrm_address_t *addr)
72{ 72{
73 unsigned h; 73 unsigned int h;
74 74
75 h = (__force u32)(addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3]); 75 h = (__force u32)(addr->a6[0] ^ addr->a6[1] ^ addr->a6[2] ^ addr->a6[3]);
76 h ^= h >> 16; 76 h ^= h >> 16;
@@ -80,7 +80,7 @@ static inline unsigned xfrm6_tunnel_spi_hash_byaddr(const xfrm_address_t *addr)
80 return h; 80 return h;
81} 81}
82 82
83static inline unsigned xfrm6_tunnel_spi_hash_byspi(u32 spi) 83static inline unsigned int xfrm6_tunnel_spi_hash_byspi(u32 spi)
84{ 84{
85 return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE; 85 return spi % XFRM6_TUNNEL_SPI_BYSPI_HSIZE;
86} 86}
diff --git a/net/ipx/sysctl_net_ipx.c b/net/ipx/sysctl_net_ipx.c
index bd6dca00fb85..ad7c03dedaab 100644
--- a/net/ipx/sysctl_net_ipx.c
+++ b/net/ipx/sysctl_net_ipx.c
@@ -8,6 +8,7 @@
8 8
9#include <linux/mm.h> 9#include <linux/mm.h>
10#include <linux/sysctl.h> 10#include <linux/sysctl.h>
11#include <net/net_namespace.h>
11 12
12#ifndef CONFIG_SYSCTL 13#ifndef CONFIG_SYSCTL
13#error This file should not be compiled without CONFIG_SYSCTL defined 14#error This file should not be compiled without CONFIG_SYSCTL defined
@@ -27,20 +28,14 @@ static struct ctl_table ipx_table[] = {
27 { }, 28 { },
28}; 29};
29 30
30static struct ctl_path ipx_path[] = {
31 { .procname = "net", },
32 { .procname = "ipx", },
33 { }
34};
35
36static struct ctl_table_header *ipx_table_header; 31static struct ctl_table_header *ipx_table_header;
37 32
38void ipx_register_sysctl(void) 33void ipx_register_sysctl(void)
39{ 34{
40 ipx_table_header = register_sysctl_paths(ipx_path, ipx_table); 35 ipx_table_header = register_net_sysctl(&init_net, "net/ipx", ipx_table);
41} 36}
42 37
43void ipx_unregister_sysctl(void) 38void ipx_unregister_sysctl(void)
44{ 39{
45 unregister_sysctl_table(ipx_table_header); 40 unregister_net_sysctl_table(ipx_table_header);
46} 41}
diff --git a/net/irda/ircomm/ircomm_tty_ioctl.c b/net/irda/ircomm/ircomm_tty_ioctl.c
index 77c5e6499f8f..d0667d68351d 100644
--- a/net/irda/ircomm/ircomm_tty_ioctl.c
+++ b/net/irda/ircomm/ircomm_tty_ioctl.c
@@ -54,7 +54,7 @@
54 */ 54 */
55static void ircomm_tty_change_speed(struct ircomm_tty_cb *self) 55static void ircomm_tty_change_speed(struct ircomm_tty_cb *self)
56{ 56{
57 unsigned cflag, cval; 57 unsigned int cflag, cval;
58 int baud; 58 int baud;
59 59
60 IRDA_DEBUG(2, "%s()\n", __func__ ); 60 IRDA_DEBUG(2, "%s()\n", __func__ );
diff --git a/net/irda/irsysctl.c b/net/irda/irsysctl.c
index 2615ffc8e785..de73f6496db5 100644
--- a/net/irda/irsysctl.c
+++ b/net/irda/irsysctl.c
@@ -235,12 +235,6 @@ static ctl_table irda_table[] = {
235 { } 235 { }
236}; 236};
237 237
238static struct ctl_path irda_path[] = {
239 { .procname = "net", },
240 { .procname = "irda", },
241 { }
242};
243
244static struct ctl_table_header *irda_table_header; 238static struct ctl_table_header *irda_table_header;
245 239
246/* 240/*
@@ -251,7 +245,7 @@ static struct ctl_table_header *irda_table_header;
251 */ 245 */
252int __init irda_sysctl_register(void) 246int __init irda_sysctl_register(void)
253{ 247{
254 irda_table_header = register_sysctl_paths(irda_path, irda_table); 248 irda_table_header = register_net_sysctl(&init_net, "net/irda", irda_table);
255 if (!irda_table_header) 249 if (!irda_table_header)
256 return -ENOMEM; 250 return -ENOMEM;
257 251
@@ -266,7 +260,7 @@ int __init irda_sysctl_register(void)
266 */ 260 */
267void irda_sysctl_unregister(void) 261void irda_sysctl_unregister(void)
268{ 262{
269 unregister_sysctl_table(irda_table_header); 263 unregister_net_sysctl_table(irda_table_header);
270} 264}
271 265
272 266
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 7e5d927b576f..34e418508a67 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1714,7 +1714,7 @@ static int key_notify_sa_flush(const struct km_event *c)
1714static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs) 1714static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
1715{ 1715{
1716 struct net *net = sock_net(sk); 1716 struct net *net = sock_net(sk);
1717 unsigned proto; 1717 unsigned int proto;
1718 struct km_event c; 1718 struct km_event c;
1719 struct xfrm_audit audit_info; 1719 struct xfrm_audit audit_info;
1720 int err, err2; 1720 int err, err2;
@@ -3547,7 +3547,7 @@ static int pfkey_sendmsg(struct kiocb *kiocb,
3547 goto out; 3547 goto out;
3548 3548
3549 err = -EMSGSIZE; 3549 err = -EMSGSIZE;
3550 if ((unsigned)len > sk->sk_sndbuf - 32) 3550 if ((unsigned int)len > sk->sk_sndbuf - 32)
3551 goto out; 3551 goto out;
3552 3552
3553 err = -ENOBUFS; 3553 err = -ENOBUFS;
diff --git a/net/l2tp/Makefile b/net/l2tp/Makefile
index 110e7bc2de5e..2870f41ea44d 100644
--- a/net/l2tp/Makefile
+++ b/net/l2tp/Makefile
@@ -10,3 +10,6 @@ obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_IP)) += l2tp_ip.o
10obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_V3)) += l2tp_netlink.o 10obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_V3)) += l2tp_netlink.o
11obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_ETH)) += l2tp_eth.o 11obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_ETH)) += l2tp_eth.o
12obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_DEBUGFS)) += l2tp_debugfs.o 12obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_DEBUGFS)) += l2tp_debugfs.o
13ifneq ($(CONFIG_IPV6),)
14obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_IP)) += l2tp_ip6.o
15endif
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 89ff8c67943e..456b52d8f6d8 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -53,6 +53,10 @@
53#include <net/inet_common.h> 53#include <net/inet_common.h>
54#include <net/xfrm.h> 54#include <net/xfrm.h>
55#include <net/protocol.h> 55#include <net/protocol.h>
56#include <net/inet6_connection_sock.h>
57#include <net/inet_ecn.h>
58#include <net/ip6_route.h>
59#include <net/ip6_checksum.h>
56 60
57#include <asm/byteorder.h> 61#include <asm/byteorder.h>
58#include <linux/atomic.h> 62#include <linux/atomic.h>
@@ -326,8 +330,10 @@ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *sk
326 struct sk_buff *skbp; 330 struct sk_buff *skbp;
327 struct sk_buff *tmp; 331 struct sk_buff *tmp;
328 u32 ns = L2TP_SKB_CB(skb)->ns; 332 u32 ns = L2TP_SKB_CB(skb)->ns;
333 struct l2tp_stats *sstats;
329 334
330 spin_lock_bh(&session->reorder_q.lock); 335 spin_lock_bh(&session->reorder_q.lock);
336 sstats = &session->stats;
331 skb_queue_walk_safe(&session->reorder_q, skbp, tmp) { 337 skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
332 if (L2TP_SKB_CB(skbp)->ns > ns) { 338 if (L2TP_SKB_CB(skbp)->ns > ns) {
333 __skb_queue_before(&session->reorder_q, skbp, skb); 339 __skb_queue_before(&session->reorder_q, skbp, skb);
@@ -335,7 +341,9 @@ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *sk
335 "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n", 341 "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
336 session->name, ns, L2TP_SKB_CB(skbp)->ns, 342 session->name, ns, L2TP_SKB_CB(skbp)->ns,
337 skb_queue_len(&session->reorder_q)); 343 skb_queue_len(&session->reorder_q));
338 session->stats.rx_oos_packets++; 344 u64_stats_update_begin(&sstats->syncp);
345 sstats->rx_oos_packets++;
346 u64_stats_update_end(&sstats->syncp);
339 goto out; 347 goto out;
340 } 348 }
341 } 349 }
@@ -352,16 +360,23 @@ static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *
352{ 360{
353 struct l2tp_tunnel *tunnel = session->tunnel; 361 struct l2tp_tunnel *tunnel = session->tunnel;
354 int length = L2TP_SKB_CB(skb)->length; 362 int length = L2TP_SKB_CB(skb)->length;
363 struct l2tp_stats *tstats, *sstats;
355 364
356 /* We're about to requeue the skb, so return resources 365 /* We're about to requeue the skb, so return resources
357 * to its current owner (a socket receive buffer). 366 * to its current owner (a socket receive buffer).
358 */ 367 */
359 skb_orphan(skb); 368 skb_orphan(skb);
360 369
361 tunnel->stats.rx_packets++; 370 tstats = &tunnel->stats;
362 tunnel->stats.rx_bytes += length; 371 u64_stats_update_begin(&tstats->syncp);
363 session->stats.rx_packets++; 372 sstats = &session->stats;
364 session->stats.rx_bytes += length; 373 u64_stats_update_begin(&sstats->syncp);
374 tstats->rx_packets++;
375 tstats->rx_bytes += length;
376 sstats->rx_packets++;
377 sstats->rx_bytes += length;
378 u64_stats_update_end(&tstats->syncp);
379 u64_stats_update_end(&sstats->syncp);
365 380
366 if (L2TP_SKB_CB(skb)->has_seq) { 381 if (L2TP_SKB_CB(skb)->has_seq) {
367 /* Bump our Nr */ 382 /* Bump our Nr */
@@ -392,6 +407,7 @@ static void l2tp_recv_dequeue(struct l2tp_session *session)
392{ 407{
393 struct sk_buff *skb; 408 struct sk_buff *skb;
394 struct sk_buff *tmp; 409 struct sk_buff *tmp;
410 struct l2tp_stats *sstats;
395 411
396 /* If the pkt at the head of the queue has the nr that we 412 /* If the pkt at the head of the queue has the nr that we
397 * expect to send up next, dequeue it and any other 413 * expect to send up next, dequeue it and any other
@@ -399,10 +415,13 @@ static void l2tp_recv_dequeue(struct l2tp_session *session)
399 */ 415 */
400start: 416start:
401 spin_lock_bh(&session->reorder_q.lock); 417 spin_lock_bh(&session->reorder_q.lock);
418 sstats = &session->stats;
402 skb_queue_walk_safe(&session->reorder_q, skb, tmp) { 419 skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
403 if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) { 420 if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) {
404 session->stats.rx_seq_discards++; 421 u64_stats_update_begin(&sstats->syncp);
405 session->stats.rx_errors++; 422 sstats->rx_seq_discards++;
423 sstats->rx_errors++;
424 u64_stats_update_end(&sstats->syncp);
406 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, 425 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
407 "%s: oos pkt %u len %d discarded (too old), " 426 "%s: oos pkt %u len %d discarded (too old), "
408 "waiting for %u, reorder_q_len=%d\n", 427 "waiting for %u, reorder_q_len=%d\n",
@@ -446,21 +465,43 @@ static inline int l2tp_verify_udp_checksum(struct sock *sk,
446{ 465{
447 struct udphdr *uh = udp_hdr(skb); 466 struct udphdr *uh = udp_hdr(skb);
448 u16 ulen = ntohs(uh->len); 467 u16 ulen = ntohs(uh->len);
449 struct inet_sock *inet;
450 __wsum psum; 468 __wsum psum;
451 469
452 if (sk->sk_no_check || skb_csum_unnecessary(skb) || !uh->check) 470 if (sk->sk_no_check || skb_csum_unnecessary(skb))
453 return 0;
454
455 inet = inet_sk(sk);
456 psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr, ulen,
457 IPPROTO_UDP, 0);
458
459 if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
460 !csum_fold(csum_add(psum, skb->csum)))
461 return 0; 471 return 0;
462 472
463 skb->csum = psum; 473#if IS_ENABLED(CONFIG_IPV6)
474 if (sk->sk_family == PF_INET6) {
475 if (!uh->check) {
476 LIMIT_NETDEBUG(KERN_INFO "L2TP: IPv6: checksum is 0\n");
477 return 1;
478 }
479 if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
480 !csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
481 &ipv6_hdr(skb)->daddr, ulen,
482 IPPROTO_UDP, skb->csum)) {
483 skb->ip_summed = CHECKSUM_UNNECESSARY;
484 return 0;
485 }
486 skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
487 &ipv6_hdr(skb)->daddr,
488 skb->len, IPPROTO_UDP,
489 0));
490 } else
491#endif
492 {
493 struct inet_sock *inet;
494 if (!uh->check)
495 return 0;
496 inet = inet_sk(sk);
497 psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr,
498 ulen, IPPROTO_UDP, 0);
499
500 if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
501 !csum_fold(csum_add(psum, skb->csum)))
502 return 0;
503 skb->csum = psum;
504 }
464 505
465 return __skb_checksum_complete(skb); 506 return __skb_checksum_complete(skb);
466} 507}
@@ -532,6 +573,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
532 struct l2tp_tunnel *tunnel = session->tunnel; 573 struct l2tp_tunnel *tunnel = session->tunnel;
533 int offset; 574 int offset;
534 u32 ns, nr; 575 u32 ns, nr;
576 struct l2tp_stats *sstats = &session->stats;
535 577
536 /* The ref count is increased since we now hold a pointer to 578 /* The ref count is increased since we now hold a pointer to
537 * the session. Take care to decrement the refcnt when exiting 579 * the session. Take care to decrement the refcnt when exiting
@@ -547,7 +589,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
547 PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO, 589 PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
548 "%s: cookie mismatch (%u/%u). Discarding.\n", 590 "%s: cookie mismatch (%u/%u). Discarding.\n",
549 tunnel->name, tunnel->tunnel_id, session->session_id); 591 tunnel->name, tunnel->tunnel_id, session->session_id);
550 session->stats.rx_cookie_discards++; 592 u64_stats_update_begin(&sstats->syncp);
593 sstats->rx_cookie_discards++;
594 u64_stats_update_end(&sstats->syncp);
551 goto discard; 595 goto discard;
552 } 596 }
553 ptr += session->peer_cookie_len; 597 ptr += session->peer_cookie_len;
@@ -616,7 +660,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
616 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING, 660 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING,
617 "%s: recv data has no seq numbers when required. " 661 "%s: recv data has no seq numbers when required. "
618 "Discarding\n", session->name); 662 "Discarding\n", session->name);
619 session->stats.rx_seq_discards++; 663 u64_stats_update_begin(&sstats->syncp);
664 sstats->rx_seq_discards++;
665 u64_stats_update_end(&sstats->syncp);
620 goto discard; 666 goto discard;
621 } 667 }
622 668
@@ -635,7 +681,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
635 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING, 681 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING,
636 "%s: recv data has no seq numbers when required. " 682 "%s: recv data has no seq numbers when required. "
637 "Discarding\n", session->name); 683 "Discarding\n", session->name);
638 session->stats.rx_seq_discards++; 684 u64_stats_update_begin(&sstats->syncp);
685 sstats->rx_seq_discards++;
686 u64_stats_update_end(&sstats->syncp);
639 goto discard; 687 goto discard;
640 } 688 }
641 } 689 }
@@ -689,7 +737,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
689 * packets 737 * packets
690 */ 738 */
691 if (L2TP_SKB_CB(skb)->ns != session->nr) { 739 if (L2TP_SKB_CB(skb)->ns != session->nr) {
692 session->stats.rx_seq_discards++; 740 u64_stats_update_begin(&sstats->syncp);
741 sstats->rx_seq_discards++;
742 u64_stats_update_end(&sstats->syncp);
693 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG, 743 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
694 "%s: oos pkt %u len %d discarded, " 744 "%s: oos pkt %u len %d discarded, "
695 "waiting for %u, reorder_q_len=%d\n", 745 "waiting for %u, reorder_q_len=%d\n",
@@ -716,7 +766,9 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
716 return; 766 return;
717 767
718discard: 768discard:
719 session->stats.rx_errors++; 769 u64_stats_update_begin(&sstats->syncp);
770 sstats->rx_errors++;
771 u64_stats_update_end(&sstats->syncp);
720 kfree_skb(skb); 772 kfree_skb(skb);
721 773
722 if (session->deref) 774 if (session->deref)
@@ -742,6 +794,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
742 int offset; 794 int offset;
743 u16 version; 795 u16 version;
744 int length; 796 int length;
797 struct l2tp_stats *tstats;
745 798
746 if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb)) 799 if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb))
747 goto discard_bad_csum; 800 goto discard_bad_csum;
@@ -834,7 +887,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
834discard_bad_csum: 887discard_bad_csum:
835 LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name); 888 LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
836 UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0); 889 UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0);
837 tunnel->stats.rx_errors++; 890 tstats = &tunnel->stats;
891 u64_stats_update_begin(&tstats->syncp);
892 tstats->rx_errors++;
893 u64_stats_update_end(&tstats->syncp);
838 kfree_skb(skb); 894 kfree_skb(skb);
839 895
840 return 0; 896 return 0;
@@ -960,6 +1016,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
960 struct l2tp_tunnel *tunnel = session->tunnel; 1016 struct l2tp_tunnel *tunnel = session->tunnel;
961 unsigned int len = skb->len; 1017 unsigned int len = skb->len;
962 int error; 1018 int error;
1019 struct l2tp_stats *tstats, *sstats;
963 1020
964 /* Debug */ 1021 /* Debug */
965 if (session->send_seq) 1022 if (session->send_seq)
@@ -988,18 +1045,29 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,
988 1045
989 /* Queue the packet to IP for output */ 1046 /* Queue the packet to IP for output */
990 skb->local_df = 1; 1047 skb->local_df = 1;
991 error = ip_queue_xmit(skb, fl); 1048#if IS_ENABLED(CONFIG_IPV6)
1049 if (skb->sk->sk_family == PF_INET6)
1050 error = inet6_csk_xmit(skb, NULL);
1051 else
1052#endif
1053 error = ip_queue_xmit(skb, fl);
992 1054
993 /* Update stats */ 1055 /* Update stats */
1056 tstats = &tunnel->stats;
1057 u64_stats_update_begin(&tstats->syncp);
1058 sstats = &session->stats;
1059 u64_stats_update_begin(&sstats->syncp);
994 if (error >= 0) { 1060 if (error >= 0) {
995 tunnel->stats.tx_packets++; 1061 tstats->tx_packets++;
996 tunnel->stats.tx_bytes += len; 1062 tstats->tx_bytes += len;
997 session->stats.tx_packets++; 1063 sstats->tx_packets++;
998 session->stats.tx_bytes += len; 1064 sstats->tx_bytes += len;
999 } else { 1065 } else {
1000 tunnel->stats.tx_errors++; 1066 tstats->tx_errors++;
1001 session->stats.tx_errors++; 1067 sstats->tx_errors++;
1002 } 1068 }
1069 u64_stats_update_end(&tstats->syncp);
1070 u64_stats_update_end(&sstats->syncp);
1003 1071
1004 return 0; 1072 return 0;
1005} 1073}
@@ -1021,6 +1089,31 @@ static inline void l2tp_skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
1021 skb->destructor = l2tp_sock_wfree; 1089 skb->destructor = l2tp_sock_wfree;
1022} 1090}
1023 1091
1092#if IS_ENABLED(CONFIG_IPV6)
1093static void l2tp_xmit_ipv6_csum(struct sock *sk, struct sk_buff *skb,
1094 int udp_len)
1095{
1096 struct ipv6_pinfo *np = inet6_sk(sk);
1097 struct udphdr *uh = udp_hdr(skb);
1098
1099 if (!skb_dst(skb) || !skb_dst(skb)->dev ||
1100 !(skb_dst(skb)->dev->features & NETIF_F_IPV6_CSUM)) {
1101 __wsum csum = skb_checksum(skb, 0, udp_len, 0);
1102 skb->ip_summed = CHECKSUM_UNNECESSARY;
1103 uh->check = csum_ipv6_magic(&np->saddr, &np->daddr, udp_len,
1104 IPPROTO_UDP, csum);
1105 if (uh->check == 0)
1106 uh->check = CSUM_MANGLED_0;
1107 } else {
1108 skb->ip_summed = CHECKSUM_PARTIAL;
1109 skb->csum_start = skb_transport_header(skb) - skb->head;
1110 skb->csum_offset = offsetof(struct udphdr, check);
1111 uh->check = ~csum_ipv6_magic(&np->saddr, &np->daddr,
1112 udp_len, IPPROTO_UDP, 0);
1113 }
1114}
1115#endif
1116
1024/* If caller requires the skb to have a ppp header, the header must be 1117/* If caller requires the skb to have a ppp header, the header must be
1025 * inserted in the skb data before calling this function. 1118 * inserted in the skb data before calling this function.
1026 */ 1119 */
@@ -1089,6 +1182,11 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
1089 uh->check = 0; 1182 uh->check = 0;
1090 1183
1091 /* Calculate UDP checksum if configured to do so */ 1184 /* Calculate UDP checksum if configured to do so */
1185#if IS_ENABLED(CONFIG_IPV6)
1186 if (sk->sk_family == PF_INET6)
1187 l2tp_xmit_ipv6_csum(sk, skb, udp_len);
1188 else
1189#endif
1092 if (sk->sk_no_check == UDP_CSUM_NOXMIT) 1190 if (sk->sk_no_check == UDP_CSUM_NOXMIT)
1093 skb->ip_summed = CHECKSUM_NONE; 1191 skb->ip_summed = CHECKSUM_NONE;
1094 else if ((skb_dst(skb) && skb_dst(skb)->dev) && 1192 else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
@@ -1268,31 +1366,69 @@ static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2t
1268{ 1366{
1269 int err = -EINVAL; 1367 int err = -EINVAL;
1270 struct sockaddr_in udp_addr; 1368 struct sockaddr_in udp_addr;
1369#if IS_ENABLED(CONFIG_IPV6)
1370 struct sockaddr_in6 udp6_addr;
1371 struct sockaddr_l2tpip6 ip6_addr;
1372#endif
1271 struct sockaddr_l2tpip ip_addr; 1373 struct sockaddr_l2tpip ip_addr;
1272 struct socket *sock = NULL; 1374 struct socket *sock = NULL;
1273 1375
1274 switch (cfg->encap) { 1376 switch (cfg->encap) {
1275 case L2TP_ENCAPTYPE_UDP: 1377 case L2TP_ENCAPTYPE_UDP:
1276 err = sock_create(AF_INET, SOCK_DGRAM, 0, sockp); 1378#if IS_ENABLED(CONFIG_IPV6)
1277 if (err < 0) 1379 if (cfg->local_ip6 && cfg->peer_ip6) {
1278 goto out; 1380 err = sock_create(AF_INET6, SOCK_DGRAM, 0, sockp);
1381 if (err < 0)
1382 goto out;
1279 1383
1280 sock = *sockp; 1384 sock = *sockp;
1281 1385
1282 memset(&udp_addr, 0, sizeof(udp_addr)); 1386 memset(&udp6_addr, 0, sizeof(udp6_addr));
1283 udp_addr.sin_family = AF_INET; 1387 udp6_addr.sin6_family = AF_INET6;
1284 udp_addr.sin_addr = cfg->local_ip; 1388 memcpy(&udp6_addr.sin6_addr, cfg->local_ip6,
1285 udp_addr.sin_port = htons(cfg->local_udp_port); 1389 sizeof(udp6_addr.sin6_addr));
1286 err = kernel_bind(sock, (struct sockaddr *) &udp_addr, sizeof(udp_addr)); 1390 udp6_addr.sin6_port = htons(cfg->local_udp_port);
1287 if (err < 0) 1391 err = kernel_bind(sock, (struct sockaddr *) &udp6_addr,
1288 goto out; 1392 sizeof(udp6_addr));
1393 if (err < 0)
1394 goto out;
1289 1395
1290 udp_addr.sin_family = AF_INET; 1396 udp6_addr.sin6_family = AF_INET6;
1291 udp_addr.sin_addr = cfg->peer_ip; 1397 memcpy(&udp6_addr.sin6_addr, cfg->peer_ip6,
1292 udp_addr.sin_port = htons(cfg->peer_udp_port); 1398 sizeof(udp6_addr.sin6_addr));
1293 err = kernel_connect(sock, (struct sockaddr *) &udp_addr, sizeof(udp_addr), 0); 1399 udp6_addr.sin6_port = htons(cfg->peer_udp_port);
1294 if (err < 0) 1400 err = kernel_connect(sock,
1295 goto out; 1401 (struct sockaddr *) &udp6_addr,
1402 sizeof(udp6_addr), 0);
1403 if (err < 0)
1404 goto out;
1405 } else
1406#endif
1407 {
1408 err = sock_create(AF_INET, SOCK_DGRAM, 0, sockp);
1409 if (err < 0)
1410 goto out;
1411
1412 sock = *sockp;
1413
1414 memset(&udp_addr, 0, sizeof(udp_addr));
1415 udp_addr.sin_family = AF_INET;
1416 udp_addr.sin_addr = cfg->local_ip;
1417 udp_addr.sin_port = htons(cfg->local_udp_port);
1418 err = kernel_bind(sock, (struct sockaddr *) &udp_addr,
1419 sizeof(udp_addr));
1420 if (err < 0)
1421 goto out;
1422
1423 udp_addr.sin_family = AF_INET;
1424 udp_addr.sin_addr = cfg->peer_ip;
1425 udp_addr.sin_port = htons(cfg->peer_udp_port);
1426 err = kernel_connect(sock,
1427 (struct sockaddr *) &udp_addr,
1428 sizeof(udp_addr), 0);
1429 if (err < 0)
1430 goto out;
1431 }
1296 1432
1297 if (!cfg->use_udp_checksums) 1433 if (!cfg->use_udp_checksums)
1298 sock->sk->sk_no_check = UDP_CSUM_NOXMIT; 1434 sock->sk->sk_no_check = UDP_CSUM_NOXMIT;
@@ -1300,27 +1436,61 @@ static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2t
1300 break; 1436 break;
1301 1437
1302 case L2TP_ENCAPTYPE_IP: 1438 case L2TP_ENCAPTYPE_IP:
1303 err = sock_create(AF_INET, SOCK_DGRAM, IPPROTO_L2TP, sockp); 1439#if IS_ENABLED(CONFIG_IPV6)
1304 if (err < 0) 1440 if (cfg->local_ip6 && cfg->peer_ip6) {
1305 goto out; 1441 err = sock_create(AF_INET6, SOCK_DGRAM, IPPROTO_L2TP,
1442 sockp);
1443 if (err < 0)
1444 goto out;
1306 1445
1307 sock = *sockp; 1446 sock = *sockp;
1308 1447
1309 memset(&ip_addr, 0, sizeof(ip_addr)); 1448 memset(&ip6_addr, 0, sizeof(ip6_addr));
1310 ip_addr.l2tp_family = AF_INET; 1449 ip6_addr.l2tp_family = AF_INET6;
1311 ip_addr.l2tp_addr = cfg->local_ip; 1450 memcpy(&ip6_addr.l2tp_addr, cfg->local_ip6,
1312 ip_addr.l2tp_conn_id = tunnel_id; 1451 sizeof(ip6_addr.l2tp_addr));
1313 err = kernel_bind(sock, (struct sockaddr *) &ip_addr, sizeof(ip_addr)); 1452 ip6_addr.l2tp_conn_id = tunnel_id;
1314 if (err < 0) 1453 err = kernel_bind(sock, (struct sockaddr *) &ip6_addr,
1315 goto out; 1454 sizeof(ip6_addr));
1455 if (err < 0)
1456 goto out;
1316 1457
1317 ip_addr.l2tp_family = AF_INET; 1458 ip6_addr.l2tp_family = AF_INET6;
1318 ip_addr.l2tp_addr = cfg->peer_ip; 1459 memcpy(&ip6_addr.l2tp_addr, cfg->peer_ip6,
1319 ip_addr.l2tp_conn_id = peer_tunnel_id; 1460 sizeof(ip6_addr.l2tp_addr));
1320 err = kernel_connect(sock, (struct sockaddr *) &ip_addr, sizeof(ip_addr), 0); 1461 ip6_addr.l2tp_conn_id = peer_tunnel_id;
1321 if (err < 0) 1462 err = kernel_connect(sock,
1322 goto out; 1463 (struct sockaddr *) &ip6_addr,
1464 sizeof(ip6_addr), 0);
1465 if (err < 0)
1466 goto out;
1467 } else
1468#endif
1469 {
1470 err = sock_create(AF_INET, SOCK_DGRAM, IPPROTO_L2TP,
1471 sockp);
1472 if (err < 0)
1473 goto out;
1323 1474
1475 sock = *sockp;
1476
1477 memset(&ip_addr, 0, sizeof(ip_addr));
1478 ip_addr.l2tp_family = AF_INET;
1479 ip_addr.l2tp_addr = cfg->local_ip;
1480 ip_addr.l2tp_conn_id = tunnel_id;
1481 err = kernel_bind(sock, (struct sockaddr *) &ip_addr,
1482 sizeof(ip_addr));
1483 if (err < 0)
1484 goto out;
1485
1486 ip_addr.l2tp_family = AF_INET;
1487 ip_addr.l2tp_addr = cfg->peer_ip;
1488 ip_addr.l2tp_conn_id = peer_tunnel_id;
1489 err = kernel_connect(sock, (struct sockaddr *) &ip_addr,
1490 sizeof(ip_addr), 0);
1491 if (err < 0)
1492 goto out;
1493 }
1324 break; 1494 break;
1325 1495
1326 default: 1496 default:
@@ -1424,6 +1594,12 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
1424 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ 1594 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
1425 udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP; 1595 udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
1426 udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv; 1596 udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
1597#if IS_ENABLED(CONFIG_IPV6)
1598 if (sk->sk_family == PF_INET6)
1599 udpv6_encap_enable();
1600 else
1601#endif
1602 udp_encap_enable();
1427 } 1603 }
1428 1604
1429 sk->sk_user_data = tunnel; 1605 sk->sk_user_data = tunnel;
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index a16a48e79fab..0bf60fc88bb7 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -45,6 +45,7 @@ struct l2tp_stats {
45 u64 rx_oos_packets; 45 u64 rx_oos_packets;
46 u64 rx_errors; 46 u64 rx_errors;
47 u64 rx_cookie_discards; 47 u64 rx_cookie_discards;
48 struct u64_stats_sync syncp;
48}; 49};
49 50
50struct l2tp_tunnel; 51struct l2tp_tunnel;
@@ -54,15 +55,15 @@ struct l2tp_tunnel;
54 */ 55 */
55struct l2tp_session_cfg { 56struct l2tp_session_cfg {
56 enum l2tp_pwtype pw_type; 57 enum l2tp_pwtype pw_type;
57 unsigned data_seq:2; /* data sequencing level 58 unsigned int data_seq:2; /* data sequencing level
58 * 0 => none, 1 => IP only, 59 * 0 => none, 1 => IP only,
59 * 2 => all 60 * 2 => all
60 */ 61 */
61 unsigned recv_seq:1; /* expect receive packets with 62 unsigned int recv_seq:1; /* expect receive packets with
62 * sequence numbers? */ 63 * sequence numbers? */
63 unsigned send_seq:1; /* send packets with sequence 64 unsigned int send_seq:1; /* send packets with sequence
64 * numbers? */ 65 * numbers? */
65 unsigned lns_mode:1; /* behave as LNS? LAC enables 66 unsigned int lns_mode:1; /* behave as LNS? LAC enables
66 * sequence numbers under 67 * sequence numbers under
67 * control of LNS. */ 68 * control of LNS. */
68 int debug; /* bitmask of debug message 69 int debug; /* bitmask of debug message
@@ -107,15 +108,15 @@ struct l2tp_session {
107 108
108 char name[32]; /* for logging */ 109 char name[32]; /* for logging */
109 char ifname[IFNAMSIZ]; 110 char ifname[IFNAMSIZ];
110 unsigned data_seq:2; /* data sequencing level 111 unsigned int data_seq:2; /* data sequencing level
111 * 0 => none, 1 => IP only, 112 * 0 => none, 1 => IP only,
112 * 2 => all 113 * 2 => all
113 */ 114 */
114 unsigned recv_seq:1; /* expect receive packets with 115 unsigned int recv_seq:1; /* expect receive packets with
115 * sequence numbers? */ 116 * sequence numbers? */
116 unsigned send_seq:1; /* send packets with sequence 117 unsigned int send_seq:1; /* send packets with sequence
117 * numbers? */ 118 * numbers? */
118 unsigned lns_mode:1; /* behave as LNS? LAC enables 119 unsigned int lns_mode:1; /* behave as LNS? LAC enables
119 * sequence numbers under 120 * sequence numbers under
120 * control of LNS. */ 121 * control of LNS. */
121 int debug; /* bitmask of debug message 122 int debug; /* bitmask of debug message
@@ -150,6 +151,10 @@ struct l2tp_tunnel_cfg {
150 /* Used only for kernel-created sockets */ 151 /* Used only for kernel-created sockets */
151 struct in_addr local_ip; 152 struct in_addr local_ip;
152 struct in_addr peer_ip; 153 struct in_addr peer_ip;
154#if IS_ENABLED(CONFIG_IPV6)
155 struct in6_addr *local_ip6;
156 struct in6_addr *peer_ip6;
157#endif
153 u16 local_udp_port; 158 u16 local_udp_port;
154 u16 peer_udp_port; 159 u16 peer_udp_port;
155 unsigned int use_udp_checksums:1; 160 unsigned int use_udp_checksums:1;
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index 76130134bfa6..c0d57bad8b79 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -122,6 +122,14 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
122 seq_printf(m, "\nTUNNEL %u peer %u", tunnel->tunnel_id, tunnel->peer_tunnel_id); 122 seq_printf(m, "\nTUNNEL %u peer %u", tunnel->tunnel_id, tunnel->peer_tunnel_id);
123 if (tunnel->sock) { 123 if (tunnel->sock) {
124 struct inet_sock *inet = inet_sk(tunnel->sock); 124 struct inet_sock *inet = inet_sk(tunnel->sock);
125
126#if IS_ENABLED(CONFIG_IPV6)
127 if (tunnel->sock->sk_family == AF_INET6) {
128 struct ipv6_pinfo *np = inet6_sk(tunnel->sock);
129 seq_printf(m, " from %pI6c to %pI6c\n",
130 &np->saddr, &np->daddr);
131 } else
132#endif
125 seq_printf(m, " from %pI4 to %pI4\n", 133 seq_printf(m, " from %pI4 to %pI4\n",
126 &inet->inet_saddr, &inet->inet_daddr); 134 &inet->inet_saddr, &inet->inet_daddr);
127 if (tunnel->encap == L2TP_ENCAPTYPE_UDP) 135 if (tunnel->encap == L2TP_ENCAPTYPE_UDP)
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 6274f0be82b0..c89a32fb5d5e 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -32,15 +32,8 @@ struct l2tp_ip_sock {
32 /* inet_sock has to be the first member of l2tp_ip_sock */ 32 /* inet_sock has to be the first member of l2tp_ip_sock */
33 struct inet_sock inet; 33 struct inet_sock inet;
34 34
35 __u32 conn_id; 35 u32 conn_id;
36 __u32 peer_conn_id; 36 u32 peer_conn_id;
37
38 __u64 tx_packets;
39 __u64 tx_bytes;
40 __u64 tx_errors;
41 __u64 rx_packets;
42 __u64 rx_bytes;
43 __u64 rx_errors;
44}; 37};
45 38
46static DEFINE_RWLOCK(l2tp_ip_lock); 39static DEFINE_RWLOCK(l2tp_ip_lock);
@@ -298,68 +291,27 @@ out_in_use:
298static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 291static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
299{ 292{
300 struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr; 293 struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
301 struct inet_sock *inet = inet_sk(sk); 294 int rc;
302 struct flowi4 *fl4;
303 struct rtable *rt;
304 __be32 saddr;
305 int oif, rc;
306 295
307 rc = -EINVAL;
308 if (addr_len < sizeof(*lsa)) 296 if (addr_len < sizeof(*lsa))
309 goto out; 297 return -EINVAL;
310
311 rc = -EAFNOSUPPORT;
312 if (lsa->l2tp_family != AF_INET)
313 goto out;
314
315 lock_sock(sk);
316
317 sk_dst_reset(sk);
318
319 oif = sk->sk_bound_dev_if;
320 saddr = inet->inet_saddr;
321 298
322 rc = -EINVAL;
323 if (ipv4_is_multicast(lsa->l2tp_addr.s_addr)) 299 if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
324 goto out; 300 return -EINVAL;
325 301
326 fl4 = &inet->cork.fl.u.ip4; 302 rc = ip4_datagram_connect(sk, uaddr, addr_len);
327 rt = ip_route_connect(fl4, lsa->l2tp_addr.s_addr, saddr, 303 if (rc < 0)
328 RT_CONN_FLAGS(sk), oif, 304 return rc;
329 IPPROTO_L2TP,
330 0, 0, sk, true);
331 if (IS_ERR(rt)) {
332 rc = PTR_ERR(rt);
333 if (rc == -ENETUNREACH)
334 IP_INC_STATS_BH(&init_net, IPSTATS_MIB_OUTNOROUTES);
335 goto out;
336 }
337 305
338 rc = -ENETUNREACH; 306 lock_sock(sk);
339 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
340 ip_rt_put(rt);
341 goto out;
342 }
343 307
344 l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id; 308 l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
345 309
346 if (!inet->inet_saddr)
347 inet->inet_saddr = fl4->saddr;
348 if (!inet->inet_rcv_saddr)
349 inet->inet_rcv_saddr = fl4->saddr;
350 inet->inet_daddr = fl4->daddr;
351 sk->sk_state = TCP_ESTABLISHED;
352 inet->inet_id = jiffies;
353
354 sk_dst_set(sk, &rt->dst);
355
356 write_lock_bh(&l2tp_ip_lock); 310 write_lock_bh(&l2tp_ip_lock);
357 hlist_del_init(&sk->sk_bind_node); 311 hlist_del_init(&sk->sk_bind_node);
358 sk_add_bind_node(sk, &l2tp_ip_bind_table); 312 sk_add_bind_node(sk, &l2tp_ip_bind_table);
359 write_unlock_bh(&l2tp_ip_lock); 313 write_unlock_bh(&l2tp_ip_lock);
360 314
361 rc = 0;
362out:
363 release_sock(sk); 315 release_sock(sk);
364 return rc; 316 return rc;
365} 317}
@@ -414,7 +366,6 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
414{ 366{
415 struct sk_buff *skb; 367 struct sk_buff *skb;
416 int rc; 368 int rc;
417 struct l2tp_ip_sock *lsa = l2tp_ip_sk(sk);
418 struct inet_sock *inet = inet_sk(sk); 369 struct inet_sock *inet = inet_sk(sk);
419 struct rtable *rt = NULL; 370 struct rtable *rt = NULL;
420 struct flowi4 *fl4; 371 struct flowi4 *fl4;
@@ -514,14 +465,8 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
514 rcu_read_unlock(); 465 rcu_read_unlock();
515 466
516error: 467error:
517 /* Update stats */ 468 if (rc >= 0)
518 if (rc >= 0) {
519 lsa->tx_packets++;
520 lsa->tx_bytes += len;
521 rc = len; 469 rc = len;
522 } else {
523 lsa->tx_errors++;
524 }
525 470
526out: 471out:
527 release_sock(sk); 472 release_sock(sk);
@@ -539,7 +484,6 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
539 size_t len, int noblock, int flags, int *addr_len) 484 size_t len, int noblock, int flags, int *addr_len)
540{ 485{
541 struct inet_sock *inet = inet_sk(sk); 486 struct inet_sock *inet = inet_sk(sk);
542 struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
543 size_t copied = 0; 487 size_t copied = 0;
544 int err = -EOPNOTSUPP; 488 int err = -EOPNOTSUPP;
545 struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; 489 struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
@@ -581,15 +525,7 @@ static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
581done: 525done:
582 skb_free_datagram(sk, skb); 526 skb_free_datagram(sk, skb);
583out: 527out:
584 if (err) { 528 return err ? err : copied;
585 lsk->rx_errors++;
586 return err;
587 }
588
589 lsk->rx_packets++;
590 lsk->rx_bytes += copied;
591
592 return copied;
593} 529}
594 530
595static struct proto l2tp_ip_prot = { 531static struct proto l2tp_ip_prot = {
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
new file mode 100644
index 000000000000..88f0abe35443
--- /dev/null
+++ b/net/l2tp/l2tp_ip6.c
@@ -0,0 +1,792 @@
1/*
2 * L2TPv3 IP encapsulation support for IPv6
3 *
4 * Copyright (c) 2012 Katalix Systems Ltd
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/icmp.h>
13#include <linux/module.h>
14#include <linux/skbuff.h>
15#include <linux/random.h>
16#include <linux/socket.h>
17#include <linux/l2tp.h>
18#include <linux/in.h>
19#include <linux/in6.h>
20#include <net/sock.h>
21#include <net/ip.h>
22#include <net/icmp.h>
23#include <net/udp.h>
24#include <net/inet_common.h>
25#include <net/inet_hashtables.h>
26#include <net/tcp_states.h>
27#include <net/protocol.h>
28#include <net/xfrm.h>
29
30#include <net/transp_v6.h>
31#include <net/addrconf.h>
32#include <net/ip6_route.h>
33
34#include "l2tp_core.h"
35
36struct l2tp_ip6_sock {
37 /* inet_sock has to be the first member of l2tp_ip6_sock */
38 struct inet_sock inet;
39
40 u32 conn_id;
41 u32 peer_conn_id;
42
43 /* ipv6_pinfo has to be the last member of l2tp_ip6_sock, see
44 inet6_sk_generic */
45 struct ipv6_pinfo inet6;
46};
47
48static DEFINE_RWLOCK(l2tp_ip6_lock);
49static struct hlist_head l2tp_ip6_table;
50static struct hlist_head l2tp_ip6_bind_table;
51
52static inline struct l2tp_ip6_sock *l2tp_ip6_sk(const struct sock *sk)
53{
54 return (struct l2tp_ip6_sock *)sk;
55}
56
57static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
58 struct in6_addr *laddr,
59 int dif, u32 tunnel_id)
60{
61 struct hlist_node *node;
62 struct sock *sk;
63
64 sk_for_each_bound(sk, node, &l2tp_ip6_bind_table) {
65 struct in6_addr *addr = inet6_rcv_saddr(sk);
66 struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
67
68 if (l2tp == NULL)
69 continue;
70
71 if ((l2tp->conn_id == tunnel_id) &&
72 net_eq(sock_net(sk), net) &&
73 !(addr && ipv6_addr_equal(addr, laddr)) &&
74 !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
75 goto found;
76 }
77
78 sk = NULL;
79found:
80 return sk;
81}
82
83static inline struct sock *l2tp_ip6_bind_lookup(struct net *net,
84 struct in6_addr *laddr,
85 int dif, u32 tunnel_id)
86{
87 struct sock *sk = __l2tp_ip6_bind_lookup(net, laddr, dif, tunnel_id);
88 if (sk)
89 sock_hold(sk);
90
91 return sk;
92}
93
94/* When processing receive frames, there are two cases to
95 * consider. Data frames consist of a non-zero session-id and an
96 * optional cookie. Control frames consist of a regular L2TP header
97 * preceded by 32-bits of zeros.
98 *
99 * L2TPv3 Session Header Over IP
100 *
101 * 0 1 2 3
102 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
103 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
104 * | Session ID |
105 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
106 * | Cookie (optional, maximum 64 bits)...
107 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
108 * |
109 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
110 *
111 * L2TPv3 Control Message Header Over IP
112 *
113 * 0 1 2 3
114 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
115 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
116 * | (32 bits of zeros) |
117 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
118 * |T|L|x|x|S|x|x|x|x|x|x|x| Ver | Length |
119 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
120 * | Control Connection ID |
121 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
122 * | Ns | Nr |
123 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
124 *
125 * All control frames are passed to userspace.
126 */
127static int l2tp_ip6_recv(struct sk_buff *skb)
128{
129 struct sock *sk;
130 u32 session_id;
131 u32 tunnel_id;
132 unsigned char *ptr, *optr;
133 struct l2tp_session *session;
134 struct l2tp_tunnel *tunnel = NULL;
135 int length;
136 int offset;
137
138 /* Point to L2TP header */
139 optr = ptr = skb->data;
140
141 if (!pskb_may_pull(skb, 4))
142 goto discard;
143
144 session_id = ntohl(*((__be32 *) ptr));
145 ptr += 4;
146
147 /* RFC3931: L2TP/IP packets have the first 4 bytes containing
148 * the session_id. If it is 0, the packet is a L2TP control
149 * frame and the session_id value can be discarded.
150 */
151 if (session_id == 0) {
152 __skb_pull(skb, 4);
153 goto pass_up;
154 }
155
156 /* Ok, this is a data packet. Lookup the session. */
157 session = l2tp_session_find(&init_net, NULL, session_id);
158 if (session == NULL)
159 goto discard;
160
161 tunnel = session->tunnel;
162 if (tunnel == NULL)
163 goto discard;
164
165 /* Trace packet contents, if enabled */
166 if (tunnel->debug & L2TP_MSG_DATA) {
167 length = min(32u, skb->len);
168 if (!pskb_may_pull(skb, length))
169 goto discard;
170
171 printk(KERN_DEBUG "%s: ip recv: ", tunnel->name);
172
173 offset = 0;
174 do {
175 printk(" %02X", ptr[offset]);
176 } while (++offset < length);
177
178 printk("\n");
179 }
180
181 l2tp_recv_common(session, skb, ptr, optr, 0, skb->len,
182 tunnel->recv_payload_hook);
183 return 0;
184
185pass_up:
186 /* Get the tunnel_id from the L2TP header */
187 if (!pskb_may_pull(skb, 12))
188 goto discard;
189
190 if ((skb->data[0] & 0xc0) != 0xc0)
191 goto discard;
192
193 tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
194 tunnel = l2tp_tunnel_find(&init_net, tunnel_id);
195 if (tunnel != NULL)
196 sk = tunnel->sock;
197 else {
198 struct ipv6hdr *iph = ipv6_hdr(skb);
199
200 read_lock_bh(&l2tp_ip6_lock);
201 sk = __l2tp_ip6_bind_lookup(&init_net, &iph->daddr,
202 0, tunnel_id);
203 read_unlock_bh(&l2tp_ip6_lock);
204 }
205
206 if (sk == NULL)
207 goto discard;
208
209 sock_hold(sk);
210
211 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
212 goto discard_put;
213
214 nf_reset(skb);
215
216 return sk_receive_skb(sk, skb, 1);
217
218discard_put:
219 sock_put(sk);
220
221discard:
222 kfree_skb(skb);
223 return 0;
224}
225
226static int l2tp_ip6_open(struct sock *sk)
227{
228 /* Prevent autobind. We don't have ports. */
229 inet_sk(sk)->inet_num = IPPROTO_L2TP;
230
231 write_lock_bh(&l2tp_ip6_lock);
232 sk_add_node(sk, &l2tp_ip6_table);
233 write_unlock_bh(&l2tp_ip6_lock);
234
235 return 0;
236}
237
238static void l2tp_ip6_close(struct sock *sk, long timeout)
239{
240 write_lock_bh(&l2tp_ip6_lock);
241 hlist_del_init(&sk->sk_bind_node);
242 sk_del_node_init(sk);
243 write_unlock_bh(&l2tp_ip6_lock);
244
245 sk_common_release(sk);
246}
247
248static void l2tp_ip6_destroy_sock(struct sock *sk)
249{
250 lock_sock(sk);
251 ip6_flush_pending_frames(sk);
252 release_sock(sk);
253
254 inet6_destroy_sock(sk);
255}
256
257static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
258{
259 struct inet_sock *inet = inet_sk(sk);
260 struct ipv6_pinfo *np = inet6_sk(sk);
261 struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *) uaddr;
262 __be32 v4addr = 0;
263 int addr_type;
264 int err;
265
266 if (addr_len < sizeof(*addr))
267 return -EINVAL;
268
269 addr_type = ipv6_addr_type(&addr->l2tp_addr);
270
271 /* l2tp_ip6 sockets are IPv6 only */
272 if (addr_type == IPV6_ADDR_MAPPED)
273 return -EADDRNOTAVAIL;
274
275 /* L2TP is point-point, not multicast */
276 if (addr_type & IPV6_ADDR_MULTICAST)
277 return -EADDRNOTAVAIL;
278
279 err = -EADDRINUSE;
280 read_lock_bh(&l2tp_ip6_lock);
281 if (__l2tp_ip6_bind_lookup(&init_net, &addr->l2tp_addr,
282 sk->sk_bound_dev_if, addr->l2tp_conn_id))
283 goto out_in_use;
284 read_unlock_bh(&l2tp_ip6_lock);
285
286 lock_sock(sk);
287
288 err = -EINVAL;
289 if (sk->sk_state != TCP_CLOSE)
290 goto out_unlock;
291
292 /* Check if the address belongs to the host. */
293 rcu_read_lock();
294 if (addr_type != IPV6_ADDR_ANY) {
295 struct net_device *dev = NULL;
296
297 if (addr_type & IPV6_ADDR_LINKLOCAL) {
298 if (addr_len >= sizeof(struct sockaddr_in6) &&
299 addr->l2tp_scope_id) {
300 /* Override any existing binding, if another
301 * one is supplied by user.
302 */
303 sk->sk_bound_dev_if = addr->l2tp_scope_id;
304 }
305
306 /* Binding to link-local address requires an
307 interface */
308 if (!sk->sk_bound_dev_if)
309 goto out_unlock_rcu;
310
311 err = -ENODEV;
312 dev = dev_get_by_index_rcu(sock_net(sk),
313 sk->sk_bound_dev_if);
314 if (!dev)
315 goto out_unlock_rcu;
316 }
317
318 /* ipv4 addr of the socket is invalid. Only the
319 * unspecified and mapped address have a v4 equivalent.
320 */
321 v4addr = LOOPBACK4_IPV6;
322 err = -EADDRNOTAVAIL;
323 if (!ipv6_chk_addr(sock_net(sk), &addr->l2tp_addr, dev, 0))
324 goto out_unlock_rcu;
325 }
326 rcu_read_unlock();
327
328 inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
329 np->rcv_saddr = addr->l2tp_addr;
330 np->saddr = addr->l2tp_addr;
331
332 l2tp_ip6_sk(sk)->conn_id = addr->l2tp_conn_id;
333
334 write_lock_bh(&l2tp_ip6_lock);
335 sk_add_bind_node(sk, &l2tp_ip6_bind_table);
336 sk_del_node_init(sk);
337 write_unlock_bh(&l2tp_ip6_lock);
338
339 release_sock(sk);
340 return 0;
341
342out_unlock_rcu:
343 rcu_read_unlock();
344out_unlock:
345 release_sock(sk);
346 return err;
347
348out_in_use:
349 read_unlock_bh(&l2tp_ip6_lock);
350 return err;
351}
352
353static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
354 int addr_len)
355{
356 struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *) uaddr;
357 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
358 struct in6_addr *daddr;
359 int addr_type;
360 int rc;
361
362 if (addr_len < sizeof(*lsa))
363 return -EINVAL;
364
365 addr_type = ipv6_addr_type(&usin->sin6_addr);
366 if (addr_type & IPV6_ADDR_MULTICAST)
367 return -EINVAL;
368
369 if (addr_type & IPV6_ADDR_MAPPED) {
370 daddr = &usin->sin6_addr;
371 if (ipv4_is_multicast(daddr->s6_addr32[3]))
372 return -EINVAL;
373 }
374
375 rc = ip6_datagram_connect(sk, uaddr, addr_len);
376
377 lock_sock(sk);
378
379 l2tp_ip6_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
380
381 write_lock_bh(&l2tp_ip6_lock);
382 hlist_del_init(&sk->sk_bind_node);
383 sk_add_bind_node(sk, &l2tp_ip6_bind_table);
384 write_unlock_bh(&l2tp_ip6_lock);
385
386 release_sock(sk);
387
388 return rc;
389}
390
391static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
392 int *uaddr_len, int peer)
393{
394 struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)uaddr;
395 struct sock *sk = sock->sk;
396 struct ipv6_pinfo *np = inet6_sk(sk);
397 struct l2tp_ip6_sock *lsk = l2tp_ip6_sk(sk);
398
399 lsa->l2tp_family = AF_INET6;
400 lsa->l2tp_flowinfo = 0;
401 lsa->l2tp_scope_id = 0;
402 if (peer) {
403 if (!lsk->peer_conn_id)
404 return -ENOTCONN;
405 lsa->l2tp_conn_id = lsk->peer_conn_id;
406 lsa->l2tp_addr = np->daddr;
407 if (np->sndflow)
408 lsa->l2tp_flowinfo = np->flow_label;
409 } else {
410 if (ipv6_addr_any(&np->rcv_saddr))
411 lsa->l2tp_addr = np->saddr;
412 else
413 lsa->l2tp_addr = np->rcv_saddr;
414
415 lsa->l2tp_conn_id = lsk->conn_id;
416 }
417 if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
418 lsa->l2tp_scope_id = sk->sk_bound_dev_if;
419 *uaddr_len = sizeof(*lsa);
420 return 0;
421}
422
423static int l2tp_ip6_backlog_recv(struct sock *sk, struct sk_buff *skb)
424{
425 int rc;
426
427 /* Charge it to the socket, dropping if the queue is full. */
428 rc = sock_queue_rcv_skb(sk, skb);
429 if (rc < 0)
430 goto drop;
431
432 return 0;
433
434drop:
435 IP_INC_STATS(&init_net, IPSTATS_MIB_INDISCARDS);
436 kfree_skb(skb);
437 return -1;
438}
439
440static int l2tp_ip6_push_pending_frames(struct sock *sk)
441{
442 struct sk_buff *skb;
443 __be32 *transhdr = NULL;
444 int err = 0;
445
446 skb = skb_peek(&sk->sk_write_queue);
447 if (skb == NULL)
448 goto out;
449
450 transhdr = (__be32 *)skb_transport_header(skb);
451 *transhdr = 0;
452
453 err = ip6_push_pending_frames(sk);
454
455out:
456 return err;
457}
458
459/* Userspace will call sendmsg() on the tunnel socket to send L2TP
460 * control frames.
461 */
462static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
463 struct msghdr *msg, size_t len)
464{
465 struct ipv6_txoptions opt_space;
466 struct sockaddr_l2tpip6 *lsa =
467 (struct sockaddr_l2tpip6 *) msg->msg_name;
468 struct in6_addr *daddr, *final_p, final;
469 struct ipv6_pinfo *np = inet6_sk(sk);
470 struct ipv6_txoptions *opt = NULL;
471 struct ip6_flowlabel *flowlabel = NULL;
472 struct dst_entry *dst = NULL;
473 struct flowi6 fl6;
474 int addr_len = msg->msg_namelen;
475 int hlimit = -1;
476 int tclass = -1;
477 int dontfrag = -1;
478 int transhdrlen = 4; /* zero session-id */
479 int ulen = len + transhdrlen;
480 int err;
481
482 /* Rough check on arithmetic overflow,
483 better check is made in ip6_append_data().
484 */
485 if (len > INT_MAX)
486 return -EMSGSIZE;
487
488 /* Mirror BSD error message compatibility */
489 if (msg->msg_flags & MSG_OOB)
490 return -EOPNOTSUPP;
491
492 /*
493 * Get and verify the address.
494 */
495 memset(&fl6, 0, sizeof(fl6));
496
497 fl6.flowi6_mark = sk->sk_mark;
498
499 if (lsa) {
500 if (addr_len < SIN6_LEN_RFC2133)
501 return -EINVAL;
502
503 if (lsa->l2tp_family && lsa->l2tp_family != AF_INET6)
504 return -EAFNOSUPPORT;
505
506 daddr = &lsa->l2tp_addr;
507 if (np->sndflow) {
508 fl6.flowlabel = lsa->l2tp_flowinfo & IPV6_FLOWINFO_MASK;
509 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
510 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
511 if (flowlabel == NULL)
512 return -EINVAL;
513 daddr = &flowlabel->dst;
514 }
515 }
516
517 /*
518 * Otherwise it will be difficult to maintain
519 * sk->sk_dst_cache.
520 */
521 if (sk->sk_state == TCP_ESTABLISHED &&
522 ipv6_addr_equal(daddr, &np->daddr))
523 daddr = &np->daddr;
524
525 if (addr_len >= sizeof(struct sockaddr_in6) &&
526 lsa->l2tp_scope_id &&
527 ipv6_addr_type(daddr) & IPV6_ADDR_LINKLOCAL)
528 fl6.flowi6_oif = lsa->l2tp_scope_id;
529 } else {
530 if (sk->sk_state != TCP_ESTABLISHED)
531 return -EDESTADDRREQ;
532
533 daddr = &np->daddr;
534 fl6.flowlabel = np->flow_label;
535 }
536
537 if (fl6.flowi6_oif == 0)
538 fl6.flowi6_oif = sk->sk_bound_dev_if;
539
540 if (msg->msg_controllen) {
541 opt = &opt_space;
542 memset(opt, 0, sizeof(struct ipv6_txoptions));
543 opt->tot_len = sizeof(struct ipv6_txoptions);
544
545 err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
546 &hlimit, &tclass, &dontfrag);
547 if (err < 0) {
548 fl6_sock_release(flowlabel);
549 return err;
550 }
551 if ((fl6.flowlabel & IPV6_FLOWLABEL_MASK) && !flowlabel) {
552 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
553 if (flowlabel == NULL)
554 return -EINVAL;
555 }
556 if (!(opt->opt_nflen|opt->opt_flen))
557 opt = NULL;
558 }
559
560 if (opt == NULL)
561 opt = np->opt;
562 if (flowlabel)
563 opt = fl6_merge_options(&opt_space, flowlabel, opt);
564 opt = ipv6_fixup_options(&opt_space, opt);
565
566 fl6.flowi6_proto = sk->sk_protocol;
567 if (!ipv6_addr_any(daddr))
568 fl6.daddr = *daddr;
569 else
570 fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
571 if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
572 fl6.saddr = np->saddr;
573
574 final_p = fl6_update_dst(&fl6, opt, &final);
575
576 if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
577 fl6.flowi6_oif = np->mcast_oif;
578 else if (!fl6.flowi6_oif)
579 fl6.flowi6_oif = np->ucast_oif;
580
581 security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
582
583 dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
584 if (IS_ERR(dst)) {
585 err = PTR_ERR(dst);
586 goto out;
587 }
588
589 if (hlimit < 0) {
590 if (ipv6_addr_is_multicast(&fl6.daddr))
591 hlimit = np->mcast_hops;
592 else
593 hlimit = np->hop_limit;
594 if (hlimit < 0)
595 hlimit = ip6_dst_hoplimit(dst);
596 }
597
598 if (tclass < 0)
599 tclass = np->tclass;
600
601 if (dontfrag < 0)
602 dontfrag = np->dontfrag;
603
604 if (msg->msg_flags & MSG_CONFIRM)
605 goto do_confirm;
606
607back_from_confirm:
608 lock_sock(sk);
609 err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov,
610 ulen, transhdrlen, hlimit, tclass, opt,
611 &fl6, (struct rt6_info *)dst,
612 msg->msg_flags, dontfrag);
613 if (err)
614 ip6_flush_pending_frames(sk);
615 else if (!(msg->msg_flags & MSG_MORE))
616 err = l2tp_ip6_push_pending_frames(sk);
617 release_sock(sk);
618done:
619 dst_release(dst);
620out:
621 fl6_sock_release(flowlabel);
622
623 return err < 0 ? err : len;
624
625do_confirm:
626 dst_confirm(dst);
627 if (!(msg->msg_flags & MSG_PROBE) || len)
628 goto back_from_confirm;
629 err = 0;
630 goto done;
631}
632
633static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
634 struct msghdr *msg, size_t len, int noblock,
635 int flags, int *addr_len)
636{
637 struct inet_sock *inet = inet_sk(sk);
638 struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)msg->msg_name;
639 size_t copied = 0;
640 int err = -EOPNOTSUPP;
641 struct sk_buff *skb;
642
643 if (flags & MSG_OOB)
644 goto out;
645
646 if (addr_len)
647 *addr_len = sizeof(*lsa);
648
649 if (flags & MSG_ERRQUEUE)
650 return ipv6_recv_error(sk, msg, len);
651
652 skb = skb_recv_datagram(sk, flags, noblock, &err);
653 if (!skb)
654 goto out;
655
656 copied = skb->len;
657 if (len < copied) {
658 msg->msg_flags |= MSG_TRUNC;
659 copied = len;
660 }
661
662 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
663 if (err)
664 goto done;
665
666 sock_recv_timestamp(msg, sk, skb);
667
668 /* Copy the address. */
669 if (lsa) {
670 lsa->l2tp_family = AF_INET6;
671 lsa->l2tp_unused = 0;
672 lsa->l2tp_addr = ipv6_hdr(skb)->saddr;
673 lsa->l2tp_flowinfo = 0;
674 lsa->l2tp_scope_id = 0;
675 if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
676 lsa->l2tp_scope_id = IP6CB(skb)->iif;
677 }
678
679 if (inet->cmsg_flags)
680 ip_cmsg_recv(msg, skb);
681
682 if (flags & MSG_TRUNC)
683 copied = skb->len;
684done:
685 skb_free_datagram(sk, skb);
686out:
687 return err ? err : copied;
688}
689
690static struct proto l2tp_ip6_prot = {
691 .name = "L2TP/IPv6",
692 .owner = THIS_MODULE,
693 .init = l2tp_ip6_open,
694 .close = l2tp_ip6_close,
695 .bind = l2tp_ip6_bind,
696 .connect = l2tp_ip6_connect,
697 .disconnect = udp_disconnect,
698 .ioctl = udp_ioctl,
699 .destroy = l2tp_ip6_destroy_sock,
700 .setsockopt = ipv6_setsockopt,
701 .getsockopt = ipv6_getsockopt,
702 .sendmsg = l2tp_ip6_sendmsg,
703 .recvmsg = l2tp_ip6_recvmsg,
704 .backlog_rcv = l2tp_ip6_backlog_recv,
705 .hash = inet_hash,
706 .unhash = inet_unhash,
707 .obj_size = sizeof(struct l2tp_ip6_sock),
708#ifdef CONFIG_COMPAT
709 .compat_setsockopt = compat_ipv6_setsockopt,
710 .compat_getsockopt = compat_ipv6_getsockopt,
711#endif
712};
713
714static const struct proto_ops l2tp_ip6_ops = {
715 .family = PF_INET6,
716 .owner = THIS_MODULE,
717 .release = inet6_release,
718 .bind = inet6_bind,
719 .connect = inet_dgram_connect,
720 .socketpair = sock_no_socketpair,
721 .accept = sock_no_accept,
722 .getname = l2tp_ip6_getname,
723 .poll = datagram_poll,
724 .ioctl = inet6_ioctl,
725 .listen = sock_no_listen,
726 .shutdown = inet_shutdown,
727 .setsockopt = sock_common_setsockopt,
728 .getsockopt = sock_common_getsockopt,
729 .sendmsg = inet_sendmsg,
730 .recvmsg = sock_common_recvmsg,
731 .mmap = sock_no_mmap,
732 .sendpage = sock_no_sendpage,
733#ifdef CONFIG_COMPAT
734 .compat_setsockopt = compat_sock_common_setsockopt,
735 .compat_getsockopt = compat_sock_common_getsockopt,
736#endif
737};
738
739static struct inet_protosw l2tp_ip6_protosw = {
740 .type = SOCK_DGRAM,
741 .protocol = IPPROTO_L2TP,
742 .prot = &l2tp_ip6_prot,
743 .ops = &l2tp_ip6_ops,
744 .no_check = 0,
745};
746
747static struct inet6_protocol l2tp_ip6_protocol __read_mostly = {
748 .handler = l2tp_ip6_recv,
749};
750
751static int __init l2tp_ip6_init(void)
752{
753 int err;
754
755 printk(KERN_INFO "L2TP IP encapsulation support for IPv6 (L2TPv3)\n");
756
757 err = proto_register(&l2tp_ip6_prot, 1);
758 if (err != 0)
759 goto out;
760
761 err = inet6_add_protocol(&l2tp_ip6_protocol, IPPROTO_L2TP);
762 if (err)
763 goto out1;
764
765 inet6_register_protosw(&l2tp_ip6_protosw);
766 return 0;
767
768out1:
769 proto_unregister(&l2tp_ip6_prot);
770out:
771 return err;
772}
773
774static void __exit l2tp_ip6_exit(void)
775{
776 inet6_unregister_protosw(&l2tp_ip6_protosw);
777 inet6_del_protocol(&l2tp_ip6_protocol, IPPROTO_L2TP);
778 proto_unregister(&l2tp_ip6_prot);
779}
780
781module_init(l2tp_ip6_init);
782module_exit(l2tp_ip6_exit);
783
784MODULE_LICENSE("GPL");
785MODULE_AUTHOR("Chris Elston <celston@katalix.com>");
786MODULE_DESCRIPTION("L2TP IP encapsulation for IPv6");
787MODULE_VERSION("1.0");
788
789/* Use the value of SOCK_DGRAM (2) directory, because __stringify doesn't like
790 * enums
791 */
792MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 2, IPPROTO_L2TP);
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 93a41a09458b..24edad0fd9ba 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -133,10 +133,25 @@ static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info
133 if (info->attrs[L2TP_ATTR_FD]) { 133 if (info->attrs[L2TP_ATTR_FD]) {
134 fd = nla_get_u32(info->attrs[L2TP_ATTR_FD]); 134 fd = nla_get_u32(info->attrs[L2TP_ATTR_FD]);
135 } else { 135 } else {
136 if (info->attrs[L2TP_ATTR_IP_SADDR]) 136#if IS_ENABLED(CONFIG_IPV6)
137 cfg.local_ip.s_addr = nla_get_be32(info->attrs[L2TP_ATTR_IP_SADDR]); 137 if (info->attrs[L2TP_ATTR_IP6_SADDR] &&
138 if (info->attrs[L2TP_ATTR_IP_DADDR]) 138 info->attrs[L2TP_ATTR_IP6_DADDR]) {
139 cfg.peer_ip.s_addr = nla_get_be32(info->attrs[L2TP_ATTR_IP_DADDR]); 139 cfg.local_ip6 = nla_data(
140 info->attrs[L2TP_ATTR_IP6_SADDR]);
141 cfg.peer_ip6 = nla_data(
142 info->attrs[L2TP_ATTR_IP6_DADDR]);
143 } else
144#endif
145 if (info->attrs[L2TP_ATTR_IP_SADDR] &&
146 info->attrs[L2TP_ATTR_IP_DADDR]) {
147 cfg.local_ip.s_addr = nla_get_be32(
148 info->attrs[L2TP_ATTR_IP_SADDR]);
149 cfg.peer_ip.s_addr = nla_get_be32(
150 info->attrs[L2TP_ATTR_IP_DADDR]);
151 } else {
152 ret = -EINVAL;
153 goto out;
154 }
140 if (info->attrs[L2TP_ATTR_UDP_SPORT]) 155 if (info->attrs[L2TP_ATTR_UDP_SPORT])
141 cfg.local_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_SPORT]); 156 cfg.local_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_SPORT]);
142 if (info->attrs[L2TP_ATTR_UDP_DPORT]) 157 if (info->attrs[L2TP_ATTR_UDP_DPORT])
@@ -225,47 +240,85 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
225 struct nlattr *nest; 240 struct nlattr *nest;
226 struct sock *sk = NULL; 241 struct sock *sk = NULL;
227 struct inet_sock *inet; 242 struct inet_sock *inet;
243#if IS_ENABLED(CONFIG_IPV6)
244 struct ipv6_pinfo *np = NULL;
245#endif
246 struct l2tp_stats stats;
247 unsigned int start;
228 248
229 hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, 249 hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags,
230 L2TP_CMD_TUNNEL_GET); 250 L2TP_CMD_TUNNEL_GET);
231 if (IS_ERR(hdr)) 251 if (IS_ERR(hdr))
232 return PTR_ERR(hdr); 252 return PTR_ERR(hdr);
233 253
234 NLA_PUT_U8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version); 254 if (nla_put_u8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version) ||
235 NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id); 255 nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
236 NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id); 256 nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) ||
237 NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, tunnel->debug); 257 nla_put_u32(skb, L2TP_ATTR_DEBUG, tunnel->debug) ||
238 NLA_PUT_U16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap); 258 nla_put_u16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap))
259 goto nla_put_failure;
239 260
240 nest = nla_nest_start(skb, L2TP_ATTR_STATS); 261 nest = nla_nest_start(skb, L2TP_ATTR_STATS);
241 if (nest == NULL) 262 if (nest == NULL)
242 goto nla_put_failure; 263 goto nla_put_failure;
243 264
244 NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, tunnel->stats.tx_packets); 265 do {
245 NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, tunnel->stats.tx_bytes); 266 start = u64_stats_fetch_begin(&tunnel->stats.syncp);
246 NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, tunnel->stats.tx_errors); 267 stats.tx_packets = tunnel->stats.tx_packets;
247 NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, tunnel->stats.rx_packets); 268 stats.tx_bytes = tunnel->stats.tx_bytes;
248 NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, tunnel->stats.rx_bytes); 269 stats.tx_errors = tunnel->stats.tx_errors;
249 NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, tunnel->stats.rx_seq_discards); 270 stats.rx_packets = tunnel->stats.rx_packets;
250 NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, tunnel->stats.rx_oos_packets); 271 stats.rx_bytes = tunnel->stats.rx_bytes;
251 NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, tunnel->stats.rx_errors); 272 stats.rx_errors = tunnel->stats.rx_errors;
273 stats.rx_seq_discards = tunnel->stats.rx_seq_discards;
274 stats.rx_oos_packets = tunnel->stats.rx_oos_packets;
275 } while (u64_stats_fetch_retry(&tunnel->stats.syncp, start));
276
277 if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) ||
278 nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) ||
279 nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) ||
280 nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) ||
281 nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) ||
282 nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
283 stats.rx_seq_discards) ||
284 nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
285 stats.rx_oos_packets) ||
286 nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors))
287 goto nla_put_failure;
252 nla_nest_end(skb, nest); 288 nla_nest_end(skb, nest);
253 289
254 sk = tunnel->sock; 290 sk = tunnel->sock;
255 if (!sk) 291 if (!sk)
256 goto out; 292 goto out;
257 293
294#if IS_ENABLED(CONFIG_IPV6)
295 if (sk->sk_family == AF_INET6)
296 np = inet6_sk(sk);
297#endif
298
258 inet = inet_sk(sk); 299 inet = inet_sk(sk);
259 300
260 switch (tunnel->encap) { 301 switch (tunnel->encap) {
261 case L2TP_ENCAPTYPE_UDP: 302 case L2TP_ENCAPTYPE_UDP:
262 NLA_PUT_U16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)); 303 if (nla_put_u16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport)) ||
263 NLA_PUT_U16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport)); 304 nla_put_u16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport)) ||
264 NLA_PUT_U8(skb, L2TP_ATTR_UDP_CSUM, (sk->sk_no_check != UDP_CSUM_NOXMIT)); 305 nla_put_u8(skb, L2TP_ATTR_UDP_CSUM,
306 (sk->sk_no_check != UDP_CSUM_NOXMIT)))
307 goto nla_put_failure;
265 /* NOBREAK */ 308 /* NOBREAK */
266 case L2TP_ENCAPTYPE_IP: 309 case L2TP_ENCAPTYPE_IP:
267 NLA_PUT_BE32(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr); 310#if IS_ENABLED(CONFIG_IPV6)
268 NLA_PUT_BE32(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr); 311 if (np) {
312 if (nla_put(skb, L2TP_ATTR_IP6_SADDR, sizeof(np->saddr),
313 &np->saddr) ||
314 nla_put(skb, L2TP_ATTR_IP6_DADDR, sizeof(np->daddr),
315 &np->daddr))
316 goto nla_put_failure;
317 } else
318#endif
319 if (nla_put_be32(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr) ||
320 nla_put_be32(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr))
321 goto nla_put_failure;
269 break; 322 break;
270 } 323 }
271 324
@@ -556,6 +609,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags
556 struct nlattr *nest; 609 struct nlattr *nest;
557 struct l2tp_tunnel *tunnel = session->tunnel; 610 struct l2tp_tunnel *tunnel = session->tunnel;
558 struct sock *sk = NULL; 611 struct sock *sk = NULL;
612 struct l2tp_stats stats;
613 unsigned int start;
559 614
560 sk = tunnel->sock; 615 sk = tunnel->sock;
561 616
@@ -563,43 +618,64 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags
563 if (IS_ERR(hdr)) 618 if (IS_ERR(hdr))
564 return PTR_ERR(hdr); 619 return PTR_ERR(hdr);
565 620
566 NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id); 621 if (nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
567 NLA_PUT_U32(skb, L2TP_ATTR_SESSION_ID, session->session_id); 622 nla_put_u32(skb, L2TP_ATTR_SESSION_ID, session->session_id) ||
568 NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id); 623 nla_put_u32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id) ||
569 NLA_PUT_U32(skb, L2TP_ATTR_PEER_SESSION_ID, session->peer_session_id); 624 nla_put_u32(skb, L2TP_ATTR_PEER_SESSION_ID,
570 NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, session->debug); 625 session->peer_session_id) ||
571 NLA_PUT_U16(skb, L2TP_ATTR_PW_TYPE, session->pwtype); 626 nla_put_u32(skb, L2TP_ATTR_DEBUG, session->debug) ||
572 NLA_PUT_U16(skb, L2TP_ATTR_MTU, session->mtu); 627 nla_put_u16(skb, L2TP_ATTR_PW_TYPE, session->pwtype) ||
573 if (session->mru) 628 nla_put_u16(skb, L2TP_ATTR_MTU, session->mtu) ||
574 NLA_PUT_U16(skb, L2TP_ATTR_MRU, session->mru); 629 (session->mru &&
575 630 nla_put_u16(skb, L2TP_ATTR_MRU, session->mru)))
576 if (session->ifname && session->ifname[0]) 631 goto nla_put_failure;
577 NLA_PUT_STRING(skb, L2TP_ATTR_IFNAME, session->ifname); 632
578 if (session->cookie_len) 633 if ((session->ifname && session->ifname[0] &&
579 NLA_PUT(skb, L2TP_ATTR_COOKIE, session->cookie_len, &session->cookie[0]); 634 nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) ||
580 if (session->peer_cookie_len) 635 (session->cookie_len &&
581 NLA_PUT(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len, &session->peer_cookie[0]); 636 nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len,
582 NLA_PUT_U8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq); 637 &session->cookie[0])) ||
583 NLA_PUT_U8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq); 638 (session->peer_cookie_len &&
584 NLA_PUT_U8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode); 639 nla_put(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len,
640 &session->peer_cookie[0])) ||
641 nla_put_u8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq) ||
642 nla_put_u8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq) ||
643 nla_put_u8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode) ||
585#ifdef CONFIG_XFRM 644#ifdef CONFIG_XFRM
586 if ((sk) && (sk->sk_policy[0] || sk->sk_policy[1])) 645 (((sk) && (sk->sk_policy[0] || sk->sk_policy[1])) &&
587 NLA_PUT_U8(skb, L2TP_ATTR_USING_IPSEC, 1); 646 nla_put_u8(skb, L2TP_ATTR_USING_IPSEC, 1)) ||
588#endif 647#endif
589 if (session->reorder_timeout) 648 (session->reorder_timeout &&
590 NLA_PUT_MSECS(skb, L2TP_ATTR_RECV_TIMEOUT, session->reorder_timeout); 649 nla_put_msecs(skb, L2TP_ATTR_RECV_TIMEOUT, session->reorder_timeout)))
650 goto nla_put_failure;
591 651
592 nest = nla_nest_start(skb, L2TP_ATTR_STATS); 652 nest = nla_nest_start(skb, L2TP_ATTR_STATS);
593 if (nest == NULL) 653 if (nest == NULL)
594 goto nla_put_failure; 654 goto nla_put_failure;
595 NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, session->stats.tx_packets); 655
596 NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, session->stats.tx_bytes); 656 do {
597 NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, session->stats.tx_errors); 657 start = u64_stats_fetch_begin(&session->stats.syncp);
598 NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, session->stats.rx_packets); 658 stats.tx_packets = session->stats.tx_packets;
599 NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, session->stats.rx_bytes); 659 stats.tx_bytes = session->stats.tx_bytes;
600 NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, session->stats.rx_seq_discards); 660 stats.tx_errors = session->stats.tx_errors;
601 NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, session->stats.rx_oos_packets); 661 stats.rx_packets = session->stats.rx_packets;
602 NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, session->stats.rx_errors); 662 stats.rx_bytes = session->stats.rx_bytes;
663 stats.rx_errors = session->stats.rx_errors;
664 stats.rx_seq_discards = session->stats.rx_seq_discards;
665 stats.rx_oos_packets = session->stats.rx_oos_packets;
666 } while (u64_stats_fetch_retry(&session->stats.syncp, start));
667
668 if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) ||
669 nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) ||
670 nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) ||
671 nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) ||
672 nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) ||
673 nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS,
674 stats.rx_seq_discards) ||
675 nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS,
676 stats.rx_oos_packets) ||
677 nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors))
678 goto nla_put_failure;
603 nla_nest_end(skb, nest); 679 nla_nest_end(skb, nest);
604 680
605 return genlmsg_end(skb, hdr); 681 return genlmsg_end(skb, hdr);
@@ -708,6 +784,14 @@ static struct nla_policy l2tp_nl_policy[L2TP_ATTR_MAX + 1] = {
708 [L2TP_ATTR_MTU] = { .type = NLA_U16, }, 784 [L2TP_ATTR_MTU] = { .type = NLA_U16, },
709 [L2TP_ATTR_MRU] = { .type = NLA_U16, }, 785 [L2TP_ATTR_MRU] = { .type = NLA_U16, },
710 [L2TP_ATTR_STATS] = { .type = NLA_NESTED, }, 786 [L2TP_ATTR_STATS] = { .type = NLA_NESTED, },
787 [L2TP_ATTR_IP6_SADDR] = {
788 .type = NLA_BINARY,
789 .len = sizeof(struct in6_addr),
790 },
791 [L2TP_ATTR_IP6_DADDR] = {
792 .type = NLA_BINARY,
793 .len = sizeof(struct in6_addr),
794 },
711 [L2TP_ATTR_IFNAME] = { 795 [L2TP_ATTR_IFNAME] = {
712 .type = NLA_NUL_STRING, 796 .type = NLA_NUL_STRING,
713 .len = IFNAMSIZ - 1, 797 .len = IFNAMSIZ - 1,
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 1addd9f3f40a..9f2c421aa307 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -628,7 +628,6 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
628{ 628{
629 struct sock *sk = sock->sk; 629 struct sock *sk = sock->sk;
630 struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr; 630 struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr;
631 struct sockaddr_pppol2tpv3 *sp3 = (struct sockaddr_pppol2tpv3 *) uservaddr;
632 struct pppox_sock *po = pppox_sk(sk); 631 struct pppox_sock *po = pppox_sk(sk);
633 struct l2tp_session *session = NULL; 632 struct l2tp_session *session = NULL;
634 struct l2tp_tunnel *tunnel; 633 struct l2tp_tunnel *tunnel;
@@ -657,7 +656,13 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
657 if (sk->sk_user_data) 656 if (sk->sk_user_data)
658 goto end; /* socket is already attached */ 657 goto end; /* socket is already attached */
659 658
660 /* Get params from socket address. Handle L2TPv2 and L2TPv3 */ 659 /* Get params from socket address. Handle L2TPv2 and L2TPv3.
660 * This is nasty because there are different sockaddr_pppol2tp
661 * structs for L2TPv2, L2TPv3, over IPv4 and IPv6. We use
662 * the sockaddr size to determine which structure the caller
663 * is using.
664 */
665 peer_tunnel_id = 0;
661 if (sockaddr_len == sizeof(struct sockaddr_pppol2tp)) { 666 if (sockaddr_len == sizeof(struct sockaddr_pppol2tp)) {
662 fd = sp->pppol2tp.fd; 667 fd = sp->pppol2tp.fd;
663 tunnel_id = sp->pppol2tp.s_tunnel; 668 tunnel_id = sp->pppol2tp.s_tunnel;
@@ -665,12 +670,31 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
665 session_id = sp->pppol2tp.s_session; 670 session_id = sp->pppol2tp.s_session;
666 peer_session_id = sp->pppol2tp.d_session; 671 peer_session_id = sp->pppol2tp.d_session;
667 } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpv3)) { 672 } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpv3)) {
673 struct sockaddr_pppol2tpv3 *sp3 =
674 (struct sockaddr_pppol2tpv3 *) sp;
668 ver = 3; 675 ver = 3;
669 fd = sp3->pppol2tp.fd; 676 fd = sp3->pppol2tp.fd;
670 tunnel_id = sp3->pppol2tp.s_tunnel; 677 tunnel_id = sp3->pppol2tp.s_tunnel;
671 peer_tunnel_id = sp3->pppol2tp.d_tunnel; 678 peer_tunnel_id = sp3->pppol2tp.d_tunnel;
672 session_id = sp3->pppol2tp.s_session; 679 session_id = sp3->pppol2tp.s_session;
673 peer_session_id = sp3->pppol2tp.d_session; 680 peer_session_id = sp3->pppol2tp.d_session;
681 } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpin6)) {
682 struct sockaddr_pppol2tpin6 *sp6 =
683 (struct sockaddr_pppol2tpin6 *) sp;
684 fd = sp6->pppol2tp.fd;
685 tunnel_id = sp6->pppol2tp.s_tunnel;
686 peer_tunnel_id = sp6->pppol2tp.d_tunnel;
687 session_id = sp6->pppol2tp.s_session;
688 peer_session_id = sp6->pppol2tp.d_session;
689 } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpv3in6)) {
690 struct sockaddr_pppol2tpv3in6 *sp6 =
691 (struct sockaddr_pppol2tpv3in6 *) sp;
692 ver = 3;
693 fd = sp6->pppol2tp.fd;
694 tunnel_id = sp6->pppol2tp.s_tunnel;
695 peer_tunnel_id = sp6->pppol2tp.d_tunnel;
696 session_id = sp6->pppol2tp.s_session;
697 peer_session_id = sp6->pppol2tp.d_session;
674 } else { 698 } else {
675 error = -EINVAL; 699 error = -EINVAL;
676 goto end; /* bad socket address */ 700 goto end; /* bad socket address */
@@ -711,12 +735,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
711 if (tunnel->recv_payload_hook == NULL) 735 if (tunnel->recv_payload_hook == NULL)
712 tunnel->recv_payload_hook = pppol2tp_recv_payload_hook; 736 tunnel->recv_payload_hook = pppol2tp_recv_payload_hook;
713 737
714 if (tunnel->peer_tunnel_id == 0) { 738 if (tunnel->peer_tunnel_id == 0)
715 if (ver == 2) 739 tunnel->peer_tunnel_id = peer_tunnel_id;
716 tunnel->peer_tunnel_id = sp->pppol2tp.d_tunnel;
717 else
718 tunnel->peer_tunnel_id = sp3->pppol2tp.d_tunnel;
719 }
720 740
721 /* Create session if it doesn't already exist. We handle the 741 /* Create session if it doesn't already exist. We handle the
722 * case where a session was previously created by the netlink 742 * case where a session was previously created by the netlink
@@ -916,7 +936,7 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
916 } 936 }
917 937
918 inet = inet_sk(tunnel->sock); 938 inet = inet_sk(tunnel->sock);
919 if (tunnel->version == 2) { 939 if ((tunnel->version == 2) && (tunnel->sock->sk_family == AF_INET)) {
920 struct sockaddr_pppol2tp sp; 940 struct sockaddr_pppol2tp sp;
921 len = sizeof(sp); 941 len = sizeof(sp);
922 memset(&sp, 0, len); 942 memset(&sp, 0, len);
@@ -932,6 +952,46 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
932 sp.pppol2tp.addr.sin_port = inet->inet_dport; 952 sp.pppol2tp.addr.sin_port = inet->inet_dport;
933 sp.pppol2tp.addr.sin_addr.s_addr = inet->inet_daddr; 953 sp.pppol2tp.addr.sin_addr.s_addr = inet->inet_daddr;
934 memcpy(uaddr, &sp, len); 954 memcpy(uaddr, &sp, len);
955#if IS_ENABLED(CONFIG_IPV6)
956 } else if ((tunnel->version == 2) &&
957 (tunnel->sock->sk_family == AF_INET6)) {
958 struct ipv6_pinfo *np = inet6_sk(tunnel->sock);
959 struct sockaddr_pppol2tpin6 sp;
960 len = sizeof(sp);
961 memset(&sp, 0, len);
962 sp.sa_family = AF_PPPOX;
963 sp.sa_protocol = PX_PROTO_OL2TP;
964 sp.pppol2tp.fd = tunnel->fd;
965 sp.pppol2tp.pid = pls->owner;
966 sp.pppol2tp.s_tunnel = tunnel->tunnel_id;
967 sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id;
968 sp.pppol2tp.s_session = session->session_id;
969 sp.pppol2tp.d_session = session->peer_session_id;
970 sp.pppol2tp.addr.sin6_family = AF_INET6;
971 sp.pppol2tp.addr.sin6_port = inet->inet_dport;
972 memcpy(&sp.pppol2tp.addr.sin6_addr, &np->daddr,
973 sizeof(np->daddr));
974 memcpy(uaddr, &sp, len);
975 } else if ((tunnel->version == 3) &&
976 (tunnel->sock->sk_family == AF_INET6)) {
977 struct ipv6_pinfo *np = inet6_sk(tunnel->sock);
978 struct sockaddr_pppol2tpv3in6 sp;
979 len = sizeof(sp);
980 memset(&sp, 0, len);
981 sp.sa_family = AF_PPPOX;
982 sp.sa_protocol = PX_PROTO_OL2TP;
983 sp.pppol2tp.fd = tunnel->fd;
984 sp.pppol2tp.pid = pls->owner;
985 sp.pppol2tp.s_tunnel = tunnel->tunnel_id;
986 sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id;
987 sp.pppol2tp.s_session = session->session_id;
988 sp.pppol2tp.d_session = session->peer_session_id;
989 sp.pppol2tp.addr.sin6_family = AF_INET6;
990 sp.pppol2tp.addr.sin6_port = inet->inet_dport;
991 memcpy(&sp.pppol2tp.addr.sin6_addr, &np->daddr,
992 sizeof(np->daddr));
993 memcpy(uaddr, &sp, len);
994#endif
935 } else if (tunnel->version == 3) { 995 } else if (tunnel->version == 3) {
936 struct sockaddr_pppol2tpv3 sp; 996 struct sockaddr_pppol2tpv3 sp;
937 len = sizeof(sp); 997 len = sizeof(sp);
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index b9bef2c75026..17bc85d5b7ba 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -518,7 +518,7 @@ static int llc_ui_listen(struct socket *sock, int backlog)
518 if (sock_flag(sk, SOCK_ZAPPED)) 518 if (sock_flag(sk, SOCK_ZAPPED))
519 goto out; 519 goto out;
520 rc = 0; 520 rc = 0;
521 if (!(unsigned)backlog) /* BSDism */ 521 if (!(unsigned int)backlog) /* BSDism */
522 backlog = 1; 522 backlog = 1;
523 sk->sk_max_ack_backlog = backlog; 523 sk->sk_max_ack_backlog = backlog;
524 if (sk->sk_state != TCP_LISTEN) { 524 if (sk->sk_state != TCP_LISTEN) {
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index ba137a6a224d..0d0d416dfab6 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -828,7 +828,7 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
828 else { 828 else {
829 dprintk("%s: adding to backlog...\n", __func__); 829 dprintk("%s: adding to backlog...\n", __func__);
830 llc_set_backlog_type(skb, LLC_PACKET); 830 llc_set_backlog_type(skb, LLC_PACKET);
831 if (sk_add_backlog(sk, skb)) 831 if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
832 goto drop_unlock; 832 goto drop_unlock;
833 } 833 }
834out: 834out:
diff --git a/net/llc/sysctl_net_llc.c b/net/llc/sysctl_net_llc.c
index e2ebe3586263..d75306b9c2f3 100644
--- a/net/llc/sysctl_net_llc.c
+++ b/net/llc/sysctl_net_llc.c
@@ -7,6 +7,7 @@
7#include <linux/mm.h> 7#include <linux/mm.h>
8#include <linux/init.h> 8#include <linux/init.h>
9#include <linux/sysctl.h> 9#include <linux/sysctl.h>
10#include <net/net_namespace.h>
10#include <net/llc.h> 11#include <net/llc.h>
11 12
12#ifndef CONFIG_SYSCTL 13#ifndef CONFIG_SYSCTL
@@ -56,48 +57,29 @@ static struct ctl_table llc_station_table[] = {
56 { }, 57 { },
57}; 58};
58 59
59static struct ctl_table llc2_dir_timeout_table[] = { 60static struct ctl_table_header *llc2_timeout_header;
60 { 61static struct ctl_table_header *llc_station_header;
61 .procname = "timeout",
62 .mode = 0555,
63 .child = llc2_timeout_table,
64 },
65 { },
66};
67
68static struct ctl_table llc_table[] = {
69 {
70 .procname = "llc2",
71 .mode = 0555,
72 .child = llc2_dir_timeout_table,
73 },
74 {
75 .procname = "station",
76 .mode = 0555,
77 .child = llc_station_table,
78 },
79 { },
80};
81
82static struct ctl_path llc_path[] = {
83 { .procname = "net", },
84 { .procname = "llc", },
85 { }
86};
87
88static struct ctl_table_header *llc_table_header;
89 62
90int __init llc_sysctl_init(void) 63int __init llc_sysctl_init(void)
91{ 64{
92 llc_table_header = register_sysctl_paths(llc_path, llc_table); 65 llc2_timeout_header = register_net_sysctl(&init_net, "net/llc/llc2/timeout", llc2_timeout_table);
66 llc_station_header = register_net_sysctl(&init_net, "net/llc/station", llc_station_table);
93 67
94 return llc_table_header ? 0 : -ENOMEM; 68 if (!llc2_timeout_header || !llc_station_header) {
69 llc_sysctl_exit();
70 return -ENOMEM;
71 }
72 return 0;
95} 73}
96 74
97void llc_sysctl_exit(void) 75void llc_sysctl_exit(void)
98{ 76{
99 if (llc_table_header) { 77 if (llc2_timeout_header) {
100 unregister_sysctl_table(llc_table_header); 78 unregister_net_sysctl_table(llc2_timeout_header);
101 llc_table_header = NULL; 79 llc2_timeout_header = NULL;
80 }
81 if (llc_station_header) {
82 unregister_net_sysctl_table(llc_station_header);
83 llc_station_header = NULL;
102 } 84 }
103} 85}
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 96ddb72760b9..8d249d705980 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -225,6 +225,17 @@ config MAC80211_VERBOSE_MHWMP_DEBUG
225 225
226 Do not select this option. 226 Do not select this option.
227 227
228config MAC80211_VERBOSE_MESH_SYNC_DEBUG
229 bool "Verbose mesh mesh synchronization debugging"
230 depends on MAC80211_DEBUG_MENU
231 depends on MAC80211_MESH
232 ---help---
233 Selecting this option causes mac80211 to print out very verbose mesh
234 synchronization debugging messages (when mac80211 is taking part in a
235 mesh network).
236
237 Do not select this option.
238
228config MAC80211_VERBOSE_TDLS_DEBUG 239config MAC80211_VERBOSE_TDLS_DEBUG
229 bool "Verbose TDLS debugging" 240 bool "Verbose TDLS debugging"
230 depends on MAC80211_DEBUG_MENU 241 depends on MAC80211_DEBUG_MENU
diff --git a/net/mac80211/Makefile b/net/mac80211/Makefile
index 1be7a454aa77..3e9d931bba35 100644
--- a/net/mac80211/Makefile
+++ b/net/mac80211/Makefile
@@ -38,7 +38,8 @@ mac80211-$(CONFIG_MAC80211_MESH) += \
38 mesh.o \ 38 mesh.o \
39 mesh_pathtbl.o \ 39 mesh_pathtbl.o \
40 mesh_plink.o \ 40 mesh_plink.o \
41 mesh_hwmp.o 41 mesh_hwmp.o \
42 mesh_sync.o
42 43
43mac80211-$(CONFIG_PM) += pm.o 44mac80211-$(CONFIG_PM) += pm.o
44 45
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 64d3ce5ea1a0..a070d4f460ea 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -142,6 +142,18 @@ static void sta_rx_agg_session_timer_expired(unsigned long data)
142 u8 *timer_to_id = ptid - *ptid; 142 u8 *timer_to_id = ptid - *ptid;
143 struct sta_info *sta = container_of(timer_to_id, struct sta_info, 143 struct sta_info *sta = container_of(timer_to_id, struct sta_info,
144 timer_to_tid[0]); 144 timer_to_tid[0]);
145 struct tid_ampdu_rx *tid_rx;
146 unsigned long timeout;
147
148 tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[*ptid]);
149 if (!tid_rx)
150 return;
151
152 timeout = tid_rx->last_rx + TU_TO_JIFFIES(tid_rx->timeout);
153 if (time_is_after_jiffies(timeout)) {
154 mod_timer(&tid_rx->session_timer, timeout);
155 return;
156 }
145 157
146#ifdef CONFIG_MAC80211_HT_DEBUG 158#ifdef CONFIG_MAC80211_HT_DEBUG
147 printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); 159 printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
@@ -291,7 +303,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
291 /* rx timer */ 303 /* rx timer */
292 tid_agg_rx->session_timer.function = sta_rx_agg_session_timer_expired; 304 tid_agg_rx->session_timer.function = sta_rx_agg_session_timer_expired;
293 tid_agg_rx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid]; 305 tid_agg_rx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid];
294 init_timer(&tid_agg_rx->session_timer); 306 init_timer_deferrable(&tid_agg_rx->session_timer);
295 307
296 /* rx reorder timer */ 308 /* rx reorder timer */
297 tid_agg_rx->reorder_timer.function = sta_rx_agg_reorder_timer_expired; 309 tid_agg_rx->reorder_timer.function = sta_rx_agg_reorder_timer_expired;
@@ -335,8 +347,10 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
335 /* activate it for RX */ 347 /* activate it for RX */
336 rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx); 348 rcu_assign_pointer(sta->ampdu_mlme.tid_rx[tid], tid_agg_rx);
337 349
338 if (timeout) 350 if (timeout) {
339 mod_timer(&tid_agg_rx->session_timer, TU_TO_EXP_TIME(timeout)); 351 mod_timer(&tid_agg_rx->session_timer, TU_TO_EXP_TIME(timeout));
352 tid_agg_rx->last_rx = jiffies;
353 }
340 354
341end: 355end:
342 mutex_unlock(&sta->ampdu_mlme.mtx); 356 mutex_unlock(&sta->ampdu_mlme.mtx);
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 76be61744198..5b7053c58732 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -286,25 +286,25 @@ static inline int ieee80211_ac_from_tid(int tid)
286 * a global "agg_queue_stop" refcount. 286 * a global "agg_queue_stop" refcount.
287 */ 287 */
288static void __acquires(agg_queue) 288static void __acquires(agg_queue)
289ieee80211_stop_queue_agg(struct ieee80211_local *local, int tid) 289ieee80211_stop_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
290{ 290{
291 int queue = ieee80211_ac_from_tid(tid); 291 int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
292 292
293 if (atomic_inc_return(&local->agg_queue_stop[queue]) == 1) 293 if (atomic_inc_return(&sdata->local->agg_queue_stop[queue]) == 1)
294 ieee80211_stop_queue_by_reason( 294 ieee80211_stop_queue_by_reason(
295 &local->hw, queue, 295 &sdata->local->hw, queue,
296 IEEE80211_QUEUE_STOP_REASON_AGGREGATION); 296 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
297 __acquire(agg_queue); 297 __acquire(agg_queue);
298} 298}
299 299
300static void __releases(agg_queue) 300static void __releases(agg_queue)
301ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid) 301ieee80211_wake_queue_agg(struct ieee80211_sub_if_data *sdata, int tid)
302{ 302{
303 int queue = ieee80211_ac_from_tid(tid); 303 int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
304 304
305 if (atomic_dec_return(&local->agg_queue_stop[queue]) == 0) 305 if (atomic_dec_return(&sdata->local->agg_queue_stop[queue]) == 0)
306 ieee80211_wake_queue_by_reason( 306 ieee80211_wake_queue_by_reason(
307 &local->hw, queue, 307 &sdata->local->hw, queue,
308 IEEE80211_QUEUE_STOP_REASON_AGGREGATION); 308 IEEE80211_QUEUE_STOP_REASON_AGGREGATION);
309 __release(agg_queue); 309 __release(agg_queue);
310} 310}
@@ -314,13 +314,14 @@ ieee80211_wake_queue_agg(struct ieee80211_local *local, int tid)
314 * requires a call to ieee80211_agg_splice_finish later 314 * requires a call to ieee80211_agg_splice_finish later
315 */ 315 */
316static void __acquires(agg_queue) 316static void __acquires(agg_queue)
317ieee80211_agg_splice_packets(struct ieee80211_local *local, 317ieee80211_agg_splice_packets(struct ieee80211_sub_if_data *sdata,
318 struct tid_ampdu_tx *tid_tx, u16 tid) 318 struct tid_ampdu_tx *tid_tx, u16 tid)
319{ 319{
320 int queue = ieee80211_ac_from_tid(tid); 320 struct ieee80211_local *local = sdata->local;
321 int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];
321 unsigned long flags; 322 unsigned long flags;
322 323
323 ieee80211_stop_queue_agg(local, tid); 324 ieee80211_stop_queue_agg(sdata, tid);
324 325
325 if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates" 326 if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
326 " from the pending queue\n", tid)) 327 " from the pending queue\n", tid))
@@ -336,9 +337,9 @@ ieee80211_agg_splice_packets(struct ieee80211_local *local,
336} 337}
337 338
338static void __releases(agg_queue) 339static void __releases(agg_queue)
339ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid) 340ieee80211_agg_splice_finish(struct ieee80211_sub_if_data *sdata, u16 tid)
340{ 341{
341 ieee80211_wake_queue_agg(local, tid); 342 ieee80211_wake_queue_agg(sdata, tid);
342} 343}
343 344
344void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid) 345void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
@@ -376,9 +377,9 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
376 " tid %d\n", tid); 377 " tid %d\n", tid);
377#endif 378#endif
378 spin_lock_bh(&sta->lock); 379 spin_lock_bh(&sta->lock);
379 ieee80211_agg_splice_packets(local, tid_tx, tid); 380 ieee80211_agg_splice_packets(sdata, tid_tx, tid);
380 ieee80211_assign_tid_tx(sta, tid, NULL); 381 ieee80211_assign_tid_tx(sta, tid, NULL);
381 ieee80211_agg_splice_finish(local, tid); 382 ieee80211_agg_splice_finish(sdata, tid);
382 spin_unlock_bh(&sta->lock); 383 spin_unlock_bh(&sta->lock);
383 384
384 kfree_rcu(tid_tx, rcu_head); 385 kfree_rcu(tid_tx, rcu_head);
@@ -417,6 +418,18 @@ static void sta_tx_agg_session_timer_expired(unsigned long data)
417 u8 *timer_to_id = ptid - *ptid; 418 u8 *timer_to_id = ptid - *ptid;
418 struct sta_info *sta = container_of(timer_to_id, struct sta_info, 419 struct sta_info *sta = container_of(timer_to_id, struct sta_info,
419 timer_to_tid[0]); 420 timer_to_tid[0]);
421 struct tid_ampdu_tx *tid_tx;
422 unsigned long timeout;
423
424 tid_tx = rcu_dereference_protected_tid_tx(sta, *ptid);
425 if (!tid_tx)
426 return;
427
428 timeout = tid_tx->last_tx + TU_TO_JIFFIES(tid_tx->timeout);
429 if (time_is_after_jiffies(timeout)) {
430 mod_timer(&tid_tx->session_timer, timeout);
431 return;
432 }
420 433
421#ifdef CONFIG_MAC80211_HT_DEBUG 434#ifdef CONFIG_MAC80211_HT_DEBUG
422 printk(KERN_DEBUG "tx session timer expired on tid %d\n", (u16)*ptid); 435 printk(KERN_DEBUG "tx session timer expired on tid %d\n", (u16)*ptid);
@@ -542,7 +555,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
542 /* tx timer */ 555 /* tx timer */
543 tid_tx->session_timer.function = sta_tx_agg_session_timer_expired; 556 tid_tx->session_timer.function = sta_tx_agg_session_timer_expired;
544 tid_tx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid]; 557 tid_tx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid];
545 init_timer(&tid_tx->session_timer); 558 init_timer_deferrable(&tid_tx->session_timer);
546 559
547 /* assign a dialog token */ 560 /* assign a dialog token */
548 sta->ampdu_mlme.dialog_token_allocator++; 561 sta->ampdu_mlme.dialog_token_allocator++;
@@ -586,14 +599,14 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
586 */ 599 */
587 spin_lock_bh(&sta->lock); 600 spin_lock_bh(&sta->lock);
588 601
589 ieee80211_agg_splice_packets(local, tid_tx, tid); 602 ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);
590 /* 603 /*
591 * Now mark as operational. This will be visible 604 * Now mark as operational. This will be visible
592 * in the TX path, and lets it go lock-free in 605 * in the TX path, and lets it go lock-free in
593 * the common case. 606 * the common case.
594 */ 607 */
595 set_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state); 608 set_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
596 ieee80211_agg_splice_finish(local, tid); 609 ieee80211_agg_splice_finish(sta->sdata, tid);
597 610
598 spin_unlock_bh(&sta->lock); 611 spin_unlock_bh(&sta->lock);
599} 612}
@@ -778,12 +791,12 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
778 * more. 791 * more.
779 */ 792 */
780 793
781 ieee80211_agg_splice_packets(local, tid_tx, tid); 794 ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);
782 795
783 /* future packets must not find the tid_tx struct any more */ 796 /* future packets must not find the tid_tx struct any more */
784 ieee80211_assign_tid_tx(sta, tid, NULL); 797 ieee80211_assign_tid_tx(sta, tid, NULL);
785 798
786 ieee80211_agg_splice_finish(local, tid); 799 ieee80211_agg_splice_finish(sta->sdata, tid);
787 800
788 kfree_rcu(tid_tx, rcu_head); 801 kfree_rcu(tid_tx, rcu_head);
789 802
@@ -884,9 +897,11 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
884 897
885 sta->ampdu_mlme.addba_req_num[tid] = 0; 898 sta->ampdu_mlme.addba_req_num[tid] = 0;
886 899
887 if (tid_tx->timeout) 900 if (tid_tx->timeout) {
888 mod_timer(&tid_tx->session_timer, 901 mod_timer(&tid_tx->session_timer,
889 TU_TO_EXP_TIME(tid_tx->timeout)); 902 TU_TO_EXP_TIME(tid_tx->timeout));
903 tid_tx->last_tx = jiffies;
904 }
890 905
891 } else { 906 } else {
892 ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR, 907 ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR,
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 677d65929780..70b2af2315a6 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -412,6 +412,10 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
412 sinfo->llid = le16_to_cpu(sta->llid); 412 sinfo->llid = le16_to_cpu(sta->llid);
413 sinfo->plid = le16_to_cpu(sta->plid); 413 sinfo->plid = le16_to_cpu(sta->plid);
414 sinfo->plink_state = sta->plink_state; 414 sinfo->plink_state = sta->plink_state;
415 if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
416 sinfo->filled |= STATION_INFO_T_OFFSET;
417 sinfo->t_offset = sta->t_offset;
418 }
415#endif 419#endif
416 } 420 }
417 421
@@ -640,6 +644,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
640 644
641 ieee80211_bss_info_change_notify(sdata, changed); 645 ieee80211_bss_info_change_notify(sdata, changed);
642 646
647 netif_carrier_on(dev);
648 list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
649 netif_carrier_on(vlan->dev);
650
643 return 0; 651 return 0;
644} 652}
645 653
@@ -665,7 +673,7 @@ static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev,
665 673
666static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev) 674static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
667{ 675{
668 struct ieee80211_sub_if_data *sdata; 676 struct ieee80211_sub_if_data *sdata, *vlan;
669 struct beacon_data *old; 677 struct beacon_data *old;
670 678
671 sdata = IEEE80211_DEV_TO_SUB_IF(dev); 679 sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -674,6 +682,10 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
674 if (!old) 682 if (!old)
675 return -ENOENT; 683 return -ENOENT;
676 684
685 list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
686 netif_carrier_off(vlan->dev);
687 netif_carrier_off(dev);
688
677 RCU_INIT_POINTER(sdata->u.ap.beacon, NULL); 689 RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
678 690
679 kfree_rcu(old, rcu_head); 691 kfree_rcu(old, rcu_head);
@@ -993,6 +1005,9 @@ static int ieee80211_change_station(struct wiphy *wiphy,
993 } 1005 }
994 1006
995 if (params->vlan && params->vlan != sta->sdata->dev) { 1007 if (params->vlan && params->vlan != sta->sdata->dev) {
1008 bool prev_4addr = false;
1009 bool new_4addr = false;
1010
996 vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); 1011 vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan);
997 1012
998 if (vlansdata->vif.type != NL80211_IFTYPE_AP_VLAN && 1013 if (vlansdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
@@ -1008,9 +1023,25 @@ static int ieee80211_change_station(struct wiphy *wiphy,
1008 } 1023 }
1009 1024
1010 rcu_assign_pointer(vlansdata->u.vlan.sta, sta); 1025 rcu_assign_pointer(vlansdata->u.vlan.sta, sta);
1026 new_4addr = true;
1027 }
1028
1029 if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1030 sta->sdata->u.vlan.sta) {
1031 rcu_assign_pointer(sta->sdata->u.vlan.sta, NULL);
1032 prev_4addr = true;
1011 } 1033 }
1012 1034
1013 sta->sdata = vlansdata; 1035 sta->sdata = vlansdata;
1036
1037 if (sta->sta_state == IEEE80211_STA_AUTHORIZED &&
1038 prev_4addr != new_4addr) {
1039 if (new_4addr)
1040 atomic_dec(&sta->sdata->bss->num_mcast_sta);
1041 else
1042 atomic_inc(&sta->sdata->bss->num_mcast_sta);
1043 }
1044
1014 ieee80211_send_layer2_update(sta); 1045 ieee80211_send_layer2_update(sta);
1015 } 1046 }
1016 1047
@@ -1235,6 +1266,7 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh,
1235 /* now copy the rest of the setup parameters */ 1266 /* now copy the rest of the setup parameters */
1236 ifmsh->mesh_id_len = setup->mesh_id_len; 1267 ifmsh->mesh_id_len = setup->mesh_id_len;
1237 memcpy(ifmsh->mesh_id, setup->mesh_id, ifmsh->mesh_id_len); 1268 memcpy(ifmsh->mesh_id, setup->mesh_id, ifmsh->mesh_id_len);
1269 ifmsh->mesh_sp_id = setup->sync_method;
1238 ifmsh->mesh_pp_id = setup->path_sel_proto; 1270 ifmsh->mesh_pp_id = setup->path_sel_proto;
1239 ifmsh->mesh_pm_id = setup->path_metric; 1271 ifmsh->mesh_pm_id = setup->path_metric;
1240 ifmsh->security = IEEE80211_MESH_SEC_NONE; 1272 ifmsh->security = IEEE80211_MESH_SEC_NONE;
@@ -1279,6 +1311,9 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
1279 conf->dot11MeshTTL = nconf->element_ttl; 1311 conf->dot11MeshTTL = nconf->element_ttl;
1280 if (_chg_mesh_attr(NL80211_MESHCONF_AUTO_OPEN_PLINKS, mask)) 1312 if (_chg_mesh_attr(NL80211_MESHCONF_AUTO_OPEN_PLINKS, mask))
1281 conf->auto_open_plinks = nconf->auto_open_plinks; 1313 conf->auto_open_plinks = nconf->auto_open_plinks;
1314 if (_chg_mesh_attr(NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR, mask))
1315 conf->dot11MeshNbrOffsetMaxNeighbor =
1316 nconf->dot11MeshNbrOffsetMaxNeighbor;
1282 if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, mask)) 1317 if (_chg_mesh_attr(NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, mask))
1283 conf->dot11MeshHWMPmaxPREQretries = 1318 conf->dot11MeshHWMPmaxPREQretries =
1284 nconf->dot11MeshHWMPmaxPREQretries; 1319 nconf->dot11MeshHWMPmaxPREQretries;
@@ -1437,6 +1472,9 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy,
1437 if (!local->ops->conf_tx) 1472 if (!local->ops->conf_tx)
1438 return -EOPNOTSUPP; 1473 return -EOPNOTSUPP;
1439 1474
1475 if (local->hw.queues < IEEE80211_NUM_ACS)
1476 return -EOPNOTSUPP;
1477
1440 memset(&p, 0, sizeof(p)); 1478 memset(&p, 0, sizeof(p));
1441 p.aifs = params->aifs; 1479 p.aifs = params->aifs;
1442 p.cw_max = params->cwmax; 1480 p.cw_max = params->cwmax;
@@ -1449,14 +1487,11 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy,
1449 */ 1487 */
1450 p.uapsd = false; 1488 p.uapsd = false;
1451 1489
1452 if (params->queue >= local->hw.queues) 1490 sdata->tx_conf[params->ac] = p;
1453 return -EINVAL; 1491 if (drv_conf_tx(local, sdata, params->ac, &p)) {
1454
1455 sdata->tx_conf[params->queue] = p;
1456 if (drv_conf_tx(local, sdata, params->queue, &p)) {
1457 wiphy_debug(local->hw.wiphy, 1492 wiphy_debug(local->hw.wiphy,
1458 "failed to set TX queue parameters for queue %d\n", 1493 "failed to set TX queue parameters for AC %d\n",
1459 params->queue); 1494 params->ac);
1460 return -EINVAL; 1495 return -EINVAL;
1461 } 1496 }
1462 1497
@@ -2090,6 +2125,10 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
2090 2125
2091 IEEE80211_SKB_CB(skb)->flags = flags; 2126 IEEE80211_SKB_CB(skb)->flags = flags;
2092 2127
2128 if (flags & IEEE80211_TX_CTL_TX_OFFCHAN)
2129 IEEE80211_SKB_CB(skb)->hw_queue =
2130 local->hw.offchannel_tx_hw_queue;
2131
2093 skb->dev = sdata->dev; 2132 skb->dev = sdata->dev;
2094 2133
2095 *cookie = (unsigned long) skb; 2134 *cookie = (unsigned long) skb;
@@ -2131,6 +2170,8 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
2131 /* modify cookie to prevent API mismatches */ 2170 /* modify cookie to prevent API mismatches */
2132 *cookie ^= 2; 2171 *cookie ^= 2;
2133 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN; 2172 IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_TX_OFFCHAN;
2173 IEEE80211_SKB_CB(skb)->hw_queue =
2174 local->hw.offchannel_tx_hw_queue;
2134 local->hw_roc_skb = skb; 2175 local->hw_roc_skb = skb;
2135 local->hw_roc_skb_for_status = skb; 2176 local->hw_roc_skb_for_status = skb;
2136 mutex_unlock(&local->mtx); 2177 mutex_unlock(&local->mtx);
@@ -2350,8 +2391,8 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
2350 tf->u.setup_req.capability = 2391 tf->u.setup_req.capability =
2351 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata)); 2392 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
2352 2393
2353 ieee80211_add_srates_ie(&sdata->vif, skb); 2394 ieee80211_add_srates_ie(&sdata->vif, skb, false);
2354 ieee80211_add_ext_srates_ie(&sdata->vif, skb); 2395 ieee80211_add_ext_srates_ie(&sdata->vif, skb, false);
2355 ieee80211_tdls_add_ext_capab(skb); 2396 ieee80211_tdls_add_ext_capab(skb);
2356 break; 2397 break;
2357 case WLAN_TDLS_SETUP_RESPONSE: 2398 case WLAN_TDLS_SETUP_RESPONSE:
@@ -2364,8 +2405,8 @@ ieee80211_prep_tdls_encap_data(struct wiphy *wiphy, struct net_device *dev,
2364 tf->u.setup_resp.capability = 2405 tf->u.setup_resp.capability =
2365 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata)); 2406 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
2366 2407
2367 ieee80211_add_srates_ie(&sdata->vif, skb); 2408 ieee80211_add_srates_ie(&sdata->vif, skb, false);
2368 ieee80211_add_ext_srates_ie(&sdata->vif, skb); 2409 ieee80211_add_ext_srates_ie(&sdata->vif, skb, false);
2369 ieee80211_tdls_add_ext_capab(skb); 2410 ieee80211_tdls_add_ext_capab(skb);
2370 break; 2411 break;
2371 case WLAN_TDLS_SETUP_CONFIRM: 2412 case WLAN_TDLS_SETUP_CONFIRM:
@@ -2425,8 +2466,8 @@ ieee80211_prep_tdls_direct(struct wiphy *wiphy, struct net_device *dev,
2425 mgmt->u.action.u.tdls_discover_resp.capability = 2466 mgmt->u.action.u.tdls_discover_resp.capability =
2426 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata)); 2467 cpu_to_le16(ieee80211_get_tdls_sta_capab(sdata));
2427 2468
2428 ieee80211_add_srates_ie(&sdata->vif, skb); 2469 ieee80211_add_srates_ie(&sdata->vif, skb, false);
2429 ieee80211_add_ext_srates_ie(&sdata->vif, skb); 2470 ieee80211_add_ext_srates_ie(&sdata->vif, skb, false);
2430 ieee80211_tdls_add_ext_capab(skb); 2471 ieee80211_tdls_add_ext_capab(skb);
2431 break; 2472 break;
2432 default: 2473 default:
@@ -2666,13 +2707,22 @@ static int ieee80211_probe_client(struct wiphy *wiphy, struct net_device *dev,
2666} 2707}
2667 2708
2668static struct ieee80211_channel * 2709static struct ieee80211_channel *
2669ieee80211_wiphy_get_channel(struct wiphy *wiphy) 2710ieee80211_wiphy_get_channel(struct wiphy *wiphy,
2711 enum nl80211_channel_type *type)
2670{ 2712{
2671 struct ieee80211_local *local = wiphy_priv(wiphy); 2713 struct ieee80211_local *local = wiphy_priv(wiphy);
2672 2714
2715 *type = local->_oper_channel_type;
2673 return local->oper_channel; 2716 return local->oper_channel;
2674} 2717}
2675 2718
2719#ifdef CONFIG_PM
2720static void ieee80211_set_wakeup(struct wiphy *wiphy, bool enabled)
2721{
2722 drv_set_wakeup(wiphy_priv(wiphy), enabled);
2723}
2724#endif
2725
2676struct cfg80211_ops mac80211_config_ops = { 2726struct cfg80211_ops mac80211_config_ops = {
2677 .add_virtual_intf = ieee80211_add_iface, 2727 .add_virtual_intf = ieee80211_add_iface,
2678 .del_virtual_intf = ieee80211_del_iface, 2728 .del_virtual_intf = ieee80211_del_iface,
@@ -2741,4 +2791,7 @@ struct cfg80211_ops mac80211_config_ops = {
2741 .probe_client = ieee80211_probe_client, 2791 .probe_client = ieee80211_probe_client,
2742 .get_channel = ieee80211_wiphy_get_channel, 2792 .get_channel = ieee80211_wiphy_get_channel,
2743 .set_noack_map = ieee80211_set_noack_map, 2793 .set_noack_map = ieee80211_set_noack_map,
2794#ifdef CONFIG_PM
2795 .set_wakeup = ieee80211_set_wakeup,
2796#endif
2744}; 2797};
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index e00ce8c3e28e..c76cf7230c7d 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -135,29 +135,3 @@ bool ieee80211_set_channel_type(struct ieee80211_local *local,
135 135
136 return result; 136 return result;
137} 137}
138
139/*
140 * ieee80211_get_tx_channel_type returns the channel type we should
141 * use for packet transmission, given the channel capability and
142 * whatever regulatory flags we have been given.
143 */
144enum nl80211_channel_type ieee80211_get_tx_channel_type(
145 struct ieee80211_local *local,
146 enum nl80211_channel_type channel_type)
147{
148 switch (channel_type) {
149 case NL80211_CHAN_HT40PLUS:
150 if (local->hw.conf.channel->flags &
151 IEEE80211_CHAN_NO_HT40PLUS)
152 return NL80211_CHAN_HT20;
153 break;
154 case NL80211_CHAN_HT40MINUS:
155 if (local->hw.conf.channel->flags &
156 IEEE80211_CHAN_NO_HT40MINUS)
157 return NL80211_CHAN_HT20;
158 break;
159 default:
160 break;
161 }
162 return channel_type;
163}
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 30f99c344847..ea0122dbd2b3 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -394,7 +394,7 @@ static ssize_t ieee80211_if_parse_uapsd_max_sp_len(
394__IEEE80211_IF_FILE_W(uapsd_max_sp_len); 394__IEEE80211_IF_FILE_W(uapsd_max_sp_len);
395 395
396/* AP attributes */ 396/* AP attributes */
397IEEE80211_IF_FILE(num_sta_authorized, u.ap.num_sta_authorized, ATOMIC); 397IEEE80211_IF_FILE(num_mcast_sta, u.ap.num_mcast_sta, ATOMIC);
398IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC); 398IEEE80211_IF_FILE(num_sta_ps, u.ap.num_sta_ps, ATOMIC);
399IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC); 399IEEE80211_IF_FILE(dtim_count, u.ap.dtim_count, DEC);
400 400
@@ -424,6 +424,7 @@ static ssize_t ieee80211_if_parse_tsf(
424 struct ieee80211_local *local = sdata->local; 424 struct ieee80211_local *local = sdata->local;
425 unsigned long long tsf; 425 unsigned long long tsf;
426 int ret; 426 int ret;
427 int tsf_is_delta = 0;
427 428
428 if (strncmp(buf, "reset", 5) == 0) { 429 if (strncmp(buf, "reset", 5) == 0) {
429 if (local->ops->reset_tsf) { 430 if (local->ops->reset_tsf) {
@@ -431,9 +432,20 @@ static ssize_t ieee80211_if_parse_tsf(
431 wiphy_info(local->hw.wiphy, "debugfs reset TSF\n"); 432 wiphy_info(local->hw.wiphy, "debugfs reset TSF\n");
432 } 433 }
433 } else { 434 } else {
435 if (buflen > 10 && buf[1] == '=') {
436 if (buf[0] == '+')
437 tsf_is_delta = 1;
438 else if (buf[0] == '-')
439 tsf_is_delta = -1;
440 else
441 return -EINVAL;
442 buf += 2;
443 }
434 ret = kstrtoull(buf, 10, &tsf); 444 ret = kstrtoull(buf, 10, &tsf);
435 if (ret < 0) 445 if (ret < 0)
436 return -EINVAL; 446 return -EINVAL;
447 if (tsf_is_delta)
448 tsf = drv_get_tsf(local, sdata) + tsf_is_delta * tsf;
437 if (local->ops->set_tsf) { 449 if (local->ops->set_tsf) {
438 drv_set_tsf(local, sdata, tsf); 450 drv_set_tsf(local, sdata, tsf);
439 wiphy_info(local->hw.wiphy, 451 wiphy_info(local->hw.wiphy,
@@ -499,26 +511,23 @@ IEEE80211_IF_FILE(dot11MeshForwarding, u.mesh.mshcfg.dot11MeshForwarding, DEC);
499IEEE80211_IF_FILE(rssi_threshold, u.mesh.mshcfg.rssi_threshold, DEC); 511IEEE80211_IF_FILE(rssi_threshold, u.mesh.mshcfg.rssi_threshold, DEC);
500#endif 512#endif
501 513
502
503#define DEBUGFS_ADD(name) \
504 debugfs_create_file(#name, 0400, sdata->debugfs.dir, \
505 sdata, &name##_ops);
506
507#define DEBUGFS_ADD_MODE(name, mode) \ 514#define DEBUGFS_ADD_MODE(name, mode) \
508 debugfs_create_file(#name, mode, sdata->debugfs.dir, \ 515 debugfs_create_file(#name, mode, sdata->debugfs.dir, \
509 sdata, &name##_ops); 516 sdata, &name##_ops);
510 517
511static void add_sta_files(struct ieee80211_sub_if_data *sdata) 518#define DEBUGFS_ADD(name) DEBUGFS_ADD_MODE(name, 0400)
519
520static void add_common_files(struct ieee80211_sub_if_data *sdata)
512{ 521{
513 DEBUGFS_ADD(drop_unencrypted); 522 DEBUGFS_ADD(drop_unencrypted);
514 DEBUGFS_ADD(flags);
515 DEBUGFS_ADD(state);
516 DEBUGFS_ADD(channel_type);
517 DEBUGFS_ADD(rc_rateidx_mask_2ghz); 523 DEBUGFS_ADD(rc_rateidx_mask_2ghz);
518 DEBUGFS_ADD(rc_rateidx_mask_5ghz); 524 DEBUGFS_ADD(rc_rateidx_mask_5ghz);
519 DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz); 525 DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
520 DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz); 526 DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
527}
521 528
529static void add_sta_files(struct ieee80211_sub_if_data *sdata)
530{
522 DEBUGFS_ADD(bssid); 531 DEBUGFS_ADD(bssid);
523 DEBUGFS_ADD(aid); 532 DEBUGFS_ADD(aid);
524 DEBUGFS_ADD(last_beacon); 533 DEBUGFS_ADD(last_beacon);
@@ -531,16 +540,7 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata)
531 540
532static void add_ap_files(struct ieee80211_sub_if_data *sdata) 541static void add_ap_files(struct ieee80211_sub_if_data *sdata)
533{ 542{
534 DEBUGFS_ADD(drop_unencrypted); 543 DEBUGFS_ADD(num_mcast_sta);
535 DEBUGFS_ADD(flags);
536 DEBUGFS_ADD(state);
537 DEBUGFS_ADD(channel_type);
538 DEBUGFS_ADD(rc_rateidx_mask_2ghz);
539 DEBUGFS_ADD(rc_rateidx_mask_5ghz);
540 DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
541 DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
542
543 DEBUGFS_ADD(num_sta_authorized);
544 DEBUGFS_ADD(num_sta_ps); 544 DEBUGFS_ADD(num_sta_ps);
545 DEBUGFS_ADD(dtim_count); 545 DEBUGFS_ADD(dtim_count);
546 DEBUGFS_ADD(num_buffered_multicast); 546 DEBUGFS_ADD(num_buffered_multicast);
@@ -549,48 +549,14 @@ static void add_ap_files(struct ieee80211_sub_if_data *sdata)
549 549
550static void add_ibss_files(struct ieee80211_sub_if_data *sdata) 550static void add_ibss_files(struct ieee80211_sub_if_data *sdata)
551{ 551{
552 DEBUGFS_ADD(channel_type);
553 DEBUGFS_ADD(rc_rateidx_mask_2ghz);
554 DEBUGFS_ADD(rc_rateidx_mask_5ghz);
555 DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
556 DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
557
558 DEBUGFS_ADD_MODE(tsf, 0600); 552 DEBUGFS_ADD_MODE(tsf, 0600);
559} 553}
560 554
561static void add_wds_files(struct ieee80211_sub_if_data *sdata) 555static void add_wds_files(struct ieee80211_sub_if_data *sdata)
562{ 556{
563 DEBUGFS_ADD(drop_unencrypted);
564 DEBUGFS_ADD(flags);
565 DEBUGFS_ADD(state);
566 DEBUGFS_ADD(channel_type);
567 DEBUGFS_ADD(rc_rateidx_mask_2ghz);
568 DEBUGFS_ADD(rc_rateidx_mask_5ghz);
569 DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
570 DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
571
572 DEBUGFS_ADD(peer); 557 DEBUGFS_ADD(peer);
573} 558}
574 559
575static void add_vlan_files(struct ieee80211_sub_if_data *sdata)
576{
577 DEBUGFS_ADD(drop_unencrypted);
578 DEBUGFS_ADD(flags);
579 DEBUGFS_ADD(state);
580 DEBUGFS_ADD(channel_type);
581 DEBUGFS_ADD(rc_rateidx_mask_2ghz);
582 DEBUGFS_ADD(rc_rateidx_mask_5ghz);
583 DEBUGFS_ADD(rc_rateidx_mcs_mask_2ghz);
584 DEBUGFS_ADD(rc_rateidx_mcs_mask_5ghz);
585}
586
587static void add_monitor_files(struct ieee80211_sub_if_data *sdata)
588{
589 DEBUGFS_ADD(flags);
590 DEBUGFS_ADD(state);
591 DEBUGFS_ADD(channel_type);
592}
593
594#ifdef CONFIG_MAC80211_MESH 560#ifdef CONFIG_MAC80211_MESH
595 561
596static void add_mesh_files(struct ieee80211_sub_if_data *sdata) 562static void add_mesh_files(struct ieee80211_sub_if_data *sdata)
@@ -651,6 +617,13 @@ static void add_files(struct ieee80211_sub_if_data *sdata)
651 if (!sdata->debugfs.dir) 617 if (!sdata->debugfs.dir)
652 return; 618 return;
653 619
620 DEBUGFS_ADD(flags);
621 DEBUGFS_ADD(state);
622 DEBUGFS_ADD(channel_type);
623
624 if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
625 add_common_files(sdata);
626
654 switch (sdata->vif.type) { 627 switch (sdata->vif.type) {
655 case NL80211_IFTYPE_MESH_POINT: 628 case NL80211_IFTYPE_MESH_POINT:
656#ifdef CONFIG_MAC80211_MESH 629#ifdef CONFIG_MAC80211_MESH
@@ -671,12 +644,6 @@ static void add_files(struct ieee80211_sub_if_data *sdata)
671 case NL80211_IFTYPE_WDS: 644 case NL80211_IFTYPE_WDS:
672 add_wds_files(sdata); 645 add_wds_files(sdata);
673 break; 646 break;
674 case NL80211_IFTYPE_MONITOR:
675 add_monitor_files(sdata);
676 break;
677 case NL80211_IFTYPE_AP_VLAN:
678 add_vlan_files(sdata);
679 break;
680 default: 647 default:
681 break; 648 break;
682 } 649 }
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 832b2da5e4cd..5ccec2c1e9f6 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -63,7 +63,7 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
63 test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : "" 63 test_sta_flag(sta, WLAN_STA_##flg) ? #flg "\n" : ""
64 64
65 int res = scnprintf(buf, sizeof(buf), 65 int res = scnprintf(buf, sizeof(buf),
66 "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s", 66 "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
67 TEST(AUTH), TEST(ASSOC), TEST(PS_STA), 67 TEST(AUTH), TEST(ASSOC), TEST(PS_STA),
68 TEST(PS_DRIVER), TEST(AUTHORIZED), 68 TEST(PS_DRIVER), TEST(AUTHORIZED),
69 TEST(SHORT_PREAMBLE), 69 TEST(SHORT_PREAMBLE),
@@ -71,7 +71,8 @@ static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
71 TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL), 71 TEST(MFP), TEST(BLOCK_BA), TEST(PSPOLL),
72 TEST(UAPSD), TEST(SP), TEST(TDLS_PEER), 72 TEST(UAPSD), TEST(SP), TEST(TDLS_PEER),
73 TEST(TDLS_PEER_AUTH), TEST(4ADDR_EVENT), 73 TEST(TDLS_PEER_AUTH), TEST(4ADDR_EVENT),
74 TEST(INSERTED), TEST(RATE_CONTROL)); 74 TEST(INSERTED), TEST(RATE_CONTROL),
75 TEST(TOFFSET_KNOWN));
75#undef TEST 76#undef TEST
76 return simple_read_from_buffer(userbuf, count, ppos, buf, res); 77 return simple_read_from_buffer(userbuf, count, ppos, buf, res);
77} 78}
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index af4691fed645..4a0e559cb26b 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -7,7 +7,9 @@
7 7
8static inline void check_sdata_in_driver(struct ieee80211_sub_if_data *sdata) 8static inline void check_sdata_in_driver(struct ieee80211_sub_if_data *sdata)
9{ 9{
10 WARN_ON(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER)); 10 WARN(!(sdata->flags & IEEE80211_SDATA_IN_DRIVER),
11 "%s: Failed check-sdata-in-driver check, flags: 0x%x\n",
12 sdata->dev->name, sdata->flags);
11} 13}
12 14
13static inline struct ieee80211_sub_if_data * 15static inline struct ieee80211_sub_if_data *
@@ -89,6 +91,19 @@ static inline int drv_resume(struct ieee80211_local *local)
89 trace_drv_return_int(local, ret); 91 trace_drv_return_int(local, ret);
90 return ret; 92 return ret;
91} 93}
94
95static inline void drv_set_wakeup(struct ieee80211_local *local,
96 bool enabled)
97{
98 might_sleep();
99
100 if (!local->ops->set_wakeup)
101 return;
102
103 trace_drv_set_wakeup(local, enabled);
104 local->ops->set_wakeup(&local->hw, enabled);
105 trace_drv_return_void(local);
106}
92#endif 107#endif
93 108
94static inline int drv_add_interface(struct ieee80211_local *local, 109static inline int drv_add_interface(struct ieee80211_local *local,
@@ -99,7 +114,8 @@ static inline int drv_add_interface(struct ieee80211_local *local,
99 might_sleep(); 114 might_sleep();
100 115
101 if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN || 116 if (WARN_ON(sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
102 sdata->vif.type == NL80211_IFTYPE_MONITOR)) 117 (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
118 !(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))))
103 return -EINVAL; 119 return -EINVAL;
104 120
105 trace_drv_add_interface(local, sdata); 121 trace_drv_add_interface(local, sdata);
@@ -474,8 +490,23 @@ int drv_sta_state(struct ieee80211_local *local,
474 return ret; 490 return ret;
475} 491}
476 492
493static inline void drv_sta_rc_update(struct ieee80211_local *local,
494 struct ieee80211_sub_if_data *sdata,
495 struct ieee80211_sta *sta, u32 changed)
496{
497 sdata = get_bss_sdata(sdata);
498 check_sdata_in_driver(sdata);
499
500 trace_drv_sta_rc_update(local, sdata, sta, changed);
501 if (local->ops->sta_rc_update)
502 local->ops->sta_rc_update(&local->hw, &sdata->vif,
503 sta, changed);
504
505 trace_drv_return_void(local);
506}
507
477static inline int drv_conf_tx(struct ieee80211_local *local, 508static inline int drv_conf_tx(struct ieee80211_local *local,
478 struct ieee80211_sub_if_data *sdata, u16 queue, 509 struct ieee80211_sub_if_data *sdata, u16 ac,
479 const struct ieee80211_tx_queue_params *params) 510 const struct ieee80211_tx_queue_params *params)
480{ 511{
481 int ret = -EOPNOTSUPP; 512 int ret = -EOPNOTSUPP;
@@ -484,10 +515,10 @@ static inline int drv_conf_tx(struct ieee80211_local *local,
484 515
485 check_sdata_in_driver(sdata); 516 check_sdata_in_driver(sdata);
486 517
487 trace_drv_conf_tx(local, sdata, queue, params); 518 trace_drv_conf_tx(local, sdata, ac, params);
488 if (local->ops->conf_tx) 519 if (local->ops->conf_tx)
489 ret = local->ops->conf_tx(&local->hw, &sdata->vif, 520 ret = local->ops->conf_tx(&local->hw, &sdata->vif,
490 queue, params); 521 ac, params);
491 trace_drv_return_int(local, ret); 522 trace_drv_return_int(local, ret);
492 return ret; 523 return ret;
493} 524}
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index 21d6f5290a1c..7c0754bed61b 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -171,6 +171,20 @@ DEFINE_EVENT(local_only_evt, drv_resume,
171 TP_ARGS(local) 171 TP_ARGS(local)
172); 172);
173 173
174TRACE_EVENT(drv_set_wakeup,
175 TP_PROTO(struct ieee80211_local *local, bool enabled),
176 TP_ARGS(local, enabled),
177 TP_STRUCT__entry(
178 LOCAL_ENTRY
179 __field(bool, enabled)
180 ),
181 TP_fast_assign(
182 LOCAL_ASSIGN;
183 __entry->enabled = enabled;
184 ),
185 TP_printk(LOCAL_PR_FMT " enabled:%d", LOCAL_PR_ARG, __entry->enabled)
186);
187
174DEFINE_EVENT(local_only_evt, drv_stop, 188DEFINE_EVENT(local_only_evt, drv_stop,
175 TP_PROTO(struct ieee80211_local *local), 189 TP_PROTO(struct ieee80211_local *local),
176 TP_ARGS(local) 190 TP_ARGS(local)
@@ -624,6 +638,34 @@ TRACE_EVENT(drv_sta_state,
624 ) 638 )
625); 639);
626 640
641TRACE_EVENT(drv_sta_rc_update,
642 TP_PROTO(struct ieee80211_local *local,
643 struct ieee80211_sub_if_data *sdata,
644 struct ieee80211_sta *sta,
645 u32 changed),
646
647 TP_ARGS(local, sdata, sta, changed),
648
649 TP_STRUCT__entry(
650 LOCAL_ENTRY
651 VIF_ENTRY
652 STA_ENTRY
653 __field(u32, changed)
654 ),
655
656 TP_fast_assign(
657 LOCAL_ASSIGN;
658 VIF_ASSIGN;
659 STA_ASSIGN;
660 __entry->changed = changed;
661 ),
662
663 TP_printk(
664 LOCAL_PR_FMT VIF_PR_FMT STA_PR_FMT " changed: 0x%x",
665 LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->changed
666 )
667);
668
627TRACE_EVENT(drv_sta_add, 669TRACE_EVENT(drv_sta_add,
628 TP_PROTO(struct ieee80211_local *local, 670 TP_PROTO(struct ieee80211_local *local,
629 struct ieee80211_sub_if_data *sdata, 671 struct ieee80211_sub_if_data *sdata,
@@ -677,15 +719,14 @@ TRACE_EVENT(drv_sta_remove,
677TRACE_EVENT(drv_conf_tx, 719TRACE_EVENT(drv_conf_tx,
678 TP_PROTO(struct ieee80211_local *local, 720 TP_PROTO(struct ieee80211_local *local,
679 struct ieee80211_sub_if_data *sdata, 721 struct ieee80211_sub_if_data *sdata,
680 u16 queue, 722 u16 ac, const struct ieee80211_tx_queue_params *params),
681 const struct ieee80211_tx_queue_params *params),
682 723
683 TP_ARGS(local, sdata, queue, params), 724 TP_ARGS(local, sdata, ac, params),
684 725
685 TP_STRUCT__entry( 726 TP_STRUCT__entry(
686 LOCAL_ENTRY 727 LOCAL_ENTRY
687 VIF_ENTRY 728 VIF_ENTRY
688 __field(u16, queue) 729 __field(u16, ac)
689 __field(u16, txop) 730 __field(u16, txop)
690 __field(u16, cw_min) 731 __field(u16, cw_min)
691 __field(u16, cw_max) 732 __field(u16, cw_max)
@@ -696,7 +737,7 @@ TRACE_EVENT(drv_conf_tx,
696 TP_fast_assign( 737 TP_fast_assign(
697 LOCAL_ASSIGN; 738 LOCAL_ASSIGN;
698 VIF_ASSIGN; 739 VIF_ASSIGN;
699 __entry->queue = queue; 740 __entry->ac = ac;
700 __entry->txop = params->txop; 741 __entry->txop = params->txop;
701 __entry->cw_max = params->cw_max; 742 __entry->cw_max = params->cw_max;
702 __entry->cw_min = params->cw_min; 743 __entry->cw_min = params->cw_min;
@@ -705,8 +746,8 @@ TRACE_EVENT(drv_conf_tx,
705 ), 746 ),
706 747
707 TP_printk( 748 TP_printk(
708 LOCAL_PR_FMT VIF_PR_FMT " queue:%d", 749 LOCAL_PR_FMT VIF_PR_FMT " AC:%d",
709 LOCAL_PR_ARG, VIF_PR_ARG, __entry->queue 750 LOCAL_PR_ARG, VIF_PR_ARG, __entry->ac
710 ) 751 )
711); 752);
712 753
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index f25fff7607d8..9b603366943c 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -19,15 +19,6 @@
19#include "ieee80211_i.h" 19#include "ieee80211_i.h"
20#include "rate.h" 20#include "rate.h"
21 21
22bool ieee80111_cfg_override_disables_ht40(struct ieee80211_sub_if_data *sdata)
23{
24 const __le16 flg = cpu_to_le16(IEEE80211_HT_CAP_SUP_WIDTH_20_40);
25 if ((sdata->u.mgd.ht_capa_mask.cap_info & flg) &&
26 !(sdata->u.mgd.ht_capa.cap_info & flg))
27 return true;
28 return false;
29}
30
31static void __check_htcap_disable(struct ieee80211_sub_if_data *sdata, 22static void __check_htcap_disable(struct ieee80211_sub_if_data *sdata,
32 struct ieee80211_sta_ht_cap *ht_cap, 23 struct ieee80211_sta_ht_cap *ht_cap,
33 u16 flag) 24 u16 flag)
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index cef7c29214a8..61cd391c32a3 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -160,16 +160,14 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
160 if (channel_type && sband->ht_cap.ht_supported) { 160 if (channel_type && sband->ht_cap.ht_supported) {
161 pos = skb_put(skb, 4 + 161 pos = skb_put(skb, 4 +
162 sizeof(struct ieee80211_ht_cap) + 162 sizeof(struct ieee80211_ht_cap) +
163 sizeof(struct ieee80211_ht_info)); 163 sizeof(struct ieee80211_ht_operation));
164 pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap, 164 pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap,
165 sband->ht_cap.cap); 165 sband->ht_cap.cap);
166 pos = ieee80211_ie_build_ht_info(pos, 166 pos = ieee80211_ie_build_ht_oper(pos, &sband->ht_cap,
167 &sband->ht_cap, 167 chan, channel_type);
168 chan,
169 channel_type);
170 } 168 }
171 169
172 if (local->hw.queues >= 4) { 170 if (local->hw.queues >= IEEE80211_NUM_ACS) {
173 pos = skb_put(skb, 9); 171 pos = skb_put(skb, 9);
174 *pos++ = WLAN_EID_VENDOR_SPECIFIC; 172 *pos++ = WLAN_EID_VENDOR_SPECIFIC;
175 *pos++ = 7; /* len */ 173 *pos++ = 7; /* len */
@@ -410,7 +408,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
410 408
411 if (elems->supp_rates) { 409 if (elems->supp_rates) {
412 supp_rates = ieee80211_sta_get_rates(local, elems, 410 supp_rates = ieee80211_sta_get_rates(local, elems,
413 band); 411 band, NULL);
414 if (sta) { 412 if (sta) {
415 u32 prev_rates; 413 u32 prev_rates;
416 414
@@ -441,13 +439,13 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
441 if (sta && elems->wmm_info) 439 if (sta && elems->wmm_info)
442 set_sta_flag(sta, WLAN_STA_WME); 440 set_sta_flag(sta, WLAN_STA_WME);
443 441
444 if (sta && elems->ht_info_elem && elems->ht_cap_elem && 442 if (sta && elems->ht_operation && elems->ht_cap_elem &&
445 sdata->u.ibss.channel_type != NL80211_CHAN_NO_HT) { 443 sdata->u.ibss.channel_type != NL80211_CHAN_NO_HT) {
446 /* we both use HT */ 444 /* we both use HT */
447 struct ieee80211_sta_ht_cap sta_ht_cap_new; 445 struct ieee80211_sta_ht_cap sta_ht_cap_new;
448 enum nl80211_channel_type channel_type = 446 enum nl80211_channel_type channel_type =
449 ieee80211_ht_info_to_channel_type( 447 ieee80211_ht_oper_to_channel_type(
450 elems->ht_info_elem); 448 elems->ht_operation);
451 449
452 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, 450 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
453 elems->ht_cap_elem, 451 elems->ht_cap_elem,
@@ -560,7 +558,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
560 sdata->name, mgmt->bssid); 558 sdata->name, mgmt->bssid);
561#endif 559#endif
562 ieee80211_sta_join_ibss(sdata, bss); 560 ieee80211_sta_join_ibss(sdata, bss);
563 supp_rates = ieee80211_sta_get_rates(local, elems, band); 561 supp_rates = ieee80211_sta_get_rates(local, elems, band, NULL);
564 ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, 562 ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa,
565 supp_rates, true); 563 supp_rates, true);
566 rcu_read_unlock(); 564 rcu_read_unlock();
@@ -1063,7 +1061,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
1063 4 /* IBSS params */ + 1061 4 /* IBSS params */ +
1064 2 + (IEEE80211_MAX_SUPP_RATES - 8) + 1062 2 + (IEEE80211_MAX_SUPP_RATES - 8) +
1065 2 + sizeof(struct ieee80211_ht_cap) + 1063 2 + sizeof(struct ieee80211_ht_cap) +
1066 2 + sizeof(struct ieee80211_ht_info) + 1064 2 + sizeof(struct ieee80211_ht_operation) +
1067 params->ie_len); 1065 params->ie_len);
1068 if (!skb) 1066 if (!skb)
1069 return -ENOMEM; 1067 return -ENOMEM;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index db8fae51714c..6cd89d414f22 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -52,7 +52,8 @@ struct ieee80211_local;
52 * increased memory use (about 2 kB of RAM per entry). */ 52 * increased memory use (about 2 kB of RAM per entry). */
53#define IEEE80211_FRAGMENT_MAX 4 53#define IEEE80211_FRAGMENT_MAX 4
54 54
55#define TU_TO_EXP_TIME(x) (jiffies + usecs_to_jiffies((x) * 1024)) 55#define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024))
56#define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x))
56 57
57#define IEEE80211_DEFAULT_UAPSD_QUEUES \ 58#define IEEE80211_DEFAULT_UAPSD_QUEUES \
58 (IEEE80211_WMM_IE_STA_QOSINFO_AC_BK | \ 59 (IEEE80211_WMM_IE_STA_QOSINFO_AC_BK | \
@@ -281,7 +282,7 @@ struct ieee80211_if_ap {
281 u8 tim[sizeof(unsigned long) * BITS_TO_LONGS(IEEE80211_MAX_AID + 1)]; 282 u8 tim[sizeof(unsigned long) * BITS_TO_LONGS(IEEE80211_MAX_AID + 1)];
282 struct sk_buff_head ps_bc_buf; 283 struct sk_buff_head ps_bc_buf;
283 atomic_t num_sta_ps; /* number of stations in PS mode */ 284 atomic_t num_sta_ps; /* number of stations in PS mode */
284 atomic_t num_sta_authorized; /* number of authorized stations */ 285 atomic_t num_mcast_sta; /* number of stations receiving multicast */
285 int dtim_count; 286 int dtim_count;
286 bool dtim_bc_mc; 287 bool dtim_bc_mc;
287}; 288};
@@ -378,6 +379,7 @@ enum ieee80211_sta_flags {
378 IEEE80211_STA_UAPSD_ENABLED = BIT(7), 379 IEEE80211_STA_UAPSD_ENABLED = BIT(7),
379 IEEE80211_STA_NULLFUNC_ACKED = BIT(8), 380 IEEE80211_STA_NULLFUNC_ACKED = BIT(8),
380 IEEE80211_STA_RESET_SIGNAL_AVE = BIT(9), 381 IEEE80211_STA_RESET_SIGNAL_AVE = BIT(9),
382 IEEE80211_STA_DISABLE_40MHZ = BIT(10),
381}; 383};
382 384
383struct ieee80211_mgd_auth_data { 385struct ieee80211_mgd_auth_data {
@@ -397,7 +399,7 @@ struct ieee80211_mgd_auth_data {
397struct ieee80211_mgd_assoc_data { 399struct ieee80211_mgd_assoc_data {
398 struct cfg80211_bss *bss; 400 struct cfg80211_bss *bss;
399 const u8 *supp_rates; 401 const u8 *supp_rates;
400 const u8 *ht_information_ie; 402 const u8 *ht_operation_ie;
401 403
402 unsigned long timeout; 404 unsigned long timeout;
403 int tries; 405 int tries;
@@ -552,6 +554,24 @@ struct ieee80211_if_ibss {
552 } state; 554 } state;
553}; 555};
554 556
557/**
558 * struct ieee80211_mesh_sync_ops - Extensible synchronization framework interface
559 *
560 * these declarations define the interface, which enables
561 * vendor-specific mesh synchronization
562 *
563 */
564struct ieee802_11_elems;
565struct ieee80211_mesh_sync_ops {
566 void (*rx_bcn_presp)(struct ieee80211_sub_if_data *sdata,
567 u16 stype,
568 struct ieee80211_mgmt *mgmt,
569 struct ieee802_11_elems *elems,
570 struct ieee80211_rx_status *rx_status);
571 void (*adjust_tbtt)(struct ieee80211_sub_if_data *sdata);
572 /* add other framework functions here */
573};
574
555struct ieee80211_if_mesh { 575struct ieee80211_if_mesh {
556 struct timer_list housekeeping_timer; 576 struct timer_list housekeeping_timer;
557 struct timer_list mesh_path_timer; 577 struct timer_list mesh_path_timer;
@@ -600,6 +620,11 @@ struct ieee80211_if_mesh {
600 IEEE80211_MESH_SEC_AUTHED = 0x1, 620 IEEE80211_MESH_SEC_AUTHED = 0x1,
601 IEEE80211_MESH_SEC_SECURED = 0x2, 621 IEEE80211_MESH_SEC_SECURED = 0x2,
602 } security; 622 } security;
623 /* Extensible Synchronization Framework */
624 struct ieee80211_mesh_sync_ops *sync_ops;
625 s64 sync_offset_clockdrift_max;
626 spinlock_t sync_offset_lock;
627 bool adjusting_tbtt;
603}; 628};
604 629
605#ifdef CONFIG_MAC80211_MESH 630#ifdef CONFIG_MAC80211_MESH
@@ -666,12 +691,6 @@ struct ieee80211_sub_if_data {
666 691
667 char name[IFNAMSIZ]; 692 char name[IFNAMSIZ];
668 693
669 /*
670 * keep track of whether the HT opmode (stored in
671 * vif.bss_info.ht_operation_mode) is valid.
672 */
673 bool ht_opmode_valid;
674
675 /* to detect idle changes */ 694 /* to detect idle changes */
676 bool old_idle; 695 bool old_idle;
677 696
@@ -691,7 +710,7 @@ struct ieee80211_sub_if_data {
691 __be16 control_port_protocol; 710 __be16 control_port_protocol;
692 bool control_port_no_encrypt; 711 bool control_port_no_encrypt;
693 712
694 struct ieee80211_tx_queue_params tx_conf[IEEE80211_MAX_QUEUES]; 713 struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS];
695 714
696 struct work_struct work; 715 struct work_struct work;
697 struct sk_buff_head skb_queue; 716 struct sk_buff_head skb_queue;
@@ -761,7 +780,6 @@ enum queue_stop_reason {
761 IEEE80211_QUEUE_STOP_REASON_AGGREGATION, 780 IEEE80211_QUEUE_STOP_REASON_AGGREGATION,
762 IEEE80211_QUEUE_STOP_REASON_SUSPEND, 781 IEEE80211_QUEUE_STOP_REASON_SUSPEND,
763 IEEE80211_QUEUE_STOP_REASON_SKB_ADD, 782 IEEE80211_QUEUE_STOP_REASON_SKB_ADD,
764 IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE,
765}; 783};
766 784
767#ifdef CONFIG_MAC80211_LEDS 785#ifdef CONFIG_MAC80211_LEDS
@@ -785,6 +803,8 @@ struct tpt_led_trigger {
785 * well be on the operating channel 803 * well be on the operating channel
786 * @SCAN_HW_SCANNING: The hardware is scanning for us, we have no way to 804 * @SCAN_HW_SCANNING: The hardware is scanning for us, we have no way to
787 * determine if we are on the operating channel or not 805 * determine if we are on the operating channel or not
806 * @SCAN_ONCHANNEL_SCANNING: Do a software scan on only the current operating
807 * channel. This should not interrupt normal traffic.
788 * @SCAN_COMPLETED: Set for our scan work function when the driver reported 808 * @SCAN_COMPLETED: Set for our scan work function when the driver reported
789 * that the scan completed. 809 * that the scan completed.
790 * @SCAN_ABORTED: Set for our scan work function when the driver reported 810 * @SCAN_ABORTED: Set for our scan work function when the driver reported
@@ -793,6 +813,7 @@ struct tpt_led_trigger {
793enum { 813enum {
794 SCAN_SW_SCANNING, 814 SCAN_SW_SCANNING,
795 SCAN_HW_SCANNING, 815 SCAN_HW_SCANNING,
816 SCAN_ONCHANNEL_SCANNING,
796 SCAN_COMPLETED, 817 SCAN_COMPLETED,
797 SCAN_ABORTED, 818 SCAN_ABORTED,
798}; 819};
@@ -1082,6 +1103,9 @@ struct ieee80211_local {
1082 struct net_device napi_dev; 1103 struct net_device napi_dev;
1083 1104
1084 struct napi_struct napi; 1105 struct napi_struct napi;
1106
1107 /* virtual monitor interface */
1108 struct ieee80211_sub_if_data __rcu *monitor_sdata;
1085}; 1109};
1086 1110
1087static inline struct ieee80211_sub_if_data * 1111static inline struct ieee80211_sub_if_data *
@@ -1117,7 +1141,7 @@ struct ieee802_11_elems {
1117 u8 *wmm_info; 1141 u8 *wmm_info;
1118 u8 *wmm_param; 1142 u8 *wmm_param;
1119 struct ieee80211_ht_cap *ht_cap_elem; 1143 struct ieee80211_ht_cap *ht_cap_elem;
1120 struct ieee80211_ht_info *ht_info_elem; 1144 struct ieee80211_ht_operation *ht_operation;
1121 struct ieee80211_meshconf_ie *mesh_config; 1145 struct ieee80211_meshconf_ie *mesh_config;
1122 u8 *mesh_id; 1146 u8 *mesh_id;
1123 u8 *peering; 1147 u8 *peering;
@@ -1239,6 +1263,7 @@ int ieee80211_request_internal_scan(struct ieee80211_sub_if_data *sdata,
1239int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata, 1263int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
1240 struct cfg80211_scan_request *req); 1264 struct cfg80211_scan_request *req);
1241void ieee80211_scan_cancel(struct ieee80211_local *local); 1265void ieee80211_scan_cancel(struct ieee80211_local *local);
1266void ieee80211_run_deferred_scan(struct ieee80211_local *local);
1242ieee80211_rx_result 1267ieee80211_rx_result
1243ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); 1268ieee80211_scan_rx(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb);
1244 1269
@@ -1251,9 +1276,6 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
1251 struct ieee802_11_elems *elems, 1276 struct ieee802_11_elems *elems,
1252 struct ieee80211_channel *channel, 1277 struct ieee80211_channel *channel,
1253 bool beacon); 1278 bool beacon);
1254struct ieee80211_bss *
1255ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq,
1256 u8 *ssid, u8 ssid_len);
1257void ieee80211_rx_bss_put(struct ieee80211_local *local, 1279void ieee80211_rx_bss_put(struct ieee80211_local *local,
1258 struct ieee80211_bss *bss); 1280 struct ieee80211_bss *bss);
1259 1281
@@ -1299,7 +1321,6 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1299 struct net_device *dev); 1321 struct net_device *dev);
1300 1322
1301/* HT */ 1323/* HT */
1302bool ieee80111_cfg_override_disables_ht40(struct ieee80211_sub_if_data *sdata);
1303void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata, 1324void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
1304 struct ieee80211_sta_ht_cap *ht_cap); 1325 struct ieee80211_sta_ht_cap *ht_cap);
1305void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata, 1326void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
@@ -1383,7 +1404,7 @@ static inline int __ieee80211_resume(struct ieee80211_hw *hw)
1383extern void *mac80211_wiphy_privid; /* for wiphy privid */ 1404extern void *mac80211_wiphy_privid; /* for wiphy privid */
1384u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len, 1405u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
1385 enum nl80211_iftype type); 1406 enum nl80211_iftype type);
1386int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, 1407int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
1387 int rate, int erp, int short_preamble); 1408 int rate, int erp, int short_preamble);
1388void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx, 1409void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx,
1389 struct ieee80211_hdr *hdr, const u8 *tsc, 1410 struct ieee80211_hdr *hdr, const u8 *tsc,
@@ -1429,13 +1450,17 @@ void ieee80211_wake_queue_by_reason(struct ieee80211_hw *hw, int queue,
1429 enum queue_stop_reason reason); 1450 enum queue_stop_reason reason);
1430void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue, 1451void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
1431 enum queue_stop_reason reason); 1452 enum queue_stop_reason reason);
1453void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue);
1432void ieee80211_add_pending_skb(struct ieee80211_local *local, 1454void ieee80211_add_pending_skb(struct ieee80211_local *local,
1433 struct sk_buff *skb); 1455 struct sk_buff *skb);
1434void ieee80211_add_pending_skbs(struct ieee80211_local *local,
1435 struct sk_buff_head *skbs);
1436void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local, 1456void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
1437 struct sk_buff_head *skbs, 1457 struct sk_buff_head *skbs,
1438 void (*fn)(void *data), void *data); 1458 void (*fn)(void *data), void *data);
1459static inline void ieee80211_add_pending_skbs(struct ieee80211_local *local,
1460 struct sk_buff_head *skbs)
1461{
1462 ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL);
1463}
1439 1464
1440void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata, 1465void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
1441 u16 transaction, u16 auth_alg, 1466 u16 transaction, u16 auth_alg,
@@ -1460,7 +1485,7 @@ void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
1460 const u8 *supp_rates); 1485 const u8 *supp_rates);
1461u32 ieee80211_sta_get_rates(struct ieee80211_local *local, 1486u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
1462 struct ieee802_11_elems *elems, 1487 struct ieee802_11_elems *elems,
1463 enum ieee80211_band band); 1488 enum ieee80211_band band, u32 *basic_rates);
1464int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata, 1489int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
1465 enum ieee80211_smps_mode smps_mode); 1490 enum ieee80211_smps_mode smps_mode);
1466void ieee80211_recalc_smps(struct ieee80211_local *local); 1491void ieee80211_recalc_smps(struct ieee80211_local *local);
@@ -1470,10 +1495,9 @@ size_t ieee80211_ie_split(const u8 *ies, size_t ielen,
1470size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset); 1495size_t ieee80211_ie_split_vendor(const u8 *ies, size_t ielen, size_t offset);
1471u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap, 1496u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
1472 u16 cap); 1497 u16 cap);
1473u8 *ieee80211_ie_build_ht_info(u8 *pos, 1498u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
1474 struct ieee80211_sta_ht_cap *ht_cap, 1499 struct ieee80211_channel *channel,
1475 struct ieee80211_channel *channel, 1500 enum nl80211_channel_type channel_type);
1476 enum nl80211_channel_type channel_type);
1477 1501
1478/* internal work items */ 1502/* internal work items */
1479void ieee80211_work_init(struct ieee80211_local *local); 1503void ieee80211_work_init(struct ieee80211_local *local);
@@ -1501,10 +1525,7 @@ bool ieee80211_set_channel_type(struct ieee80211_local *local,
1501 struct ieee80211_sub_if_data *sdata, 1525 struct ieee80211_sub_if_data *sdata,
1502 enum nl80211_channel_type chantype); 1526 enum nl80211_channel_type chantype);
1503enum nl80211_channel_type 1527enum nl80211_channel_type
1504ieee80211_ht_info_to_channel_type(struct ieee80211_ht_info *ht_info); 1528ieee80211_ht_oper_to_channel_type(struct ieee80211_ht_operation *ht_oper);
1505enum nl80211_channel_type ieee80211_get_tx_channel_type(
1506 struct ieee80211_local *local,
1507 enum nl80211_channel_type channel_type);
1508 1529
1509#ifdef CONFIG_MAC80211_NOINLINE 1530#ifdef CONFIG_MAC80211_NOINLINE
1510#define debug_noinline noinline 1531#define debug_noinline noinline
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index c20051b7ffcd..3e05a8bfddf0 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -149,6 +149,35 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
149 return 0; 149 return 0;
150} 150}
151 151
152static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata)
153{
154 int n_queues = sdata->local->hw.queues;
155 int i;
156
157 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
158 if (WARN_ON_ONCE(sdata->vif.hw_queue[i] ==
159 IEEE80211_INVAL_HW_QUEUE))
160 return -EINVAL;
161 if (WARN_ON_ONCE(sdata->vif.hw_queue[i] >=
162 n_queues))
163 return -EINVAL;
164 }
165
166 if ((sdata->vif.type != NL80211_IFTYPE_AP) ||
167 !(sdata->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)) {
168 sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE;
169 return 0;
170 }
171
172 if (WARN_ON_ONCE(sdata->vif.cab_queue == IEEE80211_INVAL_HW_QUEUE))
173 return -EINVAL;
174
175 if (WARN_ON_ONCE(sdata->vif.cab_queue >= n_queues))
176 return -EINVAL;
177
178 return 0;
179}
180
152void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata, 181void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata,
153 const int offset) 182 const int offset)
154{ 183{
@@ -169,6 +198,81 @@ void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata,
169#undef ADJUST 198#undef ADJUST
170} 199}
171 200
201static void ieee80211_set_default_queues(struct ieee80211_sub_if_data *sdata)
202{
203 struct ieee80211_local *local = sdata->local;
204 int i;
205
206 for (i = 0; i < IEEE80211_NUM_ACS; i++) {
207 if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
208 sdata->vif.hw_queue[i] = IEEE80211_INVAL_HW_QUEUE;
209 else
210 sdata->vif.hw_queue[i] = i;
211 }
212 sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE;
213}
214
215static int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
216{
217 struct ieee80211_sub_if_data *sdata;
218 int ret;
219
220 if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))
221 return 0;
222
223 if (local->monitor_sdata)
224 return 0;
225
226 sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL);
227 if (!sdata)
228 return -ENOMEM;
229
230 /* set up data */
231 sdata->local = local;
232 sdata->vif.type = NL80211_IFTYPE_MONITOR;
233 snprintf(sdata->name, IFNAMSIZ, "%s-monitor",
234 wiphy_name(local->hw.wiphy));
235
236 ieee80211_set_default_queues(sdata);
237
238 ret = drv_add_interface(local, sdata);
239 if (WARN_ON(ret)) {
240 /* ok .. stupid driver, it asked for this! */
241 kfree(sdata);
242 return ret;
243 }
244
245 ret = ieee80211_check_queues(sdata);
246 if (ret) {
247 kfree(sdata);
248 return ret;
249 }
250
251 rcu_assign_pointer(local->monitor_sdata, sdata);
252
253 return 0;
254}
255
256static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
257{
258 struct ieee80211_sub_if_data *sdata;
259
260 if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))
261 return;
262
263 sdata = rtnl_dereference(local->monitor_sdata);
264
265 if (!sdata)
266 return;
267
268 rcu_assign_pointer(local->monitor_sdata, NULL);
269 synchronize_net();
270
271 drv_remove_interface(local, sdata);
272
273 kfree(sdata);
274}
275
172/* 276/*
173 * NOTE: Be very careful when changing this function, it must NOT return 277 * NOTE: Be very careful when changing this function, it must NOT return
174 * an error on interface type changes that have been pre-checked, so most 278 * an error on interface type changes that have been pre-checked, so most
@@ -246,15 +350,18 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
246 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN); 350 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
247 351
248 if (!is_valid_ether_addr(dev->dev_addr)) { 352 if (!is_valid_ether_addr(dev->dev_addr)) {
249 if (!local->open_count) 353 res = -EADDRNOTAVAIL;
250 drv_stop(local); 354 goto err_stop;
251 return -EADDRNOTAVAIL;
252 } 355 }
253 } 356 }
254 357
255 switch (sdata->vif.type) { 358 switch (sdata->vif.type) {
256 case NL80211_IFTYPE_AP_VLAN: 359 case NL80211_IFTYPE_AP_VLAN:
257 /* no need to tell driver */ 360 /* no need to tell driver, but set carrier */
361 if (rtnl_dereference(sdata->bss->beacon))
362 netif_carrier_on(dev);
363 else
364 netif_carrier_off(dev);
258 break; 365 break;
259 case NL80211_IFTYPE_MONITOR: 366 case NL80211_IFTYPE_MONITOR:
260 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) { 367 if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) {
@@ -262,6 +369,12 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
262 break; 369 break;
263 } 370 }
264 371
372 if (local->monitors == 0 && local->open_count == 0) {
373 res = ieee80211_add_virtual_monitor(local);
374 if (res)
375 goto err_stop;
376 }
377
265 /* must be before the call to ieee80211_configure_filter */ 378 /* must be before the call to ieee80211_configure_filter */
266 local->monitors++; 379 local->monitors++;
267 if (local->monitors == 1) { 380 if (local->monitors == 1) {
@@ -276,9 +389,14 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
276 break; 389 break;
277 default: 390 default:
278 if (coming_up) { 391 if (coming_up) {
392 ieee80211_del_virtual_monitor(local);
393
279 res = drv_add_interface(local, sdata); 394 res = drv_add_interface(local, sdata);
280 if (res) 395 if (res)
281 goto err_stop; 396 goto err_stop;
397 res = ieee80211_check_queues(sdata);
398 if (res)
399 goto err_del_interface;
282 } 400 }
283 401
284 if (sdata->vif.type == NL80211_IFTYPE_AP) { 402 if (sdata->vif.type == NL80211_IFTYPE_AP) {
@@ -294,7 +412,8 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
294 ieee80211_bss_info_change_notify(sdata, changed); 412 ieee80211_bss_info_change_notify(sdata, changed);
295 413
296 if (sdata->vif.type == NL80211_IFTYPE_STATION || 414 if (sdata->vif.type == NL80211_IFTYPE_STATION ||
297 sdata->vif.type == NL80211_IFTYPE_ADHOC) 415 sdata->vif.type == NL80211_IFTYPE_ADHOC ||
416 sdata->vif.type == NL80211_IFTYPE_AP)
298 netif_carrier_off(dev); 417 netif_carrier_off(dev);
299 else 418 else
300 netif_carrier_on(dev); 419 netif_carrier_on(dev);
@@ -366,6 +485,7 @@ static int ieee80211_do_open(struct net_device *dev, bool coming_up)
366 sdata->bss = NULL; 485 sdata->bss = NULL;
367 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 486 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
368 list_del(&sdata->u.vlan.list); 487 list_del(&sdata->u.vlan.list);
488 /* might already be clear but that doesn't matter */
369 clear_bit(SDATA_STATE_RUNNING, &sdata->state); 489 clear_bit(SDATA_STATE_RUNNING, &sdata->state);
370 return res; 490 return res;
371} 491}
@@ -508,6 +628,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
508 if (local->monitors == 0) { 628 if (local->monitors == 0) {
509 local->hw.conf.flags &= ~IEEE80211_CONF_MONITOR; 629 local->hw.conf.flags &= ~IEEE80211_CONF_MONITOR;
510 hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR; 630 hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR;
631 ieee80211_del_virtual_monitor(local);
511 } 632 }
512 633
513 ieee80211_adjust_monitor_flags(sdata, -1); 634 ieee80211_adjust_monitor_flags(sdata, -1);
@@ -581,6 +702,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
581 } 702 }
582 } 703 }
583 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); 704 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
705
706 if (local->monitors == local->open_count && local->monitors > 0)
707 ieee80211_add_virtual_monitor(local);
584} 708}
585 709
586static int ieee80211_stop(struct net_device *dev) 710static int ieee80211_stop(struct net_device *dev)
@@ -676,7 +800,7 @@ static u16 ieee80211_monitor_select_queue(struct net_device *dev,
676 struct ieee80211_hdr *hdr; 800 struct ieee80211_hdr *hdr;
677 struct ieee80211_radiotap_header *rtap = (void *)skb->data; 801 struct ieee80211_radiotap_header *rtap = (void *)skb->data;
678 802
679 if (local->hw.queues < 4) 803 if (local->hw.queues < IEEE80211_NUM_ACS)
680 return 0; 804 return 0;
681 805
682 if (skb->len < 4 || 806 if (skb->len < 4 ||
@@ -907,6 +1031,18 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
907 ieee80211_debugfs_add_netdev(sdata); 1031 ieee80211_debugfs_add_netdev(sdata);
908} 1032}
909 1033
1034static void ieee80211_clean_sdata(struct ieee80211_sub_if_data *sdata)
1035{
1036 switch (sdata->vif.type) {
1037 case NL80211_IFTYPE_MESH_POINT:
1038 mesh_path_flush_by_iface(sdata);
1039 break;
1040
1041 default:
1042 break;
1043 }
1044}
1045
910static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata, 1046static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
911 enum nl80211_iftype type) 1047 enum nl80211_iftype type)
912{ 1048{
@@ -970,6 +1106,13 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
970 if (ret) 1106 if (ret)
971 type = sdata->vif.type; 1107 type = sdata->vif.type;
972 1108
1109 /*
1110 * Ignore return value here, there's not much we can do since
1111 * the driver changed the interface type internally already.
1112 * The warnings will hopefully make driver authors fix it :-)
1113 */
1114 ieee80211_check_queues(sdata);
1115
973 ieee80211_setup_sdata(sdata, type); 1116 ieee80211_setup_sdata(sdata, type);
974 1117
975 err = ieee80211_do_open(sdata->dev, false); 1118 err = ieee80211_do_open(sdata->dev, false);
@@ -1133,11 +1276,15 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
1133 struct net_device *ndev; 1276 struct net_device *ndev;
1134 struct ieee80211_sub_if_data *sdata = NULL; 1277 struct ieee80211_sub_if_data *sdata = NULL;
1135 int ret, i; 1278 int ret, i;
1279 int txqs = 1;
1136 1280
1137 ASSERT_RTNL(); 1281 ASSERT_RTNL();
1138 1282
1283 if (local->hw.queues >= IEEE80211_NUM_ACS)
1284 txqs = IEEE80211_NUM_ACS;
1285
1139 ndev = alloc_netdev_mqs(sizeof(*sdata) + local->hw.vif_data_size, 1286 ndev = alloc_netdev_mqs(sizeof(*sdata) + local->hw.vif_data_size,
1140 name, ieee80211_if_setup, local->hw.queues, 1); 1287 name, ieee80211_if_setup, txqs, 1);
1141 if (!ndev) 1288 if (!ndev)
1142 return -ENOMEM; 1289 return -ENOMEM;
1143 dev_net_set(ndev, wiphy_net(local->hw.wiphy)); 1290 dev_net_set(ndev, wiphy_net(local->hw.wiphy));
@@ -1192,6 +1339,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
1192 sizeof(sdata->rc_rateidx_mcs_mask[i])); 1339 sizeof(sdata->rc_rateidx_mcs_mask[i]));
1193 } 1340 }
1194 1341
1342 ieee80211_set_default_queues(sdata);
1343
1195 /* setup type-dependent data */ 1344 /* setup type-dependent data */
1196 ieee80211_setup_sdata(sdata, type); 1345 ieee80211_setup_sdata(sdata, type);
1197 1346
@@ -1227,8 +1376,8 @@ void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata)
1227 list_del_rcu(&sdata->list); 1376 list_del_rcu(&sdata->list);
1228 mutex_unlock(&sdata->local->iflist_mtx); 1377 mutex_unlock(&sdata->local->iflist_mtx);
1229 1378
1230 if (ieee80211_vif_is_mesh(&sdata->vif)) 1379 /* clean up type-dependent data */
1231 mesh_path_flush_by_iface(sdata); 1380 ieee80211_clean_sdata(sdata);
1232 1381
1233 synchronize_rcu(); 1382 synchronize_rcu();
1234 unregister_netdevice(sdata->dev); 1383 unregister_netdevice(sdata->dev);
@@ -1249,8 +1398,7 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
1249 list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { 1398 list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
1250 list_del(&sdata->list); 1399 list_del(&sdata->list);
1251 1400
1252 if (ieee80211_vif_is_mesh(&sdata->vif)) 1401 ieee80211_clean_sdata(sdata);
1253 mesh_path_flush_by_iface(sdata);
1254 1402
1255 unregister_netdevice_queue(sdata->dev, &unreg_list); 1403 unregister_netdevice_queue(sdata->dev, &unreg_list);
1256 } 1404 }
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 16336480c631..b70f7f09da61 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -47,7 +47,8 @@ void ieee80211_configure_filter(struct ieee80211_local *local)
47 if (atomic_read(&local->iff_allmultis)) 47 if (atomic_read(&local->iff_allmultis))
48 new_flags |= FIF_ALLMULTI; 48 new_flags |= FIF_ALLMULTI;
49 49
50 if (local->monitors || test_bit(SCAN_SW_SCANNING, &local->scanning)) 50 if (local->monitors || test_bit(SCAN_SW_SCANNING, &local->scanning) ||
51 test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning))
51 new_flags |= FIF_BCN_PRBRESP_PROMISC; 52 new_flags |= FIF_BCN_PRBRESP_PROMISC;
52 53
53 if (local->fif_probe_req || local->probe_req_reg) 54 if (local->fif_probe_req || local->probe_req_reg)
@@ -148,6 +149,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
148 } 149 }
149 150
150 if (test_bit(SCAN_SW_SCANNING, &local->scanning) || 151 if (test_bit(SCAN_SW_SCANNING, &local->scanning) ||
152 test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) ||
151 test_bit(SCAN_HW_SCANNING, &local->scanning)) 153 test_bit(SCAN_HW_SCANNING, &local->scanning))
152 power = chan->max_power; 154 power = chan->max_power;
153 else 155 else
@@ -557,8 +559,10 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
557 WIPHY_FLAG_4ADDR_AP | 559 WIPHY_FLAG_4ADDR_AP |
558 WIPHY_FLAG_4ADDR_STATION | 560 WIPHY_FLAG_4ADDR_STATION |
559 WIPHY_FLAG_REPORTS_OBSS | 561 WIPHY_FLAG_REPORTS_OBSS |
560 WIPHY_FLAG_OFFCHAN_TX | 562 WIPHY_FLAG_OFFCHAN_TX;
561 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; 563
564 if (ops->remain_on_channel)
565 wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
562 566
563 wiphy->features = NL80211_FEATURE_SK_TX_STATUS | 567 wiphy->features = NL80211_FEATURE_SK_TX_STATUS |
564 NL80211_FEATURE_HT_IBSS; 568 NL80211_FEATURE_HT_IBSS;
@@ -589,6 +593,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
589 local->hw.max_report_rates = 0; 593 local->hw.max_report_rates = 0;
590 local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; 594 local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
591 local->hw.max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF; 595 local->hw.max_tx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
596 local->hw.offchannel_tx_hw_queue = IEEE80211_INVAL_HW_QUEUE;
592 local->hw.conf.long_frame_max_tx_count = wiphy->retry_long; 597 local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
593 local->hw.conf.short_frame_max_tx_count = wiphy->retry_short; 598 local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
594 local->user_power_level = -1; 599 local->user_power_level = -1;
@@ -685,6 +690,11 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
685 WLAN_CIPHER_SUITE_AES_CMAC 690 WLAN_CIPHER_SUITE_AES_CMAC
686 }; 691 };
687 692
693 if (hw->flags & IEEE80211_HW_QUEUE_CONTROL &&
694 (local->hw.offchannel_tx_hw_queue == IEEE80211_INVAL_HW_QUEUE ||
695 local->hw.offchannel_tx_hw_queue >= local->hw.queues))
696 return -EINVAL;
697
688 if ((hw->wiphy->wowlan.flags || hw->wiphy->wowlan.n_patterns) 698 if ((hw->wiphy->wowlan.flags || hw->wiphy->wowlan.n_patterns)
689#ifdef CONFIG_PM 699#ifdef CONFIG_PM
690 && (!local->ops->suspend || !local->ops->resume) 700 && (!local->ops->suspend || !local->ops->resume)
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index e5fbb7cf3562..598a96a3a051 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -13,9 +13,6 @@
13#include "ieee80211_i.h" 13#include "ieee80211_i.h"
14#include "mesh.h" 14#include "mesh.h"
15 15
16#define MESHCONF_CAPAB_ACCEPT_PLINKS 0x01
17#define MESHCONF_CAPAB_FORWARDING 0x08
18
19#define TMR_RUNNING_HK 0 16#define TMR_RUNNING_HK 0
20#define TMR_RUNNING_MP 1 17#define TMR_RUNNING_MP 1
21#define TMR_RUNNING_MPR 2 18#define TMR_RUNNING_MPR 2
@@ -67,16 +64,18 @@ static void ieee80211_mesh_housekeeping_timer(unsigned long data)
67/** 64/**
68 * mesh_matches_local - check if the config of a mesh point matches ours 65 * mesh_matches_local - check if the config of a mesh point matches ours
69 * 66 *
70 * @ie: information elements of a management frame from the mesh peer
71 * @sdata: local mesh subif 67 * @sdata: local mesh subif
68 * @ie: information elements of a management frame from the mesh peer
72 * 69 *
73 * This function checks if the mesh configuration of a mesh point matches the 70 * This function checks if the mesh configuration of a mesh point matches the
74 * local mesh configuration, i.e. if both nodes belong to the same mesh network. 71 * local mesh configuration, i.e. if both nodes belong to the same mesh network.
75 */ 72 */
76bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_data *sdata) 73bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
74 struct ieee802_11_elems *ie)
77{ 75{
78 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 76 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
79 struct ieee80211_local *local = sdata->local; 77 struct ieee80211_local *local = sdata->local;
78 u32 basic_rates = 0;
80 79
81 /* 80 /*
82 * As support for each feature is added, check for matching 81 * As support for each feature is added, check for matching
@@ -97,10 +96,16 @@ bool mesh_matches_local(struct ieee802_11_elems *ie, struct ieee80211_sub_if_dat
97 (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth))) 96 (ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth)))
98 goto mismatch; 97 goto mismatch;
99 98
99 ieee80211_sta_get_rates(local, ie, local->oper_channel->band,
100 &basic_rates);
101
102 if (sdata->vif.bss_conf.basic_rates != basic_rates)
103 goto mismatch;
104
100 /* disallow peering with mismatched channel types for now */ 105 /* disallow peering with mismatched channel types for now */
101 if (ie->ht_info_elem && 106 if (ie->ht_operation &&
102 (local->_oper_channel_type != 107 (local->_oper_channel_type !=
103 ieee80211_ht_info_to_channel_type(ie->ht_info_elem))) 108 ieee80211_ht_oper_to_channel_type(ie->ht_operation)))
104 goto mismatch; 109 goto mismatch;
105 110
106 return true; 111 return true;
@@ -251,8 +256,10 @@ mesh_add_meshconf_ie(struct sk_buff *skb, struct ieee80211_sub_if_data *sdata)
251 /* Mesh capability */ 256 /* Mesh capability */
252 ifmsh->accepting_plinks = mesh_plink_availables(sdata); 257 ifmsh->accepting_plinks = mesh_plink_availables(sdata);
253 *pos = MESHCONF_CAPAB_FORWARDING; 258 *pos = MESHCONF_CAPAB_FORWARDING;
254 *pos++ |= ifmsh->accepting_plinks ? 259 *pos |= ifmsh->accepting_plinks ?
255 MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00; 260 MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00;
261 *pos++ |= ifmsh->adjusting_tbtt ?
262 MESHCONF_CAPAB_TBTT_ADJUSTING : 0x00;
256 *pos++ = 0x00; 263 *pos++ = 0x00;
257 264
258 return 0; 265 return 0;
@@ -371,7 +378,7 @@ int mesh_add_ht_cap_ie(struct sk_buff *skb,
371 return 0; 378 return 0;
372} 379}
373 380
374int mesh_add_ht_info_ie(struct sk_buff *skb, 381int mesh_add_ht_oper_ie(struct sk_buff *skb,
375 struct ieee80211_sub_if_data *sdata) 382 struct ieee80211_sub_if_data *sdata)
376{ 383{
377 struct ieee80211_local *local = sdata->local; 384 struct ieee80211_local *local = sdata->local;
@@ -385,11 +392,11 @@ int mesh_add_ht_info_ie(struct sk_buff *skb,
385 if (!ht_cap->ht_supported || channel_type == NL80211_CHAN_NO_HT) 392 if (!ht_cap->ht_supported || channel_type == NL80211_CHAN_NO_HT)
386 return 0; 393 return 0;
387 394
388 if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_info)) 395 if (skb_tailroom(skb) < 2 + sizeof(struct ieee80211_ht_operation))
389 return -ENOMEM; 396 return -ENOMEM;
390 397
391 pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_info)); 398 pos = skb_put(skb, 2 + sizeof(struct ieee80211_ht_operation));
392 ieee80211_ie_build_ht_info(pos, ht_cap, channel, channel_type); 399 ieee80211_ie_build_ht_oper(pos, ht_cap, channel, channel_type);
393 400
394 return 0; 401 return 0;
395} 402}
@@ -573,14 +580,21 @@ void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
573 ieee80211_configure_filter(local); 580 ieee80211_configure_filter(local);
574 581
575 ifmsh->mesh_cc_id = 0; /* Disabled */ 582 ifmsh->mesh_cc_id = 0; /* Disabled */
576 ifmsh->mesh_sp_id = 0; /* Neighbor Offset */
577 ifmsh->mesh_auth_id = 0; /* Disabled */ 583 ifmsh->mesh_auth_id = 0; /* Disabled */
584 /* register sync ops from extensible synchronization framework */
585 ifmsh->sync_ops = ieee80211_mesh_sync_ops_get(ifmsh->mesh_sp_id);
586 ifmsh->adjusting_tbtt = false;
587 ifmsh->sync_offset_clockdrift_max = 0;
578 set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags); 588 set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags);
579 ieee80211_mesh_root_setup(ifmsh); 589 ieee80211_mesh_root_setup(ifmsh);
580 ieee80211_queue_work(&local->hw, &sdata->work); 590 ieee80211_queue_work(&local->hw, &sdata->work);
581 sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL; 591 sdata->vif.bss_conf.beacon_int = MESH_DEFAULT_BEACON_INTERVAL;
592 sdata->vif.bss_conf.basic_rates =
593 ieee80211_mandatory_rates(sdata->local,
594 sdata->local->hw.conf.channel->band);
582 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON | 595 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON |
583 BSS_CHANGED_BEACON_ENABLED | 596 BSS_CHANGED_BEACON_ENABLED |
597 BSS_CHANGED_BASIC_RATES |
584 BSS_CHANGED_BEACON_INT); 598 BSS_CHANGED_BEACON_INT);
585} 599}
586 600
@@ -616,9 +630,9 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
616 struct ieee80211_rx_status *rx_status) 630 struct ieee80211_rx_status *rx_status)
617{ 631{
618 struct ieee80211_local *local = sdata->local; 632 struct ieee80211_local *local = sdata->local;
633 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
619 struct ieee802_11_elems elems; 634 struct ieee802_11_elems elems;
620 struct ieee80211_channel *channel; 635 struct ieee80211_channel *channel;
621 u32 supp_rates = 0;
622 size_t baselen; 636 size_t baselen;
623 int freq; 637 int freq;
624 enum ieee80211_band band = rx_status->band; 638 enum ieee80211_band band = rx_status->band;
@@ -650,10 +664,12 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
650 return; 664 return;
651 665
652 if (elems.mesh_id && elems.mesh_config && 666 if (elems.mesh_id && elems.mesh_config &&
653 mesh_matches_local(&elems, sdata)) { 667 mesh_matches_local(sdata, &elems))
654 supp_rates = ieee80211_sta_get_rates(local, &elems, band); 668 mesh_neighbour_update(sdata, mgmt->sa, &elems);
655 mesh_neighbour_update(mgmt->sa, supp_rates, sdata, &elems); 669
656 } 670 if (ifmsh->sync_ops)
671 ifmsh->sync_ops->rx_bcn_presp(sdata,
672 stype, mgmt, &elems, rx_status);
657} 673}
658 674
659static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata, 675static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata,
@@ -721,6 +737,9 @@ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata)
721 737
722 if (test_and_clear_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags)) 738 if (test_and_clear_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags))
723 ieee80211_mesh_rootpath(sdata); 739 ieee80211_mesh_rootpath(sdata);
740
741 if (test_and_clear_bit(MESH_WORK_DRIFT_ADJUST, &ifmsh->wrkq_flags))
742 mesh_sync_adjust_tbtt(sdata);
724} 743}
725 744
726void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) 745void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
@@ -761,4 +780,5 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
761 (unsigned long) sdata); 780 (unsigned long) sdata);
762 INIT_LIST_HEAD(&ifmsh->preq_queue.list); 781 INIT_LIST_HEAD(&ifmsh->preq_queue.list);
763 spin_lock_init(&ifmsh->mesh_preq_queue_lock); 782 spin_lock_init(&ifmsh->mesh_preq_queue_lock);
783 spin_lock_init(&ifmsh->sync_offset_lock);
764} 784}
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 8d53b71378e3..e3642756f8f4 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -19,6 +19,20 @@
19/* Data structures */ 19/* Data structures */
20 20
21/** 21/**
22 * enum mesh_config_capab_flags - mesh config IE capability flags
23 *
24 * @MESHCONF_CAPAB_ACCEPT_PLINKS: STA is willing to establish
25 * additional mesh peerings with other mesh STAs
26 * @MESHCONF_CAPAB_FORWARDING: the STA forwards MSDUs
27 * @MESHCONF_CAPAB_TBTT_ADJUSTING: TBTT adjustment procedure is ongoing
28 */
29enum mesh_config_capab_flags {
30 MESHCONF_CAPAB_ACCEPT_PLINKS = BIT(0),
31 MESHCONF_CAPAB_FORWARDING = BIT(3),
32 MESHCONF_CAPAB_TBTT_ADJUSTING = BIT(5),
33};
34
35/**
22 * enum mesh_path_flags - mac80211 mesh path flags 36 * enum mesh_path_flags - mac80211 mesh path flags
23 * 37 *
24 * 38 *
@@ -56,12 +70,15 @@ enum mesh_path_flags {
56 * @MESH_WORK_GROW_MPP_TABLE: the mesh portals table is full and needs to 70 * @MESH_WORK_GROW_MPP_TABLE: the mesh portals table is full and needs to
57 * grow 71 * grow
58 * @MESH_WORK_ROOT: the mesh root station needs to send a frame 72 * @MESH_WORK_ROOT: the mesh root station needs to send a frame
73 * @MESH_WORK_DRIFT_ADJUST: time to compensate for clock drift relative to other
74 * mesh nodes
59 */ 75 */
60enum mesh_deferred_task_flags { 76enum mesh_deferred_task_flags {
61 MESH_WORK_HOUSEKEEPING, 77 MESH_WORK_HOUSEKEEPING,
62 MESH_WORK_GROW_MPATH_TABLE, 78 MESH_WORK_GROW_MPATH_TABLE,
63 MESH_WORK_GROW_MPP_TABLE, 79 MESH_WORK_GROW_MPP_TABLE,
64 MESH_WORK_ROOT, 80 MESH_WORK_ROOT,
81 MESH_WORK_DRIFT_ADJUST,
65}; 82};
66 83
67/** 84/**
@@ -86,6 +103,7 @@ enum mesh_deferred_task_flags {
86 * mpath itself. No need to take this lock when adding or removing 103 * mpath itself. No need to take this lock when adding or removing
87 * an mpath to a hash bucket on a path table. 104 * an mpath to a hash bucket on a path table.
88 * @rann_snd_addr: the RANN sender address 105 * @rann_snd_addr: the RANN sender address
106 * @rann_metric: the aggregated path metric towards the root node
89 * @is_root: the destination station of this path is a root node 107 * @is_root: the destination station of this path is a root node
90 * @is_gate: the destination station of this path is a mesh gate 108 * @is_gate: the destination station of this path is a mesh gate
91 * 109 *
@@ -112,6 +130,7 @@ struct mesh_path {
112 enum mesh_path_flags flags; 130 enum mesh_path_flags flags;
113 spinlock_t state_lock; 131 spinlock_t state_lock;
114 u8 rann_snd_addr[ETH_ALEN]; 132 u8 rann_snd_addr[ETH_ALEN];
133 u32 rann_metric;
115 bool is_root; 134 bool is_root;
116 bool is_gate; 135 bool is_gate;
117}; 136};
@@ -203,8 +222,8 @@ int ieee80211_new_mesh_header(struct ieee80211s_hdr *meshhdr,
203 char *addr6); 222 char *addr6);
204int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr, 223int mesh_rmc_check(u8 *addr, struct ieee80211s_hdr *mesh_hdr,
205 struct ieee80211_sub_if_data *sdata); 224 struct ieee80211_sub_if_data *sdata);
206bool mesh_matches_local(struct ieee802_11_elems *ie, 225bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
207 struct ieee80211_sub_if_data *sdata); 226 struct ieee802_11_elems *ie);
208void mesh_ids_set_default(struct ieee80211_if_mesh *mesh); 227void mesh_ids_set_default(struct ieee80211_if_mesh *mesh);
209void mesh_mgmt_ies_add(struct sk_buff *skb, 228void mesh_mgmt_ies_add(struct sk_buff *skb,
210 struct ieee80211_sub_if_data *sdata); 229 struct ieee80211_sub_if_data *sdata);
@@ -220,7 +239,7 @@ int mesh_add_ds_params_ie(struct sk_buff *skb,
220 struct ieee80211_sub_if_data *sdata); 239 struct ieee80211_sub_if_data *sdata);
221int mesh_add_ht_cap_ie(struct sk_buff *skb, 240int mesh_add_ht_cap_ie(struct sk_buff *skb,
222 struct ieee80211_sub_if_data *sdata); 241 struct ieee80211_sub_if_data *sdata);
223int mesh_add_ht_info_ie(struct sk_buff *skb, 242int mesh_add_ht_oper_ie(struct sk_buff *skb,
224 struct ieee80211_sub_if_data *sdata); 243 struct ieee80211_sub_if_data *sdata);
225void mesh_rmc_free(struct ieee80211_sub_if_data *sdata); 244void mesh_rmc_free(struct ieee80211_sub_if_data *sdata);
226int mesh_rmc_init(struct ieee80211_sub_if_data *sdata); 245int mesh_rmc_init(struct ieee80211_sub_if_data *sdata);
@@ -232,6 +251,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata);
232void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata); 251void ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata);
233void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata); 252void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata);
234void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh); 253void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh);
254struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method);
235 255
236/* Mesh paths */ 256/* Mesh paths */
237int mesh_nexthop_lookup(struct sk_buff *skb, 257int mesh_nexthop_lookup(struct sk_buff *skb,
@@ -256,9 +276,9 @@ int mesh_path_add_gate(struct mesh_path *mpath);
256int mesh_path_send_to_gates(struct mesh_path *mpath); 276int mesh_path_send_to_gates(struct mesh_path *mpath);
257int mesh_gate_num(struct ieee80211_sub_if_data *sdata); 277int mesh_gate_num(struct ieee80211_sub_if_data *sdata);
258/* Mesh plinks */ 278/* Mesh plinks */
259void mesh_neighbour_update(u8 *hw_addr, u32 rates, 279void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
260 struct ieee80211_sub_if_data *sdata, 280 u8 *hw_addr,
261 struct ieee802_11_elems *ie); 281 struct ieee802_11_elems *ie);
262bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie); 282bool mesh_peer_accepts_plinks(struct ieee802_11_elems *ie);
263void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata); 283void mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata);
264void mesh_plink_broken(struct sta_info *sta); 284void mesh_plink_broken(struct sta_info *sta);
@@ -284,7 +304,6 @@ void mesh_pathtbl_unregister(void);
284int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata); 304int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata);
285void mesh_path_timer(unsigned long data); 305void mesh_path_timer(unsigned long data);
286void mesh_path_flush_by_nexthop(struct sta_info *sta); 306void mesh_path_flush_by_nexthop(struct sta_info *sta);
287void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata);
288void mesh_path_discard_frame(struct sk_buff *skb, 307void mesh_path_discard_frame(struct sk_buff *skb,
289 struct ieee80211_sub_if_data *sdata); 308 struct ieee80211_sub_if_data *sdata);
290void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata); 309void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata);
@@ -325,6 +344,8 @@ void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata);
325void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata); 344void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata);
326void mesh_plink_quiesce(struct sta_info *sta); 345void mesh_plink_quiesce(struct sta_info *sta);
327void mesh_plink_restart(struct sta_info *sta); 346void mesh_plink_restart(struct sta_info *sta);
347void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata);
348void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata);
328#else 349#else
329#define mesh_allocated 0 350#define mesh_allocated 0
330static inline void 351static inline void
@@ -337,6 +358,8 @@ static inline void mesh_plink_quiesce(struct sta_info *sta) {}
337static inline void mesh_plink_restart(struct sta_info *sta) {} 358static inline void mesh_plink_restart(struct sta_info *sta) {}
338static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata) 359static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
339{ return false; } 360{ return false; }
361static inline void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
362{}
340#endif 363#endif
341 364
342#endif /* IEEE80211S_H */ 365#endif /* IEEE80211S_H */
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 1c6f3d02aebf..503016f58631 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -86,8 +86,8 @@ static inline u32 u16_field_get(u8 *preq_elem, int offset, bool ae)
86#define PERR_IE_TARGET_RCODE(x) u16_field_get(x, 13, 0) 86#define PERR_IE_TARGET_RCODE(x) u16_field_get(x, 13, 0)
87 87
88#define MSEC_TO_TU(x) (x*1000/1024) 88#define MSEC_TO_TU(x) (x*1000/1024)
89#define SN_GT(x, y) ((long) (y) - (long) (x) < 0) 89#define SN_GT(x, y) ((s32)(y - x) < 0)
90#define SN_LT(x, y) ((long) (x) - (long) (y) < 0) 90#define SN_LT(x, y) ((s32)(x - y) < 0)
91 91
92#define net_traversal_jiffies(s) \ 92#define net_traversal_jiffies(s) \
93 msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime) 93 msecs_to_jiffies(s->u.mesh.mshcfg.dot11MeshHWMPnetDiameterTraversalTime)
@@ -732,11 +732,12 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
732 struct ieee80211_rann_ie *rann) 732 struct ieee80211_rann_ie *rann)
733{ 733{
734 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 734 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
735 struct ieee80211_local *local = sdata->local;
736 struct sta_info *sta;
735 struct mesh_path *mpath; 737 struct mesh_path *mpath;
736 u8 ttl, flags, hopcount; 738 u8 ttl, flags, hopcount;
737 u8 *orig_addr; 739 u8 *orig_addr;
738 u32 orig_sn, metric; 740 u32 orig_sn, metric, metric_txsta, interval;
739 u32 interval = ifmsh->mshcfg.dot11MeshHWMPRannInterval;
740 bool root_is_gate; 741 bool root_is_gate;
741 742
742 ttl = rann->rann_ttl; 743 ttl = rann->rann_ttl;
@@ -748,10 +749,11 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
748 flags = rann->rann_flags; 749 flags = rann->rann_flags;
749 root_is_gate = !!(flags & RANN_FLAG_IS_GATE); 750 root_is_gate = !!(flags & RANN_FLAG_IS_GATE);
750 orig_addr = rann->rann_addr; 751 orig_addr = rann->rann_addr;
751 orig_sn = rann->rann_seq; 752 orig_sn = le32_to_cpu(rann->rann_seq);
753 interval = le32_to_cpu(rann->rann_interval);
752 hopcount = rann->rann_hopcount; 754 hopcount = rann->rann_hopcount;
753 hopcount++; 755 hopcount++;
754 metric = rann->rann_metric; 756 metric = le32_to_cpu(rann->rann_metric);
755 757
756 /* Ignore our own RANNs */ 758 /* Ignore our own RANNs */
757 if (compare_ether_addr(orig_addr, sdata->vif.addr) == 0) 759 if (compare_ether_addr(orig_addr, sdata->vif.addr) == 0)
@@ -761,6 +763,14 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
761 orig_addr, mgmt->sa, root_is_gate); 763 orig_addr, mgmt->sa, root_is_gate);
762 764
763 rcu_read_lock(); 765 rcu_read_lock();
766 sta = sta_info_get(sdata, mgmt->sa);
767 if (!sta) {
768 rcu_read_unlock();
769 return;
770 }
771
772 metric_txsta = airtime_link_metric_get(local, sta);
773
764 mpath = mesh_path_lookup(orig_addr, sdata); 774 mpath = mesh_path_lookup(orig_addr, sdata);
765 if (!mpath) { 775 if (!mpath) {
766 mesh_path_add(orig_addr, sdata); 776 mesh_path_add(orig_addr, sdata);
@@ -780,18 +790,21 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
780 mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH); 790 mesh_queue_preq(mpath, PREQ_Q_F_START | PREQ_Q_F_REFRESH);
781 } 791 }
782 792
783 if (mpath->sn < orig_sn && ifmsh->mshcfg.dot11MeshForwarding) { 793 if ((SN_LT(mpath->sn, orig_sn) || (mpath->sn == orig_sn &&
794 metric < mpath->rann_metric)) && ifmsh->mshcfg.dot11MeshForwarding) {
784 mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr, 795 mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
785 cpu_to_le32(orig_sn), 796 cpu_to_le32(orig_sn),
786 0, NULL, 0, broadcast_addr, 797 0, NULL, 0, broadcast_addr,
787 hopcount, ttl, cpu_to_le32(interval), 798 hopcount, ttl, cpu_to_le32(interval),
788 cpu_to_le32(metric + mpath->metric), 799 cpu_to_le32(metric + metric_txsta),
789 0, sdata); 800 0, sdata);
790 mpath->sn = orig_sn; 801 mpath->sn = orig_sn;
802 mpath->rann_metric = metric + metric_txsta;
803 /* Recording RANNs sender address to send individually
804 * addressed PREQs destined for root mesh STA */
805 memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN);
791 } 806 }
792 807
793 /* Using individually addressed PREQ for root node */
794 memcpy(mpath->rann_snd_addr, mgmt->sa, ETH_ALEN);
795 mpath->is_root = true; 808 mpath->is_root = true;
796 809
797 if (root_is_gate) 810 if (root_is_gate)
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 49aaefd99635..baa6096c66b4 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -538,6 +538,8 @@ int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
538 538
539 read_lock_bh(&pathtbl_resize_lock); 539 read_lock_bh(&pathtbl_resize_lock);
540 memcpy(new_mpath->dst, dst, ETH_ALEN); 540 memcpy(new_mpath->dst, dst, ETH_ALEN);
541 memset(new_mpath->rann_snd_addr, 0xff, ETH_ALEN);
542 new_mpath->is_root = false;
541 new_mpath->sdata = sdata; 543 new_mpath->sdata = sdata;
542 new_mpath->flags = 0; 544 new_mpath->flags = 0;
543 skb_queue_head_init(&new_mpath->frame_queue); 545 skb_queue_head_init(&new_mpath->frame_queue);
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 4e53c4cbca9e..1ff2a5c63e43 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -82,20 +82,14 @@ static inline void mesh_plink_fsm_restart(struct sta_info *sta)
82} 82}
83 83
84/* 84/*
85 * NOTE: This is just an alias for sta_info_alloc(), see notes 85 * Allocate mesh sta entry and insert into station table
86 * on it in the lifecycle management section!
87 */ 86 */
88static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata, 87static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
89 u8 *hw_addr, u32 rates, 88 u8 *hw_addr)
90 struct ieee802_11_elems *elems)
91{ 89{
92 struct ieee80211_local *local = sdata->local;
93 struct ieee80211_supported_band *sband;
94 struct sta_info *sta; 90 struct sta_info *sta;
95 91
96 sband = local->hw.wiphy->bands[local->oper_channel->band]; 92 if (sdata->local->num_sta >= MESH_MAX_PLINKS)
97
98 if (local->num_sta >= MESH_MAX_PLINKS)
99 return NULL; 93 return NULL;
100 94
101 sta = sta_info_alloc(sdata, hw_addr, GFP_KERNEL); 95 sta = sta_info_alloc(sdata, hw_addr, GFP_KERNEL);
@@ -108,12 +102,8 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
108 102
109 set_sta_flag(sta, WLAN_STA_WME); 103 set_sta_flag(sta, WLAN_STA_WME);
110 104
111 sta->sta.supp_rates[local->hw.conf.channel->band] = rates; 105 if (sta_info_insert(sta))
112 if (elems->ht_cap_elem) 106 return NULL;
113 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
114 elems->ht_cap_elem,
115 &sta->sta.ht_cap);
116 rate_control_rate_init(sta);
117 107
118 return sta; 108 return sta;
119} 109}
@@ -187,7 +177,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
187 2 + sdata->u.mesh.mesh_id_len + 177 2 + sdata->u.mesh.mesh_id_len +
188 2 + sizeof(struct ieee80211_meshconf_ie) + 178 2 + sizeof(struct ieee80211_meshconf_ie) +
189 2 + sizeof(struct ieee80211_ht_cap) + 179 2 + sizeof(struct ieee80211_ht_cap) +
190 2 + sizeof(struct ieee80211_ht_info) + 180 2 + sizeof(struct ieee80211_ht_operation) +
191 2 + 8 + /* peering IE */ 181 2 + 8 + /* peering IE */
192 sdata->u.mesh.ie_len); 182 sdata->u.mesh.ie_len);
193 if (!skb) 183 if (!skb)
@@ -212,8 +202,8 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
212 pos = skb_put(skb, 2); 202 pos = skb_put(skb, 2);
213 memcpy(pos + 2, &plid, 2); 203 memcpy(pos + 2, &plid, 2);
214 } 204 }
215 if (ieee80211_add_srates_ie(&sdata->vif, skb) || 205 if (ieee80211_add_srates_ie(&sdata->vif, skb, true) ||
216 ieee80211_add_ext_srates_ie(&sdata->vif, skb) || 206 ieee80211_add_ext_srates_ie(&sdata->vif, skb, true) ||
217 mesh_add_rsn_ie(skb, sdata) || 207 mesh_add_rsn_ie(skb, sdata) ||
218 mesh_add_meshid_ie(skb, sdata) || 208 mesh_add_meshid_ie(skb, sdata) ||
219 mesh_add_meshconf_ie(skb, sdata)) 209 mesh_add_meshconf_ie(skb, sdata))
@@ -263,7 +253,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
263 253
264 if (action != WLAN_SP_MESH_PEERING_CLOSE) { 254 if (action != WLAN_SP_MESH_PEERING_CLOSE) {
265 if (mesh_add_ht_cap_ie(skb, sdata) || 255 if (mesh_add_ht_cap_ie(skb, sdata) ||
266 mesh_add_ht_info_ie(skb, sdata)) 256 mesh_add_ht_oper_ie(skb, sdata))
267 return -1; 257 return -1;
268 } 258 }
269 259
@@ -274,43 +264,79 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
274 return 0; 264 return 0;
275} 265}
276 266
277void mesh_neighbour_update(u8 *hw_addr, u32 rates, 267/* mesh_peer_init - initialize new mesh peer and return resulting sta_info
278 struct ieee80211_sub_if_data *sdata, 268 *
279 struct ieee802_11_elems *elems) 269 * @sdata: local meshif
270 * @addr: peer's address
271 * @elems: IEs from beacon or mesh peering frame
272 *
273 * call under RCU
274 */
275static struct sta_info *mesh_peer_init(struct ieee80211_sub_if_data *sdata,
276 u8 *addr,
277 struct ieee802_11_elems *elems)
280{ 278{
281 struct ieee80211_local *local = sdata->local; 279 struct ieee80211_local *local = sdata->local;
280 enum ieee80211_band band = local->oper_channel->band;
281 struct ieee80211_supported_band *sband;
282 u32 rates, basic_rates = 0;
282 struct sta_info *sta; 283 struct sta_info *sta;
283 284
284 rcu_read_lock(); 285 sband = local->hw.wiphy->bands[band];
286 rates = ieee80211_sta_get_rates(local, elems, band, &basic_rates);
285 287
286 sta = sta_info_get(sdata, hw_addr); 288 sta = sta_info_get(sdata, addr);
287 if (!sta) { 289 if (!sta) {
288 rcu_read_unlock(); 290 sta = mesh_plink_alloc(sdata, addr);
289 /* Userspace handles peer allocation when security is enabled
290 * */
291 if (sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED)
292 cfg80211_notify_new_peer_candidate(sdata->dev, hw_addr,
293 elems->ie_start, elems->total_len,
294 GFP_KERNEL);
295 else
296 sta = mesh_plink_alloc(sdata, hw_addr, rates, elems);
297 if (!sta) 291 if (!sta)
298 return; 292 return NULL;
299 if (sta_info_insert_rcu(sta)) {
300 rcu_read_unlock();
301 return;
302 }
303 } 293 }
304 294
295 spin_lock_bh(&sta->lock);
305 sta->last_rx = jiffies; 296 sta->last_rx = jiffies;
306 sta->sta.supp_rates[local->hw.conf.channel->band] = rates; 297 sta->sta.supp_rates[band] = rates;
298 if (elems->ht_cap_elem &&
299 sdata->local->_oper_channel_type != NL80211_CHAN_NO_HT)
300 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
301 elems->ht_cap_elem,
302 &sta->sta.ht_cap);
303 else
304 memset(&sta->sta.ht_cap, 0, sizeof(sta->sta.ht_cap));
305
306 rate_control_rate_init(sta);
307 spin_unlock_bh(&sta->lock);
308
309 return sta;
310}
311
312void mesh_neighbour_update(struct ieee80211_sub_if_data *sdata,
313 u8 *hw_addr,
314 struct ieee802_11_elems *elems)
315{
316 struct sta_info *sta;
317
318 /* Userspace handles peer allocation when security is enabled */
319 if (sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) {
320 cfg80211_notify_new_peer_candidate(sdata->dev, hw_addr,
321 elems->ie_start,
322 elems->total_len,
323 GFP_KERNEL);
324 return;
325 }
326
327 rcu_read_lock();
328 sta = mesh_peer_init(sdata, hw_addr, elems);
329 if (!sta)
330 goto out;
331
307 if (mesh_peer_accepts_plinks(elems) && 332 if (mesh_peer_accepts_plinks(elems) &&
308 sta->plink_state == NL80211_PLINK_LISTEN && 333 sta->plink_state == NL80211_PLINK_LISTEN &&
309 sdata->u.mesh.accepting_plinks && 334 sdata->u.mesh.accepting_plinks &&
310 sdata->u.mesh.mshcfg.auto_open_plinks && 335 sdata->u.mesh.mshcfg.auto_open_plinks &&
311 rssi_threshold_check(sta, sdata)) 336 rssi_threshold_check(sta, sdata))
312 mesh_plink_open(sta); 337 mesh_plink_open(sta);
313 338
339out:
314 rcu_read_unlock(); 340 rcu_read_unlock();
315} 341}
316 342
@@ -456,7 +482,6 @@ void mesh_plink_block(struct sta_info *sta)
456void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt, 482void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt,
457 size_t len, struct ieee80211_rx_status *rx_status) 483 size_t len, struct ieee80211_rx_status *rx_status)
458{ 484{
459 struct ieee80211_local *local = sdata->local;
460 struct ieee802_11_elems elems; 485 struct ieee802_11_elems elems;
461 struct sta_info *sta; 486 struct sta_info *sta;
462 enum plink_event event; 487 enum plink_event event;
@@ -560,7 +585,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
560 /* Now we will figure out the appropriate event... */ 585 /* Now we will figure out the appropriate event... */
561 event = PLINK_UNDEFINED; 586 event = PLINK_UNDEFINED;
562 if (ftype != WLAN_SP_MESH_PEERING_CLOSE && 587 if (ftype != WLAN_SP_MESH_PEERING_CLOSE &&
563 (!mesh_matches_local(&elems, sdata))) { 588 !mesh_matches_local(sdata, &elems)) {
564 matches_local = false; 589 matches_local = false;
565 switch (ftype) { 590 switch (ftype) {
566 case WLAN_SP_MESH_PEERING_OPEN: 591 case WLAN_SP_MESH_PEERING_OPEN:
@@ -583,29 +608,13 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
583 return; 608 return;
584 } else if (!sta) { 609 } else if (!sta) {
585 /* ftype == WLAN_SP_MESH_PEERING_OPEN */ 610 /* ftype == WLAN_SP_MESH_PEERING_OPEN */
586 u32 rates;
587
588 rcu_read_unlock();
589
590 if (!mesh_plink_free_count(sdata)) { 611 if (!mesh_plink_free_count(sdata)) {
591 mpl_dbg("Mesh plink error: no more free plinks\n"); 612 mpl_dbg("Mesh plink error: no more free plinks\n");
592 return;
593 }
594
595 rates = ieee80211_sta_get_rates(local, &elems, rx_status->band);
596 sta = mesh_plink_alloc(sdata, mgmt->sa, rates, &elems);
597 if (!sta) {
598 mpl_dbg("Mesh plink error: plink table full\n");
599 return;
600 }
601 if (sta_info_insert_rcu(sta)) {
602 rcu_read_unlock(); 613 rcu_read_unlock();
603 return; 614 return;
604 } 615 }
605 event = OPN_ACPT; 616 event = OPN_ACPT;
606 spin_lock_bh(&sta->lock);
607 } else if (matches_local) { 617 } else if (matches_local) {
608 spin_lock_bh(&sta->lock);
609 switch (ftype) { 618 switch (ftype) {
610 case WLAN_SP_MESH_PEERING_OPEN: 619 case WLAN_SP_MESH_PEERING_OPEN:
611 if (!mesh_plink_free_count(sdata) || 620 if (!mesh_plink_free_count(sdata) ||
@@ -642,12 +651,19 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
642 break; 651 break;
643 default: 652 default:
644 mpl_dbg("Mesh plink: unknown frame subtype\n"); 653 mpl_dbg("Mesh plink: unknown frame subtype\n");
645 spin_unlock_bh(&sta->lock);
646 rcu_read_unlock(); 654 rcu_read_unlock();
647 return; 655 return;
648 } 656 }
649 } else { 657 }
650 spin_lock_bh(&sta->lock); 658
659 if (event == OPN_ACPT) {
660 /* allocate sta entry if necessary and update info */
661 sta = mesh_peer_init(sdata, mgmt->sa, &elems);
662 if (!sta) {
663 mpl_dbg("Mesh plink: failed to init peer!\n");
664 rcu_read_unlock();
665 return;
666 }
651 } 667 }
652 668
653 mpl_dbg("Mesh plink (peer, state, llid, plid, event): %pM %s %d %d %d\n", 669 mpl_dbg("Mesh plink (peer, state, llid, plid, event): %pM %s %d %d %d\n",
@@ -655,6 +671,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
655 le16_to_cpu(sta->llid), le16_to_cpu(sta->plid), 671 le16_to_cpu(sta->llid), le16_to_cpu(sta->plid),
656 event); 672 event);
657 reason = 0; 673 reason = 0;
674 spin_lock_bh(&sta->lock);
658 switch (sta->plink_state) { 675 switch (sta->plink_state) {
659 /* spin_unlock as soon as state is updated at each case */ 676 /* spin_unlock as soon as state is updated at each case */
660 case NL80211_PLINK_LISTEN: 677 case NL80211_PLINK_LISTEN:
diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c
new file mode 100644
index 000000000000..38d30e8ce6dc
--- /dev/null
+++ b/net/mac80211/mesh_sync.c
@@ -0,0 +1,316 @@
1/*
2 * Copyright 2011-2012, Pavel Zubarev <pavel.zubarev@gmail.com>
3 * Copyright 2011-2012, Marco Porsch <marco.porsch@s2005.tu-chemnitz.de>
4 * Copyright 2011-2012, cozybit Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include "ieee80211_i.h"
12#include "mesh.h"
13#include "driver-ops.h"
14
15#ifdef CONFIG_MAC80211_VERBOSE_MESH_SYNC_DEBUG
16#define msync_dbg(fmt, args...) \
17 printk(KERN_DEBUG "Mesh sync (%s): " fmt "\n", sdata->name, ##args)
18#else
19#define msync_dbg(fmt, args...) do { (void)(0); } while (0)
20#endif
21
22/* This is not in the standard. It represents a tolerable tbtt drift below
23 * which we do no TSF adjustment.
24 */
25#define TOFFSET_MINIMUM_ADJUSTMENT 10
26
27/* This is not in the standard. It is a margin added to the
28 * Toffset setpoint to mitigate TSF overcorrection
29 * introduced by TSF adjustment latency.
30 */
31#define TOFFSET_SET_MARGIN 20
32
33/* This is not in the standard. It represents the maximum Toffset jump above
34 * which we'll invalidate the Toffset setpoint and choose a new setpoint. This
35 * could be, for instance, in case a neighbor is restarted and its TSF counter
36 * reset.
37 */
38#define TOFFSET_MAXIMUM_ADJUSTMENT 30000 /* 30 ms */
39
/* Pairs a synchronization method identifier with the ops implementing it;
 * used as the element type of the sync_methods lookup table below.
 */
struct sync_method {
	u8 method;
	struct ieee80211_mesh_sync_ops ops;
};
44
45/**
46 * mesh_peer_tbtt_adjusting - check if an mp is currently adjusting its TBTT
47 *
48 * @ie: information elements of a management frame from the mesh peer
49 */
50static bool mesh_peer_tbtt_adjusting(struct ieee802_11_elems *ie)
51{
52 return (ie->mesh_config->meshconf_cap &
53 MESHCONF_CAPAB_TBTT_ADJUSTING) != 0;
54}
55
56void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
57{
58 struct ieee80211_local *local = sdata->local;
59 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
60 /* sdata->vif.bss_conf.beacon_int in 1024us units, 0.04% */
61 u64 beacon_int_fraction = sdata->vif.bss_conf.beacon_int * 1024 / 2500;
62 u64 tsf;
63 u64 tsfdelta;
64
65 spin_lock_bh(&ifmsh->sync_offset_lock);
66
67 if (ifmsh->sync_offset_clockdrift_max < beacon_int_fraction) {
68 msync_dbg("TBTT : max clockdrift=%lld; adjusting",
69 (long long) ifmsh->sync_offset_clockdrift_max);
70 tsfdelta = -ifmsh->sync_offset_clockdrift_max;
71 ifmsh->sync_offset_clockdrift_max = 0;
72 } else {
73 msync_dbg("TBTT : max clockdrift=%lld; adjusting by %llu",
74 (long long) ifmsh->sync_offset_clockdrift_max,
75 (unsigned long long) beacon_int_fraction);
76 tsfdelta = -beacon_int_fraction;
77 ifmsh->sync_offset_clockdrift_max -= beacon_int_fraction;
78 }
79
80 tsf = drv_get_tsf(local, sdata);
81 if (tsf != -1ULL)
82 drv_set_tsf(local, sdata, tsf + tsfdelta);
83 spin_unlock_bh(&ifmsh->sync_offset_lock);
84}
85
86static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
87 u16 stype,
88 struct ieee80211_mgmt *mgmt,
89 struct ieee802_11_elems *elems,
90 struct ieee80211_rx_status *rx_status)
91{
92 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
93 struct ieee80211_local *local = sdata->local;
94 struct sta_info *sta;
95 u64 t_t, t_r;
96
97 WARN_ON(ifmsh->mesh_sp_id != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET);
98
99 /* standard mentions only beacons */
100 if (stype != IEEE80211_STYPE_BEACON)
101 return;
102
103 /* The current tsf is a first approximation for the timestamp
104 * for the received beacon. Further down we try to get a
105 * better value from the rx_status->mactime field if
106 * available. Also we have to call drv_get_tsf() before
107 * entering the rcu-read section.*/
108 t_r = drv_get_tsf(local, sdata);
109
110 rcu_read_lock();
111 sta = sta_info_get(sdata, mgmt->sa);
112 if (!sta)
113 goto no_sync;
114
115 /* check offset sync conditions (13.13.2.2.1)
116 *
117 * TODO also sync to
118 * dot11MeshNbrOffsetMaxNeighbor non-peer non-MBSS neighbors
119 */
120
121 if (elems->mesh_config && mesh_peer_tbtt_adjusting(elems)) {
122 clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
123 msync_dbg("STA %pM : is adjusting TBTT", sta->sta.addr);
124 goto no_sync;
125 }
126
127 if (rx_status->flag & RX_FLAG_MACTIME_MPDU && rx_status->mactime) {
128 /*
129 * The mactime is defined as the time the first data symbol
130 * of the frame hits the PHY, and the timestamp of the beacon
131 * is defined as "the time that the data symbol containing the
132 * first bit of the timestamp is transmitted to the PHY plus
133 * the transmitting STA's delays through its local PHY from the
134 * MAC-PHY interface to its interface with the WM" (802.11
135 * 11.1.2)
136 *
137 * T_r, in 13.13.2.2.2, is just defined as "the frame reception
138 * time" but we unless we interpret that time to be the same
139 * time of the beacon timestamp, the offset calculation will be
140 * off. Below we adjust t_r to be "the time at which the first
141 * symbol of the timestamp element in the beacon is received".
142 * This correction depends on the rate.
143 *
144 * Based on similar code in ibss.c
145 */
146 int rate;
147
148 if (rx_status->flag & RX_FLAG_HT) {
149 /* TODO:
150 * In principle there could be HT-beacons (Dual Beacon
151 * HT Operation options), but for now ignore them and
152 * just use the primary (i.e. non-HT) beacons for
153 * synchronization.
154 * */
155 goto no_sync;
156 } else
157 rate = local->hw.wiphy->bands[rx_status->band]->
158 bitrates[rx_status->rate_idx].bitrate;
159
160 /* 24 bytes of header * 8 bits/byte *
161 * 10*(100 Kbps)/Mbps / rate (100 Kbps)*/
162 t_r = rx_status->mactime + (24 * 8 * 10 / rate);
163 }
164
165 /* Timing offset calculation (see 13.13.2.2.2) */
166 t_t = le64_to_cpu(mgmt->u.beacon.timestamp);
167 sta->t_offset = t_t - t_r;
168
169 if (test_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN)) {
170 s64 t_clockdrift = sta->t_offset_setpoint
171 - sta->t_offset;
172 msync_dbg("STA %pM : sta->t_offset=%lld, sta->t_offset_setpoint=%lld, t_clockdrift=%lld",
173 sta->sta.addr,
174 (long long) sta->t_offset,
175 (long long)
176 sta->t_offset_setpoint,
177 (long long) t_clockdrift);
178
179 if (t_clockdrift > TOFFSET_MAXIMUM_ADJUSTMENT ||
180 t_clockdrift < -TOFFSET_MAXIMUM_ADJUSTMENT) {
181 msync_dbg("STA %pM : t_clockdrift=%lld too large, setpoint reset",
182 sta->sta.addr,
183 (long long) t_clockdrift);
184 clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
185 goto no_sync;
186 }
187
188 rcu_read_unlock();
189
190 spin_lock_bh(&ifmsh->sync_offset_lock);
191 if (t_clockdrift >
192 ifmsh->sync_offset_clockdrift_max)
193 ifmsh->sync_offset_clockdrift_max
194 = t_clockdrift;
195 spin_unlock_bh(&ifmsh->sync_offset_lock);
196
197 } else {
198 sta->t_offset_setpoint = sta->t_offset - TOFFSET_SET_MARGIN;
199 set_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
200 msync_dbg("STA %pM : offset was invalid, "
201 " sta->t_offset=%lld",
202 sta->sta.addr,
203 (long long) sta->t_offset);
204 rcu_read_unlock();
205 }
206 return;
207
208no_sync:
209 rcu_read_unlock();
210}
211
212static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
213{
214 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
215
216 WARN_ON(ifmsh->mesh_sp_id
217 != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET);
218 BUG_ON(!rcu_read_lock_held());
219
220 spin_lock_bh(&ifmsh->sync_offset_lock);
221
222 if (ifmsh->sync_offset_clockdrift_max >
223 TOFFSET_MINIMUM_ADJUSTMENT) {
224 /* Since ajusting the tsf here would
225 * require a possibly blocking call
226 * to the driver tsf setter, we punt
227 * the tsf adjustment to the mesh tasklet
228 */
229 msync_dbg("TBTT : kicking off TBTT "
230 "adjustment with "
231 "clockdrift_max=%lld",
232 ifmsh->sync_offset_clockdrift_max);
233 set_bit(MESH_WORK_DRIFT_ADJUST,
234 &ifmsh->wrkq_flags);
235 } else {
236 msync_dbg("TBTT : max clockdrift=%lld; "
237 "too small to adjust",
238 (long long)
239 ifmsh->sync_offset_clockdrift_max);
240 ifmsh->sync_offset_clockdrift_max = 0;
241 }
242 spin_unlock_bh(&ifmsh->sync_offset_lock);
243}
244
245static const u8 *mesh_get_vendor_oui(struct ieee80211_sub_if_data *sdata)
246{
247 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
248 u8 offset;
249
250 if (!ifmsh->ie || !ifmsh->ie_len)
251 return NULL;
252
253 offset = ieee80211_ie_split_vendor(ifmsh->ie,
254 ifmsh->ie_len, 0);
255
256 if (!offset)
257 return NULL;
258
259 return ifmsh->ie + offset + 2;
260}
261
262static void mesh_sync_vendor_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
263 u16 stype,
264 struct ieee80211_mgmt *mgmt,
265 struct ieee802_11_elems *elems,
266 struct ieee80211_rx_status *rx_status)
267{
268 const u8 *oui;
269
270 WARN_ON(sdata->u.mesh.mesh_sp_id != IEEE80211_SYNC_METHOD_VENDOR);
271 msync_dbg("called mesh_sync_vendor_rx_bcn_presp");
272 oui = mesh_get_vendor_oui(sdata);
273 /* here you would implement the vendor offset tracking for this oui */
274}
275
276static void mesh_sync_vendor_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
277{
278 const u8 *oui;
279
280 WARN_ON(sdata->u.mesh.mesh_sp_id != IEEE80211_SYNC_METHOD_VENDOR);
281 msync_dbg("called mesh_sync_vendor_adjust_tbtt");
282 oui = mesh_get_vendor_oui(sdata);
283 /* here you would implement the vendor tsf adjustment for this oui */
284}
285
/* Table of the supported synchronization methods; searched by
 * ieee80211_mesh_sync_ops_get() using the method identifier.
 */
static struct sync_method sync_methods[] = {
	{
		.method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET,
		.ops = {
			.rx_bcn_presp = &mesh_sync_offset_rx_bcn_presp,
			.adjust_tbtt = &mesh_sync_offset_adjust_tbtt,
		}
	},
	{
		.method = IEEE80211_SYNC_METHOD_VENDOR,
		.ops = {
			.rx_bcn_presp = &mesh_sync_vendor_rx_bcn_presp,
			.adjust_tbtt = &mesh_sync_vendor_adjust_tbtt,
		}
	},
};
303
304struct ieee80211_mesh_sync_ops *ieee80211_mesh_sync_ops_get(u8 method)
305{
306 struct ieee80211_mesh_sync_ops *ops = NULL;
307 u8 i;
308
309 for (i = 0 ; i < ARRAY_SIZE(sync_methods); ++i) {
310 if (sync_methods[i].method == method) {
311 ops = &sync_methods[i].ops;
312 break;
313 }
314 }
315 return ops;
316}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 20c680bfc3ae..03f93f958fa4 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -171,122 +171,64 @@ static int ecw2cw(int ecw)
171 return (1 << ecw) - 1; 171 return (1 << ecw) - 1;
172} 172}
173 173
174/* 174static u32 ieee80211_config_ht_tx(struct ieee80211_sub_if_data *sdata,
175 * ieee80211_enable_ht should be called only after the operating band 175 struct ieee80211_ht_operation *ht_oper,
176 * has been determined as ht configuration depends on the hw's 176 const u8 *bssid, bool reconfig)
177 * HT abilities for a specific band.
178 */
179static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
180 struct ieee80211_ht_info *hti,
181 const u8 *bssid, u16 ap_ht_cap_flags,
182 bool beacon_htcap_ie)
183{ 177{
184 struct ieee80211_local *local = sdata->local; 178 struct ieee80211_local *local = sdata->local;
185 struct ieee80211_supported_band *sband; 179 struct ieee80211_supported_band *sband;
186 struct sta_info *sta; 180 struct sta_info *sta;
187 u32 changed = 0; 181 u32 changed = 0;
188 int hti_cfreq;
189 u16 ht_opmode; 182 u16 ht_opmode;
190 bool enable_ht = true; 183 bool disable_40 = false;
191 enum nl80211_channel_type prev_chantype;
192 enum nl80211_channel_type rx_channel_type = NL80211_CHAN_NO_HT;
193 enum nl80211_channel_type tx_channel_type;
194 184
195 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 185 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
196 prev_chantype = sdata->vif.bss_conf.channel_type;
197 186
198 187 switch (sdata->vif.bss_conf.channel_type) {
199 hti_cfreq = ieee80211_channel_to_frequency(hti->control_chan, 188 case NL80211_CHAN_HT40PLUS:
200 sband->band); 189 if (local->hw.conf.channel->flags & IEEE80211_CHAN_NO_HT40PLUS)
201 /* check that channel matches the right operating channel */ 190 disable_40 = true;
202 if (local->hw.conf.channel->center_freq != hti_cfreq) { 191 break;
203 /* Some APs mess this up, evidently. 192 case NL80211_CHAN_HT40MINUS:
204 * Netgear WNDR3700 sometimes reports 4 higher than 193 if (local->hw.conf.channel->flags & IEEE80211_CHAN_NO_HT40MINUS)
205 * the actual channel, for instance. 194 disable_40 = true;
206 */ 195 break;
207 printk(KERN_DEBUG 196 default:
208 "%s: Wrong control channel in association" 197 break;
209 " response: configured center-freq: %d"
210 " hti-cfreq: %d hti->control_chan: %d"
211 " band: %d. Disabling HT.\n",
212 sdata->name,
213 local->hw.conf.channel->center_freq,
214 hti_cfreq, hti->control_chan,
215 sband->band);
216 enable_ht = false;
217 }
218
219 if (enable_ht) {
220 rx_channel_type = NL80211_CHAN_HT20;
221
222 if (!(ap_ht_cap_flags & IEEE80211_HT_CAP_40MHZ_INTOLERANT) &&
223 !ieee80111_cfg_override_disables_ht40(sdata) &&
224 (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) &&
225 (hti->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)) {
226 switch(hti->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
227 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
228 rx_channel_type = NL80211_CHAN_HT40PLUS;
229 break;
230 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
231 rx_channel_type = NL80211_CHAN_HT40MINUS;
232 break;
233 }
234 }
235 } 198 }
236 199
237 tx_channel_type = ieee80211_get_tx_channel_type(local, rx_channel_type); 200 /* This can change during the lifetime of the BSS */
238 201 if (!(ht_oper->ht_param & IEEE80211_HT_PARAM_CHAN_WIDTH_ANY))
239 if (local->tmp_channel) 202 disable_40 = true;
240 local->tmp_channel_type = rx_channel_type;
241 203
242 if (!ieee80211_set_channel_type(local, sdata, rx_channel_type)) { 204 mutex_lock(&local->sta_mtx);
243 /* can only fail due to HT40+/- mismatch */ 205 sta = sta_info_get(sdata, bssid);
244 rx_channel_type = NL80211_CHAN_HT20;
245 WARN_ON(!ieee80211_set_channel_type(local, sdata,
246 rx_channel_type));
247 }
248
249 if (beacon_htcap_ie && (prev_chantype != rx_channel_type)) {
250 /*
251 * Whenever the AP announces the HT mode change that can be
252 * 40MHz intolerant or etc., it would be safer to stop tx
253 * queues before doing hw config to avoid buffer overflow.
254 */
255 ieee80211_stop_queues_by_reason(&sdata->local->hw,
256 IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE);
257 206
258 /* flush out all packets */ 207 WARN_ON_ONCE(!sta);
259 synchronize_net();
260 208
261 drv_flush(local, false); 209 if (sta && !sta->supports_40mhz)
262 } 210 disable_40 = true;
263 211
264 /* channel_type change automatically detected */ 212 if (sta && (!reconfig ||
265 ieee80211_hw_config(local, 0); 213 (disable_40 != !!(sta->sta.ht_cap.cap &
214 IEEE80211_HT_CAP_SUP_WIDTH_20_40)))) {
266 215
267 if (prev_chantype != tx_channel_type) { 216 if (disable_40)
268 rcu_read_lock(); 217 sta->sta.ht_cap.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
269 sta = sta_info_get(sdata, bssid); 218 else
270 if (sta) 219 sta->sta.ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
271 rate_control_rate_update(local, sband, sta,
272 IEEE80211_RC_HT_CHANGED,
273 tx_channel_type);
274 rcu_read_unlock();
275 220
276 if (beacon_htcap_ie) 221 rate_control_rate_update(local, sband, sta,
277 ieee80211_wake_queues_by_reason(&sdata->local->hw, 222 IEEE80211_RC_BW_CHANGED);
278 IEEE80211_QUEUE_STOP_REASON_CHTYPE_CHANGE);
279 } 223 }
224 mutex_unlock(&local->sta_mtx);
280 225
281 ht_opmode = le16_to_cpu(hti->operation_mode); 226 ht_opmode = le16_to_cpu(ht_oper->operation_mode);
282 227
283 /* if bss configuration changed store the new one */ 228 /* if bss configuration changed store the new one */
284 if (sdata->ht_opmode_valid != enable_ht || 229 if (!reconfig || (sdata->vif.bss_conf.ht_operation_mode != ht_opmode)) {
285 sdata->vif.bss_conf.ht_operation_mode != ht_opmode ||
286 prev_chantype != rx_channel_type) {
287 changed |= BSS_CHANGED_HT; 230 changed |= BSS_CHANGED_HT;
288 sdata->vif.bss_conf.ht_operation_mode = ht_opmode; 231 sdata->vif.bss_conf.ht_operation_mode = ht_opmode;
289 sdata->ht_opmode_valid = enable_ht;
290 } 232 }
291 233
292 return changed; 234 return changed;
@@ -316,12 +258,12 @@ static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len,
316} 258}
317 259
318static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata, 260static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
319 struct sk_buff *skb, const u8 *ht_info_ie, 261 struct sk_buff *skb, const u8 *ht_oper_ie,
320 struct ieee80211_supported_band *sband, 262 struct ieee80211_supported_band *sband,
321 struct ieee80211_channel *channel, 263 struct ieee80211_channel *channel,
322 enum ieee80211_smps_mode smps) 264 enum ieee80211_smps_mode smps)
323{ 265{
324 struct ieee80211_ht_info *ht_info; 266 struct ieee80211_ht_operation *ht_oper;
325 u8 *pos; 267 u8 *pos;
326 u32 flags = channel->flags; 268 u32 flags = channel->flags;
327 u16 cap; 269 u16 cap;
@@ -329,21 +271,21 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
329 271
330 BUILD_BUG_ON(sizeof(ht_cap) != sizeof(sband->ht_cap)); 272 BUILD_BUG_ON(sizeof(ht_cap) != sizeof(sband->ht_cap));
331 273
332 if (!ht_info_ie) 274 if (!ht_oper_ie)
333 return; 275 return;
334 276
335 if (ht_info_ie[1] < sizeof(struct ieee80211_ht_info)) 277 if (ht_oper_ie[1] < sizeof(struct ieee80211_ht_operation))
336 return; 278 return;
337 279
338 memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap)); 280 memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap));
339 ieee80211_apply_htcap_overrides(sdata, &ht_cap); 281 ieee80211_apply_htcap_overrides(sdata, &ht_cap);
340 282
341 ht_info = (struct ieee80211_ht_info *)(ht_info_ie + 2); 283 ht_oper = (struct ieee80211_ht_operation *)(ht_oper_ie + 2);
342 284
343 /* determine capability flags */ 285 /* determine capability flags */
344 cap = ht_cap.cap; 286 cap = ht_cap.cap;
345 287
346 switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { 288 switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
347 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: 289 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
348 if (flags & IEEE80211_CHAN_NO_HT40PLUS) { 290 if (flags & IEEE80211_CHAN_NO_HT40PLUS) {
349 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; 291 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
@@ -358,6 +300,16 @@ static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
358 break; 300 break;
359 } 301 }
360 302
303 /*
304 * If 40 MHz was disabled associate as though we weren't
305 * capable of 40 MHz -- some broken APs will never fall
306 * back to trying to transmit in 20 MHz.
307 */
308 if (sdata->u.mgd.flags & IEEE80211_STA_DISABLE_40MHZ) {
309 cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
310 cap &= ~IEEE80211_HT_CAP_SGI_40;
311 }
312
361 /* set SM PS mode properly */ 313 /* set SM PS mode properly */
362 cap &= ~IEEE80211_HT_CAP_SM_PS; 314 cap &= ~IEEE80211_HT_CAP_SM_PS;
363 switch (smps) { 315 switch (smps) {
@@ -557,7 +509,7 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
557 } 509 }
558 510
559 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) 511 if (!(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
560 ieee80211_add_ht_ie(sdata, skb, assoc_data->ht_information_ie, 512 ieee80211_add_ht_ie(sdata, skb, assoc_data->ht_operation_ie,
561 sband, local->oper_channel, ifmgd->ap_smps); 513 sband, local->oper_channel, ifmgd->ap_smps);
562 514
563 /* if present, add any custom non-vendor IEs that go after HT */ 515 /* if present, add any custom non-vendor IEs that go after HT */
@@ -1182,7 +1134,7 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
1182 if (!local->ops->conf_tx) 1134 if (!local->ops->conf_tx)
1183 return; 1135 return;
1184 1136
1185 if (local->hw.queues < 4) 1137 if (local->hw.queues < IEEE80211_NUM_ACS)
1186 return; 1138 return;
1187 1139
1188 if (!wmm_param) 1140 if (!wmm_param)
@@ -1435,7 +1387,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
1435 sdata->vif.bss_conf.assoc = false; 1387 sdata->vif.bss_conf.assoc = false;
1436 1388
1437 /* on the next assoc, re-program HT parameters */ 1389 /* on the next assoc, re-program HT parameters */
1438 sdata->ht_opmode_valid = false;
1439 memset(&ifmgd->ht_capa, 0, sizeof(ifmgd->ht_capa)); 1390 memset(&ifmgd->ht_capa, 0, sizeof(ifmgd->ht_capa));
1440 memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask)); 1391 memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask));
1441 1392
@@ -1496,19 +1447,24 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
1496static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata) 1447static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata)
1497{ 1448{
1498 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1449 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1450 struct ieee80211_local *local = sdata->local;
1499 1451
1452 mutex_lock(&local->mtx);
1500 if (!(ifmgd->flags & (IEEE80211_STA_BEACON_POLL | 1453 if (!(ifmgd->flags & (IEEE80211_STA_BEACON_POLL |
1501 IEEE80211_STA_CONNECTION_POLL))) 1454 IEEE80211_STA_CONNECTION_POLL))) {
1502 return; 1455 mutex_unlock(&local->mtx);
1456 return;
1457 }
1503 1458
1504 ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL | 1459 ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL |
1505 IEEE80211_STA_BEACON_POLL); 1460 IEEE80211_STA_BEACON_POLL);
1506 mutex_lock(&sdata->local->iflist_mtx); 1461
1507 ieee80211_recalc_ps(sdata->local, -1); 1462 mutex_lock(&local->iflist_mtx);
1508 mutex_unlock(&sdata->local->iflist_mtx); 1463 ieee80211_recalc_ps(local, -1);
1464 mutex_unlock(&local->iflist_mtx);
1509 1465
1510 if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR) 1466 if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
1511 return; 1467 goto out;
1512 1468
1513 /* 1469 /*
1514 * We've received a probe response, but are not sure whether 1470 * We've received a probe response, but are not sure whether
@@ -1520,6 +1476,9 @@ static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata)
1520 mod_timer(&ifmgd->conn_mon_timer, 1476 mod_timer(&ifmgd->conn_mon_timer,
1521 round_jiffies_up(jiffies + 1477 round_jiffies_up(jiffies +
1522 IEEE80211_CONNECTION_IDLE_TIME)); 1478 IEEE80211_CONNECTION_IDLE_TIME));
1479out:
1480 ieee80211_run_deferred_scan(local);
1481 mutex_unlock(&local->mtx);
1523} 1482}
1524 1483
1525void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata, 1484void ieee80211_sta_tx_notify(struct ieee80211_sub_if_data *sdata,
@@ -1567,14 +1526,23 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
1567 ifmgd->nullfunc_failed = false; 1526 ifmgd->nullfunc_failed = false;
1568 ieee80211_send_nullfunc(sdata->local, sdata, 0); 1527 ieee80211_send_nullfunc(sdata->local, sdata, 0);
1569 } else { 1528 } else {
1529 int ssid_len;
1530
1570 ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID); 1531 ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
1571 ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid[1], NULL, 0, 1532 if (WARN_ON_ONCE(ssid == NULL))
1572 (u32) -1, true, false); 1533 ssid_len = 0;
1534 else
1535 ssid_len = ssid[1];
1536
1537 ieee80211_send_probe_req(sdata, dst, ssid + 2, ssid_len, NULL,
1538 0, (u32) -1, true, false);
1573 } 1539 }
1574 1540
1575 ifmgd->probe_send_count++; 1541 ifmgd->probe_send_count++;
1576 ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms); 1542 ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
1577 run_again(ifmgd, ifmgd->probe_timeout); 1543 run_again(ifmgd, ifmgd->probe_timeout);
1544 if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
1545 drv_flush(sdata->local, false);
1578} 1546}
1579 1547
1580static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata, 1548static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
@@ -1586,17 +1554,18 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
1586 if (!ieee80211_sdata_running(sdata)) 1554 if (!ieee80211_sdata_running(sdata))
1587 return; 1555 return;
1588 1556
1589 if (sdata->local->scanning)
1590 return;
1591
1592 if (sdata->local->tmp_channel)
1593 return;
1594
1595 mutex_lock(&ifmgd->mtx); 1557 mutex_lock(&ifmgd->mtx);
1596 1558
1597 if (!ifmgd->associated) 1559 if (!ifmgd->associated)
1598 goto out; 1560 goto out;
1599 1561
1562 mutex_lock(&sdata->local->mtx);
1563
1564 if (sdata->local->tmp_channel || sdata->local->scanning) {
1565 mutex_unlock(&sdata->local->mtx);
1566 goto out;
1567 }
1568
1600#ifdef CONFIG_MAC80211_VERBOSE_DEBUG 1569#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1601 if (beacon && net_ratelimit()) 1570 if (beacon && net_ratelimit())
1602 printk(KERN_DEBUG "%s: detected beacon loss from AP " 1571 printk(KERN_DEBUG "%s: detected beacon loss from AP "
@@ -1623,6 +1592,8 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
1623 else 1592 else
1624 ifmgd->flags |= IEEE80211_STA_CONNECTION_POLL; 1593 ifmgd->flags |= IEEE80211_STA_CONNECTION_POLL;
1625 1594
1595 mutex_unlock(&sdata->local->mtx);
1596
1626 if (already) 1597 if (already)
1627 goto out; 1598 goto out;
1628 1599
@@ -1643,6 +1614,7 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
1643 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1614 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1644 struct sk_buff *skb; 1615 struct sk_buff *skb;
1645 const u8 *ssid; 1616 const u8 *ssid;
1617 int ssid_len;
1646 1618
1647 if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION)) 1619 if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_STATION))
1648 return NULL; 1620 return NULL;
@@ -1653,8 +1625,13 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
1653 return NULL; 1625 return NULL;
1654 1626
1655 ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID); 1627 ssid = ieee80211_bss_get_ie(ifmgd->associated, WLAN_EID_SSID);
1628 if (WARN_ON_ONCE(ssid == NULL))
1629 ssid_len = 0;
1630 else
1631 ssid_len = ssid[1];
1632
1656 skb = ieee80211_build_probe_req(sdata, ifmgd->associated->bssid, 1633 skb = ieee80211_build_probe_req(sdata, ifmgd->associated->bssid,
1657 (u32) -1, ssid + 2, ssid[1], 1634 (u32) -1, ssid + 2, ssid_len,
1658 NULL, 0, true); 1635 NULL, 0, true);
1659 1636
1660 return skb; 1637 return skb;
@@ -2000,7 +1977,6 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
2000 struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; 1977 struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
2001 u32 changed = 0; 1978 u32 changed = 0;
2002 int err; 1979 int err;
2003 u16 ap_ht_cap_flags;
2004 1980
2005 /* AssocResp and ReassocResp have identical structure */ 1981 /* AssocResp and ReassocResp have identical structure */
2006 1982
@@ -2051,7 +2027,8 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
2051 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, 2027 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband,
2052 elems.ht_cap_elem, &sta->sta.ht_cap); 2028 elems.ht_cap_elem, &sta->sta.ht_cap);
2053 2029
2054 ap_ht_cap_flags = sta->sta.ht_cap.cap; 2030 sta->supports_40mhz =
2031 sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40;
2055 2032
2056 rate_control_rate_init(sta); 2033 rate_control_rate_init(sta);
2057 2034
@@ -2092,11 +2069,10 @@ static bool ieee80211_assoc_success(struct ieee80211_sub_if_data *sdata,
2092 ieee80211_set_wmm_default(sdata, false); 2069 ieee80211_set_wmm_default(sdata, false);
2093 changed |= BSS_CHANGED_QOS; 2070 changed |= BSS_CHANGED_QOS;
2094 2071
2095 if (elems.ht_info_elem && elems.wmm_param && 2072 if (elems.ht_operation && elems.wmm_param &&
2096 !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) 2073 !(ifmgd->flags & IEEE80211_STA_DISABLE_11N))
2097 changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem, 2074 changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation,
2098 cbss->bssid, ap_ht_cap_flags, 2075 cbss->bssid, false);
2099 false);
2100 2076
2101 /* set AID and assoc capability, 2077 /* set AID and assoc capability,
2102 * ieee80211_set_associated() will tell the driver */ 2078 * ieee80211_set_associated() will tell the driver */
@@ -2319,7 +2295,7 @@ static const u64 care_about_ies =
2319 (1ULL << WLAN_EID_CHANNEL_SWITCH) | 2295 (1ULL << WLAN_EID_CHANNEL_SWITCH) |
2320 (1ULL << WLAN_EID_PWR_CONSTRAINT) | 2296 (1ULL << WLAN_EID_PWR_CONSTRAINT) |
2321 (1ULL << WLAN_EID_HT_CAPABILITY) | 2297 (1ULL << WLAN_EID_HT_CAPABILITY) |
2322 (1ULL << WLAN_EID_HT_INFORMATION); 2298 (1ULL << WLAN_EID_HT_OPERATION);
2323 2299
2324static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata, 2300static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
2325 struct ieee80211_mgmt *mgmt, 2301 struct ieee80211_mgmt *mgmt,
@@ -2468,11 +2444,13 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
2468 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) { 2444 if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK) {
2469 if (directed_tim) { 2445 if (directed_tim) {
2470 if (local->hw.conf.dynamic_ps_timeout > 0) { 2446 if (local->hw.conf.dynamic_ps_timeout > 0) {
2471 local->hw.conf.flags &= ~IEEE80211_CONF_PS; 2447 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
2472 ieee80211_hw_config(local, 2448 local->hw.conf.flags &= ~IEEE80211_CONF_PS;
2473 IEEE80211_CONF_CHANGE_PS); 2449 ieee80211_hw_config(local,
2450 IEEE80211_CONF_CHANGE_PS);
2451 }
2474 ieee80211_send_nullfunc(local, sdata, 0); 2452 ieee80211_send_nullfunc(local, sdata, 0);
2475 } else { 2453 } else if (!local->pspolling && sdata->u.mgd.powersave) {
2476 local->pspolling = true; 2454 local->pspolling = true;
2477 2455
2478 /* 2456 /*
@@ -2504,31 +2482,14 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
2504 erp_valid, erp_value); 2482 erp_valid, erp_value);
2505 2483
2506 2484
2507 if (elems.ht_cap_elem && elems.ht_info_elem && elems.wmm_param && 2485 if (elems.ht_cap_elem && elems.ht_operation && elems.wmm_param &&
2508 !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) { 2486 !(ifmgd->flags & IEEE80211_STA_DISABLE_11N)) {
2509 struct sta_info *sta;
2510 struct ieee80211_supported_band *sband; 2487 struct ieee80211_supported_band *sband;
2511 u16 ap_ht_cap_flags;
2512
2513 rcu_read_lock();
2514
2515 sta = sta_info_get(sdata, bssid);
2516 if (WARN_ON(!sta)) {
2517 rcu_read_unlock();
2518 return;
2519 }
2520 2488
2521 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 2489 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
2522 2490
2523 ieee80211_ht_cap_ie_to_sta_ht_cap(sdata, sband, 2491 changed |= ieee80211_config_ht_tx(sdata, elems.ht_operation,
2524 elems.ht_cap_elem, &sta->sta.ht_cap); 2492 bssid, true);
2525
2526 ap_ht_cap_flags = sta->sta.ht_cap.cap;
2527
2528 rcu_read_unlock();
2529
2530 changed |= ieee80211_enable_ht(sdata, elems.ht_info_elem,
2531 bssid, ap_ht_cap_flags, true);
2532 } 2493 }
2533 2494
2534 /* Note: country IE parsing is done for us by cfg80211 */ 2495 /* Note: country IE parsing is done for us by cfg80211 */
@@ -3060,6 +3021,11 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
3060 struct sta_info *sta; 3021 struct sta_info *sta;
3061 bool have_sta = false; 3022 bool have_sta = false;
3062 int err; 3023 int err;
3024 int ht_cfreq;
3025 enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT;
3026 const u8 *ht_oper_ie;
3027 const struct ieee80211_ht_operation *ht_oper = NULL;
3028 struct ieee80211_supported_band *sband;
3063 3029
3064 if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data)) 3030 if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data))
3065 return -EINVAL; 3031 return -EINVAL;
@@ -3081,17 +3047,76 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
3081 mutex_unlock(&local->mtx); 3047 mutex_unlock(&local->mtx);
3082 3048
3083 /* switch to the right channel */ 3049 /* switch to the right channel */
3050 sband = local->hw.wiphy->bands[cbss->channel->band];
3051
3052 ifmgd->flags &= ~IEEE80211_STA_DISABLE_40MHZ;
3053
3054 if (sband->ht_cap.ht_supported) {
3055 ht_oper_ie = cfg80211_find_ie(WLAN_EID_HT_OPERATION,
3056 cbss->information_elements,
3057 cbss->len_information_elements);
3058 if (ht_oper_ie && ht_oper_ie[1] >= sizeof(*ht_oper))
3059 ht_oper = (void *)(ht_oper_ie + 2);
3060 }
3061
3062 if (ht_oper) {
3063 ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan,
3064 cbss->channel->band);
3065 /* check that channel matches the right operating channel */
3066 if (cbss->channel->center_freq != ht_cfreq) {
3067 /*
3068 * It's possible that some APs are confused here;
3069 * Netgear WNDR3700 sometimes reports 4 higher than
3070 * the actual channel in association responses, but
3071 * since we look at probe response/beacon data here
3072 * it should be OK.
3073 */
3074 printk(KERN_DEBUG
3075 "%s: Wrong control channel: center-freq: %d"
3076 " ht-cfreq: %d ht->primary_chan: %d"
3077 " band: %d. Disabling HT.\n",
3078 sdata->name, cbss->channel->center_freq,
3079 ht_cfreq, ht_oper->primary_chan,
3080 cbss->channel->band);
3081 ht_oper = NULL;
3082 }
3083 }
3084
3085 if (ht_oper) {
3086 channel_type = NL80211_CHAN_HT20;
3087
3088 if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
3089 switch (ht_oper->ht_param &
3090 IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
3091 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
3092 channel_type = NL80211_CHAN_HT40PLUS;
3093 break;
3094 case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
3095 channel_type = NL80211_CHAN_HT40MINUS;
3096 break;
3097 }
3098 }
3099 }
3100
3101 if (!ieee80211_set_channel_type(local, sdata, channel_type)) {
3102 /* can only fail due to HT40+/- mismatch */
3103 channel_type = NL80211_CHAN_HT20;
3104 printk(KERN_DEBUG
3105 "%s: disabling 40 MHz due to multi-vif mismatch\n",
3106 sdata->name);
3107 ifmgd->flags |= IEEE80211_STA_DISABLE_40MHZ;
3108 WARN_ON(!ieee80211_set_channel_type(local, sdata,
3109 channel_type));
3110 }
3111
3084 local->oper_channel = cbss->channel; 3112 local->oper_channel = cbss->channel;
3085 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL); 3113 ieee80211_hw_config(local, 0);
3086 3114
3087 if (!have_sta) { 3115 if (!have_sta) {
3088 struct ieee80211_supported_band *sband;
3089 u32 rates = 0, basic_rates = 0; 3116 u32 rates = 0, basic_rates = 0;
3090 bool have_higher_than_11mbit; 3117 bool have_higher_than_11mbit;
3091 int min_rate = INT_MAX, min_rate_index = -1; 3118 int min_rate = INT_MAX, min_rate_index = -1;
3092 3119
3093 sband = sdata->local->hw.wiphy->bands[cbss->channel->band];
3094
3095 ieee80211_get_rates(sband, bss->supp_rates, 3120 ieee80211_get_rates(sband, bss->supp_rates,
3096 bss->supp_rates_len, 3121 bss->supp_rates_len,
3097 &rates, &basic_rates, 3122 &rates, &basic_rates,
@@ -3311,7 +3336,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
3311 /* Also disable HT if we don't support it or the AP doesn't use WMM */ 3336 /* Also disable HT if we don't support it or the AP doesn't use WMM */
3312 sband = local->hw.wiphy->bands[req->bss->channel->band]; 3337 sband = local->hw.wiphy->bands[req->bss->channel->band];
3313 if (!sband->ht_cap.ht_supported || 3338 if (!sband->ht_cap.ht_supported ||
3314 local->hw.queues < 4 || !bss->wmm_used) 3339 local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used)
3315 ifmgd->flags |= IEEE80211_STA_DISABLE_11N; 3340 ifmgd->flags |= IEEE80211_STA_DISABLE_11N;
3316 3341
3317 memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa)); 3342 memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa));
@@ -3334,11 +3359,12 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
3334 ifmgd->ap_smps = ifmgd->req_smps; 3359 ifmgd->ap_smps = ifmgd->req_smps;
3335 3360
3336 assoc_data->capability = req->bss->capability; 3361 assoc_data->capability = req->bss->capability;
3337 assoc_data->wmm = bss->wmm_used && (local->hw.queues >= 4); 3362 assoc_data->wmm = bss->wmm_used &&
3363 (local->hw.queues >= IEEE80211_NUM_ACS);
3338 assoc_data->supp_rates = bss->supp_rates; 3364 assoc_data->supp_rates = bss->supp_rates;
3339 assoc_data->supp_rates_len = bss->supp_rates_len; 3365 assoc_data->supp_rates_len = bss->supp_rates_len;
3340 assoc_data->ht_information_ie = 3366 assoc_data->ht_operation_ie =
3341 ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_INFORMATION); 3367 ieee80211_bss_get_ie(req->bss, WLAN_EID_HT_OPERATION);
3342 3368
3343 if (bss->wmm_used && bss->uapsd_supported && 3369 if (bss->wmm_used && bss->uapsd_supported &&
3344 (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) { 3370 (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) {
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index ef8eba1d736d..af1c4e26e965 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -127,6 +127,10 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
127 drv_remove_interface(local, sdata); 127 drv_remove_interface(local, sdata);
128 } 128 }
129 129
130 sdata = rtnl_dereference(local->monitor_sdata);
131 if (sdata)
132 drv_remove_interface(local, sdata);
133
130 /* stop hardware - this must stop RX */ 134 /* stop hardware - this must stop RX */
131 if (local->open_count) 135 if (local->open_count)
132 ieee80211_stop_device(local); 136 ieee80211_stop_device(local);
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index fbb1efdc4d04..6e4fd32c6617 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -17,6 +17,7 @@
17#include <net/mac80211.h> 17#include <net/mac80211.h>
18#include "ieee80211_i.h" 18#include "ieee80211_i.h"
19#include "sta_info.h" 19#include "sta_info.h"
20#include "driver-ops.h"
20 21
21struct rate_control_ref { 22struct rate_control_ref {
22 struct ieee80211_local *local; 23 struct ieee80211_local *local;
@@ -63,8 +64,7 @@ static inline void rate_control_rate_init(struct sta_info *sta)
63 64
64static inline void rate_control_rate_update(struct ieee80211_local *local, 65static inline void rate_control_rate_update(struct ieee80211_local *local,
65 struct ieee80211_supported_band *sband, 66 struct ieee80211_supported_band *sband,
66 struct sta_info *sta, u32 changed, 67 struct sta_info *sta, u32 changed)
67 enum nl80211_channel_type oper_chan_type)
68{ 68{
69 struct rate_control_ref *ref = local->rate_ctrl; 69 struct rate_control_ref *ref = local->rate_ctrl;
70 struct ieee80211_sta *ista = &sta->sta; 70 struct ieee80211_sta *ista = &sta->sta;
@@ -72,7 +72,8 @@ static inline void rate_control_rate_update(struct ieee80211_local *local,
72 72
73 if (ref && ref->ops->rate_update) 73 if (ref && ref->ops->rate_update)
74 ref->ops->rate_update(ref->priv, sband, ista, 74 ref->ops->rate_update(ref->priv, sband, ista,
75 priv_sta, changed, oper_chan_type); 75 priv_sta, changed);
76 drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
76} 77}
77 78
78static inline void *rate_control_alloc_sta(struct rate_control_ref *ref, 79static inline void *rate_control_alloc_sta(struct rate_control_ref *ref,
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index b39dda523f39..79633ae06fd6 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -334,14 +334,15 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
334 334
335 335
336static void 336static void
337calc_rate_durations(struct ieee80211_local *local, struct minstrel_rate *d, 337calc_rate_durations(enum ieee80211_band band,
338 struct minstrel_rate *d,
338 struct ieee80211_rate *rate) 339 struct ieee80211_rate *rate)
339{ 340{
340 int erp = !!(rate->flags & IEEE80211_RATE_ERP_G); 341 int erp = !!(rate->flags & IEEE80211_RATE_ERP_G);
341 342
342 d->perfect_tx_time = ieee80211_frame_duration(local, 1200, 343 d->perfect_tx_time = ieee80211_frame_duration(band, 1200,
343 rate->bitrate, erp, 1); 344 rate->bitrate, erp, 1);
344 d->ack_time = ieee80211_frame_duration(local, 10, 345 d->ack_time = ieee80211_frame_duration(band, 10,
345 rate->bitrate, erp, 1); 346 rate->bitrate, erp, 1);
346} 347}
347 348
@@ -379,14 +380,14 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
379{ 380{
380 struct minstrel_sta_info *mi = priv_sta; 381 struct minstrel_sta_info *mi = priv_sta;
381 struct minstrel_priv *mp = priv; 382 struct minstrel_priv *mp = priv;
382 struct ieee80211_local *local = hw_to_local(mp->hw);
383 struct ieee80211_rate *ctl_rate; 383 struct ieee80211_rate *ctl_rate;
384 unsigned int i, n = 0; 384 unsigned int i, n = 0;
385 unsigned int t_slot = 9; /* FIXME: get real slot time */ 385 unsigned int t_slot = 9; /* FIXME: get real slot time */
386 386
387 mi->lowest_rix = rate_lowest_index(sband, sta); 387 mi->lowest_rix = rate_lowest_index(sband, sta);
388 ctl_rate = &sband->bitrates[mi->lowest_rix]; 388 ctl_rate = &sband->bitrates[mi->lowest_rix];
389 mi->sp_ack_dur = ieee80211_frame_duration(local, 10, ctl_rate->bitrate, 389 mi->sp_ack_dur = ieee80211_frame_duration(sband->band, 10,
390 ctl_rate->bitrate,
390 !!(ctl_rate->flags & IEEE80211_RATE_ERP_G), 1); 391 !!(ctl_rate->flags & IEEE80211_RATE_ERP_G), 1);
391 392
392 for (i = 0; i < sband->n_bitrates; i++) { 393 for (i = 0; i < sband->n_bitrates; i++) {
@@ -402,7 +403,7 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
402 403
403 mr->rix = i; 404 mr->rix = i;
404 mr->bitrate = sband->bitrates[i].bitrate / 5; 405 mr->bitrate = sband->bitrates[i].bitrate / 5;
405 calc_rate_durations(local, mr, &sband->bitrates[i]); 406 calc_rate_durations(sband->band, mr, &sband->bitrates[i]);
406 407
407 /* calculate maximum number of retransmissions before 408 /* calculate maximum number of retransmissions before
408 * fallback (based on maximum segment size) */ 409 * fallback (based on maximum segment size) */
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 16e0b277b9a8..2d1acc6c5445 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -686,14 +686,12 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
686 686
687static void 687static void
688minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband, 688minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
689 struct ieee80211_sta *sta, void *priv_sta, 689 struct ieee80211_sta *sta, void *priv_sta)
690 enum nl80211_channel_type oper_chan_type)
691{ 690{
692 struct minstrel_priv *mp = priv; 691 struct minstrel_priv *mp = priv;
693 struct minstrel_ht_sta_priv *msp = priv_sta; 692 struct minstrel_ht_sta_priv *msp = priv_sta;
694 struct minstrel_ht_sta *mi = &msp->ht; 693 struct minstrel_ht_sta *mi = &msp->ht;
695 struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs; 694 struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs;
696 struct ieee80211_local *local = hw_to_local(mp->hw);
697 u16 sta_cap = sta->ht_cap.cap; 695 u16 sta_cap = sta->ht_cap.cap;
698 int n_supported = 0; 696 int n_supported = 0;
699 int ack_dur; 697 int ack_dur;
@@ -712,8 +710,8 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
712 memset(mi, 0, sizeof(*mi)); 710 memset(mi, 0, sizeof(*mi));
713 mi->stats_update = jiffies; 711 mi->stats_update = jiffies;
714 712
715 ack_dur = ieee80211_frame_duration(local, 10, 60, 1, 1); 713 ack_dur = ieee80211_frame_duration(sband->band, 10, 60, 1, 1);
716 mi->overhead = ieee80211_frame_duration(local, 0, 60, 1, 1) + ack_dur; 714 mi->overhead = ieee80211_frame_duration(sband->band, 0, 60, 1, 1) + ack_dur;
717 mi->overhead_rtscts = mi->overhead + 2 * ack_dur; 715 mi->overhead_rtscts = mi->overhead + 2 * ack_dur;
718 716
719 mi->avg_ampdu_len = MINSTREL_FRAC(1, 1); 717 mi->avg_ampdu_len = MINSTREL_FRAC(1, 1);
@@ -735,10 +733,6 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
735 if (sta_cap & IEEE80211_HT_CAP_LDPC_CODING) 733 if (sta_cap & IEEE80211_HT_CAP_LDPC_CODING)
736 mi->tx_flags |= IEEE80211_TX_CTL_LDPC; 734 mi->tx_flags |= IEEE80211_TX_CTL_LDPC;
737 735
738 if (oper_chan_type != NL80211_CHAN_HT40MINUS &&
739 oper_chan_type != NL80211_CHAN_HT40PLUS)
740 sta_cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
741
742 smps = (sta_cap & IEEE80211_HT_CAP_SM_PS) >> 736 smps = (sta_cap & IEEE80211_HT_CAP_SM_PS) >>
743 IEEE80211_HT_CAP_SM_PS_SHIFT; 737 IEEE80211_HT_CAP_SM_PS_SHIFT;
744 738
@@ -788,17 +782,15 @@ static void
788minstrel_ht_rate_init(void *priv, struct ieee80211_supported_band *sband, 782minstrel_ht_rate_init(void *priv, struct ieee80211_supported_band *sband,
789 struct ieee80211_sta *sta, void *priv_sta) 783 struct ieee80211_sta *sta, void *priv_sta)
790{ 784{
791 struct minstrel_priv *mp = priv; 785 minstrel_ht_update_caps(priv, sband, sta, priv_sta);
792
793 minstrel_ht_update_caps(priv, sband, sta, priv_sta, mp->hw->conf.channel_type);
794} 786}
795 787
796static void 788static void
797minstrel_ht_rate_update(void *priv, struct ieee80211_supported_band *sband, 789minstrel_ht_rate_update(void *priv, struct ieee80211_supported_band *sband,
798 struct ieee80211_sta *sta, void *priv_sta, 790 struct ieee80211_sta *sta, void *priv_sta,
799 u32 changed, enum nl80211_channel_type oper_chan_type) 791 u32 changed)
800{ 792{
801 minstrel_ht_update_caps(priv, sband, sta, priv_sta, oper_chan_type); 793 minstrel_ht_update_caps(priv, sband, sta, priv_sta);
802} 794}
803 795
804static void * 796static void *
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index d64e285400aa..d5ac02fe37ff 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -426,6 +426,7 @@ ieee80211_rx_h_passive_scan(struct ieee80211_rx_data *rx)
426 426
427 if (test_bit(SCAN_HW_SCANNING, &local->scanning) || 427 if (test_bit(SCAN_HW_SCANNING, &local->scanning) ||
428 test_bit(SCAN_SW_SCANNING, &local->scanning) || 428 test_bit(SCAN_SW_SCANNING, &local->scanning) ||
429 test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) ||
429 local->sched_scanning) 430 local->sched_scanning)
430 return ieee80211_scan_rx(rx->sdata, skb); 431 return ieee80211_scan_rx(rx->sdata, skb);
431 432
@@ -794,8 +795,7 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx)
794 795
795 /* reset session timer */ 796 /* reset session timer */
796 if (tid_agg_rx->timeout) 797 if (tid_agg_rx->timeout)
797 mod_timer(&tid_agg_rx->session_timer, 798 tid_agg_rx->last_rx = jiffies;
798 TU_TO_EXP_TIME(tid_agg_rx->timeout));
799 799
800 /* if this mpdu is fragmented - terminate rx aggregation session */ 800 /* if this mpdu is fragmented - terminate rx aggregation session */
801 sc = le16_to_cpu(hdr->seq_ctrl); 801 sc = le16_to_cpu(hdr->seq_ctrl);
@@ -2270,11 +2270,8 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2270 2270
2271 sband = rx->local->hw.wiphy->bands[status->band]; 2271 sband = rx->local->hw.wiphy->bands[status->band];
2272 2272
2273 rate_control_rate_update( 2273 rate_control_rate_update(local, sband, rx->sta,
2274 local, sband, rx->sta, 2274 IEEE80211_RC_SMPS_CHANGED);
2275 IEEE80211_RC_SMPS_CHANGED,
2276 ieee80211_get_tx_channel_type(
2277 local, local->_oper_channel_type));
2278 goto handled; 2275 goto handled;
2279 } 2276 }
2280 default: 2277 default:
@@ -2921,6 +2918,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
2921 local->dot11ReceivedFragmentCount++; 2918 local->dot11ReceivedFragmentCount++;
2922 2919
2923 if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) || 2920 if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
2921 test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning) ||
2924 test_bit(SCAN_SW_SCANNING, &local->scanning))) 2922 test_bit(SCAN_SW_SCANNING, &local->scanning)))
2925 status->rx_flags |= IEEE80211_RX_IN_SCAN; 2923 status->rx_flags |= IEEE80211_RX_IN_SCAN;
2926 2924
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index c70e17677135..8282284f835c 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -29,20 +29,6 @@
29#define IEEE80211_CHANNEL_TIME (HZ / 33) 29#define IEEE80211_CHANNEL_TIME (HZ / 33)
30#define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 8) 30#define IEEE80211_PASSIVE_CHANNEL_TIME (HZ / 8)
31 31
32struct ieee80211_bss *
33ieee80211_rx_bss_get(struct ieee80211_local *local, u8 *bssid, int freq,
34 u8 *ssid, u8 ssid_len)
35{
36 struct cfg80211_bss *cbss;
37
38 cbss = cfg80211_get_bss(local->hw.wiphy,
39 ieee80211_get_channel(local->hw.wiphy, freq),
40 bssid, ssid, ssid_len, 0, 0);
41 if (!cbss)
42 return NULL;
43 return (void *)cbss->priv;
44}
45
46static void ieee80211_rx_bss_free(struct cfg80211_bss *cbss) 32static void ieee80211_rx_bss_free(struct cfg80211_bss *cbss)
47{ 33{
48 struct ieee80211_bss *bss = (void *)cbss->priv; 34 struct ieee80211_bss *bss = (void *)cbss->priv;
@@ -387,6 +373,57 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
387 return 0; 373 return 0;
388} 374}
389 375
376static bool ieee80211_can_scan(struct ieee80211_local *local,
377 struct ieee80211_sub_if_data *sdata)
378{
379 if (!list_empty(&local->work_list))
380 return false;
381
382 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
383 sdata->u.mgd.flags & (IEEE80211_STA_BEACON_POLL |
384 IEEE80211_STA_CONNECTION_POLL))
385 return false;
386
387 return true;
388}
389
390void ieee80211_run_deferred_scan(struct ieee80211_local *local)
391{
392 lockdep_assert_held(&local->mtx);
393
394 if (!local->scan_req || local->scanning)
395 return;
396
397 if (!ieee80211_can_scan(local, local->scan_sdata))
398 return;
399
400 ieee80211_queue_delayed_work(&local->hw, &local->scan_work,
401 round_jiffies_relative(0));
402}
403
404static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
405 unsigned long *next_delay)
406{
407 int i;
408 struct ieee80211_sub_if_data *sdata = local->scan_sdata;
409 enum ieee80211_band band = local->hw.conf.channel->band;
410
411 for (i = 0; i < local->scan_req->n_ssids; i++)
412 ieee80211_send_probe_req(
413 sdata, NULL,
414 local->scan_req->ssids[i].ssid,
415 local->scan_req->ssids[i].ssid_len,
416 local->scan_req->ie, local->scan_req->ie_len,
417 local->scan_req->rates[band], false,
418 local->scan_req->no_cck);
419
420 /*
421 * After sending probe requests, wait for probe responses
422 * on the channel.
423 */
424 *next_delay = IEEE80211_CHANNEL_TIME;
425 local->next_scan_state = SCAN_DECISION;
426}
390 427
391static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata, 428static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
392 struct cfg80211_scan_request *req) 429 struct cfg80211_scan_request *req)
@@ -399,7 +436,7 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
399 if (local->scan_req) 436 if (local->scan_req)
400 return -EBUSY; 437 return -EBUSY;
401 438
402 if (!list_empty(&local->work_list)) { 439 if (!ieee80211_can_scan(local, sdata)) {
403 /* wait for the work to finish/time out */ 440 /* wait for the work to finish/time out */
404 local->scan_req = req; 441 local->scan_req = req;
405 local->scan_sdata = sdata; 442 local->scan_sdata = sdata;
@@ -438,10 +475,47 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
438 local->scan_req = req; 475 local->scan_req = req;
439 local->scan_sdata = sdata; 476 local->scan_sdata = sdata;
440 477
441 if (local->ops->hw_scan) 478 if (local->ops->hw_scan) {
442 __set_bit(SCAN_HW_SCANNING, &local->scanning); 479 __set_bit(SCAN_HW_SCANNING, &local->scanning);
443 else 480 } else if ((req->n_channels == 1) &&
481 (req->channels[0]->center_freq ==
482 local->hw.conf.channel->center_freq)) {
483
484 /* If we are scanning only on the current channel, then
485 * we do not need to stop normal activities
486 */
487 unsigned long next_delay;
488
489 __set_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning);
490
491 ieee80211_recalc_idle(local);
492
493 /* Notify driver scan is starting, keep order of operations
494 * same as normal software scan, in case that matters. */
495 drv_sw_scan_start(local);
496
497 ieee80211_configure_filter(local); /* accept probe-responses */
498
499 /* We need to ensure power level is at max for scanning. */
500 ieee80211_hw_config(local, 0);
501
502 if ((req->channels[0]->flags &
503 IEEE80211_CHAN_PASSIVE_SCAN) ||
504 !local->scan_req->n_ssids) {
505 next_delay = IEEE80211_PASSIVE_CHANNEL_TIME;
506 } else {
507 ieee80211_scan_state_send_probe(local, &next_delay);
508 next_delay = IEEE80211_CHANNEL_TIME;
509 }
510
511 /* Now, just wait a bit and we are all done! */
512 ieee80211_queue_delayed_work(&local->hw, &local->scan_work,
513 next_delay);
514 return 0;
515 } else {
516 /* Do normal software scan */
444 __set_bit(SCAN_SW_SCANNING, &local->scanning); 517 __set_bit(SCAN_SW_SCANNING, &local->scanning);
518 }
445 519
446 ieee80211_recalc_idle(local); 520 ieee80211_recalc_idle(local);
447 521
@@ -598,30 +672,6 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
598 local->next_scan_state = SCAN_SEND_PROBE; 672 local->next_scan_state = SCAN_SEND_PROBE;
599} 673}
600 674
601static void ieee80211_scan_state_send_probe(struct ieee80211_local *local,
602 unsigned long *next_delay)
603{
604 int i;
605 struct ieee80211_sub_if_data *sdata = local->scan_sdata;
606 enum ieee80211_band band = local->hw.conf.channel->band;
607
608 for (i = 0; i < local->scan_req->n_ssids; i++)
609 ieee80211_send_probe_req(
610 sdata, NULL,
611 local->scan_req->ssids[i].ssid,
612 local->scan_req->ssids[i].ssid_len,
613 local->scan_req->ie, local->scan_req->ie_len,
614 local->scan_req->rates[band], false,
615 local->scan_req->no_cck);
616
617 /*
618 * After sending probe requests, wait for probe responses
619 * on the channel.
620 */
621 *next_delay = IEEE80211_CHANNEL_TIME;
622 local->next_scan_state = SCAN_DECISION;
623}
624
625static void ieee80211_scan_state_suspend(struct ieee80211_local *local, 675static void ieee80211_scan_state_suspend(struct ieee80211_local *local,
626 unsigned long *next_delay) 676 unsigned long *next_delay)
627{ 677{
@@ -672,6 +722,12 @@ void ieee80211_scan_work(struct work_struct *work)
672 722
673 sdata = local->scan_sdata; 723 sdata = local->scan_sdata;
674 724
725 /* When scanning on-channel, the first-callback means completed. */
726 if (test_bit(SCAN_ONCHANNEL_SCANNING, &local->scanning)) {
727 aborted = test_and_clear_bit(SCAN_ABORTED, &local->scanning);
728 goto out_complete;
729 }
730
675 if (test_and_clear_bit(SCAN_COMPLETED, &local->scanning)) { 731 if (test_and_clear_bit(SCAN_COMPLETED, &local->scanning)) {
676 aborted = test_and_clear_bit(SCAN_ABORTED, &local->scanning); 732 aborted = test_and_clear_bit(SCAN_ABORTED, &local->scanning);
677 goto out_complete; 733 goto out_complete;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 38137cb5f6f0..97a9d6639fb9 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -1195,13 +1195,15 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
1195 ieee80211_is_qos_nullfunc(hdr->frame_control)) 1195 ieee80211_is_qos_nullfunc(hdr->frame_control))
1196 qoshdr = ieee80211_get_qos_ctl(hdr); 1196 qoshdr = ieee80211_get_qos_ctl(hdr);
1197 1197
1198 /* set EOSP for the frame */ 1198 /* end service period after last frame */
1199 if (reason == IEEE80211_FRAME_RELEASE_UAPSD && 1199 if (skb_queue_empty(&frames)) {
1200 qoshdr && skb_queue_empty(&frames)) 1200 if (reason == IEEE80211_FRAME_RELEASE_UAPSD &&
1201 *qoshdr |= IEEE80211_QOS_CTL_EOSP; 1201 qoshdr)
1202 1202 *qoshdr |= IEEE80211_QOS_CTL_EOSP;
1203 info->flags |= IEEE80211_TX_STATUS_EOSP | 1203
1204 IEEE80211_TX_CTL_REQ_TX_STATUS; 1204 info->flags |= IEEE80211_TX_STATUS_EOSP |
1205 IEEE80211_TX_CTL_REQ_TX_STATUS;
1206 }
1205 1207
1206 if (qoshdr) 1208 if (qoshdr)
1207 tids |= BIT(*qoshdr & IEEE80211_QOS_CTL_TID_MASK); 1209 tids |= BIT(*qoshdr & IEEE80211_QOS_CTL_TID_MASK);
@@ -1415,15 +1417,19 @@ int sta_info_move_state(struct sta_info *sta,
1415 if (sta->sta_state == IEEE80211_STA_AUTH) { 1417 if (sta->sta_state == IEEE80211_STA_AUTH) {
1416 set_bit(WLAN_STA_ASSOC, &sta->_flags); 1418 set_bit(WLAN_STA_ASSOC, &sta->_flags);
1417 } else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) { 1419 } else if (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
1418 if (sta->sdata->vif.type == NL80211_IFTYPE_AP) 1420 if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
1419 atomic_dec(&sta->sdata->u.ap.num_sta_authorized); 1421 (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1422 !sta->sdata->u.vlan.sta))
1423 atomic_dec(&sta->sdata->bss->num_mcast_sta);
1420 clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags); 1424 clear_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
1421 } 1425 }
1422 break; 1426 break;
1423 case IEEE80211_STA_AUTHORIZED: 1427 case IEEE80211_STA_AUTHORIZED:
1424 if (sta->sta_state == IEEE80211_STA_ASSOC) { 1428 if (sta->sta_state == IEEE80211_STA_ASSOC) {
1425 if (sta->sdata->vif.type == NL80211_IFTYPE_AP) 1429 if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
1426 atomic_inc(&sta->sdata->u.ap.num_sta_authorized); 1430 (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
1431 !sta->sdata->u.vlan.sta))
1432 atomic_inc(&sta->sdata->bss->num_mcast_sta);
1427 set_bit(WLAN_STA_AUTHORIZED, &sta->_flags); 1433 set_bit(WLAN_STA_AUTHORIZED, &sta->_flags);
1428 } 1434 }
1429 break; 1435 break;
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index ab0576827baf..f75f5d9ac06d 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -55,6 +55,7 @@
55 * @WLAN_STA_4ADDR_EVENT: 4-addr event was already sent for this frame. 55 * @WLAN_STA_4ADDR_EVENT: 4-addr event was already sent for this frame.
56 * @WLAN_STA_INSERTED: This station is inserted into the hash table. 56 * @WLAN_STA_INSERTED: This station is inserted into the hash table.
57 * @WLAN_STA_RATE_CONTROL: rate control was initialized for this station. 57 * @WLAN_STA_RATE_CONTROL: rate control was initialized for this station.
58 * @WLAN_STA_TOFFSET_KNOWN: toffset calculated for this station is valid.
58 */ 59 */
59enum ieee80211_sta_info_flags { 60enum ieee80211_sta_info_flags {
60 WLAN_STA_AUTH, 61 WLAN_STA_AUTH,
@@ -76,6 +77,7 @@ enum ieee80211_sta_info_flags {
76 WLAN_STA_4ADDR_EVENT, 77 WLAN_STA_4ADDR_EVENT,
77 WLAN_STA_INSERTED, 78 WLAN_STA_INSERTED,
78 WLAN_STA_RATE_CONTROL, 79 WLAN_STA_RATE_CONTROL,
80 WLAN_STA_TOFFSET_KNOWN,
79}; 81};
80 82
81#define STA_TID_NUM 16 83#define STA_TID_NUM 16
@@ -101,6 +103,7 @@ enum ieee80211_sta_info_flags {
101 * @dialog_token: dialog token for aggregation session 103 * @dialog_token: dialog token for aggregation session
102 * @timeout: session timeout value to be filled in ADDBA requests 104 * @timeout: session timeout value to be filled in ADDBA requests
103 * @state: session state (see above) 105 * @state: session state (see above)
106 * @last_tx: jiffies of last tx activity
104 * @stop_initiator: initiator of a session stop 107 * @stop_initiator: initiator of a session stop
105 * @tx_stop: TX DelBA frame when stopping 108 * @tx_stop: TX DelBA frame when stopping
106 * @buf_size: reorder buffer size at receiver 109 * @buf_size: reorder buffer size at receiver
@@ -122,6 +125,7 @@ struct tid_ampdu_tx {
122 struct timer_list addba_resp_timer; 125 struct timer_list addba_resp_timer;
123 struct sk_buff_head pending; 126 struct sk_buff_head pending;
124 unsigned long state; 127 unsigned long state;
128 unsigned long last_tx;
125 u16 timeout; 129 u16 timeout;
126 u8 dialog_token; 130 u8 dialog_token;
127 u8 stop_initiator; 131 u8 stop_initiator;
@@ -139,6 +143,7 @@ struct tid_ampdu_tx {
139 * @reorder_time: jiffies when skb was added 143 * @reorder_time: jiffies when skb was added
140 * @session_timer: check if peer keeps Tx-ing on the TID (by timeout value) 144 * @session_timer: check if peer keeps Tx-ing on the TID (by timeout value)
141 * @reorder_timer: releases expired frames from the reorder buffer. 145 * @reorder_timer: releases expired frames from the reorder buffer.
146 * @last_rx: jiffies of last rx activity
142 * @head_seq_num: head sequence number in reordering buffer. 147 * @head_seq_num: head sequence number in reordering buffer.
143 * @stored_mpdu_num: number of MPDUs in reordering buffer 148 * @stored_mpdu_num: number of MPDUs in reordering buffer
144 * @ssn: Starting Sequence Number expected to be aggregated. 149 * @ssn: Starting Sequence Number expected to be aggregated.
@@ -163,6 +168,7 @@ struct tid_ampdu_rx {
163 unsigned long *reorder_time; 168 unsigned long *reorder_time;
164 struct timer_list session_timer; 169 struct timer_list session_timer;
165 struct timer_list reorder_timer; 170 struct timer_list reorder_timer;
171 unsigned long last_rx;
166 u16 head_seq_num; 172 u16 head_seq_num;
167 u16 stored_mpdu_num; 173 u16 stored_mpdu_num;
168 u16 ssn; 174 u16 ssn;
@@ -264,6 +270,7 @@ struct sta_ampdu_mlme {
264 * @plink_timeout: timeout of peer link 270 * @plink_timeout: timeout of peer link
265 * @plink_timer: peer link watch timer 271 * @plink_timer: peer link watch timer
266 * @plink_timer_was_running: used by suspend/resume to restore timers 272 * @plink_timer_was_running: used by suspend/resume to restore timers
273 * @t_offset: timing offset relative to this host
267 * @debugfs: debug filesystem info 274 * @debugfs: debug filesystem info
268 * @dead: set to true when sta is unlinked 275 * @dead: set to true when sta is unlinked
269 * @uploaded: set to true when sta is uploaded to the driver 276 * @uploaded: set to true when sta is uploaded to the driver
@@ -353,6 +360,8 @@ struct sta_info {
353 enum nl80211_plink_state plink_state; 360 enum nl80211_plink_state plink_state;
354 u32 plink_timeout; 361 u32 plink_timeout;
355 struct timer_list plink_timer; 362 struct timer_list plink_timer;
363 s64 t_offset;
364 s64 t_offset_setpoint;
356#endif 365#endif
357 366
358#ifdef CONFIG_MAC80211_DEBUGFS 367#ifdef CONFIG_MAC80211_DEBUGFS
@@ -365,6 +374,8 @@ struct sta_info {
365 unsigned int lost_packets; 374 unsigned int lost_packets;
366 unsigned int beacon_loss_count; 375 unsigned int beacon_loss_count;
367 376
377 bool supports_40mhz;
378
368 /* keep last! */ 379 /* keep last! */
369 struct ieee80211_sta sta; 380 struct ieee80211_sta sta;
370}; 381};
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 5f8f89e89d6b..05f257aa2e08 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -355,7 +355,13 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
355 int rtap_len; 355 int rtap_len;
356 356
357 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { 357 for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
358 if (info->status.rates[i].idx < 0) { 358 if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
359 !(info->flags & IEEE80211_TX_STAT_AMPDU)) {
360 /* just the first aggr frame carry status info */
361 info->status.rates[i].idx = -1;
362 info->status.rates[i].count = 0;
363 break;
364 } else if (info->status.rates[i].idx < 0) {
359 break; 365 break;
360 } else if (i >= hw->max_report_rates) { 366 } else if (i >= hw->max_report_rates) {
361 /* the HW cannot have attempted that rate */ 367 /* the HW cannot have attempted that rate */
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index e76facc69e95..d67d36f57d78 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -159,7 +159,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
159 /* Time needed to transmit ACK 159 /* Time needed to transmit ACK
160 * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up 160 * (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up
161 * to closest integer */ 161 * to closest integer */
162 dur = ieee80211_frame_duration(local, 10, rate, erp, 162 dur = ieee80211_frame_duration(sband->band, 10, rate, erp,
163 tx->sdata->vif.bss_conf.use_short_preamble); 163 tx->sdata->vif.bss_conf.use_short_preamble);
164 164
165 if (next_frag_len) { 165 if (next_frag_len) {
@@ -167,7 +167,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
167 * transmit next fragment plus ACK and 2 x SIFS. */ 167 * transmit next fragment plus ACK and 2 x SIFS. */
168 dur *= 2; /* ACK + SIFS */ 168 dur *= 2; /* ACK + SIFS */
169 /* next fragment */ 169 /* next fragment */
170 dur += ieee80211_frame_duration(local, next_frag_len, 170 dur += ieee80211_frame_duration(sband->band, next_frag_len,
171 txrate->bitrate, erp, 171 txrate->bitrate, erp,
172 tx->sdata->vif.bss_conf.use_short_preamble); 172 tx->sdata->vif.bss_conf.use_short_preamble);
173 } 173 }
@@ -230,9 +230,9 @@ ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx)
230 * changed via debugfs, user needs to reassociate manually to have 230 * changed via debugfs, user needs to reassociate manually to have
231 * everything in sync. 231 * everything in sync.
232 */ 232 */
233 if ((ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED) 233 if ((ifmgd->flags & IEEE80211_STA_UAPSD_ENABLED) &&
234 && (ifmgd->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) 234 (ifmgd->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) &&
235 && skb_get_queue_mapping(tx->skb) == 0) 235 skb_get_queue_mapping(tx->skb) == IEEE80211_AC_VO)
236 return TX_CONTINUE; 236 return TX_CONTINUE;
237 237
238 if (local->hw.conf.flags & IEEE80211_CONF_PS) { 238 if (local->hw.conf.flags & IEEE80211_CONF_PS) {
@@ -306,7 +306,7 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
306 } 306 }
307 } else if (unlikely(tx->sdata->vif.type == NL80211_IFTYPE_AP && 307 } else if (unlikely(tx->sdata->vif.type == NL80211_IFTYPE_AP &&
308 ieee80211_is_data(hdr->frame_control) && 308 ieee80211_is_data(hdr->frame_control) &&
309 !atomic_read(&tx->sdata->u.ap.num_sta_authorized))) { 309 !atomic_read(&tx->sdata->u.ap.num_mcast_sta))) {
310 /* 310 /*
311 * No associated STAs - no need to send multicast 311 * No associated STAs - no need to send multicast
312 * frames. 312 * frames.
@@ -400,6 +400,8 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
400 return TX_CONTINUE; 400 return TX_CONTINUE;
401 401
402 info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM; 402 info->flags |= IEEE80211_TX_CTL_SEND_AFTER_DTIM;
403 if (tx->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
404 info->hw_queue = tx->sdata->vif.cab_queue;
403 405
404 /* device releases frame after DTIM beacon */ 406 /* device releases frame after DTIM beacon */
405 if (!(tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING)) 407 if (!(tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING))
@@ -1118,8 +1120,7 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
1118 1120
1119 /* reset session timer */ 1121 /* reset session timer */
1120 if (reset_agg_timer && tid_tx->timeout) 1122 if (reset_agg_timer && tid_tx->timeout)
1121 mod_timer(&tid_tx->session_timer, 1123 tid_tx->last_tx = jiffies;
1122 TU_TO_EXP_TIME(tid_tx->timeout));
1123 1124
1124 return queued; 1125 return queued;
1125} 1126}
@@ -1216,11 +1217,19 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
1216 bool txpending) 1217 bool txpending)
1217{ 1218{
1218 struct sk_buff *skb, *tmp; 1219 struct sk_buff *skb, *tmp;
1219 struct ieee80211_tx_info *info;
1220 unsigned long flags; 1220 unsigned long flags;
1221 1221
1222 skb_queue_walk_safe(skbs, skb, tmp) { 1222 skb_queue_walk_safe(skbs, skb, tmp) {
1223 int q = skb_get_queue_mapping(skb); 1223 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1224 int q = info->hw_queue;
1225
1226#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
1227 if (WARN_ON_ONCE(q >= local->hw.queues)) {
1228 __skb_unlink(skb, skbs);
1229 dev_kfree_skb(skb);
1230 continue;
1231 }
1232#endif
1224 1233
1225 spin_lock_irqsave(&local->queue_stop_reason_lock, flags); 1234 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
1226 if (local->queue_stop_reasons[q] || 1235 if (local->queue_stop_reasons[q] ||
@@ -1242,7 +1251,6 @@ static bool ieee80211_tx_frags(struct ieee80211_local *local,
1242 } 1251 }
1243 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); 1252 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
1244 1253
1245 info = IEEE80211_SKB_CB(skb);
1246 info->control.vif = vif; 1254 info->control.vif = vif;
1247 info->control.sta = sta; 1255 info->control.sta = sta;
1248 1256
@@ -1285,8 +1293,16 @@ static bool __ieee80211_tx(struct ieee80211_local *local,
1285 1293
1286 switch (sdata->vif.type) { 1294 switch (sdata->vif.type) {
1287 case NL80211_IFTYPE_MONITOR: 1295 case NL80211_IFTYPE_MONITOR:
1288 sdata = NULL; 1296 sdata = rcu_dereference(local->monitor_sdata);
1289 vif = NULL; 1297 if (sdata) {
1298 vif = &sdata->vif;
1299 info->hw_queue =
1300 vif->hw_queue[skb_get_queue_mapping(skb)];
1301 } else if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) {
1302 dev_kfree_skb(skb);
1303 return true;
1304 } else
1305 vif = NULL;
1290 break; 1306 break;
1291 case NL80211_IFTYPE_AP_VLAN: 1307 case NL80211_IFTYPE_AP_VLAN:
1292 sdata = container_of(sdata->bss, 1308 sdata = container_of(sdata->bss,
@@ -1401,6 +1417,12 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata,
1401 tx.channel = local->hw.conf.channel; 1417 tx.channel = local->hw.conf.channel;
1402 info->band = tx.channel->band; 1418 info->band = tx.channel->band;
1403 1419
1420 /* set up hw_queue value early */
1421 if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) ||
1422 !(local->hw.flags & IEEE80211_HW_QUEUE_CONTROL))
1423 info->hw_queue =
1424 sdata->vif.hw_queue[skb_get_queue_mapping(skb)];
1425
1404 if (!invoke_tx_handlers(&tx)) 1426 if (!invoke_tx_handlers(&tx))
1405 result = __ieee80211_tx(local, &tx.skbs, led_len, 1427 result = __ieee80211_tx(local, &tx.skbs, led_len,
1406 tx.sta, txpending); 1428 tx.sta, txpending);
@@ -1469,12 +1491,12 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb)
1469 1491
1470 if (ieee80211_vif_is_mesh(&sdata->vif) && 1492 if (ieee80211_vif_is_mesh(&sdata->vif) &&
1471 ieee80211_is_data(hdr->frame_control) && 1493 ieee80211_is_data(hdr->frame_control) &&
1472 !is_multicast_ether_addr(hdr->addr1)) 1494 !is_multicast_ether_addr(hdr->addr1) &&
1473 if (mesh_nexthop_resolve(skb, sdata)) { 1495 mesh_nexthop_resolve(skb, sdata)) {
1474 /* skb queued: don't free */ 1496 /* skb queued: don't free */
1475 rcu_read_unlock(); 1497 rcu_read_unlock();
1476 return; 1498 return;
1477 } 1499 }
1478 1500
1479 ieee80211_set_qos_hdr(sdata, skb); 1501 ieee80211_set_qos_hdr(sdata, skb);
1480 ieee80211_tx(sdata, skb, false); 1502 ieee80211_tx(sdata, skb, false);
@@ -1930,7 +1952,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1930 wme_sta = true; 1952 wme_sta = true;
1931 1953
1932 /* receiver and we are QoS enabled, use a QoS type frame */ 1954 /* receiver and we are QoS enabled, use a QoS type frame */
1933 if (wme_sta && local->hw.queues >= 4) { 1955 if (wme_sta && local->hw.queues >= IEEE80211_NUM_ACS) {
1934 fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 1956 fc |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1935 hdrlen += 2; 1957 hdrlen += 2;
1936 } 1958 }
@@ -2171,7 +2193,6 @@ static bool ieee80211_tx_pending_skb(struct ieee80211_local *local,
2171void ieee80211_tx_pending(unsigned long data) 2193void ieee80211_tx_pending(unsigned long data)
2172{ 2194{
2173 struct ieee80211_local *local = (struct ieee80211_local *)data; 2195 struct ieee80211_local *local = (struct ieee80211_local *)data;
2174 struct ieee80211_sub_if_data *sdata;
2175 unsigned long flags; 2196 unsigned long flags;
2176 int i; 2197 int i;
2177 bool txok; 2198 bool txok;
@@ -2208,8 +2229,7 @@ void ieee80211_tx_pending(unsigned long data)
2208 } 2229 }
2209 2230
2210 if (skb_queue_empty(&local->pending[i])) 2231 if (skb_queue_empty(&local->pending[i]))
2211 list_for_each_entry_rcu(sdata, &local->interfaces, list) 2232 ieee80211_propagate_queue_wake(local, i);
2212 netif_wake_subqueue(sdata->dev, i);
2213 } 2233 }
2214 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); 2234 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
2215 2235
@@ -2375,6 +2395,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2375 IEEE80211_STYPE_BEACON); 2395 IEEE80211_STYPE_BEACON);
2376 } else if (ieee80211_vif_is_mesh(&sdata->vif)) { 2396 } else if (ieee80211_vif_is_mesh(&sdata->vif)) {
2377 struct ieee80211_mgmt *mgmt; 2397 struct ieee80211_mgmt *mgmt;
2398 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
2378 u8 *pos; 2399 u8 *pos;
2379 int hdr_len = offsetof(struct ieee80211_mgmt, u.beacon) + 2400 int hdr_len = offsetof(struct ieee80211_mgmt, u.beacon) +
2380 sizeof(mgmt->u.beacon); 2401 sizeof(mgmt->u.beacon);
@@ -2384,6 +2405,10 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2384 goto out; 2405 goto out;
2385#endif 2406#endif
2386 2407
2408 if (ifmsh->sync_ops)
2409 ifmsh->sync_ops->adjust_tbtt(
2410 sdata);
2411
2387 skb = dev_alloc_skb(local->tx_headroom + 2412 skb = dev_alloc_skb(local->tx_headroom +
2388 hdr_len + 2413 hdr_len +
2389 2 + /* NULL SSID */ 2414 2 + /* NULL SSID */
@@ -2391,7 +2416,7 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2391 2 + 3 + /* DS params */ 2416 2 + 3 + /* DS params */
2392 2 + (IEEE80211_MAX_SUPP_RATES - 8) + 2417 2 + (IEEE80211_MAX_SUPP_RATES - 8) +
2393 2 + sizeof(struct ieee80211_ht_cap) + 2418 2 + sizeof(struct ieee80211_ht_cap) +
2394 2 + sizeof(struct ieee80211_ht_info) + 2419 2 + sizeof(struct ieee80211_ht_operation) +
2395 2 + sdata->u.mesh.mesh_id_len + 2420 2 + sdata->u.mesh.mesh_id_len +
2396 2 + sizeof(struct ieee80211_meshconf_ie) + 2421 2 + sizeof(struct ieee80211_meshconf_ie) +
2397 sdata->u.mesh.ie_len); 2422 sdata->u.mesh.ie_len);
@@ -2415,12 +2440,12 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
2415 *pos++ = WLAN_EID_SSID; 2440 *pos++ = WLAN_EID_SSID;
2416 *pos++ = 0x0; 2441 *pos++ = 0x0;
2417 2442
2418 if (ieee80211_add_srates_ie(&sdata->vif, skb) || 2443 if (ieee80211_add_srates_ie(&sdata->vif, skb, true) ||
2419 mesh_add_ds_params_ie(skb, sdata) || 2444 mesh_add_ds_params_ie(skb, sdata) ||
2420 ieee80211_add_ext_srates_ie(&sdata->vif, skb) || 2445 ieee80211_add_ext_srates_ie(&sdata->vif, skb, true) ||
2421 mesh_add_rsn_ie(skb, sdata) || 2446 mesh_add_rsn_ie(skb, sdata) ||
2422 mesh_add_ht_cap_ie(skb, sdata) || 2447 mesh_add_ht_cap_ie(skb, sdata) ||
2423 mesh_add_ht_info_ie(skb, sdata) || 2448 mesh_add_ht_oper_ie(skb, sdata) ||
2424 mesh_add_meshid_ie(skb, sdata) || 2449 mesh_add_meshid_ie(skb, sdata) ||
2425 mesh_add_meshconf_ie(skb, sdata) || 2450 mesh_add_meshconf_ie(skb, sdata) ||
2426 mesh_add_vendor_ies(skb, sdata)) { 2451 mesh_add_vendor_ies(skb, sdata)) {
@@ -2604,7 +2629,7 @@ struct sk_buff *ieee80211_probereq_get(struct ieee80211_hw *hw,
2604 pos = skb_put(skb, ie_ssid_len); 2629 pos = skb_put(skb, ie_ssid_len);
2605 *pos++ = WLAN_EID_SSID; 2630 *pos++ = WLAN_EID_SSID;
2606 *pos++ = ssid_len; 2631 *pos++ = ssid_len;
2607 if (ssid) 2632 if (ssid_len)
2608 memcpy(pos, ssid, ssid_len); 2633 memcpy(pos, ssid, ssid_len);
2609 pos += ssid_len; 2634 pos += ssid_len;
2610 2635
@@ -2711,11 +2736,13 @@ EXPORT_SYMBOL(ieee80211_get_buffered_bc);
2711void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata, 2736void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata,
2712 struct sk_buff *skb, int tid) 2737 struct sk_buff *skb, int tid)
2713{ 2738{
2739 int ac = ieee802_1d_to_ac[tid];
2740
2714 skb_set_mac_header(skb, 0); 2741 skb_set_mac_header(skb, 0);
2715 skb_set_network_header(skb, 0); 2742 skb_set_network_header(skb, 0);
2716 skb_set_transport_header(skb, 0); 2743 skb_set_transport_header(skb, 0);
2717 2744
2718 skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]); 2745 skb_set_queue_mapping(skb, ac);
2719 skb->priority = tid; 2746 skb->priority = tid;
2720 2747
2721 /* 2748 /*
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 32f7a3b3d43c..d9a747d387f0 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -106,7 +106,7 @@ void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx)
106 } 106 }
107} 107}
108 108
109int ieee80211_frame_duration(struct ieee80211_local *local, size_t len, 109int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
110 int rate, int erp, int short_preamble) 110 int rate, int erp, int short_preamble)
111{ 111{
112 int dur; 112 int dur;
@@ -120,7 +120,7 @@ int ieee80211_frame_duration(struct ieee80211_local *local, size_t len,
120 * DIV_ROUND_UP() operations. 120 * DIV_ROUND_UP() operations.
121 */ 121 */
122 122
123 if (local->hw.conf.channel->band == IEEE80211_BAND_5GHZ || erp) { 123 if (band == IEEE80211_BAND_5GHZ || erp) {
124 /* 124 /*
125 * OFDM: 125 * OFDM:
126 * 126 *
@@ -162,10 +162,10 @@ int ieee80211_frame_duration(struct ieee80211_local *local, size_t len,
162/* Exported duration function for driver use */ 162/* Exported duration function for driver use */
163__le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw, 163__le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
164 struct ieee80211_vif *vif, 164 struct ieee80211_vif *vif,
165 enum ieee80211_band band,
165 size_t frame_len, 166 size_t frame_len,
166 struct ieee80211_rate *rate) 167 struct ieee80211_rate *rate)
167{ 168{
168 struct ieee80211_local *local = hw_to_local(hw);
169 struct ieee80211_sub_if_data *sdata; 169 struct ieee80211_sub_if_data *sdata;
170 u16 dur; 170 u16 dur;
171 int erp; 171 int erp;
@@ -179,7 +179,7 @@ __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
179 erp = rate->flags & IEEE80211_RATE_ERP_G; 179 erp = rate->flags & IEEE80211_RATE_ERP_G;
180 } 180 }
181 181
182 dur = ieee80211_frame_duration(local, frame_len, rate->bitrate, erp, 182 dur = ieee80211_frame_duration(band, frame_len, rate->bitrate, erp,
183 short_preamble); 183 short_preamble);
184 184
185 return cpu_to_le16(dur); 185 return cpu_to_le16(dur);
@@ -198,7 +198,7 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
198 u16 dur; 198 u16 dur;
199 struct ieee80211_supported_band *sband; 199 struct ieee80211_supported_band *sband;
200 200
201 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 201 sband = local->hw.wiphy->bands[frame_txctl->band];
202 202
203 short_preamble = false; 203 short_preamble = false;
204 204
@@ -213,13 +213,13 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
213 } 213 }
214 214
215 /* CTS duration */ 215 /* CTS duration */
216 dur = ieee80211_frame_duration(local, 10, rate->bitrate, 216 dur = ieee80211_frame_duration(sband->band, 10, rate->bitrate,
217 erp, short_preamble); 217 erp, short_preamble);
218 /* Data frame duration */ 218 /* Data frame duration */
219 dur += ieee80211_frame_duration(local, frame_len, rate->bitrate, 219 dur += ieee80211_frame_duration(sband->band, frame_len, rate->bitrate,
220 erp, short_preamble); 220 erp, short_preamble);
221 /* ACK duration */ 221 /* ACK duration */
222 dur += ieee80211_frame_duration(local, 10, rate->bitrate, 222 dur += ieee80211_frame_duration(sband->band, 10, rate->bitrate,
223 erp, short_preamble); 223 erp, short_preamble);
224 224
225 return cpu_to_le16(dur); 225 return cpu_to_le16(dur);
@@ -239,7 +239,7 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
239 u16 dur; 239 u16 dur;
240 struct ieee80211_supported_band *sband; 240 struct ieee80211_supported_band *sband;
241 241
242 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 242 sband = local->hw.wiphy->bands[frame_txctl->band];
243 243
244 short_preamble = false; 244 short_preamble = false;
245 245
@@ -253,11 +253,11 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
253 } 253 }
254 254
255 /* Data frame duration */ 255 /* Data frame duration */
256 dur = ieee80211_frame_duration(local, frame_len, rate->bitrate, 256 dur = ieee80211_frame_duration(sband->band, frame_len, rate->bitrate,
257 erp, short_preamble); 257 erp, short_preamble);
258 if (!(frame_txctl->flags & IEEE80211_TX_CTL_NO_ACK)) { 258 if (!(frame_txctl->flags & IEEE80211_TX_CTL_NO_ACK)) {
259 /* ACK duration */ 259 /* ACK duration */
260 dur += ieee80211_frame_duration(local, 10, rate->bitrate, 260 dur += ieee80211_frame_duration(sband->band, 10, rate->bitrate,
261 erp, short_preamble); 261 erp, short_preamble);
262 } 262 }
263 263
@@ -265,17 +265,45 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
265} 265}
266EXPORT_SYMBOL(ieee80211_ctstoself_duration); 266EXPORT_SYMBOL(ieee80211_ctstoself_duration);
267 267
268void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue)
269{
270 struct ieee80211_sub_if_data *sdata;
271
272 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
273 int ac;
274
275 if (test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
276 continue;
277
278 if (sdata->vif.cab_queue != IEEE80211_INVAL_HW_QUEUE &&
279 local->queue_stop_reasons[sdata->vif.cab_queue] != 0)
280 continue;
281
282 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
283 int ac_queue = sdata->vif.hw_queue[ac];
284
285 if (ac_queue == queue ||
286 (sdata->vif.cab_queue == queue &&
287 local->queue_stop_reasons[ac_queue] == 0 &&
288 skb_queue_empty(&local->pending[ac_queue])))
289 netif_wake_subqueue(sdata->dev, ac);
290 }
291 }
292}
293
268static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue, 294static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
269 enum queue_stop_reason reason) 295 enum queue_stop_reason reason)
270{ 296{
271 struct ieee80211_local *local = hw_to_local(hw); 297 struct ieee80211_local *local = hw_to_local(hw);
272 struct ieee80211_sub_if_data *sdata;
273 298
274 trace_wake_queue(local, queue, reason); 299 trace_wake_queue(local, queue, reason);
275 300
276 if (WARN_ON(queue >= hw->queues)) 301 if (WARN_ON(queue >= hw->queues))
277 return; 302 return;
278 303
304 if (!test_bit(reason, &local->queue_stop_reasons[queue]))
305 return;
306
279 __clear_bit(reason, &local->queue_stop_reasons[queue]); 307 __clear_bit(reason, &local->queue_stop_reasons[queue]);
280 308
281 if (local->queue_stop_reasons[queue] != 0) 309 if (local->queue_stop_reasons[queue] != 0)
@@ -284,11 +312,7 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
284 312
285 if (skb_queue_empty(&local->pending[queue])) { 313 if (skb_queue_empty(&local->pending[queue])) {
286 rcu_read_lock(); 314 rcu_read_lock();
287 list_for_each_entry_rcu(sdata, &local->interfaces, list) { 315 ieee80211_propagate_queue_wake(local, queue);
288 if (test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
289 continue;
290 netif_wake_subqueue(sdata->dev, queue);
291 }
292 rcu_read_unlock(); 316 rcu_read_unlock();
293 } else 317 } else
294 tasklet_schedule(&local->tx_pending_tasklet); 318 tasklet_schedule(&local->tx_pending_tasklet);
@@ -323,11 +347,21 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
323 if (WARN_ON(queue >= hw->queues)) 347 if (WARN_ON(queue >= hw->queues))
324 return; 348 return;
325 349
350 if (test_bit(reason, &local->queue_stop_reasons[queue]))
351 return;
352
326 __set_bit(reason, &local->queue_stop_reasons[queue]); 353 __set_bit(reason, &local->queue_stop_reasons[queue]);
327 354
328 rcu_read_lock(); 355 rcu_read_lock();
329 list_for_each_entry_rcu(sdata, &local->interfaces, list) 356 list_for_each_entry_rcu(sdata, &local->interfaces, list) {
330 netif_stop_subqueue(sdata->dev, queue); 357 int ac;
358
359 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
360 if (sdata->vif.hw_queue[ac] == queue ||
361 sdata->vif.cab_queue == queue)
362 netif_stop_subqueue(sdata->dev, ac);
363 }
364 }
331 rcu_read_unlock(); 365 rcu_read_unlock();
332} 366}
333 367
@@ -354,8 +388,8 @@ void ieee80211_add_pending_skb(struct ieee80211_local *local,
354{ 388{
355 struct ieee80211_hw *hw = &local->hw; 389 struct ieee80211_hw *hw = &local->hw;
356 unsigned long flags; 390 unsigned long flags;
357 int queue = skb_get_queue_mapping(skb);
358 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 391 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
392 int queue = info->hw_queue;
359 393
360 if (WARN_ON(!info->control.vif)) { 394 if (WARN_ON(!info->control.vif)) {
361 kfree_skb(skb); 395 kfree_skb(skb);
@@ -379,10 +413,6 @@ void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
379 int queue, i; 413 int queue, i;
380 414
381 spin_lock_irqsave(&local->queue_stop_reason_lock, flags); 415 spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
382 for (i = 0; i < hw->queues; i++)
383 __ieee80211_stop_queue(hw, i,
384 IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
385
386 while ((skb = skb_dequeue(skbs))) { 416 while ((skb = skb_dequeue(skbs))) {
387 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 417 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
388 418
@@ -391,7 +421,11 @@ void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
391 continue; 421 continue;
392 } 422 }
393 423
394 queue = skb_get_queue_mapping(skb); 424 queue = info->hw_queue;
425
426 __ieee80211_stop_queue(hw, queue,
427 IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
428
395 __skb_queue_tail(&local->pending[queue], skb); 429 __skb_queue_tail(&local->pending[queue], skb);
396 } 430 }
397 431
@@ -404,12 +438,6 @@ void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
404 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); 438 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
405} 439}
406 440
407void ieee80211_add_pending_skbs(struct ieee80211_local *local,
408 struct sk_buff_head *skbs)
409{
410 ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL);
411}
412
413void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw, 441void ieee80211_stop_queues_by_reason(struct ieee80211_hw *hw,
414 enum queue_stop_reason reason) 442 enum queue_stop_reason reason)
415{ 443{
@@ -684,9 +712,9 @@ u32 ieee802_11_parse_elems_crc(u8 *start, size_t len,
684 else 712 else
685 elem_parse_failed = true; 713 elem_parse_failed = true;
686 break; 714 break;
687 case WLAN_EID_HT_INFORMATION: 715 case WLAN_EID_HT_OPERATION:
688 if (elen >= sizeof(struct ieee80211_ht_info)) 716 if (elen >= sizeof(struct ieee80211_ht_operation))
689 elems->ht_info_elem = (void *)pos; 717 elems->ht_operation = (void *)pos;
690 else 718 else
691 elem_parse_failed = true; 719 elem_parse_failed = true;
692 break; 720 break;
@@ -775,19 +803,22 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
775{ 803{
776 struct ieee80211_local *local = sdata->local; 804 struct ieee80211_local *local = sdata->local;
777 struct ieee80211_tx_queue_params qparam; 805 struct ieee80211_tx_queue_params qparam;
778 int queue; 806 int ac;
779 bool use_11b; 807 bool use_11b;
780 int aCWmin, aCWmax; 808 int aCWmin, aCWmax;
781 809
782 if (!local->ops->conf_tx) 810 if (!local->ops->conf_tx)
783 return; 811 return;
784 812
813 if (local->hw.queues < IEEE80211_NUM_ACS)
814 return;
815
785 memset(&qparam, 0, sizeof(qparam)); 816 memset(&qparam, 0, sizeof(qparam));
786 817
787 use_11b = (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) && 818 use_11b = (local->hw.conf.channel->band == IEEE80211_BAND_2GHZ) &&
788 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE); 819 !(sdata->flags & IEEE80211_SDATA_OPERATING_GMODE);
789 820
790 for (queue = 0; queue < local->hw.queues; queue++) { 821 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
791 /* Set defaults according to 802.11-2007 Table 7-37 */ 822 /* Set defaults according to 802.11-2007 Table 7-37 */
792 aCWmax = 1023; 823 aCWmax = 1023;
793 if (use_11b) 824 if (use_11b)
@@ -795,21 +826,21 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
795 else 826 else
796 aCWmin = 15; 827 aCWmin = 15;
797 828
798 switch (queue) { 829 switch (ac) {
799 case 3: /* AC_BK */ 830 case IEEE80211_AC_BK:
800 qparam.cw_max = aCWmax; 831 qparam.cw_max = aCWmax;
801 qparam.cw_min = aCWmin; 832 qparam.cw_min = aCWmin;
802 qparam.txop = 0; 833 qparam.txop = 0;
803 qparam.aifs = 7; 834 qparam.aifs = 7;
804 break; 835 break;
805 default: /* never happens but let's not leave undefined */ 836 default: /* never happens but let's not leave undefined */
806 case 2: /* AC_BE */ 837 case IEEE80211_AC_BE:
807 qparam.cw_max = aCWmax; 838 qparam.cw_max = aCWmax;
808 qparam.cw_min = aCWmin; 839 qparam.cw_min = aCWmin;
809 qparam.txop = 0; 840 qparam.txop = 0;
810 qparam.aifs = 3; 841 qparam.aifs = 3;
811 break; 842 break;
812 case 1: /* AC_VI */ 843 case IEEE80211_AC_VI:
813 qparam.cw_max = aCWmin; 844 qparam.cw_max = aCWmin;
814 qparam.cw_min = (aCWmin + 1) / 2 - 1; 845 qparam.cw_min = (aCWmin + 1) / 2 - 1;
815 if (use_11b) 846 if (use_11b)
@@ -818,7 +849,7 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
818 qparam.txop = 3008/32; 849 qparam.txop = 3008/32;
819 qparam.aifs = 2; 850 qparam.aifs = 2;
820 break; 851 break;
821 case 0: /* AC_VO */ 852 case IEEE80211_AC_VO:
822 qparam.cw_max = (aCWmin + 1) / 2 - 1; 853 qparam.cw_max = (aCWmin + 1) / 2 - 1;
823 qparam.cw_min = (aCWmin + 1) / 4 - 1; 854 qparam.cw_min = (aCWmin + 1) / 4 - 1;
824 if (use_11b) 855 if (use_11b)
@@ -831,8 +862,8 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
831 862
832 qparam.uapsd = false; 863 qparam.uapsd = false;
833 864
834 sdata->tx_conf[queue] = qparam; 865 sdata->tx_conf[ac] = qparam;
835 drv_conf_tx(local, sdata, queue, &qparam); 866 drv_conf_tx(local, sdata, ac, &qparam);
836 } 867 }
837 868
838 /* after reinitialize QoS TX queues setting to default, 869 /* after reinitialize QoS TX queues setting to default,
@@ -878,10 +909,8 @@ u32 ieee80211_mandatory_rates(struct ieee80211_local *local,
878 int i; 909 int i;
879 910
880 sband = local->hw.wiphy->bands[band]; 911 sband = local->hw.wiphy->bands[band];
881 if (!sband) { 912 if (WARN_ON(!sband))
882 WARN_ON(1); 913 return 1;
883 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
884 }
885 914
886 if (band == IEEE80211_BAND_2GHZ) 915 if (band == IEEE80211_BAND_2GHZ)
887 mandatory_flag = IEEE80211_RATE_MANDATORY_B; 916 mandatory_flag = IEEE80211_RATE_MANDATORY_B;
@@ -1106,7 +1135,7 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
1106 1135
1107u32 ieee80211_sta_get_rates(struct ieee80211_local *local, 1136u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
1108 struct ieee802_11_elems *elems, 1137 struct ieee802_11_elems *elems,
1109 enum ieee80211_band band) 1138 enum ieee80211_band band, u32 *basic_rates)
1110{ 1139{
1111 struct ieee80211_supported_band *sband; 1140 struct ieee80211_supported_band *sband;
1112 struct ieee80211_rate *bitrates; 1141 struct ieee80211_rate *bitrates;
@@ -1115,10 +1144,8 @@ u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
1115 int i, j; 1144 int i, j;
1116 sband = local->hw.wiphy->bands[band]; 1145 sband = local->hw.wiphy->bands[band];
1117 1146
1118 if (!sband) { 1147 if (WARN_ON(!sband))
1119 WARN_ON(1); 1148 return 1;
1120 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
1121 }
1122 1149
1123 bitrates = sband->bitrates; 1150 bitrates = sband->bitrates;
1124 num_rates = sband->n_bitrates; 1151 num_rates = sband->n_bitrates;
@@ -1127,15 +1154,25 @@ u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
1127 elems->ext_supp_rates_len; i++) { 1154 elems->ext_supp_rates_len; i++) {
1128 u8 rate = 0; 1155 u8 rate = 0;
1129 int own_rate; 1156 int own_rate;
1157 bool is_basic;
1130 if (i < elems->supp_rates_len) 1158 if (i < elems->supp_rates_len)
1131 rate = elems->supp_rates[i]; 1159 rate = elems->supp_rates[i];
1132 else if (elems->ext_supp_rates) 1160 else if (elems->ext_supp_rates)
1133 rate = elems->ext_supp_rates 1161 rate = elems->ext_supp_rates
1134 [i - elems->supp_rates_len]; 1162 [i - elems->supp_rates_len];
1135 own_rate = 5 * (rate & 0x7f); 1163 own_rate = 5 * (rate & 0x7f);
1136 for (j = 0; j < num_rates; j++) 1164 is_basic = !!(rate & 0x80);
1137 if (bitrates[j].bitrate == own_rate) 1165
1166 if (is_basic && (rate & 0x7f) == BSS_MEMBERSHIP_SELECTOR_HT_PHY)
1167 continue;
1168
1169 for (j = 0; j < num_rates; j++) {
1170 if (bitrates[j].bitrate == own_rate) {
1138 supp_rates |= BIT(j); 1171 supp_rates |= BIT(j);
1172 if (basic_rates && is_basic)
1173 *basic_rates |= BIT(j);
1174 }
1175 }
1139 } 1176 }
1140 return supp_rates; 1177 return supp_rates;
1141} 1178}
@@ -1210,6 +1247,16 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1210 IEEE80211_TPT_LEDTRIG_FL_RADIO, 0); 1247 IEEE80211_TPT_LEDTRIG_FL_RADIO, 0);
1211 1248
1212 /* add interfaces */ 1249 /* add interfaces */
1250 sdata = rtnl_dereference(local->monitor_sdata);
1251 if (sdata) {
1252 res = drv_add_interface(local, sdata);
1253 if (WARN_ON(res)) {
1254 rcu_assign_pointer(local->monitor_sdata, NULL);
1255 synchronize_net();
1256 kfree(sdata);
1257 }
1258 }
1259
1213 list_for_each_entry(sdata, &local->interfaces, list) { 1260 list_for_each_entry(sdata, &local->interfaces, list) {
1214 if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN && 1261 if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
1215 sdata->vif.type != NL80211_IFTYPE_MONITOR && 1262 sdata->vif.type != NL80211_IFTYPE_MONITOR &&
@@ -1232,14 +1279,17 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1232 mutex_unlock(&local->sta_mtx); 1279 mutex_unlock(&local->sta_mtx);
1233 1280
1234 /* reconfigure tx conf */ 1281 /* reconfigure tx conf */
1235 list_for_each_entry(sdata, &local->interfaces, list) { 1282 if (hw->queues >= IEEE80211_NUM_ACS) {
1236 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN || 1283 list_for_each_entry(sdata, &local->interfaces, list) {
1237 sdata->vif.type == NL80211_IFTYPE_MONITOR || 1284 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
1238 !ieee80211_sdata_running(sdata)) 1285 sdata->vif.type == NL80211_IFTYPE_MONITOR ||
1239 continue; 1286 !ieee80211_sdata_running(sdata))
1287 continue;
1240 1288
1241 for (i = 0; i < hw->queues; i++) 1289 for (i = 0; i < IEEE80211_NUM_ACS; i++)
1242 drv_conf_tx(local, sdata, i, &sdata->tx_conf[i]); 1290 drv_conf_tx(local, sdata, i,
1291 &sdata->tx_conf[i]);
1292 }
1243 } 1293 }
1244 1294
1245 /* reconfigure hardware */ 1295 /* reconfigure hardware */
@@ -1611,57 +1661,58 @@ u8 *ieee80211_ie_build_ht_cap(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
1611 return pos; 1661 return pos;
1612} 1662}
1613 1663
1614u8 *ieee80211_ie_build_ht_info(u8 *pos, 1664u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
1615 struct ieee80211_sta_ht_cap *ht_cap,
1616 struct ieee80211_channel *channel, 1665 struct ieee80211_channel *channel,
1617 enum nl80211_channel_type channel_type) 1666 enum nl80211_channel_type channel_type)
1618{ 1667{
1619 struct ieee80211_ht_info *ht_info; 1668 struct ieee80211_ht_operation *ht_oper;
1620 /* Build HT Information */ 1669 /* Build HT Information */
1621 *pos++ = WLAN_EID_HT_INFORMATION; 1670 *pos++ = WLAN_EID_HT_OPERATION;
1622 *pos++ = sizeof(struct ieee80211_ht_info); 1671 *pos++ = sizeof(struct ieee80211_ht_operation);
1623 ht_info = (struct ieee80211_ht_info *)pos; 1672 ht_oper = (struct ieee80211_ht_operation *)pos;
1624 ht_info->control_chan = 1673 ht_oper->primary_chan =
1625 ieee80211_frequency_to_channel(channel->center_freq); 1674 ieee80211_frequency_to_channel(channel->center_freq);
1626 switch (channel_type) { 1675 switch (channel_type) {
1627 case NL80211_CHAN_HT40MINUS: 1676 case NL80211_CHAN_HT40MINUS:
1628 ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_BELOW; 1677 ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
1629 break; 1678 break;
1630 case NL80211_CHAN_HT40PLUS: 1679 case NL80211_CHAN_HT40PLUS:
1631 ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_ABOVE; 1680 ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
1632 break; 1681 break;
1633 case NL80211_CHAN_HT20: 1682 case NL80211_CHAN_HT20:
1634 default: 1683 default:
1635 ht_info->ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE; 1684 ht_oper->ht_param = IEEE80211_HT_PARAM_CHA_SEC_NONE;
1636 break; 1685 break;
1637 } 1686 }
1638 if (ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) 1687 if (ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 &&
1639 ht_info->ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY; 1688 channel_type != NL80211_CHAN_NO_HT &&
1689 channel_type != NL80211_CHAN_HT20)
1690 ht_oper->ht_param |= IEEE80211_HT_PARAM_CHAN_WIDTH_ANY;
1640 1691
1641 /* 1692 /*
1642 * Note: According to 802.11n-2009 9.13.3.1, HT Protection field and 1693 * Note: According to 802.11n-2009 9.13.3.1, HT Protection field and
1643 * RIFS Mode are reserved in IBSS mode, therefore keep them at 0 1694 * RIFS Mode are reserved in IBSS mode, therefore keep them at 0
1644 */ 1695 */
1645 ht_info->operation_mode = 0x0000; 1696 ht_oper->operation_mode = 0x0000;
1646 ht_info->stbc_param = 0x0000; 1697 ht_oper->stbc_param = 0x0000;
1647 1698
1648 /* It seems that Basic MCS set and Supported MCS set 1699 /* It seems that Basic MCS set and Supported MCS set
1649 are identical for the first 10 bytes */ 1700 are identical for the first 10 bytes */
1650 memset(&ht_info->basic_set, 0, 16); 1701 memset(&ht_oper->basic_set, 0, 16);
1651 memcpy(&ht_info->basic_set, &ht_cap->mcs, 10); 1702 memcpy(&ht_oper->basic_set, &ht_cap->mcs, 10);
1652 1703
1653 return pos + sizeof(struct ieee80211_ht_info); 1704 return pos + sizeof(struct ieee80211_ht_operation);
1654} 1705}
1655 1706
1656enum nl80211_channel_type 1707enum nl80211_channel_type
1657ieee80211_ht_info_to_channel_type(struct ieee80211_ht_info *ht_info) 1708ieee80211_ht_oper_to_channel_type(struct ieee80211_ht_operation *ht_oper)
1658{ 1709{
1659 enum nl80211_channel_type channel_type; 1710 enum nl80211_channel_type channel_type;
1660 1711
1661 if (!ht_info) 1712 if (!ht_oper)
1662 return NL80211_CHAN_NO_HT; 1713 return NL80211_CHAN_NO_HT;
1663 1714
1664 switch (ht_info->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) { 1715 switch (ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
1665 case IEEE80211_HT_PARAM_CHA_SEC_NONE: 1716 case IEEE80211_HT_PARAM_CHA_SEC_NONE:
1666 channel_type = NL80211_CHAN_HT20; 1717 channel_type = NL80211_CHAN_HT20;
1667 break; 1718 break;
@@ -1678,13 +1729,15 @@ ieee80211_ht_info_to_channel_type(struct ieee80211_ht_info *ht_info)
1678 return channel_type; 1729 return channel_type;
1679} 1730}
1680 1731
1681int ieee80211_add_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb) 1732int ieee80211_add_srates_ie(struct ieee80211_vif *vif,
1733 struct sk_buff *skb, bool need_basic)
1682{ 1734{
1683 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 1735 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
1684 struct ieee80211_local *local = sdata->local; 1736 struct ieee80211_local *local = sdata->local;
1685 struct ieee80211_supported_band *sband; 1737 struct ieee80211_supported_band *sband;
1686 int rate; 1738 int rate;
1687 u8 i, rates, *pos; 1739 u8 i, rates, *pos;
1740 u32 basic_rates = vif->bss_conf.basic_rates;
1688 1741
1689 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 1742 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
1690 rates = sband->n_bitrates; 1743 rates = sband->n_bitrates;
@@ -1698,20 +1751,25 @@ int ieee80211_add_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb)
1698 *pos++ = WLAN_EID_SUPP_RATES; 1751 *pos++ = WLAN_EID_SUPP_RATES;
1699 *pos++ = rates; 1752 *pos++ = rates;
1700 for (i = 0; i < rates; i++) { 1753 for (i = 0; i < rates; i++) {
1754 u8 basic = 0;
1755 if (need_basic && basic_rates & BIT(i))
1756 basic = 0x80;
1701 rate = sband->bitrates[i].bitrate; 1757 rate = sband->bitrates[i].bitrate;
1702 *pos++ = (u8) (rate / 5); 1758 *pos++ = basic | (u8) (rate / 5);
1703 } 1759 }
1704 1760
1705 return 0; 1761 return 0;
1706} 1762}
1707 1763
1708int ieee80211_add_ext_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb) 1764int ieee80211_add_ext_srates_ie(struct ieee80211_vif *vif,
1765 struct sk_buff *skb, bool need_basic)
1709{ 1766{
1710 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 1767 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
1711 struct ieee80211_local *local = sdata->local; 1768 struct ieee80211_local *local = sdata->local;
1712 struct ieee80211_supported_band *sband; 1769 struct ieee80211_supported_band *sband;
1713 int rate; 1770 int rate;
1714 u8 i, exrates, *pos; 1771 u8 i, exrates, *pos;
1772 u32 basic_rates = vif->bss_conf.basic_rates;
1715 1773
1716 sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; 1774 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
1717 exrates = sband->n_bitrates; 1775 exrates = sband->n_bitrates;
@@ -1728,9 +1786,25 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_vif *vif, struct sk_buff *skb)
1728 *pos++ = WLAN_EID_EXT_SUPP_RATES; 1786 *pos++ = WLAN_EID_EXT_SUPP_RATES;
1729 *pos++ = exrates; 1787 *pos++ = exrates;
1730 for (i = 8; i < sband->n_bitrates; i++) { 1788 for (i = 8; i < sband->n_bitrates; i++) {
1789 u8 basic = 0;
1790 if (need_basic && basic_rates & BIT(i))
1791 basic = 0x80;
1731 rate = sband->bitrates[i].bitrate; 1792 rate = sband->bitrates[i].bitrate;
1732 *pos++ = (u8) (rate / 5); 1793 *pos++ = basic | (u8) (rate / 5);
1733 } 1794 }
1734 } 1795 }
1735 return 0; 1796 return 0;
1736} 1797}
1798
1799int ieee80211_ave_rssi(struct ieee80211_vif *vif)
1800{
1801 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
1802 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1803
1804 if (WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_STATION)) {
1805 /* non-managed type inferfaces */
1806 return 0;
1807 }
1808 return ifmgd->ave_beacon_signal;
1809}
1810EXPORT_SYMBOL_GPL(ieee80211_ave_rssi);
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 89511be3111e..c3d643a6536c 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -52,6 +52,26 @@ static int wme_downgrade_ac(struct sk_buff *skb)
52 } 52 }
53} 53}
54 54
55static u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
56 struct sk_buff *skb)
57{
58 /* in case we are a client verify acm is not set for this ac */
59 while (unlikely(local->wmm_acm & BIT(skb->priority))) {
60 if (wme_downgrade_ac(skb)) {
61 /*
62 * This should not really happen. The AP has marked all
63 * lower ACs to require admission control which is not
64 * a reasonable configuration. Allow the frame to be
65 * transmitted using AC_BK as a workaround.
66 */
67 break;
68 }
69 }
70
71 /* look up which queue to use for frames with this 1d tag */
72 return ieee802_1d_to_ac[skb->priority];
73}
74
55/* Indicate which queue to use for this fully formed 802.11 frame */ 75/* Indicate which queue to use for this fully formed 802.11 frame */
56u16 ieee80211_select_queue_80211(struct ieee80211_local *local, 76u16 ieee80211_select_queue_80211(struct ieee80211_local *local,
57 struct sk_buff *skb, 77 struct sk_buff *skb,
@@ -59,7 +79,7 @@ u16 ieee80211_select_queue_80211(struct ieee80211_local *local,
59{ 79{
60 u8 *p; 80 u8 *p;
61 81
62 if (local->hw.queues < 4) 82 if (local->hw.queues < IEEE80211_NUM_ACS)
63 return 0; 83 return 0;
64 84
65 if (!ieee80211_is_data(hdr->frame_control)) { 85 if (!ieee80211_is_data(hdr->frame_control)) {
@@ -86,9 +106,9 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
86 const u8 *ra = NULL; 106 const u8 *ra = NULL;
87 bool qos = false; 107 bool qos = false;
88 108
89 if (local->hw.queues < 4 || skb->len < 6) { 109 if (local->hw.queues < IEEE80211_NUM_ACS || skb->len < 6) {
90 skb->priority = 0; /* required for correct WPA/11i MIC */ 110 skb->priority = 0; /* required for correct WPA/11i MIC */
91 return min_t(u16, local->hw.queues - 1, IEEE80211_AC_BE); 111 return 0;
92 } 112 }
93 113
94 rcu_read_lock(); 114 rcu_read_lock();
@@ -139,26 +159,6 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
139 return ieee80211_downgrade_queue(local, skb); 159 return ieee80211_downgrade_queue(local, skb);
140} 160}
141 161
142u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
143 struct sk_buff *skb)
144{
145 /* in case we are a client verify acm is not set for this ac */
146 while (unlikely(local->wmm_acm & BIT(skb->priority))) {
147 if (wme_downgrade_ac(skb)) {
148 /*
149 * This should not really happen. The AP has marked all
150 * lower ACs to require admission control which is not
151 * a reasonable configuration. Allow the frame to be
152 * transmitted using AC_BK as a workaround.
153 */
154 break;
155 }
156 }
157
158 /* look up which queue to use for frames with this 1d tag */
159 return ieee802_1d_to_ac[skb->priority];
160}
161
162void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata, 162void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
163 struct sk_buff *skb) 163 struct sk_buff *skb)
164{ 164{
diff --git a/net/mac80211/wme.h b/net/mac80211/wme.h
index 94edceb617ff..ca80818b7b66 100644
--- a/net/mac80211/wme.h
+++ b/net/mac80211/wme.h
@@ -22,8 +22,5 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
22 struct sk_buff *skb); 22 struct sk_buff *skb);
23void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata, 23void ieee80211_set_qos_hdr(struct ieee80211_sub_if_data *sdata,
24 struct sk_buff *skb); 24 struct sk_buff *skb);
25u16 ieee80211_downgrade_queue(struct ieee80211_local *local,
26 struct sk_buff *skb);
27
28 25
29#endif /* _WME_H */ 26#endif /* _WME_H */
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index c6e230efa049..b2650a9d45ff 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -122,9 +122,6 @@ static void ieee80211_work_work(struct work_struct *work)
122 enum work_action rma; 122 enum work_action rma;
123 bool remain_off_channel = false; 123 bool remain_off_channel = false;
124 124
125 if (local->scanning)
126 return;
127
128 /* 125 /*
129 * ieee80211_queue_work() should have picked up most cases, 126 * ieee80211_queue_work() should have picked up most cases,
130 * here we'll pick the rest. 127 * here we'll pick the rest.
@@ -134,6 +131,11 @@ static void ieee80211_work_work(struct work_struct *work)
134 131
135 mutex_lock(&local->mtx); 132 mutex_lock(&local->mtx);
136 133
134 if (local->scanning) {
135 mutex_unlock(&local->mtx);
136 return;
137 }
138
137 ieee80211_recalc_idle(local); 139 ieee80211_recalc_idle(local);
138 140
139 list_for_each_entry_safe(wk, tmp, &local->work_list, list) { 141 list_for_each_entry_safe(wk, tmp, &local->work_list, list) {
@@ -226,13 +228,8 @@ static void ieee80211_work_work(struct work_struct *work)
226 run_again(local, jiffies + HZ/2); 228 run_again(local, jiffies + HZ/2);
227 } 229 }
228 230
229 if (list_empty(&local->work_list) && local->scan_req &&
230 !local->scanning)
231 ieee80211_queue_delayed_work(&local->hw,
232 &local->scan_work,
233 round_jiffies_relative(0));
234
235 ieee80211_recalc_idle(local); 231 ieee80211_recalc_idle(local);
232 ieee80211_run_deferred_scan(local);
236 233
237 mutex_unlock(&local->mtx); 234 mutex_unlock(&local->mtx);
238 235
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index e1b7e051332e..e19f3653db23 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -290,12 +290,3 @@ void __init netfilter_init(void)
290 if (netfilter_log_init() < 0) 290 if (netfilter_log_init() < 0)
291 panic("cannot initialize nf_log"); 291 panic("cannot initialize nf_log");
292} 292}
293
294#ifdef CONFIG_SYSCTL
295struct ctl_path nf_net_netfilter_sysctl_path[] = {
296 { .procname = "net", },
297 { .procname = "netfilter", },
298 { }
299};
300EXPORT_SYMBOL_GPL(nf_net_netfilter_sysctl_path);
301#endif /* CONFIG_SYSCTL */
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index a72a4dff0031..7e1b061aeeba 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -109,8 +109,9 @@ bitmap_ip_list(const struct ip_set *set,
109 } else 109 } else
110 goto nla_put_failure; 110 goto nla_put_failure;
111 } 111 }
112 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, 112 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP,
113 htonl(map->first_ip + id * map->hosts)); 113 htonl(map->first_ip + id * map->hosts)))
114 goto nla_put_failure;
114 ipset_nest_end(skb, nested); 115 ipset_nest_end(skb, nested);
115 } 116 }
116 ipset_nest_end(skb, atd); 117 ipset_nest_end(skb, atd);
@@ -194,10 +195,11 @@ bitmap_ip_tlist(const struct ip_set *set,
194 } else 195 } else
195 goto nla_put_failure; 196 goto nla_put_failure;
196 } 197 }
197 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, 198 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP,
198 htonl(map->first_ip + id * map->hosts)); 199 htonl(map->first_ip + id * map->hosts)) ||
199 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 200 nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
200 htonl(ip_set_timeout_get(members[id]))); 201 htonl(ip_set_timeout_get(members[id]))))
202 goto nla_put_failure;
201 ipset_nest_end(skb, nested); 203 ipset_nest_end(skb, nested);
202 } 204 }
203 ipset_nest_end(skb, adt); 205 ipset_nest_end(skb, adt);
@@ -334,15 +336,16 @@ bitmap_ip_head(struct ip_set *set, struct sk_buff *skb)
334 nested = ipset_nest_start(skb, IPSET_ATTR_DATA); 336 nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
335 if (!nested) 337 if (!nested)
336 goto nla_put_failure; 338 goto nla_put_failure;
337 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip)); 339 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, htonl(map->first_ip)) ||
338 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)); 340 nla_put_ipaddr4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)) ||
339 if (map->netmask != 32) 341 (map->netmask != 32 &&
340 NLA_PUT_U8(skb, IPSET_ATTR_NETMASK, map->netmask); 342 nla_put_u8(skb, IPSET_ATTR_NETMASK, map->netmask)) ||
341 NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)); 343 nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
342 NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, 344 nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
343 htonl(sizeof(*map) + map->memsize)); 345 htonl(sizeof(*map) + map->memsize)) ||
344 if (with_timeout(map->timeout)) 346 (with_timeout(map->timeout) &&
345 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout)); 347 nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))))
348 goto nla_put_failure;
346 ipset_nest_end(skb, nested); 349 ipset_nest_end(skb, nested);
347 350
348 return 0; 351 return 0;
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 81324c12c5be..0bb16c469a89 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -186,11 +186,12 @@ bitmap_ipmac_list(const struct ip_set *set,
186 } else 186 } else
187 goto nla_put_failure; 187 goto nla_put_failure;
188 } 188 }
189 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, 189 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP,
190 htonl(map->first_ip + id)); 190 htonl(map->first_ip + id)) ||
191 if (elem->match == MAC_FILLED) 191 (elem->match == MAC_FILLED &&
192 NLA_PUT(skb, IPSET_ATTR_ETHER, ETH_ALEN, 192 nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN,
193 elem->ether); 193 elem->ether)))
194 goto nla_put_failure;
194 ipset_nest_end(skb, nested); 195 ipset_nest_end(skb, nested);
195 } 196 }
196 ipset_nest_end(skb, atd); 197 ipset_nest_end(skb, atd);
@@ -314,14 +315,16 @@ bitmap_ipmac_tlist(const struct ip_set *set,
314 } else 315 } else
315 goto nla_put_failure; 316 goto nla_put_failure;
316 } 317 }
317 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, 318 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP,
318 htonl(map->first_ip + id)); 319 htonl(map->first_ip + id)) ||
319 if (elem->match == MAC_FILLED) 320 (elem->match == MAC_FILLED &&
320 NLA_PUT(skb, IPSET_ATTR_ETHER, ETH_ALEN, 321 nla_put(skb, IPSET_ATTR_ETHER, ETH_ALEN,
321 elem->ether); 322 elem->ether)))
323 goto nla_put_failure;
322 timeout = elem->match == MAC_UNSET ? elem->timeout 324 timeout = elem->match == MAC_UNSET ? elem->timeout
323 : ip_set_timeout_get(elem->timeout); 325 : ip_set_timeout_get(elem->timeout);
324 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(timeout)); 326 if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(timeout)))
327 goto nla_put_failure;
325 ipset_nest_end(skb, nested); 328 ipset_nest_end(skb, nested);
326 } 329 }
327 ipset_nest_end(skb, atd); 330 ipset_nest_end(skb, atd);
@@ -438,14 +441,16 @@ bitmap_ipmac_head(struct ip_set *set, struct sk_buff *skb)
438 nested = ipset_nest_start(skb, IPSET_ATTR_DATA); 441 nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
439 if (!nested) 442 if (!nested)
440 goto nla_put_failure; 443 goto nla_put_failure;
441 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, htonl(map->first_ip)); 444 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, htonl(map->first_ip)) ||
442 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)); 445 nla_put_ipaddr4(skb, IPSET_ATTR_IP_TO, htonl(map->last_ip)) ||
443 NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)); 446 nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
444 NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, 447 nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
445 htonl(sizeof(*map) 448 htonl(sizeof(*map) +
446 + (map->last_ip - map->first_ip + 1) * map->dsize)); 449 ((map->last_ip - map->first_ip + 1) *
447 if (with_timeout(map->timeout)) 450 map->dsize))) ||
448 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout)); 451 (with_timeout(map->timeout) &&
452 nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))))
453 goto nla_put_failure;
449 ipset_nest_end(skb, nested); 454 ipset_nest_end(skb, nested);
450 455
451 return 0; 456 return 0;
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index 382ec28ba72e..b9f1fce7053b 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -96,8 +96,9 @@ bitmap_port_list(const struct ip_set *set,
96 } else 96 } else
97 goto nla_put_failure; 97 goto nla_put_failure;
98 } 98 }
99 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, 99 if (nla_put_net16(skb, IPSET_ATTR_PORT,
100 htons(map->first_port + id)); 100 htons(map->first_port + id)))
101 goto nla_put_failure;
101 ipset_nest_end(skb, nested); 102 ipset_nest_end(skb, nested);
102 } 103 }
103 ipset_nest_end(skb, atd); 104 ipset_nest_end(skb, atd);
@@ -183,10 +184,11 @@ bitmap_port_tlist(const struct ip_set *set,
183 } else 184 } else
184 goto nla_put_failure; 185 goto nla_put_failure;
185 } 186 }
186 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, 187 if (nla_put_net16(skb, IPSET_ATTR_PORT,
187 htons(map->first_port + id)); 188 htons(map->first_port + id)) ||
188 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 189 nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
189 htonl(ip_set_timeout_get(members[id]))); 190 htonl(ip_set_timeout_get(members[id]))))
191 goto nla_put_failure;
190 ipset_nest_end(skb, nested); 192 ipset_nest_end(skb, nested);
191 } 193 }
192 ipset_nest_end(skb, adt); 194 ipset_nest_end(skb, adt);
@@ -320,13 +322,14 @@ bitmap_port_head(struct ip_set *set, struct sk_buff *skb)
320 nested = ipset_nest_start(skb, IPSET_ATTR_DATA); 322 nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
321 if (!nested) 323 if (!nested)
322 goto nla_put_failure; 324 goto nla_put_failure;
323 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, htons(map->first_port)); 325 if (nla_put_net16(skb, IPSET_ATTR_PORT, htons(map->first_port)) ||
324 NLA_PUT_NET16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port)); 326 nla_put_net16(skb, IPSET_ATTR_PORT_TO, htons(map->last_port)) ||
325 NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)); 327 nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
326 NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, 328 nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
327 htonl(sizeof(*map) + map->memsize)); 329 htonl(sizeof(*map) + map->memsize)) ||
328 if (with_timeout(map->timeout)) 330 (with_timeout(map->timeout) &&
329 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout)); 331 nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))))
332 goto nla_put_failure;
330 ipset_nest_end(skb, nested); 333 ipset_nest_end(skb, nested);
331 334
332 return 0; 335 return 0;
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index e6c1c9605a58..819c342f5b30 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1092,19 +1092,21 @@ dump_last:
1092 ret = -EMSGSIZE; 1092 ret = -EMSGSIZE;
1093 goto release_refcount; 1093 goto release_refcount;
1094 } 1094 }
1095 NLA_PUT_U8(skb, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL); 1095 if (nla_put_u8(skb, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL) ||
1096 NLA_PUT_STRING(skb, IPSET_ATTR_SETNAME, set->name); 1096 nla_put_string(skb, IPSET_ATTR_SETNAME, set->name))
1097 goto nla_put_failure;
1097 if (dump_flags & IPSET_FLAG_LIST_SETNAME) 1098 if (dump_flags & IPSET_FLAG_LIST_SETNAME)
1098 goto next_set; 1099 goto next_set;
1099 switch (cb->args[2]) { 1100 switch (cb->args[2]) {
1100 case 0: 1101 case 0:
1101 /* Core header data */ 1102 /* Core header data */
1102 NLA_PUT_STRING(skb, IPSET_ATTR_TYPENAME, 1103 if (nla_put_string(skb, IPSET_ATTR_TYPENAME,
1103 set->type->name); 1104 set->type->name) ||
1104 NLA_PUT_U8(skb, IPSET_ATTR_FAMILY, 1105 nla_put_u8(skb, IPSET_ATTR_FAMILY,
1105 set->family); 1106 set->family) ||
1106 NLA_PUT_U8(skb, IPSET_ATTR_REVISION, 1107 nla_put_u8(skb, IPSET_ATTR_REVISION,
1107 set->revision); 1108 set->revision))
1109 goto nla_put_failure;
1108 ret = set->variant->head(set, skb); 1110 ret = set->variant->head(set, skb);
1109 if (ret < 0) 1111 if (ret < 0)
1110 goto release_refcount; 1112 goto release_refcount;
@@ -1410,11 +1412,12 @@ ip_set_header(struct sock *ctnl, struct sk_buff *skb,
1410 IPSET_CMD_HEADER); 1412 IPSET_CMD_HEADER);
1411 if (!nlh2) 1413 if (!nlh2)
1412 goto nlmsg_failure; 1414 goto nlmsg_failure;
1413 NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL); 1415 if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL) ||
1414 NLA_PUT_STRING(skb2, IPSET_ATTR_SETNAME, set->name); 1416 nla_put_string(skb2, IPSET_ATTR_SETNAME, set->name) ||
1415 NLA_PUT_STRING(skb2, IPSET_ATTR_TYPENAME, set->type->name); 1417 nla_put_string(skb2, IPSET_ATTR_TYPENAME, set->type->name) ||
1416 NLA_PUT_U8(skb2, IPSET_ATTR_FAMILY, set->family); 1418 nla_put_u8(skb2, IPSET_ATTR_FAMILY, set->family) ||
1417 NLA_PUT_U8(skb2, IPSET_ATTR_REVISION, set->revision); 1419 nla_put_u8(skb2, IPSET_ATTR_REVISION, set->revision))
1420 goto nla_put_failure;
1418 nlmsg_end(skb2, nlh2); 1421 nlmsg_end(skb2, nlh2);
1419 1422
1420 ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT); 1423 ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
@@ -1469,11 +1472,12 @@ ip_set_type(struct sock *ctnl, struct sk_buff *skb,
1469 IPSET_CMD_TYPE); 1472 IPSET_CMD_TYPE);
1470 if (!nlh2) 1473 if (!nlh2)
1471 goto nlmsg_failure; 1474 goto nlmsg_failure;
1472 NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL); 1475 if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL) ||
1473 NLA_PUT_STRING(skb2, IPSET_ATTR_TYPENAME, typename); 1476 nla_put_string(skb2, IPSET_ATTR_TYPENAME, typename) ||
1474 NLA_PUT_U8(skb2, IPSET_ATTR_FAMILY, family); 1477 nla_put_u8(skb2, IPSET_ATTR_FAMILY, family) ||
1475 NLA_PUT_U8(skb2, IPSET_ATTR_REVISION, max); 1478 nla_put_u8(skb2, IPSET_ATTR_REVISION, max) ||
1476 NLA_PUT_U8(skb2, IPSET_ATTR_REVISION_MIN, min); 1479 nla_put_u8(skb2, IPSET_ATTR_REVISION_MIN, min))
1480 goto nla_put_failure;
1477 nlmsg_end(skb2, nlh2); 1481 nlmsg_end(skb2, nlh2);
1478 1482
1479 pr_debug("Send TYPE, nlmsg_len: %u\n", nlh2->nlmsg_len); 1483 pr_debug("Send TYPE, nlmsg_len: %u\n", nlh2->nlmsg_len);
@@ -1517,7 +1521,8 @@ ip_set_protocol(struct sock *ctnl, struct sk_buff *skb,
1517 IPSET_CMD_PROTOCOL); 1521 IPSET_CMD_PROTOCOL);
1518 if (!nlh2) 1522 if (!nlh2)
1519 goto nlmsg_failure; 1523 goto nlmsg_failure;
1520 NLA_PUT_U8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL); 1524 if (nla_put_u8(skb2, IPSET_ATTR_PROTOCOL, IPSET_PROTOCOL))
1525 goto nla_put_failure;
1521 nlmsg_end(skb2, nlh2); 1526 nlmsg_end(skb2, nlh2);
1522 1527
1523 ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT); 1528 ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
@@ -1613,7 +1618,7 @@ static struct nfnetlink_subsystem ip_set_netlink_subsys __read_mostly = {
1613static int 1618static int
1614ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len) 1619ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
1615{ 1620{
1616 unsigned *op; 1621 unsigned int *op;
1617 void *data; 1622 void *data;
1618 int copylen = *len, ret = 0; 1623 int copylen = *len, ret = 0;
1619 1624
@@ -1621,7 +1626,7 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
1621 return -EPERM; 1626 return -EPERM;
1622 if (optval != SO_IP_SET) 1627 if (optval != SO_IP_SET)
1623 return -EBADF; 1628 return -EBADF;
1624 if (*len < sizeof(unsigned)) 1629 if (*len < sizeof(unsigned int))
1625 return -EINVAL; 1630 return -EINVAL;
1626 1631
1627 data = vmalloc(*len); 1632 data = vmalloc(*len);
@@ -1631,7 +1636,7 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
1631 ret = -EFAULT; 1636 ret = -EFAULT;
1632 goto done; 1637 goto done;
1633 } 1638 }
1634 op = (unsigned *) data; 1639 op = (unsigned int *) data;
1635 1640
1636 if (*op < IP_SET_OP_VERSION) { 1641 if (*op < IP_SET_OP_VERSION) {
1637 /* Check the version at the beginning of operations */ 1642 /* Check the version at the beginning of operations */
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
index 5139dea6019e..507fe93794aa 100644
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -81,7 +81,8 @@ hash_ip4_data_zero_out(struct hash_ip4_elem *elem)
81static inline bool 81static inline bool
82hash_ip4_data_list(struct sk_buff *skb, const struct hash_ip4_elem *data) 82hash_ip4_data_list(struct sk_buff *skb, const struct hash_ip4_elem *data)
83{ 83{
84 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); 84 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip))
85 goto nla_put_failure;
85 return 0; 86 return 0;
86 87
87nla_put_failure: 88nla_put_failure:
@@ -94,9 +95,10 @@ hash_ip4_data_tlist(struct sk_buff *skb, const struct hash_ip4_elem *data)
94 const struct hash_ip4_telem *tdata = 95 const struct hash_ip4_telem *tdata =
95 (const struct hash_ip4_telem *)data; 96 (const struct hash_ip4_telem *)data;
96 97
97 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); 98 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
98 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 99 nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
99 htonl(ip_set_timeout_get(tdata->timeout))); 100 htonl(ip_set_timeout_get(tdata->timeout))))
101 goto nla_put_failure;
100 102
101 return 0; 103 return 0;
102 104
@@ -262,7 +264,8 @@ ip6_netmask(union nf_inet_addr *ip, u8 prefix)
262static bool 264static bool
263hash_ip6_data_list(struct sk_buff *skb, const struct hash_ip6_elem *data) 265hash_ip6_data_list(struct sk_buff *skb, const struct hash_ip6_elem *data)
264{ 266{
265 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); 267 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6))
268 goto nla_put_failure;
266 return 0; 269 return 0;
267 270
268nla_put_failure: 271nla_put_failure:
@@ -275,9 +278,10 @@ hash_ip6_data_tlist(struct sk_buff *skb, const struct hash_ip6_elem *data)
275 const struct hash_ip6_telem *e = 278 const struct hash_ip6_telem *e =
276 (const struct hash_ip6_telem *)data; 279 (const struct hash_ip6_telem *)data;
277 280
278 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); 281 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
279 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 282 nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
280 htonl(ip_set_timeout_get(e->timeout))); 283 htonl(ip_set_timeout_get(e->timeout))))
284 goto nla_put_failure;
281 return 0; 285 return 0;
282 286
283nla_put_failure: 287nla_put_failure:
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index 9c27e249c171..68f284c97490 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -93,9 +93,10 @@ static bool
93hash_ipport4_data_list(struct sk_buff *skb, 93hash_ipport4_data_list(struct sk_buff *skb,
94 const struct hash_ipport4_elem *data) 94 const struct hash_ipport4_elem *data)
95{ 95{
96 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); 96 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
97 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); 97 nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
98 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 98 nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
99 goto nla_put_failure;
99 return 0; 100 return 0;
100 101
101nla_put_failure: 102nla_put_failure:
@@ -109,12 +110,12 @@ hash_ipport4_data_tlist(struct sk_buff *skb,
109 const struct hash_ipport4_telem *tdata = 110 const struct hash_ipport4_telem *tdata =
110 (const struct hash_ipport4_telem *)data; 111 (const struct hash_ipport4_telem *)data;
111 112
112 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); 113 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
113 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port); 114 nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) ||
114 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 115 nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
115 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 116 nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
116 htonl(ip_set_timeout_get(tdata->timeout))); 117 htonl(ip_set_timeout_get(tdata->timeout))))
117 118 goto nla_put_failure;
118 return 0; 119 return 0;
119 120
120nla_put_failure: 121nla_put_failure:
@@ -308,9 +309,10 @@ static bool
308hash_ipport6_data_list(struct sk_buff *skb, 309hash_ipport6_data_list(struct sk_buff *skb,
309 const struct hash_ipport6_elem *data) 310 const struct hash_ipport6_elem *data)
310{ 311{
311 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); 312 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
312 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); 313 nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
313 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 314 nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
315 goto nla_put_failure;
314 return 0; 316 return 0;
315 317
316nla_put_failure: 318nla_put_failure:
@@ -324,11 +326,12 @@ hash_ipport6_data_tlist(struct sk_buff *skb,
324 const struct hash_ipport6_telem *e = 326 const struct hash_ipport6_telem *e =
325 (const struct hash_ipport6_telem *)data; 327 (const struct hash_ipport6_telem *)data;
326 328
327 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); 329 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
328 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); 330 nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
329 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 331 nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
330 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 332 nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
331 htonl(ip_set_timeout_get(e->timeout))); 333 htonl(ip_set_timeout_get(e->timeout))))
334 goto nla_put_failure;
332 return 0; 335 return 0;
333 336
334nla_put_failure: 337nla_put_failure:
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index 9134057c0728..1eec4b9e0dca 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -94,10 +94,11 @@ static bool
94hash_ipportip4_data_list(struct sk_buff *skb, 94hash_ipportip4_data_list(struct sk_buff *skb,
95 const struct hash_ipportip4_elem *data) 95 const struct hash_ipportip4_elem *data)
96{ 96{
97 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); 97 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
98 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2); 98 nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip2) ||
99 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); 99 nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
100 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 100 nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
101 goto nla_put_failure;
101 return 0; 102 return 0;
102 103
103nla_put_failure: 104nla_put_failure:
@@ -111,13 +112,13 @@ hash_ipportip4_data_tlist(struct sk_buff *skb,
111 const struct hash_ipportip4_telem *tdata = 112 const struct hash_ipportip4_telem *tdata =
112 (const struct hash_ipportip4_telem *)data; 113 (const struct hash_ipportip4_telem *)data;
113 114
114 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); 115 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
115 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2); 116 nla_put_ipaddr4(skb, IPSET_ATTR_IP2, tdata->ip2) ||
116 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port); 117 nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) ||
117 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 118 nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
118 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 119 nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
119 htonl(ip_set_timeout_get(tdata->timeout))); 120 htonl(ip_set_timeout_get(tdata->timeout))))
120 121 goto nla_put_failure;
121 return 0; 122 return 0;
122 123
123nla_put_failure: 124nla_put_failure:
@@ -319,10 +320,11 @@ static bool
319hash_ipportip6_data_list(struct sk_buff *skb, 320hash_ipportip6_data_list(struct sk_buff *skb,
320 const struct hash_ipportip6_elem *data) 321 const struct hash_ipportip6_elem *data)
321{ 322{
322 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); 323 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
323 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2); 324 nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
324 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); 325 nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
325 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 326 nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto))
327 goto nla_put_failure;
326 return 0; 328 return 0;
327 329
328nla_put_failure: 330nla_put_failure:
@@ -336,12 +338,13 @@ hash_ipportip6_data_tlist(struct sk_buff *skb,
336 const struct hash_ipportip6_telem *e = 338 const struct hash_ipportip6_telem *e =
337 (const struct hash_ipportip6_telem *)data; 339 (const struct hash_ipportip6_telem *)data;
338 340
339 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); 341 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
340 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2); 342 nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
341 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); 343 nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
342 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 344 nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
343 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 345 nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
344 htonl(ip_set_timeout_get(e->timeout))); 346 htonl(ip_set_timeout_get(e->timeout))))
347 goto nla_put_failure;
345 return 0; 348 return 0;
346 349
347nla_put_failure: 350nla_put_failure:
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index 5d05e6969862..62d66ecef369 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -124,13 +124,14 @@ hash_ipportnet4_data_list(struct sk_buff *skb,
124{ 124{
125 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; 125 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
126 126
127 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); 127 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
128 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2); 128 nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip2) ||
129 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); 129 nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
130 NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1); 130 nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
131 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 131 nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
132 if (flags) 132 (flags &&
133 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); 133 nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
134 goto nla_put_failure;
134 return 0; 135 return 0;
135 136
136nla_put_failure: 137nla_put_failure:
@@ -145,16 +146,16 @@ hash_ipportnet4_data_tlist(struct sk_buff *skb,
145 (const struct hash_ipportnet4_telem *)data; 146 (const struct hash_ipportnet4_telem *)data;
146 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; 147 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
147 148
148 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); 149 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
149 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2); 150 nla_put_ipaddr4(skb, IPSET_ATTR_IP2, tdata->ip2) ||
150 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port); 151 nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) ||
151 NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1); 152 nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
152 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 153 nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
153 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 154 nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
154 htonl(ip_set_timeout_get(tdata->timeout))); 155 htonl(ip_set_timeout_get(tdata->timeout))) ||
155 if (flags) 156 (flags &&
156 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); 157 nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
157 158 goto nla_put_failure;
158 return 0; 159 return 0;
159 160
160nla_put_failure: 161nla_put_failure:
@@ -436,13 +437,14 @@ hash_ipportnet6_data_list(struct sk_buff *skb,
436{ 437{
437 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; 438 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
438 439
439 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); 440 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
440 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2); 441 nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
441 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); 442 nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
442 NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1); 443 nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
443 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 444 nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
444 if (flags) 445 (flags &&
445 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); 446 nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
447 goto nla_put_failure;
446 return 0; 448 return 0;
447 449
448nla_put_failure: 450nla_put_failure:
@@ -457,15 +459,16 @@ hash_ipportnet6_data_tlist(struct sk_buff *skb,
457 (const struct hash_ipportnet6_telem *)data; 459 (const struct hash_ipportnet6_telem *)data;
458 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; 460 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
459 461
460 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); 462 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
461 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2); 463 nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) ||
462 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); 464 nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
463 NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1); 465 nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) ||
464 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 466 nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
465 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 467 nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
466 htonl(ip_set_timeout_get(e->timeout))); 468 htonl(ip_set_timeout_get(e->timeout))) ||
467 if (flags) 469 (flags &&
468 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); 470 nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
471 goto nla_put_failure;
469 return 0; 472 return 0;
470 473
471nla_put_failure: 474nla_put_failure:
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index 7c3d945517cf..6607a814be57 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -111,10 +111,11 @@ hash_net4_data_list(struct sk_buff *skb, const struct hash_net4_elem *data)
111{ 111{
112 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; 112 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
113 113
114 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); 114 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
115 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); 115 nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
116 if (flags) 116 (flags &&
117 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); 117 nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
118 goto nla_put_failure;
118 return 0; 119 return 0;
119 120
120nla_put_failure: 121nla_put_failure:
@@ -128,13 +129,13 @@ hash_net4_data_tlist(struct sk_buff *skb, const struct hash_net4_elem *data)
128 (const struct hash_net4_telem *)data; 129 (const struct hash_net4_telem *)data;
129 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; 130 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
130 131
131 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); 132 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
132 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, tdata->cidr); 133 nla_put_u8(skb, IPSET_ATTR_CIDR, tdata->cidr) ||
133 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 134 nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
134 htonl(ip_set_timeout_get(tdata->timeout))); 135 htonl(ip_set_timeout_get(tdata->timeout))) ||
135 if (flags) 136 (flags &&
136 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); 137 nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
137 138 goto nla_put_failure;
138 return 0; 139 return 0;
139 140
140nla_put_failure: 141nla_put_failure:
@@ -339,10 +340,11 @@ hash_net6_data_list(struct sk_buff *skb, const struct hash_net6_elem *data)
339{ 340{
340 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; 341 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
341 342
342 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); 343 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
343 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); 344 nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
344 if (flags) 345 (flags &&
345 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); 346 nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
347 goto nla_put_failure;
346 return 0; 348 return 0;
347 349
348nla_put_failure: 350nla_put_failure:
@@ -356,12 +358,13 @@ hash_net6_data_tlist(struct sk_buff *skb, const struct hash_net6_elem *data)
356 (const struct hash_net6_telem *)data; 358 (const struct hash_net6_telem *)data;
357 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; 359 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
358 360
359 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); 361 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
360 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, e->cidr); 362 nla_put_u8(skb, IPSET_ATTR_CIDR, e->cidr) ||
361 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 363 nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
362 htonl(ip_set_timeout_get(e->timeout))); 364 htonl(ip_set_timeout_get(e->timeout))) ||
363 if (flags) 365 (flags &&
364 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); 366 nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
367 goto nla_put_failure;
365 return 0; 368 return 0;
366 369
367nla_put_failure: 370nla_put_failure:
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index f24037ff4322..6093f3daa911 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -252,11 +252,12 @@ hash_netiface4_data_list(struct sk_buff *skb,
252 252
253 if (data->nomatch) 253 if (data->nomatch)
254 flags |= IPSET_FLAG_NOMATCH; 254 flags |= IPSET_FLAG_NOMATCH;
255 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); 255 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
256 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); 256 nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
257 NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface); 257 nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) ||
258 if (flags) 258 (flags &&
259 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); 259 nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
260 goto nla_put_failure;
260 return 0; 261 return 0;
261 262
262nla_put_failure: 263nla_put_failure:
@@ -273,13 +274,14 @@ hash_netiface4_data_tlist(struct sk_buff *skb,
273 274
274 if (data->nomatch) 275 if (data->nomatch)
275 flags |= IPSET_FLAG_NOMATCH; 276 flags |= IPSET_FLAG_NOMATCH;
276 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); 277 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
277 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); 278 nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
278 NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface); 279 nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) ||
279 if (flags) 280 (flags &&
280 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); 281 nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))) ||
281 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 282 nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
282 htonl(ip_set_timeout_get(tdata->timeout))); 283 htonl(ip_set_timeout_get(tdata->timeout))))
284 goto nla_put_failure;
283 285
284 return 0; 286 return 0;
285 287
@@ -555,11 +557,12 @@ hash_netiface6_data_list(struct sk_buff *skb,
555 557
556 if (data->nomatch) 558 if (data->nomatch)
557 flags |= IPSET_FLAG_NOMATCH; 559 flags |= IPSET_FLAG_NOMATCH;
558 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); 560 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
559 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); 561 nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
560 NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface); 562 nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) ||
561 if (flags) 563 (flags &&
562 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); 564 nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
565 goto nla_put_failure;
563 return 0; 566 return 0;
564 567
565nla_put_failure: 568nla_put_failure:
@@ -576,13 +579,14 @@ hash_netiface6_data_tlist(struct sk_buff *skb,
576 579
577 if (data->nomatch) 580 if (data->nomatch)
578 flags |= IPSET_FLAG_NOMATCH; 581 flags |= IPSET_FLAG_NOMATCH;
579 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); 582 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
580 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr); 583 nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr) ||
581 NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface); 584 nla_put_string(skb, IPSET_ATTR_IFACE, data->iface) ||
582 if (flags) 585 (flags &&
583 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); 586 nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))) ||
584 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 587 nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
585 htonl(ip_set_timeout_get(e->timeout))); 588 htonl(ip_set_timeout_get(e->timeout))))
589 goto nla_put_failure;
586 return 0; 590 return 0;
587 591
588nla_put_failure: 592nla_put_failure:
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index ce2e77100b64..ae3c644adc14 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -124,12 +124,13 @@ hash_netport4_data_list(struct sk_buff *skb,
124{ 124{
125 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; 125 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
126 126
127 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip); 127 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) ||
128 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); 128 nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
129 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1); 129 nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) ||
130 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 130 nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
131 if (flags) 131 (flags &&
132 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); 132 nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
133 goto nla_put_failure;
133 return 0; 134 return 0;
134 135
135nla_put_failure: 136nla_put_failure:
@@ -144,15 +145,15 @@ hash_netport4_data_tlist(struct sk_buff *skb,
144 (const struct hash_netport4_telem *)data; 145 (const struct hash_netport4_telem *)data;
145 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; 146 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
146 147
147 NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip); 148 if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, tdata->ip) ||
148 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port); 149 nla_put_net16(skb, IPSET_ATTR_PORT, tdata->port) ||
149 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1); 150 nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) ||
150 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 151 nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
151 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 152 nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
152 htonl(ip_set_timeout_get(tdata->timeout))); 153 htonl(ip_set_timeout_get(tdata->timeout))) ||
153 if (flags) 154 (flags &&
154 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); 155 nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
155 156 goto nla_put_failure;
156 return 0; 157 return 0;
157 158
158nla_put_failure: 159nla_put_failure:
@@ -402,12 +403,13 @@ hash_netport6_data_list(struct sk_buff *skb,
402{ 403{
403 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; 404 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
404 405
405 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip); 406 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) ||
406 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); 407 nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
407 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1); 408 nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) ||
408 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 409 nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
409 if (flags) 410 (flags &&
410 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); 411 nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
412 goto nla_put_failure;
411 return 0; 413 return 0;
412 414
413nla_put_failure: 415nla_put_failure:
@@ -422,14 +424,15 @@ hash_netport6_data_tlist(struct sk_buff *skb,
422 (const struct hash_netport6_telem *)data; 424 (const struct hash_netport6_telem *)data;
423 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0; 425 u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
424 426
425 NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip); 427 if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &e->ip.in6) ||
426 NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port); 428 nla_put_net16(skb, IPSET_ATTR_PORT, data->port) ||
427 NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1); 429 nla_put_u8(skb, IPSET_ATTR_CIDR, data->cidr + 1) ||
428 NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto); 430 nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) ||
429 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 431 nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
430 htonl(ip_set_timeout_get(e->timeout))); 432 htonl(ip_set_timeout_get(e->timeout))) ||
431 if (flags) 433 (flags &&
432 NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)); 434 nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags))))
435 goto nla_put_failure;
433 return 0; 436 return 0;
434 437
435nla_put_failure: 438nla_put_failure:
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 7e095f9005f0..6cb1225765f9 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -402,12 +402,13 @@ list_set_head(struct ip_set *set, struct sk_buff *skb)
402 nested = ipset_nest_start(skb, IPSET_ATTR_DATA); 402 nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
403 if (!nested) 403 if (!nested)
404 goto nla_put_failure; 404 goto nla_put_failure;
405 NLA_PUT_NET32(skb, IPSET_ATTR_SIZE, htonl(map->size)); 405 if (nla_put_net32(skb, IPSET_ATTR_SIZE, htonl(map->size)) ||
406 if (with_timeout(map->timeout)) 406 (with_timeout(map->timeout) &&
407 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout)); 407 nla_put_net32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout))) ||
408 NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)); 408 nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
409 NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, 409 nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
410 htonl(sizeof(*map) + map->size * map->dsize)); 410 htonl(sizeof(*map) + map->size * map->dsize)))
411 goto nla_put_failure;
411 ipset_nest_end(skb, nested); 412 ipset_nest_end(skb, nested);
412 413
413 return 0; 414 return 0;
@@ -442,13 +443,15 @@ list_set_list(const struct ip_set *set,
442 } else 443 } else
443 goto nla_put_failure; 444 goto nla_put_failure;
444 } 445 }
445 NLA_PUT_STRING(skb, IPSET_ATTR_NAME, 446 if (nla_put_string(skb, IPSET_ATTR_NAME,
446 ip_set_name_byindex(e->id)); 447 ip_set_name_byindex(e->id)))
448 goto nla_put_failure;
447 if (with_timeout(map->timeout)) { 449 if (with_timeout(map->timeout)) {
448 const struct set_telem *te = 450 const struct set_telem *te =
449 (const struct set_telem *) e; 451 (const struct set_telem *) e;
450 NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, 452 __be32 to = htonl(ip_set_timeout_get(te->timeout));
451 htonl(ip_set_timeout_get(te->timeout))); 453 if (nla_put_net32(skb, IPSET_ATTR_TIMEOUT, to))
454 goto nla_put_failure;
452 } 455 }
453 ipset_nest_end(skb, nested); 456 ipset_nest_end(skb, nested);
454 } 457 }
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index 52856178c9d7..64f9e8f13207 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -313,7 +313,7 @@ vs_fix_ack_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
313 * Assumes already checked proto==IPPROTO_TCP and diff!=0. 313 * Assumes already checked proto==IPPROTO_TCP and diff!=0.
314 */ 314 */
315static inline void vs_seq_update(struct ip_vs_conn *cp, struct ip_vs_seq *vseq, 315static inline void vs_seq_update(struct ip_vs_conn *cp, struct ip_vs_seq *vseq,
316 unsigned flag, __u32 seq, int diff) 316 unsigned int flag, __u32 seq, int diff)
317{ 317{
318 /* spinlock is to keep updating cp->flags atomic */ 318 /* spinlock is to keep updating cp->flags atomic */
319 spin_lock(&cp->lock); 319 spin_lock(&cp->lock);
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 29fa5badde75..4a09b7873003 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -86,42 +86,42 @@ struct ip_vs_aligned_lock
86static struct ip_vs_aligned_lock 86static struct ip_vs_aligned_lock
87__ip_vs_conntbl_lock_array[CT_LOCKARRAY_SIZE] __cacheline_aligned; 87__ip_vs_conntbl_lock_array[CT_LOCKARRAY_SIZE] __cacheline_aligned;
88 88
89static inline void ct_read_lock(unsigned key) 89static inline void ct_read_lock(unsigned int key)
90{ 90{
91 read_lock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); 91 read_lock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
92} 92}
93 93
94static inline void ct_read_unlock(unsigned key) 94static inline void ct_read_unlock(unsigned int key)
95{ 95{
96 read_unlock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); 96 read_unlock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
97} 97}
98 98
99static inline void ct_write_lock(unsigned key) 99static inline void ct_write_lock(unsigned int key)
100{ 100{
101 write_lock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); 101 write_lock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
102} 102}
103 103
104static inline void ct_write_unlock(unsigned key) 104static inline void ct_write_unlock(unsigned int key)
105{ 105{
106 write_unlock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); 106 write_unlock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
107} 107}
108 108
109static inline void ct_read_lock_bh(unsigned key) 109static inline void ct_read_lock_bh(unsigned int key)
110{ 110{
111 read_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); 111 read_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
112} 112}
113 113
114static inline void ct_read_unlock_bh(unsigned key) 114static inline void ct_read_unlock_bh(unsigned int key)
115{ 115{
116 read_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); 116 read_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
117} 117}
118 118
119static inline void ct_write_lock_bh(unsigned key) 119static inline void ct_write_lock_bh(unsigned int key)
120{ 120{
121 write_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); 121 write_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
122} 122}
123 123
124static inline void ct_write_unlock_bh(unsigned key) 124static inline void ct_write_unlock_bh(unsigned int key)
125{ 125{
126 write_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); 126 write_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
127} 127}
@@ -130,7 +130,7 @@ static inline void ct_write_unlock_bh(unsigned key)
130/* 130/*
131 * Returns hash value for IPVS connection entry 131 * Returns hash value for IPVS connection entry
132 */ 132 */
133static unsigned int ip_vs_conn_hashkey(struct net *net, int af, unsigned proto, 133static unsigned int ip_vs_conn_hashkey(struct net *net, int af, unsigned int proto,
134 const union nf_inet_addr *addr, 134 const union nf_inet_addr *addr,
135 __be16 port) 135 __be16 port)
136{ 136{
@@ -188,7 +188,7 @@ static unsigned int ip_vs_conn_hashkey_conn(const struct ip_vs_conn *cp)
188 */ 188 */
189static inline int ip_vs_conn_hash(struct ip_vs_conn *cp) 189static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
190{ 190{
191 unsigned hash; 191 unsigned int hash;
192 int ret; 192 int ret;
193 193
194 if (cp->flags & IP_VS_CONN_F_ONE_PACKET) 194 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
@@ -224,7 +224,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
224 */ 224 */
225static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp) 225static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
226{ 226{
227 unsigned hash; 227 unsigned int hash;
228 int ret; 228 int ret;
229 229
230 /* unhash it and decrease its reference counter */ 230 /* unhash it and decrease its reference counter */
@@ -257,7 +257,7 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
257static inline struct ip_vs_conn * 257static inline struct ip_vs_conn *
258__ip_vs_conn_in_get(const struct ip_vs_conn_param *p) 258__ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
259{ 259{
260 unsigned hash; 260 unsigned int hash;
261 struct ip_vs_conn *cp; 261 struct ip_vs_conn *cp;
262 struct hlist_node *n; 262 struct hlist_node *n;
263 263
@@ -344,7 +344,7 @@ EXPORT_SYMBOL_GPL(ip_vs_conn_in_get_proto);
344/* Get reference to connection template */ 344/* Get reference to connection template */
345struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p) 345struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p)
346{ 346{
347 unsigned hash; 347 unsigned int hash;
348 struct ip_vs_conn *cp; 348 struct ip_vs_conn *cp;
349 struct hlist_node *n; 349 struct hlist_node *n;
350 350
@@ -394,7 +394,7 @@ struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p)
394 * p->vaddr, p->vport: pkt dest address (foreign host) */ 394 * p->vaddr, p->vport: pkt dest address (foreign host) */
395struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p) 395struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
396{ 396{
397 unsigned hash; 397 unsigned int hash;
398 struct ip_vs_conn *cp, *ret=NULL; 398 struct ip_vs_conn *cp, *ret=NULL;
399 struct hlist_node *n; 399 struct hlist_node *n;
400 400
@@ -824,7 +824,7 @@ void ip_vs_conn_expire_now(struct ip_vs_conn *cp)
824 */ 824 */
825struct ip_vs_conn * 825struct ip_vs_conn *
826ip_vs_conn_new(const struct ip_vs_conn_param *p, 826ip_vs_conn_new(const struct ip_vs_conn_param *p,
827 const union nf_inet_addr *daddr, __be16 dport, unsigned flags, 827 const union nf_inet_addr *daddr, __be16 dport, unsigned int flags,
828 struct ip_vs_dest *dest, __u32 fwmark) 828 struct ip_vs_dest *dest, __u32 fwmark)
829{ 829{
830 struct ip_vs_conn *cp; 830 struct ip_vs_conn *cp;
@@ -1057,7 +1057,7 @@ static const struct file_operations ip_vs_conn_fops = {
1057 .release = seq_release_net, 1057 .release = seq_release_net,
1058}; 1058};
1059 1059
1060static const char *ip_vs_origin_name(unsigned flags) 1060static const char *ip_vs_origin_name(unsigned int flags)
1061{ 1061{
1062 if (flags & IP_VS_CONN_F_SYNC) 1062 if (flags & IP_VS_CONN_F_SYNC)
1063 return "SYNC"; 1063 return "SYNC";
@@ -1169,7 +1169,7 @@ void ip_vs_random_dropentry(struct net *net)
1169 * Randomly scan 1/32 of the whole table every second 1169 * Randomly scan 1/32 of the whole table every second
1170 */ 1170 */
1171 for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) { 1171 for (idx = 0; idx < (ip_vs_conn_tab_size>>5); idx++) {
1172 unsigned hash = net_random() & ip_vs_conn_tab_mask; 1172 unsigned int hash = net_random() & ip_vs_conn_tab_mask;
1173 struct hlist_node *n; 1173 struct hlist_node *n;
1174 1174
1175 /* 1175 /*
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 00bdb1d9d690..c8f36b96f44f 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -80,7 +80,7 @@ static atomic_t ipvs_netns_cnt = ATOMIC_INIT(0);
80#define icmp_id(icmph) (((icmph)->un).echo.id) 80#define icmp_id(icmph) (((icmph)->un).echo.id)
81#define icmpv6_id(icmph) (icmph->icmp6_dataun.u_echo.identifier) 81#define icmpv6_id(icmph) (icmph->icmp6_dataun.u_echo.identifier)
82 82
83const char *ip_vs_proto_name(unsigned proto) 83const char *ip_vs_proto_name(unsigned int proto)
84{ 84{
85 static char buf[20]; 85 static char buf[20];
86 86
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index f5589987fc80..37b91996bfba 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -265,11 +265,11 @@ static struct list_head ip_vs_svc_fwm_table[IP_VS_SVC_TAB_SIZE];
265/* 265/*
266 * Returns hash value for virtual service 266 * Returns hash value for virtual service
267 */ 267 */
268static inline unsigned 268static inline unsigned int
269ip_vs_svc_hashkey(struct net *net, int af, unsigned proto, 269ip_vs_svc_hashkey(struct net *net, int af, unsigned int proto,
270 const union nf_inet_addr *addr, __be16 port) 270 const union nf_inet_addr *addr, __be16 port)
271{ 271{
272 register unsigned porth = ntohs(port); 272 register unsigned int porth = ntohs(port);
273 __be32 addr_fold = addr->ip; 273 __be32 addr_fold = addr->ip;
274 274
275#ifdef CONFIG_IP_VS_IPV6 275#ifdef CONFIG_IP_VS_IPV6
@@ -286,7 +286,7 @@ ip_vs_svc_hashkey(struct net *net, int af, unsigned proto,
286/* 286/*
287 * Returns hash value of fwmark for virtual service lookup 287 * Returns hash value of fwmark for virtual service lookup
288 */ 288 */
289static inline unsigned ip_vs_svc_fwm_hashkey(struct net *net, __u32 fwmark) 289static inline unsigned int ip_vs_svc_fwm_hashkey(struct net *net, __u32 fwmark)
290{ 290{
291 return (((size_t)net>>8) ^ fwmark) & IP_VS_SVC_TAB_MASK; 291 return (((size_t)net>>8) ^ fwmark) & IP_VS_SVC_TAB_MASK;
292} 292}
@@ -298,7 +298,7 @@ static inline unsigned ip_vs_svc_fwm_hashkey(struct net *net, __u32 fwmark)
298 */ 298 */
299static int ip_vs_svc_hash(struct ip_vs_service *svc) 299static int ip_vs_svc_hash(struct ip_vs_service *svc)
300{ 300{
301 unsigned hash; 301 unsigned int hash;
302 302
303 if (svc->flags & IP_VS_SVC_F_HASHED) { 303 if (svc->flags & IP_VS_SVC_F_HASHED) {
304 pr_err("%s(): request for already hashed, called from %pF\n", 304 pr_err("%s(): request for already hashed, called from %pF\n",
@@ -361,7 +361,7 @@ static inline struct ip_vs_service *
361__ip_vs_service_find(struct net *net, int af, __u16 protocol, 361__ip_vs_service_find(struct net *net, int af, __u16 protocol,
362 const union nf_inet_addr *vaddr, __be16 vport) 362 const union nf_inet_addr *vaddr, __be16 vport)
363{ 363{
364 unsigned hash; 364 unsigned int hash;
365 struct ip_vs_service *svc; 365 struct ip_vs_service *svc;
366 366
367 /* Check for "full" addressed entries */ 367 /* Check for "full" addressed entries */
@@ -388,7 +388,7 @@ __ip_vs_service_find(struct net *net, int af, __u16 protocol,
388static inline struct ip_vs_service * 388static inline struct ip_vs_service *
389__ip_vs_svc_fwm_find(struct net *net, int af, __u32 fwmark) 389__ip_vs_svc_fwm_find(struct net *net, int af, __u32 fwmark)
390{ 390{
391 unsigned hash; 391 unsigned int hash;
392 struct ip_vs_service *svc; 392 struct ip_vs_service *svc;
393 393
394 /* Check for fwmark addressed entries */ 394 /* Check for fwmark addressed entries */
@@ -489,11 +489,11 @@ __ip_vs_unbind_svc(struct ip_vs_dest *dest)
489/* 489/*
490 * Returns hash value for real service 490 * Returns hash value for real service
491 */ 491 */
492static inline unsigned ip_vs_rs_hashkey(int af, 492static inline unsigned int ip_vs_rs_hashkey(int af,
493 const union nf_inet_addr *addr, 493 const union nf_inet_addr *addr,
494 __be16 port) 494 __be16 port)
495{ 495{
496 register unsigned porth = ntohs(port); 496 register unsigned int porth = ntohs(port);
497 __be32 addr_fold = addr->ip; 497 __be32 addr_fold = addr->ip;
498 498
499#ifdef CONFIG_IP_VS_IPV6 499#ifdef CONFIG_IP_VS_IPV6
@@ -512,7 +512,7 @@ static inline unsigned ip_vs_rs_hashkey(int af,
512 */ 512 */
513static int ip_vs_rs_hash(struct netns_ipvs *ipvs, struct ip_vs_dest *dest) 513static int ip_vs_rs_hash(struct netns_ipvs *ipvs, struct ip_vs_dest *dest)
514{ 514{
515 unsigned hash; 515 unsigned int hash;
516 516
517 if (!list_empty(&dest->d_list)) { 517 if (!list_empty(&dest->d_list)) {
518 return 0; 518 return 0;
@@ -555,7 +555,7 @@ ip_vs_lookup_real_service(struct net *net, int af, __u16 protocol,
555 __be16 dport) 555 __be16 dport)
556{ 556{
557 struct netns_ipvs *ipvs = net_ipvs(net); 557 struct netns_ipvs *ipvs = net_ipvs(net);
558 unsigned hash; 558 unsigned int hash;
559 struct ip_vs_dest *dest; 559 struct ip_vs_dest *dest;
560 560
561 /* 561 /*
@@ -842,7 +842,7 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
842 struct ip_vs_dest **dest_p) 842 struct ip_vs_dest **dest_p)
843{ 843{
844 struct ip_vs_dest *dest; 844 struct ip_vs_dest *dest;
845 unsigned atype; 845 unsigned int atype;
846 846
847 EnterFunction(2); 847 EnterFunction(2);
848 848
@@ -1846,13 +1846,6 @@ static struct ctl_table vs_vars[] = {
1846 { } 1846 { }
1847}; 1847};
1848 1848
1849const struct ctl_path net_vs_ctl_path[] = {
1850 { .procname = "net", },
1851 { .procname = "ipv4", },
1852 { .procname = "vs", },
1853 { }
1854};
1855EXPORT_SYMBOL_GPL(net_vs_ctl_path);
1856#endif 1849#endif
1857 1850
1858#ifdef CONFIG_PROC_FS 1851#ifdef CONFIG_PROC_FS
@@ -1867,7 +1860,7 @@ struct ip_vs_iter {
1867 * Write the contents of the VS rule table to a PROCfs file. 1860 * Write the contents of the VS rule table to a PROCfs file.
1868 * (It is kept just for backward compatibility) 1861 * (It is kept just for backward compatibility)
1869 */ 1862 */
1870static inline const char *ip_vs_fwd_name(unsigned flags) 1863static inline const char *ip_vs_fwd_name(unsigned int flags)
1871{ 1864{
1872 switch (flags & IP_VS_CONN_F_FWD_MASK) { 1865 switch (flags & IP_VS_CONN_F_FWD_MASK) {
1873 case IP_VS_CONN_F_LOCALNODE: 1866 case IP_VS_CONN_F_LOCALNODE:
@@ -2816,17 +2809,17 @@ static int ip_vs_genl_fill_stats(struct sk_buff *skb, int container_type,
2816 2809
2817 ip_vs_copy_stats(&ustats, stats); 2810 ip_vs_copy_stats(&ustats, stats);
2818 2811
2819 NLA_PUT_U32(skb, IPVS_STATS_ATTR_CONNS, ustats.conns); 2812 if (nla_put_u32(skb, IPVS_STATS_ATTR_CONNS, ustats.conns) ||
2820 NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPKTS, ustats.inpkts); 2813 nla_put_u32(skb, IPVS_STATS_ATTR_INPKTS, ustats.inpkts) ||
2821 NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPKTS, ustats.outpkts); 2814 nla_put_u32(skb, IPVS_STATS_ATTR_OUTPKTS, ustats.outpkts) ||
2822 NLA_PUT_U64(skb, IPVS_STATS_ATTR_INBYTES, ustats.inbytes); 2815 nla_put_u64(skb, IPVS_STATS_ATTR_INBYTES, ustats.inbytes) ||
2823 NLA_PUT_U64(skb, IPVS_STATS_ATTR_OUTBYTES, ustats.outbytes); 2816 nla_put_u64(skb, IPVS_STATS_ATTR_OUTBYTES, ustats.outbytes) ||
2824 NLA_PUT_U32(skb, IPVS_STATS_ATTR_CPS, ustats.cps); 2817 nla_put_u32(skb, IPVS_STATS_ATTR_CPS, ustats.cps) ||
2825 NLA_PUT_U32(skb, IPVS_STATS_ATTR_INPPS, ustats.inpps); 2818 nla_put_u32(skb, IPVS_STATS_ATTR_INPPS, ustats.inpps) ||
2826 NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTPPS, ustats.outpps); 2819 nla_put_u32(skb, IPVS_STATS_ATTR_OUTPPS, ustats.outpps) ||
2827 NLA_PUT_U32(skb, IPVS_STATS_ATTR_INBPS, ustats.inbps); 2820 nla_put_u32(skb, IPVS_STATS_ATTR_INBPS, ustats.inbps) ||
2828 NLA_PUT_U32(skb, IPVS_STATS_ATTR_OUTBPS, ustats.outbps); 2821 nla_put_u32(skb, IPVS_STATS_ATTR_OUTBPS, ustats.outbps))
2829 2822 goto nla_put_failure;
2830 nla_nest_end(skb, nl_stats); 2823 nla_nest_end(skb, nl_stats);
2831 2824
2832 return 0; 2825 return 0;
@@ -2847,23 +2840,25 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb,
2847 if (!nl_service) 2840 if (!nl_service)
2848 return -EMSGSIZE; 2841 return -EMSGSIZE;
2849 2842
2850 NLA_PUT_U16(skb, IPVS_SVC_ATTR_AF, svc->af); 2843 if (nla_put_u16(skb, IPVS_SVC_ATTR_AF, svc->af))
2851 2844 goto nla_put_failure;
2852 if (svc->fwmark) { 2845 if (svc->fwmark) {
2853 NLA_PUT_U32(skb, IPVS_SVC_ATTR_FWMARK, svc->fwmark); 2846 if (nla_put_u32(skb, IPVS_SVC_ATTR_FWMARK, svc->fwmark))
2847 goto nla_put_failure;
2854 } else { 2848 } else {
2855 NLA_PUT_U16(skb, IPVS_SVC_ATTR_PROTOCOL, svc->protocol); 2849 if (nla_put_u16(skb, IPVS_SVC_ATTR_PROTOCOL, svc->protocol) ||
2856 NLA_PUT(skb, IPVS_SVC_ATTR_ADDR, sizeof(svc->addr), &svc->addr); 2850 nla_put(skb, IPVS_SVC_ATTR_ADDR, sizeof(svc->addr), &svc->addr) ||
2857 NLA_PUT_U16(skb, IPVS_SVC_ATTR_PORT, svc->port); 2851 nla_put_u16(skb, IPVS_SVC_ATTR_PORT, svc->port))
2852 goto nla_put_failure;
2858 } 2853 }
2859 2854
2860 NLA_PUT_STRING(skb, IPVS_SVC_ATTR_SCHED_NAME, svc->scheduler->name); 2855 if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, svc->scheduler->name) ||
2861 if (svc->pe) 2856 (svc->pe &&
2862 NLA_PUT_STRING(skb, IPVS_SVC_ATTR_PE_NAME, svc->pe->name); 2857 nla_put_string(skb, IPVS_SVC_ATTR_PE_NAME, svc->pe->name)) ||
2863 NLA_PUT(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags); 2858 nla_put(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags) ||
2864 NLA_PUT_U32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ); 2859 nla_put_u32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ) ||
2865 NLA_PUT_U32(skb, IPVS_SVC_ATTR_NETMASK, svc->netmask); 2860 nla_put_u32(skb, IPVS_SVC_ATTR_NETMASK, svc->netmask))
2866 2861 goto nla_put_failure;
2867 if (ip_vs_genl_fill_stats(skb, IPVS_SVC_ATTR_STATS, &svc->stats)) 2862 if (ip_vs_genl_fill_stats(skb, IPVS_SVC_ATTR_STATS, &svc->stats))
2868 goto nla_put_failure; 2863 goto nla_put_failure;
2869 2864
@@ -3038,21 +3033,22 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
3038 if (!nl_dest) 3033 if (!nl_dest)
3039 return -EMSGSIZE; 3034 return -EMSGSIZE;
3040 3035
3041 NLA_PUT(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr); 3036 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
3042 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port); 3037 nla_put_u16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
3043 3038 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
3044 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD, 3039 (atomic_read(&dest->conn_flags) &
3045 atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK); 3040 IP_VS_CONN_F_FWD_MASK)) ||
3046 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight)); 3041 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
3047 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold); 3042 atomic_read(&dest->weight)) ||
3048 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold); 3043 nla_put_u32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold) ||
3049 NLA_PUT_U32(skb, IPVS_DEST_ATTR_ACTIVE_CONNS, 3044 nla_put_u32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold) ||
3050 atomic_read(&dest->activeconns)); 3045 nla_put_u32(skb, IPVS_DEST_ATTR_ACTIVE_CONNS,
3051 NLA_PUT_U32(skb, IPVS_DEST_ATTR_INACT_CONNS, 3046 atomic_read(&dest->activeconns)) ||
3052 atomic_read(&dest->inactconns)); 3047 nla_put_u32(skb, IPVS_DEST_ATTR_INACT_CONNS,
3053 NLA_PUT_U32(skb, IPVS_DEST_ATTR_PERSIST_CONNS, 3048 atomic_read(&dest->inactconns)) ||
3054 atomic_read(&dest->persistconns)); 3049 nla_put_u32(skb, IPVS_DEST_ATTR_PERSIST_CONNS,
3055 3050 atomic_read(&dest->persistconns)))
3051 goto nla_put_failure;
3056 if (ip_vs_genl_fill_stats(skb, IPVS_DEST_ATTR_STATS, &dest->stats)) 3052 if (ip_vs_genl_fill_stats(skb, IPVS_DEST_ATTR_STATS, &dest->stats))
3057 goto nla_put_failure; 3053 goto nla_put_failure;
3058 3054
@@ -3181,10 +3177,10 @@ static int ip_vs_genl_fill_daemon(struct sk_buff *skb, __be32 state,
3181 if (!nl_daemon) 3177 if (!nl_daemon)
3182 return -EMSGSIZE; 3178 return -EMSGSIZE;
3183 3179
3184 NLA_PUT_U32(skb, IPVS_DAEMON_ATTR_STATE, state); 3180 if (nla_put_u32(skb, IPVS_DAEMON_ATTR_STATE, state) ||
3185 NLA_PUT_STRING(skb, IPVS_DAEMON_ATTR_MCAST_IFN, mcast_ifn); 3181 nla_put_string(skb, IPVS_DAEMON_ATTR_MCAST_IFN, mcast_ifn) ||
3186 NLA_PUT_U32(skb, IPVS_DAEMON_ATTR_SYNC_ID, syncid); 3182 nla_put_u32(skb, IPVS_DAEMON_ATTR_SYNC_ID, syncid))
3187 3183 goto nla_put_failure;
3188 nla_nest_end(skb, nl_daemon); 3184 nla_nest_end(skb, nl_daemon);
3189 3185
3190 return 0; 3186 return 0;
@@ -3473,21 +3469,26 @@ static int ip_vs_genl_get_cmd(struct sk_buff *skb, struct genl_info *info)
3473 3469
3474 __ip_vs_get_timeouts(net, &t); 3470 __ip_vs_get_timeouts(net, &t);
3475#ifdef CONFIG_IP_VS_PROTO_TCP 3471#ifdef CONFIG_IP_VS_PROTO_TCP
3476 NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP, t.tcp_timeout); 3472 if (nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP,
3477 NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN, 3473 t.tcp_timeout) ||
3478 t.tcp_fin_timeout); 3474 nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_TCP_FIN,
3475 t.tcp_fin_timeout))
3476 goto nla_put_failure;
3479#endif 3477#endif
3480#ifdef CONFIG_IP_VS_PROTO_UDP 3478#ifdef CONFIG_IP_VS_PROTO_UDP
3481 NLA_PUT_U32(msg, IPVS_CMD_ATTR_TIMEOUT_UDP, t.udp_timeout); 3479 if (nla_put_u32(msg, IPVS_CMD_ATTR_TIMEOUT_UDP, t.udp_timeout))
3480 goto nla_put_failure;
3482#endif 3481#endif
3483 3482
3484 break; 3483 break;
3485 } 3484 }
3486 3485
3487 case IPVS_CMD_GET_INFO: 3486 case IPVS_CMD_GET_INFO:
3488 NLA_PUT_U32(msg, IPVS_INFO_ATTR_VERSION, IP_VS_VERSION_CODE); 3487 if (nla_put_u32(msg, IPVS_INFO_ATTR_VERSION,
3489 NLA_PUT_U32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE, 3488 IP_VS_VERSION_CODE) ||
3490 ip_vs_conn_tab_size); 3489 nla_put_u32(msg, IPVS_INFO_ATTR_CONN_TAB_SIZE,
3490 ip_vs_conn_tab_size))
3491 goto nla_put_failure;
3491 break; 3492 break;
3492 } 3493 }
3493 3494
@@ -3664,8 +3665,7 @@ int __net_init ip_vs_control_net_init_sysctl(struct net *net)
3664 tbl[idx++].data = &ipvs->sysctl_nat_icmp_send; 3665 tbl[idx++].data = &ipvs->sysctl_nat_icmp_send;
3665 3666
3666 3667
3667 ipvs->sysctl_hdr = register_net_sysctl_table(net, net_vs_ctl_path, 3668 ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl);
3668 tbl);
3669 if (ipvs->sysctl_hdr == NULL) { 3669 if (ipvs->sysctl_hdr == NULL) {
3670 if (!net_eq(net, &init_net)) 3670 if (!net_eq(net, &init_net))
3671 kfree(tbl); 3671 kfree(tbl);
diff --git a/net/netfilter/ipvs/ip_vs_dh.c b/net/netfilter/ipvs/ip_vs_dh.c
index 1c269e56200a..1a53a7a2fff0 100644
--- a/net/netfilter/ipvs/ip_vs_dh.c
+++ b/net/netfilter/ipvs/ip_vs_dh.c
@@ -68,7 +68,7 @@ struct ip_vs_dh_bucket {
68/* 68/*
69 * Returns hash value for IPVS DH entry 69 * Returns hash value for IPVS DH entry
70 */ 70 */
71static inline unsigned ip_vs_dh_hashkey(int af, const union nf_inet_addr *addr) 71static inline unsigned int ip_vs_dh_hashkey(int af, const union nf_inet_addr *addr)
72{ 72{
73 __be32 addr_fold = addr->ip; 73 __be32 addr_fold = addr->ip;
74 74
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index e39f693dd3e4..510f2b5a5855 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -177,7 +177,7 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
177 __be16 port; 177 __be16 port;
178 struct ip_vs_conn *n_cp; 178 struct ip_vs_conn *n_cp;
179 char buf[24]; /* xxx.xxx.xxx.xxx,ppp,ppp\000 */ 179 char buf[24]; /* xxx.xxx.xxx.xxx,ppp,ppp\000 */
180 unsigned buf_len; 180 unsigned int buf_len;
181 int ret = 0; 181 int ret = 0;
182 enum ip_conntrack_info ctinfo; 182 enum ip_conntrack_info ctinfo;
183 struct nf_conn *ct; 183 struct nf_conn *ct;
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index caa43704e55e..9b0de9a0e08e 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -142,7 +142,7 @@ static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en)
142/* 142/*
143 * Returns hash value for IPVS LBLC entry 143 * Returns hash value for IPVS LBLC entry
144 */ 144 */
145static inline unsigned 145static inline unsigned int
146ip_vs_lblc_hashkey(int af, const union nf_inet_addr *addr) 146ip_vs_lblc_hashkey(int af, const union nf_inet_addr *addr)
147{ 147{
148 __be32 addr_fold = addr->ip; 148 __be32 addr_fold = addr->ip;
@@ -163,7 +163,7 @@ ip_vs_lblc_hashkey(int af, const union nf_inet_addr *addr)
163static void 163static void
164ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en) 164ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en)
165{ 165{
166 unsigned hash = ip_vs_lblc_hashkey(en->af, &en->addr); 166 unsigned int hash = ip_vs_lblc_hashkey(en->af, &en->addr);
167 167
168 list_add(&en->list, &tbl->bucket[hash]); 168 list_add(&en->list, &tbl->bucket[hash]);
169 atomic_inc(&tbl->entries); 169 atomic_inc(&tbl->entries);
@@ -178,7 +178,7 @@ static inline struct ip_vs_lblc_entry *
178ip_vs_lblc_get(int af, struct ip_vs_lblc_table *tbl, 178ip_vs_lblc_get(int af, struct ip_vs_lblc_table *tbl,
179 const union nf_inet_addr *addr) 179 const union nf_inet_addr *addr)
180{ 180{
181 unsigned hash = ip_vs_lblc_hashkey(af, addr); 181 unsigned int hash = ip_vs_lblc_hashkey(af, addr);
182 struct ip_vs_lblc_entry *en; 182 struct ip_vs_lblc_entry *en;
183 183
184 list_for_each_entry(en, &tbl->bucket[hash], list) 184 list_for_each_entry(en, &tbl->bucket[hash], list)
@@ -566,8 +566,7 @@ static int __net_init __ip_vs_lblc_init(struct net *net)
566 ipvs->lblc_ctl_table[0].data = &ipvs->sysctl_lblc_expiration; 566 ipvs->lblc_ctl_table[0].data = &ipvs->sysctl_lblc_expiration;
567 567
568 ipvs->lblc_ctl_header = 568 ipvs->lblc_ctl_header =
569 register_net_sysctl_table(net, net_vs_ctl_path, 569 register_net_sysctl(net, "net/ipv4/vs", ipvs->lblc_ctl_table);
570 ipvs->lblc_ctl_table);
571 if (!ipvs->lblc_ctl_header) { 570 if (!ipvs->lblc_ctl_header) {
572 if (!net_eq(net, &init_net)) 571 if (!net_eq(net, &init_net))
573 kfree(ipvs->lblc_ctl_table); 572 kfree(ipvs->lblc_ctl_table);
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index 548bf37aa29e..9dcd39a48897 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -311,7 +311,7 @@ static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
311/* 311/*
312 * Returns hash value for IPVS LBLCR entry 312 * Returns hash value for IPVS LBLCR entry
313 */ 313 */
314static inline unsigned 314static inline unsigned int
315ip_vs_lblcr_hashkey(int af, const union nf_inet_addr *addr) 315ip_vs_lblcr_hashkey(int af, const union nf_inet_addr *addr)
316{ 316{
317 __be32 addr_fold = addr->ip; 317 __be32 addr_fold = addr->ip;
@@ -332,7 +332,7 @@ ip_vs_lblcr_hashkey(int af, const union nf_inet_addr *addr)
332static void 332static void
333ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en) 333ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en)
334{ 334{
335 unsigned hash = ip_vs_lblcr_hashkey(en->af, &en->addr); 335 unsigned int hash = ip_vs_lblcr_hashkey(en->af, &en->addr);
336 336
337 list_add(&en->list, &tbl->bucket[hash]); 337 list_add(&en->list, &tbl->bucket[hash]);
338 atomic_inc(&tbl->entries); 338 atomic_inc(&tbl->entries);
@@ -347,7 +347,7 @@ static inline struct ip_vs_lblcr_entry *
347ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl, 347ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl,
348 const union nf_inet_addr *addr) 348 const union nf_inet_addr *addr)
349{ 349{
350 unsigned hash = ip_vs_lblcr_hashkey(af, addr); 350 unsigned int hash = ip_vs_lblcr_hashkey(af, addr);
351 struct ip_vs_lblcr_entry *en; 351 struct ip_vs_lblcr_entry *en;
352 352
353 list_for_each_entry(en, &tbl->bucket[hash], list) 353 list_for_each_entry(en, &tbl->bucket[hash], list)
@@ -760,8 +760,7 @@ static int __net_init __ip_vs_lblcr_init(struct net *net)
760 ipvs->lblcr_ctl_table[0].data = &ipvs->sysctl_lblcr_expiration; 760 ipvs->lblcr_ctl_table[0].data = &ipvs->sysctl_lblcr_expiration;
761 761
762 ipvs->lblcr_ctl_header = 762 ipvs->lblcr_ctl_header =
763 register_net_sysctl_table(net, net_vs_ctl_path, 763 register_net_sysctl(net, "net/ipv4/vs", ipvs->lblcr_ctl_table);
764 ipvs->lblcr_ctl_table);
765 if (!ipvs->lblcr_ctl_header) { 764 if (!ipvs->lblcr_ctl_header) {
766 if (!net_eq(net, &init_net)) 765 if (!net_eq(net, &init_net))
767 kfree(ipvs->lblcr_ctl_table); 766 kfree(ipvs->lblcr_ctl_table);
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index ed835e67a07e..fdc82ad9cc0e 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -48,7 +48,7 @@ static struct ip_vs_protocol *ip_vs_proto_table[IP_VS_PROTO_TAB_SIZE];
48 */ 48 */
49static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp) 49static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp)
50{ 50{
51 unsigned hash = IP_VS_PROTO_HASH(pp->protocol); 51 unsigned int hash = IP_VS_PROTO_HASH(pp->protocol);
52 52
53 pp->next = ip_vs_proto_table[hash]; 53 pp->next = ip_vs_proto_table[hash];
54 ip_vs_proto_table[hash] = pp; 54 ip_vs_proto_table[hash] = pp;
@@ -66,7 +66,7 @@ static int
66register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp) 66register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp)
67{ 67{
68 struct netns_ipvs *ipvs = net_ipvs(net); 68 struct netns_ipvs *ipvs = net_ipvs(net);
69 unsigned hash = IP_VS_PROTO_HASH(pp->protocol); 69 unsigned int hash = IP_VS_PROTO_HASH(pp->protocol);
70 struct ip_vs_proto_data *pd = 70 struct ip_vs_proto_data *pd =
71 kzalloc(sizeof(struct ip_vs_proto_data), GFP_ATOMIC); 71 kzalloc(sizeof(struct ip_vs_proto_data), GFP_ATOMIC);
72 72
@@ -97,7 +97,7 @@ register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp)
97static int unregister_ip_vs_protocol(struct ip_vs_protocol *pp) 97static int unregister_ip_vs_protocol(struct ip_vs_protocol *pp)
98{ 98{
99 struct ip_vs_protocol **pp_p; 99 struct ip_vs_protocol **pp_p;
100 unsigned hash = IP_VS_PROTO_HASH(pp->protocol); 100 unsigned int hash = IP_VS_PROTO_HASH(pp->protocol);
101 101
102 pp_p = &ip_vs_proto_table[hash]; 102 pp_p = &ip_vs_proto_table[hash];
103 for (; *pp_p; pp_p = &(*pp_p)->next) { 103 for (; *pp_p; pp_p = &(*pp_p)->next) {
@@ -120,7 +120,7 @@ unregister_ip_vs_proto_netns(struct net *net, struct ip_vs_proto_data *pd)
120{ 120{
121 struct netns_ipvs *ipvs = net_ipvs(net); 121 struct netns_ipvs *ipvs = net_ipvs(net);
122 struct ip_vs_proto_data **pd_p; 122 struct ip_vs_proto_data **pd_p;
123 unsigned hash = IP_VS_PROTO_HASH(pd->pp->protocol); 123 unsigned int hash = IP_VS_PROTO_HASH(pd->pp->protocol);
124 124
125 pd_p = &ipvs->proto_data_table[hash]; 125 pd_p = &ipvs->proto_data_table[hash];
126 for (; *pd_p; pd_p = &(*pd_p)->next) { 126 for (; *pd_p; pd_p = &(*pd_p)->next) {
@@ -142,7 +142,7 @@ unregister_ip_vs_proto_netns(struct net *net, struct ip_vs_proto_data *pd)
142struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto) 142struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto)
143{ 143{
144 struct ip_vs_protocol *pp; 144 struct ip_vs_protocol *pp;
145 unsigned hash = IP_VS_PROTO_HASH(proto); 145 unsigned int hash = IP_VS_PROTO_HASH(proto);
146 146
147 for (pp = ip_vs_proto_table[hash]; pp; pp = pp->next) { 147 for (pp = ip_vs_proto_table[hash]; pp; pp = pp->next) {
148 if (pp->protocol == proto) 148 if (pp->protocol == proto)
@@ -160,7 +160,7 @@ struct ip_vs_proto_data *
160__ipvs_proto_data_get(struct netns_ipvs *ipvs, unsigned short proto) 160__ipvs_proto_data_get(struct netns_ipvs *ipvs, unsigned short proto)
161{ 161{
162 struct ip_vs_proto_data *pd; 162 struct ip_vs_proto_data *pd;
163 unsigned hash = IP_VS_PROTO_HASH(proto); 163 unsigned int hash = IP_VS_PROTO_HASH(proto);
164 164
165 for (pd = ipvs->proto_data_table[hash]; pd; pd = pd->next) { 165 for (pd = ipvs->proto_data_table[hash]; pd; pd = pd->next) {
166 if (pd->pp->protocol == proto) 166 if (pd->pp->protocol == proto)
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c
index 069e8d4d5c01..91e97ee049be 100644
--- a/net/netfilter/ipvs/ip_vs_sh.c
+++ b/net/netfilter/ipvs/ip_vs_sh.c
@@ -70,7 +70,7 @@ struct ip_vs_sh_bucket {
70/* 70/*
71 * Returns hash value for IPVS SH entry 71 * Returns hash value for IPVS SH entry
72 */ 72 */
73static inline unsigned ip_vs_sh_hashkey(int af, const union nf_inet_addr *addr) 73static inline unsigned int ip_vs_sh_hashkey(int af, const union nf_inet_addr *addr)
74{ 74{
75 __be32 addr_fold = addr->ip; 75 __be32 addr_fold = addr->ip;
76 76
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 8a0d6d6889f0..bf5e538af67b 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -839,7 +839,7 @@ static void ip_vs_process_message_v0(struct net *net, const char *buffer,
839 839
840 p = (char *)buffer + sizeof(struct ip_vs_sync_mesg_v0); 840 p = (char *)buffer + sizeof(struct ip_vs_sync_mesg_v0);
841 for (i=0; i<m->nr_conns; i++) { 841 for (i=0; i<m->nr_conns; i++) {
842 unsigned flags, state; 842 unsigned int flags, state;
843 843
844 if (p + SIMPLE_CONN_SIZE > buffer+buflen) { 844 if (p + SIMPLE_CONN_SIZE > buffer+buflen) {
845 IP_VS_ERR_RL("BACKUP v0, bogus conn\n"); 845 IP_VS_ERR_RL("BACKUP v0, bogus conn\n");
@@ -1109,7 +1109,7 @@ static void ip_vs_process_message(struct net *net, __u8 *buffer,
1109 1109
1110 for (i=0; i<nr_conns; i++) { 1110 for (i=0; i<nr_conns; i++) {
1111 union ip_vs_sync_conn *s; 1111 union ip_vs_sync_conn *s;
1112 unsigned size; 1112 unsigned int size;
1113 int retc; 1113 int retc;
1114 1114
1115 p = msg_end; 1115 p = msg_end;
@@ -1368,7 +1368,7 @@ static struct socket *make_receive_sock(struct net *net)
1368 */ 1368 */
1369 sk_change_net(sock->sk, net); 1369 sk_change_net(sock->sk, net);
1370 /* it is equivalent to the REUSEADDR option in user-space */ 1370 /* it is equivalent to the REUSEADDR option in user-space */
1371 sock->sk->sk_reuse = 1; 1371 sock->sk->sk_reuse = SK_CAN_REUSE;
1372 1372
1373 result = sock->ops->bind(sock, (struct sockaddr *) &mcast_addr, 1373 result = sock->ops->bind(sock, (struct sockaddr *) &mcast_addr,
1374 sizeof(struct sockaddr)); 1374 sizeof(struct sockaddr));
diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
index f4f8cda05986..d61e0782a797 100644
--- a/net/netfilter/nf_conntrack_acct.c
+++ b/net/netfilter/nf_conntrack_acct.c
@@ -69,8 +69,8 @@ static int nf_conntrack_acct_init_sysctl(struct net *net)
69 69
70 table[0].data = &net->ct.sysctl_acct; 70 table[0].data = &net->ct.sysctl_acct;
71 71
72 net->ct.acct_sysctl_header = register_net_sysctl_table(net, 72 net->ct.acct_sysctl_header = register_net_sysctl(net, "net/netfilter",
73 nf_net_netfilter_sysctl_path, table); 73 table);
74 if (!net->ct.acct_sysctl_header) { 74 if (!net->ct.acct_sysctl_header) {
75 printk(KERN_ERR "nf_conntrack_acct: can't register to sysctl.\n"); 75 printk(KERN_ERR "nf_conntrack_acct: can't register to sysctl.\n");
76 goto out_register; 76 goto out_register;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 729f157a0efa..cf0747c5741f 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1152,8 +1152,9 @@ static struct nf_ct_ext_type nf_ct_zone_extend __read_mostly = {
1152int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb, 1152int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
1153 const struct nf_conntrack_tuple *tuple) 1153 const struct nf_conntrack_tuple *tuple)
1154{ 1154{
1155 NLA_PUT_BE16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port); 1155 if (nla_put_be16(skb, CTA_PROTO_SRC_PORT, tuple->src.u.tcp.port) ||
1156 NLA_PUT_BE16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port); 1156 nla_put_be16(skb, CTA_PROTO_DST_PORT, tuple->dst.u.tcp.port))
1157 goto nla_put_failure;
1157 return 0; 1158 return 0;
1158 1159
1159nla_put_failure: 1160nla_put_failure:
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 5bd3047ddeec..b924f3a49a8e 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -199,8 +199,7 @@ static int nf_conntrack_event_init_sysctl(struct net *net)
199 table[1].data = &net->ct.sysctl_events_retry_timeout; 199 table[1].data = &net->ct.sysctl_events_retry_timeout;
200 200
201 net->ct.event_sysctl_header = 201 net->ct.event_sysctl_header =
202 register_net_sysctl_table(net, 202 register_net_sysctl(net, "net/netfilter", table);
203 nf_net_netfilter_sysctl_path, table);
204 if (!net->ct.event_sysctl_header) { 203 if (!net->ct.event_sysctl_header) {
205 printk(KERN_ERR "nf_ct_event: can't register to sysctl.\n"); 204 printk(KERN_ERR "nf_ct_event: can't register to sysctl.\n");
206 goto out_register; 205 goto out_register;
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 722291f8af72..471b054ad002 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -1230,7 +1230,7 @@ static struct nf_conntrack_expect *find_expect(struct nf_conn *ct,
1230 1230
1231/****************************************************************************/ 1231/****************************************************************************/
1232static int set_expect_timeout(struct nf_conntrack_expect *exp, 1232static int set_expect_timeout(struct nf_conntrack_expect *exp,
1233 unsigned timeout) 1233 unsigned int timeout)
1234{ 1234{
1235 if (!exp || !del_timer(&exp->timeout)) 1235 if (!exp || !del_timer(&exp->timeout))
1236 return 0; 1236 return 0;
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index ca7e8354e4f8..462ec2dbe561 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -66,7 +66,8 @@ ctnetlink_dump_tuples_proto(struct sk_buff *skb,
66 nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO | NLA_F_NESTED); 66 nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO | NLA_F_NESTED);
67 if (!nest_parms) 67 if (!nest_parms)
68 goto nla_put_failure; 68 goto nla_put_failure;
69 NLA_PUT_U8(skb, CTA_PROTO_NUM, tuple->dst.protonum); 69 if (nla_put_u8(skb, CTA_PROTO_NUM, tuple->dst.protonum))
70 goto nla_put_failure;
70 71
71 if (likely(l4proto->tuple_to_nlattr)) 72 if (likely(l4proto->tuple_to_nlattr))
72 ret = l4proto->tuple_to_nlattr(skb, tuple); 73 ret = l4proto->tuple_to_nlattr(skb, tuple);
@@ -126,7 +127,8 @@ ctnetlink_dump_tuples(struct sk_buff *skb,
126static inline int 127static inline int
127ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct) 128ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
128{ 129{
129 NLA_PUT_BE32(skb, CTA_STATUS, htonl(ct->status)); 130 if (nla_put_be32(skb, CTA_STATUS, htonl(ct->status)))
131 goto nla_put_failure;
130 return 0; 132 return 0;
131 133
132nla_put_failure: 134nla_put_failure:
@@ -141,7 +143,8 @@ ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
141 if (timeout < 0) 143 if (timeout < 0)
142 timeout = 0; 144 timeout = 0;
143 145
144 NLA_PUT_BE32(skb, CTA_TIMEOUT, htonl(timeout)); 146 if (nla_put_be32(skb, CTA_TIMEOUT, htonl(timeout)))
147 goto nla_put_failure;
145 return 0; 148 return 0;
146 149
147nla_put_failure: 150nla_put_failure:
@@ -190,7 +193,8 @@ ctnetlink_dump_helpinfo(struct sk_buff *skb, const struct nf_conn *ct)
190 nest_helper = nla_nest_start(skb, CTA_HELP | NLA_F_NESTED); 193 nest_helper = nla_nest_start(skb, CTA_HELP | NLA_F_NESTED);
191 if (!nest_helper) 194 if (!nest_helper)
192 goto nla_put_failure; 195 goto nla_put_failure;
193 NLA_PUT_STRING(skb, CTA_HELP_NAME, helper->name); 196 if (nla_put_string(skb, CTA_HELP_NAME, helper->name))
197 goto nla_put_failure;
194 198
195 if (helper->to_nlattr) 199 if (helper->to_nlattr)
196 helper->to_nlattr(skb, ct); 200 helper->to_nlattr(skb, ct);
@@ -214,8 +218,9 @@ dump_counters(struct sk_buff *skb, u64 pkts, u64 bytes,
214 if (!nest_count) 218 if (!nest_count)
215 goto nla_put_failure; 219 goto nla_put_failure;
216 220
217 NLA_PUT_BE64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts)); 221 if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts)) ||
218 NLA_PUT_BE64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes)); 222 nla_put_be64(skb, CTA_COUNTERS_BYTES, cpu_to_be64(bytes)))
223 goto nla_put_failure;
219 224
220 nla_nest_end(skb, nest_count); 225 nla_nest_end(skb, nest_count);
221 226
@@ -260,11 +265,10 @@ ctnetlink_dump_timestamp(struct sk_buff *skb, const struct nf_conn *ct)
260 if (!nest_count) 265 if (!nest_count)
261 goto nla_put_failure; 266 goto nla_put_failure;
262 267
263 NLA_PUT_BE64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start)); 268 if (nla_put_be64(skb, CTA_TIMESTAMP_START, cpu_to_be64(tstamp->start)) ||
264 if (tstamp->stop != 0) { 269 (tstamp->stop != 0 && nla_put_be64(skb, CTA_TIMESTAMP_STOP,
265 NLA_PUT_BE64(skb, CTA_TIMESTAMP_STOP, 270 cpu_to_be64(tstamp->stop))))
266 cpu_to_be64(tstamp->stop)); 271 goto nla_put_failure;
267 }
268 nla_nest_end(skb, nest_count); 272 nla_nest_end(skb, nest_count);
269 273
270 return 0; 274 return 0;
@@ -277,7 +281,8 @@ nla_put_failure:
277static inline int 281static inline int
278ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct) 282ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
279{ 283{
280 NLA_PUT_BE32(skb, CTA_MARK, htonl(ct->mark)); 284 if (nla_put_be32(skb, CTA_MARK, htonl(ct->mark)))
285 goto nla_put_failure;
281 return 0; 286 return 0;
282 287
283nla_put_failure: 288nla_put_failure:
@@ -304,7 +309,8 @@ ctnetlink_dump_secctx(struct sk_buff *skb, const struct nf_conn *ct)
304 if (!nest_secctx) 309 if (!nest_secctx)
305 goto nla_put_failure; 310 goto nla_put_failure;
306 311
307 NLA_PUT_STRING(skb, CTA_SECCTX_NAME, secctx); 312 if (nla_put_string(skb, CTA_SECCTX_NAME, secctx))
313 goto nla_put_failure;
308 nla_nest_end(skb, nest_secctx); 314 nla_nest_end(skb, nest_secctx);
309 315
310 ret = 0; 316 ret = 0;
@@ -349,12 +355,13 @@ dump_nat_seq_adj(struct sk_buff *skb, const struct nf_nat_seq *natseq, int type)
349 if (!nest_parms) 355 if (!nest_parms)
350 goto nla_put_failure; 356 goto nla_put_failure;
351 357
352 NLA_PUT_BE32(skb, CTA_NAT_SEQ_CORRECTION_POS, 358 if (nla_put_be32(skb, CTA_NAT_SEQ_CORRECTION_POS,
353 htonl(natseq->correction_pos)); 359 htonl(natseq->correction_pos)) ||
354 NLA_PUT_BE32(skb, CTA_NAT_SEQ_OFFSET_BEFORE, 360 nla_put_be32(skb, CTA_NAT_SEQ_OFFSET_BEFORE,
355 htonl(natseq->offset_before)); 361 htonl(natseq->offset_before)) ||
356 NLA_PUT_BE32(skb, CTA_NAT_SEQ_OFFSET_AFTER, 362 nla_put_be32(skb, CTA_NAT_SEQ_OFFSET_AFTER,
357 htonl(natseq->offset_after)); 363 htonl(natseq->offset_after)))
364 goto nla_put_failure;
358 365
359 nla_nest_end(skb, nest_parms); 366 nla_nest_end(skb, nest_parms);
360 367
@@ -390,7 +397,8 @@ ctnetlink_dump_nat_seq_adj(struct sk_buff *skb, const struct nf_conn *ct)
390static inline int 397static inline int
391ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct) 398ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
392{ 399{
393 NLA_PUT_BE32(skb, CTA_ID, htonl((unsigned long)ct)); 400 if (nla_put_be32(skb, CTA_ID, htonl((unsigned long)ct)))
401 goto nla_put_failure;
394 return 0; 402 return 0;
395 403
396nla_put_failure: 404nla_put_failure:
@@ -400,7 +408,8 @@ nla_put_failure:
400static inline int 408static inline int
401ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct) 409ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
402{ 410{
403 NLA_PUT_BE32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use))); 411 if (nla_put_be32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use))))
412 goto nla_put_failure;
404 return 0; 413 return 0;
405 414
406nla_put_failure: 415nla_put_failure:
@@ -440,8 +449,9 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
440 goto nla_put_failure; 449 goto nla_put_failure;
441 nla_nest_end(skb, nest_parms); 450 nla_nest_end(skb, nest_parms);
442 451
443 if (nf_ct_zone(ct)) 452 if (nf_ct_zone(ct) &&
444 NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct))); 453 nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
454 goto nla_put_failure;
445 455
446 if (ctnetlink_dump_status(skb, ct) < 0 || 456 if (ctnetlink_dump_status(skb, ct) < 0 ||
447 ctnetlink_dump_timeout(skb, ct) < 0 || 457 ctnetlink_dump_timeout(skb, ct) < 0 ||
@@ -617,8 +627,9 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
617 goto nla_put_failure; 627 goto nla_put_failure;
618 nla_nest_end(skb, nest_parms); 628 nla_nest_end(skb, nest_parms);
619 629
620 if (nf_ct_zone(ct)) 630 if (nf_ct_zone(ct) &&
621 NLA_PUT_BE16(skb, CTA_ZONE, htons(nf_ct_zone(ct))); 631 nla_put_be16(skb, CTA_ZONE, htons(nf_ct_zone(ct))))
632 goto nla_put_failure;
622 633
623 if (ctnetlink_dump_id(skb, ct) < 0) 634 if (ctnetlink_dump_id(skb, ct) < 0)
624 goto nla_put_failure; 635 goto nla_put_failure;
@@ -1705,7 +1716,8 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
1705 if (!nest_parms) 1716 if (!nest_parms)
1706 goto nla_put_failure; 1717 goto nla_put_failure;
1707 1718
1708 NLA_PUT_BE32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir)); 1719 if (nla_put_be32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir)))
1720 goto nla_put_failure;
1709 1721
1710 nat_tuple.src.l3num = nf_ct_l3num(master); 1722 nat_tuple.src.l3num = nf_ct_l3num(master);
1711 nat_tuple.src.u3.ip = exp->saved_ip; 1723 nat_tuple.src.u3.ip = exp->saved_ip;
@@ -1718,21 +1730,24 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
1718 nla_nest_end(skb, nest_parms); 1730 nla_nest_end(skb, nest_parms);
1719 } 1731 }
1720#endif 1732#endif
1721 NLA_PUT_BE32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)); 1733 if (nla_put_be32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout)) ||
1722 NLA_PUT_BE32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp)); 1734 nla_put_be32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp)) ||
1723 NLA_PUT_BE32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)); 1735 nla_put_be32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags)) ||
1724 NLA_PUT_BE32(skb, CTA_EXPECT_CLASS, htonl(exp->class)); 1736 nla_put_be32(skb, CTA_EXPECT_CLASS, htonl(exp->class)))
1737 goto nla_put_failure;
1725 help = nfct_help(master); 1738 help = nfct_help(master);
1726 if (help) { 1739 if (help) {
1727 struct nf_conntrack_helper *helper; 1740 struct nf_conntrack_helper *helper;
1728 1741
1729 helper = rcu_dereference(help->helper); 1742 helper = rcu_dereference(help->helper);
1730 if (helper) 1743 if (helper &&
1731 NLA_PUT_STRING(skb, CTA_EXPECT_HELP_NAME, helper->name); 1744 nla_put_string(skb, CTA_EXPECT_HELP_NAME, helper->name))
1745 goto nla_put_failure;
1732 } 1746 }
1733 expfn = nf_ct_helper_expectfn_find_by_symbol(exp->expectfn); 1747 expfn = nf_ct_helper_expectfn_find_by_symbol(exp->expectfn);
1734 if (expfn != NULL) 1748 if (expfn != NULL &&
1735 NLA_PUT_STRING(skb, CTA_EXPECT_FN, expfn->name); 1749 nla_put_string(skb, CTA_EXPECT_FN, expfn->name))
1750 goto nla_put_failure;
1736 1751
1737 return 0; 1752 return 0;
1738 1753
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index be3da2c8cdc5..8b631b07a645 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -36,11 +36,11 @@ static DEFINE_MUTEX(nf_ct_proto_mutex);
36 36
37#ifdef CONFIG_SYSCTL 37#ifdef CONFIG_SYSCTL
38static int 38static int
39nf_ct_register_sysctl(struct ctl_table_header **header, struct ctl_path *path, 39nf_ct_register_sysctl(struct ctl_table_header **header, const char *path,
40 struct ctl_table *table, unsigned int *users) 40 struct ctl_table *table, unsigned int *users)
41{ 41{
42 if (*header == NULL) { 42 if (*header == NULL) {
43 *header = register_sysctl_paths(path, table); 43 *header = register_net_sysctl(&init_net, path, table);
44 if (*header == NULL) 44 if (*header == NULL)
45 return -ENOMEM; 45 return -ENOMEM;
46 } 46 }
@@ -56,7 +56,7 @@ nf_ct_unregister_sysctl(struct ctl_table_header **header,
56 if (users != NULL && --*users > 0) 56 if (users != NULL && --*users > 0)
57 return; 57 return;
58 58
59 unregister_sysctl_table(*header); 59 unregister_net_sysctl_table(*header);
60 *header = NULL; 60 *header = NULL;
61} 61}
62#endif 62#endif
@@ -250,7 +250,7 @@ static int nf_ct_l4proto_register_sysctl(struct nf_conntrack_l4proto *l4proto)
250#ifdef CONFIG_SYSCTL 250#ifdef CONFIG_SYSCTL
251 if (l4proto->ctl_table != NULL) { 251 if (l4proto->ctl_table != NULL) {
252 err = nf_ct_register_sysctl(l4proto->ctl_table_header, 252 err = nf_ct_register_sysctl(l4proto->ctl_table_header,
253 nf_net_netfilter_sysctl_path, 253 "net/netfilter",
254 l4proto->ctl_table, 254 l4proto->ctl_table,
255 l4proto->ctl_table_users); 255 l4proto->ctl_table_users);
256 if (err < 0) 256 if (err < 0)
@@ -259,7 +259,7 @@ static int nf_ct_l4proto_register_sysctl(struct nf_conntrack_l4proto *l4proto)
259#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT 259#ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
260 if (l4proto->ctl_compat_table != NULL) { 260 if (l4proto->ctl_compat_table != NULL) {
261 err = nf_ct_register_sysctl(&l4proto->ctl_compat_table_header, 261 err = nf_ct_register_sysctl(&l4proto->ctl_compat_table_header,
262 nf_net_ipv4_netfilter_sysctl_path, 262 "net/ipv4/netfilter",
263 l4proto->ctl_compat_table, NULL); 263 l4proto->ctl_compat_table, NULL);
264 if (err == 0) 264 if (err == 0)
265 goto out; 265 goto out;
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index 24fdce256cb0..ef706a485be1 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -643,11 +643,12 @@ static int dccp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
643 nest_parms = nla_nest_start(skb, CTA_PROTOINFO_DCCP | NLA_F_NESTED); 643 nest_parms = nla_nest_start(skb, CTA_PROTOINFO_DCCP | NLA_F_NESTED);
644 if (!nest_parms) 644 if (!nest_parms)
645 goto nla_put_failure; 645 goto nla_put_failure;
646 NLA_PUT_U8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state); 646 if (nla_put_u8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state) ||
647 NLA_PUT_U8(skb, CTA_PROTOINFO_DCCP_ROLE, 647 nla_put_u8(skb, CTA_PROTOINFO_DCCP_ROLE,
648 ct->proto.dccp.role[IP_CT_DIR_ORIGINAL]); 648 ct->proto.dccp.role[IP_CT_DIR_ORIGINAL]) ||
649 NLA_PUT_BE64(skb, CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ, 649 nla_put_be64(skb, CTA_PROTOINFO_DCCP_HANDSHAKE_SEQ,
650 cpu_to_be64(ct->proto.dccp.handshake_seq)); 650 cpu_to_be64(ct->proto.dccp.handshake_seq)))
651 goto nla_put_failure;
651 nla_nest_end(skb, nest_parms); 652 nla_nest_end(skb, nest_parms);
652 spin_unlock_bh(&ct->lock); 653 spin_unlock_bh(&ct->lock);
653 return 0; 654 return 0;
@@ -739,9 +740,10 @@ dccp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
739 const unsigned int *timeouts = data; 740 const unsigned int *timeouts = data;
740 int i; 741 int i;
741 742
742 for (i=CTA_TIMEOUT_DCCP_UNSPEC+1; i<CTA_TIMEOUT_DCCP_MAX+1; i++) 743 for (i=CTA_TIMEOUT_DCCP_UNSPEC+1; i<CTA_TIMEOUT_DCCP_MAX+1; i++) {
743 NLA_PUT_BE32(skb, i, htonl(timeouts[i] / HZ)); 744 if (nla_put_be32(skb, i, htonl(timeouts[i] / HZ)))
744 745 goto nla_put_failure;
746 }
745 return 0; 747 return 0;
746 748
747nla_put_failure: 749nla_put_failure:
@@ -908,8 +910,8 @@ static __net_init int dccp_net_init(struct net *net)
908 dn->sysctl_table[6].data = &dn->dccp_timeout[CT_DCCP_TIMEWAIT]; 910 dn->sysctl_table[6].data = &dn->dccp_timeout[CT_DCCP_TIMEWAIT];
909 dn->sysctl_table[7].data = &dn->dccp_loose; 911 dn->sysctl_table[7].data = &dn->dccp_loose;
910 912
911 dn->sysctl_header = register_net_sysctl_table(net, 913 dn->sysctl_header = register_net_sysctl(net, "net/netfilter",
912 nf_net_netfilter_sysctl_path, dn->sysctl_table); 914 dn->sysctl_table);
913 if (!dn->sysctl_header) { 915 if (!dn->sysctl_header) {
914 kfree(dn->sysctl_table); 916 kfree(dn->sysctl_table);
915 return -ENOMEM; 917 return -ENOMEM;
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index 835e24c58f0d..d8923d54b358 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -90,7 +90,8 @@ generic_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
90{ 90{
91 const unsigned int *timeout = data; 91 const unsigned int *timeout = data;
92 92
93 NLA_PUT_BE32(skb, CTA_TIMEOUT_GENERIC_TIMEOUT, htonl(*timeout / HZ)); 93 if (nla_put_be32(skb, CTA_TIMEOUT_GENERIC_TIMEOUT, htonl(*timeout / HZ)))
94 goto nla_put_failure;
94 95
95 return 0; 96 return 0;
96 97
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index 659648c4b14a..4bf6b4e4b776 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -321,10 +321,11 @@ gre_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
321{ 321{
322 const unsigned int *timeouts = data; 322 const unsigned int *timeouts = data;
323 323
324 NLA_PUT_BE32(skb, CTA_TIMEOUT_GRE_UNREPLIED, 324 if (nla_put_be32(skb, CTA_TIMEOUT_GRE_UNREPLIED,
325 htonl(timeouts[GRE_CT_UNREPLIED] / HZ)); 325 htonl(timeouts[GRE_CT_UNREPLIED] / HZ)) ||
326 NLA_PUT_BE32(skb, CTA_TIMEOUT_GRE_REPLIED, 326 nla_put_be32(skb, CTA_TIMEOUT_GRE_REPLIED,
327 htonl(timeouts[GRE_CT_REPLIED] / HZ)); 327 htonl(timeouts[GRE_CT_REPLIED] / HZ)))
328 goto nla_put_failure;
328 return 0; 329 return 0;
329 330
330nla_put_failure: 331nla_put_failure:
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 72b5088592dc..996db2fa21f7 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -482,15 +482,12 @@ static int sctp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
482 if (!nest_parms) 482 if (!nest_parms)
483 goto nla_put_failure; 483 goto nla_put_failure;
484 484
485 NLA_PUT_U8(skb, CTA_PROTOINFO_SCTP_STATE, ct->proto.sctp.state); 485 if (nla_put_u8(skb, CTA_PROTOINFO_SCTP_STATE, ct->proto.sctp.state) ||
486 486 nla_put_be32(skb, CTA_PROTOINFO_SCTP_VTAG_ORIGINAL,
487 NLA_PUT_BE32(skb, 487 ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL]) ||
488 CTA_PROTOINFO_SCTP_VTAG_ORIGINAL, 488 nla_put_be32(skb, CTA_PROTOINFO_SCTP_VTAG_REPLY,
489 ct->proto.sctp.vtag[IP_CT_DIR_ORIGINAL]); 489 ct->proto.sctp.vtag[IP_CT_DIR_REPLY]))
490 490 goto nla_put_failure;
491 NLA_PUT_BE32(skb,
492 CTA_PROTOINFO_SCTP_VTAG_REPLY,
493 ct->proto.sctp.vtag[IP_CT_DIR_REPLY]);
494 491
495 spin_unlock_bh(&ct->lock); 492 spin_unlock_bh(&ct->lock);
496 493
@@ -578,9 +575,10 @@ sctp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
578 const unsigned int *timeouts = data; 575 const unsigned int *timeouts = data;
579 int i; 576 int i;
580 577
581 for (i=CTA_TIMEOUT_SCTP_UNSPEC+1; i<CTA_TIMEOUT_SCTP_MAX+1; i++) 578 for (i=CTA_TIMEOUT_SCTP_UNSPEC+1; i<CTA_TIMEOUT_SCTP_MAX+1; i++) {
582 NLA_PUT_BE32(skb, i, htonl(timeouts[i] / HZ)); 579 if (nla_put_be32(skb, i, htonl(timeouts[i] / HZ)))
583 580 goto nla_put_failure;
581 }
584 return 0; 582 return 0;
585 583
586nla_put_failure: 584nla_put_failure:
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 0d07a1dcf605..4dfbfa840f8a 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -1147,21 +1147,22 @@ static int tcp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
1147 if (!nest_parms) 1147 if (!nest_parms)
1148 goto nla_put_failure; 1148 goto nla_put_failure;
1149 1149
1150 NLA_PUT_U8(skb, CTA_PROTOINFO_TCP_STATE, ct->proto.tcp.state); 1150 if (nla_put_u8(skb, CTA_PROTOINFO_TCP_STATE, ct->proto.tcp.state) ||
1151 1151 nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL,
1152 NLA_PUT_U8(skb, CTA_PROTOINFO_TCP_WSCALE_ORIGINAL, 1152 ct->proto.tcp.seen[0].td_scale) ||
1153 ct->proto.tcp.seen[0].td_scale); 1153 nla_put_u8(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY,
1154 1154 ct->proto.tcp.seen[1].td_scale))
1155 NLA_PUT_U8(skb, CTA_PROTOINFO_TCP_WSCALE_REPLY, 1155 goto nla_put_failure;
1156 ct->proto.tcp.seen[1].td_scale);
1157 1156
1158 tmp.flags = ct->proto.tcp.seen[0].flags; 1157 tmp.flags = ct->proto.tcp.seen[0].flags;
1159 NLA_PUT(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL, 1158 if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_ORIGINAL,
1160 sizeof(struct nf_ct_tcp_flags), &tmp); 1159 sizeof(struct nf_ct_tcp_flags), &tmp))
1160 goto nla_put_failure;
1161 1161
1162 tmp.flags = ct->proto.tcp.seen[1].flags; 1162 tmp.flags = ct->proto.tcp.seen[1].flags;
1163 NLA_PUT(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY, 1163 if (nla_put(skb, CTA_PROTOINFO_TCP_FLAGS_REPLY,
1164 sizeof(struct nf_ct_tcp_flags), &tmp); 1164 sizeof(struct nf_ct_tcp_flags), &tmp))
1165 goto nla_put_failure;
1165 spin_unlock_bh(&ct->lock); 1166 spin_unlock_bh(&ct->lock);
1166 1167
1167 nla_nest_end(skb, nest_parms); 1168 nla_nest_end(skb, nest_parms);
@@ -1310,28 +1311,29 @@ tcp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
1310{ 1311{
1311 const unsigned int *timeouts = data; 1312 const unsigned int *timeouts = data;
1312 1313
1313 NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_SENT, 1314 if (nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT,
1314 htonl(timeouts[TCP_CONNTRACK_SYN_SENT] / HZ)); 1315 htonl(timeouts[TCP_CONNTRACK_SYN_SENT] / HZ)) ||
1315 NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_RECV, 1316 nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_RECV,
1316 htonl(timeouts[TCP_CONNTRACK_SYN_RECV] / HZ)); 1317 htonl(timeouts[TCP_CONNTRACK_SYN_RECV] / HZ)) ||
1317 NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_ESTABLISHED, 1318 nla_put_be32(skb, CTA_TIMEOUT_TCP_ESTABLISHED,
1318 htonl(timeouts[TCP_CONNTRACK_ESTABLISHED] / HZ)); 1319 htonl(timeouts[TCP_CONNTRACK_ESTABLISHED] / HZ)) ||
1319 NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_FIN_WAIT, 1320 nla_put_be32(skb, CTA_TIMEOUT_TCP_FIN_WAIT,
1320 htonl(timeouts[TCP_CONNTRACK_FIN_WAIT] / HZ)); 1321 htonl(timeouts[TCP_CONNTRACK_FIN_WAIT] / HZ)) ||
1321 NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_CLOSE_WAIT, 1322 nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE_WAIT,
1322 htonl(timeouts[TCP_CONNTRACK_CLOSE_WAIT] / HZ)); 1323 htonl(timeouts[TCP_CONNTRACK_CLOSE_WAIT] / HZ)) ||
1323 NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_LAST_ACK, 1324 nla_put_be32(skb, CTA_TIMEOUT_TCP_LAST_ACK,
1324 htonl(timeouts[TCP_CONNTRACK_LAST_ACK] / HZ)); 1325 htonl(timeouts[TCP_CONNTRACK_LAST_ACK] / HZ)) ||
1325 NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_TIME_WAIT, 1326 nla_put_be32(skb, CTA_TIMEOUT_TCP_TIME_WAIT,
1326 htonl(timeouts[TCP_CONNTRACK_TIME_WAIT] / HZ)); 1327 htonl(timeouts[TCP_CONNTRACK_TIME_WAIT] / HZ)) ||
1327 NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_CLOSE, 1328 nla_put_be32(skb, CTA_TIMEOUT_TCP_CLOSE,
1328 htonl(timeouts[TCP_CONNTRACK_CLOSE] / HZ)); 1329 htonl(timeouts[TCP_CONNTRACK_CLOSE] / HZ)) ||
1329 NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_SENT2, 1330 nla_put_be32(skb, CTA_TIMEOUT_TCP_SYN_SENT2,
1330 htonl(timeouts[TCP_CONNTRACK_SYN_SENT2] / HZ)); 1331 htonl(timeouts[TCP_CONNTRACK_SYN_SENT2] / HZ)) ||
1331 NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_RETRANS, 1332 nla_put_be32(skb, CTA_TIMEOUT_TCP_RETRANS,
1332 htonl(timeouts[TCP_CONNTRACK_RETRANS] / HZ)); 1333 htonl(timeouts[TCP_CONNTRACK_RETRANS] / HZ)) ||
1333 NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_UNACK, 1334 nla_put_be32(skb, CTA_TIMEOUT_TCP_UNACK,
1334 htonl(timeouts[TCP_CONNTRACK_UNACK] / HZ)); 1335 htonl(timeouts[TCP_CONNTRACK_UNACK] / HZ)))
1336 goto nla_put_failure;
1335 return 0; 1337 return 0;
1336 1338
1337nla_put_failure: 1339nla_put_failure:
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index a9073dc1548d..7259a6bdeb49 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -181,10 +181,11 @@ udp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
181{ 181{
182 const unsigned int *timeouts = data; 182 const unsigned int *timeouts = data;
183 183
184 NLA_PUT_BE32(skb, CTA_TIMEOUT_UDP_UNREPLIED, 184 if (nla_put_be32(skb, CTA_TIMEOUT_UDP_UNREPLIED,
185 htonl(timeouts[UDP_CT_UNREPLIED] / HZ)); 185 htonl(timeouts[UDP_CT_UNREPLIED] / HZ)) ||
186 NLA_PUT_BE32(skb, CTA_TIMEOUT_UDP_REPLIED, 186 nla_put_be32(skb, CTA_TIMEOUT_UDP_REPLIED,
187 htonl(timeouts[UDP_CT_REPLIED] / HZ)); 187 htonl(timeouts[UDP_CT_REPLIED] / HZ)))
188 goto nla_put_failure;
188 return 0; 189 return 0;
189 190
190nla_put_failure: 191nla_put_failure:
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
index e0606392cda0..4d60a5376aa6 100644
--- a/net/netfilter/nf_conntrack_proto_udplite.c
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -185,10 +185,11 @@ udplite_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
185{ 185{
186 const unsigned int *timeouts = data; 186 const unsigned int *timeouts = data;
187 187
188 NLA_PUT_BE32(skb, CTA_TIMEOUT_UDPLITE_UNREPLIED, 188 if (nla_put_be32(skb, CTA_TIMEOUT_UDPLITE_UNREPLIED,
189 htonl(timeouts[UDPLITE_CT_UNREPLIED] / HZ)); 189 htonl(timeouts[UDPLITE_CT_UNREPLIED] / HZ)) ||
190 NLA_PUT_BE32(skb, CTA_TIMEOUT_UDPLITE_REPLIED, 190 nla_put_be32(skb, CTA_TIMEOUT_UDPLITE_REPLIED,
191 htonl(timeouts[UDPLITE_CT_REPLIED] / HZ)); 191 htonl(timeouts[UDPLITE_CT_REPLIED] / HZ)))
192 goto nla_put_failure;
192 return 0; 193 return 0;
193 194
194nla_put_failure: 195nla_put_failure:
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 885f5ab9bc28..9b3943252a5e 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -468,18 +468,13 @@ static ctl_table nf_ct_netfilter_table[] = {
468 { } 468 { }
469}; 469};
470 470
471static struct ctl_path nf_ct_path[] = {
472 { .procname = "net", },
473 { }
474};
475
476static int nf_conntrack_standalone_init_sysctl(struct net *net) 471static int nf_conntrack_standalone_init_sysctl(struct net *net)
477{ 472{
478 struct ctl_table *table; 473 struct ctl_table *table;
479 474
480 if (net_eq(net, &init_net)) { 475 if (net_eq(net, &init_net)) {
481 nf_ct_netfilter_header = 476 nf_ct_netfilter_header =
482 register_sysctl_paths(nf_ct_path, nf_ct_netfilter_table); 477 register_net_sysctl(&init_net, "net", nf_ct_netfilter_table);
483 if (!nf_ct_netfilter_header) 478 if (!nf_ct_netfilter_header)
484 goto out; 479 goto out;
485 } 480 }
@@ -494,8 +489,7 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net)
494 table[3].data = &net->ct.sysctl_checksum; 489 table[3].data = &net->ct.sysctl_checksum;
495 table[4].data = &net->ct.sysctl_log_invalid; 490 table[4].data = &net->ct.sysctl_log_invalid;
496 491
497 net->ct.sysctl_header = register_net_sysctl_table(net, 492 net->ct.sysctl_header = register_net_sysctl(net, "net/netfilter", table);
498 nf_net_netfilter_sysctl_path, table);
499 if (!net->ct.sysctl_header) 493 if (!net->ct.sysctl_header)
500 goto out_unregister_netfilter; 494 goto out_unregister_netfilter;
501 495
@@ -505,7 +499,7 @@ out_unregister_netfilter:
505 kfree(table); 499 kfree(table);
506out_kmemdup: 500out_kmemdup:
507 if (net_eq(net, &init_net)) 501 if (net_eq(net, &init_net))
508 unregister_sysctl_table(nf_ct_netfilter_header); 502 unregister_net_sysctl_table(nf_ct_netfilter_header);
509out: 503out:
510 printk(KERN_ERR "nf_conntrack: can't register to sysctl.\n"); 504 printk(KERN_ERR "nf_conntrack: can't register to sysctl.\n");
511 return -ENOMEM; 505 return -ENOMEM;
@@ -516,7 +510,7 @@ static void nf_conntrack_standalone_fini_sysctl(struct net *net)
516 struct ctl_table *table; 510 struct ctl_table *table;
517 511
518 if (net_eq(net, &init_net)) 512 if (net_eq(net, &init_net))
519 unregister_sysctl_table(nf_ct_netfilter_header); 513 unregister_net_sysctl_table(nf_ct_netfilter_header);
520 table = net->ct.sysctl_header->ctl_table_arg; 514 table = net->ct.sysctl_header->ctl_table_arg;
521 unregister_net_sysctl_table(net->ct.sysctl_header); 515 unregister_net_sysctl_table(net->ct.sysctl_header);
522 kfree(table); 516 kfree(table);
diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
index e8d27afbbdb9..dbb364f62d6f 100644
--- a/net/netfilter/nf_conntrack_timestamp.c
+++ b/net/netfilter/nf_conntrack_timestamp.c
@@ -51,8 +51,8 @@ static int nf_conntrack_tstamp_init_sysctl(struct net *net)
51 51
52 table[0].data = &net->ct.sysctl_tstamp; 52 table[0].data = &net->ct.sysctl_tstamp;
53 53
54 net->ct.tstamp_sysctl_header = register_net_sysctl_table(net, 54 net->ct.tstamp_sysctl_header = register_net_sysctl(net, "net/netfilter",
55 nf_net_netfilter_sysctl_path, table); 55 table);
56 if (!net->ct.tstamp_sysctl_header) { 56 if (!net->ct.tstamp_sysctl_header) {
57 printk(KERN_ERR "nf_ct_tstamp: can't register to sysctl.\n"); 57 printk(KERN_ERR "nf_ct_tstamp: can't register to sysctl.\n");
58 goto out_register; 58 goto out_register;
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 957374a234d4..703fb26aa48d 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -214,13 +214,6 @@ static const struct file_operations nflog_file_ops = {
214#endif /* PROC_FS */ 214#endif /* PROC_FS */
215 215
216#ifdef CONFIG_SYSCTL 216#ifdef CONFIG_SYSCTL
217static struct ctl_path nf_log_sysctl_path[] = {
218 { .procname = "net", },
219 { .procname = "netfilter", },
220 { .procname = "nf_log", },
221 { }
222};
223
224static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3]; 217static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
225static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1]; 218static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
226static struct ctl_table_header *nf_log_dir_header; 219static struct ctl_table_header *nf_log_dir_header;
@@ -283,7 +276,7 @@ static __init int netfilter_log_sysctl_init(void)
283 nf_log_sysctl_table[i].extra1 = (void *)(unsigned long) i; 276 nf_log_sysctl_table[i].extra1 = (void *)(unsigned long) i;
284 } 277 }
285 278
286 nf_log_dir_header = register_sysctl_paths(nf_log_sysctl_path, 279 nf_log_dir_header = register_net_sysctl(&init_net, "net/netfilter/nf_log",
287 nf_log_sysctl_table); 280 nf_log_sysctl_table);
288 if (!nf_log_dir_header) 281 if (!nf_log_dir_header)
289 return -ENOMEM; 282 return -ENOMEM;
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index e6ddde165612..3e797d1fcb94 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -103,7 +103,7 @@ int nfnetlink_has_listeners(struct net *net, unsigned int group)
103EXPORT_SYMBOL_GPL(nfnetlink_has_listeners); 103EXPORT_SYMBOL_GPL(nfnetlink_has_listeners);
104 104
105int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, 105int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 pid,
106 unsigned group, int echo, gfp_t flags) 106 unsigned int group, int echo, gfp_t flags)
107{ 107{
108 return nlmsg_notify(net->nfnl, skb, pid, group, echo, flags); 108 return nlmsg_notify(net->nfnl, skb, pid, group, echo, flags);
109} 109}
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index d98c868c148b..b2e7310ca0b8 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -109,7 +109,8 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
109 nfmsg->version = NFNETLINK_V0; 109 nfmsg->version = NFNETLINK_V0;
110 nfmsg->res_id = 0; 110 nfmsg->res_id = 0;
111 111
112 NLA_PUT_STRING(skb, NFACCT_NAME, acct->name); 112 if (nla_put_string(skb, NFACCT_NAME, acct->name))
113 goto nla_put_failure;
113 114
114 if (type == NFNL_MSG_ACCT_GET_CTRZERO) { 115 if (type == NFNL_MSG_ACCT_GET_CTRZERO) {
115 pkts = atomic64_xchg(&acct->pkts, 0); 116 pkts = atomic64_xchg(&acct->pkts, 0);
@@ -118,9 +119,10 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
118 pkts = atomic64_read(&acct->pkts); 119 pkts = atomic64_read(&acct->pkts);
119 bytes = atomic64_read(&acct->bytes); 120 bytes = atomic64_read(&acct->bytes);
120 } 121 }
121 NLA_PUT_BE64(skb, NFACCT_PKTS, cpu_to_be64(pkts)); 122 if (nla_put_be64(skb, NFACCT_PKTS, cpu_to_be64(pkts)) ||
122 NLA_PUT_BE64(skb, NFACCT_BYTES, cpu_to_be64(bytes)); 123 nla_put_be64(skb, NFACCT_BYTES, cpu_to_be64(bytes)) ||
123 NLA_PUT_BE32(skb, NFACCT_USE, htonl(atomic_read(&acct->refcnt))); 124 nla_put_be32(skb, NFACCT_USE, htonl(atomic_read(&acct->refcnt))))
125 goto nla_put_failure;
124 126
125 nlmsg_end(skb, nlh); 127 nlmsg_end(skb, nlh);
126 return skb->len; 128 return skb->len;
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 2b9e79f5ef05..3e655288d1d6 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -170,11 +170,12 @@ ctnl_timeout_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
170 nfmsg->version = NFNETLINK_V0; 170 nfmsg->version = NFNETLINK_V0;
171 nfmsg->res_id = 0; 171 nfmsg->res_id = 0;
172 172
173 NLA_PUT_STRING(skb, CTA_TIMEOUT_NAME, timeout->name); 173 if (nla_put_string(skb, CTA_TIMEOUT_NAME, timeout->name) ||
174 NLA_PUT_BE16(skb, CTA_TIMEOUT_L3PROTO, htons(timeout->l3num)); 174 nla_put_be16(skb, CTA_TIMEOUT_L3PROTO, htons(timeout->l3num)) ||
175 NLA_PUT_U8(skb, CTA_TIMEOUT_L4PROTO, timeout->l4proto->l4proto); 175 nla_put_u8(skb, CTA_TIMEOUT_L4PROTO, timeout->l4proto->l4proto) ||
176 NLA_PUT_BE32(skb, CTA_TIMEOUT_USE, 176 nla_put_be32(skb, CTA_TIMEOUT_USE,
177 htonl(atomic_read(&timeout->refcnt))); 177 htonl(atomic_read(&timeout->refcnt))))
178 goto nla_put_failure;
178 179
179 if (likely(l4proto->ctnl_timeout.obj_to_nlattr)) { 180 if (likely(l4proto->ctnl_timeout.obj_to_nlattr)) {
180 struct nlattr *nest_parms; 181 struct nlattr *nest_parms;
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 66b2c54c544f..3c3cfc0cc9b5 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -391,67 +391,78 @@ __build_packet_message(struct nfulnl_instance *inst,
391 pmsg.hw_protocol = skb->protocol; 391 pmsg.hw_protocol = skb->protocol;
392 pmsg.hook = hooknum; 392 pmsg.hook = hooknum;
393 393
394 NLA_PUT(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg); 394 if (nla_put(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg))
395 goto nla_put_failure;
395 396
396 if (prefix) 397 if (prefix &&
397 NLA_PUT(inst->skb, NFULA_PREFIX, plen, prefix); 398 nla_put(inst->skb, NFULA_PREFIX, plen, prefix))
399 goto nla_put_failure;
398 400
399 if (indev) { 401 if (indev) {
400#ifndef CONFIG_BRIDGE_NETFILTER 402#ifndef CONFIG_BRIDGE_NETFILTER
401 NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV, 403 if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
402 htonl(indev->ifindex)); 404 htonl(indev->ifindex)))
405 goto nla_put_failure;
403#else 406#else
404 if (pf == PF_BRIDGE) { 407 if (pf == PF_BRIDGE) {
405 /* Case 1: outdev is physical input device, we need to 408 /* Case 1: outdev is physical input device, we need to
406 * look for bridge group (when called from 409 * look for bridge group (when called from
407 * netfilter_bridge) */ 410 * netfilter_bridge) */
408 NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV, 411 if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
409 htonl(indev->ifindex)); 412 htonl(indev->ifindex)) ||
410 /* this is the bridge group "brX" */ 413 /* this is the bridge group "brX" */
411 /* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */ 414 /* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
412 NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV, 415 nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
413 htonl(br_port_get_rcu(indev)->br->dev->ifindex)); 416 htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
417 goto nla_put_failure;
414 } else { 418 } else {
415 /* Case 2: indev is bridge group, we need to look for 419 /* Case 2: indev is bridge group, we need to look for
416 * physical device (when called from ipv4) */ 420 * physical device (when called from ipv4) */
417 NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV, 421 if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
418 htonl(indev->ifindex)); 422 htonl(indev->ifindex)))
419 if (skb->nf_bridge && skb->nf_bridge->physindev) 423 goto nla_put_failure;
420 NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV, 424 if (skb->nf_bridge && skb->nf_bridge->physindev &&
421 htonl(skb->nf_bridge->physindev->ifindex)); 425 nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
426 htonl(skb->nf_bridge->physindev->ifindex)))
427 goto nla_put_failure;
422 } 428 }
423#endif 429#endif
424 } 430 }
425 431
426 if (outdev) { 432 if (outdev) {
427#ifndef CONFIG_BRIDGE_NETFILTER 433#ifndef CONFIG_BRIDGE_NETFILTER
428 NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV, 434 if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
429 htonl(outdev->ifindex)); 435 htonl(outdev->ifindex)))
436 goto nla_put_failure;
430#else 437#else
431 if (pf == PF_BRIDGE) { 438 if (pf == PF_BRIDGE) {
432 /* Case 1: outdev is physical output device, we need to 439 /* Case 1: outdev is physical output device, we need to
433 * look for bridge group (when called from 440 * look for bridge group (when called from
434 * netfilter_bridge) */ 441 * netfilter_bridge) */
435 NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV, 442 if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
436 htonl(outdev->ifindex)); 443 htonl(outdev->ifindex)) ||
437 /* this is the bridge group "brX" */ 444 /* this is the bridge group "brX" */
438 /* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */ 445 /* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
439 NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV, 446 nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
440 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)); 447 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
448 goto nla_put_failure;
441 } else { 449 } else {
442 /* Case 2: indev is a bridge group, we need to look 450 /* Case 2: indev is a bridge group, we need to look
443 * for physical device (when called from ipv4) */ 451 * for physical device (when called from ipv4) */
444 NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV, 452 if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
445 htonl(outdev->ifindex)); 453 htonl(outdev->ifindex)))
446 if (skb->nf_bridge && skb->nf_bridge->physoutdev) 454 goto nla_put_failure;
447 NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV, 455 if (skb->nf_bridge && skb->nf_bridge->physoutdev &&
448 htonl(skb->nf_bridge->physoutdev->ifindex)); 456 nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
457 htonl(skb->nf_bridge->physoutdev->ifindex)))
458 goto nla_put_failure;
449 } 459 }
450#endif 460#endif
451 } 461 }
452 462
453 if (skb->mark) 463 if (skb->mark &&
454 NLA_PUT_BE32(inst->skb, NFULA_MARK, htonl(skb->mark)); 464 nla_put_be32(inst->skb, NFULA_MARK, htonl(skb->mark)))
465 goto nla_put_failure;
455 466
456 if (indev && skb->dev && 467 if (indev && skb->dev &&
457 skb->mac_header != skb->network_header) { 468 skb->mac_header != skb->network_header) {
@@ -459,16 +470,18 @@ __build_packet_message(struct nfulnl_instance *inst,
459 int len = dev_parse_header(skb, phw.hw_addr); 470 int len = dev_parse_header(skb, phw.hw_addr);
460 if (len > 0) { 471 if (len > 0) {
461 phw.hw_addrlen = htons(len); 472 phw.hw_addrlen = htons(len);
462 NLA_PUT(inst->skb, NFULA_HWADDR, sizeof(phw), &phw); 473 if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
474 goto nla_put_failure;
463 } 475 }
464 } 476 }
465 477
466 if (indev && skb_mac_header_was_set(skb)) { 478 if (indev && skb_mac_header_was_set(skb)) {
467 NLA_PUT_BE16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)); 479 if (nla_put_be32(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
468 NLA_PUT_BE16(inst->skb, NFULA_HWLEN, 480 nla_put_be16(inst->skb, NFULA_HWLEN,
469 htons(skb->dev->hard_header_len)); 481 htons(skb->dev->hard_header_len)) ||
470 NLA_PUT(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len, 482 nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len,
471 skb_mac_header(skb)); 483 skb_mac_header(skb)))
484 goto nla_put_failure;
472 } 485 }
473 486
474 if (skb->tstamp.tv64) { 487 if (skb->tstamp.tv64) {
@@ -477,7 +490,8 @@ __build_packet_message(struct nfulnl_instance *inst,
477 ts.sec = cpu_to_be64(tv.tv_sec); 490 ts.sec = cpu_to_be64(tv.tv_sec);
478 ts.usec = cpu_to_be64(tv.tv_usec); 491 ts.usec = cpu_to_be64(tv.tv_usec);
479 492
480 NLA_PUT(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts); 493 if (nla_put(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts))
494 goto nla_put_failure;
481 } 495 }
482 496
483 /* UID */ 497 /* UID */
@@ -487,22 +501,24 @@ __build_packet_message(struct nfulnl_instance *inst,
487 struct file *file = skb->sk->sk_socket->file; 501 struct file *file = skb->sk->sk_socket->file;
488 __be32 uid = htonl(file->f_cred->fsuid); 502 __be32 uid = htonl(file->f_cred->fsuid);
489 __be32 gid = htonl(file->f_cred->fsgid); 503 __be32 gid = htonl(file->f_cred->fsgid);
490 /* need to unlock here since NLA_PUT may goto */
491 read_unlock_bh(&skb->sk->sk_callback_lock); 504 read_unlock_bh(&skb->sk->sk_callback_lock);
492 NLA_PUT_BE32(inst->skb, NFULA_UID, uid); 505 if (nla_put_be32(inst->skb, NFULA_UID, uid) ||
493 NLA_PUT_BE32(inst->skb, NFULA_GID, gid); 506 nla_put_be32(inst->skb, NFULA_GID, gid))
507 goto nla_put_failure;
494 } else 508 } else
495 read_unlock_bh(&skb->sk->sk_callback_lock); 509 read_unlock_bh(&skb->sk->sk_callback_lock);
496 } 510 }
497 511
498 /* local sequence number */ 512 /* local sequence number */
499 if (inst->flags & NFULNL_CFG_F_SEQ) 513 if ((inst->flags & NFULNL_CFG_F_SEQ) &&
500 NLA_PUT_BE32(inst->skb, NFULA_SEQ, htonl(inst->seq++)); 514 nla_put_be32(inst->skb, NFULA_SEQ, htonl(inst->seq++)))
515 goto nla_put_failure;
501 516
502 /* global sequence number */ 517 /* global sequence number */
503 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) 518 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
504 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL, 519 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
505 htonl(atomic_inc_return(&global_seq))); 520 htonl(atomic_inc_return(&global_seq))))
521 goto nla_put_failure;
506 522
507 if (data_len) { 523 if (data_len) {
508 struct nlattr *nla; 524 struct nlattr *nla;
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index a80b0cb03f17..8d6bcf32c0ed 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -288,58 +288,67 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
288 indev = entry->indev; 288 indev = entry->indev;
289 if (indev) { 289 if (indev) {
290#ifndef CONFIG_BRIDGE_NETFILTER 290#ifndef CONFIG_BRIDGE_NETFILTER
291 NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)); 291 if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)))
292 goto nla_put_failure;
292#else 293#else
293 if (entry->pf == PF_BRIDGE) { 294 if (entry->pf == PF_BRIDGE) {
294 /* Case 1: indev is physical input device, we need to 295 /* Case 1: indev is physical input device, we need to
295 * look for bridge group (when called from 296 * look for bridge group (when called from
296 * netfilter_bridge) */ 297 * netfilter_bridge) */
297 NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV, 298 if (nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
298 htonl(indev->ifindex)); 299 htonl(indev->ifindex)) ||
299 /* this is the bridge group "brX" */ 300 /* this is the bridge group "brX" */
300 /* rcu_read_lock()ed by __nf_queue */ 301 /* rcu_read_lock()ed by __nf_queue */
301 NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, 302 nla_put_be32(skb, NFQA_IFINDEX_INDEV,
302 htonl(br_port_get_rcu(indev)->br->dev->ifindex)); 303 htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
304 goto nla_put_failure;
303 } else { 305 } else {
304 /* Case 2: indev is bridge group, we need to look for 306 /* Case 2: indev is bridge group, we need to look for
305 * physical device (when called from ipv4) */ 307 * physical device (when called from ipv4) */
306 NLA_PUT_BE32(skb, NFQA_IFINDEX_INDEV, 308 if (nla_put_be32(skb, NFQA_IFINDEX_INDEV,
307 htonl(indev->ifindex)); 309 htonl(indev->ifindex)))
308 if (entskb->nf_bridge && entskb->nf_bridge->physindev) 310 goto nla_put_failure;
309 NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSINDEV, 311 if (entskb->nf_bridge && entskb->nf_bridge->physindev &&
310 htonl(entskb->nf_bridge->physindev->ifindex)); 312 nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
313 htonl(entskb->nf_bridge->physindev->ifindex)))
314 goto nla_put_failure;
311 } 315 }
312#endif 316#endif
313 } 317 }
314 318
315 if (outdev) { 319 if (outdev) {
316#ifndef CONFIG_BRIDGE_NETFILTER 320#ifndef CONFIG_BRIDGE_NETFILTER
317 NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)); 321 if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)))
322 goto nla_put_failure;
318#else 323#else
319 if (entry->pf == PF_BRIDGE) { 324 if (entry->pf == PF_BRIDGE) {
320 /* Case 1: outdev is physical output device, we need to 325 /* Case 1: outdev is physical output device, we need to
321 * look for bridge group (when called from 326 * look for bridge group (when called from
322 * netfilter_bridge) */ 327 * netfilter_bridge) */
323 NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV, 328 if (nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
324 htonl(outdev->ifindex)); 329 htonl(outdev->ifindex)) ||
325 /* this is the bridge group "brX" */ 330 /* this is the bridge group "brX" */
326 /* rcu_read_lock()ed by __nf_queue */ 331 /* rcu_read_lock()ed by __nf_queue */
327 NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, 332 nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
328 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)); 333 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
334 goto nla_put_failure;
329 } else { 335 } else {
330 /* Case 2: outdev is bridge group, we need to look for 336 /* Case 2: outdev is bridge group, we need to look for
331 * physical output device (when called from ipv4) */ 337 * physical output device (when called from ipv4) */
332 NLA_PUT_BE32(skb, NFQA_IFINDEX_OUTDEV, 338 if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
333 htonl(outdev->ifindex)); 339 htonl(outdev->ifindex)))
334 if (entskb->nf_bridge && entskb->nf_bridge->physoutdev) 340 goto nla_put_failure;
335 NLA_PUT_BE32(skb, NFQA_IFINDEX_PHYSOUTDEV, 341 if (entskb->nf_bridge && entskb->nf_bridge->physoutdev &&
336 htonl(entskb->nf_bridge->physoutdev->ifindex)); 342 nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
343 htonl(entskb->nf_bridge->physoutdev->ifindex)))
344 goto nla_put_failure;
337 } 345 }
338#endif 346#endif
339 } 347 }
340 348
341 if (entskb->mark) 349 if (entskb->mark &&
342 NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark)); 350 nla_put_be32(skb, NFQA_MARK, htonl(entskb->mark)))
351 goto nla_put_failure;
343 352
344 if (indev && entskb->dev && 353 if (indev && entskb->dev &&
345 entskb->mac_header != entskb->network_header) { 354 entskb->mac_header != entskb->network_header) {
@@ -347,7 +356,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
347 int len = dev_parse_header(entskb, phw.hw_addr); 356 int len = dev_parse_header(entskb, phw.hw_addr);
348 if (len) { 357 if (len) {
349 phw.hw_addrlen = htons(len); 358 phw.hw_addrlen = htons(len);
350 NLA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw); 359 if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
360 goto nla_put_failure;
351 } 361 }
352 } 362 }
353 363
@@ -357,7 +367,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
357 ts.sec = cpu_to_be64(tv.tv_sec); 367 ts.sec = cpu_to_be64(tv.tv_sec);
358 ts.usec = cpu_to_be64(tv.tv_usec); 368 ts.usec = cpu_to_be64(tv.tv_usec);
359 369
360 NLA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts); 370 if (nla_put(skb, NFQA_TIMESTAMP, sizeof(ts), &ts))
371 goto nla_put_failure;
361 } 372 }
362 373
363 if (data_len) { 374 if (data_len) {
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index d2ff15a2412b..fc0d6dbe5d17 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -314,7 +314,7 @@ static int recent_mt_check(const struct xt_mtchk_param *par)
314#ifdef CONFIG_PROC_FS 314#ifdef CONFIG_PROC_FS
315 struct proc_dir_entry *pde; 315 struct proc_dir_entry *pde;
316#endif 316#endif
317 unsigned i; 317 unsigned int i;
318 int ret = -EINVAL; 318 int ret = -EINVAL;
319 319
320 if (unlikely(!hash_rnd_inited)) { 320 if (unlikely(!hash_rnd_inited)) {
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index faa48f70b7c9..b3025a603d56 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -104,27 +104,27 @@ static inline int netlink_is_kernel(struct sock *sk)
104} 104}
105 105
106struct nl_pid_hash { 106struct nl_pid_hash {
107 struct hlist_head *table; 107 struct hlist_head *table;
108 unsigned long rehash_time; 108 unsigned long rehash_time;
109 109
110 unsigned int mask; 110 unsigned int mask;
111 unsigned int shift; 111 unsigned int shift;
112 112
113 unsigned int entries; 113 unsigned int entries;
114 unsigned int max_shift; 114 unsigned int max_shift;
115 115
116 u32 rnd; 116 u32 rnd;
117}; 117};
118 118
119struct netlink_table { 119struct netlink_table {
120 struct nl_pid_hash hash; 120 struct nl_pid_hash hash;
121 struct hlist_head mc_list; 121 struct hlist_head mc_list;
122 struct listeners __rcu *listeners; 122 struct listeners __rcu *listeners;
123 unsigned int nl_nonroot; 123 unsigned int nl_nonroot;
124 unsigned int groups; 124 unsigned int groups;
125 struct mutex *cb_mutex; 125 struct mutex *cb_mutex;
126 struct module *module; 126 struct module *module;
127 int registered; 127 int registered;
128}; 128};
129 129
130static struct netlink_table *nl_table; 130static struct netlink_table *nl_table;
@@ -132,7 +132,6 @@ static struct netlink_table *nl_table;
132static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait); 132static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
133 133
134static int netlink_dump(struct sock *sk); 134static int netlink_dump(struct sock *sk);
135static void netlink_destroy_callback(struct netlink_callback *cb);
136 135
137static DEFINE_RWLOCK(nl_table_lock); 136static DEFINE_RWLOCK(nl_table_lock);
138static atomic_t nl_table_users = ATOMIC_INIT(0); 137static atomic_t nl_table_users = ATOMIC_INIT(0);
@@ -149,6 +148,18 @@ static inline struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid
149 return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask]; 148 return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
150} 149}
151 150
151static void netlink_destroy_callback(struct netlink_callback *cb)
152{
153 kfree_skb(cb->skb);
154 kfree(cb);
155}
156
157static void netlink_consume_callback(struct netlink_callback *cb)
158{
159 consume_skb(cb->skb);
160 kfree(cb);
161}
162
152static void netlink_sock_destruct(struct sock *sk) 163static void netlink_sock_destruct(struct sock *sk)
153{ 164{
154 struct netlink_sock *nlk = nlk_sk(sk); 165 struct netlink_sock *nlk = nlk_sk(sk);
@@ -414,9 +425,9 @@ static int __netlink_create(struct net *net, struct socket *sock,
414 sock_init_data(sock, sk); 425 sock_init_data(sock, sk);
415 426
416 nlk = nlk_sk(sk); 427 nlk = nlk_sk(sk);
417 if (cb_mutex) 428 if (cb_mutex) {
418 nlk->cb_mutex = cb_mutex; 429 nlk->cb_mutex = cb_mutex;
419 else { 430 } else {
420 nlk->cb_mutex = &nlk->cb_def_mutex; 431 nlk->cb_mutex = &nlk->cb_def_mutex;
421 mutex_init(nlk->cb_mutex); 432 mutex_init(nlk->cb_mutex);
422 } 433 }
@@ -522,8 +533,9 @@ static int netlink_release(struct socket *sock)
522 nl_table[sk->sk_protocol].module = NULL; 533 nl_table[sk->sk_protocol].module = NULL;
523 nl_table[sk->sk_protocol].registered = 0; 534 nl_table[sk->sk_protocol].registered = 0;
524 } 535 }
525 } else if (nlk->subscriptions) 536 } else if (nlk->subscriptions) {
526 netlink_update_listeners(sk); 537 netlink_update_listeners(sk);
538 }
527 netlink_table_ungrab(); 539 netlink_table_ungrab();
528 540
529 kfree(nlk->groups); 541 kfree(nlk->groups);
@@ -866,7 +878,7 @@ static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
866 struct sk_buff *nskb = skb_clone(skb, allocation); 878 struct sk_buff *nskb = skb_clone(skb, allocation);
867 if (!nskb) 879 if (!nskb)
868 return skb; 880 return skb;
869 kfree_skb(skb); 881 consume_skb(skb);
870 skb = nskb; 882 skb = nskb;
871 } 883 }
872 884
@@ -896,8 +908,10 @@ static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb)
896 ret = skb->len; 908 ret = skb->len;
897 skb_set_owner_r(skb, sk); 909 skb_set_owner_r(skb, sk);
898 nlk->netlink_rcv(skb); 910 nlk->netlink_rcv(skb);
911 consume_skb(skb);
912 } else {
913 kfree_skb(skb);
899 } 914 }
900 kfree_skb(skb);
901 sock_put(sk); 915 sock_put(sk);
902 return ret; 916 return ret;
903} 917}
@@ -1086,8 +1100,8 @@ int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 pid,
1086 if (info.delivery_failure) { 1100 if (info.delivery_failure) {
1087 kfree_skb(info.skb2); 1101 kfree_skb(info.skb2);
1088 return -ENOBUFS; 1102 return -ENOBUFS;
1089 } else 1103 }
1090 consume_skb(info.skb2); 1104 consume_skb(info.skb2);
1091 1105
1092 if (info.delivered) { 1106 if (info.delivered) {
1093 if (info.congested && (allocation & __GFP_WAIT)) 1107 if (info.congested && (allocation & __GFP_WAIT))
@@ -1240,8 +1254,9 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
1240 nlk->flags |= NETLINK_RECV_NO_ENOBUFS; 1254 nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
1241 clear_bit(0, &nlk->state); 1255 clear_bit(0, &nlk->state);
1242 wake_up_interruptible(&nlk->wait); 1256 wake_up_interruptible(&nlk->wait);
1243 } else 1257 } else {
1244 nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS; 1258 nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
1259 }
1245 err = 0; 1260 err = 0;
1246 break; 1261 break;
1247 default: 1262 default:
@@ -1645,12 +1660,6 @@ void netlink_set_nonroot(int protocol, unsigned int flags)
1645} 1660}
1646EXPORT_SYMBOL(netlink_set_nonroot); 1661EXPORT_SYMBOL(netlink_set_nonroot);
1647 1662
1648static void netlink_destroy_callback(struct netlink_callback *cb)
1649{
1650 kfree_skb(cb->skb);
1651 kfree(cb);
1652}
1653
1654struct nlmsghdr * 1663struct nlmsghdr *
1655__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags) 1664__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
1656{ 1665{
@@ -1727,7 +1736,7 @@ static int netlink_dump(struct sock *sk)
1727 nlk->cb = NULL; 1736 nlk->cb = NULL;
1728 mutex_unlock(nlk->cb_mutex); 1737 mutex_unlock(nlk->cb_mutex);
1729 1738
1730 netlink_destroy_callback(cb); 1739 netlink_consume_callback(cb);
1731 return 0; 1740 return 0;
1732 1741
1733errout_skb: 1742errout_skb:
@@ -1996,11 +2005,11 @@ static void netlink_seq_stop(struct seq_file *seq, void *v)
1996 2005
1997static int netlink_seq_show(struct seq_file *seq, void *v) 2006static int netlink_seq_show(struct seq_file *seq, void *v)
1998{ 2007{
1999 if (v == SEQ_START_TOKEN) 2008 if (v == SEQ_START_TOKEN) {
2000 seq_puts(seq, 2009 seq_puts(seq,
2001 "sk Eth Pid Groups " 2010 "sk Eth Pid Groups "
2002 "Rmem Wmem Dump Locks Drops Inode\n"); 2011 "Rmem Wmem Dump Locks Drops Inode\n");
2003 else { 2012 } else {
2004 struct sock *s = v; 2013 struct sock *s = v;
2005 struct netlink_sock *nlk = nlk_sk(s); 2014 struct netlink_sock *nlk = nlk_sk(s);
2006 2015
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 9f40441d7a7d..8340ace837f2 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -635,11 +635,12 @@ static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq,
635 if (hdr == NULL) 635 if (hdr == NULL)
636 return -1; 636 return -1;
637 637
638 NLA_PUT_STRING(skb, CTRL_ATTR_FAMILY_NAME, family->name); 638 if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
639 NLA_PUT_U16(skb, CTRL_ATTR_FAMILY_ID, family->id); 639 nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) ||
640 NLA_PUT_U32(skb, CTRL_ATTR_VERSION, family->version); 640 nla_put_u32(skb, CTRL_ATTR_VERSION, family->version) ||
641 NLA_PUT_U32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize); 641 nla_put_u32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize) ||
642 NLA_PUT_U32(skb, CTRL_ATTR_MAXATTR, family->maxattr); 642 nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr))
643 goto nla_put_failure;
643 644
644 if (!list_empty(&family->ops_list)) { 645 if (!list_empty(&family->ops_list)) {
645 struct nlattr *nla_ops; 646 struct nlattr *nla_ops;
@@ -657,8 +658,9 @@ static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq,
657 if (nest == NULL) 658 if (nest == NULL)
658 goto nla_put_failure; 659 goto nla_put_failure;
659 660
660 NLA_PUT_U32(skb, CTRL_ATTR_OP_ID, ops->cmd); 661 if (nla_put_u32(skb, CTRL_ATTR_OP_ID, ops->cmd) ||
661 NLA_PUT_U32(skb, CTRL_ATTR_OP_FLAGS, ops->flags); 662 nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, ops->flags))
663 goto nla_put_failure;
662 664
663 nla_nest_end(skb, nest); 665 nla_nest_end(skb, nest);
664 } 666 }
@@ -682,9 +684,10 @@ static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq,
682 if (nest == NULL) 684 if (nest == NULL)
683 goto nla_put_failure; 685 goto nla_put_failure;
684 686
685 NLA_PUT_U32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id); 687 if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id) ||
686 NLA_PUT_STRING(skb, CTRL_ATTR_MCAST_GRP_NAME, 688 nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
687 grp->name); 689 grp->name))
690 goto nla_put_failure;
688 691
689 nla_nest_end(skb, nest); 692 nla_nest_end(skb, nest);
690 } 693 }
@@ -710,8 +713,9 @@ static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 pid,
710 if (hdr == NULL) 713 if (hdr == NULL)
711 return -1; 714 return -1;
712 715
713 NLA_PUT_STRING(skb, CTRL_ATTR_FAMILY_NAME, grp->family->name); 716 if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, grp->family->name) ||
714 NLA_PUT_U16(skb, CTRL_ATTR_FAMILY_ID, grp->family->id); 717 nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, grp->family->id))
718 goto nla_put_failure;
715 719
716 nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS); 720 nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
717 if (nla_grps == NULL) 721 if (nla_grps == NULL)
@@ -721,9 +725,10 @@ static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 pid,
721 if (nest == NULL) 725 if (nest == NULL)
722 goto nla_put_failure; 726 goto nla_put_failure;
723 727
724 NLA_PUT_U32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id); 728 if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id) ||
725 NLA_PUT_STRING(skb, CTRL_ATTR_MCAST_GRP_NAME, 729 nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
726 grp->name); 730 grp->name))
731 goto nla_put_failure;
727 732
728 nla_nest_end(skb, nest); 733 nla_nest_end(skb, nest);
729 nla_nest_end(skb, nla_grps); 734 nla_nest_end(skb, nla_grps);
diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c
index 1c51d7a58f0b..743262becd6e 100644
--- a/net/netrom/nr_dev.c
+++ b/net/netrom/nr_dev.c
@@ -97,7 +97,7 @@ static int nr_rebuild_header(struct sk_buff *skb)
97 97
98static int nr_header(struct sk_buff *skb, struct net_device *dev, 98static int nr_header(struct sk_buff *skb, struct net_device *dev,
99 unsigned short type, 99 unsigned short type,
100 const void *daddr, const void *saddr, unsigned len) 100 const void *daddr, const void *saddr, unsigned int len)
101{ 101{
102 unsigned char *buff = skb_push(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN); 102 unsigned char *buff = skb_push(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN);
103 103
diff --git a/net/netrom/sysctl_net_netrom.c b/net/netrom/sysctl_net_netrom.c
index 1e0fa9e57aac..42f630b9a698 100644
--- a/net/netrom/sysctl_net_netrom.c
+++ b/net/netrom/sysctl_net_netrom.c
@@ -146,18 +146,12 @@ static ctl_table nr_table[] = {
146 { } 146 { }
147}; 147};
148 148
149static struct ctl_path nr_path[] = {
150 { .procname = "net", },
151 { .procname = "netrom", },
152 { }
153};
154
155void __init nr_register_sysctl(void) 149void __init nr_register_sysctl(void)
156{ 150{
157 nr_table_header = register_sysctl_paths(nr_path, nr_table); 151 nr_table_header = register_net_sysctl(&init_net, "net/netrom", nr_table);
158} 152}
159 153
160void nr_unregister_sysctl(void) 154void nr_unregister_sysctl(void)
161{ 155{
162 unregister_sysctl_table(nr_table_header); 156 unregister_net_sysctl_table(nr_table_header);
163} 157}
diff --git a/net/nfc/Kconfig b/net/nfc/Kconfig
index 44c865b86d6f..8d8d9bc4b6ff 100644
--- a/net/nfc/Kconfig
+++ b/net/nfc/Kconfig
@@ -14,6 +14,7 @@ menuconfig NFC
14 be called nfc. 14 be called nfc.
15 15
16source "net/nfc/nci/Kconfig" 16source "net/nfc/nci/Kconfig"
17source "net/nfc/hci/Kconfig"
17source "net/nfc/llcp/Kconfig" 18source "net/nfc/llcp/Kconfig"
18 19
19source "drivers/nfc/Kconfig" 20source "drivers/nfc/Kconfig"
diff --git a/net/nfc/Makefile b/net/nfc/Makefile
index 7b4a6dcfa566..d1a117c2c401 100644
--- a/net/nfc/Makefile
+++ b/net/nfc/Makefile
@@ -4,6 +4,7 @@
4 4
5obj-$(CONFIG_NFC) += nfc.o 5obj-$(CONFIG_NFC) += nfc.o
6obj-$(CONFIG_NFC_NCI) += nci/ 6obj-$(CONFIG_NFC_NCI) += nci/
7obj-$(CONFIG_NFC_HCI) += hci/
7 8
8nfc-objs := core.o netlink.o af_nfc.o rawsock.o 9nfc-objs := core.o netlink.o af_nfc.o rawsock.o
9nfc-$(CONFIG_NFC_LLCP) += llcp/llcp.o llcp/commands.o llcp/sock.o 10nfc-$(CONFIG_NFC_LLCP) += llcp/llcp.o llcp/commands.o llcp/sock.o
diff --git a/net/nfc/core.c b/net/nfc/core.c
index 295d129864d2..3192c3f589ee 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -33,6 +33,8 @@
33 33
34#define VERSION "0.1" 34#define VERSION "0.1"
35 35
36#define NFC_CHECK_PRES_FREQ_MS 2000
37
36int nfc_devlist_generation; 38int nfc_devlist_generation;
37DEFINE_MUTEX(nfc_devlist_mutex); 39DEFINE_MUTEX(nfc_devlist_mutex);
38 40
@@ -95,7 +97,7 @@ int nfc_dev_down(struct nfc_dev *dev)
95 goto error; 97 goto error;
96 } 98 }
97 99
98 if (dev->polling || dev->remote_activated) { 100 if (dev->polling || dev->activated_target_idx != NFC_TARGET_IDX_NONE) {
99 rc = -EBUSY; 101 rc = -EBUSY;
100 goto error; 102 goto error;
101 } 103 }
@@ -211,6 +213,8 @@ int nfc_dep_link_up(struct nfc_dev *dev, int target_index, u8 comm_mode)
211 } 213 }
212 214
213 rc = dev->ops->dep_link_up(dev, target_index, comm_mode, gb, gb_len); 215 rc = dev->ops->dep_link_up(dev, target_index, comm_mode, gb, gb_len);
216 if (!rc)
217 dev->activated_target_idx = target_index;
214 218
215error: 219error:
216 device_unlock(&dev->dev); 220 device_unlock(&dev->dev);
@@ -246,6 +250,7 @@ int nfc_dep_link_down(struct nfc_dev *dev)
246 rc = dev->ops->dep_link_down(dev); 250 rc = dev->ops->dep_link_down(dev);
247 if (!rc) { 251 if (!rc) {
248 dev->dep_link_up = false; 252 dev->dep_link_up = false;
253 dev->activated_target_idx = NFC_TARGET_IDX_NONE;
249 nfc_llcp_mac_is_down(dev); 254 nfc_llcp_mac_is_down(dev);
250 nfc_genl_dep_link_down_event(dev); 255 nfc_genl_dep_link_down_event(dev);
251 } 256 }
@@ -289,8 +294,13 @@ int nfc_activate_target(struct nfc_dev *dev, u32 target_idx, u32 protocol)
289 } 294 }
290 295
291 rc = dev->ops->activate_target(dev, target_idx, protocol); 296 rc = dev->ops->activate_target(dev, target_idx, protocol);
292 if (!rc) 297 if (!rc) {
293 dev->remote_activated = true; 298 dev->activated_target_idx = target_idx;
299
300 if (dev->ops->check_presence)
301 mod_timer(&dev->check_pres_timer, jiffies +
302 msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS));
303 }
294 304
295error: 305error:
296 device_unlock(&dev->dev); 306 device_unlock(&dev->dev);
@@ -317,8 +327,11 @@ int nfc_deactivate_target(struct nfc_dev *dev, u32 target_idx)
317 goto error; 327 goto error;
318 } 328 }
319 329
330 if (dev->ops->check_presence)
331 del_timer_sync(&dev->check_pres_timer);
332
320 dev->ops->deactivate_target(dev, target_idx); 333 dev->ops->deactivate_target(dev, target_idx);
321 dev->remote_activated = false; 334 dev->activated_target_idx = NFC_TARGET_IDX_NONE;
322 335
323error: 336error:
324 device_unlock(&dev->dev); 337 device_unlock(&dev->dev);
@@ -352,8 +365,27 @@ int nfc_data_exchange(struct nfc_dev *dev, u32 target_idx, struct sk_buff *skb,
352 goto error; 365 goto error;
353 } 366 }
354 367
368 if (dev->activated_target_idx == NFC_TARGET_IDX_NONE) {
369 rc = -ENOTCONN;
370 kfree_skb(skb);
371 goto error;
372 }
373
374 if (target_idx != dev->activated_target_idx) {
375 rc = -EADDRNOTAVAIL;
376 kfree_skb(skb);
377 goto error;
378 }
379
380 if (dev->ops->check_presence)
381 del_timer_sync(&dev->check_pres_timer);
382
355 rc = dev->ops->data_exchange(dev, target_idx, skb, cb, cb_context); 383 rc = dev->ops->data_exchange(dev, target_idx, skb, cb, cb_context);
356 384
385 if (!rc && dev->ops->check_presence)
386 mod_timer(&dev->check_pres_timer, jiffies +
387 msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS));
388
357error: 389error:
358 device_unlock(&dev->dev); 390 device_unlock(&dev->dev);
359 return rc; 391 return rc;
@@ -428,10 +460,15 @@ EXPORT_SYMBOL(nfc_alloc_recv_skb);
428int nfc_targets_found(struct nfc_dev *dev, 460int nfc_targets_found(struct nfc_dev *dev,
429 struct nfc_target *targets, int n_targets) 461 struct nfc_target *targets, int n_targets)
430{ 462{
463 int i;
464
431 pr_debug("dev_name=%s n_targets=%d\n", dev_name(&dev->dev), n_targets); 465 pr_debug("dev_name=%s n_targets=%d\n", dev_name(&dev->dev), n_targets);
432 466
433 dev->polling = false; 467 dev->polling = false;
434 468
469 for (i = 0; i < n_targets; i++)
470 targets[i].idx = dev->target_next_idx++;
471
435 spin_lock_bh(&dev->targets_lock); 472 spin_lock_bh(&dev->targets_lock);
436 473
437 dev->targets_generation++; 474 dev->targets_generation++;
@@ -455,17 +492,92 @@ int nfc_targets_found(struct nfc_dev *dev,
455} 492}
456EXPORT_SYMBOL(nfc_targets_found); 493EXPORT_SYMBOL(nfc_targets_found);
457 494
495int nfc_target_lost(struct nfc_dev *dev, u32 target_idx)
496{
497 struct nfc_target *tg;
498 int i;
499
500 pr_debug("dev_name %s n_target %d\n", dev_name(&dev->dev), target_idx);
501
502 spin_lock_bh(&dev->targets_lock);
503
504 for (i = 0; i < dev->n_targets; i++) {
505 tg = &dev->targets[i];
506 if (tg->idx == target_idx)
507 break;
508 }
509
510 if (i == dev->n_targets) {
511 spin_unlock_bh(&dev->targets_lock);
512 return -EINVAL;
513 }
514
515 dev->targets_generation++;
516 dev->n_targets--;
517 dev->activated_target_idx = NFC_TARGET_IDX_NONE;
518
519 if (dev->n_targets) {
520 memcpy(&dev->targets[i], &dev->targets[i + 1],
521 (dev->n_targets - i) * sizeof(struct nfc_target));
522 } else {
523 kfree(dev->targets);
524 dev->targets = NULL;
525 }
526
527 spin_unlock_bh(&dev->targets_lock);
528
529 nfc_genl_target_lost(dev, target_idx);
530
531 return 0;
532}
533EXPORT_SYMBOL(nfc_target_lost);
534
458static void nfc_release(struct device *d) 535static void nfc_release(struct device *d)
459{ 536{
460 struct nfc_dev *dev = to_nfc_dev(d); 537 struct nfc_dev *dev = to_nfc_dev(d);
461 538
462 pr_debug("dev_name=%s\n", dev_name(&dev->dev)); 539 pr_debug("dev_name=%s\n", dev_name(&dev->dev));
463 540
541 if (dev->ops->check_presence) {
542 del_timer_sync(&dev->check_pres_timer);
543 destroy_workqueue(dev->check_pres_wq);
544 }
545
464 nfc_genl_data_exit(&dev->genl_data); 546 nfc_genl_data_exit(&dev->genl_data);
465 kfree(dev->targets); 547 kfree(dev->targets);
466 kfree(dev); 548 kfree(dev);
467} 549}
468 550
551static void nfc_check_pres_work(struct work_struct *work)
552{
553 struct nfc_dev *dev = container_of(work, struct nfc_dev,
554 check_pres_work);
555 int rc;
556
557 device_lock(&dev->dev);
558
559 if (dev->activated_target_idx != NFC_TARGET_IDX_NONE &&
560 timer_pending(&dev->check_pres_timer) == 0) {
561 rc = dev->ops->check_presence(dev, dev->activated_target_idx);
562 if (!rc) {
563 mod_timer(&dev->check_pres_timer, jiffies +
564 msecs_to_jiffies(NFC_CHECK_PRES_FREQ_MS));
565 } else {
566 nfc_target_lost(dev, dev->activated_target_idx);
567 dev->activated_target_idx = NFC_TARGET_IDX_NONE;
568 }
569 }
570
571 device_unlock(&dev->dev);
572}
573
574static void nfc_check_pres_timeout(unsigned long data)
575{
576 struct nfc_dev *dev = (struct nfc_dev *)data;
577
578 queue_work(dev->check_pres_wq, &dev->check_pres_work);
579}
580
469struct class nfc_class = { 581struct class nfc_class = {
470 .name = "nfc", 582 .name = "nfc",
471 .dev_release = nfc_release, 583 .dev_release = nfc_release,
@@ -475,12 +587,12 @@ EXPORT_SYMBOL(nfc_class);
475static int match_idx(struct device *d, void *data) 587static int match_idx(struct device *d, void *data)
476{ 588{
477 struct nfc_dev *dev = to_nfc_dev(d); 589 struct nfc_dev *dev = to_nfc_dev(d);
478 unsigned *idx = data; 590 unsigned int *idx = data;
479 591
480 return dev->idx == *idx; 592 return dev->idx == *idx;
481} 593}
482 594
483struct nfc_dev *nfc_get_device(unsigned idx) 595struct nfc_dev *nfc_get_device(unsigned int idx)
484{ 596{
485 struct device *d; 597 struct device *d;
486 598
@@ -531,6 +643,26 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
531 /* first generation must not be 0 */ 643 /* first generation must not be 0 */
532 dev->targets_generation = 1; 644 dev->targets_generation = 1;
533 645
646 dev->activated_target_idx = NFC_TARGET_IDX_NONE;
647
648 if (ops->check_presence) {
649 char name[32];
650 init_timer(&dev->check_pres_timer);
651 dev->check_pres_timer.data = (unsigned long)dev;
652 dev->check_pres_timer.function = nfc_check_pres_timeout;
653
654 INIT_WORK(&dev->check_pres_work, nfc_check_pres_work);
655 snprintf(name, sizeof(name), "nfc%d_check_pres_wq", dev->idx);
656 dev->check_pres_wq = alloc_workqueue(name, WQ_NON_REENTRANT |
657 WQ_UNBOUND |
658 WQ_MEM_RECLAIM, 1);
659 if (dev->check_pres_wq == NULL) {
660 kfree(dev);
661 return NULL;
662 }
663 }
664
665
534 return dev; 666 return dev;
535} 667}
536EXPORT_SYMBOL(nfc_allocate_device); 668EXPORT_SYMBOL(nfc_allocate_device);
diff --git a/net/nfc/hci/Kconfig b/net/nfc/hci/Kconfig
new file mode 100644
index 000000000000..17213a6362b4
--- /dev/null
+++ b/net/nfc/hci/Kconfig
@@ -0,0 +1,16 @@
1config NFC_HCI
2 depends on NFC
3 tristate "NFC HCI implementation"
4 default n
5 help
6 Say Y here if you want to build support for a kernel NFC HCI
7 implementation. This is mostly needed for devices that only process
8 HCI frames, like for example the NXP pn544.
9
10config NFC_SHDLC
11 depends on NFC_HCI
12 bool "SHDLC link layer for HCI based NFC drivers"
13 default n
14 ---help---
15 Say yes if you use an NFC HCI driver that requires SHDLC link layer.
16 If unsure, say N here.
diff --git a/net/nfc/hci/Makefile b/net/nfc/hci/Makefile
new file mode 100644
index 000000000000..f9c44b2fb065
--- /dev/null
+++ b/net/nfc/hci/Makefile
@@ -0,0 +1,8 @@
1#
2# Makefile for the Linux NFC HCI layer.
3#
4
5obj-$(CONFIG_NFC_HCI) += hci.o
6
7hci-y := core.o hcp.o command.o
8hci-$(CONFIG_NFC_SHDLC) += shdlc.o
diff --git a/net/nfc/hci/command.c b/net/nfc/hci/command.c
new file mode 100644
index 000000000000..8729abf5f18b
--- /dev/null
+++ b/net/nfc/hci/command.c
@@ -0,0 +1,354 @@
1/*
2 * Copyright (C) 2012 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the
16 * Free Software Foundation, Inc.,
17 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 */
19
20#define pr_fmt(fmt) "hci: %s: " fmt, __func__
21
22#include <linux/init.h>
23#include <linux/kernel.h>
24#include <linux/sched.h>
25#include <linux/module.h>
26
27#include <net/nfc/hci.h>
28
29#include "hci.h"
30
31static int nfc_hci_result_to_errno(u8 result)
32{
33 switch (result) {
34 case NFC_HCI_ANY_OK:
35 return 0;
36 case NFC_HCI_ANY_E_TIMEOUT:
37 return -ETIMEDOUT;
38 default:
39 return -1;
40 }
41}
42
43static void nfc_hci_execute_cb(struct nfc_hci_dev *hdev, u8 result,
44 struct sk_buff *skb, void *cb_data)
45{
46 struct hcp_exec_waiter *hcp_ew = (struct hcp_exec_waiter *)cb_data;
47
48 pr_debug("HCI Cmd completed with HCI result=%d\n", result);
49
50 hcp_ew->exec_result = nfc_hci_result_to_errno(result);
51 if (hcp_ew->exec_result == 0)
52 hcp_ew->result_skb = skb;
53 else
54 kfree_skb(skb);
55 hcp_ew->exec_complete = true;
56
57 wake_up(hcp_ew->wq);
58}
59
/*
 * Synchronously execute an HCP command on a pipe.
 * Sleeps (wait_event) until the completion callback fires, so this must be
 * called from a context that may block. On success (return 0) and if skb is
 * non-NULL, *skb holds the response payload and the caller must free it.
 */
static int nfc_hci_execute_cmd(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
			       const u8 *param, size_t param_len,
			       struct sk_buff **skb)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(ew_wq);
	struct hcp_exec_waiter hcp_ew;
	hcp_ew.wq = &ew_wq;
	hcp_ew.exec_complete = false;
	hcp_ew.result_skb = NULL;

	pr_debug("through pipe=%d, cmd=%d, plen=%zd\n", pipe, cmd, param_len);

	/* TODO: Define hci cmd execution delay. Should it be the same
	 * for all commands?
	 */
	/* 3000 ms response timeout (becomes completion_delay on the msg) */
	hcp_ew.exec_result = nfc_hci_hcp_message_tx(hdev, pipe,
						    NFC_HCI_HCP_COMMAND, cmd,
						    param, param_len,
						    nfc_hci_execute_cb, &hcp_ew,
						    3000);
	if (hcp_ew.exec_result < 0)
		return hcp_ew.exec_result;

	/* nfc_hci_execute_cb() sets exec_complete and wakes us */
	wait_event(ew_wq, hcp_ew.exec_complete == true);

	if (hcp_ew.exec_result == 0) {
		if (skb)
			*skb = hcp_ew.result_skb;
		else
			kfree_skb(hcp_ew.result_skb);
	}

	return hcp_ew.exec_result;
}
94
95int nfc_hci_send_event(struct nfc_hci_dev *hdev, u8 gate, u8 event,
96 const u8 *param, size_t param_len)
97{
98 u8 pipe;
99
100 pr_debug("%d to gate %d\n", event, gate);
101
102 pipe = hdev->gate2pipe[gate];
103 if (pipe == NFC_HCI_INVALID_PIPE)
104 return -EADDRNOTAVAIL;
105
106 return nfc_hci_hcp_message_tx(hdev, pipe, NFC_HCI_HCP_EVENT, event,
107 param, param_len, NULL, NULL, 0);
108}
109EXPORT_SYMBOL(nfc_hci_send_event);
110
111int nfc_hci_send_response(struct nfc_hci_dev *hdev, u8 gate, u8 response,
112 const u8 *param, size_t param_len)
113{
114 u8 pipe;
115
116 pr_debug("\n");
117
118 pipe = hdev->gate2pipe[gate];
119 if (pipe == NFC_HCI_INVALID_PIPE)
120 return -EADDRNOTAVAIL;
121
122 return nfc_hci_hcp_message_tx(hdev, pipe, NFC_HCI_HCP_RESPONSE,
123 response, param, param_len, NULL, NULL,
124 0);
125}
126EXPORT_SYMBOL(nfc_hci_send_response);
127
128/*
129 * Execute an hci command sent to gate.
130 * skb will contain response data if success. skb can be NULL if you are not
131 * interested by the response.
132 */
133int nfc_hci_send_cmd(struct nfc_hci_dev *hdev, u8 gate, u8 cmd,
134 const u8 *param, size_t param_len, struct sk_buff **skb)
135{
136 u8 pipe;
137
138 pr_debug("\n");
139
140 pipe = hdev->gate2pipe[gate];
141 if (pipe == NFC_HCI_INVALID_PIPE)
142 return -EADDRNOTAVAIL;
143
144 return nfc_hci_execute_cmd(hdev, pipe, cmd, param, param_len, skb);
145}
146EXPORT_SYMBOL(nfc_hci_send_cmd);
147
148int nfc_hci_set_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx,
149 const u8 *param, size_t param_len)
150{
151 int r;
152 u8 *tmp;
153
154 /* TODO ELa: reg idx must be inserted before param, but we don't want
155 * to ask the caller to do it to keep a simpler API.
156 * For now, just create a new temporary param buffer. This is far from
157 * optimal though, and the plan is to modify APIs to pass idx down to
158 * nfc_hci_hcp_message_tx where the frame is actually built, thereby
159 * eliminating the need for the temp allocation-copy here.
160 */
161
162 pr_debug("idx=%d to gate %d\n", idx, gate);
163
164 tmp = kmalloc(1 + param_len, GFP_KERNEL);
165 if (tmp == NULL)
166 return -ENOMEM;
167
168 *tmp = idx;
169 memcpy(tmp + 1, param, param_len);
170
171 r = nfc_hci_send_cmd(hdev, gate, NFC_HCI_ANY_SET_PARAMETER,
172 tmp, param_len + 1, NULL);
173
174 kfree(tmp);
175
176 return r;
177}
178EXPORT_SYMBOL(nfc_hci_set_param);
179
180int nfc_hci_get_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx,
181 struct sk_buff **skb)
182{
183 pr_debug("gate=%d regidx=%d\n", gate, idx);
184
185 return nfc_hci_send_cmd(hdev, gate, NFC_HCI_ANY_GET_PARAMETER,
186 &idx, 1, skb);
187}
188EXPORT_SYMBOL(nfc_hci_get_param);
189
190static int nfc_hci_open_pipe(struct nfc_hci_dev *hdev, u8 pipe)
191{
192 struct sk_buff *skb;
193 int r;
194
195 pr_debug("pipe=%d\n", pipe);
196
197 r = nfc_hci_execute_cmd(hdev, pipe, NFC_HCI_ANY_OPEN_PIPE,
198 NULL, 0, &skb);
199 if (r == 0) {
200 /* dest host other than host controller will send
201 * number of pipes already open on this gate before
202 * execution. The number can be found in skb->data[0]
203 */
204 kfree_skb(skb);
205 }
206
207 return r;
208}
209
/* Close a pipe; no response payload is expected. */
static int nfc_hci_close_pipe(struct nfc_hci_dev *hdev, u8 pipe)
{
	pr_debug("\n");

	return nfc_hci_execute_cmd(hdev, pipe, NFC_HCI_ANY_CLOSE_PIPE,
				   NULL, 0, NULL);
}
217
/*
 * Ask the host controller (through the admin pipe) to create a pipe between
 * our NFC_HCI_ADMIN_GATE and dest_gate on dest_host.
 * Returns the new pipe id, or NFC_HCI_INVALID_PIPE with the errno in
 * *result on failure.
 */
static u8 nfc_hci_create_pipe(struct nfc_hci_dev *hdev, u8 dest_host,
			      u8 dest_gate, int *result)
{
	struct sk_buff *skb;
	struct hci_create_pipe_params params;
	struct hci_create_pipe_resp *resp;
	u8 pipe;

	pr_debug("gate=%d\n", dest_gate);

	params.src_gate = NFC_HCI_ADMIN_GATE;
	params.dest_host = dest_host;
	params.dest_gate = dest_gate;

	*result = nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE,
				      NFC_HCI_ADM_CREATE_PIPE,
				      (u8 *) &params, sizeof(params), &skb);
	if (*result == 0) {
		/* NOTE(review): the response length is not validated before
		 * overlaying hci_create_pipe_resp — assumes the chip always
		 * returns a complete response; TODO confirm/add a check. */
		resp = (struct hci_create_pipe_resp *)skb->data;
		pipe = resp->pipe;
		kfree_skb(skb);

		pr_debug("pipe created=%d\n", pipe);

		return pipe;
	} else
		return NFC_HCI_INVALID_PIPE;
}
246
/* Delete a dynamically created pipe via the admin pipe. */
static int nfc_hci_delete_pipe(struct nfc_hci_dev *hdev, u8 pipe)
{
	pr_debug("\n");

	return nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE,
				   NFC_HCI_ADM_DELETE_PIPE, &pipe, 1, NULL);
}
254
255static int nfc_hci_clear_all_pipes(struct nfc_hci_dev *hdev)
256{
257 int r;
258
259 u8 param[2];
260
261 /* TODO: Find out what the identity reference data is
262 * and fill param with it. HCI spec 6.1.3.5 */
263
264 pr_debug("\n");
265
266 r = nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE,
267 NFC_HCI_ADM_CLEAR_ALL_PIPE, param, 2, NULL);
268
269 return 0;
270}
271
272int nfc_hci_disconnect_gate(struct nfc_hci_dev *hdev, u8 gate)
273{
274 int r;
275 u8 pipe = hdev->gate2pipe[gate];
276
277 pr_debug("\n");
278
279 if (pipe == NFC_HCI_INVALID_PIPE)
280 return -EADDRNOTAVAIL;
281
282 r = nfc_hci_close_pipe(hdev, pipe);
283 if (r < 0)
284 return r;
285
286 if (pipe != NFC_HCI_LINK_MGMT_PIPE && pipe != NFC_HCI_ADMIN_PIPE) {
287 r = nfc_hci_delete_pipe(hdev, pipe);
288 if (r < 0)
289 return r;
290 }
291
292 hdev->gate2pipe[gate] = NFC_HCI_INVALID_PIPE;
293
294 return 0;
295}
296EXPORT_SYMBOL(nfc_hci_disconnect_gate);
297
/* Clear every pipe on the chip and reset the local gate<->pipe table. */
int nfc_hci_disconnect_all_gates(struct nfc_hci_dev *hdev)
{
	int r;

	pr_debug("\n");

	r = nfc_hci_clear_all_pipes(hdev);
	if (r < 0)
		return r;

	/* forget all local bindings; every byte becomes INVALID_PIPE */
	memset(hdev->gate2pipe, NFC_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe));

	return 0;
}
EXPORT_SYMBOL(nfc_hci_disconnect_all_gates);
313
/*
 * Bind dest_gate to a pipe on dest_host: the admin and link management
 * gates use their fixed static pipes, any other gate gets a dynamically
 * created pipe. The pipe is then opened and recorded in gate2pipe.
 * Returns -EADDRINUSE if the gate is already connected.
 */
int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate)
{
	u8 pipe = NFC_HCI_INVALID_PIPE;
	bool pipe_created = false;
	int r;

	pr_debug("\n");

	if (hdev->gate2pipe[dest_gate] != NFC_HCI_INVALID_PIPE)
		return -EADDRINUSE;

	switch (dest_gate) {
	case NFC_HCI_LINK_MGMT_GATE:
		pipe = NFC_HCI_LINK_MGMT_PIPE;
		break;
	case NFC_HCI_ADMIN_GATE:
		pipe = NFC_HCI_ADMIN_PIPE;
		break;
	default:
		pipe = nfc_hci_create_pipe(hdev, dest_host, dest_gate, &r);
		if (pipe == NFC_HCI_INVALID_PIPE)
			return r;
		pipe_created = true;
		break;
	}

	r = nfc_hci_open_pipe(hdev, pipe);
	if (r < 0) {
		/* roll back the pipe we just created; a failed delete
		 * leaves the chip-side state inconsistent */
		if (pipe_created)
			if (nfc_hci_delete_pipe(hdev, pipe) < 0) {
				/* TODO: Cannot clean by deleting pipe...
				 * -> inconsistent state */
			}
		return r;
	}

	hdev->gate2pipe[dest_gate] = pipe;

	return 0;
}
EXPORT_SYMBOL(nfc_hci_connect_gate);
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
new file mode 100644
index 000000000000..86fd00d5a099
--- /dev/null
+++ b/net/nfc/hci/core.c
@@ -0,0 +1,830 @@
1/*
2 * Copyright (C) 2012 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the
16 * Free Software Foundation, Inc.,
17 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 */
19
20#define pr_fmt(fmt) "hci: %s: " fmt, __func__
21
22#include <linux/init.h>
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/nfc.h>
26
27#include <net/nfc/nfc.h>
28#include <net/nfc/hci.h>
29
30#include "hci.h"
31
32/* Largest headroom needed for outgoing HCI commands */
33#define HCI_CMDS_HEADROOM 1
34
/*
 * Transmit worker: completes a pending command whose response timer has
 * expired, then sends the next queued HCP message (all fragments) if no
 * command is outstanding. Runs entirely under msg_tx_mutex.
 */
static void nfc_hci_msg_tx_work(struct work_struct *work)
{
	struct nfc_hci_dev *hdev = container_of(work, struct nfc_hci_dev,
						msg_tx_work);
	struct hci_msg *msg;
	struct sk_buff *skb;
	int r = 0;

	mutex_lock(&hdev->msg_tx_mutex);

	if (hdev->cmd_pending_msg) {
		if (timer_pending(&hdev->cmd_timer) == 0) {
			/* cmd_timer expired: complete the pending command
			 * with a timeout so its submitter is unblocked */
			if (hdev->cmd_pending_msg->cb)
				hdev->cmd_pending_msg->cb(hdev,
							  NFC_HCI_ANY_E_TIMEOUT,
							  NULL,
							  hdev->
							  cmd_pending_msg->
							  cb_context);
			kfree(hdev->cmd_pending_msg);
			hdev->cmd_pending_msg = NULL;
		} else
			/* response still pending; try again when it
			 * arrives or the timer fires */
			goto exit;
	}

next_msg:
	if (list_empty(&hdev->msg_tx_queue))
		goto exit;

	msg = list_first_entry(&hdev->msg_tx_queue, struct hci_msg, msg_l);
	list_del(&msg->msg_l);

	pr_debug("msg_tx_queue has a cmd to send\n");
	while ((skb = skb_dequeue(&msg->msg_frags)) != NULL) {
		r = hdev->ops->xmit(hdev, skb);
		if (r < 0) {
			/* driver refused the frame: drop the whole message
			 * and report NOK to the submitter (msg is freed
			 * here; the r<0 check below skips re-touching it) */
			kfree_skb(skb);
			skb_queue_purge(&msg->msg_frags);
			if (msg->cb)
				msg->cb(hdev, NFC_HCI_ANY_E_NOK, NULL,
					msg->cb_context);
			kfree(msg);
			break;
		}
	}

	if (r)
		goto next_msg;

	if (msg->wait_response == false) {
		/* events and responses expect no answer: move on */
		kfree(msg);
		goto next_msg;
	}

	/* commands: park the message and arm the response timeout */
	hdev->cmd_pending_msg = msg;
	mod_timer(&hdev->cmd_timer, jiffies +
		  msecs_to_jiffies(hdev->cmd_pending_msg->completion_delay));

exit:
	mutex_unlock(&hdev->msg_tx_mutex);
}
96
97static void nfc_hci_msg_rx_work(struct work_struct *work)
98{
99 struct nfc_hci_dev *hdev = container_of(work, struct nfc_hci_dev,
100 msg_rx_work);
101 struct sk_buff *skb;
102 struct hcp_message *message;
103 u8 pipe;
104 u8 type;
105 u8 instruction;
106
107 while ((skb = skb_dequeue(&hdev->msg_rx_queue)) != NULL) {
108 pipe = skb->data[0];
109 skb_pull(skb, NFC_HCI_HCP_PACKET_HEADER_LEN);
110 message = (struct hcp_message *)skb->data;
111 type = HCP_MSG_GET_TYPE(message->header);
112 instruction = HCP_MSG_GET_CMD(message->header);
113 skb_pull(skb, NFC_HCI_HCP_MESSAGE_HEADER_LEN);
114
115 nfc_hci_hcp_message_rx(hdev, pipe, type, instruction, skb);
116 }
117}
118
/*
 * Called from the HCP rx path when a response arrives. Completes the
 * pending command (callback consumes the skb), then re-kicks the tx
 * worker so the next queued message can be sent.
 */
void nfc_hci_resp_received(struct nfc_hci_dev *hdev, u8 result,
			   struct sk_buff *skb)
{
	mutex_lock(&hdev->msg_tx_mutex);

	if (hdev->cmd_pending_msg == NULL) {
		/* unsolicited response: nothing is waiting for it */
		kfree_skb(skb);
		goto exit;
	}

	del_timer_sync(&hdev->cmd_timer);

	if (hdev->cmd_pending_msg->cb)
		hdev->cmd_pending_msg->cb(hdev, result, skb,
					  hdev->cmd_pending_msg->cb_context);
	else
		kfree_skb(skb);

	kfree(hdev->cmd_pending_msg);
	hdev->cmd_pending_msg = NULL;

	queue_work(hdev->msg_tx_wq, &hdev->msg_tx_work);

exit:
	mutex_unlock(&hdev->msg_tx_mutex);
}
145
/* Command addressed to this host: not supported yet, silently dropped. */
void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
			  struct sk_buff *skb)
{
	kfree_skb(skb);
}
151
152static u32 nfc_hci_sak_to_protocol(u8 sak)
153{
154 switch (NFC_HCI_TYPE_A_SEL_PROT(sak)) {
155 case NFC_HCI_TYPE_A_SEL_PROT_MIFARE:
156 return NFC_PROTO_MIFARE_MASK;
157 case NFC_HCI_TYPE_A_SEL_PROT_ISO14443:
158 return NFC_PROTO_ISO14443_MASK;
159 case NFC_HCI_TYPE_A_SEL_PROT_DEP:
160 return NFC_PROTO_NFC_DEP_MASK;
161 case NFC_HCI_TYPE_A_SEL_PROT_ISO14443_DEP:
162 return NFC_PROTO_ISO14443_MASK | NFC_PROTO_NFC_DEP_MASK;
163 default:
164 return 0xffffffff;
165 }
166}
167
/*
 * A target was discovered on a reader gate. Query the gate for the target's
 * parameters, build a one-element nfc_target array, report it to the NFC
 * core, and cache it in hdev->targets for later lookup by idx.
 */
static int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate)
{
	struct nfc_target *targets;
	struct sk_buff *atqa_skb = NULL;
	struct sk_buff *sak_skb = NULL;
	int r;

	pr_debug("from gate %d\n", gate);

	targets = kzalloc(sizeof(struct nfc_target), GFP_KERNEL);
	if (targets == NULL)
		return -ENOMEM;

	switch (gate) {
	case NFC_HCI_RF_READER_A_GATE:
		r = nfc_hci_get_param(hdev, NFC_HCI_RF_READER_A_GATE,
				      NFC_HCI_RF_READER_A_ATQA, &atqa_skb);
		if (r < 0)
			goto exit;

		r = nfc_hci_get_param(hdev, NFC_HCI_RF_READER_A_GATE,
				      NFC_HCI_RF_READER_A_SAK, &sak_skb);
		if (r < 0)
			goto exit;

		/* ATQA is 2 bytes, SAK is 1 byte */
		if (atqa_skb->len != 2 || sak_skb->len != 1) {
			r = -EPROTO;
			goto exit;
		}

		targets->supported_protocols =
				nfc_hci_sak_to_protocol(sak_skb->data[0]);
		if (targets->supported_protocols == 0xffffffff) {
			r = -EPROTO;
			goto exit;
		}

		/* assumes the chip reports ATQA big-endian — TODO confirm */
		targets->sens_res = be16_to_cpu(*(u16 *)atqa_skb->data);
		targets->sel_res = sak_skb->data[0];

		if (hdev->ops->complete_target_discovered) {
			r = hdev->ops->complete_target_discovered(hdev, gate,
								  targets);
			if (r < 0)
				goto exit;
		}
		break;
	case NFC_HCI_RF_READER_B_GATE:
		targets->supported_protocols = NFC_PROTO_ISO14443_MASK;
		break;
	default:
		/* driver-specific gate: let the driver fill in the target */
		if (hdev->ops->target_from_gate)
			r = hdev->ops->target_from_gate(hdev, gate, targets);
		else
			r = -EPROTO;
		if (r < 0)
			goto exit;

		if (hdev->ops->complete_target_discovered) {
			r = hdev->ops->complete_target_discovered(hdev, gate,
								  targets);
			if (r < 0)
				goto exit;
		}
		break;
	}

	targets->hci_reader_gate = gate;

	r = nfc_targets_found(hdev->ndev, targets, 1);
	if (r < 0)
		goto exit;

	/* ownership of targets moves to hdev; NULL it so the exit path
	 * kfree does nothing */
	kfree(hdev->targets);
	hdev->targets = targets;
	targets = NULL;
	hdev->target_count = 1;

exit:
	kfree(targets);
	kfree_skb(atqa_skb);
	kfree_skb(sak_skb);

	return r;
}
253
/*
 * Dispatch an HCI event received on a pipe. Always consumes skb.
 */
void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event,
			    struct sk_buff *skb)
{
	int r = 0;

	switch (event) {
	case NFC_HCI_EVT_TARGET_DISCOVERED:
		/* discovery events outside an active poll are a
		 * protocol violation */
		if (hdev->poll_started == false) {
			r = -EPROTO;
			goto exit;
		}

		if (skb->len < 1) {	/* no status data? */
			r = -EPROTO;
			goto exit;
		}

		if (skb->data[0] == 3) {
			/* TODO: Multiple targets in field, none activated
			 * poll is supposedly stopped, but there is no
			 * single target to activate, so nothing to report
			 * up.
			 * if we need to restart poll, we must save the
			 * protocols from the initial poll and reuse here.
			 */
		}

		/* any non-zero status byte means discovery failed */
		if (skb->data[0] != 0) {
			r = -EPROTO;
			goto exit;
		}

		r = nfc_hci_target_discovered(hdev,
					      nfc_hci_pipe2gate(hdev, pipe));
		break;
	default:
		/* TODO: Unknown events are hardware specific
		 * pass them to the driver (needs a new hci_ops) */
		break;
	}

exit:
	kfree_skb(skb);

	if (r) {
		/* TODO: There was an error dispatching the event,
		 * how to propagate up to nfc core?
		 */
	}
}
304
/*
 * cmd_timer handler: the pending command's response deadline expired.
 * Kick the tx worker, which detects the expired timer and completes the
 * pending message with NFC_HCI_ANY_E_TIMEOUT.
 */
static void nfc_hci_cmd_timeout(unsigned long data)
{
	struct nfc_hci_dev *hdev = (struct nfc_hci_dev *)data;

	queue_work(hdev->msg_tx_wq, &hdev->msg_tx_work);
}
311
312static int hci_dev_connect_gates(struct nfc_hci_dev *hdev, u8 gate_count,
313 u8 gates[])
314{
315 int r;
316 u8 *p = gates;
317 while (gate_count--) {
318 r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID, *p);
319 if (r < 0)
320 return r;
321 p++;
322 }
323
324 return 0;
325}
326
/*
 * Establish the HCI session: connect the admin gate, compare the chip's
 * stored session id against ours, then (for now, unconditionally) tear
 * down and rebuild all standard and driver-requested gate connections and
 * record our session id on the chip.
 */
static int hci_dev_session_init(struct nfc_hci_dev *hdev)
{
	struct sk_buff *skb = NULL;
	int r;
	u8 hci_gates[] = {	/* NFC_HCI_ADMIN_GATE MUST be first */
		NFC_HCI_ADMIN_GATE, NFC_HCI_LOOPBACK_GATE,
		NFC_HCI_ID_MGMT_GATE, NFC_HCI_LINK_MGMT_GATE,
		NFC_HCI_RF_READER_B_GATE, NFC_HCI_RF_READER_A_GATE
	};

	r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID,
				 NFC_HCI_ADMIN_GATE);
	if (r < 0)
		goto exit;

	r = nfc_hci_get_param(hdev, NFC_HCI_ADMIN_GATE,
			      NFC_HCI_ADMIN_SESSION_IDENTITY, &skb);
	if (r < 0)
		goto disconnect_all;

	/* session id match would mean a warm restart; see TODO below */
	if (skb->len && skb->len == strlen(hdev->init_data.session_id))
		if (memcmp(hdev->init_data.session_id, skb->data,
			   skb->len) == 0) {
			/* TODO ELa: restore gate<->pipe table from
			 * some TBD location.
			 * note: it doesn't seem possible to get the chip
			 * currently open gate/pipe table.
			 * It is only possible to obtain the supported
			 * gate list.
			 */

			/* goto exit
			 * For now, always do a full initialization */
		}

	r = nfc_hci_disconnect_all_gates(hdev);
	if (r < 0)
		goto exit;

	/* standard gates first (includes the admin gate)... */
	r = hci_dev_connect_gates(hdev, sizeof(hci_gates), hci_gates);
	if (r < 0)
		goto disconnect_all;

	/* ...then the driver-specific gates */
	r = hci_dev_connect_gates(hdev, hdev->init_data.gate_count,
				  hdev->init_data.gates);
	if (r < 0)
		goto disconnect_all;

	/* record our session id so a later init can detect a warm start */
	r = nfc_hci_set_param(hdev, NFC_HCI_ADMIN_GATE,
			      NFC_HCI_ADMIN_SESSION_IDENTITY,
			      hdev->init_data.session_id,
			      strlen(hdev->init_data.session_id));
	if (r == 0)
		goto exit;

disconnect_all:
	nfc_hci_disconnect_all_gates(hdev);

exit:
	if (skb)
		kfree_skb(skb);

	return r;
}
391
/*
 * Read the software and hardware version registers from the ID management
 * gate into hdev and log them. Both registers must be exactly 3 bytes.
 */
static int hci_dev_version(struct nfc_hci_dev *hdev)
{
	int r;
	struct sk_buff *skb;

	r = nfc_hci_get_param(hdev, NFC_HCI_ID_MGMT_GATE,
			      NFC_HCI_ID_MGMT_VERSION_SW, &skb);
	if (r < 0)
		return r;

	if (skb->len != 3) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* byte 0: romlib in the high nibble, patch level in the low */
	hdev->sw_romlib = (skb->data[0] & 0xf0) >> 4;
	hdev->sw_patch = skb->data[0] & 0x0f;
	hdev->sw_flashlib_major = skb->data[1];
	hdev->sw_flashlib_minor = skb->data[2];

	kfree_skb(skb);

	r = nfc_hci_get_param(hdev, NFC_HCI_ID_MGMT_GATE,
			      NFC_HCI_ID_MGMT_VERSION_HW, &skb);
	if (r < 0)
		return r;

	if (skb->len != 3) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* byte 0: derivative (top 3 bits) + hw version (low 5 bits);
	 * byte 1: mpw (top 2 bits) + software (low 6 bits) */
	hdev->hw_derivative = (skb->data[0] & 0xe0) >> 5;
	hdev->hw_version = skb->data[0] & 0x1f;
	hdev->hw_mpw = (skb->data[1] & 0xc0) >> 6;
	hdev->hw_software = skb->data[1] & 0x3f;
	hdev->hw_bsid = skb->data[2];

	kfree_skb(skb);

	pr_info("SOFTWARE INFO:\n");
	pr_info("RomLib : %d\n", hdev->sw_romlib);
	pr_info("Patch : %d\n", hdev->sw_patch);
	pr_info("FlashLib Major : %d\n", hdev->sw_flashlib_major);
	pr_info("FlashLib Minor : %d\n", hdev->sw_flashlib_minor);
	pr_info("HARDWARE INFO:\n");
	pr_info("Derivative : %d\n", hdev->hw_derivative);
	pr_info("HW Version : %d\n", hdev->hw_version);
	pr_info("#MPW : %d\n", hdev->hw_mpw);
	pr_info("Software : %d\n", hdev->hw_software);
	pr_info("BSID Version : %d\n", hdev->hw_bsid);

	return 0;
}
446
/*
 * nfc_ops dev_up: open the driver transport, initialize the HCI session,
 * make sure polling is stopped, and read the version registers. On any
 * failure after open, the transport is closed again before returning.
 */
static int hci_dev_up(struct nfc_dev *nfc_dev)
{
	struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
	int r = 0;

	if (hdev->ops->open) {
		r = hdev->ops->open(hdev);
		if (r < 0)
			return r;
	}

	r = hci_dev_session_init(hdev);
	if (r < 0)
		goto exit;

	/* stop any polling possibly still running on the chip */
	r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
			       NFC_HCI_EVT_END_OPERATION, NULL, 0);
	if (r < 0)
		goto exit;

	if (hdev->ops->hci_ready) {
		r = hdev->ops->hci_ready(hdev);
		if (r < 0)
			goto exit;
	}

	r = hci_dev_version(hdev);
	if (r < 0)
		goto exit;

exit:
	if (r < 0)
		if (hdev->ops->close)
			hdev->ops->close(hdev);
	return r;
}
483
484static int hci_dev_down(struct nfc_dev *nfc_dev)
485{
486 struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
487
488 if (hdev->ops->close)
489 hdev->ops->close(hdev);
490
491 memset(hdev->gate2pipe, NFC_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe));
492
493 return 0;
494}
495
496static int hci_start_poll(struct nfc_dev *nfc_dev, u32 protocols)
497{
498 struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
499 int r;
500
501 if (hdev->ops->start_poll)
502 r = hdev->ops->start_poll(hdev, protocols);
503 else
504 r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
505 NFC_HCI_EVT_READER_REQUESTED, NULL, 0);
506 if (r == 0)
507 hdev->poll_started = true;
508
509 return r;
510}
511
512static void hci_stop_poll(struct nfc_dev *nfc_dev)
513{
514 struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
515
516 if (hdev->poll_started) {
517 nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
518 NFC_HCI_EVT_END_OPERATION, NULL, 0);
519 hdev->poll_started = false;
520 }
521}
522
523static struct nfc_target *hci_find_target(struct nfc_hci_dev *hdev,
524 u32 target_idx)
525{
526 int i;
527 if (hdev->poll_started == false || hdev->targets == NULL)
528 return NULL;
529
530 for (i = 0; i < hdev->target_count; i++) {
531 if (hdev->targets[i].idx == target_idx)
532 return &hdev->targets[i];
533 }
534
535 return NULL;
536}
537
538static int hci_activate_target(struct nfc_dev *nfc_dev, u32 target_idx,
539 u32 protocol)
540{
541 struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
542
543 if (hci_find_target(hdev, target_idx) == NULL)
544 return -ENOMEDIUM;
545
546 return 0;
547}
548
/* Intentionally empty — presumably deactivation happens chip-side on the
 * next poll cycle; TODO confirm with the HCI spec. */
static void hci_deactivate_target(struct nfc_dev *nfc_dev, u32 target_idx)
{
}
552
/*
 * nfc_ops data_exchange: send skb to the activated target and deliver the
 * response (or error) through cb. Consumes skb. Returns -ENOMEDIUM when
 * the target is unknown; otherwise 0, with transport errors reported
 * through the callback's error argument.
 */
static int hci_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx,
			     struct sk_buff *skb, data_exchange_cb_t cb,
			     void *cb_context)
{
	struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
	int r;
	struct nfc_target *target;
	struct sk_buff *res_skb = NULL;

	pr_debug("target_idx=%d\n", target_idx);

	target = hci_find_target(hdev, target_idx);
	if (target == NULL)
		return -ENOMEDIUM;

	switch (target->hci_reader_gate) {
	case NFC_HCI_RF_READER_A_GATE:
	case NFC_HCI_RF_READER_B_GATE:
		/* give the driver first shot; r > 0 means "not handled,
		 * fall back to the generic WR_XCHG_DATA path" */
		if (hdev->ops->data_exchange) {
			r = hdev->ops->data_exchange(hdev, target, skb,
						     &res_skb);
			if (r <= 0)	/* handled */
				break;
		}

		*skb_push(skb, 1) = 0;	/* CTR, see spec:10.2.2.1 */
		r = nfc_hci_send_cmd(hdev, target->hci_reader_gate,
				     NFC_HCI_WR_XCHG_DATA,
				     skb->data, skb->len, &res_skb);
		/*
		 * TODO: Check RF Error indicator to make sure data is valid.
		 * It seems that HCI cmd can complete without error, but data
		 * can be invalid if an RF error occured? Ignore for now.
		 */
		if (r == 0)
			skb_trim(res_skb, res_skb->len - 1); /* RF Err ind */
		break;
	default:
		/* non-reader gate: only the driver can do the exchange */
		if (hdev->ops->data_exchange) {
			r = hdev->ops->data_exchange(hdev, target, skb,
						     &res_skb);
			if (r == 1)
				r = -ENOTSUPP;
		}
		else
			r = -ENOTSUPP;
	}

	kfree_skb(skb);

	cb(cb_context, res_skb, r);

	return 0;
}
607
/* nfc core operations vector installed on every HCI-backed nfc_dev
 * (see nfc_hci_allocate_device). */
struct nfc_ops hci_nfc_ops = {
	.dev_up = hci_dev_up,
	.dev_down = hci_dev_down,
	.start_poll = hci_start_poll,
	.stop_poll = hci_stop_poll,
	.activate_target = hci_activate_target,
	.deactivate_target = hci_deactivate_target,
	.data_exchange = hci_data_exchange,
};
617
/*
 * Allocate an HCI device and its backing nfc_dev.
 * ops->xmit is mandatory and protocols must be non-zero; tx_headroom is
 * increased by HCI_CMDS_HEADROOM for the HCP packet header.
 * Returns NULL on invalid arguments or allocation failure.
 */
struct nfc_hci_dev *nfc_hci_allocate_device(struct nfc_hci_ops *ops,
					    struct nfc_hci_init_data *init_data,
					    u32 protocols,
					    int tx_headroom,
					    int tx_tailroom,
					    int max_link_payload)
{
	struct nfc_hci_dev *hdev;

	if (ops->xmit == NULL)
		return NULL;

	if (protocols == 0)
		return NULL;

	hdev = kzalloc(sizeof(struct nfc_hci_dev), GFP_KERNEL);
	if (hdev == NULL)
		return NULL;

	hdev->ndev = nfc_allocate_device(&hci_nfc_ops, protocols,
					 tx_headroom + HCI_CMDS_HEADROOM,
					 tx_tailroom);
	if (!hdev->ndev) {
		kfree(hdev);
		return NULL;
	}

	hdev->ops = ops;
	hdev->max_data_link_payload = max_link_payload;
	hdev->init_data = *init_data;	/* struct copy */

	nfc_set_drvdata(hdev->ndev, hdev);

	/* no gates connected yet */
	memset(hdev->gate2pipe, NFC_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe));

	return hdev;
}
EXPORT_SYMBOL(nfc_hci_allocate_device);
656
/* Release an HCI device allocated with nfc_hci_allocate_device(). */
void nfc_hci_free_device(struct nfc_hci_dev *hdev)
{
	nfc_free_device(hdev->ndev);
	kfree(hdev);
}
EXPORT_SYMBOL(nfc_hci_free_device);
663
/*
 * Finish initializing hdev (mutex, tx/rx workqueues, command timer) and
 * register the underlying nfc_dev. On failure any workqueue already
 * created is destroyed; freeing hdev itself is left to the caller.
 */
int nfc_hci_register_device(struct nfc_hci_dev *hdev)
{
	struct device *dev = &hdev->ndev->dev;
	const char *devname = dev_name(dev);
	char name[32];
	int r = 0;

	mutex_init(&hdev->msg_tx_mutex);

	INIT_LIST_HEAD(&hdev->msg_tx_queue);

	INIT_WORK(&hdev->msg_tx_work, nfc_hci_msg_tx_work);
	snprintf(name, sizeof(name), "%s_hci_msg_tx_wq", devname);
	/* max_active = 1 keeps the tx work serialized */
	hdev->msg_tx_wq = alloc_workqueue(name, WQ_NON_REENTRANT | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (hdev->msg_tx_wq == NULL) {
		r = -ENOMEM;
		goto exit;
	}

	init_timer(&hdev->cmd_timer);
	hdev->cmd_timer.data = (unsigned long)hdev;
	hdev->cmd_timer.function = nfc_hci_cmd_timeout;

	skb_queue_head_init(&hdev->rx_hcp_frags);

	INIT_WORK(&hdev->msg_rx_work, nfc_hci_msg_rx_work);
	snprintf(name, sizeof(name), "%s_hci_msg_rx_wq", devname);
	hdev->msg_rx_wq = alloc_workqueue(name, WQ_NON_REENTRANT | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (hdev->msg_rx_wq == NULL) {
		r = -ENOMEM;
		goto exit;
	}

	skb_queue_head_init(&hdev->msg_rx_queue);

	r = nfc_register_device(hdev->ndev);

exit:
	if (r < 0) {
		/* hdev came from kzalloc, so an unallocated wq is NULL */
		if (hdev->msg_tx_wq)
			destroy_workqueue(hdev->msg_tx_wq);
		if (hdev->msg_rx_wq)
			destroy_workqueue(hdev->msg_rx_wq);
	}

	return r;
}
EXPORT_SYMBOL(nfc_hci_register_device);
714
715void nfc_hci_unregister_device(struct nfc_hci_dev *hdev)
716{
717 struct hci_msg *msg;
718
719 skb_queue_purge(&hdev->rx_hcp_frags);
720 skb_queue_purge(&hdev->msg_rx_queue);
721
722 while ((msg = list_first_entry(&hdev->msg_tx_queue, struct hci_msg,
723 msg_l)) != NULL) {
724 list_del(&msg->msg_l);
725 skb_queue_purge(&msg->msg_frags);
726 kfree(msg);
727 }
728
729 del_timer_sync(&hdev->cmd_timer);
730
731 nfc_unregister_device(hdev->ndev);
732
733 destroy_workqueue(hdev->msg_tx_wq);
734
735 destroy_workqueue(hdev->msg_rx_wq);
736}
737EXPORT_SYMBOL(nfc_hci_unregister_device);
738
/* Attach driver-private data to the HCI device. */
void nfc_hci_set_clientdata(struct nfc_hci_dev *hdev, void *clientdata)
{
	hdev->clientdata = clientdata;
}
EXPORT_SYMBOL(nfc_hci_set_clientdata);
744
/* Retrieve the driver-private data set with nfc_hci_set_clientdata(). */
void *nfc_hci_get_clientdata(struct nfc_hci_dev *hdev)
{
	return hdev->clientdata;
}
EXPORT_SYMBOL(nfc_hci_get_clientdata);
750
751void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb)
752{
753 struct hcp_packet *packet;
754 u8 type;
755 u8 instruction;
756 struct sk_buff *hcp_skb;
757 u8 pipe;
758 struct sk_buff *frag_skb;
759 int msg_len;
760
761 if (skb == NULL) {
762 /* TODO ELa: lower layer had permanent failure, need to
763 * propagate that up
764 */
765
766 skb_queue_purge(&hdev->rx_hcp_frags);
767
768 return;
769 }
770
771 packet = (struct hcp_packet *)skb->data;
772 if ((packet->header & ~NFC_HCI_FRAGMENT) == 0) {
773 skb_queue_tail(&hdev->rx_hcp_frags, skb);
774 return;
775 }
776
777 /* it's the last fragment. Does it need re-aggregation? */
778 if (skb_queue_len(&hdev->rx_hcp_frags)) {
779 pipe = packet->header & NFC_HCI_FRAGMENT;
780 skb_queue_tail(&hdev->rx_hcp_frags, skb);
781
782 msg_len = 0;
783 skb_queue_walk(&hdev->rx_hcp_frags, frag_skb) {
784 msg_len += (frag_skb->len -
785 NFC_HCI_HCP_PACKET_HEADER_LEN);
786 }
787
788 hcp_skb = nfc_alloc_recv_skb(NFC_HCI_HCP_PACKET_HEADER_LEN +
789 msg_len, GFP_KERNEL);
790 if (hcp_skb == NULL) {
791 /* TODO ELa: cannot deliver HCP message. How to
792 * propagate error up?
793 */
794 }
795
796 *skb_put(hcp_skb, NFC_HCI_HCP_PACKET_HEADER_LEN) = pipe;
797
798 skb_queue_walk(&hdev->rx_hcp_frags, frag_skb) {
799 msg_len = frag_skb->len - NFC_HCI_HCP_PACKET_HEADER_LEN;
800 memcpy(skb_put(hcp_skb, msg_len),
801 frag_skb->data + NFC_HCI_HCP_PACKET_HEADER_LEN,
802 msg_len);
803 }
804
805 skb_queue_purge(&hdev->rx_hcp_frags);
806 } else {
807 packet->header &= NFC_HCI_FRAGMENT;
808 hcp_skb = skb;
809 }
810
811 /* if this is a response, dispatch immediately to
812 * unblock waiting cmd context. Otherwise, enqueue to dispatch
813 * in separate context where handler can also execute command.
814 */
815 packet = (struct hcp_packet *)hcp_skb->data;
816 type = HCP_MSG_GET_TYPE(packet->message.header);
817 if (type == NFC_HCI_HCP_RESPONSE) {
818 pipe = packet->header;
819 instruction = HCP_MSG_GET_CMD(packet->message.header);
820 skb_pull(hcp_skb, NFC_HCI_HCP_PACKET_HEADER_LEN +
821 NFC_HCI_HCP_MESSAGE_HEADER_LEN);
822 nfc_hci_hcp_message_rx(hdev, pipe, type, instruction, hcp_skb);
823 } else {
824 skb_queue_tail(&hdev->msg_rx_queue, hcp_skb);
825 queue_work(hdev->msg_rx_wq, &hdev->msg_rx_work);
826 }
827}
828EXPORT_SYMBOL(nfc_hci_recv_frame);
829
830MODULE_LICENSE("GPL");
diff --git a/net/nfc/hci/hci.h b/net/nfc/hci/hci.h
new file mode 100644
index 000000000000..45f2fe4fd486
--- /dev/null
+++ b/net/nfc/hci/hci.h
@@ -0,0 +1,139 @@
1/*
2 * Copyright (C) 2012 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the
16 * Free Software Foundation, Inc.,
17 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 */
19
20#ifndef __LOCAL_HCI_H
21#define __LOCAL_HCI_H
22
/* Static association between an HCI gate and the pipe bound to it. */
struct gate_pipe_map {
	u8 gate;
	u8 pipe;
};
27
/* HCP message: one header byte (type + instruction) followed by payload. */
struct hcp_message {
	u8 header;	/* type -cmd,evt,rsp- + instruction */
	u8 data[];	/* flexible payload, length known from the skb */
} __packed;
32
/* HCP packet as carried on the link: chaining bit + pipe, then the message. */
struct hcp_packet {
	u8 header;	/* cbit+pipe */
	struct hcp_message message;
} __packed;
37
38/*
39 * HCI command execution completion callback.
40 * result will be one of the HCI response codes.
41 * skb contains the response data and must be disposed.
42 */
43typedef void (*hci_cmd_cb_t) (struct nfc_hci_dev *hdev, u8 result,
44 struct sk_buff *skb, void *cb_data);
45
/* Context for a caller sleeping on synchronous HCP command completion. */
struct hcp_exec_waiter {
	wait_queue_head_t *wq;		/* where the waiter sleeps */
	bool exec_complete;		/* set when the response arrived */
	int exec_result;		/* HCI response code / error */
	struct sk_buff *result_skb;	/* response payload, if any */
};
52
/*
 * An outgoing HCP message, pre-fragmented into link-sized frames.
 * Queued on the device tx queue; cb is invoked on completion or timeout.
 */
struct hci_msg {
	struct list_head msg_l;		/* link in hdev->msg_tx_queue */
	struct sk_buff_head msg_frags;	/* ready-to-send frames */
	bool wait_response;		/* true for commands (see hcp.c) */
	hci_cmd_cb_t cb;
	void *cb_context;
	unsigned long completion_delay;	/* timeout for the response;
					 * units set by caller — confirm */
};
61
/* Wire format of the ADM_CREATE_PIPE command parameters. */
struct hci_create_pipe_params {
	u8 src_gate;
	u8 dest_host;
	u8 dest_gate;
} __packed;
67
/* Wire format of the ADM_CREATE_PIPE response. */
struct hci_create_pipe_resp {
	u8 src_host;
	u8 src_gate;
	u8 dest_host;
	u8 dest_gate;
	u8 pipe;	/* id of the newly created pipe */
} __packed;
75
76#define NFC_HCI_FRAGMENT 0x7f
77
/*
 * Build/split the HCP message header byte: top 2 bits are the message
 * type, low 6 bits the instruction. Arguments are fully parenthesized
 * so expressions like HCP_MSG_GET_CMD(a | b) parse as intended.
 */
#define HCP_HEADER(type, instr)	((((type) & 0x03) << 6) | ((instr) & 0x3f))
#define HCP_MSG_GET_TYPE(header) (((header) & 0xc0) >> 6)
#define HCP_MSG_GET_CMD(header)	((header) & 0x3f)
81
82int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe,
83 u8 type, u8 instruction,
84 const u8 *payload, size_t payload_len,
85 hci_cmd_cb_t cb, void *cb_data,
86 unsigned long completion_delay);
87
88u8 nfc_hci_pipe2gate(struct nfc_hci_dev *hdev, u8 pipe);
89
90void nfc_hci_hcp_message_rx(struct nfc_hci_dev *hdev, u8 pipe, u8 type,
91 u8 instruction, struct sk_buff *skb);
92
93/* HCP headers */
94#define NFC_HCI_HCP_PACKET_HEADER_LEN 1
95#define NFC_HCI_HCP_MESSAGE_HEADER_LEN 1
96#define NFC_HCI_HCP_HEADER_LEN 2
97
98/* HCP types */
99#define NFC_HCI_HCP_COMMAND 0x00
100#define NFC_HCI_HCP_EVENT 0x01
101#define NFC_HCI_HCP_RESPONSE 0x02
102
103/* Generic commands */
104#define NFC_HCI_ANY_SET_PARAMETER 0x01
105#define NFC_HCI_ANY_GET_PARAMETER 0x02
106#define NFC_HCI_ANY_OPEN_PIPE 0x03
107#define NFC_HCI_ANY_CLOSE_PIPE 0x04
108
109/* Reader RF commands */
110#define NFC_HCI_WR_XCHG_DATA 0x10
111
112/* Admin commands */
113#define NFC_HCI_ADM_CREATE_PIPE 0x10
114#define NFC_HCI_ADM_DELETE_PIPE 0x11
115#define NFC_HCI_ADM_NOTIFY_PIPE_CREATED 0x12
116#define NFC_HCI_ADM_NOTIFY_PIPE_DELETED 0x13
117#define NFC_HCI_ADM_CLEAR_ALL_PIPE 0x14
118#define NFC_HCI_ADM_NOTIFY_ALL_PIPE_CLEARED 0x15
119
120/* Generic responses */
121#define NFC_HCI_ANY_OK 0x00
122#define NFC_HCI_ANY_E_NOT_CONNECTED 0x01
123#define NFC_HCI_ANY_E_CMD_PAR_UNKNOWN 0x02
124#define NFC_HCI_ANY_E_NOK 0x03
125#define NFC_HCI_ANY_E_PIPES_FULL 0x04
126#define NFC_HCI_ANY_E_REG_PAR_UNKNOWN 0x05
127#define NFC_HCI_ANY_E_PIPE_NOT_OPENED 0x06
128#define NFC_HCI_ANY_E_CMD_NOT_SUPPORTED 0x07
129#define NFC_HCI_ANY_E_INHIBITED 0x08
130#define NFC_HCI_ANY_E_TIMEOUT 0x09
131#define NFC_HCI_ANY_E_REG_ACCESS_DENIED 0x0a
132#define NFC_HCI_ANY_E_PIPE_ACCESS_DENIED 0x0b
133
134/* Pipes */
135#define NFC_HCI_INVALID_PIPE 0x80
136#define NFC_HCI_LINK_MGMT_PIPE 0x00
137#define NFC_HCI_ADMIN_PIPE 0x01
138
139#endif /* __LOCAL_HCI_H */
diff --git a/net/nfc/hci/hcp.c b/net/nfc/hci/hcp.c
new file mode 100644
index 000000000000..7212cf2c5785
--- /dev/null
+++ b/net/nfc/hci/hcp.c
@@ -0,0 +1,156 @@
1/*
2 * Copyright (C) 2012 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the
16 * Free Software Foundation, Inc.,
17 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 */
19
20#define pr_fmt(fmt) "hci: %s: " fmt, __func__
21
22#include <linux/init.h>
23#include <linux/kernel.h>
24#include <linux/module.h>
25
26#include <net/nfc/hci.h>
27
28#include "hci.h"
29
30/*
31 * Payload is the HCP message data only. Instruction will be prepended.
32 * Guarantees that cb will be called upon completion or timeout delay
33 * counted from the moment the cmd is sent to the transport.
34 */
35int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe,
36 u8 type, u8 instruction,
37 const u8 *payload, size_t payload_len,
38 hci_cmd_cb_t cb, void *cb_data,
39 unsigned long completion_delay)
40{
41 struct nfc_dev *ndev = hdev->ndev;
42 struct hci_msg *cmd;
43 const u8 *ptr = payload;
44 int hci_len, err;
45 bool firstfrag = true;
46
47 cmd = kzalloc(sizeof(struct hci_msg), GFP_KERNEL);
48 if (cmd == NULL)
49 return -ENOMEM;
50
51 INIT_LIST_HEAD(&cmd->msg_l);
52 skb_queue_head_init(&cmd->msg_frags);
53 cmd->wait_response = (type == NFC_HCI_HCP_COMMAND) ? true : false;
54 cmd->cb = cb;
55 cmd->cb_context = cb_data;
56 cmd->completion_delay = completion_delay;
57
58 hci_len = payload_len + 1;
59 while (hci_len > 0) {
60 struct sk_buff *skb;
61 int skb_len, data_link_len;
62 struct hcp_packet *packet;
63
64 if (NFC_HCI_HCP_PACKET_HEADER_LEN + hci_len <=
65 hdev->max_data_link_payload)
66 data_link_len = hci_len;
67 else
68 data_link_len = hdev->max_data_link_payload -
69 NFC_HCI_HCP_PACKET_HEADER_LEN;
70
71 skb_len = ndev->tx_headroom + NFC_HCI_HCP_PACKET_HEADER_LEN +
72 data_link_len + ndev->tx_tailroom;
73 hci_len -= data_link_len;
74
75 skb = alloc_skb(skb_len, GFP_KERNEL);
76 if (skb == NULL) {
77 err = -ENOMEM;
78 goto out_skb_err;
79 }
80 skb_reserve(skb, ndev->tx_headroom);
81
82 skb_put(skb, NFC_HCI_HCP_PACKET_HEADER_LEN + data_link_len);
83
84 /* Only the last fragment will have the cb bit set to 1 */
85 packet = (struct hcp_packet *)skb->data;
86 packet->header = pipe;
87 if (firstfrag) {
88 firstfrag = false;
89 packet->message.header = HCP_HEADER(type, instruction);
90 if (ptr) {
91 memcpy(packet->message.data, ptr,
92 data_link_len - 1);
93 ptr += data_link_len - 1;
94 }
95 } else {
96 memcpy(&packet->message, ptr, data_link_len);
97 ptr += data_link_len;
98 }
99
100 /* This is the last fragment, set the cb bit */
101 if (hci_len == 0)
102 packet->header |= ~NFC_HCI_FRAGMENT;
103
104 skb_queue_tail(&cmd->msg_frags, skb);
105 }
106
107 mutex_lock(&hdev->msg_tx_mutex);
108 list_add_tail(&hdev->msg_tx_queue, &cmd->msg_l);
109 mutex_unlock(&hdev->msg_tx_mutex);
110
111 queue_work(hdev->msg_tx_wq, &hdev->msg_tx_work);
112
113 return 0;
114
115out_skb_err:
116 skb_queue_purge(&cmd->msg_frags);
117 kfree(cmd);
118
119 return err;
120}
121
122u8 nfc_hci_pipe2gate(struct nfc_hci_dev *hdev, u8 pipe)
123{
124 int gate;
125
126 for (gate = 0; gate < NFC_HCI_MAX_GATES; gate++)
127 if (hdev->gate2pipe[gate] == pipe)
128 return gate;
129
130 return 0xff;
131}
132
133/*
134 * Receive hcp message for pipe, with type and cmd.
135 * skb contains optional message data only.
136 */
137void nfc_hci_hcp_message_rx(struct nfc_hci_dev *hdev, u8 pipe, u8 type,
138 u8 instruction, struct sk_buff *skb)
139{
140 switch (type) {
141 case NFC_HCI_HCP_RESPONSE:
142 nfc_hci_resp_received(hdev, instruction, skb);
143 break;
144 case NFC_HCI_HCP_COMMAND:
145 nfc_hci_cmd_received(hdev, pipe, instruction, skb);
146 break;
147 case NFC_HCI_HCP_EVENT:
148 nfc_hci_event_received(hdev, pipe, instruction, skb);
149 break;
150 default:
151 pr_err("UNKNOWN MSG Type %d, instruction=%d\n",
152 type, instruction);
153 kfree_skb(skb);
154 break;
155 }
156}
diff --git a/net/nfc/hci/shdlc.c b/net/nfc/hci/shdlc.c
new file mode 100644
index 000000000000..923bdf7c26d6
--- /dev/null
+++ b/net/nfc/hci/shdlc.c
@@ -0,0 +1,945 @@
1/*
2 * Copyright (C) 2012 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the
16 * Free Software Foundation, Inc.,
17 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 */
19
20#define pr_fmt(fmt) "shdlc: %s: " fmt, __func__
21
22#include <linux/sched.h>
23#include <linux/export.h>
24#include <linux/wait.h>
25#include <linux/crc-ccitt.h>
26#include <linux/slab.h>
27#include <linux/skbuff.h>
28
29#include <net/nfc/hci.h>
30#include <net/nfc/shdlc.h>
31
32#define SHDLC_LLC_HEAD_ROOM 2
33#define SHDLC_LLC_TAIL_ROOM 2
34
35#define SHDLC_MAX_WINDOW 4
36#define SHDLC_SREJ_SUPPORT false
37
38#define SHDLC_CONTROL_HEAD_MASK 0xe0
39#define SHDLC_CONTROL_HEAD_I 0x80
40#define SHDLC_CONTROL_HEAD_I2 0xa0
41#define SHDLC_CONTROL_HEAD_S 0xc0
42#define SHDLC_CONTROL_HEAD_U 0xe0
43
44#define SHDLC_CONTROL_NS_MASK 0x38
45#define SHDLC_CONTROL_NR_MASK 0x07
46#define SHDLC_CONTROL_TYPE_MASK 0x18
47
48#define SHDLC_CONTROL_M_MASK 0x1f
49
50enum sframe_type {
51 S_FRAME_RR = 0x00,
52 S_FRAME_REJ = 0x01,
53 S_FRAME_RNR = 0x02,
54 S_FRAME_SREJ = 0x03
55};
56
57enum uframe_modifier {
58 U_FRAME_UA = 0x06,
59 U_FRAME_RSET = 0x19
60};
61
62#define SHDLC_CONNECT_VALUE_MS 5
63#define SHDLC_T1_VALUE_MS(w) ((5 * w) / 4)
64#define SHDLC_T2_VALUE_MS 300
65
66#define SHDLC_DUMP_SKB(info, skb) \
67do { \
68 pr_debug("%s:\n", info); \
69 print_hex_dump(KERN_DEBUG, "shdlc: ", DUMP_PREFIX_OFFSET, \
70 16, 1, skb->data, skb->len, 0); \
71} while (0)
72
/* checks x < y <= z modulo 8 (sequence-number window arithmetic) */
static bool nfc_shdlc_x_lt_y_lteq_z(int x, int y, int z)
{
	if (x < z)
		return (x < y) && (y <= z);

	/* window wraps past 7: y qualifies on either side of the wrap */
	return (y > x) || (y <= z);
}
81
/* checks x <= y < z modulo 8 (sequence-number window arithmetic) */
static bool nfc_shdlc_x_lteq_y_lt_z(int x, int y, int z)
{
	if (x <= z)
		return (x <= y) && (y < z);

	/* x > z means the interval wraps modulo 8 */
	return (y >= x) || (y < z);
}
90
91static struct sk_buff *nfc_shdlc_alloc_skb(struct nfc_shdlc *shdlc,
92 int payload_len)
93{
94 struct sk_buff *skb;
95
96 skb = alloc_skb(shdlc->client_headroom + SHDLC_LLC_HEAD_ROOM +
97 shdlc->client_tailroom + SHDLC_LLC_TAIL_ROOM +
98 payload_len, GFP_KERNEL);
99 if (skb)
100 skb_reserve(skb, shdlc->client_headroom + SHDLC_LLC_HEAD_ROOM);
101
102 return skb;
103}
104
105static void nfc_shdlc_add_len_crc(struct sk_buff *skb)
106{
107 u16 crc;
108 int len;
109
110 len = skb->len + 2;
111 *skb_push(skb, 1) = len;
112
113 crc = crc_ccitt(0xffff, skb->data, skb->len);
114 crc = ~crc;
115 *skb_put(skb, 1) = crc & 0xff;
116 *skb_put(skb, 1) = crc >> 8;
117}
118
119/* immediately sends an S frame. */
120static int nfc_shdlc_send_s_frame(struct nfc_shdlc *shdlc,
121 enum sframe_type sframe_type, int nr)
122{
123 int r;
124 struct sk_buff *skb;
125
126 pr_debug("sframe_type=%d nr=%d\n", sframe_type, nr);
127
128 skb = nfc_shdlc_alloc_skb(shdlc, 0);
129 if (skb == NULL)
130 return -ENOMEM;
131
132 *skb_push(skb, 1) = SHDLC_CONTROL_HEAD_S | (sframe_type << 3) | nr;
133
134 nfc_shdlc_add_len_crc(skb);
135
136 r = shdlc->ops->xmit(shdlc, skb);
137
138 kfree_skb(skb);
139
140 return r;
141}
142
143/* immediately sends an U frame. skb may contain optional payload */
144static int nfc_shdlc_send_u_frame(struct nfc_shdlc *shdlc,
145 struct sk_buff *skb,
146 enum uframe_modifier uframe_modifier)
147{
148 int r;
149
150 pr_debug("uframe_modifier=%d\n", uframe_modifier);
151
152 *skb_push(skb, 1) = SHDLC_CONTROL_HEAD_U | uframe_modifier;
153
154 nfc_shdlc_add_len_crc(skb);
155
156 r = shdlc->ops->xmit(shdlc, skb);
157
158 kfree_skb(skb);
159
160 return r;
161}
162
/*
 * Free ack_pending frames until y_nr - 1, and reset t2 according to
 * the remaining oldest ack_pending frame sent time
 */
static void nfc_shdlc_reset_t2(struct nfc_shdlc *shdlc, int y_nr)
{
	struct sk_buff *skb;
	int dnr = shdlc->dnr;	/* MUST initially be < y_nr */

	pr_debug("release ack pending up to frame %d excluded\n", y_nr);

	/* frames dnr .. y_nr-1 have been acked: drop our retained copies */
	while (dnr != y_nr) {
		pr_debug("release ack pending frame %d\n", dnr);

		skb = skb_dequeue(&shdlc->ack_pending_q);
		kfree_skb(skb);

		dnr = (dnr + 1) % 8;
	}

	if (skb_queue_empty(&shdlc->ack_pending_q)) {
		/* nothing left to retransmit: stop T2 if it was running */
		if (shdlc->t2_active) {
			del_timer_sync(&shdlc->t2_timer);
			shdlc->t2_active = false;

			pr_debug
			    ("All sent frames acked. Stopped T2(retransmit)\n");
		}
	} else {
		/* oldest unacked frame's send time is stored in skb->cb
		 * (written in nfc_shdlc_handle_send_queue); restart T2
		 * relative to that moment */
		skb = skb_peek(&shdlc->ack_pending_q);

		mod_timer(&shdlc->t2_timer, *(unsigned long *)skb->cb +
			  msecs_to_jiffies(SHDLC_T2_VALUE_MS));
		shdlc->t2_active = true;

		pr_debug
		    ("Start T2(retransmit) for remaining unacked sent frames\n");
	}
}
202
203/*
204 * Receive validated frames from lower layer. skb contains HCI payload only.
205 * Handle according to algorithm at spec:10.8.2
206 */
207static void nfc_shdlc_rcv_i_frame(struct nfc_shdlc *shdlc,
208 struct sk_buff *skb, int ns, int nr)
209{
210 int x_ns = ns;
211 int y_nr = nr;
212
213 pr_debug("recvd I-frame %d, remote waiting frame %d\n", ns, nr);
214
215 if (shdlc->state != SHDLC_CONNECTED)
216 goto exit;
217
218 if (x_ns != shdlc->nr) {
219 nfc_shdlc_send_s_frame(shdlc, S_FRAME_REJ, shdlc->nr);
220 goto exit;
221 }
222
223 if (shdlc->t1_active == false) {
224 shdlc->t1_active = true;
225 mod_timer(&shdlc->t1_timer,
226 msecs_to_jiffies(SHDLC_T1_VALUE_MS(shdlc->w)));
227 pr_debug("(re)Start T1(send ack)\n");
228 }
229
230 if (skb->len) {
231 nfc_hci_recv_frame(shdlc->hdev, skb);
232 skb = NULL;
233 }
234
235 shdlc->nr = (shdlc->nr + 1) % 8;
236
237 if (nfc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) {
238 nfc_shdlc_reset_t2(shdlc, y_nr);
239
240 shdlc->dnr = y_nr;
241 }
242
243exit:
244 if (skb)
245 kfree_skb(skb);
246}
247
248static void nfc_shdlc_rcv_ack(struct nfc_shdlc *shdlc, int y_nr)
249{
250 pr_debug("remote acked up to frame %d excluded\n", y_nr);
251
252 if (nfc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) {
253 nfc_shdlc_reset_t2(shdlc, y_nr);
254 shdlc->dnr = y_nr;
255 }
256}
257
258static void nfc_shdlc_requeue_ack_pending(struct nfc_shdlc *shdlc)
259{
260 struct sk_buff *skb;
261
262 pr_debug("ns reset to %d\n", shdlc->dnr);
263
264 while ((skb = skb_dequeue_tail(&shdlc->ack_pending_q))) {
265 skb_pull(skb, 2); /* remove len+control */
266 skb_trim(skb, skb->len - 2); /* remove crc */
267 skb_queue_head(&shdlc->send_q, skb);
268 }
269 shdlc->ns = shdlc->dnr;
270}
271
/* Remote rejected frame y_nr: retransmit everything from there on. */
static void nfc_shdlc_rcv_rej(struct nfc_shdlc *shdlc, int y_nr)
{
	struct sk_buff *skb;

	pr_debug("remote asks retransmition from frame %d\n", y_nr);

	/* only act if y_nr lies inside the unacked window [dnr, ns) */
	if (nfc_shdlc_x_lteq_y_lt_z(shdlc->dnr, y_nr, shdlc->ns)) {
		if (shdlc->t2_active) {
			del_timer_sync(&shdlc->t2_timer);
			shdlc->t2_active = false;
			pr_debug("Stopped T2(retransmit)\n");
		}

		/* frames before y_nr are implicitly acked: drop them */
		if (shdlc->dnr != y_nr) {
			while ((shdlc->dnr = ((shdlc->dnr + 1) % 8)) != y_nr) {
				skb = skb_dequeue(&shdlc->ack_pending_q);
				kfree_skb(skb);
			}
		}

		/* requeue the remaining unacked frames for resend */
		nfc_shdlc_requeue_ack_pending(shdlc);
	}
}
295
/* See spec RR:10.8.3 REJ:10.8.4 — handle a received supervisory frame. */
static void nfc_shdlc_rcv_s_frame(struct nfc_shdlc *shdlc,
				  enum sframe_type s_frame_type, int nr)
{
	struct sk_buff *skb;

	if (shdlc->state != SHDLC_CONNECTED)
		return;

	switch (s_frame_type) {
	case S_FRAME_RR:
		nfc_shdlc_rcv_ack(shdlc, nr);
		if (shdlc->rnr == true) {	/* see SHDLC 10.7.7 */
			shdlc->rnr = false;
			/* remote is ready again: queue an empty frame if
			 * nothing else is pending, to resync the link */
			if (shdlc->send_q.qlen == 0) {
				skb = nfc_shdlc_alloc_skb(shdlc, 0);
				if (skb)
					skb_queue_tail(&shdlc->send_q, skb);
			}
		}
		break;
	case S_FRAME_REJ:
		nfc_shdlc_rcv_rej(shdlc, nr);
		break;
	case S_FRAME_RNR:
		/* remote not ready: take the ack but pause sending */
		nfc_shdlc_rcv_ack(shdlc, nr);
		shdlc->rnr = true;
		break;
	default:
		/* S_FRAME_SREJ is not supported (SHDLC_SREJ_SUPPORT false) */
		break;
	}
}
328
/*
 * Terminate a connect attempt with result r (0 = success, else errno)
 * and wake the thread sleeping in nfc_shdlc_connect().
 */
static void nfc_shdlc_connect_complete(struct nfc_shdlc *shdlc, int r)
{
	pr_debug("result=%d\n", r);

	del_timer_sync(&shdlc->connect_timer);

	if (r == 0) {
		/* fresh link: reset all modulo-8 sequence counters */
		shdlc->ns = 0;
		shdlc->nr = 0;
		shdlc->dnr = 0;

		shdlc->state = SHDLC_CONNECTED;
	} else {
		shdlc->state = SHDLC_DISCONNECTED;

		/*
		 * TODO: Could it be possible that there are pending
		 * executing commands that are waiting for connect to complete
		 * before they can be carried? As connect is a blocking
		 * operation, it would require that the userspace process can
		 * send commands on the same device from a second thread before
		 * the device is up. I don't think that is possible, is it?
		 */
	}

	shdlc->connect_result = r;

	/* releases the wait_event() in nfc_shdlc_connect() */
	wake_up(shdlc->connect_wq);
}
358
359static int nfc_shdlc_connect_initiate(struct nfc_shdlc *shdlc)
360{
361 struct sk_buff *skb;
362
363 pr_debug("\n");
364
365 skb = nfc_shdlc_alloc_skb(shdlc, 2);
366 if (skb == NULL)
367 return -ENOMEM;
368
369 *skb_put(skb, 1) = SHDLC_MAX_WINDOW;
370 *skb_put(skb, 1) = SHDLC_SREJ_SUPPORT ? 1 : 0;
371
372 return nfc_shdlc_send_u_frame(shdlc, skb, U_FRAME_RSET);
373}
374
375static int nfc_shdlc_connect_send_ua(struct nfc_shdlc *shdlc)
376{
377 struct sk_buff *skb;
378
379 pr_debug("\n");
380
381 skb = nfc_shdlc_alloc_skb(shdlc, 0);
382 if (skb == NULL)
383 return -ENOMEM;
384
385 return nfc_shdlc_send_u_frame(shdlc, skb, U_FRAME_UA);
386}
387
/* Handle a received U frame (link management). Always consumes skb. */
static void nfc_shdlc_rcv_u_frame(struct nfc_shdlc *shdlc,
				  struct sk_buff *skb,
				  enum uframe_modifier u_frame_modifier)
{
	u8 w = SHDLC_MAX_WINDOW;
	bool srej_support = SHDLC_SREJ_SUPPORT;
	int r;

	pr_debug("u_frame_modifier=%d\n", u_frame_modifier);

	switch (u_frame_modifier) {
	case U_FRAME_RSET:
		if (shdlc->state == SHDLC_NEGOCIATING) {
			/* we sent RSET, but chip wants to negociate */
			if (skb->len > 0)
				w = skb->data[0];

			if (skb->len > 1)
				srej_support = skb->data[1] & 0x01 ? true :
					       false;

			/* accept only parameters within our capabilities */
			if ((w <= SHDLC_MAX_WINDOW) &&
			    (SHDLC_SREJ_SUPPORT || (srej_support == false))) {
				shdlc->w = w;
				shdlc->srej_support = srej_support;
				r = nfc_shdlc_connect_send_ua(shdlc);
				nfc_shdlc_connect_complete(shdlc, r);
			}
		} else if (shdlc->state > SHDLC_NEGOCIATING) {
			/*
			 * TODO: Chip wants to reset link
			 * send ua, empty skb lists, reset counters
			 * propagate info to HCI layer
			 */
		}
		break;
	case U_FRAME_UA:
		/* remote accepted our RSET: connection established */
		if ((shdlc->state == SHDLC_CONNECTING &&
		     shdlc->connect_tries > 0) ||
		    (shdlc->state == SHDLC_NEGOCIATING))
			nfc_shdlc_connect_complete(shdlc, 0);
		break;
	default:
		break;
	}

	kfree_skb(skb);
}
436
/* Drain rcv_q, dispatching each frame by the type in its control byte. */
static void nfc_shdlc_handle_rcv_queue(struct nfc_shdlc *shdlc)
{
	struct sk_buff *skb;
	u8 control;
	int nr;
	int ns;
	enum sframe_type s_frame_type;
	enum uframe_modifier u_frame_modifier;

	if (shdlc->rcv_q.qlen)
		pr_debug("rcvQlen=%d\n", shdlc->rcv_q.qlen);

	while ((skb = skb_dequeue(&shdlc->rcv_q)) != NULL) {
		/* first byte encodes frame type + sequence numbers */
		control = skb->data[0];
		skb_pull(skb, 1);
		switch (control & SHDLC_CONTROL_HEAD_MASK) {
		case SHDLC_CONTROL_HEAD_I:
		case SHDLC_CONTROL_HEAD_I2:
			/* information frame: payload + ns/nr; callee owns skb */
			ns = (control & SHDLC_CONTROL_NS_MASK) >> 3;
			nr = control & SHDLC_CONTROL_NR_MASK;
			nfc_shdlc_rcv_i_frame(shdlc, skb, ns, nr);
			break;
		case SHDLC_CONTROL_HEAD_S:
			/* supervisory frame: flow control, no payload */
			s_frame_type = (control & SHDLC_CONTROL_TYPE_MASK) >> 3;
			nr = control & SHDLC_CONTROL_NR_MASK;
			nfc_shdlc_rcv_s_frame(shdlc, s_frame_type, nr);
			kfree_skb(skb);
			break;
		case SHDLC_CONTROL_HEAD_U:
			/* unnumbered frame: link management; callee owns skb */
			u_frame_modifier = control & SHDLC_CONTROL_M_MASK;
			nfc_shdlc_rcv_u_frame(shdlc, skb, u_frame_modifier);
			break;
		default:
			pr_err("UNKNOWN Control=%d\n", control);
			kfree_skb(skb);
			break;
		}
	}
}
476
/* Number of frames sent but not yet acked (window usage), modulo 8. */
static int nfc_shdlc_w_used(int ns, int dnr)
{
	return (dnr <= ns) ? ns - dnr : 8 - dnr + ns;
}
488
/* Send frames according to algorithm at spec:10.8.1 */
static void nfc_shdlc_handle_send_queue(struct nfc_shdlc *shdlc)
{
	struct sk_buff *skb;
	int r;
	unsigned long time_sent;

	if (shdlc->send_q.qlen)
		pr_debug
		    ("sendQlen=%d ns=%d dnr=%d rnr=%s w_room=%d unackQlen=%d\n",
		     shdlc->send_q.qlen, shdlc->ns, shdlc->dnr,
		     shdlc->rnr == false ? "false" : "true",
		     shdlc->w - nfc_shdlc_w_used(shdlc->ns, shdlc->dnr),
		     shdlc->ack_pending_q.qlen);

	/* send while there is data, window room, and the remote is ready */
	while (shdlc->send_q.qlen && shdlc->ack_pending_q.qlen < shdlc->w &&
	       (shdlc->rnr == false)) {

		/* our nr will piggy-back on this frame: no separate ack */
		if (shdlc->t1_active) {
			del_timer_sync(&shdlc->t1_timer);
			shdlc->t1_active = false;
			pr_debug("Stopped T1(send ack)\n");
		}

		skb = skb_dequeue(&shdlc->send_q);

		*skb_push(skb, 1) = SHDLC_CONTROL_HEAD_I | (shdlc->ns << 3) |
				    shdlc->nr;

		pr_debug("Sending I-Frame %d, waiting to rcv %d\n", shdlc->ns,
			 shdlc->nr);
		/* SHDLC_DUMP_SKB("shdlc frame written", skb); */

		nfc_shdlc_add_len_crc(skb);

		r = shdlc->ops->xmit(shdlc, skb);
		if (r < 0) {
			/*
			 * TODO: Cannot send, shdlc machine is dead, we
			 * must propagate the information up to HCI.
			 */
			shdlc->hard_fault = r;
			break;
		}

		shdlc->ns = (shdlc->ns + 1) % 8;

		/* stash the send time in skb->cb; reset_t2 reads it back */
		time_sent = jiffies;
		*(unsigned long *)skb->cb = time_sent;

		/* keep the framed skb until the remote acks it */
		skb_queue_tail(&shdlc->ack_pending_q, skb);

		if (shdlc->t2_active == false) {
			shdlc->t2_active = true;
			mod_timer(&shdlc->t2_timer, time_sent +
				  msecs_to_jiffies(SHDLC_T2_VALUE_MS));
			pr_debug("Started T2 (retransmit)\n");
		}
	}
}
549
/* Connect timer callback (softirq): defer handling to the sm worker. */
static void nfc_shdlc_connect_timeout(unsigned long data)
{
	struct nfc_shdlc *shdlc = (struct nfc_shdlc *)data;

	pr_debug("\n");

	queue_work(shdlc->sm_wq, &shdlc->sm_work);
}
558
/* T1 (ack) timer callback (softirq): defer to the sm worker. */
static void nfc_shdlc_t1_timeout(unsigned long data)
{
	struct nfc_shdlc *shdlc = (struct nfc_shdlc *)data;

	pr_debug("SoftIRQ: need to send ack\n");

	queue_work(shdlc->sm_wq, &shdlc->sm_work);
}
567
/* T2 (retransmit) timer callback (softirq): defer to the sm worker. */
static void nfc_shdlc_t2_timeout(unsigned long data)
{
	struct nfc_shdlc *shdlc = (struct nfc_shdlc *)data;

	pr_debug("SoftIRQ: need to retransmit\n");

	queue_work(shdlc->sm_wq, &shdlc->sm_work);
}
576
/*
 * shdlc state machine worker. All state transitions, queue handling and
 * timer expirations are processed here, serialized by state_mutex.
 */
static void nfc_shdlc_sm_work(struct work_struct *work)
{
	struct nfc_shdlc *shdlc = container_of(work, struct nfc_shdlc, sm_work);
	int r;

	pr_debug("\n");

	mutex_lock(&shdlc->state_mutex);

	switch (shdlc->state) {
	case SHDLC_DISCONNECTED:
		/* link is down: discard anything still in flight */
		skb_queue_purge(&shdlc->rcv_q);
		skb_queue_purge(&shdlc->send_q);
		skb_queue_purge(&shdlc->ack_pending_q);
		break;
	case SHDLC_CONNECTING:
		/* up to 5 RSET attempts before giving up with -ETIME */
		if (shdlc->connect_tries++ < 5)
			r = nfc_shdlc_connect_initiate(shdlc);
		else
			r = -ETIME;
		if (r < 0)
			nfc_shdlc_connect_complete(shdlc, r);
		else {
			mod_timer(&shdlc->connect_timer, jiffies +
				  msecs_to_jiffies(SHDLC_CONNECT_VALUE_MS));

			shdlc->state = SHDLC_NEGOCIATING;
		}
		break;
	case SHDLC_NEGOCIATING:
		/* connect timer elapsed without an answer: retry */
		if (timer_pending(&shdlc->connect_timer) == 0) {
			shdlc->state = SHDLC_CONNECTING;
			queue_work(shdlc->sm_wq, &shdlc->sm_work);
		}

		nfc_shdlc_handle_rcv_queue(shdlc);
		break;
	case SHDLC_CONNECTED:
		nfc_shdlc_handle_rcv_queue(shdlc);
		nfc_shdlc_handle_send_queue(shdlc);

		/* T1 elapsed: no frame piggy-backed our ack, send an RR */
		if (shdlc->t1_active && timer_pending(&shdlc->t1_timer) == 0) {
			pr_debug
			    ("Handle T1(send ack) elapsed (T1 now inactive)\n");

			shdlc->t1_active = false;
			r = nfc_shdlc_send_s_frame(shdlc, S_FRAME_RR,
						   shdlc->nr);
			if (r < 0)
				shdlc->hard_fault = r;
		}

		/* T2 elapsed: retransmit all unacked frames */
		if (shdlc->t2_active && timer_pending(&shdlc->t2_timer) == 0) {
			pr_debug
			    ("Handle T2(retransmit) elapsed (T2 inactive)\n");

			shdlc->t2_active = false;

			nfc_shdlc_requeue_ack_pending(shdlc);
			nfc_shdlc_handle_send_queue(shdlc);
		}

		if (shdlc->hard_fault) {
			/*
			 * TODO: Handle hard_fault that occured during
			 * this invocation of the shdlc worker
			 */
		}
		break;
	default:
		break;
	}
	mutex_unlock(&shdlc->state_mutex);
}
651
/*
 * Called from syscall context to establish shdlc link. Sleeps until
 * link is ready or failure. Returns 0 on success, negative errno on
 * failure (e.g. -ETIME when all connect attempts time out).
 */
static int nfc_shdlc_connect(struct nfc_shdlc *shdlc)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(connect_wq);

	pr_debug("\n");

	mutex_lock(&shdlc->state_mutex);

	shdlc->state = SHDLC_CONNECTING;
	shdlc->connect_wq = &connect_wq;
	shdlc->connect_tries = 0;
	shdlc->connect_result = 1;	/* 1 == "still in progress" sentinel */

	mutex_unlock(&shdlc->state_mutex);

	queue_work(shdlc->sm_wq, &shdlc->sm_work);

	/* sm worker ends with nfc_shdlc_connect_complete(), which wakes us */
	wait_event(connect_wq, shdlc->connect_result != 1);

	return shdlc->connect_result;
}
677
678static void nfc_shdlc_disconnect(struct nfc_shdlc *shdlc)
679{
680 pr_debug("\n");
681
682 mutex_lock(&shdlc->state_mutex);
683
684 shdlc->state = SHDLC_DISCONNECTED;
685
686 mutex_unlock(&shdlc->state_mutex);
687
688 queue_work(shdlc->sm_wq, &shdlc->sm_work);
689}
690
691/*
692 * Receive an incoming shdlc frame. Frame has already been crc-validated.
693 * skb contains only LLC header and payload.
694 * If skb == NULL, it is a notification that the link below is dead.
695 */
696void nfc_shdlc_recv_frame(struct nfc_shdlc *shdlc, struct sk_buff *skb)
697{
698 if (skb == NULL) {
699 pr_err("NULL Frame -> link is dead\n");
700 shdlc->hard_fault = -EREMOTEIO;
701 } else {
702 SHDLC_DUMP_SKB("incoming frame", skb);
703 skb_queue_tail(&shdlc->rcv_q, skb);
704 }
705
706 queue_work(shdlc->sm_wq, &shdlc->sm_work);
707}
708EXPORT_SYMBOL(nfc_shdlc_recv_frame);
709
710static int nfc_shdlc_open(struct nfc_hci_dev *hdev)
711{
712 struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
713 int r;
714
715 pr_debug("\n");
716
717 if (shdlc->ops->open) {
718 r = shdlc->ops->open(shdlc);
719 if (r < 0)
720 return r;
721 }
722
723 r = nfc_shdlc_connect(shdlc);
724 if (r < 0 && shdlc->ops->close)
725 shdlc->ops->close(shdlc);
726
727 return r;
728}
729
730static void nfc_shdlc_close(struct nfc_hci_dev *hdev)
731{
732 struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
733
734 pr_debug("\n");
735
736 nfc_shdlc_disconnect(shdlc);
737
738 if (shdlc->ops->close)
739 shdlc->ops->close(shdlc);
740}
741
742static int nfc_shdlc_hci_ready(struct nfc_hci_dev *hdev)
743{
744 struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
745 int r = 0;
746
747 pr_debug("\n");
748
749 if (shdlc->ops->hci_ready)
750 r = shdlc->ops->hci_ready(shdlc);
751
752 return r;
753}
754
755static int nfc_shdlc_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb)
756{
757 struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
758
759 SHDLC_DUMP_SKB("queuing HCP packet to shdlc", skb);
760
761 skb_queue_tail(&shdlc->send_q, skb);
762
763 queue_work(shdlc->sm_wq, &shdlc->sm_work);
764
765 return 0;
766}
767
768static int nfc_shdlc_start_poll(struct nfc_hci_dev *hdev, u32 protocols)
769{
770 struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
771
772 pr_debug("\n");
773
774 if (shdlc->ops->start_poll)
775 return shdlc->ops->start_poll(shdlc, protocols);
776
777 return 0;
778}
779
780static int nfc_shdlc_target_from_gate(struct nfc_hci_dev *hdev, u8 gate,
781 struct nfc_target *target)
782{
783 struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
784
785 if (shdlc->ops->target_from_gate)
786 return shdlc->ops->target_from_gate(shdlc, gate, target);
787
788 return -EPERM;
789}
790
791static int nfc_shdlc_complete_target_discovered(struct nfc_hci_dev *hdev,
792 u8 gate,
793 struct nfc_target *target)
794{
795 struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
796
797 pr_debug("\n");
798
799 if (shdlc->ops->complete_target_discovered)
800 return shdlc->ops->complete_target_discovered(shdlc, gate,
801 target);
802
803 return 0;
804}
805
806static int nfc_shdlc_data_exchange(struct nfc_hci_dev *hdev,
807 struct nfc_target *target,
808 struct sk_buff *skb,
809 struct sk_buff **res_skb)
810{
811 struct nfc_shdlc *shdlc = nfc_hci_get_clientdata(hdev);
812
813 if (shdlc->ops->data_exchange)
814 return shdlc->ops->data_exchange(shdlc, target, skb, res_skb);
815
816 return -EPERM;
817}
818
/* HCI core callbacks, all forwarding into the shdlc layer above. */
static struct nfc_hci_ops shdlc_ops = {
	.open = nfc_shdlc_open,
	.close = nfc_shdlc_close,
	.hci_ready = nfc_shdlc_hci_ready,
	.xmit = nfc_shdlc_xmit,
	.start_poll = nfc_shdlc_start_poll,
	.target_from_gate = nfc_shdlc_target_from_gate,
	.complete_target_discovered = nfc_shdlc_complete_target_discovered,
	.data_exchange = nfc_shdlc_data_exchange,
};
829
/*
 * Allocate and register an shdlc-backed HCI device.
 * Returns the new instance, or NULL on any failure (resources already
 * acquired are released via the goto-cleanup chain at the bottom).
 */
struct nfc_shdlc *nfc_shdlc_allocate(struct nfc_shdlc_ops *ops,
				     struct nfc_hci_init_data *init_data,
				     u32 protocols,
				     int tx_headroom, int tx_tailroom,
				     int max_link_payload, const char *devname)
{
	struct nfc_shdlc *shdlc;
	int r;
	char name[32];

	/* xmit is the only mandatory driver op */
	if (ops->xmit == NULL)
		return NULL;

	shdlc = kzalloc(sizeof(struct nfc_shdlc), GFP_KERNEL);
	if (shdlc == NULL)
		return NULL;

	mutex_init(&shdlc->state_mutex);
	shdlc->ops = ops;
	shdlc->state = SHDLC_DISCONNECTED;

	/* all three timers just kick the sm worker on expiry */
	init_timer(&shdlc->connect_timer);
	shdlc->connect_timer.data = (unsigned long)shdlc;
	shdlc->connect_timer.function = nfc_shdlc_connect_timeout;

	init_timer(&shdlc->t1_timer);
	shdlc->t1_timer.data = (unsigned long)shdlc;
	shdlc->t1_timer.function = nfc_shdlc_t1_timeout;

	init_timer(&shdlc->t2_timer);
	shdlc->t2_timer.data = (unsigned long)shdlc;
	shdlc->t2_timer.function = nfc_shdlc_t2_timeout;

	shdlc->w = SHDLC_MAX_WINDOW;
	shdlc->srej_support = SHDLC_SREJ_SUPPORT;

	skb_queue_head_init(&shdlc->rcv_q);
	skb_queue_head_init(&shdlc->send_q);
	skb_queue_head_init(&shdlc->ack_pending_q);

	/* single-threaded workqueue: serializes the whole state machine */
	INIT_WORK(&shdlc->sm_work, nfc_shdlc_sm_work);
	snprintf(name, sizeof(name), "%s_shdlc_sm_wq", devname);
	shdlc->sm_wq = alloc_workqueue(name, WQ_NON_REENTRANT | WQ_UNBOUND |
				       WQ_MEM_RECLAIM, 1);
	if (shdlc->sm_wq == NULL)
		goto err_allocwq;

	shdlc->client_headroom = tx_headroom;
	shdlc->client_tailroom = tx_tailroom;

	/* HCI device reserves extra room for the shdlc framing bytes */
	shdlc->hdev = nfc_hci_allocate_device(&shdlc_ops, init_data, protocols,
					      tx_headroom + SHDLC_LLC_HEAD_ROOM,
					      tx_tailroom + SHDLC_LLC_TAIL_ROOM,
					      max_link_payload);
	if (shdlc->hdev == NULL)
		goto err_allocdev;

	nfc_hci_set_clientdata(shdlc->hdev, shdlc);

	r = nfc_hci_register_device(shdlc->hdev);
	if (r < 0)
		goto err_regdev;

	return shdlc;

err_regdev:
	nfc_hci_free_device(shdlc->hdev);

err_allocdev:
	destroy_workqueue(shdlc->sm_wq);

err_allocwq:
	kfree(shdlc);

	return NULL;
}
EXPORT_SYMBOL(nfc_shdlc_allocate);
907
/* Unregister and release everything allocated by nfc_shdlc_allocate(). */
void nfc_shdlc_free(struct nfc_shdlc *shdlc)
{
	pr_debug("\n");

	/* TODO: Check that this cannot be called while still in use */

	nfc_hci_unregister_device(shdlc->hdev);
	nfc_hci_free_device(shdlc->hdev);

	/* the sm worker is gone after this: queues can be drained safely */
	destroy_workqueue(shdlc->sm_wq);

	skb_queue_purge(&shdlc->rcv_q);
	skb_queue_purge(&shdlc->send_q);
	skb_queue_purge(&shdlc->ack_pending_q);

	kfree(shdlc);
}
EXPORT_SYMBOL(nfc_shdlc_free);
926
/* Attach driver private data to the shdlc instance. */
void nfc_shdlc_set_clientdata(struct nfc_shdlc *shdlc, void *clientdata)
{
	pr_debug("\n");

	shdlc->clientdata = clientdata;
}
EXPORT_SYMBOL(nfc_shdlc_set_clientdata);
934
/* Retrieve driver private data set with nfc_shdlc_set_clientdata(). */
void *nfc_shdlc_get_clientdata(struct nfc_shdlc *shdlc)
{
	return shdlc->clientdata;
}
EXPORT_SYMBOL(nfc_shdlc_get_clientdata);
940
/* Return the HCI device backing this shdlc link. */
struct nfc_hci_dev *nfc_shdlc_get_hci_dev(struct nfc_shdlc *shdlc)
{
	return shdlc->hdev;
}
EXPORT_SYMBOL(nfc_shdlc_get_hci_dev);
diff --git a/net/nfc/llcp/commands.c b/net/nfc/llcp/commands.c
index ef10ffcb4b6f..11a3b7d98dc5 100644
--- a/net/nfc/llcp/commands.c
+++ b/net/nfc/llcp/commands.c
@@ -102,7 +102,7 @@ u8 *nfc_llcp_build_tlv(u8 type, u8 *value, u8 value_length, u8 *tlv_length)
102 length = llcp_tlv_length[type]; 102 length = llcp_tlv_length[type];
103 if (length == 0 && value_length == 0) 103 if (length == 0 && value_length == 0)
104 return NULL; 104 return NULL;
105 else 105 else if (length == 0)
106 length = value_length; 106 length = value_length;
107 107
108 *tlv_length = 2 + length; 108 *tlv_length = 2 + length;
@@ -248,7 +248,7 @@ int nfc_llcp_disconnect(struct nfc_llcp_sock *sock)
248 248
249 skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE); 249 skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
250 250
251 skb = llcp_add_header(skb, sock->ssap, sock->dsap, LLCP_PDU_DISC); 251 skb = llcp_add_header(skb, sock->dsap, sock->ssap, LLCP_PDU_DISC);
252 252
253 skb_queue_tail(&local->tx_queue, skb); 253 skb_queue_tail(&local->tx_queue, skb);
254 254
@@ -416,7 +416,7 @@ int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason)
416 416
417 skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE); 417 skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
418 418
419 skb = llcp_add_header(skb, ssap, dsap, LLCP_PDU_DM); 419 skb = llcp_add_header(skb, dsap, ssap, LLCP_PDU_DM);
420 420
421 memcpy(skb_put(skb, 1), &reason, 1); 421 memcpy(skb_put(skb, 1), &reason, 1);
422 422
@@ -522,7 +522,7 @@ int nfc_llcp_send_rr(struct nfc_llcp_sock *sock)
522 522
523 skb_put(skb, LLCP_SEQUENCE_SIZE); 523 skb_put(skb, LLCP_SEQUENCE_SIZE);
524 524
525 skb->data[2] = sock->recv_n % 16; 525 skb->data[2] = sock->recv_n;
526 526
527 skb_queue_head(&local->tx_queue, skb); 527 skb_queue_head(&local->tx_queue, skb);
528 528
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c
index 17a578f641f1..92988aa620dc 100644
--- a/net/nfc/llcp/llcp.c
+++ b/net/nfc/llcp/llcp.c
@@ -307,6 +307,8 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
307 u8 *gb_cur, *version_tlv, version, version_length; 307 u8 *gb_cur, *version_tlv, version, version_length;
308 u8 *lto_tlv, lto, lto_length; 308 u8 *lto_tlv, lto, lto_length;
309 u8 *wks_tlv, wks_length; 309 u8 *wks_tlv, wks_length;
310 u8 *miux_tlv, miux_length;
311 __be16 miux;
310 u8 gb_len = 0; 312 u8 gb_len = 0;
311 313
312 version = LLCP_VERSION_11; 314 version = LLCP_VERSION_11;
@@ -316,7 +318,7 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
316 318
317 /* 1500 ms */ 319 /* 1500 ms */
318 lto = 150; 320 lto = 150;
319 lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, &lto, 1, &lto_length); 321 lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_LTO, &lto, 1, &lto_length);
320 gb_len += lto_length; 322 gb_len += lto_length;
321 323
322 pr_debug("Local wks 0x%lx\n", local->local_wks); 324 pr_debug("Local wks 0x%lx\n", local->local_wks);
@@ -324,6 +326,11 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
324 &wks_length); 326 &wks_length);
325 gb_len += wks_length; 327 gb_len += wks_length;
326 328
329 miux = cpu_to_be16(LLCP_MAX_MIUX);
330 miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
331 &miux_length);
332 gb_len += miux_length;
333
327 gb_len += ARRAY_SIZE(llcp_magic); 334 gb_len += ARRAY_SIZE(llcp_magic);
328 335
329 if (gb_len > NFC_MAX_GT_LEN) { 336 if (gb_len > NFC_MAX_GT_LEN) {
@@ -345,6 +352,9 @@ static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
345 memcpy(gb_cur, wks_tlv, wks_length); 352 memcpy(gb_cur, wks_tlv, wks_length);
346 gb_cur += wks_length; 353 gb_cur += wks_length;
347 354
355 memcpy(gb_cur, miux_tlv, miux_length);
356 gb_cur += miux_length;
357
348 kfree(version_tlv); 358 kfree(version_tlv);
349 kfree(lto_tlv); 359 kfree(lto_tlv);
350 360
@@ -388,6 +398,9 @@ static void nfc_llcp_tx_work(struct work_struct *work)
388 skb = skb_dequeue(&local->tx_queue); 398 skb = skb_dequeue(&local->tx_queue);
389 if (skb != NULL) { 399 if (skb != NULL) {
390 pr_debug("Sending pending skb\n"); 400 pr_debug("Sending pending skb\n");
401 print_hex_dump(KERN_DEBUG, "LLCP Tx: ", DUMP_PREFIX_OFFSET,
402 16, 1, skb->data, skb->len, true);
403
391 nfc_data_exchange(local->dev, local->target_idx, 404 nfc_data_exchange(local->dev, local->target_idx,
392 skb, nfc_llcp_recv, local); 405 skb, nfc_llcp_recv, local);
393 } else { 406 } else {
@@ -425,7 +438,7 @@ static u8 nfc_llcp_nr(struct sk_buff *pdu)
425 438
426static void nfc_llcp_set_nrns(struct nfc_llcp_sock *sock, struct sk_buff *pdu) 439static void nfc_llcp_set_nrns(struct nfc_llcp_sock *sock, struct sk_buff *pdu)
427{ 440{
428 pdu->data[2] = (sock->send_n << 4) | (sock->recv_n % 16); 441 pdu->data[2] = (sock->send_n << 4) | (sock->recv_n);
429 sock->send_n = (sock->send_n + 1) % 16; 442 sock->send_n = (sock->send_n + 1) % 16;
430 sock->recv_ack_n = (sock->recv_n - 1) % 16; 443 sock->recv_ack_n = (sock->recv_n - 1) % 16;
431} 444}
@@ -814,6 +827,10 @@ static void nfc_llcp_rx_work(struct work_struct *work)
814 827
815 pr_debug("ptype 0x%x dsap 0x%x ssap 0x%x\n", ptype, dsap, ssap); 828 pr_debug("ptype 0x%x dsap 0x%x ssap 0x%x\n", ptype, dsap, ssap);
816 829
830 if (ptype != LLCP_PDU_SYMM)
831 print_hex_dump(KERN_DEBUG, "LLCP Rx: ", DUMP_PREFIX_OFFSET,
832 16, 1, skb->data, skb->len, true);
833
817 switch (ptype) { 834 switch (ptype) {
818 case LLCP_PDU_SYMM: 835 case LLCP_PDU_SYMM:
819 pr_debug("SYMM\n"); 836 pr_debug("SYMM\n");
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 9ec065bb9ee1..8737c2089fdd 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -477,7 +477,7 @@ static int nci_activate_target(struct nfc_dev *nfc_dev, __u32 target_idx,
477 } 477 }
478 478
479 if (atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) { 479 if (atomic_read(&ndev->state) == NCI_W4_HOST_SELECT) {
480 param.rf_discovery_id = target->idx; 480 param.rf_discovery_id = target->logical_idx;
481 481
482 if (protocol == NFC_PROTO_JEWEL) 482 if (protocol == NFC_PROTO_JEWEL)
483 param.rf_protocol = NCI_RF_PROTOCOL_T1T; 483 param.rf_protocol = NCI_RF_PROTOCOL_T1T;
diff --git a/net/nfc/nci/ntf.c b/net/nfc/nci/ntf.c
index 2e3dee42196d..99e1632e6aac 100644
--- a/net/nfc/nci/ntf.c
+++ b/net/nfc/nci/ntf.c
@@ -227,7 +227,7 @@ static void nci_add_new_target(struct nci_dev *ndev,
227 227
228 for (i = 0; i < ndev->n_targets; i++) { 228 for (i = 0; i < ndev->n_targets; i++) {
229 target = &ndev->targets[i]; 229 target = &ndev->targets[i];
230 if (target->idx == ntf->rf_discovery_id) { 230 if (target->logical_idx == ntf->rf_discovery_id) {
231 /* This target already exists, add the new protocol */ 231 /* This target already exists, add the new protocol */
232 nci_add_new_protocol(ndev, target, ntf->rf_protocol, 232 nci_add_new_protocol(ndev, target, ntf->rf_protocol,
233 ntf->rf_tech_and_mode, 233 ntf->rf_tech_and_mode,
@@ -248,10 +248,10 @@ static void nci_add_new_target(struct nci_dev *ndev,
248 ntf->rf_tech_and_mode, 248 ntf->rf_tech_and_mode,
249 &ntf->rf_tech_specific_params); 249 &ntf->rf_tech_specific_params);
250 if (!rc) { 250 if (!rc) {
251 target->idx = ntf->rf_discovery_id; 251 target->logical_idx = ntf->rf_discovery_id;
252 ndev->n_targets++; 252 ndev->n_targets++;
253 253
254 pr_debug("target_idx %d, n_targets %d\n", target->idx, 254 pr_debug("logical idx %d, n_targets %d\n", target->logical_idx,
255 ndev->n_targets); 255 ndev->n_targets);
256 } 256 }
257} 257}
@@ -372,10 +372,11 @@ static void nci_target_auto_activated(struct nci_dev *ndev,
372 if (rc) 372 if (rc)
373 return; 373 return;
374 374
375 target->idx = ntf->rf_discovery_id; 375 target->logical_idx = ntf->rf_discovery_id;
376 ndev->n_targets++; 376 ndev->n_targets++;
377 377
378 pr_debug("target_idx %d, n_targets %d\n", target->idx, ndev->n_targets); 378 pr_debug("logical idx %d, n_targets %d\n",
379 target->logical_idx, ndev->n_targets);
379 380
380 nfc_targets_found(ndev->nfc_dev, ndev->targets, ndev->n_targets); 381 nfc_targets_found(ndev->nfc_dev, ndev->targets, ndev->n_targets);
381} 382}
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 6404052d6c07..f1829f6ae9c5 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -63,19 +63,23 @@ static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target,
63 63
64 genl_dump_check_consistent(cb, hdr, &nfc_genl_family); 64 genl_dump_check_consistent(cb, hdr, &nfc_genl_family);
65 65
66 NLA_PUT_U32(msg, NFC_ATTR_TARGET_INDEX, target->idx); 66 if (nla_put_u32(msg, NFC_ATTR_TARGET_INDEX, target->idx) ||
67 NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, target->supported_protocols); 67 nla_put_u32(msg, NFC_ATTR_PROTOCOLS, target->supported_protocols) ||
68 NLA_PUT_U16(msg, NFC_ATTR_TARGET_SENS_RES, target->sens_res); 68 nla_put_u16(msg, NFC_ATTR_TARGET_SENS_RES, target->sens_res) ||
69 NLA_PUT_U8(msg, NFC_ATTR_TARGET_SEL_RES, target->sel_res); 69 nla_put_u8(msg, NFC_ATTR_TARGET_SEL_RES, target->sel_res))
70 if (target->nfcid1_len > 0) 70 goto nla_put_failure;
71 NLA_PUT(msg, NFC_ATTR_TARGET_NFCID1, target->nfcid1_len, 71 if (target->nfcid1_len > 0 &&
72 target->nfcid1); 72 nla_put(msg, NFC_ATTR_TARGET_NFCID1, target->nfcid1_len,
73 if (target->sensb_res_len > 0) 73 target->nfcid1))
74 NLA_PUT(msg, NFC_ATTR_TARGET_SENSB_RES, target->sensb_res_len, 74 goto nla_put_failure;
75 target->sensb_res); 75 if (target->sensb_res_len > 0 &&
76 if (target->sensf_res_len > 0) 76 nla_put(msg, NFC_ATTR_TARGET_SENSB_RES, target->sensb_res_len,
77 NLA_PUT(msg, NFC_ATTR_TARGET_SENSF_RES, target->sensf_res_len, 77 target->sensb_res))
78 target->sensf_res); 78 goto nla_put_failure;
79 if (target->sensf_res_len > 0 &&
80 nla_put(msg, NFC_ATTR_TARGET_SENSF_RES, target->sensf_res_len,
81 target->sensf_res))
82 goto nla_put_failure;
79 83
80 return genlmsg_end(msg, hdr); 84 return genlmsg_end(msg, hdr);
81 85
@@ -170,7 +174,8 @@ int nfc_genl_targets_found(struct nfc_dev *dev)
170 if (!hdr) 174 if (!hdr)
171 goto free_msg; 175 goto free_msg;
172 176
173 NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx); 177 if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
178 goto nla_put_failure;
174 179
175 genlmsg_end(msg, hdr); 180 genlmsg_end(msg, hdr);
176 181
@@ -183,6 +188,37 @@ free_msg:
183 return -EMSGSIZE; 188 return -EMSGSIZE;
184} 189}
185 190
191int nfc_genl_target_lost(struct nfc_dev *dev, u32 target_idx)
192{
193 struct sk_buff *msg;
194 void *hdr;
195
196 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
197 if (!msg)
198 return -ENOMEM;
199
200 hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
201 NFC_EVENT_TARGET_LOST);
202 if (!hdr)
203 goto free_msg;
204
205 if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) ||
206 nla_put_u32(msg, NFC_ATTR_TARGET_INDEX, target_idx))
207 goto nla_put_failure;
208
209 genlmsg_end(msg, hdr);
210
211 genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_KERNEL);
212
213 return 0;
214
215nla_put_failure:
216 genlmsg_cancel(msg, hdr);
217free_msg:
218 nlmsg_free(msg);
219 return -EMSGSIZE;
220}
221
186int nfc_genl_device_added(struct nfc_dev *dev) 222int nfc_genl_device_added(struct nfc_dev *dev)
187{ 223{
188 struct sk_buff *msg; 224 struct sk_buff *msg;
@@ -197,10 +233,11 @@ int nfc_genl_device_added(struct nfc_dev *dev)
197 if (!hdr) 233 if (!hdr)
198 goto free_msg; 234 goto free_msg;
199 235
200 NLA_PUT_STRING(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)); 236 if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) ||
201 NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx); 237 nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) ||
202 NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols); 238 nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) ||
203 NLA_PUT_U8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up); 239 nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up))
240 goto nla_put_failure;
204 241
205 genlmsg_end(msg, hdr); 242 genlmsg_end(msg, hdr);
206 243
@@ -229,7 +266,8 @@ int nfc_genl_device_removed(struct nfc_dev *dev)
229 if (!hdr) 266 if (!hdr)
230 goto free_msg; 267 goto free_msg;
231 268
232 NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx); 269 if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
270 goto nla_put_failure;
233 271
234 genlmsg_end(msg, hdr); 272 genlmsg_end(msg, hdr);
235 273
@@ -259,10 +297,11 @@ static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev,
259 if (cb) 297 if (cb)
260 genl_dump_check_consistent(cb, hdr, &nfc_genl_family); 298 genl_dump_check_consistent(cb, hdr, &nfc_genl_family);
261 299
262 NLA_PUT_STRING(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)); 300 if (nla_put_string(msg, NFC_ATTR_DEVICE_NAME, nfc_device_name(dev)) ||
263 NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx); 301 nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) ||
264 NLA_PUT_U32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols); 302 nla_put_u32(msg, NFC_ATTR_PROTOCOLS, dev->supported_protocols) ||
265 NLA_PUT_U8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up); 303 nla_put_u8(msg, NFC_ATTR_DEVICE_POWERED, dev->dev_up))
304 goto nla_put_failure;
266 305
267 return genlmsg_end(msg, hdr); 306 return genlmsg_end(msg, hdr);
268 307
@@ -339,11 +378,14 @@ int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx,
339 if (!hdr) 378 if (!hdr)
340 goto free_msg; 379 goto free_msg;
341 380
342 NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx); 381 if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
343 if (rf_mode == NFC_RF_INITIATOR) 382 goto nla_put_failure;
344 NLA_PUT_U32(msg, NFC_ATTR_TARGET_INDEX, target_idx); 383 if (rf_mode == NFC_RF_INITIATOR &&
345 NLA_PUT_U8(msg, NFC_ATTR_COMM_MODE, comm_mode); 384 nla_put_u32(msg, NFC_ATTR_TARGET_INDEX, target_idx))
346 NLA_PUT_U8(msg, NFC_ATTR_RF_MODE, rf_mode); 385 goto nla_put_failure;
386 if (nla_put_u8(msg, NFC_ATTR_COMM_MODE, comm_mode) ||
387 nla_put_u8(msg, NFC_ATTR_RF_MODE, rf_mode))
388 goto nla_put_failure;
347 389
348 genlmsg_end(msg, hdr); 390 genlmsg_end(msg, hdr);
349 391
@@ -376,7 +418,8 @@ int nfc_genl_dep_link_down_event(struct nfc_dev *dev)
376 if (!hdr) 418 if (!hdr)
377 goto free_msg; 419 goto free_msg;
378 420
379 NLA_PUT_U32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx); 421 if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
422 goto nla_put_failure;
380 423
381 genlmsg_end(msg, hdr); 424 genlmsg_end(msg, hdr);
382 425
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index ec8794c1099c..7d589a81942e 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -119,6 +119,7 @@ void nfc_genl_data_init(struct nfc_genl_data *genl_data);
119void nfc_genl_data_exit(struct nfc_genl_data *genl_data); 119void nfc_genl_data_exit(struct nfc_genl_data *genl_data);
120 120
121int nfc_genl_targets_found(struct nfc_dev *dev); 121int nfc_genl_targets_found(struct nfc_dev *dev);
122int nfc_genl_target_lost(struct nfc_dev *dev, u32 target_idx);
122 123
123int nfc_genl_device_added(struct nfc_dev *dev); 124int nfc_genl_device_added(struct nfc_dev *dev);
124int nfc_genl_device_removed(struct nfc_dev *dev); 125int nfc_genl_device_removed(struct nfc_dev *dev);
@@ -127,7 +128,7 @@ int nfc_genl_dep_link_up_event(struct nfc_dev *dev, u32 target_idx,
127 u8 comm_mode, u8 rf_mode); 128 u8 comm_mode, u8 rf_mode);
128int nfc_genl_dep_link_down_event(struct nfc_dev *dev); 129int nfc_genl_dep_link_down_event(struct nfc_dev *dev);
129 130
130struct nfc_dev *nfc_get_device(unsigned idx); 131struct nfc_dev *nfc_get_device(unsigned int idx);
131 132
132static inline void nfc_put_device(struct nfc_dev *dev) 133static inline void nfc_put_device(struct nfc_dev *dev)
133{ 134{
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index 5a839ceb2e82..ec1134c9e07f 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -92,6 +92,12 @@ static int rawsock_connect(struct socket *sock, struct sockaddr *_addr,
92 goto error; 92 goto error;
93 } 93 }
94 94
95 if (addr->target_idx > dev->target_next_idx - 1 ||
96 addr->target_idx < dev->target_next_idx - dev->n_targets) {
97 rc = -EINVAL;
98 goto error;
99 }
100
95 rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol); 101 rc = nfc_activate_target(dev, addr->target_idx, addr->nfc_protocol);
96 if (rc) 102 if (rc)
97 goto put_dev; 103 goto put_dev;
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index e44e631ea952..f86de29979ef 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -778,15 +778,18 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
778 tcp_flags = flow->tcp_flags; 778 tcp_flags = flow->tcp_flags;
779 spin_unlock_bh(&flow->lock); 779 spin_unlock_bh(&flow->lock);
780 780
781 if (used) 781 if (used &&
782 NLA_PUT_U64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)); 782 nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
783 goto nla_put_failure;
783 784
784 if (stats.n_packets) 785 if (stats.n_packets &&
785 NLA_PUT(skb, OVS_FLOW_ATTR_STATS, 786 nla_put(skb, OVS_FLOW_ATTR_STATS,
786 sizeof(struct ovs_flow_stats), &stats); 787 sizeof(struct ovs_flow_stats), &stats))
788 goto nla_put_failure;
787 789
788 if (tcp_flags) 790 if (tcp_flags &&
789 NLA_PUT_U8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags); 791 nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags))
792 goto nla_put_failure;
790 793
791 /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if 794 /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
792 * this is the first flow to be dumped into 'skb'. This is unusual for 795 * this is the first flow to be dumped into 'skb'. This is unusual for
@@ -1168,7 +1171,8 @@ static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1168 goto nla_put_failure; 1171 goto nla_put_failure;
1169 1172
1170 get_dp_stats(dp, &dp_stats); 1173 get_dp_stats(dp, &dp_stats);
1171 NLA_PUT(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats); 1174 if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats), &dp_stats))
1175 goto nla_put_failure;
1172 1176
1173 return genlmsg_end(skb, ovs_header); 1177 return genlmsg_end(skb, ovs_header);
1174 1178
@@ -1468,14 +1472,16 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
1468 1472
1469 ovs_header->dp_ifindex = get_dpifindex(vport->dp); 1473 ovs_header->dp_ifindex = get_dpifindex(vport->dp);
1470 1474
1471 NLA_PUT_U32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no); 1475 if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
1472 NLA_PUT_U32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type); 1476 nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
1473 NLA_PUT_STRING(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)); 1477 nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) ||
1474 NLA_PUT_U32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid); 1478 nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid))
1479 goto nla_put_failure;
1475 1480
1476 ovs_vport_get_stats(vport, &vport_stats); 1481 ovs_vport_get_stats(vport, &vport_stats);
1477 NLA_PUT(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats), 1482 if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
1478 &vport_stats); 1483 &vport_stats))
1484 goto nla_put_failure;
1479 1485
1480 err = ovs_vport_get_options(vport, skb); 1486 err = ovs_vport_get_options(vport, skb);
1481 if (err == -EMSGSIZE) 1487 if (err == -EMSGSIZE)
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 1252c3081ef1..7cb416381e87 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -1174,11 +1174,13 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
1174 struct ovs_key_ethernet *eth_key; 1174 struct ovs_key_ethernet *eth_key;
1175 struct nlattr *nla, *encap; 1175 struct nlattr *nla, *encap;
1176 1176
1177 if (swkey->phy.priority) 1177 if (swkey->phy.priority &&
1178 NLA_PUT_U32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority); 1178 nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority))
1179 goto nla_put_failure;
1179 1180
1180 if (swkey->phy.in_port != USHRT_MAX) 1181 if (swkey->phy.in_port != USHRT_MAX &&
1181 NLA_PUT_U32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port); 1182 nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port))
1183 goto nla_put_failure;
1182 1184
1183 nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key)); 1185 nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
1184 if (!nla) 1186 if (!nla)
@@ -1188,8 +1190,9 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
1188 memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN); 1190 memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN);
1189 1191
1190 if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) { 1192 if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
1191 NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q)); 1193 if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q)) ||
1192 NLA_PUT_BE16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci); 1194 nla_put_be16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci))
1195 goto nla_put_failure;
1193 encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP); 1196 encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
1194 if (!swkey->eth.tci) 1197 if (!swkey->eth.tci)
1195 goto unencap; 1198 goto unencap;
@@ -1200,7 +1203,8 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
1200 if (swkey->eth.type == htons(ETH_P_802_2)) 1203 if (swkey->eth.type == htons(ETH_P_802_2))
1201 goto unencap; 1204 goto unencap;
1202 1205
1203 NLA_PUT_BE16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type); 1206 if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type))
1207 goto nla_put_failure;
1204 1208
1205 if (swkey->eth.type == htons(ETH_P_IP)) { 1209 if (swkey->eth.type == htons(ETH_P_IP)) {
1206 struct ovs_key_ipv4 *ipv4_key; 1210 struct ovs_key_ipv4 *ipv4_key;
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index c1068aed03d1..5920bda4ab6b 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -140,9 +140,9 @@ int ovs_netdev_get_ifindex(const struct vport *vport)
140 return netdev_vport->dev->ifindex; 140 return netdev_vport->dev->ifindex;
141} 141}
142 142
143static unsigned packet_length(const struct sk_buff *skb) 143static unsigned int packet_length(const struct sk_buff *skb)
144{ 144{
145 unsigned length = skb->len - ETH_HLEN; 145 unsigned int length = skb->len - ETH_HLEN;
146 146
147 if (skb->protocol == htons(ETH_P_8021Q)) 147 if (skb->protocol == htons(ETH_P_8021Q))
148 length -= VLAN_HLEN; 148 length -= VLAN_HLEN;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 4f2c0df79563..0f661745df0f 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1654,7 +1654,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
1654 skb->data = skb_head; 1654 skb->data = skb_head;
1655 skb->len = skb_len; 1655 skb->len = skb_len;
1656 } 1656 }
1657 kfree_skb(skb); 1657 consume_skb(skb);
1658 skb = nskb; 1658 skb = nskb;
1659 } 1659 }
1660 1660
@@ -1764,7 +1764,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
1764 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 + 1764 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
1765 po->tp_reserve; 1765 po->tp_reserve;
1766 } else { 1766 } else {
1767 unsigned maclen = skb_network_offset(skb); 1767 unsigned int maclen = skb_network_offset(skb);
1768 netoff = TPACKET_ALIGN(po->tp_hdrlen + 1768 netoff = TPACKET_ALIGN(po->tp_hdrlen +
1769 (maclen < 16 ? 16 : maclen)) + 1769 (maclen < 16 ? 16 : maclen)) +
1770 po->tp_reserve; 1770 po->tp_reserve;
@@ -3224,10 +3224,10 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
3224 char __user *optval, int __user *optlen) 3224 char __user *optval, int __user *optlen)
3225{ 3225{
3226 int len; 3226 int len;
3227 int val; 3227 int val, lv = sizeof(val);
3228 struct sock *sk = sock->sk; 3228 struct sock *sk = sock->sk;
3229 struct packet_sock *po = pkt_sk(sk); 3229 struct packet_sock *po = pkt_sk(sk);
3230 void *data; 3230 void *data = &val;
3231 struct tpacket_stats st; 3231 struct tpacket_stats st;
3232 union tpacket_stats_u st_u; 3232 union tpacket_stats_u st_u;
3233 3233
@@ -3242,21 +3242,17 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
3242 3242
3243 switch (optname) { 3243 switch (optname) {
3244 case PACKET_STATISTICS: 3244 case PACKET_STATISTICS:
3245 if (po->tp_version == TPACKET_V3) {
3246 len = sizeof(struct tpacket_stats_v3);
3247 } else {
3248 if (len > sizeof(struct tpacket_stats))
3249 len = sizeof(struct tpacket_stats);
3250 }
3251 spin_lock_bh(&sk->sk_receive_queue.lock); 3245 spin_lock_bh(&sk->sk_receive_queue.lock);
3252 if (po->tp_version == TPACKET_V3) { 3246 if (po->tp_version == TPACKET_V3) {
3247 lv = sizeof(struct tpacket_stats_v3);
3253 memcpy(&st_u.stats3, &po->stats, 3248 memcpy(&st_u.stats3, &po->stats,
3254 sizeof(struct tpacket_stats)); 3249 sizeof(struct tpacket_stats));
3255 st_u.stats3.tp_freeze_q_cnt = 3250 st_u.stats3.tp_freeze_q_cnt =
3256 po->stats_u.stats3.tp_freeze_q_cnt; 3251 po->stats_u.stats3.tp_freeze_q_cnt;
3257 st_u.stats3.tp_packets += po->stats.tp_drops; 3252 st_u.stats3.tp_packets += po->stats.tp_drops;
3258 data = &st_u.stats3; 3253 data = &st_u.stats3;
3259 } else { 3254 } else {
3255 lv = sizeof(struct tpacket_stats);
3260 st = po->stats; 3256 st = po->stats;
3261 st.tp_packets += st.tp_drops; 3257 st.tp_packets += st.tp_drops;
3262 data = &st; 3258 data = &st;
@@ -3265,31 +3261,16 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
3265 spin_unlock_bh(&sk->sk_receive_queue.lock); 3261 spin_unlock_bh(&sk->sk_receive_queue.lock);
3266 break; 3262 break;
3267 case PACKET_AUXDATA: 3263 case PACKET_AUXDATA:
3268 if (len > sizeof(int))
3269 len = sizeof(int);
3270 val = po->auxdata; 3264 val = po->auxdata;
3271
3272 data = &val;
3273 break; 3265 break;
3274 case PACKET_ORIGDEV: 3266 case PACKET_ORIGDEV:
3275 if (len > sizeof(int))
3276 len = sizeof(int);
3277 val = po->origdev; 3267 val = po->origdev;
3278
3279 data = &val;
3280 break; 3268 break;
3281 case PACKET_VNET_HDR: 3269 case PACKET_VNET_HDR:
3282 if (len > sizeof(int))
3283 len = sizeof(int);
3284 val = po->has_vnet_hdr; 3270 val = po->has_vnet_hdr;
3285
3286 data = &val;
3287 break; 3271 break;
3288 case PACKET_VERSION: 3272 case PACKET_VERSION:
3289 if (len > sizeof(int))
3290 len = sizeof(int);
3291 val = po->tp_version; 3273 val = po->tp_version;
3292 data = &val;
3293 break; 3274 break;
3294 case PACKET_HDRLEN: 3275 case PACKET_HDRLEN:
3295 if (len > sizeof(int)) 3276 if (len > sizeof(int))
@@ -3309,39 +3290,28 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
3309 default: 3290 default:
3310 return -EINVAL; 3291 return -EINVAL;
3311 } 3292 }
3312 data = &val;
3313 break; 3293 break;
3314 case PACKET_RESERVE: 3294 case PACKET_RESERVE:
3315 if (len > sizeof(unsigned int))
3316 len = sizeof(unsigned int);
3317 val = po->tp_reserve; 3295 val = po->tp_reserve;
3318 data = &val;
3319 break; 3296 break;
3320 case PACKET_LOSS: 3297 case PACKET_LOSS:
3321 if (len > sizeof(unsigned int))
3322 len = sizeof(unsigned int);
3323 val = po->tp_loss; 3298 val = po->tp_loss;
3324 data = &val;
3325 break; 3299 break;
3326 case PACKET_TIMESTAMP: 3300 case PACKET_TIMESTAMP:
3327 if (len > sizeof(int))
3328 len = sizeof(int);
3329 val = po->tp_tstamp; 3301 val = po->tp_tstamp;
3330 data = &val;
3331 break; 3302 break;
3332 case PACKET_FANOUT: 3303 case PACKET_FANOUT:
3333 if (len > sizeof(int))
3334 len = sizeof(int);
3335 val = (po->fanout ? 3304 val = (po->fanout ?
3336 ((u32)po->fanout->id | 3305 ((u32)po->fanout->id |
3337 ((u32)po->fanout->type << 16)) : 3306 ((u32)po->fanout->type << 16)) :
3338 0); 3307 0);
3339 data = &val;
3340 break; 3308 break;
3341 default: 3309 default:
3342 return -ENOPROTOOPT; 3310 return -ENOPROTOOPT;
3343 } 3311 }
3344 3312
3313 if (len > lv)
3314 len = lv;
3345 if (put_user(len, optlen)) 3315 if (put_user(len, optlen))
3346 return -EFAULT; 3316 return -EFAULT;
3347 if (copy_to_user(optval, data, len)) 3317 if (copy_to_user(optval, data, len))
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index d65f699fbf34..779ce4ff92ec 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -129,7 +129,7 @@ static const struct net_proto_family phonet_proto_family = {
129/* Phonet device header operations */ 129/* Phonet device header operations */
130static int pn_header_create(struct sk_buff *skb, struct net_device *dev, 130static int pn_header_create(struct sk_buff *skb, struct net_device *dev,
131 unsigned short type, const void *daddr, 131 unsigned short type, const void *daddr,
132 const void *saddr, unsigned len) 132 const void *saddr, unsigned int len)
133{ 133{
134 u8 *media = skb_push(skb, 1); 134 u8 *media = skb_push(skb, 1);
135 135
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 9726fe684ab8..9dd4f926f7d1 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -273,7 +273,7 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
273 hdr = pnp_hdr(skb); 273 hdr = pnp_hdr(skb);
274 if (hdr->data[0] != PN_PEP_TYPE_COMMON) { 274 if (hdr->data[0] != PN_PEP_TYPE_COMMON) {
275 LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n", 275 LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP type: %u\n",
276 (unsigned)hdr->data[0]); 276 (unsigned int)hdr->data[0]);
277 return -EOPNOTSUPP; 277 return -EOPNOTSUPP;
278 } 278 }
279 279
@@ -305,7 +305,7 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
305 305
306 default: 306 default:
307 LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP indication: %u\n", 307 LIMIT_NETDEBUG(KERN_DEBUG"Phonet unknown PEP indication: %u\n",
308 (unsigned)hdr->data[1]); 308 (unsigned int)hdr->data[1]);
309 return -EOPNOTSUPP; 309 return -EOPNOTSUPP;
310 } 310 }
311 if (wake) 311 if (wake)
@@ -478,9 +478,9 @@ static void pipe_destruct(struct sock *sk)
478 skb_queue_purge(&pn->ctrlreq_queue); 478 skb_queue_purge(&pn->ctrlreq_queue);
479} 479}
480 480
481static u8 pipe_negotiate_fc(const u8 *fcs, unsigned n) 481static u8 pipe_negotiate_fc(const u8 *fcs, unsigned int n)
482{ 482{
483 unsigned i; 483 unsigned int i;
484 u8 final_fc = PN_NO_FLOW_CONTROL; 484 u8 final_fc = PN_NO_FLOW_CONTROL;
485 485
486 for (i = 0; i < n; i++) { 486 for (i = 0; i < n; i++) {
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
index bf5cf69c820a..36f75a9e2c3d 100644
--- a/net/phonet/pn_dev.c
+++ b/net/phonet/pn_dev.c
@@ -44,7 +44,7 @@ struct phonet_net {
44 struct phonet_routes routes; 44 struct phonet_routes routes;
45}; 45};
46 46
47int phonet_net_id __read_mostly; 47static int phonet_net_id __read_mostly;
48 48
49static struct phonet_net *phonet_pernet(struct net *net) 49static struct phonet_net *phonet_pernet(struct net *net)
50{ 50{
@@ -268,7 +268,7 @@ static int phonet_device_autoconf(struct net_device *dev)
268static void phonet_route_autodel(struct net_device *dev) 268static void phonet_route_autodel(struct net_device *dev)
269{ 269{
270 struct phonet_net *pnn = phonet_pernet(dev_net(dev)); 270 struct phonet_net *pnn = phonet_pernet(dev_net(dev));
271 unsigned i; 271 unsigned int i;
272 DECLARE_BITMAP(deleted, 64); 272 DECLARE_BITMAP(deleted, 64);
273 273
274 /* Remove left-over Phonet routes */ 274 /* Remove left-over Phonet routes */
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index d61f6761777d..cfdf135fcd69 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -116,7 +116,8 @@ static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr,
116 ifm->ifa_flags = IFA_F_PERMANENT; 116 ifm->ifa_flags = IFA_F_PERMANENT;
117 ifm->ifa_scope = RT_SCOPE_LINK; 117 ifm->ifa_scope = RT_SCOPE_LINK;
118 ifm->ifa_index = dev->ifindex; 118 ifm->ifa_index = dev->ifindex;
119 NLA_PUT_U8(skb, IFA_LOCAL, addr); 119 if (nla_put_u8(skb, IFA_LOCAL, addr))
120 goto nla_put_failure;
120 return nlmsg_end(skb, nlh); 121 return nlmsg_end(skb, nlh);
121 122
122nla_put_failure: 123nla_put_failure:
@@ -183,8 +184,9 @@ static int fill_route(struct sk_buff *skb, struct net_device *dev, u8 dst,
183 rtm->rtm_scope = RT_SCOPE_UNIVERSE; 184 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
184 rtm->rtm_type = RTN_UNICAST; 185 rtm->rtm_type = RTN_UNICAST;
185 rtm->rtm_flags = 0; 186 rtm->rtm_flags = 0;
186 NLA_PUT_U8(skb, RTA_DST, dst); 187 if (nla_put_u8(skb, RTA_DST, dst) ||
187 NLA_PUT_U32(skb, RTA_OIF, dev->ifindex); 188 nla_put_u32(skb, RTA_OIF, dev->ifindex))
189 goto nla_put_failure;
188 return nlmsg_end(skb, nlh); 190 return nlmsg_end(skb, nlh);
189 191
190nla_put_failure: 192nla_put_failure:
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 4c7eff30dfa9..89cfa9ce4939 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -58,7 +58,7 @@ static struct {
58 58
59void __init pn_sock_init(void) 59void __init pn_sock_init(void)
60{ 60{
61 unsigned i; 61 unsigned int i;
62 62
63 for (i = 0; i < PN_HASHSIZE; i++) 63 for (i = 0; i < PN_HASHSIZE; i++)
64 INIT_HLIST_HEAD(pnsocks.hlist + i); 64 INIT_HLIST_HEAD(pnsocks.hlist + i);
@@ -116,7 +116,7 @@ struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
116void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb) 116void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
117{ 117{
118 struct hlist_head *hlist = pnsocks.hlist; 118 struct hlist_head *hlist = pnsocks.hlist;
119 unsigned h; 119 unsigned int h;
120 120
121 rcu_read_lock(); 121 rcu_read_lock();
122 for (h = 0; h < PN_HASHSIZE; h++) { 122 for (h = 0; h < PN_HASHSIZE; h++) {
@@ -545,7 +545,7 @@ static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos)
545 struct hlist_head *hlist = pnsocks.hlist; 545 struct hlist_head *hlist = pnsocks.hlist;
546 struct hlist_node *node; 546 struct hlist_node *node;
547 struct sock *sknode; 547 struct sock *sknode;
548 unsigned h; 548 unsigned int h;
549 549
550 for (h = 0; h < PN_HASHSIZE; h++) { 550 for (h = 0; h < PN_HASHSIZE; h++) {
551 sk_for_each_rcu(sknode, node, hlist) { 551 sk_for_each_rcu(sknode, node, hlist) {
@@ -710,7 +710,7 @@ int pn_sock_unbind_res(struct sock *sk, u8 res)
710 710
711void pn_sock_unbind_all_res(struct sock *sk) 711void pn_sock_unbind_all_res(struct sock *sk)
712{ 712{
713 unsigned res, match = 0; 713 unsigned int res, match = 0;
714 714
715 mutex_lock(&resource_mutex); 715 mutex_lock(&resource_mutex);
716 for (res = 0; res < 256; res++) { 716 for (res = 0; res < 256; res++) {
@@ -732,7 +732,7 @@ void pn_sock_unbind_all_res(struct sock *sk)
732static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos) 732static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos)
733{ 733{
734 struct net *net = seq_file_net(seq); 734 struct net *net = seq_file_net(seq);
735 unsigned i; 735 unsigned int i;
736 736
737 if (!net_eq(net, &init_net)) 737 if (!net_eq(net, &init_net))
738 return NULL; 738 return NULL;
@@ -750,7 +750,7 @@ static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos)
750static struct sock **pn_res_get_next(struct seq_file *seq, struct sock **sk) 750static struct sock **pn_res_get_next(struct seq_file *seq, struct sock **sk)
751{ 751{
752 struct net *net = seq_file_net(seq); 752 struct net *net = seq_file_net(seq);
753 unsigned i; 753 unsigned int i;
754 754
755 BUG_ON(!net_eq(net, &init_net)); 755 BUG_ON(!net_eq(net, &init_net));
756 756
diff --git a/net/phonet/sysctl.c b/net/phonet/sysctl.c
index cea1c7dbdae2..696348fd31a1 100644
--- a/net/phonet/sysctl.c
+++ b/net/phonet/sysctl.c
@@ -27,6 +27,10 @@
27#include <linux/errno.h> 27#include <linux/errno.h>
28#include <linux/init.h> 28#include <linux/init.h>
29 29
30#include <net/sock.h>
31#include <linux/phonet.h>
32#include <net/phonet/phonet.h>
33
30#define DYNAMIC_PORT_MIN 0x40 34#define DYNAMIC_PORT_MIN 0x40
31#define DYNAMIC_PORT_MAX 0x7f 35#define DYNAMIC_PORT_MAX 0x7f
32 36
@@ -46,7 +50,8 @@ static void set_local_port_range(int range[2])
46 50
47void phonet_get_local_port_range(int *min, int *max) 51void phonet_get_local_port_range(int *min, int *max)
48{ 52{
49 unsigned seq; 53 unsigned int seq;
54
50 do { 55 do {
51 seq = read_seqbegin(&local_port_range_lock); 56 seq = read_seqbegin(&local_port_range_lock);
52 if (min) 57 if (min)
@@ -93,19 +98,13 @@ static struct ctl_table phonet_table[] = {
93 { } 98 { }
94}; 99};
95 100
96static struct ctl_path phonet_ctl_path[] = {
97 { .procname = "net", },
98 { .procname = "phonet", },
99 { },
100};
101
102int __init phonet_sysctl_init(void) 101int __init phonet_sysctl_init(void)
103{ 102{
104 phonet_table_hrd = register_sysctl_paths(phonet_ctl_path, phonet_table); 103 phonet_table_hrd = register_net_sysctl(&init_net, "net/phonet", phonet_table);
105 return phonet_table_hrd == NULL ? -ENOMEM : 0; 104 return phonet_table_hrd == NULL ? -ENOMEM : 0;
106} 105}
107 106
108void phonet_sysctl_exit(void) 107void phonet_sysctl_exit(void)
109{ 108{
110 unregister_sysctl_table(phonet_table_hrd); 109 unregister_net_sysctl_table(phonet_table_hrd);
111} 110}
diff --git a/net/rds/ib_sysctl.c b/net/rds/ib_sysctl.c
index 1253b006efdb..7e643bafb4af 100644
--- a/net/rds/ib_sysctl.c
+++ b/net/rds/ib_sysctl.c
@@ -106,22 +106,15 @@ static ctl_table rds_ib_sysctl_table[] = {
106 { } 106 { }
107}; 107};
108 108
109static struct ctl_path rds_ib_sysctl_path[] = {
110 { .procname = "net", },
111 { .procname = "rds", },
112 { .procname = "ib", },
113 { }
114};
115
116void rds_ib_sysctl_exit(void) 109void rds_ib_sysctl_exit(void)
117{ 110{
118 if (rds_ib_sysctl_hdr) 111 if (rds_ib_sysctl_hdr)
119 unregister_sysctl_table(rds_ib_sysctl_hdr); 112 unregister_net_sysctl_table(rds_ib_sysctl_hdr);
120} 113}
121 114
122int rds_ib_sysctl_init(void) 115int rds_ib_sysctl_init(void)
123{ 116{
124 rds_ib_sysctl_hdr = register_sysctl_paths(rds_ib_sysctl_path, rds_ib_sysctl_table); 117 rds_ib_sysctl_hdr = register_net_sysctl(&init_net, "net/rds/ib", rds_ib_sysctl_table);
125 if (!rds_ib_sysctl_hdr) 118 if (!rds_ib_sysctl_hdr)
126 return -ENOMEM; 119 return -ENOMEM;
127 return 0; 120 return 0;
diff --git a/net/rds/iw_sysctl.c b/net/rds/iw_sysctl.c
index e2e47176e729..5d5ebd576f3f 100644
--- a/net/rds/iw_sysctl.c
+++ b/net/rds/iw_sysctl.c
@@ -109,22 +109,15 @@ static ctl_table rds_iw_sysctl_table[] = {
109 { } 109 { }
110}; 110};
111 111
112static struct ctl_path rds_iw_sysctl_path[] = {
113 { .procname = "net", },
114 { .procname = "rds", },
115 { .procname = "iw", },
116 { }
117};
118
119void rds_iw_sysctl_exit(void) 112void rds_iw_sysctl_exit(void)
120{ 113{
121 if (rds_iw_sysctl_hdr) 114 if (rds_iw_sysctl_hdr)
122 unregister_sysctl_table(rds_iw_sysctl_hdr); 115 unregister_net_sysctl_table(rds_iw_sysctl_hdr);
123} 116}
124 117
125int rds_iw_sysctl_init(void) 118int rds_iw_sysctl_init(void)
126{ 119{
127 rds_iw_sysctl_hdr = register_sysctl_paths(rds_iw_sysctl_path, rds_iw_sysctl_table); 120 rds_iw_sysctl_hdr = register_net_sysctl(&init_net, "net/rds/iw", rds_iw_sysctl_table);
128 if (!rds_iw_sysctl_hdr) 121 if (!rds_iw_sysctl_hdr)
129 return -ENOMEM; 122 return -ENOMEM;
130 return 0; 123 return 0;
diff --git a/net/rds/sysctl.c b/net/rds/sysctl.c
index 25ad0c77a26c..907214b4c4d0 100644
--- a/net/rds/sysctl.c
+++ b/net/rds/sysctl.c
@@ -92,17 +92,10 @@ static ctl_table rds_sysctl_rds_table[] = {
92 { } 92 { }
93}; 93};
94 94
95static struct ctl_path rds_sysctl_path[] = {
96 { .procname = "net", },
97 { .procname = "rds", },
98 { }
99};
100
101
102void rds_sysctl_exit(void) 95void rds_sysctl_exit(void)
103{ 96{
104 if (rds_sysctl_reg_table) 97 if (rds_sysctl_reg_table)
105 unregister_sysctl_table(rds_sysctl_reg_table); 98 unregister_net_sysctl_table(rds_sysctl_reg_table);
106} 99}
107 100
108int rds_sysctl_init(void) 101int rds_sysctl_init(void)
@@ -110,7 +103,7 @@ int rds_sysctl_init(void)
110 rds_sysctl_reconnect_min = msecs_to_jiffies(1); 103 rds_sysctl_reconnect_min = msecs_to_jiffies(1);
111 rds_sysctl_reconnect_min_jiffies = rds_sysctl_reconnect_min; 104 rds_sysctl_reconnect_min_jiffies = rds_sysctl_reconnect_min;
112 105
113 rds_sysctl_reg_table = register_sysctl_paths(rds_sysctl_path, rds_sysctl_rds_table); 106 rds_sysctl_reg_table = register_net_sysctl(&init_net,"net/rds", rds_sysctl_rds_table);
114 if (!rds_sysctl_reg_table) 107 if (!rds_sysctl_reg_table)
115 return -ENOMEM; 108 return -ENOMEM;
116 return 0; 109 return 0;
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 8b5cc4aa8868..72981375f47c 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -145,7 +145,7 @@ int rds_tcp_listen_init(void)
145 if (ret < 0) 145 if (ret < 0)
146 goto out; 146 goto out;
147 147
148 sock->sk->sk_reuse = 1; 148 sock->sk->sk_reuse = SK_CAN_REUSE;
149 rds_tcp_nonagle(sock); 149 rds_tcp_nonagle(sock);
150 150
151 write_lock_bh(&sock->sk->sk_callback_lock); 151 write_lock_bh(&sock->sk->sk_callback_lock);
diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c
index 906cc05bba63..28dbdb911b85 100644
--- a/net/rose/rose_dev.c
+++ b/net/rose/rose_dev.c
@@ -37,7 +37,7 @@
37 37
38static int rose_header(struct sk_buff *skb, struct net_device *dev, 38static int rose_header(struct sk_buff *skb, struct net_device *dev,
39 unsigned short type, 39 unsigned short type,
40 const void *daddr, const void *saddr, unsigned len) 40 const void *daddr, const void *saddr, unsigned int len)
41{ 41{
42 unsigned char *buff = skb_push(skb, ROSE_MIN_LEN + 2); 42 unsigned char *buff = skb_push(skb, ROSE_MIN_LEN + 2);
43 43
diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c
index 47f1fdb346b0..7ca57741b2fb 100644
--- a/net/rose/rose_subr.c
+++ b/net/rose/rose_subr.c
@@ -399,7 +399,7 @@ int rose_parse_facilities(unsigned char *p, unsigned packet_len,
399 399
400 facilities_len = *p++; 400 facilities_len = *p++;
401 401
402 if (facilities_len == 0 || (unsigned)facilities_len > packet_len) 402 if (facilities_len == 0 || (unsigned int)facilities_len > packet_len)
403 return 0; 403 return 0;
404 404
405 while (facilities_len >= 3 && *p == 0x00) { 405 while (facilities_len >= 3 && *p == 0x00) {
diff --git a/net/rose/sysctl_net_rose.c b/net/rose/sysctl_net_rose.c
index df6d9dac2186..94ca9c2ccd69 100644
--- a/net/rose/sysctl_net_rose.c
+++ b/net/rose/sysctl_net_rose.c
@@ -118,18 +118,12 @@ static ctl_table rose_table[] = {
118 { } 118 { }
119}; 119};
120 120
121static struct ctl_path rose_path[] = {
122 { .procname = "net", },
123 { .procname = "rose", },
124 { }
125};
126
127void __init rose_register_sysctl(void) 121void __init rose_register_sysctl(void)
128{ 122{
129 rose_table_header = register_sysctl_paths(rose_path, rose_table); 123 rose_table_header = register_net_sysctl(&init_net, "net/rose", rose_table);
130} 124}
131 125
132void rose_unregister_sysctl(void) 126void rose_unregister_sysctl(void)
133{ 127{
134 unregister_sysctl_table(rose_table_header); 128 unregister_net_sysctl_table(rose_table_header);
135} 129}
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 74c064c0dfdd..05996d0dd828 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -26,7 +26,7 @@ MODULE_AUTHOR("Red Hat, Inc.");
26MODULE_LICENSE("GPL"); 26MODULE_LICENSE("GPL");
27MODULE_ALIAS_NETPROTO(PF_RXRPC); 27MODULE_ALIAS_NETPROTO(PF_RXRPC);
28 28
29unsigned rxrpc_debug; // = RXRPC_DEBUG_KPROTO; 29unsigned int rxrpc_debug; // = RXRPC_DEBUG_KPROTO;
30module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO); 30module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO);
31MODULE_PARM_DESC(debug, "RxRPC debugging mask"); 31MODULE_PARM_DESC(debug, "RxRPC debugging mask");
32 32
@@ -513,7 +513,7 @@ static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
513 char __user *optval, unsigned int optlen) 513 char __user *optval, unsigned int optlen)
514{ 514{
515 struct rxrpc_sock *rx = rxrpc_sk(sock->sk); 515 struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
516 unsigned min_sec_level; 516 unsigned int min_sec_level;
517 int ret; 517 int ret;
518 518
519 _enter(",%d,%d,,%d", level, optname, optlen); 519 _enter(",%d,%d,,%d", level, optname, optlen);
@@ -555,13 +555,13 @@ static int rxrpc_setsockopt(struct socket *sock, int level, int optname,
555 555
556 case RXRPC_MIN_SECURITY_LEVEL: 556 case RXRPC_MIN_SECURITY_LEVEL:
557 ret = -EINVAL; 557 ret = -EINVAL;
558 if (optlen != sizeof(unsigned)) 558 if (optlen != sizeof(unsigned int))
559 goto error; 559 goto error;
560 ret = -EISCONN; 560 ret = -EISCONN;
561 if (rx->sk.sk_state != RXRPC_UNCONNECTED) 561 if (rx->sk.sk_state != RXRPC_UNCONNECTED)
562 goto error; 562 goto error;
563 ret = get_user(min_sec_level, 563 ret = get_user(min_sec_level,
564 (unsigned __user *) optval); 564 (unsigned int __user *) optval);
565 if (ret < 0) 565 if (ret < 0)
566 goto error; 566 goto error;
567 ret = -EINVAL; 567 ret = -EINVAL;
diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
index c3126e864f3c..e4d9cbcff402 100644
--- a/net/rxrpc/ar-ack.c
+++ b/net/rxrpc/ar-ack.c
@@ -19,7 +19,7 @@
19#include <net/af_rxrpc.h> 19#include <net/af_rxrpc.h>
20#include "ar-internal.h" 20#include "ar-internal.h"
21 21
22static unsigned rxrpc_ack_defer = 1; 22static unsigned int rxrpc_ack_defer = 1;
23 23
24static const char *const rxrpc_acks[] = { 24static const char *const rxrpc_acks[] = {
25 "---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY", "IDL", 25 "---", "REQ", "DUP", "OOS", "WIN", "MEM", "PNG", "PNR", "DLY", "IDL",
@@ -548,11 +548,11 @@ static void rxrpc_zap_tx_window(struct rxrpc_call *call)
548 * process the extra information that may be appended to an ACK packet 548 * process the extra information that may be appended to an ACK packet
549 */ 549 */
550static void rxrpc_extract_ackinfo(struct rxrpc_call *call, struct sk_buff *skb, 550static void rxrpc_extract_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
551 unsigned latest, int nAcks) 551 unsigned int latest, int nAcks)
552{ 552{
553 struct rxrpc_ackinfo ackinfo; 553 struct rxrpc_ackinfo ackinfo;
554 struct rxrpc_peer *peer; 554 struct rxrpc_peer *peer;
555 unsigned mtu; 555 unsigned int mtu;
556 556
557 if (skb_copy_bits(skb, nAcks + 3, &ackinfo, sizeof(ackinfo)) < 0) { 557 if (skb_copy_bits(skb, nAcks + 3, &ackinfo, sizeof(ackinfo)) < 0) {
558 _leave(" [no ackinfo]"); 558 _leave(" [no ackinfo]");
diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
index bf656c230ba9..a3bbb360a3f9 100644
--- a/net/rxrpc/ar-call.c
+++ b/net/rxrpc/ar-call.c
@@ -38,8 +38,8 @@ const char *const rxrpc_call_states[] = {
38struct kmem_cache *rxrpc_call_jar; 38struct kmem_cache *rxrpc_call_jar;
39LIST_HEAD(rxrpc_calls); 39LIST_HEAD(rxrpc_calls);
40DEFINE_RWLOCK(rxrpc_call_lock); 40DEFINE_RWLOCK(rxrpc_call_lock);
41static unsigned rxrpc_call_max_lifetime = 60; 41static unsigned int rxrpc_call_max_lifetime = 60;
42static unsigned rxrpc_dead_call_timeout = 2; 42static unsigned int rxrpc_dead_call_timeout = 2;
43 43
44static void rxrpc_destroy_call(struct work_struct *work); 44static void rxrpc_destroy_call(struct work_struct *work);
45static void rxrpc_call_life_expired(unsigned long _call); 45static void rxrpc_call_life_expired(unsigned long _call);
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
index 1a2b0633fece..529572f18d1f 100644
--- a/net/rxrpc/ar-input.c
+++ b/net/rxrpc/ar-input.c
@@ -76,7 +76,7 @@ int rxrpc_queue_rcv_skb(struct rxrpc_call *call, struct sk_buff *skb,
76 * --ANK */ 76 * --ANK */
77// ret = -ENOBUFS; 77// ret = -ENOBUFS;
78// if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 78// if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
79// (unsigned) sk->sk_rcvbuf) 79// (unsigned int) sk->sk_rcvbuf)
80// goto out; 80// goto out;
81 81
82 ret = sk_filter(sk, skb); 82 ret = sk_filter(sk, skb);
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 8e22bd345e71..a693aca2ae2e 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -83,7 +83,7 @@ struct rxrpc_skb_priv {
83 struct rxrpc_call *call; /* call with which associated */ 83 struct rxrpc_call *call; /* call with which associated */
84 unsigned long resend_at; /* time in jiffies at which to resend */ 84 unsigned long resend_at; /* time in jiffies at which to resend */
85 union { 85 union {
86 unsigned offset; /* offset into buffer of next read */ 86 unsigned int offset; /* offset into buffer of next read */
87 int remain; /* amount of space remaining for next write */ 87 int remain; /* amount of space remaining for next write */
88 u32 error; /* network error code */ 88 u32 error; /* network error code */
89 bool need_resend; /* T if needs resending */ 89 bool need_resend; /* T if needs resending */
@@ -176,9 +176,9 @@ struct rxrpc_peer {
176 struct list_head error_targets; /* targets for net error distribution */ 176 struct list_head error_targets; /* targets for net error distribution */
177 spinlock_t lock; /* access lock */ 177 spinlock_t lock; /* access lock */
178 atomic_t usage; 178 atomic_t usage;
179 unsigned if_mtu; /* interface MTU for this peer */ 179 unsigned int if_mtu; /* interface MTU for this peer */
180 unsigned mtu; /* network MTU for this peer */ 180 unsigned int mtu; /* network MTU for this peer */
181 unsigned maxdata; /* data size (MTU - hdrsize) */ 181 unsigned int maxdata; /* data size (MTU - hdrsize) */
182 unsigned short hdrsize; /* header size (IP + UDP + RxRPC) */ 182 unsigned short hdrsize; /* header size (IP + UDP + RxRPC) */
183 int debug_id; /* debug ID for printks */ 183 int debug_id; /* debug ID for printks */
184 int net_error; /* network error distributed */ 184 int net_error; /* network error distributed */
@@ -187,8 +187,8 @@ struct rxrpc_peer {
187 /* calculated RTT cache */ 187 /* calculated RTT cache */
188#define RXRPC_RTT_CACHE_SIZE 32 188#define RXRPC_RTT_CACHE_SIZE 32
189 suseconds_t rtt; /* current RTT estimate (in uS) */ 189 suseconds_t rtt; /* current RTT estimate (in uS) */
190 unsigned rtt_point; /* next entry at which to insert */ 190 unsigned int rtt_point; /* next entry at which to insert */
191 unsigned rtt_usage; /* amount of cache actually used */ 191 unsigned int rtt_usage; /* amount of cache actually used */
192 suseconds_t rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* calculated RTT cache */ 192 suseconds_t rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* calculated RTT cache */
193}; 193};
194 194
@@ -271,7 +271,7 @@ struct rxrpc_connection {
271 } state; 271 } state;
272 int error; /* error code for local abort */ 272 int error; /* error code for local abort */
273 int debug_id; /* debug ID for printks */ 273 int debug_id; /* debug ID for printks */
274 unsigned call_counter; /* call ID counter */ 274 unsigned int call_counter; /* call ID counter */
275 atomic_t serial; /* packet serial number counter */ 275 atomic_t serial; /* packet serial number counter */
276 atomic_t hi_serial; /* highest serial number received */ 276 atomic_t hi_serial; /* highest serial number received */
277 u8 avail_calls; /* number of calls available */ 277 u8 avail_calls; /* number of calls available */
@@ -592,7 +592,7 @@ extern struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *,
592/* 592/*
593 * debug tracing 593 * debug tracing
594 */ 594 */
595extern unsigned rxrpc_debug; 595extern unsigned int rxrpc_debug;
596 596
597#define dbgprintk(FMT,...) \ 597#define dbgprintk(FMT,...) \
598 printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__) 598 printk("[%-6.6s] "FMT"\n", current->comm ,##__VA_ARGS__)
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index ae3a035f5390..8b1f9f49960f 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -82,7 +82,7 @@ static int rxrpc_vet_description_s(const char *desc)
82 * - the caller guarantees we have at least 4 words 82 * - the caller guarantees we have at least 4 words
83 */ 83 */
84static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr, 84static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr,
85 unsigned toklen) 85 unsigned int toklen)
86{ 86{
87 struct rxrpc_key_token *token, **pptoken; 87 struct rxrpc_key_token *token, **pptoken;
88 size_t plen; 88 size_t plen;
@@ -210,10 +210,10 @@ static void rxrpc_rxk5_free(struct rxk5_key *rxk5)
210 */ 210 */
211static int rxrpc_krb5_decode_principal(struct krb5_principal *princ, 211static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
212 const __be32 **_xdr, 212 const __be32 **_xdr,
213 unsigned *_toklen) 213 unsigned int *_toklen)
214{ 214{
215 const __be32 *xdr = *_xdr; 215 const __be32 *xdr = *_xdr;
216 unsigned toklen = *_toklen, n_parts, loop, tmp; 216 unsigned int toklen = *_toklen, n_parts, loop, tmp;
217 217
218 /* there must be at least one name, and at least #names+1 length 218 /* there must be at least one name, and at least #names+1 length
219 * words */ 219 * words */
@@ -286,10 +286,10 @@ static int rxrpc_krb5_decode_principal(struct krb5_principal *princ,
286static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td, 286static int rxrpc_krb5_decode_tagged_data(struct krb5_tagged_data *td,
287 size_t max_data_size, 287 size_t max_data_size,
288 const __be32 **_xdr, 288 const __be32 **_xdr,
289 unsigned *_toklen) 289 unsigned int *_toklen)
290{ 290{
291 const __be32 *xdr = *_xdr; 291 const __be32 *xdr = *_xdr;
292 unsigned toklen = *_toklen, len; 292 unsigned int toklen = *_toklen, len;
293 293
294 /* there must be at least one tag and one length word */ 294 /* there must be at least one tag and one length word */
295 if (toklen <= 8) 295 if (toklen <= 8)
@@ -330,11 +330,11 @@ static int rxrpc_krb5_decode_tagged_array(struct krb5_tagged_data **_td,
330 u8 max_n_elem, 330 u8 max_n_elem,
331 size_t max_elem_size, 331 size_t max_elem_size,
332 const __be32 **_xdr, 332 const __be32 **_xdr,
333 unsigned *_toklen) 333 unsigned int *_toklen)
334{ 334{
335 struct krb5_tagged_data *td; 335 struct krb5_tagged_data *td;
336 const __be32 *xdr = *_xdr; 336 const __be32 *xdr = *_xdr;
337 unsigned toklen = *_toklen, n_elem, loop; 337 unsigned int toklen = *_toklen, n_elem, loop;
338 int ret; 338 int ret;
339 339
340 /* there must be at least one count */ 340 /* there must be at least one count */
@@ -380,10 +380,10 @@ static int rxrpc_krb5_decode_tagged_array(struct krb5_tagged_data **_td,
380 * extract a krb5 ticket 380 * extract a krb5 ticket
381 */ 381 */
382static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen, 382static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
383 const __be32 **_xdr, unsigned *_toklen) 383 const __be32 **_xdr, unsigned int *_toklen)
384{ 384{
385 const __be32 *xdr = *_xdr; 385 const __be32 *xdr = *_xdr;
386 unsigned toklen = *_toklen, len; 386 unsigned int toklen = *_toklen, len;
387 387
388 /* there must be at least one length word */ 388 /* there must be at least one length word */
389 if (toklen <= 4) 389 if (toklen <= 4)
@@ -419,7 +419,7 @@ static int rxrpc_krb5_decode_ticket(u8 **_ticket, u16 *_tktlen,
419 * - the caller guarantees we have at least 4 words 419 * - the caller guarantees we have at least 4 words
420 */ 420 */
421static int rxrpc_instantiate_xdr_rxk5(struct key *key, const __be32 *xdr, 421static int rxrpc_instantiate_xdr_rxk5(struct key *key, const __be32 *xdr,
422 unsigned toklen) 422 unsigned int toklen)
423{ 423{
424 struct rxrpc_key_token *token, **pptoken; 424 struct rxrpc_key_token *token, **pptoken;
425 struct rxk5_key *rxk5; 425 struct rxk5_key *rxk5;
@@ -549,7 +549,7 @@ static int rxrpc_instantiate_xdr(struct key *key, const void *data, size_t datal
549{ 549{
550 const __be32 *xdr = data, *token; 550 const __be32 *xdr = data, *token;
551 const char *cp; 551 const char *cp;
552 unsigned len, tmp, loop, ntoken, toklen, sec_ix; 552 unsigned int len, tmp, loop, ntoken, toklen, sec_ix;
553 int ret; 553 int ret;
554 554
555 _enter(",{%x,%x,%x,%x},%zu", 555 _enter(",{%x,%x,%x,%x},%zu",
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 7635107726ce..f226709ebd8f 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -31,7 +31,7 @@
31#define REALM_SZ 40 /* size of principal's auth domain */ 31#define REALM_SZ 40 /* size of principal's auth domain */
32#define SNAME_SZ 40 /* size of service name */ 32#define SNAME_SZ 40 /* size of service name */
33 33
34unsigned rxrpc_debug; 34unsigned int rxrpc_debug;
35module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO); 35module_param_named(debug, rxrpc_debug, uint, S_IWUSR | S_IRUGO);
36MODULE_PARM_DESC(debug, "rxkad debugging mask"); 36MODULE_PARM_DESC(debug, "rxkad debugging mask");
37 37
@@ -207,7 +207,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
207 struct rxrpc_crypt iv; 207 struct rxrpc_crypt iv;
208 struct scatterlist sg[16]; 208 struct scatterlist sg[16];
209 struct sk_buff *trailer; 209 struct sk_buff *trailer;
210 unsigned len; 210 unsigned int len;
211 u16 check; 211 u16 check;
212 int nsg; 212 int nsg;
213 213
@@ -826,7 +826,7 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
826 struct rxrpc_crypt iv, key; 826 struct rxrpc_crypt iv, key;
827 struct scatterlist sg[1]; 827 struct scatterlist sg[1];
828 struct in_addr addr; 828 struct in_addr addr;
829 unsigned life; 829 unsigned int life;
830 time_t issue, now; 830 time_t issue, now;
831 bool little_endian; 831 bool little_endian;
832 int ret; 832 int ret;
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 93fdf131bd75..5cfb160df063 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -127,7 +127,8 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
127 nest = nla_nest_start(skb, a->order); 127 nest = nla_nest_start(skb, a->order);
128 if (nest == NULL) 128 if (nest == NULL)
129 goto nla_put_failure; 129 goto nla_put_failure;
130 NLA_PUT_STRING(skb, TCA_KIND, a->ops->kind); 130 if (nla_put_string(skb, TCA_KIND, a->ops->kind))
131 goto nla_put_failure;
131 for (i = 0; i < (hinfo->hmask + 1); i++) { 132 for (i = 0; i < (hinfo->hmask + 1); i++) {
132 p = hinfo->htab[tcf_hash(i, hinfo->hmask)]; 133 p = hinfo->htab[tcf_hash(i, hinfo->hmask)];
133 134
@@ -139,7 +140,8 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
139 p = s_p; 140 p = s_p;
140 } 141 }
141 } 142 }
142 NLA_PUT_U32(skb, TCA_FCNT, n_i); 143 if (nla_put_u32(skb, TCA_FCNT, n_i))
144 goto nla_put_failure;
143 nla_nest_end(skb, nest); 145 nla_nest_end(skb, nest);
144 146
145 return n_i; 147 return n_i;
@@ -437,7 +439,8 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
437 if (a->ops == NULL || a->ops->dump == NULL) 439 if (a->ops == NULL || a->ops->dump == NULL)
438 return err; 440 return err;
439 441
440 NLA_PUT_STRING(skb, TCA_KIND, a->ops->kind); 442 if (nla_put_string(skb, TCA_KIND, a->ops->kind))
443 goto nla_put_failure;
441 if (tcf_action_copy_stats(skb, a, 0)) 444 if (tcf_action_copy_stats(skb, a, 0))
442 goto nla_put_failure; 445 goto nla_put_failure;
443 nest = nla_nest_start(skb, TCA_OPTIONS); 446 nest = nla_nest_start(skb, TCA_OPTIONS);
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 453a73431ac4..882124ceb70c 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -550,11 +550,13 @@ static int tcf_csum_dump(struct sk_buff *skb,
550 }; 550 };
551 struct tcf_t t; 551 struct tcf_t t;
552 552
553 NLA_PUT(skb, TCA_CSUM_PARMS, sizeof(opt), &opt); 553 if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
554 goto nla_put_failure;
554 t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install); 555 t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
555 t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse); 556 t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
556 t.expires = jiffies_to_clock_t(p->tcf_tm.expires); 557 t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
557 NLA_PUT(skb, TCA_CSUM_TM, sizeof(t), &t); 558 if (nla_put(skb, TCA_CSUM_TM, sizeof(t), &t))
559 goto nla_put_failure;
558 560
559 return skb->len; 561 return skb->len;
560 562
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index b77f5a06a658..f10fb8256442 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -162,7 +162,8 @@ static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int
162 }; 162 };
163 struct tcf_t t; 163 struct tcf_t t;
164 164
165 NLA_PUT(skb, TCA_GACT_PARMS, sizeof(opt), &opt); 165 if (nla_put(skb, TCA_GACT_PARMS, sizeof(opt), &opt))
166 goto nla_put_failure;
166#ifdef CONFIG_GACT_PROB 167#ifdef CONFIG_GACT_PROB
167 if (gact->tcfg_ptype) { 168 if (gact->tcfg_ptype) {
168 struct tc_gact_p p_opt = { 169 struct tc_gact_p p_opt = {
@@ -171,13 +172,15 @@ static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int
171 .ptype = gact->tcfg_ptype, 172 .ptype = gact->tcfg_ptype,
172 }; 173 };
173 174
174 NLA_PUT(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt); 175 if (nla_put(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt))
176 goto nla_put_failure;
175 } 177 }
176#endif 178#endif
177 t.install = jiffies_to_clock_t(jiffies - gact->tcf_tm.install); 179 t.install = jiffies_to_clock_t(jiffies - gact->tcf_tm.install);
178 t.lastuse = jiffies_to_clock_t(jiffies - gact->tcf_tm.lastuse); 180 t.lastuse = jiffies_to_clock_t(jiffies - gact->tcf_tm.lastuse);
179 t.expires = jiffies_to_clock_t(gact->tcf_tm.expires); 181 t.expires = jiffies_to_clock_t(gact->tcf_tm.expires);
180 NLA_PUT(skb, TCA_GACT_TM, sizeof(t), &t); 182 if (nla_put(skb, TCA_GACT_TM, sizeof(t), &t))
183 goto nla_put_failure;
181 return skb->len; 184 return skb->len;
182 185
183nla_put_failure: 186nla_put_failure:
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 60f8f616e8fa..0beba0e5312e 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -267,15 +267,17 @@ static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int
267 c.refcnt = ipt->tcf_refcnt - ref; 267 c.refcnt = ipt->tcf_refcnt - ref;
268 strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name); 268 strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name);
269 269
270 NLA_PUT(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t); 270 if (nla_put(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t) ||
271 NLA_PUT_U32(skb, TCA_IPT_INDEX, ipt->tcf_index); 271 nla_put_u32(skb, TCA_IPT_INDEX, ipt->tcf_index) ||
272 NLA_PUT_U32(skb, TCA_IPT_HOOK, ipt->tcfi_hook); 272 nla_put_u32(skb, TCA_IPT_HOOK, ipt->tcfi_hook) ||
273 NLA_PUT(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c); 273 nla_put(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c) ||
274 NLA_PUT_STRING(skb, TCA_IPT_TABLE, ipt->tcfi_tname); 274 nla_put_string(skb, TCA_IPT_TABLE, ipt->tcfi_tname))
275 goto nla_put_failure;
275 tm.install = jiffies_to_clock_t(jiffies - ipt->tcf_tm.install); 276 tm.install = jiffies_to_clock_t(jiffies - ipt->tcf_tm.install);
276 tm.lastuse = jiffies_to_clock_t(jiffies - ipt->tcf_tm.lastuse); 277 tm.lastuse = jiffies_to_clock_t(jiffies - ipt->tcf_tm.lastuse);
277 tm.expires = jiffies_to_clock_t(ipt->tcf_tm.expires); 278 tm.expires = jiffies_to_clock_t(ipt->tcf_tm.expires);
278 NLA_PUT(skb, TCA_IPT_TM, sizeof (tm), &tm); 279 if (nla_put(skb, TCA_IPT_TM, sizeof (tm), &tm))
280 goto nla_put_failure;
279 kfree(t); 281 kfree(t);
280 return skb->len; 282 return skb->len;
281 283
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index e051398fdf6b..d583aea3b3df 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -227,11 +227,13 @@ static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, i
227 }; 227 };
228 struct tcf_t t; 228 struct tcf_t t;
229 229
230 NLA_PUT(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt); 230 if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt))
231 goto nla_put_failure;
231 t.install = jiffies_to_clock_t(jiffies - m->tcf_tm.install); 232 t.install = jiffies_to_clock_t(jiffies - m->tcf_tm.install);
232 t.lastuse = jiffies_to_clock_t(jiffies - m->tcf_tm.lastuse); 233 t.lastuse = jiffies_to_clock_t(jiffies - m->tcf_tm.lastuse);
233 t.expires = jiffies_to_clock_t(m->tcf_tm.expires); 234 t.expires = jiffies_to_clock_t(m->tcf_tm.expires);
234 NLA_PUT(skb, TCA_MIRRED_TM, sizeof(t), &t); 235 if (nla_put(skb, TCA_MIRRED_TM, sizeof(t), &t))
236 goto nla_put_failure;
235 return skb->len; 237 return skb->len;
236 238
237nla_put_failure: 239nla_put_failure:
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 001d1b354869..b5d029eb44f2 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -284,11 +284,13 @@ static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
284 }; 284 };
285 struct tcf_t t; 285 struct tcf_t t;
286 286
287 NLA_PUT(skb, TCA_NAT_PARMS, sizeof(opt), &opt); 287 if (nla_put(skb, TCA_NAT_PARMS, sizeof(opt), &opt))
288 goto nla_put_failure;
288 t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install); 289 t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
289 t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse); 290 t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
290 t.expires = jiffies_to_clock_t(p->tcf_tm.expires); 291 t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
291 NLA_PUT(skb, TCA_NAT_TM, sizeof(t), &t); 292 if (nla_put(skb, TCA_NAT_TM, sizeof(t), &t))
293 goto nla_put_failure;
292 294
293 return skb->len; 295 return skb->len;
294 296
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 10d3aed86560..26aa2f6ce257 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -215,11 +215,13 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
215 opt->refcnt = p->tcf_refcnt - ref; 215 opt->refcnt = p->tcf_refcnt - ref;
216 opt->bindcnt = p->tcf_bindcnt - bind; 216 opt->bindcnt = p->tcf_bindcnt - bind;
217 217
218 NLA_PUT(skb, TCA_PEDIT_PARMS, s, opt); 218 if (nla_put(skb, TCA_PEDIT_PARMS, s, opt))
219 goto nla_put_failure;
219 t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install); 220 t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
220 t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse); 221 t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
221 t.expires = jiffies_to_clock_t(p->tcf_tm.expires); 222 t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
222 NLA_PUT(skb, TCA_PEDIT_TM, sizeof(t), &t); 223 if (nla_put(skb, TCA_PEDIT_TM, sizeof(t), &t))
224 goto nla_put_failure;
223 kfree(opt); 225 kfree(opt);
224 return skb->len; 226 return skb->len;
225 227
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 6fb3f5af0f85..a9de23297d47 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -356,11 +356,14 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
356 opt.rate = police->tcfp_R_tab->rate; 356 opt.rate = police->tcfp_R_tab->rate;
357 if (police->tcfp_P_tab) 357 if (police->tcfp_P_tab)
358 opt.peakrate = police->tcfp_P_tab->rate; 358 opt.peakrate = police->tcfp_P_tab->rate;
359 NLA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt); 359 if (nla_put(skb, TCA_POLICE_TBF, sizeof(opt), &opt))
360 if (police->tcfp_result) 360 goto nla_put_failure;
361 NLA_PUT_U32(skb, TCA_POLICE_RESULT, police->tcfp_result); 361 if (police->tcfp_result &&
362 if (police->tcfp_ewma_rate) 362 nla_put_u32(skb, TCA_POLICE_RESULT, police->tcfp_result))
363 NLA_PUT_U32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate); 363 goto nla_put_failure;
364 if (police->tcfp_ewma_rate &&
365 nla_put_u32(skb, TCA_POLICE_AVRATE, police->tcfp_ewma_rate))
366 goto nla_put_failure;
364 return skb->len; 367 return skb->len;
365 368
366nla_put_failure: 369nla_put_failure:
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 73e0a3ab4d55..3922f2a2821b 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -172,12 +172,14 @@ static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
172 }; 172 };
173 struct tcf_t t; 173 struct tcf_t t;
174 174
175 NLA_PUT(skb, TCA_DEF_PARMS, sizeof(opt), &opt); 175 if (nla_put(skb, TCA_DEF_PARMS, sizeof(opt), &opt) ||
176 NLA_PUT_STRING(skb, TCA_DEF_DATA, d->tcfd_defdata); 176 nla_put_string(skb, TCA_DEF_DATA, d->tcfd_defdata))
177 goto nla_put_failure;
177 t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install); 178 t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
178 t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse); 179 t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse);
179 t.expires = jiffies_to_clock_t(d->tcf_tm.expires); 180 t.expires = jiffies_to_clock_t(d->tcf_tm.expires);
180 NLA_PUT(skb, TCA_DEF_TM, sizeof(t), &t); 181 if (nla_put(skb, TCA_DEF_TM, sizeof(t), &t))
182 goto nla_put_failure;
181 return skb->len; 183 return skb->len;
182 184
183nla_put_failure: 185nla_put_failure:
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 35dbbe91027e..476e0fac6712 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -166,20 +166,25 @@ static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
166 }; 166 };
167 struct tcf_t t; 167 struct tcf_t t;
168 168
169 NLA_PUT(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt); 169 if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt))
170 if (d->flags & SKBEDIT_F_PRIORITY) 170 goto nla_put_failure;
171 NLA_PUT(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority), 171 if ((d->flags & SKBEDIT_F_PRIORITY) &&
172 &d->priority); 172 nla_put(skb, TCA_SKBEDIT_PRIORITY, sizeof(d->priority),
173 if (d->flags & SKBEDIT_F_QUEUE_MAPPING) 173 &d->priority))
174 NLA_PUT(skb, TCA_SKBEDIT_QUEUE_MAPPING, 174 goto nla_put_failure;
175 sizeof(d->queue_mapping), &d->queue_mapping); 175 if ((d->flags & SKBEDIT_F_QUEUE_MAPPING) &&
176 if (d->flags & SKBEDIT_F_MARK) 176 nla_put(skb, TCA_SKBEDIT_QUEUE_MAPPING,
177 NLA_PUT(skb, TCA_SKBEDIT_MARK, sizeof(d->mark), 177 sizeof(d->queue_mapping), &d->queue_mapping))
178 &d->mark); 178 goto nla_put_failure;
179 if ((d->flags & SKBEDIT_F_MARK) &&
180 nla_put(skb, TCA_SKBEDIT_MARK, sizeof(d->mark),
181 &d->mark))
182 goto nla_put_failure;
179 t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install); 183 t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
180 t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse); 184 t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse);
181 t.expires = jiffies_to_clock_t(d->tcf_tm.expires); 185 t.expires = jiffies_to_clock_t(d->tcf_tm.expires);
182 NLA_PUT(skb, TCA_SKBEDIT_TM, sizeof(t), &t); 186 if (nla_put(skb, TCA_SKBEDIT_TM, sizeof(t), &t))
187 goto nla_put_failure;
183 return skb->len; 188 return skb->len;
184 189
185nla_put_failure: 190nla_put_failure:
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index a69d44f1dac5..f452f696b4b3 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -357,7 +357,8 @@ static int tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp,
357 tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex; 357 tcm->tcm_ifindex = qdisc_dev(tp->q)->ifindex;
358 tcm->tcm_parent = tp->classid; 358 tcm->tcm_parent = tp->classid;
359 tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol); 359 tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
360 NLA_PUT_STRING(skb, TCA_KIND, tp->ops->kind); 360 if (nla_put_string(skb, TCA_KIND, tp->ops->kind))
361 goto nla_put_failure;
361 tcm->tcm_handle = fh; 362 tcm->tcm_handle = fh;
362 if (RTM_DELTFILTER != event) { 363 if (RTM_DELTFILTER != event) {
363 tcm->tcm_handle = 0; 364 tcm->tcm_handle = 0;
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index ea1f70b5a5f4..590960a22a77 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -257,8 +257,9 @@ static int basic_dump(struct tcf_proto *tp, unsigned long fh,
257 if (nest == NULL) 257 if (nest == NULL)
258 goto nla_put_failure; 258 goto nla_put_failure;
259 259
260 if (f->res.classid) 260 if (f->res.classid &&
261 NLA_PUT_U32(skb, TCA_BASIC_CLASSID, f->res.classid); 261 nla_put_u32(skb, TCA_BASIC_CLASSID, f->res.classid))
262 goto nla_put_failure;
262 263
263 if (tcf_exts_dump(skb, &f->exts, &basic_ext_map) < 0 || 264 if (tcf_exts_dump(skb, &f->exts, &basic_ext_map) < 0 ||
264 tcf_em_tree_dump(skb, &f->ematches, TCA_BASIC_EMATCHES) < 0) 265 tcf_em_tree_dump(skb, &f->ematches, TCA_BASIC_EMATCHES) < 0)
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 1d8bd0dbcd1f..ccd08c8dc6a7 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -572,25 +572,32 @@ static int flow_dump(struct tcf_proto *tp, unsigned long fh,
572 if (nest == NULL) 572 if (nest == NULL)
573 goto nla_put_failure; 573 goto nla_put_failure;
574 574
575 NLA_PUT_U32(skb, TCA_FLOW_KEYS, f->keymask); 575 if (nla_put_u32(skb, TCA_FLOW_KEYS, f->keymask) ||
576 NLA_PUT_U32(skb, TCA_FLOW_MODE, f->mode); 576 nla_put_u32(skb, TCA_FLOW_MODE, f->mode))
577 goto nla_put_failure;
577 578
578 if (f->mask != ~0 || f->xor != 0) { 579 if (f->mask != ~0 || f->xor != 0) {
579 NLA_PUT_U32(skb, TCA_FLOW_MASK, f->mask); 580 if (nla_put_u32(skb, TCA_FLOW_MASK, f->mask) ||
580 NLA_PUT_U32(skb, TCA_FLOW_XOR, f->xor); 581 nla_put_u32(skb, TCA_FLOW_XOR, f->xor))
582 goto nla_put_failure;
581 } 583 }
582 if (f->rshift) 584 if (f->rshift &&
583 NLA_PUT_U32(skb, TCA_FLOW_RSHIFT, f->rshift); 585 nla_put_u32(skb, TCA_FLOW_RSHIFT, f->rshift))
584 if (f->addend) 586 goto nla_put_failure;
585 NLA_PUT_U32(skb, TCA_FLOW_ADDEND, f->addend); 587 if (f->addend &&
588 nla_put_u32(skb, TCA_FLOW_ADDEND, f->addend))
589 goto nla_put_failure;
586 590
587 if (f->divisor) 591 if (f->divisor &&
588 NLA_PUT_U32(skb, TCA_FLOW_DIVISOR, f->divisor); 592 nla_put_u32(skb, TCA_FLOW_DIVISOR, f->divisor))
589 if (f->baseclass) 593 goto nla_put_failure;
590 NLA_PUT_U32(skb, TCA_FLOW_BASECLASS, f->baseclass); 594 if (f->baseclass &&
595 nla_put_u32(skb, TCA_FLOW_BASECLASS, f->baseclass))
596 goto nla_put_failure;
591 597
592 if (f->perturb_period) 598 if (f->perturb_period &&
593 NLA_PUT_U32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ); 599 nla_put_u32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ))
600 goto nla_put_failure;
594 601
595 if (tcf_exts_dump(skb, &f->exts, &flow_ext_map) < 0) 602 if (tcf_exts_dump(skb, &f->exts, &flow_ext_map) < 0)
596 goto nla_put_failure; 603 goto nla_put_failure;
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index 389af152ec45..8384a4797240 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -346,14 +346,17 @@ static int fw_dump(struct tcf_proto *tp, unsigned long fh,
346 if (nest == NULL) 346 if (nest == NULL)
347 goto nla_put_failure; 347 goto nla_put_failure;
348 348
349 if (f->res.classid) 349 if (f->res.classid &&
350 NLA_PUT_U32(skb, TCA_FW_CLASSID, f->res.classid); 350 nla_put_u32(skb, TCA_FW_CLASSID, f->res.classid))
351 goto nla_put_failure;
351#ifdef CONFIG_NET_CLS_IND 352#ifdef CONFIG_NET_CLS_IND
352 if (strlen(f->indev)) 353 if (strlen(f->indev) &&
353 NLA_PUT_STRING(skb, TCA_FW_INDEV, f->indev); 354 nla_put_string(skb, TCA_FW_INDEV, f->indev))
355 goto nla_put_failure;
354#endif /* CONFIG_NET_CLS_IND */ 356#endif /* CONFIG_NET_CLS_IND */
355 if (head->mask != 0xFFFFFFFF) 357 if (head->mask != 0xFFFFFFFF &&
356 NLA_PUT_U32(skb, TCA_FW_MASK, head->mask); 358 nla_put_u32(skb, TCA_FW_MASK, head->mask))
359 goto nla_put_failure;
357 360
358 if (tcf_exts_dump(skb, &f->exts, &fw_ext_map) < 0) 361 if (tcf_exts_dump(skb, &f->exts, &fw_ext_map) < 0)
359 goto nla_put_failure; 362 goto nla_put_failure;
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index 13ab66e9df58..36fec4227401 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -571,17 +571,21 @@ static int route4_dump(struct tcf_proto *tp, unsigned long fh,
571 571
572 if (!(f->handle & 0x8000)) { 572 if (!(f->handle & 0x8000)) {
573 id = f->id & 0xFF; 573 id = f->id & 0xFF;
574 NLA_PUT_U32(skb, TCA_ROUTE4_TO, id); 574 if (nla_put_u32(skb, TCA_ROUTE4_TO, id))
575 goto nla_put_failure;
575 } 576 }
576 if (f->handle & 0x80000000) { 577 if (f->handle & 0x80000000) {
577 if ((f->handle >> 16) != 0xFFFF) 578 if ((f->handle >> 16) != 0xFFFF &&
578 NLA_PUT_U32(skb, TCA_ROUTE4_IIF, f->iif); 579 nla_put_u32(skb, TCA_ROUTE4_IIF, f->iif))
580 goto nla_put_failure;
579 } else { 581 } else {
580 id = f->id >> 16; 582 id = f->id >> 16;
581 NLA_PUT_U32(skb, TCA_ROUTE4_FROM, id); 583 if (nla_put_u32(skb, TCA_ROUTE4_FROM, id))
584 goto nla_put_failure;
582 } 585 }
583 if (f->res.classid) 586 if (f->res.classid &&
584 NLA_PUT_U32(skb, TCA_ROUTE4_CLASSID, f->res.classid); 587 nla_put_u32(skb, TCA_ROUTE4_CLASSID, f->res.classid))
588 goto nla_put_failure;
585 589
586 if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0) 590 if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
587 goto nla_put_failure; 591 goto nla_put_failure;
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h
index b01427924f81..18ab93ec8d7e 100644
--- a/net/sched/cls_rsvp.h
+++ b/net/sched/cls_rsvp.h
@@ -615,18 +615,22 @@ static int rsvp_dump(struct tcf_proto *tp, unsigned long fh,
615 if (nest == NULL) 615 if (nest == NULL)
616 goto nla_put_failure; 616 goto nla_put_failure;
617 617
618 NLA_PUT(skb, TCA_RSVP_DST, sizeof(s->dst), &s->dst); 618 if (nla_put(skb, TCA_RSVP_DST, sizeof(s->dst), &s->dst))
619 goto nla_put_failure;
619 pinfo.dpi = s->dpi; 620 pinfo.dpi = s->dpi;
620 pinfo.spi = f->spi; 621 pinfo.spi = f->spi;
621 pinfo.protocol = s->protocol; 622 pinfo.protocol = s->protocol;
622 pinfo.tunnelid = s->tunnelid; 623 pinfo.tunnelid = s->tunnelid;
623 pinfo.tunnelhdr = f->tunnelhdr; 624 pinfo.tunnelhdr = f->tunnelhdr;
624 pinfo.pad = 0; 625 pinfo.pad = 0;
625 NLA_PUT(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo); 626 if (nla_put(skb, TCA_RSVP_PINFO, sizeof(pinfo), &pinfo))
626 if (f->res.classid) 627 goto nla_put_failure;
627 NLA_PUT_U32(skb, TCA_RSVP_CLASSID, f->res.classid); 628 if (f->res.classid &&
628 if (((f->handle >> 8) & 0xFF) != 16) 629 nla_put_u32(skb, TCA_RSVP_CLASSID, f->res.classid))
629 NLA_PUT(skb, TCA_RSVP_SRC, sizeof(f->src), f->src); 630 goto nla_put_failure;
631 if (((f->handle >> 8) & 0xFF) != 16 &&
632 nla_put(skb, TCA_RSVP_SRC, sizeof(f->src), f->src))
633 goto nla_put_failure;
630 634
631 if (tcf_exts_dump(skb, &f->exts, &rsvp_ext_map) < 0) 635 if (tcf_exts_dump(skb, &f->exts, &rsvp_ext_map) < 0)
632 goto nla_put_failure; 636 goto nla_put_failure;
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index dbe199234c63..fe29420d0b0e 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -438,10 +438,11 @@ static int tcindex_dump(struct tcf_proto *tp, unsigned long fh,
438 438
439 if (!fh) { 439 if (!fh) {
440 t->tcm_handle = ~0; /* whatever ... */ 440 t->tcm_handle = ~0; /* whatever ... */
441 NLA_PUT_U32(skb, TCA_TCINDEX_HASH, p->hash); 441 if (nla_put_u32(skb, TCA_TCINDEX_HASH, p->hash) ||
442 NLA_PUT_U16(skb, TCA_TCINDEX_MASK, p->mask); 442 nla_put_u16(skb, TCA_TCINDEX_MASK, p->mask) ||
443 NLA_PUT_U32(skb, TCA_TCINDEX_SHIFT, p->shift); 443 nla_put_u32(skb, TCA_TCINDEX_SHIFT, p->shift) ||
444 NLA_PUT_U32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through); 444 nla_put_u32(skb, TCA_TCINDEX_FALL_THROUGH, p->fall_through))
445 goto nla_put_failure;
445 nla_nest_end(skb, nest); 446 nla_nest_end(skb, nest);
446 } else { 447 } else {
447 if (p->perfect) { 448 if (p->perfect) {
@@ -460,8 +461,9 @@ static int tcindex_dump(struct tcf_proto *tp, unsigned long fh,
460 } 461 }
461 } 462 }
462 pr_debug("handle = %d\n", t->tcm_handle); 463 pr_debug("handle = %d\n", t->tcm_handle);
463 if (r->res.class) 464 if (r->res.class &&
464 NLA_PUT_U32(skb, TCA_TCINDEX_CLASSID, r->res.classid); 465 nla_put_u32(skb, TCA_TCINDEX_CLASSID, r->res.classid))
466 goto nla_put_failure;
465 467
466 if (tcf_exts_dump(skb, &r->exts, &tcindex_ext_map) < 0) 468 if (tcf_exts_dump(skb, &r->exts, &tcindex_ext_map) < 0)
467 goto nla_put_failure; 469 goto nla_put_failure;
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 939b627b4795..591b006a8c5a 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -733,36 +733,44 @@ static int u32_dump(struct tcf_proto *tp, unsigned long fh,
733 struct tc_u_hnode *ht = (struct tc_u_hnode *)fh; 733 struct tc_u_hnode *ht = (struct tc_u_hnode *)fh;
734 u32 divisor = ht->divisor + 1; 734 u32 divisor = ht->divisor + 1;
735 735
736 NLA_PUT_U32(skb, TCA_U32_DIVISOR, divisor); 736 if (nla_put_u32(skb, TCA_U32_DIVISOR, divisor))
737 goto nla_put_failure;
737 } else { 738 } else {
738 NLA_PUT(skb, TCA_U32_SEL, 739 if (nla_put(skb, TCA_U32_SEL,
739 sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key), 740 sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
740 &n->sel); 741 &n->sel))
742 goto nla_put_failure;
741 if (n->ht_up) { 743 if (n->ht_up) {
742 u32 htid = n->handle & 0xFFFFF000; 744 u32 htid = n->handle & 0xFFFFF000;
743 NLA_PUT_U32(skb, TCA_U32_HASH, htid); 745 if (nla_put_u32(skb, TCA_U32_HASH, htid))
746 goto nla_put_failure;
744 } 747 }
745 if (n->res.classid) 748 if (n->res.classid &&
746 NLA_PUT_U32(skb, TCA_U32_CLASSID, n->res.classid); 749 nla_put_u32(skb, TCA_U32_CLASSID, n->res.classid))
747 if (n->ht_down) 750 goto nla_put_failure;
748 NLA_PUT_U32(skb, TCA_U32_LINK, n->ht_down->handle); 751 if (n->ht_down &&
752 nla_put_u32(skb, TCA_U32_LINK, n->ht_down->handle))
753 goto nla_put_failure;
749 754
750#ifdef CONFIG_CLS_U32_MARK 755#ifdef CONFIG_CLS_U32_MARK
751 if (n->mark.val || n->mark.mask) 756 if ((n->mark.val || n->mark.mask) &&
752 NLA_PUT(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark); 757 nla_put(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark))
758 goto nla_put_failure;
753#endif 759#endif
754 760
755 if (tcf_exts_dump(skb, &n->exts, &u32_ext_map) < 0) 761 if (tcf_exts_dump(skb, &n->exts, &u32_ext_map) < 0)
756 goto nla_put_failure; 762 goto nla_put_failure;
757 763
758#ifdef CONFIG_NET_CLS_IND 764#ifdef CONFIG_NET_CLS_IND
759 if (strlen(n->indev)) 765 if (strlen(n->indev) &&
760 NLA_PUT_STRING(skb, TCA_U32_INDEV, n->indev); 766 nla_put_string(skb, TCA_U32_INDEV, n->indev))
767 goto nla_put_failure;
761#endif 768#endif
762#ifdef CONFIG_CLS_U32_PERF 769#ifdef CONFIG_CLS_U32_PERF
763 NLA_PUT(skb, TCA_U32_PCNT, 770 if (nla_put(skb, TCA_U32_PCNT,
764 sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64), 771 sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
765 n->pf); 772 n->pf))
773 goto nla_put_failure;
766#endif 774#endif
767 } 775 }
768 776
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 1363bf14e61b..4790c696cbce 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -585,8 +585,9 @@ static void meta_var_apply_extras(struct meta_value *v,
585 585
586static int meta_var_dump(struct sk_buff *skb, struct meta_value *v, int tlv) 586static int meta_var_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
587{ 587{
588 if (v->val && v->len) 588 if (v->val && v->len &&
589 NLA_PUT(skb, tlv, v->len, (void *) v->val); 589 nla_put(skb, tlv, v->len, (void *) v->val))
590 goto nla_put_failure;
590 return 0; 591 return 0;
591 592
592nla_put_failure: 593nla_put_failure:
@@ -636,10 +637,13 @@ static void meta_int_apply_extras(struct meta_value *v,
636 637
637static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv) 638static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
638{ 639{
639 if (v->len == sizeof(unsigned long)) 640 if (v->len == sizeof(unsigned long)) {
640 NLA_PUT(skb, tlv, sizeof(unsigned long), &v->val); 641 if (nla_put(skb, tlv, sizeof(unsigned long), &v->val))
641 else if (v->len == sizeof(u32)) 642 goto nla_put_failure;
642 NLA_PUT_U32(skb, tlv, v->val); 643 } else if (v->len == sizeof(u32)) {
644 if (nla_put_u32(skb, tlv, v->val))
645 goto nla_put_failure;
646 }
643 647
644 return 0; 648 return 0;
645 649
@@ -831,7 +835,8 @@ static int em_meta_dump(struct sk_buff *skb, struct tcf_ematch *em)
831 memcpy(&hdr.left, &meta->lvalue.hdr, sizeof(hdr.left)); 835 memcpy(&hdr.left, &meta->lvalue.hdr, sizeof(hdr.left));
832 memcpy(&hdr.right, &meta->rvalue.hdr, sizeof(hdr.right)); 836 memcpy(&hdr.right, &meta->rvalue.hdr, sizeof(hdr.right));
833 837
834 NLA_PUT(skb, TCA_EM_META_HDR, sizeof(hdr), &hdr); 838 if (nla_put(skb, TCA_EM_META_HDR, sizeof(hdr), &hdr))
839 goto nla_put_failure;
835 840
836 ops = meta_type_ops(&meta->lvalue); 841 ops = meta_type_ops(&meta->lvalue);
837 if (ops->dump(skb, &meta->lvalue, TCA_EM_META_LVALUE) < 0 || 842 if (ops->dump(skb, &meta->lvalue, TCA_EM_META_LVALUE) < 0 ||
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index 88d93eb92507..aca233c2b848 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -441,7 +441,8 @@ int tcf_em_tree_dump(struct sk_buff *skb, struct tcf_ematch_tree *tree, int tlv)
441 if (top_start == NULL) 441 if (top_start == NULL)
442 goto nla_put_failure; 442 goto nla_put_failure;
443 443
444 NLA_PUT(skb, TCA_EMATCH_TREE_HDR, sizeof(tree->hdr), &tree->hdr); 444 if (nla_put(skb, TCA_EMATCH_TREE_HDR, sizeof(tree->hdr), &tree->hdr))
445 goto nla_put_failure;
445 446
446 list_start = nla_nest_start(skb, TCA_EMATCH_TREE_LIST); 447 list_start = nla_nest_start(skb, TCA_EMATCH_TREE_LIST);
447 if (list_start == NULL) 448 if (list_start == NULL)
@@ -457,7 +458,8 @@ int tcf_em_tree_dump(struct sk_buff *skb, struct tcf_ematch_tree *tree, int tlv)
457 .flags = em->flags 458 .flags = em->flags
458 }; 459 };
459 460
460 NLA_PUT(skb, i + 1, sizeof(em_hdr), &em_hdr); 461 if (nla_put(skb, i + 1, sizeof(em_hdr), &em_hdr))
462 goto nla_put_failure;
461 463
462 if (em->ops && em->ops->dump) { 464 if (em->ops && em->ops->dump) {
463 if (em->ops->dump(skb, em) < 0) 465 if (em->ops->dump(skb, em) < 0)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 3d8981fde301..d2daefcc205f 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -426,7 +426,8 @@ static int qdisc_dump_stab(struct sk_buff *skb, struct qdisc_size_table *stab)
426 nest = nla_nest_start(skb, TCA_STAB); 426 nest = nla_nest_start(skb, TCA_STAB);
427 if (nest == NULL) 427 if (nest == NULL)
428 goto nla_put_failure; 428 goto nla_put_failure;
429 NLA_PUT(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts); 429 if (nla_put(skb, TCA_STAB_BASE, sizeof(stab->szopts), &stab->szopts))
430 goto nla_put_failure;
430 nla_nest_end(skb, nest); 431 nla_nest_end(skb, nest);
431 432
432 return skb->len; 433 return skb->len;
@@ -1201,7 +1202,8 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
1201 tcm->tcm_parent = clid; 1202 tcm->tcm_parent = clid;
1202 tcm->tcm_handle = q->handle; 1203 tcm->tcm_handle = q->handle;
1203 tcm->tcm_info = atomic_read(&q->refcnt); 1204 tcm->tcm_info = atomic_read(&q->refcnt);
1204 NLA_PUT_STRING(skb, TCA_KIND, q->ops->id); 1205 if (nla_put_string(skb, TCA_KIND, q->ops->id))
1206 goto nla_put_failure;
1205 if (q->ops->dump && q->ops->dump(q, skb) < 0) 1207 if (q->ops->dump && q->ops->dump(q, skb) < 0)
1206 goto nla_put_failure; 1208 goto nla_put_failure;
1207 q->qstats.qlen = q->q.qlen; 1209 q->qstats.qlen = q->q.qlen;
@@ -1505,7 +1507,8 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
1505 tcm->tcm_parent = q->handle; 1507 tcm->tcm_parent = q->handle;
1506 tcm->tcm_handle = q->handle; 1508 tcm->tcm_handle = q->handle;
1507 tcm->tcm_info = 0; 1509 tcm->tcm_info = 0;
1508 NLA_PUT_STRING(skb, TCA_KIND, q->ops->id); 1510 if (nla_put_string(skb, TCA_KIND, q->ops->id))
1511 goto nla_put_failure;
1509 if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0) 1512 if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
1510 goto nla_put_failure; 1513 goto nla_put_failure;
1511 1514
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index e25e49061a0d..a77a4fbc069a 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -601,7 +601,8 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
601 if (nest == NULL) 601 if (nest == NULL)
602 goto nla_put_failure; 602 goto nla_put_failure;
603 603
604 NLA_PUT(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr); 604 if (nla_put(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr))
605 goto nla_put_failure;
605 if (flow->vcc) { 606 if (flow->vcc) {
606 struct sockaddr_atmpvc pvc; 607 struct sockaddr_atmpvc pvc;
607 int state; 608 int state;
@@ -610,15 +611,19 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
610 pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1; 611 pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
611 pvc.sap_addr.vpi = flow->vcc->vpi; 612 pvc.sap_addr.vpi = flow->vcc->vpi;
612 pvc.sap_addr.vci = flow->vcc->vci; 613 pvc.sap_addr.vci = flow->vcc->vci;
613 NLA_PUT(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc); 614 if (nla_put(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc))
615 goto nla_put_failure;
614 state = ATM_VF2VS(flow->vcc->flags); 616 state = ATM_VF2VS(flow->vcc->flags);
615 NLA_PUT_U32(skb, TCA_ATM_STATE, state); 617 if (nla_put_u32(skb, TCA_ATM_STATE, state))
618 goto nla_put_failure;
619 }
620 if (flow->excess) {
621 if (nla_put_u32(skb, TCA_ATM_EXCESS, flow->classid))
622 goto nla_put_failure;
623 } else {
624 if (nla_put_u32(skb, TCA_ATM_EXCESS, 0))
625 goto nla_put_failure;
616 } 626 }
617 if (flow->excess)
618 NLA_PUT_U32(skb, TCA_ATM_EXCESS, flow->classid);
619 else
620 NLA_PUT_U32(skb, TCA_ATM_EXCESS, 0);
621
622 nla_nest_end(skb, nest); 627 nla_nest_end(skb, nest);
623 return skb->len; 628 return skb->len;
624 629
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 24d94c097b35..6aabd77d1cfd 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1425,7 +1425,8 @@ static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
1425{ 1425{
1426 unsigned char *b = skb_tail_pointer(skb); 1426 unsigned char *b = skb_tail_pointer(skb);
1427 1427
1428 NLA_PUT(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate); 1428 if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
1429 goto nla_put_failure;
1429 return skb->len; 1430 return skb->len;
1430 1431
1431nla_put_failure: 1432nla_put_failure:
@@ -1450,7 +1451,8 @@ static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
1450 opt.minidle = (u32)(-cl->minidle); 1451 opt.minidle = (u32)(-cl->minidle);
1451 opt.offtime = cl->offtime; 1452 opt.offtime = cl->offtime;
1452 opt.change = ~0; 1453 opt.change = ~0;
1453 NLA_PUT(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt); 1454 if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt))
1455 goto nla_put_failure;
1454 return skb->len; 1456 return skb->len;
1455 1457
1456nla_put_failure: 1458nla_put_failure:
@@ -1468,7 +1470,8 @@ static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
1468 opt.priority = cl->priority + 1; 1470 opt.priority = cl->priority + 1;
1469 opt.cpriority = cl->cpriority + 1; 1471 opt.cpriority = cl->cpriority + 1;
1470 opt.weight = cl->weight; 1472 opt.weight = cl->weight;
1471 NLA_PUT(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt); 1473 if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt))
1474 goto nla_put_failure;
1472 return skb->len; 1475 return skb->len;
1473 1476
1474nla_put_failure: 1477nla_put_failure:
@@ -1485,7 +1488,8 @@ static int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl)
1485 opt.priority2 = cl->priority2 + 1; 1488 opt.priority2 = cl->priority2 + 1;
1486 opt.pad = 0; 1489 opt.pad = 0;
1487 opt.penalty = cl->penalty; 1490 opt.penalty = cl->penalty;
1488 NLA_PUT(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt); 1491 if (nla_put(skb, TCA_CBQ_OVL_STRATEGY, sizeof(opt), &opt))
1492 goto nla_put_failure;
1489 return skb->len; 1493 return skb->len;
1490 1494
1491nla_put_failure: 1495nla_put_failure:
@@ -1502,7 +1506,8 @@ static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
1502 opt.split = cl->split ? cl->split->common.classid : 0; 1506 opt.split = cl->split ? cl->split->common.classid : 0;
1503 opt.defmap = cl->defmap; 1507 opt.defmap = cl->defmap;
1504 opt.defchange = ~0; 1508 opt.defchange = ~0;
1505 NLA_PUT(skb, TCA_CBQ_FOPT, sizeof(opt), &opt); 1509 if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt))
1510 goto nla_put_failure;
1506 } 1511 }
1507 return skb->len; 1512 return skb->len;
1508 1513
@@ -1521,7 +1526,8 @@ static int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
1521 opt.police = cl->police; 1526 opt.police = cl->police;
1522 opt.__res1 = 0; 1527 opt.__res1 = 0;
1523 opt.__res2 = 0; 1528 opt.__res2 = 0;
1524 NLA_PUT(skb, TCA_CBQ_POLICE, sizeof(opt), &opt); 1529 if (nla_put(skb, TCA_CBQ_POLICE, sizeof(opt), &opt))
1530 goto nla_put_failure;
1525 } 1531 }
1526 return skb->len; 1532 return skb->len;
1527 1533
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 7e267d7b9c75..cc37dd52ecf9 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -332,15 +332,13 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
332 } 332 }
333 333
334 q->stats.pdrop++; 334 q->stats.pdrop++;
335 sch->qstats.drops++; 335 return qdisc_drop(skb, sch);
336 kfree_skb(skb);
337 return NET_XMIT_DROP;
338 336
339 congestion_drop: 337congestion_drop:
340 qdisc_drop(skb, sch); 338 qdisc_drop(skb, sch);
341 return NET_XMIT_CN; 339 return NET_XMIT_CN;
342 340
343 other_drop: 341other_drop:
344 if (ret & __NET_XMIT_BYPASS) 342 if (ret & __NET_XMIT_BYPASS)
345 sch->qstats.drops++; 343 sch->qstats.drops++;
346 kfree_skb(skb); 344 kfree_skb(skb);
@@ -515,8 +513,9 @@ static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
515 if (opts == NULL) 513 if (opts == NULL)
516 goto nla_put_failure; 514 goto nla_put_failure;
517 515
518 NLA_PUT(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt); 516 if (nla_put(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt) ||
519 NLA_PUT_U32(skb, TCA_CHOKE_MAX_P, q->parms.max_P); 517 nla_put_u32(skb, TCA_CHOKE_MAX_P, q->parms.max_P))
518 goto nla_put_failure;
520 return nla_nest_end(skb, opts); 519 return nla_nest_end(skb, opts);
521 520
522nla_put_failure: 521nla_put_failure:
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 6b7fe4a84f13..c2189879359b 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -260,7 +260,8 @@ static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
260 nest = nla_nest_start(skb, TCA_OPTIONS); 260 nest = nla_nest_start(skb, TCA_OPTIONS);
261 if (nest == NULL) 261 if (nest == NULL)
262 goto nla_put_failure; 262 goto nla_put_failure;
263 NLA_PUT_U32(skb, TCA_DRR_QUANTUM, cl->quantum); 263 if (nla_put_u32(skb, TCA_DRR_QUANTUM, cl->quantum))
264 goto nla_put_failure;
264 return nla_nest_end(skb, nest); 265 return nla_nest_end(skb, nest);
265 266
266nla_put_failure: 267nla_put_failure:
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 2c790204d042..3886365cc207 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -265,8 +265,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
265 return NET_XMIT_SUCCESS; 265 return NET_XMIT_SUCCESS;
266 266
267drop: 267drop:
268 kfree_skb(skb); 268 qdisc_drop(skb, sch);
269 sch->qstats.drops++;
270 return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; 269 return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
271} 270}
272 271
@@ -429,8 +428,9 @@ static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
429 opts = nla_nest_start(skb, TCA_OPTIONS); 428 opts = nla_nest_start(skb, TCA_OPTIONS);
430 if (opts == NULL) 429 if (opts == NULL)
431 goto nla_put_failure; 430 goto nla_put_failure;
432 NLA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl - 1]); 431 if (nla_put_u8(skb, TCA_DSMARK_MASK, p->mask[cl - 1]) ||
433 NLA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl - 1]); 432 nla_put_u8(skb, TCA_DSMARK_VALUE, p->value[cl - 1]))
433 goto nla_put_failure;
434 434
435 return nla_nest_end(skb, opts); 435 return nla_nest_end(skb, opts);
436 436
@@ -447,13 +447,16 @@ static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
447 opts = nla_nest_start(skb, TCA_OPTIONS); 447 opts = nla_nest_start(skb, TCA_OPTIONS);
448 if (opts == NULL) 448 if (opts == NULL)
449 goto nla_put_failure; 449 goto nla_put_failure;
450 NLA_PUT_U16(skb, TCA_DSMARK_INDICES, p->indices); 450 if (nla_put_u16(skb, TCA_DSMARK_INDICES, p->indices))
451 goto nla_put_failure;
451 452
452 if (p->default_index != NO_DEFAULT_INDEX) 453 if (p->default_index != NO_DEFAULT_INDEX &&
453 NLA_PUT_U16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index); 454 nla_put_u16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index))
455 goto nla_put_failure;
454 456
455 if (p->set_tc_index) 457 if (p->set_tc_index &&
456 NLA_PUT_FLAG(skb, TCA_DSMARK_SET_TC_INDEX); 458 nla_put_flag(skb, TCA_DSMARK_SET_TC_INDEX))
459 goto nla_put_failure;
457 460
458 return nla_nest_end(skb, opts); 461 return nla_nest_end(skb, opts);
459 462
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index 66effe2da8e0..e15a9eb29087 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -85,7 +85,8 @@ static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
85{ 85{
86 struct tc_fifo_qopt opt = { .limit = sch->limit }; 86 struct tc_fifo_qopt opt = { .limit = sch->limit };
87 87
88 NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); 88 if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
89 goto nla_put_failure;
89 return skb->len; 90 return skb->len;
90 91
91nla_put_failure: 92nla_put_failure:
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 67fc573e013a..0eb1202c22a6 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -512,7 +512,8 @@ static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
512 struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS }; 512 struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };
513 513
514 memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1); 514 memcpy(&opt.priomap, prio2band, TC_PRIO_MAX + 1);
515 NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); 515 if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
516 goto nla_put_failure;
516 return skb->len; 517 return skb->len;
517 518
518nla_put_failure: 519nla_put_failure:
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 8179494c269a..ab620bf90785 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -521,14 +521,16 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
521 opts = nla_nest_start(skb, TCA_OPTIONS); 521 opts = nla_nest_start(skb, TCA_OPTIONS);
522 if (opts == NULL) 522 if (opts == NULL)
523 goto nla_put_failure; 523 goto nla_put_failure;
524 NLA_PUT(skb, TCA_GRED_DPS, sizeof(sopt), &sopt); 524 if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
525 goto nla_put_failure;
525 526
526 for (i = 0; i < MAX_DPs; i++) { 527 for (i = 0; i < MAX_DPs; i++) {
527 struct gred_sched_data *q = table->tab[i]; 528 struct gred_sched_data *q = table->tab[i];
528 529
529 max_p[i] = q ? q->parms.max_P : 0; 530 max_p[i] = q ? q->parms.max_P : 0;
530 } 531 }
531 NLA_PUT(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p); 532 if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
533 goto nla_put_failure;
532 534
533 parms = nla_nest_start(skb, TCA_GRED_PARMS); 535 parms = nla_nest_start(skb, TCA_GRED_PARMS);
534 if (parms == NULL) 536 if (parms == NULL)
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 9bdca2e011e9..8db3e2c72827 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1305,7 +1305,8 @@ hfsc_dump_sc(struct sk_buff *skb, int attr, struct internal_sc *sc)
1305 tsc.m1 = sm2m(sc->sm1); 1305 tsc.m1 = sm2m(sc->sm1);
1306 tsc.d = dx2d(sc->dx); 1306 tsc.d = dx2d(sc->dx);
1307 tsc.m2 = sm2m(sc->sm2); 1307 tsc.m2 = sm2m(sc->sm2);
1308 NLA_PUT(skb, attr, sizeof(tsc), &tsc); 1308 if (nla_put(skb, attr, sizeof(tsc), &tsc))
1309 goto nla_put_failure;
1309 1310
1310 return skb->len; 1311 return skb->len;
1311 1312
@@ -1573,7 +1574,8 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
1573 } 1574 }
1574 1575
1575 qopt.defcls = q->defcls; 1576 qopt.defcls = q->defcls;
1576 NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt); 1577 if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
1578 goto nla_put_failure;
1577 return skb->len; 1579 return skb->len;
1578 1580
1579 nla_put_failure: 1581 nla_put_failure:
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 29b942ce9e82..acae5b0e3849 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -558,9 +558,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
558 __skb_queue_tail(&q->direct_queue, skb); 558 __skb_queue_tail(&q->direct_queue, skb);
559 q->direct_pkts++; 559 q->direct_pkts++;
560 } else { 560 } else {
561 kfree_skb(skb); 561 return qdisc_drop(skb, sch);
562 sch->qstats.drops++;
563 return NET_XMIT_DROP;
564 } 562 }
565#ifdef CONFIG_NET_CLS_ACT 563#ifdef CONFIG_NET_CLS_ACT
566 } else if (!cl) { 564 } else if (!cl) {
@@ -1051,7 +1049,8 @@ static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1051 nest = nla_nest_start(skb, TCA_OPTIONS); 1049 nest = nla_nest_start(skb, TCA_OPTIONS);
1052 if (nest == NULL) 1050 if (nest == NULL)
1053 goto nla_put_failure; 1051 goto nla_put_failure;
1054 NLA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt); 1052 if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt))
1053 goto nla_put_failure;
1055 nla_nest_end(skb, nest); 1054 nla_nest_end(skb, nest);
1056 1055
1057 spin_unlock_bh(root_lock); 1056 spin_unlock_bh(root_lock);
@@ -1090,7 +1089,8 @@ static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
1090 opt.quantum = cl->quantum; 1089 opt.quantum = cl->quantum;
1091 opt.prio = cl->prio; 1090 opt.prio = cl->prio;
1092 opt.level = cl->level; 1091 opt.level = cl->level;
1093 NLA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt); 1092 if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
1093 goto nla_put_failure;
1094 1094
1095 nla_nest_end(skb, nest); 1095 nla_nest_end(skb, nest);
1096 spin_unlock_bh(root_lock); 1096 spin_unlock_bh(root_lock);
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index 28de43092330..d1831ca966d4 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -247,7 +247,8 @@ static int mqprio_dump(struct Qdisc *sch, struct sk_buff *skb)
247 opt.offset[i] = dev->tc_to_txq[i].offset; 247 opt.offset[i] = dev->tc_to_txq[i].offset;
248 } 248 }
249 249
250 NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); 250 if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
251 goto nla_put_failure;
251 252
252 return skb->len; 253 return skb->len;
253nla_put_failure: 254nla_put_failure:
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 49131d7a7446..2a2b096d9a66 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -284,7 +284,8 @@ static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb)
284 opt.bands = q->bands; 284 opt.bands = q->bands;
285 opt.max_bands = q->max_bands; 285 opt.max_bands = q->max_bands;
286 286
287 NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); 287 if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
288 goto nla_put_failure;
288 289
289 return skb->len; 290 return skb->len;
290 291
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index ebd22966f748..a2a95aabf9c2 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -26,6 +26,7 @@
26 26
27#include <net/netlink.h> 27#include <net/netlink.h>
28#include <net/pkt_sched.h> 28#include <net/pkt_sched.h>
29#include <net/inet_ecn.h>
29 30
30#define VERSION "1.3" 31#define VERSION "1.3"
31 32
@@ -78,6 +79,7 @@ struct netem_sched_data {
78 psched_tdiff_t jitter; 79 psched_tdiff_t jitter;
79 80
80 u32 loss; 81 u32 loss;
82 u32 ecn;
81 u32 limit; 83 u32 limit;
82 u32 counter; 84 u32 counter;
83 u32 gap; 85 u32 gap;
@@ -374,9 +376,12 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
374 ++count; 376 ++count;
375 377
376 /* Drop packet? */ 378 /* Drop packet? */
377 if (loss_event(q)) 379 if (loss_event(q)) {
378 --count; 380 if (q->ecn && INET_ECN_set_ce(skb))
379 381 sch->qstats.drops++; /* mark packet */
382 else
383 --count;
384 }
380 if (count == 0) { 385 if (count == 0) {
381 sch->qstats.drops++; 386 sch->qstats.drops++;
382 kfree_skb(skb); 387 kfree_skb(skb);
@@ -704,6 +709,7 @@ static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
704 [TCA_NETEM_CORRUPT] = { .len = sizeof(struct tc_netem_corrupt) }, 709 [TCA_NETEM_CORRUPT] = { .len = sizeof(struct tc_netem_corrupt) },
705 [TCA_NETEM_RATE] = { .len = sizeof(struct tc_netem_rate) }, 710 [TCA_NETEM_RATE] = { .len = sizeof(struct tc_netem_rate) },
706 [TCA_NETEM_LOSS] = { .type = NLA_NESTED }, 711 [TCA_NETEM_LOSS] = { .type = NLA_NESTED },
712 [TCA_NETEM_ECN] = { .type = NLA_U32 },
707}; 713};
708 714
709static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla, 715static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
@@ -774,6 +780,9 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
774 if (tb[TCA_NETEM_RATE]) 780 if (tb[TCA_NETEM_RATE])
775 get_rate(sch, tb[TCA_NETEM_RATE]); 781 get_rate(sch, tb[TCA_NETEM_RATE]);
776 782
783 if (tb[TCA_NETEM_ECN])
784 q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);
785
777 q->loss_model = CLG_RANDOM; 786 q->loss_model = CLG_RANDOM;
778 if (tb[TCA_NETEM_LOSS]) 787 if (tb[TCA_NETEM_LOSS])
779 ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]); 788 ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]);
@@ -832,7 +841,8 @@ static int dump_loss_model(const struct netem_sched_data *q,
832 .p23 = q->clg.a5, 841 .p23 = q->clg.a5,
833 }; 842 };
834 843
835 NLA_PUT(skb, NETEM_LOSS_GI, sizeof(gi), &gi); 844 if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
845 goto nla_put_failure;
836 break; 846 break;
837 } 847 }
838 case CLG_GILB_ELL: { 848 case CLG_GILB_ELL: {
@@ -843,7 +853,8 @@ static int dump_loss_model(const struct netem_sched_data *q,
843 .k1 = q->clg.a4, 853 .k1 = q->clg.a4,
844 }; 854 };
845 855
846 NLA_PUT(skb, NETEM_LOSS_GE, sizeof(ge), &ge); 856 if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
857 goto nla_put_failure;
847 break; 858 break;
848 } 859 }
849 } 860 }
@@ -872,26 +883,34 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
872 qopt.loss = q->loss; 883 qopt.loss = q->loss;
873 qopt.gap = q->gap; 884 qopt.gap = q->gap;
874 qopt.duplicate = q->duplicate; 885 qopt.duplicate = q->duplicate;
875 NLA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt); 886 if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
887 goto nla_put_failure;
876 888
877 cor.delay_corr = q->delay_cor.rho; 889 cor.delay_corr = q->delay_cor.rho;
878 cor.loss_corr = q->loss_cor.rho; 890 cor.loss_corr = q->loss_cor.rho;
879 cor.dup_corr = q->dup_cor.rho; 891 cor.dup_corr = q->dup_cor.rho;
880 NLA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor); 892 if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
893 goto nla_put_failure;
881 894
882 reorder.probability = q->reorder; 895 reorder.probability = q->reorder;
883 reorder.correlation = q->reorder_cor.rho; 896 reorder.correlation = q->reorder_cor.rho;
884 NLA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder); 897 if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
898 goto nla_put_failure;
885 899
886 corrupt.probability = q->corrupt; 900 corrupt.probability = q->corrupt;
887 corrupt.correlation = q->corrupt_cor.rho; 901 corrupt.correlation = q->corrupt_cor.rho;
888 NLA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt); 902 if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
903 goto nla_put_failure;
889 904
890 rate.rate = q->rate; 905 rate.rate = q->rate;
891 rate.packet_overhead = q->packet_overhead; 906 rate.packet_overhead = q->packet_overhead;
892 rate.cell_size = q->cell_size; 907 rate.cell_size = q->cell_size;
893 rate.cell_overhead = q->cell_overhead; 908 rate.cell_overhead = q->cell_overhead;
894 NLA_PUT(skb, TCA_NETEM_RATE, sizeof(rate), &rate); 909 if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
910 goto nla_put_failure;
911
912 if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
913 goto nla_put_failure;
895 914
896 if (dump_loss_model(q, skb) != 0) 915 if (dump_loss_model(q, skb) != 0)
897 goto nla_put_failure; 916 goto nla_put_failure;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index b5d56a22b1d2..79359b69ad8d 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -247,7 +247,8 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
247 opt.bands = q->bands; 247 opt.bands = q->bands;
248 memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1); 248 memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);
249 249
250 NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); 250 if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
251 goto nla_put_failure;
251 252
252 return skb->len; 253 return skb->len;
253 254
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index e68cb440756a..9af01f3df18c 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -429,8 +429,9 @@ static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
429 nest = nla_nest_start(skb, TCA_OPTIONS); 429 nest = nla_nest_start(skb, TCA_OPTIONS);
430 if (nest == NULL) 430 if (nest == NULL)
431 goto nla_put_failure; 431 goto nla_put_failure;
432 NLA_PUT_U32(skb, TCA_QFQ_WEIGHT, ONE_FP/cl->inv_w); 432 if (nla_put_u32(skb, TCA_QFQ_WEIGHT, ONE_FP/cl->inv_w) ||
433 NLA_PUT_U32(skb, TCA_QFQ_LMAX, cl->lmax); 433 nla_put_u32(skb, TCA_QFQ_LMAX, cl->lmax))
434 goto nla_put_failure;
434 return nla_nest_end(skb, nest); 435 return nla_nest_end(skb, nest);
435 436
436nla_put_failure: 437nla_put_failure:
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index a5cc3012cf42..633e32defdcc 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -272,8 +272,9 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
272 opts = nla_nest_start(skb, TCA_OPTIONS); 272 opts = nla_nest_start(skb, TCA_OPTIONS);
273 if (opts == NULL) 273 if (opts == NULL)
274 goto nla_put_failure; 274 goto nla_put_failure;
275 NLA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt); 275 if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
276 NLA_PUT_U32(skb, TCA_RED_MAX_P, q->parms.max_P); 276 nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P))
277 goto nla_put_failure;
277 return nla_nest_end(skb, opts); 278 return nla_nest_end(skb, opts);
278 279
279nla_put_failure: 280nla_put_failure:
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index d7eea99333e9..74305c883bd3 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -570,7 +570,8 @@ static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
570 570
571 sch->qstats.backlog = q->qdisc->qstats.backlog; 571 sch->qstats.backlog = q->qdisc->qstats.backlog;
572 opts = nla_nest_start(skb, TCA_OPTIONS); 572 opts = nla_nest_start(skb, TCA_OPTIONS);
573 NLA_PUT(skb, TCA_SFB_PARMS, sizeof(opt), &opt); 573 if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
574 goto nla_put_failure;
574 return nla_nest_end(skb, opts); 575 return nla_nest_end(skb, opts);
575 576
576nla_put_failure: 577nla_put_failure:
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 02a21abea65e..d3a1bc26dbfc 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -812,7 +812,8 @@ static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
812 memcpy(&opt.stats, &q->stats, sizeof(opt.stats)); 812 memcpy(&opt.stats, &q->stats, sizeof(opt.stats));
813 opt.flags = q->flags; 813 opt.flags = q->flags;
814 814
815 NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); 815 if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
816 goto nla_put_failure;
816 817
817 return skb->len; 818 return skb->len;
818 819
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index b8e156319d7b..4b056c15e90c 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -359,7 +359,8 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
359 memset(&opt.peakrate, 0, sizeof(opt.peakrate)); 359 memset(&opt.peakrate, 0, sizeof(opt.peakrate));
360 opt.mtu = q->mtu; 360 opt.mtu = q->mtu;
361 opt.buffer = q->buffer; 361 opt.buffer = q->buffer;
362 NLA_PUT(skb, TCA_TBF_PARMS, sizeof(opt), &opt); 362 if (nla_put(skb, TCA_TBF_PARMS, sizeof(opt), &opt))
363 goto nla_put_failure;
363 364
364 nla_nest_end(skb, nest); 365 nla_nest_end(skb, nest);
365 return skb->len; 366 return skb->len;
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 45326599fda3..ca0c29695d51 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -88,9 +88,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc *sch)
88 return NET_XMIT_SUCCESS; 88 return NET_XMIT_SUCCESS;
89 } 89 }
90 90
91 kfree_skb(skb); 91 return qdisc_drop(skb, sch);
92 sch->qstats.drops++;
93 return NET_XMIT_DROP;
94} 92}
95 93
96static struct sk_buff * 94static struct sk_buff *
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index acd2edbc073e..5bc9ab161b37 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1408,7 +1408,7 @@ static inline int sctp_peer_needs_update(struct sctp_association *asoc)
1408} 1408}
1409 1409
1410/* Increase asoc's rwnd by len and send any window update SACK if needed. */ 1410/* Increase asoc's rwnd by len and send any window update SACK if needed. */
1411void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned len) 1411void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
1412{ 1412{
1413 struct sctp_chunk *sack; 1413 struct sctp_chunk *sack;
1414 struct timer_list *timer; 1414 struct timer_list *timer;
@@ -1465,7 +1465,7 @@ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned len)
1465} 1465}
1466 1466
1467/* Decrease asoc's rwnd by len. */ 1467/* Decrease asoc's rwnd by len. */
1468void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned len) 1468void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
1469{ 1469{
1470 int rx_count; 1470 int rx_count;
1471 int over = 0; 1471 int over = 0;
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 80f71af71384..80564fe03024 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -342,7 +342,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
342 sctp_bh_lock_sock(sk); 342 sctp_bh_lock_sock(sk);
343 343
344 if (sock_owned_by_user(sk)) { 344 if (sock_owned_by_user(sk)) {
345 if (sk_add_backlog(sk, skb)) 345 if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
346 sctp_chunk_free(chunk); 346 sctp_chunk_free(chunk);
347 else 347 else
348 backloged = 1; 348 backloged = 1;
@@ -376,7 +376,7 @@ static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
376 struct sctp_ep_common *rcvr = chunk->rcvr; 376 struct sctp_ep_common *rcvr = chunk->rcvr;
377 int ret; 377 int ret;
378 378
379 ret = sk_add_backlog(sk, skb); 379 ret = sk_add_backlog(sk, skb, sk->sk_rcvbuf);
380 if (!ret) { 380 if (!ret) {
381 /* Hold the assoc/ep while hanging on the backlog queue. 381 /* Hold the assoc/ep while hanging on the backlog queue.
382 * This way, we know structures we need will not disappear 382 * This way, we know structures we need will not disappear
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 817174eb5f41..69534c5f8afa 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -663,8 +663,8 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
663 */ 663 */
664 if (!sctp_sk(asoc->base.sk)->nodelay && sctp_packet_empty(packet) && 664 if (!sctp_sk(asoc->base.sk)->nodelay && sctp_packet_empty(packet) &&
665 inflight && sctp_state(asoc, ESTABLISHED)) { 665 inflight && sctp_state(asoc, ESTABLISHED)) {
666 unsigned max = transport->pathmtu - packet->overhead; 666 unsigned int max = transport->pathmtu - packet->overhead;
667 unsigned len = chunk->skb->len + q->out_qlen; 667 unsigned int len = chunk->skb->len + q->out_qlen;
668 668
669 /* Check whether this chunk and all the rest of pending 669 /* Check whether this chunk and all the rest of pending
670 * data will fit or delay in hopes of bundling a full 670 * data will fit or delay in hopes of bundling a full
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index cfeb1d4a1ee6..a0fa19f5650c 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -1147,7 +1147,7 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
1147 __u32 sack_ctsn, ctsn, tsn; 1147 __u32 sack_ctsn, ctsn, tsn;
1148 __u32 highest_tsn, highest_new_tsn; 1148 __u32 highest_tsn, highest_new_tsn;
1149 __u32 sack_a_rwnd; 1149 __u32 sack_a_rwnd;
1150 unsigned outstanding; 1150 unsigned int outstanding;
1151 struct sctp_transport *primary = asoc->peer.primary_path; 1151 struct sctp_transport *primary = asoc->peer.primary_path;
1152 int count_of_newacks = 0; 1152 int count_of_newacks = 0;
1153 int gap_ack_blocks; 1153 int gap_ack_blocks;
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 1ff51c9d18d5..fbb374c65945 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -524,7 +524,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
524/* Worker routine to handle INIT command failure. */ 524/* Worker routine to handle INIT command failure. */
525static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands, 525static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands,
526 struct sctp_association *asoc, 526 struct sctp_association *asoc,
527 unsigned error) 527 unsigned int error)
528{ 528{
529 struct sctp_ulpevent *event; 529 struct sctp_ulpevent *event;
530 530
@@ -550,7 +550,7 @@ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands,
550 sctp_event_t event_type, 550 sctp_event_t event_type,
551 sctp_subtype_t subtype, 551 sctp_subtype_t subtype,
552 struct sctp_chunk *chunk, 552 struct sctp_chunk *chunk,
553 unsigned error) 553 unsigned int error)
554{ 554{
555 struct sctp_ulpevent *event; 555 struct sctp_ulpevent *event;
556 556
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 891f5db8cc31..a147b4d307d2 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -2410,7 +2410,7 @@ static sctp_disposition_t __sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep,
2410 sctp_cmd_seq_t *commands) 2410 sctp_cmd_seq_t *commands)
2411{ 2411{
2412 struct sctp_chunk *chunk = arg; 2412 struct sctp_chunk *chunk = arg;
2413 unsigned len; 2413 unsigned int len;
2414 __be16 error = SCTP_ERROR_NO_ERROR; 2414 __be16 error = SCTP_ERROR_NO_ERROR;
2415 2415
2416 /* See if we have an error cause code in the chunk. */ 2416 /* See if we have an error cause code in the chunk. */
@@ -2446,7 +2446,7 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(const struct sctp_endpoint *ep,
2446 sctp_cmd_seq_t *commands) 2446 sctp_cmd_seq_t *commands)
2447{ 2447{
2448 struct sctp_chunk *chunk = arg; 2448 struct sctp_chunk *chunk = arg;
2449 unsigned len; 2449 unsigned int len;
2450 __be16 error = SCTP_ERROR_NO_ERROR; 2450 __be16 error = SCTP_ERROR_NO_ERROR;
2451 2451
2452 if (!sctp_vtag_verify_either(chunk, asoc)) 2452 if (!sctp_vtag_verify_either(chunk, asoc))
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 60ffbd067ff7..e5fe639c89e7 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -275,22 +275,16 @@ static ctl_table sctp_table[] = {
275 { /* sentinel */ } 275 { /* sentinel */ }
276}; 276};
277 277
278static struct ctl_path sctp_path[] = {
279 { .procname = "net", },
280 { .procname = "sctp", },
281 { }
282};
283
284static struct ctl_table_header * sctp_sysctl_header; 278static struct ctl_table_header * sctp_sysctl_header;
285 279
286/* Sysctl registration. */ 280/* Sysctl registration. */
287void sctp_sysctl_register(void) 281void sctp_sysctl_register(void)
288{ 282{
289 sctp_sysctl_header = register_sysctl_paths(sctp_path, sctp_table); 283 sctp_sysctl_header = register_net_sysctl(&init_net, "net/sctp", sctp_table);
290} 284}
291 285
292/* Sysctl deregistration. */ 286/* Sysctl deregistration. */
293void sctp_sysctl_unregister(void) 287void sctp_sysctl_unregister(void)
294{ 288{
295 unregister_sysctl_table(sctp_sysctl_header); 289 unregister_net_sysctl_table(sctp_sysctl_header);
296} 290}
diff --git a/net/socket.c b/net/socket.c
index 851edcd6b098..d3aaa4f67a3b 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1479,7 +1479,7 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
1479 sock = sockfd_lookup_light(fd, &err, &fput_needed); 1479 sock = sockfd_lookup_light(fd, &err, &fput_needed);
1480 if (sock) { 1480 if (sock) {
1481 somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn; 1481 somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn;
1482 if ((unsigned)backlog > somaxconn) 1482 if ((unsigned int)backlog > somaxconn)
1483 backlog = somaxconn; 1483 backlog = somaxconn;
1484 1484
1485 err = security_socket_listen(sock, backlog); 1485 err = security_socket_listen(sock, backlog);
@@ -1691,7 +1691,7 @@ SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
1691 */ 1691 */
1692 1692
1693SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len, 1693SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
1694 unsigned, flags, struct sockaddr __user *, addr, 1694 unsigned int, flags, struct sockaddr __user *, addr,
1695 int, addr_len) 1695 int, addr_len)
1696{ 1696{
1697 struct socket *sock; 1697 struct socket *sock;
@@ -1738,7 +1738,7 @@ out:
1738 */ 1738 */
1739 1739
1740SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len, 1740SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len,
1741 unsigned, flags) 1741 unsigned int, flags)
1742{ 1742{
1743 return sys_sendto(fd, buff, len, flags, NULL, 0); 1743 return sys_sendto(fd, buff, len, flags, NULL, 0);
1744} 1744}
@@ -1750,7 +1750,7 @@ SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len,
1750 */ 1750 */
1751 1751
1752SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, 1752SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
1753 unsigned, flags, struct sockaddr __user *, addr, 1753 unsigned int, flags, struct sockaddr __user *, addr,
1754 int __user *, addr_len) 1754 int __user *, addr_len)
1755{ 1755{
1756 struct socket *sock; 1756 struct socket *sock;
@@ -1795,7 +1795,7 @@ out:
1795 */ 1795 */
1796 1796
1797asmlinkage long sys_recv(int fd, void __user *ubuf, size_t size, 1797asmlinkage long sys_recv(int fd, void __user *ubuf, size_t size,
1798 unsigned flags) 1798 unsigned int flags)
1799{ 1799{
1800 return sys_recvfrom(fd, ubuf, size, flags, NULL, NULL); 1800 return sys_recvfrom(fd, ubuf, size, flags, NULL, NULL);
1801} 1801}
@@ -1897,7 +1897,7 @@ struct used_address {
1897}; 1897};
1898 1898
1899static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, 1899static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
1900 struct msghdr *msg_sys, unsigned flags, 1900 struct msghdr *msg_sys, unsigned int flags,
1901 struct used_address *used_address) 1901 struct used_address *used_address)
1902{ 1902{
1903 struct compat_msghdr __user *msg_compat = 1903 struct compat_msghdr __user *msg_compat =
@@ -1908,7 +1908,7 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
1908 __attribute__ ((aligned(sizeof(__kernel_size_t)))); 1908 __attribute__ ((aligned(sizeof(__kernel_size_t))));
1909 /* 20 is size of ipv6_pktinfo */ 1909 /* 20 is size of ipv6_pktinfo */
1910 unsigned char *ctl_buf = ctl; 1910 unsigned char *ctl_buf = ctl;
1911 int err, ctl_len, iov_size, total_len; 1911 int err, ctl_len, total_len;
1912 1912
1913 err = -EFAULT; 1913 err = -EFAULT;
1914 if (MSG_CMSG_COMPAT & flags) { 1914 if (MSG_CMSG_COMPAT & flags) {
@@ -1917,16 +1917,13 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
1917 } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) 1917 } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
1918 return -EFAULT; 1918 return -EFAULT;
1919 1919
1920 /* do not move before msg_sys is valid */
1921 err = -EMSGSIZE;
1922 if (msg_sys->msg_iovlen > UIO_MAXIOV)
1923 goto out;
1924
1925 /* Check whether to allocate the iovec area */
1926 err = -ENOMEM;
1927 iov_size = msg_sys->msg_iovlen * sizeof(struct iovec);
1928 if (msg_sys->msg_iovlen > UIO_FASTIOV) { 1920 if (msg_sys->msg_iovlen > UIO_FASTIOV) {
1929 iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL); 1921 err = -EMSGSIZE;
1922 if (msg_sys->msg_iovlen > UIO_MAXIOV)
1923 goto out;
1924 err = -ENOMEM;
1925 iov = kmalloc(msg_sys->msg_iovlen * sizeof(struct iovec),
1926 GFP_KERNEL);
1930 if (!iov) 1927 if (!iov)
1931 goto out; 1928 goto out;
1932 } 1929 }
@@ -2005,7 +2002,7 @@ out_freectl:
2005 sock_kfree_s(sock->sk, ctl_buf, ctl_len); 2002 sock_kfree_s(sock->sk, ctl_buf, ctl_len);
2006out_freeiov: 2003out_freeiov:
2007 if (iov != iovstack) 2004 if (iov != iovstack)
2008 sock_kfree_s(sock->sk, iov, iov_size); 2005 kfree(iov);
2009out: 2006out:
2010 return err; 2007 return err;
2011} 2008}
@@ -2014,7 +2011,7 @@ out:
2014 * BSD sendmsg interface 2011 * BSD sendmsg interface
2015 */ 2012 */
2016 2013
2017SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags) 2014SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags)
2018{ 2015{
2019 int fput_needed, err; 2016 int fput_needed, err;
2020 struct msghdr msg_sys; 2017 struct msghdr msg_sys;
@@ -2096,14 +2093,14 @@ SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg,
2096} 2093}
2097 2094
2098static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg, 2095static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
2099 struct msghdr *msg_sys, unsigned flags, int nosec) 2096 struct msghdr *msg_sys, unsigned int flags, int nosec)
2100{ 2097{
2101 struct compat_msghdr __user *msg_compat = 2098 struct compat_msghdr __user *msg_compat =
2102 (struct compat_msghdr __user *)msg; 2099 (struct compat_msghdr __user *)msg;
2103 struct iovec iovstack[UIO_FASTIOV]; 2100 struct iovec iovstack[UIO_FASTIOV];
2104 struct iovec *iov = iovstack; 2101 struct iovec *iov = iovstack;
2105 unsigned long cmsg_ptr; 2102 unsigned long cmsg_ptr;
2106 int err, iov_size, total_len, len; 2103 int err, total_len, len;
2107 2104
2108 /* kernel mode address */ 2105 /* kernel mode address */
2109 struct sockaddr_storage addr; 2106 struct sockaddr_storage addr;
@@ -2118,15 +2115,13 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
2118 } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) 2115 } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
2119 return -EFAULT; 2116 return -EFAULT;
2120 2117
2121 err = -EMSGSIZE;
2122 if (msg_sys->msg_iovlen > UIO_MAXIOV)
2123 goto out;
2124
2125 /* Check whether to allocate the iovec area */
2126 err = -ENOMEM;
2127 iov_size = msg_sys->msg_iovlen * sizeof(struct iovec);
2128 if (msg_sys->msg_iovlen > UIO_FASTIOV) { 2118 if (msg_sys->msg_iovlen > UIO_FASTIOV) {
2129 iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL); 2119 err = -EMSGSIZE;
2120 if (msg_sys->msg_iovlen > UIO_MAXIOV)
2121 goto out;
2122 err = -ENOMEM;
2123 iov = kmalloc(msg_sys->msg_iovlen * sizeof(struct iovec),
2124 GFP_KERNEL);
2130 if (!iov) 2125 if (!iov)
2131 goto out; 2126 goto out;
2132 } 2127 }
@@ -2180,7 +2175,7 @@ static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
2180 2175
2181out_freeiov: 2176out_freeiov:
2182 if (iov != iovstack) 2177 if (iov != iovstack)
2183 sock_kfree_s(sock->sk, iov, iov_size); 2178 kfree(iov);
2184out: 2179out:
2185 return err; 2180 return err;
2186} 2181}
@@ -2524,6 +2519,12 @@ EXPORT_SYMBOL(sock_unregister);
2524static int __init sock_init(void) 2519static int __init sock_init(void)
2525{ 2520{
2526 int err; 2521 int err;
2522 /*
2523 * Initialize the network sysctl infrastructure.
2524 */
2525 err = net_sysctl_init();
2526 if (err)
2527 goto out;
2527 2528
2528 /* 2529 /*
2529 * Initialize sock SLAB cache. 2530 * Initialize sock SLAB cache.
@@ -3223,7 +3224,7 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
3223 return -ENOIOCTLCMD; 3224 return -ENOIOCTLCMD;
3224} 3225}
3225 3226
3226static long compat_sock_ioctl(struct file *file, unsigned cmd, 3227static long compat_sock_ioctl(struct file *file, unsigned int cmd,
3227 unsigned long arg) 3228 unsigned long arg)
3228{ 3229{
3229 struct socket *sock = file->private_data; 3230 struct socket *sock = file->private_data;
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 8eff8c32d1b9..d3611f11a8df 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -624,7 +624,7 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx,
624 ctx->seq_send = ctx->seq_send64; 624 ctx->seq_send = ctx->seq_send64;
625 if (ctx->seq_send64 != ctx->seq_send) { 625 if (ctx->seq_send64 != ctx->seq_send) {
626 dprintk("%s: seq_send64 %lx, seq_send %x overflow?\n", __func__, 626 dprintk("%s: seq_send64 %lx, seq_send %x overflow?\n", __func__,
627 (long unsigned)ctx->seq_send64, ctx->seq_send); 627 (unsigned long)ctx->seq_send64, ctx->seq_send);
628 p = ERR_PTR(-EINVAL); 628 p = ERR_PTR(-EINVAL);
629 goto out_err; 629 goto out_err;
630 } 630 }
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index de0b0f39d9d8..47ad2666fdf6 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1273,7 +1273,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
1273 __acquires(cd->hash_lock) 1273 __acquires(cd->hash_lock)
1274{ 1274{
1275 loff_t n = *pos; 1275 loff_t n = *pos;
1276 unsigned hash, entry; 1276 unsigned int hash, entry;
1277 struct cache_head *ch; 1277 struct cache_head *ch;
1278 struct cache_detail *cd = ((struct handle*)m->private)->cd; 1278 struct cache_detail *cd = ((struct handle*)m->private)->cd;
1279 1279
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 824d32fb3121..f0132b2e875e 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1556,7 +1556,7 @@ static struct svc_xprt *svc_create_socket(struct svc_serv *serv,
1556 (char *)&val, sizeof(val)); 1556 (char *)&val, sizeof(val));
1557 1557
1558 if (type == SOCK_STREAM) 1558 if (type == SOCK_STREAM)
1559 sock->sk->sk_reuse = 1; /* allow address reuse */ 1559 sock->sk->sk_reuse = SK_CAN_REUSE; /* allow address reuse */
1560 error = kernel_bind(sock, sin, len); 1560 error = kernel_bind(sock, sin, len);
1561 if (error < 0) 1561 if (error < 0)
1562 goto bummer; 1562 goto bummer;
diff --git a/net/sunrpc/timer.c b/net/sunrpc/timer.c
index dd824341c349..08881d0c9672 100644
--- a/net/sunrpc/timer.c
+++ b/net/sunrpc/timer.c
@@ -34,7 +34,7 @@
34void rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo) 34void rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo)
35{ 35{
36 unsigned long init = 0; 36 unsigned long init = 0;
37 unsigned i; 37 unsigned int i;
38 38
39 rt->timeo = timeo; 39 rt->timeo = timeo;
40 40
@@ -57,7 +57,7 @@ EXPORT_SYMBOL_GPL(rpc_init_rtt);
57 * NB: When computing the smoothed RTT and standard deviation, 57 * NB: When computing the smoothed RTT and standard deviation,
58 * be careful not to produce negative intermediate results. 58 * be careful not to produce negative intermediate results.
59 */ 59 */
60void rpc_update_rtt(struct rpc_rtt *rt, unsigned timer, long m) 60void rpc_update_rtt(struct rpc_rtt *rt, unsigned int timer, long m)
61{ 61{
62 long *srtt, *sdrtt; 62 long *srtt, *sdrtt;
63 63
@@ -106,7 +106,7 @@ EXPORT_SYMBOL_GPL(rpc_update_rtt);
106 * read, write, commit - A+4D 106 * read, write, commit - A+4D
107 * other - timeo 107 * other - timeo
108 */ 108 */
109unsigned long rpc_calc_rto(struct rpc_rtt *rt, unsigned timer) 109unsigned long rpc_calc_rto(struct rpc_rtt *rt, unsigned int timer)
110{ 110{
111 unsigned long res; 111 unsigned long res;
112 112
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index b97a3dd9a60a..fddcccfcdf76 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -1204,7 +1204,7 @@ xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
1204 int (*actor)(struct scatterlist *, void *), void *data) 1204 int (*actor)(struct scatterlist *, void *), void *data)
1205{ 1205{
1206 int i, ret = 0; 1206 int i, ret = 0;
1207 unsigned page_len, thislen, page_offset; 1207 unsigned int page_len, thislen, page_offset;
1208 struct scatterlist sg[1]; 1208 struct scatterlist sg[1];
1209 1209
1210 sg_init_table(sg, 1); 1210 sg_init_table(sg, 1);
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 0cbcd1ab49ab..6fe2dcead150 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -783,7 +783,7 @@ static void xprt_update_rtt(struct rpc_task *task)
783{ 783{
784 struct rpc_rqst *req = task->tk_rqstp; 784 struct rpc_rqst *req = task->tk_rqstp;
785 struct rpc_rtt *rtt = task->tk_client->cl_rtt; 785 struct rpc_rtt *rtt = task->tk_client->cl_rtt;
786 unsigned timer = task->tk_msg.rpc_proc->p_timer; 786 unsigned int timer = task->tk_msg.rpc_proc->p_timer;
787 long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt)); 787 long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));
788 788
789 if (timer) { 789 if (timer) {
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index c3e65aebecc0..f3e813a8d107 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -59,19 +59,6 @@ static struct ctl_table_root net_sysctl_root = {
59 .permissions = net_ctl_permissions, 59 .permissions = net_ctl_permissions,
60}; 60};
61 61
62static int net_ctl_ro_header_perms(struct ctl_table_root *root,
63 struct nsproxy *namespaces, struct ctl_table *table)
64{
65 if (net_eq(namespaces->net_ns, &init_net))
66 return table->mode;
67 else
68 return table->mode & ~0222;
69}
70
71static struct ctl_table_root net_sysctl_ro_root = {
72 .permissions = net_ctl_ro_header_perms,
73};
74
75static int __net_init sysctl_net_init(struct net *net) 62static int __net_init sysctl_net_init(struct net *net)
76{ 63{
77 setup_sysctl_set(&net->sysctls, &net_sysctl_root, is_seen); 64 setup_sysctl_set(&net->sysctls, &net_sysctl_root, is_seen);
@@ -88,34 +75,32 @@ static struct pernet_operations sysctl_pernet_ops = {
88 .exit = sysctl_net_exit, 75 .exit = sysctl_net_exit,
89}; 76};
90 77
91static __init int net_sysctl_init(void) 78static struct ctl_table_header *net_header;
79__init int net_sysctl_init(void)
92{ 80{
93 int ret; 81 static struct ctl_table empty[1];
82 int ret = -ENOMEM;
83 /* Avoid limitations in the sysctl implementation by
84 * registering "/proc/sys/net" as an empty directory not in a
85 * network namespace.
86 */
87 net_header = register_sysctl("net", empty);
88 if (!net_header)
89 goto out;
94 ret = register_pernet_subsys(&sysctl_pernet_ops); 90 ret = register_pernet_subsys(&sysctl_pernet_ops);
95 if (ret) 91 if (ret)
96 goto out; 92 goto out;
97 setup_sysctl_set(&net_sysctl_ro_root.default_set, &net_sysctl_ro_root, NULL);
98 register_sysctl_root(&net_sysctl_ro_root);
99 register_sysctl_root(&net_sysctl_root); 93 register_sysctl_root(&net_sysctl_root);
100out: 94out:
101 return ret; 95 return ret;
102} 96}
103subsys_initcall(net_sysctl_init);
104
105struct ctl_table_header *register_net_sysctl_table(struct net *net,
106 const struct ctl_path *path, struct ctl_table *table)
107{
108 return __register_sysctl_paths(&net->sysctls, path, table);
109}
110EXPORT_SYMBOL_GPL(register_net_sysctl_table);
111 97
112struct ctl_table_header *register_net_sysctl_rotable(const 98struct ctl_table_header *register_net_sysctl(struct net *net,
113 struct ctl_path *path, struct ctl_table *table) 99 const char *path, struct ctl_table *table)
114{ 100{
115 return __register_sysctl_paths(&net_sysctl_ro_root.default_set, 101 return __register_sysctl_table(&net->sysctls, path, table);
116 path, table);
117} 102}
118EXPORT_SYMBOL_GPL(register_net_sysctl_rotable); 103EXPORT_SYMBOL_GPL(register_net_sysctl);
119 104
120void unregister_net_sysctl_table(struct ctl_table_header *header) 105void unregister_net_sysctl_table(struct ctl_table_header *header)
121{ 106{
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
index 521d24d04ab2..6cd55d671d3a 100644
--- a/net/tipc/Makefile
+++ b/net/tipc/Makefile
@@ -9,5 +9,3 @@ tipc-y += addr.o bcast.o bearer.o config.o \
9 name_distr.o subscr.o name_table.o net.o \ 9 name_distr.o subscr.o name_table.o net.o \
10 netlink.o node.o node_subscr.o port.o ref.o \ 10 netlink.o node.o node_subscr.o port.o ref.o \
11 socket.o log.o eth_media.o 11 socket.o log.o eth_media.o
12
13# End of file
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index a6fdab33877e..357b74b26f9e 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
@@ -45,7 +45,6 @@
45 * 45 *
46 * Returns 1 if domain address is valid, otherwise 0 46 * Returns 1 if domain address is valid, otherwise 0
47 */ 47 */
48
49int tipc_addr_domain_valid(u32 addr) 48int tipc_addr_domain_valid(u32 addr)
50{ 49{
51 u32 n = tipc_node(addr); 50 u32 n = tipc_node(addr);
@@ -66,7 +65,6 @@ int tipc_addr_domain_valid(u32 addr)
66 * 65 *
67 * Returns 1 if address can be used, otherwise 0 66 * Returns 1 if address can be used, otherwise 0
68 */ 67 */
69
70int tipc_addr_node_valid(u32 addr) 68int tipc_addr_node_valid(u32 addr)
71{ 69{
72 return tipc_addr_domain_valid(addr) && tipc_node(addr); 70 return tipc_addr_domain_valid(addr) && tipc_node(addr);
@@ -86,7 +84,6 @@ int tipc_in_scope(u32 domain, u32 addr)
86/** 84/**
87 * tipc_addr_scope - convert message lookup domain to a 2-bit scope value 85 * tipc_addr_scope - convert message lookup domain to a 2-bit scope value
88 */ 86 */
89
90int tipc_addr_scope(u32 domain) 87int tipc_addr_scope(u32 domain)
91{ 88{
92 if (likely(!domain)) 89 if (likely(!domain))
diff --git a/net/tipc/addr.h b/net/tipc/addr.h
index e4f35afe3207..60b00ab93d74 100644
--- a/net/tipc/addr.h
+++ b/net/tipc/addr.h
@@ -50,18 +50,33 @@ static inline u32 tipc_cluster_mask(u32 addr)
50 return addr & TIPC_CLUSTER_MASK; 50 return addr & TIPC_CLUSTER_MASK;
51} 51}
52 52
53static inline int in_own_cluster(u32 addr) 53static inline int in_own_cluster_exact(u32 addr)
54{ 54{
55 return !((addr ^ tipc_own_addr) >> 12); 55 return !((addr ^ tipc_own_addr) >> 12);
56} 56}
57 57
58/** 58/**
59 * in_own_node - test for node inclusion; <0.0.0> always matches
60 */
61static inline int in_own_node(u32 addr)
62{
63 return (addr == tipc_own_addr) || !addr;
64}
65
66/**
67 * in_own_cluster - test for cluster inclusion; <0.0.0> always matches
68 */
69static inline int in_own_cluster(u32 addr)
70{
71 return in_own_cluster_exact(addr) || !addr;
72}
73
74/**
59 * addr_domain - convert 2-bit scope value to equivalent message lookup domain 75 * addr_domain - convert 2-bit scope value to equivalent message lookup domain
60 * 76 *
61 * Needed when address of a named message must be looked up a second time 77 * Needed when address of a named message must be looked up a second time
62 * after a network hop. 78 * after a network hop.
63 */ 79 */
64
65static inline u32 addr_domain(u32 sc) 80static inline u32 addr_domain(u32 sc)
66{ 81{
67 if (likely(sc == TIPC_NODE_SCOPE)) 82 if (likely(sc == TIPC_NODE_SCOPE))
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index e00441a2092f..2625f5ebe3e8 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -73,7 +73,6 @@ struct tipc_bcbearer_pair {
73 * large local variables within multicast routines. Concurrent access is 73 * large local variables within multicast routines. Concurrent access is
74 * prevented through use of the spinlock "bc_lock". 74 * prevented through use of the spinlock "bc_lock".
75 */ 75 */
76
77struct tipc_bcbearer { 76struct tipc_bcbearer {
78 struct tipc_bearer bearer; 77 struct tipc_bearer bearer;
79 struct tipc_media media; 78 struct tipc_media media;
@@ -92,7 +91,6 @@ struct tipc_bcbearer {
92 * 91 *
93 * Handles sequence numbering, fragmentation, bundling, etc. 92 * Handles sequence numbering, fragmentation, bundling, etc.
94 */ 93 */
95
96struct tipc_bclink { 94struct tipc_bclink {
97 struct tipc_link link; 95 struct tipc_link link;
98 struct tipc_node node; 96 struct tipc_node node;
@@ -169,7 +167,6 @@ static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
169 * 167 *
170 * Called with bc_lock locked 168 * Called with bc_lock locked
171 */ 169 */
172
173struct tipc_node *tipc_bclink_retransmit_to(void) 170struct tipc_node *tipc_bclink_retransmit_to(void)
174{ 171{
175 return bclink->retransmit_to; 172 return bclink->retransmit_to;
@@ -182,7 +179,6 @@ struct tipc_node *tipc_bclink_retransmit_to(void)
182 * 179 *
183 * Called with bc_lock locked 180 * Called with bc_lock locked
184 */ 181 */
185
186static void bclink_retransmit_pkt(u32 after, u32 to) 182static void bclink_retransmit_pkt(u32 after, u32 to)
187{ 183{
188 struct sk_buff *buf; 184 struct sk_buff *buf;
@@ -200,7 +196,6 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
200 * 196 *
201 * Node is locked, bc_lock unlocked. 197 * Node is locked, bc_lock unlocked.
202 */ 198 */
203
204void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked) 199void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
205{ 200{
206 struct sk_buff *crs; 201 struct sk_buff *crs;
@@ -280,7 +275,6 @@ exit:
280 * 275 *
281 * tipc_net_lock and node lock set 276 * tipc_net_lock and node lock set
282 */ 277 */
283
284void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent) 278void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
285{ 279{
286 struct sk_buff *buf; 280 struct sk_buff *buf;
@@ -344,7 +338,6 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
344 * 338 *
345 * Only tipc_net_lock set. 339 * Only tipc_net_lock set.
346 */ 340 */
347
348static void bclink_peek_nack(struct tipc_msg *msg) 341static void bclink_peek_nack(struct tipc_msg *msg)
349{ 342{
350 struct tipc_node *n_ptr = tipc_node_find(msg_destnode(msg)); 343 struct tipc_node *n_ptr = tipc_node_find(msg_destnode(msg));
@@ -365,7 +358,6 @@ static void bclink_peek_nack(struct tipc_msg *msg)
365/* 358/*
366 * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster 359 * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster
367 */ 360 */
368
369int tipc_bclink_send_msg(struct sk_buff *buf) 361int tipc_bclink_send_msg(struct sk_buff *buf)
370{ 362{
371 int res; 363 int res;
@@ -394,7 +386,6 @@ exit:
394 * 386 *
395 * Called with both sending node's lock and bc_lock taken. 387 * Called with both sending node's lock and bc_lock taken.
396 */ 388 */
397
398static void bclink_accept_pkt(struct tipc_node *node, u32 seqno) 389static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
399{ 390{
400 bclink_update_last_sent(node, seqno); 391 bclink_update_last_sent(node, seqno);
@@ -420,7 +411,6 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
420 * 411 *
421 * tipc_net_lock is read_locked, no other locks set 412 * tipc_net_lock is read_locked, no other locks set
422 */ 413 */
423
424void tipc_bclink_recv_pkt(struct sk_buff *buf) 414void tipc_bclink_recv_pkt(struct sk_buff *buf)
425{ 415{
426 struct tipc_msg *msg = buf_msg(buf); 416 struct tipc_msg *msg = buf_msg(buf);
@@ -588,7 +578,6 @@ u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
588 * Returns 0 (packet sent successfully) under all circumstances, 578 * Returns 0 (packet sent successfully) under all circumstances,
589 * since the broadcast link's pseudo-bearer never blocks 579 * since the broadcast link's pseudo-bearer never blocks
590 */ 580 */
591
592static int tipc_bcbearer_send(struct sk_buff *buf, 581static int tipc_bcbearer_send(struct sk_buff *buf,
593 struct tipc_bearer *unused1, 582 struct tipc_bearer *unused1,
594 struct tipc_media_addr *unused2) 583 struct tipc_media_addr *unused2)
@@ -601,7 +590,6 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
601 * preparation is skipped for broadcast link protocol messages 590 * preparation is skipped for broadcast link protocol messages
602 * since they are sent in an unreliable manner and don't need it 591 * since they are sent in an unreliable manner and don't need it
603 */ 592 */
604
605 if (likely(!msg_non_seq(buf_msg(buf)))) { 593 if (likely(!msg_non_seq(buf_msg(buf)))) {
606 struct tipc_msg *msg; 594 struct tipc_msg *msg;
607 595
@@ -618,7 +606,6 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
618 } 606 }
619 607
620 /* Send buffer over bearers until all targets reached */ 608 /* Send buffer over bearers until all targets reached */
621
622 bcbearer->remains = bclink->bcast_nodes; 609 bcbearer->remains = bclink->bcast_nodes;
623 610
624 for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) { 611 for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
@@ -660,7 +647,6 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
660/** 647/**
661 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer 648 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
662 */ 649 */
663
664void tipc_bcbearer_sort(void) 650void tipc_bcbearer_sort(void)
665{ 651{
666 struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp; 652 struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
@@ -671,7 +657,6 @@ void tipc_bcbearer_sort(void)
671 spin_lock_bh(&bc_lock); 657 spin_lock_bh(&bc_lock);
672 658
673 /* Group bearers by priority (can assume max of two per priority) */ 659 /* Group bearers by priority (can assume max of two per priority) */
674
675 memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp)); 660 memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));
676 661
677 for (b_index = 0; b_index < MAX_BEARERS; b_index++) { 662 for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
@@ -687,7 +672,6 @@ void tipc_bcbearer_sort(void)
687 } 672 }
688 673
689 /* Create array of bearer pairs for broadcasting */ 674 /* Create array of bearer pairs for broadcasting */
690
691 bp_curr = bcbearer->bpairs; 675 bp_curr = bcbearer->bpairs;
692 memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs)); 676 memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));
693 677
@@ -817,7 +801,6 @@ void tipc_bclink_stop(void)
817/** 801/**
818 * tipc_nmap_add - add a node to a node map 802 * tipc_nmap_add - add a node to a node map
819 */ 803 */
820
821void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node) 804void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
822{ 805{
823 int n = tipc_node(node); 806 int n = tipc_node(node);
@@ -833,7 +816,6 @@ void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
833/** 816/**
834 * tipc_nmap_remove - remove a node from a node map 817 * tipc_nmap_remove - remove a node from a node map
835 */ 818 */
836
837void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node) 819void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
838{ 820{
839 int n = tipc_node(node); 821 int n = tipc_node(node);
@@ -852,7 +834,6 @@ void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
852 * @nm_b: input node map B 834 * @nm_b: input node map B
853 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B) 835 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
854 */ 836 */
855
856static void tipc_nmap_diff(struct tipc_node_map *nm_a, 837static void tipc_nmap_diff(struct tipc_node_map *nm_a,
857 struct tipc_node_map *nm_b, 838 struct tipc_node_map *nm_b,
858 struct tipc_node_map *nm_diff) 839 struct tipc_node_map *nm_diff)
@@ -878,7 +859,6 @@ static void tipc_nmap_diff(struct tipc_node_map *nm_a,
878/** 859/**
879 * tipc_port_list_add - add a port to a port list, ensuring no duplicates 860 * tipc_port_list_add - add a port to a port list, ensuring no duplicates
880 */ 861 */
881
882void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port) 862void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port)
883{ 863{
884 struct tipc_port_list *item = pl_ptr; 864 struct tipc_port_list *item = pl_ptr;
@@ -912,7 +892,6 @@ void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port)
912 * tipc_port_list_free - free dynamically created entries in port_list chain 892 * tipc_port_list_free - free dynamically created entries in port_list chain
913 * 893 *
914 */ 894 */
915
916void tipc_port_list_free(struct tipc_port_list *pl_ptr) 895void tipc_port_list_free(struct tipc_port_list *pl_ptr)
917{ 896{
918 struct tipc_port_list *item; 897 struct tipc_port_list *item;
@@ -923,4 +902,3 @@ void tipc_port_list_free(struct tipc_port_list *pl_ptr)
923 kfree(item); 902 kfree(item);
924 } 903 }
925} 904}
926
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 5571394098f9..a93306557e00 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -45,7 +45,6 @@
45 * @count: # of nodes in set 45 * @count: # of nodes in set
46 * @map: bitmap of node identifiers that are in the set 46 * @map: bitmap of node identifiers that are in the set
47 */ 47 */
48
49struct tipc_node_map { 48struct tipc_node_map {
50 u32 count; 49 u32 count;
51 u32 map[MAX_NODES / WSIZE]; 50 u32 map[MAX_NODES / WSIZE];
@@ -59,7 +58,6 @@ struct tipc_node_map {
59 * @next: pointer to next entry in list 58 * @next: pointer to next entry in list
60 * @ports: array of port references 59 * @ports: array of port references
61 */ 60 */
62
63struct tipc_port_list { 61struct tipc_port_list {
64 int count; 62 int count;
65 struct tipc_port_list *next; 63 struct tipc_port_list *next;
@@ -77,7 +75,6 @@ void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
77/** 75/**
78 * tipc_nmap_equal - test for equality of node maps 76 * tipc_nmap_equal - test for equality of node maps
79 */ 77 */
80
81static inline int tipc_nmap_equal(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b) 78static inline int tipc_nmap_equal(struct tipc_node_map *nm_a, struct tipc_node_map *nm_b)
82{ 79{
83 return !memcmp(nm_a, nm_b, sizeof(*nm_a)); 80 return !memcmp(nm_a, nm_b, sizeof(*nm_a));
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 5dfd89c40429..a297e3a2e3e7 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -53,7 +53,6 @@ static void bearer_disable(struct tipc_bearer *b_ptr);
53 * 53 *
54 * Returns 1 if media name is valid, otherwise 0. 54 * Returns 1 if media name is valid, otherwise 0.
55 */ 55 */
56
57static int media_name_valid(const char *name) 56static int media_name_valid(const char *name)
58{ 57{
59 u32 len; 58 u32 len;
@@ -67,7 +66,6 @@ static int media_name_valid(const char *name)
67/** 66/**
68 * tipc_media_find - locates specified media object by name 67 * tipc_media_find - locates specified media object by name
69 */ 68 */
70
71struct tipc_media *tipc_media_find(const char *name) 69struct tipc_media *tipc_media_find(const char *name)
72{ 70{
73 u32 i; 71 u32 i;
@@ -82,7 +80,6 @@ struct tipc_media *tipc_media_find(const char *name)
82/** 80/**
83 * media_find_id - locates specified media object by type identifier 81 * media_find_id - locates specified media object by type identifier
84 */ 82 */
85
86static struct tipc_media *media_find_id(u8 type) 83static struct tipc_media *media_find_id(u8 type)
87{ 84{
88 u32 i; 85 u32 i;
@@ -99,7 +96,6 @@ static struct tipc_media *media_find_id(u8 type)
99 * 96 *
100 * Bearers for this media type must be activated separately at a later stage. 97 * Bearers for this media type must be activated separately at a later stage.
101 */ 98 */
102
103int tipc_register_media(struct tipc_media *m_ptr) 99int tipc_register_media(struct tipc_media *m_ptr)
104{ 100{
105 int res = -EINVAL; 101 int res = -EINVAL;
@@ -134,7 +130,6 @@ exit:
134/** 130/**
135 * tipc_media_addr_printf - record media address in print buffer 131 * tipc_media_addr_printf - record media address in print buffer
136 */ 132 */
137
138void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a) 133void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a)
139{ 134{
140 char addr_str[MAX_ADDR_STR]; 135 char addr_str[MAX_ADDR_STR];
@@ -156,7 +151,6 @@ void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a)
156/** 151/**
157 * tipc_media_get_names - record names of registered media in buffer 152 * tipc_media_get_names - record names of registered media in buffer
158 */ 153 */
159
160struct sk_buff *tipc_media_get_names(void) 154struct sk_buff *tipc_media_get_names(void)
161{ 155{
162 struct sk_buff *buf; 156 struct sk_buff *buf;
@@ -183,7 +177,6 @@ struct sk_buff *tipc_media_get_names(void)
183 * 177 *
184 * Returns 1 if bearer name is valid, otherwise 0. 178 * Returns 1 if bearer name is valid, otherwise 0.
185 */ 179 */
186
187static int bearer_name_validate(const char *name, 180static int bearer_name_validate(const char *name,
188 struct tipc_bearer_names *name_parts) 181 struct tipc_bearer_names *name_parts)
189{ 182{
@@ -194,7 +187,6 @@ static int bearer_name_validate(const char *name,
194 u32 if_len; 187 u32 if_len;
195 188
196 /* copy bearer name & ensure length is OK */ 189 /* copy bearer name & ensure length is OK */
197
198 name_copy[TIPC_MAX_BEARER_NAME - 1] = 0; 190 name_copy[TIPC_MAX_BEARER_NAME - 1] = 0;
199 /* need above in case non-Posix strncpy() doesn't pad with nulls */ 191 /* need above in case non-Posix strncpy() doesn't pad with nulls */
200 strncpy(name_copy, name, TIPC_MAX_BEARER_NAME); 192 strncpy(name_copy, name, TIPC_MAX_BEARER_NAME);
@@ -202,7 +194,6 @@ static int bearer_name_validate(const char *name,
202 return 0; 194 return 0;
203 195
204 /* ensure all component parts of bearer name are present */ 196 /* ensure all component parts of bearer name are present */
205
206 media_name = name_copy; 197 media_name = name_copy;
207 if_name = strchr(media_name, ':'); 198 if_name = strchr(media_name, ':');
208 if (if_name == NULL) 199 if (if_name == NULL)
@@ -212,7 +203,6 @@ static int bearer_name_validate(const char *name,
212 if_len = strlen(if_name) + 1; 203 if_len = strlen(if_name) + 1;
213 204
214 /* validate component parts of bearer name */ 205 /* validate component parts of bearer name */
215
216 if ((media_len <= 1) || (media_len > TIPC_MAX_MEDIA_NAME) || 206 if ((media_len <= 1) || (media_len > TIPC_MAX_MEDIA_NAME) ||
217 (if_len <= 1) || (if_len > TIPC_MAX_IF_NAME) || 207 (if_len <= 1) || (if_len > TIPC_MAX_IF_NAME) ||
218 (strspn(media_name, tipc_alphabet) != (media_len - 1)) || 208 (strspn(media_name, tipc_alphabet) != (media_len - 1)) ||
@@ -220,7 +210,6 @@ static int bearer_name_validate(const char *name,
220 return 0; 210 return 0;
221 211
222 /* return bearer name components, if necessary */ 212 /* return bearer name components, if necessary */
223
224 if (name_parts) { 213 if (name_parts) {
225 strcpy(name_parts->media_name, media_name); 214 strcpy(name_parts->media_name, media_name);
226 strcpy(name_parts->if_name, if_name); 215 strcpy(name_parts->if_name, if_name);
@@ -231,7 +220,6 @@ static int bearer_name_validate(const char *name,
231/** 220/**
232 * tipc_bearer_find - locates bearer object with matching bearer name 221 * tipc_bearer_find - locates bearer object with matching bearer name
233 */ 222 */
234
235struct tipc_bearer *tipc_bearer_find(const char *name) 223struct tipc_bearer *tipc_bearer_find(const char *name)
236{ 224{
237 struct tipc_bearer *b_ptr; 225 struct tipc_bearer *b_ptr;
@@ -247,7 +235,6 @@ struct tipc_bearer *tipc_bearer_find(const char *name)
247/** 235/**
248 * tipc_bearer_find_interface - locates bearer object with matching interface name 236 * tipc_bearer_find_interface - locates bearer object with matching interface name
249 */ 237 */
250
251struct tipc_bearer *tipc_bearer_find_interface(const char *if_name) 238struct tipc_bearer *tipc_bearer_find_interface(const char *if_name)
252{ 239{
253 struct tipc_bearer *b_ptr; 240 struct tipc_bearer *b_ptr;
@@ -267,7 +254,6 @@ struct tipc_bearer *tipc_bearer_find_interface(const char *if_name)
267/** 254/**
268 * tipc_bearer_get_names - record names of bearers in buffer 255 * tipc_bearer_get_names - record names of bearers in buffer
269 */ 256 */
270
271struct sk_buff *tipc_bearer_get_names(void) 257struct sk_buff *tipc_bearer_get_names(void)
272{ 258{
273 struct sk_buff *buf; 259 struct sk_buff *buf;
@@ -363,7 +349,6 @@ void tipc_continue(struct tipc_bearer *b_ptr)
363 * the bearer is congested. 'tipc_net_lock' is in read_lock here 349 * the bearer is congested. 'tipc_net_lock' is in read_lock here
364 * bearer.lock is busy 350 * bearer.lock is busy
365 */ 351 */
366
367static void tipc_bearer_schedule_unlocked(struct tipc_bearer *b_ptr, 352static void tipc_bearer_schedule_unlocked(struct tipc_bearer *b_ptr,
368 struct tipc_link *l_ptr) 353 struct tipc_link *l_ptr)
369{ 354{
@@ -377,7 +362,6 @@ static void tipc_bearer_schedule_unlocked(struct tipc_bearer *b_ptr,
377 * the bearer is congested. 'tipc_net_lock' is in read_lock here, 362 * the bearer is congested. 'tipc_net_lock' is in read_lock here,
378 * bearer.lock is free 363 * bearer.lock is free
379 */ 364 */
380
381void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr) 365void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr)
382{ 366{
383 spin_lock_bh(&b_ptr->lock); 367 spin_lock_bh(&b_ptr->lock);
@@ -410,7 +394,6 @@ int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr,
410/** 394/**
411 * tipc_bearer_congested - determines if bearer is currently congested 395 * tipc_bearer_congested - determines if bearer is currently congested
412 */ 396 */
413
414int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr) 397int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr)
415{ 398{
416 if (unlikely(b_ptr->blocked)) 399 if (unlikely(b_ptr->blocked))
@@ -423,7 +406,6 @@ int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr)
423/** 406/**
424 * tipc_enable_bearer - enable bearer with the given name 407 * tipc_enable_bearer - enable bearer with the given name
425 */ 408 */
426
427int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority) 409int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
428{ 410{
429 struct tipc_bearer *b_ptr; 411 struct tipc_bearer *b_ptr;
@@ -449,7 +431,7 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
449 if (tipc_in_scope(disc_domain, tipc_own_addr)) { 431 if (tipc_in_scope(disc_domain, tipc_own_addr)) {
450 disc_domain = tipc_own_addr & TIPC_CLUSTER_MASK; 432 disc_domain = tipc_own_addr & TIPC_CLUSTER_MASK;
451 res = 0; /* accept any node in own cluster */ 433 res = 0; /* accept any node in own cluster */
452 } else if (in_own_cluster(disc_domain)) 434 } else if (in_own_cluster_exact(disc_domain))
453 res = 0; /* accept specified node in own cluster */ 435 res = 0; /* accept specified node in own cluster */
454 } 436 }
455 if (res) { 437 if (res) {
@@ -541,7 +523,6 @@ exit:
541 * tipc_block_bearer(): Block the bearer with the given name, 523 * tipc_block_bearer(): Block the bearer with the given name,
542 * and reset all its links 524 * and reset all its links
543 */ 525 */
544
545int tipc_block_bearer(const char *name) 526int tipc_block_bearer(const char *name)
546{ 527{
547 struct tipc_bearer *b_ptr = NULL; 528 struct tipc_bearer *b_ptr = NULL;
@@ -573,11 +554,10 @@ int tipc_block_bearer(const char *name)
573} 554}
574 555
575/** 556/**
576 * bearer_disable - 557 * bearer_disable
577 * 558 *
578 * Note: This routine assumes caller holds tipc_net_lock. 559 * Note: This routine assumes caller holds tipc_net_lock.
579 */ 560 */
580
581static void bearer_disable(struct tipc_bearer *b_ptr) 561static void bearer_disable(struct tipc_bearer *b_ptr)
582{ 562{
583 struct tipc_link *l_ptr; 563 struct tipc_link *l_ptr;
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index d3eac56b8c21..e3b2be37fb31 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -49,7 +49,6 @@
49 * - media type identifier located at offset 3 49 * - media type identifier located at offset 3
50 * - remaining bytes vary according to media type 50 * - remaining bytes vary according to media type
51 */ 51 */
52
53#define TIPC_MEDIA_ADDR_SIZE 20 52#define TIPC_MEDIA_ADDR_SIZE 20
54#define TIPC_MEDIA_TYPE_OFFSET 3 53#define TIPC_MEDIA_TYPE_OFFSET 3
55 54
@@ -64,7 +63,6 @@
64 * @media_id: TIPC media type identifier 63 * @media_id: TIPC media type identifier
65 * @broadcast: non-zero if address is a broadcast address 64 * @broadcast: non-zero if address is a broadcast address
66 */ 65 */
67
68struct tipc_media_addr { 66struct tipc_media_addr {
69 u8 value[TIPC_MEDIA_ADDR_SIZE]; 67 u8 value[TIPC_MEDIA_ADDR_SIZE];
70 u8 media_id; 68 u8 media_id;
@@ -89,7 +87,6 @@ struct tipc_bearer;
89 * @type_id: TIPC media identifier 87 * @type_id: TIPC media identifier
90 * @name: media name 88 * @name: media name
91 */ 89 */
92
93struct tipc_media { 90struct tipc_media {
94 int (*send_msg)(struct sk_buff *buf, 91 int (*send_msg)(struct sk_buff *buf,
95 struct tipc_bearer *b_ptr, 92 struct tipc_bearer *b_ptr,
@@ -216,7 +213,6 @@ void tipc_bearer_lock_push(struct tipc_bearer *b_ptr);
216 * send routine always returns success -- even if the buffer was not sent -- 213 * send routine always returns success -- even if the buffer was not sent --
217 * and let TIPC's link code deal with the undelivered message. 214 * and let TIPC's link code deal with the undelivered message.
218 */ 215 */
219
220static inline int tipc_bearer_send(struct tipc_bearer *b_ptr, 216static inline int tipc_bearer_send(struct tipc_bearer *b_ptr,
221 struct sk_buff *buf, 217 struct sk_buff *buf,
222 struct tipc_media_addr *dest) 218 struct tipc_media_addr *dest)
diff --git a/net/tipc/config.c b/net/tipc/config.c
index f76d3b15e4e2..c5712a343810 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -131,7 +131,6 @@ static struct sk_buff *tipc_show_stats(void)
131 tipc_printf(&pb, "TIPC version " TIPC_MOD_VER "\n"); 131 tipc_printf(&pb, "TIPC version " TIPC_MOD_VER "\n");
132 132
133 /* Use additional tipc_printf()'s to return more info ... */ 133 /* Use additional tipc_printf()'s to return more info ... */
134
135 str_len = tipc_printbuf_validate(&pb); 134 str_len = tipc_printbuf_validate(&pb);
136 skb_put(buf, TLV_SPACE(str_len)); 135 skb_put(buf, TLV_SPACE(str_len));
137 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); 136 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
@@ -191,7 +190,6 @@ static struct sk_buff *cfg_set_own_addr(void)
191 * configuration commands can't be received until a local configuration 190 * configuration commands can't be received until a local configuration
192 * command to enable the first bearer is received and processed. 191 * command to enable the first bearer is received and processed.
193 */ 192 */
194
195 spin_unlock_bh(&config_lock); 193 spin_unlock_bh(&config_lock);
196 tipc_core_start_net(addr); 194 tipc_core_start_net(addr);
197 spin_lock_bh(&config_lock); 195 spin_lock_bh(&config_lock);
@@ -283,14 +281,12 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
283 spin_lock_bh(&config_lock); 281 spin_lock_bh(&config_lock);
284 282
285 /* Save request and reply details in a well-known location */ 283 /* Save request and reply details in a well-known location */
286
287 req_tlv_area = request_area; 284 req_tlv_area = request_area;
288 req_tlv_space = request_space; 285 req_tlv_space = request_space;
289 rep_headroom = reply_headroom; 286 rep_headroom = reply_headroom;
290 287
291 /* Check command authorization */ 288 /* Check command authorization */
292 289 if (likely(in_own_node(orig_node))) {
293 if (likely(orig_node == tipc_own_addr)) {
294 /* command is permitted */ 290 /* command is permitted */
295 } else if (cmd >= 0x8000) { 291 } else if (cmd >= 0x8000) {
296 rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 292 rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
@@ -310,7 +306,6 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
310 } 306 }
311 307
312 /* Call appropriate processing routine */ 308 /* Call appropriate processing routine */
313
314 switch (cmd) { 309 switch (cmd) {
315 case TIPC_CMD_NOOP: 310 case TIPC_CMD_NOOP:
316 rep_tlv_buf = tipc_cfg_reply_none(); 311 rep_tlv_buf = tipc_cfg_reply_none();
@@ -433,7 +428,6 @@ static void cfg_named_msg_event(void *userdata,
433 struct sk_buff *rep_buf; 428 struct sk_buff *rep_buf;
434 429
435 /* Validate configuration message header (ignore invalid message) */ 430 /* Validate configuration message header (ignore invalid message) */
436
437 req_hdr = (struct tipc_cfg_msg_hdr *)msg; 431 req_hdr = (struct tipc_cfg_msg_hdr *)msg;
438 if ((size < sizeof(*req_hdr)) || 432 if ((size < sizeof(*req_hdr)) ||
439 (size != TCM_ALIGN(ntohl(req_hdr->tcm_len))) || 433 (size != TCM_ALIGN(ntohl(req_hdr->tcm_len))) ||
@@ -443,7 +437,6 @@ static void cfg_named_msg_event(void *userdata,
443 } 437 }
444 438
445 /* Generate reply for request (if can't, return request) */ 439 /* Generate reply for request (if can't, return request) */
446
447 rep_buf = tipc_cfg_do_cmd(orig->node, 440 rep_buf = tipc_cfg_do_cmd(orig->node,
448 ntohs(req_hdr->tcm_type), 441 ntohs(req_hdr->tcm_type),
449 msg + sizeof(*req_hdr), 442 msg + sizeof(*req_hdr),
@@ -489,10 +482,23 @@ failed:
489 return res; 482 return res;
490} 483}
491 484
485void tipc_cfg_reinit(void)
486{
487 struct tipc_name_seq seq;
488 int res;
489
490 seq.type = TIPC_CFG_SRV;
491 seq.lower = seq.upper = 0;
492 tipc_withdraw(config_port_ref, TIPC_ZONE_SCOPE, &seq);
493
494 seq.lower = seq.upper = tipc_own_addr;
495 res = tipc_publish(config_port_ref, TIPC_ZONE_SCOPE, &seq);
496 if (res)
497 err("Unable to reinitialize configuration service\n");
498}
499
492void tipc_cfg_stop(void) 500void tipc_cfg_stop(void)
493{ 501{
494 if (config_port_ref) { 502 tipc_deleteport(config_port_ref);
495 tipc_deleteport(config_port_ref); 503 config_port_ref = 0;
496 config_port_ref = 0;
497 }
498} 504}
diff --git a/net/tipc/config.h b/net/tipc/config.h
index 80da6ebc2785..1f252f3fa058 100644
--- a/net/tipc/config.h
+++ b/net/tipc/config.h
@@ -66,6 +66,7 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd,
66 int headroom); 66 int headroom);
67 67
68int tipc_cfg_init(void); 68int tipc_cfg_init(void);
69void tipc_cfg_reinit(void);
69void tipc_cfg_stop(void); 70void tipc_cfg_stop(void);
70 71
71#endif 72#endif
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 68eba03e7955..f7b95239ebda 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -52,14 +52,12 @@
52#endif 52#endif
53 53
54/* global variables used by multiple sub-systems within TIPC */ 54/* global variables used by multiple sub-systems within TIPC */
55
56int tipc_random; 55int tipc_random;
57 56
58const char tipc_alphabet[] = 57const char tipc_alphabet[] =
59 "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_."; 58 "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_.";
60 59
61/* configurable TIPC parameters */ 60/* configurable TIPC parameters */
62
63u32 tipc_own_addr; 61u32 tipc_own_addr;
64int tipc_max_ports; 62int tipc_max_ports;
65int tipc_max_subscriptions; 63int tipc_max_subscriptions;
@@ -77,7 +75,6 @@ int tipc_remote_management;
77 * NOTE: Headroom is reserved to allow prepending of a data link header. 75 * NOTE: Headroom is reserved to allow prepending of a data link header.
78 * There may also be unrequested tailroom present at the buffer's end. 76 * There may also be unrequested tailroom present at the buffer's end.
79 */ 77 */
80
81struct sk_buff *tipc_buf_acquire(u32 size) 78struct sk_buff *tipc_buf_acquire(u32 size)
82{ 79{
83 struct sk_buff *skb; 80 struct sk_buff *skb;
@@ -95,7 +92,6 @@ struct sk_buff *tipc_buf_acquire(u32 size)
95/** 92/**
96 * tipc_core_stop_net - shut down TIPC networking sub-systems 93 * tipc_core_stop_net - shut down TIPC networking sub-systems
97 */ 94 */
98
99static void tipc_core_stop_net(void) 95static void tipc_core_stop_net(void)
100{ 96{
101 tipc_net_stop(); 97 tipc_net_stop();
@@ -105,7 +101,6 @@ static void tipc_core_stop_net(void)
105/** 101/**
106 * start_net - start TIPC networking sub-systems 102 * start_net - start TIPC networking sub-systems
107 */ 103 */
108
109int tipc_core_start_net(unsigned long addr) 104int tipc_core_start_net(unsigned long addr)
110{ 105{
111 int res; 106 int res;
@@ -121,7 +116,6 @@ int tipc_core_start_net(unsigned long addr)
121/** 116/**
122 * tipc_core_stop - switch TIPC from SINGLE NODE to NOT RUNNING mode 117 * tipc_core_stop - switch TIPC from SINGLE NODE to NOT RUNNING mode
123 */ 118 */
124
125static void tipc_core_stop(void) 119static void tipc_core_stop(void)
126{ 120{
127 tipc_netlink_stop(); 121 tipc_netlink_stop();
@@ -137,7 +131,6 @@ static void tipc_core_stop(void)
137/** 131/**
138 * tipc_core_start - switch TIPC from NOT RUNNING to SINGLE NODE mode 132 * tipc_core_start - switch TIPC from NOT RUNNING to SINGLE NODE mode
139 */ 133 */
140
141static int tipc_core_start(void) 134static int tipc_core_start(void)
142{ 135{
143 int res; 136 int res;
@@ -150,9 +143,9 @@ static int tipc_core_start(void)
150 if (!res) 143 if (!res)
151 res = tipc_nametbl_init(); 144 res = tipc_nametbl_init();
152 if (!res) 145 if (!res)
153 res = tipc_k_signal((Handler)tipc_subscr_start, 0); 146 res = tipc_subscr_start();
154 if (!res) 147 if (!res)
155 res = tipc_k_signal((Handler)tipc_cfg_init, 0); 148 res = tipc_cfg_init();
156 if (!res) 149 if (!res)
157 res = tipc_netlink_start(); 150 res = tipc_netlink_start();
158 if (!res) 151 if (!res)
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 13837e0e56b1..2a9bb99537b3 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -85,7 +85,6 @@ void tipc_printf(struct print_buf *, const char *fmt, ...);
85/* 85/*
86 * TIPC_OUTPUT is the destination print buffer for system messages. 86 * TIPC_OUTPUT is the destination print buffer for system messages.
87 */ 87 */
88
89#ifndef TIPC_OUTPUT 88#ifndef TIPC_OUTPUT
90#define TIPC_OUTPUT TIPC_LOG 89#define TIPC_OUTPUT TIPC_LOG
91#endif 90#endif
@@ -102,7 +101,6 @@ void tipc_printf(struct print_buf *, const char *fmt, ...);
102/* 101/*
103 * DBG_OUTPUT is the destination print buffer for debug messages. 102 * DBG_OUTPUT is the destination print buffer for debug messages.
104 */ 103 */
105
106#ifndef DBG_OUTPUT 104#ifndef DBG_OUTPUT
107#define DBG_OUTPUT TIPC_LOG 105#define DBG_OUTPUT TIPC_LOG
108#endif 106#endif
@@ -126,13 +124,11 @@ void tipc_msg_dbg(struct print_buf *, struct tipc_msg *, const char *);
126/* 124/*
127 * TIPC-specific error codes 125 * TIPC-specific error codes
128 */ 126 */
129
130#define ELINKCONG EAGAIN /* link congestion <=> resource unavailable */ 127#define ELINKCONG EAGAIN /* link congestion <=> resource unavailable */
131 128
132/* 129/*
133 * Global configuration variables 130 * Global configuration variables
134 */ 131 */
135
136extern u32 tipc_own_addr; 132extern u32 tipc_own_addr;
137extern int tipc_max_ports; 133extern int tipc_max_ports;
138extern int tipc_max_subscriptions; 134extern int tipc_max_subscriptions;
@@ -143,7 +139,6 @@ extern int tipc_remote_management;
143/* 139/*
144 * Other global variables 140 * Other global variables
145 */ 141 */
146
147extern int tipc_random; 142extern int tipc_random;
148extern const char tipc_alphabet[]; 143extern const char tipc_alphabet[];
149 144
@@ -151,7 +146,6 @@ extern const char tipc_alphabet[];
151/* 146/*
152 * Routines available to privileged subsystems 147 * Routines available to privileged subsystems
153 */ 148 */
154
155extern int tipc_core_start_net(unsigned long); 149extern int tipc_core_start_net(unsigned long);
156extern int tipc_handler_start(void); 150extern int tipc_handler_start(void);
157extern void tipc_handler_stop(void); 151extern void tipc_handler_stop(void);
@@ -163,7 +157,6 @@ extern void tipc_socket_stop(void);
163/* 157/*
164 * TIPC timer and signal code 158 * TIPC timer and signal code
165 */ 159 */
166
167typedef void (*Handler) (unsigned long); 160typedef void (*Handler) (unsigned long);
168 161
169u32 tipc_k_signal(Handler routine, unsigned long argument); 162u32 tipc_k_signal(Handler routine, unsigned long argument);
@@ -176,7 +169,6 @@ u32 tipc_k_signal(Handler routine, unsigned long argument);
176 * 169 *
177 * Timer must be initialized before use (and terminated when no longer needed). 170 * Timer must be initialized before use (and terminated when no longer needed).
178 */ 171 */
179
180static inline void k_init_timer(struct timer_list *timer, Handler routine, 172static inline void k_init_timer(struct timer_list *timer, Handler routine,
181 unsigned long argument) 173 unsigned long argument)
182{ 174{
@@ -196,7 +188,6 @@ static inline void k_init_timer(struct timer_list *timer, Handler routine,
196 * then an additional jiffy is added to account for the fact that 188 * then an additional jiffy is added to account for the fact that
197 * the starting time may be in the middle of the current jiffy. 189 * the starting time may be in the middle of the current jiffy.
198 */ 190 */
199
200static inline void k_start_timer(struct timer_list *timer, unsigned long msec) 191static inline void k_start_timer(struct timer_list *timer, unsigned long msec)
201{ 192{
202 mod_timer(timer, jiffies + msecs_to_jiffies(msec) + 1); 193 mod_timer(timer, jiffies + msecs_to_jiffies(msec) + 1);
@@ -212,7 +203,6 @@ static inline void k_start_timer(struct timer_list *timer, unsigned long msec)
212 * WARNING: Must not be called when holding locks required by the timer's 203 * WARNING: Must not be called when holding locks required by the timer's
213 * timeout routine, otherwise deadlock can occur on SMP systems! 204 * timeout routine, otherwise deadlock can occur on SMP systems!
214 */ 205 */
215
216static inline void k_cancel_timer(struct timer_list *timer) 206static inline void k_cancel_timer(struct timer_list *timer)
217{ 207{
218 del_timer_sync(timer); 208 del_timer_sync(timer);
@@ -229,12 +219,10 @@ static inline void k_cancel_timer(struct timer_list *timer)
229 * (Do not "enhance" this routine to automatically cancel an active timer, 219 * (Do not "enhance" this routine to automatically cancel an active timer,
230 * otherwise deadlock can arise when a timeout routine calls k_term_timer.) 220 * otherwise deadlock can arise when a timeout routine calls k_term_timer.)
231 */ 221 */
232
233static inline void k_term_timer(struct timer_list *timer) 222static inline void k_term_timer(struct timer_list *timer)
234{ 223{
235} 224}
236 225
237
238/* 226/*
239 * TIPC message buffer code 227 * TIPC message buffer code
240 * 228 *
@@ -244,7 +232,6 @@ static inline void k_term_timer(struct timer_list *timer)
244 * Note: Headroom should be a multiple of 4 to ensure the TIPC header fields 232 * Note: Headroom should be a multiple of 4 to ensure the TIPC header fields
245 * are word aligned for quicker access 233 * are word aligned for quicker access
246 */ 234 */
247
248#define BUF_HEADROOM LL_MAX_HEADER 235#define BUF_HEADROOM LL_MAX_HEADER
249 236
250struct tipc_skb_cb { 237struct tipc_skb_cb {
@@ -253,7 +240,6 @@ struct tipc_skb_cb {
253 240
254#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0])) 241#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
255 242
256
257static inline struct tipc_msg *buf_msg(struct sk_buff *skb) 243static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
258{ 244{
259 return (struct tipc_msg *)skb->data; 245 return (struct tipc_msg *)skb->data;
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index c630a21b2bed..ae054cfe179f 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -70,7 +70,6 @@ struct tipc_link_req {
70 * @dest_domain: network domain of node(s) which should respond to message 70 * @dest_domain: network domain of node(s) which should respond to message
71 * @b_ptr: ptr to bearer issuing message 71 * @b_ptr: ptr to bearer issuing message
72 */ 72 */
73
74static struct sk_buff *tipc_disc_init_msg(u32 type, 73static struct sk_buff *tipc_disc_init_msg(u32 type,
75 u32 dest_domain, 74 u32 dest_domain,
76 struct tipc_bearer *b_ptr) 75 struct tipc_bearer *b_ptr)
@@ -96,7 +95,6 @@ static struct sk_buff *tipc_disc_init_msg(u32 type,
96 * @node_addr: duplicated node address 95 * @node_addr: duplicated node address
97 * @media_addr: media address advertised by duplicated node 96 * @media_addr: media address advertised by duplicated node
98 */ 97 */
99
100static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr, 98static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr,
101 struct tipc_media_addr *media_addr) 99 struct tipc_media_addr *media_addr)
102{ 100{
@@ -117,7 +115,6 @@ static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr,
117 * @buf: buffer containing message 115 * @buf: buffer containing message
118 * @b_ptr: bearer that message arrived on 116 * @b_ptr: bearer that message arrived on
119 */ 117 */
120
121void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr) 118void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
122{ 119{
123 struct tipc_node *n_ptr; 120 struct tipc_node *n_ptr;
@@ -221,7 +218,6 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
221 * the new media address and reset the link to ensure it starts up 218 * the new media address and reset the link to ensure it starts up
222 * cleanly. 219 * cleanly.
223 */ 220 */
224
225 if (addr_mismatch) { 221 if (addr_mismatch) {
226 if (tipc_link_is_up(link)) { 222 if (tipc_link_is_up(link)) {
227 disc_dupl_alert(b_ptr, orig, &media_addr); 223 disc_dupl_alert(b_ptr, orig, &media_addr);
@@ -264,7 +260,6 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
264 * Reinitiates discovery process if discovery object has no associated nodes 260 * Reinitiates discovery process if discovery object has no associated nodes
265 * and is either not currently searching or is searching at a slow rate 261 * and is either not currently searching or is searching at a slow rate
266 */ 262 */
267
268static void disc_update(struct tipc_link_req *req) 263static void disc_update(struct tipc_link_req *req)
269{ 264{
270 if (!req->num_nodes) { 265 if (!req->num_nodes) {
@@ -280,7 +275,6 @@ static void disc_update(struct tipc_link_req *req)
280 * tipc_disc_add_dest - increment set of discovered nodes 275 * tipc_disc_add_dest - increment set of discovered nodes
281 * @req: ptr to link request structure 276 * @req: ptr to link request structure
282 */ 277 */
283
284void tipc_disc_add_dest(struct tipc_link_req *req) 278void tipc_disc_add_dest(struct tipc_link_req *req)
285{ 279{
286 req->num_nodes++; 280 req->num_nodes++;
@@ -290,7 +284,6 @@ void tipc_disc_add_dest(struct tipc_link_req *req)
290 * tipc_disc_remove_dest - decrement set of discovered nodes 284 * tipc_disc_remove_dest - decrement set of discovered nodes
291 * @req: ptr to link request structure 285 * @req: ptr to link request structure
292 */ 286 */
293
294void tipc_disc_remove_dest(struct tipc_link_req *req) 287void tipc_disc_remove_dest(struct tipc_link_req *req)
295{ 288{
296 req->num_nodes--; 289 req->num_nodes--;
@@ -301,7 +294,6 @@ void tipc_disc_remove_dest(struct tipc_link_req *req)
301 * disc_send_msg - send link setup request message 294 * disc_send_msg - send link setup request message
302 * @req: ptr to link request structure 295 * @req: ptr to link request structure
303 */ 296 */
304
305static void disc_send_msg(struct tipc_link_req *req) 297static void disc_send_msg(struct tipc_link_req *req)
306{ 298{
307 if (!req->bearer->blocked) 299 if (!req->bearer->blocked)
@@ -314,7 +306,6 @@ static void disc_send_msg(struct tipc_link_req *req)
314 * 306 *
315 * Called whenever a link setup request timer associated with a bearer expires. 307 * Called whenever a link setup request timer associated with a bearer expires.
316 */ 308 */
317
318static void disc_timeout(struct tipc_link_req *req) 309static void disc_timeout(struct tipc_link_req *req)
319{ 310{
320 int max_delay; 311 int max_delay;
@@ -322,7 +313,6 @@ static void disc_timeout(struct tipc_link_req *req)
322 spin_lock_bh(&req->bearer->lock); 313 spin_lock_bh(&req->bearer->lock);
323 314
324 /* Stop searching if only desired node has been found */ 315 /* Stop searching if only desired node has been found */
325
326 if (tipc_node(req->domain) && req->num_nodes) { 316 if (tipc_node(req->domain) && req->num_nodes) {
327 req->timer_intv = TIPC_LINK_REQ_INACTIVE; 317 req->timer_intv = TIPC_LINK_REQ_INACTIVE;
328 goto exit; 318 goto exit;
@@ -335,7 +325,6 @@ static void disc_timeout(struct tipc_link_req *req)
335 * hold at fast polling rate if don't have any associated nodes, 325 * hold at fast polling rate if don't have any associated nodes,
336 * otherwise hold at slow polling rate 326 * otherwise hold at slow polling rate
337 */ 327 */
338
339 disc_send_msg(req); 328 disc_send_msg(req);
340 329
341 req->timer_intv *= 2; 330 req->timer_intv *= 2;
@@ -359,7 +348,6 @@ exit:
359 * 348 *
360 * Returns 0 if successful, otherwise -errno. 349 * Returns 0 if successful, otherwise -errno.
361 */ 350 */
362
363int tipc_disc_create(struct tipc_bearer *b_ptr, 351int tipc_disc_create(struct tipc_bearer *b_ptr,
364 struct tipc_media_addr *dest, u32 dest_domain) 352 struct tipc_media_addr *dest, u32 dest_domain)
365{ 353{
@@ -391,7 +379,6 @@ int tipc_disc_create(struct tipc_bearer *b_ptr,
391 * tipc_disc_delete - destroy object sending periodic link setup requests 379 * tipc_disc_delete - destroy object sending periodic link setup requests
392 * @req: ptr to link request structure 380 * @req: ptr to link request structure
393 */ 381 */
394
395void tipc_disc_delete(struct tipc_link_req *req) 382void tipc_disc_delete(struct tipc_link_req *req)
396{ 383{
397 k_cancel_timer(&req->timer); 384 k_cancel_timer(&req->timer);
@@ -399,4 +386,3 @@ void tipc_disc_delete(struct tipc_link_req *req)
399 kfree_skb(req->buf); 386 kfree_skb(req->buf);
400 kfree(req); 387 kfree(req);
401} 388}
402
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index 527e3f0e165d..90ac9bfa7abb 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -48,7 +48,6 @@
48 * @tipc_packet_type: used in binding TIPC to Ethernet driver 48 * @tipc_packet_type: used in binding TIPC to Ethernet driver
49 * @cleanup: work item used when disabling bearer 49 * @cleanup: work item used when disabling bearer
50 */ 50 */
51
52struct eth_bearer { 51struct eth_bearer {
53 struct tipc_bearer *bearer; 52 struct tipc_bearer *bearer;
54 struct net_device *dev; 53 struct net_device *dev;
@@ -67,7 +66,6 @@ static struct notifier_block notifier;
67 * Media-dependent "value" field stores MAC address in first 6 bytes 66 * Media-dependent "value" field stores MAC address in first 6 bytes
68 * and zeroes out the remaining bytes. 67 * and zeroes out the remaining bytes.
69 */ 68 */
70
71static void eth_media_addr_set(struct tipc_media_addr *a, char *mac) 69static void eth_media_addr_set(struct tipc_media_addr *a, char *mac)
72{ 70{
73 memcpy(a->value, mac, ETH_ALEN); 71 memcpy(a->value, mac, ETH_ALEN);
@@ -79,7 +77,6 @@ static void eth_media_addr_set(struct tipc_media_addr *a, char *mac)
79/** 77/**
80 * send_msg - send a TIPC message out over an Ethernet interface 78 * send_msg - send a TIPC message out over an Ethernet interface
81 */ 79 */
82
83static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr, 80static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
84 struct tipc_media_addr *dest) 81 struct tipc_media_addr *dest)
85{ 82{
@@ -115,7 +112,6 @@ static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
115 * ignores packets sent using Ethernet multicast, and traffic sent to other 112 * ignores packets sent using Ethernet multicast, and traffic sent to other
116 * nodes (which can happen if interface is running in promiscuous mode). 113 * nodes (which can happen if interface is running in promiscuous mode).
117 */ 114 */
118
119static int recv_msg(struct sk_buff *buf, struct net_device *dev, 115static int recv_msg(struct sk_buff *buf, struct net_device *dev,
120 struct packet_type *pt, struct net_device *orig_dev) 116 struct packet_type *pt, struct net_device *orig_dev)
121{ 117{
@@ -140,7 +136,6 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev,
140/** 136/**
141 * enable_bearer - attach TIPC bearer to an Ethernet interface 137 * enable_bearer - attach TIPC bearer to an Ethernet interface
142 */ 138 */
143
144static int enable_bearer(struct tipc_bearer *tb_ptr) 139static int enable_bearer(struct tipc_bearer *tb_ptr)
145{ 140{
146 struct net_device *dev = NULL; 141 struct net_device *dev = NULL;
@@ -151,7 +146,6 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
151 int pending_dev = 0; 146 int pending_dev = 0;
152 147
153 /* Find unused Ethernet bearer structure */ 148 /* Find unused Ethernet bearer structure */
154
155 while (eb_ptr->dev) { 149 while (eb_ptr->dev) {
156 if (!eb_ptr->bearer) 150 if (!eb_ptr->bearer)
157 pending_dev++; 151 pending_dev++;
@@ -160,7 +154,6 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
160 } 154 }
161 155
162 /* Find device with specified name */ 156 /* Find device with specified name */
163
164 read_lock(&dev_base_lock); 157 read_lock(&dev_base_lock);
165 for_each_netdev(&init_net, pdev) { 158 for_each_netdev(&init_net, pdev) {
166 if (!strncmp(pdev->name, driver_name, IFNAMSIZ)) { 159 if (!strncmp(pdev->name, driver_name, IFNAMSIZ)) {
@@ -174,7 +167,6 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
174 return -ENODEV; 167 return -ENODEV;
175 168
176 /* Create Ethernet bearer for device */ 169 /* Create Ethernet bearer for device */
177
178 eb_ptr->dev = dev; 170 eb_ptr->dev = dev;
179 eb_ptr->tipc_packet_type.type = htons(ETH_P_TIPC); 171 eb_ptr->tipc_packet_type.type = htons(ETH_P_TIPC);
180 eb_ptr->tipc_packet_type.dev = dev; 172 eb_ptr->tipc_packet_type.dev = dev;
@@ -184,7 +176,6 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
184 dev_add_pack(&eb_ptr->tipc_packet_type); 176 dev_add_pack(&eb_ptr->tipc_packet_type);
185 177
186 /* Associate TIPC bearer with Ethernet bearer */ 178 /* Associate TIPC bearer with Ethernet bearer */
187
188 eb_ptr->bearer = tb_ptr; 179 eb_ptr->bearer = tb_ptr;
189 tb_ptr->usr_handle = (void *)eb_ptr; 180 tb_ptr->usr_handle = (void *)eb_ptr;
190 tb_ptr->mtu = dev->mtu; 181 tb_ptr->mtu = dev->mtu;
@@ -198,7 +189,6 @@ static int enable_bearer(struct tipc_bearer *tb_ptr)
198 * 189 *
199 * This routine must be invoked from a work queue because it can sleep. 190 * This routine must be invoked from a work queue because it can sleep.
200 */ 191 */
201
202static void cleanup_bearer(struct work_struct *work) 192static void cleanup_bearer(struct work_struct *work)
203{ 193{
204 struct eth_bearer *eb_ptr = 194 struct eth_bearer *eb_ptr =
@@ -216,7 +206,6 @@ static void cleanup_bearer(struct work_struct *work)
216 * then get worker thread to complete bearer cleanup. (Can't do cleanup 206 * then get worker thread to complete bearer cleanup. (Can't do cleanup
217 * here because cleanup code needs to sleep and caller holds spinlocks.) 207 * here because cleanup code needs to sleep and caller holds spinlocks.)
218 */ 208 */
219
220static void disable_bearer(struct tipc_bearer *tb_ptr) 209static void disable_bearer(struct tipc_bearer *tb_ptr)
221{ 210{
222 struct eth_bearer *eb_ptr = (struct eth_bearer *)tb_ptr->usr_handle; 211 struct eth_bearer *eb_ptr = (struct eth_bearer *)tb_ptr->usr_handle;
@@ -232,7 +221,6 @@ static void disable_bearer(struct tipc_bearer *tb_ptr)
232 * Change the state of the Ethernet bearer (if any) associated with the 221 * Change the state of the Ethernet bearer (if any) associated with the
233 * specified device. 222 * specified device.
234 */ 223 */
235
236static int recv_notification(struct notifier_block *nb, unsigned long evt, 224static int recv_notification(struct notifier_block *nb, unsigned long evt,
237 void *dv) 225 void *dv)
238{ 226{
@@ -281,7 +269,6 @@ static int recv_notification(struct notifier_block *nb, unsigned long evt,
281/** 269/**
282 * eth_addr2str - convert Ethernet address to string 270 * eth_addr2str - convert Ethernet address to string
283 */ 271 */
284
285static int eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size) 272static int eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size)
286{ 273{
287 if (str_size < 18) /* 18 = strlen("aa:bb:cc:dd:ee:ff\0") */ 274 if (str_size < 18) /* 18 = strlen("aa:bb:cc:dd:ee:ff\0") */
@@ -294,7 +281,6 @@ static int eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size)
294/** 281/**
295 * eth_str2addr - convert string to Ethernet address 282 * eth_str2addr - convert string to Ethernet address
296 */ 283 */
297
298static int eth_str2addr(struct tipc_media_addr *a, char *str_buf) 284static int eth_str2addr(struct tipc_media_addr *a, char *str_buf)
299{ 285{
300 char mac[ETH_ALEN]; 286 char mac[ETH_ALEN];
@@ -314,7 +300,6 @@ static int eth_str2addr(struct tipc_media_addr *a, char *str_buf)
314/** 300/**
315 * eth_str2addr - convert Ethernet address format to message header format 301 * eth_str2addr - convert Ethernet address format to message header format
316 */ 302 */
317
318static int eth_addr2msg(struct tipc_media_addr *a, char *msg_area) 303static int eth_addr2msg(struct tipc_media_addr *a, char *msg_area)
319{ 304{
320 memset(msg_area, 0, TIPC_MEDIA_ADDR_SIZE); 305 memset(msg_area, 0, TIPC_MEDIA_ADDR_SIZE);
@@ -326,7 +311,6 @@ static int eth_addr2msg(struct tipc_media_addr *a, char *msg_area)
326/** 311/**
327 * eth_str2addr - convert message header address format to Ethernet format 312 * eth_str2addr - convert message header address format to Ethernet format
328 */ 313 */
329
330static int eth_msg2addr(struct tipc_media_addr *a, char *msg_area) 314static int eth_msg2addr(struct tipc_media_addr *a, char *msg_area)
331{ 315{
332 if (msg_area[TIPC_MEDIA_TYPE_OFFSET] != TIPC_MEDIA_TYPE_ETH) 316 if (msg_area[TIPC_MEDIA_TYPE_OFFSET] != TIPC_MEDIA_TYPE_ETH)
@@ -339,7 +323,6 @@ static int eth_msg2addr(struct tipc_media_addr *a, char *msg_area)
339/* 323/*
340 * Ethernet media registration info 324 * Ethernet media registration info
341 */ 325 */
342
343static struct tipc_media eth_media_info = { 326static struct tipc_media eth_media_info = {
344 .send_msg = send_msg, 327 .send_msg = send_msg,
345 .enable_bearer = enable_bearer, 328 .enable_bearer = enable_bearer,
@@ -363,7 +346,6 @@ static struct tipc_media eth_media_info = {
363 * Register Ethernet media type with TIPC bearer code. Also register 346 * Register Ethernet media type with TIPC bearer code. Also register
364 * with OS for notifications about device state changes. 347 * with OS for notifications about device state changes.
365 */ 348 */
366
367int tipc_eth_media_start(void) 349int tipc_eth_media_start(void)
368{ 350{
369 int res; 351 int res;
@@ -386,7 +368,6 @@ int tipc_eth_media_start(void)
386/** 368/**
387 * tipc_eth_media_stop - deactivate Ethernet bearer support 369 * tipc_eth_media_stop - deactivate Ethernet bearer support
388 */ 370 */
389
390void tipc_eth_media_stop(void) 371void tipc_eth_media_stop(void)
391{ 372{
392 if (!eth_started) 373 if (!eth_started)
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
index 274c98e164b7..9c6f22ff1c6d 100644
--- a/net/tipc/handler.c
+++ b/net/tipc/handler.c
@@ -129,4 +129,3 @@ void tipc_handler_stop(void)
129 129
130 kmem_cache_destroy(tipc_queue_item_cache); 130 kmem_cache_destroy(tipc_queue_item_cache);
131} 131}
132
diff --git a/net/tipc/link.c b/net/tipc/link.c
index b4b9b30167a3..7a614f43549d 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -45,13 +45,11 @@
45/* 45/*
46 * Out-of-range value for link session numbers 46 * Out-of-range value for link session numbers
47 */ 47 */
48
49#define INVALID_SESSION 0x10000 48#define INVALID_SESSION 0x10000
50 49
51/* 50/*
52 * Link state events: 51 * Link state events:
53 */ 52 */
54
55#define STARTING_EVT 856384768 /* link processing trigger */ 53#define STARTING_EVT 856384768 /* link processing trigger */
56#define TRAFFIC_MSG_EVT 560815u /* rx'd ??? */ 54#define TRAFFIC_MSG_EVT 560815u /* rx'd ??? */
57#define TIMEOUT_EVT 560817u /* link timer expired */ 55#define TIMEOUT_EVT 560817u /* link timer expired */
@@ -67,7 +65,6 @@
67/* 65/*
68 * State value stored in 'exp_msg_count' 66 * State value stored in 'exp_msg_count'
69 */ 67 */
70
71#define START_CHANGEOVER 100000u 68#define START_CHANGEOVER 100000u
72 69
73/** 70/**
@@ -77,7 +74,6 @@
77 * @addr_peer: network address of node at far end 74 * @addr_peer: network address of node at far end
78 * @if_peer: name of interface at far end 75 * @if_peer: name of interface at far end
79 */ 76 */
80
81struct tipc_link_name { 77struct tipc_link_name {
82 u32 addr_local; 78 u32 addr_local;
83 char if_local[TIPC_MAX_IF_NAME]; 79 char if_local[TIPC_MAX_IF_NAME];
@@ -105,7 +101,6 @@ static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
105/* 101/*
106 * Simple link routines 102 * Simple link routines
107 */ 103 */
108
109static unsigned int align(unsigned int i) 104static unsigned int align(unsigned int i)
110{ 105{
111 return (i + 3) & ~3u; 106 return (i + 3) & ~3u;
@@ -143,7 +138,6 @@ static u32 link_last_sent(struct tipc_link *l_ptr)
143/* 138/*
144 * Simple non-static link routines (i.e. referenced outside this file) 139 * Simple non-static link routines (i.e. referenced outside this file)
145 */ 140 */
146
147int tipc_link_is_up(struct tipc_link *l_ptr) 141int tipc_link_is_up(struct tipc_link *l_ptr)
148{ 142{
149 if (!l_ptr) 143 if (!l_ptr)
@@ -164,7 +158,6 @@ int tipc_link_is_active(struct tipc_link *l_ptr)
164 * 158 *
165 * Returns 1 if link name is valid, otherwise 0. 159 * Returns 1 if link name is valid, otherwise 0.
166 */ 160 */
167
168static int link_name_validate(const char *name, 161static int link_name_validate(const char *name,
169 struct tipc_link_name *name_parts) 162 struct tipc_link_name *name_parts)
170{ 163{
@@ -180,7 +173,6 @@ static int link_name_validate(const char *name,
180 u32 if_peer_len; 173 u32 if_peer_len;
181 174
182 /* copy link name & ensure length is OK */ 175 /* copy link name & ensure length is OK */
183
184 name_copy[TIPC_MAX_LINK_NAME - 1] = 0; 176 name_copy[TIPC_MAX_LINK_NAME - 1] = 0;
185 /* need above in case non-Posix strncpy() doesn't pad with nulls */ 177 /* need above in case non-Posix strncpy() doesn't pad with nulls */
186 strncpy(name_copy, name, TIPC_MAX_LINK_NAME); 178 strncpy(name_copy, name, TIPC_MAX_LINK_NAME);
@@ -188,7 +180,6 @@ static int link_name_validate(const char *name,
188 return 0; 180 return 0;
189 181
190 /* ensure all component parts of link name are present */ 182 /* ensure all component parts of link name are present */
191
192 addr_local = name_copy; 183 addr_local = name_copy;
193 if_local = strchr(addr_local, ':'); 184 if_local = strchr(addr_local, ':');
194 if (if_local == NULL) 185 if (if_local == NULL)
@@ -206,7 +197,6 @@ static int link_name_validate(const char *name,
206 if_peer_len = strlen(if_peer) + 1; 197 if_peer_len = strlen(if_peer) + 1;
207 198
208 /* validate component parts of link name */ 199 /* validate component parts of link name */
209
210 if ((sscanf(addr_local, "%u.%u.%u%c", 200 if ((sscanf(addr_local, "%u.%u.%u%c",
211 &z_local, &c_local, &n_local, &dummy) != 3) || 201 &z_local, &c_local, &n_local, &dummy) != 3) ||
212 (sscanf(addr_peer, "%u.%u.%u%c", 202 (sscanf(addr_peer, "%u.%u.%u%c",
@@ -220,7 +210,6 @@ static int link_name_validate(const char *name,
220 return 0; 210 return 0;
221 211
222 /* return link name components, if necessary */ 212 /* return link name components, if necessary */
223
224 if (name_parts) { 213 if (name_parts) {
225 name_parts->addr_local = tipc_addr(z_local, c_local, n_local); 214 name_parts->addr_local = tipc_addr(z_local, c_local, n_local);
226 strcpy(name_parts->if_local, if_local); 215 strcpy(name_parts->if_local, if_local);
@@ -239,13 +228,11 @@ static int link_name_validate(const char *name,
239 * another thread because tipc_link_delete() always cancels the link timer before 228 * another thread because tipc_link_delete() always cancels the link timer before
240 * tipc_node_delete() is called.) 229 * tipc_node_delete() is called.)
241 */ 230 */
242
243static void link_timeout(struct tipc_link *l_ptr) 231static void link_timeout(struct tipc_link *l_ptr)
244{ 232{
245 tipc_node_lock(l_ptr->owner); 233 tipc_node_lock(l_ptr->owner);
246 234
247 /* update counters used in statistical profiling of send traffic */ 235 /* update counters used in statistical profiling of send traffic */
248
249 l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size; 236 l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
250 l_ptr->stats.queue_sz_counts++; 237 l_ptr->stats.queue_sz_counts++;
251 238
@@ -278,7 +265,6 @@ static void link_timeout(struct tipc_link *l_ptr)
278 } 265 }
279 266
280 /* do all other link processing performed on a periodic basis */ 267 /* do all other link processing performed on a periodic basis */
281
282 link_check_defragm_bufs(l_ptr); 268 link_check_defragm_bufs(l_ptr);
283 269
284 link_state_event(l_ptr, TIMEOUT_EVT); 270 link_state_event(l_ptr, TIMEOUT_EVT);
@@ -302,7 +288,6 @@ static void link_set_timer(struct tipc_link *l_ptr, u32 time)
302 * 288 *
303 * Returns pointer to link. 289 * Returns pointer to link.
304 */ 290 */
305
306struct tipc_link *tipc_link_create(struct tipc_node *n_ptr, 291struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
307 struct tipc_bearer *b_ptr, 292 struct tipc_bearer *b_ptr,
308 const struct tipc_media_addr *media_addr) 293 const struct tipc_media_addr *media_addr)
@@ -383,7 +368,6 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
383 * This routine must not grab the node lock until after link timer cancellation 368 * This routine must not grab the node lock until after link timer cancellation
384 * to avoid a potential deadlock situation. 369 * to avoid a potential deadlock situation.
385 */ 370 */
386
387void tipc_link_delete(struct tipc_link *l_ptr) 371void tipc_link_delete(struct tipc_link *l_ptr)
388{ 372{
389 if (!l_ptr) { 373 if (!l_ptr) {
@@ -419,7 +403,6 @@ static void link_start(struct tipc_link *l_ptr)
419 * Schedules port for renewed sending of messages after link congestion 403 * Schedules port for renewed sending of messages after link congestion
420 * has abated. 404 * has abated.
421 */ 405 */
422
423static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz) 406static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz)
424{ 407{
425 struct tipc_port *p_ptr; 408 struct tipc_port *p_ptr;
@@ -476,7 +459,6 @@ exit:
476 * link_release_outqueue - purge link's outbound message queue 459 * link_release_outqueue - purge link's outbound message queue
477 * @l_ptr: pointer to link 460 * @l_ptr: pointer to link
478 */ 461 */
479
480static void link_release_outqueue(struct tipc_link *l_ptr) 462static void link_release_outqueue(struct tipc_link *l_ptr)
481{ 463{
482 struct sk_buff *buf = l_ptr->first_out; 464 struct sk_buff *buf = l_ptr->first_out;
@@ -495,7 +477,6 @@ static void link_release_outqueue(struct tipc_link *l_ptr)
495 * tipc_link_reset_fragments - purge link's inbound message fragments queue 477 * tipc_link_reset_fragments - purge link's inbound message fragments queue
496 * @l_ptr: pointer to link 478 * @l_ptr: pointer to link
497 */ 479 */
498
499void tipc_link_reset_fragments(struct tipc_link *l_ptr) 480void tipc_link_reset_fragments(struct tipc_link *l_ptr)
500{ 481{
501 struct sk_buff *buf = l_ptr->defragm_buf; 482 struct sk_buff *buf = l_ptr->defragm_buf;
@@ -513,7 +494,6 @@ void tipc_link_reset_fragments(struct tipc_link *l_ptr)
513 * tipc_link_stop - purge all inbound and outbound messages associated with link 494 * tipc_link_stop - purge all inbound and outbound messages associated with link
514 * @l_ptr: pointer to link 495 * @l_ptr: pointer to link
515 */ 496 */
516
517void tipc_link_stop(struct tipc_link *l_ptr) 497void tipc_link_stop(struct tipc_link *l_ptr)
518{ 498{
519 struct sk_buff *buf; 499 struct sk_buff *buf;
@@ -569,7 +549,6 @@ void tipc_link_reset(struct tipc_link *l_ptr)
569 } 549 }
570 550
571 /* Clean up all queues: */ 551 /* Clean up all queues: */
572
573 link_release_outqueue(l_ptr); 552 link_release_outqueue(l_ptr);
574 kfree_skb(l_ptr->proto_msg_queue); 553 kfree_skb(l_ptr->proto_msg_queue);
575 l_ptr->proto_msg_queue = NULL; 554 l_ptr->proto_msg_queue = NULL;
@@ -611,8 +590,7 @@ static void link_activate(struct tipc_link *l_ptr)
611 * @l_ptr: pointer to link 590 * @l_ptr: pointer to link
612 * @event: state machine event to process 591 * @event: state machine event to process
613 */ 592 */
614 593static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
615static void link_state_event(struct tipc_link *l_ptr, unsigned event)
616{ 594{
617 struct tipc_link *other; 595 struct tipc_link *other;
618 u32 cont_intv = l_ptr->continuity_interval; 596 u32 cont_intv = l_ptr->continuity_interval;
@@ -785,7 +763,6 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned event)
785 * link_bundle_buf(): Append contents of a buffer to 763 * link_bundle_buf(): Append contents of a buffer to
786 * the tail of an existing one. 764 * the tail of an existing one.
787 */ 765 */
788
789static int link_bundle_buf(struct tipc_link *l_ptr, 766static int link_bundle_buf(struct tipc_link *l_ptr,
790 struct sk_buff *bundler, 767 struct sk_buff *bundler,
791 struct sk_buff *buf) 768 struct sk_buff *buf)
@@ -860,7 +837,6 @@ static void link_add_chain_to_outqueue(struct tipc_link *l_ptr,
860 * inside TIPC when the 'fast path' in tipc_send_buf 837 * inside TIPC when the 'fast path' in tipc_send_buf
861 * has failed, and from link_send() 838 * has failed, and from link_send()
862 */ 839 */
863
864int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf) 840int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
865{ 841{
866 struct tipc_msg *msg = buf_msg(buf); 842 struct tipc_msg *msg = buf_msg(buf);
@@ -872,7 +848,6 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
872 u32 max_packet = l_ptr->max_pkt; 848 u32 max_packet = l_ptr->max_pkt;
873 849
874 /* Match msg importance against queue limits: */ 850 /* Match msg importance against queue limits: */
875
876 if (unlikely(queue_size >= queue_limit)) { 851 if (unlikely(queue_size >= queue_limit)) {
877 if (imp <= TIPC_CRITICAL_IMPORTANCE) { 852 if (imp <= TIPC_CRITICAL_IMPORTANCE) {
878 link_schedule_port(l_ptr, msg_origport(msg), size); 853 link_schedule_port(l_ptr, msg_origport(msg), size);
@@ -888,12 +863,10 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
888 } 863 }
889 864
890 /* Fragmentation needed ? */ 865 /* Fragmentation needed ? */
891
892 if (size > max_packet) 866 if (size > max_packet)
893 return link_send_long_buf(l_ptr, buf); 867 return link_send_long_buf(l_ptr, buf);
894 868
895 /* Packet can be queued or sent: */ 869 /* Packet can be queued or sent. */
896
897 if (likely(!tipc_bearer_congested(l_ptr->b_ptr, l_ptr) && 870 if (likely(!tipc_bearer_congested(l_ptr->b_ptr, l_ptr) &&
898 !link_congested(l_ptr))) { 871 !link_congested(l_ptr))) {
899 link_add_to_outqueue(l_ptr, buf, msg); 872 link_add_to_outqueue(l_ptr, buf, msg);
@@ -907,13 +880,11 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
907 } 880 }
908 return dsz; 881 return dsz;
909 } 882 }
910 /* Congestion: can message be bundled ?: */ 883 /* Congestion: can message be bundled ? */
911
912 if ((msg_user(msg) != CHANGEOVER_PROTOCOL) && 884 if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
913 (msg_user(msg) != MSG_FRAGMENTER)) { 885 (msg_user(msg) != MSG_FRAGMENTER)) {
914 886
915 /* Try adding message to an existing bundle */ 887 /* Try adding message to an existing bundle */
916
917 if (l_ptr->next_out && 888 if (l_ptr->next_out &&
918 link_bundle_buf(l_ptr, l_ptr->last_out, buf)) { 889 link_bundle_buf(l_ptr, l_ptr->last_out, buf)) {
919 tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr); 890 tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
@@ -921,7 +892,6 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
921 } 892 }
922 893
923 /* Try creating a new bundle */ 894 /* Try creating a new bundle */
924
925 if (size <= max_packet * 2 / 3) { 895 if (size <= max_packet * 2 / 3) {
926 struct sk_buff *bundler = tipc_buf_acquire(max_packet); 896 struct sk_buff *bundler = tipc_buf_acquire(max_packet);
927 struct tipc_msg bundler_hdr; 897 struct tipc_msg bundler_hdr;
@@ -951,7 +921,6 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
951 * not been selected yet, and the the owner node is not locked 921 * not been selected yet, and the the owner node is not locked
952 * Called by TIPC internal users, e.g. the name distributor 922 * Called by TIPC internal users, e.g. the name distributor
953 */ 923 */
954
955int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector) 924int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
956{ 925{
957 struct tipc_link *l_ptr; 926 struct tipc_link *l_ptr;
@@ -984,7 +953,6 @@ int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
984 * small enough not to require fragmentation. 953 * small enough not to require fragmentation.
985 * Called without any locks held. 954 * Called without any locks held.
986 */ 955 */
987
988void tipc_link_send_names(struct list_head *message_list, u32 dest) 956void tipc_link_send_names(struct list_head *message_list, u32 dest)
989{ 957{
990 struct tipc_node *n_ptr; 958 struct tipc_node *n_ptr;
@@ -1013,7 +981,6 @@ void tipc_link_send_names(struct list_head *message_list, u32 dest)
1013 read_unlock_bh(&tipc_net_lock); 981 read_unlock_bh(&tipc_net_lock);
1014 982
1015 /* discard the messages if they couldn't be sent */ 983 /* discard the messages if they couldn't be sent */
1016
1017 list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) { 984 list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) {
1018 list_del((struct list_head *)buf); 985 list_del((struct list_head *)buf);
1019 kfree_skb(buf); 986 kfree_skb(buf);
@@ -1026,7 +993,6 @@ void tipc_link_send_names(struct list_head *message_list, u32 dest)
1026 * inclusive total message length. Very time critical. 993 * inclusive total message length. Very time critical.
1027 * Link is locked. Returns user data length. 994 * Link is locked. Returns user data length.
1028 */ 995 */
1029
1030static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf, 996static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
1031 u32 *used_max_pkt) 997 u32 *used_max_pkt)
1032{ 998{
@@ -1111,7 +1077,6 @@ again:
1111 * Try building message using port's max_pkt hint. 1077 * Try building message using port's max_pkt hint.
1112 * (Must not hold any locks while building message.) 1078 * (Must not hold any locks while building message.)
1113 */ 1079 */
1114
1115 res = tipc_msg_build(hdr, msg_sect, num_sect, total_len, 1080 res = tipc_msg_build(hdr, msg_sect, num_sect, total_len,
1116 sender->max_pkt, !sender->user_port, &buf); 1081 sender->max_pkt, !sender->user_port, &buf);
1117 1082
@@ -1131,12 +1096,10 @@ exit:
1131 } 1096 }
1132 1097
1133 /* Exit if build request was invalid */ 1098 /* Exit if build request was invalid */
1134
1135 if (unlikely(res < 0)) 1099 if (unlikely(res < 0))
1136 goto exit; 1100 goto exit;
1137 1101
1138 /* Exit if link (or bearer) is congested */ 1102 /* Exit if link (or bearer) is congested */
1139
1140 if (link_congested(l_ptr) || 1103 if (link_congested(l_ptr) ||
1141 !list_empty(&l_ptr->b_ptr->cong_links)) { 1104 !list_empty(&l_ptr->b_ptr->cong_links)) {
1142 res = link_schedule_port(l_ptr, 1105 res = link_schedule_port(l_ptr,
@@ -1148,7 +1111,6 @@ exit:
1148 * Message size exceeds max_pkt hint; update hint, 1111 * Message size exceeds max_pkt hint; update hint,
1149 * then re-try fast path or fragment the message 1112 * then re-try fast path or fragment the message
1150 */ 1113 */
1151
1152 sender->max_pkt = l_ptr->max_pkt; 1114 sender->max_pkt = l_ptr->max_pkt;
1153 tipc_node_unlock(node); 1115 tipc_node_unlock(node);
1154 read_unlock_bh(&tipc_net_lock); 1116 read_unlock_bh(&tipc_net_lock);
@@ -1166,7 +1128,6 @@ exit:
1166 read_unlock_bh(&tipc_net_lock); 1128 read_unlock_bh(&tipc_net_lock);
1167 1129
1168 /* Couldn't find a link to the destination node */ 1130 /* Couldn't find a link to the destination node */
1169
1170 if (buf) 1131 if (buf)
1171 return tipc_reject_msg(buf, TIPC_ERR_NO_NODE); 1132 return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
1172 if (res >= 0) 1133 if (res >= 0)
@@ -1220,15 +1181,13 @@ again:
1220 sect_crs = NULL; 1181 sect_crs = NULL;
1221 curr_sect = -1; 1182 curr_sect = -1;
1222 1183
1223 /* Prepare reusable fragment header: */ 1184 /* Prepare reusable fragment header */
1224
1225 tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, 1185 tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
1226 INT_H_SIZE, msg_destnode(hdr)); 1186 INT_H_SIZE, msg_destnode(hdr));
1227 msg_set_size(&fragm_hdr, max_pkt); 1187 msg_set_size(&fragm_hdr, max_pkt);
1228 msg_set_fragm_no(&fragm_hdr, 1); 1188 msg_set_fragm_no(&fragm_hdr, 1);
1229 1189
1230 /* Prepare header of first fragment: */ 1190 /* Prepare header of first fragment */
1231
1232 buf_chain = buf = tipc_buf_acquire(max_pkt); 1191 buf_chain = buf = tipc_buf_acquire(max_pkt);
1233 if (!buf) 1192 if (!buf)
1234 return -ENOMEM; 1193 return -ENOMEM;
@@ -1237,8 +1196,7 @@ again:
1237 hsz = msg_hdr_sz(hdr); 1196 hsz = msg_hdr_sz(hdr);
1238 skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz); 1197 skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz);
1239 1198
1240 /* Chop up message: */ 1199 /* Chop up message */
1241
1242 fragm_crs = INT_H_SIZE + hsz; 1200 fragm_crs = INT_H_SIZE + hsz;
1243 fragm_rest = fragm_sz - hsz; 1201 fragm_rest = fragm_sz - hsz;
1244 1202
@@ -1329,7 +1287,6 @@ reject:
1329 } 1287 }
1330 1288
1331 /* Append chain of fragments to send queue & send them */ 1289 /* Append chain of fragments to send queue & send them */
1332
1333 l_ptr->long_msg_seq_no++; 1290 l_ptr->long_msg_seq_no++;
1334 link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no); 1291 link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
1335 l_ptr->stats.sent_fragments += fragm_no; 1292 l_ptr->stats.sent_fragments += fragm_no;
@@ -1350,7 +1307,6 @@ u32 tipc_link_push_packet(struct tipc_link *l_ptr)
1350 1307
1351 /* Step to position where retransmission failed, if any, */ 1308 /* Step to position where retransmission failed, if any, */
1352 /* consider that buffers may have been released in meantime */ 1309 /* consider that buffers may have been released in meantime */
1353
1354 if (r_q_size && buf) { 1310 if (r_q_size && buf) {
1355 u32 last = lesser(mod(r_q_head + r_q_size), 1311 u32 last = lesser(mod(r_q_head + r_q_size),
1356 link_last_sent(l_ptr)); 1312 link_last_sent(l_ptr));
@@ -1365,7 +1321,6 @@ u32 tipc_link_push_packet(struct tipc_link *l_ptr)
1365 } 1321 }
1366 1322
1367 /* Continue retransmission now, if there is anything: */ 1323 /* Continue retransmission now, if there is anything: */
1368
1369 if (r_q_size && buf) { 1324 if (r_q_size && buf) {
1370 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); 1325 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1371 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); 1326 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
@@ -1381,7 +1336,6 @@ u32 tipc_link_push_packet(struct tipc_link *l_ptr)
1381 } 1336 }
1382 1337
1383 /* Send deferred protocol message, if any: */ 1338 /* Send deferred protocol message, if any: */
1384
1385 buf = l_ptr->proto_msg_queue; 1339 buf = l_ptr->proto_msg_queue;
1386 if (buf) { 1340 if (buf) {
1387 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); 1341 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
@@ -1398,7 +1352,6 @@ u32 tipc_link_push_packet(struct tipc_link *l_ptr)
1398 } 1352 }
1399 1353
1400 /* Send one deferred data message, if send window not full: */ 1354 /* Send one deferred data message, if send window not full: */
1401
1402 buf = l_ptr->next_out; 1355 buf = l_ptr->next_out;
1403 if (buf) { 1356 if (buf) {
1404 struct tipc_msg *msg = buf_msg(buf); 1357 struct tipc_msg *msg = buf_msg(buf);
@@ -1478,16 +1431,12 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
1478 warn("Retransmission failure on link <%s>\n", l_ptr->name); 1431 warn("Retransmission failure on link <%s>\n", l_ptr->name);
1479 1432
1480 if (l_ptr->addr) { 1433 if (l_ptr->addr) {
1481
1482 /* Handle failure on standard link */ 1434 /* Handle failure on standard link */
1483
1484 link_print(l_ptr, "Resetting link\n"); 1435 link_print(l_ptr, "Resetting link\n");
1485 tipc_link_reset(l_ptr); 1436 tipc_link_reset(l_ptr);
1486 1437
1487 } else { 1438 } else {
1488
1489 /* Handle failure on broadcast link */ 1439 /* Handle failure on broadcast link */
1490
1491 struct tipc_node *n_ptr; 1440 struct tipc_node *n_ptr;
1492 char addr_string[16]; 1441 char addr_string[16];
1493 1442
@@ -1536,7 +1485,6 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
1536 return; 1485 return;
1537 } else { 1486 } else {
1538 /* Detect repeated retransmit failures on uncongested bearer */ 1487 /* Detect repeated retransmit failures on uncongested bearer */
1539
1540 if (l_ptr->last_retransmitted == msg_seqno(msg)) { 1488 if (l_ptr->last_retransmitted == msg_seqno(msg)) {
1541 if (++l_ptr->stale_count > 100) { 1489 if (++l_ptr->stale_count > 100) {
1542 link_retransmit_failure(l_ptr, buf); 1490 link_retransmit_failure(l_ptr, buf);
@@ -1571,7 +1519,6 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
1571/** 1519/**
1572 * link_insert_deferred_queue - insert deferred messages back into receive chain 1520 * link_insert_deferred_queue - insert deferred messages back into receive chain
1573 */ 1521 */
1574
1575static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr, 1522static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr,
1576 struct sk_buff *buf) 1523 struct sk_buff *buf)
1577{ 1524{
@@ -1602,7 +1549,6 @@ static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr,
1602 * TIPC will ignore the excess, under the assumption that it is optional info 1549 * TIPC will ignore the excess, under the assumption that it is optional info
1603 * introduced by a later release of the protocol. 1550 * introduced by a later release of the protocol.
1604 */ 1551 */
1605
1606static int link_recv_buf_validate(struct sk_buff *buf) 1552static int link_recv_buf_validate(struct sk_buff *buf)
1607{ 1553{
1608 static u32 min_data_hdr_size[8] = { 1554 static u32 min_data_hdr_size[8] = {
@@ -1648,7 +1594,6 @@ static int link_recv_buf_validate(struct sk_buff *buf)
1648 * Invoked with no locks held. Bearer pointer must point to a valid bearer 1594 * Invoked with no locks held. Bearer pointer must point to a valid bearer
1649 * structure (i.e. cannot be NULL), but bearer can be inactive. 1595 * structure (i.e. cannot be NULL), but bearer can be inactive.
1650 */ 1596 */
1651
1652void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr) 1597void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
1653{ 1598{
1654 read_lock_bh(&tipc_net_lock); 1599 read_lock_bh(&tipc_net_lock);
@@ -1666,22 +1611,18 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
1666 head = head->next; 1611 head = head->next;
1667 1612
1668 /* Ensure bearer is still enabled */ 1613 /* Ensure bearer is still enabled */
1669
1670 if (unlikely(!b_ptr->active)) 1614 if (unlikely(!b_ptr->active))
1671 goto cont; 1615 goto cont;
1672 1616
1673 /* Ensure message is well-formed */ 1617 /* Ensure message is well-formed */
1674
1675 if (unlikely(!link_recv_buf_validate(buf))) 1618 if (unlikely(!link_recv_buf_validate(buf)))
1676 goto cont; 1619 goto cont;
1677 1620
1678 /* Ensure message data is a single contiguous unit */ 1621 /* Ensure message data is a single contiguous unit */
1679
1680 if (unlikely(skb_linearize(buf))) 1622 if (unlikely(skb_linearize(buf)))
1681 goto cont; 1623 goto cont;
1682 1624
1683 /* Handle arrival of a non-unicast link message */ 1625 /* Handle arrival of a non-unicast link message */
1684
1685 msg = buf_msg(buf); 1626 msg = buf_msg(buf);
1686 1627
1687 if (unlikely(msg_non_seq(msg))) { 1628 if (unlikely(msg_non_seq(msg))) {
@@ -1693,20 +1634,17 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
1693 } 1634 }
1694 1635
1695 /* Discard unicast link messages destined for another node */ 1636 /* Discard unicast link messages destined for another node */
1696
1697 if (unlikely(!msg_short(msg) && 1637 if (unlikely(!msg_short(msg) &&
1698 (msg_destnode(msg) != tipc_own_addr))) 1638 (msg_destnode(msg) != tipc_own_addr)))
1699 goto cont; 1639 goto cont;
1700 1640
1701 /* Locate neighboring node that sent message */ 1641 /* Locate neighboring node that sent message */
1702
1703 n_ptr = tipc_node_find(msg_prevnode(msg)); 1642 n_ptr = tipc_node_find(msg_prevnode(msg));
1704 if (unlikely(!n_ptr)) 1643 if (unlikely(!n_ptr))
1705 goto cont; 1644 goto cont;
1706 tipc_node_lock(n_ptr); 1645 tipc_node_lock(n_ptr);
1707 1646
1708 /* Locate unicast link endpoint that should handle message */ 1647 /* Locate unicast link endpoint that should handle message */
1709
1710 l_ptr = n_ptr->links[b_ptr->identity]; 1648 l_ptr = n_ptr->links[b_ptr->identity];
1711 if (unlikely(!l_ptr)) { 1649 if (unlikely(!l_ptr)) {
1712 tipc_node_unlock(n_ptr); 1650 tipc_node_unlock(n_ptr);
@@ -1714,7 +1652,6 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
1714 } 1652 }
1715 1653
1716 /* Verify that communication with node is currently allowed */ 1654 /* Verify that communication with node is currently allowed */
1717
1718 if ((n_ptr->block_setup & WAIT_PEER_DOWN) && 1655 if ((n_ptr->block_setup & WAIT_PEER_DOWN) &&
1719 msg_user(msg) == LINK_PROTOCOL && 1656 msg_user(msg) == LINK_PROTOCOL &&
1720 (msg_type(msg) == RESET_MSG || 1657 (msg_type(msg) == RESET_MSG ||
@@ -1728,12 +1665,10 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
1728 } 1665 }
1729 1666
1730 /* Validate message sequence number info */ 1667 /* Validate message sequence number info */
1731
1732 seq_no = msg_seqno(msg); 1668 seq_no = msg_seqno(msg);
1733 ackd = msg_ack(msg); 1669 ackd = msg_ack(msg);
1734 1670
1735 /* Release acked messages */ 1671 /* Release acked messages */
1736
1737 if (n_ptr->bclink.supported) 1672 if (n_ptr->bclink.supported)
1738 tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg)); 1673 tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
1739 1674
@@ -1752,7 +1687,6 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
1752 } 1687 }
1753 1688
1754 /* Try sending any messages link endpoint has pending */ 1689 /* Try sending any messages link endpoint has pending */
1755
1756 if (unlikely(l_ptr->next_out)) 1690 if (unlikely(l_ptr->next_out))
1757 tipc_link_push_queue(l_ptr); 1691 tipc_link_push_queue(l_ptr);
1758 if (unlikely(!list_empty(&l_ptr->waiting_ports))) 1692 if (unlikely(!list_empty(&l_ptr->waiting_ports)))
@@ -1763,7 +1697,6 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
1763 } 1697 }
1764 1698
1765 /* Now (finally!) process the incoming message */ 1699 /* Now (finally!) process the incoming message */
1766
1767protocol_check: 1700protocol_check:
1768 if (likely(link_working_working(l_ptr))) { 1701 if (likely(link_working_working(l_ptr))) {
1769 if (likely(seq_no == mod(l_ptr->next_in_no))) { 1702 if (likely(seq_no == mod(l_ptr->next_in_no))) {
@@ -1859,7 +1792,6 @@ cont:
1859 * 1792 *
1860 * Returns increase in queue length (i.e. 0 or 1) 1793 * Returns increase in queue length (i.e. 0 or 1)
1861 */ 1794 */
1862
1863u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail, 1795u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
1864 struct sk_buff *buf) 1796 struct sk_buff *buf)
1865{ 1797{
@@ -1908,7 +1840,6 @@ u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
1908/* 1840/*
1909 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet 1841 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
1910 */ 1842 */
1911
1912static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr, 1843static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
1913 struct sk_buff *buf) 1844 struct sk_buff *buf)
1914{ 1845{
@@ -1920,14 +1851,12 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
1920 } 1851 }
1921 1852
1922 /* Record OOS packet arrival (force mismatch on next timeout) */ 1853 /* Record OOS packet arrival (force mismatch on next timeout) */
1923
1924 l_ptr->checkpoint--; 1854 l_ptr->checkpoint--;
1925 1855
1926 /* 1856 /*
1927 * Discard packet if a duplicate; otherwise add it to deferred queue 1857 * Discard packet if a duplicate; otherwise add it to deferred queue
1928 * and notify peer of gap as per protocol specification 1858 * and notify peer of gap as per protocol specification
1929 */ 1859 */
1930
1931 if (less(seq_no, mod(l_ptr->next_in_no))) { 1860 if (less(seq_no, mod(l_ptr->next_in_no))) {
1932 l_ptr->stats.duplicates++; 1861 l_ptr->stats.duplicates++;
1933 kfree_skb(buf); 1862 kfree_skb(buf);
@@ -1957,7 +1886,6 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
1957 int r_flag; 1886 int r_flag;
1958 1887
1959 /* Discard any previous message that was deferred due to congestion */ 1888 /* Discard any previous message that was deferred due to congestion */
1960
1961 if (l_ptr->proto_msg_queue) { 1889 if (l_ptr->proto_msg_queue) {
1962 kfree_skb(l_ptr->proto_msg_queue); 1890 kfree_skb(l_ptr->proto_msg_queue);
1963 l_ptr->proto_msg_queue = NULL; 1891 l_ptr->proto_msg_queue = NULL;
@@ -1967,12 +1895,10 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
1967 return; 1895 return;
1968 1896
1969 /* Abort non-RESET send if communication with node is prohibited */ 1897 /* Abort non-RESET send if communication with node is prohibited */
1970
1971 if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG)) 1898 if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG))
1972 return; 1899 return;
1973 1900
1974 /* Create protocol message with "out-of-sequence" sequence number */ 1901 /* Create protocol message with "out-of-sequence" sequence number */
1975
1976 msg_set_type(msg, msg_typ); 1902 msg_set_type(msg, msg_typ);
1977 msg_set_net_plane(msg, l_ptr->b_ptr->net_plane); 1903 msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
1978 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 1904 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
@@ -2040,14 +1966,12 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
2040 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg)); 1966 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
2041 1967
2042 /* Defer message if bearer is already congested */ 1968 /* Defer message if bearer is already congested */
2043
2044 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) { 1969 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
2045 l_ptr->proto_msg_queue = buf; 1970 l_ptr->proto_msg_queue = buf;
2046 return; 1971 return;
2047 } 1972 }
2048 1973
2049 /* Defer message if attempting to send results in bearer congestion */ 1974 /* Defer message if attempting to send results in bearer congestion */
2050
2051 if (!tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { 1975 if (!tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
2052 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr); 1976 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
2053 l_ptr->proto_msg_queue = buf; 1977 l_ptr->proto_msg_queue = buf;
@@ -2056,7 +1980,6 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
2056 } 1980 }
2057 1981
2058 /* Discard message if it was sent successfully */ 1982 /* Discard message if it was sent successfully */
2059
2060 l_ptr->unacked_window = 0; 1983 l_ptr->unacked_window = 0;
2061 kfree_skb(buf); 1984 kfree_skb(buf);
2062} 1985}
@@ -2066,7 +1989,6 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
2066 * Note that network plane id propagates through the network, and may 1989 * Note that network plane id propagates through the network, and may
2067 * change at any time. The node with lowest address rules 1990 * change at any time. The node with lowest address rules
2068 */ 1991 */
2069
2070static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf) 1992static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
2071{ 1993{
2072 u32 rec_gap = 0; 1994 u32 rec_gap = 0;
@@ -2079,7 +2001,6 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
2079 goto exit; 2001 goto exit;
2080 2002
2081 /* record unnumbered packet arrival (force mismatch on next timeout) */ 2003 /* record unnumbered packet arrival (force mismatch on next timeout) */
2082
2083 l_ptr->checkpoint--; 2004 l_ptr->checkpoint--;
2084 2005
2085 if (l_ptr->b_ptr->net_plane != msg_net_plane(msg)) 2006 if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
@@ -2111,7 +2032,6 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
2111 /* fall thru' */ 2032 /* fall thru' */
2112 case ACTIVATE_MSG: 2033 case ACTIVATE_MSG:
2113 /* Update link settings according other endpoint's values */ 2034 /* Update link settings according other endpoint's values */
2114
2115 strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg)); 2035 strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
2116 2036
2117 msg_tol = msg_link_tolerance(msg); 2037 msg_tol = msg_link_tolerance(msg);
@@ -2133,7 +2053,6 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
2133 l_ptr->owner->bclink.supportable = (max_pkt_info != 0); 2053 l_ptr->owner->bclink.supportable = (max_pkt_info != 0);
2134 2054
2135 /* Synchronize broadcast link info, if not done previously */ 2055 /* Synchronize broadcast link info, if not done previously */
2136
2137 if (!tipc_node_is_up(l_ptr->owner)) { 2056 if (!tipc_node_is_up(l_ptr->owner)) {
2138 l_ptr->owner->bclink.last_sent = 2057 l_ptr->owner->bclink.last_sent =
2139 l_ptr->owner->bclink.last_in = 2058 l_ptr->owner->bclink.last_in =
@@ -2185,7 +2104,6 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
2185 } 2104 }
2186 2105
2187 /* Protocol message before retransmits, reduce loss risk */ 2106 /* Protocol message before retransmits, reduce loss risk */
2188
2189 if (l_ptr->owner->bclink.supported) 2107 if (l_ptr->owner->bclink.supported)
2190 tipc_bclink_update_link_state(l_ptr->owner, 2108 tipc_bclink_update_link_state(l_ptr->owner,
2191 msg_last_bcast(msg)); 2109 msg_last_bcast(msg));
@@ -2243,7 +2161,6 @@ static void tipc_link_tunnel(struct tipc_link *l_ptr,
2243 * changeover(): Send whole message queue via the remaining link 2161 * changeover(): Send whole message queue via the remaining link
2244 * Owner node is locked. 2162 * Owner node is locked.
2245 */ 2163 */
2246
2247void tipc_link_changeover(struct tipc_link *l_ptr) 2164void tipc_link_changeover(struct tipc_link *l_ptr)
2248{ 2165{
2249 u32 msgcount = l_ptr->out_queue_size; 2166 u32 msgcount = l_ptr->out_queue_size;
@@ -2343,8 +2260,6 @@ void tipc_link_send_duplicate(struct tipc_link *l_ptr, struct tipc_link *tunnel)
2343 } 2260 }
2344} 2261}
2345 2262
2346
2347
2348/** 2263/**
2349 * buf_extract - extracts embedded TIPC message from another message 2264 * buf_extract - extracts embedded TIPC message from another message
2350 * @skb: encapsulating message buffer 2265 * @skb: encapsulating message buffer
@@ -2353,7 +2268,6 @@ void tipc_link_send_duplicate(struct tipc_link *l_ptr, struct tipc_link *tunnel)
2353 * Returns a new message buffer containing an embedded message. The 2268 * Returns a new message buffer containing an embedded message. The
2354 * encapsulating message itself is left unchanged. 2269 * encapsulating message itself is left unchanged.
2355 */ 2270 */
2356
2357static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos) 2271static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
2358{ 2272{
2359 struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos); 2273 struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
@@ -2370,7 +2284,6 @@ static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
2370 * link_recv_changeover_msg(): Receive tunneled packet sent 2284 * link_recv_changeover_msg(): Receive tunneled packet sent
2371 * via other link. Node is locked. Return extracted buffer. 2285 * via other link. Node is locked. Return extracted buffer.
2372 */ 2286 */
2373
2374static int link_recv_changeover_msg(struct tipc_link **l_ptr, 2287static int link_recv_changeover_msg(struct tipc_link **l_ptr,
2375 struct sk_buff **buf) 2288 struct sk_buff **buf)
2376{ 2289{
@@ -2405,7 +2318,6 @@ static int link_recv_changeover_msg(struct tipc_link **l_ptr,
2405 } 2318 }
2406 2319
2407 /* First original message ?: */ 2320 /* First original message ?: */
2408
2409 if (tipc_link_is_up(dest_link)) { 2321 if (tipc_link_is_up(dest_link)) {
2410 info("Resetting link <%s>, changeover initiated by peer\n", 2322 info("Resetting link <%s>, changeover initiated by peer\n",
2411 dest_link->name); 2323 dest_link->name);
@@ -2420,7 +2332,6 @@ static int link_recv_changeover_msg(struct tipc_link **l_ptr,
2420 } 2332 }
2421 2333
2422 /* Receive original message */ 2334 /* Receive original message */
2423
2424 if (dest_link->exp_msg_count == 0) { 2335 if (dest_link->exp_msg_count == 0) {
2425 warn("Link switchover error, " 2336 warn("Link switchover error, "
2426 "got too many tunnelled messages\n"); 2337 "got too many tunnelled messages\n");
@@ -2469,7 +2380,6 @@ void tipc_link_recv_bundle(struct sk_buff *buf)
2469 * Fragmentation/defragmentation: 2380 * Fragmentation/defragmentation:
2470 */ 2381 */
2471 2382
2472
2473/* 2383/*
2474 * link_send_long_buf: Entry for buffers needing fragmentation. 2384 * link_send_long_buf: Entry for buffers needing fragmentation.
2475 * The buffer is complete, inclusive total message length. 2385 * The buffer is complete, inclusive total message length.
@@ -2496,12 +2406,10 @@ static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
2496 destaddr = msg_destnode(inmsg); 2406 destaddr = msg_destnode(inmsg);
2497 2407
2498 /* Prepare reusable fragment header: */ 2408 /* Prepare reusable fragment header: */
2499
2500 tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT, 2409 tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
2501 INT_H_SIZE, destaddr); 2410 INT_H_SIZE, destaddr);
2502 2411
2503 /* Chop up message: */ 2412 /* Chop up message: */
2504
2505 while (rest > 0) { 2413 while (rest > 0) {
2506 struct sk_buff *fragm; 2414 struct sk_buff *fragm;
2507 2415
@@ -2535,7 +2443,6 @@ static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
2535 kfree_skb(buf); 2443 kfree_skb(buf);
2536 2444
2537 /* Append chain of fragments to send queue & send them */ 2445 /* Append chain of fragments to send queue & send them */
2538
2539 l_ptr->long_msg_seq_no++; 2446 l_ptr->long_msg_seq_no++;
2540 link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no); 2447 link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
2541 l_ptr->stats.sent_fragments += fragm_no; 2448 l_ptr->stats.sent_fragments += fragm_no;
@@ -2551,7 +2458,6 @@ static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
2551 * help storing these values in unused, available fields in the 2458 * help storing these values in unused, available fields in the
2552 * pending message. This makes dynamic memory allocation unnecessary. 2459 * pending message. This makes dynamic memory allocation unnecessary.
2553 */ 2460 */
2554
2555static void set_long_msg_seqno(struct sk_buff *buf, u32 seqno) 2461static void set_long_msg_seqno(struct sk_buff *buf, u32 seqno)
2556{ 2462{
2557 msg_set_seqno(buf_msg(buf), seqno); 2463 msg_set_seqno(buf_msg(buf), seqno);
@@ -2603,7 +2509,6 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2603 *fb = NULL; 2509 *fb = NULL;
2604 2510
2605 /* Is there an incomplete message waiting for this fragment? */ 2511 /* Is there an incomplete message waiting for this fragment? */
2606
2607 while (pbuf && ((buf_seqno(pbuf) != long_msg_seq_no) || 2512 while (pbuf && ((buf_seqno(pbuf) != long_msg_seq_no) ||
2608 (msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) { 2513 (msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) {
2609 prev = pbuf; 2514 prev = pbuf;
@@ -2629,7 +2534,6 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2629 skb_copy_to_linear_data(pbuf, imsg, 2534 skb_copy_to_linear_data(pbuf, imsg,
2630 msg_data_sz(fragm)); 2535 msg_data_sz(fragm));
2631 /* Prepare buffer for subsequent fragments. */ 2536 /* Prepare buffer for subsequent fragments. */
2632
2633 set_long_msg_seqno(pbuf, long_msg_seq_no); 2537 set_long_msg_seqno(pbuf, long_msg_seq_no);
2634 set_fragm_size(pbuf, fragm_sz); 2538 set_fragm_size(pbuf, fragm_sz);
2635 set_expected_frags(pbuf, exp_fragm_cnt - 1); 2539 set_expected_frags(pbuf, exp_fragm_cnt - 1);
@@ -2650,7 +2554,6 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2650 kfree_skb(fbuf); 2554 kfree_skb(fbuf);
2651 2555
2652 /* Is message complete? */ 2556 /* Is message complete? */
2653
2654 if (exp_frags == 0) { 2557 if (exp_frags == 0) {
2655 if (prev) 2558 if (prev)
2656 prev->next = pbuf->next; 2559 prev->next = pbuf->next;
@@ -2672,7 +2575,6 @@ int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2672 * link_check_defragm_bufs - flush stale incoming message fragments 2575 * link_check_defragm_bufs - flush stale incoming message fragments
2673 * @l_ptr: pointer to link 2576 * @l_ptr: pointer to link
2674 */ 2577 */
2675
2676static void link_check_defragm_bufs(struct tipc_link *l_ptr) 2578static void link_check_defragm_bufs(struct tipc_link *l_ptr)
2677{ 2579{
2678 struct sk_buff *prev = NULL; 2580 struct sk_buff *prev = NULL;
@@ -2701,8 +2603,6 @@ static void link_check_defragm_bufs(struct tipc_link *l_ptr)
2701 } 2603 }
2702} 2604}
2703 2605
2704
2705
2706static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance) 2606static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
2707{ 2607{
2708 if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL)) 2608 if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
@@ -2714,7 +2614,6 @@ static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
2714 l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4); 2614 l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
2715} 2615}
2716 2616
2717
2718void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window) 2617void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
2719{ 2618{
2720 /* Data messages from this node, inclusive FIRST_FRAGM */ 2619 /* Data messages from this node, inclusive FIRST_FRAGM */
@@ -2744,7 +2643,6 @@ void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
2744 * 2643 *
2745 * Returns pointer to link (or 0 if invalid link name). 2644 * Returns pointer to link (or 0 if invalid link name).
2746 */ 2645 */
2747
2748static struct tipc_link *link_find_link(const char *name, 2646static struct tipc_link *link_find_link(const char *name,
2749 struct tipc_node **node) 2647 struct tipc_node **node)
2750{ 2648{
@@ -2778,7 +2676,6 @@ static struct tipc_link *link_find_link(const char *name,
2778 * 2676 *
2779 * Returns 1 if value is within range, 0 if not. 2677 * Returns 1 if value is within range, 0 if not.
2780 */ 2678 */
2781
2782static int link_value_is_valid(u16 cmd, u32 new_value) 2679static int link_value_is_valid(u16 cmd, u32 new_value)
2783{ 2680{
2784 switch (cmd) { 2681 switch (cmd) {
@@ -2794,7 +2691,6 @@ static int link_value_is_valid(u16 cmd, u32 new_value)
2794 return 0; 2691 return 0;
2795} 2692}
2796 2693
2797
2798/** 2694/**
2799 * link_cmd_set_value - change priority/tolerance/window for link/bearer/media 2695 * link_cmd_set_value - change priority/tolerance/window for link/bearer/media
2800 * @name - ptr to link, bearer, or media name 2696 * @name - ptr to link, bearer, or media name
@@ -2805,7 +2701,6 @@ static int link_value_is_valid(u16 cmd, u32 new_value)
2805 * 2701 *
2806 * Returns 0 if value updated and negative value on error. 2702 * Returns 0 if value updated and negative value on error.
2807 */ 2703 */
2808
2809static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd) 2704static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
2810{ 2705{
2811 struct tipc_node *node; 2706 struct tipc_node *node;
@@ -2910,7 +2805,6 @@ struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space
2910 * link_reset_statistics - reset link statistics 2805 * link_reset_statistics - reset link statistics
2911 * @l_ptr: pointer to link 2806 * @l_ptr: pointer to link
2912 */ 2807 */
2913
2914static void link_reset_statistics(struct tipc_link *l_ptr) 2808static void link_reset_statistics(struct tipc_link *l_ptr)
2915{ 2809{
2916 memset(&l_ptr->stats, 0, sizeof(l_ptr->stats)); 2810 memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
@@ -2951,7 +2845,6 @@ struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_
2951/** 2845/**
2952 * percent - convert count to a percentage of total (rounding up or down) 2846 * percent - convert count to a percentage of total (rounding up or down)
2953 */ 2847 */
2954
2955static u32 percent(u32 count, u32 total) 2848static u32 percent(u32 count, u32 total)
2956{ 2849{
2957 return (count * 100 + (total / 2)) / total; 2850 return (count * 100 + (total / 2)) / total;
@@ -2965,7 +2858,6 @@ static u32 percent(u32 count, u32 total)
2965 * 2858 *
2966 * Returns length of print buffer data string (or 0 if error) 2859 * Returns length of print buffer data string (or 0 if error)
2967 */ 2860 */
2968
2969static int tipc_link_stats(const char *name, char *buf, const u32 buf_size) 2861static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
2970{ 2862{
2971 struct print_buf pb; 2863 struct print_buf pb;
@@ -3087,7 +2979,6 @@ struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_s
3087 * 2979 *
3088 * If no active link can be found, uses default maximum packet size. 2980 * If no active link can be found, uses default maximum packet size.
3089 */ 2981 */
3090
3091u32 tipc_link_get_max_pkt(u32 dest, u32 selector) 2982u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
3092{ 2983{
3093 struct tipc_node *n_ptr; 2984 struct tipc_node *n_ptr;
@@ -3171,4 +3062,3 @@ print_state:
3171 tipc_printbuf_validate(buf); 3062 tipc_printbuf_validate(buf);
3172 info("%s", print_area); 3063 info("%s", print_area);
3173} 3064}
3174
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 73c18c140e1d..d6a60a963ce6 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -47,13 +47,11 @@
47/* 47/*
48 * Out-of-range value for link sequence numbers 48 * Out-of-range value for link sequence numbers
49 */ 49 */
50
51#define INVALID_LINK_SEQ 0x10000 50#define INVALID_LINK_SEQ 0x10000
52 51
53/* 52/*
54 * Link states 53 * Link states
55 */ 54 */
56
57#define WORKING_WORKING 560810u 55#define WORKING_WORKING 560810u
58#define WORKING_UNKNOWN 560811u 56#define WORKING_UNKNOWN 560811u
59#define RESET_UNKNOWN 560812u 57#define RESET_UNKNOWN 560812u
@@ -63,7 +61,6 @@
63 * Starting value for maximum packet size negotiation on unicast links 61 * Starting value for maximum packet size negotiation on unicast links
64 * (unless bearer MTU is less) 62 * (unless bearer MTU is less)
65 */ 63 */
66
67#define MAX_PKT_DEFAULT 1500 64#define MAX_PKT_DEFAULT 1500
68 65
69/** 66/**
@@ -114,7 +111,6 @@
114 * @defragm_buf: list of partially reassembled inbound message fragments 111 * @defragm_buf: list of partially reassembled inbound message fragments
115 * @stats: collects statistics regarding link activity 112 * @stats: collects statistics regarding link activity
116 */ 113 */
117
118struct tipc_link { 114struct tipc_link {
119 u32 addr; 115 u32 addr;
120 char name[TIPC_MAX_LINK_NAME]; 116 char name[TIPC_MAX_LINK_NAME];
@@ -255,7 +251,6 @@ void tipc_link_retransmit(struct tipc_link *l_ptr,
255/* 251/*
256 * Link sequence number manipulation routines (uses modulo 2**16 arithmetic) 252 * Link sequence number manipulation routines (uses modulo 2**16 arithmetic)
257 */ 253 */
258
259static inline u32 buf_seqno(struct sk_buff *buf) 254static inline u32 buf_seqno(struct sk_buff *buf)
260{ 255{
261 return msg_seqno(buf_msg(buf)); 256 return msg_seqno(buf_msg(buf));
@@ -294,7 +289,6 @@ static inline u32 lesser(u32 left, u32 right)
294/* 289/*
295 * Link status checking routines 290 * Link status checking routines
296 */ 291 */
297
298static inline int link_working_working(struct tipc_link *l_ptr) 292static inline int link_working_working(struct tipc_link *l_ptr)
299{ 293{
300 return l_ptr->state == WORKING_WORKING; 294 return l_ptr->state == WORKING_WORKING;
diff --git a/net/tipc/log.c b/net/tipc/log.c
index 895c6e530b0b..026733f24919 100644
--- a/net/tipc/log.c
+++ b/net/tipc/log.c
@@ -47,7 +47,6 @@
47 * 47 *
48 * Additional user-defined print buffers are also permitted. 48 * Additional user-defined print buffers are also permitted.
49 */ 49 */
50
51static struct print_buf null_buf = { NULL, 0, NULL, 0 }; 50static struct print_buf null_buf = { NULL, 0, NULL, 0 };
52struct print_buf *const TIPC_NULL = &null_buf; 51struct print_buf *const TIPC_NULL = &null_buf;
53 52
@@ -72,7 +71,6 @@ struct print_buf *const TIPC_LOG = &log_buf;
72 * on the caller to prevent simultaneous use of the print buffer(s) being 71 * on the caller to prevent simultaneous use of the print buffer(s) being
73 * manipulated. 72 * manipulated.
74 */ 73 */
75
76static char print_string[TIPC_PB_MAX_STR]; 74static char print_string[TIPC_PB_MAX_STR];
77static DEFINE_SPINLOCK(print_lock); 75static DEFINE_SPINLOCK(print_lock);
78 76
@@ -97,7 +95,6 @@ static void tipc_printbuf_move(struct print_buf *pb_to,
97 * Note: If the character array is too small (or absent), the print buffer 95 * Note: If the character array is too small (or absent), the print buffer
98 * becomes a null device that discards anything written to it. 96 * becomes a null device that discards anything written to it.
99 */ 97 */
100
101void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 size) 98void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 size)
102{ 99{
103 pb->buf = raw; 100 pb->buf = raw;
@@ -117,7 +114,6 @@ void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 size)
117 * tipc_printbuf_reset - reinitialize print buffer to empty state 114 * tipc_printbuf_reset - reinitialize print buffer to empty state
118 * @pb: pointer to print buffer structure 115 * @pb: pointer to print buffer structure
119 */ 116 */
120
121static void tipc_printbuf_reset(struct print_buf *pb) 117static void tipc_printbuf_reset(struct print_buf *pb)
122{ 118{
123 if (pb->buf) { 119 if (pb->buf) {
@@ -133,7 +129,6 @@ static void tipc_printbuf_reset(struct print_buf *pb)
133 * 129 *
134 * Returns non-zero if print buffer is empty. 130 * Returns non-zero if print buffer is empty.
135 */ 131 */
136
137static int tipc_printbuf_empty(struct print_buf *pb) 132static int tipc_printbuf_empty(struct print_buf *pb)
138{ 133{
139 return !pb->buf || (pb->crs == pb->buf); 134 return !pb->buf || (pb->crs == pb->buf);
@@ -148,7 +143,6 @@ static int tipc_printbuf_empty(struct print_buf *pb)
148 * 143 *
149 * Returns length of print buffer data string (including trailing NUL) 144 * Returns length of print buffer data string (including trailing NUL)
150 */ 145 */
151
152int tipc_printbuf_validate(struct print_buf *pb) 146int tipc_printbuf_validate(struct print_buf *pb)
153{ 147{
154 char *err = "\n\n*** PRINT BUFFER OVERFLOW ***\n\n"; 148 char *err = "\n\n*** PRINT BUFFER OVERFLOW ***\n\n";
@@ -182,14 +176,12 @@ int tipc_printbuf_validate(struct print_buf *pb)
182 * Current contents of destination print buffer (if any) are discarded. 176 * Current contents of destination print buffer (if any) are discarded.
183 * Source print buffer becomes empty if a successful move occurs. 177 * Source print buffer becomes empty if a successful move occurs.
184 */ 178 */
185
186static void tipc_printbuf_move(struct print_buf *pb_to, 179static void tipc_printbuf_move(struct print_buf *pb_to,
187 struct print_buf *pb_from) 180 struct print_buf *pb_from)
188{ 181{
189 int len; 182 int len;
190 183
191 /* Handle the cases where contents can't be moved */ 184 /* Handle the cases where contents can't be moved */
192
193 if (!pb_to->buf) 185 if (!pb_to->buf)
194 return; 186 return;
195 187
@@ -206,7 +198,6 @@ static void tipc_printbuf_move(struct print_buf *pb_to,
206 } 198 }
207 199
208 /* Copy data from char after cursor to end (if used) */ 200 /* Copy data from char after cursor to end (if used) */
209
210 len = pb_from->buf + pb_from->size - pb_from->crs - 2; 201 len = pb_from->buf + pb_from->size - pb_from->crs - 2;
211 if ((pb_from->buf[pb_from->size - 1] == 0) && (len > 0)) { 202 if ((pb_from->buf[pb_from->size - 1] == 0) && (len > 0)) {
212 strcpy(pb_to->buf, pb_from->crs + 1); 203 strcpy(pb_to->buf, pb_from->crs + 1);
@@ -215,7 +206,6 @@ static void tipc_printbuf_move(struct print_buf *pb_to,
215 pb_to->crs = pb_to->buf; 206 pb_to->crs = pb_to->buf;
216 207
217 /* Copy data from start to cursor (always) */ 208 /* Copy data from start to cursor (always) */
218
219 len = pb_from->crs - pb_from->buf; 209 len = pb_from->crs - pb_from->buf;
220 strcpy(pb_to->crs, pb_from->buf); 210 strcpy(pb_to->crs, pb_from->buf);
221 pb_to->crs += len; 211 pb_to->crs += len;
@@ -228,7 +218,6 @@ static void tipc_printbuf_move(struct print_buf *pb_to,
228 * @pb: pointer to print buffer 218 * @pb: pointer to print buffer
229 * @fmt: formatted info to be printed 219 * @fmt: formatted info to be printed
230 */ 220 */
231
232void tipc_printf(struct print_buf *pb, const char *fmt, ...) 221void tipc_printf(struct print_buf *pb, const char *fmt, ...)
233{ 222{
234 int chars_to_add; 223 int chars_to_add;
@@ -270,7 +259,6 @@ void tipc_printf(struct print_buf *pb, const char *fmt, ...)
270 * tipc_log_resize - change the size of the TIPC log buffer 259 * tipc_log_resize - change the size of the TIPC log buffer
271 * @log_size: print buffer size to use 260 * @log_size: print buffer size to use
272 */ 261 */
273
274int tipc_log_resize(int log_size) 262int tipc_log_resize(int log_size)
275{ 263{
276 int res = 0; 264 int res = 0;
@@ -295,7 +283,6 @@ int tipc_log_resize(int log_size)
295/** 283/**
296 * tipc_log_resize_cmd - reconfigure size of TIPC log buffer 284 * tipc_log_resize_cmd - reconfigure size of TIPC log buffer
297 */ 285 */
298
299struct sk_buff *tipc_log_resize_cmd(const void *req_tlv_area, int req_tlv_space) 286struct sk_buff *tipc_log_resize_cmd(const void *req_tlv_area, int req_tlv_space)
300{ 287{
301 u32 value; 288 u32 value;
@@ -316,7 +303,6 @@ struct sk_buff *tipc_log_resize_cmd(const void *req_tlv_area, int req_tlv_space)
316/** 303/**
317 * tipc_log_dump - capture TIPC log buffer contents in configuration message 304 * tipc_log_dump - capture TIPC log buffer contents in configuration message
318 */ 305 */
319
320struct sk_buff *tipc_log_dump(void) 306struct sk_buff *tipc_log_dump(void)
321{ 307{
322 struct sk_buff *reply; 308 struct sk_buff *reply;
diff --git a/net/tipc/log.h b/net/tipc/log.h
index 2248d96238e6..d1f5eb967fd8 100644
--- a/net/tipc/log.h
+++ b/net/tipc/log.h
@@ -44,7 +44,6 @@
44 * @crs: pointer to first unused space in character array (i.e. final NUL) 44 * @crs: pointer to first unused space in character array (i.e. final NUL)
45 * @echo: echo output to system console if non-zero 45 * @echo: echo output to system console if non-zero
46 */ 46 */
47
48struct print_buf { 47struct print_buf {
49 char *buf; 48 char *buf;
50 u32 size; 49 u32 size;
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index e3afe162c0ac..deea0d232dca 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -72,7 +72,6 @@ void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type,
72 * 72 *
73 * Returns message data size or errno 73 * Returns message data size or errno
74 */ 74 */
75
76int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect, 75int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
77 u32 num_sect, unsigned int total_len, 76 u32 num_sect, unsigned int total_len,
78 int max_size, int usrmem, struct sk_buff **buf) 77 int max_size, int usrmem, struct sk_buff **buf)
@@ -112,7 +111,6 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
112} 111}
113 112
114#ifdef CONFIG_TIPC_DEBUG 113#ifdef CONFIG_TIPC_DEBUG
115
116void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str) 114void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
117{ 115{
118 u32 usr = msg_user(msg); 116 u32 usr = msg_user(msg);
@@ -352,5 +350,4 @@ void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str)
352 if ((usr == MSG_FRAGMENTER) && (msg_type(msg) == FIRST_FRAGMENT)) 350 if ((usr == MSG_FRAGMENTER) && (msg_type(msg) == FIRST_FRAGMENT))
353 tipc_msg_dbg(buf, msg_get_wrapped(msg), " /"); 351 tipc_msg_dbg(buf, msg_get_wrapped(msg), " /");
354} 352}
355
356#endif 353#endif
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index eba524e34a6b..ba2a72beea68 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -44,7 +44,6 @@
44 * 44 *
45 * Note: Some items are also used with TIPC internal message headers 45 * Note: Some items are also used with TIPC internal message headers
46 */ 46 */
47
48#define TIPC_VERSION 2 47#define TIPC_VERSION 2
49 48
50/* 49/*
@@ -58,7 +57,6 @@
58/* 57/*
59 * Payload message types 58 * Payload message types
60 */ 59 */
61
62#define TIPC_CONN_MSG 0 60#define TIPC_CONN_MSG 0
63#define TIPC_MCAST_MSG 1 61#define TIPC_MCAST_MSG 1
64#define TIPC_NAMED_MSG 2 62#define TIPC_NAMED_MSG 2
@@ -67,7 +65,6 @@
67/* 65/*
68 * Message header sizes 66 * Message header sizes
69 */ 67 */
70
71#define SHORT_H_SIZE 24 /* In-cluster basic payload message */ 68#define SHORT_H_SIZE 24 /* In-cluster basic payload message */
72#define BASIC_H_SIZE 32 /* Basic payload message */ 69#define BASIC_H_SIZE 32 /* Basic payload message */
73#define NAMED_H_SIZE 40 /* Named payload message */ 70#define NAMED_H_SIZE 40 /* Named payload message */
@@ -121,7 +118,6 @@ static inline void msg_swap_words(struct tipc_msg *msg, u32 a, u32 b)
121/* 118/*
122 * Word 0 119 * Word 0
123 */ 120 */
124
125static inline u32 msg_version(struct tipc_msg *m) 121static inline u32 msg_version(struct tipc_msg *m)
126{ 122{
127 return msg_bits(m, 0, 29, 7); 123 return msg_bits(m, 0, 29, 7);
@@ -216,7 +212,6 @@ static inline void msg_set_size(struct tipc_msg *m, u32 sz)
216/* 212/*
217 * Word 1 213 * Word 1
218 */ 214 */
219
220static inline u32 msg_type(struct tipc_msg *m) 215static inline u32 msg_type(struct tipc_msg *m)
221{ 216{
222 return msg_bits(m, 1, 29, 0x7); 217 return msg_bits(m, 1, 29, 0x7);
@@ -291,7 +286,6 @@ static inline void msg_set_bcast_ack(struct tipc_msg *m, u32 n)
291/* 286/*
292 * Word 2 287 * Word 2
293 */ 288 */
294
295static inline u32 msg_ack(struct tipc_msg *m) 289static inline u32 msg_ack(struct tipc_msg *m)
296{ 290{
297 return msg_bits(m, 2, 16, 0xffff); 291 return msg_bits(m, 2, 16, 0xffff);
@@ -315,8 +309,6 @@ static inline void msg_set_seqno(struct tipc_msg *m, u32 n)
315/* 309/*
316 * Words 3-10 310 * Words 3-10
317 */ 311 */
318
319
320static inline u32 msg_prevnode(struct tipc_msg *m) 312static inline u32 msg_prevnode(struct tipc_msg *m)
321{ 313{
322 return msg_word(m, 3); 314 return msg_word(m, 3);
@@ -434,7 +426,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
434 return (struct tipc_msg *)msg_data(m); 426 return (struct tipc_msg *)msg_data(m);
435} 427}
436 428
437
438/* 429/*
439 * Constants and routines used to read and write TIPC internal message headers 430 * Constants and routines used to read and write TIPC internal message headers
440 */ 431 */
@@ -442,7 +433,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
442/* 433/*
443 * Internal message users 434 * Internal message users
444 */ 435 */
445
446#define BCAST_PROTOCOL 5 436#define BCAST_PROTOCOL 5
447#define MSG_BUNDLER 6 437#define MSG_BUNDLER 6
448#define LINK_PROTOCOL 7 438#define LINK_PROTOCOL 7
@@ -456,7 +446,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
456/* 446/*
457 * Connection management protocol message types 447 * Connection management protocol message types
458 */ 448 */
459
460#define CONN_PROBE 0 449#define CONN_PROBE 0
461#define CONN_PROBE_REPLY 1 450#define CONN_PROBE_REPLY 1
462#define CONN_ACK 2 451#define CONN_ACK 2
@@ -464,14 +453,12 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
464/* 453/*
465 * Name distributor message types 454 * Name distributor message types
466 */ 455 */
467
468#define PUBLICATION 0 456#define PUBLICATION 0
469#define WITHDRAWAL 1 457#define WITHDRAWAL 1
470 458
471/* 459/*
472 * Segmentation message types 460 * Segmentation message types
473 */ 461 */
474
475#define FIRST_FRAGMENT 0 462#define FIRST_FRAGMENT 0
476#define FRAGMENT 1 463#define FRAGMENT 1
477#define LAST_FRAGMENT 2 464#define LAST_FRAGMENT 2
@@ -479,7 +466,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
479/* 466/*
480 * Link management protocol message types 467 * Link management protocol message types
481 */ 468 */
482
483#define STATE_MSG 0 469#define STATE_MSG 0
484#define RESET_MSG 1 470#define RESET_MSG 1
485#define ACTIVATE_MSG 2 471#define ACTIVATE_MSG 2
@@ -493,7 +479,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
493/* 479/*
494 * Config protocol message types 480 * Config protocol message types
495 */ 481 */
496
497#define DSC_REQ_MSG 0 482#define DSC_REQ_MSG 0
498#define DSC_RESP_MSG 1 483#define DSC_RESP_MSG 1
499 484
@@ -501,7 +486,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
501/* 486/*
502 * Word 1 487 * Word 1
503 */ 488 */
504
505static inline u32 msg_seq_gap(struct tipc_msg *m) 489static inline u32 msg_seq_gap(struct tipc_msg *m)
506{ 490{
507 return msg_bits(m, 1, 16, 0x1fff); 491 return msg_bits(m, 1, 16, 0x1fff);
@@ -526,7 +510,6 @@ static inline void msg_set_node_sig(struct tipc_msg *m, u32 n)
526/* 510/*
527 * Word 2 511 * Word 2
528 */ 512 */
529
530static inline u32 msg_dest_domain(struct tipc_msg *m) 513static inline u32 msg_dest_domain(struct tipc_msg *m)
531{ 514{
532 return msg_word(m, 2); 515 return msg_word(m, 2);
@@ -561,7 +544,6 @@ static inline void msg_set_bcgap_to(struct tipc_msg *m, u32 n)
561/* 544/*
562 * Word 4 545 * Word 4
563 */ 546 */
564
565static inline u32 msg_last_bcast(struct tipc_msg *m) 547static inline u32 msg_last_bcast(struct tipc_msg *m)
566{ 548{
567 return msg_bits(m, 4, 16, 0xffff); 549 return msg_bits(m, 4, 16, 0xffff);
@@ -628,7 +610,6 @@ static inline void msg_set_link_selector(struct tipc_msg *m, u32 n)
628/* 610/*
629 * Word 5 611 * Word 5
630 */ 612 */
631
632static inline u32 msg_session(struct tipc_msg *m) 613static inline u32 msg_session(struct tipc_msg *m)
633{ 614{
634 return msg_bits(m, 5, 16, 0xffff); 615 return msg_bits(m, 5, 16, 0xffff);
@@ -697,7 +678,6 @@ static inline char *msg_media_addr(struct tipc_msg *m)
697/* 678/*
698 * Word 9 679 * Word 9
699 */ 680 */
700
701static inline u32 msg_msgcnt(struct tipc_msg *m) 681static inline u32 msg_msgcnt(struct tipc_msg *m)
702{ 682{
703 return msg_bits(m, 9, 16, 0xffff); 683 return msg_bits(m, 9, 16, 0xffff);
@@ -744,5 +724,4 @@ void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type,
744int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect, 724int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
745 u32 num_sect, unsigned int total_len, 725 u32 num_sect, unsigned int total_len,
746 int max_size, int usrmem, struct sk_buff **buf); 726 int max_size, int usrmem, struct sk_buff **buf);
747
748#endif 727#endif
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index d57da6159616..158318e67b08 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -58,7 +58,6 @@
58 * Note: There is no field that identifies the publishing node because it is 58 * Note: There is no field that identifies the publishing node because it is
59 * the same for all items contained within a publication message. 59 * the same for all items contained within a publication message.
60 */ 60 */
61
62struct distr_item { 61struct distr_item {
63 __be32 type; 62 __be32 type;
64 __be32 lower; 63 __be32 lower;
@@ -68,17 +67,41 @@ struct distr_item {
68}; 67};
69 68
70/** 69/**
71 * List of externally visible publications by this node -- 70 * struct publ_list - list of publications made by this node
72 * that is, all publications having scope > TIPC_NODE_SCOPE. 71 * @list: circular list of publications
72 * @list_size: number of entries in list
73 */ 73 */
74struct publ_list {
75 struct list_head list;
76 u32 size;
77};
78
79static struct publ_list publ_zone = {
80 .list = LIST_HEAD_INIT(publ_zone.list),
81 .size = 0,
82};
83
84static struct publ_list publ_cluster = {
85 .list = LIST_HEAD_INIT(publ_cluster.list),
86 .size = 0,
87};
88
89static struct publ_list publ_node = {
90 .list = LIST_HEAD_INIT(publ_node.list),
91 .size = 0,
92};
93
94static struct publ_list *publ_lists[] = {
95 NULL,
96 &publ_zone, /* publ_lists[TIPC_ZONE_SCOPE] */
97 &publ_cluster, /* publ_lists[TIPC_CLUSTER_SCOPE] */
98 &publ_node /* publ_lists[TIPC_NODE_SCOPE] */
99};
74 100
75static LIST_HEAD(publ_root);
76static u32 publ_cnt;
77 101
78/** 102/**
79 * publ_to_item - add publication info to a publication message 103 * publ_to_item - add publication info to a publication message
80 */ 104 */
81
82static void publ_to_item(struct distr_item *i, struct publication *p) 105static void publ_to_item(struct distr_item *i, struct publication *p)
83{ 106{
84 i->type = htonl(p->type); 107 i->type = htonl(p->type);
@@ -91,7 +114,6 @@ static void publ_to_item(struct distr_item *i, struct publication *p)
91/** 114/**
92 * named_prepare_buf - allocate & initialize a publication message 115 * named_prepare_buf - allocate & initialize a publication message
93 */ 116 */
94
95static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest) 117static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
96{ 118{
97 struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size); 119 struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size);
@@ -126,14 +148,16 @@ static void named_cluster_distribute(struct sk_buff *buf)
126/** 148/**
127 * tipc_named_publish - tell other nodes about a new publication by this node 149 * tipc_named_publish - tell other nodes about a new publication by this node
128 */ 150 */
129
130void tipc_named_publish(struct publication *publ) 151void tipc_named_publish(struct publication *publ)
131{ 152{
132 struct sk_buff *buf; 153 struct sk_buff *buf;
133 struct distr_item *item; 154 struct distr_item *item;
134 155
135 list_add_tail(&publ->local_list, &publ_root); 156 list_add_tail(&publ->local_list, &publ_lists[publ->scope]->list);
136 publ_cnt++; 157 publ_lists[publ->scope]->size++;
158
159 if (publ->scope == TIPC_NODE_SCOPE)
160 return;
137 161
138 buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0); 162 buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
139 if (!buf) { 163 if (!buf) {
@@ -149,14 +173,16 @@ void tipc_named_publish(struct publication *publ)
149/** 173/**
150 * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node 174 * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
151 */ 175 */
152
153void tipc_named_withdraw(struct publication *publ) 176void tipc_named_withdraw(struct publication *publ)
154{ 177{
155 struct sk_buff *buf; 178 struct sk_buff *buf;
156 struct distr_item *item; 179 struct distr_item *item;
157 180
158 list_del(&publ->local_list); 181 list_del(&publ->local_list);
159 publ_cnt--; 182 publ_lists[publ->scope]->size--;
183
184 if (publ->scope == TIPC_NODE_SCOPE)
185 return;
160 186
161 buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0); 187 buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
162 if (!buf) { 188 if (!buf) {
@@ -169,25 +195,51 @@ void tipc_named_withdraw(struct publication *publ)
169 named_cluster_distribute(buf); 195 named_cluster_distribute(buf);
170} 196}
171 197
198/*
199 * named_distribute - prepare name info for bulk distribution to another node
200 */
201static void named_distribute(struct list_head *message_list, u32 node,
202 struct publ_list *pls, u32 max_item_buf)
203{
204 struct publication *publ;
205 struct sk_buff *buf = NULL;
206 struct distr_item *item = NULL;
207 u32 left = 0;
208 u32 rest = pls->size * ITEM_SIZE;
209
210 list_for_each_entry(publ, &pls->list, local_list) {
211 if (!buf) {
212 left = (rest <= max_item_buf) ? rest : max_item_buf;
213 rest -= left;
214 buf = named_prepare_buf(PUBLICATION, left, node);
215 if (!buf) {
216 warn("Bulk publication failure\n");
217 return;
218 }
219 item = (struct distr_item *)msg_data(buf_msg(buf));
220 }
221 publ_to_item(item, publ);
222 item++;
223 left -= ITEM_SIZE;
224 if (!left) {
225 list_add_tail((struct list_head *)buf, message_list);
226 buf = NULL;
227 }
228 }
229}
230
172/** 231/**
173 * tipc_named_node_up - tell specified node about all publications by this node 232 * tipc_named_node_up - tell specified node about all publications by this node
174 */ 233 */
175
176void tipc_named_node_up(unsigned long nodearg) 234void tipc_named_node_up(unsigned long nodearg)
177{ 235{
178 struct tipc_node *n_ptr; 236 struct tipc_node *n_ptr;
179 struct tipc_link *l_ptr; 237 struct tipc_link *l_ptr;
180 struct publication *publ;
181 struct distr_item *item = NULL;
182 struct sk_buff *buf = NULL;
183 struct list_head message_list; 238 struct list_head message_list;
184 u32 node = (u32)nodearg; 239 u32 node = (u32)nodearg;
185 u32 left = 0;
186 u32 rest;
187 u32 max_item_buf = 0; 240 u32 max_item_buf = 0;
188 241
189 /* compute maximum amount of publication data to send per message */ 242 /* compute maximum amount of publication data to send per message */
190
191 read_lock_bh(&tipc_net_lock); 243 read_lock_bh(&tipc_net_lock);
192 n_ptr = tipc_node_find(node); 244 n_ptr = tipc_node_find(node);
193 if (n_ptr) { 245 if (n_ptr) {
@@ -203,32 +255,11 @@ void tipc_named_node_up(unsigned long nodearg)
203 return; 255 return;
204 256
205 /* create list of publication messages, then send them as a unit */ 257 /* create list of publication messages, then send them as a unit */
206
207 INIT_LIST_HEAD(&message_list); 258 INIT_LIST_HEAD(&message_list);
208 259
209 read_lock_bh(&tipc_nametbl_lock); 260 read_lock_bh(&tipc_nametbl_lock);
210 rest = publ_cnt * ITEM_SIZE; 261 named_distribute(&message_list, node, &publ_cluster, max_item_buf);
211 262 named_distribute(&message_list, node, &publ_zone, max_item_buf);
212 list_for_each_entry(publ, &publ_root, local_list) {
213 if (!buf) {
214 left = (rest <= max_item_buf) ? rest : max_item_buf;
215 rest -= left;
216 buf = named_prepare_buf(PUBLICATION, left, node);
217 if (!buf) {
218 warn("Bulk publication distribution failure\n");
219 goto exit;
220 }
221 item = (struct distr_item *)msg_data(buf_msg(buf));
222 }
223 publ_to_item(item, publ);
224 item++;
225 left -= ITEM_SIZE;
226 if (!left) {
227 list_add_tail((struct list_head *)buf, &message_list);
228 buf = NULL;
229 }
230 }
231exit:
232 read_unlock_bh(&tipc_nametbl_lock); 263 read_unlock_bh(&tipc_nametbl_lock);
233 264
234 tipc_link_send_names(&message_list, (u32)node); 265 tipc_link_send_names(&message_list, (u32)node);
@@ -240,7 +271,6 @@ exit:
240 * Invoked for each publication issued by a newly failed node. 271 * Invoked for each publication issued by a newly failed node.
241 * Removes publication structure from name table & deletes it. 272 * Removes publication structure from name table & deletes it.
242 */ 273 */
243
244static void named_purge_publ(struct publication *publ) 274static void named_purge_publ(struct publication *publ)
245{ 275{
246 struct publication *p; 276 struct publication *p;
@@ -264,7 +294,6 @@ static void named_purge_publ(struct publication *publ)
264/** 294/**
265 * tipc_named_recv - process name table update message sent by another node 295 * tipc_named_recv - process name table update message sent by another node
266 */ 296 */
267
268void tipc_named_recv(struct sk_buff *buf) 297void tipc_named_recv(struct sk_buff *buf)
269{ 298{
270 struct publication *publ; 299 struct publication *publ;
@@ -316,21 +345,22 @@ void tipc_named_recv(struct sk_buff *buf)
316} 345}
317 346
318/** 347/**
319 * tipc_named_reinit - re-initialize local publication list 348 * tipc_named_reinit - re-initialize local publications
320 * 349 *
321 * This routine is called whenever TIPC networking is enabled. 350 * This routine is called whenever TIPC networking is enabled.
322 * All existing publications by this node that have "cluster" or "zone" scope 351 * All name table entries published by this node are updated to reflect
323 * are updated to reflect the node's new network address. 352 * the node's new network address.
324 */ 353 */
325
326void tipc_named_reinit(void) 354void tipc_named_reinit(void)
327{ 355{
328 struct publication *publ; 356 struct publication *publ;
357 int scope;
329 358
330 write_lock_bh(&tipc_nametbl_lock); 359 write_lock_bh(&tipc_nametbl_lock);
331 360
332 list_for_each_entry(publ, &publ_root, local_list) 361 for (scope = TIPC_ZONE_SCOPE; scope <= TIPC_NODE_SCOPE; scope++)
333 publ->node = tipc_own_addr; 362 list_for_each_entry(publ, &publ_lists[scope]->list, local_list)
363 publ->node = tipc_own_addr;
334 364
335 write_unlock_bh(&tipc_nametbl_lock); 365 write_unlock_bh(&tipc_nametbl_lock);
336} 366}
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index c6a1ae36952e..010f24a59da2 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -56,7 +56,6 @@ static int tipc_nametbl_size = 1024; /* must be a power of 2 */
56 * publications of the associated name sequence belong to it. 56 * publications of the associated name sequence belong to it.
57 * (The cluster and node lists may be empty.) 57 * (The cluster and node lists may be empty.)
58 */ 58 */
59
60struct name_info { 59struct name_info {
61 struct list_head node_list; 60 struct list_head node_list;
62 struct list_head cluster_list; 61 struct list_head cluster_list;
@@ -72,7 +71,6 @@ struct name_info {
72 * @upper: name sequence upper bound 71 * @upper: name sequence upper bound
73 * @info: pointer to name sequence publication info 72 * @info: pointer to name sequence publication info
74 */ 73 */
75
76struct sub_seq { 74struct sub_seq {
77 u32 lower; 75 u32 lower;
78 u32 upper; 76 u32 upper;
@@ -90,7 +88,6 @@ struct sub_seq {
90 * @subscriptions: list of subscriptions for this 'type' 88 * @subscriptions: list of subscriptions for this 'type'
91 * @lock: spinlock controlling access to publication lists of all sub-sequences 89 * @lock: spinlock controlling access to publication lists of all sub-sequences
92 */ 90 */
93
94struct name_seq { 91struct name_seq {
95 u32 type; 92 u32 type;
96 struct sub_seq *sseqs; 93 struct sub_seq *sseqs;
@@ -107,7 +104,6 @@ struct name_seq {
107 * accessed via hashing on 'type'; name sequence lists are *not* sorted 104 * accessed via hashing on 'type'; name sequence lists are *not* sorted
108 * @local_publ_count: number of publications issued by this node 105 * @local_publ_count: number of publications issued by this node
109 */ 106 */
110
111struct name_table { 107struct name_table {
112 struct hlist_head *types; 108 struct hlist_head *types;
113 u32 local_publ_count; 109 u32 local_publ_count;
@@ -124,7 +120,6 @@ static int hash(int x)
124/** 120/**
125 * publ_create - create a publication structure 121 * publ_create - create a publication structure
126 */ 122 */
127
128static struct publication *publ_create(u32 type, u32 lower, u32 upper, 123static struct publication *publ_create(u32 type, u32 lower, u32 upper,
129 u32 scope, u32 node, u32 port_ref, 124 u32 scope, u32 node, u32 port_ref,
130 u32 key) 125 u32 key)
@@ -151,7 +146,6 @@ static struct publication *publ_create(u32 type, u32 lower, u32 upper,
151/** 146/**
152 * tipc_subseq_alloc - allocate a specified number of sub-sequence structures 147 * tipc_subseq_alloc - allocate a specified number of sub-sequence structures
153 */ 148 */
154
155static struct sub_seq *tipc_subseq_alloc(u32 cnt) 149static struct sub_seq *tipc_subseq_alloc(u32 cnt)
156{ 150{
157 struct sub_seq *sseq = kcalloc(cnt, sizeof(struct sub_seq), GFP_ATOMIC); 151 struct sub_seq *sseq = kcalloc(cnt, sizeof(struct sub_seq), GFP_ATOMIC);
@@ -163,7 +157,6 @@ static struct sub_seq *tipc_subseq_alloc(u32 cnt)
163 * 157 *
164 * Allocates a single sub-sequence structure and sets it to all 0's. 158 * Allocates a single sub-sequence structure and sets it to all 0's.
165 */ 159 */
166
167static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_head) 160static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_head)
168{ 161{
169 struct name_seq *nseq = kzalloc(sizeof(*nseq), GFP_ATOMIC); 162 struct name_seq *nseq = kzalloc(sizeof(*nseq), GFP_ATOMIC);
@@ -186,12 +179,23 @@ static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_hea
186 return nseq; 179 return nseq;
187} 180}
188 181
189/** 182/*
183 * nameseq_delete_empty - deletes a name sequence structure if now unused
184 */
185static void nameseq_delete_empty(struct name_seq *seq)
186{
187 if (!seq->first_free && list_empty(&seq->subscriptions)) {
188 hlist_del_init(&seq->ns_list);
189 kfree(seq->sseqs);
190 kfree(seq);
191 }
192}
193
194/*
190 * nameseq_find_subseq - find sub-sequence (if any) matching a name instance 195 * nameseq_find_subseq - find sub-sequence (if any) matching a name instance
191 * 196 *
192 * Very time-critical, so binary searches through sub-sequence array. 197 * Very time-critical, so binary searches through sub-sequence array.
193 */ 198 */
194
195static struct sub_seq *nameseq_find_subseq(struct name_seq *nseq, 199static struct sub_seq *nameseq_find_subseq(struct name_seq *nseq,
196 u32 instance) 200 u32 instance)
197{ 201{
@@ -221,7 +225,6 @@ static struct sub_seq *nameseq_find_subseq(struct name_seq *nseq,
221 * 225 *
222 * Note: Similar to binary search code for locating a sub-sequence. 226 * Note: Similar to binary search code for locating a sub-sequence.
223 */ 227 */
224
225static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance) 228static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance)
226{ 229{
227 struct sub_seq *sseqs = nseq->sseqs; 230 struct sub_seq *sseqs = nseq->sseqs;
@@ -242,9 +245,8 @@ static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance)
242} 245}
243 246
244/** 247/**
245 * tipc_nameseq_insert_publ - 248 * tipc_nameseq_insert_publ
246 */ 249 */
247
248static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, 250static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
249 u32 type, u32 lower, u32 upper, 251 u32 type, u32 lower, u32 upper,
250 u32 scope, u32 node, u32 port, u32 key) 252 u32 scope, u32 node, u32 port, u32 key)
@@ -260,7 +262,6 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
260 if (sseq) { 262 if (sseq) {
261 263
262 /* Lower end overlaps existing entry => need an exact match */ 264 /* Lower end overlaps existing entry => need an exact match */
263
264 if ((sseq->lower != lower) || (sseq->upper != upper)) { 265 if ((sseq->lower != lower) || (sseq->upper != upper)) {
265 warn("Cannot publish {%u,%u,%u}, overlap error\n", 266 warn("Cannot publish {%u,%u,%u}, overlap error\n",
266 type, lower, upper); 267 type, lower, upper);
@@ -280,11 +281,9 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
280 struct sub_seq *freesseq; 281 struct sub_seq *freesseq;
281 282
282 /* Find where lower end should be inserted */ 283 /* Find where lower end should be inserted */
283
284 inspos = nameseq_locate_subseq(nseq, lower); 284 inspos = nameseq_locate_subseq(nseq, lower);
285 285
286 /* Fail if upper end overlaps into an existing entry */ 286 /* Fail if upper end overlaps into an existing entry */
287
288 if ((inspos < nseq->first_free) && 287 if ((inspos < nseq->first_free) &&
289 (upper >= nseq->sseqs[inspos].lower)) { 288 (upper >= nseq->sseqs[inspos].lower)) {
290 warn("Cannot publish {%u,%u,%u}, overlap error\n", 289 warn("Cannot publish {%u,%u,%u}, overlap error\n",
@@ -293,7 +292,6 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
293 } 292 }
294 293
295 /* Ensure there is space for new sub-sequence */ 294 /* Ensure there is space for new sub-sequence */
296
297 if (nseq->first_free == nseq->alloc) { 295 if (nseq->first_free == nseq->alloc) {
298 struct sub_seq *sseqs = tipc_subseq_alloc(nseq->alloc * 2); 296 struct sub_seq *sseqs = tipc_subseq_alloc(nseq->alloc * 2);
299 297
@@ -321,7 +319,6 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
321 INIT_LIST_HEAD(&info->zone_list); 319 INIT_LIST_HEAD(&info->zone_list);
322 320
323 /* Insert new sub-sequence */ 321 /* Insert new sub-sequence */
324
325 sseq = &nseq->sseqs[inspos]; 322 sseq = &nseq->sseqs[inspos];
326 freesseq = &nseq->sseqs[nseq->first_free]; 323 freesseq = &nseq->sseqs[nseq->first_free];
327 memmove(sseq + 1, sseq, (freesseq - sseq) * sizeof(*sseq)); 324 memmove(sseq + 1, sseq, (freesseq - sseq) * sizeof(*sseq));
@@ -333,8 +330,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
333 created_subseq = 1; 330 created_subseq = 1;
334 } 331 }
335 332
336 /* Insert a publication: */ 333 /* Insert a publication */
337
338 publ = publ_create(type, lower, upper, scope, node, port, key); 334 publ = publ_create(type, lower, upper, scope, node, port, key);
339 if (!publ) 335 if (!publ)
340 return NULL; 336 return NULL;
@@ -347,14 +343,12 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
347 info->cluster_list_size++; 343 info->cluster_list_size++;
348 } 344 }
349 345
350 if (node == tipc_own_addr) { 346 if (in_own_node(node)) {
351 list_add(&publ->node_list, &info->node_list); 347 list_add(&publ->node_list, &info->node_list);
352 info->node_list_size++; 348 info->node_list_size++;
353 } 349 }
354 350
355 /* 351 /* Any subscriptions waiting for notification? */
356 * Any subscriptions waiting for notification?
357 */
358 list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) { 352 list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
359 tipc_subscr_report_overlap(s, 353 tipc_subscr_report_overlap(s,
360 publ->lower, 354 publ->lower,
@@ -368,7 +362,7 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
368} 362}
369 363
370/** 364/**
371 * tipc_nameseq_remove_publ - 365 * tipc_nameseq_remove_publ
372 * 366 *
373 * NOTE: There may be cases where TIPC is asked to remove a publication 367 * NOTE: There may be cases where TIPC is asked to remove a publication
374 * that is not in the name table. For example, if another node issues a 368 * that is not in the name table. For example, if another node issues a
@@ -378,7 +372,6 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
378 * A failed withdraw request simply returns a failure indication and lets the 372 * A failed withdraw request simply returns a failure indication and lets the
379 * caller issue any error or warning messages associated with such a problem. 373 * caller issue any error or warning messages associated with such a problem.
380 */ 374 */
381
382static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 inst, 375static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 inst,
383 u32 node, u32 ref, u32 key) 376 u32 node, u32 ref, u32 key)
384{ 377{
@@ -395,7 +388,6 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i
395 info = sseq->info; 388 info = sseq->info;
396 389
397 /* Locate publication, if it exists */ 390 /* Locate publication, if it exists */
398
399 list_for_each_entry(publ, &info->zone_list, zone_list) { 391 list_for_each_entry(publ, &info->zone_list, zone_list) {
400 if ((publ->key == key) && (publ->ref == ref) && 392 if ((publ->key == key) && (publ->ref == ref) &&
401 (!publ->node || (publ->node == node))) 393 (!publ->node || (publ->node == node)))
@@ -405,26 +397,22 @@ static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 i
405 397
406found: 398found:
407 /* Remove publication from zone scope list */ 399 /* Remove publication from zone scope list */
408
409 list_del(&publ->zone_list); 400 list_del(&publ->zone_list);
410 info->zone_list_size--; 401 info->zone_list_size--;
411 402
412 /* Remove publication from cluster scope list, if present */ 403 /* Remove publication from cluster scope list, if present */
413
414 if (in_own_cluster(node)) { 404 if (in_own_cluster(node)) {
415 list_del(&publ->cluster_list); 405 list_del(&publ->cluster_list);
416 info->cluster_list_size--; 406 info->cluster_list_size--;
417 } 407 }
418 408
419 /* Remove publication from node scope list, if present */ 409 /* Remove publication from node scope list, if present */
420 410 if (in_own_node(node)) {
421 if (node == tipc_own_addr) {
422 list_del(&publ->node_list); 411 list_del(&publ->node_list);
423 info->node_list_size--; 412 info->node_list_size--;
424 } 413 }
425 414
426 /* Contract subseq list if no more publications for that subseq */ 415 /* Contract subseq list if no more publications for that subseq */
427
428 if (list_empty(&info->zone_list)) { 416 if (list_empty(&info->zone_list)) {
429 kfree(info); 417 kfree(info);
430 free = &nseq->sseqs[nseq->first_free--]; 418 free = &nseq->sseqs[nseq->first_free--];
@@ -433,7 +421,6 @@ found:
433 } 421 }
434 422
435 /* Notify any waiting subscriptions */ 423 /* Notify any waiting subscriptions */
436
437 list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) { 424 list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
438 tipc_subscr_report_overlap(s, 425 tipc_subscr_report_overlap(s,
439 publ->lower, 426 publ->lower,
@@ -452,7 +439,6 @@ found:
452 * the prescribed number of events if there is any sub- 439 * the prescribed number of events if there is any sub-
453 * sequence overlapping with the requested sequence 440 * sequence overlapping with the requested sequence
454 */ 441 */
455
456static void tipc_nameseq_subscribe(struct name_seq *nseq, 442static void tipc_nameseq_subscribe(struct name_seq *nseq,
457 struct tipc_subscription *s) 443 struct tipc_subscription *s)
458{ 444{
@@ -504,9 +490,10 @@ struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
504{ 490{
505 struct name_seq *seq = nametbl_find_seq(type); 491 struct name_seq *seq = nametbl_find_seq(type);
506 492
507 if (lower > upper) { 493 if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE) ||
508 warn("Failed to publish illegal {%u,%u,%u}\n", 494 (lower > upper)) {
509 type, lower, upper); 495 dbg("Failed to publish illegal {%u,%u,%u} with scope %u\n",
496 type, lower, upper, scope);
510 return NULL; 497 return NULL;
511 } 498 }
512 499
@@ -529,12 +516,7 @@ struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
529 return NULL; 516 return NULL;
530 517
531 publ = tipc_nameseq_remove_publ(seq, lower, node, ref, key); 518 publ = tipc_nameseq_remove_publ(seq, lower, node, ref, key);
532 519 nameseq_delete_empty(seq);
533 if (!seq->first_free && list_empty(&seq->subscriptions)) {
534 hlist_del_init(&seq->ns_list);
535 kfree(seq->sseqs);
536 kfree(seq);
537 }
538 return publ; 520 return publ;
539} 521}
540 522
@@ -551,7 +533,6 @@ struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
551 * - if name translation is attempted and fails, sets 'destnode' to 0 533 * - if name translation is attempted and fails, sets 'destnode' to 0
552 * and returns 0 534 * and returns 0
553 */ 535 */
554
555u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode) 536u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
556{ 537{
557 struct sub_seq *sseq; 538 struct sub_seq *sseq;
@@ -574,7 +555,7 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
574 spin_lock_bh(&seq->lock); 555 spin_lock_bh(&seq->lock);
575 info = sseq->info; 556 info = sseq->info;
576 557
577 /* Closest-First Algorithm: */ 558 /* Closest-First Algorithm */
578 if (likely(!*destnode)) { 559 if (likely(!*destnode)) {
579 if (!list_empty(&info->node_list)) { 560 if (!list_empty(&info->node_list)) {
580 publ = list_first_entry(&info->node_list, 561 publ = list_first_entry(&info->node_list,
@@ -597,14 +578,14 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
597 } 578 }
598 } 579 }
599 580
600 /* Round-Robin Algorithm: */ 581 /* Round-Robin Algorithm */
601 else if (*destnode == tipc_own_addr) { 582 else if (*destnode == tipc_own_addr) {
602 if (list_empty(&info->node_list)) 583 if (list_empty(&info->node_list))
603 goto no_match; 584 goto no_match;
604 publ = list_first_entry(&info->node_list, struct publication, 585 publ = list_first_entry(&info->node_list, struct publication,
605 node_list); 586 node_list);
606 list_move_tail(&publ->node_list, &info->node_list); 587 list_move_tail(&publ->node_list, &info->node_list);
607 } else if (in_own_cluster(*destnode)) { 588 } else if (in_own_cluster_exact(*destnode)) {
608 if (list_empty(&info->cluster_list)) 589 if (list_empty(&info->cluster_list))
609 goto no_match; 590 goto no_match;
610 publ = list_first_entry(&info->cluster_list, struct publication, 591 publ = list_first_entry(&info->cluster_list, struct publication,
@@ -638,7 +619,6 @@ not_found:
638 * 619 *
639 * Returns non-zero if any off-node ports overlap 620 * Returns non-zero if any off-node ports overlap
640 */ 621 */
641
642int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit, 622int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
643 struct tipc_port_list *dports) 623 struct tipc_port_list *dports)
644{ 624{
@@ -682,7 +662,6 @@ exit:
682/* 662/*
683 * tipc_nametbl_publish - add name publication to network name tables 663 * tipc_nametbl_publish - add name publication to network name tables
684 */ 664 */
685
686struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper, 665struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
687 u32 scope, u32 port_ref, u32 key) 666 u32 scope, u32 port_ref, u32 key)
688{ 667{
@@ -695,11 +674,12 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
695 } 674 }
696 675
697 write_lock_bh(&tipc_nametbl_lock); 676 write_lock_bh(&tipc_nametbl_lock);
698 table.local_publ_count++;
699 publ = tipc_nametbl_insert_publ(type, lower, upper, scope, 677 publ = tipc_nametbl_insert_publ(type, lower, upper, scope,
700 tipc_own_addr, port_ref, key); 678 tipc_own_addr, port_ref, key);
701 if (publ && (scope != TIPC_NODE_SCOPE)) 679 if (likely(publ)) {
680 table.local_publ_count++;
702 tipc_named_publish(publ); 681 tipc_named_publish(publ);
682 }
703 write_unlock_bh(&tipc_nametbl_lock); 683 write_unlock_bh(&tipc_nametbl_lock);
704 return publ; 684 return publ;
705} 685}
@@ -707,7 +687,6 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
707/** 687/**
708 * tipc_nametbl_withdraw - withdraw name publication from network name tables 688 * tipc_nametbl_withdraw - withdraw name publication from network name tables
709 */ 689 */
710
711int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key) 690int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
712{ 691{
713 struct publication *publ; 692 struct publication *publ;
@@ -716,8 +695,7 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
716 publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key); 695 publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key);
717 if (likely(publ)) { 696 if (likely(publ)) {
718 table.local_publ_count--; 697 table.local_publ_count--;
719 if (publ->scope != TIPC_NODE_SCOPE) 698 tipc_named_withdraw(publ);
720 tipc_named_withdraw(publ);
721 write_unlock_bh(&tipc_nametbl_lock); 699 write_unlock_bh(&tipc_nametbl_lock);
722 list_del_init(&publ->pport_list); 700 list_del_init(&publ->pport_list);
723 kfree(publ); 701 kfree(publ);
@@ -733,7 +711,6 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
733/** 711/**
734 * tipc_nametbl_subscribe - add a subscription object to the name table 712 * tipc_nametbl_subscribe - add a subscription object to the name table
735 */ 713 */
736
737void tipc_nametbl_subscribe(struct tipc_subscription *s) 714void tipc_nametbl_subscribe(struct tipc_subscription *s)
738{ 715{
739 u32 type = s->seq.type; 716 u32 type = s->seq.type;
@@ -757,7 +734,6 @@ void tipc_nametbl_subscribe(struct tipc_subscription *s)
757/** 734/**
758 * tipc_nametbl_unsubscribe - remove a subscription object from name table 735 * tipc_nametbl_unsubscribe - remove a subscription object from name table
759 */ 736 */
760
761void tipc_nametbl_unsubscribe(struct tipc_subscription *s) 737void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
762{ 738{
763 struct name_seq *seq; 739 struct name_seq *seq;
@@ -768,11 +744,7 @@ void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
768 spin_lock_bh(&seq->lock); 744 spin_lock_bh(&seq->lock);
769 list_del_init(&s->nameseq_list); 745 list_del_init(&s->nameseq_list);
770 spin_unlock_bh(&seq->lock); 746 spin_unlock_bh(&seq->lock);
771 if ((seq->first_free == 0) && list_empty(&seq->subscriptions)) { 747 nameseq_delete_empty(seq);
772 hlist_del_init(&seq->ns_list);
773 kfree(seq->sseqs);
774 kfree(seq);
775 }
776 } 748 }
777 write_unlock_bh(&tipc_nametbl_lock); 749 write_unlock_bh(&tipc_nametbl_lock);
778} 750}
@@ -781,7 +753,6 @@ void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
781/** 753/**
782 * subseq_list: print specified sub-sequence contents into the given buffer 754 * subseq_list: print specified sub-sequence contents into the given buffer
783 */ 755 */
784
785static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth, 756static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth,
786 u32 index) 757 u32 index)
787{ 758{
@@ -818,7 +789,6 @@ static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth,
818/** 789/**
819 * nameseq_list: print specified name sequence contents into the given buffer 790 * nameseq_list: print specified name sequence contents into the given buffer
820 */ 791 */
821
822static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth, 792static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth,
823 u32 type, u32 lowbound, u32 upbound, u32 index) 793 u32 type, u32 lowbound, u32 upbound, u32 index)
824{ 794{
@@ -849,7 +819,6 @@ static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth,
849/** 819/**
850 * nametbl_header - print name table header into the given buffer 820 * nametbl_header - print name table header into the given buffer
851 */ 821 */
852
853static void nametbl_header(struct print_buf *buf, u32 depth) 822static void nametbl_header(struct print_buf *buf, u32 depth)
854{ 823{
855 const char *header[] = { 824 const char *header[] = {
@@ -871,7 +840,6 @@ static void nametbl_header(struct print_buf *buf, u32 depth)
871/** 840/**
872 * nametbl_list - print specified name table contents into the given buffer 841 * nametbl_list - print specified name table contents into the given buffer
873 */ 842 */
874
875static void nametbl_list(struct print_buf *buf, u32 depth_info, 843static void nametbl_list(struct print_buf *buf, u32 depth_info,
876 u32 type, u32 lowbound, u32 upbound) 844 u32 type, u32 lowbound, u32 upbound)
877{ 845{
@@ -970,7 +938,6 @@ void tipc_nametbl_stop(void)
970 return; 938 return;
971 939
972 /* Verify name table is empty, then release it */ 940 /* Verify name table is empty, then release it */
973
974 write_lock_bh(&tipc_nametbl_lock); 941 write_lock_bh(&tipc_nametbl_lock);
975 for (i = 0; i < tipc_nametbl_size; i++) { 942 for (i = 0; i < tipc_nametbl_size; i++) {
976 if (!hlist_empty(&table.types[i])) 943 if (!hlist_empty(&table.types[i]))
@@ -980,4 +947,3 @@ void tipc_nametbl_stop(void)
980 table.types = NULL; 947 table.types = NULL;
981 write_unlock_bh(&tipc_nametbl_lock); 948 write_unlock_bh(&tipc_nametbl_lock);
982} 949}
983
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
index 207d59ebf849..71cb4dc712df 100644
--- a/net/tipc/name_table.h
+++ b/net/tipc/name_table.h
@@ -45,10 +45,8 @@ struct tipc_port_list;
45/* 45/*
46 * TIPC name types reserved for internal TIPC use (both current and planned) 46 * TIPC name types reserved for internal TIPC use (both current and planned)
47 */ 47 */
48
49#define TIPC_ZM_SRV 3 /* zone master service name type */ 48#define TIPC_ZM_SRV 3 /* zone master service name type */
50 49
51
52/** 50/**
53 * struct publication - info about a published (name or) name sequence 51 * struct publication - info about a published (name or) name sequence
54 * @type: name sequence type 52 * @type: name sequence type
@@ -67,7 +65,6 @@ struct tipc_port_list;
67 * 65 *
68 * Note that the node list, cluster list, and zone list are circular lists. 66 * Note that the node list, cluster list, and zone list are circular lists.
69 */ 67 */
70
71struct publication { 68struct publication {
72 u32 type; 69 u32 type;
73 u32 lower; 70 u32 lower;
diff --git a/net/tipc/net.c b/net/tipc/net.c
index d4531b07076c..7c236c89cf5e 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -175,17 +175,14 @@ int tipc_net_start(u32 addr)
175{ 175{
176 char addr_string[16]; 176 char addr_string[16];
177 177
178 tipc_subscr_stop(); 178 write_lock_bh(&tipc_net_lock);
179 tipc_cfg_stop();
180
181 tipc_own_addr = addr; 179 tipc_own_addr = addr;
182 tipc_named_reinit(); 180 tipc_named_reinit();
183 tipc_port_reinit(); 181 tipc_port_reinit();
184
185 tipc_bclink_init(); 182 tipc_bclink_init();
183 write_unlock_bh(&tipc_net_lock);
186 184
187 tipc_k_signal((Handler)tipc_subscr_start, 0); 185 tipc_cfg_reinit();
188 tipc_k_signal((Handler)tipc_cfg_init, 0);
189 186
190 info("Started in network mode\n"); 187 info("Started in network mode\n");
191 info("Own node address %s, network identity %u\n", 188 info("Own node address %s, network identity %u\n",
diff --git a/net/tipc/node.c b/net/tipc/node.c
index a34cabc2c43a..d4fd341e6e0d 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -58,7 +58,7 @@ static atomic_t tipc_num_links = ATOMIC_INIT(0);
58 * entries has been chosen so that no hash chain exceeds 8 nodes and will 58 * entries has been chosen so that no hash chain exceeds 8 nodes and will
59 * usually be much smaller (typically only a single node). 59 * usually be much smaller (typically only a single node).
60 */ 60 */
61static inline unsigned int tipc_hashfn(u32 addr) 61static unsigned int tipc_hashfn(u32 addr)
62{ 62{
63 return addr & (NODE_HTABLE_SIZE - 1); 63 return addr & (NODE_HTABLE_SIZE - 1);
64} 64}
@@ -66,13 +66,12 @@ static inline unsigned int tipc_hashfn(u32 addr)
66/* 66/*
67 * tipc_node_find - locate specified node object, if it exists 67 * tipc_node_find - locate specified node object, if it exists
68 */ 68 */
69
70struct tipc_node *tipc_node_find(u32 addr) 69struct tipc_node *tipc_node_find(u32 addr)
71{ 70{
72 struct tipc_node *node; 71 struct tipc_node *node;
73 struct hlist_node *pos; 72 struct hlist_node *pos;
74 73
75 if (unlikely(!in_own_cluster(addr))) 74 if (unlikely(!in_own_cluster_exact(addr)))
76 return NULL; 75 return NULL;
77 76
78 hlist_for_each_entry(node, pos, &node_htable[tipc_hashfn(addr)], hash) { 77 hlist_for_each_entry(node, pos, &node_htable[tipc_hashfn(addr)], hash) {
@@ -91,7 +90,6 @@ struct tipc_node *tipc_node_find(u32 addr)
91 * time. (It would be preferable to switch to holding net_lock in write mode, 90 * time. (It would be preferable to switch to holding net_lock in write mode,
92 * but this is a non-trivial change.) 91 * but this is a non-trivial change.)
93 */ 92 */
94
95struct tipc_node *tipc_node_create(u32 addr) 93struct tipc_node *tipc_node_create(u32 addr)
96{ 94{
97 struct tipc_node *n_ptr, *temp_node; 95 struct tipc_node *n_ptr, *temp_node;
@@ -142,13 +140,11 @@ void tipc_node_delete(struct tipc_node *n_ptr)
142 tipc_num_nodes--; 140 tipc_num_nodes--;
143} 141}
144 142
145
146/** 143/**
147 * tipc_node_link_up - handle addition of link 144 * tipc_node_link_up - handle addition of link
148 * 145 *
149 * Link becomes active (alone or shared) or standby, depending on its priority. 146 * Link becomes active (alone or shared) or standby, depending on its priority.
150 */ 147 */
151
152void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr) 148void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
153{ 149{
154 struct tipc_link **active = &n_ptr->active_links[0]; 150 struct tipc_link **active = &n_ptr->active_links[0];
@@ -181,7 +177,6 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
181/** 177/**
182 * node_select_active_links - select active link 178 * node_select_active_links - select active link
183 */ 179 */
184
185static void node_select_active_links(struct tipc_node *n_ptr) 180static void node_select_active_links(struct tipc_node *n_ptr)
186{ 181{
187 struct tipc_link **active = &n_ptr->active_links[0]; 182 struct tipc_link **active = &n_ptr->active_links[0];
@@ -209,7 +204,6 @@ static void node_select_active_links(struct tipc_node *n_ptr)
209/** 204/**
210 * tipc_node_link_down - handle loss of link 205 * tipc_node_link_down - handle loss of link
211 */ 206 */
212
213void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr) 207void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
214{ 208{
215 struct tipc_link **active; 209 struct tipc_link **active;
@@ -300,7 +294,6 @@ static void node_lost_contact(struct tipc_node *n_ptr)
300 tipc_addr_string_fill(addr_string, n_ptr->addr)); 294 tipc_addr_string_fill(addr_string, n_ptr->addr));
301 295
302 /* Flush broadcast link info associated with lost node */ 296 /* Flush broadcast link info associated with lost node */
303
304 if (n_ptr->bclink.supported) { 297 if (n_ptr->bclink.supported) {
305 while (n_ptr->bclink.deferred_head) { 298 while (n_ptr->bclink.deferred_head) {
306 struct sk_buff *buf = n_ptr->bclink.deferred_head; 299 struct sk_buff *buf = n_ptr->bclink.deferred_head;
@@ -334,7 +327,6 @@ static void node_lost_contact(struct tipc_node *n_ptr)
334 tipc_nodesub_notify(n_ptr); 327 tipc_nodesub_notify(n_ptr);
335 328
336 /* Prevent re-contact with node until cleanup is done */ 329 /* Prevent re-contact with node until cleanup is done */
337
338 n_ptr->block_setup = WAIT_PEER_DOWN | WAIT_NAMES_GONE; 330 n_ptr->block_setup = WAIT_PEER_DOWN | WAIT_NAMES_GONE;
339 tipc_k_signal((Handler)node_name_purge_complete, n_ptr->addr); 331 tipc_k_signal((Handler)node_name_purge_complete, n_ptr->addr);
340} 332}
@@ -362,7 +354,6 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
362 } 354 }
363 355
364 /* For now, get space for all other nodes */ 356 /* For now, get space for all other nodes */
365
366 payload_size = TLV_SPACE(sizeof(node_info)) * tipc_num_nodes; 357 payload_size = TLV_SPACE(sizeof(node_info)) * tipc_num_nodes;
367 if (payload_size > 32768u) { 358 if (payload_size > 32768u) {
368 read_unlock_bh(&tipc_net_lock); 359 read_unlock_bh(&tipc_net_lock);
@@ -376,7 +367,6 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
376 } 367 }
377 368
378 /* Add TLVs for all nodes in scope */ 369 /* Add TLVs for all nodes in scope */
379
380 list_for_each_entry(n_ptr, &tipc_node_list, list) { 370 list_for_each_entry(n_ptr, &tipc_node_list, list) {
381 if (!tipc_in_scope(domain, n_ptr->addr)) 371 if (!tipc_in_scope(domain, n_ptr->addr))
382 continue; 372 continue;
@@ -412,7 +402,6 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
412 read_lock_bh(&tipc_net_lock); 402 read_lock_bh(&tipc_net_lock);
413 403
414 /* Get space for all unicast links + broadcast link */ 404 /* Get space for all unicast links + broadcast link */
415
416 payload_size = TLV_SPACE(sizeof(link_info)) * 405 payload_size = TLV_SPACE(sizeof(link_info)) *
417 (atomic_read(&tipc_num_links) + 1); 406 (atomic_read(&tipc_num_links) + 1);
418 if (payload_size > 32768u) { 407 if (payload_size > 32768u) {
@@ -427,14 +416,12 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
427 } 416 }
428 417
429 /* Add TLV for broadcast link */ 418 /* Add TLV for broadcast link */
430
431 link_info.dest = htonl(tipc_cluster_mask(tipc_own_addr)); 419 link_info.dest = htonl(tipc_cluster_mask(tipc_own_addr));
432 link_info.up = htonl(1); 420 link_info.up = htonl(1);
433 strlcpy(link_info.str, tipc_bclink_name, TIPC_MAX_LINK_NAME); 421 strlcpy(link_info.str, tipc_bclink_name, TIPC_MAX_LINK_NAME);
434 tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info)); 422 tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));
435 423
436 /* Add TLVs for any other links in scope */ 424 /* Add TLVs for any other links in scope */
437
438 list_for_each_entry(n_ptr, &tipc_node_list, list) { 425 list_for_each_entry(n_ptr, &tipc_node_list, list) {
439 u32 i; 426 u32 i;
440 427
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 72561c971d67..cfcaf4d6e480 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -48,7 +48,6 @@
48#define INVALID_NODE_SIG 0x10000 48#define INVALID_NODE_SIG 0x10000
49 49
50/* Flags used to block (re)establishment of contact with a neighboring node */ 50/* Flags used to block (re)establishment of contact with a neighboring node */
51
52#define WAIT_PEER_DOWN 0x0001 /* wait to see that peer's links are down */ 51#define WAIT_PEER_DOWN 0x0001 /* wait to see that peer's links are down */
53#define WAIT_NAMES_GONE 0x0002 /* wait for peer's publications to be purged */ 52#define WAIT_NAMES_GONE 0x0002 /* wait for peer's publications to be purged */
54#define WAIT_NODE_DOWN 0x0004 /* wait until peer node is declared down */ 53#define WAIT_NODE_DOWN 0x0004 /* wait until peer node is declared down */
@@ -79,7 +78,6 @@
79 * @deferred_tail: newest OOS b'cast message received from node 78 * @deferred_tail: newest OOS b'cast message received from node
80 * @defragm: list of partially reassembled b'cast message fragments from node 79 * @defragm: list of partially reassembled b'cast message fragments from node
81 */ 80 */
82
83struct tipc_node { 81struct tipc_node {
84 u32 addr; 82 u32 addr;
85 spinlock_t lock; 83 spinlock_t lock;
diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c
index c3c2815ae630..7a27344108fe 100644
--- a/net/tipc/node_subscr.c
+++ b/net/tipc/node_subscr.c
@@ -41,11 +41,10 @@
41/** 41/**
42 * tipc_nodesub_subscribe - create "node down" subscription for specified node 42 * tipc_nodesub_subscribe - create "node down" subscription for specified node
43 */ 43 */
44
45void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr, 44void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr,
46 void *usr_handle, net_ev_handler handle_down) 45 void *usr_handle, net_ev_handler handle_down)
47{ 46{
48 if (addr == tipc_own_addr) { 47 if (in_own_node(addr)) {
49 node_sub->node = NULL; 48 node_sub->node = NULL;
50 return; 49 return;
51 } 50 }
@@ -66,7 +65,6 @@ void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr,
66/** 65/**
67 * tipc_nodesub_unsubscribe - cancel "node down" subscription (if any) 66 * tipc_nodesub_unsubscribe - cancel "node down" subscription (if any)
68 */ 67 */
69
70void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub) 68void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub)
71{ 69{
72 if (!node_sub->node) 70 if (!node_sub->node)
@@ -82,7 +80,6 @@ void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub)
82 * 80 *
83 * Note: node is locked by caller 81 * Note: node is locked by caller
84 */ 82 */
85
86void tipc_nodesub_notify(struct tipc_node *node) 83void tipc_nodesub_notify(struct tipc_node *node)
87{ 84{
88 struct tipc_node_subscr *ns; 85 struct tipc_node_subscr *ns;
diff --git a/net/tipc/node_subscr.h b/net/tipc/node_subscr.h
index 4bc2ca0867a1..c95d20727ded 100644
--- a/net/tipc/node_subscr.h
+++ b/net/tipc/node_subscr.h
@@ -48,7 +48,6 @@ typedef void (*net_ev_handler) (void *usr_handle);
48 * @usr_handle: argument to pass to routine when node fails 48 * @usr_handle: argument to pass to routine when node fails
49 * @nodesub_list: adjacent entries in list of subscriptions for the node 49 * @nodesub_list: adjacent entries in list of subscriptions for the node
50 */ 50 */
51
52struct tipc_node_subscr { 51struct tipc_node_subscr {
53 struct tipc_node *node; 52 struct tipc_node *node;
54 net_ev_handler handle_node_down; 53 net_ev_handler handle_node_down;
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 94d2904cce66..2ad37a4db376 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -69,10 +69,30 @@ static u32 port_peerport(struct tipc_port *p_ptr)
69 return msg_destport(&p_ptr->phdr); 69 return msg_destport(&p_ptr->phdr);
70} 70}
71 71
72/*
73 * tipc_port_peer_msg - verify message was sent by connected port's peer
74 *
75 * Handles cases where the node's network address has changed from
76 * the default of <0.0.0> to its configured setting.
77 */
78int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg)
79{
80 u32 peernode;
81 u32 orignode;
82
83 if (msg_origport(msg) != port_peerport(p_ptr))
84 return 0;
85
86 orignode = msg_orignode(msg);
87 peernode = port_peernode(p_ptr);
88 return (orignode == peernode) ||
89 (!orignode && (peernode == tipc_own_addr)) ||
90 (!peernode && (orignode == tipc_own_addr));
91}
92
72/** 93/**
73 * tipc_multicast - send a multicast message to local and remote destinations 94 * tipc_multicast - send a multicast message to local and remote destinations
74 */ 95 */
75
76int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, 96int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
77 u32 num_sect, struct iovec const *msg_sect, 97 u32 num_sect, struct iovec const *msg_sect,
78 unsigned int total_len) 98 unsigned int total_len)
@@ -89,7 +109,6 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
89 return -EINVAL; 109 return -EINVAL;
90 110
91 /* Create multicast message */ 111 /* Create multicast message */
92
93 hdr = &oport->phdr; 112 hdr = &oport->phdr;
94 msg_set_type(hdr, TIPC_MCAST_MSG); 113 msg_set_type(hdr, TIPC_MCAST_MSG);
95 msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE); 114 msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
@@ -105,12 +124,10 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
105 return res; 124 return res;
106 125
107 /* Figure out where to send multicast message */ 126 /* Figure out where to send multicast message */
108
109 ext_targets = tipc_nametbl_mc_translate(seq->type, seq->lower, seq->upper, 127 ext_targets = tipc_nametbl_mc_translate(seq->type, seq->lower, seq->upper,
110 TIPC_NODE_SCOPE, &dports); 128 TIPC_NODE_SCOPE, &dports);
111 129
112 /* Send message to destinations (duplicate it only if necessary) */ 130 /* Send message to destinations (duplicate it only if necessary) */
113
114 if (ext_targets) { 131 if (ext_targets) {
115 if (dports.count != 0) { 132 if (dports.count != 0) {
116 ibuf = skb_copy(buf, GFP_ATOMIC); 133 ibuf = skb_copy(buf, GFP_ATOMIC);
@@ -141,7 +158,6 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
141 * 158 *
142 * If there is no port list, perform a lookup to create one 159 * If there is no port list, perform a lookup to create one
143 */ 160 */
144
145void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp) 161void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp)
146{ 162{
147 struct tipc_msg *msg; 163 struct tipc_msg *msg;
@@ -152,7 +168,6 @@ void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp)
152 msg = buf_msg(buf); 168 msg = buf_msg(buf);
153 169
154 /* Create destination port list, if one wasn't supplied */ 170 /* Create destination port list, if one wasn't supplied */
155
156 if (dp == NULL) { 171 if (dp == NULL) {
157 tipc_nametbl_mc_translate(msg_nametype(msg), 172 tipc_nametbl_mc_translate(msg_nametype(msg),
158 msg_namelower(msg), 173 msg_namelower(msg),
@@ -163,7 +178,6 @@ void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp)
163 } 178 }
164 179
165 /* Deliver a copy of message to each destination port */ 180 /* Deliver a copy of message to each destination port */
166
167 if (dp->count != 0) { 181 if (dp->count != 0) {
168 msg_set_destnode(msg, tipc_own_addr); 182 msg_set_destnode(msg, tipc_own_addr);
169 if (dp->count == 1) { 183 if (dp->count == 1) {
@@ -196,7 +210,6 @@ exit:
196 * 210 *
197 * Returns pointer to (locked) TIPC port, or NULL if unable to create it 211 * Returns pointer to (locked) TIPC port, or NULL if unable to create it
198 */ 212 */
199
200struct tipc_port *tipc_createport_raw(void *usr_handle, 213struct tipc_port *tipc_createport_raw(void *usr_handle,
201 u32 (*dispatcher)(struct tipc_port *, struct sk_buff *), 214 u32 (*dispatcher)(struct tipc_port *, struct sk_buff *),
202 void (*wakeup)(struct tipc_port *), 215 void (*wakeup)(struct tipc_port *),
@@ -221,18 +234,24 @@ struct tipc_port *tipc_createport_raw(void *usr_handle,
221 p_ptr->usr_handle = usr_handle; 234 p_ptr->usr_handle = usr_handle;
222 p_ptr->max_pkt = MAX_PKT_DEFAULT; 235 p_ptr->max_pkt = MAX_PKT_DEFAULT;
223 p_ptr->ref = ref; 236 p_ptr->ref = ref;
224 msg = &p_ptr->phdr;
225 tipc_msg_init(msg, importance, TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
226 msg_set_origport(msg, ref);
227 INIT_LIST_HEAD(&p_ptr->wait_list); 237 INIT_LIST_HEAD(&p_ptr->wait_list);
228 INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list); 238 INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list);
229 p_ptr->dispatcher = dispatcher; 239 p_ptr->dispatcher = dispatcher;
230 p_ptr->wakeup = wakeup; 240 p_ptr->wakeup = wakeup;
231 p_ptr->user_port = NULL; 241 p_ptr->user_port = NULL;
232 k_init_timer(&p_ptr->timer, (Handler)port_timeout, ref); 242 k_init_timer(&p_ptr->timer, (Handler)port_timeout, ref);
233 spin_lock_bh(&tipc_port_list_lock);
234 INIT_LIST_HEAD(&p_ptr->publications); 243 INIT_LIST_HEAD(&p_ptr->publications);
235 INIT_LIST_HEAD(&p_ptr->port_list); 244 INIT_LIST_HEAD(&p_ptr->port_list);
245
246 /*
247 * Must hold port list lock while initializing message header template
248 * to ensure a change to node's own network address doesn't result
249 * in template containing out-dated network address information
250 */
251 spin_lock_bh(&tipc_port_list_lock);
252 msg = &p_ptr->phdr;
253 tipc_msg_init(msg, importance, TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
254 msg_set_origport(msg, ref);
236 list_add_tail(&p_ptr->port_list, &ports); 255 list_add_tail(&p_ptr->port_list, &ports);
237 spin_unlock_bh(&tipc_port_list_lock); 256 spin_unlock_bh(&tipc_port_list_lock);
238 return p_ptr; 257 return p_ptr;
@@ -361,7 +380,6 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
361 u32 rmsg_sz; 380 u32 rmsg_sz;
362 381
363 /* discard rejected message if it shouldn't be returned to sender */ 382 /* discard rejected message if it shouldn't be returned to sender */
364
365 if (WARN(!msg_isdata(msg), 383 if (WARN(!msg_isdata(msg),
366 "attempt to reject message with user=%u", msg_user(msg))) { 384 "attempt to reject message with user=%u", msg_user(msg))) {
367 dump_stack(); 385 dump_stack();
@@ -374,7 +392,6 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
374 * construct returned message by copying rejected message header and 392 * construct returned message by copying rejected message header and
375 * data (or subset), then updating header fields that need adjusting 393 * data (or subset), then updating header fields that need adjusting
376 */ 394 */
377
378 hdr_sz = msg_hdr_sz(msg); 395 hdr_sz = msg_hdr_sz(msg);
379 rmsg_sz = hdr_sz + min_t(u32, data_sz, MAX_REJECT_SIZE); 396 rmsg_sz = hdr_sz + min_t(u32, data_sz, MAX_REJECT_SIZE);
380 397
@@ -413,9 +430,8 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
413 } 430 }
414 431
415 /* send returned message & dispose of rejected message */ 432 /* send returned message & dispose of rejected message */
416
417 src_node = msg_prevnode(msg); 433 src_node = msg_prevnode(msg);
418 if (src_node == tipc_own_addr) 434 if (in_own_node(src_node))
419 tipc_port_recv_msg(rbuf); 435 tipc_port_recv_msg(rbuf);
420 else 436 else
421 tipc_link_send(rbuf, src_node, msg_link_selector(rmsg)); 437 tipc_link_send(rbuf, src_node, msg_link_selector(rmsg));
@@ -519,25 +535,20 @@ void tipc_port_recv_proto_msg(struct sk_buff *buf)
519 struct tipc_msg *msg = buf_msg(buf); 535 struct tipc_msg *msg = buf_msg(buf);
520 struct tipc_port *p_ptr; 536 struct tipc_port *p_ptr;
521 struct sk_buff *r_buf = NULL; 537 struct sk_buff *r_buf = NULL;
522 u32 orignode = msg_orignode(msg);
523 u32 origport = msg_origport(msg);
524 u32 destport = msg_destport(msg); 538 u32 destport = msg_destport(msg);
525 int wakeable; 539 int wakeable;
526 540
527 /* Validate connection */ 541 /* Validate connection */
528
529 p_ptr = tipc_port_lock(destport); 542 p_ptr = tipc_port_lock(destport);
530 if (!p_ptr || !p_ptr->connected || 543 if (!p_ptr || !p_ptr->connected || !tipc_port_peer_msg(p_ptr, msg)) {
531 (port_peernode(p_ptr) != orignode) ||
532 (port_peerport(p_ptr) != origport)) {
533 r_buf = tipc_buf_acquire(BASIC_H_SIZE); 544 r_buf = tipc_buf_acquire(BASIC_H_SIZE);
534 if (r_buf) { 545 if (r_buf) {
535 msg = buf_msg(r_buf); 546 msg = buf_msg(r_buf);
536 tipc_msg_init(msg, TIPC_HIGH_IMPORTANCE, TIPC_CONN_MSG, 547 tipc_msg_init(msg, TIPC_HIGH_IMPORTANCE, TIPC_CONN_MSG,
537 BASIC_H_SIZE, orignode); 548 BASIC_H_SIZE, msg_orignode(msg));
538 msg_set_errcode(msg, TIPC_ERR_NO_PORT); 549 msg_set_errcode(msg, TIPC_ERR_NO_PORT);
539 msg_set_origport(msg, destport); 550 msg_set_origport(msg, destport);
540 msg_set_destport(msg, origport); 551 msg_set_destport(msg, msg_origport(msg));
541 } 552 }
542 if (p_ptr) 553 if (p_ptr)
543 tipc_port_unlock(p_ptr); 554 tipc_port_unlock(p_ptr);
@@ -545,7 +556,6 @@ void tipc_port_recv_proto_msg(struct sk_buff *buf)
545 } 556 }
546 557
547 /* Process protocol message sent by peer */ 558 /* Process protocol message sent by peer */
548
549 switch (msg_type(msg)) { 559 switch (msg_type(msg)) {
550 case CONN_ACK: 560 case CONN_ACK:
551 wakeable = tipc_port_congested(p_ptr) && p_ptr->congested && 561 wakeable = tipc_port_congested(p_ptr) && p_ptr->congested &&
@@ -646,8 +656,6 @@ void tipc_port_reinit(void)
646 spin_lock_bh(&tipc_port_list_lock); 656 spin_lock_bh(&tipc_port_list_lock);
647 list_for_each_entry(p_ptr, &ports, port_list) { 657 list_for_each_entry(p_ptr, &ports, port_list) {
648 msg = &p_ptr->phdr; 658 msg = &p_ptr->phdr;
649 if (msg_orignode(msg) == tipc_own_addr)
650 break;
651 msg_set_prevnode(msg, tipc_own_addr); 659 msg_set_prevnode(msg, tipc_own_addr);
652 msg_set_orignode(msg, tipc_own_addr); 660 msg_set_orignode(msg, tipc_own_addr);
653 } 661 }
@@ -659,7 +667,6 @@ void tipc_port_reinit(void)
659 * port_dispatcher_sigh(): Signal handler for messages destinated 667 * port_dispatcher_sigh(): Signal handler for messages destinated
660 * to the tipc_port interface. 668 * to the tipc_port interface.
661 */ 669 */
662
663static void port_dispatcher_sigh(void *dummy) 670static void port_dispatcher_sigh(void *dummy)
664{ 671{
665 struct sk_buff *buf; 672 struct sk_buff *buf;
@@ -676,6 +683,7 @@ static void port_dispatcher_sigh(void *dummy)
676 struct tipc_name_seq dseq; 683 struct tipc_name_seq dseq;
677 void *usr_handle; 684 void *usr_handle;
678 int connected; 685 int connected;
686 int peer_invalid;
679 int published; 687 int published;
680 u32 message_type; 688 u32 message_type;
681 689
@@ -696,6 +704,7 @@ static void port_dispatcher_sigh(void *dummy)
696 up_ptr = p_ptr->user_port; 704 up_ptr = p_ptr->user_port;
697 usr_handle = up_ptr->usr_handle; 705 usr_handle = up_ptr->usr_handle;
698 connected = p_ptr->connected; 706 connected = p_ptr->connected;
707 peer_invalid = connected && !tipc_port_peer_msg(p_ptr, msg);
699 published = p_ptr->published; 708 published = p_ptr->published;
700 709
701 if (unlikely(msg_errcode(msg))) 710 if (unlikely(msg_errcode(msg)))
@@ -705,8 +714,6 @@ static void port_dispatcher_sigh(void *dummy)
705 714
706 case TIPC_CONN_MSG:{ 715 case TIPC_CONN_MSG:{
707 tipc_conn_msg_event cb = up_ptr->conn_msg_cb; 716 tipc_conn_msg_event cb = up_ptr->conn_msg_cb;
708 u32 peer_port = port_peerport(p_ptr);
709 u32 peer_node = port_peernode(p_ptr);
710 u32 dsz; 717 u32 dsz;
711 718
712 tipc_port_unlock(p_ptr); 719 tipc_port_unlock(p_ptr);
@@ -715,8 +722,7 @@ static void port_dispatcher_sigh(void *dummy)
715 if (unlikely(!connected)) { 722 if (unlikely(!connected)) {
716 if (tipc_connect2port(dref, &orig)) 723 if (tipc_connect2port(dref, &orig))
717 goto reject; 724 goto reject;
718 } else if ((msg_origport(msg) != peer_port) || 725 } else if (peer_invalid)
719 (msg_orignode(msg) != peer_node))
720 goto reject; 726 goto reject;
721 dsz = msg_data_sz(msg); 727 dsz = msg_data_sz(msg);
722 if (unlikely(dsz && 728 if (unlikely(dsz &&
@@ -768,14 +774,9 @@ err:
768 case TIPC_CONN_MSG:{ 774 case TIPC_CONN_MSG:{
769 tipc_conn_shutdown_event cb = 775 tipc_conn_shutdown_event cb =
770 up_ptr->conn_err_cb; 776 up_ptr->conn_err_cb;
771 u32 peer_port = port_peerport(p_ptr);
772 u32 peer_node = port_peernode(p_ptr);
773 777
774 tipc_port_unlock(p_ptr); 778 tipc_port_unlock(p_ptr);
775 if (!cb || !connected) 779 if (!cb || !connected || peer_invalid)
776 break;
777 if ((msg_origport(msg) != peer_port) ||
778 (msg_orignode(msg) != peer_node))
779 break; 780 break;
780 tipc_disconnect(dref); 781 tipc_disconnect(dref);
781 skb_pull(buf, msg_hdr_sz(msg)); 782 skb_pull(buf, msg_hdr_sz(msg));
@@ -826,7 +827,6 @@ reject:
826 * port_dispatcher(): Dispatcher for messages destinated 827 * port_dispatcher(): Dispatcher for messages destinated
827 * to the tipc_port interface. Called with port locked. 828 * to the tipc_port interface. Called with port locked.
828 */ 829 */
829
830static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf) 830static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf)
831{ 831{
832 buf->next = NULL; 832 buf->next = NULL;
@@ -843,10 +843,8 @@ static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf)
843} 843}
844 844
845/* 845/*
846 * Wake up port after congestion: Called with port locked, 846 * Wake up port after congestion: Called with port locked
847 *
848 */ 847 */
849
850static void port_wakeup_sh(unsigned long ref) 848static void port_wakeup_sh(unsigned long ref)
851{ 849{
852 struct tipc_port *p_ptr; 850 struct tipc_port *p_ptr;
@@ -892,7 +890,6 @@ void tipc_acknowledge(u32 ref, u32 ack)
892/* 890/*
893 * tipc_createport(): user level call. 891 * tipc_createport(): user level call.
894 */ 892 */
895
896int tipc_createport(void *usr_handle, 893int tipc_createport(void *usr_handle,
897 unsigned int importance, 894 unsigned int importance,
898 tipc_msg_err_event error_cb, 895 tipc_msg_err_event error_cb,
@@ -901,7 +898,7 @@ int tipc_createport(void *usr_handle,
901 tipc_msg_event msg_cb, 898 tipc_msg_event msg_cb,
902 tipc_named_msg_event named_msg_cb, 899 tipc_named_msg_event named_msg_cb,
903 tipc_conn_msg_event conn_msg_cb, 900 tipc_conn_msg_event conn_msg_cb,
904 tipc_continue_event continue_event_cb,/* May be zero */ 901 tipc_continue_event continue_event_cb, /* May be zero */
905 u32 *portref) 902 u32 *portref)
906{ 903{
907 struct user_port *up_ptr; 904 struct user_port *up_ptr;
@@ -975,10 +972,6 @@ int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
975 972
976 if (p_ptr->connected) 973 if (p_ptr->connected)
977 goto exit; 974 goto exit;
978 if (seq->lower > seq->upper)
979 goto exit;
980 if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE))
981 goto exit;
982 key = ref + p_ptr->pub_count + 1; 975 key = ref + p_ptr->pub_count + 1;
983 if (key == ref) { 976 if (key == ref) {
984 res = -EADDRINUSE; 977 res = -EADDRINUSE;
@@ -1078,7 +1071,6 @@ exit:
1078 * 1071 *
1079 * Port must be locked. 1072 * Port must be locked.
1080 */ 1073 */
1081
1082int tipc_disconnect_port(struct tipc_port *tp_ptr) 1074int tipc_disconnect_port(struct tipc_port *tp_ptr)
1083{ 1075{
1084 int res; 1076 int res;
@@ -1099,7 +1091,6 @@ int tipc_disconnect_port(struct tipc_port *tp_ptr)
1099 * tipc_disconnect(): Disconnect port form peer. 1091 * tipc_disconnect(): Disconnect port form peer.
1100 * This is a node local operation. 1092 * This is a node local operation.
1101 */ 1093 */
1102
1103int tipc_disconnect(u32 ref) 1094int tipc_disconnect(u32 ref)
1104{ 1095{
1105 struct tipc_port *p_ptr; 1096 struct tipc_port *p_ptr;
@@ -1134,7 +1125,6 @@ int tipc_shutdown(u32 ref)
1134/** 1125/**
1135 * tipc_port_recv_msg - receive message from lower layer and deliver to port user 1126 * tipc_port_recv_msg - receive message from lower layer and deliver to port user
1136 */ 1127 */
1137
1138int tipc_port_recv_msg(struct sk_buff *buf) 1128int tipc_port_recv_msg(struct sk_buff *buf)
1139{ 1129{
1140 struct tipc_port *p_ptr; 1130 struct tipc_port *p_ptr;
@@ -1152,17 +1142,6 @@ int tipc_port_recv_msg(struct sk_buff *buf)
1152 /* validate destination & pass to port, otherwise reject message */ 1142 /* validate destination & pass to port, otherwise reject message */
1153 p_ptr = tipc_port_lock(destport); 1143 p_ptr = tipc_port_lock(destport);
1154 if (likely(p_ptr)) { 1144 if (likely(p_ptr)) {
1155 if (likely(p_ptr->connected)) {
1156 if ((unlikely(msg_origport(msg) !=
1157 tipc_peer_port(p_ptr))) ||
1158 (unlikely(msg_orignode(msg) !=
1159 tipc_peer_node(p_ptr))) ||
1160 (unlikely(!msg_connected(msg)))) {
1161 err = TIPC_ERR_NO_PORT;
1162 tipc_port_unlock(p_ptr);
1163 goto reject;
1164 }
1165 }
1166 err = p_ptr->dispatcher(p_ptr, buf); 1145 err = p_ptr->dispatcher(p_ptr, buf);
1167 tipc_port_unlock(p_ptr); 1146 tipc_port_unlock(p_ptr);
1168 if (likely(!err)) 1147 if (likely(!err))
@@ -1170,7 +1149,7 @@ int tipc_port_recv_msg(struct sk_buff *buf)
1170 } else { 1149 } else {
1171 err = TIPC_ERR_NO_PORT; 1150 err = TIPC_ERR_NO_PORT;
1172 } 1151 }
1173reject: 1152
1174 return tipc_reject_msg(buf, err); 1153 return tipc_reject_msg(buf, err);
1175} 1154}
1176 1155
@@ -1178,7 +1157,6 @@ reject:
1178 * tipc_port_recv_sections(): Concatenate and deliver sectioned 1157 * tipc_port_recv_sections(): Concatenate and deliver sectioned
1179 * message for this node. 1158 * message for this node.
1180 */ 1159 */
1181
1182static int tipc_port_recv_sections(struct tipc_port *sender, unsigned int num_sect, 1160static int tipc_port_recv_sections(struct tipc_port *sender, unsigned int num_sect,
1183 struct iovec const *msg_sect, 1161 struct iovec const *msg_sect,
1184 unsigned int total_len) 1162 unsigned int total_len)
@@ -1196,7 +1174,6 @@ static int tipc_port_recv_sections(struct tipc_port *sender, unsigned int num_se
1196/** 1174/**
1197 * tipc_send - send message sections on connection 1175 * tipc_send - send message sections on connection
1198 */ 1176 */
1199
1200int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect, 1177int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect,
1201 unsigned int total_len) 1178 unsigned int total_len)
1202{ 1179{
@@ -1211,7 +1188,7 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect,
1211 p_ptr->congested = 1; 1188 p_ptr->congested = 1;
1212 if (!tipc_port_congested(p_ptr)) { 1189 if (!tipc_port_congested(p_ptr)) {
1213 destnode = port_peernode(p_ptr); 1190 destnode = port_peernode(p_ptr);
1214 if (likely(destnode != tipc_own_addr)) 1191 if (likely(!in_own_node(destnode)))
1215 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect, 1192 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
1216 total_len, destnode); 1193 total_len, destnode);
1217 else 1194 else
@@ -1235,7 +1212,6 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect,
1235/** 1212/**
1236 * tipc_send2name - send message sections to port name 1213 * tipc_send2name - send message sections to port name
1237 */ 1214 */
1238
1239int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain, 1215int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
1240 unsigned int num_sect, struct iovec const *msg_sect, 1216 unsigned int num_sect, struct iovec const *msg_sect,
1241 unsigned int total_len) 1217 unsigned int total_len)
@@ -1261,13 +1237,17 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
1261 msg_set_destport(msg, destport); 1237 msg_set_destport(msg, destport);
1262 1238
1263 if (likely(destport || destnode)) { 1239 if (likely(destport || destnode)) {
1264 if (likely(destnode == tipc_own_addr)) 1240 if (likely(in_own_node(destnode)))
1265 res = tipc_port_recv_sections(p_ptr, num_sect, 1241 res = tipc_port_recv_sections(p_ptr, num_sect,
1266 msg_sect, total_len); 1242 msg_sect, total_len);
1267 else 1243 else if (tipc_own_addr)
1268 res = tipc_link_send_sections_fast(p_ptr, msg_sect, 1244 res = tipc_link_send_sections_fast(p_ptr, msg_sect,
1269 num_sect, total_len, 1245 num_sect, total_len,
1270 destnode); 1246 destnode);
1247 else
1248 res = tipc_port_reject_sections(p_ptr, msg, msg_sect,
1249 num_sect, total_len,
1250 TIPC_ERR_NO_NODE);
1271 if (likely(res != -ELINKCONG)) { 1251 if (likely(res != -ELINKCONG)) {
1272 if (res > 0) 1252 if (res > 0)
1273 p_ptr->sent++; 1253 p_ptr->sent++;
@@ -1285,7 +1265,6 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
1285/** 1265/**
1286 * tipc_send2port - send message sections to port identity 1266 * tipc_send2port - send message sections to port identity
1287 */ 1267 */
1288
1289int tipc_send2port(u32 ref, struct tipc_portid const *dest, 1268int tipc_send2port(u32 ref, struct tipc_portid const *dest,
1290 unsigned int num_sect, struct iovec const *msg_sect, 1269 unsigned int num_sect, struct iovec const *msg_sect,
1291 unsigned int total_len) 1270 unsigned int total_len)
@@ -1305,12 +1284,15 @@ int tipc_send2port(u32 ref, struct tipc_portid const *dest,
1305 msg_set_destport(msg, dest->ref); 1284 msg_set_destport(msg, dest->ref);
1306 msg_set_hdr_sz(msg, BASIC_H_SIZE); 1285 msg_set_hdr_sz(msg, BASIC_H_SIZE);
1307 1286
1308 if (dest->node == tipc_own_addr) 1287 if (in_own_node(dest->node))
1309 res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect, 1288 res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect,
1310 total_len); 1289 total_len);
1311 else 1290 else if (tipc_own_addr)
1312 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect, 1291 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
1313 total_len, dest->node); 1292 total_len, dest->node);
1293 else
1294 res = tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect,
1295 total_len, TIPC_ERR_NO_NODE);
1314 if (likely(res != -ELINKCONG)) { 1296 if (likely(res != -ELINKCONG)) {
1315 if (res > 0) 1297 if (res > 0)
1316 p_ptr->sent++; 1298 p_ptr->sent++;
@@ -1325,7 +1307,6 @@ int tipc_send2port(u32 ref, struct tipc_portid const *dest,
1325/** 1307/**
1326 * tipc_send_buf2port - send message buffer to port identity 1308 * tipc_send_buf2port - send message buffer to port identity
1327 */ 1309 */
1328
1329int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest, 1310int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest,
1330 struct sk_buff *buf, unsigned int dsz) 1311 struct sk_buff *buf, unsigned int dsz)
1331{ 1312{
@@ -1349,7 +1330,7 @@ int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest,
1349 skb_push(buf, BASIC_H_SIZE); 1330 skb_push(buf, BASIC_H_SIZE);
1350 skb_copy_to_linear_data(buf, msg, BASIC_H_SIZE); 1331 skb_copy_to_linear_data(buf, msg, BASIC_H_SIZE);
1351 1332
1352 if (dest->node == tipc_own_addr) 1333 if (in_own_node(dest->node))
1353 res = tipc_port_recv_msg(buf); 1334 res = tipc_port_recv_msg(buf);
1354 else 1335 else
1355 res = tipc_send_buf_fast(buf, dest->node); 1336 res = tipc_send_buf_fast(buf, dest->node);
@@ -1362,4 +1343,3 @@ int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest,
1362 return dsz; 1343 return dsz;
1363 return -ELINKCONG; 1344 return -ELINKCONG;
1364} 1345}
1365
diff --git a/net/tipc/port.h b/net/tipc/port.h
index 9b88531e5a61..98cbec9c4532 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -81,7 +81,6 @@ typedef void (*tipc_continue_event) (void *usr_handle, u32 portref);
81 * @ref: object reference to associated TIPC port 81 * @ref: object reference to associated TIPC port
82 * <various callback routines> 82 * <various callback routines>
83 */ 83 */
84
85struct user_port { 84struct user_port {
86 void *usr_handle; 85 void *usr_handle;
87 u32 ref; 86 u32 ref;
@@ -201,6 +200,7 @@ int tipc_shutdown(u32 ref);
201 * The following routines require that the port be locked on entry 200 * The following routines require that the port be locked on entry
202 */ 201 */
203int tipc_disconnect_port(struct tipc_port *tp_ptr); 202int tipc_disconnect_port(struct tipc_port *tp_ptr);
203int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg);
204 204
205/* 205/*
206 * TIPC messaging routines 206 * TIPC messaging routines
@@ -235,7 +235,6 @@ void tipc_port_reinit(void);
235/** 235/**
236 * tipc_port_lock - lock port instance referred to and return its pointer 236 * tipc_port_lock - lock port instance referred to and return its pointer
237 */ 237 */
238
239static inline struct tipc_port *tipc_port_lock(u32 ref) 238static inline struct tipc_port *tipc_port_lock(u32 ref)
240{ 239{
241 return (struct tipc_port *)tipc_ref_lock(ref); 240 return (struct tipc_port *)tipc_ref_lock(ref);
@@ -246,7 +245,6 @@ static inline struct tipc_port *tipc_port_lock(u32 ref)
246 * 245 *
247 * Can use pointer instead of tipc_ref_unlock() since port is already locked. 246 * Can use pointer instead of tipc_ref_unlock() since port is already locked.
248 */ 247 */
249
250static inline void tipc_port_unlock(struct tipc_port *p_ptr) 248static inline void tipc_port_unlock(struct tipc_port *p_ptr)
251{ 249{
252 spin_unlock_bh(p_ptr->lock); 250 spin_unlock_bh(p_ptr->lock);
@@ -257,16 +255,6 @@ static inline struct tipc_port *tipc_port_deref(u32 ref)
257 return (struct tipc_port *)tipc_ref_deref(ref); 255 return (struct tipc_port *)tipc_ref_deref(ref);
258} 256}
259 257
260static inline u32 tipc_peer_port(struct tipc_port *p_ptr)
261{
262 return msg_destport(&p_ptr->phdr);
263}
264
265static inline u32 tipc_peer_node(struct tipc_port *p_ptr)
266{
267 return msg_destnode(&p_ptr->phdr);
268}
269
270static inline int tipc_port_congested(struct tipc_port *p_ptr) 258static inline int tipc_port_congested(struct tipc_port *p_ptr)
271{ 259{
272 return (p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2); 260 return (p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2);
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index 9e37b7812c3c..5cada0e38e03 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -43,7 +43,6 @@
43 * @lock: spinlock controlling access to object 43 * @lock: spinlock controlling access to object
44 * @ref: reference value for object (combines instance & array index info) 44 * @ref: reference value for object (combines instance & array index info)
45 */ 45 */
46
47struct reference { 46struct reference {
48 void *object; 47 void *object;
49 spinlock_t lock; 48 spinlock_t lock;
@@ -60,7 +59,6 @@ struct reference {
60 * @index_mask: bitmask for array index portion of reference values 59 * @index_mask: bitmask for array index portion of reference values
61 * @start_mask: initial value for instance value portion of reference values 60 * @start_mask: initial value for instance value portion of reference values
62 */ 61 */
63
64struct ref_table { 62struct ref_table {
65 struct reference *entries; 63 struct reference *entries;
66 u32 capacity; 64 u32 capacity;
@@ -96,7 +94,6 @@ static DEFINE_RWLOCK(ref_table_lock);
96/** 94/**
97 * tipc_ref_table_init - create reference table for objects 95 * tipc_ref_table_init - create reference table for objects
98 */ 96 */
99
100int tipc_ref_table_init(u32 requested_size, u32 start) 97int tipc_ref_table_init(u32 requested_size, u32 start)
101{ 98{
102 struct reference *table; 99 struct reference *table;
@@ -109,7 +106,6 @@ int tipc_ref_table_init(u32 requested_size, u32 start)
109 /* do nothing */ ; 106 /* do nothing */ ;
110 107
111 /* allocate table & mark all entries as uninitialized */ 108 /* allocate table & mark all entries as uninitialized */
112
113 table = vzalloc(actual_size * sizeof(struct reference)); 109 table = vzalloc(actual_size * sizeof(struct reference));
114 if (table == NULL) 110 if (table == NULL)
115 return -ENOMEM; 111 return -ENOMEM;
@@ -128,7 +124,6 @@ int tipc_ref_table_init(u32 requested_size, u32 start)
128/** 124/**
129 * tipc_ref_table_stop - destroy reference table for objects 125 * tipc_ref_table_stop - destroy reference table for objects
130 */ 126 */
131
132void tipc_ref_table_stop(void) 127void tipc_ref_table_stop(void)
133{ 128{
134 if (!tipc_ref_table.entries) 129 if (!tipc_ref_table.entries)
@@ -149,7 +144,6 @@ void tipc_ref_table_stop(void)
149 * register a partially initialized object, without running the risk that 144 * register a partially initialized object, without running the risk that
150 * the object will be accessed before initialization is complete. 145 * the object will be accessed before initialization is complete.
151 */ 146 */
152
153u32 tipc_ref_acquire(void *object, spinlock_t **lock) 147u32 tipc_ref_acquire(void *object, spinlock_t **lock)
154{ 148{
155 u32 index; 149 u32 index;
@@ -168,7 +162,6 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
168 } 162 }
169 163
170 /* take a free entry, if available; otherwise initialize a new entry */ 164 /* take a free entry, if available; otherwise initialize a new entry */
171
172 write_lock_bh(&ref_table_lock); 165 write_lock_bh(&ref_table_lock);
173 if (tipc_ref_table.first_free) { 166 if (tipc_ref_table.first_free) {
174 index = tipc_ref_table.first_free; 167 index = tipc_ref_table.first_free;
@@ -211,7 +204,6 @@ u32 tipc_ref_acquire(void *object, spinlock_t **lock)
211 * Disallow future references to an object and free up the entry for re-use. 204 * Disallow future references to an object and free up the entry for re-use.
212 * Note: The entry's spin_lock may still be busy after discard 205 * Note: The entry's spin_lock may still be busy after discard
213 */ 206 */
214
215void tipc_ref_discard(u32 ref) 207void tipc_ref_discard(u32 ref)
216{ 208{
217 struct reference *entry; 209 struct reference *entry;
@@ -242,12 +234,10 @@ void tipc_ref_discard(u32 ref)
242 * mark entry as unused; increment instance part of entry's reference 234 * mark entry as unused; increment instance part of entry's reference
243 * to invalidate any subsequent references 235 * to invalidate any subsequent references
244 */ 236 */
245
246 entry->object = NULL; 237 entry->object = NULL;
247 entry->ref = (ref & ~index_mask) + (index_mask + 1); 238 entry->ref = (ref & ~index_mask) + (index_mask + 1);
248 239
249 /* append entry to free entry list */ 240 /* append entry to free entry list */
250
251 if (tipc_ref_table.first_free == 0) 241 if (tipc_ref_table.first_free == 0)
252 tipc_ref_table.first_free = index; 242 tipc_ref_table.first_free = index;
253 else 243 else
@@ -261,7 +251,6 @@ exit:
261/** 251/**
262 * tipc_ref_lock - lock referenced object and return pointer to it 252 * tipc_ref_lock - lock referenced object and return pointer to it
263 */ 253 */
264
265void *tipc_ref_lock(u32 ref) 254void *tipc_ref_lock(u32 ref)
266{ 255{
267 if (likely(tipc_ref_table.entries)) { 256 if (likely(tipc_ref_table.entries)) {
@@ -283,7 +272,6 @@ void *tipc_ref_lock(u32 ref)
283/** 272/**
284 * tipc_ref_deref - return pointer referenced object (without locking it) 273 * tipc_ref_deref - return pointer referenced object (without locking it)
285 */ 274 */
286
287void *tipc_ref_deref(u32 ref) 275void *tipc_ref_deref(u32 ref)
288{ 276{
289 if (likely(tipc_ref_table.entries)) { 277 if (likely(tipc_ref_table.entries)) {
@@ -296,4 +284,3 @@ void *tipc_ref_deref(u32 ref)
296 } 284 }
297 return NULL; 285 return NULL;
298} 286}
299
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 29e957f64458..5577a447f531 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -123,7 +123,6 @@ static atomic_t tipc_queue_size = ATOMIC_INIT(0);
123 * 123 *
124 * Caller must hold socket lock 124 * Caller must hold socket lock
125 */ 125 */
126
127static void advance_rx_queue(struct sock *sk) 126static void advance_rx_queue(struct sock *sk)
128{ 127{
129 kfree_skb(__skb_dequeue(&sk->sk_receive_queue)); 128 kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
@@ -135,7 +134,6 @@ static void advance_rx_queue(struct sock *sk)
135 * 134 *
136 * Caller must hold socket lock 135 * Caller must hold socket lock
137 */ 136 */
138
139static void discard_rx_queue(struct sock *sk) 137static void discard_rx_queue(struct sock *sk)
140{ 138{
141 struct sk_buff *buf; 139 struct sk_buff *buf;
@@ -151,7 +149,6 @@ static void discard_rx_queue(struct sock *sk)
151 * 149 *
152 * Caller must hold socket lock 150 * Caller must hold socket lock
153 */ 151 */
154
155static void reject_rx_queue(struct sock *sk) 152static void reject_rx_queue(struct sock *sk)
156{ 153{
157 struct sk_buff *buf; 154 struct sk_buff *buf;
@@ -174,7 +171,6 @@ static void reject_rx_queue(struct sock *sk)
174 * 171 *
175 * Returns 0 on success, errno otherwise 172 * Returns 0 on success, errno otherwise
176 */ 173 */
177
178static int tipc_create(struct net *net, struct socket *sock, int protocol, 174static int tipc_create(struct net *net, struct socket *sock, int protocol,
179 int kern) 175 int kern)
180{ 176{
@@ -184,7 +180,6 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
184 struct tipc_port *tp_ptr; 180 struct tipc_port *tp_ptr;
185 181
186 /* Validate arguments */ 182 /* Validate arguments */
187
188 if (unlikely(protocol != 0)) 183 if (unlikely(protocol != 0))
189 return -EPROTONOSUPPORT; 184 return -EPROTONOSUPPORT;
190 185
@@ -207,13 +202,11 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
207 } 202 }
208 203
209 /* Allocate socket's protocol area */ 204 /* Allocate socket's protocol area */
210
211 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto); 205 sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
212 if (sk == NULL) 206 if (sk == NULL)
213 return -ENOMEM; 207 return -ENOMEM;
214 208
215 /* Allocate TIPC port for socket to use */ 209 /* Allocate TIPC port for socket to use */
216
217 tp_ptr = tipc_createport_raw(sk, &dispatch, &wakeupdispatch, 210 tp_ptr = tipc_createport_raw(sk, &dispatch, &wakeupdispatch,
218 TIPC_LOW_IMPORTANCE); 211 TIPC_LOW_IMPORTANCE);
219 if (unlikely(!tp_ptr)) { 212 if (unlikely(!tp_ptr)) {
@@ -222,7 +215,6 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
222 } 215 }
223 216
224 /* Finish initializing socket data structures */ 217 /* Finish initializing socket data structures */
225
226 sock->ops = ops; 218 sock->ops = ops;
227 sock->state = state; 219 sock->state = state;
228 220
@@ -258,7 +250,6 @@ static int tipc_create(struct net *net, struct socket *sock, int protocol,
258 * 250 *
259 * Returns 0 on success, errno otherwise 251 * Returns 0 on success, errno otherwise
260 */ 252 */
261
262static int release(struct socket *sock) 253static int release(struct socket *sock)
263{ 254{
264 struct sock *sk = sock->sk; 255 struct sock *sk = sock->sk;
@@ -270,7 +261,6 @@ static int release(struct socket *sock)
270 * Exit if socket isn't fully initialized (occurs when a failed accept() 261 * Exit if socket isn't fully initialized (occurs when a failed accept()
271 * releases a pre-allocated child socket that was never used) 262 * releases a pre-allocated child socket that was never used)
272 */ 263 */
273
274 if (sk == NULL) 264 if (sk == NULL)
275 return 0; 265 return 0;
276 266
@@ -281,7 +271,6 @@ static int release(struct socket *sock)
281 * Reject all unreceived messages, except on an active connection 271 * Reject all unreceived messages, except on an active connection
282 * (which disconnects locally & sends a 'FIN+' to peer) 272 * (which disconnects locally & sends a 'FIN+' to peer)
283 */ 273 */
284
285 while (sock->state != SS_DISCONNECTING) { 274 while (sock->state != SS_DISCONNECTING) {
286 buf = __skb_dequeue(&sk->sk_receive_queue); 275 buf = __skb_dequeue(&sk->sk_receive_queue);
287 if (buf == NULL) 276 if (buf == NULL)
@@ -303,15 +292,12 @@ static int release(struct socket *sock)
303 * Delete TIPC port; this ensures no more messages are queued 292 * Delete TIPC port; this ensures no more messages are queued
304 * (also disconnects an active connection & sends a 'FIN-' to peer) 293 * (also disconnects an active connection & sends a 'FIN-' to peer)
305 */ 294 */
306
307 res = tipc_deleteport(tport->ref); 295 res = tipc_deleteport(tport->ref);
308 296
309 /* Discard any remaining (connection-based) messages in receive queue */ 297 /* Discard any remaining (connection-based) messages in receive queue */
310
311 discard_rx_queue(sk); 298 discard_rx_queue(sk);
312 299
313 /* Reject any messages that accumulated in backlog queue */ 300 /* Reject any messages that accumulated in backlog queue */
314
315 sock->state = SS_DISCONNECTING; 301 sock->state = SS_DISCONNECTING;
316 release_sock(sk); 302 release_sock(sk);
317 303
@@ -336,7 +322,6 @@ static int release(struct socket *sock)
336 * NOTE: This routine doesn't need to take the socket lock since it doesn't 322 * NOTE: This routine doesn't need to take the socket lock since it doesn't
337 * access any non-constant socket information. 323 * access any non-constant socket information.
338 */ 324 */
339
340static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len) 325static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
341{ 326{
342 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; 327 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
@@ -376,7 +361,6 @@ static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
376 * accesses socket information that is unchanging (or which changes in 361 * accesses socket information that is unchanging (or which changes in
377 * a completely predictable manner). 362 * a completely predictable manner).
378 */ 363 */
379
380static int get_name(struct socket *sock, struct sockaddr *uaddr, 364static int get_name(struct socket *sock, struct sockaddr *uaddr,
381 int *uaddr_len, int peer) 365 int *uaddr_len, int peer)
382{ 366{
@@ -444,7 +428,6 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr,
444 * imply that the operation will succeed, merely that it should be performed 428 * imply that the operation will succeed, merely that it should be performed
445 * and will not block. 429 * and will not block.
446 */ 430 */
447
448static unsigned int poll(struct file *file, struct socket *sock, 431static unsigned int poll(struct file *file, struct socket *sock,
449 poll_table *wait) 432 poll_table *wait)
450{ 433{
@@ -482,7 +465,6 @@ static unsigned int poll(struct file *file, struct socket *sock,
482 * 465 *
483 * Returns 0 if permission is granted, otherwise errno 466 * Returns 0 if permission is granted, otherwise errno
484 */ 467 */
485
486static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m) 468static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
487{ 469{
488 struct tipc_cfg_msg_hdr hdr; 470 struct tipc_cfg_msg_hdr hdr;
@@ -518,7 +500,6 @@ static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
518 * 500 *
519 * Returns the number of bytes sent on success, or errno otherwise 501 * Returns the number of bytes sent on success, or errno otherwise
520 */ 502 */
521
522static int send_msg(struct kiocb *iocb, struct socket *sock, 503static int send_msg(struct kiocb *iocb, struct socket *sock,
523 struct msghdr *m, size_t total_len) 504 struct msghdr *m, size_t total_len)
524{ 505{
@@ -535,7 +516,7 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
535 (dest->family != AF_TIPC))) 516 (dest->family != AF_TIPC)))
536 return -EINVAL; 517 return -EINVAL;
537 if ((total_len > TIPC_MAX_USER_MSG_SIZE) || 518 if ((total_len > TIPC_MAX_USER_MSG_SIZE) ||
538 (m->msg_iovlen > (unsigned)INT_MAX)) 519 (m->msg_iovlen > (unsigned int)INT_MAX))
539 return -EMSGSIZE; 520 return -EMSGSIZE;
540 521
541 if (iocb) 522 if (iocb)
@@ -562,7 +543,6 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
562 } 543 }
563 544
564 /* Abort any pending connection attempts (very unlikely) */ 545 /* Abort any pending connection attempts (very unlikely) */
565
566 reject_rx_queue(sk); 546 reject_rx_queue(sk);
567 } 547 }
568 548
@@ -631,7 +611,6 @@ exit:
631 * 611 *
632 * Returns the number of bytes sent on success, or errno otherwise 612 * Returns the number of bytes sent on success, or errno otherwise
633 */ 613 */
634
635static int send_packet(struct kiocb *iocb, struct socket *sock, 614static int send_packet(struct kiocb *iocb, struct socket *sock,
636 struct msghdr *m, size_t total_len) 615 struct msghdr *m, size_t total_len)
637{ 616{
@@ -642,12 +621,11 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
642 int res; 621 int res;
643 622
644 /* Handle implied connection establishment */ 623 /* Handle implied connection establishment */
645
646 if (unlikely(dest)) 624 if (unlikely(dest))
647 return send_msg(iocb, sock, m, total_len); 625 return send_msg(iocb, sock, m, total_len);
648 626
649 if ((total_len > TIPC_MAX_USER_MSG_SIZE) || 627 if ((total_len > TIPC_MAX_USER_MSG_SIZE) ||
650 (m->msg_iovlen > (unsigned)INT_MAX)) 628 (m->msg_iovlen > (unsigned int)INT_MAX))
651 return -EMSGSIZE; 629 return -EMSGSIZE;
652 630
653 if (iocb) 631 if (iocb)
@@ -695,7 +673,6 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
695 * Returns the number of bytes sent on success (or partial success), 673 * Returns the number of bytes sent on success (or partial success),
696 * or errno if no data sent 674 * or errno if no data sent
697 */ 675 */
698
699static int send_stream(struct kiocb *iocb, struct socket *sock, 676static int send_stream(struct kiocb *iocb, struct socket *sock,
700 struct msghdr *m, size_t total_len) 677 struct msghdr *m, size_t total_len)
701{ 678{
@@ -715,7 +692,6 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
715 lock_sock(sk); 692 lock_sock(sk);
716 693
717 /* Handle special cases where there is no connection */ 694 /* Handle special cases where there is no connection */
718
719 if (unlikely(sock->state != SS_CONNECTED)) { 695 if (unlikely(sock->state != SS_CONNECTED)) {
720 if (sock->state == SS_UNCONNECTED) { 696 if (sock->state == SS_UNCONNECTED) {
721 res = send_packet(NULL, sock, m, total_len); 697 res = send_packet(NULL, sock, m, total_len);
@@ -734,8 +710,8 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
734 goto exit; 710 goto exit;
735 } 711 }
736 712
737 if ((total_len > (unsigned)INT_MAX) || 713 if ((total_len > (unsigned int)INT_MAX) ||
738 (m->msg_iovlen > (unsigned)INT_MAX)) { 714 (m->msg_iovlen > (unsigned int)INT_MAX)) {
739 res = -EMSGSIZE; 715 res = -EMSGSIZE;
740 goto exit; 716 goto exit;
741 } 717 }
@@ -747,7 +723,6 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
747 * (i.e. one large iovec entry), but could be improved to pass sets 723 * (i.e. one large iovec entry), but could be improved to pass sets
748 * of small iovec entries into send_packet(). 724 * of small iovec entries into send_packet().
749 */ 725 */
750
751 curr_iov = m->msg_iov; 726 curr_iov = m->msg_iov;
752 curr_iovlen = m->msg_iovlen; 727 curr_iovlen = m->msg_iovlen;
753 my_msg.msg_iov = &my_iov; 728 my_msg.msg_iov = &my_iov;
@@ -796,7 +771,6 @@ exit:
796 * 771 *
797 * Returns 0 on success, errno otherwise 772 * Returns 0 on success, errno otherwise
798 */ 773 */
799
800static int auto_connect(struct socket *sock, struct tipc_msg *msg) 774static int auto_connect(struct socket *sock, struct tipc_msg *msg)
801{ 775{
802 struct tipc_sock *tsock = tipc_sk(sock->sk); 776 struct tipc_sock *tsock = tipc_sk(sock->sk);
@@ -821,7 +795,6 @@ static int auto_connect(struct socket *sock, struct tipc_msg *msg)
821 * 795 *
822 * Note: Address is not captured if not requested by receiver. 796 * Note: Address is not captured if not requested by receiver.
823 */ 797 */
824
825static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg) 798static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
826{ 799{
827 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)m->msg_name; 800 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)m->msg_name;
@@ -847,7 +820,6 @@ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
847 * 820 *
848 * Returns 0 if successful, otherwise errno 821 * Returns 0 if successful, otherwise errno
849 */ 822 */
850
851static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg, 823static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
852 struct tipc_port *tport) 824 struct tipc_port *tport)
853{ 825{
@@ -861,7 +833,6 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
861 return 0; 833 return 0;
862 834
863 /* Optionally capture errored message object(s) */ 835 /* Optionally capture errored message object(s) */
864
865 err = msg ? msg_errcode(msg) : 0; 836 err = msg ? msg_errcode(msg) : 0;
866 if (unlikely(err)) { 837 if (unlikely(err)) {
867 anc_data[0] = err; 838 anc_data[0] = err;
@@ -878,7 +849,6 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
878 } 849 }
879 850
880 /* Optionally capture message destination object */ 851 /* Optionally capture message destination object */
881
882 dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG; 852 dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
883 switch (dest_type) { 853 switch (dest_type) {
884 case TIPC_NAMED_MSG: 854 case TIPC_NAMED_MSG:
@@ -923,7 +893,6 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
923 * 893 *
924 * Returns size of returned message data, errno otherwise 894 * Returns size of returned message data, errno otherwise
925 */ 895 */
926
927static int recv_msg(struct kiocb *iocb, struct socket *sock, 896static int recv_msg(struct kiocb *iocb, struct socket *sock,
928 struct msghdr *m, size_t buf_len, int flags) 897 struct msghdr *m, size_t buf_len, int flags)
929{ 898{
@@ -937,7 +906,6 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock,
937 int res; 906 int res;
938 907
939 /* Catch invalid receive requests */ 908 /* Catch invalid receive requests */
940
941 if (unlikely(!buf_len)) 909 if (unlikely(!buf_len))
942 return -EINVAL; 910 return -EINVAL;
943 911
@@ -952,7 +920,6 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock,
952restart: 920restart:
953 921
954 /* Look for a message in receive queue; wait if necessary */ 922 /* Look for a message in receive queue; wait if necessary */
955
956 while (skb_queue_empty(&sk->sk_receive_queue)) { 923 while (skb_queue_empty(&sk->sk_receive_queue)) {
957 if (sock->state == SS_DISCONNECTING) { 924 if (sock->state == SS_DISCONNECTING) {
958 res = -ENOTCONN; 925 res = -ENOTCONN;
@@ -970,14 +937,12 @@ restart:
970 } 937 }
971 938
972 /* Look at first message in receive queue */ 939 /* Look at first message in receive queue */
973
974 buf = skb_peek(&sk->sk_receive_queue); 940 buf = skb_peek(&sk->sk_receive_queue);
975 msg = buf_msg(buf); 941 msg = buf_msg(buf);
976 sz = msg_data_sz(msg); 942 sz = msg_data_sz(msg);
977 err = msg_errcode(msg); 943 err = msg_errcode(msg);
978 944
979 /* Complete connection setup for an implied connect */ 945 /* Complete connection setup for an implied connect */
980
981 if (unlikely(sock->state == SS_CONNECTING)) { 946 if (unlikely(sock->state == SS_CONNECTING)) {
982 res = auto_connect(sock, msg); 947 res = auto_connect(sock, msg);
983 if (res) 948 if (res)
@@ -985,24 +950,20 @@ restart:
985 } 950 }
986 951
987 /* Discard an empty non-errored message & try again */ 952 /* Discard an empty non-errored message & try again */
988
989 if ((!sz) && (!err)) { 953 if ((!sz) && (!err)) {
990 advance_rx_queue(sk); 954 advance_rx_queue(sk);
991 goto restart; 955 goto restart;
992 } 956 }
993 957
994 /* Capture sender's address (optional) */ 958 /* Capture sender's address (optional) */
995
996 set_orig_addr(m, msg); 959 set_orig_addr(m, msg);
997 960
998 /* Capture ancillary data (optional) */ 961 /* Capture ancillary data (optional) */
999
1000 res = anc_data_recv(m, msg, tport); 962 res = anc_data_recv(m, msg, tport);
1001 if (res) 963 if (res)
1002 goto exit; 964 goto exit;
1003 965
1004 /* Capture message data (if valid) & compute return value (always) */ 966 /* Capture message data (if valid) & compute return value (always) */
1005
1006 if (!err) { 967 if (!err) {
1007 if (unlikely(buf_len < sz)) { 968 if (unlikely(buf_len < sz)) {
1008 sz = buf_len; 969 sz = buf_len;
@@ -1022,7 +983,6 @@ restart:
1022 } 983 }
1023 984
1024 /* Consume received message (optional) */ 985 /* Consume received message (optional) */
1025
1026 if (likely(!(flags & MSG_PEEK))) { 986 if (likely(!(flags & MSG_PEEK))) {
1027 if ((sock->state != SS_READY) && 987 if ((sock->state != SS_READY) &&
1028 (++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN)) 988 (++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
@@ -1046,7 +1006,6 @@ exit:
1046 * 1006 *
1047 * Returns size of returned message data, errno otherwise 1007 * Returns size of returned message data, errno otherwise
1048 */ 1008 */
1049
1050static int recv_stream(struct kiocb *iocb, struct socket *sock, 1009static int recv_stream(struct kiocb *iocb, struct socket *sock,
1051 struct msghdr *m, size_t buf_len, int flags) 1010 struct msghdr *m, size_t buf_len, int flags)
1052{ 1011{
@@ -1062,7 +1021,6 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
1062 int res = 0; 1021 int res = 0;
1063 1022
1064 /* Catch invalid receive attempts */ 1023 /* Catch invalid receive attempts */
1065
1066 if (unlikely(!buf_len)) 1024 if (unlikely(!buf_len))
1067 return -EINVAL; 1025 return -EINVAL;
1068 1026
@@ -1076,10 +1034,9 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,
1076 1034
1077 target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len); 1035 target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);
1078 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); 1036 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1079restart:
1080 1037
1038restart:
1081 /* Look for a message in receive queue; wait if necessary */ 1039 /* Look for a message in receive queue; wait if necessary */
1082
1083 while (skb_queue_empty(&sk->sk_receive_queue)) { 1040 while (skb_queue_empty(&sk->sk_receive_queue)) {
1084 if (sock->state == SS_DISCONNECTING) { 1041 if (sock->state == SS_DISCONNECTING) {
1085 res = -ENOTCONN; 1042 res = -ENOTCONN;
@@ -1097,21 +1054,18 @@ restart:
1097 } 1054 }
1098 1055
1099 /* Look at first message in receive queue */ 1056 /* Look at first message in receive queue */
1100
1101 buf = skb_peek(&sk->sk_receive_queue); 1057 buf = skb_peek(&sk->sk_receive_queue);
1102 msg = buf_msg(buf); 1058 msg = buf_msg(buf);
1103 sz = msg_data_sz(msg); 1059 sz = msg_data_sz(msg);
1104 err = msg_errcode(msg); 1060 err = msg_errcode(msg);
1105 1061
1106 /* Discard an empty non-errored message & try again */ 1062 /* Discard an empty non-errored message & try again */
1107
1108 if ((!sz) && (!err)) { 1063 if ((!sz) && (!err)) {
1109 advance_rx_queue(sk); 1064 advance_rx_queue(sk);
1110 goto restart; 1065 goto restart;
1111 } 1066 }
1112 1067
1113 /* Optionally capture sender's address & ancillary data of first msg */ 1068 /* Optionally capture sender's address & ancillary data of first msg */
1114
1115 if (sz_copied == 0) { 1069 if (sz_copied == 0) {
1116 set_orig_addr(m, msg); 1070 set_orig_addr(m, msg);
1117 res = anc_data_recv(m, msg, tport); 1071 res = anc_data_recv(m, msg, tport);
@@ -1120,7 +1074,6 @@ restart:
1120 } 1074 }
1121 1075
1122 /* Capture message data (if valid) & compute return value (always) */ 1076 /* Capture message data (if valid) & compute return value (always) */
1123
1124 if (!err) { 1077 if (!err) {
1125 u32 offset = (u32)(unsigned long)(TIPC_SKB_CB(buf)->handle); 1078 u32 offset = (u32)(unsigned long)(TIPC_SKB_CB(buf)->handle);
1126 1079
@@ -1152,7 +1105,6 @@ restart:
1152 } 1105 }
1153 1106
1154 /* Consume received message (optional) */ 1107 /* Consume received message (optional) */
1155
1156 if (likely(!(flags & MSG_PEEK))) { 1108 if (likely(!(flags & MSG_PEEK))) {
1157 if (unlikely(++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN)) 1109 if (unlikely(++tport->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
1158 tipc_acknowledge(tport->ref, tport->conn_unacked); 1110 tipc_acknowledge(tport->ref, tport->conn_unacked);
@@ -1160,7 +1112,6 @@ restart:
1160 } 1112 }
1161 1113
1162 /* Loop around if more data is required */ 1114 /* Loop around if more data is required */
1163
1164 if ((sz_copied < buf_len) && /* didn't get all requested data */ 1115 if ((sz_copied < buf_len) && /* didn't get all requested data */
1165 (!skb_queue_empty(&sk->sk_receive_queue) || 1116 (!skb_queue_empty(&sk->sk_receive_queue) ||
1166 (sz_copied < target)) && /* and more is ready or required */ 1117 (sz_copied < target)) && /* and more is ready or required */
@@ -1181,7 +1132,6 @@ exit:
1181 * 1132 *
1182 * Returns 1 if queue is unable to accept message, 0 otherwise 1133 * Returns 1 if queue is unable to accept message, 0 otherwise
1183 */ 1134 */
1184
1185static int rx_queue_full(struct tipc_msg *msg, u32 queue_size, u32 base) 1135static int rx_queue_full(struct tipc_msg *msg, u32 queue_size, u32 base)
1186{ 1136{
1187 u32 threshold; 1137 u32 threshold;
@@ -1214,7 +1164,6 @@ static int rx_queue_full(struct tipc_msg *msg, u32 queue_size, u32 base)
1214 * 1164 *
1215 * Returns TIPC error status code (TIPC_OK if message is not to be rejected) 1165 * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
1216 */ 1166 */
1217
1218static u32 filter_rcv(struct sock *sk, struct sk_buff *buf) 1167static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
1219{ 1168{
1220 struct socket *sock = sk->sk_socket; 1169 struct socket *sock = sk->sk_socket;
@@ -1222,12 +1171,8 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
1222 u32 recv_q_len; 1171 u32 recv_q_len;
1223 1172
1224 /* Reject message if it is wrong sort of message for socket */ 1173 /* Reject message if it is wrong sort of message for socket */
1225 1174 if (msg_type(msg) > TIPC_DIRECT_MSG)
1226 /* 1175 return TIPC_ERR_NO_PORT;
1227 * WOULD IT BE BETTER TO JUST DISCARD THESE MESSAGES INSTEAD?
1228 * "NO PORT" ISN'T REALLY THE RIGHT ERROR CODE, AND THERE MAY
1229 * BE SECURITY IMPLICATIONS INHERENT IN REJECTING INVALID TRAFFIC
1230 */
1231 1176
1232 if (sock->state == SS_READY) { 1177 if (sock->state == SS_READY) {
1233 if (msg_connected(msg)) 1178 if (msg_connected(msg))
@@ -1236,7 +1181,8 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
1236 if (msg_mcast(msg)) 1181 if (msg_mcast(msg))
1237 return TIPC_ERR_NO_PORT; 1182 return TIPC_ERR_NO_PORT;
1238 if (sock->state == SS_CONNECTED) { 1183 if (sock->state == SS_CONNECTED) {
1239 if (!msg_connected(msg)) 1184 if (!msg_connected(msg) ||
1185 !tipc_port_peer_msg(tipc_sk_port(sk), msg))
1240 return TIPC_ERR_NO_PORT; 1186 return TIPC_ERR_NO_PORT;
1241 } else if (sock->state == SS_CONNECTING) { 1187 } else if (sock->state == SS_CONNECTING) {
1242 if (!msg_connected(msg) && (msg_errcode(msg) == 0)) 1188 if (!msg_connected(msg) && (msg_errcode(msg) == 0))
@@ -1253,7 +1199,6 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
1253 } 1199 }
1254 1200
1255 /* Reject message if there isn't room to queue it */ 1201 /* Reject message if there isn't room to queue it */
1256
1257 recv_q_len = (u32)atomic_read(&tipc_queue_size); 1202 recv_q_len = (u32)atomic_read(&tipc_queue_size);
1258 if (unlikely(recv_q_len >= OVERLOAD_LIMIT_BASE)) { 1203 if (unlikely(recv_q_len >= OVERLOAD_LIMIT_BASE)) {
1259 if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE)) 1204 if (rx_queue_full(msg, recv_q_len, OVERLOAD_LIMIT_BASE))
@@ -1266,13 +1211,11 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
1266 } 1211 }
1267 1212
1268 /* Enqueue message (finally!) */ 1213 /* Enqueue message (finally!) */
1269
1270 TIPC_SKB_CB(buf)->handle = 0; 1214 TIPC_SKB_CB(buf)->handle = 0;
1271 atomic_inc(&tipc_queue_size); 1215 atomic_inc(&tipc_queue_size);
1272 __skb_queue_tail(&sk->sk_receive_queue, buf); 1216 __skb_queue_tail(&sk->sk_receive_queue, buf);
1273 1217
1274 /* Initiate connection termination for an incoming 'FIN' */ 1218 /* Initiate connection termination for an incoming 'FIN' */
1275
1276 if (unlikely(msg_errcode(msg) && (sock->state == SS_CONNECTED))) { 1219 if (unlikely(msg_errcode(msg) && (sock->state == SS_CONNECTED))) {
1277 sock->state = SS_DISCONNECTING; 1220 sock->state = SS_DISCONNECTING;
1278 tipc_disconnect_port(tipc_sk_port(sk)); 1221 tipc_disconnect_port(tipc_sk_port(sk));
@@ -1292,7 +1235,6 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
1292 * 1235 *
1293 * Returns 0 1236 * Returns 0
1294 */ 1237 */
1295
1296static int backlog_rcv(struct sock *sk, struct sk_buff *buf) 1238static int backlog_rcv(struct sock *sk, struct sk_buff *buf)
1297{ 1239{
1298 u32 res; 1240 u32 res;
@@ -1312,7 +1254,6 @@ static int backlog_rcv(struct sock *sk, struct sk_buff *buf)
1312 * 1254 *
1313 * Returns TIPC error status code (TIPC_OK if message is not to be rejected) 1255 * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
1314 */ 1256 */
1315
1316static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf) 1257static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
1317{ 1258{
1318 struct sock *sk = (struct sock *)tport->usr_handle; 1259 struct sock *sk = (struct sock *)tport->usr_handle;
@@ -1324,12 +1265,11 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
1324 * This code is based on sk_receive_skb(), but must be distinct from it 1265 * This code is based on sk_receive_skb(), but must be distinct from it
1325 * since a TIPC-specific filter/reject mechanism is utilized 1266 * since a TIPC-specific filter/reject mechanism is utilized
1326 */ 1267 */
1327
1328 bh_lock_sock(sk); 1268 bh_lock_sock(sk);
1329 if (!sock_owned_by_user(sk)) { 1269 if (!sock_owned_by_user(sk)) {
1330 res = filter_rcv(sk, buf); 1270 res = filter_rcv(sk, buf);
1331 } else { 1271 } else {
1332 if (sk_add_backlog(sk, buf)) 1272 if (sk_add_backlog(sk, buf, sk->sk_rcvbuf))
1333 res = TIPC_ERR_OVERLOAD; 1273 res = TIPC_ERR_OVERLOAD;
1334 else 1274 else
1335 res = TIPC_OK; 1275 res = TIPC_OK;
@@ -1345,7 +1285,6 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
1345 * 1285 *
1346 * Called with port lock already taken. 1286 * Called with port lock already taken.
1347 */ 1287 */
1348
1349static void wakeupdispatch(struct tipc_port *tport) 1288static void wakeupdispatch(struct tipc_port *tport)
1350{ 1289{
1351 struct sock *sk = (struct sock *)tport->usr_handle; 1290 struct sock *sk = (struct sock *)tport->usr_handle;
@@ -1363,7 +1302,6 @@ static void wakeupdispatch(struct tipc_port *tport)
1363 * 1302 *
1364 * Returns 0 on success, errno otherwise 1303 * Returns 0 on success, errno otherwise
1365 */ 1304 */
1366
1367static int connect(struct socket *sock, struct sockaddr *dest, int destlen, 1305static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1368 int flags) 1306 int flags)
1369{ 1307{
@@ -1378,21 +1316,18 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1378 lock_sock(sk); 1316 lock_sock(sk);
1379 1317
1380 /* For now, TIPC does not allow use of connect() with DGRAM/RDM types */ 1318 /* For now, TIPC does not allow use of connect() with DGRAM/RDM types */
1381
1382 if (sock->state == SS_READY) { 1319 if (sock->state == SS_READY) {
1383 res = -EOPNOTSUPP; 1320 res = -EOPNOTSUPP;
1384 goto exit; 1321 goto exit;
1385 } 1322 }
1386 1323
1387 /* For now, TIPC does not support the non-blocking form of connect() */ 1324 /* For now, TIPC does not support the non-blocking form of connect() */
1388
1389 if (flags & O_NONBLOCK) { 1325 if (flags & O_NONBLOCK) {
1390 res = -EOPNOTSUPP; 1326 res = -EOPNOTSUPP;
1391 goto exit; 1327 goto exit;
1392 } 1328 }
1393 1329
1394 /* Issue Posix-compliant error code if socket is in the wrong state */ 1330 /* Issue Posix-compliant error code if socket is in the wrong state */
1395
1396 if (sock->state == SS_LISTENING) { 1331 if (sock->state == SS_LISTENING) {
1397 res = -EOPNOTSUPP; 1332 res = -EOPNOTSUPP;
1398 goto exit; 1333 goto exit;
@@ -1412,18 +1347,15 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1412 * Note: send_msg() validates the rest of the address fields, 1347 * Note: send_msg() validates the rest of the address fields,
1413 * so there's no need to do it here 1348 * so there's no need to do it here
1414 */ 1349 */
1415
1416 if (dst->addrtype == TIPC_ADDR_MCAST) { 1350 if (dst->addrtype == TIPC_ADDR_MCAST) {
1417 res = -EINVAL; 1351 res = -EINVAL;
1418 goto exit; 1352 goto exit;
1419 } 1353 }
1420 1354
1421 /* Reject any messages already in receive queue (very unlikely) */ 1355 /* Reject any messages already in receive queue (very unlikely) */
1422
1423 reject_rx_queue(sk); 1356 reject_rx_queue(sk);
1424 1357
1425 /* Send a 'SYN-' to destination */ 1358 /* Send a 'SYN-' to destination */
1426
1427 m.msg_name = dest; 1359 m.msg_name = dest;
1428 m.msg_namelen = destlen; 1360 m.msg_namelen = destlen;
1429 res = send_msg(NULL, sock, &m, 0); 1361 res = send_msg(NULL, sock, &m, 0);
@@ -1431,7 +1363,6 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1431 goto exit; 1363 goto exit;
1432 1364
1433 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */ 1365 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
1434
1435 timeout = tipc_sk(sk)->conn_timeout; 1366 timeout = tipc_sk(sk)->conn_timeout;
1436 release_sock(sk); 1367 release_sock(sk);
1437 res = wait_event_interruptible_timeout(*sk_sleep(sk), 1368 res = wait_event_interruptible_timeout(*sk_sleep(sk),
@@ -1476,7 +1407,6 @@ exit:
1476 * 1407 *
1477 * Returns 0 on success, errno otherwise 1408 * Returns 0 on success, errno otherwise
1478 */ 1409 */
1479
1480static int listen(struct socket *sock, int len) 1410static int listen(struct socket *sock, int len)
1481{ 1411{
1482 struct sock *sk = sock->sk; 1412 struct sock *sk = sock->sk;
@@ -1503,7 +1433,6 @@ static int listen(struct socket *sock, int len)
1503 * 1433 *
1504 * Returns 0 on success, errno otherwise 1434 * Returns 0 on success, errno otherwise
1505 */ 1435 */
1506
1507static int accept(struct socket *sock, struct socket *new_sock, int flags) 1436static int accept(struct socket *sock, struct socket *new_sock, int flags)
1508{ 1437{
1509 struct sock *sk = sock->sk; 1438 struct sock *sk = sock->sk;
@@ -1546,11 +1475,9 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
1546 * Reject any stray messages received by new socket 1475 * Reject any stray messages received by new socket
1547 * before the socket lock was taken (very, very unlikely) 1476 * before the socket lock was taken (very, very unlikely)
1548 */ 1477 */
1549
1550 reject_rx_queue(new_sk); 1478 reject_rx_queue(new_sk);
1551 1479
1552 /* Connect new socket to it's peer */ 1480 /* Connect new socket to it's peer */
1553
1554 new_tsock->peer_name.ref = msg_origport(msg); 1481 new_tsock->peer_name.ref = msg_origport(msg);
1555 new_tsock->peer_name.node = msg_orignode(msg); 1482 new_tsock->peer_name.node = msg_orignode(msg);
1556 tipc_connect2port(new_ref, &new_tsock->peer_name); 1483 tipc_connect2port(new_ref, &new_tsock->peer_name);
@@ -1566,7 +1493,6 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
1566 * Respond to 'SYN-' by discarding it & returning 'ACK'-. 1493 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
1567 * Respond to 'SYN+' by queuing it on new socket. 1494 * Respond to 'SYN+' by queuing it on new socket.
1568 */ 1495 */
1569
1570 if (!msg_data_sz(msg)) { 1496 if (!msg_data_sz(msg)) {
1571 struct msghdr m = {NULL,}; 1497 struct msghdr m = {NULL,};
1572 1498
@@ -1592,7 +1518,6 @@ exit:
1592 * 1518 *
1593 * Returns 0 on success, errno otherwise 1519 * Returns 0 on success, errno otherwise
1594 */ 1520 */
1595
1596static int shutdown(struct socket *sock, int how) 1521static int shutdown(struct socket *sock, int how)
1597{ 1522{
1598 struct sock *sk = sock->sk; 1523 struct sock *sk = sock->sk;
@@ -1609,8 +1534,8 @@ static int shutdown(struct socket *sock, int how)
1609 case SS_CONNECTING: 1534 case SS_CONNECTING:
1610 case SS_CONNECTED: 1535 case SS_CONNECTED:
1611 1536
1612 /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
1613restart: 1537restart:
1538 /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
1614 buf = __skb_dequeue(&sk->sk_receive_queue); 1539 buf = __skb_dequeue(&sk->sk_receive_queue);
1615 if (buf) { 1540 if (buf) {
1616 atomic_dec(&tipc_queue_size); 1541 atomic_dec(&tipc_queue_size);
@@ -1631,7 +1556,6 @@ restart:
1631 case SS_DISCONNECTING: 1556 case SS_DISCONNECTING:
1632 1557
1633 /* Discard any unreceived messages; wake up sleeping tasks */ 1558 /* Discard any unreceived messages; wake up sleeping tasks */
1634
1635 discard_rx_queue(sk); 1559 discard_rx_queue(sk);
1636 if (waitqueue_active(sk_sleep(sk))) 1560 if (waitqueue_active(sk_sleep(sk)))
1637 wake_up_interruptible(sk_sleep(sk)); 1561 wake_up_interruptible(sk_sleep(sk));
@@ -1659,7 +1583,6 @@ restart:
1659 * 1583 *
1660 * Returns 0 on success, errno otherwise 1584 * Returns 0 on success, errno otherwise
1661 */ 1585 */
1662
1663static int setsockopt(struct socket *sock, 1586static int setsockopt(struct socket *sock,
1664 int lvl, int opt, char __user *ov, unsigned int ol) 1587 int lvl, int opt, char __user *ov, unsigned int ol)
1665{ 1588{
@@ -1719,7 +1642,6 @@ static int setsockopt(struct socket *sock,
1719 * 1642 *
1720 * Returns 0 on success, errno otherwise 1643 * Returns 0 on success, errno otherwise
1721 */ 1644 */
1722
1723static int getsockopt(struct socket *sock, 1645static int getsockopt(struct socket *sock,
1724 int lvl, int opt, char __user *ov, int __user *ol) 1646 int lvl, int opt, char __user *ov, int __user *ol)
1725{ 1647{
@@ -1780,7 +1702,6 @@ static int getsockopt(struct socket *sock,
1780/** 1702/**
1781 * Protocol switches for the various types of TIPC sockets 1703 * Protocol switches for the various types of TIPC sockets
1782 */ 1704 */
1783
1784static const struct proto_ops msg_ops = { 1705static const struct proto_ops msg_ops = {
1785 .owner = THIS_MODULE, 1706 .owner = THIS_MODULE,
1786 .family = AF_TIPC, 1707 .family = AF_TIPC,
@@ -1886,7 +1807,6 @@ int tipc_socket_init(void)
1886/** 1807/**
1887 * tipc_socket_stop - stop TIPC socket interface 1808 * tipc_socket_stop - stop TIPC socket interface
1888 */ 1809 */
1889
1890void tipc_socket_stop(void) 1810void tipc_socket_stop(void)
1891{ 1811{
1892 if (!sockets_enabled) 1812 if (!sockets_enabled)
@@ -1896,4 +1816,3 @@ void tipc_socket_stop(void)
1896 sock_unregister(tipc_family_ops.family); 1816 sock_unregister(tipc_family_ops.family);
1897 proto_unregister(&tipc_proto); 1817 proto_unregister(&tipc_proto);
1898} 1818}
1899
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index b2964e9895d3..f976e9cd6a72 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -46,7 +46,6 @@
46 * @subscriber_list: adjacent subscribers in top. server's list of subscribers 46 * @subscriber_list: adjacent subscribers in top. server's list of subscribers
47 * @subscription_list: list of subscription objects for this subscriber 47 * @subscription_list: list of subscription objects for this subscriber
48 */ 48 */
49
50struct tipc_subscriber { 49struct tipc_subscriber {
51 u32 port_ref; 50 u32 port_ref;
52 spinlock_t *lock; 51 spinlock_t *lock;
@@ -56,13 +55,11 @@ struct tipc_subscriber {
56 55
57/** 56/**
58 * struct top_srv - TIPC network topology subscription service 57 * struct top_srv - TIPC network topology subscription service
59 * @user_ref: TIPC userid of subscription service
60 * @setup_port: reference to TIPC port that handles subscription requests 58 * @setup_port: reference to TIPC port that handles subscription requests
61 * @subscription_count: number of active subscriptions (not subscribers!) 59 * @subscription_count: number of active subscriptions (not subscribers!)
62 * @subscriber_list: list of ports subscribing to service 60 * @subscriber_list: list of ports subscribing to service
63 * @lock: spinlock govering access to subscriber list 61 * @lock: spinlock govering access to subscriber list
64 */ 62 */
65
66struct top_srv { 63struct top_srv {
67 u32 setup_port; 64 u32 setup_port;
68 atomic_t subscription_count; 65 atomic_t subscription_count;
@@ -79,7 +76,6 @@ static struct top_srv topsrv;
79 * 76 *
80 * Returns converted value 77 * Returns converted value
81 */ 78 */
82
83static u32 htohl(u32 in, int swap) 79static u32 htohl(u32 in, int swap)
84{ 80{
85 return swap ? swab32(in) : in; 81 return swap ? swab32(in) : in;
@@ -91,7 +87,6 @@ static u32 htohl(u32 in, int swap)
91 * Note: Must not hold subscriber's server port lock, since tipc_send() will 87 * Note: Must not hold subscriber's server port lock, since tipc_send() will
92 * try to take the lock if the message is rejected and returned! 88 * try to take the lock if the message is rejected and returned!
93 */ 89 */
94
95static void subscr_send_event(struct tipc_subscription *sub, 90static void subscr_send_event(struct tipc_subscription *sub,
96 u32 found_lower, 91 u32 found_lower,
97 u32 found_upper, 92 u32 found_upper,
@@ -117,7 +112,6 @@ static void subscr_send_event(struct tipc_subscription *sub,
117 * 112 *
118 * Returns 1 if there is overlap, otherwise 0. 113 * Returns 1 if there is overlap, otherwise 0.
119 */ 114 */
120
121int tipc_subscr_overlap(struct tipc_subscription *sub, 115int tipc_subscr_overlap(struct tipc_subscription *sub,
122 u32 found_lower, 116 u32 found_lower,
123 u32 found_upper) 117 u32 found_upper)
@@ -137,7 +131,6 @@ int tipc_subscr_overlap(struct tipc_subscription *sub,
137 * 131 *
138 * Protected by nameseq.lock in name_table.c 132 * Protected by nameseq.lock in name_table.c
139 */ 133 */
140
141void tipc_subscr_report_overlap(struct tipc_subscription *sub, 134void tipc_subscr_report_overlap(struct tipc_subscription *sub,
142 u32 found_lower, 135 u32 found_lower,
143 u32 found_upper, 136 u32 found_upper,
@@ -157,43 +150,35 @@ void tipc_subscr_report_overlap(struct tipc_subscription *sub,
157/** 150/**
158 * subscr_timeout - subscription timeout has occurred 151 * subscr_timeout - subscription timeout has occurred
159 */ 152 */
160
161static void subscr_timeout(struct tipc_subscription *sub) 153static void subscr_timeout(struct tipc_subscription *sub)
162{ 154{
163 struct tipc_port *server_port; 155 struct tipc_port *server_port;
164 156
165 /* Validate server port reference (in case subscriber is terminating) */ 157 /* Validate server port reference (in case subscriber is terminating) */
166
167 server_port = tipc_port_lock(sub->server_ref); 158 server_port = tipc_port_lock(sub->server_ref);
168 if (server_port == NULL) 159 if (server_port == NULL)
169 return; 160 return;
170 161
171 /* Validate timeout (in case subscription is being cancelled) */ 162 /* Validate timeout (in case subscription is being cancelled) */
172
173 if (sub->timeout == TIPC_WAIT_FOREVER) { 163 if (sub->timeout == TIPC_WAIT_FOREVER) {
174 tipc_port_unlock(server_port); 164 tipc_port_unlock(server_port);
175 return; 165 return;
176 } 166 }
177 167
178 /* Unlink subscription from name table */ 168 /* Unlink subscription from name table */
179
180 tipc_nametbl_unsubscribe(sub); 169 tipc_nametbl_unsubscribe(sub);
181 170
182 /* Unlink subscription from subscriber */ 171 /* Unlink subscription from subscriber */
183
184 list_del(&sub->subscription_list); 172 list_del(&sub->subscription_list);
185 173
186 /* Release subscriber's server port */ 174 /* Release subscriber's server port */
187
188 tipc_port_unlock(server_port); 175 tipc_port_unlock(server_port);
189 176
190 /* Notify subscriber of timeout */ 177 /* Notify subscriber of timeout */
191
192 subscr_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper, 178 subscr_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
193 TIPC_SUBSCR_TIMEOUT, 0, 0); 179 TIPC_SUBSCR_TIMEOUT, 0, 0);
194 180
195 /* Now destroy subscription */ 181 /* Now destroy subscription */
196
197 k_term_timer(&sub->timer); 182 k_term_timer(&sub->timer);
198 kfree(sub); 183 kfree(sub);
199 atomic_dec(&topsrv.subscription_count); 184 atomic_dec(&topsrv.subscription_count);
@@ -204,7 +189,6 @@ static void subscr_timeout(struct tipc_subscription *sub)
204 * 189 *
205 * Called with subscriber port locked. 190 * Called with subscriber port locked.
206 */ 191 */
207
208static void subscr_del(struct tipc_subscription *sub) 192static void subscr_del(struct tipc_subscription *sub)
209{ 193{
210 tipc_nametbl_unsubscribe(sub); 194 tipc_nametbl_unsubscribe(sub);
@@ -223,7 +207,6 @@ static void subscr_del(struct tipc_subscription *sub)
223 * a new object reference in the interim that uses this lock; this routine will 207 * a new object reference in the interim that uses this lock; this routine will
224 * simply wait for it to be released, then claim it.) 208 * simply wait for it to be released, then claim it.)
225 */ 209 */
226
227static void subscr_terminate(struct tipc_subscriber *subscriber) 210static void subscr_terminate(struct tipc_subscriber *subscriber)
228{ 211{
229 u32 port_ref; 212 u32 port_ref;
@@ -231,18 +214,15 @@ static void subscr_terminate(struct tipc_subscriber *subscriber)
231 struct tipc_subscription *sub_temp; 214 struct tipc_subscription *sub_temp;
232 215
233 /* Invalidate subscriber reference */ 216 /* Invalidate subscriber reference */
234
235 port_ref = subscriber->port_ref; 217 port_ref = subscriber->port_ref;
236 subscriber->port_ref = 0; 218 subscriber->port_ref = 0;
237 spin_unlock_bh(subscriber->lock); 219 spin_unlock_bh(subscriber->lock);
238 220
239 /* Sever connection to subscriber */ 221 /* Sever connection to subscriber */
240
241 tipc_shutdown(port_ref); 222 tipc_shutdown(port_ref);
242 tipc_deleteport(port_ref); 223 tipc_deleteport(port_ref);
243 224
244 /* Destroy any existing subscriptions for subscriber */ 225 /* Destroy any existing subscriptions for subscriber */
245
246 list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, 226 list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
247 subscription_list) { 227 subscription_list) {
248 if (sub->timeout != TIPC_WAIT_FOREVER) { 228 if (sub->timeout != TIPC_WAIT_FOREVER) {
@@ -253,17 +233,14 @@ static void subscr_terminate(struct tipc_subscriber *subscriber)
253 } 233 }
254 234
255 /* Remove subscriber from topology server's subscriber list */ 235 /* Remove subscriber from topology server's subscriber list */
256
257 spin_lock_bh(&topsrv.lock); 236 spin_lock_bh(&topsrv.lock);
258 list_del(&subscriber->subscriber_list); 237 list_del(&subscriber->subscriber_list);
259 spin_unlock_bh(&topsrv.lock); 238 spin_unlock_bh(&topsrv.lock);
260 239
261 /* Reclaim subscriber lock */ 240 /* Reclaim subscriber lock */
262
263 spin_lock_bh(subscriber->lock); 241 spin_lock_bh(subscriber->lock);
264 242
265 /* Now destroy subscriber */ 243 /* Now destroy subscriber */
266
267 kfree(subscriber); 244 kfree(subscriber);
268} 245}
269 246
@@ -276,7 +253,6 @@ static void subscr_terminate(struct tipc_subscriber *subscriber)
276 * 253 *
277 * Note that fields of 's' use subscriber's endianness! 254 * Note that fields of 's' use subscriber's endianness!
278 */ 255 */
279
280static void subscr_cancel(struct tipc_subscr *s, 256static void subscr_cancel(struct tipc_subscr *s,
281 struct tipc_subscriber *subscriber) 257 struct tipc_subscriber *subscriber)
282{ 258{
@@ -285,7 +261,6 @@ static void subscr_cancel(struct tipc_subscr *s,
285 int found = 0; 261 int found = 0;
286 262
287 /* Find first matching subscription, exit if not found */ 263 /* Find first matching subscription, exit if not found */
288
289 list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, 264 list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
290 subscription_list) { 265 subscription_list) {
291 if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) { 266 if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) {
@@ -297,7 +272,6 @@ static void subscr_cancel(struct tipc_subscr *s,
297 return; 272 return;
298 273
299 /* Cancel subscription timer (if used), then delete subscription */ 274 /* Cancel subscription timer (if used), then delete subscription */
300
301 if (sub->timeout != TIPC_WAIT_FOREVER) { 275 if (sub->timeout != TIPC_WAIT_FOREVER) {
302 sub->timeout = TIPC_WAIT_FOREVER; 276 sub->timeout = TIPC_WAIT_FOREVER;
303 spin_unlock_bh(subscriber->lock); 277 spin_unlock_bh(subscriber->lock);
@@ -313,7 +287,6 @@ static void subscr_cancel(struct tipc_subscr *s,
313 * 287 *
314 * Called with subscriber port locked. 288 * Called with subscriber port locked.
315 */ 289 */
316
317static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s, 290static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
318 struct tipc_subscriber *subscriber) 291 struct tipc_subscriber *subscriber)
319{ 292{
@@ -321,11 +294,9 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
321 int swap; 294 int swap;
322 295
323 /* Determine subscriber's endianness */ 296 /* Determine subscriber's endianness */
324
325 swap = !(s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE)); 297 swap = !(s->filter & (TIPC_SUB_PORTS | TIPC_SUB_SERVICE));
326 298
327 /* Detect & process a subscription cancellation request */ 299 /* Detect & process a subscription cancellation request */
328
329 if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) { 300 if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
330 s->filter &= ~htohl(TIPC_SUB_CANCEL, swap); 301 s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
331 subscr_cancel(s, subscriber); 302 subscr_cancel(s, subscriber);
@@ -333,7 +304,6 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
333 } 304 }
334 305
335 /* Refuse subscription if global limit exceeded */ 306 /* Refuse subscription if global limit exceeded */
336
337 if (atomic_read(&topsrv.subscription_count) >= tipc_max_subscriptions) { 307 if (atomic_read(&topsrv.subscription_count) >= tipc_max_subscriptions) {
338 warn("Subscription rejected, subscription limit reached (%u)\n", 308 warn("Subscription rejected, subscription limit reached (%u)\n",
339 tipc_max_subscriptions); 309 tipc_max_subscriptions);
@@ -342,7 +312,6 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
342 } 312 }
343 313
344 /* Allocate subscription object */ 314 /* Allocate subscription object */
345
346 sub = kmalloc(sizeof(*sub), GFP_ATOMIC); 315 sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
347 if (!sub) { 316 if (!sub) {
348 warn("Subscription rejected, no memory\n"); 317 warn("Subscription rejected, no memory\n");
@@ -351,7 +320,6 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
351 } 320 }
352 321
353 /* Initialize subscription object */ 322 /* Initialize subscription object */
354
355 sub->seq.type = htohl(s->seq.type, swap); 323 sub->seq.type = htohl(s->seq.type, swap);
356 sub->seq.lower = htohl(s->seq.lower, swap); 324 sub->seq.lower = htohl(s->seq.lower, swap);
357 sub->seq.upper = htohl(s->seq.upper, swap); 325 sub->seq.upper = htohl(s->seq.upper, swap);
@@ -385,7 +353,6 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
385 * 353 *
386 * Called with subscriber's server port unlocked. 354 * Called with subscriber's server port unlocked.
387 */ 355 */
388
389static void subscr_conn_shutdown_event(void *usr_handle, 356static void subscr_conn_shutdown_event(void *usr_handle,
390 u32 port_ref, 357 u32 port_ref,
391 struct sk_buff **buf, 358 struct sk_buff **buf,
@@ -409,7 +376,6 @@ static void subscr_conn_shutdown_event(void *usr_handle,
409 * 376 *
410 * Called with subscriber's server port unlocked. 377 * Called with subscriber's server port unlocked.
411 */ 378 */
412
413static void subscr_conn_msg_event(void *usr_handle, 379static void subscr_conn_msg_event(void *usr_handle,
414 u32 port_ref, 380 u32 port_ref,
415 struct sk_buff **buf, 381 struct sk_buff **buf,
@@ -424,7 +390,6 @@ static void subscr_conn_msg_event(void *usr_handle,
424 * Lock subscriber's server port (& make a local copy of lock pointer, 390 * Lock subscriber's server port (& make a local copy of lock pointer,
425 * in case subscriber is deleted while processing subscription request) 391 * in case subscriber is deleted while processing subscription request)
426 */ 392 */
427
428 if (tipc_port_lock(port_ref) == NULL) 393 if (tipc_port_lock(port_ref) == NULL)
429 return; 394 return;
430 395
@@ -452,7 +417,6 @@ static void subscr_conn_msg_event(void *usr_handle,
452 * timeout code cannot delete the subscription, 417 * timeout code cannot delete the subscription,
453 * so the subscription object is still protected. 418 * so the subscription object is still protected.
454 */ 419 */
455
456 tipc_nametbl_subscribe(sub); 420 tipc_nametbl_subscribe(sub);
457 } 421 }
458 } 422 }
@@ -461,7 +425,6 @@ static void subscr_conn_msg_event(void *usr_handle,
461/** 425/**
462 * subscr_named_msg_event - handle request to establish a new subscriber 426 * subscr_named_msg_event - handle request to establish a new subscriber
463 */ 427 */
464
465static void subscr_named_msg_event(void *usr_handle, 428static void subscr_named_msg_event(void *usr_handle,
466 u32 port_ref, 429 u32 port_ref,
467 struct sk_buff **buf, 430 struct sk_buff **buf,
@@ -475,7 +438,6 @@ static void subscr_named_msg_event(void *usr_handle,
475 u32 server_port_ref; 438 u32 server_port_ref;
476 439
477 /* Create subscriber object */ 440 /* Create subscriber object */
478
479 subscriber = kzalloc(sizeof(struct tipc_subscriber), GFP_ATOMIC); 441 subscriber = kzalloc(sizeof(struct tipc_subscriber), GFP_ATOMIC);
480 if (subscriber == NULL) { 442 if (subscriber == NULL) {
481 warn("Subscriber rejected, no memory\n"); 443 warn("Subscriber rejected, no memory\n");
@@ -485,7 +447,6 @@ static void subscr_named_msg_event(void *usr_handle,
485 INIT_LIST_HEAD(&subscriber->subscriber_list); 447 INIT_LIST_HEAD(&subscriber->subscriber_list);
486 448
487 /* Create server port & establish connection to subscriber */ 449 /* Create server port & establish connection to subscriber */
488
489 tipc_createport(subscriber, 450 tipc_createport(subscriber,
490 importance, 451 importance,
491 NULL, 452 NULL,
@@ -504,26 +465,21 @@ static void subscr_named_msg_event(void *usr_handle,
504 tipc_connect2port(subscriber->port_ref, orig); 465 tipc_connect2port(subscriber->port_ref, orig);
505 466
506 /* Lock server port (& save lock address for future use) */ 467 /* Lock server port (& save lock address for future use) */
507
508 subscriber->lock = tipc_port_lock(subscriber->port_ref)->lock; 468 subscriber->lock = tipc_port_lock(subscriber->port_ref)->lock;
509 469
510 /* Add subscriber to topology server's subscriber list */ 470 /* Add subscriber to topology server's subscriber list */
511
512 spin_lock_bh(&topsrv.lock); 471 spin_lock_bh(&topsrv.lock);
513 list_add(&subscriber->subscriber_list, &topsrv.subscriber_list); 472 list_add(&subscriber->subscriber_list, &topsrv.subscriber_list);
514 spin_unlock_bh(&topsrv.lock); 473 spin_unlock_bh(&topsrv.lock);
515 474
516 /* Unlock server port */ 475 /* Unlock server port */
517
518 server_port_ref = subscriber->port_ref; 476 server_port_ref = subscriber->port_ref;
519 spin_unlock_bh(subscriber->lock); 477 spin_unlock_bh(subscriber->lock);
520 478
521 /* Send an ACK- to complete connection handshaking */ 479 /* Send an ACK- to complete connection handshaking */
522
523 tipc_send(server_port_ref, 0, NULL, 0); 480 tipc_send(server_port_ref, 0, NULL, 0);
524 481
525 /* Handle optional subscription request */ 482 /* Handle optional subscription request */
526
527 if (size != 0) { 483 if (size != 0) {
528 subscr_conn_msg_event(subscriber, server_port_ref, 484 subscr_conn_msg_event(subscriber, server_port_ref,
529 buf, data, size); 485 buf, data, size);
@@ -535,7 +491,6 @@ int tipc_subscr_start(void)
535 struct tipc_name_seq seq = {TIPC_TOP_SRV, TIPC_TOP_SRV, TIPC_TOP_SRV}; 491 struct tipc_name_seq seq = {TIPC_TOP_SRV, TIPC_TOP_SRV, TIPC_TOP_SRV};
536 int res; 492 int res;
537 493
538 memset(&topsrv, 0, sizeof(topsrv));
539 spin_lock_init(&topsrv.lock); 494 spin_lock_init(&topsrv.lock);
540 INIT_LIST_HEAD(&topsrv.subscriber_list); 495 INIT_LIST_HEAD(&topsrv.subscriber_list);
541 496
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index ef6529c8456f..218d2e07f0cc 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -51,7 +51,6 @@ struct tipc_subscription;
51 * @swap: indicates if subscriber uses opposite endianness in its messages 51 * @swap: indicates if subscriber uses opposite endianness in its messages
52 * @evt: template for events generated by subscription 52 * @evt: template for events generated by subscription
53 */ 53 */
54
55struct tipc_subscription { 54struct tipc_subscription {
56 struct tipc_name_seq seq; 55 struct tipc_name_seq seq;
57 u32 timeout; 56 u32 timeout;
@@ -80,5 +79,4 @@ int tipc_subscr_start(void);
80 79
81void tipc_subscr_stop(void); 80void tipc_subscr_stop(void);
82 81
83
84#endif 82#endif
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index d510353ef431..641f2e47f165 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -149,9 +149,10 @@ static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
149 * each socket state is protected by separate spin lock. 149 * each socket state is protected by separate spin lock.
150 */ 150 */
151 151
152static inline unsigned unix_hash_fold(__wsum n) 152static inline unsigned int unix_hash_fold(__wsum n)
153{ 153{
154 unsigned hash = (__force unsigned)n; 154 unsigned int hash = (__force unsigned int)n;
155
155 hash ^= hash>>16; 156 hash ^= hash>>16;
156 hash ^= hash>>8; 157 hash ^= hash>>8;
157 return hash&(UNIX_HASH_SIZE-1); 158 return hash&(UNIX_HASH_SIZE-1);
@@ -200,7 +201,7 @@ static inline void unix_release_addr(struct unix_address *addr)
200 * - if started by zero, it is abstract name. 201 * - if started by zero, it is abstract name.
201 */ 202 */
202 203
203static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned *hashp) 204static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
204{ 205{
205 if (len <= sizeof(short) || len > sizeof(*sunaddr)) 206 if (len <= sizeof(short) || len > sizeof(*sunaddr))
206 return -EINVAL; 207 return -EINVAL;
@@ -250,7 +251,7 @@ static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
250 251
251static struct sock *__unix_find_socket_byname(struct net *net, 252static struct sock *__unix_find_socket_byname(struct net *net,
252 struct sockaddr_un *sunname, 253 struct sockaddr_un *sunname,
253 int len, int type, unsigned hash) 254 int len, int type, unsigned int hash)
254{ 255{
255 struct sock *s; 256 struct sock *s;
256 struct hlist_node *node; 257 struct hlist_node *node;
@@ -273,7 +274,7 @@ found:
273static inline struct sock *unix_find_socket_byname(struct net *net, 274static inline struct sock *unix_find_socket_byname(struct net *net,
274 struct sockaddr_un *sunname, 275 struct sockaddr_un *sunname,
275 int len, int type, 276 int len, int type,
276 unsigned hash) 277 unsigned int hash)
277{ 278{
278 struct sock *s; 279 struct sock *s;
279 280
@@ -760,7 +761,7 @@ out: mutex_unlock(&u->readlock);
760 761
761static struct sock *unix_find_other(struct net *net, 762static struct sock *unix_find_other(struct net *net,
762 struct sockaddr_un *sunname, int len, 763 struct sockaddr_un *sunname, int len,
763 int type, unsigned hash, int *error) 764 int type, unsigned int hash, int *error)
764{ 765{
765 struct sock *u; 766 struct sock *u;
766 struct path path; 767 struct path path;
@@ -824,7 +825,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
824 struct dentry *dentry = NULL; 825 struct dentry *dentry = NULL;
825 struct path path; 826 struct path path;
826 int err; 827 int err;
827 unsigned hash; 828 unsigned int hash;
828 struct unix_address *addr; 829 struct unix_address *addr;
829 struct hlist_head *list; 830 struct hlist_head *list;
830 831
@@ -964,7 +965,7 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
964 struct net *net = sock_net(sk); 965 struct net *net = sock_net(sk);
965 struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr; 966 struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
966 struct sock *other; 967 struct sock *other;
967 unsigned hash; 968 unsigned int hash;
968 int err; 969 int err;
969 970
970 if (addr->sa_family != AF_UNSPEC) { 971 if (addr->sa_family != AF_UNSPEC) {
@@ -1062,7 +1063,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
1062 struct sock *newsk = NULL; 1063 struct sock *newsk = NULL;
1063 struct sock *other = NULL; 1064 struct sock *other = NULL;
1064 struct sk_buff *skb = NULL; 1065 struct sk_buff *skb = NULL;
1065 unsigned hash; 1066 unsigned int hash;
1066 int st; 1067 int st;
1067 int err; 1068 int err;
1068 long timeo; 1069 long timeo;
@@ -1437,11 +1438,12 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1437 struct sock *other = NULL; 1438 struct sock *other = NULL;
1438 int namelen = 0; /* fake GCC */ 1439 int namelen = 0; /* fake GCC */
1439 int err; 1440 int err;
1440 unsigned hash; 1441 unsigned int hash;
1441 struct sk_buff *skb; 1442 struct sk_buff *skb;
1442 long timeo; 1443 long timeo;
1443 struct scm_cookie tmp_scm; 1444 struct scm_cookie tmp_scm;
1444 int max_level; 1445 int max_level;
1446 int data_len = 0;
1445 1447
1446 if (NULL == siocb->scm) 1448 if (NULL == siocb->scm)
1447 siocb->scm = &tmp_scm; 1449 siocb->scm = &tmp_scm;
@@ -1475,7 +1477,13 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1475 if (len > sk->sk_sndbuf - 32) 1477 if (len > sk->sk_sndbuf - 32)
1476 goto out; 1478 goto out;
1477 1479
1478 skb = sock_alloc_send_skb(sk, len, msg->msg_flags&MSG_DONTWAIT, &err); 1480 if (len > SKB_MAX_ALLOC)
1481 data_len = min_t(size_t,
1482 len - SKB_MAX_ALLOC,
1483 MAX_SKB_FRAGS * PAGE_SIZE);
1484
1485 skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
1486 msg->msg_flags & MSG_DONTWAIT, &err);
1479 if (skb == NULL) 1487 if (skb == NULL)
1480 goto out; 1488 goto out;
1481 1489
@@ -1485,8 +1493,10 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
1485 max_level = err + 1; 1493 max_level = err + 1;
1486 unix_get_secdata(siocb->scm, skb); 1494 unix_get_secdata(siocb->scm, skb);
1487 1495
1488 skb_reset_transport_header(skb); 1496 skb_put(skb, len - data_len);
1489 err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len); 1497 skb->data_len = data_len;
1498 skb->len = len;
1499 err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, 0, len);
1490 if (err) 1500 if (err)
1491 goto out_free; 1501 goto out_free;
1492 1502
diff --git a/net/unix/diag.c b/net/unix/diag.c
index f0486ae9ebe6..47d3002737f5 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -310,7 +310,7 @@ static int unix_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
310 return unix_diag_get_exact(skb, h, (struct unix_diag_req *)NLMSG_DATA(h)); 310 return unix_diag_get_exact(skb, h, (struct unix_diag_req *)NLMSG_DATA(h));
311} 311}
312 312
313static struct sock_diag_handler unix_diag_handler = { 313static const struct sock_diag_handler unix_diag_handler = {
314 .family = AF_UNIX, 314 .family = AF_UNIX,
315 .dump = unix_diag_handler_dump, 315 .dump = unix_diag_handler_dump,
316}; 316};
diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c
index 397cffebb3b6..b34b5b9792f0 100644
--- a/net/unix/sysctl_net_unix.c
+++ b/net/unix/sysctl_net_unix.c
@@ -26,12 +26,6 @@ static ctl_table unix_table[] = {
26 { } 26 { }
27}; 27};
28 28
29static struct ctl_path unix_path[] = {
30 { .procname = "net", },
31 { .procname = "unix", },
32 { },
33};
34
35int __net_init unix_sysctl_register(struct net *net) 29int __net_init unix_sysctl_register(struct net *net)
36{ 30{
37 struct ctl_table *table; 31 struct ctl_table *table;
@@ -41,7 +35,7 @@ int __net_init unix_sysctl_register(struct net *net)
41 goto err_alloc; 35 goto err_alloc;
42 36
43 table[0].data = &net->unx.sysctl_max_dgram_qlen; 37 table[0].data = &net->unx.sysctl_max_dgram_qlen;
44 net->unx.ctl = register_net_sysctl_table(net, unix_path, table); 38 net->unx.ctl = register_net_sysctl(net, "net/unix", table);
45 if (net->unx.ctl == NULL) 39 if (net->unx.ctl == NULL)
46 goto err_reg; 40 goto err_reg;
47 41
@@ -58,6 +52,6 @@ void unix_sysctl_unregister(struct net *net)
58 struct ctl_table *table; 52 struct ctl_table *table;
59 53
60 table = net->unx.ctl->ctl_table_arg; 54 table = net->unx.ctl->ctl_table_arg;
61 unregister_sysctl_table(net->unx.ctl); 55 unregister_net_sysctl_table(net->unx.ctl);
62 kfree(table); 56 kfree(table);
63} 57}
diff --git a/net/wimax/stack.c b/net/wimax/stack.c
index 3c65eae701c4..a6470ac39498 100644
--- a/net/wimax/stack.c
+++ b/net/wimax/stack.c
@@ -187,7 +187,7 @@ out:
187 187
188static 188static
189void __check_new_state(enum wimax_st old_state, enum wimax_st new_state, 189void __check_new_state(enum wimax_st old_state, enum wimax_st new_state,
190 unsigned allowed_states_bm) 190 unsigned int allowed_states_bm)
191{ 191{
192 if (WARN_ON(((1 << new_state) & allowed_states_bm) == 0)) { 192 if (WARN_ON(((1 << new_state) & allowed_states_bm) == 0)) {
193 printk(KERN_ERR "SW BUG! Forbidden state change %u -> %u\n", 193 printk(KERN_ERR "SW BUG! Forbidden state change %u -> %u\n",
@@ -425,7 +425,8 @@ static
425size_t wimax_addr_scnprint(char *addr_str, size_t addr_str_size, 425size_t wimax_addr_scnprint(char *addr_str, size_t addr_str_size,
426 unsigned char *addr, size_t addr_len) 426 unsigned char *addr, size_t addr_len)
427{ 427{
428 unsigned cnt, total; 428 unsigned int cnt, total;
429
429 for (total = cnt = 0; cnt < addr_len; cnt++) 430 for (total = cnt = 0; cnt < addr_len; cnt++)
430 total += scnprintf(addr_str + total, addr_str_size - total, 431 total += scnprintf(addr_str + total, addr_str_size - total,
431 "%02x%c", addr[cnt], 432 "%02x%c", addr[cnt],
diff --git a/net/wireless/core.c b/net/wireless/core.c
index ccdfed897651..39f2538a46fc 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -422,10 +422,6 @@ static int wiphy_verify_combinations(struct wiphy *wiphy)
422 const struct ieee80211_iface_combination *c; 422 const struct ieee80211_iface_combination *c;
423 int i, j; 423 int i, j;
424 424
425 /* If we have combinations enforce them */
426 if (wiphy->n_iface_combinations)
427 wiphy->flags |= WIPHY_FLAG_ENFORCE_COMBINATIONS;
428
429 for (i = 0; i < wiphy->n_iface_combinations; i++) { 425 for (i = 0; i < wiphy->n_iface_combinations; i++) {
430 u32 cnt = 0; 426 u32 cnt = 0;
431 u16 all_iftypes = 0; 427 u16 all_iftypes = 0;
@@ -708,6 +704,10 @@ void wiphy_unregister(struct wiphy *wiphy)
708 flush_work(&rdev->scan_done_wk); 704 flush_work(&rdev->scan_done_wk);
709 cancel_work_sync(&rdev->conn_work); 705 cancel_work_sync(&rdev->conn_work);
710 flush_work(&rdev->event_work); 706 flush_work(&rdev->event_work);
707
708 if (rdev->wowlan && rdev->ops->set_wakeup)
709 rdev->ops->set_wakeup(&rdev->wiphy, false);
710 cfg80211_rdev_free_wowlan(rdev);
711} 711}
712EXPORT_SYMBOL(wiphy_unregister); 712EXPORT_SYMBOL(wiphy_unregister);
713 713
@@ -720,7 +720,6 @@ void cfg80211_dev_free(struct cfg80211_registered_device *rdev)
720 mutex_destroy(&rdev->sched_scan_mtx); 720 mutex_destroy(&rdev->sched_scan_mtx);
721 list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list) 721 list_for_each_entry_safe(scan, tmp, &rdev->bss_list, list)
722 cfg80211_put_bss(&scan->pub); 722 cfg80211_put_bss(&scan->pub);
723 cfg80211_rdev_free_wowlan(rdev);
724 kfree(rdev); 723 kfree(rdev);
725} 724}
726 725
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index ba21ab22187b..8c747fa9319b 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -38,6 +38,7 @@
38 38
39#define MESH_MAX_PREQ_RETRIES 4 39#define MESH_MAX_PREQ_RETRIES 4
40 40
41#define MESH_SYNC_NEIGHBOR_OFFSET_MAX 50
41 42
42const struct mesh_config default_mesh_config = { 43const struct mesh_config default_mesh_config = {
43 .dot11MeshRetryTimeout = MESH_RET_T, 44 .dot11MeshRetryTimeout = MESH_RET_T,
@@ -48,6 +49,7 @@ const struct mesh_config default_mesh_config = {
48 .element_ttl = MESH_DEFAULT_ELEMENT_TTL, 49 .element_ttl = MESH_DEFAULT_ELEMENT_TTL,
49 .auto_open_plinks = true, 50 .auto_open_plinks = true,
50 .dot11MeshMaxPeerLinks = MESH_MAX_ESTAB_PLINKS, 51 .dot11MeshMaxPeerLinks = MESH_MAX_ESTAB_PLINKS,
52 .dot11MeshNbrOffsetMaxNeighbor = MESH_SYNC_NEIGHBOR_OFFSET_MAX,
51 .dot11MeshHWMPactivePathTimeout = MESH_PATH_TIMEOUT, 53 .dot11MeshHWMPactivePathTimeout = MESH_PATH_TIMEOUT,
52 .dot11MeshHWMPpreqMinInterval = MESH_PREQ_MIN_INT, 54 .dot11MeshHWMPpreqMinInterval = MESH_PREQ_MIN_INT,
53 .dot11MeshHWMPperrMinInterval = MESH_PERR_MIN_INT, 55 .dot11MeshHWMPperrMinInterval = MESH_PERR_MIN_INT,
@@ -62,6 +64,7 @@ const struct mesh_config default_mesh_config = {
62}; 64};
63 65
64const struct mesh_setup default_mesh_setup = { 66const struct mesh_setup default_mesh_setup = {
67 .sync_method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET,
65 .path_sel_proto = IEEE80211_PATH_PROTOCOL_HWMP, 68 .path_sel_proto = IEEE80211_PATH_PROTOCOL_HWMP,
66 .path_metric = IEEE80211_PATH_METRIC_AIRTIME, 69 .path_metric = IEEE80211_PATH_METRIC_AIRTIME,
67 .ie = NULL, 70 .ie = NULL,
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index f5a7ac3a0939..6801d96bc224 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -6,6 +6,7 @@
6 6
7#include <linux/kernel.h> 7#include <linux/kernel.h>
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/etherdevice.h>
9#include <linux/netdevice.h> 10#include <linux/netdevice.h>
10#include <linux/nl80211.h> 11#include <linux/nl80211.h>
11#include <linux/slab.h> 12#include <linux/slab.h>
@@ -100,7 +101,7 @@ void __cfg80211_send_deauth(struct net_device *dev,
100 ASSERT_WDEV_LOCK(wdev); 101 ASSERT_WDEV_LOCK(wdev);
101 102
102 if (wdev->current_bss && 103 if (wdev->current_bss &&
103 memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) { 104 compare_ether_addr(wdev->current_bss->pub.bssid, bssid) == 0) {
104 cfg80211_unhold_bss(wdev->current_bss); 105 cfg80211_unhold_bss(wdev->current_bss);
105 cfg80211_put_bss(&wdev->current_bss->pub); 106 cfg80211_put_bss(&wdev->current_bss->pub);
106 wdev->current_bss = NULL; 107 wdev->current_bss = NULL;
@@ -115,7 +116,7 @@ void __cfg80211_send_deauth(struct net_device *dev,
115 116
116 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code); 117 reason_code = le16_to_cpu(mgmt->u.deauth.reason_code);
117 118
118 from_ap = memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0; 119 from_ap = compare_ether_addr(mgmt->sa, dev->dev_addr) != 0;
119 __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap); 120 __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap);
120 } else if (wdev->sme_state == CFG80211_SME_CONNECTING) { 121 } else if (wdev->sme_state == CFG80211_SME_CONNECTING) {
121 __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, NULL, 0, 122 __cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, NULL, 0,
@@ -154,7 +155,7 @@ void __cfg80211_send_disassoc(struct net_device *dev,
154 return; 155 return;
155 156
156 if (wdev->current_bss && 157 if (wdev->current_bss &&
157 memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) { 158 compare_ether_addr(wdev->current_bss->pub.bssid, bssid) == 0) {
158 cfg80211_sme_disassoc(dev, wdev->current_bss); 159 cfg80211_sme_disassoc(dev, wdev->current_bss);
159 cfg80211_unhold_bss(wdev->current_bss); 160 cfg80211_unhold_bss(wdev->current_bss);
160 cfg80211_put_bss(&wdev->current_bss->pub); 161 cfg80211_put_bss(&wdev->current_bss->pub);
@@ -165,7 +166,7 @@ void __cfg80211_send_disassoc(struct net_device *dev,
165 166
166 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code); 167 reason_code = le16_to_cpu(mgmt->u.disassoc.reason_code);
167 168
168 from_ap = memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0; 169 from_ap = compare_ether_addr(mgmt->sa, dev->dev_addr) != 0;
169 __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap); 170 __cfg80211_disconnected(dev, NULL, 0, reason_code, from_ap);
170} 171}
171EXPORT_SYMBOL(__cfg80211_send_disassoc); 172EXPORT_SYMBOL(__cfg80211_send_disassoc);
@@ -285,7 +286,7 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
285 return -EINVAL; 286 return -EINVAL;
286 287
287 if (wdev->current_bss && 288 if (wdev->current_bss &&
288 memcmp(bssid, wdev->current_bss->pub.bssid, ETH_ALEN) == 0) 289 compare_ether_addr(bssid, wdev->current_bss->pub.bssid) == 0)
289 return -EALREADY; 290 return -EALREADY;
290 291
291 memset(&req, 0, sizeof(req)); 292 memset(&req, 0, sizeof(req));
@@ -362,7 +363,7 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
362 memset(&req, 0, sizeof(req)); 363 memset(&req, 0, sizeof(req));
363 364
364 if (wdev->current_bss && prev_bssid && 365 if (wdev->current_bss && prev_bssid &&
365 memcmp(wdev->current_bss->pub.bssid, prev_bssid, ETH_ALEN) == 0) { 366 compare_ether_addr(wdev->current_bss->pub.bssid, prev_bssid) == 0) {
366 /* 367 /*
367 * Trying to reassociate: Allow this to proceed and let the old 368 * Trying to reassociate: Allow this to proceed and let the old
368 * association to be dropped when the new one is completed. 369 * association to be dropped when the new one is completed.
@@ -446,7 +447,8 @@ int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
446 447
447 if (local_state_change) { 448 if (local_state_change) {
448 if (wdev->current_bss && 449 if (wdev->current_bss &&
449 memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) { 450 compare_ether_addr(wdev->current_bss->pub.bssid, bssid)
451 == 0) {
450 cfg80211_unhold_bss(wdev->current_bss); 452 cfg80211_unhold_bss(wdev->current_bss);
451 cfg80211_put_bss(&wdev->current_bss->pub); 453 cfg80211_put_bss(&wdev->current_bss->pub);
452 wdev->current_bss = NULL; 454 wdev->current_bss = NULL;
@@ -495,7 +497,7 @@ static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
495 req.local_state_change = local_state_change; 497 req.local_state_change = local_state_change;
496 req.ie = ie; 498 req.ie = ie;
497 req.ie_len = ie_len; 499 req.ie_len = ie_len;
498 if (memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0) 500 if (compare_ether_addr(wdev->current_bss->pub.bssid, bssid) == 0)
499 req.bss = &wdev->current_bss->pub; 501 req.bss = &wdev->current_bss->pub;
500 else 502 else
501 return -ENOTCONN; 503 return -ENOTCONN;
@@ -758,8 +760,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
758 break; 760 break;
759 } 761 }
760 762
761 if (memcmp(wdev->current_bss->pub.bssid, 763 if (compare_ether_addr(wdev->current_bss->pub.bssid,
762 mgmt->bssid, ETH_ALEN)) { 764 mgmt->bssid)) {
763 err = -ENOTCONN; 765 err = -ENOTCONN;
764 break; 766 break;
765 } 767 }
@@ -772,8 +774,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
772 break; 774 break;
773 775
774 /* for station, check that DA is the AP */ 776 /* for station, check that DA is the AP */
775 if (memcmp(wdev->current_bss->pub.bssid, 777 if (compare_ether_addr(wdev->current_bss->pub.bssid,
776 mgmt->da, ETH_ALEN)) { 778 mgmt->da)) {
777 err = -ENOTCONN; 779 err = -ENOTCONN;
778 break; 780 break;
779 } 781 }
@@ -781,11 +783,11 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
781 case NL80211_IFTYPE_AP: 783 case NL80211_IFTYPE_AP:
782 case NL80211_IFTYPE_P2P_GO: 784 case NL80211_IFTYPE_P2P_GO:
783 case NL80211_IFTYPE_AP_VLAN: 785 case NL80211_IFTYPE_AP_VLAN:
784 if (memcmp(mgmt->bssid, dev->dev_addr, ETH_ALEN)) 786 if (compare_ether_addr(mgmt->bssid, dev->dev_addr))
785 err = -EINVAL; 787 err = -EINVAL;
786 break; 788 break;
787 case NL80211_IFTYPE_MESH_POINT: 789 case NL80211_IFTYPE_MESH_POINT:
788 if (memcmp(mgmt->sa, mgmt->bssid, ETH_ALEN)) { 790 if (compare_ether_addr(mgmt->sa, mgmt->bssid)) {
789 err = -EINVAL; 791 err = -EINVAL;
790 break; 792 break;
791 } 793 }
@@ -804,7 +806,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
804 return err; 806 return err;
805 } 807 }
806 808
807 if (memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0) 809 if (compare_ether_addr(mgmt->sa, dev->dev_addr) != 0)
808 return -EINVAL; 810 return -EINVAL;
809 811
810 /* Transmit the Action frame as requested by user space */ 812 /* Transmit the Action frame as requested by user space */
@@ -928,6 +930,33 @@ void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
928} 930}
929EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify); 931EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify);
930 932
933void cfg80211_ch_switch_notify(struct net_device *dev, int freq,
934 enum nl80211_channel_type type)
935{
936 struct wireless_dev *wdev = dev->ieee80211_ptr;
937 struct wiphy *wiphy = wdev->wiphy;
938 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
939 struct ieee80211_channel *chan;
940
941 wdev_lock(wdev);
942
943 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
944 wdev->iftype != NL80211_IFTYPE_P2P_GO))
945 goto out;
946
947 chan = rdev_freq_to_chan(rdev, freq, type);
948 if (WARN_ON(!chan))
949 goto out;
950
951 wdev->channel = chan;
952
953 nl80211_ch_switch_notify(rdev, dev, freq, type, GFP_KERNEL);
954out:
955 wdev_unlock(wdev);
956 return;
957}
958EXPORT_SYMBOL(cfg80211_ch_switch_notify);
959
931bool cfg80211_rx_spurious_frame(struct net_device *dev, 960bool cfg80211_rx_spurious_frame(struct net_device *dev,
932 const u8 *addr, gfp_t gfp) 961 const u8 *addr, gfp_t gfp)
933{ 962{
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index f432c57af05d..d5005c59c472 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -356,20 +356,26 @@ static inline void *nl80211hdr_put(struct sk_buff *skb, u32 pid, u32 seq,
356static int nl80211_msg_put_channel(struct sk_buff *msg, 356static int nl80211_msg_put_channel(struct sk_buff *msg,
357 struct ieee80211_channel *chan) 357 struct ieee80211_channel *chan)
358{ 358{
359 NLA_PUT_U32(msg, NL80211_FREQUENCY_ATTR_FREQ, 359 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_FREQ,
360 chan->center_freq); 360 chan->center_freq))
361 goto nla_put_failure;
361 362
362 if (chan->flags & IEEE80211_CHAN_DISABLED) 363 if ((chan->flags & IEEE80211_CHAN_DISABLED) &&
363 NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_DISABLED); 364 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_DISABLED))
364 if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) 365 goto nla_put_failure;
365 NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_PASSIVE_SCAN); 366 if ((chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) &&
366 if (chan->flags & IEEE80211_CHAN_NO_IBSS) 367 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_PASSIVE_SCAN))
367 NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_NO_IBSS); 368 goto nla_put_failure;
368 if (chan->flags & IEEE80211_CHAN_RADAR) 369 if ((chan->flags & IEEE80211_CHAN_NO_IBSS) &&
369 NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_RADAR); 370 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_IBSS))
371 goto nla_put_failure;
372 if ((chan->flags & IEEE80211_CHAN_RADAR) &&
373 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR))
374 goto nla_put_failure;
370 375
371 NLA_PUT_U32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, 376 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
372 DBM_TO_MBM(chan->max_power)); 377 DBM_TO_MBM(chan->max_power)))
378 goto nla_put_failure;
373 379
374 return 0; 380 return 0;
375 381
@@ -621,8 +627,8 @@ static int nl80211_put_iftypes(struct sk_buff *msg, u32 attr, u16 ifmodes)
621 627
622 i = 0; 628 i = 0;
623 while (ifmodes) { 629 while (ifmodes) {
624 if (ifmodes & 1) 630 if ((ifmodes & 1) && nla_put_flag(msg, i))
625 NLA_PUT_FLAG(msg, i); 631 goto nla_put_failure;
626 ifmodes >>= 1; 632 ifmodes >>= 1;
627 i++; 633 i++;
628 } 634 }
@@ -665,8 +671,9 @@ static int nl80211_put_iface_combinations(struct wiphy *wiphy,
665 nl_limit = nla_nest_start(msg, j + 1); 671 nl_limit = nla_nest_start(msg, j + 1);
666 if (!nl_limit) 672 if (!nl_limit)
667 goto nla_put_failure; 673 goto nla_put_failure;
668 NLA_PUT_U32(msg, NL80211_IFACE_LIMIT_MAX, 674 if (nla_put_u32(msg, NL80211_IFACE_LIMIT_MAX,
669 c->limits[j].max); 675 c->limits[j].max))
676 goto nla_put_failure;
670 if (nl80211_put_iftypes(msg, NL80211_IFACE_LIMIT_TYPES, 677 if (nl80211_put_iftypes(msg, NL80211_IFACE_LIMIT_TYPES,
671 c->limits[j].types)) 678 c->limits[j].types))
672 goto nla_put_failure; 679 goto nla_put_failure;
@@ -675,13 +682,14 @@ static int nl80211_put_iface_combinations(struct wiphy *wiphy,
675 682
676 nla_nest_end(msg, nl_limits); 683 nla_nest_end(msg, nl_limits);
677 684
678 if (c->beacon_int_infra_match) 685 if (c->beacon_int_infra_match &&
679 NLA_PUT_FLAG(msg, 686 nla_put_flag(msg, NL80211_IFACE_COMB_STA_AP_BI_MATCH))
680 NL80211_IFACE_COMB_STA_AP_BI_MATCH); 687 goto nla_put_failure;
681 NLA_PUT_U32(msg, NL80211_IFACE_COMB_NUM_CHANNELS, 688 if (nla_put_u32(msg, NL80211_IFACE_COMB_NUM_CHANNELS,
682 c->num_different_channels); 689 c->num_different_channels) ||
683 NLA_PUT_U32(msg, NL80211_IFACE_COMB_MAXNUM, 690 nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM,
684 c->max_interfaces); 691 c->max_interfaces))
692 goto nla_put_failure;
685 693
686 nla_nest_end(msg, nl_combi); 694 nla_nest_end(msg, nl_combi);
687 } 695 }
@@ -712,64 +720,74 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
712 if (!hdr) 720 if (!hdr)
713 return -1; 721 return -1;
714 722
715 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx); 723 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx) ||
716 NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)); 724 nla_put_string(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)) ||
717 725 nla_put_u32(msg, NL80211_ATTR_GENERATION,
718 NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, 726 cfg80211_rdev_list_generation) ||
719 cfg80211_rdev_list_generation); 727 nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT,
720 728 dev->wiphy.retry_short) ||
721 NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT, 729 nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_LONG,
722 dev->wiphy.retry_short); 730 dev->wiphy.retry_long) ||
723 NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_LONG, 731 nla_put_u32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD,
724 dev->wiphy.retry_long); 732 dev->wiphy.frag_threshold) ||
725 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD, 733 nla_put_u32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD,
726 dev->wiphy.frag_threshold); 734 dev->wiphy.rts_threshold) ||
727 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD, 735 nla_put_u8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS,
728 dev->wiphy.rts_threshold); 736 dev->wiphy.coverage_class) ||
729 NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS, 737 nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS,
730 dev->wiphy.coverage_class); 738 dev->wiphy.max_scan_ssids) ||
731 NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS, 739 nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS,
732 dev->wiphy.max_scan_ssids); 740 dev->wiphy.max_sched_scan_ssids) ||
733 NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS, 741 nla_put_u16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN,
734 dev->wiphy.max_sched_scan_ssids); 742 dev->wiphy.max_scan_ie_len) ||
735 NLA_PUT_U16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN, 743 nla_put_u16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN,
736 dev->wiphy.max_scan_ie_len); 744 dev->wiphy.max_sched_scan_ie_len) ||
737 NLA_PUT_U16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN, 745 nla_put_u8(msg, NL80211_ATTR_MAX_MATCH_SETS,
738 dev->wiphy.max_sched_scan_ie_len); 746 dev->wiphy.max_match_sets))
739 NLA_PUT_U8(msg, NL80211_ATTR_MAX_MATCH_SETS, 747 goto nla_put_failure;
740 dev->wiphy.max_match_sets); 748
741 749 if ((dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN) &&
742 if (dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN) 750 nla_put_flag(msg, NL80211_ATTR_SUPPORT_IBSS_RSN))
743 NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_IBSS_RSN); 751 goto nla_put_failure;
744 if (dev->wiphy.flags & WIPHY_FLAG_MESH_AUTH) 752 if ((dev->wiphy.flags & WIPHY_FLAG_MESH_AUTH) &&
745 NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_MESH_AUTH); 753 nla_put_flag(msg, NL80211_ATTR_SUPPORT_MESH_AUTH))
746 if (dev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) 754 goto nla_put_failure;
747 NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_AP_UAPSD); 755 if ((dev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) &&
748 if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM) 756 nla_put_flag(msg, NL80211_ATTR_SUPPORT_AP_UAPSD))
749 NLA_PUT_FLAG(msg, NL80211_ATTR_ROAM_SUPPORT); 757 goto nla_put_failure;
750 if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) 758 if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM) &&
751 NLA_PUT_FLAG(msg, NL80211_ATTR_TDLS_SUPPORT); 759 nla_put_flag(msg, NL80211_ATTR_ROAM_SUPPORT))
752 if (dev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP) 760 goto nla_put_failure;
753 NLA_PUT_FLAG(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP); 761 if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
754 762 nla_put_flag(msg, NL80211_ATTR_TDLS_SUPPORT))
755 NLA_PUT(msg, NL80211_ATTR_CIPHER_SUITES, 763 goto nla_put_failure;
756 sizeof(u32) * dev->wiphy.n_cipher_suites, 764 if ((dev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP) &&
757 dev->wiphy.cipher_suites); 765 nla_put_flag(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP))
758 766 goto nla_put_failure;
759 NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_PMKIDS, 767
760 dev->wiphy.max_num_pmkids); 768 if (nla_put(msg, NL80211_ATTR_CIPHER_SUITES,
761 769 sizeof(u32) * dev->wiphy.n_cipher_suites,
762 if (dev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL) 770 dev->wiphy.cipher_suites))
763 NLA_PUT_FLAG(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE); 771 goto nla_put_failure;
764 772
765 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX, 773 if (nla_put_u8(msg, NL80211_ATTR_MAX_NUM_PMKIDS,
766 dev->wiphy.available_antennas_tx); 774 dev->wiphy.max_num_pmkids))
767 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX, 775 goto nla_put_failure;
768 dev->wiphy.available_antennas_rx); 776
769 777 if ((dev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL) &&
770 if (dev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD) 778 nla_put_flag(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE))
771 NLA_PUT_U32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD, 779 goto nla_put_failure;
772 dev->wiphy.probe_resp_offload); 780
781 if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX,
782 dev->wiphy.available_antennas_tx) ||
783 nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX,
784 dev->wiphy.available_antennas_rx))
785 goto nla_put_failure;
786
787 if ((dev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD) &&
788 nla_put_u32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD,
789 dev->wiphy.probe_resp_offload))
790 goto nla_put_failure;
773 791
774 if ((dev->wiphy.available_antennas_tx || 792 if ((dev->wiphy.available_antennas_tx ||
775 dev->wiphy.available_antennas_rx) && dev->ops->get_antenna) { 793 dev->wiphy.available_antennas_rx) && dev->ops->get_antenna) {
@@ -777,8 +795,11 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
777 int res; 795 int res;
778 res = dev->ops->get_antenna(&dev->wiphy, &tx_ant, &rx_ant); 796 res = dev->ops->get_antenna(&dev->wiphy, &tx_ant, &rx_ant);
779 if (!res) { 797 if (!res) {
780 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_TX, tx_ant); 798 if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_TX,
781 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_RX, rx_ant); 799 tx_ant) ||
800 nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_RX,
801 rx_ant))
802 goto nla_put_failure;
782 } 803 }
783 } 804 }
784 805
@@ -799,17 +820,17 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
799 goto nla_put_failure; 820 goto nla_put_failure;
800 821
801 /* add HT info */ 822 /* add HT info */
802 if (dev->wiphy.bands[band]->ht_cap.ht_supported) { 823 if (dev->wiphy.bands[band]->ht_cap.ht_supported &&
803 NLA_PUT(msg, NL80211_BAND_ATTR_HT_MCS_SET, 824 (nla_put(msg, NL80211_BAND_ATTR_HT_MCS_SET,
804 sizeof(dev->wiphy.bands[band]->ht_cap.mcs), 825 sizeof(dev->wiphy.bands[band]->ht_cap.mcs),
805 &dev->wiphy.bands[band]->ht_cap.mcs); 826 &dev->wiphy.bands[band]->ht_cap.mcs) ||
806 NLA_PUT_U16(msg, NL80211_BAND_ATTR_HT_CAPA, 827 nla_put_u16(msg, NL80211_BAND_ATTR_HT_CAPA,
807 dev->wiphy.bands[band]->ht_cap.cap); 828 dev->wiphy.bands[band]->ht_cap.cap) ||
808 NLA_PUT_U8(msg, NL80211_BAND_ATTR_HT_AMPDU_FACTOR, 829 nla_put_u8(msg, NL80211_BAND_ATTR_HT_AMPDU_FACTOR,
809 dev->wiphy.bands[band]->ht_cap.ampdu_factor); 830 dev->wiphy.bands[band]->ht_cap.ampdu_factor) ||
810 NLA_PUT_U8(msg, NL80211_BAND_ATTR_HT_AMPDU_DENSITY, 831 nla_put_u8(msg, NL80211_BAND_ATTR_HT_AMPDU_DENSITY,
811 dev->wiphy.bands[band]->ht_cap.ampdu_density); 832 dev->wiphy.bands[band]->ht_cap.ampdu_density)))
812 } 833 goto nla_put_failure;
813 834
814 /* add frequencies */ 835 /* add frequencies */
815 nl_freqs = nla_nest_start(msg, NL80211_BAND_ATTR_FREQS); 836 nl_freqs = nla_nest_start(msg, NL80211_BAND_ATTR_FREQS);
@@ -842,11 +863,13 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
842 goto nla_put_failure; 863 goto nla_put_failure;
843 864
844 rate = &dev->wiphy.bands[band]->bitrates[i]; 865 rate = &dev->wiphy.bands[band]->bitrates[i];
845 NLA_PUT_U32(msg, NL80211_BITRATE_ATTR_RATE, 866 if (nla_put_u32(msg, NL80211_BITRATE_ATTR_RATE,
846 rate->bitrate); 867 rate->bitrate))
847 if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) 868 goto nla_put_failure;
848 NLA_PUT_FLAG(msg, 869 if ((rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) &&
849 NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE); 870 nla_put_flag(msg,
871 NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE))
872 goto nla_put_failure;
850 873
851 nla_nest_end(msg, nl_rate); 874 nla_nest_end(msg, nl_rate);
852 } 875 }
@@ -866,7 +889,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
866 do { \ 889 do { \
867 if (dev->ops->op) { \ 890 if (dev->ops->op) { \
868 i++; \ 891 i++; \
869 NLA_PUT_U32(msg, i, NL80211_CMD_ ## n); \ 892 if (nla_put_u32(msg, i, NL80211_CMD_ ## n)) \
893 goto nla_put_failure; \
870 } \ 894 } \
871 } while (0) 895 } while (0)
872 896
@@ -894,7 +918,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
894 CMD(mgmt_tx_cancel_wait, FRAME_WAIT_CANCEL); 918 CMD(mgmt_tx_cancel_wait, FRAME_WAIT_CANCEL);
895 if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) { 919 if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) {
896 i++; 920 i++;
897 NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS); 921 if (nla_put_u32(msg, i, NL80211_CMD_SET_WIPHY_NETNS))
922 goto nla_put_failure;
898 } 923 }
899 CMD(set_channel, SET_CHANNEL); 924 CMD(set_channel, SET_CHANNEL);
900 CMD(set_wds_peer, SET_WDS_PEER); 925 CMD(set_wds_peer, SET_WDS_PEER);
@@ -908,7 +933,8 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
908 CMD(set_noack_map, SET_NOACK_MAP); 933 CMD(set_noack_map, SET_NOACK_MAP);
909 if (dev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS) { 934 if (dev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS) {
910 i++; 935 i++;
911 NLA_PUT_U32(msg, i, NL80211_CMD_REGISTER_BEACONS); 936 if (nla_put_u32(msg, i, NL80211_CMD_REGISTER_BEACONS))
937 goto nla_put_failure;
912 } 938 }
913 939
914#ifdef CONFIG_NL80211_TESTMODE 940#ifdef CONFIG_NL80211_TESTMODE
@@ -919,23 +945,27 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
919 945
920 if (dev->ops->connect || dev->ops->auth) { 946 if (dev->ops->connect || dev->ops->auth) {
921 i++; 947 i++;
922 NLA_PUT_U32(msg, i, NL80211_CMD_CONNECT); 948 if (nla_put_u32(msg, i, NL80211_CMD_CONNECT))
949 goto nla_put_failure;
923 } 950 }
924 951
925 if (dev->ops->disconnect || dev->ops->deauth) { 952 if (dev->ops->disconnect || dev->ops->deauth) {
926 i++; 953 i++;
927 NLA_PUT_U32(msg, i, NL80211_CMD_DISCONNECT); 954 if (nla_put_u32(msg, i, NL80211_CMD_DISCONNECT))
955 goto nla_put_failure;
928 } 956 }
929 957
930 nla_nest_end(msg, nl_cmds); 958 nla_nest_end(msg, nl_cmds);
931 959
932 if (dev->ops->remain_on_channel && 960 if (dev->ops->remain_on_channel &&
933 dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) 961 (dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) &&
934 NLA_PUT_U32(msg, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION, 962 nla_put_u32(msg, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION,
935 dev->wiphy.max_remain_on_channel_duration); 963 dev->wiphy.max_remain_on_channel_duration))
964 goto nla_put_failure;
936 965
937 if (dev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX) 966 if ((dev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX) &&
938 NLA_PUT_FLAG(msg, NL80211_ATTR_OFFCHANNEL_TX_OK); 967 nla_put_flag(msg, NL80211_ATTR_OFFCHANNEL_TX_OK))
968 goto nla_put_failure;
939 969
940 if (mgmt_stypes) { 970 if (mgmt_stypes) {
941 u16 stypes; 971 u16 stypes;
@@ -953,9 +983,10 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
953 i = 0; 983 i = 0;
954 stypes = mgmt_stypes[ift].tx; 984 stypes = mgmt_stypes[ift].tx;
955 while (stypes) { 985 while (stypes) {
956 if (stypes & 1) 986 if ((stypes & 1) &&
957 NLA_PUT_U16(msg, NL80211_ATTR_FRAME_TYPE, 987 nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE,
958 (i << 4) | IEEE80211_FTYPE_MGMT); 988 (i << 4) | IEEE80211_FTYPE_MGMT))
989 goto nla_put_failure;
959 stypes >>= 1; 990 stypes >>= 1;
960 i++; 991 i++;
961 } 992 }
@@ -975,9 +1006,10 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
975 i = 0; 1006 i = 0;
976 stypes = mgmt_stypes[ift].rx; 1007 stypes = mgmt_stypes[ift].rx;
977 while (stypes) { 1008 while (stypes) {
978 if (stypes & 1) 1009 if ((stypes & 1) &&
979 NLA_PUT_U16(msg, NL80211_ATTR_FRAME_TYPE, 1010 nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE,
980 (i << 4) | IEEE80211_FTYPE_MGMT); 1011 (i << 4) | IEEE80211_FTYPE_MGMT))
1012 goto nla_put_failure;
981 stypes >>= 1; 1013 stypes >>= 1;
982 i++; 1014 i++;
983 } 1015 }
@@ -994,22 +1026,23 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
994 if (!nl_wowlan) 1026 if (!nl_wowlan)
995 goto nla_put_failure; 1027 goto nla_put_failure;
996 1028
997 if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_ANY) 1029 if (((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_ANY) &&
998 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_ANY); 1030 nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) ||
999 if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_DISCONNECT) 1031 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_DISCONNECT) &&
1000 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_DISCONNECT); 1032 nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) ||
1001 if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_MAGIC_PKT) 1033 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_MAGIC_PKT) &&
1002 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT); 1034 nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) ||
1003 if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) 1035 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) &&
1004 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED); 1036 nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED)) ||
1005 if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) 1037 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) &&
1006 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE); 1038 nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) ||
1007 if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) 1039 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) &&
1008 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST); 1040 nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) ||
1009 if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) 1041 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) &&
1010 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE); 1042 nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) ||
1011 if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_RFKILL_RELEASE) 1043 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_RFKILL_RELEASE) &&
1012 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE); 1044 nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE)))
1045 goto nla_put_failure;
1013 if (dev->wiphy.wowlan.n_patterns) { 1046 if (dev->wiphy.wowlan.n_patterns) {
1014 struct nl80211_wowlan_pattern_support pat = { 1047 struct nl80211_wowlan_pattern_support pat = {
1015 .max_patterns = dev->wiphy.wowlan.n_patterns, 1048 .max_patterns = dev->wiphy.wowlan.n_patterns,
@@ -1018,8 +1051,9 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
1018 .max_pattern_len = 1051 .max_pattern_len =
1019 dev->wiphy.wowlan.pattern_max_len, 1052 dev->wiphy.wowlan.pattern_max_len,
1020 }; 1053 };
1021 NLA_PUT(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN, 1054 if (nla_put(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN,
1022 sizeof(pat), &pat); 1055 sizeof(pat), &pat))
1056 goto nla_put_failure;
1023 } 1057 }
1024 1058
1025 nla_nest_end(msg, nl_wowlan); 1059 nla_nest_end(msg, nl_wowlan);
@@ -1032,16 +1066,20 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags,
1032 if (nl80211_put_iface_combinations(&dev->wiphy, msg)) 1066 if (nl80211_put_iface_combinations(&dev->wiphy, msg))
1033 goto nla_put_failure; 1067 goto nla_put_failure;
1034 1068
1035 if (dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME) 1069 if ((dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME) &&
1036 NLA_PUT_U32(msg, NL80211_ATTR_DEVICE_AP_SME, 1070 nla_put_u32(msg, NL80211_ATTR_DEVICE_AP_SME,
1037 dev->wiphy.ap_sme_capa); 1071 dev->wiphy.ap_sme_capa))
1072 goto nla_put_failure;
1038 1073
1039 NLA_PUT_U32(msg, NL80211_ATTR_FEATURE_FLAGS, dev->wiphy.features); 1074 if (nla_put_u32(msg, NL80211_ATTR_FEATURE_FLAGS,
1075 dev->wiphy.features))
1076 goto nla_put_failure;
1040 1077
1041 if (dev->wiphy.ht_capa_mod_mask) 1078 if (dev->wiphy.ht_capa_mod_mask &&
1042 NLA_PUT(msg, NL80211_ATTR_HT_CAPABILITY_MASK, 1079 nla_put(msg, NL80211_ATTR_HT_CAPABILITY_MASK,
1043 sizeof(*dev->wiphy.ht_capa_mod_mask), 1080 sizeof(*dev->wiphy.ht_capa_mod_mask),
1044 dev->wiphy.ht_capa_mod_mask); 1081 dev->wiphy.ht_capa_mod_mask))
1082 goto nla_put_failure;
1045 1083
1046 return genlmsg_end(msg, hdr); 1084 return genlmsg_end(msg, hdr);
1047 1085
@@ -1104,17 +1142,20 @@ static const struct nla_policy txq_params_policy[NL80211_TXQ_ATTR_MAX + 1] = {
1104static int parse_txq_params(struct nlattr *tb[], 1142static int parse_txq_params(struct nlattr *tb[],
1105 struct ieee80211_txq_params *txq_params) 1143 struct ieee80211_txq_params *txq_params)
1106{ 1144{
1107 if (!tb[NL80211_TXQ_ATTR_QUEUE] || !tb[NL80211_TXQ_ATTR_TXOP] || 1145 if (!tb[NL80211_TXQ_ATTR_AC] || !tb[NL80211_TXQ_ATTR_TXOP] ||
1108 !tb[NL80211_TXQ_ATTR_CWMIN] || !tb[NL80211_TXQ_ATTR_CWMAX] || 1146 !tb[NL80211_TXQ_ATTR_CWMIN] || !tb[NL80211_TXQ_ATTR_CWMAX] ||
1109 !tb[NL80211_TXQ_ATTR_AIFS]) 1147 !tb[NL80211_TXQ_ATTR_AIFS])
1110 return -EINVAL; 1148 return -EINVAL;
1111 1149
1112 txq_params->queue = nla_get_u8(tb[NL80211_TXQ_ATTR_QUEUE]); 1150 txq_params->ac = nla_get_u8(tb[NL80211_TXQ_ATTR_AC]);
1113 txq_params->txop = nla_get_u16(tb[NL80211_TXQ_ATTR_TXOP]); 1151 txq_params->txop = nla_get_u16(tb[NL80211_TXQ_ATTR_TXOP]);
1114 txq_params->cwmin = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMIN]); 1152 txq_params->cwmin = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMIN]);
1115 txq_params->cwmax = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMAX]); 1153 txq_params->cwmax = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMAX]);
1116 txq_params->aifs = nla_get_u8(tb[NL80211_TXQ_ATTR_AIFS]); 1154 txq_params->aifs = nla_get_u8(tb[NL80211_TXQ_ATTR_AIFS]);
1117 1155
1156 if (txq_params->ac >= NL80211_NUM_ACS)
1157 return -EINVAL;
1158
1118 return 0; 1159 return 0;
1119} 1160}
1120 1161
@@ -1489,14 +1530,28 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags,
1489 if (!hdr) 1530 if (!hdr)
1490 return -1; 1531 return -1;
1491 1532
1492 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); 1533 if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
1493 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 1534 nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
1494 NLA_PUT_STRING(msg, NL80211_ATTR_IFNAME, dev->name); 1535 nla_put_string(msg, NL80211_ATTR_IFNAME, dev->name) ||
1495 NLA_PUT_U32(msg, NL80211_ATTR_IFTYPE, dev->ieee80211_ptr->iftype); 1536 nla_put_u32(msg, NL80211_ATTR_IFTYPE,
1537 dev->ieee80211_ptr->iftype) ||
1538 nla_put_u32(msg, NL80211_ATTR_GENERATION,
1539 rdev->devlist_generation ^
1540 (cfg80211_rdev_list_generation << 2)))
1541 goto nla_put_failure;
1496 1542
1497 NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, 1543 if (rdev->ops->get_channel) {
1498 rdev->devlist_generation ^ 1544 struct ieee80211_channel *chan;
1499 (cfg80211_rdev_list_generation << 2)); 1545 enum nl80211_channel_type channel_type;
1546
1547 chan = rdev->ops->get_channel(&rdev->wiphy, &channel_type);
1548 if (chan &&
1549 (nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ,
1550 chan->center_freq) ||
1551 nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE,
1552 channel_type)))
1553 goto nla_put_failure;
1554 }
1500 1555
1501 return genlmsg_end(msg, hdr); 1556 return genlmsg_end(msg, hdr);
1502 1557
@@ -1794,35 +1849,34 @@ static void get_key_callback(void *c, struct key_params *params)
1794 struct nlattr *key; 1849 struct nlattr *key;
1795 struct get_key_cookie *cookie = c; 1850 struct get_key_cookie *cookie = c;
1796 1851
1797 if (params->key) 1852 if ((params->key &&
1798 NLA_PUT(cookie->msg, NL80211_ATTR_KEY_DATA, 1853 nla_put(cookie->msg, NL80211_ATTR_KEY_DATA,
1799 params->key_len, params->key); 1854 params->key_len, params->key)) ||
1800 1855 (params->seq &&
1801 if (params->seq) 1856 nla_put(cookie->msg, NL80211_ATTR_KEY_SEQ,
1802 NLA_PUT(cookie->msg, NL80211_ATTR_KEY_SEQ, 1857 params->seq_len, params->seq)) ||
1803 params->seq_len, params->seq); 1858 (params->cipher &&
1804 1859 nla_put_u32(cookie->msg, NL80211_ATTR_KEY_CIPHER,
1805 if (params->cipher) 1860 params->cipher)))
1806 NLA_PUT_U32(cookie->msg, NL80211_ATTR_KEY_CIPHER, 1861 goto nla_put_failure;
1807 params->cipher);
1808 1862
1809 key = nla_nest_start(cookie->msg, NL80211_ATTR_KEY); 1863 key = nla_nest_start(cookie->msg, NL80211_ATTR_KEY);
1810 if (!key) 1864 if (!key)
1811 goto nla_put_failure; 1865 goto nla_put_failure;
1812 1866
1813 if (params->key) 1867 if ((params->key &&
1814 NLA_PUT(cookie->msg, NL80211_KEY_DATA, 1868 nla_put(cookie->msg, NL80211_KEY_DATA,
1815 params->key_len, params->key); 1869 params->key_len, params->key)) ||
1816 1870 (params->seq &&
1817 if (params->seq) 1871 nla_put(cookie->msg, NL80211_KEY_SEQ,
1818 NLA_PUT(cookie->msg, NL80211_KEY_SEQ, 1872 params->seq_len, params->seq)) ||
1819 params->seq_len, params->seq); 1873 (params->cipher &&
1820 1874 nla_put_u32(cookie->msg, NL80211_KEY_CIPHER,
1821 if (params->cipher) 1875 params->cipher)))
1822 NLA_PUT_U32(cookie->msg, NL80211_KEY_CIPHER, 1876 goto nla_put_failure;
1823 params->cipher);
1824 1877
1825 NLA_PUT_U8(cookie->msg, NL80211_ATTR_KEY_IDX, cookie->idx); 1878 if (nla_put_u8(cookie->msg, NL80211_ATTR_KEY_IDX, cookie->idx))
1879 goto nla_put_failure;
1826 1880
1827 nla_nest_end(cookie->msg, key); 1881 nla_nest_end(cookie->msg, key);
1828 1882
@@ -1880,10 +1934,12 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
1880 cookie.msg = msg; 1934 cookie.msg = msg;
1881 cookie.idx = key_idx; 1935 cookie.idx = key_idx;
1882 1936
1883 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); 1937 if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
1884 NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_idx); 1938 nla_put_u8(msg, NL80211_ATTR_KEY_IDX, key_idx))
1885 if (mac_addr) 1939 goto nla_put_failure;
1886 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr); 1940 if (mac_addr &&
1941 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr))
1942 goto nla_put_failure;
1887 1943
1888 if (pairwise && mac_addr && 1944 if (pairwise && mac_addr &&
1889 !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) 1945 !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
@@ -2373,15 +2429,15 @@ static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info,
2373 2429
2374 /* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */ 2430 /* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */
2375 bitrate = cfg80211_calculate_bitrate(info); 2431 bitrate = cfg80211_calculate_bitrate(info);
2376 if (bitrate > 0) 2432 if ((bitrate > 0 &&
2377 NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate); 2433 nla_put_u16(msg, NL80211_RATE_INFO_BITRATE, bitrate)) ||
2378 2434 ((info->flags & RATE_INFO_FLAGS_MCS) &&
2379 if (info->flags & RATE_INFO_FLAGS_MCS) 2435 nla_put_u8(msg, NL80211_RATE_INFO_MCS, info->mcs)) ||
2380 NLA_PUT_U8(msg, NL80211_RATE_INFO_MCS, info->mcs); 2436 ((info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) &&
2381 if (info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH) 2437 nla_put_flag(msg, NL80211_RATE_INFO_40_MHZ_WIDTH)) ||
2382 NLA_PUT_FLAG(msg, NL80211_RATE_INFO_40_MHZ_WIDTH); 2438 ((info->flags & RATE_INFO_FLAGS_SHORT_GI) &&
2383 if (info->flags & RATE_INFO_FLAGS_SHORT_GI) 2439 nla_put_flag(msg, NL80211_RATE_INFO_SHORT_GI)))
2384 NLA_PUT_FLAG(msg, NL80211_RATE_INFO_SHORT_GI); 2440 goto nla_put_failure;
2385 2441
2386 nla_nest_end(msg, rate); 2442 nla_nest_end(msg, rate);
2387 return true; 2443 return true;
@@ -2403,43 +2459,50 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
2403 if (!hdr) 2459 if (!hdr)
2404 return -1; 2460 return -1;
2405 2461
2406 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); 2462 if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
2407 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr); 2463 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr) ||
2408 2464 nla_put_u32(msg, NL80211_ATTR_GENERATION, sinfo->generation))
2409 NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, sinfo->generation); 2465 goto nla_put_failure;
2410 2466
2411 sinfoattr = nla_nest_start(msg, NL80211_ATTR_STA_INFO); 2467 sinfoattr = nla_nest_start(msg, NL80211_ATTR_STA_INFO);
2412 if (!sinfoattr) 2468 if (!sinfoattr)
2413 goto nla_put_failure; 2469 goto nla_put_failure;
2414 if (sinfo->filled & STATION_INFO_CONNECTED_TIME) 2470 if ((sinfo->filled & STATION_INFO_CONNECTED_TIME) &&
2415 NLA_PUT_U32(msg, NL80211_STA_INFO_CONNECTED_TIME, 2471 nla_put_u32(msg, NL80211_STA_INFO_CONNECTED_TIME,
2416 sinfo->connected_time); 2472 sinfo->connected_time))
2417 if (sinfo->filled & STATION_INFO_INACTIVE_TIME) 2473 goto nla_put_failure;
2418 NLA_PUT_U32(msg, NL80211_STA_INFO_INACTIVE_TIME, 2474 if ((sinfo->filled & STATION_INFO_INACTIVE_TIME) &&
2419 sinfo->inactive_time); 2475 nla_put_u32(msg, NL80211_STA_INFO_INACTIVE_TIME,
2420 if (sinfo->filled & STATION_INFO_RX_BYTES) 2476 sinfo->inactive_time))
2421 NLA_PUT_U32(msg, NL80211_STA_INFO_RX_BYTES, 2477 goto nla_put_failure;
2422 sinfo->rx_bytes); 2478 if ((sinfo->filled & STATION_INFO_RX_BYTES) &&
2423 if (sinfo->filled & STATION_INFO_TX_BYTES) 2479 nla_put_u32(msg, NL80211_STA_INFO_RX_BYTES,
2424 NLA_PUT_U32(msg, NL80211_STA_INFO_TX_BYTES, 2480 sinfo->rx_bytes))
2425 sinfo->tx_bytes); 2481 goto nla_put_failure;
2426 if (sinfo->filled & STATION_INFO_LLID) 2482 if ((sinfo->filled & STATION_INFO_TX_BYTES) &&
2427 NLA_PUT_U16(msg, NL80211_STA_INFO_LLID, 2483 nla_put_u32(msg, NL80211_STA_INFO_TX_BYTES,
2428 sinfo->llid); 2484 sinfo->tx_bytes))
2429 if (sinfo->filled & STATION_INFO_PLID) 2485 goto nla_put_failure;
2430 NLA_PUT_U16(msg, NL80211_STA_INFO_PLID, 2486 if ((sinfo->filled & STATION_INFO_LLID) &&
2431 sinfo->plid); 2487 nla_put_u16(msg, NL80211_STA_INFO_LLID, sinfo->llid))
2432 if (sinfo->filled & STATION_INFO_PLINK_STATE) 2488 goto nla_put_failure;
2433 NLA_PUT_U8(msg, NL80211_STA_INFO_PLINK_STATE, 2489 if ((sinfo->filled & STATION_INFO_PLID) &&
2434 sinfo->plink_state); 2490 nla_put_u16(msg, NL80211_STA_INFO_PLID, sinfo->plid))
2491 goto nla_put_failure;
2492 if ((sinfo->filled & STATION_INFO_PLINK_STATE) &&
2493 nla_put_u8(msg, NL80211_STA_INFO_PLINK_STATE,
2494 sinfo->plink_state))
2495 goto nla_put_failure;
2435 switch (rdev->wiphy.signal_type) { 2496 switch (rdev->wiphy.signal_type) {
2436 case CFG80211_SIGNAL_TYPE_MBM: 2497 case CFG80211_SIGNAL_TYPE_MBM:
2437 if (sinfo->filled & STATION_INFO_SIGNAL) 2498 if ((sinfo->filled & STATION_INFO_SIGNAL) &&
2438 NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL, 2499 nla_put_u8(msg, NL80211_STA_INFO_SIGNAL,
2439 sinfo->signal); 2500 sinfo->signal))
2440 if (sinfo->filled & STATION_INFO_SIGNAL_AVG) 2501 goto nla_put_failure;
2441 NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL_AVG, 2502 if ((sinfo->filled & STATION_INFO_SIGNAL_AVG) &&
2442 sinfo->signal_avg); 2503 nla_put_u8(msg, NL80211_STA_INFO_SIGNAL_AVG,
2504 sinfo->signal_avg))
2505 goto nla_put_failure;
2443 break; 2506 break;
2444 default: 2507 default:
2445 break; 2508 break;
@@ -2454,49 +2517,60 @@ static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
2454 NL80211_STA_INFO_RX_BITRATE)) 2517 NL80211_STA_INFO_RX_BITRATE))
2455 goto nla_put_failure; 2518 goto nla_put_failure;
2456 } 2519 }
2457 if (sinfo->filled & STATION_INFO_RX_PACKETS) 2520 if ((sinfo->filled & STATION_INFO_RX_PACKETS) &&
2458 NLA_PUT_U32(msg, NL80211_STA_INFO_RX_PACKETS, 2521 nla_put_u32(msg, NL80211_STA_INFO_RX_PACKETS,
2459 sinfo->rx_packets); 2522 sinfo->rx_packets))
2460 if (sinfo->filled & STATION_INFO_TX_PACKETS) 2523 goto nla_put_failure;
2461 NLA_PUT_U32(msg, NL80211_STA_INFO_TX_PACKETS, 2524 if ((sinfo->filled & STATION_INFO_TX_PACKETS) &&
2462 sinfo->tx_packets); 2525 nla_put_u32(msg, NL80211_STA_INFO_TX_PACKETS,
2463 if (sinfo->filled & STATION_INFO_TX_RETRIES) 2526 sinfo->tx_packets))
2464 NLA_PUT_U32(msg, NL80211_STA_INFO_TX_RETRIES, 2527 goto nla_put_failure;
2465 sinfo->tx_retries); 2528 if ((sinfo->filled & STATION_INFO_TX_RETRIES) &&
2466 if (sinfo->filled & STATION_INFO_TX_FAILED) 2529 nla_put_u32(msg, NL80211_STA_INFO_TX_RETRIES,
2467 NLA_PUT_U32(msg, NL80211_STA_INFO_TX_FAILED, 2530 sinfo->tx_retries))
2468 sinfo->tx_failed); 2531 goto nla_put_failure;
2469 if (sinfo->filled & STATION_INFO_BEACON_LOSS_COUNT) 2532 if ((sinfo->filled & STATION_INFO_TX_FAILED) &&
2470 NLA_PUT_U32(msg, NL80211_STA_INFO_BEACON_LOSS, 2533 nla_put_u32(msg, NL80211_STA_INFO_TX_FAILED,
2471 sinfo->beacon_loss_count); 2534 sinfo->tx_failed))
2535 goto nla_put_failure;
2536 if ((sinfo->filled & STATION_INFO_BEACON_LOSS_COUNT) &&
2537 nla_put_u32(msg, NL80211_STA_INFO_BEACON_LOSS,
2538 sinfo->beacon_loss_count))
2539 goto nla_put_failure;
2472 if (sinfo->filled & STATION_INFO_BSS_PARAM) { 2540 if (sinfo->filled & STATION_INFO_BSS_PARAM) {
2473 bss_param = nla_nest_start(msg, NL80211_STA_INFO_BSS_PARAM); 2541 bss_param = nla_nest_start(msg, NL80211_STA_INFO_BSS_PARAM);
2474 if (!bss_param) 2542 if (!bss_param)
2475 goto nla_put_failure; 2543 goto nla_put_failure;
2476 2544
2477 if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_CTS_PROT) 2545 if (((sinfo->bss_param.flags & BSS_PARAM_FLAGS_CTS_PROT) &&
2478 NLA_PUT_FLAG(msg, NL80211_STA_BSS_PARAM_CTS_PROT); 2546 nla_put_flag(msg, NL80211_STA_BSS_PARAM_CTS_PROT)) ||
2479 if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_PREAMBLE) 2547 ((sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_PREAMBLE) &&
2480 NLA_PUT_FLAG(msg, NL80211_STA_BSS_PARAM_SHORT_PREAMBLE); 2548 nla_put_flag(msg, NL80211_STA_BSS_PARAM_SHORT_PREAMBLE)) ||
2481 if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_SLOT_TIME) 2549 ((sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_SLOT_TIME) &&
2482 NLA_PUT_FLAG(msg, 2550 nla_put_flag(msg, NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME)) ||
2483 NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME); 2551 nla_put_u8(msg, NL80211_STA_BSS_PARAM_DTIM_PERIOD,
2484 NLA_PUT_U8(msg, NL80211_STA_BSS_PARAM_DTIM_PERIOD, 2552 sinfo->bss_param.dtim_period) ||
2485 sinfo->bss_param.dtim_period); 2553 nla_put_u16(msg, NL80211_STA_BSS_PARAM_BEACON_INTERVAL,
2486 NLA_PUT_U16(msg, NL80211_STA_BSS_PARAM_BEACON_INTERVAL, 2554 sinfo->bss_param.beacon_interval))
2487 sinfo->bss_param.beacon_interval); 2555 goto nla_put_failure;
2488 2556
2489 nla_nest_end(msg, bss_param); 2557 nla_nest_end(msg, bss_param);
2490 } 2558 }
2491 if (sinfo->filled & STATION_INFO_STA_FLAGS) 2559 if ((sinfo->filled & STATION_INFO_STA_FLAGS) &&
2492 NLA_PUT(msg, NL80211_STA_INFO_STA_FLAGS, 2560 nla_put(msg, NL80211_STA_INFO_STA_FLAGS,
2493 sizeof(struct nl80211_sta_flag_update), 2561 sizeof(struct nl80211_sta_flag_update),
2494 &sinfo->sta_flags); 2562 &sinfo->sta_flags))
2563 goto nla_put_failure;
2564 if ((sinfo->filled & STATION_INFO_T_OFFSET) &&
2565 nla_put_u64(msg, NL80211_STA_INFO_T_OFFSET,
2566 sinfo->t_offset))
2567 goto nla_put_failure;
2495 nla_nest_end(msg, sinfoattr); 2568 nla_nest_end(msg, sinfoattr);
2496 2569
2497 if (sinfo->filled & STATION_INFO_ASSOC_REQ_IES) 2570 if ((sinfo->filled & STATION_INFO_ASSOC_REQ_IES) &&
2498 NLA_PUT(msg, NL80211_ATTR_IE, sinfo->assoc_req_ies_len, 2571 nla_put(msg, NL80211_ATTR_IE, sinfo->assoc_req_ies_len,
2499 sinfo->assoc_req_ies); 2572 sinfo->assoc_req_ies))
2573 goto nla_put_failure;
2500 2574
2501 return genlmsg_end(msg, hdr); 2575 return genlmsg_end(msg, hdr);
2502 2576
@@ -2918,36 +2992,37 @@ static int nl80211_send_mpath(struct sk_buff *msg, u32 pid, u32 seq,
2918 if (!hdr) 2992 if (!hdr)
2919 return -1; 2993 return -1;
2920 2994
2921 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); 2995 if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
2922 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, dst); 2996 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, dst) ||
2923 NLA_PUT(msg, NL80211_ATTR_MPATH_NEXT_HOP, ETH_ALEN, next_hop); 2997 nla_put(msg, NL80211_ATTR_MPATH_NEXT_HOP, ETH_ALEN, next_hop) ||
2924 2998 nla_put_u32(msg, NL80211_ATTR_GENERATION, pinfo->generation))
2925 NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, pinfo->generation); 2999 goto nla_put_failure;
2926 3000
2927 pinfoattr = nla_nest_start(msg, NL80211_ATTR_MPATH_INFO); 3001 pinfoattr = nla_nest_start(msg, NL80211_ATTR_MPATH_INFO);
2928 if (!pinfoattr) 3002 if (!pinfoattr)
2929 goto nla_put_failure; 3003 goto nla_put_failure;
2930 if (pinfo->filled & MPATH_INFO_FRAME_QLEN) 3004 if ((pinfo->filled & MPATH_INFO_FRAME_QLEN) &&
2931 NLA_PUT_U32(msg, NL80211_MPATH_INFO_FRAME_QLEN, 3005 nla_put_u32(msg, NL80211_MPATH_INFO_FRAME_QLEN,
2932 pinfo->frame_qlen); 3006 pinfo->frame_qlen))
2933 if (pinfo->filled & MPATH_INFO_SN) 3007 goto nla_put_failure;
2934 NLA_PUT_U32(msg, NL80211_MPATH_INFO_SN, 3008 if (((pinfo->filled & MPATH_INFO_SN) &&
2935 pinfo->sn); 3009 nla_put_u32(msg, NL80211_MPATH_INFO_SN, pinfo->sn)) ||
2936 if (pinfo->filled & MPATH_INFO_METRIC) 3010 ((pinfo->filled & MPATH_INFO_METRIC) &&
2937 NLA_PUT_U32(msg, NL80211_MPATH_INFO_METRIC, 3011 nla_put_u32(msg, NL80211_MPATH_INFO_METRIC,
2938 pinfo->metric); 3012 pinfo->metric)) ||
2939 if (pinfo->filled & MPATH_INFO_EXPTIME) 3013 ((pinfo->filled & MPATH_INFO_EXPTIME) &&
2940 NLA_PUT_U32(msg, NL80211_MPATH_INFO_EXPTIME, 3014 nla_put_u32(msg, NL80211_MPATH_INFO_EXPTIME,
2941 pinfo->exptime); 3015 pinfo->exptime)) ||
2942 if (pinfo->filled & MPATH_INFO_FLAGS) 3016 ((pinfo->filled & MPATH_INFO_FLAGS) &&
2943 NLA_PUT_U8(msg, NL80211_MPATH_INFO_FLAGS, 3017 nla_put_u8(msg, NL80211_MPATH_INFO_FLAGS,
2944 pinfo->flags); 3018 pinfo->flags)) ||
2945 if (pinfo->filled & MPATH_INFO_DISCOVERY_TIMEOUT) 3019 ((pinfo->filled & MPATH_INFO_DISCOVERY_TIMEOUT) &&
2946 NLA_PUT_U32(msg, NL80211_MPATH_INFO_DISCOVERY_TIMEOUT, 3020 nla_put_u32(msg, NL80211_MPATH_INFO_DISCOVERY_TIMEOUT,
2947 pinfo->discovery_timeout); 3021 pinfo->discovery_timeout)) ||
2948 if (pinfo->filled & MPATH_INFO_DISCOVERY_RETRIES) 3022 ((pinfo->filled & MPATH_INFO_DISCOVERY_RETRIES) &&
2949 NLA_PUT_U8(msg, NL80211_MPATH_INFO_DISCOVERY_RETRIES, 3023 nla_put_u8(msg, NL80211_MPATH_INFO_DISCOVERY_RETRIES,
2950 pinfo->discovery_retries); 3024 pinfo->discovery_retries)))
3025 goto nla_put_failure;
2951 3026
2952 nla_nest_end(msg, pinfoattr); 3027 nla_nest_end(msg, pinfoattr);
2953 3028
@@ -3273,47 +3348,50 @@ static int nl80211_get_mesh_config(struct sk_buff *skb,
3273 pinfoattr = nla_nest_start(msg, NL80211_ATTR_MESH_CONFIG); 3348 pinfoattr = nla_nest_start(msg, NL80211_ATTR_MESH_CONFIG);
3274 if (!pinfoattr) 3349 if (!pinfoattr)
3275 goto nla_put_failure; 3350 goto nla_put_failure;
3276 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); 3351 if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
3277 NLA_PUT_U16(msg, NL80211_MESHCONF_RETRY_TIMEOUT, 3352 nla_put_u16(msg, NL80211_MESHCONF_RETRY_TIMEOUT,
3278 cur_params.dot11MeshRetryTimeout); 3353 cur_params.dot11MeshRetryTimeout) ||
3279 NLA_PUT_U16(msg, NL80211_MESHCONF_CONFIRM_TIMEOUT, 3354 nla_put_u16(msg, NL80211_MESHCONF_CONFIRM_TIMEOUT,
3280 cur_params.dot11MeshConfirmTimeout); 3355 cur_params.dot11MeshConfirmTimeout) ||
3281 NLA_PUT_U16(msg, NL80211_MESHCONF_HOLDING_TIMEOUT, 3356 nla_put_u16(msg, NL80211_MESHCONF_HOLDING_TIMEOUT,
3282 cur_params.dot11MeshHoldingTimeout); 3357 cur_params.dot11MeshHoldingTimeout) ||
3283 NLA_PUT_U16(msg, NL80211_MESHCONF_MAX_PEER_LINKS, 3358 nla_put_u16(msg, NL80211_MESHCONF_MAX_PEER_LINKS,
3284 cur_params.dot11MeshMaxPeerLinks); 3359 cur_params.dot11MeshMaxPeerLinks) ||
3285 NLA_PUT_U8(msg, NL80211_MESHCONF_MAX_RETRIES, 3360 nla_put_u8(msg, NL80211_MESHCONF_MAX_RETRIES,
3286 cur_params.dot11MeshMaxRetries); 3361 cur_params.dot11MeshMaxRetries) ||
3287 NLA_PUT_U8(msg, NL80211_MESHCONF_TTL, 3362 nla_put_u8(msg, NL80211_MESHCONF_TTL,
3288 cur_params.dot11MeshTTL); 3363 cur_params.dot11MeshTTL) ||
3289 NLA_PUT_U8(msg, NL80211_MESHCONF_ELEMENT_TTL, 3364 nla_put_u8(msg, NL80211_MESHCONF_ELEMENT_TTL,
3290 cur_params.element_ttl); 3365 cur_params.element_ttl) ||
3291 NLA_PUT_U8(msg, NL80211_MESHCONF_AUTO_OPEN_PLINKS, 3366 nla_put_u8(msg, NL80211_MESHCONF_AUTO_OPEN_PLINKS,
3292 cur_params.auto_open_plinks); 3367 cur_params.auto_open_plinks) ||
3293 NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, 3368 nla_put_u32(msg, NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR,
3294 cur_params.dot11MeshHWMPmaxPREQretries); 3369 cur_params.dot11MeshNbrOffsetMaxNeighbor) ||
3295 NLA_PUT_U32(msg, NL80211_MESHCONF_PATH_REFRESH_TIME, 3370 nla_put_u8(msg, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
3296 cur_params.path_refresh_time); 3371 cur_params.dot11MeshHWMPmaxPREQretries) ||
3297 NLA_PUT_U16(msg, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT, 3372 nla_put_u32(msg, NL80211_MESHCONF_PATH_REFRESH_TIME,
3298 cur_params.min_discovery_timeout); 3373 cur_params.path_refresh_time) ||
3299 NLA_PUT_U32(msg, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT, 3374 nla_put_u16(msg, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT,
3300 cur_params.dot11MeshHWMPactivePathTimeout); 3375 cur_params.min_discovery_timeout) ||
3301 NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL, 3376 nla_put_u32(msg, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT,
3302 cur_params.dot11MeshHWMPpreqMinInterval); 3377 cur_params.dot11MeshHWMPactivePathTimeout) ||
3303 NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL, 3378 nla_put_u16(msg, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
3304 cur_params.dot11MeshHWMPperrMinInterval); 3379 cur_params.dot11MeshHWMPpreqMinInterval) ||
3305 NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME, 3380 nla_put_u16(msg, NL80211_MESHCONF_HWMP_PERR_MIN_INTERVAL,
3306 cur_params.dot11MeshHWMPnetDiameterTraversalTime); 3381 cur_params.dot11MeshHWMPperrMinInterval) ||
3307 NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_ROOTMODE, 3382 nla_put_u16(msg, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
3308 cur_params.dot11MeshHWMPRootMode); 3383 cur_params.dot11MeshHWMPnetDiameterTraversalTime) ||
3309 NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_RANN_INTERVAL, 3384 nla_put_u8(msg, NL80211_MESHCONF_HWMP_ROOTMODE,
3310 cur_params.dot11MeshHWMPRannInterval); 3385 cur_params.dot11MeshHWMPRootMode) ||
3311 NLA_PUT_U8(msg, NL80211_MESHCONF_GATE_ANNOUNCEMENTS, 3386 nla_put_u16(msg, NL80211_MESHCONF_HWMP_RANN_INTERVAL,
3312 cur_params.dot11MeshGateAnnouncementProtocol); 3387 cur_params.dot11MeshHWMPRannInterval) ||
3313 NLA_PUT_U8(msg, NL80211_MESHCONF_FORWARDING, 3388 nla_put_u8(msg, NL80211_MESHCONF_GATE_ANNOUNCEMENTS,
3314 cur_params.dot11MeshForwarding); 3389 cur_params.dot11MeshGateAnnouncementProtocol) ||
3315 NLA_PUT_U32(msg, NL80211_MESHCONF_RSSI_THRESHOLD, 3390 nla_put_u8(msg, NL80211_MESHCONF_FORWARDING,
3316 cur_params.rssi_threshold); 3391 cur_params.dot11MeshForwarding) ||
3392 nla_put_u32(msg, NL80211_MESHCONF_RSSI_THRESHOLD,
3393 cur_params.rssi_threshold))
3394 goto nla_put_failure;
3317 nla_nest_end(msg, pinfoattr); 3395 nla_nest_end(msg, pinfoattr);
3318 genlmsg_end(msg, hdr); 3396 genlmsg_end(msg, hdr);
3319 return genlmsg_reply(msg, info); 3397 return genlmsg_reply(msg, info);
@@ -3334,6 +3412,7 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A
3334 [NL80211_MESHCONF_TTL] = { .type = NLA_U8 }, 3412 [NL80211_MESHCONF_TTL] = { .type = NLA_U8 },
3335 [NL80211_MESHCONF_ELEMENT_TTL] = { .type = NLA_U8 }, 3413 [NL80211_MESHCONF_ELEMENT_TTL] = { .type = NLA_U8 },
3336 [NL80211_MESHCONF_AUTO_OPEN_PLINKS] = { .type = NLA_U8 }, 3414 [NL80211_MESHCONF_AUTO_OPEN_PLINKS] = { .type = NLA_U8 },
3415 [NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR] = { .type = NLA_U32 },
3337 3416
3338 [NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES] = { .type = NLA_U8 }, 3417 [NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES] = { .type = NLA_U8 },
3339 [NL80211_MESHCONF_PATH_REFRESH_TIME] = { .type = NLA_U32 }, 3418 [NL80211_MESHCONF_PATH_REFRESH_TIME] = { .type = NLA_U32 },
@@ -3351,6 +3430,7 @@ static const struct nla_policy nl80211_meshconf_params_policy[NL80211_MESHCONF_A
3351 3430
3352static const struct nla_policy 3431static const struct nla_policy
3353 nl80211_mesh_setup_params_policy[NL80211_MESH_SETUP_ATTR_MAX+1] = { 3432 nl80211_mesh_setup_params_policy[NL80211_MESH_SETUP_ATTR_MAX+1] = {
3433 [NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC] = { .type = NLA_U8 },
3354 [NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL] = { .type = NLA_U8 }, 3434 [NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL] = { .type = NLA_U8 },
3355 [NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC] = { .type = NLA_U8 }, 3435 [NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC] = { .type = NLA_U8 },
3356 [NL80211_MESH_SETUP_USERSPACE_AUTH] = { .type = NLA_FLAG }, 3436 [NL80211_MESH_SETUP_USERSPACE_AUTH] = { .type = NLA_FLAG },
@@ -3403,6 +3483,9 @@ do {\
3403 mask, NL80211_MESHCONF_ELEMENT_TTL, nla_get_u8); 3483 mask, NL80211_MESHCONF_ELEMENT_TTL, nla_get_u8);
3404 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, auto_open_plinks, 3484 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, auto_open_plinks,
3405 mask, NL80211_MESHCONF_AUTO_OPEN_PLINKS, nla_get_u8); 3485 mask, NL80211_MESHCONF_AUTO_OPEN_PLINKS, nla_get_u8);
3486 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshNbrOffsetMaxNeighbor,
3487 mask, NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR,
3488 nla_get_u32);
3406 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPmaxPREQretries, 3489 FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPmaxPREQretries,
3407 mask, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, 3490 mask, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
3408 nla_get_u8); 3491 nla_get_u8);
@@ -3460,6 +3543,12 @@ static int nl80211_parse_mesh_setup(struct genl_info *info,
3460 nl80211_mesh_setup_params_policy)) 3543 nl80211_mesh_setup_params_policy))
3461 return -EINVAL; 3544 return -EINVAL;
3462 3545
3546 if (tb[NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC])
3547 setup->sync_method =
3548 (nla_get_u8(tb[NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC])) ?
3549 IEEE80211_SYNC_METHOD_VENDOR :
3550 IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET;
3551
3463 if (tb[NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL]) 3552 if (tb[NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL])
3464 setup->path_sel_proto = 3553 setup->path_sel_proto =
3465 (nla_get_u8(tb[NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL])) ? 3554 (nla_get_u8(tb[NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL])) ?
@@ -3544,11 +3633,12 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
3544 if (!hdr) 3633 if (!hdr)
3545 goto put_failure; 3634 goto put_failure;
3546 3635
3547 NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2, 3636 if (nla_put_string(msg, NL80211_ATTR_REG_ALPHA2,
3548 cfg80211_regdomain->alpha2); 3637 cfg80211_regdomain->alpha2) ||
3549 if (cfg80211_regdomain->dfs_region) 3638 (cfg80211_regdomain->dfs_region &&
3550 NLA_PUT_U8(msg, NL80211_ATTR_DFS_REGION, 3639 nla_put_u8(msg, NL80211_ATTR_DFS_REGION,
3551 cfg80211_regdomain->dfs_region); 3640 cfg80211_regdomain->dfs_region)))
3641 goto nla_put_failure;
3552 3642
3553 nl_reg_rules = nla_nest_start(msg, NL80211_ATTR_REG_RULES); 3643 nl_reg_rules = nla_nest_start(msg, NL80211_ATTR_REG_RULES);
3554 if (!nl_reg_rules) 3644 if (!nl_reg_rules)
@@ -3568,18 +3658,19 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
3568 if (!nl_reg_rule) 3658 if (!nl_reg_rule)
3569 goto nla_put_failure; 3659 goto nla_put_failure;
3570 3660
3571 NLA_PUT_U32(msg, NL80211_ATTR_REG_RULE_FLAGS, 3661 if (nla_put_u32(msg, NL80211_ATTR_REG_RULE_FLAGS,
3572 reg_rule->flags); 3662 reg_rule->flags) ||
3573 NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_START, 3663 nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_START,
3574 freq_range->start_freq_khz); 3664 freq_range->start_freq_khz) ||
3575 NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_END, 3665 nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_END,
3576 freq_range->end_freq_khz); 3666 freq_range->end_freq_khz) ||
3577 NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_MAX_BW, 3667 nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_MAX_BW,
3578 freq_range->max_bandwidth_khz); 3668 freq_range->max_bandwidth_khz) ||
3579 NLA_PUT_U32(msg, NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN, 3669 nla_put_u32(msg, NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN,
3580 power_rule->max_antenna_gain); 3670 power_rule->max_antenna_gain) ||
3581 NLA_PUT_U32(msg, NL80211_ATTR_POWER_RULE_MAX_EIRP, 3671 nla_put_u32(msg, NL80211_ATTR_POWER_RULE_MAX_EIRP,
3582 power_rule->max_eirp); 3672 power_rule->max_eirp))
3673 goto nla_put_failure;
3583 3674
3584 nla_nest_end(msg, nl_reg_rule); 3675 nla_nest_end(msg, nl_reg_rule);
3585 } 3676 }
@@ -4150,37 +4241,44 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
4150 4241
4151 genl_dump_check_consistent(cb, hdr, &nl80211_fam); 4242 genl_dump_check_consistent(cb, hdr, &nl80211_fam);
4152 4243
4153 NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, rdev->bss_generation); 4244 if (nla_put_u32(msg, NL80211_ATTR_GENERATION, rdev->bss_generation) ||
4154 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex); 4245 nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex))
4246 goto nla_put_failure;
4155 4247
4156 bss = nla_nest_start(msg, NL80211_ATTR_BSS); 4248 bss = nla_nest_start(msg, NL80211_ATTR_BSS);
4157 if (!bss) 4249 if (!bss)
4158 goto nla_put_failure; 4250 goto nla_put_failure;
4159 if (!is_zero_ether_addr(res->bssid)) 4251 if ((!is_zero_ether_addr(res->bssid) &&
4160 NLA_PUT(msg, NL80211_BSS_BSSID, ETH_ALEN, res->bssid); 4252 nla_put(msg, NL80211_BSS_BSSID, ETH_ALEN, res->bssid)) ||
4161 if (res->information_elements && res->len_information_elements) 4253 (res->information_elements && res->len_information_elements &&
4162 NLA_PUT(msg, NL80211_BSS_INFORMATION_ELEMENTS, 4254 nla_put(msg, NL80211_BSS_INFORMATION_ELEMENTS,
4163 res->len_information_elements, 4255 res->len_information_elements,
4164 res->information_elements); 4256 res->information_elements)) ||
4165 if (res->beacon_ies && res->len_beacon_ies && 4257 (res->beacon_ies && res->len_beacon_ies &&
4166 res->beacon_ies != res->information_elements) 4258 res->beacon_ies != res->information_elements &&
4167 NLA_PUT(msg, NL80211_BSS_BEACON_IES, 4259 nla_put(msg, NL80211_BSS_BEACON_IES,
4168 res->len_beacon_ies, res->beacon_ies); 4260 res->len_beacon_ies, res->beacon_ies)))
4169 if (res->tsf) 4261 goto nla_put_failure;
4170 NLA_PUT_U64(msg, NL80211_BSS_TSF, res->tsf); 4262 if (res->tsf &&
4171 if (res->beacon_interval) 4263 nla_put_u64(msg, NL80211_BSS_TSF, res->tsf))
4172 NLA_PUT_U16(msg, NL80211_BSS_BEACON_INTERVAL, res->beacon_interval); 4264 goto nla_put_failure;
4173 NLA_PUT_U16(msg, NL80211_BSS_CAPABILITY, res->capability); 4265 if (res->beacon_interval &&
4174 NLA_PUT_U32(msg, NL80211_BSS_FREQUENCY, res->channel->center_freq); 4266 nla_put_u16(msg, NL80211_BSS_BEACON_INTERVAL, res->beacon_interval))
4175 NLA_PUT_U32(msg, NL80211_BSS_SEEN_MS_AGO, 4267 goto nla_put_failure;
4176 jiffies_to_msecs(jiffies - intbss->ts)); 4268 if (nla_put_u16(msg, NL80211_BSS_CAPABILITY, res->capability) ||
4269 nla_put_u32(msg, NL80211_BSS_FREQUENCY, res->channel->center_freq) ||
4270 nla_put_u32(msg, NL80211_BSS_SEEN_MS_AGO,
4271 jiffies_to_msecs(jiffies - intbss->ts)))
4272 goto nla_put_failure;
4177 4273
4178 switch (rdev->wiphy.signal_type) { 4274 switch (rdev->wiphy.signal_type) {
4179 case CFG80211_SIGNAL_TYPE_MBM: 4275 case CFG80211_SIGNAL_TYPE_MBM:
4180 NLA_PUT_U32(msg, NL80211_BSS_SIGNAL_MBM, res->signal); 4276 if (nla_put_u32(msg, NL80211_BSS_SIGNAL_MBM, res->signal))
4277 goto nla_put_failure;
4181 break; 4278 break;
4182 case CFG80211_SIGNAL_TYPE_UNSPEC: 4279 case CFG80211_SIGNAL_TYPE_UNSPEC:
4183 NLA_PUT_U8(msg, NL80211_BSS_SIGNAL_UNSPEC, res->signal); 4280 if (nla_put_u8(msg, NL80211_BSS_SIGNAL_UNSPEC, res->signal))
4281 goto nla_put_failure;
4184 break; 4282 break;
4185 default: 4283 default:
4186 break; 4284 break;
@@ -4189,14 +4287,16 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
4189 switch (wdev->iftype) { 4287 switch (wdev->iftype) {
4190 case NL80211_IFTYPE_P2P_CLIENT: 4288 case NL80211_IFTYPE_P2P_CLIENT:
4191 case NL80211_IFTYPE_STATION: 4289 case NL80211_IFTYPE_STATION:
4192 if (intbss == wdev->current_bss) 4290 if (intbss == wdev->current_bss &&
4193 NLA_PUT_U32(msg, NL80211_BSS_STATUS, 4291 nla_put_u32(msg, NL80211_BSS_STATUS,
4194 NL80211_BSS_STATUS_ASSOCIATED); 4292 NL80211_BSS_STATUS_ASSOCIATED))
4293 goto nla_put_failure;
4195 break; 4294 break;
4196 case NL80211_IFTYPE_ADHOC: 4295 case NL80211_IFTYPE_ADHOC:
4197 if (intbss == wdev->current_bss) 4296 if (intbss == wdev->current_bss &&
4198 NLA_PUT_U32(msg, NL80211_BSS_STATUS, 4297 nla_put_u32(msg, NL80211_BSS_STATUS,
4199 NL80211_BSS_STATUS_IBSS_JOINED); 4298 NL80211_BSS_STATUS_IBSS_JOINED))
4299 goto nla_put_failure;
4200 break; 4300 break;
4201 default: 4301 default:
4202 break; 4302 break;
@@ -4265,34 +4365,43 @@ static int nl80211_send_survey(struct sk_buff *msg, u32 pid, u32 seq,
4265 if (!hdr) 4365 if (!hdr)
4266 return -ENOMEM; 4366 return -ENOMEM;
4267 4367
4268 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); 4368 if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex))
4369 goto nla_put_failure;
4269 4370
4270 infoattr = nla_nest_start(msg, NL80211_ATTR_SURVEY_INFO); 4371 infoattr = nla_nest_start(msg, NL80211_ATTR_SURVEY_INFO);
4271 if (!infoattr) 4372 if (!infoattr)
4272 goto nla_put_failure; 4373 goto nla_put_failure;
4273 4374
4274 NLA_PUT_U32(msg, NL80211_SURVEY_INFO_FREQUENCY, 4375 if (nla_put_u32(msg, NL80211_SURVEY_INFO_FREQUENCY,
4275 survey->channel->center_freq); 4376 survey->channel->center_freq))
4276 if (survey->filled & SURVEY_INFO_NOISE_DBM) 4377 goto nla_put_failure;
4277 NLA_PUT_U8(msg, NL80211_SURVEY_INFO_NOISE, 4378
4278 survey->noise); 4379 if ((survey->filled & SURVEY_INFO_NOISE_DBM) &&
4279 if (survey->filled & SURVEY_INFO_IN_USE) 4380 nla_put_u8(msg, NL80211_SURVEY_INFO_NOISE, survey->noise))
4280 NLA_PUT_FLAG(msg, NL80211_SURVEY_INFO_IN_USE); 4381 goto nla_put_failure;
4281 if (survey->filled & SURVEY_INFO_CHANNEL_TIME) 4382 if ((survey->filled & SURVEY_INFO_IN_USE) &&
4282 NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME, 4383 nla_put_flag(msg, NL80211_SURVEY_INFO_IN_USE))
4283 survey->channel_time); 4384 goto nla_put_failure;
4284 if (survey->filled & SURVEY_INFO_CHANNEL_TIME_BUSY) 4385 if ((survey->filled & SURVEY_INFO_CHANNEL_TIME) &&
4285 NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_BUSY, 4386 nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME,
4286 survey->channel_time_busy); 4387 survey->channel_time))
4287 if (survey->filled & SURVEY_INFO_CHANNEL_TIME_EXT_BUSY) 4388 goto nla_put_failure;
4288 NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_EXT_BUSY, 4389 if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_BUSY) &&
4289 survey->channel_time_ext_busy); 4390 nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_BUSY,
4290 if (survey->filled & SURVEY_INFO_CHANNEL_TIME_RX) 4391 survey->channel_time_busy))
4291 NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_RX, 4392 goto nla_put_failure;
4292 survey->channel_time_rx); 4393 if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_EXT_BUSY) &&
4293 if (survey->filled & SURVEY_INFO_CHANNEL_TIME_TX) 4394 nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_EXT_BUSY,
4294 NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_TX, 4395 survey->channel_time_ext_busy))
4295 survey->channel_time_tx); 4396 goto nla_put_failure;
4397 if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_RX) &&
4398 nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_RX,
4399 survey->channel_time_rx))
4400 goto nla_put_failure;
4401 if ((survey->filled & SURVEY_INFO_CHANNEL_TIME_TX) &&
4402 nla_put_u64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_TX,
4403 survey->channel_time_tx))
4404 goto nla_put_failure;
4296 4405
4297 nla_nest_end(msg, infoattr); 4406 nla_nest_end(msg, infoattr);
4298 4407
@@ -4973,7 +5082,7 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
4973 NL80211_CMD_TESTMODE); 5082 NL80211_CMD_TESTMODE);
4974 struct nlattr *tmdata; 5083 struct nlattr *tmdata;
4975 5084
4976 if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx) < 0) { 5085 if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx)) {
4977 genlmsg_cancel(skb, hdr); 5086 genlmsg_cancel(skb, hdr);
4978 break; 5087 break;
4979 } 5088 }
@@ -5024,7 +5133,8 @@ __cfg80211_testmode_alloc_skb(struct cfg80211_registered_device *rdev,
5024 return NULL; 5133 return NULL;
5025 } 5134 }
5026 5135
5027 NLA_PUT_U32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 5136 if (nla_put_u32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx))
5137 goto nla_put_failure;
5028 data = nla_nest_start(skb, NL80211_ATTR_TESTDATA); 5138 data = nla_nest_start(skb, NL80211_ATTR_TESTDATA);
5029 5139
5030 ((void **)skb->cb)[0] = rdev; 5140 ((void **)skb->cb)[0] = rdev;
@@ -5403,7 +5513,8 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
5403 if (err) 5513 if (err)
5404 goto free_msg; 5514 goto free_msg;
5405 5515
5406 NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); 5516 if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
5517 goto nla_put_failure;
5407 5518
5408 genlmsg_end(msg, hdr); 5519 genlmsg_end(msg, hdr);
5409 5520
@@ -5545,6 +5656,9 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
5545 sband, 5656 sband,
5546 nla_data(tb[NL80211_TXRATE_LEGACY]), 5657 nla_data(tb[NL80211_TXRATE_LEGACY]),
5547 nla_len(tb[NL80211_TXRATE_LEGACY])); 5658 nla_len(tb[NL80211_TXRATE_LEGACY]));
5659 if ((mask.control[band].legacy == 0) &&
5660 nla_len(tb[NL80211_TXRATE_LEGACY]))
5661 return -EINVAL;
5548 } 5662 }
5549 if (tb[NL80211_TXRATE_MCS]) { 5663 if (tb[NL80211_TXRATE_MCS]) {
5550 if (!ht_rateset_to_mask( 5664 if (!ht_rateset_to_mask(
@@ -5690,7 +5804,8 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
5690 goto free_msg; 5804 goto free_msg;
5691 5805
5692 if (msg) { 5806 if (msg) {
5693 NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); 5807 if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
5808 goto nla_put_failure;
5694 5809
5695 genlmsg_end(msg, hdr); 5810 genlmsg_end(msg, hdr);
5696 return genlmsg_reply(msg, info); 5811 return genlmsg_reply(msg, info);
@@ -5795,7 +5910,8 @@ static int nl80211_get_power_save(struct sk_buff *skb, struct genl_info *info)
5795 else 5910 else
5796 ps_state = NL80211_PS_DISABLED; 5911 ps_state = NL80211_PS_DISABLED;
5797 5912
5798 NLA_PUT_U32(msg, NL80211_ATTR_PS_STATE, ps_state); 5913 if (nla_put_u32(msg, NL80211_ATTR_PS_STATE, ps_state))
5914 goto nla_put_failure;
5799 5915
5800 genlmsg_end(msg, hdr); 5916 genlmsg_end(msg, hdr);
5801 return genlmsg_reply(msg, info); 5917 return genlmsg_reply(msg, info);
@@ -5942,20 +6058,21 @@ static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info)
5942 if (!nl_wowlan) 6058 if (!nl_wowlan)
5943 goto nla_put_failure; 6059 goto nla_put_failure;
5944 6060
5945 if (rdev->wowlan->any) 6061 if ((rdev->wowlan->any &&
5946 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_ANY); 6062 nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) ||
5947 if (rdev->wowlan->disconnect) 6063 (rdev->wowlan->disconnect &&
5948 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_DISCONNECT); 6064 nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) ||
5949 if (rdev->wowlan->magic_pkt) 6065 (rdev->wowlan->magic_pkt &&
5950 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT); 6066 nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) ||
5951 if (rdev->wowlan->gtk_rekey_failure) 6067 (rdev->wowlan->gtk_rekey_failure &&
5952 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE); 6068 nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) ||
5953 if (rdev->wowlan->eap_identity_req) 6069 (rdev->wowlan->eap_identity_req &&
5954 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST); 6070 nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) ||
5955 if (rdev->wowlan->four_way_handshake) 6071 (rdev->wowlan->four_way_handshake &&
5956 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE); 6072 nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) ||
5957 if (rdev->wowlan->rfkill_release) 6073 (rdev->wowlan->rfkill_release &&
5958 NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE); 6074 nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE)))
6075 goto nla_put_failure;
5959 if (rdev->wowlan->n_patterns) { 6076 if (rdev->wowlan->n_patterns) {
5960 struct nlattr *nl_pats, *nl_pat; 6077 struct nlattr *nl_pats, *nl_pat;
5961 int i, pat_len; 6078 int i, pat_len;
@@ -5970,12 +6087,13 @@ static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info)
5970 if (!nl_pat) 6087 if (!nl_pat)
5971 goto nla_put_failure; 6088 goto nla_put_failure;
5972 pat_len = rdev->wowlan->patterns[i].pattern_len; 6089 pat_len = rdev->wowlan->patterns[i].pattern_len;
5973 NLA_PUT(msg, NL80211_WOWLAN_PKTPAT_MASK, 6090 if (nla_put(msg, NL80211_WOWLAN_PKTPAT_MASK,
5974 DIV_ROUND_UP(pat_len, 8), 6091 DIV_ROUND_UP(pat_len, 8),
5975 rdev->wowlan->patterns[i].mask); 6092 rdev->wowlan->patterns[i].mask) ||
5976 NLA_PUT(msg, NL80211_WOWLAN_PKTPAT_PATTERN, 6093 nla_put(msg, NL80211_WOWLAN_PKTPAT_PATTERN,
5977 pat_len, 6094 pat_len,
5978 rdev->wowlan->patterns[i].pattern); 6095 rdev->wowlan->patterns[i].pattern))
6096 goto nla_put_failure;
5979 nla_nest_end(msg, nl_pat); 6097 nla_nest_end(msg, nl_pat);
5980 } 6098 }
5981 nla_nest_end(msg, nl_pats); 6099 nla_nest_end(msg, nl_pats);
@@ -6000,6 +6118,7 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
6000 struct cfg80211_wowlan new_triggers = {}; 6118 struct cfg80211_wowlan new_triggers = {};
6001 struct wiphy_wowlan_support *wowlan = &rdev->wiphy.wowlan; 6119 struct wiphy_wowlan_support *wowlan = &rdev->wiphy.wowlan;
6002 int err, i; 6120 int err, i;
6121 bool prev_enabled = rdev->wowlan;
6003 6122
6004 if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns) 6123 if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns)
6005 return -EOPNOTSUPP; 6124 return -EOPNOTSUPP;
@@ -6132,6 +6251,9 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
6132 rdev->wowlan = NULL; 6251 rdev->wowlan = NULL;
6133 } 6252 }
6134 6253
6254 if (rdev->ops->set_wakeup && prev_enabled != !!rdev->wowlan)
6255 rdev->ops->set_wakeup(&rdev->wiphy, rdev->wowlan);
6256
6135 return 0; 6257 return 0;
6136 error: 6258 error:
6137 for (i = 0; i < new_triggers.n_patterns; i++) 6259 for (i = 0; i < new_triggers.n_patterns; i++)
@@ -6248,7 +6370,8 @@ static int nl80211_probe_client(struct sk_buff *skb,
6248 if (err) 6370 if (err)
6249 goto free_msg; 6371 goto free_msg;
6250 6372
6251 NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); 6373 if (nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
6374 goto nla_put_failure;
6252 6375
6253 genlmsg_end(msg, hdr); 6376 genlmsg_end(msg, hdr);
6254 6377
@@ -6916,19 +7039,24 @@ static int nl80211_add_scan_req(struct sk_buff *msg,
6916 nest = nla_nest_start(msg, NL80211_ATTR_SCAN_SSIDS); 7039 nest = nla_nest_start(msg, NL80211_ATTR_SCAN_SSIDS);
6917 if (!nest) 7040 if (!nest)
6918 goto nla_put_failure; 7041 goto nla_put_failure;
6919 for (i = 0; i < req->n_ssids; i++) 7042 for (i = 0; i < req->n_ssids; i++) {
6920 NLA_PUT(msg, i, req->ssids[i].ssid_len, req->ssids[i].ssid); 7043 if (nla_put(msg, i, req->ssids[i].ssid_len, req->ssids[i].ssid))
7044 goto nla_put_failure;
7045 }
6921 nla_nest_end(msg, nest); 7046 nla_nest_end(msg, nest);
6922 7047
6923 nest = nla_nest_start(msg, NL80211_ATTR_SCAN_FREQUENCIES); 7048 nest = nla_nest_start(msg, NL80211_ATTR_SCAN_FREQUENCIES);
6924 if (!nest) 7049 if (!nest)
6925 goto nla_put_failure; 7050 goto nla_put_failure;
6926 for (i = 0; i < req->n_channels; i++) 7051 for (i = 0; i < req->n_channels; i++) {
6927 NLA_PUT_U32(msg, i, req->channels[i]->center_freq); 7052 if (nla_put_u32(msg, i, req->channels[i]->center_freq))
7053 goto nla_put_failure;
7054 }
6928 nla_nest_end(msg, nest); 7055 nla_nest_end(msg, nest);
6929 7056
6930 if (req->ie) 7057 if (req->ie &&
6931 NLA_PUT(msg, NL80211_ATTR_IE, req->ie_len, req->ie); 7058 nla_put(msg, NL80211_ATTR_IE, req->ie_len, req->ie))
7059 goto nla_put_failure;
6932 7060
6933 return 0; 7061 return 0;
6934 nla_put_failure: 7062 nla_put_failure:
@@ -6947,8 +7075,9 @@ static int nl80211_send_scan_msg(struct sk_buff *msg,
6947 if (!hdr) 7075 if (!hdr)
6948 return -1; 7076 return -1;
6949 7077
6950 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 7078 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
6951 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 7079 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex))
7080 goto nla_put_failure;
6952 7081
6953 /* ignore errors and send incomplete event anyway */ 7082 /* ignore errors and send incomplete event anyway */
6954 nl80211_add_scan_req(msg, rdev); 7083 nl80211_add_scan_req(msg, rdev);
@@ -6972,8 +7101,9 @@ nl80211_send_sched_scan_msg(struct sk_buff *msg,
6972 if (!hdr) 7101 if (!hdr)
6973 return -1; 7102 return -1;
6974 7103
6975 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 7104 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
6976 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 7105 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex))
7106 goto nla_put_failure;
6977 7107
6978 return genlmsg_end(msg, hdr); 7108 return genlmsg_end(msg, hdr);
6979 7109
@@ -7096,26 +7226,33 @@ void nl80211_send_reg_change_event(struct regulatory_request *request)
7096 } 7226 }
7097 7227
7098 /* Userspace can always count this one always being set */ 7228 /* Userspace can always count this one always being set */
7099 NLA_PUT_U8(msg, NL80211_ATTR_REG_INITIATOR, request->initiator); 7229 if (nla_put_u8(msg, NL80211_ATTR_REG_INITIATOR, request->initiator))
7100 7230 goto nla_put_failure;
7101 if (request->alpha2[0] == '0' && request->alpha2[1] == '0') 7231
7102 NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE, 7232 if (request->alpha2[0] == '0' && request->alpha2[1] == '0') {
7103 NL80211_REGDOM_TYPE_WORLD); 7233 if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE,
7104 else if (request->alpha2[0] == '9' && request->alpha2[1] == '9') 7234 NL80211_REGDOM_TYPE_WORLD))
7105 NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE, 7235 goto nla_put_failure;
7106 NL80211_REGDOM_TYPE_CUSTOM_WORLD); 7236 } else if (request->alpha2[0] == '9' && request->alpha2[1] == '9') {
7107 else if ((request->alpha2[0] == '9' && request->alpha2[1] == '8') || 7237 if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE,
7108 request->intersect) 7238 NL80211_REGDOM_TYPE_CUSTOM_WORLD))
7109 NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE, 7239 goto nla_put_failure;
7110 NL80211_REGDOM_TYPE_INTERSECTION); 7240 } else if ((request->alpha2[0] == '9' && request->alpha2[1] == '8') ||
7111 else { 7241 request->intersect) {
7112 NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE, 7242 if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE,
7113 NL80211_REGDOM_TYPE_COUNTRY); 7243 NL80211_REGDOM_TYPE_INTERSECTION))
7114 NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2, request->alpha2); 7244 goto nla_put_failure;
7115 } 7245 } else {
7116 7246 if (nla_put_u8(msg, NL80211_ATTR_REG_TYPE,
7117 if (wiphy_idx_valid(request->wiphy_idx)) 7247 NL80211_REGDOM_TYPE_COUNTRY) ||
7118 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, request->wiphy_idx); 7248 nla_put_string(msg, NL80211_ATTR_REG_ALPHA2,
7249 request->alpha2))
7250 goto nla_put_failure;
7251 }
7252
7253 if (wiphy_idx_valid(request->wiphy_idx) &&
7254 nla_put_u32(msg, NL80211_ATTR_WIPHY, request->wiphy_idx))
7255 goto nla_put_failure;
7119 7256
7120 genlmsg_end(msg, hdr); 7257 genlmsg_end(msg, hdr);
7121 7258
@@ -7149,9 +7286,10 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
7149 return; 7286 return;
7150 } 7287 }
7151 7288
7152 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 7289 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7153 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 7290 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
7154 NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf); 7291 nla_put(msg, NL80211_ATTR_FRAME, len, buf))
7292 goto nla_put_failure;
7155 7293
7156 genlmsg_end(msg, hdr); 7294 genlmsg_end(msg, hdr);
7157 7295
@@ -7229,10 +7367,11 @@ static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev,
7229 return; 7367 return;
7230 } 7368 }
7231 7369
7232 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 7370 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7233 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 7371 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
7234 NLA_PUT_FLAG(msg, NL80211_ATTR_TIMED_OUT); 7372 nla_put_flag(msg, NL80211_ATTR_TIMED_OUT) ||
7235 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr); 7373 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr))
7374 goto nla_put_failure;
7236 7375
7237 genlmsg_end(msg, hdr); 7376 genlmsg_end(msg, hdr);
7238 7377
@@ -7280,15 +7419,15 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
7280 return; 7419 return;
7281 } 7420 }
7282 7421
7283 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 7422 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7284 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 7423 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
7285 if (bssid) 7424 (bssid && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid)) ||
7286 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid); 7425 nla_put_u16(msg, NL80211_ATTR_STATUS_CODE, status) ||
7287 NLA_PUT_U16(msg, NL80211_ATTR_STATUS_CODE, status); 7426 (req_ie &&
7288 if (req_ie) 7427 nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) ||
7289 NLA_PUT(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie); 7428 (resp_ie &&
7290 if (resp_ie) 7429 nla_put(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie)))
7291 NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie); 7430 goto nla_put_failure;
7292 7431
7293 genlmsg_end(msg, hdr); 7432 genlmsg_end(msg, hdr);
7294 7433
@@ -7320,13 +7459,14 @@ void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
7320 return; 7459 return;
7321 } 7460 }
7322 7461
7323 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 7462 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7324 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 7463 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
7325 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid); 7464 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid) ||
7326 if (req_ie) 7465 (req_ie &&
7327 NLA_PUT(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie); 7466 nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) ||
7328 if (resp_ie) 7467 (resp_ie &&
7329 NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie); 7468 nla_put(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie)))
7469 goto nla_put_failure;
7330 7470
7331 genlmsg_end(msg, hdr); 7471 genlmsg_end(msg, hdr);
7332 7472
@@ -7357,14 +7497,14 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
7357 return; 7497 return;
7358 } 7498 }
7359 7499
7360 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 7500 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7361 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 7501 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
7362 if (from_ap && reason) 7502 (from_ap && reason &&
7363 NLA_PUT_U16(msg, NL80211_ATTR_REASON_CODE, reason); 7503 nla_put_u16(msg, NL80211_ATTR_REASON_CODE, reason)) ||
7364 if (from_ap) 7504 (from_ap &&
7365 NLA_PUT_FLAG(msg, NL80211_ATTR_DISCONNECTED_BY_AP); 7505 nla_put_flag(msg, NL80211_ATTR_DISCONNECTED_BY_AP)) ||
7366 if (ie) 7506 (ie && nla_put(msg, NL80211_ATTR_IE, ie_len, ie)))
7367 NLA_PUT(msg, NL80211_ATTR_IE, ie_len, ie); 7507 goto nla_put_failure;
7368 7508
7369 genlmsg_end(msg, hdr); 7509 genlmsg_end(msg, hdr);
7370 7510
@@ -7395,9 +7535,10 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
7395 return; 7535 return;
7396 } 7536 }
7397 7537
7398 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 7538 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7399 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 7539 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
7400 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid); 7540 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid))
7541 goto nla_put_failure;
7401 7542
7402 genlmsg_end(msg, hdr); 7543 genlmsg_end(msg, hdr);
7403 7544
@@ -7428,11 +7569,12 @@ void nl80211_send_new_peer_candidate(struct cfg80211_registered_device *rdev,
7428 return; 7569 return;
7429 } 7570 }
7430 7571
7431 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 7572 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7432 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 7573 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
7433 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, macaddr); 7574 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, macaddr) ||
7434 if (ie_len && ie) 7575 (ie_len && ie &&
7435 NLA_PUT(msg, NL80211_ATTR_IE, ie_len , ie); 7576 nla_put(msg, NL80211_ATTR_IE, ie_len , ie)))
7577 goto nla_put_failure;
7436 7578
7437 genlmsg_end(msg, hdr); 7579 genlmsg_end(msg, hdr);
7438 7580
@@ -7463,15 +7605,14 @@ void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev,
7463 return; 7605 return;
7464 } 7606 }
7465 7607
7466 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 7608 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7467 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 7609 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
7468 if (addr) 7610 (addr && nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr)) ||
7469 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr); 7611 nla_put_u32(msg, NL80211_ATTR_KEY_TYPE, key_type) ||
7470 NLA_PUT_U32(msg, NL80211_ATTR_KEY_TYPE, key_type); 7612 (key_id != -1 &&
7471 if (key_id != -1) 7613 nla_put_u8(msg, NL80211_ATTR_KEY_IDX, key_id)) ||
7472 NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_id); 7614 (tsc && nla_put(msg, NL80211_ATTR_KEY_SEQ, 6, tsc)))
7473 if (tsc) 7615 goto nla_put_failure;
7474 NLA_PUT(msg, NL80211_ATTR_KEY_SEQ, 6, tsc);
7475 7616
7476 genlmsg_end(msg, hdr); 7617 genlmsg_end(msg, hdr);
7477 7618
@@ -7506,7 +7647,8 @@ void nl80211_send_beacon_hint_event(struct wiphy *wiphy,
7506 * Since we are applying the beacon hint to a wiphy we know its 7647 * Since we are applying the beacon hint to a wiphy we know its
7507 * wiphy_idx is valid 7648 * wiphy_idx is valid
7508 */ 7649 */
7509 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, get_wiphy_idx(wiphy)); 7650 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, get_wiphy_idx(wiphy)))
7651 goto nla_put_failure;
7510 7652
7511 /* Before */ 7653 /* Before */
7512 nl_freq = nla_nest_start(msg, NL80211_ATTR_FREQ_BEFORE); 7654 nl_freq = nla_nest_start(msg, NL80211_ATTR_FREQ_BEFORE);
@@ -7558,14 +7700,16 @@ static void nl80211_send_remain_on_chan_event(
7558 return; 7700 return;
7559 } 7701 }
7560 7702
7561 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 7703 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7562 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 7704 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
7563 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq); 7705 nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq) ||
7564 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, channel_type); 7706 nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, channel_type) ||
7565 NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); 7707 nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie))
7708 goto nla_put_failure;
7566 7709
7567 if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL) 7710 if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL &&
7568 NLA_PUT_U32(msg, NL80211_ATTR_DURATION, duration); 7711 nla_put_u32(msg, NL80211_ATTR_DURATION, duration))
7712 goto nla_put_failure;
7569 7713
7570 genlmsg_end(msg, hdr); 7714 genlmsg_end(msg, hdr);
7571 7715
@@ -7636,8 +7780,9 @@ void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
7636 return; 7780 return;
7637 } 7781 }
7638 7782
7639 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); 7783 if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
7640 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr); 7784 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr))
7785 goto nla_put_failure;
7641 7786
7642 genlmsg_end(msg, hdr); 7787 genlmsg_end(msg, hdr);
7643 7788
@@ -7673,9 +7818,10 @@ static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
7673 return true; 7818 return true;
7674 } 7819 }
7675 7820
7676 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 7821 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7677 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); 7822 nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
7678 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr); 7823 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr))
7824 goto nla_put_failure;
7679 7825
7680 err = genlmsg_end(msg, hdr); 7826 err = genlmsg_end(msg, hdr);
7681 if (err < 0) { 7827 if (err < 0) {
@@ -7724,12 +7870,13 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
7724 return -ENOMEM; 7870 return -ENOMEM;
7725 } 7871 }
7726 7872
7727 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 7873 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7728 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 7874 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
7729 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq); 7875 nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq) ||
7730 if (sig_dbm) 7876 (sig_dbm &&
7731 NLA_PUT_U32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm); 7877 nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) ||
7732 NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf); 7878 nla_put(msg, NL80211_ATTR_FRAME, len, buf))
7879 goto nla_put_failure;
7733 7880
7734 genlmsg_end(msg, hdr); 7881 genlmsg_end(msg, hdr);
7735 7882
@@ -7759,12 +7906,12 @@ void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev,
7759 return; 7906 return;
7760 } 7907 }
7761 7908
7762 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 7909 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7763 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 7910 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
7764 NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf); 7911 nla_put(msg, NL80211_ATTR_FRAME, len, buf) ||
7765 NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); 7912 nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie) ||
7766 if (ack) 7913 (ack && nla_put_flag(msg, NL80211_ATTR_ACK)))
7767 NLA_PUT_FLAG(msg, NL80211_ATTR_ACK); 7914 goto nla_put_failure;
7768 7915
7769 genlmsg_end(msg, hdr); 7916 genlmsg_end(msg, hdr);
7770 7917
@@ -7796,15 +7943,17 @@ nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
7796 return; 7943 return;
7797 } 7944 }
7798 7945
7799 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 7946 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7800 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 7947 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex))
7948 goto nla_put_failure;
7801 7949
7802 pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM); 7950 pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM);
7803 if (!pinfoattr) 7951 if (!pinfoattr)
7804 goto nla_put_failure; 7952 goto nla_put_failure;
7805 7953
7806 NLA_PUT_U32(msg, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT, 7954 if (nla_put_u32(msg, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT,
7807 rssi_event); 7955 rssi_event))
7956 goto nla_put_failure;
7808 7957
7809 nla_nest_end(msg, pinfoattr); 7958 nla_nest_end(msg, pinfoattr);
7810 7959
@@ -7837,16 +7986,18 @@ void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev,
7837 return; 7986 return;
7838 } 7987 }
7839 7988
7840 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 7989 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7841 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 7990 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
7842 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid); 7991 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid))
7992 goto nla_put_failure;
7843 7993
7844 rekey_attr = nla_nest_start(msg, NL80211_ATTR_REKEY_DATA); 7994 rekey_attr = nla_nest_start(msg, NL80211_ATTR_REKEY_DATA);
7845 if (!rekey_attr) 7995 if (!rekey_attr)
7846 goto nla_put_failure; 7996 goto nla_put_failure;
7847 7997
7848 NLA_PUT(msg, NL80211_REKEY_DATA_REPLAY_CTR, 7998 if (nla_put(msg, NL80211_REKEY_DATA_REPLAY_CTR,
7849 NL80211_REPLAY_CTR_LEN, replay_ctr); 7999 NL80211_REPLAY_CTR_LEN, replay_ctr))
8000 goto nla_put_failure;
7850 8001
7851 nla_nest_end(msg, rekey_attr); 8002 nla_nest_end(msg, rekey_attr);
7852 8003
@@ -7879,17 +8030,19 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
7879 return; 8030 return;
7880 } 8031 }
7881 8032
7882 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 8033 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7883 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 8034 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex))
8035 goto nla_put_failure;
7884 8036
7885 attr = nla_nest_start(msg, NL80211_ATTR_PMKSA_CANDIDATE); 8037 attr = nla_nest_start(msg, NL80211_ATTR_PMKSA_CANDIDATE);
7886 if (!attr) 8038 if (!attr)
7887 goto nla_put_failure; 8039 goto nla_put_failure;
7888 8040
7889 NLA_PUT_U32(msg, NL80211_PMKSA_CANDIDATE_INDEX, index); 8041 if (nla_put_u32(msg, NL80211_PMKSA_CANDIDATE_INDEX, index) ||
7890 NLA_PUT(msg, NL80211_PMKSA_CANDIDATE_BSSID, ETH_ALEN, bssid); 8042 nla_put(msg, NL80211_PMKSA_CANDIDATE_BSSID, ETH_ALEN, bssid) ||
7891 if (preauth) 8043 (preauth &&
7892 NLA_PUT_FLAG(msg, NL80211_PMKSA_CANDIDATE_PREAUTH); 8044 nla_put_flag(msg, NL80211_PMKSA_CANDIDATE_PREAUTH)))
8045 goto nla_put_failure;
7893 8046
7894 nla_nest_end(msg, attr); 8047 nla_nest_end(msg, attr);
7895 8048
@@ -7904,6 +8057,39 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
7904 nlmsg_free(msg); 8057 nlmsg_free(msg);
7905} 8058}
7906 8059
8060void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
8061 struct net_device *netdev, int freq,
8062 enum nl80211_channel_type type, gfp_t gfp)
8063{
8064 struct sk_buff *msg;
8065 void *hdr;
8066
8067 msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
8068 if (!msg)
8069 return;
8070
8071 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_CH_SWITCH_NOTIFY);
8072 if (!hdr) {
8073 nlmsg_free(msg);
8074 return;
8075 }
8076
8077 if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
8078 nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq) ||
8079 nla_put_u32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, type))
8080 goto nla_put_failure;
8081
8082 genlmsg_end(msg, hdr);
8083
8084 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
8085 nl80211_mlme_mcgrp.id, gfp);
8086 return;
8087
8088 nla_put_failure:
8089 genlmsg_cancel(msg, hdr);
8090 nlmsg_free(msg);
8091}
8092
7907void 8093void
7908nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev, 8094nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
7909 struct net_device *netdev, const u8 *peer, 8095 struct net_device *netdev, const u8 *peer,
@@ -7923,15 +8109,17 @@ nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
7923 return; 8109 return;
7924 } 8110 }
7925 8111
7926 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 8112 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7927 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex); 8113 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
7928 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, peer); 8114 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, peer))
8115 goto nla_put_failure;
7929 8116
7930 pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM); 8117 pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM);
7931 if (!pinfoattr) 8118 if (!pinfoattr)
7932 goto nla_put_failure; 8119 goto nla_put_failure;
7933 8120
7934 NLA_PUT_U32(msg, NL80211_ATTR_CQM_PKT_LOSS_EVENT, num_packets); 8121 if (nla_put_u32(msg, NL80211_ATTR_CQM_PKT_LOSS_EVENT, num_packets))
8122 goto nla_put_failure;
7935 8123
7936 nla_nest_end(msg, pinfoattr); 8124 nla_nest_end(msg, pinfoattr);
7937 8125
@@ -7965,12 +8153,12 @@ void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
7965 return; 8153 return;
7966 } 8154 }
7967 8155
7968 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 8156 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
7969 NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); 8157 nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
7970 NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr); 8158 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) ||
7971 NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); 8159 nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie) ||
7972 if (acked) 8160 (acked && nla_put_flag(msg, NL80211_ATTR_ACK)))
7973 NLA_PUT_FLAG(msg, NL80211_ATTR_ACK); 8161 goto nla_put_failure;
7974 8162
7975 err = genlmsg_end(msg, hdr); 8163 err = genlmsg_end(msg, hdr);
7976 if (err < 0) { 8164 if (err < 0) {
@@ -8010,12 +8198,13 @@ void cfg80211_report_obss_beacon(struct wiphy *wiphy,
8010 return; 8198 return;
8011 } 8199 }
8012 8200
8013 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 8201 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
8014 if (freq) 8202 (freq &&
8015 NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq); 8203 nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq)) ||
8016 if (sig_dbm) 8204 (sig_dbm &&
8017 NLA_PUT_U32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm); 8205 nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) ||
8018 NLA_PUT(msg, NL80211_ATTR_FRAME, len, frame); 8206 nla_put(msg, NL80211_ATTR_FRAME, len, frame))
8207 goto nla_put_failure;
8019 8208
8020 genlmsg_end(msg, hdr); 8209 genlmsg_end(msg, hdr);
8021 8210
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 4ffe50df9f31..01a1122c3b33 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -118,6 +118,10 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
118 struct net_device *netdev, int index, 118 struct net_device *netdev, int index,
119 const u8 *bssid, bool preauth, gfp_t gfp); 119 const u8 *bssid, bool preauth, gfp_t gfp);
120 120
121void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
122 struct net_device *dev, int freq,
123 enum nl80211_channel_type type, gfp_t gfp);
124
121bool nl80211_unexpected_frame(struct net_device *dev, 125bool nl80211_unexpected_frame(struct net_device *dev,
122 const u8 *addr, gfp_t gfp); 126 const u8 *addr, gfp_t gfp);
123bool nl80211_unexpected_4addr_frame(struct net_device *dev, 127bool nl80211_unexpected_4addr_frame(struct net_device *dev,
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index e9a0ac83b84c..15f347477a99 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -388,7 +388,15 @@ static void reg_regdb_query(const char *alpha2)
388 388
389 schedule_work(&reg_regdb_work); 389 schedule_work(&reg_regdb_work);
390} 390}
391
392/* Feel free to add any other sanity checks here */
393static void reg_regdb_size_check(void)
394{
395 /* We should ideally BUILD_BUG_ON() but then random builds would fail */
396 WARN_ONCE(!reg_regdb_size, "db.txt is empty, you should update it...");
397}
391#else 398#else
399static inline void reg_regdb_size_check(void) {}
392static inline void reg_regdb_query(const char *alpha2) {} 400static inline void reg_regdb_query(const char *alpha2) {}
393#endif /* CONFIG_CFG80211_INTERNAL_REGDB */ 401#endif /* CONFIG_CFG80211_INTERNAL_REGDB */
394 402
@@ -2322,6 +2330,8 @@ int __init regulatory_init(void)
2322 spin_lock_init(&reg_requests_lock); 2330 spin_lock_init(&reg_requests_lock);
2323 spin_lock_init(&reg_pending_beacons_lock); 2331 spin_lock_init(&reg_pending_beacons_lock);
2324 2332
2333 reg_regdb_size_check();
2334
2325 cfg80211_regdomain = cfg80211_world_regdom; 2335 cfg80211_regdomain = cfg80211_world_regdom;
2326 2336
2327 user_alpha2[0] = '9'; 2337 user_alpha2[0] = '9';
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 70faadf16a32..1442bb68a3f3 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -18,7 +18,7 @@
18#include "nl80211.h" 18#include "nl80211.h"
19#include "wext-compat.h" 19#include "wext-compat.h"
20 20
21#define IEEE80211_SCAN_RESULT_EXPIRE (15 * HZ) 21#define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ)
22 22
23void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak) 23void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak)
24{ 24{
@@ -378,7 +378,7 @@ static int cmp_bss_core(struct cfg80211_bss *a,
378 b->len_information_elements); 378 b->len_information_elements);
379 } 379 }
380 380
381 return memcmp(a->bssid, b->bssid, ETH_ALEN); 381 return compare_ether_addr(a->bssid, b->bssid);
382} 382}
383 383
384static int cmp_bss(struct cfg80211_bss *a, 384static int cmp_bss(struct cfg80211_bss *a,
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 957f25621617..6cba00173a2f 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -946,13 +946,6 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
946 if (rdev->wiphy.software_iftypes & BIT(iftype)) 946 if (rdev->wiphy.software_iftypes & BIT(iftype))
947 return 0; 947 return 0;
948 948
949 /*
950 * Drivers will gradually all set this flag, until all
951 * have it we only enforce for those that set it.
952 */
953 if (!(rdev->wiphy.flags & WIPHY_FLAG_ENFORCE_COMBINATIONS))
954 return 0;
955
956 memset(num, 0, sizeof(num)); 949 memset(num, 0, sizeof(num));
957 950
958 num[iftype] = 1; 951 num[iftype] = 1;
@@ -972,6 +965,9 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
972 } 965 }
973 mutex_unlock(&rdev->devlist_mtx); 966 mutex_unlock(&rdev->devlist_mtx);
974 967
968 if (total == 1)
969 return 0;
970
975 for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) { 971 for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) {
976 const struct ieee80211_iface_combination *c; 972 const struct ieee80211_iface_combination *c;
977 struct ieee80211_iface_limit *limits; 973 struct ieee80211_iface_limit *limits;
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 3c24eb97e9d7..6a6181a673ca 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -821,6 +821,7 @@ static int cfg80211_wext_giwfreq(struct net_device *dev,
821 struct wireless_dev *wdev = dev->ieee80211_ptr; 821 struct wireless_dev *wdev = dev->ieee80211_ptr;
822 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 822 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
823 struct ieee80211_channel *chan; 823 struct ieee80211_channel *chan;
824 enum nl80211_channel_type channel_type;
824 825
825 switch (wdev->iftype) { 826 switch (wdev->iftype) {
826 case NL80211_IFTYPE_STATION: 827 case NL80211_IFTYPE_STATION:
@@ -831,7 +832,7 @@ static int cfg80211_wext_giwfreq(struct net_device *dev,
831 if (!rdev->ops->get_channel) 832 if (!rdev->ops->get_channel)
832 return -EINVAL; 833 return -EINVAL;
833 834
834 chan = rdev->ops->get_channel(wdev->wiphy); 835 chan = rdev->ops->get_channel(wdev->wiphy, &channel_type);
835 if (!chan) 836 if (!chan)
836 return -EINVAL; 837 return -EINVAL;
837 freq->m = chan->center_freq; 838 freq->m = chan->center_freq;
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index af648e08e61b..b0eb7aa49b60 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -256,7 +256,7 @@ static const struct iw_ioctl_description standard_ioctl[] = {
256 .max_tokens = sizeof(struct iw_pmksa), 256 .max_tokens = sizeof(struct iw_pmksa),
257 }, 257 },
258}; 258};
259static const unsigned standard_ioctl_num = ARRAY_SIZE(standard_ioctl); 259static const unsigned int standard_ioctl_num = ARRAY_SIZE(standard_ioctl);
260 260
261/* 261/*
262 * Meta-data about all the additional standard Wireless Extension events 262 * Meta-data about all the additional standard Wireless Extension events
@@ -306,7 +306,7 @@ static const struct iw_ioctl_description standard_event[] = {
306 .max_tokens = sizeof(struct iw_pmkid_cand), 306 .max_tokens = sizeof(struct iw_pmkid_cand),
307 }, 307 },
308}; 308};
309static const unsigned standard_event_num = ARRAY_SIZE(standard_event); 309static const unsigned int standard_event_num = ARRAY_SIZE(standard_event);
310 310
311/* Size (in bytes) of various events */ 311/* Size (in bytes) of various events */
312static const int event_type_size[] = { 312static const int event_type_size[] = {
@@ -402,7 +402,8 @@ static struct nlmsghdr *rtnetlink_ifinfo_prep(struct net_device *dev,
402 r->ifi_flags = dev_get_flags(dev); 402 r->ifi_flags = dev_get_flags(dev);
403 r->ifi_change = 0; /* Wireless changes don't affect those flags */ 403 r->ifi_change = 0; /* Wireless changes don't affect those flags */
404 404
405 NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name); 405 if (nla_put_string(skb, IFLA_IFNAME, dev->name))
406 goto nla_put_failure;
406 407
407 return nlh; 408 return nlh;
408 nla_put_failure: 409 nla_put_failure:
@@ -428,7 +429,7 @@ void wireless_send_event(struct net_device * dev,
428 int hdr_len; /* Size of the event header */ 429 int hdr_len; /* Size of the event header */
429 int wrqu_off = 0; /* Offset in wrqu */ 430 int wrqu_off = 0; /* Offset in wrqu */
430 /* Don't "optimise" the following variable, it will crash */ 431 /* Don't "optimise" the following variable, it will crash */
431 unsigned cmd_index; /* *MUST* be unsigned */ 432 unsigned int cmd_index; /* *MUST* be unsigned */
432 struct sk_buff *skb; 433 struct sk_buff *skb;
433 struct nlmsghdr *nlh; 434 struct nlmsghdr *nlh;
434 struct nlattr *nla; 435 struct nlattr *nla;
diff --git a/net/x25/sysctl_net_x25.c b/net/x25/sysctl_net_x25.c
index d2efd29f434e..43239527a205 100644
--- a/net/x25/sysctl_net_x25.c
+++ b/net/x25/sysctl_net_x25.c
@@ -73,18 +73,12 @@ static struct ctl_table x25_table[] = {
73 { 0, }, 73 { 0, },
74}; 74};
75 75
76static struct ctl_path x25_path[] = {
77 { .procname = "net", },
78 { .procname = "x25", },
79 { }
80};
81
82void __init x25_register_sysctl(void) 76void __init x25_register_sysctl(void)
83{ 77{
84 x25_table_header = register_sysctl_paths(x25_path, x25_table); 78 x25_table_header = register_net_sysctl(&init_net, "net/x25", x25_table);
85} 79}
86 80
87void x25_unregister_sysctl(void) 81void x25_unregister_sysctl(void)
88{ 82{
89 unregister_sysctl_table(x25_table_header); 83 unregister_net_sysctl_table(x25_table_header);
90} 84}
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c
index f0ce862d1f46..a8a236338e61 100644
--- a/net/x25/x25_dev.c
+++ b/net/x25/x25_dev.c
@@ -58,7 +58,7 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
58 if (!sock_owned_by_user(sk)) { 58 if (!sock_owned_by_user(sk)) {
59 queued = x25_process_rx_frame(sk, skb); 59 queued = x25_process_rx_frame(sk, skb);
60 } else { 60 } else {
61 queued = !sk_add_backlog(sk, skb); 61 queued = !sk_add_backlog(sk, skb, sk->sk_rcvbuf);
62 } 62 }
63 bh_unlock_sock(sk); 63 bh_unlock_sock(sk);
64 sock_put(sk); 64 sock_put(sk);
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index 36384a1fa9f2..66c638730c7a 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -231,7 +231,7 @@ int x25_create_facilities(unsigned char *buffer,
231 } 231 }
232 232
233 if (dte_facs->calling_len && (facil_mask & X25_MASK_CALLING_AE)) { 233 if (dte_facs->calling_len && (facil_mask & X25_MASK_CALLING_AE)) {
234 unsigned bytecount = (dte_facs->calling_len + 1) >> 1; 234 unsigned int bytecount = (dte_facs->calling_len + 1) >> 1;
235 *p++ = X25_FAC_CALLING_AE; 235 *p++ = X25_FAC_CALLING_AE;
236 *p++ = 1 + bytecount; 236 *p++ = 1 + bytecount;
237 *p++ = dte_facs->calling_len; 237 *p++ = dte_facs->calling_len;
@@ -240,7 +240,7 @@ int x25_create_facilities(unsigned char *buffer,
240 } 240 }
241 241
242 if (dte_facs->called_len && (facil_mask & X25_MASK_CALLED_AE)) { 242 if (dte_facs->called_len && (facil_mask & X25_MASK_CALLED_AE)) {
243 unsigned bytecount = (dte_facs->called_len % 2) ? 243 unsigned int bytecount = (dte_facs->called_len % 2) ?
244 dte_facs->called_len / 2 + 1 : 244 dte_facs->called_len / 2 + 1 :
245 dte_facs->called_len / 2; 245 dte_facs->called_len / 2;
246 *p++ = X25_FAC_CALLED_AE; 246 *p++ = X25_FAC_CALLED_AE;
diff --git a/net/xfrm/xfrm_hash.h b/net/xfrm/xfrm_hash.h
index 7199d78b2aa1..716502ada53b 100644
--- a/net/xfrm/xfrm_hash.h
+++ b/net/xfrm/xfrm_hash.h
@@ -45,10 +45,10 @@ static inline unsigned int __xfrm_dst_hash(const xfrm_address_t *daddr,
45 return (h ^ (h >> 16)) & hmask; 45 return (h ^ (h >> 16)) & hmask;
46} 46}
47 47
48static inline unsigned __xfrm_src_hash(const xfrm_address_t *daddr, 48static inline unsigned int __xfrm_src_hash(const xfrm_address_t *daddr,
49 const xfrm_address_t *saddr, 49 const xfrm_address_t *saddr,
50 unsigned short family, 50 unsigned short family,
51 unsigned int hmask) 51 unsigned int hmask)
52{ 52{
53 unsigned int h = family; 53 unsigned int h = family;
54 switch (family) { 54 switch (family) {
diff --git a/net/xfrm/xfrm_sysctl.c b/net/xfrm/xfrm_sysctl.c
index 05640bc9594b..380976f74c4c 100644
--- a/net/xfrm/xfrm_sysctl.c
+++ b/net/xfrm/xfrm_sysctl.c
@@ -54,7 +54,7 @@ int __net_init xfrm_sysctl_init(struct net *net)
54 table[2].data = &net->xfrm.sysctl_larval_drop; 54 table[2].data = &net->xfrm.sysctl_larval_drop;
55 table[3].data = &net->xfrm.sysctl_acq_expires; 55 table[3].data = &net->xfrm.sysctl_acq_expires;
56 56
57 net->xfrm.sysctl_hdr = register_net_sysctl_table(net, net_core_path, table); 57 net->xfrm.sysctl_hdr = register_net_sysctl(net, "net/core", table);
58 if (!net->xfrm.sysctl_hdr) 58 if (!net->xfrm.sysctl_hdr)
59 goto out_register; 59 goto out_register;
60 return 0; 60 return 0;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 7128dde0fe1a..44293b3fd6a1 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -756,40 +756,50 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
756{ 756{
757 copy_to_user_state(x, p); 757 copy_to_user_state(x, p);
758 758
759 if (x->coaddr) 759 if (x->coaddr &&
760 NLA_PUT(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr); 760 nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr))
761 goto nla_put_failure;
761 762
762 if (x->lastused) 763 if (x->lastused &&
763 NLA_PUT_U64(skb, XFRMA_LASTUSED, x->lastused); 764 nla_put_u64(skb, XFRMA_LASTUSED, x->lastused))
765 goto nla_put_failure;
764 766
765 if (x->aead) 767 if (x->aead &&
766 NLA_PUT(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead); 768 nla_put(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead))
767 if (x->aalg) { 769 goto nla_put_failure;
768 if (copy_to_user_auth(x->aalg, skb))
769 goto nla_put_failure;
770 770
771 NLA_PUT(skb, XFRMA_ALG_AUTH_TRUNC, 771 if (x->aalg &&
772 xfrm_alg_auth_len(x->aalg), x->aalg); 772 (copy_to_user_auth(x->aalg, skb) ||
773 } 773 nla_put(skb, XFRMA_ALG_AUTH_TRUNC,
774 if (x->ealg) 774 xfrm_alg_auth_len(x->aalg), x->aalg)))
775 NLA_PUT(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg); 775 goto nla_put_failure;
776 if (x->calg)
777 NLA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
778 776
779 if (x->encap) 777 if (x->ealg &&
780 NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap); 778 nla_put(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg))
779 goto nla_put_failure;
781 780
782 if (x->tfcpad) 781 if (x->calg &&
783 NLA_PUT_U32(skb, XFRMA_TFCPAD, x->tfcpad); 782 nla_put(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg))
783 goto nla_put_failure;
784
785 if (x->encap &&
786 nla_put(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap))
787 goto nla_put_failure;
788
789 if (x->tfcpad &&
790 nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad))
791 goto nla_put_failure;
784 792
785 if (xfrm_mark_put(skb, &x->mark)) 793 if (xfrm_mark_put(skb, &x->mark))
786 goto nla_put_failure; 794 goto nla_put_failure;
787 795
788 if (x->replay_esn) 796 if (x->replay_esn &&
789 NLA_PUT(skb, XFRMA_REPLAY_ESN_VAL, 797 nla_put(skb, XFRMA_REPLAY_ESN_VAL,
790 xfrm_replay_state_esn_len(x->replay_esn), x->replay_esn); 798 xfrm_replay_state_esn_len(x->replay_esn),
799 x->replay_esn))
800 goto nla_put_failure;
791 801
792 if (x->security && copy_sec_ctx(x->security, skb) < 0) 802 if (x->security && copy_sec_ctx(x->security, skb))
793 goto nla_put_failure; 803 goto nla_put_failure;
794 804
795 return 0; 805 return 0;
@@ -912,8 +922,9 @@ static int build_spdinfo(struct sk_buff *skb, struct net *net,
912 sph.spdhcnt = si.spdhcnt; 922 sph.spdhcnt = si.spdhcnt;
913 sph.spdhmcnt = si.spdhmcnt; 923 sph.spdhmcnt = si.spdhmcnt;
914 924
915 NLA_PUT(skb, XFRMA_SPD_INFO, sizeof(spc), &spc); 925 if (nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc) ||
916 NLA_PUT(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph); 926 nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph))
927 goto nla_put_failure;
917 928
918 return nlmsg_end(skb, nlh); 929 return nlmsg_end(skb, nlh);
919 930
@@ -967,8 +978,9 @@ static int build_sadinfo(struct sk_buff *skb, struct net *net,
967 sh.sadhmcnt = si.sadhmcnt; 978 sh.sadhmcnt = si.sadhmcnt;
968 sh.sadhcnt = si.sadhcnt; 979 sh.sadhcnt = si.sadhcnt;
969 980
970 NLA_PUT_U32(skb, XFRMA_SAD_CNT, si.sadcnt); 981 if (nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt) ||
971 NLA_PUT(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh); 982 nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh))
983 goto nla_put_failure;
972 984
973 return nlmsg_end(skb, nlh); 985 return nlmsg_end(skb, nlh);
974 986
@@ -1690,21 +1702,27 @@ static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct
1690 id->reqid = x->props.reqid; 1702 id->reqid = x->props.reqid;
1691 id->flags = c->data.aevent; 1703 id->flags = c->data.aevent;
1692 1704
1693 if (x->replay_esn) 1705 if (x->replay_esn) {
1694 NLA_PUT(skb, XFRMA_REPLAY_ESN_VAL, 1706 if (nla_put(skb, XFRMA_REPLAY_ESN_VAL,
1695 xfrm_replay_state_esn_len(x->replay_esn), 1707 xfrm_replay_state_esn_len(x->replay_esn),
1696 x->replay_esn); 1708 x->replay_esn))
1697 else 1709 goto nla_put_failure;
1698 NLA_PUT(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay); 1710 } else {
1699 1711 if (nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
1700 NLA_PUT(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft); 1712 &x->replay))
1713 goto nla_put_failure;
1714 }
1715 if (nla_put(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft))
1716 goto nla_put_failure;
1701 1717
1702 if (id->flags & XFRM_AE_RTHR) 1718 if ((id->flags & XFRM_AE_RTHR) &&
1703 NLA_PUT_U32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff); 1719 nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff))
1720 goto nla_put_failure;
1704 1721
1705 if (id->flags & XFRM_AE_ETHR) 1722 if ((id->flags & XFRM_AE_ETHR) &&
1706 NLA_PUT_U32(skb, XFRMA_ETIMER_THRESH, 1723 nla_put_u32(skb, XFRMA_ETIMER_THRESH,
1707 x->replay_maxage * 10 / HZ); 1724 x->replay_maxage * 10 / HZ))
1725 goto nla_put_failure;
1708 1726
1709 if (xfrm_mark_put(skb, &x->mark)) 1727 if (xfrm_mark_put(skb, &x->mark))
1710 goto nla_put_failure; 1728 goto nla_put_failure;
@@ -2835,8 +2853,9 @@ static int build_report(struct sk_buff *skb, u8 proto,
2835 ur->proto = proto; 2853 ur->proto = proto;
2836 memcpy(&ur->sel, sel, sizeof(ur->sel)); 2854 memcpy(&ur->sel, sel, sizeof(ur->sel));
2837 2855
2838 if (addr) 2856 if (addr &&
2839 NLA_PUT(skb, XFRMA_COADDR, sizeof(*addr), addr); 2857 nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr))
2858 goto nla_put_failure;
2840 2859
2841 return nlmsg_end(skb, nlh); 2860 return nlmsg_end(skb, nlh);
2842 2861
diff --git a/tools/virtio/linux/virtio.h b/tools/virtio/linux/virtio.h
index 7579f19e61e0..81847dd08bd0 100644
--- a/tools/virtio/linux/virtio.h
+++ b/tools/virtio/linux/virtio.h
@@ -203,6 +203,7 @@ void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);
203void virtqueue_disable_cb(struct virtqueue *vq); 203void virtqueue_disable_cb(struct virtqueue *vq);
204 204
205bool virtqueue_enable_cb(struct virtqueue *vq); 205bool virtqueue_enable_cb(struct virtqueue *vq);
206bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
206 207
207void *virtqueue_detach_unused_buf(struct virtqueue *vq); 208void *virtqueue_detach_unused_buf(struct virtqueue *vq);
208struct virtqueue *vring_new_virtqueue(unsigned int num, 209struct virtqueue *vring_new_virtqueue(unsigned int num,
diff --git a/tools/virtio/virtio_test.c b/tools/virtio/virtio_test.c
index 6bf95f995364..e626fa553c5a 100644
--- a/tools/virtio/virtio_test.c
+++ b/tools/virtio/virtio_test.c
@@ -144,7 +144,8 @@ static void wait_for_interrupt(struct vdev_info *dev)
144 } 144 }
145} 145}
146 146
147static void run_test(struct vdev_info *dev, struct vq_info *vq, int bufs) 147static void run_test(struct vdev_info *dev, struct vq_info *vq,
148 bool delayed, int bufs)
148{ 149{
149 struct scatterlist sl; 150 struct scatterlist sl;
150 long started = 0, completed = 0; 151 long started = 0, completed = 0;
@@ -183,8 +184,12 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq, int bufs)
183 assert(started <= bufs); 184 assert(started <= bufs);
184 if (completed == bufs) 185 if (completed == bufs)
185 break; 186 break;
186 if (virtqueue_enable_cb(vq->vq)) { 187 if (delayed) {
187 wait_for_interrupt(dev); 188 if (virtqueue_enable_cb_delayed(vq->vq))
189 wait_for_interrupt(dev);
190 } else {
191 if (virtqueue_enable_cb(vq->vq))
192 wait_for_interrupt(dev);
188 } 193 }
189 } 194 }
190 test = 0; 195 test = 0;
@@ -216,6 +221,14 @@ const struct option longopts[] = {
216 .val = 'i', 221 .val = 'i',
217 }, 222 },
218 { 223 {
224 .name = "delayed-interrupt",
225 .val = 'D',
226 },
227 {
228 .name = "no-delayed-interrupt",
229 .val = 'd',
230 },
231 {
219 } 232 }
220}; 233};
221 234
@@ -224,6 +237,7 @@ static void help()
224 fprintf(stderr, "Usage: virtio_test [--help]" 237 fprintf(stderr, "Usage: virtio_test [--help]"
225 " [--no-indirect]" 238 " [--no-indirect]"
226 " [--no-event-idx]" 239 " [--no-event-idx]"
240 " [--delayed-interrupt]"
227 "\n"); 241 "\n");
228} 242}
229 243
@@ -233,6 +247,7 @@ int main(int argc, char **argv)
233 unsigned long long features = (1ULL << VIRTIO_RING_F_INDIRECT_DESC) | 247 unsigned long long features = (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
234 (1ULL << VIRTIO_RING_F_EVENT_IDX); 248 (1ULL << VIRTIO_RING_F_EVENT_IDX);
235 int o; 249 int o;
250 bool delayed = false;
236 251
237 for (;;) { 252 for (;;) {
238 o = getopt_long(argc, argv, optstring, longopts, NULL); 253 o = getopt_long(argc, argv, optstring, longopts, NULL);
@@ -251,6 +266,9 @@ int main(int argc, char **argv)
251 case 'i': 266 case 'i':
252 features &= ~(1ULL << VIRTIO_RING_F_INDIRECT_DESC); 267 features &= ~(1ULL << VIRTIO_RING_F_INDIRECT_DESC);
253 break; 268 break;
269 case 'D':
270 delayed = true;
271 break;
254 default: 272 default:
255 assert(0); 273 assert(0);
256 break; 274 break;
@@ -260,6 +278,6 @@ int main(int argc, char **argv)
260done: 278done:
261 vdev_info_init(&dev, features); 279 vdev_info_init(&dev, features);
262 vq_info_add(&dev, 256); 280 vq_info_add(&dev, 256);
263 run_test(&dev, &dev.vqs[0], 0x100000); 281 run_test(&dev, &dev.vqs[0], delayed, 0x100000);
264 return 0; 282 return 0;
265} 283}