-rw-r--r--Documentation/ABI/testing/sysfs-class-net-mesh8
-rw-r--r--Documentation/devicetree/bindings/net/can/atmel-can.txt14
-rw-r--r--Documentation/devicetree/bindings/net/cpsw.txt16
-rw-r--r--Documentation/devicetree/bindings/net/dsa/dsa.txt91
-rw-r--r--Documentation/devicetree/bindings/net/marvell-orion-mdio.txt3
-rw-r--r--Documentation/networking/ip-sysctl.txt53
-rw-r--r--Documentation/networking/packet_mmap.txt327
-rw-r--r--Documentation/networking/stmmac.txt33
-rw-r--r--MAINTAINERS4
-rw-r--r--arch/alpha/include/uapi/asm/socket.h2
-rw-r--r--arch/arm/boot/dts/am33xx.dtsi2
-rw-r--r--arch/arm/net/bpf_jit_32.c5
-rw-r--r--arch/arm/plat-orion/common.c54
-rw-r--r--arch/avr32/include/uapi/asm/socket.h2
-rw-r--r--arch/cris/include/uapi/asm/socket.h2
-rw-r--r--arch/frv/include/uapi/asm/socket.h2
-rw-r--r--arch/h8300/include/uapi/asm/socket.h2
-rw-r--r--arch/ia64/include/uapi/asm/socket.h2
-rw-r--r--arch/m32r/include/uapi/asm/socket.h2
-rw-r--r--arch/mips/include/uapi/asm/socket.h2
-rw-r--r--arch/mn10300/include/uapi/asm/socket.h2
-rw-r--r--arch/parisc/include/uapi/asm/socket.h2
-rw-r--r--arch/powerpc/include/uapi/asm/socket.h2
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c12
-rw-r--r--arch/powerpc/platforms/chrp/pegasos_eth.c20
-rw-r--r--arch/powerpc/sysdev/mv64x60_dev.c16
-rw-r--r--arch/s390/include/uapi/asm/socket.h2
-rw-r--r--arch/sparc/include/uapi/asm/socket.h2
-rw-r--r--arch/sparc/net/bpf_jit_comp.c6
-rw-r--r--arch/x86/net/bpf_jit_comp.c9
-rw-r--r--arch/xtensa/include/uapi/asm/socket.h2
-rw-r--r--drivers/bcma/core.c8
-rw-r--r--drivers/bcma/main.c5
-rw-r--r--drivers/connector/cn_proc.c25
-rw-r--r--drivers/connector/connector.c12
-rw-r--r--drivers/dma/ioat/dca.c11
-rw-r--r--drivers/firewire/Kconfig6
-rw-r--r--drivers/firewire/net.c462
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c68
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c34
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h14
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c155
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c15
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c113
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h11
-rw-r--r--drivers/isdn/capi/capidrv.c3
-rw-r--r--drivers/isdn/divert/isdn_divert.c8
-rw-r--r--drivers/isdn/hisax/fsm.c2
-rw-r--r--drivers/isdn/hisax/hfc_sx.c2
-rw-r--r--drivers/isdn/i4l/isdn_net.c2
-rw-r--r--drivers/media/dvb-core/dvb_net.c10
-rw-r--r--drivers/net/Kconfig1
-rw-r--r--drivers/net/appletalk/Kconfig18
-rw-r--r--drivers/net/bonding/bond_main.c3
-rw-r--r--drivers/net/caif/Kconfig7
-rw-r--r--drivers/net/caif/Makefile4
-rw-r--r--drivers/net/caif/caif_shm_u5500.c128
-rw-r--r--drivers/net/caif/caif_shmcore.c747
-rw-r--r--drivers/net/can/Kconfig2
-rw-r--r--drivers/net/can/at91_can.c76
-rw-r--r--drivers/net/can/bfin_can.c4
-rw-r--r--drivers/net/can/mcp251x.c67
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c6
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c24
-rw-r--r--drivers/net/ethernet/amd/7990.c2
-rw-r--r--drivers/net/ethernet/amd/a2065.c1
-rw-r--r--drivers/net/ethernet/amd/am79c961a.c1
-rw-r--r--drivers/net/ethernet/amd/ariadne.c1
-rw-r--r--drivers/net/ethernet/amd/atarilance.c6
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c1
-rw-r--r--drivers/net/ethernet/amd/declance.c2
-rw-r--r--drivers/net/ethernet/amd/mvme147.c4
-rw-r--r--drivers/net/ethernet/amd/ni65.c2
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c1
-rw-r--r--drivers/net/ethernet/amd/sun3lance.c7
-rw-r--r--drivers/net/ethernet/amd/sunlance.c9
-rw-r--r--drivers/net/ethernet/apple/macmace.c16
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c6
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c2
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c3
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c73
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c84
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h47
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c327
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h28
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c127
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h87
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h252
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c226
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h16
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c278
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c79
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h21
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c351
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h22
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c77
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c107
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h9
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c5
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c389
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h18
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c2
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c5
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.c77
-rw-r--r--drivers/net/ethernet/cadence/macb.c80
-rw-r--r--drivers/net/ethernet/cadence/macb.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h55
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c778
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c89
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c256
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h53
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h98
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/adapter.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c35
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h24
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c14
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.c74
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c13
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c17
-rw-r--r--drivers/net/ethernet/dec/tulip/xircom_cb.c9
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c7
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c31
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h10
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c11
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c60
-rw-r--r--drivers/net/ethernet/emulex/benet/be_roce.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_roce.h2
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c22
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c8
-rw-r--r--drivers/net/ethernet/freescale/Makefile3
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c (renamed from drivers/net/ethernet/freescale/fec.c)53
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c3
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c17
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c159
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h8
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c3
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c2
-rw-r--r--drivers/net/ethernet/i825xx/82596.c8
-rw-r--r--drivers/net/ethernet/i825xx/lib82596.c6
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c9
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c9
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c6
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c7
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c131
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c38
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.h2
-rw-r--r--drivers/net/ethernet/intel/e1000e/defines.h27
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h21
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c240
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h4
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c408
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h11
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c37
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c306
-rw-r--r--drivers/net/ethernet/intel/e1000e/nvm.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/param.c62
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c134
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c2
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c42
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c23
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c104
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c7
-rw-r--r--drivers/net/ethernet/marvell/Kconfig5
-rw-r--r--drivers/net/ethernet/marvell/Makefile2
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c239
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c131
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c14
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c86
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h1
-rw-r--r--drivers/net/ethernet/micrel/ks8695net.c20
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c8
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c32
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c4
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c3
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c12
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c12
-rw-r--r--drivers/net/ethernet/natsemi/sonic.c1
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c12
-rw-r--r--drivers/net/ethernet/neterion/s2io.c6
-rw-r--r--drivers/net/ethernet/netx-eth.c2
-rw-r--r--drivers/net/ethernet/nuvoton/w90p910_ether.c20
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c42
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c4
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c21
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c5
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig10
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic.h5
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c220
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/Makefile4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h81
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c379
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h181
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c55
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c75
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c14
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c49
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c68
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c107
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h214
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c1297
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c1175
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c255
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c6
-rw-r--r--drivers/net/ethernet/rdc/r6040.c12
-rw-r--r--drivers/net/ethernet/realtek/8139too.c2
-rw-r--r--drivers/net/ethernet/realtek/atp.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c270
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c397
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h220
-rw-r--r--drivers/net/ethernet/s6gmac.c16
-rw-r--r--drivers/net/ethernet/seeq/ether3.c22
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c2
-rw-r--r--drivers/net/ethernet/sfc/efx.c267
-rw-r--r--drivers/net/ethernet/sfc/efx.h14
-rw-r--r--drivers/net/ethernet/sfc/enum.h12
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c4
-rw-r--r--drivers/net/ethernet/sfc/falcon.c17
-rw-r--r--drivers/net/ethernet/sfc/filter.c249
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h1
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h97
-rw-r--r--drivers/net/ethernet/sfc/nic.c94
-rw-r--r--drivers/net/ethernet/sfc/ptp.c116
-rw-r--r--drivers/net/ethernet/sfc/rx.c793
-rw-r--r--drivers/net/ethernet/sfc/siena.c25
-rw-r--r--drivers/net/ethernet/sgi/meth.c5
-rw-r--r--drivers/net/ethernet/sis/sis900.c7
-rw-r--r--drivers/net/ethernet/smsc/smc9194.c2
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c2
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c2
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/chain_mode.c90
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h122
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs.h51
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs_com.h44
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000.h40
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c104
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c151
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c85
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c38
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h23
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c156
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c148
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c998
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c215
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h74
-rw-r--r--drivers/net/ethernet/sun/niu.c2
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c4
-rw-r--r--drivers/net/ethernet/sun/sunhme.c13
-rw-r--r--drivers/net/ethernet/sun/sunqe.c5
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c5
-rw-r--r--drivers/net/ethernet/ti/cpsw.c159
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c48
-rw-r--r--drivers/net/ethernet/ti/tlan.c4
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c240
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c3
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c21
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c4
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c4
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c27
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c25
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c1
-rw-r--r--drivers/net/fddi/defxx.c9
-rw-r--r--drivers/net/hamradio/yam.c2
-rw-r--r--drivers/net/ieee802154/at86rf230.c51
-rw-r--r--drivers/net/ieee802154/mrf24j40.c21
-rw-r--r--drivers/net/irda/ali-ircc.c6
-rw-r--r--drivers/net/irda/au1k_ir.c18
-rw-r--r--drivers/net/irda/bfin_sir.c3
-rw-r--r--drivers/net/irda/nsc-ircc.c6
-rw-r--r--drivers/net/irda/pxaficp_ir.c4
-rw-r--r--drivers/net/irda/smsc-ircc2.c17
-rw-r--r--drivers/net/irda/via-ircc.c6
-rw-r--r--drivers/net/irda/w83977af_ir.c7
-rw-r--r--drivers/net/macvlan.c19
-rw-r--r--drivers/net/macvtap.c2
-rw-r--r--drivers/net/phy/lxt.c2
-rw-r--r--drivers/net/phy/marvell.c127
-rw-r--r--drivers/net/phy/mdio-gpio.c12
-rw-r--r--drivers/net/phy/mdio-octeon.c107
-rw-r--r--drivers/net/phy/micrel.c41
-rw-r--r--drivers/net/phy/phy.c66
-rw-r--r--drivers/net/phy/spi_ks8995.c8
-rw-r--r--drivers/net/phy/vitesse.c3
-rw-r--r--drivers/net/plip/plip.c2
-rw-r--r--drivers/net/ppp/ppp_synctty.c53
-rw-r--r--drivers/net/team/Kconfig12
-rw-r--r--drivers/net/team/Makefile1
-rw-r--r--drivers/net/team/team.c25
-rw-r--r--drivers/net/team/team_mode_broadcast.c14
-rw-r--r--drivers/net/team/team_mode_random.c71
-rw-r--r--drivers/net/team/team_mode_roundrobin.c36
-rw-r--r--drivers/net/tun.c4
-rw-r--r--drivers/net/usb/cdc_ncm.c4
-rw-r--r--drivers/net/virtio_net.c4
-rw-r--r--drivers/net/vxlan.c456
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h3
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c6
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.h3
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c20
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/Kconfig9
-rw-r--r--drivers/net/wireless/ath/ath6kl/Makefile5
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c15
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.h3
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.c72
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.h11
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.c3
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_mbox.c21
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c15
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c116
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c41
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c16
-rw-r--r--drivers/net/wireless/ath/ath6kl/target.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/trace.c23
-rw-r--r--drivers/net/wireless/ath/ath6kl/trace.h332
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c5
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c38
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c17
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h49
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c188
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h1
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c69
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile4
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c33
-rw-r--r--drivers/net/wireless/ath/wil6210/dbg_hexdump.h20
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c58
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c25
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c60
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c3
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c3
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c7
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h17
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c154
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h363
-rw-r--r--drivers/net/wireless/b43/Kconfig6
-rw-r--r--drivers/net/wireless/b43/b43.h6
-rw-r--r--drivers/net/wireless/b43/dma.c9
-rw-r--r--drivers/net/wireless/b43/main.c7
-rw-r--r--drivers/net/wireless/b43/phy_ht.c610
-rw-r--r--drivers/net/wireless/b43/phy_ht.h77
-rw-r--r--drivers/net/wireless/b43legacy/dma.c8
-rw-r--r--drivers/net/wireless/brcm80211/Kconfig5
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/Makefile3
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h7
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c30
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c33
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c42
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h34
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c39
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c28
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c382
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h25
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/tracepoint.c22
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h87
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c37
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c6
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/Makefile4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/led.c126
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/led.h36
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c11
-rw-r--r--drivers/net/wireless/brcm80211/brcmutil/utils.c25
-rw-r--r--drivers/net/wireless/brcm80211/include/brcmu_utils.h27
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c2
-rw-r--r--drivers/net/wireless/iwlegacy/3945.c4
-rw-r--r--drivers/net/wireless/iwlegacy/3945.h4
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c33
-rw-r--r--drivers/net/wireless/iwlegacy/common.c21
-rw-r--r--drivers/net/wireless/iwlegacy/common.h9
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig11
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/agn.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/calib.h2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/commands.h3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/debugfs.c26
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/lib.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c3
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/scan.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/testmode.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c13
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/ucode.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c (renamed from drivers/net/wireless/iwlwifi/pcie/1000.c)1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c (renamed from drivers/net/wireless/iwlwifi/pcie/2000.c)1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c (renamed from drivers/net/wireless/iwlwifi/pcie/5000.c)1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c (renamed from drivers/net/wireless/iwlwifi/pcie/6000.c)1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c (renamed from drivers/net/wireless/iwlwifi/pcie/7000.c)63
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hw.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h49
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.c13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.h17
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-read.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom-read.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-file.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c19
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-modparams.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.c15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-notif-wait.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c51
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.c11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-test.c11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-test.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-testmode.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/Makefile2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/binding.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/bt-coex.c347
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c260
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c138
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h319
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h53
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-power.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api.h12
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw.c14
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/led.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c45
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c57
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h17
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/nvm.c11
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c12
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/quota.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c14
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rx.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c91
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/cfg.h115
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c4
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c25
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c6
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c3
-rw-r--r--drivers/net/wireless/mwifiex/Makefile1
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c218
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c2
-rw-r--r--drivers/net/wireless/mwifiex/ethtool.c70
-rw-r--r--drivers/net/wireless/mwifiex/fw.h72
-rw-r--r--drivers/net/wireless/mwifiex/init.c6
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h23
-rw-r--r--drivers/net/wireless/mwifiex/main.c15
-rw-r--r--drivers/net/wireless/mwifiex/main.h9
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c156
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c79
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c2
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c10
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c2
-rw-r--r--drivers/net/wireless/mwifiex/util.c5
-rw-r--r--drivers/net/wireless/mwl8k.c111
-rw-r--r--drivers/net/wireless/orinoco/orinoco_usb.c2
-rw-r--r--drivers/net/wireless/p54/p54spi.c6
-rw-r--r--drivers/net/wireless/ray_cs.c8
-rw-r--r--drivers/net/wireless/rndis_wlan.c5
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig7
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h103
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c857
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c116
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c63
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h10
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c8
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c10
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h9
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c20
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c3
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h3
-rw-r--r--drivers/net/wireless/ti/wl1251/spi.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c3
-rw-r--r--drivers/net/xen-netback/netback.c9
-rw-r--r--drivers/net/xen-netfront.c26
-rw-r--r--drivers/ptp/ptp_pch.c29
-rw-r--r--drivers/s390/kvm/virtio_ccw.c6
-rw-r--r--drivers/scsi/csiostor/Makefile3
-rw-r--r--drivers/scsi/csiostor/csio_hw.c559
-rw-r--r--drivers/scsi/csiostor/csio_hw.h47
-rw-r--r--drivers/scsi/csiostor/csio_hw_chip.h175
-rw-r--r--drivers/scsi/csiostor/csio_hw_t4.c403
-rw-r--r--drivers/scsi/csiostor/csio_hw_t5.c397
-rw-r--r--drivers/scsi/csiostor/csio_init.c48
-rw-r--r--drivers/scsi/csiostor/csio_init.h29
-rw-r--r--drivers/scsi/csiostor/csio_lnode.h2
-rw-r--r--drivers/scsi/csiostor/csio_rnode.c10
-rw-r--r--drivers/scsi/csiostor/csio_rnode.h2
-rw-r--r--drivers/scsi/csiostor/csio_wr.c60
-rw-r--r--drivers/scsi/scsi_netlink.c4
-rw-r--r--drivers/scsi/scsi_transport_fc.c21
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c70
-rw-r--r--drivers/ssb/driver_chipcommon.c2
-rw-r--r--drivers/ssb/driver_chipcommon_pmu.c41
-rw-r--r--drivers/ssb/driver_mipscore.c25
-rw-r--r--drivers/ssb/driver_pcicore.c15
-rw-r--r--drivers/ssb/embedded.c5
-rw-r--r--drivers/ssb/main.c51
-rw-r--r--drivers/ssb/pci.c97
-rw-r--r--drivers/ssb/pcmcia.c46
-rw-r--r--drivers/ssb/scan.c31
-rw-r--r--drivers/ssb/sprom.c4
-rw-r--r--drivers/ssb/ssb_private.h19
-rw-r--r--drivers/staging/gdm72xx/netlink_k.c12
-rw-r--r--include/linux/cn_proc.h4
-rw-r--r--include/linux/filter.h14
-rw-r--r--include/linux/ieee80211.h52
-rw-r--r--include/linux/if_arp.h12
-rw-r--r--include/linux/if_team.h25
-rw-r--r--include/linux/if_vlan.h2
-rw-r--r--include/linux/micrel_phy.h1
-rw-r--r--include/linux/mlx4/device.h1
-rw-r--r--include/linux/mv643xx_eth.h1
-rw-r--r--include/linux/netdev_features.h9
-rw-r--r--include/linux/netdevice.h62
-rw-r--r--include/linux/netfilter.h5
-rw-r--r--include/linux/of_net.h10
-rw-r--r--include/linux/openvswitch.h13
-rw-r--r--include/linux/phy.h10
-rw-r--r--include/linux/platform_data/cpsw.h2
-rw-r--r--include/linux/rtnetlink.h9
-rw-r--r--include/linux/sh_eth.h1
-rw-r--r--include/linux/skbuff.h87
-rw-r--r--include/linux/socket.h1
-rw-r--r--include/linux/ssb/ssb.h6
-rw-r--r--include/linux/tcp.h21
-rw-r--r--include/linux/virtio.h6
-rw-r--r--include/linux/vm_sockets.h23
-rw-r--r--include/net/caif/caif_shm.h26
-rw-r--r--include/net/cfg80211.h126
-rw-r--r--include/net/dn_fib.h28
-rw-r--r--include/net/firewire.h25
-rw-r--r--include/net/gre.h51
-rw-r--r--include/net/inet_connection_sock.h5
-rw-r--r--include/net/inet_frag.h22
-rw-r--r--include/net/ip6_tunnel.h21
-rw-r--r--include/net/ip_tunnels.h177
-rw-r--r--include/net/ip_vs.h132
-rw-r--r--include/net/ipip.h87
-rw-r--r--include/net/ipv6.h22
-rw-r--r--include/net/mac80211.h29
-rw-r--r--include/net/net_namespace.h2
-rw-r--r--include/net/netfilter/nf_conntrack_core.h1
-rw-r--r--include/net/netfilter/nf_log.h14
-rw-r--r--include/net/netns/ipv6.h1
-rw-r--r--include/net/netns/netfilter.h18
-rw-r--r--include/net/request_sock.h8
-rw-r--r--include/net/rtnetlink.h2
-rw-r--r--include/net/sock.h1
-rw-r--r--include/net/tcp.h145
-rw-r--r--include/uapi/asm-generic/socket.h2
-rw-r--r--include/uapi/linux/cn_proc.h10
-rw-r--r--include/uapi/linux/filter.h3
-rw-r--r--include/uapi/linux/if_ether.h3
-rw-r--r--include/uapi/linux/if_packet.h2
-rw-r--r--include/uapi/linux/neighbour.h3
-rw-r--r--include/uapi/linux/netfilter/xt_NFQUEUE.h9
-rw-r--r--include/uapi/linux/netfilter_ipv6/ip6t_frag.h4
-rw-r--r--include/uapi/linux/netlink.h2
-rw-r--r--include/uapi/linux/netlink_diag.h42
-rw-r--r--include/uapi/linux/nfc.h16
-rw-r--r--include/uapi/linux/nl80211.h117
-rw-r--r--include/uapi/linux/pkt_sched.h1
-rw-r--r--include/uapi/linux/snmp.h2
-rw-r--r--include/uapi/linux/tcp.h26
-rw-r--r--include/uapi/linux/vm_sockets.h23
-rw-r--r--kernel/audit.c10
-rw-r--r--kernel/signal.c2
-rw-r--r--net/802/garp.c4
-rw-r--r--net/Kconfig1
-rw-r--r--net/atm/lec.h2
-rw-r--r--net/batman-adv/Kconfig14
-rw-r--r--net/batman-adv/Makefile3
-rw-r--r--net/batman-adv/bat_iv_ogm.c5
-rw-r--r--net/batman-adv/debugfs.c18
-rw-r--r--net/batman-adv/distributed-arp-table.c22
-rw-r--r--net/batman-adv/gateway_client.c2
-rw-r--r--net/batman-adv/hard-interface.c66
-rw-r--r--net/batman-adv/hard-interface.h13
-rw-r--r--net/batman-adv/main.c10
-rw-r--r--net/batman-adv/main.h15
-rw-r--r--net/batman-adv/network-coding.c1821
-rw-r--r--net/batman-adv/network-coding.h123
-rw-r--r--net/batman-adv/originator.c10
-rw-r--r--net/batman-adv/packet.h33
-rw-r--r--net/batman-adv/routing.c49
-rw-r--r--net/batman-adv/send.c5
-rw-r--r--net/batman-adv/soft-interface.c281
-rw-r--r--net/batman-adv/soft-interface.h3
-rw-r--r--net/batman-adv/sysfs.c22
-rw-r--r--net/batman-adv/translation-table.c29
-rw-r--r--net/batman-adv/types.h136
-rw-r--r--net/batman-adv/unicast.c6
-rw-r--r--net/batman-adv/vis.c4
-rw-r--r--net/bluetooth/af_bluetooth.c3
-rw-r--r--net/bluetooth/bnep/netdev.c2
-rw-r--r--net/bridge/br_fdb.c14
-rw-r--r--net/bridge/br_if.c1
-rw-r--r--net/bridge/br_mdb.c4
-rw-r--r--net/bridge/br_multicast.c2
-rw-r--r--net/bridge/br_netlink.c21
-rw-r--r--net/bridge/netfilter/ebt_log.c44
-rw-r--r--net/bridge/netfilter/ebt_nflog.c5
-rw-r--r--net/bridge/netfilter/ebt_ulog.c132
-rw-r--r--net/bridge/netfilter/ebtable_broute.c4
-rw-r--r--net/bridge/netfilter/ebtables.c2
-rw-r--r--net/caif/caif_dev.c9
-rw-r--r--net/caif/caif_socket.c22
-rw-r--r--net/caif/caif_usb.c4
-rw-r--r--net/caif/cfcnfg.c19
-rw-r--r--net/caif/cfctrl.c14
-rw-r--r--net/caif/cffrml.c4
-rw-r--r--net/caif/cfmuxl.c4
-rw-r--r--net/caif/cfpkt_skbuff.c8
-rw-r--r--net/caif/cfrfml.c4
-rw-r--r--net/caif/cfserl.c4
-rw-r--r--net/caif/cfsrvl.c13
-rw-r--r--net/caif/chnl_net.c6
-rw-r--r--net/can/af_can.c30
-rw-r--r--net/can/gw.c5
-rw-r--r--net/core/datagram.c4
-rw-r--r--net/core/dev.c80
-rw-r--r--net/core/dst.c9
-rw-r--r--net/core/ethtool.c1
-rw-r--r--net/core/fib_rules.c4
-rw-r--r--net/core/filter.c5
-rw-r--r--net/core/flow.c42
-rw-r--r--net/core/flow_dissector.c68
-rw-r--r--net/core/neighbour.c6
-rw-r--r--net/core/net-procfs.c2
-rw-r--r--net/core/netpoll.c3
-rw-r--r--net/core/rtnetlink.c176
-rw-r--r--net/core/skbuff.c50
-rw-r--r--net/core/sock.c10
-rw-r--r--net/core/utils.c5
-rw-r--r--net/dcb/dcbevent.c1
-rw-r--r--net/dcb/dcbnl.c2
-rw-r--r--net/dccp/ipv4.c5
-rw-r--r--net/dccp/ipv6.c5
-rw-r--r--net/decnet/dn_dev.c4
-rw-r--r--net/decnet/dn_fib.c203
-rw-r--r--net/decnet/dn_route.c43
-rw-r--r--net/decnet/dn_table.c45
-rw-r--r--net/decnet/netfilter/dn_rtmsg.c12
-rw-r--r--net/dsa/dsa.c233
-rw-r--r--net/ethernet/eth.c2
-rw-r--r--net/ieee802154/6lowpan.c142
-rw-r--r--net/ieee802154/6lowpan.h7
-rw-r--r--net/ieee802154/dgram.c10
-rw-r--r--net/ieee802154/netlink.c8
-rw-r--r--net/ipv4/Kconfig7
-rw-r--r--net/ipv4/Makefile1
-rw-r--r--net/ipv4/af_inet.c10
-rw-r--r--net/ipv4/arp.c27
-rw-r--r--net/ipv4/devinet.c83
-rw-r--r--net/ipv4/fib_frontend.c10
-rw-r--r--net/ipv4/gre.c5
-rw-r--r--net/ipv4/inet_connection_sock.c2
-rw-r--r--net/ipv4/inet_diag.c4
-rw-r--r--net/ipv4/inet_fragment.c84
-rw-r--r--net/ipv4/inet_lro.c5
-rw-r--r--net/ipv4/ip_fragment.c31
-rw-r--r--net/ipv4/ip_gre.c1517
-rw-r--r--net/ipv4/ip_output.c3
-rw-r--r--net/ipv4/ip_tunnel.c1035
-rw-r--r--net/ipv4/ip_vti.c42
-rw-r--r--net/ipv4/ipconfig.c13
-rw-r--r--net/ipv4/ipip.c748
-rw-r--r--net/ipv4/ipmr.c12
-rw-r--r--net/ipv4/netfilter/arptable_filter.c4
-rw-r--r--net/ipv4/netfilter/ip_tables.c9
-rw-r--r--net/ipv4/netfilter/ipt_ULOG.c133
-rw-r--r--net/ipv4/netfilter/nf_conntrack_proto_icmp.c8
-rw-r--r--net/ipv4/proc.c2
-rw-r--r--net/ipv4/route.c2
-rw-r--r--net/ipv4/syncookies.c3
-rw-r--r--net/ipv4/sysctl_net_ipv4.c18
-rw-r--r--net/ipv4/tcp.c268
-rw-r--r--net/ipv4/tcp_input.c606
-rw-r--r--net/ipv4/tcp_ipv4.c108
-rw-r--r--net/ipv4/tcp_minisocks.c44
-rw-r--r--net/ipv4/tcp_output.c367
-rw-r--r--net/ipv4/tcp_timer.c21
-rw-r--r--net/ipv4/tcp_westwood.c2
-rw-r--r--net/ipv4/udp.c115
-rw-r--r--net/ipv4/udp_diag.c6
-rw-r--r--net/ipv6/Kconfig2
-rw-r--r--net/ipv6/addrconf.c108
-rw-r--r--net/ipv6/addrlabel.c12
-rw-r--r--net/ipv6/af_inet6.c7
-rw-r--r--net/ipv6/datagram.c20
-rw-r--r--net/ipv6/icmp.c2
-rw-r--r--net/ipv6/inet6_connection_sock.c10
-rw-r--r--net/ipv6/ip6_flowlabel.c11
-rw-r--r--net/ipv6/ip6_gre.c62
-rw-r--r--net/ipv6/ip6_offload.c4
-rw-r--r--net/ipv6/ip6_tunnel.c16
-rw-r--r--net/ipv6/ip6mr.c10
-rw-r--r--net/ipv6/ndisc.c2
-rw-r--r--net/ipv6/netfilter/ip6_tables.c3
-rw-r--r--net/ipv6/netfilter/ip6t_NPT.c11
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c8
-rw-r--r--net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c7
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c22
-rw-r--r--net/ipv6/raw.c9
-rw-r--r--net/ipv6/reassembly.c23
-rw-r--r--net/ipv6/route.c6
-rw-r--r--net/ipv6/sit.c41
-rw-r--r--net/ipv6/syncookies.c3
-rw-r--r--net/ipv6/tcp_ipv6.c56
-rw-r--r--net/ipv6/udp.c13
-rw-r--r--net/ipv6/udp_offload.c8
-rw-r--r--net/iucv/af_iucv.c3
-rw-r--r--net/l2tp/l2tp_core.c2
-rw-r--r--net/mac80211/cfg.c151
-rw-r--r--net/mac80211/debugfs_sta.c31
-rw-r--r--net/mac80211/driver-ops.h7
-rw-r--r--net/mac80211/ht.c52
-rw-r--r--net/mac80211/ibss.c29
-rw-r--r--net/mac80211/ieee80211_i.h26
-rw-r--r--net/mac80211/iface.c14
-rw-r--r--net/mac80211/key.c103
-rw-r--r--net/mac80211/key.h5
-rw-r--r--net/mac80211/main.c55
-rw-r--r--net/mac80211/mesh.c59
-rw-r--r--net/mac80211/mesh.h12
-rw-r--r--net/mac80211/mesh_plink.c37
-rw-r--r--net/mac80211/mlme.c100
-rw-r--r--net/mac80211/offchannel.c2
-rw-r--r--net/mac80211/pm.c117
-rw-r--r--net/mac80211/rc80211_minstrel.c204
-rw-r--r--net/mac80211/rc80211_minstrel.h31
-rw-r--r--net/mac80211/rc80211_minstrel_debugfs.c12
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c79
-rw-r--r--net/mac80211/rc80211_minstrel_ht.h6
-rw-r--r--net/mac80211/rx.c61
-rw-r--r--net/mac80211/sta_info.c12
-rw-r--r--net/mac80211/sta_info.h2
-rw-r--r--net/mac80211/trace.h11
-rw-r--r--net/mac80211/tx.c2
-rw-r--r--net/mac80211/util.c73
-rw-r--r--net/mac80211/vht.c212
-rw-r--r--net/mac802154/mac802154.h3
-rw-r--r--net/mac802154/mac_cmd.c1
-rw-r--r--net/mac802154/mib.c9
-rw-r--r--net/mac802154/tx.c26
-rw-r--r--net/mac802154/wpan.c4
-rw-r--r--net/netfilter/core.c29
-rw-r--r--net/netfilter/ipset/ip_set_core.c5
-rw-r--r--net/netfilter/ipvs/ip_vs_app.c31
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c306
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c81
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c649
-rw-r--r--net/netfilter/ipvs/ip_vs_dh.c86
-rw-r--r--net/netfilter/ipvs/ip_vs_est.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_ftp.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_lblc.c115
-rw-r--r--net/netfilter/ipvs/ip_vs_lblcr.c190
-rw-r--r--net/netfilter/ipvs/ip_vs_lc.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_nq.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_pe.c55
-rw-r--r--net/netfilter/ipvs/ip_vs_pe_sip.c1
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_sctp.c36
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_tcp.c40
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_udp.c33
-rw-r--r--net/netfilter/ipvs/ip_vs_rr.c64
-rw-r--r--net/netfilter/ipvs/ip_vs_sched.c63
-rw-r--r--net/netfilter/ipvs/ip_vs_sed.c5
-rw-r--r--net/netfilter/ipvs/ip_vs_sh.c86
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c35
-rw-r--r--net/netfilter/ipvs/ip_vs_wlc.c5
-rw-r--r--net/netfilter/ipvs/ip_vs_wrr.c176
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c1050
-rw-r--r--net/netfilter/nf_conntrack_core.c47
-rw-r--r--net/netfilter/nf_conntrack_helper.c2
-rw-r--r--net/netfilter/nf_conntrack_netlink.c100
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c9
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c18
-rw-r--r--net/netfilter/nf_conntrack_proto_udp.c6
-rw-r--r--net/netfilter/nf_conntrack_proto_udplite.c8
-rw-r--r--net/netfilter/nf_conntrack_standalone.c16
-rw-r--r--net/netfilter/nf_log.c206
-rw-r--r--net/netfilter/nfnetlink.c7
-rw-r--r--net/netfilter/nfnetlink_log.c186
-rw-r--r--net/netfilter/nfnetlink_queue_core.c271
-rw-r--r--net/netfilter/xt_LOG.c52
-rw-r--r--net/netfilter/xt_NFQUEUE.c63
-rw-r--r--net/netfilter/xt_osf.c6
-rw-r--r--net/netlink/Kconfig10
-rw-r--r--net/netlink/Makefile3
-rw-r--r--net/netlink/af_netlink.c63
-rw-r--r--net/netlink/af_netlink.h62
-rw-r--r--net/netlink/diag.c188
-rw-r--r--net/nfc/llcp/commands.c205
-rw-r--r--net/nfc/llcp/llcp.c112
-rw-r--r--net/nfc/llcp/llcp.h36
-rw-r--r--net/nfc/llcp/sock.c133
-rw-r--r--net/nfc/netlink.c172
-rw-r--r--net/nfc/nfc.h14
-rw-r--r--net/openvswitch/datapath.c15
-rw-r--r--net/openvswitch/datapath.h2
-rw-r--r--net/openvswitch/flow.c6
-rw-r--r--net/openvswitch/vport-internal_dev.c13
-rw-r--r--net/openvswitch/vport.h4
-rw-r--r--net/packet/af_packet.c114
-rw-r--r--net/packet/internal.h3
-rw-r--r--net/phonet/pn_netlink.c4
-rw-r--r--net/rfkill/rfkill-regulator.c2
-rw-r--r--net/sched/act_api.c2
-rw-r--r--net/sched/cls_api.c14
-rw-r--r--net/sched/sch_api.c44
-rw-r--r--net/sched/sch_htb.c31
-rw-r--r--net/sctp/socket.c8
-rw-r--r--net/tipc/netlink.c6
-rw-r--r--net/unix/af_unix.c4
-rw-r--r--net/vmw_vsock/vmci_transport.c16
-rw-r--r--net/vmw_vsock/vmci_transport.h3
-rw-r--r--net/wireless/ap.c62
-rw-r--r--net/wireless/core.c73
-rw-r--r--net/wireless/core.h24
-rw-r--r--net/wireless/mesh.c15
-rw-r--r--net/wireless/mlme.c230
-rw-r--r--net/wireless/nl80211.c1857
-rw-r--r--net/wireless/nl80211.h68
-rw-r--r--net/wireless/rdev-ops.h20
-rw-r--r--net/wireless/reg.c6
-rw-r--r--net/wireless/sme.c26
-rw-r--r--net/wireless/sysfs.c25
-rw-r--r--net/wireless/trace.h46
-rw-r--r--net/wireless/util.c2
-rw-r--r--net/xfrm/xfrm_policy.c23
-rw-r--r--security/selinux/hooks.c4
-rw-r--r--security/selinux/netlink.c3
-rw-r--r--tools/Makefile11
-rw-r--r--tools/net/Makefile15
-rw-r--r--tools/net/bpf_jit_disasm.c199
-rw-r--r--tools/testing/selftests/Makefile1
-rw-r--r--tools/testing/selftests/net/Makefile19
-rw-r--r--tools/testing/selftests/net/psock_fanout.c312
-rw-r--r--tools/testing/selftests/net/psock_lib.h127
-rw-r--r--tools/testing/selftests/net/psock_tpacket.c824
-rw-r--r--tools/testing/selftests/net/run_afpackettests26
-rw-r--r--tools/testing/selftests/net/run_netsocktests12
-rw-r--r--tools/testing/selftests/net/socket.c92
895 files changed, 36521 insertions, 17189 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-net-mesh b/Documentation/ABI/testing/sysfs-class-net-mesh
index bc41da61608d..bdcd8b4e38f2 100644
--- a/Documentation/ABI/testing/sysfs-class-net-mesh
+++ b/Documentation/ABI/testing/sysfs-class-net-mesh
@@ -67,6 +67,14 @@ Description:
 		Defines the penalty which will be applied to an
 		originator message's tq-field on every hop.
 
+What:           /sys/class/net/<mesh_iface>/mesh/network_coding
+Date:           Nov 2012
+Contact:        Martin Hundeboll <martin@hundeboll.net>
+Description:
+                Controls whether Network Coding (using some magic
+                to send fewer wifi packets but still the same
+                content) is enabled or not.
+
 What:           /sys/class/net/<mesh_iface>/mesh/orig_interval
 Date:           May 2010
 Contact:        Marek Lindner <lindner_marek@yahoo.de>
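The network_coding attribute added above behaves like any other sysfs file, so
it can be toggled from userspace with a plain write. A minimal C sketch,
assuming the mesh interface carries the customary name bat0 (the actual name
depends on local configuration):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Enable batman-adv network coding by writing "1" to the sysfs
 * attribute documented above; "bat0" is an assumed interface name. */
int main(void)
{
	int fd = open("/sys/class/net/bat0/mesh/network_coding", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "1\n", 2) != 2)
		perror("write");
	close(fd);
	return 0;
}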
diff --git a/Documentation/devicetree/bindings/net/can/atmel-can.txt b/Documentation/devicetree/bindings/net/can/atmel-can.txt
new file mode 100644
index 000000000000..72cf0c5daff4
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/can/atmel-can.txt
@@ -0,0 +1,14 @@
+* AT91 CAN *
+
+Required properties:
+- compatible: Should be "atmel,at91sam9263-can" or "atmel,at91sam9x5-can"
+- reg: Should contain CAN controller registers location and length
+- interrupts: Should contain IRQ line for the CAN controller
+
+Example:
+
+	can0: can@f000c000 {
+		compatible = "atmel,at91sam9x5-can";
+		reg = <0xf000c000 0x300>;
+		interrupts = <40 4 5>;
+	};
diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt
index ecfdf756d10f..4f2ca6b4a182 100644
--- a/Documentation/devicetree/bindings/net/cpsw.txt
+++ b/Documentation/devicetree/bindings/net/cpsw.txt
@@ -15,16 +15,22 @@ Required properties:
 - mac_control		: Specifies Default MAC control register content
 			  for the specific platform
 - slaves		: Specifies number for slaves
-- cpts_active_slave	: Specifies the slave to use for time stamping
+- active_slave		: Specifies the slave to use for time stamping,
+			  ethtool and SIOCGMIIPHY
 - cpts_clock_mult	: Numerator to convert input clock ticks into nanoseconds
 - cpts_clock_shift	: Denominator to convert input clock ticks into nanoseconds
-- phy_id		: Specifies slave phy id
-- mac-address		: Specifies slave MAC address
 
 Optional properties:
 - ti,hwmods		: Must be "cpgmac0"
 - no_bd_ram		: Must be 0 or 1
 - dual_emac		: Specifies Switch to act as Dual EMAC
+
+Slave Properties:
+Required properties:
+- phy_id		: Specifies slave phy id
+- mac-address		: Specifies slave MAC address
+
+Optional properties:
 - dual_emac_res_vlan	: Specifies VID to be used to segregate the ports
 
 Note: "ti,hwmods" field is used to fetch the base address and irq
@@ -47,7 +53,7 @@ Examples:
 		rx_descs = <64>;
 		mac_control = <0x20>;
 		slaves = <2>;
-		cpts_active_slave = <0>;
+		active_slave = <0>;
 		cpts_clock_mult = <0x80000000>;
 		cpts_clock_shift = <29>;
 		cpsw_emac0: slave@0 {
@@ -73,7 +79,7 @@ Examples:
 		rx_descs = <64>;
 		mac_control = <0x20>;
 		slaves = <2>;
-		cpts_active_slave = <0>;
+		active_slave = <0>;
 		cpts_clock_mult = <0x80000000>;
 		cpts_clock_shift = <29>;
 		cpsw_emac0: slave@0 {
diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt
new file mode 100644
index 000000000000..49f4f7ae3f51
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/dsa/dsa.txt
@@ -0,0 +1,91 @@
+Marvell Distributed Switch Architecture Device Tree Bindings
+------------------------------------------------------------
+
+Required properties:
+- compatible		: Should be "marvell,dsa"
+- #address-cells	: Must be 2, first cell is the address on the MDIO bus
+			  and second cell is the address in the switch tree.
+			  Second cell is used only when cascading/chaining.
+- #size-cells		: Must be 0
+- dsa,ethernet		: Should be a phandle to a valid Ethernet device node
+- dsa,mii-bus		: Should be a phandle to a valid MDIO bus device node
+
+Optional properties:
+- interrupts		: property with a value describing the switch
+			  interrupt number (not supported by the driver)
+
+A DSA node can contain multiple switch chips which are therefore child nodes of
+the parent DSA node. The maximum number of allowed child nodes is 4
+(DSA_MAX_SWITCHES).
+Each of these switch child nodes should have the following required properties:
+
+- reg			: Describes the switch address on the MII bus
+- #address-cells	: Must be 1
+- #size-cells		: Must be 0
+
+A switch may have multiple "port" child nodes.
+
+Each port child node must have the following mandatory properties:
+- reg			: Describes the port address in the switch
+- label			: Describes the label associated with this port, special
+			  labels are "cpu" to indicate a CPU port and "dsa" to
+			  indicate an uplink/downlink port.
+
+Note that a port labelled "dsa" will imply checking for the uplink phandle
+described below.
+
+Optional property:
+- link			: Should be a phandle to another switch's DSA port.
+			  This property is only used when switches are being
+			  chained/cascaded together.
+
+Example:
+
+	dsa@0 {
+		compatible = "marvell,dsa";
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		interrupts = <10>;
+		dsa,ethernet = <&ethernet0>;
+		dsa,mii-bus = <&mii_bus0>;
+
+		switch@0 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <16 0>;	/* MDIO address 16, switch 0 in tree */
+
+			port@0 {
+				reg = <0>;
+				label = "lan1";
+			};
+
+			port@1 {
+				reg = <1>;
+				label = "lan2";
+			};
+
+			port@5 {
+				reg = <5>;
+				label = "cpu";
+			};
+
+			switch0uplink: port@6 {
+				reg = <6>;
+				label = "dsa";
+				link = <&switch1uplink>;
+			};
+		};
+
+		switch@1 {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			reg = <17 1>;	/* MDIO address 17, switch 1 in tree */
+
+			switch1uplink: port@0 {
+				reg = <0>;
+				label = "dsa";
+				link = <&switch0uplink>;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt b/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt
index 34e7aafa321c..052b5f28a624 100644
--- a/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt
+++ b/Documentation/devicetree/bindings/net/marvell-orion-mdio.txt
@@ -9,6 +9,9 @@ Required properties:
 - compatible: "marvell,orion-mdio"
 - reg: address and length of the SMI register
 
+Optional properties:
+- interrupts: interrupt line number for the SMI error/done interrupt
+
 The child nodes of the MDIO driver are the individual PHY devices
 connected to this MDIO bus. They must have a "reg" property given the
 PHY address on the MDIO bus.
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index dc2dc87d2557..f98ca633b528 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -29,7 +29,7 @@ route/max_size - INTEGER
 neigh/default/gc_thresh1 - INTEGER
 	Minimum number of entries to keep. Garbage collector will not
 	purge entries if there are fewer than this number.
-	Default: 256
+	Default: 128
 
 neigh/default/gc_thresh3 - INTEGER
 	Maximum number of neighbor entries allowed. Increase this
@@ -175,14 +175,6 @@ tcp_congestion_control - STRING
 	is inherited.
 	[see setsockopt(listenfd, SOL_TCP, TCP_CONGESTION, "name" ...) ]
 
-tcp_cookie_size - INTEGER
-	Default size of TCP Cookie Transactions (TCPCT) option, that may be
-	overridden on a per socket basis by the TCPCT socket option.
-	Values greater than the maximum (16) are interpreted as the maximum.
-	Values greater than zero and less than the minimum (8) are interpreted
-	as the minimum. Odd values are interpreted as the next even value.
-	Default: 0 (off).
-
 tcp_dsack - BOOLEAN
 	Allows TCP to send "duplicate" SACKs.
 
@@ -190,7 +182,9 @@ tcp_early_retrans - INTEGER
 	Enable Early Retransmit (ER), per RFC 5827. ER lowers the threshold
 	for triggering fast retransmit when the amount of outstanding data is
 	small and when no previously unsent data can be transmitted (such
-	that limited transmit could be used).
+	that limited transmit could be used). Also controls the use of
+	Tail loss probe (TLP) that converts RTOs occurring due to tail
+	losses into fast recovery (draft-dukkipati-tcpm-tcp-loss-probe-01).
 	Possible values:
 		0 disables ER
 		1 enables ER
@@ -198,7 +192,9 @@ tcp_early_retrans - INTEGER
 		  by a fourth of RTT. This mitigates connection falsely
 		  recovers when network has a small degree of reordering
 		  (less than 3 packets).
-	Default: 2
+		3 enables delayed ER and TLP.
+		4 enables TLP only.
+	Default: 3
 
 tcp_ecn - INTEGER
 	Control use of Explicit Congestion Notification (ECN) by TCP.
@@ -229,36 +225,13 @@ tcp_fin_timeout - INTEGER
 	Default: 60 seconds
 
 tcp_frto - INTEGER
-	Enables Forward RTO-Recovery (F-RTO) defined in RFC4138.
+	Enables Forward RTO-Recovery (F-RTO) defined in RFC5682.
 	F-RTO is an enhanced recovery algorithm for TCP retransmission
-	timeouts. It is particularly beneficial in wireless environments
-	where packet loss is typically due to random radio interference
-	rather than intermediate router congestion. F-RTO is a sender-side
-	only modification. Therefore it does not require any support from
-	the peer.
-
-	If set to 1, basic version is enabled. 2 enables SACK enhanced
-	F-RTO if flow uses SACK. The basic version can be used also when
-	SACK is in use though scenario(s) with it exists where F-RTO
-	interacts badly with the packet counting of the SACK enabled TCP
-	flow.
-
-tcp_frto_response - INTEGER
-	When F-RTO has detected that a TCP retransmission timeout was
-	spurious (i.e, the timeout would have been avoided had TCP set a
-	longer retransmission timeout), TCP has several options what to do
-	next. Possible values are:
-		0 Rate halving based; a smooth and conservative response,
-		  results in halved cwnd and ssthresh after one RTT
-		1 Very conservative response; not recommended because even
-		  though being valid, it interacts poorly with the rest of
-		  Linux TCP, halves cwnd and ssthresh immediately
-		2 Aggressive response; undoes congestion control measures
-		  that are now known to be unnecessary (ignoring the
-		  possibility of a lost retransmission that would require
-		  TCP to be more cautious), cwnd and ssthresh are restored
-		  to the values prior timeout
-	Default: 0 (rate halving based)
+	timeouts. It is particularly beneficial in networks where the
+	RTT fluctuates (e.g., wireless). F-RTO is a sender-side only
+	modification. It does not require any support from the peer.
+
+	By default it's enabled with a non-zero value. 0 disables F-RTO.
 
 tcp_keepalive_time - INTEGER
 	How often TCP sends out keepalive messages when keepalive is enabled.
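The per-socket override mentioned under tcp_congestion_control above is a
one-line setsockopt() call. A minimal sketch, assuming "reno" is listed in
tcp_allowed_congestion_control:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Switch one TCP socket to the "reno" congestion control module,
 * as referenced in the tcp_congestion_control entry. */
int main(void)
{
	const char algo[] = "reno";	/* assumed to be an allowed algorithm */
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
		       algo, strlen(algo)) < 0)
		perror("setsockopt TCP_CONGESTION");
	close(fd);
	return 0;
}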
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index 94444b152fbc..65efb85e49de 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -685,6 +685,333 @@ int main(int argc, char **argp)
 }
 
 -------------------------------------------------------------------------------
++ AF_PACKET TPACKET_V3 example
+-------------------------------------------------------------------------------
+
+AF_PACKET's TPACKET_V3 ring buffer can be configured to use non-static frame
+sizes by doing its own memory management. It is based on blocks where polling
+works on a per-block basis instead of per ring as in TPACKET_V2 and its
+predecessor.
+
+It is said that TPACKET_V3 brings the following benefits:
+ *) ~15 - 20% reduction in CPU usage
+ *) ~20% increase in packet capture rate
+ *) ~2x increase in packet density
+ *) Port aggregation analysis
+ *) Non-static frame size to capture entire packet payload
+
+So it seems to be a good candidate to be used with packet fanout.
+
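+A short sketch of how such a socket could additionally join a fanout group
+(reusing the headers from the full example below; group id 123 and the hash
+policy are arbitrary example choices, see PACKET_FANOUT in packet(7)):
+
+	static int join_fanout_group(int fd)
+	{
+		/* Lower 16 bits carry the group id, upper 16 bits the policy. */
+		int fanout_arg = (PACKET_FANOUT_HASH << 16) | 123;
+
+		/* Flows of one group are spread across member sockets by hash. */
+		return setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
+				  &fanout_arg, sizeof(fanout_arg));
+	}
+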
+Minimal example code by Daniel Borkmann based on Chetan Loke's lolpcap (compile
+it with gcc -Wall -O2 blob.c, and try things like "./a.out eth0", etc.):
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <assert.h>
+#include <net/if.h>
+#include <arpa/inet.h>
+#include <netdb.h>
+#include <poll.h>
+#include <unistd.h>
+#include <signal.h>
+#include <inttypes.h>
+#include <sys/socket.h>
+#include <sys/mman.h>
+#include <linux/if_packet.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+
+#define BLOCK_SIZE		(1 << 22)
+#define FRAME_SIZE		2048
+
+#define NUM_BLOCKS		64
+#define NUM_FRAMES		((BLOCK_SIZE * NUM_BLOCKS) / FRAME_SIZE)
+
+#define BLOCK_RETIRE_TOV_IN_MS	64
+#define BLOCK_PRIV_AREA_SZ	13
+
+#define ALIGN_8(x)		(((x) + 8 - 1) & ~(8 - 1))
+
+#define BLOCK_STATUS(x)		((x)->h1.block_status)
+#define BLOCK_NUM_PKTS(x)	((x)->h1.num_pkts)
+#define BLOCK_O2FP(x)		((x)->h1.offset_to_first_pkt)
+#define BLOCK_LEN(x)		((x)->h1.blk_len)
+#define BLOCK_SNUM(x)		((x)->h1.seq_num)
+#define BLOCK_O2PRIV(x)		((x)->offset_to_priv)
+#define BLOCK_PRIV(x)		((void *) ((uint8_t *) (x) + BLOCK_O2PRIV(x)))
+#define BLOCK_HDR_LEN		(ALIGN_8(sizeof(struct block_desc)))
+#define BLOCK_PLUS_PRIV(sz_pri)	(BLOCK_HDR_LEN + ALIGN_8((sz_pri)))
+
+#ifndef likely
+# define likely(x)		__builtin_expect(!!(x), 1)
+#endif
+#ifndef unlikely
+# define unlikely(x)		__builtin_expect(!!(x), 0)
+#endif
+
+struct block_desc {
+	uint32_t version;
+	uint32_t offset_to_priv;
+	struct tpacket_hdr_v1 h1;
+};
+
+struct ring {
+	struct iovec *rd;
+	uint8_t *map;
+	struct tpacket_req3 req;
+};
+
+static unsigned long packets_total = 0, bytes_total = 0;
+static sig_atomic_t sigint = 0;
+
+void sighandler(int num)
+{
+	sigint = 1;
+}
+
+static int setup_socket(struct ring *ring, char *netdev)
+{
+	int err, i, fd, v = TPACKET_V3;
+	struct sockaddr_ll ll;
+
+	fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
+	if (fd < 0) {
+		perror("socket");
+		exit(1);
+	}
+
+	err = setsockopt(fd, SOL_PACKET, PACKET_VERSION, &v, sizeof(v));
+	if (err < 0) {
+		perror("setsockopt");
+		exit(1);
+	}
+
+	memset(&ring->req, 0, sizeof(ring->req));
+	ring->req.tp_block_size = BLOCK_SIZE;
+	ring->req.tp_frame_size = FRAME_SIZE;
+	ring->req.tp_block_nr = NUM_BLOCKS;
+	ring->req.tp_frame_nr = NUM_FRAMES;
+	ring->req.tp_retire_blk_tov = BLOCK_RETIRE_TOV_IN_MS;
+	ring->req.tp_sizeof_priv = BLOCK_PRIV_AREA_SZ;
+	ring->req.tp_feature_req_word |= TP_FT_REQ_FILL_RXHASH;
+
+	err = setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &ring->req,
+			 sizeof(ring->req));
+	if (err < 0) {
+		perror("setsockopt");
+		exit(1);
+	}
+
+	ring->map = mmap(NULL, ring->req.tp_block_size * ring->req.tp_block_nr,
+			 PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED,
+			 fd, 0);
+	if (ring->map == MAP_FAILED) {
+		perror("mmap");
+		exit(1);
+	}
+
+	ring->rd = malloc(ring->req.tp_block_nr * sizeof(*ring->rd));
+	assert(ring->rd);
+	for (i = 0; i < ring->req.tp_block_nr; ++i) {
+		ring->rd[i].iov_base = ring->map + (i * ring->req.tp_block_size);
+		ring->rd[i].iov_len = ring->req.tp_block_size;
+	}
+
+	memset(&ll, 0, sizeof(ll));
+	ll.sll_family = PF_PACKET;
+	ll.sll_protocol = htons(ETH_P_ALL);
+	ll.sll_ifindex = if_nametoindex(netdev);
+	ll.sll_hatype = 0;
+	ll.sll_pkttype = 0;
+	ll.sll_halen = 0;
+
+	err = bind(fd, (struct sockaddr *) &ll, sizeof(ll));
+	if (err < 0) {
+		perror("bind");
+		exit(1);
+	}
+
+	return fd;
+}
+
+#ifdef __checked
+static uint64_t prev_block_seq_num = 0;
+
+void assert_block_seq_num(struct block_desc *pbd)
+{
+	if (unlikely(prev_block_seq_num + 1 != BLOCK_SNUM(pbd))) {
+		printf("prev_block_seq_num:%"PRIu64", expected seq:%"PRIu64" != "
+		       "actual seq:%"PRIu64"\n", prev_block_seq_num,
+		       prev_block_seq_num + 1, (uint64_t) BLOCK_SNUM(pbd));
+		exit(1);
+	}
+
+	prev_block_seq_num = BLOCK_SNUM(pbd);
+}
+
+static void assert_block_len(struct block_desc *pbd, uint32_t bytes, int block_num)
+{
+	if (BLOCK_NUM_PKTS(pbd)) {
+		if (unlikely(bytes != BLOCK_LEN(pbd))) {
+			printf("block:%u with %upackets, expected len:%u != actual len:%u\n",
+			       block_num, BLOCK_NUM_PKTS(pbd), bytes, BLOCK_LEN(pbd));
+			exit(1);
+		}
+	} else {
+		if (unlikely(BLOCK_LEN(pbd) != BLOCK_PLUS_PRIV(BLOCK_PRIV_AREA_SZ))) {
+			printf("block:%u, expected len:%lu != actual len:%u\n",
+			       block_num, BLOCK_HDR_LEN, BLOCK_LEN(pbd));
+			exit(1);
+		}
+	}
+}
+
+static void assert_block_header(struct block_desc *pbd, const int block_num
871{
872 uint32_t block_status = BLOCK_STATUS(pbd);
873
874 if (unlikely((block_status & TP_STATUS_USER) == 0)) {
875 printf("block:%u, not in TP_STATUS_USER\n", block_num);
876 exit(1);
877 }
878
879 assert_block_seq_num(pbd);
880}
881#else
882static inline void assert_block_header(struct block_desc *pbd, const int block_num)
883{
884}
885static void assert_block_len(struct block_desc *pbd, uint32_t bytes, int block_num)
886{
887}
888#endif
889
890static void display(struct tpacket3_hdr *ppd)
891{
892 struct ethhdr *eth = (struct ethhdr *) ((uint8_t *) ppd + ppd->tp_mac);
893 struct iphdr *ip = (struct iphdr *) ((uint8_t *) eth + ETH_HLEN);
894
895 if (eth->h_proto == htons(ETH_P_IP)) {
896 struct sockaddr_in ss, sd;
897 char sbuff[NI_MAXHOST], dbuff[NI_MAXHOST];
898
899 memset(&ss, 0, sizeof(ss));
900 ss.sin_family = PF_INET;
901 ss.sin_addr.s_addr = ip->saddr;
902 getnameinfo((struct sockaddr *) &ss, sizeof(ss),
903 sbuff, sizeof(sbuff), NULL, 0, NI_NUMERICHOST);
904
905 memset(&sd, 0, sizeof(sd));
906 sd.sin_family = PF_INET;
907 sd.sin_addr.s_addr = ip->daddr;
908 getnameinfo((struct sockaddr *) &sd, sizeof(sd),
909 dbuff, sizeof(dbuff), NULL, 0, NI_NUMERICHOST);
910
911 printf("%s -> %s, ", sbuff, dbuff);
912 }
913
914 printf("rxhash: 0x%x\n", ppd->hv1.tp_rxhash);
915}
916
917static void walk_block(struct block_desc *pbd, const int block_num)
918{
919 int num_pkts = BLOCK_NUM_PKTS(pbd), i;
920 unsigned long bytes = 0;
921 unsigned long bytes_with_padding = BLOCK_PLUS_PRIV(BLOCK_PRIV_AREA_SZ);
922 struct tpacket3_hdr *ppd;
923
924 assert_block_header(pbd, block_num);
925
926 ppd = (struct tpacket3_hdr *) ((uint8_t *) pbd + BLOCK_O2FP(pbd));
927 for (i = 0; i < num_pkts; ++i) {
928 bytes += ppd->tp_snaplen;
929 if (ppd->tp_next_offset)
930 bytes_with_padding += ppd->tp_next_offset;
931 else
932 bytes_with_padding += ALIGN_8(ppd->tp_snaplen + ppd->tp_mac);
933
934 display(ppd);
935
936 ppd = (struct tpacket3_hdr *) ((uint8_t *) ppd + ppd->tp_next_offset);
937 __sync_synchronize();
938 }
939
940 assert_block_len(pbd, bytes_with_padding, block_num);
941
942 packets_total += num_pkts;
943 bytes_total += bytes;
944}
945
946void flush_block(struct block_desc *pbd)
947{
948 BLOCK_STATUS(pbd) = TP_STATUS_KERNEL;
949 __sync_synchronize();
950}
951
952static void teardown_socket(struct ring *ring, int fd)
953{
954 munmap(ring->map, ring->req.tp_block_size * ring->req.tp_block_nr);
955 free(ring->rd);
956 close(fd);
957}
958
959int main(int argc, char **argp)
960{
961 int fd, err;
962 socklen_t len;
963 struct ring ring;
964 struct pollfd pfd;
965 unsigned int block_num = 0;
966 struct block_desc *pbd;
967 struct tpacket_stats_v3 stats;
968
969 if (argc != 2) {
970 fprintf(stderr, "Usage: %s INTERFACE\n", argp[0]);
971 return EXIT_FAILURE;
972 }
973
974 signal(SIGINT, sighandler);
975
976 memset(&ring, 0, sizeof(ring));
977 fd = setup_socket(&ring, argp[argc - 1]);
978 assert(fd > 0);
979
980 memset(&pfd, 0, sizeof(pfd));
981 pfd.fd = fd;
982 pfd.events = POLLIN | POLLERR;
983 pfd.revents = 0;
984
985 while (likely(!sigint)) {
986 pbd = (struct block_desc *) ring.rd[block_num].iov_base;
987retry_block:
988 if ((BLOCK_STATUS(pbd) & TP_STATUS_USER) == 0) {
989 poll(&pfd, 1, -1);
990 goto retry_block;
991 }
992
993 walk_block(pbd, block_num);
994 flush_block(pbd);
995 block_num = (block_num + 1) % NUM_BLOCKS;
996 }
997
998 len = sizeof(stats);
999 err = getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &stats, &len);
1000 if (err < 0) {
1001 perror("getsockopt");
1002 exit(1);
1003 }
1004
1005 fflush(stdout);
1006 printf("\nReceived %u packets, %lu bytes, %u dropped, freeze_q_cnt: %u\n",
1007 stats.tp_packets, bytes_total, stats.tp_drops,
1008 stats.tp_freeze_q_cnt);
1009
1010 teardown_socket(&ring, fd);
1011 return 0;
1012}
1013
1014-------------------------------------------------------------------------------
 + PACKET_TIMESTAMP
 -------------------------------------------------------------------------------

diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index f9fa6db40a52..8efe0b3c8b83 100644
--- a/Documentation/networking/stmmac.txt
+++ b/Documentation/networking/stmmac.txt
@@ -326,6 +326,35 @@ To enter in Tx LPI mode the driver needs to have a software timer
 that enable and disable the LPI mode when there is nothing to be
 transmitted.

-7) TODO:
+7) Extended descriptors
+The extended descriptors give us information about the receive Ethernet payload
+when it is carrying PTP packets or TCP/UDP/ICMP over IP.
+These are not available on GMAC Synopsys chips older than 3.50.
+At probe time the driver will decide if these can actually be used.
+This support is also mandatory for PTPv2 because the extra descriptors 6 and 7
+are used for saving the hardware timestamps.
+
+8) Precision Time Protocol (PTP)
+The driver supports IEEE 1588-2002, the Precision Time Protocol (PTP),
+which enables precise synchronization of clocks in measurement and
+control systems implemented with technologies such as network
+communication.
+
+In addition to the basic timestamp features mentioned in IEEE 1588-2002,
+new GMAC cores support the advanced timestamp features of IEEE 1588-2008,
+which can be enabled when configuring the kernel.
+
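A user application typically turns these hardware timestamps on through the
generic SIOCSHWTSTAMP ioctl; a minimal sketch ("eth0" and the PTPv2 event
filter are example choices, not mandated by the driver):

/* Sketch: request TX and PTPv2-event RX hardware timestamping. */
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_hw_tstamp(int fd)	/* fd: any socket on the device */
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name));
	ifr.ifr_data = (char *)&cfg;

	return ioctl(fd, SIOCSHWTSTAMP, &ifr);
}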
+9) SGMII/RGMII support
+New GMAC devices provide their own way to manage RGMII/SGMII.
+This information is available at run-time by looking at the
+HW capability register. This means that the stmmac can manage
+auto-negotiation and link status without using the PHYLIB stuff.
+In fact, the HW provides a subset of extended registers to
+restart the ANE, verify Full/Half duplex mode and Speed.
+Also thanks to these registers it is possible to look at the
+Auto-negotiated Link Partner Ability.
+
+10) TODO:
  o XGMAC is not supported.
- o Add the PTP - precision time protocol
+ o Complete the TBI & RTBI support.
+ o extended VLAN support for 3.70a SYNP GMAC.
diff --git a/MAINTAINERS b/MAINTAINERS
index 376704078d65..c8f792ae9582 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6330,6 +6330,7 @@ F: drivers/acpi/apei/erst.c

 PTP HARDWARE CLOCK SUPPORT
 M:	Richard Cochran <richardcochran@gmail.com>
+L:	netdev@vger.kernel.org
 S:	Maintained
 W:	http://linuxptp.sourceforge.net/
 F:	Documentation/ABI/testing/sysfs-ptp
@@ -6461,6 +6462,7 @@ S: Supported
 F:	drivers/net/ethernet/qlogic/qlcnic/

 QLOGIC QLGE 10Gb ETHERNET DRIVER
+M:	Shahed Shaikh <shahed.shaikh@qlogic.com>
 M:	Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
 M:	Ron Mercer <ron.mercer@qlogic.com>
 M:	linux-driver@qlogic.com
@@ -8514,7 +8516,7 @@ F: drivers/usb/gadget/*uvc*.c
 F:	drivers/usb/gadget/webcam.c

 USB WIRELESS RNDIS DRIVER (rndis_wlan)
-M:	Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
+M:	Jussi Kivilinna <jussi.kivilinna@iki.fi>
 L:	linux-wireless@vger.kernel.org
 S:	Maintained
 F:	drivers/net/wireless/rndis_wlan.c
diff --git a/arch/alpha/include/uapi/asm/socket.h b/arch/alpha/include/uapi/asm/socket.h
index c5195524d1ef..eee6ea76bdaf 100644
--- a/arch/alpha/include/uapi/asm/socket.h
+++ b/arch/alpha/include/uapi/asm/socket.h
@@ -79,4 +79,6 @@

 #define SO_LOCK_FILTER		44

+#define SO_SELECT_ERR_QUEUE	45
+
 #endif /* _UAPI_ASM_SOCKET_H */
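The new option (added here and in the other architectures' socket.h below) is
a simple boolean; a sketch of the intended usage, on our reading of the patch,
for sockets that queue data (e.g. TX timestamps) on their error queue:

/* Sketch: opt in to having error-queue data wake select()/poll(). */
static int enable_select_err_queue(int fd)
{
	int on = 1;

	return setsockopt(fd, SOL_SOCKET, SO_SELECT_ERR_QUEUE,
			  &on, sizeof(on));
}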
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 0957645b73af..91fe4f148f80 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -349,7 +349,7 @@
 			rx_descs = <64>;
 			mac_control = <0x20>;
 			slaves = <2>;
-			cpts_active_slave = <0>;
+			active_slave = <0>;
 			cpts_clock_mult = <0x80000000>;
 			cpts_clock_shift = <29>;
 			reg = <0x4a100000 0x800
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index a0bd8a755bdf..1a643ee8e082 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -918,9 +918,8 @@ void bpf_jit_compile(struct sk_filter *fp)
 #endif

 	if (bpf_jit_enable > 1)
-		print_hex_dump(KERN_INFO, "BPF JIT code: ",
-			       DUMP_PREFIX_ADDRESS, 16, 4, ctx.target,
-			       alloc_size, false);
+		/* there are 2 passes here */
+		bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);

 	fp->bpf_func = (void *)ctx.target;
 out:
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index 2d4b6414609f..251f827271e9 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -238,6 +238,7 @@ static __init void ge_complete(
 	struct mv643xx_eth_shared_platform_data *orion_ge_shared_data,
 	struct resource *orion_ge_resource, unsigned long irq,
 	struct platform_device *orion_ge_shared,
+	struct platform_device *orion_ge_mvmdio,
 	struct mv643xx_eth_platform_data *eth_data,
 	struct platform_device *orion_ge)
 {
@@ -247,6 +248,8 @@ static __init void ge_complete(
 	orion_ge->dev.platform_data = eth_data;

 	platform_device_register(orion_ge_shared);
+	if (orion_ge_mvmdio)
+		platform_device_register(orion_ge_mvmdio);
 	platform_device_register(orion_ge);
 }

@@ -258,8 +261,6 @@ struct mv643xx_eth_shared_platform_data orion_ge00_shared_data;
 static struct resource orion_ge00_shared_resources[] = {
 	{
 		.name	= "ge00 base",
-	}, {
-		.name	= "ge00 err irq",
 	},
 };

@@ -271,6 +272,19 @@ static struct platform_device orion_ge00_shared = {
 	},
 };

+static struct resource orion_ge_mvmdio_resources[] = {
+	{
+		.name	= "ge00 mvmdio base",
+	}, {
+		.name	= "ge00 mvmdio err irq",
+	},
+};
+
+static struct platform_device orion_ge_mvmdio = {
+	.name		= "orion-mdio",
+	.id		= -1,
+};
+
 static struct resource orion_ge00_resources[] = {
 	{
 		.name	= "ge00 irq",
@@ -295,26 +309,25 @@ void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
 			    unsigned int tx_csum_limit)
 {
 	fill_resources(&orion_ge00_shared, orion_ge00_shared_resources,
-		       mapbase + 0x2000, SZ_16K - 1, irq_err);
+		       mapbase + 0x2000, SZ_16K - 1, NO_IRQ);
+	fill_resources(&orion_ge_mvmdio, orion_ge_mvmdio_resources,
+		       mapbase + 0x2004, 0x84 - 1, irq_err);
 	orion_ge00_shared_data.tx_csum_limit = tx_csum_limit;
 	ge_complete(&orion_ge00_shared_data,
 		    orion_ge00_resources, irq, &orion_ge00_shared,
+		    &orion_ge_mvmdio,
 		    eth_data, &orion_ge00);
 }

 /*****************************************************************************
  * GE01
  ****************************************************************************/
-struct mv643xx_eth_shared_platform_data orion_ge01_shared_data = {
-	.shared_smi	= &orion_ge00_shared,
-};
+struct mv643xx_eth_shared_platform_data orion_ge01_shared_data;

 static struct resource orion_ge01_shared_resources[] = {
 	{
 		.name	= "ge01 base",
-	}, {
-		.name	= "ge01 err irq",
-	},
+	}
 };

 static struct platform_device orion_ge01_shared = {
@@ -349,26 +362,23 @@ void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
 			    unsigned int tx_csum_limit)
 {
 	fill_resources(&orion_ge01_shared, orion_ge01_shared_resources,
-		       mapbase + 0x2000, SZ_16K - 1, irq_err);
+		       mapbase + 0x2000, SZ_16K - 1, NO_IRQ);
 	orion_ge01_shared_data.tx_csum_limit = tx_csum_limit;
 	ge_complete(&orion_ge01_shared_data,
 		    orion_ge01_resources, irq, &orion_ge01_shared,
+		    NULL,
 		    eth_data, &orion_ge01);
 }

 /*****************************************************************************
  * GE10
  ****************************************************************************/
-struct mv643xx_eth_shared_platform_data orion_ge10_shared_data = {
-	.shared_smi	= &orion_ge00_shared,
-};
+struct mv643xx_eth_shared_platform_data orion_ge10_shared_data;

 static struct resource orion_ge10_shared_resources[] = {
 	{
 		.name	= "ge10 base",
-	}, {
-		.name	= "ge10 err irq",
-	},
+	}
 };

 static struct platform_device orion_ge10_shared = {
@@ -402,24 +412,21 @@ void __init orion_ge10_init(struct mv643xx_eth_platform_data *eth_data,
 			    unsigned long irq_err)
 {
 	fill_resources(&orion_ge10_shared, orion_ge10_shared_resources,
-		       mapbase + 0x2000, SZ_16K - 1, irq_err);
+		       mapbase + 0x2000, SZ_16K - 1, NO_IRQ);
 	ge_complete(&orion_ge10_shared_data,
 		    orion_ge10_resources, irq, &orion_ge10_shared,
+		    NULL,
 		    eth_data, &orion_ge10);
 }

 /*****************************************************************************
  * GE11
  ****************************************************************************/
-struct mv643xx_eth_shared_platform_data orion_ge11_shared_data = {
-	.shared_smi	= &orion_ge00_shared,
-};
+struct mv643xx_eth_shared_platform_data orion_ge11_shared_data;

 static struct resource orion_ge11_shared_resources[] = {
 	{
 		.name	= "ge11 base",
-	}, {
-		.name	= "ge11 err irq",
 	},
 };

@@ -454,9 +461,10 @@ void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
 			    unsigned long irq_err)
 {
 	fill_resources(&orion_ge11_shared, orion_ge11_shared_resources,
-		       mapbase + 0x2000, SZ_16K - 1, irq_err);
+		       mapbase + 0x2000, SZ_16K - 1, NO_IRQ);
 	ge_complete(&orion_ge11_shared_data,
 		    orion_ge11_resources, irq, &orion_ge11_shared,
+		    NULL,
 		    eth_data, &orion_ge11);
 }

diff --git a/arch/avr32/include/uapi/asm/socket.h b/arch/avr32/include/uapi/asm/socket.h
index 51c6401582ea..37401f535126 100644
--- a/arch/avr32/include/uapi/asm/socket.h
+++ b/arch/avr32/include/uapi/asm/socket.h
@@ -72,4 +72,6 @@

 #define SO_LOCK_FILTER		44

+#define SO_SELECT_ERR_QUEUE	45
+
 #endif /* __ASM_AVR32_SOCKET_H */
diff --git a/arch/cris/include/uapi/asm/socket.h b/arch/cris/include/uapi/asm/socket.h
index 50692b738c75..ba409c9947bc 100644
--- a/arch/cris/include/uapi/asm/socket.h
+++ b/arch/cris/include/uapi/asm/socket.h
@@ -74,6 +74,8 @@

 #define SO_LOCK_FILTER		44

+#define SO_SELECT_ERR_QUEUE	45
+
 #endif /* _ASM_SOCKET_H */


diff --git a/arch/frv/include/uapi/asm/socket.h b/arch/frv/include/uapi/asm/socket.h
index 595391f0f98c..31dbb5d8e13d 100644
--- a/arch/frv/include/uapi/asm/socket.h
+++ b/arch/frv/include/uapi/asm/socket.h
@@ -72,5 +72,7 @@

 #define SO_LOCK_FILTER		44

+#define SO_SELECT_ERR_QUEUE	45
+
 #endif /* _ASM_SOCKET_H */

diff --git a/arch/h8300/include/uapi/asm/socket.h b/arch/h8300/include/uapi/asm/socket.h
index 43e32621da7d..5d1c6d0870e6 100644
--- a/arch/h8300/include/uapi/asm/socket.h
+++ b/arch/h8300/include/uapi/asm/socket.h
@@ -72,4 +72,6 @@

 #define SO_LOCK_FILTER		44

+#define SO_SELECT_ERR_QUEUE	45
+
 #endif /* _ASM_SOCKET_H */
diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h
index c567adc8bea5..6b4329f18b29 100644
--- a/arch/ia64/include/uapi/asm/socket.h
+++ b/arch/ia64/include/uapi/asm/socket.h
@@ -81,4 +81,6 @@

 #define SO_LOCK_FILTER		44

+#define SO_SELECT_ERR_QUEUE	45
+
 #endif /* _ASM_IA64_SOCKET_H */
diff --git a/arch/m32r/include/uapi/asm/socket.h b/arch/m32r/include/uapi/asm/socket.h
index 519afa2755db..2a3b59e0e171 100644
--- a/arch/m32r/include/uapi/asm/socket.h
+++ b/arch/m32r/include/uapi/asm/socket.h
@@ -72,4 +72,6 @@

 #define SO_LOCK_FILTER		44

+#define SO_SELECT_ERR_QUEUE	45
+
 #endif /* _ASM_M32R_SOCKET_H */
diff --git a/arch/mips/include/uapi/asm/socket.h b/arch/mips/include/uapi/asm/socket.h
index 47132f44c955..3b211507be7f 100644
--- a/arch/mips/include/uapi/asm/socket.h
+++ b/arch/mips/include/uapi/asm/socket.h
@@ -90,4 +90,6 @@

 #define SO_LOCK_FILTER		44

+#define SO_SELECT_ERR_QUEUE	45
+
 #endif /* _UAPI_ASM_SOCKET_H */
diff --git a/arch/mn10300/include/uapi/asm/socket.h b/arch/mn10300/include/uapi/asm/socket.h
index 5c7c7c988544..b4ce844c9391 100644
--- a/arch/mn10300/include/uapi/asm/socket.h
+++ b/arch/mn10300/include/uapi/asm/socket.h
@@ -72,4 +72,6 @@

 #define SO_LOCK_FILTER		44

+#define SO_SELECT_ERR_QUEUE	45
+
 #endif /* _ASM_SOCKET_H */
diff --git a/arch/parisc/include/uapi/asm/socket.h b/arch/parisc/include/uapi/asm/socket.h
index 526e4b9aece0..70c512a386f7 100644
--- a/arch/parisc/include/uapi/asm/socket.h
+++ b/arch/parisc/include/uapi/asm/socket.h
@@ -71,6 +71,8 @@

 #define SO_LOCK_FILTER		0x4025

+#define SO_SELECT_ERR_QUEUE	0x4026
+
 /* O_NONBLOCK clashes with the bits used for socket types.  Therefore we
  * have to define SOCK_NONBLOCK to a different value here.
  */
diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h
index a26dcaece509..a36daf3c6f9a 100644
--- a/arch/powerpc/include/uapi/asm/socket.h
+++ b/arch/powerpc/include/uapi/asm/socket.h
@@ -79,4 +79,6 @@

 #define SO_LOCK_FILTER		44

+#define SO_SELECT_ERR_QUEUE	45
+
 #endif /* _ASM_POWERPC_SOCKET_H */
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index e834f1ec23c8..c427ae36374a 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -671,16 +671,12 @@ void bpf_jit_compile(struct sk_filter *fp)
 	}

 	if (bpf_jit_enable > 1)
-		pr_info("flen=%d proglen=%u pass=%d image=%p\n",
-			flen, proglen, pass, image);
+		/* Note that we output the base address of the code_base
+		 * rather than image, since opcodes are in code_base.
+		 */
+		bpf_jit_dump(flen, proglen, pass, code_base);

 	if (image) {
-		if (bpf_jit_enable > 1)
-			print_hex_dump(KERN_ERR, "JIT code: ",
-				       DUMP_PREFIX_ADDRESS,
-				       16, 1, code_base,
-				       proglen, false);
-
 		bpf_flush_icache(code_base, code_base + (proglen/4));
 		/* Function descriptor nastiness: Address + TOC */
 		((u64 *)image)[0] = (u64)code_base;
diff --git a/arch/powerpc/platforms/chrp/pegasos_eth.c b/arch/powerpc/platforms/chrp/pegasos_eth.c
index 039fc8e82199..2b4dc6abde6c 100644
--- a/arch/powerpc/platforms/chrp/pegasos_eth.c
+++ b/arch/powerpc/platforms/chrp/pegasos_eth.c
@@ -47,6 +47,25 @@ static struct platform_device mv643xx_eth_shared_device = {
 	.resource	= mv643xx_eth_shared_resources,
 };

+/*
+ * The orion mdio driver only covers shared + 0x4 up to shared + 0x84 - 1
+ */
+static struct resource mv643xx_eth_mvmdio_resources[] = {
+	[0] = {
+		.name	= "ethernet mdio base",
+		.start	= 0xf1000000 + MV643XX_ETH_SHARED_REGS + 0x4,
+		.end	= 0xf1000000 + MV643XX_ETH_SHARED_REGS + 0x83,
+		.flags	= IORESOURCE_MEM,
+	},
+};
+
+static struct platform_device mv643xx_eth_mvmdio_device = {
+	.name		= "orion-mdio",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(mv643xx_eth_mvmdio_resources),
+	.resource	= mv643xx_eth_mvmdio_resources,
+};
+
 static struct resource mv643xx_eth_port1_resources[] = {
 	[0] = {
 		.name	= "eth port1 irq",
@@ -82,6 +101,7 @@ static struct platform_device eth_port1_device = {

 static struct platform_device *mv643xx_eth_pd_devs[] __initdata = {
 	&mv643xx_eth_shared_device,
+	&mv643xx_eth_mvmdio_device,
 	&eth_port1_device,
 };

diff --git a/arch/powerpc/sysdev/mv64x60_dev.c b/arch/powerpc/sysdev/mv64x60_dev.c
index 0f6af41ebb44..4a25c26f0bf4 100644
--- a/arch/powerpc/sysdev/mv64x60_dev.c
+++ b/arch/powerpc/sysdev/mv64x60_dev.c
@@ -214,15 +214,27 @@ static struct platform_device * __init mv64x60_eth_register_shared_pdev(
 		struct device_node *np, int id)
 {
 	struct platform_device *pdev;
-	struct resource r[1];
+	struct resource r[2];
 	int err;

 	err = of_address_to_resource(np, 0, &r[0]);
 	if (err)
 		return ERR_PTR(err);

+	/* register an orion mdio bus driver */
+	r[1].start = r[0].start + 0x4;
+	r[1].end = r[0].start + 0x84 - 1;
+	r[1].flags = IORESOURCE_MEM;
+
+	if (id == 0) {
+		pdev = platform_device_register_simple("orion-mdio", -1, &r[1], 1);
+		if (!pdev)
+			return pdev;
+	}
+
 	pdev = platform_device_register_simple(MV643XX_ETH_SHARED_NAME, id,
-					       r, 1);
+					       &r[0], 1);
+
 	return pdev;
 }

diff --git a/arch/s390/include/uapi/asm/socket.h b/arch/s390/include/uapi/asm/socket.h
index f99eea7fff0f..2dacb306835c 100644
--- a/arch/s390/include/uapi/asm/socket.h
+++ b/arch/s390/include/uapi/asm/socket.h
@@ -78,4 +78,6 @@

 #define SO_LOCK_FILTER		44

+#define SO_SELECT_ERR_QUEUE	45
+
 #endif /* _ASM_SOCKET_H */
diff --git a/arch/sparc/include/uapi/asm/socket.h b/arch/sparc/include/uapi/asm/socket.h
index cbbad74b2e06..89f49b68a21c 100644
--- a/arch/sparc/include/uapi/asm/socket.h
+++ b/arch/sparc/include/uapi/asm/socket.h
@@ -68,6 +68,8 @@

 #define SO_LOCK_FILTER		0x0028

+#define SO_SELECT_ERR_QUEUE	0x0029
+
 /* Security levels - as per NRL IPv6 - don't actually do anything */
 #define SO_SECURITY_AUTHENTICATION	0x5001
 #define SO_SECURITY_ENCRYPTION_TRANSPORT	0x5002
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
index 3109ca684a99..d36a85ebb5e0 100644
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -795,13 +795,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf];
 	}

 	if (bpf_jit_enable > 1)
-		pr_err("flen=%d proglen=%u pass=%d image=%p\n",
-		       flen, proglen, pass, image);
+		bpf_jit_dump(flen, proglen, pass, image);

 	if (image) {
-		if (bpf_jit_enable > 1)
-			print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_ADDRESS,
-				       16, 1, image, proglen, false);
 		bpf_flush_icache(image, image + proglen);
 		fp->bpf_func = (void *)image;
 	}
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 3cbe45381bbb..f66b54086ce5 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -725,17 +725,12 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
 		}
 		oldproglen = proglen;
 	}
+
 	if (bpf_jit_enable > 1)
-		pr_err("flen=%d proglen=%u pass=%d image=%p\n",
-		       flen, proglen, pass, image);
+		bpf_jit_dump(flen, proglen, pass, image);

 	if (image) {
-		if (bpf_jit_enable > 1)
-			print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_ADDRESS,
-				       16, 1, image, proglen, false);
-
 		bpf_flush_icache(image, image + proglen);
-
 		fp->bpf_func = (void *)image;
 	}
 out:
diff --git a/arch/xtensa/include/uapi/asm/socket.h b/arch/xtensa/include/uapi/asm/socket.h
index 35905cb6e419..a8f44f50e651 100644
--- a/arch/xtensa/include/uapi/asm/socket.h
+++ b/arch/xtensa/include/uapi/asm/socket.h
@@ -83,4 +83,6 @@

 #define SO_LOCK_FILTER		44

+#define SO_SELECT_ERR_QUEUE	45
+
 #endif	/* _XTENSA_SOCKET_H */
diff --git a/drivers/bcma/core.c b/drivers/bcma/core.c
index 03bbe104338f..17b26ce7e051 100644
--- a/drivers/bcma/core.c
+++ b/drivers/bcma/core.c
@@ -104,7 +104,13 @@ void bcma_core_pll_ctl(struct bcma_device *core, u32 req, u32 status, bool on)
 		if (i)
 			bcma_err(core->bus, "PLL enable timeout\n");
 	} else {
-		bcma_warn(core->bus, "Disabling PLL not supported yet!\n");
+		/*
+		 * Mask the PLL but don't wait for it to be disabled. PLL may be
+		 * shared between cores and will be still up if there is another
+		 * core using it.
+		 */
+		bcma_mask32(core, BCMA_CLKCTLST, ~req);
+		bcma_read32(core, BCMA_CLKCTLST);
 	}
 }
 EXPORT_SYMBOL_GPL(bcma_core_pll_ctl);
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 9a6188add590..f72f52b4b1dd 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -120,6 +120,11 @@ static int bcma_register_cores(struct bcma_bus *bus)
 			continue;
 		}

+		/* Only first GMAC core on BCM4706 is connected and working */
+		if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
+		    core->core_unit > 0)
+			continue;
+
 		core->dev.release = bcma_release_core_dev;
 		core->dev.bus = &bcma_bus_type;
 		dev_set_name(&core->dev, "bcma%d:%d", bus->num, dev_id);
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 1110478dd0fd..08ae128cce9b 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -232,6 +232,31 @@ void proc_comm_connector(struct task_struct *task)
 	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
 }

+void proc_coredump_connector(struct task_struct *task)
+{
+	struct cn_msg *msg;
+	struct proc_event *ev;
+	__u8 buffer[CN_PROC_MSG_SIZE];
+	struct timespec ts;
+
+	if (atomic_read(&proc_event_num_listeners) < 1)
+		return;
+
+	msg = (struct cn_msg *)buffer;
+	ev = (struct proc_event *)msg->data;
+	get_seq(&msg->seq, &ev->cpu);
+	ktime_get_ts(&ts); /* get high res monotonic timestamp */
+	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
+	ev->what = PROC_EVENT_COREDUMP;
+	ev->event_data.coredump.process_pid = task->pid;
+	ev->event_data.coredump.process_tgid = task->tgid;
+
+	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
+	msg->ack = 0; /* not used */
+	msg->len = sizeof(*ev);
+	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
+}
+
 void proc_exit_connector(struct task_struct *task)
 {
 	struct cn_msg *msg;
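On the userspace side, a listener subscribed to the proc connector would see
the new event roughly like this (a sketch; the NETLINK_CONNECTOR socket setup
and the PROC_CN_MCAST_LISTEN subscription are omitted for brevity):

#include <stdio.h>
#include <linux/netlink.h>
#include <linux/connector.h>
#include <linux/cn_proc.h>

static void handle_proc_event(struct nlmsghdr *nlh)
{
	struct cn_msg *cn = NLMSG_DATA(nlh);
	struct proc_event *ev = (struct proc_event *)cn->data;

	if (ev->what == PROC_EVENT_COREDUMP)
		printf("coredump: pid %d tgid %d\n",
		       ev->event_data.coredump.process_pid,
		       ev->event_data.coredump.process_tgid);
}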
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index f1b7e244bfc1..6ecfa758942c 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -23,7 +23,7 @@
 #include <linux/module.h>
 #include <linux/list.h>
 #include <linux/skbuff.h>
-#include <linux/netlink.h>
+#include <net/netlink.h>
 #include <linux/moduleparam.h>
 #include <linux/connector.h>
 #include <linux/slab.h>
@@ -95,13 +95,13 @@ int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
 	if (!netlink_has_listeners(dev->nls, group))
 		return -ESRCH;

-	size = NLMSG_SPACE(sizeof(*msg) + msg->len);
+	size = sizeof(*msg) + msg->len;

-	skb = alloc_skb(size, gfp_mask);
+	skb = nlmsg_new(size, gfp_mask);
 	if (!skb)
 		return -ENOMEM;

-	nlh = nlmsg_put(skb, 0, msg->seq, NLMSG_DONE, size - sizeof(*nlh), 0);
+	nlh = nlmsg_put(skb, 0, msg->seq, NLMSG_DONE, size, 0);
 	if (!nlh) {
 		kfree_skb(skb);
 		return -EMSGSIZE;
@@ -124,7 +124,7 @@ static int cn_call_callback(struct sk_buff *skb)
 {
 	struct cn_callback_entry *i, *cbq = NULL;
 	struct cn_dev *dev = &cdev;
-	struct cn_msg *msg = NLMSG_DATA(nlmsg_hdr(skb));
+	struct cn_msg *msg = nlmsg_data(nlmsg_hdr(skb));
 	struct netlink_skb_parms *nsp = &NETLINK_CB(skb);
 	int err = -ENODEV;

@@ -162,7 +162,7 @@ static void cn_rx_skb(struct sk_buff *__skb)

 	skb = skb_get(__skb);

-	if (skb->len >= NLMSG_SPACE(0)) {
+	if (skb->len >= NLMSG_HDRLEN) {
 		nlh = nlmsg_hdr(skb);

 		if (nlh->nlmsg_len < sizeof(struct cn_msg) ||
diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c
index 9b041858d10d..9e84d5bc9307 100644
--- a/drivers/dma/ioat/dca.c
+++ b/drivers/dma/ioat/dca.c
@@ -470,8 +470,10 @@ struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase)
 	}

 	if (!dca2_tag_map_valid(ioatdca->tag_map)) {
-		dev_err(&pdev->dev, "APICID_TAG_MAP set incorrectly by BIOS, "
-			"disabling DCA\n");
+		WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND,
+				"%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n",
+				dev_driver_string(&pdev->dev),
+				dev_name(&pdev->dev));
 		free_dca_provider(dca);
 		return NULL;
 	}
@@ -689,7 +691,10 @@ struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
 	}

 	if (dca3_tag_map_invalid(ioatdca->tag_map)) {
-		dev_err(&pdev->dev, "APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n");
+		WARN_TAINT_ONCE(1, TAINT_FIRMWARE_WORKAROUND,
+				"%s %s: APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n",
+				dev_driver_string(&pdev->dev),
+				dev_name(&pdev->dev));
 		free_dca_provider(dca);
 		return NULL;
 	}
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 7224533e8ca6..7a701a58bbf0 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -47,9 +47,9 @@ config FIREWIRE_NET
 	tristate "IP networking over 1394"
 	depends on FIREWIRE && INET
 	help
-	  This enables IPv4 over IEEE 1394, providing IP connectivity with
-	  other implementations of RFC 2734 as found on several operating
-	  systems.  Multicast support is currently limited.
+	  This enables IPv4/IPv6 over IEEE 1394, providing IP connectivity
+	  with other implementations of RFC 2734/3146 as found on several
+	  operating systems.  Multicast support is currently limited.

 	  To compile this driver as a module, say M here: The module will be
 	  called firewire-net.
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 2b27bff2591a..4d565365e476 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -1,5 +1,6 @@
 /*
  * IPv4 over IEEE 1394, per RFC 2734
+ * IPv6 over IEEE 1394, per RFC 3146
  *
  * Copyright (C) 2009 Jay Fenlason <fenlason@redhat.com>
  *
@@ -28,6 +29,7 @@

 #include <asm/unaligned.h>
 #include <net/arp.h>
+#include <net/firewire.h>

 /* rx limits */
 #define FWNET_MAX_FRAGMENTS	30 /* arbitrary, > TX queue depth */
@@ -45,6 +47,7 @@

 #define IANA_SPECIFIER_ID		0x00005eU
 #define RFC2734_SW_VERSION		0x000001U
+#define RFC3146_SW_VERSION		0x000002U

 #define IEEE1394_GASP_HDR_SIZE	8

@@ -57,32 +60,10 @@
 #define RFC2374_HDR_LASTFRAG	2	/* last fragment	*/
 #define RFC2374_HDR_INTFRAG	3	/* interior fragment	*/

-#define RFC2734_HW_ADDR_LEN	16
-
-struct rfc2734_arp {
-	__be16 hw_type;		/* 0x0018	*/
-	__be16 proto_type;	/* 0x0806       */
-	u8 hw_addr_len;		/* 16		*/
-	u8 ip_addr_len;		/* 4		*/
-	__be16 opcode;		/* ARP Opcode	*/
-	/* Above is exactly the same format as struct arphdr */
-
-	__be64 s_uniq_id;	/* Sender's 64bit EUI			*/
-	u8 max_rec;		/* Sender's max packet size		*/
-	u8 sspd;		/* Sender's max speed			*/
-	__be16 fifo_hi;		/* hi 16bits of sender's FIFO addr	*/
-	__be32 fifo_lo;		/* lo 32bits of sender's FIFO addr	*/
-	__be32 sip;		/* Sender's IP Address			*/
-	__be32 tip;		/* IP Address of requested hw addr	*/
-} __packed;
-
-/* This header format is specific to this driver implementation. */
-#define FWNET_ALEN	8
-#define FWNET_HLEN	10
-struct fwnet_header {
-	u8 h_dest[FWNET_ALEN];	/* destination address */
-	__be16 h_proto;		/* packet type ID field */
-} __packed;
+static bool fwnet_hwaddr_is_multicast(u8 *ha)
+{
+	return !!(*ha & 1);
+}

 /* IPv4 and IPv6 encapsulation header */
 struct rfc2734_header {
@@ -191,8 +172,6 @@ struct fwnet_peer {
 	struct list_head peer_link;
 	struct fwnet_device *dev;
 	u64 guid;
-	u64 fifo;
-	__be32 ip;

 	/* guarded by dev->lock */
 	struct list_head pd_list; /* received partial datagrams */
@@ -222,6 +201,15 @@ struct fwnet_packet_task {
 };

 /*
+ * Get fifo address embedded in hwaddr
+ */
+static __u64 fwnet_hwaddr_fifo(union fwnet_hwaddr *ha)
+{
+	return (u64)get_unaligned_be16(&ha->uc.fifo_hi) << 32
+	       | get_unaligned_be32(&ha->uc.fifo_lo);
+}
+
+/*
  * saddr == NULL means use device source address.
  * daddr == NULL means leave destination address (eg unresolved arp).
  */
@@ -513,10 +501,20 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
 				bool is_broadcast, u16 ether_type)
 {
 	struct fwnet_device *dev;
-	static const __be64 broadcast_hw = cpu_to_be64(~0ULL);
 	int status;
 	__be64 guid;

+	switch (ether_type) {
+	case ETH_P_ARP:
+	case ETH_P_IP:
+#if IS_ENABLED(CONFIG_IPV6)
+	case ETH_P_IPV6:
+#endif
+		break;
+	default:
+		goto err;
+	}
+
 	dev = netdev_priv(net);
 	/* Write metadata, and then pass to the receive level */
 	skb->dev = net;
@@ -524,92 +522,11 @@ static int fwnet_finish_incoming_packet(struct net_device *net,

 	/*
 	 * Parse the encapsulation header. This actually does the job of
-	 * converting to an ethernet frame header, as well as arp
-	 * conversion if needed. ARP conversion is easier in this
-	 * direction, since we are using ethernet as our backend.
+	 * converting to an ethernet-like pseudo frame header.
 	 */
-	/*
-	 * If this is an ARP packet, convert it. First, we want to make
-	 * use of some of the fields, since they tell us a little bit
-	 * about the sending machine.
-	 */
-	if (ether_type == ETH_P_ARP) {
-		struct rfc2734_arp *arp1394;
-		struct arphdr *arp;
-		unsigned char *arp_ptr;
-		u64 fifo_addr;
-		u64 peer_guid;
-		unsigned sspd;
-		u16 max_payload;
-		struct fwnet_peer *peer;
-		unsigned long flags;
-
-		arp1394 = (struct rfc2734_arp *)skb->data;
-		arp = (struct arphdr *)skb->data;
-		arp_ptr = (unsigned char *)(arp + 1);
-		peer_guid = get_unaligned_be64(&arp1394->s_uniq_id);
-		fifo_addr = (u64)get_unaligned_be16(&arp1394->fifo_hi) << 32
-				| get_unaligned_be32(&arp1394->fifo_lo);
-
-		sspd = arp1394->sspd;
-		/* Sanity check.  OS X 10.3 PPC reportedly sends 131. */
-		if (sspd > SCODE_3200) {
-			dev_notice(&net->dev, "sspd %x out of range\n", sspd);
-			sspd = SCODE_3200;
-		}
-		max_payload = fwnet_max_payload(arp1394->max_rec, sspd);
-
-		spin_lock_irqsave(&dev->lock, flags);
-		peer = fwnet_peer_find_by_guid(dev, peer_guid);
-		if (peer) {
-			peer->fifo = fifo_addr;
-
-			if (peer->speed > sspd)
-				peer->speed = sspd;
-			if (peer->max_payload > max_payload)
-				peer->max_payload = max_payload;
-
-			peer->ip = arp1394->sip;
-		}
-		spin_unlock_irqrestore(&dev->lock, flags);
-
-		if (!peer) {
-			dev_notice(&net->dev,
-				   "no peer for ARP packet from %016llx\n",
-				   (unsigned long long)peer_guid);
-			goto no_peer;
-		}
-
-		/*
-		 * Now that we're done with the 1394 specific stuff, we'll
-		 * need to alter some of the data.  Believe it or not, all
-		 * that needs to be done is sender_IP_address needs to be
-		 * moved, the destination hardware address get stuffed
-		 * in and the hardware address length set to 8.
-		 *
-		 * IMPORTANT: The code below overwrites 1394 specific data
-		 * needed above so keep the munging of the data for the
-		 * higher level IP stack last.
-		 */
-
-		arp->ar_hln = 8;
-		/* skip over sender unique id */
-		arp_ptr += arp->ar_hln;
-		/* move sender IP addr */
-		put_unaligned(arp1394->sip, (u32 *)arp_ptr);
-		/* skip over sender IP addr */
-		arp_ptr += arp->ar_pln;
-
-		if (arp->ar_op == htons(ARPOP_REQUEST))
-			memset(arp_ptr, 0, sizeof(u64));
-		else
-			memcpy(arp_ptr, net->dev_addr, sizeof(u64));
-	}
-
-	/* Now add the ethernet header. */
 	guid = cpu_to_be64(dev->card->guid);
 	if (dev_hard_header(skb, net, ether_type,
-			    is_broadcast ? &broadcast_hw : &guid,
+			    is_broadcast ? net->broadcast : net->dev_addr,
 			    NULL, skb->len) >= 0) {
 		struct fwnet_header *eth;
 		u16 *rawp;
@@ -618,7 +535,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
 		skb_reset_mac_header(skb);
 		skb_pull(skb, sizeof(*eth));
 		eth = (struct fwnet_header *)skb_mac_header(skb);
-		if (*eth->h_dest & 1) {
+		if (fwnet_hwaddr_is_multicast(eth->h_dest)) {
 			if (memcmp(eth->h_dest, net->broadcast,
 				   net->addr_len) == 0)
 				skb->pkt_type = PACKET_BROADCAST;
@@ -630,7 +547,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
 			if (memcmp(eth->h_dest, net->dev_addr, net->addr_len))
 				skb->pkt_type = PACKET_OTHERHOST;
 		}
-		if (ntohs(eth->h_proto) >= 1536) {
+		if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) {
 			protocol = eth->h_proto;
 		} else {
 			rawp = (u16 *)skb->data;
@@ -652,7 +569,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,

 	return 0;

- no_peer:
+ err:
 	net->stats.rx_errors++;
 	net->stats.rx_dropped++;

@@ -856,7 +773,12 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
 	ver = be32_to_cpu(buf_ptr[1]) & 0xffffff;
 	source_node_id = be32_to_cpu(buf_ptr[0]) >> 16;

-	if (specifier_id == IANA_SPECIFIER_ID && ver == RFC2734_SW_VERSION) {
+	if (specifier_id == IANA_SPECIFIER_ID &&
+	    (ver == RFC2734_SW_VERSION
+#if IS_ENABLED(CONFIG_IPV6)
+	     || ver == RFC3146_SW_VERSION
+#endif
+	    )) {
 		buf_ptr += 2;
 		length -= IEEE1394_GASP_HDR_SIZE;
 		fwnet_incoming_packet(dev, buf_ptr, length, source_node_id,
@@ -1059,16 +981,27 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
 	u8 *p;
 	int generation;
 	int node_id;
+	unsigned int sw_version;

 	/* ptask->generation may not have been set yet */
 	generation = dev->card->generation;
 	smp_rmb();
 	node_id = dev->card->node_id;

+	switch (ptask->skb->protocol) {
+	default:
+		sw_version = RFC2734_SW_VERSION;
+		break;
+#if IS_ENABLED(CONFIG_IPV6)
+	case htons(ETH_P_IPV6):
+		sw_version = RFC3146_SW_VERSION;
+#endif
+	}
+
 	p = skb_push(ptask->skb, IEEE1394_GASP_HDR_SIZE);
 	put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, p);
 	put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24
-			   | RFC2734_SW_VERSION, &p[4]);
+			   | sw_version, &p[4]);

 	/* We should not transmit if broadcast_channel.valid == 0. */
 	fw_send_request(dev->card, &ptask->transaction,
@@ -1116,6 +1049,62 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
 	return 0;
 }

+static void fwnet_fifo_stop(struct fwnet_device *dev)
+{
+	if (dev->local_fifo == FWNET_NO_FIFO_ADDR)
+		return;
+
+	fw_core_remove_address_handler(&dev->handler);
+	dev->local_fifo = FWNET_NO_FIFO_ADDR;
+}
+
+static int fwnet_fifo_start(struct fwnet_device *dev)
+{
+	int retval;
+
+	if (dev->local_fifo != FWNET_NO_FIFO_ADDR)
+		return 0;
+
+	dev->handler.length = 4096;
+	dev->handler.address_callback = fwnet_receive_packet;
+	dev->handler.callback_data = dev;
+
+	retval = fw_core_add_address_handler(&dev->handler,
+					     &fw_high_memory_region);
+	if (retval < 0)
+		return retval;
+
+	dev->local_fifo = dev->handler.offset;
+
+	return 0;
+}
+
+static void __fwnet_broadcast_stop(struct fwnet_device *dev)
+{
+	unsigned u;
+
+	if (dev->broadcast_state != FWNET_BROADCAST_ERROR) {
+		for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++)
+			kunmap(dev->broadcast_rcv_buffer.pages[u]);
+		fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer, dev->card);
+	}
+	if (dev->broadcast_rcv_context) {
+		fw_iso_context_destroy(dev->broadcast_rcv_context);
+		dev->broadcast_rcv_context = NULL;
+	}
+	kfree(dev->broadcast_rcv_buffer_ptrs);
+	dev->broadcast_rcv_buffer_ptrs = NULL;
+	dev->broadcast_state = FWNET_BROADCAST_ERROR;
+}
+
+static void fwnet_broadcast_stop(struct fwnet_device *dev)
+{
+	if (dev->broadcast_state == FWNET_BROADCAST_ERROR)
+		return;
+	fw_iso_context_stop(dev->broadcast_rcv_context);
+	__fwnet_broadcast_stop(dev);
+}
+
 static int fwnet_broadcast_start(struct fwnet_device *dev)
 {
 	struct fw_iso_context *context;
@@ -1124,60 +1113,47 @@ static int fwnet_broadcast_start(struct fwnet_device *dev)
 	unsigned max_receive;
 	struct fw_iso_packet packet;
 	unsigned long offset;
+	void **ptrptr;
 	unsigned u;

-	if (dev->local_fifo == FWNET_NO_FIFO_ADDR) {
-		dev->handler.length = 4096;
-		dev->handler.address_callback = fwnet_receive_packet;
-		dev->handler.callback_data = dev;
-
-		retval = fw_core_add_address_handler(&dev->handler,
-						     &fw_high_memory_region);
-		if (retval < 0)
-			goto failed_initial;
-
-		dev->local_fifo = dev->handler.offset;
-	}
+	if (dev->broadcast_state != FWNET_BROADCAST_ERROR)
+		return 0;

 	max_receive = 1U << (dev->card->max_receive + 1);
 	num_packets = (FWNET_ISO_PAGE_COUNT * PAGE_SIZE) / max_receive;

-	if (!dev->broadcast_rcv_context) {
-		void **ptrptr;
-
-		context = fw_iso_context_create(dev->card,
-		    FW_ISO_CONTEXT_RECEIVE, IEEE1394_BROADCAST_CHANNEL,
-		    dev->card->link_speed, 8, fwnet_receive_broadcast, dev);
-		if (IS_ERR(context)) {
-			retval = PTR_ERR(context);
-			goto failed_context_create;
-		}
-
-		retval = fw_iso_buffer_init(&dev->broadcast_rcv_buffer,
-		    dev->card, FWNET_ISO_PAGE_COUNT, DMA_FROM_DEVICE);
-		if (retval < 0)
-			goto failed_buffer_init;
-
-		ptrptr = kmalloc(sizeof(void *) * num_packets, GFP_KERNEL);
-		if (!ptrptr) {
-			retval = -ENOMEM;
-			goto failed_ptrs_alloc;
-		}
-
-		dev->broadcast_rcv_buffer_ptrs = ptrptr;
-		for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++) {
-			void *ptr;
-			unsigned v;
-
-			ptr = kmap(dev->broadcast_rcv_buffer.pages[u]);
-			for (v = 0; v < num_packets / FWNET_ISO_PAGE_COUNT; v++)
-				*ptrptr++ = (void *)
-						((char *)ptr + v * max_receive);
-		}
-		dev->broadcast_rcv_context = context;
-	} else {
-		context = dev->broadcast_rcv_context;
+	ptrptr = kmalloc(sizeof(void *) * num_packets, GFP_KERNEL);
+	if (!ptrptr) {
+		retval = -ENOMEM;
+		goto failed;
+	}
+	dev->broadcast_rcv_buffer_ptrs = ptrptr;
+
+	context = fw_iso_context_create(dev->card, FW_ISO_CONTEXT_RECEIVE,
+					IEEE1394_BROADCAST_CHANNEL,
+					dev->card->link_speed, 8,
+					fwnet_receive_broadcast, dev);
+	if (IS_ERR(context)) {
+		retval = PTR_ERR(context);
+		goto failed;
+	}
+
+	retval = fw_iso_buffer_init(&dev->broadcast_rcv_buffer, dev->card,
+				    FWNET_ISO_PAGE_COUNT, DMA_FROM_DEVICE);
+	if (retval < 0)
+		goto failed;
+
+	dev->broadcast_state = FWNET_BROADCAST_STOPPED;
+
+	for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++) {
+		void *ptr;
+		unsigned v;
+
+		ptr = kmap(dev->broadcast_rcv_buffer.pages[u]);
+		for (v = 0; v < num_packets / FWNET_ISO_PAGE_COUNT; v++)
+			*ptrptr++ = (void *) ((char *)ptr + v * max_receive);
 	}
+	dev->broadcast_rcv_context = context;

 	packet.payload_length = max_receive;
 	packet.interrupt = 1;
@@ -1191,7 +1167,7 @@ static int fwnet_broadcast_start(struct fwnet_device *dev)
 		retval = fw_iso_context_queue(context, &packet,
 					      &dev->broadcast_rcv_buffer, offset);
 		if (retval < 0)
-			goto failed_rcv_queue;
+			goto failed;

 		offset += max_receive;
 	}
@@ -1201,7 +1177,7 @@ static int fwnet_broadcast_start(struct fwnet_device *dev)
 	retval = fw_iso_context_start(context, -1, 0,
 			FW_ISO_CONTEXT_MATCH_ALL_TAGS); /* ??? sync */
 	if (retval < 0)
-		goto failed_rcv_queue;
+		goto failed;

 	/* FIXME: adjust it according to the min. speed of all known peers? */
 	dev->broadcast_xmt_max_payload = IEEE1394_MAX_PAYLOAD_S100
@@ -1210,19 +1186,8 @@ static int fwnet_broadcast_start(struct fwnet_device *dev)
1210 1186
1211 return 0; 1187 return 0;
1212 1188
1213 failed_rcv_queue: 1189 failed:
1214 kfree(dev->broadcast_rcv_buffer_ptrs); 1190 __fwnet_broadcast_stop(dev);
1215 dev->broadcast_rcv_buffer_ptrs = NULL;
1216 failed_ptrs_alloc:
1217 fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer, dev->card);
1218 failed_buffer_init:
1219 fw_iso_context_destroy(context);
1220 dev->broadcast_rcv_context = NULL;
1221 failed_context_create:
1222 fw_core_remove_address_handler(&dev->handler);
1223 failed_initial:
1224 dev->local_fifo = FWNET_NO_FIFO_ADDR;
1225
1226 return retval; 1191 return retval;
1227} 1192}
1228 1193
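The rewritten error path above funnels every failure through a single failed: label that calls __fwnet_broadcast_stop(), replacing the ladder of step-specific labels (failed_context_create, failed_buffer_init, failed_ptrs_alloc, failed_rcv_queue) on the left. A minimal userspace sketch of that idiom, assuming only that the teardown helper is safe to call with partially allocated state; all names here are illustrative stand-ins, not driver symbols:

	#include <stdio.h>
	#include <stdlib.h>

	/* Hypothetical device state; stands in for struct fwnet_device. */
	struct dev_state {
		void *ctx;	/* stands in for the iso context   */
		void *buf;	/* stands in for the iso buffer    */
		void **ptrs;	/* stands in for the page pointers */
	};

	/* Idempotent teardown: safe to call with any subset allocated,
	 * which is what lets a single "failed:" label replace a ladder
	 * of step-specific labels. */
	static void teardown(struct dev_state *d)
	{
		free(d->ptrs);  d->ptrs = NULL;
		free(d->buf);   d->buf  = NULL;
		free(d->ctx);   d->ctx  = NULL;
	}

	static int start(struct dev_state *d)
	{
		int ret = -1;	/* stands in for -ENOMEM etc. */

		d->ctx = malloc(64);
		if (!d->ctx)
			goto failed;
		d->buf = malloc(4096);
		if (!d->buf)
			goto failed;
		d->ptrs = calloc(32, sizeof(*d->ptrs));
		if (!d->ptrs)
			goto failed;
		return 0;

	failed:
		teardown(d);	/* one exit path, like __fwnet_broadcast_stop() */
		return ret;
	}

	int main(void)
	{
		struct dev_state d = { 0 };

		printf("start() -> %d\n", start(&d));
		teardown(&d);
		return 0;
	}

The precondition is the part worth copying: once teardown tolerates NULLs and half-built state, the success and failure paths stop diverging.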
@@ -1240,11 +1205,10 @@ static int fwnet_open(struct net_device *net)
1240 struct fwnet_device *dev = netdev_priv(net); 1205 struct fwnet_device *dev = netdev_priv(net);
1241 int ret; 1206 int ret;
1242 1207
1243 if (dev->broadcast_state == FWNET_BROADCAST_ERROR) { 1208 ret = fwnet_broadcast_start(dev);
1244 ret = fwnet_broadcast_start(dev); 1209 if (ret)
1245 if (ret) 1210 return ret;
1246 return ret; 1211
1247 }
1248 netif_start_queue(net); 1212 netif_start_queue(net);
1249 1213
1250 spin_lock_irq(&dev->lock); 1214 spin_lock_irq(&dev->lock);
@@ -1257,9 +1221,10 @@ static int fwnet_open(struct net_device *net)
1257/* ifdown */ 1221/* ifdown */
1258static int fwnet_stop(struct net_device *net) 1222static int fwnet_stop(struct net_device *net)
1259{ 1223{
1260 netif_stop_queue(net); 1224 struct fwnet_device *dev = netdev_priv(net);
1261 1225
1262 /* Deallocate iso context for use by other applications? */ 1226 netif_stop_queue(net);
1227 fwnet_broadcast_stop(dev);
1263 1228
1264 return 0; 1229 return 0;
1265} 1230}
@@ -1299,19 +1264,27 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
1299 * We might need to rebuild the header on tx failure. 1264 * We might need to rebuild the header on tx failure.
1300 */ 1265 */
1301 memcpy(&hdr_buf, skb->data, sizeof(hdr_buf)); 1266 memcpy(&hdr_buf, skb->data, sizeof(hdr_buf));
1302 skb_pull(skb, sizeof(hdr_buf));
1303
1304 proto = hdr_buf.h_proto; 1267 proto = hdr_buf.h_proto;
1268
1269 switch (proto) {
1270 case htons(ETH_P_ARP):
1271 case htons(ETH_P_IP):
1272#if IS_ENABLED(CONFIG_IPV6)
1273 case htons(ETH_P_IPV6):
1274#endif
1275 break;
1276 default:
1277 goto fail;
1278 }
1279
1280 skb_pull(skb, sizeof(hdr_buf));
1305 dg_size = skb->len; 1281 dg_size = skb->len;
1306 1282
1307 /* 1283 /*
1308 * Set the transmission type for the packet. ARP packets and IP 1284 * Set the transmission type for the packet. ARP packets and IP
1309 * broadcast packets are sent via GASP. 1285 * broadcast packets are sent via GASP.
1310 */ 1286 */
1311 if (memcmp(hdr_buf.h_dest, net->broadcast, FWNET_ALEN) == 0 1287 if (fwnet_hwaddr_is_multicast(hdr_buf.h_dest)) {
1312 || proto == htons(ETH_P_ARP)
1313 || (proto == htons(ETH_P_IP)
1314 && IN_MULTICAST(ntohl(ip_hdr(skb)->daddr)))) {
1315 max_payload = dev->broadcast_xmt_max_payload; 1288 max_payload = dev->broadcast_xmt_max_payload;
1316 datagram_label_ptr = &dev->broadcast_xmt_datagramlabel; 1289 datagram_label_ptr = &dev->broadcast_xmt_datagramlabel;
1317 1290
@@ -1320,11 +1293,12 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
1320 ptask->dest_node = IEEE1394_ALL_NODES; 1293 ptask->dest_node = IEEE1394_ALL_NODES;
1321 ptask->speed = SCODE_100; 1294 ptask->speed = SCODE_100;
1322 } else { 1295 } else {
1323 __be64 guid = get_unaligned((__be64 *)hdr_buf.h_dest); 1296 union fwnet_hwaddr *ha = (union fwnet_hwaddr *)hdr_buf.h_dest;
1297 __be64 guid = get_unaligned(&ha->uc.uniq_id);
1324 u8 generation; 1298 u8 generation;
1325 1299
1326 peer = fwnet_peer_find_by_guid(dev, be64_to_cpu(guid)); 1300 peer = fwnet_peer_find_by_guid(dev, be64_to_cpu(guid));
1327 if (!peer || peer->fifo == FWNET_NO_FIFO_ADDR) 1301 if (!peer)
1328 goto fail; 1302 goto fail;
1329 1303
1330 generation = peer->generation; 1304 generation = peer->generation;
@@ -1332,32 +1306,12 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
1332 max_payload = peer->max_payload; 1306 max_payload = peer->max_payload;
1333 datagram_label_ptr = &peer->datagram_label; 1307 datagram_label_ptr = &peer->datagram_label;
1334 1308
1335 ptask->fifo_addr = peer->fifo; 1309 ptask->fifo_addr = fwnet_hwaddr_fifo(ha);
1336 ptask->generation = generation; 1310 ptask->generation = generation;
1337 ptask->dest_node = dest_node; 1311 ptask->dest_node = dest_node;
1338 ptask->speed = peer->speed; 1312 ptask->speed = peer->speed;
1339 } 1313 }
1340 1314
1341 /* If this is an ARP packet, convert it */
1342 if (proto == htons(ETH_P_ARP)) {
1343 struct arphdr *arp = (struct arphdr *)skb->data;
1344 unsigned char *arp_ptr = (unsigned char *)(arp + 1);
1345 struct rfc2734_arp *arp1394 = (struct rfc2734_arp *)skb->data;
1346 __be32 ipaddr;
1347
1348 ipaddr = get_unaligned((__be32 *)(arp_ptr + FWNET_ALEN));
1349
1350 arp1394->hw_addr_len = RFC2734_HW_ADDR_LEN;
1351 arp1394->max_rec = dev->card->max_receive;
1352 arp1394->sspd = dev->card->link_speed;
1353
1354 put_unaligned_be16(dev->local_fifo >> 32,
1355 &arp1394->fifo_hi);
1356 put_unaligned_be32(dev->local_fifo & 0xffffffff,
1357 &arp1394->fifo_lo);
1358 put_unaligned(ipaddr, &arp1394->sip);
1359 }
1360
1361 ptask->hdr.w0 = 0; 1315 ptask->hdr.w0 = 0;
1362 ptask->hdr.w1 = 0; 1316 ptask->hdr.w1 = 0;
1363 ptask->skb = skb; 1317 ptask->skb = skb;
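fwnet_tx now filters on the EtherType up front, so only ARP, IPv4 and, when configured, IPv6 reach the encapsulation code. One detail worth noting: htons() on a constant folds at compile time in kernel code, which is why it can sit inside case labels, and IS_ENABLED(CONFIG_IPV6) compiles the IPv6 case in or out. A sketch of the same dispatch as a standalone predicate; this is a kernel-context fragment (bool, __be16, the ETH_P_* constants and htons() come from the usual kernel headers), and fwnet_proto_supported() is a hypothetical helper, not something this patch adds:

	/* Sketch only: mirrors the switch in fwnet_tx() above. */
	static bool fwnet_proto_supported(__be16 proto)
	{
		switch (proto) {
		case htons(ETH_P_ARP):
		case htons(ETH_P_IP):
	#if IS_ENABLED(CONFIG_IPV6)
		case htons(ETH_P_IPV6):
	#endif
			return true;
		default:
			return false;	/* caller drops the skb via "goto fail" */
		}
	}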
@@ -1472,8 +1426,6 @@ static int fwnet_add_peer(struct fwnet_device *dev,
1472 1426
1473 peer->dev = dev; 1427 peer->dev = dev;
1474 peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4]; 1428 peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
1475 peer->fifo = FWNET_NO_FIFO_ADDR;
1476 peer->ip = 0;
1477 INIT_LIST_HEAD(&peer->pd_list); 1429 INIT_LIST_HEAD(&peer->pd_list);
1478 peer->pdg_size = 0; 1430 peer->pdg_size = 0;
1479 peer->datagram_label = 0; 1431 peer->datagram_label = 0;
@@ -1503,6 +1455,7 @@ static int fwnet_probe(struct device *_dev)
1503 struct fwnet_device *dev; 1455 struct fwnet_device *dev;
1504 unsigned max_mtu; 1456 unsigned max_mtu;
1505 int ret; 1457 int ret;
1458 union fwnet_hwaddr *ha;
1506 1459
1507 mutex_lock(&fwnet_device_mutex); 1460 mutex_lock(&fwnet_device_mutex);
1508 1461
@@ -1533,6 +1486,11 @@ static int fwnet_probe(struct device *_dev)
1533 dev->card = card; 1486 dev->card = card;
1534 dev->netdev = net; 1487 dev->netdev = net;
1535 1488
1489 ret = fwnet_fifo_start(dev);
1490 if (ret < 0)
1491 goto out;
1492 dev->local_fifo = dev->handler.offset;
1493
1536 /* 1494 /*
1537 * Use the RFC 2734 default 1500 octets or the maximum payload 1495 * Use the RFC 2734 default 1500 octets or the maximum payload
1538 * as initial MTU 1496 * as initial MTU
@@ -1542,24 +1500,31 @@ static int fwnet_probe(struct device *_dev)
1542 net->mtu = min(1500U, max_mtu); 1500 net->mtu = min(1500U, max_mtu);
1543 1501
1544 /* Set our hardware address while we're at it */ 1502 /* Set our hardware address while we're at it */
1545 put_unaligned_be64(card->guid, net->dev_addr); 1503 ha = (union fwnet_hwaddr *)net->dev_addr;
1546 put_unaligned_be64(~0ULL, net->broadcast); 1504 put_unaligned_be64(card->guid, &ha->uc.uniq_id);
1505 ha->uc.max_rec = dev->card->max_receive;
1506 ha->uc.sspd = dev->card->link_speed;
1507 put_unaligned_be16(dev->local_fifo >> 32, &ha->uc.fifo_hi);
1508 put_unaligned_be32(dev->local_fifo & 0xffffffff, &ha->uc.fifo_lo);
1509
1510 memset(net->broadcast, -1, net->addr_len);
1511
1547 ret = register_netdev(net); 1512 ret = register_netdev(net);
1548 if (ret) 1513 if (ret)
1549 goto out; 1514 goto out;
1550 1515
1551 list_add_tail(&dev->dev_link, &fwnet_device_list); 1516 list_add_tail(&dev->dev_link, &fwnet_device_list);
1552 dev_notice(&net->dev, "IPv4 over IEEE 1394 on card %s\n", 1517 dev_notice(&net->dev, "IP over IEEE 1394 on card %s\n",
1553 dev_name(card->device)); 1518 dev_name(card->device));
1554 have_dev: 1519 have_dev:
1555 ret = fwnet_add_peer(dev, unit, device); 1520 ret = fwnet_add_peer(dev, unit, device);
1556 if (ret && allocated_netdev) { 1521 if (ret && allocated_netdev) {
1557 unregister_netdev(net); 1522 unregister_netdev(net);
1558 list_del(&dev->dev_link); 1523 list_del(&dev->dev_link);
1559 }
1560 out: 1524 out:
1561 if (ret && allocated_netdev) 1525 fwnet_fifo_stop(dev);
1562 free_netdev(net); 1526 free_netdev(net);
1527 }
1563 1528
1564 mutex_unlock(&fwnet_device_mutex); 1529 mutex_unlock(&fwnet_device_mutex);
1565 1530
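With the FIFO address now allocated at probe time, the device's 16-byte hardware address can carry the full RFC 2734 unicast tuple: EUI-64, max_rec, sspd and the 48-bit FIFO offset. A plain-C sketch of the packing; the struct layout below is reconstructed from the put_unaligned_be16/be32/be64 calls in the hunk above and should be treated as an assumption:

	#include <stdint.h>

	/* Assumed layout of the unicast hardware address, inferred from
	 * the probe code above: 8-byte EUI-64, 1-byte max_rec, 1-byte
	 * sspd, then the 48-bit FIFO offset split 16/32. Total 16 bytes,
	 * matching net->addr_len. */
	struct fwnet_uc_hwaddr {
		uint8_t uniq_id[8];	/* EUI-64, big endian */
		uint8_t max_rec;	/* log2-coded max payload */
		uint8_t sspd;		/* link speed code */
		uint8_t fifo_hi[2];	/* bits 47:32 of the FIFO offset */
		uint8_t fifo_lo[4];	/* bits 31:0 */
	};

	static void put_be64(uint8_t *p, uint64_t v)
	{
		for (int i = 0; i < 8; i++)
			p[i] = v >> (56 - 8 * i);
	}

	void fwnet_pack_hwaddr(struct fwnet_uc_hwaddr *ha, uint64_t guid,
			       uint8_t max_rec, uint8_t sspd, uint64_t fifo)
	{
		put_be64(ha->uniq_id, guid);
		ha->max_rec = max_rec;
		ha->sspd = sspd;
		/* 48-bit FIFO offset, split as in the probe code above */
		ha->fifo_hi[0] = fifo >> 40;
		ha->fifo_hi[1] = fifo >> 32;
		ha->fifo_lo[0] = fifo >> 24;
		ha->fifo_lo[1] = fifo >> 16;
		ha->fifo_lo[2] = fifo >> 8;
		ha->fifo_lo[3] = fifo;
	}

Publishing the FIFO inside the address is what lets the ARP translation code disappear from fwnet_tx: the peer's FIFO now travels with its hardware address instead of being patched into the packet.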
@@ -1592,22 +1557,14 @@ static int fwnet_remove(struct device *_dev)
1592 mutex_lock(&fwnet_device_mutex); 1557 mutex_lock(&fwnet_device_mutex);
1593 1558
1594 net = dev->netdev; 1559 net = dev->netdev;
1595 if (net && peer->ip)
1596 arp_invalidate(net, peer->ip);
1597 1560
1598 fwnet_remove_peer(peer, dev); 1561 fwnet_remove_peer(peer, dev);
1599 1562
1600 if (list_empty(&dev->peer_list)) { 1563 if (list_empty(&dev->peer_list)) {
1601 unregister_netdev(net); 1564 unregister_netdev(net);
1602 1565
1603 if (dev->local_fifo != FWNET_NO_FIFO_ADDR) 1566 fwnet_fifo_stop(dev);
1604 fw_core_remove_address_handler(&dev->handler); 1567
1605 if (dev->broadcast_rcv_context) {
1606 fw_iso_context_stop(dev->broadcast_rcv_context);
1607 fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer,
1608 dev->card);
1609 fw_iso_context_destroy(dev->broadcast_rcv_context);
1610 }
1611 for (i = 0; dev->queued_datagrams && i < 5; i++) 1568 for (i = 0; dev->queued_datagrams && i < 5; i++)
1612 ssleep(1); 1569 ssleep(1);
1613 WARN_ON(dev->queued_datagrams); 1570 WARN_ON(dev->queued_datagrams);
@@ -1646,6 +1603,14 @@ static const struct ieee1394_device_id fwnet_id_table[] = {
1646 .specifier_id = IANA_SPECIFIER_ID, 1603 .specifier_id = IANA_SPECIFIER_ID,
1647 .version = RFC2734_SW_VERSION, 1604 .version = RFC2734_SW_VERSION,
1648 }, 1605 },
1606#if IS_ENABLED(CONFIG_IPV6)
1607 {
1608 .match_flags = IEEE1394_MATCH_SPECIFIER_ID |
1609 IEEE1394_MATCH_VERSION,
1610 .specifier_id = IANA_SPECIFIER_ID,
1611 .version = RFC3146_SW_VERSION,
1612 },
1613#endif
1649 { } 1614 { }
1650}; 1615};
1651 1616
@@ -1683,6 +1648,30 @@ static struct fw_descriptor rfc2374_unit_directory = {
1683 .data = rfc2374_unit_directory_data 1648 .data = rfc2374_unit_directory_data
1684}; 1649};
1685 1650
1651#if IS_ENABLED(CONFIG_IPV6)
1652static const u32 rfc3146_unit_directory_data[] = {
1653 0x00040000, /* directory_length */
1654 0x1200005e, /* unit_specifier_id: IANA */
1655 0x81000003, /* textual descriptor offset */
1656 0x13000002, /* unit_sw_version: RFC 3146 */
1657 0x81000005, /* textual descriptor offset */
1658 0x00030000, /* descriptor_length */
1659 0x00000000, /* text */
1660 0x00000000, /* minimal ASCII, en */
1661 0x49414e41, /* I A N A */
1662 0x00030000, /* descriptor_length */
1663 0x00000000, /* text */
1664 0x00000000, /* minimal ASCII, en */
1665 0x49507636, /* I P v 6 */
1666};
1667
1668static struct fw_descriptor rfc3146_unit_directory = {
1669 .length = ARRAY_SIZE(rfc3146_unit_directory_data),
1670 .key = (CSR_DIRECTORY | CSR_UNIT) << 24,
1671 .data = rfc3146_unit_directory_data
1672};
1673#endif
1674
1686static int __init fwnet_init(void) 1675static int __init fwnet_init(void)
1687{ 1676{
1688 int err; 1677 int err;
@@ -1691,11 +1680,17 @@ static int __init fwnet_init(void)
1691 if (err) 1680 if (err)
1692 return err; 1681 return err;
1693 1682
1683#if IS_ENABLED(CONFIG_IPV6)
1684 err = fw_core_add_descriptor(&rfc3146_unit_directory);
1685 if (err)
1686 goto out;
1687#endif
1688
1694 fwnet_packet_task_cache = kmem_cache_create("packet_task", 1689 fwnet_packet_task_cache = kmem_cache_create("packet_task",
1695 sizeof(struct fwnet_packet_task), 0, 0, NULL); 1690 sizeof(struct fwnet_packet_task), 0, 0, NULL);
1696 if (!fwnet_packet_task_cache) { 1691 if (!fwnet_packet_task_cache) {
1697 err = -ENOMEM; 1692 err = -ENOMEM;
1698 goto out; 1693 goto out2;
1699 } 1694 }
1700 1695
1701 err = driver_register(&fwnet_driver.driver); 1696 err = driver_register(&fwnet_driver.driver);
@@ -1703,7 +1698,11 @@ static int __init fwnet_init(void)
1703 return 0; 1698 return 0;
1704 1699
1705 kmem_cache_destroy(fwnet_packet_task_cache); 1700 kmem_cache_destroy(fwnet_packet_task_cache);
1701out2:
1702#if IS_ENABLED(CONFIG_IPV6)
1703 fw_core_remove_descriptor(&rfc3146_unit_directory);
1706out: 1704out:
1705#endif
1707 fw_core_remove_descriptor(&rfc2374_unit_directory); 1706 fw_core_remove_descriptor(&rfc2374_unit_directory);
1708 1707
1709 return err; 1708 return err;
@@ -1714,11 +1713,14 @@ static void __exit fwnet_cleanup(void)
1714{ 1713{
1715 driver_unregister(&fwnet_driver.driver); 1714 driver_unregister(&fwnet_driver.driver);
1716 kmem_cache_destroy(fwnet_packet_task_cache); 1715 kmem_cache_destroy(fwnet_packet_task_cache);
1716#if IS_ENABLED(CONFIG_IPV6)
1717 fw_core_remove_descriptor(&rfc3146_unit_directory);
1718#endif
1717 fw_core_remove_descriptor(&rfc2374_unit_directory); 1719 fw_core_remove_descriptor(&rfc2374_unit_directory);
1718} 1720}
1719module_exit(fwnet_cleanup); 1721module_exit(fwnet_cleanup);
1720 1722
1721MODULE_AUTHOR("Jay Fenlason <fenlason@redhat.com>"); 1723MODULE_AUTHOR("Jay Fenlason <fenlason@redhat.com>");
1722MODULE_DESCRIPTION("IPv4 over IEEE1394 as per RFC 2734"); 1724MODULE_DESCRIPTION("IP over IEEE1394 as per RFC 2734/3146");
1723MODULE_LICENSE("GPL"); 1725MODULE_LICENSE("GPL");
1724MODULE_DEVICE_TABLE(ieee1394, fwnet_id_table); 1726MODULE_DEVICE_TABLE(ieee1394, fwnet_id_table);
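fwnet_init() now registers a second unit directory for IPv6, so the unwind gains a second label that only exists when CONFIG_IPV6 is enabled, and teardown must run in strict reverse order of setup. A compilable toy with the same shape, assuming nothing beyond what the hunks show; add_*/remove_* are stand-ins for fw_core_add_descriptor()/fw_core_remove_descriptor():

	#include <stdio.h>

	#define CONFIG_STEP_B 1		/* stands in for IS_ENABLED(CONFIG_IPV6) */

	static int add_a(void)     { return 0; }
	static int add_b(void)     { return 0; }
	static int add_c(void)     { return -1; }	/* force the unwind path */
	static void remove_a(void) { puts("remove a"); }
	static void remove_b(void) { puts("remove b"); }

	static int init(void)
	{
		int err;

		err = add_a();
		if (err)
			return err;
	#if CONFIG_STEP_B
		err = add_b();
		if (err)
			goto out;
	#endif
		err = add_c();
		if (err)
			goto out2;
		return 0;

	out2:				/* undo in strict reverse order */
	#if CONFIG_STEP_B
		remove_b();
	out:
	#endif
		remove_a();
		return err;
	}

	int main(void)
	{
		printf("init() -> %d\n", init());
		return 0;
	}

As in the patch, the conditional label lives entirely inside the #if, so the disabled configuration still compiles with a single-step unwind.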
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index a3fde52840ca..65c30ea8c1a1 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -511,12 +511,16 @@ static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
511static int send_connect(struct c4iw_ep *ep) 511static int send_connect(struct c4iw_ep *ep)
512{ 512{
513 struct cpl_act_open_req *req; 513 struct cpl_act_open_req *req;
514 struct cpl_t5_act_open_req *t5_req;
514 struct sk_buff *skb; 515 struct sk_buff *skb;
515 u64 opt0; 516 u64 opt0;
516 u32 opt2; 517 u32 opt2;
517 unsigned int mtu_idx; 518 unsigned int mtu_idx;
518 int wscale; 519 int wscale;
519 int wrlen = roundup(sizeof *req, 16); 520 int size = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
521 sizeof(struct cpl_act_open_req) :
522 sizeof(struct cpl_t5_act_open_req);
523 int wrlen = roundup(size, 16);
520 524
521 PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid); 525 PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);
522 526
@@ -552,17 +556,36 @@ static int send_connect(struct c4iw_ep *ep)
552 opt2 |= WND_SCALE_EN(1); 556 opt2 |= WND_SCALE_EN(1);
553 t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure); 557 t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
554 558
555 	req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
556 	INIT_TP_WR(req, 0);
557 	OPCODE_TID(req) = cpu_to_be32(
558 		MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid)));
559 	req->local_port = ep->com.local_addr.sin_port;
560 	req->peer_port = ep->com.remote_addr.sin_port;
561 	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
562 	req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
563 	req->opt0 = cpu_to_be64(opt0);
564 	req->params = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, ep->l2t));
565 	req->opt2 = cpu_to_be32(opt2);
559 	if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
560 		req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
561 		INIT_TP_WR(req, 0);
562 		OPCODE_TID(req) = cpu_to_be32(
563 				MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
564 				((ep->rss_qid << 14) | ep->atid)));
565 		req->local_port = ep->com.local_addr.sin_port;
566 		req->peer_port = ep->com.remote_addr.sin_port;
567 		req->local_ip = ep->com.local_addr.sin_addr.s_addr;
568 		req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
569 		req->opt0 = cpu_to_be64(opt0);
570 		req->params = cpu_to_be32(select_ntuple(ep->com.dev,
571 					ep->dst, ep->l2t));
572 		req->opt2 = cpu_to_be32(opt2);
573 	} else {
574 		t5_req = (struct cpl_t5_act_open_req *) skb_put(skb, wrlen);
575 		INIT_TP_WR(t5_req, 0);
576 		OPCODE_TID(t5_req) = cpu_to_be32(
577 				MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
578 				((ep->rss_qid << 14) | ep->atid)));
579 		t5_req->local_port = ep->com.local_addr.sin_port;
580 		t5_req->peer_port = ep->com.remote_addr.sin_port;
581 		t5_req->local_ip = ep->com.local_addr.sin_addr.s_addr;
582 		t5_req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
583 		t5_req->opt0 = cpu_to_be64(opt0);
584 		t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
585 				select_ntuple(ep->com.dev, ep->dst, ep->l2t)));
586 		t5_req->opt2 = cpu_to_be32(opt2);
587 	}
588
566 set_bit(ACT_OPEN_REQ, &ep->com.history); 589 set_bit(ACT_OPEN_REQ, &ep->com.history);
567 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 590 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
568} 591}
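send_connect() now computes the work-request length from whichever CPL struct the chip generation needs before allocating room in the skb, then fills the matching layout. A hedged sketch of that size-then-fill pattern; req_v4/req_v5 are toy stand-ins for cpl_act_open_req and cpl_t5_act_open_req, with made-up sizes:

	#include <stddef.h>
	#include <stdio.h>
	#include <string.h>

	struct req_v4 { char hdr[8]; char body[24]; };	/* stand-in: cpl_act_open_req    */
	struct req_v5 { char hdr[8]; char body[40]; };	/* stand-in: cpl_t5_act_open_req */

	/* Pick the struct size by chip generation, then round up to the
	 * 16-byte units the hardware expects, like roundup(size, 16)
	 * in the hunk above. */
	static size_t wr_len(int is_v4)
	{
		size_t size = is_v4 ? sizeof(struct req_v4) : sizeof(struct req_v5);

		return (size + 15) & ~(size_t)15;
	}

	/* Fill whichever layout matches the chip; the caller passes a
	 * buffer of wr_len(is_v4) bytes, mirroring skb_put(skb, wrlen). */
	static void build_req(void *buf, int is_v4)
	{
		if (is_v4) {
			struct req_v4 *req = buf;

			memset(req, 0, sizeof(*req));
			/* ... ports, addresses, opt0/opt2 as in the T4 branch ... */
		} else {
			struct req_v5 *req = buf;

			memset(req, 0, sizeof(*req));
			/* ... same fields plus the wider params word (T5 branch) ... */
		}
	}

	int main(void)
	{
		char buf[64];

		printf("v4 wr_len %zu, v5 wr_len %zu\n", wr_len(1), wr_len(0));
		build_req(buf, 1);
		return 0;
	}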
@@ -1676,9 +1699,9 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1676 case CPL_ERR_CONN_TIMEDOUT: 1699 case CPL_ERR_CONN_TIMEDOUT:
1677 break; 1700 break;
1678 case CPL_ERR_TCAM_FULL: 1701 case CPL_ERR_TCAM_FULL:
1702 dev->rdev.stats.tcam_full++;
1679 if (dev->rdev.lldi.enable_fw_ofld_conn) { 1703 if (dev->rdev.lldi.enable_fw_ofld_conn) {
1680 mutex_lock(&dev->rdev.stats.lock); 1704 mutex_lock(&dev->rdev.stats.lock);
1681 dev->rdev.stats.tcam_full++;
1682 mutex_unlock(&dev->rdev.stats.lock); 1705 mutex_unlock(&dev->rdev.stats.lock);
1683 send_fw_act_open_req(ep, 1706 send_fw_act_open_req(ep,
1684 GET_TID_TID(GET_AOPEN_ATID( 1707 GET_TID_TID(GET_AOPEN_ATID(
@@ -2875,12 +2898,14 @@ static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
2875static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos) 2898static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
2876{ 2899{
2877 u32 l2info; 2900 u32 l2info;
2878 u16 vlantag, len, hdr_len; 2901 u16 vlantag, len, hdr_len, eth_hdr_len;
2879 u8 intf; 2902 u8 intf;
2880 struct cpl_rx_pkt *cpl = cplhdr(skb); 2903 struct cpl_rx_pkt *cpl = cplhdr(skb);
2881 struct cpl_pass_accept_req *req; 2904 struct cpl_pass_accept_req *req;
2882 struct tcp_options_received tmp_opt; 2905 struct tcp_options_received tmp_opt;
2906 struct c4iw_dev *dev;
2883 2907
2908 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
2884 /* Store values from cpl_rx_pkt in temporary location. */ 2909 /* Store values from cpl_rx_pkt in temporary location. */
2885 vlantag = (__force u16) cpl->vlan; 2910 vlantag = (__force u16) cpl->vlan;
2886 len = (__force u16) cpl->len; 2911 len = (__force u16) cpl->len;
@@ -2896,7 +2921,7 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
2896 */ 2921 */
2897 memset(&tmp_opt, 0, sizeof(tmp_opt)); 2922 memset(&tmp_opt, 0, sizeof(tmp_opt));
2898 tcp_clear_options(&tmp_opt); 2923 tcp_clear_options(&tmp_opt);
2899 tcp_parse_options(skb, &tmp_opt, NULL, 0, NULL); 2924 tcp_parse_options(skb, &tmp_opt, 0, NULL);
2900 2925
2901 req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req)); 2926 req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
2902 memset(req, 0, sizeof(*req)); 2927 memset(req, 0, sizeof(*req));
@@ -2904,14 +2929,16 @@ static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos)
2904 V_SYN_MAC_IDX(G_RX_MACIDX( 2929 V_SYN_MAC_IDX(G_RX_MACIDX(
2905 (__force int) htonl(l2info))) | 2930 (__force int) htonl(l2info))) |
2906 F_SYN_XACT_MATCH); 2931 F_SYN_XACT_MATCH);
2932 eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
2933 G_RX_ETHHDR_LEN((__force int) htonl(l2info)) :
2934 G_RX_T5_ETHHDR_LEN((__force int) htonl(l2info));
2907 req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN( 2935 req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(
2908 (__force int) htonl(l2info))) | 2936 (__force int) htonl(l2info))) |
2909 V_TCP_HDR_LEN(G_RX_TCPHDR_LEN( 2937 V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(
2910 (__force int) htons(hdr_len))) | 2938 (__force int) htons(hdr_len))) |
2911 V_IP_HDR_LEN(G_RX_IPHDR_LEN( 2939 V_IP_HDR_LEN(G_RX_IPHDR_LEN(
2912 (__force int) htons(hdr_len))) | 2940 (__force int) htons(hdr_len))) |
2913 V_ETH_HDR_LEN(G_RX_ETHHDR_LEN( 2941 V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(eth_hdr_len)));
2914 (__force int) htonl(l2info))));
2915 req->vlan = (__force __be16) vlantag; 2942 req->vlan = (__force __be16) vlantag;
2916 req->len = (__force __be16) len; 2943 req->len = (__force __be16) len;
2917 req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) | 2944 req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
@@ -2999,7 +3026,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
2999 u16 window; 3026 u16 window;
3000 struct port_info *pi; 3027 struct port_info *pi;
3001 struct net_device *pdev; 3028 struct net_device *pdev;
3002 u16 rss_qid; 3029 u16 rss_qid, eth_hdr_len;
3003 int step; 3030 int step;
3004 u32 tx_chan; 3031 u32 tx_chan;
3005 struct neighbour *neigh; 3032 struct neighbour *neigh;
@@ -3028,7 +3055,10 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
3028 goto reject; 3055 goto reject;
3029 } 3056 }
3030 3057
3031 if (G_RX_ETHHDR_LEN(ntohl(cpl->l2info)) == ETH_HLEN) { 3058 eth_hdr_len = is_t4(dev->rdev.lldi.adapter_type) ?
3059 G_RX_ETHHDR_LEN(htonl(cpl->l2info)) :
3060 G_RX_T5_ETHHDR_LEN(htonl(cpl->l2info));
3061 if (eth_hdr_len == ETH_HLEN) {
3032 eh = (struct ethhdr *)(req + 1); 3062 eh = (struct ethhdr *)(req + 1);
3033 iph = (struct iphdr *)(eh + 1); 3063 iph = (struct iphdr *)(eh + 1);
3034 } else { 3064 } else {
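Both build_cpl_pass_accept_req() and rx_pkt() above now read the Ethernet header length through a chip-dependent accessor, since T5 packs the field differently inside l2info. The shape of it in isolation; the masks and shifts below are invented for illustration only, the real ones live behind the G_RX_ETHHDR_LEN and G_RX_T5_ETHHDR_LEN macros in the adapter register headers:

	#include <stdint.h>

	/* Illustrative field positions; treat them as assumptions. */
	static inline uint16_t ethhdr_len_t4(uint32_t l2info)
	{
		return l2info & 0x1f;		/* assumed T4 position */
	}

	static inline uint16_t ethhdr_len_t5(uint32_t l2info)
	{
		return (l2info >> 8) & 0x3f;	/* assumed T5 position */
	}

	uint16_t eth_hdr_len(uint32_t l2info, int chip_is_t4)
	{
		return chip_is_t4 ? ethhdr_len_t4(l2info) : ethhdr_len_t5(l2info);
	}

rx_pkt() then compares the result against ETH_HLEN to decide whether a VLAN tag sits in front of the IP header, exactly as in the hunk above.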
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 80069ad595c1..ae656016e1ae 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -41,10 +41,20 @@
41#define DRV_VERSION "0.1" 41#define DRV_VERSION "0.1"
42 42
43MODULE_AUTHOR("Steve Wise"); 43MODULE_AUTHOR("Steve Wise");
44MODULE_DESCRIPTION("Chelsio T4 RDMA Driver"); 44MODULE_DESCRIPTION("Chelsio T4/T5 RDMA Driver");
45MODULE_LICENSE("Dual BSD/GPL"); 45MODULE_LICENSE("Dual BSD/GPL");
46MODULE_VERSION(DRV_VERSION); 46MODULE_VERSION(DRV_VERSION);
47 47
48static int allow_db_fc_on_t5;
49module_param(allow_db_fc_on_t5, int, 0644);
50MODULE_PARM_DESC(allow_db_fc_on_t5,
51 "Allow DB Flow Control on T5 (default = 0)");
52
53static int allow_db_coalescing_on_t5;
54module_param(allow_db_coalescing_on_t5, int, 0644);
55MODULE_PARM_DESC(allow_db_coalescing_on_t5,
56 "Allow DB Coalescing on T5 (default = 0)");
57
48struct uld_ctx { 58struct uld_ctx {
49 struct list_head entry; 59 struct list_head entry;
50 struct cxgb4_lld_info lldi; 60 struct cxgb4_lld_info lldi;
@@ -614,7 +624,7 @@ static int rdma_supported(const struct cxgb4_lld_info *infop)
614{ 624{
615 return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 && 625 return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
616 infop->vr->rq.size > 0 && infop->vr->qp.size > 0 && 626 infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
617 infop->vr->cq.size > 0 && infop->vr->ocq.size > 0; 627 infop->vr->cq.size > 0;
618} 628}
619 629
620static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) 630static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
@@ -627,6 +637,22 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
627 pci_name(infop->pdev)); 637 pci_name(infop->pdev));
628 return ERR_PTR(-ENOSYS); 638 return ERR_PTR(-ENOSYS);
629 } 639 }
640 if (!ocqp_supported(infop))
641 pr_info("%s: On-Chip Queues not supported on this device.\n",
642 pci_name(infop->pdev));
643
644 if (!is_t4(infop->adapter_type)) {
645 if (!allow_db_fc_on_t5) {
646 db_fc_threshold = 100000;
647 pr_info("DB Flow Control Disabled.\n");
648 }
649
650 if (!allow_db_coalescing_on_t5) {
651 db_coalescing_threshold = -1;
652 pr_info("DB Coalescing Disabled.\n");
653 }
654 }
655
630 devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp)); 656 devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
631 if (!devp) { 657 if (!devp) {
632 printk(KERN_ERR MOD "Cannot allocate ib device\n"); 658 printk(KERN_ERR MOD "Cannot allocate ib device\n");
@@ -678,8 +704,8 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
678 int i; 704 int i;
679 705
680 if (!vers_printed++) 706 if (!vers_printed++)
681 printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n", 707 pr_info("Chelsio T4/T5 RDMA Driver - version %s\n",
682 DRV_VERSION); 708 DRV_VERSION);
683 709
684 ctx = kzalloc(sizeof *ctx, GFP_KERNEL); 710 ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
685 if (!ctx) { 711 if (!ctx) {
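On the newer chip, c4iw_alloc() above falls back to conservative doorbell settings unless the new module parameters opt back in. A sketch of that probe-time defaulting; the thresholds and sentinels are the ones visible in this diff, while apply_t5_defaults() is a hypothetical wrapper around that code, not a driver function:

	/* Tunables with safe defaults, applied once at probe time. */
	static int allow_db_fc_on_t5;		/* 0 = keep flow control effectively off */
	static int allow_db_coalescing_on_t5;	/* 0 = keep coalescing off              */

	int db_fc_threshold = 1000;		/* default from qp.c in this diff */
	int db_coalescing_threshold;		/* 0 by default */

	void apply_t5_defaults(int chip_is_t4)
	{
		if (chip_is_t4)
			return;		/* T4 keeps the stock thresholds */

		if (!allow_db_fc_on_t5)
			db_fc_threshold = 100000;	/* high enough to never trip */

		if (!allow_db_coalescing_on_t5)
			db_coalescing_threshold = -1;	/* sentinel: feature disabled */
	}

The -1 sentinel matters later in qp.c: every coalescing check is guarded by db_coalescing_threshold >= 0, so the disabled state costs nothing on the hot path.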
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 7eec5e13fa8c..485183ad34cd 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -162,7 +162,7 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
162 return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5)); 162 return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5));
163} 163}
164 164
165#define C4IW_WR_TO (10*HZ) 165#define C4IW_WR_TO (30*HZ)
166 166
167struct c4iw_wr_wait { 167struct c4iw_wr_wait {
168 struct completion completion; 168 struct completion completion;
@@ -369,7 +369,6 @@ struct c4iw_fr_page_list {
369 DEFINE_DMA_UNMAP_ADDR(mapping); 369 DEFINE_DMA_UNMAP_ADDR(mapping);
370 dma_addr_t dma_addr; 370 dma_addr_t dma_addr;
371 struct c4iw_dev *dev; 371 struct c4iw_dev *dev;
372 int size;
373}; 372};
374 373
375static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list( 374static inline struct c4iw_fr_page_list *to_c4iw_fr_page_list(
@@ -817,6 +816,15 @@ static inline int compute_wscale(int win)
817 return wscale; 816 return wscale;
818} 817}
819 818
819static inline int ocqp_supported(const struct cxgb4_lld_info *infop)
820{
821#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
822 return infop->vr->ocq.size > 0;
823#else
824 return 0;
825#endif
826}
827
820u32 c4iw_id_alloc(struct c4iw_id_table *alloc); 828u32 c4iw_id_alloc(struct c4iw_id_table *alloc);
821void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj); 829void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj);
822int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num, 830int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
@@ -930,6 +938,8 @@ extern struct cxgb4_client t4c_client;
930extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS]; 938extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
931extern int c4iw_max_read_depth; 939extern int c4iw_max_read_depth;
932extern int db_fc_threshold; 940extern int db_fc_threshold;
941extern int db_coalescing_threshold;
942extern int use_dsgl;
933 943
934 944
935#endif 945#endif
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 903a92d6f91d..4cb8eb24497c 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -30,16 +30,76 @@
30 * SOFTWARE. 30 * SOFTWARE.
31 */ 31 */
32 32
33#include <linux/module.h>
34#include <linux/moduleparam.h>
33#include <rdma/ib_umem.h> 35#include <rdma/ib_umem.h>
34#include <linux/atomic.h> 36#include <linux/atomic.h>
35 37
36#include "iw_cxgb4.h" 38#include "iw_cxgb4.h"
37 39
40int use_dsgl = 1;
41module_param(use_dsgl, int, 0644);
42MODULE_PARM_DESC(use_dsgl, "Use DSGL for PBL/FastReg (default=1)");
43
38#define T4_ULPTX_MIN_IO 32 44#define T4_ULPTX_MIN_IO 32
39#define C4IW_MAX_INLINE_SIZE 96 45#define C4IW_MAX_INLINE_SIZE 96
46#define T4_ULPTX_MAX_DMA 1024
47#define C4IW_INLINE_THRESHOLD 128
40 48
41static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len, 49static int inline_threshold = C4IW_INLINE_THRESHOLD;
42 void *data) 50module_param(inline_threshold, int, 0644);
51MODULE_PARM_DESC(inline_threshold, "inline vs dsgl threshold (default=128)");
52
53static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
54 u32 len, dma_addr_t data, int wait)
55{
56 struct sk_buff *skb;
57 struct ulp_mem_io *req;
58 struct ulptx_sgl *sgl;
59 u8 wr_len;
60 int ret = 0;
61 struct c4iw_wr_wait wr_wait;
62
63 addr &= 0x7FFFFFF;
64
65 if (wait)
66 c4iw_init_wr_wait(&wr_wait);
67 wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16);
68
69 skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL);
70 if (!skb)
71 return -ENOMEM;
72 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
73
74 req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
75 memset(req, 0, wr_len);
76 INIT_ULPTX_WR(req, wr_len, 0, 0);
77 req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
78 (wait ? FW_WR_COMPL(1) : 0));
79 req->wr.wr_lo = wait ? (__force __be64)&wr_wait : 0;
80 req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
81 req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE));
82 req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1));
83 req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(len>>5));
84 req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16));
85 req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr));
86
87 sgl = (struct ulptx_sgl *)(req + 1);
88 sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_DSGL) |
89 ULPTX_NSGE(1));
90 sgl->len0 = cpu_to_be32(len);
91 sgl->addr0 = cpu_to_be64(data);
92
93 ret = c4iw_ofld_send(rdev, skb);
94 if (ret)
95 return ret;
96 if (wait)
97 ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
98 return ret;
99}
100
101static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
102 void *data)
43{ 103{
44 struct sk_buff *skb; 104 struct sk_buff *skb;
45 struct ulp_mem_io *req; 105 struct ulp_mem_io *req;
@@ -47,6 +107,12 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
47 u8 wr_len, *to_dp, *from_dp; 107 u8 wr_len, *to_dp, *from_dp;
48 int copy_len, num_wqe, i, ret = 0; 108 int copy_len, num_wqe, i, ret = 0;
49 struct c4iw_wr_wait wr_wait; 109 struct c4iw_wr_wait wr_wait;
110 __be32 cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE));
111
112 if (is_t4(rdev->lldi.adapter_type))
113 cmd |= cpu_to_be32(ULP_MEMIO_ORDER(1));
114 else
115 cmd |= cpu_to_be32(V_T5_ULP_MEMIO_IMM(1));
50 116
51 addr &= 0x7FFFFFF; 117 addr &= 0x7FFFFFF;
52 PDBG("%s addr 0x%x len %u\n", __func__, addr, len); 118 PDBG("%s addr 0x%x len %u\n", __func__, addr, len);
@@ -77,7 +143,7 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
77 req->wr.wr_mid = cpu_to_be32( 143 req->wr.wr_mid = cpu_to_be32(
78 FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16))); 144 FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
79 145
80 req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE) | (1<<23)); 146 req->cmd = cmd;
81 req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN( 147 req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(
82 DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO))); 148 DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
83 req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 149 req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr),
@@ -107,6 +173,67 @@ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
107 return ret; 173 return ret;
108} 174}
109 175
176int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
177{
178 u32 remain = len;
179 u32 dmalen;
180 int ret = 0;
181 dma_addr_t daddr;
182 dma_addr_t save;
183
184 daddr = dma_map_single(&rdev->lldi.pdev->dev, data, len, DMA_TO_DEVICE);
185 if (dma_mapping_error(&rdev->lldi.pdev->dev, daddr))
186 return -1;
187 save = daddr;
188
189 while (remain > inline_threshold) {
190 if (remain < T4_ULPTX_MAX_DMA) {
191 if (remain & ~T4_ULPTX_MIN_IO)
192 dmalen = remain & ~(T4_ULPTX_MIN_IO-1);
193 else
194 dmalen = remain;
195 } else
196 dmalen = T4_ULPTX_MAX_DMA;
197 remain -= dmalen;
198 ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, daddr,
199 !remain);
200 if (ret)
201 goto out;
202 addr += dmalen >> 5;
203 data += dmalen;
204 daddr += dmalen;
205 }
206 if (remain)
207 ret = _c4iw_write_mem_inline(rdev, addr, remain, data);
208out:
209 dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE);
210 return ret;
211}
212
213/*
214 * write len bytes of data into addr (32B aligned address)
215 * If data is NULL, clear len bytes of memory to zero.
216 */
217static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len,
218 void *data)
219{
220 if (is_t5(rdev->lldi.adapter_type) && use_dsgl) {
221 if (len > inline_threshold) {
222 if (_c4iw_write_mem_dma(rdev, addr, len, data)) {
223 printk_ratelimited(KERN_WARNING
224 "%s: dma map"
225 " failure (non fatal)\n",
226 pci_name(rdev->lldi.pdev));
227 return _c4iw_write_mem_inline(rdev, addr, len,
228 data);
229 } else
230 return 0;
231 } else
232 return _c4iw_write_mem_inline(rdev, addr, len, data);
233 } else
234 return _c4iw_write_mem_inline(rdev, addr, len, data);
235}
236
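The DMA path above splits a write into chunks of at most T4_ULPTX_MAX_DMA (1024) bytes, keeps each chunk a multiple of the 32-byte T4_ULPTX_MIN_IO unit, and leaves any tail at or below inline_threshold to the inline (immediate-data) path. A compilable sketch of just the split; the rounding test is simplified relative to the remain & ~T4_ULPTX_MIN_IO check in the original:

	#include <stdio.h>

	#define MIN_IO          32	/* T4_ULPTX_MIN_IO: DMA granule      */
	#define MAX_DMA       1024	/* T4_ULPTX_MAX_DMA: per-WR limit    */
	#define INLINE_THRESH  128	/* C4IW_INLINE_THRESHOLD default     */

	/* Mirrors _c4iw_write_mem_dma(): emit 32-byte-aligned DMA chunks
	 * of at most 1024 bytes while more than the inline threshold
	 * remains, then hand the tail to the inline path. */
	static void split(unsigned remain)
	{
		while (remain > INLINE_THRESH) {
			unsigned dmalen;

			if (remain < MAX_DMA)
				dmalen = remain & ~(MIN_IO - 1);  /* round down to 32B */
			else
				dmalen = MAX_DMA;
			printf("dma chunk: %u\n", dmalen);
			remain -= dmalen;
		}
		if (remain)
			printf("inline tail: %u\n", remain);
	}

	int main(void)
	{
		split(2600);	/* prints dma 1024, 1024, 544, then inline tail 8 */
		return 0;
	}

Because remain stays above 128 inside the loop, the rounded chunk is always at least 128 bytes, so the loop cannot stall.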
110/* 237/*
111 * Build and write a TPT entry. 238 * Build and write a TPT entry.
112 * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size, 239 * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size,
@@ -760,19 +887,23 @@ struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device,
760 struct c4iw_fr_page_list *c4pl; 887 struct c4iw_fr_page_list *c4pl;
761 struct c4iw_dev *dev = to_c4iw_dev(device); 888 struct c4iw_dev *dev = to_c4iw_dev(device);
762 dma_addr_t dma_addr; 889 dma_addr_t dma_addr;
763 int size = sizeof *c4pl + page_list_len * sizeof(u64); 890 int pll_len = roundup(page_list_len * sizeof(u64), 32);
764 891
765 c4pl = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev, size, 892 c4pl = kmalloc(sizeof(*c4pl), GFP_KERNEL);
766 &dma_addr, GFP_KERNEL);
767 if (!c4pl) 893 if (!c4pl)
768 return ERR_PTR(-ENOMEM); 894 return ERR_PTR(-ENOMEM);
769 895
896 c4pl->ibpl.page_list = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev,
897 pll_len, &dma_addr,
898 GFP_KERNEL);
899 if (!c4pl->ibpl.page_list) {
900 kfree(c4pl);
901 return ERR_PTR(-ENOMEM);
902 }
770 dma_unmap_addr_set(c4pl, mapping, dma_addr); 903 dma_unmap_addr_set(c4pl, mapping, dma_addr);
771 c4pl->dma_addr = dma_addr; 904 c4pl->dma_addr = dma_addr;
772 c4pl->dev = dev; 905 c4pl->dev = dev;
773 c4pl->size = size; 906 c4pl->ibpl.max_page_list_len = pll_len;
774 c4pl->ibpl.page_list = (u64 *)(c4pl + 1);
775 c4pl->ibpl.max_page_list_len = page_list_len;
776 907
777 return &c4pl->ibpl; 908 return &c4pl->ibpl;
778} 909}
@@ -781,8 +912,10 @@ void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list *ibpl)
781{ 912{
782 struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl); 913 struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl);
783 914
784 dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev, c4pl->size, 915 dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev,
785 c4pl, dma_unmap_addr(c4pl, mapping)); 916 c4pl->ibpl.max_page_list_len,
917 c4pl->ibpl.page_list, dma_unmap_addr(c4pl, mapping));
918 kfree(c4pl);
786} 919}
787 920
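c4iw_alloc_fastreg_pbl() used to carve the page list out of one coherent allocation; it now pairs a kmalloc() for the bookkeeping struct with a dma_alloc_coherent() for the DMA-visible array, which brings in the usual undo-stage-one-on-stage-two-failure shape. Sketched in plain C with malloc() standing in for both allocators:

	#include <stdlib.h>

	struct page_list {
		unsigned long long *pl;	/* DMA-visible array in the driver */
		unsigned max_len;
	};

	struct fr_page_list {
		struct page_list ibpl;
		/* ... driver bookkeeping (dev pointer, dma handle) ... */
	};

	struct fr_page_list *alloc_pbl(unsigned page_list_len)
	{
		/* Round the array up to a 32-byte multiple, like
		 * roundup(page_list_len * sizeof(u64), 32) above. */
		unsigned pll_len = (page_list_len * 8 + 31) & ~31u;
		struct fr_page_list *c4pl;

		c4pl = malloc(sizeof(*c4pl));		/* kmalloc() in the driver */
		if (!c4pl)
			return NULL;

		c4pl->ibpl.pl = malloc(pll_len);	/* dma_alloc_coherent() */
		if (!c4pl->ibpl.pl) {
			free(c4pl);	/* undo stage one before reporting failure */
			return NULL;
		}
		c4pl->ibpl.max_len = pll_len;
		return c4pl;
	}

	void free_pbl(struct fr_page_list *c4pl)
	{
		free(c4pl->ibpl.pl);	/* dma_free_coherent() in the driver */
		free(c4pl);
	}

Splitting the allocations is what makes the DSGL fastreg path possible: only the page list itself has to live in DMA-coherent memory, and its address (c4pl->dma_addr in the driver) can be handed straight to the hardware.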
788int c4iw_dereg_mr(struct ib_mr *ib_mr) 921int c4iw_dereg_mr(struct ib_mr *ib_mr)
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index e084fdc6da7f..7e94c9a656a1 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -162,8 +162,14 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
162 */ 162 */
163 if (addr >= rdev->oc_mw_pa) 163 if (addr >= rdev->oc_mw_pa)
164 vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot); 164 vma->vm_page_prot = t4_pgprot_wc(vma->vm_page_prot);
165 else 165 else {
166 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 166 if (is_t5(rdev->lldi.adapter_type))
167 vma->vm_page_prot =
168 t4_pgprot_wc(vma->vm_page_prot);
169 else
170 vma->vm_page_prot =
171 pgprot_noncached(vma->vm_page_prot);
172 }
167 ret = io_remap_pfn_range(vma, vma->vm_start, 173 ret = io_remap_pfn_range(vma, vma->vm_start,
168 addr >> PAGE_SHIFT, 174 addr >> PAGE_SHIFT,
169 len, vma->vm_page_prot); 175 len, vma->vm_page_prot);
@@ -263,7 +269,7 @@ static int c4iw_query_device(struct ib_device *ibdev,
263 dev = to_c4iw_dev(ibdev); 269 dev = to_c4iw_dev(ibdev);
264 memset(props, 0, sizeof *props); 270 memset(props, 0, sizeof *props);
265 memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6); 271 memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
266 props->hw_ver = dev->rdev.lldi.adapter_type; 272 props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type);
267 props->fw_ver = dev->rdev.lldi.fw_vers; 273 props->fw_ver = dev->rdev.lldi.fw_vers;
268 props->device_cap_flags = dev->device_cap_flags; 274 props->device_cap_flags = dev->device_cap_flags;
269 props->page_size_cap = T4_PAGESIZE_MASK; 275 props->page_size_cap = T4_PAGESIZE_MASK;
@@ -346,7 +352,8 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
346 struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev, 352 struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
347 ibdev.dev); 353 ibdev.dev);
348 PDBG("%s dev 0x%p\n", __func__, dev); 354 PDBG("%s dev 0x%p\n", __func__, dev);
349 return sprintf(buf, "%d\n", c4iw_dev->rdev.lldi.adapter_type); 355 return sprintf(buf, "%d\n",
356 CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
350} 357}
351 358
352static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr, 359static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 70b1808a08f4..5b059e2d80cc 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -42,10 +42,21 @@ static int ocqp_support = 1;
42module_param(ocqp_support, int, 0644); 42module_param(ocqp_support, int, 0644);
43MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)"); 43MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");
44 44
45int db_fc_threshold = 2000; 45int db_fc_threshold = 1000;
46module_param(db_fc_threshold, int, 0644); 46module_param(db_fc_threshold, int, 0644);
47MODULE_PARM_DESC(db_fc_threshold, "QP count/threshold that triggers automatic " 47MODULE_PARM_DESC(db_fc_threshold,
48 "db flow control mode (default = 2000)"); 48 "QP count/threshold that triggers"
49 " automatic db flow control mode (default = 1000)");
50
51int db_coalescing_threshold;
52module_param(db_coalescing_threshold, int, 0644);
53MODULE_PARM_DESC(db_coalescing_threshold,
54 "QP count/threshold that triggers"
55 " disabling db coalescing (default = 0)");
56
57static int max_fr_immd = T4_MAX_FR_IMMD;
58module_param(max_fr_immd, int, 0644);
59MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");
49 60
50static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state) 61static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
51{ 62{
@@ -76,7 +87,7 @@ static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
76 87
77static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) 88static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
78{ 89{
79 if (!ocqp_support || !t4_ocqp_supported()) 90 if (!ocqp_support || !ocqp_supported(&rdev->lldi))
80 return -ENOSYS; 91 return -ENOSYS;
81 sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize); 92 sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
82 if (!sq->dma_addr) 93 if (!sq->dma_addr)
@@ -129,7 +140,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
129 int wr_len; 140 int wr_len;
130 struct c4iw_wr_wait wr_wait; 141 struct c4iw_wr_wait wr_wait;
131 struct sk_buff *skb; 142 struct sk_buff *skb;
132 int ret; 143 int ret = 0;
133 int eqsize; 144 int eqsize;
134 145
135 wq->sq.qid = c4iw_get_qpid(rdev, uctx); 146 wq->sq.qid = c4iw_get_qpid(rdev, uctx);
@@ -169,17 +180,14 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
169 } 180 }
170 181
171 if (user) { 182 if (user) {
172 ret = alloc_oc_sq(rdev, &wq->sq); 183 if (alloc_oc_sq(rdev, &wq->sq) && alloc_host_sq(rdev, &wq->sq))
173 if (ret)
174 goto free_hwaddr; 184 goto free_hwaddr;
175 185 } else {
176 ret = alloc_host_sq(rdev, &wq->sq);
177 if (ret)
178 goto free_sq;
179 } else
180 ret = alloc_host_sq(rdev, &wq->sq); 186 ret = alloc_host_sq(rdev, &wq->sq);
181 if (ret) 187 if (ret)
182 goto free_hwaddr; 188 goto free_hwaddr;
189 }
190
183 memset(wq->sq.queue, 0, wq->sq.memsize); 191 memset(wq->sq.queue, 0, wq->sq.memsize);
184 dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr); 192 dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
185 193
@@ -534,7 +542,7 @@ static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
534} 542}
535 543
536static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe, 544static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
537 struct ib_send_wr *wr, u8 *len16) 545 struct ib_send_wr *wr, u8 *len16, u8 t5dev)
538{ 546{
539 547
540 struct fw_ri_immd *imdp; 548 struct fw_ri_immd *imdp;
@@ -556,28 +564,51 @@ static int build_fastreg(struct t4_sq *sq, union t4_wr *wqe,
556 wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32); 564 wqe->fr.va_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
557 wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start & 565 wqe->fr.va_lo_fbo = cpu_to_be32(wr->wr.fast_reg.iova_start &
558 0xffffffff); 566 0xffffffff);
559 	WARN_ON(pbllen > T4_MAX_FR_IMMD);
560 	imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
561 	imdp->op = FW_RI_DATA_IMMD;
562 	imdp->r1 = 0;
563 	imdp->r2 = 0;
564 	imdp->immdlen = cpu_to_be32(pbllen);
565 	p = (__be64 *)(imdp + 1);
566 	rem = pbllen;
567 	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
568 		*p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]);
569 		rem -= sizeof *p;
570 		if (++p == (__be64 *)&sq->queue[sq->size])
571 			p = (__be64 *)sq->queue;
572 	}
573 	BUG_ON(rem < 0);
574 	while (rem) {
575 		*p = 0;
576 		rem -= sizeof *p;
577 		if (++p == (__be64 *)&sq->queue[sq->size])
578 			p = (__be64 *)sq->queue;
579 	}
580 	*len16 = DIV_ROUND_UP(sizeof wqe->fr + sizeof *imdp + pbllen, 16);
567
568 	if (t5dev && use_dsgl && (pbllen > max_fr_immd)) {
569 		struct c4iw_fr_page_list *c4pl =
570 			to_c4iw_fr_page_list(wr->wr.fast_reg.page_list);
571 		struct fw_ri_dsgl *sglp;
572
573 		for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
574 			wr->wr.fast_reg.page_list->page_list[i] = (__force u64)
575 				cpu_to_be64((u64)
576 				wr->wr.fast_reg.page_list->page_list[i]);
577 		}
578
579 		sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
580 		sglp->op = FW_RI_DATA_DSGL;
581 		sglp->r1 = 0;
582 		sglp->nsge = cpu_to_be16(1);
583 		sglp->addr0 = cpu_to_be64(c4pl->dma_addr);
584 		sglp->len0 = cpu_to_be32(pbllen);
585
586 		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
587 	} else {
588 		imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
589 		imdp->op = FW_RI_DATA_IMMD;
590 		imdp->r1 = 0;
591 		imdp->r2 = 0;
592 		imdp->immdlen = cpu_to_be32(pbllen);
593 		p = (__be64 *)(imdp + 1);
594 		rem = pbllen;
595 		for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
596 			*p = cpu_to_be64(
597 				(u64)wr->wr.fast_reg.page_list->page_list[i]);
598 			rem -= sizeof(*p);
599 			if (++p == (__be64 *)&sq->queue[sq->size])
600 				p = (__be64 *)sq->queue;
601 		}
602 		BUG_ON(rem < 0);
603 		while (rem) {
604 			*p = 0;
605 			rem -= sizeof(*p);
606 			if (++p == (__be64 *)&sq->queue[sq->size])
607 				p = (__be64 *)sq->queue;
608 		}
609 		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)
610 			+ pbllen, 16);
611 	}
581 return 0; 612 return 0;
582} 613}
583 614
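Both branches of the rewritten build_fastreg() retain the wrap-around copy into the circular send queue: the destination pointer is bumped one entry at a time and reset to the base whenever it reaches one past the last slot. That idiom on its own, with an illustrative queue size:

	#include <stdint.h>

	#define SQ_SIZE 64	/* slots in the circular send queue (illustrative) */

	/* Copy n 64-bit words into a ring starting at *pos, wrapping at
	 * the end of the queue exactly like the
	 * ++p == (__be64 *)&sq->queue[sq->size] test in build_fastreg(). */
	void ring_copy(uint64_t ring[SQ_SIZE], unsigned *pos,
		       const uint64_t *src, unsigned n)
	{
		uint64_t *p = &ring[*pos];

		for (unsigned i = 0; i < n; i++) {
			*p = src[i];
			if (++p == &ring[SQ_SIZE])
				p = ring;	/* wrap to the base of the queue */
		}
		*pos = p - ring;
	}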
@@ -678,7 +709,10 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
678 case IB_WR_FAST_REG_MR: 709 case IB_WR_FAST_REG_MR:
679 fw_opcode = FW_RI_FR_NSMR_WR; 710 fw_opcode = FW_RI_FR_NSMR_WR;
680 swsqe->opcode = FW_RI_FAST_REGISTER; 711 swsqe->opcode = FW_RI_FAST_REGISTER;
681 err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16); 712 err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16,
713 is_t5(
714 qhp->rhp->rdev.lldi.adapter_type) ?
715 1 : 0);
682 break; 716 break;
683 case IB_WR_LOCAL_INV: 717 case IB_WR_LOCAL_INV:
684 if (wr->send_flags & IB_SEND_FENCE) 718 if (wr->send_flags & IB_SEND_FENCE)
@@ -1450,6 +1484,9 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
1450 rhp->db_state = NORMAL; 1484 rhp->db_state = NORMAL;
1451 idr_for_each(&rhp->qpidr, enable_qp_db, NULL); 1485 idr_for_each(&rhp->qpidr, enable_qp_db, NULL);
1452 } 1486 }
1487 if (db_coalescing_threshold >= 0)
1488 if (rhp->qpcnt <= db_coalescing_threshold)
1489 cxgb4_enable_db_coalescing(rhp->rdev.lldi.ports[0]);
1453 spin_unlock_irq(&rhp->lock); 1490 spin_unlock_irq(&rhp->lock);
1454 atomic_dec(&qhp->refcnt); 1491 atomic_dec(&qhp->refcnt);
1455 wait_event(qhp->wait, !atomic_read(&qhp->refcnt)); 1492 wait_event(qhp->wait, !atomic_read(&qhp->refcnt));
@@ -1561,11 +1598,15 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
1561 spin_lock_irq(&rhp->lock); 1598 spin_lock_irq(&rhp->lock);
1562 if (rhp->db_state != NORMAL) 1599 if (rhp->db_state != NORMAL)
1563 t4_disable_wq_db(&qhp->wq); 1600 t4_disable_wq_db(&qhp->wq);
1564 if (++rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) { 1601 rhp->qpcnt++;
1602 if (rhp->qpcnt > db_fc_threshold && rhp->db_state == NORMAL) {
1565 rhp->rdev.stats.db_state_transitions++; 1603 rhp->rdev.stats.db_state_transitions++;
1566 rhp->db_state = FLOW_CONTROL; 1604 rhp->db_state = FLOW_CONTROL;
1567 idr_for_each(&rhp->qpidr, disable_qp_db, NULL); 1605 idr_for_each(&rhp->qpidr, disable_qp_db, NULL);
1568 } 1606 }
1607 if (db_coalescing_threshold >= 0)
1608 if (rhp->qpcnt > db_coalescing_threshold)
1609 cxgb4_disable_db_coalescing(rhp->rdev.lldi.ports[0]);
1569 ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid); 1610 ret = insert_handle_nolock(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
1570 spin_unlock_irq(&rhp->lock); 1611 spin_unlock_irq(&rhp->lock);
1571 if (ret) 1612 if (ret)
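The create/destroy hunks above drive two independent thresholds off the same QP count: crossing db_fc_threshold flips the device into doorbell flow control, and crossing db_coalescing_threshold (when it is not the -1 sentinel) toggles doorbell coalescing. A compilable sketch of that counter-plus-thresholds logic; locking is elided (in the driver both paths run under rhp->lock), and the qpcnt decrement on destroy is outside the visible hunk, so it is assumed here:

	#include <stdio.h>

	enum db_state { NORMAL, FLOW_CONTROL };

	static void db_coalescing_enable(void)  { puts("db coalescing on");  }
	static void db_coalescing_disable(void) { puts("db coalescing off"); }

	struct dev_ctl {
		int qpcnt;
		enum db_state db_state;
	};

	static void qp_created(struct dev_ctl *d, int fc_thresh, int coal_thresh)
	{
		d->qpcnt++;
		if (d->qpcnt > fc_thresh && d->db_state == NORMAL)
			d->db_state = FLOW_CONTROL;	/* start rationing doorbells */
		if (coal_thresh >= 0 && d->qpcnt > coal_thresh)
			db_coalescing_disable();
	}

	static void qp_destroyed(struct dev_ctl *d, int coal_thresh)
	{
		d->qpcnt--;			/* assumed; not shown in the hunk */
		if (coal_thresh >= 0 && d->qpcnt <= coal_thresh)
			db_coalescing_enable();
	}

	int main(void)
	{
		struct dev_ctl d = { 0, NORMAL };

		qp_created(&d, 1000, 0);	/* qpcnt 1 > 0: coalescing off */
		qp_destroyed(&d, 0);		/* qpcnt 0 <= 0: back on       */
		return 0;
	}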
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 16f26ab29302..ebcb03bd1b72 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -84,7 +84,7 @@ struct t4_status_page {
84 sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge)) 84 sizeof(struct fw_ri_isgl)) / sizeof(struct fw_ri_sge))
85#define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \ 85#define T4_MAX_FR_IMMD ((T4_SQ_NUM_BYTES - sizeof(struct fw_ri_fr_nsmr_wr) - \
86 sizeof(struct fw_ri_immd)) & ~31UL) 86 sizeof(struct fw_ri_immd)) & ~31UL)
87#define T4_MAX_FR_DEPTH (T4_MAX_FR_IMMD / sizeof(u64)) 87#define T4_MAX_FR_DEPTH (1024 / sizeof(u64))
88 88
89#define T4_RQ_NUM_SLOTS 2 89#define T4_RQ_NUM_SLOTS 2
90#define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS) 90#define T4_RQ_NUM_BYTES (T4_EQ_ENTRY_SIZE * T4_RQ_NUM_SLOTS)
@@ -280,15 +280,6 @@ static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
280#endif 280#endif
281} 281}
282 282
283static inline int t4_ocqp_supported(void)
284{
285#if defined(__i386__) || defined(__x86_64__) || defined(CONFIG_PPC64)
286 return 1;
287#else
288 return 0;
289#endif
290}
291
292enum { 283enum {
293 T4_SQ_ONCHIP = (1<<0), 284 T4_SQ_ONCHIP = (1<<0),
294}; 285};
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index 832bc807ed20..cc9f1927a322 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -469,8 +469,7 @@ static int capidrv_add_ack(struct capidrv_ncci *nccip,
469{ 469{
470 struct ncci_datahandle_queue *n, **pp; 470 struct ncci_datahandle_queue *n, **pp;
471 471
472 n = (struct ncci_datahandle_queue *) 472 n = kmalloc(sizeof(struct ncci_datahandle_queue), GFP_ATOMIC);
473 kmalloc(sizeof(struct ncci_datahandle_queue), GFP_ATOMIC);
474 if (!n) { 473 if (!n) {
475 printk(KERN_ERR "capidrv: kmalloc ncci_datahandle failed\n"); 474 printk(KERN_ERR "capidrv: kmalloc ncci_datahandle failed\n");
476 return -1; 475 return -1;
diff --git a/drivers/isdn/divert/isdn_divert.c b/drivers/isdn/divert/isdn_divert.c
index db432e635496..50749a70c5ca 100644
--- a/drivers/isdn/divert/isdn_divert.c
+++ b/drivers/isdn/divert/isdn_divert.c
@@ -441,8 +441,7 @@ static int isdn_divert_icall(isdn_ctrl *ic)
441 441
442 switch (dv->rule.action) { 442 switch (dv->rule.action) {
443 case DEFLECT_IGNORE: 443 case DEFLECT_IGNORE:
444 return (0); 444 return 0;
445 break;
446 445
447 case DEFLECT_ALERT: 446 case DEFLECT_ALERT:
448 case DEFLECT_PROCEED: 447 case DEFLECT_PROCEED:
@@ -510,10 +509,9 @@ static int isdn_divert_icall(isdn_ctrl *ic)
510 break; 509 break;
511 510
512 default: 511 default:
513 return (0); /* ignore call */ 512 return 0; /* ignore call */
514 break;
515 } /* switch action */ 513 } /* switch action */
516 break; 514 break; /* break out of the 'for' loop */
517 } /* scan_table */ 515 } /* scan_table */
518 516
519 if (cs) { 517 if (cs) {
diff --git a/drivers/isdn/hisax/fsm.c b/drivers/isdn/hisax/fsm.c
index 1bb291021fdb..c7a94713e9ec 100644
--- a/drivers/isdn/hisax/fsm.c
+++ b/drivers/isdn/hisax/fsm.c
@@ -26,7 +26,7 @@ FsmNew(struct Fsm *fsm, struct FsmNode *fnlist, int fncount)
26{ 26{
27 int i; 27 int i;
28 28
29 fsm->jumpmatrix = (FSMFNPTR *) 29 fsm->jumpmatrix =
30 kzalloc(sizeof(FSMFNPTR) * fsm->state_count * fsm->event_count, GFP_KERNEL); 30 kzalloc(sizeof(FSMFNPTR) * fsm->state_count * fsm->event_count, GFP_KERNEL);
31 if (!fsm->jumpmatrix) 31 if (!fsm->jumpmatrix)
32 return -ENOMEM; 32 return -ENOMEM;
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index 90f34ae2b80f..dc4574f735ef 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -1479,7 +1479,7 @@ int setup_hfcsx(struct IsdnCard *card)
1479 release_region(cs->hw.hfcsx.base, 2); 1479 release_region(cs->hw.hfcsx.base, 2);
1480 return (0); 1480 return (0);
1481 } 1481 }
1482 if (!(cs->hw.hfcsx.extra = (void *) 1482 if (!(cs->hw.hfcsx.extra =
1483 kmalloc(sizeof(struct hfcsx_extra), GFP_ATOMIC))) { 1483 kmalloc(sizeof(struct hfcsx_extra), GFP_ATOMIC))) {
1484 release_region(cs->hw.hfcsx.base, 2); 1484 release_region(cs->hw.hfcsx.base, 2);
1485 printk(KERN_WARNING "HFC-SX: unable to allocate memory\n"); 1485 printk(KERN_WARNING "HFC-SX: unable to allocate memory\n");
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index babc621a07fb..88d657dff474 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -1385,7 +1385,7 @@ isdn_net_type_trans(struct sk_buff *skb, struct net_device *dev)
1385 if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN)) 1385 if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN))
1386 skb->pkt_type = PACKET_OTHERHOST; 1386 skb->pkt_type = PACKET_OTHERHOST;
1387 } 1387 }
1388 if (ntohs(eth->h_proto) >= 1536) 1388 if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
1389 return eth->h_proto; 1389 return eth->h_proto;
1390 1390
1391 rawp = skb->data; 1391 rawp = skb->data;
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index 44225b186f6d..83a23afb13ab 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -185,7 +185,7 @@ static __be16 dvb_net_eth_type_trans(struct sk_buff *skb,
185 skb->pkt_type=PACKET_MULTICAST; 185 skb->pkt_type=PACKET_MULTICAST;
186 } 186 }
187 187
188 if (ntohs(eth->h_proto) >= 1536) 188 if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
189 return eth->h_proto; 189 return eth->h_proto;
190 190
191 rawp = skb->data; 191 rawp = skb->data;
@@ -228,9 +228,9 @@ static int ule_test_sndu( struct dvb_net_priv *p )
228static int ule_bridged_sndu( struct dvb_net_priv *p ) 228static int ule_bridged_sndu( struct dvb_net_priv *p )
229{ 229{
230 struct ethhdr *hdr = (struct ethhdr*) p->ule_next_hdr; 230 struct ethhdr *hdr = (struct ethhdr*) p->ule_next_hdr;
231 if(ntohs(hdr->h_proto) < 1536) { 231 if(ntohs(hdr->h_proto) < ETH_P_802_3_MIN) {
232 int framelen = p->ule_sndu_len - ((p->ule_next_hdr+sizeof(struct ethhdr)) - p->ule_skb->data); 232 int framelen = p->ule_sndu_len - ((p->ule_next_hdr+sizeof(struct ethhdr)) - p->ule_skb->data);
233 /* A frame Type < 1536 for a bridged frame, introduces a LLC Length field. */ 233 /* A frame Type < ETH_P_802_3_MIN for a bridged frame, introduces a LLC Length field. */
234 if(framelen != ntohs(hdr->h_proto)) { 234 if(framelen != ntohs(hdr->h_proto)) {
235 return -1; 235 return -1;
236 } 236 }
@@ -320,7 +320,7 @@ static int handle_ule_extensions( struct dvb_net_priv *p )
320 (int) p->ule_sndu_type, l, total_ext_len); 320 (int) p->ule_sndu_type, l, total_ext_len);
321#endif 321#endif
322 322
323 } while (p->ule_sndu_type < 1536); 323 } while (p->ule_sndu_type < ETH_P_802_3_MIN);
324 324
325 return total_ext_len; 325 return total_ext_len;
326} 326}
@@ -712,7 +712,7 @@ static void dvb_net_ule( struct net_device *dev, const u8 *buf, size_t buf_len )
712 } 712 }
713 713
714 /* Handle ULE Extension Headers. */ 714 /* Handle ULE Extension Headers. */
715 if (priv->ule_sndu_type < 1536) { 715 if (priv->ule_sndu_type < ETH_P_802_3_MIN) {
716 /* There is an extension header. Handle it accordingly. */ 716 /* There is an extension header. Handle it accordingly. */
717 int l = handle_ule_extensions(priv); 717 int l = handle_ule_extensions(priv);
718 if (l < 0) { 718 if (l < 0) {
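The dvb_net and isdn hunks replace the magic number 1536 with ETH_P_802_3_MIN: in an Ethernet header, an h_proto value at or above 0x0600 is an EtherType, while anything smaller is an 802.3 length field. A self-contained version of the check; the constant matches the one <linux/if_ether.h> defines:

	#include <stdint.h>
	#include <stdio.h>

	#define ETH_P_802_3_MIN 0x0600	/* smallest valid EtherType */

	/* h_proto is big endian on the wire; assume it has already been
	 * converted with ntohs(), as in the hunks above. */
	int is_ethertype(uint16_t h_proto_host)
	{
		return h_proto_host >= ETH_P_802_3_MIN;
	}

	int main(void)
	{
		printf("0x0800 (IPv4): %d\n", is_ethertype(0x0800));	/* 1: EtherType    */
		printf("0x0101 (len):  %d\n", is_ethertype(0x0101));	/* 0: 802.3 length */
		return 0;
	}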
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 87f1d39ca551..3835321b8cf3 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -151,6 +151,7 @@ config MACVTAP
151config VXLAN 151config VXLAN
152 tristate "Virtual eXtensible Local Area Network (VXLAN)" 152 tristate "Virtual eXtensible Local Area Network (VXLAN)"
153 depends on INET 153 depends on INET
154 select NET_IP_TUNNEL
154 ---help--- 155 ---help---
155 This allows one to create vxlan virtual interfaces that provide 156 This allows one to create vxlan virtual interfaces that provide
156 Layer 2 Networks over Layer 3 Networks. VXLAN is often used 157 Layer 2 Networks over Layer 3 Networks. VXLAN is often used
diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig
index f5a89164e779..4ce6ca5f3d36 100644
--- a/drivers/net/appletalk/Kconfig
+++ b/drivers/net/appletalk/Kconfig
@@ -106,20 +106,4 @@ config IPDDP_ENCAP
106 IP packets inside AppleTalk frames; this is useful if your Linux box 106 IP packets inside AppleTalk frames; this is useful if your Linux box
107 is stuck on an AppleTalk network (which hopefully contains a 107 is stuck on an AppleTalk network (which hopefully contains a
108 decapsulator somewhere). Please see 108 decapsulator somewhere). Please see
109 <file:Documentation/networking/ipddp.txt> for more information. If 109 <file:Documentation/networking/ipddp.txt> for more information.
110 you said Y to "AppleTalk-IP driver support" above and you say Y
111 here, then you cannot say Y to "AppleTalk-IP to IP Decapsulation
112 support", below.
113
114config IPDDP_DECAP
115 bool "Appletalk-IP to IP Decapsulation support"
116 depends on IPDDP
117 help
118 If you say Y here, the AppleTalk-IP code will be able to decapsulate
119 AppleTalk-IP frames to IP packets; this is useful if you want your
120 Linux box to act as an Internet gateway for an AppleTalk network.
121 Please see <file:Documentation/networking/ipddp.txt> for more
122 information. If you said Y to "AppleTalk-IP driver support" above
123 and you say Y here, then you cannot say Y to "IP to AppleTalk-IP
124 Encapsulation support", above.
125
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 171b10f167a5..2aac890320cb 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -796,9 +796,8 @@ static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
796{ 796{
797 struct bonding *bond = container_of(work, struct bonding, 797 struct bonding *bond = container_of(work, struct bonding,
798 mcast_work.work); 798 mcast_work.work);
799 rcu_read_lock(); 799
800 bond_resend_igmp_join_requests(bond); 800 bond_resend_igmp_join_requests(bond);
801 rcu_read_unlock();
802} 801}
803 802
804/* 803/*
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 60c2142373c9..a966128c2a7a 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -32,13 +32,6 @@ config CAIF_SPI_SYNC
32 help to synchronize to the next transfer in case of over or under-runs. 32 help to synchronize to the next transfer in case of over or under-runs.
33 This option also needs to be enabled on the modem. 33 This option also needs to be enabled on the modem.
34 34
35config CAIF_SHM
36 tristate "CAIF shared memory protocol driver"
37 depends on CAIF && U5500_MBOX
38 default n
39 ---help---
40 The CAIF shared memory protocol driver for the STE UX5500 platform.
41
42config CAIF_HSI 35config CAIF_HSI
43 tristate "CAIF HSI transport driver" 36 tristate "CAIF HSI transport driver"
44 depends on CAIF 37 depends on CAIF
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
index 91dff861560f..15a9d2fc753d 100644
--- a/drivers/net/caif/Makefile
+++ b/drivers/net/caif/Makefile
@@ -7,9 +7,5 @@ obj-$(CONFIG_CAIF_TTY) += caif_serial.o
7cfspi_slave-objs := caif_spi.o caif_spi_slave.o 7cfspi_slave-objs := caif_spi.o caif_spi_slave.o
8obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o 8obj-$(CONFIG_CAIF_SPI_SLAVE) += cfspi_slave.o
9 9
10# Shared memory
11caif_shm-objs := caif_shmcore.o caif_shm_u5500.o
12obj-$(CONFIG_CAIF_SHM) += caif_shm.o
13
14# HSI interface 10# HSI interface
15obj-$(CONFIG_CAIF_HSI) += caif_hsi.o 11obj-$(CONFIG_CAIF_HSI) += caif_hsi.o
diff --git a/drivers/net/caif/caif_shm_u5500.c b/drivers/net/caif/caif_shm_u5500.c
deleted file mode 100644
index 89d76b7b325a..000000000000
--- a/drivers/net/caif/caif_shm_u5500.c
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson AB 2010
- * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
- * Author:  Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com
- * License terms: GNU General Public License (GPL) version 2
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <mach/mbox-db5500.h>
-#include <net/caif/caif_shm.h>
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("CAIF Shared Memory protocol driver");
-
-#define MAX_SHM_INSTANCES	1
-
-enum {
-	MBX_ACC0,
-	MBX_ACC1,
-	MBX_DSP
-};
-
-static struct shmdev_layer shmdev_lyr[MAX_SHM_INSTANCES];
-
-static unsigned int shm_start;
-static unsigned int shm_size;
-
-module_param(shm_size, uint , 0440);
-MODULE_PARM_DESC(shm_total_size, "Start of SHM shared memory");
-
-module_param(shm_start, uint , 0440);
-MODULE_PARM_DESC(shm_total_start, "Total Size of SHM shared memory");
-
-static int shmdev_send_msg(u32 dev_id, u32 mbx_msg)
-{
-	/* Always block until msg is written successfully */
-	mbox_send(shmdev_lyr[dev_id].hmbx, mbx_msg, true);
-	return 0;
-}
-
-static int shmdev_mbx_setup(void *pshmdrv_cb, struct shmdev_layer *pshm_dev,
-							void *pshm_drv)
-{
-	/*
-	 * For UX5500, we have only 1 SHM instance which uses MBX0
-	 * for communication with the peer modem
-	 */
-	pshm_dev->hmbx = mbox_setup(MBX_ACC0, pshmdrv_cb, pshm_drv);
-
-	if (!pshm_dev->hmbx)
-		return -ENODEV;
-	else
-		return 0;
-}
-
-static int __init caif_shmdev_init(void)
-{
-	int i, result;
-
-	/* Loop is currently overkill, there is only one instance */
-	for (i = 0; i < MAX_SHM_INSTANCES; i++) {
-
-		shmdev_lyr[i].shm_base_addr = shm_start;
-		shmdev_lyr[i].shm_total_sz = shm_size;
-
-		if (((char *)shmdev_lyr[i].shm_base_addr == NULL)
-				|| (shmdev_lyr[i].shm_total_sz <= 0)) {
-			pr_warn("ERROR,"
-				"Shared memory Address and/or Size incorrect"
-				", Bailing out ...\n");
-			result = -EINVAL;
-			goto clean;
-		}
-
-		pr_info("SHM AREA (instance %d) STARTS"
-			" AT %p\n", i, (char *)shmdev_lyr[i].shm_base_addr);
-
-		shmdev_lyr[i].shm_id = i;
-		shmdev_lyr[i].pshmdev_mbxsend = shmdev_send_msg;
-		shmdev_lyr[i].pshmdev_mbxsetup = shmdev_mbx_setup;
-
-		/*
-		 * Finally, CAIF core module is called with details in place:
-		 * 1. SHM base address
-		 * 2. SHM size
-		 * 3. MBX handle
-		 */
-		result = caif_shmcore_probe(&shmdev_lyr[i]);
-		if (result) {
-			pr_warn("ERROR[%d],"
-				"Could not probe SHM core (instance %d)"
-				" Bailing out ...\n", result, i);
-			goto clean;
-		}
-	}
-
-	return 0;
-
-clean:
-	/*
-	 * For now, we assume that even if one instance of SHM fails, we bail
-	 * out of the driver support completely. For this, we need to release
-	 * any memory allocated and unregister any instance of SHM net device.
-	 */
-	for (i = 0; i < MAX_SHM_INSTANCES; i++) {
-		if (shmdev_lyr[i].pshm_netdev)
-			unregister_netdev(shmdev_lyr[i].pshm_netdev);
-	}
-	return result;
-}
-
-static void __exit caif_shmdev_exit(void)
-{
-	int i;
-
-	for (i = 0; i < MAX_SHM_INSTANCES; i++) {
-		caif_shmcore_remove(shmdev_lyr[i].pshm_netdev);
-		kfree((void *)shmdev_lyr[i].shm_base_addr);
-	}
-
-}
-
-module_init(caif_shmdev_init);
-module_exit(caif_shmdev_exit);
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
deleted file mode 100644
index bce8bac311c9..000000000000
--- a/drivers/net/caif/caif_shmcore.c
+++ /dev/null
@@ -1,747 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson AB 2010
- * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
- * Authors:  Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com,
- *           Daniel Martensson / daniel.martensson@stericsson.com
- * License terms: GNU General Public License (GPL) version 2
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
-
-#include <linux/spinlock.h>
-#include <linux/sched.h>
-#include <linux/list.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <linux/io.h>
-
-#include <net/caif/caif_device.h>
-#include <net/caif/caif_shm.h>
-
-#define NR_TX_BUF		6
-#define NR_RX_BUF		6
-#define TX_BUF_SZ		0x2000
-#define RX_BUF_SZ		0x2000
-
-#define CAIF_NEEDED_HEADROOM	32
-
-#define CAIF_FLOW_ON		1
-#define CAIF_FLOW_OFF		0
-
-#define LOW_WATERMARK		3
-#define HIGH_WATERMARK		4
-
-/* Maximum number of CAIF buffers per shared memory buffer. */
-#define SHM_MAX_FRMS_PER_BUF	10
-
-/*
- * Size in bytes of the descriptor area
- * (With end of descriptor signalling)
- */
-#define SHM_CAIF_DESC_SIZE	((SHM_MAX_FRMS_PER_BUF + 1) * \
-					sizeof(struct shm_pck_desc))
-
-/*
- * Offset to the first CAIF frame within a shared memory buffer.
- * Aligned on 32 bytes.
- */
-#define SHM_CAIF_FRM_OFS	(SHM_CAIF_DESC_SIZE + (SHM_CAIF_DESC_SIZE % 32))
-
-/* Number of bytes for CAIF shared memory header. */
-#define SHM_HDR_LEN		1
-
-/* Number of padding bytes for the complete CAIF frame. */
-#define SHM_FRM_PAD_LEN		4
-
-#define CAIF_MAX_MTU		4096
-
-#define SHM_SET_FULL(x)	(((x+1) & 0x0F) << 0)
-#define SHM_GET_FULL(x)	(((x >> 0) & 0x0F) - 1)
-
-#define SHM_SET_EMPTY(x)	(((x+1) & 0x0F) << 4)
-#define SHM_GET_EMPTY(x)	(((x >> 4) & 0x0F) - 1)
-
-#define SHM_FULL_MASK		(0x0F << 0)
-#define SHM_EMPTY_MASK		(0x0F << 4)
-
-struct shm_pck_desc {
-	/*
-	 * Offset from start of shared memory area to start of
-	 * shared memory CAIF frame.
-	 */
-	u32 frm_ofs;
-	u32 frm_len;
-};
-
-struct buf_list {
-	unsigned char *desc_vptr;
-	u32 phy_addr;
-	u32 index;
-	u32 len;
-	u32 frames;
-	u32 frm_ofs;
-	struct list_head list;
-};
-
-struct shm_caif_frm {
-	/* Number of bytes of padding before the CAIF frame. */
-	u8 hdr_ofs;
-};
-
-struct shmdrv_layer {
-	/* caif_dev_common must always be first in the structure*/
-	struct caif_dev_common cfdev;
-
-	u32 shm_tx_addr;
-	u32 shm_rx_addr;
-	u32 shm_base_addr;
-	u32 tx_empty_available;
-	spinlock_t lock;
-
-	struct list_head tx_empty_list;
-	struct list_head tx_pend_list;
-	struct list_head tx_full_list;
-	struct list_head rx_empty_list;
-	struct list_head rx_pend_list;
-	struct list_head rx_full_list;
-
-	struct workqueue_struct *pshm_tx_workqueue;
-	struct workqueue_struct *pshm_rx_workqueue;
-
-	struct work_struct shm_tx_work;
-	struct work_struct shm_rx_work;
-
-	struct sk_buff_head sk_qhead;
-	struct shmdev_layer *pshm_dev;
-};
-
-static int shm_netdev_open(struct net_device *shm_netdev)
-{
-	netif_wake_queue(shm_netdev);
-	return 0;
-}
-
-static int shm_netdev_close(struct net_device *shm_netdev)
-{
-	netif_stop_queue(shm_netdev);
-	return 0;
-}
-
-int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
-{
-	struct buf_list *pbuf;
-	struct shmdrv_layer *pshm_drv;
-	struct list_head *pos;
-	u32 avail_emptybuff = 0;
-	unsigned long flags = 0;
-
-	pshm_drv = priv;
-
-	/* Check for received buffers. */
-	if (mbx_msg & SHM_FULL_MASK) {
-		int idx;
-
-		spin_lock_irqsave(&pshm_drv->lock, flags);
-
-		/* Check whether we have any outstanding buffers. */
-		if (list_empty(&pshm_drv->rx_empty_list)) {
-
-			/* Release spin lock. */
-			spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
-			/* We print even in IRQ context... */
-			pr_warn("No empty Rx buffers to fill: "
-					"mbx_msg:%x\n", mbx_msg);
-
-			/* Bail out. */
-			goto err_sync;
-		}
-
-		pbuf =
-			list_entry(pshm_drv->rx_empty_list.next,
-					struct buf_list, list);
-		idx = pbuf->index;
-
-		/* Check buffer synchronization. */
-		if (idx != SHM_GET_FULL(mbx_msg)) {
-
-			/* We print even in IRQ context... */
-			pr_warn(
-			"phyif_shm_mbx_msg_cb: RX full out of sync:"
-			" idx:%d, msg:%x SHM_GET_FULL(mbx_msg):%x\n",
-				idx, mbx_msg, SHM_GET_FULL(mbx_msg));
-
-			spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
-			/* Bail out. */
-			goto err_sync;
-		}
-
-		list_del_init(&pbuf->list);
-		list_add_tail(&pbuf->list, &pshm_drv->rx_full_list);
-
-		spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
-		/* Schedule RX work queue. */
-		if (!work_pending(&pshm_drv->shm_rx_work))
-			queue_work(pshm_drv->pshm_rx_workqueue,
-					&pshm_drv->shm_rx_work);
-	}
-
-	/* Check for emptied buffers. */
-	if (mbx_msg & SHM_EMPTY_MASK) {
-		int idx;
-
-		spin_lock_irqsave(&pshm_drv->lock, flags);
-
-		/* Check whether we have any outstanding buffers. */
-		if (list_empty(&pshm_drv->tx_full_list)) {
-
-			/* We print even in IRQ context... */
-			pr_warn("No TX to empty: msg:%x\n", mbx_msg);
-
-			spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
-			/* Bail out. */
-			goto err_sync;
-		}
-
-		pbuf =
-			list_entry(pshm_drv->tx_full_list.next,
-					struct buf_list, list);
-		idx = pbuf->index;
-
-		/* Check buffer synchronization. */
-		if (idx != SHM_GET_EMPTY(mbx_msg)) {
-
-			spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
-			/* We print even in IRQ context... */
-			pr_warn("TX empty "
-				"out of sync:idx:%d, msg:%x\n", idx, mbx_msg);
-
-			/* Bail out. */
-			goto err_sync;
-		}
-		list_del_init(&pbuf->list);
-
-		/* Reset buffer parameters. */
-		pbuf->frames = 0;
-		pbuf->frm_ofs = SHM_CAIF_FRM_OFS;
-
-		list_add_tail(&pbuf->list, &pshm_drv->tx_empty_list);
-
-		/* Check the available no. of buffers in the empty list */
-		list_for_each(pos, &pshm_drv->tx_empty_list)
-			avail_emptybuff++;
-
-		/* Check whether we have to wake up the transmitter. */
-		if ((avail_emptybuff > HIGH_WATERMARK) &&
-				(!pshm_drv->tx_empty_available)) {
-			pshm_drv->tx_empty_available = 1;
-			spin_unlock_irqrestore(&pshm_drv->lock, flags);
-			pshm_drv->cfdev.flowctrl
-					(pshm_drv->pshm_dev->pshm_netdev,
-							CAIF_FLOW_ON);
-
-
-			/* Schedule the work queue. if required */
-			if (!work_pending(&pshm_drv->shm_tx_work))
-				queue_work(pshm_drv->pshm_tx_workqueue,
-						&pshm_drv->shm_tx_work);
-		} else
-			spin_unlock_irqrestore(&pshm_drv->lock, flags);
-	}
-
-	return 0;
-
-err_sync:
-	return -EIO;
-}
-
-static void shm_rx_work_func(struct work_struct *rx_work)
-{
-	struct shmdrv_layer *pshm_drv;
-	struct buf_list *pbuf;
-	unsigned long flags = 0;
-	struct sk_buff *skb;
-	char *p;
-	int ret;
-
-	pshm_drv = container_of(rx_work, struct shmdrv_layer, shm_rx_work);
-
-	while (1) {
-
-		struct shm_pck_desc *pck_desc;
-
-		spin_lock_irqsave(&pshm_drv->lock, flags);
-
-		/* Check for received buffers. */
-		if (list_empty(&pshm_drv->rx_full_list)) {
-			spin_unlock_irqrestore(&pshm_drv->lock, flags);
-			break;
-		}
-
-		pbuf =
-			list_entry(pshm_drv->rx_full_list.next, struct buf_list,
-					list);
-		list_del_init(&pbuf->list);
-		spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
-		/* Retrieve pointer to start of the packet descriptor area. */
-		pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;
-
-		/*
-		 * Check whether descriptor contains a CAIF shared memory
-		 * frame.
-		 */
-		while (pck_desc->frm_ofs) {
-			unsigned int frm_buf_ofs;
-			unsigned int frm_pck_ofs;
-			unsigned int frm_pck_len;
-			/*
-			 * Check whether offset is within buffer limits
-			 * (lower).
-			 */
-			if (pck_desc->frm_ofs <
-				(pbuf->phy_addr - pshm_drv->shm_base_addr))
-				break;
-			/*
-			 * Check whether offset is within buffer limits
-			 * (higher).
-			 */
-			if (pck_desc->frm_ofs >
-				((pbuf->phy_addr - pshm_drv->shm_base_addr) +
-					pbuf->len))
-				break;
-
-			/* Calculate offset from start of buffer. */
-			frm_buf_ofs =
-				pck_desc->frm_ofs - (pbuf->phy_addr -
-						pshm_drv->shm_base_addr);
-
-			/*
-			 * Calculate offset and length of CAIF packet while
-			 * taking care of the shared memory header.
-			 */
-			frm_pck_ofs =
-				frm_buf_ofs + SHM_HDR_LEN +
-				(*(pbuf->desc_vptr + frm_buf_ofs));
-			frm_pck_len =
-				(pck_desc->frm_len - SHM_HDR_LEN -
-				(*(pbuf->desc_vptr + frm_buf_ofs)));
-
-			/* Check whether CAIF packet is within buffer limits */
-			if ((frm_pck_ofs + pck_desc->frm_len) > pbuf->len)
-				break;
-
-			/* Get a suitable CAIF packet and copy in data. */
-			skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
-							frm_pck_len + 1);
-
-			if (skb == NULL) {
-				pr_info("OOM: Try next frame in descriptor\n");
-				break;
-			}
-
-			p = skb_put(skb, frm_pck_len);
-			memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);
-
-			skb->protocol = htons(ETH_P_CAIF);
-			skb_reset_mac_header(skb);
-			skb->dev = pshm_drv->pshm_dev->pshm_netdev;
-
-			/* Push received packet up the stack. */
-			ret = netif_rx_ni(skb);
-
-			if (!ret) {
-				pshm_drv->pshm_dev->pshm_netdev->stats.
-								rx_packets++;
-				pshm_drv->pshm_dev->pshm_netdev->stats.
-						rx_bytes += pck_desc->frm_len;
-			} else
-				++pshm_drv->pshm_dev->pshm_netdev->stats.
-								rx_dropped;
-			/* Move to next packet descriptor. */
-			pck_desc++;
-		}
-
-		spin_lock_irqsave(&pshm_drv->lock, flags);
-		list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);
-
-		spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
-	}
-
-	/* Schedule the work queue. if required */
-	if (!work_pending(&pshm_drv->shm_tx_work))
-		queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
-
-}
-
-static void shm_tx_work_func(struct work_struct *tx_work)
-{
-	u32 mbox_msg;
-	unsigned int frmlen, avail_emptybuff, append = 0;
-	unsigned long flags = 0;
-	struct buf_list *pbuf = NULL;
-	struct shmdrv_layer *pshm_drv;
-	struct shm_caif_frm *frm;
-	struct sk_buff *skb;
-	struct shm_pck_desc *pck_desc;
-	struct list_head *pos;
-
-	pshm_drv = container_of(tx_work, struct shmdrv_layer, shm_tx_work);
-
-	do {
-		/* Initialize mailbox message. */
-		mbox_msg = 0x00;
-		avail_emptybuff = 0;
-
-		spin_lock_irqsave(&pshm_drv->lock, flags);
-
-		/* Check for pending receive buffers. */
-		if (!list_empty(&pshm_drv->rx_pend_list)) {
-
-			pbuf = list_entry(pshm_drv->rx_pend_list.next,
-						struct buf_list, list);
-
-			list_del_init(&pbuf->list);
-			list_add_tail(&pbuf->list, &pshm_drv->rx_empty_list);
-			/*
-			 * Value index is never changed,
-			 * so read access should be safe.
-			 */
-			mbox_msg |= SHM_SET_EMPTY(pbuf->index);
-		}
-
-		skb = skb_peek(&pshm_drv->sk_qhead);
-
-		if (skb == NULL)
-			goto send_msg;
-		/* Check the available no. of buffers in the empty list */
-		list_for_each(pos, &pshm_drv->tx_empty_list)
-			avail_emptybuff++;
-
-		if ((avail_emptybuff < LOW_WATERMARK) &&
-					pshm_drv->tx_empty_available) {
-			/* Update blocking condition. */
-			pshm_drv->tx_empty_available = 0;
-			spin_unlock_irqrestore(&pshm_drv->lock, flags);
-			pshm_drv->cfdev.flowctrl
-					(pshm_drv->pshm_dev->pshm_netdev,
-					CAIF_FLOW_OFF);
-			spin_lock_irqsave(&pshm_drv->lock, flags);
-		}
-		/*
-		 * We simply return back to the caller if we do not have space
-		 * either in Tx pending list or Tx empty list. In this case,
-		 * we hold the received skb in the skb list, waiting to
-		 * be transmitted once Tx buffers become available
-		 */
-		if (list_empty(&pshm_drv->tx_empty_list))
-			goto send_msg;
-
-		/* Get the first free Tx buffer. */
-		pbuf = list_entry(pshm_drv->tx_empty_list.next,
-						struct buf_list, list);
-		do {
-			if (append) {
-				skb = skb_peek(&pshm_drv->sk_qhead);
-				if (skb == NULL)
-					break;
-			}
-
-			frm = (struct shm_caif_frm *)
-					(pbuf->desc_vptr + pbuf->frm_ofs);
-
-			frm->hdr_ofs = 0;
-			frmlen = 0;
-			frmlen += SHM_HDR_LEN + frm->hdr_ofs + skb->len;
-
-			/* Add tail padding if needed. */
-			if (frmlen % SHM_FRM_PAD_LEN)
-				frmlen += SHM_FRM_PAD_LEN -
-						(frmlen % SHM_FRM_PAD_LEN);
-
-			/*
-			 * Verify that packet, header and additional padding
-			 * can fit within the buffer frame area.
-			 */
-			if (frmlen >= (pbuf->len - pbuf->frm_ofs))
-				break;
-
-			if (!append) {
-				list_del_init(&pbuf->list);
-				append = 1;
-			}
-
-			skb = skb_dequeue(&pshm_drv->sk_qhead);
-			if (skb == NULL)
-				break;
-			/* Copy in CAIF frame. */
-			skb_copy_bits(skb, 0, pbuf->desc_vptr +
-					pbuf->frm_ofs + SHM_HDR_LEN +
-						frm->hdr_ofs, skb->len);
-
-			pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
-			pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
-									frmlen;
-			dev_kfree_skb_irq(skb);
-
-			/* Fill in the shared memory packet descriptor area. */
-			pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
-			/* Forward to current frame. */
-			pck_desc += pbuf->frames;
-			pck_desc->frm_ofs = (pbuf->phy_addr -
-						pshm_drv->shm_base_addr) +
-								pbuf->frm_ofs;
-			pck_desc->frm_len = frmlen;
-			/* Terminate packet descriptor area. */
-			pck_desc++;
-			pck_desc->frm_ofs = 0;
-			/* Update buffer parameters. */
-			pbuf->frames++;
-			pbuf->frm_ofs += frmlen + (frmlen % 32);
-
-		} while (pbuf->frames < SHM_MAX_FRMS_PER_BUF);
-
-		/* Assign buffer as full. */
-		list_add_tail(&pbuf->list, &pshm_drv->tx_full_list);
-		append = 0;
-		mbox_msg |= SHM_SET_FULL(pbuf->index);
-send_msg:
-		spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
-		if (mbox_msg)
-			pshm_drv->pshm_dev->pshmdev_mbxsend
-					(pshm_drv->pshm_dev->shm_id, mbox_msg);
-	} while (mbox_msg);
-}
-
-static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
-{
-	struct shmdrv_layer *pshm_drv;
-
-	pshm_drv = netdev_priv(shm_netdev);
-
-	skb_queue_tail(&pshm_drv->sk_qhead, skb);
-
-	/* Schedule Tx work queue. for deferred processing of skbs*/
-	if (!work_pending(&pshm_drv->shm_tx_work))
-		queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
-
-	return 0;
-}
-
-static const struct net_device_ops netdev_ops = {
-	.ndo_open = shm_netdev_open,
-	.ndo_stop = shm_netdev_close,
-	.ndo_start_xmit = shm_netdev_tx,
-};
-
-static void shm_netdev_setup(struct net_device *pshm_netdev)
-{
-	struct shmdrv_layer *pshm_drv;
-	pshm_netdev->netdev_ops = &netdev_ops;
-
-	pshm_netdev->mtu = CAIF_MAX_MTU;
-	pshm_netdev->type = ARPHRD_CAIF;
-	pshm_netdev->hard_header_len = CAIF_NEEDED_HEADROOM;
-	pshm_netdev->tx_queue_len = 0;
-	pshm_netdev->destructor = free_netdev;
-
-	pshm_drv = netdev_priv(pshm_netdev);
-
-	/* Initialize structures in a clean state. */
-	memset(pshm_drv, 0, sizeof(struct shmdrv_layer));
-
-	pshm_drv->cfdev.link_select = CAIF_LINK_LOW_LATENCY;
-}
-
-int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
-{
-	int result, j;
-	struct shmdrv_layer *pshm_drv = NULL;
-
-	pshm_dev->pshm_netdev = alloc_netdev(sizeof(struct shmdrv_layer),
-						"cfshm%d", shm_netdev_setup);
-	if (!pshm_dev->pshm_netdev)
-		return -ENOMEM;
-
-	pshm_drv = netdev_priv(pshm_dev->pshm_netdev);
-	pshm_drv->pshm_dev = pshm_dev;
-
-	/*
-	 * Initialization starts with the verification of the
-	 * availability of MBX driver by calling its setup function.
-	 * MBX driver must be available by this time for proper
-	 * functioning of SHM driver.
-	 */
-	if ((pshm_dev->pshmdev_mbxsetup
-			(caif_shmdrv_rx_cb, pshm_dev, pshm_drv)) != 0) {
-		pr_warn("Could not config. SHM Mailbox,"
-				" Bailing out.....\n");
-		free_netdev(pshm_dev->pshm_netdev);
-		return -ENODEV;
-	}
-
-	skb_queue_head_init(&pshm_drv->sk_qhead);
-
-	pr_info("SHM DEVICE[%d] PROBED BY DRIVER, NEW SHM DRIVER"
-			" INSTANCE AT pshm_drv =0x%p\n",
-			pshm_drv->pshm_dev->shm_id, pshm_drv);
-
-	if (pshm_dev->shm_total_sz <
-			(NR_TX_BUF * TX_BUF_SZ + NR_RX_BUF * RX_BUF_SZ)) {
-
-		pr_warn("ERROR, Amount of available"
-				" Phys. SHM cannot accommodate current SHM "
-				"driver configuration, Bailing out ...\n");
-		free_netdev(pshm_dev->pshm_netdev);
-		return -ENOMEM;
-	}
-
-	pshm_drv->shm_base_addr = pshm_dev->shm_base_addr;
-	pshm_drv->shm_tx_addr = pshm_drv->shm_base_addr;
-
-	if (pshm_dev->shm_loopback)
-		pshm_drv->shm_rx_addr = pshm_drv->shm_tx_addr;
-	else
-		pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
-						(NR_TX_BUF * TX_BUF_SZ);
-
-	spin_lock_init(&pshm_drv->lock);
-	INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
-	INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
-	INIT_LIST_HEAD(&pshm_drv->tx_full_list);
-
-	INIT_LIST_HEAD(&pshm_drv->rx_empty_list);
-	INIT_LIST_HEAD(&pshm_drv->rx_pend_list);
-	INIT_LIST_HEAD(&pshm_drv->rx_full_list);
-
-	INIT_WORK(&pshm_drv->shm_tx_work, shm_tx_work_func);
-	INIT_WORK(&pshm_drv->shm_rx_work, shm_rx_work_func);
-
-	pshm_drv->pshm_tx_workqueue =
-				create_singlethread_workqueue("shm_tx_work");
-	pshm_drv->pshm_rx_workqueue =
-				create_singlethread_workqueue("shm_rx_work");
-
-	for (j = 0; j < NR_TX_BUF; j++) {
-		struct buf_list *tx_buf =
-				kmalloc(sizeof(struct buf_list), GFP_KERNEL);
-
-		if (tx_buf == NULL) {
-			free_netdev(pshm_dev->pshm_netdev);
-			return -ENOMEM;
-		}
-		tx_buf->index = j;
-		tx_buf->phy_addr = pshm_drv->shm_tx_addr + (TX_BUF_SZ * j);
-		tx_buf->len = TX_BUF_SZ;
-		tx_buf->frames = 0;
-		tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;
-
-		if (pshm_dev->shm_loopback)
-			tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr;
-		else
-			/*
-			 * FIXME: the result of ioremap is not a pointer - arnd
-			 */
-			tx_buf->desc_vptr =
-					ioremap(tx_buf->phy_addr, TX_BUF_SZ);
-
-		list_add_tail(&tx_buf->list, &pshm_drv->tx_empty_list);
-	}
-
-	for (j = 0; j < NR_RX_BUF; j++) {
-		struct buf_list *rx_buf =
-				kmalloc(sizeof(struct buf_list), GFP_KERNEL);
-
-		if (rx_buf == NULL) {
-			free_netdev(pshm_dev->pshm_netdev);
-			return -ENOMEM;
-		}
-		rx_buf->index = j;
-		rx_buf->phy_addr = pshm_drv->shm_rx_addr + (RX_BUF_SZ * j);
-		rx_buf->len = RX_BUF_SZ;
-
-		if (pshm_dev->shm_loopback)
-			rx_buf->desc_vptr = (unsigned char *)rx_buf->phy_addr;
-		else
-			rx_buf->desc_vptr =
-					ioremap(rx_buf->phy_addr, RX_BUF_SZ);
-		list_add_tail(&rx_buf->list, &pshm_drv->rx_empty_list);
-	}
-
-	pshm_drv->tx_empty_available = 1;
-	result = register_netdev(pshm_dev->pshm_netdev);
-	if (result)
-		pr_warn("ERROR[%d], SHM could not, "
-			"register with NW FRMWK Bailing out ...\n", result);
-
-	return result;
-}
-
-void caif_shmcore_remove(struct net_device *pshm_netdev)
-{
-	struct buf_list *pbuf;
-	struct shmdrv_layer *pshm_drv = NULL;
-
-	pshm_drv = netdev_priv(pshm_netdev);
-
-	while (!(list_empty(&pshm_drv->tx_pend_list))) {
-		pbuf =
-			list_entry(pshm_drv->tx_pend_list.next,
-					struct buf_list, list);
-
-		list_del(&pbuf->list);
-		kfree(pbuf);
-	}
-
-	while (!(list_empty(&pshm_drv->tx_full_list))) {
-		pbuf =
-			list_entry(pshm_drv->tx_full_list.next,
-					struct buf_list, list);
-		list_del(&pbuf->list);
-		kfree(pbuf);
-	}
-
-	while (!(list_empty(&pshm_drv->tx_empty_list))) {
-		pbuf =
-			list_entry(pshm_drv->tx_empty_list.next,
-					struct buf_list, list);
-		list_del(&pbuf->list);
-		kfree(pbuf);
-	}
-
-	while (!(list_empty(&pshm_drv->rx_full_list))) {
-		pbuf =
-			list_entry(pshm_drv->tx_full_list.next,
-					struct buf_list, list);
-		list_del(&pbuf->list);
-		kfree(pbuf);
-	}
-
-	while (!(list_empty(&pshm_drv->rx_pend_list))) {
-		pbuf =
-			list_entry(pshm_drv->tx_pend_list.next,
-					struct buf_list, list);
-		list_del(&pbuf->list);
-		kfree(pbuf);
-	}
-
-	while (!(list_empty(&pshm_drv->rx_empty_list))) {
-		pbuf =
-			list_entry(pshm_drv->rx_empty_list.next,
-					struct buf_list, list);
-		list_del(&pbuf->list);
-		kfree(pbuf);
-	}
-
-	/* Destroy work queues. */
-	destroy_workqueue(pshm_drv->pshm_tx_workqueue);
-	destroy_workqueue(pshm_drv->pshm_rx_workqueue);
-
-	unregister_netdev(pshm_netdev);
-}
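For reference, the mailbox word the deleted SHM_SET_*/SHM_GET_* macros encode packs a "full" buffer index into bits 0-3 and an "empty" index into bits 4-7, each stored off-by-one so that an all-zero nibble can mean "no index". A standalone userspace sketch of the round-trip, using the macro definitions exactly as deleted above:

#include <stdio.h>

#define SHM_SET_FULL(x)		(((x + 1) & 0x0F) << 0)
#define SHM_GET_FULL(x)		(((x >> 0) & 0x0F) - 1)
#define SHM_SET_EMPTY(x)	(((x + 1) & 0x0F) << 4)
#define SHM_GET_EMPTY(x)	(((x >> 4) & 0x0F) - 1)

int main(void)
{
	unsigned int msg = 0;

	msg |= SHM_SET_FULL(2);		/* peer filled buffer 2 */
	msg |= SHM_SET_EMPTY(5);	/* peer emptied buffer 5 */

	/* prints: msg=0x63 full=2 empty=5 */
	printf("msg=0x%x full=%d empty=%d\n",
	       msg, SHM_GET_FULL(msg), SHM_GET_EMPTY(msg));
	return 0;
}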
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 9862b2e07644..e456b70933c2 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -65,7 +65,7 @@ config CAN_LEDS
 
 config CAN_AT91
 	tristate "Atmel AT91 onchip CAN controller"
-	depends on ARCH_AT91SAM9263 || ARCH_AT91SAM9X5
+	depends on ARM
 	---help---
 	  This is a driver for the SoC CAN controller in Atmel's AT91SAM9263
 	  and AT91SAM9X5 processors.
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 44f363792b59..db52f4414def 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -27,6 +27,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
+#include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/rtnetlink.h>
 #include <linux/skbuff.h>
@@ -155,19 +156,20 @@ struct at91_priv {
 	canid_t mb0_id;
 };
 
-static const struct at91_devtype_data at91_devtype_data[] = {
-	[AT91_DEVTYPE_SAM9263] = {
-		.rx_first = 1,
-		.rx_split = 8,
-		.rx_last = 11,
-		.tx_shift = 2,
-	},
-	[AT91_DEVTYPE_SAM9X5] = {
-		.rx_first = 0,
-		.rx_split = 4,
-		.rx_last = 5,
-		.tx_shift = 1,
-	},
+static const struct at91_devtype_data at91_at91sam9263_data = {
+	.rx_first = 1,
+	.rx_split = 8,
+	.rx_last = 11,
+	.tx_shift = 2,
+	.type = AT91_DEVTYPE_SAM9263,
+};
+
+static const struct at91_devtype_data at91_at91sam9x5_data = {
+	.rx_first = 0,
+	.rx_split = 4,
+	.rx_last = 5,
+	.tx_shift = 1,
+	.type = AT91_DEVTYPE_SAM9X5,
 };
 
 static const struct can_bittiming_const at91_bittiming_const = {
@@ -1249,10 +1251,42 @@ static struct attribute_group at91_sysfs_attr_group = {
 	.attrs = at91_sysfs_attrs,
 };
 
+#if defined(CONFIG_OF)
+static const struct of_device_id at91_can_dt_ids[] = {
+	{
+		.compatible = "atmel,at91sam9x5-can",
+		.data = &at91_at91sam9x5_data,
+	}, {
+		.compatible = "atmel,at91sam9263-can",
+		.data = &at91_at91sam9263_data,
+	}, {
+		/* sentinel */
+	}
+};
+MODULE_DEVICE_TABLE(of, at91_can_dt_ids);
+#else
+#define at91_can_dt_ids NULL
+#endif
+
+static const struct at91_devtype_data *at91_can_get_driver_data(struct platform_device *pdev)
+{
+	if (pdev->dev.of_node) {
+		const struct of_device_id *match;
+
+		match = of_match_node(at91_can_dt_ids, pdev->dev.of_node);
+		if (!match) {
+			dev_err(&pdev->dev, "no matching node found in dtb\n");
+			return NULL;
+		}
+		return (const struct at91_devtype_data *)match->data;
+	}
+	return (const struct at91_devtype_data *)
+		platform_get_device_id(pdev)->driver_data;
+}
+
 static int at91_can_probe(struct platform_device *pdev)
 {
 	const struct at91_devtype_data *devtype_data;
-	enum at91_devtype devtype;
 	struct net_device *dev;
 	struct at91_priv *priv;
 	struct resource *res;
@@ -1260,8 +1294,12 @@ static int at91_can_probe(struct platform_device *pdev)
 	void __iomem *addr;
 	int err, irq;
 
-	devtype = pdev->id_entry->driver_data;
-	devtype_data = &at91_devtype_data[devtype];
+	devtype_data = at91_can_get_driver_data(pdev);
+	if (!devtype_data) {
+		dev_err(&pdev->dev, "no driver data\n");
+		err = -ENODEV;
+		goto exit;
+	}
 
 	clk = clk_get(&pdev->dev, "can_clk");
 	if (IS_ERR(clk)) {
@@ -1310,7 +1348,6 @@ static int at91_can_probe(struct platform_device *pdev)
 	priv->dev = dev;
 	priv->reg_base = addr;
 	priv->devtype_data = *devtype_data;
-	priv->devtype_data.type = devtype;
 	priv->clk = clk;
 	priv->pdata = pdev->dev.platform_data;
 	priv->mb0_id = 0x7ff;
@@ -1373,10 +1410,10 @@ static int at91_can_remove(struct platform_device *pdev)
 static const struct platform_device_id at91_can_id_table[] = {
 	{
 		.name = "at91_can",
-		.driver_data = AT91_DEVTYPE_SAM9263,
+		.driver_data = (kernel_ulong_t)&at91_at91sam9263_data,
 	}, {
 		.name = "at91sam9x5_can",
-		.driver_data = AT91_DEVTYPE_SAM9X5,
+		.driver_data = (kernel_ulong_t)&at91_at91sam9x5_data,
 	}, {
 		/* sentinel */
 	}
@@ -1389,6 +1426,7 @@ static struct platform_driver at91_can_driver = {
 	.driver = {
 		.name = KBUILD_MODNAME,
 		.owner = THIS_MODULE,
+		.of_match_table = at91_can_dt_ids,
 	},
 	.id_table = at91_can_id_table,
 };
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 6a0532176b69..d4a15e82bfc0 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -412,7 +412,7 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
 	return 0;
 }
 
-irqreturn_t bfin_can_interrupt(int irq, void *dev_id)
+static irqreturn_t bfin_can_interrupt(int irq, void *dev_id)
 {
 	struct net_device *dev = dev_id;
 	struct bfin_can_priv *priv = netdev_priv(dev);
@@ -504,7 +504,7 @@ static int bfin_can_close(struct net_device *dev)
 	return 0;
 }
 
-struct net_device *alloc_bfin_candev(void)
+static struct net_device *alloc_bfin_candev(void)
 {
 	struct net_device *dev;
 	struct bfin_can_priv *priv;
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index f32b9fc6a983..3444e9ee4a80 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -269,7 +269,7 @@ struct mcp251x_priv {
 #define MCP251X_IS(_model) \
 static inline int mcp251x_is_##_model(struct spi_device *spi) \
 { \
-	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); \
+	struct mcp251x_priv *priv = spi_get_drvdata(spi); \
 	return priv->model == CAN_MCP251X_MCP##_model; \
 }
 
@@ -305,7 +305,7 @@ static void mcp251x_clean(struct net_device *net)
  */
 static int mcp251x_spi_trans(struct spi_device *spi, int len)
 {
-	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
+	struct mcp251x_priv *priv = spi_get_drvdata(spi);
 	struct spi_transfer t = {
 		.tx_buf = priv->spi_tx_buf,
 		.rx_buf = priv->spi_rx_buf,
@@ -333,7 +333,7 @@ static int mcp251x_spi_trans(struct spi_device *spi, int len)
 
 static u8 mcp251x_read_reg(struct spi_device *spi, uint8_t reg)
 {
-	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
+	struct mcp251x_priv *priv = spi_get_drvdata(spi);
 	u8 val = 0;
 
 	priv->spi_tx_buf[0] = INSTRUCTION_READ;
@@ -348,7 +348,7 @@ static u8 mcp251x_read_reg(struct spi_device *spi, uint8_t reg)
 static void mcp251x_read_2regs(struct spi_device *spi, uint8_t reg,
 		uint8_t *v1, uint8_t *v2)
 {
-	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
+	struct mcp251x_priv *priv = spi_get_drvdata(spi);
 
 	priv->spi_tx_buf[0] = INSTRUCTION_READ;
 	priv->spi_tx_buf[1] = reg;
@@ -361,7 +361,7 @@ static void mcp251x_read_2regs(struct spi_device *spi, uint8_t reg,
 
 static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val)
 {
-	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
+	struct mcp251x_priv *priv = spi_get_drvdata(spi);
 
 	priv->spi_tx_buf[0] = INSTRUCTION_WRITE;
 	priv->spi_tx_buf[1] = reg;
@@ -373,7 +373,7 @@ static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val)
 static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
 		u8 mask, uint8_t val)
 {
-	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
+	struct mcp251x_priv *priv = spi_get_drvdata(spi);
 
 	priv->spi_tx_buf[0] = INSTRUCTION_BIT_MODIFY;
 	priv->spi_tx_buf[1] = reg;
@@ -386,7 +386,7 @@ static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
 static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
 		int len, int tx_buf_idx)
 {
-	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
+	struct mcp251x_priv *priv = spi_get_drvdata(spi);
 
 	if (mcp251x_is_2510(spi)) {
 		int i;
@@ -403,7 +403,7 @@ static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
 static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
 		int tx_buf_idx)
 {
-	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
+	struct mcp251x_priv *priv = spi_get_drvdata(spi);
 	u32 sid, eid, exide, rtr;
 	u8 buf[SPI_TRANSFER_BUF_LEN];
 
@@ -434,7 +434,7 @@ static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
 static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
 		int buf_idx)
 {
-	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
+	struct mcp251x_priv *priv = spi_get_drvdata(spi);
 
 	if (mcp251x_is_2510(spi)) {
 		int i, len;
@@ -454,7 +454,7 @@ static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
 
 static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
 {
-	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
+	struct mcp251x_priv *priv = spi_get_drvdata(spi);
 	struct sk_buff *skb;
 	struct can_frame *frame;
 	u8 buf[SPI_TRANSFER_BUF_LEN];
@@ -550,7 +550,7 @@ static int mcp251x_do_set_mode(struct net_device *net, enum can_mode mode)
 
 static int mcp251x_set_normal_mode(struct spi_device *spi)
 {
-	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
+	struct mcp251x_priv *priv = spi_get_drvdata(spi);
 	unsigned long timeout;
 
 	/* Enable interrupts */
@@ -620,7 +620,7 @@ static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv,
 
 static int mcp251x_hw_reset(struct spi_device *spi)
 {
-	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
+	struct mcp251x_priv *priv = spi_get_drvdata(spi);
 	int ret;
 	unsigned long timeout;
 
@@ -1020,7 +1020,7 @@ static int mcp251x_can_probe(struct spi_device *spi)
 		CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY;
 	priv->model = spi_get_device_id(spi)->driver_data;
 	priv->net = net;
-	dev_set_drvdata(&spi->dev, priv);
+	spi_set_drvdata(spi, priv);
 
 	priv->spi = spi;
 	mutex_init(&priv->mcp_lock);
@@ -1118,7 +1118,7 @@ error_out:
 static int mcp251x_can_remove(struct spi_device *spi)
 {
 	struct mcp251x_platform_data *pdata = spi->dev.platform_data;
-	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
+	struct mcp251x_priv *priv = spi_get_drvdata(spi);
 	struct net_device *net = priv->net;
 
 	unregister_candev(net);
@@ -1138,11 +1138,13 @@ static int mcp251x_can_remove(struct spi_device *spi)
 	return 0;
 }
 
-#ifdef CONFIG_PM
-static int mcp251x_can_suspend(struct spi_device *spi, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+
+static int mcp251x_can_suspend(struct device *dev)
 {
+	struct spi_device *spi = to_spi_device(dev);
 	struct mcp251x_platform_data *pdata = spi->dev.platform_data;
-	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
+	struct mcp251x_priv *priv = spi_get_drvdata(spi);
 	struct net_device *net = priv->net;
 
 	priv->force_quit = 1;
@@ -1170,10 +1172,11 @@ static int mcp251x_can_suspend(struct spi_device *spi, pm_message_t state)
 	return 0;
 }
 
-static int mcp251x_can_resume(struct spi_device *spi)
+static int mcp251x_can_resume(struct device *dev)
 {
+	struct spi_device *spi = to_spi_device(dev);
 	struct mcp251x_platform_data *pdata = spi->dev.platform_data;
-	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
+	struct mcp251x_priv *priv = spi_get_drvdata(spi);
 
 	if (priv->after_suspend & AFTER_SUSPEND_POWER) {
 		pdata->power_enable(1);
@@ -1191,9 +1194,13 @@ static int mcp251x_can_resume(struct spi_device *spi)
 	enable_irq(spi->irq);
 	return 0;
 }
+
+static SIMPLE_DEV_PM_OPS(mcp251x_can_pm_ops, mcp251x_can_suspend,
+	mcp251x_can_resume);
+#define MCP251X_PM_OPS (&mcp251x_can_pm_ops)
+
 #else
-#define mcp251x_can_suspend NULL
-#define mcp251x_can_resume NULL
+#define MCP251X_PM_OPS NULL
 #endif
 
 static const struct spi_device_id mcp251x_id_table[] = {
@@ -1207,29 +1214,15 @@ MODULE_DEVICE_TABLE(spi, mcp251x_id_table);
 static struct spi_driver mcp251x_can_driver = {
 	.driver = {
 		.name = DEVICE_NAME,
-		.bus = &spi_bus_type,
 		.owner = THIS_MODULE,
+		.pm = MCP251X_PM_OPS,
 	},
 
 	.id_table = mcp251x_id_table,
 	.probe = mcp251x_can_probe,
 	.remove = mcp251x_can_remove,
-	.suspend = mcp251x_can_suspend,
-	.resume = mcp251x_can_resume,
 };
-
-static int __init mcp251x_can_init(void)
-{
-	return spi_register_driver(&mcp251x_can_driver);
-}
-
-static void __exit mcp251x_can_exit(void)
-{
-	spi_unregister_driver(&mcp251x_can_driver);
-}
-
-module_init(mcp251x_can_init);
-module_exit(mcp251x_can_exit);
+module_spi_driver(mcp251x_can_driver);
 
 MODULE_AUTHOR("Chris Elston <celston@katalix.com>, "
 	      "Christian Pellegrin <chripell@evolware.org>");
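The two conversions above (dev_pm_ops in place of the legacy spi_driver suspend/resume hooks, and module_spi_driver() in place of a hand-rolled init/exit pair) follow a standard kernel pattern; a minimal, self-contained skeleton of the same shape, with placeholder "foo" names that are not part of this patch:

#include <linux/module.h>
#include <linux/pm.h>
#include <linux/spi/spi.h>

static int foo_probe(struct spi_device *spi)
{
	return 0;	/* set up hardware, register subsystems, ... */
}

static int foo_remove(struct spi_device *spi)
{
	return 0;
}

static int foo_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);

	dev_dbg(&spi->dev, "suspending\n");	/* quiesce the hardware here */
	return 0;
}

static int foo_resume(struct device *dev)
{
	return 0;	/* restore hardware state here */
}

/* Expands to a struct dev_pm_ops whose system-sleep hooks point at the
 * two callbacks; without CONFIG_PM_SLEEP those hooks are compiled out,
 * which is why real drivers wrap this in #ifdef as mcp251x does. */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct spi_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.owner	= THIS_MODULE,
		.pm	= &foo_pm_ops,
	},
	.probe	= foo_probe,
	.remove	= foo_remove,
};
/* Replaces the module_init()/module_exit() boilerplate removed above. */
module_spi_driver(foo_driver);

MODULE_LICENSE("GPL");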
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index a175d0be1ae1..ee705771bd2c 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -188,10 +188,9 @@ static int desc_list_init(struct net_device *dev)
 
 		/* allocate a new skb for next time receive */
 		new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
-		if (!new_skb) {
-			pr_notice("init: low on mem - packet dropped\n");
+		if (!new_skb)
 			goto init_error;
-		}
+
 		skb_reserve(new_skb, NET_IP_ALIGN);
 		/* Invidate the data cache of skb->data range when it is write back
 		 * cache. It will prevent overwritting the new data from DMA
@@ -1236,7 +1235,6 @@ static void bfin_mac_rx(struct net_device *dev)
 
 		new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
 		if (!new_skb) {
-			netdev_notice(dev, "rx: low on mem - packet dropped\n");
 			dev->stats.rx_dropped++;
 			goto out;
 		}
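This removal pattern repeats across most of the Ethernet drivers below: the page allocator already emits a rate-limited OOM warning with a stack dump when netdev_alloc_skb() and friends fail, so per-driver "low on mem"/"memory squeeze" messages add nothing. The surviving error path just accounts the drop; a hypothetical helper showing the slimmed-down shape:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void rx_one(struct net_device *dev, unsigned int pkt_len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
	if (!skb) {
		/* core already logged the allocation failure;
		 * just account the drop */
		dev->stats.rx_dropped++;
		return;
	}
	skb_put(skb, pkt_len);	/* ...copy the received frame in here... */
	netif_rx(skb);
}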
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 0be2195e5034..269295403fc4 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1464,35 +1464,23 @@ static int greth_of_probe(struct platform_device *ofdev)
 	}
 
 	/* Allocate TX descriptor ring in coherent memory */
-	greth->tx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
-								   1024,
-								   &greth->tx_bd_base_phys,
-								   GFP_KERNEL);
-
+	greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024,
+					       &greth->tx_bd_base_phys,
+					       GFP_KERNEL | __GFP_ZERO);
 	if (!greth->tx_bd_base) {
-		if (netif_msg_probe(greth))
-			dev_err(&dev->dev, "could not allocate descriptor memory.\n");
 		err = -ENOMEM;
 		goto error3;
 	}
 
-	memset(greth->tx_bd_base, 0, 1024);
-
 	/* Allocate RX descriptor ring in coherent memory */
-	greth->rx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
-								   1024,
-								   &greth->rx_bd_base_phys,
-								   GFP_KERNEL);
-
+	greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024,
+					       &greth->rx_bd_base_phys,
+					       GFP_KERNEL | __GFP_ZERO);
 	if (!greth->rx_bd_base) {
-		if (netif_msg_probe(greth))
-			dev_err(greth->dev, "could not allocate descriptor memory.\n");
 		err = -ENOMEM;
 		goto error4;
 	}
 
-	memset(greth->rx_bd_base, 0, 1024);
-
 	/* Get MAC address from: module param, OF property or ID prom */
 	for (i = 0; i < 6; i++) {
 		if (macaddr[i] != 0)
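The greth change above, like the bcm63xx_enet one later in this section, folds the zeroing into the allocation: passing __GFP_ZERO to dma_alloc_coherent() makes the follow-up memset() redundant. A minimal helper-shaped sketch of the pattern (the function name is a placeholder):

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Allocate a zeroed, DMA-coherent descriptor ring in one call; the
 * explicit memset(ring, 0, size) that used to follow is no longer
 * needed. */
static void *alloc_desc_ring(struct device *dev, size_t size,
			     dma_addr_t *handle)
{
	return dma_alloc_coherent(dev, size, handle,
				  GFP_KERNEL | __GFP_ZERO);
}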
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index 6e722dc37db7..65926a956575 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -318,8 +318,6 @@ static int lance_rx (struct net_device *dev)
 			struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
 
 			if (!skb) {
-				printk ("%s: Memory squeeze, deferring packet.\n",
-					dev->name);
 				dev->stats.rx_dropped++;
 				rd->mblength = 0;
 				rd->rmd1_bits = LE_R1_OWN;
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index 3789affbc0e5..0866e7627433 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -293,7 +293,6 @@ static int lance_rx(struct net_device *dev)
 			struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
 
 			if (!skb) {
-				netdev_warn(dev, "Memory squeeze, deferring packet\n");
 				dev->stats.rx_dropped++;
 				rd->mblength = 0;
 				rd->rmd1_bits = LE_R1_OWN;
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
index 60e2b701afe7..9793767996a2 100644
--- a/drivers/net/ethernet/amd/am79c961a.c
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -528,7 +528,6 @@ am79c961_rx(struct net_device *dev, struct dev_priv *priv)
 			dev->stats.rx_packets++;
 		} else {
 			am_writeword (dev, hdraddr + 2, RMD_OWN);
-			printk (KERN_WARNING "%s: memory squeeze, dropping packet.\n", dev->name);
 			dev->stats.rx_dropped++;
 			break;
 		}
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index 98f4522fd17b..c178eb4c8166 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -193,7 +193,6 @@ static int ariadne_rx(struct net_device *dev)
 
 			skb = netdev_alloc_skb(dev, pkt_len + 2);
 			if (skb == NULL) {
-				netdev_warn(dev, "Memory squeeze, deferring packet\n");
 				for (i = 0; i < RX_RING_SIZE; i++)
 					if (lowb(priv->rx_ring[(entry + i) % RX_RING_SIZE]->RMD1) & RF_OWN)
 						break;
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 84219df72f51..e8d0ef508f48 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -996,8 +996,6 @@ static int lance_rx( struct net_device *dev )
 		else {
 			skb = netdev_alloc_skb(dev, pkt_len + 2);
 			if (skb == NULL) {
-				DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n",
-							  dev->name ));
 				for( i = 0; i < RX_RING_SIZE; i++ )
 					if (MEM->rx_head[(entry+i) & RX_RING_MOD_MASK].flag &
 						RMD1_OWN_CHIP)
@@ -1149,9 +1147,7 @@ static struct net_device *atarilance_dev;
 static int __init atarilance_module_init(void)
 {
 	atarilance_dev = atarilance_probe(-1);
-	if (IS_ERR(atarilance_dev))
-		return PTR_ERR(atarilance_dev);
-	return 0;
+	return PTR_RET(atarilance_dev);
 }
 
 static void __exit atarilance_module_exit(void)
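PTR_RET() from <linux/err.h> (later renamed PTR_ERR_OR_ZERO()) collapses exactly the three removed lines; the same conversion recurs in mvme147.c, ni65.c and sun3lance.c below. To a close approximation it is:

static inline int PTR_RET(const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	else
		return 0;
}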
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index de774d419144..688aede742c7 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -727,7 +727,6 @@ static int au1000_rx(struct net_device *dev)
 			frmlen -= 4; /* Remove FCS */
 			skb = netdev_alloc_skb(dev, frmlen + 2);
 			if (skb == NULL) {
-				netdev_err(dev, "Memory squeeze, dropping packet.\n");
 				dev->stats.rx_dropped++;
 				continue;
 			}
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index baca0bd1b393..3d86ffeb4e15 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -607,8 +607,6 @@ static int lance_rx(struct net_device *dev)
 		skb = netdev_alloc_skb(dev, len + 2);
 
 		if (skb == 0) {
-			printk("%s: Memory squeeze, deferring packet.\n",
-			       dev->name);
 			dev->stats.rx_dropped++;
 			*rds_ptr(rd, mblength, lp->type) = 0;
 			*rds_ptr(rd, rmd1, lp->type) =
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
index 9af3c307862c..a51497c9d2af 100644
--- a/drivers/net/ethernet/amd/mvme147.c
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -188,9 +188,7 @@ static struct net_device *dev_mvme147_lance;
 int __init init_module(void)
 {
 	dev_mvme147_lance = mvme147lance_probe(-1);
-	if (IS_ERR(dev_mvme147_lance))
-		return PTR_ERR(dev_mvme147_lance);
-	return 0;
+	return PTR_RET(dev_mvme147_lance);
 }
 
 void __exit cleanup_module(void)
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
index 013b65108536..26fc0ce0faa3 100644
--- a/drivers/net/ethernet/amd/ni65.c
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -1238,7 +1238,7 @@ MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)");
 int __init init_module(void)
 {
 	dev_ni65 = ni65_probe(-1);
-	return IS_ERR(dev_ni65) ? PTR_ERR(dev_ni65) : 0;
+	return PTR_RET(dev_ni65);
 }
 
 void __exit cleanup_module(void)
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 797f847edf13..ed2130727643 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1166,7 +1166,6 @@ static void pcnet32_rx_entry(struct net_device *dev,
 		skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
 
 		if (skb == NULL) {
-			netif_err(lp, drv, dev, "Memory squeeze, dropping packet\n");
 			dev->stats.rx_dropped++;
 			return;
 		}
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index 74b3891b6483..4375abe61da1 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -812,9 +812,6 @@ static int lance_rx( struct net_device *dev )
 		else {
 			skb = netdev_alloc_skb(dev, pkt_len + 2);
 			if (skb == NULL) {
-				DPRINTK( 1, ( "%s: Memory squeeze, deferring packet.\n",
-					      dev->name ));
-
 				dev->stats.rx_dropped++;
 				head->msg_length = 0;
 				head->flag |= RMD1_OWN_CHIP;
@@ -943,9 +940,7 @@ static struct net_device *sun3lance_dev;
 int __init init_module(void)
 {
 	sun3lance_dev = sun3lance_probe(-1);
-	if (IS_ERR(sun3lance_dev))
-		return PTR_ERR(sun3lance_dev);
-	return 0;
+	return PTR_RET(sun3lance_dev);
 }
 
 void __exit cleanup_module(void)
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 6a40290d3727..f47b780892e9 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -536,8 +536,6 @@ static void lance_rx_dvma(struct net_device *dev)
 		skb = netdev_alloc_skb(dev, len + 2);
 
 		if (skb == NULL) {
-			printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
-			       dev->name);
 			dev->stats.rx_dropped++;
 			rd->mblength = 0;
 			rd->rmd1_bits = LE_R1_OWN;
@@ -708,8 +706,6 @@ static void lance_rx_pio(struct net_device *dev)
 		skb = netdev_alloc_skb(dev, len + 2);
 
 		if (skb == NULL) {
-			printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
-			       dev->name);
 			dev->stats.rx_dropped++;
 			sbus_writew(0, &rd->mblength);
 			sbus_writeb(LE_R1_OWN, &rd->rmd1_bits);
@@ -1377,10 +1373,9 @@ static int sparc_lance_probe_one(struct platform_device *op,
 			dma_alloc_coherent(&op->dev,
 					   sizeof(struct lance_init_block),
 					   &lp->init_block_dvma, GFP_ATOMIC);
-		if (!lp->init_block_mem) {
-			printk(KERN_ERR "SunLance: Cannot allocate consistent DMA memory.\n");
+		if (!lp->init_block_mem)
 			goto fail;
-		}
+
 		lp->pio_buffer = 0;
 		lp->init_ring = lance_init_ring_dvma;
 		lp->rx = lance_rx_dvma;
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index a206779c68cf..4ce8ceb62205 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -386,20 +386,16 @@ static int mace_open(struct net_device *dev)
 	/* Allocate the DMA ring buffers */
 
 	mp->tx_ring = dma_alloc_coherent(mp->device,
 					 N_TX_RING * MACE_BUFF_SIZE,
 					 &mp->tx_ring_phys, GFP_KERNEL);
-	if (mp->tx_ring == NULL) {
-		printk(KERN_ERR "%s: unable to allocate DMA tx buffers\n", dev->name);
+	if (mp->tx_ring == NULL)
 		goto out1;
-	}
 
 	mp->rx_ring = dma_alloc_coherent(mp->device,
 					 N_RX_RING * MACE_BUFF_SIZE,
 					 &mp->rx_ring_phys, GFP_KERNEL);
-	if (mp->rx_ring == NULL) {
-		printk(KERN_ERR "%s: unable to allocate DMA rx buffers\n", dev->name);
+	if (mp->rx_ring == NULL)
 		goto out2;
-	}
 
 	mace_dma_off(dev);
 
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index ac25f05ff68f..d058d0061ed0 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1420,11 +1420,9 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
 			packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
 					RRS_PKT_SIZE_MASK) - 4; /* CRC */
 			skb = netdev_alloc_skb_ip_align(netdev, packet_size);
-			if (skb == NULL) {
-				netdev_warn(netdev,
-					    "Memory squeeze, deferring packet\n");
+			if (skb == NULL)
 				goto skip_pkt;
-			}
+
 			memcpy(skb->data, (u8 *)(prrs + 1), packet_size);
 			skb_put(skb, packet_size);
 			skb->protocol = eth_type_trans(skb, netdev);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 5b0d9931c720..9948fee28ae5 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2774,7 +2774,7 @@ static int atl1_close(struct net_device *netdev)
 	return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int atl1_suspend(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
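
Note: CONFIG_PM is also set when only runtime PM is enabled, so guarding system
suspend/resume callbacks with it produces "defined but not used" warnings on
CONFIG_PM && !CONFIG_PM_SLEEP builds; CONFIG_PM_SLEEP is the guard that matches
what the callbacks actually implement. A generic sketch (hypothetical example_*
names, not the atl1 functions):

    #ifdef CONFIG_PM_SLEEP
    static int example_suspend(struct device *dev)
    {
            /* quiesce the device; compiled only when system sleep exists */
            return 0;
    }

    static int example_resume(struct device *dev)
    {
            return 0;
    }
    #endif

    /* SIMPLE_DEV_PM_OPS() degrades to an empty table when sleep is off */
    static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
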
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 1278b47022e0..a046b6ff847c 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -437,9 +437,6 @@ static void atl2_intr_rx(struct atl2_adapter *adapter)
 		/* alloc new buffer */
 		skb = netdev_alloc_skb_ip_align(netdev, rx_size);
 		if (NULL == skb) {
-			printk(KERN_WARNING
-				"%s: Mem squeeze, deferring packet.\n",
-				netdev->name);
 			/*
 			 * Check that some rx space is free. If not,
 			 * free one and mark stats->rx_dropped++.
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 7d81e059e811..0b3e23ec37f7 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -862,27 +862,25 @@ static int bcm_enet_open(struct net_device *dev)
 
 	/* allocate rx dma ring */
 	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
-	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
+	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma,
+			       GFP_KERNEL | __GFP_ZERO);
 	if (!p) {
-		dev_err(kdev, "cannot allocate rx ring %u\n", size);
 		ret = -ENOMEM;
 		goto out_freeirq_tx;
 	}
 
-	memset(p, 0, size);
 	priv->rx_desc_alloc_size = size;
 	priv->rx_desc_cpu = p;
 
 	/* allocate tx dma ring */
 	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
-	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
+	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma,
+			       GFP_KERNEL | __GFP_ZERO);
 	if (!p) {
-		dev_err(kdev, "cannot allocate tx ring\n");
 		ret = -ENOMEM;
 		goto out_free_rx_ring;
 	}
 
-	memset(p, 0, size);
 	priv->tx_desc_alloc_size = size;
 	priv->tx_desc_cpu = p;
 
@@ -1619,7 +1617,6 @@ static int bcm_enet_probe(struct platform_device *pdev)
 	struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
 	struct mii_bus *bus;
 	const char *clk_name;
-	unsigned int iomem_size;
 	int i, ret;
 
 	/* stop if shared driver failed, assume driver->probe will be
@@ -1644,17 +1641,12 @@ static int bcm_enet_probe(struct platform_device *pdev)
 	if (ret)
 		goto out;
 
-	iomem_size = resource_size(res_mem);
-	if (!request_mem_region(res_mem->start, iomem_size, "bcm63xx_enet")) {
-		ret = -EBUSY;
-		goto out;
-	}
-
-	priv->base = ioremap(res_mem->start, iomem_size);
+	priv->base = devm_request_and_ioremap(&pdev->dev, res_mem);
 	if (priv->base == NULL) {
 		ret = -ENOMEM;
-		goto out_release_mem;
+		goto out;
 	}
+
 	dev->irq = priv->irq = res_irq->start;
 	priv->irq_rx = res_irq_rx->start;
 	priv->irq_tx = res_irq_tx->start;
@@ -1674,9 +1666,9 @@ static int bcm_enet_probe(struct platform_device *pdev)
 	priv->mac_clk = clk_get(&pdev->dev, clk_name);
 	if (IS_ERR(priv->mac_clk)) {
 		ret = PTR_ERR(priv->mac_clk);
-		goto out_unmap;
+		goto out;
 	}
-	clk_enable(priv->mac_clk);
+	clk_prepare_enable(priv->mac_clk);
 
 	/* initialize default and fetch platform data */
 	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
@@ -1705,7 +1697,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
 			priv->phy_clk = NULL;
 			goto out_put_clk_mac;
 		}
-		clk_enable(priv->phy_clk);
+		clk_prepare_enable(priv->phy_clk);
 	}
 
 	/* do minimal hardware init to be able to probe mii bus */
@@ -1733,7 +1725,8 @@ static int bcm_enet_probe(struct platform_device *pdev)
 	 * if a slave is not present on hw */
 	bus->phy_mask = ~(1 << priv->phy_id);
 
-	bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+	bus->irq = devm_kzalloc(&pdev->dev, sizeof(int) * PHY_MAX_ADDR,
+				GFP_KERNEL);
 	if (!bus->irq) {
 		ret = -ENOMEM;
 		goto out_free_mdio;
@@ -1794,10 +1787,8 @@ static int bcm_enet_probe(struct platform_device *pdev)
 	return 0;
 
 out_unregister_mdio:
-	if (priv->mii_bus) {
+	if (priv->mii_bus)
 		mdiobus_unregister(priv->mii_bus);
-		kfree(priv->mii_bus->irq);
-	}
 
 out_free_mdio:
 	if (priv->mii_bus)
@@ -1807,19 +1798,13 @@ out_uninit_hw:
 	/* turn off mdc clock */
 	enet_writel(priv, 0, ENET_MIISC_REG);
 	if (priv->phy_clk) {
-		clk_disable(priv->phy_clk);
+		clk_disable_unprepare(priv->phy_clk);
 		clk_put(priv->phy_clk);
 	}
 
out_put_clk_mac:
-	clk_disable(priv->mac_clk);
+	clk_disable_unprepare(priv->mac_clk);
 	clk_put(priv->mac_clk);
-
-out_unmap:
-	iounmap(priv->base);
-
-out_release_mem:
-	release_mem_region(res_mem->start, iomem_size);
 out:
 	free_netdev(dev);
 	return ret;
@@ -1833,7 +1818,6 @@ static int bcm_enet_remove(struct platform_device *pdev)
 {
 	struct bcm_enet_priv *priv;
 	struct net_device *dev;
-	struct resource *res;
 
 	/* stop netdevice */
 	dev = platform_get_drvdata(pdev);
@@ -1845,7 +1829,6 @@ static int bcm_enet_remove(struct platform_device *pdev)
 
 	if (priv->has_phy) {
 		mdiobus_unregister(priv->mii_bus);
-		kfree(priv->mii_bus->irq);
 		mdiobus_free(priv->mii_bus);
 	} else {
 		struct bcm63xx_enet_platform_data *pd;
@@ -1856,17 +1839,12 @@ static int bcm_enet_remove(struct platform_device *pdev)
 					 bcm_enet_mdio_write_mii);
 	}
 
-	/* release device resources */
-	iounmap(priv->base);
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	release_mem_region(res->start, resource_size(res));
-
 	/* disable hw block clocks */
 	if (priv->phy_clk) {
-		clk_disable(priv->phy_clk);
+		clk_disable_unprepare(priv->phy_clk);
 		clk_put(priv->phy_clk);
 	}
-	clk_disable(priv->mac_clk);
+	clk_disable_unprepare(priv->mac_clk);
 	clk_put(priv->mac_clk);
 
 	platform_set_drvdata(pdev, NULL);
@@ -1889,31 +1867,20 @@ struct platform_driver bcm63xx_enet_driver = {
 static int bcm_enet_shared_probe(struct platform_device *pdev)
 {
 	struct resource *res;
-	unsigned int iomem_size;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res)
 		return -ENODEV;
 
-	iomem_size = resource_size(res);
-	if (!request_mem_region(res->start, iomem_size, "bcm63xx_enet_dma"))
-		return -EBUSY;
-
-	bcm_enet_shared_base = ioremap(res->start, iomem_size);
-	if (!bcm_enet_shared_base) {
-		release_mem_region(res->start, iomem_size);
+	bcm_enet_shared_base = devm_request_and_ioremap(&pdev->dev, res);
+	if (!bcm_enet_shared_base)
 		return -ENOMEM;
-	}
+
 	return 0;
 }
 
 static int bcm_enet_shared_remove(struct platform_device *pdev)
 {
-	struct resource *res;
-
-	iounmap(bcm_enet_shared_base);
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	release_mem_region(res->start, resource_size(res));
 	return 0;
 }
 
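
Note: the probe paths above trade the request_mem_region()/ioremap() pair, its
error labels, and the matching cleanup in remove() for device-managed helpers:
devm_request_and_ioremap() and devm_kzalloc() tie the region, the mapping, and the
irq array to the device's lifetime, so failures collapse to "goto out" and the
remove() bodies shrink. (devm_request_and_ioremap() was the helper of this era;
later kernels spell it devm_ioremap_resource().) The clk_enable()/clk_disable()
calls become clk_prepare_enable()/clk_disable_unprepare() because the common clock
framework requires a prepare step before enable. A condensed sketch of the managed
pattern:

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!res)
            return -ENODEV;

    base = devm_request_and_ioremap(&pdev->dev, res);
    if (!base)
            return -ENOMEM; /* region and mapping are released automatically */
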
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index da5f4397f87c..eec0af45b859 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -13,6 +13,7 @@
 #include <linux/delay.h>
 #include <linux/etherdevice.h>
 #include <linux/mii.h>
+#include <linux/phy.h>
 #include <linux/interrupt.h>
 #include <linux/dma-mapping.h>
 #include <bcm47xx_nvram.h>
@@ -244,10 +245,8 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
 
 	/* Alloc skb */
 	slot->skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
-	if (!slot->skb) {
-		bgmac_err(bgmac, "Allocation of skb failed!\n");
+	if (!slot->skb)
 		return -ENOMEM;
-	}
 
 	/* Poison - if everything goes fine, hardware will overwrite it */
 	rx = (struct bgmac_rx_header *)slot->skb->data;
@@ -1313,6 +1312,73 @@ static const struct ethtool_ops bgmac_ethtool_ops = {
 };
 
 /**************************************************
+ * MII
+ **************************************************/
+
+static int bgmac_mii_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+	return bgmac_phy_read(bus->priv, mii_id, regnum);
+}
+
+static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum,
+			   u16 value)
+{
+	return bgmac_phy_write(bus->priv, mii_id, regnum, value);
+}
+
+static int bgmac_mii_register(struct bgmac *bgmac)
+{
+	struct mii_bus *mii_bus;
+	int i, err = 0;
+
+	mii_bus = mdiobus_alloc();
+	if (!mii_bus)
+		return -ENOMEM;
+
+	mii_bus->name = "bgmac mii bus";
+	sprintf(mii_bus->id, "%s-%d-%d", "bgmac", bgmac->core->bus->num,
+		bgmac->core->core_unit);
+	mii_bus->priv = bgmac;
+	mii_bus->read = bgmac_mii_read;
+	mii_bus->write = bgmac_mii_write;
+	mii_bus->parent = &bgmac->core->dev;
+	mii_bus->phy_mask = ~(1 << bgmac->phyaddr);
+
+	mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
+	if (!mii_bus->irq) {
+		err = -ENOMEM;
+		goto err_free_bus;
+	}
+	for (i = 0; i < PHY_MAX_ADDR; i++)
+		mii_bus->irq[i] = PHY_POLL;
+
+	err = mdiobus_register(mii_bus);
+	if (err) {
+		bgmac_err(bgmac, "Registration of mii bus failed\n");
+		goto err_free_irq;
+	}
+
+	bgmac->mii_bus = mii_bus;
+
+	return err;
+
+err_free_irq:
+	kfree(mii_bus->irq);
+err_free_bus:
+	mdiobus_free(mii_bus);
+	return err;
+}
+
+static void bgmac_mii_unregister(struct bgmac *bgmac)
+{
+	struct mii_bus *mii_bus = bgmac->mii_bus;
+
+	mdiobus_unregister(mii_bus);
+	kfree(mii_bus->irq);
+	mdiobus_free(mii_bus);
+}
+
+/**************************************************
  * BCMA bus ops
  **************************************************/
 
@@ -1404,11 +1470,18 @@ static int bgmac_probe(struct bcma_device *core)
 	if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
 		bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");
 
+	err = bgmac_mii_register(bgmac);
+	if (err) {
+		bgmac_err(bgmac, "Cannot register MDIO\n");
+		err = -ENOTSUPP;
+		goto err_dma_free;
+	}
+
 	err = register_netdev(bgmac->net_dev);
 	if (err) {
 		bgmac_err(bgmac, "Cannot register net device\n");
 		err = -ENOTSUPP;
-		goto err_dma_free;
+		goto err_mii_unregister;
 	}
 
 	netif_carrier_off(net_dev);
@@ -1417,6 +1490,8 @@ static int bgmac_probe(struct bcma_device *core)
 
 	return 0;
 
+err_mii_unregister:
+	bgmac_mii_unregister(bgmac);
 err_dma_free:
 	bgmac_dma_free(bgmac);
 
@@ -1433,6 +1508,7 @@ static void bgmac_remove(struct bcma_device *core)
 
 	netif_napi_del(&bgmac->napi);
 	unregister_netdev(bgmac->net_dev);
+	bgmac_mii_unregister(bgmac);
 	bgmac_dma_free(bgmac);
 	bcma_set_drvdata(core, NULL);
 	free_netdev(bgmac->net_dev);
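
Note: with the MII bus registered before the net device, bgmac_probe() has to
unwind in strict reverse order, and bgmac_remove() mirrors the same order after
unregister_netdev(). A hedged outline of the pairing (names from the diff):

    err = bgmac_mii_register(bgmac);   /* mdiobus_alloc + mdiobus_register */
    if (err)
            goto err_dma_free;

    err = register_netdev(bgmac->net_dev);
    if (err)
            goto err_mii_unregister;   /* undo the MII bus first */
    return 0;

    err_mii_unregister:
            bgmac_mii_unregister(bgmac);   /* unregister + kfree(irq) + free */
    err_dma_free:
            bgmac_dma_free(bgmac);
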
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 4ede614c81f8..98d4b5fcc070 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -399,6 +399,7 @@ struct bgmac {
399 struct bcma_device *cmn; /* Reference to CMN core for BCM4706 */ 399 struct bcma_device *cmn; /* Reference to CMN core for BCM4706 */
400 struct net_device *net_dev; 400 struct net_device *net_dev;
401 struct napi_struct napi; 401 struct napi_struct napi;
402 struct mii_bus *mii_bus;
402 403
403 /* DMA */ 404 /* DMA */
404 struct bgmac_dma_ring tx_ring[BGMAC_MAX_TX_RINGS]; 405 struct bgmac_dma_ring tx_ring[BGMAC_MAX_TX_RINGS];
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 2f0ba8f2fd6c..e709296e3b85 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -854,12 +854,11 @@ bnx2_alloc_mem(struct bnx2 *bp)
 		sizeof(struct statistics_block);
 
 	status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
-					&bp->status_blk_mapping, GFP_KERNEL);
+					&bp->status_blk_mapping,
+					GFP_KERNEL | __GFP_ZERO);
 	if (status_blk == NULL)
 		goto alloc_mem_err;
 
-	memset(status_blk, 0, bp->status_stats_size);
-
 	bnapi = &bp->bnx2_napi[0];
 	bnapi->status_blk.msi = status_blk;
 	bnapi->hw_tx_cons_ptr =
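
Note: passing __GFP_ZERO to dma_alloc_coherent() folds the follow-up memset()
into the allocation itself, which is shorter and immune to the classic bug of
clearing with the wrong size. On kernels that carry the helper, the same thing
can be spelled with dma_zalloc_coherent() (hedged: availability depends on the
kernel version):

    status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
                                     &bp->status_blk_mapping, GFP_KERNEL);
    if (status_blk == NULL)
            goto alloc_mem_err;
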
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index e4605a965084..c6303428f9e9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -492,7 +492,6 @@ enum bnx2x_tpa_mode_t {
 struct bnx2x_fastpath {
 	struct bnx2x		*bp; /* parent */
 
-#define BNX2X_NAPI_WEIGHT	128
 	struct napi_struct	napi;
 	union host_hc_status_block	status_blk;
 	/* chip independed shortcuts into sb structure */
@@ -613,9 +612,10 @@ struct bnx2x_fastpath {
  *   START_BD		- describes packed
  *   START_BD(splitted)	- includes unpaged data segment for GSO
  *   PARSING_BD		- for TSO and CSUM data
+ *   PARSING_BD2	- for encapsulation data
  *   Frag BDs		- decribes pages for frags
  */
-#define BDS_PER_TX_PKT		3
+#define BDS_PER_TX_PKT		4
 #define MAX_BDS_PER_TX_PKT	(MAX_SKB_FRAGS + BDS_PER_TX_PKT)
 /* max BDs per tx packet including next pages */
 #define MAX_DESC_PER_TX_PKT	(MAX_BDS_PER_TX_PKT + \
@@ -730,18 +730,24 @@ struct bnx2x_fastpath {
 #define SKB_CS(skb)		(*(u16 *)(skb_transport_header(skb) + \
 					  skb->csum_offset))
 
-#define pbd_tcp_flags(skb)	(ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff)
+#define pbd_tcp_flags(tcp_hdr)	(ntohl(tcp_flag_word(tcp_hdr))>>16 & 0xff)
 
 #define XMIT_PLAIN		0
-#define XMIT_CSUM_V4		0x1
-#define XMIT_CSUM_V6		0x2
-#define XMIT_CSUM_TCP		0x4
-#define XMIT_GSO_V4		0x8
-#define XMIT_GSO_V6		0x10
+#define XMIT_CSUM_V4		(1 << 0)
+#define XMIT_CSUM_V6		(1 << 1)
+#define XMIT_CSUM_TCP		(1 << 2)
+#define XMIT_GSO_V4		(1 << 3)
+#define XMIT_GSO_V6		(1 << 4)
+#define XMIT_CSUM_ENC_V4	(1 << 5)
+#define XMIT_CSUM_ENC_V6	(1 << 6)
+#define XMIT_GSO_ENC_V4		(1 << 7)
+#define XMIT_GSO_ENC_V6		(1 << 8)
 
-#define XMIT_CSUM		(XMIT_CSUM_V4 | XMIT_CSUM_V6)
-#define XMIT_GSO		(XMIT_GSO_V4 | XMIT_GSO_V6)
+#define XMIT_CSUM_ENC		(XMIT_CSUM_ENC_V4 | XMIT_CSUM_ENC_V6)
+#define XMIT_GSO_ENC		(XMIT_GSO_ENC_V4 | XMIT_GSO_ENC_V6)
 
+#define XMIT_CSUM		(XMIT_CSUM_V4 | XMIT_CSUM_V6 | XMIT_CSUM_ENC)
+#define XMIT_GSO		(XMIT_GSO_V4 | XMIT_GSO_V6 | XMIT_GSO_ENC)
 
 /* stuff added to make the code fit 80Col */
 #define CQE_TYPE(cqe_fp_flags)	 ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE)
@@ -1215,14 +1221,16 @@ enum {
 	BNX2X_SP_RTNL_ENABLE_SRIOV,
 	BNX2X_SP_RTNL_VFPF_MCAST,
 	BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
+	BNX2X_SP_RTNL_HYPERVISOR_VLAN,
 };
 
 
 struct bnx2x_prev_path_list {
+	struct list_head list;
 	u8 bus;
 	u8 slot;
 	u8 path;
-	struct list_head list;
+	u8 aer;
 	u8 undi;
 };
 
@@ -1269,6 +1277,8 @@ struct bnx2x {
 #define BP_FW_MB_IDX(bp)	BP_FW_MB_IDX_VN(bp, BP_VN(bp))
 
 #ifdef CONFIG_BNX2X_SRIOV
+	/* protects vf2pf mailbox from simultaneous access */
+	struct mutex		vf2pf_mutex;
 	/* vf pf channel mailbox contains request and response buffers */
 	struct bnx2x_vf_mbx_msg	*vf2pf_mbox;
 	dma_addr_t		vf2pf_mbox_mapping;
@@ -1281,6 +1291,8 @@ struct bnx2x {
 	dma_addr_t		pf2vf_bulletin_mapping;
 
 	struct pf_vf_bulletin_content	old_bulletin;
+
+	u16 requested_nr_virtfn;
 #endif /* CONFIG_BNX2X_SRIOV */
 
 	struct net_device	*dev;
@@ -1944,12 +1956,9 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
 void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
 			    bool is_pf);
 
 #define BNX2X_ILT_ZALLOC(x, y, size) \
-	do { \
-		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
-		if (x) \
-			memset(x, 0, size); \
-	} while (0)
+	x = dma_alloc_coherent(&bp->pdev->dev, size, y, \
+			       GFP_KERNEL | __GFP_ZERO)
 
 #define BNX2X_ILT_FREE(x, y, size) \
 	do { \
@@ -2286,7 +2295,7 @@ static const u32 dmae_reg_go_c[] = {
 	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
 };
 
-void bnx2x_set_ethtool_ops(struct net_device *netdev);
+void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev);
 void bnx2x_notify_link_changed(struct bnx2x *bp);
 
 #define BNX2X_MF_SD_PROTOCOL(bp) \
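
Note: rewriting the XMIT_* constants as (1 << n) makes room for the four new
encapsulation bits, and the composite masks now fold the inner-header (ENC)
variants in, so existing "xmit_type & XMIT_CSUM" tests keep working for tunnelled
traffic. The extra second parsing BD that encapsulated packets consume is also why
BDS_PER_TX_PKT grows from 3 to 4. An illustrative composition (example values
assumed):

    u32 xmit_type = XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_CSUM_ENC_V4;

    if (xmit_type & XMIT_CSUM_ENC)  /* true: ENC_V4 is part of the ENC mask */
            nbd++;                  /* budget the 2nd parsing BD */
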
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4046f97378c2..352e58ede4d5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -451,7 +451,8 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
  * Compute number of aggregated segments, and gso_type.
  */
 static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
-				 u16 len_on_bd, unsigned int pkt_len)
+				 u16 len_on_bd, unsigned int pkt_len,
+				 u16 num_of_coalesced_segs)
 {
 	/* TPA aggregation won't have either IP options or TCP options
 	 * other than timestamp or IPv6 extension headers.
@@ -480,8 +481,7 @@ static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
 	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
 	 * to skb_shinfo(skb)->gso_segs
 	 */
-	NAPI_GRO_CB(skb)->count = DIV_ROUND_UP(pkt_len - hdrs_len,
-					       skb_shinfo(skb)->gso_size);
+	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
 }
 
 static int bnx2x_alloc_rx_sge(struct bnx2x *bp,
@@ -537,7 +537,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	/* This is needed in order to enable forwarding support */
 	if (frag_size)
 		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
-				     le16_to_cpu(cqe->pkt_len));
+				     le16_to_cpu(cqe->pkt_len),
+				     le16_to_cpu(cqe->num_of_coalesced_segs));
 
 #ifdef BNX2X_STOP_ON_ERROR
 	if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
@@ -2009,7 +2010,7 @@ static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
  * Cleans the object that have internal lists without sending
  * ramrods. Should be run when interrutps are disabled.
  */
-static void bnx2x_squeeze_objects(struct bnx2x *bp)
+void bnx2x_squeeze_objects(struct bnx2x *bp)
 {
 	int rc;
 	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
@@ -2774,7 +2775,7 @@ load_error0:
 #endif /* ! BNX2X_STOP_ON_ERROR */
 }
 
-static int bnx2x_drain_tx_queues(struct bnx2x *bp)
+int bnx2x_drain_tx_queues(struct bnx2x *bp)
 {
 	u8 rc = 0, cos, i;
 
@@ -3086,11 +3087,11 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
  * to ease the pain of our fellow microcode engineers
  * we use one mapping for both BDs
  */
-static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
-				   struct bnx2x_fp_txdata *txdata,
-				   struct sw_tx_bd *tx_buf,
-				   struct eth_tx_start_bd **tx_bd, u16 hlen,
-				   u16 bd_prod, int nbd)
+static u16 bnx2x_tx_split(struct bnx2x *bp,
+			  struct bnx2x_fp_txdata *txdata,
+			  struct sw_tx_bd *tx_buf,
+			  struct eth_tx_start_bd **tx_bd, u16 hlen,
+			  u16 bd_prod)
 {
 	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
 	struct eth_tx_bd *d_tx_bd;
@@ -3098,11 +3099,10 @@ static u16 bnx2x_tx_split(struct bnx2x *bp,
 	int old_len = le16_to_cpu(h_tx_bd->nbytes);
 
 	/* first fix first BD */
-	h_tx_bd->nbd = cpu_to_le16(nbd);
 	h_tx_bd->nbytes = cpu_to_le16(hlen);
 
-	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
-	   h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
+	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
+	   h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
 
 	/* now get a new data BD
 	 * (after the pbd) and fill it */
@@ -3131,7 +3131,7 @@ static u16 bnx2x_tx_split(struct bnx2x *bp,
 
 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
-static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
+static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
 {
 	__sum16 tsum = (__force __sum16) csum;
 
@@ -3146,30 +3146,47 @@ static inline __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
 	return bswab16(tsum);
 }
 
-static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
+static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
 {
 	u32 rc;
+	__u8 prot = 0;
+	__be16 protocol;
 
 	if (skb->ip_summed != CHECKSUM_PARTIAL)
-		rc = XMIT_PLAIN;
+		return XMIT_PLAIN;
 
-	else {
-		if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
-			rc = XMIT_CSUM_V6;
-			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
-				rc |= XMIT_CSUM_TCP;
+	protocol = vlan_get_protocol(skb);
+	if (protocol == htons(ETH_P_IPV6)) {
+		rc = XMIT_CSUM_V6;
+		prot = ipv6_hdr(skb)->nexthdr;
+	} else {
+		rc = XMIT_CSUM_V4;
+		prot = ip_hdr(skb)->protocol;
+	}
 
+	if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
+		if (inner_ip_hdr(skb)->version == 6) {
+			rc |= XMIT_CSUM_ENC_V6;
+			if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+				rc |= XMIT_CSUM_TCP;
 		} else {
-			rc = XMIT_CSUM_V4;
-			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+			rc |= XMIT_CSUM_ENC_V4;
+			if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
 				rc |= XMIT_CSUM_TCP;
 		}
 	}
+	if (prot == IPPROTO_TCP)
+		rc |= XMIT_CSUM_TCP;
 
-	if (skb_is_gso_v6(skb))
-		rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
-	else if (skb_is_gso(skb))
-		rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
+	if (skb_is_gso_v6(skb)) {
+		rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
+		if (rc & XMIT_CSUM_ENC)
+			rc |= XMIT_GSO_ENC_V6;
+	} else if (skb_is_gso(skb)) {
+		rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
+		if (rc & XMIT_CSUM_ENC)
+			rc |= XMIT_GSO_ENC_V4;
+	}
 
 	return rc;
 }
@@ -3254,14 +3271,23 @@ exit_lbl:
 }
 #endif
 
-static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
-					u32 xmit_type)
+static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
+				 u32 xmit_type)
 {
+	struct ipv6hdr *ipv6;
+
 	*parsing_data |= (skb_shinfo(skb)->gso_size <<
 			  ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
 			  ETH_TX_PARSE_BD_E2_LSO_MSS;
-	if ((xmit_type & XMIT_GSO_V6) &&
-	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
+
+	if (xmit_type & XMIT_GSO_ENC_V6)
+		ipv6 = inner_ipv6_hdr(skb);
+	else if (xmit_type & XMIT_GSO_V6)
+		ipv6 = ipv6_hdr(skb);
+	else
+		ipv6 = NULL;
+
+	if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
 		*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
 }
 
@@ -3272,13 +3298,13 @@ static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
  * @pbd:	parse BD
  * @xmit_type:	xmit flags
  */
-static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
-				     struct eth_tx_parse_bd_e1x *pbd,
-				     u32 xmit_type)
+static void bnx2x_set_pbd_gso(struct sk_buff *skb,
+			      struct eth_tx_parse_bd_e1x *pbd,
+			      u32 xmit_type)
 {
 	pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
 	pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
-	pbd->tcp_flags = pbd_tcp_flags(skb);
+	pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
 
 	if (xmit_type & XMIT_GSO_V4) {
 		pbd->ip_id = bswab16(ip_hdr(skb)->id);
@@ -3298,6 +3324,40 @@ static void bnx2x_set_pbd_gso(struct sk_buff *skb,
 }
 
 /**
+ * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
+ *
+ * @bp:			driver handle
+ * @skb:		packet skb
+ * @parsing_data:	data to be updated
+ * @xmit_type:		xmit flags
+ *
+ * 57712/578xx related, when skb has encapsulation
+ */
+static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
+				 u32 *parsing_data, u32 xmit_type)
+{
+	*parsing_data |=
+		((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
+		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
+		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
+
+	if (xmit_type & XMIT_CSUM_TCP) {
+		*parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
+			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
+			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
+
+		return skb_inner_transport_header(skb) +
+			inner_tcp_hdrlen(skb) - skb->data;
+	}
+
+	/* We support checksum offload for TCP and UDP only.
+	 * No need to pass the UDP header length - it's a constant.
+	 */
+	return skb_inner_transport_header(skb) +
+		sizeof(struct udphdr) - skb->data;
+}
+
+/**
  * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
 *
 * @bp:			driver handle
@@ -3305,15 +3365,15 @@ static void bnx2x_set_pbd_gso(struct sk_buff *skb,
  * @parsing_data:	data to be updated
  * @xmit_type:		xmit flags
  *
- * 57712 related
+ * 57712/578xx related
  */
-static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
-				       u32 *parsing_data, u32 xmit_type)
+static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
+				u32 *parsing_data, u32 xmit_type)
 {
 	*parsing_data |=
 		((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
-		ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
-		ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
+		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
+		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
 
 	if (xmit_type & XMIT_CSUM_TCP) {
 		*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
@@ -3328,17 +3388,15 @@ static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
 	return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
 }
 
-static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
-	struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
+/* set FW indication according to inner or outer protocols if tunneled */
+static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
+			       struct eth_tx_start_bd *tx_start_bd,
+			       u32 xmit_type)
 {
 	tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
 
-	if (xmit_type & XMIT_CSUM_V4)
-		tx_start_bd->bd_flags.as_bitfield |=
-					ETH_TX_BD_FLAGS_IP_CSUM;
-	else
-		tx_start_bd->bd_flags.as_bitfield |=
-					ETH_TX_BD_FLAGS_IPV6;
+	if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
+		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
 
 	if (!(xmit_type & XMIT_CSUM_TCP))
 		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
@@ -3352,9 +3410,9 @@ static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
  * @pbd:	parse BD to be updated
  * @xmit_type:	xmit flags
  */
-static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
-	struct eth_tx_parse_bd_e1x *pbd,
-	u32 xmit_type)
+static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
+			     struct eth_tx_parse_bd_e1x *pbd,
+			     u32 xmit_type)
 {
 	u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
 
@@ -3400,6 +3458,70 @@ static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
 	return hlen;
 }
 
+static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
+				      struct eth_tx_parse_bd_e2 *pbd_e2,
+				      struct eth_tx_parse_2nd_bd *pbd2,
+				      u16 *global_data,
+				      u32 xmit_type)
+{
+	u16 hlen_w = 0;
+	u8 outerip_off, outerip_len = 0;
+	/* from outer IP to transport */
+	hlen_w = (skb_inner_transport_header(skb) -
+		  skb_network_header(skb)) >> 1;
+
+	/* transport len */
+	if (xmit_type & XMIT_CSUM_TCP)
+		hlen_w += inner_tcp_hdrlen(skb) >> 1;
+	else
+		hlen_w += sizeof(struct udphdr) >> 1;
+
+	pbd2->fw_ip_hdr_to_payload_w = hlen_w;
+
+	if (xmit_type & XMIT_CSUM_ENC_V4) {
+		struct iphdr *iph = ip_hdr(skb);
+		pbd2->fw_ip_csum_wo_len_flags_frag =
+			bswab16(csum_fold((~iph->check) -
+					  iph->tot_len - iph->frag_off));
+	} else {
+		pbd2->fw_ip_hdr_to_payload_w =
+			hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
+	}
+
+	pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
+
+	pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
+
+	if (xmit_type & XMIT_GSO_V4) {
+		pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
+
+		pbd_e2->data.tunnel_data.pseudo_csum =
+			bswab16(~csum_tcpudp_magic(
+					inner_ip_hdr(skb)->saddr,
+					inner_ip_hdr(skb)->daddr,
+					0, IPPROTO_TCP, 0));
+
+		outerip_len = ip_hdr(skb)->ihl << 1;
+	} else {
+		pbd_e2->data.tunnel_data.pseudo_csum =
+			bswab16(~csum_ipv6_magic(
+					&inner_ipv6_hdr(skb)->saddr,
+					&inner_ipv6_hdr(skb)->daddr,
+					0, IPPROTO_TCP, 0));
+	}
+
+	outerip_off = (skb_network_header(skb) - skb->data) >> 1;
+
+	*global_data |=
+		outerip_off |
+		(!!(xmit_type & XMIT_CSUM_V6) <<
+			ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
+		(outerip_len <<
+			ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
+		((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
+			ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
+}
+
 /* called with netif_tx_lock
  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
  * netif_wake_queue()
@@ -3415,6 +3537,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
 	struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
 	struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
+	struct eth_tx_parse_2nd_bd *pbd2 = NULL;
 	u32 pbd_e2_parsing_data = 0;
 	u16 pkt_prod, bd_prod;
 	int nbd, txq_index;
@@ -3482,7 +3605,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		mac_type = MULTICAST_ADDRESS;
 	}
 
-#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
+#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
 	/* First, check if we need to linearize the skb (due to FW
 	   restrictions). No need to check fragmentation if page size > 8K
 	   (there will be no violation to FW restrictions) */
@@ -3530,12 +3653,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	first_bd = tx_start_bd;
 
 	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
-	SET_FLAG(tx_start_bd->general_data,
-		 ETH_TX_START_BD_PARSE_NBDS,
-		 0);
 
-	/* header nbd */
-	SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
+	/* header nbd: indirectly zero other flags! */
+	tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
 
 	/* remember the first BD of the packet */
 	tx_buf->first_bd = txdata->tx_bd_prod;
@@ -3555,19 +3675,16 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		/* when transmitting in a vf, start bd must hold the ethertype
 		 * for fw to enforce it
 		 */
-#ifndef BNX2X_STOP_ON_ERROR
-		if (IS_VF(bp)) {
-#endif
+		if (IS_VF(bp))
 			tx_start_bd->vlan_or_ethertype =
 				cpu_to_le16(ntohs(eth->h_proto));
-#ifndef BNX2X_STOP_ON_ERROR
-		} else {
+		else
 			/* used by FW for packet accounting */
 			tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
-		}
-#endif
 	}
 
+	nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
+
 	/* turn on parsing and get a BD */
 	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
 
@@ -3577,23 +3694,58 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (!CHIP_IS_E1x(bp)) {
 		pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
 		memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
-		/* Set PBD in checksum offload case */
-		if (xmit_type & XMIT_CSUM)
+
+		if (xmit_type & XMIT_CSUM_ENC) {
+			u16 global_data = 0;
+
+			/* Set PBD in enc checksum offload case */
+			hlen = bnx2x_set_pbd_csum_enc(bp, skb,
+						      &pbd_e2_parsing_data,
+						      xmit_type);
+
+			/* turn on 2nd parsing and get a BD */
+			bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+
+			pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
+
+			memset(pbd2, 0, sizeof(*pbd2));
+
+			pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
+				(skb_inner_network_header(skb) -
+				 skb->data) >> 1;
+
+			if (xmit_type & XMIT_GSO_ENC)
+				bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
+							  &global_data,
+							  xmit_type);
+
+			pbd2->global_data = cpu_to_le16(global_data);
+
+			/* add addition parse BD indication to start BD */
+			SET_FLAG(tx_start_bd->general_data,
+				 ETH_TX_START_BD_PARSE_NBDS, 1);
+			/* set encapsulation flag in start BD */
+			SET_FLAG(tx_start_bd->general_data,
+				 ETH_TX_START_BD_TUNNEL_EXIST, 1);
+			nbd++;
+		} else if (xmit_type & XMIT_CSUM) {
+			/* Set PBD in checksum offload case w/o encapsulation */
 			hlen = bnx2x_set_pbd_csum_e2(bp, skb,
 						     &pbd_e2_parsing_data,
 						     xmit_type);
+		}
 
-		if (IS_MF_SI(bp) || IS_VF(bp)) {
-			/* fill in the MAC addresses in the PBD - for local
-			 * switching
-			 */
-			bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
-					      &pbd_e2->src_mac_addr_mid,
-					      &pbd_e2->src_mac_addr_lo,
+		/* Add the macs to the parsing BD this is a vf */
+		if (IS_VF(bp)) {
+			/* override GRE parameters in BD */
+			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
+					      &pbd_e2->data.mac_addr.src_mid,
+					      &pbd_e2->data.mac_addr.src_lo,
 					      eth->h_source);
-			bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
-					      &pbd_e2->dst_mac_addr_mid,
-					      &pbd_e2->dst_mac_addr_lo,
+
+			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
+					      &pbd_e2->data.mac_addr.dst_mid,
+					      &pbd_e2->data.mac_addr.dst_lo,
 					      eth->h_dest);
 		}
 
@@ -3615,14 +3767,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* Setup the data pointer of the first BD of the packet */
 	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
-	nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
 	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
 	pkt_size = tx_start_bd->nbytes;
 
 	DP(NETIF_MSG_TX_QUEUED,
-	   "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
+	   "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
 	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
-	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
+	   le16_to_cpu(tx_start_bd->nbytes),
 	   tx_start_bd->bd_flags.as_bitfield,
 	   le16_to_cpu(tx_start_bd->vlan_or_ethertype));
 
@@ -3635,10 +3786,12 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
 
-		if (unlikely(skb_headlen(skb) > hlen))
+		if (unlikely(skb_headlen(skb) > hlen)) {
+			nbd++;
 			bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
 						 &tx_start_bd, hlen,
-						 bd_prod, ++nbd);
+						 bd_prod);
+		}
 		if (!CHIP_IS_E1x(bp))
 			bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
 					     xmit_type);
@@ -3728,9 +3881,13 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (pbd_e2)
 		DP(NETIF_MSG_TX_QUEUED,
 		   "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
-		   pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
-		   pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
-		   pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
+		   pbd_e2,
+		   pbd_e2->data.mac_addr.dst_hi,
+		   pbd_e2->data.mac_addr.dst_mid,
+		   pbd_e2->data.mac_addr.dst_lo,
+		   pbd_e2->data.mac_addr.src_hi,
+		   pbd_e2->data.mac_addr.src_mid,
+		   pbd_e2->data.mac_addr.src_lo,
		   pbd_e2->parsing_data);
	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
 
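
Note: the header lengths handed to the firmware throughout
bnx2x_update_pbds_gso_enc() are in 16-bit words, hence the recurring ">> 1". A
worked example under assumed header sizes (20-byte outer IPv4, 4-byte GRE,
20-byte inner IPv4, 20-byte inner TCP):

    /* outer IP start .. inner L4 start: 20 + 4 + 20 = 44 bytes = 22 words */
    u16 hlen_w = (skb_inner_transport_header(skb) -
                  skb_network_header(skb)) >> 1;
    hlen_w += inner_tcp_hdrlen(skb) >> 1;  /* + 20 bytes = 10 words -> 32 */
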
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index aee7671ff4c1..54e1b149acb3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -50,13 +50,13 @@ extern int int_mode;
 		} \
 	} while (0)
 
-#define BNX2X_PCI_ALLOC(x, y, size) \
-	do { \
-		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
-		if (x == NULL) \
-			goto alloc_mem_err; \
-		memset((void *)x, 0, size); \
-	} while (0)
+#define BNX2X_PCI_ALLOC(x, y, size) \
+do { \
+	x = dma_alloc_coherent(&bp->pdev->dev, size, y, \
+			       GFP_KERNEL | __GFP_ZERO); \
+	if (x == NULL) \
+		goto alloc_mem_err; \
+} while (0)
 
 #define BNX2X_ALLOC(x, size) \
 	do { \
@@ -496,7 +496,10 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
 /* setup_tc callback */
 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);
 
+int bnx2x_get_vf_config(struct net_device *dev, int vf,
+			struct ifla_vf_info *ivi);
 int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac);
+int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
 
 /* select_queue callback */
 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
@@ -834,7 +837,7 @@ static inline void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
 	/* Add NAPI objects */
 	for_each_rx_queue_cnic(bp, i)
 		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
-			       bnx2x_poll, BNX2X_NAPI_WEIGHT);
+			       bnx2x_poll, NAPI_POLL_WEIGHT);
 }
 
 static inline void bnx2x_add_all_napi(struct bnx2x *bp)
@@ -844,7 +847,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
 	/* Add NAPI objects */
 	for_each_eth_queue(bp, i)
 		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
-			       bnx2x_poll, BNX2X_NAPI_WEIGHT);
+			       bnx2x_poll, NAPI_POLL_WEIGHT);
 }
 
 static inline void bnx2x_del_all_napi_cnic(struct bnx2x *bp)
@@ -970,6 +973,9 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
 	else /* CHIP_IS_E1X */
 		start_params->network_cos_mode = FW_WRR;
 
+	start_params->gre_tunnel_mode = IPGRE_TUNNEL;
+	start_params->gre_tunnel_rss = GRE_INNER_HEADERS_RSS;
+
 	return bnx2x_func_state_change(bp, &func_params);
 }
 
@@ -1396,4 +1402,8 @@ static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
 *
 */
 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len);
+
+int bnx2x_drain_tx_queues(struct bnx2x *bp);
+void bnx2x_squeeze_objects(struct bnx2x *bp);
+
 #endif /* BNX2X_CMN_H */
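
Note: BNX2X_PCI_ALLOC() keeps its implicit "goto alloc_mem_err" contract, so
every caller still provides that label; only the zeroing moved into the
allocation flags. Usage sketch (hypothetical example_alloc(), field names from
the SR-IOV hunk above):

    static int example_alloc(struct bnx2x *bp)
    {
            BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping,
                            sizeof(struct bnx2x_vf_mbx_msg));
            return 0;

    alloc_mem_err:
            return -ENOMEM;
    }
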
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index edfa67adf2f9..129d6b21317c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1393,10 +1393,9 @@ static int bnx2x_get_module_eeprom(struct net_device *dev,
 				   u8 *data)
 {
 	struct bnx2x *bp = netdev_priv(dev);
-	int rc = 0, phy_idx;
+	int rc = -EINVAL, phy_idx;
 	u8 *user_data = data;
-	int remaining_len = ee->len, xfer_size;
-	unsigned int page_off = ee->offset;
+	unsigned int start_addr = ee->offset, xfer_size = 0;
 
 	if (!netif_running(dev)) {
 		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
@@ -1405,21 +1404,52 @@
 	}
 
 	phy_idx = bnx2x_get_cur_phy_idx(bp);
-	bnx2x_acquire_phy_lock(bp);
-	while (!rc && remaining_len > 0) {
-		xfer_size = (remaining_len > SFP_EEPROM_PAGE_SIZE) ?
-			SFP_EEPROM_PAGE_SIZE : remaining_len;
+
+	/* Read A0 section */
+	if (start_addr < ETH_MODULE_SFF_8079_LEN) {
+		/* Limit transfer size to the A0 section boundary */
+		if (start_addr + ee->len > ETH_MODULE_SFF_8079_LEN)
+			xfer_size = ETH_MODULE_SFF_8079_LEN - start_addr;
+		else
+			xfer_size = ee->len;
+		bnx2x_acquire_phy_lock(bp);
 		rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
 						  &bp->link_params,
-						  page_off,
+						  I2C_DEV_ADDR_A0,
+						  start_addr,
 						  xfer_size,
 						  user_data);
-		remaining_len -= xfer_size;
+		bnx2x_release_phy_lock(bp);
+		if (rc) {
+			DP(BNX2X_MSG_ETHTOOL, "Failed reading A0 section\n");
+
+			return -EINVAL;
+		}
 		user_data += xfer_size;
-		page_off += xfer_size;
+		start_addr += xfer_size;
 	}
 
-	bnx2x_release_phy_lock(bp);
+	/* Read A2 section */
+	if ((start_addr >= ETH_MODULE_SFF_8079_LEN) &&
+	    (start_addr < ETH_MODULE_SFF_8472_LEN)) {
+		xfer_size = ee->len - xfer_size;
+		/* Limit transfer size to the A2 section boundary */
+		if (start_addr + xfer_size > ETH_MODULE_SFF_8472_LEN)
+			xfer_size = ETH_MODULE_SFF_8472_LEN - start_addr;
+		start_addr -= ETH_MODULE_SFF_8079_LEN;
+		bnx2x_acquire_phy_lock(bp);
+		rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
+						  &bp->link_params,
+						  I2C_DEV_ADDR_A2,
+						  start_addr,
+						  xfer_size,
+						  user_data);
+		bnx2x_release_phy_lock(bp);
+		if (rc) {
+			DP(BNX2X_MSG_ETHTOOL, "Failed reading A2 section\n");
+			return -EINVAL;
+		}
+	}
 	return rc;
 }
 
@@ -1427,24 +1457,50 @@ static int bnx2x_get_module_info(struct net_device *dev,
 				 struct ethtool_modinfo *modinfo)
 {
 	struct bnx2x *bp = netdev_priv(dev);
-	int phy_idx;
+	int phy_idx, rc;
+	u8 sff8472_comp, diag_type;
+
 	if (!netif_running(dev)) {
 		DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
 		   "cannot access eeprom when the interface is down\n");
 		return -EAGAIN;
 	}
-
 	phy_idx = bnx2x_get_cur_phy_idx(bp);
-	switch (bp->link_params.phy[phy_idx].media_type) {
-	case ETH_PHY_SFPP_10G_FIBER:
-	case ETH_PHY_SFP_1G_FIBER:
-	case ETH_PHY_DA_TWINAX:
+	bnx2x_acquire_phy_lock(bp);
+	rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
+					  &bp->link_params,
+					  I2C_DEV_ADDR_A0,
+					  SFP_EEPROM_SFF_8472_COMP_ADDR,
+					  SFP_EEPROM_SFF_8472_COMP_SIZE,
+					  &sff8472_comp);
+	bnx2x_release_phy_lock(bp);
+	if (rc) {
+		DP(BNX2X_MSG_ETHTOOL, "Failed reading SFF-8472 comp field\n");
+		return -EINVAL;
+	}
+
+	bnx2x_acquire_phy_lock(bp);
+	rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx],
+					  &bp->link_params,
+					  I2C_DEV_ADDR_A0,
+					  SFP_EEPROM_DIAG_TYPE_ADDR,
+					  SFP_EEPROM_DIAG_TYPE_SIZE,
+					  &diag_type);
+	bnx2x_release_phy_lock(bp);
+	if (rc) {
+		DP(BNX2X_MSG_ETHTOOL, "Failed reading Diag Type field\n");
+		return -EINVAL;
+	}
+
+	if (!sff8472_comp ||
+	    (diag_type & SFP_EEPROM_DIAG_ADDR_CHANGE_REQ)) {
 		modinfo->type = ETH_MODULE_SFF_8079;
 		modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
-		return 0;
-	default:
-		return -EOPNOTSUPP;
+	} else {
+		modinfo->type = ETH_MODULE_SFF_8472;
+		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
 	}
+	return 0;
 }
 
 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
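bnx2x_get_module_info now keys off two A0 bytes defined by SFF-8472: the
compliance field at 0x5e and the diagnostic-monitoring type at 0x5c, whose
bit 2 marks modules that require an I2C address-change sequence before the A2
page is usable. A hedged sketch of the decision, assuming a0[] already holds
the first 256 EEPROM bytes (constants mirror the defines this series adds to
bnx2x_link.h further down):

	#define DIAG_TYPE_ADDR		0x5c	/* cf. SFP_EEPROM_DIAG_TYPE_ADDR */
	#define DIAG_ADDR_CHANGE_REQ	(1 << 2)
	#define SFF_8472_COMP_ADDR	0x5e	/* cf. SFP_EEPROM_SFF_8472_COMP_ADDR */

	/* Return how many EEPROM bytes are safely readable: 512 when the
	 * module advertises SFF-8472 and needs no address change, else 256.
	 */
	static unsigned int module_eeprom_len(const unsigned char a0[256])
	{
		if (!a0[SFF_8472_COMP_ADDR] ||
		    (a0[DIAG_TYPE_ADDR] & DIAG_ADDR_CHANGE_REQ))
			return 256;
		return 512;
	}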
@@ -3232,7 +3288,32 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
 	.get_ts_info		= ethtool_op_get_ts_info,
 };
 
-void bnx2x_set_ethtool_ops(struct net_device *netdev)
+static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
+	.get_settings		= bnx2x_get_settings,
+	.set_settings		= bnx2x_set_settings,
+	.get_drvinfo		= bnx2x_get_drvinfo,
+	.get_msglevel		= bnx2x_get_msglevel,
+	.set_msglevel		= bnx2x_set_msglevel,
+	.get_link		= bnx2x_get_link,
+	.get_coalesce		= bnx2x_get_coalesce,
+	.get_ringparam		= bnx2x_get_ringparam,
+	.set_ringparam		= bnx2x_set_ringparam,
+	.get_sset_count		= bnx2x_get_sset_count,
+	.get_strings		= bnx2x_get_strings,
+	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
+	.get_rxnfc		= bnx2x_get_rxnfc,
+	.set_rxnfc		= bnx2x_set_rxnfc,
+	.get_rxfh_indir_size	= bnx2x_get_rxfh_indir_size,
+	.get_rxfh_indir		= bnx2x_get_rxfh_indir,
+	.set_rxfh_indir		= bnx2x_set_rxfh_indir,
+	.get_channels		= bnx2x_get_channels,
+	.set_channels		= bnx2x_set_channels,
+};
+
+void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev)
 {
-	SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops);
+	if (IS_PF(bp))
+		SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops);
+	else /* vf */
+		SET_ETHTOOL_OPS(netdev, &bnx2x_vf_ethtool_ops);
 }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index e5f808377c91..40f22c6794cd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -30,31 +30,31 @@
  * IRO[138].m2) + ((sbId) * IRO[138].m3))
 #define CSTORM_IGU_MODE_OFFSET (IRO[157].base)
 #define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
-	(IRO[316].base + ((pfId) * IRO[316].m1))
-#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
 	(IRO[317].base + ((pfId) * IRO[317].m1))
+#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+	(IRO[318].base + ((pfId) * IRO[318].m1))
 #define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
-	(IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
+	(IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
 #define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
-	(IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
+	(IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
 #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
-	(IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
+	(IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
 #define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
-	(IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
+	(IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
 #define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
-	(IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * IRO[308].m2))
+	(IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
 #define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
-	(IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2))
+	(IRO[315].base + ((pfId) * IRO[315].m1) + ((iscsiEqId) * IRO[315].m2))
 #define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
-	(IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
+	(IRO[314].base + ((pfId) * IRO[314].m1) + ((iscsiEqId) * IRO[314].m2))
 #define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
-	(IRO[315].base + ((pfId) * IRO[315].m1))
+	(IRO[316].base + ((pfId) * IRO[316].m1))
 #define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-	(IRO[307].base + ((pfId) * IRO[307].m1))
+	(IRO[308].base + ((pfId) * IRO[308].m1))
 #define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-	(IRO[306].base + ((pfId) * IRO[306].m1))
+	(IRO[307].base + ((pfId) * IRO[307].m1))
 #define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
-	(IRO[305].base + ((pfId) * IRO[305].m1))
+	(IRO[306].base + ((pfId) * IRO[306].m1))
 #define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
 	(IRO[151].base + ((funcId) * IRO[151].m1))
 #define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
@@ -114,7 +114,7 @@
 #define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
 	(IRO[268].base + ((pfId) * IRO[268].m1))
 #define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
-	(IRO[277].base + ((pfId) * IRO[277].m1))
+	(IRO[278].base + ((pfId) * IRO[278].m1))
 #define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
 	(IRO[264].base + ((pfId) * IRO[264].m1))
 #define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
@@ -136,35 +136,32 @@
 #define USTORM_ASSERT_LIST_INDEX_OFFSET (IRO[177].base)
 #define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
 	(IRO[176].base + ((assertListEntry) * IRO[176].m1))
-#define USTORM_CQE_PAGE_NEXT_OFFSET(portId, clientId) \
-	(IRO[205].base + ((portId) * IRO[205].m1) + ((clientId) * \
-	IRO[205].m2))
 #define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
 	(IRO[183].base + ((portId) * IRO[183].m1))
 #define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
-	(IRO[318].base + ((pfId) * IRO[318].m1))
+	(IRO[319].base + ((pfId) * IRO[319].m1))
 #define USTORM_FUNC_EN_OFFSET(funcId) \
 	(IRO[178].base + ((funcId) * IRO[178].m1))
 #define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
-	(IRO[282].base + ((pfId) * IRO[282].m1))
-#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
 	(IRO[283].base + ((pfId) * IRO[283].m1))
+#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+	(IRO[284].base + ((pfId) * IRO[284].m1))
 #define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
-	(IRO[287].base + ((pfId) * IRO[287].m1))
+	(IRO[288].base + ((pfId) * IRO[288].m1))
 #define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
-	(IRO[284].base + ((pfId) * IRO[284].m1))
+	(IRO[285].base + ((pfId) * IRO[285].m1))
 #define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-	(IRO[280].base + ((pfId) * IRO[280].m1))
+	(IRO[281].base + ((pfId) * IRO[281].m1))
 #define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-	(IRO[279].base + ((pfId) * IRO[279].m1))
+	(IRO[280].base + ((pfId) * IRO[280].m1))
 #define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
-	(IRO[278].base + ((pfId) * IRO[278].m1))
+	(IRO[279].base + ((pfId) * IRO[279].m1))
 #define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
-	(IRO[281].base + ((pfId) * IRO[281].m1))
+	(IRO[282].base + ((pfId) * IRO[282].m1))
 #define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
-	(IRO[285].base + ((pfId) * IRO[285].m1))
-#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
 	(IRO[286].base + ((pfId) * IRO[286].m1))
+#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+	(IRO[287].base + ((pfId) * IRO[287].m1))
 #define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
 	(IRO[182].base + ((pfId) * IRO[182].m1))
 #define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
@@ -190,39 +187,39 @@
 #define XSTORM_FUNC_EN_OFFSET(funcId) \
 	(IRO[47].base + ((funcId) * IRO[47].m1))
 #define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
-	(IRO[295].base + ((pfId) * IRO[295].m1))
+	(IRO[296].base + ((pfId) * IRO[296].m1))
 #define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
-	(IRO[298].base + ((pfId) * IRO[298].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
 	(IRO[299].base + ((pfId) * IRO[299].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
 	(IRO[300].base + ((pfId) * IRO[300].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
 	(IRO[301].base + ((pfId) * IRO[301].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
 	(IRO[302].base + ((pfId) * IRO[302].m1))
-#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
 	(IRO[303].base + ((pfId) * IRO[303].m1))
-#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
 	(IRO[304].base + ((pfId) * IRO[304].m1))
+#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+	(IRO[305].base + ((pfId) * IRO[305].m1))
 #define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
-	(IRO[294].base + ((pfId) * IRO[294].m1))
+	(IRO[295].base + ((pfId) * IRO[295].m1))
 #define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
-	(IRO[293].base + ((pfId) * IRO[293].m1))
+	(IRO[294].base + ((pfId) * IRO[294].m1))
 #define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
-	(IRO[292].base + ((pfId) * IRO[292].m1))
+	(IRO[293].base + ((pfId) * IRO[293].m1))
 #define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
-	(IRO[297].base + ((pfId) * IRO[297].m1))
+	(IRO[298].base + ((pfId) * IRO[298].m1))
 #define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
-	(IRO[296].base + ((pfId) * IRO[296].m1))
+	(IRO[297].base + ((pfId) * IRO[297].m1))
 #define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
-	(IRO[291].base + ((pfId) * IRO[291].m1))
+	(IRO[292].base + ((pfId) * IRO[292].m1))
 #define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
-	(IRO[290].base + ((pfId) * IRO[290].m1))
+	(IRO[291].base + ((pfId) * IRO[291].m1))
 #define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
-	(IRO[289].base + ((pfId) * IRO[289].m1))
+	(IRO[290].base + ((pfId) * IRO[290].m1))
 #define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
-	(IRO[288].base + ((pfId) * IRO[288].m1))
+	(IRO[289].base + ((pfId) * IRO[289].m1))
 #define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
 	(IRO[44].base + ((pfId) * IRO[44].m1))
 #define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
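All of the bnx2x_fw_defs.h changes above are a mechanical renumbering of the
IRO table to match the 7.8.17 firmware layout; each accessor resolves a
storm-RAM address as a base plus per-function (and per-queue) strides. A
sketch of the scheme, with a guessed entry layout (the driver's real
struct iro may differ):

	struct iro {
		unsigned int base;	/* array start in storm RAM */
		unsigned short m1;	/* stride per PF/function */
		unsigned short m2;	/* stride per sub-object (e.g. an EQ) */
		unsigned short m3;	/* optional third stride */
	};

	/* e.g. CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, eqId) expands to: */
	static inline unsigned int iro_offset(const struct iro *iro,
					      unsigned int pf_id,
					      unsigned int eq_id)
	{
		return iro->base + pf_id * iro->m1 + eq_id * iro->m2;
	}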
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 037860ecc343..12f00a40cdf0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -114,6 +114,10 @@ struct license_key {
 #define EPIO_CFG_EPIO30                     0x0000001f
 #define EPIO_CFG_EPIO31                     0x00000020
 
+struct mac_addr {
+	u32 upper;
+	u32 lower;
+};
 
 struct shared_hw_cfg {			 /* NVRAM Offset */
 	/* Up to 16 bytes of NULL-terminated string */
@@ -508,7 +512,22 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
 	#define PORT_HW_CFG_PAUSE_ON_HOST_RING_DISABLED       0x00000000
 	#define PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED        0x00000001
 
-	u32 reserved0[6];				    /* 0x178 */
+	/* SFP+ Tx Equalization: NIC recommended and tested value is 0xBEB2
+	 * LOM recommended and tested value is 0xBEB2. Using a different
+	 * value means using a value not tested by BRCM
+	 */
+	u32 sfi_tap_values;				    /* 0x178 */
+	#define PORT_HW_CFG_TX_EQUALIZATION_MASK      0x0000FFFF
+	#define PORT_HW_CFG_TX_EQUALIZATION_SHIFT     0
+
+	/* SFP+ Tx driver broadcast IDRIVER: NIC recommended and tested
+	 * value is 0x2. LOM recommended and tested value is 0x2. Using a
+	 * different value means using a value not tested by BRCM
+	 */
+	#define PORT_HW_CFG_TX_DRV_BROADCAST_MASK     0x000F0000
+	#define PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT    16
+
+	u32 reserved0[5];				    /* 0x17c */
 
 	u32 aeu_int_mask;				    /* 0x190 */
 
@@ -2821,8 +2840,8 @@ struct afex_stats {
 
 #define BCM_5710_FW_MAJOR_VERSION			7
 #define BCM_5710_FW_MINOR_VERSION			8
-#define BCM_5710_FW_REVISION_VERSION			2
+#define BCM_5710_FW_REVISION_VERSION			17
 #define BCM_5710_FW_ENGINEERING_VERSION			0
 #define BCM_5710_FW_COMPILE_FLAGS			1
 
 
@@ -3513,11 +3532,14 @@ struct client_init_tx_data {
 #define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL_SHIFT 2
 #define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN (0x1<<3)
 #define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN_SHIFT 3
-#define CLIENT_INIT_TX_DATA_RESERVED1 (0xFFF<<4)
-#define CLIENT_INIT_TX_DATA_RESERVED1_SHIFT 4
+#define CLIENT_INIT_TX_DATA_RESERVED0 (0xFFF<<4)
+#define CLIENT_INIT_TX_DATA_RESERVED0_SHIFT 4
 	u8 default_vlan_flg;
 	u8 force_default_pri_flg;
-	__le32 reserved3;
+	u8 tunnel_lso_inc_ip_id;
+	u8 refuse_outband_vlan_flg;
+	u8 tunnel_non_lso_pcsum_location;
+	u8 reserved1;
 };
 
 /*
@@ -3551,6 +3573,11 @@ struct client_update_ramrod_data {
 	__le16 silent_vlan_mask;
 	u8 silent_vlan_removal_flg;
 	u8 silent_vlan_change_flg;
+	u8 refuse_outband_vlan_flg;
+	u8 refuse_outband_vlan_change_flg;
+	u8 tx_switching_flg;
+	u8 tx_switching_change_flg;
+	__le32 reserved1;
 	__le32 echo;
 };
 
@@ -3620,7 +3647,8 @@ struct eth_classify_header {
  */
 struct eth_classify_mac_cmd {
 	struct eth_classify_cmd_header header;
-	__le32 reserved0;
+	__le16 reserved0;
+	__le16 inner_mac;
 	__le16 mac_lsb;
 	__le16 mac_mid;
 	__le16 mac_msb;
@@ -3633,7 +3661,8 @@ struct eth_classify_mac_cmd {
  */
 struct eth_classify_pair_cmd {
 	struct eth_classify_cmd_header header;
-	__le32 reserved0;
+	__le16 reserved0;
+	__le16 inner_mac;
 	__le16 mac_lsb;
 	__le16 mac_mid;
 	__le16 mac_msb;
@@ -3855,8 +3884,68 @@ struct eth_halt_ramrod_data {
 
 
 /*
- * Command for setting multicast classification for a client
+ * destination and source mac address.
+ */
+struct eth_mac_addresses {
+#if defined(__BIG_ENDIAN)
+	__le16 dst_mid;
+	__le16 dst_lo;
+#elif defined(__LITTLE_ENDIAN)
+	__le16 dst_lo;
+	__le16 dst_mid;
+#endif
+#if defined(__BIG_ENDIAN)
+	__le16 src_lo;
+	__le16 dst_hi;
+#elif defined(__LITTLE_ENDIAN)
+	__le16 dst_hi;
+	__le16 src_lo;
+#endif
+#if defined(__BIG_ENDIAN)
+	__le16 src_hi;
+	__le16 src_mid;
+#elif defined(__LITTLE_ENDIAN)
+	__le16 src_mid;
+	__le16 src_hi;
+#endif
+};
+
+/* tunneling related data */
+struct eth_tunnel_data {
+#if defined(__BIG_ENDIAN)
+	__le16 dst_mid;
+	__le16 dst_lo;
+#elif defined(__LITTLE_ENDIAN)
+	__le16 dst_lo;
+	__le16 dst_mid;
+#endif
+#if defined(__BIG_ENDIAN)
+	__le16 reserved0;
+	__le16 dst_hi;
+#elif defined(__LITTLE_ENDIAN)
+	__le16 dst_hi;
+	__le16 reserved0;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 reserved1;
+	u8 ip_hdr_start_inner_w;
+	__le16 pseudo_csum;
+#elif defined(__LITTLE_ENDIAN)
+	__le16 pseudo_csum;
+	u8 ip_hdr_start_inner_w;
+	u8 reserved1;
+#endif
+};
+
+/* union for mac addresses and for tunneling data.
+ * considered as tunneling data only if (tunnel_exist == 1).
  */
+union eth_mac_addr_or_tunnel_data {
+	struct eth_mac_addresses mac_addr;
+	struct eth_tunnel_data tunnel_data;
+};
+
+/*Command for setting multicast classification for a client */
 struct eth_multicast_rules_cmd {
 	u8 cmd_general_data;
 #define ETH_MULTICAST_RULES_CMD_RX_CMD (0x1<<0)
@@ -3874,7 +3963,6 @@ struct eth_multicast_rules_cmd {
 	struct regpair reserved3;
 };
 
-
 /*
  * parameters for multicast classification ramrod
  */
@@ -3883,7 +3971,6 @@ struct eth_multicast_rules_ramrod_data {
 	struct eth_multicast_rules_cmd rules[MULTICAST_RULES_COUNT];
 };
 
-
 /*
  * Place holder for ramrods protocol specific data
  */
@@ -3947,11 +4034,14 @@ struct eth_rss_update_ramrod_data {
 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4
 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5)
 #define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5
+#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY (0x1<<6)
+#define ETH_RSS_UPDATE_RAMROD_DATA_EN_5_TUPLE_CAPABILITY_SHIFT 6
 #define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<7)
 #define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 7
 	u8 rss_result_mask;
 	u8 rss_mode;
-	__le32 __reserved2;
+	__le16 udp_4tuple_dst_port_mask;
+	__le16 udp_4tuple_dst_port_value;
 	u8 indirection_table[T_ETH_INDIRECTION_TABLE_SIZE];
 	__le32 rss_key[T_ETH_RSS_KEY];
 	__le32 echo;
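The reserved word becomes a masked UDP destination-port match paired with a
new 5-tuple capability bit. On one plausible reading (field semantics assumed,
not confirmed by this diff), only UDP flows whose destination port matches the
masked value are hashed on the extended tuple:

	/* Sketch: a flow gets the extended UDP hashing only if its
	 * destination port matches the configured value under the mask.
	 */
	static int udp_4tuple_selected(unsigned short dport,
				       unsigned short mask,
				       unsigned short value)
	{
		return (dport & mask) == (value & mask);
	}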
@@ -4115,6 +4205,23 @@ enum eth_tpa_update_command {
 	MAX_ETH_TPA_UPDATE_COMMAND
 };
 
+/* In case of LSO over IPv4 tunnel, whether to increment
+ * IP ID on external IP header or internal IP header
+ */
+enum eth_tunnel_lso_inc_ip_id {
+	EXT_HEADER,
+	INT_HEADER,
+	MAX_ETH_TUNNEL_LSO_INC_IP_ID
+};
+
+/* In case tunnel exist and L4 checksum offload,
+ * the pseudo checksum location, on packet or on BD.
+ */
+enum eth_tunnel_non_lso_pcsum_location {
+	PCSUM_ON_PKT,
+	PCSUM_ON_BD,
+	MAX_ETH_TUNNEL_NON_LSO_PCSUM_LOCATION
+};
 
 /*
  * Tx regular BD structure
@@ -4166,8 +4273,8 @@ struct eth_tx_start_bd {
 #define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4
 #define ETH_TX_START_BD_PARSE_NBDS (0x3<<5)
 #define ETH_TX_START_BD_PARSE_NBDS_SHIFT 5
-#define ETH_TX_START_BD_RESREVED (0x1<<7)
-#define ETH_TX_START_BD_RESREVED_SHIFT 7
+#define ETH_TX_START_BD_TUNNEL_EXIST (0x1<<7)
+#define ETH_TX_START_BD_TUNNEL_EXIST_SHIFT 7
 };
 
 /*
@@ -4216,15 +4323,10 @@ struct eth_tx_parse_bd_e1x {
  * Tx parsing BD structure for ETH E2
  */
 struct eth_tx_parse_bd_e2 {
-	__le16 dst_mac_addr_lo;
-	__le16 dst_mac_addr_mid;
-	__le16 dst_mac_addr_hi;
-	__le16 src_mac_addr_lo;
-	__le16 src_mac_addr_mid;
-	__le16 src_mac_addr_hi;
+	union eth_mac_addr_or_tunnel_data data;
 	__le32 parsing_data;
-#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x7FF<<0)
-#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0
+#define ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W (0x7FF<<0)
+#define ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT 0
 #define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<11)
 #define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 11
 #define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<15)
@@ -4236,8 +4338,51 @@ struct eth_tx_parse_bd_e2 {
 };
 
 /*
- * The last BD in the BD memory will hold a pointer to the next BD memory
+ * Tx 2nd parsing BD structure for ETH packet
  */
+struct eth_tx_parse_2nd_bd {
+	__le16 global_data;
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W (0xF<<0)
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_START_OUTER_W_SHIFT 0
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER (0x1<<4)
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT 4
+#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN (0x1<<5)
+#define ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT 5
+#define ETH_TX_PARSE_2ND_BD_NS_FLG (0x1<<6)
+#define ETH_TX_PARSE_2ND_BD_NS_FLG_SHIFT 6
+#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST (0x1<<7)
+#define ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST_SHIFT 7
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W (0x1F<<8)
+#define ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT 8
+#define ETH_TX_PARSE_2ND_BD_RESERVED0 (0x7<<13)
+#define ETH_TX_PARSE_2ND_BD_RESERVED0_SHIFT 13
+	__le16 reserved1;
+	u8 tcp_flags;
+#define ETH_TX_PARSE_2ND_BD_FIN_FLG (0x1<<0)
+#define ETH_TX_PARSE_2ND_BD_FIN_FLG_SHIFT 0
+#define ETH_TX_PARSE_2ND_BD_SYN_FLG (0x1<<1)
+#define ETH_TX_PARSE_2ND_BD_SYN_FLG_SHIFT 1
+#define ETH_TX_PARSE_2ND_BD_RST_FLG (0x1<<2)
+#define ETH_TX_PARSE_2ND_BD_RST_FLG_SHIFT 2
+#define ETH_TX_PARSE_2ND_BD_PSH_FLG (0x1<<3)
+#define ETH_TX_PARSE_2ND_BD_PSH_FLG_SHIFT 3
+#define ETH_TX_PARSE_2ND_BD_ACK_FLG (0x1<<4)
+#define ETH_TX_PARSE_2ND_BD_ACK_FLG_SHIFT 4
+#define ETH_TX_PARSE_2ND_BD_URG_FLG (0x1<<5)
+#define ETH_TX_PARSE_2ND_BD_URG_FLG_SHIFT 5
+#define ETH_TX_PARSE_2ND_BD_ECE_FLG (0x1<<6)
+#define ETH_TX_PARSE_2ND_BD_ECE_FLG_SHIFT 6
+#define ETH_TX_PARSE_2ND_BD_CWR_FLG (0x1<<7)
+#define ETH_TX_PARSE_2ND_BD_CWR_FLG_SHIFT 7
+	u8 reserved2;
+	u8 tunnel_udp_hdr_start_w;
+	u8 fw_ip_hdr_to_payload_w;
+	__le16 fw_ip_csum_wo_len_flags_frag;
+	__le16 hw_ip_id;
+	__le32 tcp_send_seq;
+};
+
+/* The last BD in the BD memory will hold a pointer to the next BD memory */
 struct eth_tx_next_bd {
 	__le32 addr_lo;
 	__le32 addr_hi;
@@ -4252,6 +4397,7 @@ union eth_tx_bd_types {
 	struct eth_tx_bd reg_bd;
 	struct eth_tx_parse_bd_e1x parse_bd_e1x;
 	struct eth_tx_parse_bd_e2 parse_bd_e2;
+	struct eth_tx_parse_2nd_bd parse_2nd_bd;
 	struct eth_tx_next_bd next_bd;
 };
 
@@ -4663,10 +4809,10 @@ enum common_spqe_cmd_id {
 	RAMROD_CMD_ID_COMMON_STOP_TRAFFIC,
 	RAMROD_CMD_ID_COMMON_START_TRAFFIC,
 	RAMROD_CMD_ID_COMMON_AFEX_VIF_LISTS,
+	RAMROD_CMD_ID_COMMON_SET_TIMESYNC,
 	MAX_COMMON_SPQE_CMD_ID
 };
 
-
 /*
  * Per-protocol connection types
  */
@@ -4863,7 +5009,7 @@ struct vf_flr_event_data {
  */
 struct malicious_vf_event_data {
 	u8 vf_id;
-	u8 reserved0;
+	u8 err_id;
 	u16 reserved1;
 	u32 reserved2;
 	u32 reserved3;
@@ -4969,10 +5115,10 @@ enum event_ring_opcode {
 	EVENT_RING_OPCODE_CLASSIFICATION_RULES,
 	EVENT_RING_OPCODE_FILTERS_RULES,
 	EVENT_RING_OPCODE_MULTICAST_RULES,
+	EVENT_RING_OPCODE_SET_TIMESYNC,
 	MAX_EVENT_RING_OPCODE
 };
 
-
 /*
  * Modes for fairness algorithm
  */
@@ -5010,14 +5156,18 @@ struct flow_control_configuration {
  */
 struct function_start_data {
 	u8 function_mode;
-	u8 reserved;
+	u8 allow_npar_tx_switching;
 	__le16 sd_vlan_tag;
 	__le16 vif_id;
 	u8 path_id;
 	u8 network_cos_mode;
+	u8 dmae_cmd_id;
+	u8 gre_tunnel_mode;
+	u8 gre_tunnel_rss;
+	u8 nvgre_clss_en;
+	__le16 reserved1[2];
 };
 
-
 struct function_update_data {
 	u8 vif_id_change_flg;
 	u8 afex_default_vlan_change_flg;
@@ -5027,14 +5177,19 @@ struct function_update_data {
 	__le16 afex_default_vlan;
 	u8 allowed_priorities;
 	u8 network_cos_mode;
+	u8 lb_mode_en_change_flg;
 	u8 lb_mode_en;
 	u8 tx_switch_suspend_change_flg;
 	u8 tx_switch_suspend;
 	u8 echo;
-	__le16 reserved1;
+	u8 reserved1;
+	u8 update_gre_cfg_flg;
+	u8 gre_tunnel_mode;
+	u8 gre_tunnel_rss;
+	u8 nvgre_clss_en;
+	u32 reserved3;
 };
 
-
 /*
  * FW version stored in the Xstorm RAM
  */
@@ -5061,6 +5216,22 @@ struct fw_version {
 #define __FW_VERSION_RESERVED_SHIFT 4
 };
 
+/* GRE RSS Mode */
+enum gre_rss_mode {
+	GRE_OUTER_HEADERS_RSS,
+	GRE_INNER_HEADERS_RSS,
+	NVGRE_KEY_ENTROPY_RSS,
+	MAX_GRE_RSS_MODE
+};
+
+/* GRE Tunnel Mode */
+enum gre_tunnel_type {
+	NO_GRE_TUNNEL,
+	NVGRE_TUNNEL,
+	L2GRE_TUNNEL,
+	IPGRE_TUNNEL,
+	MAX_GRE_TUNNEL_TYPE
+};
 
 /*
  * Dynamic Host-Coalescing - Driver(host) counters
@@ -5224,6 +5395,26 @@ enum ip_ver {
 	MAX_IP_VER
 };
 
+/*
+ * Malicious VF error ID
+ */
+enum malicious_vf_error_id {
+	VF_PF_CHANNEL_NOT_READY,
+	ETH_ILLEGAL_BD_LENGTHS,
+	ETH_PACKET_TOO_SHORT,
+	ETH_PAYLOAD_TOO_BIG,
+	ETH_ILLEGAL_ETH_TYPE,
+	ETH_ILLEGAL_LSO_HDR_LEN,
+	ETH_TOO_MANY_BDS,
+	ETH_ZERO_HDR_NBDS,
+	ETH_START_BD_NOT_SET,
+	ETH_ILLEGAL_PARSE_NBDS,
+	ETH_IPV6_AND_CHECKSUM,
+	ETH_VLAN_FLG_INCORRECT,
+	ETH_ILLEGAL_LSO_MSS,
+	ETH_TUNNEL_NOT_SUPPORTED,
+	MAX_MALICIOUS_VF_ERROR_ID
+};
 
 /*
  * Multi-function modes
@@ -5368,7 +5559,6 @@ struct protocol_common_spe {
 	union protocol_common_specific_data data;
 };
 
-
 /*
  * The send queue element
  */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 0283f343b0d1..40f58d73de78 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -27,6 +27,10 @@
 #include "bnx2x.h"
 #include "bnx2x_cmn.h"
 
+typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy,
+					     struct link_params *params,
+					     u8 dev_addr, u16 addr, u8 byte_cnt,
+					     u8 *o_buf, u8);
 /********************************************************/
 #define ETH_HLEN			14
 /* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
@@ -152,6 +156,7 @@
 #define SFP_EEPROM_CON_TYPE_ADDR		0x2
 	#define SFP_EEPROM_CON_TYPE_VAL_LC	0x7
 	#define SFP_EEPROM_CON_TYPE_VAL_COPPER	0x21
+	#define SFP_EEPROM_CON_TYPE_VAL_RJ45	0x22
 
 
 #define SFP_EEPROM_COMP_CODE_ADDR		0x3
@@ -3127,11 +3132,6 @@ static int bnx2x_bsc_read(struct link_params *params,
 	int rc = 0;
 	struct bnx2x *bp = params->bp;
 
-	if ((sl_devid != 0xa0) && (sl_devid != 0xa2)) {
-		DP(NETIF_MSG_LINK, "invalid sl_devid 0x%x\n", sl_devid);
-		return -EINVAL;
-	}
-
 	if (xfer_cnt > 16) {
 		DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. Max is 16 bytes\n",
 		   xfer_cnt);
@@ -3629,6 +3629,16 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
  * init configuration, and set/clear SGMII flag. Internal
  * phy init is done purely in phy_init stage.
  */
+#define WC_TX_DRIVER(post2, idriver, ipre) \
+	((post2 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) | \
+	 (idriver << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) | \
+	 (ipre << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET))
+
+#define WC_TX_FIR(post, main, pre) \
+	((post << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) | \
+	 (main << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) | \
+	 (pre << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET))
+
 static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy,
 					 struct link_params *params,
 					 struct link_vars *vars)
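WC_TX_DRIVER() and WC_TX_FIR() fold the repeated shift-and-OR expressions for
the warpcore TX driver and FIR-tap registers into one place. The packing,
shown with hypothetical field offsets (the real MDIO_WC_REG_* offsets live in
the driver's register header):

	#define POST2_OFF	12	/* hypothetical */
	#define IDRIVER_OFF	8	/* hypothetical */
	#define IPRE_OFF	4	/* hypothetical */

	#define WC_TX_DRIVER(post2, idriver, ipre) \
		(((post2) << POST2_OFF) | ((idriver) << IDRIVER_OFF) | \
		 ((ipre) << IPRE_OFF))

	/* With these offsets, WC_TX_DRIVER(0x02, 0x06, 0x09) == 0x2690. */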
@@ -3728,7 +3738,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 	if (((vars->line_speed == SPEED_AUTO_NEG) &&
 	     (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
 	    (vars->line_speed == SPEED_1000)) {
-		u32 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2;
+		u16 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2;
 		an_adv |= (1<<5);
 
 		/* Enable CL37 1G Parallel Detect */
@@ -3753,20 +3763,13 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
 	/* Set Transmit PMD settings */
 	lane = bnx2x_get_warpcore_lane(phy, params);
 	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 			 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
-			 ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
-			  (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
-			  (0x09 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
+			 WC_TX_DRIVER(0x02, 0x06, 0x09));
 	/* Configure the next lane if dual mode */
 	if (phy->flags & FLAGS_WC_DUAL_MODE)
 		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 				 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*(lane+1),
-				 ((0x02 <<
-				   MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
-				  (0x06 <<
-				   MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
-				  (0x09 <<
-				   MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
+				 WC_TX_DRIVER(0x02, 0x06, 0x09));
 	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 			 MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL,
 			 0x03f0);
@@ -3909,6 +3912,8 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
 {
 	struct bnx2x *bp = params->bp;
 	u16 misc1_val, tap_val, tx_driver_val, lane, val;
+	u32 cfg_tap_val, tx_drv_brdct, tx_equal;
+
 	/* Hold rxSeqStart */
 	bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
 				 MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x8000);
@@ -3952,23 +3957,33 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
 
 	if (is_xfi) {
 		misc1_val |= 0x5;
-		tap_val = ((0x08 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
-			   (0x37 << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
-			   (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
-		tx_driver_val =
-			((0x00 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
-			 (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
-			 (0x03 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
-
+		tap_val = WC_TX_FIR(0x08, 0x37, 0x00);
+		tx_driver_val = WC_TX_DRIVER(0x00, 0x02, 0x03);
 	} else {
+		cfg_tap_val = REG_RD(bp, params->shmem_base +
+				     offsetof(struct shmem_region, dev_info.
+					      port_hw_config[params->port].
+					      sfi_tap_values));
+
+		tx_equal = cfg_tap_val & PORT_HW_CFG_TX_EQUALIZATION_MASK;
+
+		tx_drv_brdct = (cfg_tap_val &
+				PORT_HW_CFG_TX_DRV_BROADCAST_MASK) >>
+			       PORT_HW_CFG_TX_DRV_BROADCAST_SHIFT;
+
 		misc1_val |= 0x9;
-		tap_val = ((0x0f << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
-			   (0x2b << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
-			   (0x02 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
-		tx_driver_val =
-			((0x03 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
-			 (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
-			 (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
+
+		/* TAP values are controlled by nvram, if value there isn't 0 */
+		if (tx_equal)
+			tap_val = (u16)tx_equal;
+		else
+			tap_val = WC_TX_FIR(0x0f, 0x2b, 0x02);
+
+		if (tx_drv_brdct)
+			tx_driver_val = WC_TX_DRIVER(0x03, (u16)tx_drv_brdct,
+						     0x06);
+		else
+			tx_driver_val = WC_TX_DRIVER(0x03, 0x02, 0x06);
 	}
 	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 			 MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val);
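For SFI (non-XFI) links the TX FIR taps and driver strength now come from the
sfi_tap_values NVRAM word introduced in bnx2x_hsi.h above, with zero meaning
"keep the Broadcom-tested default". The fallback rule, as a sketch:

	/* Low 16 bits carry the TX equalization taps; zero keeps the
	 * driver's tested default (default_fir here stands in for
	 * WC_TX_FIR(0x0f, 0x2b, 0x02)).
	 */
	static unsigned short pick_tap_val(unsigned int cfg_tap_val,
					   unsigned short default_fir)
	{
		unsigned short tx_equal = cfg_tap_val & 0xFFFF;

		return tx_equal ? tx_equal : default_fir;
	}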
@@ -4105,15 +4120,11 @@ static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp,
 	/* Set Transmit PMD settings */
 	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 			 MDIO_WC_REG_TX_FIR_TAP,
-			 ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
-			  (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
-			  (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET) |
-			  MDIO_WC_REG_TX_FIR_TAP_ENABLE));
+			 (WC_TX_FIR(0x12, 0x2d, 0x00) |
+			  MDIO_WC_REG_TX_FIR_TAP_ENABLE));
 	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
 			 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
-			 ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
-			  (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
-			  (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
+			 WC_TX_DRIVER(0x02, 0x02, 0x02));
 }
 
 static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
@@ -4750,8 +4761,8 @@ void bnx2x_link_status_update(struct link_params *params,
 					port_mb[port].link_status));
 
 	/* Force link UP in non LOOPBACK_EXT loopback mode(s) */
-	if (bp->link_params.loopback_mode != LOOPBACK_NONE &&
-	    bp->link_params.loopback_mode != LOOPBACK_EXT)
+	if (params->loopback_mode != LOOPBACK_NONE &&
+	    params->loopback_mode != LOOPBACK_EXT)
 		vars->link_status |= LINK_STATUS_LINK_UP;
 
 	if (bnx2x_eee_has_cap(params))
@@ -7758,7 +7769,8 @@ static void bnx2x_sfp_set_transmitter(struct link_params *params,
 
 static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 					     struct link_params *params,
-					     u16 addr, u8 byte_cnt, u8 *o_buf)
+					     u8 dev_addr, u16 addr, u8 byte_cnt,
+					     u8 *o_buf, u8 is_init)
 {
 	struct bnx2x *bp = params->bp;
 	u16 val = 0;
@@ -7771,7 +7783,7 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 	/* Set the read command byte count */
 	bnx2x_cl45_write(bp, phy,
 			 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
-			 (byte_cnt | 0xa000));
+			 (byte_cnt | (dev_addr << 8)));
 
 	/* Set the read command address */
 	bnx2x_cl45_write(bp, phy,
@@ -7845,6 +7857,7 @@ static void bnx2x_warpcore_power_module(struct link_params *params,
 }
 static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 						 struct link_params *params,
+						 u8 dev_addr,
 						 u16 addr, u8 byte_cnt,
 						 u8 *o_buf, u8 is_init)
 {
@@ -7869,7 +7882,7 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 			usleep_range(1000, 2000);
 			bnx2x_warpcore_power_module(params, 1);
 		}
-		rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt,
+		rc = bnx2x_bsc_read(params, phy, dev_addr, addr32, 0, byte_cnt,
 				    data_array);
 	} while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));
 
@@ -7885,7 +7898,8 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 
 static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 					     struct link_params *params,
-					     u16 addr, u8 byte_cnt, u8 *o_buf)
+					     u8 dev_addr, u16 addr, u8 byte_cnt,
+					     u8 *o_buf, u8 is_init)
 {
 	struct bnx2x *bp = params->bp;
 	u16 val, i;
@@ -7896,6 +7910,15 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 		return -EINVAL;
 	}
 
+	/* Set 2-wire transfer rate of SFP+ module EEPROM
+	 * to 100Khz since some DACs(direct attached cables) do
+	 * not work at 400Khz.
+	 */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
+			 ((dev_addr << 8) | 1));
+
 	/* Need to read from 1.8000 to clear it */
 	bnx2x_cl45_read(bp, phy,
 			MDIO_PMA_DEVAD,
@@ -7968,26 +7991,44 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
 
 	return -EINVAL;
 }
-
 int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
-				 struct link_params *params, u16 addr,
-				 u8 byte_cnt, u8 *o_buf)
+				 struct link_params *params, u8 dev_addr,
+				 u16 addr, u16 byte_cnt, u8 *o_buf)
 {
-	int rc = -EOPNOTSUPP;
+	int rc = 0;
+	struct bnx2x *bp = params->bp;
+	u8 xfer_size;
+	u8 *user_data = o_buf;
+	read_sfp_module_eeprom_func_p read_func;
+
+	if ((dev_addr != 0xa0) && (dev_addr != 0xa2)) {
+		DP(NETIF_MSG_LINK, "invalid dev_addr 0x%x\n", dev_addr);
+		return -EINVAL;
+	}
+
 	switch (phy->type) {
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
-		rc = bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
-						       byte_cnt, o_buf);
-		break;
+		read_func = bnx2x_8726_read_sfp_module_eeprom;
+		break;
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
-		rc = bnx2x_8727_read_sfp_module_eeprom(phy, params, addr,
-						       byte_cnt, o_buf);
-		break;
+		read_func = bnx2x_8727_read_sfp_module_eeprom;
+		break;
 	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
-		rc = bnx2x_warpcore_read_sfp_module_eeprom(phy, params, addr,
-							   byte_cnt, o_buf, 0);
-		break;
+		read_func = bnx2x_warpcore_read_sfp_module_eeprom;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	while (!rc && (byte_cnt > 0)) {
+		xfer_size = (byte_cnt > SFP_EEPROM_PAGE_SIZE) ?
+			    SFP_EEPROM_PAGE_SIZE : byte_cnt;
+		rc = read_func(phy, params, dev_addr, addr, xfer_size,
+			       user_data, 0);
+		byte_cnt -= xfer_size;
+		user_data += xfer_size;
+		addr += xfer_size;
 	}
 	return rc;
 }
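The rewritten bnx2x_read_sfp_module_eeprom() first picks a per-PHY read
callback, then walks the request in SFP_EEPROM_PAGE_SIZE (16-byte) pieces,
which is what lets byte_cnt grow to u16 and callers request a full 512-byte
dump over a transport that only moves a few bytes at a time. The loop in
isolation, with a hypothetical callback type:

	#define PAGE_SZ 16	/* cf. SFP_EEPROM_PAGE_SIZE */

	typedef int (*eeprom_read_fn)(unsigned char dev_addr,
				      unsigned short addr,
				      unsigned char cnt, unsigned char *buf);

	/* Issue <=16-byte reads until done, stopping on the first error. */
	static int read_chunked(eeprom_read_fn read, unsigned char dev_addr,
				unsigned short addr, unsigned short byte_cnt,
				unsigned char *buf)
	{
		int rc = 0;

		while (!rc && byte_cnt) {
			unsigned char n = byte_cnt > PAGE_SZ ? PAGE_SZ
							     : byte_cnt;

			rc = read(dev_addr, addr, n, buf);
			byte_cnt -= n;
			buf += n;
			addr += n;
		}
		return rc;
	}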
@@ -8004,6 +8045,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
 	/* First check for copper cable */
 	if (bnx2x_read_sfp_module_eeprom(phy,
 					 params,
+					 I2C_DEV_ADDR_A0,
 					 SFP_EEPROM_CON_TYPE_ADDR,
 					 2,
 					 (u8 *)val) != 0) {
@@ -8021,6 +8063,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
 	 */
 	if (bnx2x_read_sfp_module_eeprom(phy,
 					 params,
+					 I2C_DEV_ADDR_A0,
 					 SFP_EEPROM_FC_TX_TECH_ADDR,
 					 1,
 					 &copper_module_type) != 0) {
@@ -8049,20 +8092,24 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
 			break;
 		}
 	case SFP_EEPROM_CON_TYPE_VAL_LC:
+	case SFP_EEPROM_CON_TYPE_VAL_RJ45:
 		check_limiting_mode = 1;
 		if ((val[1] & (SFP_EEPROM_COMP_CODE_SR_MASK |
 			       SFP_EEPROM_COMP_CODE_LR_MASK |
 			       SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) {
-			DP(NETIF_MSG_LINK, "1G Optic module detected\n");
+			DP(NETIF_MSG_LINK, "1G SFP module detected\n");
 			gport = params->port;
 			phy->media_type = ETH_PHY_SFP_1G_FIBER;
-			phy->req_line_speed = SPEED_1000;
-			if (!CHIP_IS_E1x(bp))
-				gport = BP_PATH(bp) + (params->port << 1);
-			netdev_err(bp->dev, "Warning: Link speed was forced to 1000Mbps."
-				   " Current SFP module in port %d is not"
-				   " compliant with 10G Ethernet\n",
-				   gport);
+			if (phy->req_line_speed != SPEED_1000) {
+				phy->req_line_speed = SPEED_1000;
+				if (!CHIP_IS_E1x(bp)) {
+					gport = BP_PATH(bp) +
+						(params->port << 1);
+				}
+				netdev_err(bp->dev,
+					   "Warning: Link speed was forced to 1000Mbps. Current SFP module in port %d is not compliant with 10G Ethernet\n",
+					   gport);
+			}
 		} else {
 			int idx, cfg_idx = 0;
 			DP(NETIF_MSG_LINK, "10G Optic module detected\n");
@@ -8101,6 +8148,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
 		u8 options[SFP_EEPROM_OPTIONS_SIZE];
 		if (bnx2x_read_sfp_module_eeprom(phy,
 						 params,
+						 I2C_DEV_ADDR_A0,
 						 SFP_EEPROM_OPTIONS_ADDR,
 						 SFP_EEPROM_OPTIONS_SIZE,
 						 options) != 0) {
@@ -8167,6 +8215,7 @@ static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
 	/* Format the warning message */
 	if (bnx2x_read_sfp_module_eeprom(phy,
 					 params,
+					 I2C_DEV_ADDR_A0,
 					 SFP_EEPROM_VENDOR_NAME_ADDR,
 					 SFP_EEPROM_VENDOR_NAME_SIZE,
 					 (u8 *)vendor_name))
@@ -8175,6 +8224,7 @@ static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
 		vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
 	if (bnx2x_read_sfp_module_eeprom(phy,
 					 params,
+					 I2C_DEV_ADDR_A0,
 					 SFP_EEPROM_PART_NO_ADDR,
 					 SFP_EEPROM_PART_NO_SIZE,
 					 (u8 *)vendor_pn))
@@ -8205,12 +8255,13 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
 
 	for (timeout = 0; timeout < 60; timeout++) {
 		if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
-			rc = bnx2x_warpcore_read_sfp_module_eeprom(phy,
-								   params, 1,
-								   1, &val, 1);
+			rc = bnx2x_warpcore_read_sfp_module_eeprom(
+				phy, params, I2C_DEV_ADDR_A0, 1, 1, &val,
+				1);
 		else
-			rc = bnx2x_read_sfp_module_eeprom(phy, params, 1, 1,
-							  &val);
+			rc = bnx2x_read_sfp_module_eeprom(phy, params,
+							  I2C_DEV_ADDR_A0,
+							  1, 1, &val);
 		if (rc == 0) {
 			DP(NETIF_MSG_LINK,
 			   "SFP+ module initialization took %d ms\n",
@@ -8219,7 +8270,8 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
 		}
 		usleep_range(5000, 10000);
 	}
-	rc = bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val);
+	rc = bnx2x_read_sfp_module_eeprom(phy, params, I2C_DEV_ADDR_A0,
+					  1, 1, &val);
 	return rc;
 }
 
@@ -8376,15 +8428,6 @@ static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
 		bnx2x_cl45_write(bp, phy,
 				 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
 				 val);
-
-		/* Set 2-wire transfer rate of SFP+ module EEPROM
-		 * to 100Khz since some DACs(direct attached cables) do
-		 * not work at 400Khz.
-		 */
-		bnx2x_cl45_write(bp, phy,
-				 MDIO_PMA_DEVAD,
-				 MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
-				 0xa001);
 		break;
 	default:
 		DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
@@ -9528,8 +9571,7 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
 	} else {
 		/* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
 		/* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
-		for (i = 0; i < ARRAY_SIZE(reg_set);
-		     i++)
+		for (i = 0; i < ARRAY_SIZE(reg_set); i++)
 			bnx2x_cl45_write(bp, phy, reg_set[i].devad,
 					 reg_set[i].reg, reg_set[i].val);
 
@@ -10281,7 +10323,8 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
 				LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
 
 		/* Determine if EEE was negotiated */
-		if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+		if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+		    (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834))
 			bnx2x_eee_an_resolve(phy, params, vars);
 	}
 
@@ -12242,7 +12285,7 @@ static void bnx2x_init_bmac_loopback(struct link_params *params,
 
 	bnx2x_xgxs_deassert(params);
 
-	/* set bmac loopback */
+	/* Set bmac loopback */
 	bnx2x_bmac_enable(params, vars, 1, 1);
 
 	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
@@ -12261,7 +12304,7 @@ static void bnx2x_init_emac_loopback(struct link_params *params,
 	vars->phy_flags = PHY_XGXS_FLAG;
 
 	bnx2x_xgxs_deassert(params);
-	/* set bmac loopback */
+	/* Set bmac loopback */
 	bnx2x_emac_enable(params, vars, 1);
 	bnx2x_emac_program(params, vars);
 	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
@@ -12521,6 +12564,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
 	   params->req_line_speed[0], params->req_flow_ctrl[0]);
 	DP(NETIF_MSG_LINK, "(2) req_speed %d, req_flowctrl %d\n",
 	   params->req_line_speed[1], params->req_flow_ctrl[1]);
+	DP(NETIF_MSG_LINK, "req_adv_flow_ctrl 0x%x\n", params->req_fc_auto_adv);
 	vars->link_status = 0;
 	vars->phy_link_up = 0;
 	vars->link_up = 0;
@@ -13440,8 +13484,8 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
 	int sigdet;
 
 	/* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
-	 * since some switches tend to reinit the AN process and clear the
-	 * advertised BP/NP after ~2 seconds causing the KR2 to be disabled
+	 * Since some switches tend to reinit the AN process and clear the
+	 * the advertised BP/NP after ~2 seconds causing the KR2 to be disabled
 	 * and recovered many times
 	 */
 	if (vars->check_kr2_recovery_cnt > 0) {
@@ -13469,8 +13513,10 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
 
 	/* CL73 has not begun yet */
 	if (base_page == 0) {
-		if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE))
+		if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
 			bnx2x_kr2_recovery(params, vars, phy);
+			DP(NETIF_MSG_LINK, "No BP\n");
+		}
 		return;
 	}
 
@@ -13486,7 +13532,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
 	if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
 		if (!not_kr2_device) {
 			DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page,
-				next_page);
+			   next_page);
 			bnx2x_kr2_recovery(params, vars, phy);
 		}
 		return;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 56c2aae4e2c8..4df45234fdc0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -41,6 +41,9 @@
41#define SPEED_AUTO_NEG 0 41#define SPEED_AUTO_NEG 0
42#define SPEED_20000 20000 42#define SPEED_20000 20000
43 43
44#define I2C_DEV_ADDR_A0 0xa0
45#define I2C_DEV_ADDR_A2 0xa2
46
44#define SFP_EEPROM_PAGE_SIZE 16 47#define SFP_EEPROM_PAGE_SIZE 16
45#define SFP_EEPROM_VENDOR_NAME_ADDR 0x14 48#define SFP_EEPROM_VENDOR_NAME_ADDR 0x14
46#define SFP_EEPROM_VENDOR_NAME_SIZE 16 49#define SFP_EEPROM_VENDOR_NAME_SIZE 16
@@ -54,6 +57,15 @@
54#define SFP_EEPROM_SERIAL_SIZE 16 57#define SFP_EEPROM_SERIAL_SIZE 16
55#define SFP_EEPROM_DATE_ADDR 0x54 /* ASCII YYMMDD */ 58#define SFP_EEPROM_DATE_ADDR 0x54 /* ASCII YYMMDD */
56#define SFP_EEPROM_DATE_SIZE 6 59#define SFP_EEPROM_DATE_SIZE 6
60#define SFP_EEPROM_DIAG_TYPE_ADDR 0x5c
61#define SFP_EEPROM_DIAG_TYPE_SIZE 1
62#define SFP_EEPROM_DIAG_ADDR_CHANGE_REQ (1<<2)
63#define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e
64#define SFP_EEPROM_SFF_8472_COMP_SIZE 1
65
66#define SFP_EEPROM_A2_CHECKSUM_RANGE 0x5e
67#define SFP_EEPROM_A2_CC_DMI_ADDR 0x5f
68
57#define PWR_FLT_ERR_MSG_LEN 250 69#define PWR_FLT_ERR_MSG_LEN 250
58 70
59#define XGXS_EXT_PHY_TYPE(ext_phy_config) \ 71#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
@@ -420,8 +432,8 @@ void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
420 432
421/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */ 433/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */
422int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy, 434int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
423 struct link_params *params, u16 addr, 435 struct link_params *params, u8 dev_addr,
424 u8 byte_cnt, u8 *o_buf); 436 u16 addr, u16 byte_cnt, u8 *o_buf);
425 437
426void bnx2x_hw_reset_phy(struct link_params *params); 438void bnx2x_hw_reset_phy(struct link_params *params);
427 439
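
The new defines follow SFF-8472: EEPROM offset 0x5c holds the diagnostics-type byte (bit 2 set means the module requires an address-change sequence to reach its diagnostics) and 0x5e the compliance revision, and the read helper now takes an explicit I2C device address, 0xa0 for the base page or 0xa2 for diagnostics. A sketch, under those assumptions, of how a caller might choose the device address; the policy and names are illustrative, not the driver's exact logic:

#include <stdint.h>
#include <stdio.h>

#define I2C_DEV_ADDR_A0			0xa0
#define I2C_DEV_ADDR_A2			0xa2
#define SFP_EEPROM_DIAG_ADDR_CHANGE_REQ	(1 << 2)

/* Pick the I2C device address for a diagnostics read, given the
 * diagnostics-type byte (offset 0x5c) and the SFF-8472 compliance
 * byte (offset 0x5e). Returns 0 when no usable A2 page exists.
 */
static uint8_t sfp_diag_dev_addr(uint8_t diag_type, uint8_t sff8472_comp)
{
	if (!sff8472_comp)
		return 0;	/* no digital diagnostics implemented */
	if (diag_type & SFP_EEPROM_DIAG_ADDR_CHANGE_REQ)
		return 0;	/* address-change protocol not handled here */
	return I2C_DEV_ADDR_A2;
}

int main(void)
{
	printf("0x%02x\n", sfp_diag_dev_addr(0x40, 0x01)); /* -> 0xa2 */
	printf("0x%02x\n", sfp_diag_dev_addr(0x44, 0x01)); /* -> 0x00 */
	return 0;
}
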
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e81a747ea8ce..fdfe33bc097b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -75,8 +75,6 @@
75#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw" 75#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
76#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw" 76#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
77 77
78#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
79
80/* Time in jiffies before concluding the transmitter is hung */ 78/* Time in jiffies before concluding the transmitter is hung */
81#define TX_TIMEOUT (5*HZ) 79#define TX_TIMEOUT (5*HZ)
82 80
@@ -2955,14 +2953,16 @@ static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
2955 __set_bit(BNX2X_Q_FLG_ACTIVE, &flags); 2953 __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
2956 2954
2957 /* tx only connections collect statistics (on the same index as the 2955 /* tx only connections collect statistics (on the same index as the
2958 * parent connection). The statistics are zeroed when the parent 2956 * parent connection). The statistics are zeroed when the parent
2959 * connection is initialized. 2957 * connection is initialized.
2960 */ 2958 */
2961 2959
2962 __set_bit(BNX2X_Q_FLG_STATS, &flags); 2960 __set_bit(BNX2X_Q_FLG_STATS, &flags);
2963 if (zero_stats) 2961 if (zero_stats)
2964 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags); 2962 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
2965 2963
2964 __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
2965 __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
2966 2966
2967#ifdef BNX2X_STOP_ON_ERROR 2967#ifdef BNX2X_STOP_ON_ERROR
2968 __set_bit(BNX2X_Q_FLG_TX_SEC, &flags); 2968 __set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
@@ -3227,16 +3227,29 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
3227{ 3227{
3228 struct eth_stats_info *ether_stat = 3228 struct eth_stats_info *ether_stat =
3229 &bp->slowpath->drv_info_to_mcp.ether_stat; 3229 &bp->slowpath->drv_info_to_mcp.ether_stat;
3230 struct bnx2x_vlan_mac_obj *mac_obj =
3231 &bp->sp_objs->mac_obj;
3232 int i;
3230 3233
3231 strlcpy(ether_stat->version, DRV_MODULE_VERSION, 3234 strlcpy(ether_stat->version, DRV_MODULE_VERSION,
3232 ETH_STAT_INFO_VERSION_LEN); 3235 ETH_STAT_INFO_VERSION_LEN);
3233 3236
3234 bp->sp_objs[0].mac_obj.get_n_elements(bp, &bp->sp_objs[0].mac_obj, 3237 /* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
3235 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, 3238 * mac_local field in ether_stat struct. The base address is offset by 2
3236 ether_stat->mac_local); 3239 * bytes to account for the field being 8 bytes but a mac address is
3237 3240 * only 6 bytes. Likewise, the stride for the get_n_elements function is
3241 * 2 bytes, padding each 6-byte mac out to the 8 bytes
3242 * allocated for it in the ether_stat struct, so the macs will land in their
3243 * proper positions.
3244 */
3245 for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
3246 memset(ether_stat->mac_local + i, 0,
3247 sizeof(ether_stat->mac_local[0]));
3248 mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
3249 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
3250 ether_stat->mac_local + MAC_PAD, MAC_PAD,
3251 ETH_ALEN);
3238 ether_stat->mtu_size = bp->dev->mtu; 3252 ether_stat->mtu_size = bp->dev->mtu;
3239
3240 if (bp->dev->features & NETIF_F_RXCSUM) 3253 if (bp->dev->features & NETIF_F_RXCSUM)
3241 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK; 3254 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
3242 if (bp->dev->features & NETIF_F_TSO) 3255 if (bp->dev->features & NETIF_F_TSO)
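
The padding comment in the hunk above is easier to believe in miniature: each mac occupies an 8-byte slot, so the copy starts MAC_PAD (2) bytes into the slot and advances stride + size (2 + 6) bytes per element, leaving every mac right-aligned behind two zero bytes. A self-contained sketch of that arithmetic (the sample macs are made up):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN	6
#define MAC_PAD		(8 - ETH_ALEN)	/* ALIGN(6, sizeof(u32)) - 6 == 2 */
#define NUM_MACS	2

int main(void)
{
	uint8_t mac_local[NUM_MACS][8];		/* firmware-facing slots */
	const uint8_t macs[NUM_MACS][ETH_ALEN] = {
		{ 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc },
		{ 0x00, 0x10, 0x18, 0xdd, 0xee, 0xff },
	};
	uint8_t *next = &mac_local[0][0] + MAC_PAD;	/* skip leading pad */
	int i;

	memset(mac_local, 0, sizeof(mac_local));
	for (i = 0; i < NUM_MACS; i++) {
		memcpy(next, macs[i], ETH_ALEN);
		next += MAC_PAD + ETH_ALEN;	/* stride + size, as above */
	}

	for (i = 0; i < NUM_MACS * 8; i++)
		printf("%02x%s", (&mac_local[0][0])[i],
		       (i % 8 == 7) ? "\n" : " ");
	return 0;
}
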
@@ -3258,8 +3271,7 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3258 if (!CNIC_LOADED(bp)) 3271 if (!CNIC_LOADED(bp))
3259 return; 3272 return;
3260 3273
3261 memcpy(fcoe_stat->mac_local + MAC_LEADING_ZERO_CNT, 3274 memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
3262 bp->fip_mac, ETH_ALEN);
3263 3275
3264 fcoe_stat->qos_priority = 3276 fcoe_stat->qos_priority =
3265 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE]; 3277 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
@@ -3361,8 +3373,8 @@ static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3361 if (!CNIC_LOADED(bp)) 3373 if (!CNIC_LOADED(bp))
3362 return; 3374 return;
3363 3375
3364 memcpy(iscsi_stat->mac_local + MAC_LEADING_ZERO_CNT, 3376 memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
3365 bp->cnic_eth_dev.iscsi_mac, ETH_ALEN); 3377 ETH_ALEN);
3366 3378
3367 iscsi_stat->qos_priority = 3379 iscsi_stat->qos_priority =
3368 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI]; 3380 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
@@ -6029,9 +6041,10 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
6029 rmb(); 6041 rmb();
6030 bnx2x_init_rx_rings(bp); 6042 bnx2x_init_rx_rings(bp);
6031 bnx2x_init_tx_rings(bp); 6043 bnx2x_init_tx_rings(bp);
6032 6044 if (IS_VF(bp)) {
6033 if (IS_VF(bp)) 6045 bnx2x_memset_stats(bp);
6034 return; 6046 return;
6047 }
6035 6048
6036 /* Initialize MOD_ABS interrupts */ 6049 /* Initialize MOD_ABS interrupts */
6037 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id, 6050 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
@@ -9525,6 +9538,10 @@ sp_rtnl_not_reset:
9525 bnx2x_vfpf_storm_rx_mode(bp); 9538 bnx2x_vfpf_storm_rx_mode(bp);
9526 } 9539 }
9527 9540
9541 if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
9542 &bp->sp_rtnl_state))
9543 bnx2x_pf_set_vfs_vlan(bp);
9544
9528 /* work which needs rtnl lock not-taken (as it takes the lock itself and 9545 /* work which needs rtnl lock not-taken (as it takes the lock itself and
9529 * can be called from other contexts as well) 9546 * can be called from other contexts as well)
9530 */ 9547 */
@@ -9532,8 +9549,10 @@ sp_rtnl_not_reset:
9532 9549
9533 /* enable SR-IOV if applicable */ 9550 /* enable SR-IOV if applicable */
9534 if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, 9551 if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
9535 &bp->sp_rtnl_state)) 9552 &bp->sp_rtnl_state)) {
9553 bnx2x_disable_sriov(bp);
9536 bnx2x_enable_sriov(bp); 9554 bnx2x_enable_sriov(bp);
9555 }
9537} 9556}
9538 9557
9539static void bnx2x_period_task(struct work_struct *work) 9558static void bnx2x_period_task(struct work_struct *work)
@@ -9701,6 +9720,31 @@ static struct bnx2x_prev_path_list *
9701 return NULL; 9720 return NULL;
9702} 9721}
9703 9722
9723static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
9724{
9725 struct bnx2x_prev_path_list *tmp_list;
9726 int rc;
9727
9728 rc = down_interruptible(&bnx2x_prev_sem);
9729 if (rc) {
9730 BNX2X_ERR("Received %d when tried to take lock\n", rc);
9731 return rc;
9732 }
9733
9734 tmp_list = bnx2x_prev_path_get_entry(bp);
9735 if (tmp_list) {
9736 tmp_list->aer = 1;
9737 rc = 0;
9738 } else {
9739 BNX2X_ERR("path %d: Entry does not exist for eeh; flow occurs before initial insmod is over?\n",
9740 BP_PATH(bp));
9741 }
9742
9743 up(&bnx2x_prev_sem);
9744
9745 return rc;
9746}
9747
9704static bool bnx2x_prev_is_path_marked(struct bnx2x *bp) 9748static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
9705{ 9749{
9706 struct bnx2x_prev_path_list *tmp_list; 9750 struct bnx2x_prev_path_list *tmp_list;
@@ -9709,14 +9753,15 @@ static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
9709 if (down_trylock(&bnx2x_prev_sem)) 9753 if (down_trylock(&bnx2x_prev_sem))
9710 return false; 9754 return false;
9711 9755
9712 list_for_each_entry(tmp_list, &bnx2x_prev_list, list) { 9756 tmp_list = bnx2x_prev_path_get_entry(bp);
9713 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot && 9757 if (tmp_list) {
9714 bp->pdev->bus->number == tmp_list->bus && 9758 if (tmp_list->aer) {
9715 BP_PATH(bp) == tmp_list->path) { 9759 DP(NETIF_MSG_HW, "Path %d was marked by AER\n",
9760 BP_PATH(bp));
9761 } else {
9716 rc = true; 9762 rc = true;
9717 BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n", 9763 BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
9718 BP_PATH(bp)); 9764 BP_PATH(bp));
9719 break;
9720 } 9765 }
9721 } 9766 }
9722 9767
@@ -9730,6 +9775,28 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
9730 struct bnx2x_prev_path_list *tmp_list; 9775 struct bnx2x_prev_path_list *tmp_list;
9731 int rc; 9776 int rc;
9732 9777
9778 rc = down_interruptible(&bnx2x_prev_sem);
9779 if (rc) {
9780 BNX2X_ERR("Received %d when tried to take lock\n", rc);
9781 return rc;
9782 }
9783
9784 /* Check whether the entry for this path already exists */
9785 tmp_list = bnx2x_prev_path_get_entry(bp);
9786 if (tmp_list) {
9787 if (!tmp_list->aer) {
9788 BNX2X_ERR("Re-marking the path.\n");
9789 } else {
9790 DP(NETIF_MSG_HW, "Removing AER indication from path %d\n",
9791 BP_PATH(bp));
9792 tmp_list->aer = 0;
9793 }
9794 up(&bnx2x_prev_sem);
9795 return 0;
9796 }
9797 up(&bnx2x_prev_sem);
9798
9799 /* Create an entry for this path and add it */
9733 tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL); 9800 tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
9734 if (!tmp_list) { 9801 if (!tmp_list) {
9735 BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n"); 9802 BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
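
All of these prev-list hunks share one locking discipline: every walk of bnx2x_prev_list is bracketed by down_interruptible()/up() on a global semaphore, and a failed down is reported and propagated rather than swallowed. A kernel-style sketch of that shape (not standalone; demo_* names are hypothetical):

#include <linux/semaphore.h>

static DEFINE_SEMAPHORE(demo_prev_sem);

/* Run fn(arg) with the prev-list semaphore held; an interrupted
 * acquisition is returned to the caller instead of being ignored.
 */
static int demo_with_prev_list(int (*fn)(void *), void *arg)
{
	int rc = down_interruptible(&demo_prev_sem);

	if (rc)
		return rc;
	rc = fn(arg);
	up(&demo_prev_sem);
	return rc;
}
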
@@ -9739,6 +9806,7 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
9739 tmp_list->bus = bp->pdev->bus->number; 9806 tmp_list->bus = bp->pdev->bus->number;
9740 tmp_list->slot = PCI_SLOT(bp->pdev->devfn); 9807 tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
9741 tmp_list->path = BP_PATH(bp); 9808 tmp_list->path = BP_PATH(bp);
9809 tmp_list->aer = 0;
9742 tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0; 9810 tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
9743 9811
9744 rc = down_interruptible(&bnx2x_prev_sem); 9812 rc = down_interruptible(&bnx2x_prev_sem);
@@ -9746,8 +9814,8 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
9746 BNX2X_ERR("Received %d when tried to take lock\n", rc); 9814 BNX2X_ERR("Received %d when tried to take lock\n", rc);
9747 kfree(tmp_list); 9815 kfree(tmp_list);
9748 } else { 9816 } else {
9749 BNX2X_DEV_INFO("Marked path [%d] - finished previous unload\n", 9817 DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n",
9750 BP_PATH(bp)); 9818 BP_PATH(bp));
9751 list_add(&tmp_list->list, &bnx2x_prev_list); 9819 list_add(&tmp_list->list, &bnx2x_prev_list);
9752 up(&bnx2x_prev_sem); 9820 up(&bnx2x_prev_sem);
9753 } 9821 }
@@ -9986,6 +10054,7 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
9986 } 10054 }
9987 10055
9988 do { 10056 do {
10057 int aer = 0;
9989 /* Lock MCP using an unload request */ 10058 /* Lock MCP using an unload request */
9990 fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0); 10059 fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
9991 if (!fw) { 10060 if (!fw) {
@@ -9994,7 +10063,18 @@ static int bnx2x_prev_unload(struct bnx2x *bp)
9994 break; 10063 break;
9995 } 10064 }
9996 10065
9997 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON) { 10066 rc = down_interruptible(&bnx2x_prev_sem);
10067 if (rc) {
10068 BNX2X_ERR("Cannot check for AER; Received %d when tried to take lock\n",
10069 rc);
10070 } else {
10071 /* If Path is marked by EEH, ignore unload status */
10072 aer = !!(bnx2x_prev_path_get_entry(bp) &&
10073 bnx2x_prev_path_get_entry(bp)->aer);
10074 up(&bnx2x_prev_sem);
10075 }
10076
10077 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) {
9998 rc = bnx2x_prev_unload_common(bp); 10078 rc = bnx2x_prev_unload_common(bp);
9999 break; 10079 break;
10000 } 10080 }
@@ -10034,8 +10114,12 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
10034 id = ((val & 0xffff) << 16); 10114 id = ((val & 0xffff) << 16);
10035 val = REG_RD(bp, MISC_REG_CHIP_REV); 10115 val = REG_RD(bp, MISC_REG_CHIP_REV);
10036 id |= ((val & 0xf) << 12); 10116 id |= ((val & 0xf) << 12);
10037 val = REG_RD(bp, MISC_REG_CHIP_METAL); 10117
10038 id |= ((val & 0xff) << 4); 10118 /* Metal is read from PCI regs, but we can't access >=0x400 from
10119 * the configuration space (so we need to reg_rd)
10120 */
10121 val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3);
10122 id |= (((val >> 24) & 0xf) << 4);
10039 val = REG_RD(bp, MISC_REG_BOND_ID); 10123 val = REG_RD(bp, MISC_REG_BOND_ID);
10040 id |= (val & 0xf); 10124 id |= (val & 0xf);
10041 bp->common.chip_id = id; 10125 bp->common.chip_id = id;
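
With this hunk the metal revision comes from bits 27:24 of PCI_ID_VAL3, fetched through GRC because configuration space at and above offset 0x400 cannot be read directly, and it lands in bits 7:4 of the id. A small sketch of the resulting chip_id assembly (the register values below are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* chip_id layout after the change:
 *   [31:16] chip number (MISC_REG_CHIP_NUM)
 *   [15:12] chip revision (MISC_REG_CHIP_REV, low 4 bits)
 *   [7:4]   metal, bits 27:24 of PCI_ID_VAL3
 *   [3:0]   bond id (MISC_REG_BOND_ID, low 4 bits)
 */
static uint32_t make_chip_id(uint32_t chip_num, uint32_t chip_rev,
			     uint32_t pci_id_val3, uint32_t bond_id)
{
	uint32_t id;

	id  = (chip_num & 0xffff) << 16;
	id |= (chip_rev & 0xf) << 12;
	id |= ((pci_id_val3 >> 24) & 0xf) << 4;
	id |= bond_id & 0xf;
	return id;
}

int main(void)
{
	printf("chip_id = 0x%08x\n",
	       (unsigned int)make_chip_id(0x168e, 0x1, 0x03000000, 0x0));
	return 0;
}
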
@@ -10812,14 +10896,12 @@ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
10812 } 10896 }
10813 } 10897 }
10814 10898
10815 if (IS_MF_STORAGE_SD(bp)) 10899 /* If this is a storage-only interface, use SAN mac as
10816 /* Zero primary MAC configuration */ 10900 * primary MAC. Notice that for SD this is already the case,
10817 memset(bp->dev->dev_addr, 0, ETH_ALEN); 10901 * as the SAN mac was copied from the primary MAC.
10818 10902 */
10819 if (IS_MF_FCOE_AFEX(bp) || IS_MF_FCOE_SD(bp)) 10903 if (IS_MF_FCOE_AFEX(bp))
10820 /* use FIP MAC as primary MAC */
10821 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN); 10904 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
10822
10823 } else { 10905 } else {
10824 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port]. 10906 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
10825 iscsi_mac_upper); 10907 iscsi_mac_upper);
@@ -11056,6 +11138,9 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
11056 } else 11138 } else
11057 BNX2X_DEV_INFO("illegal OV for SD\n"); 11139 BNX2X_DEV_INFO("illegal OV for SD\n");
11058 break; 11140 break;
11141 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
11142 bp->mf_config[vn] = 0;
11143 break;
11059 default: 11144 default:
11060 /* Unknown configuration: reset mf_config */ 11145 /* Unknown configuration: reset mf_config */
11061 bp->mf_config[vn] = 0; 11146 bp->mf_config[vn] = 0;
@@ -11402,26 +11487,6 @@ static int bnx2x_init_bp(struct bnx2x *bp)
11402 * net_device service functions 11487 * net_device service functions
11403 */ 11488 */
11404 11489
11405static int bnx2x_open_epilog(struct bnx2x *bp)
11406{
11407 /* Enable sriov via delayed work. This must be done via delayed work
11408 * because it causes the probe of the vf devices to be run, which invoke
11409 * register_netdevice which must have rtnl lock taken. As we are holding
11410 * the lock right now, that could only work if the probe would not take
11411 * the lock. However, as the probe of the vf may be called from other
11412 * contexts as well (such as passthrough to vm failes) it can't assume
11413 * the lock is being held for it. Using delayed work here allows the
11414 * probe code to simply take the lock (i.e. wait for it to be released
11415 * if it is being held).
11416 */
11417 smp_mb__before_clear_bit();
11418 set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
11419 smp_mb__after_clear_bit();
11420 schedule_delayed_work(&bp->sp_rtnl_task, 0);
11421
11422 return 0;
11423}
11424
11425/* called with rtnl_lock */ 11490/* called with rtnl_lock */
11426static int bnx2x_open(struct net_device *dev) 11491static int bnx2x_open(struct net_device *dev)
11427{ 11492{
@@ -11791,6 +11856,8 @@ static const struct net_device_ops bnx2x_netdev_ops = {
11791 .ndo_setup_tc = bnx2x_setup_tc, 11856 .ndo_setup_tc = bnx2x_setup_tc,
11792#ifdef CONFIG_BNX2X_SRIOV 11857#ifdef CONFIG_BNX2X_SRIOV
11793 .ndo_set_vf_mac = bnx2x_set_vf_mac, 11858 .ndo_set_vf_mac = bnx2x_set_vf_mac,
11859 .ndo_set_vf_vlan = bnx2x_set_vf_vlan,
11860 .ndo_get_vf_config = bnx2x_get_vf_config,
11794#endif 11861#endif
11795#ifdef NETDEV_FCOE_WWNN 11862#ifdef NETDEV_FCOE_WWNN
11796 .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn, 11863 .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
@@ -11953,7 +12020,7 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
11953 dev->watchdog_timeo = TX_TIMEOUT; 12020 dev->watchdog_timeo = TX_TIMEOUT;
11954 12021
11955 dev->netdev_ops = &bnx2x_netdev_ops; 12022 dev->netdev_ops = &bnx2x_netdev_ops;
11956 bnx2x_set_ethtool_ops(dev); 12023 bnx2x_set_ethtool_ops(bp, dev);
11957 12024
11958 dev->priv_flags |= IFF_UNICAST_FLT; 12025 dev->priv_flags |= IFF_UNICAST_FLT;
11959 12026
@@ -11961,6 +12028,13 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
11961 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | 12028 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
11962 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO | 12029 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
11963 NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX; 12030 NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX;
12031 if (!CHIP_IS_E1x(bp)) {
12032 dev->hw_features |= NETIF_F_GSO_GRE;
12033 dev->hw_enc_features =
12034 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
12035 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
12036 NETIF_F_GSO_GRE;
12037 }
11964 12038
11965 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 12039 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
11966 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA; 12040 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
@@ -12447,7 +12521,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
12447 * l2 connections. 12521 * l2 connections.
12448 */ 12522 */
12449 if (IS_VF(bp)) { 12523 if (IS_VF(bp)) {
12450 bnx2x_vf_map_doorbells(bp); 12524 bp->doorbells = bnx2x_vf_doorbells(bp);
12451 rc = bnx2x_vf_pci_alloc(bp); 12525 rc = bnx2x_vf_pci_alloc(bp);
12452 if (rc) 12526 if (rc)
12453 goto init_one_exit; 12527 goto init_one_exit;
@@ -12475,13 +12549,8 @@ static int bnx2x_init_one(struct pci_dev *pdev,
12475 goto init_one_exit; 12549 goto init_one_exit;
12476 } 12550 }
12477 12551
12478 /* Enable SRIOV if capability found in configuration space. 12552 /* Enable SRIOV if capability found in configuration space */
12479 * Once the generic SR-IOV framework makes it in from the 12553 rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
12480 * pci tree this will be revised, to allow dynamic control
12481 * over the number of VFs. Right now, change the num of vfs
12482 * param below to enable SR-IOV.
12483 */
12484 rc = bnx2x_iov_init_one(bp, int_mode, 0/*num vfs*/);
12485 if (rc) 12554 if (rc)
12486 goto init_one_exit; 12555 goto init_one_exit;
12487 12556
@@ -12493,16 +12562,6 @@ static int bnx2x_init_one(struct pci_dev *pdev,
12493 if (CHIP_IS_E1x(bp)) 12562 if (CHIP_IS_E1x(bp))
12494 bp->flags |= NO_FCOE_FLAG; 12563 bp->flags |= NO_FCOE_FLAG;
12495 12564
12496 /* disable FCOE for 57840 device, until FW supports it */
12497 switch (ent->driver_data) {
12498 case BCM57840_O:
12499 case BCM57840_4_10:
12500 case BCM57840_2_20:
12501 case BCM57840_MFO:
12502 case BCM57840_MF:
12503 bp->flags |= NO_FCOE_FLAG;
12504 }
12505
12506 /* Set bp->num_queues for MSI-X mode*/ 12565 /* Set bp->num_queues for MSI-X mode*/
12507 bnx2x_set_num_queues(bp); 12566 bnx2x_set_num_queues(bp);
12508 12567
@@ -12636,9 +12695,7 @@ static void bnx2x_remove_one(struct pci_dev *pdev)
12636 12695
12637static int bnx2x_eeh_nic_unload(struct bnx2x *bp) 12696static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12638{ 12697{
12639 int i; 12698 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
12640
12641 bp->state = BNX2X_STATE_ERROR;
12642 12699
12643 bp->rx_mode = BNX2X_RX_MODE_NONE; 12700 bp->rx_mode = BNX2X_RX_MODE_NONE;
12644 12701
@@ -12647,29 +12704,21 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12647 12704
12648 /* Stop Tx */ 12705 /* Stop Tx */
12649 bnx2x_tx_disable(bp); 12706 bnx2x_tx_disable(bp);
12650
12651 bnx2x_netif_stop(bp, 0);
12652 /* Delete all NAPI objects */ 12707 /* Delete all NAPI objects */
12653 bnx2x_del_all_napi(bp); 12708 bnx2x_del_all_napi(bp);
12654 if (CNIC_LOADED(bp)) 12709 if (CNIC_LOADED(bp))
12655 bnx2x_del_all_napi_cnic(bp); 12710 bnx2x_del_all_napi_cnic(bp);
12711 netdev_reset_tc(bp->dev);
12656 12712
12657 del_timer_sync(&bp->timer); 12713 del_timer_sync(&bp->timer);
12714 cancel_delayed_work(&bp->sp_task);
12715 cancel_delayed_work(&bp->period_task);
12658 12716
12659 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 12717 spin_lock_bh(&bp->stats_lock);
12660 12718 bp->stats_state = STATS_STATE_DISABLED;
12661 /* Release IRQs */ 12719 spin_unlock_bh(&bp->stats_lock);
12662 bnx2x_free_irq(bp);
12663
12664 /* Free SKBs, SGEs, TPA pool and driver internals */
12665 bnx2x_free_skbs(bp);
12666
12667 for_each_rx_queue(bp, i)
12668 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
12669
12670 bnx2x_free_mem(bp);
12671 12720
12672 bp->state = BNX2X_STATE_CLOSED; 12721 bnx2x_save_statistics(bp);
12673 12722
12674 netif_carrier_off(bp->dev); 12723 netif_carrier_off(bp->dev);
12675 12724
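
Note the shift in how statistics are stopped: instead of posting STATS_EVENT_STOP, which assumes more of the device is still functional, the EEH unload flips the state machine off under the same bottom-half lock the stats path takes, then saves the last readings. A kernel-style sketch of that move (not standalone; demo_* names are hypothetical):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_stats_lock);
static int demo_stats_state;

/* Disable statistics collection without touching the (possibly dead)
 * device: just flip the state under the lock the stats path uses.
 */
static void demo_stats_disable(void)
{
	spin_lock_bh(&demo_stats_lock);
	demo_stats_state = 0;	/* STATS_STATE_DISABLED in the driver */
	spin_unlock_bh(&demo_stats_lock);
}
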
@@ -12705,6 +12754,8 @@ static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12705 12754
12706 rtnl_lock(); 12755 rtnl_lock();
12707 12756
12757 BNX2X_ERR("IO error detected\n");
12758
12708 netif_device_detach(dev); 12759 netif_device_detach(dev);
12709 12760
12710 if (state == pci_channel_io_perm_failure) { 12761 if (state == pci_channel_io_perm_failure) {
@@ -12715,6 +12766,8 @@ static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12715 if (netif_running(dev)) 12766 if (netif_running(dev))
12716 bnx2x_eeh_nic_unload(bp); 12767 bnx2x_eeh_nic_unload(bp);
12717 12768
12769 bnx2x_prev_path_mark_eeh(bp);
12770
12718 pci_disable_device(pdev); 12771 pci_disable_device(pdev);
12719 12772
12720 rtnl_unlock(); 12773 rtnl_unlock();
@@ -12733,9 +12786,10 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12733{ 12786{
12734 struct net_device *dev = pci_get_drvdata(pdev); 12787 struct net_device *dev = pci_get_drvdata(pdev);
12735 struct bnx2x *bp = netdev_priv(dev); 12788 struct bnx2x *bp = netdev_priv(dev);
12789 int i;
12736 12790
12737 rtnl_lock(); 12791 rtnl_lock();
12738 12792 BNX2X_ERR("IO slot reset initializing...\n");
12739 if (pci_enable_device(pdev)) { 12793 if (pci_enable_device(pdev)) {
12740 dev_err(&pdev->dev, 12794 dev_err(&pdev->dev,
12741 "Cannot re-enable PCI device after reset\n"); 12795 "Cannot re-enable PCI device after reset\n");
@@ -12749,6 +12803,42 @@ static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12749 if (netif_running(dev)) 12803 if (netif_running(dev))
12750 bnx2x_set_power_state(bp, PCI_D0); 12804 bnx2x_set_power_state(bp, PCI_D0);
12751 12805
12806 if (netif_running(dev)) {
12807 BNX2X_ERR("IO slot reset --> driver unload\n");
12808 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
12809 u32 v;
12810
12811 v = SHMEM2_RD(bp,
12812 drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
12813 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
12814 v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
12815 }
12816 bnx2x_drain_tx_queues(bp);
12817 bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
12818 bnx2x_netif_stop(bp, 1);
12819 bnx2x_free_irq(bp);
12820
12821 /* Report UNLOAD_DONE to MCP */
12822 bnx2x_send_unload_done(bp, true);
12823
12824 bp->sp_state = 0;
12825 bp->port.pmf = 0;
12826
12827 bnx2x_prev_unload(bp);
12828
12829 /* We should have reset the engine, so it's fair to
12830 * assume the FW will no longer write to the bnx2x driver.
12831 */
12832 bnx2x_squeeze_objects(bp);
12833 bnx2x_free_skbs(bp);
12834 for_each_rx_queue(bp, i)
12835 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
12836 bnx2x_free_fp_mem(bp);
12837 bnx2x_free_mem(bp);
12838
12839 bp->state = BNX2X_STATE_CLOSED;
12840 }
12841
12752 rtnl_unlock(); 12842 rtnl_unlock();
12753 12843
12754 return PCI_ERS_RESULT_RECOVERED; 12844 return PCI_ERS_RESULT_RECOVERED;
@@ -12775,6 +12865,9 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
12775 12865
12776 bnx2x_eeh_recover(bp); 12866 bnx2x_eeh_recover(bp);
12777 12867
12868 bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
12869 DRV_MSG_SEQ_NUMBER_MASK;
12870
12778 if (netif_running(dev)) 12871 if (netif_running(dev))
12779 bnx2x_nic_load(bp, LOAD_NORMAL); 12872 bnx2x_nic_load(bp, LOAD_NORMAL);
12780 12873
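
Taken together, the last three hunks fill out the standard PCI error-recovery triple: error_detected detaches and unloads, slot_reset re-enables the function and scrubs leftover driver state, and resume re-reads fw_seq and reloads the nic. A kernel-style sketch of how the callbacks hang together (not standalone; demo_* names are hypothetical):

#include <linux/pci.h>

static pci_ers_result_t demo_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	/* detach the netdev, unload, mark the path for the next probe */
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t demo_io_slot_reset(struct pci_dev *pdev)
{
	/* re-enable the device, drain and free whatever the unload left */
	return PCI_ERS_RESULT_RECOVERED;
}

static void demo_io_resume(struct pci_dev *pdev)
{
	/* refresh fw_seq from shmem, then reload the nic */
}

static const struct pci_error_handlers demo_err_handler = {
	.error_detected	= demo_io_error_detected,
	.slot_reset	= demo_io_slot_reset,
	.resume		= demo_io_resume,
};
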
@@ -12797,6 +12890,9 @@ static struct pci_driver bnx2x_pci_driver = {
12797 .suspend = bnx2x_suspend, 12890 .suspend = bnx2x_suspend,
12798 .resume = bnx2x_resume, 12891 .resume = bnx2x_resume,
12799 .err_handler = &bnx2x_err_handler, 12892 .err_handler = &bnx2x_err_handler,
12893#ifdef CONFIG_BNX2X_SRIOV
12894 .sriov_configure = bnx2x_sriov_configure,
12895#endif
12800}; 12896};
12801 12897
12802static int __init bnx2x_init(void) 12898static int __init bnx2x_init(void)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 791eb2d53011..d22bc40091ec 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -1491,10 +1491,6 @@
1491/* [R 4] This field indicates the type of the device. '0' - 2 Ports; '1' - 1 1491/* [R 4] This field indicates the type of the device. '0' - 2 Ports; '1' - 1
1492 Port. */ 1492 Port. */
1493#define MISC_REG_BOND_ID 0xa400 1493#define MISC_REG_BOND_ID 0xa400
1494/* [R 8] These bits indicate the metal revision of the chip. This value
1495 starts at 0x00 for each all-layer tape-out and increments by one for each
1496 tape-out. */
1497#define MISC_REG_CHIP_METAL 0xa404
1498/* [R 16] These bits indicate the part number for the chip. */ 1494/* [R 16] These bits indicate the part number for the chip. */
1499#define MISC_REG_CHIP_NUM 0xa408 1495#define MISC_REG_CHIP_NUM 0xa408
1500/* [R 4] These bits indicate the base revision of the chip. This value 1496/* [R 4] These bits indicate the base revision of the chip. This value
@@ -6331,6 +6327,8 @@
6331#define PCI_PM_DATA_B 0x414 6327#define PCI_PM_DATA_B 0x414
6332#define PCI_ID_VAL1 0x434 6328#define PCI_ID_VAL1 0x434
6333#define PCI_ID_VAL2 0x438 6329#define PCI_ID_VAL2 0x438
6330#define PCI_ID_VAL3 0x43c
6331
6334#define GRC_CONFIG_REG_PF_INIT_VF 0x624 6332#define GRC_CONFIG_REG_PF_INIT_VF 0x624
6335#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xf 6333#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xf
6336/* First VF_NUM for PF is encoded in this register. 6334/* First VF_NUM for PF is encoded in this register.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 7306416bc90d..32a9609cc98b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -30,8 +30,6 @@
30 30
31#define BNX2X_MAX_EMUL_MULTI 16 31#define BNX2X_MAX_EMUL_MULTI 16
32 32
33#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
34
35/**** Exe Queue interfaces ****/ 33/**** Exe Queue interfaces ****/
36 34
37/** 35/**
@@ -444,30 +442,21 @@ static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
444} 442}
445 443
446static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, 444static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
447 int n, u8 *buf) 445 int n, u8 *base, u8 stride, u8 size)
448{ 446{
449 struct bnx2x_vlan_mac_registry_elem *pos; 447 struct bnx2x_vlan_mac_registry_elem *pos;
450 u8 *next = buf; 448 u8 *next = base;
451 int counter = 0; 449 int counter = 0;
452 450
453 /* traverse list */ 451 /* traverse list */
454 list_for_each_entry(pos, &o->head, link) { 452 list_for_each_entry(pos, &o->head, link) {
455 if (counter < n) { 453 if (counter < n) {
456 /* place leading zeroes in buffer */ 454 memcpy(next, &pos->u, size);
457 memset(next, 0, MAC_LEADING_ZERO_CNT);
458
459 /* place mac after leading zeroes*/
460 memcpy(next + MAC_LEADING_ZERO_CNT, pos->u.mac.mac,
461 ETH_ALEN);
462
463 /* calculate address of next element and
464 * advance counter
465 */
466 counter++; 455 counter++;
467 next = buf + counter * ALIGN(ETH_ALEN, sizeof(u32)); 456 DP(BNX2X_MSG_SP, "copied element number %d to address %p element was:\n",
457 counter, next);
458 next += stride + size;
468 459
469 DP(BNX2X_MSG_SP, "copied element number %d to address %p element was %pM\n",
470 counter, next, pos->u.mac.mac);
471 } 460 }
472 } 461 }
473 return counter * ETH_ALEN; 462 return counter * ETH_ALEN;
@@ -487,7 +476,8 @@ static int bnx2x_check_mac_add(struct bnx2x *bp,
487 476
488 /* Check if a requested MAC already exists */ 477 /* Check if a requested MAC already exists */
489 list_for_each_entry(pos, &o->head, link) 478 list_for_each_entry(pos, &o->head, link)
490 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) 479 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN) &&
480 (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
491 return -EEXIST; 481 return -EEXIST;
492 482
493 return 0; 483 return 0;
@@ -520,7 +510,9 @@ static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
520 list_for_each_entry(pos, &o->head, link) 510 list_for_each_entry(pos, &o->head, link)
521 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && 511 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
522 (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac, 512 (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
523 ETH_ALEN))) 513 ETH_ALEN)) &&
514 (data->vlan_mac.is_inner_mac ==
515 pos->u.vlan_mac.is_inner_mac))
524 return -EEXIST; 516 return -EEXIST;
525 517
526 return 0; 518 return 0;
@@ -538,7 +530,8 @@ static struct bnx2x_vlan_mac_registry_elem *
538 DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac); 530 DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
539 531
540 list_for_each_entry(pos, &o->head, link) 532 list_for_each_entry(pos, &o->head, link)
541 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) 533 if ((!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN)) &&
534 (data->mac.is_inner_mac == pos->u.mac.is_inner_mac))
542 return pos; 535 return pos;
543 536
544 return NULL; 537 return NULL;
@@ -573,7 +566,9 @@ static struct bnx2x_vlan_mac_registry_elem *
573 list_for_each_entry(pos, &o->head, link) 566 list_for_each_entry(pos, &o->head, link)
574 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) && 567 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
575 (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac, 568 (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
576 ETH_ALEN))) 569 ETH_ALEN)) &&
570 (data->vlan_mac.is_inner_mac ==
571 pos->u.vlan_mac.is_inner_mac))
577 return pos; 572 return pos;
578 573
579 return NULL; 574 return NULL;
@@ -770,6 +765,8 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
770 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb, 765 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
771 &rule_entry->mac.mac_mid, 766 &rule_entry->mac.mac_mid,
772 &rule_entry->mac.mac_lsb, mac); 767 &rule_entry->mac.mac_lsb, mac);
768 rule_entry->mac.inner_mac =
769 cpu_to_le16(elem->cmd_data.vlan_mac.u.mac.is_inner_mac);
773 770
774 /* MOVE: Add a rule that will add this MAC to the target Queue */ 771 /* MOVE: Add a rule that will add this MAC to the target Queue */
775 if (cmd == BNX2X_VLAN_MAC_MOVE) { 772 if (cmd == BNX2X_VLAN_MAC_MOVE) {
@@ -786,6 +783,9 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
786 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb, 783 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
787 &rule_entry->mac.mac_mid, 784 &rule_entry->mac.mac_mid,
788 &rule_entry->mac.mac_lsb, mac); 785 &rule_entry->mac.mac_lsb, mac);
786 rule_entry->mac.inner_mac =
787 cpu_to_le16(elem->cmd_data.vlan_mac.
788 u.mac.is_inner_mac);
789 } 789 }
790 790
791 /* Set the ramrod data header */ 791 /* Set the ramrod data header */
@@ -974,7 +974,8 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
974 bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, 974 bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
975 &rule_entry->pair.mac_mid, 975 &rule_entry->pair.mac_mid,
976 &rule_entry->pair.mac_lsb, mac); 976 &rule_entry->pair.mac_lsb, mac);
977 977 rule_entry->pair.inner_mac =
978 cpu_to_le16(elem->cmd_data.vlan_mac.u.vlan_mac.is_inner_mac);
978 /* MOVE: Add a rule that will add this MAC to the target Queue */ 979 /* MOVE: Add a rule that will add this MAC to the target Queue */
979 if (cmd == BNX2X_VLAN_MAC_MOVE) { 980 if (cmd == BNX2X_VLAN_MAC_MOVE) {
980 rule_entry++; 981 rule_entry++;
@@ -991,6 +992,9 @@ static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
991 bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb, 992 bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
992 &rule_entry->pair.mac_mid, 993 &rule_entry->pair.mac_mid,
993 &rule_entry->pair.mac_lsb, mac); 994 &rule_entry->pair.mac_lsb, mac);
995 rule_entry->pair.inner_mac =
996 cpu_to_le16(elem->cmd_data.vlan_mac.u.
997 vlan_mac.is_inner_mac);
994 } 998 }
995 999
996 /* Set the ramrod data header */ 1000 /* Set the ramrod data header */
@@ -1854,6 +1858,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
1854 return rc; 1858 return rc;
1855 } 1859 }
1856 list_del(&exeq_pos->link); 1860 list_del(&exeq_pos->link);
1861 bnx2x_exe_queue_free_elem(bp, exeq_pos);
1857 } 1862 }
1858 } 1863 }
1859 1864
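
The single added line here plugs a leak: an element unlinked from the pending-commands queue was never handed back to the allocator. A kernel-style sketch of the corrected delete-and-free walk (not standalone; the _safe iterator is what makes removal during traversal legal):

#include <linux/list.h>
#include <linux/slab.h>

struct demo_exeq_elem {
	struct list_head link;
};

static void demo_exe_queue_flush(struct list_head *exe_queue)
{
	struct demo_exeq_elem *pos, *tmp;

	list_for_each_entry_safe(pos, tmp, exe_queue, link) {
		list_del(&pos->link);	/* unlink from the queue... */
		kfree(pos);		/* ...and free it, or it leaks */
	}
}
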
@@ -2012,6 +2017,7 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
2012 vlan_obj->check_move = bnx2x_check_move; 2017 vlan_obj->check_move = bnx2x_check_move;
2013 vlan_obj->ramrod_cmd = 2018 vlan_obj->ramrod_cmd =
2014 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES; 2019 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
2020 vlan_obj->get_n_elements = bnx2x_get_n_elements;
2015 2021
2016 /* Exe Queue */ 2022 /* Exe Queue */
2017 bnx2x_exe_queue_init(bp, 2023 bnx2x_exe_queue_init(bp,
@@ -4426,6 +4432,12 @@ static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4426 tx_data->force_default_pri_flg = 4432 tx_data->force_default_pri_flg =
4427 test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags); 4433 test_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, flags);
4428 4434
4435 tx_data->tunnel_lso_inc_ip_id =
4436 test_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, flags);
4437 tx_data->tunnel_non_lso_pcsum_location =
4438 test_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, flags) ? PCSUM_ON_PKT :
4439 PCSUM_ON_BD;
4440
4429 tx_data->tx_status_block_id = params->fw_sb_id; 4441 tx_data->tx_status_block_id = params->fw_sb_id;
4430 tx_data->tx_sb_index_number = params->sb_cq_index; 4442 tx_data->tx_sb_index_number = params->sb_cq_index;
4431 tx_data->tss_leading_client_id = params->tss_leading_cl_id; 4443 tx_data->tss_leading_client_id = params->tss_leading_cl_id;
@@ -5669,17 +5681,18 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
5669 memset(rdata, 0, sizeof(*rdata)); 5681 memset(rdata, 0, sizeof(*rdata));
5670 5682
5671 /* Fill the ramrod data with provided parameters */ 5683 /* Fill the ramrod data with provided parameters */
5672 rdata->function_mode = (u8)start_params->mf_mode; 5684 rdata->function_mode = (u8)start_params->mf_mode;
5673 rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag); 5685 rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
5674 rdata->path_id = BP_PATH(bp); 5686 rdata->path_id = BP_PATH(bp);
5675 rdata->network_cos_mode = start_params->network_cos_mode; 5687 rdata->network_cos_mode = start_params->network_cos_mode;
5676 5688 rdata->gre_tunnel_mode = start_params->gre_tunnel_mode;
5677 /* 5689 rdata->gre_tunnel_rss = start_params->gre_tunnel_rss;
5678 * No need for an explicit memory barrier here as long we would 5690
5679 * need to ensure the ordering of writing to the SPQ element 5691 /* No need for an explicit memory barrier here as long we would
5680 * and updating of the SPQ producer which involves a memory 5692 * need to ensure the ordering of writing to the SPQ element
5681 * read and we will have to put a full memory barrier there 5693 * and updating of the SPQ producer which involves a memory
5682 * (inside bnx2x_sp_post()). 5694 * read and we will have to put a full memory barrier there
5695 * (inside bnx2x_sp_post()).
5683 */ 5696 */
5684 5697
5685 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 5698 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index ff907609b9fc..43c00bc84a08 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -100,6 +100,7 @@ struct bnx2x_raw_obj {
100/************************* VLAN-MAC commands related parameters ***************/ 100/************************* VLAN-MAC commands related parameters ***************/
101struct bnx2x_mac_ramrod_data { 101struct bnx2x_mac_ramrod_data {
102 u8 mac[ETH_ALEN]; 102 u8 mac[ETH_ALEN];
103 u8 is_inner_mac;
103}; 104};
104 105
105struct bnx2x_vlan_ramrod_data { 106struct bnx2x_vlan_ramrod_data {
@@ -108,6 +109,7 @@ struct bnx2x_vlan_ramrod_data {
108 109
109struct bnx2x_vlan_mac_ramrod_data { 110struct bnx2x_vlan_mac_ramrod_data {
110 u8 mac[ETH_ALEN]; 111 u8 mac[ETH_ALEN];
112 u8 is_inner_mac;
111 u16 vlan; 113 u16 vlan;
112}; 114};
113 115
@@ -313,8 +315,9 @@ struct bnx2x_vlan_mac_obj {
313 * 315 *
314 * @return number of copied bytes 316 * @return number of copied bytes
315 */ 317 */
316 int (*get_n_elements)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o, 318 int (*get_n_elements)(struct bnx2x *bp,
317 int n, u8 *buf); 319 struct bnx2x_vlan_mac_obj *o, int n, u8 *base,
320 u8 stride, u8 size);
318 321
319 /** 322 /**
320 * Checks if ADD-ramrod with the given params may be performed. 323 * Checks if ADD-ramrod with the given params may be performed.
@@ -824,7 +827,9 @@ enum {
824 BNX2X_Q_FLG_TX_SEC, 827 BNX2X_Q_FLG_TX_SEC,
825 BNX2X_Q_FLG_ANTI_SPOOF, 828 BNX2X_Q_FLG_ANTI_SPOOF,
826 BNX2X_Q_FLG_SILENT_VLAN_REM, 829 BNX2X_Q_FLG_SILENT_VLAN_REM,
827 BNX2X_Q_FLG_FORCE_DEFAULT_PRI 830 BNX2X_Q_FLG_FORCE_DEFAULT_PRI,
831 BNX2X_Q_FLG_PCSUM_ON_PKT,
832 BNX2X_Q_FLG_TUN_INC_INNER_IP_ID
828}; 833};
829 834
830/* Queue type options: queue type may be a combination of below. */ 835/* Queue type options: queue type may be a combination of below. */
@@ -842,6 +847,7 @@ enum bnx2x_q_type {
842#define BNX2X_MULTI_TX_COS_E3B0 3 847#define BNX2X_MULTI_TX_COS_E3B0 3
843#define BNX2X_MULTI_TX_COS 3 /* Maximum possible */ 848#define BNX2X_MULTI_TX_COS 3 /* Maximum possible */
844 849
850#define MAC_PAD (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
845 851
846struct bnx2x_queue_init_params { 852struct bnx2x_queue_init_params {
847 struct { 853 struct {
@@ -1118,6 +1124,15 @@ struct bnx2x_func_start_params {
1118 1124
1119 /* Function cos mode */ 1125 /* Function cos mode */
1120 u8 network_cos_mode; 1126 u8 network_cos_mode;
1127
1128 /* NVGRE classification enablement */
1129 u8 nvgre_clss_en;
1130
1131 /* NO_GRE_TUNNEL/NVGRE_TUNNEL/L2GRE_TUNNEL/IPGRE_TUNNEL */
1132 u8 gre_tunnel_mode;
1133
1134 /* GRE_OUTER_HEADERS_RSS/GRE_INNER_HEADERS_RSS/NVGRE_KEY_ENTROPY_RSS */
1135 u8 gre_tunnel_rss;
1121}; 1136};
1122 1137
1123struct bnx2x_func_switch_update_params { 1138struct bnx2x_func_switch_update_params {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 6adfa2093581..2ce7c7471367 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -20,7 +20,9 @@
20#include "bnx2x.h" 20#include "bnx2x.h"
21#include "bnx2x_init.h" 21#include "bnx2x_init.h"
22#include "bnx2x_cmn.h" 22#include "bnx2x_cmn.h"
23#include "bnx2x_sp.h"
23#include <linux/crc32.h> 24#include <linux/crc32.h>
25#include <linux/if_vlan.h>
24 26
25/* General service functions */ 27/* General service functions */
26static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid, 28static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
@@ -555,8 +557,7 @@ static int bnx2x_vfop_config_list(struct bnx2x *bp,
555 rc = bnx2x_config_vlan_mac(bp, vlan_mac); 557 rc = bnx2x_config_vlan_mac(bp, vlan_mac);
556 if (rc >= 0) { 558 if (rc >= 0) {
557 cnt += pos->add ? 1 : -1; 559 cnt += pos->add ? 1 : -1;
558 list_del(&pos->link); 560 list_move(&pos->link, &rollback_list);
559 list_add(&pos->link, &rollback_list);
560 rc = 0; 561 rc = 0;
561 } else if (rc == -EEXIST) { 562 } else if (rc == -EEXIST) {
562 rc = 0; 563 rc = 0;
@@ -958,6 +959,12 @@ op_err:
958 BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc); 959 BNX2X_ERR("QSETUP[%d:%d] error: rc %d\n", vf->abs_vfid, qid, vfop->rc);
959op_done: 960op_done:
960 case BNX2X_VFOP_QSETUP_DONE: 961 case BNX2X_VFOP_QSETUP_DONE:
962 vf->cfg_flags |= VF_CFG_VLAN;
963 smp_mb__before_clear_bit();
964 set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
965 &bp->sp_rtnl_state);
966 smp_mb__after_clear_bit();
967 schedule_delayed_work(&bp->sp_rtnl_task, 0);
961 bnx2x_vfop_end(bp, vf, vfop); 968 bnx2x_vfop_end(bp, vf, vfop);
962 return; 969 return;
963 default: 970 default:
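
The QSETUP completion uses the same deferral idiom as the sp_rtnl changes earlier in this patch: publish a request bit between full memory barriers, then kick the delayed work that will honour it under its own locking. A kernel-style sketch of the idiom (not standalone; demo_* names are hypothetical, and demo_sp_rtnl_task is assumed to have been set up with INIT_DELAYED_WORK during probe):

#include <linux/bitops.h>
#include <linux/workqueue.h>

#define DEMO_SP_RTNL_HYPERVISOR_VLAN	0

static unsigned long demo_sp_rtnl_state;
static struct delayed_work demo_sp_rtnl_task;

static void demo_request_vlan_config(void)
{
	smp_mb__before_clear_bit();	/* order prior stores vs. the bit */
	set_bit(DEMO_SP_RTNL_HYPERVISOR_VLAN, &demo_sp_rtnl_state);
	smp_mb__after_clear_bit();	/* publish the bit before the kick */
	schedule_delayed_work(&demo_sp_rtnl_task, 0);
}

static void demo_sp_rtnl_handler(struct work_struct *work)
{
	if (test_and_clear_bit(DEMO_SP_RTNL_HYPERVISOR_VLAN,
			       &demo_sp_rtnl_state))
		; /* take rtnl and do the actual vlan configuration */
}
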
@@ -1459,7 +1466,6 @@ static u8 bnx2x_vf_is_pcie_pending(struct bnx2x *bp, u8 abs_vfid)
1459 return bnx2x_is_pcie_pending(dev); 1466 return bnx2x_is_pcie_pending(dev);
1460 1467
1461unknown_dev: 1468unknown_dev:
1462 BNX2X_ERR("Unknown device\n");
1463 return false; 1469 return false;
1464} 1470}
1465 1471
@@ -1926,20 +1932,22 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
1926 1932
1927 /* SRIOV can be enabled only with MSIX */ 1933 /* SRIOV can be enabled only with MSIX */
1928 if (int_mode_param == BNX2X_INT_MODE_MSI || 1934 if (int_mode_param == BNX2X_INT_MODE_MSI ||
1929 int_mode_param == BNX2X_INT_MODE_INTX) 1935 int_mode_param == BNX2X_INT_MODE_INTX) {
1930 BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n"); 1936 BNX2X_ERR("Forced MSI/INTx mode is incompatible with SRIOV\n");
1937 return 0;
1938 }
1931 1939
1932 err = -EIO; 1940 err = -EIO;
1933 /* verify ari is enabled */ 1941 /* verify ari is enabled */
1934 if (!bnx2x_ari_enabled(bp->pdev)) { 1942 if (!bnx2x_ari_enabled(bp->pdev)) {
1935 BNX2X_ERR("ARI not supported, SRIOV can not be enabled\n"); 1943 BNX2X_ERR("ARI not supported (check pci bridge ARI forwarding), SRIOV can not be enabled\n");
1936 return err; 1944 return 0;
1937 } 1945 }
1938 1946
1939 /* verify igu is in normal mode */ 1947 /* verify igu is in normal mode */
1940 if (CHIP_INT_MODE_IS_BC(bp)) { 1948 if (CHIP_INT_MODE_IS_BC(bp)) {
1941 BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n"); 1949 BNX2X_ERR("IGU not normal mode, SRIOV can not be enabled\n");
1942 return err; 1950 return 0;
1943 } 1951 }
1944 1952
1945 /* allocate the vfs database */ 1953 /* allocate the vfs database */
@@ -1964,8 +1972,10 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
1964 if (iov->total == 0) 1972 if (iov->total == 0)
1965 goto failed; 1973 goto failed;
1966 1974
1967 /* calculate the actual number of VFs */ 1975 iov->nr_virtfn = min_t(u16, iov->total, num_vfs_param);
1968 iov->nr_virtfn = min_t(u16, iov->total, (u16)num_vfs_param); 1976
1977 DP(BNX2X_MSG_IOV, "num_vfs_param was %d, nr_virtfn was %d\n",
1978 num_vfs_param, iov->nr_virtfn);
1969 1979
1970 /* allocate the vf array */ 1980 /* allocate the vf array */
1971 bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) * 1981 bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
@@ -2378,8 +2388,8 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
2378 goto get_vf; 2388 goto get_vf;
2379 case EVENT_RING_OPCODE_MALICIOUS_VF: 2389 case EVENT_RING_OPCODE_MALICIOUS_VF:
2380 abs_vfid = elem->message.data.malicious_vf_event.vf_id; 2390 abs_vfid = elem->message.data.malicious_vf_event.vf_id;
2381 DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d\n", 2391 DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d err_id=0x%x\n",
2382 abs_vfid); 2392 abs_vfid, elem->message.data.malicious_vf_event.err_id);
2383 goto get_vf; 2393 goto get_vf;
2384 default: 2394 default:
2385 return 1; 2395 return 1;
@@ -2436,8 +2446,8 @@ get_vf:
2436 /* Do nothing for now */ 2446 /* Do nothing for now */
2437 break; 2447 break;
2438 case EVENT_RING_OPCODE_MALICIOUS_VF: 2448 case EVENT_RING_OPCODE_MALICIOUS_VF:
2439 DP(BNX2X_MSG_IOV, "got VF [%d] MALICIOUS notification\n", 2449 DP(BNX2X_MSG_IOV, "Got VF MALICIOUS notification abs_vfid=%d error id %x\n",
2440 vf->abs_vfid); 2450 abs_vfid, elem->message.data.malicious_vf_event.err_id);
2441 /* Do nothing for now */ 2451 /* Do nothing for now */
2442 break; 2452 break;
2443 } 2453 }
@@ -3012,21 +3022,138 @@ void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
3012 vf->op_current = CHANNEL_TLV_NONE; 3022 vf->op_current = CHANNEL_TLV_NONE;
3013} 3023}
3014 3024
3015void bnx2x_enable_sriov(struct bnx2x *bp) 3025int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
3016{ 3026{
3017 int rc = 0;
3018 3027
3019 /* disbale sriov in case it is still enabled */ 3028 struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
3029
3030 DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
3031 num_vfs_param, BNX2X_NR_VIRTFN(bp));
3032
3033 /* HW channel is only operational when PF is up */
3034 if (bp->state != BNX2X_STATE_OPEN) {
3035 BNX2X_ERR("VF num configuration via sysfs not supported while PF is down\n");
3036 return -EINVAL;
3037 }
3038
3039 /* we are always bound by the total_vfs in the configuration space */
3040 if (num_vfs_param > BNX2X_NR_VIRTFN(bp)) {
3041 BNX2X_ERR("truncating requested number of VFs (%d) down to maximum allowed (%d)\n",
3042 num_vfs_param, BNX2X_NR_VIRTFN(bp));
3043 num_vfs_param = BNX2X_NR_VIRTFN(bp);
3044 }
3045
3046 bp->requested_nr_virtfn = num_vfs_param;
3047 if (num_vfs_param == 0) {
3048 pci_disable_sriov(dev);
3049 return 0;
3050 } else {
3051 return bnx2x_enable_sriov(bp);
3052 }
3053}
3054
3055int bnx2x_enable_sriov(struct bnx2x *bp)
3056{
3057 int rc = 0, req_vfs = bp->requested_nr_virtfn;
3058
3059 rc = pci_enable_sriov(bp->pdev, req_vfs);
3060 if (rc) {
3061 BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
3062 return rc;
3063 }
3064 DP(BNX2X_MSG_IOV, "sriov enabled (%d vfs)\n", req_vfs);
3065 return req_vfs;
3066}
3067
3068void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp)
3069{
3070 int vfidx;
3071 struct pf_vf_bulletin_content *bulletin;
3072
3073 DP(BNX2X_MSG_IOV, "configuring vlan for VFs from sp-task\n");
3074 for_each_vf(bp, vfidx) {
3075 bulletin = BP_VF_BULLETIN(bp, vfidx);
3076 if (BP_VF(bp, vfidx)->cfg_flags & VF_CFG_VLAN)
3077 bnx2x_set_vf_vlan(bp->dev, vfidx, bulletin->vlan, 0);
3078 }
3079}
3080
3081void bnx2x_disable_sriov(struct bnx2x *bp)
3082{
3020 pci_disable_sriov(bp->pdev); 3083 pci_disable_sriov(bp->pdev);
3021 DP(BNX2X_MSG_IOV, "sriov disabled\n"); 3084}
3085
3086static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx,
3087 struct bnx2x_virtf *vf)
3088{
3089 if (!IS_SRIOV(bp)) {
3090 BNX2X_ERR("vf ndo called though sriov is disabled\n");
3091 return -EINVAL;
3092 }
3093
3094 if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
3095 BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
3096 vfidx, BNX2X_NR_VIRTFN(bp));
3097 return -EINVAL;
3098 }
3099
3100 if (!vf) {
3101 BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n",
3102 vfidx);
3103 return -EINVAL;
3104 }
3022 3105
3023 /* enable sriov */ 3106 return 0;
3024 DP(BNX2X_MSG_IOV, "vf num (%d)\n", (bp->vfdb->sriov.nr_virtfn)); 3107}
3025 rc = pci_enable_sriov(bp->pdev, (bp->vfdb->sriov.nr_virtfn)); 3108
3109int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
3110 struct ifla_vf_info *ivi)
3111{
3112 struct bnx2x *bp = netdev_priv(dev);
3113 struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
3114 struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
3115 struct bnx2x_vlan_mac_obj *vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj);
3116 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
3117 int rc;
3118
3119 /* sanity */
3120 rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
3026 if (rc) 3121 if (rc)
3027 BNX2X_ERR("pci_enable_sriov failed with %d\n", rc); 3122 return rc;
3028 else 3123 if (!mac_obj || !vlan_obj || !bulletin) {
3029 DP(BNX2X_MSG_IOV, "sriov enabled\n"); 3124 BNX2X_ERR("VF partially initialized\n");
3125 return -EINVAL;
3126 }
3127
3128 ivi->vf = vfidx;
3129 ivi->qos = 0;
3130 ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */
3131 ivi->spoofchk = 1; /* always enabled */
3132 if (vf->state == VF_ENABLED) {
3133 /* mac and vlan are in vlan_mac objects */
3134 mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
3135 0, ETH_ALEN);
3136 vlan_obj->get_n_elements(bp, vlan_obj, 1, (u8 *)&ivi->vlan,
3137 0, VLAN_HLEN);
3138 } else {
3139 /* mac */
3140 if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
3141 /* mac configured by ndo so its in bulletin board */
3142 memcpy(&ivi->mac, bulletin->mac, ETH_ALEN);
3143 else
3144 /* function has not been loaded yet. Show mac as 0s */
3145 memset(&ivi->mac, 0, ETH_ALEN);
3146
3147 /* vlan */
3148 if (bulletin->valid_bitmap & (1 << VLAN_VALID))
3149 /* vlan configured by ndo so its in bulletin board */
3150 memcpy(&ivi->vlan, &bulletin->vlan, VLAN_HLEN);
3151 else
3152 /* function has not been loaded yet. Show vlans as 0s */
3153 memset(&ivi->vlan, 0, VLAN_HLEN);
3154 }
3155
3156 return 0;
3030} 3157}
3031 3158
3032/* New mac for VF. Consider these cases: 3159/* New mac for VF. Consider these cases:
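
bnx2x_sriov_configure, added in the hunk above, reduces to a clamp plus a zero test: requests above the config-space total are truncated with a warning, zero takes the disable path, and anything else enables SR-IOV and echoes the VF count back. A userspace sketch of that policy (total_vfs stands in for BNX2X_NR_VIRTFN(bp)):

#include <stdio.h>

static int sriov_configure_sketch(int requested, int total_vfs)
{
	if (requested > total_vfs) {
		printf("truncating %d down to %d\n", requested, total_vfs);
		requested = total_vfs;
	}
	if (requested == 0)
		return 0;	/* pci_disable_sriov() path */
	return requested;	/* pci_enable_sriov() path */
}

int main(void)
{
	printf("-> %d\n", sriov_configure_sketch(70, 64));
	printf("-> %d\n", sriov_configure_sketch(0, 64));
	return 0;
}
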
@@ -3044,23 +3171,19 @@ void bnx2x_enable_sriov(struct bnx2x *bp)
3044 * VF to configure any mac for itself except for this mac. In case of a race 3171 * VF to configure any mac for itself except for this mac. In case of a race
3045 * where the VF fails to see the new post on its bulletin board before sending a 3172 * where the VF fails to see the new post on its bulletin board before sending a
3046 * mac configuration request, the PF will simply fail the request and VF can try 3173 * mac configuration request, the PF will simply fail the request and VF can try
3047 * again after consulting its bulletin board 3174 * again after consulting its bulletin board.
3048 */ 3175 */
3049int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac) 3176int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
3050{ 3177{
3051 struct bnx2x *bp = netdev_priv(dev); 3178 struct bnx2x *bp = netdev_priv(dev);
3052 int rc, q_logical_state, vfidx = queue; 3179 int rc, q_logical_state;
3053 struct bnx2x_virtf *vf = BP_VF(bp, vfidx); 3180 struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
3054 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx); 3181 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
3055 3182
3056 /* if SRIOV is disabled there is nothing to do (and somewhere, someone 3183 /* sanity */
3057 * has erred). 3184 rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
3058 */ 3185 if (rc)
3059 if (!IS_SRIOV(bp)) { 3186 return rc;
3060 BNX2X_ERR("bnx2x_set_vf_mac called though sriov is disabled\n");
3061 return -EINVAL;
3062 }
3063
3064 if (!is_valid_ether_addr(mac)) { 3187 if (!is_valid_ether_addr(mac)) {
3065 BNX2X_ERR("mac address invalid\n"); 3188 BNX2X_ERR("mac address invalid\n");
3066 return -EINVAL; 3189 return -EINVAL;
@@ -3085,7 +3208,7 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
3085 if (vf->state == VF_ENABLED && 3208 if (vf->state == VF_ENABLED &&
3086 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) { 3209 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
3087 /* configure the mac in device on this vf's queue */ 3210 /* configure the mac in device on this vf's queue */
3088 unsigned long flags = 0; 3211 unsigned long ramrod_flags = 0;
3089 struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj); 3212 struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
3090 3213
3091 /* must lock vfpf channel to protect against vf flows */ 3214 /* must lock vfpf channel to protect against vf flows */
@@ -3106,14 +3229,133 @@ int bnx2x_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
3106 } 3229 }
3107 3230
3108 /* configure the new mac to device */ 3231 /* configure the new mac to device */
3109 __set_bit(RAMROD_COMP_WAIT, &flags); 3232 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3110 bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true, 3233 bnx2x_set_mac_one(bp, (u8 *)&bulletin->mac, mac_obj, true,
3111 BNX2X_ETH_MAC, &flags); 3234 BNX2X_ETH_MAC, &ramrod_flags);
3112 3235
3113 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC); 3236 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
3114 } 3237 }
3115 3238
3116 return rc; 3239 return 0;
3240}
3241
3242int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
3243{
3244 struct bnx2x *bp = netdev_priv(dev);
3245 int rc, q_logical_state;
3246 struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
3247 struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
3248
3249 /* sanity */
3250 rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
3251 if (rc)
3252 return rc;
3253
3254 if (vlan > 4095) {
3255 BNX2X_ERR("illegal vlan value %d\n", vlan);
3256 return -EINVAL;
3257 }
3258
3259 DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
3260 vfidx, vlan, 0);
3261
3262 /* update PF's copy of the VF's bulletin. No point in posting the vlan
3263 * to the VF since it doesn't have anything to do with it. But it is
3264 * useful to store it here in case the VF is not up yet, so we can
3265 * configure the vlan later when it comes up.
3266 */
3267 bulletin->valid_bitmap |= 1 << VLAN_VALID;
3268 bulletin->vlan = vlan;
3269
3270 /* is vf initialized and queue set up? */
3271 q_logical_state =
3272 bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
3273 if (vf->state == VF_ENABLED &&
3274 q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
3275 /* configure the vlan in device on this vf's queue */
3276 unsigned long ramrod_flags = 0;
3277 unsigned long vlan_mac_flags = 0;
3278 struct bnx2x_vlan_mac_obj *vlan_obj =
3279 &bnx2x_vfq(vf, 0, vlan_obj);
3280 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
3281 struct bnx2x_queue_state_params q_params = {NULL};
3282 struct bnx2x_queue_update_params *update_params;
3283
3284 memset(&ramrod_param, 0, sizeof(ramrod_param));
3285
3286 /* must lock vfpf channel to protect against vf flows */
3287 bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
3288
3289 /* remove existing vlans */
3290 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3291 rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
3292 &ramrod_flags);
3293 if (rc) {
3294 BNX2X_ERR("failed to delete vlans\n");
3295 return -EINVAL;
3296 }
3297
3298 /* send queue update ramrod to configure default vlan and silent
3299 * vlan removal
3300 */
3301 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
3302 q_params.cmd = BNX2X_Q_CMD_UPDATE;
3303 q_params.q_obj = &bnx2x_vfq(vf, 0, sp_obj);
3304 update_params = &q_params.params.update;
3305 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
3306 &update_params->update_flags);
3307 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
3308 &update_params->update_flags);
3309
3310 if (vlan == 0) {
3311 /* if vlan is 0 then we want to leave the VF traffic
3312 * untagged, and leave the incoming traffic untouched
3313 * (i.e. do not remove any vlan tags).
3314 */
3315 __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
3316 &update_params->update_flags);
3317 __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
3318 &update_params->update_flags);
3319 } else {
3320 /* configure the new vlan to device */
3321 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
3322 ramrod_param.vlan_mac_obj = vlan_obj;
3323 ramrod_param.ramrod_flags = ramrod_flags;
3324 ramrod_param.user_req.u.vlan.vlan = vlan;
3325 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
3326 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
3327 if (rc) {
3328 BNX2X_ERR("failed to configure vlan\n");
3329 return -EINVAL;
3330 }
3331
3332 /* configure default vlan to vf queue and set silent
3333 * vlan removal (the vf remains unaware of this vlan).
3334 */
3335 update_params = &q_params.params.update;
3336 __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
3337 &update_params->update_flags);
3338 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
3339 &update_params->update_flags);
3340 update_params->def_vlan = vlan;
3341 }
3342
3343 /* Update the Queue state */
3344 rc = bnx2x_queue_state_change(bp, &q_params);
3345 if (rc) {
3346 BNX2X_ERR("Failed to configure default VLAN\n");
3347 return rc;
3348 }
3349
3350 /* clear the flag indicating that this VF needs its vlan
3351	 * (will only be set if the HV configured the vlan before the vf was
3352	 * up, and we were called because the VF came up later)
3353	 */
3354 vf->cfg_flags &= ~VF_CFG_VLAN;
3355
3356 bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
3357 }
3358 return 0;
3117} 3359}
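
bnx2x_set_vf_mac() and bnx2x_set_vf_vlan() are the driver ends of `ip link set DEV vf N mac|vlan ...`; they only take effect once wired into the PF's net_device_ops. A minimal sketch of that wiring, assuming the ndo signatures of this kernel generation (the ops-table name is illustrative; the hookup itself is not shown in this diff):

    #include <linux/netdevice.h>

    /* sketch only: the real bnx2x ops table lives elsewhere in the driver */
    static const struct net_device_ops sketch_netdev_ops = {
            /* int (*)(struct net_device *, int vf, u8 *mac) */
            .ndo_set_vf_mac  = bnx2x_set_vf_mac,
            /* int (*)(struct net_device *, int vf, u16 vlan, u8 qos) */
            .ndo_set_vf_vlan = bnx2x_set_vf_vlan,
    };
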
3118 3360
3119/* crc is the first field in the bulletin board. compute the crc over the 3361/* crc is the first field in the bulletin board. compute the crc over the
@@ -3165,20 +3407,26 @@ enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
3165 memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN); 3407 memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
3166 } 3408 }
3167 3409
3410	 /* the vlan in the bulletin board is valid and is new */
3411 if (bulletin.valid_bitmap & 1 << VLAN_VALID)
3412 memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN);
3413
3168 /* copy new bulletin board to bp */ 3414 /* copy new bulletin board to bp */
3169 bp->old_bulletin = bulletin; 3415 bp->old_bulletin = bulletin;
3170 3416
3171 return PFVF_BULLETIN_UPDATED; 3417 return PFVF_BULLETIN_UPDATED;
3172} 3418}
3173 3419
3174void bnx2x_vf_map_doorbells(struct bnx2x *bp) 3420void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
3175{ 3421{
3176 /* vf doorbells are embedded within the regview */ 3422 /* vf doorbells are embedded within the regview */
3177 bp->doorbells = bp->regview + PXP_VF_ADDR_DB_START; 3423 return bp->regview + PXP_VF_ADDR_DB_START;
3178} 3424}
3179 3425
3180int bnx2x_vf_pci_alloc(struct bnx2x *bp) 3426int bnx2x_vf_pci_alloc(struct bnx2x *bp)
3181{ 3427{
3428 mutex_init(&bp->vf2pf_mutex);
3429
3182 /* allocate vf2pf mailbox for vf to pf channel */ 3430 /* allocate vf2pf mailbox for vf to pf channel */
3183 BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping, 3431 BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping,
3184 sizeof(struct bnx2x_vf_mbx_msg)); 3432 sizeof(struct bnx2x_vf_mbx_msg));
@@ -3196,3 +3444,26 @@ alloc_mem_err:
3196 sizeof(union pf_vf_bulletin)); 3444 sizeof(union pf_vf_bulletin));
3197 return -ENOMEM; 3445 return -ENOMEM;
3198} 3446}
3447
3448int bnx2x_open_epilog(struct bnx2x *bp)
3449{
3450	 /* Enable sriov via delayed work. This must be done via delayed work
3451	  * because it causes the probe of the vf devices to be run, which
3452	  * invokes register_netdevice, which must have the rtnl lock taken.
3453	  * As we are holding the lock right now, that could only work if the
3454	  * probe would not take the lock. However, as the probe of the vf may
3455	  * be called from other contexts as well (such as when passthrough to
3456	  * a vm fails), it can't assume the lock is being held for it. Using
3457	  * delayed work here allows the probe code to simply take the lock
3458	  * (i.e. wait for it to be released if it is being held). We only do
3459	  * this if the number of VFs was set before the PF driver was loaded.
3460 */
3461 if (IS_SRIOV(bp) && BNX2X_NR_VIRTFN(bp)) {
3462 smp_mb__before_clear_bit();
3463 set_bit(BNX2X_SP_RTNL_ENABLE_SRIOV, &bp->sp_rtnl_state);
3464 smp_mb__after_clear_bit();
3465 schedule_delayed_work(&bp->sp_rtnl_task, 0);
3466 }
3467
3468 return 0;
3469}
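
The comment in bnx2x_open_epilog() describes a general deadlock-avoidance idiom: code already holding rtnl must not synchronously trigger work that also takes rtnl, so it sets a state bit and defers to a workqueue. A stripped-down sketch of the same shape, with illustrative names (set_bit(), test_and_clear_bit() and schedule_delayed_work() are the real kernel APIs; everything else is a placeholder):

    #include <linux/workqueue.h>
    #include <linux/rtnetlink.h>
    #include <linux/bitops.h>

    #define SP_ENABLE_SRIOV 0

    static unsigned long sp_state;
    static struct delayed_work sp_task;  /* INIT_DELAYED_WORK()'d at probe */

    static void sp_task_fn(struct work_struct *work)
    {
            if (test_and_clear_bit(SP_ENABLE_SRIOV, &sp_state)) {
                    rtnl_lock();    /* safe: this context does not hold rtnl */
                    /* ... enable SR-IOV; VF probe may register_netdevice() ... */
                    rtnl_unlock();
            }
    }

    static void open_epilog_sketch(void)
    {
            /* runs under rtnl: flag the work and return immediately */
            set_bit(SP_ENABLE_SRIOV, &sp_state);
            schedule_delayed_work(&sp_task, 0);
    }
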
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index b4050173add9..d4b17b7a774e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -193,6 +193,7 @@ struct bnx2x_virtf {
193#define VF_CFG_TPA 0x0004 193#define VF_CFG_TPA 0x0004
194#define VF_CFG_INT_SIMD 0x0008 194#define VF_CFG_INT_SIMD 0x0008
195#define VF_CACHE_LINE 0x0010 195#define VF_CACHE_LINE 0x0010
196#define VF_CFG_VLAN 0x0020
196 197
197 u8 state; 198 u8 state;
198#define VF_FREE 0 /* VF ready to be acquired holds no resc */ 199#define VF_FREE 0 /* VF ready to be acquired holds no resc */
@@ -712,6 +713,7 @@ void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
712 u16 length); 713 u16 length);
713void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv, 714void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
714 u16 type, u16 length); 715 u16 type, u16 length);
716void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv);
715void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list); 717void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list);
716 718
717bool bnx2x_tlv_supported(u16 tlvtype); 719bool bnx2x_tlv_supported(u16 tlvtype);
@@ -750,13 +752,17 @@ static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
750} 752}
751 753
752enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp); 754enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
753void bnx2x_vf_map_doorbells(struct bnx2x *bp); 755void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp);
754int bnx2x_vf_pci_alloc(struct bnx2x *bp); 756int bnx2x_vf_pci_alloc(struct bnx2x *bp);
755void bnx2x_enable_sriov(struct bnx2x *bp); 757int bnx2x_enable_sriov(struct bnx2x *bp);
758void bnx2x_disable_sriov(struct bnx2x *bp);
756static inline int bnx2x_vf_headroom(struct bnx2x *bp) 759static inline int bnx2x_vf_headroom(struct bnx2x *bp)
757{ 760{
758 return bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF; 761 return bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF;
759} 762}
763void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
764int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
765int bnx2x_open_epilog(struct bnx2x *bp);
760 766
761#else /* CONFIG_BNX2X_SRIOV */ 767#else /* CONFIG_BNX2X_SRIOV */
762 768
@@ -779,7 +785,8 @@ static inline void bnx2x_iov_init_dmae(struct bnx2x *bp) {}
779static inline int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param, 785static inline int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
780 int num_vfs_param) {return 0; } 786 int num_vfs_param) {return 0; }
781static inline void bnx2x_iov_remove_one(struct bnx2x *bp) {} 787static inline void bnx2x_iov_remove_one(struct bnx2x *bp) {}
782static inline void bnx2x_enable_sriov(struct bnx2x *bp) {} 788static inline int bnx2x_enable_sriov(struct bnx2x *bp) {return 0; }
789static inline void bnx2x_disable_sriov(struct bnx2x *bp) {}
783static inline int bnx2x_vfpf_acquire(struct bnx2x *bp, 790static inline int bnx2x_vfpf_acquire(struct bnx2x *bp,
784 u8 tx_count, u8 rx_count) {return 0; } 791 u8 tx_count, u8 rx_count) {return 0; }
785static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; } 792static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }
@@ -802,8 +809,15 @@ static inline enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp
802 return PFVF_BULLETIN_UNCHANGED; 809 return PFVF_BULLETIN_UNCHANGED;
803} 810}
804 811
805static inline int bnx2x_vf_map_doorbells(struct bnx2x *bp) {return 0; } 812static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
813{
814 return NULL;
815}
816
806static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; } 817static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
818static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
819static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
820static inline int bnx2x_open_epilog(struct bnx2x *bp) {return 0; }
807 821
808#endif /* CONFIG_BNX2X_SRIOV */ 822#endif /* CONFIG_BNX2X_SRIOV */
809#endif /* bnx2x_sriov.h */ 823#endif /* bnx2x_sriov.h */
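
Note how the header keeps both configurations in lockstep: every prototype added under CONFIG_BNX2X_SRIOV gets a matching static-inline no-op in the #else branch, so callers compile identically whether SR-IOV support is built in or not. Reduced to a single entry (feature and function names illustrative):

    #ifdef CONFIG_SOME_FEATURE
    int some_feature_configure(struct pci_dev *dev, int num_vfs);
    #else
    /* stub: evaluates to a constant and compiles away when the feature is off */
    static inline int some_feature_configure(struct pci_dev *dev, int num_vfs)
    {
            return 0;
    }
    #endif
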
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 4397f8b76f2e..2ca3d94fcec2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -1547,11 +1547,51 @@ static void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
1547 } 1547 }
1548} 1548}
1549 1549
1550void bnx2x_memset_stats(struct bnx2x *bp)
1551{
1552 int i;
1553
1554 /* function stats */
1555 for_each_queue(bp, i) {
1556 struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];
1557
1558 memset(&fp_stats->old_tclient, 0,
1559 sizeof(fp_stats->old_tclient));
1560 memset(&fp_stats->old_uclient, 0,
1561 sizeof(fp_stats->old_uclient));
1562 memset(&fp_stats->old_xclient, 0,
1563 sizeof(fp_stats->old_xclient));
1564 if (bp->stats_init) {
1565 memset(&fp_stats->eth_q_stats, 0,
1566 sizeof(fp_stats->eth_q_stats));
1567 memset(&fp_stats->eth_q_stats_old, 0,
1568 sizeof(fp_stats->eth_q_stats_old));
1569 }
1570 }
1571
1572 memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
1573
1574 if (bp->stats_init) {
1575 memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
1576 memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
1577 memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
1578 memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
1579 memset(&bp->func_stats, 0, sizeof(bp->func_stats));
1580 }
1581
1582 bp->stats_state = STATS_STATE_DISABLED;
1583
1584 if (bp->port.pmf && bp->port.port_stx)
1585 bnx2x_port_stats_base_init(bp);
1586
1587	 /* mark the end of statistics initialization */
1588 bp->stats_init = false;
1589}
1590
1550void bnx2x_stats_init(struct bnx2x *bp) 1591void bnx2x_stats_init(struct bnx2x *bp)
1551{ 1592{
1552 int /*abs*/port = BP_PORT(bp); 1593 int /*abs*/port = BP_PORT(bp);
1553 int mb_idx = BP_FW_MB_IDX(bp); 1594 int mb_idx = BP_FW_MB_IDX(bp);
1554 int i;
1555 1595
1556 bp->stats_pending = 0; 1596 bp->stats_pending = 0;
1557 bp->executer_idx = 0; 1597 bp->executer_idx = 0;
@@ -1587,36 +1627,11 @@ void bnx2x_stats_init(struct bnx2x *bp)
1587 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2); 1627 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
1588 } 1628 }
1589 1629
1590 /* function stats */
1591 for_each_queue(bp, i) {
1592 struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i];
1593
1594 memset(&fp_stats->old_tclient, 0,
1595 sizeof(fp_stats->old_tclient));
1596 memset(&fp_stats->old_uclient, 0,
1597 sizeof(fp_stats->old_uclient));
1598 memset(&fp_stats->old_xclient, 0,
1599 sizeof(fp_stats->old_xclient));
1600 if (bp->stats_init) {
1601 memset(&fp_stats->eth_q_stats, 0,
1602 sizeof(fp_stats->eth_q_stats));
1603 memset(&fp_stats->eth_q_stats_old, 0,
1604 sizeof(fp_stats->eth_q_stats_old));
1605 }
1606 }
1607
1608 /* Prepare statistics ramrod data */ 1630 /* Prepare statistics ramrod data */
1609 bnx2x_prep_fw_stats_req(bp); 1631 bnx2x_prep_fw_stats_req(bp);
1610 1632
1611 memset(&bp->dev->stats, 0, sizeof(bp->dev->stats)); 1633 /* Clean SP from previous statistics */
1612 if (bp->stats_init) { 1634 if (bp->stats_init) {
1613 memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
1614 memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
1615 memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
1616 memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
1617 memset(&bp->func_stats, 0, sizeof(bp->func_stats));
1618
1619 /* Clean SP from previous statistics */
1620 if (bp->func_stx) { 1635 if (bp->func_stx) {
1621 memset(bnx2x_sp(bp, func_stats), 0, 1636 memset(bnx2x_sp(bp, func_stats), 0,
1622 sizeof(struct host_func_stats)); 1637 sizeof(struct host_func_stats));
@@ -1626,13 +1641,7 @@ void bnx2x_stats_init(struct bnx2x *bp)
1626 } 1641 }
1627 } 1642 }
1628 1643
1629 bp->stats_state = STATS_STATE_DISABLED; 1644 bnx2x_memset_stats(bp);
1630
1631 if (bp->port.pmf && bp->port.port_stx)
1632 bnx2x_port_stats_base_init(bp);
1633
1634	 /* mark the end of statistics initialization */
1635 bp->stats_init = false;
1636} 1645}
1637 1646
1638void bnx2x_save_statistics(struct bnx2x *bp) 1647void bnx2x_save_statistics(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 198f6f1c9ad5..d117f472816c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -540,8 +540,8 @@ struct bnx2x_fw_port_stats_old {
540/* forward */ 540/* forward */
541struct bnx2x; 541struct bnx2x;
542 542
543void bnx2x_memset_stats(struct bnx2x *bp);
543void bnx2x_stats_init(struct bnx2x *bp); 544void bnx2x_stats_init(struct bnx2x *bp);
544
545void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event); 545void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
546 546
547/** 547/**
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 531eebf40d60..90fbf9cc2c2c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -36,6 +36,8 @@ void bnx2x_add_tlv(struct bnx2x *bp, void *tlvs_list, u16 offset, u16 type,
36void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv, 36void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
37 u16 type, u16 length) 37 u16 type, u16 length)
38{ 38{
39 mutex_lock(&bp->vf2pf_mutex);
40
39 DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n", 41 DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n",
40 type); 42 type);
41 43
@@ -49,6 +51,15 @@ void bnx2x_vfpf_prep(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv,
49 first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req); 51 first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req);
50} 52}
51 53
54/* releases the mailbox */
55void bnx2x_vfpf_finalize(struct bnx2x *bp, struct vfpf_first_tlv *first_tlv)
56{
57 DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n",
58 first_tlv->tl.type);
59
60 mutex_unlock(&bp->vf2pf_mutex);
61}
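
With the mutex taken in bnx2x_vfpf_prep(), every mailbox transaction in this file becomes a bracketed critical section: prep acquires bp->vf2pf_mutex, the early returns below are rewritten into goto out, and the single out: label releases the mailbox through bnx2x_vfpf_finalize(). The skeleton each converted function follows (condition and error value are placeholders):

    static int vfpf_request_skeleton(struct bnx2x *bp)
    {
            struct vfpf_first_tlv *ftlv = &bp->vf2pf_mbox->req.first_tlv;
            int rc = 0;

            /* takes bp->vf2pf_mutex and prepares the shared mailbox */
            bnx2x_vfpf_prep(bp, ftlv, CHANNEL_TLV_ACQUIRE, sizeof(*ftlv));

            if (something_failed) {         /* placeholder condition */
                    rc = -EAGAIN;
                    goto out;               /* never return with the mutex held */
            }
    out:
            bnx2x_vfpf_finalize(bp, ftlv);  /* drops bp->vf2pf_mutex */
            return rc;
    }
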
62
52/* list the types and lengths of the tlvs on the buffer */ 63/* list the types and lengths of the tlvs on the buffer */
53void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list) 64void bnx2x_dp_tlv_list(struct bnx2x *bp, void *tlvs_list)
54{ 65{
@@ -181,8 +192,10 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
181 /* clear mailbox and prep first tlv */ 192 /* clear mailbox and prep first tlv */
182 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req)); 193 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req));
183 194
184 if (bnx2x_get_vf_id(bp, &vf_id)) 195 if (bnx2x_get_vf_id(bp, &vf_id)) {
185 return -EAGAIN; 196 rc = -EAGAIN;
197 goto out;
198 }
186 199
187 req->vfdev_info.vf_id = vf_id; 200 req->vfdev_info.vf_id = vf_id;
188 req->vfdev_info.vf_os = 0; 201 req->vfdev_info.vf_os = 0;
@@ -213,7 +226,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
213 226
214 /* PF timeout */ 227 /* PF timeout */
215 if (rc) 228 if (rc)
216 return rc; 229 goto out;
217 230
218 /* copy acquire response from buffer to bp */ 231 /* copy acquire response from buffer to bp */
219 memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp)); 232 memcpy(&bp->acquire_resp, resp, sizeof(bp->acquire_resp));
@@ -253,7 +266,8 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
253 /* PF reports error */ 266 /* PF reports error */
254 BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n", 267 BNX2X_ERR("Failed to get the requested amount of resources: %d. Breaking...\n",
255 bp->acquire_resp.hdr.status); 268 bp->acquire_resp.hdr.status);
256 return -EAGAIN; 269 rc = -EAGAIN;
270 goto out;
257 } 271 }
258 } 272 }
259 273
@@ -279,20 +293,24 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
279 bp->acquire_resp.resc.current_mac_addr, 293 bp->acquire_resp.resc.current_mac_addr,
280 ETH_ALEN); 294 ETH_ALEN);
281 295
282 return 0; 296out:
297 bnx2x_vfpf_finalize(bp, &req->first_tlv);
298 return rc;
283} 299}
284 300
285int bnx2x_vfpf_release(struct bnx2x *bp) 301int bnx2x_vfpf_release(struct bnx2x *bp)
286{ 302{
287 struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release; 303 struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release;
288 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; 304 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
289 u32 rc = 0, vf_id; 305 u32 rc, vf_id;
290 306
291 /* clear mailbox and prep first tlv */ 307 /* clear mailbox and prep first tlv */
292 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req)); 308 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req));
293 309
294 if (bnx2x_get_vf_id(bp, &vf_id)) 310 if (bnx2x_get_vf_id(bp, &vf_id)) {
295 return -EAGAIN; 311 rc = -EAGAIN;
312 goto out;
313 }
296 314
297 req->vf_id = vf_id; 315 req->vf_id = vf_id;
298 316
@@ -308,7 +326,8 @@ int bnx2x_vfpf_release(struct bnx2x *bp)
308 326
309 if (rc) 327 if (rc)
310 /* PF timeout */ 328 /* PF timeout */
311 return rc; 329 goto out;
330
312 if (resp->hdr.status == PFVF_STATUS_SUCCESS) { 331 if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
313 /* PF released us */ 332 /* PF released us */
314 DP(BNX2X_MSG_SP, "vf released\n"); 333 DP(BNX2X_MSG_SP, "vf released\n");
@@ -316,10 +335,13 @@ int bnx2x_vfpf_release(struct bnx2x *bp)
316 /* PF reports error */ 335 /* PF reports error */
317 BNX2X_ERR("PF failed our release request - are we out of sync? response status: %d\n", 336 BNX2X_ERR("PF failed our release request - are we out of sync? response status: %d\n",
318 resp->hdr.status); 337 resp->hdr.status);
319 return -EAGAIN; 338 rc = -EAGAIN;
339 goto out;
320 } 340 }
341out:
342 bnx2x_vfpf_finalize(bp, &req->first_tlv);
321 343
322 return 0; 344 return rc;
323} 345}
324 346
325/* Tell PF about SB addresses */ 347/* Tell PF about SB addresses */
@@ -350,16 +372,20 @@ int bnx2x_vfpf_init(struct bnx2x *bp)
350 372
351 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); 373 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
352 if (rc) 374 if (rc)
353 return rc; 375 goto out;
354 376
355 if (resp->hdr.status != PFVF_STATUS_SUCCESS) { 377 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
356 BNX2X_ERR("INIT VF failed: %d. Breaking...\n", 378 BNX2X_ERR("INIT VF failed: %d. Breaking...\n",
357 resp->hdr.status); 379 resp->hdr.status);
358 return -EAGAIN; 380 rc = -EAGAIN;
381 goto out;
359 } 382 }
360 383
361 DP(BNX2X_MSG_SP, "INIT VF Succeeded\n"); 384 DP(BNX2X_MSG_SP, "INIT VF Succeeded\n");
362 return 0; 385out:
386 bnx2x_vfpf_finalize(bp, &req->first_tlv);
387
388 return rc;
363} 389}
364 390
365/* CLOSE VF - opposite to INIT_VF */ 391/* CLOSE VF - opposite to INIT_VF */
@@ -401,6 +427,8 @@ void bnx2x_vfpf_close_vf(struct bnx2x *bp)
401 BNX2X_ERR("Sending CLOSE failed: pf response was %d\n", 427 BNX2X_ERR("Sending CLOSE failed: pf response was %d\n",
402 resp->hdr.status); 428 resp->hdr.status);
403 429
430 bnx2x_vfpf_finalize(bp, &req->first_tlv);
431
404free_irq: 432free_irq:
405 /* Disable HW interrupts, NAPI */ 433 /* Disable HW interrupts, NAPI */
406 bnx2x_netif_stop(bp, 0); 434 bnx2x_netif_stop(bp, 0);
@@ -435,7 +463,6 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
435 /* calculate queue flags */ 463 /* calculate queue flags */
436 flags |= VFPF_QUEUE_FLG_STATS; 464 flags |= VFPF_QUEUE_FLG_STATS;
437 flags |= VFPF_QUEUE_FLG_CACHE_ALIGN; 465 flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
438 flags |= IS_MF_SD(bp) ? VFPF_QUEUE_FLG_OV : 0;
439 flags |= VFPF_QUEUE_FLG_VLAN; 466 flags |= VFPF_QUEUE_FLG_VLAN;
440 DP(NETIF_MSG_IFUP, "vlan removal enabled\n"); 467 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
441 468
@@ -486,8 +513,11 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
486 if (resp->hdr.status != PFVF_STATUS_SUCCESS) { 513 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
487 BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n", 514 BNX2X_ERR("Status of SETUP_Q for queue[%d] is %d\n",
488 fp_idx, resp->hdr.status); 515 fp_idx, resp->hdr.status);
489 return -EINVAL; 516 rc = -EINVAL;
490 } 517 }
518
519 bnx2x_vfpf_finalize(bp, &req->first_tlv);
520
491 return rc; 521 return rc;
492} 522}
493 523
@@ -515,17 +545,19 @@ int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx)
515 if (rc) { 545 if (rc) {
516 BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx, 546 BNX2X_ERR("Sending TEARDOWN for queue %d failed: %d\n", qidx,
517 rc); 547 rc);
518 return rc; 548 goto out;
519 } 549 }
520 550
521 /* PF failed the transaction */ 551 /* PF failed the transaction */
522 if (resp->hdr.status != PFVF_STATUS_SUCCESS) { 552 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
523 BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx, 553 BNX2X_ERR("TEARDOWN for queue %d failed: %d\n", qidx,
524 resp->hdr.status); 554 resp->hdr.status);
525 return -EINVAL; 555 rc = -EINVAL;
526 } 556 }
527 557
528 return 0; 558out:
559 bnx2x_vfpf_finalize(bp, &req->first_tlv);
560 return rc;
529} 561}
530 562
531/* request pf to add a mac for the vf */ 563/* request pf to add a mac for the vf */
@@ -533,7 +565,7 @@ int bnx2x_vfpf_set_mac(struct bnx2x *bp)
533{ 565{
534 struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters; 566 struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
535 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp; 567 struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
536 int rc; 568 int rc = 0;
537 569
538 /* clear mailbox and prep first tlv */ 570 /* clear mailbox and prep first tlv */
539 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS, 571 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS,
@@ -562,7 +594,7 @@ int bnx2x_vfpf_set_mac(struct bnx2x *bp)
562 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); 594 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
563 if (rc) { 595 if (rc) {
564 BNX2X_ERR("failed to send message to pf. rc was %d\n", rc); 596 BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
565 return rc; 597 goto out;
566 } 598 }
567 599
568 /* failure may mean PF was configured with a new mac for us */ 600 /* failure may mean PF was configured with a new mac for us */
@@ -587,8 +619,10 @@ int bnx2x_vfpf_set_mac(struct bnx2x *bp)
587 619
588 if (resp->hdr.status != PFVF_STATUS_SUCCESS) { 620 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
589 BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status); 621 BNX2X_ERR("vfpf SET MAC failed: %d\n", resp->hdr.status);
590 return -EINVAL; 622 rc = -EINVAL;
591 } 623 }
624out:
625 bnx2x_vfpf_finalize(bp, &req->first_tlv);
592 626
593	 return 0; 627	 return rc;
594} 628}
@@ -643,14 +677,16 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
643 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping); 677 rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
644 if (rc) { 678 if (rc) {
645 BNX2X_ERR("Sending a message failed: %d\n", rc); 679 BNX2X_ERR("Sending a message failed: %d\n", rc);
646 return rc; 680 goto out;
647 } 681 }
648 682
649 if (resp->hdr.status != PFVF_STATUS_SUCCESS) { 683 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
650 BNX2X_ERR("Set Rx mode/multicast failed: %d\n", 684 BNX2X_ERR("Set Rx mode/multicast failed: %d\n",
651 resp->hdr.status); 685 resp->hdr.status);
652 return -EINVAL; 686 rc = -EINVAL;
653 } 687 }
688out:
689 bnx2x_vfpf_finalize(bp, &req->first_tlv);
654 690
655	 return 0; 691	 return rc;
656} 692}
@@ -689,7 +725,8 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
689 break; 725 break;
690 default: 726 default:
691 BNX2X_ERR("BAD rx mode (%d)\n", mode); 727 BNX2X_ERR("BAD rx mode (%d)\n", mode);
692 return -EINVAL; 728 rc = -EINVAL;
729 goto out;
693 } 730 }
694 731
695 req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED; 732 req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED;
@@ -708,8 +745,10 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp)
708 745
709 if (resp->hdr.status != PFVF_STATUS_SUCCESS) { 746 if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
710 BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status); 747 BNX2X_ERR("Set Rx mode failed: %d\n", resp->hdr.status);
711 return -EINVAL; 748 rc = -EINVAL;
712 } 749 }
750out:
751 bnx2x_vfpf_finalize(bp, &req->first_tlv);
713 752
714 return rc; 753 return rc;
715} 754}
@@ -1004,7 +1043,7 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
1004} 1043}
1005 1044
1006/* convert MBX queue-flags to standard SP queue-flags */ 1045/* convert MBX queue-flags to standard SP queue-flags */
1007static void bnx2x_vf_mbx_set_q_flags(u32 mbx_q_flags, 1046static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
1008 unsigned long *sp_q_flags) 1047 unsigned long *sp_q_flags)
1009{ 1048{
1010 if (mbx_q_flags & VFPF_QUEUE_FLG_TPA) 1049 if (mbx_q_flags & VFPF_QUEUE_FLG_TPA)
@@ -1015,8 +1054,6 @@ static void bnx2x_vf_mbx_set_q_flags(u32 mbx_q_flags,
1015 __set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags); 1054 __set_bit(BNX2X_Q_FLG_TPA_GRO, sp_q_flags);
1016 if (mbx_q_flags & VFPF_QUEUE_FLG_STATS) 1055 if (mbx_q_flags & VFPF_QUEUE_FLG_STATS)
1017 __set_bit(BNX2X_Q_FLG_STATS, sp_q_flags); 1056 __set_bit(BNX2X_Q_FLG_STATS, sp_q_flags);
1018 if (mbx_q_flags & VFPF_QUEUE_FLG_OV)
1019 __set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
1020 if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN) 1057 if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN)
1021 __set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags); 1058 __set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags);
1022 if (mbx_q_flags & VFPF_QUEUE_FLG_COS) 1059 if (mbx_q_flags & VFPF_QUEUE_FLG_COS)
@@ -1025,6 +1062,10 @@ static void bnx2x_vf_mbx_set_q_flags(u32 mbx_q_flags,
1025 __set_bit(BNX2X_Q_FLG_HC, sp_q_flags); 1062 __set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
1026 if (mbx_q_flags & VFPF_QUEUE_FLG_DHC) 1063 if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
1027 __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags); 1064 __set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
1065
1066	 /* outer vlan removal is set according to the PF's multi function mode */
1067 if (IS_MF_SD(bp))
1068 __set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
1028} 1069}
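
Passing bp into bnx2x_vf_mbx_set_q_flags() changes who decides outer-vlan handling: the VF used to request VFPF_QUEUE_FLG_OV over the channel, whereas now the PF derives BNX2X_Q_FLG_OV from its own multi-function mode and the VF-supplied bit is gone entirely. Condensed to the interesting part (a sketch, not the full translator):

    static void set_q_flags_sketch(struct bnx2x *bp, u32 mbx_q_flags,
                                   unsigned long *sp_q_flags)
    {
            /* VF-requested bits are translated one for one... */
            if (mbx_q_flags & VFPF_QUEUE_FLG_VLAN)
                    __set_bit(BNX2X_Q_FLG_VLAN, sp_q_flags);
            /* ...but this bit is PF-owned; the VF can no longer ask for it */
            if (IS_MF_SD(bp))
                    __set_bit(BNX2X_Q_FLG_OV, sp_q_flags);
    }
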
1029 1070
1030static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf, 1071static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
@@ -1075,11 +1116,11 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
1075 init_p->tx.hc_rate = setup_q->txq.hc_rate; 1116 init_p->tx.hc_rate = setup_q->txq.hc_rate;
1076 init_p->tx.sb_cq_index = setup_q->txq.sb_index; 1117 init_p->tx.sb_cq_index = setup_q->txq.sb_index;
1077 1118
1078 bnx2x_vf_mbx_set_q_flags(setup_q->txq.flags, 1119 bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
1079 &init_p->tx.flags); 1120 &init_p->tx.flags);
1080 1121
1081 /* tx setup - flags */ 1122 /* tx setup - flags */
1082 bnx2x_vf_mbx_set_q_flags(setup_q->txq.flags, 1123 bnx2x_vf_mbx_set_q_flags(bp, setup_q->txq.flags,
1083 &setup_p->flags); 1124 &setup_p->flags);
1084 1125
1085 /* tx setup - general, nothing */ 1126 /* tx setup - general, nothing */
@@ -1107,11 +1148,11 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
1107 /* rx init */ 1148 /* rx init */
1108 init_p->rx.hc_rate = setup_q->rxq.hc_rate; 1149 init_p->rx.hc_rate = setup_q->rxq.hc_rate;
1109 init_p->rx.sb_cq_index = setup_q->rxq.sb_index; 1150 init_p->rx.sb_cq_index = setup_q->rxq.sb_index;
1110 bnx2x_vf_mbx_set_q_flags(setup_q->rxq.flags, 1151 bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
1111 &init_p->rx.flags); 1152 &init_p->rx.flags);
1112 1153
1113 /* rx setup - flags */ 1154 /* rx setup - flags */
1114 bnx2x_vf_mbx_set_q_flags(setup_q->rxq.flags, 1155 bnx2x_vf_mbx_set_q_flags(bp, setup_q->rxq.flags,
1115 &setup_p->flags); 1156 &setup_p->flags);
1116 1157
1117 /* rx setup - general */ 1158 /* rx setup - general */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index bfc80baec00d..41708faab575 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -328,9 +328,15 @@ struct pf_vf_bulletin_content {
328#define MAC_ADDR_VALID 0 /* alert the vf that a new mac address 328#define MAC_ADDR_VALID 0 /* alert the vf that a new mac address
329 * is available for it 329 * is available for it
330 */ 330 */
331#define VLAN_VALID 1 /* when set, the vf should not access
332 * the vfpf channel
333 */
331 334
332 u8 mac[ETH_ALEN]; 335 u8 mac[ETH_ALEN];
333 u8 padding[2]; 336 u8 mac_padding[2];
337
338 u16 vlan;
339 u8 vlan_padding[6];
334}; 340};
335 341
336union pf_vf_bulletin { 342union pf_vf_bulletin {
@@ -353,6 +359,7 @@ enum channel_tlvs {
353 CHANNEL_TLV_LIST_END, 359 CHANNEL_TLV_LIST_END,
354 CHANNEL_TLV_FLR, 360 CHANNEL_TLV_FLR,
355 CHANNEL_TLV_PF_SET_MAC, 361 CHANNEL_TLV_PF_SET_MAC,
362 CHANNEL_TLV_PF_SET_VLAN,
356 CHANNEL_TLV_MAX 363 CHANNEL_TLV_MAX
357}; 364};
358 365
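
The bulletin board gains a second validity bit plus the vlan payload, with the padding split so each field stays naturally aligned. A consumer tests valid_bitmap before trusting a field, mirroring what bnx2x_sample_bulletin() does for the MAC; a hedged reader sketch (assumes this header is included):

    #include <linux/printk.h>

    static void bulletin_read_sketch(struct pf_vf_bulletin_content *b)
    {
            if (b->valid_bitmap & (1 << MAC_ADDR_VALID))
                    pr_info("PF posted mac %pM\n", b->mac);

            if (b->valid_bitmap & (1 << VLAN_VALID))
                    pr_info("PF posted vlan %u; vfpf channel is off limits\n",
                            b->vlan);
    }
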
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index e9b35da375cb..e80bfb60c3ef 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -831,11 +831,8 @@ static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
831 sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE + 831 sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE +
832 SMP_CACHE_BYTES * 2 + 832 SMP_CACHE_BYTES * 2 +
833 NET_IP_ALIGN); 833 NET_IP_ALIGN);
834 if (sb_new == NULL) { 834 if (sb_new == NULL)
835 pr_info("%s: sk_buff allocation failed\n",
836 d->sbdma_eth->sbm_dev->name);
837 return -ENOBUFS; 835 return -ENOBUFS;
838 }
839 836
840 sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN); 837 sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN);
841 } 838 }
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 17a972734ba7..a4416b09f209 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -212,6 +212,7 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
212#define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2) 212#define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
213 213
214#define FIRMWARE_TG3 "tigon/tg3.bin" 214#define FIRMWARE_TG3 "tigon/tg3.bin"
215#define FIRMWARE_TG357766 "tigon/tg357766.bin"
215#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin" 216#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
216#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin" 217#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
217 218
@@ -3448,11 +3449,58 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3448#define TX_CPU_SCRATCH_SIZE 0x04000 3449#define TX_CPU_SCRATCH_SIZE 0x04000
3449 3450
3450/* tp->lock is held. */ 3451/* tp->lock is held. */
3451static int tg3_halt_cpu(struct tg3 *tp, u32 offset) 3452static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3452{ 3453{
3453 int i; 3454 int i;
3455 const int iters = 10000;
3454 3456
3455 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)); 3457 for (i = 0; i < iters; i++) {
3458 tw32(cpu_base + CPU_STATE, 0xffffffff);
3459 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3460 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3461 break;
3462 }
3463
3464 return (i == iters) ? -EBUSY : 0;
3465}
3466
3467/* tp->lock is held. */
3468static int tg3_rxcpu_pause(struct tg3 *tp)
3469{
3470 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3471
3472 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3473 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3474 udelay(10);
3475
3476 return rc;
3477}
3478
3479/* tp->lock is held. */
3480static int tg3_txcpu_pause(struct tg3 *tp)
3481{
3482 return tg3_pause_cpu(tp, TX_CPU_BASE);
3483}
3484
3485/* tp->lock is held. */
3486static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3487{
3488 tw32(cpu_base + CPU_STATE, 0xffffffff);
3489 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3490}
3491
3492/* tp->lock is held. */
3493static void tg3_rxcpu_resume(struct tg3 *tp)
3494{
3495 tg3_resume_cpu(tp, RX_CPU_BASE);
3496}
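
tg3_pause_cpu() is the standard bounded-poll idiom: re-issue the halt request each pass, read back the mode register, and convert iteration exhaustion into -EBUSY rather than spinning forever. The same skeleton, generalized over register and bit (a sketch reusing the driver's tr32/tw32 accessors, not a helper this patch adds):

    /* generic bounded poll: 0 on success, -EBUSY when the budget runs out */
    static int poll_bounded(struct tg3 *tp, u32 reg, u32 want, int iters)
    {
            int i;

            for (i = 0; i < iters; i++) {
                    tw32(reg, want);        /* re-arm the request each pass */
                    if (tr32(reg) & want)
                            return 0;
            }
            return -EBUSY;
    }
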
3497
3498/* tp->lock is held. */
3499static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3500{
3501 int rc;
3502
3503 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3456 3504
3457 if (tg3_asic_rev(tp) == ASIC_REV_5906) { 3505 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3458 u32 val = tr32(GRC_VCPU_EXT_CTRL); 3506 u32 val = tr32(GRC_VCPU_EXT_CTRL);
@@ -3460,17 +3508,8 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3460 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU); 3508 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3461 return 0; 3509 return 0;
3462 } 3510 }
3463 if (offset == RX_CPU_BASE) { 3511 if (cpu_base == RX_CPU_BASE) {
3464 for (i = 0; i < 10000; i++) { 3512 rc = tg3_rxcpu_pause(tp);
3465 tw32(offset + CPU_STATE, 0xffffffff);
3466 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3467 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3468 break;
3469 }
3470
3471 tw32(offset + CPU_STATE, 0xffffffff);
3472 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
3473 udelay(10);
3474 } else { 3513 } else {
3475 /* 3514 /*
3476 * There is only an Rx CPU for the 5750 derivative in the 3515 * There is only an Rx CPU for the 5750 derivative in the
@@ -3479,17 +3518,12 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3479 if (tg3_flag(tp, IS_SSB_CORE)) 3518 if (tg3_flag(tp, IS_SSB_CORE))
3480 return 0; 3519 return 0;
3481 3520
3482 for (i = 0; i < 10000; i++) { 3521 rc = tg3_txcpu_pause(tp);
3483 tw32(offset + CPU_STATE, 0xffffffff);
3484 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3485 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3486 break;
3487 }
3488 } 3522 }
3489 3523
3490 if (i >= 10000) { 3524 if (rc) {
3491 netdev_err(tp->dev, "%s timed out, %s CPU\n", 3525 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3492 __func__, offset == RX_CPU_BASE ? "RX" : "TX"); 3526 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3493 return -ENODEV; 3527 return -ENODEV;
3494 } 3528 }
3495 3529
@@ -3499,19 +3533,41 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3499 return 0; 3533 return 0;
3500} 3534}
3501 3535
3502struct fw_info { 3536static int tg3_fw_data_len(struct tg3 *tp,
3503 unsigned int fw_base; 3537 const struct tg3_firmware_hdr *fw_hdr)
3504 unsigned int fw_len; 3538{
3505 const __be32 *fw_data; 3539 int fw_len;
3506}; 3540
3541	 /* Non-fragmented firmware has one firmware header followed by a
3542	  * contiguous chunk of data to be written. The length field in that
3543	  * header is not the length of data to be written but the complete
3544	  * length of the bss. The data length is determined from
3545	  * tp->fw->size minus headers.
3546	  *
3547	  * Fragmented firmware has a main header followed by multiple
3548	  * fragments. Each fragment is identical to non-fragmented firmware:
3549	  * a firmware header followed by a contiguous chunk of data. In
3550	  * the main header, the length field is unused and set to 0xffffffff.
3551	  * In each fragment header the length is the entire size of that
3552	  * fragment, i.e. fragment data + header length. The data length is
3553	  * therefore the length field in the header minus TG3_FW_HDR_LEN.
3554 */
3555 if (tp->fw_len == 0xffffffff)
3556 fw_len = be32_to_cpu(fw_hdr->len);
3557 else
3558 fw_len = tp->fw->size;
3559
3560 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3561}
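
Worked example (numbers illustrative, not from a real blob): a non-fragmented blob of 7,176 bytes (tp->fw->size) yields (7176 - 12) / 4 = 1791 words to write; for a fragmented blob tp->fw_len is 0xffffffff, so a fragment whose header says len = 4096 yields (4096 - 12) / 4 = 1021 words. A standalone mirror of the rule:

    #include <stdint.h>

    #define HDR_LEN 12u  /* sizeof(struct tg3_firmware_hdr): three __be32 */

    /* main_len: len field of the blob's first header (0xffffffff = fragmented)
     * blob_size: tp->fw->size; frag_len: current fragment header's len field
     */
    static uint32_t fw_data_words(uint32_t main_len, uint32_t blob_size,
                                  uint32_t frag_len)
    {
            uint32_t fw_len = (main_len == 0xffffffffu) ? frag_len : blob_size;

            return (fw_len - HDR_LEN) / sizeof(uint32_t);
    }
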
3507 3562
3508/* tp->lock is held. */ 3563/* tp->lock is held. */
3509static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, 3564static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3510 u32 cpu_scratch_base, int cpu_scratch_size, 3565 u32 cpu_scratch_base, int cpu_scratch_size,
3511 struct fw_info *info) 3566 const struct tg3_firmware_hdr *fw_hdr)
3512{ 3567{
3513 int err, lock_err, i; 3568 int err, i;
3514 void (*write_op)(struct tg3 *, u32, u32); 3569 void (*write_op)(struct tg3 *, u32, u32);
3570 int total_len = tp->fw->size;
3515 3571
3516 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) { 3572 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3517 netdev_err(tp->dev, 3573 netdev_err(tp->dev,
@@ -3520,30 +3576,49 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3520 return -EINVAL; 3576 return -EINVAL;
3521 } 3577 }
3522 3578
3523 if (tg3_flag(tp, 5705_PLUS)) 3579 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3524 write_op = tg3_write_mem; 3580 write_op = tg3_write_mem;
3525 else 3581 else
3526 write_op = tg3_write_indirect_reg32; 3582 write_op = tg3_write_indirect_reg32;
3527 3583
3528 /* It is possible that bootcode is still loading at this point. 3584 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3529 * Get the nvram lock first before halting the cpu. 3585 /* It is possible that bootcode is still loading at this point.
3530 */ 3586 * Get the nvram lock first before halting the cpu.
3531 lock_err = tg3_nvram_lock(tp); 3587 */
3532 err = tg3_halt_cpu(tp, cpu_base); 3588 int lock_err = tg3_nvram_lock(tp);
3533 if (!lock_err) 3589 err = tg3_halt_cpu(tp, cpu_base);
3534 tg3_nvram_unlock(tp); 3590 if (!lock_err)
3535 if (err) 3591 tg3_nvram_unlock(tp);
3536 goto out; 3592 if (err)
3593 goto out;
3537 3594
3538 for (i = 0; i < cpu_scratch_size; i += sizeof(u32)) 3595 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3539 write_op(tp, cpu_scratch_base + i, 0); 3596 write_op(tp, cpu_scratch_base + i, 0);
3540 tw32(cpu_base + CPU_STATE, 0xffffffff); 3597 tw32(cpu_base + CPU_STATE, 0xffffffff);
3541 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT); 3598 tw32(cpu_base + CPU_MODE,
3542 for (i = 0; i < (info->fw_len / sizeof(u32)); i++) 3599 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3543 write_op(tp, (cpu_scratch_base + 3600 } else {
3544 (info->fw_base & 0xffff) + 3601 /* Subtract additional main header for fragmented firmware and
3545 (i * sizeof(u32))), 3602 * advance to the first fragment
3546 be32_to_cpu(info->fw_data[i])); 3603 */
3604 total_len -= TG3_FW_HDR_LEN;
3605 fw_hdr++;
3606 }
3607
3608 do {
3609 u32 *fw_data = (u32 *)(fw_hdr + 1);
3610 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3611 write_op(tp, cpu_scratch_base +
3612 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3613 (i * sizeof(u32)),
3614 be32_to_cpu(fw_data[i]));
3615
3616 total_len -= be32_to_cpu(fw_hdr->len);
3617
3618 /* Advance to next fragment */
3619 fw_hdr = (struct tg3_firmware_hdr *)
3620 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3621 } while (total_len > 0);
3547 3622
3548 err = 0; 3623 err = 0;
3549 3624
@@ -3552,13 +3627,33 @@ out:
3552} 3627}
3553 3628
3554/* tp->lock is held. */ 3629/* tp->lock is held. */
3630static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3631{
3632 int i;
3633 const int iters = 5;
3634
3635 tw32(cpu_base + CPU_STATE, 0xffffffff);
3636 tw32_f(cpu_base + CPU_PC, pc);
3637
3638 for (i = 0; i < iters; i++) {
3639 if (tr32(cpu_base + CPU_PC) == pc)
3640 break;
3641 tw32(cpu_base + CPU_STATE, 0xffffffff);
3642 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3643 tw32_f(cpu_base + CPU_PC, pc);
3644 udelay(1000);
3645 }
3646
3647 return (i == iters) ? -EBUSY : 0;
3648}
3649
3650/* tp->lock is held. */
3555static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp) 3651static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3556{ 3652{
3557 struct fw_info info; 3653 const struct tg3_firmware_hdr *fw_hdr;
3558 const __be32 *fw_data; 3654 int err;
3559 int err, i;
3560 3655
3561 fw_data = (void *)tp->fw->data; 3656 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3562 3657
3563 /* Firmware blob starts with version numbers, followed by 3658 /* Firmware blob starts with version numbers, followed by
3564 start address and length. We are setting complete length. 3659 start address and length. We are setting complete length.
@@ -3566,60 +3661,117 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3566 Remainder is the blob to be loaded contiguously 3661 Remainder is the blob to be loaded contiguously
3567 from start address. */ 3662 from start address. */
3568 3663
3569 info.fw_base = be32_to_cpu(fw_data[1]);
3570 info.fw_len = tp->fw->size - 12;
3571 info.fw_data = &fw_data[3];
3572
3573 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE, 3664 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3574 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE, 3665 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3575 &info); 3666 fw_hdr);
3576 if (err) 3667 if (err)
3577 return err; 3668 return err;
3578 3669
3579 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE, 3670 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3580 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE, 3671 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3581 &info); 3672 fw_hdr);
3582 if (err) 3673 if (err)
3583 return err; 3674 return err;
3584 3675
3585 /* Now startup only the RX cpu. */ 3676 /* Now startup only the RX cpu. */
3586 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); 3677 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3587 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base); 3678 be32_to_cpu(fw_hdr->base_addr));
3588 3679 if (err) {
3589 for (i = 0; i < 5; i++) {
3590 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3591 break;
3592 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3593 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3594 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3595 udelay(1000);
3596 }
3597 if (i >= 5) {
3598 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x " 3680 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3599 "should be %08x\n", __func__, 3681 "should be %08x\n", __func__,
3600 tr32(RX_CPU_BASE + CPU_PC), info.fw_base); 3682 tr32(RX_CPU_BASE + CPU_PC),
3683 be32_to_cpu(fw_hdr->base_addr));
3601 return -ENODEV; 3684 return -ENODEV;
3602 } 3685 }
3603 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff); 3686
3604 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000); 3687 tg3_rxcpu_resume(tp);
3688
3689 return 0;
3690}
3691
3692static int tg3_validate_rxcpu_state(struct tg3 *tp)
3693{
3694 const int iters = 1000;
3695 int i;
3696 u32 val;
3697
3698 /* Wait for boot code to complete initialization and enter service
3699 * loop. It is then safe to download service patches
3700 */
3701 for (i = 0; i < iters; i++) {
3702 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3703 break;
3704
3705 udelay(10);
3706 }
3707
3708 if (i == iters) {
3709 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3710 return -EBUSY;
3711 }
3712
3713 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3714 if (val & 0xff) {
3715 netdev_warn(tp->dev,
3716 "Other patches exist. Not downloading EEE patch\n");
3717 return -EEXIST;
3718 }
3605 3719
3606 return 0; 3720 return 0;
3607} 3721}
3608 3722
3609/* tp->lock is held. */ 3723/* tp->lock is held. */
3724static void tg3_load_57766_firmware(struct tg3 *tp)
3725{
3726 struct tg3_firmware_hdr *fw_hdr;
3727
3728 if (!tg3_flag(tp, NO_NVRAM))
3729 return;
3730
3731 if (tg3_validate_rxcpu_state(tp))
3732 return;
3733
3734 if (!tp->fw)
3735 return;
3736
3737	 /* This firmware blob has a different format from older firmware
3738	  * releases, as described below. The main difference is that we have
3739	  * fragmented data to be written to non-contiguous locations.
3740	  *
3741	  * In the beginning we have a firmware header identical to other
3742	  * firmware, consisting of version, base addr and length. The length
3743	  * here is unused and set to 0xffffffff.
3744	  *
3745	  * This is followed by a series of firmware fragments which are
3746	  * individually identical to previous firmware, i.e. a firmware
3747	  * header followed by the data for that fragment. The version field
3748	  * of the individual fragment header is unused.
3749 */
3750
3751 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3752 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3753 return;
3754
3755 if (tg3_rxcpu_pause(tp))
3756 return;
3757
3758 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3759 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3760
3761 tg3_rxcpu_resume(tp);
3762}
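
Laying the 57766 blob out by offset makes the fragment walk in tg3_load_firmware_cpu() easier to follow (L1, L2 and the fragment count are illustrative; only the 12-byte header and the 0xffffffff marker come from the code):

    /*
     * offset 0x000:      main header { version, base_addr = 0x00030000,
     *                                  len = 0xffffffff (marker, unused) }
     * offset 0x00c:      fragment 1 header { version (unused), base_addr,
     *                                        len = L1 }
     * offset 0x018:      fragment 1 data, L1 - 12 bytes
     * offset 0x00c + L1: fragment 2 header { ..., len = L2 }, and so on,
     *                    until tp->fw->size - 12 bytes have been consumed
     */
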
3763
3764/* tp->lock is held. */
3610static int tg3_load_tso_firmware(struct tg3 *tp) 3765static int tg3_load_tso_firmware(struct tg3 *tp)
3611{ 3766{
3612 struct fw_info info; 3767 const struct tg3_firmware_hdr *fw_hdr;
3613 const __be32 *fw_data;
3614 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size; 3768 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3615 int err, i; 3769 int err;
3616 3770
3617 if (tg3_flag(tp, HW_TSO_1) || 3771 if (!tg3_flag(tp, FW_TSO))
3618 tg3_flag(tp, HW_TSO_2) ||
3619 tg3_flag(tp, HW_TSO_3))
3620 return 0; 3772 return 0;
3621 3773
3622 fw_data = (void *)tp->fw->data; 3774 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3623 3775
3624 /* Firmware blob starts with version numbers, followed by 3776 /* Firmware blob starts with version numbers, followed by
3625 start address and length. We are setting complete length. 3777 start address and length. We are setting complete length.
@@ -3627,10 +3779,7 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
3627 Remainder is the blob to be loaded contiguously 3779 Remainder is the blob to be loaded contiguously
3628 from start address. */ 3780 from start address. */
3629 3781
3630 info.fw_base = be32_to_cpu(fw_data[1]);
3631 cpu_scratch_size = tp->fw_len; 3782 cpu_scratch_size = tp->fw_len;
3632 info.fw_len = tp->fw->size - 12;
3633 info.fw_data = &fw_data[3];
3634 3783
3635 if (tg3_asic_rev(tp) == ASIC_REV_5705) { 3784 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3636 cpu_base = RX_CPU_BASE; 3785 cpu_base = RX_CPU_BASE;
@@ -3643,30 +3792,22 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
3643 3792
3644 err = tg3_load_firmware_cpu(tp, cpu_base, 3793 err = tg3_load_firmware_cpu(tp, cpu_base,
3645 cpu_scratch_base, cpu_scratch_size, 3794 cpu_scratch_base, cpu_scratch_size,
3646 &info); 3795 fw_hdr);
3647 if (err) 3796 if (err)
3648 return err; 3797 return err;
3649 3798
3650 /* Now startup the cpu. */ 3799 /* Now startup the cpu. */
3651 tw32(cpu_base + CPU_STATE, 0xffffffff); 3800 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3652 tw32_f(cpu_base + CPU_PC, info.fw_base); 3801 be32_to_cpu(fw_hdr->base_addr));
3653 3802 if (err) {
3654 for (i = 0; i < 5; i++) {
3655 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3656 break;
3657 tw32(cpu_base + CPU_STATE, 0xffffffff);
3658 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3659 tw32_f(cpu_base + CPU_PC, info.fw_base);
3660 udelay(1000);
3661 }
3662 if (i >= 5) {
3663 netdev_err(tp->dev, 3803 netdev_err(tp->dev,
3664 "%s fails to set CPU PC, is %08x should be %08x\n", 3804 "%s fails to set CPU PC, is %08x should be %08x\n",
3665 __func__, tr32(cpu_base + CPU_PC), info.fw_base); 3805 __func__, tr32(cpu_base + CPU_PC),
3806 be32_to_cpu(fw_hdr->base_addr));
3666 return -ENODEV; 3807 return -ENODEV;
3667 } 3808 }
3668 tw32(cpu_base + CPU_STATE, 0xffffffff); 3809
3669 tw32_f(cpu_base + CPU_MODE, 0x00000000); 3810 tg3_resume_cpu(tp, cpu_base);
3670 return 0; 3811 return 0;
3671} 3812}
3672 3813
@@ -8039,11 +8180,9 @@ static int tg3_mem_rx_acquire(struct tg3 *tp)
8039 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev, 8180 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8040 TG3_RX_RCB_RING_BYTES(tp), 8181 TG3_RX_RCB_RING_BYTES(tp),
8041 &tnapi->rx_rcb_mapping, 8182 &tnapi->rx_rcb_mapping,
8042 GFP_KERNEL); 8183 GFP_KERNEL | __GFP_ZERO);
8043 if (!tnapi->rx_rcb) 8184 if (!tnapi->rx_rcb)
8044 goto err_out; 8185 goto err_out;
8045
8046 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8047 } 8186 }
8048 8187
8049 return 0; 8188 return 0;
@@ -8093,12 +8232,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
8093 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev, 8232 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8094 sizeof(struct tg3_hw_stats), 8233 sizeof(struct tg3_hw_stats),
8095 &tp->stats_mapping, 8234 &tp->stats_mapping,
8096 GFP_KERNEL); 8235 GFP_KERNEL | __GFP_ZERO);
8097 if (!tp->hw_stats) 8236 if (!tp->hw_stats)
8098 goto err_out; 8237 goto err_out;
8099 8238
8100 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8101
8102 for (i = 0; i < tp->irq_cnt; i++) { 8239 for (i = 0; i < tp->irq_cnt; i++) {
8103 struct tg3_napi *tnapi = &tp->napi[i]; 8240 struct tg3_napi *tnapi = &tp->napi[i];
8104 struct tg3_hw_status *sblk; 8241 struct tg3_hw_status *sblk;
@@ -8106,11 +8243,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
8106 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev, 8243 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8107 TG3_HW_STATUS_SIZE, 8244 TG3_HW_STATUS_SIZE,
8108 &tnapi->status_mapping, 8245 &tnapi->status_mapping,
8109 GFP_KERNEL); 8246 GFP_KERNEL | __GFP_ZERO);
8110 if (!tnapi->hw_status) 8247 if (!tnapi->hw_status)
8111 goto err_out; 8248 goto err_out;
8112 8249
8113 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8114 sblk = tnapi->hw_status; 8250 sblk = tnapi->hw_status;
8115 8251
8116 if (tg3_flag(tp, ENABLE_RSS)) { 8252 if (tg3_flag(tp, ENABLE_RSS)) {
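
The three hunks above are one mechanical cleanup applied three times: passing __GFP_ZERO to dma_alloc_coherent() returns memory that is already cleared, making the follow-up memset() redundant. The generic before/after (dev, size, mapping and buf are placeholders):

    /* before: allocate, then zero by hand */
    buf = dma_alloc_coherent(dev, size, &mapping, GFP_KERNEL);
    if (!buf)
            return -ENOMEM;
    memset(buf, 0, size);

    /* after: the allocator hands back zeroed memory */
    buf = dma_alloc_coherent(dev, size, &mapping, GFP_KERNEL | __GFP_ZERO);
    if (!buf)
            return -ENOMEM;
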
@@ -9781,6 +9917,13 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
9781 return err; 9917 return err;
9782 } 9918 }
9783 9919
9920 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
9921 /* Ignore any errors for the firmware download. If download
9922 * fails, the device will operate with EEE disabled
9923 */
9924 tg3_load_57766_firmware(tp);
9925 }
9926
9784 if (tg3_flag(tp, TSO_CAPABLE)) { 9927 if (tg3_flag(tp, TSO_CAPABLE)) {
9785 err = tg3_load_tso_firmware(tp); 9928 err = tg3_load_tso_firmware(tp);
9786 if (err) 9929 if (err)
@@ -10570,7 +10713,7 @@ static int tg3_test_msi(struct tg3 *tp)
10570 10713
10571static int tg3_request_firmware(struct tg3 *tp) 10714static int tg3_request_firmware(struct tg3 *tp)
10572{ 10715{
10573 const __be32 *fw_data; 10716 const struct tg3_firmware_hdr *fw_hdr;
10574 10717
10575 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) { 10718 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
10576 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n", 10719 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
@@ -10578,15 +10721,15 @@ static int tg3_request_firmware(struct tg3 *tp)
10578 return -ENOENT; 10721 return -ENOENT;
10579 } 10722 }
10580 10723
10581 fw_data = (void *)tp->fw->data; 10724 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
10582 10725
10583 /* Firmware blob starts with version numbers, followed by 10726 /* Firmware blob starts with version numbers, followed by
10584 * start address and _full_ length including BSS sections 10727 * start address and _full_ length including BSS sections
10585	 * (which must be longer than the actual data, of course) 10728	 * (which must be longer than the actual data, of course)
10586 */ 10729 */
10587 10730
10588 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */ 10731 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
10589 if (tp->fw_len < (tp->fw->size - 12)) { 10732 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
10590 netdev_err(tp->dev, "bogus length %d in \"%s\"\n", 10733 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
10591 tp->fw_len, tp->fw_needed); 10734 tp->fw_len, tp->fw_needed);
10592 release_firmware(tp->fw); 10735 release_firmware(tp->fw);
@@ -10885,7 +11028,15 @@ static int tg3_open(struct net_device *dev)
10885 11028
10886 if (tp->fw_needed) { 11029 if (tp->fw_needed) {
10887 err = tg3_request_firmware(tp); 11030 err = tg3_request_firmware(tp);
10888 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) { 11031 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11032 if (err) {
11033 netdev_warn(tp->dev, "EEE capability disabled\n");
11034 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11035 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11036 netdev_warn(tp->dev, "EEE capability restored\n");
11037 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11038 }
11039 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10889 if (err) 11040 if (err)
10890 return err; 11041 return err;
10891 } else if (err) { 11042 } else if (err) {
@@ -14515,6 +14666,7 @@ static int tg3_phy_probe(struct tg3 *tp)
14515 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && 14666 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
14516 (tg3_asic_rev(tp) == ASIC_REV_5719 || 14667 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
14517 tg3_asic_rev(tp) == ASIC_REV_5720 || 14668 tg3_asic_rev(tp) == ASIC_REV_5720 ||
14669 tg3_asic_rev(tp) == ASIC_REV_57766 ||
14518 tg3_asic_rev(tp) == ASIC_REV_5762 || 14670 tg3_asic_rev(tp) == ASIC_REV_5762 ||
14519 (tg3_asic_rev(tp) == ASIC_REV_5717 && 14671 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
14520 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) || 14672 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
@@ -15300,7 +15452,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15300 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 && 15452 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15301 tg3_asic_rev(tp) != ASIC_REV_5701 && 15453 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15302 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) { 15454 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15303 tg3_flag_set(tp, TSO_BUG); 15455 tg3_flag_set(tp, FW_TSO);
15456 tg3_flag_set(tp, TSO_BUG);
15304 if (tg3_asic_rev(tp) == ASIC_REV_5705) 15457 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15305 tp->fw_needed = FIRMWARE_TG3TSO5; 15458 tp->fw_needed = FIRMWARE_TG3TSO5;
15306 else 15459 else
@@ -15311,7 +15464,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15311 if (tg3_flag(tp, HW_TSO_1) || 15464 if (tg3_flag(tp, HW_TSO_1) ||
15312 tg3_flag(tp, HW_TSO_2) || 15465 tg3_flag(tp, HW_TSO_2) ||
15313 tg3_flag(tp, HW_TSO_3) || 15466 tg3_flag(tp, HW_TSO_3) ||
15314 tp->fw_needed) { 15467 tg3_flag(tp, FW_TSO)) {
15315 /* For firmware TSO, assume ASF is disabled. 15468 /* For firmware TSO, assume ASF is disabled.
15316 * We'll disable TSO later if we discover ASF 15469 * We'll disable TSO later if we discover ASF
15317 * is enabled in tg3_get_eeprom_hw_cfg(). 15470 * is enabled in tg3_get_eeprom_hw_cfg().
@@ -15326,6 +15479,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15326 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) 15479 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15327 tp->fw_needed = FIRMWARE_TG3; 15480 tp->fw_needed = FIRMWARE_TG3;
15328 15481
15482 if (tg3_asic_rev(tp) == ASIC_REV_57766)
15483 tp->fw_needed = FIRMWARE_TG357766;
15484
15329 tp->irq_max = 1; 15485 tp->irq_max = 1;
15330 15486
15331 if (tg3_flag(tp, 5750_PLUS)) { 15487 if (tg3_flag(tp, 5750_PLUS)) {
@@ -15598,7 +15754,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15598 */ 15754 */
15599 tg3_get_eeprom_hw_cfg(tp); 15755 tg3_get_eeprom_hw_cfg(tp);
15600 15756
15601 if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) { 15757 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
15602 tg3_flag_clear(tp, TSO_CAPABLE); 15758 tg3_flag_clear(tp, TSO_CAPABLE);
15603 tg3_flag_clear(tp, TSO_BUG); 15759 tg3_flag_clear(tp, TSO_BUG);
15604 tp->fw_needed = NULL; 15760 tp->fw_needed = NULL;
@@ -15786,6 +15942,11 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15786 udelay(50); 15942 udelay(50);
15787 tg3_nvram_init(tp); 15943 tg3_nvram_init(tp);
15788 15944
15945 /* If the device has an NVRAM, no need to load patch firmware */
15946 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
15947 !tg3_flag(tp, NO_NVRAM))
15948 tp->fw_needed = NULL;
15949
15789 grc_misc_cfg = tr32(GRC_MISC_CFG); 15950 grc_misc_cfg = tr32(GRC_MISC_CFG);
15790 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK; 15951 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
15791 15952
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 8d7d4c2ab5d6..1cdc1b641c77 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2222,6 +2222,12 @@
2222#define NIC_SRAM_MBUF_POOL_BASE5705 0x00010000 2222#define NIC_SRAM_MBUF_POOL_BASE5705 0x00010000
2223#define NIC_SRAM_MBUF_POOL_SIZE5705 0x0000e000 2223#define NIC_SRAM_MBUF_POOL_SIZE5705 0x0000e000
2224 2224
2225#define TG3_SRAM_RXCPU_SCRATCH_BASE_57766 0x00030000
2226#define TG3_SRAM_RXCPU_SCRATCH_SIZE_57766 0x00010000
2227#define TG3_57766_FW_BASE_ADDR 0x00030000
2228#define TG3_57766_FW_HANDSHAKE 0x0003fccc
2229#define TG3_SBROM_IN_SERVICE_LOOP 0x51
2230
2225#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5700 128 2231#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5700 128
2226#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5755 64 2232#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5755 64
2227#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5906 32 2233#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5906 32
@@ -3009,17 +3015,18 @@ enum TG3_FLAGS {
3009 TG3_FLAG_JUMBO_CAPABLE, 3015 TG3_FLAG_JUMBO_CAPABLE,
3010 TG3_FLAG_CHIP_RESETTING, 3016 TG3_FLAG_CHIP_RESETTING,
3011 TG3_FLAG_INIT_COMPLETE, 3017 TG3_FLAG_INIT_COMPLETE,
3012 TG3_FLAG_TSO_BUG,
3013 TG3_FLAG_MAX_RXPEND_64, 3018 TG3_FLAG_MAX_RXPEND_64,
3014 TG3_FLAG_TSO_CAPABLE,
3015 TG3_FLAG_PCI_EXPRESS, /* BCM5785 + pci_is_pcie() */ 3019 TG3_FLAG_PCI_EXPRESS, /* BCM5785 + pci_is_pcie() */
3016 TG3_FLAG_ASF_NEW_HANDSHAKE, 3020 TG3_FLAG_ASF_NEW_HANDSHAKE,
3017 TG3_FLAG_HW_AUTONEG, 3021 TG3_FLAG_HW_AUTONEG,
3018 TG3_FLAG_IS_NIC, 3022 TG3_FLAG_IS_NIC,
3019 TG3_FLAG_FLASH, 3023 TG3_FLAG_FLASH,
3024 TG3_FLAG_FW_TSO,
3020 TG3_FLAG_HW_TSO_1, 3025 TG3_FLAG_HW_TSO_1,
3021 TG3_FLAG_HW_TSO_2, 3026 TG3_FLAG_HW_TSO_2,
3022 TG3_FLAG_HW_TSO_3, 3027 TG3_FLAG_HW_TSO_3,
3028 TG3_FLAG_TSO_CAPABLE,
3029 TG3_FLAG_TSO_BUG,
3023 TG3_FLAG_ICH_WORKAROUND, 3030 TG3_FLAG_ICH_WORKAROUND,
3024 TG3_FLAG_1SHOT_MSI, 3031 TG3_FLAG_1SHOT_MSI,
3025 TG3_FLAG_NO_FWARE_REPORTED, 3032 TG3_FLAG_NO_FWARE_REPORTED,
@@ -3064,6 +3071,13 @@ enum TG3_FLAGS {
3064 TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */ 3071 TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */
3065}; 3072};
3066 3073
3074struct tg3_firmware_hdr {
3075 __be32 version; /* unused for fragments */
3076 __be32 base_addr;
3077 __be32 len;
3078};
3079#define TG3_FW_HDR_LEN (sizeof(struct tg3_firmware_hdr))
3080
3067struct tg3 { 3081struct tg3 {
3068 /* begin "general, frequently-used members" cacheline section */ 3082 /* begin "general, frequently-used members" cacheline section */
3069 3083
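The new tg3_firmware_hdr gives every image and fragment a three-word preamble: a version (unused for fragments), the SRAM load address, and the payload length, all big-endian, TG3_FW_HDR_LEN bytes in total. A hedged sketch of parsing such a header from a raw blob, portable across host endianness (the layout follows the struct above; the sample bytes are hypothetical):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct fw_hdr { uint32_t version, base_addr, len; };

    /* Assemble a 32-bit big-endian word regardless of host order. */
    static uint32_t be32_at(const uint8_t *p)
    {
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
    }

    static int parse_fw_hdr(const uint8_t *blob, size_t size, struct fw_hdr *h)
    {
        if (size < 12)                 /* three __be32 words */
            return -1;
        h->version   = be32_at(blob);
        h->base_addr = be32_at(blob + 4);
        h->len       = be32_at(blob + 8);
        return 0;
    }

    int main(void)
    {
        /* Hypothetical fragment: base 0x00030000, 16 payload bytes. */
        const uint8_t blob[12] = { 0,1,0,0, 0,3,0,0, 0,0,0,16 };
        struct fw_hdr h;

        if (!parse_fw_hdr(blob, sizeof(blob), &h))
            printf("base=0x%x len=%u\n", h.base_addr, h.len);
        return 0;
    }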
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 3227fdde521b..f2b73ffa9122 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -76,7 +76,7 @@ static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
76static void bfa_ioc_pf_failed(struct bfa_ioc *ioc); 76static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
77static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc); 77static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
78static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc); 78static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
79static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, 79static void bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
80 u32 boot_param); 80 u32 boot_param);
81static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr); 81static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
82static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, 82static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 7cce42dc2f20..d588f842d557 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -1264,9 +1264,8 @@ bnad_mem_alloc(struct bnad *bnad,
1264 mem_info->mdl[i].len = mem_info->len; 1264 mem_info->mdl[i].len = mem_info->len;
1265 mem_info->mdl[i].kva = 1265 mem_info->mdl[i].kva =
1266 dma_alloc_coherent(&bnad->pcidev->dev, 1266 dma_alloc_coherent(&bnad->pcidev->dev,
1267 mem_info->len, &dma_pa, 1267 mem_info->len, &dma_pa,
1268 GFP_KERNEL); 1268 GFP_KERNEL);
1269
1270 if (mem_info->mdl[i].kva == NULL) 1269 if (mem_info->mdl[i].kva == NULL)
1271 goto err_return; 1270 goto err_return;
1272 1271
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index 3becdb2deb46..a5f499f53dd6 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -47,22 +47,19 @@ static int at91ether_start(struct net_device *dev)
47 int i; 47 int i;
48 48
49 lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev, 49 lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
50 MAX_RX_DESCR * sizeof(struct macb_dma_desc), 50 (MAX_RX_DESCR *
51 &lp->rx_ring_dma, GFP_KERNEL); 51 sizeof(struct macb_dma_desc)),
52 if (!lp->rx_ring) { 52 &lp->rx_ring_dma, GFP_KERNEL);
53 netdev_err(dev, "unable to alloc rx ring DMA buffer\n"); 53 if (!lp->rx_ring)
54 return -ENOMEM; 54 return -ENOMEM;
55 }
56 55
57 lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev, 56 lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
58 MAX_RX_DESCR * MAX_RBUFF_SZ, 57 MAX_RX_DESCR * MAX_RBUFF_SZ,
59 &lp->rx_buffers_dma, GFP_KERNEL); 58 &lp->rx_buffers_dma, GFP_KERNEL);
60 if (!lp->rx_buffers) { 59 if (!lp->rx_buffers) {
61 netdev_err(dev, "unable to alloc rx data DMA buffer\n");
62
63 dma_free_coherent(&lp->pdev->dev, 60 dma_free_coherent(&lp->pdev->dev,
64 MAX_RX_DESCR * sizeof(struct macb_dma_desc), 61 MAX_RX_DESCR * sizeof(struct macb_dma_desc),
65 lp->rx_ring, lp->rx_ring_dma); 62 lp->rx_ring, lp->rx_ring_dma);
66 lp->rx_ring = NULL; 63 lp->rx_ring = NULL;
67 return -ENOMEM; 64 return -ENOMEM;
68 } 65 }
@@ -209,7 +206,6 @@ static void at91ether_rx(struct net_device *dev)
209 netif_rx(skb); 206 netif_rx(skb);
210 } else { 207 } else {
211 lp->stats.rx_dropped++; 208 lp->stats.rx_dropped++;
212 netdev_notice(dev, "Memory squeeze, dropping packet.\n");
213 } 209 }
214 210
215 if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH)) 211 if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
@@ -303,42 +299,7 @@ static const struct of_device_id at91ether_dt_ids[] = {
303 { .compatible = "cdns,emac" }, 299 { .compatible = "cdns,emac" },
304 { /* sentinel */ } 300 { /* sentinel */ }
305}; 301};
306
307MODULE_DEVICE_TABLE(of, at91ether_dt_ids); 302MODULE_DEVICE_TABLE(of, at91ether_dt_ids);
308
309static int at91ether_get_phy_mode_dt(struct platform_device *pdev)
310{
311 struct device_node *np = pdev->dev.of_node;
312
313 if (np)
314 return of_get_phy_mode(np);
315
316 return -ENODEV;
317}
318
319static int at91ether_get_hwaddr_dt(struct macb *bp)
320{
321 struct device_node *np = bp->pdev->dev.of_node;
322
323 if (np) {
324 const char *mac = of_get_mac_address(np);
325 if (mac) {
326 memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
327 return 0;
328 }
329 }
330
331 return -ENODEV;
332}
333#else
334static int at91ether_get_phy_mode_dt(struct platform_device *pdev)
335{
336 return -ENODEV;
337}
338static int at91ether_get_hwaddr_dt(struct macb *bp)
339{
340 return -ENODEV;
341}
342#endif 303#endif
343 304
344/* Detect MAC & PHY and perform ethernet interface initialization */ 305/* Detect MAC & PHY and perform ethernet interface initialization */
@@ -352,6 +313,7 @@ static int __init at91ether_probe(struct platform_device *pdev)
352 struct macb *lp; 313 struct macb *lp;
353 int res; 314 int res;
354 u32 reg; 315 u32 reg;
316 const char *mac;
355 317
356 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 318 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
357 if (!regs) 319 if (!regs)
@@ -403,11 +365,13 @@ static int __init at91ether_probe(struct platform_device *pdev)
403 platform_set_drvdata(pdev, dev); 365 platform_set_drvdata(pdev, dev);
404 SET_NETDEV_DEV(dev, &pdev->dev); 366 SET_NETDEV_DEV(dev, &pdev->dev);
405 367
406 res = at91ether_get_hwaddr_dt(lp); 368 mac = of_get_mac_address(pdev->dev.of_node);
407 if (res < 0) 369 if (mac)
370 memcpy(lp->dev->dev_addr, mac, ETH_ALEN);
371 else
408 macb_get_hwaddr(lp); 372 macb_get_hwaddr(lp);
409 373
410 res = at91ether_get_phy_mode_dt(pdev); 374 res = of_get_phy_mode(pdev->dev.of_node);
411 if (res < 0) { 375 if (res < 0) {
412 if (board_data && board_data->is_rmii) 376 if (board_data && board_data->is_rmii)
413 lp->phy_interface = PHY_INTERFACE_MODE_RMII; 377 lp->phy_interface = PHY_INTERFACE_MODE_RMII;
@@ -519,18 +483,7 @@ static struct platform_driver at91ether_driver = {
519 }, 483 },
520}; 484};
521 485
522static int __init at91ether_init(void) 486module_platform_driver_probe(at91ether_driver, at91ether_probe);
523{
524 return platform_driver_probe(&at91ether_driver, at91ether_probe);
525}
526
527static void __exit at91ether_exit(void)
528{
529 platform_driver_unregister(&at91ether_driver);
530}
531
532module_init(at91ether_init)
533module_exit(at91ether_exit)
534 487
535MODULE_LICENSE("GPL"); 488MODULE_LICENSE("GPL");
536MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver"); 489MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver");
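module_platform_driver_probe() generates exactly the init/exit pair deleted above, so one macro line replaces eleven lines of registration boilerplate. A rough out-of-kernel illustration of how such a macro folds the two functions away (simplified: the real kernel macro also applies __init/__exit annotations and wires the pair into module_init/module_exit):

    #include <stdio.h>

    struct platform_driver { const char *name; };

    /* Stand-ins for the kernel entry points used by the macro. */
    static int platform_driver_probe(struct platform_driver *d, int (*probe)(void))
    { printf("register %s\n", d->name); return probe(); }

    static void platform_driver_unregister(struct platform_driver *d)
    { printf("unregister %s\n", d->name); }

    /* Sketch of the macro: emit the functions the driver used to hand-write. */
    #define module_platform_driver_probe(drv, probe_fn)          \
    static int drv##_init(void)                                  \
    { return platform_driver_probe(&(drv), (probe_fn)); }        \
    static void drv##_exit(void)                                 \
    { platform_driver_unregister(&(drv)); }

    static int my_probe(void) { return 0; }
    static struct platform_driver my_driver = { "at91_ether-like" };

    module_platform_driver_probe(my_driver, my_probe)

    int main(void)
    {
        int ret = my_driver_init();
        my_driver_exit();
        return ret;
    }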
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 79039439bfdc..7fd0e3e977af 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -485,6 +485,8 @@ static void macb_tx_interrupt(struct macb *bp)
485 status = macb_readl(bp, TSR); 485 status = macb_readl(bp, TSR);
486 macb_writel(bp, TSR, status); 486 macb_writel(bp, TSR, status);
487 487
488 macb_writel(bp, ISR, MACB_BIT(TCOMP));
489
488 netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", 490 netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
489 (unsigned long)status); 491 (unsigned long)status);
490 492
@@ -736,6 +738,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
736 * now. 738 * now.
737 */ 739 */
738 macb_writel(bp, IDR, MACB_RX_INT_FLAGS); 740 macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
741 macb_writel(bp, ISR, MACB_BIT(RCOMP));
739 742
740 if (napi_schedule_prep(&bp->napi)) { 743 if (napi_schedule_prep(&bp->napi)) {
741 netdev_vdbg(bp->dev, "scheduling RX softirq\n"); 744 netdev_vdbg(bp->dev, "scheduling RX softirq\n");
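Both macb hunks acknowledge the interrupt source they just handled by writing its bit back to ISR; on GEM/MACB variants where the status register is write-one-to-clear, skipping that write leaves TCOMP or RCOMP asserted and the interrupt fires forever. The idiom against a simulated register (names hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    #define IRQ_TCOMP (1u << 0)
    #define IRQ_RCOMP (1u << 1)

    static uint32_t isr_reg = IRQ_TCOMP | IRQ_RCOMP;  /* simulated ISR */

    static uint32_t isr_read(void) { return isr_reg; }

    /* Write-one-to-clear: only bits written as 1 are acknowledged. */
    static void isr_write(uint32_t v) { isr_reg &= ~v; }

    int main(void)
    {
        uint32_t status = isr_read();

        if (status & IRQ_TCOMP) {
            /* ... service TX completions ... */
            isr_write(IRQ_TCOMP);        /* ack just this source */
        }
        printf("still pending: 0x%x\n", isr_read());  /* RCOMP remains */
        return 0;
    }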
@@ -1054,6 +1057,7 @@ static void macb_configure_dma(struct macb *bp)
1054 dmacfg |= GEM_BF(RXBS, RX_BUFFER_SIZE / 64); 1057 dmacfg |= GEM_BF(RXBS, RX_BUFFER_SIZE / 64);
1055 dmacfg |= GEM_BF(FBLDO, 16); 1058 dmacfg |= GEM_BF(FBLDO, 16);
1056 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); 1059 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
1060 dmacfg &= ~GEM_BIT(ENDIA);
1057 gem_writel(bp, DMACFG, dmacfg); 1061 gem_writel(bp, DMACFG, dmacfg);
1058 } 1062 }
1059} 1063}
@@ -1472,41 +1476,7 @@ static const struct of_device_id macb_dt_ids[] = {
1472 { .compatible = "cdns,gem" }, 1476 { .compatible = "cdns,gem" },
1473 { /* sentinel */ } 1477 { /* sentinel */ }
1474}; 1478};
1475
1476MODULE_DEVICE_TABLE(of, macb_dt_ids); 1479MODULE_DEVICE_TABLE(of, macb_dt_ids);
1477
1478static int macb_get_phy_mode_dt(struct platform_device *pdev)
1479{
1480 struct device_node *np = pdev->dev.of_node;
1481
1482 if (np)
1483 return of_get_phy_mode(np);
1484
1485 return -ENODEV;
1486}
1487
1488static int macb_get_hwaddr_dt(struct macb *bp)
1489{
1490 struct device_node *np = bp->pdev->dev.of_node;
1491 if (np) {
1492 const char *mac = of_get_mac_address(np);
1493 if (mac) {
1494 memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
1495 return 0;
1496 }
1497 }
1498
1499 return -ENODEV;
1500}
1501#else
1502static int macb_get_phy_mode_dt(struct platform_device *pdev)
1503{
1504 return -ENODEV;
1505}
1506static int macb_get_hwaddr_dt(struct macb *bp)
1507{
1508 return -ENODEV;
1509}
1510#endif 1480#endif
1511 1481
1512static int __init macb_probe(struct platform_device *pdev) 1482static int __init macb_probe(struct platform_device *pdev)
@@ -1519,6 +1489,7 @@ static int __init macb_probe(struct platform_device *pdev)
1519 u32 config; 1489 u32 config;
1520 int err = -ENXIO; 1490 int err = -ENXIO;
1521 struct pinctrl *pinctrl; 1491 struct pinctrl *pinctrl;
1492 const char *mac;
1522 1493
1523 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1494 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1524 if (!regs) { 1495 if (!regs) {
@@ -1557,14 +1528,14 @@ static int __init macb_probe(struct platform_device *pdev)
1557 dev_err(&pdev->dev, "failed to get macb_clk\n"); 1528 dev_err(&pdev->dev, "failed to get macb_clk\n");
1558 goto err_out_free_dev; 1529 goto err_out_free_dev;
1559 } 1530 }
1560 clk_enable(bp->pclk); 1531 clk_prepare_enable(bp->pclk);
1561 1532
1562 bp->hclk = clk_get(&pdev->dev, "hclk"); 1533 bp->hclk = clk_get(&pdev->dev, "hclk");
1563 if (IS_ERR(bp->hclk)) { 1534 if (IS_ERR(bp->hclk)) {
1564 dev_err(&pdev->dev, "failed to get hclk\n"); 1535 dev_err(&pdev->dev, "failed to get hclk\n");
1565 goto err_out_put_pclk; 1536 goto err_out_put_pclk;
1566 } 1537 }
1567 clk_enable(bp->hclk); 1538 clk_prepare_enable(bp->hclk);
1568 1539
1569 bp->regs = ioremap(regs->start, resource_size(regs)); 1540 bp->regs = ioremap(regs->start, resource_size(regs));
1570 if (!bp->regs) { 1541 if (!bp->regs) {
@@ -1592,11 +1563,13 @@ static int __init macb_probe(struct platform_device *pdev)
1592 config |= macb_dbw(bp); 1563 config |= macb_dbw(bp);
1593 macb_writel(bp, NCFGR, config); 1564 macb_writel(bp, NCFGR, config);
1594 1565
1595 err = macb_get_hwaddr_dt(bp); 1566 mac = of_get_mac_address(pdev->dev.of_node);
1596 if (err < 0) 1567 if (mac)
1568 memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
1569 else
1597 macb_get_hwaddr(bp); 1570 macb_get_hwaddr(bp);
1598 1571
1599 err = macb_get_phy_mode_dt(pdev); 1572 err = of_get_phy_mode(pdev->dev.of_node);
1600 if (err < 0) { 1573 if (err < 0) {
1601 pdata = pdev->dev.platform_data; 1574 pdata = pdev->dev.platform_data;
1602 if (pdata && pdata->is_rmii) 1575 if (pdata && pdata->is_rmii)
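With the #ifdef'd DT helpers gone, both at91_ether and macb use the same two-step address policy: prefer a MAC address from the device tree, otherwise keep whatever the bootloader left in the controller registers. The fallback in isolation, with the kernel helpers stubbed (stub behavior is illustrative):

    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN 6

    /* Stub for of_get_mac_address(): NULL when the DT has no address. */
    static const unsigned char *dt_mac_address(void)
    {
        static const unsigned char mac[ETH_ALEN] = { 2, 0, 0, 0, 0, 1 };
        return mac;           /* return NULL to exercise the fallback */
    }

    /* Stub for macb_get_hwaddr(): read what the registers hold. */
    static void hw_mac_address(unsigned char *addr)
    {
        memset(addr, 0xaa, ETH_ALEN);
    }

    int main(void)
    {
        unsigned char dev_addr[ETH_ALEN];
        const unsigned char *mac = dt_mac_address();

        if (mac)
            memcpy(dev_addr, mac, ETH_ALEN);
        else
            hw_mac_address(dev_addr);
        printf("%02x:%02x:...\n", dev_addr[0], dev_addr[1]);
        return 0;
    }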
@@ -1654,9 +1627,9 @@ err_out_free_irq:
1654err_out_iounmap: 1627err_out_iounmap:
1655 iounmap(bp->regs); 1628 iounmap(bp->regs);
1656err_out_disable_clocks: 1629err_out_disable_clocks:
1657 clk_disable(bp->hclk); 1630 clk_disable_unprepare(bp->hclk);
1658 clk_put(bp->hclk); 1631 clk_put(bp->hclk);
1659 clk_disable(bp->pclk); 1632 clk_disable_unprepare(bp->pclk);
1660err_out_put_pclk: 1633err_out_put_pclk:
1661 clk_put(bp->pclk); 1634 clk_put(bp->pclk);
1662err_out_free_dev: 1635err_out_free_dev:
@@ -1683,9 +1656,9 @@ static int __exit macb_remove(struct platform_device *pdev)
1683 unregister_netdev(dev); 1656 unregister_netdev(dev);
1684 free_irq(dev->irq, dev); 1657 free_irq(dev->irq, dev);
1685 iounmap(bp->regs); 1658 iounmap(bp->regs);
1686 clk_disable(bp->hclk); 1659 clk_disable_unprepare(bp->hclk);
1687 clk_put(bp->hclk); 1660 clk_put(bp->hclk);
1688 clk_disable(bp->pclk); 1661 clk_disable_unprepare(bp->pclk);
1689 clk_put(bp->pclk); 1662 clk_put(bp->pclk);
1690 free_netdev(dev); 1663 free_netdev(dev);
1691 platform_set_drvdata(pdev, NULL); 1664 platform_set_drvdata(pdev, NULL);
@@ -1703,8 +1676,8 @@ static int macb_suspend(struct platform_device *pdev, pm_message_t state)
1703 netif_carrier_off(netdev); 1676 netif_carrier_off(netdev);
1704 netif_device_detach(netdev); 1677 netif_device_detach(netdev);
1705 1678
1706 clk_disable(bp->hclk); 1679 clk_disable_unprepare(bp->hclk);
1707 clk_disable(bp->pclk); 1680 clk_disable_unprepare(bp->pclk);
1708 1681
1709 return 0; 1682 return 0;
1710} 1683}
@@ -1714,8 +1687,8 @@ static int macb_resume(struct platform_device *pdev)
1714 struct net_device *netdev = platform_get_drvdata(pdev); 1687 struct net_device *netdev = platform_get_drvdata(pdev);
1715 struct macb *bp = netdev_priv(netdev); 1688 struct macb *bp = netdev_priv(netdev);
1716 1689
1717 clk_enable(bp->pclk); 1690 clk_prepare_enable(bp->pclk);
1718 clk_enable(bp->hclk); 1691 clk_prepare_enable(bp->hclk);
1719 1692
1720 netif_device_attach(netdev); 1693 netif_device_attach(netdev);
1721 1694
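Under the common clock framework a clock is first prepared (may sleep) and then enabled (atomic-safe); teardown mirrors the order. clk_prepare_enable()/clk_disable_unprepare() bundle the pairs, which is why every enable site in this driver, including suspend/resume and the probe error unwinding, changes in lockstep. A sketch of the pairing with stubbed clock ops:

    #include <stdio.h>

    struct clk { const char *name; int prepared, enabled; };

    static int  clk_prepare(struct clk *c)   { c->prepared = 1; return 0; }
    static int  clk_enable(struct clk *c)    { c->enabled  = 1; return 0; }
    static void clk_disable(struct clk *c)   { c->enabled  = 0; }
    static void clk_unprepare(struct clk *c) { c->prepared = 0; }

    /* The combined helpers the patch switches to. */
    static int clk_prepare_enable(struct clk *c)
    {
        int ret = clk_prepare(c);

        if (ret)
            return ret;
        ret = clk_enable(c);
        if (ret)
            clk_unprepare(c);   /* undo the prepare on failure */
        return ret;
    }

    static void clk_disable_unprepare(struct clk *c)
    {
        clk_disable(c);
        clk_unprepare(c);
    }

    int main(void)
    {
        struct clk pclk = { "pclk", 0, 0 }, hclk = { "hclk", 0, 0 };

        clk_prepare_enable(&pclk);
        clk_prepare_enable(&hclk);
        /* ... device runs ... */
        clk_disable_unprepare(&hclk);   /* reverse order on teardown */
        clk_disable_unprepare(&pclk);
        printf("pclk: enabled=%d prepared=%d\n", pclk.enabled, pclk.prepared);
        return 0;
    }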
@@ -1737,18 +1710,7 @@ static struct platform_driver macb_driver = {
1737 }, 1710 },
1738}; 1711};
1739 1712
1740static int __init macb_init(void) 1713module_platform_driver_probe(macb_driver, macb_probe);
1741{
1742 return platform_driver_probe(&macb_driver, macb_probe);
1743}
1744
1745static void __exit macb_exit(void)
1746{
1747 platform_driver_unregister(&macb_driver);
1748}
1749
1750module_init(macb_init);
1751module_exit(macb_exit);
1752 1714
1753MODULE_LICENSE("GPL"); 1715MODULE_LICENSE("GPL");
1754MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver"); 1716MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 570908b93578..993d70380688 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -173,6 +173,8 @@
173/* Bitfields in DMACFG. */ 173/* Bitfields in DMACFG. */
174#define GEM_FBLDO_OFFSET 0 174#define GEM_FBLDO_OFFSET 0
175#define GEM_FBLDO_SIZE 5 175#define GEM_FBLDO_SIZE 5
176#define GEM_ENDIA_OFFSET 7
177#define GEM_ENDIA_SIZE 1
176#define GEM_RXBMS_OFFSET 8 178#define GEM_RXBMS_OFFSET 8
177#define GEM_RXBMS_SIZE 2 179#define GEM_RXBMS_SIZE 2
178#define GEM_TXPBMS_OFFSET 10 180#define GEM_TXPBMS_OFFSET 10
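macb.h describes every register field as an _OFFSET/_SIZE pair and lets generic macros build the masks, so the new ENDIA field (bit 7, one bit wide) needs no bespoke mask and macb_configure_dma() can clear it with GEM_BIT(ENDIA) to force little-endian descriptor accesses. A self-contained sketch of the helper macros, simplified from the driver's GEM_BIT/GEM_BF:

    #include <stdint.h>
    #include <stdio.h>

    #define GEM_ENDIA_OFFSET 7
    #define GEM_ENDIA_SIZE   1
    #define GEM_RXBS_OFFSET  16
    #define GEM_RXBS_SIZE    8

    /* Build masks and field values from the name's _OFFSET/_SIZE pair. */
    #define GEM_BIT(name) (1u << GEM_##name##_OFFSET)
    #define GEM_BF(name, val)                              \
        (((val) & ((1u << GEM_##name##_SIZE) - 1))         \
         << GEM_##name##_OFFSET)

    int main(void)
    {
        uint32_t dmacfg = 0;

        dmacfg |= GEM_BF(RXBS, 1536 / 64);  /* RX buffer size field */
        dmacfg &= ~GEM_BIT(ENDIA);          /* little-endian descriptors */
        printf("dmacfg = 0x%08x\n", dmacfg);
        return 0;
    }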
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 482976925154..55fe8c9f0484 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -835,7 +835,7 @@ static void refill_free_list(struct sge *sge, struct freelQ *q)
835 struct sk_buff *skb; 835 struct sk_buff *skb;
836 dma_addr_t mapping; 836 dma_addr_t mapping;
837 837
838 skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC); 838 skb = dev_alloc_skb(q->rx_buffer_size);
839 if (!skb) 839 if (!skb)
840 break; 840 break;
841 841
@@ -1046,11 +1046,10 @@ static inline struct sk_buff *get_packet(struct pci_dev *pdev,
1046 const struct freelQ_ce *ce = &fl->centries[fl->cidx]; 1046 const struct freelQ_ce *ce = &fl->centries[fl->cidx];
1047 1047
1048 if (len < copybreak) { 1048 if (len < copybreak) {
1049 skb = alloc_skb(len + 2, GFP_ATOMIC); 1049 skb = netdev_alloc_skb_ip_align(NULL, len);
1050 if (!skb) 1050 if (!skb)
1051 goto use_orig_buf; 1051 goto use_orig_buf;
1052 1052
1053 skb_reserve(skb, 2); /* align IP header */
1054 skb_put(skb, len); 1053 skb_put(skb, len);
1055 pci_dma_sync_single_for_cpu(pdev, 1054 pci_dma_sync_single_for_cpu(pdev,
1056 dma_unmap_addr(ce, dma_addr), 1055 dma_unmap_addr(ce, dma_addr),
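The copybreak path trades a short memcpy for keeping the large mapped buffer on the free list: small packets are copied into a fresh skb, and netdev_alloc_skb_ip_align() now supplies the two alignment bytes the deleted skb_reserve() used to add, keeping the IP header 4-byte aligned. The decision reduced to plain C (skb machinery replaced by malloc; the threshold is illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define COPYBREAK    256
    #define NET_IP_ALIGN 2      /* keeps the IP header 4-byte aligned */

    /* Deliver a frame of 'len' bytes sitting in a large DMA buffer. */
    static void *deliver(unsigned char *dma_buf, size_t len, int *copied)
    {
        if (len < COPYBREAK) {
            unsigned char *pkt = malloc(NET_IP_ALIGN + len);

            if (pkt) {
                memcpy(pkt + NET_IP_ALIGN, dma_buf, len);
                *copied = 1;     /* big buffer stays on the free list */
                return pkt;
            }
        }
        *copied = 0;             /* hand up the original buffer */
        return dma_buf;
    }

    int main(void)
    {
        unsigned char buf[2048] = { 0x45 };
        int copied;
        void *pkt = deliver(buf, 60, &copied);

        printf("copied=%d\n", copied);
        if (copied)
            free(pkt);
        return 0;
    }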
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 6db997c78a5f..681804b30a3f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -54,6 +54,10 @@
54#define FW_VERSION_MINOR 1 54#define FW_VERSION_MINOR 1
55#define FW_VERSION_MICRO 0 55#define FW_VERSION_MICRO 0
56 56
57#define FW_VERSION_MAJOR_T5 0
58#define FW_VERSION_MINOR_T5 0
59#define FW_VERSION_MICRO_T5 0
60
57#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__) 61#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
58 62
59enum { 63enum {
@@ -66,7 +70,9 @@ enum {
66enum { 70enum {
67 MEM_EDC0, 71 MEM_EDC0,
68 MEM_EDC1, 72 MEM_EDC1,
69 MEM_MC 73 MEM_MC,
74 MEM_MC0 = MEM_MC,
75 MEM_MC1
70}; 76};
71 77
72enum { 78enum {
@@ -74,8 +80,10 @@ enum {
74 MEMWIN0_BASE = 0x1b800, 80 MEMWIN0_BASE = 0x1b800,
75 MEMWIN1_APERTURE = 32768, 81 MEMWIN1_APERTURE = 32768,
76 MEMWIN1_BASE = 0x28000, 82 MEMWIN1_BASE = 0x28000,
83 MEMWIN1_BASE_T5 = 0x52000,
77 MEMWIN2_APERTURE = 65536, 84 MEMWIN2_APERTURE = 65536,
78 MEMWIN2_BASE = 0x30000, 85 MEMWIN2_BASE = 0x30000,
86 MEMWIN2_BASE_T5 = 0x54000,
79}; 87};
80 88
81enum dev_master { 89enum dev_master {
@@ -431,6 +439,7 @@ struct sge_txq {
431 spinlock_t db_lock; 439 spinlock_t db_lock;
432 int db_disabled; 440 int db_disabled;
433 unsigned short db_pidx; 441 unsigned short db_pidx;
442 u64 udb;
434}; 443};
435 444
436struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */ 445struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */
@@ -504,13 +513,44 @@ struct sge {
504 513
505struct l2t_data; 514struct l2t_data;
506 515
516#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
517#define CHELSIO_CHIP_VERSION(code) ((code) >> 4)
518#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
519
520#define CHELSIO_T4 0x4
521#define CHELSIO_T5 0x5
522
523enum chip_type {
524 T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 0),
525 T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
526 T4_A3 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
527 T4_FIRST_REV = T4_A1,
528 T4_LAST_REV = T4_A3,
529
530 T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
531 T5_FIRST_REV = T5_A1,
532 T5_LAST_REV = T5_A1,
533};
534
535#ifdef CONFIG_PCI_IOV
536
537/* T4 supports SRIOV on PF0-3 and T5 on PF0-7. However, the Serial
538 * Configuration initialization for T5 only has SR-IOV functionality enabled
 539 * on PF0-3 to keep the configuration simple.
540 */
541#define NUM_OF_PF_WITH_SRIOV 4
542
543#endif
544
507struct adapter { 545struct adapter {
508 void __iomem *regs; 546 void __iomem *regs;
547 void __iomem *bar2;
509 struct pci_dev *pdev; 548 struct pci_dev *pdev;
510 struct device *pdev_dev; 549 struct device *pdev_dev;
511 unsigned int mbox; 550 unsigned int mbox;
512 unsigned int fn; 551 unsigned int fn;
513 unsigned int flags; 552 unsigned int flags;
553 enum chip_type chip;
514 554
515 int msg_enable; 555 int msg_enable;
516 556
@@ -673,6 +713,16 @@ enum {
673 VLAN_REWRITE 713 VLAN_REWRITE
674}; 714};
675 715
716static inline int is_t5(enum chip_type chip)
717{
718 return (chip >= T5_FIRST_REV && chip <= T5_LAST_REV);
719}
720
721static inline int is_t4(enum chip_type chip)
722{
723 return (chip >= T4_FIRST_REV && chip <= T4_LAST_REV);
724}
725
676static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr) 726static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
677{ 727{
678 return readl(adap->regs + reg_addr); 728 return readl(adap->regs + reg_addr);
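CHELSIO_CHIP_CODE packs the silicon generation in the high nibble and the stepping in the low nibble, so is_t4()/is_t5() become cheap range checks and a new stepping only has to extend the enum. The encoding worked through in a standalone form:

    #include <assert.h>
    #include <stdio.h>

    #define CHIP_CODE(version, revision) (((version) << 4) | (revision))
    #define CHIP_VERSION(code) ((code) >> 4)
    #define CHIP_RELEASE(code) ((code) & 0xf)

    enum chip_type {
        T4_A1 = CHIP_CODE(4, 0),
        T4_A3 = CHIP_CODE(4, 2),
        T5_A1 = CHIP_CODE(5, 0),
    };

    static int is_t4(enum chip_type c) { return c >= T4_A1 && c <= T4_A3; }

    int main(void)
    {
        assert(CHIP_VERSION(T5_A1) == 5);
        assert(CHIP_RELEASE(T4_A3) == 2);
        assert(is_t4(T4_A3) && !is_t4(T5_A1));
        printf("T5_A1 encodes as 0x%x\n", T5_A1);   /* 0x50 */
        return 0;
    }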
@@ -858,7 +908,8 @@ int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
858 int start, int n, const u16 *rspq, unsigned int nrspq); 908 int start, int n, const u16 *rspq, unsigned int nrspq);
859int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode, 909int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
860 unsigned int flags); 910 unsigned int flags);
861int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *parity); 911int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
912 u64 *parity);
862int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, 913int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
863 u64 *parity); 914 u64 *parity);
864 915
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index e707e31abd81..e76cf035100b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -68,8 +68,8 @@
68#include "t4fw_api.h" 68#include "t4fw_api.h"
69#include "l2t.h" 69#include "l2t.h"
70 70
71#define DRV_VERSION "1.3.0-ko" 71#define DRV_VERSION "2.0.0-ko"
72#define DRV_DESC "Chelsio T4 Network Driver" 72#define DRV_DESC "Chelsio T4/T5 Network Driver"
73 73
74/* 74/*
75 * Max interrupt hold-off timer value in us. Queues fall back to this value 75 * Max interrupt hold-off timer value in us. Queues fall back to this value
@@ -229,11 +229,51 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
229 CH_DEVICE(0x440a, 4), 229 CH_DEVICE(0x440a, 4),
230 CH_DEVICE(0x440d, 4), 230 CH_DEVICE(0x440d, 4),
231 CH_DEVICE(0x440e, 4), 231 CH_DEVICE(0x440e, 4),
232 CH_DEVICE(0x5001, 5),
233 CH_DEVICE(0x5002, 5),
234 CH_DEVICE(0x5003, 5),
235 CH_DEVICE(0x5004, 5),
236 CH_DEVICE(0x5005, 5),
237 CH_DEVICE(0x5006, 5),
238 CH_DEVICE(0x5007, 5),
239 CH_DEVICE(0x5008, 5),
240 CH_DEVICE(0x5009, 5),
241 CH_DEVICE(0x500A, 5),
242 CH_DEVICE(0x500B, 5),
243 CH_DEVICE(0x500C, 5),
244 CH_DEVICE(0x500D, 5),
245 CH_DEVICE(0x500E, 5),
246 CH_DEVICE(0x500F, 5),
247 CH_DEVICE(0x5010, 5),
248 CH_DEVICE(0x5011, 5),
249 CH_DEVICE(0x5012, 5),
250 CH_DEVICE(0x5013, 5),
251 CH_DEVICE(0x5401, 5),
252 CH_DEVICE(0x5402, 5),
253 CH_DEVICE(0x5403, 5),
254 CH_DEVICE(0x5404, 5),
255 CH_DEVICE(0x5405, 5),
256 CH_DEVICE(0x5406, 5),
257 CH_DEVICE(0x5407, 5),
258 CH_DEVICE(0x5408, 5),
259 CH_DEVICE(0x5409, 5),
260 CH_DEVICE(0x540A, 5),
261 CH_DEVICE(0x540B, 5),
262 CH_DEVICE(0x540C, 5),
263 CH_DEVICE(0x540D, 5),
264 CH_DEVICE(0x540E, 5),
265 CH_DEVICE(0x540F, 5),
266 CH_DEVICE(0x5410, 5),
267 CH_DEVICE(0x5411, 5),
268 CH_DEVICE(0x5412, 5),
269 CH_DEVICE(0x5413, 5),
232 { 0, } 270 { 0, }
233}; 271};
234 272
235#define FW_FNAME "cxgb4/t4fw.bin" 273#define FW_FNAME "cxgb4/t4fw.bin"
274#define FW5_FNAME "cxgb4/t5fw.bin"
236#define FW_CFNAME "cxgb4/t4-config.txt" 275#define FW_CFNAME "cxgb4/t4-config.txt"
276#define FW5_CFNAME "cxgb4/t5-config.txt"
237 277
238MODULE_DESCRIPTION(DRV_DESC); 278MODULE_DESCRIPTION(DRV_DESC);
239MODULE_AUTHOR("Chelsio Communications"); 279MODULE_AUTHOR("Chelsio Communications");
@@ -241,6 +281,7 @@ MODULE_LICENSE("Dual BSD/GPL");
241MODULE_VERSION(DRV_VERSION); 281MODULE_VERSION(DRV_VERSION);
242MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl); 282MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
243MODULE_FIRMWARE(FW_FNAME); 283MODULE_FIRMWARE(FW_FNAME);
284MODULE_FIRMWARE(FW5_FNAME);
244 285
245/* 286/*
246 * Normally we're willing to become the firmware's Master PF but will be happy 287 * Normally we're willing to become the firmware's Master PF but will be happy
@@ -319,7 +360,10 @@ static bool vf_acls;
319module_param(vf_acls, bool, 0644); 360module_param(vf_acls, bool, 0644);
320MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement"); 361MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
321 362
 322 static unsigned int num_vf[4]; 363/* Configure the number of PCI-E Virtual Functions to be instantiated
 364 * on SR-IOV capable Physical Functions.
365 */
366static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];
323 367
324module_param_array(num_vf, uint, NULL, 0644); 368module_param_array(num_vf, uint, NULL, 0644);
325MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3"); 369MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
@@ -1002,21 +1046,36 @@ freeout: t4_free_sge_resources(adap);
1002static int upgrade_fw(struct adapter *adap) 1046static int upgrade_fw(struct adapter *adap)
1003{ 1047{
1004 int ret; 1048 int ret;
1005 u32 vers; 1049 u32 vers, exp_major;
1006 const struct fw_hdr *hdr; 1050 const struct fw_hdr *hdr;
1007 const struct firmware *fw; 1051 const struct firmware *fw;
1008 struct device *dev = adap->pdev_dev; 1052 struct device *dev = adap->pdev_dev;
1053 char *fw_file_name;
1009 1054
1010 ret = request_firmware(&fw, FW_FNAME, dev); 1055 switch (CHELSIO_CHIP_VERSION(adap->chip)) {
1056 case CHELSIO_T4:
1057 fw_file_name = FW_FNAME;
1058 exp_major = FW_VERSION_MAJOR;
1059 break;
1060 case CHELSIO_T5:
1061 fw_file_name = FW5_FNAME;
1062 exp_major = FW_VERSION_MAJOR_T5;
1063 break;
1064 default:
1065 dev_err(dev, "Unsupported chip type, %x\n", adap->chip);
1066 return -EINVAL;
1067 }
1068
1069 ret = request_firmware(&fw, fw_file_name, dev);
1011 if (ret < 0) { 1070 if (ret < 0) {
1012 dev_err(dev, "unable to load firmware image " FW_FNAME 1071 dev_err(dev, "unable to load firmware image %s, error %d\n",
1013 ", error %d\n", ret); 1072 fw_file_name, ret);
1014 return ret; 1073 return ret;
1015 } 1074 }
1016 1075
1017 hdr = (const struct fw_hdr *)fw->data; 1076 hdr = (const struct fw_hdr *)fw->data;
1018 vers = ntohl(hdr->fw_ver); 1077 vers = ntohl(hdr->fw_ver);
1019 if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) { 1078 if (FW_HDR_FW_VER_MAJOR_GET(vers) != exp_major) {
1020 ret = -EINVAL; /* wrong major version, won't do */ 1079 ret = -EINVAL; /* wrong major version, won't do */
1021 goto out; 1080 goto out;
1022 } 1081 }
@@ -1024,18 +1083,15 @@ static int upgrade_fw(struct adapter *adap)
1024 /* 1083 /*
1025 * If the flash FW is unusable or we found something newer, load it. 1084 * If the flash FW is unusable or we found something newer, load it.
1026 */ 1085 */
1027 if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR || 1086 if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != exp_major ||
1028 vers > adap->params.fw_vers) { 1087 vers > adap->params.fw_vers) {
1029 dev_info(dev, "upgrading firmware ...\n"); 1088 dev_info(dev, "upgrading firmware ...\n");
1030 ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size, 1089 ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size,
1031 /*force=*/false); 1090 /*force=*/false);
1032 if (!ret) 1091 if (!ret)
1033 dev_info(dev, "firmware successfully upgraded to " 1092 dev_info(dev,
1034 FW_FNAME " (%d.%d.%d.%d)\n", 1093 "firmware upgraded to version %pI4 from %s\n",
1035 FW_HDR_FW_VER_MAJOR_GET(vers), 1094 &hdr->fw_ver, fw_file_name);
1036 FW_HDR_FW_VER_MINOR_GET(vers),
1037 FW_HDR_FW_VER_MICRO_GET(vers),
1038 FW_HDR_FW_VER_BUILD_GET(vers));
1039 else 1095 else
1040 dev_err(dev, "firmware upgrade failed! err=%d\n", -ret); 1096 dev_err(dev, "firmware upgrade failed! err=%d\n", -ret);
1041 } else { 1097 } else {
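Per-chip file names and expected majors aside, the upgrade rule itself is unchanged: reject an image whose major version is not the one this driver speaks, and flash it only when the firmware already on the adapter has the wrong major or is older. Condensed into one predicate (assuming the major number rides in the top byte of the version word, as the dotted %pI4 print above suggests):

    #include <stdint.h>
    #include <stdio.h>

    #define VER_MAJOR(v) (((v) >> 24) & 0xff)   /* assumed M.m.u.b layout */

    /* 1: flash it, 0: keep what is there, -1: image unusable. */
    static int should_upgrade(uint32_t image_ver, uint32_t flash_ver,
                              unsigned int exp_major)
    {
        if (VER_MAJOR(image_ver) != exp_major)
            return -1;          /* wrong major version, won't do */
        return VER_MAJOR(flash_ver) != exp_major || image_ver > flash_ver;
    }

    int main(void)
    {
        uint32_t image = 0x01010000;    /* 1.1.0.0 */
        uint32_t flash = 0x01000000;    /* 1.0.0.0 */

        printf("upgrade? %d\n", should_upgrade(image, flash, 1));
        return 0;
    }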
@@ -1308,6 +1364,8 @@ static char stats_strings[][ETH_GSTRING_LEN] = {
1308 "VLANinsertions ", 1364 "VLANinsertions ",
1309 "GROpackets ", 1365 "GROpackets ",
1310 "GROmerged ", 1366 "GROmerged ",
1367 "WriteCoalSuccess ",
1368 "WriteCoalFail ",
1311}; 1369};
1312 1370
1313static int get_sset_count(struct net_device *dev, int sset) 1371static int get_sset_count(struct net_device *dev, int sset)
@@ -1321,10 +1379,15 @@ static int get_sset_count(struct net_device *dev, int sset)
1321} 1379}
1322 1380
1323#define T4_REGMAP_SIZE (160 * 1024) 1381#define T4_REGMAP_SIZE (160 * 1024)
1382#define T5_REGMAP_SIZE (332 * 1024)
1324 1383
1325static int get_regs_len(struct net_device *dev) 1384static int get_regs_len(struct net_device *dev)
1326{ 1385{
1327 return T4_REGMAP_SIZE; 1386 struct adapter *adap = netdev2adap(dev);
1387 if (is_t4(adap->chip))
1388 return T4_REGMAP_SIZE;
1389 else
1390 return T5_REGMAP_SIZE;
1328} 1391}
1329 1392
1330static int get_eeprom_len(struct net_device *dev) 1393static int get_eeprom_len(struct net_device *dev)
@@ -1398,11 +1461,25 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1398{ 1461{
1399 struct port_info *pi = netdev_priv(dev); 1462 struct port_info *pi = netdev_priv(dev);
1400 struct adapter *adapter = pi->adapter; 1463 struct adapter *adapter = pi->adapter;
1464 u32 val1, val2;
1401 1465
1402 t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data); 1466 t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
1403 1467
1404 data += sizeof(struct port_stats) / sizeof(u64); 1468 data += sizeof(struct port_stats) / sizeof(u64);
1405 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data); 1469 collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1470 data += sizeof(struct queue_port_stats) / sizeof(u64);
1471 if (!is_t4(adapter->chip)) {
1472 t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
1473 val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
1474 val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
1475 *data = val1 - val2;
1476 data++;
1477 *data = val2;
1478 data++;
1479 } else {
1480 memset(data, 0, 2 * sizeof(u64));
1481 *data += 2;
1482 }
1406} 1483}
1407 1484
1408/* 1485/*
@@ -1413,7 +1490,8 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1413 */ 1490 */
1414static inline unsigned int mk_adap_vers(const struct adapter *ap) 1491static inline unsigned int mk_adap_vers(const struct adapter *ap)
1415{ 1492{
1416 return 4 | (ap->params.rev << 10) | (1 << 16); 1493 return CHELSIO_CHIP_VERSION(ap->chip) |
1494 (CHELSIO_CHIP_RELEASE(ap->chip) << 10) | (1 << 16);
1417} 1495}
1418 1496
1419static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start, 1497static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
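Worked example: a T5 A1 part carries chip = CHELSIO_CHIP_CODE(5, 0), so mk_adap_vers() now returns 5 | (0 << 10) | (1 << 16) = 0x10005, where the old code would have hard-wired 4 into the low bits whatever the generation.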
@@ -1428,7 +1506,7 @@ static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1428static void get_regs(struct net_device *dev, struct ethtool_regs *regs, 1506static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1429 void *buf) 1507 void *buf)
1430{ 1508{
1431 static const unsigned int reg_ranges[] = { 1509 static const unsigned int t4_reg_ranges[] = {
1432 0x1008, 0x1108, 1510 0x1008, 0x1108,
1433 0x1180, 0x11b4, 1511 0x1180, 0x11b4,
1434 0x11fc, 0x123c, 1512 0x11fc, 0x123c,
@@ -1648,13 +1726,452 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1648 0x27e00, 0x27e04 1726 0x27e00, 0x27e04
1649 }; 1727 };
1650 1728
1729 static const unsigned int t5_reg_ranges[] = {
1730 0x1008, 0x1148,
1731 0x1180, 0x11b4,
1732 0x11fc, 0x123c,
1733 0x1280, 0x173c,
1734 0x1800, 0x18fc,
1735 0x3000, 0x3028,
1736 0x3060, 0x30d8,
1737 0x30e0, 0x30fc,
1738 0x3140, 0x357c,
1739 0x35a8, 0x35cc,
1740 0x35ec, 0x35ec,
1741 0x3600, 0x5624,
1742 0x56cc, 0x575c,
1743 0x580c, 0x5814,
1744 0x5890, 0x58bc,
1745 0x5940, 0x59dc,
1746 0x59fc, 0x5a18,
1747 0x5a60, 0x5a9c,
1748 0x5b9c, 0x5bfc,
1749 0x6000, 0x6040,
1750 0x6058, 0x614c,
1751 0x7700, 0x7798,
1752 0x77c0, 0x78fc,
1753 0x7b00, 0x7c54,
1754 0x7d00, 0x7efc,
1755 0x8dc0, 0x8de0,
1756 0x8df8, 0x8e84,
1757 0x8ea0, 0x8f84,
1758 0x8fc0, 0x90f8,
1759 0x9400, 0x9470,
1760 0x9600, 0x96f4,
1761 0x9800, 0x9808,
1762 0x9820, 0x983c,
1763 0x9850, 0x9864,
1764 0x9c00, 0x9c6c,
1765 0x9c80, 0x9cec,
1766 0x9d00, 0x9d6c,
1767 0x9d80, 0x9dec,
1768 0x9e00, 0x9e6c,
1769 0x9e80, 0x9eec,
1770 0x9f00, 0x9f6c,
1771 0x9f80, 0xa020,
1772 0xd004, 0xd03c,
1773 0xdfc0, 0xdfe0,
1774 0xe000, 0x11088,
1775 0x1109c, 0x1117c,
1776 0x11190, 0x11204,
1777 0x19040, 0x1906c,
1778 0x19078, 0x19080,
1779 0x1908c, 0x19124,
1780 0x19150, 0x191b0,
1781 0x191d0, 0x191e8,
1782 0x19238, 0x19290,
1783 0x193f8, 0x19474,
1784 0x19490, 0x194cc,
1785 0x194f0, 0x194f8,
1786 0x19c00, 0x19c60,
1787 0x19c94, 0x19e10,
1788 0x19e50, 0x19f34,
1789 0x19f40, 0x19f50,
1790 0x19f90, 0x19fe4,
1791 0x1a000, 0x1a06c,
1792 0x1a0b0, 0x1a120,
1793 0x1a128, 0x1a138,
1794 0x1a190, 0x1a1c4,
1795 0x1a1fc, 0x1a1fc,
1796 0x1e008, 0x1e00c,
1797 0x1e040, 0x1e04c,
1798 0x1e284, 0x1e290,
1799 0x1e2c0, 0x1e2c0,
1800 0x1e2e0, 0x1e2e0,
1801 0x1e300, 0x1e384,
1802 0x1e3c0, 0x1e3c8,
1803 0x1e408, 0x1e40c,
1804 0x1e440, 0x1e44c,
1805 0x1e684, 0x1e690,
1806 0x1e6c0, 0x1e6c0,
1807 0x1e6e0, 0x1e6e0,
1808 0x1e700, 0x1e784,
1809 0x1e7c0, 0x1e7c8,
1810 0x1e808, 0x1e80c,
1811 0x1e840, 0x1e84c,
1812 0x1ea84, 0x1ea90,
1813 0x1eac0, 0x1eac0,
1814 0x1eae0, 0x1eae0,
1815 0x1eb00, 0x1eb84,
1816 0x1ebc0, 0x1ebc8,
1817 0x1ec08, 0x1ec0c,
1818 0x1ec40, 0x1ec4c,
1819 0x1ee84, 0x1ee90,
1820 0x1eec0, 0x1eec0,
1821 0x1eee0, 0x1eee0,
1822 0x1ef00, 0x1ef84,
1823 0x1efc0, 0x1efc8,
1824 0x1f008, 0x1f00c,
1825 0x1f040, 0x1f04c,
1826 0x1f284, 0x1f290,
1827 0x1f2c0, 0x1f2c0,
1828 0x1f2e0, 0x1f2e0,
1829 0x1f300, 0x1f384,
1830 0x1f3c0, 0x1f3c8,
1831 0x1f408, 0x1f40c,
1832 0x1f440, 0x1f44c,
1833 0x1f684, 0x1f690,
1834 0x1f6c0, 0x1f6c0,
1835 0x1f6e0, 0x1f6e0,
1836 0x1f700, 0x1f784,
1837 0x1f7c0, 0x1f7c8,
1838 0x1f808, 0x1f80c,
1839 0x1f840, 0x1f84c,
1840 0x1fa84, 0x1fa90,
1841 0x1fac0, 0x1fac0,
1842 0x1fae0, 0x1fae0,
1843 0x1fb00, 0x1fb84,
1844 0x1fbc0, 0x1fbc8,
1845 0x1fc08, 0x1fc0c,
1846 0x1fc40, 0x1fc4c,
1847 0x1fe84, 0x1fe90,
1848 0x1fec0, 0x1fec0,
1849 0x1fee0, 0x1fee0,
1850 0x1ff00, 0x1ff84,
1851 0x1ffc0, 0x1ffc8,
1852 0x30000, 0x30030,
1853 0x30100, 0x30144,
1854 0x30190, 0x301d0,
1855 0x30200, 0x30318,
1856 0x30400, 0x3052c,
1857 0x30540, 0x3061c,
1858 0x30800, 0x30834,
1859 0x308c0, 0x30908,
1860 0x30910, 0x309ac,
1861 0x30a00, 0x30a04,
1862 0x30a0c, 0x30a2c,
1863 0x30a44, 0x30a50,
1864 0x30a74, 0x30c24,
1865 0x30d08, 0x30d14,
1866 0x30d1c, 0x30d20,
1867 0x30d3c, 0x30d50,
1868 0x31200, 0x3120c,
1869 0x31220, 0x31220,
1870 0x31240, 0x31240,
1871 0x31600, 0x31600,
1872 0x31608, 0x3160c,
1873 0x31a00, 0x31a1c,
1874 0x31e04, 0x31e20,
1875 0x31e38, 0x31e3c,
1876 0x31e80, 0x31e80,
1877 0x31e88, 0x31ea8,
1878 0x31eb0, 0x31eb4,
1879 0x31ec8, 0x31ed4,
1880 0x31fb8, 0x32004,
1881 0x32208, 0x3223c,
1882 0x32600, 0x32630,
1883 0x32a00, 0x32abc,
1884 0x32b00, 0x32b70,
1885 0x33000, 0x33048,
1886 0x33060, 0x3309c,
1887 0x330f0, 0x33148,
1888 0x33160, 0x3319c,
1889 0x331f0, 0x332e4,
1890 0x332f8, 0x333e4,
1891 0x333f8, 0x33448,
1892 0x33460, 0x3349c,
1893 0x334f0, 0x33548,
1894 0x33560, 0x3359c,
1895 0x335f0, 0x336e4,
1896 0x336f8, 0x337e4,
1897 0x337f8, 0x337fc,
1898 0x33814, 0x33814,
1899 0x3382c, 0x3382c,
1900 0x33880, 0x3388c,
1901 0x338e8, 0x338ec,
1902 0x33900, 0x33948,
1903 0x33960, 0x3399c,
1904 0x339f0, 0x33ae4,
1905 0x33af8, 0x33b10,
1906 0x33b28, 0x33b28,
1907 0x33b3c, 0x33b50,
1908 0x33bf0, 0x33c10,
1909 0x33c28, 0x33c28,
1910 0x33c3c, 0x33c50,
1911 0x33cf0, 0x33cfc,
1912 0x34000, 0x34030,
1913 0x34100, 0x34144,
1914 0x34190, 0x341d0,
1915 0x34200, 0x34318,
1916 0x34400, 0x3452c,
1917 0x34540, 0x3461c,
1918 0x34800, 0x34834,
1919 0x348c0, 0x34908,
1920 0x34910, 0x349ac,
1921 0x34a00, 0x34a04,
1922 0x34a0c, 0x34a2c,
1923 0x34a44, 0x34a50,
1924 0x34a74, 0x34c24,
1925 0x34d08, 0x34d14,
1926 0x34d1c, 0x34d20,
1927 0x34d3c, 0x34d50,
1928 0x35200, 0x3520c,
1929 0x35220, 0x35220,
1930 0x35240, 0x35240,
1931 0x35600, 0x35600,
1932 0x35608, 0x3560c,
1933 0x35a00, 0x35a1c,
1934 0x35e04, 0x35e20,
1935 0x35e38, 0x35e3c,
1936 0x35e80, 0x35e80,
1937 0x35e88, 0x35ea8,
1938 0x35eb0, 0x35eb4,
1939 0x35ec8, 0x35ed4,
1940 0x35fb8, 0x36004,
1941 0x36208, 0x3623c,
1942 0x36600, 0x36630,
1943 0x36a00, 0x36abc,
1944 0x36b00, 0x36b70,
1945 0x37000, 0x37048,
1946 0x37060, 0x3709c,
1947 0x370f0, 0x37148,
1948 0x37160, 0x3719c,
1949 0x371f0, 0x372e4,
1950 0x372f8, 0x373e4,
1951 0x373f8, 0x37448,
1952 0x37460, 0x3749c,
1953 0x374f0, 0x37548,
1954 0x37560, 0x3759c,
1955 0x375f0, 0x376e4,
1956 0x376f8, 0x377e4,
1957 0x377f8, 0x377fc,
1958 0x37814, 0x37814,
1959 0x3782c, 0x3782c,
1960 0x37880, 0x3788c,
1961 0x378e8, 0x378ec,
1962 0x37900, 0x37948,
1963 0x37960, 0x3799c,
1964 0x379f0, 0x37ae4,
1965 0x37af8, 0x37b10,
1966 0x37b28, 0x37b28,
1967 0x37b3c, 0x37b50,
1968 0x37bf0, 0x37c10,
1969 0x37c28, 0x37c28,
1970 0x37c3c, 0x37c50,
1971 0x37cf0, 0x37cfc,
1972 0x38000, 0x38030,
1973 0x38100, 0x38144,
1974 0x38190, 0x381d0,
1975 0x38200, 0x38318,
1976 0x38400, 0x3852c,
1977 0x38540, 0x3861c,
1978 0x38800, 0x38834,
1979 0x388c0, 0x38908,
1980 0x38910, 0x389ac,
1981 0x38a00, 0x38a04,
1982 0x38a0c, 0x38a2c,
1983 0x38a44, 0x38a50,
1984 0x38a74, 0x38c24,
1985 0x38d08, 0x38d14,
1986 0x38d1c, 0x38d20,
1987 0x38d3c, 0x38d50,
1988 0x39200, 0x3920c,
1989 0x39220, 0x39220,
1990 0x39240, 0x39240,
1991 0x39600, 0x39600,
1992 0x39608, 0x3960c,
1993 0x39a00, 0x39a1c,
1994 0x39e04, 0x39e20,
1995 0x39e38, 0x39e3c,
1996 0x39e80, 0x39e80,
1997 0x39e88, 0x39ea8,
1998 0x39eb0, 0x39eb4,
1999 0x39ec8, 0x39ed4,
2000 0x39fb8, 0x3a004,
2001 0x3a208, 0x3a23c,
2002 0x3a600, 0x3a630,
2003 0x3aa00, 0x3aabc,
2004 0x3ab00, 0x3ab70,
2005 0x3b000, 0x3b048,
2006 0x3b060, 0x3b09c,
2007 0x3b0f0, 0x3b148,
2008 0x3b160, 0x3b19c,
2009 0x3b1f0, 0x3b2e4,
2010 0x3b2f8, 0x3b3e4,
2011 0x3b3f8, 0x3b448,
2012 0x3b460, 0x3b49c,
2013 0x3b4f0, 0x3b548,
2014 0x3b560, 0x3b59c,
2015 0x3b5f0, 0x3b6e4,
2016 0x3b6f8, 0x3b7e4,
2017 0x3b7f8, 0x3b7fc,
2018 0x3b814, 0x3b814,
2019 0x3b82c, 0x3b82c,
2020 0x3b880, 0x3b88c,
2021 0x3b8e8, 0x3b8ec,
2022 0x3b900, 0x3b948,
2023 0x3b960, 0x3b99c,
2024 0x3b9f0, 0x3bae4,
2025 0x3baf8, 0x3bb10,
2026 0x3bb28, 0x3bb28,
2027 0x3bb3c, 0x3bb50,
2028 0x3bbf0, 0x3bc10,
2029 0x3bc28, 0x3bc28,
2030 0x3bc3c, 0x3bc50,
2031 0x3bcf0, 0x3bcfc,
2032 0x3c000, 0x3c030,
2033 0x3c100, 0x3c144,
2034 0x3c190, 0x3c1d0,
2035 0x3c200, 0x3c318,
2036 0x3c400, 0x3c52c,
2037 0x3c540, 0x3c61c,
2038 0x3c800, 0x3c834,
2039 0x3c8c0, 0x3c908,
2040 0x3c910, 0x3c9ac,
2041 0x3ca00, 0x3ca04,
2042 0x3ca0c, 0x3ca2c,
2043 0x3ca44, 0x3ca50,
2044 0x3ca74, 0x3cc24,
2045 0x3cd08, 0x3cd14,
2046 0x3cd1c, 0x3cd20,
2047 0x3cd3c, 0x3cd50,
2048 0x3d200, 0x3d20c,
2049 0x3d220, 0x3d220,
2050 0x3d240, 0x3d240,
2051 0x3d600, 0x3d600,
2052 0x3d608, 0x3d60c,
2053 0x3da00, 0x3da1c,
2054 0x3de04, 0x3de20,
2055 0x3de38, 0x3de3c,
2056 0x3de80, 0x3de80,
2057 0x3de88, 0x3dea8,
2058 0x3deb0, 0x3deb4,
2059 0x3dec8, 0x3ded4,
2060 0x3dfb8, 0x3e004,
2061 0x3e208, 0x3e23c,
2062 0x3e600, 0x3e630,
2063 0x3ea00, 0x3eabc,
2064 0x3eb00, 0x3eb70,
2065 0x3f000, 0x3f048,
2066 0x3f060, 0x3f09c,
2067 0x3f0f0, 0x3f148,
2068 0x3f160, 0x3f19c,
2069 0x3f1f0, 0x3f2e4,
2070 0x3f2f8, 0x3f3e4,
2071 0x3f3f8, 0x3f448,
2072 0x3f460, 0x3f49c,
2073 0x3f4f0, 0x3f548,
2074 0x3f560, 0x3f59c,
2075 0x3f5f0, 0x3f6e4,
2076 0x3f6f8, 0x3f7e4,
2077 0x3f7f8, 0x3f7fc,
2078 0x3f814, 0x3f814,
2079 0x3f82c, 0x3f82c,
2080 0x3f880, 0x3f88c,
2081 0x3f8e8, 0x3f8ec,
2082 0x3f900, 0x3f948,
2083 0x3f960, 0x3f99c,
2084 0x3f9f0, 0x3fae4,
2085 0x3faf8, 0x3fb10,
2086 0x3fb28, 0x3fb28,
2087 0x3fb3c, 0x3fb50,
2088 0x3fbf0, 0x3fc10,
2089 0x3fc28, 0x3fc28,
2090 0x3fc3c, 0x3fc50,
2091 0x3fcf0, 0x3fcfc,
2092 0x40000, 0x4000c,
2093 0x40040, 0x40068,
2094 0x40080, 0x40144,
2095 0x40180, 0x4018c,
2096 0x40200, 0x40298,
2097 0x402ac, 0x4033c,
2098 0x403f8, 0x403fc,
2099 0x41300, 0x413c4,
2100 0x41400, 0x4141c,
2101 0x41480, 0x414d0,
2102 0x44000, 0x44078,
2103 0x440c0, 0x44278,
2104 0x442c0, 0x44478,
2105 0x444c0, 0x44678,
2106 0x446c0, 0x44878,
2107 0x448c0, 0x449fc,
2108 0x45000, 0x45068,
2109 0x45080, 0x45084,
2110 0x450a0, 0x450b0,
2111 0x45200, 0x45268,
2112 0x45280, 0x45284,
2113 0x452a0, 0x452b0,
2114 0x460c0, 0x460e4,
2115 0x47000, 0x4708c,
2116 0x47200, 0x47250,
2117 0x47400, 0x47420,
2118 0x47600, 0x47618,
2119 0x47800, 0x47814,
2120 0x48000, 0x4800c,
2121 0x48040, 0x48068,
2122 0x48080, 0x48144,
2123 0x48180, 0x4818c,
2124 0x48200, 0x48298,
2125 0x482ac, 0x4833c,
2126 0x483f8, 0x483fc,
2127 0x49300, 0x493c4,
2128 0x49400, 0x4941c,
2129 0x49480, 0x494d0,
2130 0x4c000, 0x4c078,
2131 0x4c0c0, 0x4c278,
2132 0x4c2c0, 0x4c478,
2133 0x4c4c0, 0x4c678,
2134 0x4c6c0, 0x4c878,
2135 0x4c8c0, 0x4c9fc,
2136 0x4d000, 0x4d068,
2137 0x4d080, 0x4d084,
2138 0x4d0a0, 0x4d0b0,
2139 0x4d200, 0x4d268,
2140 0x4d280, 0x4d284,
2141 0x4d2a0, 0x4d2b0,
2142 0x4e0c0, 0x4e0e4,
2143 0x4f000, 0x4f08c,
2144 0x4f200, 0x4f250,
2145 0x4f400, 0x4f420,
2146 0x4f600, 0x4f618,
2147 0x4f800, 0x4f814,
2148 0x50000, 0x500cc,
2149 0x50400, 0x50400,
2150 0x50800, 0x508cc,
2151 0x50c00, 0x50c00,
2152 0x51000, 0x5101c,
2153 0x51300, 0x51308,
2154 };
2155
1651 int i; 2156 int i;
1652 struct adapter *ap = netdev2adap(dev); 2157 struct adapter *ap = netdev2adap(dev);
2158 static const unsigned int *reg_ranges;
2159 int arr_size = 0, buf_size = 0;
2160
2161 if (is_t4(ap->chip)) {
2162 reg_ranges = &t4_reg_ranges[0];
2163 arr_size = ARRAY_SIZE(t4_reg_ranges);
2164 buf_size = T4_REGMAP_SIZE;
2165 } else {
2166 reg_ranges = &t5_reg_ranges[0];
2167 arr_size = ARRAY_SIZE(t5_reg_ranges);
2168 buf_size = T5_REGMAP_SIZE;
2169 }
1653 2170
1654 regs->version = mk_adap_vers(ap); 2171 regs->version = mk_adap_vers(ap);
1655 2172
1656 memset(buf, 0, T4_REGMAP_SIZE); 2173 memset(buf, 0, buf_size);
1657 for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2) 2174 for (i = 0; i < arr_size; i += 2)
1658 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]); 2175 reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
1659} 2176}
1660 2177
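Note why the rewritten loop bounds itself with arr_size rather than keeping ARRAY_SIZE(reg_ranges): once reg_ranges is a pointer chosen at runtime, sizeof yields the pointer size rather than the table size, so the element count must be captured while the arrays are still arrays. The trap in two printfs:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    int main(void)
    {
        static const unsigned int t4_ranges[] = { 0x1008, 0x1108, 0x1180, 0x11b4 };
        const unsigned int *reg_ranges = t4_ranges;

        printf("on the array:   %zu\n", ARRAY_SIZE(t4_ranges));  /* 4 */
        printf("on the pointer: %zu\n", ARRAY_SIZE(reg_ranges)); /* ptr/elem size: wrong */
        return 0;
    }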
@@ -2363,8 +2880,8 @@ static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
2363 int ret, ofst; 2880 int ret, ofst;
2364 __be32 data[16]; 2881 __be32 data[16];
2365 2882
2366 if (mem == MEM_MC) 2883 if ((mem == MEM_MC) || (mem == MEM_MC1))
2367 ret = t4_mc_read(adap, pos, data, NULL); 2884 ret = t4_mc_read(adap, mem % MEM_MC, pos, data, NULL);
2368 else 2885 else
2369 ret = t4_edc_read(adap, mem, pos, data, NULL); 2886 ret = t4_edc_read(adap, mem, pos, data, NULL);
2370 if (ret) 2887 if (ret)
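Because MEM_MC0 aliases MEM_MC, the expression mem % MEM_MC folds both external-memory enum values onto controller indices 0 and 1 without disturbing the EDC cases. Spelled out:

    #include <stdio.h>

    enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1 };

    int main(void)
    {
        /* MEM_MC0 % MEM_MC == 2 % 2 == 0; MEM_MC1 % MEM_MC == 3 % 2 == 1 */
        printf("MC0 -> idx %d, MC1 -> idx %d\n",
               MEM_MC0 % MEM_MC, MEM_MC1 % MEM_MC);
        return 0;
    }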
@@ -2405,18 +2922,37 @@ static void add_debugfs_mem(struct adapter *adap, const char *name,
2405static int setup_debugfs(struct adapter *adap) 2922static int setup_debugfs(struct adapter *adap)
2406{ 2923{
2407 int i; 2924 int i;
2925 u32 size;
2408 2926
2409 if (IS_ERR_OR_NULL(adap->debugfs_root)) 2927 if (IS_ERR_OR_NULL(adap->debugfs_root))
2410 return -1; 2928 return -1;
2411 2929
2412 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE); 2930 i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
2413 if (i & EDRAM0_ENABLE) 2931 if (i & EDRAM0_ENABLE) {
2414 add_debugfs_mem(adap, "edc0", MEM_EDC0, 5); 2932 size = t4_read_reg(adap, MA_EDRAM0_BAR);
2415 if (i & EDRAM1_ENABLE) 2933 add_debugfs_mem(adap, "edc0", MEM_EDC0, EDRAM_SIZE_GET(size));
2416 add_debugfs_mem(adap, "edc1", MEM_EDC1, 5); 2934 }
2417 if (i & EXT_MEM_ENABLE) 2935 if (i & EDRAM1_ENABLE) {
2418 add_debugfs_mem(adap, "mc", MEM_MC, 2936 size = t4_read_reg(adap, MA_EDRAM1_BAR);
2419 EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR))); 2937 add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
2938 }
2939 if (is_t4(adap->chip)) {
2940 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2941 if (i & EXT_MEM_ENABLE)
2942 add_debugfs_mem(adap, "mc", MEM_MC,
2943 EXT_MEM_SIZE_GET(size));
2944 } else {
2945 if (i & EXT_MEM_ENABLE) {
2946 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
2947 add_debugfs_mem(adap, "mc0", MEM_MC0,
2948 EXT_MEM_SIZE_GET(size));
2949 }
2950 if (i & EXT_MEM1_ENABLE) {
2951 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR);
2952 add_debugfs_mem(adap, "mc1", MEM_MC1,
2953 EXT_MEM_SIZE_GET(size));
2954 }
2955 }
2420 if (adap->l2t) 2956 if (adap->l2t)
2421 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap, 2957 debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
2422 &t4_l2t_fops); 2958 &t4_l2t_fops);
@@ -2747,10 +3283,18 @@ EXPORT_SYMBOL(cxgb4_port_chan);
2747unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo) 3283unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
2748{ 3284{
2749 struct adapter *adap = netdev2adap(dev); 3285 struct adapter *adap = netdev2adap(dev);
2750 u32 v; 3286 u32 v1, v2, lp_count, hp_count;
2751 3287
2752 v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS); 3288 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
2753 return lpfifo ? G_LP_COUNT(v) : G_HP_COUNT(v); 3289 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3290 if (is_t4(adap->chip)) {
3291 lp_count = G_LP_COUNT(v1);
3292 hp_count = G_HP_COUNT(v1);
3293 } else {
3294 lp_count = G_LP_COUNT_T5(v1);
3295 hp_count = G_HP_COUNT_T5(v2);
3296 }
3297 return lpfifo ? lp_count : hp_count;
2754} 3298}
2755EXPORT_SYMBOL(cxgb4_dbfifo_count); 3299EXPORT_SYMBOL(cxgb4_dbfifo_count);
2756 3300
@@ -2853,6 +3397,25 @@ out:
2853} 3397}
2854EXPORT_SYMBOL(cxgb4_sync_txq_pidx); 3398EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
2855 3399
3400void cxgb4_disable_db_coalescing(struct net_device *dev)
3401{
3402 struct adapter *adap;
3403
3404 adap = netdev2adap(dev);
3405 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE,
3406 F_NOCOALESCE);
3407}
3408EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
3409
3410void cxgb4_enable_db_coalescing(struct net_device *dev)
3411{
3412 struct adapter *adap;
3413
3414 adap = netdev2adap(dev);
3415 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_NOCOALESCE, 0);
3416}
3417EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
3418
2856static struct pci_driver cxgb4_driver; 3419static struct pci_driver cxgb4_driver;
2857 3420
2858static void check_neigh_update(struct neighbour *neigh) 3421static void check_neigh_update(struct neighbour *neigh)
@@ -2888,14 +3451,23 @@ static struct notifier_block cxgb4_netevent_nb = {
2888 3451
2889static void drain_db_fifo(struct adapter *adap, int usecs) 3452static void drain_db_fifo(struct adapter *adap, int usecs)
2890{ 3453{
2891 u32 v; 3454 u32 v1, v2, lp_count, hp_count;
2892 3455
2893 do { 3456 do {
3457 v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
3458 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
3459 if (is_t4(adap->chip)) {
3460 lp_count = G_LP_COUNT(v1);
3461 hp_count = G_HP_COUNT(v1);
3462 } else {
3463 lp_count = G_LP_COUNT_T5(v1);
3464 hp_count = G_HP_COUNT_T5(v2);
3465 }
3466
3467 if (lp_count == 0 && hp_count == 0)
3468 break;
2894 set_current_state(TASK_UNINTERRUPTIBLE); 3469 set_current_state(TASK_UNINTERRUPTIBLE);
2895 schedule_timeout(usecs_to_jiffies(usecs)); 3470 schedule_timeout(usecs_to_jiffies(usecs));
2896 v = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
2897 if (G_LP_COUNT(v) == 0 && G_HP_COUNT(v) == 0)
2898 break;
2899 } while (1); 3471 } while (1);
2900} 3472}
2901 3473
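Besides the T4/T5 counter split, drain_db_fifo() is reordered to sample the FIFO counts before sleeping, so an already-empty FIFO costs one register read instead of an unconditional schedule_timeout(). The check-then-wait shape in miniature (FIFO simulated):

    #include <stdint.h>
    #include <stdio.h>

    static int polls_left = 3;   /* simulated FIFO: drains after 3 reads */

    static void read_counts(uint32_t *lp, uint32_t *hp)
    {
        *lp = *hp = (polls_left-- > 0) ? 1 : 0;
    }

    static void drain_db_fifo(void)
    {
        uint32_t lp, hp;

        do {
            read_counts(&lp, &hp);
            if (lp == 0 && hp == 0)
                break;           /* exit before ever sleeping */
            /* kernel code sleeps here: set_current_state();
             * schedule_timeout(); */
        } while (1);
        printf("drained\n");
    }

    int main(void) { drain_db_fifo(); return 0; }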
@@ -3004,24 +3576,62 @@ static void process_db_drop(struct work_struct *work)
3004 3576
3005 adap = container_of(work, struct adapter, db_drop_task); 3577 adap = container_of(work, struct adapter, db_drop_task);
3006 3578
3579 if (is_t4(adap->chip)) {
3580 disable_dbs(adap);
3581 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
3582 drain_db_fifo(adap, 1);
3583 recover_all_queues(adap);
3584 enable_dbs(adap);
3585 } else {
3586 u32 dropped_db = t4_read_reg(adap, 0x010ac);
3587 u16 qid = (dropped_db >> 15) & 0x1ffff;
3588 u16 pidx_inc = dropped_db & 0x1fff;
3589 unsigned int s_qpp;
3590 unsigned short udb_density;
3591 unsigned long qpshift;
3592 int page;
3593 u32 udb;
3594
3595 dev_warn(adap->pdev_dev,
3596 "Dropped DB 0x%x qid %d bar2 %d coalesce %d pidx %d\n",
3597 dropped_db, qid,
3598 (dropped_db >> 14) & 1,
3599 (dropped_db >> 13) & 1,
3600 pidx_inc);
3601
3602 drain_db_fifo(adap, 1);
3603
3604 s_qpp = QUEUESPERPAGEPF1 * adap->fn;
3605 udb_density = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adap,
3606 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
3607 qpshift = PAGE_SHIFT - ilog2(udb_density);
3608 udb = qid << qpshift;
3609 udb &= PAGE_MASK;
3610 page = udb / PAGE_SIZE;
3611 udb += (qid - (page * udb_density)) * 128;
3612
3613 writel(PIDX(pidx_inc), adap->bar2 + udb + 8);
3614
3615 /* Re-enable BAR2 WC */
3616 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
3617 }
3618
3007 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0); 3619 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_DROPPED_DB, 0);
3008 disable_dbs(adap);
3009 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
3010 drain_db_fifo(adap, 1);
3011 recover_all_queues(adap);
3012 enable_dbs(adap);
3013} 3620}
3014 3621
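The T5 branch reconstructs the user-doorbell address of the queue whose write was dropped: the per-PF register gives queues per doorbell page, qpshift turns a qid into a page offset, and each queue owns a 128-byte slot within its page. The arithmetic in isolation (density and qid are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    static unsigned long ilog2_ul(unsigned long v)
    {
        unsigned long r = 0;

        while (v >>= 1)
            r++;
        return r;
    }

    int main(void)
    {
        unsigned int  qid = 37;
        unsigned int  udb_density = 16;  /* queues per doorbell page */
        unsigned long qpshift = PAGE_SHIFT - ilog2_ul(udb_density);
        unsigned long udb, page;

        udb  = (unsigned long)qid << qpshift; /* raw offset of qid's slot */
        udb &= PAGE_MASK;                     /* page qid lands in */
        page = udb / PAGE_SIZE;
        udb += (qid - page * udb_density) * 128; /* 128 B per queue */

        printf("qid %u -> doorbell offset 0x%lx\n", qid, udb);
        return 0;
    }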
3015void t4_db_full(struct adapter *adap) 3622void t4_db_full(struct adapter *adap)
3016{ 3623{
3017 t4_set_reg_field(adap, SGE_INT_ENABLE3, 3624 if (is_t4(adap->chip)) {
3018 DBFIFO_HP_INT | DBFIFO_LP_INT, 0); 3625 t4_set_reg_field(adap, SGE_INT_ENABLE3,
3019 queue_work(workq, &adap->db_full_task); 3626 DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
3627 queue_work(workq, &adap->db_full_task);
3628 }
3020} 3629}
3021 3630
3022void t4_db_dropped(struct adapter *adap) 3631void t4_db_dropped(struct adapter *adap)
3023{ 3632{
3024 queue_work(workq, &adap->db_drop_task); 3633 if (is_t4(adap->chip))
3634 queue_work(workq, &adap->db_drop_task);
3025} 3635}
3026 3636
3027static void uld_attach(struct adapter *adap, unsigned int uld) 3637static void uld_attach(struct adapter *adap, unsigned int uld)
@@ -3566,17 +4176,27 @@ void t4_fatal_err(struct adapter *adap)
3566 4176
3567static void setup_memwin(struct adapter *adap) 4177static void setup_memwin(struct adapter *adap)
3568{ 4178{
3569 u32 bar0; 4179 u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base;
3570 4180
3571 bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */ 4181 bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
4182 if (is_t4(adap->chip)) {
4183 mem_win0_base = bar0 + MEMWIN0_BASE;
4184 mem_win1_base = bar0 + MEMWIN1_BASE;
4185 mem_win2_base = bar0 + MEMWIN2_BASE;
4186 } else {
4187 /* For T5, only relative offset inside the PCIe BAR is passed */
4188 mem_win0_base = MEMWIN0_BASE;
4189 mem_win1_base = MEMWIN1_BASE_T5;
4190 mem_win2_base = MEMWIN2_BASE_T5;
4191 }
3572 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0), 4192 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
3573 (bar0 + MEMWIN0_BASE) | BIR(0) | 4193 mem_win0_base | BIR(0) |
3574 WINDOW(ilog2(MEMWIN0_APERTURE) - 10)); 4194 WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
3575 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1), 4195 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
3576 (bar0 + MEMWIN1_BASE) | BIR(0) | 4196 mem_win1_base | BIR(0) |
3577 WINDOW(ilog2(MEMWIN1_APERTURE) - 10)); 4197 WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
3578 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2), 4198 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
3579 (bar0 + MEMWIN2_BASE) | BIR(0) | 4199 mem_win2_base | BIR(0) |
3580 WINDOW(ilog2(MEMWIN2_APERTURE) - 10)); 4200 WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
3581} 4201}
3582 4202
@@ -3745,6 +4365,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
3745 unsigned long mtype = 0, maddr = 0; 4365 unsigned long mtype = 0, maddr = 0;
3746 u32 finiver, finicsum, cfcsum; 4366 u32 finiver, finicsum, cfcsum;
3747 int ret, using_flash; 4367 int ret, using_flash;
4368 char *fw_config_file, fw_config_file_path[256];
3748 4369
3749 /* 4370 /*
3750 * Reset device if necessary. 4371 * Reset device if necessary.
@@ -3761,7 +4382,21 @@ static int adap_init0_config(struct adapter *adapter, int reset)
3761 * then use that. Otherwise, use the configuration file stored 4382 * then use that. Otherwise, use the configuration file stored
3762 * in the adapter flash ... 4383 * in the adapter flash ...
3763 */ 4384 */
3764 ret = request_firmware(&cf, FW_CFNAME, adapter->pdev_dev); 4385 switch (CHELSIO_CHIP_VERSION(adapter->chip)) {
4386 case CHELSIO_T4:
4387 fw_config_file = FW_CFNAME;
4388 break;
4389 case CHELSIO_T5:
4390 fw_config_file = FW5_CFNAME;
4391 break;
4392 default:
4393 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
4394 adapter->pdev->device);
4395 ret = -EINVAL;
4396 goto bye;
4397 }
4398
4399 ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
3765 if (ret < 0) { 4400 if (ret < 0) {
3766 using_flash = 1; 4401 using_flash = 1;
3767 mtype = FW_MEMTYPE_CF_FLASH; 4402 mtype = FW_MEMTYPE_CF_FLASH;
@@ -3877,6 +4512,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
3877 if (ret < 0) 4512 if (ret < 0)
3878 goto bye; 4513 goto bye;
3879 4514
4515 sprintf(fw_config_file_path, "/lib/firmware/%s", fw_config_file);
3880 /* 4516 /*
3881 * Return successfully and note that we're operating with parameters 4517 * Return successfully and note that we're operating with parameters
3882 * not supplied by the driver, rather than from hard-wired 4518 * not supplied by the driver, rather than from hard-wired
@@ -3887,7 +4523,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
3887 "Configuration File %s, version %#x, computed checksum %#x\n", 4523 "Configuration File %s, version %#x, computed checksum %#x\n",
3888 (using_flash 4524 (using_flash
3889 ? "in device FLASH" 4525 ? "in device FLASH"
3890 : "/lib/firmware/" FW_CFNAME), 4526 : fw_config_file_path),
3891 finiver, cfcsum); 4527 finiver, cfcsum);
3892 return 0; 4528 return 0;
3893 4529
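The 256-byte fw_config_file_path with sprintf() is safe today because both candidate names are short constants; snprintf() is the defensive spelling if that ever changes. A hedged alternative:

    #include <stdio.h>

    int main(void)
    {
        char path[256];
        const char *fw_config_file = "cxgb4/t5-config.txt";
        int n = snprintf(path, sizeof(path), "/lib/firmware/%s",
                         fw_config_file);

        if (n < 0 || (size_t)n >= sizeof(path))
            return 1;      /* the name would have been truncated */
        printf("%s\n", path);
        return 0;
    }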
@@ -4814,7 +5450,8 @@ static void print_port_info(const struct net_device *dev)
4814 sprintf(bufp, "BASE-%s", base[pi->port_type]); 5450 sprintf(bufp, "BASE-%s", base[pi->port_type]);
4815 5451
4816 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n", 5452 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
4817 adap->params.vpd.id, adap->params.rev, buf, 5453 adap->params.vpd.id,
5454 CHELSIO_CHIP_RELEASE(adap->params.rev), buf,
4818 is_offload(adap) ? "R" : "", adap->params.pci.width, spd, 5455 is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
4819 (adap->flags & USING_MSIX) ? " MSI-X" : 5456 (adap->flags & USING_MSIX) ? " MSI-X" :
4820 (adap->flags & USING_MSI) ? " MSI" : ""); 5457 (adap->flags & USING_MSI) ? " MSI" : "");
@@ -4854,10 +5491,11 @@ static void free_some_resources(struct adapter *adapter)
4854#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN) 5491#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
4855#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \ 5492#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
4856 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA) 5493 NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
5494#define SEGMENT_SIZE 128
4857 5495
4858static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 5496static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4859{ 5497{
4860 int func, i, err; 5498 int func, i, err, s_qpp, qpp, num_seg;
4861 struct port_info *pi; 5499 struct port_info *pi;
4862 bool highdma = false; 5500 bool highdma = false;
4863 struct adapter *adapter = NULL; 5501 struct adapter *adapter = NULL;
@@ -4934,7 +5572,34 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4934 5572
4935 err = t4_prep_adapter(adapter); 5573 err = t4_prep_adapter(adapter);
4936 if (err) 5574 if (err)
4937 goto out_unmap_bar; 5575 goto out_unmap_bar0;
5576
5577 if (!is_t4(adapter->chip)) {
5578 s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
5579 qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
5580 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
5581 num_seg = PAGE_SIZE / SEGMENT_SIZE;
5582
5583 /* Each segment size is 128B. Write coalescing is enabled only
5584 * when SGE_EGRESS_QUEUES_PER_PAGE_PF reg value for the
 5585 * queue is less than the number of segments that can be
 5586 * accommodated in a page.
5587 */
5588 if (qpp > num_seg) {
5589 dev_err(&pdev->dev,
5590 "Incorrect number of egress queues per page\n");
5591 err = -EINVAL;
5592 goto out_unmap_bar0;
5593 }
5594 adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
5595 pci_resource_len(pdev, 2));
5596 if (!adapter->bar2) {
5597 dev_err(&pdev->dev, "cannot map device bar2 region\n");
5598 err = -ENOMEM;
5599 goto out_unmap_bar0;
5600 }
5601 }
5602
4938 setup_memwin(adapter); 5603 setup_memwin(adapter);
4939 err = adap_init0(adapter); 5604 err = adap_init0(adapter);
4940 setup_memwin_rdma(adapter); 5605 setup_memwin_rdma(adapter);
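
The hunk above gates the write-combined BAR2 mapping on the SGE egress queues-per-page setting. A minimal userspace sketch of that check follows; SEGMENT_SIZE matches the patch, while the 4 KB page size and the log2 register encoding are assumptions for illustration.

#include <stdio.h>

#define SEGMENT_SIZE 128        /* BAR2 doorbell segment size, per the patch */
#define PAGE_SIZE    4096       /* assumed host page size */

/* qpp_log2 models QUEUESPERPAGEPF0_GET(reg >> s_qpp): log2(queues/page) */
static int bar2_mapping_ok(unsigned int qpp_log2)
{
        unsigned int qpp = 1u << qpp_log2;               /* queues per page */
        unsigned int num_seg = PAGE_SIZE / SEGMENT_SIZE; /* 32 segments here */

        return qpp <= num_seg;  /* every queue needs its own 128B segment */
}

int main(void)
{
        printf("16 queues/page: %s\n", bar2_mapping_ok(4) ? "ok" : "reject");
        printf("64 queues/page: %s\n", bar2_mapping_ok(6) ? "ok" : "reject");
        return 0;
}
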
@@ -5063,6 +5728,9 @@ sriov:
5063 out_free_dev: 5728 out_free_dev:
5064 free_some_resources(adapter); 5729 free_some_resources(adapter);
5065 out_unmap_bar: 5730 out_unmap_bar:
5731 if (!is_t4(adapter->chip))
5732 iounmap(adapter->bar2);
5733 out_unmap_bar0:
5066 iounmap(adapter->regs); 5734 iounmap(adapter->regs);
5067 out_free_adapter: 5735 out_free_adapter:
5068 kfree(adapter); 5736 kfree(adapter);
@@ -5113,6 +5781,8 @@ static void remove_one(struct pci_dev *pdev)
5113 5781
5114 free_some_resources(adapter); 5782 free_some_resources(adapter);
5115 iounmap(adapter->regs); 5783 iounmap(adapter->regs);
5784 if (!is_t4(adapter->chip))
5785 iounmap(adapter->bar2);
5116 kfree(adapter); 5786 kfree(adapter);
5117 pci_disable_pcie_error_reporting(pdev); 5787 pci_disable_pcie_error_reporting(pdev);
5118 pci_disable_device(pdev); 5788 pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index e2bbc7f3e2de..4faf4d067ee7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -269,4 +269,7 @@ struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
269 unsigned int skb_len, unsigned int pull_len); 269 unsigned int skb_len, unsigned int pull_len);
270int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, u16 size); 270int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, u16 size);
271int cxgb4_flush_eq_cache(struct net_device *dev); 271int cxgb4_flush_eq_cache(struct net_device *dev);
272void cxgb4_disable_db_coalescing(struct net_device *dev);
273void cxgb4_enable_db_coalescing(struct net_device *dev);
274
272#endif /* !__CXGB4_OFLD_H */ 275#endif /* !__CXGB4_OFLD_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index fe9a2ea3588b..8b47b253e204 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -506,10 +506,14 @@ static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
506 506
507static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) 507static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
508{ 508{
509 u32 val;
509 if (q->pend_cred >= 8) { 510 if (q->pend_cred >= 8) {
511 val = PIDX(q->pend_cred / 8);
512 if (!is_t4(adap->chip))
513 val |= DBTYPE(1);
510 wmb(); 514 wmb();
511 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO(1) | 515 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO(1) |
512 QID(q->cntxt_id) | PIDX(q->pend_cred / 8)); 516 QID(q->cntxt_id) | val);
513 q->pend_cred &= 7; 517 q->pend_cred &= 7;
514 } 518 }
515} 519}
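
For reference, a standalone sketch of the free-list doorbell value built in ring_fl_db() above. The bit positions are copied from this patch's t4_regs.h changes; the is_t5 flag stands in for !is_t4(adap->chip).

#include <stdio.h>

#define QID_SHIFT 15
#define QID(x)    ((x) << QID_SHIFT)
#define DBPRIO(x) ((x) << 14)
#define DBTYPE(x) ((x) << 13)
#define PIDX(x)   ((x) << 0)

static unsigned int fl_db_val(unsigned int cntxt_id,
                              unsigned int pend_cred, int is_t5)
{
        unsigned int val = PIDX(pend_cred / 8); /* credits posted in units of 8 */

        if (is_t5)
                val |= DBTYPE(1);       /* T5: tag this as a PIDX-increment write */
        return DBPRIO(1) | QID(cntxt_id) | val;
}

int main(void)
{
        printf("T4 doorbell: %#x\n", fl_db_val(3, 16, 0));
        printf("T5 doorbell: %#x\n", fl_db_val(3, 16, 1));
        return 0;
}
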
@@ -812,6 +816,22 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
812 *end = 0; 816 *end = 0;
813} 817}
814 818
 819/* This function copies a 64-byte coalesced work request to the
 820 * memory-mapped BAR2 space (user-space writes).
 821 * For a coalesced WR, the SGE fetches data from the FIFO instead of from the host.
 822 */
823static void cxgb_pio_copy(u64 __iomem *dst, u64 *src)
824{
825 int count = 8;
826
827 while (count) {
828 writeq(*src, dst);
829 src++;
830 dst++;
831 count--;
832 }
833}
834
815/** 835/**
816 * ring_tx_db - check and potentially ring a Tx queue's doorbell 836 * ring_tx_db - check and potentially ring a Tx queue's doorbell
817 * @adap: the adapter 837 * @adap: the adapter
@@ -822,11 +842,25 @@ static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
822 */ 842 */
823static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) 843static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
824{ 844{
845 unsigned int *wr, index;
846
825 wmb(); /* write descriptors before telling HW */ 847 wmb(); /* write descriptors before telling HW */
826 spin_lock(&q->db_lock); 848 spin_lock(&q->db_lock);
827 if (!q->db_disabled) { 849 if (!q->db_disabled) {
828 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), 850 if (is_t4(adap->chip)) {
829 QID(q->cntxt_id) | PIDX(n)); 851 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
852 QID(q->cntxt_id) | PIDX(n));
853 } else {
854 if (n == 1) {
855 index = q->pidx ? (q->pidx - 1) : (q->size - 1);
856 wr = (unsigned int *)&q->desc[index];
857 cxgb_pio_copy((u64 __iomem *)
858 (adap->bar2 + q->udb + 64),
859 (u64 *)wr);
860 } else
861 writel(n, adap->bar2 + q->udb + 8);
862 wmb();
863 }
830 } 864 }
831 q->db_pidx = q->pidx; 865 q->db_pidx = q->pidx;
832 spin_unlock(&q->db_lock); 866 spin_unlock(&q->db_lock);
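
The new T5 branch above either PIO-copies a single 64-byte WR into the queue's BAR2 doorbell segment (so the SGE can fetch it from its own FIFO) or posts just the descriptor count. A minimal sketch with an in-memory stand-in for the mapped segment; the +64 and +8 offsets mirror the patch, everything else is illustrative.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WR_LEN 64       /* one coalesced work request, per the patch */

static uint8_t bar2_udb[128];   /* stand-in for one queue's mapped doorbell segment */

static void t5_ring_tx_db(const void *last_wr, unsigned int n)
{
        if (n == 1)     /* write coalescing: PIO-copy the WR itself at udb+64 */
                memcpy(bar2_udb + 64, last_wr, WR_LEN);
        else            /* otherwise just post the descriptor count at udb+8 */
                *(volatile uint32_t *)(bar2_udb + 8) = n;
}

int main(void)
{
        uint8_t wr[WR_LEN] = { 0xde, 0xad };

        t5_ring_tx_db(wr, 1);
        printf("first WR byte at udb+64: %#x\n", bar2_udb[64]);
        t5_ring_tx_db(wr, 3);
        printf("posted pidx count: %u\n", *(uint32_t *)(bar2_udb + 8));
        return 0;
}
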
@@ -1555,7 +1589,6 @@ static noinline int handle_trace_pkt(struct adapter *adap,
1555 const struct pkt_gl *gl) 1589 const struct pkt_gl *gl)
1556{ 1590{
1557 struct sk_buff *skb; 1591 struct sk_buff *skb;
1558 struct cpl_trace_pkt *p;
1559 1592
1560 skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN); 1593 skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
1561 if (unlikely(!skb)) { 1594 if (unlikely(!skb)) {
@@ -1563,8 +1596,11 @@ static noinline int handle_trace_pkt(struct adapter *adap,
1563 return 0; 1596 return 0;
1564 } 1597 }
1565 1598
1566 p = (struct cpl_trace_pkt *)skb->data; 1599 if (is_t4(adap->chip))
1567 __skb_pull(skb, sizeof(*p)); 1600 __skb_pull(skb, sizeof(struct cpl_trace_pkt));
1601 else
1602 __skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));
1603
1568 skb_reset_mac_header(skb); 1604 skb_reset_mac_header(skb);
1569 skb->protocol = htons(0xffff); 1605 skb->protocol = htons(0xffff);
1570 skb->dev = adap->port[0]; 1606 skb->dev = adap->port[0];
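
The pull size above differs because T5's trace header carries an extra 64-bit reserved word. A sketch with the two bitfield bytes collapsed into plain bytes, which preserves the sizes the driver strips:

#include <stdint.h>
#include <stdio.h>

/* Layouts abbreviated from this patch's t4_msg.h additions. */
struct cpl_trace_pkt_sketch {
        uint8_t  opcode, intf, bits0, bits1;
        uint16_t rsvd, len;
        uint64_t tstamp;
};

struct cpl_t5_trace_pkt_sketch {
        uint8_t  opcode, intf, bits0, bits1;
        uint16_t rsvd, len;
        uint64_t tstamp;
        uint64_t rsvd1;         /* the extra T5 word */
};

int main(void)
{
        printf("T4 pull: %zu bytes\n", sizeof(struct cpl_trace_pkt_sketch));    /* 16 */
        printf("T5 pull: %zu bytes\n", sizeof(struct cpl_t5_trace_pkt_sketch)); /* 24 */
        return 0;
}
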
@@ -1625,8 +1661,10 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1625 const struct cpl_rx_pkt *pkt; 1661 const struct cpl_rx_pkt *pkt;
1626 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq); 1662 struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
1627 struct sge *s = &q->adap->sge; 1663 struct sge *s = &q->adap->sge;
1664 int cpl_trace_pkt = is_t4(q->adap->chip) ?
1665 CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
1628 1666
1629 if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT)) 1667 if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
1630 return handle_trace_pkt(q->adap, si); 1668 return handle_trace_pkt(q->adap, si);
1631 1669
1632 pkt = (const struct cpl_rx_pkt *)rsp; 1670 pkt = (const struct cpl_rx_pkt *)rsp;
@@ -2143,11 +2181,27 @@ err:
2143 2181
2144static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) 2182static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
2145{ 2183{
2184 q->cntxt_id = id;
2185 if (!is_t4(adap->chip)) {
2186 unsigned int s_qpp;
2187 unsigned short udb_density;
2188 unsigned long qpshift;
2189 int page;
2190
2191 s_qpp = QUEUESPERPAGEPF1 * adap->fn;
2192 udb_density = 1 << QUEUESPERPAGEPF0_GET((t4_read_reg(adap,
2193 SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp));
2194 qpshift = PAGE_SHIFT - ilog2(udb_density);
2195 q->udb = q->cntxt_id << qpshift;
2196 q->udb &= PAGE_MASK;
2197 page = q->udb / PAGE_SIZE;
2198 q->udb += (q->cntxt_id - (page * udb_density)) * 128;
2199 }
2200
2146 q->in_use = 0; 2201 q->in_use = 0;
2147 q->cidx = q->pidx = 0; 2202 q->cidx = q->pidx = 0;
2148 q->stops = q->restarts = 0; 2203 q->stops = q->restarts = 0;
2149 q->stat = (void *)&q->desc[q->size]; 2204 q->stat = (void *)&q->desc[q->size];
2150 q->cntxt_id = id;
2151 spin_lock_init(&q->db_lock); 2205 spin_lock_init(&q->db_lock);
2152 adap->sge.egr_map[id - adap->sge.egr_start] = q; 2206 adap->sge.egr_map[id - adap->sge.egr_start] = q;
2153} 2207}
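
A userspace sketch of the doorbell-offset arithmetic added to init_txq() above: with 2^n queues sharing one doorbell page, a queue's offset is its context ID shifted into page units, rounded to a page, plus a 128-byte segment within that page. PAGE_SHIFT of 12 is an assumption; the 128-byte segment size comes from the patch.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

static unsigned long ilog2_ul(unsigned long x)
{
        unsigned long r = 0;

        while (x >>= 1)
                r++;
        return r;
}

static unsigned long udb_offset(unsigned int cntxt_id, unsigned int udb_density)
{
        unsigned long qpshift = PAGE_SHIFT - ilog2_ul(udb_density);
        unsigned long udb = ((unsigned long)cntxt_id << qpshift) & PAGE_MASK;
        unsigned long page = udb / PAGE_SIZE;

        /* 128-byte segment of this queue within its doorbell page */
        udb += (cntxt_id - page * udb_density) * 128;
        return udb;
}

int main(void)
{
        printf("qid 5, 16 queues/page -> udb %#lx\n", udb_offset(5, 16));
        return 0;
}
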
@@ -2587,11 +2641,20 @@ static int t4_sge_init_hard(struct adapter *adap)
2587 * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows 2641 * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
2588 * and generate an interrupt when this occurs so we can recover. 2642 * and generate an interrupt when this occurs so we can recover.
2589 */ 2643 */
2590 t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS, 2644 if (is_t4(adap->chip)) {
2591 V_HP_INT_THRESH(M_HP_INT_THRESH) | 2645 t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
2592 V_LP_INT_THRESH(M_LP_INT_THRESH), 2646 V_HP_INT_THRESH(M_HP_INT_THRESH) |
2593 V_HP_INT_THRESH(dbfifo_int_thresh) | 2647 V_LP_INT_THRESH(M_LP_INT_THRESH),
2594 V_LP_INT_THRESH(dbfifo_int_thresh)); 2648 V_HP_INT_THRESH(dbfifo_int_thresh) |
2649 V_LP_INT_THRESH(dbfifo_int_thresh));
2650 } else {
2651 t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
2652 V_LP_INT_THRESH_T5(M_LP_INT_THRESH_T5),
2653 V_LP_INT_THRESH_T5(dbfifo_int_thresh));
2654 t4_set_reg_field(adap, SGE_DBFIFO_STATUS2,
2655 V_HP_INT_THRESH_T5(M_HP_INT_THRESH_T5),
2656 V_HP_INT_THRESH_T5(dbfifo_int_thresh));
2657 }
2595 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP, 2658 t4_set_reg_field(adap, A_SGE_DOORBELL_CONTROL, F_ENABLE_DROP,
2596 F_ENABLE_DROP); 2659 F_ENABLE_DROP);
2597 2660
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 8049268ce0f2..d02d4e8c4417 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -282,6 +282,7 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
282 * t4_mc_read - read from MC through backdoor accesses 282 * t4_mc_read - read from MC through backdoor accesses
283 * @adap: the adapter 283 * @adap: the adapter
284 * @addr: address of first byte requested 284 * @addr: address of first byte requested
285 * @idx: which MC to access
285 * @data: 64 bytes of data containing the requested address 286 * @data: 64 bytes of data containing the requested address
286 * @ecc: where to store the corresponding 64-bit ECC word 287 * @ecc: where to store the corresponding 64-bit ECC word
287 * 288 *
@@ -289,22 +290,38 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
289 * that covers the requested address @addr. If @parity is not %NULL it 290 * that covers the requested address @addr. If @parity is not %NULL it
290 * is assigned the 64-bit ECC word for the read data. 291 * is assigned the 64-bit ECC word for the read data.
291 */ 292 */
292int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc) 293int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
293{ 294{
294 int i; 295 int i;
296 u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len;
297 u32 mc_bist_status_rdata, mc_bist_data_pattern;
298
299 if (is_t4(adap->chip)) {
300 mc_bist_cmd = MC_BIST_CMD;
301 mc_bist_cmd_addr = MC_BIST_CMD_ADDR;
302 mc_bist_cmd_len = MC_BIST_CMD_LEN;
303 mc_bist_status_rdata = MC_BIST_STATUS_RDATA;
304 mc_bist_data_pattern = MC_BIST_DATA_PATTERN;
305 } else {
306 mc_bist_cmd = MC_REG(MC_P_BIST_CMD, idx);
307 mc_bist_cmd_addr = MC_REG(MC_P_BIST_CMD_ADDR, idx);
308 mc_bist_cmd_len = MC_REG(MC_P_BIST_CMD_LEN, idx);
309 mc_bist_status_rdata = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
310 mc_bist_data_pattern = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
311 }
295 312
296 if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST) 313 if (t4_read_reg(adap, mc_bist_cmd) & START_BIST)
297 return -EBUSY; 314 return -EBUSY;
298 t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU); 315 t4_write_reg(adap, mc_bist_cmd_addr, addr & ~0x3fU);
299 t4_write_reg(adap, MC_BIST_CMD_LEN, 64); 316 t4_write_reg(adap, mc_bist_cmd_len, 64);
300 t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc); 317 t4_write_reg(adap, mc_bist_data_pattern, 0xc);
301 t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST | 318 t4_write_reg(adap, mc_bist_cmd, BIST_OPCODE(1) | START_BIST |
302 BIST_CMD_GAP(1)); 319 BIST_CMD_GAP(1));
303 i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1); 320 i = t4_wait_op_done(adap, mc_bist_cmd, START_BIST, 0, 10, 1);
304 if (i) 321 if (i)
305 return i; 322 return i;
306 323
307#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i) 324#define MC_DATA(i) MC_BIST_STATUS_REG(mc_bist_status_rdata, i)
308 325
309 for (i = 15; i >= 0; i--) 326 for (i = 15; i >= 0; i--)
310 *data++ = htonl(t4_read_reg(adap, MC_DATA(i))); 327 *data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
@@ -329,20 +346,39 @@ int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
329int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) 346int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
330{ 347{
331 int i; 348 int i;
349 u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len;
350 u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;
351
352 if (is_t4(adap->chip)) {
353 edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx);
354 edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx);
355 edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx);
356 edc_bist_cmd_data_pattern = EDC_REG(EDC_BIST_DATA_PATTERN,
357 idx);
358 edc_bist_status_rdata = EDC_REG(EDC_BIST_STATUS_RDATA,
359 idx);
360 } else {
361 edc_bist_cmd = EDC_REG_T5(EDC_H_BIST_CMD, idx);
362 edc_bist_cmd_addr = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
363 edc_bist_cmd_len = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
364 edc_bist_cmd_data_pattern =
365 EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
366 edc_bist_status_rdata =
367 EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
368 }
332 369
333 idx *= EDC_STRIDE; 370 if (t4_read_reg(adap, edc_bist_cmd) & START_BIST)
334 if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST)
335 return -EBUSY; 371 return -EBUSY;
336 t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU); 372 t4_write_reg(adap, edc_bist_cmd_addr, addr & ~0x3fU);
337 t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64); 373 t4_write_reg(adap, edc_bist_cmd_len, 64);
338 t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc); 374 t4_write_reg(adap, edc_bist_cmd_data_pattern, 0xc);
339 t4_write_reg(adap, EDC_BIST_CMD + idx, 375 t4_write_reg(adap, edc_bist_cmd,
340 BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST); 376 BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
341 i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1); 377 i = t4_wait_op_done(adap, edc_bist_cmd, START_BIST, 0, 10, 1);
342 if (i) 378 if (i)
343 return i; 379 return i;
344 380
345#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx) 381#define EDC_DATA(i) (EDC_BIST_STATUS_REG(edc_bist_status_rdata, i))
346 382
347 for (i = 15; i >= 0; i--) 383 for (i = 15; i >= 0; i--)
348 *data++ = htonl(t4_read_reg(adap, EDC_DATA(i))); 384 *data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
@@ -366,6 +402,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
366static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir) 402static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
367{ 403{
368 int i; 404 int i;
405 u32 win_pf = is_t4(adap->chip) ? 0 : V_PFNUM(adap->fn);
369 406
370 /* 407 /*
371 * Setup offset into PCIE memory window. Address must be a 408 * Setup offset into PCIE memory window. Address must be a
@@ -374,7 +411,7 @@ static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
374 * values.) 411 * values.)
375 */ 412 */
376 t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET, 413 t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET,
377 addr & ~(MEMWIN0_APERTURE - 1)); 414 (addr & ~(MEMWIN0_APERTURE - 1)) | win_pf);
378 t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET); 415 t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
379 416
380 /* Collecting data 4 bytes at a time up to MEMWIN0_APERTURE */ 417
@@ -410,6 +447,7 @@ static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
410 __be32 *buf, int dir) 447 __be32 *buf, int dir)
411{ 448{
412 u32 pos, start, end, offset, memoffset; 449 u32 pos, start, end, offset, memoffset;
450 u32 edc_size, mc_size;
413 int ret = 0; 451 int ret = 0;
414 __be32 *data; 452 __be32 *data;
415 453
@@ -423,13 +461,21 @@ static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
423 if (!data) 461 if (!data)
424 return -ENOMEM; 462 return -ENOMEM;
425 463
426 /* 464 /* Offset into the region of memory which is being accessed
427 * Offset into the region of memory which is being accessed
428 * MEM_EDC0 = 0 465 * MEM_EDC0 = 0
429 * MEM_EDC1 = 1 466 * MEM_EDC1 = 1
430 * MEM_MC = 2 467 * MEM_MC = 2 -- T4
468 * MEM_MC0 = 2 -- For T5
469 * MEM_MC1 = 3 -- For T5
431 */ 470 */
432 memoffset = (mtype * (5 * 1024 * 1024)); 471 edc_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR));
472 if (mtype != MEM_MC1)
473 memoffset = (mtype * (edc_size * 1024 * 1024));
474 else {
475 mc_size = EXT_MEM_SIZE_GET(t4_read_reg(adap,
476 MA_EXT_MEMORY_BAR));
477 memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
478 }
433 479
434 /* Determine the PCIE_MEM_ACCESS_OFFSET */ 480 /* Determine the PCIE_MEM_ACCESS_OFFSET */
435 addr = addr + memoffset; 481 addr = addr + memoffset;
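
On T4 every region sat a fixed 5 MB apart; the hunk above instead derives the EDC region size from MA_EDRAM0_BAR and places MC1 behind MC0. A sketch of that layout with illustrative sizes in MB:

#include <stdio.h>

enum { MEM_EDC0, MEM_EDC1, MEM_MC0, MEM_MC1 };

static unsigned long long memoffset_bytes(int mtype,
                                          unsigned int edc_size_mb,
                                          unsigned int mc_size_mb)
{
        unsigned long long mb = 1024ULL * 1024;

        if (mtype != MEM_MC1)
                return (unsigned long long)mtype * edc_size_mb * mb;
        /* MC1 starts after both EDC regions and MC0 */
        return (MEM_MC0 * edc_size_mb + mc_size_mb) * mb;
}

int main(void)
{
        printf("EDC1 offset: %llu MB\n",
               memoffset_bytes(MEM_EDC1, 128, 2048) >> 20);
        printf("MC1  offset: %llu MB\n",
               memoffset_bytes(MEM_MC1, 128, 2048) >> 20);
        return 0;
}
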
@@ -497,9 +543,9 @@ int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
497} 543}
498 544
499#define EEPROM_STAT_ADDR 0x7bfc 545#define EEPROM_STAT_ADDR 0x7bfc
500#define VPD_LEN 512
501#define VPD_BASE 0x400 546#define VPD_BASE 0x400
502#define VPD_BASE_OLD 0 547#define VPD_BASE_OLD 0
548#define VPD_LEN 1024
503 549
504/** 550/**
505 * t4_seeprom_wp - enable/disable EEPROM write protection 551 * t4_seeprom_wp - enable/disable EEPROM write protection
@@ -856,6 +902,7 @@ int t4_check_fw_version(struct adapter *adapter)
856{ 902{
857 u32 api_vers[2]; 903 u32 api_vers[2];
858 int ret, major, minor, micro; 904 int ret, major, minor, micro;
905 int exp_major, exp_minor, exp_micro;
859 906
860 ret = get_fw_version(adapter, &adapter->params.fw_vers); 907 ret = get_fw_version(adapter, &adapter->params.fw_vers);
861 if (!ret) 908 if (!ret)
@@ -870,17 +917,35 @@ int t4_check_fw_version(struct adapter *adapter)
870 major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers); 917 major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
871 minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers); 918 minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
872 micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers); 919 micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
920
921 switch (CHELSIO_CHIP_VERSION(adapter->chip)) {
922 case CHELSIO_T4:
923 exp_major = FW_VERSION_MAJOR;
924 exp_minor = FW_VERSION_MINOR;
925 exp_micro = FW_VERSION_MICRO;
926 break;
927 case CHELSIO_T5:
928 exp_major = FW_VERSION_MAJOR_T5;
929 exp_minor = FW_VERSION_MINOR_T5;
930 exp_micro = FW_VERSION_MICRO_T5;
931 break;
932 default:
933 dev_err(adapter->pdev_dev, "Unsupported chip type, %x\n",
934 adapter->chip);
935 return -EINVAL;
936 }
937
873 memcpy(adapter->params.api_vers, api_vers, 938 memcpy(adapter->params.api_vers, api_vers,
874 sizeof(adapter->params.api_vers)); 939 sizeof(adapter->params.api_vers));
875 940
876 if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */ 941 if (major != exp_major) { /* major mismatch - fail */
877 dev_err(adapter->pdev_dev, 942 dev_err(adapter->pdev_dev,
878 "card FW has major version %u, driver wants %u\n", 943 "card FW has major version %u, driver wants %u\n",
879 major, FW_VERSION_MAJOR); 944 major, exp_major);
880 return -EINVAL; 945 return -EINVAL;
881 } 946 }
882 947
883 if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO) 948 if (minor == exp_minor && micro == exp_micro)
884 return 0; /* perfect match */ 949 return 0; /* perfect match */
885 950
886 /* Minor/micro version mismatch. Report it but often it's OK. */ 951 /* Minor/micro version mismatch. Report it but often it's OK. */
@@ -1246,6 +1311,45 @@ static void pcie_intr_handler(struct adapter *adapter)
1246 { 0 } 1311 { 0 }
1247 }; 1312 };
1248 1313
1314 static struct intr_info t5_pcie_intr_info[] = {
1315 { MSTGRPPERR, "Master Response Read Queue parity error",
1316 -1, 1 },
1317 { MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
1318 { MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
1319 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
1320 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
1321 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
1322 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
1323 { PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
1324 -1, 1 },
1325 { PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
1326 -1, 1 },
1327 { TARTAGPERR, "PCI target tag FIFO parity error", -1, 1 },
1328 { MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
1329 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
1330 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
1331 { DREQWRPERR, "PCI DMA channel write request parity error",
1332 -1, 1 },
1333 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
1334 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
1335 { HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
1336 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
1337 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
1338 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
1339 { FIDPERR, "PCI FID parity error", -1, 1 },
1340 { VFIDPERR, "PCI INTx clear parity error", -1, 1 },
1341 { MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
1342 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
1343 { IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
1344 -1, 1 },
1345 { IPRXDATAGRPPERR, "PCI IP Rx data group parity error", -1, 1 },
1346 { RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
1347 { IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
1348 { TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
1349 { READRSPERR, "Outbound read error", -1, 0 },
1350 { 0 }
1351 };
1352
1249 int fat; 1353 int fat;
1250 1354
1251 fat = t4_handle_intr_status(adapter, 1355 fat = t4_handle_intr_status(adapter,
@@ -1254,7 +1358,10 @@ static void pcie_intr_handler(struct adapter *adapter)
1254 t4_handle_intr_status(adapter, 1358 t4_handle_intr_status(adapter,
1255 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, 1359 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
1256 pcie_port_intr_info) + 1360 pcie_port_intr_info) +
1257 t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info); 1361 t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
1362 is_t4(adapter->chip) ?
1363 pcie_intr_info : t5_pcie_intr_info);
1364
1258 if (fat) 1365 if (fat)
1259 t4_fatal_err(adapter); 1366 t4_fatal_err(adapter);
1260} 1367}
@@ -1664,7 +1771,14 @@ static void ncsi_intr_handler(struct adapter *adap)
1664 */ 1771 */
1665static void xgmac_intr_handler(struct adapter *adap, int port) 1772static void xgmac_intr_handler(struct adapter *adap, int port)
1666{ 1773{
1667 u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE)); 1774 u32 v, int_cause_reg;
1775
1776 if (is_t4(adap->chip))
1777 int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE);
1778 else
1779 int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE);
1780
1781 v = t4_read_reg(adap, int_cause_reg);
1668 1782
1669 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR; 1783 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
1670 if (!v) 1784 if (!v)
@@ -2126,7 +2240,9 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
2126 u32 bgmap = get_mps_bg_map(adap, idx); 2240 u32 bgmap = get_mps_bg_map(adap, idx);
2127 2241
2128#define GET_STAT(name) \ 2242#define GET_STAT(name) \
2129 t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L)) 2243 t4_read_reg64(adap, \
2244 (is_t4(adap->chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
2245 T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
2130#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L) 2246#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
2131 2247
2132 p->tx_octets = GET_STAT(TX_PORT_BYTES); 2248 p->tx_octets = GET_STAT(TX_PORT_BYTES);
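
The GET_STAT change above hinges on T5 moving per-port MAC registers to a new block. A sketch of the two addressing schemes; the T5 base and stride are copied from this patch's t4_regs.h additions, while the T4 base/stride shown are assumptions for illustration only.

#include <stdio.h>

/* T5 values copied from the patch */
#define T5_PORT0_BASE  0x30000
#define T5_PORT_STRIDE 0x4000
#define T5_PORT_REG(idx, reg) (T5_PORT0_BASE + (idx) * T5_PORT_STRIDE + (reg))

/* assumed T4 layout: per-port XGMAC blocks at a fixed base and stride */
#define T4_PORT0_BASE  0x38000
#define T4_PORT_STRIDE 0x2000
#define PORT_REG(idx, reg) (T4_PORT0_BASE + (idx) * T4_PORT_STRIDE + (reg))

int main(void)
{
        int idx = 1, stat = 0x400; /* hypothetical per-port stat offset */

        printf("T4 addr: %#x\n", PORT_REG(idx, stat));
        printf("T5 addr: %#x\n", T5_PORT_REG(idx, stat));
        return 0;
}
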
@@ -2205,14 +2321,26 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
2205void t4_wol_magic_enable(struct adapter *adap, unsigned int port, 2321void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
2206 const u8 *addr) 2322 const u8 *addr)
2207{ 2323{
2324 u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
2325
2326 if (is_t4(adap->chip)) {
2327 mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
2328 mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
2329 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
2330 } else {
2331 mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
2332 mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
2333 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
2334 }
2335
2208 if (addr) { 2336 if (addr) {
2209 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO), 2337 t4_write_reg(adap, mag_id_reg_l,
2210 (addr[2] << 24) | (addr[3] << 16) | 2338 (addr[2] << 24) | (addr[3] << 16) |
2211 (addr[4] << 8) | addr[5]); 2339 (addr[4] << 8) | addr[5]);
2212 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI), 2340 t4_write_reg(adap, mag_id_reg_h,
2213 (addr[0] << 8) | addr[1]); 2341 (addr[0] << 8) | addr[1]);
2214 } 2342 }
2215 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN, 2343 t4_set_reg_field(adap, port_cfg_reg, MAGICEN,
2216 addr ? MAGICEN : 0); 2344 addr ? MAGICEN : 0);
2217} 2345}
2218 2346
@@ -2235,16 +2363,23 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
2235 u64 mask0, u64 mask1, unsigned int crc, bool enable) 2363 u64 mask0, u64 mask1, unsigned int crc, bool enable)
2236{ 2364{
2237 int i; 2365 int i;
2366 u32 port_cfg_reg;
2367
2368 if (is_t4(adap->chip))
2369 port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
2370 else
2371 port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
2238 2372
2239 if (!enable) { 2373 if (!enable) {
2240 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 2374 t4_set_reg_field(adap, port_cfg_reg, PATEN, 0);
2241 PATEN, 0);
2242 return 0; 2375 return 0;
2243 } 2376 }
2244 if (map > 0xff) 2377 if (map > 0xff)
2245 return -EINVAL; 2378 return -EINVAL;
2246 2379
2247#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name) 2380#define EPIO_REG(name) \
2381 (is_t4(adap->chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \
2382 T5_PORT_REG(port, MAC_PORT_EPIO_##name))
2248 2383
2249 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32); 2384 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
2250 t4_write_reg(adap, EPIO_REG(DATA2), mask1); 2385 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
@@ -2322,24 +2457,24 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
2322 * @addr: address of first byte requested aligned on 32b. 2457 * @addr: address of first byte requested aligned on 32b.
2323 * @data: len bytes to hold the data read 2458 * @data: len bytes to hold the data read
2324 * @len: amount of data to read from window. Must be <= 2459 * @len: amount of data to read from window. Must be <=
2325 * MEMWIN0_APERATURE after adjusting for 16B alignment 2460 * MEMWIN0_APERTURE after adjusting for 16B for T4 and
2326 * requirements of the the memory window. 2461 * 128B for T5 alignment requirements of the memory window.
2327 * 2462 *
2328 * Read len bytes of data from MC starting at @addr. 2463 * Read len bytes of data from MC starting at @addr.
2329 */ 2464 */
2330int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len) 2465int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len)
2331{ 2466{
2332 int i; 2467 int i, off;
2333 int off; 2468 u32 win_pf = is_t4(adap->chip) ? 0 : V_PFNUM(adap->fn);
2334 2469
2335 /* 2470 /* Align on a 2KB boundary.
2336 * Align on a 16B boundary.
2337 */ 2471 */
2338 off = addr & 15; 2472 off = addr & MEMWIN0_APERTURE;
2339 if ((addr & 3) || (len + off) > MEMWIN0_APERTURE) 2473 if ((addr & 3) || (len + off) > MEMWIN0_APERTURE)
2340 return -EINVAL; 2474 return -EINVAL;
2341 2475
2342 t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET, addr & ~15); 2476 t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET,
2477 (addr & ~MEMWIN0_APERTURE) | win_pf);
2343 t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET); 2478 t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
2344 2479
2345 for (i = 0; i < len; i += 4) 2480 for (i = 0; i < len; i += 4)
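
Both memory-window paths in this file now OR the PF number into the low bits of PCIE_MEM_ACCESS_OFFSET on T5. A small sketch; S_PFNUM comes from the patch, the aperture size is an assumed placeholder.

#include <stdio.h>

#define S_PFNUM 0
#define V_PFNUM(x) ((x) << S_PFNUM)
#define MEMWIN0_APERTURE 2048u  /* assumed window aperture in bytes */

static unsigned int win_offset(unsigned int addr, int fn, int is_t4_chip)
{
        unsigned int win_pf = is_t4_chip ? 0 : V_PFNUM(fn);

        /* aligned window base, tagged with the PF on T5 */
        return (addr & ~(MEMWIN0_APERTURE - 1)) | win_pf;
}

int main(void)
{
        printf("T4: %#x\n", win_offset(0x12345, 4, 1));
        printf("T5: %#x\n", win_offset(0x12345, 4, 0));
        return 0;
}
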
@@ -3162,6 +3297,9 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
3162 int i, ret; 3297 int i, ret;
3163 struct fw_vi_mac_cmd c; 3298 struct fw_vi_mac_cmd c;
3164 struct fw_vi_mac_exact *p; 3299 struct fw_vi_mac_exact *p;
3300 unsigned int max_naddr = is_t4(adap->chip) ?
3301 NUM_MPS_CLS_SRAM_L_INSTANCES :
3302 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
3165 3303
3166 if (naddr > 7) 3304 if (naddr > 7)
3167 return -EINVAL; 3305 return -EINVAL;
@@ -3187,8 +3325,8 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
3187 u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx)); 3325 u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
3188 3326
3189 if (idx) 3327 if (idx)
3190 idx[i] = index >= NEXACT_MAC ? 0xffff : index; 3328 idx[i] = index >= max_naddr ? 0xffff : index;
3191 if (index < NEXACT_MAC) 3329 if (index < max_naddr)
3192 ret++; 3330 ret++;
3193 else if (hash) 3331 else if (hash)
3194 *hash |= (1ULL << hash_mac_addr(addr[i])); 3332 *hash |= (1ULL << hash_mac_addr(addr[i]));
@@ -3221,6 +3359,9 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
3221 int ret, mode; 3359 int ret, mode;
3222 struct fw_vi_mac_cmd c; 3360 struct fw_vi_mac_cmd c;
3223 struct fw_vi_mac_exact *p = c.u.exact; 3361 struct fw_vi_mac_exact *p = c.u.exact;
3362 unsigned int max_mac_addr = is_t4(adap->chip) ?
3363 NUM_MPS_CLS_SRAM_L_INSTANCES :
3364 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
3224 3365
3225 if (idx < 0) /* new allocation */ 3366 if (idx < 0) /* new allocation */
3226 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; 3367 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
@@ -3238,7 +3379,7 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
3238 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); 3379 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
3239 if (ret == 0) { 3380 if (ret == 0) {
3240 ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx)); 3381 ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
3241 if (ret >= NEXACT_MAC) 3382 if (ret >= max_mac_addr)
3242 ret = -ENOMEM; 3383 ret = -ENOMEM;
3243 } 3384 }
3244 return ret; 3385 return ret;
@@ -3547,7 +3688,8 @@ static int get_flash_params(struct adapter *adap)
3547 */ 3688 */
3548int t4_prep_adapter(struct adapter *adapter) 3689int t4_prep_adapter(struct adapter *adapter)
3549{ 3690{
3550 int ret; 3691 int ret, ver;
3692 uint16_t device_id;
3551 3693
3552 ret = t4_wait_dev_ready(adapter); 3694 ret = t4_wait_dev_ready(adapter);
3553 if (ret < 0) 3695 if (ret < 0)
@@ -3562,6 +3704,28 @@ int t4_prep_adapter(struct adapter *adapter)
3562 return ret; 3704 return ret;
3563 } 3705 }
3564 3706
3707 /* Retrieve adapter's device ID
3708 */
3709 pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
3710 ver = device_id >> 12;
3711 switch (ver) {
3712 case CHELSIO_T4:
3713 adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T4,
3714 adapter->params.rev);
3715 break;
3716 case CHELSIO_T5:
3717 adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T5,
3718 adapter->params.rev);
3719 break;
3720 default:
3721 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
3722 device_id);
3723 return -EINVAL;
3724 }
3725
3726 /* Reassign the updated revision field */
3727 adapter->params.rev = adapter->chip;
3728
3565 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd); 3729 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3566 3730
3567 /* 3731 /*
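
The detection added to t4_prep_adapter() above keys off the top nibble of the PCI device ID (0x4xxx = T4, 0x5xxx = T5) and packs it with the revision. A compilable sketch using the macros this series introduces:

#include <stdint.h>
#include <stdio.h>

#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
#define CHELSIO_CHIP_VERSION(code) ((code) >> 4)
#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
#define CHELSIO_T4 0x4
#define CHELSIO_T5 0x5

static int chip_code(uint16_t device_id, int rev)
{
        switch (device_id >> 12) {
        case CHELSIO_T4: return CHELSIO_CHIP_CODE(CHELSIO_T4, rev);
        case CHELSIO_T5: return CHELSIO_CHIP_CODE(CHELSIO_T5, rev);
        default:         return -1;     /* unsupported device */
        }
}

int main(void)
{
        int chip = chip_code(0x5810 /* T580-lp-cr */, 0);

        printf("version %#x release %#x\n",
               CHELSIO_CHIP_VERSION(chip), CHELSIO_CHIP_RELEASE(chip));
        return 0;
}
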
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index f534ed7e10e9..1d1623be9f1e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -47,7 +47,6 @@ enum {
47 TCB_SIZE = 128, /* TCB size */ 47 TCB_SIZE = 128, /* TCB size */
48 NMTUS = 16, /* size of MTU table */ 48 NMTUS = 16, /* size of MTU table */
49 NCCTRL_WIN = 32, /* # of congestion control windows */ 49 NCCTRL_WIN = 32, /* # of congestion control windows */
50 NEXACT_MAC = 336, /* # of exact MAC address filters */
51 L2T_SIZE = 4096, /* # of L2T entries */ 50 L2T_SIZE = 4096, /* # of L2T entries */
52 MBOX_LEN = 64, /* mailbox size in bytes */ 51 MBOX_LEN = 64, /* mailbox size in bytes */
53 TRACE_LEN = 112, /* length of trace data and mask */ 52 TRACE_LEN = 112, /* length of trace data and mask */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 261d17703adc..47656ac1ac25 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -74,6 +74,7 @@ enum {
74 CPL_PASS_ESTABLISH = 0x41, 74 CPL_PASS_ESTABLISH = 0x41,
75 CPL_RX_DATA_DDP = 0x42, 75 CPL_RX_DATA_DDP = 0x42,
76 CPL_PASS_ACCEPT_REQ = 0x44, 76 CPL_PASS_ACCEPT_REQ = 0x44,
77 CPL_TRACE_PKT_T5 = 0x48,
77 78
78 CPL_RDMA_READ_REQ = 0x60, 79 CPL_RDMA_READ_REQ = 0x60,
79 80
@@ -287,6 +288,23 @@ struct cpl_act_open_req {
287 __be32 opt2; 288 __be32 opt2;
288}; 289};
289 290
291#define S_FILTER_TUPLE 24
292#define M_FILTER_TUPLE 0xFFFFFFFFFF
293#define V_FILTER_TUPLE(x) ((x) << S_FILTER_TUPLE)
294#define G_FILTER_TUPLE(x) (((x) >> S_FILTER_TUPLE) & M_FILTER_TUPLE)
295struct cpl_t5_act_open_req {
296 WR_HDR;
297 union opcode_tid ot;
298 __be16 local_port;
299 __be16 peer_port;
300 __be32 local_ip;
301 __be32 peer_ip;
302 __be64 opt0;
303 __be32 rsvd;
304 __be32 opt2;
305 __be64 params;
306};
307
290struct cpl_act_open_req6 { 308struct cpl_act_open_req6 {
291 WR_HDR; 309 WR_HDR;
292 union opcode_tid ot; 310 union opcode_tid ot;
@@ -566,6 +584,11 @@ struct cpl_rx_pkt {
566#define V_RX_ETHHDR_LEN(x) ((x) << S_RX_ETHHDR_LEN) 584#define V_RX_ETHHDR_LEN(x) ((x) << S_RX_ETHHDR_LEN)
567#define G_RX_ETHHDR_LEN(x) (((x) >> S_RX_ETHHDR_LEN) & M_RX_ETHHDR_LEN) 585#define G_RX_ETHHDR_LEN(x) (((x) >> S_RX_ETHHDR_LEN) & M_RX_ETHHDR_LEN)
568 586
587#define S_RX_T5_ETHHDR_LEN 0
588#define M_RX_T5_ETHHDR_LEN 0x3F
589#define V_RX_T5_ETHHDR_LEN(x) ((x) << S_RX_T5_ETHHDR_LEN)
590#define G_RX_T5_ETHHDR_LEN(x) (((x) >> S_RX_T5_ETHHDR_LEN) & M_RX_T5_ETHHDR_LEN)
591
569#define S_RX_MACIDX 8 592#define S_RX_MACIDX 8
570#define M_RX_MACIDX 0x1FF 593#define M_RX_MACIDX 0x1FF
571#define V_RX_MACIDX(x) ((x) << S_RX_MACIDX) 594#define V_RX_MACIDX(x) ((x) << S_RX_MACIDX)
@@ -612,6 +635,28 @@ struct cpl_trace_pkt {
612 __be64 tstamp; 635 __be64 tstamp;
613}; 636};
614 637
638struct cpl_t5_trace_pkt {
639 __u8 opcode;
640 __u8 intf;
641#if defined(__LITTLE_ENDIAN_BITFIELD)
642 __u8 runt:4;
643 __u8 filter_hit:4;
644 __u8:6;
645 __u8 err:1;
646 __u8 trunc:1;
647#else
648 __u8 filter_hit:4;
649 __u8 runt:4;
650 __u8 trunc:1;
651 __u8 err:1;
652 __u8:6;
653#endif
654 __be16 rsvd;
655 __be16 len;
656 __be64 tstamp;
657 __be64 rsvd1;
658};
659
615struct cpl_l2t_write_req { 660struct cpl_l2t_write_req {
616 WR_HDR; 661 WR_HDR;
617 union opcode_tid ot; 662 union opcode_tid ot;
@@ -742,4 +787,12 @@ struct ulp_mem_io {
742#define ULP_MEMIO_LOCK(x) ((x) << 31) 787#define ULP_MEMIO_LOCK(x) ((x) << 31)
743}; 788};
744 789
790#define S_T5_ULP_MEMIO_IMM 23
791#define V_T5_ULP_MEMIO_IMM(x) ((x) << S_T5_ULP_MEMIO_IMM)
792#define F_T5_ULP_MEMIO_IMM V_T5_ULP_MEMIO_IMM(1U)
793
794#define S_T5_ULP_MEMIO_ORDER 22
795#define V_T5_ULP_MEMIO_ORDER(x) ((x) << S_T5_ULP_MEMIO_ORDER)
796#define F_T5_ULP_MEMIO_ORDER V_T5_ULP_MEMIO_ORDER(1U)
797
745#endif /* __T4_MSG_H */ 798#endif /* __T4_MSG_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 83ec5f7844ac..ef146c0ba481 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -68,9 +68,14 @@
68#define QID_SHIFT 15 68#define QID_SHIFT 15
69#define QID(x) ((x) << QID_SHIFT) 69#define QID(x) ((x) << QID_SHIFT)
70#define DBPRIO(x) ((x) << 14) 70#define DBPRIO(x) ((x) << 14)
71#define DBTYPE(x) ((x) << 13)
71#define PIDX_MASK 0x00003fffU 72#define PIDX_MASK 0x00003fffU
72#define PIDX_SHIFT 0 73#define PIDX_SHIFT 0
73#define PIDX(x) ((x) << PIDX_SHIFT) 74#define PIDX(x) ((x) << PIDX_SHIFT)
75#define S_PIDX_T5 0
76#define M_PIDX_T5 0x1fffU
77#define PIDX_T5(x) (((x) >> S_PIDX_T5) & M_PIDX_T5)
78
74 79
75#define SGE_PF_GTS 0x4 80#define SGE_PF_GTS 0x4
76#define INGRESSQID_MASK 0xffff0000U 81#define INGRESSQID_MASK 0xffff0000U
@@ -152,6 +157,8 @@
152#define QUEUESPERPAGEPF0_MASK 0x0000000fU 157#define QUEUESPERPAGEPF0_MASK 0x0000000fU
153#define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK) 158#define QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK)
154 159
160#define QUEUESPERPAGEPF1 4
161
155#define SGE_INT_CAUSE1 0x1024 162#define SGE_INT_CAUSE1 0x1024
156#define SGE_INT_CAUSE2 0x1030 163#define SGE_INT_CAUSE2 0x1030
157#define SGE_INT_CAUSE3 0x103c 164#define SGE_INT_CAUSE3 0x103c
@@ -234,6 +241,10 @@
234#define SGE_DOORBELL_CONTROL 0x10a8 241#define SGE_DOORBELL_CONTROL 0x10a8
235#define ENABLE_DROP (1 << 13) 242#define ENABLE_DROP (1 << 13)
236 243
244#define S_NOCOALESCE 26
245#define V_NOCOALESCE(x) ((x) << S_NOCOALESCE)
246#define F_NOCOALESCE V_NOCOALESCE(1U)
247
237#define SGE_TIMER_VALUE_0_AND_1 0x10b8 248#define SGE_TIMER_VALUE_0_AND_1 0x10b8
238#define TIMERVALUE0_MASK 0xffff0000U 249#define TIMERVALUE0_MASK 0xffff0000U
239#define TIMERVALUE0_SHIFT 16 250#define TIMERVALUE0_SHIFT 16
@@ -272,17 +283,36 @@
272#define S_HP_INT_THRESH 28 283#define S_HP_INT_THRESH 28
273#define M_HP_INT_THRESH 0xfU 284#define M_HP_INT_THRESH 0xfU
274#define V_HP_INT_THRESH(x) ((x) << S_HP_INT_THRESH) 285#define V_HP_INT_THRESH(x) ((x) << S_HP_INT_THRESH)
286#define S_LP_INT_THRESH_T5 18
287#define V_LP_INT_THRESH_T5(x) ((x) << S_LP_INT_THRESH_T5)
288#define M_LP_COUNT_T5 0x3ffffU
289#define G_LP_COUNT_T5(x) (((x) >> S_LP_COUNT) & M_LP_COUNT_T5)
275#define M_HP_COUNT 0x7ffU 290#define M_HP_COUNT 0x7ffU
276#define S_HP_COUNT 16 291#define S_HP_COUNT 16
277#define G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT) 292#define G_HP_COUNT(x) (((x) >> S_HP_COUNT) & M_HP_COUNT)
278#define S_LP_INT_THRESH 12 293#define S_LP_INT_THRESH 12
279#define M_LP_INT_THRESH 0xfU 294#define M_LP_INT_THRESH 0xfU
295#define M_LP_INT_THRESH_T5 0xfffU
280#define V_LP_INT_THRESH(x) ((x) << S_LP_INT_THRESH) 296#define V_LP_INT_THRESH(x) ((x) << S_LP_INT_THRESH)
281#define M_LP_COUNT 0x7ffU 297#define M_LP_COUNT 0x7ffU
282#define S_LP_COUNT 0 298#define S_LP_COUNT 0
283#define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT) 299#define G_LP_COUNT(x) (((x) >> S_LP_COUNT) & M_LP_COUNT)
284#define A_SGE_DBFIFO_STATUS 0x10a4 300#define A_SGE_DBFIFO_STATUS 0x10a4
285 301
302#define SGE_STAT_TOTAL 0x10e4
303#define SGE_STAT_MATCH 0x10e8
304
305#define SGE_STAT_CFG 0x10ec
306#define S_STATSOURCE_T5 9
307#define STATSOURCE_T5(x) ((x) << S_STATSOURCE_T5)
308
309#define SGE_DBFIFO_STATUS2 0x1118
310#define M_HP_COUNT_T5 0x3ffU
311#define G_HP_COUNT_T5(x) ((x) & M_HP_COUNT_T5)
312#define S_HP_INT_THRESH_T5 10
313#define M_HP_INT_THRESH_T5 0xfU
314#define V_HP_INT_THRESH_T5(x) ((x) << S_HP_INT_THRESH_T5)
315
286#define S_ENABLE_DROP 13 316#define S_ENABLE_DROP 13
287#define V_ENABLE_DROP(x) ((x) << S_ENABLE_DROP) 317#define V_ENABLE_DROP(x) ((x) << S_ENABLE_DROP)
288#define F_ENABLE_DROP V_ENABLE_DROP(1U) 318#define F_ENABLE_DROP V_ENABLE_DROP(1U)
@@ -331,8 +361,27 @@
331#define MSIADDRHPERR 0x00000002U 361#define MSIADDRHPERR 0x00000002U
332#define MSIADDRLPERR 0x00000001U 362#define MSIADDRLPERR 0x00000001U
333 363
364#define READRSPERR 0x20000000U
365#define TRGT1GRPPERR 0x10000000U
366#define IPSOTPERR 0x08000000U
367#define IPRXDATAGRPPERR 0x02000000U
368#define IPRXHDRGRPPERR 0x01000000U
369#define MAGRPPERR 0x00400000U
370#define VFIDPERR 0x00200000U
371#define HREQWRPERR 0x00010000U
372#define DREQWRPERR 0x00002000U
373#define MSTTAGQPERR 0x00000400U
374#define PIOREQGRPPERR 0x00000100U
375#define PIOCPLGRPPERR 0x00000080U
376#define MSIXSTIPERR 0x00000004U
377#define MSTTIMEOUTPERR 0x00000002U
378#define MSTGRPPERR 0x00000001U
379
334#define PCIE_NONFAT_ERR 0x3010 380#define PCIE_NONFAT_ERR 0x3010
335#define PCIE_MEM_ACCESS_BASE_WIN 0x3068 381#define PCIE_MEM_ACCESS_BASE_WIN 0x3068
382#define S_PCIEOFST 10
383#define M_PCIEOFST 0x3fffffU
384#define GET_PCIEOFST(x) (((x) >> S_PCIEOFST) & M_PCIEOFST)
336#define PCIEOFST_MASK 0xfffffc00U 385#define PCIEOFST_MASK 0xfffffc00U
337#define BIR_MASK 0x00000300U 386#define BIR_MASK 0x00000300U
338#define BIR_SHIFT 8 387#define BIR_SHIFT 8
@@ -342,6 +391,9 @@
342#define WINDOW(x) ((x) << WINDOW_SHIFT) 391#define WINDOW(x) ((x) << WINDOW_SHIFT)
343#define PCIE_MEM_ACCESS_OFFSET 0x306c 392#define PCIE_MEM_ACCESS_OFFSET 0x306c
344 393
394#define S_PFNUM 0
395#define V_PFNUM(x) ((x) << S_PFNUM)
396
345#define PCIE_FW 0x30b8 397#define PCIE_FW 0x30b8
346#define PCIE_FW_ERR 0x80000000U 398#define PCIE_FW_ERR 0x80000000U
347#define PCIE_FW_INIT 0x40000000U 399#define PCIE_FW_INIT 0x40000000U
@@ -407,12 +459,18 @@
407 459
408#define MC_BIST_STATUS_RDATA 0x7688 460#define MC_BIST_STATUS_RDATA 0x7688
409 461
462#define MA_EDRAM0_BAR 0x77c0
463#define MA_EDRAM1_BAR 0x77c4
464#define EDRAM_SIZE_MASK 0xfffU
465#define EDRAM_SIZE_GET(x) ((x) & EDRAM_SIZE_MASK)
466
410#define MA_EXT_MEMORY_BAR 0x77c8 467#define MA_EXT_MEMORY_BAR 0x77c8
411#define EXT_MEM_SIZE_MASK 0x00000fffU 468#define EXT_MEM_SIZE_MASK 0x00000fffU
412#define EXT_MEM_SIZE_SHIFT 0 469#define EXT_MEM_SIZE_SHIFT 0
413#define EXT_MEM_SIZE_GET(x) (((x) & EXT_MEM_SIZE_MASK) >> EXT_MEM_SIZE_SHIFT) 470#define EXT_MEM_SIZE_GET(x) (((x) & EXT_MEM_SIZE_MASK) >> EXT_MEM_SIZE_SHIFT)
414 471
415#define MA_TARGET_MEM_ENABLE 0x77d8 472#define MA_TARGET_MEM_ENABLE 0x77d8
473#define EXT_MEM1_ENABLE 0x00000010U
416#define EXT_MEM_ENABLE 0x00000004U 474#define EXT_MEM_ENABLE 0x00000004U
417#define EDRAM1_ENABLE 0x00000002U 475#define EDRAM1_ENABLE 0x00000002U
418#define EDRAM0_ENABLE 0x00000001U 476#define EDRAM0_ENABLE 0x00000001U
@@ -431,6 +489,7 @@
431#define MA_PCIE_FW 0x30b8 489#define MA_PCIE_FW 0x30b8
432#define MA_PARITY_ERROR_STATUS 0x77f4 490#define MA_PARITY_ERROR_STATUS 0x77f4
433 491
492#define MA_EXT_MEMORY1_BAR 0x7808
434#define EDC_0_BASE_ADDR 0x7900 493#define EDC_0_BASE_ADDR 0x7900
435 494
436#define EDC_BIST_CMD 0x7904 495#define EDC_BIST_CMD 0x7904
@@ -801,6 +860,15 @@
801#define MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c 860#define MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c
802#define MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610 861#define MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610
803#define MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614 862#define MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614
863#define MAC_PORT_CFG2 0x818
864#define MAC_PORT_MAGIC_MACID_LO 0x824
865#define MAC_PORT_MAGIC_MACID_HI 0x828
866#define MAC_PORT_EPIO_DATA0 0x8c0
867#define MAC_PORT_EPIO_DATA1 0x8c4
868#define MAC_PORT_EPIO_DATA2 0x8c8
869#define MAC_PORT_EPIO_DATA3 0x8cc
870#define MAC_PORT_EPIO_OP 0x8d0
871
804#define MPS_CMN_CTL 0x9000 872#define MPS_CMN_CTL 0x9000
805#define NUMPORTS_MASK 0x00000003U 873#define NUMPORTS_MASK 0x00000003U
806#define NUMPORTS_SHIFT 0 874#define NUMPORTS_SHIFT 0
@@ -1063,6 +1131,7 @@
1063#define ADDRESS_SHIFT 0 1131#define ADDRESS_SHIFT 0
1064#define ADDRESS(x) ((x) << ADDRESS_SHIFT) 1132#define ADDRESS(x) ((x) << ADDRESS_SHIFT)
1065 1133
1134#define MAC_PORT_INT_CAUSE 0x8dc
1066#define XGMAC_PORT_INT_CAUSE 0x10dc 1135#define XGMAC_PORT_INT_CAUSE 0x10dc
1067 1136
1068#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x7e28 1137#define A_TP_TX_MOD_QUEUE_REQ_MAP 0x7e28
@@ -1101,4 +1170,33 @@
1101#define V_PORT(x) ((x) << S_PORT) 1170#define V_PORT(x) ((x) << S_PORT)
1102#define F_PORT V_PORT(1U) 1171#define F_PORT V_PORT(1U)
1103 1172
1173#define NUM_MPS_CLS_SRAM_L_INSTANCES 336
1174#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512
1175
1176#define T5_PORT0_BASE 0x30000
1177#define T5_PORT_STRIDE 0x4000
1178#define T5_PORT_BASE(idx) (T5_PORT0_BASE + (idx) * T5_PORT_STRIDE)
1179#define T5_PORT_REG(idx, reg) (T5_PORT_BASE(idx) + (reg))
1180
1181#define MC_0_BASE_ADDR 0x40000
1182#define MC_1_BASE_ADDR 0x48000
1183#define MC_STRIDE (MC_1_BASE_ADDR - MC_0_BASE_ADDR)
1184#define MC_REG(reg, idx) (reg + MC_STRIDE * idx)
1185
1186#define MC_P_BIST_CMD 0x41400
1187#define MC_P_BIST_CMD_ADDR 0x41404
1188#define MC_P_BIST_CMD_LEN 0x41408
1189#define MC_P_BIST_DATA_PATTERN 0x4140c
1190#define MC_P_BIST_STATUS_RDATA 0x41488
1191#define EDC_T50_BASE_ADDR 0x50000
1192#define EDC_H_BIST_CMD 0x50004
1193#define EDC_H_BIST_CMD_ADDR 0x50008
1194#define EDC_H_BIST_CMD_LEN 0x5000c
1195#define EDC_H_BIST_DATA_PATTERN 0x50010
1196#define EDC_H_BIST_STATUS_RDATA 0x50028
1197
1198#define EDC_T51_BASE_ADDR 0x50800
1199#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
1200#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
1201
1104#endif /* __T4_REGS_H */ 1202#endif /* __T4_REGS_H */
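
T5 has two MC and two EDC instances, and the MC_REG/EDC_REG_T5 macros added above select an instance by adding a stride to the instance-0 register address. A sketch with the values copied from this hunk:

#include <stdio.h>

#define MC_0_BASE_ADDR   0x40000
#define MC_1_BASE_ADDR   0x48000
#define MC_STRIDE        (MC_1_BASE_ADDR - MC_0_BASE_ADDR)
#define MC_P_BIST_CMD    0x41400
#define MC_REG(reg, idx) ((reg) + MC_STRIDE * (idx))

#define EDC_T50_BASE_ADDR 0x50000
#define EDC_T51_BASE_ADDR 0x50800
#define EDC_STRIDE_T5     (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
#define EDC_H_BIST_CMD    0x50004
#define EDC_REG_T5(reg, idx) ((reg) + EDC_STRIDE_T5 * (idx))

int main(void)
{
        printf("MC1 BIST_CMD:  %#x\n", MC_REG(MC_P_BIST_CMD, 1));      /* 0x49400 */
        printf("EDC1 BIST_CMD: %#x\n", EDC_REG_T5(EDC_H_BIST_CMD, 1)); /* 0x50804 */
        return 0;
}
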
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index a0dcccd846c9..93444325b1e8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -574,7 +574,7 @@ struct fw_eth_tx_pkt_vm_wr {
574 __be16 vlantci; 574 __be16 vlantci;
575}; 575};
576 576
577#define FW_CMD_MAX_TIMEOUT 3000 577#define FW_CMD_MAX_TIMEOUT 10000
578 578
579/* 579/*
580 * If a host driver does a HELLO and discovers that there's already a MASTER 580 * If a host driver does a HELLO and discovers that there's already a MASTER
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 68eaa9c88c7d..be5c7ef6ca93 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -344,6 +344,7 @@ struct adapter {
344 unsigned long registered_device_map; 344 unsigned long registered_device_map;
345 unsigned long open_device_map; 345 unsigned long open_device_map;
346 unsigned long flags; 346 unsigned long flags;
347 enum chip_type chip;
347 struct adapter_params params; 348 struct adapter_params params;
348 349
349 /* queue and interrupt resources */ 350 /* queue and interrupt resources */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 56b46ab2d4c5..7fcac2003769 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -54,8 +54,8 @@
54/* 54/*
55 * Generic information about the driver. 55 * Generic information about the driver.
56 */ 56 */
57#define DRV_VERSION "1.0.0" 57#define DRV_VERSION "2.0.0-ko"
58#define DRV_DESC "Chelsio T4 Virtual Function (VF) Network Driver" 58#define DRV_DESC "Chelsio T4/T5 Virtual Function (VF) Network Driver"
59 59
60/* 60/*
61 * Module Parameters. 61 * Module Parameters.
@@ -1050,7 +1050,7 @@ static inline unsigned int mk_adap_vers(const struct adapter *adapter)
1050 /* 1050 /*
1051 * Chip version 4, revision 0x3f (cxgb4vf). 1051 * Chip version 4, revision 0x3f (cxgb4vf).
1052 */ 1052 */
1053 return 4 | (0x3f << 10); 1053 return CHELSIO_CHIP_VERSION(adapter->chip) | (0x3f << 10);
1054} 1054}
1055 1055
1056/* 1056/*
@@ -2099,6 +2099,15 @@ static int adap_init0(struct adapter *adapter)
2099 return err; 2099 return err;
2100 } 2100 }
2101 2101
2102 switch (adapter->pdev->device >> 12) {
2103 case CHELSIO_T4:
2104 adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
2105 break;
2106 case CHELSIO_T5:
2107 adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T5, 0);
2108 break;
2109 }
2110
2102 /* 2111 /*
2103 * Grab basic operational parameters. These will predominantly have 2112 * Grab basic operational parameters. These will predominantly have
2104 * been set up by the Physical Function Driver or will be hard coded 2113 * been set up by the Physical Function Driver or will be hard coded
@@ -2888,6 +2897,26 @@ static struct pci_device_id cxgb4vf_pci_tbl[] = {
2888 CH_DEVICE(0x480a, 0), /* T404-bt */ 2897 CH_DEVICE(0x480a, 0), /* T404-bt */
2889 CH_DEVICE(0x480d, 0), /* T480-cr */ 2898 CH_DEVICE(0x480d, 0), /* T480-cr */
2890 CH_DEVICE(0x480e, 0), /* T440-lp-cr */ 2899 CH_DEVICE(0x480e, 0), /* T440-lp-cr */
2900 CH_DEVICE(0x5800, 0), /* T580-dbg */
2901 CH_DEVICE(0x5801, 0), /* T520-cr */
2902 CH_DEVICE(0x5802, 0), /* T522-cr */
2903 CH_DEVICE(0x5803, 0), /* T540-cr */
2904 CH_DEVICE(0x5804, 0), /* T520-bch */
2905 CH_DEVICE(0x5805, 0), /* T540-bch */
2906 CH_DEVICE(0x5806, 0), /* T540-ch */
2907 CH_DEVICE(0x5807, 0), /* T520-so */
2908 CH_DEVICE(0x5808, 0), /* T520-cx */
2909 CH_DEVICE(0x5809, 0), /* T520-bt */
2910 CH_DEVICE(0x580a, 0), /* T504-bt */
2911 CH_DEVICE(0x580b, 0), /* T520-sr */
2912 CH_DEVICE(0x580c, 0), /* T504-bt */
2913 CH_DEVICE(0x580d, 0), /* T580-cr */
2914 CH_DEVICE(0x580e, 0), /* T540-lp-cr */
2915 CH_DEVICE(0x580f, 0), /* Amsterdam */
2916 CH_DEVICE(0x5810, 0), /* T580-lp-cr */
2917 CH_DEVICE(0x5811, 0), /* T520-lp-cr */
2918 CH_DEVICE(0x5812, 0), /* T560-cr */
2919 CH_DEVICE(0x5813, 0), /* T580-cr */
2891 { 0, } 2920 { 0, }
2892}; 2921};
2893 2922
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 9488032d6d2d..61dfb2a47929 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -528,17 +528,21 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
528 */ 528 */
529static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl) 529static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
530{ 530{
531 u32 val;
532
531 /* 533 /*
532 * The SGE keeps track of its Producer and Consumer Indices in terms 534 * The SGE keeps track of its Producer and Consumer Indices in terms
533 * of Egress Queue Units so we can only tell it about integral numbers 535 * of Egress Queue Units so we can only tell it about integral numbers
534 * of multiples of Free List Entries per Egress Queue Units ... 536 * of multiples of Free List Entries per Egress Queue Units ...
535 */ 537 */
536 if (fl->pend_cred >= FL_PER_EQ_UNIT) { 538 if (fl->pend_cred >= FL_PER_EQ_UNIT) {
539 val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT);
540 if (!is_t4(adapter->chip))
541 val |= DBTYPE(1);
537 wmb(); 542 wmb();
538 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL, 543 t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
539 DBPRIO(1) | 544 DBPRIO(1) |
540 QID(fl->cntxt_id) | 545 QID(fl->cntxt_id) | val);
541 PIDX(fl->pend_cred / FL_PER_EQ_UNIT));
542 fl->pend_cred %= FL_PER_EQ_UNIT; 546 fl->pend_cred %= FL_PER_EQ_UNIT;
543 } 547 }
544} 548}
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 283f9d0d37fd..53cbfed21d0b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -38,6 +38,25 @@
38 38
39#include "../cxgb4/t4fw_api.h" 39#include "../cxgb4/t4fw_api.h"
40 40
41#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
42#define CHELSIO_CHIP_VERSION(code) ((code) >> 4)
43#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
44
45#define CHELSIO_T4 0x4
46#define CHELSIO_T5 0x5
47
48enum chip_type {
49 T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 0),
50 T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
51 T4_A3 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
52 T4_FIRST_REV = T4_A1,
53 T4_LAST_REV = T4_A3,
54
55 T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
56 T5_FIRST_REV = T5_A1,
57 T5_LAST_REV = T5_A1,
58};
59
41/* 60/*
42 * The "len16" field of a Firmware Command Structure ... 61 * The "len16" field of a Firmware Command Structure ...
43 */ 62 */
@@ -232,6 +251,11 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
232 return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false); 251 return t4vf_wr_mbox_core(adapter, cmd, size, rpl, false);
233} 252}
234 253
254static inline int is_t4(enum chip_type chip)
255{
256 return (chip >= T4_FIRST_REV && chip <= T4_LAST_REV);
257}
258
235int t4vf_wait_dev_ready(struct adapter *); 259int t4vf_wait_dev_ready(struct adapter *);
236int t4vf_port_init(struct adapter *, int); 260int t4vf_port_init(struct adapter *, int);
237 261
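
The VF driver gets the same packed (version, revision) encoding, and the is_t4() helper above is a simple range check over those codes. A compact sketch using the enum values from this hunk:

#include <stdio.h>

#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
#define CHELSIO_T4 0x4
#define CHELSIO_T5 0x5

enum chip_type {
        T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 0),
        T4_A3 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
        T4_FIRST_REV = T4_A1,
        T4_LAST_REV = T4_A3,
        T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
};

static int is_t4(enum chip_type chip)
{
        return chip >= T4_FIRST_REV && chip <= T4_LAST_REV;
}

int main(void)
{
        printf("T4_A1 is T4: %d\n", is_t4(T4_A1)); /* 1 */
        printf("T5_A1 is T4: %d\n", is_t4(T5_A1)); /* 0 */
        return 0;
}
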
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 7127c7b9efde..9f96dc3bb112 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -1027,8 +1027,11 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
1027 unsigned nfilters = 0; 1027 unsigned nfilters = 0;
1028 unsigned int rem = naddr; 1028 unsigned int rem = naddr;
1029 struct fw_vi_mac_cmd cmd, rpl; 1029 struct fw_vi_mac_cmd cmd, rpl;
1030 unsigned int max_naddr = is_t4(adapter->chip) ?
1031 NUM_MPS_CLS_SRAM_L_INSTANCES :
1032 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
1030 1033
1031 if (naddr > FW_CLS_TCAM_NUM_ENTRIES) 1034 if (naddr > max_naddr)
1032 return -EINVAL; 1035 return -EINVAL;
1033 1036
1034 for (offset = 0; offset < naddr; /**/) { 1037 for (offset = 0; offset < naddr; /**/) {
@@ -1069,10 +1072,10 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
1069 1072
1070 if (idx) 1073 if (idx)
1071 idx[offset+i] = 1074 idx[offset+i] =
1072 (index >= FW_CLS_TCAM_NUM_ENTRIES 1075 (index >= max_naddr
1073 ? 0xffff 1076 ? 0xffff
1074 : index); 1077 : index);
1075 if (index < FW_CLS_TCAM_NUM_ENTRIES) 1078 if (index < max_naddr)
1076 nfilters++; 1079 nfilters++;
1077 else if (hash) 1080 else if (hash)
1078 *hash |= (1ULL << hash_mac_addr(addr[offset+i])); 1081 *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
@@ -1118,6 +1121,9 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
1118 struct fw_vi_mac_exact *p = &cmd.u.exact[0]; 1121 struct fw_vi_mac_exact *p = &cmd.u.exact[0];
1119 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd, 1122 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
1120 u.exact[1]), 16); 1123 u.exact[1]), 16);
1124 unsigned int max_naddr = is_t4(adapter->chip) ?
1125 NUM_MPS_CLS_SRAM_L_INSTANCES :
1126 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
1121 1127
1122 /* 1128 /*
1123 * If this is a new allocation, determine whether it should be 1129 * If this is a new allocation, determine whether it should be
@@ -1140,7 +1146,7 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
1140 if (ret == 0) { 1146 if (ret == 0) {
1141 p = &rpl.u.exact[0]; 1147 p = &rpl.u.exact[0];
1142 ret = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx)); 1148 ret = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx));
1143 if (ret >= FW_CLS_TCAM_NUM_ENTRIES) 1149 if (ret >= max_naddr)
1144 ret = -ENOMEM; 1150 ret = -ENOMEM;
1145 } 1151 }
1146 return ret; 1152 return ret;
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 138446957786..19f642a45f40 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -101,23 +101,6 @@ static char version[] __initdata =
101 * them to system IRQ numbers. This mapping is card specific and is set to 101 * them to system IRQ numbers. This mapping is card specific and is set to
102 * the configuration of the Cirrus Eval board for this chip. 102 * the configuration of the Cirrus Eval board for this chip.
103 */ 103 */
104#if defined(CONFIG_MACH_IXDP2351)
105#define CS89x0_NONISA_IRQ
106static unsigned int netcard_portlist[] __used __initdata = {
107 IXDP2351_VIRT_CS8900_BASE, 0
108};
109static unsigned int cs8900_irq_map[] = {
110 IRQ_IXDP2351_CS8900, 0, 0, 0
111};
112#elif defined(CONFIG_ARCH_IXDP2X01)
113#define CS89x0_NONISA_IRQ
114static unsigned int netcard_portlist[] __used __initdata = {
115 IXDP2X01_CS8900_VIRT_BASE, 0
116};
117static unsigned int cs8900_irq_map[] = {
118 IRQ_IXDP2X01_CS8900, 0, 0, 0
119};
120#else
121#ifndef CONFIG_CS89x0_PLATFORM 104#ifndef CONFIG_CS89x0_PLATFORM
122static unsigned int netcard_portlist[] __used __initdata = { 105static unsigned int netcard_portlist[] __used __initdata = {
123 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240, 106 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240,
@@ -127,7 +110,6 @@ static unsigned int cs8900_irq_map[] = {
127 10, 11, 12, 5 110 10, 11, 12, 5
128}; 111};
129#endif 112#endif
130#endif
131 113
132#if DEBUGGING 114#if DEBUGGING
133static unsigned int net_debug = DEBUGGING; 115static unsigned int net_debug = DEBUGGING;
@@ -210,32 +192,6 @@ static int __init media_fn(char *str)
210__setup("cs89x0_media=", media_fn); 192__setup("cs89x0_media=", media_fn);
211#endif 193#endif
212 194
213#if defined(CONFIG_MACH_IXDP2351)
214static u16
215readword(unsigned long base_addr, int portno)
216{
217 return __raw_readw(base_addr + (portno << 1));
218}
219
220static void
221writeword(unsigned long base_addr, int portno, u16 value)
222{
223 __raw_writew(value, base_addr + (portno << 1));
224}
225#elif defined(CONFIG_ARCH_IXDP2X01)
226static u16
227readword(unsigned long base_addr, int portno)
228{
229 return __raw_readl(base_addr + (portno << 1));
230}
231
232static void
233writeword(unsigned long base_addr, int portno, u16 value)
234{
235 __raw_writel(value, base_addr + (portno << 1));
236}
237#endif
238
239static void readwords(struct net_local *lp, int portno, void *buf, int length) 195static void readwords(struct net_local *lp, int portno, void *buf, int length)
240{ 196{
241 u8 *buf8 = (u8 *)buf; 197 u8 *buf8 = (u8 *)buf;
@@ -478,9 +434,6 @@ dma_rx(struct net_device *dev)
478 /* Malloc up new buffer. */ 434 /* Malloc up new buffer. */
479 skb = netdev_alloc_skb(dev, length + 2); 435 skb = netdev_alloc_skb(dev, length + 2);
480 if (skb == NULL) { 436 if (skb == NULL) {
481 /* I don't think we want to do this to a stressed system */
482 cs89_dbg(0, err, "%s: Memory squeeze, dropping packet\n",
483 dev->name);
484 dev->stats.rx_dropped++; 437 dev->stats.rx_dropped++;
485 438
486 /* AKPM: advance bp to the next frame */ 439 /* AKPM: advance bp to the next frame */
@@ -731,9 +684,6 @@ net_rx(struct net_device *dev)
731 /* Malloc up new buffer. */ 684 /* Malloc up new buffer. */
732 skb = netdev_alloc_skb(dev, length + 2); 685 skb = netdev_alloc_skb(dev, length + 2);
733 if (skb == NULL) { 686 if (skb == NULL) {
734#if 0 /* Again, this seems a cruel thing to do */
735 pr_warn("%s: Memory squeeze, dropping packet\n", dev->name);
736#endif
737 dev->stats.rx_dropped++; 687 dev->stats.rx_dropped++;
738 return; 688 return;
739 } 689 }
@@ -908,7 +858,7 @@ net_open(struct net_device *dev)
908 goto bad_out; 858 goto bad_out;
909 } 859 }
910 } else { 860 } else {
911#if !defined(CS89x0_NONISA_IRQ) && !defined(CONFIG_CS89x0_PLATFORM) 861#if !defined(CONFIG_CS89x0_PLATFORM)
912 if (((1 << dev->irq) & lp->irq_map) == 0) { 862 if (((1 << dev->irq) & lp->irq_map) == 0) {
913 pr_err("%s: IRQ %d is not in our map of allowable IRQs, which is %x\n", 863 pr_err("%s: IRQ %d is not in our map of allowable IRQs, which is %x\n",
914 dev->name, dev->irq, lp->irq_map); 864 dev->name, dev->irq, lp->irq_map);
@@ -1321,9 +1271,7 @@ static const struct net_device_ops net_ops = {
1321static void __init reset_chip(struct net_device *dev) 1271static void __init reset_chip(struct net_device *dev)
1322{ 1272{
1323#if !defined(CONFIG_MACH_MX31ADS) 1273#if !defined(CONFIG_MACH_MX31ADS)
1324#if !defined(CS89x0_NONISA_IRQ)
1325 struct net_local *lp = netdev_priv(dev); 1274 struct net_local *lp = netdev_priv(dev);
1326#endif /* CS89x0_NONISA_IRQ */
1327 int reset_start_time; 1275 int reset_start_time;
1328 1276
1329 writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET); 1277 writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET);
@@ -1331,7 +1279,6 @@ static void __init reset_chip(struct net_device *dev)
1331 /* wait 30 ms */ 1279 /* wait 30 ms */
1332 msleep(30); 1280 msleep(30);
1333 1281
1334#if !defined(CS89x0_NONISA_IRQ)
1335 if (lp->chip_type != CS8900) { 1282 if (lp->chip_type != CS8900) {
1336 /* Hardware problem requires PNP registers to be reconfigured after a reset */ 1283 /* Hardware problem requires PNP registers to be reconfigured after a reset */
1337 iowrite16(PP_CS8920_ISAINT, lp->virt_addr + ADD_PORT); 1284 iowrite16(PP_CS8920_ISAINT, lp->virt_addr + ADD_PORT);
@@ -1344,7 +1291,6 @@ static void __init reset_chip(struct net_device *dev)
1344 iowrite8((dev->mem_start >> 8) & 0xff, 1291 iowrite8((dev->mem_start >> 8) & 0xff,
1345 lp->virt_addr + DATA_PORT + 1); 1292 lp->virt_addr + DATA_PORT + 1);
1346 } 1293 }
1347#endif /* CS89x0_NONISA_IRQ */
1348 1294
1349 /* Wait until the chip is reset */ 1295 /* Wait until the chip is reset */
1350 reset_start_time = jiffies; 1296 reset_start_time = jiffies;
@@ -1579,9 +1525,6 @@ cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular)
1579 i = lp->isa_config & INT_NO_MASK; 1525 i = lp->isa_config & INT_NO_MASK;
1580#ifndef CONFIG_CS89x0_PLATFORM 1526#ifndef CONFIG_CS89x0_PLATFORM
1581 if (lp->chip_type == CS8900) { 1527 if (lp->chip_type == CS8900) {
1582#ifdef CS89x0_NONISA_IRQ
1583 i = cs8900_irq_map[0];
1584#else
1585 /* Translate the IRQ using the IRQ mapping table. */ 1528 /* Translate the IRQ using the IRQ mapping table. */
1586 if (i >= ARRAY_SIZE(cs8900_irq_map)) 1529 if (i >= ARRAY_SIZE(cs8900_irq_map))
1587 pr_err("invalid ISA interrupt number %d\n", i); 1530 pr_err("invalid ISA interrupt number %d\n", i);
@@ -1599,7 +1542,6 @@ cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular)
1599 lp->irq_map = ((irq_map_buff[0] >> 8) | 1542 lp->irq_map = ((irq_map_buff[0] >> 8) |
1600 (irq_map_buff[1] << 8)); 1543 (irq_map_buff[1] << 8));
1601 } 1544 }
1602#endif
1603 } 1545 }
1604#endif 1546#endif
1605 if (!dev->irq) 1547 if (!dev->irq)
@@ -1978,18 +1920,6 @@ static struct platform_driver cs89x0_driver = {
1978 .remove = cs89x0_platform_remove, 1920 .remove = cs89x0_platform_remove,
1979}; 1921};
1980 1922
1981static int __init cs89x0_init(void) 1923module_platform_driver_probe(cs89x0_driver, cs89x0_platform_probe);
1982{
1983 return platform_driver_probe(&cs89x0_driver, cs89x0_platform_probe);
1984}
1985
1986module_init(cs89x0_init);
1987
1988static void __exit cs89x0_cleanup(void)
1989{
1990 platform_driver_unregister(&cs89x0_driver);
1991}
1992
1993module_exit(cs89x0_cleanup);
1994 1924
1995#endif /* CONFIG_CS89x0_PLATFORM */ 1925#endif /* CONFIG_CS89x0_PLATFORM */
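
The cs89x0 conversion above, like the ep93xx_eth, dm9000 and ftgmac100 conversions below, drops a hand-rolled module init/exit pair in favor of the module_platform_driver_probe() and module_platform_driver() helpers from <linux/platform_device.h>. To a first approximation the macros expand to exactly the boilerplate being deleted; a sketch for a hypothetical driver object drv:

    /* Approximate expansion of module_platform_driver_probe(drv, probe);
     * module_platform_driver(drv) has the same shape but calls
     * platform_driver_register(&drv) and relies on a .probe member. */
    static int __init drv_init(void)
    {
            return platform_driver_probe(&drv, probe);
    }
    module_init(drv_init);

    static void __exit drv_exit(void)
    {
            platform_driver_unregister(&drv);
    }
    module_exit(drv_exit);

The _probe variant suits non-hotpluggable devices: the probe routine may live in __init memory, and registration fails outright if no matching device exists at load time. The version-banner printks that some of the old init functions carried are dropped along the way.
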
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 354cbb78ed50..67b0388b6e68 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -887,18 +887,7 @@ static struct platform_driver ep93xx_eth_driver = {
887 }, 887 },
888}; 888};
889 889
890static int __init ep93xx_eth_init_module(void) 890module_platform_driver(ep93xx_eth_driver);
891{
892 printk(KERN_INFO DRV_MODULE_NAME " version " DRV_MODULE_VERSION " loading\n");
893 return platform_driver_register(&ep93xx_eth_driver);
894}
895
896static void __exit ep93xx_eth_cleanup_module(void)
897{
898 platform_driver_unregister(&ep93xx_eth_driver);
899}
900 891
901module_init(ep93xx_eth_init_module);
902module_exit(ep93xx_eth_cleanup_module);
903MODULE_LICENSE("GPL"); 892MODULE_LICENSE("GPL");
904MODULE_ALIAS("platform:ep93xx-eth"); 893MODULE_ALIAS("platform:ep93xx-eth");
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 9eada8e86078..9105465b2a1a 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1693,22 +1693,7 @@ static struct platform_driver dm9000_driver = {
1693 .remove = dm9000_drv_remove, 1693 .remove = dm9000_drv_remove,
1694}; 1694};
1695 1695
1696static int __init 1696module_platform_driver(dm9000_driver);
1697dm9000_init(void)
1698{
1699 printk(KERN_INFO "%s Ethernet Driver, V%s\n", CARDNAME, DRV_VERSION);
1700
1701 return platform_driver_register(&dm9000_driver);
1702}
1703
1704static void __exit
1705dm9000_cleanup(void)
1706{
1707 platform_driver_unregister(&dm9000_driver);
1708}
1709
1710module_init(dm9000_init);
1711module_exit(dm9000_cleanup);
1712 1697
1713MODULE_AUTHOR("Sascha Hauer, Ben Dooks"); 1698MODULE_AUTHOR("Sascha Hauer, Ben Dooks");
1714MODULE_DESCRIPTION("Davicom DM9000 network driver"); 1699MODULE_DESCRIPTION("Davicom DM9000 network driver");
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index 88feced9a629..cdbcd1643141 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -236,17 +236,14 @@ static int xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id)
236 private->rx_buffer = dma_alloc_coherent(d, 8192, 236 private->rx_buffer = dma_alloc_coherent(d, 8192,
237 &private->rx_dma_handle, 237 &private->rx_dma_handle,
238 GFP_KERNEL); 238 GFP_KERNEL);
239 if (private->rx_buffer == NULL) { 239 if (private->rx_buffer == NULL)
240 pr_err("%s: no memory for rx buffer\n", __func__);
241 goto rx_buf_fail; 240 goto rx_buf_fail;
242 } 241
243 private->tx_buffer = dma_alloc_coherent(d, 8192, 242 private->tx_buffer = dma_alloc_coherent(d, 8192,
244 &private->tx_dma_handle, 243 &private->tx_dma_handle,
245 GFP_KERNEL); 244 GFP_KERNEL);
246 if (private->tx_buffer == NULL) { 245 if (private->tx_buffer == NULL)
247 pr_err("%s: no memory for tx buffer\n", __func__);
248 goto tx_buf_fail; 246 goto tx_buf_fail;
249 }
250 247
251 SET_NETDEV_DEV(dev, &pdev->dev); 248 SET_NETDEV_DEV(dev, &pdev->dev);
252 249
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 110d26f4c602..afa8e3af2c4d 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -580,12 +580,9 @@ alloc_list (struct net_device *dev)
580 580
581 skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz); 581 skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
582 np->rx_skbuff[i] = skb; 582 np->rx_skbuff[i] = skb;
583 if (skb == NULL) { 583 if (skb == NULL)
584 printk (KERN_ERR
585 "%s: alloc_list: allocate Rx buffer error! ",
586 dev->name);
587 break; 584 break;
588 } 585
589 /* Rubicon now supports 40 bits of addressing space. */ 586 /* Rubicon now supports 40 bits of addressing space. */
590 np->rx_ring[i].fraginfo = 587 np->rx_ring[i].fraginfo =
591 cpu_to_le64 ( pci_map_single ( 588 cpu_to_le64 ( pci_map_single (
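
The xircom_cb and dl2k hunks above are part of a tree-wide cleanup that recurs throughout this diff (cs89x0, be2net, fec, fs_enet, fmvj18x, 82596, lib82596 and ehea all lose similar "memory squeeze" messages): a driver-level printk after a failed allocation duplicates work, because the allocator core already warns, with a backtrace, unless __GFP_NOWARN was passed. What remains is plain errno propagation; a minimal sketch under that assumption:

    #include <linux/dma-mapping.h>

    /* Hypothetical wrapper showing the preferred shape: return
     * -ENOMEM and let the mm core do the complaining. */
    static int example_alloc_ring(struct device *dev, size_t size,
                                  void **va, dma_addr_t *dma)
    {
            *va = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
            if (!*va)
                    return -ENOMEM; /* no dev_err() needed */
            return 0;
    }

The same reasoning covers the netdev_alloc_skb() failures in the RX paths below, where the only remaining action is the rx_dropped counter bump.
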
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 29aff55f2eea..2e2700e3a5ab 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2011 Emulex 2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 3c9b4f12e3e5..f286ad2da1ff 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2011 Emulex 2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -2667,10 +2667,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
2667 cmd.size = sizeof(struct be_cmd_req_set_mac_list); 2667 cmd.size = sizeof(struct be_cmd_req_set_mac_list);
2668 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, 2668 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
2669 &cmd.dma, GFP_KERNEL); 2669 &cmd.dma, GFP_KERNEL);
2670 if (!cmd.va) { 2670 if (!cmd.va)
2671 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2672 return -ENOMEM; 2671 return -ENOMEM;
2673 }
2674 2672
2675 spin_lock_bh(&adapter->mcc_lock); 2673 spin_lock_bh(&adapter->mcc_lock);
2676 2674
@@ -3202,6 +3200,31 @@ err:
3202 return status; 3200 return status;
3203} 3201}
3204 3202
3203int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
3204{
3205 struct be_mcc_wrb *wrb;
3206 struct be_cmd_req_intr_set *req;
3207 int status;
3208
3209 if (mutex_lock_interruptible(&adapter->mbox_lock))
3210 return -1;
3211
3212 wrb = wrb_from_mbox(adapter);
3213
3214 req = embedded_payload(wrb);
3215
3216 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3217 OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
3218 wrb, NULL);
3219
3220 req->intr_enabled = intr_enable;
3221
3222 status = be_mbox_notify_wait(adapter);
3223
3224 mutex_unlock(&adapter->mbox_lock);
3225 return status;
3226}
3227
3205int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, 3228int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
3206 int wrb_payload_size, u16 *cmd_status, u16 *ext_status) 3229 int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
3207{ 3230{
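
be_cmd_intr_set(), added above, follows the driver's standard mailbox recipe: take mbox_lock interruptibly, grab the mailbox WRB, prepare a COMMON-subsystem header with the new SET_INTERRUPT_ENABLE opcode, post it with be_mbox_notify_wait(), and unlock. A hedged caller sketch (the dev_warn() is illustrative; the real caller introduced later in this diff silently falls back to a register write instead):

    /* Hypothetical caller: ask firmware to gate this function's
     * interrupts before teardown. */
    static void example_quiesce(struct be_adapter *adapter)
    {
            int status = be_cmd_intr_set(adapter, false);

            if (status)
                    dev_warn(&adapter->pdev->dev,
                             "SET_INTERRUPT_ENABLE failed: %d\n", status);
    }
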
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 96970860c915..f2af85517218 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2011 Emulex 2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -188,6 +188,7 @@ struct be_mcc_mailbox {
188#define OPCODE_COMMON_GET_BEACON_STATE 70 188#define OPCODE_COMMON_GET_BEACON_STATE 70
189#define OPCODE_COMMON_READ_TRANSRECV_DATA 73 189#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
190#define OPCODE_COMMON_GET_PORT_NAME 77 190#define OPCODE_COMMON_GET_PORT_NAME 77
191#define OPCODE_COMMON_SET_INTERRUPT_ENABLE 89
191#define OPCODE_COMMON_GET_PHY_DETAILS 102 192#define OPCODE_COMMON_GET_PHY_DETAILS 102
192#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103 193#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
193#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121 194#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
@@ -1791,6 +1792,12 @@ struct be_cmd_enable_disable_vf {
1791 u8 rsvd[3]; 1792 u8 rsvd[3];
1792}; 1793};
1793 1794
1795struct be_cmd_req_intr_set {
1796 struct be_cmd_req_hdr hdr;
1797 u8 intr_enabled;
1798 u8 rsvd[3];
1799};
1800
1794static inline bool check_privilege(struct be_adapter *adapter, u32 flags) 1801static inline bool check_privilege(struct be_adapter *adapter, u32 flags)
1795{ 1802{
1796 return flags & adapter->cmd_privileges ? true : false; 1803 return flags & adapter->cmd_privileges ? true : false;
@@ -1938,3 +1945,4 @@ extern int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
1938extern int be_cmd_get_if_id(struct be_adapter *adapter, 1945extern int be_cmd_get_if_id(struct be_adapter *adapter,
1939 struct be_vf_cfg *vf_cfg, int vf_num); 1946 struct be_vf_cfg *vf_cfg, int vf_num);
1940extern int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain); 1947extern int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain);
1948extern int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable);
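
Two details worth noting in the header changes: opcode 89 is inserted where it keeps the OPCODE_COMMON_* list numerically ordered, and be_cmd_req_intr_set pads its single enable byte with three reserved bytes, matching the be_cmd_enable_disable_vf layout just above it and keeping the request payload 4-byte aligned.
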
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 76b302f30c87..07b7f27cb0b9 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2011 Emulex 2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -719,10 +719,8 @@ be_test_ddr_dma(struct be_adapter *adapter)
719 ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); 719 ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
720 ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size, 720 ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
721 &ddrdma_cmd.dma, GFP_KERNEL); 721 &ddrdma_cmd.dma, GFP_KERNEL);
722 if (!ddrdma_cmd.va) { 722 if (!ddrdma_cmd.va)
723 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
724 return -ENOMEM; 723 return -ENOMEM;
725 }
726 724
727 for (i = 0; i < 2; i++) { 725 for (i = 0; i < 2; i++) {
728 ret = be_cmd_ddr_dma_test(adapter, pattern[i], 726 ret = be_cmd_ddr_dma_test(adapter, pattern[i],
@@ -845,11 +843,8 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
845 eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size, 843 eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
846 &eeprom_cmd.dma, GFP_KERNEL); 844 &eeprom_cmd.dma, GFP_KERNEL);
847 845
848 if (!eeprom_cmd.va) { 846 if (!eeprom_cmd.va)
849 dev_err(&adapter->pdev->dev,
850 "Memory allocation failure. Could not read eeprom\n");
851 return -ENOMEM; 847 return -ENOMEM;
852 }
853 848
854 status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd); 849 status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
855 850
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 62dc220695f7..89e6d8cfaf0d 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2011 Emulex 2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 08e54f3d288b..536afa2fb94c 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2011 Emulex 2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
@@ -146,20 +146,16 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
146 q->entry_size = entry_size; 146 q->entry_size = entry_size;
147 mem->size = len * entry_size; 147 mem->size = len * entry_size;
148 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma, 148 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149 GFP_KERNEL); 149 GFP_KERNEL | __GFP_ZERO);
150 if (!mem->va) 150 if (!mem->va)
151 return -ENOMEM; 151 return -ENOMEM;
152 memset(mem->va, 0, mem->size);
153 return 0; 152 return 0;
154} 153}
155 154
156static void be_intr_set(struct be_adapter *adapter, bool enable) 155static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
157{ 156{
158 u32 reg, enabled; 157 u32 reg, enabled;
159 158
160 if (adapter->eeh_error)
161 return;
162
163 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, 159 pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
164 &reg); 160 &reg);
165 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK; 161 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
@@ -175,6 +171,22 @@ static void be_intr_set(struct be_adapter *adapter, bool enable)
175 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg); 171 PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
176} 172}
177 173
174static void be_intr_set(struct be_adapter *adapter, bool enable)
175{
176 int status = 0;
177
178 /* On lancer interrupts can't be controlled via this register */
179 if (lancer_chip(adapter))
180 return;
181
182 if (adapter->eeh_error)
183 return;
184
185 status = be_cmd_intr_set(adapter, enable);
186 if (status)
187 be_reg_intr_set(adapter, enable);
188}
189
178static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted) 190static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
179{ 191{
180 u32 val = 0; 192 u32 val = 0;
@@ -2435,9 +2447,6 @@ static int be_close(struct net_device *netdev)
2435 2447
2436 be_roce_dev_close(adapter); 2448 be_roce_dev_close(adapter);
2437 2449
2438 if (!lancer_chip(adapter))
2439 be_intr_set(adapter, false);
2440
2441 for_all_evt_queues(adapter, eqo, i) 2450 for_all_evt_queues(adapter, eqo, i)
2442 napi_disable(&eqo->napi); 2451 napi_disable(&eqo->napi);
2443 2452
@@ -2525,9 +2534,6 @@ static int be_open(struct net_device *netdev)
2525 2534
2526 be_irq_register(adapter); 2535 be_irq_register(adapter);
2527 2536
2528 if (!lancer_chip(adapter))
2529 be_intr_set(adapter, true);
2530
2531 for_all_rx_queues(adapter, rxo, i) 2537 for_all_rx_queues(adapter, rxo, i)
2532 be_cq_notify(adapter, rxo->cq.id, true, 0); 2538 be_cq_notify(adapter, rxo->cq.id, true, 0);
2533 2539
@@ -2562,10 +2568,9 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
2562 2568
2563 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config); 2569 cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2564 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 2570 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2565 GFP_KERNEL); 2571 GFP_KERNEL | __GFP_ZERO);
2566 if (cmd.va == NULL) 2572 if (cmd.va == NULL)
2567 return -1; 2573 return -1;
2568 memset(cmd.va, 0, cmd.size);
2569 2574
2570 if (enable) { 2575 if (enable) {
2571 status = pci_write_config_dword(adapter->pdev, 2576 status = pci_write_config_dword(adapter->pdev,
@@ -3457,11 +3462,9 @@ static int lancer_fw_download(struct be_adapter *adapter,
3457 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) 3462 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3458 + LANCER_FW_DOWNLOAD_CHUNK; 3463 + LANCER_FW_DOWNLOAD_CHUNK;
3459 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size, 3464 flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3460 &flash_cmd.dma, GFP_KERNEL); 3465 &flash_cmd.dma, GFP_KERNEL);
3461 if (!flash_cmd.va) { 3466 if (!flash_cmd.va) {
3462 status = -ENOMEM; 3467 status = -ENOMEM;
3463 dev_err(&adapter->pdev->dev,
3464 "Memory allocation failure while flashing\n");
3465 goto lancer_fw_exit; 3468 goto lancer_fw_exit;
3466 } 3469 }
3467 3470
@@ -3563,8 +3566,6 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
3563 &flash_cmd.dma, GFP_KERNEL); 3566 &flash_cmd.dma, GFP_KERNEL);
3564 if (!flash_cmd.va) { 3567 if (!flash_cmd.va) {
3565 status = -ENOMEM; 3568 status = -ENOMEM;
3566 dev_err(&adapter->pdev->dev,
3567 "Memory allocation failure while flashing\n");
3568 goto be_fw_exit; 3569 goto be_fw_exit;
3569 } 3570 }
3570 3571
@@ -3791,12 +3792,13 @@ static int be_ctrl_init(struct be_adapter *adapter)
3791 3792
3792 rx_filter->size = sizeof(struct be_cmd_req_rx_filter); 3793 rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3793 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size, 3794 rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3794 &rx_filter->dma, GFP_KERNEL); 3795 &rx_filter->dma,
3796 GFP_KERNEL | __GFP_ZERO);
3795 if (rx_filter->va == NULL) { 3797 if (rx_filter->va == NULL) {
3796 status = -ENOMEM; 3798 status = -ENOMEM;
3797 goto free_mbox; 3799 goto free_mbox;
3798 } 3800 }
3799 memset(rx_filter->va, 0, rx_filter->size); 3801
3800 mutex_init(&adapter->mbox_lock); 3802 mutex_init(&adapter->mbox_lock);
3801 spin_lock_init(&adapter->mcc_lock); 3803 spin_lock_init(&adapter->mcc_lock);
3802 spin_lock_init(&adapter->mcc_cq_lock); 3804 spin_lock_init(&adapter->mcc_cq_lock);
@@ -3838,10 +3840,9 @@ static int be_stats_init(struct be_adapter *adapter)
3838 cmd->size = sizeof(struct be_cmd_req_get_stats_v1); 3840 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3839 3841
3840 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma, 3842 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3841 GFP_KERNEL); 3843 GFP_KERNEL | __GFP_ZERO);
3842 if (cmd->va == NULL) 3844 if (cmd->va == NULL)
3843 return -1; 3845 return -1;
3844 memset(cmd->va, 0, cmd->size);
3845 return 0; 3846 return 0;
3846} 3847}
3847 3848
@@ -3853,6 +3854,7 @@ static void be_remove(struct pci_dev *pdev)
3853 return; 3854 return;
3854 3855
3855 be_roce_dev_remove(adapter); 3856 be_roce_dev_remove(adapter);
3857 be_intr_set(adapter, false);
3856 3858
3857 cancel_delayed_work_sync(&adapter->func_recovery_work); 3859 cancel_delayed_work_sync(&adapter->func_recovery_work);
3858 3860
@@ -4142,11 +4144,11 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4142 goto ctrl_clean; 4144 goto ctrl_clean;
4143 } 4145 }
4144 4146
4145 /* The INTR bit may be set in the card when probed by a kdump kernel 4147 /* Wait for interrupts to quiesce after an FLR */
4146 * after a crash. 4148 msleep(100);
4147 */ 4149
4148 if (!lancer_chip(adapter)) 4150 /* Allow interrupts for other ULPs running on NIC function */
4149 be_intr_set(adapter, false); 4151 be_intr_set(adapter, true);
4150 4152
4151 status = be_stats_init(adapter); 4153 status = be_stats_init(adapter);
4152 if (status) 4154 if (status)
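
The be_main.c changes above re-plumb interrupt control around the new firmware command. be_intr_set() becomes a policy wrapper: a no-op on Lancer (whose interrupts are not controlled through this path) and under an EEH error state, it tries be_cmd_intr_set() first and falls back to the raw MEMBAR register write (be_reg_intr_set()) only if the command fails. The per-open/close toggling disappears; instead, probe enables interrupts once, after a 100 ms pause for post-FLR quiescing, so that other ULPs such as RoCE sharing the NIC function keep receiving events, and remove disables them. A condensed sketch of the new probe tail, with types and helpers assumed from the patch:

    #include <linux/delay.h>

    static int example_probe_tail(struct be_adapter *adapter)
    {
            msleep(100);              /* let FLR-time interrupts settle */
            be_intr_set(adapter, true); /* left on for co-resident ULPs */
            return be_stats_init(adapter);
    }
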
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
index 55d32aa0a093..f3d126dcc104 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.c
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2011 Emulex 2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h
index db4ea8081c07..276572998463 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.h
+++ b/drivers/net/ethernet/emulex/benet/be_roce.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2005 - 2011 Emulex 2 * Copyright (C) 2005 - 2013 Emulex
3 * All rights reserved. 3 * All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 7c361d1db94c..21b85fb7d05f 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -780,12 +780,11 @@ static int ftgmac100_alloc_buffers(struct ftgmac100 *priv)
780 780
781 priv->descs = dma_alloc_coherent(priv->dev, 781 priv->descs = dma_alloc_coherent(priv->dev,
782 sizeof(struct ftgmac100_descs), 782 sizeof(struct ftgmac100_descs),
783 &priv->descs_dma_addr, GFP_KERNEL); 783 &priv->descs_dma_addr,
784 GFP_KERNEL | __GFP_ZERO);
784 if (!priv->descs) 785 if (!priv->descs)
785 return -ENOMEM; 786 return -ENOMEM;
786 787
787 memset(priv->descs, 0, sizeof(struct ftgmac100_descs));
788
789 /* initialize RX ring */ 788 /* initialize RX ring */
790 ftgmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]); 789 ftgmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);
791 790
@@ -1350,22 +1349,7 @@ static struct platform_driver ftgmac100_driver = {
1350 }, 1349 },
1351}; 1350};
1352 1351
1353/****************************************************************************** 1352module_platform_driver(ftgmac100_driver);
1354 * initialization / finalization
1355 *****************************************************************************/
1356static int __init ftgmac100_init(void)
1357{
1358 pr_info("Loading version " DRV_VERSION " ...\n");
1359 return platform_driver_register(&ftgmac100_driver);
1360}
1361
1362static void __exit ftgmac100_exit(void)
1363{
1364 platform_driver_unregister(&ftgmac100_driver);
1365}
1366
1367module_init(ftgmac100_init);
1368module_exit(ftgmac100_exit);
1369 1353
1370MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>"); 1354MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>");
1371MODULE_DESCRIPTION("FTGMAC100 driver"); 1355MODULE_DESCRIPTION("FTGMAC100 driver");
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index b5ea8fbd8a76..a6eda8d83138 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -732,13 +732,13 @@ static int ftmac100_alloc_buffers(struct ftmac100 *priv)
732{ 732{
733 int i; 733 int i;
734 734
735 priv->descs = dma_alloc_coherent(priv->dev, sizeof(struct ftmac100_descs), 735 priv->descs = dma_alloc_coherent(priv->dev,
736 &priv->descs_dma_addr, GFP_KERNEL); 736 sizeof(struct ftmac100_descs),
737 &priv->descs_dma_addr,
738 GFP_KERNEL | __GFP_ZERO);
737 if (!priv->descs) 739 if (!priv->descs)
738 return -ENOMEM; 740 return -ENOMEM;
739 741
740 memset(priv->descs, 0, sizeof(struct ftmac100_descs));
741
742 /* initialize RX ring */ 742 /* initialize RX ring */
743 ftmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]); 743 ftmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]);
744 744
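
Three conversions in this series (be2net above, ftgmac100 and ftmac100 here) fold an allocate-then-memset pair into a single zeroed allocation by adding __GFP_ZERO to the gfp mask. A minimal sketch, assuming a DMA-capable struct device:

    #include <linux/dma-mapping.h>

    static void *example_alloc_descs(struct device *dev, size_t size,
                                     dma_addr_t *dma)
    {
            /* Before:
             *      p = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
             *      if (p)
             *              memset(p, 0, size);
             * After: ask the allocator for zeroed memory up front,
             * dropping the explicit memset in the caller. */
            return dma_alloc_coherent(dev, size, dma,
                                      GFP_KERNEL | __GFP_ZERO);
    }
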
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index b7d58fe6f531..549ce13b92ac 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -2,7 +2,8 @@
2# Makefile for the Freescale network device drivers. 2# Makefile for the Freescale network device drivers.
3# 3#
4 4
5obj-$(CONFIG_FEC) += fec.o fec_ptp.o 5obj-$(CONFIG_FEC) += fec.o
6fec-objs :=fec_main.o fec_ptp.o
6obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o 7obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx.o
7ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y) 8ifeq ($(CONFIG_FEC_MPC52xx_MDIO),y)
8 obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o 9 obj-$(CONFIG_FEC_MPC52xx) += fec_mpc52xx_phy.o
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec_main.c
index f292c3aa423f..621d07565dce 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -29,7 +29,6 @@
29#include <linux/ioport.h> 29#include <linux/ioport.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/interrupt.h> 31#include <linux/interrupt.h>
32#include <linux/pci.h>
33#include <linux/init.h> 32#include <linux/init.h>
34#include <linux/delay.h> 33#include <linux/delay.h>
35#include <linux/netdevice.h> 34#include <linux/netdevice.h>
@@ -791,8 +790,6 @@ fec_enet_rx(struct net_device *ndev, int budget)
791 skb = netdev_alloc_skb(ndev, pkt_len - 4 + NET_IP_ALIGN); 790 skb = netdev_alloc_skb(ndev, pkt_len - 4 + NET_IP_ALIGN);
792 791
793 if (unlikely(!skb)) { 792 if (unlikely(!skb)) {
794 printk("%s: Memory squeeze, dropping packet.\n",
795 ndev->name);
796 ndev->stats.rx_dropped++; 793 ndev->stats.rx_dropped++;
797 } else { 794 } else {
798 skb_reserve(skb, NET_IP_ALIGN); 795 skb_reserve(skb, NET_IP_ALIGN);
@@ -916,7 +913,6 @@ static void fec_get_mac(struct net_device *ndev)
916 */ 913 */
917 iap = macaddr; 914 iap = macaddr;
918 915
919#ifdef CONFIG_OF
920 /* 916 /*
921 * 2) from device tree data 917 * 2) from device tree data
922 */ 918 */
@@ -928,7 +924,6 @@ static void fec_get_mac(struct net_device *ndev)
928 iap = (unsigned char *) mac; 924 iap = (unsigned char *) mac;
929 } 925 }
930 } 926 }
931#endif
932 927
933 /* 928 /*
934 * 3) from flash or fuse (via platform data) 929 * 3) from flash or fuse (via platform data)
@@ -1442,7 +1437,7 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
1442 1437
1443 if (fep->bufdesc_ex) { 1438 if (fep->bufdesc_ex) {
1444 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 1439 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1445 ebdp->cbd_esc = BD_ENET_RX_INT; 1440 ebdp->cbd_esc = BD_ENET_TX_INT;
1446 } 1441 }
1447 1442
1448 bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex); 1443 bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
@@ -1607,7 +1602,7 @@ fec_set_mac_address(struct net_device *ndev, void *p)
1607 * Polled functionality used by netconsole and others in non interrupt mode 1602 * Polled functionality used by netconsole and others in non interrupt mode
1608 * 1603 *
1609 */ 1604 */
1610void fec_poll_controller(struct net_device *dev) 1605static void fec_poll_controller(struct net_device *dev)
1611{ 1606{
1612 int i; 1607 int i;
1613 struct fec_enet_private *fep = netdev_priv(dev); 1608 struct fec_enet_private *fep = netdev_priv(dev);
@@ -1648,11 +1643,9 @@ static int fec_enet_init(struct net_device *ndev)
1648 1643
1649 /* Allocate memory for buffer descriptors. */ 1644 /* Allocate memory for buffer descriptors. */
1650 cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma, 1645 cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
1651 GFP_KERNEL); 1646 GFP_KERNEL);
1652 if (!cbd_base) { 1647 if (!cbd_base)
1653 printk("FEC: allocate descriptor memory failed?\n");
1654 return -ENOMEM; 1648 return -ENOMEM;
1655 }
1656 1649
1657 memset(cbd_base, 0, PAGE_SIZE); 1650 memset(cbd_base, 0, PAGE_SIZE);
1658 spin_lock_init(&fep->hw_lock); 1651 spin_lock_init(&fep->hw_lock);
@@ -1684,16 +1677,6 @@ static int fec_enet_init(struct net_device *ndev)
1684} 1677}
1685 1678
1686#ifdef CONFIG_OF 1679#ifdef CONFIG_OF
1687static int fec_get_phy_mode_dt(struct platform_device *pdev)
1688{
1689 struct device_node *np = pdev->dev.of_node;
1690
1691 if (np)
1692 return of_get_phy_mode(np);
1693
1694 return -ENODEV;
1695}
1696
1697static void fec_reset_phy(struct platform_device *pdev) 1680static void fec_reset_phy(struct platform_device *pdev)
1698{ 1681{
1699 int err, phy_reset; 1682 int err, phy_reset;
@@ -1722,11 +1705,6 @@ static void fec_reset_phy(struct platform_device *pdev)
1722 gpio_set_value(phy_reset, 1); 1705 gpio_set_value(phy_reset, 1);
1723} 1706}
1724#else /* CONFIG_OF */ 1707#else /* CONFIG_OF */
1725static int fec_get_phy_mode_dt(struct platform_device *pdev)
1726{
1727 return -ENODEV;
1728}
1729
1730static void fec_reset_phy(struct platform_device *pdev) 1708static void fec_reset_phy(struct platform_device *pdev)
1731{ 1709{
1732 /* 1710 /*
@@ -1757,16 +1735,10 @@ fec_probe(struct platform_device *pdev)
1757 if (!r) 1735 if (!r)
1758 return -ENXIO; 1736 return -ENXIO;
1759 1737
1760 r = request_mem_region(r->start, resource_size(r), pdev->name);
1761 if (!r)
1762 return -EBUSY;
1763
1764 /* Init network device */ 1738 /* Init network device */
1765 ndev = alloc_etherdev(sizeof(struct fec_enet_private)); 1739 ndev = alloc_etherdev(sizeof(struct fec_enet_private));
1766 if (!ndev) { 1740 if (!ndev)
1767 ret = -ENOMEM; 1741 return -ENOMEM;
1768 goto failed_alloc_etherdev;
1769 }
1770 1742
1771 SET_NETDEV_DEV(ndev, &pdev->dev); 1743 SET_NETDEV_DEV(ndev, &pdev->dev);
1772 1744
@@ -1778,7 +1750,7 @@ fec_probe(struct platform_device *pdev)
1778 (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT)) 1750 (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT))
1779 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; 1751 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
1780 1752
1781 fep->hwp = ioremap(r->start, resource_size(r)); 1753 fep->hwp = devm_request_and_ioremap(&pdev->dev, r);
1782 fep->pdev = pdev; 1754 fep->pdev = pdev;
1783 fep->dev_id = dev_id++; 1755 fep->dev_id = dev_id++;
1784 1756
@@ -1791,7 +1763,7 @@ fec_probe(struct platform_device *pdev)
1791 1763
1792 platform_set_drvdata(pdev, ndev); 1764 platform_set_drvdata(pdev, ndev);
1793 1765
1794 ret = fec_get_phy_mode_dt(pdev); 1766 ret = of_get_phy_mode(pdev->dev.of_node);
1795 if (ret < 0) { 1767 if (ret < 0) {
1796 pdata = pdev->dev.platform_data; 1768 pdata = pdev->dev.platform_data;
1797 if (pdata) 1769 if (pdata)
@@ -1900,11 +1872,8 @@ failed_regulator:
1900 clk_disable_unprepare(fep->clk_ptp); 1872 clk_disable_unprepare(fep->clk_ptp);
1901failed_pin: 1873failed_pin:
1902failed_clk: 1874failed_clk:
1903 iounmap(fep->hwp);
1904failed_ioremap: 1875failed_ioremap:
1905 free_netdev(ndev); 1876 free_netdev(ndev);
1906failed_alloc_etherdev:
1907 release_mem_region(r->start, resource_size(r));
1908 1877
1909 return ret; 1878 return ret;
1910} 1879}
@@ -1914,7 +1883,6 @@ fec_drv_remove(struct platform_device *pdev)
1914{ 1883{
1915 struct net_device *ndev = platform_get_drvdata(pdev); 1884 struct net_device *ndev = platform_get_drvdata(pdev);
1916 struct fec_enet_private *fep = netdev_priv(ndev); 1885 struct fec_enet_private *fep = netdev_priv(ndev);
1917 struct resource *r;
1918 int i; 1886 int i;
1919 1887
1920 unregister_netdev(ndev); 1888 unregister_netdev(ndev);
@@ -1930,13 +1898,8 @@ fec_drv_remove(struct platform_device *pdev)
1930 if (irq > 0) 1898 if (irq > 0)
1931 free_irq(irq, ndev); 1899 free_irq(irq, ndev);
1932 } 1900 }
1933 iounmap(fep->hwp);
1934 free_netdev(ndev); 1901 free_netdev(ndev);
1935 1902
1936 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1937 BUG_ON(!r);
1938 release_mem_region(r->start, resource_size(r));
1939
1940 platform_set_drvdata(pdev, NULL); 1903 platform_set_drvdata(pdev, NULL);
1941 1904
1942 return 0; 1905 return 0;
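
Two simplifications drive most of the fec_main.c churn above. First, devm_request_and_ioremap() replaces the request_mem_region()/ioremap() pair, which lets the error path shed its iounmap/release_mem_region unwinding and lets fec_drv_remove() stop re-looking-up the resource just to release it. Second, the fec_get_phy_mode_dt() wrappers go away because of_get_phy_mode() copes with a missing node or property by returning a negative errno, which the existing platform-data fallback already handles. A condensed probe sketch under those assumptions (names are illustrative, body truncated after the mapping):

    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            struct resource *r;
            void __iomem *base;

            r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            if (!r)
                    return -ENXIO;

            /* Claims and maps the region in one step; both are
             * released automatically when the device is unbound,
             * so no unwind labels are needed. */
            base = devm_request_and_ioremap(&pdev->dev, r);
            if (!base)
                    return -EADDRNOTAVAIL;

            return 0;       /* rest of probe elided */
    }
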
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 0d8df400a479..1f17ca0f2201 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -128,7 +128,6 @@ void fec_ptp_start_cyclecounter(struct net_device *ndev)
128 128
129 spin_unlock_irqrestore(&fep->tmreg_lock, flags); 129 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
130} 130}
131EXPORT_SYMBOL(fec_ptp_start_cyclecounter);
132 131
133/** 132/**
134 * fec_ptp_adjfreq - adjust ptp cycle frequency 133 * fec_ptp_adjfreq - adjust ptp cycle frequency
@@ -319,7 +318,6 @@ int fec_ptp_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
319 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 318 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
320 -EFAULT : 0; 319 -EFAULT : 0;
321} 320}
322EXPORT_SYMBOL(fec_ptp_ioctl);
323 321
324/** 322/**
325 * fec_time_keep - call timecounter_read every second to avoid timer overrun 323 * fec_time_keep - call timecounter_read every second to avoid timer overrun
@@ -385,4 +383,3 @@ void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev)
385 pr_info("registered PHC device on %s\n", ndev->name); 383 pr_info("registered PHC device on %s\n", ndev->name);
386 } 384 }
387} 385}
388EXPORT_SYMBOL(fec_ptp_init);
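
These EXPORT_SYMBOL removals pair with the Makefile change above: with fec-objs := fec_main.o fec_ptp.o, the PTP code is linked into the same fec module as its only users, so the symbols no longer cross a module boundary and the plain declarations shared between the two files suffice.
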
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 46df28893c10..edc120094c34 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -177,8 +177,6 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
177 received++; 177 received++;
178 netif_receive_skb(skb); 178 netif_receive_skb(skb);
179 } else { 179 } else {
180 dev_warn(fep->dev,
181 "Memory squeeze, dropping packet.\n");
182 fep->stats.rx_dropped++; 180 fep->stats.rx_dropped++;
183 skbn = skb; 181 skbn = skb;
184 } 182 }
@@ -309,8 +307,6 @@ static int fs_enet_rx_non_napi(struct net_device *dev)
309 received++; 307 received++;
310 netif_rx(skb); 308 netif_rx(skb);
311 } else { 309 } else {
312 dev_warn(fep->dev,
313 "Memory squeeze, dropping packet.\n");
314 fep->stats.rx_dropped++; 310 fep->stats.rx_dropped++;
315 skbn = skb; 311 skbn = skb;
316 } 312 }
@@ -505,11 +501,9 @@ void fs_init_bds(struct net_device *dev)
505 */ 501 */
506 for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) { 502 for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
507 skb = netdev_alloc_skb(dev, ENET_RX_FRSIZE); 503 skb = netdev_alloc_skb(dev, ENET_RX_FRSIZE);
508 if (skb == NULL) { 504 if (skb == NULL)
509 dev_warn(fep->dev,
510 "Memory squeeze, unable to allocate skb\n");
511 break; 505 break;
512 } 506
513 skb_align(skb, ENET_RX_ALIGN); 507 skb_align(skb, ENET_RX_ALIGN);
514 fep->rx_skbuff[i] = skb; 508 fep->rx_skbuff[i] = skb;
515 CBDW_BUFADDR(bdp, 509 CBDW_BUFADDR(bdp,
@@ -593,13 +587,8 @@ static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
593 587
594 /* Alloc new skb */ 588 /* Alloc new skb */
595 new_skb = netdev_alloc_skb(dev, skb->len + 4); 589 new_skb = netdev_alloc_skb(dev, skb->len + 4);
596 if (!new_skb) { 590 if (!new_skb)
597 if (net_ratelimit()) {
598 dev_warn(fep->dev,
599 "Memory squeeze, dropping tx packet.\n");
600 }
601 return NULL; 591 return NULL;
602 }
603 592
604 /* Make sure new skb is properly aligned */ 593 /* Make sure new skb is properly aligned */
605 skb_align(new_skb, 4); 594 skb_align(new_skb, 4);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index d2c5441d1bf0..96fbe3548243 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -132,7 +132,7 @@ static int gfar_poll(struct napi_struct *napi, int budget);
132static void gfar_netpoll(struct net_device *dev); 132static void gfar_netpoll(struct net_device *dev);
133#endif 133#endif
134int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit); 134int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
135static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue); 135static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
136static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb, 136static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
137 int amount_pull, struct napi_struct *napi); 137 int amount_pull, struct napi_struct *napi);
138void gfar_halt(struct net_device *dev); 138void gfar_halt(struct net_device *dev);
@@ -245,14 +245,13 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
245 245
246 /* Allocate memory for the buffer descriptors */ 246 /* Allocate memory for the buffer descriptors */
247 vaddr = dma_alloc_coherent(dev, 247 vaddr = dma_alloc_coherent(dev,
248 sizeof(struct txbd8) * priv->total_tx_ring_size + 248 (priv->total_tx_ring_size *
249 sizeof(struct rxbd8) * priv->total_rx_ring_size, 249 sizeof(struct txbd8)) +
250 &addr, GFP_KERNEL); 250 (priv->total_rx_ring_size *
251 if (!vaddr) { 251 sizeof(struct rxbd8)),
252 netif_err(priv, ifup, ndev, 252 &addr, GFP_KERNEL);
253 "Could not allocate buffer descriptors!\n"); 253 if (!vaddr)
254 return -ENOMEM; 254 return -ENOMEM;
255 }
256 255
257 for (i = 0; i < priv->num_tx_queues; i++) { 256 for (i = 0; i < priv->num_tx_queues; i++) {
258 tx_queue = priv->tx_queue[i]; 257 tx_queue = priv->tx_queue[i];
@@ -342,7 +341,7 @@ static void gfar_init_mac(struct net_device *ndev)
342 gfar_init_tx_rx_base(priv); 341 gfar_init_tx_rx_base(priv);
343 342
344 /* Configure the coalescing support */ 343 /* Configure the coalescing support */
345 gfar_configure_coalescing(priv, 0xFF, 0xFF); 344 gfar_configure_coalescing_all(priv);
346 345
347 /* set this when rx hw offload (TOE) functions are being used */ 346 /* set this when rx hw offload (TOE) functions are being used */
348 priv->uses_rxfcb = 0; 347 priv->uses_rxfcb = 0;
@@ -691,7 +690,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
691 } 690 }
692 691
693 for (i = 0; i < priv->num_tx_queues; i++) 692 for (i = 0; i < priv->num_tx_queues; i++)
694 priv->tx_queue[i] = NULL; 693 priv->tx_queue[i] = NULL;
695 for (i = 0; i < priv->num_rx_queues; i++) 694 for (i = 0; i < priv->num_rx_queues; i++)
696 priv->rx_queue[i] = NULL; 695 priv->rx_queue[i] = NULL;
697 696
@@ -1817,25 +1816,15 @@ void gfar_start(struct net_device *dev)
1817 dev->trans_start = jiffies; /* prevent tx timeout */ 1816 dev->trans_start = jiffies; /* prevent tx timeout */
1818} 1817}
1819 1818
1820void gfar_configure_coalescing(struct gfar_private *priv, 1819static void gfar_configure_coalescing(struct gfar_private *priv,
1821 unsigned long tx_mask, unsigned long rx_mask) 1820 unsigned long tx_mask, unsigned long rx_mask)
1822{ 1821{
1823 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1822 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1824 u32 __iomem *baddr; 1823 u32 __iomem *baddr;
1825 int i = 0;
1826
1827 /* Backward compatible case ---- even if we enable
1828 * multiple queues, there's only single reg to program
1829 */
1830 gfar_write(&regs->txic, 0);
1831 if (likely(priv->tx_queue[0]->txcoalescing))
1832 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
1833
1834 gfar_write(&regs->rxic, 0);
1835 if (unlikely(priv->rx_queue[0]->rxcoalescing))
1836 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
1837 1824
1838 if (priv->mode == MQ_MG_MODE) { 1825 if (priv->mode == MQ_MG_MODE) {
1826 int i = 0;
1827
1839 baddr = &regs->txic0; 1828 baddr = &regs->txic0;
1840 for_each_set_bit(i, &tx_mask, priv->num_tx_queues) { 1829 for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
1841 gfar_write(baddr + i, 0); 1830 gfar_write(baddr + i, 0);
@@ -1849,9 +1838,25 @@ void gfar_configure_coalescing(struct gfar_private *priv,
1849 if (likely(priv->rx_queue[i]->rxcoalescing)) 1838 if (likely(priv->rx_queue[i]->rxcoalescing))
1850 gfar_write(baddr + i, priv->rx_queue[i]->rxic); 1839 gfar_write(baddr + i, priv->rx_queue[i]->rxic);
1851 } 1840 }
1841 } else {
1842 /* Backward compatible case -- even if we enable
1843 * multiple queues, there's only single reg to program
1844 */
1845 gfar_write(&regs->txic, 0);
1846 if (likely(priv->tx_queue[0]->txcoalescing))
1847 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
1848
1849 gfar_write(&regs->rxic, 0);
1850 if (unlikely(priv->rx_queue[0]->rxcoalescing))
1851 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
1852 } 1852 }
1853} 1853}
1854 1854
1855void gfar_configure_coalescing_all(struct gfar_private *priv)
1856{
1857 gfar_configure_coalescing(priv, 0xFF, 0xFF);
1858}
1859
1855static int register_grp_irqs(struct gfar_priv_grp *grp) 1860static int register_grp_irqs(struct gfar_priv_grp *grp)
1856{ 1861{
1857 struct gfar_private *priv = grp->priv; 1862 struct gfar_private *priv = grp->priv;
@@ -1941,7 +1946,7 @@ int startup_gfar(struct net_device *ndev)
1941 1946
1942 phy_start(priv->phydev); 1947 phy_start(priv->phydev);
1943 1948
1944 gfar_configure_coalescing(priv, 0xFF, 0xFF); 1949 gfar_configure_coalescing_all(priv);
1945 1950
1946 return 0; 1951 return 0;
1947 1952
@@ -2469,12 +2474,11 @@ static void gfar_align_skb(struct sk_buff *skb)
2469} 2474}
2470 2475
2471/* Interrupt Handler for Transmit complete */ 2476/* Interrupt Handler for Transmit complete */
2472static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue) 2477static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2473{ 2478{
2474 struct net_device *dev = tx_queue->dev; 2479 struct net_device *dev = tx_queue->dev;
2475 struct netdev_queue *txq; 2480 struct netdev_queue *txq;
2476 struct gfar_private *priv = netdev_priv(dev); 2481 struct gfar_private *priv = netdev_priv(dev);
2477 struct gfar_priv_rx_q *rx_queue = NULL;
2478 struct txbd8 *bdp, *next = NULL; 2482 struct txbd8 *bdp, *next = NULL;
2479 struct txbd8 *lbdp = NULL; 2483 struct txbd8 *lbdp = NULL;
2480 struct txbd8 *base = tx_queue->tx_bd_base; 2484 struct txbd8 *base = tx_queue->tx_bd_base;
@@ -2489,7 +2493,6 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2489 u32 lstatus; 2493 u32 lstatus;
2490 size_t buflen; 2494 size_t buflen;
2491 2495
2492 rx_queue = priv->rx_queue[tqi];
2493 txq = netdev_get_tx_queue(dev, tqi); 2496 txq = netdev_get_tx_queue(dev, tqi);
2494 bdp = tx_queue->dirty_tx; 2497 bdp = tx_queue->dirty_tx;
2495 skb_dirtytx = tx_queue->skb_dirtytx; 2498 skb_dirtytx = tx_queue->skb_dirtytx;
@@ -2571,8 +2574,6 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2571 tx_queue->dirty_tx = bdp; 2574 tx_queue->dirty_tx = bdp;
2572 2575
2573 netdev_tx_completed_queue(txq, howmany, bytes_sent); 2576 netdev_tx_completed_queue(txq, howmany, bytes_sent);
2574
2575 return howmany;
2576} 2577}
2577 2578
2578static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp) 2579static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
@@ -2694,8 +2695,6 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2694 struct gfar_private *priv = netdev_priv(dev); 2695 struct gfar_private *priv = netdev_priv(dev);
2695 struct rxfcb *fcb = NULL; 2696 struct rxfcb *fcb = NULL;
2696 2697
2697 gro_result_t ret;
2698
2699 /* fcb is at the beginning if exists */ 2698 /* fcb is at the beginning if exists */
2700 fcb = (struct rxfcb *)skb->data; 2699 fcb = (struct rxfcb *)skb->data;
2701 2700
@@ -2734,10 +2733,8 @@ static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2734 __vlan_hwaccel_put_tag(skb, fcb->vlctl); 2733 __vlan_hwaccel_put_tag(skb, fcb->vlctl);
2735 2734
2736 /* Send the packet up the stack */ 2735 /* Send the packet up the stack */
2737 ret = napi_gro_receive(napi, skb); 2736 napi_gro_receive(napi, skb);
2738 2737
2739 if (unlikely(GRO_DROP == ret))
2740 atomic64_inc(&priv->extra_stats.kernel_dropped);
2741} 2738}
2742 2739
2743/* gfar_clean_rx_ring() -- Processes each frame in the rx ring 2740/* gfar_clean_rx_ring() -- Processes each frame in the rx ring
@@ -2835,62 +2832,82 @@ static int gfar_poll(struct napi_struct *napi, int budget)
2835 struct gfar __iomem *regs = gfargrp->regs; 2832 struct gfar __iomem *regs = gfargrp->regs;
2836 struct gfar_priv_tx_q *tx_queue = NULL; 2833 struct gfar_priv_tx_q *tx_queue = NULL;
2837 struct gfar_priv_rx_q *rx_queue = NULL; 2834 struct gfar_priv_rx_q *rx_queue = NULL;
2838 int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0; 2835 int work_done = 0, work_done_per_q = 0;
2839 int tx_cleaned = 0, i, left_over_budget = budget; 2836 int i, budget_per_q = 0;
2840 unsigned long serviced_queues = 0; 2837 int has_tx_work;
2841 int num_queues = 0; 2838 unsigned long rstat_rxf;
2842 2839 int num_act_queues;
2843 num_queues = gfargrp->num_rx_queues;
2844 budget_per_queue = budget/num_queues;
2845 2840
2846 /* Clear IEVENT, so interrupts aren't called again 2841 /* Clear IEVENT, so interrupts aren't called again
2847 * because of the packets that have already arrived 2842 * because of the packets that have already arrived
2848 */ 2843 */
2849 gfar_write(&regs->ievent, IEVENT_RTX_MASK); 2844 gfar_write(&regs->ievent, IEVENT_RTX_MASK);
2850 2845
2851 while (num_queues && left_over_budget) { 2846 rstat_rxf = gfar_read(&regs->rstat) & RSTAT_RXF_MASK;
2852 budget_per_queue = left_over_budget/num_queues; 2847
2853 left_over_budget = 0; 2848 num_act_queues = bitmap_weight(&rstat_rxf, MAX_RX_QS);
2849 if (num_act_queues)
2850 budget_per_q = budget/num_act_queues;
2851
2852 while (1) {
2853 has_tx_work = 0;
2854 for_each_set_bit(i, &gfargrp->tx_bit_map, priv->num_tx_queues) {
2855 tx_queue = priv->tx_queue[i];
2856 /* run Tx cleanup to completion */
2857 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx]) {
2858 gfar_clean_tx_ring(tx_queue);
2859 has_tx_work = 1;
2860 }
2861 }
2854 2862
2855 for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) { 2863 for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
2856 if (test_bit(i, &serviced_queues)) 2864 /* skip queue if not active */
2865 if (!(rstat_rxf & (RSTAT_CLEAR_RXF0 >> i)))
2857 continue; 2866 continue;
2867
2858 rx_queue = priv->rx_queue[i]; 2868 rx_queue = priv->rx_queue[i];
2859 tx_queue = priv->tx_queue[rx_queue->qindex]; 2869 work_done_per_q =
2860 2870 gfar_clean_rx_ring(rx_queue, budget_per_q);
2861 tx_cleaned += gfar_clean_tx_ring(tx_queue); 2871 work_done += work_done_per_q;
2862 rx_cleaned_per_queue = 2872
2863 gfar_clean_rx_ring(rx_queue, budget_per_queue); 2873 /* finished processing this queue */
2864 rx_cleaned += rx_cleaned_per_queue; 2874 if (work_done_per_q < budget_per_q) {
2865 if (rx_cleaned_per_queue < budget_per_queue) { 2875 /* clear active queue hw indication */
2866 left_over_budget = left_over_budget + 2876 gfar_write(&regs->rstat,
2867 (budget_per_queue - 2877 RSTAT_CLEAR_RXF0 >> i);
2868 rx_cleaned_per_queue); 2878 rstat_rxf &= ~(RSTAT_CLEAR_RXF0 >> i);
2869 set_bit(i, &serviced_queues); 2879 num_act_queues--;
2870 num_queues--; 2880
2881 if (!num_act_queues)
2882 break;
2883 /* recompute budget per Rx queue */
2884 budget_per_q =
2885 (budget - work_done) / num_act_queues;
2871 } 2886 }
2872 } 2887 }
2873 }
2874 2888
2875 if (tx_cleaned) 2889 if (work_done >= budget)
2876 return budget; 2890 break;
2877 2891
2878 if (rx_cleaned < budget) { 2892 if (!num_act_queues && !has_tx_work) {
2879 napi_complete(napi);
2880 2893
2881 /* Clear the halt bit in RSTAT */ 2894 napi_complete(napi);
2882 gfar_write(&regs->rstat, gfargrp->rstat);
2883 2895
2884 gfar_write(&regs->imask, IMASK_DEFAULT); 2896 /* Clear the halt bit in RSTAT */
2897 gfar_write(&regs->rstat, gfargrp->rstat);
2885 2898
2886 /* If we are coalescing interrupts, update the timer 2899 gfar_write(&regs->imask, IMASK_DEFAULT);
2887 * Otherwise, clear it 2900
2888 */ 2901 /* If we are coalescing interrupts, update the timer
2889 gfar_configure_coalescing(priv, gfargrp->rx_bit_map, 2902 * Otherwise, clear it
2890 gfargrp->tx_bit_map); 2903 */
2904 gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
2905 gfargrp->tx_bit_map);
2906 break;
2907 }
2891 } 2908 }
2892 2909
2893 return rx_cleaned; 2910 return work_done;
2894} 2911}
2895 2912
2896#ifdef CONFIG_NET_POLL_CONTROLLER 2913#ifdef CONFIG_NET_POLL_CONTROLLER
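
The gfar_poll() rework above changes the NAPI budget policy: TX rings are cleaned to completion without being charged against the budget, the RSTAT RXF bits select only the RX queues that actually have frames pending, and as each queue drains, its leftover budget is re-split among the queues still active (the old code could only roll leftovers into the next pass of its outer loop). The following stand-alone toy model, plain C rather than driver code, isolates just the redistribution arithmetic:

    #include <stdio.h>

    /* pending[i]: frames waiting on RX queue i. Processes at most
     * "budget" frames; a queue that drains early donates its unused
     * share to the queues still active, as in the reworked poll. */
    static int poll_once(int pending[], int nq, int budget)
    {
            int work_done = 0, active = 0, per_q, i;

            for (i = 0; i < nq; i++)
                    if (pending[i] > 0)
                            active++;
            if (!active)
                    return 0;
            per_q = budget / active;

            for (i = 0; i < nq && active; i++) {
                    int done;

                    if (pending[i] <= 0)
                            continue;
                    done = pending[i] < per_q ? pending[i] : per_q;
                    pending[i] -= done;
                    work_done += done;
                    if (done < per_q && --active)   /* drained early */
                            per_q = (budget - work_done) / active;
            }
            return work_done;
    }

    int main(void)
    {
            int q[4] = { 3, 40, 0, 7 };

            /* A 64-frame budget over three active queues: queue 1
             * absorbs the shares queues 0 and 3 leave unused. */
            printf("processed %d\n", poll_once(q, 4, 64));
            return 0;
    }
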
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 63a28d294e20..04b552cd419d 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -291,7 +291,9 @@ extern const char gfar_driver_version[];
291#define RCTRL_PADDING(x) ((x << 16) & RCTRL_PAL_MASK) 291#define RCTRL_PADDING(x) ((x << 16) & RCTRL_PAL_MASK)
292 292
293 293
294#define RSTAT_CLEAR_RHALT 0x00800000 294#define RSTAT_CLEAR_RHALT 0x00800000
295#define RSTAT_CLEAR_RXF0 0x00000080
296#define RSTAT_RXF_MASK 0x000000ff
295 297
296#define TCTRL_IPCSEN 0x00004000 298#define TCTRL_IPCSEN 0x00004000
297#define TCTRL_TUCSEN 0x00002000 299#define TCTRL_TUCSEN 0x00002000
@@ -627,7 +629,6 @@ struct rmon_mib
627}; 629};
628 630
629struct gfar_extra_stats { 631struct gfar_extra_stats {
630 atomic64_t kernel_dropped;
631 atomic64_t rx_large; 632 atomic64_t rx_large;
632 atomic64_t rx_short; 633 atomic64_t rx_short;
633 atomic64_t rx_nonoctet; 634 atomic64_t rx_nonoctet;
@@ -1180,8 +1181,7 @@ extern void stop_gfar(struct net_device *dev);
1180extern void gfar_halt(struct net_device *dev); 1181extern void gfar_halt(struct net_device *dev);
1181extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, 1182extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev,
1182 int enable, u32 regnum, u32 read); 1183 int enable, u32 regnum, u32 read);
1183extern void gfar_configure_coalescing(struct gfar_private *priv, 1184extern void gfar_configure_coalescing_all(struct gfar_private *priv);
1184 unsigned long tx_mask, unsigned long rx_mask);
1185void gfar_init_sysfs(struct net_device *dev); 1185void gfar_init_sysfs(struct net_device *dev);
1186int gfar_set_features(struct net_device *dev, netdev_features_t features); 1186int gfar_set_features(struct net_device *dev, netdev_features_t features);
1187extern void gfar_check_rx_parser_mode(struct gfar_private *priv); 1187extern void gfar_check_rx_parser_mode(struct gfar_private *priv);
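
The two masks added to gianfar.h encode what the new poll loop keys off: RSTAT's low byte carries one RXF event bit per RX queue, most significant bit first, so queue i maps to RSTAT_CLEAR_RXF0 >> i and writing that bit back acknowledges the queue's events. A small self-contained illustration of the bit math:

    #include <stdio.h>

    #define RSTAT_CLEAR_RXF0 0x00000080u    /* queue 0's RXF bit */
    #define RSTAT_RXF_MASK   0x000000ffu    /* all eight RXF bits */

    int main(void)
    {
            unsigned int rstat = 0x000000a0u; /* queues 0 and 2 active */
            int i;

            for (i = 0; i < 8; i++)
                    if (rstat & RSTAT_RXF_MASK & (RSTAT_CLEAR_RXF0 >> i))
                            printf("rx queue %d has frames\n", i);
            return 0;
    }
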
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 75e89acf4912..4e7118f9f075 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -66,7 +66,6 @@ static void gfar_gdrvinfo(struct net_device *dev,
66 struct ethtool_drvinfo *drvinfo); 66 struct ethtool_drvinfo *drvinfo);
67 67
68static const char stat_gstrings[][ETH_GSTRING_LEN] = { 68static const char stat_gstrings[][ETH_GSTRING_LEN] = {
69 "rx-dropped-by-kernel",
70 "rx-large-frame-errors", 69 "rx-large-frame-errors",
71 "rx-short-frame-errors", 70 "rx-short-frame-errors",
72 "rx-non-octet-errors", 71 "rx-non-octet-errors",
@@ -436,7 +435,7 @@ static int gfar_scoalesce(struct net_device *dev,
436 gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs)); 435 gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
437 } 436 }
438 437
439 gfar_configure_coalescing(priv, 0xFF, 0xFF); 438 gfar_configure_coalescing_all(priv);
440 439
441 return 0; 440 return 0;
442} 441}
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index 2418faf2251a..84125707f321 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -1003,8 +1003,6 @@ static void fjn_rx(struct net_device *dev)
1003 } 1003 }
1004 skb = netdev_alloc_skb(dev, pkt_len + 2); 1004 skb = netdev_alloc_skb(dev, pkt_len + 2);
1005 if (skb == NULL) { 1005 if (skb == NULL) {
1006 netdev_notice(dev, "Memory squeeze, dropping packet (len %d)\n",
1007 pkt_len);
1008 outb(F_SKP_PKT, ioaddr + RX_SKIP); 1006 outb(F_SKP_PKT, ioaddr + RX_SKIP);
1009 dev->stats.rx_dropped++; 1007 dev->stats.rx_dropped++;
1010 break; 1008 break;
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index 1c54e229e3cc..e38816145395 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -798,16 +798,14 @@ static inline int i596_rx(struct net_device *dev)
798#ifdef __mc68000__ 798#ifdef __mc68000__
799 cache_clear(virt_to_phys(newskb->data), PKT_BUF_SZ); 799 cache_clear(virt_to_phys(newskb->data), PKT_BUF_SZ);
800#endif 800#endif
801 } 801 } else {
802 else
803 skb = netdev_alloc_skb(dev, pkt_len + 2); 802 skb = netdev_alloc_skb(dev, pkt_len + 2);
803 }
804memory_squeeze: 804memory_squeeze:
805 if (skb == NULL) { 805 if (skb == NULL) {
806 /* XXX tulip.c can defer packets here!! */ 806 /* XXX tulip.c can defer packets here!! */
807 printk(KERN_WARNING "%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
808 dev->stats.rx_dropped++; 807 dev->stats.rx_dropped++;
809 } 808 } else {
810 else {
811 if (!rx_in_place) { 809 if (!rx_in_place) {
812 /* 16 byte align the data fields */ 810 /* 16 byte align the data fields */
813 skb_reserve(skb, 2); 811 skb_reserve(skb, 2);
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index f045ea4dc514..d653bac4cfc4 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -715,14 +715,12 @@ static inline int i596_rx(struct net_device *dev)
715 rbd->v_data = newskb->data; 715 rbd->v_data = newskb->data;
716 rbd->b_data = SWAP32(dma_addr); 716 rbd->b_data = SWAP32(dma_addr);
717 DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd)); 717 DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
718 } else 718 } else {
719 skb = netdev_alloc_skb_ip_align(dev, pkt_len); 719 skb = netdev_alloc_skb_ip_align(dev, pkt_len);
720 }
720memory_squeeze: 721memory_squeeze:
721 if (skb == NULL) { 722 if (skb == NULL) {
722 /* XXX tulip.c can defer packets here!! */ 723 /* XXX tulip.c can defer packets here!! */
723 printk(KERN_ERR
724 "%s: i596_rx Memory squeeze, dropping packet.\n",
725 dev->name);
726 dev->stats.rx_dropped++; 724 dev->stats.rx_dropped++;
727 } else { 725 } else {
728 if (!rx_in_place) { 726 if (!rx_in_place) {
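Both i825xx hunks also fix brace balance per Documentation/CodingStyle: once one branch of an if/else needs braces, every branch gets them. Schematically, with stand-in helpers (all names hypothetical):

	#include <linux/types.h>

	static void demo_remap_buffer(void) { }
	static void demo_sync_descriptor(void) { }
	static void demo_alloc_fresh(void) { }

	static void demo_rx_branch(bool rx_in_place)
	{
		if (rx_in_place) {
			demo_remap_buffer();	/* multi-statement: braces required */
			demo_sync_descriptor();
		} else {
			demo_alloc_fresh();	/* braced to match the sibling branch */
		}
	}
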
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 328f47c92e26..029633434474 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -402,7 +402,6 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
402 skb_arr_rq1[index] = netdev_alloc_skb(dev, 402 skb_arr_rq1[index] = netdev_alloc_skb(dev,
403 EHEA_L_PKT_SIZE); 403 EHEA_L_PKT_SIZE);
404 if (!skb_arr_rq1[index]) { 404 if (!skb_arr_rq1[index]) {
405 netdev_info(dev, "Unable to allocate enough skb in the array\n");
406 pr->rq1_skba.os_skbs = fill_wqes - i; 405 pr->rq1_skba.os_skbs = fill_wqes - i;
407 break; 406 break;
408 } 407 }
@@ -432,10 +431,8 @@ static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
432 431
433 for (i = 0; i < nr_rq1a; i++) { 432 for (i = 0; i < nr_rq1a; i++) {
434 skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE); 433 skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
435 if (!skb_arr_rq1[i]) { 434 if (!skb_arr_rq1[i])
436 netdev_info(dev, "Not enough memory to allocate skb array\n");
437 break; 435 break;
438 }
439 } 436 }
440 /* Ring doorbell */ 437 /* Ring doorbell */
441 ehea_update_rq1a(pr->qp, i - 1); 438 ehea_update_rq1a(pr->qp, i - 1);
@@ -695,10 +692,8 @@ static int ehea_proc_rwqes(struct net_device *dev,
695 692
696 skb = netdev_alloc_skb(dev, 693 skb = netdev_alloc_skb(dev,
697 EHEA_L_PKT_SIZE); 694 EHEA_L_PKT_SIZE);
698 if (!skb) { 695 if (!skb)
699 netdev_err(dev, "Not enough memory to allocate skb\n");
700 break; 696 break;
701 }
702 } 697 }
703 skb_copy_to_linear_data(skb, ((char *)cqe) + 64, 698 skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
704 cqe->num_bytes_transfered - 4); 699 cqe->num_bytes_transfered - 4);
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 1f7ecf57181e..610ed223d1db 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -637,17 +637,12 @@ static int mal_probe(struct platform_device *ofdev)
637 bd_size = sizeof(struct mal_descriptor) * 637 bd_size = sizeof(struct mal_descriptor) *
638 (NUM_TX_BUFF * mal->num_tx_chans + 638 (NUM_TX_BUFF * mal->num_tx_chans +
639 NUM_RX_BUFF * mal->num_rx_chans); 639 NUM_RX_BUFF * mal->num_rx_chans);
640 mal->bd_virt = 640 mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
641 dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma, 641 GFP_KERNEL | __GFP_ZERO);
642 GFP_KERNEL);
643 if (mal->bd_virt == NULL) { 642 if (mal->bd_virt == NULL) {
644 printk(KERN_ERR
645 "mal%d: out of memory allocating RX/TX descriptors!\n",
646 index);
647 err = -ENOMEM; 643 err = -ENOMEM;
648 goto fail_unmap; 644 goto fail_unmap;
649 } 645 }
650 memset(mal->bd_virt, 0, bd_size);
651 646
652 for (i = 0; i < mal->num_tx_chans; ++i) 647 for (i = 0; i < mal->num_tx_chans; ++i)
653 set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma + 648 set_mal_dcrn(mal, MAL_TXCTPR(i), mal->bd_dma +
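The mal_probe() hunk is one of several in this series converting dma_alloc_coherent() + memset() into a single zeroed allocation. A minimal sketch of the pattern; the __GFP_ZERO flag is what the hunk adds (later kernels zero coherent DMA memory unconditionally, which eventually made the flag redundant):

	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>

	static void *demo_alloc_bd_ring(struct device *dev, size_t bd_size,
					dma_addr_t *bd_dma)
	{
		/* one call returns zeroed, DMA-coherent descriptor memory */
		return dma_alloc_coherent(dev, bd_size, bd_dma,
					  GFP_KERNEL | __GFP_ZERO);
	}
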
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index c859771a9902..302d59401065 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -556,11 +556,9 @@ static int ibmveth_open(struct net_device *netdev)
556 adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * 556 adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
557 rxq_entries; 557 rxq_entries;
558 adapter->rx_queue.queue_addr = 558 adapter->rx_queue.queue_addr =
559 dma_alloc_coherent(dev, adapter->rx_queue.queue_len, 559 dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
560 &adapter->rx_queue.queue_dma, GFP_KERNEL); 560 &adapter->rx_queue.queue_dma, GFP_KERNEL);
561
562 if (!adapter->rx_queue.queue_addr) { 561 if (!adapter->rx_queue.queue_addr) {
563 netdev_err(netdev, "unable to allocate rx queue pages\n");
564 rc = -ENOMEM; 562 rc = -ENOMEM;
565 goto err_out; 563 goto err_out;
566 } 564 }
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index ffd287196bf8..82a967c95598 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1020,12 +1020,11 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1020 txdr->size = txdr->count * sizeof(struct e1000_tx_desc); 1020 txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1021 txdr->size = ALIGN(txdr->size, 4096); 1021 txdr->size = ALIGN(txdr->size, 4096);
1022 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, 1022 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1023 GFP_KERNEL); 1023 GFP_KERNEL | __GFP_ZERO);
1024 if (!txdr->desc) { 1024 if (!txdr->desc) {
1025 ret_val = 2; 1025 ret_val = 2;
1026 goto err_nomem; 1026 goto err_nomem;
1027 } 1027 }
1028 memset(txdr->desc, 0, txdr->size);
1029 txdr->next_to_use = txdr->next_to_clean = 0; 1028 txdr->next_to_use = txdr->next_to_clean = 0;
1030 1029
1031 ew32(TDBAL, ((u64)txdr->dma & 0x00000000FFFFFFFF)); 1030 ew32(TDBAL, ((u64)txdr->dma & 0x00000000FFFFFFFF));
@@ -1079,12 +1078,11 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1079 1078
1080 rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); 1079 rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
1081 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, 1080 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1082 GFP_KERNEL); 1081 GFP_KERNEL | __GFP_ZERO);
1083 if (!rxdr->desc) { 1082 if (!rxdr->desc) {
1084 ret_val = 6; 1083 ret_val = 6;
1085 goto err_nomem; 1084 goto err_nomem;
1086 } 1085 }
1087 memset(rxdr->desc, 0, rxdr->size);
1088 rxdr->next_to_use = rxdr->next_to_clean = 0; 1086 rxdr->next_to_use = rxdr->next_to_clean = 0;
1089 1087
1090 rctl = er32(RCTL); 1088 rctl = er32(RCTL);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 8502c625dbef..d98e1d0996d4 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -1516,8 +1516,6 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1516 if (!txdr->desc) { 1516 if (!txdr->desc) {
1517setup_tx_desc_die: 1517setup_tx_desc_die:
1518 vfree(txdr->buffer_info); 1518 vfree(txdr->buffer_info);
1519 e_err(probe, "Unable to allocate memory for the Tx descriptor "
1520 "ring\n");
1521 return -ENOMEM; 1519 return -ENOMEM;
1522 } 1520 }
1523 1521
@@ -1707,10 +1705,7 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1707 1705
1708 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, 1706 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1709 GFP_KERNEL); 1707 GFP_KERNEL);
1710
1711 if (!rxdr->desc) { 1708 if (!rxdr->desc) {
1712 e_err(probe, "Unable to allocate memory for the Rx descriptor "
1713 "ring\n");
1714setup_rx_desc_die: 1709setup_rx_desc_die:
1715 vfree(rxdr->buffer_info); 1710 vfree(rxdr->buffer_info);
1716 return -ENOMEM; 1711 return -ENOMEM;
@@ -1729,8 +1724,6 @@ setup_rx_desc_die:
1729 if (!rxdr->desc) { 1724 if (!rxdr->desc) {
1730 dma_free_coherent(&pdev->dev, rxdr->size, olddesc, 1725 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1731 olddma); 1726 olddma);
1732 e_err(probe, "Unable to allocate memory for the Rx "
1733 "descriptor ring\n");
1734 goto setup_rx_desc_die; 1727 goto setup_rx_desc_die;
1735 } 1728 }
1736 1729
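The e1000 hunks keep the driver's goto-based unwind while dropping the messages; the -ENOMEM propagating up says as much as the deleted e_err(). Reduced to a schematic sketch (ring layout hypothetical, sizes collapsed into one field for brevity):

	#include <linux/dma-mapping.h>
	#include <linux/vmalloc.h>

	struct demo_ring {
		void *desc;
		dma_addr_t dma;
		unsigned int size;
		void *buffer_info;
	};

	static int demo_setup_ring(struct device *dev, struct demo_ring *ring)
	{
		ring->buffer_info = vzalloc(ring->size);
		if (!ring->buffer_info)
			return -ENOMEM;

		ring->desc = dma_alloc_coherent(dev, ring->size, &ring->dma,
						GFP_KERNEL);
		if (!ring->desc)
			goto err_free_info;	/* silent: -ENOMEM is the message */

		return 0;

	err_free_info:
		vfree(ring->buffer_info);
		return -ENOMEM;
	}
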
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index e0991388664c..b71c8502a2b3 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -37,7 +37,9 @@
37 * "index + 5". 37 * "index + 5".
38 */ 38 */
39static const u16 e1000_gg82563_cable_length_table[] = { 39static const u16 e1000_gg82563_cable_length_table[] = {
40 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF }; 40 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF
41};
42
41#define GG82563_CABLE_LENGTH_TABLE_SIZE \ 43#define GG82563_CABLE_LENGTH_TABLE_SIZE \
42 ARRAY_SIZE(e1000_gg82563_cable_length_table) 44 ARRAY_SIZE(e1000_gg82563_cable_length_table)
43 45
@@ -116,7 +118,7 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
116 nvm->type = e1000_nvm_eeprom_spi; 118 nvm->type = e1000_nvm_eeprom_spi;
117 119
118 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> 120 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
119 E1000_EECD_SIZE_EX_SHIFT); 121 E1000_EECD_SIZE_EX_SHIFT);
120 122
121 /* Added to a constant, "size" becomes the left-shift value 123 /* Added to a constant, "size" becomes the left-shift value
122 * for setting word_size. 124 * for setting word_size.
@@ -393,7 +395,7 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
393 * before the device has completed the "Page Select" MDI 395 * before the device has completed the "Page Select" MDI
394 * transaction. So we wait 200us after each MDI command... 396 * transaction. So we wait 200us after each MDI command...
395 */ 397 */
396 udelay(200); 398 usleep_range(200, 400);
397 399
398 /* ...and verify the command was successful. */ 400 /* ...and verify the command was successful. */
399 ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); 401 ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
@@ -403,17 +405,17 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
403 return -E1000_ERR_PHY; 405 return -E1000_ERR_PHY;
404 } 406 }
405 407
406 udelay(200); 408 usleep_range(200, 400);
407 409
408 ret_val = e1000e_read_phy_reg_mdic(hw, 410 ret_val = e1000e_read_phy_reg_mdic(hw,
409 MAX_PHY_REG_ADDRESS & offset, 411 MAX_PHY_REG_ADDRESS & offset,
410 data); 412 data);
411 413
412 udelay(200); 414 usleep_range(200, 400);
413 } else { 415 } else {
414 ret_val = e1000e_read_phy_reg_mdic(hw, 416 ret_val = e1000e_read_phy_reg_mdic(hw,
415 MAX_PHY_REG_ADDRESS & offset, 417 MAX_PHY_REG_ADDRESS & offset,
416 data); 418 data);
417 } 419 }
418 420
419 e1000_release_phy_80003es2lan(hw); 421 e1000_release_phy_80003es2lan(hw);
@@ -462,7 +464,7 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
462 * before the device has completed the "Page Select" MDI 464 * before the device has completed the "Page Select" MDI
463 * transaction. So we wait 200us after each MDI command... 465 * transaction. So we wait 200us after each MDI command...
464 */ 466 */
465 udelay(200); 467 usleep_range(200, 400);
466 468
467 /* ...and verify the command was successful. */ 469 /* ...and verify the command was successful. */
468 ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); 470 ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
@@ -472,17 +474,17 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
472 return -E1000_ERR_PHY; 474 return -E1000_ERR_PHY;
473 } 475 }
474 476
475 udelay(200); 477 usleep_range(200, 400);
476 478
477 ret_val = e1000e_write_phy_reg_mdic(hw, 479 ret_val = e1000e_write_phy_reg_mdic(hw,
478 MAX_PHY_REG_ADDRESS & offset, 480 MAX_PHY_REG_ADDRESS &
479 data); 481 offset, data);
480 482
481 udelay(200); 483 usleep_range(200, 400);
482 } else { 484 } else {
483 ret_val = e1000e_write_phy_reg_mdic(hw, 485 ret_val = e1000e_write_phy_reg_mdic(hw,
484 MAX_PHY_REG_ADDRESS & offset, 486 MAX_PHY_REG_ADDRESS &
485 data); 487 offset, data);
486 } 488 }
487 489
488 e1000_release_phy_80003es2lan(hw); 490 e1000_release_phy_80003es2lan(hw);
@@ -580,7 +582,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
580 e_dbg("Waiting for forced speed/duplex link on GG82563 phy.\n"); 582 e_dbg("Waiting for forced speed/duplex link on GG82563 phy.\n");
581 583
582 ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 584 ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
583 100000, &link); 585 100000, &link);
584 if (ret_val) 586 if (ret_val)
585 return ret_val; 587 return ret_val;
586 588
@@ -595,7 +597,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
595 597
596 /* Try once more */ 598 /* Try once more */
597 ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 599 ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
598 100000, &link); 600 100000, &link);
599 if (ret_val) 601 if (ret_val)
600 return ret_val; 602 return ret_val;
601 } 603 }
@@ -666,14 +668,12 @@ static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
666 s32 ret_val; 668 s32 ret_val;
667 669
668 if (hw->phy.media_type == e1000_media_type_copper) { 670 if (hw->phy.media_type == e1000_media_type_copper) {
669 ret_val = e1000e_get_speed_and_duplex_copper(hw, 671 ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex);
670 speed,
671 duplex);
672 hw->phy.ops.cfg_on_link_up(hw); 672 hw->phy.ops.cfg_on_link_up(hw);
673 } else { 673 } else {
674 ret_val = e1000e_get_speed_and_duplex_fiber_serdes(hw, 674 ret_val = e1000e_get_speed_and_duplex_fiber_serdes(hw,
675 speed, 675 speed,
676 duplex); 676 duplex);
677 } 677 }
678 678
679 return ret_val; 679 return ret_val;
@@ -754,9 +754,9 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
754 754
755 /* Initialize identification LED */ 755 /* Initialize identification LED */
756 ret_val = mac->ops.id_led_init(hw); 756 ret_val = mac->ops.id_led_init(hw);
757 /* An error is not fatal and we should not stop init due to this */
757 if (ret_val) 758 if (ret_val)
758 e_dbg("Error initializing identification LED\n"); 759 e_dbg("Error initializing identification LED\n");
759 /* This is not fatal and we should not stop init due to this */
760 760
761 /* Disabling VLAN filtering */ 761 /* Disabling VLAN filtering */
762 e_dbg("Initializing the IEEE VLAN\n"); 762 e_dbg("Initializing the IEEE VLAN\n");
@@ -784,14 +784,14 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
784 784
785 /* Set the transmit descriptor write-back policy */ 785 /* Set the transmit descriptor write-back policy */
786 reg_data = er32(TXDCTL(0)); 786 reg_data = er32(TXDCTL(0));
787 reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | 787 reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
788 E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; 788 E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC);
789 ew32(TXDCTL(0), reg_data); 789 ew32(TXDCTL(0), reg_data);
790 790
791 /* ...for both queues. */ 791 /* ...for both queues. */
792 reg_data = er32(TXDCTL(1)); 792 reg_data = er32(TXDCTL(1));
793 reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | 793 reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
794 E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; 794 E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC);
795 ew32(TXDCTL(1), reg_data); 795 ew32(TXDCTL(1), reg_data);
796 796
797 /* Enable retransmit on late collisions */ 797 /* Enable retransmit on late collisions */
@@ -818,13 +818,12 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
818 /* default to true to enable the MDIC W/A */ 818 /* default to true to enable the MDIC W/A */
819 hw->dev_spec.e80003es2lan.mdic_wa_enable = true; 819 hw->dev_spec.e80003es2lan.mdic_wa_enable = true;
820 820
821 ret_val = e1000_read_kmrn_reg_80003es2lan(hw, 821 ret_val =
822 E1000_KMRNCTRLSTA_OFFSET >> 822 e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_OFFSET >>
823 E1000_KMRNCTRLSTA_OFFSET_SHIFT, 823 E1000_KMRNCTRLSTA_OFFSET_SHIFT, &i);
824 &i);
825 if (!ret_val) { 824 if (!ret_val) {
826 if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) == 825 if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) ==
827 E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO) 826 E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO)
828 hw->dev_spec.e80003es2lan.mdic_wa_enable = false; 827 hw->dev_spec.e80003es2lan.mdic_wa_enable = false;
829 } 828 }
830 829
@@ -891,7 +890,7 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
891{ 890{
892 struct e1000_phy_info *phy = &hw->phy; 891 struct e1000_phy_info *phy = &hw->phy;
893 s32 ret_val; 892 s32 ret_val;
894 u32 ctrl_ext; 893 u32 reg;
895 u16 data; 894 u16 data;
896 895
897 ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &data); 896 ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &data);
@@ -954,22 +953,19 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
954 } 953 }
955 954
956 /* Bypass Rx and Tx FIFO's */ 955 /* Bypass Rx and Tx FIFO's */
957 ret_val = e1000_write_kmrn_reg_80003es2lan(hw, 956 reg = E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL;
958 E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL, 957 data = (E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
959 E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS | 958 E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS);
960 E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS); 959 ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data);
961 if (ret_val) 960 if (ret_val)
962 return ret_val; 961 return ret_val;
963 962
964 ret_val = e1000_read_kmrn_reg_80003es2lan(hw, 963 reg = E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE;
965 E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE, 964 ret_val = e1000_read_kmrn_reg_80003es2lan(hw, reg, &data);
966 &data);
967 if (ret_val) 965 if (ret_val)
968 return ret_val; 966 return ret_val;
969 data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE; 967 data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE;
970 ret_val = e1000_write_kmrn_reg_80003es2lan(hw, 968 ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data);
971 E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
972 data);
973 if (ret_val) 969 if (ret_val)
974 return ret_val; 970 return ret_val;
975 971
@@ -982,9 +978,9 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
982 if (ret_val) 978 if (ret_val)
983 return ret_val; 979 return ret_val;
984 980
985 ctrl_ext = er32(CTRL_EXT); 981 reg = er32(CTRL_EXT);
986 ctrl_ext &= ~(E1000_CTRL_EXT_LINK_MODE_MASK); 982 reg &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
987 ew32(CTRL_EXT, ctrl_ext); 983 ew32(CTRL_EXT, reg);
988 984
989 ret_val = e1e_rphy(hw, GG82563_PHY_PWR_MGMT_CTRL, &data); 985 ret_val = e1e_rphy(hw, GG82563_PHY_PWR_MGMT_CTRL, &data);
990 if (ret_val) 986 if (ret_val)
@@ -1049,27 +1045,29 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
1049 * polling the phy; this fixes erroneous timeouts at 10Mbps. 1045 * polling the phy; this fixes erroneous timeouts at 10Mbps.
1050 */ 1046 */
1051 ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4), 1047 ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4),
1052 0xFFFF); 1048 0xFFFF);
1053 if (ret_val) 1049 if (ret_val)
1054 return ret_val; 1050 return ret_val;
1055 ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9), 1051 ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
1056 &reg_data); 1052 &reg_data);
1057 if (ret_val) 1053 if (ret_val)
1058 return ret_val; 1054 return ret_val;
1059 reg_data |= 0x3F; 1055 reg_data |= 0x3F;
1060 ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9), 1056 ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
1061 reg_data); 1057 reg_data);
1062 if (ret_val) 1058 if (ret_val)
1063 return ret_val; 1059 return ret_val;
1064 ret_val = e1000_read_kmrn_reg_80003es2lan(hw, 1060 ret_val =
1065 E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, 1061 e1000_read_kmrn_reg_80003es2lan(hw,
1066 &reg_data); 1062 E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
1063 &reg_data);
1067 if (ret_val) 1064 if (ret_val)
1068 return ret_val; 1065 return ret_val;
1069 reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING; 1066 reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING;
1070 ret_val = e1000_write_kmrn_reg_80003es2lan(hw, 1067 ret_val =
1071 E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, 1068 e1000_write_kmrn_reg_80003es2lan(hw,
1072 reg_data); 1069 E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
1070 reg_data);
1073 if (ret_val) 1071 if (ret_val)
1074 return ret_val; 1072 return ret_val;
1075 1073
@@ -1096,7 +1094,7 @@ static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw)
1096 1094
1097 if (hw->phy.media_type == e1000_media_type_copper) { 1095 if (hw->phy.media_type == e1000_media_type_copper) {
1098 ret_val = e1000e_get_speed_and_duplex_copper(hw, &speed, 1096 ret_val = e1000e_get_speed_and_duplex_copper(hw, &speed,
1099 &duplex); 1097 &duplex);
1100 if (ret_val) 1098 if (ret_val)
1101 return ret_val; 1099 return ret_val;
1102 1100
@@ -1125,9 +1123,10 @@ static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
1125 u16 reg_data, reg_data2; 1123 u16 reg_data, reg_data2;
1126 1124
1127 reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT; 1125 reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT;
1128 ret_val = e1000_write_kmrn_reg_80003es2lan(hw, 1126 ret_val =
1129 E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, 1127 e1000_write_kmrn_reg_80003es2lan(hw,
1130 reg_data); 1128 E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
1129 reg_data);
1131 if (ret_val) 1130 if (ret_val)
1132 return ret_val; 1131 return ret_val;
1133 1132
@@ -1171,9 +1170,10 @@ static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
1171 u32 i = 0; 1170 u32 i = 0;
1172 1171
1173 reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT; 1172 reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT;
1174 ret_val = e1000_write_kmrn_reg_80003es2lan(hw, 1173 ret_val =
1175 E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, 1174 e1000_write_kmrn_reg_80003es2lan(hw,
1176 reg_data); 1175 E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
1176 reg_data);
1177 if (ret_val) 1177 if (ret_val)
1178 return ret_val; 1178 return ret_val;
1179 1179
@@ -1220,7 +1220,7 @@ static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
1220 return ret_val; 1220 return ret_val;
1221 1221
1222 kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & 1222 kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
1223 E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; 1223 E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
1224 ew32(KMRNCTRLSTA, kmrnctrlsta); 1224 ew32(KMRNCTRLSTA, kmrnctrlsta);
1225 e1e_flush(); 1225 e1e_flush();
1226 1226
@@ -1255,7 +1255,7 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
1255 return ret_val; 1255 return ret_val;
1256 1256
1257 kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & 1257 kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
1258 E1000_KMRNCTRLSTA_OFFSET) | data; 1258 E1000_KMRNCTRLSTA_OFFSET) | data;
1259 ew32(KMRNCTRLSTA, kmrnctrlsta); 1259 ew32(KMRNCTRLSTA, kmrnctrlsta);
1260 e1e_flush(); 1260 e1e_flush();
1261 1261
@@ -1419,4 +1419,3 @@ const struct e1000_info e1000_es2_info = {
1419 .phy_ops = &es2_phy_ops, 1419 .phy_ops = &es2_phy_ops,
1420 .nvm_ops = &es2_nvm_ops, 1420 .nvm_ops = &es2_nvm_ops,
1421}; 1421};
1422
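All the udelay(200) -> usleep_range(200, 400) conversions in this file follow Documentation/timers/timers-howto.txt: in non-atomic context, delays in roughly the 10us..20ms range should sleep on an hrtimer rather than busy-wait, and the min/max window lets the kernel coalesce wakeups. The shape of the change:

	#include <linux/delay.h>

	static void demo_mdi_settle(void)
	{
		/* was: udelay(200) -- burns 200us of CPU spinning */
		usleep_range(200, 400);	/* sleeps; may batch with other timers */
	}
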
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 2faffbde179e..7380442a3829 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -184,7 +184,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
184 default: 184 default:
185 nvm->type = e1000_nvm_eeprom_spi; 185 nvm->type = e1000_nvm_eeprom_spi;
186 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> 186 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
187 E1000_EECD_SIZE_EX_SHIFT); 187 E1000_EECD_SIZE_EX_SHIFT);
188 /* Added to a constant, "size" becomes the left-shift value 188 /* Added to a constant, "size" becomes the left-shift value
189 * for setting word_size. 189 * for setting word_size.
190 */ 190 */
@@ -437,7 +437,7 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
437 return ret_val; 437 return ret_val;
438 438
439 phy->id = (u32)(phy_id << 16); 439 phy->id = (u32)(phy_id << 16);
440 udelay(20); 440 usleep_range(20, 40);
441 ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id); 441 ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id);
442 if (ret_val) 442 if (ret_val)
443 return ret_val; 443 return ret_val;
@@ -482,7 +482,7 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
482 if (!(swsm & E1000_SWSM_SMBI)) 482 if (!(swsm & E1000_SWSM_SMBI))
483 break; 483 break;
484 484
485 udelay(50); 485 usleep_range(50, 100);
486 i++; 486 i++;
487 } 487 }
488 488
@@ -499,7 +499,7 @@ static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
499 if (er32(SWSM) & E1000_SWSM_SWESMBI) 499 if (er32(SWSM) & E1000_SWSM_SWESMBI)
500 break; 500 break;
501 501
502 udelay(50); 502 usleep_range(50, 100);
503 } 503 }
504 504
505 if (i == fw_timeout) { 505 if (i == fw_timeout) {
@@ -526,6 +526,7 @@ static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
526 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); 526 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
527 ew32(SWSM, swsm); 527 ew32(SWSM, swsm);
528} 528}
529
529/** 530/**
530 * e1000_get_hw_semaphore_82573 - Acquire hardware semaphore 531 * e1000_get_hw_semaphore_82573 - Acquire hardware semaphore
531 * @hw: pointer to the HW structure 532 * @hw: pointer to the HW structure
@@ -846,9 +847,9 @@ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
846 } 847 }
847 848
848 for (i = 0; i < words; i++) { 849 for (i = 0; i < words; i++) {
849 eewr = (data[i] << E1000_NVM_RW_REG_DATA) | 850 eewr = ((data[i] << E1000_NVM_RW_REG_DATA) |
850 ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) | 851 ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) |
851 E1000_NVM_RW_REG_START; 852 E1000_NVM_RW_REG_START);
852 853
853 ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE); 854 ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE);
854 if (ret_val) 855 if (ret_val)
@@ -875,8 +876,7 @@ static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw)
875 s32 timeout = PHY_CFG_TIMEOUT; 876 s32 timeout = PHY_CFG_TIMEOUT;
876 877
877 while (timeout) { 878 while (timeout) {
878 if (er32(EEMNGCTL) & 879 if (er32(EEMNGCTL) & E1000_NVM_CFG_DONE_PORT_0)
879 E1000_NVM_CFG_DONE_PORT_0)
880 break; 880 break;
881 usleep_range(1000, 2000); 881 usleep_range(1000, 2000);
882 timeout--; 882 timeout--;
@@ -1022,7 +1022,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
1022 } 1022 }
1023 1023
1024 if (hw->nvm.type == e1000_nvm_flash_hw) { 1024 if (hw->nvm.type == e1000_nvm_flash_hw) {
1025 udelay(10); 1025 usleep_range(10, 20);
1026 ctrl_ext = er32(CTRL_EXT); 1026 ctrl_ext = er32(CTRL_EXT);
1027 ctrl_ext |= E1000_CTRL_EXT_EE_RST; 1027 ctrl_ext |= E1000_CTRL_EXT_EE_RST;
1028 ew32(CTRL_EXT, ctrl_ext); 1028 ew32(CTRL_EXT, ctrl_ext);
@@ -1095,9 +1095,9 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
1095 1095
1096 /* Initialize identification LED */ 1096 /* Initialize identification LED */
1097 ret_val = mac->ops.id_led_init(hw); 1097 ret_val = mac->ops.id_led_init(hw);
1098 /* An error is not fatal and we should not stop init due to this */
1098 if (ret_val) 1099 if (ret_val)
1099 e_dbg("Error initializing identification LED\n"); 1100 e_dbg("Error initializing identification LED\n");
1100 /* This is not fatal and we should not stop init due to this */
1101 1101
1102 /* Disabling VLAN filtering */ 1102 /* Disabling VLAN filtering */
1103 e_dbg("Initializing the IEEE VLAN\n"); 1103 e_dbg("Initializing the IEEE VLAN\n");
@@ -1122,9 +1122,8 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
1122 1122
1123 /* Set the transmit descriptor write-back policy */ 1123 /* Set the transmit descriptor write-back policy */
1124 reg_data = er32(TXDCTL(0)); 1124 reg_data = er32(TXDCTL(0));
1125 reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | 1125 reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
1126 E1000_TXDCTL_FULL_TX_DESC_WB | 1126 E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC);
1127 E1000_TXDCTL_COUNT_DESC;
1128 ew32(TXDCTL(0), reg_data); 1127 ew32(TXDCTL(0), reg_data);
1129 1128
1130 /* ...for both queues. */ 1129 /* ...for both queues. */
@@ -1140,9 +1139,9 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
1140 break; 1139 break;
1141 default: 1140 default:
1142 reg_data = er32(TXDCTL(1)); 1141 reg_data = er32(TXDCTL(1));
1143 reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | 1142 reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) |
1144 E1000_TXDCTL_FULL_TX_DESC_WB | 1143 E1000_TXDCTL_FULL_TX_DESC_WB |
1145 E1000_TXDCTL_COUNT_DESC; 1144 E1000_TXDCTL_COUNT_DESC);
1146 ew32(TXDCTL(1), reg_data); 1145 ew32(TXDCTL(1), reg_data);
1147 break; 1146 break;
1148 } 1147 }
@@ -1530,7 +1529,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1530 status = er32(STATUS); 1529 status = er32(STATUS);
1531 er32(RXCW); 1530 er32(RXCW);
1532 /* SYNCH bit and IV bit are sticky */ 1531 /* SYNCH bit and IV bit are sticky */
1533 udelay(10); 1532 usleep_range(10, 20);
1534 rxcw = er32(RXCW); 1533 rxcw = er32(RXCW);
1535 1534
1536 if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) { 1535 if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
@@ -1633,7 +1632,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1633 * the IV bit and restart Autoneg 1632 * the IV bit and restart Autoneg
1634 */ 1633 */
1635 for (i = 0; i < AN_RETRY_COUNT; i++) { 1634 for (i = 0; i < AN_RETRY_COUNT; i++) {
1636 udelay(10); 1635 usleep_range(10, 20);
1637 rxcw = er32(RXCW); 1636 rxcw = er32(RXCW);
1638 if ((rxcw & E1000_RXCW_SYNCH) && 1637 if ((rxcw & E1000_RXCW_SYNCH) &&
1639 (rxcw & E1000_RXCW_C)) 1638 (rxcw & E1000_RXCW_C))
@@ -2066,4 +2065,3 @@ const struct e1000_info e1000_82583_info = {
2066 .phy_ops = &e82_phy_ops_bm, 2065 .phy_ops = &e82_phy_ops_bm,
2067 .nvm_ops = &e82571_nvm_ops, 2066 .nvm_ops = &e82571_nvm_ops,
2068}; 2067};
2069
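The SWSM polling loops converted above implement the two-stage hardware semaphore: wait for firmware to release SMBI, then set SWESMBI and read it back. A hedged sketch of the first stage, assuming the driver's er32() register accessor and the SWSM/error constants from its hw.h:

	static s32 demo_wait_for_smbi(struct e1000_hw *hw, s32 timeout)
	{
		s32 i = 0;

		/* stage 1: spin until firmware clears the SMBI ownership bit */
		while (i < timeout) {
			if (!(er32(SWSM) & E1000_SWSM_SMBI))
				return 0;
			usleep_range(50, 100);	/* was udelay(50) before this patch */
			i++;
		}
		return -E1000_ERR_NVM;		/* caller treats this as busy */
	}
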
diff --git a/drivers/net/ethernet/intel/e1000e/82571.h b/drivers/net/ethernet/intel/e1000e/82571.h
index 85cb1a3b7cd4..08e24dc3dc0e 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.h
+++ b/drivers/net/ethernet/intel/e1000e/82571.h
@@ -44,6 +44,8 @@
44#define E1000_EIAC_82574 0x000DC /* Ext. Interrupt Auto Clear - RW */ 44#define E1000_EIAC_82574 0x000DC /* Ext. Interrupt Auto Clear - RW */
45#define E1000_EIAC_MASK_82574 0x01F00000 45#define E1000_EIAC_MASK_82574 0x01F00000
46 46
47#define E1000_IVAR_INT_ALLOC_VALID 0x8
48
47/* Manageability Operation Mode mask */ 49/* Manageability Operation Mode mask */
48#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 50#define E1000_NVM_INIT_CTRL2_MNGM 0x6000
49 51
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index fc3a4fe1ac71..351c94a0cf74 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -66,7 +66,7 @@
66#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 66#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
67#define E1000_CTRL_EXT_EIAME 0x01000000 67#define E1000_CTRL_EXT_EIAME 0x01000000
68#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ 68#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */
69#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ 69#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */
70#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ 70#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
71#define E1000_CTRL_EXT_LSECCK 0x00001000 71#define E1000_CTRL_EXT_LSECCK 0x00001000
72#define E1000_CTRL_EXT_PHYPDEN 0x00100000 72#define E1000_CTRL_EXT_PHYPDEN 0x00100000
@@ -216,6 +216,8 @@
216#define E1000_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */ 216#define E1000_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */
217#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ 217#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
218#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ 218#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
219#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
220#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */
219#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ 221#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
220#define E1000_CTRL_RST 0x04000000 /* Global reset */ 222#define E1000_CTRL_RST 0x04000000 /* Global reset */
221#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ 223#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */
@@ -234,17 +236,17 @@
234#define E1000_STATUS_FUNC_SHIFT 2 236#define E1000_STATUS_FUNC_SHIFT 2
235#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ 237#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */
236#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ 238#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */
239#define E1000_STATUS_SPEED_MASK 0x000000C0
237#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ 240#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */
238#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ 241#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */
239#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ 242#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */
240#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */ 243#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */
241#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */ 244#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */
242#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */ 245#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master Req status */
243 246
244#define HALF_DUPLEX 1 247#define HALF_DUPLEX 1
245#define FULL_DUPLEX 2 248#define FULL_DUPLEX 2
246 249
247
248#define ADVERTISE_10_HALF 0x0001 250#define ADVERTISE_10_HALF 0x0001
249#define ADVERTISE_10_FULL 0x0002 251#define ADVERTISE_10_FULL 0x0002
250#define ADVERTISE_100_HALF 0x0004 252#define ADVERTISE_100_HALF 0x0004
@@ -311,6 +313,7 @@
311 313
312/* SerDes Control */ 314/* SerDes Control */
313#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 315#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
316#define E1000_SCTL_ENABLE_SERDES_LOOPBACK 0x0410
314 317
315/* Receive Checksum Control */ 318/* Receive Checksum Control */
316#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ 319#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */
@@ -400,7 +403,8 @@
400#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */ 403#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
401#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ 404#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
402#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */ 405#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */
403#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ 406/* If this bit asserted, the driver should claim the interrupt */
407#define E1000_ICR_INT_ASSERTED 0x80000000
404#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */ 408#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */
405#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */ 409#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */
406#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */ 410#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */
@@ -583,13 +587,13 @@
583#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ 587#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */
584#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES) 588#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES)
585 589
586#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM read/write registers */ 590#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM r/w regs */
587#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ 591#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */
588#define E1000_NVM_RW_REG_START 1 /* Start operation */ 592#define E1000_NVM_RW_REG_START 1 /* Start operation */
589#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ 593#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
590#define E1000_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ 594#define E1000_NVM_POLL_WRITE 1 /* Flag for polling write complete */
591#define E1000_NVM_POLL_READ 0 /* Flag for polling for read complete */ 595#define E1000_NVM_POLL_READ 0 /* Flag for polling read complete */
592#define E1000_FLASH_UPDATES 2000 596#define E1000_FLASH_UPDATES 2000
593 597
594/* NVM Word Offsets */ 598/* NVM Word Offsets */
595#define NVM_COMPAT 0x0003 599#define NVM_COMPAT 0x0003
@@ -785,6 +789,7 @@
785 GG82563_REG(194, 18) /* Inband Control */ 789 GG82563_REG(194, 18) /* Inband Control */
786 790
787/* MDI Control */ 791/* MDI Control */
792#define E1000_MDIC_REG_MASK 0x001F0000
788#define E1000_MDIC_REG_SHIFT 16 793#define E1000_MDIC_REG_SHIFT 16
789#define E1000_MDIC_PHY_SHIFT 21 794#define E1000_MDIC_PHY_SHIFT 21
790#define E1000_MDIC_OP_WRITE 0x04000000 795#define E1000_MDIC_OP_WRITE 0x04000000
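The new E1000_MDIC_REG_MASK complements the existing shift macros; an MDIC command word is a straight OR of shifted fields. A sketch of a write command built from the defines in this hunk (function name illustrative):

	static u32 demo_build_mdic_write(u32 reg_offset, u32 phy_addr, u16 data)
	{
		return ((u32)data |
			(reg_offset << E1000_MDIC_REG_SHIFT) |
			(phy_addr << E1000_MDIC_PHY_SHIFT) |
			E1000_MDIC_OP_WRITE);
	}
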
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index fcc758138b8a..82f1c84282db 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -46,6 +46,7 @@
46#include <linux/ptp_clock_kernel.h> 46#include <linux/ptp_clock_kernel.h>
47#include <linux/ptp_classify.h> 47#include <linux/ptp_classify.h>
48#include <linux/mii.h> 48#include <linux/mii.h>
49#include <linux/mdio.h>
49#include "hw.h" 50#include "hw.h"
50 51
51struct e1000_info; 52struct e1000_info;
@@ -61,7 +62,6 @@ struct e1000_info;
61#define e_notice(format, arg...) \ 62#define e_notice(format, arg...) \
62 netdev_notice(adapter->netdev, format, ## arg) 63 netdev_notice(adapter->netdev, format, ## arg)
63 64
64
65/* Interrupt modes, as used by the IntMode parameter */ 65/* Interrupt modes, as used by the IntMode parameter */
66#define E1000E_INT_MODE_LEGACY 0 66#define E1000E_INT_MODE_LEGACY 0
67#define E1000E_INT_MODE_MSI 1 67#define E1000E_INT_MODE_MSI 1
@@ -239,9 +239,8 @@ struct e1000_adapter {
239 u16 tx_itr; 239 u16 tx_itr;
240 u16 rx_itr; 240 u16 rx_itr;
241 241
242 /* Tx */ 242 /* Tx - one ring per active queue */
243 struct e1000_ring *tx_ring /* One per active queue */ 243 struct e1000_ring *tx_ring ____cacheline_aligned_in_smp;
244 ____cacheline_aligned_in_smp;
245 u32 tx_fifo_limit; 244 u32 tx_fifo_limit;
246 245
247 struct napi_struct napi; 246 struct napi_struct napi;
@@ -352,6 +351,8 @@ struct e1000_adapter {
352 struct timecounter tc; 351 struct timecounter tc;
353 struct ptp_clock *ptp_clock; 352 struct ptp_clock *ptp_clock;
354 struct ptp_clock_info ptp_clock_info; 353 struct ptp_clock_info ptp_clock_info;
354
355 u16 eee_advert;
355}; 356};
356 357
357struct e1000_info { 358struct e1000_info {
@@ -487,8 +488,8 @@ extern int e1000e_setup_tx_resources(struct e1000_ring *ring);
487extern void e1000e_free_rx_resources(struct e1000_ring *ring); 488extern void e1000e_free_rx_resources(struct e1000_ring *ring);
488extern void e1000e_free_tx_resources(struct e1000_ring *ring); 489extern void e1000e_free_tx_resources(struct e1000_ring *ring);
489extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, 490extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
490 struct rtnl_link_stats64 491 struct rtnl_link_stats64
491 *stats); 492 *stats);
492extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); 493extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
493extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); 494extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
494extern void e1000e_get_hw_control(struct e1000_adapter *adapter); 495extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
@@ -558,12 +559,14 @@ static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw)
558 return hw->nvm.ops.update(hw); 559 return hw->nvm.ops.update(hw);
559} 560}
560 561
561static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) 562static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words,
563 u16 *data)
562{ 564{
563 return hw->nvm.ops.read(hw, offset, words, data); 565 return hw->nvm.ops.read(hw, offset, words, data);
564} 566}
565 567
566static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) 568static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words,
569 u16 *data)
567{ 570{
568 return hw->nvm.ops.write(hw, offset, words, data); 571 return hw->nvm.ops.write(hw, offset, words, data);
569} 572}
@@ -597,7 +600,7 @@ static inline s32 __ew32_prepare(struct e1000_hw *hw)
597 s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT; 600 s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
598 601
599 while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i) 602 while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
600 udelay(50); 603 usleep_range(50, 100);
601 604
602 return i; 605 return i;
603} 606}
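The reflowed e1000_read_nvm()/e1000_write_nvm() wrappers show the ops-table indirection e1000e uses throughout: call sites stay one line while each NVM variant supplies its own vtable. Stripped to its essentials (names hypothetical):

	#include <linux/types.h>

	/* per-variant vtable; e.g. SPI EEPROM vs. flash backends fill this in */
	struct demo_nvm_ops {
		s32 (*read)(void *hw, u16 offset, u16 words, u16 *data);
		s32 (*write)(void *hw, u16 offset, u16 words, u16 *data);
	};

	struct demo_nvm {
		struct demo_nvm_ops ops;
	};

	static inline s32 demo_read_nvm(struct demo_nvm *nvm, void *hw,
					u16 offset, u16 words, u16 *data)
	{
		return nvm->ops.read(hw, offset, words, data);
	}
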
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index f91a8f3f9d48..7c8ca658d553 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -35,12 +35,11 @@
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/delay.h> 36#include <linux/delay.h>
37#include <linux/vmalloc.h> 37#include <linux/vmalloc.h>
38#include <linux/mdio.h>
39#include <linux/pm_runtime.h> 38#include <linux/pm_runtime.h>
40 39
41#include "e1000.h" 40#include "e1000.h"
42 41
43enum {NETDEV_STATS, E1000_STATS}; 42enum { NETDEV_STATS, E1000_STATS };
44 43
45struct e1000_stats { 44struct e1000_stats {
46 char stat_string[ETH_GSTRING_LEN]; 45 char stat_string[ETH_GSTRING_LEN];
@@ -121,6 +120,7 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
121 "Interrupt test (offline)", "Loopback test (offline)", 120 "Interrupt test (offline)", "Loopback test (offline)",
122 "Link test (on/offline)" 121 "Link test (on/offline)"
123}; 122};
123
124#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test) 124#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
125 125
126static int e1000_get_settings(struct net_device *netdev, 126static int e1000_get_settings(struct net_device *netdev,
@@ -197,8 +197,7 @@ static int e1000_get_settings(struct net_device *netdev,
197 /* MDI-X => 2; MDI =>1; Invalid =>0 */ 197 /* MDI-X => 2; MDI =>1; Invalid =>0 */
198 if ((hw->phy.media_type == e1000_media_type_copper) && 198 if ((hw->phy.media_type == e1000_media_type_copper) &&
199 netif_carrier_ok(netdev)) 199 netif_carrier_ok(netdev))
200 ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : 200 ecmd->eth_tp_mdix = hw->phy.is_mdix ? ETH_TP_MDI_X : ETH_TP_MDI;
201 ETH_TP_MDI;
202 else 201 else
203 ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; 202 ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
204 203
@@ -224,8 +223,7 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
224 223
225 /* Fiber NICs only allow 1000 gbps Full duplex */ 224 /* Fiber NICs only allow 1000 gbps Full duplex */
226 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) && 225 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
227 spd != SPEED_1000 && 226 (spd != SPEED_1000) && (dplx != DUPLEX_FULL)) {
228 dplx != DUPLEX_FULL) {
229 goto err_inval; 227 goto err_inval;
230 } 228 }
231 229
@@ -298,12 +296,10 @@ static int e1000_set_settings(struct net_device *netdev,
298 hw->mac.autoneg = 1; 296 hw->mac.autoneg = 1;
299 if (hw->phy.media_type == e1000_media_type_fiber) 297 if (hw->phy.media_type == e1000_media_type_fiber)
300 hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full | 298 hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full |
301 ADVERTISED_FIBRE | 299 ADVERTISED_FIBRE | ADVERTISED_Autoneg;
302 ADVERTISED_Autoneg;
303 else 300 else
304 hw->phy.autoneg_advertised = ecmd->advertising | 301 hw->phy.autoneg_advertised = ecmd->advertising |
305 ADVERTISED_TP | 302 ADVERTISED_TP | ADVERTISED_Autoneg;
306 ADVERTISED_Autoneg;
307 ecmd->advertising = hw->phy.autoneg_advertised; 303 ecmd->advertising = hw->phy.autoneg_advertised;
308 if (adapter->fc_autoneg) 304 if (adapter->fc_autoneg)
309 hw->fc.requested_mode = e1000_fc_default; 305 hw->fc.requested_mode = e1000_fc_default;
@@ -346,7 +342,7 @@ static void e1000_get_pauseparam(struct net_device *netdev,
346 struct e1000_hw *hw = &adapter->hw; 342 struct e1000_hw *hw = &adapter->hw;
347 343
348 pause->autoneg = 344 pause->autoneg =
349 (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); 345 (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
350 346
351 if (hw->fc.current_mode == e1000_fc_rx_pause) { 347 if (hw->fc.current_mode == e1000_fc_rx_pause) {
352 pause->rx_pause = 1; 348 pause->rx_pause = 1;
@@ -435,7 +431,7 @@ static void e1000_get_regs(struct net_device *netdev,
435 memset(p, 0, E1000_REGS_LEN * sizeof(u32)); 431 memset(p, 0, E1000_REGS_LEN * sizeof(u32));
436 432
437 regs->version = (1 << 24) | (adapter->pdev->revision << 16) | 433 regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
438 adapter->pdev->device; 434 adapter->pdev->device;
439 435
440 regs_buff[0] = er32(CTRL); 436 regs_buff[0] = er32(CTRL);
441 regs_buff[1] = er32(STATUS); 437 regs_buff[1] = er32(STATUS);
@@ -503,8 +499,8 @@ static int e1000_get_eeprom(struct net_device *netdev,
503 first_word = eeprom->offset >> 1; 499 first_word = eeprom->offset >> 1;
504 last_word = (eeprom->offset + eeprom->len - 1) >> 1; 500 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
505 501
506 eeprom_buff = kmalloc(sizeof(u16) * 502 eeprom_buff = kmalloc(sizeof(u16) * (last_word - first_word + 1),
507 (last_word - first_word + 1), GFP_KERNEL); 503 GFP_KERNEL);
508 if (!eeprom_buff) 504 if (!eeprom_buff)
509 return -ENOMEM; 505 return -ENOMEM;
510 506
@@ -515,7 +511,7 @@ static int e1000_get_eeprom(struct net_device *netdev,
515 } else { 511 } else {
516 for (i = 0; i < last_word - first_word + 1; i++) { 512 for (i = 0; i < last_word - first_word + 1; i++) {
517 ret_val = e1000_read_nvm(hw, first_word + i, 1, 513 ret_val = e1000_read_nvm(hw, first_word + i, 1,
518 &eeprom_buff[i]); 514 &eeprom_buff[i]);
519 if (ret_val) 515 if (ret_val)
520 break; 516 break;
521 } 517 }
@@ -553,7 +549,8 @@ static int e1000_set_eeprom(struct net_device *netdev,
553 if (eeprom->len == 0) 549 if (eeprom->len == 0)
554 return -EOPNOTSUPP; 550 return -EOPNOTSUPP;
555 551
556 if (eeprom->magic != (adapter->pdev->vendor | (adapter->pdev->device << 16))) 552 if (eeprom->magic !=
553 (adapter->pdev->vendor | (adapter->pdev->device << 16)))
557 return -EFAULT; 554 return -EFAULT;
558 555
559 if (adapter->flags & FLAG_READ_ONLY_NVM) 556 if (adapter->flags & FLAG_READ_ONLY_NVM)
@@ -579,7 +576,7 @@ static int e1000_set_eeprom(struct net_device *netdev,
579 /* need read/modify/write of last changed EEPROM word */ 576 /* need read/modify/write of last changed EEPROM word */
580 /* only the first byte of the word is being modified */ 577 /* only the first byte of the word is being modified */
581 ret_val = e1000_read_nvm(hw, last_word, 1, 578 ret_val = e1000_read_nvm(hw, last_word, 1,
582 &eeprom_buff[last_word - first_word]); 579 &eeprom_buff[last_word - first_word]);
583 580
584 if (ret_val) 581 if (ret_val)
585 goto out; 582 goto out;
@@ -618,8 +615,7 @@ static void e1000_get_drvinfo(struct net_device *netdev,
618{ 615{
619 struct e1000_adapter *adapter = netdev_priv(netdev); 616 struct e1000_adapter *adapter = netdev_priv(netdev);
620 617
621 strlcpy(drvinfo->driver, e1000e_driver_name, 618 strlcpy(drvinfo->driver, e1000e_driver_name, sizeof(drvinfo->driver));
622 sizeof(drvinfo->driver));
623 strlcpy(drvinfo->version, e1000e_driver_version, 619 strlcpy(drvinfo->version, e1000e_driver_version,
624 sizeof(drvinfo->version)); 620 sizeof(drvinfo->version));
625 621
@@ -627,10 +623,10 @@ static void e1000_get_drvinfo(struct net_device *netdev,
627 * PCI-E controllers 623 * PCI-E controllers
628 */ 624 */
629 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), 625 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
630 "%d.%d-%d", 626 "%d.%d-%d",
631 (adapter->eeprom_vers & 0xF000) >> 12, 627 (adapter->eeprom_vers & 0xF000) >> 12,
632 (adapter->eeprom_vers & 0x0FF0) >> 4, 628 (adapter->eeprom_vers & 0x0FF0) >> 4,
633 (adapter->eeprom_vers & 0x000F)); 629 (adapter->eeprom_vers & 0x000F));
634 630
635 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 631 strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
636 sizeof(drvinfo->bus_info)); 632 sizeof(drvinfo->bus_info));
@@ -756,7 +752,8 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
756{ 752{
757 u32 pat, val; 753 u32 pat, val;
758 static const u32 test[] = { 754 static const u32 test[] = {
759 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; 755 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
756 };
760 for (pat = 0; pat < ARRAY_SIZE(test); pat++) { 757 for (pat = 0; pat < ARRAY_SIZE(test); pat++) {
761 E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset, 758 E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset,
762 (test[pat] & write)); 759 (test[pat] & write));
@@ -786,6 +783,7 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
786 } 783 }
787 return 0; 784 return 0;
788} 785}
786
789#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \ 787#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \
790 do { \ 788 do { \
791 if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \ 789 if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \
@@ -813,16 +811,16 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
813 u32 wlock_mac = 0; 811 u32 wlock_mac = 0;
814 812
815 /* The status register is Read Only, so a write should fail. 813 /* The status register is Read Only, so a write should fail.
816 * Some bits that get toggled are ignored. 814 * Some bits that get toggled are ignored. There are several bits
815 * on newer hardware that are r/w.
817 */ 816 */
818 switch (mac->type) { 817 switch (mac->type) {
819 /* there are several bits on newer hardware that are r/w */
820 case e1000_82571: 818 case e1000_82571:
821 case e1000_82572: 819 case e1000_82572:
822 case e1000_80003es2lan: 820 case e1000_80003es2lan:
823 toggle = 0x7FFFF3FF; 821 toggle = 0x7FFFF3FF;
824 break; 822 break;
825 default: 823 default:
826 toggle = 0x7FFFF033; 824 toggle = 0x7FFFF033;
827 break; 825 break;
828 } 826 }
@@ -928,7 +926,7 @@ static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
928 } 926 }
929 927
930 /* If Checksum is not Correct return error else test passed */ 928 /* If Checksum is not Correct return error else test passed */
931 if ((checksum != (u16) NVM_SUM) && !(*data)) 929 if ((checksum != (u16)NVM_SUM) && !(*data))
932 *data = 2; 930 *data = 2;
933 931
934 return *data; 932 return *data;
@@ -936,7 +934,7 @@ static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
936 934
937static irqreturn_t e1000_test_intr(int __always_unused irq, void *data) 935static irqreturn_t e1000_test_intr(int __always_unused irq, void *data)
938{ 936{
939 struct net_device *netdev = (struct net_device *) data; 937 struct net_device *netdev = (struct net_device *)data;
940 struct e1000_adapter *adapter = netdev_priv(netdev); 938 struct e1000_adapter *adapter = netdev_priv(netdev);
941 struct e1000_hw *hw = &adapter->hw; 939 struct e1000_hw *hw = &adapter->hw;
942 940
@@ -969,8 +967,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
969 if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, 967 if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
970 netdev)) { 968 netdev)) {
971 shared_int = 0; 969 shared_int = 0;
972 } else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, 970 } else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, netdev->name,
973 netdev->name, netdev)) { 971 netdev)) {
974 *data = 1; 972 *data = 1;
975 ret_val = -1; 973 ret_val = -1;
976 goto out; 974 goto out;
@@ -1080,28 +1078,33 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
1080 struct e1000_ring *tx_ring = &adapter->test_tx_ring; 1078 struct e1000_ring *tx_ring = &adapter->test_tx_ring;
1081 struct e1000_ring *rx_ring = &adapter->test_rx_ring; 1079 struct e1000_ring *rx_ring = &adapter->test_rx_ring;
1082 struct pci_dev *pdev = adapter->pdev; 1080 struct pci_dev *pdev = adapter->pdev;
1081 struct e1000_buffer *buffer_info;
1083 int i; 1082 int i;
1084 1083
1085 if (tx_ring->desc && tx_ring->buffer_info) { 1084 if (tx_ring->desc && tx_ring->buffer_info) {
1086 for (i = 0; i < tx_ring->count; i++) { 1085 for (i = 0; i < tx_ring->count; i++) {
1087 if (tx_ring->buffer_info[i].dma) 1086 buffer_info = &tx_ring->buffer_info[i];
1087
1088 if (buffer_info->dma)
1088 dma_unmap_single(&pdev->dev, 1089 dma_unmap_single(&pdev->dev,
1089 tx_ring->buffer_info[i].dma, 1090 buffer_info->dma,
1090 tx_ring->buffer_info[i].length, 1091 buffer_info->length,
1091 DMA_TO_DEVICE); 1092 DMA_TO_DEVICE);
1092 if (tx_ring->buffer_info[i].skb) 1093 if (buffer_info->skb)
1093 dev_kfree_skb(tx_ring->buffer_info[i].skb); 1094 dev_kfree_skb(buffer_info->skb);
1094 } 1095 }
1095 } 1096 }
1096 1097
1097 if (rx_ring->desc && rx_ring->buffer_info) { 1098 if (rx_ring->desc && rx_ring->buffer_info) {
1098 for (i = 0; i < rx_ring->count; i++) { 1099 for (i = 0; i < rx_ring->count; i++) {
1099 if (rx_ring->buffer_info[i].dma) 1100 buffer_info = &rx_ring->buffer_info[i];
1101
1102 if (buffer_info->dma)
1100 dma_unmap_single(&pdev->dev, 1103 dma_unmap_single(&pdev->dev,
1101 rx_ring->buffer_info[i].dma, 1104 buffer_info->dma,
1102 2048, DMA_FROM_DEVICE); 1105 2048, DMA_FROM_DEVICE);
1103 if (rx_ring->buffer_info[i].skb) 1106 if (buffer_info->skb)
1104 dev_kfree_skb(rx_ring->buffer_info[i].skb); 1107 dev_kfree_skb(buffer_info->skb);
1105 } 1108 }
1106 } 1109 }
1107 1110
@@ -1138,8 +1141,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1138 tx_ring->count = E1000_DEFAULT_TXD; 1141 tx_ring->count = E1000_DEFAULT_TXD;
1139 1142
1140 tx_ring->buffer_info = kcalloc(tx_ring->count, 1143 tx_ring->buffer_info = kcalloc(tx_ring->count,
1141 sizeof(struct e1000_buffer), 1144 sizeof(struct e1000_buffer), GFP_KERNEL);
1142 GFP_KERNEL);
1143 if (!tx_ring->buffer_info) { 1145 if (!tx_ring->buffer_info) {
1144 ret_val = 1; 1146 ret_val = 1;
1145 goto err_nomem; 1147 goto err_nomem;
@@ -1156,8 +1158,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1156 tx_ring->next_to_use = 0; 1158 tx_ring->next_to_use = 0;
1157 tx_ring->next_to_clean = 0; 1159 tx_ring->next_to_clean = 0;
1158 1160
1159 ew32(TDBAL(0), ((u64) tx_ring->dma & 0x00000000FFFFFFFF)); 1161 ew32(TDBAL(0), ((u64)tx_ring->dma & 0x00000000FFFFFFFF));
1160 ew32(TDBAH(0), ((u64) tx_ring->dma >> 32)); 1162 ew32(TDBAH(0), ((u64)tx_ring->dma >> 32));
1161 ew32(TDLEN(0), tx_ring->count * sizeof(struct e1000_tx_desc)); 1163 ew32(TDLEN(0), tx_ring->count * sizeof(struct e1000_tx_desc));
1162 ew32(TDH(0), 0); 1164 ew32(TDH(0), 0);
1163 ew32(TDT(0), 0); 1165 ew32(TDT(0), 0);
@@ -1179,8 +1181,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 		tx_ring->buffer_info[i].skb = skb;
 		tx_ring->buffer_info[i].length = skb->len;
 		tx_ring->buffer_info[i].dma =
-			dma_map_single(&pdev->dev, skb->data, skb->len,
-				       DMA_TO_DEVICE);
+		    dma_map_single(&pdev->dev, skb->data, skb->len,
+				   DMA_TO_DEVICE);
 		if (dma_mapping_error(&pdev->dev,
 				      tx_ring->buffer_info[i].dma)) {
 			ret_val = 4;
@@ -1200,8 +1202,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 	rx_ring->count = E1000_DEFAULT_RXD;
 
 	rx_ring->buffer_info = kcalloc(rx_ring->count,
-				       sizeof(struct e1000_buffer),
-				       GFP_KERNEL);
+				       sizeof(struct e1000_buffer), GFP_KERNEL);
 	if (!rx_ring->buffer_info) {
 		ret_val = 5;
 		goto err_nomem;
@@ -1220,16 +1221,16 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 	rctl = er32(RCTL);
 	if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
 		ew32(RCTL, rctl & ~E1000_RCTL_EN);
-	ew32(RDBAL(0), ((u64) rx_ring->dma & 0xFFFFFFFF));
-	ew32(RDBAH(0), ((u64) rx_ring->dma >> 32));
+	ew32(RDBAL(0), ((u64)rx_ring->dma & 0xFFFFFFFF));
+	ew32(RDBAH(0), ((u64)rx_ring->dma >> 32));
 	ew32(RDLEN(0), rx_ring->size);
 	ew32(RDH(0), 0);
 	ew32(RDT(0), 0);
 	rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
 	       E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE |
 	       E1000_RCTL_SBP | E1000_RCTL_SECRC |
 	       E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
 	       (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
 	ew32(RCTL, rctl);
 
 	for (i = 0; i < rx_ring->count; i++) {
@@ -1244,8 +1245,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
 		skb_reserve(skb, NET_IP_ALIGN);
 		rx_ring->buffer_info[i].skb = skb;
 		rx_ring->buffer_info[i].dma =
-			dma_map_single(&pdev->dev, skb->data, 2048,
-				       DMA_FROM_DEVICE);
+		    dma_map_single(&pdev->dev, skb->data, 2048,
+				   DMA_FROM_DEVICE);
 		if (dma_mapping_error(&pdev->dev,
 				      rx_ring->buffer_info[i].dma)) {
 			ret_val = 8;
@@ -1296,7 +1297,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
 
 	ew32(CTRL, ctrl_reg);
 	e1e_flush();
-	udelay(500);
+	usleep_range(500, 1000);
 
 	return 0;
 }
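
This is the first of many hunks in the series that swap busy-wait delays for sleeping ones: udelay()/mdelay() spin the CPU, while usleep_range()/msleep() yield it to the scheduler, which is preferable when the caller is allowed to sleep. A minimal sketch of the convention, assuming a non-atomic (sleepable) context; the function name is hypothetical:

	#include <linux/delay.h>

	static void example_settle_after_write(void)
	{
		/* atomic context only: busy-waits the CPU for 500us */
		udelay(500);

		/* process context: sleeps between 500us and 1000us and
		 * lets the timer subsystem coalesce nearby wakeups
		 */
		usleep_range(500, 1000);
	}
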
@@ -1322,7 +1323,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
 	e1e_wphy(hw, PHY_REG(2, 21), phy_reg);
 	/* Assert SW reset for above settings to take effect */
 	hw->phy.ops.commit(hw);
-	mdelay(1);
+	usleep_range(1000, 2000);
 	/* Force Full Duplex */
 	e1e_rphy(hw, PHY_REG(769, 16), &phy_reg);
 	e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x000C);
@@ -1363,7 +1364,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
 
 	/* force 1000, set loopback */
 	e1e_wphy(hw, MII_BMCR, 0x4140);
-	mdelay(250);
+	msleep(250);
 
 	/* Now set up the MAC to the same speed/duplex as the PHY. */
 	ctrl_reg = er32(CTRL);
@@ -1395,7 +1396,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
 	if (hw->phy.type == e1000_phy_m88)
 		e1000_phy_disable_receiver(adapter);
 
-	udelay(500);
+	usleep_range(500, 1000);
 
 	return 0;
 }
@@ -1431,8 +1432,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
 	/* special write to serdes control register to enable SerDes analog
 	 * loopback
 	 */
-#define E1000_SERDES_LB_ON 0x410
-	ew32(SCTL, E1000_SERDES_LB_ON);
+	ew32(SCTL, E1000_SCTL_ENABLE_SERDES_LOOPBACK);
 	e1e_flush();
 	usleep_range(10000, 20000);
 
@@ -1526,8 +1526,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
 	case e1000_82572:
 		if (hw->phy.media_type == e1000_media_type_fiber ||
 		    hw->phy.media_type == e1000_media_type_internal_serdes) {
-#define E1000_SERDES_LB_OFF 0x400
-			ew32(SCTL, E1000_SERDES_LB_OFF);
+			ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
 			e1e_flush();
 			usleep_range(10000, 20000);
 			break;
@@ -1564,7 +1563,7 @@ static int e1000_check_lbtest_frame(struct sk_buff *skb,
 	frame_size &= ~1;
 	if (*(skb->data + 3) == 0xFF)
 		if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
-		    (*(skb->data + frame_size / 2 + 12) == 0xAF))
+		    (*(skb->data + frame_size / 2 + 12) == 0xAF))
 			return 0;
 	return 13;
 }
@@ -1575,6 +1574,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
 	struct e1000_ring *rx_ring = &adapter->test_rx_ring;
 	struct pci_dev *pdev = adapter->pdev;
 	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_buffer *buffer_info;
 	int i, j, k, l;
 	int lc;
 	int good_cnt;
@@ -1595,14 +1595,17 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
 
 	k = 0;
 	l = 0;
-	for (j = 0; j <= lc; j++) { /* loop count loop */
-		for (i = 0; i < 64; i++) { /* send the packets */
-			e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb,
-						  1024);
+	/* loop count loop */
+	for (j = 0; j <= lc; j++) {
+		/* send the packets */
+		for (i = 0; i < 64; i++) {
+			buffer_info = &tx_ring->buffer_info[k];
+
+			e1000_create_lbtest_frame(buffer_info->skb, 1024);
 			dma_sync_single_for_device(&pdev->dev,
-						   tx_ring->buffer_info[k].dma,
-						   tx_ring->buffer_info[k].length,
+						   buffer_info->dma,
+						   buffer_info->length,
 						   DMA_TO_DEVICE);
 			k++;
 			if (k == tx_ring->count)
 				k = 0;
@@ -1612,13 +1615,16 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
 		msleep(200);
 		time = jiffies; /* set the start time for the receive */
 		good_cnt = 0;
-		do { /* receive the sent packets */
+		/* receive the sent packets */
+		do {
+			buffer_info = &rx_ring->buffer_info[l];
+
 			dma_sync_single_for_cpu(&pdev->dev,
-						rx_ring->buffer_info[l].dma, 2048,
+						buffer_info->dma, 2048,
 						DMA_FROM_DEVICE);
 
-			ret_val = e1000_check_lbtest_frame(
-				rx_ring->buffer_info[l].skb, 1024);
+			ret_val = e1000_check_lbtest_frame(buffer_info->skb,
+							   1024);
 			if (!ret_val)
 				good_cnt++;
 			l++;
@@ -1637,7 +1643,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
 			ret_val = 14; /* error code for time out error */
 			break;
 		}
-	} /* end loop count loop */
+	}
 	return ret_val;
 }
 
@@ -1696,7 +1702,7 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
 		/* On some Phy/switch combinations, link establishment
 		 * can take a few seconds more than expected.
 		 */
-		msleep(5000);
+		msleep_interruptible(5000);
 
 	if (!(er32(STATUS) & E1000_STATUS_LU))
 		*data = 1;
@@ -1980,12 +1986,12 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
 	for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
 		switch (e1000_gstrings_stats[i].type) {
 		case NETDEV_STATS:
-			p = (char *) &net_stats +
+			p = (char *)&net_stats +
 			    e1000_gstrings_stats[i].stat_offset;
 			break;
 		case E1000_STATS:
-			p = (char *) adapter +
+			p = (char *)adapter +
 			    e1000_gstrings_stats[i].stat_offset;
 			break;
 		default:
 			data[i] = 0;
@@ -1993,7 +1999,7 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
 		}
 
 		data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
-			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
 }
 
@@ -2069,23 +2075,20 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
-	u16 cap_addr, adv_addr, lpa_addr, pcs_stat_addr, phy_data, lpi_ctrl;
-	u32 status, ret_val;
+	u16 cap_addr, lpa_addr, pcs_stat_addr, phy_data;
+	u32 ret_val;
 
-	if (!(adapter->flags & FLAG_IS_ICH) ||
-	    !(adapter->flags2 & FLAG2_HAS_EEE))
+	if (!(adapter->flags2 & FLAG2_HAS_EEE))
 		return -EOPNOTSUPP;
 
 	switch (hw->phy.type) {
 	case e1000_phy_82579:
 		cap_addr = I82579_EEE_CAPABILITY;
-		adv_addr = I82579_EEE_ADVERTISEMENT;
 		lpa_addr = I82579_EEE_LP_ABILITY;
 		pcs_stat_addr = I82579_EEE_PCS_STATUS;
 		break;
 	case e1000_phy_i217:
 		cap_addr = I217_EEE_CAPABILITY;
-		adv_addr = I217_EEE_ADVERTISEMENT;
 		lpa_addr = I217_EEE_LP_ABILITY;
 		pcs_stat_addr = I217_EEE_PCS_STATUS;
 		break;
@@ -2104,10 +2107,7 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
 	edata->supported = mmd_eee_cap_to_ethtool_sup_t(phy_data);
 
 	/* EEE Advertised */
-	ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &phy_data);
-	if (ret_val)
-		goto release;
-	edata->advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data);
+	edata->advertised = mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert);
 
 	/* EEE Link Partner Advertised */
 	ret_val = e1000_read_emi_reg_locked(hw, lpa_addr, &phy_data);
@@ -2125,25 +2125,11 @@ release:
 	if (ret_val)
 		return -ENODATA;
 
-	e1e_rphy(hw, I82579_LPI_CTRL, &lpi_ctrl);
-	status = er32(STATUS);
-
 	/* Result of the EEE auto negotiation - there is no register that
 	 * has the status of the EEE negotiation so do a best-guess based
-	 * on whether both Tx and Rx LPI indications have been received or
-	 * base it on the link speed, the EEE advertised speeds on both ends
-	 * and the speeds on which EEE is enabled locally.
+	 * on whether Tx or Rx LPI indications have been received.
 	 */
-	if (((phy_data & E1000_EEE_TX_LPI_RCVD) &&
-	     (phy_data & E1000_EEE_RX_LPI_RCVD)) ||
-	    ((status & E1000_STATUS_SPEED_100) &&
-	     (edata->advertised & ADVERTISED_100baseT_Full) &&
-	     (edata->lp_advertised & ADVERTISED_100baseT_Full) &&
-	     (lpi_ctrl & I82579_LPI_CTRL_100_ENABLE)) ||
-	    ((status & E1000_STATUS_SPEED_1000) &&
-	     (edata->advertised & ADVERTISED_1000baseT_Full) &&
-	     (edata->lp_advertised & ADVERTISED_1000baseT_Full) &&
-	     (lpi_ctrl & I82579_LPI_CTRL_1000_ENABLE)))
+	if (phy_data & (E1000_EEE_TX_LPI_RCVD | E1000_EEE_RX_LPI_RCVD))
 		edata->eee_active = true;
 
 	edata->eee_enabled = !hw->dev_spec.ich8lan.eee_disable;
@@ -2160,19 +2146,10 @@ static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
 	struct ethtool_eee eee_curr;
 	s32 ret_val;
 
-	if (!(adapter->flags & FLAG_IS_ICH) ||
-	    !(adapter->flags2 & FLAG2_HAS_EEE))
-		return -EOPNOTSUPP;
-
 	ret_val = e1000e_get_eee(netdev, &eee_curr);
 	if (ret_val)
 		return ret_val;
 
-	if (eee_curr.advertised != edata->advertised) {
-		e_err("Setting EEE advertisement is not supported\n");
-		return -EINVAL;
-	}
-
 	if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) {
 		e_err("Setting EEE tx-lpi is not supported\n");
 		return -EINVAL;
@@ -2183,16 +2160,21 @@ static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
 		return -EINVAL;
 	}
 
-	if (hw->dev_spec.ich8lan.eee_disable != !edata->eee_enabled) {
-		hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled;
-
-		/* reset the link */
-		if (netif_running(netdev))
-			e1000e_reinit_locked(adapter);
-		else
-			e1000e_reset(adapter);
+	if (edata->advertised & ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) {
+		e_err("EEE advertisement supports only 100TX and/or 1000T full-duplex\n");
+		return -EINVAL;
 	}
 
+	adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised);
+
+	hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled;
+
+	/* reset the link */
+	if (netif_running(netdev))
+		e1000e_reinit_locked(adapter);
+	else
+		e1000e_reset(adapter);
+
 	return 0;
 }
 
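
The reworked e1000e_set_eee() above stores the requested advertisement by translating ethtool's ADVERTISED_* link-mode bits into the IEEE MMD 7.60 (EEE advertisement) bit layout, and e1000e_get_eee() translates back the other way. A small round-trip sketch using the helpers from <linux/mdio.h>; the function name is hypothetical:

	#include <linux/ethtool.h>
	#include <linux/mdio.h>

	static u16 example_eee_advert_roundtrip(void)
	{
		u32 adv = ADVERTISED_100baseT_Full | ADVERTISED_1000baseT_Full;
		u16 mmd;

		/* ethtool bits -> MDIO_EEE_100TX | MDIO_EEE_1000T (reg 7.60) */
		mmd = ethtool_adv_to_mmd_eee_adv_t(adv);

		/* converting back recovers the original ethtool bits */
		return mmd_eee_adv_to_ethtool_adv_t(mmd) == adv ? mmd : 0;
	}
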
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 1e6b889aee87..84850f7a23e4 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -167,7 +167,7 @@ enum e1000_1000t_rx_status {
 	e1000_1000t_rx_status_undefined = 0xFF
 };
 
-enum e1000_rev_polarity{
+enum e1000_rev_polarity {
 	e1000_rev_polarity_normal = 0,
 	e1000_rev_polarity_reversed,
 	e1000_rev_polarity_undefined = 0xFF
@@ -545,7 +545,7 @@ struct e1000_mac_info {
 	u16 mta_reg_count;
 
 	/* Maximum size of the MTA register table in all supported adapters */
-	#define MAX_MTA_REG 128
+#define MAX_MTA_REG 128
 	u32 mta_shadow[MAX_MTA_REG];
 	u16 rar_entry_count;
 
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 121a865c7fbd..ad9d8f2dd868 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -61,15 +61,15 @@
 /* Offset 04h HSFSTS */
 union ich8_hws_flash_status {
 	struct ich8_hsfsts {
-		u16 flcdone :1;    /* bit 0 Flash Cycle Done */
-		u16 flcerr :1;     /* bit 1 Flash Cycle Error */
-		u16 dael :1;       /* bit 2 Direct Access error Log */
-		u16 berasesz :2;   /* bit 4:3 Sector Erase Size */
-		u16 flcinprog :1;  /* bit 5 flash cycle in Progress */
-		u16 reserved1 :2;  /* bit 13:6 Reserved */
-		u16 reserved2 :6;  /* bit 13:6 Reserved */
-		u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
-		u16 flockdn :1;    /* bit 15 Flash Config Lock-Down */
+		u16 flcdone:1;    /* bit 0 Flash Cycle Done */
+		u16 flcerr:1;     /* bit 1 Flash Cycle Error */
+		u16 dael:1;       /* bit 2 Direct Access error Log */
+		u16 berasesz:2;   /* bit 4:3 Sector Erase Size */
+		u16 flcinprog:1;  /* bit 5 flash cycle in Progress */
+		u16 reserved1:2;  /* bit 13:6 Reserved */
+		u16 reserved2:6;  /* bit 13:6 Reserved */
+		u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */
+		u16 flockdn:1;    /* bit 15 Flash Config Lock-Down */
 	} hsf_status;
 	u16 regval;
 };
@@ -78,11 +78,11 @@ union ich8_hws_flash_status {
 /* Offset 06h FLCTL */
 union ich8_hws_flash_ctrl {
 	struct ich8_hsflctl {
-		u16 flcgo :1;     /* 0 Flash Cycle Go */
-		u16 flcycle :2;   /* 2:1 Flash Cycle */
-		u16 reserved :5;  /* 7:3 Reserved */
-		u16 fldbcount :2; /* 9:8 Flash Data Byte Count */
-		u16 flockdn :6;   /* 15:10 Reserved */
+		u16 flcgo:1;     /* 0 Flash Cycle Go */
+		u16 flcycle:2;   /* 2:1 Flash Cycle */
+		u16 reserved:5;  /* 7:3 Reserved */
+		u16 fldbcount:2; /* 9:8 Flash Data Byte Count */
+		u16 flockdn:6;   /* 15:10 Reserved */
 	} hsf_ctrl;
 	u16 regval;
 };
@@ -90,10 +90,10 @@ union ich8_hws_flash_ctrl {
 /* ICH Flash Region Access Permissions */
 union ich8_hws_flash_regacc {
 	struct ich8_flracc {
-		u32 grra :8;  /* 0:7 GbE region Read Access */
-		u32 grwa :8;  /* 8:15 GbE region Write Access */
-		u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */
-		u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */
+		u32 grra:8;  /* 0:7 GbE region Read Access */
+		u32 grwa:8;  /* 8:15 GbE region Write Access */
+		u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */
+		u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */
 	} hsf_flregacc;
 	u16 regval;
 };
@@ -142,6 +142,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
 static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
 static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
+static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw);
 
 static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
 {
@@ -312,7 +313,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
 		mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
 		ew32(CTRL, mac_reg);
 		e1e_flush();
-		udelay(10);
+		usleep_range(10, 20);
 		mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
 		ew32(CTRL, mac_reg);
 		e1e_flush();
@@ -548,8 +549,8 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
 	/* find total size of the NVM, then cut in half since the total
 	 * size represents two separate NVM banks.
 	 */
-	nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
-				<< FLASH_SECTOR_ADDR_SHIFT;
+	nvm->flash_bank_size = ((sector_end_addr - sector_base_addr)
+				<< FLASH_SECTOR_ADDR_SHIFT);
 	nvm->flash_bank_size /= 2;
 	/* Adjust to word count */
 	nvm->flash_bank_size /= sizeof(u16);
@@ -636,6 +637,8 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
 	if (mac->type == e1000_pch_lpt) {
 		mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
 		mac->ops.rar_set = e1000_rar_set_pch_lpt;
+		mac->ops.setup_physical_interface =
+		    e1000_setup_copper_link_pch_lpt;
 	}
 
 	/* Enable PCS Lock-loss workaround for ICH8 */
@@ -692,7 +695,7 @@ s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data)
  *
  *  Assumes the SW/FW/HW Semaphore is already acquired.
  **/
-static s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
+s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data)
 {
 	return __e1000_access_emi_reg_locked(hw, addr, &data, false);
 }
@@ -709,11 +712,22 @@ static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
 {
 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
 	s32 ret_val;
-	u16 lpi_ctrl;
+	u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data;
 
-	if ((hw->phy.type != e1000_phy_82579) &&
-	    (hw->phy.type != e1000_phy_i217))
+	switch (hw->phy.type) {
+	case e1000_phy_82579:
+		lpa = I82579_EEE_LP_ABILITY;
+		pcs_status = I82579_EEE_PCS_STATUS;
+		adv_addr = I82579_EEE_ADVERTISEMENT;
+		break;
+	case e1000_phy_i217:
+		lpa = I217_EEE_LP_ABILITY;
+		pcs_status = I217_EEE_PCS_STATUS;
+		adv_addr = I217_EEE_ADVERTISEMENT;
+		break;
+	default:
 		return 0;
+	}
 
 	ret_val = hw->phy.ops.acquire(hw);
 	if (ret_val)
@@ -728,34 +742,24 @@ static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
 
 	/* Enable EEE if not disabled by user */
 	if (!dev_spec->eee_disable) {
-		u16 lpa, pcs_status, data;
-
 		/* Save off link partner's EEE ability */
-		switch (hw->phy.type) {
-		case e1000_phy_82579:
-			lpa = I82579_EEE_LP_ABILITY;
-			pcs_status = I82579_EEE_PCS_STATUS;
-			break;
-		case e1000_phy_i217:
-			lpa = I217_EEE_LP_ABILITY;
-			pcs_status = I217_EEE_PCS_STATUS;
-			break;
-		default:
-			ret_val = -E1000_ERR_PHY;
-			goto release;
-		}
 		ret_val = e1000_read_emi_reg_locked(hw, lpa,
 						    &dev_spec->eee_lp_ability);
 		if (ret_val)
 			goto release;
 
+		/* Read EEE advertisement */
+		ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv);
+		if (ret_val)
+			goto release;
+
 		/* Enable EEE only for speeds in which the link partner is
-		 * EEE capable.
+		 * EEE capable and for which we advertise EEE.
 		 */
-		if (dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
+		if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED)
 			lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE;
 
-		if (dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
+		if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) {
 			e1e_rphy_locked(hw, MII_LPA, &data);
 			if (data & LPA_100FULL)
 				lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE;
@@ -767,13 +771,13 @@ static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
 			dev_spec->eee_lp_ability &=
 			    ~I82579_EEE_100_SUPPORTED;
 		}
-
-		/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
-		ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
-		if (ret_val)
-			goto release;
 	}
 
+	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
+	ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
+	if (ret_val)
+		goto release;
+
 	ret_val = e1e_wphy_locked(hw, I82579_LPI_CTRL, lpi_ctrl);
 release:
 	hw->phy.ops.release(hw);
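
After this change, e1000_set_eee_pchlan() enables LPI for a speed only when that speed's bit is set in both the local EEE advertisement (MMD 7.60) and the link partner's ability word (MMD 7.61), i.e. a plain bitwise intersection. A tiny sketch of the check; the names and sample constants are illustrative (they mirror the 7.60/7.61 layout, where bit 1 is 100TX and bit 2 is 1000T):

	#include <stdint.h>

	#define EEE_100TX 0x0002	/* MMD 7.60/7.61 bit 1 */
	#define EEE_1000T 0x0004	/* MMD 7.60/7.61 bit 2 */

	static int eee_speed_usable(uint16_t adv, uint16_t lp_ability,
				    uint16_t speed_bit)
	{
		/* both ends must list the speed for LPI to make sense */
		return (adv & lp_ability & speed_bit) != 0;
	}
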
@@ -835,6 +839,94 @@ release:
835} 839}
836 840
837/** 841/**
842 * e1000_platform_pm_pch_lpt - Set platform power management values
843 * @hw: pointer to the HW structure
844 * @link: bool indicating link status
845 *
846 * Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like"
847 * GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed
848 * when link is up (which must not exceed the maximum latency supported
849 * by the platform), otherwise specify there is no LTR requirement.
850 * Unlike true-PCIe devices which set the LTR maximum snoop/no-snoop
851 * latencies in the LTR Extended Capability Structure in the PCIe Extended
852 * Capability register set, on this device LTR is set by writing the
853 * equivalent snoop/no-snoop latencies in the LTRV register in the MAC and
854 * set the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB)
855 * message to the PMC.
856 **/
857static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
858{
859 u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) |
860 link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND;
861 u16 lat_enc = 0; /* latency encoded */
862
863 if (link) {
864 u16 speed, duplex, scale = 0;
865 u16 max_snoop, max_nosnoop;
866 u16 max_ltr_enc; /* max LTR latency encoded */
867 s64 lat_ns; /* latency (ns) */
868 s64 value;
869 u32 rxa;
870
871 if (!hw->adapter->max_frame_size) {
872 e_dbg("max_frame_size not set.\n");
873 return -E1000_ERR_CONFIG;
874 }
875
876 hw->mac.ops.get_link_up_info(hw, &speed, &duplex);
877 if (!speed) {
878 e_dbg("Speed not set.\n");
879 return -E1000_ERR_CONFIG;
880 }
881
882 /* Rx Packet Buffer Allocation size (KB) */
883 rxa = er32(PBA) & E1000_PBA_RXA_MASK;
884
885 /* Determine the maximum latency tolerated by the device.
886 *
887 * Per the PCIe spec, the tolerated latencies are encoded as
888 * a 3-bit encoded scale (only 0-5 are valid) multiplied by
889 * a 10-bit value (0-1023) to provide a range from 1 ns to
890 * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns,
891 * 1=2^5ns, 2=2^10ns,...5=2^25ns.
892 */
893 lat_ns = ((s64)rxa * 1024 -
894 (2 * (s64)hw->adapter->max_frame_size)) * 8 * 1000;
895 if (lat_ns < 0)
896 lat_ns = 0;
897 else
898 do_div(lat_ns, speed);
899
900 value = lat_ns;
901 while (value > PCI_LTR_VALUE_MASK) {
902 scale++;
903 value = DIV_ROUND_UP(value, (1 << 5));
904 }
905 if (scale > E1000_LTRV_SCALE_MAX) {
906 e_dbg("Invalid LTR latency scale %d\n", scale);
907 return -E1000_ERR_CONFIG;
908 }
909 lat_enc = (u16)((scale << PCI_LTR_SCALE_SHIFT) | value);
910
911 /* Determine the maximum latency tolerated by the platform */
912 pci_read_config_word(hw->adapter->pdev, E1000_PCI_LTR_CAP_LPT,
913 &max_snoop);
914 pci_read_config_word(hw->adapter->pdev,
915 E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop);
916 max_ltr_enc = max_t(u16, max_snoop, max_nosnoop);
917
918 if (lat_enc > max_ltr_enc)
919 lat_enc = max_ltr_enc;
920 }
921
922 /* Set Snoop and No-Snoop latencies the same */
923 reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT);
924 ew32(LTRV, reg);
925
926 return 0;
927}
928
929/**
838 * e1000_check_for_copper_link_ich8lan - Check for link (Copper) 930 * e1000_check_for_copper_link_ich8lan - Check for link (Copper)
839 * @hw: pointer to the HW structure 931 * @hw: pointer to the HW structure
840 * 932 *
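
The new e1000_platform_pm_pch_lpt() above encodes a nanosecond latency as value * 2^(5*scale), with the 10-bit value in bits 9:0 and the 3-bit scale in bits 12:10, then caps the result at what the platform reports. A standalone sketch of just the encoding loop with a worked example; the local constants below are assumptions that mirror PCI_LTR_VALUE_MASK (0x3ff) and PCI_LTR_SCALE_SHIFT (10) from the PCI LTR capability layout:

	#include <stdint.h>
	#include <stdio.h>

	#define LTR_VALUE_MASK  0x3ff	/* 10-bit latency value */
	#define LTR_SCALE_SHIFT 10	/* scale lives in bits 12:10 */

	static uint16_t ltr_encode(uint64_t lat_ns)
	{
		uint16_t scale = 0;

		/* each scale step multiplies the unit by 2^5 = 32 ns */
		while (lat_ns > LTR_VALUE_MASK) {
			scale++;
			lat_ns = (lat_ns + 31) / 32; /* DIV_ROUND_UP(v, 1 << 5) */
		}
		return (uint16_t)((scale << LTR_SCALE_SHIFT) | lat_ns);
	}

	int main(void)
	{
		/* e.g. 100000 ns -> value 98, scale 2 (units of 1024 ns) */
		printf("0x%04x\n", ltr_encode(100000));
		return 0;
	}
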
@@ -871,6 +963,34 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 		return ret_val;
 	}
 
+	/* When connected at 10Mbps half-duplex, 82579 parts are excessively
+	 * aggressive resulting in many collisions. To avoid this, increase
+	 * the IPG and reduce Rx latency in the PHY.
+	 */
+	if ((hw->mac.type == e1000_pch2lan) && link) {
+		u32 reg;
+		reg = er32(STATUS);
+		if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
+			reg = er32(TIPG);
+			reg &= ~E1000_TIPG_IPGT_MASK;
+			reg |= 0xFF;
+			ew32(TIPG, reg);
+
+			/* Reduce Rx latency in analog PHY */
+			ret_val = hw->phy.ops.acquire(hw);
+			if (ret_val)
+				return ret_val;
+
+			ret_val =
+			    e1000_write_emi_reg_locked(hw, I82579_RX_CONFIG, 0);
+
+			hw->phy.ops.release(hw);
+
+			if (ret_val)
+				return ret_val;
+		}
+	}
+
 	/* Work-around I218 hang issue */
 	if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
 	    (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
@@ -879,6 +999,15 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
 			return ret_val;
 	}
 
+	if (hw->mac.type == e1000_pch_lpt) {
+		/* Set platform power management values for
+		 * Latency Tolerance Reporting (LTR)
+		 */
+		ret_val = e1000_platform_pm_pch_lpt(hw, link);
+		if (ret_val)
+			return ret_val;
+	}
+
 	/* Clear link partner's EEE ability */
 	hw->dev_spec.ich8lan.eee_lp_ability = 0;
 
@@ -1002,10 +1131,6 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
 	    (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
 		adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA;
 
-	/* Disable EEE by default until IEEE802.3az spec is finalized */
-	if (adapter->flags2 & FLAG2_HAS_EEE)
-		adapter->hw.dev_spec.ich8lan.eee_disable = true;
-
 	return 0;
 }
 
@@ -1134,9 +1259,9 @@ static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
 	u32 fwsm;
 
 	fwsm = er32(FWSM);
-	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
-	       ((fwsm & E1000_FWSM_MODE_MASK) ==
-		(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
+	return ((fwsm & E1000_ICH_FWSM_FW_VALID) &&
+		((fwsm & E1000_FWSM_MODE_MASK) ==
+		 (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)));
 }
 
 /**
@@ -1153,7 +1278,7 @@ static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
 
 	fwsm = er32(FWSM);
 	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
-	       (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
+	       (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
 }
 
 /**
@@ -1440,8 +1565,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
 	word_addr = (u16)(cnf_base_addr << 1);
 
 	for (i = 0; i < cnf_size; i++) {
-		ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1,
-					 &reg_data);
+		ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, &reg_data);
 		if (ret_val)
 			goto release;
 
@@ -1501,13 +1625,13 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
 		if (ret_val)
 			goto release;
 
-		status_reg &= BM_CS_STATUS_LINK_UP |
-			      BM_CS_STATUS_RESOLVED |
-			      BM_CS_STATUS_SPEED_MASK;
+		status_reg &= (BM_CS_STATUS_LINK_UP |
+			       BM_CS_STATUS_RESOLVED |
+			       BM_CS_STATUS_SPEED_MASK);
 
 		if (status_reg == (BM_CS_STATUS_LINK_UP |
 				   BM_CS_STATUS_RESOLVED |
 				   BM_CS_STATUS_SPEED_1000))
 			k1_enable = false;
 	}
 
@@ -1516,13 +1640,13 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
 		if (ret_val)
 			goto release;
 
-		status_reg &= HV_M_STATUS_LINK_UP |
-			      HV_M_STATUS_AUTONEG_COMPLETE |
-			      HV_M_STATUS_SPEED_MASK;
+		status_reg &= (HV_M_STATUS_LINK_UP |
+			       HV_M_STATUS_AUTONEG_COMPLETE |
+			       HV_M_STATUS_SPEED_MASK);
 
 		if (status_reg == (HV_M_STATUS_LINK_UP |
 				   HV_M_STATUS_AUTONEG_COMPLETE |
 				   HV_M_STATUS_SPEED_1000))
 			k1_enable = false;
 	}
 
@@ -1579,7 +1703,7 @@ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
 	if (ret_val)
 		return ret_val;
 
-	udelay(20);
+	usleep_range(20, 40);
 	ctrl_ext = er32(CTRL_EXT);
 	ctrl_reg = er32(CTRL);
 
@@ -1589,11 +1713,11 @@ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
 
 	ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
 	e1e_flush();
-	udelay(20);
+	usleep_range(20, 40);
 	ew32(CTRL, ctrl_reg);
 	ew32(CTRL_EXT, ctrl_ext);
 	e1e_flush();
-	udelay(20);
+	usleep_range(20, 40);
 
 	return 0;
 }
@@ -1667,7 +1791,6 @@ release:
 	return ret_val;
 }
 
-
 /**
  * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
  * @hw: pointer to the HW structure
@@ -1834,7 +1957,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
 	 * SHRAL/H) and initial CRC values to the MAC
 	 */
 	for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
-		u8 mac_addr[ETH_ALEN] = {0};
+		u8 mac_addr[ETH_ALEN] = { 0 };
 		u32 addr_high, addr_low;
 
 		addr_high = er32(RAH(i));
@@ -1865,8 +1988,8 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
 		ew32(RCTL, mac_reg);
 
 		ret_val = e1000e_read_kmrn_reg(hw,
-					       E1000_KMRNCTRLSTA_CTRL_OFFSET,
-					       &data);
+					       E1000_KMRNCTRLSTA_CTRL_OFFSET,
+					       &data);
 		if (ret_val)
 			return ret_val;
 		ret_val = e1000e_write_kmrn_reg(hw,
@@ -1875,8 +1998,8 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
 		if (ret_val)
 			return ret_val;
 		ret_val = e1000e_read_kmrn_reg(hw,
-					       E1000_KMRNCTRLSTA_HD_CTRL,
-					       &data);
+					       E1000_KMRNCTRLSTA_HD_CTRL,
+					       &data);
 		if (ret_val)
 			return ret_val;
 		data &= ~(0xF << 8);
@@ -1923,8 +2046,8 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
 		ew32(RCTL, mac_reg);
 
 		ret_val = e1000e_read_kmrn_reg(hw,
-					       E1000_KMRNCTRLSTA_CTRL_OFFSET,
-					       &data);
+					       E1000_KMRNCTRLSTA_CTRL_OFFSET,
+					       &data);
 		if (ret_val)
 			return ret_val;
 		ret_val = e1000e_write_kmrn_reg(hw,
@@ -1933,8 +2056,8 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
 		if (ret_val)
 			return ret_val;
 		ret_val = e1000e_read_kmrn_reg(hw,
-					       E1000_KMRNCTRLSTA_HD_CTRL,
-					       &data);
+					       E1000_KMRNCTRLSTA_HD_CTRL,
+					       &data);
 		if (ret_val)
 			return ret_val;
 		data &= ~(0xF << 8);
@@ -2100,7 +2223,7 @@ static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
 	do {
 		data = er32(STATUS);
 		data &= E1000_STATUS_LAN_INIT_DONE;
-		udelay(100);
+		usleep_range(100, 200);
 	} while ((!data) && --loop);
 
 	/* If basic configuration is incomplete before the above loop
@@ -2445,7 +2568,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
 
 		/* Check bank 0 */
 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
-							&sig_byte);
+							&sig_byte);
 		if (ret_val)
 			return ret_val;
 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
@@ -2456,8 +2579,8 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
 
 		/* Check bank 1 */
 		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
-							bank1_offset,
-							&sig_byte);
+							bank1_offset,
+							&sig_byte);
 		if (ret_val)
 			return ret_val;
 		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
@@ -2510,8 +2633,8 @@ static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
 
 	ret_val = 0;
 	for (i = 0; i < words; i++) {
-		if (dev_spec->shadow_ram[offset+i].modified) {
-			data[i] = dev_spec->shadow_ram[offset+i].value;
+		if (dev_spec->shadow_ram[offset + i].modified) {
+			data[i] = dev_spec->shadow_ram[offset + i].value;
 		} else {
 			ret_val = e1000_read_flash_word_ich8lan(hw,
 								act_offset + i,
@@ -2696,8 +2819,8 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
 	if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
 		return -E1000_ERR_NVM;
 
-	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
-			    hw->nvm.flash_base_addr;
+	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+			     hw->nvm.flash_base_addr);
 
 	do {
 		udelay(1);
@@ -2714,8 +2837,9 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
 
 		ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
 
-		ret_val = e1000_flash_cycle_ich8lan(hw,
-						ICH_FLASH_READ_COMMAND_TIMEOUT);
+		ret_val =
+		    e1000_flash_cycle_ich8lan(hw,
+					      ICH_FLASH_READ_COMMAND_TIMEOUT);
 
 		/* Check if FCERR is set to 1, if set to 1, clear it
 		 * and try the whole sequence a few more times, else
@@ -2774,8 +2898,8 @@ static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
 	nvm->ops.acquire(hw);
 
 	for (i = 0; i < words; i++) {
-		dev_spec->shadow_ram[offset+i].modified = true;
-		dev_spec->shadow_ram[offset+i].value = data[i];
+		dev_spec->shadow_ram[offset + i].modified = true;
+		dev_spec->shadow_ram[offset + i].value = data[i];
 	}
 
 	nvm->ops.release(hw);
@@ -2844,8 +2968,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 			data = dev_spec->shadow_ram[i].value;
 		} else {
 			ret_val = e1000_read_flash_word_ich8lan(hw, i +
-								old_bank_offset,
-								&data);
+								old_bank_offset,
+								&data);
 			if (ret_val)
 				break;
 		}
@@ -2863,7 +2987,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 		/* Convert offset to bytes. */
 		act_offset = (i + new_bank_offset) << 1;
 
-		udelay(100);
+		usleep_range(100, 200);
 		/* Write the bytes to the new bank. */
 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
 							       act_offset,
@@ -2871,10 +2995,10 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
 		if (ret_val)
 			break;
 
-		udelay(100);
+		usleep_range(100, 200);
 		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
 							       act_offset + 1,
 							       (u8)(data >> 8));
 		if (ret_val)
 			break;
 	}
@@ -3050,8 +3174,8 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
 	    offset > ICH_FLASH_LINEAR_ADDR_MASK)
 		return -E1000_ERR_NVM;
 
-	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
-			    hw->nvm.flash_base_addr;
+	flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+			     hw->nvm.flash_base_addr);
 
 	do {
 		udelay(1);
@@ -3062,7 +3186,7 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
 
 		hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
 		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
-		hsflctl.hsf_ctrl.fldbcount = size -1;
+		hsflctl.hsf_ctrl.fldbcount = size - 1;
 		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
 		ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
 
@@ -3078,8 +3202,9 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
 		/* check if FCERR is set to 1 , if set to 1, clear it
 		 * and try the whole sequence a few more times else done
 		 */
-		ret_val = e1000_flash_cycle_ich8lan(hw,
-					       ICH_FLASH_WRITE_COMMAND_TIMEOUT);
+		ret_val =
+		    e1000_flash_cycle_ich8lan(hw,
+					      ICH_FLASH_WRITE_COMMAND_TIMEOUT);
 		if (!ret_val)
 			break;
 
@@ -3138,7 +3263,7 @@ static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
 
 	for (program_retries = 0; program_retries < 100; program_retries++) {
 		e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset);
-		udelay(100);
+		usleep_range(100, 200);
 		ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
 		if (!ret_val)
 			break;
@@ -3209,8 +3334,10 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
 	flash_linear_addr = hw->nvm.flash_base_addr;
 	flash_linear_addr += (bank) ? flash_bank_size : 0;
 
-	for (j = 0; j < iteration ; j++) {
+	for (j = 0; j < iteration; j++) {
 		do {
+			u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT;
+
 			/* Steps */
 			ret_val = e1000_flash_cycle_init_ich8lan(hw);
 			if (ret_val)
@@ -3230,8 +3357,7 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
 			flash_linear_addr += (j * sector_size);
 			ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
 
-			ret_val = e1000_flash_cycle_ich8lan(hw,
-					       ICH_FLASH_ERASE_COMMAND_TIMEOUT);
+			ret_val = e1000_flash_cycle_ich8lan(hw, timeout);
 			if (!ret_val)
 				break;
 
@@ -3270,8 +3396,7 @@ static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
 		return ret_val;
 	}
 
-	if (*data == ID_LED_RESERVED_0000 ||
-	    *data == ID_LED_RESERVED_FFFF)
+	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
 		*data = ID_LED_DEFAULT_ICH8LAN;
 
 	return 0;
@@ -3511,9 +3636,9 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
 
 	/* Initialize identification LED */
 	ret_val = mac->ops.id_led_init(hw);
+	/* An error is not fatal and we should not stop init due to this */
 	if (ret_val)
 		e_dbg("Error initializing identification LED\n");
-	/* This is not fatal and we should not stop init due to this */
 
 	/* Setup the receive address. */
 	e1000e_init_rx_addrs(hw, mac->rar_entry_count);
@@ -3541,16 +3666,16 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
 
 	/* Set the transmit descriptor write-back policy for both queues */
 	txdctl = er32(TXDCTL(0));
-	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
-		 E1000_TXDCTL_FULL_TX_DESC_WB;
-	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
-		 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
+	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
+		  E1000_TXDCTL_FULL_TX_DESC_WB);
+	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
+		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
 	ew32(TXDCTL(0), txdctl);
 	txdctl = er32(TXDCTL(1));
-	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
-		 E1000_TXDCTL_FULL_TX_DESC_WB;
-	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
-		 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
+	txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) |
+		  E1000_TXDCTL_FULL_TX_DESC_WB);
+	txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) |
+		  E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
 	ew32(TXDCTL(1), txdctl);
 
@@ -3559,7 +3684,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
 	if (mac->type == e1000_ich8lan)
 		snoop = PCIE_ICH8_SNOOP_ALL;
 	else
-		snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
+		snoop = (u32)~(PCIE_NO_SNOOP_ALL);
 	e1000e_set_pcie_no_snoop(hw, snoop);
 
 	ctrl_ext = er32(CTRL_EXT);
@@ -3575,6 +3700,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
 
 	return ret_val;
 }
+
 /**
  * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
  * @hw: pointer to the HW structure
@@ -3686,8 +3812,7 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
 	 */
 	hw->fc.current_mode = hw->fc.requested_mode;
 
-	e_dbg("After fix-ups FlowControl is now = %x\n",
-	      hw->fc.current_mode);
+	e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);
 
 	/* Continue to configure the copper link. */
 	ret_val = hw->mac.ops.setup_physical_interface(hw);
@@ -3737,12 +3862,12 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
 	if (ret_val)
 		return ret_val;
 	ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
-				       &reg_data);
+				       &reg_data);
 	if (ret_val)
 		return ret_val;
 	reg_data |= 0x3F;
 	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
-					reg_data);
+					reg_data);
 	if (ret_val)
 		return ret_val;
 
@@ -3760,7 +3885,6 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
 		break;
 	case e1000_phy_82577:
 	case e1000_phy_82579:
-	case e1000_phy_i217:
 		ret_val = e1000_copper_link_setup_82577(hw);
 		if (ret_val)
 			return ret_val;
@@ -3796,6 +3920,31 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
 }
 
 /**
+ * e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY specific link setup function and then calls the
+ * generic setup_copper_link to finish configuring the link for
+ * Lynxpoint PCH devices
+ **/
+static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val;
+
+	ctrl = er32(CTRL);
+	ctrl |= E1000_CTRL_SLU;
+	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	ew32(CTRL, ctrl);
+
+	ret_val = e1000_copper_link_setup_82577(hw);
+	if (ret_val)
+		return ret_val;
+
+	return e1000e_setup_copper_link(hw);
+}
+
+/**
  * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
  * @hw: pointer to the HW structure
  * @speed: pointer to store current link speed
@@ -3815,8 +3964,7 @@ static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
 		return ret_val;
 
 	if ((hw->mac.type == e1000_ich8lan) &&
-	    (hw->phy.type == e1000_phy_igp_3) &&
-	    (*speed == SPEED_1000)) {
+	    (hw->phy.type == e1000_phy_igp_3) && (*speed == SPEED_1000)) {
 		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
 	}
 
@@ -3899,7 +4047,7 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
  *  /disabled - false).
  **/
 void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
-						  bool state)
+						  bool state)
 {
 	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
 
@@ -3981,12 +4129,12 @@ void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
 		return;
 
 	ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
-				       &reg_data);
+				       &reg_data);
 	if (ret_val)
 		return;
 	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
 	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
-					reg_data);
+					reg_data);
 	if (ret_val)
 		return;
 	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 8bf4655c2e17..80034a2b297c 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -211,7 +211,8 @@
 #define I82579_MSE_THRESHOLD	0x084F	/* 82579 Mean Square Error Threshold */
 #define I82577_MSE_THRESHOLD	0x0887	/* 82577 Mean Square Error Threshold */
 #define I82579_MSE_LINK_DOWN	0x2411	/* MSE count before dropping link */
-#define I82579_EEE_PCS_STATUS	0x182D	/* IEEE MMD Register 3.1 >> 8 */
+#define I82579_RX_CONFIG	0x3412	/* Receive configuration */
+#define I82579_EEE_PCS_STATUS	0x182E	/* IEEE MMD Register 3.1 >> 8 */
 #define I82579_EEE_CAPABILITY	0x0410	/* IEEE MMD Register 3.20 */
 #define I82579_EEE_ADVERTISEMENT	0x040E	/* IEEE MMD Register 7.60 */
 #define I82579_EEE_LP_ABILITY	0x040F	/* IEEE MMD Register 7.61 */
@@ -249,13 +250,6 @@
249/* Proprietary Latency Tolerance Reporting PCI Capability */ 250/* Proprietary Latency Tolerance Reporting PCI Capability */
250#define E1000_PCI_LTR_CAP_LPT 0xA8 251#define E1000_PCI_LTR_CAP_LPT 0xA8
251 252
252/* OBFF Control & Threshold Defines */
253#define E1000_SVCR_OFF_EN 0x00000001
254#define E1000_SVCR_OFF_MASKINT 0x00001000
255#define E1000_SVCR_OFF_TIMER_MASK 0xFFFF0000
256#define E1000_SVCR_OFF_TIMER_SHIFT 16
257#define E1000_SVT_OFF_HWM_MASK 0x0000001F
258
259void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw); 253void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
260void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, 254void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
261 bool state); 255 bool state);
@@ -267,4 +261,5 @@ s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
267void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw); 261void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
268s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable); 262s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
269s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data); 263s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data);
264s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data);
270#endif /* _E1000E_ICH8LAN_H_ */ 265#endif /* _E1000E_ICH8LAN_H_ */
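The header now exports e1000_write_emi_reg_locked() alongside the existing read helper. EMI registers are reached indirectly through an address/data register pair, which is why both helpers expect the PHY semaphore to be held across the two accesses. A hedged sketch of that indirect scheme; the register numbers and the phy_*_locked() accessors are assumptions for illustration only.

	#include <stdbool.h>
	#include <stdint.h>

	#define EMI_ADDR_REG 0x10	/* assumed register numbers, illustration only */
	#define EMI_DATA_REG 0x11

	extern int phy_write_locked(uint16_t reg, uint16_t val);
	extern int phy_read_locked(uint16_t reg, uint16_t *val);

	/* caller must already hold the PHY semaphore, hence the _locked suffix */
	static int emi_access_locked(uint16_t address, uint16_t *data, bool read)
	{
		int ret = phy_write_locked(EMI_ADDR_REG, address);

		if (ret)
			return ret;

		return read ? phy_read_locked(EMI_DATA_REG, data)
			    : phy_write_locked(EMI_DATA_REG, *data);
	}
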
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index b78e02174601..2480c1091873 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -596,7 +596,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
596 * serdes media type. 596 * serdes media type.
597 */ 597 */
598 /* SYNCH bit and IV bit are sticky. */ 598 /* SYNCH bit and IV bit are sticky. */
599 udelay(10); 599 usleep_range(10, 20);
600 rxcw = er32(RXCW); 600 rxcw = er32(RXCW);
601 if (rxcw & E1000_RXCW_SYNCH) { 601 if (rxcw & E1000_RXCW_SYNCH) {
602 if (!(rxcw & E1000_RXCW_IV)) { 602 if (!(rxcw & E1000_RXCW_IV)) {
@@ -613,7 +613,7 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
613 status = er32(STATUS); 613 status = er32(STATUS);
614 if (status & E1000_STATUS_LU) { 614 if (status & E1000_STATUS_LU) {
615 /* SYNCH bit and IV bit are sticky, so reread rxcw. */ 615 /* SYNCH bit and IV bit are sticky, so reread rxcw. */
616 udelay(10); 616 usleep_range(10, 20);
617 rxcw = er32(RXCW); 617 rxcw = er32(RXCW);
618 if (rxcw & E1000_RXCW_SYNCH) { 618 if (rxcw & E1000_RXCW_SYNCH) {
619 if (!(rxcw & E1000_RXCW_IV)) { 619 if (!(rxcw & E1000_RXCW_IV)) {
@@ -1382,7 +1382,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
1382 if (!(swsm & E1000_SWSM_SMBI)) 1382 if (!(swsm & E1000_SWSM_SMBI))
1383 break; 1383 break;
1384 1384
1385 udelay(50); 1385 usleep_range(50, 100);
1386 i++; 1386 i++;
1387 } 1387 }
1388 1388
@@ -1400,7 +1400,7 @@ s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
1400 if (er32(SWSM) & E1000_SWSM_SWESMBI) 1400 if (er32(SWSM) & E1000_SWSM_SWESMBI)
1401 break; 1401 break;
1402 1402
1403 udelay(50); 1403 usleep_range(50, 100);
1404 } 1404 }
1405 1405
1406 if (i == timeout) { 1406 if (i == timeout) {
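These hunks convert udelay() busy-waits to usleep_range() in paths that are allowed to sleep: spinning burns CPU for nothing, while a min/max sleep range lets the scheduler coalesce timer wakeups. A kernel-context sketch of the semaphore poll loop under that assumption; read_swsm() and the SMBI bit value are hypothetical stand-ins.

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	extern u32 read_swsm(void);	/* hypothetical SWSM register read */
	#define SWSM_SMBI 0x1		/* assumed bit position, illustration only */

	static int acquire_sw_semaphore(int timeout)
	{
		int i;

		for (i = 0; i < timeout; i++) {
			if (!(read_swsm() & SWSM_SMBI))
				return 0;	/* bit clear: semaphore acquired */
			/* sleep 50-100us instead of busy-waiting with udelay(50) */
			usleep_range(50, 100);
		}
		return -ETIMEDOUT;
	}
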
@@ -1600,15 +1600,28 @@ s32 e1000e_blink_led_generic(struct e1000_hw *hw)
1600 ledctl_blink = E1000_LEDCTL_LED0_BLINK | 1600 ledctl_blink = E1000_LEDCTL_LED0_BLINK |
1601 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); 1601 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
1602 } else { 1602 } else {
1603 /* set the blink bit for each LED that's "on" (0x0E) 1603 /* Set the blink bit for each LED that's "on" (0x0E)
1604 * in ledctl_mode2 1604 * (or "off" if inverted) in ledctl_mode2. The blink
1605 * logic in hardware only works when mode is set to "on"
1606 * so it must be changed accordingly when the mode is
1607 * "off" and inverted.
1605 */ 1608 */
1606 ledctl_blink = hw->mac.ledctl_mode2; 1609 ledctl_blink = hw->mac.ledctl_mode2;
1607 for (i = 0; i < 4; i++) 1610 for (i = 0; i < 32; i += 8) {
1608 if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == 1611 u32 mode = (hw->mac.ledctl_mode2 >> i) &
1609 E1000_LEDCTL_MODE_LED_ON) 1612 E1000_LEDCTL_LED0_MODE_MASK;
1610 ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << 1613 u32 led_default = hw->mac.ledctl_default >> i;
1611 (i * 8)); 1614
1615 if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
1616 (mode == E1000_LEDCTL_MODE_LED_ON)) ||
1617 ((led_default & E1000_LEDCTL_LED0_IVRT) &&
1618 (mode == E1000_LEDCTL_MODE_LED_OFF))) {
1619 ledctl_blink &=
1620 ~(E1000_LEDCTL_LED0_MODE_MASK << i);
1621 ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
1622 E1000_LEDCTL_MODE_LED_ON) << i;
1623 }
1624 }
1612 } 1625 }
1613 1626
1614 ew32(LEDCTL, ledctl_blink); 1627 ew32(LEDCTL, ledctl_blink);
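The rewritten blink loop walks the four 8-bit LED fields at offsets 0, 8, 16 and 24 and arms blink only where the LED is effectively on: mode LED_ON with a normal output, or LED_OFF with an inverted one, forcing the mode to LED_ON because the hardware only blinks in that mode. A self-contained sketch of the computation; the field encodings mirror the e1000 LEDCTL layout and are shown here as assumptions.

	#include <stdint.h>

	#define LED_MODE_MASK	0x0Fu
	#define LED_IVRT	0x40u	/* output is inverted */
	#define LED_BLINK	0x80u
	#define MODE_LED_ON	0x0Eu
	#define MODE_LED_OFF	0x0Fu

	static uint32_t ledctl_blink_value(uint32_t mode2, uint32_t ledctl_default)
	{
		uint32_t blink = mode2;
		int i;

		for (i = 0; i < 32; i += 8) {
			uint32_t mode = (mode2 >> i) & LED_MODE_MASK;
			uint32_t def = ledctl_default >> i;
			int on = !(def & LED_IVRT) && mode == MODE_LED_ON;
			int off_inverted = (def & LED_IVRT) && mode == MODE_LED_OFF;

			if (on || off_inverted) {
				/* blink only works in LED_ON mode, so force it */
				blink &= ~(LED_MODE_MASK << i);
				blink |= (LED_BLINK | MODE_LED_ON) << i;
			}
		}
		return blink;
	}
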
@@ -1712,7 +1725,7 @@ s32 e1000e_disable_pcie_master(struct e1000_hw *hw)
1712 while (timeout) { 1725 while (timeout) {
1713 if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE)) 1726 if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
1714 break; 1727 break;
1715 udelay(100); 1728 usleep_range(100, 200);
1716 timeout--; 1729 timeout--;
1717 } 1730 }
1718 1731
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 7e615e2bf7e6..b18fad5b579e 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -55,7 +55,7 @@
55 55
56#define DRV_EXTRAVERSION "-k" 56#define DRV_EXTRAVERSION "-k"
57 57
58#define DRV_VERSION "2.2.14" DRV_EXTRAVERSION 58#define DRV_VERSION "2.3.2" DRV_EXTRAVERSION
59char e1000e_driver_name[] = "e1000e"; 59char e1000e_driver_name[] = "e1000e";
60const char e1000e_driver_version[] = DRV_VERSION; 60const char e1000e_driver_version[] = DRV_VERSION;
61 61
@@ -219,9 +219,8 @@ static void e1000e_dump(struct e1000_adapter *adapter)
219 if (netdev) { 219 if (netdev) {
220 dev_info(&adapter->pdev->dev, "Net device Info\n"); 220 dev_info(&adapter->pdev->dev, "Net device Info\n");
221 pr_info("Device Name state trans_start last_rx\n"); 221 pr_info("Device Name state trans_start last_rx\n");
222 pr_info("%-15s %016lX %016lX %016lX\n", 222 pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
223 netdev->name, netdev->state, netdev->trans_start, 223 netdev->state, netdev->trans_start, netdev->last_rx);
224 netdev->last_rx);
225 } 224 }
226 225
227 /* Print Registers */ 226 /* Print Registers */
@@ -755,8 +754,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
755 cpu_to_le64(ps_page->dma); 754 cpu_to_le64(ps_page->dma);
756 } 755 }
757 756
758 skb = __netdev_alloc_skb_ip_align(netdev, 757 skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0,
759 adapter->rx_ps_bsize0,
760 gfp); 758 gfp);
761 759
762 if (!skb) { 760 if (!skb) {
@@ -850,8 +848,8 @@ check_page:
850 848
851 if (!buffer_info->dma) { 849 if (!buffer_info->dma) {
852 buffer_info->dma = dma_map_page(&pdev->dev, 850 buffer_info->dma = dma_map_page(&pdev->dev,
853 buffer_info->page, 0, 851 buffer_info->page, 0,
854 PAGE_SIZE, 852 PAGE_SIZE,
855 DMA_FROM_DEVICE); 853 DMA_FROM_DEVICE);
856 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { 854 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
857 adapter->alloc_rx_buff_failed++; 855 adapter->alloc_rx_buff_failed++;
@@ -942,10 +940,8 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
942 940
943 cleaned = true; 941 cleaned = true;
944 cleaned_count++; 942 cleaned_count++;
945 dma_unmap_single(&pdev->dev, 943 dma_unmap_single(&pdev->dev, buffer_info->dma,
946 buffer_info->dma, 944 adapter->rx_buffer_len, DMA_FROM_DEVICE);
947 adapter->rx_buffer_len,
948 DMA_FROM_DEVICE);
949 buffer_info->dma = 0; 945 buffer_info->dma = 0;
950 946
951 length = le16_to_cpu(rx_desc->wb.upper.length); 947 length = le16_to_cpu(rx_desc->wb.upper.length);
@@ -1073,8 +1069,8 @@ static void e1000_put_txbuf(struct e1000_ring *tx_ring,
1073static void e1000_print_hw_hang(struct work_struct *work) 1069static void e1000_print_hw_hang(struct work_struct *work)
1074{ 1070{
1075 struct e1000_adapter *adapter = container_of(work, 1071 struct e1000_adapter *adapter = container_of(work,
1076 struct e1000_adapter, 1072 struct e1000_adapter,
1077 print_hang_task); 1073 print_hang_task);
1078 struct net_device *netdev = adapter->netdev; 1074 struct net_device *netdev = adapter->netdev;
1079 struct e1000_ring *tx_ring = adapter->tx_ring; 1075 struct e1000_ring *tx_ring = adapter->tx_ring;
1080 unsigned int i = tx_ring->next_to_clean; 1076 unsigned int i = tx_ring->next_to_clean;
@@ -1087,8 +1083,7 @@ static void e1000_print_hw_hang(struct work_struct *work)
1087 if (test_bit(__E1000_DOWN, &adapter->state)) 1083 if (test_bit(__E1000_DOWN, &adapter->state))
1088 return; 1084 return;
1089 1085
1090 if (!adapter->tx_hang_recheck && 1086 if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) {
1091 (adapter->flags2 & FLAG2_DMA_BURST)) {
1092 /* MAC may be blocked on write-back; flush pending 1087 /* MAC may be blocked on write-back; flush pending
1093 * descriptor writebacks to memory and detect again 1088 * descriptor writebacks to memory and detect again
1094 */ 1089 */
@@ -1130,19 +1125,10 @@ static void e1000_print_hw_hang(struct work_struct *work)
1130 "PHY 1000BASE-T Status <%x>\n" 1125 "PHY 1000BASE-T Status <%x>\n"
1131 "PHY Extended Status <%x>\n" 1126 "PHY Extended Status <%x>\n"
1132 "PCI Status <%x>\n", 1127 "PCI Status <%x>\n",
1133 readl(tx_ring->head), 1128 readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use,
1134 readl(tx_ring->tail), 1129 tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp,
1135 tx_ring->next_to_use, 1130 eop, jiffies, eop_desc->upper.fields.status, er32(STATUS),
1136 tx_ring->next_to_clean, 1131 phy_status, phy_1000t_status, phy_ext_status, pci_status);
1137 tx_ring->buffer_info[eop].time_stamp,
1138 eop,
1139 jiffies,
1140 eop_desc->upper.fields.status,
1141 er32(STATUS),
1142 phy_status,
1143 phy_1000t_status,
1144 phy_ext_status,
1145 pci_status);
1146 1132
1147 /* Suggest workaround for known h/w issue */ 1133 /* Suggest workaround for known h/w issue */
1148 if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE)) 1134 if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
@@ -1435,7 +1421,7 @@ copydone:
1435 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); 1421 e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);
1436 1422
1437 if (rx_desc->wb.upper.header_status & 1423 if (rx_desc->wb.upper.header_status &
1438 cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)) 1424 cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
1439 adapter->rx_hdr_split++; 1425 adapter->rx_hdr_split++;
1440 1426
1441 e1000_receive_skb(adapter, netdev, skb, staterr, 1427 e1000_receive_skb(adapter, netdev, skb, staterr,
@@ -1473,7 +1459,7 @@ next_desc:
1473 * e1000_consume_page - helper function 1459 * e1000_consume_page - helper function
1474 **/ 1460 **/
1475static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, 1461static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
1476 u16 length) 1462 u16 length)
1477{ 1463{
1478 bi->page = NULL; 1464 bi->page = NULL;
1479 skb->len += length; 1465 skb->len += length;
@@ -1500,7 +1486,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
1500 unsigned int i; 1486 unsigned int i;
1501 int cleaned_count = 0; 1487 int cleaned_count = 0;
1502 bool cleaned = false; 1488 bool cleaned = false;
1503 unsigned int total_rx_bytes=0, total_rx_packets=0; 1489 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1490 struct skb_shared_info *shinfo;
1504 1491
1505 i = rx_ring->next_to_clean; 1492 i = rx_ring->next_to_clean;
1506 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); 1493 rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
@@ -1546,7 +1533,6 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
1546 rx_ring->rx_skb_top = NULL; 1533 rx_ring->rx_skb_top = NULL;
1547 goto next_desc; 1534 goto next_desc;
1548 } 1535 }
1549
1550#define rxtop (rx_ring->rx_skb_top) 1536#define rxtop (rx_ring->rx_skb_top)
1551 if (!(staterr & E1000_RXD_STAT_EOP)) { 1537 if (!(staterr & E1000_RXD_STAT_EOP)) {
1552 /* this descriptor is only the beginning (or middle) */ 1538 /* this descriptor is only the beginning (or middle) */
@@ -1554,12 +1540,13 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
1554 /* this is the beginning of a chain */ 1540 /* this is the beginning of a chain */
1555 rxtop = skb; 1541 rxtop = skb;
1556 skb_fill_page_desc(rxtop, 0, buffer_info->page, 1542 skb_fill_page_desc(rxtop, 0, buffer_info->page,
1557 0, length); 1543 0, length);
1558 } else { 1544 } else {
1559 /* this is the middle of a chain */ 1545 /* this is the middle of a chain */
1560 skb_fill_page_desc(rxtop, 1546 shinfo = skb_shinfo(rxtop);
1561 skb_shinfo(rxtop)->nr_frags, 1547 skb_fill_page_desc(rxtop, shinfo->nr_frags,
1562 buffer_info->page, 0, length); 1548 buffer_info->page, 0,
1549 length);
1563 /* re-use the skb, only consumed the page */ 1550 /* re-use the skb, only consumed the page */
1564 buffer_info->skb = skb; 1551 buffer_info->skb = skb;
1565 } 1552 }
@@ -1568,9 +1555,10 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
1568 } else { 1555 } else {
1569 if (rxtop) { 1556 if (rxtop) {
1570 /* end of the chain */ 1557 /* end of the chain */
1571 skb_fill_page_desc(rxtop, 1558 shinfo = skb_shinfo(rxtop);
1572 skb_shinfo(rxtop)->nr_frags, 1559 skb_fill_page_desc(rxtop, shinfo->nr_frags,
1573 buffer_info->page, 0, length); 1560 buffer_info->page, 0,
1561 length);
1574 /* re-use the current skb, we only consumed the 1562 /* re-use the current skb, we only consumed the
1575 * page 1563 * page
1576 */ 1564 */
@@ -1595,10 +1583,10 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
1595 skb_put(skb, length); 1583 skb_put(skb, length);
1596 } else { 1584 } else {
1597 skb_fill_page_desc(skb, 0, 1585 skb_fill_page_desc(skb, 0,
1598 buffer_info->page, 0, 1586 buffer_info->page, 0,
1599 length); 1587 length);
1600 e1000_consume_page(buffer_info, skb, 1588 e1000_consume_page(buffer_info, skb,
1601 length); 1589 length);
1602 } 1590 }
1603 } 1591 }
1604 } 1592 }
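The jumbo-RX rework above caches skb_shinfo(rxtop) before appending, but the underlying pattern is unchanged: the first descriptor of a frame starts a chain, and every later descriptor contributes its page as the next fragment. A hedged kernel-context sketch of that append step.

	#include <linux/skbuff.h>

	/* Append one received page to a frame spanning multiple descriptors.
	 * For the first descriptor nr_frags is 0, so the page lands in slot 0;
	 * later descriptors land in the next free fragment slot.
	 */
	static void append_rx_page(struct sk_buff *chain, struct page *page,
				   unsigned int len)
	{
		struct skb_shared_info *shinfo = skb_shinfo(chain);

		skb_fill_page_desc(chain, shinfo->nr_frags, page, 0, len);
	}
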
@@ -1671,8 +1659,7 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
1671 DMA_FROM_DEVICE); 1659 DMA_FROM_DEVICE);
1672 else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) 1660 else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
1673 dma_unmap_page(&pdev->dev, buffer_info->dma, 1661 dma_unmap_page(&pdev->dev, buffer_info->dma,
1674 PAGE_SIZE, 1662 PAGE_SIZE, DMA_FROM_DEVICE);
1675 DMA_FROM_DEVICE);
1676 else if (adapter->clean_rx == e1000_clean_rx_irq_ps) 1663 else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
1677 dma_unmap_single(&pdev->dev, buffer_info->dma, 1664 dma_unmap_single(&pdev->dev, buffer_info->dma,
1678 adapter->rx_ps_bsize0, 1665 adapter->rx_ps_bsize0,
@@ -1725,7 +1712,8 @@ static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
1725static void e1000e_downshift_workaround(struct work_struct *work) 1712static void e1000e_downshift_workaround(struct work_struct *work)
1726{ 1713{
1727 struct e1000_adapter *adapter = container_of(work, 1714 struct e1000_adapter *adapter = container_of(work,
1728 struct e1000_adapter, downshift_task); 1715 struct e1000_adapter,
1716 downshift_task);
1729 1717
1730 if (test_bit(__E1000_DOWN, &adapter->state)) 1718 if (test_bit(__E1000_DOWN, &adapter->state))
1731 return; 1719 return;
@@ -1918,7 +1906,6 @@ static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data)
1918 struct e1000_hw *hw = &adapter->hw; 1906 struct e1000_hw *hw = &adapter->hw;
1919 struct e1000_ring *tx_ring = adapter->tx_ring; 1907 struct e1000_ring *tx_ring = adapter->tx_ring;
1920 1908
1921
1922 adapter->total_tx_bytes = 0; 1909 adapter->total_tx_bytes = 0;
1923 adapter->total_tx_packets = 0; 1910 adapter->total_tx_packets = 0;
1924 1911
@@ -1975,7 +1962,6 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
1975 ew32(RFCTL, rfctl); 1962 ew32(RFCTL, rfctl);
1976 } 1963 }
1977 1964
1978#define E1000_IVAR_INT_ALLOC_VALID 0x8
1979 /* Configure Rx vector */ 1965 /* Configure Rx vector */
1980 rx_ring->ims_val = E1000_IMS_RXQ0; 1966 rx_ring->ims_val = E1000_IMS_RXQ0;
1981 adapter->eiac_mask |= rx_ring->ims_val; 1967 adapter->eiac_mask |= rx_ring->ims_val;
@@ -2050,8 +2036,9 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
2050 if (adapter->flags & FLAG_HAS_MSIX) { 2036 if (adapter->flags & FLAG_HAS_MSIX) {
2051 adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */ 2037 adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
2052 adapter->msix_entries = kcalloc(adapter->num_vectors, 2038 adapter->msix_entries = kcalloc(adapter->num_vectors,
2053 sizeof(struct msix_entry), 2039 sizeof(struct
2054 GFP_KERNEL); 2040 msix_entry),
2041 GFP_KERNEL);
2055 if (adapter->msix_entries) { 2042 if (adapter->msix_entries) {
2056 for (i = 0; i < adapter->num_vectors; i++) 2043 for (i = 0; i < adapter->num_vectors; i++)
2057 adapter->msix_entries[i].entry = i; 2044 adapter->msix_entries[i].entry = i;
@@ -2495,7 +2482,7 @@ static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes)
2495 switch (itr_setting) { 2482 switch (itr_setting) {
2496 case lowest_latency: 2483 case lowest_latency:
2497 /* handle TSO and jumbo frames */ 2484 /* handle TSO and jumbo frames */
2498 if (bytes/packets > 8000) 2485 if (bytes / packets > 8000)
2499 retval = bulk_latency; 2486 retval = bulk_latency;
2500 else if ((packets < 5) && (bytes > 512)) 2487 else if ((packets < 5) && (bytes > 512))
2501 retval = low_latency; 2488 retval = low_latency;
@@ -2503,13 +2490,13 @@ static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes)
2503 case low_latency: /* 50 usec aka 20000 ints/s */ 2490 case low_latency: /* 50 usec aka 20000 ints/s */
2504 if (bytes > 10000) { 2491 if (bytes > 10000) {
2505 /* this if handles the TSO accounting */ 2492 /* this if handles the TSO accounting */
2506 if (bytes/packets > 8000) 2493 if (bytes / packets > 8000)
2507 retval = bulk_latency; 2494 retval = bulk_latency;
2508 else if ((packets < 10) || ((bytes/packets) > 1200)) 2495 else if ((packets < 10) || ((bytes / packets) > 1200))
2509 retval = bulk_latency; 2496 retval = bulk_latency;
2510 else if ((packets > 35)) 2497 else if ((packets > 35))
2511 retval = lowest_latency; 2498 retval = lowest_latency;
2512 } else if (bytes/packets > 2000) { 2499 } else if (bytes / packets > 2000) {
2513 retval = bulk_latency; 2500 retval = bulk_latency;
2514 } else if (packets <= 2 && bytes < 512) { 2501 } else if (packets <= 2 && bytes < 512) {
2515 retval = lowest_latency; 2502 retval = lowest_latency;
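The reformatted expressions implement adaptive interrupt moderation: classify recent traffic by byte count, packet count and average frame size, then map the class to an interrupt rate elsewhere. A self-contained sketch of the low-latency branch of that heuristic.

	enum latency { LOWEST_LATENCY, LOW_LATENCY, BULK_LATENCY };

	/* Large average frames (TSO/jumbo) push toward bulk; many small
	 * frames push toward lowest latency; otherwise stay put.
	 */
	static enum latency classify_low_latency(int packets, int bytes)
	{
		if (packets <= 0)
			return LOW_LATENCY;	/* nothing to measure */

		if (bytes > 10000) {
			if (bytes / packets > 8000)
				return BULK_LATENCY;
			if (packets < 10 || bytes / packets > 1200)
				return BULK_LATENCY;
			if (packets > 35)
				return LOWEST_LATENCY;
		} else if (bytes / packets > 2000) {
			return BULK_LATENCY;
		} else if (packets <= 2 && bytes < 512) {
			return LOWEST_LATENCY;
		}
		return LOW_LATENCY;
	}
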
@@ -2561,8 +2548,8 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
2561 2548
2562 current_itr = max(adapter->rx_itr, adapter->tx_itr); 2549 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2563 2550
2564 switch (current_itr) {
2565 /* counts and packets in update_itr are dependent on these numbers */ 2551 /* counts and packets in update_itr are dependent on these numbers */
2552 switch (current_itr) {
2566 case lowest_latency: 2553 case lowest_latency:
2567 new_itr = 70000; 2554 new_itr = 70000;
2568 break; 2555 break;
@@ -2583,8 +2570,7 @@ set_itr_now:
2583 * increasing 2570 * increasing
2584 */ 2571 */
2585 new_itr = new_itr > adapter->itr ? 2572 new_itr = new_itr > adapter->itr ?
2586 min(adapter->itr + (new_itr >> 2), new_itr) : 2573 min(adapter->itr + (new_itr >> 2), new_itr) : new_itr;
2587 new_itr;
2588 adapter->itr = new_itr; 2574 adapter->itr = new_itr;
2589 adapter->rx_ring->itr_val = new_itr; 2575 adapter->rx_ring->itr_val = new_itr;
2590 if (adapter->msix_entries) 2576 if (adapter->msix_entries)
@@ -2815,8 +2801,7 @@ static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
2815 u16 vid = adapter->hw.mng_cookie.vlan_id; 2801 u16 vid = adapter->hw.mng_cookie.vlan_id;
2816 u16 old_vid = adapter->mng_vlan_id; 2802 u16 old_vid = adapter->mng_vlan_id;
2817 2803
2818 if (adapter->hw.mng_cookie.status & 2804 if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
2819 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
2820 e1000_vlan_rx_add_vid(netdev, vid); 2805 e1000_vlan_rx_add_vid(netdev, vid);
2821 adapter->mng_vlan_id = vid; 2806 adapter->mng_vlan_id = vid;
2822 } 2807 }
@@ -2832,7 +2817,7 @@ static void e1000_restore_vlan(struct e1000_adapter *adapter)
2832 e1000_vlan_rx_add_vid(adapter->netdev, 0); 2817 e1000_vlan_rx_add_vid(adapter->netdev, 0);
2833 2818
2834 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) 2819 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2835 e1000_vlan_rx_add_vid(adapter->netdev, vid); 2820 e1000_vlan_rx_add_vid(adapter->netdev, vid);
2836} 2821}
2837 2822
2838static void e1000_init_manageability_pt(struct e1000_adapter *adapter) 2823static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
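e1000_restore_vlan() replays the VLAN filter table from a bitmap: VID 0 first (untagged and priority-tagged traffic), then every set bit. A hedged kernel-context sketch, with add_vid() standing in for the driver's rx-add-vid callback.

	#include <linux/bitops.h>
	#include <linux/if_vlan.h>

	extern void add_vid(u16 vid);	/* hypothetical stand-in */

	static void restore_vlans(unsigned long *active_vlans)
	{
		u16 vid;

		add_vid(0);	/* untagged/priority-tagged traffic */
		for_each_set_bit(vid, active_vlans, VLAN_N_VID)
			add_vid(vid);
	}
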
@@ -3007,8 +2992,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
3007 rctl = er32(RCTL); 2992 rctl = er32(RCTL);
3008 rctl &= ~(3 << E1000_RCTL_MO_SHIFT); 2993 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3009 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | 2994 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
3010 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | 2995 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
3011 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 2996 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
3012 2997
3013 /* Do not Store bad packets */ 2998 /* Do not Store bad packets */
3014 rctl &= ~E1000_RCTL_SBP; 2999 rctl &= ~E1000_RCTL_SBP;
@@ -3094,19 +3079,17 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
3094 /* Enable Packet split descriptors */ 3079 /* Enable Packet split descriptors */
3095 rctl |= E1000_RCTL_DTYP_PS; 3080 rctl |= E1000_RCTL_DTYP_PS;
3096 3081
3097 psrctl |= adapter->rx_ps_bsize0 >> 3082 psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT;
3098 E1000_PSRCTL_BSIZE0_SHIFT;
3099 3083
3100 switch (adapter->rx_ps_pages) { 3084 switch (adapter->rx_ps_pages) {
3101 case 3: 3085 case 3:
3102 psrctl |= PAGE_SIZE << 3086 psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE3_SHIFT;
3103 E1000_PSRCTL_BSIZE3_SHIFT; 3087 /* fall-through */
3104 case 2: 3088 case 2:
3105 psrctl |= PAGE_SIZE << 3089 psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE2_SHIFT;
3106 E1000_PSRCTL_BSIZE2_SHIFT; 3090 /* fall-through */
3107 case 1: 3091 case 1:
3108 psrctl |= PAGE_SIZE >> 3092 psrctl |= PAGE_SIZE >> E1000_PSRCTL_BSIZE1_SHIFT;
3109 E1000_PSRCTL_BSIZE1_SHIFT;
3110 break; 3093 break;
3111 } 3094 }
3112 3095
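The annotated switch above relies on deliberate fall-through: configuring three packet-split pages must also program the sizes for pages two and one, so the cases accumulate rather than break. A sketch of the same shape; the shift constants here are placeholders, not the driver's real PSRCTL field offsets.

	#include <stdint.h>

	#define BSIZE1_SHIFT	2	/* assumed shifts, illustration only */
	#define BSIZE2_SHIFT	6
	#define BSIZE3_SHIFT	14
	#define PAGE_SZ		4096u

	static uint32_t psrctl_for_pages(int rx_ps_pages, uint32_t psrctl)
	{
		switch (rx_ps_pages) {
		case 3:
			psrctl |= PAGE_SZ << BSIZE3_SHIFT;
			/* fall through: 3 pages implies sizes for 2 and 1 */
		case 2:
			psrctl |= PAGE_SZ << BSIZE2_SHIFT;
			/* fall through */
		case 1:
			psrctl |= PAGE_SZ >> BSIZE1_SHIFT;
			break;
		}
		return psrctl;
	}
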
@@ -3280,7 +3263,7 @@ static int e1000e_write_mc_addr_list(struct net_device *netdev)
3280 /* update_mc_addr_list expects a packed array of only addresses. */ 3263 /* update_mc_addr_list expects a packed array of only addresses. */
3281 i = 0; 3264 i = 0;
3282 netdev_for_each_mc_addr(ha, netdev) 3265 netdev_for_each_mc_addr(ha, netdev)
3283 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); 3266 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
3284 3267
3285 hw->mac.ops.update_mc_addr_list(hw, mta_list, i); 3268 hw->mac.ops.update_mc_addr_list(hw, mta_list, i);
3286 kfree(mta_list); 3269 kfree(mta_list);
@@ -3757,8 +3740,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
3757 * but don't include ethernet FCS because hardware appends it 3740 * but don't include ethernet FCS because hardware appends it
3758 */ 3741 */
3759 min_tx_space = (adapter->max_frame_size + 3742 min_tx_space = (adapter->max_frame_size +
3760 sizeof(struct e1000_tx_desc) - 3743 sizeof(struct e1000_tx_desc) - ETH_FCS_LEN) * 2;
3761 ETH_FCS_LEN) * 2;
3762 min_tx_space = ALIGN(min_tx_space, 1024); 3744 min_tx_space = ALIGN(min_tx_space, 1024);
3763 min_tx_space >>= 10; 3745 min_tx_space >>= 10;
3764 /* software strips receive CRC, so leave room for it */ 3746 /* software strips receive CRC, so leave room for it */
@@ -3861,13 +3843,13 @@ void e1000e_reset(struct e1000_adapter *adapter)
3861 if ((adapter->max_frame_size * 2) > (pba << 10)) { 3843 if ((adapter->max_frame_size * 2) > (pba << 10)) {
3862 if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) { 3844 if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
3863 dev_info(&adapter->pdev->dev, 3845 dev_info(&adapter->pdev->dev,
3864 "Interrupt Throttle Rate turned off\n"); 3846 "Interrupt Throttle Rate off\n");
3865 adapter->flags2 |= FLAG2_DISABLE_AIM; 3847 adapter->flags2 |= FLAG2_DISABLE_AIM;
3866 e1000e_write_itr(adapter, 0); 3848 e1000e_write_itr(adapter, 0);
3867 } 3849 }
3868 } else if (adapter->flags2 & FLAG2_DISABLE_AIM) { 3850 } else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
3869 dev_info(&adapter->pdev->dev, 3851 dev_info(&adapter->pdev->dev,
3870 "Interrupt Throttle Rate turned on\n"); 3852 "Interrupt Throttle Rate on\n");
3871 adapter->flags2 &= ~FLAG2_DISABLE_AIM; 3853 adapter->flags2 &= ~FLAG2_DISABLE_AIM;
3872 adapter->itr = 20000; 3854 adapter->itr = 20000;
3873 e1000e_write_itr(adapter, adapter->itr); 3855 e1000e_write_itr(adapter, adapter->itr);
@@ -3898,6 +3880,38 @@ void e1000e_reset(struct e1000_adapter *adapter)
3898 /* initialize systim and reset the ns time counter */ 3880 /* initialize systim and reset the ns time counter */
3899 e1000e_config_hwtstamp(adapter); 3881 e1000e_config_hwtstamp(adapter);
3900 3882
3883 /* Set EEE advertisement as appropriate */
3884 if (adapter->flags2 & FLAG2_HAS_EEE) {
3885 s32 ret_val;
3886 u16 adv_addr;
3887
3888 switch (hw->phy.type) {
3889 case e1000_phy_82579:
3890 adv_addr = I82579_EEE_ADVERTISEMENT;
3891 break;
3892 case e1000_phy_i217:
3893 adv_addr = I217_EEE_ADVERTISEMENT;
3894 break;
3895 default:
3896 dev_err(&adapter->pdev->dev,
3897 "Invalid PHY type setting EEE advertisement\n");
3898 return;
3899 }
3900
3901 ret_val = hw->phy.ops.acquire(hw);
3902 if (ret_val) {
3903 dev_err(&adapter->pdev->dev,
3904 "EEE advertisement - unable to acquire PHY\n");
3905 return;
3906 }
3907
3908 e1000_write_emi_reg_locked(hw, adv_addr,
3909 hw->dev_spec.ich8lan.eee_disable ?
3910 0 : adapter->eee_advert);
3911
3912 hw->phy.ops.release(hw);
3913 }
3914
3901 if (!netif_running(adapter->netdev) && 3915 if (!netif_running(adapter->netdev) &&
3902 !test_bit(__E1000_TESTING, &adapter->state)) { 3916 !test_bit(__E1000_TESTING, &adapter->state)) {
3903 e1000_power_down_phy(adapter); 3917 e1000_power_down_phy(adapter);
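The new reset-path block selects the EEE advertisement EMI address by PHY type and writes either the stored advertisement or zero when EEE is disabled, all under the PHY semaphore. A hedged sketch of that acquire-write-release pattern; acquire_phy()/release_phy()/emi_write() stand in for hw->phy.ops.acquire/release and e1000_write_emi_reg_locked().

	#include <stdbool.h>
	#include <stdint.h>

	extern int acquire_phy(void);
	extern void release_phy(void);
	extern int emi_write(uint16_t addr, uint16_t val);

	static int set_eee_advert(uint16_t adv_addr, uint16_t advert,
				  bool eee_disabled)
	{
		int ret = acquire_phy();

		if (ret)
			return ret;	/* EMI access requires the PHY lock */

		ret = emi_write(adv_addr, eee_disabled ? 0 : advert);
		release_phy();
		return ret;
	}
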
@@ -4266,8 +4280,7 @@ static int e1000_open(struct net_device *netdev)
4266 e1000e_power_up_phy(adapter); 4280 e1000e_power_up_phy(adapter);
4267 4281
4268 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 4282 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
4269 if ((adapter->hw.mng_cookie.status & 4283 if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
4270 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
4271 e1000_update_mng_vlan(adapter); 4284 e1000_update_mng_vlan(adapter);
4272 4285
4273 /* DMA latency requirement to workaround jumbo issue */ 4286 /* DMA latency requirement to workaround jumbo issue */
@@ -4370,8 +4383,7 @@ static int e1000_close(struct net_device *netdev)
4370 /* kill manageability vlan ID if supported, but not if a vlan with 4383 /* kill manageability vlan ID if supported, but not if a vlan with
4371 * the same ID is registered on the host OS (let 8021q kill it) 4384 * the same ID is registered on the host OS (let 8021q kill it)
4372 */ 4385 */
4373 if (adapter->hw.mng_cookie.status & 4386 if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
4374 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
4375 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 4387 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
4376 4388
4377 /* If AMT is enabled, let the firmware know that the network 4389 /* If AMT is enabled, let the firmware know that the network
@@ -4387,6 +4399,7 @@ static int e1000_close(struct net_device *netdev)
4387 4399
4388 return 0; 4400 return 0;
4389} 4401}
4402
4390/** 4403/**
4391 * e1000_set_mac - Change the Ethernet Address of the NIC 4404 * e1000_set_mac - Change the Ethernet Address of the NIC
4392 * @netdev: network interface device structure 4405 * @netdev: network interface device structure
@@ -4437,7 +4450,8 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
4437static void e1000e_update_phy_task(struct work_struct *work) 4450static void e1000e_update_phy_task(struct work_struct *work)
4438{ 4451{
4439 struct e1000_adapter *adapter = container_of(work, 4452 struct e1000_adapter *adapter = container_of(work,
4440 struct e1000_adapter, update_phy_task); 4453 struct e1000_adapter,
4454 update_phy_task);
4441 4455
4442 if (test_bit(__E1000_DOWN, &adapter->state)) 4456 if (test_bit(__E1000_DOWN, &adapter->state))
4443 return; 4457 return;
@@ -4454,7 +4468,7 @@ static void e1000e_update_phy_task(struct work_struct *work)
4454 **/ 4468 **/
4455static void e1000_update_phy_info(unsigned long data) 4469static void e1000_update_phy_info(unsigned long data)
4456{ 4470{
4457 struct e1000_adapter *adapter = (struct e1000_adapter *) data; 4471 struct e1000_adapter *adapter = (struct e1000_adapter *)data;
4458 4472
4459 if (test_bit(__E1000_DOWN, &adapter->state)) 4473 if (test_bit(__E1000_DOWN, &adapter->state))
4460 return; 4474 return;
@@ -4621,18 +4635,16 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
4621 * our own version based on RUC and ROC 4635 * our own version based on RUC and ROC
4622 */ 4636 */
4623 netdev->stats.rx_errors = adapter->stats.rxerrc + 4637 netdev->stats.rx_errors = adapter->stats.rxerrc +
4624 adapter->stats.crcerrs + adapter->stats.algnerrc + 4638 adapter->stats.crcerrs + adapter->stats.algnerrc +
4625 adapter->stats.ruc + adapter->stats.roc + 4639 adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr;
4626 adapter->stats.cexterr;
4627 netdev->stats.rx_length_errors = adapter->stats.ruc + 4640 netdev->stats.rx_length_errors = adapter->stats.ruc +
4628 adapter->stats.roc; 4641 adapter->stats.roc;
4629 netdev->stats.rx_crc_errors = adapter->stats.crcerrs; 4642 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
4630 netdev->stats.rx_frame_errors = adapter->stats.algnerrc; 4643 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
4631 netdev->stats.rx_missed_errors = adapter->stats.mpc; 4644 netdev->stats.rx_missed_errors = adapter->stats.mpc;
4632 4645
4633 /* Tx Errors */ 4646 /* Tx Errors */
4634 netdev->stats.tx_errors = adapter->stats.ecol + 4647 netdev->stats.tx_errors = adapter->stats.ecol + adapter->stats.latecol;
4635 adapter->stats.latecol;
4636 netdev->stats.tx_aborted_errors = adapter->stats.ecol; 4648 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
4637 netdev->stats.tx_window_errors = adapter->stats.latecol; 4649 netdev->stats.tx_window_errors = adapter->stats.latecol;
4638 netdev->stats.tx_carrier_errors = adapter->stats.tncrs; 4650 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
@@ -4790,7 +4802,7 @@ static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
4790 **/ 4802 **/
4791static void e1000_watchdog(unsigned long data) 4803static void e1000_watchdog(unsigned long data)
4792{ 4804{
4793 struct e1000_adapter *adapter = (struct e1000_adapter *) data; 4805 struct e1000_adapter *adapter = (struct e1000_adapter *)data;
4794 4806
4795 /* Do the rest outside of interrupt context */ 4807 /* Do the rest outside of interrupt context */
4796 schedule_work(&adapter->watchdog_task); 4808 schedule_work(&adapter->watchdog_task);
@@ -4801,7 +4813,8 @@ static void e1000_watchdog(unsigned long data)
4801static void e1000_watchdog_task(struct work_struct *work) 4813static void e1000_watchdog_task(struct work_struct *work)
4802{ 4814{
4803 struct e1000_adapter *adapter = container_of(work, 4815 struct e1000_adapter *adapter = container_of(work,
4804 struct e1000_adapter, watchdog_task); 4816 struct e1000_adapter,
4817 watchdog_task);
4805 struct net_device *netdev = adapter->netdev; 4818 struct net_device *netdev = adapter->netdev;
4806 struct e1000_mac_info *mac = &adapter->hw.mac; 4819 struct e1000_mac_info *mac = &adapter->hw.mac;
4807 struct e1000_phy_info *phy = &adapter->hw.phy; 4820 struct e1000_phy_info *phy = &adapter->hw.phy;
@@ -4835,8 +4848,8 @@ static void e1000_watchdog_task(struct work_struct *work)
4835 /* update snapshot of PHY registers on LSC */ 4848 /* update snapshot of PHY registers on LSC */
4836 e1000_phy_read_status(adapter); 4849 e1000_phy_read_status(adapter);
4837 mac->ops.get_link_up_info(&adapter->hw, 4850 mac->ops.get_link_up_info(&adapter->hw,
4838 &adapter->link_speed, 4851 &adapter->link_speed,
4839 &adapter->link_duplex); 4852 &adapter->link_duplex);
4840 e1000_print_link_info(adapter); 4853 e1000_print_link_info(adapter);
4841 4854
4842 /* check if SmartSpeed worked */ 4855 /* check if SmartSpeed worked */
@@ -4949,7 +4962,7 @@ static void e1000_watchdog_task(struct work_struct *work)
4949 adapter->flags |= FLAG_RESTART_NOW; 4962 adapter->flags |= FLAG_RESTART_NOW;
4950 else 4963 else
4951 pm_schedule_suspend(netdev->dev.parent, 4964 pm_schedule_suspend(netdev->dev.parent,
4952 LINK_TIMEOUT); 4965 LINK_TIMEOUT);
4953 } 4966 }
4954 } 4967 }
4955 4968
@@ -4984,8 +4997,8 @@ link_up:
4984 */ 4997 */
4985 u32 goc = (adapter->gotc + adapter->gorc) / 10000; 4998 u32 goc = (adapter->gotc + adapter->gorc) / 10000;
4986 u32 dif = (adapter->gotc > adapter->gorc ? 4999 u32 dif = (adapter->gotc > adapter->gorc ?
4987 adapter->gotc - adapter->gorc : 5000 adapter->gotc - adapter->gorc :
4988 adapter->gorc - adapter->gotc) / 10000; 5001 adapter->gorc - adapter->gotc) / 10000;
4989 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; 5002 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
4990 5003
4991 e1000e_write_itr(adapter, itr); 5004 e1000e_write_itr(adapter, itr);
@@ -5064,14 +5077,14 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
5064 iph->tot_len = 0; 5077 iph->tot_len = 0;
5065 iph->check = 0; 5078 iph->check = 0;
5066 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 5079 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
5067 0, IPPROTO_TCP, 0); 5080 0, IPPROTO_TCP, 0);
5068 cmd_length = E1000_TXD_CMD_IP; 5081 cmd_length = E1000_TXD_CMD_IP;
5069 ipcse = skb_transport_offset(skb) - 1; 5082 ipcse = skb_transport_offset(skb) - 1;
5070 } else if (skb_is_gso_v6(skb)) { 5083 } else if (skb_is_gso_v6(skb)) {
5071 ipv6_hdr(skb)->payload_len = 0; 5084 ipv6_hdr(skb)->payload_len = 0;
5072 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 5085 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
5073 &ipv6_hdr(skb)->daddr, 5086 &ipv6_hdr(skb)->daddr,
5074 0, IPPROTO_TCP, 0); 5087 0, IPPROTO_TCP, 0);
5075 ipcse = 0; 5088 ipcse = 0;
5076 } 5089 }
5077 ipcss = skb_network_offset(skb); 5090 ipcss = skb_network_offset(skb);
@@ -5080,7 +5093,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb)
5080 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; 5093 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
5081 5094
5082 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | 5095 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
5083 E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); 5096 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
5084 5097
5085 i = tx_ring->next_to_use; 5098 i = tx_ring->next_to_use;
5086 context_desc = E1000_CONTEXT_DESC(*tx_ring, i); 5099 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
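The TSO path above zeroes the IP length fields and seeds the TCP checksum with a pseudo-header sum computed over a zero length, so the hardware can add each segment's real length as it splits the payload. A kernel-context sketch of the IPv4 case.

	#include <linux/ip.h>
	#include <linux/tcp.h>
	#include <net/checksum.h>

	static void seed_tso_csum_v4(struct sk_buff *skb)
	{
		struct iphdr *iph = ip_hdr(skb);

		iph->tot_len = 0;	/* hardware rewrites per-segment lengths */
		iph->check = 0;
		/* pseudo-header checksum with length 0; HW adds each
		 * segment's length when it splits the payload
		 */
		tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 0, IPPROTO_TCP, 0);
	}
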
@@ -5150,8 +5163,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
5150 5163
5151 context_desc->lower_setup.ip_config = 0; 5164 context_desc->lower_setup.ip_config = 0;
5152 context_desc->upper_setup.tcp_fields.tucss = css; 5165 context_desc->upper_setup.tcp_fields.tucss = css;
5153 context_desc->upper_setup.tcp_fields.tucso = 5166 context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset;
5154 css + skb->csum_offset;
5155 context_desc->upper_setup.tcp_fields.tucse = 0; 5167 context_desc->upper_setup.tcp_fields.tucse = 0;
5156 context_desc->tcp_seg_setup.data = 0; 5168 context_desc->tcp_seg_setup.data = 0;
5157 context_desc->cmd_and_length = cpu_to_le32(cmd_len); 5169 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
@@ -5224,7 +5236,8 @@ static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
5224 buffer_info->time_stamp = jiffies; 5236 buffer_info->time_stamp = jiffies;
5225 buffer_info->next_to_watch = i; 5237 buffer_info->next_to_watch = i;
5226 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, 5238 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
5227 offset, size, DMA_TO_DEVICE); 5239 offset, size,
5240 DMA_TO_DEVICE);
5228 buffer_info->mapped_as_page = true; 5241 buffer_info->mapped_as_page = true;
5229 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) 5242 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
5230 goto dma_error; 5243 goto dma_error;
@@ -5273,7 +5286,7 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
5273 5286
5274 if (tx_flags & E1000_TX_FLAGS_TSO) { 5287 if (tx_flags & E1000_TX_FLAGS_TSO) {
5275 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | 5288 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
5276 E1000_TXD_CMD_TSE; 5289 E1000_TXD_CMD_TSE;
5277 txd_upper |= E1000_TXD_POPTS_TXSM << 8; 5290 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
5278 5291
5279 if (tx_flags & E1000_TX_FLAGS_IPV4) 5292 if (tx_flags & E1000_TX_FLAGS_IPV4)
@@ -5304,8 +5317,8 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
5304 buffer_info = &tx_ring->buffer_info[i]; 5317 buffer_info = &tx_ring->buffer_info[i];
5305 tx_desc = E1000_TX_DESC(*tx_ring, i); 5318 tx_desc = E1000_TX_DESC(*tx_ring, i);
5306 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 5319 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
5307 tx_desc->lower.data = 5320 tx_desc->lower.data = cpu_to_le32(txd_lower |
5308 cpu_to_le32(txd_lower | buffer_info->length); 5321 buffer_info->length);
5309 tx_desc->upper.data = cpu_to_le32(txd_upper); 5322 tx_desc->upper.data = cpu_to_le32(txd_upper);
5310 5323
5311 i++; 5324 i++;
@@ -5355,11 +5368,11 @@ static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
5355 if (skb->len <= MINIMUM_DHCP_PACKET_SIZE) 5368 if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
5356 return 0; 5369 return 0;
5357 5370
5358 if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP)) 5371 if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP))
5359 return 0; 5372 return 0;
5360 5373
5361 { 5374 {
5362 const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14); 5375 const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14);
5363 struct udphdr *udp; 5376 struct udphdr *udp;
5364 5377
5365 if (ip->protocol != IPPROTO_UDP) 5378 if (ip->protocol != IPPROTO_UDP)
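The DHCP filter above walks raw frame bytes: the Ethernet type must be IPv4, the IP protocol UDP, and (further down in the function) the UDP destination the DHCP server port. A self-contained userspace sketch of the same parse, assuming an untagged Ethernet header just as the driver's fixed 14-byte offset does.

	#include <arpa/inet.h>
	#include <netinet/ip.h>
	#include <netinet/udp.h>
	#include <stdbool.h>
	#include <stdint.h>
	#include <string.h>

	static bool is_dhcp_to_server(const uint8_t *frame, size_t len)
	{
		struct iphdr ip;
		struct udphdr udp;
		uint16_t ethertype;
		size_t l4_off;

		if (len < 14 + sizeof(ip))
			return false;

		memcpy(&ethertype, frame + 12, sizeof(ethertype));
		if (ethertype != htons(0x0800))		/* not IPv4 */
			return false;

		memcpy(&ip, frame + 14, sizeof(ip));
		if (ip.protocol != IPPROTO_UDP)
			return false;

		l4_off = 14 + (size_t)ip.ihl * 4;	/* honour IP options */
		if (l4_off < 14 + sizeof(ip) || len < l4_off + sizeof(udp))
			return false;

		memcpy(&udp, frame + l4_off, sizeof(udp));
		return udp.dest == htons(67);		/* DHCP server port */
	}
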
@@ -5584,7 +5597,7 @@ static void e1000_reset_task(struct work_struct *work)
5584 * Returns the address of the device statistics structure. 5597 * Returns the address of the device statistics structure.
5585 **/ 5598 **/
5586struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, 5599struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
5587 struct rtnl_link_stats64 *stats) 5600 struct rtnl_link_stats64 *stats)
5588{ 5601{
5589 struct e1000_adapter *adapter = netdev_priv(netdev); 5602 struct e1000_adapter *adapter = netdev_priv(netdev);
5590 5603
@@ -5605,18 +5618,15 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
5605 * our own version based on RUC and ROC 5618 * our own version based on RUC and ROC
5606 */ 5619 */
5607 stats->rx_errors = adapter->stats.rxerrc + 5620 stats->rx_errors = adapter->stats.rxerrc +
5608 adapter->stats.crcerrs + adapter->stats.algnerrc + 5621 adapter->stats.crcerrs + adapter->stats.algnerrc +
5609 adapter->stats.ruc + adapter->stats.roc + 5622 adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr;
5610 adapter->stats.cexterr; 5623 stats->rx_length_errors = adapter->stats.ruc + adapter->stats.roc;
5611 stats->rx_length_errors = adapter->stats.ruc +
5612 adapter->stats.roc;
5613 stats->rx_crc_errors = adapter->stats.crcerrs; 5624 stats->rx_crc_errors = adapter->stats.crcerrs;
5614 stats->rx_frame_errors = adapter->stats.algnerrc; 5625 stats->rx_frame_errors = adapter->stats.algnerrc;
5615 stats->rx_missed_errors = adapter->stats.mpc; 5626 stats->rx_missed_errors = adapter->stats.mpc;
5616 5627
5617 /* Tx Errors */ 5628 /* Tx Errors */
5618 stats->tx_errors = adapter->stats.ecol + 5629 stats->tx_errors = adapter->stats.ecol + adapter->stats.latecol;
5619 adapter->stats.latecol;
5620 stats->tx_aborted_errors = adapter->stats.ecol; 5630 stats->tx_aborted_errors = adapter->stats.ecol;
5621 stats->tx_window_errors = adapter->stats.latecol; 5631 stats->tx_window_errors = adapter->stats.latecol;
5622 stats->tx_carrier_errors = adapter->stats.tncrs; 5632 stats->tx_carrier_errors = adapter->stats.tncrs;
@@ -5685,9 +5695,9 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
5685 5695
5686 /* adjust allocation if LPE protects us, and we aren't using SBP */ 5696 /* adjust allocation if LPE protects us, and we aren't using SBP */
5687 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || 5697 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
5688 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) 5698 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
5689 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN 5699 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
5690 + ETH_FCS_LEN; 5700 + ETH_FCS_LEN;
5691 5701
5692 if (netif_running(netdev)) 5702 if (netif_running(netdev))
5693 e1000e_up(adapter); 5703 e1000e_up(adapter);
@@ -5866,7 +5876,7 @@ static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
5866 phy_reg &= ~(BM_RCTL_MO_MASK); 5876 phy_reg &= ~(BM_RCTL_MO_MASK);
5867 if (mac_reg & E1000_RCTL_MO_3) 5877 if (mac_reg & E1000_RCTL_MO_3)
5868 phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT) 5878 phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
5869 << BM_RCTL_MO_SHIFT); 5879 << BM_RCTL_MO_SHIFT);
5870 if (mac_reg & E1000_RCTL_BAM) 5880 if (mac_reg & E1000_RCTL_BAM)
5871 phy_reg |= BM_RCTL_BAM; 5881 phy_reg |= BM_RCTL_BAM;
5872 if (mac_reg & E1000_RCTL_PMCF) 5882 if (mac_reg & E1000_RCTL_PMCF)
@@ -5935,10 +5945,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
5935 } 5945 }
5936 5946
5937 ctrl = er32(CTRL); 5947 ctrl = er32(CTRL);
5938 /* advertise wake from D3Cold */
5939 #define E1000_CTRL_ADVD3WUC 0x00100000
5940 /* phy power management enable */
5941 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
5942 ctrl |= E1000_CTRL_ADVD3WUC; 5948 ctrl |= E1000_CTRL_ADVD3WUC;
5943 if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)) 5949 if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
5944 ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT; 5950 ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
@@ -5982,8 +5988,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
5982 */ 5988 */
5983 e1000e_release_hw_control(adapter); 5989 e1000e_release_hw_control(adapter);
5984 5990
5985 pci_clear_master(pdev);
5986
5987 /* The pci-e switch on some quad port adapters will report a 5991 /* The pci-e switch on some quad port adapters will report a
5988 * correctable error when the MAC transitions from D0 to D3. To 5992 * correctable error when the MAC transitions from D0 to D3. To
5989 * prevent this we need to mask off the correctable errors on the 5993 * prevent this we need to mask off the correctable errors on the
@@ -6082,24 +6086,24 @@ static int __e1000_resume(struct pci_dev *pdev)
6082 e1e_rphy(&adapter->hw, BM_WUS, &phy_data); 6086 e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
6083 if (phy_data) { 6087 if (phy_data) {
6084 e_info("PHY Wakeup cause - %s\n", 6088 e_info("PHY Wakeup cause - %s\n",
6085 phy_data & E1000_WUS_EX ? "Unicast Packet" : 6089 phy_data & E1000_WUS_EX ? "Unicast Packet" :
6086 phy_data & E1000_WUS_MC ? "Multicast Packet" : 6090 phy_data & E1000_WUS_MC ? "Multicast Packet" :
6087 phy_data & E1000_WUS_BC ? "Broadcast Packet" : 6091 phy_data & E1000_WUS_BC ? "Broadcast Packet" :
6088 phy_data & E1000_WUS_MAG ? "Magic Packet" : 6092 phy_data & E1000_WUS_MAG ? "Magic Packet" :
6089 phy_data & E1000_WUS_LNKC ? 6093 phy_data & E1000_WUS_LNKC ?
6090 "Link Status Change" : "other"); 6094 "Link Status Change" : "other");
6091 } 6095 }
6092 e1e_wphy(&adapter->hw, BM_WUS, ~0); 6096 e1e_wphy(&adapter->hw, BM_WUS, ~0);
6093 } else { 6097 } else {
6094 u32 wus = er32(WUS); 6098 u32 wus = er32(WUS);
6095 if (wus) { 6099 if (wus) {
6096 e_info("MAC Wakeup cause - %s\n", 6100 e_info("MAC Wakeup cause - %s\n",
6097 wus & E1000_WUS_EX ? "Unicast Packet" : 6101 wus & E1000_WUS_EX ? "Unicast Packet" :
6098 wus & E1000_WUS_MC ? "Multicast Packet" : 6102 wus & E1000_WUS_MC ? "Multicast Packet" :
6099 wus & E1000_WUS_BC ? "Broadcast Packet" : 6103 wus & E1000_WUS_BC ? "Broadcast Packet" :
6100 wus & E1000_WUS_MAG ? "Magic Packet" : 6104 wus & E1000_WUS_MAG ? "Magic Packet" :
6101 wus & E1000_WUS_LNKC ? "Link Status Change" : 6105 wus & E1000_WUS_LNKC ? "Link Status Change" :
6102 "other"); 6106 "other");
6103 } 6107 }
6104 ew32(WUS, ~0); 6108 ew32(WUS, ~0);
6105 } 6109 }
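The resume path decodes the wakeup-status bits with a first-match-wins ternary chain. The same decode reads more easily table-driven, as in this self-contained sketch; the bit positions are assumptions for illustration.

	#include <stdint.h>

	static const char *wakeup_cause(uint32_t wus)
	{
		static const struct { uint32_t bit; const char *name; } causes[] = {
			{ 1u << 2, "Unicast Packet" },	/* assumed bit positions */
			{ 1u << 3, "Multicast Packet" },
			{ 1u << 4, "Broadcast Packet" },
			{ 1u << 1, "Magic Packet" },
			{ 1u << 0, "Link Status Change" },
		};
		unsigned int i;

		/* first match wins, as in the driver's ternary chain */
		for (i = 0; i < sizeof(causes) / sizeof(causes[0]); i++)
			if (wus & causes[i].bit)
				return causes[i].name;
		return "other";
	}
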
@@ -6374,7 +6378,7 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
6374 e_info("(PCI Express:2.5GT/s:%s) %pM\n", 6378 e_info("(PCI Express:2.5GT/s:%s) %pM\n",
6375 /* bus width */ 6379 /* bus width */
6376 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : 6380 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
6377 "Width x1"), 6381 "Width x1"),
6378 /* MAC address */ 6382 /* MAC address */
6379 netdev->dev_addr); 6383 netdev->dev_addr);
6380 e_info("Intel(R) PRO/%s Network Connection\n", 6384 e_info("Intel(R) PRO/%s Network Connection\n",
@@ -6484,7 +6488,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6484 resource_size_t flash_start, flash_len; 6488 resource_size_t flash_start, flash_len;
6485 static int cards_found; 6489 static int cards_found;
6486 u16 aspm_disable_flag = 0; 6490 u16 aspm_disable_flag = 0;
6487 int i, err, pci_using_dac; 6491 int bars, i, err, pci_using_dac;
6488 u16 eeprom_data = 0; 6492 u16 eeprom_data = 0;
6489 u16 eeprom_apme_mask = E1000_EEPROM_APME; 6493 u16 eeprom_apme_mask = E1000_EEPROM_APME;
6490 6494
@@ -6511,15 +6515,16 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6511 err = dma_set_coherent_mask(&pdev->dev, 6515 err = dma_set_coherent_mask(&pdev->dev,
6512 DMA_BIT_MASK(32)); 6516 DMA_BIT_MASK(32));
6513 if (err) { 6517 if (err) {
6514 dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); 6518 dev_err(&pdev->dev,
6519 "No usable DMA configuration, aborting\n");
6515 goto err_dma; 6520 goto err_dma;
6516 } 6521 }
6517 } 6522 }
6518 } 6523 }
6519 6524
6520 err = pci_request_selected_regions_exclusive(pdev, 6525 bars = pci_select_bars(pdev, IORESOURCE_MEM);
6521 pci_select_bars(pdev, IORESOURCE_MEM), 6526 err = pci_request_selected_regions_exclusive(pdev, bars,
6522 e1000e_driver_name); 6527 e1000e_driver_name);
6523 if (err) 6528 if (err)
6524 goto err_pci_reg; 6529 goto err_pci_reg;
6525 6530
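The probe change hoists pci_select_bars() into a local so the BAR mask is computed once before the exclusive region request. A hedged kernel-context sketch of that claim.

	#include <linux/pci.h>

	/* Select all memory BARs, then request them exclusively under the
	 * driver's name so nothing else (including userspace via sysfs)
	 * can map them.
	 */
	static int claim_mem_bars(struct pci_dev *pdev, const char *name)
	{
		int bars = pci_select_bars(pdev, IORESOURCE_MEM);

		return pci_request_selected_regions_exclusive(pdev, bars, name);
	}
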
@@ -6572,6 +6577,10 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6572 goto err_flashmap; 6577 goto err_flashmap;
6573 } 6578 }
6574 6579
6580 /* Set default EEE advertisement */
6581 if (adapter->flags2 & FLAG2_HAS_EEE)
6582 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
6583
6575 /* construct the net_device struct */ 6584 /* construct the net_device struct */
6576 netdev->netdev_ops = &e1000e_netdev_ops; 6585 netdev->netdev_ops = &e1000e_netdev_ops;
6577 e1000e_set_ethtool_ops(netdev); 6586 e1000e_set_ethtool_ops(netdev);
@@ -6688,11 +6697,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
6688 6697
6689 init_timer(&adapter->watchdog_timer); 6698 init_timer(&adapter->watchdog_timer);
6690 adapter->watchdog_timer.function = e1000_watchdog; 6699 adapter->watchdog_timer.function = e1000_watchdog;
6691 adapter->watchdog_timer.data = (unsigned long) adapter; 6700 adapter->watchdog_timer.data = (unsigned long)adapter;
6692 6701
6693 init_timer(&adapter->phy_info_timer); 6702 init_timer(&adapter->phy_info_timer);
6694 adapter->phy_info_timer.function = e1000_update_phy_info; 6703 adapter->phy_info_timer.function = e1000_update_phy_info;
6695 adapter->phy_info_timer.data = (unsigned long) adapter; 6704 adapter->phy_info_timer.data = (unsigned long)adapter;
6696 6705
6697 INIT_WORK(&adapter->reset_task, e1000_reset_task); 6706 INIT_WORK(&adapter->reset_task, e1000_reset_task);
6698 INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); 6707 INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
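The timer setup above uses the legacy API of this era: the callback takes an unsigned long, so the context pointer is round-tripped through a cast via the .data field. A hedged sketch of the pattern.

	#include <linux/jiffies.h>
	#include <linux/timer.h>

	struct my_ctx {
		struct timer_list watchdog;
	};

	static void watchdog_fn(unsigned long data)
	{
		struct my_ctx *ctx = (struct my_ctx *)data;

		(void)ctx;	/* a real driver defers work to process context */
	}

	static void setup_watchdog(struct my_ctx *ctx)
	{
		init_timer(&ctx->watchdog);
		ctx->watchdog.function = watchdog_fn;
		ctx->watchdog.data = (unsigned long)ctx;
		mod_timer(&ctx->watchdog, jiffies + HZ);	/* first tick in 1s */
	}
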
@@ -6800,7 +6809,7 @@ err_ioremap:
6800 free_netdev(netdev); 6809 free_netdev(netdev);
6801err_alloc_etherdev: 6810err_alloc_etherdev:
6802 pci_release_selected_regions(pdev, 6811 pci_release_selected_regions(pdev,
6803 pci_select_bars(pdev, IORESOURCE_MEM)); 6812 pci_select_bars(pdev, IORESOURCE_MEM));
6804err_pci_reg: 6813err_pci_reg:
6805err_dma: 6814err_dma:
6806 pci_disable_device(pdev); 6815 pci_disable_device(pdev);
@@ -6870,7 +6879,7 @@ static void e1000_remove(struct pci_dev *pdev)
6870 if (adapter->hw.flash_address) 6879 if (adapter->hw.flash_address)
6871 iounmap(adapter->hw.flash_address); 6880 iounmap(adapter->hw.flash_address);
6872 pci_release_selected_regions(pdev, 6881 pci_release_selected_regions(pdev,
6873 pci_select_bars(pdev, IORESOURCE_MEM)); 6882 pci_select_bars(pdev, IORESOURCE_MEM));
6874 6883
6875 free_netdev(netdev); 6884 free_netdev(netdev);
6876 6885
@@ -6891,7 +6900,8 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
6891 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 }, 6900 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
6892 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 }, 6901 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
6893 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 }, 6902 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
6894 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 }, 6903 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP),
6904 board_82571 },
6895 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 }, 6905 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
6896 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 }, 6906 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
6897 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 }, 6907 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
@@ -6967,8 +6977,8 @@ MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
6967#ifdef CONFIG_PM 6977#ifdef CONFIG_PM
6968static const struct dev_pm_ops e1000_pm_ops = { 6978static const struct dev_pm_ops e1000_pm_ops = {
6969 SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume) 6979 SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
6970 SET_RUNTIME_PM_OPS(e1000_runtime_suspend, 6980 SET_RUNTIME_PM_OPS(e1000_runtime_suspend, e1000_runtime_resume,
6971 e1000_runtime_resume, e1000_idle) 6981 e1000_idle)
6972}; 6982};
6973#endif 6983#endif
6974 6984
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index 84fecc268162..44ddc0a0ee0e 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -630,7 +630,7 @@ void e1000e_reload_nvm_generic(struct e1000_hw *hw)
630{ 630{
631 u32 ctrl_ext; 631 u32 ctrl_ext;
632 632
633 udelay(10); 633 usleep_range(10, 20);
634 ctrl_ext = er32(CTRL_EXT); 634 ctrl_ext = er32(CTRL_EXT);
635 ctrl_ext |= E1000_CTRL_EXT_EE_RST; 635 ctrl_ext |= E1000_CTRL_EXT_EE_RST;
636 ew32(CTRL_EXT, ctrl_ext); 636 ew32(CTRL_EXT, ctrl_ext);
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index 98da75dff936..c16bd75b6caa 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -45,7 +45,7 @@
45unsigned int copybreak = COPYBREAK_DEFAULT; 45unsigned int copybreak = COPYBREAK_DEFAULT;
46module_param(copybreak, uint, 0644); 46module_param(copybreak, uint, 0644);
47MODULE_PARM_DESC(copybreak, 47MODULE_PARM_DESC(copybreak,
48 "Maximum size of packet that is copied to a new buffer on receive"); 48 "Maximum size of packet that is copied to a new buffer on receive");
49 49
50/* All parameters are treated the same, as an integer array of values. 50/* All parameters are treated the same, as an integer array of values.
51 * This macro just reduces the need to repeat the same declaration code 51 * This macro just reduces the need to repeat the same declaration code
@@ -143,7 +143,8 @@ E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
143 * 143 *
144 * Default Value: 1 (enabled) 144 * Default Value: 1 (enabled)
145 */ 145 */
146E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]"); 146E1000_PARAM(WriteProtectNVM,
147 "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]");
147 148
148/* Enable CRC Stripping 149/* Enable CRC Stripping
149 * 150 *
@@ -160,13 +161,18 @@ struct e1000_option {
160 const char *err; 161 const char *err;
161 int def; 162 int def;
162 union { 163 union {
163 struct { /* range_option info */ 164 /* range_option info */
165 struct {
164 int min; 166 int min;
165 int max; 167 int max;
166 } r; 168 } r;
167 struct { /* list_option info */ 169 /* list_option info */
170 struct {
168 int nr; 171 int nr;
169 struct e1000_opt_list { int i; char *str; } *p; 172 struct e1000_opt_list {
173 int i;
174 char *str;
175 } *p;
170 } l; 176 } l;
171 } arg; 177 } arg;
172}; 178};
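The param.c restructuring above is purely cosmetic, but the descriptor it reflows is worth seeing whole: one tagged union covers both range checks and list lookups. A self-contained sketch of the descriptor and a validator over it.

	#include <stdio.h>

	enum opt_type { RANGE_OPTION, LIST_OPTION };

	struct opt_list { int i; const char *str; };

	struct option_desc {
		enum opt_type type;
		const char *name;
		int def;
		union {
			struct { int min, max; } r;			/* range_option */
			struct { int nr; const struct opt_list *p; } l;	/* list_option */
		} arg;
	};

	/* Clamp an out-of-range or unlisted value back to the default. */
	static int validate_option(int *value, const struct option_desc *opt)
	{
		switch (opt->type) {
		case RANGE_OPTION:
			if (*value >= opt->arg.r.min && *value <= opt->arg.r.max)
				return 0;
			break;
		case LIST_OPTION: {
			int i;

			for (i = 0; i < opt->arg.l.nr; i++)
				if (*value == opt->arg.l.p[i].i)
					return 0;
			break;
		}
		}
		printf("%s: invalid value, using default %d\n", opt->name, opt->def);
		*value = opt->def;
		return -1;
	}
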
@@ -246,7 +252,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
246 "Using defaults for all values\n"); 252 "Using defaults for all values\n");
247 } 253 }
248 254
249 { /* Transmit Interrupt Delay */ 255 /* Transmit Interrupt Delay */
256 {
250 static const struct e1000_option opt = { 257 static const struct e1000_option opt = {
251 .type = range_option, 258 .type = range_option,
252 .name = "Transmit Interrupt Delay", 259 .name = "Transmit Interrupt Delay",
@@ -265,7 +272,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
265 adapter->tx_int_delay = opt.def; 272 adapter->tx_int_delay = opt.def;
266 } 273 }
267 } 274 }
268 { /* Transmit Absolute Interrupt Delay */ 275 /* Transmit Absolute Interrupt Delay */
276 {
269 static const struct e1000_option opt = { 277 static const struct e1000_option opt = {
270 .type = range_option, 278 .type = range_option,
271 .name = "Transmit Absolute Interrupt Delay", 279 .name = "Transmit Absolute Interrupt Delay",
@@ -284,7 +292,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
284 adapter->tx_abs_int_delay = opt.def; 292 adapter->tx_abs_int_delay = opt.def;
285 } 293 }
286 } 294 }
287 { /* Receive Interrupt Delay */ 295 /* Receive Interrupt Delay */
296 {
288 static struct e1000_option opt = { 297 static struct e1000_option opt = {
289 .type = range_option, 298 .type = range_option,
290 .name = "Receive Interrupt Delay", 299 .name = "Receive Interrupt Delay",
@@ -303,7 +312,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
303 adapter->rx_int_delay = opt.def; 312 adapter->rx_int_delay = opt.def;
304 } 313 }
305 } 314 }
306 { /* Receive Absolute Interrupt Delay */ 315 /* Receive Absolute Interrupt Delay */
316 {
307 static const struct e1000_option opt = { 317 static const struct e1000_option opt = {
308 .type = range_option, 318 .type = range_option,
309 .name = "Receive Absolute Interrupt Delay", 319 .name = "Receive Absolute Interrupt Delay",
@@ -322,7 +332,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
322 adapter->rx_abs_int_delay = opt.def; 332 adapter->rx_abs_int_delay = opt.def;
323 } 333 }
324 } 334 }
325 { /* Interrupt Throttling Rate */ 335 /* Interrupt Throttling Rate */
336 {
326 static const struct e1000_option opt = { 337 static const struct e1000_option opt = {
327 .type = range_option, 338 .type = range_option,
328 .name = "Interrupt Throttling Rate (ints/sec)", 339 .name = "Interrupt Throttling Rate (ints/sec)",
@@ -392,7 +403,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
392 break; 403 break;
393 } 404 }
394 } 405 }
395 { /* Interrupt Mode */ 406 /* Interrupt Mode */
407 {
396 static struct e1000_option opt = { 408 static struct e1000_option opt = {
397 .type = range_option, 409 .type = range_option,
398 .name = "Interrupt Mode", 410 .name = "Interrupt Mode",
@@ -435,7 +447,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
435 kfree(opt.err); 447 kfree(opt.err);
436#endif 448#endif
437 } 449 }
438 { /* Smart Power Down */ 450 /* Smart Power Down */
451 {
439 static const struct e1000_option opt = { 452 static const struct e1000_option opt = {
440 .type = enable_option, 453 .type = enable_option,
441 .name = "PHY Smart Power Down", 454 .name = "PHY Smart Power Down",
@@ -450,7 +463,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
450 adapter->flags |= FLAG_SMART_POWER_DOWN; 463 adapter->flags |= FLAG_SMART_POWER_DOWN;
451 } 464 }
452 } 465 }
453 { /* CRC Stripping */ 466 /* CRC Stripping */
467 {
454 static const struct e1000_option opt = { 468 static const struct e1000_option opt = {
455 .type = enable_option, 469 .type = enable_option,
456 .name = "CRC Stripping", 470 .name = "CRC Stripping",
@@ -470,27 +484,28 @@ void e1000e_check_options(struct e1000_adapter *adapter)
470 adapter->flags2 |= FLAG2_DFLT_CRC_STRIPPING; 484 adapter->flags2 |= FLAG2_DFLT_CRC_STRIPPING;
471 } 485 }
472 } 486 }
473 { /* Kumeran Lock Loss Workaround */ 487 /* Kumeran Lock Loss Workaround */
488 {
474 static const struct e1000_option opt = { 489 static const struct e1000_option opt = {
475 .type = enable_option, 490 .type = enable_option,
476 .name = "Kumeran Lock Loss Workaround", 491 .name = "Kumeran Lock Loss Workaround",
477 .err = "defaulting to Enabled", 492 .err = "defaulting to Enabled",
478 .def = OPTION_ENABLED 493 .def = OPTION_ENABLED
479 }; 494 };
495 bool enabled = opt.def;
480 496
481 if (num_KumeranLockLoss > bd) { 497 if (num_KumeranLockLoss > bd) {
482 unsigned int kmrn_lock_loss = KumeranLockLoss[bd]; 498 unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
483 e1000_validate_option(&kmrn_lock_loss, &opt, adapter); 499 e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
 484 if (hw->mac.type == e1000_ich8lan)
 485 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
 486 kmrn_lock_loss);
 487 } else {
 488 if (hw->mac.type == e1000_ich8lan)
 489 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
 490 opt.def);
 491 }
 500 enabled = kmrn_lock_loss;
 501 }
502
503 if (hw->mac.type == e1000_ich8lan)
504 e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
505 enabled);
492 } 506 }
493 { /* Write-protect NVM */ 507 /* Write-protect NVM */
508 {
494 static const struct e1000_option opt = { 509 static const struct e1000_option opt = {
495 .type = enable_option, 510 .type = enable_option,
496 .name = "Write-protect NVM", 511 .name = "Write-protect NVM",
@@ -500,7 +515,8 @@ void e1000e_check_options(struct e1000_adapter *adapter)
500 515
501 if (adapter->flags & FLAG_IS_ICH) { 516 if (adapter->flags & FLAG_IS_ICH) {
502 if (num_WriteProtectNVM > bd) { 517 if (num_WriteProtectNVM > bd) {
 503 unsigned int write_protect_nvm = WriteProtectNVM[bd];
 518 unsigned int write_protect_nvm =
 519 WriteProtectNVM[bd];
504 e1000_validate_option(&write_protect_nvm, &opt, 520 e1000_validate_option(&write_protect_nvm, &opt,
505 adapter); 521 adapter);
506 if (write_protect_nvm) 522 if (write_protect_nvm)
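The Kumeran hunk above is a small refactor: instead of calling the workaround setter from two duplicated branches, the code latches the decision in a local bool seeded with the option default and overwritten only when the module parameter was supplied and validated, then calls the hardware hook once. A minimal sketch of that pattern; the names (check_option, opt_default, param_given, apply) are illustrative, not the driver's own:

#include <stdbool.h>

static void check_option(bool opt_default, bool param_given, bool param_value,
                         void (*apply)(bool))
{
        bool enabled = opt_default;     /* start from the option default */

        if (param_given)
                enabled = param_value;  /* validated user override wins */

        apply(enabled);                 /* hardware hook called exactly once */
}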
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 0930c136aa31..59c76a6815a0 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -37,7 +37,9 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
37 37
38/* Cable length tables */ 38/* Cable length tables */
39static const u16 e1000_m88_cable_length_table[] = { 39static const u16 e1000_m88_cable_length_table[] = {
40 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; 40 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED
41};
42
41#define M88E1000_CABLE_LENGTH_TABLE_SIZE \ 43#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
42 ARRAY_SIZE(e1000_m88_cable_length_table) 44 ARRAY_SIZE(e1000_m88_cable_length_table)
43 45
@@ -49,7 +51,9 @@ static const u16 e1000_igp_2_cable_length_table[] = {
49 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82, 51 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82,
50 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95, 52 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95,
51 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121, 53 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121,
52 124}; 54 124
55};
56
53#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ 57#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
54 ARRAY_SIZE(e1000_igp_2_cable_length_table) 58 ARRAY_SIZE(e1000_igp_2_cable_length_table)
55 59
@@ -67,8 +71,7 @@ s32 e1000e_check_reset_block_generic(struct e1000_hw *hw)
67 71
68 manc = er32(MANC); 72 manc = er32(MANC);
69 73
 70 return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
 71 E1000_BLK_PHY_RESET : 0;
 74 return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? E1000_BLK_PHY_RESET : 0;
72} 75}
73 76
74/** 77/**
@@ -94,7 +97,7 @@ s32 e1000e_get_phy_id(struct e1000_hw *hw)
94 return ret_val; 97 return ret_val;
95 98
96 phy->id = (u32)(phy_id << 16); 99 phy->id = (u32)(phy_id << 16);
97 udelay(20); 100 usleep_range(20, 40);
98 ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id); 101 ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id);
99 if (ret_val) 102 if (ret_val)
100 return ret_val; 103 return ret_val;
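The udelay(20) to usleep_range(20, 40) change above follows the usual kernel guidance: delays of tens of microseconds in non-atomic context should give the timer subsystem a window to coalesce wakeups rather than busy-wait a fixed period. A userspace stand-in for the ranged sleep, assuming POSIX nanosleep() as the primitive:

#include <time.h>

/* Stand-in for usleep_range(min, max): sleep somewhere inside the
 * window, instead of spinning like udelay() does. */
static void sleep_range_us(long min_us, long max_us)
{
        long us = (min_us + max_us) / 2;        /* aim for mid-window */
        struct timespec ts = { us / 1000000, (us % 1000000) * 1000L };

        nanosleep(&ts, NULL);
}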
@@ -175,7 +178,13 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
175 e_dbg("MDI Error\n"); 178 e_dbg("MDI Error\n");
176 return -E1000_ERR_PHY; 179 return -E1000_ERR_PHY;
177 } 180 }
 178 *data = (u16) mdic;
 181 if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
182 e_dbg("MDI Read offset error - requested %d, returned %d\n",
183 offset,
184 (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
185 return -E1000_ERR_PHY;
186 }
187 *data = (u16)mdic;
179 188
180 /* Allow some time after each MDIC transaction to avoid 189 /* Allow some time after each MDIC transaction to avoid
181 * reading duplicate data in the next MDIC transaction. 190 * reading duplicate data in the next MDIC transaction.
@@ -233,6 +242,12 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
233 e_dbg("MDI Error\n"); 242 e_dbg("MDI Error\n");
234 return -E1000_ERR_PHY; 243 return -E1000_ERR_PHY;
235 } 244 }
245 if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) {
246 e_dbg("MDI Write offset error - requested %d, returned %d\n",
247 offset,
248 (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT);
249 return -E1000_ERR_PHY;
250 }
236 251
237 /* Allow some time after each MDIC transaction to avoid 252 /* Allow some time after each MDIC transaction to avoid
238 * reading duplicate data in the next MDIC transaction. 253 * reading duplicate data in the next MDIC transaction.
@@ -324,7 +339,7 @@ s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page)
324 * semaphores before exiting. 339 * semaphores before exiting.
325 **/ 340 **/
326static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data, 341static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
327 bool locked) 342 bool locked)
328{ 343{
329 s32 ret_val = 0; 344 s32 ret_val = 0;
330 345
@@ -391,7 +406,7 @@ s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
391 * at the offset. Release any acquired semaphores before exiting. 406 * at the offset. Release any acquired semaphores before exiting.
392 **/ 407 **/
393static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, 408static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
394 bool locked) 409 bool locked)
395{ 410{
396 s32 ret_val = 0; 411 s32 ret_val = 0;
397 412
@@ -410,8 +425,7 @@ static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
410 (u16)offset); 425 (u16)offset);
411 if (!ret_val) 426 if (!ret_val)
412 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & 427 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS &
413 offset, 428 offset, data);
414 data);
415 if (!locked) 429 if (!locked)
416 hw->phy.ops.release(hw); 430 hw->phy.ops.release(hw);
417 431
@@ -458,7 +472,7 @@ s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data)
458 * Release any acquired semaphores before exiting. 472 * Release any acquired semaphores before exiting.
459 **/ 473 **/
460static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, 474static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
461 bool locked) 475 bool locked)
462{ 476{
463 u32 kmrnctrlsta; 477 u32 kmrnctrlsta;
464 478
@@ -531,7 +545,7 @@ s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
531 * before exiting. 545 * before exiting.
532 **/ 546 **/
533static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, 547static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
534 bool locked) 548 bool locked)
535{ 549{
536 u32 kmrnctrlsta; 550 u32 kmrnctrlsta;
537 551
@@ -772,8 +786,7 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
772 786
773 phy_data |= M88E1000_EPSCR_TX_CLK_25; 787 phy_data |= M88E1000_EPSCR_TX_CLK_25;
774 788
775 if ((phy->revision == 2) && 789 if ((phy->revision == 2) && (phy->id == M88E1111_I_PHY_ID)) {
776 (phy->id == M88E1111_I_PHY_ID)) {
777 /* 82573L PHY - set the downshift counter to 5x. */ 790 /* 82573L PHY - set the downshift counter to 5x. */
778 phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK; 791 phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK;
779 phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; 792 phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
@@ -1296,7 +1309,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
1296 e_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); 1309 e_dbg("Waiting for forced speed/duplex link on M88 phy.\n");
1297 1310
1298 ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 1311 ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
1299 100000, &link); 1312 100000, &link);
1300 if (ret_val) 1313 if (ret_val)
1301 return ret_val; 1314 return ret_val;
1302 1315
@@ -1319,7 +1332,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
1319 1332
1320 /* Try once more */ 1333 /* Try once more */
1321 ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 1334 ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
1322 100000, &link); 1335 100000, &link);
1323 if (ret_val) 1336 if (ret_val)
1324 return ret_val; 1337 return ret_val;
1325 } 1338 }
@@ -1609,9 +1622,9 @@ s32 e1000_check_polarity_m88(struct e1000_hw *hw)
1609 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &data); 1622 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &data);
1610 1623
1611 if (!ret_val) 1624 if (!ret_val)
1612 phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY) 1625 phy->cable_polarity = ((data & M88E1000_PSSR_REV_POLARITY)
1613 ? e1000_rev_polarity_reversed 1626 ? e1000_rev_polarity_reversed
1614 : e1000_rev_polarity_normal; 1627 : e1000_rev_polarity_normal);
1615 1628
1616 return ret_val; 1629 return ret_val;
1617} 1630}
@@ -1653,9 +1666,9 @@ s32 e1000_check_polarity_igp(struct e1000_hw *hw)
1653 ret_val = e1e_rphy(hw, offset, &data); 1666 ret_val = e1e_rphy(hw, offset, &data);
1654 1667
1655 if (!ret_val) 1668 if (!ret_val)
1656 phy->cable_polarity = (data & mask) 1669 phy->cable_polarity = ((data & mask)
1657 ? e1000_rev_polarity_reversed 1670 ? e1000_rev_polarity_reversed
1658 : e1000_rev_polarity_normal; 1671 : e1000_rev_polarity_normal);
1659 1672
1660 return ret_val; 1673 return ret_val;
1661} 1674}
@@ -1685,9 +1698,9 @@ s32 e1000_check_polarity_ife(struct e1000_hw *hw)
1685 ret_val = e1e_rphy(hw, offset, &phy_data); 1698 ret_val = e1e_rphy(hw, offset, &phy_data);
1686 1699
1687 if (!ret_val) 1700 if (!ret_val)
1688 phy->cable_polarity = (phy_data & mask) 1701 phy->cable_polarity = ((phy_data & mask)
1689 ? e1000_rev_polarity_reversed 1702 ? e1000_rev_polarity_reversed
1690 : e1000_rev_polarity_normal; 1703 : e1000_rev_polarity_normal);
1691 1704
1692 return ret_val; 1705 return ret_val;
1693} 1706}
@@ -1733,7 +1746,7 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
1733 * Polls the PHY status register for link, 'iterations' number of times. 1746 * Polls the PHY status register for link, 'iterations' number of times.
1734 **/ 1747 **/
1735s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, 1748s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
1736 u32 usec_interval, bool *success) 1749 u32 usec_interval, bool *success)
1737{ 1750{
1738 s32 ret_val = 0; 1751 s32 ret_val = 0;
1739 u16 i, phy_status; 1752 u16 i, phy_status;
@@ -1756,7 +1769,7 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
1756 if (phy_status & BMSR_LSTATUS) 1769 if (phy_status & BMSR_LSTATUS)
1757 break; 1770 break;
1758 if (usec_interval >= 1000) 1771 if (usec_interval >= 1000)
1759 mdelay(usec_interval/1000); 1772 mdelay(usec_interval / 1000);
1760 else 1773 else
1761 udelay(usec_interval); 1774 udelay(usec_interval);
1762 } 1775 }
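e1000e_phy_has_link_generic(), touched above, is a bounded poll: read the status register up to 'iterations' times and sleep usec_interval between reads, via mdelay() when the interval is a millisecond or more and udelay() otherwise. A userspace sketch of the same budgeted loop, with nanosleep() standing in for both delay primitives and a stub callback for the register read:

#include <stdbool.h>
#include <time.h>

static bool poll_with_budget(bool (*done)(void), unsigned int iterations,
                             unsigned int usec_interval)
{
        struct timespec ts = { usec_interval / 1000000,
                               (usec_interval % 1000000) * 1000L };
        unsigned int i;

        for (i = 0; i < iterations; i++) {
                if (done())
                        return true;            /* link seen in time */
                nanosleep(&ts, NULL);
        }
        return false;                           /* budget exhausted */
}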
@@ -1791,8 +1804,8 @@ s32 e1000e_get_cable_length_m88(struct e1000_hw *hw)
1791 if (ret_val) 1804 if (ret_val)
1792 return ret_val; 1805 return ret_val;
1793 1806
1794 index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> 1807 index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
1795 M88E1000_PSSR_CABLE_LENGTH_SHIFT; 1808 M88E1000_PSSR_CABLE_LENGTH_SHIFT);
1796 1809
1797 if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) 1810 if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1)
1798 return -E1000_ERR_PHY; 1811 return -E1000_ERR_PHY;
@@ -1824,10 +1837,10 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
1824 u16 cur_agc_index, max_agc_index = 0; 1837 u16 cur_agc_index, max_agc_index = 0;
1825 u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; 1838 u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
1826 static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { 1839 static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
1827 IGP02E1000_PHY_AGC_A, 1840 IGP02E1000_PHY_AGC_A,
1828 IGP02E1000_PHY_AGC_B, 1841 IGP02E1000_PHY_AGC_B,
1829 IGP02E1000_PHY_AGC_C, 1842 IGP02E1000_PHY_AGC_C,
1830 IGP02E1000_PHY_AGC_D 1843 IGP02E1000_PHY_AGC_D
1831 }; 1844 };
1832 1845
1833 /* Read the AGC registers for all channels */ 1846 /* Read the AGC registers for all channels */
@@ -1841,8 +1854,8 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
1841 * that can be put into the lookup table to obtain the 1854 * that can be put into the lookup table to obtain the
1842 * approximate cable length. 1855 * approximate cable length.
1843 */ 1856 */
1844 cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & 1857 cur_agc_index = ((phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
1845 IGP02E1000_AGC_LENGTH_MASK; 1858 IGP02E1000_AGC_LENGTH_MASK);
1846 1859
1847 /* Array index bound check. */ 1860 /* Array index bound check. */
1848 if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || 1861 if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
@@ -1865,8 +1878,8 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
1865 agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); 1878 agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
1866 1879
1867 /* Calculate cable length with the error range of +/- 10 meters. */ 1880 /* Calculate cable length with the error range of +/- 10 meters. */
1868 phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ? 1881 phy->min_cable_length = (((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
1869 (agc_value - IGP02E1000_AGC_RANGE) : 0; 1882 (agc_value - IGP02E1000_AGC_RANGE) : 0);
1870 phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; 1883 phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
1871 1884
1872 phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; 1885 phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
@@ -2040,9 +2053,9 @@ s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
2040 return ret_val; 2053 return ret_val;
2041 } else { 2054 } else {
2042 /* Polarity is forced */ 2055 /* Polarity is forced */
2043 phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY) 2056 phy->cable_polarity = ((data & IFE_PSC_FORCE_POLARITY)
2044 ? e1000_rev_polarity_reversed 2057 ? e1000_rev_polarity_reversed
2045 : e1000_rev_polarity_normal; 2058 : e1000_rev_polarity_normal);
2046 } 2059 }
2047 2060
2048 ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); 2061 ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data);
@@ -2119,7 +2132,7 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
2119 ew32(CTRL, ctrl); 2132 ew32(CTRL, ctrl);
2120 e1e_flush(); 2133 e1e_flush();
2121 2134
2122 udelay(150); 2135 usleep_range(150, 300);
2123 2136
2124 phy->ops.release(hw); 2137 phy->ops.release(hw);
2125 2138
@@ -2375,13 +2388,13 @@ s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
2375 2388
2376 /* Page is shifted left, PHY expects (page x 32) */ 2389 /* Page is shifted left, PHY expects (page x 32) */
2377 ret_val = e1000e_write_phy_reg_mdic(hw, page_select, 2390 ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
2378 (page << page_shift)); 2391 (page << page_shift));
2379 if (ret_val) 2392 if (ret_val)
2380 goto release; 2393 goto release;
2381 } 2394 }
2382 2395
2383 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, 2396 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
2384 data); 2397 data);
2385 2398
2386release: 2399release:
2387 hw->phy.ops.release(hw); 2400 hw->phy.ops.release(hw);
@@ -2433,13 +2446,13 @@ s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
2433 2446
2434 /* Page is shifted left, PHY expects (page x 32) */ 2447 /* Page is shifted left, PHY expects (page x 32) */
2435 ret_val = e1000e_write_phy_reg_mdic(hw, page_select, 2448 ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
2436 (page << page_shift)); 2449 (page << page_shift));
2437 if (ret_val) 2450 if (ret_val)
2438 goto release; 2451 goto release;
2439 } 2452 }
2440 2453
2441 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, 2454 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
2442 data); 2455 data);
2443release: 2456release:
2444 hw->phy.ops.release(hw); 2457 hw->phy.ops.release(hw);
2445 return ret_val; 2458 return ret_val;
@@ -2674,7 +2687,7 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
2674 if (read) { 2687 if (read) {
2675 /* Read the Wakeup register page value using opcode 0x12 */ 2688 /* Read the Wakeup register page value using opcode 0x12 */
2676 ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, 2689 ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
2677 data); 2690 data);
2678 } else { 2691 } else {
2679 /* Write the Wakeup register page value using opcode 0x12 */ 2692 /* Write the Wakeup register page value using opcode 0x12 */
2680 ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, 2693 ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
@@ -2763,7 +2776,7 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
2763 2776
2764 if (page > 0 && page < HV_INTC_FC_PAGE_START) { 2777 if (page > 0 && page < HV_INTC_FC_PAGE_START) {
2765 ret_val = e1000_access_phy_debug_regs_hv(hw, offset, 2778 ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
2766 data, true); 2779 data, true);
2767 goto out; 2780 goto out;
2768 } 2781 }
2769 2782
@@ -2786,8 +2799,7 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
2786 e_dbg("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page, 2799 e_dbg("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
2787 page << IGP_PAGE_SHIFT, reg); 2800 page << IGP_PAGE_SHIFT, reg);
2788 2801
2789 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, 2802 ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, data);
2790 data);
2791out: 2803out:
2792 if (!locked) 2804 if (!locked)
2793 hw->phy.ops.release(hw); 2805 hw->phy.ops.release(hw);
@@ -2871,7 +2883,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
2871 2883
2872 if (page > 0 && page < HV_INTC_FC_PAGE_START) { 2884 if (page > 0 && page < HV_INTC_FC_PAGE_START) {
2873 ret_val = e1000_access_phy_debug_regs_hv(hw, offset, 2885 ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
2874 &data, false); 2886 &data, false);
2875 goto out; 2887 goto out;
2876 } 2888 }
2877 2889
@@ -2910,7 +2922,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
2910 page << IGP_PAGE_SHIFT, reg); 2922 page << IGP_PAGE_SHIFT, reg);
2911 2923
2912 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, 2924 ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
2913 data); 2925 data);
2914 2926
2915out: 2927out:
2916 if (!locked) 2928 if (!locked)
@@ -2988,15 +3000,15 @@ static u32 e1000_get_phy_addr_for_hv_page(u32 page)
2988 * These accesses done with PHY address 2 and without using pages. 3000 * These accesses done with PHY address 2 and without using pages.
2989 **/ 3001 **/
2990static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, 3002static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
2991 u16 *data, bool read) 3003 u16 *data, bool read)
2992{ 3004{
2993 s32 ret_val; 3005 s32 ret_val;
2994 u32 addr_reg; 3006 u32 addr_reg;
2995 u32 data_reg; 3007 u32 data_reg;
2996 3008
2997 /* This takes care of the difference with desktop vs mobile phy */ 3009 /* This takes care of the difference with desktop vs mobile phy */
2998 addr_reg = (hw->phy.type == e1000_phy_82578) ? 3010 addr_reg = ((hw->phy.type == e1000_phy_82578) ?
2999 I82578_ADDR_REG : I82577_ADDR_REG; 3011 I82578_ADDR_REG : I82577_ADDR_REG);
3000 data_reg = addr_reg + 1; 3012 data_reg = addr_reg + 1;
3001 3013
3002 /* All operations in this function are phy address 2 */ 3014 /* All operations in this function are phy address 2 */
@@ -3050,8 +3062,8 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
3050 if (ret_val) 3062 if (ret_val)
3051 return ret_val; 3063 return ret_val;
3052 3064
3053 data &= BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | 3065 data &= (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
3054 BM_CS_STATUS_SPEED_MASK; 3066 BM_CS_STATUS_SPEED_MASK);
3055 3067
3056 if (data != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | 3068 if (data != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
3057 BM_CS_STATUS_SPEED_1000)) 3069 BM_CS_STATUS_SPEED_1000))
@@ -3086,9 +3098,9 @@ s32 e1000_check_polarity_82577(struct e1000_hw *hw)
3086 ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data); 3098 ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
3087 3099
3088 if (!ret_val) 3100 if (!ret_val)
3089 phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY) 3101 phy->cable_polarity = ((data & I82577_PHY_STATUS2_REV_POLARITY)
3090 ? e1000_rev_polarity_reversed 3102 ? e1000_rev_polarity_reversed
3091 : e1000_rev_polarity_normal; 3103 : e1000_rev_polarity_normal);
3092 3104
3093 return ret_val; 3105 return ret_val;
3094} 3106}
@@ -3215,8 +3227,8 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
3215 if (ret_val) 3227 if (ret_val)
3216 return ret_val; 3228 return ret_val;
3217 3229
3218 length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >> 3230 length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
3219 I82577_DSTATUS_CABLE_LENGTH_SHIFT; 3231 I82577_DSTATUS_CABLE_LENGTH_SHIFT);
3220 3232
3221 if (length == E1000_CABLE_LENGTH_UNDEFINED) 3233 if (length == E1000_CABLE_LENGTH_UNDEFINED)
3222 return -E1000_ERR_PHY; 3234 return -E1000_ERR_PHY;
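The two MDIC hunks earlier in this file add a readback sanity check to both the read and write paths: the completed MDIC word echoes the register number, and a mismatch against the requested offset now fails the transaction instead of silently using data for the wrong register. A minimal sketch of the check; the mask and shift mirror the E1000_MDIC_REG_* definitions and the error return is simplified:

#include <stdio.h>

#define MDIC_REG_MASK  0x001f0000u      /* echoed register number */
#define MDIC_REG_SHIFT 16

static int mdic_check_offset(unsigned int mdic, unsigned int offset)
{
        unsigned int echoed = (mdic & MDIC_REG_MASK) >> MDIC_REG_SHIFT;

        if (echoed != offset) {
                fprintf(stderr,
                        "MDI offset error - requested %u, returned %u\n",
                        offset, echoed);
                return -1;      /* caller maps this to -E1000_ERR_PHY */
        }
        return 0;
}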
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index d60cd4393415..bea46bb26061 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -447,7 +447,6 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
447 447
448 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, 448 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
449 &tx_ring->dma, GFP_KERNEL); 449 &tx_ring->dma, GFP_KERNEL);
450
451 if (!tx_ring->desc) 450 if (!tx_ring->desc)
452 goto err; 451 goto err;
453 452
@@ -488,7 +487,6 @@ int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
488 487
489 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, 488 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
490 &rx_ring->dma, GFP_KERNEL); 489 &rx_ring->dma, GFP_KERNEL);
491
492 if (!rx_ring->desc) 490 if (!rx_ring->desc)
493 goto err; 491 goto err;
494 492
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index b5f94abe3cff..5dc119fd95a8 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -717,14 +717,11 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
717 txdr->size = ALIGN(txdr->size, 4096); 717 txdr->size = ALIGN(txdr->size, 4096);
718 718
719 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, 719 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
720 GFP_KERNEL); 720 GFP_KERNEL | __GFP_ZERO);
721 if (!txdr->desc) { 721 if (!txdr->desc) {
722 vfree(txdr->buffer_info); 722 vfree(txdr->buffer_info);
723 netif_err(adapter, probe, adapter->netdev,
724 "Unable to allocate transmit descriptor memory\n");
725 return -ENOMEM; 723 return -ENOMEM;
726 } 724 }
727 memset(txdr->desc, 0, txdr->size);
728 725
729 txdr->next_to_use = 0; 726 txdr->next_to_use = 0;
730 txdr->next_to_clean = 0; 727 txdr->next_to_clean = 0;
@@ -807,8 +804,6 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
807 804
808 if (!rxdr->desc) { 805 if (!rxdr->desc) {
809 vfree(rxdr->buffer_info); 806 vfree(rxdr->buffer_info);
810 netif_err(adapter, probe, adapter->netdev,
811 "Unable to allocate receive descriptors\n");
812 return -ENOMEM; 807 return -ENOMEM;
813 } 808 }
814 memset(rxdr->desc, 0, rxdr->size); 809 memset(rxdr->desc, 0, rxdr->size);
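The ixgb transmit hunk above folds the descriptor-ring memset() into the allocation by passing __GFP_ZERO, so the memory arrives already cleared; the receive hunk shown only drops the error message and keeps its memset. A userspace analogue of the same trade, calloc() versus malloc() plus memset():

#include <stdlib.h>
#include <string.h>

/* one zeroing allocation ... */
static void *alloc_ring(size_t size)
{
        return calloc(1, size);
}

/* ... instead of allocate-then-clear, which touches the memory twice */
static void *alloc_ring_two_pass(size_t size)
{
        void *p = malloc(size);

        if (p)
                memset(p, 0, size);
        return p;
}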
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 79f4a26ea6cc..1339932f59b1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7007,7 +7007,7 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
7007 int err; 7007 int err;
7008 7008
7009 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) 7009 if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
7010 return -EOPNOTSUPP; 7010 return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags);
7011 7011
7012 /* Hardware does not support aging addresses so if a 7012 /* Hardware does not support aging addresses so if a
7013 * ndm_state is given only allow permanent addresses 7013 * ndm_state is given only allow permanent addresses
@@ -7038,44 +7038,6 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
7038 return err; 7038 return err;
7039} 7039}
7040 7040
7041static int ixgbe_ndo_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
7042 struct net_device *dev,
7043 const unsigned char *addr)
7044{
7045 struct ixgbe_adapter *adapter = netdev_priv(dev);
7046 int err = -EOPNOTSUPP;
7047
7048 if (ndm->ndm_state & NUD_PERMANENT) {
7049 pr_info("%s: FDB only supports static addresses\n",
7050 ixgbe_driver_name);
7051 return -EINVAL;
7052 }
7053
7054 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
7055 if (is_unicast_ether_addr(addr))
7056 err = dev_uc_del(dev, addr);
7057 else if (is_multicast_ether_addr(addr))
7058 err = dev_mc_del(dev, addr);
7059 else
7060 err = -EINVAL;
7061 }
7062
7063 return err;
7064}
7065
7066static int ixgbe_ndo_fdb_dump(struct sk_buff *skb,
7067 struct netlink_callback *cb,
7068 struct net_device *dev,
7069 int idx)
7070{
7071 struct ixgbe_adapter *adapter = netdev_priv(dev);
7072
7073 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
7074 idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
7075
7076 return idx;
7077}
7078
7079static int ixgbe_ndo_bridge_setlink(struct net_device *dev, 7041static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
7080 struct nlmsghdr *nlh) 7042 struct nlmsghdr *nlh)
7081{ 7043{
@@ -7171,8 +7133,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
7171 .ndo_set_features = ixgbe_set_features, 7133 .ndo_set_features = ixgbe_set_features,
7172 .ndo_fix_features = ixgbe_fix_features, 7134 .ndo_fix_features = ixgbe_fix_features,
7173 .ndo_fdb_add = ixgbe_ndo_fdb_add, 7135 .ndo_fdb_add = ixgbe_ndo_fdb_add,
7174 .ndo_fdb_del = ixgbe_ndo_fdb_del,
7175 .ndo_fdb_dump = ixgbe_ndo_fdb_dump,
7176 .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink, 7136 .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink,
7177 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, 7137 .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
7178}; 7138};
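With ixgbe_ndo_fdb_del and ixgbe_ndo_fdb_dump removed, the driver relies on the rtnetlink defaults, and fdb_add now falls back to ndo_dflt_fdb_add() instead of rejecting the operation when SR-IOV is off. A sketch of the delegate-to-default shape, with stub handlers standing in for the real netdev ops:

#include <stdio.h>

/* stand-in for the stack's ndo_dflt_fdb_add() default handler */
static int dflt_fdb_add(const unsigned char *addr)
{
        printf("default FDB handler adds %02x:%02x:...\n", addr[0], addr[1]);
        return 0;
}

static int hw_fdb_add(const unsigned char *addr)
{
        printf("program SR-IOV hardware filter for %02x:%02x:...\n",
               addr[0], addr[1]);
        return 0;
}

static int drv_fdb_add(const unsigned char *addr, int sriov_enabled)
{
        if (!sriov_enabled)
                return dflt_fdb_add(addr);      /* was: return -EOPNOTSUPP */
        return hw_fdb_add(addr);
}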
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index d44b4d21268c..b3e6530637e3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -661,13 +661,7 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
661 bool enable = ((event_mask & 0x10000000U) != 0); 661 bool enable = ((event_mask & 0x10000000U) != 0);
662 662
663 if (enable) { 663 if (enable) {
664 eth_random_addr(vf_mac_addr); 664 eth_zero_addr(vf_mac_addr);
665 e_info(probe, "IOV: VF %d is enabled MAC %pM\n",
666 vfn, vf_mac_addr);
667 /*
668 * Store away the VF "permananet" MAC address, it will ask
669 * for it later.
670 */
671 memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6); 665 memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
672 } 666 }
673 667
@@ -688,7 +682,8 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
688 ixgbe_vf_reset_event(adapter, vf); 682 ixgbe_vf_reset_event(adapter, vf);
689 683
690 /* set vf mac address */ 684 /* set vf mac address */
691 ixgbe_set_vf_mac(adapter, vf, vf_mac); 685 if (!is_zero_ether_addr(vf_mac))
686 ixgbe_set_vf_mac(adapter, vf, vf_mac);
692 687
693 vf_shift = vf % 32; 688 vf_shift = vf % 32;
694 reg_offset = vf / 32; 689 reg_offset = vf / 32;
@@ -729,8 +724,16 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
729 IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg); 724 IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
730 725
731 /* reply to reset with ack and vf mac address */ 726 /* reply to reset with ack and vf mac address */
732 msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK; 727 msgbuf[0] = IXGBE_VF_RESET;
733 memcpy(addr, vf_mac, ETH_ALEN); 728 if (!is_zero_ether_addr(vf_mac)) {
729 msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
730 memcpy(addr, vf_mac, ETH_ALEN);
731 } else {
732 msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
733 dev_warn(&adapter->pdev->dev,
734 "VF %d has no MAC address assigned, you may have to assign one manually\n",
735 vf);
736 }
734 737
735 /* 738 /*
736 * Piggyback the multicast filter type so VF can compute the 739 * Piggyback the multicast filter type so VF can compute the
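The reset-message hunk above makes the PF's answer honest about unassigned MACs: ACK plus the address when one exists, NACK otherwise so the VF knows to wait for an administrator-assigned address. A compact sketch of composing that reply; the message-type bit values here are illustrative, not the IXGBE_VT_MSGTYPE_* constants:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define VF_RESET     0x01u
#define MSGTYPE_ACK  0x80000000u        /* illustrative bit values */
#define MSGTYPE_NACK 0x40000000u

static bool is_zero_ether(const uint8_t mac[6])
{
        static const uint8_t zero[6];

        return memcmp(mac, zero, 6) == 0;
}

static void build_reset_reply(uint32_t *msgbuf, uint8_t *reply_mac,
                              const uint8_t vf_mac[6])
{
        msgbuf[0] = VF_RESET;
        if (!is_zero_ether(vf_mac)) {
                msgbuf[0] |= MSGTYPE_ACK;
                memcpy(reply_mac, vf_mac, 6);
        } else {
                msgbuf[0] |= MSGTYPE_NACK;      /* VF must wait for a MAC */
        }
}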
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index fc0af9a3bb35..fff0d9867529 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -44,8 +44,8 @@ struct ixgbevf_tx_buffer {
44 struct sk_buff *skb; 44 struct sk_buff *skb;
45 dma_addr_t dma; 45 dma_addr_t dma;
46 unsigned long time_stamp; 46 unsigned long time_stamp;
47 union ixgbe_adv_tx_desc *next_to_watch;
47 u16 length; 48 u16 length;
48 u16 next_to_watch;
49 u16 mapped_as_page; 49 u16 mapped_as_page;
50}; 50};
51 51
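Replacing the u16 ring index with a descriptor pointer gives next_to_watch a natural "nothing pending" state (NULL) and removes the index re-read and re-check dance visible in the old cleanup loop. In simplified types:

struct tx_desc { unsigned int wb_status; };

struct tx_buffer {
        struct tx_desc *next_to_watch;  /* NULL means nothing pending */
        /* was: u16 next_to_watch;  -- a ring index has no "unset"
         * value and must be re-read and re-validated by the cleaner */
};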
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 2b6cb5ca48ee..eeae9349f78b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -76,12 +76,9 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
76 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, 76 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
77 * Class, Class Mask, private data (not used) } 77 * Class, Class Mask, private data (not used) }
78 */ 78 */
 79static struct pci_device_id ixgbevf_pci_tbl[] = {
 80 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
 81 board_82599_vf},
 82 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF),
 83 board_X540_vf},
 84
 79static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = {
 80 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
 81 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
85 /* required last entry */ 82 /* required last entry */
86 {0, } 83 {0, }
87}; 84};
@@ -190,28 +187,37 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
190 struct ixgbevf_adapter *adapter = q_vector->adapter; 187 struct ixgbevf_adapter *adapter = q_vector->adapter;
191 union ixgbe_adv_tx_desc *tx_desc, *eop_desc; 188 union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
192 struct ixgbevf_tx_buffer *tx_buffer_info; 189 struct ixgbevf_tx_buffer *tx_buffer_info;
193 unsigned int i, eop, count = 0; 190 unsigned int i, count = 0;
194 unsigned int total_bytes = 0, total_packets = 0; 191 unsigned int total_bytes = 0, total_packets = 0;
195 192
196 if (test_bit(__IXGBEVF_DOWN, &adapter->state)) 193 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
197 return true; 194 return true;
198 195
199 i = tx_ring->next_to_clean; 196 i = tx_ring->next_to_clean;
200 eop = tx_ring->tx_buffer_info[i].next_to_watch; 197 tx_buffer_info = &tx_ring->tx_buffer_info[i];
201 eop_desc = IXGBEVF_TX_DESC(tx_ring, eop); 198 eop_desc = tx_buffer_info->next_to_watch;
202 199
203 while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) && 200 do {
204 (count < tx_ring->count)) {
205 bool cleaned = false; 201 bool cleaned = false;
206 rmb(); /* read buffer_info after eop_desc */ 202
207 /* eop could change between read and DD-check */ 203 /* if next_to_watch is not set then there is no work pending */
208 if (unlikely(eop != tx_ring->tx_buffer_info[i].next_to_watch)) 204 if (!eop_desc)
209 goto cont_loop; 205 break;
206
207 /* prevent any other reads prior to eop_desc */
208 read_barrier_depends();
209
210 /* if DD is not set pending work has not been completed */
211 if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
212 break;
213
214 /* clear next_to_watch to prevent false hangs */
215 tx_buffer_info->next_to_watch = NULL;
216
210 for ( ; !cleaned; count++) { 217 for ( ; !cleaned; count++) {
211 struct sk_buff *skb; 218 struct sk_buff *skb;
212 tx_desc = IXGBEVF_TX_DESC(tx_ring, i); 219 tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
213 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 220 cleaned = (tx_desc == eop_desc);
214 cleaned = (i == eop);
215 skb = tx_buffer_info->skb; 221 skb = tx_buffer_info->skb;
216 222
217 if (cleaned && skb) { 223 if (cleaned && skb) {
@@ -234,12 +240,12 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
234 i++; 240 i++;
235 if (i == tx_ring->count) 241 if (i == tx_ring->count)
236 i = 0; 242 i = 0;
243
244 tx_buffer_info = &tx_ring->tx_buffer_info[i];
237 } 245 }
238 246
239cont_loop: 247 eop_desc = tx_buffer_info->next_to_watch;
240 eop = tx_ring->tx_buffer_info[i].next_to_watch; 248 } while (count < tx_ring->count);
241 eop_desc = IXGBEVF_TX_DESC(tx_ring, eop);
242 }
243 249
244 tx_ring->next_to_clean = i; 250 tx_ring->next_to_clean = i;
245 251
@@ -2046,6 +2052,7 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2046{ 2052{
2047 struct ixgbe_hw *hw = &adapter->hw; 2053 struct ixgbe_hw *hw = &adapter->hw;
2048 struct pci_dev *pdev = adapter->pdev; 2054 struct pci_dev *pdev = adapter->pdev;
2055 struct net_device *netdev = adapter->netdev;
2049 int err; 2056 int err;
2050 2057
2051 /* PCI config space info */ 2058 /* PCI config space info */
@@ -2065,18 +2072,26 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
2065 err = hw->mac.ops.reset_hw(hw); 2072 err = hw->mac.ops.reset_hw(hw);
2066 if (err) { 2073 if (err) {
2067 dev_info(&pdev->dev, 2074 dev_info(&pdev->dev,
2068 "PF still in reset state, assigning new address\n"); 2075 "PF still in reset state. Is the PF interface up?\n");
2069 eth_hw_addr_random(adapter->netdev);
2070 memcpy(adapter->hw.mac.addr, adapter->netdev->dev_addr,
2071 adapter->netdev->addr_len);
2072 } else { 2076 } else {
2073 err = hw->mac.ops.init_hw(hw); 2077 err = hw->mac.ops.init_hw(hw);
2074 if (err) { 2078 if (err) {
2075 pr_err("init_shared_code failed: %d\n", err); 2079 pr_err("init_shared_code failed: %d\n", err);
2076 goto out; 2080 goto out;
2077 } 2081 }
2078 memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr, 2082 err = hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
2079 adapter->netdev->addr_len); 2083 if (err)
2084 dev_info(&pdev->dev, "Error reading MAC address\n");
2085 else if (is_zero_ether_addr(adapter->hw.mac.addr))
2086 dev_info(&pdev->dev,
2087 "MAC address not assigned by administrator.\n");
2088 memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
2089 }
2090
2091 if (!is_valid_ether_addr(netdev->dev_addr)) {
2092 dev_info(&pdev->dev, "Assigning random MAC address\n");
2093 eth_hw_addr_random(netdev);
2094 memcpy(hw->mac.addr, netdev->dev_addr, netdev->addr_len);
2080 } 2095 }
2081 2096
2082 /* lock to protect mailbox accesses */ 2097 /* lock to protect mailbox accesses */
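The sw_init hunk above changes the VF's address policy: read the PF-assigned MAC, warn when it comes back zero, and generate a random locally administered address only if nothing valid was obtained. A userspace sketch with stand-ins for is_valid_ether_addr() and eth_hw_addr_random():

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

static bool mac_is_zero(const uint8_t m[6])
{
        static const uint8_t z[6];

        return memcmp(m, z, 6) == 0;
}

/* stand-in for is_valid_ether_addr(): non-zero and not multicast */
static bool mac_is_valid(const uint8_t m[6])
{
        return !mac_is_zero(m) && !(m[0] & 0x01);
}

/* stand-in for the eth_hw_addr_random() fallback */
static void choose_mac(uint8_t dev_addr[6], const uint8_t pf_addr[6])
{
        int i;

        memcpy(dev_addr, pf_addr, 6);
        if (mac_is_valid(dev_addr))
                return;                 /* keep the PF-assigned MAC */

        for (i = 0; i < 6; i++)
                dev_addr[i] = rand() & 0xff;
        dev_addr[0] &= 0xfe;            /* not multicast */
        dev_addr[0] |= 0x02;            /* locally administered */
}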
@@ -2425,9 +2440,6 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
2425 &rx_ring->dma, GFP_KERNEL); 2440 &rx_ring->dma, GFP_KERNEL);
2426 2441
2427 if (!rx_ring->desc) { 2442 if (!rx_ring->desc) {
2428 hw_dbg(&adapter->hw,
2429 "Unable to allocate memory for "
2430 "the receive descriptor ring\n");
2431 vfree(rx_ring->rx_buffer_info); 2443 vfree(rx_ring->rx_buffer_info);
2432 rx_ring->rx_buffer_info = NULL; 2444 rx_ring->rx_buffer_info = NULL;
2433 goto alloc_failed; 2445 goto alloc_failed;
@@ -2822,8 +2834,7 @@ static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
2822} 2834}
2823 2835
2824static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, 2836static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
2825 struct sk_buff *skb, u32 tx_flags, 2837 struct sk_buff *skb, u32 tx_flags)
2826 unsigned int first)
2827{ 2838{
2828 struct ixgbevf_tx_buffer *tx_buffer_info; 2839 struct ixgbevf_tx_buffer *tx_buffer_info;
2829 unsigned int len; 2840 unsigned int len;
@@ -2848,7 +2859,6 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
2848 size, DMA_TO_DEVICE); 2859 size, DMA_TO_DEVICE);
2849 if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma)) 2860 if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
2850 goto dma_error; 2861 goto dma_error;
2851 tx_buffer_info->next_to_watch = i;
2852 2862
2853 len -= size; 2863 len -= size;
2854 total -= size; 2864 total -= size;
@@ -2878,7 +2888,6 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
2878 tx_buffer_info->dma)) 2888 tx_buffer_info->dma))
2879 goto dma_error; 2889 goto dma_error;
2880 tx_buffer_info->mapped_as_page = true; 2890 tx_buffer_info->mapped_as_page = true;
2881 tx_buffer_info->next_to_watch = i;
2882 2891
2883 len -= size; 2892 len -= size;
2884 total -= size; 2893 total -= size;
@@ -2897,8 +2906,6 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
2897 else 2906 else
2898 i = i - 1; 2907 i = i - 1;
2899 tx_ring->tx_buffer_info[i].skb = skb; 2908 tx_ring->tx_buffer_info[i].skb = skb;
2900 tx_ring->tx_buffer_info[first].next_to_watch = i;
2901 tx_ring->tx_buffer_info[first].time_stamp = jiffies;
2902 2909
2903 return count; 2910 return count;
2904 2911
@@ -2907,7 +2914,6 @@ dma_error:
2907 2914
2908 /* clear timestamp and dma mappings for failed tx_buffer_info map */ 2915 /* clear timestamp and dma mappings for failed tx_buffer_info map */
2909 tx_buffer_info->dma = 0; 2916 tx_buffer_info->dma = 0;
2910 tx_buffer_info->next_to_watch = 0;
2911 count--; 2917 count--;
2912 2918
2913 /* clear timestamp and dma mappings for remaining portion of packet */ 2919 /* clear timestamp and dma mappings for remaining portion of packet */
@@ -2924,7 +2930,8 @@ dma_error:
2924} 2930}
2925 2931
2926static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags, 2932static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
2927 int count, u32 paylen, u8 hdr_len) 2933 int count, unsigned int first, u32 paylen,
2934 u8 hdr_len)
2928{ 2935{
2929 union ixgbe_adv_tx_desc *tx_desc = NULL; 2936 union ixgbe_adv_tx_desc *tx_desc = NULL;
2930 struct ixgbevf_tx_buffer *tx_buffer_info; 2937 struct ixgbevf_tx_buffer *tx_buffer_info;
@@ -2975,6 +2982,16 @@ static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
2975 2982
2976 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd); 2983 tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
2977 2984
2985 tx_ring->tx_buffer_info[first].time_stamp = jiffies;
2986
2987 /* Force memory writes to complete before letting h/w
2988 * know there are new descriptors to fetch. (Only
2989 * applicable for weak-ordered memory model archs,
2990 * such as IA-64).
2991 */
2992 wmb();
2993
2994 tx_ring->tx_buffer_info[first].next_to_watch = tx_desc;
2978 tx_ring->next_to_use = i; 2995 tx_ring->next_to_use = i;
2979} 2996}
2980 2997
@@ -3066,15 +3083,8 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3066 tx_flags |= IXGBE_TX_FLAGS_CSUM; 3083 tx_flags |= IXGBE_TX_FLAGS_CSUM;
3067 3084
3068 ixgbevf_tx_queue(tx_ring, tx_flags, 3085 ixgbevf_tx_queue(tx_ring, tx_flags,
3069 ixgbevf_tx_map(tx_ring, skb, tx_flags, first), 3086 ixgbevf_tx_map(tx_ring, skb, tx_flags),
3070 skb->len, hdr_len); 3087 first, skb->len, hdr_len);
3071 /*
3072 * Force memory writes to complete before letting h/w
3073 * know there are new descriptors to fetch. (Only
3074 * applicable for weak-ordered memory model archs,
3075 * such as IA-64).
3076 */
3077 wmb();
3078 3088
3079 writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail); 3089 writel(tx_ring->next_to_use, adapter->hw.hw_addr + tx_ring->tail);
3080 3090
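The reworked clean loop above keys everything off the published EOP descriptor pointer: a NULL pointer means no pending work, a barrier orders the pointer load against the DD-status load, and the pointer is cleared before freeing so a stale value cannot trigger false hang detection. On the transmit side, wmb() now runs before next_to_watch is published rather than after queuing. A simplified, self-contained version of the consumer, with a no-op macro standing in for the kernel barrier:

#include <stddef.h>

#define STAT_DD 0x1u                    /* descriptor-done, written by hw */

struct tx_desc { volatile unsigned int wb_status; };
struct tx_buf  { struct tx_desc *next_to_watch; };

/* stand-in for read_barrier_depends(); real code uses the kernel
 * primitive to order the eop load against the status load */
#define read_barrier_depends() do { } while (0)

static unsigned int clean_tx_ring(struct tx_buf *bufs, struct tx_desc *ring,
                                  unsigned int count, unsigned int ntc)
{
        unsigned int i = ntc;
        struct tx_desc *eop = bufs[i].next_to_watch;

        while (eop) {
                read_barrier_depends();

                if (!(eop->wb_status & STAT_DD))
                        break;                  /* hardware not done yet */

                bufs[i].next_to_watch = NULL;   /* prevent false hangs */

                while (&ring[i] != eop)         /* free up to and ... */
                        i = (i + 1) % count;
                i = (i + 1) % count;            /* ... including eop */

                eop = bufs[i].next_to_watch;    /* next packet, if any */
        }
        return i;                               /* new next_to_clean */
}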
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 0c94557b53df..387b52635bc0 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -109,7 +109,12 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
109 if (ret_val) 109 if (ret_val)
110 return ret_val; 110 return ret_val;
111 111
112 if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK)) 112 /* New versions of the PF may NACK the reset return message
113 * to indicate that no MAC address has yet been assigned for
114 * the VF.
115 */
116 if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
117 msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
113 return IXGBE_ERR_INVALID_MAC_ADDR; 118 return IXGBE_ERR_INVALID_MAC_ADDR;
114 119
115 memcpy(hw->mac.perm_addr, addr, ETH_ALEN); 120 memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
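On the VF side, the matching change: a NACKed VF_RESET reply is now a legal answer, meaning "no MAC assigned yet", rather than an error. A sketch of the relaxed check, with illustrative constants in place of the IXGBE_VT_MSGTYPE_* values:

#include <stdint.h>

#define VF_RESET        0x01u
#define MSGTYPE_ACK     0x80000000u     /* illustrative bit values */
#define MSGTYPE_NACK    0x40000000u
#define ERR_INVALID_MAC (-1)

static int check_reset_reply(uint32_t msg)
{
        if (msg != (VF_RESET | MSGTYPE_ACK) &&
            msg != (VF_RESET | MSGTYPE_NACK))
                return ERR_INVALID_MAC;
        return 0;       /* on NACK: keep the zero MAC, wait for assignment */
}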
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index edfba9370922..5170ecb00acc 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -23,6 +23,7 @@ config MV643XX_ETH
23 depends on (MV64X60 || PPC32 || PLAT_ORION) && INET 23 depends on (MV64X60 || PPC32 || PLAT_ORION) && INET
24 select INET_LRO 24 select INET_LRO
25 select PHYLIB 25 select PHYLIB
26 select MVMDIO
26 ---help--- 27 ---help---
27 This driver supports the gigabit ethernet MACs in the 28 This driver supports the gigabit ethernet MACs in the
28 Marvell Discovery PPC/MIPS chipset family (MV643XX) and 29 Marvell Discovery PPC/MIPS chipset family (MV643XX) and
@@ -38,9 +39,7 @@ config MVMDIO
38 interface units of the Marvell EBU SoCs (Kirkwood, Orion5x, 39 interface units of the Marvell EBU SoCs (Kirkwood, Orion5x,
39 Dove, Armada 370 and Armada XP). 40 Dove, Armada 370 and Armada XP).
40 41
41 For now, this driver is only needed for the MVNETA driver 42 This driver is used by the MV643XX_ETH and MVNETA drivers.
42 (used on Armada 370 and XP), but it could be used in the
43 future by the MV643XX_ETH driver.
44 43
45config MVNETA 44config MVNETA
46 tristate "Marvell Armada 370/XP network interface support" 45 tristate "Marvell Armada 370/XP network interface support"
diff --git a/drivers/net/ethernet/marvell/Makefile b/drivers/net/ethernet/marvell/Makefile
index 7f63b4aac434..5c4a7765ff0e 100644
--- a/drivers/net/ethernet/marvell/Makefile
+++ b/drivers/net/ethernet/marvell/Makefile
@@ -2,8 +2,8 @@
2# Makefile for the Marvell device drivers. 2# Makefile for the Marvell device drivers.
3# 3#
4 4
5obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
6obj-$(CONFIG_MVMDIO) += mvmdio.o 5obj-$(CONFIG_MVMDIO) += mvmdio.o
6obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
7obj-$(CONFIG_MVNETA) += mvneta.o 7obj-$(CONFIG_MVNETA) += mvneta.o
8obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o 8obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
9obj-$(CONFIG_SKGE) += skge.o 9obj-$(CONFIG_SKGE) += skge.o
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 6562c736a1d8..aedbd8256ad1 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -20,6 +20,8 @@
20 * Copyright (C) 2007-2008 Marvell Semiconductor 20 * Copyright (C) 2007-2008 Marvell Semiconductor
21 * Lennert Buytenhek <buytenh@marvell.com> 21 * Lennert Buytenhek <buytenh@marvell.com>
22 * 22 *
23 * Copyright (C) 2013 Michael Stapelberg <michael@stapelberg.de>
24 *
23 * This program is free software; you can redistribute it and/or 25 * This program is free software; you can redistribute it and/or
24 * modify it under the terms of the GNU General Public License 26 * modify it under the terms of the GNU General Public License
25 * as published by the Free Software Foundation; either version 2 27 * as published by the Free Software Foundation; either version 2
@@ -67,14 +69,6 @@ static char mv643xx_eth_driver_version[] = "1.4";
67 * Registers shared between all ports. 69 * Registers shared between all ports.
68 */ 70 */
69#define PHY_ADDR 0x0000 71#define PHY_ADDR 0x0000
70#define SMI_REG 0x0004
71#define SMI_BUSY 0x10000000
72#define SMI_READ_VALID 0x08000000
73#define SMI_OPCODE_READ 0x04000000
74#define SMI_OPCODE_WRITE 0x00000000
75#define ERR_INT_CAUSE 0x0080
76#define ERR_INT_SMI_DONE 0x00000010
77#define ERR_INT_MASK 0x0084
78#define WINDOW_BASE(w) (0x0200 + ((w) << 3)) 72#define WINDOW_BASE(w) (0x0200 + ((w) << 3))
79#define WINDOW_SIZE(w) (0x0204 + ((w) << 3)) 73#define WINDOW_SIZE(w) (0x0204 + ((w) << 3))
80#define WINDOW_REMAP_HIGH(w) (0x0280 + ((w) << 2)) 74#define WINDOW_REMAP_HIGH(w) (0x0280 + ((w) << 2))
@@ -264,25 +258,6 @@ struct mv643xx_eth_shared_private {
264 void __iomem *base; 258 void __iomem *base;
265 259
266 /* 260 /*
267 * Points at the right SMI instance to use.
268 */
269 struct mv643xx_eth_shared_private *smi;
270
271 /*
272 * Provides access to local SMI interface.
273 */
274 struct mii_bus *smi_bus;
275
276 /*
277 * If we have access to the error interrupt pin (which is
278 * somewhat misnamed as it not only reflects internal errors
279 * but also reflects SMI completion), use that to wait for
280 * SMI access completion instead of polling the SMI busy bit.
281 */
282 int err_interrupt;
283 wait_queue_head_t smi_busy_wait;
284
285 /*
286 * Per-port MBUS window access register value. 261 * Per-port MBUS window access register value.
287 */ 262 */
288 u32 win_protect; 263 u32 win_protect;
@@ -1120,97 +1095,6 @@ out_write:
1120 wrlp(mp, PORT_SERIAL_CONTROL, pscr); 1095 wrlp(mp, PORT_SERIAL_CONTROL, pscr);
1121} 1096}
1122 1097
1123static irqreturn_t mv643xx_eth_err_irq(int irq, void *dev_id)
1124{
1125 struct mv643xx_eth_shared_private *msp = dev_id;
1126
1127 if (readl(msp->base + ERR_INT_CAUSE) & ERR_INT_SMI_DONE) {
1128 writel(~ERR_INT_SMI_DONE, msp->base + ERR_INT_CAUSE);
1129 wake_up(&msp->smi_busy_wait);
1130 return IRQ_HANDLED;
1131 }
1132
1133 return IRQ_NONE;
1134}
1135
1136static int smi_is_done(struct mv643xx_eth_shared_private *msp)
1137{
1138 return !(readl(msp->base + SMI_REG) & SMI_BUSY);
1139}
1140
1141static int smi_wait_ready(struct mv643xx_eth_shared_private *msp)
1142{
1143 if (msp->err_interrupt == NO_IRQ) {
1144 int i;
1145
1146 for (i = 0; !smi_is_done(msp); i++) {
1147 if (i == 10)
1148 return -ETIMEDOUT;
1149 msleep(10);
1150 }
1151
1152 return 0;
1153 }
1154
1155 if (!smi_is_done(msp)) {
1156 wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp),
1157 msecs_to_jiffies(100));
1158 if (!smi_is_done(msp))
1159 return -ETIMEDOUT;
1160 }
1161
1162 return 0;
1163}
1164
1165static int smi_bus_read(struct mii_bus *bus, int addr, int reg)
1166{
1167 struct mv643xx_eth_shared_private *msp = bus->priv;
1168 void __iomem *smi_reg = msp->base + SMI_REG;
1169 int ret;
1170
1171 if (smi_wait_ready(msp)) {
1172 pr_warn("SMI bus busy timeout\n");
1173 return -ETIMEDOUT;
1174 }
1175
1176 writel(SMI_OPCODE_READ | (reg << 21) | (addr << 16), smi_reg);
1177
1178 if (smi_wait_ready(msp)) {
1179 pr_warn("SMI bus busy timeout\n");
1180 return -ETIMEDOUT;
1181 }
1182
1183 ret = readl(smi_reg);
1184 if (!(ret & SMI_READ_VALID)) {
1185 pr_warn("SMI bus read not valid\n");
1186 return -ENODEV;
1187 }
1188
1189 return ret & 0xffff;
1190}
1191
1192static int smi_bus_write(struct mii_bus *bus, int addr, int reg, u16 val)
1193{
1194 struct mv643xx_eth_shared_private *msp = bus->priv;
1195 void __iomem *smi_reg = msp->base + SMI_REG;
1196
1197 if (smi_wait_ready(msp)) {
1198 pr_warn("SMI bus busy timeout\n");
1199 return -ETIMEDOUT;
1200 }
1201
1202 writel(SMI_OPCODE_WRITE | (reg << 21) |
1203 (addr << 16) | (val & 0xffff), smi_reg);
1204
1205 if (smi_wait_ready(msp)) {
1206 pr_warn("SMI bus busy timeout\n");
1207 return -ETIMEDOUT;
1208 }
1209
1210 return 0;
1211}
1212
1213
1214/* statistics ***************************************************************/ 1098/* statistics ***************************************************************/
1215static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev) 1099static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
1216{ 1100{
@@ -1523,6 +1407,34 @@ mv643xx_eth_get_settings_phyless(struct mv643xx_eth_private *mp,
1523 return 0; 1407 return 0;
1524} 1408}
1525 1409
1410static void
1411mv643xx_eth_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1412{
1413 struct mv643xx_eth_private *mp = netdev_priv(dev);
1414 wol->supported = 0;
1415 wol->wolopts = 0;
1416 if (mp->phy)
1417 phy_ethtool_get_wol(mp->phy, wol);
1418}
1419
1420static int
1421mv643xx_eth_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1422{
1423 struct mv643xx_eth_private *mp = netdev_priv(dev);
1424 int err;
1425
1426 if (mp->phy == NULL)
1427 return -EOPNOTSUPP;
1428
1429 err = phy_ethtool_set_wol(mp->phy, wol);
1430 /* Given that mv643xx_eth works without the marvell-specific PHY driver,
1431 * this debugging hint is useful to have.
1432 */
1433 if (err == -EOPNOTSUPP)
1434 netdev_info(dev, "The PHY does not support set_wol, was CONFIG_MARVELL_PHY enabled?\n");
1435 return err;
1436}
1437
1526static int 1438static int
1527mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1439mv643xx_eth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1528{ 1440{
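The new get_wol/set_wol ethtool hooks above keep no MAC-level state; they forward to phylib and surface -EOPNOTSUPP when no PHY is attached or the PHY driver lacks WoL support, adding the CONFIG_MARVELL_PHY hint in the latter case. A userspace sketch of the delegation, with a stub for phy_ethtool_set_wol():

#include <stdio.h>

#define EOPNOTSUPP 95

struct phy_dev { int wol_capable; int wol_on; };

/* stand-in for phy_ethtool_set_wol() */
static int phy_set_wol(struct phy_dev *p, int enable)
{
        if (!p->wol_capable)
                return -EOPNOTSUPP;
        p->wol_on = enable;
        return 0;
}

/* the MAC driver keeps no WoL state of its own */
static int mac_set_wol(struct phy_dev *phy, int enable)
{
        int err;

        if (!phy)
                return -EOPNOTSUPP;     /* no PHY attached */

        err = phy_set_wol(phy, enable);
        if (err == -EOPNOTSUPP)
                fprintf(stderr, "PHY driver lacks WoL support\n");
        return err;
}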
@@ -1708,6 +1620,8 @@ static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
1708 .get_ethtool_stats = mv643xx_eth_get_ethtool_stats, 1620 .get_ethtool_stats = mv643xx_eth_get_ethtool_stats,
1709 .get_sset_count = mv643xx_eth_get_sset_count, 1621 .get_sset_count = mv643xx_eth_get_sset_count,
1710 .get_ts_info = ethtool_op_get_ts_info, 1622 .get_ts_info = ethtool_op_get_ts_info,
1623 .get_wol = mv643xx_eth_get_wol,
1624 .set_wol = mv643xx_eth_set_wol,
1711}; 1625};
1712 1626
1713 1627
@@ -2656,47 +2570,6 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2656 goto out_free; 2570 goto out_free;
2657 2571
2658 /* 2572 /*
2659 * Set up and register SMI bus.
2660 */
2661 if (pd == NULL || pd->shared_smi == NULL) {
2662 msp->smi_bus = mdiobus_alloc();
2663 if (msp->smi_bus == NULL)
2664 goto out_unmap;
2665
2666 msp->smi_bus->priv = msp;
2667 msp->smi_bus->name = "mv643xx_eth smi";
2668 msp->smi_bus->read = smi_bus_read;
2669 msp->smi_bus->write = smi_bus_write,
2670 snprintf(msp->smi_bus->id, MII_BUS_ID_SIZE, "%s-%d",
2671 pdev->name, pdev->id);
2672 msp->smi_bus->parent = &pdev->dev;
2673 msp->smi_bus->phy_mask = 0xffffffff;
2674 if (mdiobus_register(msp->smi_bus) < 0)
2675 goto out_free_mii_bus;
2676 msp->smi = msp;
2677 } else {
2678 msp->smi = platform_get_drvdata(pd->shared_smi);
2679 }
2680
2681 msp->err_interrupt = NO_IRQ;
2682 init_waitqueue_head(&msp->smi_busy_wait);
2683
2684 /*
2685 * Check whether the error interrupt is hooked up.
-	 */
-	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-	if (res != NULL) {
-		int err;
-
-		err = request_irq(res->start, mv643xx_eth_err_irq,
-				  IRQF_SHARED, "mv643xx_eth", msp);
-		if (!err) {
-			writel(ERR_INT_SMI_DONE, msp->base + ERR_INT_MASK);
-			msp->err_interrupt = res->start;
-		}
-	}
-
 	/*
 	 * (Re-)program MBUS remapping windows if we are asked to.
 	 */
 	dram = mv_mbus_dram_info();
@@ -2711,10 +2584,6 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
 
 	return 0;
 
-out_free_mii_bus:
-	mdiobus_free(msp->smi_bus);
-out_unmap:
-	iounmap(msp->base);
 out_free:
 	kfree(msp);
 out:
@@ -2724,14 +2593,7 @@ out:
 static int mv643xx_eth_shared_remove(struct platform_device *pdev)
 {
 	struct mv643xx_eth_shared_private *msp = platform_get_drvdata(pdev);
-	struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
 
-	if (pd == NULL || pd->shared_smi == NULL) {
-		mdiobus_unregister(msp->smi_bus);
-		mdiobus_free(msp->smi_bus);
-	}
-	if (msp->err_interrupt != NO_IRQ)
-		free_irq(msp->err_interrupt, msp);
 	iounmap(msp->base);
 	kfree(msp);
 
@@ -2794,14 +2656,21 @@ static void set_params(struct mv643xx_eth_private *mp,
 	mp->txq_count = pd->tx_queue_count ? : 1;
 }
 
+static void mv643xx_eth_adjust_link(struct net_device *dev)
+{
+	struct mv643xx_eth_private *mp = netdev_priv(dev);
+
+	mv643xx_adjust_pscr(mp);
+}
+
 static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
 				   int phy_addr)
 {
-	struct mii_bus *bus = mp->shared->smi->smi_bus;
 	struct phy_device *phydev;
 	int start;
 	int num;
 	int i;
+	char phy_id[MII_BUS_ID_SIZE + 3];
 
 	if (phy_addr == MV643XX_ETH_PHY_ADDR_DEFAULT) {
 		start = phy_addr_get(mp) & 0x1f;
@@ -2811,17 +2680,19 @@ static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
 		num = 1;
 	}
 
-	phydev = NULL;
+	/* Attempt to connect to the PHY using orion-mdio */
+	phydev = ERR_PTR(-ENODEV);
 	for (i = 0; i < num; i++) {
 		int addr = (start + i) & 0x1f;
 
-		if (bus->phy_map[addr] == NULL)
-			mdiobus_scan(bus, addr);
+		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
+			 "orion-mdio-mii", addr);
 
-		if (phydev == NULL) {
-			phydev = bus->phy_map[addr];
-			if (phydev != NULL)
-				phy_addr_set(mp, addr);
+		phydev = phy_connect(mp->dev, phy_id, mv643xx_eth_adjust_link,
+				     PHY_INTERFACE_MODE_GMII);
+		if (!IS_ERR(phydev)) {
+			phy_addr_set(mp, addr);
+			break;
 		}
 	}
 
@@ -2834,8 +2705,6 @@ static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex)
 
 	phy_reset(mp);
 
-	phy_attach(mp->dev, dev_name(&phy->dev), PHY_INTERFACE_MODE_GMII);
-
 	if (speed == 0) {
 		phy->autoneg = AUTONEG_ENABLE;
 		phy->speed = 0;
@@ -2943,11 +2812,17 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 	netif_set_real_num_tx_queues(dev, mp->txq_count);
 	netif_set_real_num_rx_queues(dev, mp->rxq_count);
 
-	if (pd->phy_addr != MV643XX_ETH_PHY_NONE)
+	if (pd->phy_addr != MV643XX_ETH_PHY_NONE) {
 		mp->phy = phy_scan(mp, pd->phy_addr);
 
-	if (mp->phy != NULL)
+		if (IS_ERR(mp->phy)) {
+			err = PTR_ERR(mp->phy);
+			if (err == -ENODEV)
+				err = -EPROBE_DEFER;
+			goto out;
+		}
 		phy_init(mp, pd->speed, pd->duplex);
+	}
 
 	SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);
 
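Note: the phy_scan() rework above stops walking bus->phy_map directly and instead binds the PHY by its MDIO bus id string, so probing can be deferred until the orion-mdio bus has registered. A minimal sketch of the same pattern for an arbitrary driver ("foo-mdio" and the helper names are placeholders, not driver API; PHY_ID_FMT is "%s:%02x" from <linux/phy.h>, and the 4-argument phy_connect() of this kernel series is assumed):

static void example_adjust_link(struct net_device *ndev)
{
	/* react to link parameter changes here */
}

static int example_attach_phy(struct net_device *ndev, int addr)
{
	char phy_id[MII_BUS_ID_SIZE + 3];
	struct phy_device *phydev;

	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "foo-mdio", addr);
	phydev = phy_connect(ndev, phy_id, example_adjust_link,
			     PHY_INTERFACE_MODE_GMII);
	if (IS_ERR(phydev))
		/* -ENODEV usually means the bus is not there yet */
		return PTR_ERR(phydev) == -ENODEV ? -EPROBE_DEFER
						  : PTR_ERR(phydev);
	return 0;
}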
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 77b7c80262f4..7b5158f654c2 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -24,10 +24,13 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/phy.h>
-#include <linux/of_address.h>
-#include <linux/of_mdio.h>
+#include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of_mdio.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
 
 #define MVMDIO_SMI_DATA_SHIFT		0
 #define MVMDIO_SMI_PHY_ADDR_SHIFT	16
@@ -36,33 +39,58 @@
 #define MVMDIO_SMI_WRITE_OPERATION	0
 #define MVMDIO_SMI_READ_VALID		BIT(27)
 #define MVMDIO_SMI_BUSY			BIT(28)
+#define MVMDIO_ERR_INT_CAUSE		0x007C
+#define  MVMDIO_ERR_INT_SMI_DONE	0x00000010
+#define MVMDIO_ERR_INT_MASK		0x0080
 
 struct orion_mdio_dev {
 	struct mutex lock;
-	void __iomem *smireg;
+	void __iomem *regs;
+	/*
+	 * If we have access to the error interrupt pin (which is
+	 * somewhat misnamed as it not only reflects internal errors
+	 * but also reflects SMI completion), use that to wait for
+	 * SMI access completion instead of polling the SMI busy bit.
+	 */
+	int err_interrupt;
+	wait_queue_head_t smi_busy_wait;
 };
 
+static int orion_mdio_smi_is_done(struct orion_mdio_dev *dev)
+{
+	return !(readl(dev->regs) & MVMDIO_SMI_BUSY);
+}
+
 /* Wait for the SMI unit to be ready for another operation
  */
 static int orion_mdio_wait_ready(struct mii_bus *bus)
 {
 	struct orion_mdio_dev *dev = bus->priv;
 	int count;
-	u32 val;
 
-	count = 0;
-	while (1) {
-		val = readl(dev->smireg);
-		if (!(val & MVMDIO_SMI_BUSY))
-			break;
+	if (dev->err_interrupt <= 0) {
+		count = 0;
+		while (1) {
+			if (orion_mdio_smi_is_done(dev))
+				break;
 
-		if (count > 100) {
-			dev_err(bus->parent, "Timeout: SMI busy for too long\n");
-			return -ETIMEDOUT;
-		}
+			if (count > 100) {
+				dev_err(bus->parent,
+					"Timeout: SMI busy for too long\n");
+				return -ETIMEDOUT;
+			}
 
-		udelay(10);
-		count++;
+			udelay(10);
+			count++;
+		}
+	} else {
+		if (!orion_mdio_smi_is_done(dev)) {
+			wait_event_timeout(dev->smi_busy_wait,
+					   orion_mdio_smi_is_done(dev),
+					   msecs_to_jiffies(100));
+			if (!orion_mdio_smi_is_done(dev))
+				return -ETIMEDOUT;
+		}
 	}
 
 	return 0;
@@ -87,12 +115,12 @@ static int orion_mdio_read(struct mii_bus *bus, int mii_id,
 	writel(((mii_id << MVMDIO_SMI_PHY_ADDR_SHIFT) |
 		(regnum << MVMDIO_SMI_PHY_REG_SHIFT) |
 		MVMDIO_SMI_READ_OPERATION),
-	       dev->smireg);
+	       dev->regs);
 
 	/* Wait for the value to become available */
 	count = 0;
 	while (1) {
-		val = readl(dev->smireg);
+		val = readl(dev->regs);
 		if (val & MVMDIO_SMI_READ_VALID)
 			break;
 
@@ -129,7 +157,7 @@ static int orion_mdio_write(struct mii_bus *bus, int mii_id,
 		(regnum << MVMDIO_SMI_PHY_REG_SHIFT) |
 		MVMDIO_SMI_WRITE_OPERATION |
 		(value << MVMDIO_SMI_DATA_SHIFT)),
-	       dev->smireg);
+	       dev->regs);
 
 	mutex_unlock(&dev->lock);
 
@@ -141,13 +169,34 @@ static int orion_mdio_reset(struct mii_bus *bus)
 	return 0;
 }
 
+static irqreturn_t orion_mdio_err_irq(int irq, void *dev_id)
+{
+	struct orion_mdio_dev *dev = dev_id;
+
+	if (readl(dev->regs + MVMDIO_ERR_INT_CAUSE) &
+			MVMDIO_ERR_INT_SMI_DONE) {
+		writel(~MVMDIO_ERR_INT_SMI_DONE,
+			dev->regs + MVMDIO_ERR_INT_CAUSE);
+		wake_up(&dev->smi_busy_wait);
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
 static int orion_mdio_probe(struct platform_device *pdev)
 {
-	struct device_node *np = pdev->dev.of_node;
+	struct resource *r;
 	struct mii_bus *bus;
 	struct orion_mdio_dev *dev;
 	int i, ret;
 
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r) {
+		dev_err(&pdev->dev, "No SMI register address given\n");
+		return -ENODEV;
+	}
+
 	bus = mdiobus_alloc_size(sizeof(struct orion_mdio_dev));
 	if (!bus) {
 		dev_err(&pdev->dev, "Cannot allocate MDIO bus\n");
@@ -172,33 +221,54 @@ static int orion_mdio_probe(struct platform_device *pdev)
 		bus->irq[i] = PHY_POLL;
 
 	dev = bus->priv;
-	dev->smireg = of_iomap(pdev->dev.of_node, 0);
-	if (!dev->smireg) {
-		dev_err(&pdev->dev, "No SMI register address given in DT\n");
-		kfree(bus->irq);
-		mdiobus_free(bus);
-		return -ENODEV;
+	dev->regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
+	if (!dev->regs) {
+		dev_err(&pdev->dev, "Unable to remap SMI register\n");
+		ret = -ENODEV;
+		goto out_mdio;
+	}
+
+	init_waitqueue_head(&dev->smi_busy_wait);
+
+	dev->err_interrupt = platform_get_irq(pdev, 0);
+	if (dev->err_interrupt != -ENXIO) {
+		ret = devm_request_irq(&pdev->dev, dev->err_interrupt,
+				       orion_mdio_err_irq,
+				       IRQF_SHARED, pdev->name, dev);
+		if (ret)
+			goto out_mdio;
+
+		writel(MVMDIO_ERR_INT_SMI_DONE,
+		       dev->regs + MVMDIO_ERR_INT_MASK);
 	}
 
 	mutex_init(&dev->lock);
 
-	ret = of_mdiobus_register(bus, np);
+	if (pdev->dev.of_node)
+		ret = of_mdiobus_register(bus, pdev->dev.of_node);
+	else
+		ret = mdiobus_register(bus);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
-		iounmap(dev->smireg);
-		kfree(bus->irq);
-		mdiobus_free(bus);
-		return ret;
+		goto out_mdio;
 	}
 
 	platform_set_drvdata(pdev, bus);
 
 	return 0;
+
+out_mdio:
+	kfree(bus->irq);
+	mdiobus_free(bus);
+	return ret;
 }
 
 static int orion_mdio_remove(struct platform_device *pdev)
 {
 	struct mii_bus *bus = platform_get_drvdata(pdev);
+	struct orion_mdio_dev *dev = bus->priv;
+
+	writel(0, dev->regs + MVMDIO_ERR_INT_MASK);
 	mdiobus_unregister(bus);
 	kfree(bus->irq);
 	mdiobus_free(bus);
@@ -225,3 +295,4 @@ module_platform_driver(orion_mdio_driver);
 MODULE_DESCRIPTION("Marvell MDIO interface driver");
 MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:orion-mdio");
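Note: the mvmdio change above replaces busy-polling with an interrupt-driven wait when the (misnamed) "error" interrupt is wired up: the handler acks MVMDIO_ERR_INT_SMI_DONE and wakes the queue, and the waiter re-checks the hardware bit so the test-then-sleep race is closed. Reduced to its essentials, reusing the driver's own helper (a sketch, not the driver's exact code):

static int example_wait_smi_done(struct orion_mdio_dev *dev)
{
	/* fast path: operation already finished */
	if (orion_mdio_smi_is_done(dev))
		return 0;
	/* sleep until the IRQ handler wakes us, or 100 ms elapse */
	wait_event_timeout(dev->smi_busy_wait,
			   orion_mdio_smi_is_done(dev),
			   msecs_to_jiffies(100));
	/* the condition, not the wakeup, is authoritative */
	return orion_mdio_smi_is_done(dev) ? 0 : -ETIMEDOUT;
}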
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index cd345b8969bc..e48261e468f3 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1969,13 +1969,8 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
 	rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
 					rxq->size * MVNETA_DESC_ALIGNED_SIZE,
 					&rxq->descs_phys, GFP_KERNEL);
-	if (rxq->descs == NULL) {
-		netdev_err(pp->dev,
-			   "rxq=%d: Can't allocate %d bytes for %d RX descr\n",
-			   rxq->id, rxq->size * MVNETA_DESC_ALIGNED_SIZE,
-			   rxq->size);
+	if (rxq->descs == NULL)
 		return -ENOMEM;
-	}
 
 	BUG_ON(rxq->descs !=
 	       PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
@@ -2029,13 +2024,8 @@ static int mvneta_txq_init(struct mvneta_port *pp,
 	txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
 					txq->size * MVNETA_DESC_ALIGNED_SIZE,
 					&txq->descs_phys, GFP_KERNEL);
-	if (txq->descs == NULL) {
-		netdev_err(pp->dev,
-			   "txQ=%d: Can't allocate %d bytes for %d TX descr\n",
-			   txq->id, txq->size * MVNETA_DESC_ALIGNED_SIZE,
-			   txq->size);
+	if (txq->descs == NULL)
 		return -ENOMEM;
-	}
 
 	/* Make sure descriptor address is cache line size aligned  */
 	BUG_ON(txq->descs !=
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 037ed866c22f..339bb323cb0c 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -584,12 +584,14 @@ static int init_hash_table(struct pxa168_eth_private *pep)
 	 */
 	if (pep->htpr == NULL) {
 		pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
 					       HASH_ADDR_TABLE_SIZE,
-					       &pep->htpr_dma, GFP_KERNEL);
+					       &pep->htpr_dma,
+					       GFP_KERNEL | __GFP_ZERO);
 		if (pep->htpr == NULL)
 			return -ENOMEM;
+	} else {
+		memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
 	}
-	memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
 	wrl(pep, HTPR, pep->htpr_dma);
 	return 0;
 }
@@ -1023,13 +1025,11 @@ static int rxq_init(struct net_device *dev)
 	size = pep->rx_ring_size * sizeof(struct rx_desc);
 	pep->rx_desc_area_size = size;
 	pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
-						 &pep->rx_desc_dma, GFP_KERNEL);
-	if (!pep->p_rx_desc_area) {
-		printk(KERN_ERR "%s: Cannot alloc RX ring (size %d bytes)\n",
-		       dev->name, size);
+						 &pep->rx_desc_dma,
+						 GFP_KERNEL | __GFP_ZERO);
+	if (!pep->p_rx_desc_area)
 		goto out;
-	}
-	memset((void *)pep->p_rx_desc_area, 0, size);
+
 	/* initialize the next_desc_ptr links in the Rx descriptors ring */
 	p_rx_desc = pep->p_rx_desc_area;
 	for (i = 0; i < rx_desc_num; i++) {
@@ -1086,13 +1086,10 @@ static int txq_init(struct net_device *dev)
 	size = pep->tx_ring_size * sizeof(struct tx_desc);
 	pep->tx_desc_area_size = size;
 	pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
-						 &pep->tx_desc_dma, GFP_KERNEL);
-	if (!pep->p_tx_desc_area) {
-		printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
-		       dev->name, size);
+						 &pep->tx_desc_dma,
+						 GFP_KERNEL | __GFP_ZERO);
+	if (!pep->p_tx_desc_area)
 		goto out;
-	}
-	memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size);
 	/* Initialize the next_desc_ptr links in the Tx descriptors ring */
 	p_tx_desc = pep->p_tx_desc_area;
 	for (i = 0; i < tx_desc_num; i++) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index fdc5f23d8e9f..05267d716e86 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1837,10 +1837,8 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
 		priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
 						      &priv->mfunc.vhcr_dma,
 						      GFP_KERNEL);
-		if (!priv->mfunc.vhcr) {
-			mlx4_err(dev, "Couldn't allocate VHCR.\n");
+		if (!priv->mfunc.vhcr)
 			goto err_hcr;
-		}
 	}
 
 	priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index b799ab12a291..0f91222ea3d7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -186,7 +186,7 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
 
 static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev)
 {
-	return DCB_CAP_DCBX_VER_IEEE;
+	return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
 }
 
 static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
@@ -253,3 +253,11 @@ const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
 	.getdcbx	= mlx4_en_dcbnl_getdcbx,
 	.setdcbx	= mlx4_en_dcbnl_setdcbx,
 };
+
+const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = {
+	.ieee_getpfc	= mlx4_en_dcbnl_ieee_getpfc,
+	.ieee_setpfc	= mlx4_en_dcbnl_ieee_setpfc,
+
+	.getdcbx	= mlx4_en_dcbnl_getdcbx,
+	.setdcbx	= mlx4_en_dcbnl_setdcbx,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 30d78f806dc3..d2a4f919bf1f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1931,79 +1931,6 @@ static int mlx4_en_set_features(struct net_device *netdev,
 
 }
 
-static int mlx4_en_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
-			   struct net_device *dev,
-			   const unsigned char *addr, u16 flags)
-{
-	struct mlx4_en_priv *priv = netdev_priv(dev);
-	struct mlx4_dev *mdev = priv->mdev->dev;
-	int err;
-
-	if (!mlx4_is_mfunc(mdev))
-		return -EOPNOTSUPP;
-
-	/* Hardware does not support aging addresses, allow only
-	 * permanent addresses if ndm_state is given
-	 */
-	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
-		en_info(priv, "Add FDB only supports static addresses\n");
-		return -EINVAL;
-	}
-
-	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
-		err = dev_uc_add_excl(dev, addr);
-	else if (is_multicast_ether_addr(addr))
-		err = dev_mc_add_excl(dev, addr);
-	else
-		err = -EINVAL;
-
-	/* Only return duplicate errors if NLM_F_EXCL is set */
-	if (err == -EEXIST && !(flags & NLM_F_EXCL))
-		err = 0;
-
-	return err;
-}
-
-static int mlx4_en_fdb_del(struct ndmsg *ndm,
-			   struct nlattr *tb[],
-			   struct net_device *dev,
-			   const unsigned char *addr)
-{
-	struct mlx4_en_priv *priv = netdev_priv(dev);
-	struct mlx4_dev *mdev = priv->mdev->dev;
-	int err;
-
-	if (!mlx4_is_mfunc(mdev))
-		return -EOPNOTSUPP;
-
-	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
-		en_info(priv, "Del FDB only supports static addresses\n");
-		return -EINVAL;
-	}
-
-	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
-		err = dev_uc_del(dev, addr);
-	else if (is_multicast_ether_addr(addr))
-		err = dev_mc_del(dev, addr);
-	else
-		err = -EINVAL;
-
-	return err;
-}
-
-static int mlx4_en_fdb_dump(struct sk_buff *skb,
-			    struct netlink_callback *cb,
-			    struct net_device *dev, int idx)
-{
-	struct mlx4_en_priv *priv = netdev_priv(dev);
-	struct mlx4_dev *mdev = priv->mdev->dev;
-
-	if (mlx4_is_mfunc(mdev))
-		idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
-
-	return idx;
-}
-
 static const struct net_device_ops mlx4_netdev_ops = {
 	.ndo_open		= mlx4_en_open,
 	.ndo_stop		= mlx4_en_close,
@@ -2025,9 +1952,6 @@ static const struct net_device_ops mlx4_netdev_ops = {
 #ifdef CONFIG_RFS_ACCEL
 	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
 #endif
-	.ndo_fdb_add		= mlx4_en_fdb_add,
-	.ndo_fdb_del		= mlx4_en_fdb_del,
-	.ndo_fdb_dump		= mlx4_en_fdb_dump,
 };
 
 int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -2089,8 +2013,14 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
 	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
 #ifdef CONFIG_MLX4_EN_DCB
-	if (!mlx4_is_slave(priv->mdev->dev))
-		dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
+	if (!mlx4_is_slave(priv->mdev->dev)) {
+		if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_SET_ETH_SCHED) {
+			dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
+		} else {
+			en_info(priv, "enabling only PFC DCB ops\n");
+			dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
+		}
+	}
 #endif
 
 	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 3488c6d9e6b5..2448f0d669e6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -58,10 +58,9 @@ static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
 
 	/* build the pkt before xmit */
 	skb = netdev_alloc_skb(priv->dev, MLX4_LOOPBACK_TEST_PAYLOAD + ETH_HLEN + NET_IP_ALIGN);
-	if (!skb) {
-		en_err(priv, "-LOOPBACK_TEST_XMIT- failed to create skb for xmit\n");
+	if (!skb)
 		return -ENOMEM;
-	}
+
 	skb_reserve(skb, NET_IP_ALIGN);
 
 	ethh = (struct ethhdr *)skb_put(skb, sizeof(struct ethhdr));
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index f6245579962d..ab470d991ade 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -91,7 +91,7 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
 		[ 8] = "P_Key violation counter",
 		[ 9] = "Q_Key violation counter",
 		[10] = "VMM",
-		[12] = "DPDP",
+		[12] = "Dual Port Different Protocol (DPDP) support",
 		[15] = "Big LSO headers",
 		[16] = "MW support",
 		[17] = "APM support",
@@ -109,6 +109,8 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
 		[41] = "Unicast VEP steering support",
 		[42] = "Multicast VEP steering support",
 		[48] = "Counters support",
+		[53] = "Port ETS Scheduler support",
+		[55] = "Port link type sensing support",
 		[59] = "Port management change event support",
 		[61] = "64 byte EQE support",
 		[62] = "64 byte CQE support",
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index f710b7ce0dcb..d4cb5d3b28a2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -624,6 +624,7 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port);
 
 #ifdef CONFIG_MLX4_EN_DCB
 extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops;
+extern const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops;
 #endif
 
 int mlx4_en_setup_tc(struct net_device *dev, u8 up);
diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c
index 07a6ebc47c92..b6c60fdef4ff 100644
--- a/drivers/net/ethernet/micrel/ks8695net.c
+++ b/drivers/net/ethernet/micrel/ks8695net.c
@@ -1622,25 +1622,7 @@ static struct platform_driver ks8695_driver = {
 	.resume		= ks8695_drv_resume,
 };
 
-/* Module interface */
-
-static int __init
-ks8695_init(void)
-{
-	printk(KERN_INFO "%s Ethernet driver, V%s\n",
-	       MODULENAME, MODULEVERSION);
-
-	return platform_driver_register(&ks8695_driver);
-}
-
-static void __exit
-ks8695_cleanup(void)
-{
-	platform_driver_unregister(&ks8695_driver);
-}
-
-module_init(ks8695_init);
-module_exit(ks8695_cleanup);
+module_platform_driver(ks8695_driver);
 
 MODULE_AUTHOR("Simtec Electronics");
 MODULE_DESCRIPTION("Micrel KS8695 (Centaur) Ethernet driver");
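Note: module_platform_driver() replaces the init/exit boilerplate deleted above (also dropping the version banner printk). The macro expands to module_init()/module_exit() stubs that do nothing but register and unregister the driver, i.e. it is shorthand for:

static int __init example_init(void)
{
	return platform_driver_register(&ks8695_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	platform_driver_unregister(&ks8695_driver);
}
module_exit(example_exit);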
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 8fb481252e2c..4a3b4995f19a 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -1367,7 +1367,7 @@ static int ks8851_read_selftest(struct ks8851_net *ks)
 #ifdef CONFIG_PM
 static int ks8851_suspend(struct spi_device *spi, pm_message_t state)
 {
-	struct ks8851_net *ks = dev_get_drvdata(&spi->dev);
+	struct ks8851_net *ks = spi_get_drvdata(spi);
 	struct net_device *dev = ks->netdev;
 
 	if (netif_running(dev)) {
@@ -1380,7 +1380,7 @@ static int ks8851_suspend(struct spi_device *spi, pm_message_t state)
 
 static int ks8851_resume(struct spi_device *spi)
 {
-	struct ks8851_net *ks = dev_get_drvdata(&spi->dev);
+	struct ks8851_net *ks = spi_get_drvdata(spi);
 	struct net_device *dev = ks->netdev;
 
 	if (netif_running(dev)) {
@@ -1456,7 +1456,7 @@ static int ks8851_probe(struct spi_device *spi)
 	SET_ETHTOOL_OPS(ndev, &ks8851_ethtool_ops);
 	SET_NETDEV_DEV(ndev, &spi->dev);
 
-	dev_set_drvdata(&spi->dev, ks);
+	spi_set_drvdata(spi, ks);
 
 	ndev->if_port = IF_PORT_100BASET;
 	ndev->netdev_ops = &ks8851_netdev_ops;
@@ -1516,7 +1516,7 @@ err_irq:
 
 static int ks8851_remove(struct spi_device *spi)
 {
-	struct ks8851_net *priv = dev_get_drvdata(&spi->dev);
+	struct ks8851_net *priv = spi_get_drvdata(spi);
 
 	if (netif_msg_drv(priv))
 		dev_info(&spi->dev, "remove\n");
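Note: spi_set_drvdata()/spi_get_drvdata(), used here and in the enc28j60 hunks below, are thin inline wrappers over dev_set_drvdata(&spi->dev, ...)/dev_get_drvdata(), so the conversion is purely cosmetic; it pairs the accessors with the bus type. Sketch of the usual pairing (example_alloc_priv/example_free_priv are hypothetical helpers):

static int example_probe(struct spi_device *spi)
{
	struct ks8851_net *ks = example_alloc_priv(spi);	/* hypothetical */

	spi_set_drvdata(spi, ks);	/* == dev_set_drvdata(&spi->dev, ks) */
	return 0;
}

static int example_remove(struct spi_device *spi)
{
	struct ks8851_net *ks = spi_get_drvdata(spi);

	example_free_priv(ks);		/* hypothetical */
	return 0;
}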
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index a343066f7b43..ddaf138ce0d4 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -792,20 +792,35 @@ static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
 
 	frame_hdr = ks->frame_head_info;
 	while (ks->frame_cnt--) {
+		if (unlikely(!(frame_hdr->sts & RXFSHR_RXFV) ||
+			     frame_hdr->len >= RX_BUF_SIZE ||
+			     frame_hdr->len <= 0)) {
+
+			/* discard an invalid packet */
+			ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
+			netdev->stats.rx_dropped++;
+			if (!(frame_hdr->sts & RXFSHR_RXFV))
+				netdev->stats.rx_frame_errors++;
+			else
+				netdev->stats.rx_length_errors++;
+			frame_hdr++;
+			continue;
+		}
+
 		skb = netdev_alloc_skb(netdev, frame_hdr->len + 16);
-		if (likely(skb && (frame_hdr->sts & RXFSHR_RXFV) &&
-			(frame_hdr->len < RX_BUF_SIZE) && frame_hdr->len)) {
+		if (likely(skb)) {
 			skb_reserve(skb, 2);
 			/* read data block including CRC 4 bytes */
 			ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
-			skb_put(skb, frame_hdr->len);
+			skb_put(skb, frame_hdr->len - 4);
 			skb->protocol = eth_type_trans(skb, netdev);
 			netif_rx(skb);
+			/* exclude CRC size */
+			netdev->stats.rx_bytes += frame_hdr->len - 4;
+			netdev->stats.rx_packets++;
 		} else {
-			pr_err("%s: err:skb alloc\n", __func__);
 			ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
-			if (skb)
-				dev_kfree_skb_irq(skb);
+			netdev->stats.rx_dropped++;
 		}
 		frame_hdr++;
 	}
@@ -877,6 +892,8 @@ static irqreturn_t ks_irq(int irq, void *pw)
 		ks_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
 	}
 
+	if (unlikely(status & IRQ_RXOI))
+		ks->netdev->stats.rx_over_errors++;
 	/* this should be the last in IRQ handler*/
 	ks_restore_cmd_reg(ks);
 	return IRQ_HANDLED;
@@ -1015,6 +1032,9 @@ static int ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	if (likely(ks_tx_fifo_space(ks) >= skb->len + 12)) {
 		ks_write_qmu(ks, skb->data, skb->len);
+		/* add tx statistics */
+		netdev->stats.tx_bytes += skb->len;
+		netdev->stats.tx_packets++;
 		dev_kfree_skb(skb);
 	} else
 		retv = NETDEV_TX_BUSY;
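Note: the ks8851_mll receive rework validates the frame header before allocating an skb, so an invalid frame no longer costs an allocation, and the 4-byte CRC that the MAC includes in frame_hdr->len is excluded from both the skb length and the byte counters. Stripped-down shape of the per-frame control flow, as it would sit inside the frame loop (a sketch of the logic, not the driver's exact code):

	/* inside the while (ks->frame_cnt--) loop */
	if (!(frame_hdr->sts & RXFSHR_RXFV) ||
	    frame_hdr->len <= 0 || frame_hdr->len >= RX_BUF_SIZE) {
		netdev->stats.rx_dropped++;	/* plus frame/length errors */
		frame_hdr++;
		continue;			/* release FIFO, skip alloc */
	}
	skb = netdev_alloc_skb(netdev, frame_hdr->len + 16);
	if (skb) {
		ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
		skb_put(skb, frame_hdr->len - 4);	/* strip CRC */
		netdev->stats.rx_bytes += frame_hdr->len - 4;
		netdev->stats.rx_packets++;
	}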
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index 5d98a9f7bfc7..c7b40aa21f22 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -1566,7 +1566,7 @@ static int enc28j60_probe(struct spi_device *spi)
 	INIT_WORK(&priv->setrx_work, enc28j60_setrx_work_handler);
 	INIT_WORK(&priv->irq_work, enc28j60_irq_work_handler);
 	INIT_WORK(&priv->restart_work, enc28j60_restart_work_handler);
-	dev_set_drvdata(&spi->dev, priv);	/* spi to priv reference */
+	spi_set_drvdata(spi, priv);	/* spi to priv reference */
 	SET_NETDEV_DEV(dev, &spi->dev);
 
 	if (!enc28j60_chipset_init(dev)) {
@@ -1618,7 +1618,7 @@ error_alloc:
 
 static int enc28j60_remove(struct spi_device *spi)
 {
-	struct enc28j60_net *priv = dev_get_drvdata(&spi->dev);
+	struct enc28j60_net *priv = spi_get_drvdata(spi);
 
 	if (netif_msg_drv(priv))
 		printk(KERN_DEBUG DRV_NAME ": remove\n");
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 4f9937e026e5..d5ffdc8264eb 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3592,10 +3592,9 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
 		bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
 		ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
 						       &ss->rx_done.bus,
-						       GFP_KERNEL);
+						       GFP_KERNEL | __GFP_ZERO);
 		if (ss->rx_done.entry == NULL)
 			goto abort;
-		memset(ss->rx_done.entry, 0, bytes);
 		bytes = sizeof(*ss->fw_stats);
 		ss->fw_stats = dma_alloc_coherent(&pdev->dev, bytes,
 						  &ss->fw_stats_bus,
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index b0b361546365..c20766c2f65b 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -175,13 +175,13 @@ static int sonic_probe1(struct net_device *dev)
 
 	/* Allocate the entire chunk of memory for the descriptors.
            Note that this cannot cross a 64K boundary. */
-	if ((lp->descriptors = dma_alloc_coherent(lp->device,
-	                SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
-	                &lp->descriptors_laddr, GFP_KERNEL)) == NULL) {
-		printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n",
-		       dev_name(lp->device));
+	lp->descriptors = dma_alloc_coherent(lp->device,
+					     SIZEOF_SONIC_DESC *
+					     SONIC_BUS_SCALE(lp->dma_bitmode),
+					     &lp->descriptors_laddr,
+					     GFP_KERNEL);
+	if (lp->descriptors == NULL)
 		goto out;
-	}
 
 	/* Now set up the pointers to point to the appropriate places */
 	lp->cda = lp->descriptors;
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index 0ffde69c8d01..346a4e025c34 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -202,13 +202,13 @@ static int macsonic_init(struct net_device *dev)
 
 	/* Allocate the entire chunk of memory for the descriptors.
            Note that this cannot cross a 64K boundary. */
-	if ((lp->descriptors = dma_alloc_coherent(lp->device,
-	                SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
-	                &lp->descriptors_laddr, GFP_KERNEL)) == NULL) {
-		printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n",
-		       dev_name(lp->device));
+	lp->descriptors = dma_alloc_coherent(lp->device,
+					     SIZEOF_SONIC_DESC *
+					     SONIC_BUS_SCALE(lp->dma_bitmode),
+					     &lp->descriptors_laddr,
+					     GFP_KERNEL);
+	if (lp->descriptors == NULL)
 		return -ENOMEM;
-	}
 
 	/* Now set up the pointers to point to the appropriate places */
 	lp->cda = lp->descriptors;
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index 46795e403467..1bd419dbda6d 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -424,7 +424,6 @@ static void sonic_rx(struct net_device *dev)
 		/* Malloc up new buffer. */
 		new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
 		if (new_skb == NULL) {
-			printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", dev->name);
 			lp->stats.rx_dropped++;
 			break;
 		}
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index 5e4748e855f6..c2e0256fe3df 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -197,14 +197,12 @@ static int __init sonic_probe1(struct net_device *dev)
 	 *  We also allocate extra space for a pointer to allow freeing
 	 *  this structure later on (in xtsonic_cleanup_module()).
 	 */
-	lp->descriptors =
-		dma_alloc_coherent(lp->device,
-			SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
-			&lp->descriptors_laddr, GFP_KERNEL);
-
+	lp->descriptors = dma_alloc_coherent(lp->device,
+					     SIZEOF_SONIC_DESC *
+					     SONIC_BUS_SCALE(lp->dma_bitmode),
+					     &lp->descriptors_laddr,
+					     GFP_KERNEL);
 	if (lp->descriptors == NULL) {
-		printk(KERN_ERR "%s: couldn't alloc DMA memory for "
-		       " descriptors.\n", dev_name(lp->device));
 		err = -ENOMEM;
 		goto out;
 	}
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index bfd887382e19..3371ff41bb34 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -80,6 +80,7 @@
 #include <linux/slab.h>
 #include <linux/prefetch.h>
 #include <net/tcp.h>
+#include <net/checksum.h>
 
 #include <asm/div64.h>
 #include <asm/irq.h>
@@ -8337,16 +8338,13 @@ static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
 {
 	struct iphdr *ip = lro->iph;
 	struct tcphdr *tcp = lro->tcph;
-	__sum16 nchk;
 	struct swStat *swstats = &sp->mac_control.stats_info->sw_stat;
 
 	DBG_PRINT(INFO_DBG, "%s: Been here...\n", __func__);
 
 	/* Update L3 header */
+	csum_replace2(&ip->check, ip->tot_len, htons(lro->total_len));
 	ip->tot_len = htons(lro->total_len);
-	ip->check = 0;
-	nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
-	ip->check = nchk;
 
 	/* Update L4 header */
 	tcp->ack_seq = lro->tcp_ack;
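Note: csum_replace2() from <net/checksum.h>, used here and in the netxen LRO hunk later in this series, updates the 16-bit ones-complement IP checksum incrementally for a single changed 16-bit field, instead of zeroing and recomputing the checksum over the whole header. In effect (new_tot_len is a hypothetical variable):

	/* Fold out the old tot_len, fold in the new one.
	 * Equivalent to, but cheaper than:
	 *	ip->check = 0;
	 *	ip->check = ip_fast_csum((u8 *)ip, ip->ihl);
	 */
	csum_replace2(&ip->check, ip->tot_len, htons(new_tot_len));
	ip->tot_len = htons(new_tot_len);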
diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c
index 63e7af44366f..cb9e63831500 100644
--- a/drivers/net/ethernet/netx-eth.c
+++ b/drivers/net/ethernet/netx-eth.c
@@ -152,8 +152,6 @@ static void netx_eth_receive(struct net_device *ndev)
 
 	skb = netdev_alloc_skb(ndev, len);
 	if (unlikely(skb == NULL)) {
-		printk(KERN_NOTICE "%s: Low memory, packet dropped.\n",
-		       ndev->name);
 		ndev->stats.rx_dropped++;
 		return;
 	}
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index 162da8975b05..3df8287b7452 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -287,23 +287,16 @@ static int w90p910_init_desc(struct net_device *dev)
 	ether = netdev_priv(dev);
 	pdev = ether->pdev;
 
-	ether->tdesc = (struct tran_pdesc *)
-		dma_alloc_coherent(&pdev->dev, sizeof(struct tran_pdesc),
-			&ether->tdesc_phys, GFP_KERNEL);
-
-	if (!ether->tdesc) {
-		dev_err(&pdev->dev, "Failed to allocate memory for tx desc\n");
+	ether->tdesc = dma_alloc_coherent(&pdev->dev, sizeof(struct tran_pdesc),
+					  &ether->tdesc_phys, GFP_KERNEL);
+	if (!ether->tdesc)
 		return -ENOMEM;
-	}
-
-	ether->rdesc = (struct recv_pdesc *)
-		dma_alloc_coherent(&pdev->dev, sizeof(struct recv_pdesc),
-			&ether->rdesc_phys, GFP_KERNEL);
 
+	ether->rdesc = dma_alloc_coherent(&pdev->dev, sizeof(struct recv_pdesc),
+					  &ether->rdesc_phys, GFP_KERNEL);
 	if (!ether->rdesc) {
-		dev_err(&pdev->dev, "Failed to allocate memory for rx desc\n");
 		dma_free_coherent(&pdev->dev, sizeof(struct tran_pdesc),
 				  ether->tdesc, ether->tdesc_phys);
 		return -ENOMEM;
 	}
 
@@ -737,7 +730,6 @@ static void netdev_rx(struct net_device *dev)
 	data = ether->rdesc->recv_buf[ether->cur_rx];
 	skb = netdev_alloc_skb(dev, length + 2);
 	if (!skb) {
-		dev_err(&pdev->dev, "get skb buffer error\n");
 		ether->stats.rx_dropped++;
 		return;
 	}
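Note: besides dropping the error messages, the w90p910 cleanup removes the (struct tran_pdesc *)/(struct recv_pdesc *) casts. dma_alloc_coherent() returns void *, which converts implicitly to any object pointer type in C, so the casts only added noise:

	/* void * needs no cast in C */
	ether->tdesc = dma_alloc_coherent(&pdev->dev, sizeof(struct tran_pdesc),
					  &ether->tdesc_phys, GFP_KERNEL);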
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 0b8de12bcbca..5ae124719790 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -2200,6 +2200,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct ring_desc *start_tx;
 	struct ring_desc *prev_tx;
 	struct nv_skb_map *prev_tx_ctx;
+	struct nv_skb_map *tmp_tx_ctx = NULL, *start_tx_ctx = NULL;
 	unsigned long flags;
 
 	/* add fragments to entries count */
@@ -2261,12 +2262,31 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		do {
 			prev_tx = put_tx;
 			prev_tx_ctx = np->put_tx_ctx;
+			if (!start_tx_ctx)
+				start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
+
 			bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
 			np->put_tx_ctx->dma = skb_frag_dma_map(
 							&np->pci_dev->dev,
 							frag, offset,
 							bcnt,
 							DMA_TO_DEVICE);
+			if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) {
+
+				/* Unwind the mapped fragments */
+				do {
+					nv_unmap_txskb(np, start_tx_ctx);
+					if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
+						tmp_tx_ctx = np->first_tx_ctx;
+				} while (tmp_tx_ctx != np->put_tx_ctx);
+				kfree_skb(skb);
+				np->put_tx_ctx = start_tx_ctx;
+				u64_stats_update_begin(&np->swstats_tx_syncp);
+				np->stat_tx_dropped++;
+				u64_stats_update_end(&np->swstats_tx_syncp);
+				return NETDEV_TX_OK;
+			}
+
 			np->put_tx_ctx->dma_len = bcnt;
 			np->put_tx_ctx->dma_single = 0;
 			put_tx->buf = cpu_to_le32(np->put_tx_ctx->dma);
@@ -2327,7 +2347,8 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 	struct ring_desc_ex *start_tx;
 	struct ring_desc_ex *prev_tx;
 	struct nv_skb_map *prev_tx_ctx;
-	struct nv_skb_map *start_tx_ctx;
+	struct nv_skb_map *start_tx_ctx = NULL;
+	struct nv_skb_map *tmp_tx_ctx = NULL;
 	unsigned long flags;
 
 	/* add fragments to entries count */
@@ -2392,11 +2413,29 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
 		prev_tx = put_tx;
 		prev_tx_ctx = np->put_tx_ctx;
 		bcnt = (frag_size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : frag_size;
+		if (!start_tx_ctx)
+			start_tx_ctx = tmp_tx_ctx = np->put_tx_ctx;
 		np->put_tx_ctx->dma = skb_frag_dma_map(
 						&np->pci_dev->dev,
 						frag, offset,
 						bcnt,
 						DMA_TO_DEVICE);
+
+		if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) {
+
+			/* Unwind the mapped fragments */
+			do {
+				nv_unmap_txskb(np, start_tx_ctx);
+				if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
+					tmp_tx_ctx = np->first_tx_ctx;
+			} while (tmp_tx_ctx != np->put_tx_ctx);
+			kfree_skb(skb);
+			np->put_tx_ctx = start_tx_ctx;
+			u64_stats_update_begin(&np->swstats_tx_syncp);
+			np->stat_tx_dropped++;
+			u64_stats_update_end(&np->swstats_tx_syncp);
+			return NETDEV_TX_OK;
+		}
 		np->put_tx_ctx->dma_len = bcnt;
 		np->put_tx_ctx->dma_single = 0;
 		put_tx->bufhigh = cpu_to_le32(dma_high(np->put_tx_ctx->dma));
@@ -5025,7 +5064,6 @@ static int nv_loopback_test(struct net_device *dev)
 	pkt_len = ETH_DATA_LEN;
 	tx_skb = netdev_alloc_skb(dev, pkt_len);
 	if (!tx_skb) {
-		netdev_err(dev, "netdev_alloc_skb() failed during loopback test\n");
 		ret = 0;
 		goto out;
 	}
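Note: the forcedeth hunks add dma_mapping_error() checks on each fragment mapping. On failure, every fragment mapped so far for this skb must be unmapped before the packet is dropped, walking the context ring from the first context used up to (but not including) the failing one, with wrap-around. The shape of the unwind, following the driver's own names (a sketch of the pattern, not a drop-in function):

	np->put_tx_ctx->dma = skb_frag_dma_map(&np->pci_dev->dev, frag,
					       offset, bcnt, DMA_TO_DEVICE);
	if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) {
		/* unmap everything mapped for this skb so far */
		do {
			nv_unmap_txskb(np, start_tx_ctx);
			if (unlikely(tmp_tx_ctx++ == np->last_tx_ctx))
				tmp_tx_ctx = np->first_tx_ctx;	/* wrap */
		} while (tmp_tx_ctx != np->put_tx_ctx);
		kfree_skb(skb);
		np->put_tx_ctx = start_tx_ctx;	/* rewind the producer */
		return NETDEV_TX_OK;		/* drop, don't requeue */
	}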
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index efa29b712d5f..55a5548d6add 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1409,9 +1409,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
 		dma_alloc_coherent(&pldat->pdev->dev,
 				   pldat->dma_buff_size, &dma_handle,
 				   GFP_KERNEL);
-
 	if (pldat->dma_buff_base_v == NULL) {
-		dev_err(&pdev->dev, "error getting DMA region.\n");
 		ret = -ENOMEM;
 		goto err_out_free_irq;
 	}
@@ -1434,13 +1432,11 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
 	/* Get MAC address from current HW setting (POR state is all zeros) */
 	__lpc_get_mac(pldat, ndev->dev_addr);
 
-#ifdef CONFIG_OF_NET
 	if (!is_valid_ether_addr(ndev->dev_addr)) {
 		const char *macaddr = of_get_mac_address(pdev->dev.of_node);
 		if (macaddr)
 			memcpy(ndev->dev_addr, macaddr, ETH_ALEN);
 	}
-#endif
 	if (!is_valid_ether_addr(ndev->dev_addr))
 		eth_hw_addr_random(ndev);
 
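Note: dropping the CONFIG_OF_NET guard is safe presumably because <linux/of_net.h> provides a stub of_get_mac_address() returning NULL when OF networking support is compiled out, so the call can be made unconditionally. The resulting fallback chain is hardware register, then DT property, then random address:

	__lpc_get_mac(pldat, ndev->dev_addr);		/* from hardware */
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		/* NULL when there is no DT node or no such property */
		const char *macaddr = of_get_mac_address(pdev->dev.of_node);
		if (macaddr)
			memcpy(ndev->dev_addr, macaddr, ETH_ALEN);
	}
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);		/* last resort */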
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 73ce7dd6b954..60eb890800ec 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -1469,13 +1469,11 @@ pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
 
 	size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
 	rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
 						   &rx_ring->rx_buff_pool_logic,
-						   GFP_KERNEL);
-	if (!rx_ring->rx_buff_pool) {
-		pr_err("Unable to allocate memory for the receive pool buffer\n");
+						   GFP_KERNEL | __GFP_ZERO);
+	if (!rx_ring->rx_buff_pool)
 		return -ENOMEM;
-	}
-	memset(rx_ring->rx_buff_pool, 0, size);
+
 	rx_ring->rx_buff_pool_size = size;
 	for (i = 0; i < rx_ring->count; i++) {
 		buffer_info = &rx_ring->buffer_info[i];
@@ -1774,13 +1772,12 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
 	tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
 
 	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
-					   &tx_ring->dma, GFP_KERNEL);
+					   &tx_ring->dma,
+					   GFP_KERNEL | __GFP_ZERO);
 	if (!tx_ring->desc) {
 		vfree(tx_ring->buffer_info);
-		pr_err("Unable to allocate memory for the transmit descriptor ring\n");
 		return -ENOMEM;
 	}
-	memset(tx_ring->desc, 0, tx_ring->size);
 
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
@@ -1820,14 +1817,12 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
 
 	rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
 	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
-					   &rx_ring->dma, GFP_KERNEL);
-
+					   &rx_ring->dma,
+					   GFP_KERNEL | __GFP_ZERO);
 	if (!rx_ring->desc) {
-		pr_err("Unable to allocate memory for the receive descriptor ring\n");
 		vfree(rx_ring->buffer_info);
 		return -ENOMEM;
 	}
-	memset(rx_ring->desc, 0, rx_ring->size);
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
 	for (desNo = 0; desNo < rx_ring->count; desNo++) {
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index b1cfbb75ff1e..a5f0b5da6149 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -441,12 +441,11 @@ static int pasemi_mac_setup_rx_resources(const struct net_device *dev)
 
 	ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
 					   RX_RING_SIZE * sizeof(u64),
-					   &ring->buf_dma, GFP_KERNEL);
+					   &ring->buf_dma,
+					   GFP_KERNEL | __GFP_ZERO);
 	if (!ring->buffers)
 		goto out_ring_desc;
 
-	memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64));
-
 	write_dma_reg(PAS_DMA_RXCHAN_BASEL(chno),
 		      PAS_DMA_RXCHAN_BASEL_BRBL(ring->chan.ring_dma));
 
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index a8669adecc97..0e1797295a48 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -35,6 +35,16 @@ config QLCNIC
 	  This driver supports QLogic QLE8240 and QLE8242 Converged Ethernet
 	  devices.
 
+config QLCNIC_SRIOV
+	bool "QLOGIC QLCNIC 83XX family SR-IOV Support"
+	depends on QLCNIC && PCI_IOV
+	default y
+	---help---
+	  This configuration parameter enables Single Root Input Output
+	  Virtualization support for QLE83XX Converged Ethernet devices.
+	  This allows for virtual function acceleration in virtualized
+	  environments.
+
 config QLGE
 	tristate "QLogic QLGE 10Gb Ethernet Driver Support"
 	depends on PCI
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index eb3dfdbb642b..322a36b76727 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -955,9 +955,10 @@ typedef struct nx_mac_list_s {
 	uint8_t mac_addr[ETH_ALEN+2];
 } nx_mac_list_t;
 
-struct nx_vlan_ip_list {
+struct nx_ip_list {
 	struct list_head list;
 	__be32 ip_addr;
+	bool master;
 };
 
 /*
@@ -1605,7 +1606,7 @@ struct netxen_adapter {
 	struct net_device *netdev;
 	struct pci_dev *pdev;
 	struct list_head mac_list;
-	struct list_head vlan_ip_list;
+	struct list_head ip_list;
 
 	spinlock_t tx_clean_lock;
 
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 4782dcfde736..7692dfd4f262 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -27,6 +27,7 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/if_vlan.h>
+#include <net/checksum.h>
 #include "netxen_nic.h"
 #include "netxen_nic_hw.h"
 
@@ -1641,9 +1642,8 @@ netxen_process_lro(struct netxen_adapter *adapter,
 	th = (struct tcphdr *)((skb->data + vhdr_len) + (iph->ihl << 2));
 
 	length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+	csum_replace2(&iph->check, iph->tot_len, htons(length));
 	iph->tot_len = htons(length);
-	iph->check = 0;
-	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
 	th->psh = push;
 	th->seq = htonl(seq_number);
 
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 501f49207da5..7867aebc05f2 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -90,7 +90,7 @@ static irqreturn_t netxen_intr(int irq, void *data);
90static irqreturn_t netxen_msi_intr(int irq, void *data); 90static irqreturn_t netxen_msi_intr(int irq, void *data);
91static irqreturn_t netxen_msix_intr(int irq, void *data); 91static irqreturn_t netxen_msix_intr(int irq, void *data);
92 92
93static void netxen_free_vlan_ip_list(struct netxen_adapter *); 93static void netxen_free_ip_list(struct netxen_adapter *, bool);
94static void netxen_restore_indev_addr(struct net_device *dev, unsigned long); 94static void netxen_restore_indev_addr(struct net_device *dev, unsigned long);
95static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *dev, 95static struct rtnl_link_stats64 *netxen_nic_get_stats(struct net_device *dev,
96 struct rtnl_link_stats64 *stats); 96 struct rtnl_link_stats64 *stats);
@@ -1450,7 +1450,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1450 1450
1451 spin_lock_init(&adapter->tx_clean_lock); 1451 spin_lock_init(&adapter->tx_clean_lock);
1452 INIT_LIST_HEAD(&adapter->mac_list); 1452 INIT_LIST_HEAD(&adapter->mac_list);
1453 INIT_LIST_HEAD(&adapter->vlan_ip_list); 1453 INIT_LIST_HEAD(&adapter->ip_list);
1454 1454
1455 err = netxen_setup_pci_map(adapter); 1455 err = netxen_setup_pci_map(adapter);
1456 if (err) 1456 if (err)
@@ -1585,7 +1585,7 @@ static void netxen_nic_remove(struct pci_dev *pdev)
1585 1585
1586 cancel_work_sync(&adapter->tx_timeout_task); 1586 cancel_work_sync(&adapter->tx_timeout_task);
1587 1587
1588 netxen_free_vlan_ip_list(adapter); 1588 netxen_free_ip_list(adapter, false);
1589 netxen_nic_detach(adapter); 1589 netxen_nic_detach(adapter);
1590 1590
1591 nx_decr_dev_ref_cnt(adapter); 1591 nx_decr_dev_ref_cnt(adapter);
@@ -3137,62 +3137,77 @@ netxen_destip_supported(struct netxen_adapter *adapter)
3137} 3137}
3138 3138
3139static void 3139static void
3140netxen_free_vlan_ip_list(struct netxen_adapter *adapter) 3140netxen_free_ip_list(struct netxen_adapter *adapter, bool master)
3141{ 3141{
3142 struct nx_vlan_ip_list *cur; 3142 struct nx_ip_list *cur, *tmp_cur;
3143 struct list_head *head = &adapter->vlan_ip_list;
3144 3143
3145 while (!list_empty(head)) { 3144 list_for_each_entry_safe(cur, tmp_cur, &adapter->ip_list, list) {
3146 cur = list_entry(head->next, struct nx_vlan_ip_list, list); 3145 if (master) {
3147 netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN); 3146 if (cur->master) {
3148 list_del(&cur->list); 3147 netxen_config_ipaddr(adapter, cur->ip_addr,
3149 kfree(cur); 3148 NX_IP_DOWN);
3149 list_del(&cur->list);
3150 kfree(cur);
3151 }
3152 } else {
3153 netxen_config_ipaddr(adapter, cur->ip_addr, NX_IP_DOWN);
3154 list_del(&cur->list);
3155 kfree(cur);
3156 }
3150 } 3157 }
3151
3152} 3158}
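netxen_free_ip_list() above walks adapter->ip_list with list_for_each_entry_safe(), which caches the next node so the current entry can be unlinked and freed mid-iteration; the new master flag restricts the bonding-teardown path to addresses programmed on behalf of the bond. A hedged sketch of the same deletion pattern (struct and field names are illustrative, and the device notification via netxen_config_ipaddr() is omitted):

#include <linux/list.h>
#include <linux/slab.h>

struct ip_entry {			/* illustrative stand-in for nx_ip_list */
	struct list_head list;
	__be32 ip_addr;
	bool master;
};

/* Free every entry, or only bond-owned entries when 'master' is set. */
static void free_ip_entries(struct list_head *head, bool master)
{
	struct ip_entry *cur, *tmp;

	list_for_each_entry_safe(cur, tmp, head, list) {
		if (master && !cur->master)
			continue;
		list_del(&cur->list);
		kfree(cur);
	}
}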
3153static void 3159
3154netxen_list_config_vlan_ip(struct netxen_adapter *adapter, 3160static bool
3161netxen_list_config_ip(struct netxen_adapter *adapter,
3155 struct in_ifaddr *ifa, unsigned long event) 3162 struct in_ifaddr *ifa, unsigned long event)
3156{ 3163{
3157 struct net_device *dev; 3164 struct net_device *dev;
3158 struct nx_vlan_ip_list *cur, *tmp_cur; 3165 struct nx_ip_list *cur, *tmp_cur;
3159 struct list_head *head; 3166 struct list_head *head;
3167 bool ret = false;
3160 3168
3161 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL; 3169 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
3162 3170
3163 if (dev == NULL) 3171 if (dev == NULL)
3164 return; 3172 goto out;
3165
3166 if (!is_vlan_dev(dev))
3167 return;
3168 3173
3169 switch (event) { 3174 switch (event) {
3170 case NX_IP_UP: 3175 case NX_IP_UP:
3171 list_for_each(head, &adapter->vlan_ip_list) { 3176 list_for_each(head, &adapter->ip_list) {
3172 cur = list_entry(head, struct nx_vlan_ip_list, list); 3177 cur = list_entry(head, struct nx_ip_list, list);
3173 3178
3174 if (cur->ip_addr == ifa->ifa_address) 3179 if (cur->ip_addr == ifa->ifa_address)
3175 return; 3180 goto out;
3176 } 3181 }
3177 3182
3178 cur = kzalloc(sizeof(struct nx_vlan_ip_list), GFP_ATOMIC); 3183 cur = kzalloc(sizeof(struct nx_ip_list), GFP_ATOMIC);
3179 if (cur == NULL) 3184 if (cur == NULL)
3180 return; 3185 goto out;
3181 3186 if (dev->priv_flags & IFF_802_1Q_VLAN)
3187 dev = vlan_dev_real_dev(dev);
3188 cur->master = !!netif_is_bond_master(dev);
3182 cur->ip_addr = ifa->ifa_address; 3189 cur->ip_addr = ifa->ifa_address;
3183 list_add_tail(&cur->list, &adapter->vlan_ip_list); 3190 list_add_tail(&cur->list, &adapter->ip_list);
3191 netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP);
3192 ret = true;
3184 break; 3193 break;
3185 case NX_IP_DOWN: 3194 case NX_IP_DOWN:
3186 list_for_each_entry_safe(cur, tmp_cur, 3195 list_for_each_entry_safe(cur, tmp_cur,
3187 &adapter->vlan_ip_list, list) { 3196 &adapter->ip_list, list) {
3188 if (cur->ip_addr == ifa->ifa_address) { 3197 if (cur->ip_addr == ifa->ifa_address) {
3189 list_del(&cur->list); 3198 list_del(&cur->list);
3190 kfree(cur); 3199 kfree(cur);
3200 netxen_config_ipaddr(adapter, ifa->ifa_address,
3201 NX_IP_DOWN);
3202 ret = true;
3191 break; 3203 break;
3192 } 3204 }
3193 } 3205 }
3194 } 3206 }
3207out:
3208 return ret;
3195} 3209}
3210
3196static void 3211static void
3197netxen_config_indev_addr(struct netxen_adapter *adapter, 3212netxen_config_indev_addr(struct netxen_adapter *adapter,
3198 struct net_device *dev, unsigned long event) 3213 struct net_device *dev, unsigned long event)
@@ -3209,14 +3224,10 @@ netxen_config_indev_addr(struct netxen_adapter *adapter,
3209 for_ifa(indev) { 3224 for_ifa(indev) {
3210 switch (event) { 3225 switch (event) {
3211 case NETDEV_UP: 3226 case NETDEV_UP:
3212 netxen_config_ipaddr(adapter, 3227 netxen_list_config_ip(adapter, ifa, NX_IP_UP);
3213 ifa->ifa_address, NX_IP_UP);
3214 netxen_list_config_vlan_ip(adapter, ifa, NX_IP_UP);
3215 break; 3228 break;
3216 case NETDEV_DOWN: 3229 case NETDEV_DOWN:
3217 netxen_config_ipaddr(adapter, 3230 netxen_list_config_ip(adapter, ifa, NX_IP_DOWN);
3218 ifa->ifa_address, NX_IP_DOWN);
3219 netxen_list_config_vlan_ip(adapter, ifa, NX_IP_DOWN);
3220 break; 3231 break;
3221 default: 3232 default:
3222 break; 3233 break;
@@ -3231,23 +3242,78 @@ netxen_restore_indev_addr(struct net_device *netdev, unsigned long event)
3231 3242
3232{ 3243{
3233 struct netxen_adapter *adapter = netdev_priv(netdev); 3244 struct netxen_adapter *adapter = netdev_priv(netdev);
3234 struct nx_vlan_ip_list *pos, *tmp_pos; 3245 struct nx_ip_list *pos, *tmp_pos;
3235 unsigned long ip_event; 3246 unsigned long ip_event;
3236 3247
3237 ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN; 3248 ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN;
3238 netxen_config_indev_addr(adapter, netdev, event); 3249 netxen_config_indev_addr(adapter, netdev, event);
3239 3250
3240 list_for_each_entry_safe(pos, tmp_pos, &adapter->vlan_ip_list, list) { 3251 list_for_each_entry_safe(pos, tmp_pos, &adapter->ip_list, list) {
3241 netxen_config_ipaddr(adapter, pos->ip_addr, ip_event); 3252 netxen_config_ipaddr(adapter, pos->ip_addr, ip_event);
3242 } 3253 }
3243} 3254}
3244 3255
3256static inline bool
3257netxen_config_checkdev(struct net_device *dev)
3258{
3259 struct netxen_adapter *adapter;
3260
3261 if (!is_netxen_netdev(dev))
3262 return false;
3263 adapter = netdev_priv(dev);
3264 if (!adapter)
3265 return false;
3266 if (!netxen_destip_supported(adapter))
3267 return false;
3268 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC)
3269 return false;
3270
3271 return true;
3272}
3273
3274/**
3275 * netxen_config_master - configure addresses based on master
3276 * @dev: netxen device
3277 * @event: netdev event
3278 */
3279static void netxen_config_master(struct net_device *dev, unsigned long event)
3280{
3281 struct net_device *master, *slave;
3282 struct netxen_adapter *adapter = netdev_priv(dev);
3283
3284 rcu_read_lock();
3285 master = netdev_master_upper_dev_get_rcu(dev);
3286 /*
3287 * This is the case where the netxen nic is being
3288 * enslaved and is dev_open()ed in bond_enslave()
3289 * Now we should program the bond's (and its vlans')
3290 * addresses in the netxen NIC.
3291 */
3292 if (master && netif_is_bond_master(master) &&
3293 !netif_is_bond_slave(dev)) {
3294 netxen_config_indev_addr(adapter, master, event);
3295 for_each_netdev_rcu(&init_net, slave)
3296 if (slave->priv_flags & IFF_802_1Q_VLAN &&
3297 vlan_dev_real_dev(slave) == master)
3298 netxen_config_indev_addr(adapter, slave, event);
3299 }
3300 rcu_read_unlock();
3301 /*
3302 * This is the case where the netxen nic is being
3303 * released and is dev_close()ed in bond_release()
3304 * just before IFF_BONDING is stripped.
3305 */
3306 if (!master && dev->priv_flags & IFF_BONDING)
3307 netxen_free_ip_list(adapter, true);
3308}
3309
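netxen_config_master() depends on netdev_master_upper_dev_get_rcu() and for_each_netdev_rcu(), both of which are only safe inside an RCU read-side critical section. A minimal sketch of that locking pattern, assuming this kernel's upper-device API (the helper name is illustrative):

#include <linux/netdevice.h>
#include <linux/rcupdate.h>

/* True if 'dev' currently sits under a bonding master. The upper-device
 * pointer is RCU-protected, so bracket the lookup with rcu_read_lock().
 */
static bool dev_has_bond_master(struct net_device *dev)
{
	struct net_device *master;
	bool ret;

	rcu_read_lock();
	master = netdev_master_upper_dev_get_rcu(dev);
	ret = master && netif_is_bond_master(master);
	rcu_read_unlock();

	return ret;
}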
3245static int netxen_netdev_event(struct notifier_block *this, 3310static int netxen_netdev_event(struct notifier_block *this,
3246 unsigned long event, void *ptr) 3311 unsigned long event, void *ptr)
3247{ 3312{
3248 struct netxen_adapter *adapter; 3313 struct netxen_adapter *adapter;
3249 struct net_device *dev = (struct net_device *)ptr; 3314 struct net_device *dev = (struct net_device *)ptr;
3250 struct net_device *orig_dev = dev; 3315 struct net_device *orig_dev = dev;
3316 struct net_device *slave;
3251 3317
3252recheck: 3318recheck:
3253 if (dev == NULL) 3319 if (dev == NULL)
@@ -3257,19 +3323,28 @@ recheck:
3257 dev = vlan_dev_real_dev(dev); 3323 dev = vlan_dev_real_dev(dev);
3258 goto recheck; 3324 goto recheck;
3259 } 3325 }
3260 3326 if (event == NETDEV_UP || event == NETDEV_DOWN) {
3261 if (!is_netxen_netdev(dev)) 3327 /* If this is a bonding device, look for netxen-based slaves*/
3262 goto done; 3328 if (netif_is_bond_master(dev)) {
3263 3329 rcu_read_lock();
3264 adapter = netdev_priv(dev); 3330 for_each_netdev_in_bond_rcu(dev, slave) {
3265 3331 if (!netxen_config_checkdev(slave))
3266 if (!adapter) 3332 continue;
3267 goto done; 3333 adapter = netdev_priv(slave);
3268 3334 netxen_config_indev_addr(adapter,
3269 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) 3335 orig_dev, event);
3270 goto done; 3336 }
3271 3337 rcu_read_unlock();
3272 netxen_config_indev_addr(adapter, orig_dev, event); 3338 } else {
3339 if (!netxen_config_checkdev(dev))
3340 goto done;
3341 adapter = netdev_priv(dev);
3342 /* Act only if the actual netxen is the target */
3343 if (orig_dev == dev)
3344 netxen_config_master(dev, event);
3345 netxen_config_indev_addr(adapter, orig_dev, event);
3346 }
3347 }
3273done: 3348done:
3274 return NOTIFY_DONE; 3349 return NOTIFY_DONE;
3275} 3350}
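netxen_netdev_event() is a netdevice notifier callback; in this kernel the notifier payload 'ptr' is the struct net_device itself (later kernels wrap it in netdev_notifier_info). A sketch of how such a callback is wired up, with illustrative names:

#include <linux/netdevice.h>
#include <linux/notifier.h>

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;	/* pre-3.11 convention, as here */

	if (event == NETDEV_UP || event == NETDEV_DOWN)
		pr_info("netdev %s: event %lu\n", dev->name, event);

	return NOTIFY_DONE;
}

static struct notifier_block example_netdev_notifier = {
	.notifier_call = example_netdev_event,
};

/* registration, typically from module init/exit:
 *   register_netdevice_notifier(&example_netdev_notifier);
 *   unregister_netdevice_notifier(&example_netdev_notifier);
 */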
@@ -3279,12 +3354,12 @@ netxen_inetaddr_event(struct notifier_block *this,
3279 unsigned long event, void *ptr) 3354 unsigned long event, void *ptr)
3280{ 3355{
3281 struct netxen_adapter *adapter; 3356 struct netxen_adapter *adapter;
3282 struct net_device *dev; 3357 struct net_device *dev, *slave;
3283
3284 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; 3358 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
3359 unsigned long ip_event;
3285 3360
3286 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL; 3361 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
3287 3362 ip_event = (event == NETDEV_UP) ? NX_IP_UP : NX_IP_DOWN;
3288recheck: 3363recheck:
3289 if (dev == NULL) 3364 if (dev == NULL)
3290 goto done; 3365 goto done;
@@ -3293,31 +3368,24 @@ recheck:
3293 dev = vlan_dev_real_dev(dev); 3368 dev = vlan_dev_real_dev(dev);
3294 goto recheck; 3369 goto recheck;
3295 } 3370 }
3296 3371 if (event == NETDEV_UP || event == NETDEV_DOWN) {
3297 if (!is_netxen_netdev(dev)) 3372 /* If this is a bonding device, look for netxen-based slaves */
3298 goto done; 3373 if (netif_is_bond_master(dev)) {
3299 3374 rcu_read_lock();
3300 adapter = netdev_priv(dev); 3375 for_each_netdev_in_bond_rcu(dev, slave) {
3301 3376 if (!netxen_config_checkdev(slave))
3302 if (!adapter || !netxen_destip_supported(adapter)) 3377 continue;
3303 goto done; 3378 adapter = netdev_priv(slave);
3304 3379 netxen_list_config_ip(adapter, ifa, ip_event);
3305 if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) 3380 }
3306 goto done; 3381 rcu_read_unlock();
3307 3382 } else {
3308 switch (event) { 3383 if (!netxen_config_checkdev(dev))
3309 case NETDEV_UP: 3384 goto done;
3310 netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_UP); 3385 adapter = netdev_priv(dev);
3311 netxen_list_config_vlan_ip(adapter, ifa, NX_IP_UP); 3386 netxen_list_config_ip(adapter, ifa, ip_event);
3312 break; 3387 }
3313 case NETDEV_DOWN:
3314 netxen_config_ipaddr(adapter, ifa->ifa_address, NX_IP_DOWN);
3315 netxen_list_config_vlan_ip(adapter, ifa, NX_IP_DOWN);
3316 break;
3317 default:
3318 break;
3319 } 3388 }
3320
3321done: 3389done:
3322 return NOTIFY_DONE; 3390 return NOTIFY_DONE;
3323} 3391}
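The inetaddr notifier, by contrast, delivers a struct in_ifaddr; the owning device is recovered through ifa->ifa_dev->dev, which is why both notifier paths above begin with the same NULL check and VLAN unwrapping. A sketch with illustrative names:

#include <linux/inetdevice.h>
#include <linux/notifier.h>

static int example_inetaddr_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct in_ifaddr *ifa = ptr;
	struct net_device *dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;

	if (dev && (event == NETDEV_UP || event == NETDEV_DOWN))
		pr_info("%s: address %pI4 %s\n", dev->name, &ifa->ifa_address,
			event == NETDEV_UP ? "added" : "removed");

	return NOTIFY_DONE;
}

static struct notifier_block example_inetaddr_notifier = {
	.notifier_call = example_inetaddr_event,
};
/* register_inetaddr_notifier(&example_inetaddr_notifier); */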
@@ -3334,7 +3402,7 @@ static void
3334netxen_restore_indev_addr(struct net_device *dev, unsigned long event) 3402netxen_restore_indev_addr(struct net_device *dev, unsigned long event)
3335{ } 3403{ }
3336static void 3404static void
3337netxen_free_vlan_ip_list(struct netxen_adapter *adapter) 3405netxen_free_ip_list(struct netxen_adapter *adapter, bool master)
3338{ } 3406{ }
3339#endif 3407#endif
3340 3408
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 8fd38cb6d26a..91a8fcd6c246 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -312,7 +312,6 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
312 lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev, 312 lrg_buf_cb->skb = netdev_alloc_skb(qdev->ndev,
313 qdev->lrg_buffer_len); 313 qdev->lrg_buffer_len);
314 if (unlikely(!lrg_buf_cb->skb)) { 314 if (unlikely(!lrg_buf_cb->skb)) {
315 netdev_err(qdev->ndev, "failed netdev_alloc_skb()\n");
316 qdev->lrg_buf_skb_check++; 315 qdev->lrg_buf_skb_check++;
317 } else { 316 } else {
318 /* 317 /*
diff --git a/drivers/net/ethernet/qlogic/qlcnic/Makefile b/drivers/net/ethernet/qlogic/qlcnic/Makefile
index 7722a203e388..4b1fb3faa3b7 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/Makefile
+++ b/drivers/net/ethernet/qlogic/qlcnic/Makefile
@@ -8,4 +8,6 @@ qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \
8 qlcnic_ethtool.o qlcnic_ctx.o qlcnic_io.o \ 8 qlcnic_ethtool.o qlcnic_ctx.o qlcnic_io.o \
9 qlcnic_sysfs.o qlcnic_minidump.o qlcnic_83xx_hw.o \ 9 qlcnic_sysfs.o qlcnic_minidump.o qlcnic_83xx_hw.o \
10 qlcnic_83xx_init.o qlcnic_83xx_vnic.o \ 10 qlcnic_83xx_init.o qlcnic_83xx_vnic.o \
11 qlcnic_minidump.o 11 qlcnic_minidump.o qlcnic_sriov_common.o
12
13qlcnic-$(CONFIG_QLCNIC_SRIOV) += qlcnic_sriov_pf.o
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index ba3c72fce1f2..ef55718d06ba 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -37,9 +37,9 @@
37#include "qlcnic_83xx_hw.h" 37#include "qlcnic_83xx_hw.h"
38 38
39#define _QLCNIC_LINUX_MAJOR 5 39#define _QLCNIC_LINUX_MAJOR 5
40#define _QLCNIC_LINUX_MINOR 1 40#define _QLCNIC_LINUX_MINOR 2
41#define _QLCNIC_LINUX_SUBVERSION 35 41#define _QLCNIC_LINUX_SUBVERSION 40
42#define QLCNIC_LINUX_VERSIONID "5.1.35" 42#define QLCNIC_LINUX_VERSIONID "5.2.40"
43#define QLCNIC_DRV_IDC_VER 0x01 43#define QLCNIC_DRV_IDC_VER 0x01
44#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 44#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
45 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) 45 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -449,6 +449,7 @@ struct qlcnic_hardware_context {
449 struct qlc_83xx_idc idc; 449 struct qlc_83xx_idc idc;
450 struct qlc_83xx_fw_info fw_info; 450 struct qlc_83xx_fw_info fw_info;
451 struct qlcnic_intrpt_config *intr_tbl; 451 struct qlcnic_intrpt_config *intr_tbl;
452 struct qlcnic_sriov *sriov;
452 u32 *reg_tbl; 453 u32 *reg_tbl;
453 u32 *ext_reg_tbl; 454 u32 *ext_reg_tbl;
454 u32 mbox_aen[QLC_83XX_MBX_AEN_CNT]; 455 u32 mbox_aen[QLC_83XX_MBX_AEN_CNT];
@@ -896,6 +897,7 @@ struct qlcnic_ipaddr {
896#define QLCNIC_FW_RESET_OWNER 0x2000 897#define QLCNIC_FW_RESET_OWNER 0x2000
897#define QLCNIC_FW_HANG 0x4000 898#define QLCNIC_FW_HANG 0x4000
898#define QLCNIC_FW_LRO_MSS_CAP 0x8000 899#define QLCNIC_FW_LRO_MSS_CAP 0x8000
900#define QLCNIC_TX_INTR_SHARED 0x10000
899#define QLCNIC_IS_MSI_FAMILY(adapter) \ 901#define QLCNIC_IS_MSI_FAMILY(adapter) \
900 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED)) 902 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
901 903
@@ -914,7 +916,9 @@ struct qlcnic_ipaddr {
914#define __QLCNIC_AER 5 916#define __QLCNIC_AER 5
915#define __QLCNIC_DIAG_RES_ALLOC 6 917#define __QLCNIC_DIAG_RES_ALLOC 6
916#define __QLCNIC_LED_ENABLE 7 918#define __QLCNIC_LED_ENABLE 7
917#define __QLCNIC_ELB_INPROGRESS 8 919#define __QLCNIC_ELB_INPROGRESS 8
920#define __QLCNIC_SRIOV_ENABLE 10
921#define __QLCNIC_SRIOV_CAPABLE 11
918 922
919#define QLCNIC_INTERRUPT_TEST 1 923#define QLCNIC_INTERRUPT_TEST 1
920#define QLCNIC_LOOPBACK_TEST 2 924#define QLCNIC_LOOPBACK_TEST 2
@@ -1009,6 +1013,7 @@ struct qlcnic_adapter {
1009 1013
1010 struct qlcnic_filter_hash fhash; 1014 struct qlcnic_filter_hash fhash;
1011 struct qlcnic_filter_hash rx_fhash; 1015 struct qlcnic_filter_hash rx_fhash;
1016 struct list_head vf_mc_list;
1012 1017
1013 spinlock_t tx_clean_lock; 1018 spinlock_t tx_clean_lock;
1014 spinlock_t mac_learn_lock; 1019 spinlock_t mac_learn_lock;
@@ -1051,7 +1056,11 @@ struct qlcnic_info_le {
1051 u8 total_pf; 1056 u8 total_pf;
1052 u8 total_rss_engines; 1057 u8 total_rss_engines;
1053 __le16 max_vports; 1058 __le16 max_vports;
1054 u8 reserved2[64]; 1059 __le16 linkstate_reg_offset;
1060 __le16 bit_offsets;
1061 __le16 max_local_ipv6_addrs;
1062 __le16 max_remote_ipv6_addrs;
1063 u8 reserved2[56];
1055} __packed; 1064} __packed;
1056 1065
1057struct qlcnic_info { 1066struct qlcnic_info {
@@ -1083,6 +1092,10 @@ struct qlcnic_info {
1083 u8 total_pf; 1092 u8 total_pf;
1084 u8 total_rss_engines; 1093 u8 total_rss_engines;
1085 u16 max_vports; 1094 u16 max_vports;
1095 u16 linkstate_reg_offset;
1096 u16 bit_offsets;
1097 u16 max_local_ipv6_addrs;
1098 u16 max_remote_ipv6_addrs;
1086}; 1099};
1087 1100
1088struct qlcnic_pci_info_le { 1101struct qlcnic_pci_info_le {
@@ -1348,6 +1361,7 @@ struct _cdrp_cmd {
1348struct qlcnic_cmd_args { 1361struct qlcnic_cmd_args {
1349 struct _cdrp_cmd req; 1362 struct _cdrp_cmd req;
1350 struct _cdrp_cmd rsp; 1363 struct _cdrp_cmd rsp;
1364 int op_type;
1351}; 1365};
1352 1366
1353int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter); 1367int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter);
@@ -1430,6 +1444,7 @@ void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter,
1430 struct qlcnic_host_rds_ring *rds_ring, u8 ring_id); 1444 struct qlcnic_host_rds_ring *rds_ring, u8 ring_id);
1431int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max); 1445int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max);
1432void qlcnic_set_multi(struct net_device *netdev); 1446void qlcnic_set_multi(struct net_device *netdev);
1447void __qlcnic_set_multi(struct net_device *netdev);
1433int qlcnic_nic_add_mac(struct qlcnic_adapter *, const u8 *); 1448int qlcnic_nic_add_mac(struct qlcnic_adapter *, const u8 *);
1434int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *); 1449int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *);
1435void qlcnic_free_mac_list(struct qlcnic_adapter *adapter); 1450void qlcnic_free_mac_list(struct qlcnic_adapter *adapter);
@@ -1511,6 +1526,12 @@ int qlcnic_reset_npar_config(struct qlcnic_adapter *);
1511int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *); 1526int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *);
1512void qlcnic_add_lb_filter(struct qlcnic_adapter *, struct sk_buff *, int, 1527void qlcnic_add_lb_filter(struct qlcnic_adapter *, struct sk_buff *, int,
1513 __le16); 1528 __le16);
1529int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter);
1530int qlcnic_read_mac_addr(struct qlcnic_adapter *);
1531int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int);
1532void qlcnic_sriov_vf_schedule_multi(struct net_device *);
1533void qlcnic_vf_add_mc_list(struct net_device *);
1534
1514/* 1535/*
1515 * QLOGIC Board information 1536 * QLOGIC Board information
1516 */ 1537 */
@@ -1567,6 +1588,9 @@ struct qlcnic_hardware_ops {
1567 int (*create_rx_ctx) (struct qlcnic_adapter *); 1588 int (*create_rx_ctx) (struct qlcnic_adapter *);
1568 int (*create_tx_ctx) (struct qlcnic_adapter *, 1589 int (*create_tx_ctx) (struct qlcnic_adapter *,
1569 struct qlcnic_host_tx_ring *, int); 1590 struct qlcnic_host_tx_ring *, int);
1591 void (*del_rx_ctx) (struct qlcnic_adapter *);
1592 void (*del_tx_ctx) (struct qlcnic_adapter *,
1593 struct qlcnic_host_tx_ring *);
1570 int (*setup_link_event) (struct qlcnic_adapter *, int); 1594 int (*setup_link_event) (struct qlcnic_adapter *, int);
1571 int (*get_nic_info) (struct qlcnic_adapter *, struct qlcnic_info *, u8); 1595 int (*get_nic_info) (struct qlcnic_adapter *, struct qlcnic_info *, u8);
1572 int (*get_pci_info) (struct qlcnic_adapter *, struct qlcnic_pci_info *); 1596 int (*get_pci_info) (struct qlcnic_adapter *, struct qlcnic_pci_info *);
@@ -1635,7 +1659,10 @@ static inline int qlcnic_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
1635static inline int qlcnic_issue_cmd(struct qlcnic_adapter *adapter, 1659static inline int qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
1636 struct qlcnic_cmd_args *cmd) 1660 struct qlcnic_cmd_args *cmd)
1637{ 1661{
1638 return adapter->ahw->hw_ops->mbx_cmd(adapter, cmd); 1662 if (adapter->ahw->hw_ops->mbx_cmd)
1663 return adapter->ahw->hw_ops->mbx_cmd(adapter, cmd);
1664
1665 return -EIO;
1639} 1666}
1640 1667
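With SR-IOV VFs reusing the 83xx ops tables, some hw_ops/nic_ops hooks can legitimately be NULL, so the inline wrappers above now test the pointer before the indirect call and fall back to -EIO (or to a no-op for void hooks). The same guard pattern in isolation, with illustrative types:

#include <errno.h>
#include <stddef.h>

struct hw_ops {				/* illustrative subset */
	int  (*mbx_cmd)(void *ctx);
	void (*add_sysfs)(void *ctx);
};

static int issue_cmd(struct hw_ops *ops, void *ctx)
{
	/* absent hook: report -EIO instead of dereferencing NULL */
	if (ops->mbx_cmd)
		return ops->mbx_cmd(ctx);
	return -EIO;
}

static void add_sysfs(struct hw_ops *ops, void *ctx)
{
	if (ops->add_sysfs)		/* void hook: silently skip */
		ops->add_sysfs(ctx);
}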
1641static inline void qlcnic_get_func_no(struct qlcnic_adapter *adapter) 1668static inline void qlcnic_get_func_no(struct qlcnic_adapter *adapter)
@@ -1655,12 +1682,14 @@ static inline void qlcnic_api_unlock(struct qlcnic_adapter *adapter)
1655 1682
1656static inline void qlcnic_add_sysfs(struct qlcnic_adapter *adapter) 1683static inline void qlcnic_add_sysfs(struct qlcnic_adapter *adapter)
1657{ 1684{
1658 adapter->ahw->hw_ops->add_sysfs(adapter); 1685 if (adapter->ahw->hw_ops->add_sysfs)
1686 adapter->ahw->hw_ops->add_sysfs(adapter);
1659} 1687}
1660 1688
1661static inline void qlcnic_remove_sysfs(struct qlcnic_adapter *adapter) 1689static inline void qlcnic_remove_sysfs(struct qlcnic_adapter *adapter)
1662{ 1690{
1663 adapter->ahw->hw_ops->remove_sysfs(adapter); 1691 if (adapter->ahw->hw_ops->remove_sysfs)
1692 adapter->ahw->hw_ops->remove_sysfs(adapter);
1664} 1693}
1665 1694
1666static inline void 1695static inline void
@@ -1681,6 +1710,17 @@ static inline int qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
1681 return adapter->ahw->hw_ops->create_tx_ctx(adapter, ptr, ring); 1710 return adapter->ahw->hw_ops->create_tx_ctx(adapter, ptr, ring);
1682} 1711}
1683 1712
1713static inline void qlcnic_fw_cmd_del_rx_ctx(struct qlcnic_adapter *adapter)
1714{
1715 return adapter->ahw->hw_ops->del_rx_ctx(adapter);
1716}
1717
1718static inline void qlcnic_fw_cmd_del_tx_ctx(struct qlcnic_adapter *adapter,
1719 struct qlcnic_host_tx_ring *ptr)
1720{
1721 return adapter->ahw->hw_ops->del_tx_ctx(adapter, ptr);
1722}
1723
1684static inline int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, 1724static inline int qlcnic_linkevent_request(struct qlcnic_adapter *adapter,
1685 int enable) 1725 int enable)
1686{ 1726{
@@ -1778,12 +1818,14 @@ static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
1778static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter, 1818static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter,
1779 u32 key) 1819 u32 key)
1780{ 1820{
1781 adapter->nic_ops->request_reset(adapter, key); 1821 if (adapter->nic_ops->request_reset)
1822 adapter->nic_ops->request_reset(adapter, key);
1782} 1823}
1783 1824
1784static inline void qlcnic_cancel_idc_work(struct qlcnic_adapter *adapter) 1825static inline void qlcnic_cancel_idc_work(struct qlcnic_adapter *adapter)
1785{ 1826{
1786 adapter->nic_ops->cancel_idc_work(adapter); 1827 if (adapter->nic_ops->cancel_idc_work)
1828 adapter->nic_ops->cancel_idc_work(adapter);
1787} 1829}
1788 1830
1789static inline irqreturn_t 1831static inline irqreturn_t
@@ -1830,7 +1872,9 @@ extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
1830 } while (0) 1872 } while (0)
1831 1873
1832#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030 1874#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030
1875#define PCI_DEVICE_ID_QLOGIC_VF_QLE834X 0x8430
1833#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020 1876#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
1877
1834static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter) 1878static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter)
1835{ 1879{
1836 unsigned short device = adapter->pdev->device; 1880 unsigned short device = adapter->pdev->device;
@@ -1840,8 +1884,23 @@ static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter)
1840static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter) 1884static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter)
1841{ 1885{
1842 unsigned short device = adapter->pdev->device; 1886 unsigned short device = adapter->pdev->device;
1843 return (device == PCI_DEVICE_ID_QLOGIC_QLE834X) ? true : false; 1887 bool status;
1888
1889 status = ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) ||
1890 (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X)) ? true : false;
1891
1892 return status;
1893}
1894
1895static inline bool qlcnic_sriov_pf_check(struct qlcnic_adapter *adapter)
1896{
1897 return (adapter->ahw->op_mode == QLCNIC_SRIOV_PF_FUNC) ? true : false;
1844} 1898}
1845 1899
1900static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter)
1901{
1902 unsigned short device = adapter->pdev->device;
1846 1903
1904 return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false;
1905}
1847#endif /* __QLCNIC_H_ */ 1906#endif /* __QLCNIC_H_ */
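PF and VF personalities are told apart purely by PCI device ID (0x8030 for the QLE834X PF, 0x8430 for its VF) plus the firmware-reported op_mode; the probe path also marks PFs SR-IOV capable when the extended capability is present in config space. A sketch of that detection, assuming the two IDs above (helper names are illustrative):

#include <linux/pci.h>

#define EX_DEVICE_ID_PF	0x8030		/* QLE834X, from the header above */
#define EX_DEVICE_ID_VF	0x8430

static bool example_is_vf(struct pci_dev *pdev)
{
	return pdev->device == EX_DEVICE_ID_VF;
}

/* PFs expose the SR-IOV extended capability in PCI config space. */
static bool example_sriov_capable(struct pci_dev *pdev)
{
	return pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV) != 0;
}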
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index cd5ae8813cb3..374fa8a3791b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -6,6 +6,7 @@
6 */ 6 */
7 7
8#include "qlcnic.h" 8#include "qlcnic.h"
9#include "qlcnic_sriov.h"
9#include <linux/if_vlan.h> 10#include <linux/if_vlan.h>
10#include <linux/ipv6.h> 11#include <linux/ipv6.h>
11#include <linux/ethtool.h> 12#include <linux/ethtool.h>
@@ -13,100 +14,7 @@
13 14
14#define QLCNIC_MAX_TX_QUEUES 1 15#define QLCNIC_MAX_TX_QUEUES 1
15#define RSS_HASHTYPE_IP_TCP 0x3 16#define RSS_HASHTYPE_IP_TCP 0x3
16 17#define QLC_83XX_FW_MBX_CMD 0
17/* status descriptor mailbox data
18 * @phy_addr: physical address of buffer
19 * @sds_ring_size: buffer size
20 * @intrpt_id: interrupt id
21 * @intrpt_val: source of interrupt
22 */
23struct qlcnic_sds_mbx {
24 u64 phy_addr;
25 u8 rsvd1[16];
26 u16 sds_ring_size;
27 u16 rsvd2[3];
28 u16 intrpt_id;
29 u8 intrpt_val;
30 u8 rsvd3[5];
31} __packed;
32
33/* receive descriptor buffer data
34 * phy_addr_reg: physical address of regular buffer
35 * phy_addr_jmb: physical address of jumbo buffer
36 * reg_ring_sz: size of regular buffer
37 * reg_ring_len: no. of entries in regular buffer
38 * jmb_ring_len: no. of entries in jumbo buffer
39 * jmb_ring_sz: size of jumbo buffer
40 */
41struct qlcnic_rds_mbx {
42 u64 phy_addr_reg;
43 u64 phy_addr_jmb;
44 u16 reg_ring_sz;
45 u16 reg_ring_len;
46 u16 jmb_ring_sz;
47 u16 jmb_ring_len;
48} __packed;
49
50/* host producers for regular and jumbo rings */
51struct __host_producer_mbx {
52 u32 reg_buf;
53 u32 jmb_buf;
54} __packed;
55
56/* Receive context mailbox data outbox registers
57 * @state: state of the context
58 * @vport_id: virtual port id
59 * @context_id: receive context id
60 * @num_pci_func: number of pci functions of the port
61 * @phy_port: physical port id
62 */
63struct qlcnic_rcv_mbx_out {
64 u8 rcv_num;
65 u8 sts_num;
66 u16 ctx_id;
67 u8 state;
68 u8 num_pci_func;
69 u8 phy_port;
70 u8 vport_id;
71 u32 host_csmr[QLCNIC_MAX_RING_SETS];
72 struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
73} __packed;
74
75struct qlcnic_add_rings_mbx_out {
76 u8 rcv_num;
77 u8 sts_num;
78 u16 ctx_id;
79 u32 host_csmr[QLCNIC_MAX_RING_SETS];
80 struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
81} __packed;
82
83/* Transmit context mailbox inbox registers
84 * @phys_addr: DMA address of the transmit buffer
85 * @cnsmr_index: host consumer index
86 * @size: length of transmit buffer ring
87 * @intr_id: interrupt id
88 * @src: src of interrupt
89 */
90struct qlcnic_tx_mbx {
91 u64 phys_addr;
92 u64 cnsmr_index;
93 u16 size;
94 u16 intr_id;
95 u8 src;
96 u8 rsvd[3];
97} __packed;
98
99/* Transmit context mailbox outbox registers
100 * @host_prod: host producer index
101 * @ctx_id: transmit context id
102 * @state: state of the transmit context
103 */
104struct qlcnic_tx_mbx_out {
105 u32 host_prod;
106 u16 ctx_id;
107 u8 state;
108 u8 rsvd;
109} __packed;
110 18
111static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = { 19static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
112 {QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1}, 20 {QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1},
@@ -156,9 +64,11 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
156 {QLCNIC_CMD_SET_LED_CONFIG, 5, 1}, 64 {QLCNIC_CMD_SET_LED_CONFIG, 5, 1},
157 {QLCNIC_CMD_GET_LED_CONFIG, 1, 5}, 65 {QLCNIC_CMD_GET_LED_CONFIG, 1, 5},
158 {QLCNIC_CMD_ADD_RCV_RINGS, 130, 26}, 66 {QLCNIC_CMD_ADD_RCV_RINGS, 130, 26},
67 {QLCNIC_CMD_CONFIG_VPORT, 4, 4},
68 {QLCNIC_CMD_BC_EVENT_SETUP, 2, 1},
159}; 69};
160 70
161static const u32 qlcnic_83xx_ext_reg_tbl[] = { 71const u32 qlcnic_83xx_ext_reg_tbl[] = {
162 0x38CC, /* Global Reset */ 72 0x38CC, /* Global Reset */
163 0x38F0, /* Wildcard */ 73 0x38F0, /* Wildcard */
164 0x38FC, /* Informant */ 74 0x38FC, /* Informant */
@@ -204,7 +114,7 @@ static const u32 qlcnic_83xx_ext_reg_tbl[] = {
204 0x34A4, /* QLC_83XX_ASIC_TEMP */ 114 0x34A4, /* QLC_83XX_ASIC_TEMP */
205}; 115};
206 116
207static const u32 qlcnic_83xx_reg_tbl[] = { 117const u32 qlcnic_83xx_reg_tbl[] = {
208 0x34A8, /* PEG_HALT_STAT1 */ 118 0x34A8, /* PEG_HALT_STAT1 */
209 0x34AC, /* PEG_HALT_STAT2 */ 119 0x34AC, /* PEG_HALT_STAT2 */
210 0x34B0, /* FW_HEARTBEAT */ 120 0x34B0, /* FW_HEARTBEAT */
@@ -247,6 +157,8 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
247 .process_lb_rcv_ring_diag = qlcnic_83xx_process_rcv_ring_diag, 157 .process_lb_rcv_ring_diag = qlcnic_83xx_process_rcv_ring_diag,
248 .create_rx_ctx = qlcnic_83xx_create_rx_ctx, 158 .create_rx_ctx = qlcnic_83xx_create_rx_ctx,
249 .create_tx_ctx = qlcnic_83xx_create_tx_ctx, 159 .create_tx_ctx = qlcnic_83xx_create_tx_ctx,
160 .del_rx_ctx = qlcnic_83xx_del_rx_ctx,
161 .del_tx_ctx = qlcnic_83xx_del_tx_ctx,
250 .setup_link_event = qlcnic_83xx_setup_link_event, 162 .setup_link_event = qlcnic_83xx_setup_link_event,
251 .get_nic_info = qlcnic_83xx_get_nic_info, 163 .get_nic_info = qlcnic_83xx_get_nic_info,
252 .get_pci_info = qlcnic_83xx_get_pci_info, 164 .get_pci_info = qlcnic_83xx_get_pci_info,
@@ -355,14 +267,20 @@ int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr)
355 num_intr)); 267 num_intr));
356 /* account for AEN interrupt MSI-X based interrupts */ 268 /* account for AEN interrupt MSI-X based interrupts */
357 num_msix += 1; 269 num_msix += 1;
358 num_msix += adapter->max_drv_tx_rings; 270
271 if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
272 num_msix += adapter->max_drv_tx_rings;
273
359 err = qlcnic_enable_msix(adapter, num_msix); 274 err = qlcnic_enable_msix(adapter, num_msix);
360 if (err == -ENOMEM) 275 if (err == -ENOMEM)
361 return err; 276 return err;
362 if (adapter->flags & QLCNIC_MSIX_ENABLED) 277 if (adapter->flags & QLCNIC_MSIX_ENABLED)
363 num_msix = adapter->ahw->num_msix; 278 num_msix = adapter->ahw->num_msix;
364 else 279 else {
280 if (qlcnic_sriov_vf_check(adapter))
281 return -EINVAL;
365 num_msix = 1; 282 num_msix = 1;
283 }
366 /* setup interrupt mapping table for fw */ 284 /* setup interrupt mapping table for fw */
367 ahw->intr_tbl = vzalloc(num_msix * 285 ahw->intr_tbl = vzalloc(num_msix *
368 sizeof(struct qlcnic_intrpt_config)); 286 sizeof(struct qlcnic_intrpt_config));
@@ -595,7 +513,7 @@ int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter)
595void qlcnic_83xx_get_func_no(struct qlcnic_adapter *adapter) 513void qlcnic_83xx_get_func_no(struct qlcnic_adapter *adapter)
596{ 514{
597 u32 val = QLCRDX(adapter->ahw, QLCNIC_INFORMANT); 515 u32 val = QLCRDX(adapter->ahw, QLCNIC_INFORMANT);
598 adapter->ahw->pci_func = val & 0xf; 516 adapter->ahw->pci_func = (val >> 24) & 0xff;
599} 517}
600 518
601int qlcnic_83xx_cam_lock(struct qlcnic_adapter *adapter) 519int qlcnic_83xx_cam_lock(struct qlcnic_adapter *adapter)
@@ -707,6 +625,11 @@ void qlcnic_83xx_check_vf(struct qlcnic_adapter *adapter,
707 ahw->fw_hal_version = 2; 625 ahw->fw_hal_version = 2;
708 qlcnic_get_func_no(adapter); 626 qlcnic_get_func_no(adapter);
709 627
628 if (qlcnic_sriov_vf_check(adapter)) {
629 qlcnic_sriov_vf_set_ops(adapter);
630 return;
631 }
632
710 /* Determine function privilege level */ 633 /* Determine function privilege level */
711 op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE); 634 op_mode = QLCRDX(adapter->ahw, QLC_83XX_DRV_OP_MODE);
712 if (op_mode == QLC_83XX_DEFAULT_OPMODE) 635 if (op_mode == QLC_83XX_DEFAULT_OPMODE)
@@ -722,6 +645,9 @@ void qlcnic_83xx_check_vf(struct qlcnic_adapter *adapter,
722 ahw->fw_hal_version); 645 ahw->fw_hal_version);
723 adapter->nic_ops = &qlcnic_vf_ops; 646 adapter->nic_ops = &qlcnic_vf_ops;
724 } else { 647 } else {
648 if (pci_find_ext_capability(adapter->pdev,
649 PCI_EXT_CAP_ID_SRIOV))
650 set_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state);
725 adapter->nic_ops = &qlcnic_83xx_ops; 651 adapter->nic_ops = &qlcnic_83xx_ops;
726 } 652 }
727} 653}
@@ -755,7 +681,7 @@ static void qlcnic_dump_mbx(struct qlcnic_adapter *adapter,
755} 681}
756 682
757/* Mailbox response for mac rcode */ 683/* Mailbox response for mac rcode */
758static u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter) 684u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter)
759{ 685{
760 u32 fw_data; 686 u32 fw_data;
761 u8 mac_cmd_rcode; 687 u8 mac_cmd_rcode;
@@ -769,7 +695,7 @@ static u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter)
769 return 1; 695 return 1;
770} 696}
771 697
772static u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter) 698u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter)
773{ 699{
774 u32 data; 700 u32 data;
775 unsigned long wait_time = 0; 701 unsigned long wait_time = 0;
@@ -884,6 +810,7 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
884 size = ARRAY_SIZE(qlcnic_83xx_mbx_tbl); 810 size = ARRAY_SIZE(qlcnic_83xx_mbx_tbl);
885 for (i = 0; i < size; i++) { 811 for (i = 0; i < size; i++) {
886 if (type == mbx_tbl[i].cmd) { 812 if (type == mbx_tbl[i].cmd) {
813 mbx->op_type = QLC_83XX_FW_MBX_CMD;
887 mbx->req.num = mbx_tbl[i].in_args; 814 mbx->req.num = mbx_tbl[i].in_args;
888 mbx->rsp.num = mbx_tbl[i].out_args; 815 mbx->rsp.num = mbx_tbl[i].out_args;
889 mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32), 816 mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
@@ -901,10 +828,10 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
901 memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num); 828 memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
902 temp = adapter->ahw->fw_hal_version << 29; 829 temp = adapter->ahw->fw_hal_version << 29;
903 mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp); 830 mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp);
904 break; 831 return 0;
905 } 832 }
906 } 833 }
907 return 0; 834 return -EINVAL;
908} 835}
909 836
910void qlcnic_83xx_idc_aen_work(struct work_struct *work) 837void qlcnic_83xx_idc_aen_work(struct work_struct *work)
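qlcnic_83xx_alloc_mbx_args() now returns 0 from inside the table-lookup loop and -EINVAL when it falls through, so a command missing from qlcnic_83xx_mbx_tbl can no longer be issued with zero-sized argument arrays. The lookup pattern in isolation (command IDs are illustrative):

#include <errno.h>
#include <stddef.h>

struct mbx_meta {			/* cmd, number of in/out args */
	unsigned int cmd;
	unsigned int in_args;
	unsigned int out_args;
};

static const struct mbx_meta mbx_tbl[] = {
	{ 0x21, 6, 1 },			/* illustrative command IDs */
	{ 0x23, 4, 4 },
};

/* Look up 'type'; unknown commands fail instead of silently succeeding. */
static int lookup_mbx(unsigned int type, const struct mbx_meta **out)
{
	size_t i;

	for (i = 0; i < sizeof(mbx_tbl) / sizeof(mbx_tbl[0]); i++) {
		if (mbx_tbl[i].cmd == type) {
			*out = &mbx_tbl[i];
			return 0;
		}
	}
	return -EINVAL;
}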
@@ -960,6 +887,9 @@ void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
960 break; 887 break;
961 case QLCNIC_MBX_TIME_EXTEND_EVENT: 888 case QLCNIC_MBX_TIME_EXTEND_EVENT:
962 break; 889 break;
890 case QLCNIC_MBX_BC_EVENT:
891 qlcnic_sriov_handle_bc_event(adapter, event[1]);
892 break;
963 case QLCNIC_MBX_SFP_INSERT_EVENT: 893 case QLCNIC_MBX_SFP_INSERT_EVENT:
964 dev_info(&adapter->pdev->dev, "SFP+ Insert AEN:0x%x.\n", 894 dev_info(&adapter->pdev->dev, "SFP+ Insert AEN:0x%x.\n",
965 QLCNIC_MBX_RSP(event[0])); 895 QLCNIC_MBX_RSP(event[0]));
@@ -1004,7 +934,8 @@ static int qlcnic_83xx_add_rings(struct qlcnic_adapter *adapter)
1004 sds = &recv_ctx->sds_rings[i]; 934 sds = &recv_ctx->sds_rings[i];
1005 sds->consumer = 0; 935 sds->consumer = 0;
1006 memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds)); 936 memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds));
1007 sds_mbx.phy_addr = sds->phys_addr; 937 sds_mbx.phy_addr_low = LSD(sds->phys_addr);
938 sds_mbx.phy_addr_high = MSD(sds->phys_addr);
1008 sds_mbx.sds_ring_size = sds->num_desc; 939 sds_mbx.sds_ring_size = sds->num_desc;
1009 940
1010 if (adapter->flags & QLCNIC_MSIX_ENABLED) 941 if (adapter->flags & QLCNIC_MSIX_ENABLED)
@@ -1050,6 +981,32 @@ out:
1050 return err; 981 return err;
1051} 982}
1052 983
984void qlcnic_83xx_del_rx_ctx(struct qlcnic_adapter *adapter)
985{
986 int err;
987 u32 temp = 0;
988 struct qlcnic_cmd_args cmd;
989 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
990
991 if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_RX_CTX))
992 return;
993
994 if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
995 cmd.req.arg[0] |= (0x3 << 29);
996
997 if (qlcnic_sriov_pf_check(adapter))
998 qlcnic_pf_set_interface_id_del_rx_ctx(adapter, &temp);
999
1000 cmd.req.arg[1] = recv_ctx->context_id | temp;
1001 err = qlcnic_issue_cmd(adapter, &cmd);
1002 if (err)
1003 dev_err(&adapter->pdev->dev,
1004 "Failed to destroy rx ctx in firmware\n");
1005
1006 recv_ctx->state = QLCNIC_HOST_CTX_STATE_FREED;
1007 qlcnic_free_mbx_args(&cmd);
1008}
1009
1053int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter) 1010int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter)
1054{ 1011{
1055 int i, err, index, sds_mbx_size, rds_mbx_size; 1012 int i, err, index, sds_mbx_size, rds_mbx_size;
@@ -1080,9 +1037,17 @@ int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter)
1080 /* set mailbox hdr and capabilities */ 1037 /* set mailbox hdr and capabilities */
1081 qlcnic_alloc_mbx_args(&cmd, adapter, 1038 qlcnic_alloc_mbx_args(&cmd, adapter,
1082 QLCNIC_CMD_CREATE_RX_CTX); 1039 QLCNIC_CMD_CREATE_RX_CTX);
1040
1041 if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
1042 cmd.req.arg[0] |= (0x3 << 29);
1043
1083 cmd.req.arg[1] = cap; 1044 cmd.req.arg[1] = cap;
1084 cmd.req.arg[5] = 1 | (num_rds << 5) | (num_sds << 8) | 1045 cmd.req.arg[5] = 1 | (num_rds << 5) | (num_sds << 8) |
1085 (QLC_83XX_HOST_RDS_MODE_UNIQUE << 16); 1046 (QLC_83XX_HOST_RDS_MODE_UNIQUE << 16);
1047
1048 if (qlcnic_sriov_pf_check(adapter))
1049 qlcnic_pf_set_interface_id_create_rx_ctx(adapter,
1050 &cmd.req.arg[6]);
1086 /* set up status rings, mbx 8-57/87 */ 1051 /* set up status rings, mbx 8-57/87 */
1087 index = QLC_83XX_HOST_SDS_MBX_IDX; 1052 index = QLC_83XX_HOST_SDS_MBX_IDX;
1088 for (i = 0; i < num_sds; i++) { 1053 for (i = 0; i < num_sds; i++) {
@@ -1090,7 +1055,8 @@ int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter)
1090 sds = &recv_ctx->sds_rings[i]; 1055 sds = &recv_ctx->sds_rings[i];
1091 sds->consumer = 0; 1056 sds->consumer = 0;
1092 memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds)); 1057 memset(sds->desc_head, 0, STATUS_DESC_RINGSIZE(sds));
1093 sds_mbx.phy_addr = sds->phys_addr; 1058 sds_mbx.phy_addr_low = LSD(sds->phys_addr);
1059 sds_mbx.phy_addr_high = MSD(sds->phys_addr);
1094 sds_mbx.sds_ring_size = sds->num_desc; 1060 sds_mbx.sds_ring_size = sds->num_desc;
1095 if (adapter->flags & QLCNIC_MSIX_ENABLED) 1061 if (adapter->flags & QLCNIC_MSIX_ENABLED)
1096 intrpt_id = ahw->intr_tbl[i].id; 1062 intrpt_id = ahw->intr_tbl[i].id;
@@ -1110,13 +1076,15 @@ int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *adapter)
1110 rds = &recv_ctx->rds_rings[0]; 1076 rds = &recv_ctx->rds_rings[0];
1111 rds->producer = 0; 1077 rds->producer = 0;
1112 memset(&rds_mbx, 0, rds_mbx_size); 1078 memset(&rds_mbx, 0, rds_mbx_size);
1113 rds_mbx.phy_addr_reg = rds->phys_addr; 1079 rds_mbx.phy_addr_reg_low = LSD(rds->phys_addr);
1080 rds_mbx.phy_addr_reg_high = MSD(rds->phys_addr);
1114 rds_mbx.reg_ring_sz = rds->dma_size; 1081 rds_mbx.reg_ring_sz = rds->dma_size;
1115 rds_mbx.reg_ring_len = rds->num_desc; 1082 rds_mbx.reg_ring_len = rds->num_desc;
1116 /* Jumbo ring */ 1083 /* Jumbo ring */
1117 rds = &recv_ctx->rds_rings[1]; 1084 rds = &recv_ctx->rds_rings[1];
1118 rds->producer = 0; 1085 rds->producer = 0;
1119 rds_mbx.phy_addr_jmb = rds->phys_addr; 1086 rds_mbx.phy_addr_jmb_low = LSD(rds->phys_addr);
1087 rds_mbx.phy_addr_jmb_high = MSD(rds->phys_addr);
1120 rds_mbx.jmb_ring_sz = rds->dma_size; 1088 rds_mbx.jmb_ring_sz = rds->dma_size;
1121 rds_mbx.jmb_ring_len = rds->num_desc; 1089 rds_mbx.jmb_ring_len = rds->num_desc;
1122 buf = &cmd.req.arg[index]; 1090 buf = &cmd.req.arg[index];
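The mailbox structures were reworked to carry 64-bit DMA addresses as explicit low/high u32 pairs; LSD() and MSD() in this driver take the lower and upper 32 bits, which keeps the u32 mailbox image identical on 32- and 64-bit hosts. The same split using the generic kernel helpers:

#include <linux/kernel.h>	/* lower_32_bits()/upper_32_bits() */

/* Split a DMA address into the two u32 mailbox words, as the
 * LSD()/MSD() macros in this driver do.
 */
static void split_dma_addr(u64 addr, u32 *low, u32 *high)
{
	*low  = lower_32_bits(addr);
	*high = upper_32_bits(addr);
}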
@@ -1163,16 +1131,39 @@ out:
1163 return err; 1131 return err;
1164} 1132}
1165 1133
1134void qlcnic_83xx_del_tx_ctx(struct qlcnic_adapter *adapter,
1135 struct qlcnic_host_tx_ring *tx_ring)
1136{
1137 struct qlcnic_cmd_args cmd;
1138 u32 temp = 0;
1139
1140 if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX))
1141 return;
1142
1143 if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
1144 cmd.req.arg[0] |= (0x3 << 29);
1145
1146 if (qlcnic_sriov_pf_check(adapter))
1147 qlcnic_pf_set_interface_id_del_tx_ctx(adapter, &temp);
1148
1149 cmd.req.arg[1] = tx_ring->ctx_id | temp;
1150 if (qlcnic_issue_cmd(adapter, &cmd))
1151 dev_err(&adapter->pdev->dev,
1152 "Failed to destroy tx ctx in firmware\n");
1153 qlcnic_free_mbx_args(&cmd);
1154}
1155
1166int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter, 1156int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
1167 struct qlcnic_host_tx_ring *tx, int ring) 1157 struct qlcnic_host_tx_ring *tx, int ring)
1168{ 1158{
1169 int err; 1159 int err;
1170 u16 msix_id; 1160 u16 msix_id;
1171 u32 *buf, intr_mask; 1161 u32 *buf, intr_mask, temp = 0;
1172 struct qlcnic_cmd_args cmd; 1162 struct qlcnic_cmd_args cmd;
1173 struct qlcnic_tx_mbx mbx; 1163 struct qlcnic_tx_mbx mbx;
1174 struct qlcnic_tx_mbx_out *mbx_out; 1164 struct qlcnic_tx_mbx_out *mbx_out;
1175 struct qlcnic_hardware_context *ahw = adapter->ahw; 1165 struct qlcnic_hardware_context *ahw = adapter->ahw;
1166 u32 msix_vector;
1176 1167
1177 /* Reset host resources */ 1168 /* Reset host resources */
1178 tx->producer = 0; 1169 tx->producer = 0;
@@ -1182,13 +1173,21 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
1182 memset(&mbx, 0, sizeof(struct qlcnic_tx_mbx)); 1173 memset(&mbx, 0, sizeof(struct qlcnic_tx_mbx));
1183 1174
1184 /* setup mailbox inbox registers */ 1175 /* setup mailbox inbox registers */
1185 mbx.phys_addr = tx->phys_addr; 1176 mbx.phys_addr_low = LSD(tx->phys_addr);
1186 mbx.cnsmr_index = tx->hw_cons_phys_addr; 1177 mbx.phys_addr_high = MSD(tx->phys_addr);
1178 mbx.cnsmr_index_low = LSD(tx->hw_cons_phys_addr);
1179 mbx.cnsmr_index_high = MSD(tx->hw_cons_phys_addr);
1187 mbx.size = tx->num_desc; 1180 mbx.size = tx->num_desc;
1188 if (adapter->flags & QLCNIC_MSIX_ENABLED) 1181 if (adapter->flags & QLCNIC_MSIX_ENABLED) {
1189 msix_id = ahw->intr_tbl[adapter->max_sds_rings + ring].id; 1182 if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
1190 else 1183 msix_vector = adapter->max_sds_rings + ring;
1184 else
1185 msix_vector = adapter->max_sds_rings - 1;
1186 msix_id = ahw->intr_tbl[msix_vector].id;
1187 } else {
1191 msix_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID); 1188 msix_id = QLCRDX(ahw, QLCNIC_DEF_INT_ID);
1189 }
1190
1192 if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST) 1191 if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
1193 mbx.intr_id = msix_id; 1192 mbx.intr_id = msix_id;
1194 else 1193 else
@@ -1196,8 +1195,15 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
1196 mbx.src = 0; 1195 mbx.src = 0;
1197 1196
1198 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX); 1197 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CREATE_TX_CTX);
1198
1199 if (qlcnic_sriov_pf_check(adapter) || qlcnic_sriov_vf_check(adapter))
1200 cmd.req.arg[0] |= (0x3 << 29);
1201
1202 if (qlcnic_sriov_pf_check(adapter))
1203 qlcnic_pf_set_interface_id_create_tx_ctx(adapter, &temp);
1204
1199 cmd.req.arg[1] = QLCNIC_CAP0_LEGACY_CONTEXT; 1205 cmd.req.arg[1] = QLCNIC_CAP0_LEGACY_CONTEXT;
1200 cmd.req.arg[5] = QLCNIC_MAX_TX_QUEUES; 1206 cmd.req.arg[5] = QLCNIC_MAX_TX_QUEUES | temp;
1201 buf = &cmd.req.arg[6]; 1207 buf = &cmd.req.arg[6];
1202 memcpy(buf, &mbx, sizeof(struct qlcnic_tx_mbx)); 1208 memcpy(buf, &mbx, sizeof(struct qlcnic_tx_mbx));
1203 /* send the mailbox command */ 1209 /* send the mailbox command */
@@ -1210,7 +1216,8 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter,
1210 mbx_out = (struct qlcnic_tx_mbx_out *)&cmd.rsp.arg[2]; 1216 mbx_out = (struct qlcnic_tx_mbx_out *)&cmd.rsp.arg[2];
1211 tx->crb_cmd_producer = ahw->pci_base0 + mbx_out->host_prod; 1217 tx->crb_cmd_producer = ahw->pci_base0 + mbx_out->host_prod;
1212 tx->ctx_id = mbx_out->ctx_id; 1218 tx->ctx_id = mbx_out->ctx_id;
1213 if (adapter->flags & QLCNIC_MSIX_ENABLED) { 1219 if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
1220 !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
1214 intr_mask = ahw->intr_tbl[adapter->max_sds_rings + ring].src; 1221 intr_mask = ahw->intr_tbl[adapter->max_sds_rings + ring].src;
1215 tx->crb_intr_mask = ahw->pci_base0 + intr_mask; 1222 tx->crb_intr_mask = ahw->pci_base0 + intr_mask;
1216 } 1223 }
@@ -1373,12 +1380,60 @@ mbx_err:
1373 } 1380 }
1374} 1381}
1375 1382
1383int qlcnic_83xx_set_led(struct net_device *netdev,
1384 enum ethtool_phys_id_state state)
1385{
1386 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1387 int err = -EIO, active = 1;
1388
1389 if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
1390 netdev_warn(netdev,
1391 "LED test is not supported in non-privileged mode\n");
1392 return -EOPNOTSUPP;
1393 }
1394
1395 switch (state) {
1396 case ETHTOOL_ID_ACTIVE:
1397 if (test_and_set_bit(__QLCNIC_LED_ENABLE, &adapter->state))
1398 return -EBUSY;
1399
1400 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
1401 break;
1402
1403 err = qlcnic_83xx_config_led(adapter, active, 0);
1404 if (err)
1405 netdev_err(netdev, "Failed to set LED blink state\n");
1406 break;
1407 case ETHTOOL_ID_INACTIVE:
1408 active = 0;
1409
1410 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
1411 break;
1412
1413 err = qlcnic_83xx_config_led(adapter, active, 0);
1414 if (err)
1415 netdev_err(netdev, "Failed to reset LED blink state\n");
1416 break;
1417
1418 default:
1419 return -EINVAL;
1420 }
1421
1422 if (!active || err)
1423 clear_bit(__QLCNIC_LED_ENABLE, &adapter->state);
1424
1425 return err;
1426}
1427
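The new qlcnic_83xx_set_led() above implements ethtool's set_phys_id contract: reject a second concurrent blink with -EBUSY, start blinking on ETHTOOL_ID_ACTIVE, and restore the LED on ETHTOOL_ID_INACTIVE. A bare skeleton of that contract (names illustrative):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Minimal set_phys_id skeleton: the core calls ACTIVE once, then
 * INACTIVE when the user stops the test.
 */
static int example_set_phys_id(struct net_device *netdev,
			       enum ethtool_phys_id_state state)
{
	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* start blinking in hardware; 0 = driver handles timing */
		return 0;
	case ETHTOOL_ID_INACTIVE:
		/* restore normal LED behaviour */
		return 0;
	default:
		return -EINVAL;
	}
}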
1376void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter, 1428void qlcnic_83xx_register_nic_idc_func(struct qlcnic_adapter *adapter,
1377 int enable) 1429 int enable)
1378{ 1430{
1379 struct qlcnic_cmd_args cmd; 1431 struct qlcnic_cmd_args cmd;
1380 int status; 1432 int status;
1381 1433
1434 if (qlcnic_sriov_vf_check(adapter))
1435 return;
1436
1382 if (enable) { 1437 if (enable) {
1383 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INIT_NIC_FUNC); 1438 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INIT_NIC_FUNC);
1384 cmd.req.arg[1] = BIT_0 | BIT_31; 1439 cmd.req.arg[1] = BIT_0 | BIT_31;
@@ -1441,24 +1496,35 @@ int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *adapter, int enable)
1441 return err; 1496 return err;
1442} 1497}
1443 1498
1499static void qlcnic_83xx_set_interface_id_promisc(struct qlcnic_adapter *adapter,
1500 u32 *interface_id)
1501{
1502 if (qlcnic_sriov_pf_check(adapter)) {
1503 qlcnic_pf_set_interface_id_promisc(adapter, interface_id);
1504 } else {
1505 if (!qlcnic_sriov_vf_check(adapter))
1506 *interface_id = adapter->recv_ctx->context_id << 16;
1507 }
1508}
1509
1444int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode) 1510int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
1445{ 1511{
1446 int err; 1512 int err;
1447 u32 temp; 1513 u32 temp = 0;
1448 struct qlcnic_cmd_args cmd; 1514 struct qlcnic_cmd_args cmd;
1449 1515
1450 if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED) 1516 if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
1451 return -EIO; 1517 return -EIO;
1452 1518
1453 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_MAC_RX_MODE); 1519 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_MAC_RX_MODE);
1454 temp = adapter->recv_ctx->context_id << 16; 1520 qlcnic_83xx_set_interface_id_promisc(adapter, &temp);
1455 cmd.req.arg[1] = (mode ? 1 : 0) | temp; 1521 cmd.req.arg[1] = (mode ? 1 : 0) | temp;
1456 err = qlcnic_issue_cmd(adapter, &cmd); 1522 err = qlcnic_issue_cmd(adapter, &cmd);
1457 if (err) 1523 if (err)
1458 dev_info(&adapter->pdev->dev, 1524 dev_info(&adapter->pdev->dev,
1459 "Promiscous mode config failed\n"); 1525 "Promiscous mode config failed\n");
1460 qlcnic_free_mbx_args(&cmd);
1461 1526
1527 qlcnic_free_mbx_args(&cmd);
1462 return err; 1528 return err;
1463} 1529}
1464 1530
@@ -1598,21 +1664,31 @@ int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
1598 return status; 1664 return status;
1599} 1665}
1600 1666
1667static void qlcnic_83xx_set_interface_id_ipaddr(struct qlcnic_adapter *adapter,
1668 u32 *interface_id)
1669{
1670 if (qlcnic_sriov_pf_check(adapter)) {
1671 qlcnic_pf_set_interface_id_ipaddr(adapter, interface_id);
1672 } else {
1673 if (!qlcnic_sriov_vf_check(adapter))
1674 *interface_id = adapter->recv_ctx->context_id << 16;
1675 }
1676}
1677
1601void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip, 1678void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip,
1602 int mode) 1679 int mode)
1603{ 1680{
1604 int err; 1681 int err;
1605 u32 temp, temp_ip; 1682 u32 temp = 0, temp_ip;
1606 struct qlcnic_cmd_args cmd; 1683 struct qlcnic_cmd_args cmd;
1607 1684
1608 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_IP_ADDR); 1685 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIGURE_IP_ADDR);
1609 if (mode == QLCNIC_IP_UP) { 1686 qlcnic_83xx_set_interface_id_ipaddr(adapter, &temp);
1610 temp = adapter->recv_ctx->context_id << 16; 1687
1688 if (mode == QLCNIC_IP_UP)
1611 cmd.req.arg[1] = 1 | temp; 1689 cmd.req.arg[1] = 1 | temp;
1612 } else { 1690 else
1613 temp = adapter->recv_ctx->context_id << 16;
1614 cmd.req.arg[1] = 2 | temp; 1691 cmd.req.arg[1] = 2 | temp;
1615 }
1616 1692
1617 /* 1693 /*
1618 * Adapter needs IP address in network byte order. 1694 * Adapter needs IP address in network byte order.
@@ -1629,6 +1705,7 @@ void qlcnic_83xx_config_ipaddr(struct qlcnic_adapter *adapter, __be32 ip,
1629 dev_err(&adapter->netdev->dev, 1705 dev_err(&adapter->netdev->dev,
1630 "could not notify %s IP 0x%x request\n", 1706 "could not notify %s IP 0x%x request\n",
1631 (mode == QLCNIC_IP_UP) ? "Add" : "Remove", ip); 1707 (mode == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
1708
1632 qlcnic_free_mbx_args(&cmd); 1709 qlcnic_free_mbx_args(&cmd);
1633} 1710}
1634 1711
@@ -1695,11 +1772,22 @@ int qlcnic_83xx_config_rss(struct qlcnic_adapter *adapter, int enable)
1695 1772
1696} 1773}
1697 1774
1775static void qlcnic_83xx_set_interface_id_macaddr(struct qlcnic_adapter *adapter,
1776 u32 *interface_id)
1777{
1778 if (qlcnic_sriov_pf_check(adapter)) {
1779 qlcnic_pf_set_interface_id_macaddr(adapter, interface_id);
1780 } else {
1781 if (!qlcnic_sriov_vf_check(adapter))
1782 *interface_id = adapter->recv_ctx->context_id << 16;
1783 }
1784}
1785
1698int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr, 1786int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
1699 __le16 vlan_id, u8 op) 1787 __le16 vlan_id, u8 op)
1700{ 1788{
1701 int err; 1789 int err;
1702 u32 *buf; 1790 u32 *buf, temp = 0;
1703 struct qlcnic_cmd_args cmd; 1791 struct qlcnic_cmd_args cmd;
1704 struct qlcnic_macvlan_mbx mv; 1792 struct qlcnic_macvlan_mbx mv;
1705 1793
@@ -1709,11 +1797,17 @@ int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
1709 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN); 1797 err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
1710 if (err) 1798 if (err)
1711 return err; 1799 return err;
1712 cmd.req.arg[1] = op | (1 << 8) |
1713 (adapter->recv_ctx->context_id << 16);
1714 1800
1801 cmd.req.arg[1] = op | (1 << 8);
1802 qlcnic_83xx_set_interface_id_macaddr(adapter, &temp);
1803 cmd.req.arg[1] |= temp;
1715 mv.vlan = le16_to_cpu(vlan_id); 1804 mv.vlan = le16_to_cpu(vlan_id);
1716 memcpy(&mv.mac, addr, ETH_ALEN); 1805 mv.mac_addr0 = addr[0];
1806 mv.mac_addr1 = addr[1];
1807 mv.mac_addr2 = addr[2];
1808 mv.mac_addr3 = addr[3];
1809 mv.mac_addr4 = addr[4];
1810 mv.mac_addr5 = addr[5];
1717 buf = &cmd.req.arg[2]; 1811 buf = &cmd.req.arg[2];
1718 memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx)); 1812 memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
1719 err = qlcnic_issue_cmd(adapter, &cmd); 1813 err = qlcnic_issue_cmd(adapter, &cmd);
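The MAC/VLAN mailbox no longer memcpy()s the 6-byte address into the struct: qlcnic_macvlan_mbx is now laid out endian-conditionally (see the header diff below), so byte-wise stores keep mac_addr0..5 meaning first..last wire byte on any host. A reduced sketch of the idea, with an illustrative struct:

#include <linux/if_ether.h>
#include <linux/types.h>

struct macvlan_words {			/* illustrative, mirrors the idea */
	u8 mac_addr0, mac_addr1, mac_addr2;
	u8 mac_addr3, mac_addr4, mac_addr5;
	u16 vlan;
};

/* Assign bytes individually so the mapping from wire byte to named
 * field never depends on host endianness.
 */
static void fill_mac(struct macvlan_words *mv, const u8 addr[ETH_ALEN])
{
	mv->mac_addr0 = addr[0];
	mv->mac_addr1 = addr[1];
	mv->mac_addr2 = addr[2];
	mv->mac_addr3 = addr[3];
	mv->mac_addr4 = addr[4];
	mv->mac_addr5 = addr[5];
}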
@@ -2002,14 +2096,17 @@ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
2002int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *adapter, bool op_type) 2096int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *adapter, bool op_type)
2003{ 2097{
2004 int i, index, err; 2098 int i, index, err;
2005 bool type;
2006 u8 max_ints; 2099 u8 max_ints;
2007 u32 val, temp; 2100 u32 val, temp, type;
2008 struct qlcnic_cmd_args cmd; 2101 struct qlcnic_cmd_args cmd;
2009 2102
2010 max_ints = adapter->ahw->num_msix - 1; 2103 max_ints = adapter->ahw->num_msix - 1;
2011 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTRPT); 2104 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_INTRPT);
2012 cmd.req.arg[1] = max_ints; 2105 cmd.req.arg[1] = max_ints;
2106
2107 if (qlcnic_sriov_vf_check(adapter))
2108 cmd.req.arg[1] |= (adapter->ahw->pci_func << 8) | BIT_16;
2109
2013 for (i = 0, index = 2; i < max_ints; i++) { 2110 for (i = 0, index = 2; i < max_ints; i++) {
2014 type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL; 2111 type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL;
2015 val = type | (adapter->ahw->intr_tbl[i].type << 4); 2112 val = type | (adapter->ahw->intr_tbl[i].type << 4);
@@ -2163,7 +2260,7 @@ static int qlcnic_83xx_poll_flash_status_reg(struct qlcnic_adapter *adapter)
2163 return 0; 2260 return 0;
2164} 2261}
2165 2262
2166static int qlcnic_83xx_enable_flash_write_op(struct qlcnic_adapter *adapter) 2263int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *adapter)
2167{ 2264{
2168 int ret; 2265 int ret;
2169 u32 cmd; 2266 u32 cmd;
@@ -2181,7 +2278,7 @@ static int qlcnic_83xx_enable_flash_write_op(struct qlcnic_adapter *adapter)
2181 return 0; 2278 return 0;
2182} 2279}
2183 2280
2184static int qlcnic_83xx_disable_flash_write_op(struct qlcnic_adapter *adapter) 2281int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *adapter)
2185{ 2282{
2186 int ret; 2283 int ret;
2187 2284
@@ -2255,7 +2352,7 @@ int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *adapter,
2255 return -EIO; 2352 return -EIO;
2256 2353
2257 if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { 2354 if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
2258 ret = qlcnic_83xx_enable_flash_write_op(adapter); 2355 ret = qlcnic_83xx_enable_flash_write(adapter);
2259 if (ret) { 2356 if (ret) {
2260 qlcnic_83xx_unlock_flash(adapter); 2357 qlcnic_83xx_unlock_flash(adapter);
2261 dev_err(&adapter->pdev->dev, 2358 dev_err(&adapter->pdev->dev,
@@ -2297,7 +2394,7 @@ int qlcnic_83xx_erase_flash_sector(struct qlcnic_adapter *adapter,
2297 } 2394 }
2298 2395
2299 if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) { 2396 if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
2300 ret = qlcnic_83xx_disable_flash_write_op(adapter); 2397 ret = qlcnic_83xx_disable_flash_write(adapter);
2301 if (ret) { 2398 if (ret) {
2302 qlcnic_83xx_unlock_flash(adapter); 2399 qlcnic_83xx_unlock_flash(adapter);
2303 dev_err(&adapter->pdev->dev, 2400 dev_err(&adapter->pdev->dev,
@@ -2337,8 +2434,8 @@ int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *adapter, u32 addr,
2337 u32 temp; 2434 u32 temp;
2338 int ret = -EIO; 2435 int ret = -EIO;
2339 2436
2340 if ((count < QLC_83XX_FLASH_BULK_WRITE_MIN) || 2437 if ((count < QLC_83XX_FLASH_WRITE_MIN) ||
2341 (count > QLC_83XX_FLASH_BULK_WRITE_MAX)) { 2438 (count > QLC_83XX_FLASH_WRITE_MAX)) {
2342 dev_err(&adapter->pdev->dev, 2439 dev_err(&adapter->pdev->dev,
2343 "%s: Invalid word count\n", __func__); 2440 "%s: Invalid word count\n", __func__);
2344 return -EIO; 2441 return -EIO;
@@ -2616,13 +2713,19 @@ int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
2616 2713
2617int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter) 2714int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter)
2618{ 2715{
2716 u8 pci_func;
2619 int err; 2717 int err;
2620 u32 config = 0, state; 2718 u32 config = 0, state;
2621 struct qlcnic_cmd_args cmd; 2719 struct qlcnic_cmd_args cmd;
2622 struct qlcnic_hardware_context *ahw = adapter->ahw; 2720 struct qlcnic_hardware_context *ahw = adapter->ahw;
2623 2721
2624 state = readl(ahw->pci_base0 + QLC_83XX_LINK_STATE(ahw->pci_func)); 2722 if (qlcnic_sriov_vf_check(adapter))
2625 if (!QLC_83xx_FUNC_VAL(state, ahw->pci_func)) { 2723 pci_func = adapter->portnum;
2724 else
2725 pci_func = ahw->pci_func;
2726
2727 state = readl(ahw->pci_base0 + QLC_83XX_LINK_STATE(pci_func));
2728 if (!QLC_83xx_FUNC_VAL(state, pci_func)) {
2626 dev_info(&adapter->pdev->dev, "link state down\n"); 2729 dev_info(&adapter->pdev->dev, "link state down\n");
2627 return config; 2730 return config;
2628 } 2731 }
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 61f81f6c84a9..32ed4b4c4976 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -12,6 +12,8 @@
12#include <linux/etherdevice.h> 12#include <linux/etherdevice.h>
13#include "qlcnic_hw.h" 13#include "qlcnic_hw.h"
14 14
15#define QLCNIC_83XX_BAR0_LENGTH 0x4000
16
15/* Directly mapped registers */ 17/* Directly mapped registers */
16#define QLC_83XX_CRB_WIN_BASE 0x3800 18#define QLC_83XX_CRB_WIN_BASE 0x3800
17#define QLC_83XX_CRB_WIN_FUNC(f) (QLC_83XX_CRB_WIN_BASE+((f)*4)) 19#define QLC_83XX_CRB_WIN_FUNC(f) (QLC_83XX_CRB_WIN_BASE+((f)*4))
@@ -86,6 +88,153 @@
86 88
87#define QLC_83XX_MAX_RESET_SEQ_ENTRIES 16 89#define QLC_83XX_MAX_RESET_SEQ_ENTRIES 16
88 90
91/* status descriptor mailbox data
92 * @phy_addr_{low|high}: physical address of buffer
93 * @sds_ring_size: buffer size
94 * @intrpt_id: interrupt id
95 * @intrpt_val: source of interrupt
96 */
97struct qlcnic_sds_mbx {
98 u32 phy_addr_low;
99 u32 phy_addr_high;
100 u32 rsvd1[4];
101#if defined(__LITTLE_ENDIAN)
102 u16 sds_ring_size;
103 u16 rsvd2;
104 u16 rsvd3[2];
105 u16 intrpt_id;
106 u8 intrpt_val;
107 u8 rsvd4;
108#elif defined(__BIG_ENDIAN)
109 u16 rsvd2;
110 u16 sds_ring_size;
111 u16 rsvd3[2];
112 u8 rsvd4;
113 u8 intrpt_val;
114 u16 intrpt_id;
115#endif
116 u32 rsvd5;
117} __packed;
118
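A note on the #if defined(__LITTLE_ENDIAN) ladders used throughout these mailbox structures: the firmware interprets each 32-bit mailbox word as a single numeric quantity, so on big-endian hosts the sub-word fields are declared in reverse order to keep the numeric value of every word identical to the little-endian layout; the MMIO accessors then take care of bus byte order. A minimal user-space sketch of the idea (hypothetical names, a hedged illustration rather than driver code):

#include <stdint.h>
#include <stdio.h>

/* Two u16 fields sharing one 32-bit mailbox word.  Swapping the
 * declaration order on big-endian hosts keeps the *numeric* value of
 * the word identical, so a subsequent writel() (which emits the word
 * little-endian on the bus) presents the same layout to the firmware. */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
struct mbx_word { uint16_t ring_size; uint16_t intr_id; };
#else
struct mbx_word { uint16_t intr_id; uint16_t ring_size; };
#endif

int main(void)
{
	union { struct mbx_word w; uint32_t word; } u;

	u.w.ring_size = 0x1234;
	u.w.intr_id   = 0x5678;
	/* Prints 0x56781234 on both byte orders. */
	printf("0x%08x\n", (unsigned int)u.word);
	return 0;
}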
119/* receive descriptor buffer data
120 * phy_addr_reg_{low|high}: physical address of regular buffer
121 * phy_addr_jmb_{low|high}: physical address of jumbo buffer
122 * reg_ring_sz: size of regular buffer
123 * reg_ring_len: no. of entries in regular buffer
124 * jmb_ring_len: no. of entries in jumbo buffer
125 * jmb_ring_sz: size of jumbo buffer
126 */
127struct qlcnic_rds_mbx {
128 u32 phy_addr_reg_low;
129 u32 phy_addr_reg_high;
130 u32 phy_addr_jmb_low;
131 u32 phy_addr_jmb_high;
132#if defined(__LITTLE_ENDIAN)
133 u16 reg_ring_sz;
134 u16 reg_ring_len;
135 u16 jmb_ring_sz;
136 u16 jmb_ring_len;
137#elif defined(__BIG_ENDIAN)
138 u16 reg_ring_len;
139 u16 reg_ring_sz;
140 u16 jmb_ring_len;
141 u16 jmb_ring_sz;
142#endif
143} __packed;
144
145/* host producers for regular and jumbo rings */
146struct __host_producer_mbx {
147 u32 reg_buf;
148 u32 jmb_buf;
149} __packed;
150
151/* Receive context mailbox data outbox registers
152 * @state: state of the context
153 * @vport_id: virtual port id
 154 * @ctx_id: receive context id
155 * @num_pci_func: number of pci functions of the port
156 * @phy_port: physical port id
157 */
158struct qlcnic_rcv_mbx_out {
159#if defined(__LITTLE_ENDIAN)
160 u8 rcv_num;
161 u8 sts_num;
162 u16 ctx_id;
163 u8 state;
164 u8 num_pci_func;
165 u8 phy_port;
166 u8 vport_id;
167#elif defined(__BIG_ENDIAN)
168 u16 ctx_id;
169 u8 sts_num;
170 u8 rcv_num;
171 u8 vport_id;
172 u8 phy_port;
173 u8 num_pci_func;
174 u8 state;
175#endif
176 u32 host_csmr[QLCNIC_MAX_RING_SETS];
177 struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
178} __packed;
179
180struct qlcnic_add_rings_mbx_out {
181#if defined(__LITTLE_ENDIAN)
182 u8 rcv_num;
183 u8 sts_num;
184 u16 ctx_id;
185#elif defined(__BIG_ENDIAN)
186 u16 ctx_id;
187 u8 sts_num;
188 u8 rcv_num;
189#endif
190 u32 host_csmr[QLCNIC_MAX_RING_SETS];
191 struct __host_producer_mbx host_prod[QLCNIC_MAX_RING_SETS];
192} __packed;
193
194/* Transmit context mailbox inbox registers
195 * @phys_addr_{low|high}: DMA address of the transmit buffer
196 * @cnsmr_index_{low|high}: host consumer index
 197 * @size: length of transmit buffer ring
 198 * @intr_id: interrupt id
 199 * @src: source of interrupt
200 */
201struct qlcnic_tx_mbx {
202 u32 phys_addr_low;
203 u32 phys_addr_high;
204 u32 cnsmr_index_low;
205 u32 cnsmr_index_high;
206#if defined(__LITTLE_ENDIAN)
207 u16 size;
208 u16 intr_id;
209 u8 src;
210 u8 rsvd[3];
211#elif defined(__BIG_ENDIAN)
212 u16 intr_id;
213 u16 size;
214 u8 rsvd[3];
215 u8 src;
216#endif
217} __packed;
218
219/* Transmit context mailbox outbox registers
220 * @host_prod: host producer index
221 * @ctx_id: transmit context id
222 * @state: state of the transmit context
223 */
224
225struct qlcnic_tx_mbx_out {
226 u32 host_prod;
227#if defined(__LITTLE_ENDIAN)
228 u16 ctx_id;
229 u8 state;
230 u8 rsvd;
231#elif defined(__BIG_ENDIAN)
232 u8 rsvd;
233 u8 state;
234 u16 ctx_id;
235#endif
236} __packed;
237
89struct qlcnic_intrpt_config { 238struct qlcnic_intrpt_config {
90 u8 type; 239 u8 type;
91 u8 enabled; 240 u8 enabled;
@@ -94,8 +243,23 @@ struct qlcnic_intrpt_config {
94}; 243};
95 244
96struct qlcnic_macvlan_mbx { 245struct qlcnic_macvlan_mbx {
97 u8 mac[ETH_ALEN]; 246#if defined(__LITTLE_ENDIAN)
247 u8 mac_addr0;
248 u8 mac_addr1;
249 u8 mac_addr2;
250 u8 mac_addr3;
251 u8 mac_addr4;
252 u8 mac_addr5;
98 u16 vlan; 253 u16 vlan;
254#elif defined(__BIG_ENDIAN)
255 u8 mac_addr3;
256 u8 mac_addr2;
257 u8 mac_addr1;
258 u8 mac_addr0;
259 u16 vlan;
260 u8 mac_addr5;
261 u8 mac_addr4;
262#endif
99}; 263};
100 264
101struct qlc_83xx_fw_info { 265struct qlc_83xx_fw_info {
@@ -226,6 +390,7 @@ struct qlc_83xx_idc {
226#define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val) (val & 0x20000) 390#define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val) (val & 0x20000)
227#define QLC_83XX_VIRTUAL_NIC_MODE 0xFF 391#define QLC_83XX_VIRTUAL_NIC_MODE 0xFF
228#define QLC_83XX_DEFAULT_MODE 0x0 392#define QLC_83XX_DEFAULT_MODE 0x0
393#define QLC_83XX_SRIOV_MODE 0x1
229#define QLCNIC_BRDTYPE_83XX_10G 0x0083 394#define QLCNIC_BRDTYPE_83XX_10G 0x0083
230 395
231#define QLC_83XX_FLASH_SPI_STATUS 0x2808E010 396#define QLC_83XX_FLASH_SPI_STATUS 0x2808E010
@@ -242,8 +407,8 @@ struct qlc_83xx_idc {
242#define QLC_83XX_FLASH_BULK_WRITE_CMD 0xcadcadca 407#define QLC_83XX_FLASH_BULK_WRITE_CMD 0xcadcadca
243#define QLC_83XX_FLASH_READ_RETRY_COUNT 5000 408#define QLC_83XX_FLASH_READ_RETRY_COUNT 5000
244#define QLC_83XX_FLASH_STATUS_READY 0x6 409#define QLC_83XX_FLASH_STATUS_READY 0x6
245#define QLC_83XX_FLASH_BULK_WRITE_MIN 2 410#define QLC_83XX_FLASH_WRITE_MIN 2
246#define QLC_83XX_FLASH_BULK_WRITE_MAX 64 411#define QLC_83XX_FLASH_WRITE_MAX 64
247#define QLC_83XX_FLASH_STATUS_REG_POLL_DELAY 1 412#define QLC_83XX_FLASH_STATUS_REG_POLL_DELAY 1
248#define QLC_83XX_ERASE_MODE 1 413#define QLC_83XX_ERASE_MODE 1
249#define QLC_83XX_WRITE_MODE 2 414#define QLC_83XX_WRITE_MODE 2
@@ -351,6 +516,9 @@ int qlcnic_ind_rd(struct qlcnic_adapter *, u32);
351int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *); 516int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *);
352int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *, 517int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *,
353 struct qlcnic_host_tx_ring *, int); 518 struct qlcnic_host_tx_ring *, int);
519void qlcnic_83xx_del_rx_ctx(struct qlcnic_adapter *);
520void qlcnic_83xx_del_tx_ctx(struct qlcnic_adapter *,
521 struct qlcnic_host_tx_ring *);
354int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8); 522int qlcnic_83xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
355int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *, int); 523int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *, int);
356void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *); 524void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *);
@@ -401,7 +569,7 @@ int qlcnic_83xx_read_flash_descriptor_table(struct qlcnic_adapter *);
401int qlcnic_83xx_flash_read32(struct qlcnic_adapter *, u32, u8 *, int); 569int qlcnic_83xx_flash_read32(struct qlcnic_adapter *, u32, u8 *, int);
402int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *, 570int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *,
403 u32, u8 *, int); 571 u32, u8 *, int);
404int qlcnic_83xx_init(struct qlcnic_adapter *); 572int qlcnic_83xx_init(struct qlcnic_adapter *, int);
405int qlcnic_83xx_idc_ready_state_entry(struct qlcnic_adapter *); 573int qlcnic_83xx_idc_ready_state_entry(struct qlcnic_adapter *);
406int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev); 574int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev);
407void qlcnic_83xx_idc_poll_dev_state(struct work_struct *); 575void qlcnic_83xx_idc_poll_dev_state(struct work_struct *);
@@ -434,5 +602,10 @@ int qlcnic_83xx_get_regs_len(struct qlcnic_adapter *);
434int qlcnic_83xx_get_registers(struct qlcnic_adapter *, u32 *); 602int qlcnic_83xx_get_registers(struct qlcnic_adapter *, u32 *);
435int qlcnic_83xx_loopback_test(struct net_device *, u8); 603int qlcnic_83xx_loopback_test(struct net_device *, u8);
436int qlcnic_83xx_interrupt_test(struct net_device *); 604int qlcnic_83xx_interrupt_test(struct net_device *);
605int qlcnic_83xx_set_led(struct net_device *, enum ethtool_phys_id_state);
437int qlcnic_83xx_flash_test(struct qlcnic_adapter *); 606int qlcnic_83xx_flash_test(struct qlcnic_adapter *);
607int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *);
608int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *);
609u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *);
610u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *);
438#endif 611#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 5c033f268ca5..c302d118a0d0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -5,6 +5,7 @@
5 * See LICENSE.qlcnic for copyright and licensing details. 5 * See LICENSE.qlcnic for copyright and licensing details.
6 */ 6 */
7 7
8#include "qlcnic_sriov.h"
8#include "qlcnic.h" 9#include "qlcnic.h"
9#include "qlcnic_hw.h" 10#include "qlcnic_hw.h"
10 11
@@ -25,12 +26,12 @@
25#define QLC_83XX_OPCODE_POLL_READ_LIST 0x0100 26#define QLC_83XX_OPCODE_POLL_READ_LIST 0x0100
26 27
27static int qlcnic_83xx_init_default_driver(struct qlcnic_adapter *adapter); 28static int qlcnic_83xx_init_default_driver(struct qlcnic_adapter *adapter);
28static int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter);
29static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev); 29static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev);
30static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter); 30static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter);
31 31
32/* Template header */ 32/* Template header */
33struct qlc_83xx_reset_hdr { 33struct qlc_83xx_reset_hdr {
34#if defined(__LITTLE_ENDIAN)
34 u16 version; 35 u16 version;
35 u16 signature; 36 u16 signature;
36 u16 size; 37 u16 size;
@@ -39,14 +40,31 @@ struct qlc_83xx_reset_hdr {
39 u16 checksum; 40 u16 checksum;
40 u16 init_offset; 41 u16 init_offset;
41 u16 start_offset; 42 u16 start_offset;
43#elif defined(__BIG_ENDIAN)
44 u16 signature;
45 u16 version;
46 u16 entries;
47 u16 size;
48 u16 checksum;
49 u16 hdr_size;
50 u16 start_offset;
51 u16 init_offset;
52#endif
42} __packed; 53} __packed;
43 54
44/* Command entry header. */ 55/* Command entry header. */
45struct qlc_83xx_entry_hdr { 56struct qlc_83xx_entry_hdr {
46 u16 cmd; 57#if defined(__LITTLE_ENDIAN)
47 u16 size; 58 u16 cmd;
48 u16 count; 59 u16 size;
49 u16 delay; 60 u16 count;
61 u16 delay;
62#elif defined(__BIG_ENDIAN)
63 u16 size;
64 u16 cmd;
65 u16 delay;
66 u16 count;
67#endif
50} __packed; 68} __packed;
51 69
52/* Generic poll command */ 70/* Generic poll command */
@@ -60,10 +78,17 @@ struct qlc_83xx_rmw {
60 u32 mask; 78 u32 mask;
61 u32 xor_value; 79 u32 xor_value;
62 u32 or_value; 80 u32 or_value;
81#if defined(__LITTLE_ENDIAN)
63 u8 shl; 82 u8 shl;
64 u8 shr; 83 u8 shr;
65 u8 index_a; 84 u8 index_a;
66 u8 rsvd; 85 u8 rsvd;
86#elif defined(__BIG_ENDIAN)
87 u8 rsvd;
88 u8 index_a;
89 u8 shr;
90 u8 shl;
91#endif
67} __packed; 92} __packed;
68 93
69/* Generic command with 2 DWORD */ 94/* Generic command with 2 DWORD */
@@ -1893,6 +1918,9 @@ int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *adapter)
1893 qlcnic_get_func_no(adapter); 1918 qlcnic_get_func_no(adapter);
1894 op_mode = QLCRDX(ahw, QLC_83XX_DRV_OP_MODE); 1919 op_mode = QLCRDX(ahw, QLC_83XX_DRV_OP_MODE);
1895 1920
1921 if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state))
1922 op_mode = QLC_83XX_DEFAULT_OPMODE;
1923
1896 if (op_mode == QLC_83XX_DEFAULT_OPMODE) { 1924 if (op_mode == QLC_83XX_DEFAULT_OPMODE) {
1897 adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver; 1925 adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
1898 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry; 1926 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
@@ -1922,6 +1950,16 @@ int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
1922 ahw->max_mac_filters = nic_info.max_mac_filters; 1950 ahw->max_mac_filters = nic_info.max_mac_filters;
1923 ahw->max_mtu = nic_info.max_mtu; 1951 ahw->max_mtu = nic_info.max_mtu;
1924 1952
 1953 /* VNIC mode is detected by BIT_23 in capabilities. This bit is also
 1954 * set when the device is SR-IOV capable. VNIC and SR-IOV are mutually
 1955 * exclusive, so for an SR-IOV capable device load the driver in
 1956 * default mode.
 1957 */
1958 if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state)) {
1959 ahw->nic_mode = QLC_83XX_DEFAULT_MODE;
1960 return ahw->nic_mode;
1961 }
1962
1925 if (ahw->capabilities & BIT_23) 1963 if (ahw->capabilities & BIT_23)
1926 ahw->nic_mode = QLC_83XX_VIRTUAL_NIC_MODE; 1964 ahw->nic_mode = QLC_83XX_VIRTUAL_NIC_MODE;
1927 else 1965 else
@@ -1930,7 +1968,7 @@ int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
1930 return ahw->nic_mode; 1968 return ahw->nic_mode;
1931} 1969}
1932 1970
1933static int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter) 1971int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
1934{ 1972{
1935 int ret; 1973 int ret;
1936 1974
@@ -2008,10 +2046,13 @@ static void qlcnic_83xx_clear_function_resources(struct qlcnic_adapter *adapter)
2008 } 2046 }
2009} 2047}
2010 2048
2011int qlcnic_83xx_init(struct qlcnic_adapter *adapter) 2049int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
2012{ 2050{
2013 struct qlcnic_hardware_context *ahw = adapter->ahw; 2051 struct qlcnic_hardware_context *ahw = adapter->ahw;
2014 2052
2053 if (qlcnic_sriov_vf_check(adapter))
2054 return qlcnic_sriov_vf_init(adapter, pci_using_dac);
2055
2015 if (qlcnic_83xx_check_hw_status(adapter)) 2056 if (qlcnic_83xx_check_hw_status(adapter))
2016 return -EIO; 2057 return -EIO;
2017 2058
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index a69097c6b84d..43562c256379 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -382,8 +382,7 @@ out_free_rq:
382 return err; 382 return err;
383} 383}
384 384
385static void 385void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *adapter)
386qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
387{ 386{
388 int err; 387 int err;
389 struct qlcnic_cmd_args cmd; 388 struct qlcnic_cmd_args cmd;
@@ -422,22 +421,20 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
422 421
423 rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx); 422 rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
424 rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size, 423 rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
425 &rq_phys_addr, GFP_KERNEL); 424 &rq_phys_addr, GFP_KERNEL | __GFP_ZERO);
426 if (!rq_addr) 425 if (!rq_addr)
427 return -ENOMEM; 426 return -ENOMEM;
428 427
429 rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx); 428 rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
430 rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size, 429 rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
431 &rsp_phys_addr, GFP_KERNEL); 430 &rsp_phys_addr, GFP_KERNEL | __GFP_ZERO);
432 if (!rsp_addr) { 431 if (!rsp_addr) {
433 err = -ENOMEM; 432 err = -ENOMEM;
434 goto out_free_rq; 433 goto out_free_rq;
435 } 434 }
436 435
437 memset(rq_addr, 0, rq_size);
438 prq = rq_addr; 436 prq = rq_addr;
439 437
440 memset(rsp_addr, 0, rsp_size);
441 prsp = rsp_addr; 438 prsp = rsp_addr;
442 439
443 prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr); 440 prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
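The memset removals in this file all follow one mechanical conversion: a dma_alloc_coherent() followed by a manual memset() becomes a single allocation with __GFP_ZERO, making the zeroing explicit in the allocation flags rather than a separate pass over the buffer. A hedged sketch of the shape of the change (placeholder names, not driver code):

#include <linux/dma-mapping.h>

static void *alloc_zeroed_dma(struct device *dev, size_t size,
			      dma_addr_t *handle)
{
	/*
	 * Before: buf = dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
	 *         if (!buf) return NULL;
	 *         memset(buf, 0, size);
	 * After: the allocator hands back a zeroed buffer up front.
	 */
	return dma_alloc_coherent(dev, size, handle,
				  GFP_KERNEL | __GFP_ZERO);
}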
@@ -486,13 +483,13 @@ out_free_rq:
486 return err; 483 return err;
487} 484}
488 485
489static void 486void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *adapter,
490qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter, 487 struct qlcnic_host_tx_ring *tx_ring)
491 struct qlcnic_host_tx_ring *tx_ring)
492{ 488{
493 struct qlcnic_cmd_args cmd; 489 struct qlcnic_cmd_args cmd;
494 490
495 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX); 491 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DESTROY_TX_CTX);
492
496 cmd.req.arg[1] = tx_ring->ctx_id; 493 cmd.req.arg[1] = tx_ring->ctx_id;
497 if (qlcnic_issue_cmd(adapter, &cmd)) 494 if (qlcnic_issue_cmd(adapter, &cmd))
498 dev_err(&adapter->pdev->dev, 495 dev_err(&adapter->pdev->dev,
@@ -532,20 +529,15 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
532 ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32), 529 ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32),
533 &tx_ring->hw_cons_phys_addr, 530 &tx_ring->hw_cons_phys_addr,
534 GFP_KERNEL); 531 GFP_KERNEL);
535 532 if (ptr == NULL)
536 if (ptr == NULL) {
537 dev_err(&pdev->dev, "failed to allocate tx consumer\n");
538 return -ENOMEM; 533 return -ENOMEM;
539 } 534
540 tx_ring->hw_consumer = ptr; 535 tx_ring->hw_consumer = ptr;
541 /* cmd desc ring */ 536 /* cmd desc ring */
542 addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring), 537 addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
543 &tx_ring->phys_addr, 538 &tx_ring->phys_addr,
544 GFP_KERNEL); 539 GFP_KERNEL);
545
546 if (addr == NULL) { 540 if (addr == NULL) {
547 dev_err(&pdev->dev,
548 "failed to allocate tx desc ring\n");
549 err = -ENOMEM; 541 err = -ENOMEM;
550 goto err_out_free; 542 goto err_out_free;
551 } 543 }
@@ -556,11 +548,9 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
556 for (ring = 0; ring < adapter->max_rds_rings; ring++) { 548 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
557 rds_ring = &recv_ctx->rds_rings[ring]; 549 rds_ring = &recv_ctx->rds_rings[ring];
558 addr = dma_alloc_coherent(&adapter->pdev->dev, 550 addr = dma_alloc_coherent(&adapter->pdev->dev,
559 RCV_DESC_RINGSIZE(rds_ring), 551 RCV_DESC_RINGSIZE(rds_ring),
560 &rds_ring->phys_addr, GFP_KERNEL); 552 &rds_ring->phys_addr, GFP_KERNEL);
561 if (addr == NULL) { 553 if (addr == NULL) {
562 dev_err(&pdev->dev,
563 "failed to allocate rds ring [%d]\n", ring);
564 err = -ENOMEM; 554 err = -ENOMEM;
565 goto err_out_free; 555 goto err_out_free;
566 } 556 }
@@ -572,11 +562,9 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
572 sds_ring = &recv_ctx->sds_rings[ring]; 562 sds_ring = &recv_ctx->sds_rings[ring];
573 563
574 addr = dma_alloc_coherent(&adapter->pdev->dev, 564 addr = dma_alloc_coherent(&adapter->pdev->dev,
575 STATUS_DESC_RINGSIZE(sds_ring), 565 STATUS_DESC_RINGSIZE(sds_ring),
576 &sds_ring->phys_addr, GFP_KERNEL); 566 &sds_ring->phys_addr, GFP_KERNEL);
577 if (addr == NULL) { 567 if (addr == NULL) {
578 dev_err(&pdev->dev,
579 "failed to allocate sds ring [%d]\n", ring);
580 err = -ENOMEM; 568 err = -ENOMEM;
581 goto err_out_free; 569 goto err_out_free;
582 } 570 }
@@ -616,13 +604,12 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
616 &dev->tx_ring[ring], 604 &dev->tx_ring[ring],
617 ring); 605 ring);
618 if (err) { 606 if (err) {
619 qlcnic_fw_cmd_destroy_rx_ctx(dev); 607 qlcnic_fw_cmd_del_rx_ctx(dev);
620 if (ring == 0) 608 if (ring == 0)
621 goto err_out; 609 goto err_out;
622 610
623 for (i = 0; i < ring; i++) 611 for (i = 0; i < ring; i++)
624 qlcnic_fw_cmd_destroy_tx_ctx(dev, 612 qlcnic_fw_cmd_del_tx_ctx(dev, &dev->tx_ring[i]);
625 &dev->tx_ring[i]);
626 613
627 goto err_out; 614 goto err_out;
628 } 615 }
@@ -644,10 +631,10 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
644 int ring; 631 int ring;
645 632
646 if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) { 633 if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
647 qlcnic_fw_cmd_destroy_rx_ctx(adapter); 634 qlcnic_fw_cmd_del_rx_ctx(adapter);
648 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) 635 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++)
649 qlcnic_fw_cmd_destroy_tx_ctx(adapter, 636 qlcnic_fw_cmd_del_tx_ctx(adapter,
650 &adapter->tx_ring[ring]); 637 &adapter->tx_ring[ring]);
651 638
652 if (qlcnic_83xx_check(adapter) && 639 if (qlcnic_83xx_check(adapter) &&
653 (adapter->flags & QLCNIC_MSIX_ENABLED)) { 640 (adapter->flags & QLCNIC_MSIX_ENABLED)) {
@@ -655,7 +642,7 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
655 qlcnic_83xx_config_intrpt(adapter, 0); 642 qlcnic_83xx_config_intrpt(adapter, 0);
656 } 643 }
657 /* Allow dma queues to drain after context reset */ 644 /* Allow dma queues to drain after context reset */
658 mdelay(20); 645 msleep(20);
659 } 646 }
660} 647}
661 648
@@ -753,10 +740,9 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
753 size_t nic_size = sizeof(struct qlcnic_info_le); 740 size_t nic_size = sizeof(struct qlcnic_info_le);
754 741
755 nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, 742 nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
756 &nic_dma_t, GFP_KERNEL); 743 &nic_dma_t, GFP_KERNEL | __GFP_ZERO);
757 if (!nic_info_addr) 744 if (!nic_info_addr)
758 return -ENOMEM; 745 return -ENOMEM;
759 memset(nic_info_addr, 0, nic_size);
760 746
761 nic_info = nic_info_addr; 747 nic_info = nic_info_addr;
762 748
@@ -804,11 +790,10 @@ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
804 return err; 790 return err;
805 791
806 nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size, 792 nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
807 &nic_dma_t, GFP_KERNEL); 793 &nic_dma_t, GFP_KERNEL | __GFP_ZERO);
808 if (!nic_info_addr) 794 if (!nic_info_addr)
809 return -ENOMEM; 795 return -ENOMEM;
810 796
811 memset(nic_info_addr, 0, nic_size);
812 nic_info = nic_info_addr; 797 nic_info = nic_info_addr;
813 798
814 nic_info->pci_func = cpu_to_le16(nic->pci_func); 799 nic_info->pci_func = cpu_to_le16(nic->pci_func);
@@ -854,10 +839,10 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
854 size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC; 839 size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;
855 840
856 pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size, 841 pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
857 &pci_info_dma_t, GFP_KERNEL); 842 &pci_info_dma_t,
843 GFP_KERNEL | __GFP_ZERO);
858 if (!pci_info_addr) 844 if (!pci_info_addr)
859 return -ENOMEM; 845 return -ENOMEM;
860 memset(pci_info_addr, 0, pci_size);
861 846
862 npar = pci_info_addr; 847 npar = pci_info_addr;
863 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO); 848 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_PCI_INFO);
@@ -949,12 +934,9 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
949 } 934 }
950 935
951 stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size, 936 stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
952 &stats_dma_t, GFP_KERNEL); 937 &stats_dma_t, GFP_KERNEL | __GFP_ZERO);
953 if (!stats_addr) { 938 if (!stats_addr)
954 dev_err(&adapter->pdev->dev, "Unable to allocate memory\n");
955 return -ENOMEM; 939 return -ENOMEM;
956 }
957 memset(stats_addr, 0, stats_size);
958 940
959 arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12; 941 arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
960 arg1 |= rx_tx << 15 | stats_size << 16; 942 arg1 |= rx_tx << 15 | stats_size << 16;
@@ -1003,13 +985,10 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
1003 return -ENOMEM; 985 return -ENOMEM;
1004 986
1005 stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size, 987 stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
1006 &stats_dma_t, GFP_KERNEL); 988 &stats_dma_t, GFP_KERNEL | __GFP_ZERO);
1007 if (!stats_addr) { 989 if (!stats_addr)
1008 dev_err(&adapter->pdev->dev,
1009 "%s: Unable to allocate memory.\n", __func__);
1010 return -ENOMEM; 990 return -ENOMEM;
1011 } 991
1012 memset(stats_addr, 0, stats_size);
1013 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS); 992 qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS);
1014 cmd.req.arg[1] = stats_size << 16; 993 cmd.req.arg[1] = stats_size << 16;
1015 cmd.req.arg[2] = MSD(stats_dma_t); 994 cmd.req.arg[2] = MSD(stats_dma_t);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 5641f8ec49ab..f4f279d5cba4 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -149,7 +149,8 @@ static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
149 149
150static inline int qlcnic_82xx_statistics(void) 150static inline int qlcnic_82xx_statistics(void)
151{ 151{
152 return QLCNIC_STATS_LEN + ARRAY_SIZE(qlcnic_83xx_mac_stats_strings); 152 return ARRAY_SIZE(qlcnic_device_gstrings_stats) +
153 ARRAY_SIZE(qlcnic_83xx_mac_stats_strings);
153} 154}
154 155
155static inline int qlcnic_83xx_statistics(void) 156static inline int qlcnic_83xx_statistics(void)
@@ -1070,8 +1071,7 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1070 } 1071 }
1071} 1072}
1072 1073
1073static void 1074static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type)
1074qlcnic_fill_stats(u64 *data, void *stats, int type)
1075{ 1075{
1076 if (type == QLCNIC_MAC_STATS) { 1076 if (type == QLCNIC_MAC_STATS) {
1077 struct qlcnic_mac_statistics *mac_stats = 1077 struct qlcnic_mac_statistics *mac_stats =
@@ -1120,6 +1120,7 @@ qlcnic_fill_stats(u64 *data, void *stats, int type)
1120 *data++ = QLCNIC_FILL_STATS(esw_stats->local_frames); 1120 *data++ = QLCNIC_FILL_STATS(esw_stats->local_frames);
1121 *data++ = QLCNIC_FILL_STATS(esw_stats->numbytes); 1121 *data++ = QLCNIC_FILL_STATS(esw_stats->numbytes);
1122 } 1122 }
1123 return data;
1123} 1124}
1124 1125
1125static void qlcnic_get_ethtool_stats(struct net_device *dev, 1126static void qlcnic_get_ethtool_stats(struct net_device *dev,
@@ -1147,7 +1148,7 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev,
1147 /* Retrieve MAC statistics from firmware */ 1148 /* Retrieve MAC statistics from firmware */
1148 memset(&mac_stats, 0, sizeof(struct qlcnic_mac_statistics)); 1149 memset(&mac_stats, 0, sizeof(struct qlcnic_mac_statistics));
1149 qlcnic_get_mac_stats(adapter, &mac_stats); 1150 qlcnic_get_mac_stats(adapter, &mac_stats);
1150 qlcnic_fill_stats(data, &mac_stats, QLCNIC_MAC_STATS); 1151 data = qlcnic_fill_stats(data, &mac_stats, QLCNIC_MAC_STATS);
1151 } 1152 }
1152 1153
1153 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) 1154 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
@@ -1159,7 +1160,7 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev,
1159 if (ret) 1160 if (ret)
1160 return; 1161 return;
1161 1162
1162 qlcnic_fill_stats(data, &port_stats.rx, QLCNIC_ESW_STATS); 1163 data = qlcnic_fill_stats(data, &port_stats.rx, QLCNIC_ESW_STATS);
1163 ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func, 1164 ret = qlcnic_get_port_stats(adapter, adapter->ahw->pci_func,
1164 QLCNIC_QUERY_TX_COUNTER, &port_stats.tx); 1165 QLCNIC_QUERY_TX_COUNTER, &port_stats.tx);
1165 if (ret) 1166 if (ret)
@@ -1176,7 +1177,8 @@ static int qlcnic_set_led(struct net_device *dev,
1176 int err = -EIO, active = 1; 1177 int err = -EIO, active = 1;
1177 1178
1178 if (qlcnic_83xx_check(adapter)) 1179 if (qlcnic_83xx_check(adapter))
1179 return -EOPNOTSUPP; 1180 return qlcnic_83xx_set_led(dev, state);
1181
1180 if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) { 1182 if (adapter->ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
1181 netdev_warn(dev, "LED test not supported for non " 1183 netdev_warn(dev, "LED test not supported for non "
1182 "privilege function\n"); 1184 "privilege function\n");
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
index 44197ca1456c..1cebd8900cf9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hdr.h
@@ -714,7 +714,9 @@ enum {
714 QLCNIC_MGMT_FUNC = 0, 714 QLCNIC_MGMT_FUNC = 0,
715 QLCNIC_PRIV_FUNC = 1, 715 QLCNIC_PRIV_FUNC = 1,
716 QLCNIC_NON_PRIV_FUNC = 2, 716 QLCNIC_NON_PRIV_FUNC = 2,
717 QLCNIC_UNKNOWN_FUNC_MODE = 3 717 QLCNIC_SRIOV_PF_FUNC = 3,
718 QLCNIC_SRIOV_VF_FUNC = 4,
719 QLCNIC_UNKNOWN_FUNC_MODE = 5
718}; 720};
719 721
720enum { 722enum {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index f89cc7a3fe6c..253b3ac16046 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -496,7 +496,7 @@ int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr)
496 return 0; 496 return 0;
497} 497}
498 498
499void qlcnic_set_multi(struct net_device *netdev) 499void __qlcnic_set_multi(struct net_device *netdev)
500{ 500{
501 struct qlcnic_adapter *adapter = netdev_priv(netdev); 501 struct qlcnic_adapter *adapter = netdev_priv(netdev);
502 struct netdev_hw_addr *ha; 502 struct netdev_hw_addr *ha;
@@ -508,7 +508,8 @@ void qlcnic_set_multi(struct net_device *netdev)
508 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) 508 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
509 return; 509 return;
510 510
511 qlcnic_nic_add_mac(adapter, adapter->mac_addr); 511 if (!qlcnic_sriov_vf_check(adapter))
512 qlcnic_nic_add_mac(adapter, adapter->mac_addr);
512 qlcnic_nic_add_mac(adapter, bcast_addr); 513 qlcnic_nic_add_mac(adapter, bcast_addr);
513 514
514 if (netdev->flags & IFF_PROMISC) { 515 if (netdev->flags & IFF_PROMISC) {
@@ -523,23 +524,55 @@ void qlcnic_set_multi(struct net_device *netdev)
523 goto send_fw_cmd; 524 goto send_fw_cmd;
524 } 525 }
525 526
526 if (!netdev_mc_empty(netdev)) { 527 if (!netdev_mc_empty(netdev) && !qlcnic_sriov_vf_check(adapter)) {
527 netdev_for_each_mc_addr(ha, netdev) { 528 netdev_for_each_mc_addr(ha, netdev) {
528 qlcnic_nic_add_mac(adapter, ha->addr); 529 qlcnic_nic_add_mac(adapter, ha->addr);
529 } 530 }
530 } 531 }
531 532
533 if (qlcnic_sriov_vf_check(adapter))
534 qlcnic_vf_add_mc_list(netdev);
535
532send_fw_cmd: 536send_fw_cmd:
533 if (mode == VPORT_MISS_MODE_ACCEPT_ALL && !adapter->fdb_mac_learn) { 537 if (!qlcnic_sriov_vf_check(adapter)) {
534 qlcnic_alloc_lb_filters_mem(adapter); 538 if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
535 adapter->drv_mac_learn = true; 539 !adapter->fdb_mac_learn) {
536 } else { 540 qlcnic_alloc_lb_filters_mem(adapter);
537 adapter->drv_mac_learn = false; 541 adapter->drv_mac_learn = true;
542 } else {
543 adapter->drv_mac_learn = false;
544 }
538 } 545 }
539 546
540 qlcnic_nic_set_promisc(adapter, mode); 547 qlcnic_nic_set_promisc(adapter, mode);
541} 548}
542 549
550void qlcnic_set_multi(struct net_device *netdev)
551{
552 struct qlcnic_adapter *adapter = netdev_priv(netdev);
553 struct netdev_hw_addr *ha;
554 struct qlcnic_mac_list_s *cur;
555
556 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
557 return;
558 if (qlcnic_sriov_vf_check(adapter)) {
559 if (!netdev_mc_empty(netdev)) {
560 netdev_for_each_mc_addr(ha, netdev) {
561 cur = kzalloc(sizeof(struct qlcnic_mac_list_s),
562 GFP_ATOMIC);
563 if (cur == NULL)
564 break;
565 memcpy(cur->mac_addr,
566 ha->addr, ETH_ALEN);
567 list_add_tail(&cur->list, &adapter->vf_mc_list);
568 }
569 }
570 qlcnic_sriov_vf_schedule_multi(adapter->netdev);
571 return;
572 }
573 __qlcnic_set_multi(netdev);
574}
575
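A design note on the split above: ndo_set_rx_mode callbacks run in atomic context (under netif_addr_lock_bh), which is presumably why the VF path copies addresses into vf_mc_list with GFP_ATOMIC and defers the actual work to a workqueue via qlcnic_sriov_vf_schedule_multi() — the VF's back-channel mailbox commands can sleep, so they cannot be issued from here.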
543int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode) 576int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
544{ 577{
545 struct qlcnic_nic_req req; 578 struct qlcnic_nic_req req;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 5b8749eda11f..e862a77a626b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -83,6 +83,8 @@ enum qlcnic_regs {
83#define QLCNIC_CMD_CONFIG_PORT 0x2e 83#define QLCNIC_CMD_CONFIG_PORT 0x2e
84#define QLCNIC_CMD_TEMP_SIZE 0x2f 84#define QLCNIC_CMD_TEMP_SIZE 0x2f
85#define QLCNIC_CMD_GET_TEMP_HDR 0x30 85#define QLCNIC_CMD_GET_TEMP_HDR 0x30
86#define QLCNIC_CMD_BC_EVENT_SETUP 0x31
87#define QLCNIC_CMD_CONFIG_VPORT 0x32
86#define QLCNIC_CMD_GET_MAC_STATS 0x37 88#define QLCNIC_CMD_GET_MAC_STATS 0x37
87#define QLCNIC_CMD_SET_DRV_VER 0x38 89#define QLCNIC_CMD_SET_DRV_VER 0x38
88#define QLCNIC_CMD_CONFIGURE_RSS 0x41 90#define QLCNIC_CMD_CONFIGURE_RSS 0x41
@@ -114,6 +116,7 @@ enum qlcnic_regs {
114#define QLCNIC_SET_FAC_DEF_MAC 5 116#define QLCNIC_SET_FAC_DEF_MAC 5
115 117
116#define QLCNIC_MBX_LINK_EVENT 0x8001 118#define QLCNIC_MBX_LINK_EVENT 0x8001
119#define QLCNIC_MBX_BC_EVENT 0x8002
117#define QLCNIC_MBX_COMP_EVENT 0x8100 120#define QLCNIC_MBX_COMP_EVENT 0x8100
118#define QLCNIC_MBX_REQUEST_EVENT 0x8101 121#define QLCNIC_MBX_REQUEST_EVENT 0x8101
119#define QLCNIC_MBX_TIME_EXTEND_EVENT 0x8102 122#define QLCNIC_MBX_TIME_EXTEND_EVENT 0x8102
@@ -175,6 +178,9 @@ int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
175int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *); 178int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *);
176int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *, 179int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *,
177 struct qlcnic_host_tx_ring *tx_ring, int); 180 struct qlcnic_host_tx_ring *tx_ring, int);
181void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *);
182void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *,
183 struct qlcnic_host_tx_ring *);
178int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, __le16, u8); 184int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, __le16, u8);
179int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *, u8*); 185int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *, u8*);
180int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8); 186int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 0e630061bff3..a85ca63a2c9e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -9,6 +9,7 @@
9#include <linux/if_vlan.h> 9#include <linux/if_vlan.h>
10#include <net/ip.h> 10#include <net/ip.h>
11#include <linux/ipv6.h> 11#include <linux/ipv6.h>
12#include <net/checksum.h>
12 13
13#include "qlcnic.h" 14#include "qlcnic.h"
14 15
@@ -146,7 +147,10 @@ static inline u8 qlcnic_mac_hash(u64 mac)
146static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter, 147static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
147 u16 handle, u8 ring_id) 148 u16 handle, u8 ring_id)
148{ 149{
149 if (adapter->pdev->device == PCI_DEVICE_ID_QLOGIC_QLE834X) 150 unsigned short device = adapter->pdev->device;
151
152 if ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) ||
153 (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X))
150 return handle | (ring_id << 15); 154 return handle | (ring_id << 15);
151 else 155 else
152 return handle; 156 return handle;
@@ -1132,9 +1136,8 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
1132 iph = (struct iphdr *)skb->data; 1136 iph = (struct iphdr *)skb->data;
1133 th = (struct tcphdr *)(skb->data + (iph->ihl << 2)); 1137 th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
1134 length = (iph->ihl << 2) + (th->doff << 2) + lro_length; 1138 length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
1139 csum_replace2(&iph->check, iph->tot_len, htons(length));
1135 iph->tot_len = htons(length); 1140 iph->tot_len = htons(length);
1136 iph->check = 0;
1137 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
1138 } 1141 }
1139 1142
1140 th->psh = push; 1143 th->psh = push;
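csum_replace2() performs an RFC 1624 incremental update: it folds the old and new 16-bit tot_len values into the existing header checksum, which is cheaper than zeroing iph->check and re-summing the whole header with ip_fast_csum(). A user-space sketch of the arithmetic (not the kernel helper itself):

#include <stdint.h>

/* new_check = ~(~old_check + ~old_val + new_val), all in 16-bit
 * one's-complement arithmetic (RFC 1624, equation 3). */
static uint16_t csum_update16(uint16_t check, uint16_t old_val,
			      uint16_t new_val)
{
	uint32_t sum = (uint16_t)~check + (uint16_t)~old_val + new_val;

	sum = (sum & 0xffff) + (sum >> 16);	/* fold the carries */
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}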
@@ -1595,9 +1598,8 @@ qlcnic_83xx_process_lro(struct qlcnic_adapter *adapter,
1595 iph = (struct iphdr *)skb->data; 1598 iph = (struct iphdr *)skb->data;
1596 th = (struct tcphdr *)(skb->data + (iph->ihl << 2)); 1599 th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
1597 length = (iph->ihl << 2) + (th->doff << 2) + lro_length; 1600 length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
1601 csum_replace2(&iph->check, iph->tot_len, htons(length));
1598 iph->tot_len = htons(length); 1602 iph->tot_len = htons(length);
1599 iph->check = 0;
1600 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
1601 } 1603 }
1602 1604
1603 th->psh = push; 1605 th->psh = push;
@@ -1692,6 +1694,29 @@ skip:
1692 return count; 1694 return count;
1693} 1695}
1694 1696
1697static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
1698{
1699 int tx_complete;
1700 int work_done;
1701 struct qlcnic_host_sds_ring *sds_ring;
1702 struct qlcnic_adapter *adapter;
1703 struct qlcnic_host_tx_ring *tx_ring;
1704
1705 sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
1706 adapter = sds_ring->adapter;
1707 /* tx ring count = 1 */
1708 tx_ring = adapter->tx_ring;
1709
1710 tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
1711 work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
1712 if ((work_done < budget) && tx_complete) {
1713 napi_complete(&sds_ring->napi);
1714 qlcnic_83xx_enable_intr(adapter, sds_ring);
1715 }
1716
1717 return work_done;
1718}
1719
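With QLCNIC_TX_INTR_SHARED in effect a VF has no dedicated TX vectors, so this RX poll routine also reaps the single TX ring and only completes NAPI (re-arming the interrupt) once the RX budget is unexhausted and TX processing is done — otherwise pending TX completions would have no vector left to signal them.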
1695static int qlcnic_83xx_poll(struct napi_struct *napi, int budget) 1720static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
1696{ 1721{
1697 int tx_complete; 1722 int tx_complete;
@@ -1769,7 +1794,8 @@ void qlcnic_83xx_napi_enable(struct qlcnic_adapter *adapter)
1769 qlcnic_83xx_enable_intr(adapter, sds_ring); 1794 qlcnic_83xx_enable_intr(adapter, sds_ring);
1770 } 1795 }
1771 1796
1772 if (adapter->flags & QLCNIC_MSIX_ENABLED) { 1797 if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
1798 !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
1773 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { 1799 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
1774 tx_ring = &adapter->tx_ring[ring]; 1800 tx_ring = &adapter->tx_ring[ring];
1775 napi_enable(&tx_ring->napi); 1801 napi_enable(&tx_ring->napi);
@@ -1796,7 +1822,8 @@ void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
1796 napi_disable(&sds_ring->napi); 1822 napi_disable(&sds_ring->napi);
1797 } 1823 }
1798 1824
1799 if (adapter->flags & QLCNIC_MSIX_ENABLED) { 1825 if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
1826 !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
1800 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { 1827 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
1801 tx_ring = &adapter->tx_ring[ring]; 1828 tx_ring = &adapter->tx_ring[ring];
1802 qlcnic_83xx_disable_tx_intr(adapter, tx_ring); 1829 qlcnic_83xx_disable_tx_intr(adapter, tx_ring);
@@ -1809,7 +1836,7 @@ void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
1809int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter, 1836int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
1810 struct net_device *netdev) 1837 struct net_device *netdev)
1811{ 1838{
1812 int ring, max_sds_rings; 1839 int ring, max_sds_rings, temp;
1813 struct qlcnic_host_sds_ring *sds_ring; 1840 struct qlcnic_host_sds_ring *sds_ring;
1814 struct qlcnic_host_tx_ring *tx_ring; 1841 struct qlcnic_host_tx_ring *tx_ring;
1815 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx; 1842 struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
@@ -1820,14 +1847,23 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
1820 max_sds_rings = adapter->max_sds_rings; 1847 max_sds_rings = adapter->max_sds_rings;
1821 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 1848 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1822 sds_ring = &recv_ctx->sds_rings[ring]; 1849 sds_ring = &recv_ctx->sds_rings[ring];
1823 if (adapter->flags & QLCNIC_MSIX_ENABLED) 1850 if (adapter->flags & QLCNIC_MSIX_ENABLED) {
1824 netif_napi_add(netdev, &sds_ring->napi, 1851 if (!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
1825 qlcnic_83xx_rx_poll, 1852 netif_napi_add(netdev, &sds_ring->napi,
1826 QLCNIC_NETDEV_WEIGHT * 2); 1853 qlcnic_83xx_rx_poll,
1827 else 1854 QLCNIC_NETDEV_WEIGHT * 2);
1855 } else {
1856 temp = QLCNIC_NETDEV_WEIGHT / max_sds_rings;
1857 netif_napi_add(netdev, &sds_ring->napi,
1858 qlcnic_83xx_msix_sriov_vf_poll,
1859 temp);
1860 }
1861
1862 } else {
1828 netif_napi_add(netdev, &sds_ring->napi, 1863 netif_napi_add(netdev, &sds_ring->napi,
1829 qlcnic_83xx_poll, 1864 qlcnic_83xx_poll,
1830 QLCNIC_NETDEV_WEIGHT / max_sds_rings); 1865 QLCNIC_NETDEV_WEIGHT / max_sds_rings);
1866 }
1831 } 1867 }
1832 1868
1833 if (qlcnic_alloc_tx_rings(adapter, netdev)) { 1869 if (qlcnic_alloc_tx_rings(adapter, netdev)) {
@@ -1835,7 +1871,8 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
1835 return -ENOMEM; 1871 return -ENOMEM;
1836 } 1872 }
1837 1873
1838 if (adapter->flags & QLCNIC_MSIX_ENABLED) { 1874 if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
1875 !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
1839 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { 1876 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
1840 tx_ring = &adapter->tx_ring[ring]; 1877 tx_ring = &adapter->tx_ring[ring];
1841 netif_napi_add(netdev, &tx_ring->napi, 1878 netif_napi_add(netdev, &tx_ring->napi,
@@ -1861,7 +1898,8 @@ void qlcnic_83xx_napi_del(struct qlcnic_adapter *adapter)
1861 1898
1862 qlcnic_free_sds_rings(adapter->recv_ctx); 1899 qlcnic_free_sds_rings(adapter->recv_ctx);
1863 1900
1864 if ((adapter->flags & QLCNIC_MSIX_ENABLED)) { 1901 if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
1902 !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
1865 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) { 1903 for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
1866 tx_ring = &adapter->tx_ring[ring]; 1904 tx_ring = &adapter->tx_ring[ring];
1867 netif_napi_del(&tx_ring->napi); 1905 netif_napi_del(&tx_ring->napi);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 28a6d4838364..0d00b2bd2c81 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -9,6 +9,7 @@
9#include <linux/interrupt.h> 9#include <linux/interrupt.h>
10 10
11#include "qlcnic.h" 11#include "qlcnic.h"
12#include "qlcnic_sriov.h"
12#include "qlcnic_hw.h" 13#include "qlcnic_hw.h"
13 14
14#include <linux/swab.h> 15#include <linux/swab.h>
@@ -109,6 +110,7 @@ static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter)
109static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = { 110static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
110 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X), 111 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
111 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X), 112 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X),
113 ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE834X),
112 {0,} 114 {0,}
113}; 115};
114 116
@@ -198,8 +200,7 @@ void qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
198 recv_ctx->sds_rings = NULL; 200 recv_ctx->sds_rings = NULL;
199} 201}
200 202
201static int 203int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
202qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
203{ 204{
204 u8 mac_addr[ETH_ALEN]; 205 u8 mac_addr[ETH_ALEN];
205 struct net_device *netdev = adapter->netdev; 206 struct net_device *netdev = adapter->netdev;
@@ -225,6 +226,9 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
225 struct qlcnic_adapter *adapter = netdev_priv(netdev); 226 struct qlcnic_adapter *adapter = netdev_priv(netdev);
226 struct sockaddr *addr = p; 227 struct sockaddr *addr = p;
227 228
229 if (qlcnic_sriov_vf_check(adapter))
230 return -EINVAL;
231
228 if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED)) 232 if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED))
229 return -EOPNOTSUPP; 233 return -EOPNOTSUPP;
230 234
@@ -253,11 +257,8 @@ static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
253 struct qlcnic_adapter *adapter = netdev_priv(netdev); 257 struct qlcnic_adapter *adapter = netdev_priv(netdev);
254 int err = -EOPNOTSUPP; 258 int err = -EOPNOTSUPP;
255 259
256 if (!adapter->fdb_mac_learn) { 260 if (!adapter->fdb_mac_learn)
257 pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n", 261 return ndo_dflt_fdb_del(ndm, tb, netdev, addr);
258 __func__);
259 return err;
260 }
261 262
262 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) { 263 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
263 if (is_unicast_ether_addr(addr)) 264 if (is_unicast_ether_addr(addr))
@@ -277,11 +278,8 @@ static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
277 struct qlcnic_adapter *adapter = netdev_priv(netdev); 278 struct qlcnic_adapter *adapter = netdev_priv(netdev);
278 int err = 0; 279 int err = 0;
279 280
280 if (!adapter->fdb_mac_learn) { 281 if (!adapter->fdb_mac_learn)
281 pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n", 282 return ndo_dflt_fdb_add(ndm, tb, netdev, addr, flags);
282 __func__);
283 return -EOPNOTSUPP;
284 }
285 283
286 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) { 284 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
287 pr_info("%s: FDB e-switch is not enabled\n", __func__); 285 pr_info("%s: FDB e-switch is not enabled\n", __func__);
@@ -306,11 +304,8 @@ static int qlcnic_fdb_dump(struct sk_buff *skb, struct netlink_callback *ncb,
306{ 304{
307 struct qlcnic_adapter *adapter = netdev_priv(netdev); 305 struct qlcnic_adapter *adapter = netdev_priv(netdev);
308 306
309 if (!adapter->fdb_mac_learn) { 307 if (!adapter->fdb_mac_learn)
310 pr_info("%s: Driver mac learn is enabled, FDB operation not allowed\n", 308 return ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
311 __func__);
312 return -EOPNOTSUPP;
313 }
314 309
315 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) 310 if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
316 idx = ndo_dflt_fdb_dump(skb, ncb, netdev, idx); 311 idx = ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
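The three FDB hunks share one behavioral change: when the driver's private MAC learning is disabled, the ops now fall through to the ndo_dflt_fdb_add/del/dump helpers — which operate on the net_device's own unicast/multicast address lists — instead of failing with -EOPNOTSUPP and a log message, so userspace gets standard bridge-fdb semantics on such configurations.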
@@ -387,6 +382,8 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
387 .process_lb_rcv_ring_diag = qlcnic_82xx_process_rcv_ring_diag, 382 .process_lb_rcv_ring_diag = qlcnic_82xx_process_rcv_ring_diag,
388 .create_rx_ctx = qlcnic_82xx_fw_cmd_create_rx_ctx, 383 .create_rx_ctx = qlcnic_82xx_fw_cmd_create_rx_ctx,
389 .create_tx_ctx = qlcnic_82xx_fw_cmd_create_tx_ctx, 384 .create_tx_ctx = qlcnic_82xx_fw_cmd_create_tx_ctx,
385 .del_rx_ctx = qlcnic_82xx_fw_cmd_del_rx_ctx,
386 .del_tx_ctx = qlcnic_82xx_fw_cmd_del_tx_ctx,
390 .setup_link_event = qlcnic_82xx_linkevent_request, 387 .setup_link_event = qlcnic_82xx_linkevent_request,
391 .get_nic_info = qlcnic_82xx_get_nic_info, 388 .get_nic_info = qlcnic_82xx_get_nic_info,
392 .get_pci_info = qlcnic_82xx_get_pci_info, 389 .get_pci_info = qlcnic_82xx_get_pci_info,
@@ -408,7 +405,15 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
408{ 405{
409 struct pci_dev *pdev = adapter->pdev; 406 struct pci_dev *pdev = adapter->pdev;
410 int err = -1, i; 407 int err = -1, i;
411 int max_tx_rings; 408 int max_tx_rings, tx_vector;
409
410 if (adapter->flags & QLCNIC_TX_INTR_SHARED) {
411 max_tx_rings = 0;
412 tx_vector = 0;
413 } else {
414 max_tx_rings = adapter->max_drv_tx_rings;
415 tx_vector = 1;
416 }
412 417
413 if (!adapter->msix_entries) { 418 if (!adapter->msix_entries) {
414 adapter->msix_entries = kcalloc(num_msix, 419 adapter->msix_entries = kcalloc(num_msix,
@@ -431,7 +436,6 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
431 if (qlcnic_83xx_check(adapter)) { 436 if (qlcnic_83xx_check(adapter)) {
432 adapter->ahw->num_msix = num_msix; 437 adapter->ahw->num_msix = num_msix;
433 /* subtract mail box and tx ring vectors */ 438 /* subtract mail box and tx ring vectors */
434 max_tx_rings = adapter->max_drv_tx_rings;
435 adapter->max_sds_rings = num_msix - 439 adapter->max_sds_rings = num_msix -
436 max_tx_rings - 1; 440 max_tx_rings - 1;
437 } else { 441 } else {
@@ -444,11 +448,11 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
444 "Unable to allocate %d MSI-X interrupt vectors\n", 448 "Unable to allocate %d MSI-X interrupt vectors\n",
445 num_msix); 449 num_msix);
446 if (qlcnic_83xx_check(adapter)) { 450 if (qlcnic_83xx_check(adapter)) {
447 if (err < QLC_83XX_MINIMUM_VECTOR) 451 if (err < (QLC_83XX_MINIMUM_VECTOR - tx_vector))
448 return err; 452 return err;
449 err -= (adapter->max_drv_tx_rings + 1); 453 err -= (max_tx_rings + 1);
450 num_msix = rounddown_pow_of_two(err); 454 num_msix = rounddown_pow_of_two(err);
451 num_msix += (adapter->max_drv_tx_rings + 1); 455 num_msix += (max_tx_rings + 1);
452 } else { 456 } else {
453 num_msix = rounddown_pow_of_two(err); 457 num_msix = rounddown_pow_of_two(err);
454 } 458 }
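A worked example of the retry arithmetic, under the assumption max_drv_tx_rings = 1 on a PF (so tx_vector = 1, and the "+ 1" is the mailbox vector): if pci_enable_msix() reports only err = 7 vectors available, the code computes 7 - (1 + 1) = 5 SDS vectors, rounds down to the power of two 4, and retries with num_msix = 4 + 2 = 6. On a VF with QLCNIC_TX_INTR_SHARED, max_tx_rings and tx_vector are both 0, so the same err = 7 yields 7 - 1 = 6, rounded down to 4, for a retry with 5 vectors — TX consumes no vectors of its own.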
@@ -542,11 +546,10 @@ void qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
542 } 546 }
543} 547}
544 548
545static void 549static void qlcnic_cleanup_pci_map(struct qlcnic_hardware_context *ahw)
546qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
547{ 550{
548 if (adapter->ahw->pci_base0 != NULL) 551 if (ahw->pci_base0 != NULL)
549 iounmap(adapter->ahw->pci_base0); 552 iounmap(ahw->pci_base0);
550} 553}
551 554
552static int qlcnic_get_act_pci_func(struct qlcnic_adapter *adapter) 555static int qlcnic_get_act_pci_func(struct qlcnic_adapter *adapter)
@@ -721,6 +724,7 @@ static void qlcnic_get_bar_length(u32 dev_id, ulong *bar)
721 *bar = QLCNIC_82XX_BAR0_LENGTH; 724 *bar = QLCNIC_82XX_BAR0_LENGTH;
722 break; 725 break;
723 case PCI_DEVICE_ID_QLOGIC_QLE834X: 726 case PCI_DEVICE_ID_QLOGIC_QLE834X:
727 case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
724 *bar = QLCNIC_83XX_BAR0_LENGTH; 728 *bar = QLCNIC_83XX_BAR0_LENGTH;
725 break; 729 break;
726 default: 730 default:
@@ -751,7 +755,7 @@ static int qlcnic_setup_pci_map(struct pci_dev *pdev,
751 return -EIO; 755 return -EIO;
752 } 756 }
753 757
754 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20)); 758 dev_info(&pdev->dev, "%dKB memory map\n", (int)(mem_len >> 10));
755 759
756 ahw->pci_base0 = mem_ptr0; 760 ahw->pci_base0 = mem_ptr0;
757 ahw->pci_len0 = pci_len0; 761 ahw->pci_len0 = pci_len0;
@@ -1292,7 +1296,8 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
1292 } 1296 }
1293 } 1297 }
1294 if (qlcnic_83xx_check(adapter) && 1298 if (qlcnic_83xx_check(adapter) &&
1295 (adapter->flags & QLCNIC_MSIX_ENABLED)) { 1299 (adapter->flags & QLCNIC_MSIX_ENABLED) &&
1300 !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
1296 handler = qlcnic_msix_tx_intr; 1301 handler = qlcnic_msix_tx_intr;
1297 for (ring = 0; ring < adapter->max_drv_tx_rings; 1302 for (ring = 0; ring < adapter->max_drv_tx_rings;
1298 ring++) { 1303 ring++) {
@@ -1328,7 +1333,8 @@ qlcnic_free_irq(struct qlcnic_adapter *adapter)
1328 free_irq(sds_ring->irq, sds_ring); 1333 free_irq(sds_ring->irq, sds_ring);
1329 } 1334 }
1330 } 1335 }
1331 if (qlcnic_83xx_check(adapter)) { 1336 if (qlcnic_83xx_check(adapter) &&
1337 !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
1332 for (ring = 0; ring < adapter->max_drv_tx_rings; 1338 for (ring = 0; ring < adapter->max_drv_tx_rings;
1333 ring++) { 1339 ring++) {
1334 tx_ring = &adapter->tx_ring[ring]; 1340 tx_ring = &adapter->tx_ring[ring];
@@ -1418,9 +1424,12 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1418 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state)) 1424 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
1419 return; 1425 return;
1420 1426
1427 if (qlcnic_sriov_vf_check(adapter))
1428 qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
1421 smp_mb(); 1429 smp_mb();
1422 spin_lock(&adapter->tx_clean_lock); 1430 spin_lock(&adapter->tx_clean_lock);
1423 netif_carrier_off(netdev); 1431 netif_carrier_off(netdev);
1432 adapter->ahw->linkup = 0;
1424 netif_tx_disable(netdev); 1433 netif_tx_disable(netdev);
1425 1434
1426 qlcnic_free_mac_list(adapter); 1435 qlcnic_free_mac_list(adapter);
@@ -1685,7 +1694,7 @@ qlcnic_reset_context(struct qlcnic_adapter *adapter)
1685 return err; 1694 return err;
1686} 1695}
1687 1696
1688static int 1697int
1689qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, 1698qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
1690 int pci_using_dac) 1699 int pci_using_dac)
1691{ 1700{
@@ -1820,6 +1829,9 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1820 u32 capab2; 1829 u32 capab2;
1821 char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */ 1830 char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
1822 1831
1832 if (pdev->is_virtfn)
1833 return -ENODEV;
1834
1823 err = pci_enable_device(pdev); 1835 err = pci_enable_device(pdev);
1824 if (err) 1836 if (err)
1825 return err; 1837 return err;
@@ -1844,12 +1856,18 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1844 if (!ahw) 1856 if (!ahw)
1845 goto err_out_free_res; 1857 goto err_out_free_res;
1846 1858
1847 if (ent->device == PCI_DEVICE_ID_QLOGIC_QLE824X) { 1859 switch (ent->device) {
1860 case PCI_DEVICE_ID_QLOGIC_QLE824X:
1848 ahw->hw_ops = &qlcnic_hw_ops; 1861 ahw->hw_ops = &qlcnic_hw_ops;
1849 ahw->reg_tbl = (u32 *)qlcnic_reg_tbl; 1862 ahw->reg_tbl = (u32 *) qlcnic_reg_tbl;
1850 } else if (ent->device == PCI_DEVICE_ID_QLOGIC_QLE834X) { 1863 break;
1864 case PCI_DEVICE_ID_QLOGIC_QLE834X:
1851 qlcnic_83xx_register_map(ahw); 1865 qlcnic_83xx_register_map(ahw);
1852 } else { 1866 break;
1867 case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
1868 qlcnic_sriov_vf_register_map(ahw);
1869 break;
1870 default:
1853 goto err_out_free_hw_res; 1871 goto err_out_free_hw_res;
1854 } 1872 }
1855 1873
@@ -1911,11 +1929,13 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1911 } else if (qlcnic_83xx_check(adapter)) { 1929 } else if (qlcnic_83xx_check(adapter)) {
1912 qlcnic_83xx_check_vf(adapter, ent); 1930 qlcnic_83xx_check_vf(adapter, ent);
1913 adapter->portnum = adapter->ahw->pci_func; 1931 adapter->portnum = adapter->ahw->pci_func;
1914 err = qlcnic_83xx_init(adapter); 1932 err = qlcnic_83xx_init(adapter, pci_using_dac);
1915 if (err) { 1933 if (err) {
1916 dev_err(&pdev->dev, "%s: failed\n", __func__); 1934 dev_err(&pdev->dev, "%s: failed\n", __func__);
1917 goto err_out_free_hw; 1935 goto err_out_free_hw;
1918 } 1936 }
1937 if (qlcnic_sriov_vf_check(adapter))
1938 return 0;
1919 } else { 1939 } else {
1920 dev_err(&pdev->dev, 1940 dev_err(&pdev->dev,
1921 "%s: failed. Please Reboot\n", __func__); 1941 "%s: failed. Please Reboot\n", __func__);
@@ -1932,6 +1952,12 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1932 module_name(THIS_MODULE), 1952 module_name(THIS_MODULE),
1933 board_name, adapter->ahw->revision_id); 1953 board_name, adapter->ahw->revision_id);
1934 } 1954 }
1955
1956 if (qlcnic_83xx_check(adapter) && !qlcnic_use_msi_x &&
1957 !!qlcnic_use_msi)
1958 dev_warn(&pdev->dev,
1959 "83xx adapter do not support MSI interrupts\n");
1960
1935 err = qlcnic_setup_intr(adapter, 0); 1961 err = qlcnic_setup_intr(adapter, 0);
1936 if (err) { 1962 if (err) {
1937 dev_err(&pdev->dev, "Failed to setup interrupt\n"); 1963 dev_err(&pdev->dev, "Failed to setup interrupt\n");
@@ -1999,7 +2025,7 @@ err_out_free_netdev:
1999 free_netdev(netdev); 2025 free_netdev(netdev);
2000 2026
2001err_out_iounmap: 2027err_out_iounmap:
2002 qlcnic_cleanup_pci_map(adapter); 2028 qlcnic_cleanup_pci_map(ahw);
2003 2029
2004err_out_free_hw_res: 2030err_out_free_hw_res:
2005 kfree(ahw); 2031 kfree(ahw);
@@ -2024,11 +2050,13 @@ static void qlcnic_remove(struct pci_dev *pdev)
2024 return; 2050 return;
2025 2051
2026 netdev = adapter->netdev; 2052 netdev = adapter->netdev;
2053 qlcnic_sriov_pf_disable(adapter);
2027 2054
2028 qlcnic_cancel_idc_work(adapter); 2055 qlcnic_cancel_idc_work(adapter);
2029 ahw = adapter->ahw; 2056 ahw = adapter->ahw;
2030 2057
2031 unregister_netdev(netdev); 2058 unregister_netdev(netdev);
2059 qlcnic_sriov_cleanup(adapter);
2032 2060
2033 if (qlcnic_83xx_check(adapter)) { 2061 if (qlcnic_83xx_check(adapter)) {
2034 qlcnic_83xx_free_mbx_intr(adapter); 2062 qlcnic_83xx_free_mbx_intr(adapter);
@@ -2054,7 +2082,7 @@ static void qlcnic_remove(struct pci_dev *pdev)
2054 2082
2055 qlcnic_remove_sysfs(adapter); 2083 qlcnic_remove_sysfs(adapter);
2056 2084
2057 qlcnic_cleanup_pci_map(adapter); 2085 qlcnic_cleanup_pci_map(adapter->ahw);
2058 2086
2059 qlcnic_release_firmware(adapter); 2087 qlcnic_release_firmware(adapter);
2060 2088
@@ -3432,7 +3460,10 @@ static struct pci_driver qlcnic_driver = {
3432 .resume = qlcnic_resume, 3460 .resume = qlcnic_resume,
3433#endif 3461#endif
3434 .shutdown = qlcnic_shutdown, 3462 .shutdown = qlcnic_shutdown,
3435 .err_handler = &qlcnic_err_handler 3463 .err_handler = &qlcnic_err_handler,
3464#ifdef CONFIG_QLCNIC_SRIOV
3465 .sriov_configure = qlcnic_pci_sriov_configure,
3466#endif
3436 3467
3437}; 3468};
3438 3469
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index abbd22c814a6..4b9bab18ebd9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -810,11 +810,8 @@ static int __qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter,
 
 	tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
 				      &tmp_addr_t, GFP_KERNEL);
-	if (!tmp_addr) {
-		dev_err(&adapter->pdev->dev,
-			"Can't get memory for FW dump template\n");
+	if (!tmp_addr)
 		return -ENOMEM;
-	}
 
 	if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_TEMP_HDR)) {
 		err = -ENOMEM;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
new file mode 100644
index 000000000000..b476ebac2439
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -0,0 +1,214 @@
1/*
2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2013 QLogic Corporation
4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
6 */
7
8#ifndef _QLCNIC_83XX_SRIOV_H_
9#define _QLCNIC_83XX_SRIOV_H_
10
11#include "qlcnic.h"
12#include <linux/types.h>
13#include <linux/pci.h>
14
15extern const u32 qlcnic_83xx_reg_tbl[];
16extern const u32 qlcnic_83xx_ext_reg_tbl[];
17
18struct qlcnic_bc_payload {
19 u64 payload[126];
20};
21
22struct qlcnic_bc_hdr {
23#if defined(__LITTLE_ENDIAN)
24 u8 version;
25 u8 msg_type:4;
26 u8 rsvd1:3;
27 u8 op_type:1;
28 u8 num_cmds;
29 u8 num_frags;
30 u8 frag_num;
31 u8 cmd_op;
32 u16 seq_id;
33 u64 rsvd3;
34#elif defined(__BIG_ENDIAN)
35 u8 num_frags;
36 u8 num_cmds;
37 u8 op_type:1;
38 u8 rsvd1:3;
39 u8 msg_type:4;
40 u8 version;
41 u16 seq_id;
42 u8 cmd_op;
43 u8 frag_num;
44 u64 rsvd3;
45#endif
46};
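/* The two endian-specific layouts above must describe the same 16-byte
 * wire header (QLC_BC_HDR_SZ in qlcnic_sriov_common.c): 6 x u8 + 1 x u16 +
 * 1 x u64 = 16 bytes with no padding.  A compile-time guard such as
 *
 *	BUILD_BUG_ON(sizeof(struct qlcnic_bc_hdr) != 16);
 *
 * placed in any function would catch an accidental size change; this is a
 * suggested sketch, not something the driver currently does.
 */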
47
48enum qlcnic_bc_commands {
49 QLCNIC_BC_CMD_CHANNEL_INIT = 0x0,
50 QLCNIC_BC_CMD_CHANNEL_TERM = 0x1,
51};
52
53#define QLC_BC_CMD 1
54
55struct qlcnic_trans_list {
56 /* Lock for manipulating list */
57 spinlock_t lock;
58 struct list_head wait_list;
59 int count;
60};
61
62enum qlcnic_trans_state {
63 QLC_INIT = 0,
64 QLC_WAIT_FOR_CHANNEL_FREE,
65 QLC_WAIT_FOR_RESP,
66 QLC_ABORT,
67 QLC_END,
68};
69
70struct qlcnic_bc_trans {
71 u8 func_id;
72 u8 active;
73 u8 curr_rsp_frag;
74 u8 curr_req_frag;
75 u16 cmd_id;
76 u16 req_pay_size;
77 u16 rsp_pay_size;
78 u32 trans_id;
79 enum qlcnic_trans_state trans_state;
80 struct list_head list;
81 struct qlcnic_bc_hdr *req_hdr;
82 struct qlcnic_bc_hdr *rsp_hdr;
83 struct qlcnic_bc_payload *req_pay;
84 struct qlcnic_bc_payload *rsp_pay;
85 struct completion resp_cmpl;
86 struct qlcnic_vf_info *vf;
87};
88
89enum qlcnic_vf_state {
90 QLC_BC_VF_SEND = 0,
91 QLC_BC_VF_RECV,
92 QLC_BC_VF_CHANNEL,
93 QLC_BC_VF_STATE,
94};
95
96struct qlcnic_resources {
97 u16 num_tx_mac_filters;
98 u16 num_rx_ucast_mac_filters;
99 u16 num_rx_mcast_mac_filters;
100
101 u16 num_txvlan_keys;
102
103 u16 num_rx_queues;
104 u16 num_tx_queues;
105
106 u16 num_rx_buf_rings;
107 u16 num_rx_status_rings;
108
109 u16 num_destip;
110 u32 num_lro_flows_supported;
111 u16 max_local_ipv6_addrs;
112 u16 max_remote_ipv6_addrs;
113};
114
115struct qlcnic_vport {
116 u16 handle;
117 u8 mac[6];
118};
119
120struct qlcnic_vf_info {
121 u8 pci_func;
122 u16 rx_ctx_id;
123 u16 tx_ctx_id;
124 unsigned long state;
125 struct completion ch_free_cmpl;
126 struct work_struct trans_work;
127 /* It synchronizes commands sent from VF */
128 struct mutex send_cmd_lock;
129 struct qlcnic_bc_trans *send_cmd;
130 struct qlcnic_trans_list rcv_act;
131 struct qlcnic_trans_list rcv_pend;
132 struct qlcnic_adapter *adapter;
133 struct qlcnic_vport *vp;
134};
135
136struct qlcnic_async_work_list {
137 struct list_head list;
138 struct work_struct work;
139 void *ptr;
140};
141
142struct qlcnic_back_channel {
143 u16 trans_counter;
144 struct workqueue_struct *bc_trans_wq;
145 struct workqueue_struct *bc_async_wq;
146 struct list_head async_list;
147};
148
149struct qlcnic_sriov {
150 u16 vp_handle;
151 u8 num_vfs;
152 struct qlcnic_resources ff_max;
153 struct qlcnic_back_channel bc;
154 struct qlcnic_vf_info *vf_info;
155};
156
157int qlcnic_sriov_init(struct qlcnic_adapter *, int);
158void qlcnic_sriov_cleanup(struct qlcnic_adapter *);
159void __qlcnic_sriov_cleanup(struct qlcnic_adapter *);
160void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *);
161int qlcnic_sriov_vf_init(struct qlcnic_adapter *, int);
162void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *);
163int qlcnic_sriov_func_to_index(struct qlcnic_adapter *, u8);
164int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8);
165void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *, u32);
166int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *, u8);
167void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *);
168
169static inline bool qlcnic_sriov_enable_check(struct qlcnic_adapter *adapter)
170{
171 return test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state) ? true : false;
172}
173
174#ifdef CONFIG_QLCNIC_SRIOV
175void qlcnic_sriov_pf_process_bc_cmd(struct qlcnic_adapter *,
176 struct qlcnic_bc_trans *,
177 struct qlcnic_cmd_args *);
178void qlcnic_sriov_pf_disable(struct qlcnic_adapter *);
179void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *);
180int qlcnic_pci_sriov_configure(struct pci_dev *, int);
181void qlcnic_pf_set_interface_id_create_rx_ctx(struct qlcnic_adapter *, u32 *);
182void qlcnic_pf_set_interface_id_create_tx_ctx(struct qlcnic_adapter *, u32 *);
183void qlcnic_pf_set_interface_id_del_rx_ctx(struct qlcnic_adapter *, u32 *);
184void qlcnic_pf_set_interface_id_del_tx_ctx(struct qlcnic_adapter *, u32 *);
185void qlcnic_pf_set_interface_id_promisc(struct qlcnic_adapter *, u32 *);
186void qlcnic_pf_set_interface_id_ipaddr(struct qlcnic_adapter *, u32 *);
187void qlcnic_pf_set_interface_id_macaddr(struct qlcnic_adapter *, u32 *);
188#else
189static inline void qlcnic_sriov_pf_disable(struct qlcnic_adapter *adapter) {}
190static inline void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *adapter) {}
191static inline void
192qlcnic_pf_set_interface_id_create_rx_ctx(struct qlcnic_adapter *adapter,
193 u32 *int_id) {}
194static inline void
195qlcnic_pf_set_interface_id_create_tx_ctx(struct qlcnic_adapter *adapter,
196 u32 *int_id) {}
197static inline void
198qlcnic_pf_set_interface_id_del_rx_ctx(struct qlcnic_adapter *adapter,
199 u32 *int_id) {}
200static inline void
201qlcnic_pf_set_interface_id_del_tx_ctx(struct qlcnic_adapter *adapter,
202 u32 *int_id) {}
203static inline void
204qlcnic_pf_set_interface_id_ipaddr(struct qlcnic_adapter *adapter, u32 *int_id)
205{}
206static inline void
207qlcnic_pf_set_interface_id_macaddr(struct qlcnic_adapter *adapter, u32 *int_id)
208{}
209static inline void
210qlcnic_pf_set_interface_id_promisc(struct qlcnic_adapter *adapter, u32 *int_id)
211{}
212#endif
213
214#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
new file mode 100644
index 000000000000..14e9ebd3b73a
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -0,0 +1,1297 @@
1/*
2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2013 QLogic Corporation
4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
6 */
7
8#include "qlcnic_sriov.h"
9#include "qlcnic.h"
10#include "qlcnic_83xx_hw.h"
11#include <linux/types.h>
12
13#define QLC_BC_COMMAND 0
14#define QLC_BC_RESPONSE 1
15
16#define QLC_MBOX_RESP_TIMEOUT (10 * HZ)
17#define QLC_MBOX_CH_FREE_TIMEOUT (10 * HZ)
18
19#define QLC_BC_MSG 0
20#define QLC_BC_CFREE 1
21#define QLC_BC_HDR_SZ 16
22#define QLC_BC_PAYLOAD_SZ (1024 - QLC_BC_HDR_SZ)
23
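/* Framing implied by the defines above: a back-channel mailbox message is
 * at most 1024 bytes -- a 16-byte qlcnic_bc_hdr followed by up to 1008
 * payload bytes.  Larger requests and responses are sent as multiple
 * fragments, each repeating the header (see qlcnic_sriov_prepare_bc_hdr()
 * below).
 */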
24#define QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF 2048
25#define QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF 512
26
27static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *,
28 struct qlcnic_cmd_args *);
29
30static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
31 .read_crb = qlcnic_83xx_read_crb,
32 .write_crb = qlcnic_83xx_write_crb,
33 .read_reg = qlcnic_83xx_rd_reg_indirect,
34 .write_reg = qlcnic_83xx_wrt_reg_indirect,
35 .get_mac_address = qlcnic_83xx_get_mac_address,
36 .setup_intr = qlcnic_83xx_setup_intr,
37 .alloc_mbx_args = qlcnic_83xx_alloc_mbx_args,
38 .mbx_cmd = qlcnic_sriov_vf_mbx_op,
39 .get_func_no = qlcnic_83xx_get_func_no,
40 .api_lock = qlcnic_83xx_cam_lock,
41 .api_unlock = qlcnic_83xx_cam_unlock,
42 .process_lb_rcv_ring_diag = qlcnic_83xx_process_rcv_ring_diag,
43 .create_rx_ctx = qlcnic_83xx_create_rx_ctx,
44 .create_tx_ctx = qlcnic_83xx_create_tx_ctx,
45 .del_rx_ctx = qlcnic_83xx_del_rx_ctx,
46 .del_tx_ctx = qlcnic_83xx_del_tx_ctx,
47 .setup_link_event = qlcnic_83xx_setup_link_event,
48 .get_nic_info = qlcnic_83xx_get_nic_info,
49 .get_pci_info = qlcnic_83xx_get_pci_info,
50 .set_nic_info = qlcnic_83xx_set_nic_info,
51 .change_macvlan = qlcnic_83xx_sre_macaddr_change,
52 .napi_enable = qlcnic_83xx_napi_enable,
53 .napi_disable = qlcnic_83xx_napi_disable,
54 .config_intr_coal = qlcnic_83xx_config_intr_coal,
55 .config_rss = qlcnic_83xx_config_rss,
56 .config_hw_lro = qlcnic_83xx_config_hw_lro,
57 .config_promisc_mode = qlcnic_83xx_nic_set_promisc,
58 .change_l2_filter = qlcnic_83xx_change_l2_filter,
59 .get_board_info = qlcnic_83xx_get_port_info,
60};
61
62static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
63 .config_bridged_mode = qlcnic_config_bridged_mode,
64 .config_led = qlcnic_config_led,
65 .cancel_idc_work = qlcnic_83xx_idc_exit,
66 .napi_add = qlcnic_83xx_napi_add,
67 .napi_del = qlcnic_83xx_napi_del,
68 .config_ipaddr = qlcnic_83xx_config_ipaddr,
69 .clear_legacy_intr = qlcnic_83xx_clear_legacy_intr,
70};
71
72static const struct qlcnic_mailbox_metadata qlcnic_sriov_bc_mbx_tbl[] = {
73 {QLCNIC_BC_CMD_CHANNEL_INIT, 2, 2},
74 {QLCNIC_BC_CMD_CHANNEL_TERM, 2, 2},
75};
76
77static inline bool qlcnic_sriov_bc_msg_check(u32 val)
78{
79 return (val & (1 << QLC_BC_MSG)) ? true : false;
80}
81
82static inline bool qlcnic_sriov_channel_free_check(u32 val)
83{
84 return (val & (1 << QLC_BC_CFREE)) ? true : false;
85}
86
87static inline u8 qlcnic_sriov_target_func_id(u32 val)
88{
89 return (val >> 4) & 0xff;
90}
91
92static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id)
93{
94 struct pci_dev *dev = adapter->pdev;
95 int pos;
96 u16 stride, offset;
97
98 if (qlcnic_sriov_vf_check(adapter))
99 return 0;
100
101 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
102 pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
103 pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
104
105 return (dev->devfn + offset + stride * vf_id) & 0xff;
106}
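/* Worked example for the routing-ID math above, with hypothetical SR-IOV
 * capability values: PF devfn 0x0, VF offset 8 and VF stride 1 give VF 0
 * a routing ID of 8, VF 1 an ID of 9, and so on --
 * (0 + 8 + 1 * vf_id) & 0xff.
 */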
107
108int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
109{
110 struct qlcnic_sriov *sriov;
111 struct qlcnic_back_channel *bc;
112 struct workqueue_struct *wq;
113 struct qlcnic_vport *vp;
114 struct qlcnic_vf_info *vf;
115 int err, i;
116
117 if (!qlcnic_sriov_enable_check(adapter))
118 return -EIO;
119
120 sriov = kzalloc(sizeof(struct qlcnic_sriov), GFP_KERNEL);
121 if (!sriov)
122 return -ENOMEM;
123
124 adapter->ahw->sriov = sriov;
125 sriov->num_vfs = num_vfs;
126 bc = &sriov->bc;
127 sriov->vf_info = kzalloc(sizeof(struct qlcnic_vf_info) *
128 num_vfs, GFP_KERNEL);
129 if (!sriov->vf_info) {
130 err = -ENOMEM;
131 goto qlcnic_free_sriov;
132 }
133
134 wq = create_singlethread_workqueue("bc-trans");
135 if (wq == NULL) {
136 err = -ENOMEM;
137 dev_err(&adapter->pdev->dev,
138 "Cannot create bc-trans workqueue\n");
139 goto qlcnic_free_vf_info;
140 }
141
142 bc->bc_trans_wq = wq;
143
144 wq = create_singlethread_workqueue("async");
145 if (wq == NULL) {
146 err = -ENOMEM;
147 dev_err(&adapter->pdev->dev, "Cannot create async workqueue\n");
148 goto qlcnic_destroy_trans_wq;
149 }
150
151 bc->bc_async_wq = wq;
152 INIT_LIST_HEAD(&bc->async_list);
153
154 for (i = 0; i < num_vfs; i++) {
155 vf = &sriov->vf_info[i];
156 vf->adapter = adapter;
157 vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
158 mutex_init(&vf->send_cmd_lock);
159 INIT_LIST_HEAD(&vf->rcv_act.wait_list);
160 INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
161 spin_lock_init(&vf->rcv_act.lock);
162 spin_lock_init(&vf->rcv_pend.lock);
163 init_completion(&vf->ch_free_cmpl);
164
165 if (qlcnic_sriov_pf_check(adapter)) {
166 vp = kzalloc(sizeof(struct qlcnic_vport), GFP_KERNEL);
167 if (!vp) {
168 err = -ENOMEM;
169 goto qlcnic_destroy_async_wq;
170 }
171 sriov->vf_info[i].vp = vp;
172 random_ether_addr(vp->mac);
173 dev_info(&adapter->pdev->dev,
174 "MAC Address %pM is configured for VF %d\n",
175 vp->mac, i);
176 }
177 }
178
179 return 0;
180
181qlcnic_destroy_async_wq:
182 destroy_workqueue(bc->bc_async_wq);
183
184qlcnic_destroy_trans_wq:
185 destroy_workqueue(bc->bc_trans_wq);
186
187qlcnic_free_vf_info:
188 kfree(sriov->vf_info);
189
190qlcnic_free_sriov:
191 kfree(adapter->ahw->sriov);
192 return err;
193}
194
195void __qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
196{
197 struct qlcnic_sriov *sriov = adapter->ahw->sriov;
198 struct qlcnic_back_channel *bc = &sriov->bc;
199 int i;
200
201 if (!qlcnic_sriov_enable_check(adapter))
202 return;
203
204 qlcnic_sriov_cleanup_async_list(bc);
205 destroy_workqueue(bc->bc_async_wq);
206 destroy_workqueue(bc->bc_trans_wq);
207
208 for (i = 0; i < sriov->num_vfs; i++)
209 kfree(sriov->vf_info[i].vp);
210
211 kfree(sriov->vf_info);
212 kfree(adapter->ahw->sriov);
213}
214
215static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter *adapter)
216{
217 qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
218 qlcnic_sriov_cfg_bc_intr(adapter, 0);
219 __qlcnic_sriov_cleanup(adapter);
220}
221
222void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
223{
224 if (qlcnic_sriov_pf_check(adapter))
225 qlcnic_sriov_pf_cleanup(adapter);
226
227 if (qlcnic_sriov_vf_check(adapter))
228 qlcnic_sriov_vf_cleanup(adapter);
229}
230
231static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
232 u32 *pay, u8 pci_func, u8 size)
233{
234 struct qlcnic_hardware_context *ahw = adapter->ahw;
235 unsigned long flags;
236 u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val;
237 u16 opcode;
238 u8 mbx_err_code;
239 int i, j;
240
241 opcode = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;
242
243 if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
244 dev_info(&adapter->pdev->dev,
245 "Mailbox cmd attempted, 0x%x\n", opcode);
246 dev_info(&adapter->pdev->dev, "Mailbox detached\n");
247 return 0;
248 }
249
250 spin_lock_irqsave(&ahw->mbx_lock, flags);
251
252 mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
253 if (mbx_val) {
254 QLCDB(adapter, DRV, "Mailbox cmd attempted, 0x%x\n", opcode);
255 spin_unlock_irqrestore(&ahw->mbx_lock, flags);
256 return QLCNIC_RCODE_TIMEOUT;
257 }
258 /* Fill in mailbox registers */
259 val = size + (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
260 mbx_cmd = 0x31 | (val << 16) | (adapter->ahw->fw_hal_version << 29);
261
262 writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
263 mbx_cmd = 0x1 | (1 << 4);
264
265 if (qlcnic_sriov_pf_check(adapter))
266 mbx_cmd |= (pci_func << 5);
267
268 writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 1));
269 for (i = 2, j = 0; j < (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
270 i++, j++) {
271 writel(*(hdr++), QLCNIC_MBX_HOST(ahw, i));
272 }
273 for (j = 0; j < size; j++, i++)
274 writel(*(pay++), QLCNIC_MBX_HOST(ahw, i));
275
276 /* Signal FW about the impending command */
277 QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
278
279 /* Waiting for the mailbox cmd to complete and while waiting here
280 * some AEN might arrive. If more than 5 seconds expire we can
281 * assume something is wrong.
282 */
283poll:
284 rsp = qlcnic_83xx_mbx_poll(adapter);
285 if (rsp != QLCNIC_RCODE_TIMEOUT) {
286 /* Get the FW response data */
287 fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
288 if (fw_data & QLCNIC_MBX_ASYNC_EVENT) {
289 qlcnic_83xx_process_aen(adapter);
290 mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
291 if (mbx_val)
292 goto poll;
293 }
294 mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
295 rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
296 opcode = QLCNIC_MBX_RSP(fw_data);
297
298 switch (mbx_err_code) {
299 case QLCNIC_MBX_RSP_OK:
300 case QLCNIC_MBX_PORT_RSP_OK:
301 rsp = QLCNIC_RCODE_SUCCESS;
302 break;
303 default:
304 if (opcode == QLCNIC_CMD_CONFIG_MAC_VLAN) {
305 rsp = qlcnic_83xx_mac_rcode(adapter);
306 if (!rsp)
307 goto out;
308 }
309 dev_err(&adapter->pdev->dev,
310 "MBX command 0x%x failed with err:0x%x\n",
311 opcode, mbx_err_code);
312 rsp = mbx_err_code;
313 break;
314 }
315 goto out;
316 }
317
318 dev_err(&adapter->pdev->dev, "MBX command 0x%x timed out\n",
319 QLCNIC_MBX_RSP(mbx_cmd));
320 rsp = QLCNIC_RCODE_TIMEOUT;
321out:
322 /* clear fw mbx control register */
323 QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
324 spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
325 return rsp;
326}
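/* Register layout used by the post routine above: mailbox reg 0 carries
 * the command word (0x31 | word count << 16 | fw_hal_version << 29),
 * reg 1 a routing word (bits 0 and 4 set, plus the destination VF
 * function at bits 5+ when the PF is sending), and regs 2.. hold the
 * four-word bc header followed by the payload words.
 */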
327
328static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter)
329{
330 adapter->num_rxd = QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF;
331 adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
332 adapter->num_jumbo_rxd = QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF;
333 adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
334 adapter->num_txd = MAX_CMD_DESCRIPTORS;
335 adapter->max_rds_rings = MAX_RDS_RINGS;
336}
337
338static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
339{
340 struct qlcnic_info nic_info;
341 struct qlcnic_hardware_context *ahw = adapter->ahw;
342 int err;
343
344 err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func);
345 if (err)
346 return -EIO;
347
348 if (qlcnic_83xx_get_port_info(adapter))
349 return -EIO;
350
351 qlcnic_sriov_vf_cfg_buff_desc(adapter);
352 adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
353 dev_info(&adapter->pdev->dev, "HAL Version: %d\n",
354 adapter->ahw->fw_hal_version);
355
356 ahw->physical_port = (u8) nic_info.phys_port;
357 ahw->switch_mode = nic_info.switch_mode;
358 ahw->max_mtu = nic_info.max_mtu;
359 ahw->op_mode = nic_info.op_mode;
360 ahw->capabilities = nic_info.capabilities;
361 return 0;
362}
363
364static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
365 int pci_using_dac)
366{
367 int err;
368
369 INIT_LIST_HEAD(&adapter->vf_mc_list);
370 if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
371 dev_warn(&adapter->pdev->dev,
372 "83xx adapter do not support MSI interrupts\n");
373
374 err = qlcnic_setup_intr(adapter, 1);
375 if (err) {
376 dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
377 goto err_out_disable_msi;
378 }
379
380 err = qlcnic_83xx_setup_mbx_intr(adapter);
381 if (err)
382 goto err_out_disable_msi;
383
384 err = qlcnic_sriov_init(adapter, 1);
385 if (err)
386 goto err_out_disable_mbx_intr;
387
388 err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
389 if (err)
390 goto err_out_cleanup_sriov;
391
392 err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
393 if (err)
394 goto err_out_disable_bc_intr;
395
396 err = qlcnic_sriov_vf_init_driver(adapter);
397 if (err)
398 goto err_out_send_channel_term;
399
400 err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
401 if (err)
402 goto err_out_send_channel_term;
403
404 pci_set_drvdata(adapter->pdev, adapter);
405 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
406 adapter->netdev->name);
407 return 0;
408
409err_out_send_channel_term:
410 qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
411
412err_out_disable_bc_intr:
413 qlcnic_sriov_cfg_bc_intr(adapter, 0);
414
415err_out_cleanup_sriov:
416 __qlcnic_sriov_cleanup(adapter);
417
418err_out_disable_mbx_intr:
419 qlcnic_83xx_free_mbx_intr(adapter);
420
421err_out_disable_msi:
422 qlcnic_teardown_intr(adapter);
423 return err;
424}
425
426int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac)
427{
428 struct qlcnic_hardware_context *ahw = adapter->ahw;
429
430 spin_lock_init(&ahw->mbx_lock);
431 set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
432 ahw->msix_supported = 1;
433 adapter->flags |= QLCNIC_TX_INTR_SHARED;
434
435 if (qlcnic_sriov_setup_vf(adapter, pci_using_dac))
436 return -EIO;
437
438 if (qlcnic_read_mac_addr(adapter))
439 dev_warn(&adapter->pdev->dev, "failed to read mac addr\n");
440
441 set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
442 adapter->ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
443 adapter->ahw->reset_context = 0;
444 adapter->fw_fail_cnt = 0;
445 clear_bit(__QLCNIC_RESETTING, &adapter->state);
446 adapter->need_fw_reset = 0;
447 return 0;
448}
449
450void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *adapter)
451{
452 struct qlcnic_hardware_context *ahw = adapter->ahw;
453
454 ahw->op_mode = QLCNIC_SRIOV_VF_FUNC;
455 dev_info(&adapter->pdev->dev,
456 "HAL Version: %d Non Privileged SRIOV function\n",
457 ahw->fw_hal_version);
458 adapter->nic_ops = &qlcnic_sriov_vf_ops;
459 set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
460 return;
461}
462
463void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *ahw)
464{
465 ahw->hw_ops = &qlcnic_sriov_vf_hw_ops;
466 ahw->reg_tbl = (u32 *)qlcnic_83xx_reg_tbl;
467 ahw->ext_reg_tbl = (u32 *)qlcnic_83xx_ext_reg_tbl;
468}
469
470static u32 qlcnic_sriov_get_bc_paysize(u32 real_pay_size, u8 curr_frag)
471{
472 u32 pay_size;
473
474 pay_size = real_pay_size / ((curr_frag + 1) * QLC_BC_PAYLOAD_SZ);
475
476 if (pay_size)
477 pay_size = QLC_BC_PAYLOAD_SZ;
478 else
479 pay_size = real_pay_size % QLC_BC_PAYLOAD_SZ;
480
481 return pay_size;
482}
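/* Worked example for the helper above (QLC_BC_PAYLOAD_SZ = 1008 bytes):
 * for real_pay_size = 2500, curr_frag 0 and 1 compute 2500 / (n * 1008)
 * >= 1 and so return a full 1008-byte chunk, while curr_frag 2 computes
 * 2500 / 3024 == 0 and returns the tail, 2500 % 1008 = 484 bytes.
 */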
483
484int qlcnic_sriov_func_to_index(struct qlcnic_adapter *adapter, u8 pci_func)
485{
486 struct qlcnic_vf_info *vf_info = adapter->ahw->sriov->vf_info;
487 u8 i;
488
489 if (qlcnic_sriov_vf_check(adapter))
490 return 0;
491
492 for (i = 0; i < adapter->ahw->sriov->num_vfs; i++) {
493 if (vf_info[i].pci_func == pci_func)
494 return i;
495 }
496
497 return -EINVAL;
498}
499
500static inline int qlcnic_sriov_alloc_bc_trans(struct qlcnic_bc_trans **trans)
501{
502 *trans = kzalloc(sizeof(struct qlcnic_bc_trans), GFP_ATOMIC);
503 if (!*trans)
504 return -ENOMEM;
505
506 init_completion(&(*trans)->resp_cmpl);
507 return 0;
508}
509
510static inline int qlcnic_sriov_alloc_bc_msg(struct qlcnic_bc_hdr **hdr,
511 u32 size)
512{
513 *hdr = kzalloc(sizeof(struct qlcnic_bc_hdr) * size, GFP_ATOMIC);
514 if (!*hdr)
515 return -ENOMEM;
516
517 return 0;
518}
519
520static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type)
521{
522 const struct qlcnic_mailbox_metadata *mbx_tbl;
523 int i, size;
524
525 mbx_tbl = qlcnic_sriov_bc_mbx_tbl;
526 size = ARRAY_SIZE(qlcnic_sriov_bc_mbx_tbl);
527
528 for (i = 0; i < size; i++) {
529 if (type == mbx_tbl[i].cmd) {
530 mbx->op_type = QLC_BC_CMD;
531 mbx->req.num = mbx_tbl[i].in_args;
532 mbx->rsp.num = mbx_tbl[i].out_args;
533 mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
534 GFP_ATOMIC);
535 if (!mbx->req.arg)
536 return -ENOMEM;
537 mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32),
538 GFP_ATOMIC);
539 if (!mbx->rsp.arg) {
540 kfree(mbx->req.arg);
541 mbx->req.arg = NULL;
542 return -ENOMEM;
543 }
544 memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
545 memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
546 mbx->req.arg[0] = (type | (mbx->req.num << 16) |
547 (3 << 29));
548 return 0;
549 }
550 }
551 return -EINVAL;
552}
553
554static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans,
555 struct qlcnic_cmd_args *cmd,
556 u16 seq, u8 msg_type)
557{
558 struct qlcnic_bc_hdr *hdr;
559 int i;
560 u32 num_regs, bc_pay_sz;
561 u16 remainder;
562 u8 cmd_op, num_frags, t_num_frags;
563
564 bc_pay_sz = QLC_BC_PAYLOAD_SZ;
565 if (msg_type == QLC_BC_COMMAND) {
566 trans->req_pay = (struct qlcnic_bc_payload *)cmd->req.arg;
567 trans->rsp_pay = (struct qlcnic_bc_payload *)cmd->rsp.arg;
568 num_regs = cmd->req.num;
569 trans->req_pay_size = (num_regs * 4);
570 num_regs = cmd->rsp.num;
571 trans->rsp_pay_size = (num_regs * 4);
572 cmd_op = cmd->req.arg[0] & 0xff;
573 remainder = (trans->req_pay_size) % (bc_pay_sz);
574 num_frags = (trans->req_pay_size) / (bc_pay_sz);
575 if (remainder)
576 num_frags++;
577 t_num_frags = num_frags;
578 if (qlcnic_sriov_alloc_bc_msg(&trans->req_hdr, num_frags))
579 return -ENOMEM;
580 remainder = (trans->rsp_pay_size) % (bc_pay_sz);
581 num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
582 if (remainder)
583 num_frags++;
584 if (qlcnic_sriov_alloc_bc_msg(&trans->rsp_hdr, num_frags))
585 return -ENOMEM;
586 num_frags = t_num_frags;
587 hdr = trans->req_hdr;
588 } else {
589 cmd->req.arg = (u32 *)trans->req_pay;
590 cmd->rsp.arg = (u32 *)trans->rsp_pay;
591 cmd_op = cmd->req.arg[0] & 0xff;
592 remainder = (trans->rsp_pay_size) % (bc_pay_sz);
593 num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
594 if (remainder)
595 num_frags++;
596 cmd->req.num = trans->req_pay_size / 4;
597 cmd->rsp.num = trans->rsp_pay_size / 4;
598 hdr = trans->rsp_hdr;
599 }
600
601 trans->trans_id = seq;
602 trans->cmd_id = cmd_op;
603 for (i = 0; i < num_frags; i++) {
604 hdr[i].version = 2;
605 hdr[i].msg_type = msg_type;
606 hdr[i].op_type = cmd->op_type;
607 hdr[i].num_cmds = 1;
608 hdr[i].num_frags = num_frags;
609 hdr[i].frag_num = i + 1;
610 hdr[i].cmd_op = cmd_op;
611 hdr[i].seq_id = seq;
612 }
613 return 0;
614}
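/* Every fragment built above carries a complete copy of the header, with
 * only frag_num differing (1-based), so the receiver can match fragments
 * to a transaction by seq_id alone and reassemble from its pending list
 * (see qlcnic_sriov_handle_pending_trans() below).
 */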
615
616static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *trans)
617{
618 if (!trans)
619 return;
620 kfree(trans->req_hdr);
621 kfree(trans->rsp_hdr);
622 kfree(trans);
623}
624
625static int qlcnic_sriov_clear_trans(struct qlcnic_vf_info *vf,
626 struct qlcnic_bc_trans *trans, u8 type)
627{
628 struct qlcnic_trans_list *t_list;
629 unsigned long flags;
630 int ret = 0;
631
632 if (type == QLC_BC_RESPONSE) {
633 t_list = &vf->rcv_act;
634 spin_lock_irqsave(&t_list->lock, flags);
635 t_list->count--;
636 list_del(&trans->list);
637 if (t_list->count > 0)
638 ret = 1;
639 spin_unlock_irqrestore(&t_list->lock, flags);
640 }
641 if (type == QLC_BC_COMMAND) {
642 while (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
643 msleep(100);
644 vf->send_cmd = NULL;
645 clear_bit(QLC_BC_VF_SEND, &vf->state);
646 }
647 return ret;
648}
649
650static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov *sriov,
651 struct qlcnic_vf_info *vf,
652 work_func_t func)
653{
654 INIT_WORK(&vf->trans_work, func);
655 queue_work(sriov->bc.bc_trans_wq, &vf->trans_work);
656}
657
658static inline void qlcnic_sriov_wait_for_resp(struct qlcnic_bc_trans *trans)
659{
660 struct completion *cmpl = &trans->resp_cmpl;
661
662 if (wait_for_completion_timeout(cmpl, QLC_MBOX_RESP_TIMEOUT))
663 trans->trans_state = QLC_END;
664 else
665 trans->trans_state = QLC_ABORT;
666
667 return;
668}
669
670static void qlcnic_sriov_handle_multi_frags(struct qlcnic_bc_trans *trans,
671 u8 type)
672{
673 if (type == QLC_BC_RESPONSE) {
674 trans->curr_rsp_frag++;
675 if (trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
676 trans->trans_state = QLC_INIT;
677 else
678 trans->trans_state = QLC_END;
679 } else {
680 trans->curr_req_frag++;
681 if (trans->curr_req_frag < trans->req_hdr->num_frags)
682 trans->trans_state = QLC_INIT;
683 else
684 trans->trans_state = QLC_WAIT_FOR_RESP;
685 }
686}
687
688static void qlcnic_sriov_wait_for_channel_free(struct qlcnic_bc_trans *trans,
689 u8 type)
690{
691 struct qlcnic_vf_info *vf = trans->vf;
692 struct completion *cmpl = &vf->ch_free_cmpl;
693
694 if (!wait_for_completion_timeout(cmpl, QLC_MBOX_CH_FREE_TIMEOUT)) {
695 trans->trans_state = QLC_ABORT;
696 return;
697 }
698
699 clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
700 qlcnic_sriov_handle_multi_frags(trans, type);
701}
702
703static void qlcnic_sriov_pull_bc_msg(struct qlcnic_adapter *adapter,
704 u32 *hdr, u32 *pay, u32 size)
705{
706 struct qlcnic_hardware_context *ahw = adapter->ahw;
707 u32 fw_mbx;
708 u8 i, max = 2, hdr_size, j;
709
710 hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
711 max = (size / sizeof(u32)) + hdr_size;
712
713 fw_mbx = readl(QLCNIC_MBX_FW(ahw, 0));
714 for (i = 2, j = 0; j < hdr_size; i++, j++)
715 *(hdr++) = readl(QLCNIC_MBX_FW(ahw, i));
716 for (; j < max; i++, j++)
717 *(pay++) = readl(QLCNIC_MBX_FW(ahw, i));
718}
719
720static int __qlcnic_sriov_issue_bc_post(struct qlcnic_vf_info *vf)
721{
722 int ret = -EBUSY;
723 u32 timeout = 10000;
724
725 do {
726 if (!test_and_set_bit(QLC_BC_VF_CHANNEL, &vf->state)) {
727 ret = 0;
728 break;
729 }
730 mdelay(1);
731 } while (--timeout);
732
733 return ret;
734}
735
736static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans *trans, u8 type)
737{
738 struct qlcnic_vf_info *vf = trans->vf;
739 u32 pay_size, hdr_size;
740 u32 *hdr, *pay;
741 int ret;
742 u8 pci_func = trans->func_id;
743
744 if (__qlcnic_sriov_issue_bc_post(vf))
745 return -EBUSY;
746
747 if (type == QLC_BC_COMMAND) {
748 hdr = (u32 *)(trans->req_hdr + trans->curr_req_frag);
749 pay = (u32 *)(trans->req_pay + trans->curr_req_frag);
750 hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
751 pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
752 trans->curr_req_frag);
753 pay_size = (pay_size / sizeof(u32));
754 } else {
755 hdr = (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag);
756 pay = (u32 *)(trans->rsp_pay + trans->curr_rsp_frag);
757 hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
758 pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
759 trans->curr_rsp_frag);
760 pay_size = (pay_size / sizeof(u32));
761 }
762
763 ret = qlcnic_sriov_post_bc_msg(vf->adapter, hdr, pay,
764 pci_func, pay_size);
765 return ret;
766}
767
768static int __qlcnic_sriov_send_bc_msg(struct qlcnic_bc_trans *trans,
769 struct qlcnic_vf_info *vf, u8 type)
770{
771 int err;
772 bool flag = true;
773
774 while (flag) {
775 switch (trans->trans_state) {
776 case QLC_INIT:
777 trans->trans_state = QLC_WAIT_FOR_CHANNEL_FREE;
778 if (qlcnic_sriov_issue_bc_post(trans, type))
779 trans->trans_state = QLC_ABORT;
780 break;
781 case QLC_WAIT_FOR_CHANNEL_FREE:
782 qlcnic_sriov_wait_for_channel_free(trans, type);
783 break;
784 case QLC_WAIT_FOR_RESP:
785 qlcnic_sriov_wait_for_resp(trans);
786 break;
787 case QLC_END:
788 err = 0;
789 flag = false;
790 break;
791 case QLC_ABORT:
792 err = -EIO;
793 flag = false;
794 clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
795 break;
796 default:
797 err = -EIO;
798 flag = false;
799 }
800 }
801 return err;
802}
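/* The loop above is a small state machine driven once per fragment:
 *
 *	QLC_INIT --post--> QLC_WAIT_FOR_CHANNEL_FREE
 *	    --more frags-->          QLC_INIT
 *	    --last frag, command-->  QLC_WAIT_FOR_RESP --> QLC_END
 *	    --last frag, response--> QLC_END
 *
 * A post failure or a wait timeout drops the transaction to QLC_ABORT,
 * which terminates the loop with -EIO.
 */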
803
804static int qlcnic_sriov_send_bc_cmd(struct qlcnic_adapter *adapter,
805 struct qlcnic_bc_trans *trans, int pci_func)
806{
807 struct qlcnic_vf_info *vf;
808 int err, index = qlcnic_sriov_func_to_index(adapter, pci_func);
809
810 if (index < 0)
811 return -EIO;
812
813 vf = &adapter->ahw->sriov->vf_info[index];
814 trans->vf = vf;
815 trans->func_id = pci_func;
816
817 if (!test_bit(QLC_BC_VF_STATE, &vf->state)) {
818 if (qlcnic_sriov_pf_check(adapter))
819 return -EIO;
820 if (qlcnic_sriov_vf_check(adapter) &&
821 trans->cmd_id != QLCNIC_BC_CMD_CHANNEL_INIT)
822 return -EIO;
823 }
824
825 mutex_lock(&vf->send_cmd_lock);
826 vf->send_cmd = trans;
827 err = __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_COMMAND);
828 qlcnic_sriov_clear_trans(vf, trans, QLC_BC_COMMAND);
829 mutex_unlock(&vf->send_cmd_lock);
830 return err;
831}
832
833static void __qlcnic_sriov_process_bc_cmd(struct qlcnic_adapter *adapter,
834 struct qlcnic_bc_trans *trans,
835 struct qlcnic_cmd_args *cmd)
836{
837#ifdef CONFIG_QLCNIC_SRIOV
838 if (qlcnic_sriov_pf_check(adapter)) {
839 qlcnic_sriov_pf_process_bc_cmd(adapter, trans, cmd);
840 return;
841 }
842#endif
843 cmd->rsp.arg[0] |= (0x9 << 25);
844 return;
845}
846
847static void qlcnic_sriov_process_bc_cmd(struct work_struct *work)
848{
849 struct qlcnic_vf_info *vf = container_of(work, struct qlcnic_vf_info,
850 trans_work);
851 struct qlcnic_bc_trans *trans = NULL;
852 struct qlcnic_adapter *adapter = vf->adapter;
853 struct qlcnic_cmd_args cmd;
854 u8 req;
855
856 trans = list_first_entry(&vf->rcv_act.wait_list,
857 struct qlcnic_bc_trans, list);
858 adapter = vf->adapter;
859
860 if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, trans->req_hdr->seq_id,
861 QLC_BC_RESPONSE))
862 goto cleanup_trans;
863
864 __qlcnic_sriov_process_bc_cmd(adapter, trans, &cmd);
865 trans->trans_state = QLC_INIT;
866 __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_RESPONSE);
867
868cleanup_trans:
869 qlcnic_free_mbx_args(&cmd);
870 req = qlcnic_sriov_clear_trans(vf, trans, QLC_BC_RESPONSE);
871 qlcnic_sriov_cleanup_transaction(trans);
872 if (req)
873 qlcnic_sriov_schedule_bc_cmd(adapter->ahw->sriov, vf,
874 qlcnic_sriov_process_bc_cmd);
875}
876
877static void qlcnic_sriov_handle_bc_resp(struct qlcnic_bc_hdr *hdr,
878 struct qlcnic_vf_info *vf)
879{
880 struct qlcnic_bc_trans *trans;
881 u32 pay_size;
882
883 if (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
884 return;
885
886 trans = vf->send_cmd;
887
888 if (trans == NULL)
889 goto clear_send;
890
891 if (trans->trans_id != hdr->seq_id)
892 goto clear_send;
893
894 pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
895 trans->curr_rsp_frag);
896 qlcnic_sriov_pull_bc_msg(vf->adapter,
897 (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag),
898 (u32 *)(trans->rsp_pay + trans->curr_rsp_frag),
899 pay_size);
900 if (++trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
901 goto clear_send;
902
903 complete(&trans->resp_cmpl);
904
905clear_send:
906 clear_bit(QLC_BC_VF_SEND, &vf->state);
907}
908
909static int qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
910 struct qlcnic_vf_info *vf,
911 struct qlcnic_bc_trans *trans)
912{
913 struct qlcnic_trans_list *t_list = &vf->rcv_act;
914
915 spin_lock(&t_list->lock);
916 t_list->count++;
917 list_add_tail(&trans->list, &t_list->wait_list);
918 if (t_list->count == 1)
919 qlcnic_sriov_schedule_bc_cmd(sriov, vf,
920 qlcnic_sriov_process_bc_cmd);
921 spin_unlock(&t_list->lock);
922 return 0;
923}
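/* Work is queued only on the empty -> non-empty transition above; while
 * further transactions remain on rcv_act, qlcnic_sriov_process_bc_cmd()
 * reschedules itself via the return value of qlcnic_sriov_clear_trans(),
 * so the same per-VF work item drains the whole list.
 */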
924
925static void qlcnic_sriov_handle_pending_trans(struct qlcnic_sriov *sriov,
926 struct qlcnic_vf_info *vf,
927 struct qlcnic_bc_hdr *hdr)
928{
929 struct qlcnic_bc_trans *trans = NULL;
930 struct list_head *node;
931 u32 pay_size, curr_frag;
932 u8 found = 0, active = 0;
933
934 spin_lock(&vf->rcv_pend.lock);
935 if (vf->rcv_pend.count > 0) {
936 list_for_each(node, &vf->rcv_pend.wait_list) {
937 trans = list_entry(node, struct qlcnic_bc_trans, list);
938 if (trans->trans_id == hdr->seq_id) {
939 found = 1;
940 break;
941 }
942 }
943 }
944
945 if (found) {
946 curr_frag = trans->curr_req_frag;
947 pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
948 curr_frag);
949 qlcnic_sriov_pull_bc_msg(vf->adapter,
950 (u32 *)(trans->req_hdr + curr_frag),
951 (u32 *)(trans->req_pay + curr_frag),
952 pay_size);
953 trans->curr_req_frag++;
954 if (trans->curr_req_frag >= hdr->num_frags) {
955 vf->rcv_pend.count--;
956 list_del(&trans->list);
957 active = 1;
958 }
959 }
960 spin_unlock(&vf->rcv_pend.lock);
961
962 if (active)
963 if (qlcnic_sriov_add_act_list(sriov, vf, trans))
964 qlcnic_sriov_cleanup_transaction(trans);
965
966 return;
967}
968
969static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov,
970 struct qlcnic_bc_hdr *hdr,
971 struct qlcnic_vf_info *vf)
972{
973 struct qlcnic_bc_trans *trans;
974 struct qlcnic_adapter *adapter = vf->adapter;
975 struct qlcnic_cmd_args cmd;
976 u32 pay_size;
977 int err;
978 u8 cmd_op;
979
980 if (!test_bit(QLC_BC_VF_STATE, &vf->state) &&
981 hdr->op_type != QLC_BC_CMD &&
982 hdr->cmd_op != QLCNIC_BC_CMD_CHANNEL_INIT)
983 return;
984
985 if (hdr->frag_num > 1) {
986 qlcnic_sriov_handle_pending_trans(sriov, vf, hdr);
987 return;
988 }
989
990 cmd_op = hdr->cmd_op;
991 if (qlcnic_sriov_alloc_bc_trans(&trans))
992 return;
993
994 if (hdr->op_type == QLC_BC_CMD)
995 err = qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op);
996 else
997 err = qlcnic_alloc_mbx_args(&cmd, adapter, cmd_op);
998
999 if (err) {
1000 qlcnic_sriov_cleanup_transaction(trans);
1001 return;
1002 }
1003
1004 cmd.op_type = hdr->op_type;
1005 if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, hdr->seq_id,
1006 QLC_BC_COMMAND)) {
1007 qlcnic_free_mbx_args(&cmd);
1008 qlcnic_sriov_cleanup_transaction(trans);
1009 return;
1010 }
1011
1012 pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
1013 trans->curr_req_frag);
1014 qlcnic_sriov_pull_bc_msg(vf->adapter,
1015 (u32 *)(trans->req_hdr + trans->curr_req_frag),
1016 (u32 *)(trans->req_pay + trans->curr_req_frag),
1017 pay_size);
1018 trans->func_id = vf->pci_func;
1019 trans->vf = vf;
1020 trans->trans_id = hdr->seq_id;
1021 trans->curr_req_frag++;
1022 if (trans->curr_req_frag == trans->req_hdr->num_frags) {
1023 if (qlcnic_sriov_add_act_list(sriov, vf, trans)) {
1024 qlcnic_free_mbx_args(&cmd);
1025 qlcnic_sriov_cleanup_transaction(trans);
1026 }
1027 } else {
1028 spin_lock(&vf->rcv_pend.lock);
1029 list_add_tail(&trans->list, &vf->rcv_pend.wait_list);
1030 vf->rcv_pend.count++;
1031 spin_unlock(&vf->rcv_pend.lock);
1032 }
1033}
1034
1035static void qlcnic_sriov_handle_msg_event(struct qlcnic_sriov *sriov,
1036 struct qlcnic_vf_info *vf)
1037{
1038 struct qlcnic_bc_hdr hdr;
1039 u32 *ptr = (u32 *)&hdr;
1040 u8 msg_type, i;
1041
1042 for (i = 2; i < 6; i++)
1043 ptr[i - 2] = readl(QLCNIC_MBX_FW(vf->adapter->ahw, i));
1044 msg_type = hdr.msg_type;
1045
1046 switch (msg_type) {
1047 case QLC_BC_COMMAND:
1048 qlcnic_sriov_handle_bc_cmd(sriov, &hdr, vf);
1049 break;
1050 case QLC_BC_RESPONSE:
1051 qlcnic_sriov_handle_bc_resp(&hdr, vf);
1052 break;
1053 }
1054}
1055
1056void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *adapter, u32 event)
1057{
1058 struct qlcnic_vf_info *vf;
1059 struct qlcnic_sriov *sriov;
1060 int index;
1061 u8 pci_func;
1062
1063 sriov = adapter->ahw->sriov;
1064 pci_func = qlcnic_sriov_target_func_id(event);
1065 index = qlcnic_sriov_func_to_index(adapter, pci_func);
1066
1067 if (index < 0)
1068 return;
1069
1070 vf = &sriov->vf_info[index];
1071 vf->pci_func = pci_func;
1072
1073 if (qlcnic_sriov_channel_free_check(event))
1074 complete(&vf->ch_free_cmpl);
1075
1076 if (qlcnic_sriov_bc_msg_check(event))
1077 qlcnic_sriov_handle_msg_event(sriov, vf);
1078}
1079
1080int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *adapter, u8 enable)
1081{
1082 struct qlcnic_cmd_args cmd;
1083 int err;
1084
1085 if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
1086 return 0;
1087
1088 if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_BC_EVENT_SETUP))
1089 return -ENOMEM;
1090
1091 if (enable)
1092 cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);
1093
1094 err = qlcnic_83xx_mbx_op(adapter, &cmd);
1095
1096 if (err != QLCNIC_RCODE_SUCCESS) {
1097 dev_err(&adapter->pdev->dev,
1098 "Failed to %s bc events, err=%d\n",
1099 (enable ? "enable" : "disable"), err);
1100 }
1101
1102 qlcnic_free_mbx_args(&cmd);
1103 return err;
1104}
1105
1106static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *adapter,
1107 struct qlcnic_cmd_args *cmd)
1108{
1109 struct qlcnic_bc_trans *trans;
1110 int err;
1111 u32 rsp_data, opcode, mbx_err_code, rsp;
1112 u16 seq = ++adapter->ahw->sriov->bc.trans_counter;
1113
1114 if (qlcnic_sriov_alloc_bc_trans(&trans))
1115 return -ENOMEM;
1116
1117 	if (qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND)) {
1118 		rsp = -ENOMEM;
 		goto err_out;	/* free the transaction rather than leaking it */
 	}
1119
1120 if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
1121 rsp = -EIO;
1122 QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n",
1123 QLCNIC_MBX_RSP(cmd->req.arg[0]), adapter->ahw->pci_func);
1124 goto err_out;
1125 }
1126
1127 err = qlcnic_sriov_send_bc_cmd(adapter, trans, adapter->ahw->pci_func);
1128 if (err) {
1129 dev_err(&adapter->pdev->dev,
1130 "MBX command 0x%x timed out for VF %d\n",
1131 (cmd->req.arg[0] & 0xffff), adapter->ahw->pci_func);
1132 rsp = QLCNIC_RCODE_TIMEOUT;
1133 goto err_out;
1134 }
1135
1136 rsp_data = cmd->rsp.arg[0];
1137 mbx_err_code = QLCNIC_MBX_STATUS(rsp_data);
1138 opcode = QLCNIC_MBX_RSP(cmd->req.arg[0]);
1139
1140 if ((mbx_err_code == QLCNIC_MBX_RSP_OK) ||
1141 (mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) {
1142 rsp = QLCNIC_RCODE_SUCCESS;
1143 } else {
1144 rsp = mbx_err_code;
1145 if (!rsp)
1146 rsp = 1;
1147 dev_err(&adapter->pdev->dev,
1148 "MBX command 0x%x failed with err:0x%x for VF %d\n",
1149 opcode, mbx_err_code, adapter->ahw->pci_func);
1150 }
1151
1152err_out:
1153 qlcnic_sriov_cleanup_transaction(trans);
1154 return rsp;
1155}
1156
1157int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
1158{
1159 struct qlcnic_cmd_args cmd;
1160 struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0];
1161 int ret;
1162
1163 if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op))
1164 return -ENOMEM;
1165
1166 ret = qlcnic_issue_cmd(adapter, &cmd);
1167 if (ret) {
1168 dev_err(&adapter->pdev->dev,
1169 "Failed bc channel %s %d\n", cmd_op ? "term" : "init",
1170 ret);
1171 goto out;
1172 }
1173
1174 cmd_op = (cmd.rsp.arg[0] & 0xff);
1175 	if (cmd.rsp.arg[0] >> 25 == 2) {
1176 		ret = 2;
 		goto out;	/* free the mailbox args rather than leaking them */
 	}
1177 if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
1178 set_bit(QLC_BC_VF_STATE, &vf->state);
1179 else
1180 clear_bit(QLC_BC_VF_STATE, &vf->state);
1181
1182out:
1183 qlcnic_free_mbx_args(&cmd);
1184 return ret;
1185}
1186
1187void qlcnic_vf_add_mc_list(struct net_device *netdev)
1188{
1189 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1190 struct qlcnic_mac_list_s *cur;
1191 struct list_head *head, tmp_list;
1192
1193 INIT_LIST_HEAD(&tmp_list);
1194 head = &adapter->vf_mc_list;
1195 netif_addr_lock_bh(netdev);
1196
1197 while (!list_empty(head)) {
1198 cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
1199 list_move(&cur->list, &tmp_list);
1200 }
1201
1202 netif_addr_unlock_bh(netdev);
1203
1204 while (!list_empty(&tmp_list)) {
1205 cur = list_entry((&tmp_list)->next,
1206 struct qlcnic_mac_list_s, list);
1207 qlcnic_nic_add_mac(adapter, cur->mac_addr);
1208 list_del(&cur->list);
1209 kfree(cur);
1210 }
1211}
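/* The two-step drain above keeps qlcnic_nic_add_mac() -- which programs
 * the filter through a firmware request -- outside the BH-disabled
 * address lock: entries are first moved to a private list under the
 * lock, then programmed and freed after it is dropped.
 */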
1212
1213void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
1214{
1215 struct list_head *head = &bc->async_list;
1216 struct qlcnic_async_work_list *entry;
1217
1218 while (!list_empty(head)) {
1219 entry = list_entry(head->next, struct qlcnic_async_work_list,
1220 list);
1221 cancel_work_sync(&entry->work);
1222 list_del(&entry->list);
1223 kfree(entry);
1224 }
1225}
1226
1227static void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
1228{
1229 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1230
1231 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
1232 return;
1233
1234 __qlcnic_set_multi(netdev);
1235}
1236
1237static void qlcnic_sriov_handle_async_multi(struct work_struct *work)
1238{
1239 struct qlcnic_async_work_list *entry;
1240 struct net_device *netdev;
1241
1242 entry = container_of(work, struct qlcnic_async_work_list, work);
1243 netdev = (struct net_device *)entry->ptr;
1244
1245 qlcnic_sriov_vf_set_multi(netdev);
1246 return;
1247}
1248
1249static struct qlcnic_async_work_list *
1250qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc)
1251{
1252 struct list_head *node;
1253 struct qlcnic_async_work_list *entry = NULL;
1254 u8 empty = 0;
1255
1256 list_for_each(node, &bc->async_list) {
1257 entry = list_entry(node, struct qlcnic_async_work_list, list);
1258 if (!work_pending(&entry->work)) {
1259 empty = 1;
1260 break;
1261 }
1262 }
1263
1264 if (!empty) {
1265 entry = kzalloc(sizeof(struct qlcnic_async_work_list),
1266 GFP_ATOMIC);
1267 if (entry == NULL)
1268 return NULL;
1269 list_add_tail(&entry->list, &bc->async_list);
1270 }
1271
1272 return entry;
1273}
1274
1275static void qlcnic_sriov_schedule_bc_async_work(struct qlcnic_back_channel *bc,
1276 work_func_t func, void *data)
1277{
1278 struct qlcnic_async_work_list *entry = NULL;
1279
1280 entry = qlcnic_sriov_get_free_node_async_work(bc);
1281 if (!entry)
1282 return;
1283
1284 entry->ptr = data;
1285 INIT_WORK(&entry->work, func);
1286 queue_work(bc->bc_async_wq, &entry->work);
1287}
1288
1289void qlcnic_sriov_vf_schedule_multi(struct net_device *netdev)
1290{
1291
1292 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1293 struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
1294
1295 qlcnic_sriov_schedule_bc_async_work(bc, qlcnic_sriov_handle_async_multi,
1296 netdev);
1297}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
new file mode 100644
index 000000000000..bed505606a2d
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -0,0 +1,1175 @@
1/*
2 * QLogic qlcnic NIC Driver
3 * Copyright (c) 2009-2013 QLogic Corporation
4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
6 */
7
8#include "qlcnic_sriov.h"
9#include "qlcnic.h"
10#include <linux/types.h>
11
12#define QLCNIC_SRIOV_VF_MAX_MAC 1
13
14static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8);
15
16struct qlcnic_sriov_cmd_handler {
17 int (*fn) (struct qlcnic_bc_trans *, struct qlcnic_cmd_args *);
18};
19
20struct qlcnic_sriov_fw_cmd_handler {
21 u32 cmd;
22 int (*fn) (struct qlcnic_bc_trans *, struct qlcnic_cmd_args *);
23};
24
25static int qlcnic_sriov_pf_set_vport_info(struct qlcnic_adapter *adapter,
26 struct qlcnic_info *npar_info,
27 u16 vport_id)
28{
29 struct qlcnic_cmd_args cmd;
30 int err;
31
32 if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_SET_NIC_INFO))
33 return -ENOMEM;
34
35 cmd.req.arg[1] = (vport_id << 16) | 0x1;
36 cmd.req.arg[2] = npar_info->bit_offsets;
37 cmd.req.arg[2] |= npar_info->min_tx_bw << 16;
38 cmd.req.arg[3] = npar_info->max_tx_bw | (npar_info->max_tx_ques << 16);
39 cmd.req.arg[4] = npar_info->max_tx_mac_filters;
40 cmd.req.arg[4] |= npar_info->max_rx_mcast_mac_filters << 16;
41 cmd.req.arg[5] = npar_info->max_rx_ucast_mac_filters |
42 (npar_info->max_rx_ip_addr << 16);
43 cmd.req.arg[6] = npar_info->max_rx_lro_flow |
44 (npar_info->max_rx_status_rings << 16);
45 cmd.req.arg[7] = npar_info->max_rx_buf_rings |
46 (npar_info->max_rx_ques << 16);
47 cmd.req.arg[8] = npar_info->max_tx_vlan_keys;
48 cmd.req.arg[8] |= npar_info->max_local_ipv6_addrs << 16;
49 cmd.req.arg[9] = npar_info->max_remote_ipv6_addrs;
50
51 err = qlcnic_issue_cmd(adapter, &cmd);
52 if (err)
53 dev_err(&adapter->pdev->dev,
54 "Failed to set vport info, err=%d\n", err);
55
56 qlcnic_free_mbx_args(&cmd);
57 return err;
58}
59
60static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
61 struct qlcnic_info *info, u16 func)
62{
63 struct qlcnic_sriov *sriov = adapter->ahw->sriov;
64 struct qlcnic_resources *res = &sriov->ff_max;
65 int ret = -EIO, vpid;
66 u32 temp, num_vf_macs, num_vfs, max;
67
68 vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
69 if (vpid < 0)
70 return -EINVAL;
71
72 num_vfs = sriov->num_vfs;
73 max = num_vfs + 1;
74 info->bit_offsets = 0xffff;
75 info->min_tx_bw = 0;
76 info->max_tx_bw = MAX_BW;
77 info->max_tx_ques = res->num_tx_queues / max;
78 info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters;
79 num_vf_macs = QLCNIC_SRIOV_VF_MAX_MAC;
80
81 if (adapter->ahw->pci_func == func) {
82 temp = res->num_rx_mcast_mac_filters - (num_vfs * num_vf_macs);
83 info->max_rx_ucast_mac_filters = temp;
84 temp = res->num_tx_mac_filters - (num_vfs * num_vf_macs);
85 info->max_tx_mac_filters = temp;
86 } else {
87 info->max_rx_ucast_mac_filters = num_vf_macs;
88 info->max_tx_mac_filters = num_vf_macs;
89 }
90
91 info->max_rx_ip_addr = res->num_destip / max;
92 info->max_rx_status_rings = res->num_rx_status_rings / max;
93 info->max_rx_buf_rings = res->num_rx_buf_rings / max;
94 info->max_rx_ques = res->num_rx_queues / max;
95 info->max_rx_lro_flow = res->num_lro_flows_supported / max;
96 info->max_tx_vlan_keys = res->num_txvlan_keys;
97 info->max_local_ipv6_addrs = res->max_local_ipv6_addrs;
98 info->max_remote_ipv6_addrs = res->max_remote_ipv6_addrs;
99
100 ret = qlcnic_sriov_pf_set_vport_info(adapter, info, vpid);
101 if (ret)
102 return ret;
103
104 return 0;
105}
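/* Worked example of the split above, assuming num_vfs = 4 (so max = 5):
 * tx/rx queues, status and buffer rings, destination IPs and LRO flows
 * are each divided by 5; every VF gets QLCNIC_SRIOV_VF_MAX_MAC (1)
 * unicast and tx MAC filter, and the PF keeps the firmware filter pools
 * minus those 4 VF entries.
 */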
106
107static void qlcnic_sriov_pf_set_ff_max_res(struct qlcnic_adapter *adapter,
108 struct qlcnic_info *info)
109{
110 struct qlcnic_resources *ff_max = &adapter->ahw->sriov->ff_max;
111
112 ff_max->num_tx_mac_filters = info->max_tx_mac_filters;
113 ff_max->num_rx_ucast_mac_filters = info->max_rx_ucast_mac_filters;
114 ff_max->num_rx_mcast_mac_filters = info->max_rx_mcast_mac_filters;
115 ff_max->num_txvlan_keys = info->max_tx_vlan_keys;
116 ff_max->num_rx_queues = info->max_rx_ques;
117 ff_max->num_tx_queues = info->max_tx_ques;
118 ff_max->num_lro_flows_supported = info->max_rx_lro_flow;
119 ff_max->num_destip = info->max_rx_ip_addr;
120 ff_max->num_rx_buf_rings = info->max_rx_buf_rings;
121 ff_max->num_rx_status_rings = info->max_rx_status_rings;
122 ff_max->max_remote_ipv6_addrs = info->max_remote_ipv6_addrs;
123 ff_max->max_local_ipv6_addrs = info->max_local_ipv6_addrs;
124}
125
126static int qlcnic_sriov_get_pf_info(struct qlcnic_adapter *adapter,
127 struct qlcnic_info *npar_info)
128{
129 int err;
130 struct qlcnic_cmd_args cmd;
131
132 if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO))
133 return -ENOMEM;
134
135 cmd.req.arg[1] = 0x2;
136 err = qlcnic_issue_cmd(adapter, &cmd);
137 if (err) {
138 dev_err(&adapter->pdev->dev,
139 "Failed to get PF info, err=%d\n", err);
140 goto out;
141 }
142
143 npar_info->total_pf = cmd.rsp.arg[2] & 0xff;
144 npar_info->total_rss_engines = (cmd.rsp.arg[2] >> 8) & 0xff;
145 npar_info->max_vports = MSW(cmd.rsp.arg[2]);
146 npar_info->max_tx_ques = LSW(cmd.rsp.arg[3]);
147 npar_info->max_tx_mac_filters = MSW(cmd.rsp.arg[3]);
148 npar_info->max_rx_mcast_mac_filters = LSW(cmd.rsp.arg[4]);
149 npar_info->max_rx_ucast_mac_filters = MSW(cmd.rsp.arg[4]);
150 npar_info->max_rx_ip_addr = LSW(cmd.rsp.arg[5]);
151 npar_info->max_rx_lro_flow = MSW(cmd.rsp.arg[5]);
152 npar_info->max_rx_status_rings = LSW(cmd.rsp.arg[6]);
153 npar_info->max_rx_buf_rings = MSW(cmd.rsp.arg[6]);
154 npar_info->max_rx_ques = LSW(cmd.rsp.arg[7]);
155 npar_info->max_tx_vlan_keys = MSW(cmd.rsp.arg[7]);
156 npar_info->max_local_ipv6_addrs = LSW(cmd.rsp.arg[8]);
157 npar_info->max_remote_ipv6_addrs = MSW(cmd.rsp.arg[8]);
158
159 qlcnic_sriov_pf_set_ff_max_res(adapter, npar_info);
160 dev_info(&adapter->pdev->dev,
161 "\n\ttotal_pf: %d,\n"
162 "\n\ttotal_rss_engines: %d max_vports: %d max_tx_ques %d,\n"
163 "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n"
164 "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n"
165 "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n"
166 "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n"
167 "\tmax_local_ipv6_addrs: %d, max_remote_ipv6_addrs: %d\n",
168 npar_info->total_pf, npar_info->total_rss_engines,
169 npar_info->max_vports, npar_info->max_tx_ques,
170 npar_info->max_tx_mac_filters,
171 npar_info->max_rx_mcast_mac_filters,
172 npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr,
173 npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings,
174 npar_info->max_rx_buf_rings, npar_info->max_rx_ques,
175 npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs,
176 npar_info->max_remote_ipv6_addrs);
177
178out:
179 qlcnic_free_mbx_args(&cmd);
180 return err;
181}
182
183static void qlcnic_sriov_pf_reset_vport_handle(struct qlcnic_adapter *adapter,
184 u8 func)
185{
186 struct qlcnic_sriov *sriov = adapter->ahw->sriov;
187 struct qlcnic_vport *vp;
188 int index;
189
190 if (adapter->ahw->pci_func == func) {
191 sriov->vp_handle = 0;
192 } else {
193 index = qlcnic_sriov_func_to_index(adapter, func);
194 if (index < 0)
195 return;
196 vp = sriov->vf_info[index].vp;
197 vp->handle = 0;
198 }
199}
200
201static void qlcnic_sriov_pf_set_vport_handle(struct qlcnic_adapter *adapter,
202 u16 vport_handle, u8 func)
203{
204 struct qlcnic_sriov *sriov = adapter->ahw->sriov;
205 struct qlcnic_vport *vp;
206 int index;
207
208 if (adapter->ahw->pci_func == func) {
209 sriov->vp_handle = vport_handle;
210 } else {
211 index = qlcnic_sriov_func_to_index(adapter, func);
212 if (index < 0)
213 return;
214 vp = sriov->vf_info[index].vp;
215 vp->handle = vport_handle;
216 }
217}
218
219static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *adapter,
220 u8 func)
221{
222 struct qlcnic_sriov *sriov = adapter->ahw->sriov;
223 struct qlcnic_vf_info *vf_info;
224 int index;
225
226 if (adapter->ahw->pci_func == func) {
227 return sriov->vp_handle;
228 } else {
229 index = qlcnic_sriov_func_to_index(adapter, func);
230 if (index >= 0) {
231 vf_info = &sriov->vf_info[index];
232 return vf_info->vp->handle;
233 }
234 }
235
236 return -EINVAL;
237}
238
239static int qlcnic_sriov_pf_config_vport(struct qlcnic_adapter *adapter,
240 u8 flag, u16 func)
241{
242 struct qlcnic_cmd_args cmd;
243 int ret;
244 int vpid;
245
246 if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_VPORT))
247 return -ENOMEM;
248
249 if (flag) {
250 cmd.req.arg[3] = func << 8;
251 } else {
252 vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
253 if (vpid < 0) {
254 ret = -EINVAL;
255 goto out;
256 }
257 cmd.req.arg[3] = ((vpid & 0xffff) << 8) | 1;
258 }
259
260 ret = qlcnic_issue_cmd(adapter, &cmd);
261 if (ret) {
262 dev_err(&adapter->pdev->dev,
263 "Failed %s vport, err %d for func 0x%x\n",
264 (flag ? "enable" : "disable"), ret, func);
265 goto out;
266 }
267
268 if (flag) {
269 vpid = cmd.rsp.arg[2] & 0xffff;
270 qlcnic_sriov_pf_set_vport_handle(adapter, vpid, func);
271 } else {
272 qlcnic_sriov_pf_reset_vport_handle(adapter, func);
273 }
274
275out:
276 qlcnic_free_mbx_args(&cmd);
277 return ret;
278}
279
280static int qlcnic_sriov_pf_cfg_eswitch(struct qlcnic_adapter *adapter,
281 u8 func, u8 enable)
282{
283 struct qlcnic_cmd_args cmd;
284 int err = -EIO;
285
286 if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_TOGGLE_ESWITCH))
287 return -ENOMEM;
288
289 cmd.req.arg[0] |= (3 << 29);
290 cmd.req.arg[1] = ((func & 0xf) << 2) | BIT_6 | BIT_1;
291 if (enable)
292 cmd.req.arg[1] |= BIT_0;
293
294 err = qlcnic_issue_cmd(adapter, &cmd);
295
296 if (err != QLCNIC_RCODE_SUCCESS) {
297 dev_err(&adapter->pdev->dev,
298 "Failed to enable sriov eswitch%d\n", err);
299 err = -EIO;
300 }
301
302 qlcnic_free_mbx_args(&cmd);
303 return err;
304}
305
306void qlcnic_sriov_pf_cleanup(struct qlcnic_adapter *adapter)
307{
308 u8 func = adapter->ahw->pci_func;
309
310 if (!qlcnic_sriov_enable_check(adapter))
311 return;
312
313 qlcnic_sriov_cfg_bc_intr(adapter, 0);
314 qlcnic_sriov_pf_config_vport(adapter, 0, func);
315 qlcnic_sriov_pf_cfg_eswitch(adapter, func, 0);
316 __qlcnic_sriov_cleanup(adapter);
317 adapter->ahw->op_mode = QLCNIC_MGMT_FUNC;
318 clear_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
319}
320
321void qlcnic_sriov_pf_disable(struct qlcnic_adapter *adapter)
322{
323 if (!qlcnic_sriov_pf_check(adapter))
324 return;
325
326 if (!qlcnic_sriov_enable_check(adapter))
327 return;
328
329 pci_disable_sriov(adapter->pdev);
330 netdev_info(adapter->netdev,
331 "SR-IOV is disabled successfully on port %d\n",
332 adapter->portnum);
333}
334
335static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
336{
337 struct net_device *netdev = adapter->netdev;
338
339 if (netif_running(netdev))
340 __qlcnic_down(adapter, netdev);
341
342 qlcnic_sriov_pf_disable(adapter);
343
344 qlcnic_sriov_pf_cleanup(adapter);
345
346 /* After disabling SRIOV re-init the driver in default mode
347 configure opmode based on op_mode of function
348 */
349 if (qlcnic_83xx_configure_opmode(adapter))
350 return -EIO;
351
352 if (netif_running(netdev))
353 __qlcnic_up(adapter, netdev);
354
355 return 0;
356}
357
358static int qlcnic_sriov_pf_init(struct qlcnic_adapter *adapter)
359{
360 struct qlcnic_hardware_context *ahw = adapter->ahw;
361 struct qlcnic_info nic_info, pf_info, vp_info;
362 int err;
363 u8 func = ahw->pci_func;
364
365 if (!qlcnic_sriov_enable_check(adapter))
366 return 0;
367
368 err = qlcnic_sriov_pf_cfg_eswitch(adapter, func, 1);
369 if (err)
370 goto clear_sriov_enable;
371
372 err = qlcnic_sriov_pf_config_vport(adapter, 1, func);
373 if (err)
374 goto disable_eswitch;
375
376 err = qlcnic_sriov_get_pf_info(adapter, &pf_info);
377 if (err)
378 goto delete_vport;
379
380 err = qlcnic_get_nic_info(adapter, &nic_info, func);
381 if (err)
382 goto delete_vport;
383
384 err = qlcnic_sriov_pf_cal_res_limit(adapter, &vp_info, func);
385 if (err)
386 goto delete_vport;
387
388 err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
389 if (err)
390 goto delete_vport;
391
392 ahw->physical_port = (u8) nic_info.phys_port;
393 ahw->switch_mode = nic_info.switch_mode;
394 ahw->max_mtu = nic_info.max_mtu;
395 ahw->capabilities = nic_info.capabilities;
396 ahw->nic_mode = QLC_83XX_SRIOV_MODE;
397 return err;
398
399delete_vport:
400 qlcnic_sriov_pf_config_vport(adapter, 0, func);
401
402disable_eswitch:
403 qlcnic_sriov_pf_cfg_eswitch(adapter, func, 0);
404
405clear_sriov_enable:
406 __qlcnic_sriov_cleanup(adapter);
407 adapter->ahw->op_mode = QLCNIC_MGMT_FUNC;
408 clear_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
409 return err;
410}
411
412static int qlcnic_sriov_pf_enable(struct qlcnic_adapter *adapter, int num_vfs)
413{
414 int err;
415
416 if (!qlcnic_sriov_enable_check(adapter))
417 return 0;
418
419 err = pci_enable_sriov(adapter->pdev, num_vfs);
420 if (err)
421 qlcnic_sriov_pf_cleanup(adapter);
422
423 return err;
424}
425
426static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter,
427 int num_vfs)
428{
429 int err = 0;
430
431 set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
432 adapter->ahw->op_mode = QLCNIC_SRIOV_PF_FUNC;
433
434 if (qlcnic_sriov_init(adapter, num_vfs)) {
435 clear_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
436 adapter->ahw->op_mode = QLCNIC_MGMT_FUNC;
437 return -EIO;
438 }
439
440 if (qlcnic_sriov_pf_init(adapter))
441 return -EIO;
442
443 err = qlcnic_sriov_pf_enable(adapter, num_vfs);
444 return err;
445}
446
447static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
448{
449 struct net_device *netdev = adapter->netdev;
450 int err;
451
452 if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
453 netdev_err(netdev,
454 "SR-IOV cannot be enabled, when legacy interrupts are enabled\n");
455 return -EIO;
456 }
457
458 if (netif_running(netdev))
459 __qlcnic_down(adapter, netdev);
460
461 err = __qlcnic_pci_sriov_enable(adapter, num_vfs);
462 if (err) {
463 netdev_info(netdev, "Failed to enable SR-IOV on port %d\n",
464 adapter->portnum);
465
466 if (qlcnic_83xx_configure_opmode(adapter))
467 goto error;
468 } else {
469 netdev_info(adapter->netdev,
470 "SR-IOV is enabled successfully on port %d\n",
471 adapter->portnum);
472 }
473 if (netif_running(netdev))
474 __qlcnic_up(adapter, netdev);
475
476error:
477 return err;
478}
479
480int qlcnic_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
481{
482 struct qlcnic_adapter *adapter = pci_get_drvdata(dev);
483 int err;
484
485 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
486 return -EBUSY;
487
488 if (num_vfs == 0)
489 err = qlcnic_pci_sriov_disable(adapter);
490 else
491 err = qlcnic_pci_sriov_enable(adapter, num_vfs);
492
493 clear_bit(__QLCNIC_RESETTING, &adapter->state);
494 return err;
495}
496
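qlcnic_pci_sriov_configure() above is the standard PCI .sriov_configure entry point: writing N to the adapter's sriov_numvfs sysfs attribute invokes it with num_vfs = N, and writing 0 tears SR-IOV down again. A minimal hookup sketch, assuming only what this hunk shows (probe/remove/id_table omitted):

	static struct pci_driver qlcnic_driver_sketch = {
		.name		 = "qlcnic",
		.sriov_configure = qlcnic_pci_sriov_configure,	/* invoked on sriov_numvfs writes */
	};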
497static int qlcnic_sriov_set_vf_vport_info(struct qlcnic_adapter *adapter,
498 u16 func)
499{
500 struct qlcnic_info defvp_info;
501 int err;
502
503 err = qlcnic_sriov_pf_cal_res_limit(adapter, &defvp_info, func);
504 if (err)
505 return -EIO;
506
507 return 0;
508}
509
510static int qlcnic_sriov_pf_channel_cfg_cmd(struct qlcnic_bc_trans *trans,
511 struct qlcnic_cmd_args *cmd)
512{
513 struct qlcnic_vf_info *vf = trans->vf;
514 struct qlcnic_adapter *adapter = vf->adapter;
515 int err;
516 u16 func = vf->pci_func;
517
518 cmd->rsp.arg[0] = trans->req_hdr->cmd_op;
519 cmd->rsp.arg[0] |= (1 << 16);
520
521 if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) {
522 err = qlcnic_sriov_pf_config_vport(adapter, 1, func);
523 if (!err) {
524 err = qlcnic_sriov_set_vf_vport_info(adapter, func);
525 if (err)
526 qlcnic_sriov_pf_config_vport(adapter, 0, func);
527 }
528 } else {
529 err = qlcnic_sriov_pf_config_vport(adapter, 0, func);
530 }
531
532 if (err)
533 goto err_out;
534
535 cmd->rsp.arg[0] |= (1 << 25);
536
537 if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
538 set_bit(QLC_BC_VF_STATE, &vf->state);
539 else
540 clear_bit(QLC_BC_VF_STATE, &vf->state);
541
542 return err;
543
544err_out:
545 cmd->rsp.arg[0] |= (2 << 25);
546 return err;
547}
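The channel handler above encodes its result in rsp.arg[0]: the low bits echo the request's cmd_op, bit 16 is set as a response marker, and a status code lands in bits 25 and up (1 = success, 2 = failure; the handlers below also use 0x6 for invalid arguments and 0x9 for an unsupported command). A minimal sketch of that packing, with the field layout inferred from the shifts used here rather than from a published spec:

	#include <stdint.h>

	#define QLC_RSP_STATUS_SHIFT	25	/* assumed from the (x << 25) usage above */

	static inline uint32_t qlc_rsp_arg0(uint32_t cmd_op, uint32_t status)
	{
		return cmd_op | (1u << 16) | (status << QLC_RSP_STATUS_SHIFT);
	}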
548
549static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
550 struct qlcnic_vport *vp,
551 u16 func, __le16 vlan, u8 op)
552{
553 struct qlcnic_cmd_args cmd;
554 struct qlcnic_macvlan_mbx mv;
555 u8 *addr;
556 int err;
557 u32 *buf;
558 int vpid;
559
560 if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN))
561 return -ENOMEM;
562
563 vpid = qlcnic_sriov_pf_get_vport_handle(adapter, func);
564 if (vpid < 0) {
565 err = -EINVAL;
566 goto out;
567 }
568
569 if (vlan)
570 op = ((op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
571 QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL);
572
573 cmd.req.arg[1] = op | (1 << 8) | (3 << 6);
574 cmd.req.arg[1] |= ((vpid & 0xffff) << 16) | BIT_31;
575
576 addr = vp->mac;
577 mv.vlan = le16_to_cpu(vlan);
578 mv.mac_addr0 = addr[0];
579 mv.mac_addr1 = addr[1];
580 mv.mac_addr2 = addr[2];
581 mv.mac_addr3 = addr[3];
582 mv.mac_addr4 = addr[4];
583 mv.mac_addr5 = addr[5];
584 buf = &cmd.req.arg[2];
585 memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
586
587 err = qlcnic_issue_cmd(adapter, &cmd);
588
589 if (err)
590 dev_err(&adapter->pdev->dev,
591 "MAC-VLAN %s to CAM failed, err=%d.\n",
592			((op == 1) ? "add" : "delete"), err);
593
594out:
595 qlcnic_free_mbx_args(&cmd);
596 return err;
597}
598
599static int qlcnic_sriov_validate_create_rx_ctx(struct qlcnic_cmd_args *cmd)
600{
601 if ((cmd->req.arg[0] >> 29) != 0x3)
602 return -EINVAL;
603
604 return 0;
605}
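Several validators in this file repeat the same sanity check: bits 31:29 of req.arg[0] must equal 0x3 before the request is forwarded to firmware. A shared helper would look like the sketch below (hypothetical; the driver open-codes the comparison in each validator):

	#include <errno.h>
	#include <stdint.h>

	static inline int qlc_req_signature_ok(uint32_t arg0)
	{
		return ((arg0 >> 29) == 0x3) ? 0 : -EINVAL;	/* top 3 bits must be 0x3 */
	}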
606
607static int qlcnic_sriov_pf_create_rx_ctx_cmd(struct qlcnic_bc_trans *tran,
608 struct qlcnic_cmd_args *cmd)
609{
610 struct qlcnic_vf_info *vf = tran->vf;
611 struct qlcnic_adapter *adapter = vf->adapter;
612 struct qlcnic_rcv_mbx_out *mbx_out;
613 int err;
614
615 err = qlcnic_sriov_validate_create_rx_ctx(cmd);
616 if (err) {
617 cmd->rsp.arg[0] |= (0x6 << 25);
618 return err;
619 }
620
621 cmd->req.arg[6] = vf->vp->handle;
622 err = qlcnic_issue_cmd(adapter, cmd);
623
624 if (!err) {
625 mbx_out = (struct qlcnic_rcv_mbx_out *)&cmd->rsp.arg[1];
626 vf->rx_ctx_id = mbx_out->ctx_id;
627 qlcnic_sriov_cfg_vf_def_mac(adapter, vf->vp, vf->pci_func,
628 0, QLCNIC_MAC_ADD);
629 } else {
630 vf->rx_ctx_id = 0;
631 }
632
633 return err;
634}
635
636static int qlcnic_sriov_pf_mac_address_cmd(struct qlcnic_bc_trans *trans,
637 struct qlcnic_cmd_args *cmd)
638{
639 struct qlcnic_vf_info *vf = trans->vf;
640 u8 type, *mac;
641
642 type = cmd->req.arg[1];
643 switch (type) {
644 case QLCNIC_SET_STATION_MAC:
645 case QLCNIC_SET_FAC_DEF_MAC:
646 cmd->rsp.arg[0] = (2 << 25);
647 break;
648 case QLCNIC_GET_CURRENT_MAC:
649 cmd->rsp.arg[0] = (1 << 25);
650 mac = vf->vp->mac;
651 cmd->rsp.arg[2] = mac[1] | ((mac[0] << 8) & 0xff00);
652 cmd->rsp.arg[1] = mac[5] | ((mac[4] << 8) & 0xff00) |
653 ((mac[3]) << 16 & 0xff0000) |
654 ((mac[2]) << 24 & 0xff000000);
655 }
656
657 return 0;
658}
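For QLCNIC_GET_CURRENT_MAC the handler above packs the six MAC bytes into two response words: arg[2] carries bytes 0-1 and arg[1] carries bytes 2-5, high byte first within each word. A self-contained check of that layout (plain C, not driver code):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint8_t mac[6] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };
		uint32_t arg2 = mac[1] | ((mac[0] << 8) & 0xff00);
		uint32_t arg1 = mac[5] | ((mac[4] << 8) & 0xff00) |
				((mac[3] << 16) & 0xff0000) |
				(((uint32_t)mac[2] << 24) & 0xff000000);

		/* prints arg2=aabb arg1=ccddeeff */
		printf("arg2=%04x arg1=%08x\n", (unsigned)arg2, (unsigned)arg1);
		return 0;
	}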
659
660static int qlcnic_sriov_validate_create_tx_ctx(struct qlcnic_cmd_args *cmd)
661{
662 if ((cmd->req.arg[0] >> 29) != 0x3)
663 return -EINVAL;
664
665 return 0;
666}
667
668static int qlcnic_sriov_pf_create_tx_ctx_cmd(struct qlcnic_bc_trans *trans,
669 struct qlcnic_cmd_args *cmd)
670{
671 struct qlcnic_vf_info *vf = trans->vf;
672 struct qlcnic_adapter *adapter = vf->adapter;
673 struct qlcnic_tx_mbx_out *mbx_out;
674 int err;
675
676 err = qlcnic_sriov_validate_create_tx_ctx(cmd);
677 if (err) {
678 cmd->rsp.arg[0] |= (0x6 << 25);
679 return err;
680 }
681
682 cmd->req.arg[5] |= vf->vp->handle << 16;
683 err = qlcnic_issue_cmd(adapter, cmd);
684 if (!err) {
685 mbx_out = (struct qlcnic_tx_mbx_out *)&cmd->rsp.arg[2];
686 vf->tx_ctx_id = mbx_out->ctx_id;
687 } else {
688 vf->tx_ctx_id = 0;
689 }
690
691 return err;
692}
693
694static int qlcnic_sriov_validate_del_rx_ctx(struct qlcnic_vf_info *vf,
695 struct qlcnic_cmd_args *cmd)
696{
697 if ((cmd->req.arg[0] >> 29) != 0x3)
698 return -EINVAL;
699
700 if ((cmd->req.arg[1] & 0xffff) != vf->rx_ctx_id)
701 return -EINVAL;
702
703 return 0;
704}
705
706static int qlcnic_sriov_pf_del_rx_ctx_cmd(struct qlcnic_bc_trans *trans,
707 struct qlcnic_cmd_args *cmd)
708{
709 struct qlcnic_vf_info *vf = trans->vf;
710 struct qlcnic_adapter *adapter = vf->adapter;
711 int err;
712
713 err = qlcnic_sriov_validate_del_rx_ctx(vf, cmd);
714 if (err) {
715 cmd->rsp.arg[0] |= (0x6 << 25);
716 return err;
717 }
718
719 qlcnic_sriov_cfg_vf_def_mac(adapter, vf->vp, vf->pci_func,
720 0, QLCNIC_MAC_DEL);
721 cmd->req.arg[1] |= vf->vp->handle << 16;
722 err = qlcnic_issue_cmd(adapter, cmd);
723
724 if (!err)
725 vf->rx_ctx_id = 0;
726
727 return err;
728}
729
730static int qlcnic_sriov_validate_del_tx_ctx(struct qlcnic_vf_info *vf,
731 struct qlcnic_cmd_args *cmd)
732{
733 if ((cmd->req.arg[0] >> 29) != 0x3)
734 return -EINVAL;
735
736 if ((cmd->req.arg[1] & 0xffff) != vf->tx_ctx_id)
737 return -EINVAL;
738
739 return 0;
740}
741
742static int qlcnic_sriov_pf_del_tx_ctx_cmd(struct qlcnic_bc_trans *trans,
743 struct qlcnic_cmd_args *cmd)
744{
745 struct qlcnic_vf_info *vf = trans->vf;
746 struct qlcnic_adapter *adapter = vf->adapter;
747 int err;
748
749 err = qlcnic_sriov_validate_del_tx_ctx(vf, cmd);
750 if (err) {
751 cmd->rsp.arg[0] |= (0x6 << 25);
752 return err;
753 }
754
755 cmd->req.arg[1] |= vf->vp->handle << 16;
756 err = qlcnic_issue_cmd(adapter, cmd);
757
758 if (!err)
759 vf->tx_ctx_id = 0;
760
761 return err;
762}
763
764static int qlcnic_sriov_validate_cfg_lro(struct qlcnic_vf_info *vf,
765 struct qlcnic_cmd_args *cmd)
766{
767 if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id)
768 return -EINVAL;
769
770 return 0;
771}
772
773static int qlcnic_sriov_pf_cfg_lro_cmd(struct qlcnic_bc_trans *trans,
774 struct qlcnic_cmd_args *cmd)
775{
776 struct qlcnic_vf_info *vf = trans->vf;
777 struct qlcnic_adapter *adapter = vf->adapter;
778 int err;
779
780 err = qlcnic_sriov_validate_cfg_lro(vf, cmd);
781 if (err) {
782 cmd->rsp.arg[0] |= (0x6 << 25);
783 return err;
784 }
785
786 err = qlcnic_issue_cmd(adapter, cmd);
787 return err;
788}
789
790static int qlcnic_sriov_pf_cfg_ip_cmd(struct qlcnic_bc_trans *trans,
791 struct qlcnic_cmd_args *cmd)
792{
793 struct qlcnic_vf_info *vf = trans->vf;
794 struct qlcnic_adapter *adapter = vf->adapter;
795	int err;
796 u8 op;
797
798	op = cmd->req.arg[1] & 0xff;	/* opcode is parsed here but not otherwise used */
799
800 cmd->req.arg[1] |= vf->vp->handle << 16;
801 cmd->req.arg[1] |= BIT_31;
802
803 err = qlcnic_issue_cmd(adapter, cmd);
804 return err;
805}
806
807static int qlcnic_sriov_validate_cfg_intrpt(struct qlcnic_vf_info *vf,
808 struct qlcnic_cmd_args *cmd)
809{
810 if (((cmd->req.arg[1] >> 8) & 0xff) != vf->pci_func)
811 return -EINVAL;
812
813 if (!(cmd->req.arg[1] & BIT_16))
814 return -EINVAL;
815
816 if ((cmd->req.arg[1] & 0xff) != 0x1)
817 return -EINVAL;
818
819 return 0;
820}
821
822static int qlcnic_sriov_pf_cfg_intrpt_cmd(struct qlcnic_bc_trans *trans,
823 struct qlcnic_cmd_args *cmd)
824{
825 struct qlcnic_vf_info *vf = trans->vf;
826 struct qlcnic_adapter *adapter = vf->adapter;
827 int err;
828
829 err = qlcnic_sriov_validate_cfg_intrpt(vf, cmd);
830 if (err)
831 cmd->rsp.arg[0] |= (0x6 << 25);
832 else
833 err = qlcnic_issue_cmd(adapter, cmd);
834
835 return err;
836}
837
838static int qlcnic_sriov_validate_mtu(struct qlcnic_adapter *adapter,
839 struct qlcnic_vf_info *vf,
840 struct qlcnic_cmd_args *cmd)
841{
842 if (cmd->req.arg[1] != vf->rx_ctx_id)
843 return -EINVAL;
844
845 if (cmd->req.arg[2] > adapter->ahw->max_mtu)
846 return -EINVAL;
847
848 return 0;
849}
850
851static int qlcnic_sriov_pf_set_mtu_cmd(struct qlcnic_bc_trans *trans,
852 struct qlcnic_cmd_args *cmd)
853{
854 struct qlcnic_vf_info *vf = trans->vf;
855 struct qlcnic_adapter *adapter = vf->adapter;
856 int err;
857
858 err = qlcnic_sriov_validate_mtu(adapter, vf, cmd);
859 if (err)
860 cmd->rsp.arg[0] |= (0x6 << 25);
861 else
862 err = qlcnic_issue_cmd(adapter, cmd);
863
864 return err;
865}
866
867static int qlcnic_sriov_validate_get_nic_info(struct qlcnic_vf_info *vf,
868 struct qlcnic_cmd_args *cmd)
869{
870 if (cmd->req.arg[1] & BIT_31) {
871 if (((cmd->req.arg[1] >> 16) & 0x7fff) != vf->pci_func)
872 return -EINVAL;
873 } else {
874 cmd->req.arg[1] |= vf->vp->handle << 16;
875 }
876
877 return 0;
878}
879
880static int qlcnic_sriov_pf_get_nic_info_cmd(struct qlcnic_bc_trans *trans,
881 struct qlcnic_cmd_args *cmd)
882{
883 struct qlcnic_vf_info *vf = trans->vf;
884 struct qlcnic_adapter *adapter = vf->adapter;
885 int err;
886
887 err = qlcnic_sriov_validate_get_nic_info(vf, cmd);
888 if (err) {
889 cmd->rsp.arg[0] |= (0x6 << 25);
890 return err;
891 }
892
893 err = qlcnic_issue_cmd(adapter, cmd);
894 return err;
895}
896
897static int qlcnic_sriov_validate_cfg_rss(struct qlcnic_vf_info *vf,
898 struct qlcnic_cmd_args *cmd)
899{
900 if (cmd->req.arg[1] != vf->rx_ctx_id)
901 return -EINVAL;
902
903 return 0;
904}
905
906static int qlcnic_sriov_pf_cfg_rss_cmd(struct qlcnic_bc_trans *trans,
907 struct qlcnic_cmd_args *cmd)
908{
909 struct qlcnic_vf_info *vf = trans->vf;
910 struct qlcnic_adapter *adapter = vf->adapter;
911 int err;
912
913 err = qlcnic_sriov_validate_cfg_rss(vf, cmd);
914 if (err)
915 cmd->rsp.arg[0] |= (0x6 << 25);
916 else
917 err = qlcnic_issue_cmd(adapter, cmd);
918
919 return err;
920}
921
922static int qlcnic_sriov_validate_cfg_intrcoal(struct qlcnic_adapter *adapter,
923 struct qlcnic_vf_info *vf,
924 struct qlcnic_cmd_args *cmd)
925{
926 struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
927 u16 ctx_id, pkts, time;
928
929 ctx_id = cmd->req.arg[1] >> 16;
930 pkts = cmd->req.arg[2] & 0xffff;
931 time = cmd->req.arg[2] >> 16;
932
933 if (ctx_id != vf->rx_ctx_id)
934 return -EINVAL;
935 if (pkts > coal->rx_packets)
936 return -EINVAL;
937 if (time < coal->rx_time_us)
938 return -EINVAL;
939
940 return 0;
941}
942
943static int qlcnic_sriov_pf_cfg_intrcoal_cmd(struct qlcnic_bc_trans *tran,
944 struct qlcnic_cmd_args *cmd)
945{
946 struct qlcnic_vf_info *vf = tran->vf;
947 struct qlcnic_adapter *adapter = vf->adapter;
948 int err;
949
950 err = qlcnic_sriov_validate_cfg_intrcoal(adapter, vf, cmd);
951 if (err) {
952 cmd->rsp.arg[0] |= (0x6 << 25);
953 return err;
954 }
955
956 err = qlcnic_issue_cmd(adapter, cmd);
957 return err;
958}
959
960static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
961 struct qlcnic_vf_info *vf,
962 struct qlcnic_cmd_args *cmd)
963{
964 struct qlcnic_macvlan_mbx *macvlan;
965
966 if (!(cmd->req.arg[1] & BIT_8))
967 return -EINVAL;
968
969 cmd->req.arg[1] |= (vf->vp->handle << 16);
970 cmd->req.arg[1] |= BIT_31;
971
972 macvlan = (struct qlcnic_macvlan_mbx *)&cmd->req.arg[2];
973 if (!(macvlan->mac_addr0 & BIT_0)) {
974 dev_err(&adapter->pdev->dev,
975 "MAC address change is not allowed from VF %d",
976 vf->pci_func);
977 return -EINVAL;
978 }
979
980 return 0;
981}
982
983static int qlcnic_sriov_pf_cfg_macvlan_cmd(struct qlcnic_bc_trans *trans,
984 struct qlcnic_cmd_args *cmd)
985{
986 struct qlcnic_vf_info *vf = trans->vf;
987 struct qlcnic_adapter *adapter = vf->adapter;
988 int err;
989
990 err = qlcnic_sriov_validate_cfg_macvlan(adapter, vf, cmd);
991 if (err) {
992 cmd->rsp.arg[0] |= (0x6 << 25);
993 return err;
994 }
995
996 err = qlcnic_issue_cmd(adapter, cmd);
997 return err;
998}
999
1000static int qlcnic_sriov_validate_linkevent(struct qlcnic_vf_info *vf,
1001 struct qlcnic_cmd_args *cmd)
1002{
1003 if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id)
1004 return -EINVAL;
1005
1006 if (!(cmd->req.arg[1] & BIT_8))
1007 return -EINVAL;
1008
1009 return 0;
1010}
1011
1012static int qlcnic_sriov_pf_linkevent_cmd(struct qlcnic_bc_trans *trans,
1013 struct qlcnic_cmd_args *cmd)
1014{
1015 struct qlcnic_vf_info *vf = trans->vf;
1016 struct qlcnic_adapter *adapter = vf->adapter;
1017 int err;
1018
1019 err = qlcnic_sriov_validate_linkevent(vf, cmd);
1020 if (err) {
1021 cmd->rsp.arg[0] |= (0x6 << 25);
1022 return err;
1023 }
1024
1025 err = qlcnic_issue_cmd(adapter, cmd);
1026 return err;
1027}
1028
1029static int qlcnic_sriov_pf_cfg_promisc_cmd(struct qlcnic_bc_trans *trans,
1030 struct qlcnic_cmd_args *cmd)
1031{
1032 struct qlcnic_vf_info *vf = trans->vf;
1033 struct qlcnic_adapter *adapter = vf->adapter;
1034 int err;
1035
1036 cmd->req.arg[1] |= vf->vp->handle << 16;
1037 cmd->req.arg[1] |= BIT_31;
1038 err = qlcnic_issue_cmd(adapter, cmd);
1039 return err;
1040}
1041
1042static const int qlcnic_pf_passthru_supp_cmds[] = {
1043 QLCNIC_CMD_GET_STATISTICS,
1044 QLCNIC_CMD_GET_PORT_CONFIG,
1045 QLCNIC_CMD_GET_LINK_STATUS,
1046};
1047
1048static const struct qlcnic_sriov_cmd_handler qlcnic_pf_bc_cmd_hdlr[] = {
1049 [QLCNIC_BC_CMD_CHANNEL_INIT] = {&qlcnic_sriov_pf_channel_cfg_cmd},
1050 [QLCNIC_BC_CMD_CHANNEL_TERM] = {&qlcnic_sriov_pf_channel_cfg_cmd},
1051};
1052
1053static const struct qlcnic_sriov_fw_cmd_handler qlcnic_pf_fw_cmd_hdlr[] = {
1054 {QLCNIC_CMD_CREATE_RX_CTX, qlcnic_sriov_pf_create_rx_ctx_cmd},
1055 {QLCNIC_CMD_CREATE_TX_CTX, qlcnic_sriov_pf_create_tx_ctx_cmd},
1056 {QLCNIC_CMD_MAC_ADDRESS, qlcnic_sriov_pf_mac_address_cmd},
1057 {QLCNIC_CMD_DESTROY_RX_CTX, qlcnic_sriov_pf_del_rx_ctx_cmd},
1058 {QLCNIC_CMD_DESTROY_TX_CTX, qlcnic_sriov_pf_del_tx_ctx_cmd},
1059 {QLCNIC_CMD_CONFIGURE_HW_LRO, qlcnic_sriov_pf_cfg_lro_cmd},
1060 {QLCNIC_CMD_CONFIGURE_IP_ADDR, qlcnic_sriov_pf_cfg_ip_cmd},
1061 {QLCNIC_CMD_CONFIG_INTRPT, qlcnic_sriov_pf_cfg_intrpt_cmd},
1062 {QLCNIC_CMD_SET_MTU, qlcnic_sriov_pf_set_mtu_cmd},
1063 {QLCNIC_CMD_GET_NIC_INFO, qlcnic_sriov_pf_get_nic_info_cmd},
1064 {QLCNIC_CMD_CONFIGURE_RSS, qlcnic_sriov_pf_cfg_rss_cmd},
1065 {QLCNIC_CMD_CONFIG_INTR_COAL, qlcnic_sriov_pf_cfg_intrcoal_cmd},
1066 {QLCNIC_CMD_CONFIG_MAC_VLAN, qlcnic_sriov_pf_cfg_macvlan_cmd},
1067 {QLCNIC_CMD_GET_LINK_EVENT, qlcnic_sriov_pf_linkevent_cmd},
1068 {QLCNIC_CMD_CONFIGURE_MAC_RX_MODE, qlcnic_sriov_pf_cfg_promisc_cmd},
1069};
1070
1071void qlcnic_sriov_pf_process_bc_cmd(struct qlcnic_adapter *adapter,
1072 struct qlcnic_bc_trans *trans,
1073 struct qlcnic_cmd_args *cmd)
1074{
1075 u8 size, cmd_op;
1076
1077 cmd_op = trans->req_hdr->cmd_op;
1078
1079 if (trans->req_hdr->op_type == QLC_BC_CMD) {
1080 size = ARRAY_SIZE(qlcnic_pf_bc_cmd_hdlr);
1081 if (cmd_op < size) {
1082 qlcnic_pf_bc_cmd_hdlr[cmd_op].fn(trans, cmd);
1083 return;
1084 }
1085 } else {
1086 int i;
1087 size = ARRAY_SIZE(qlcnic_pf_fw_cmd_hdlr);
1088 for (i = 0; i < size; i++) {
1089 if (cmd_op == qlcnic_pf_fw_cmd_hdlr[i].cmd) {
1090 qlcnic_pf_fw_cmd_hdlr[i].fn(trans, cmd);
1091 return;
1092 }
1093 }
1094
1095 size = ARRAY_SIZE(qlcnic_pf_passthru_supp_cmds);
1096 for (i = 0; i < size; i++) {
1097 if (cmd_op == qlcnic_pf_passthru_supp_cmds[i]) {
1098 qlcnic_issue_cmd(adapter, cmd);
1099 return;
1100 }
1101 }
1102 }
1103
1104 cmd->rsp.arg[0] |= (0x9 << 25);
1105}
1106
1107void qlcnic_pf_set_interface_id_create_rx_ctx(struct qlcnic_adapter *adapter,
1108 u32 *int_id)
1109{
1110 u16 vpid;
1111
1112 vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
1113 adapter->ahw->pci_func);
1114 *int_id |= vpid;
1115}
1116
1117void qlcnic_pf_set_interface_id_del_rx_ctx(struct qlcnic_adapter *adapter,
1118 u32 *int_id)
1119{
1120 u16 vpid;
1121
1122 vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
1123 adapter->ahw->pci_func);
1124 *int_id |= vpid << 16;
1125}
1126
1127void qlcnic_pf_set_interface_id_create_tx_ctx(struct qlcnic_adapter *adapter,
1128 u32 *int_id)
1129{
1130 int vpid;
1131
1132 vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
1133 adapter->ahw->pci_func);
1134 *int_id |= vpid << 16;
1135}
1136
1137void qlcnic_pf_set_interface_id_del_tx_ctx(struct qlcnic_adapter *adapter,
1138 u32 *int_id)
1139{
1140 u16 vpid;
1141
1142 vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
1143 adapter->ahw->pci_func);
1144 *int_id |= vpid << 16;
1145}
1146
1147void qlcnic_pf_set_interface_id_promisc(struct qlcnic_adapter *adapter,
1148 u32 *int_id)
1149{
1150 u16 vpid;
1151
1152 vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
1153 adapter->ahw->pci_func);
1154 *int_id |= (vpid << 16) | BIT_31;
1155}
1156
1157void qlcnic_pf_set_interface_id_ipaddr(struct qlcnic_adapter *adapter,
1158 u32 *int_id)
1159{
1160 u16 vpid;
1161
1162 vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
1163 adapter->ahw->pci_func);
1164 *int_id |= (vpid << 16) | BIT_31;
1165}
1166
1167void qlcnic_pf_set_interface_id_macaddr(struct qlcnic_adapter *adapter,
1168 u32 *int_id)
1169{
1170 u16 vpid;
1171
1172 vpid = qlcnic_sriov_pf_get_vport_handle(adapter,
1173 adapter->ahw->pci_func);
1174 *int_id |= (vpid << 16) | BIT_31;
1175}
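The qlcnic_pf_set_interface_id_*() helpers above differ only in the shift applied to the vport handle and in whether bit 31 (apparently a "vport id valid" flag; the name is an assumption) gets set. A single parameterized sketch, detached from the driver types:

	#include <stdint.h>

	static inline uint32_t qlc_iface_id(uint16_t vpid, unsigned int shift,
					    int set_valid_bit)
	{
		uint32_t id = (uint32_t)vpid << shift;	/* shift is 0 or 16 above */

		return set_valid_bit ? (id | (1u << 31)) : id;
	}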
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 987fb6f8adc3..c77675da671f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -21,8 +21,6 @@
21#include <linux/aer.h>
22#include <linux/log2.h>
23
24#include <linux/sysfs.h>
25
26#define QLC_STATUS_UNSUPPORTED_CMD	-2
27
28int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
@@ -886,6 +884,244 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
884	return size;
885}
886
887static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp,
888 struct kobject *kobj,
889 struct bin_attribute *attr,
890 char *buf, loff_t offset,
891 size_t size)
892{
893 unsigned char *p_read_buf;
894 int ret, count;
895 struct device *dev = container_of(kobj, struct device, kobj);
896 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
897
898 if (!size)
899 return QL_STATUS_INVALID_PARAM;
900 if (!buf)
901 return QL_STATUS_INVALID_PARAM;
902
903 count = size / sizeof(u32);
904
905 if (size % sizeof(u32))
906 count++;
907
908	p_read_buf = kcalloc(count, sizeof(u32), GFP_KERNEL);	/* the read below fills count words */
909 if (!p_read_buf)
910 return -ENOMEM;
911 if (qlcnic_83xx_lock_flash(adapter) != 0) {
912 kfree(p_read_buf);
913 return -EIO;
914 }
915
916 ret = qlcnic_83xx_lockless_flash_read32(adapter, offset, p_read_buf,
917 count);
918
919 if (ret) {
920 qlcnic_83xx_unlock_flash(adapter);
921 kfree(p_read_buf);
922 return ret;
923 }
924
925 qlcnic_83xx_unlock_flash(adapter);
926 memcpy(buf, p_read_buf, size);
927 kfree(p_read_buf);
928
929 return size;
930}
931
932static int qlcnic_83xx_sysfs_flash_bulk_write(struct qlcnic_adapter *adapter,
933 char *buf, loff_t offset,
934 size_t size)
935{
936 int i, ret, count;
937 unsigned char *p_cache, *p_src;
938
939 p_cache = kcalloc(size, sizeof(unsigned char), GFP_KERNEL);
940 if (!p_cache)
941 return -ENOMEM;
942
943 memcpy(p_cache, buf, size);
944 p_src = p_cache;
945 count = size / sizeof(u32);
946
947 if (qlcnic_83xx_lock_flash(adapter) != 0) {
948 kfree(p_cache);
949 return -EIO;
950 }
951
952 if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
953 ret = qlcnic_83xx_enable_flash_write(adapter);
954 if (ret) {
955 kfree(p_cache);
956 qlcnic_83xx_unlock_flash(adapter);
957 return -EIO;
958 }
959 }
960
961 for (i = 0; i < count / QLC_83XX_FLASH_WRITE_MAX; i++) {
962 ret = qlcnic_83xx_flash_bulk_write(adapter, offset,
963 (u32 *)p_src,
964 QLC_83XX_FLASH_WRITE_MAX);
965
966 if (ret) {
967 if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
968 ret = qlcnic_83xx_disable_flash_write(adapter);
969 if (ret) {
970 kfree(p_cache);
971 qlcnic_83xx_unlock_flash(adapter);
972 return -EIO;
973 }
974 }
975
976 kfree(p_cache);
977 qlcnic_83xx_unlock_flash(adapter);
978 return -EIO;
979 }
980
981 p_src = p_src + sizeof(u32)*QLC_83XX_FLASH_WRITE_MAX;
982 offset = offset + sizeof(u32)*QLC_83XX_FLASH_WRITE_MAX;
983 }
984
985 if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
986 ret = qlcnic_83xx_disable_flash_write(adapter);
987 if (ret) {
988 kfree(p_cache);
989 qlcnic_83xx_unlock_flash(adapter);
990 return -EIO;
991 }
992 }
993
994 kfree(p_cache);
995 qlcnic_83xx_unlock_flash(adapter);
996
997 return 0;
998}
999
1000static int qlcnic_83xx_sysfs_flash_write(struct qlcnic_adapter *adapter,
1001 char *buf, loff_t offset, size_t size)
1002{
1003 int i, ret, count;
1004 unsigned char *p_cache, *p_src;
1005
1006 p_cache = kcalloc(size, sizeof(unsigned char), GFP_KERNEL);
1007 if (!p_cache)
1008 return -ENOMEM;
1009
1010 memcpy(p_cache, buf, size);
1011 p_src = p_cache;
1012 count = size / sizeof(u32);
1013
1014 if (qlcnic_83xx_lock_flash(adapter) != 0) {
1015 kfree(p_cache);
1016 return -EIO;
1017 }
1018
1019 if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
1020 ret = qlcnic_83xx_enable_flash_write(adapter);
1021 if (ret) {
1022 kfree(p_cache);
1023 qlcnic_83xx_unlock_flash(adapter);
1024 return -EIO;
1025 }
1026 }
1027
1028 for (i = 0; i < count; i++) {
1029 ret = qlcnic_83xx_flash_write32(adapter, offset, (u32 *)p_src);
1030 if (ret) {
1031 if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
1032 ret = qlcnic_83xx_disable_flash_write(adapter);
1033 if (ret) {
1034 kfree(p_cache);
1035 qlcnic_83xx_unlock_flash(adapter);
1036 return -EIO;
1037 }
1038 }
1039 kfree(p_cache);
1040 qlcnic_83xx_unlock_flash(adapter);
1041 return -EIO;
1042 }
1043
1044 p_src = p_src + sizeof(u32);
1045 offset = offset + sizeof(u32);
1046 }
1047
1048 if (adapter->ahw->fdt.mfg_id == adapter->flash_mfg_id) {
1049 ret = qlcnic_83xx_disable_flash_write(adapter);
1050 if (ret) {
1051 kfree(p_cache);
1052 qlcnic_83xx_unlock_flash(adapter);
1053 return -EIO;
1054 }
1055 }
1056
1057 kfree(p_cache);
1058 qlcnic_83xx_unlock_flash(adapter);
1059
1060 return 0;
1061}
1062
1063static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
1064 struct kobject *kobj,
1065 struct bin_attribute *attr,
1066 char *buf, loff_t offset,
1067 size_t size)
1068{
1069 int ret;
1070	static int flash_mode;	/* note: latched across writes and shared by all adapters */
1071 unsigned long data;
1072 struct device *dev = container_of(kobj, struct device, kobj);
1073 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
1074
1075 if (!buf)
1076 return QL_STATUS_INVALID_PARAM;
1077
1078 ret = kstrtoul(buf, 16, &data);
1079	if (ret) return ret;
1080 switch (data) {
1081 case QLC_83XX_FLASH_SECTOR_ERASE_CMD:
1082 flash_mode = QLC_83XX_ERASE_MODE;
1083 ret = qlcnic_83xx_erase_flash_sector(adapter, offset);
1084 if (ret) {
1085 dev_err(&adapter->pdev->dev,
1086 "%s failed at %d\n", __func__, __LINE__);
1087 return -EIO;
1088 }
1089 break;
1090
1091 case QLC_83XX_FLASH_BULK_WRITE_CMD:
1092 flash_mode = QLC_83XX_BULK_WRITE_MODE;
1093 break;
1094
1095 case QLC_83XX_FLASH_WRITE_CMD:
1096 flash_mode = QLC_83XX_WRITE_MODE;
1097 break;
1098 default:
1099 if (flash_mode == QLC_83XX_BULK_WRITE_MODE) {
1100 ret = qlcnic_83xx_sysfs_flash_bulk_write(adapter, buf,
1101 offset, size);
1102 if (ret) {
1103 dev_err(&adapter->pdev->dev,
1104 "%s failed at %d\n",
1105 __func__, __LINE__);
1106 return -EIO;
1107 }
1108 }
1109
1110 if (flash_mode == QLC_83XX_WRITE_MODE) {
1111 ret = qlcnic_83xx_sysfs_flash_write(adapter, buf,
1112 offset, size);
1113 if (ret) {
1114 dev_err(&adapter->pdev->dev,
1115 "%s failed at %d\n", __func__,
1116 __LINE__);
1117 return -EIO;
1118 }
1119 }
1120 }
1121
1122 return size;
1123}
1124
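Taken together, the two handlers above define a small protocol on the "flash" attribute: a write whose hex value matches one of the command words either erases the sector at the given offset or latches a write mode, and any other write is treated as payload for the latched mode. The command values themselves are not visible in this hunk, so the one used in the user-space sketch below is a placeholder:

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Placeholder: the real QLC_83XX_FLASH_BULK_WRITE_CMD value is not shown here. */
	static int flash_bulk_write(const char *attr, off_t off, const void *p,
				    size_t len, uint32_t bulk_cmd)
	{
		char mode[16];
		int n, fd = open(attr, O_WRONLY);

		if (fd < 0)
			return -1;
		n = snprintf(mode, sizeof(mode), "%x", bulk_cmd);
		if (pwrite(fd, mode, n, off) != n ||		/* latch bulk-write mode */
		    pwrite(fd, p, len, off) != (ssize_t)len) {	/* then send the payload */
			close(fd);
			return -1;
		}
		return close(fd);
	}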
1125static struct device_attribute dev_attr_bridged_mode = {
1126	.attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
1127	.show = qlcnic_show_bridged_mode,
@@ -960,6 +1196,13 @@ static struct bin_attribute bin_attr_pm_config = {
1196	.write = qlcnic_sysfs_write_pm_config,
1197};
1198
1199static struct bin_attribute bin_attr_flash = {
1200 .attr = {.name = "flash", .mode = (S_IRUGO | S_IWUSR)},
1201 .size = 0,
1202 .read = qlcnic_83xx_sysfs_flash_read_handler,
1203 .write = qlcnic_83xx_sysfs_flash_write_handler,
1204};
1205
1206void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
1207{
1208	struct device *dev = &adapter->pdev->dev;
@@ -1048,10 +1291,18 @@ void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter)
1291
1292void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *adapter)
1293{
1294 struct device *dev = &adapter->pdev->dev;
1295
1296	qlcnic_create_diag_entries(adapter);
1297
1298 if (sysfs_create_bin_file(&dev->kobj, &bin_attr_flash))
1299 dev_info(dev, "failed to create flash sysfs entry\n");
1300}
1301
1302void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *adapter)
1303{
1304 struct device *dev = &adapter->pdev->dev;
1305
1306	qlcnic_remove_diag_entries(adapter);
1307 sysfs_remove_bin_file(&dev->kobj, &bin_attr_flash);
1308}
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index b13ab544a7eb..1dd778a6f01e 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -1211,8 +1211,6 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1211						netdev_alloc_skb(qdev->ndev,
1212								 SMALL_BUFFER_SIZE);
1213				if (sbq_desc->p.skb == NULL) {
1214 netif_err(qdev, probe, qdev->ndev,
1215 "Couldn't get an skb.\n");
1216					rx_ring->sbq_clean_idx = clean_idx;
1217					return;
1218				}
@@ -1519,8 +1517,6 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1519
1520	skb = netdev_alloc_skb(ndev, length);
1521	if (!skb) {
1522 netif_err(qdev, drv, qdev->ndev,
1523 "Couldn't get an skb, need to unwind!.\n");
1524		rx_ring->rx_dropped++;
1525		put_page(lbq_desc->p.pg_chunk.page);
1526		return;
@@ -1605,8 +1601,6 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1605	/* Allocate new_skb and copy */
1606	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1607	if (new_skb == NULL) {
1608 netif_err(qdev, probe, qdev->ndev,
1609 "No skb available, drop the packet.\n");
1610		rx_ring->rx_dropped++;
1611		return;
1612	}
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 5b4103db70f5..e9dc84943cfc 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -224,11 +224,14 @@ static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
224			break;
225	}
226
227 if (limit < 0)
228 return -ETIMEDOUT;
229
230	return ioread16(ioaddr + MMRD);
231}
232
233/* Write a word of data to the PHY chip */
231static void r6040_phy_write(void __iomem *ioaddr, 234static int r6040_phy_write(void __iomem *ioaddr,
235			   int phy_addr, int reg, u16 val)
236{
237	int limit = MAC_DEF_TIMEOUT;
@@ -243,6 +246,8 @@ static void r6040_phy_write(void __iomem *ioaddr,
243		if (!(cmd & MDIO_WRITE))
244			break;
245	}
249
250 return (limit < 0) ? -ETIMEDOUT : 0;
251}
252
253static int r6040_mdiobus_read(struct mii_bus *bus, int phy_addr, int reg)
@@ -261,9 +266,7 @@ static int r6040_mdiobus_write(struct mii_bus *bus, int phy_addr,
261	struct r6040_private *lp = netdev_priv(dev);
262	void __iomem *ioaddr = lp->base;
263
264 r6040_phy_write(ioaddr, phy_addr, reg, value); 269 return r6040_phy_write(ioaddr, phy_addr, reg, value);
265
266 return 0;
267}
268
269static int r6040_mdiobus_reset(struct mii_bus *bus)
@@ -347,7 +350,6 @@ static int r6040_alloc_rxbufs(struct net_device *dev)
347	do {
348		skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
349		if (!skb) {
350 netdev_err(dev, "failed to alloc skb for rx\n");
351			rc = -ENOMEM;
352			goto err_exit;
353		}
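The r6040 change above makes both MDIO accessors report a stuck bus instead of silently returning stale data: the poll loop decrements a budget, and the caller now sees -ETIMEDOUT once it is exhausted. The general shape of the pattern, as a sketch:

	#include <errno.h>
	#include <stdint.h>

	static int poll_until_clear(uint16_t (*read_status)(void),
				    uint16_t busy_mask, int limit)
	{
		while (limit-- > 0) {
			if (!(read_status() & busy_mask))
				return 0;		/* device became ready */
		}
		return -ETIMEDOUT;			/* budget exhausted */
	}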
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 1276ac71353a..3ccedeb8aba0 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -2041,8 +2041,6 @@ keep_pkt:
2041
2042		netif_receive_skb (skb);
2043	} else {
2044 if (net_ratelimit())
2045 netdev_warn(dev, "Memory squeeze, dropping packet\n");
2046		dev->stats.rx_dropped++;
2047	}
2048	received++;
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index 9f2d416de750..d77d60ea8202 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -782,8 +782,6 @@ static void net_rx(struct net_device *dev)
782
783		skb = netdev_alloc_skb(dev, pkt_len + 2);
784		if (skb == NULL) {
785 printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n",
786 dev->name);
787			dev->stats.rx_dropped++;
788			goto done;
789		}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 4ecbe64a758d..9a1bc1a23854 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -47,7 +47,9 @@
47#define FIRMWARE_8402_1		"rtl_nic/rtl8402-1.fw"
48#define FIRMWARE_8411_1		"rtl_nic/rtl8411-1.fw"
49#define FIRMWARE_8106E_1	"rtl_nic/rtl8106e-1.fw"
50#define FIRMWARE_8168G_1 "rtl_nic/rtl8168g-1.fw" 50#define FIRMWARE_8106E_2 "rtl_nic/rtl8106e-2.fw"
51#define FIRMWARE_8168G_2 "rtl_nic/rtl8168g-2.fw"
52#define FIRMWARE_8168G_3 "rtl_nic/rtl8168g-3.fw"
53
54#ifdef RTL8169_DEBUG
55#define assert(expr) \
@@ -140,6 +142,8 @@ enum mac_version {
140	RTL_GIGA_MAC_VER_39,
141	RTL_GIGA_MAC_VER_40,
142	RTL_GIGA_MAC_VER_41,
145 RTL_GIGA_MAC_VER_42,
146 RTL_GIGA_MAC_VER_43,
147	RTL_GIGA_MAC_NONE = 0xff,
148};
149
@@ -262,10 +266,16 @@ static const struct {
262 _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1, 266 _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1,
263 JUMBO_1K, true), 267 JUMBO_1K, true),
264 [RTL_GIGA_MAC_VER_40] = 268 [RTL_GIGA_MAC_VER_40] =
265 _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_1, 269 _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_2,
270							JUMBO_9K, false),
271	[RTL_GIGA_MAC_VER_41] =
272		_R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K, false),
273 [RTL_GIGA_MAC_VER_42] =
274 _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_3,
275 JUMBO_9K, false),
276 [RTL_GIGA_MAC_VER_43] =
277 _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_2,
278 JUMBO_1K, true),
279};
280#undef _R
281
@@ -329,6 +339,7 @@ enum rtl_registers {
329#define RXCFG_FIFO_SHIFT	13
330					/* No threshold before first PCI xfer */
331#define RX_FIFO_THRESH			(7 << RXCFG_FIFO_SHIFT)
342#define RX_EARLY_OFF (1 << 11)
343#define RXCFG_DMA_SHIFT			8
344					/* Unlimited maximum PCI burst. */
345#define RX_DMA_BURST			(7 << RXCFG_DMA_SHIFT)
@@ -513,6 +524,7 @@ enum rtl_register_content {
513	PMEnable	= (1 << 0),	/* Power Management Enable */
514
515	/* Config2 register p. 25 */
527 ClkReqEn = (1 << 7), /* Clock Request Enable */
528	MSIEnable	= (1 << 5),	/* 8169 only. Reserved in the 8168. */
529	PCI_Clock_66MHz = 0x01,
530	PCI_Clock_33MHz = 0x00,
@@ -533,6 +545,7 @@ enum rtl_register_content {
533	Spi_en		= (1 << 3),
534	LanWake		= (1 << 1),	/* LanWake enable/disable */
535	PMEStatus	= (1 << 0),	/* PME status can be reset by PCI RST# */
548 ASPM_en = (1 << 0), /* ASPM enable */
549
550	/* TBICSR p.28 */
551	TBIReset	= 0x80000000,
@@ -814,7 +827,9 @@ MODULE_FIRMWARE(FIRMWARE_8168F_2);
814MODULE_FIRMWARE(FIRMWARE_8402_1);
815MODULE_FIRMWARE(FIRMWARE_8411_1);
816MODULE_FIRMWARE(FIRMWARE_8106E_1);
817MODULE_FIRMWARE(FIRMWARE_8168G_1); 830MODULE_FIRMWARE(FIRMWARE_8106E_2);
831MODULE_FIRMWARE(FIRMWARE_8168G_2);
832MODULE_FIRMWARE(FIRMWARE_8168G_3);
833
834static void rtl_lock_work(struct rtl8169_private *tp)
835{
@@ -1024,14 +1039,6 @@ static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
1024		(RTL_R32(GPHY_OCP) & 0xffff) : ~0;
1025}
1026
1027static void rtl_w1w0_phy_ocp(struct rtl8169_private *tp, int reg, int p, int m)
1028{
1029 int val;
1030
1031 val = r8168_phy_ocp_read(tp, reg);
1032 r8168_phy_ocp_write(tp, reg, (val | p) & ~m);
1033}
1034
1035static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
1036{
1037	void __iomem *ioaddr = tp->mmio_addr;
@@ -1077,6 +1084,21 @@ static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
1077	return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
1078}
1079
1087static void mac_mcu_write(struct rtl8169_private *tp, int reg, int value)
1088{
1089 if (reg == 0x1f) {
1090 tp->ocp_base = value << 4;
1091 return;
1092 }
1093
1094 r8168_mac_ocp_write(tp, tp->ocp_base + reg, value);
1095}
1096
1097static int mac_mcu_read(struct rtl8169_private *tp, int reg)
1098{
1099 return r8168_mac_ocp_read(tp, tp->ocp_base + reg);
1100}
1101
1102DECLARE_RTL_COND(rtl_phyar_cond)
1103{
1104	void __iomem *ioaddr = tp->mmio_addr;
@@ -2028,6 +2050,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
2028		int mac_version;
2029	} mac_info[] = {
2030		/* 8168G family. */
2053 { 0x7cf00000, 0x50900000, RTL_GIGA_MAC_VER_42 },
2054		{ 0x7cf00000, 0x4c100000,	RTL_GIGA_MAC_VER_41 },
2055		{ 0x7cf00000, 0x4c000000,	RTL_GIGA_MAC_VER_40 },
2056
@@ -2116,6 +2139,10 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
2116		netif_notice(tp, probe, dev,
2117			     "unknown MAC, using family default\n");
2118		tp->mac_version = default_version;
2142 } else if (tp->mac_version == RTL_GIGA_MAC_VER_42) {
2143 tp->mac_version = tp->mii.supports_gmii ?
2144 RTL_GIGA_MAC_VER_42 :
2145 RTL_GIGA_MAC_VER_43;
2146	}
2147}
2148
@@ -2142,9 +2169,7 @@ static void rtl_writephy_batch(struct rtl8169_private *tp,
2142#define PHY_DATA_OR		0x10000000
2143#define PHY_DATA_AND		0x20000000
2144#define PHY_BJMPN		0x30000000
2145#define PHY_READ_EFUSE 0x40000000 2172#define PHY_MDIO_CHG 0x40000000
2146#define PHY_READ_MAC_BYTE 0x50000000
2147#define PHY_WRITE_MAC_BYTE 0x60000000
2148#define PHY_CLEAR_READCOUNT	0x70000000
2149#define PHY_WRITE		0x80000000
2150#define PHY_READCOUNT_EQ_SKIP	0x90000000
@@ -2153,7 +2178,6 @@ static void rtl_writephy_batch(struct rtl8169_private *tp,
2153#define PHY_WRITE_PREVIOUS	0xc0000000
2154#define PHY_SKIPN		0xd0000000
2155#define PHY_DELAY_MS		0xe0000000
2156#define PHY_WRITE_ERI_WORD 0xf0000000
2157
2158struct fw_info {
2159	u32	magic;
@@ -2230,7 +2254,7 @@ static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
2230		case PHY_READ:
2231		case PHY_DATA_OR:
2232		case PHY_DATA_AND:
2233 case PHY_READ_EFUSE: 2257 case PHY_MDIO_CHG:
2258		case PHY_CLEAR_READCOUNT:
2259		case PHY_WRITE:
2260		case PHY_WRITE_PREVIOUS:
@@ -2261,9 +2285,6 @@ static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
2261			}
2262			break;
2263
2264 case PHY_READ_MAC_BYTE:
2265 case PHY_WRITE_MAC_BYTE:
2266 case PHY_WRITE_ERI_WORD:
2267		default:
2268			netif_err(tp, ifup, tp->dev,
2269				  "Invalid action 0x%08x\n", action);
@@ -2294,10 +2315,13 @@ out:
2294static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2295{
2296	struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
2318 struct mdio_ops org, *ops = &tp->mdio_ops;
2319	u32 predata, count;
2320	size_t index;
2321
2322	predata = count = 0;
2323 org.write = ops->write;
2324 org.read = ops->read;
2325
2326	for (index = 0; index < pa->size; ) {
2327		u32 action = le32_to_cpu(pa->code[index]);
@@ -2324,8 +2348,15 @@ static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2324		case PHY_BJMPN:
2325			index -= regno;
2326			break;
2327 case PHY_READ_EFUSE: 2351 case PHY_MDIO_CHG:
2328 predata = rtl8168d_efuse_read(tp, regno); 2352 if (data == 0) {
2353 ops->write = org.write;
2354 ops->read = org.read;
2355 } else if (data == 1) {
2356 ops->write = mac_mcu_write;
2357 ops->read = mac_mcu_read;
2358 }
2359
2360			index++;
2361			break;
2362		case PHY_CLEAR_READCOUNT:
@@ -2361,13 +2392,13 @@ static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2361			index++;
2362			break;
2363
2364 case PHY_READ_MAC_BYTE:
2365 case PHY_WRITE_MAC_BYTE:
2366 case PHY_WRITE_ERI_WORD:
2367		default:
2368			BUG();
2369		}
2370	}
2399
2400 ops->write = org.write;
2401 ops->read = org.read;
2402}
2403
2404static void rtl_release_firmware(struct rtl8169_private *tp)
@@ -3368,51 +3399,68 @@ static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
3368
3369static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
3370{
3371 static const u16 mac_ocp_patch[] = { 3402 rtl_apply_firmware(tp);
3372 0xe008, 0xe01b, 0xe01d, 0xe01f,
3373 0xe021, 0xe023, 0xe025, 0xe027,
3374 0x49d2, 0xf10d, 0x766c, 0x49e2,
3375 0xf00a, 0x1ec0, 0x8ee1, 0xc60a,
3376
3377 0x77c0, 0x4870, 0x9fc0, 0x1ea0,
3378 0xc707, 0x8ee1, 0x9d6c, 0xc603,
3379 0xbe00, 0xb416, 0x0076, 0xe86c,
3380 0xc602, 0xbe00, 0x0000, 0xc602,
3381
3382 0xbe00, 0x0000, 0xc602, 0xbe00,
3383 0x0000, 0xc602, 0xbe00, 0x0000,
3384 0xc602, 0xbe00, 0x0000, 0xc602,
3385 0xbe00, 0x0000, 0xc602, 0xbe00,
3386
3387 0x0000, 0x0000, 0x0000, 0x0000
3388 };
3389 u32 i;
3390 3403
3391 /* Patch code for GPHY reset */ 3404 rtl_writephy(tp, 0x1f, 0x0a46);
3392 for (i = 0; i < ARRAY_SIZE(mac_ocp_patch); i++) 3405 if (rtl_readphy(tp, 0x10) & 0x0100) {
3393 r8168_mac_ocp_write(tp, 0xf800 + 2*i, mac_ocp_patch[i]); 3406 rtl_writephy(tp, 0x1f, 0x0bcc);
3394 r8168_mac_ocp_write(tp, 0xfc26, 0x8000); 3407 rtl_w1w0_phy(tp, 0x12, 0x0000, 0x8000);
3395 r8168_mac_ocp_write(tp, 0xfc28, 0x0075); 3408 } else {
3409 rtl_writephy(tp, 0x1f, 0x0bcc);
3410 rtl_w1w0_phy(tp, 0x12, 0x8000, 0x0000);
3411 }
3396 3412
3397 rtl_apply_firmware(tp); 3413 rtl_writephy(tp, 0x1f, 0x0a46);
3414 if (rtl_readphy(tp, 0x13) & 0x0100) {
3415 rtl_writephy(tp, 0x1f, 0x0c41);
3416 rtl_w1w0_phy(tp, 0x15, 0x0002, 0x0000);
3417 } else {
3418 rtl_writephy(tp, 0x1f, 0x0c41);
3419 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0002);
3420 }
3398 3421
3399 if (r8168_phy_ocp_read(tp, 0xa460) & 0x0100) 3422 /* Enable PHY auto speed down */
3400 rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x8000); 3423 rtl_writephy(tp, 0x1f, 0x0a44);
3401 else 3424 rtl_w1w0_phy(tp, 0x11, 0x000c, 0x0000);
3402 rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x8000, 0x0000); 3425
3426 rtl_writephy(tp, 0x1f, 0x0bcc);
3427 rtl_w1w0_phy(tp, 0x14, 0x0100, 0x0000);
3428 rtl_writephy(tp, 0x1f, 0x0a44);
3429 rtl_w1w0_phy(tp, 0x11, 0x00c0, 0x0000);
3430 rtl_writephy(tp, 0x1f, 0x0a43);
3431 rtl_writephy(tp, 0x13, 0x8084);
3432 rtl_w1w0_phy(tp, 0x14, 0x0000, 0x6000);
3433 rtl_w1w0_phy(tp, 0x10, 0x1003, 0x0000);
3434
3435 /* EEE auto-fallback function */
3436 rtl_writephy(tp, 0x1f, 0x0a4b);
3437 rtl_w1w0_phy(tp, 0x11, 0x0004, 0x0000);
3438
3439 /* Enable UC LPF tune function */
3440 rtl_writephy(tp, 0x1f, 0x0a43);
3441 rtl_writephy(tp, 0x13, 0x8012);
3442 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3403 3443
3404 if (r8168_phy_ocp_read(tp, 0xa466) & 0x0100) 3444 rtl_writephy(tp, 0x1f, 0x0c42);
3405 rtl_w1w0_phy_ocp(tp, 0xc41a, 0x0002, 0x0000); 3445 rtl_w1w0_phy(tp, 0x11, 0x4000, 0x2000);
3406 else
3407 rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x0002);
3408 3446
3409 rtl_w1w0_phy_ocp(tp, 0xa442, 0x000c, 0x0000); 3447 /* Improve SWR Efficiency */
3410 rtl_w1w0_phy_ocp(tp, 0xa4b2, 0x0004, 0x0000); 3448 rtl_writephy(tp, 0x1f, 0x0bcd);
3449 rtl_writephy(tp, 0x14, 0x5065);
3450 rtl_writephy(tp, 0x14, 0xd065);
3451 rtl_writephy(tp, 0x1f, 0x0bc8);
3452 rtl_writephy(tp, 0x11, 0x5655);
3453 rtl_writephy(tp, 0x1f, 0x0bcd);
3454 rtl_writephy(tp, 0x14, 0x1065);
3455 rtl_writephy(tp, 0x14, 0x9065);
3456 rtl_writephy(tp, 0x14, 0x1065);
3411 3457
3412 r8168_phy_ocp_write(tp, 0xa436, 0x8012); 3458 rtl_writephy(tp, 0x1f, 0x0000);
3413 rtl_w1w0_phy_ocp(tp, 0xa438, 0x8000, 0x0000); 3459}
3414 3460
3415 rtl_w1w0_phy_ocp(tp, 0xc422, 0x4000, 0x2000); 3461static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp)
3462{
3463 rtl_apply_firmware(tp);
3464}
3465
3466static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
@@ -3600,6 +3648,10 @@ static void rtl_hw_phy_config(struct net_device *dev)
3600	case RTL_GIGA_MAC_VER_40:
3601		rtl8168g_1_hw_phy_config(tp);
3602		break;
3651 case RTL_GIGA_MAC_VER_42:
3652 case RTL_GIGA_MAC_VER_43:
3653 rtl8168g_2_hw_phy_config(tp);
3654 break;
3655
3656	case RTL_GIGA_MAC_VER_41:
3657	default:
@@ -3808,6 +3860,8 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp)
3808		break;
3809	case RTL_GIGA_MAC_VER_40:
3810	case RTL_GIGA_MAC_VER_41:
3863 case RTL_GIGA_MAC_VER_42:
3864 case RTL_GIGA_MAC_VER_43:
3865		ops->write = r8168g_mdio_write;
3866		ops->read = r8168g_mdio_read;
3867		break;
@@ -3859,6 +3913,8 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
3859	case RTL_GIGA_MAC_VER_39:
3860	case RTL_GIGA_MAC_VER_40:
3861	case RTL_GIGA_MAC_VER_41:
3916 case RTL_GIGA_MAC_VER_42:
3917 case RTL_GIGA_MAC_VER_43:
3918		RTL_W32(RxConfig, RTL_R32(RxConfig) |
3919			AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
3920		break;
@@ -3966,6 +4022,8 @@ static void r8168_phy_power_down(struct rtl8169_private *tp)
3966	switch (tp->mac_version) {
3967	case RTL_GIGA_MAC_VER_32:
3968	case RTL_GIGA_MAC_VER_33:
4025 case RTL_GIGA_MAC_VER_40:
4026 case RTL_GIGA_MAC_VER_41:
4027		rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
4028		break;
4029
@@ -4027,6 +4085,11 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
4027	case RTL_GIGA_MAC_VER_33:
4028		RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
4029		break;
4088 case RTL_GIGA_MAC_VER_40:
4089 case RTL_GIGA_MAC_VER_41:
4090 rtl_w1w0_eri(tp, 0x1a8, ERIAR_MASK_1111, 0x00000000,
4091 0xfc000000, ERIAR_EXGMAC);
4092 break;
4093	}
4094}
4095
@@ -4044,6 +4107,11 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
4044	case RTL_GIGA_MAC_VER_33:
4045		RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
4046		break;
4110 case RTL_GIGA_MAC_VER_40:
4111 case RTL_GIGA_MAC_VER_41:
4112 rtl_w1w0_eri(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000,
4113 0x00000000, ERIAR_EXGMAC);
4114 break;
4115	}
4116
4117	r8168_phy_power_up(tp);
@@ -4080,6 +4148,7 @@ static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
4080	case RTL_GIGA_MAC_VER_30:
4081	case RTL_GIGA_MAC_VER_37:
4082	case RTL_GIGA_MAC_VER_39:
4151 case RTL_GIGA_MAC_VER_43:
4152		ops->down	= r810x_pll_power_down;
4153		ops->up		= r810x_pll_power_up;
4154		break;
@@ -4107,6 +4176,7 @@ static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
4107	case RTL_GIGA_MAC_VER_38:
4108	case RTL_GIGA_MAC_VER_40:
4109	case RTL_GIGA_MAC_VER_41:
4179 case RTL_GIGA_MAC_VER_42:
4180		ops->down	= r8168_pll_power_down;
4181		ops->up		= r8168_pll_power_up;
4182		break;
@@ -4149,6 +4219,12 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
4149	case RTL_GIGA_MAC_VER_34:
4150		RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
4151		break;
4222 case RTL_GIGA_MAC_VER_40:
4223 case RTL_GIGA_MAC_VER_41:
4224 case RTL_GIGA_MAC_VER_42:
4225 case RTL_GIGA_MAC_VER_43:
4226 RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
4227 break;
4228	default:
4229		RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
4230		break;
@@ -4305,6 +4381,8 @@ static void rtl_init_jumbo_ops(struct rtl8169_private *tp)
4305	 */
4306	case RTL_GIGA_MAC_VER_40:
4307	case RTL_GIGA_MAC_VER_41:
4384 case RTL_GIGA_MAC_VER_42:
4385 case RTL_GIGA_MAC_VER_43:
4386	default:
4387		ops->disable	= NULL;
4388		ops->enable	= NULL;
@@ -4412,6 +4490,8 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
4412	    tp->mac_version == RTL_GIGA_MAC_VER_37 ||
4413	    tp->mac_version == RTL_GIGA_MAC_VER_40 ||
4414	    tp->mac_version == RTL_GIGA_MAC_VER_41 ||
4493 tp->mac_version == RTL_GIGA_MAC_VER_42 ||
4494 tp->mac_version == RTL_GIGA_MAC_VER_43 ||
4495	    tp->mac_version == RTL_GIGA_MAC_VER_38) {
4496		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
4497		rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
@@ -5127,6 +5207,8 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
5127	void __iomem *ioaddr = tp->mmio_addr;
5128	struct pci_dev *pdev = tp->pci_dev;
5129
5210 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5211
5212	rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
5213	rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
5214	rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
@@ -5138,6 +5220,7 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
5138
5139	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5140	rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5223 rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f, ERIAR_EXGMAC);
5224
5225	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5226	RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
@@ -5149,7 +5232,26 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
5149	/* Adjust EEE LED frequency */
5150	RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
5151
5152 rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x02, ERIAR_EXGMAC); 5235 rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);
5236 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
5237}
5238
5239static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
5240{
5241 void __iomem *ioaddr = tp->mmio_addr;
5242 static const struct ephy_info e_info_8168g_2[] = {
5243 { 0x00, 0x0000, 0x0008 },
5244 { 0x0c, 0x3df0, 0x0200 },
5245 { 0x19, 0xffff, 0xfc00 },
5246 { 0x1e, 0xffff, 0x20eb }
5247 };
5248
5249 rtl_hw_start_8168g_1(tp);
5250
5251 /* disable aspm and clock request before access ephy */
5252 RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
5253 RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
5254 rtl_ephy_init(tp, e_info_8168g_2, ARRAY_SIZE(e_info_8168g_2));
5255}
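e_info_8168g_2[] above is a table of { offset, mask, bits } triples consumed by rtl_ephy_init(); each entry appears to be applied as a read-modify-write on the EPHY register, clearing the masked bits and OR-ing in the new ones. A sketch of that update rule (assumed from the table shape, not quoted from the driver):

	#include <stdint.h>

	static inline uint16_t ephy_apply(uint16_t old, uint16_t bits, uint16_t mask)
	{
		return (old & ~mask) | bits;	/* { 0x19, 0xffff, 0xfc00 } -> 0xfc00 */
	}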
5256
5257static void rtl_hw_start_8168(struct net_device *dev)
@@ -5177,10 +5279,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
5177
5178	rtl_set_rx_tx_desc_registers(tp, ioaddr);
5179
5180 rtl_set_rx_mode(dev); 5282 rtl_set_rx_tx_config_registers(tp);
5181
5182 RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
5183 (InterFrameGap << TxInterFrameGapShift));
5184
5185	RTL_R8(IntrMask);
5186
@@ -5257,6 +5356,9 @@ static void rtl_hw_start_8168(struct net_device *dev)
5257	case RTL_GIGA_MAC_VER_41:
5258		rtl_hw_start_8168g_1(tp);
5259		break;
5359 case RTL_GIGA_MAC_VER_42:
5360 rtl_hw_start_8168g_2(tp);
5361 break;
5362
5363	default:
5364		printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
@@ -5264,9 +5366,11 @@ static void rtl_hw_start_8168(struct net_device *dev)
5264		break;
5265	}
5266
5369 RTL_W8(Cfg9346, Cfg9346_Lock);
5370
5371	RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5372
5269 RTL_W8(Cfg9346, Cfg9346_Lock); 5373 rtl_set_rx_mode(dev);
5374
5375	RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
5376}
@@ -5424,6 +5528,17 @@ static void rtl_hw_start_8101(struct net_device *dev)
5424
5425	RTL_W8(Cfg9346, Cfg9346_Unlock);
5426
5531 RTL_W8(MaxTxPacketSize, TxPacketMax);
5532
5533 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
5534
5535 tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
5536 RTL_W16(CPlusCmd, tp->cp_cmd);
5537
5538 rtl_set_rx_tx_desc_registers(tp, ioaddr);
5539
5540 rtl_set_rx_tx_config_registers(tp);
5541
5542	switch (tp->mac_version) {
5543	case RTL_GIGA_MAC_VER_07:
5544		rtl_hw_start_8102e_1(tp);
@@ -5451,28 +5566,21 @@ static void rtl_hw_start_8101(struct net_device *dev)
5451 case RTL_GIGA_MAC_VER_39: 5566 case RTL_GIGA_MAC_VER_39:
5452 rtl_hw_start_8106(tp); 5567 rtl_hw_start_8106(tp);
5453 break; 5568 break;
5569 case RTL_GIGA_MAC_VER_43:
5570 rtl_hw_start_8168g_2(tp);
5571 break;
5454 } 5572 }
5455 5573
5456 RTL_W8(Cfg9346, Cfg9346_Lock); 5574 RTL_W8(Cfg9346, Cfg9346_Lock);
5457 5575
5458 RTL_W8(MaxTxPacketSize, TxPacketMax);
5459
5460 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
5461
5462 tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
5463 RTL_W16(CPlusCmd, tp->cp_cmd);
5464
5465 RTL_W16(IntrMitigate, 0x0000); 5576 RTL_W16(IntrMitigate, 0x0000);
5466 5577
5467 rtl_set_rx_tx_desc_registers(tp, ioaddr);
5468
5469 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); 5578 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5470 rtl_set_rx_tx_config_registers(tp);
5471
5472 RTL_R8(IntrMask);
5473 5579
5474 rtl_set_rx_mode(dev); 5580 rtl_set_rx_mode(dev);
5475 5581
5582 RTL_R8(IntrMask);
5583
5476 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); 5584 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
5477} 5585}
5478 5586
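The two hunks above move the 8168 start path onto the same bracket the other paths use: unlock the config registers, program descriptors and DMA parameters, re-lock, enable the DMA engines, and only then program the RX filter. A condensed sketch of that shared ordering, using the helper names from the hunks; this is an outline of the sequence, not a complete start routine:

/* Condensed init ordering shared by the 8168 and 8101 start paths. */
RTL_W8(Cfg9346, Cfg9346_Unlock);	/* open config registers */
rtl_set_rx_max_size(ioaddr, rx_buf_sz);
rtl_set_rx_tx_desc_registers(tp, ioaddr);
rtl_set_rx_tx_config_registers(tp);	/* TX burst/IFG and RX config */
/* ... chip-specific rtl_hw_start_*() hook runs here ... */
RTL_W8(Cfg9346, Cfg9346_Lock);		/* close config registers */
RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);	/* start the DMA engines */
rtl_set_rx_mode(dev);			/* program the RX filter last */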
@@ -6744,6 +6852,8 @@ static void rtl_hw_initialize(struct rtl8169_private *tp)
6744 switch (tp->mac_version) { 6852 switch (tp->mac_version) {
6745 case RTL_GIGA_MAC_VER_40: 6853 case RTL_GIGA_MAC_VER_40:
6746 case RTL_GIGA_MAC_VER_41: 6854 case RTL_GIGA_MAC_VER_41:
6855 case RTL_GIGA_MAC_VER_42:
6856 case RTL_GIGA_MAC_VER_43:
6747 rtl_hw_init_8168g(tp); 6857 rtl_hw_init_8168g(tp);
6748 break; 6858 break;
6749 6859
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 6ed333fe5c04..a7499cbf4503 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2,7 +2,8 @@
2 * SuperH Ethernet device driver 2 * SuperH Ethernet device driver
3 * 3 *
4 * Copyright (C) 2006-2012 Nobuhiro Iwamatsu 4 * Copyright (C) 2006-2012 Nobuhiro Iwamatsu
5 * Copyright (C) 2008-2012 Renesas Solutions Corp. 5 * Copyright (C) 2008-2013 Renesas Solutions Corp.
6 * Copyright (C) 2013 Cogent Embedded, Inc.
6 * 7 *
7 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
8 * under the terms and conditions of the GNU General Public License, 9 * under the terms and conditions of the GNU General Public License,
@@ -49,6 +50,269 @@
49 NETIF_MSG_RX_ERR| \ 50 NETIF_MSG_RX_ERR| \
50 NETIF_MSG_TX_ERR) 51 NETIF_MSG_TX_ERR)
51 52
53static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
54 [EDSR] = 0x0000,
55 [EDMR] = 0x0400,
56 [EDTRR] = 0x0408,
57 [EDRRR] = 0x0410,
58 [EESR] = 0x0428,
59 [EESIPR] = 0x0430,
60 [TDLAR] = 0x0010,
61 [TDFAR] = 0x0014,
62 [TDFXR] = 0x0018,
63 [TDFFR] = 0x001c,
64 [RDLAR] = 0x0030,
65 [RDFAR] = 0x0034,
66 [RDFXR] = 0x0038,
67 [RDFFR] = 0x003c,
68 [TRSCER] = 0x0438,
69 [RMFCR] = 0x0440,
70 [TFTR] = 0x0448,
71 [FDR] = 0x0450,
72 [RMCR] = 0x0458,
73 [RPADIR] = 0x0460,
74 [FCFTR] = 0x0468,
75 [CSMR] = 0x04E4,
76
77 [ECMR] = 0x0500,
78 [ECSR] = 0x0510,
79 [ECSIPR] = 0x0518,
80 [PIR] = 0x0520,
81 [PSR] = 0x0528,
82 [PIPR] = 0x052c,
83 [RFLR] = 0x0508,
84 [APR] = 0x0554,
85 [MPR] = 0x0558,
86 [PFTCR] = 0x055c,
87 [PFRCR] = 0x0560,
88 [TPAUSER] = 0x0564,
89 [GECMR] = 0x05b0,
90 [BCULR] = 0x05b4,
91 [MAHR] = 0x05c0,
92 [MALR] = 0x05c8,
93 [TROCR] = 0x0700,
94 [CDCR] = 0x0708,
95 [LCCR] = 0x0710,
96 [CEFCR] = 0x0740,
97 [FRECR] = 0x0748,
98 [TSFRCR] = 0x0750,
99 [TLFRCR] = 0x0758,
100 [RFCR] = 0x0760,
101 [CERCR] = 0x0768,
102 [CEECR] = 0x0770,
103 [MAFCR] = 0x0778,
104 [RMII_MII] = 0x0790,
105
106 [ARSTR] = 0x0000,
107 [TSU_CTRST] = 0x0004,
108 [TSU_FWEN0] = 0x0010,
109 [TSU_FWEN1] = 0x0014,
110 [TSU_FCM] = 0x0018,
111 [TSU_BSYSL0] = 0x0020,
112 [TSU_BSYSL1] = 0x0024,
113 [TSU_PRISL0] = 0x0028,
114 [TSU_PRISL1] = 0x002c,
115 [TSU_FWSL0] = 0x0030,
116 [TSU_FWSL1] = 0x0034,
117 [TSU_FWSLC] = 0x0038,
118 [TSU_QTAG0] = 0x0040,
119 [TSU_QTAG1] = 0x0044,
120 [TSU_FWSR] = 0x0050,
121 [TSU_FWINMK] = 0x0054,
122 [TSU_ADQT0] = 0x0048,
123 [TSU_ADQT1] = 0x004c,
124 [TSU_VTAG0] = 0x0058,
125 [TSU_VTAG1] = 0x005c,
126 [TSU_ADSBSY] = 0x0060,
127 [TSU_TEN] = 0x0064,
128 [TSU_POST1] = 0x0070,
129 [TSU_POST2] = 0x0074,
130 [TSU_POST3] = 0x0078,
131 [TSU_POST4] = 0x007c,
132 [TSU_ADRH0] = 0x0100,
133 [TSU_ADRL0] = 0x0104,
134 [TSU_ADRH31] = 0x01f8,
135 [TSU_ADRL31] = 0x01fc,
136
137 [TXNLCR0] = 0x0080,
138 [TXALCR0] = 0x0084,
139 [RXNLCR0] = 0x0088,
140 [RXALCR0] = 0x008c,
141 [FWNLCR0] = 0x0090,
142 [FWALCR0] = 0x0094,
143 [TXNLCR1] = 0x00a0,
144 [TXALCR1] = 0x00a0,
145 [RXNLCR1] = 0x00a8,
146 [RXALCR1] = 0x00ac,
147 [FWNLCR1] = 0x00b0,
148 [FWALCR1] = 0x00b4,
149};
150
151static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
152 [ECMR] = 0x0300,
153 [RFLR] = 0x0308,
154 [ECSR] = 0x0310,
155 [ECSIPR] = 0x0318,
156 [PIR] = 0x0320,
157 [PSR] = 0x0328,
158 [RDMLR] = 0x0340,
159 [IPGR] = 0x0350,
160 [APR] = 0x0354,
161 [MPR] = 0x0358,
162 [RFCF] = 0x0360,
163 [TPAUSER] = 0x0364,
164 [TPAUSECR] = 0x0368,
165 [MAHR] = 0x03c0,
166 [MALR] = 0x03c8,
167 [TROCR] = 0x03d0,
168 [CDCR] = 0x03d4,
169 [LCCR] = 0x03d8,
170 [CNDCR] = 0x03dc,
171 [CEFCR] = 0x03e4,
172 [FRECR] = 0x03e8,
173 [TSFRCR] = 0x03ec,
174 [TLFRCR] = 0x03f0,
175 [RFCR] = 0x03f4,
176 [MAFCR] = 0x03f8,
177
178 [EDMR] = 0x0200,
179 [EDTRR] = 0x0208,
180 [EDRRR] = 0x0210,
181 [TDLAR] = 0x0218,
182 [RDLAR] = 0x0220,
183 [EESR] = 0x0228,
184 [EESIPR] = 0x0230,
185 [TRSCER] = 0x0238,
186 [RMFCR] = 0x0240,
187 [TFTR] = 0x0248,
188 [FDR] = 0x0250,
189 [RMCR] = 0x0258,
190 [TFUCR] = 0x0264,
191 [RFOCR] = 0x0268,
192 [FCFTR] = 0x0270,
193 [TRIMD] = 0x027c,
194};
195
196static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
197 [ECMR] = 0x0100,
198 [RFLR] = 0x0108,
199 [ECSR] = 0x0110,
200 [ECSIPR] = 0x0118,
201 [PIR] = 0x0120,
202 [PSR] = 0x0128,
203 [RDMLR] = 0x0140,
204 [IPGR] = 0x0150,
205 [APR] = 0x0154,
206 [MPR] = 0x0158,
207 [TPAUSER] = 0x0164,
208 [RFCF] = 0x0160,
209 [TPAUSECR] = 0x0168,
210 [BCFRR] = 0x016c,
211 [MAHR] = 0x01c0,
212 [MALR] = 0x01c8,
213 [TROCR] = 0x01d0,
214 [CDCR] = 0x01d4,
215 [LCCR] = 0x01d8,
216 [CNDCR] = 0x01dc,
217 [CEFCR] = 0x01e4,
218 [FRECR] = 0x01e8,
219 [TSFRCR] = 0x01ec,
220 [TLFRCR] = 0x01f0,
221 [RFCR] = 0x01f4,
222 [MAFCR] = 0x01f8,
223 [RTRATE] = 0x01fc,
224
225 [EDMR] = 0x0000,
226 [EDTRR] = 0x0008,
227 [EDRRR] = 0x0010,
228 [TDLAR] = 0x0018,
229 [RDLAR] = 0x0020,
230 [EESR] = 0x0028,
231 [EESIPR] = 0x0030,
232 [TRSCER] = 0x0038,
233 [RMFCR] = 0x0040,
234 [TFTR] = 0x0048,
235 [FDR] = 0x0050,
236 [RMCR] = 0x0058,
237 [TFUCR] = 0x0064,
238 [RFOCR] = 0x0068,
239 [FCFTR] = 0x0070,
240 [RPADIR] = 0x0078,
241 [TRIMD] = 0x007c,
242 [RBWAR] = 0x00c8,
243 [RDFAR] = 0x00cc,
244 [TBRAR] = 0x00d4,
245 [TDFAR] = 0x00d8,
246};
247
248static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
249 [ECMR] = 0x0160,
250 [ECSR] = 0x0164,
251 [ECSIPR] = 0x0168,
252 [PIR] = 0x016c,
253 [MAHR] = 0x0170,
254 [MALR] = 0x0174,
255 [RFLR] = 0x0178,
256 [PSR] = 0x017c,
257 [TROCR] = 0x0180,
258 [CDCR] = 0x0184,
259 [LCCR] = 0x0188,
260 [CNDCR] = 0x018c,
261 [CEFCR] = 0x0194,
262 [FRECR] = 0x0198,
263 [TSFRCR] = 0x019c,
264 [TLFRCR] = 0x01a0,
265 [RFCR] = 0x01a4,
266 [MAFCR] = 0x01a8,
267 [IPGR] = 0x01b4,
268 [APR] = 0x01b8,
269 [MPR] = 0x01bc,
270 [TPAUSER] = 0x01c4,
271 [BCFR] = 0x01cc,
272
273 [ARSTR] = 0x0000,
274 [TSU_CTRST] = 0x0004,
275 [TSU_FWEN0] = 0x0010,
276 [TSU_FWEN1] = 0x0014,
277 [TSU_FCM] = 0x0018,
278 [TSU_BSYSL0] = 0x0020,
279 [TSU_BSYSL1] = 0x0024,
280 [TSU_PRISL0] = 0x0028,
281 [TSU_PRISL1] = 0x002c,
282 [TSU_FWSL0] = 0x0030,
283 [TSU_FWSL1] = 0x0034,
284 [TSU_FWSLC] = 0x0038,
285 [TSU_QTAGM0] = 0x0040,
286 [TSU_QTAGM1] = 0x0044,
287 [TSU_ADQT0] = 0x0048,
288 [TSU_ADQT1] = 0x004c,
289 [TSU_FWSR] = 0x0050,
290 [TSU_FWINMK] = 0x0054,
291 [TSU_ADSBSY] = 0x0060,
292 [TSU_TEN] = 0x0064,
293 [TSU_POST1] = 0x0070,
294 [TSU_POST2] = 0x0074,
295 [TSU_POST3] = 0x0078,
296 [TSU_POST4] = 0x007c,
297
298 [TXNLCR0] = 0x0080,
299 [TXALCR0] = 0x0084,
300 [RXNLCR0] = 0x0088,
301 [RXALCR0] = 0x008c,
302 [FWNLCR0] = 0x0090,
303 [FWALCR0] = 0x0094,
304 [TXNLCR1] = 0x00a0,
305 [TXALCR1] = 0x00a0,
306 [RXNLCR1] = 0x00a8,
307 [RXALCR1] = 0x00ac,
308 [FWNLCR1] = 0x00b0,
309 [FWALCR1] = 0x00b4,
310
311 [TSU_ADRH0] = 0x0100,
312 [TSU_ADRL0] = 0x0104,
313 [TSU_ADRL31] = 0x01fc,
314};
315
52#if defined(CONFIG_CPU_SUBTYPE_SH7734) || \ 316#if defined(CONFIG_CPU_SUBTYPE_SH7734) || \
53 defined(CONFIG_CPU_SUBTYPE_SH7763) || \ 317 defined(CONFIG_CPU_SUBTYPE_SH7763) || \
54 defined(CONFIG_ARCH_R8A7740) 318 defined(CONFIG_ARCH_R8A7740)
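Each table moved into sh_eth.c above maps the shared register enum to the offsets of one controller family, so a single accessor can serve every variant by indexing mdp->reg_offset. A standalone sketch of the lookup; the enum values and offsets here are an illustrative subset, not the driver's full set:

#include <stdint.h>
#include <stdio.h>

enum { ECMR, EDMR, MAX_REG };	/* illustrative subset of the register enum */

static const uint16_t offs_gigabit[MAX_REG]  = { [ECMR] = 0x0500, [EDMR] = 0x0400 };
static const uint16_t offs_fast_sh4[MAX_REG] = { [ECMR] = 0x0100, [EDMR] = 0x0000 };

/* One accessor for every variant: base address plus per-family offset. */
static inline volatile uint32_t *reg_addr(uint8_t *base,
					  const uint16_t *offs, int reg)
{
	return (volatile uint32_t *)(base + offs[reg]);
}

int main(void)
{
	static uint32_t mmio[0x400];	/* stands in for the ioremap()ed window */

	*reg_addr((uint8_t *)mmio, offs_gigabit, ECMR) = 1;
	printf("gigabit ECMR at +0x%04x\n", offs_gigabit[ECMR]);
	return 0;
}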
@@ -78,7 +342,7 @@ static void sh_eth_select_mii(struct net_device *ndev)
78#endif 342#endif
79 343
80/* There is CPU dependent code */ 344/* There is CPU dependent code */
81#if defined(CONFIG_CPU_SUBTYPE_SH7724) || defined(CONFIG_ARCH_R8A7779) 345#if defined(CONFIG_ARCH_R8A7779)
82#define SH_ETH_RESET_DEFAULT 1 346#define SH_ETH_RESET_DEFAULT 1
83static void sh_eth_set_duplex(struct net_device *ndev) 347static void sh_eth_set_duplex(struct net_device *ndev)
84{ 348{
@@ -93,18 +357,60 @@ static void sh_eth_set_duplex(struct net_device *ndev)
93static void sh_eth_set_rate(struct net_device *ndev) 357static void sh_eth_set_rate(struct net_device *ndev)
94{ 358{
95 struct sh_eth_private *mdp = netdev_priv(ndev); 359 struct sh_eth_private *mdp = netdev_priv(ndev);
96 unsigned int bits = ECMR_RTM;
97 360
98#if defined(CONFIG_ARCH_R8A7779) 361 switch (mdp->speed) {
99 bits |= ECMR_ELB; 362 case 10: /* 10BASE */
100#endif 363 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR);
364 break;
365 case 100:/* 100BASE */
366 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR);
367 break;
368 default:
369 break;
370 }
371}
372
373/* R8A7779 */
374static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
375 .set_duplex = sh_eth_set_duplex,
376 .set_rate = sh_eth_set_rate,
377
378 .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
379 .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
380 .eesipr_value = 0x01ff009f,
381
382 .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
383 .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
384 EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
385 .tx_error_check = EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
386
387 .apr = 1,
388 .mpr = 1,
389 .tpauser = 1,
390 .hw_swap = 1,
391};
392#elif defined(CONFIG_CPU_SUBTYPE_SH7724)
393#define SH_ETH_RESET_DEFAULT 1
394static void sh_eth_set_duplex(struct net_device *ndev)
395{
396 struct sh_eth_private *mdp = netdev_priv(ndev);
397
398 if (mdp->duplex) /* Full */
399 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
400 else /* Half */
401 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
402}
403
404static void sh_eth_set_rate(struct net_device *ndev)
405{
406 struct sh_eth_private *mdp = netdev_priv(ndev);
101 407
102 switch (mdp->speed) { 408 switch (mdp->speed) {
103 case 10: /* 10BASE */ 409 case 10: /* 10BASE */
104 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~bits, ECMR); 410 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
105 break; 411 break;
106 case 100:/* 100BASE */ 412 case 100:/* 100BASE */
107 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | bits, ECMR); 413 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
108 break; 414 break;
109 default: 415 default:
110 break; 416 break;
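After the split above, each SoC's set_rate hook touches only the rate bit that exists on that part (ECMR_ELB on R8A7779, ECMR_RTM on SH7724) with a plain read-modify-write of ECMR. A minimal standalone sketch of the pattern; the bit position is illustrative:

#include <stdint.h>
#include <stdio.h>

#define ECMR_ELB (1u << 12)	/* illustrative bit position */

/* Read-modify-write one speed bit: set for 100BASE, clear for 10BASE. */
static uint32_t ecmr_set_rate(uint32_t ecmr, int speed, uint32_t rate_bit)
{
	switch (speed) {
	case 10:
		return ecmr & ~rate_bit;
	case 100:
		return ecmr | rate_bit;
	default:
		return ecmr;	/* leave unknown speeds untouched */
	}
}

int main(void)
{
	printf("ECMR = 0x%08x\n", ecmr_set_rate(0, 100, ECMR_ELB));
	return 0;
}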
@@ -592,7 +898,7 @@ static int sh_eth_check_reset(struct net_device *ndev)
592 cnt--; 898 cnt--;
593 } 899 }
594 if (cnt < 0) { 900 if (cnt < 0) {
595 printk(KERN_ERR "Device reset fail\n"); 901 pr_err("Device reset fail\n");
596 ret = -ETIMEDOUT; 902 ret = -ETIMEDOUT;
597 } 903 }
598 return ret; 904 return ret;
@@ -908,11 +1214,8 @@ static int sh_eth_ring_init(struct net_device *ndev)
908 /* Allocate all Rx descriptors. */ 1214 /* Allocate all Rx descriptors. */
909 rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; 1215 rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
910 mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma, 1216 mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
911 GFP_KERNEL); 1217 GFP_KERNEL);
912
913 if (!mdp->rx_ring) { 1218 if (!mdp->rx_ring) {
914 dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n",
915 rx_ringsize);
916 ret = -ENOMEM; 1219 ret = -ENOMEM;
917 goto desc_ring_free; 1220 goto desc_ring_free;
918 } 1221 }
@@ -922,10 +1225,8 @@ static int sh_eth_ring_init(struct net_device *ndev)
922 /* Allocate all Tx descriptors. */ 1225 /* Allocate all Tx descriptors. */
923 tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; 1226 tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
924 mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma, 1227 mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
925 GFP_KERNEL); 1228 GFP_KERNEL);
926 if (!mdp->tx_ring) { 1229 if (!mdp->tx_ring) {
927 dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n",
928 tx_ringsize);
929 ret = -ENOMEM; 1230 ret = -ENOMEM;
930 goto desc_ring_free; 1231 goto desc_ring_free;
931 } 1232 }
@@ -2228,7 +2529,6 @@ static void sh_eth_tsu_init(struct sh_eth_private *mdp)
2228/* MDIO bus release function */ 2529/* MDIO bus release function */
2229static int sh_mdio_release(struct net_device *ndev) 2530static int sh_mdio_release(struct net_device *ndev)
2230{ 2531{
2231 struct sh_eth_private *mdp = netdev_priv(ndev);
2232 struct mii_bus *bus = dev_get_drvdata(&ndev->dev); 2532 struct mii_bus *bus = dev_get_drvdata(&ndev->dev);
2233 2533
2234 /* unregister mdio bus */ 2534 /* unregister mdio bus */
@@ -2237,15 +2537,9 @@ static int sh_mdio_release(struct net_device *ndev)
2237 /* remove mdio bus info from net_device */ 2537 /* remove mdio bus info from net_device */
2238 dev_set_drvdata(&ndev->dev, NULL); 2538 dev_set_drvdata(&ndev->dev, NULL);
2239 2539
2240 /* free interrupts memory */
2241 kfree(bus->irq);
2242
2243 /* free bitbang info */ 2540 /* free bitbang info */
2244 free_mdio_bitbang(bus); 2541 free_mdio_bitbang(bus);
2245 2542
2246 /* free bitbang memory */
2247 kfree(mdp->bitbang);
2248
2249 return 0; 2543 return 0;
2250} 2544}
2251 2545
@@ -2258,7 +2552,8 @@ static int sh_mdio_init(struct net_device *ndev, int id,
2258 struct sh_eth_private *mdp = netdev_priv(ndev); 2552 struct sh_eth_private *mdp = netdev_priv(ndev);
2259 2553
2260 /* create bit control struct for PHY */ 2554 /* create bit control struct for PHY */
2261 bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL); 2555 bitbang = devm_kzalloc(&ndev->dev, sizeof(struct bb_info),
2556 GFP_KERNEL);
2262 if (!bitbang) { 2557 if (!bitbang) {
2263 ret = -ENOMEM; 2558 ret = -ENOMEM;
2264 goto out; 2559 goto out;
@@ -2267,18 +2562,17 @@ static int sh_mdio_init(struct net_device *ndev, int id,
2267 /* bitbang init */ 2562 /* bitbang init */
2268 bitbang->addr = mdp->addr + mdp->reg_offset[PIR]; 2563 bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
2269 bitbang->set_gate = pd->set_mdio_gate; 2564 bitbang->set_gate = pd->set_mdio_gate;
2270 bitbang->mdi_msk = 0x08; 2565 bitbang->mdi_msk = PIR_MDI;
2271 bitbang->mdo_msk = 0x04; 2566 bitbang->mdo_msk = PIR_MDO;
2272 bitbang->mmd_msk = 0x02;/* MMD */ 2567 bitbang->mmd_msk = PIR_MMD;
2273 bitbang->mdc_msk = 0x01; 2568 bitbang->mdc_msk = PIR_MDC;
2274 bitbang->ctrl.ops = &bb_ops; 2569 bitbang->ctrl.ops = &bb_ops;
2275 2570
2276 /* MII controller setting */ 2571 /* MII controller setting */
2277 mdp->bitbang = bitbang;
2278 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl); 2572 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
2279 if (!mdp->mii_bus) { 2573 if (!mdp->mii_bus) {
2280 ret = -ENOMEM; 2574 ret = -ENOMEM;
2281 goto out_free_bitbang; 2575 goto out;
2282 } 2576 }
2283 2577
2284 /* Hook up MII support for ethtool */ 2578 /* Hook up MII support for ethtool */
@@ -2288,7 +2582,9 @@ static int sh_mdio_init(struct net_device *ndev, int id,
2288 mdp->pdev->name, id); 2582 mdp->pdev->name, id);
2289 2583
2290 /* PHY IRQ */ 2584 /* PHY IRQ */
2291 mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); 2585 mdp->mii_bus->irq = devm_kzalloc(&ndev->dev,
2586 sizeof(int) * PHY_MAX_ADDR,
2587 GFP_KERNEL);
2292 if (!mdp->mii_bus->irq) { 2588 if (!mdp->mii_bus->irq) {
2293 ret = -ENOMEM; 2589 ret = -ENOMEM;
2294 goto out_free_bus; 2590 goto out_free_bus;
@@ -2300,21 +2596,15 @@ static int sh_mdio_init(struct net_device *ndev, int id,
2300 /* register mdio bus */ 2596 /* register mdio bus */
2301 ret = mdiobus_register(mdp->mii_bus); 2597 ret = mdiobus_register(mdp->mii_bus);
2302 if (ret) 2598 if (ret)
2303 goto out_free_irq; 2599 goto out_free_bus;
2304 2600
2305 dev_set_drvdata(&ndev->dev, mdp->mii_bus); 2601 dev_set_drvdata(&ndev->dev, mdp->mii_bus);
2306 2602
2307 return 0; 2603 return 0;
2308 2604
2309out_free_irq:
2310 kfree(mdp->mii_bus->irq);
2311
2312out_free_bus: 2605out_free_bus:
2313 free_mdio_bitbang(mdp->mii_bus); 2606 free_mdio_bitbang(mdp->mii_bus);
2314 2607
2315out_free_bitbang:
2316 kfree(bitbang);
2317
2318out: 2608out:
2319 return ret; 2609 return ret;
2320} 2610}
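Switching the bitbang state and the PHY IRQ table to devm_kzalloc() ties their lifetime to the device, which is why the out_free_irq and out_free_bitbang unwind labels disappear above. A sketch of the managed-allocation shape; this uses the kernel API and is only buildable in-tree, with a placeholder struct standing in for the driver's bb_info:

/* Sketch (kernel API): device-managed allocations need no matching kfree(). */
#include <linux/device.h>
#include <linux/slab.h>

struct bb_state { int dummy; };	/* placeholder for the driver's bb_info */

static int example_init(struct device *dev, int nr_irqs)
{
	struct bb_state *bb;
	int *irqs;

	bb = devm_kzalloc(dev, sizeof(*bb), GFP_KERNEL);
	if (!bb)
		return -ENOMEM;

	irqs = devm_kzalloc(dev, sizeof(int) * nr_irqs, GFP_KERNEL);
	if (!irqs)
		return -ENOMEM;	/* bb is freed automatically on unbind */

	/* both buffers are released when the device unbinds */
	return 0;
}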
@@ -2327,6 +2617,9 @@ static const u16 *sh_eth_get_register_offset(int register_type)
2327 case SH_ETH_REG_GIGABIT: 2617 case SH_ETH_REG_GIGABIT:
2328 reg_offset = sh_eth_offset_gigabit; 2618 reg_offset = sh_eth_offset_gigabit;
2329 break; 2619 break;
2620 case SH_ETH_REG_FAST_RCAR:
2621 reg_offset = sh_eth_offset_fast_rcar;
2622 break;
2330 case SH_ETH_REG_FAST_SH4: 2623 case SH_ETH_REG_FAST_SH4:
2331 reg_offset = sh_eth_offset_fast_sh4; 2624 reg_offset = sh_eth_offset_fast_sh4;
2332 break; 2625 break;
@@ -2334,7 +2627,7 @@ static const u16 *sh_eth_get_register_offset(int register_type)
2334 reg_offset = sh_eth_offset_fast_sh3_sh2; 2627 reg_offset = sh_eth_offset_fast_sh3_sh2;
2335 break; 2628 break;
2336 default: 2629 default:
2337 printk(KERN_ERR "Unknown register type (%d)\n", register_type); 2630 pr_err("Unknown register type (%d)\n", register_type);
2338 break; 2631 break;
2339 } 2632 }
2340 2633
@@ -2364,7 +2657,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2364 struct resource *res; 2657 struct resource *res;
2365 struct net_device *ndev = NULL; 2658 struct net_device *ndev = NULL;
2366 struct sh_eth_private *mdp = NULL; 2659 struct sh_eth_private *mdp = NULL;
2367 struct sh_eth_plat_data *pd; 2660 struct sh_eth_plat_data *pd = pdev->dev.platform_data;
2368 2661
2369 /* get base addr */ 2662 /* get base addr */
2370 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2663 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2402,10 +2695,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2402 mdp = netdev_priv(ndev); 2695 mdp = netdev_priv(ndev);
2403 mdp->num_tx_ring = TX_RING_SIZE; 2696 mdp->num_tx_ring = TX_RING_SIZE;
2404 mdp->num_rx_ring = RX_RING_SIZE; 2697 mdp->num_rx_ring = RX_RING_SIZE;
2405 mdp->addr = ioremap(res->start, resource_size(res)); 2698 mdp->addr = devm_ioremap_resource(&pdev->dev, res);
2406 if (mdp->addr == NULL) { 2699 if (IS_ERR(mdp->addr)) {
2407 ret = -ENOMEM; 2700 ret = PTR_ERR(mdp->addr);
2408 dev_err(&pdev->dev, "ioremap failed.\n");
2409 goto out_release; 2701 goto out_release;
2410 } 2702 }
2411 2703
@@ -2414,7 +2706,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2414 pm_runtime_enable(&pdev->dev); 2706 pm_runtime_enable(&pdev->dev);
2415 pm_runtime_resume(&pdev->dev); 2707 pm_runtime_resume(&pdev->dev);
2416 2708
2417 pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
2418 /* get PHY ID */ 2709 /* get PHY ID */
2419 mdp->phy_id = pd->phy; 2710 mdp->phy_id = pd->phy;
2420 mdp->phy_interface = pd->phy_interface; 2711 mdp->phy_interface = pd->phy_interface;
@@ -2452,11 +2743,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2452 ret = -ENODEV; 2743 ret = -ENODEV;
2453 goto out_release; 2744 goto out_release;
2454 } 2745 }
2455 mdp->tsu_addr = ioremap(rtsu->start, 2746 mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
2456 resource_size(rtsu)); 2747 if (IS_ERR(mdp->tsu_addr)) {
2457 if (mdp->tsu_addr == NULL) { 2748 ret = PTR_ERR(mdp->tsu_addr);
2458 ret = -ENOMEM;
2459 dev_err(&pdev->dev, "TSU ioremap failed.\n");
2460 goto out_release; 2749 goto out_release;
2461 } 2750 }
2462 mdp->port = devno % 2; 2751 mdp->port = devno % 2;
@@ -2497,10 +2786,6 @@ out_unregister:
2497 2786
2498out_release: 2787out_release:
2499 /* net_dev free */ 2788 /* net_dev free */
2500 if (mdp && mdp->addr)
2501 iounmap(mdp->addr);
2502 if (mdp && mdp->tsu_addr)
2503 iounmap(mdp->tsu_addr);
2504 if (ndev) 2789 if (ndev)
2505 free_netdev(ndev); 2790 free_netdev(ndev);
2506 2791
@@ -2511,14 +2796,10 @@ out:
2511static int sh_eth_drv_remove(struct platform_device *pdev) 2796static int sh_eth_drv_remove(struct platform_device *pdev)
2512{ 2797{
2513 struct net_device *ndev = platform_get_drvdata(pdev); 2798 struct net_device *ndev = platform_get_drvdata(pdev);
2514 struct sh_eth_private *mdp = netdev_priv(ndev);
2515 2799
2516 if (mdp->cd->tsu)
2517 iounmap(mdp->tsu_addr);
2518 sh_mdio_release(ndev); 2800 sh_mdio_release(ndev);
2519 unregister_netdev(ndev); 2801 unregister_netdev(ndev);
2520 pm_runtime_disable(&pdev->dev); 2802 pm_runtime_disable(&pdev->dev);
2521 iounmap(mdp->addr);
2522 free_netdev(ndev); 2803 free_netdev(ndev);
2523 platform_set_drvdata(pdev, NULL); 2804 platform_set_drvdata(pdev, NULL);
2524 2805
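devm_ioremap_resource() folds the request/ioremap/error-print sequence into a single call that returns an ERR_PTR on failure, which is why both the probe unwind path and the remove path above lose their iounmap() calls. A sketch of the idiom, again using the kernel API:

/* Sketch (kernel API): managed ioremap with ERR_PTR-style error handling. */
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *addr;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(addr))
		return PTR_ERR(addr);	/* the helper already logged the error */

	/* the mapping is released automatically when the device unbinds */
	return 0;
}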
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 828be4515008..1ddc9f235bcb 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -156,225 +156,6 @@ enum {
156 SH_ETH_MAX_REGISTER_OFFSET, 156 SH_ETH_MAX_REGISTER_OFFSET,
157}; 157};
158 158
159static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
160 [EDSR] = 0x0000,
161 [EDMR] = 0x0400,
162 [EDTRR] = 0x0408,
163 [EDRRR] = 0x0410,
164 [EESR] = 0x0428,
165 [EESIPR] = 0x0430,
166 [TDLAR] = 0x0010,
167 [TDFAR] = 0x0014,
168 [TDFXR] = 0x0018,
169 [TDFFR] = 0x001c,
170 [RDLAR] = 0x0030,
171 [RDFAR] = 0x0034,
172 [RDFXR] = 0x0038,
173 [RDFFR] = 0x003c,
174 [TRSCER] = 0x0438,
175 [RMFCR] = 0x0440,
176 [TFTR] = 0x0448,
177 [FDR] = 0x0450,
178 [RMCR] = 0x0458,
179 [RPADIR] = 0x0460,
180 [FCFTR] = 0x0468,
181 [CSMR] = 0x04E4,
182
183 [ECMR] = 0x0500,
184 [ECSR] = 0x0510,
185 [ECSIPR] = 0x0518,
186 [PIR] = 0x0520,
187 [PSR] = 0x0528,
188 [PIPR] = 0x052c,
189 [RFLR] = 0x0508,
190 [APR] = 0x0554,
191 [MPR] = 0x0558,
192 [PFTCR] = 0x055c,
193 [PFRCR] = 0x0560,
194 [TPAUSER] = 0x0564,
195 [GECMR] = 0x05b0,
196 [BCULR] = 0x05b4,
197 [MAHR] = 0x05c0,
198 [MALR] = 0x05c8,
199 [TROCR] = 0x0700,
200 [CDCR] = 0x0708,
201 [LCCR] = 0x0710,
202 [CEFCR] = 0x0740,
203 [FRECR] = 0x0748,
204 [TSFRCR] = 0x0750,
205 [TLFRCR] = 0x0758,
206 [RFCR] = 0x0760,
207 [CERCR] = 0x0768,
208 [CEECR] = 0x0770,
209 [MAFCR] = 0x0778,
210 [RMII_MII] = 0x0790,
211
212 [ARSTR] = 0x0000,
213 [TSU_CTRST] = 0x0004,
214 [TSU_FWEN0] = 0x0010,
215 [TSU_FWEN1] = 0x0014,
216 [TSU_FCM] = 0x0018,
217 [TSU_BSYSL0] = 0x0020,
218 [TSU_BSYSL1] = 0x0024,
219 [TSU_PRISL0] = 0x0028,
220 [TSU_PRISL1] = 0x002c,
221 [TSU_FWSL0] = 0x0030,
222 [TSU_FWSL1] = 0x0034,
223 [TSU_FWSLC] = 0x0038,
224 [TSU_QTAG0] = 0x0040,
225 [TSU_QTAG1] = 0x0044,
226 [TSU_FWSR] = 0x0050,
227 [TSU_FWINMK] = 0x0054,
228 [TSU_ADQT0] = 0x0048,
229 [TSU_ADQT1] = 0x004c,
230 [TSU_VTAG0] = 0x0058,
231 [TSU_VTAG1] = 0x005c,
232 [TSU_ADSBSY] = 0x0060,
233 [TSU_TEN] = 0x0064,
234 [TSU_POST1] = 0x0070,
235 [TSU_POST2] = 0x0074,
236 [TSU_POST3] = 0x0078,
237 [TSU_POST4] = 0x007c,
238 [TSU_ADRH0] = 0x0100,
239 [TSU_ADRL0] = 0x0104,
240 [TSU_ADRH31] = 0x01f8,
241 [TSU_ADRL31] = 0x01fc,
242
243 [TXNLCR0] = 0x0080,
244 [TXALCR0] = 0x0084,
245 [RXNLCR0] = 0x0088,
246 [RXALCR0] = 0x008c,
247 [FWNLCR0] = 0x0090,
248 [FWALCR0] = 0x0094,
249 [TXNLCR1] = 0x00a0,
250 [TXALCR1] = 0x00a0,
251 [RXNLCR1] = 0x00a8,
252 [RXALCR1] = 0x00ac,
253 [FWNLCR1] = 0x00b0,
254 [FWALCR1] = 0x00b4,
255};
256
257static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
258 [ECMR] = 0x0100,
259 [RFLR] = 0x0108,
260 [ECSR] = 0x0110,
261 [ECSIPR] = 0x0118,
262 [PIR] = 0x0120,
263 [PSR] = 0x0128,
264 [RDMLR] = 0x0140,
265 [IPGR] = 0x0150,
266 [APR] = 0x0154,
267 [MPR] = 0x0158,
268 [TPAUSER] = 0x0164,
269 [RFCF] = 0x0160,
270 [TPAUSECR] = 0x0168,
271 [BCFRR] = 0x016c,
272 [MAHR] = 0x01c0,
273 [MALR] = 0x01c8,
274 [TROCR] = 0x01d0,
275 [CDCR] = 0x01d4,
276 [LCCR] = 0x01d8,
277 [CNDCR] = 0x01dc,
278 [CEFCR] = 0x01e4,
279 [FRECR] = 0x01e8,
280 [TSFRCR] = 0x01ec,
281 [TLFRCR] = 0x01f0,
282 [RFCR] = 0x01f4,
283 [MAFCR] = 0x01f8,
284 [RTRATE] = 0x01fc,
285
286 [EDMR] = 0x0000,
287 [EDTRR] = 0x0008,
288 [EDRRR] = 0x0010,
289 [TDLAR] = 0x0018,
290 [RDLAR] = 0x0020,
291 [EESR] = 0x0028,
292 [EESIPR] = 0x0030,
293 [TRSCER] = 0x0038,
294 [RMFCR] = 0x0040,
295 [TFTR] = 0x0048,
296 [FDR] = 0x0050,
297 [RMCR] = 0x0058,
298 [TFUCR] = 0x0064,
299 [RFOCR] = 0x0068,
300 [FCFTR] = 0x0070,
301 [RPADIR] = 0x0078,
302 [TRIMD] = 0x007c,
303 [RBWAR] = 0x00c8,
304 [RDFAR] = 0x00cc,
305 [TBRAR] = 0x00d4,
306 [TDFAR] = 0x00d8,
307};
308
309static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
310 [ECMR] = 0x0160,
311 [ECSR] = 0x0164,
312 [ECSIPR] = 0x0168,
313 [PIR] = 0x016c,
314 [MAHR] = 0x0170,
315 [MALR] = 0x0174,
316 [RFLR] = 0x0178,
317 [PSR] = 0x017c,
318 [TROCR] = 0x0180,
319 [CDCR] = 0x0184,
320 [LCCR] = 0x0188,
321 [CNDCR] = 0x018c,
322 [CEFCR] = 0x0194,
323 [FRECR] = 0x0198,
324 [TSFRCR] = 0x019c,
325 [TLFRCR] = 0x01a0,
326 [RFCR] = 0x01a4,
327 [MAFCR] = 0x01a8,
328 [IPGR] = 0x01b4,
329 [APR] = 0x01b8,
330 [MPR] = 0x01bc,
331 [TPAUSER] = 0x01c4,
332 [BCFR] = 0x01cc,
333
334 [ARSTR] = 0x0000,
335 [TSU_CTRST] = 0x0004,
336 [TSU_FWEN0] = 0x0010,
337 [TSU_FWEN1] = 0x0014,
338 [TSU_FCM] = 0x0018,
339 [TSU_BSYSL0] = 0x0020,
340 [TSU_BSYSL1] = 0x0024,
341 [TSU_PRISL0] = 0x0028,
342 [TSU_PRISL1] = 0x002c,
343 [TSU_FWSL0] = 0x0030,
344 [TSU_FWSL1] = 0x0034,
345 [TSU_FWSLC] = 0x0038,
346 [TSU_QTAGM0] = 0x0040,
347 [TSU_QTAGM1] = 0x0044,
348 [TSU_ADQT0] = 0x0048,
349 [TSU_ADQT1] = 0x004c,
350 [TSU_FWSR] = 0x0050,
351 [TSU_FWINMK] = 0x0054,
352 [TSU_ADSBSY] = 0x0060,
353 [TSU_TEN] = 0x0064,
354 [TSU_POST1] = 0x0070,
355 [TSU_POST2] = 0x0074,
356 [TSU_POST3] = 0x0078,
357 [TSU_POST4] = 0x007c,
358
359 [TXNLCR0] = 0x0080,
360 [TXALCR0] = 0x0084,
361 [RXNLCR0] = 0x0088,
362 [RXALCR0] = 0x008c,
363 [FWNLCR0] = 0x0090,
364 [FWALCR0] = 0x0094,
365 [TXNLCR1] = 0x00a0,
366 [TXALCR1] = 0x00a0,
367 [RXNLCR1] = 0x00a8,
368 [RXALCR1] = 0x00ac,
369 [FWNLCR1] = 0x00b0,
370 [FWALCR1] = 0x00b4,
371
372 [TSU_ADRH0] = 0x0100,
373 [TSU_ADRL0] = 0x0104,
374 [TSU_ADRL31] = 0x01fc,
375
376};
377
378/* Driver's parameters */ 159/* Driver's parameters */
379#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) 160#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
380#define SH4_SKB_RX_ALIGN 32 161#define SH4_SKB_RX_ALIGN 32
@@ -705,7 +486,6 @@ struct sh_eth_private {
705 const u16 *reg_offset; 486 const u16 *reg_offset;
706 void __iomem *addr; 487 void __iomem *addr;
707 void __iomem *tsu_addr; 488 void __iomem *tsu_addr;
708 struct bb_info *bitbang;
709 u32 num_rx_ring; 489 u32 num_rx_ring;
710 u32 num_tx_ring; 490 u32 num_tx_ring;
711 dma_addr_t rx_desc_dma; 491 dma_addr_t rx_desc_dma;
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c
index 21683e2b1ff4..b6739afeaca1 100644
--- a/drivers/net/ethernet/s6gmac.c
+++ b/drivers/net/ethernet/s6gmac.c
@@ -998,6 +998,7 @@ static int s6gmac_probe(struct platform_device *pdev)
998 mb = mdiobus_alloc(); 998 mb = mdiobus_alloc();
999 if (!mb) { 999 if (!mb) {
1000 printk(KERN_ERR DRV_PRMT "error allocating mii bus\n"); 1000 printk(KERN_ERR DRV_PRMT "error allocating mii bus\n");
1001 res = -ENOMEM;
1001 goto errmii; 1002 goto errmii;
1002 } 1003 }
1003 mb->name = "s6gmac_mii"; 1004 mb->name = "s6gmac_mii";
@@ -1053,20 +1054,7 @@ static struct platform_driver s6gmac_driver = {
1053 }, 1054 },
1054}; 1055};
1055 1056
1056static int __init s6gmac_init(void) 1057module_platform_driver(s6gmac_driver);
1057{
1058 printk(KERN_INFO DRV_PRMT "S6 GMAC ethernet driver\n");
1059 return platform_driver_register(&s6gmac_driver);
1060}
1061
1062
1063static void __exit s6gmac_exit(void)
1064{
1065 platform_driver_unregister(&s6gmac_driver);
1066}
1067
1068module_init(s6gmac_init);
1069module_exit(s6gmac_exit);
1070 1058
1071MODULE_LICENSE("GPL"); 1059MODULE_LICENSE("GPL");
1072MODULE_DESCRIPTION("S6105 on chip Ethernet driver"); 1060MODULE_DESCRIPTION("S6105 on chip Ethernet driver");
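module_platform_driver() expands to essentially the boilerplate it replaces here: a module_init that registers the driver and a module_exit that unregisters it (the informational printk in the old init is simply dropped). Approximately:

/* What module_platform_driver(s6gmac_driver) expands to, roughly: */
static int __init s6gmac_driver_init(void)
{
	return platform_driver_register(&s6gmac_driver);
}
module_init(s6gmac_driver_init);

static void __exit s6gmac_driver_exit(void)
{
	platform_driver_unregister(&s6gmac_driver);
}
module_exit(s6gmac_driver_exit);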
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index 3aca57853ed4..bdac936a68bc 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -651,8 +651,11 @@ if (next_ptr < RX_START || next_ptr >= RX_END) {
651 skb->protocol = eth_type_trans(skb, dev); 651 skb->protocol = eth_type_trans(skb, dev);
652 netif_rx(skb); 652 netif_rx(skb);
653 received ++; 653 received ++;
654 } else 654 } else {
655 goto dropping; 655 ether3_outw(next_ptr >> 8, REG_RECVEND);
656 dev->stats.rx_dropped++;
657 goto done;
658 }
656 } else { 659 } else {
657 struct net_device_stats *stats = &dev->stats; 660 struct net_device_stats *stats = &dev->stats;
658 ether3_outw(next_ptr >> 8, REG_RECVEND); 661 ether3_outw(next_ptr >> 8, REG_RECVEND);
@@ -679,21 +682,6 @@ done:
679 } 682 }
680 683
681 return maxcnt; 684 return maxcnt;
682
683dropping:{
684 static unsigned long last_warned;
685
686 ether3_outw(next_ptr >> 8, REG_RECVEND);
687 /*
688 * Don't print this message too many times...
689 */
690 if (time_after(jiffies, last_warned + 10 * HZ)) {
691 last_warned = jiffies;
692 printk("%s: memory squeeze, dropping packet.\n", dev->name);
693 }
694 dev->stats.rx_dropped++;
695 goto done;
696 }
697} 685}
698 686
699/* 687/*
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 0fde9ca28269..0ad5694b41f8 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -381,8 +381,6 @@ memory_squeeze:
381 dev->stats.rx_packets++; 381 dev->stats.rx_packets++;
382 dev->stats.rx_bytes += len; 382 dev->stats.rx_bytes += len;
383 } else { 383 } else {
384 printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n",
385 dev->name);
386 dev->stats.rx_dropped++; 384 dev->stats.rx_dropped++;
387 } 385 }
388 } else { 386 } else {
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 0bc00991d310..01b99206139a 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -22,6 +22,7 @@
22#include <linux/topology.h> 22#include <linux/topology.h>
23#include <linux/gfp.h> 23#include <linux/gfp.h>
24#include <linux/cpu_rmap.h> 24#include <linux/cpu_rmap.h>
25#include <linux/aer.h>
25#include "net_driver.h" 26#include "net_driver.h"
26#include "efx.h" 27#include "efx.h"
27#include "nic.h" 28#include "nic.h"
@@ -71,21 +72,21 @@ const char *const efx_loopback_mode_names[] = {
71 72
72const unsigned int efx_reset_type_max = RESET_TYPE_MAX; 73const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
73const char *const efx_reset_type_names[] = { 74const char *const efx_reset_type_names[] = {
74 [RESET_TYPE_INVISIBLE] = "INVISIBLE", 75 [RESET_TYPE_INVISIBLE] = "INVISIBLE",
75 [RESET_TYPE_ALL] = "ALL", 76 [RESET_TYPE_ALL] = "ALL",
76 [RESET_TYPE_WORLD] = "WORLD", 77 [RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL",
77 [RESET_TYPE_DISABLE] = "DISABLE", 78 [RESET_TYPE_WORLD] = "WORLD",
78 [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG", 79 [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
79 [RESET_TYPE_INT_ERROR] = "INT_ERROR", 80 [RESET_TYPE_DISABLE] = "DISABLE",
80 [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY", 81 [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
81 [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH", 82 [RESET_TYPE_INT_ERROR] = "INT_ERROR",
82 [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH", 83 [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY",
83 [RESET_TYPE_TX_SKIP] = "TX_SKIP", 84 [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH",
84 [RESET_TYPE_MC_FAILURE] = "MC_FAILURE", 85 [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH",
86 [RESET_TYPE_TX_SKIP] = "TX_SKIP",
87 [RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
85}; 88};
86 89
87#define EFX_MAX_MTU (9 * 1024)
88
89/* Reset workqueue. If any NIC has a hardware failure then a reset will be 90/* Reset workqueue. If any NIC has a hardware failure then a reset will be
90 * queued onto this work queue. This is not a per-nic work queue, because 91 * queued onto this work queue. This is not a per-nic work queue, because
91 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised. 92 * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
@@ -117,9 +118,12 @@ MODULE_PARM_DESC(separate_tx_channels,
117static int napi_weight = 64; 118static int napi_weight = 64;
118 119
119/* This is the time (in jiffies) between invocations of the hardware 120/* This is the time (in jiffies) between invocations of the hardware
120 * monitor. On Falcon-based NICs, this will: 121 * monitor.
122 * On Falcon-based NICs, this will:
121 * - Check the on-board hardware monitor; 123 * - Check the on-board hardware monitor;
122 * - Poll the link state and reconfigure the hardware as necessary. 124 * - Poll the link state and reconfigure the hardware as necessary.
125 * On Siena-based NICs for power systems with EEH support, this will give EEH a
126 * chance to start.
123 */ 127 */
124static unsigned int efx_monitor_interval = 1 * HZ; 128static unsigned int efx_monitor_interval = 1 * HZ;
125 129
@@ -203,13 +207,14 @@ static void efx_stop_all(struct efx_nic *efx);
203#define EFX_ASSERT_RESET_SERIALISED(efx) \ 207#define EFX_ASSERT_RESET_SERIALISED(efx) \
204 do { \ 208 do { \
205 if ((efx->state == STATE_READY) || \ 209 if ((efx->state == STATE_READY) || \
210 (efx->state == STATE_RECOVERY) || \
206 (efx->state == STATE_DISABLED)) \ 211 (efx->state == STATE_DISABLED)) \
207 ASSERT_RTNL(); \ 212 ASSERT_RTNL(); \
208 } while (0) 213 } while (0)
209 214
210static int efx_check_disabled(struct efx_nic *efx) 215static int efx_check_disabled(struct efx_nic *efx)
211{ 216{
212 if (efx->state == STATE_DISABLED) { 217 if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
213 netif_err(efx, drv, efx->net_dev, 218 netif_err(efx, drv, efx->net_dev,
214 "device is disabled due to earlier errors\n"); 219 "device is disabled due to earlier errors\n");
215 return -EIO; 220 return -EIO;
@@ -242,15 +247,9 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
242 struct efx_rx_queue *rx_queue = 247 struct efx_rx_queue *rx_queue =
243 efx_channel_get_rx_queue(channel); 248 efx_channel_get_rx_queue(channel);
244 249
245 /* Deliver last RX packet. */ 250 efx_rx_flush_packet(channel);
246 if (channel->rx_pkt) { 251 if (rx_queue->enabled)
247 __efx_rx_packet(channel, channel->rx_pkt);
248 channel->rx_pkt = NULL;
249 }
250 if (rx_queue->enabled) {
251 efx_rx_strategy(channel);
252 efx_fast_push_rx_descriptors(rx_queue); 252 efx_fast_push_rx_descriptors(rx_queue);
253 }
254 } 253 }
255 254
256 return spent; 255 return spent;
@@ -625,20 +624,51 @@ fail:
625 */ 624 */
626static void efx_start_datapath(struct efx_nic *efx) 625static void efx_start_datapath(struct efx_nic *efx)
627{ 626{
627 bool old_rx_scatter = efx->rx_scatter;
628 struct efx_tx_queue *tx_queue; 628 struct efx_tx_queue *tx_queue;
629 struct efx_rx_queue *rx_queue; 629 struct efx_rx_queue *rx_queue;
630 struct efx_channel *channel; 630 struct efx_channel *channel;
631 size_t rx_buf_len;
631 632
632 /* Calculate the rx buffer allocation parameters required to 633 /* Calculate the rx buffer allocation parameters required to
633 * support the current MTU, including padding for header 634 * support the current MTU, including padding for header
634 * alignment and overruns. 635 * alignment and overruns.
635 */ 636 */
636 efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) + 637 efx->rx_dma_len = (efx->type->rx_buffer_hash_size +
637 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + 638 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
638 efx->type->rx_buffer_hash_size + 639 efx->type->rx_buffer_padding);
639 efx->type->rx_buffer_padding); 640 rx_buf_len = (sizeof(struct efx_rx_page_state) +
640 efx->rx_buffer_order = get_order(efx->rx_buffer_len + 641 EFX_PAGE_IP_ALIGN + efx->rx_dma_len);
641 sizeof(struct efx_rx_page_state)); 642 if (rx_buf_len <= PAGE_SIZE) {
643 efx->rx_scatter = false;
644 efx->rx_buffer_order = 0;
645 } else if (efx->type->can_rx_scatter) {
646 BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
647 EFX_PAGE_IP_ALIGN + EFX_RX_USR_BUF_SIZE >
648 PAGE_SIZE / 2);
649 efx->rx_scatter = true;
650 efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
651 efx->rx_buffer_order = 0;
652 } else {
653 efx->rx_scatter = false;
654 efx->rx_buffer_order = get_order(rx_buf_len);
655 }
656
657 efx_rx_config_page_split(efx);
658 if (efx->rx_buffer_order)
659 netif_dbg(efx, drv, efx->net_dev,
660 "RX buf len=%u; page order=%u batch=%u\n",
661 efx->rx_dma_len, efx->rx_buffer_order,
662 efx->rx_pages_per_batch);
663 else
664 netif_dbg(efx, drv, efx->net_dev,
665 "RX buf len=%u step=%u bpp=%u; page batch=%u\n",
666 efx->rx_dma_len, efx->rx_page_buf_step,
667 efx->rx_bufs_per_page, efx->rx_pages_per_batch);
668
669 /* RX filters also have scatter-enabled flags */
670 if (efx->rx_scatter != old_rx_scatter)
671 efx_filter_update_rx_scatter(efx);
642 672
643 /* We must keep at least one descriptor in a TX ring empty. 673 /* We must keep at least one descriptor in a TX ring empty.
644 * We could avoid this when the queue size does not exactly 674 * We could avoid this when the queue size does not exactly
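The new sizing logic above chooses between three RX layouts: buffers that fit in one page, scatter mode with fixed EFX_RX_USR_BUF_SIZE chunks, and multi-page buffers on NICs that cannot scatter. A standalone sketch of the decision; the constants and the header overhead are illustrative stand-ins:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Decide the buffer layout from the DMA length the MTU requires. */
static void rx_layout(unsigned dma_len, bool can_scatter,
		      bool *scatter, unsigned *order)
{
	unsigned buf_len = 64 + dma_len;   /* page state + IP align, illustrative */

	if (buf_len <= PAGE_SIZE) {
		*scatter = false; *order = 0;  /* whole buffer fits in a page */
	} else if (can_scatter) {
		*scatter = true; *order = 0;   /* split into fixed-size chunks */
	} else {
		*scatter = false;              /* get_order() equivalent below */
		for (*order = 0; (PAGE_SIZE << *order) < buf_len; (*order)++)
			;
	}
}

int main(void)
{
	bool s; unsigned o;

	rx_layout(9216, true, &s, &o);	/* jumbo MTU, scatter-capable NIC */
	printf("scatter=%d order=%u\n", s, o);
	return 0;
}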
@@ -655,16 +685,12 @@ static void efx_start_datapath(struct efx_nic *efx)
655 efx_for_each_channel_tx_queue(tx_queue, channel) 685 efx_for_each_channel_tx_queue(tx_queue, channel)
656 efx_init_tx_queue(tx_queue); 686 efx_init_tx_queue(tx_queue);
657 687
658 /* The rx buffer allocation strategy is MTU dependent */
659 efx_rx_strategy(channel);
660
661 efx_for_each_channel_rx_queue(rx_queue, channel) { 688 efx_for_each_channel_rx_queue(rx_queue, channel) {
662 efx_init_rx_queue(rx_queue); 689 efx_init_rx_queue(rx_queue);
663 efx_nic_generate_fill_event(rx_queue); 690 efx_nic_generate_fill_event(rx_queue);
664 } 691 }
665 692
666 WARN_ON(channel->rx_pkt != NULL); 693 WARN_ON(channel->rx_pkt_n_frags);
667 efx_rx_strategy(channel);
668 } 694 }
669 695
670 if (netif_device_present(efx->net_dev)) 696 if (netif_device_present(efx->net_dev))
@@ -683,7 +709,7 @@ static void efx_stop_datapath(struct efx_nic *efx)
683 BUG_ON(efx->port_enabled); 709 BUG_ON(efx->port_enabled);
684 710
685 /* Only perform flush if dma is enabled */ 711 /* Only perform flush if dma is enabled */
686 if (dev->is_busmaster) { 712 if (dev->is_busmaster && efx->state != STATE_RECOVERY) {
687 rc = efx_nic_flush_queues(efx); 713 rc = efx_nic_flush_queues(efx);
688 714
689 if (rc && EFX_WORKAROUND_7803(efx)) { 715 if (rc && EFX_WORKAROUND_7803(efx)) {
@@ -1596,13 +1622,15 @@ static void efx_start_all(struct efx_nic *efx)
1596 efx_start_port(efx); 1622 efx_start_port(efx);
1597 efx_start_datapath(efx); 1623 efx_start_datapath(efx);
1598 1624
1599 /* Start the hardware monitor if there is one. Otherwise (we're link 1625 /* Start the hardware monitor if there is one */
1600 * event driven), we have to poll the PHY because after an event queue 1626 if (efx->type->monitor != NULL)
1601 * flush, we could have a missed a link state change */
1602 if (efx->type->monitor != NULL) {
1603 queue_delayed_work(efx->workqueue, &efx->monitor_work, 1627 queue_delayed_work(efx->workqueue, &efx->monitor_work,
1604 efx_monitor_interval); 1628 efx_monitor_interval);
1605 } else { 1629
1630 /* If link state detection is normally event-driven, we have
1631 * to poll now because we could have missed a change
1632 */
1633 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
1606 mutex_lock(&efx->mac_lock); 1634 mutex_lock(&efx->mac_lock);
1607 if (efx->phy_op->poll(efx)) 1635 if (efx->phy_op->poll(efx))
1608 efx_link_status_changed(efx); 1636 efx_link_status_changed(efx);
@@ -2309,7 +2337,9 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
2309 2337
2310out: 2338out:
2311 /* Leave device stopped if necessary */ 2339 /* Leave device stopped if necessary */
2312 disabled = rc || method == RESET_TYPE_DISABLE; 2340 disabled = rc ||
2341 method == RESET_TYPE_DISABLE ||
2342 method == RESET_TYPE_RECOVER_OR_DISABLE;
2313 rc2 = efx_reset_up(efx, method, !disabled); 2343 rc2 = efx_reset_up(efx, method, !disabled);
2314 if (rc2) { 2344 if (rc2) {
2315 disabled = true; 2345 disabled = true;
@@ -2328,13 +2358,48 @@ out:
2328 return rc; 2358 return rc;
2329} 2359}
2330 2360
2361/* Try recovery mechanisms.
2362 * For now only EEH is supported.
2363 * Returns 0 if the recovery mechanisms are unsuccessful.
2364 * Returns a non-zero value otherwise.
2365 */
2366static int efx_try_recovery(struct efx_nic *efx)
2367{
2368#ifdef CONFIG_EEH
2369 /* A PCI error can occur and not be seen by EEH because nothing
2370 * happens on the PCI bus. In this case the driver may fail and
2371 * schedule a 'recover or reset', leading to this recovery handler.
2372 * Manually call the eeh failure check function.
2373 */
2374 struct eeh_dev *eehdev =
2375 of_node_to_eeh_dev(pci_device_to_OF_node(efx->pci_dev));
2376
2377 if (eeh_dev_check_failure(eehdev)) {
2378 /* The EEH mechanisms will handle the error and reset the
2379 * device if necessary.
2380 */
2381 return 1;
2382 }
2383#endif
2384 return 0;
2385}
2386
2331/* The worker thread exists so that code that cannot sleep can 2387/* The worker thread exists so that code that cannot sleep can
2332 * schedule a reset for later. 2388 * schedule a reset for later.
2333 */ 2389 */
2334static void efx_reset_work(struct work_struct *data) 2390static void efx_reset_work(struct work_struct *data)
2335{ 2391{
2336 struct efx_nic *efx = container_of(data, struct efx_nic, reset_work); 2392 struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
2337 unsigned long pending = ACCESS_ONCE(efx->reset_pending); 2393 unsigned long pending;
2394 enum reset_type method;
2395
2396 pending = ACCESS_ONCE(efx->reset_pending);
2397 method = fls(pending) - 1;
2398
2399 if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
2400 method == RESET_TYPE_RECOVER_OR_ALL) &&
2401 efx_try_recovery(efx))
2402 return;
2338 2403
2339 if (!pending) 2404 if (!pending)
2340 return; 2405 return;
@@ -2346,7 +2411,7 @@ static void efx_reset_work(struct work_struct *data)
2346 * it cannot change again. 2411 * it cannot change again.
2347 */ 2412 */
2348 if (efx->state == STATE_READY) 2413 if (efx->state == STATE_READY)
2349 (void)efx_reset(efx, fls(pending) - 1); 2414 (void)efx_reset(efx, method);
2350 2415
2351 rtnl_unlock(); 2416 rtnl_unlock();
2352} 2417}
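reset_pending is a bitmask indexed by enum reset_type, and because the enum is now ordered by increasing scope, fls(pending) - 1 always yields the most severe reset outstanding. A standalone illustration with a local fls() equivalent and illustrative enum names:

#include <stdio.h>

/* fls(): 1-based index of the highest set bit; 0 when no bit is set. */
static int fls_(unsigned long v)
{
	return v ? 8 * (int)sizeof(v) - __builtin_clzl(v) : 0;
}

enum { RT_INVISIBLE, RT_RECOVER_OR_ALL, RT_ALL, RT_WORLD,
       RT_RECOVER_OR_DISABLE, RT_DISABLE };	/* ordered by scope */

int main(void)
{
	unsigned long pending = (1ul << RT_ALL) | (1ul << RT_DISABLE);

	/* The most severe pending method wins. */
	printf("method = %d (expect %d)\n", fls_(pending) - 1, RT_DISABLE);
	return 0;
}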
@@ -2355,11 +2420,20 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
2355{ 2420{
2356 enum reset_type method; 2421 enum reset_type method;
2357 2422
2423 if (efx->state == STATE_RECOVERY) {
2424 netif_dbg(efx, drv, efx->net_dev,
2425 "recovering: skip scheduling %s reset\n",
2426 RESET_TYPE(type));
2427 return;
2428 }
2429
2358 switch (type) { 2430 switch (type) {
2359 case RESET_TYPE_INVISIBLE: 2431 case RESET_TYPE_INVISIBLE:
2360 case RESET_TYPE_ALL: 2432 case RESET_TYPE_ALL:
2433 case RESET_TYPE_RECOVER_OR_ALL:
2361 case RESET_TYPE_WORLD: 2434 case RESET_TYPE_WORLD:
2362 case RESET_TYPE_DISABLE: 2435 case RESET_TYPE_DISABLE:
2436 case RESET_TYPE_RECOVER_OR_DISABLE:
2363 method = type; 2437 method = type;
2364 netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n", 2438 netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
2365 RESET_TYPE(method)); 2439 RESET_TYPE(method));
@@ -2569,6 +2643,8 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
2569 efx_fini_struct(efx); 2643 efx_fini_struct(efx);
2570 pci_set_drvdata(pci_dev, NULL); 2644 pci_set_drvdata(pci_dev, NULL);
2571 free_netdev(efx->net_dev); 2645 free_netdev(efx->net_dev);
2646
2647 pci_disable_pcie_error_reporting(pci_dev);
2572}; 2648};
2573 2649
2574/* NIC VPD information 2650/* NIC VPD information
@@ -2741,6 +2817,11 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
2741 netif_warn(efx, probe, efx->net_dev, 2817 netif_warn(efx, probe, efx->net_dev,
2742 "failed to create MTDs (%d)\n", rc); 2818 "failed to create MTDs (%d)\n", rc);
2743 2819
2820 rc = pci_enable_pcie_error_reporting(pci_dev);
2821 if (rc && rc != -EINVAL)
2822 netif_warn(efx, probe, efx->net_dev,
2823 "pci_enable_pcie_error_reporting failed (%d)\n", rc);
2824
2744 return 0; 2825 return 0;
2745 2826
2746 fail4: 2827 fail4:
@@ -2865,12 +2946,112 @@ static const struct dev_pm_ops efx_pm_ops = {
2865 .restore = efx_pm_resume, 2946 .restore = efx_pm_resume,
2866}; 2947};
2867 2948
2949/* A PCI error affecting this device was detected.
2950 * At this point MMIO and DMA may be disabled.
2951 * Stop the software path and request a slot reset.
2952 */
2953static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
2954 enum pci_channel_state state)
2955{
2956 pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
2957 struct efx_nic *efx = pci_get_drvdata(pdev);
2958
2959 if (state == pci_channel_io_perm_failure)
2960 return PCI_ERS_RESULT_DISCONNECT;
2961
2962 rtnl_lock();
2963
2964 if (efx->state != STATE_DISABLED) {
2965 efx->state = STATE_RECOVERY;
2966 efx->reset_pending = 0;
2967
2968 efx_device_detach_sync(efx);
2969
2970 efx_stop_all(efx);
2971 efx_stop_interrupts(efx, false);
2972
2973 status = PCI_ERS_RESULT_NEED_RESET;
2974 } else {
2975 /* If the interface is disabled we don't want to do anything
2976 * with it.
2977 */
2978 status = PCI_ERS_RESULT_RECOVERED;
2979 }
2980
2981 rtnl_unlock();
2982
2983 pci_disable_device(pdev);
2984
2985 return status;
2986}
2987
2988/* Fake a successful reset, which will be performed later in efx_io_resume. */
2989static pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
2990{
2991 struct efx_nic *efx = pci_get_drvdata(pdev);
2992 pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
2993 int rc;
2994
2995 if (pci_enable_device(pdev)) {
2996 netif_err(efx, hw, efx->net_dev,
2997 "Cannot re-enable PCI device after reset.\n");
2998 status = PCI_ERS_RESULT_DISCONNECT;
2999 }
3000
3001 rc = pci_cleanup_aer_uncorrect_error_status(pdev);
3002 if (rc) {
3003 netif_err(efx, hw, efx->net_dev,
3004 "pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc);
3005 /* Non-fatal error. Continue. */
3006 }
3007
3008 return status;
3009}
3010
3011/* Perform the actual reset and resume I/O operations. */
3012static void efx_io_resume(struct pci_dev *pdev)
3013{
3014 struct efx_nic *efx = pci_get_drvdata(pdev);
3015 int rc;
3016
3017 rtnl_lock();
3018
3019 if (efx->state == STATE_DISABLED)
3020 goto out;
3021
3022 rc = efx_reset(efx, RESET_TYPE_ALL);
3023 if (rc) {
3024 netif_err(efx, hw, efx->net_dev,
3025 "efx_reset failed after PCI error (%d)\n", rc);
3026 } else {
3027 efx->state = STATE_READY;
3028 netif_dbg(efx, hw, efx->net_dev,
3029 "Done resetting and resuming IO after PCI error.\n");
3030 }
3031
3032out:
3033 rtnl_unlock();
3034}
3035
3036/* For simplicity and reliability, we always require a slot reset and try to
3037 * reset the hardware when a pci error affecting the device is detected.
3038 * We leave both the link_reset and mmio_enabled callbacks unimplemented:
3039 * with our request for slot reset the mmio_enabled callback will never be
3040 * called, and the link_reset callback is not used by AER or EEH mechanisms.
3041 */
3042static struct pci_error_handlers efx_err_handlers = {
3043 .error_detected = efx_io_error_detected,
3044 .slot_reset = efx_io_slot_reset,
3045 .resume = efx_io_resume,
3046};
3047
2868static struct pci_driver efx_pci_driver = { 3048static struct pci_driver efx_pci_driver = {
2869 .name = KBUILD_MODNAME, 3049 .name = KBUILD_MODNAME,
2870 .id_table = efx_pci_table, 3050 .id_table = efx_pci_table,
2871 .probe = efx_pci_probe, 3051 .probe = efx_pci_probe,
2872 .remove = efx_pci_remove, 3052 .remove = efx_pci_remove,
2873 .driver.pm = &efx_pm_ops, 3053 .driver.pm = &efx_pm_ops,
3054 .err_handler = &efx_err_handlers,
2874}; 3055};
2875 3056
2876/************************************************************************** 3057/**************************************************************************
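The AER/EEH flow driven by the handlers registered above is: error_detected() quiesces the device and asks for a slot reset, slot_reset() re-enables the PCI function, and resume() performs the actual driver reset. A condensed sketch of that skeleton using the kernel API; the real handlers above additionally detach the net device and defer the reset to efx_reset():

/* Sketch (kernel API): minimal pci_error_handlers skeleton. */
static pci_ers_result_t drv_error_detected(struct pci_dev *pdev,
					   enum pci_channel_state state)
{
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;
	/* quiesce the device here */
	pci_disable_device(pdev);
	return PCI_ERS_RESULT_NEED_RESET;	/* ask the core for a slot reset */
}

static pci_ers_result_t drv_slot_reset(struct pci_dev *pdev)
{
	if (pci_enable_device(pdev))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;	/* full reset deferred to resume() */
}

static void drv_resume(struct pci_dev *pdev)
{
	/* restart the device; the driver above calls efx_reset() here */
}

static struct pci_error_handlers drv_err_handlers = {
	.error_detected	= drv_error_detected,
	.slot_reset	= drv_slot_reset,
	.resume		= drv_resume,
};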
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index d2f790df6dcb..8372da239b43 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -33,17 +33,22 @@ extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
33extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx); 33extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
34 34
35/* RX */ 35/* RX */
36extern void efx_rx_config_page_split(struct efx_nic *efx);
36extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); 37extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
37extern void efx_remove_rx_queue(struct efx_rx_queue *rx_queue); 38extern void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
38extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue); 39extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
39extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); 40extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
40extern void efx_rx_strategy(struct efx_channel *channel);
41extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue); 41extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
42extern void efx_rx_slow_fill(unsigned long context); 42extern void efx_rx_slow_fill(unsigned long context);
43extern void __efx_rx_packet(struct efx_channel *channel, 43extern void __efx_rx_packet(struct efx_channel *channel);
44 struct efx_rx_buffer *rx_buf); 44extern void efx_rx_packet(struct efx_rx_queue *rx_queue,
45extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, 45 unsigned int index, unsigned int n_frags,
46 unsigned int len, u16 flags); 46 unsigned int len, u16 flags);
47static inline void efx_rx_flush_packet(struct efx_channel *channel)
48{
49 if (channel->rx_pkt_n_frags)
50 __efx_rx_packet(channel);
51}
47extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); 52extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
48 53
49#define EFX_MAX_DMAQ_SIZE 4096UL 54#define EFX_MAX_DMAQ_SIZE 4096UL
@@ -67,6 +72,7 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
67extern int efx_probe_filters(struct efx_nic *efx); 72extern int efx_probe_filters(struct efx_nic *efx);
68extern void efx_restore_filters(struct efx_nic *efx); 73extern void efx_restore_filters(struct efx_nic *efx);
69extern void efx_remove_filters(struct efx_nic *efx); 74extern void efx_remove_filters(struct efx_nic *efx);
75extern void efx_filter_update_rx_scatter(struct efx_nic *efx);
70extern s32 efx_filter_insert_filter(struct efx_nic *efx, 76extern s32 efx_filter_insert_filter(struct efx_nic *efx,
71 struct efx_filter_spec *spec, 77 struct efx_filter_spec *spec,
72 bool replace); 78 bool replace);
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index 182dbe2cc6e4..ab8fb5889e55 100644
--- a/drivers/net/ethernet/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -137,8 +137,12 @@ enum efx_loopback_mode {
137 * Reset methods are numbered in order of increasing scope. 137 * Reset methods are numbered in order of increasing scope.
138 * 138 *
139 * @RESET_TYPE_INVISIBLE: Reset datapath and MAC (Falcon only) 139 * @RESET_TYPE_INVISIBLE: Reset datapath and MAC (Falcon only)
140 * @RESET_TYPE_RECOVER_OR_ALL: Try to recover. Apply RESET_TYPE_ALL
141 * if unsuccessful.
140 * @RESET_TYPE_ALL: Reset datapath, MAC and PHY 142 * @RESET_TYPE_ALL: Reset datapath, MAC and PHY
141 * @RESET_TYPE_WORLD: Reset as much as possible 143 * @RESET_TYPE_WORLD: Reset as much as possible
144 * @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if
145 * unsuccessful.
142 * @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled 146 * @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled
143 * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog 147 * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
144 * @RESET_TYPE_INT_ERROR: reset due to internal error 148 * @RESET_TYPE_INT_ERROR: reset due to internal error
@@ -150,9 +154,11 @@ enum efx_loopback_mode {
150 */ 154 */
151enum reset_type { 155enum reset_type {
152 RESET_TYPE_INVISIBLE = 0, 156 RESET_TYPE_INVISIBLE = 0,
153 RESET_TYPE_ALL = 1, 157 RESET_TYPE_RECOVER_OR_ALL = 1,
154 RESET_TYPE_WORLD = 2, 158 RESET_TYPE_ALL = 2,
155 RESET_TYPE_DISABLE = 3, 159 RESET_TYPE_WORLD = 3,
160 RESET_TYPE_RECOVER_OR_DISABLE = 4,
161 RESET_TYPE_DISABLE = 5,
156 RESET_TYPE_MAX_METHOD, 162 RESET_TYPE_MAX_METHOD,
157 RESET_TYPE_TX_WATCHDOG, 163 RESET_TYPE_TX_WATCHDOG,
158 RESET_TYPE_INT_ERROR, 164 RESET_TYPE_INT_ERROR,
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 8e61cd06f66a..6e768175e7e0 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -154,6 +154,7 @@ static const struct efx_ethtool_stat efx_ethtool_stats[] = {
154 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err), 154 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
155 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch), 155 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
156 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc), 156 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
157 EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_nodesc_trunc),
157}; 158};
158 159
159/* Number of ethtool statistics */ 160/* Number of ethtool statistics */
@@ -978,7 +979,8 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
978 rule->m_ext.data[1])) 979 rule->m_ext.data[1]))
979 return -EINVAL; 980 return -EINVAL;
980 981
981 efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, 982 efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
983 efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
982 (rule->ring_cookie == RX_CLS_FLOW_DISC) ? 984 (rule->ring_cookie == RX_CLS_FLOW_DISC) ?
983 0xfff : rule->ring_cookie); 985 0xfff : rule->ring_cookie);
984 986
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 49bcd196e10d..4486102fa9b3 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -1546,10 +1546,6 @@ static int falcon_probe_nic(struct efx_nic *efx)
 
 static void falcon_init_rx_cfg(struct efx_nic *efx)
 {
-	/* Prior to Siena the RX DMA engine will split each frame at
-	 * intervals of RX_USR_BUF_SIZE (32-byte units). We set it to
-	 * be so large that that never happens. */
-	const unsigned huge_buf_size = (3 * 4096) >> 5;
 	/* RX control FIFO thresholds (32 entries) */
 	const unsigned ctrl_xon_thr = 20;
 	const unsigned ctrl_xoff_thr = 25;
@@ -1557,10 +1553,15 @@ static void falcon_init_rx_cfg(struct efx_nic *efx)
 
 	efx_reado(efx, &reg, FR_AZ_RX_CFG);
 	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
-		/* Data FIFO size is 5.5K */
+		/* Data FIFO size is 5.5K.  The RX DMA engine only
+		 * supports scattering for user-mode queues, but will
+		 * split DMA writes at intervals of RX_USR_BUF_SIZE
+		 * (32-byte units) even for kernel-mode queues.  We
+		 * set it to be so large that that never happens.
+		 */
 		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
 		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
-				    huge_buf_size);
+				    (3 * 4096) >> 5);
 		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8);
 		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8);
 		EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
@@ -1569,7 +1570,7 @@ static void falcon_init_rx_cfg(struct efx_nic *efx)
 		/* Data FIFO size is 80K; register fields moved */
 		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
 		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
-				    huge_buf_size);
+				    EFX_RX_USR_BUF_SIZE >> 5);
 		/* Send XON and XOFF at ~3 * max MTU away from empty/full */
 		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8);
 		EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8);
@@ -1815,6 +1816,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
 	.evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
 	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
 	.rx_buffer_padding = 0x24,
+	.can_rx_scatter = false,
 	.max_interrupt_mode = EFX_INT_MODE_MSI,
 	.phys_addr_channels = 4,
 	.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
@@ -1865,6 +1867,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
 	.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
 	.rx_buffer_hash_size = 0x10,
 	.rx_buffer_padding = 0,
+	.can_rx_scatter = true,
 	.max_interrupt_mode = EFX_INT_MODE_MSIX,
 	.phys_addr_channels = 32,	/* Hardware limit is 64, but the legacy
 					 * interrupt handler only supports 32
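
For reference, FRF_AA/BZ_RX_USR_BUF_SIZE is programmed in 32-byte units, which is why both values above are shifted right by 5. A sketch of the arithmetic, assuming only the constants visible in this patch:

/* Sketch only: the >> 5 above converts bytes to the register's
 * 32-byte units.  On A1 the value is made larger than any frame
 * (3 * 4096 bytes = 384 units) so DMA writes are never split; on B0+
 * it is the real scatter buffer size (1824 bytes = 57 units).
 */
static unsigned int rx_usr_buf_units(unsigned int bytes)
{
	return bytes >> 5;	/* (3 * 4096) >> 5 == 384; 1824 >> 5 == 57 */
}
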
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
index 8af42cd1feda..2397f0e8d3eb 100644
--- a/drivers/net/ethernet/sfc/filter.c
+++ b/drivers/net/ethernet/sfc/filter.c
@@ -66,6 +66,10 @@ struct efx_filter_state {
 #endif
 };
 
+static void efx_filter_table_clear_entry(struct efx_nic *efx,
+					 struct efx_filter_table *table,
+					 unsigned int filter_idx);
+
 /* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
  * key derived from the n-tuple.  The initial LFSR state is 0xffff. */
 static u16 efx_filter_hash(u32 key)
@@ -168,6 +172,25 @@ static void efx_filter_push_rx_config(struct efx_nic *efx)
 			filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
 			!!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
 			   EFX_FILTER_FLAG_RX_RSS));
+
+		/* There is a single bit to enable RX scatter for all
+		 * unmatched packets.  Only set it if scatter is
+		 * enabled in both filter specs.
+		 */
+		EFX_SET_OWORD_FIELD(
+			filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
+			!!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
+			   table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
+			   EFX_FILTER_FLAG_RX_SCATTER));
+	} else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+		/* We don't expose 'default' filters because unmatched
+		 * packets always go to the queue number found in the
+		 * RSS table.  But we still need to set the RX scatter
+		 * bit here.
+		 */
+		EFX_SET_OWORD_FIELD(
+			filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
+			efx->rx_scatter);
 	}
 
 	efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
@@ -409,9 +432,18 @@ static void efx_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx)
 	struct efx_filter_state *state = efx->filter_state;
 	struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_DEF];
 	struct efx_filter_spec *spec = &table->spec[filter_idx];
+	enum efx_filter_flags flags = 0;
+
+	/* If there's only one channel then disable RSS for non VF
+	 * traffic, thereby allowing VFs to use RSS when the PF can't.
+	 */
+	if (efx->n_rx_channels > 1)
+		flags |= EFX_FILTER_FLAG_RX_RSS;
 
-	efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL,
-			   EFX_FILTER_FLAG_RX_RSS, 0);
+	if (efx->rx_scatter)
+		flags |= EFX_FILTER_FLAG_RX_SCATTER;
+
+	efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL, flags, 0);
 	spec->type = EFX_FILTER_UC_DEF + filter_idx;
 	table->used_bitmap[0] |= 1 << filter_idx;
 }
@@ -463,13 +495,6 @@ static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
 		break;
 	}
 
-	case EFX_FILTER_TABLE_RX_DEF:
-		/* One filter spec per type */
-		BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0);
-		BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF !=
-			     EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
-		return spec->type - EFX_FILTER_UC_DEF;
-
 	case EFX_FILTER_TABLE_RX_MAC: {
 		bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
 		EFX_POPULATE_OWORD_7(
@@ -520,42 +545,6 @@ static bool efx_filter_equal(const struct efx_filter_spec *left,
 	return true;
 }
 
-static int efx_filter_search(struct efx_filter_table *table,
-			     struct efx_filter_spec *spec, u32 key,
-			     bool for_insert, unsigned int *depth_required)
-{
-	unsigned hash, incr, filter_idx, depth, depth_max;
-
-	hash = efx_filter_hash(key);
-	incr = efx_filter_increment(key);
-
-	filter_idx = hash & (table->size - 1);
-	depth = 1;
-	depth_max = (for_insert ?
-		     (spec->priority <= EFX_FILTER_PRI_HINT ?
-		      FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX) :
-		     table->search_depth[spec->type]);
-
-	for (;;) {
-		/* Return success if entry is used and matches this spec
-		 * or entry is unused and we are trying to insert.
-		 */
-		if (test_bit(filter_idx, table->used_bitmap) ?
-		    efx_filter_equal(spec, &table->spec[filter_idx]) :
-		    for_insert) {
-			*depth_required = depth;
-			return filter_idx;
-		}
-
-		/* Return failure if we reached the maximum search depth */
-		if (depth == depth_max)
-			return for_insert ? -EBUSY : -ENOENT;
-
-		filter_idx = (filter_idx + incr) & (table->size - 1);
-		++depth;
-	}
-}
-
 /*
  * Construct/deconstruct external filter IDs.  At least the RX filter
  * IDs must be ordered by matching priority, for RX NFC semantics.
@@ -650,44 +639,111 @@ u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
  * efx_filter_insert_filter - add or replace a filter
  * @efx: NIC in which to insert the filter
  * @spec: Specification for the filter
- * @replace: Flag for whether the specified filter may replace a filter
- *	with an identical match expression and equal or lower priority
+ * @replace_equal: Flag for whether the specified filter may replace an
+ *	existing filter with equal priority
  *
  * On success, return the filter ID.
  * On failure, return a negative error code.
+ *
+ * If an existing filter has equal match values to the new filter
+ * spec, then the new filter might replace it, depending on the
+ * relative priorities.  If the existing filter has lower priority, or
+ * if @replace_equal is set and it has equal priority, then it is
+ * replaced.  Otherwise the function fails, returning -%EPERM if
+ * the existing filter has higher priority or -%EEXIST if it has
+ * equal priority.
 */
 s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
-			     bool replace)
+			     bool replace_equal)
 {
 	struct efx_filter_state *state = efx->filter_state;
 	struct efx_filter_table *table = efx_filter_spec_table(state, spec);
-	struct efx_filter_spec *saved_spec;
 	efx_oword_t filter;
-	unsigned int filter_idx, depth = 0;
-	u32 key;
+	int rep_index, ins_index;
+	unsigned int depth = 0;
 	int rc;
 
 	if (!table || table->size == 0)
 		return -EINVAL;
 
-	key = efx_filter_build(&filter, spec);
-
 	netif_vdbg(efx, hw, efx->net_dev,
 		   "%s: type %d search_depth=%d", __func__, spec->type,
 		   table->search_depth[spec->type]);
 
-	spin_lock_bh(&state->lock);
+	if (table->id == EFX_FILTER_TABLE_RX_DEF) {
+		/* One filter spec per type */
+		BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0);
+		BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF !=
+			     EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
+		rep_index = spec->type - EFX_FILTER_INDEX_UC_DEF;
+		ins_index = rep_index;
 
-	rc = efx_filter_search(table, spec, key, true, &depth);
-	if (rc < 0)
-		goto out;
-	filter_idx = rc;
-	BUG_ON(filter_idx >= table->size);
-	saved_spec = &table->spec[filter_idx];
-
-	if (test_bit(filter_idx, table->used_bitmap)) {
-		/* Should we replace the existing filter? */
-		if (!replace) {
+		spin_lock_bh(&state->lock);
+	} else {
+		/* Search concurrently for
+		 * (1) a filter to be replaced (rep_index): any filter
+		 *     with the same match values, up to the current
+		 *     search depth for this type, and
+		 * (2) the insertion point (ins_index): (1) or any
+		 *     free slot before it or up to the maximum search
+		 *     depth for this priority
+		 * We fail if we cannot find (2).
+		 *
+		 * We can stop once either
+		 * (a) we find (1), in which case we have definitely
+		 *     found (2) as well; or
+		 * (b) we have searched exhaustively for (1), and have
+		 *     either found (2) or searched exhaustively for it
+		 */
+		u32 key = efx_filter_build(&filter, spec);
+		unsigned int hash = efx_filter_hash(key);
+		unsigned int incr = efx_filter_increment(key);
+		unsigned int max_rep_depth = table->search_depth[spec->type];
+		unsigned int max_ins_depth =
+			spec->priority <= EFX_FILTER_PRI_HINT ?
+			FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX;
+		unsigned int i = hash & (table->size - 1);
+
+		ins_index = -1;
+		depth = 1;
+
+		spin_lock_bh(&state->lock);
+
+		for (;;) {
+			if (!test_bit(i, table->used_bitmap)) {
+				if (ins_index < 0)
+					ins_index = i;
+			} else if (efx_filter_equal(spec, &table->spec[i])) {
+				/* Case (a) */
+				if (ins_index < 0)
+					ins_index = i;
+				rep_index = i;
+				break;
+			}
+
+			if (depth >= max_rep_depth &&
+			    (ins_index >= 0 || depth >= max_ins_depth)) {
+				/* Case (b) */
+				if (ins_index < 0) {
+					rc = -EBUSY;
+					goto out;
+				}
+				rep_index = -1;
+				break;
+			}
+
+			i = (i + incr) & (table->size - 1);
+			++depth;
+		}
+	}
+
+	/* If we found a filter to be replaced, check whether we
+	 * should do so
+	 */
+	if (rep_index >= 0) {
+		struct efx_filter_spec *saved_spec = &table->spec[rep_index];
+
+		if (spec->priority == saved_spec->priority && !replace_equal) {
 			rc = -EEXIST;
 			goto out;
 		}
@@ -695,11 +751,14 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
 			rc = -EPERM;
 			goto out;
 		}
-	} else {
-		__set_bit(filter_idx, table->used_bitmap);
+	}
+
+	/* Insert the filter */
+	if (ins_index != rep_index) {
+		__set_bit(ins_index, table->used_bitmap);
 		++table->used;
 	}
-	*saved_spec = *spec;
+	table->spec[ins_index] = *spec;
 
 	if (table->id == EFX_FILTER_TABLE_RX_DEF) {
 		efx_filter_push_rx_config(efx);
@@ -713,13 +772,19 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
 		}
 
 		efx_writeo(efx, &filter,
-			   table->offset + table->step * filter_idx);
+			   table->offset + table->step * ins_index);
+
+		/* If we were able to replace a filter by inserting
+		 * at a lower depth, clear the replaced filter
+		 */
+		if (ins_index != rep_index && rep_index >= 0)
+			efx_filter_table_clear_entry(efx, table, rep_index);
 	}
 
 	netif_vdbg(efx, hw, efx->net_dev,
 		   "%s: filter type %d index %d rxq %u set",
-		   __func__, spec->type, filter_idx, spec->dmaq_id);
-	rc = efx_filter_make_id(spec, filter_idx);
+		   __func__, spec->type, ins_index, spec->dmaq_id);
+	rc = efx_filter_make_id(spec, ins_index);
 
 out:
 	spin_unlock_bh(&state->lock);
@@ -1060,6 +1125,50 @@ void efx_remove_filters(struct efx_nic *efx)
 	kfree(state);
 }
 
+/* Update scatter enable flags for filters pointing to our own RX queues */
+void efx_filter_update_rx_scatter(struct efx_nic *efx)
+{
+	struct efx_filter_state *state = efx->filter_state;
+	enum efx_filter_table_id table_id;
+	struct efx_filter_table *table;
+	efx_oword_t filter;
+	unsigned int filter_idx;
+
+	spin_lock_bh(&state->lock);
+
+	for (table_id = EFX_FILTER_TABLE_RX_IP;
+	     table_id <= EFX_FILTER_TABLE_RX_DEF;
+	     table_id++) {
+		table = &state->table[table_id];
+
+		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+			if (!test_bit(filter_idx, table->used_bitmap) ||
+			    table->spec[filter_idx].dmaq_id >=
+			    efx->n_rx_channels)
+				continue;
+
+			if (efx->rx_scatter)
+				table->spec[filter_idx].flags |=
+					EFX_FILTER_FLAG_RX_SCATTER;
+			else
+				table->spec[filter_idx].flags &=
+					~EFX_FILTER_FLAG_RX_SCATTER;
+
+			if (table_id == EFX_FILTER_TABLE_RX_DEF)
+				/* Pushed by efx_filter_push_rx_config() */
+				continue;
+
+			efx_filter_build(&filter, &table->spec[filter_idx]);
+			efx_writeo(efx, &filter,
+				   table->offset + table->step * filter_idx);
+		}
+	}
+
+	efx_filter_push_rx_config(efx);
+
+	spin_unlock_bh(&state->lock);
+}
+
 #ifdef CONFIG_RFS_ACCEL
 
 int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
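
The combined search in efx_filter_insert_filter() above replaces two separate probes with one pass that tracks both a replacement candidate and an insertion slot. A condensed, self-contained model of that loop (toy hash and table, not the driver's LFSR-based functions):

/* Toy model of the combined search: probe an open-addressed table,
 * remembering the first free slot (ins) while still scanning for an
 * entry with equal match values (which would be replaced in place).
 * Returns the slot to use, or -1 if no free slot within max_depth.
 */
static int toy_filter_probe(const int *table, const unsigned long *used,
			    int spec, unsigned int hash, unsigned int incr,
			    unsigned int size, unsigned int max_depth)
{
	unsigned int i = hash & (size - 1), depth;
	int ins = -1;

	for (depth = 1; depth <= max_depth; depth++) {
		if (!test_bit(i, used)) {
			if (ins < 0)
				ins = i;	/* first free slot */
		} else if (table[i] == spec) {
			return i;		/* equal match: replace */
		}
		i = (i + incr) & (size - 1);
	}
	return ins;				/* may be -1: table busy */
}
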
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index 9d426d0457bd..c5c9747861ba 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -553,6 +553,7 @@
 #define MC_CMD_PTP_MODE_V1_VLAN 0x1 /* enum */
 #define MC_CMD_PTP_MODE_V2 0x2 /* enum */
 #define MC_CMD_PTP_MODE_V2_VLAN 0x3 /* enum */
+#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4 /* enum */
 
 /* MC_CMD_PTP_IN_DISABLE msgrequest */
 #define MC_CMD_PTP_IN_DISABLE_LEN 8
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 0a90abd2421b..9bd433a095c5 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -69,6 +69,12 @@
 #define EFX_TXQ_TYPES		4
 #define EFX_MAX_TX_QUEUES	(EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
 
+/* Maximum possible MTU the driver supports */
+#define EFX_MAX_MTU (9 * 1024)
+
+/* Size of an RX scatter buffer.  Small enough to pack 2 into a 4K page. */
+#define EFX_RX_USR_BUF_SIZE 1824
+
 /* Forward declare Precision Time Protocol (PTP) support structure. */
 struct efx_ptp_data;
 
@@ -206,25 +212,23 @@ struct efx_tx_queue {
 /**
  * struct efx_rx_buffer - An Efx RX data buffer
  * @dma_addr: DMA base address of the buffer
- * @skb: The associated socket buffer. Valid iff !(@flags & %EFX_RX_BUF_PAGE).
- *	Will be %NULL if the buffer slot is currently free.
- * @page: The associated page buffer. Valif iff @flags & %EFX_RX_BUF_PAGE.
+ * @page: The associated page buffer.
  *	Will be %NULL if the buffer slot is currently free.
- * @page_offset: Offset within page. Valid iff @flags & %EFX_RX_BUF_PAGE.
- * @len: Buffer length, in bytes.
- * @flags: Flags for buffer and packet state.
+ * @page_offset: If pending: offset in @page of DMA base address.
+ *	If completed: offset in @page of Ethernet header.
+ * @len: If pending: length for DMA descriptor.
+ *	If completed: received length, excluding hash prefix.
+ * @flags: Flags for buffer and packet state.  These are only set on the
+ *	first buffer of a scattered packet.
 */
 struct efx_rx_buffer {
 	dma_addr_t dma_addr;
-	union {
-		struct sk_buff *skb;
-		struct page *page;
-	} u;
+	struct page *page;
 	u16 page_offset;
 	u16 len;
 	u16 flags;
 };
-#define EFX_RX_BUF_PAGE		0x0001
+#define EFX_RX_BUF_LAST_IN_PAGE	0x0001
 #define EFX_RX_PKT_CSUMMED	0x0002
 #define EFX_RX_PKT_DISCARD	0x0004
 
@@ -260,14 +264,23 @@ struct efx_rx_page_state {
  * @added_count: Number of buffers added to the receive queue.
  * @notified_count: Number of buffers given to NIC (<= @added_count).
  * @removed_count: Number of buffers removed from the receive queue.
+ * @scatter_n: Number of buffers used by current packet
+ * @page_ring: The ring to store DMA mapped pages for reuse.
+ * @page_add: Counter to calculate the write pointer for the recycle ring.
+ * @page_remove: Counter to calculate the read pointer for the recycle ring.
+ * @page_recycle_count: The number of pages that have been recycled.
+ * @page_recycle_failed: The number of pages that couldn't be recycled because
+ *	the kernel still held a reference to them.
+ * @page_recycle_full: The number of pages that were released because the
+ *	recycle ring was full.
+ * @page_ptr_mask: The number of pages in the RX recycle ring minus 1.
  * @max_fill: RX descriptor maximum fill level (<= ring size)
  * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
  *	(<= @max_fill)
  * @min_fill: RX descriptor minimum non-zero fill level.
  *	This records the minimum fill level observed when a ring
  *	refill was triggered.
- * @alloc_page_count: RX allocation strategy counter.
- * @alloc_skb_count: RX allocation strategy counter.
+ * @recycle_count: RX buffer recycle counter.
  * @slow_fill: Timer used to defer efx_nic_generate_fill_event().
 */
 struct efx_rx_queue {
@@ -279,15 +292,22 @@ struct efx_rx_queue {
 	bool enabled;
 	bool flush_pending;
 
-	int added_count;
-	int notified_count;
-	int removed_count;
+	unsigned int added_count;
+	unsigned int notified_count;
+	unsigned int removed_count;
+	unsigned int scatter_n;
+	struct page **page_ring;
+	unsigned int page_add;
+	unsigned int page_remove;
+	unsigned int page_recycle_count;
+	unsigned int page_recycle_failed;
+	unsigned int page_recycle_full;
+	unsigned int page_ptr_mask;
 	unsigned int max_fill;
 	unsigned int fast_fill_trigger;
 	unsigned int min_fill;
 	unsigned int min_overfill;
-	unsigned int alloc_page_count;
-	unsigned int alloc_skb_count;
+	unsigned int recycle_count;
 	struct timer_list slow_fill;
 	unsigned int slow_fill_count;
 };
@@ -336,10 +356,6 @@ enum efx_rx_alloc_method {
  * @event_test_cpu: Last CPU to handle interrupt or test event for this channel
  * @irq_count: Number of IRQs since last adaptive moderation decision
  * @irq_mod_score: IRQ moderation score
- * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
- *	and diagnostic counters
- * @rx_alloc_push_pages: RX allocation method currently in use for pushing
- *	descriptors
  * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
  * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
  * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
@@ -347,6 +363,12 @@ enum efx_rx_alloc_method {
  * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
  * @n_rx_overlength: Count of RX_OVERLENGTH errors
  * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
+ * @n_rx_nodesc_trunc: Number of RX packets truncated and then dropped due to
+ *	lack of descriptors
+ * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
+ *	__efx_rx_packet(), or zero if there is none
+ * @rx_pkt_index: Ring index of first buffer for next packet to be delivered
+ *	by __efx_rx_packet(), if @rx_pkt_n_frags != 0
  * @rx_queue: RX queue for this channel
  * @tx_queue: TX queues for this channel
 */
@@ -371,9 +393,6 @@ struct efx_channel {
 	unsigned int rfs_filters_added;
 #endif
 
-	int rx_alloc_level;
-	int rx_alloc_push_pages;
-
 	unsigned n_rx_tobe_disc;
 	unsigned n_rx_ip_hdr_chksum_err;
 	unsigned n_rx_tcp_udp_chksum_err;
@@ -381,11 +400,10 @@ struct efx_channel {
 	unsigned n_rx_frm_trunc;
 	unsigned n_rx_overlength;
 	unsigned n_skbuff_leaks;
+	unsigned int n_rx_nodesc_trunc;
 
-	/* Used to pipeline received packets in order to optimise memory
-	 * access with prefetches.
-	 */
-	struct efx_rx_buffer *rx_pkt;
+	unsigned int rx_pkt_n_frags;
+	unsigned int rx_pkt_index;
 
 	struct efx_rx_queue rx_queue;
 	struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
@@ -410,7 +428,7 @@ struct efx_channel_type {
 	void (*post_remove)(struct efx_channel *);
 	void (*get_name)(struct efx_channel *, char *buf, size_t len);
 	struct efx_channel *(*copy)(const struct efx_channel *);
-	void (*receive_skb)(struct efx_channel *, struct sk_buff *);
+	bool (*receive_skb)(struct efx_channel *, struct sk_buff *);
 	bool keep_eventq;
 };
 
@@ -446,6 +464,7 @@ enum nic_state {
 	STATE_UNINIT = 0,	/* device being probed/removed or is frozen */
 	STATE_READY = 1,	/* hardware ready and netdev registered */
 	STATE_DISABLED = 2,	/* device disabled due to hardware errors */
+	STATE_RECOVERY = 3,	/* device recovering from PCI error */
 };
 
 /*
@@ -684,10 +703,13 @@ struct vfdi_status;
  * @n_channels: Number of channels in use
  * @n_rx_channels: Number of channels used for RX (= number of RX queues)
  * @n_tx_channels: Number of channels used for TX
- * @rx_buffer_len: RX buffer length
+ * @rx_dma_len: Current maximum RX DMA length
  * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
+ * @rx_buffer_truesize: Amortised allocation size of an RX buffer,
+ *	for use in sk_buff::truesize
  * @rx_hash_key: Toeplitz hash key for RSS
  * @rx_indir_table: Indirection table for RSS
+ * @rx_scatter: Scatter mode enabled for receives
  * @int_error_count: Number of internal errors seen recently
  * @int_error_expire: Time at which error count will be expired
  * @irq_status: Interrupt status buffer
@@ -800,10 +822,15 @@ struct efx_nic {
 	unsigned rss_spread;
 	unsigned tx_channel_offset;
 	unsigned n_tx_channels;
-	unsigned int rx_buffer_len;
+	unsigned int rx_dma_len;
 	unsigned int rx_buffer_order;
+	unsigned int rx_buffer_truesize;
+	unsigned int rx_page_buf_step;
+	unsigned int rx_bufs_per_page;
+	unsigned int rx_pages_per_batch;
 	u8 rx_hash_key[40];
 	u32 rx_indir_table[128];
+	bool rx_scatter;
 
 	unsigned int_error_count;
 	unsigned long int_error_expire;
@@ -934,8 +961,9 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
  * @evq_ptr_tbl_base: Event queue pointer table base address
  * @evq_rptr_tbl_base: Event queue read-pointer table base address
  * @max_dma_mask: Maximum possible DMA mask
- * @rx_buffer_hash_size: Size of hash at start of RX buffer
- * @rx_buffer_padding: Size of padding at end of RX buffer
+ * @rx_buffer_hash_size: Size of hash at start of RX packet
+ * @rx_buffer_padding: Size of padding at end of RX packet
+ * @can_rx_scatter: NIC is able to scatter packet to multiple buffers
  * @max_interrupt_mode: Highest capability interrupt mode supported
  *	from &enum efx_init_mode.
  * @phys_addr_channels: Number of channels with physically addressed
@@ -983,6 +1011,7 @@ struct efx_nic_type {
 	u64 max_dma_mask;
 	unsigned int rx_buffer_hash_size;
 	unsigned int rx_buffer_padding;
+	bool can_rx_scatter;
 	unsigned int max_interrupt_mode;
 	unsigned int phys_addr_channels;
 	unsigned int timer_period_max;
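
EFX_RX_USR_BUF_SIZE of 1824 is chosen so that, after the per-page state header and IP alignment, exactly two receive buffers fit in a 4K page. A back-of-envelope check, assuming 64-byte cache lines, a NET_IP_ALIGN of 2 and a 16-byte efx_rx_page_state (these sizes are assumptions for illustration):

#include <stdio.h>

int main(void)
{
	unsigned page = 4096;	/* PAGE_SIZE */
	unsigned state = 16;	/* assumed sizeof(struct efx_rx_page_state) */
	unsigned step = ((1824 + 2) + 63) / 64 * 64;	/* ALIGN to 64 = 1856 */

	printf("buf step %u, bufs per page %u\n", step, (page - state) / step);
	/* prints: buf step 1856, bufs per page 2 */
	return 0;
}
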
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index eaa8e874a3cb..b0503cd8c2a0 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -305,11 +305,11 @@ int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
 			 unsigned int len)
 {
 	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
-					  &buffer->dma_addr, GFP_ATOMIC);
+					  &buffer->dma_addr,
+					  GFP_ATOMIC | __GFP_ZERO);
 	if (!buffer->addr)
 		return -ENOMEM;
 	buffer->len = len;
-	memset(buffer->addr, 0, len);
 	return 0;
 }
 
@@ -592,12 +592,22 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
 	struct efx_nic *efx = rx_queue->efx;
 	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
 	bool iscsi_digest_en = is_b0;
+	bool jumbo_en;
+
+	/* For kernel-mode queues in Falcon A1, the JUMBO flag enables
+	 * DMA to continue after a PCIe page boundary (and scattering
+	 * is not possible).  In Falcon B0 and Siena, it enables
+	 * scatter.
+	 */
+	jumbo_en = !is_b0 || efx->rx_scatter;
 
 	netif_dbg(efx, hw, efx->net_dev,
 		  "RX queue %d ring in special buffers %d-%d\n",
 		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
 		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);
 
+	rx_queue->scatter_n = 0;
+
 	/* Pin RX descriptor ring */
 	efx_init_special_buffer(efx, &rx_queue->rxd);
 
@@ -614,8 +624,7 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
 			      FRF_AZ_RX_DESCQ_SIZE,
 			      __ffs(rx_queue->rxd.entries),
 			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
-			      /* For >=B0 this is scatter so disable */
-			      FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
+			      FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
 			      FRF_AZ_RX_DESCQ_EN, 1);
 	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
 			 efx_rx_queue_index(rx_queue));
@@ -969,13 +978,24 @@ static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
 		EFX_RX_PKT_DISCARD : 0;
 }
 
-/* Handle receive events that are not in-order. */
-static void
+/* Handle receive events that are not in-order. Return true if this
+ * can be handled as a partial packet discard, false if it's more
+ * serious.
+ */
+static bool
 efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
 {
+	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
 	struct efx_nic *efx = rx_queue->efx;
 	unsigned expected, dropped;
 
+	if (rx_queue->scatter_n &&
+	    index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
+		      rx_queue->ptr_mask)) {
+		++channel->n_rx_nodesc_trunc;
+		return true;
+	}
+
 	expected = rx_queue->removed_count & rx_queue->ptr_mask;
 	dropped = (index - expected) & rx_queue->ptr_mask;
 	netif_info(efx, rx_err, efx->net_dev,
@@ -984,6 +1004,7 @@ efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
 
 	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
 			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
+	return false;
 }
 
 /* Handle a packet received event
@@ -999,7 +1020,7 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
 	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
 	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
 	unsigned expected_ptr;
-	bool rx_ev_pkt_ok;
+	bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
 	u16 flags;
 	struct efx_rx_queue *rx_queue;
 	struct efx_nic *efx = channel->efx;
@@ -1007,21 +1028,56 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
 	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
 		return;
 
-	/* Basic packet information */
-	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
-	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
-	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
-	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
-	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
+	rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
+	rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
 	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
 		channel->channel);
 
 	rx_queue = efx_channel_get_rx_queue(channel);
 
 	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
-	expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
-	if (unlikely(rx_ev_desc_ptr != expected_ptr))
-		efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
+	expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
+			rx_queue->ptr_mask);
+
+	/* Check for partial drops and other errors */
+	if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
+	    unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
+		if (rx_ev_desc_ptr != expected_ptr &&
+		    !efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
+			return;
+
+		/* Discard all pending fragments */
+		if (rx_queue->scatter_n) {
+			efx_rx_packet(
+				rx_queue,
+				rx_queue->removed_count & rx_queue->ptr_mask,
+				rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
+			rx_queue->removed_count += rx_queue->scatter_n;
+			rx_queue->scatter_n = 0;
+		}
+
+		/* Return if there is no new fragment */
+		if (rx_ev_desc_ptr != expected_ptr)
+			return;
+
+		/* Discard new fragment if not SOP */
+		if (!rx_ev_sop) {
+			efx_rx_packet(
+				rx_queue,
+				rx_queue->removed_count & rx_queue->ptr_mask,
+				1, 0, EFX_RX_PKT_DISCARD);
+			++rx_queue->removed_count;
+			return;
+		}
+	}
+
+	++rx_queue->scatter_n;
+	if (rx_ev_cont)
+		return;
+
+	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
+	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
+	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
 
 	if (likely(rx_ev_pkt_ok)) {
 		/* If packet is marked as OK and packet type is TCP/IP or
@@ -1049,7 +1105,11 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
 		channel->irq_mod_score += 2;
 
 	/* Handle received packet */
-	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, flags);
+	efx_rx_packet(rx_queue,
+		      rx_queue->removed_count & rx_queue->ptr_mask,
+		      rx_queue->scatter_n, rx_ev_byte_cnt, flags);
+	rx_queue->removed_count += rx_queue->scatter_n;
+	rx_queue->scatter_n = 0;
 }
 
 /* If this flush done event corresponds to a &struct efx_tx_queue, then
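
The scatter-aware completion check above hinges on descriptors being consumed strictly in order: with scatter_n fragments already accepted, the only in-order completion index is removed_count + scatter_n modulo the ring size. Restated as a tiny predicate (names mirror the patch; this is a model, not driver code):

/* Model of the expected_ptr test in efx_handle_rx_event() above. */
static int rx_completion_in_order(unsigned int removed_count,
				  unsigned int scatter_n,
				  unsigned int ptr_mask, /* ring size - 1 */
				  unsigned int rx_ev_desc_ptr)
{
	return rx_ev_desc_ptr == ((removed_count + scatter_n) & ptr_mask);
}
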
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 3f93624fc273..07f6baa15c0c 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -99,6 +99,9 @@
 #define PTP_V2_VERSION_LENGTH	1
 #define PTP_V2_VERSION_OFFSET	29
 
+#define PTP_V2_UUID_LENGTH	8
+#define PTP_V2_UUID_OFFSET	48
+
 /* Although PTP V2 UUIDs are comprised a ClockIdentity (8) and PortNumber (2),
  * the MC only captures the last six bytes of the clock identity. These values
  * reflect those, not the ones used in the standard.  The standard permits
@@ -429,13 +432,10 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
 	unsigned number_readings = (response_length /
 				    MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN);
 	unsigned i;
-	unsigned min;
-	unsigned min_set = 0;
 	unsigned total;
 	unsigned ngood = 0;
 	unsigned last_good = 0;
 	struct efx_ptp_data *ptp = efx->ptp_data;
-	bool min_valid = false;
 	u32 last_sec;
 	u32 start_sec;
 	struct timespec delta;
@@ -443,35 +443,17 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
 	if (number_readings == 0)
 		return -EAGAIN;
 
-	/* Find minimum value in this set of results, discarding clearly
-	 * erroneous results.
+	/* Read the set of results and increment stats for any results that
+	 * appear to be erroneous.
 	 */
 	for (i = 0; i < number_readings; i++) {
 		efx_ptp_read_timeset(synch_buf, &ptp->timeset[i]);
 		synch_buf += MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN;
-		if (ptp->timeset[i].window > SYNCHRONISATION_GRANULARITY_NS) {
-			if (min_valid) {
-				if (ptp->timeset[i].window < min_set)
-					min_set = ptp->timeset[i].window;
-			} else {
-				min_valid = true;
-				min_set = ptp->timeset[i].window;
-			}
-		}
-	}
-
-	if (min_valid) {
-		if (ptp->base_sync_valid && (min_set > ptp->base_sync_ns))
-			min = ptp->base_sync_ns;
-		else
-			min = min_set;
-	} else {
-		min = SYNCHRONISATION_GRANULARITY_NS;
 	}
 
-	/* Discard excessively long synchronise durations.  The MC times
-	 * when it finishes reading the host time so the corrected window
-	 * time should be fairly constant for a given platform.
+	/* Find the last good host-MC synchronization result.  The MC times
+	 * when it finishes reading the host time so the corrected window time
+	 * should be fairly constant for a given platform.
 	 */
 	total = 0;
 	for (i = 0; i < number_readings; i++)
@@ -489,8 +471,8 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
 
 	if (ngood == 0) {
 		netif_warn(efx, drv, efx->net_dev,
-			   "PTP no suitable synchronisations %dns %dns\n",
-			   ptp->base_sync_ns, min_set);
+			   "PTP no suitable synchronisations %dns\n",
+			   ptp->base_sync_ns);
 		return -EAGAIN;
 	}
 
@@ -1006,43 +988,53 @@ bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
  * the receive timestamp from the MC - this will probably occur after the
  * packet arrival because of the processing in the MC.
 */
-static void efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
+static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
 {
 	struct efx_nic *efx = channel->efx;
 	struct efx_ptp_data *ptp = efx->ptp_data;
 	struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb;
-	u8 *data;
+	u8 *match_data_012, *match_data_345;
 	unsigned int version;
 
 	match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
 
 	/* Correct version? */
 	if (ptp->mode == MC_CMD_PTP_MODE_V1) {
-		if (skb->len < PTP_V1_MIN_LENGTH) {
-			netif_receive_skb(skb);
-			return;
+		if (!pskb_may_pull(skb, PTP_V1_MIN_LENGTH)) {
+			return false;
 		}
 		version = ntohs(*(__be16 *)&skb->data[PTP_V1_VERSION_OFFSET]);
 		if (version != PTP_VERSION_V1) {
-			netif_receive_skb(skb);
-			return;
+			return false;
 		}
+
+		/* PTP V1 uses all six bytes of the UUID to match the packet
+		 * to the timestamp
+		 */
+		match_data_012 = skb->data + PTP_V1_UUID_OFFSET;
+		match_data_345 = skb->data + PTP_V1_UUID_OFFSET + 3;
 	} else {
-		if (skb->len < PTP_V2_MIN_LENGTH) {
-			netif_receive_skb(skb);
-			return;
+		if (!pskb_may_pull(skb, PTP_V2_MIN_LENGTH)) {
+			return false;
 		}
 		version = skb->data[PTP_V2_VERSION_OFFSET];
-
-		BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2);
-		BUILD_BUG_ON(PTP_V1_UUID_OFFSET != PTP_V2_MC_UUID_OFFSET);
-		BUILD_BUG_ON(PTP_V1_UUID_LENGTH != PTP_V2_MC_UUID_LENGTH);
-		BUILD_BUG_ON(PTP_V1_SEQUENCE_OFFSET != PTP_V2_SEQUENCE_OFFSET);
-		BUILD_BUG_ON(PTP_V1_SEQUENCE_LENGTH != PTP_V2_SEQUENCE_LENGTH);
-
 		if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) {
-			netif_receive_skb(skb);
-			return;
+			return false;
 		}
+
+		/* The original V2 implementation uses bytes 2-7 of
+		 * the UUID to match the packet to the timestamp. This
+		 * discards two of the bytes of the MAC address used
+		 * to create the UUID (SF bug 33070).  The PTP V2
+		 * enhanced mode fixes this issue and uses bytes 0-2
+		 * and byte 5-7 of the UUID.
+		 */
+		match_data_345 = skb->data + PTP_V2_UUID_OFFSET + 5;
+		if (ptp->mode == MC_CMD_PTP_MODE_V2) {
+			match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 2;
+		} else {
+			match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 0;
+			BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2_ENHANCED);
+		}
 	}
 
@@ -1056,14 +1048,19 @@ static void efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
 	timestamps = skb_hwtstamps(skb);
 	memset(timestamps, 0, sizeof(*timestamps));
 
+	/* We expect the sequence number to be in the same position in
+	 * the packet for PTP V1 and V2
+	 */
+	BUILD_BUG_ON(PTP_V1_SEQUENCE_OFFSET != PTP_V2_SEQUENCE_OFFSET);
+	BUILD_BUG_ON(PTP_V1_SEQUENCE_LENGTH != PTP_V2_SEQUENCE_LENGTH);
+
 	/* Extract UUID/Sequence information */
-	data = skb->data + PTP_V1_UUID_OFFSET;
-	match->words[0] = (data[0]         |
-			   (data[1] << 8)  |
-			   (data[2] << 16) |
-			   (data[3] << 24));
-	match->words[1] = (data[4]         |
-			   (data[5] << 8)  |
+	match->words[0] = (match_data_012[0]         |
+			   (match_data_012[1] << 8)  |
+			   (match_data_012[2] << 16) |
+			   (match_data_345[0] << 24));
+	match->words[1] = (match_data_345[1]         |
+			   (match_data_345[2] << 8)  |
 			   (skb->data[PTP_V1_SEQUENCE_OFFSET +
 				      PTP_V1_SEQUENCE_LENGTH - 1] <<
 			    16));
@@ -1073,6 +1070,8 @@ static void efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
 
 	skb_queue_tail(&ptp->rxq, skb);
 	queue_work(ptp->workwq, &ptp->work);
+
+	return true;
 }
 
 /* Transmit a PTP packet.  This has to be transmitted by the MC
@@ -1167,7 +1166,7 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
 	 * timestamped
 	 */
 	init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
-	new_mode = MC_CMD_PTP_MODE_V2;
+	new_mode = MC_CMD_PTP_MODE_V2_ENHANCED;
 	enable_wanted = true;
 	break;
 	case HWTSTAMP_FILTER_PTP_V2_EVENT:
@@ -1186,7 +1185,14 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
 	if (init->tx_type != HWTSTAMP_TX_OFF)
 		enable_wanted = true;
 
+	/* Old versions of the firmware do not support the improved
+	 * UUID filtering option (SF bug 33070).  If the firmware does
+	 * not accept the enhanced mode, fall back to the standard PTP
+	 * v2 UUID filtering.
+	 */
 	rc = efx_ptp_change_mode(efx, enable_wanted, new_mode);
+	if ((rc != 0) && (new_mode == MC_CMD_PTP_MODE_V2_ENHANCED))
+		rc = efx_ptp_change_mode(efx, enable_wanted, MC_CMD_PTP_MODE_V2);
 	if (rc != 0)
 		return rc;
 
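
To summarise the three UUID layouts handled above: PTP v1 matches on all six UUID bytes, the original v2 mode on bytes 2-7 (losing two bytes of the MAC-derived identity, SF bug 33070), and the enhanced v2 mode on bytes 0-2 plus 5-7. A condensed restatement using only the offsets from this patch:

/* Restatement of the match-byte selection in efx_ptp_rx() above. */
static void ptp_uuid_match_bytes(const u8 *data, unsigned int mode,
				 const u8 **m012, const u8 **m345)
{
	if (mode == MC_CMD_PTP_MODE_V1) {
		*m012 = data + PTP_V1_UUID_OFFSET;	/* bytes 0-2 */
		*m345 = data + PTP_V1_UUID_OFFSET + 3;	/* bytes 3-5 */
	} else if (mode == MC_CMD_PTP_MODE_V2) {
		*m012 = data + PTP_V2_UUID_OFFSET + 2;	/* bytes 2-4 */
		*m345 = data + PTP_V2_UUID_OFFSET + 5;	/* bytes 5-7 */
	} else {	/* MC_CMD_PTP_MODE_V2_ENHANCED */
		*m012 = data + PTP_V2_UUID_OFFSET;	/* bytes 0-2 */
		*m345 = data + PTP_V2_UUID_OFFSET + 5;	/* bytes 5-7 */
	}
}
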
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index bb579a6128c8..e73e30bac10e 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -16,6 +16,7 @@
 #include <linux/udp.h>
 #include <linux/prefetch.h>
 #include <linux/moduleparam.h>
+#include <linux/iommu.h>
 #include <net/ip.h>
 #include <net/checksum.h>
 #include "net_driver.h"
@@ -24,85 +25,39 @@
 #include "selftest.h"
 #include "workarounds.h"
 
-/* Number of RX descriptors pushed at once. */
-#define EFX_RX_BATCH  8
+/* Preferred number of descriptors to fill at once */
+#define EFX_RX_PREFERRED_BATCH 8U
 
-/* Maximum size of a buffer sharing a page */
-#define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))
+/* Number of RX buffers to recycle pages for.  When creating the RX page recycle
+ * ring, this number is divided by the number of buffers per page to calculate
+ * the number of pages to store in the RX page recycle ring.
+ */
+#define EFX_RECYCLE_RING_SIZE_IOMMU 4096
+#define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)
 
 /* Size of buffer allocated for skb header area. */
 #define EFX_SKB_HEADERS  64u
 
-/*
- * rx_alloc_method - RX buffer allocation method
- *
- * This driver supports two methods for allocating and using RX buffers:
- * each RX buffer may be backed by an skb or by an order-n page.
- *
- * When GRO is in use then the second method has a lower overhead,
- * since we don't have to allocate then free skbs on reassembled frames.
- *
- * Values:
- *   - RX_ALLOC_METHOD_AUTO = 0
- *   - RX_ALLOC_METHOD_SKB  = 1
- *   - RX_ALLOC_METHOD_PAGE = 2
- *
- * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
- * controlled by the parameters below.
- *
- * - Since pushing and popping descriptors are separated by the rx_queue
- *   size, so the watermarks should be ~rxd_size.
- * - The performance win by using page-based allocation for GRO is less
- *   than the performance hit of using page-based allocation of non-GRO,
- *   so the watermarks should reflect this.
- *
- * Per channel we maintain a single variable, updated by each channel:
- *
- *   rx_alloc_level += (gro_performed ? RX_ALLOC_FACTOR_GRO :
- *                      RX_ALLOC_FACTOR_SKB)
- * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
- * limits the hysteresis), and update the allocation strategy:
- *
- *   rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_GRO ?
- *                      RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
- */
-static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;
-
-#define RX_ALLOC_LEVEL_GRO 0x2000
-#define RX_ALLOC_LEVEL_MAX 0x3000
-#define RX_ALLOC_FACTOR_GRO 1
-#define RX_ALLOC_FACTOR_SKB (-2)
-
 /* This is the percentage fill level below which new RX descriptors
  * will be added to the RX descriptor ring.
 */
 static unsigned int rx_refill_threshold;
 
+/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
+#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
+				      EFX_RX_USR_BUF_SIZE)
+
 /*
 * RX maximum head room required.
 *
- * This must be at least 1 to prevent overflow and at least 2 to allow
- * pipelined receives.
+ * This must be at least 1 to prevent overflow, plus one packet-worth
+ * to allow pipelined receives.
 */
-#define EFX_RXD_HEAD_ROOM 2
+#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
 
-/* Offset of ethernet header within page */
-static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
-					     struct efx_rx_buffer *buf)
+static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
 {
-	return buf->page_offset + efx->type->rx_buffer_hash_size;
-}
-static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
-{
-	return PAGE_SIZE << efx->rx_buffer_order;
-}
-
-static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
-{
-	if (buf->flags & EFX_RX_BUF_PAGE)
-		return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
-	else
-		return (u8 *)buf->u.skb->data + efx->type->rx_buffer_hash_size;
+	return page_address(buf->page) + buf->page_offset;
 }
 
 static inline u32 efx_rx_buf_hash(const u8 *eh)
@@ -119,66 +74,81 @@ static inline u32 efx_rx_buf_hash(const u8 *eh)
 #endif
 }
 
-/**
- * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
- *
- * @rx_queue:		Efx RX queue
- *
- * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a
- * struct efx_rx_buffer for each one. Return a negative error code or 0
- * on success. May fail having only inserted fewer than EFX_RX_BATCH
- * buffers.
- */
-static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
+static inline struct efx_rx_buffer *
+efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
+{
+	if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
+		return efx_rx_buffer(rx_queue, 0);
+	else
+		return rx_buf + 1;
+}
+
+static inline void efx_sync_rx_buffer(struct efx_nic *efx,
+				      struct efx_rx_buffer *rx_buf,
+				      unsigned int len)
+{
+	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
+				DMA_FROM_DEVICE);
+}
+
+void efx_rx_config_page_split(struct efx_nic *efx)
+{
+	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + EFX_PAGE_IP_ALIGN,
+				      L1_CACHE_BYTES);
+	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
+		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
+		 efx->rx_page_buf_step);
+	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
+		efx->rx_bufs_per_page;
+	efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
+					       efx->rx_bufs_per_page);
+}
+
+/* Check the RX page recycle ring for a page that can be reused. */
+static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
 {
 	struct efx_nic *efx = rx_queue->efx;
-	struct net_device *net_dev = efx->net_dev;
-	struct efx_rx_buffer *rx_buf;
-	struct sk_buff *skb;
-	int skb_len = efx->rx_buffer_len;
-	unsigned index, count;
+	struct page *page;
+	struct efx_rx_page_state *state;
+	unsigned index;
 
-	for (count = 0; count < EFX_RX_BATCH; ++count) {
-		index = rx_queue->added_count & rx_queue->ptr_mask;
-		rx_buf = efx_rx_buffer(rx_queue, index);
-
-		rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len);
-		if (unlikely(!skb))
-			return -ENOMEM;
-
-		/* Adjust the SKB for padding */
-		skb_reserve(skb, NET_IP_ALIGN);
-		rx_buf->len = skb_len - NET_IP_ALIGN;
-		rx_buf->flags = 0;
-
-		rx_buf->dma_addr = dma_map_single(&efx->pci_dev->dev,
-						  skb->data, rx_buf->len,
-						  DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
-					       rx_buf->dma_addr))) {
-			dev_kfree_skb_any(skb);
-			rx_buf->u.skb = NULL;
-			return -EIO;
-		}
+	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
+	page = rx_queue->page_ring[index];
+	if (page == NULL)
+		return NULL;
+
+	rx_queue->page_ring[index] = NULL;
+	/* page_remove cannot exceed page_add. */
+	if (rx_queue->page_remove != rx_queue->page_add)
+		++rx_queue->page_remove;
 
-		++rx_queue->added_count;
-		++rx_queue->alloc_skb_count;
+	/* If page_count is 1 then we hold the only reference to this page. */
+	if (page_count(page) == 1) {
+		++rx_queue->page_recycle_count;
+		return page;
+	} else {
+		state = page_address(page);
+		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
+			       PAGE_SIZE << efx->rx_buffer_order,
+			       DMA_FROM_DEVICE);
+		put_page(page);
+		++rx_queue->page_recycle_failed;
 	}
 
-	return 0;
+	return NULL;
 }
 
 /**
- * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
+ * efx_init_rx_buffers - create EFX_RX_BATCH page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
- * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
- * and populates struct efx_rx_buffers for each one. Return a negative error
- * code or 0 on success. If a single page can be split between two buffers,
- * then the page will either be inserted fully, or not at at all.
+ * This allocates a batch of pages, maps them for DMA, and populates
+ * struct efx_rx_buffers for each one. Return a negative error code or
+ * 0 on success. If a single page can be used for multiple buffers,
+ * then the page will either be inserted fully, or not at all.
 */
-static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
+static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
 {
 	struct efx_nic *efx = rx_queue->efx;
 	struct efx_rx_buffer *rx_buf;
@@ -188,150 +158,140 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
 	dma_addr_t dma_addr;
 	unsigned index, count;
 
-	/* We can split a page between two buffers */
-	BUILD_BUG_ON(EFX_RX_BATCH & 1);
-
-	for (count = 0; count < EFX_RX_BATCH; ++count) {
-		page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
-				   efx->rx_buffer_order);
-		if (unlikely(page == NULL))
-			return -ENOMEM;
-		dma_addr = dma_map_page(&efx->pci_dev->dev, page, 0,
-					efx_rx_buf_size(efx),
-					DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(&efx->pci_dev->dev, dma_addr))) {
-			__free_pages(page, efx->rx_buffer_order);
-			return -EIO;
+	count = 0;
+	do {
+		page = efx_reuse_page(rx_queue);
+		if (page == NULL) {
+			page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
+					   efx->rx_buffer_order);
+			if (unlikely(page == NULL))
+				return -ENOMEM;
+			dma_addr =
+				dma_map_page(&efx->pci_dev->dev, page, 0,
					     PAGE_SIZE << efx->rx_buffer_order,
					     DMA_FROM_DEVICE);
+			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
+						       dma_addr))) {
+				__free_pages(page, efx->rx_buffer_order);
+				return -EIO;
+			}
+			state = page_address(page);
+			state->dma_addr = dma_addr;
+		} else {
+			state = page_address(page);
+			dma_addr = state->dma_addr;
 		}
-		state = page_address(page);
-		state->refcnt = 0;
-		state->dma_addr = dma_addr;
 
 		dma_addr += sizeof(struct efx_rx_page_state);
 		page_offset = sizeof(struct efx_rx_page_state);
 
-	split:
-		index = rx_queue->added_count & rx_queue->ptr_mask;
-		rx_buf = efx_rx_buffer(rx_queue, index);
-		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
-		rx_buf->u.page = page;
-		rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
-		rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
-		rx_buf->flags = EFX_RX_BUF_PAGE;
-		++rx_queue->added_count;
-		++rx_queue->alloc_page_count;
-		++state->refcnt;
-
-		if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
-			/* Use the second half of the page */
-			get_page(page);
+		do {
+			index = rx_queue->added_count & rx_queue->ptr_mask;
+			rx_buf = efx_rx_buffer(rx_queue, index);
+			rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
+			rx_buf->page = page;
+			rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
+			rx_buf->len = efx->rx_dma_len;
+			rx_buf->flags = 0;
+			++rx_queue->added_count;
+			get_page(page);
228 dma_addr += (PAGE_SIZE >> 1); 198 dma_addr += efx->rx_page_buf_step;
229 page_offset += (PAGE_SIZE >> 1); 199 page_offset += efx->rx_page_buf_step;
230 ++count; 200 } while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);
231 goto split; 201
232 } 202 rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
233 } 203 } while (++count < efx->rx_pages_per_batch);
234 204
235 return 0; 205 return 0;
236} 206}
237 207
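The inner do/while in efx_init_rx_buffers() steps through the page in rx_page_buf_step increments, stopping when another full buffer would no longer fit, and only the last buffer is tagged EFX_RX_BUF_LAST_IN_PAGE. A sketch of the offset walk, reusing the assumed constants from the earlier example:

#include <stdio.h>

int main(void)
{
	const unsigned int page_size = 4096, state_size = 16, step = 1600;
	unsigned int page_offset = state_size;
	unsigned int nbufs = 0;

	do {
		++nbufs;			/* one efx_rx_buffer populated */
		page_offset += step;
	} while (page_offset + step <= page_size);

	/* prints: 2 buffers, last one starts at offset 1616 */
	printf("%u buffers, last one starts at offset %u\n",
	       nbufs, page_offset - step);
	return 0;
}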
208/* Unmap a DMA-mapped page. This function is only called for the final RX
209 * buffer in a page.
210 */
238static void efx_unmap_rx_buffer(struct efx_nic *efx, 211static void efx_unmap_rx_buffer(struct efx_nic *efx,
239 struct efx_rx_buffer *rx_buf, 212 struct efx_rx_buffer *rx_buf)
240 unsigned int used_len)
241{ 213{
242 if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) { 214 struct page *page = rx_buf->page;
243 struct efx_rx_page_state *state; 215
244 216 if (page) {
245 state = page_address(rx_buf->u.page); 217 struct efx_rx_page_state *state = page_address(page);
246 if (--state->refcnt == 0) { 218 dma_unmap_page(&efx->pci_dev->dev,
247 dma_unmap_page(&efx->pci_dev->dev, 219 state->dma_addr,
248 state->dma_addr, 220 PAGE_SIZE << efx->rx_buffer_order,
249 efx_rx_buf_size(efx), 221 DMA_FROM_DEVICE);
250 DMA_FROM_DEVICE);
251 } else if (used_len) {
252 dma_sync_single_for_cpu(&efx->pci_dev->dev,
253 rx_buf->dma_addr, used_len,
254 DMA_FROM_DEVICE);
255 }
256 } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
257 dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
258 rx_buf->len, DMA_FROM_DEVICE);
259 } 222 }
260} 223}
261 224
262static void efx_free_rx_buffer(struct efx_nic *efx, 225static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf)
263 struct efx_rx_buffer *rx_buf)
264{ 226{
265 if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) { 227 if (rx_buf->page) {
266 __free_pages(rx_buf->u.page, efx->rx_buffer_order); 228 put_page(rx_buf->page);
267 rx_buf->u.page = NULL; 229 rx_buf->page = NULL;
268 } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
269 dev_kfree_skb_any(rx_buf->u.skb);
270 rx_buf->u.skb = NULL;
271 } 230 }
272} 231}
273 232
274static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, 233/* Attempt to recycle the page if there is an RX recycle ring; the page can
275 struct efx_rx_buffer *rx_buf) 234 * only be added if this is the final RX buffer, to prevent pages being used in
235 * the descriptor ring and appearing in the recycle ring simultaneously.
236 */
237static void efx_recycle_rx_page(struct efx_channel *channel,
238 struct efx_rx_buffer *rx_buf)
276{ 239{
277 efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0); 240 struct page *page = rx_buf->page;
278 efx_free_rx_buffer(rx_queue->efx, rx_buf); 241 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
279} 242 struct efx_nic *efx = rx_queue->efx;
243 unsigned index;
280 244
281/* Attempt to resurrect the other receive buffer that used to share this page, 245 /* Only recycle the page after processing the final buffer. */
282 * which had previously been passed up to the kernel and freed. */ 246 if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
283static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
284 struct efx_rx_buffer *rx_buf)
285{
286 struct efx_rx_page_state *state = page_address(rx_buf->u.page);
287 struct efx_rx_buffer *new_buf;
288 unsigned fill_level, index;
289
290 /* +1 because efx_rx_packet() incremented removed_count. +1 because
291 * we'd like to insert an additional descriptor whilst leaving
292 * EFX_RXD_HEAD_ROOM for the non-recycle path */
293 fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
294 if (unlikely(fill_level > rx_queue->max_fill)) {
295 /* We could place "state" on a list, and drain the list in
296 * efx_fast_push_rx_descriptors(). For now, this will do. */
297 return; 247 return;
298 }
299 248
300 ++state->refcnt; 249 index = rx_queue->page_add & rx_queue->page_ptr_mask;
301 get_page(rx_buf->u.page); 250 if (rx_queue->page_ring[index] == NULL) {
251 unsigned read_index = rx_queue->page_remove &
252 rx_queue->page_ptr_mask;
302 253
303 index = rx_queue->added_count & rx_queue->ptr_mask; 254 /* The next slot in the recycle ring is available, but
304 new_buf = efx_rx_buffer(rx_queue, index); 255 * increment page_remove if the read pointer currently
305 new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1); 256 * points here.
306 new_buf->u.page = rx_buf->u.page; 257 */
307 new_buf->len = rx_buf->len; 258 if (read_index == index)
308 new_buf->flags = EFX_RX_BUF_PAGE; 259 ++rx_queue->page_remove;
309 ++rx_queue->added_count; 260 rx_queue->page_ring[index] = page;
261 ++rx_queue->page_add;
262 return;
263 }
264 ++rx_queue->page_recycle_full;
265 efx_unmap_rx_buffer(efx, rx_buf);
266 put_page(rx_buf->page);
310} 267}
311 268
312/* Recycle the given rx buffer directly back into the rx_queue. There is 269static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
313 * always room to add this buffer, because we've just popped a buffer. */ 270 struct efx_rx_buffer *rx_buf)
314static void efx_recycle_rx_buffer(struct efx_channel *channel,
315 struct efx_rx_buffer *rx_buf)
316{ 271{
317 struct efx_nic *efx = channel->efx; 272 /* Release the page reference we hold for the buffer. */
318 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel); 273 if (rx_buf->page)
319 struct efx_rx_buffer *new_buf; 274 put_page(rx_buf->page);
320 unsigned index; 275
321 276 /* If this is the last buffer in a page, unmap and free it. */
322 rx_buf->flags &= EFX_RX_BUF_PAGE; 277 if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
323 278 efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
324 if ((rx_buf->flags & EFX_RX_BUF_PAGE) && 279 efx_free_rx_buffer(rx_buf);
325 efx->rx_buffer_len <= EFX_RX_HALF_PAGE && 280 }
326 page_count(rx_buf->u.page) == 1) 281 rx_buf->page = NULL;
327 efx_resurrect_rx_buffer(rx_queue, rx_buf); 282}
328 283
329 index = rx_queue->added_count & rx_queue->ptr_mask; 284/* Recycle the pages that are used by buffers that have just been received. */
330 new_buf = efx_rx_buffer(rx_queue, index); 285static void efx_recycle_rx_buffers(struct efx_channel *channel,
286 struct efx_rx_buffer *rx_buf,
287 unsigned int n_frags)
288{
289 struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
331 290
332 memcpy(new_buf, rx_buf, sizeof(*new_buf)); 291 do {
333 rx_buf->u.page = NULL; 292 efx_recycle_rx_page(channel, rx_buf);
334 ++rx_queue->added_count; 293 rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
294 } while (--n_frags);
335} 295}
336 296
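efx_rx_buf_next() steps to the following descriptor and wraps from ptr_mask back to slot 0, which is how efx_recycle_rx_buffers() above visits every fragment of a scattered packet. A toy model, assuming a ring of eight entries:

#include <stdio.h>

#define PTR_MASK 7u	/* ring of eight descriptors; illustrative */

static unsigned int buf_next(unsigned int index)
{
	return index == PTR_MASK ? 0 : index + 1;
}

int main(void)
{
	unsigned int index = 6, n_frags = 3;

	do {
		printf("%u ", index);	/* prints: 6 7 0 */
		index = buf_next(index);
	} while (--n_frags);
	printf("\n");
	return 0;
}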
337/** 297/**
@@ -348,8 +308,8 @@ static void efx_recycle_rx_buffer(struct efx_channel *channel,
348 */ 308 */
349void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue) 309void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
350{ 310{
351 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); 311 struct efx_nic *efx = rx_queue->efx;
352 unsigned fill_level; 312 unsigned int fill_level, batch_size;
353 int space, rc = 0; 313 int space, rc = 0;
354 314
355 /* Calculate current fill level, and exit if we don't need to fill */ 315 /* Calculate current fill level, and exit if we don't need to fill */
@@ -364,28 +324,26 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
364 rx_queue->min_fill = fill_level; 324 rx_queue->min_fill = fill_level;
365 } 325 }
366 326
327 batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
367 space = rx_queue->max_fill - fill_level; 328 space = rx_queue->max_fill - fill_level;
368 EFX_BUG_ON_PARANOID(space < EFX_RX_BATCH); 329 EFX_BUG_ON_PARANOID(space < batch_size);
369 330
370 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, 331 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
371 "RX queue %d fast-filling descriptor ring from" 332 "RX queue %d fast-filling descriptor ring from"
372 " level %d to level %d using %s allocation\n", 333 " level %d to level %d\n",
373 efx_rx_queue_index(rx_queue), fill_level, 334 efx_rx_queue_index(rx_queue), fill_level,
374 rx_queue->max_fill, 335 rx_queue->max_fill);
375 channel->rx_alloc_push_pages ? "page" : "skb"); 336
376 337
377 do { 338 do {
378 if (channel->rx_alloc_push_pages) 339 rc = efx_init_rx_buffers(rx_queue);
379 rc = efx_init_rx_buffers_page(rx_queue);
380 else
381 rc = efx_init_rx_buffers_skb(rx_queue);
382 if (unlikely(rc)) { 340 if (unlikely(rc)) {
383 /* Ensure that we don't leave the rx queue empty */ 341 /* Ensure that we don't leave the rx queue empty */
384 if (rx_queue->added_count == rx_queue->removed_count) 342 if (rx_queue->added_count == rx_queue->removed_count)
385 efx_schedule_slow_fill(rx_queue); 343 efx_schedule_slow_fill(rx_queue);
386 goto out; 344 goto out;
387 } 345 }
388 } while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH); 346 } while ((space -= batch_size) >= batch_size);
389 347
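The refill loop above commits a whole batch per iteration and keeps going while at least one further batch still fits below max_fill. A worked model of that space accounting, with illustrative numbers:

#include <stdio.h>

int main(void)
{
	int pages_per_batch = 4, bufs_per_page = 2;
	int batch_size = pages_per_batch * bufs_per_page;	/* 8 */
	int space = 29;		/* max_fill - fill_level */
	int filled = 0;

	do {
		filled += batch_size;	/* one efx_init_rx_buffers() call */
	} while ((space -= batch_size) >= batch_size);

	/* prints: filled=24 leftover=5 */
	printf("filled=%d leftover=%d\n", filled, space);
	return 0;
}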
390 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev, 348 netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
391 "RX queue %d fast-filled descriptor ring " 349 "RX queue %d fast-filled descriptor ring "
@@ -408,7 +366,7 @@ void efx_rx_slow_fill(unsigned long context)
408 366
409static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, 367static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
410 struct efx_rx_buffer *rx_buf, 368 struct efx_rx_buffer *rx_buf,
411 int len, bool *leak_packet) 369 int len)
412{ 370{
413 struct efx_nic *efx = rx_queue->efx; 371 struct efx_nic *efx = rx_queue->efx;
414 unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding; 372 unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
@@ -428,11 +386,6 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
428 "RX event (0x%x > 0x%x+0x%x). Leaking\n", 386 "RX event (0x%x > 0x%x+0x%x). Leaking\n",
429 efx_rx_queue_index(rx_queue), len, max_len, 387 efx_rx_queue_index(rx_queue), len, max_len,
430 efx->type->rx_buffer_padding); 388 efx->type->rx_buffer_padding);
431 /* If this buffer was skb-allocated, then the meta
432 * data at the end of the skb will be trashed. So
433 * we have no choice but to leak the fragment.
434 */
435 *leak_packet = !(rx_buf->flags & EFX_RX_BUF_PAGE);
436 efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY); 389 efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
437 } else { 390 } else {
438 if (net_ratelimit()) 391 if (net_ratelimit())
@@ -448,212 +401,238 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
448/* Pass a received packet up through GRO. GRO can handle pages 401/* Pass a received packet up through GRO. GRO can handle pages
449 * regardless of checksum state and skbs with a good checksum. 402 * regardless of checksum state and skbs with a good checksum.
450 */ 403 */
451static void efx_rx_packet_gro(struct efx_channel *channel, 404static void
452 struct efx_rx_buffer *rx_buf, 405efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
453 const u8 *eh) 406 unsigned int n_frags, u8 *eh)
454{ 407{
455 struct napi_struct *napi = &channel->napi_str; 408 struct napi_struct *napi = &channel->napi_str;
456 gro_result_t gro_result; 409 gro_result_t gro_result;
410 struct efx_nic *efx = channel->efx;
411 struct sk_buff *skb;
457 412
458 if (rx_buf->flags & EFX_RX_BUF_PAGE) { 413 skb = napi_get_frags(napi);
459 struct efx_nic *efx = channel->efx; 414 if (unlikely(!skb)) {
460 struct page *page = rx_buf->u.page; 415 while (n_frags--) {
461 struct sk_buff *skb; 416 put_page(rx_buf->page);
417 rx_buf->page = NULL;
418 rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
419 }
420 return;
421 }
462 422
463 rx_buf->u.page = NULL; 423 if (efx->net_dev->features & NETIF_F_RXHASH)
424 skb->rxhash = efx_rx_buf_hash(eh);
425 skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
426 CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
427
428 for (;;) {
429 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
430 rx_buf->page, rx_buf->page_offset,
431 rx_buf->len);
432 rx_buf->page = NULL;
433 skb->len += rx_buf->len;
434 if (skb_shinfo(skb)->nr_frags == n_frags)
435 break;
436
437 rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
438 }
464 439
465 skb = napi_get_frags(napi); 440 skb->data_len = skb->len;
466 if (!skb) { 441 skb->truesize += n_frags * efx->rx_buffer_truesize;
467 put_page(page); 442
468 return; 443 skb_record_rx_queue(skb, channel->rx_queue.core_index);
469 } 444
445 gro_result = napi_gro_frags(napi);
446 if (gro_result != GRO_DROP)
447 channel->irq_mod_score += 2;
448}
470 449
471 if (efx->net_dev->features & NETIF_F_RXHASH) 450/* Allocate and construct an SKB around page fragments */
472 skb->rxhash = efx_rx_buf_hash(eh); 451static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
452 struct efx_rx_buffer *rx_buf,
453 unsigned int n_frags,
454 u8 *eh, int hdr_len)
455{
456 struct efx_nic *efx = channel->efx;
457 struct sk_buff *skb;
473 458
474 skb_fill_page_desc(skb, 0, page, 459 /* Allocate an SKB to store the headers */
475 efx_rx_buf_offset(efx, rx_buf), rx_buf->len); 460 skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
461 if (unlikely(skb == NULL))
462 return NULL;
476 463
477 skb->len = rx_buf->len; 464 EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);
478 skb->data_len = rx_buf->len;
479 skb->truesize += rx_buf->len;
480 skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
481 CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
482 465
483 skb_record_rx_queue(skb, channel->rx_queue.core_index); 466 skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
467 memcpy(__skb_put(skb, hdr_len), eh, hdr_len);
484 468
485 gro_result = napi_gro_frags(napi); 469 /* Append the remaining page(s) onto the frag list */
486 } else { 470 if (rx_buf->len > hdr_len) {
487 struct sk_buff *skb = rx_buf->u.skb; 471 rx_buf->page_offset += hdr_len;
472 rx_buf->len -= hdr_len;
488 473
489 EFX_BUG_ON_PARANOID(!(rx_buf->flags & EFX_RX_PKT_CSUMMED)); 474 for (;;) {
490 rx_buf->u.skb = NULL; 475 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
491 skb->ip_summed = CHECKSUM_UNNECESSARY; 476 rx_buf->page, rx_buf->page_offset,
477 rx_buf->len);
478 rx_buf->page = NULL;
479 skb->len += rx_buf->len;
480 skb->data_len += rx_buf->len;
481 if (skb_shinfo(skb)->nr_frags == n_frags)
482 break;
492 483
493 gro_result = napi_gro_receive(napi, skb); 484 rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
485 }
486 } else {
487 __free_pages(rx_buf->page, efx->rx_buffer_order);
488 rx_buf->page = NULL;
489 n_frags = 0;
494 } 490 }
495 491
496 if (gro_result == GRO_NORMAL) { 492 skb->truesize += n_frags * efx->rx_buffer_truesize;
497 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; 493
498 } else if (gro_result != GRO_DROP) { 494 /* Move past the ethernet header */
499 channel->rx_alloc_level += RX_ALLOC_FACTOR_GRO; 495 skb->protocol = eth_type_trans(skb, efx->net_dev);
500 channel->irq_mod_score += 2; 496
501 } 497 return skb;
502} 498}
503 499
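efx_rx_mk_skb() copies at most EFX_SKB_HEADERS bytes of headers into the skb's linear area and leaves the remaining payload in the page as fragments. A userspace model of that split; the 128-byte header budget is an assumption standing in for EFX_SKB_HEADERS.

#include <stdio.h>
#include <string.h>

#define SKB_HEADERS 128u	/* assumed stand-in for EFX_SKB_HEADERS */

struct fake_skb {
	unsigned char linear[SKB_HEADERS];	/* copied header bytes */
	unsigned int hdr_len;
	const unsigned char *frag;		/* payload left in the page */
	unsigned int frag_len;
};

static void mk_skb(struct fake_skb *skb, const unsigned char *buf,
		   unsigned int len)
{
	unsigned int hdr_len = len < SKB_HEADERS ? len : SKB_HEADERS;

	memcpy(skb->linear, buf, hdr_len);
	skb->hdr_len = hdr_len;
	skb->frag = len > hdr_len ? buf + hdr_len : NULL;
	skb->frag_len = len - hdr_len;
}

int main(void)
{
	unsigned char pkt[1500] = { 0 };
	struct fake_skb skb;

	mk_skb(&skb, pkt, sizeof(pkt));
	/* prints: linear=128 frag=1372 */
	printf("linear=%u frag=%u\n", skb.hdr_len, skb.frag_len);
	return 0;
}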
504void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, 500void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
505 unsigned int len, u16 flags) 501 unsigned int n_frags, unsigned int len, u16 flags)
506{ 502{
507 struct efx_nic *efx = rx_queue->efx; 503 struct efx_nic *efx = rx_queue->efx;
508 struct efx_channel *channel = efx_rx_queue_channel(rx_queue); 504 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
509 struct efx_rx_buffer *rx_buf; 505 struct efx_rx_buffer *rx_buf;
510 bool leak_packet = false;
511 506
512 rx_buf = efx_rx_buffer(rx_queue, index); 507 rx_buf = efx_rx_buffer(rx_queue, index);
513 rx_buf->flags |= flags; 508 rx_buf->flags |= flags;
514 509
515 /* This allows the refill path to post another buffer. 510 /* Validate the number of fragments and completed length */
516 * EFX_RXD_HEAD_ROOM ensures that the slot we are using 511 if (n_frags == 1) {
517 * isn't overwritten yet. 512 efx_rx_packet__check_len(rx_queue, rx_buf, len);
518 */ 513 } else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
519 rx_queue->removed_count++; 514 unlikely(len <= (n_frags - 1) * EFX_RX_USR_BUF_SIZE) ||
520 515 unlikely(len > n_frags * EFX_RX_USR_BUF_SIZE) ||
521 /* Validate the length encoded in the event vs the descriptor pushed */ 516 unlikely(!efx->rx_scatter)) {
522 efx_rx_packet__check_len(rx_queue, rx_buf, len, &leak_packet); 517 /* If this isn't an explicit discard request, either
518 * the hardware or the driver is broken.
519 */
520 WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
521 rx_buf->flags |= EFX_RX_PKT_DISCARD;
522 }
523 523
524 netif_vdbg(efx, rx_status, efx->net_dev, 524 netif_vdbg(efx, rx_status, efx->net_dev,
525 "RX queue %d received id %x at %llx+%x %s%s\n", 525 "RX queue %d received ids %x-%x len %d %s%s\n",
526 efx_rx_queue_index(rx_queue), index, 526 efx_rx_queue_index(rx_queue), index,
527 (unsigned long long)rx_buf->dma_addr, len, 527 (index + n_frags - 1) & rx_queue->ptr_mask, len,
528 (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "", 528 (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
529 (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : ""); 529 (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");
530 530
531 /* Discard packet, if instructed to do so */ 531 /* Discard packet, if instructed to do so. Process the
532 * previous receive first.
533 */
532 if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) { 534 if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
533 if (unlikely(leak_packet)) 535 efx_rx_flush_packet(channel);
534 channel->n_skbuff_leaks++; 536 put_page(rx_buf->page);
535 else 537 efx_recycle_rx_buffers(channel, rx_buf, n_frags);
536 efx_recycle_rx_buffer(channel, rx_buf); 538 return;
537
538 /* Don't hold off the previous receive */
539 rx_buf = NULL;
540 goto out;
541 } 539 }
542 540
543 /* Release and/or sync DMA mapping - assumes all RX buffers 541 if (n_frags == 1)
544 * consumed in-order per RX queue 542 rx_buf->len = len;
543
544 /* Release and/or sync the DMA mapping - assumes all RX buffers
545 * consumed in-order per RX queue.
545 */ 546 */
546 efx_unmap_rx_buffer(efx, rx_buf, len); 547 efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
547 548
548 /* Prefetch nice and early so data will (hopefully) be in cache by 549 /* Prefetch nice and early so data will (hopefully) be in cache by
549 * the time we look at it. 550 * the time we look at it.
550 */ 551 */
551 prefetch(efx_rx_buf_eh(efx, rx_buf)); 552 prefetch(efx_rx_buf_va(rx_buf));
553
554 rx_buf->page_offset += efx->type->rx_buffer_hash_size;
555 rx_buf->len -= efx->type->rx_buffer_hash_size;
556
557 if (n_frags > 1) {
558 /* Release/sync DMA mapping for additional fragments.
559 * Fix length for last fragment.
560 */
561 unsigned int tail_frags = n_frags - 1;
562
563 for (;;) {
564 rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
565 if (--tail_frags == 0)
566 break;
567 efx_sync_rx_buffer(efx, rx_buf, EFX_RX_USR_BUF_SIZE);
568 }
569 rx_buf->len = len - (n_frags - 1) * EFX_RX_USR_BUF_SIZE;
570 efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
571 }
572
573 /* All fragments have been DMA-synced, so recycle buffers and pages. */
574 rx_buf = efx_rx_buffer(rx_queue, index);
575 efx_recycle_rx_buffers(channel, rx_buf, n_frags);
552 576
553 /* Pipeline receives so that we give time for packet headers to be 577 /* Pipeline receives so that we give time for packet headers to be
554 * prefetched into cache. 578 * prefetched into cache.
555 */ 579 */
556 rx_buf->len = len - efx->type->rx_buffer_hash_size; 580 efx_rx_flush_packet(channel);
557out: 581 channel->rx_pkt_n_frags = n_frags;
558 if (channel->rx_pkt) 582 channel->rx_pkt_index = index;
559 __efx_rx_packet(channel, channel->rx_pkt);
560 channel->rx_pkt = rx_buf;
561} 583}
562 584
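The scatter validation near the top of efx_rx_packet() accepts n_frags fragments only if the completed length is more than n_frags - 1 buffers can hold and no more than n_frags can. The same test as a standalone predicate; EFX_RX_USR_BUF_SIZE and EFX_RX_MAX_FRAGS are replaced by illustrative values here.

#include <stdbool.h>
#include <stdio.h>

#define USR_BUF_SIZE 1824u	/* illustrative per-buffer DMA length */
#define MAX_FRAGS    4u		/* illustrative fragment limit */

static bool frags_len_valid(unsigned int n_frags, unsigned int len)
{
	if (n_frags == 0 || n_frags > MAX_FRAGS)
		return false;
	if (n_frags == 1)
		return true;	/* single-frag length is checked separately */
	return len > (n_frags - 1) * USR_BUF_SIZE &&
	       len <= n_frags * USR_BUF_SIZE;
}

int main(void)
{
	/* prints: 1 0 0 */
	printf("%d %d %d\n",
	       frags_len_valid(2, 2000),	/* needs both buffers */
	       frags_len_valid(2, 1500),	/* would fit in one */
	       frags_len_valid(5, 8000));	/* too many fragments */
	return 0;
}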
563static void efx_rx_deliver(struct efx_channel *channel, 585static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
564 struct efx_rx_buffer *rx_buf) 586 struct efx_rx_buffer *rx_buf,
587 unsigned int n_frags)
565{ 588{
566 struct sk_buff *skb; 589 struct sk_buff *skb;
590 u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);
567 591
568 /* We now own the SKB */ 592 skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
569 skb = rx_buf->u.skb; 593 if (unlikely(skb == NULL)) {
570 rx_buf->u.skb = NULL; 594 efx_free_rx_buffer(rx_buf);
595 return;
596 }
597 skb_record_rx_queue(skb, channel->rx_queue.core_index);
571 598
572 /* Set the SKB flags */ 599 /* Set the SKB flags */
573 skb_checksum_none_assert(skb); 600 skb_checksum_none_assert(skb);
574 601
575 /* Record the rx_queue */
576 skb_record_rx_queue(skb, channel->rx_queue.core_index);
577
578 /* Pass the packet up */
579 if (channel->type->receive_skb) 602 if (channel->type->receive_skb)
580 channel->type->receive_skb(channel, skb); 603 if (channel->type->receive_skb(channel, skb))
581 else 604 return;
582 netif_receive_skb(skb);
583 605
584 /* Update allocation strategy method */ 606 /* Pass the packet up */
585 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; 607 netif_receive_skb(skb);
586} 608}
587 609
588/* Handle a received packet. Second half: Touches packet payload. */ 610/* Handle a received packet. Second half: Touches packet payload. */
589void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf) 611void __efx_rx_packet(struct efx_channel *channel)
590{ 612{
591 struct efx_nic *efx = channel->efx; 613 struct efx_nic *efx = channel->efx;
592 u8 *eh = efx_rx_buf_eh(efx, rx_buf); 614 struct efx_rx_buffer *rx_buf =
615 efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
616 u8 *eh = efx_rx_buf_va(rx_buf);
593 617
594 /* If we're in loopback test, then pass the packet directly to the 618 /* If we're in loopback test, then pass the packet directly to the
595 * loopback layer, and free the rx_buf here 619 * loopback layer, and free the rx_buf here
596 */ 620 */
597 if (unlikely(efx->loopback_selftest)) { 621 if (unlikely(efx->loopback_selftest)) {
598 efx_loopback_rx_packet(efx, eh, rx_buf->len); 622 efx_loopback_rx_packet(efx, eh, rx_buf->len);
599 efx_free_rx_buffer(efx, rx_buf); 623 efx_free_rx_buffer(rx_buf);
600 return; 624 goto out;
601 }
602
603 if (!(rx_buf->flags & EFX_RX_BUF_PAGE)) {
604 struct sk_buff *skb = rx_buf->u.skb;
605
606 prefetch(skb_shinfo(skb));
607
608 skb_reserve(skb, efx->type->rx_buffer_hash_size);
609 skb_put(skb, rx_buf->len);
610
611 if (efx->net_dev->features & NETIF_F_RXHASH)
612 skb->rxhash = efx_rx_buf_hash(eh);
613
614 /* Move past the ethernet header. rx_buf->data still points
615 * at the ethernet header */
616 skb->protocol = eth_type_trans(skb, efx->net_dev);
617
618 skb_record_rx_queue(skb, channel->rx_queue.core_index);
619 } 625 }
620 626
621 if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM))) 627 if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
622 rx_buf->flags &= ~EFX_RX_PKT_CSUMMED; 628 rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
623 629
624 if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)) && 630 if (!channel->type->receive_skb)
625 !channel->type->receive_skb) 631 efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
626 efx_rx_packet_gro(channel, rx_buf, eh);
627 else 632 else
628 efx_rx_deliver(channel, rx_buf); 633 efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
629} 634out:
630 635 channel->rx_pkt_n_frags = 0;
631void efx_rx_strategy(struct efx_channel *channel)
632{
633 enum efx_rx_alloc_method method = rx_alloc_method;
634
635 if (channel->type->receive_skb) {
636 channel->rx_alloc_push_pages = false;
637 return;
638 }
639
640 /* Only makes sense to use page based allocation if GRO is enabled */
641 if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
642 method = RX_ALLOC_METHOD_SKB;
643 } else if (method == RX_ALLOC_METHOD_AUTO) {
644 /* Constrain the rx_alloc_level */
645 if (channel->rx_alloc_level < 0)
646 channel->rx_alloc_level = 0;
647 else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
648 channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;
649
650 /* Decide on the allocation method */
651 method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_GRO) ?
652 RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
653 }
654
655 /* Push the option */
656 channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE);
657} 636}
658 637
659int efx_probe_rx_queue(struct efx_rx_queue *rx_queue) 638int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
@@ -683,9 +662,32 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
683 kfree(rx_queue->buffer); 662 kfree(rx_queue->buffer);
684 rx_queue->buffer = NULL; 663 rx_queue->buffer = NULL;
685 } 664 }
665
686 return rc; 666 return rc;
687} 667}
688 668
669static void efx_init_rx_recycle_ring(struct efx_nic *efx,
670 struct efx_rx_queue *rx_queue)
671{
672 unsigned int bufs_in_recycle_ring, page_ring_size;
673
674 /* Set the RX recycle ring size */
675#ifdef CONFIG_PPC64
676 bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
677#else
678 if (efx->pci_dev->dev.iommu_group)
679 bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
680 else
681 bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
682#endif /* CONFIG_PPC64 */
683
684 page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
685 efx->rx_bufs_per_page);
686 rx_queue->page_ring = kcalloc(page_ring_size,
687 sizeof(*rx_queue->page_ring), GFP_KERNEL);
688 rx_queue->page_ptr_mask = page_ring_size - 1;
689}
690
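efx_init_rx_recycle_ring() rounds the ring up to a power of two so that a plain AND mask can index it. The EFX_RECYCLE_RING_SIZE_* constants are not shown in this hunk, so the sketch below uses a placeholder buffer count:

#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int x)
{
	unsigned int r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int bufs_in_ring = 26;	/* placeholder ring budget */
	unsigned int bufs_per_page = 2;
	unsigned int ring_size = roundup_pow_of_two(bufs_in_ring /
						    bufs_per_page);

	/* prints: ring_size=16 page_ptr_mask=0xf */
	printf("ring_size=%u page_ptr_mask=%#x\n", ring_size, ring_size - 1);
	return 0;
}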
689void efx_init_rx_queue(struct efx_rx_queue *rx_queue) 691void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
690{ 692{
691 struct efx_nic *efx = rx_queue->efx; 693 struct efx_nic *efx = rx_queue->efx;
@@ -699,10 +701,18 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
699 rx_queue->notified_count = 0; 701 rx_queue->notified_count = 0;
700 rx_queue->removed_count = 0; 702 rx_queue->removed_count = 0;
701 rx_queue->min_fill = -1U; 703 rx_queue->min_fill = -1U;
704 efx_init_rx_recycle_ring(efx, rx_queue);
705
706 rx_queue->page_remove = 0;
707 rx_queue->page_add = rx_queue->page_ptr_mask + 1;
708 rx_queue->page_recycle_count = 0;
709 rx_queue->page_recycle_failed = 0;
710 rx_queue->page_recycle_full = 0;
702 711
703 /* Initialise limit fields */ 712 /* Initialise limit fields */
704 max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM; 713 max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
705 max_trigger = max_fill - EFX_RX_BATCH; 714 max_trigger =
715 max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
706 if (rx_refill_threshold != 0) { 716 if (rx_refill_threshold != 0) {
707 trigger = max_fill * min(rx_refill_threshold, 100U) / 100U; 717 trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
708 if (trigger > max_trigger) 718 if (trigger > max_trigger)
@@ -722,6 +732,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
722void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) 732void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
723{ 733{
724 int i; 734 int i;
735 struct efx_nic *efx = rx_queue->efx;
725 struct efx_rx_buffer *rx_buf; 736 struct efx_rx_buffer *rx_buf;
726 737
727 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev, 738 netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
@@ -733,13 +744,32 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
733 del_timer_sync(&rx_queue->slow_fill); 744 del_timer_sync(&rx_queue->slow_fill);
734 efx_nic_fini_rx(rx_queue); 745 efx_nic_fini_rx(rx_queue);
735 746
736 /* Release RX buffers NB start at index 0 not current HW ptr */ 747 /* Release RX buffers from the current read ptr to the write ptr */
737 if (rx_queue->buffer) { 748 if (rx_queue->buffer) {
738 for (i = 0; i <= rx_queue->ptr_mask; i++) { 749 for (i = rx_queue->removed_count; i < rx_queue->added_count;
739 rx_buf = efx_rx_buffer(rx_queue, i); 750 i++) {
751 unsigned index = i & rx_queue->ptr_mask;
752 rx_buf = efx_rx_buffer(rx_queue, index);
740 efx_fini_rx_buffer(rx_queue, rx_buf); 753 efx_fini_rx_buffer(rx_queue, rx_buf);
741 } 754 }
742 } 755 }
756
757 /* Unmap and release the pages in the recycle ring. Remove the ring. */
758 for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
759 struct page *page = rx_queue->page_ring[i];
760 struct efx_rx_page_state *state;
761
762 if (page == NULL)
763 continue;
764
765 state = page_address(page);
766 dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
767 PAGE_SIZE << efx->rx_buffer_order,
768 DMA_FROM_DEVICE);
769 put_page(page);
770 }
771 kfree(rx_queue->page_ring);
772 rx_queue->page_ring = NULL;
743} 773}
744 774
745void efx_remove_rx_queue(struct efx_rx_queue *rx_queue) 775void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
@@ -754,9 +784,6 @@ void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
754} 784}
755 785
756 786
757module_param(rx_alloc_method, int, 0644);
758MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");
759
760module_param(rx_refill_threshold, uint, 0444); 787module_param(rx_refill_threshold, uint, 0444);
761MODULE_PARM_DESC(rx_refill_threshold, 788MODULE_PARM_DESC(rx_refill_threshold,
762 "RX descriptor ring refill threshold (%)"); 789 "RX descriptor ring refill threshold (%)");
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index ba40f67e4f05..51669244d154 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -202,7 +202,7 @@ out:
202 202
203static enum reset_type siena_map_reset_reason(enum reset_type reason) 203static enum reset_type siena_map_reset_reason(enum reset_type reason)
204{ 204{
205 return RESET_TYPE_ALL; 205 return RESET_TYPE_RECOVER_OR_ALL;
206} 206}
207 207
208static int siena_map_reset_flags(u32 *flags) 208static int siena_map_reset_flags(u32 *flags)
@@ -245,6 +245,22 @@ static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
245 return efx_mcdi_reset_port(efx); 245 return efx_mcdi_reset_port(efx);
246} 246}
247 247
248#ifdef CONFIG_EEH
249/* When a PCI device is isolated from the bus, a subsequent MMIO read is
250 * required for the kernel EEH mechanisms to notice. As the Solarflare driver
251 * was written to minimise MMIO reads (for latency), a periodic call to check
252 * the EEH status of the device is required so that device recovery can happen
253 * in a timely fashion.
254 */
255static void siena_monitor(struct efx_nic *efx)
256{
257 struct eeh_dev *eehdev =
258 of_node_to_eeh_dev(pci_device_to_OF_node(efx->pci_dev));
259
260 eeh_dev_check_failure(eehdev);
261}
262#endif
263
248static int siena_probe_nvconfig(struct efx_nic *efx) 264static int siena_probe_nvconfig(struct efx_nic *efx)
249{ 265{
250 u32 caps = 0; 266 u32 caps = 0;
@@ -398,6 +414,8 @@ static int siena_init_nic(struct efx_nic *efx)
398 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1); 414 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1);
399 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1); 415 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1);
400 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1); 416 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1);
417 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_USR_BUF_SIZE,
418 EFX_RX_USR_BUF_SIZE >> 5);
401 efx_writeo(efx, &temp, FR_AZ_RX_CFG); 419 efx_writeo(efx, &temp, FR_AZ_RX_CFG);
402 420
403 /* Set hash key for IPv4 */ 421 /* Set hash key for IPv4 */
@@ -665,7 +683,11 @@ const struct efx_nic_type siena_a0_nic_type = {
665 .init = siena_init_nic, 683 .init = siena_init_nic,
666 .dimension_resources = siena_dimension_resources, 684 .dimension_resources = siena_dimension_resources,
667 .fini = efx_port_dummy_op_void, 685 .fini = efx_port_dummy_op_void,
686#ifdef CONFIG_EEH
687 .monitor = siena_monitor,
688#else
668 .monitor = NULL, 689 .monitor = NULL,
690#endif
669 .map_reset_reason = siena_map_reset_reason, 691 .map_reset_reason = siena_map_reset_reason,
670 .map_reset_flags = siena_map_reset_flags, 692 .map_reset_flags = siena_map_reset_flags,
671 .reset = siena_reset_hw, 693 .reset = siena_reset_hw,
@@ -698,6 +720,7 @@ const struct efx_nic_type siena_a0_nic_type = {
698 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), 720 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
699 .rx_buffer_hash_size = 0x10, 721 .rx_buffer_hash_size = 0x10,
700 .rx_buffer_padding = 0, 722 .rx_buffer_padding = 0,
723 .can_rx_scatter = true,
701 .max_interrupt_mode = EFX_INT_MODE_MSIX, 724 .max_interrupt_mode = EFX_INT_MODE_MSIX,
702 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy 725 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
703 * interrupt handler only supports 32 726 * interrupt handler only supports 32
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 79ad9c94a21b..4bdbaad9932d 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -213,10 +213,11 @@ static int meth_init_tx_ring(struct meth_private *priv)
213{ 213{
214 /* Init TX ring */ 214 /* Init TX ring */
215 priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE, 215 priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE,
216 &priv->tx_ring_dma, GFP_ATOMIC); 216 &priv->tx_ring_dma,
217 GFP_ATOMIC | __GFP_ZERO);
217 if (!priv->tx_ring) 218 if (!priv->tx_ring)
218 return -ENOMEM; 219 return -ENOMEM;
219 memset(priv->tx_ring, 0, TX_RING_BUFFER_SIZE); 220
220 priv->tx_count = priv->tx_read = priv->tx_write = 0; 221 priv->tx_count = priv->tx_read = priv->tx_write = 0;
221 mace->eth.tx_ring_base = priv->tx_ring_dma; 222 mace->eth.tx_ring_base = priv->tx_ring_dma;
222 /* Now init skb save area */ 223 /* Now init skb save area */
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index efca14eaefa9..e45829628d5f 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1841,15 +1841,12 @@ refill_rx_ring:
1841 entry = sis_priv->dirty_rx % NUM_RX_DESC; 1841 entry = sis_priv->dirty_rx % NUM_RX_DESC;
1842 1842
1843 if (sis_priv->rx_skbuff[entry] == NULL) { 1843 if (sis_priv->rx_skbuff[entry] == NULL) {
1844 if ((skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE)) == NULL) { 1844 skb = netdev_alloc_skb(net_dev, RX_BUF_SIZE);
1845 if (skb == NULL) {
1845 /* not enough memory for skbuff, this makes a 1846 /* not enough memory for skbuff, this makes a
1846 * "hole" on the buffer ring, it is not clear 1847 * "hole" on the buffer ring, it is not clear
1847 * how the hardware will react to this kind 1848 * how the hardware will react to this kind
1848 * of degenerated buffer */ 1849 * of degenerated buffer */
1849 if (netif_msg_rx_err(sis_priv))
1850 printk(KERN_INFO "%s: Memory squeeze, "
1851 "deferring packet.\n",
1852 net_dev->name);
1853 net_dev->stats.rx_dropped++; 1850 net_dev->stats.rx_dropped++;
1854 break; 1851 break;
1855 } 1852 }
diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c
index 50823da9dc1e..e85c2e7e8246 100644
--- a/drivers/net/ethernet/smsc/smc9194.c
+++ b/drivers/net/ethernet/smsc/smc9194.c
@@ -1223,9 +1223,7 @@ static void smc_rcv(struct net_device *dev)
1223 dev->stats.multicast++; 1223 dev->stats.multicast++;
1224 1224
1225 skb = netdev_alloc_skb(dev, packet_length + 5); 1225 skb = netdev_alloc_skb(dev, packet_length + 5);
1226
1227 if ( skb == NULL ) { 1226 if ( skb == NULL ) {
1228 printk(KERN_NOTICE CARDNAME ": Low memory, packet dropped.\n");
1229 dev->stats.rx_dropped++; 1227 dev->stats.rx_dropped++;
1230 goto done; 1228 goto done;
1231 } 1229 }
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 591650a8de38..dfbf978315df 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -465,8 +465,6 @@ static inline void smc_rcv(struct net_device *dev)
465 */ 465 */
466 skb = netdev_alloc_skb(dev, packet_len); 466 skb = netdev_alloc_skb(dev, packet_len);
467 if (unlikely(skb == NULL)) { 467 if (unlikely(skb == NULL)) {
468 printk(KERN_NOTICE "%s: Low memory, packet dropped.\n",
469 dev->name);
470 SMC_WAIT_MMU_BUSY(lp); 468 SMC_WAIT_MMU_BUSY(lp);
471 SMC_SET_MMU_CMD(lp, MC_RELEASE); 469 SMC_SET_MMU_CMD(lp, MC_RELEASE);
472 dev->stats.rx_dropped++; 470 dev->stats.rx_dropped++;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index da5cc9a3b34c..48e2b99bec51 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2115,7 +2115,7 @@ static int smsc911x_init(struct net_device *dev)
2115 spin_lock_init(&pdata->dev_lock); 2115 spin_lock_init(&pdata->dev_lock);
2116 spin_lock_init(&pdata->mac_lock); 2116 spin_lock_init(&pdata->mac_lock);
2117 2117
2118 if (pdata->ioaddr == 0) { 2118 if (pdata->ioaddr == NULL) {
2119 SMSC_WARN(pdata, probe, "pdata->ioaddr: 0x00000000"); 2119 SMSC_WARN(pdata, probe, "pdata->ioaddr: 0x00000000");
2120 return -ENODEV; 2120 return -ENODEV;
2121 } 2121 }
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index d457fa2d7509..ffa5c4ad1210 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -848,10 +848,8 @@ static int smsc9420_alloc_rx_buffer(struct smsc9420_pdata *pd, int index)
848 BUG_ON(pd->rx_buffers[index].skb); 848 BUG_ON(pd->rx_buffers[index].skb);
849 BUG_ON(pd->rx_buffers[index].mapping); 849 BUG_ON(pd->rx_buffers[index].mapping);
850 850
851 if (unlikely(!skb)) { 851 if (unlikely(!skb))
852 smsc_warn(RX_ERR, "Failed to allocate new skb!");
853 return -ENOMEM; 852 return -ENOMEM;
854 }
855 853
856 mapping = pci_map_single(pd->pdev, skb_tail_pointer(skb), 854 mapping = pci_map_single(pd->pdev, skb_tail_pointer(skb),
857 PKT_BUF_SZ, PCI_DMA_FROMDEVICE); 855 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index c0ea838c78d1..f695a50bac47 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -5,6 +5,7 @@ config STMMAC_ETH
5 select MII 5 select MII
6 select PHYLIB 6 select PHYLIB
7 select CRC32 7 select CRC32
8 select PTP_1588_CLOCK
8 ---help--- 9 ---help---
9 This is the driver for the Ethernet IPs built around a 10 This is the driver for the Ethernet IPs built around a
10 Synopsys IP Core and only tested on the STMicroelectronics 11 Synopsys IP Core and only tested on the STMicroelectronics
@@ -54,22 +55,4 @@ config STMMAC_DA
54 By default, the DMA arbitration scheme is based on Round-robin 55 By default, the DMA arbitration scheme is based on Round-robin
55 (rx:tx priority is 1:1). 56 (rx:tx priority is 1:1).
56 57
57choice
58 prompt "Select the DMA TX/RX descriptor operating modes"
59 depends on STMMAC_ETH
60 ---help---
61 This driver supports DMA descriptor to operate both in dual buffer
62 (RING) and linked-list(CHAINED) mode. In RING mode each descriptor
63 points to two data buffer pointers whereas in CHAINED mode they
64 points to only one data buffer pointer.
65
66config STMMAC_RING
67 bool "Enable Descriptor Ring Mode"
68
69config STMMAC_CHAINED
70 bool "Enable Descriptor Chained Mode"
71
72endchoice
73
74
75endif 58endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index c8e8ea60ac19..356a9dd32be7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -1,9 +1,7 @@
1obj-$(CONFIG_STMMAC_ETH) += stmmac.o 1obj-$(CONFIG_STMMAC_ETH) += stmmac.o
2stmmac-$(CONFIG_STMMAC_RING) += ring_mode.o
3stmmac-$(CONFIG_STMMAC_CHAINED) += chain_mode.o
4stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o 2stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o
5stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o 3stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o
6stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \ 4stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
7 dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \ 5 chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
8 dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \ 6 dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
9 mmc_core.o $(stmmac-y) 7 mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o $(stmmac-y)
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index 0668659803ed..37a3f93b487d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -28,7 +28,7 @@
28 28
29#include "stmmac.h" 29#include "stmmac.h"
30 30
31unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) 31static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
32{ 32{
33 struct stmmac_priv *priv = (struct stmmac_priv *) p; 33 struct stmmac_priv *priv = (struct stmmac_priv *) p;
34 unsigned int txsize = priv->dma_tx_size; 34 unsigned int txsize = priv->dma_tx_size;
@@ -47,7 +47,8 @@ unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
47 47
48 desc->des2 = dma_map_single(priv->device, skb->data, 48 desc->des2 = dma_map_single(priv->device, skb->data,
49 bmax, DMA_TO_DEVICE); 49 bmax, DMA_TO_DEVICE);
50 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum); 50 priv->tx_skbuff_dma[entry] = desc->des2;
51 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE);
51 52
52 while (len != 0) { 53 while (len != 0) {
53 entry = (++priv->cur_tx) % txsize; 54 entry = (++priv->cur_tx) % txsize;
@@ -57,8 +58,9 @@ unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
57 desc->des2 = dma_map_single(priv->device, 58 desc->des2 = dma_map_single(priv->device,
58 (skb->data + bmax * i), 59 (skb->data + bmax * i),
59 bmax, DMA_TO_DEVICE); 60 bmax, DMA_TO_DEVICE);
60 priv->hw->desc->prepare_tx_desc(desc, 0, bmax, 61 priv->tx_skbuff_dma[entry] = desc->des2;
61 csum); 62 priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
63 STMMAC_CHAIN_MODE);
62 priv->hw->desc->set_tx_owner(desc); 64 priv->hw->desc->set_tx_owner(desc);
63 priv->tx_skbuff[entry] = NULL; 65 priv->tx_skbuff[entry] = NULL;
64 len -= bmax; 66 len -= bmax;
@@ -67,8 +69,9 @@ unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
67 desc->des2 = dma_map_single(priv->device, 69 desc->des2 = dma_map_single(priv->device,
68 (skb->data + bmax * i), len, 70 (skb->data + bmax * i), len,
69 DMA_TO_DEVICE); 71 DMA_TO_DEVICE);
70 priv->hw->desc->prepare_tx_desc(desc, 0, len, 72 priv->tx_skbuff_dma[entry] = desc->des2;
71 csum); 73 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
74 STMMAC_CHAIN_MODE);
72 priv->hw->desc->set_tx_owner(desc); 75 priv->hw->desc->set_tx_owner(desc);
73 priv->tx_skbuff[entry] = NULL; 76 priv->tx_skbuff[entry] = NULL;
74 len = 0; 77 len = 0;
@@ -89,49 +92,70 @@ static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc)
89 return ret; 92 return ret;
90} 93}
91 94
92static void stmmac_refill_desc3(int bfsize, struct dma_desc *p) 95static void stmmac_init_dma_chain(void *des, dma_addr_t phy_addr,
93{ 96 unsigned int size, unsigned int extend_desc)
94}
95
96static void stmmac_init_desc3(int des3_as_data_buf, struct dma_desc *p)
97{
98}
99
100static void stmmac_clean_desc3(struct dma_desc *p)
101{
102}
103
104static void stmmac_init_dma_chain(struct dma_desc *des, dma_addr_t phy_addr,
105 unsigned int size)
106{ 97{
107 /* 98 /*
108 * In chained mode the des3 points to the next element in the ring. 99 * In chained mode the des3 points to the next element in the ring.
109 * The last element has to point to the head. 100 * The last element has to point to the head.
110 */ 101 */
111 int i; 102 int i;
112 struct dma_desc *p = des;
113 dma_addr_t dma_phy = phy_addr; 103 dma_addr_t dma_phy = phy_addr;
114 104
115 for (i = 0; i < (size - 1); i++) { 105 if (extend_desc) {
116 dma_phy += sizeof(struct dma_desc); 106 struct dma_extended_desc *p = (struct dma_extended_desc *) des;
117 p->des3 = (unsigned int)dma_phy; 107 for (i = 0; i < (size - 1); i++) {
118 p++; 108 dma_phy += sizeof(struct dma_extended_desc);
109 p->basic.des3 = (unsigned int)dma_phy;
110 p++;
111 }
112 p->basic.des3 = (unsigned int)phy_addr;
113
114 } else {
115 struct dma_desc *p = (struct dma_desc *) des;
116 for (i = 0; i < (size - 1); i++) {
117 dma_phy += sizeof(struct dma_desc);
118 p->des3 = (unsigned int)dma_phy;
119 p++;
120 }
121 p->des3 = (unsigned int)phy_addr;
119 } 122 }
120 p->des3 = (unsigned int)phy_addr;
121} 123}
122 124
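stmmac_init_dma_chain() threads des3 of each descriptor to the bus address of the next one and closes the ring at the tail. A userspace model with a four-entry ring and a 4-byte toy descriptor standing in for the real dma_desc layout:

#include <stdio.h>

struct toy_desc {
	unsigned int des3;	/* bus address of the next descriptor */
};

static void init_chain(struct toy_desc *p, unsigned int phy_addr,
		       unsigned int size)
{
	unsigned int dma_phy = phy_addr;
	unsigned int i;

	for (i = 0; i < size - 1; i++) {
		dma_phy += sizeof(struct toy_desc);
		p[i].des3 = dma_phy;
	}
	p[size - 1].des3 = phy_addr;	/* tail points back to the head */
}

int main(void)
{
	struct toy_desc ring[4];
	unsigned int i;

	init_chain(ring, 0x1000, 4);
	for (i = 0; i < 4; i++)		/* prints: 0x1004 0x1008 0x100c 0x1000 */
		printf("%#x ", ring[i].des3);
	printf("\n");
	return 0;
}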
123static int stmmac_set_16kib_bfsize(int mtu) 125static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
126{
127 struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
128
129 if (priv->hwts_rx_en && !priv->extend_desc)
130 /* NOTE: Device will overwrite des3 with timestamp value if
131 * 1588-2002 time stamping is enabled, hence reinitialize it
132 * to keep explicit chaining in the descriptor.
133 */
134 p->des3 = (unsigned int)(priv->dma_rx_phy +
135 (((priv->dirty_rx) + 1) %
136 priv->dma_rx_size) *
137 sizeof(struct dma_desc));
138}
139
140static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
124{ 141{
125 /* Not supported */ 142 struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
126 return 0; 143
144 if (priv->hw->desc->get_tx_ls(p) && !priv->extend_desc)
145 /* NOTE: Device will overwrite des3 with timestamp value if
146 * 1588-2002 time stamping is enabled, hence reinitialize it
147 * to keep explicit chaining in the descriptor.
148 */
149 p->des3 = (unsigned int)(priv->dma_tx_phy +
150 (((priv->dirty_tx + 1) %
151 priv->dma_tx_size) *
152 sizeof(struct dma_desc)));
127} 153}
128 154
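When hardware timestamping is enabled the device overwrites des3, so refill_desc3()/clean_desc3() above recompute the chain pointer from the ring base, the next index modulo the ring size, and the descriptor size. A worked instance of that expression; the 16-byte descriptor size is an assumption for the basic dma_desc layout.

#include <stdio.h>

int main(void)
{
	unsigned int dma_rx_phy = 0x2000;	/* ring base bus address */
	unsigned int dma_rx_size = 8;		/* descriptors in the ring */
	unsigned int desc_size = 16;	/* assumed sizeof(struct dma_desc) */
	unsigned int dirty_rx = 7;	/* last slot: next wraps to index 0 */
	unsigned int des3;

	des3 = dma_rx_phy + ((dirty_rx + 1) % dma_rx_size) * desc_size;
	printf("des3=%#x\n", des3);	/* prints: des3=0x2000, the head */
	return 0;
}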
129const struct stmmac_ring_mode_ops ring_mode_ops = { 155const struct stmmac_chain_mode_ops chain_mode_ops = {
156 .init = stmmac_init_dma_chain,
130 .is_jumbo_frm = stmmac_is_jumbo_frm, 157 .is_jumbo_frm = stmmac_is_jumbo_frm,
131 .jumbo_frm = stmmac_jumbo_frm, 158 .jumbo_frm = stmmac_jumbo_frm,
132 .refill_desc3 = stmmac_refill_desc3, 159 .refill_desc3 = stmmac_refill_desc3,
133 .init_desc3 = stmmac_init_desc3,
134 .init_dma_chain = stmmac_init_dma_chain,
135 .clean_desc3 = stmmac_clean_desc3, 160 .clean_desc3 = stmmac_clean_desc3,
136 .set_16kib_bfsize = stmmac_set_16kib_bfsize,
137}; 161};
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 186d14806122..ad7e20a9875d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -117,6 +117,36 @@ struct stmmac_extra_stats {
117 unsigned long irq_rx_path_in_lpi_mode_n; 117 unsigned long irq_rx_path_in_lpi_mode_n;
118 unsigned long irq_rx_path_exit_lpi_mode_n; 118 unsigned long irq_rx_path_exit_lpi_mode_n;
119 unsigned long phy_eee_wakeup_error_n; 119 unsigned long phy_eee_wakeup_error_n;
120 /* Extended RDES status */
121 unsigned long ip_hdr_err;
122 unsigned long ip_payload_err;
123 unsigned long ip_csum_bypassed;
124 unsigned long ipv4_pkt_rcvd;
125 unsigned long ipv6_pkt_rcvd;
126 unsigned long rx_msg_type_ext_no_ptp;
127 unsigned long rx_msg_type_sync;
128 unsigned long rx_msg_type_follow_up;
129 unsigned long rx_msg_type_delay_req;
130 unsigned long rx_msg_type_delay_resp;
131 unsigned long rx_msg_type_pdelay_req;
132 unsigned long rx_msg_type_pdelay_resp;
133 unsigned long rx_msg_type_pdelay_follow_up;
134 unsigned long ptp_frame_type;
135 unsigned long ptp_ver;
136 unsigned long timestamp_dropped;
137 unsigned long av_pkt_rcvd;
138 unsigned long av_tagged_pkt_rcvd;
139 unsigned long vlan_tag_priority_val;
140 unsigned long l3_filter_match;
141 unsigned long l4_filter_match;
142 unsigned long l3_l4_filter_no_match;
143 /* PCS */
144 unsigned long irq_pcs_ane_n;
145 unsigned long irq_pcs_link_n;
146 unsigned long irq_rgmii_n;
147 unsigned long pcs_link;
148 unsigned long pcs_duplex;
149 unsigned long pcs_speed;
120}; 150};
121 151
122/* CSR Frequency Access Defines*/ 152/* CSR Frequency Access Defines*/
@@ -138,6 +168,12 @@ struct stmmac_extra_stats {
138#define FLOW_TX 2 168#define FLOW_TX 2
139#define FLOW_AUTO (FLOW_TX | FLOW_RX) 169#define FLOW_AUTO (FLOW_TX | FLOW_RX)
140 170
171/* PCS defines */
172#define STMMAC_PCS_RGMII (1 << 0)
173#define STMMAC_PCS_SGMII (1 << 1)
174#define STMMAC_PCS_TBI (1 << 2)
175#define STMMAC_PCS_RTBI (1 << 3)
176
141#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */ 177#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
142 178
143/* DAM HW feature register fields */ 179/* DAM HW feature register fields */
@@ -194,17 +230,25 @@ enum dma_irq_status {
194 handle_tx = 0x8, 230 handle_tx = 0x8,
195}; 231};
196 232
197enum core_specific_irq_mask { 233#define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 1)
198 core_mmc_tx_irq = 1, 234#define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 2)
199 core_mmc_rx_irq = 2, 235#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 3)
200 core_mmc_rx_csum_offload_irq = 4, 236#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 4)
201 core_irq_receive_pmt_irq = 8, 237
202 core_irq_tx_path_in_lpi_mode = 16, 238#define CORE_PCS_ANE_COMPLETE (1 << 5)
203 core_irq_tx_path_exit_lpi_mode = 32, 239#define CORE_PCS_LINK_STATUS (1 << 6)
204 core_irq_rx_path_in_lpi_mode = 64, 240#define CORE_RGMII_IRQ (1 << 7)
205 core_irq_rx_path_exit_lpi_mode = 128, 241
242struct rgmii_adv {
243 unsigned int pause;
244 unsigned int duplex;
245 unsigned int lp_pause;
246 unsigned int lp_duplex;
206}; 247};
207 248
249#define STMMAC_PCS_PAUSE 1
250#define STMMAC_PCS_ASYM_PAUSE 2
251
208/* DMA HW capabilities */ 252/* DMA HW capabilities */
209struct dma_features { 253struct dma_features {
210 unsigned int mbps_10_100; 254 unsigned int mbps_10_100;
@@ -255,23 +299,26 @@ struct dma_features {
255#define STMMAC_DEFAULT_LIT_LS_TIMER 0x3E8 299#define STMMAC_DEFAULT_LIT_LS_TIMER 0x3E8
256#define STMMAC_DEFAULT_TWT_LS_TIMER 0x0 300#define STMMAC_DEFAULT_TWT_LS_TIMER 0x0
257 301
302#define STMMAC_CHAIN_MODE 0x1
303#define STMMAC_RING_MODE 0x2
304
258struct stmmac_desc_ops { 305struct stmmac_desc_ops {
259 /* DMA RX descriptor ring initialization */ 306 /* DMA RX descriptor ring initialization */
260 void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size, 307 void (*init_rx_desc) (struct dma_desc *p, int disable_rx_ic, int mode,
261 int disable_rx_ic); 308 int end);
262 /* DMA TX descriptor ring initialization */ 309 /* DMA TX descriptor ring initialization */
263 void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size); 310 void (*init_tx_desc) (struct dma_desc *p, int mode, int end);
264 311
265 /* Invoked by the xmit function to prepare the tx descriptor */ 312 /* Invoked by the xmit function to prepare the tx descriptor */
266 void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len, 313 void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
267 int csum_flag); 314 int csum_flag, int mode);
268 /* Set/get the owner of the descriptor */ 315 /* Set/get the owner of the descriptor */
269 void (*set_tx_owner) (struct dma_desc *p); 316 void (*set_tx_owner) (struct dma_desc *p);
270 int (*get_tx_owner) (struct dma_desc *p); 317 int (*get_tx_owner) (struct dma_desc *p);
271 /* Invoked by the xmit function to close the tx descriptor */ 318 /* Invoked by the xmit function to close the tx descriptor */
272 void (*close_tx_desc) (struct dma_desc *p); 319 void (*close_tx_desc) (struct dma_desc *p);
273 /* Clean the tx descriptor as soon as the tx irq is received */ 320 /* Clean the tx descriptor as soon as the tx irq is received */
274 void (*release_tx_desc) (struct dma_desc *p); 321 void (*release_tx_desc) (struct dma_desc *p, int mode);
275 /* Clear interrupt on tx frame completion. When this bit is 322 /* Clear interrupt on tx frame completion. When this bit is
276 * set an interrupt happens as soon as the frame is transmitted */ 323 * set an interrupt happens as soon as the frame is transmitted */
277 void (*clear_tx_ic) (struct dma_desc *p); 324 void (*clear_tx_ic) (struct dma_desc *p);
@@ -290,12 +337,22 @@ struct stmmac_desc_ops {
290 /* Return the reception status looking at the RDES1 */ 337 /* Return the reception status looking at the RDES1 */
291 int (*rx_status) (void *data, struct stmmac_extra_stats *x, 338 int (*rx_status) (void *data, struct stmmac_extra_stats *x,
292 struct dma_desc *p); 339 struct dma_desc *p);
340 void (*rx_extended_status) (void *data, struct stmmac_extra_stats *x,
341 struct dma_extended_desc *p);
342 /* Set tx timestamp enable bit */
343 void (*enable_tx_timestamp) (struct dma_desc *p);
344 /* get tx timestamp status */
345 int (*get_tx_timestamp_status) (struct dma_desc *p);
346 /* get timestamp value */
347 u64 (*get_timestamp) (void *desc, u32 ats);
348 /* get rx timestamp status */
349 int (*get_rx_timestamp_status) (void *desc, u32 ats);
293}; 350};
294 351
295struct stmmac_dma_ops { 352struct stmmac_dma_ops {
296 /* DMA core initialization */ 353 /* DMA core initialization */
297 int (*init) (void __iomem *ioaddr, int pbl, int fb, int mb, 354 int (*init) (void __iomem *ioaddr, int pbl, int fb, int mb,
298 int burst_len, u32 dma_tx, u32 dma_rx); 355 int burst_len, u32 dma_tx, u32 dma_rx, int atds);
299 /* Dump DMA registers */ 356 /* Dump DMA registers */
300 void (*dump_regs) (void __iomem *ioaddr); 357 void (*dump_regs) (void __iomem *ioaddr);
301 /* Set tx/rx threshold in the csr6 register 358 /* Set tx/rx threshold in the csr6 register
@@ -327,7 +384,8 @@ struct stmmac_ops {
327 /* Dump MAC registers */ 384 /* Dump MAC registers */
328 void (*dump_regs) (void __iomem *ioaddr); 385 void (*dump_regs) (void __iomem *ioaddr);
329 /* Handle extra events on specific interrupts hw dependent */ 386 /* Handle extra events on specific interrupts hw dependent */
330 int (*host_irq_status) (void __iomem *ioaddr); 387 int (*host_irq_status) (void __iomem *ioaddr,
388 struct stmmac_extra_stats *x);
331 /* Multicast filter setting */ 389 /* Multicast filter setting */
332 void (*set_filter) (struct net_device *dev, int id); 390 void (*set_filter) (struct net_device *dev, int id);
333 /* Flow control setting */ 391 /* Flow control setting */
@@ -344,6 +402,18 @@ struct stmmac_ops {
344 void (*reset_eee_mode) (void __iomem *ioaddr); 402 void (*reset_eee_mode) (void __iomem *ioaddr);
345 void (*set_eee_timer) (void __iomem *ioaddr, int ls, int tw); 403 void (*set_eee_timer) (void __iomem *ioaddr, int ls, int tw);
346 void (*set_eee_pls) (void __iomem *ioaddr, int link); 404 void (*set_eee_pls) (void __iomem *ioaddr, int link);
405 void (*ctrl_ane) (void __iomem *ioaddr, bool restart);
406 void (*get_adv) (void __iomem *ioaddr, struct rgmii_adv *adv);
407};
408
409struct stmmac_hwtimestamp {
410 void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data);
411 void (*config_sub_second_increment) (void __iomem *ioaddr);
412 int (*init_systime) (void __iomem *ioaddr, u32 sec, u32 nsec);
413 int (*config_addend)(void __iomem *ioaddr, u32 addend);
414 int (*adjust_systime)(void __iomem *ioaddr, u32 sec, u32 nsec,
415 int add_sub);
416 u64 (*get_systime)(void __iomem *ioaddr);
347}; 417};
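
config_addend is the knob a PTP clock driver turns to speed the hardware counter up or slow it down. One common scheme for addend-based clocks scales a base addend by a parts-per-billion offset and pushes the result back through config_addend; this helper is a sketch under that assumption, not part of the patch:

#include <linux/math64.h>

static u32 example_scale_addend(u32 base_addend, s32 ppb)
{
	u64 diff = (u64)base_addend * (ppb < 0 ? -ppb : ppb);

	/* diff = base_addend * |ppb| / 1e9 */
	diff = div_u64(diff, 1000000000);

	return (ppb < 0) ? base_addend - (u32)diff : base_addend + (u32)diff;
}
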
348 418
349struct mac_link { 419struct mac_link {
@@ -360,19 +430,28 @@ struct mii_regs {
360struct stmmac_ring_mode_ops { 430struct stmmac_ring_mode_ops {
361 unsigned int (*is_jumbo_frm) (int len, int ehn_desc); 431 unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
362 unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum); 432 unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
363 void (*refill_desc3) (int bfsize, struct dma_desc *p); 433 void (*refill_desc3) (void *priv, struct dma_desc *p);
364 void (*init_desc3) (int des3_as_data_buf, struct dma_desc *p); 434 void (*init_desc3) (struct dma_desc *p);
365 void (*init_dma_chain) (struct dma_desc *des, dma_addr_t phy_addr, 435 void (*clean_desc3) (void *priv, struct dma_desc *p);
366 unsigned int size);
367 void (*clean_desc3) (struct dma_desc *p);
368 int (*set_16kib_bfsize) (int mtu); 436 int (*set_16kib_bfsize) (int mtu);
369}; 437};
370 438
439struct stmmac_chain_mode_ops {
440 void (*init) (void *des, dma_addr_t phy_addr, unsigned int size,
441 unsigned int extend_desc);
442 unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
443 unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
444 void (*refill_desc3) (void *priv, struct dma_desc *p);
445 void (*clean_desc3) (void *priv, struct dma_desc *p);
446};
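
Where ring mode marks the last descriptor and lets the DMA engine wrap, chain mode's init hook links each descriptor to the next by physical address. An illustrative sketch of the basic-descriptor case, assuming des3 carries the next-descriptor pointer; this is not the patch's actual chain_mode.c:

static void example_chain_init(struct dma_desc *des, dma_addr_t phy_addr,
			       unsigned int size)
{
	dma_addr_t dma_phy = phy_addr;
	unsigned int i;

	/* des3 of each element points at the next descriptor's DMA address;
	 * the last element closes the chain back to the head.
	 */
	for (i = 0; i < size - 1; i++) {
		dma_phy += sizeof(struct dma_desc);
		des[i].des3 = (unsigned int)dma_phy;
	}
	des[size - 1].des3 = (unsigned int)phy_addr;
}
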
447
371struct mac_device_info { 448struct mac_device_info {
372 const struct stmmac_ops *mac; 449 const struct stmmac_ops *mac;
373 const struct stmmac_desc_ops *desc; 450 const struct stmmac_desc_ops *desc;
374 const struct stmmac_dma_ops *dma; 451 const struct stmmac_dma_ops *dma;
375 const struct stmmac_ring_mode_ops *ring; 452 const struct stmmac_ring_mode_ops *ring;
453 const struct stmmac_chain_mode_ops *chain;
454 const struct stmmac_hwtimestamp *ptp;
376 struct mii_regs mii; /* MII register Addresses */ 455 struct mii_regs mii; /* MII register Addresses */
377 struct mac_link link; 456 struct mac_link link;
378 unsigned int synopsys_uid; 457 unsigned int synopsys_uid;
@@ -390,5 +469,6 @@ extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
390 469
391extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); 470extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
392extern const struct stmmac_ring_mode_ops ring_mode_ops; 471extern const struct stmmac_ring_mode_ops ring_mode_ops;
472extern const struct stmmac_chain_mode_ops chain_mode_ops;
393 473
394#endif /* __COMMON_H__ */ 474#endif /* __COMMON_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
index 223adf95fd03..2eca0c033038 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
@@ -24,6 +24,7 @@
24#ifndef __DESCS_H__ 24#ifndef __DESCS_H__
25#define __DESCS_H__ 25#define __DESCS_H__
26 26
27/* Basic descriptor structure for normal and alternate descriptors */
27struct dma_desc { 28struct dma_desc {
28 /* Receive descriptor */ 29 /* Receive descriptor */
29 union { 30 union {
@@ -60,7 +61,7 @@ struct dma_desc {
60 } rx; 61 } rx;
61 struct { 62 struct {
62 /* RDES0 */ 63 /* RDES0 */
63 u32 payload_csum_error:1; 64 u32 rx_mac_addr:1;
64 u32 crc_error:1; 65 u32 crc_error:1;
65 u32 dribbling:1; 66 u32 dribbling:1;
66 u32 error_gmii:1; 67 u32 error_gmii:1;
@@ -162,13 +163,57 @@ struct dma_desc {
162 unsigned int des3; 163 unsigned int des3;
163}; 164};
164 165
166/* Extended descriptor structure (supported by new SYNP GMAC generations) */
167struct dma_extended_desc {
168 struct dma_desc basic;
169 union {
170 struct {
171 u32 ip_payload_type:3;
172 u32 ip_hdr_err:1;
173 u32 ip_payload_err:1;
174 u32 ip_csum_bypassed:1;
175 u32 ipv4_pkt_rcvd:1;
176 u32 ipv6_pkt_rcvd:1;
177 u32 msg_type:4;
178 u32 ptp_frame_type:1;
179 u32 ptp_ver:1;
180 u32 timestamp_dropped:1;
181 u32 reserved:1;
182 u32 av_pkt_rcvd:1;
183 u32 av_tagged_pkt_rcvd:1;
184 u32 vlan_tag_priority_val:3;
185 u32 reserved3:3;
186 u32 l3_filter_match:1;
187 u32 l4_filter_match:1;
188 u32 l3_l4_filter_no_match:2;
189 u32 reserved4:4;
190 } erx;
191 struct {
192 u32 reserved;
193 } etx;
194 } des4;
195 unsigned int des5; /* Reserved */
196 unsigned int des6; /* Tx/Rx Timestamp Low */
197 unsigned int des7; /* Tx/Rx Timestamp High */
198};
199
165/* Transmit checksum insertion control */ 200/* Transmit checksum insertion control */
166enum tdes_csum_insertion { 201enum tdes_csum_insertion {
167 cic_disabled = 0, /* Checksum Insertion Control */ 202 cic_disabled = 0, /* Checksum Insertion Control */
168 cic_only_ip = 1, /* Only IP header */ 203 cic_only_ip = 1, /* Only IP header */
169 cic_no_pseudoheader = 2, /* IP header but pseudoheader 204 /* IP header but pseudoheader is not calculated */
170 * is not calculated */ 205 cic_no_pseudoheader = 2,
171 cic_full = 3, /* IP header and pseudoheader */ 206 cic_full = 3, /* IP header and pseudoheader */
172}; 207};
173 208
209/* Extended RDES4 definitions */
210#define RDES_EXT_NO_PTP 0
211#define RDES_EXT_SYNC 0x1
212#define RDES_EXT_FOLLOW_UP 0x2
213#define RDES_EXT_DELAY_REQ 0x3
214#define RDES_EXT_DELAY_RESP 0x4
215#define RDES_EXT_PDELAY_REQ 0x5
216#define RDES_EXT_PDELAY_RESP 0x6
217#define RDES_EXT_PDELAY_FOLLOW_UP 0x7
218
174#endif /* __DESCS_H__ */ 219#endif /* __DESCS_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
index 7ee9499a6e38..20f83fc9cf13 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -30,26 +30,28 @@
30#ifndef __DESC_COM_H__ 30#ifndef __DESC_COM_H__
31#define __DESC_COM_H__ 31#define __DESC_COM_H__
32 32
33#if defined(CONFIG_STMMAC_RING) 33/* Specific functions used for Ring mode */
34static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end) 34
35/* Enhanced descriptors */
36static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
35{ 37{
36 p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1; 38 p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
37 if (end) 39 if (end)
38 p->des01.erx.end_ring = 1; 40 p->des01.erx.end_ring = 1;
39} 41}
40 42
41static inline void ehn_desc_tx_set_on_ring_chain(struct dma_desc *p, int end) 43static inline void ehn_desc_tx_set_on_ring(struct dma_desc *p, int end)
42{ 44{
43 if (end) 45 if (end)
44 p->des01.etx.end_ring = 1; 46 p->des01.etx.end_ring = 1;
45} 47}
46 48
47static inline void enh_desc_end_tx_desc(struct dma_desc *p, int ter) 49static inline void enh_desc_end_tx_desc_on_ring(struct dma_desc *p, int ter)
48{ 50{
49 p->des01.etx.end_ring = ter; 51 p->des01.etx.end_ring = ter;
50} 52}
51 53
52static inline void enh_set_tx_desc_len(struct dma_desc *p, int len) 54static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
53{ 55{
54 if (unlikely(len > BUF_SIZE_4KiB)) { 56 if (unlikely(len > BUF_SIZE_4KiB)) {
55 p->des01.etx.buffer1_size = BUF_SIZE_4KiB; 57 p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
@@ -58,25 +60,26 @@ static inline void enh_set_tx_desc_len(struct dma_desc *p, int len)
58 p->des01.etx.buffer1_size = len; 60 p->des01.etx.buffer1_size = len;
59} 61}
60 62
61static inline void ndesc_rx_set_on_ring_chain(struct dma_desc *p, int end) 63/* Normal descriptors */
64static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end)
62{ 65{
63 p->des01.rx.buffer2_size = BUF_SIZE_2KiB - 1; 66 p->des01.rx.buffer2_size = BUF_SIZE_2KiB - 1;
64 if (end) 67 if (end)
65 p->des01.rx.end_ring = 1; 68 p->des01.rx.end_ring = 1;
66} 69}
67 70
68static inline void ndesc_tx_set_on_ring_chain(struct dma_desc *p, int end) 71static inline void ndesc_tx_set_on_ring(struct dma_desc *p, int end)
69{ 72{
70 if (end) 73 if (end)
71 p->des01.tx.end_ring = 1; 74 p->des01.tx.end_ring = 1;
72} 75}
73 76
74static inline void ndesc_end_tx_desc(struct dma_desc *p, int ter) 77static inline void ndesc_end_tx_desc_on_ring(struct dma_desc *p, int ter)
75{ 78{
76 p->des01.tx.end_ring = ter; 79 p->des01.tx.end_ring = ter;
77} 80}
78 81
79static inline void norm_set_tx_desc_len(struct dma_desc *p, int len) 82static inline void norm_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
80{ 83{
81 if (unlikely(len > BUF_SIZE_2KiB)) { 84 if (unlikely(len > BUF_SIZE_2KiB)) {
82 p->des01.etx.buffer1_size = BUF_SIZE_2KiB - 1; 85 p->des01.etx.buffer1_size = BUF_SIZE_2KiB - 1;
@@ -85,47 +88,48 @@ static inline void norm_set_tx_desc_len(struct dma_desc *p, int len)
85 p->des01.tx.buffer1_size = len; 88 p->des01.tx.buffer1_size = len;
86} 89}
87 90
88#else 91/* Specific functions used for Chain mode */
89 92
90static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end) 93/* Enhanced descriptors */
94static inline void ehn_desc_rx_set_on_chain(struct dma_desc *p, int end)
91{ 95{
92 p->des01.erx.second_address_chained = 1; 96 p->des01.erx.second_address_chained = 1;
93} 97}
94 98
95static inline void ehn_desc_tx_set_on_ring_chain(struct dma_desc *p, int end) 99static inline void ehn_desc_tx_set_on_chain(struct dma_desc *p, int end)
96{ 100{
97 p->des01.etx.second_address_chained = 1; 101 p->des01.etx.second_address_chained = 1;
98} 102}
99 103
100static inline void enh_desc_end_tx_desc(struct dma_desc *p, int ter) 104static inline void enh_desc_end_tx_desc_on_chain(struct dma_desc *p, int ter)
101{ 105{
102 p->des01.etx.second_address_chained = 1; 106 p->des01.etx.second_address_chained = 1;
103} 107}
104 108
105static inline void enh_set_tx_desc_len(struct dma_desc *p, int len) 109static inline void enh_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
106{ 110{
107 p->des01.etx.buffer1_size = len; 111 p->des01.etx.buffer1_size = len;
108} 112}
109 113
110static inline void ndesc_rx_set_on_ring_chain(struct dma_desc *p, int end) 114/* Normal descriptors */
115static inline void ndesc_rx_set_on_chain(struct dma_desc *p, int end)
111{ 116{
112 p->des01.rx.second_address_chained = 1; 117 p->des01.rx.second_address_chained = 1;
113} 118}
114 119
115static inline void ndesc_tx_set_on_ring_chain(struct dma_desc *p, int ring_size) 120static inline void ndesc_tx_set_on_chain(struct dma_desc *p, int end)
116{ 122{
117 p->des01.tx.second_address_chained = 1; 123 p->des01.tx.second_address_chained = 1;
118} 124}
119 125
120static inline void ndesc_end_tx_desc(struct dma_desc *p, int ter) 126static inline void ndesc_end_tx_desc_on_chain(struct dma_desc *p, int ter)
121{ 127{
122 p->des01.tx.second_address_chained = 1; 128 p->des01.tx.second_address_chained = 1;
123} 129}
124 130
125static inline void norm_set_tx_desc_len(struct dma_desc *p, int len) 131static inline void norm_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
126{ 132{
127 p->des01.tx.buffer1_size = len; 133 p->des01.tx.buffer1_size = len;
128} 134}
129#endif
130
131#endif /* __DESC_COM_H__ */ 135#endif /* __DESC_COM_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index 7ad56afd6324..57f4e8f607e9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -89,13 +89,46 @@ enum power_event {
89 (reg * 8)) 89 (reg * 8))
90#define GMAC_MAX_PERFECT_ADDRESSES 32 90#define GMAC_MAX_PERFECT_ADDRESSES 32
91 91
92/* PCS registers (AN/TBI/SGMII/RGMII) offset */
92#define GMAC_AN_CTRL 0x000000c0 /* AN control */ 93#define GMAC_AN_CTRL 0x000000c0 /* AN control */
93#define GMAC_AN_STATUS 0x000000c4 /* AN status */ 94#define GMAC_AN_STATUS 0x000000c4 /* AN status */
94#define GMAC_ANE_ADV 0x000000c8 /* Auto-Neg. Advertisement */ 95#define GMAC_ANE_ADV 0x000000c8 /* Auto-Neg. Advertisement */
95#define GMAC_ANE_LINK 0x000000cc /* Auto-Neg. link partener ability */ 96#define GMAC_ANE_LPA 0x000000cc /* Auto-Neg. link partner ability */
96#define GMAC_ANE_EXP 0x000000d0 /* ANE expansion */ 97#define GMAC_ANE_EXP 0x000000d0 /* ANE expansion */
97#define GMAC_TBI 0x000000d4 /* TBI extend status */ 98#define GMAC_TBI 0x000000d4 /* TBI extend status */
98#define GMAC_GMII_STATUS 0x000000d8 /* S/R-GMII status */ 99#define GMAC_S_R_GMII 0x000000d8 /* SGMII RGMII status */
100
101/* AN Configuration defines */
102#define GMAC_AN_CTRL_RAN 0x00000200 /* Restart Auto-Negotiation */
103#define GMAC_AN_CTRL_ANE 0x00001000 /* Auto-Negotiation Enable */
104#define GMAC_AN_CTRL_ELE 0x00004000 /* External Loopback Enable */
105#define GMAC_AN_CTRL_ECD 0x00010000 /* Enable Comma Detect */
106#define GMAC_AN_CTRL_LR 0x00020000 /* Lock to Reference */
107#define GMAC_AN_CTRL_SGMRAL 0x00040000 /* SGMII RAL Control */
108
109/* AN Status defines */
110#define GMAC_AN_STATUS_LS 0x00000004 /* Link Status 0:down 1:up */
111#define GMAC_AN_STATUS_ANA 0x00000008 /* Auto-Negotiation Ability */
112#define GMAC_AN_STATUS_ANC 0x00000020 /* Auto-Negotiation Complete */
113#define GMAC_AN_STATUS_ES 0x00000100 /* Extended Status */
114
115/* Register 54 (SGMII/RGMII status register) */
116#define GMAC_S_R_GMII_LINK 0x8
117#define GMAC_S_R_GMII_SPEED 0x5
118#define GMAC_S_R_GMII_SPEED_SHIFT 0x1
119#define GMAC_S_R_GMII_MODE 0x1
120#define GMAC_S_R_GMII_SPEED_125 2
121#define GMAC_S_R_GMII_SPEED_25 1
122
123/* Common ADV and LPA defines */
124#define GMAC_ANE_FD (1 << 5)
125#define GMAC_ANE_HD (1 << 6)
126#define GMAC_ANE_PSE (3 << 7)
127#define GMAC_ANE_PSE_SHIFT 7
128
99 132
100/* GMAC Configuration defines */ 133/* GMAC Configuration defines */
101#define GMAC_CONTROL_TC 0x01000000 /* Transmit Conf. in RGMII/SGMII */ 134#define GMAC_CONTROL_TC 0x01000000 /* Transmit Conf. in RGMII/SGMII */
@@ -155,6 +188,7 @@ enum inter_frame_gap {
155/* Programmable burst length (passed through platform) */ 188/* Programmable burst length (passed through platform) */
156#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */ 189#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
157#define DMA_BUS_MODE_PBL_SHIFT 8 190#define DMA_BUS_MODE_PBL_SHIFT 8
191#define DMA_BUS_MODE_ATDS 0x00000080 /* Alternate Descriptor Size */
158 192
159enum rx_tx_priority_ratio { 193enum rx_tx_priority_ratio {
160 double_ratio = 0x00004000, /*2:1 */ 194 double_ratio = 0x00004000, /*2:1 */
@@ -230,5 +264,7 @@ enum rtc_control {
230#define GMAC_MMC_TX_INTR 0x108 264#define GMAC_MMC_TX_INTR 0x108
231#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208 265#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
232 266
233extern const struct stmmac_dma_ops dwmac1000_dma_ops; 269extern const struct stmmac_dma_ops dwmac1000_dma_ops;
234#endif /* __DWMAC1000_H__ */ 270#endif /* __DWMAC1000_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index bfe022605498..29138da19db0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -28,6 +28,7 @@
28 28
29#include <linux/crc32.h> 29#include <linux/crc32.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/ethtool.h>
31#include <asm/io.h> 32#include <asm/io.h>
32#include "dwmac1000.h" 33#include "dwmac1000.h"
33 34
@@ -193,59 +194,91 @@ static void dwmac1000_pmt(void __iomem *ioaddr, unsigned long mode)
193 writel(pmt, ioaddr + GMAC_PMT); 194 writel(pmt, ioaddr + GMAC_PMT);
194} 195}
195 196
196 197static int dwmac1000_irq_status(void __iomem *ioaddr,
197static int dwmac1000_irq_status(void __iomem *ioaddr) 198 struct stmmac_extra_stats *x)
198{ 199{
199 u32 intr_status = readl(ioaddr + GMAC_INT_STATUS); 200 u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
200 int status = 0; 201 int ret = 0;
201 202
202 /* Unused events (e.g. MMC interrupts) are not handled. */ 203 /* Unused events (e.g. MMC interrupts) are not handled. */
203 if ((intr_status & mmc_tx_irq)) { 204 if ((intr_status & mmc_tx_irq)) {
204 CHIP_DBG(KERN_INFO "GMAC: MMC tx interrupt: 0x%08x\n", 205 CHIP_DBG(KERN_INFO "GMAC: MMC tx interrupt: 0x%08x\n",
205 readl(ioaddr + GMAC_MMC_TX_INTR)); 206 readl(ioaddr + GMAC_MMC_TX_INTR));
206 status |= core_mmc_tx_irq; 207 x->mmc_tx_irq_n++;
207 } 208 }
208 if (unlikely(intr_status & mmc_rx_irq)) { 209 if (unlikely(intr_status & mmc_rx_irq)) {
209 CHIP_DBG(KERN_INFO "GMAC: MMC rx interrupt: 0x%08x\n", 210 CHIP_DBG(KERN_INFO "GMAC: MMC rx interrupt: 0x%08x\n",
210 readl(ioaddr + GMAC_MMC_RX_INTR)); 211 readl(ioaddr + GMAC_MMC_RX_INTR));
211 status |= core_mmc_rx_irq; 212 x->mmc_rx_irq_n++;
212 } 213 }
213 if (unlikely(intr_status & mmc_rx_csum_offload_irq)) { 214 if (unlikely(intr_status & mmc_rx_csum_offload_irq)) {
214 CHIP_DBG(KERN_INFO "GMAC: MMC rx csum offload: 0x%08x\n", 215 CHIP_DBG(KERN_INFO "GMAC: MMC rx csum offload: 0x%08x\n",
215 readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD)); 216 readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
216 status |= core_mmc_rx_csum_offload_irq; 217 x->mmc_rx_csum_offload_irq_n++;
217 } 218 }
218 if (unlikely(intr_status & pmt_irq)) { 219 if (unlikely(intr_status & pmt_irq)) {
219 CHIP_DBG(KERN_INFO "GMAC: received Magic frame\n"); 220 CHIP_DBG(KERN_INFO "GMAC: received Magic frame\n");
220 /* clear the PMT bits 5 and 6 by reading the PMT 221 /* clear the PMT bits 5 and 6 by reading the PMT
221 * status register. */ 222 * status register. */
222 readl(ioaddr + GMAC_PMT); 223 readl(ioaddr + GMAC_PMT);
223 status |= core_irq_receive_pmt_irq; 224 x->irq_receive_pmt_irq_n++;
224 } 225 }
225 /* MAC tx/rx EEE LPI entry/exit interrupts */ 226 /* MAC tx/rx EEE LPI entry/exit interrupts */
226 if (intr_status & lpiis_irq) { 227 if (intr_status & lpiis_irq) {
227 /* Clean LPI interrupt by reading the Reg 12 */ 228 /* Clean LPI interrupt by reading the Reg 12 */
228 u32 lpi_status = readl(ioaddr + LPI_CTRL_STATUS); 229 ret = readl(ioaddr + LPI_CTRL_STATUS);
229 230
230 if (lpi_status & LPI_CTRL_STATUS_TLPIEN) { 231 if (ret & LPI_CTRL_STATUS_TLPIEN) {
231 CHIP_DBG(KERN_INFO "GMAC TX entered in LPI\n"); 232 CHIP_DBG(KERN_INFO "GMAC TX entered in LPI\n");
232 status |= core_irq_tx_path_in_lpi_mode; 233 x->irq_tx_path_in_lpi_mode_n++;
233 } 234 }
234 if (lpi_status & LPI_CTRL_STATUS_TLPIEX) { 235 if (ret & LPI_CTRL_STATUS_TLPIEX) {
235 CHIP_DBG(KERN_INFO "GMAC TX exit from LPI\n"); 236 CHIP_DBG(KERN_INFO "GMAC TX exit from LPI\n");
236 status |= core_irq_tx_path_exit_lpi_mode; 237 x->irq_tx_path_exit_lpi_mode_n++;
237 } 238 }
238 if (lpi_status & LPI_CTRL_STATUS_RLPIEN) { 239 if (ret & LPI_CTRL_STATUS_RLPIEN) {
239 CHIP_DBG(KERN_INFO "GMAC RX entered in LPI\n"); 240 CHIP_DBG(KERN_INFO "GMAC RX entered in LPI\n");
240 status |= core_irq_rx_path_in_lpi_mode; 241 x->irq_rx_path_in_lpi_mode_n++;
241 } 242 }
242 if (lpi_status & LPI_CTRL_STATUS_RLPIEX) { 243 if (ret & LPI_CTRL_STATUS_RLPIEX) {
243 CHIP_DBG(KERN_INFO "GMAC RX exit from LPI\n"); 244 CHIP_DBG(KERN_INFO "GMAC RX exit from LPI\n");
244 status |= core_irq_rx_path_exit_lpi_mode; 245 x->irq_rx_path_exit_lpi_mode_n++;
246 }
247 }
248
249 if ((intr_status & pcs_ane_irq) || (intr_status & pcs_link_irq)) {
250 CHIP_DBG(KERN_INFO "GMAC PCS ANE IRQ\n");
251 readl(ioaddr + GMAC_AN_STATUS);
252 x->irq_pcs_ane_n++;
253 }
254 if (intr_status & rgmii_irq) {
255 u32 status = readl(ioaddr + GMAC_S_R_GMII);
256 CHIP_DBG(KERN_INFO "GMAC RGMII/SGMII interrupt\n");
257 x->irq_rgmii_n++;
258
259 /* Save and dump the link status. */
260 if (status & GMAC_S_R_GMII_LINK) {
261 int speed_value = (status & GMAC_S_R_GMII_SPEED) >>
262 GMAC_S_R_GMII_SPEED_SHIFT;
263 x->pcs_duplex = (status & GMAC_S_R_GMII_MODE);
264
265 if (speed_value == GMAC_S_R_GMII_SPEED_125)
266 x->pcs_speed = SPEED_1000;
267 else if (speed_value == GMAC_S_R_GMII_SPEED_25)
268 x->pcs_speed = SPEED_100;
269 else
270 x->pcs_speed = SPEED_10;
271
272 x->pcs_link = 1;
273 pr_debug("Link is Up - %d/%s\n", (int) x->pcs_speed,
274 x->pcs_duplex ? "Full" : "Half");
275 } else {
276 x->pcs_link = 0;
277 pr_debug("Link is Down\n");
245 } 278 }
246 } 279 }
247 280
248 return status; 281 return ret;
249} 282}
250 283
251static void dwmac1000_set_eee_mode(void __iomem *ioaddr) 284static void dwmac1000_set_eee_mode(void __iomem *ioaddr)
@@ -297,6 +330,41 @@ static void dwmac1000_set_eee_timer(void __iomem *ioaddr, int ls, int tw)
297 writel(value, ioaddr + LPI_TIMER_CTRL); 330 writel(value, ioaddr + LPI_TIMER_CTRL);
298} 331}
299 332
333static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool restart)
334{
335 u32 value;
336
337 value = readl(ioaddr + GMAC_AN_CTRL);
338 /* Enable auto-negotiation and external loopback */
339 value |= GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
340
341 if (restart)
342 value |= GMAC_AN_CTRL_RAN;
343
344 writel(value, ioaddr + GMAC_AN_CTRL);
345}
346
347static void dwmac1000_get_adv(void __iomem *ioaddr, struct rgmii_adv *adv)
348{
349 u32 value = readl(ioaddr + GMAC_ANE_ADV);
350
351 if (value & GMAC_ANE_FD)
352 adv->duplex = DUPLEX_FULL;
353 if (value & GMAC_ANE_HD)
354 adv->duplex |= DUPLEX_HALF;
355
356 adv->pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
357
358 value = readl(ioaddr + GMAC_ANE_LPA);
359
360 if (value & GMAC_ANE_FD)
361 adv->lp_duplex = DUPLEX_FULL;
362 if (value & GMAC_ANE_HD)
363 adv->lp_duplex |= DUPLEX_HALF;
364
365 adv->lp_pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
366}
367
300static const struct stmmac_ops dwmac1000_ops = { 368static const struct stmmac_ops dwmac1000_ops = {
301 .core_init = dwmac1000_core_init, 369 .core_init = dwmac1000_core_init,
302 .rx_ipc = dwmac1000_rx_ipc_enable, 370 .rx_ipc = dwmac1000_rx_ipc_enable,
@@ -311,6 +379,8 @@ static const struct stmmac_ops dwmac1000_ops = {
311 .reset_eee_mode = dwmac1000_reset_eee_mode, 379 .reset_eee_mode = dwmac1000_reset_eee_mode,
312 .set_eee_timer = dwmac1000_set_eee_timer, 380 .set_eee_timer = dwmac1000_set_eee_timer,
313 .set_eee_pls = dwmac1000_set_eee_pls, 381 .set_eee_pls = dwmac1000_set_eee_pls,
382 .ctrl_ane = dwmac1000_ctrl_ane,
383 .get_adv = dwmac1000_get_adv,
314}; 384};
315 385
316struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr) 386struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index bf83c03bfd06..f1c4b2c00aa5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -30,8 +30,8 @@
30#include "dwmac1000.h" 30#include "dwmac1000.h"
31#include "dwmac_dma.h" 31#include "dwmac_dma.h"
32 32
33static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, 33static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
34 int mb, int burst_len, u32 dma_tx, u32 dma_rx) 34 int burst_len, u32 dma_tx, u32 dma_rx, int atds)
35{ 35{
36 u32 value = readl(ioaddr + DMA_BUS_MODE); 36 u32 value = readl(ioaddr + DMA_BUS_MODE);
37 int limit; 37 int limit;
@@ -73,6 +73,10 @@ static int dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb,
73#ifdef CONFIG_STMMAC_DA 73#ifdef CONFIG_STMMAC_DA
74 value |= DMA_BUS_MODE_DA; /* Rx has priority over tx */ 74 value |= DMA_BUS_MODE_DA; /* Rx has priority over tx */
75#endif 75#endif
76
77 if (atds)
78 value |= DMA_BUS_MODE_ATDS;
79
76 writel(value, ioaddr + DMA_BUS_MODE); 80 writel(value, ioaddr + DMA_BUS_MODE);
77 81
78 /* In case of GMAC AXI configuration, program the DMA_AXI_BUS_MODE 82 /* In case of GMAC AXI configuration, program the DMA_AXI_BUS_MODE
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index f83210e7c221..cb86a58c1c5f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -72,7 +72,8 @@ static int dwmac100_rx_ipc_enable(void __iomem *ioaddr)
72 return 0; 72 return 0;
73} 73}
74 74
75static int dwmac100_irq_status(void __iomem *ioaddr) 75static int dwmac100_irq_status(void __iomem *ioaddr,
76 struct stmmac_extra_stats *x)
76{ 77{
77 return 0; 78 return 0;
78} 79}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index c2b4d55a79b6..e979a8b2ae42 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -32,8 +32,8 @@
32#include "dwmac100.h" 32#include "dwmac100.h"
33#include "dwmac_dma.h" 33#include "dwmac_dma.h"
34 34
35static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, 35static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
36 int mb, int burst_len, u32 dma_tx, u32 dma_rx) 36 int burst_len, u32 dma_tx, u32 dma_rx, int atds)
37{ 37{
38 u32 value = readl(ioaddr + DMA_BUS_MODE); 38 u32 value = readl(ioaddr + DMA_BUS_MODE);
39 int limit; 39 int limit;
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 2fc8ef95f97a..0fbc8fafa706 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -150,6 +150,57 @@ static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
150 return ret; 150 return ret;
151} 151}
152 152
153static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
154 struct dma_extended_desc *p)
155{
156 if (unlikely(p->basic.des01.erx.rx_mac_addr)) {
157 if (p->des4.erx.ip_hdr_err)
158 x->ip_hdr_err++;
159 if (p->des4.erx.ip_payload_err)
160 x->ip_payload_err++;
161 if (p->des4.erx.ip_csum_bypassed)
162 x->ip_csum_bypassed++;
163 if (p->des4.erx.ipv4_pkt_rcvd)
164 x->ipv4_pkt_rcvd++;
165 if (p->des4.erx.ipv6_pkt_rcvd)
166 x->ipv6_pkt_rcvd++;
167 if (p->des4.erx.msg_type == RDES_EXT_SYNC)
168 x->rx_msg_type_sync++;
169 else if (p->des4.erx.msg_type == RDES_EXT_FOLLOW_UP)
170 x->rx_msg_type_follow_up++;
171 else if (p->des4.erx.msg_type == RDES_EXT_DELAY_REQ)
172 x->rx_msg_type_delay_req++;
173 else if (p->des4.erx.msg_type == RDES_EXT_DELAY_RESP)
174 x->rx_msg_type_delay_resp++;
175 else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_REQ)
176 x->rx_msg_type_pdelay_req++;
177 else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_RESP)
178 x->rx_msg_type_pdelay_resp++;
179 else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_FOLLOW_UP)
180 x->rx_msg_type_pdelay_follow_up++;
181 else
182 x->rx_msg_type_ext_no_ptp++;
183 if (p->des4.erx.ptp_frame_type)
184 x->ptp_frame_type++;
185 if (p->des4.erx.ptp_ver)
186 x->ptp_ver++;
187 if (p->des4.erx.timestamp_dropped)
188 x->timestamp_dropped++;
189 if (p->des4.erx.av_pkt_rcvd)
190 x->av_pkt_rcvd++;
191 if (p->des4.erx.av_tagged_pkt_rcvd)
192 x->av_tagged_pkt_rcvd++;
193 if (p->des4.erx.vlan_tag_priority_val)
194 x->vlan_tag_priority_val++;
195 if (p->des4.erx.l3_filter_match)
196 x->l3_filter_match++;
197 if (p->des4.erx.l4_filter_match)
198 x->l4_filter_match++;
199 if (p->des4.erx.l3_l4_filter_no_match)
200 x->l3_l4_filter_no_match++;
201 }
202}
203
153static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, 204static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
154 struct dma_desc *p) 205 struct dma_desc *p)
155{ 206{
@@ -198,7 +249,7 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
198 * At any rate, we need to understand if the CSUM hw computation is ok 249 * At any rate, we need to understand if the CSUM hw computation is ok
199 * and report this info to the upper layers. */ 250 * and report this info to the upper layers. */
200 ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error, 251 ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error,
201 p->des01.erx.frame_type, p->des01.erx.payload_csum_error); 252 p->des01.erx.frame_type, p->des01.erx.rx_mac_addr);
202 253
203 if (unlikely(p->des01.erx.dribbling)) { 254 if (unlikely(p->des01.erx.dribbling)) {
204 CHIP_DBG(KERN_ERR "GMAC RX: dribbling error\n"); 255 CHIP_DBG(KERN_ERR "GMAC RX: dribbling error\n");
@@ -225,34 +276,32 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
225 x->rx_vlan++; 276 x->rx_vlan++;
226 } 277 }
227#endif 278#endif
279
228 return ret; 280 return ret;
229} 281}
230 282
231static void enh_desc_init_rx_desc(struct dma_desc *p, unsigned int ring_size, 283static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
232 int disable_rx_ic) 284 int mode, int end)
233{ 285{
234 int i; 286 p->des01.erx.own = 1;
235 for (i = 0; i < ring_size; i++) { 287 p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
236 p->des01.erx.own = 1;
237 p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
238 288
239 ehn_desc_rx_set_on_ring_chain(p, (i == ring_size - 1)); 289 if (mode == STMMAC_CHAIN_MODE)
290 ehn_desc_rx_set_on_chain(p, end);
291 else
292 ehn_desc_rx_set_on_ring(p, end);
240 293
241 if (disable_rx_ic) 294 if (disable_rx_ic)
242 p->des01.erx.disable_ic = 1; 295 p->des01.erx.disable_ic = 1;
243 p++;
244 }
245} 296}
246 297
247static void enh_desc_init_tx_desc(struct dma_desc *p, unsigned int ring_size) 298static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
248{ 299{
249 int i; 300 p->des01.etx.own = 0;
250 301 if (mode == STMMAC_CHAIN_MODE)
251 for (i = 0; i < ring_size; i++) { 302 ehn_desc_tx_set_on_chain(p, end);
252 p->des01.etx.own = 0; 303 else
253 ehn_desc_tx_set_on_ring_chain(p, (i == ring_size - 1)); 304 ehn_desc_tx_set_on_ring(p, end);
254 p++;
255 }
256} 305}
257 306
258static int enh_desc_get_tx_owner(struct dma_desc *p) 307static int enh_desc_get_tx_owner(struct dma_desc *p)
@@ -280,20 +329,26 @@ static int enh_desc_get_tx_ls(struct dma_desc *p)
280 return p->des01.etx.last_segment; 329 return p->des01.etx.last_segment;
281} 330}
282 331
283static void enh_desc_release_tx_desc(struct dma_desc *p) 332static void enh_desc_release_tx_desc(struct dma_desc *p, int mode)
284{ 333{
285 int ter = p->des01.etx.end_ring; 334 int ter = p->des01.etx.end_ring;
286 335
287 memset(p, 0, offsetof(struct dma_desc, des2)); 336 memset(p, 0, offsetof(struct dma_desc, des2));
288 enh_desc_end_tx_desc(p, ter); 337 if (mode == STMMAC_CHAIN_MODE)
338 enh_desc_end_tx_desc_on_chain(p, ter);
339 else
340 enh_desc_end_tx_desc_on_ring(p, ter);
289} 341}
290 342
291static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, 343static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
292 int csum_flag) 344 int csum_flag, int mode)
293{ 345{
294 p->des01.etx.first_segment = is_fs; 346 p->des01.etx.first_segment = is_fs;
295 347
296 enh_set_tx_desc_len(p, len); 348 if (mode == STMMAC_CHAIN_MODE)
349 enh_set_tx_desc_len_on_chain(p, len);
350 else
351 enh_set_tx_desc_len_on_ring(p, len);
297 352
298 if (likely(csum_flag)) 353 if (likely(csum_flag))
299 p->des01.etx.checksum_insertion = cic_full; 354 p->des01.etx.checksum_insertion = cic_full;
@@ -323,6 +378,49 @@ static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
323 return p->des01.erx.frame_length; 378 return p->des01.erx.frame_length;
324} 379}
325 380
381static void enh_desc_enable_tx_timestamp(struct dma_desc *p)
382{
383 p->des01.etx.time_stamp_enable = 1;
384}
385
386static int enh_desc_get_tx_timestamp_status(struct dma_desc *p)
387{
388 return p->des01.etx.time_stamp_status;
389}
390
391static u64 enh_desc_get_timestamp(void *desc, u32 ats)
392{
393 u64 ns;
394
395 if (ats) {
396 struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
397 ns = p->des6;
398 /* convert high/sec time stamp value to nanosecond */
399 ns += p->des7 * 1000000000ULL;
400 } else {
401 struct dma_desc *p = (struct dma_desc *)desc;
402 ns = p->des2;
403 ns += p->des3 * 1000000000ULL;
404 }
405
406 return ns;
407}
408
409static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats)
410{
411 if (ats) {
412 struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
413 return p->basic.des01.erx.ipc_csum_error;
414 } else {
415 struct dma_desc *p = (struct dma_desc *)desc;
416 if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff))
417 /* timestamp is corrupted, hence don't store it */
418 return 0;
419 else
420 return 1;
421 }
422}
423
326const struct stmmac_desc_ops enh_desc_ops = { 424const struct stmmac_desc_ops enh_desc_ops = {
327 .tx_status = enh_desc_get_tx_status, 425 .tx_status = enh_desc_get_tx_status,
328 .rx_status = enh_desc_get_rx_status, 426 .rx_status = enh_desc_get_rx_status,
@@ -339,4 +437,9 @@ const struct stmmac_desc_ops enh_desc_ops = {
339 .set_tx_owner = enh_desc_set_tx_owner, 437 .set_tx_owner = enh_desc_set_tx_owner,
340 .set_rx_owner = enh_desc_set_rx_owner, 438 .set_rx_owner = enh_desc_set_rx_owner,
341 .get_rx_frame_len = enh_desc_get_rx_frame_len, 439 .get_rx_frame_len = enh_desc_get_rx_frame_len,
440 .rx_extended_status = enh_desc_get_ext_status,
441 .enable_tx_timestamp = enh_desc_enable_tx_timestamp,
442 .get_tx_timestamp_status = enh_desc_get_tx_timestamp_status,
443 .get_timestamp = enh_desc_get_timestamp,
444 .get_rx_timestamp_status = enh_desc_get_rx_timestamp_status,
342}; 445};
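
Taken together, get_tx_timestamp_status and get_timestamp let the tx-clean path hand a hardware timestamp back to the socket. A sketch of such a consumer; the helper itself is hypothetical, mirroring the standard skb_tstamp_tx() flow, and priv->adv_ts is the field this patch adds:

static void example_get_tx_hwtstamp(struct stmmac_priv *priv,
				    struct dma_desc *p, struct sk_buff *skb)
{
	struct skb_shared_hwtstamps shhwtstamp;
	u64 ns;

	/* Read the timestamp only once the descriptor flags it as valid */
	if (!priv->hw->desc->get_tx_timestamp_status(p))
		return;

	ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
	memset(&shhwtstamp, 0, sizeof(shhwtstamp));
	shhwtstamp.hwtstamp = ns_to_ktime(ns);
	/* Deliver to the socket's error queue as a TX hardware timestamp */
	skb_tstamp_tx(skb, &shhwtstamp);
}
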
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index 68962c549a2d..7cbcea348c3d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -122,30 +122,28 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
122 return ret; 122 return ret;
123} 123}
124 124
125static void ndesc_init_rx_desc(struct dma_desc *p, unsigned int ring_size, 125static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
126 int disable_rx_ic) 126 int end)
127{ 127{
128 int i; 128 p->des01.rx.own = 1;
129 for (i = 0; i < ring_size; i++) { 129 p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
130 p->des01.rx.own = 1;
131 p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
132 130
133 ndesc_rx_set_on_ring_chain(p, (i == ring_size - 1)); 131 if (mode == STMMAC_CHAIN_MODE)
132 ndesc_rx_set_on_chain(p, end);
133 else
134 ndesc_rx_set_on_ring(p, end);
134 135
135 if (disable_rx_ic) 136 if (disable_rx_ic)
136 p->des01.rx.disable_ic = 1; 137 p->des01.rx.disable_ic = 1;
137 p++;
138 }
139} 138}
140 139
141static void ndesc_init_tx_desc(struct dma_desc *p, unsigned int ring_size) 140static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end)
142{ 141{
143 int i; 142 p->des01.tx.own = 0;
144 for (i = 0; i < ring_size; i++) { 143 if (mode == STMMAC_CHAIN_MODE)
145 p->des01.tx.own = 0; 144 ndesc_tx_set_on_chain(p, end);
146 ndesc_tx_set_on_ring_chain(p, (i == (ring_size - 1))); 145 else
147 p++; 146 ndesc_tx_set_on_ring(p, end);
148 }
149} 147}
150 148
151static int ndesc_get_tx_owner(struct dma_desc *p) 149static int ndesc_get_tx_owner(struct dma_desc *p)
@@ -173,19 +171,25 @@ static int ndesc_get_tx_ls(struct dma_desc *p)
173 return p->des01.tx.last_segment; 171 return p->des01.tx.last_segment;
174} 172}
175 173
176static void ndesc_release_tx_desc(struct dma_desc *p) 174static void ndesc_release_tx_desc(struct dma_desc *p, int mode)
177{ 175{
178 int ter = p->des01.tx.end_ring; 176 int ter = p->des01.tx.end_ring;
179 177
180 memset(p, 0, offsetof(struct dma_desc, des2)); 178 memset(p, 0, offsetof(struct dma_desc, des2));
181 ndesc_end_tx_desc(p, ter); 179 if (mode == STMMAC_CHAIN_MODE)
180 ndesc_end_tx_desc_on_chain(p, ter);
181 else
182 ndesc_end_tx_desc_on_ring(p, ter);
182} 183}
183 184
184static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, 185static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
185 int csum_flag) 186 int csum_flag, int mode)
186{ 187{
187 p->des01.tx.first_segment = is_fs; 188 p->des01.tx.first_segment = is_fs;
188 norm_set_tx_desc_len(p, len); 189 if (mode == STMMAC_CHAIN_MODE)
190 norm_set_tx_desc_len_on_chain(p, len);
191 else
192 norm_set_tx_desc_len_on_ring(p, len);
189 193
190 if (likely(csum_flag)) 194 if (likely(csum_flag))
191 p->des01.tx.checksum_insertion = cic_full; 195 p->des01.tx.checksum_insertion = cic_full;
@@ -215,6 +219,39 @@ static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
215 return p->des01.rx.frame_length; 219 return p->des01.rx.frame_length;
216} 220}
217 221
222static void ndesc_enable_tx_timestamp(struct dma_desc *p)
223{
224 p->des01.tx.time_stamp_enable = 1;
225}
226
227static int ndesc_get_tx_timestamp_status(struct dma_desc *p)
228{
229 return p->des01.tx.time_stamp_status;
230}
231
232static u64 ndesc_get_timestamp(void *desc, u32 ats)
233{
234 struct dma_desc *p = (struct dma_desc *)desc;
235 u64 ns;
236
237 ns = p->des2;
238 /* convert high/sec time stamp value to nanosecond */
239 ns += p->des3 * 1000000000ULL;
240
241 return ns;
242}
243
244static int ndesc_get_rx_timestamp_status(void *desc, u32 ats)
245{
246 struct dma_desc *p = (struct dma_desc *)desc;
247
248 if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff))
249 /* timestamp is corrupted, hence don't store it */
250 return 0;
251 else
252 return 1;
253}
254
218const struct stmmac_desc_ops ndesc_ops = { 255const struct stmmac_desc_ops ndesc_ops = {
219 .tx_status = ndesc_get_tx_status, 256 .tx_status = ndesc_get_tx_status,
220 .rx_status = ndesc_get_rx_status, 257 .rx_status = ndesc_get_rx_status,
@@ -231,4 +268,8 @@ const struct stmmac_desc_ops ndesc_ops = {
231 .set_tx_owner = ndesc_set_tx_owner, 268 .set_tx_owner = ndesc_set_tx_owner,
232 .set_rx_owner = ndesc_set_rx_owner, 269 .set_rx_owner = ndesc_set_rx_owner,
233 .get_rx_frame_len = ndesc_get_rx_frame_len, 270 .get_rx_frame_len = ndesc_get_rx_frame_len,
271 .enable_tx_timestamp = ndesc_enable_tx_timestamp,
272 .get_tx_timestamp_status = ndesc_get_tx_timestamp_status,
273 .get_timestamp = ndesc_get_timestamp,
274 .get_rx_timestamp_status = ndesc_get_rx_timestamp_status,
234}; 275};
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index 4b785e10f2ed..d0265a7d5a54 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -48,25 +48,30 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
48 48
49 desc->des2 = dma_map_single(priv->device, skb->data, 49 desc->des2 = dma_map_single(priv->device, skb->data,
50 bmax, DMA_TO_DEVICE); 50 bmax, DMA_TO_DEVICE);
51 priv->tx_skbuff_dma[entry] = desc->des2;
51 desc->des3 = desc->des2 + BUF_SIZE_4KiB; 52 desc->des3 = desc->des2 + BUF_SIZE_4KiB;
52 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, 53 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
53 csum); 54 STMMAC_RING_MODE);
54 wmb(); 55 wmb();
55 entry = (++priv->cur_tx) % txsize; 56 entry = (++priv->cur_tx) % txsize;
56 desc = priv->dma_tx + entry; 57 desc = priv->dma_tx + entry;
57 58
58 desc->des2 = dma_map_single(priv->device, skb->data + bmax, 59 desc->des2 = dma_map_single(priv->device, skb->data + bmax,
59 len, DMA_TO_DEVICE); 60 len, DMA_TO_DEVICE);
61 priv->tx_skbuff_dma[entry] = desc->des2;
60 desc->des3 = desc->des2 + BUF_SIZE_4KiB; 62 desc->des3 = desc->des2 + BUF_SIZE_4KiB;
61 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum); 63 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
64 STMMAC_RING_MODE);
62 wmb(); 65 wmb();
63 priv->hw->desc->set_tx_owner(desc); 66 priv->hw->desc->set_tx_owner(desc);
64 priv->tx_skbuff[entry] = NULL; 67 priv->tx_skbuff[entry] = NULL;
65 } else { 68 } else {
66 desc->des2 = dma_map_single(priv->device, skb->data, 69 desc->des2 = dma_map_single(priv->device, skb->data,
67 nopaged_len, DMA_TO_DEVICE); 70 nopaged_len, DMA_TO_DEVICE);
71 priv->tx_skbuff_dma[entry] = desc->des2;
68 desc->des3 = desc->des2 + BUF_SIZE_4KiB; 72 desc->des3 = desc->des2 + BUF_SIZE_4KiB;
69 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum); 73 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
74 STMMAC_RING_MODE);
70 } 75 }
71 76
72 return entry; 77 return entry;
@@ -82,27 +87,23 @@ static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc)
82 return ret; 87 return ret;
83} 88}
84 89
85static void stmmac_refill_desc3(int bfsize, struct dma_desc *p) 90static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
86{ 91{
87 /* Fill DES3 in case of RING mode */ 92 struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
88 if (bfsize >= BUF_SIZE_8KiB)
89 p->des3 = p->des2 + BUF_SIZE_8KiB;
90}
91 93
92/* In ring mode we need to fill the desc3 because it is used 94 if (unlikely(priv->plat->has_gmac))
93 * as buffer */ 95 /* Fill DES3 in case of RING mode */
94static void stmmac_init_desc3(int des3_as_data_buf, struct dma_desc *p) 96 if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
95{ 97 p->des3 = p->des2 + BUF_SIZE_8KiB;
96 if (unlikely(des3_as_data_buf))
97 p->des3 = p->des2 + BUF_SIZE_8KiB;
98} 98}
99 99
100static void stmmac_init_dma_chain(struct dma_desc *des, dma_addr_t phy_addr, 100/* In ring mode we need to fill the desc3 because it is used as buffer */
101 unsigned int size) 101static void stmmac_init_desc3(struct dma_desc *p)
102{ 102{
103 p->des3 = p->des2 + BUF_SIZE_8KiB;
103} 104}
104 105
105static void stmmac_clean_desc3(struct dma_desc *p) 106static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
106{ 107{
107 if (unlikely(p->des3)) 108 if (unlikely(p->des3))
108 p->des3 = 0; 109 p->des3 = 0;
@@ -121,7 +122,6 @@ const struct stmmac_ring_mode_ops ring_mode_ops = {
121 .jumbo_frm = stmmac_jumbo_frm, 122 .jumbo_frm = stmmac_jumbo_frm,
122 .refill_desc3 = stmmac_refill_desc3, 123 .refill_desc3 = stmmac_refill_desc3,
123 .init_desc3 = stmmac_init_desc3, 124 .init_desc3 = stmmac_init_desc3,
124 .init_dma_chain = stmmac_init_dma_chain,
125 .clean_desc3 = stmmac_clean_desc3, 125 .clean_desc3 = stmmac_clean_desc3,
126 .set_16kib_bfsize = stmmac_set_16kib_bfsize, 126 .set_16kib_bfsize = stmmac_set_16kib_bfsize,
127}; 127};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index b05df8983be5..75f997b467aa 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -24,25 +24,29 @@
24#define __STMMAC_H__ 24#define __STMMAC_H__
25 25
26#define STMMAC_RESOURCE_NAME "stmmaceth" 26#define STMMAC_RESOURCE_NAME "stmmaceth"
27#define DRV_MODULE_VERSION "Nov_2012" 27#define DRV_MODULE_VERSION "March_2013"
28 28
29#include <linux/clk.h> 29#include <linux/clk.h>
30#include <linux/stmmac.h> 30#include <linux/stmmac.h>
31#include <linux/phy.h> 31#include <linux/phy.h>
32#include <linux/pci.h> 32#include <linux/pci.h>
33#include "common.h" 33#include "common.h"
34#include <linux/ptp_clock_kernel.h>
34 35
35struct stmmac_priv { 36struct stmmac_priv {
36 /* Frequently used values are kept adjacent for cache effect */ 37 /* Frequently used values are kept adjacent for cache effect */
37 struct dma_desc *dma_tx ____cacheline_aligned; 38 struct dma_desc *dma_tx ____cacheline_aligned; /* Basic TX desc */
39 struct dma_extended_desc *dma_etx; /* Extended TX descriptor */
38 dma_addr_t dma_tx_phy; 40 dma_addr_t dma_tx_phy;
39 struct sk_buff **tx_skbuff; 41 struct sk_buff **tx_skbuff;
42 dma_addr_t *tx_skbuff_dma;
40 unsigned int cur_tx; 43 unsigned int cur_tx;
41 unsigned int dirty_tx; 44 unsigned int dirty_tx;
42 unsigned int dma_tx_size; 45 unsigned int dma_tx_size;
43 int tx_coalesce; 46 int tx_coalesce;
44 47
45 struct dma_desc *dma_rx ; 48 struct dma_desc *dma_rx; /* Basic RX descriptor */
49 struct dma_extended_desc *dma_erx; /* Extended RX descriptor */
46 unsigned int cur_rx; 50 unsigned int cur_rx;
47 unsigned int dirty_rx; 51 unsigned int dirty_rx;
48 struct sk_buff **rx_skbuff; 52 struct sk_buff **rx_skbuff;
@@ -93,6 +97,16 @@ struct stmmac_priv {
93 u32 tx_coal_timer; 97 u32 tx_coal_timer;
94 int use_riwt; 98 int use_riwt;
95 u32 rx_riwt; 99 u32 rx_riwt;
100 unsigned int mode;
101 int extend_desc;
102 int pcs;
103 int hwts_tx_en;
104 int hwts_rx_en;
105 unsigned int default_addend;
106 u32 adv_ts;
107 struct ptp_clock *ptp_clock;
108 struct ptp_clock_info ptp_clock_ops;
109 spinlock_t ptp_lock;
96}; 110};
97 111
98extern int phyaddr; 112extern int phyaddr;
@@ -102,6 +116,9 @@ extern int stmmac_mdio_register(struct net_device *ndev);
102extern void stmmac_set_ethtool_ops(struct net_device *netdev); 116extern void stmmac_set_ethtool_ops(struct net_device *netdev);
103extern const struct stmmac_desc_ops enh_desc_ops; 117extern const struct stmmac_desc_ops enh_desc_ops;
104extern const struct stmmac_desc_ops ndesc_ops; 118extern const struct stmmac_desc_ops ndesc_ops;
119extern const struct stmmac_hwtimestamp stmmac_ptp;
120extern int stmmac_ptp_register(struct stmmac_priv *priv);
121extern void stmmac_ptp_unregister(struct stmmac_priv *priv);
105int stmmac_freeze(struct net_device *ndev); 122int stmmac_freeze(struct net_device *ndev);
106int stmmac_restore(struct net_device *ndev); 123int stmmac_restore(struct net_device *ndev);
107int stmmac_resume(struct net_device *ndev); 124int stmmac_resume(struct net_device *ndev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index d1ac39c1b05d..c5f9cb85c8ef 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -27,6 +27,7 @@
27#include <linux/interrupt.h> 27#include <linux/interrupt.h>
28#include <linux/mii.h> 28#include <linux/mii.h>
29#include <linux/phy.h> 29#include <linux/phy.h>
30#include <linux/net_tstamp.h>
30#include <asm/io.h> 31#include <asm/io.h>
31 32
32#include "stmmac.h" 33#include "stmmac.h"
@@ -108,6 +109,33 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
108 STMMAC_STAT(irq_rx_path_in_lpi_mode_n), 109 STMMAC_STAT(irq_rx_path_in_lpi_mode_n),
109 STMMAC_STAT(irq_rx_path_exit_lpi_mode_n), 110 STMMAC_STAT(irq_rx_path_exit_lpi_mode_n),
110 STMMAC_STAT(phy_eee_wakeup_error_n), 111 STMMAC_STAT(phy_eee_wakeup_error_n),
112 /* Extended RDES status */
113 STMMAC_STAT(ip_hdr_err),
114 STMMAC_STAT(ip_payload_err),
115 STMMAC_STAT(ip_csum_bypassed),
116 STMMAC_STAT(ipv4_pkt_rcvd),
117 STMMAC_STAT(ipv6_pkt_rcvd),
118 STMMAC_STAT(rx_msg_type_ext_no_ptp),
119 STMMAC_STAT(rx_msg_type_sync),
120 STMMAC_STAT(rx_msg_type_follow_up),
121 STMMAC_STAT(rx_msg_type_delay_req),
122 STMMAC_STAT(rx_msg_type_delay_resp),
123 STMMAC_STAT(rx_msg_type_pdelay_req),
124 STMMAC_STAT(rx_msg_type_pdelay_resp),
125 STMMAC_STAT(rx_msg_type_pdelay_follow_up),
126 STMMAC_STAT(ptp_frame_type),
127 STMMAC_STAT(ptp_ver),
128 STMMAC_STAT(timestamp_dropped),
129 STMMAC_STAT(av_pkt_rcvd),
130 STMMAC_STAT(av_tagged_pkt_rcvd),
131 STMMAC_STAT(vlan_tag_priority_val),
132 STMMAC_STAT(l3_filter_match),
133 STMMAC_STAT(l4_filter_match),
134 STMMAC_STAT(l3_l4_filter_no_match),
135 /* PCS */
136 STMMAC_STAT(irq_pcs_ane_n),
137 STMMAC_STAT(irq_pcs_link_n),
138 STMMAC_STAT(irq_rgmii_n),
111}; 139};
112#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats) 140#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
113 141
@@ -219,6 +247,70 @@ static int stmmac_ethtool_getsettings(struct net_device *dev,
219 struct stmmac_priv *priv = netdev_priv(dev); 247 struct stmmac_priv *priv = netdev_priv(dev);
220 struct phy_device *phy = priv->phydev; 248 struct phy_device *phy = priv->phydev;
221 int rc; 249 int rc;
250
251 if ((priv->pcs & STMMAC_PCS_RGMII) || (priv->pcs & STMMAC_PCS_SGMII)) {
252 struct rgmii_adv adv;
253
254 if (!priv->xstats.pcs_link) {
255 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
256 cmd->duplex = DUPLEX_UNKNOWN;
257 return 0;
258 }
259 cmd->duplex = priv->xstats.pcs_duplex;
260
261 ethtool_cmd_speed_set(cmd, priv->xstats.pcs_speed);
262
263 /* Get and convert ADV/LP_ADV from the HW AN registers */
264 if (priv->hw->mac->get_adv)
265 priv->hw->mac->get_adv(priv->ioaddr, &adv);
266 else
267 return -EOPNOTSUPP; /* should never happen */
268
269 /* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */
270
271 if (adv.pause & STMMAC_PCS_PAUSE)
272 cmd->advertising |= ADVERTISED_Pause;
273 if (adv.pause & STMMAC_PCS_ASYM_PAUSE)
274 cmd->advertising |= ADVERTISED_Asym_Pause;
275 if (adv.lp_pause & STMMAC_PCS_PAUSE)
276 cmd->lp_advertising |= ADVERTISED_Pause;
277 if (adv.lp_pause & STMMAC_PCS_ASYM_PAUSE)
278 cmd->lp_advertising |= ADVERTISED_Asym_Pause;
279
280 /* Reg49[3] always set because ANE is always supported */
281 cmd->autoneg = AUTONEG_ENABLE;
282 cmd->supported |= SUPPORTED_Autoneg;
283 cmd->advertising |= ADVERTISED_Autoneg;
284 cmd->lp_advertising |= ADVERTISED_Autoneg;
285
286 if (adv.duplex) {
287 cmd->supported |= (SUPPORTED_1000baseT_Full |
288 SUPPORTED_100baseT_Full |
289 SUPPORTED_10baseT_Full);
290 cmd->advertising |= (ADVERTISED_1000baseT_Full |
291 ADVERTISED_100baseT_Full |
292 ADVERTISED_10baseT_Full);
293 } else {
294 cmd->supported |= (SUPPORTED_1000baseT_Half |
295 SUPPORTED_100baseT_Half |
296 SUPPORTED_10baseT_Half);
297 cmd->advertising |= (ADVERTISED_1000baseT_Half |
298 ADVERTISED_100baseT_Half |
299 ADVERTISED_10baseT_Half);
300 }
301 if (adv.lp_duplex)
302 cmd->lp_advertising |= (ADVERTISED_1000baseT_Full |
303 ADVERTISED_100baseT_Full |
304 ADVERTISED_10baseT_Full);
305 else
306 cmd->lp_advertising |= (ADVERTISED_1000baseT_Half |
307 ADVERTISED_100baseT_Half |
308 ADVERTISED_10baseT_Half);
309 cmd->port = PORT_OTHER;
310
311 return 0;
312 }
313
222 if (phy == NULL) { 314 if (phy == NULL) {
223 pr_err("%s: %s: PHY is not registered\n", 315 pr_err("%s: %s: PHY is not registered\n",
224 __func__, dev->name); 316 __func__, dev->name);
@@ -243,6 +335,30 @@ static int stmmac_ethtool_setsettings(struct net_device *dev,
243 struct phy_device *phy = priv->phydev; 335 struct phy_device *phy = priv->phydev;
244 int rc; 336 int rc;
245 337
338 if ((priv->pcs & STMMAC_PCS_RGMII) || (priv->pcs & STMMAC_PCS_SGMII)) {
339 /* Only auto-negotiation is supported in PCS mode */
340 if (cmd->autoneg != AUTONEG_ENABLE)
341 return -EINVAL;
342
343 spin_lock(&priv->lock);
344 if (priv->hw->mac->ctrl_ane)
345 priv->hw->mac->ctrl_ane(priv->ioaddr, 1);
346 spin_unlock(&priv->lock);
347
348 return 0;
349 }
361
246 spin_lock(&priv->lock); 362 spin_lock(&priv->lock);
247 rc = phy_ethtool_sset(phy, cmd); 363 rc = phy_ethtool_sset(phy, cmd);
248 spin_unlock(&priv->lock); 364 spin_unlock(&priv->lock);
@@ -312,6 +428,9 @@ stmmac_get_pauseparam(struct net_device *netdev,
312{ 428{
313 struct stmmac_priv *priv = netdev_priv(netdev); 429 struct stmmac_priv *priv = netdev_priv(netdev);
314 430
431 if (priv->pcs) /* FIXME */
432 return;
433
315 spin_lock(&priv->lock); 434 spin_lock(&priv->lock);
316 435
317 pause->rx_pause = 0; 436 pause->rx_pause = 0;
@@ -335,6 +454,9 @@ stmmac_set_pauseparam(struct net_device *netdev,
335 int new_pause = FLOW_OFF; 454 int new_pause = FLOW_OFF;
336 int ret = 0; 455 int ret = 0;
337 456
457 if (priv->pcs) /* FIXME */
458 return -EOPNOTSUPP;
459
338 spin_lock(&priv->lock); 460 spin_lock(&priv->lock);
339 461
340 if (pause->rx_pause) 462 if (pause->rx_pause)
@@ -604,6 +726,38 @@ static int stmmac_set_coalesce(struct net_device *dev,
604 return 0; 726 return 0;
605} 727}
606 728
729static int stmmac_get_ts_info(struct net_device *dev,
730 struct ethtool_ts_info *info)
731{
732 struct stmmac_priv *priv = netdev_priv(dev);
733
734 if ((priv->hwts_tx_en) && (priv->hwts_rx_en)) {
735
736 info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
737 SOF_TIMESTAMPING_RX_HARDWARE |
738 SOF_TIMESTAMPING_RAW_HARDWARE;
739
740 if (priv->ptp_clock)
741 info->phc_index = ptp_clock_index(priv->ptp_clock);
742
743 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
744
745 info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) |
746 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
747 (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
748 (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
749 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
750 (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
751 (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
752 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
753 (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
754 (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
755 (1 << HWTSTAMP_FILTER_ALL));
756 return 0;
757 }
758 return ethtool_op_get_ts_info(dev, info);
759}
760
607static const struct ethtool_ops stmmac_ethtool_ops = { 761static const struct ethtool_ops stmmac_ethtool_ops = {
608 .begin = stmmac_check_if_running, 762 .begin = stmmac_check_if_running,
609 .get_drvinfo = stmmac_ethtool_getdrvinfo, 763 .get_drvinfo = stmmac_ethtool_getdrvinfo,
@@ -623,7 +777,7 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
623 .get_eee = stmmac_ethtool_op_get_eee, 777 .get_eee = stmmac_ethtool_op_get_eee,
624 .set_eee = stmmac_ethtool_op_set_eee, 778 .set_eee = stmmac_ethtool_op_set_eee,
625 .get_sset_count = stmmac_get_sset_count, 779 .get_sset_count = stmmac_get_sset_count,
626 .get_ts_info = ethtool_op_get_ts_info, 780 .get_ts_info = stmmac_get_ts_info,
627 .get_coalesce = stmmac_get_coalesce, 781 .get_coalesce = stmmac_get_coalesce,
628 .set_coalesce = stmmac_set_coalesce, 782 .set_coalesce = stmmac_set_coalesce,
629}; 783};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
new file mode 100644
index 000000000000..def7e75e1d57
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -0,0 +1,148 @@
1/*******************************************************************************
2 Copyright (C) 2013 Vayavya Labs Pvt Ltd
3
4 This implements all the API for managing HW timestamp & PTP.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
23 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
24*******************************************************************************/
25
26#include <linux/io.h>
27#include <linux/delay.h>
28#include "common.h"
29#include "stmmac_ptp.h"
30
31static void stmmac_config_hw_tstamping(void __iomem *ioaddr, u32 data)
32{
33 writel(data, ioaddr + PTP_TCR);
34}
35
36static void stmmac_config_sub_second_increment(void __iomem *ioaddr)
37{
38 u32 value = readl(ioaddr + PTP_TCR);
39 unsigned long data;
40
41 /* Convert the ptp_clock to a period in nanoseconds:
42 * period = (1 / ptp_clock) * 1000000000,
43 * where ptp_clock = 50MHz, so the increment is 20ns.
44 */
45 data = (1000000000ULL / 50000000);
46
47 /* 0.465ns accuracy */
48 if (value & PTP_TCR_TSCTRLSSR)
49 data = (data * 100) / 465;
50
51 writel(data, ioaddr + PTP_SSIR);
52}
53
54static int stmmac_init_systime(void __iomem *ioaddr, u32 sec, u32 nsec)
55{
56 int limit;
57 u32 value;
58
59 writel(sec, ioaddr + PTP_STSUR);
60 writel(nsec, ioaddr + PTP_STNSUR);
61 /* issue command to initialize the system time value */
62 value = readl(ioaddr + PTP_TCR);
63 value |= PTP_TCR_TSINIT;
64 writel(value, ioaddr + PTP_TCR);
65
66 /* wait for present system time initialize to complete */
67 limit = 10;
68 while (limit--) {
69 if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSINIT))
70 break;
71 mdelay(10);
72 }
73 if (limit < 0)
74 return -EBUSY;
75
76 return 0;
77}
78
79static int stmmac_config_addend(void __iomem *ioaddr, u32 addend)
80{
81 u32 value;
82 int limit;
83
84 writel(addend, ioaddr + PTP_TAR);
85 /* issue command to update the addend value */
86 value = readl(ioaddr + PTP_TCR);
87 value |= PTP_TCR_TSADDREG;
88 writel(value, ioaddr + PTP_TCR);
89
90 /* wait for present addend update to complete */
91 limit = 10;
92 while (limit--) {
93 if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSADDREG))
94 break;
95 mdelay(10);
96 }
97 if (limit < 0)
98 return -EBUSY;
99
100 return 0;
101}
102
103static int stmmac_adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
104 int add_sub)
105{
106 u32 value;
107 int limit;
108
109 writel(sec, ioaddr + PTP_STSUR);
110 writel(((add_sub << PTP_STNSUR_ADDSUB_SHIFT) | nsec),
111 ioaddr + PTP_STNSUR);
112 /* issue command to initialize the system time value */
113 value = readl(ioaddr + PTP_TCR);
114 value |= PTP_TCR_TSUPDT;
115 writel(value, ioaddr + PTP_TCR);
116
117 /* wait for present system time adjust/update to complete */
118 limit = 10;
119 while (limit--) {
120 if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSUPDT))
121 break;
122 mdelay(10);
123 }
124 if (limit < 0)
125 return -EBUSY;
126
127 return 0;
128}
129
130static u64 stmmac_get_systime(void __iomem *ioaddr)
131{
132 u64 ns;
133
134 ns = readl(ioaddr + PTP_STNSR);
135 /* convert sec time value to nanosecond */
136 ns += readl(ioaddr + PTP_STSR) * 1000000000ULL;
137
138 return ns;
139}
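
For illustration: if PTP_STSR reads 5 and PTP_STNSR reads 1000,
stmmac_get_systime() returns 5 * 1000000000 + 1000 = 5000001000ns.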
140
141const struct stmmac_hwtimestamp stmmac_ptp = {
142 .config_hw_tstamping = stmmac_config_hw_tstamping,
143 .init_systime = stmmac_init_systime,
144 .config_sub_second_increment = stmmac_config_sub_second_increment,
145 .config_addend = stmmac_config_addend,
146 .adjust_systime = stmmac_adjust_systime,
147 .get_systime = stmmac_get_systime,
148};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 39c6c5524633..6b26d31c268f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -47,6 +47,8 @@
47#include <linux/debugfs.h> 47#include <linux/debugfs.h>
48#include <linux/seq_file.h> 48#include <linux/seq_file.h>
49#endif 49#endif
50#include <linux/net_tstamp.h>
51#include "stmmac_ptp.h"
50#include "stmmac.h" 52#include "stmmac.h"
51 53
52#undef STMMAC_DEBUG 54#undef STMMAC_DEBUG
@@ -130,6 +132,13 @@ module_param(eee_timer, int, S_IRUGO | S_IWUSR);
130MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec"); 132MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
131#define STMMAC_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x)) 133#define STMMAC_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x))
132 134
 135/* By default the driver uses ring mode to manage the TX and RX descriptors,
 136 * but this parameter lets the user force chain mode instead (usage note below).
 137 */
138static unsigned int chain_mode;
139module_param(chain_mode, int, S_IRUGO);
140MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
141
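
A usage note, assuming the driver is built as the stmmac module: chain mode can
then be requested at load time with "modprobe stmmac chain_mode=1"; when the
driver is built in, the equivalent "chain_mode:1" option is parsed by
stmmac_cmdline_opt() in the hunk near the end of this patch.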
133static irqreturn_t stmmac_interrupt(int irq, void *dev_id); 142static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
134 143
135#ifdef CONFIG_STMMAC_DEBUG_FS 144#ifdef CONFIG_STMMAC_DEBUG_FS
@@ -304,6 +313,339 @@ static void stmmac_eee_adjust(struct stmmac_priv *priv)
304 priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link); 313 priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
305} 314}
306 315
316/* stmmac_get_tx_hwtstamp:
317 * @priv : pointer to private device structure.
318 * @entry : descriptor index to be used.
319 * @skb : the socket buffer
320 * Description :
 321 * This function reads the timestamp from the descriptor and passes it to
 322 * the stack; it also performs some sanity checks.
323 */
324static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
325 unsigned int entry,
326 struct sk_buff *skb)
327{
328 struct skb_shared_hwtstamps shhwtstamp;
329 u64 ns;
330 void *desc = NULL;
331
332 if (!priv->hwts_tx_en)
333 return;
334
335 /* if skb doesn't support hw tstamp */
336 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
337 return;
338
339 if (priv->adv_ts)
340 desc = (priv->dma_etx + entry);
341 else
342 desc = (priv->dma_tx + entry);
343
344 /* check tx tstamp status */
345 if (!priv->hw->desc->get_tx_timestamp_status((struct dma_desc *)desc))
346 return;
347
348 /* get the valid tstamp */
349 ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
350
351 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
352 shhwtstamp.hwtstamp = ns_to_ktime(ns);
353 /* pass tstamp to stack */
354 skb_tstamp_tx(skb, &shhwtstamp);
355
356 return;
357}
358
359/* stmmac_get_rx_hwtstamp:
360 * @priv : pointer to private device structure.
361 * @entry : descriptor index to be used.
362 * @skb : the socket buffer
363 * Description :
 364 * This function reads the received packet's timestamp from the descriptor
 365 * and passes it to the stack. It also performs some sanity checks.
366 */
367static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv,
368 unsigned int entry,
369 struct sk_buff *skb)
370{
371 struct skb_shared_hwtstamps *shhwtstamp = NULL;
372 u64 ns;
373 void *desc = NULL;
374
375 if (!priv->hwts_rx_en)
376 return;
377
378 if (priv->adv_ts)
379 desc = (priv->dma_erx + entry);
380 else
381 desc = (priv->dma_rx + entry);
382
383 /* if rx tstamp is not valid */
384 if (!priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts))
385 return;
386
387 /* get valid tstamp */
388 ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
389 shhwtstamp = skb_hwtstamps(skb);
390 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
391 shhwtstamp->hwtstamp = ns_to_ktime(ns);
392}
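
The two helpers above only deliver timestamps that user space has opted in to.
A minimal sketch of that opt-in (hypothetical user-space code, not part of this
patch), using only the standard SO_TIMESTAMPING socket option:

#include <sys/socket.h>
#include <linux/net_tstamp.h>

/* Request HW TX/RX timestamps on an already-created socket; the TX
 * timestamp taken by stmmac_get_tx_hwtstamp() is then read back from
 * the socket error queue, the RX one arrives as an SCM_TIMESTAMPING
 * control message on recvmsg().
 */
static int request_hw_timestamps(int sock)
{
	int flags = SOF_TIMESTAMPING_TX_HARDWARE |
		    SOF_TIMESTAMPING_RX_HARDWARE |
		    SOF_TIMESTAMPING_RAW_HARDWARE;

	return setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
			  &flags, sizeof(flags));
}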
393
394/**
395 * stmmac_hwtstamp_ioctl - control hardware timestamping.
396 * @dev: device pointer.
 397 * @ifr: An IOCTL specific structure that can contain a pointer to
398 * a proprietary structure used to pass information to the driver.
399 * Description:
400 * This function configures the MAC to enable/disable both outgoing(TX)
401 * and incoming(RX) packets time stamping based on user input.
402 * Return Value:
403 * 0 on success and an appropriate -ve integer on failure.
404 */
405static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
406{
407 struct stmmac_priv *priv = netdev_priv(dev);
408 struct hwtstamp_config config;
409 struct timespec now;
410 u64 temp = 0;
411 u32 ptp_v2 = 0;
412 u32 tstamp_all = 0;
413 u32 ptp_over_ipv4_udp = 0;
414 u32 ptp_over_ipv6_udp = 0;
415 u32 ptp_over_ethernet = 0;
416 u32 snap_type_sel = 0;
417 u32 ts_master_en = 0;
418 u32 ts_event_en = 0;
419 u32 value = 0;
420
421 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
422 netdev_alert(priv->dev, "No support for HW time stamping\n");
423 priv->hwts_tx_en = 0;
424 priv->hwts_rx_en = 0;
425
426 return -EOPNOTSUPP;
427 }
428
429 if (copy_from_user(&config, ifr->ifr_data,
430 sizeof(struct hwtstamp_config)))
431 return -EFAULT;
432
433 pr_debug("%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
434 __func__, config.flags, config.tx_type, config.rx_filter);
435
436 /* reserved for future extensions */
437 if (config.flags)
438 return -EINVAL;
439
440 switch (config.tx_type) {
441 case HWTSTAMP_TX_OFF:
442 priv->hwts_tx_en = 0;
443 break;
444 case HWTSTAMP_TX_ON:
445 priv->hwts_tx_en = 1;
446 break;
447 default:
448 return -ERANGE;
449 }
450
451 if (priv->adv_ts) {
452 switch (config.rx_filter) {
 453 /* do not time stamp any incoming packet */
454 case HWTSTAMP_FILTER_NONE:
455 config.rx_filter = HWTSTAMP_FILTER_NONE;
456 break;
457
458 /* PTP v1, UDP, any kind of event packet */
459 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
460 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
461 /* take time stamp for all event messages */
462 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
463
464 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
465 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
466 break;
467
468 /* PTP v1, UDP, Sync packet */
469 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
470 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
471 /* take time stamp for SYNC messages only */
472 ts_event_en = PTP_TCR_TSEVNTENA;
473
474 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
475 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
476 break;
477
478 /* PTP v1, UDP, Delay_req packet */
479 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
480 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
481 /* take time stamp for Delay_Req messages only */
482 ts_master_en = PTP_TCR_TSMSTRENA;
483 ts_event_en = PTP_TCR_TSEVNTENA;
484
485 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
486 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
487 break;
488
489 /* PTP v2, UDP, any kind of event packet */
490 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
491 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
492 ptp_v2 = PTP_TCR_TSVER2ENA;
493 /* take time stamp for all event messages */
494 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
495
496 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
497 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
498 break;
499
500 /* PTP v2, UDP, Sync packet */
501 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
502 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
503 ptp_v2 = PTP_TCR_TSVER2ENA;
504 /* take time stamp for SYNC messages only */
505 ts_event_en = PTP_TCR_TSEVNTENA;
506
507 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
508 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
509 break;
510
511 /* PTP v2, UDP, Delay_req packet */
512 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
513 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
514 ptp_v2 = PTP_TCR_TSVER2ENA;
515 /* take time stamp for Delay_Req messages only */
516 ts_master_en = PTP_TCR_TSMSTRENA;
517 ts_event_en = PTP_TCR_TSEVNTENA;
518
519 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
520 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
521 break;
522
523 /* PTP v2/802.AS1, any layer, any kind of event packet */
524 case HWTSTAMP_FILTER_PTP_V2_EVENT:
525 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
526 ptp_v2 = PTP_TCR_TSVER2ENA;
527 /* take time stamp for all event messages */
528 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
529
530 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
531 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
532 ptp_over_ethernet = PTP_TCR_TSIPENA;
533 break;
534
535 /* PTP v2/802.AS1, any layer, Sync packet */
536 case HWTSTAMP_FILTER_PTP_V2_SYNC:
537 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
538 ptp_v2 = PTP_TCR_TSVER2ENA;
539 /* take time stamp for SYNC messages only */
540 ts_event_en = PTP_TCR_TSEVNTENA;
541
542 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
543 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
544 ptp_over_ethernet = PTP_TCR_TSIPENA;
545 break;
546
547 /* PTP v2/802.AS1, any layer, Delay_req packet */
548 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
549 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
550 ptp_v2 = PTP_TCR_TSVER2ENA;
551 /* take time stamp for Delay_Req messages only */
552 ts_master_en = PTP_TCR_TSMSTRENA;
553 ts_event_en = PTP_TCR_TSEVNTENA;
554
555 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
556 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
557 ptp_over_ethernet = PTP_TCR_TSIPENA;
558 break;
559
560 /* time stamp any incoming packet */
561 case HWTSTAMP_FILTER_ALL:
562 config.rx_filter = HWTSTAMP_FILTER_ALL;
563 tstamp_all = PTP_TCR_TSENALL;
564 break;
565
566 default:
567 return -ERANGE;
568 }
569 } else {
570 switch (config.rx_filter) {
571 case HWTSTAMP_FILTER_NONE:
572 config.rx_filter = HWTSTAMP_FILTER_NONE;
573 break;
574 default:
575 /* PTP v1, UDP, any kind of event packet */
576 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
577 break;
578 }
579 }
580 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
581
582 if (!priv->hwts_tx_en && !priv->hwts_rx_en)
583 priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0);
584 else {
585 value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
586 tstamp_all | ptp_v2 | ptp_over_ethernet |
587 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
588 ts_master_en | snap_type_sel);
589
590 priv->hw->ptp->config_hw_tstamping(priv->ioaddr, value);
591
592 /* program Sub Second Increment reg */
593 priv->hw->ptp->config_sub_second_increment(priv->ioaddr);
594
 595 /* calculate the default addend value:
 596 * the formula is:
 597 * addend = (2^32)/freq_div_ratio;
 598 * where freq_div_ratio = STMMAC_SYSCLOCK/50MHz
 599 * hence, addend = ((2^32) * 50MHz)/STMMAC_SYSCLOCK;
 600 * NOTE: STMMAC_SYSCLOCK should be >= 50MHz to
 601 * achieve 20ns accuracy (a worked example follows this function).
602 *
603 * 2^x * y == (y << x), hence
604 * 2^32 * 50000000 ==> (50000000 << 32)
605 */
606 temp = (u64)(50000000ULL << 32);
607 priv->default_addend = div_u64(temp, STMMAC_SYSCLOCK);
608 priv->hw->ptp->config_addend(priv->ioaddr,
609 priv->default_addend);
610
611 /* initialize system time */
612 getnstimeofday(&now);
613 priv->hw->ptp->init_systime(priv->ioaddr, now.tv_sec,
614 now.tv_nsec);
615 }
616
617 return copy_to_user(ifr->ifr_data, &config,
618 sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
619}
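
A worked example of the addend computation above, using a purely illustrative
STMMAC_SYSCLOCK of 62500000 (62.5MHz; the real constant is defined elsewhere in
the driver): addend = (50000000 << 32) / 62500000 = 0.8 * 2^32, which
div_u64() truncates to 0xCCCCCCCC.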
620
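User space reaches stmmac_hwtstamp_ioctl() through the SIOCSHWTSTAMP ioctl with
a struct hwtstamp_config. A minimal sketch of that call follows (hypothetical
user-space code, not part of this patch; the AF_INET/UDP socket and the PTPv2
event filter are illustrative assumptions):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* Configure HW timestamping on "ifname" through the SIOCSHWTSTAMP
 * path that lands in stmmac_hwtstamp_ioctl() above.
 */
static int enable_hw_tstamp(const char *ifname)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0) {
		perror("SIOCSHWTSTAMP");
		close(fd);
		return -1;
	}

	/* The driver writes the granted config back (see the
	 * copy_to_user() at the end of the ioctl handler).
	 */
	printf("granted rx_filter: %d\n", cfg.rx_filter);
	close(fd);
	return 0;
}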
621static int stmmac_init_ptp(struct stmmac_priv *priv)
622{
623 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
624 return -EOPNOTSUPP;
625
626 if (netif_msg_hw(priv)) {
627 if (priv->dma_cap.time_stamp) {
628 pr_debug("IEEE 1588-2002 Time Stamp supported\n");
629 priv->adv_ts = 0;
630 }
631 if (priv->dma_cap.atime_stamp && priv->extend_desc) {
632 pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n");
633 priv->adv_ts = 1;
634 }
635 }
636
637 priv->hw->ptp = &stmmac_ptp;
638 priv->hwts_tx_en = 0;
639 priv->hwts_rx_en = 0;
640
641 return stmmac_ptp_register(priv);
642}
643
644static void stmmac_release_ptp(struct stmmac_priv *priv)
645{
646 stmmac_ptp_unregister(priv);
647}
648
307/** 649/**
308 * stmmac_adjust_link 650 * stmmac_adjust_link
309 * @dev: net device structure 651 * @dev: net device structure
@@ -398,6 +740,24 @@ static void stmmac_adjust_link(struct net_device *dev)
398 DBG(probe, DEBUG, "stmmac_adjust_link: exiting\n"); 740 DBG(probe, DEBUG, "stmmac_adjust_link: exiting\n");
399} 741}
400 742
743static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
744{
745 int interface = priv->plat->interface;
746
747 if (priv->dma_cap.pcs) {
 748 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
 749 (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
 750 (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
 751 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
 752 pr_debug("STMMAC: PCS RGMII support enable\n");
 753 priv->pcs = STMMAC_PCS_RGMII;
 754 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
755 pr_debug("STMMAC: PCS SGMII support enable\n");
756 priv->pcs = STMMAC_PCS_SGMII;
757 }
758 }
759}
760
401/** 761/**
402 * stmmac_init_phy - PHY initialization 762 * stmmac_init_phy - PHY initialization
403 * @dev: net device structure 763 * @dev: net device structure
@@ -461,29 +821,56 @@ static int stmmac_init_phy(struct net_device *dev)
461} 821}
462 822
463/** 823/**
464 * display_ring 824 * stmmac_display_ring
465 * @p: pointer to the ring. 825 * @p: pointer to the ring.
466 * @size: size of the ring. 826 * @size: size of the ring.
467 * Description: display all the descriptors within the ring. 827 * Description: display the control/status and buffer descriptors.
468 */ 828 */
469static void display_ring(struct dma_desc *p, int size) 829static void stmmac_display_ring(void *head, int size, int extend_desc)
470{ 830{
471 struct tmp_s {
472 u64 a;
473 unsigned int b;
474 unsigned int c;
475 };
476 int i; 831 int i;
832 struct dma_extended_desc *ep = (struct dma_extended_desc *) head;
833 struct dma_desc *p = (struct dma_desc *) head;
834
477 for (i = 0; i < size; i++) { 835 for (i = 0; i < size; i++) {
478 struct tmp_s *x = (struct tmp_s *)(p + i); 836 u64 x;
479 pr_info("\t%d [0x%x]: DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x", 837 if (extend_desc) {
480 i, (unsigned int)virt_to_phys(&p[i]), 838 x = *(u64 *) ep;
481 (unsigned int)(x->a), (unsigned int)((x->a) >> 32), 839 pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
482 x->b, x->c); 840 i, (unsigned int) virt_to_phys(ep),
841 (unsigned int) x, (unsigned int) (x >> 32),
842 ep->basic.des2, ep->basic.des3);
843 ep++;
844 } else {
845 x = *(u64 *) p;
846 pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
847 i, (unsigned int) virt_to_phys(p),
848 (unsigned int) x, (unsigned int) (x >> 32),
849 p->des2, p->des3);
850 p++;
851 }
483 pr_info("\n"); 852 pr_info("\n");
484 } 853 }
485} 854}
486 855
856static void stmmac_display_rings(struct stmmac_priv *priv)
857{
858 unsigned int txsize = priv->dma_tx_size;
859 unsigned int rxsize = priv->dma_rx_size;
860
861 if (priv->extend_desc) {
862 pr_info("Extended RX descriptor ring:\n");
863 stmmac_display_ring((void *) priv->dma_erx, rxsize, 1);
864 pr_info("Extended TX descriptor ring:\n");
865 stmmac_display_ring((void *) priv->dma_etx, txsize, 1);
866 } else {
867 pr_info("RX descriptor ring:\n");
868 stmmac_display_ring((void *)priv->dma_rx, rxsize, 0);
869 pr_info("TX descriptor ring:\n");
870 stmmac_display_ring((void *)priv->dma_tx, txsize, 0);
871 }
872}
873
487static int stmmac_set_bfsize(int mtu, int bufsize) 874static int stmmac_set_bfsize(int mtu, int bufsize)
488{ 875{
489 int ret = bufsize; 876 int ret = bufsize;
@@ -500,6 +887,59 @@ static int stmmac_set_bfsize(int mtu, int bufsize)
500 return ret; 887 return ret;
501} 888}
502 889
890static void stmmac_clear_descriptors(struct stmmac_priv *priv)
891{
892 int i;
893 unsigned int txsize = priv->dma_tx_size;
894 unsigned int rxsize = priv->dma_rx_size;
895
896 /* Clear the Rx/Tx descriptors */
897 for (i = 0; i < rxsize; i++)
898 if (priv->extend_desc)
899 priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
900 priv->use_riwt, priv->mode,
901 (i == rxsize - 1));
902 else
903 priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
904 priv->use_riwt, priv->mode,
905 (i == rxsize - 1));
906 for (i = 0; i < txsize; i++)
907 if (priv->extend_desc)
908 priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
909 priv->mode,
910 (i == txsize - 1));
911 else
912 priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
913 priv->mode,
914 (i == txsize - 1));
915}
916
917static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
918 int i)
919{
920 struct sk_buff *skb;
921
922 skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
923 GFP_KERNEL);
924 if (unlikely(skb == NULL)) {
925 pr_err("%s: Rx init fails; skb is NULL\n", __func__);
926 return 1;
927 }
928 skb_reserve(skb, NET_IP_ALIGN);
929 priv->rx_skbuff[i] = skb;
930 priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
931 priv->dma_buf_sz,
932 DMA_FROM_DEVICE);
933
934 p->des2 = priv->rx_skbuff_dma[i];
935
936 if ((priv->mode == STMMAC_RING_MODE) &&
937 (priv->dma_buf_sz == BUF_SIZE_16KiB))
938 priv->hw->ring->init_desc3(p);
939
940 return 0;
941}
942
503/** 943/**
504 * init_dma_desc_rings - init the RX/TX descriptor rings 944 * init_dma_desc_rings - init the RX/TX descriptor rings
505 * @dev: net device structure 945 * @dev: net device structure
@@ -511,75 +951,70 @@ static void init_dma_desc_rings(struct net_device *dev)
511{ 951{
512 int i; 952 int i;
513 struct stmmac_priv *priv = netdev_priv(dev); 953 struct stmmac_priv *priv = netdev_priv(dev);
514 struct sk_buff *skb;
515 unsigned int txsize = priv->dma_tx_size; 954 unsigned int txsize = priv->dma_tx_size;
516 unsigned int rxsize = priv->dma_rx_size; 955 unsigned int rxsize = priv->dma_rx_size;
517 unsigned int bfsize; 956 unsigned int bfsize = 0;
518 int dis_ic = 0;
519 int des3_as_data_buf = 0;
520 957
521 /* Set the max buffer size according to the DESC mode 958 /* Set the max buffer size according to the DESC mode
522 * and the MTU. Note that RING mode allows 16KiB bsize. */ 959 * and the MTU. Note that RING mode allows 16KiB bsize. */
523 bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu); 960 if (priv->mode == STMMAC_RING_MODE)
961 bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu);
524 962
525 if (bfsize == BUF_SIZE_16KiB) 963 if (bfsize < BUF_SIZE_16KiB)
526 des3_as_data_buf = 1;
527 else
528 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); 964 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
529 965
530 DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n", 966 DBG(probe, INFO, "stmmac: txsize %d, rxsize %d, bfsize %d\n",
531 txsize, rxsize, bfsize); 967 txsize, rxsize, bfsize);
532 968
533 priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t), 969 if (priv->extend_desc) {
534 GFP_KERNEL); 970 priv->dma_erx = dma_alloc_coherent(priv->device, rxsize *
535 priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *), 971 sizeof(struct
536 GFP_KERNEL); 972 dma_extended_desc),
537 priv->dma_rx = 973 &priv->dma_rx_phy,
538 (struct dma_desc *)dma_alloc_coherent(priv->device, 974 GFP_KERNEL);
539 rxsize * 975 priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
976 sizeof(struct
977 dma_extended_desc),
978 &priv->dma_tx_phy,
979 GFP_KERNEL);
980 if ((!priv->dma_erx) || (!priv->dma_etx))
981 return;
982 } else {
983 priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
540 sizeof(struct dma_desc), 984 sizeof(struct dma_desc),
541 &priv->dma_rx_phy, 985 &priv->dma_rx_phy,
542 GFP_KERNEL); 986 GFP_KERNEL);
543 priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *), 987 priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
544 GFP_KERNEL);
545 priv->dma_tx =
546 (struct dma_desc *)dma_alloc_coherent(priv->device,
547 txsize *
548 sizeof(struct dma_desc), 988 sizeof(struct dma_desc),
549 &priv->dma_tx_phy, 989 &priv->dma_tx_phy,
550 GFP_KERNEL); 990 GFP_KERNEL);
551 991 if ((!priv->dma_rx) || (!priv->dma_tx))
552 if ((priv->dma_rx == NULL) || (priv->dma_tx == NULL)) { 992 return;
553 pr_err("%s:ERROR allocating the DMA Tx/Rx desc\n", __func__);
554 return;
555 } 993 }
556 994
557 DBG(probe, INFO, "stmmac (%s) DMA desc: virt addr (Rx %p, " 995 priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
558 "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n", 996 GFP_KERNEL);
559 dev->name, priv->dma_rx, priv->dma_tx, 997 priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
560 (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy); 998 GFP_KERNEL);
999 priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
1000 GFP_KERNEL);
1001 priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
1002 GFP_KERNEL);
1003 if (netif_msg_drv(priv))
1004 pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
1005 (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
561 1006
562 /* RX INITIALIZATION */ 1007 /* RX INITIALIZATION */
563 DBG(probe, INFO, "stmmac: SKB addresses:\n" 1008 DBG(probe, INFO, "stmmac: SKB addresses:\nskb\t\tskb data\tdma data\n");
564 "skb\t\tskb data\tdma data\n");
565
566 for (i = 0; i < rxsize; i++) { 1009 for (i = 0; i < rxsize; i++) {
567 struct dma_desc *p = priv->dma_rx + i; 1010 struct dma_desc *p;
1011 if (priv->extend_desc)
1012 p = &((priv->dma_erx + i)->basic);
1013 else
1014 p = priv->dma_rx + i;
568 1015
569 skb = __netdev_alloc_skb(dev, bfsize + NET_IP_ALIGN, 1016 if (stmmac_init_rx_buffers(priv, p, i))
570 GFP_KERNEL);
571 if (unlikely(skb == NULL)) {
572 pr_err("%s: Rx init fails; skb is NULL\n", __func__);
573 break; 1017 break;
574 }
575 skb_reserve(skb, NET_IP_ALIGN);
576 priv->rx_skbuff[i] = skb;
577 priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
578 bfsize, DMA_FROM_DEVICE);
579
580 p->des2 = priv->rx_skbuff_dma[i];
581
582 priv->hw->ring->init_desc3(des3_as_data_buf, p);
583 1018
584 DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i], 1019 DBG(probe, INFO, "[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
585 priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]); 1020 priv->rx_skbuff[i]->data, priv->rx_skbuff_dma[i]);
@@ -589,32 +1024,40 @@ static void init_dma_desc_rings(struct net_device *dev)
589 priv->dma_buf_sz = bfsize; 1024 priv->dma_buf_sz = bfsize;
590 buf_sz = bfsize; 1025 buf_sz = bfsize;
591 1026
1027 /* Setup the chained descriptor addresses */
1028 if (priv->mode == STMMAC_CHAIN_MODE) {
1029 if (priv->extend_desc) {
1030 priv->hw->chain->init(priv->dma_erx, priv->dma_rx_phy,
1031 rxsize, 1);
1032 priv->hw->chain->init(priv->dma_etx, priv->dma_tx_phy,
1033 txsize, 1);
1034 } else {
1035 priv->hw->chain->init(priv->dma_rx, priv->dma_rx_phy,
1036 rxsize, 0);
1037 priv->hw->chain->init(priv->dma_tx, priv->dma_tx_phy,
1038 txsize, 0);
1039 }
1040 }
1041
592 /* TX INITIALIZATION */ 1042 /* TX INITIALIZATION */
593 for (i = 0; i < txsize; i++) { 1043 for (i = 0; i < txsize; i++) {
1044 struct dma_desc *p;
1045 if (priv->extend_desc)
1046 p = &((priv->dma_etx + i)->basic);
1047 else
1048 p = priv->dma_tx + i;
1049 p->des2 = 0;
1050 priv->tx_skbuff_dma[i] = 0;
594 priv->tx_skbuff[i] = NULL; 1051 priv->tx_skbuff[i] = NULL;
595 priv->dma_tx[i].des2 = 0;
596 } 1052 }
597 1053
598 /* In case of Chained mode this sets the des3 to the next
599 * element in the chain */
600 priv->hw->ring->init_dma_chain(priv->dma_rx, priv->dma_rx_phy, rxsize);
601 priv->hw->ring->init_dma_chain(priv->dma_tx, priv->dma_tx_phy, txsize);
602
603 priv->dirty_tx = 0; 1054 priv->dirty_tx = 0;
604 priv->cur_tx = 0; 1055 priv->cur_tx = 0;
605 1056
606 if (priv->use_riwt) 1057 stmmac_clear_descriptors(priv);
607 dis_ic = 1;
608 /* Clear the Rx/Tx descriptors */
609 priv->hw->desc->init_rx_desc(priv->dma_rx, rxsize, dis_ic);
610 priv->hw->desc->init_tx_desc(priv->dma_tx, txsize);
611 1058
612 if (netif_msg_hw(priv)) { 1059 if (netif_msg_hw(priv))
613 pr_info("RX descriptor ring:\n"); 1060 stmmac_display_rings(priv);
614 display_ring(priv->dma_rx, rxsize);
615 pr_info("TX descriptor ring:\n");
616 display_ring(priv->dma_tx, txsize);
617 }
618} 1061}
619 1062
620static void dma_free_rx_skbufs(struct stmmac_priv *priv) 1063static void dma_free_rx_skbufs(struct stmmac_priv *priv)
@@ -637,13 +1080,20 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
637 1080
638 for (i = 0; i < priv->dma_tx_size; i++) { 1081 for (i = 0; i < priv->dma_tx_size; i++) {
639 if (priv->tx_skbuff[i] != NULL) { 1082 if (priv->tx_skbuff[i] != NULL) {
640 struct dma_desc *p = priv->dma_tx + i; 1083 struct dma_desc *p;
641 if (p->des2) 1084 if (priv->extend_desc)
642 dma_unmap_single(priv->device, p->des2, 1085 p = &((priv->dma_etx + i)->basic);
1086 else
1087 p = priv->dma_tx + i;
1088
1089 if (priv->tx_skbuff_dma[i])
1090 dma_unmap_single(priv->device,
1091 priv->tx_skbuff_dma[i],
643 priv->hw->desc->get_tx_len(p), 1092 priv->hw->desc->get_tx_len(p),
644 DMA_TO_DEVICE); 1093 DMA_TO_DEVICE);
645 dev_kfree_skb_any(priv->tx_skbuff[i]); 1094 dev_kfree_skb_any(priv->tx_skbuff[i]);
646 priv->tx_skbuff[i] = NULL; 1095 priv->tx_skbuff[i] = NULL;
1096 priv->tx_skbuff_dma[i] = 0;
647 } 1097 }
648 } 1098 }
649} 1099}
@@ -656,14 +1106,24 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
656 1106
657 /* Free the region of consistent memory previously allocated for 1107 /* Free the region of consistent memory previously allocated for
658 * the DMA */ 1108 * the DMA */
659 dma_free_coherent(priv->device, 1109 if (!priv->extend_desc) {
660 priv->dma_tx_size * sizeof(struct dma_desc), 1110 dma_free_coherent(priv->device,
661 priv->dma_tx, priv->dma_tx_phy); 1111 priv->dma_tx_size * sizeof(struct dma_desc),
662 dma_free_coherent(priv->device, 1112 priv->dma_tx, priv->dma_tx_phy);
663 priv->dma_rx_size * sizeof(struct dma_desc), 1113 dma_free_coherent(priv->device,
664 priv->dma_rx, priv->dma_rx_phy); 1114 priv->dma_rx_size * sizeof(struct dma_desc),
1115 priv->dma_rx, priv->dma_rx_phy);
1116 } else {
1117 dma_free_coherent(priv->device, priv->dma_tx_size *
1118 sizeof(struct dma_extended_desc),
1119 priv->dma_etx, priv->dma_tx_phy);
1120 dma_free_coherent(priv->device, priv->dma_rx_size *
1121 sizeof(struct dma_extended_desc),
1122 priv->dma_erx, priv->dma_rx_phy);
1123 }
665 kfree(priv->rx_skbuff_dma); 1124 kfree(priv->rx_skbuff_dma);
666 kfree(priv->rx_skbuff); 1125 kfree(priv->rx_skbuff);
1126 kfree(priv->tx_skbuff_dma);
667 kfree(priv->tx_skbuff); 1127 kfree(priv->tx_skbuff);
668} 1128}
669 1129
@@ -708,13 +1168,18 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
708 int last; 1168 int last;
709 unsigned int entry = priv->dirty_tx % txsize; 1169 unsigned int entry = priv->dirty_tx % txsize;
710 struct sk_buff *skb = priv->tx_skbuff[entry]; 1170 struct sk_buff *skb = priv->tx_skbuff[entry];
711 struct dma_desc *p = priv->dma_tx + entry; 1171 struct dma_desc *p;
1172
1173 if (priv->extend_desc)
1174 p = (struct dma_desc *) (priv->dma_etx + entry);
1175 else
1176 p = priv->dma_tx + entry;
712 1177
713 /* Check if the descriptor is owned by the DMA. */ 1178 /* Check if the descriptor is owned by the DMA. */
714 if (priv->hw->desc->get_tx_owner(p)) 1179 if (priv->hw->desc->get_tx_owner(p))
715 break; 1180 break;
716 1181
717 /* Verify tx error by looking at the last segment */ 1182 /* Verify tx error by looking at the last segment. */
718 last = priv->hw->desc->get_tx_ls(p); 1183 last = priv->hw->desc->get_tx_ls(p);
719 if (likely(last)) { 1184 if (likely(last)) {
720 int tx_error = 1185 int tx_error =
@@ -726,22 +1191,27 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
726 priv->xstats.tx_pkt_n++; 1191 priv->xstats.tx_pkt_n++;
727 } else 1192 } else
728 priv->dev->stats.tx_errors++; 1193 priv->dev->stats.tx_errors++;
1194
1195 stmmac_get_tx_hwtstamp(priv, entry, skb);
729 } 1196 }
730 TX_DBG("%s: curr %d, dirty %d\n", __func__, 1197 TX_DBG("%s: curr %d, dirty %d\n", __func__,
731 priv->cur_tx, priv->dirty_tx); 1198 priv->cur_tx, priv->dirty_tx);
732 1199
733 if (likely(p->des2)) 1200 if (likely(priv->tx_skbuff_dma[entry])) {
734 dma_unmap_single(priv->device, p->des2, 1201 dma_unmap_single(priv->device,
1202 priv->tx_skbuff_dma[entry],
735 priv->hw->desc->get_tx_len(p), 1203 priv->hw->desc->get_tx_len(p),
736 DMA_TO_DEVICE); 1204 DMA_TO_DEVICE);
737 priv->hw->ring->clean_desc3(p); 1205 priv->tx_skbuff_dma[entry] = 0;
1206 }
1207 priv->hw->ring->clean_desc3(priv, p);
738 1208
739 if (likely(skb != NULL)) { 1209 if (likely(skb != NULL)) {
740 dev_kfree_skb(skb); 1210 dev_kfree_skb(skb);
741 priv->tx_skbuff[entry] = NULL; 1211 priv->tx_skbuff[entry] = NULL;
742 } 1212 }
743 1213
744 priv->hw->desc->release_tx_desc(p); 1214 priv->hw->desc->release_tx_desc(p, priv->mode);
745 1215
746 priv->dirty_tx++; 1216 priv->dirty_tx++;
747 } 1217 }
@@ -782,11 +1252,21 @@ static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
782 */ 1252 */
783static void stmmac_tx_err(struct stmmac_priv *priv) 1253static void stmmac_tx_err(struct stmmac_priv *priv)
784{ 1254{
1255 int i;
1256 int txsize = priv->dma_tx_size;
785 netif_stop_queue(priv->dev); 1257 netif_stop_queue(priv->dev);
786 1258
787 priv->hw->dma->stop_tx(priv->ioaddr); 1259 priv->hw->dma->stop_tx(priv->ioaddr);
788 dma_free_tx_skbufs(priv); 1260 dma_free_tx_skbufs(priv);
789 priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size); 1261 for (i = 0; i < txsize; i++)
1262 if (priv->extend_desc)
1263 priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
1264 priv->mode,
1265 (i == txsize - 1));
1266 else
1267 priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
1268 priv->mode,
1269 (i == txsize - 1));
790 priv->dirty_tx = 0; 1270 priv->dirty_tx = 0;
791 priv->cur_tx = 0; 1271 priv->cur_tx = 0;
792 priv->hw->dma->start_tx(priv->ioaddr); 1272 priv->hw->dma->start_tx(priv->ioaddr);
@@ -860,6 +1340,14 @@ static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
860{ 1340{
861 if (priv->plat->enh_desc) { 1341 if (priv->plat->enh_desc) {
862 pr_info(" Enhanced/Alternate descriptors\n"); 1342 pr_info(" Enhanced/Alternate descriptors\n");
1343
1344 /* GMAC older than 3.50 has no extended descriptors */
1345 if (priv->synopsys_id >= DWMAC_CORE_3_50) {
1346 pr_info("\tEnabled extended descriptors\n");
1347 priv->extend_desc = 1;
1348 } else
1349 pr_warn("Extended descriptors not supported\n");
1350
863 priv->hw->desc = &enh_desc_ops; 1351 priv->hw->desc = &enh_desc_ops;
864 } else { 1352 } else {
865 pr_info(" Normal descriptors\n"); 1353 pr_info(" Normal descriptors\n");
@@ -946,6 +1434,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
946{ 1434{
947 int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_len = 0; 1435 int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_len = 0;
948 int mixed_burst = 0; 1436 int mixed_burst = 0;
1437 int atds = 0;
949 1438
950 /* Some DMA parameters can be passed from the platform; 1439 /* Some DMA parameters can be passed from the platform;
951 * in case of these are not passed we keep a default 1440 * in case of these are not passed we keep a default
@@ -957,9 +1446,12 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
957 burst_len = priv->plat->dma_cfg->burst_len; 1446 burst_len = priv->plat->dma_cfg->burst_len;
958 } 1447 }
959 1448
1449 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
1450 atds = 1;
1451
960 return priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst, 1452 return priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
961 burst_len, priv->dma_tx_phy, 1453 burst_len, priv->dma_tx_phy,
962 priv->dma_rx_phy); 1454 priv->dma_rx_phy, atds);
963} 1455}
964 1456
965/** 1457/**
@@ -1012,10 +1504,13 @@ static int stmmac_open(struct net_device *dev)
1012 1504
1013 stmmac_check_ether_addr(priv); 1505 stmmac_check_ether_addr(priv);
1014 1506
1015 ret = stmmac_init_phy(dev); 1507 if (!priv->pcs) {
1016 if (unlikely(ret)) { 1508 ret = stmmac_init_phy(dev);
1017 pr_err("%s: Cannot attach to PHY (error: %d)\n", __func__, ret); 1509 if (ret) {
1018 goto open_error; 1510 pr_err("%s: Cannot attach to PHY (error: %d)\n",
1511 __func__, ret);
1512 goto open_error;
1513 }
1019 } 1514 }
1020 1515
1021 /* Create and initialize the TX/RX descriptors chains. */ 1516 /* Create and initialize the TX/RX descriptors chains. */
@@ -1084,6 +1579,10 @@ static int stmmac_open(struct net_device *dev)
1084 1579
1085 stmmac_mmc_setup(priv); 1580 stmmac_mmc_setup(priv);
1086 1581
1582 ret = stmmac_init_ptp(priv);
1583 if (ret)
1584 pr_warn("%s: failed PTP initialisation\n", __func__);
1585
1087#ifdef CONFIG_STMMAC_DEBUG_FS 1586#ifdef CONFIG_STMMAC_DEBUG_FS
1088 ret = stmmac_init_fs(dev); 1587 ret = stmmac_init_fs(dev);
1089 if (ret < 0) 1588 if (ret < 0)
@@ -1104,7 +1603,12 @@ static int stmmac_open(struct net_device *dev)
1104 phy_start(priv->phydev); 1603 phy_start(priv->phydev);
1105 1604
1106 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS_TIMER; 1605 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS_TIMER;
1107 priv->eee_enabled = stmmac_eee_init(priv); 1606
 1607 /* When using PCS we cannot deal with the PHY registers at this
 1608 * stage, so extra features like EEE are not supported.
 1609 */
1610 if (!priv->pcs)
1611 priv->eee_enabled = stmmac_eee_init(priv);
1108 1612
1109 stmmac_init_tx_coalesce(priv); 1613 stmmac_init_tx_coalesce(priv);
1110 1614
@@ -1113,6 +1617,9 @@ static int stmmac_open(struct net_device *dev)
1113 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT); 1617 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
1114 } 1618 }
1115 1619
1620 if (priv->pcs && priv->hw->mac->ctrl_ane)
1621 priv->hw->mac->ctrl_ane(priv->ioaddr, 0);
1622
1116 napi_enable(&priv->napi); 1623 napi_enable(&priv->napi);
1117 netif_start_queue(dev); 1624 netif_start_queue(dev);
1118 1625
@@ -1184,6 +1691,8 @@ static int stmmac_release(struct net_device *dev)
1184#endif 1691#endif
1185 clk_disable_unprepare(priv->stmmac_clk); 1692 clk_disable_unprepare(priv->stmmac_clk);
1186 1693
1694 stmmac_release_ptp(priv);
1695
1187 return 0; 1696 return 0;
1188} 1697}
1189 1698
@@ -1198,7 +1707,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1198 struct stmmac_priv *priv = netdev_priv(dev); 1707 struct stmmac_priv *priv = netdev_priv(dev);
1199 unsigned int txsize = priv->dma_tx_size; 1708 unsigned int txsize = priv->dma_tx_size;
1200 unsigned int entry; 1709 unsigned int entry;
1201 int i, csum_insertion = 0; 1710 int i, csum_insertion = 0, is_jumbo = 0;
1202 int nfrags = skb_shinfo(skb)->nr_frags; 1711 int nfrags = skb_shinfo(skb)->nr_frags;
1203 struct dma_desc *desc, *first; 1712 struct dma_desc *desc, *first;
1204 unsigned int nopaged_len = skb_headlen(skb); 1713 unsigned int nopaged_len = skb_headlen(skb);
@@ -1233,7 +1742,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1233 1742
1234 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); 1743 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
1235 1744
1236 desc = priv->dma_tx + entry; 1745 if (priv->extend_desc)
1746 desc = (struct dma_desc *) (priv->dma_etx + entry);
1747 else
1748 desc = priv->dma_tx + entry;
1749
1237 first = desc; 1750 first = desc;
1238 1751
1239#ifdef STMMAC_XMIT_DEBUG 1752#ifdef STMMAC_XMIT_DEBUG
@@ -1244,28 +1757,46 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1244#endif 1757#endif
1245 priv->tx_skbuff[entry] = skb; 1758 priv->tx_skbuff[entry] = skb;
1246 1759
1247 if (priv->hw->ring->is_jumbo_frm(skb->len, priv->plat->enh_desc)) { 1760 /* To program the descriptors according to the size of the frame */
1248 entry = priv->hw->ring->jumbo_frm(priv, skb, csum_insertion); 1761 if (priv->mode == STMMAC_RING_MODE) {
1249 desc = priv->dma_tx + entry; 1762 is_jumbo = priv->hw->ring->is_jumbo_frm(skb->len,
1763 priv->plat->enh_desc);
1764 if (unlikely(is_jumbo))
1765 entry = priv->hw->ring->jumbo_frm(priv, skb,
1766 csum_insertion);
1250 } else { 1767 } else {
1768 is_jumbo = priv->hw->chain->is_jumbo_frm(skb->len,
1769 priv->plat->enh_desc);
1770 if (unlikely(is_jumbo))
1771 entry = priv->hw->chain->jumbo_frm(priv, skb,
1772 csum_insertion);
1773 }
1774 if (likely(!is_jumbo)) {
1251 desc->des2 = dma_map_single(priv->device, skb->data, 1775 desc->des2 = dma_map_single(priv->device, skb->data,
1252 nopaged_len, DMA_TO_DEVICE); 1776 nopaged_len, DMA_TO_DEVICE);
1777 priv->tx_skbuff_dma[entry] = desc->des2;
1253 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, 1778 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
1254 csum_insertion); 1779 csum_insertion, priv->mode);
1255 } 1780 } else
1781 desc = first;
1256 1782
1257 for (i = 0; i < nfrags; i++) { 1783 for (i = 0; i < nfrags; i++) {
1258 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1784 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1259 int len = skb_frag_size(frag); 1785 int len = skb_frag_size(frag);
1260 1786
1261 entry = (++priv->cur_tx) % txsize; 1787 entry = (++priv->cur_tx) % txsize;
1262 desc = priv->dma_tx + entry; 1788 if (priv->extend_desc)
1789 desc = (struct dma_desc *) (priv->dma_etx + entry);
1790 else
1791 desc = priv->dma_tx + entry;
1263 1792
1264 TX_DBG("\t[entry %d] segment len: %d\n", entry, len); 1793 TX_DBG("\t[entry %d] segment len: %d\n", entry, len);
1265 desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len, 1794 desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
1266 DMA_TO_DEVICE); 1795 DMA_TO_DEVICE);
1796 priv->tx_skbuff_dma[entry] = desc->des2;
1267 priv->tx_skbuff[entry] = NULL; 1797 priv->tx_skbuff[entry] = NULL;
1268 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion); 1798 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
1799 priv->mode);
1269 wmb(); 1800 wmb();
1270 priv->hw->desc->set_tx_owner(desc); 1801 priv->hw->desc->set_tx_owner(desc);
1271 wmb(); 1802 wmb();
@@ -1302,7 +1833,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1302 "first=%p, nfrags=%d\n", 1833 "first=%p, nfrags=%d\n",
1303 (priv->cur_tx % txsize), (priv->dirty_tx % txsize), 1834 (priv->cur_tx % txsize), (priv->dirty_tx % txsize),
1304 entry, first, nfrags); 1835 entry, first, nfrags);
1305 display_ring(priv->dma_tx, txsize); 1836 if (priv->extend_desc)
1837 stmmac_display_ring((void *)priv->dma_etx, txsize, 1);
1838 else
1839 stmmac_display_ring((void *)priv->dma_tx, txsize, 0);
1840
1306 pr_info(">>> frame to be transmitted: "); 1841 pr_info(">>> frame to be transmitted: ");
1307 print_pkt(skb->data, skb->len); 1842 print_pkt(skb->data, skb->len);
1308 } 1843 }
@@ -1314,7 +1849,15 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1314 1849
1315 dev->stats.tx_bytes += skb->len; 1850 dev->stats.tx_bytes += skb->len;
1316 1851
1317 skb_tx_timestamp(skb); 1852 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1853 priv->hwts_tx_en)) {
1854 /* declare that device is doing timestamping */
1855 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1856 priv->hw->desc->enable_tx_timestamp(first);
1857 }
1858
1859 if (!priv->hwts_tx_en)
1860 skb_tx_timestamp(skb);
1318 1861
1319 priv->hw->dma->enable_dma_transmission(priv->ioaddr); 1862 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
1320 1863
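
Continuing the user-space side: once SKBTX_HW_TSTAMP has been requested (see
the SO_TIMESTAMPING sketch earlier), the timestamp delivered by skb_tstamp_tx()
in stmmac_get_tx_hwtstamp() is read back from the socket error queue. A hedged
sketch using only the standard error-queue API; buffer sizes are arbitrary:

#include <string.h>
#include <time.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>

/* Poll the error queue for the HW TX timestamp that the driver pushed
 * via skb_tstamp_tx(); ts[2] holds the raw hardware timestamp.
 */
static int read_tx_timestamp(int sock, struct timespec *hwts)
{
	char data[256], ctrl[256];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
	};
	struct cmsghdr *cmsg;

	if (recvmsg(sock, &msg, MSG_ERRQUEUE) < 0)
		return -1;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_TIMESTAMPING) {
			struct scm_timestamping *ts =
				(struct scm_timestamping *)CMSG_DATA(cmsg);
			*hwts = ts->ts[2];
			return 0;
		}
	}
	return -1;
}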
@@ -1327,10 +1870,16 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
1327{ 1870{
1328 unsigned int rxsize = priv->dma_rx_size; 1871 unsigned int rxsize = priv->dma_rx_size;
1329 int bfsize = priv->dma_buf_sz; 1872 int bfsize = priv->dma_buf_sz;
1330 struct dma_desc *p = priv->dma_rx;
1331 1873
1332 for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) { 1874 for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
1333 unsigned int entry = priv->dirty_rx % rxsize; 1875 unsigned int entry = priv->dirty_rx % rxsize;
1876 struct dma_desc *p;
1877
1878 if (priv->extend_desc)
1879 p = (struct dma_desc *) (priv->dma_erx + entry);
1880 else
1881 p = priv->dma_rx + entry;
1882
1334 if (likely(priv->rx_skbuff[entry] == NULL)) { 1883 if (likely(priv->rx_skbuff[entry] == NULL)) {
1335 struct sk_buff *skb; 1884 struct sk_buff *skb;
1336 1885
@@ -1344,15 +1893,14 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
1344 dma_map_single(priv->device, skb->data, bfsize, 1893 dma_map_single(priv->device, skb->data, bfsize,
1345 DMA_FROM_DEVICE); 1894 DMA_FROM_DEVICE);
1346 1895
1347 (p + entry)->des2 = priv->rx_skbuff_dma[entry]; 1896 p->des2 = priv->rx_skbuff_dma[entry];
1348 1897
1349 if (unlikely(priv->plat->has_gmac)) 1898 priv->hw->ring->refill_desc3(priv, p);
1350 priv->hw->ring->refill_desc3(bfsize, p + entry);
1351 1899
1352 RX_DBG(KERN_INFO "\trefill entry #%d\n", entry); 1900 RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
1353 } 1901 }
1354 wmb(); 1902 wmb();
1355 priv->hw->desc->set_rx_owner(p + entry); 1903 priv->hw->desc->set_rx_owner(p);
1356 wmb(); 1904 wmb();
1357 } 1905 }
1358} 1906}
@@ -1363,33 +1911,61 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
1363 unsigned int entry = priv->cur_rx % rxsize; 1911 unsigned int entry = priv->cur_rx % rxsize;
1364 unsigned int next_entry; 1912 unsigned int next_entry;
1365 unsigned int count = 0; 1913 unsigned int count = 0;
1366 struct dma_desc *p = priv->dma_rx + entry;
1367 struct dma_desc *p_next;
1368 1914
1369#ifdef STMMAC_RX_DEBUG 1915#ifdef STMMAC_RX_DEBUG
1370 if (netif_msg_hw(priv)) { 1916 if (netif_msg_hw(priv)) {
1371 pr_debug(">>> stmmac_rx: descriptor ring:\n"); 1917 pr_debug(">>> stmmac_rx: descriptor ring:\n");
1372 display_ring(priv->dma_rx, rxsize); 1918 if (priv->extend_desc)
1919 stmmac_display_ring((void *) priv->dma_erx, rxsize, 1);
1920 else
1921 stmmac_display_ring((void *)priv->dma_rx, rxsize, 0);
1373 } 1922 }
1374#endif 1923#endif
1375 while (!priv->hw->desc->get_rx_owner(p)) { 1924 while (count < limit) {
1376 int status; 1925 int status;
1926 struct dma_desc *p, *p_next;
1927
1928 if (priv->extend_desc)
1929 p = (struct dma_desc *) (priv->dma_erx + entry);
1930 else
 1931 p = priv->dma_rx + entry;
1377 1932
1378 if (count >= limit) 1933 if (priv->hw->desc->get_rx_owner(p))
1379 break; 1934 break;
1380 1935
1381 count++; 1936 count++;
1382 1937
1383 next_entry = (++priv->cur_rx) % rxsize; 1938 next_entry = (++priv->cur_rx) % rxsize;
1384 p_next = priv->dma_rx + next_entry; 1939 if (priv->extend_desc)
1940 p_next = (struct dma_desc *) (priv->dma_erx +
1941 next_entry);
1942 else
1943 p_next = priv->dma_rx + next_entry;
1944
1385 prefetch(p_next); 1945 prefetch(p_next);
1386 1946
1387 /* read the status of the incoming frame */ 1947 /* read the status of the incoming frame */
1388 status = (priv->hw->desc->rx_status(&priv->dev->stats, 1948 status = priv->hw->desc->rx_status(&priv->dev->stats,
1389 &priv->xstats, p)); 1949 &priv->xstats, p);
1390 if (unlikely(status == discard_frame)) 1950 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
1951 priv->hw->desc->rx_extended_status(&priv->dev->stats,
1952 &priv->xstats,
1953 priv->dma_erx +
1954 entry);
1955 if (unlikely(status == discard_frame)) {
1391 priv->dev->stats.rx_errors++; 1956 priv->dev->stats.rx_errors++;
1392 else { 1957 if (priv->hwts_rx_en && !priv->extend_desc) {
 1958 /* DESC2 & DESC3 will be overwritten by the device
 1959 * with the timestamp value, hence reinitialize
 1960 * them in stmmac_rx_refill() so that the
 1961 * device can reuse them.
 1962 */
1963 priv->rx_skbuff[entry] = NULL;
1964 dma_unmap_single(priv->device,
1965 priv->rx_skbuff_dma[entry],
1966 priv->dma_buf_sz, DMA_FROM_DEVICE);
1967 }
1968 } else {
1393 struct sk_buff *skb; 1969 struct sk_buff *skb;
1394 int frame_len; 1970 int frame_len;
1395 1971
@@ -1418,6 +1994,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
1418 prefetch(skb->data - NET_IP_ALIGN); 1994 prefetch(skb->data - NET_IP_ALIGN);
1419 priv->rx_skbuff[entry] = NULL; 1995 priv->rx_skbuff[entry] = NULL;
1420 1996
1997 stmmac_get_rx_hwtstamp(priv, entry, skb);
1998
1421 skb_put(skb, frame_len); 1999 skb_put(skb, frame_len);
1422 dma_unmap_single(priv->device, 2000 dma_unmap_single(priv->device,
1423 priv->rx_skbuff_dma[entry], 2001 priv->rx_skbuff_dma[entry],
@@ -1441,7 +2019,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
1441 priv->dev->stats.rx_bytes += frame_len; 2019 priv->dev->stats.rx_bytes += frame_len;
1442 } 2020 }
1443 entry = next_entry; 2021 entry = next_entry;
1444 p = p_next; /* use prefetched values */
1445 } 2022 }
1446 2023
1447 stmmac_rx_refill(priv); 2024 stmmac_rx_refill(priv);
@@ -1604,30 +2181,14 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
1604 /* To handle GMAC own interrupts */ 2181 /* To handle GMAC own interrupts */
1605 if (priv->plat->has_gmac) { 2182 if (priv->plat->has_gmac) {
1606 int status = priv->hw->mac->host_irq_status((void __iomem *) 2183 int status = priv->hw->mac->host_irq_status((void __iomem *)
1607 dev->base_addr); 2184 dev->base_addr,
2185 &priv->xstats);
1608 if (unlikely(status)) { 2186 if (unlikely(status)) {
1609 if (status & core_mmc_tx_irq)
1610 priv->xstats.mmc_tx_irq_n++;
1611 if (status & core_mmc_rx_irq)
1612 priv->xstats.mmc_rx_irq_n++;
1613 if (status & core_mmc_rx_csum_offload_irq)
1614 priv->xstats.mmc_rx_csum_offload_irq_n++;
1615 if (status & core_irq_receive_pmt_irq)
1616 priv->xstats.irq_receive_pmt_irq_n++;
1617
1618 /* For LPI we need to save the tx status */ 2187 /* For LPI we need to save the tx status */
1619 if (status & core_irq_tx_path_in_lpi_mode) { 2188 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
1620 priv->xstats.irq_tx_path_in_lpi_mode_n++;
1621 priv->tx_path_in_lpi_mode = true; 2189 priv->tx_path_in_lpi_mode = true;
1622 } 2190 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
1623 if (status & core_irq_tx_path_exit_lpi_mode) {
1624 priv->xstats.irq_tx_path_exit_lpi_mode_n++;
1625 priv->tx_path_in_lpi_mode = false; 2191 priv->tx_path_in_lpi_mode = false;
1626 }
1627 if (status & core_irq_rx_path_in_lpi_mode)
1628 priv->xstats.irq_rx_path_in_lpi_mode_n++;
1629 if (status & core_irq_rx_path_exit_lpi_mode)
1630 priv->xstats.irq_rx_path_exit_lpi_mode_n++;
1631 } 2192 }
1632 } 2193 }
1633 2194
@@ -1655,21 +2216,30 @@ static void stmmac_poll_controller(struct net_device *dev)
1655 * a proprietary structure used to pass information to the driver. 2216 * a proprietary structure used to pass information to the driver.
1656 * @cmd: IOCTL command 2217 * @cmd: IOCTL command
1657 * Description: 2218 * Description:
1658 * Currently there are no special functionality supported in IOCTL, just the 2219 * Currently it supports just the phy_mii_ioctl(...) and HW time stamping.
1659 * phy_mii_ioctl(...) can be invoked.
1660 */ 2220 */
1661static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 2221static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1662{ 2222{
1663 struct stmmac_priv *priv = netdev_priv(dev); 2223 struct stmmac_priv *priv = netdev_priv(dev);
1664 int ret; 2224 int ret = -EOPNOTSUPP;
1665 2225
1666 if (!netif_running(dev)) 2226 if (!netif_running(dev))
1667 return -EINVAL; 2227 return -EINVAL;
1668 2228
1669 if (!priv->phydev) 2229 switch (cmd) {
1670 return -EINVAL; 2230 case SIOCGMIIPHY:
1671 2231 case SIOCGMIIREG:
1672 ret = phy_mii_ioctl(priv->phydev, rq, cmd); 2232 case SIOCSMIIREG:
2233 if (!priv->phydev)
2234 return -EINVAL;
2235 ret = phy_mii_ioctl(priv->phydev, rq, cmd);
2236 break;
2237 case SIOCSHWTSTAMP:
2238 ret = stmmac_hwtstamp_ioctl(dev, rq);
2239 break;
2240 default:
2241 break;
2242 }
1673 2243
1674 return ret; 2244 return ret;
1675} 2245}
@@ -1679,40 +2249,51 @@ static struct dentry *stmmac_fs_dir;
1679static struct dentry *stmmac_rings_status; 2249static struct dentry *stmmac_rings_status;
1680static struct dentry *stmmac_dma_cap; 2250static struct dentry *stmmac_dma_cap;
1681 2251
1682static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v) 2252static void sysfs_display_ring(void *head, int size, int extend_desc,
2253 struct seq_file *seq)
1683{ 2254{
1684 struct tmp_s {
1685 u64 a;
1686 unsigned int b;
1687 unsigned int c;
1688 };
1689 int i; 2255 int i;
1690 struct net_device *dev = seq->private; 2256 struct dma_extended_desc *ep = (struct dma_extended_desc *) head;
1691 struct stmmac_priv *priv = netdev_priv(dev); 2257 struct dma_desc *p = (struct dma_desc *) head;
1692
1693 seq_printf(seq, "=======================\n");
1694 seq_printf(seq, " RX descriptor ring\n");
1695 seq_printf(seq, "=======================\n");
1696 2258
1697 for (i = 0; i < priv->dma_rx_size; i++) { 2259 for (i = 0; i < size; i++) {
1698 struct tmp_s *x = (struct tmp_s *)(priv->dma_rx + i); 2260 u64 x;
1699 seq_printf(seq, "[%d] DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x", 2261 if (extend_desc) {
1700 i, (unsigned int)(x->a), 2262 x = *(u64 *) ep;
1701 (unsigned int)((x->a) >> 32), x->b, x->c); 2263 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
2264 i, (unsigned int) virt_to_phys(ep),
2265 (unsigned int) x, (unsigned int) (x >> 32),
2266 ep->basic.des2, ep->basic.des3);
2267 ep++;
2268 } else {
2269 x = *(u64 *) p;
2270 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
 2271 i, (unsigned int) virt_to_phys(p),
2272 (unsigned int) x, (unsigned int) (x >> 32),
2273 p->des2, p->des3);
2274 p++;
2275 }
1702 seq_printf(seq, "\n"); 2276 seq_printf(seq, "\n");
1703 } 2277 }
2278}
1704 2279
1705 seq_printf(seq, "\n"); 2280static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
1706 seq_printf(seq, "=======================\n"); 2281{
1707 seq_printf(seq, " TX descriptor ring\n"); 2282 struct net_device *dev = seq->private;
1708 seq_printf(seq, "=======================\n"); 2283 struct stmmac_priv *priv = netdev_priv(dev);
2284 unsigned int txsize = priv->dma_tx_size;
2285 unsigned int rxsize = priv->dma_rx_size;
1709 2286
1710 for (i = 0; i < priv->dma_tx_size; i++) { 2287 if (priv->extend_desc) {
1711 struct tmp_s *x = (struct tmp_s *)(priv->dma_tx + i); 2288 seq_printf(seq, "Extended RX descriptor ring:\n");
1712 seq_printf(seq, "[%d] DES0=0x%x DES1=0x%x BUF1=0x%x BUF2=0x%x", 2289 sysfs_display_ring((void *) priv->dma_erx, rxsize, 1, seq);
1713 i, (unsigned int)(x->a), 2290 seq_printf(seq, "Extended TX descriptor ring:\n");
1714 (unsigned int)((x->a) >> 32), x->b, x->c); 2291 sysfs_display_ring((void *) priv->dma_etx, txsize, 1, seq);
1715 seq_printf(seq, "\n"); 2292 } else {
2293 seq_printf(seq, "RX descriptor ring:\n");
2294 sysfs_display_ring((void *)priv->dma_rx, rxsize, 0, seq);
2295 seq_printf(seq, "TX descriptor ring:\n");
2296 sysfs_display_ring((void *)priv->dma_tx, txsize, 0, seq);
1716 } 2297 }
1717 2298
1718 return 0; 2299 return 0;
@@ -1877,7 +2458,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
1877 */ 2458 */
1878static int stmmac_hw_init(struct stmmac_priv *priv) 2459static int stmmac_hw_init(struct stmmac_priv *priv)
1879{ 2460{
1880 int ret = 0; 2461 int ret;
1881 struct mac_device_info *mac; 2462 struct mac_device_info *mac;
1882 2463
1883 /* Identify the MAC HW device */ 2464 /* Identify the MAC HW device */
@@ -1892,12 +2473,23 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
1892 2473
1893 priv->hw = mac; 2474 priv->hw = mac;
1894 2475
1895 /* To use the chained or ring mode */
1896 priv->hw->ring = &ring_mode_ops;
1897
1898 /* Get and dump the chip ID */ 2476 /* Get and dump the chip ID */
1899 priv->synopsys_id = stmmac_get_synopsys_id(priv); 2477 priv->synopsys_id = stmmac_get_synopsys_id(priv);
1900 2478
2479 /* To use alternate (extended) or normal descriptor structures */
2480 stmmac_selec_desc_mode(priv);
2481
2482 /* To use the chained or ring mode */
2483 if (chain_mode) {
2484 priv->hw->chain = &chain_mode_ops;
2485 pr_info(" Chain mode enabled\n");
2486 priv->mode = STMMAC_CHAIN_MODE;
2487 } else {
2488 priv->hw->ring = &ring_mode_ops;
2489 pr_info(" Ring mode enabled\n");
2490 priv->mode = STMMAC_RING_MODE;
2491 }
2492
1901 /* Get the HW capability (new GMAC newer than 3.50a) */ 2493 /* Get the HW capability (new GMAC newer than 3.50a) */
1902 priv->hw_cap_support = stmmac_get_hw_features(priv); 2494 priv->hw_cap_support = stmmac_get_hw_features(priv);
1903 if (priv->hw_cap_support) { 2495 if (priv->hw_cap_support) {
@@ -1921,9 +2513,6 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
1921 } else 2513 } else
1922 pr_info(" No HW DMA feature register supported"); 2514 pr_info(" No HW DMA feature register supported");
1923 2515
 1924 /* Select the enhanced/normal descriptor structures */
1925 stmmac_selec_desc_mode(priv);
1926
1927 /* Enable the IPC (Checksum Offload) and check if the feature has been 2516 /* Enable the IPC (Checksum Offload) and check if the feature has been
1928 * enabled during the core configuration. */ 2517 * enabled during the core configuration. */
1929 ret = priv->hw->mac->rx_ipc(priv->ioaddr); 2518 ret = priv->hw->mac->rx_ipc(priv->ioaddr);
@@ -1943,7 +2532,7 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
1943 device_set_wakeup_capable(priv->device, 1); 2532 device_set_wakeup_capable(priv->device, 1);
1944 } 2533 }
1945 2534
1946 return ret; 2535 return 0;
1947} 2536}
1948 2537
1949/** 2538/**
@@ -1989,7 +2578,9 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
1989 priv->plat->phy_addr = phyaddr; 2578 priv->plat->phy_addr = phyaddr;
1990 2579
1991 /* Init MAC and get the capabilities */ 2580 /* Init MAC and get the capabilities */
1992 stmmac_hw_init(priv); 2581 ret = stmmac_hw_init(priv);
2582 if (ret)
2583 goto error_free_netdev;
1993 2584
1994 ndev->netdev_ops = &stmmac_netdev_ops; 2585 ndev->netdev_ops = &stmmac_netdev_ops;
1995 2586
@@ -2044,12 +2635,16 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
2044 else 2635 else
2045 priv->clk_csr = priv->plat->clk_csr; 2636 priv->clk_csr = priv->plat->clk_csr;
2046 2637
2047 /* MDIO bus Registration */ 2638 stmmac_check_pcs_mode(priv);
2048 ret = stmmac_mdio_register(ndev); 2639
2049 if (ret < 0) { 2640 if (!priv->pcs) {
2050 pr_debug("%s: MDIO bus (id: %d) registration failed", 2641 /* MDIO bus Registration */
2051 __func__, priv->plat->bus_id); 2642 ret = stmmac_mdio_register(ndev);
2052 goto error_mdio_register; 2643 if (ret < 0) {
2644 pr_debug("%s: MDIO bus (id: %d) registration failed",
2645 __func__, priv->plat->bus_id);
2646 goto error_mdio_register;
2647 }
2053 } 2648 }
2054 2649
2055 return priv; 2650 return priv;
@@ -2060,6 +2655,7 @@ error_clk_get:
2060 unregister_netdev(ndev); 2655 unregister_netdev(ndev);
2061error_netdev_register: 2656error_netdev_register:
2062 netif_napi_del(&priv->napi); 2657 netif_napi_del(&priv->napi);
2658error_free_netdev:
2063 free_netdev(ndev); 2659 free_netdev(ndev);
2064 2660
2065 return NULL; 2661 return NULL;
@@ -2081,7 +2677,8 @@ int stmmac_dvr_remove(struct net_device *ndev)
2081 priv->hw->dma->stop_tx(priv->ioaddr); 2677 priv->hw->dma->stop_tx(priv->ioaddr);
2082 2678
2083 stmmac_set_mac(priv->ioaddr, false); 2679 stmmac_set_mac(priv->ioaddr, false);
2084 stmmac_mdio_unregister(ndev); 2680 if (!priv->pcs)
2681 stmmac_mdio_unregister(ndev);
2085 netif_carrier_off(ndev); 2682 netif_carrier_off(ndev);
2086 unregister_netdev(ndev); 2683 unregister_netdev(ndev);
2087 free_netdev(ndev); 2684 free_netdev(ndev);
@@ -2093,7 +2690,6 @@ int stmmac_dvr_remove(struct net_device *ndev)
2093int stmmac_suspend(struct net_device *ndev) 2690int stmmac_suspend(struct net_device *ndev)
2094{ 2691{
2095 struct stmmac_priv *priv = netdev_priv(ndev); 2692 struct stmmac_priv *priv = netdev_priv(ndev);
2096 int dis_ic = 0;
2097 unsigned long flags; 2693 unsigned long flags;
2098 2694
2099 if (!ndev || !netif_running(ndev)) 2695 if (!ndev || !netif_running(ndev))
@@ -2107,18 +2703,13 @@ int stmmac_suspend(struct net_device *ndev)
2107 netif_device_detach(ndev); 2703 netif_device_detach(ndev);
2108 netif_stop_queue(ndev); 2704 netif_stop_queue(ndev);
2109 2705
2110 if (priv->use_riwt)
2111 dis_ic = 1;
2112
2113 napi_disable(&priv->napi); 2706 napi_disable(&priv->napi);
2114 2707
2115 /* Stop TX/RX DMA */ 2708 /* Stop TX/RX DMA */
2116 priv->hw->dma->stop_tx(priv->ioaddr); 2709 priv->hw->dma->stop_tx(priv->ioaddr);
2117 priv->hw->dma->stop_rx(priv->ioaddr); 2710 priv->hw->dma->stop_rx(priv->ioaddr);
2118 /* Clear the Rx/Tx descriptors */ 2711
2119 priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size, 2712 stmmac_clear_descriptors(priv);
2120 dis_ic);
2121 priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
2122 2713
2123 /* Enable Power down mode by programming the PMT regs */ 2714 /* Enable Power down mode by programming the PMT regs */
2124 if (device_may_wakeup(priv->device)) 2715 if (device_may_wakeup(priv->device))
@@ -2257,6 +2848,9 @@ static int __init stmmac_cmdline_opt(char *str)
2257 } else if (!strncmp(opt, "eee_timer:", 10)) { 2848 } else if (!strncmp(opt, "eee_timer:", 10)) {
2258 if (kstrtoint(opt + 10, 0, &eee_timer)) 2849 if (kstrtoint(opt + 10, 0, &eee_timer))
2259 goto err; 2850 goto err;
2851 } else if (!strncmp(opt, "chain_mode:", 11)) {
2852 if (kstrtoint(opt + 11, 0, &chain_mode))
2853 goto err;
2260 } 2854 }
2261 } 2855 }
2262 return 0; 2856 return 0;
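The new chain_mode handler above follows the driver's existing "name:value" command-line option pattern: match the prefix, then parse the integer that follows. A minimal userspace sketch of the same pattern, with strtol standing in for the kernel's kstrtoint (the option name mirrors the hunk; everything else is illustrative):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	static int chain_mode;	/* hypothetical stand-in for the module parameter */

	/* Parse one "name:value" token the way stmmac_cmdline_opt does. */
	static int parse_opt(const char *opt)
	{
		char *end;

		if (!strncmp(opt, "chain_mode:", 11)) {
			long v = strtol(opt + 11, &end, 0);

			if (end == opt + 11 || *end != '\0')
				return -1;	/* malformed value */
			chain_mode = (int)v;
			return 0;
		}
		return -1;	/* unknown option */
	}

	int main(void)
	{
		if (parse_opt("chain_mode:1") == 0)
			printf("chain_mode = %d\n", chain_mode);
		return 0;
	}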
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
new file mode 100644
index 000000000000..93d4beff92c7
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -0,0 +1,215 @@
1/*******************************************************************************
2 PTP 1588 clock using the STMMAC.
3
4 Copyright (C) 2013 Vayavya Labs Pvt Ltd
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
23*******************************************************************************/
24#include "stmmac.h"
25#include "stmmac_ptp.h"
26
27/**
28 * stmmac_adjust_freq
29 *
30 * @ptp: pointer to ptp_clock_info structure
 31 * @ppb: desired period change in parts per billion
 32 *
 33 * Description: this function will adjust the frequency of the hardware clock.
34 */
35static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb)
36{
37 struct stmmac_priv *priv =
38 container_of(ptp, struct stmmac_priv, ptp_clock_ops);
39 unsigned long flags;
40 u32 diff, addend;
41 int neg_adj = 0;
42 u64 adj;
43
44 if (ppb < 0) {
45 neg_adj = 1;
46 ppb = -ppb;
47 }
48
49 addend = priv->default_addend;
50 adj = addend;
51 adj *= ppb;
52 diff = div_u64(adj, 1000000000ULL);
53 addend = neg_adj ? (addend - diff) : (addend + diff);
54
55 spin_lock_irqsave(&priv->ptp_lock, flags);
56
57 priv->hw->ptp->config_addend(priv->ioaddr, addend);
58
 59 spin_unlock_irqrestore(&priv->ptp_lock, flags);
60
61 return 0;
62}
63
64/**
65 * stmmac_adjust_time
66 *
67 * @ptp: pointer to ptp_clock_info structure
68 * @delta: desired change in nanoseconds
69 *
70 * Description: this function will shift/adjust the hardware clock time.
71 */
72static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
73{
74 struct stmmac_priv *priv =
75 container_of(ptp, struct stmmac_priv, ptp_clock_ops);
76 unsigned long flags;
77 u32 sec, nsec;
 78 u32 quotient, remainder;
79 int neg_adj = 0;
80
81 if (delta < 0) {
82 neg_adj = 1;
83 delta = -delta;
84 }
85
 86 quotient = div_u64_rem(delta, 1000000000ULL, &remainder);
 87 sec = quotient;
 88 nsec = remainder;
89
90 spin_lock_irqsave(&priv->ptp_lock, flags);
91
92 priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj);
93
 94 spin_unlock_irqrestore(&priv->ptp_lock, flags);
95
96 return 0;
97}
98
99/**
100 * stmmac_get_time
101 *
102 * @ptp: pointer to ptp_clock_info structure
103 * @ts: pointer to hold time/result
104 *
105 * Description: this function will read the current time from the
106 * hardware clock and store it in @ts.
107 */
108static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec *ts)
109{
110 struct stmmac_priv *priv =
111 container_of(ptp, struct stmmac_priv, ptp_clock_ops);
112 unsigned long flags;
113 u64 ns;
 114 u32 remainder;
115
116 spin_lock_irqsave(&priv->ptp_lock, flags);
117
118 ns = priv->hw->ptp->get_systime(priv->ioaddr);
119
120 spin_unlock_irqrestore(&priv->ptp_lock, flags);
121
 122 ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
 123 ts->tv_nsec = remainder;
124
125 return 0;
126}
127
128/**
129 * stmmac_set_time
130 *
131 * @ptp: pointer to ptp_clock_info structure
132 * @ts: time value to set
133 *
134 * Description: this function will set the current time on the
135 * hardware clock.
136 */
137static int stmmac_set_time(struct ptp_clock_info *ptp,
138 const struct timespec *ts)
139{
140 struct stmmac_priv *priv =
141 container_of(ptp, struct stmmac_priv, ptp_clock_ops);
142 unsigned long flags;
143
144 spin_lock_irqsave(&priv->ptp_lock, flags);
145
146 priv->hw->ptp->init_systime(priv->ioaddr, ts->tv_sec, ts->tv_nsec);
147
148 spin_unlock_irqrestore(&priv->ptp_lock, flags);
149
150 return 0;
151}
152
153static int stmmac_enable(struct ptp_clock_info *ptp,
154 struct ptp_clock_request *rq, int on)
155{
156 return -EOPNOTSUPP;
157}
158
159/* structure describing a PTP hardware clock */
160static struct ptp_clock_info stmmac_ptp_clock_ops = {
161 .owner = THIS_MODULE,
162 .name = "stmmac_ptp_clock",
163 .max_adj = 62500000,
164 .n_alarm = 0,
165 .n_ext_ts = 0,
166 .n_per_out = 0,
167 .pps = 0,
168 .adjfreq = stmmac_adjust_freq,
169 .adjtime = stmmac_adjust_time,
170 .gettime = stmmac_get_time,
171 .settime = stmmac_set_time,
172 .enable = stmmac_enable,
173};
174
175/**
176 * stmmac_ptp_register
177 *
 178 * @priv: pointer to the private driver structure
 179 *
 180 * Description: this function will register the ptp clock driver
 181 * to the kernel. It also does some housekeeping work.
182 */
183int stmmac_ptp_register(struct stmmac_priv *priv)
184{
185 spin_lock_init(&priv->ptp_lock);
186 priv->ptp_clock_ops = stmmac_ptp_clock_ops;
187
188 priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops,
189 priv->device);
190 if (IS_ERR(priv->ptp_clock)) {
191 priv->ptp_clock = NULL;
192 pr_err("ptp_clock_register() failed on %s\n", priv->dev->name);
193 } else
194 pr_debug("Added PTP HW clock successfully on %s\n",
195 priv->dev->name);
196
197 return 0;
198}
199
200/**
201 * stmmac_ptp_unregister
202 *
 203 * @priv: pointer to the private driver structure
204 *
205 * Description: this function will remove/unregister the ptp clock driver
206 * from the kernel.
207 */
208void stmmac_ptp_unregister(struct stmmac_priv *priv)
209{
210 if (priv->ptp_clock) {
211 ptp_clock_unregister(priv->ptp_clock);
212 pr_debug("Removed PTP HW clock successfully on %s\n",
213 priv->dev->name);
214 }
215}
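The adjfreq callback above uses the standard fine-correction scheme: scale the current addend by |ppb| / 1e9 in 64-bit arithmetic, then add or subtract the difference before writing it back. A self-contained sketch of that arithmetic (the starting addend is a made-up example, not the driver's default_addend):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t addend = 0x80000000u;	/* hypothetical default_addend */
		int32_t ppb = -61035;		/* requested correction in parts per billion */
		int neg_adj = 0;
		uint64_t adj;
		uint32_t diff;

		if (ppb < 0) {
			neg_adj = 1;
			ppb = -ppb;
		}
		/* 64-bit multiply, then the divide div_u64() does in the driver */
		adj = (uint64_t)addend * (uint32_t)ppb;
		diff = (uint32_t)(adj / 1000000000ULL);
		addend = neg_adj ? addend - diff : addend + diff;
		printf("new addend = 0x%08x (diff %u)\n",
		       (unsigned int)addend, (unsigned int)diff);
		return 0;
	}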
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
new file mode 100644
index 000000000000..3dbc047622fa
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -0,0 +1,74 @@
1/******************************************************************************
2 PTP Header file
3
4 Copyright (C) 2013 Vayavya Labs Pvt Ltd
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
23******************************************************************************/
24
25#ifndef __STMMAC_PTP_H__
26#define __STMMAC_PTP_H__
27
28#define STMMAC_SYSCLOCK 62500000
29
30/* IEEE 1588 PTP register offsets */
31#define PTP_TCR 0x0700 /* Timestamp Control Reg */
32#define PTP_SSIR 0x0704 /* Sub-Second Increment Reg */
 33#define PTP_STSR 0x0708 /* System Time - Seconds Reg */
 34#define PTP_STNSR 0x070C /* System Time - Nanoseconds Reg */
 35#define PTP_STSUR 0x0710 /* System Time - Seconds Update Reg */
 36#define PTP_STNSUR 0x0714 /* System Time - Nanoseconds Update Reg */
37#define PTP_TAR 0x0718 /* Timestamp Addend Reg */
38#define PTP_TTSR 0x071C /* Target Time Seconds Reg */
39#define PTP_TTNSR 0x0720 /* Target Time Nanoseconds Reg */
40#define PTP_STHWSR 0x0724 /* System Time - Higher Word Seconds Reg */
41#define PTP_TSR 0x0728 /* Timestamp Status */
42
43#define PTP_STNSUR_ADDSUB_SHIFT 31
44
45/* PTP TCR defines */
46#define PTP_TCR_TSENA 0x00000001 /* Timestamp Enable */
47#define PTP_TCR_TSCFUPDT 0x00000002 /* Timestamp Fine/Coarse Update */
48#define PTP_TCR_TSINIT 0x00000004 /* Timestamp Initialize */
49#define PTP_TCR_TSUPDT 0x00000008 /* Timestamp Update */
50/* Timestamp Interrupt Trigger Enable */
51#define PTP_TCR_TSTRIG 0x00000010
52#define PTP_TCR_TSADDREG 0x00000020 /* Addend Reg Update */
53#define PTP_TCR_TSENALL 0x00000100 /* Enable Timestamp for All Frames */
54/* Timestamp Digital or Binary Rollover Control */
55#define PTP_TCR_TSCTRLSSR 0x00000200
56
57/* Enable PTP packet Processing for Version 2 Format */
58#define PTP_TCR_TSVER2ENA 0x00000400
59/* Enable Processing of PTP over Ethernet Frames */
60#define PTP_TCR_TSIPENA 0x00000800
61/* Enable Processing of PTP Frames Sent over IPv6-UDP */
62#define PTP_TCR_TSIPV6ENA 0x00001000
63/* Enable Processing of PTP Frames Sent over IPv4-UDP */
64#define PTP_TCR_TSIPV4ENA 0x00002000
65/* Enable Timestamp Snapshot for Event Messages */
66#define PTP_TCR_TSEVNTENA 0x00004000
67/* Enable Snapshot for Messages Relevant to Master */
68#define PTP_TCR_TSMSTRENA 0x00008000
69/* Select PTP packets for Taking Snapshots */
70#define PTP_TCR_SNAPTYPSEL_1 0x00010000
71/* Enable MAC address for PTP Frame Filtering */
72#define PTP_TCR_TSENMACADDR 0x00040000
73
74#endif /* __STMMAC_PTP_H__ */
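The defines above are only the register map; nothing in this header shows how the TCR bits get combined. A hypothetical illustration of assembling a Timestamp Control value from them (the chosen bit set, and the redefinitions that keep the example standalone, are assumptions, not driver code):

	#include <stdio.h>

	/* Local copies of the PTP_TCR bits defined above. */
	#define PTP_TCR_TSENA		0x00000001	/* Timestamp Enable */
	#define PTP_TCR_TSCFUPDT	0x00000002	/* Fine update */
	#define PTP_TCR_TSCTRLSSR	0x00000200	/* Digital rollover */
	#define PTP_TCR_TSVER2ENA	0x00000400	/* PTPv2 */
	#define PTP_TCR_TSIPENA		0x00000800	/* PTP over Ethernet */
	#define PTP_TCR_TSEVNTENA	0x00004000	/* Event messages only */

	int main(void)
	{
		unsigned int tcr = PTP_TCR_TSENA | PTP_TCR_TSCFUPDT |
				   PTP_TCR_TSCTRLSSR | PTP_TCR_TSVER2ENA |
				   PTP_TCR_TSIPENA | PTP_TCR_TSEVNTENA;

		/* In a driver this value would be written at offset PTP_TCR (0x700). */
		printf("PTP_TCR = 0x%08x\n", tcr);
		return 0;
	}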
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index e4c1c88e4c2a..95cff98d8a34 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6618,7 +6618,7 @@ static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
6618 (len << TXHDR_LEN_SHIFT) | 6618 (len << TXHDR_LEN_SHIFT) |
6619 ((l3off / 2) << TXHDR_L3START_SHIFT) | 6619 ((l3off / 2) << TXHDR_L3START_SHIFT) |
6620 (ihl << TXHDR_IHL_SHIFT) | 6620 (ihl << TXHDR_IHL_SHIFT) |
6621 ((eth_proto_inner < 1536) ? TXHDR_LLC : 0) | 6621 ((eth_proto_inner < ETH_P_802_3_MIN) ? TXHDR_LLC : 0) |
6622 ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) | 6622 ((eth_proto == ETH_P_8021Q) ? TXHDR_VLAN : 0) |
6623 (ipv6 ? TXHDR_IP_VER : 0) | 6623 (ipv6 ? TXHDR_IP_VER : 0) |
6624 csum_bits); 6624 csum_bits);
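Replacing the magic 1536 with ETH_P_802_3_MIN makes the intent explicit: a value below 0x0600 in the EtherType position is an 802.3 length field, so the frame is LLC-framed and gets the TXHDR_LLC bit. A small standalone sketch of the same test:

	#include <stdio.h>

	#define ETH_P_802_3_MIN 0x0600	/* same value the kernel constant carries */

	static int is_llc_frame(unsigned int eth_proto)
	{
		/* Below 0x0600 the field is an 802.3 length, not an EtherType. */
		return eth_proto < ETH_P_802_3_MIN;
	}

	int main(void)
	{
		printf("0x0800 (IPv4 EtherType): LLC=%d\n", is_llc_frame(0x0800));
		printf("0x0100 (802.3 length 256): LLC=%d\n", is_llc_frame(0x0100));
		return 0;
	}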
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 5fafca065305..054975939a18 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -1169,10 +1169,8 @@ static int bigmac_ether_init(struct platform_device *op,
1169 bp->bmac_block = dma_alloc_coherent(&bp->bigmac_op->dev, 1169 bp->bmac_block = dma_alloc_coherent(&bp->bigmac_op->dev,
1170 PAGE_SIZE, 1170 PAGE_SIZE,
1171 &bp->bblock_dvma, GFP_ATOMIC); 1171 &bp->bblock_dvma, GFP_ATOMIC);
1172 if (bp->bmac_block == NULL || bp->bblock_dvma == 0) { 1172 if (bp->bmac_block == NULL || bp->bblock_dvma == 0)
1173 printk(KERN_ERR "BIGMAC: Cannot allocate consistent DMA.\n");
1174 goto fail_and_cleanup; 1173 goto fail_and_cleanup;
1175 }
1176 1174
1177 /* Get the board revision of this BigMAC. */ 1175 /* Get the board revision of this BigMAC. */
1178 bp->board_rev = of_getintprop_default(bp->bigmac_op->dev.of_node, 1176 bp->board_rev = of_getintprop_default(bp->bigmac_op->dev.of_node,
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index a1bff49a8155..436fa9d5a071 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2752,10 +2752,8 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
2752 &hp->hblock_dvma, 2752 &hp->hblock_dvma,
2753 GFP_ATOMIC); 2753 GFP_ATOMIC);
2754 err = -ENOMEM; 2754 err = -ENOMEM;
2755 if (!hp->happy_block) { 2755 if (!hp->happy_block)
2756 printk(KERN_ERR "happymeal: Cannot allocate descriptors.\n");
2757 goto err_out_iounmap; 2756 goto err_out_iounmap;
2758 }
2759 2757
2760 /* Force check of the link first time we are brought up. */ 2758 /* Force check of the link first time we are brought up. */
2761 hp->linkcheck = 0; 2759 hp->linkcheck = 0;
@@ -3068,14 +3066,11 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
3068 hp->happy_bursts = DMA_BURSTBITS; 3066 hp->happy_bursts = DMA_BURSTBITS;
3069#endif 3067#endif
3070 3068
3071 hp->happy_block = (struct hmeal_init_block *) 3069 hp->happy_block = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3072 dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &hp->hblock_dvma, GFP_KERNEL); 3070 &hp->hblock_dvma, GFP_KERNEL);
3073
3074 err = -ENODEV; 3071 err = -ENODEV;
3075 if (!hp->happy_block) { 3072 if (!hp->happy_block)
3076 printk(KERN_ERR "happymeal(PCI): Cannot get hme init block.\n");
3077 goto err_out_iounmap; 3073 goto err_out_iounmap;
3078 }
3079 3074
3080 hp->linkcheck = 0; 3075 hp->linkcheck = 0;
3081 hp->timer_state = asleep; 3076 hp->timer_state = asleep;
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index 49bf3e2eb652..8182591bc187 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -414,7 +414,7 @@ static void qe_rx(struct sunqe *qep)
414 struct qe_rxd *this; 414 struct qe_rxd *this;
415 struct sunqe_buffers *qbufs = qep->buffers; 415 struct sunqe_buffers *qbufs = qep->buffers;
416 __u32 qbufs_dvma = qep->buffers_dvma; 416 __u32 qbufs_dvma = qep->buffers_dvma;
417 int elem = qep->rx_new, drops = 0; 417 int elem = qep->rx_new;
418 u32 flags; 418 u32 flags;
419 419
420 this = &rxbase[elem]; 420 this = &rxbase[elem];
@@ -436,7 +436,6 @@ static void qe_rx(struct sunqe *qep)
436 } else { 436 } else {
437 skb = netdev_alloc_skb(dev, len + 2); 437 skb = netdev_alloc_skb(dev, len + 2);
438 if (skb == NULL) { 438 if (skb == NULL) {
439 drops++;
440 dev->stats.rx_dropped++; 439 dev->stats.rx_dropped++;
441 } else { 440 } else {
442 skb_reserve(skb, 2); 441 skb_reserve(skb, 2);
@@ -456,8 +455,6 @@ static void qe_rx(struct sunqe *qep)
456 this = &rxbase[elem]; 455 this = &rxbase[elem];
457 } 456 }
458 qep->rx_new = elem; 457 qep->rx_new = elem;
459 if (drops)
460 printk(KERN_NOTICE "%s: Memory squeeze, deferring packet.\n", qep->dev->name);
461} 458}
462 459
463static void qe_tx_reclaim(struct sunqe *qep); 460static void qe_tx_reclaim(struct sunqe *qep);
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index e15cc71b826d..e8824cea093b 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1102,10 +1102,9 @@ static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
1102 dno = bdx_rxdb_available(db) - 1; 1102 dno = bdx_rxdb_available(db) - 1;
1103 while (dno > 0) { 1103 while (dno > 0) {
1104 skb = netdev_alloc_skb(priv->ndev, f->m.pktsz + NET_IP_ALIGN); 1104 skb = netdev_alloc_skb(priv->ndev, f->m.pktsz + NET_IP_ALIGN);
1105 if (!skb) { 1105 if (!skb)
1106 pr_err("NO MEM: netdev_alloc_skb failed\n");
1107 break; 1106 break;
1108 } 1107
1109 skb_reserve(skb, NET_IP_ALIGN); 1108 skb_reserve(skb, NET_IP_ALIGN);
1110 1109
1111 idx = bdx_rxdb_alloc_elem(db); 1110 idx = bdx_rxdb_alloc_elem(db);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 80cad06e5eb2..1d740423a053 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -126,6 +126,13 @@ do { \
126#define CPSW_FIFO_DUAL_MAC_MODE (1 << 15) 126#define CPSW_FIFO_DUAL_MAC_MODE (1 << 15)
127#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 15) 127#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 15)
128 128
129#define CPSW_INTPACEEN (0x3f << 16)
130#define CPSW_INTPRESCALE_MASK (0x7FF << 0)
131#define CPSW_CMINTMAX_CNT 63
132#define CPSW_CMINTMIN_CNT 2
133#define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT)
134#define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1)
135
129#define cpsw_enable_irq(priv) \ 136#define cpsw_enable_irq(priv) \
130 do { \ 137 do { \
131 u32 i; \ 138 u32 i; \
@@ -139,6 +146,10 @@ do { \
139 disable_irq_nosync(priv->irqs_table[i]); \ 146 disable_irq_nosync(priv->irqs_table[i]); \
140 } while (0); 147 } while (0);
141 148
149#define cpsw_slave_index(priv) \
150 ((priv->data.dual_emac) ? priv->emac_port : \
151 priv->data.active_slave)
152
142static int debug_level; 153static int debug_level;
143module_param(debug_level, int, 0); 154module_param(debug_level, int, 0);
144MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)"); 155MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");
@@ -160,6 +171,15 @@ struct cpsw_wr_regs {
160 u32 rx_en; 171 u32 rx_en;
161 u32 tx_en; 172 u32 tx_en;
162 u32 misc_en; 173 u32 misc_en;
174 u32 mem_allign1[8];
175 u32 rx_thresh_stat;
176 u32 rx_stat;
177 u32 tx_stat;
178 u32 misc_stat;
179 u32 mem_allign2[8];
180 u32 rx_imax;
181 u32 tx_imax;
182
163}; 183};
164 184
165struct cpsw_ss_regs { 185struct cpsw_ss_regs {
@@ -314,6 +334,8 @@ struct cpsw_priv {
314 struct cpsw_host_regs __iomem *host_port_regs; 334 struct cpsw_host_regs __iomem *host_port_regs;
315 u32 msg_enable; 335 u32 msg_enable;
316 u32 version; 336 u32 version;
337 u32 coal_intvl;
338 u32 bus_freq_mhz;
317 struct net_device_stats stats; 339 struct net_device_stats stats;
318 int rx_packet_max; 340 int rx_packet_max;
319 int host_port; 341 int host_port;
@@ -612,6 +634,77 @@ static void cpsw_adjust_link(struct net_device *ndev)
612 } 634 }
613} 635}
614 636
637static int cpsw_get_coalesce(struct net_device *ndev,
638 struct ethtool_coalesce *coal)
639{
640 struct cpsw_priv *priv = netdev_priv(ndev);
641
642 coal->rx_coalesce_usecs = priv->coal_intvl;
643 return 0;
644}
645
646static int cpsw_set_coalesce(struct net_device *ndev,
647 struct ethtool_coalesce *coal)
648{
649 struct cpsw_priv *priv = netdev_priv(ndev);
650 u32 int_ctrl;
651 u32 num_interrupts = 0;
652 u32 prescale = 0;
653 u32 addnl_dvdr = 1;
654 u32 coal_intvl = 0;
655
656 if (!coal->rx_coalesce_usecs)
657 return -EINVAL;
658
659 coal_intvl = coal->rx_coalesce_usecs;
660
661 int_ctrl = readl(&priv->wr_regs->int_control);
662 prescale = priv->bus_freq_mhz * 4;
663
664 if (coal_intvl < CPSW_CMINTMIN_INTVL)
665 coal_intvl = CPSW_CMINTMIN_INTVL;
666
667 if (coal_intvl > CPSW_CMINTMAX_INTVL) {
668 /* Interrupt pacer works with 4us Pulse, we can
669 * throttle further by dilating the 4us pulse.
670 */
671 addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;
672
673 if (addnl_dvdr > 1) {
674 prescale *= addnl_dvdr;
675 if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
676 coal_intvl = (CPSW_CMINTMAX_INTVL
677 * addnl_dvdr);
678 } else {
679 addnl_dvdr = 1;
680 coal_intvl = CPSW_CMINTMAX_INTVL;
681 }
682 }
683
684 num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
685 writel(num_interrupts, &priv->wr_regs->rx_imax);
686 writel(num_interrupts, &priv->wr_regs->tx_imax);
687
688 int_ctrl |= CPSW_INTPACEEN;
689 int_ctrl &= (~CPSW_INTPRESCALE_MASK);
690 int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);
691 writel(int_ctrl, &priv->wr_regs->int_control);
692
693 cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
694 if (priv->data.dual_emac) {
695 int i;
696
697 for (i = 0; i < priv->data.slaves; i++) {
698 priv = netdev_priv(priv->slaves[i].ndev);
699 priv->coal_intvl = coal_intvl;
700 }
701 } else {
702 priv->coal_intvl = coal_intvl;
703 }
704
705 return 0;
706}
707
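cpsw_set_coalesce converts the requested microsecond interval into an interrupts-per-millisecond ceiling for rx_imax/tx_imax, while the prescaler tracks the pacer's 4us pulse. A worked example of that arithmetic under an assumed 250 MHz bus clock (both inputs are illustrative):

	#include <stdio.h>

	int main(void)
	{
		unsigned int bus_freq_mhz = 250;	/* hypothetical bus clock */
		unsigned int coal_intvl = 500;		/* requested usecs (= CPSW_CMINTMAX_INTVL) */
		unsigned int prescale = bus_freq_mhz * 4;	/* ticks per 4us pulse */
		unsigned int addnl_dvdr = 1;		/* no pulse dilation needed here */
		unsigned int num_interrupts = (1000 * addnl_dvdr) / coal_intvl;

		/* num_interrupts = 2: at most two interrupts per millisecond */
		printf("prescale=%u, rx/tx_imax=%u\n", prescale, num_interrupts);
		return 0;
	}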
615static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val) 708static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val)
616{ 709{
617 static char *leader = "........................................"; 710 static char *leader = "........................................";
@@ -834,6 +927,14 @@ static int cpsw_ndo_open(struct net_device *ndev)
834 cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i); 927 cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);
835 } 928 }
836 929
930 /* Enable Interrupt pacing if configured */
931 if (priv->coal_intvl != 0) {
932 struct ethtool_coalesce coal;
933
934 coal.rx_coalesce_usecs = (priv->coal_intvl << 4);
935 cpsw_set_coalesce(ndev, &coal);
936 }
937
837 cpdma_ctlr_start(priv->dma); 938 cpdma_ctlr_start(priv->dma);
838 cpsw_intr_enable(priv); 939 cpsw_intr_enable(priv);
839 napi_enable(&priv->napi); 940 napi_enable(&priv->napi);
@@ -942,7 +1043,7 @@ static void cpsw_ndo_change_rx_flags(struct net_device *ndev, int flags)
942 1043
943static void cpsw_hwtstamp_v1(struct cpsw_priv *priv) 1044static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
944{ 1045{
945 struct cpsw_slave *slave = &priv->slaves[priv->data.cpts_active_slave]; 1046 struct cpsw_slave *slave = &priv->slaves[priv->data.active_slave];
946 u32 ts_en, seq_id; 1047 u32 ts_en, seq_id;
947 1048
948 if (!priv->cpts->tx_enable && !priv->cpts->rx_enable) { 1049 if (!priv->cpts->tx_enable && !priv->cpts->rx_enable) {
@@ -971,7 +1072,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
971 if (priv->data.dual_emac) 1072 if (priv->data.dual_emac)
972 slave = &priv->slaves[priv->emac_port]; 1073 slave = &priv->slaves[priv->emac_port];
973 else 1074 else
974 slave = &priv->slaves[priv->data.cpts_active_slave]; 1075 slave = &priv->slaves[priv->data.active_slave];
975 1076
976 ctrl = slave_read(slave, CPSW2_CONTROL); 1077 ctrl = slave_read(slave, CPSW2_CONTROL);
977 ctrl &= ~CTRL_ALL_TS_MASK; 1078 ctrl &= ~CTRL_ALL_TS_MASK;
@@ -1056,14 +1157,26 @@ static int cpsw_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
1056 1157
1057static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd) 1158static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
1058{ 1159{
1160 struct cpsw_priv *priv = netdev_priv(dev);
1161 struct mii_ioctl_data *data = if_mii(req);
1162 int slave_no = cpsw_slave_index(priv);
1163
1059 if (!netif_running(dev)) 1164 if (!netif_running(dev))
1060 return -EINVAL; 1165 return -EINVAL;
1061 1166
1167 switch (cmd) {
1062#ifdef CONFIG_TI_CPTS 1168#ifdef CONFIG_TI_CPTS
1063 if (cmd == SIOCSHWTSTAMP) 1169 case SIOCSHWTSTAMP:
1064 return cpsw_hwtstamp_ioctl(dev, req); 1170 return cpsw_hwtstamp_ioctl(dev, req);
1065#endif 1171#endif
1066 return -ENOTSUPP; 1172 case SIOCGMIIPHY:
1173 data->phy_id = priv->slaves[slave_no].phy->addr;
1174 break;
1175 default:
1176 return -ENOTSUPP;
1177 }
1178
1179 return 0;
1067} 1180}
1068 1181
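With the SIOCGMIIPHY case added, userspace can read the active slave's PHY address through the standard MII ioctl path. A usage sketch (the interface name is an example; this is the generic MII ioctl pattern, not cpsw-specific code):

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <linux/mii.h>
	#include <linux/sockios.h>

	int main(void)
	{
		struct ifreq ifr;
		/* Same overlay the kernel's if_mii() applies on its side. */
		struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_ifru;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			return 1;
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		if (ioctl(fd, SIOCGMIIPHY, &ifr) == 0)
			printf("PHY address: %d\n", mii->phy_id);
		close(fd);
		return 0;
	}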
1069static void cpsw_ndo_tx_timeout(struct net_device *ndev) 1182static void cpsw_ndo_tx_timeout(struct net_device *ndev)
@@ -1244,12 +1357,39 @@ static int cpsw_get_ts_info(struct net_device *ndev,
1244 return 0; 1357 return 0;
1245} 1358}
1246 1359
1360static int cpsw_get_settings(struct net_device *ndev,
1361 struct ethtool_cmd *ecmd)
1362{
1363 struct cpsw_priv *priv = netdev_priv(ndev);
1364 int slave_no = cpsw_slave_index(priv);
1365
1366 if (priv->slaves[slave_no].phy)
1367 return phy_ethtool_gset(priv->slaves[slave_no].phy, ecmd);
1368 else
1369 return -EOPNOTSUPP;
1370}
1371
1372static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
1373{
1374 struct cpsw_priv *priv = netdev_priv(ndev);
1375 int slave_no = cpsw_slave_index(priv);
1376
1377 if (priv->slaves[slave_no].phy)
1378 return phy_ethtool_sset(priv->slaves[slave_no].phy, ecmd);
1379 else
1380 return -EOPNOTSUPP;
1381}
1382
1247static const struct ethtool_ops cpsw_ethtool_ops = { 1383static const struct ethtool_ops cpsw_ethtool_ops = {
1248 .get_drvinfo = cpsw_get_drvinfo, 1384 .get_drvinfo = cpsw_get_drvinfo,
1249 .get_msglevel = cpsw_get_msglevel, 1385 .get_msglevel = cpsw_get_msglevel,
1250 .set_msglevel = cpsw_set_msglevel, 1386 .set_msglevel = cpsw_set_msglevel,
1251 .get_link = ethtool_op_get_link, 1387 .get_link = ethtool_op_get_link,
1252 .get_ts_info = cpsw_get_ts_info, 1388 .get_ts_info = cpsw_get_ts_info,
1389 .get_settings = cpsw_get_settings,
1390 .set_settings = cpsw_set_settings,
1391 .get_coalesce = cpsw_get_coalesce,
1392 .set_coalesce = cpsw_set_coalesce,
1253}; 1393};
1254 1394
1255static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv, 1395static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
@@ -1282,12 +1422,12 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
1282 } 1422 }
1283 data->slaves = prop; 1423 data->slaves = prop;
1284 1424
1285 if (of_property_read_u32(node, "cpts_active_slave", &prop)) { 1425 if (of_property_read_u32(node, "active_slave", &prop)) {
1286 pr_err("Missing cpts_active_slave property in the DT.\n"); 1426 pr_err("Missing active_slave property in the DT.\n");
1287 ret = -EINVAL; 1427 ret = -EINVAL;
1288 goto error_ret; 1428 goto error_ret;
1289 } 1429 }
1290 data->cpts_active_slave = prop; 1430 data->active_slave = prop;
1291 1431
1292 if (of_property_read_u32(node, "cpts_clock_mult", &prop)) { 1432 if (of_property_read_u32(node, "cpts_clock_mult", &prop)) {
1293 pr_err("Missing cpts_clock_mult property in the DT.\n"); 1433 pr_err("Missing cpts_clock_mult property in the DT.\n");
@@ -1437,6 +1577,9 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
1437 priv_sl2->slaves = priv->slaves; 1577 priv_sl2->slaves = priv->slaves;
1438 priv_sl2->clk = priv->clk; 1578 priv_sl2->clk = priv->clk;
1439 1579
1580 priv_sl2->coal_intvl = 0;
1581 priv_sl2->bus_freq_mhz = priv->bus_freq_mhz;
1582
1440 priv_sl2->cpsw_res = priv->cpsw_res; 1583 priv_sl2->cpsw_res = priv->cpsw_res;
1441 priv_sl2->regs = priv->regs; 1584 priv_sl2->regs = priv->regs;
1442 priv_sl2->host_port = priv->host_port; 1585 priv_sl2->host_port = priv->host_port;
@@ -1546,6 +1689,8 @@ static int cpsw_probe(struct platform_device *pdev)
1546 ret = -ENODEV; 1689 ret = -ENODEV;
1547 goto clean_slave_ret; 1690 goto clean_slave_ret;
1548 } 1691 }
1692 priv->coal_intvl = 0;
1693 priv->bus_freq_mhz = clk_get_rate(priv->clk) / 1000000;
1549 1694
1550 priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1695 priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1551 if (!priv->cpsw_res) { 1696 if (!priv->cpsw_res) {
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 72300bc9e378..6a0b47715a84 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1438,7 +1438,7 @@ static int emac_poll(struct napi_struct *napi, int budget)
1438 * Polled functionality used by netconsole and others in non interrupt mode 1438 * Polled functionality used by netconsole and others in non interrupt mode
1439 * 1439 *
1440 */ 1440 */
1441void emac_poll_controller(struct net_device *ndev) 1441static void emac_poll_controller(struct net_device *ndev)
1442{ 1442{
1443 struct emac_priv *priv = netdev_priv(ndev); 1443 struct emac_priv *priv = netdev_priv(ndev);
1444 1444
@@ -1865,21 +1865,18 @@ static int davinci_emac_probe(struct platform_device *pdev)
1865 1865
1866 1866
1867 /* obtain emac clock from kernel */ 1867 /* obtain emac clock from kernel */
1868 emac_clk = clk_get(&pdev->dev, NULL); 1868 emac_clk = devm_clk_get(&pdev->dev, NULL);
1869 if (IS_ERR(emac_clk)) { 1869 if (IS_ERR(emac_clk)) {
1870 dev_err(&pdev->dev, "failed to get EMAC clock\n"); 1870 dev_err(&pdev->dev, "failed to get EMAC clock\n");
1871 return -EBUSY; 1871 return -EBUSY;
1872 } 1872 }
1873 emac_bus_frequency = clk_get_rate(emac_clk); 1873 emac_bus_frequency = clk_get_rate(emac_clk);
1874 clk_put(emac_clk);
1875 1874
1876 /* TODO: Probe PHY here if possible */ 1875 /* TODO: Probe PHY here if possible */
1877 1876
1878 ndev = alloc_etherdev(sizeof(struct emac_priv)); 1877 ndev = alloc_etherdev(sizeof(struct emac_priv));
1879 if (!ndev) { 1878 if (!ndev)
1880 rc = -ENOMEM; 1879 return -ENOMEM;
1881 goto no_ndev;
1882 }
1883 1880
1884 platform_set_drvdata(pdev, ndev); 1881 platform_set_drvdata(pdev, ndev);
1885 priv = netdev_priv(ndev); 1882 priv = netdev_priv(ndev);
@@ -1893,7 +1890,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
1893 if (!pdata) { 1890 if (!pdata) {
1894 dev_err(&pdev->dev, "no platform data\n"); 1891 dev_err(&pdev->dev, "no platform data\n");
1895 rc = -ENODEV; 1892 rc = -ENODEV;
1896 goto probe_quit; 1893 goto no_pdata;
1897 } 1894 }
1898 1895
1899 /* MAC addr and PHY mask , RMII enable info from platform_data */ 1896 /* MAC addr and PHY mask , RMII enable info from platform_data */
@@ -1913,23 +1910,23 @@ static int davinci_emac_probe(struct platform_device *pdev)
1913 if (!res) { 1910 if (!res) {
1914 dev_err(&pdev->dev,"error getting res\n"); 1911 dev_err(&pdev->dev,"error getting res\n");
1915 rc = -ENOENT; 1912 rc = -ENOENT;
1916 goto probe_quit; 1913 goto no_pdata;
1917 } 1914 }
1918 1915
1919 priv->emac_base_phys = res->start + pdata->ctrl_reg_offset; 1916 priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
1920 size = resource_size(res); 1917 size = resource_size(res);
1921 if (!request_mem_region(res->start, size, ndev->name)) { 1918 if (!devm_request_mem_region(&pdev->dev, res->start,
1919 size, ndev->name)) {
1922 dev_err(&pdev->dev, "failed request_mem_region() for regs\n"); 1920 dev_err(&pdev->dev, "failed request_mem_region() for regs\n");
1923 rc = -ENXIO; 1921 rc = -ENXIO;
1924 goto probe_quit; 1922 goto no_pdata;
1925 } 1923 }
1926 1924
1927 priv->remap_addr = ioremap(res->start, size); 1925 priv->remap_addr = devm_ioremap(&pdev->dev, res->start, size);
1928 if (!priv->remap_addr) { 1926 if (!priv->remap_addr) {
1929 dev_err(&pdev->dev, "unable to map IO\n"); 1927 dev_err(&pdev->dev, "unable to map IO\n");
1930 rc = -ENOMEM; 1928 rc = -ENOMEM;
1931 release_mem_region(res->start, size); 1929 goto no_pdata;
1932 goto probe_quit;
1933 } 1930 }
1934 priv->emac_base = priv->remap_addr + pdata->ctrl_reg_offset; 1931 priv->emac_base = priv->remap_addr + pdata->ctrl_reg_offset;
1935 ndev->base_addr = (unsigned long)priv->remap_addr; 1932 ndev->base_addr = (unsigned long)priv->remap_addr;
@@ -1962,7 +1959,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
1962 if (!priv->dma) { 1959 if (!priv->dma) {
1963 dev_err(&pdev->dev, "error initializing DMA\n"); 1960 dev_err(&pdev->dev, "error initializing DMA\n");
1964 rc = -ENOMEM; 1961 rc = -ENOMEM;
1965 goto no_dma; 1962 goto no_pdata;
1966 } 1963 }
1967 1964
1968 priv->txchan = cpdma_chan_create(priv->dma, tx_chan_num(EMAC_DEF_TX_CH), 1965 priv->txchan = cpdma_chan_create(priv->dma, tx_chan_num(EMAC_DEF_TX_CH),
@@ -1971,14 +1968,14 @@ static int davinci_emac_probe(struct platform_device *pdev)
1971 emac_rx_handler); 1968 emac_rx_handler);
1972 if (WARN_ON(!priv->txchan || !priv->rxchan)) { 1969 if (WARN_ON(!priv->txchan || !priv->rxchan)) {
1973 rc = -ENOMEM; 1970 rc = -ENOMEM;
1974 goto no_irq_res; 1971 goto no_cpdma_chan;
1975 } 1972 }
1976 1973
1977 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1974 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1978 if (!res) { 1975 if (!res) {
1979 dev_err(&pdev->dev, "error getting irq res\n"); 1976 dev_err(&pdev->dev, "error getting irq res\n");
1980 rc = -ENOENT; 1977 rc = -ENOENT;
1981 goto no_irq_res; 1978 goto no_cpdma_chan;
1982 } 1979 }
1983 ndev->irq = res->start; 1980 ndev->irq = res->start;
1984 1981
@@ -2000,7 +1997,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
2000 if (rc) { 1997 if (rc) {
2001 dev_err(&pdev->dev, "error in register_netdev\n"); 1998 dev_err(&pdev->dev, "error in register_netdev\n");
2002 rc = -ENODEV; 1999 rc = -ENODEV;
2003 goto no_irq_res; 2000 goto no_cpdma_chan;
2004 } 2001 }
2005 2002
2006 2003
@@ -2015,20 +2012,14 @@ static int davinci_emac_probe(struct platform_device *pdev)
2015 2012
2016 return 0; 2013 return 0;
2017 2014
2018no_irq_res: 2015no_cpdma_chan:
2019 if (priv->txchan) 2016 if (priv->txchan)
2020 cpdma_chan_destroy(priv->txchan); 2017 cpdma_chan_destroy(priv->txchan);
2021 if (priv->rxchan) 2018 if (priv->rxchan)
2022 cpdma_chan_destroy(priv->rxchan); 2019 cpdma_chan_destroy(priv->rxchan);
2023 cpdma_ctlr_destroy(priv->dma); 2020 cpdma_ctlr_destroy(priv->dma);
2024no_dma: 2021no_pdata:
2025 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2026 release_mem_region(res->start, resource_size(res));
2027 iounmap(priv->remap_addr);
2028
2029probe_quit:
2030 free_netdev(ndev); 2022 free_netdev(ndev);
2031no_ndev:
2032 return rc; 2023 return rc;
2033} 2024}
2034 2025
@@ -2041,14 +2032,12 @@ no_ndev:
2041 */ 2032 */
2042static int davinci_emac_remove(struct platform_device *pdev) 2033static int davinci_emac_remove(struct platform_device *pdev)
2043{ 2034{
2044 struct resource *res;
2045 struct net_device *ndev = platform_get_drvdata(pdev); 2035 struct net_device *ndev = platform_get_drvdata(pdev);
2046 struct emac_priv *priv = netdev_priv(ndev); 2036 struct emac_priv *priv = netdev_priv(ndev);
2047 2037
2048 dev_notice(&ndev->dev, "DaVinci EMAC: davinci_emac_remove()\n"); 2038 dev_notice(&ndev->dev, "DaVinci EMAC: davinci_emac_remove()\n");
2049 2039
2050 platform_set_drvdata(pdev, NULL); 2040 platform_set_drvdata(pdev, NULL);
2051 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2052 2041
2053 if (priv->txchan) 2042 if (priv->txchan)
2054 cpdma_chan_destroy(priv->txchan); 2043 cpdma_chan_destroy(priv->txchan);
@@ -2056,10 +2045,7 @@ static int davinci_emac_remove(struct platform_device *pdev)
2056 cpdma_chan_destroy(priv->rxchan); 2045 cpdma_chan_destroy(priv->rxchan);
2057 cpdma_ctlr_destroy(priv->dma); 2046 cpdma_ctlr_destroy(priv->dma);
2058 2047
2059 release_mem_region(res->start, resource_size(res));
2060
2061 unregister_netdev(ndev); 2048 unregister_netdev(ndev);
2062 iounmap(priv->remap_addr);
2063 free_netdev(ndev); 2049 free_netdev(ndev);
2064 2050
2065 return 0; 2051 return 0;
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 22725386c5de..bdda36f8e541 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -1911,10 +1911,8 @@ static void tlan_reset_lists(struct net_device *dev)
1911 list->frame_size = TLAN_MAX_FRAME_SIZE; 1911 list->frame_size = TLAN_MAX_FRAME_SIZE;
1912 list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER; 1912 list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
1913 skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5); 1913 skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
1914 if (!skb) { 1914 if (!skb)
1915 netdev_err(dev, "Out of memory for received data\n");
1916 break; 1915 break;
1917 }
1918 1916
1919 list->buffer[0].address = pci_map_single(priv->pci_dev, 1917 list->buffer[0].address = pci_map_single(priv->pci_dev,
1920 skb->data, 1918 skb->data,
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 445c0595c997..ad32af67e618 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -58,13 +58,6 @@ MODULE_DESCRIPTION("Gelic Network driver");
58MODULE_LICENSE("GPL"); 58MODULE_LICENSE("GPL");
59 59
60 60
61static inline void gelic_card_enable_rxdmac(struct gelic_card *card);
62static inline void gelic_card_disable_rxdmac(struct gelic_card *card);
63static inline void gelic_card_disable_txdmac(struct gelic_card *card);
64static inline void gelic_card_reset_chain(struct gelic_card *card,
65 struct gelic_descr_chain *chain,
66 struct gelic_descr *start_descr);
67
68/* set irq_mask */ 61/* set irq_mask */
69int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask) 62int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask)
70{ 63{
@@ -78,12 +71,12 @@ int gelic_card_set_irq_mask(struct gelic_card *card, u64 mask)
78 return status; 71 return status;
79} 72}
80 73
81static inline void gelic_card_rx_irq_on(struct gelic_card *card) 74static void gelic_card_rx_irq_on(struct gelic_card *card)
82{ 75{
83 card->irq_mask |= GELIC_CARD_RXINT; 76 card->irq_mask |= GELIC_CARD_RXINT;
84 gelic_card_set_irq_mask(card, card->irq_mask); 77 gelic_card_set_irq_mask(card, card->irq_mask);
85} 78}
86static inline void gelic_card_rx_irq_off(struct gelic_card *card) 79static void gelic_card_rx_irq_off(struct gelic_card *card)
87{ 80{
88 card->irq_mask &= ~GELIC_CARD_RXINT; 81 card->irq_mask &= ~GELIC_CARD_RXINT;
89 gelic_card_set_irq_mask(card, card->irq_mask); 82 gelic_card_set_irq_mask(card, card->irq_mask);
@@ -127,6 +120,120 @@ static int gelic_card_set_link_mode(struct gelic_card *card, int mode)
127 return 0; 120 return 0;
128} 121}
129 122
123/**
124 * gelic_card_disable_txdmac - disables the transmit DMA controller
125 * @card: card structure
126 *
127 * gelic_card_disable_txdmac terminates processing on the DMA controller by
 128 * turning off DMA and issuing a force end
129 */
130static void gelic_card_disable_txdmac(struct gelic_card *card)
131{
132 int status;
133
134 /* this hvc blocks until the DMA in progress really stopped */
135 status = lv1_net_stop_tx_dma(bus_id(card), dev_id(card));
136 if (status)
137 dev_err(ctodev(card),
138 "lv1_net_stop_tx_dma failed, status=%d\n", status);
139}
140
141/**
142 * gelic_card_enable_rxdmac - enables the receive DMA controller
143 * @card: card structure
144 *
145 * gelic_card_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
146 * in the GDADMACCNTR register
147 */
148static void gelic_card_enable_rxdmac(struct gelic_card *card)
149{
150 int status;
151
152#ifdef DEBUG
153 if (gelic_descr_get_status(card->rx_chain.head) !=
154 GELIC_DESCR_DMA_CARDOWNED) {
155 printk(KERN_ERR "%s: status=%x\n", __func__,
156 be32_to_cpu(card->rx_chain.head->dmac_cmd_status));
157 printk(KERN_ERR "%s: nextphy=%x\n", __func__,
158 be32_to_cpu(card->rx_chain.head->next_descr_addr));
159 printk(KERN_ERR "%s: head=%p\n", __func__,
160 card->rx_chain.head);
161 }
162#endif
163 status = lv1_net_start_rx_dma(bus_id(card), dev_id(card),
164 card->rx_chain.head->bus_addr, 0);
165 if (status)
166 dev_info(ctodev(card),
167 "lv1_net_start_rx_dma failed, status=%d\n", status);
168}
169
170/**
171 * gelic_card_disable_rxdmac - disables the receive DMA controller
172 * @card: card structure
173 *
174 * gelic_card_disable_rxdmac terminates processing on the DMA controller by
 175 * turning off DMA and issuing a force end
176 */
177static void gelic_card_disable_rxdmac(struct gelic_card *card)
178{
179 int status;
180
181 /* this hvc blocks until the DMA in progress really stopped */
182 status = lv1_net_stop_rx_dma(bus_id(card), dev_id(card));
183 if (status)
184 dev_err(ctodev(card),
185 "lv1_net_stop_rx_dma failed, %d\n", status);
186}
187
188/**
189 * gelic_descr_set_status -- sets the status of a descriptor
190 * @descr: descriptor to change
191 * @status: status to set in the descriptor
192 *
193 * changes the status to the specified value. Doesn't change other bits
194 * in the status
195 */
196static void gelic_descr_set_status(struct gelic_descr *descr,
197 enum gelic_descr_dma_status status)
198{
199 descr->dmac_cmd_status = cpu_to_be32(status |
200 (be32_to_cpu(descr->dmac_cmd_status) &
201 ~GELIC_DESCR_DMA_STAT_MASK));
202 /*
203 * dma_cmd_status field is used to indicate whether the descriptor
204 * is valid or not.
205 * Usually caller of this function wants to inform that to the
206 * hardware, so we assure here the hardware sees the change.
207 */
208 wmb();
209}
210
211/**
212 * gelic_card_reset_chain - reset status of a descriptor chain
213 * @card: card structure
214 * @chain: address of chain
215 * @start_descr: address of descriptor array
216 *
217 * Reset the status of dma descriptors to ready state
218 * and re-initialize the hardware chain for later use
219 */
220static void gelic_card_reset_chain(struct gelic_card *card,
221 struct gelic_descr_chain *chain,
222 struct gelic_descr *start_descr)
223{
224 struct gelic_descr *descr;
225
226 for (descr = start_descr; start_descr != descr->next; descr++) {
227 gelic_descr_set_status(descr, GELIC_DESCR_DMA_CARDOWNED);
228 descr->next_descr_addr = cpu_to_be32(descr->next->bus_addr);
229 }
230
231 chain->head = start_descr;
232 chain->tail = (descr - 1);
233
234 (descr - 1)->next_descr_addr = 0;
235}
236
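The loop in gelic_card_reset_chain depends on the chain being circular: it walks the array and stops once the current descriptor's next pointer wraps back to start_descr, leaving the last element to the explicit (descr - 1) tail fix-ups. A minimal model of that termination condition (the four-element ring is illustrative):

	#include <stdio.h>

	struct descr {
		struct descr *next;
		int owned_by_card;
	};

	int main(void)
	{
		struct descr ring[4];
		struct descr *descr, *start_descr = ring;
		int i;

		for (i = 0; i < 4; i++) {
			ring[i].next = &ring[(i + 1) % 4];	/* circular link */
			ring[i].owned_by_card = 0;
		}

		/* Same iteration shape as the driver's reset loop. */
		for (descr = start_descr; start_descr != descr->next; descr++)
			descr->owned_by_card = 1;

		/* Prints 1 1 1 0: the final element is left to the tail fix-ups. */
		for (i = 0; i < 4; i++)
			printf("ring[%d] owned_by_card=%d\n", i, ring[i].owned_by_card);
		return 0;
	}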
130void gelic_card_up(struct gelic_card *card) 237void gelic_card_up(struct gelic_card *card)
131{ 238{
132 pr_debug("%s: called\n", __func__); 239 pr_debug("%s: called\n", __func__);
@@ -183,29 +290,6 @@ gelic_descr_get_status(struct gelic_descr *descr)
183} 290}
184 291
185/** 292/**
186 * gelic_descr_set_status -- sets the status of a descriptor
187 * @descr: descriptor to change
188 * @status: status to set in the descriptor
189 *
190 * changes the status to the specified value. Doesn't change other bits
191 * in the status
192 */
193static void gelic_descr_set_status(struct gelic_descr *descr,
194 enum gelic_descr_dma_status status)
195{
196 descr->dmac_cmd_status = cpu_to_be32(status |
197 (be32_to_cpu(descr->dmac_cmd_status) &
198 ~GELIC_DESCR_DMA_STAT_MASK));
199 /*
200 * dma_cmd_status field is used to indicate whether the descriptor
201 * is valid or not.
202 * Usually caller of this function wants to inform that to the
203 * hardware, so we assure here the hardware sees the change.
204 */
205 wmb();
206}
207
208/**
209 * gelic_card_free_chain - free descriptor chain 293 * gelic_card_free_chain - free descriptor chain
210 * @card: card structure 294 * @card: card structure
211 * @descr_in: address of desc 295 * @descr_in: address of desc
@@ -286,31 +370,6 @@ iommu_error:
286} 370}
287 371
288/** 372/**
289 * gelic_card_reset_chain - reset status of a descriptor chain
290 * @card: card structure
291 * @chain: address of chain
292 * @start_descr: address of descriptor array
293 *
294 * Reset the status of dma descriptors to ready state
295 * and re-initialize the hardware chain for later use
296 */
297static void gelic_card_reset_chain(struct gelic_card *card,
298 struct gelic_descr_chain *chain,
299 struct gelic_descr *start_descr)
300{
301 struct gelic_descr *descr;
302
303 for (descr = start_descr; start_descr != descr->next; descr++) {
304 gelic_descr_set_status(descr, GELIC_DESCR_DMA_CARDOWNED);
305 descr->next_descr_addr = cpu_to_be32(descr->next->bus_addr);
306 }
307
308 chain->head = start_descr;
309 chain->tail = (descr - 1);
310
311 (descr - 1)->next_descr_addr = 0;
312}
313/**
314 * gelic_descr_prepare_rx - reinitializes a rx descriptor 373 * gelic_descr_prepare_rx - reinitializes a rx descriptor
315 * @card: card structure 374 * @card: card structure
316 * @descr: descriptor to re-init 375 * @descr: descriptor to re-init
@@ -599,71 +658,6 @@ void gelic_net_set_multi(struct net_device *netdev)
599} 658}
600 659
601/** 660/**
602 * gelic_card_enable_rxdmac - enables the receive DMA controller
603 * @card: card structure
604 *
605 * gelic_card_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
606 * in the GDADMACCNTR register
607 */
608static inline void gelic_card_enable_rxdmac(struct gelic_card *card)
609{
610 int status;
611
612#ifdef DEBUG
613 if (gelic_descr_get_status(card->rx_chain.head) !=
614 GELIC_DESCR_DMA_CARDOWNED) {
615 printk(KERN_ERR "%s: status=%x\n", __func__,
616 be32_to_cpu(card->rx_chain.head->dmac_cmd_status));
617 printk(KERN_ERR "%s: nextphy=%x\n", __func__,
618 be32_to_cpu(card->rx_chain.head->next_descr_addr));
619 printk(KERN_ERR "%s: head=%p\n", __func__,
620 card->rx_chain.head);
621 }
622#endif
623 status = lv1_net_start_rx_dma(bus_id(card), dev_id(card),
624 card->rx_chain.head->bus_addr, 0);
625 if (status)
626 dev_info(ctodev(card),
627 "lv1_net_start_rx_dma failed, status=%d\n", status);
628}
629
630/**
631 * gelic_card_disable_rxdmac - disables the receive DMA controller
632 * @card: card structure
633 *
634 * gelic_card_disable_rxdmac terminates processing on the DMA controller by
 635 * turning off DMA and issuing a force end
636 */
637static inline void gelic_card_disable_rxdmac(struct gelic_card *card)
638{
639 int status;
640
641 /* this hvc blocks until the DMA in progress really stopped */
642 status = lv1_net_stop_rx_dma(bus_id(card), dev_id(card));
643 if (status)
644 dev_err(ctodev(card),
645 "lv1_net_stop_rx_dma failed, %d\n", status);
646}
647
648/**
649 * gelic_card_disable_txdmac - disables the transmit DMA controller
650 * @card: card structure
651 *
652 * gelic_card_disable_txdmac terminates processing on the DMA controller by
 653 * turning off DMA and issuing a force end
654 */
655static inline void gelic_card_disable_txdmac(struct gelic_card *card)
656{
657 int status;
658
659 /* this hvc blocks until the DMA in progress really stopped */
660 status = lv1_net_stop_tx_dma(bus_id(card), dev_id(card));
661 if (status)
662 dev_err(ctodev(card),
663 "lv1_net_stop_tx_dma failed, status=%d\n", status);
664}
665
666/**
667 * gelic_net_stop - called upon ifconfig down 661 * gelic_net_stop - called upon ifconfig down
668 * @netdev: interface device structure 662 * @netdev: interface device structure
669 * 663 *
@@ -746,7 +740,7 @@ static void gelic_descr_set_tx_cmdstat(struct gelic_descr *descr,
746 } 740 }
747} 741}
748 742
749static inline struct sk_buff *gelic_put_vlan_tag(struct sk_buff *skb, 743static struct sk_buff *gelic_put_vlan_tag(struct sk_buff *skb,
750 unsigned short tag) 744 unsigned short tag)
751{ 745{
752 struct vlan_ethhdr *veth; 746 struct vlan_ethhdr *veth;
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index f1b91fd7e41c..fef6b59e69c9 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -352,8 +352,7 @@ spider_net_init_chain(struct spider_net_card *card,
352 alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr); 352 alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr);
353 353
354 chain->hwring = dma_alloc_coherent(&card->pdev->dev, alloc_size, 354 chain->hwring = dma_alloc_coherent(&card->pdev->dev, alloc_size,
355 &chain->dma_addr, GFP_KERNEL); 355 &chain->dma_addr, GFP_KERNEL);
356
357 if (!chain->hwring) 356 if (!chain->hwring)
358 return -ENOMEM; 357 return -ENOMEM;
359 358
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 8fa947a2d929..3c69a0460832 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1308,27 +1308,16 @@ static int tsi108_open(struct net_device *dev)
1308 data->id, dev->irq, dev->name); 1308 data->id, dev->irq, dev->name);
1309 } 1309 }
1310 1310
1311 data->rxring = dma_alloc_coherent(NULL, rxring_size, 1311 data->rxring = dma_alloc_coherent(NULL, rxring_size, &data->rxdma,
1312 &data->rxdma, GFP_KERNEL); 1312 GFP_KERNEL | __GFP_ZERO);
1313 1313 if (!data->rxring)
1314 if (!data->rxring) {
1315 printk(KERN_DEBUG
1316 "TSI108_ETH: failed to allocate memory for rxring!\n");
1317 return -ENOMEM; 1314 return -ENOMEM;
1318 } else {
1319 memset(data->rxring, 0, rxring_size);
1320 }
1321
1322 data->txring = dma_alloc_coherent(NULL, txring_size,
1323 &data->txdma, GFP_KERNEL);
1324 1315
1316 data->txring = dma_alloc_coherent(NULL, txring_size, &data->txdma,
1317 GFP_KERNEL | __GFP_ZERO);
1325 if (!data->txring) { 1318 if (!data->txring) {
1326 printk(KERN_DEBUG
1327 "TSI108_ETH: failed to allocate memory for txring!\n");
1328 pci_free_consistent(0, rxring_size, data->rxring, data->rxdma); 1319 pci_free_consistent(0, rxring_size, data->rxring, data->rxdma);
1329 return -ENOMEM; 1320 return -ENOMEM;
1330 } else {
1331 memset(data->txring, 0, txring_size);
1332 } 1321 }
1333 1322
1334 for (i = 0; i < TSI108_RXRING_LEN; i++) { 1323 for (i = 0; i < TSI108_RXRING_LEN; i++) {
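This hunk, like several others in this series, folds the explicit memset into the allocation by passing __GFP_ZERO to dma_alloc_coherent. A userspace analogue of the same simplification, with calloc standing in for the zeroing allocator:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int main(void)
	{
		size_t ring_size = 256;	/* illustrative buffer size */

		/* Before: allocate, then zero by hand. */
		void *old_style = malloc(ring_size);
		if (!old_style)
			return 1;
		memset(old_style, 0, ring_size);

		/* After: one call that hands back zeroed memory. */
		void *new_style = calloc(1, ring_size);
		if (!new_style) {
			free(old_style);
			return 1;
		}

		printf("both buffers start zeroed; the second needs no memset\n");
		free(old_style);
		free(new_style);
		return 0;
	}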
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 545043cc4c0b..a518dcab396e 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -754,7 +754,7 @@ static int w5100_remove(struct platform_device *pdev)
754 return 0; 754 return 0;
755} 755}
756 756
757#ifdef CONFIG_PM 757#ifdef CONFIG_PM_SLEEP
758static int w5100_suspend(struct device *dev) 758static int w5100_suspend(struct device *dev)
759{ 759{
760 struct platform_device *pdev = to_platform_device(dev); 760 struct platform_device *pdev = to_platform_device(dev);
@@ -787,7 +787,7 @@ static int w5100_resume(struct device *dev)
787 } 787 }
788 return 0; 788 return 0;
789} 789}
790#endif /* CONFIG_PM */ 790#endif /* CONFIG_PM_SLEEP */
791 791
792static SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume); 792static SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume);
793 793
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 7cbd0e6fc6f3..6e00e3f94ce4 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -666,7 +666,7 @@ static int w5300_remove(struct platform_device *pdev)
666 return 0; 666 return 0;
667} 667}
668 668
669#ifdef CONFIG_PM 669#ifdef CONFIG_PM_SLEEP
670static int w5300_suspend(struct device *dev) 670static int w5300_suspend(struct device *dev)
671{ 671{
672 struct platform_device *pdev = to_platform_device(dev); 672 struct platform_device *pdev = to_platform_device(dev);
@@ -699,7 +699,7 @@ static int w5300_resume(struct device *dev)
699 } 699 }
700 return 0; 700 return 0;
701} 701}
702#endif /* CONFIG_PM */ 702#endif /* CONFIG_PM_SLEEP */
703 703
704static SIMPLE_DEV_PM_OPS(w5300_pm_ops, w5300_suspend, w5300_resume); 704static SIMPLE_DEV_PM_OPS(w5300_pm_ops, w5300_suspend, w5300_resume);
705 705
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 9fc2ada4c3c2..4a7c60f4c83d 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -245,39 +245,30 @@ static int temac_dma_bd_init(struct net_device *ndev)
245 /* returns a virtual address and a physical address. */ 245 /* returns a virtual address and a physical address. */
246 lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, 246 lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
247 sizeof(*lp->tx_bd_v) * TX_BD_NUM, 247 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
248 &lp->tx_bd_p, GFP_KERNEL); 248 &lp->tx_bd_p, GFP_KERNEL | __GFP_ZERO);
249 if (!lp->tx_bd_v) { 249 if (!lp->tx_bd_v)
250 dev_err(&ndev->dev,
251 "unable to allocate DMA TX buffer descriptors");
252 goto out; 250 goto out;
253 } 251
254 lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, 252 lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
255 sizeof(*lp->rx_bd_v) * RX_BD_NUM, 253 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
256 &lp->rx_bd_p, GFP_KERNEL); 254 &lp->rx_bd_p, GFP_KERNEL | __GFP_ZERO);
257 if (!lp->rx_bd_v) { 255 if (!lp->rx_bd_v)
258 dev_err(&ndev->dev,
259 "unable to allocate DMA RX buffer descriptors");
260 goto out; 256 goto out;
261 }
262 257
263 memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
264 for (i = 0; i < TX_BD_NUM; i++) { 258 for (i = 0; i < TX_BD_NUM; i++) {
265 lp->tx_bd_v[i].next = lp->tx_bd_p + 259 lp->tx_bd_v[i].next = lp->tx_bd_p +
266 sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM); 260 sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM);
267 } 261 }
268 262
269 memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
270 for (i = 0; i < RX_BD_NUM; i++) { 263 for (i = 0; i < RX_BD_NUM; i++) {
271 lp->rx_bd_v[i].next = lp->rx_bd_p + 264 lp->rx_bd_v[i].next = lp->rx_bd_p +
272 sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM); 265 sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);
273 266
274 skb = netdev_alloc_skb_ip_align(ndev, 267 skb = netdev_alloc_skb_ip_align(ndev,
275 XTE_MAX_JUMBO_FRAME_SIZE); 268 XTE_MAX_JUMBO_FRAME_SIZE);
276 269 if (!skb)
277 if (skb == 0) {
278 dev_err(&ndev->dev, "alloc_skb error %d\n", i);
279 goto out; 270 goto out;
280 } 271
281 lp->rx_skb[i] = skb; 272 lp->rx_skb[i] = skb;
282 /* returns physical address of skb->data */ 273 /* returns physical address of skb->data */
283 lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent, 274 lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
@@ -789,9 +780,7 @@ static void ll_temac_recv(struct net_device *ndev)
789 780
790 new_skb = netdev_alloc_skb_ip_align(ndev, 781 new_skb = netdev_alloc_skb_ip_align(ndev,
791 XTE_MAX_JUMBO_FRAME_SIZE); 782 XTE_MAX_JUMBO_FRAME_SIZE);
792 783 if (!new_skb) {
793 if (new_skb == 0) {
794 dev_err(&ndev->dev, "no memory for new sk_buff\n");
795 spin_unlock_irqrestore(&lp->rx_lock, flags); 784 spin_unlock_irqrestore(&lp->rx_lock, flags);
796 return; 785 return;
797 } 786 }
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 278c9db3b5b8..24748e8367a1 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -204,41 +204,31 @@ static int axienet_dma_bd_init(struct net_device *ndev)
204 lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, 204 lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
205 sizeof(*lp->tx_bd_v) * TX_BD_NUM, 205 sizeof(*lp->tx_bd_v) * TX_BD_NUM,
206 &lp->tx_bd_p, 206 &lp->tx_bd_p,
207 GFP_KERNEL); 207 GFP_KERNEL | __GFP_ZERO);
208 if (!lp->tx_bd_v) { 208 if (!lp->tx_bd_v)
209 dev_err(&ndev->dev, "unable to allocate DMA Tx buffer "
210 "descriptors");
211 goto out; 209 goto out;
212 }
213 210
214 lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, 211 lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
215 sizeof(*lp->rx_bd_v) * RX_BD_NUM, 212 sizeof(*lp->rx_bd_v) * RX_BD_NUM,
216 &lp->rx_bd_p, 213 &lp->rx_bd_p,
217 GFP_KERNEL); 214 GFP_KERNEL | __GFP_ZERO);
218 if (!lp->rx_bd_v) { 215 if (!lp->rx_bd_v)
219 dev_err(&ndev->dev, "unable to allocate DMA Rx buffer "
220 "descriptors");
221 goto out; 216 goto out;
222 }
223 217
224 memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
225 for (i = 0; i < TX_BD_NUM; i++) { 218 for (i = 0; i < TX_BD_NUM; i++) {
226 lp->tx_bd_v[i].next = lp->tx_bd_p + 219 lp->tx_bd_v[i].next = lp->tx_bd_p +
227 sizeof(*lp->tx_bd_v) * 220 sizeof(*lp->tx_bd_v) *
228 ((i + 1) % TX_BD_NUM); 221 ((i + 1) % TX_BD_NUM);
229 } 222 }
230 223
231 memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM);
232 for (i = 0; i < RX_BD_NUM; i++) { 224 for (i = 0; i < RX_BD_NUM; i++) {
233 lp->rx_bd_v[i].next = lp->rx_bd_p + 225 lp->rx_bd_v[i].next = lp->rx_bd_p +
234 sizeof(*lp->rx_bd_v) * 226 sizeof(*lp->rx_bd_v) *
235 ((i + 1) % RX_BD_NUM); 227 ((i + 1) % RX_BD_NUM);
236 228
237 skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size); 229 skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
238 if (!skb) { 230 if (!skb)
239 dev_err(&ndev->dev, "alloc_skb error %d\n", i);
240 goto out; 231 goto out;
241 }
242 232
243 lp->rx_bd_v[i].sw_id_offset = (u32) skb; 233 lp->rx_bd_v[i].sw_id_offset = (u32) skb;
244 lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent, 234 lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
@@ -777,10 +767,9 @@ static void axienet_recv(struct net_device *ndev)
777 packets++; 767 packets++;
778 768
779 new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size); 769 new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
780 if (!new_skb) { 770 if (!new_skb)
781 dev_err(&ndev->dev, "no memory for new sk_buff\n");
782 return; 771 return;
783 } 772
784 cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data, 773 cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
785 lp->max_frm_size, 774 lp->max_frm_size,
786 DMA_FROM_DEVICE); 775 DMA_FROM_DEVICE);
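This hunk, the ll_temac one above, and the irda hunks further down (ali-ircc, nsc-ircc, smsc-ircc2, via-ircc, w83977af) all apply the same two-part cleanup: the separate memset() after dma_alloc_coherent() is folded into the allocation with __GFP_ZERO, and the driver-local out-of-memory prints are dropped because the allocator core (and netdev_alloc_skb() for skbs) already warns on failure. A minimal sketch of the conversion; p, dev, size and handle are placeholder names, not from the patch:

/* before: allocate, check, print, then zero by hand */
p = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
if (!p) {
        dev_err(dev, "allocation failed\n");    /* duplicates the core warning */
        return -ENOMEM;
}
memset(p, 0, size);

/* after: ask the allocator for zeroed memory up front */
p = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL | __GFP_ZERO);
if (!p)
        return -ENOMEM;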
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index 98e09d0d3ce2..76210abf2e9b 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1041,7 +1041,6 @@ xirc2ps_interrupt(int irq, void *dev_id)
1041 /* 1 extra so we can use insw */ 1041 /* 1 extra so we can use insw */
1042 skb = netdev_alloc_skb(dev, pktlen + 3); 1042 skb = netdev_alloc_skb(dev, pktlen + 3);
1043 if (!skb) { 1043 if (!skb) {
1044 pr_notice("low memory, packet dropped (size=%u)\n", pktlen);
1045 dev->stats.rx_dropped++; 1044 dev->stats.rx_dropped++;
1046 } else { /* okay get the packet */ 1045 } else { /* okay get the packet */
1047 skb_reserve(skb, 2); 1046 skb_reserve(skb, 2);
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 502c8ff1d985..4c8ddc944d51 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -1070,13 +1070,10 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name,
1070 (PI_ALIGN_K_DESC_BLK - 1); 1070 (PI_ALIGN_K_DESC_BLK - 1);
1071 bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size, 1071 bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size,
1072 &bp->kmalloced_dma, 1072 &bp->kmalloced_dma,
1073 GFP_ATOMIC); 1073 GFP_ATOMIC | __GFP_ZERO);
1074 if (top_v == NULL) { 1074 if (top_v == NULL)
1075 printk("%s: Could not allocate memory for host buffers "
1076 "and structures!\n", print_name);
1077 return DFX_K_FAILURE; 1075 return DFX_K_FAILURE;
1078 } 1076
1079 memset(top_v, 0, alloc_size); /* zero out memory before continuing */
1080 top_p = bp->kmalloced_dma; /* get physical address of buffer */ 1077 top_p = bp->kmalloced_dma; /* get physical address of buffer */
1081 1078
1082 /* 1079 /*
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 4cf8f1017aad..b2d863f2ea42 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -866,7 +866,7 @@ static int yam_open(struct net_device *dev)
866 866
867 printk(KERN_INFO "Trying %s at iobase 0x%lx irq %u\n", dev->name, dev->base_addr, dev->irq); 867 printk(KERN_INFO "Trying %s at iobase 0x%lx irq %u\n", dev->name, dev->base_addr, dev->irq);
868 868
869 if (!dev || !yp->bitrate) 869 if (!yp->bitrate)
870 return -ENXIO; 870 return -ENXIO;
871 if (!dev->base_addr || dev->base_addr > 0x1000 - YAM_EXTENT || 871 if (!dev->base_addr || dev->base_addr > 0x1000 - YAM_EXTENT ||
872 dev->irq < 2 || dev->irq > 15) { 872 dev->irq < 2 || dev->irq > 15) {
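The deleted !dev test was dead code: the printk just above it already dereferences dev->name, so a NULL dev would have oopsed before the check could run. A condensed sketch of the ordering, abbreviated to the relevant lines:

static int yam_open(struct net_device *dev)
{
        struct yam_port *yp = netdev_priv(dev); /* pointer arithmetic on dev */

        printk(KERN_INFO "Trying %s at iobase 0x%lx irq %u\n",
               dev->name, dev->base_addr, dev->irq); /* real dereference of dev */

        if (!yp->bitrate)       /* the only condition that can still fail */
                return -ENXIO;
        /* rest of the function unchanged */
        return 0;
}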
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index fc1687ea4a42..6e88eab33f5c 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -233,8 +233,8 @@ struct at86rf230_local {
233#define STATE_SLEEP 0x0F 233#define STATE_SLEEP 0x0F
234#define STATE_BUSY_RX_AACK 0x11 234#define STATE_BUSY_RX_AACK 0x11
235#define STATE_BUSY_TX_ARET 0x12 235#define STATE_BUSY_TX_ARET 0x12
236#define STATE_BUSY_RX_AACK_ON 0x16 236#define STATE_RX_AACK_ON 0x16
237#define STATE_BUSY_TX_ARET_ON 0x19 237#define STATE_TX_ARET_ON 0x19
238#define STATE_RX_ON_NOCLK 0x1C 238#define STATE_RX_ON_NOCLK 0x1C
239#define STATE_RX_AACK_ON_NOCLK 0x1D 239#define STATE_RX_AACK_ON_NOCLK 0x1D
240#define STATE_BUSY_RX_AACK_NOCLK 0x1E 240#define STATE_BUSY_RX_AACK_NOCLK 0x1E
@@ -619,6 +619,52 @@ err:
619 return -EINVAL; 619 return -EINVAL;
620} 620}
621 621
622static int
623at86rf230_set_hw_addr_filt(struct ieee802154_dev *dev,
624 struct ieee802154_hw_addr_filt *filt,
625 unsigned long changed)
626{
627 struct at86rf230_local *lp = dev->priv;
628
629 if (changed & IEEE802515_AFILT_SADDR_CHANGED) {
630 dev_vdbg(&lp->spi->dev,
631 "at86rf230_set_hw_addr_filt called for saddr\n");
632 __at86rf230_write(lp, RG_SHORT_ADDR_0, filt->short_addr);
633 __at86rf230_write(lp, RG_SHORT_ADDR_1, filt->short_addr >> 8);
634 }
635
636 if (changed & IEEE802515_AFILT_PANID_CHANGED) {
637 dev_vdbg(&lp->spi->dev,
638 "at86rf230_set_hw_addr_filt called for pan id\n");
639 __at86rf230_write(lp, RG_PAN_ID_0, filt->pan_id);
640 __at86rf230_write(lp, RG_PAN_ID_1, filt->pan_id >> 8);
641 }
642
643 if (changed & IEEE802515_AFILT_IEEEADDR_CHANGED) {
644 dev_vdbg(&lp->spi->dev,
645 "at86rf230_set_hw_addr_filt called for IEEE addr\n");
646 at86rf230_write_subreg(lp, SR_IEEE_ADDR_0, filt->ieee_addr[7]);
647 at86rf230_write_subreg(lp, SR_IEEE_ADDR_1, filt->ieee_addr[6]);
648 at86rf230_write_subreg(lp, SR_IEEE_ADDR_2, filt->ieee_addr[5]);
649 at86rf230_write_subreg(lp, SR_IEEE_ADDR_3, filt->ieee_addr[4]);
650 at86rf230_write_subreg(lp, SR_IEEE_ADDR_4, filt->ieee_addr[3]);
651 at86rf230_write_subreg(lp, SR_IEEE_ADDR_5, filt->ieee_addr[2]);
652 at86rf230_write_subreg(lp, SR_IEEE_ADDR_6, filt->ieee_addr[1]);
653 at86rf230_write_subreg(lp, SR_IEEE_ADDR_7, filt->ieee_addr[0]);
654 }
655
656 if (changed & IEEE802515_AFILT_PANC_CHANGED) {
657 dev_vdbg(&lp->spi->dev,
658 "at86rf230_set_hw_addr_filt called for panc change\n");
659 if (filt->pan_coord)
660 at86rf230_write_subreg(lp, SR_AACK_I_AM_COORD, 1);
661 else
662 at86rf230_write_subreg(lp, SR_AACK_I_AM_COORD, 0);
663 }
664
665 return 0;
666}
667
622static struct ieee802154_ops at86rf230_ops = { 668static struct ieee802154_ops at86rf230_ops = {
623 .owner = THIS_MODULE, 669 .owner = THIS_MODULE,
624 .xmit = at86rf230_xmit, 670 .xmit = at86rf230_xmit,
@@ -626,6 +672,7 @@ static struct ieee802154_ops at86rf230_ops = {
626 .set_channel = at86rf230_channel, 672 .set_channel = at86rf230_channel,
627 .start = at86rf230_start, 673 .start = at86rf230_start,
628 .stop = at86rf230_stop, 674 .stop = at86rf230_stop,
675 .set_hw_addr_filt = at86rf230_set_hw_addr_filt,
629}; 676};
630 677
631static void at86rf230_irqwork(struct work_struct *work) 678static void at86rf230_irqwork(struct work_struct *work)
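Two independent changes land in this file. The 0x16/0x19 defines are renamed because, per the AT86RF23x datasheets, those values are the RX_AACK_ON and TX_ARET_ON states, not BUSY states. The new set_hw_addr_filt callback pushes address-filter changes from mac802154 down to the radio; note the EUI-64 is written reversed (ieee_addr[7] into SR_IEEE_ADDR_0) because the stack stores it big-endian while the chip registers are little-endian. A hypothetical illustration of a short-address update through the new hook (the IEEE802515_AFILT_* names, typo included, are the real constants; the filt field type is assumed to be a plain u16 at this point in history):

struct ieee802154_hw_addr_filt filt = { 0 };

filt.short_addr = 0x1234;       /* assumed u16; split into two register writes */
at86rf230_set_hw_addr_filt(dev, &filt, IEEE802515_AFILT_SADDR_CHANGED);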
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index 3f2c7aaf28c4..ca0035116a18 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -22,6 +22,7 @@
22#include <linux/spi/spi.h> 22#include <linux/spi/spi.h>
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/pinctrl/consumer.h>
25#include <net/wpan-phy.h> 26#include <net/wpan-phy.h>
26#include <net/mac802154.h> 27#include <net/mac802154.h>
27 28
@@ -91,9 +92,8 @@ struct mrf24j40 {
91#define MRF24J40_READLONG(reg) (1 << 15 | (reg) << 5) 92#define MRF24J40_READLONG(reg) (1 << 15 | (reg) << 5)
92#define MRF24J40_WRITELONG(reg) (1 << 15 | (reg) << 5 | 1 << 4) 93#define MRF24J40_WRITELONG(reg) (1 << 15 | (reg) << 5 | 1 << 4)
93 94
94/* Maximum speed to run the device at. TODO: Get the real max value from 95/* The datasheet indicates the theoretical maximum for SCK to be 10MHz */
95 * someone at Microchip since it isn't in the datasheet. */ 96#define MAX_SPI_SPEED_HZ 10000000
96#define MAX_SPI_SPEED_HZ 1000000
97 97
98#define printdev(X) (&X->spi->dev) 98#define printdev(X) (&X->spi->dev)
99 99
@@ -361,6 +361,7 @@ static int mrf24j40_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
361 if (ret == -ERESTARTSYS) 361 if (ret == -ERESTARTSYS)
362 goto err; 362 goto err;
363 if (ret == 0) { 363 if (ret == 0) {
364 dev_warn(printdev(devrec), "Timeout waiting for TX interrupt\n");
364 ret = -ETIMEDOUT; 365 ret = -ETIMEDOUT;
365 goto err; 366 goto err;
366 } 367 }
@@ -477,7 +478,7 @@ static int mrf24j40_filter(struct ieee802154_dev *dev,
477 int i; 478 int i;
478 for (i = 0; i < 8; i++) 479 for (i = 0; i < 8; i++)
479 write_short_reg(devrec, REG_EADR0+i, 480 write_short_reg(devrec, REG_EADR0+i,
480 filt->ieee_addr[i]); 481 filt->ieee_addr[7-i]);
481 482
482#ifdef DEBUG 483#ifdef DEBUG
483 printk(KERN_DEBUG "Set long addr to: "); 484 printk(KERN_DEBUG "Set long addr to: ");
@@ -623,6 +624,7 @@ static int mrf24j40_probe(struct spi_device *spi)
623 int ret = -ENOMEM; 624 int ret = -ENOMEM;
624 u8 val; 625 u8 val;
625 struct mrf24j40 *devrec; 626 struct mrf24j40 *devrec;
627 struct pinctrl *pinctrl;
626 628
627 printk(KERN_INFO "mrf24j40: probe(). IRQ: %d\n", spi->irq); 629 printk(KERN_INFO "mrf24j40: probe(). IRQ: %d\n", spi->irq);
628 630
@@ -633,6 +635,11 @@ static int mrf24j40_probe(struct spi_device *spi)
633 if (!devrec->buf) 635 if (!devrec->buf)
634 goto err_buf; 636 goto err_buf;
635 637
638 pinctrl = devm_pinctrl_get_select_default(&spi->dev);
639 if (IS_ERR(pinctrl))
640 dev_warn(&spi->dev,
641 "pinctrl pins are not configured from the driver");
642
636 spi->mode = SPI_MODE_0; /* TODO: Is this appropriate for right here? */ 643 spi->mode = SPI_MODE_0; /* TODO: Is this appropriate for right here? */
637 if (spi->max_speed_hz > MAX_SPI_SPEED_HZ) 644 if (spi->max_speed_hz > MAX_SPI_SPEED_HZ)
638 spi->max_speed_hz = MAX_SPI_SPEED_HZ; 645 spi->max_speed_hz = MAX_SPI_SPEED_HZ;
@@ -641,7 +648,7 @@ static int mrf24j40_probe(struct spi_device *spi)
641 init_completion(&devrec->tx_complete); 648 init_completion(&devrec->tx_complete);
642 INIT_WORK(&devrec->irqwork, mrf24j40_isrwork); 649 INIT_WORK(&devrec->irqwork, mrf24j40_isrwork);
643 devrec->spi = spi; 650 devrec->spi = spi;
644 dev_set_drvdata(&spi->dev, devrec); 651 spi_set_drvdata(spi, devrec);
645 652
646 /* Register with the 802154 subsystem */ 653 /* Register with the 802154 subsystem */
647 654
@@ -713,7 +720,7 @@ err_devrec:
713 720
714static int mrf24j40_remove(struct spi_device *spi) 721static int mrf24j40_remove(struct spi_device *spi)
715{ 722{
716 struct mrf24j40 *devrec = dev_get_drvdata(&spi->dev); 723 struct mrf24j40 *devrec = spi_get_drvdata(spi);
717 724
718 dev_dbg(printdev(devrec), "remove\n"); 725 dev_dbg(printdev(devrec), "remove\n");
719 726
@@ -725,7 +732,7 @@ static int mrf24j40_remove(struct spi_device *spi)
725 * complete? */ 732 * complete? */
726 733
727 /* Clean up the SPI stuff. */ 734 /* Clean up the SPI stuff. */
728 dev_set_drvdata(&spi->dev, NULL); 735 spi_set_drvdata(spi, NULL);
729 kfree(devrec->buf); 736 kfree(devrec->buf);
730 kfree(devrec); 737 kfree(devrec);
731 return 0; 738 return 0;
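Besides the pinctrl hook (non-fatal if the pins are already set up by the bootloader) and the SPI ceiling raised to the datasheet's 10 MHz, this file moves from raw dev_{get,set}_drvdata on &spi->dev to the spi_* wrappers. Those wrappers are trivial, roughly as in <linux/spi/spi.h>:

static inline void spi_set_drvdata(struct spi_device *spi, void *data)
{
        dev_set_drvdata(&spi->dev, data);
}

static inline void *spi_get_drvdata(struct spi_device *spi)
{
        return dev_get_drvdata(&spi->dev);
}

The long-address fix writes filt->ieee_addr[7-i] into REG_EADR0+i so the register file receives the EUI-64 in the little-endian order the chip expects, mirroring the at86rf230 fix above.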
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 9cea451a6081..3adb43ce138f 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -352,21 +352,19 @@ static int ali_ircc_open(int i, chipio_t *info)
352 /* Allocate memory if needed */ 352 /* Allocate memory if needed */
353 self->rx_buff.head = 353 self->rx_buff.head =
354 dma_alloc_coherent(NULL, self->rx_buff.truesize, 354 dma_alloc_coherent(NULL, self->rx_buff.truesize,
355 &self->rx_buff_dma, GFP_KERNEL); 355 &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
356 if (self->rx_buff.head == NULL) { 356 if (self->rx_buff.head == NULL) {
357 err = -ENOMEM; 357 err = -ENOMEM;
358 goto err_out2; 358 goto err_out2;
359 } 359 }
360 memset(self->rx_buff.head, 0, self->rx_buff.truesize);
361 360
362 self->tx_buff.head = 361 self->tx_buff.head =
363 dma_alloc_coherent(NULL, self->tx_buff.truesize, 362 dma_alloc_coherent(NULL, self->tx_buff.truesize,
364 &self->tx_buff_dma, GFP_KERNEL); 363 &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
365 if (self->tx_buff.head == NULL) { 364 if (self->tx_buff.head == NULL) {
366 err = -ENOMEM; 365 err = -ENOMEM;
367 goto err_out3; 366 goto err_out3;
368 } 367 }
369 memset(self->tx_buff.head, 0, self->tx_buff.truesize);
370 368
371 self->rx_buff.in_frame = FALSE; 369 self->rx_buff.in_frame = FALSE;
372 self->rx_buff.state = OUTSIDE_FRAME; 370 self->rx_buff.state = OUTSIDE_FRAME;
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index b5151e4ced61..7a1f684edcb5 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -27,6 +27,7 @@
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/time.h> 28#include <linux/time.h>
29#include <linux/types.h> 29#include <linux/types.h>
30#include <linux/ioport.h>
30 31
31#include <net/irda/irda.h> 32#include <net/irda/irda.h>
32#include <net/irda/irmod.h> 33#include <net/irda/irmod.h>
@@ -882,12 +883,12 @@ static int au1k_irda_probe(struct platform_device *pdev)
882 goto out; 883 goto out;
883 884
884 err = -EBUSY; 885 err = -EBUSY;
885 aup->ioarea = request_mem_region(r->start, r->end - r->start + 1, 886 aup->ioarea = request_mem_region(r->start, resource_size(r),
886 pdev->name); 887 pdev->name);
887 if (!aup->ioarea) 888 if (!aup->ioarea)
888 goto out; 889 goto out;
889 890
890 aup->iobase = ioremap_nocache(r->start, r->end - r->start + 1); 891 aup->iobase = ioremap_nocache(r->start, resource_size(r));
891 if (!aup->iobase) 892 if (!aup->iobase)
892 goto out2; 893 goto out2;
893 894
@@ -952,18 +953,7 @@ static struct platform_driver au1k_irda_driver = {
952 .remove = au1k_irda_remove, 953 .remove = au1k_irda_remove,
953}; 954};
954 955
955static int __init au1k_irda_load(void) 956module_platform_driver(au1k_irda_driver);
956{
957 return platform_driver_register(&au1k_irda_driver);
958}
959
960static void __exit au1k_irda_unload(void)
961{
962 return platform_driver_unregister(&au1k_irda_driver);
963}
964 957
965MODULE_AUTHOR("Pete Popov <ppopov@mvista.com>"); 958MODULE_AUTHOR("Pete Popov <ppopov@mvista.com>");
966MODULE_DESCRIPTION("Au1000 IrDA Device Driver"); 959MODULE_DESCRIPTION("Au1000 IrDA Device Driver");
967
968module_init(au1k_irda_load);
969module_exit(au1k_irda_unload);
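Two standard modernizations here: resource_size() replaces the open-coded end - start + 1, and the register/unregister boilerplate collapses into module_platform_driver(). Both behave roughly as sketched below (simplified from <linux/ioport.h> and the module_driver() macro):

static inline resource_size_t resource_size(const struct resource *res)
{
        return res->end - res->start + 1;       /* resource ranges are inclusive */
}

/* module_platform_driver(au1k_irda_driver) generates, in effect: */
static int __init au1k_irda_driver_init(void)
{
        return platform_driver_register(&au1k_irda_driver);
}
module_init(au1k_irda_driver_init);

static void __exit au1k_irda_driver_exit(void)
{
        platform_driver_unregister(&au1k_irda_driver);
}
module_exit(au1k_irda_driver_exit);

The same macro conversion appears again below for mdio-gpio and mdio-octeon.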
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index fed4a05d55c7..a06fca61c9a0 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -389,7 +389,8 @@ static int bfin_sir_startup(struct bfin_sir_port *port, struct net_device *dev)
389 set_dma_callback(port->rx_dma_channel, bfin_sir_dma_rx_int, dev); 389 set_dma_callback(port->rx_dma_channel, bfin_sir_dma_rx_int, dev);
390 set_dma_callback(port->tx_dma_channel, bfin_sir_dma_tx_int, dev); 390 set_dma_callback(port->tx_dma_channel, bfin_sir_dma_tx_int, dev);
391 391
392 port->rx_dma_buf.buf = (unsigned char *)dma_alloc_coherent(NULL, PAGE_SIZE, &dma_handle, GFP_DMA); 392 port->rx_dma_buf.buf = dma_alloc_coherent(NULL, PAGE_SIZE,
393 &dma_handle, GFP_DMA);
393 port->rx_dma_buf.head = 0; 394 port->rx_dma_buf.head = 0;
394 port->rx_dma_buf.tail = 0; 395 port->rx_dma_buf.tail = 0;
395 port->rx_dma_nrows = 0; 396 port->rx_dma_nrows = 0;
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index 2a4f2f153244..9cf836b57c49 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -431,22 +431,20 @@ static int __init nsc_ircc_open(chipio_t *info)
431 /* Allocate memory if needed */ 431 /* Allocate memory if needed */
432 self->rx_buff.head = 432 self->rx_buff.head =
433 dma_alloc_coherent(NULL, self->rx_buff.truesize, 433 dma_alloc_coherent(NULL, self->rx_buff.truesize,
434 &self->rx_buff_dma, GFP_KERNEL); 434 &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
435 if (self->rx_buff.head == NULL) { 435 if (self->rx_buff.head == NULL) {
436 err = -ENOMEM; 436 err = -ENOMEM;
437 goto out2; 437 goto out2;
438 438
439 } 439 }
440 memset(self->rx_buff.head, 0, self->rx_buff.truesize);
441 440
442 self->tx_buff.head = 441 self->tx_buff.head =
443 dma_alloc_coherent(NULL, self->tx_buff.truesize, 442 dma_alloc_coherent(NULL, self->tx_buff.truesize,
444 &self->tx_buff_dma, GFP_KERNEL); 443 &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
445 if (self->tx_buff.head == NULL) { 444 if (self->tx_buff.head == NULL) {
446 err = -ENOMEM; 445 err = -ENOMEM;
447 goto out3; 446 goto out3;
448 } 447 }
449 memset(self->tx_buff.head, 0, self->tx_buff.truesize);
450 448
451 self->rx_buff.in_frame = FALSE; 449 self->rx_buff.in_frame = FALSE;
452 self->rx_buff.state = OUTSIDE_FRAME; 450 self->rx_buff.state = OUTSIDE_FRAME;
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 858de05bdb7d..964b116a0ab7 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -700,12 +700,12 @@ static int pxa_irda_start(struct net_device *dev)
700 700
701 err = -ENOMEM; 701 err = -ENOMEM;
702 si->dma_rx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, 702 si->dma_rx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
703 &si->dma_rx_buff_phy, GFP_KERNEL ); 703 &si->dma_rx_buff_phy, GFP_KERNEL);
704 if (!si->dma_rx_buff) 704 if (!si->dma_rx_buff)
705 goto err_dma_rx_buff; 705 goto err_dma_rx_buff;
706 706
707 si->dma_tx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, 707 si->dma_tx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
708 &si->dma_tx_buff_phy, GFP_KERNEL ); 708 &si->dma_tx_buff_phy, GFP_KERNEL);
709 if (!si->dma_tx_buff) 709 if (!si->dma_tx_buff)
710 goto err_dma_tx_buff; 710 goto err_dma_tx_buff;
711 711
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 5290952b60c2..aa05dad75335 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -563,24 +563,15 @@ static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma,
563 563
564 self->rx_buff.head = 564 self->rx_buff.head =
565 dma_alloc_coherent(NULL, self->rx_buff.truesize, 565 dma_alloc_coherent(NULL, self->rx_buff.truesize,
566 &self->rx_buff_dma, GFP_KERNEL); 566 &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
567 if (self->rx_buff.head == NULL) { 567 if (self->rx_buff.head == NULL)
568 IRDA_ERROR("%s, Can't allocate memory for receive buffer!\n",
569 driver_name);
570 goto err_out2; 568 goto err_out2;
571 }
572 569
573 self->tx_buff.head = 570 self->tx_buff.head =
574 dma_alloc_coherent(NULL, self->tx_buff.truesize, 571 dma_alloc_coherent(NULL, self->tx_buff.truesize,
575 &self->tx_buff_dma, GFP_KERNEL); 572 &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
576 if (self->tx_buff.head == NULL) { 573 if (self->tx_buff.head == NULL)
577 IRDA_ERROR("%s, Can't allocate memory for transmit buffer!\n",
578 driver_name);
579 goto err_out3; 574 goto err_out3;
580 }
581
582 memset(self->rx_buff.head, 0, self->rx_buff.truesize);
583 memset(self->tx_buff.head, 0, self->tx_buff.truesize);
584 575
585 self->rx_buff.in_frame = FALSE; 576 self->rx_buff.in_frame = FALSE;
586 self->rx_buff.state = OUTSIDE_FRAME; 577 self->rx_buff.state = OUTSIDE_FRAME;
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index f9033c6a888c..51f2bc376101 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -364,21 +364,19 @@ static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id)
364 /* Allocate memory if needed */ 364 /* Allocate memory if needed */
365 self->rx_buff.head = 365 self->rx_buff.head =
366 dma_alloc_coherent(&pdev->dev, self->rx_buff.truesize, 366 dma_alloc_coherent(&pdev->dev, self->rx_buff.truesize,
367 &self->rx_buff_dma, GFP_KERNEL); 367 &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
368 if (self->rx_buff.head == NULL) { 368 if (self->rx_buff.head == NULL) {
369 err = -ENOMEM; 369 err = -ENOMEM;
370 goto err_out2; 370 goto err_out2;
371 } 371 }
372 memset(self->rx_buff.head, 0, self->rx_buff.truesize);
373 372
374 self->tx_buff.head = 373 self->tx_buff.head =
375 dma_alloc_coherent(&pdev->dev, self->tx_buff.truesize, 374 dma_alloc_coherent(&pdev->dev, self->tx_buff.truesize,
376 &self->tx_buff_dma, GFP_KERNEL); 375 &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
377 if (self->tx_buff.head == NULL) { 376 if (self->tx_buff.head == NULL) {
378 err = -ENOMEM; 377 err = -ENOMEM;
379 goto err_out3; 378 goto err_out3;
380 } 379 }
381 memset(self->tx_buff.head, 0, self->tx_buff.truesize);
382 380
383 self->rx_buff.in_frame = FALSE; 381 self->rx_buff.in_frame = FALSE;
384 self->rx_buff.state = OUTSIDE_FRAME; 382 self->rx_buff.state = OUTSIDE_FRAME;
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index f5bb92f15880..bb8857a158a6 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -216,22 +216,19 @@ static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
216 /* Allocate memory if needed */ 216 /* Allocate memory if needed */
217 self->rx_buff.head = 217 self->rx_buff.head =
218 dma_alloc_coherent(NULL, self->rx_buff.truesize, 218 dma_alloc_coherent(NULL, self->rx_buff.truesize,
219 &self->rx_buff_dma, GFP_KERNEL); 219 &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
220 if (self->rx_buff.head == NULL) { 220 if (self->rx_buff.head == NULL) {
221 err = -ENOMEM; 221 err = -ENOMEM;
222 goto err_out1; 222 goto err_out1;
223 } 223 }
224 224
225 memset(self->rx_buff.head, 0, self->rx_buff.truesize);
226
227 self->tx_buff.head = 225 self->tx_buff.head =
228 dma_alloc_coherent(NULL, self->tx_buff.truesize, 226 dma_alloc_coherent(NULL, self->tx_buff.truesize,
229 &self->tx_buff_dma, GFP_KERNEL); 227 &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
230 if (self->tx_buff.head == NULL) { 228 if (self->tx_buff.head == NULL) {
231 err = -ENOMEM; 229 err = -ENOMEM;
232 goto err_out2; 230 goto err_out2;
233 } 231 }
234 memset(self->tx_buff.head, 0, self->tx_buff.truesize);
235 232
236 self->rx_buff.in_frame = FALSE; 233 self->rx_buff.in_frame = FALSE;
237 self->rx_buff.state = OUTSIDE_FRAME; 234 self->rx_buff.state = OUTSIDE_FRAME;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 73abbc1655d5..70af6dc07d40 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -46,9 +46,16 @@ struct macvlan_port {
46 46
47static void macvlan_port_destroy(struct net_device *dev); 47static void macvlan_port_destroy(struct net_device *dev);
48 48
49#define macvlan_port_get_rcu(dev) \ 49static struct macvlan_port *macvlan_port_get_rcu(const struct net_device *dev)
50 ((struct macvlan_port *) rcu_dereference(dev->rx_handler_data)) 50{
51#define macvlan_port_get(dev) ((struct macvlan_port *) dev->rx_handler_data) 51 return rcu_dereference(dev->rx_handler_data);
52}
53
54static struct macvlan_port *macvlan_port_get_rtnl(const struct net_device *dev)
55{
56 return rtnl_dereference(dev->rx_handler_data);
57}
58
52#define macvlan_port_exists(dev) (dev->priv_flags & IFF_MACVLAN_PORT) 59#define macvlan_port_exists(dev) (dev->priv_flags & IFF_MACVLAN_PORT)
53 60
54static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port, 61static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port,
@@ -703,7 +710,7 @@ static int macvlan_port_create(struct net_device *dev)
703 710
704static void macvlan_port_destroy(struct net_device *dev) 711static void macvlan_port_destroy(struct net_device *dev)
705{ 712{
706 struct macvlan_port *port = macvlan_port_get(dev); 713 struct macvlan_port *port = macvlan_port_get_rtnl(dev);
707 714
708 dev->priv_flags &= ~IFF_MACVLAN_PORT; 715 dev->priv_flags &= ~IFF_MACVLAN_PORT;
709 netdev_rx_handler_unregister(dev); 716 netdev_rx_handler_unregister(dev);
@@ -772,7 +779,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
772 if (err < 0) 779 if (err < 0)
773 return err; 780 return err;
774 } 781 }
775 port = macvlan_port_get(lowerdev); 782 port = macvlan_port_get_rtnl(lowerdev);
776 783
777 /* Only 1 macvlan device can be created in passthru mode */ 784 /* Only 1 macvlan device can be created in passthru mode */
778 if (port->passthru) 785 if (port->passthru)
@@ -921,7 +928,7 @@ static int macvlan_device_event(struct notifier_block *unused,
921 if (!macvlan_port_exists(dev)) 928 if (!macvlan_port_exists(dev))
922 return NOTIFY_DONE; 929 return NOTIFY_DONE;
923 930
924 port = macvlan_port_get(dev); 931 port = macvlan_port_get_rtnl(dev);
925 932
926 switch (event) { 933 switch (event) {
927 case NETDEV_CHANGE: 934 case NETDEV_CHANGE:
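Converting the cast-based macros to static inline functions restores the sparse and lockdep checking the casts were defeating: rcu_dereference() is only valid under rcu_read_lock(), while the destroy/newlink/notifier paths run under RTNL, so they get the rtnl_dereference() variant, which as defined in <linux/rtnetlink.h> is:

#define rtnl_dereference(p)                                     \
        rcu_dereference_protected(p, lockdep_rtnl_is_held())

i.e. a fetch that asserts the RTNL mutex, rather than an RCU read-side critical section, is what keeps the pointer stable.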
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index a449439bd653..59e9605de316 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -725,6 +725,8 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
725 goto err_kfree; 725 goto err_kfree;
726 } 726 }
727 727
728 skb_probe_transport_header(skb, ETH_HLEN);
729
728 rcu_read_lock_bh(); 730 rcu_read_lock_bh();
729 vlan = rcu_dereference_bh(q->vlan); 731 vlan = rcu_dereference_bh(q->vlan);
730 /* copy skb_ubuf_info for callback when skb has no error */ 732 /* copy skb_ubuf_info for callback when skb has no error */
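skb_probe_transport_header() runs the flow dissector over a packet injected from user space so its transport header offset is set before the skb reaches GSO or checksum offload code; the ETH_HLEN argument is only a fallback offset used if dissection fails. tun_get_user() gains the same call later in this patch with a hint of 0. Condensed from its <linux/skbuff.h> definition, the helper behaves roughly like:

static inline void skb_probe_transport_header(struct sk_buff *skb,
                                              const int offset_hint)
{
        struct flow_keys keys;

        if (skb_transport_header_was_set(skb))
                return;                         /* already known, keep it */
        else if (skb_flow_dissect(skb, &keys))
                skb_set_transport_header(skb, keys.thoff);
        else
                skb_set_transport_header(skb, offset_hint);
}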
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index ec40ba882f61..ff2e45e9cb54 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -159,7 +159,7 @@ static int lxt973a2_update_link(struct phy_device *phydev)
159 return 0; 159 return 0;
160} 160}
161 161
162int lxt973a2_read_status(struct phy_device *phydev) 162static int lxt973a2_read_status(struct phy_device *phydev)
163{ 163{
164 int adv; 164 int adv;
165 int err; 165 int err;
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 22dec9c7ef05..202fe1ff1987 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -7,6 +7,8 @@
7 * 7 *
8 * Copyright (c) 2004 Freescale Semiconductor, Inc. 8 * Copyright (c) 2004 Freescale Semiconductor, Inc.
9 * 9 *
10 * Copyright (c) 2013 Michael Stapelberg <michael@stapelberg.de>
11 *
10 * This program is free software; you can redistribute it and/or modify it 12 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the 13 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your 14 * Free Software Foundation; either version 2 of the License, or (at your
@@ -80,6 +82,28 @@
80#define MII_88E1318S_PHY_MSCR1_REG 16 82#define MII_88E1318S_PHY_MSCR1_REG 16
81#define MII_88E1318S_PHY_MSCR1_PAD_ODD BIT(6) 83#define MII_88E1318S_PHY_MSCR1_PAD_ODD BIT(6)
82 84
85/* Copper Specific Interrupt Enable Register */
86#define MII_88E1318S_PHY_CSIER 0x12
87/* WOL Event Interrupt Enable */
88#define MII_88E1318S_PHY_CSIER_WOL_EIE BIT(7)
89
90/* LED Timer Control Register */
91#define MII_88E1318S_PHY_LED_PAGE 0x03
92#define MII_88E1318S_PHY_LED_TCR 0x12
93#define MII_88E1318S_PHY_LED_TCR_FORCE_INT BIT(15)
94#define MII_88E1318S_PHY_LED_TCR_INTn_ENABLE BIT(7)
95#define MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW BIT(11)
96
97/* Magic Packet MAC address registers */
98#define MII_88E1318S_PHY_MAGIC_PACKET_WORD2 0x17
99#define MII_88E1318S_PHY_MAGIC_PACKET_WORD1 0x18
100#define MII_88E1318S_PHY_MAGIC_PACKET_WORD0 0x19
101
102#define MII_88E1318S_PHY_WOL_PAGE 0x11
103#define MII_88E1318S_PHY_WOL_CTRL 0x10
104#define MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS BIT(12)
105#define MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE BIT(14)
106
83#define MII_88E1121_PHY_LED_CTRL 16 107#define MII_88E1121_PHY_LED_CTRL 16
84#define MII_88E1121_PHY_LED_PAGE 3 108#define MII_88E1121_PHY_LED_PAGE 3
85#define MII_88E1121_PHY_LED_DEF 0x0030 109#define MII_88E1121_PHY_LED_DEF 0x0030
@@ -696,6 +720,107 @@ static int m88e1121_did_interrupt(struct phy_device *phydev)
696 return 0; 720 return 0;
697} 721}
698 722
723static void m88e1318_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
724{
725 wol->supported = WAKE_MAGIC;
726 wol->wolopts = 0;
727
728 if (phy_write(phydev, MII_MARVELL_PHY_PAGE,
729 MII_88E1318S_PHY_WOL_PAGE) < 0)
730 return;
731
732 if (phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL) &
733 MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE)
734 wol->wolopts |= WAKE_MAGIC;
735
736 if (phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x00) < 0)
737 return;
738}
739
740static int m88e1318_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
741{
742 int err, oldpage, temp;
743
744 oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
745
746 if (wol->wolopts & WAKE_MAGIC) {
747 /* Explicitly switch to page 0x00, just to be sure */
748 err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x00);
749 if (err < 0)
750 return err;
751
752 /* Enable the WOL interrupt */
753 temp = phy_read(phydev, MII_88E1318S_PHY_CSIER);
754 temp |= MII_88E1318S_PHY_CSIER_WOL_EIE;
755 err = phy_write(phydev, MII_88E1318S_PHY_CSIER, temp);
756 if (err < 0)
757 return err;
758
759 err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
760 MII_88E1318S_PHY_LED_PAGE);
761 if (err < 0)
762 return err;
763
764 /* Setup LED[2] as interrupt pin (active low) */
765 temp = phy_read(phydev, MII_88E1318S_PHY_LED_TCR);
766 temp &= ~MII_88E1318S_PHY_LED_TCR_FORCE_INT;
767 temp |= MII_88E1318S_PHY_LED_TCR_INTn_ENABLE;
768 temp |= MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW;
769 err = phy_write(phydev, MII_88E1318S_PHY_LED_TCR, temp);
770 if (err < 0)
771 return err;
772
773 err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
774 MII_88E1318S_PHY_WOL_PAGE);
775 if (err < 0)
776 return err;
777
778 /* Store the device address for the magic packet */
779 err = phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD2,
780 ((phydev->attached_dev->dev_addr[5] << 8) |
781 phydev->attached_dev->dev_addr[4]));
782 if (err < 0)
783 return err;
784 err = phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD1,
785 ((phydev->attached_dev->dev_addr[3] << 8) |
786 phydev->attached_dev->dev_addr[2]));
787 if (err < 0)
788 return err;
789 err = phy_write(phydev, MII_88E1318S_PHY_MAGIC_PACKET_WORD0,
790 ((phydev->attached_dev->dev_addr[1] << 8) |
791 phydev->attached_dev->dev_addr[0]));
792 if (err < 0)
793 return err;
794
795 /* Clear WOL status and enable magic packet matching */
796 temp = phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL);
797 temp |= MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS;
798 temp |= MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE;
799 err = phy_write(phydev, MII_88E1318S_PHY_WOL_CTRL, temp);
800 if (err < 0)
801 return err;
802 } else {
803 err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
804 MII_88E1318S_PHY_WOL_PAGE);
805 if (err < 0)
806 return err;
807
808 /* Clear WOL status and disable magic packet matching */
809 temp = phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL);
810 temp |= MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS;
811 temp &= ~MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE;
812 err = phy_write(phydev, MII_88E1318S_PHY_WOL_CTRL, temp);
813 if (err < 0)
814 return err;
815 }
816
817 err = phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
818 if (err < 0)
819 return err;
820
821 return 0;
822}
823
699static struct phy_driver marvell_drivers[] = { 824static struct phy_driver marvell_drivers[] = {
700 { 825 {
701 .phy_id = MARVELL_PHY_ID_88E1101, 826 .phy_id = MARVELL_PHY_ID_88E1101,
@@ -772,6 +897,8 @@ static struct phy_driver marvell_drivers[] = {
772 .ack_interrupt = &marvell_ack_interrupt, 897 .ack_interrupt = &marvell_ack_interrupt,
773 .config_intr = &marvell_config_intr, 898 .config_intr = &marvell_config_intr,
774 .did_interrupt = &m88e1121_did_interrupt, 899 .did_interrupt = &m88e1121_did_interrupt,
900 .get_wol = &m88e1318_get_wol,
901 .set_wol = &m88e1318_set_wol,
775 .driver = { .owner = THIS_MODULE }, 902 .driver = { .owner = THIS_MODULE },
776 }, 903 },
777 { 904 {
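The 88E1318S WOL registers live behind the Marvell page select, so every block in the new handlers follows the same skeleton: save the current page, switch to the page holding the target register, read-modify-write, restore. Reduced to its core (names taken from the hunk above):

int oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
int temp;

phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1318S_PHY_WOL_PAGE);

temp = phy_read(phydev, MII_88E1318S_PHY_WOL_CTRL);     /* read-modify-write */
temp |= MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS;
phy_write(phydev, MII_88E1318S_PHY_WOL_CTRL, temp);

phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);       /* restore the page */

One asymmetry worth noting in the hunk: m88e1318_get_wol() restores page 0x00 unconditionally, while m88e1318_set_wol() restores the saved oldpage.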
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 27274986ab56..a47f9236d966 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -235,17 +235,7 @@ static struct platform_driver mdio_gpio_driver = {
235 }, 235 },
236}; 236};
237 237
238static int __init mdio_gpio_init(void) 238module_platform_driver(mdio_gpio_driver);
239{
240 return platform_driver_register(&mdio_gpio_driver);
241}
242module_init(mdio_gpio_init);
243
244static void __exit mdio_gpio_exit(void)
245{
246 platform_driver_unregister(&mdio_gpio_driver);
247}
248module_exit(mdio_gpio_exit);
249 239
250MODULE_ALIAS("platform:mdio-gpio"); 240MODULE_ALIAS("platform:mdio-gpio");
251MODULE_AUTHOR("Laurent Pinchart, Paulius Zaleckas"); 241MODULE_AUTHOR("Laurent Pinchart, Paulius Zaleckas");
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
index 09297fe05ae5..b51fa1f469b0 100644
--- a/drivers/net/phy/mdio-octeon.c
+++ b/drivers/net/phy/mdio-octeon.c
@@ -3,7 +3,7 @@
3 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 2009,2011 Cavium, Inc. 6 * Copyright (C) 2009-2012 Cavium, Inc.
7 */ 7 */
8 8
9#include <linux/platform_device.h> 9#include <linux/platform_device.h>
@@ -27,30 +27,98 @@
27#define SMI_CLK 0x18 27#define SMI_CLK 0x18
28#define SMI_EN 0x20 28#define SMI_EN 0x20
29 29
30enum octeon_mdiobus_mode {
31 UNINIT = 0,
32 C22,
33 C45
34};
35
30struct octeon_mdiobus { 36struct octeon_mdiobus {
31 struct mii_bus *mii_bus; 37 struct mii_bus *mii_bus;
32 u64 register_base; 38 u64 register_base;
33 resource_size_t mdio_phys; 39 resource_size_t mdio_phys;
34 resource_size_t regsize; 40 resource_size_t regsize;
41 enum octeon_mdiobus_mode mode;
35 int phy_irq[PHY_MAX_ADDR]; 42 int phy_irq[PHY_MAX_ADDR];
36}; 43};
37 44
45static void octeon_mdiobus_set_mode(struct octeon_mdiobus *p,
46 enum octeon_mdiobus_mode m)
47{
48 union cvmx_smix_clk smi_clk;
49
50 if (m == p->mode)
51 return;
52
53 smi_clk.u64 = cvmx_read_csr(p->register_base + SMI_CLK);
54 smi_clk.s.mode = (m == C45) ? 1 : 0;
55 smi_clk.s.preamble = 1;
56 cvmx_write_csr(p->register_base + SMI_CLK, smi_clk.u64);
57 p->mode = m;
58}
59
60static int octeon_mdiobus_c45_addr(struct octeon_mdiobus *p,
61 int phy_id, int regnum)
62{
63 union cvmx_smix_cmd smi_cmd;
64 union cvmx_smix_wr_dat smi_wr;
65 int timeout = 1000;
66
67 octeon_mdiobus_set_mode(p, C45);
68
69 smi_wr.u64 = 0;
70 smi_wr.s.dat = regnum & 0xffff;
71 cvmx_write_csr(p->register_base + SMI_WR_DAT, smi_wr.u64);
72
73 regnum = (regnum >> 16) & 0x1f;
74
75 smi_cmd.u64 = 0;
76 smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_45_ADDRESS */
77 smi_cmd.s.phy_adr = phy_id;
78 smi_cmd.s.reg_adr = regnum;
79 cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64);
80
81 do {
82 /* Wait 1000 clocks so we don't saturate the RSL bus
83 * doing reads.
84 */
85 __delay(1000);
86 smi_wr.u64 = cvmx_read_csr(p->register_base + SMI_WR_DAT);
87 } while (smi_wr.s.pending && --timeout);
88
89 if (timeout <= 0)
90 return -EIO;
91 return 0;
92}
93
38static int octeon_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum) 94static int octeon_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum)
39{ 95{
40 struct octeon_mdiobus *p = bus->priv; 96 struct octeon_mdiobus *p = bus->priv;
41 union cvmx_smix_cmd smi_cmd; 97 union cvmx_smix_cmd smi_cmd;
42 union cvmx_smix_rd_dat smi_rd; 98 union cvmx_smix_rd_dat smi_rd;
99 unsigned int op = 1; /* MDIO_CLAUSE_22_READ */
43 int timeout = 1000; 100 int timeout = 1000;
44 101
102 if (regnum & MII_ADDR_C45) {
103 int r = octeon_mdiobus_c45_addr(p, phy_id, regnum);
104 if (r < 0)
105 return r;
106
107 regnum = (regnum >> 16) & 0x1f;
108 op = 3; /* MDIO_CLAUSE_45_READ */
109 } else {
110 octeon_mdiobus_set_mode(p, C22);
111 }
112
113
45 smi_cmd.u64 = 0; 114 smi_cmd.u64 = 0;
46 smi_cmd.s.phy_op = 1; /* MDIO_CLAUSE_22_READ */ 115 smi_cmd.s.phy_op = op;
47 smi_cmd.s.phy_adr = phy_id; 116 smi_cmd.s.phy_adr = phy_id;
48 smi_cmd.s.reg_adr = regnum; 117 smi_cmd.s.reg_adr = regnum;
49 cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64); 118 cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64);
50 119
51 do { 120 do {
52 /* 121 /* Wait 1000 clocks so we don't saturate the RSL bus
53 * Wait 1000 clocks so we don't saturate the RSL bus
54 * doing reads. 122 * doing reads.
55 */ 123 */
56 __delay(1000); 124 __delay(1000);
@@ -69,21 +137,33 @@ static int octeon_mdiobus_write(struct mii_bus *bus, int phy_id,
69 struct octeon_mdiobus *p = bus->priv; 137 struct octeon_mdiobus *p = bus->priv;
70 union cvmx_smix_cmd smi_cmd; 138 union cvmx_smix_cmd smi_cmd;
71 union cvmx_smix_wr_dat smi_wr; 139 union cvmx_smix_wr_dat smi_wr;
140 unsigned int op = 0; /* MDIO_CLAUSE_22_WRITE */
72 int timeout = 1000; 141 int timeout = 1000;
73 142
143
144 if (regnum & MII_ADDR_C45) {
145 int r = octeon_mdiobus_c45_addr(p, phy_id, regnum);
146 if (r < 0)
147 return r;
148
149 regnum = (regnum >> 16) & 0x1f;
150 op = 1; /* MDIO_CLAUSE_45_WRITE */
151 } else {
152 octeon_mdiobus_set_mode(p, C22);
153 }
154
74 smi_wr.u64 = 0; 155 smi_wr.u64 = 0;
75 smi_wr.s.dat = val; 156 smi_wr.s.dat = val;
76 cvmx_write_csr(p->register_base + SMI_WR_DAT, smi_wr.u64); 157 cvmx_write_csr(p->register_base + SMI_WR_DAT, smi_wr.u64);
77 158
78 smi_cmd.u64 = 0; 159 smi_cmd.u64 = 0;
79 smi_cmd.s.phy_op = 0; /* MDIO_CLAUSE_22_WRITE */ 160 smi_cmd.s.phy_op = op;
80 smi_cmd.s.phy_adr = phy_id; 161 smi_cmd.s.phy_adr = phy_id;
81 smi_cmd.s.reg_adr = regnum; 162 smi_cmd.s.reg_adr = regnum;
82 cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64); 163 cvmx_write_csr(p->register_base + SMI_CMD, smi_cmd.u64);
83 164
84 do { 165 do {
85 /* 166 /* Wait 1000 clocks so we don't saturate the RSL bus
86 * Wait 1000 clocks so we don't saturate the RSL bus
87 * doing reads. 167 * doing reads.
88 */ 168 */
89 __delay(1000); 169 __delay(1000);
@@ -197,18 +277,7 @@ void octeon_mdiobus_force_mod_depencency(void)
197} 277}
198EXPORT_SYMBOL(octeon_mdiobus_force_mod_depencency); 278EXPORT_SYMBOL(octeon_mdiobus_force_mod_depencency);
199 279
200static int __init octeon_mdiobus_mod_init(void) 280module_platform_driver(octeon_mdiobus_driver);
201{
202 return platform_driver_register(&octeon_mdiobus_driver);
203}
204
205static void __exit octeon_mdiobus_mod_exit(void)
206{
207 platform_driver_unregister(&octeon_mdiobus_driver);
208}
209
210module_init(octeon_mdiobus_mod_init);
211module_exit(octeon_mdiobus_mod_exit);
212 281
213MODULE_DESCRIPTION(DRV_DESCRIPTION); 282MODULE_DESCRIPTION(DRV_DESCRIPTION);
214MODULE_VERSION(DRV_VERSION); 283MODULE_VERSION(DRV_VERSION);
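The Clause 45 support hinges on how the PHY core encodes regnum: the MII_ADDR_C45 flag marks a c45 transaction, the MMD device address sits in bits 16-20, and the 16-bit register number occupies the low bits. That is why both read and write first issue an address cycle through octeon_mdiobus_c45_addr() with the full regnum, then reduce it to (regnum >> 16) & 0x1f for the data cycle. The encoding, per <linux/mdio.h> (the helper name below is illustrative only):

#define MII_ADDR_C45 (1 << 30)

static inline u32 c45_regnum(int devad, u16 reg)
{
        return MII_ADDR_C45 | (devad << 16) | reg;
}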
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index abf7b6153d00..2510435f34ed 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -53,6 +53,18 @@
53#define KS8737_CTRL_INT_ACTIVE_HIGH (1 << 14) 53#define KS8737_CTRL_INT_ACTIVE_HIGH (1 << 14)
54#define KSZ8051_RMII_50MHZ_CLK (1 << 7) 54#define KSZ8051_RMII_50MHZ_CLK (1 << 7)
55 55
56static int ksz_config_flags(struct phy_device *phydev)
57{
58 int regval;
59
60 if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK) {
61 regval = phy_read(phydev, MII_KSZPHY_CTRL);
62 regval |= KSZ8051_RMII_50MHZ_CLK;
63 return phy_write(phydev, MII_KSZPHY_CTRL, regval);
64 }
65 return 0;
66}
67
56static int kszphy_ack_interrupt(struct phy_device *phydev) 68static int kszphy_ack_interrupt(struct phy_device *phydev)
57{ 69{
58 /* bit[7..0] int status, which is a read and clear register. */ 70 /* bit[7..0] int status, which is a read and clear register. */
@@ -114,22 +126,19 @@ static int kszphy_config_init(struct phy_device *phydev)
114 126
115static int ksz8021_config_init(struct phy_device *phydev) 127static int ksz8021_config_init(struct phy_device *phydev)
116{ 128{
129 int rc;
117 const u16 val = KSZPHY_OMSO_B_CAST_OFF | KSZPHY_OMSO_RMII_OVERRIDE; 130 const u16 val = KSZPHY_OMSO_B_CAST_OFF | KSZPHY_OMSO_RMII_OVERRIDE;
118 phy_write(phydev, MII_KSZPHY_OMSO, val); 131 phy_write(phydev, MII_KSZPHY_OMSO, val);
119 return 0; 132 rc = ksz_config_flags(phydev);
133 return rc < 0 ? rc : 0;
120} 134}
121 135
122static int ks8051_config_init(struct phy_device *phydev) 136static int ks8051_config_init(struct phy_device *phydev)
123{ 137{
124 int regval; 138 int rc;
125
126 if (phydev->dev_flags & MICREL_PHY_50MHZ_CLK) {
127 regval = phy_read(phydev, MII_KSZPHY_CTRL);
128 regval |= KSZ8051_RMII_50MHZ_CLK;
129 phy_write(phydev, MII_KSZPHY_CTRL, regval);
130 }
131 139
132 return 0; 140 rc = ksz_config_flags(phydev);
141 return rc < 0 ? rc : 0;
133} 142}
134 143
135#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06 144#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06
@@ -192,6 +201,19 @@ static struct phy_driver ksphy_driver[] = {
192 .config_intr = kszphy_config_intr, 201 .config_intr = kszphy_config_intr,
193 .driver = { .owner = THIS_MODULE,}, 202 .driver = { .owner = THIS_MODULE,},
194}, { 203}, {
204 .phy_id = PHY_ID_KSZ8031,
205 .phy_id_mask = 0x00ffffff,
206 .name = "Micrel KSZ8031",
207 .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause |
208 SUPPORTED_Asym_Pause),
209 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
210 .config_init = ksz8021_config_init,
211 .config_aneg = genphy_config_aneg,
212 .read_status = genphy_read_status,
213 .ack_interrupt = kszphy_ack_interrupt,
214 .config_intr = kszphy_config_intr,
215 .driver = { .owner = THIS_MODULE,},
216}, {
195 .phy_id = PHY_ID_KSZ8041, 217 .phy_id = PHY_ID_KSZ8041,
196 .phy_id_mask = 0x00fffff0, 218 .phy_id_mask = 0x00fffff0,
197 .name = "Micrel KSZ8041", 219 .name = "Micrel KSZ8041",
@@ -325,6 +347,7 @@ static struct mdio_device_id __maybe_unused micrel_tbl[] = {
325 { PHY_ID_KSZ8001, 0x00ffffff }, 347 { PHY_ID_KSZ8001, 0x00ffffff },
326 { PHY_ID_KS8737, 0x00fffff0 }, 348 { PHY_ID_KS8737, 0x00fffff0 },
327 { PHY_ID_KSZ8021, 0x00ffffff }, 349 { PHY_ID_KSZ8021, 0x00ffffff },
350 { PHY_ID_KSZ8031, 0x00ffffff },
328 { PHY_ID_KSZ8041, 0x00fffff0 }, 351 { PHY_ID_KSZ8041, 0x00fffff0 },
329 { PHY_ID_KSZ8051, 0x00fffff0 }, 352 { PHY_ID_KSZ8051, 0x00fffff0 },
330 { PHY_ID_KSZ8061, 0x00fffff0 }, 353 { PHY_ID_KSZ8061, 0x00fffff0 },
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index ef9ea9248223..c14f14741b3f 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -463,33 +463,6 @@ void phy_stop_machine(struct phy_device *phydev)
463} 463}
464 464
465/** 465/**
466 * phy_force_reduction - reduce PHY speed/duplex settings by one step
467 * @phydev: target phy_device struct
468 *
469 * Description: Reduces the speed/duplex settings by one notch,
470 * in this order--
471 * 1000/FULL, 1000/HALF, 100/FULL, 100/HALF, 10/FULL, 10/HALF.
472 * The function bottoms out at 10/HALF.
473 */
474static void phy_force_reduction(struct phy_device *phydev)
475{
476 int idx;
477
478 idx = phy_find_setting(phydev->speed, phydev->duplex);
479
480 idx++;
481
482 idx = phy_find_valid(idx, phydev->supported);
483
484 phydev->speed = settings[idx].speed;
485 phydev->duplex = settings[idx].duplex;
486
487 pr_info("Trying %d/%s\n",
488 phydev->speed, DUPLEX_FULL == phydev->duplex ? "FULL" : "HALF");
489}
490
491
492/**
493 * phy_error - enter HALTED state for this PHY device 466 * phy_error - enter HALTED state for this PHY device
494 * @phydev: target phy_device struct 467 * @phydev: target phy_device struct
495 * 468 *
@@ -818,30 +791,11 @@ void phy_state_machine(struct work_struct *work)
818 phydev->adjust_link(phydev->attached_dev); 791 phydev->adjust_link(phydev->attached_dev);
819 792
820 } else if (0 == phydev->link_timeout--) { 793 } else if (0 == phydev->link_timeout--) {
821 int idx;
822
823 needs_aneg = 1; 794 needs_aneg = 1;
824 /* If we have the magic_aneg bit, 795 /* If we have the magic_aneg bit,
825 * we try again */ 796 * we try again */
826 if (phydev->drv->flags & PHY_HAS_MAGICANEG) 797 if (phydev->drv->flags & PHY_HAS_MAGICANEG)
827 break; 798 break;
828
829 /* The timer expired, and we still
830 * don't have a setting, so we try
831 * forcing it until we find one that
832 * works, starting from the fastest speed,
833 * and working our way down */
834 idx = phy_find_valid(0, phydev->supported);
835
836 phydev->speed = settings[idx].speed;
837 phydev->duplex = settings[idx].duplex;
838
839 phydev->autoneg = AUTONEG_DISABLE;
840
841 pr_info("Trying %d/%s\n",
842 phydev->speed,
843 DUPLEX_FULL == phydev->duplex ?
844 "FULL" : "HALF");
845 } 799 }
846 break; 800 break;
847 case PHY_NOLINK: 801 case PHY_NOLINK:
@@ -866,10 +820,8 @@ void phy_state_machine(struct work_struct *work)
866 phydev->state = PHY_RUNNING; 820 phydev->state = PHY_RUNNING;
867 netif_carrier_on(phydev->attached_dev); 821 netif_carrier_on(phydev->attached_dev);
868 } else { 822 } else {
869 if (0 == phydev->link_timeout--) { 823 if (0 == phydev->link_timeout--)
870 phy_force_reduction(phydev);
871 needs_aneg = 1; 824 needs_aneg = 1;
872 }
873 } 825 }
874 826
875 phydev->adjust_link(phydev->attached_dev); 827 phydev->adjust_link(phydev->attached_dev);
@@ -1188,3 +1140,19 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
1188 return 0; 1140 return 0;
1189} 1141}
1190EXPORT_SYMBOL(phy_ethtool_set_eee); 1142EXPORT_SYMBOL(phy_ethtool_set_eee);
1143
1144int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
1145{
1146 if (phydev->drv->set_wol)
1147 return phydev->drv->set_wol(phydev, wol);
1148
1149 return -EOPNOTSUPP;
1150}
1151EXPORT_SYMBOL(phy_ethtool_set_wol);
1152
1153void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
1154{
1155 if (phydev->drv->get_wol)
1156 phydev->drv->get_wol(phydev, wol);
1157}
1158EXPORT_SYMBOL(phy_ethtool_get_wol);
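With phy_force_reduction() gone, an autonegotiation timeout now simply retriggers autoneg instead of walking down the forced speed/duplex ladder. The two new exports give MAC drivers a generic bridge from their ethtool ops to the PHY WOL handlers added above: set returns -EOPNOTSUPP when the PHY driver has no handler, get leaves the wolinfo untouched. A hedged sketch of the intended call site, with the foo_* names and phydev glue as assumptions:

static void foo_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
        struct foo_priv *priv = netdev_priv(ndev);      /* hypothetical priv */

        wol->supported = 0;
        wol->wolopts = 0;
        if (priv->phydev)
                phy_ethtool_get_wol(priv->phydev, wol);
}

static int foo_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
        struct foo_priv *priv = netdev_priv(ndev);

        if (!priv->phydev)
                return -EOPNOTSUPP;
        return phy_ethtool_set_wol(priv->phydev, wol);
}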
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 5c87eef40bf9..d11c93e69e03 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -281,7 +281,7 @@ static int ks8995_probe(struct spi_device *spi)
281 mutex_init(&ks->lock); 281 mutex_init(&ks->lock);
282 ks->pdata = pdata; 282 ks->pdata = pdata;
283 ks->spi = spi_dev_get(spi); 283 ks->spi = spi_dev_get(spi);
284 dev_set_drvdata(&spi->dev, ks); 284 spi_set_drvdata(spi, ks);
285 285
286 spi->mode = SPI_MODE_0; 286 spi->mode = SPI_MODE_0;
287 spi->bits_per_word = 8; 287 spi->bits_per_word = 8;
@@ -325,7 +325,7 @@ static int ks8995_probe(struct spi_device *spi)
325 return 0; 325 return 0;
326 326
327err_drvdata: 327err_drvdata:
328 dev_set_drvdata(&spi->dev, NULL); 328 spi_set_drvdata(spi, NULL);
329 kfree(ks); 329 kfree(ks);
330 return err; 330 return err;
331} 331}
@@ -334,10 +334,10 @@ static int ks8995_remove(struct spi_device *spi)
334{ 334{
335 struct ks8995_data *ks8995; 335 struct ks8995_data *ks8995;
336 336
337 ks8995 = dev_get_drvdata(&spi->dev); 337 ks8995 = spi_get_drvdata(spi);
338 sysfs_remove_bin_file(&spi->dev.kobj, &ks8995_registers_attr); 338 sysfs_remove_bin_file(&spi->dev.kobj, &ks8995_registers_attr);
339 339
340 dev_set_drvdata(&spi->dev, NULL); 340 spi_set_drvdata(spi, NULL);
341 kfree(ks8995); 341 kfree(ks8995);
342 342
343 return 0; 343 return 0;
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 2585c383e623..3492b5391273 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -61,7 +61,7 @@ MODULE_DESCRIPTION("Vitesse PHY driver");
61MODULE_AUTHOR("Kriston Carson"); 61MODULE_AUTHOR("Kriston Carson");
62MODULE_LICENSE("GPL"); 62MODULE_LICENSE("GPL");
63 63
64int vsc824x_add_skew(struct phy_device *phydev) 64static int vsc824x_add_skew(struct phy_device *phydev)
65{ 65{
66 int err; 66 int err;
67 int extcon; 67 int extcon;
@@ -81,7 +81,6 @@ int vsc824x_add_skew(struct phy_device *phydev)
81 81
82 return err; 82 return err;
83} 83}
84EXPORT_SYMBOL(vsc824x_add_skew);
85 84
86static int vsc824x_config_init(struct phy_device *phydev) 85static int vsc824x_config_init(struct phy_device *phydev)
87{ 86{
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index bed62d9c53c8..1f7bef90b467 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -560,7 +560,7 @@ static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev)
560 * so don't forget to remove it. 560 * so don't forget to remove it.
561 */ 561 */
562 562
563 if (ntohs(eth->h_proto) >= 1536) 563 if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
564 return eth->h_proto; 564 return eth->h_proto;
565 565
566 rawp = skb->data; 566 rawp = skb->data;
diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
index 1a12033d2efa..090c834d7dbd 100644
--- a/drivers/net/ppp/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -105,64 +105,15 @@ static const struct ppp_channel_ops sync_ops = {
105}; 105};
106 106
107/* 107/*
108 * Utility procedures to print a buffer in hex/ascii 108 * Utility procedure to print a buffer in hex/ascii
109 */ 109 */
110static void 110static void
111ppp_print_hex (register __u8 * out, const __u8 * in, int count)
112{
113 register __u8 next_ch;
114 static const char hex[] = "0123456789ABCDEF";
115
116 while (count-- > 0) {
117 next_ch = *in++;
118 *out++ = hex[(next_ch >> 4) & 0x0F];
119 *out++ = hex[next_ch & 0x0F];
120 ++out;
121 }
122}
123
124static void
125ppp_print_char (register __u8 * out, const __u8 * in, int count)
126{
127 register __u8 next_ch;
128
129 while (count-- > 0) {
130 next_ch = *in++;
131
132 if (next_ch < 0x20 || next_ch > 0x7e)
133 *out++ = '.';
134 else {
135 *out++ = next_ch;
136 if (next_ch == '%') /* printk/syslogd has a bug !! */
137 *out++ = '%';
138 }
139 }
140 *out = '\0';
141}
142
143static void
144ppp_print_buffer (const char *name, const __u8 *buf, int count) 111ppp_print_buffer (const char *name, const __u8 *buf, int count)
145{ 112{
146 __u8 line[44];
147
148 if (name != NULL) 113 if (name != NULL)
149 printk(KERN_DEBUG "ppp_synctty: %s, count = %d\n", name, count); 114 printk(KERN_DEBUG "ppp_synctty: %s, count = %d\n", name, count);
150 115
151 while (count > 8) { 116 print_hex_dump_bytes("", DUMP_PREFIX_NONE, buf, count);
152 memset (line, 32, 44);
153 ppp_print_hex (line, buf, 8);
154 ppp_print_char (&line[8 * 3], buf, 8);
155 printk(KERN_DEBUG "%s\n", line);
156 count -= 8;
157 buf += 8;
158 }
159
160 if (count > 0) {
161 memset (line, 32, 44);
162 ppp_print_hex (line, buf, count);
163 ppp_print_char (&line[8 * 3], buf, count);
164 printk(KERN_DEBUG "%s\n", line);
165 }
166} 117}
167 118
168 119
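The hand-rolled hex/ASCII dumper is replaced by the lib/hexdump.c helper, which already prints a combined hex and ASCII dump at KERN_DEBUG, 16 bytes per line. Its signature, and the longer form it wraps:

void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
                          const void *buf, size_t len);

/* equivalent to: */
print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, 16, 1, buf, len, true);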
diff --git a/drivers/net/team/Kconfig b/drivers/net/team/Kconfig
index c3011af68e91..c853d84fd99f 100644
--- a/drivers/net/team/Kconfig
+++ b/drivers/net/team/Kconfig
@@ -37,6 +37,18 @@ config NET_TEAM_MODE_ROUNDROBIN
37 To compile this team mode as a module, choose M here: the module 37 To compile this team mode as a module, choose M here: the module
38 will be called team_mode_roundrobin. 38 will be called team_mode_roundrobin.
39 39
40config NET_TEAM_MODE_RANDOM
41 tristate "Random mode support"
42 depends on NET_TEAM
43 ---help---
44 Basic mode where port used for transmitting packets is selected
45 randomly.
46
47 All added ports are setup to have team's device address.
48
49 To compile this team mode as a module, choose M here: the module
50 will be called team_mode_random.
51
40config NET_TEAM_MODE_ACTIVEBACKUP 52config NET_TEAM_MODE_ACTIVEBACKUP
41 tristate "Active-backup mode support" 53 tristate "Active-backup mode support"
42 depends on NET_TEAM 54 depends on NET_TEAM
diff --git a/drivers/net/team/Makefile b/drivers/net/team/Makefile
index 975763014e5a..c57e85889751 100644
--- a/drivers/net/team/Makefile
+++ b/drivers/net/team/Makefile
@@ -5,5 +5,6 @@
5obj-$(CONFIG_NET_TEAM) += team.o 5obj-$(CONFIG_NET_TEAM) += team.o
6obj-$(CONFIG_NET_TEAM_MODE_BROADCAST) += team_mode_broadcast.o 6obj-$(CONFIG_NET_TEAM_MODE_BROADCAST) += team_mode_broadcast.o
7obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o 7obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o
8obj-$(CONFIG_NET_TEAM_MODE_RANDOM) += team_mode_random.o
8obj-$(CONFIG_NET_TEAM_MODE_ACTIVEBACKUP) += team_mode_activebackup.o 9obj-$(CONFIG_NET_TEAM_MODE_ACTIVEBACKUP) += team_mode_activebackup.o
9obj-$(CONFIG_NET_TEAM_MODE_LOADBALANCE) += team_mode_loadbalance.o 10obj-$(CONFIG_NET_TEAM_MODE_LOADBALANCE) += team_mode_loadbalance.o
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index bf3419297875..621c1bddeee9 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -73,11 +73,24 @@ static int team_port_set_orig_dev_addr(struct team_port *port)
73 return __set_port_dev_addr(port->dev, port->orig.dev_addr); 73 return __set_port_dev_addr(port->dev, port->orig.dev_addr);
74} 74}
75 75
76int team_port_set_team_dev_addr(struct team_port *port) 76static int team_port_set_team_dev_addr(struct team *team,
77 struct team_port *port)
78{
79 return __set_port_dev_addr(port->dev, team->dev->dev_addr);
80}
81
82int team_modeop_port_enter(struct team *team, struct team_port *port)
83{
84 return team_port_set_team_dev_addr(team, port);
85}
86EXPORT_SYMBOL(team_modeop_port_enter);
87
88void team_modeop_port_change_dev_addr(struct team *team,
89 struct team_port *port)
77{ 90{
78 return __set_port_dev_addr(port->dev, port->team->dev->dev_addr); 91 team_port_set_team_dev_addr(team, port);
79} 92}
80EXPORT_SYMBOL(team_port_set_team_dev_addr); 93EXPORT_SYMBOL(team_modeop_port_change_dev_addr);
81 94
82static void team_refresh_port_linkup(struct team_port *port) 95static void team_refresh_port_linkup(struct team_port *port)
83{ 96{
@@ -490,9 +503,9 @@ static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
490 return false; 503 return false;
491} 504}
492 505
493rx_handler_result_t team_dummy_receive(struct team *team, 506static rx_handler_result_t team_dummy_receive(struct team *team,
494 struct team_port *port, 507 struct team_port *port,
495 struct sk_buff *skb) 508 struct sk_buff *skb)
496{ 509{
497 return RX_HANDLER_ANOTHER; 510 return RX_HANDLER_ANOTHER;
498} 511}
diff --git a/drivers/net/team/team_mode_broadcast.c b/drivers/net/team/team_mode_broadcast.c
index c5db428e73fa..c366cd299c06 100644
--- a/drivers/net/team/team_mode_broadcast.c
+++ b/drivers/net/team/team_mode_broadcast.c
@@ -46,20 +46,10 @@ static bool bc_transmit(struct team *team, struct sk_buff *skb)
46 return sum_ret; 46 return sum_ret;
47} 47}
48 48
49static int bc_port_enter(struct team *team, struct team_port *port)
50{
51 return team_port_set_team_dev_addr(port);
52}
53
54static void bc_port_change_dev_addr(struct team *team, struct team_port *port)
55{
56 team_port_set_team_dev_addr(port);
57}
58
59static const struct team_mode_ops bc_mode_ops = { 49static const struct team_mode_ops bc_mode_ops = {
60 .transmit = bc_transmit, 50 .transmit = bc_transmit,
61 .port_enter = bc_port_enter, 51 .port_enter = team_modeop_port_enter,
62 .port_change_dev_addr = bc_port_change_dev_addr, 52 .port_change_dev_addr = team_modeop_port_change_dev_addr,
63}; 53};
64 54
65static const struct team_mode bc_mode = { 55static const struct team_mode bc_mode = {
diff --git a/drivers/net/team/team_mode_random.c b/drivers/net/team/team_mode_random.c
new file mode 100644
index 000000000000..9eabfaa22f3e
--- /dev/null
+++ b/drivers/net/team/team_mode_random.c
@@ -0,0 +1,71 @@
1/*
2 * drivers/net/team/team_mode_random.c - Random mode for team
3 * Copyright (c) 2013 Jiri Pirko <jiri@resnulli.us>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 */
10
11#include <linux/kernel.h>
12#include <linux/types.h>
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/skbuff.h>
16#include <linux/reciprocal_div.h>
17#include <linux/if_team.h>
18
19static u32 random_N(unsigned int N)
20{
21 return reciprocal_divide(random32(), N);
22}
23
24static bool rnd_transmit(struct team *team, struct sk_buff *skb)
25{
26 struct team_port *port;
27 int port_index;
28
29 port_index = random_N(team->en_port_count);
30 port = team_get_port_by_index_rcu(team, port_index);
31 port = team_get_first_port_txable_rcu(team, port);
32 if (unlikely(!port))
33 goto drop;
34 if (team_dev_queue_xmit(team, port, skb))
35 return false;
36 return true;
37
38drop:
39 dev_kfree_skb_any(skb);
40 return false;
41}
42
43static const struct team_mode_ops rnd_mode_ops = {
44 .transmit = rnd_transmit,
45 .port_enter = team_modeop_port_enter,
46 .port_change_dev_addr = team_modeop_port_change_dev_addr,
47};
48
49static const struct team_mode rnd_mode = {
50 .kind = "random",
51 .owner = THIS_MODULE,
52 .ops = &rnd_mode_ops,
53};
54
55static int __init rnd_init_module(void)
56{
57 return team_mode_register(&rnd_mode);
58}
59
60static void __exit rnd_cleanup_module(void)
61{
62 team_mode_unregister(&rnd_mode);
63}
64
65module_init(rnd_init_module);
66module_exit(rnd_cleanup_module);
67
68MODULE_LICENSE("GPL v2");
69MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
70MODULE_DESCRIPTION("Random mode for team");
71MODULE_ALIAS("team-mode-random");
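random_N() picks a port index without a divide: the reciprocal_divide(A, R) of this era computes ((u64)A * R) >> 32, so passing random32() and the enabled-port count scales a uniform 32-bit value onto [0, N). The arithmetic, as a standalone sketch:

/* ((u64)rnd * n) >> 32 lies in [0, n) for any rnd in [0, 2^32) */
static u32 scale_to_port_index(u32 rnd, u32 n)
{
        return (u32)(((u64)rnd * n) >> 32);
}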
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
index 105135aa8f05..d268e4de781b 100644
--- a/drivers/net/team/team_mode_roundrobin.c
+++ b/drivers/net/team/team_mode_roundrobin.c
@@ -25,26 +25,6 @@ static struct rr_priv *rr_priv(struct team *team)
25 return (struct rr_priv *) &team->mode_priv; 25 return (struct rr_priv *) &team->mode_priv;
26} 26}
27 27
28static struct team_port *__get_first_port_up(struct team *team,
29 struct team_port *port)
30{
31 struct team_port *cur;
32
33 if (team_port_txable(port))
34 return port;
35 cur = port;
36 list_for_each_entry_continue_rcu(cur, &team->port_list, list)
37 if (team_port_txable(port))
38 return cur;
39 list_for_each_entry_rcu(cur, &team->port_list, list) {
40 if (cur == port)
41 break;
42 if (team_port_txable(port))
43 return cur;
44 }
45 return NULL;
46}
47
48static bool rr_transmit(struct team *team, struct sk_buff *skb) 28static bool rr_transmit(struct team *team, struct sk_buff *skb)
49{ 29{
50 struct team_port *port; 30 struct team_port *port;
@@ -52,7 +32,7 @@ static bool rr_transmit(struct team *team, struct sk_buff *skb)
52 32
53 port_index = rr_priv(team)->sent_packets++ % team->en_port_count; 33 port_index = rr_priv(team)->sent_packets++ % team->en_port_count;
54 port = team_get_port_by_index_rcu(team, port_index); 34 port = team_get_port_by_index_rcu(team, port_index);
55 port = __get_first_port_up(team, port); 35 port = team_get_first_port_txable_rcu(team, port);
56 if (unlikely(!port)) 36 if (unlikely(!port))
57 goto drop; 37 goto drop;
58 if (team_dev_queue_xmit(team, port, skb)) 38 if (team_dev_queue_xmit(team, port, skb))
@@ -64,20 +44,10 @@ drop:
64 return false; 44 return false;
65} 45}
66 46
67static int rr_port_enter(struct team *team, struct team_port *port)
68{
69 return team_port_set_team_dev_addr(port);
70}
71
72static void rr_port_change_dev_addr(struct team *team, struct team_port *port)
73{
74 team_port_set_team_dev_addr(port);
75}
76
77static const struct team_mode_ops rr_mode_ops = { 47static const struct team_mode_ops rr_mode_ops = {
78 .transmit = rr_transmit, 48 .transmit = rr_transmit,
79 .port_enter = rr_port_enter, 49 .port_enter = team_modeop_port_enter,
80 .port_change_dev_addr = rr_port_change_dev_addr, 50 .port_change_dev_addr = team_modeop_port_change_dev_addr,
81}; 51};
82 52
83static const struct team_mode rr_mode = { 53static const struct team_mode rr_mode = {
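
rr_transmit() keeps its counter-modulo pick but hands the fallback scan to team_get_first_port_txable_rcu(), the same core helper the random mode calls. Presumably that helper performs the scan the removed __get_first_port_up() did: try the chosen port, then walk forward around the RCU-protected port list. A sketch under that assumption:

static struct team_port *first_txable(struct team *team,
				      struct team_port *port)
{
	struct team_port *cur;

	if (team_port_txable(port))
		return port;
	cur = port;
	/* the rest of the list after the chosen port ... */
	list_for_each_entry_continue_rcu(cur, &team->port_list, list)
		if (team_port_txable(cur))
			return cur;
	/* ... then wrap around from the head back to it */
	list_for_each_entry_rcu(cur, &team->port_list, list) {
		if (cur == port)
			break;
		if (team_port_txable(cur))
			return cur;
	}
	return NULL;
}
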
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index b7c457adc0dc..29538e6e914d 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -409,14 +409,12 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
409{ 409{
410 struct tun_file *ntfile; 410 struct tun_file *ntfile;
411 struct tun_struct *tun; 411 struct tun_struct *tun;
412 struct net_device *dev;
413 412
414 tun = rtnl_dereference(tfile->tun); 413 tun = rtnl_dereference(tfile->tun);
415 414
416 if (tun && !tfile->detached) { 415 if (tun && !tfile->detached) {
417 u16 index = tfile->queue_index; 416 u16 index = tfile->queue_index;
418 BUG_ON(index >= tun->numqueues); 417 BUG_ON(index >= tun->numqueues);
419 dev = tun->dev;
420 418
421 rcu_assign_pointer(tun->tfiles[index], 419 rcu_assign_pointer(tun->tfiles[index],
422 tun->tfiles[tun->numqueues - 1]); 420 tun->tfiles[tun->numqueues - 1]);
@@ -1205,6 +1203,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1205 } 1203 }
1206 1204
1207 skb_reset_network_header(skb); 1205 skb_reset_network_header(skb);
1206 skb_probe_transport_header(skb, 0);
1207
1208 rxhash = skb_get_rxhash(skb); 1208 rxhash = skb_get_rxhash(skb);
1209 netif_rx_ni(skb); 1209 netif_rx_ni(skb);
1210 1210
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 4709fa3497cf..44a989cd9fb2 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -362,8 +362,8 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
362 u8 iface_no; 362 u8 iface_no;
363 363
364 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 364 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
365 if (ctx == NULL) 365 if (!ctx)
366 return -ENODEV; 366 return -ENOMEM;
367 367
368 hrtimer_init(&ctx->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 368 hrtimer_init(&ctx->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
369 ctx->tx_timer.function = &cdc_ncm_tx_timer_cb; 369 ctx->tx_timer.function = &cdc_ncm_tx_timer_cb;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 57ac4b0294bc..f7d67e8eb1aa 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -154,7 +154,7 @@ struct padded_vnet_hdr {
154 */ 154 */
155static int vq2txq(struct virtqueue *vq) 155static int vq2txq(struct virtqueue *vq)
156{ 156{
157 return (virtqueue_get_queue_index(vq) - 1) / 2; 157 return (vq->index - 1) / 2;
158} 158}
159 159
160static int txq2vq(int txq) 160static int txq2vq(int txq)
@@ -164,7 +164,7 @@ static int txq2vq(int txq)
164 164
165static int vq2rxq(struct virtqueue *vq) 165static int vq2rxq(struct virtqueue *vq)
166{ 166{
167 return virtqueue_get_queue_index(vq) / 2; 167 return vq->index / 2;
168} 168}
169 169
170static int rxq2vq(int rxq) 170static int rxq2vq(int rxq)
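
virtio-net lays its virtqueues out pairwise (rx0, tx0, rx1, tx1, ...), so receive queue i sits at vq index 2 * i and transmit queue i at 2 * i + 1; the helpers above are the inverses of that layout, now reading vq->index directly instead of going through the removed accessor. A tiny round-trip check, assuming txq2vq()/rxq2vq() multiply by two as in the upstream driver:

#include <assert.h>

static int vq2txq(int vq)  { return (vq - 1) / 2; }
static int txq2vq(int txq) { return txq * 2 + 1; }
static int vq2rxq(int vq)  { return vq / 2; }
static int rxq2vq(int rxq) { return rxq * 2; }

int main(void)
{
	for (int q = 0; q < 8; q++) {
		assert(vq2txq(txq2vq(q)) == q);	/* tx vqs at odd indexes */
		assert(vq2rxq(rxq2vq(q)) == q);	/* rx vqs at even indexes */
	}
	return 0;
}
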
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 7cee7a3068ec..9a6471593ca3 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -33,7 +33,7 @@
33#include <net/arp.h> 33#include <net/arp.h>
34#include <net/ndisc.h> 34#include <net/ndisc.h>
35#include <net/ip.h> 35#include <net/ip.h>
36#include <net/ipip.h> 36#include <net/ip_tunnels.h>
37#include <net/icmp.h> 37#include <net/icmp.h>
38#include <net/udp.h> 38#include <net/udp.h>
39#include <net/rtnetlink.h> 39#include <net/rtnetlink.h>
@@ -81,31 +81,30 @@ struct vxlan_net {
81 struct hlist_head vni_list[VNI_HASH_SIZE]; 81 struct hlist_head vni_list[VNI_HASH_SIZE];
82}; 82};
83 83
84struct vxlan_rdst {
85 struct rcu_head rcu;
86 __be32 remote_ip;
87 __be16 remote_port;
88 u32 remote_vni;
89 u32 remote_ifindex;
90 struct vxlan_rdst *remote_next;
91};
92
84/* Forwarding table entry */ 93/* Forwarding table entry */
85struct vxlan_fdb { 94struct vxlan_fdb {
86 struct hlist_node hlist; /* linked list of entries */ 95 struct hlist_node hlist; /* linked list of entries */
87 struct rcu_head rcu; 96 struct rcu_head rcu;
88 unsigned long updated; /* jiffies */ 97 unsigned long updated; /* jiffies */
89 unsigned long used; 98 unsigned long used;
90 __be32 remote_ip; 99 struct vxlan_rdst remote;
91 u16 state; /* see ndm_state */ 100 u16 state; /* see ndm_state */
92 u8 eth_addr[ETH_ALEN]; 101 u8 eth_addr[ETH_ALEN];
93}; 102};
94 103
95/* Per-cpu network traffic stats */
96struct vxlan_stats {
97 u64 rx_packets;
98 u64 rx_bytes;
99 u64 tx_packets;
100 u64 tx_bytes;
101 struct u64_stats_sync syncp;
102};
103
104/* Pseudo network device */ 104/* Pseudo network device */
105struct vxlan_dev { 105struct vxlan_dev {
106 struct hlist_node hlist; 106 struct hlist_node hlist;
107 struct net_device *dev; 107 struct net_device *dev;
108 struct vxlan_stats __percpu *stats;
109 __u32 vni; /* virtual network id */ 108 __u32 vni; /* virtual network id */
110 __be32 gaddr; /* multicast group */ 109 __be32 gaddr; /* multicast group */
111 __be32 saddr; /* source address */ 110 __be32 saddr; /* source address */
@@ -157,7 +156,8 @@ static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id)
157/* Fill in neighbour message in skbuff. */ 156/* Fill in neighbour message in skbuff. */
158static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan, 157static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
159 const struct vxlan_fdb *fdb, 158 const struct vxlan_fdb *fdb,
160 u32 portid, u32 seq, int type, unsigned int flags) 159 u32 portid, u32 seq, int type, unsigned int flags,
160 const struct vxlan_rdst *rdst)
161{ 161{
162 unsigned long now = jiffies; 162 unsigned long now = jiffies;
163 struct nda_cacheinfo ci; 163 struct nda_cacheinfo ci;
@@ -176,7 +176,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
176 176
177 if (type == RTM_GETNEIGH) { 177 if (type == RTM_GETNEIGH) {
178 ndm->ndm_family = AF_INET; 178 ndm->ndm_family = AF_INET;
179 send_ip = fdb->remote_ip != 0; 179 send_ip = rdst->remote_ip != htonl(INADDR_ANY);
180 send_eth = !is_zero_ether_addr(fdb->eth_addr); 180 send_eth = !is_zero_ether_addr(fdb->eth_addr);
181 } else 181 } else
182 ndm->ndm_family = AF_BRIDGE; 182 ndm->ndm_family = AF_BRIDGE;
@@ -188,7 +188,17 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
188 if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr)) 188 if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
189 goto nla_put_failure; 189 goto nla_put_failure;
190 190
191 if (send_ip && nla_put_be32(skb, NDA_DST, fdb->remote_ip)) 191 if (send_ip && nla_put_be32(skb, NDA_DST, rdst->remote_ip))
192 goto nla_put_failure;
193
194 if (rdst->remote_port && rdst->remote_port != vxlan_port &&
195 nla_put_be16(skb, NDA_PORT, rdst->remote_port))
196 goto nla_put_failure;
197 if (rdst->remote_vni != vxlan->vni &&
198 nla_put_be32(skb, NDA_VNI, rdst->remote_vni))
199 goto nla_put_failure;
200 if (rdst->remote_ifindex &&
201 nla_put_u32(skb, NDA_IFINDEX, rdst->remote_ifindex))
192 goto nla_put_failure; 202 goto nla_put_failure;
193 203
194 ci.ndm_used = jiffies_to_clock_t(now - fdb->used); 204 ci.ndm_used = jiffies_to_clock_t(now - fdb->used);
@@ -211,6 +221,9 @@ static inline size_t vxlan_nlmsg_size(void)
211 return NLMSG_ALIGN(sizeof(struct ndmsg)) 221 return NLMSG_ALIGN(sizeof(struct ndmsg))
212 + nla_total_size(ETH_ALEN) /* NDA_LLADDR */ 222 + nla_total_size(ETH_ALEN) /* NDA_LLADDR */
213 + nla_total_size(sizeof(__be32)) /* NDA_DST */ 223 + nla_total_size(sizeof(__be32)) /* NDA_DST */
224 + nla_total_size(sizeof(__be32)) /* NDA_PORT */
225 + nla_total_size(sizeof(__be32)) /* NDA_VNI */
226 + nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
214 + nla_total_size(sizeof(struct nda_cacheinfo)); 227 + nla_total_size(sizeof(struct nda_cacheinfo));
215} 228}
216 229
@@ -225,7 +238,7 @@ static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
225 if (skb == NULL) 238 if (skb == NULL)
226 goto errout; 239 goto errout;
227 240
228 err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0); 241 err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, &fdb->remote);
229 if (err < 0) { 242 if (err < 0) {
230 /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */ 243 /* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
231 WARN_ON(err == -EMSGSIZE); 244 WARN_ON(err == -EMSGSIZE);
@@ -247,7 +260,8 @@ static void vxlan_ip_miss(struct net_device *dev, __be32 ipa)
247 260
248 memset(&f, 0, sizeof f); 261 memset(&f, 0, sizeof f);
249 f.state = NUD_STALE; 262 f.state = NUD_STALE;
250 f.remote_ip = ipa; /* goes to NDA_DST */ 263 f.remote.remote_ip = ipa; /* goes to NDA_DST */
264 f.remote.remote_vni = VXLAN_N_VID;
251 265
252 vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH); 266 vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
253} 267}
@@ -300,10 +314,38 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
300 return NULL; 314 return NULL;
301} 315}
302 316
317/* Add/update destinations for multicast */
318static int vxlan_fdb_append(struct vxlan_fdb *f,
319 __be32 ip, __u32 port, __u32 vni, __u32 ifindex)
320{
321 struct vxlan_rdst *rd_prev, *rd;
322
323 rd_prev = NULL;
324 for (rd = &f->remote; rd; rd = rd->remote_next) {
325 if (rd->remote_ip == ip &&
326 rd->remote_port == port &&
327 rd->remote_vni == vni &&
328 rd->remote_ifindex == ifindex)
329 return 0;
330 rd_prev = rd;
331 }
332 rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
333 if (rd == NULL)
334 return -ENOBUFS;
335 rd->remote_ip = ip;
336 rd->remote_port = port;
337 rd->remote_vni = vni;
338 rd->remote_ifindex = ifindex;
339 rd->remote_next = NULL;
340 rd_prev->remote_next = rd;
341 return 1;
342}
343
303/* Add new entry to forwarding table -- assumes lock held */ 344/* Add new entry to forwarding table -- assumes lock held */
304static int vxlan_fdb_create(struct vxlan_dev *vxlan, 345static int vxlan_fdb_create(struct vxlan_dev *vxlan,
305 const u8 *mac, __be32 ip, 346 const u8 *mac, __be32 ip,
306 __u16 state, __u16 flags) 347 __u16 state, __u16 flags,
348 __u32 port, __u32 vni, __u32 ifindex)
307{ 349{
308 struct vxlan_fdb *f; 350 struct vxlan_fdb *f;
309 int notify = 0; 351 int notify = 0;
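
vxlan_fdb_append() grows an entry from a single remote into a singly-linked vxlan_rdst list headed by the embedded f->remote: walk the list, return 0 for an exact duplicate, otherwise link a new node at the tail and return 1 so the caller can OR it into its notify flag (as the next hunk does). Because the head node is embedded in the fdb entry, rd_prev is never NULL when the tail link happens. The same shape, reduced to a userspace sketch:

#include <stdint.h>
#include <stdlib.h>

struct rdst { uint32_t ip; struct rdst *next; };

/* Append ip unless present; 1 = added, 0 = duplicate, -1 = no memory. */
static int rdst_append(struct rdst *head, uint32_t ip)
{
	struct rdst *prev = NULL, *rd;

	for (rd = head; rd; rd = rd->next) {
		if (rd->ip == ip)
			return 0;
		prev = rd;
	}
	rd = malloc(sizeof(*rd));
	if (!rd)
		return -1;
	rd->ip = ip;
	rd->next = NULL;
	prev->next = rd;	/* prev != NULL: the head is embedded */
	return 1;
}
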
@@ -320,6 +362,14 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
320 f->updated = jiffies; 362 f->updated = jiffies;
321 notify = 1; 363 notify = 1;
322 } 364 }
365 if ((flags & NLM_F_APPEND) &&
366 is_multicast_ether_addr(f->eth_addr)) {
367 int rc = vxlan_fdb_append(f, ip, port, vni, ifindex);
368
369 if (rc < 0)
370 return rc;
371 notify |= rc;
372 }
323 } else { 373 } else {
324 if (!(flags & NLM_F_CREATE)) 374 if (!(flags & NLM_F_CREATE))
325 return -ENOENT; 375 return -ENOENT;
@@ -333,7 +383,11 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
333 return -ENOMEM; 383 return -ENOMEM;
334 384
335 notify = 1; 385 notify = 1;
336 f->remote_ip = ip; 386 f->remote.remote_ip = ip;
387 f->remote.remote_port = port;
388 f->remote.remote_vni = vni;
389 f->remote.remote_ifindex = ifindex;
390 f->remote.remote_next = NULL;
337 f->state = state; 391 f->state = state;
338 f->updated = f->used = jiffies; 392 f->updated = f->used = jiffies;
339 memcpy(f->eth_addr, mac, ETH_ALEN); 393 memcpy(f->eth_addr, mac, ETH_ALEN);
@@ -349,6 +403,19 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
349 return 0; 403 return 0;
350} 404}
351 405
406void vxlan_fdb_free(struct rcu_head *head)
407{
408 struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
409
410 while (f->remote.remote_next) {
411 struct vxlan_rdst *rd = f->remote.remote_next;
412
413 f->remote.remote_next = rd->remote_next;
414 kfree(rd);
415 }
416 kfree(f);
417}
418
352static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f) 419static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
353{ 420{
354 netdev_dbg(vxlan->dev, 421 netdev_dbg(vxlan->dev,
@@ -358,7 +425,7 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
358 vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH); 425 vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH);
359 426
360 hlist_del_rcu(&f->hlist); 427 hlist_del_rcu(&f->hlist);
361 kfree_rcu(f, rcu); 428 call_rcu(&f->rcu, vxlan_fdb_free);
362} 429}
363 430
364/* Add static entry (via netlink) */ 431/* Add static entry (via netlink) */
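
kfree_rcu() can only free the one object that contains the rcu_head; once an fdb entry owns a chain of separately allocated vxlan_rdst nodes, teardown needs a real callback. Hence the switch to call_rcu() with vxlan_fdb_free() from the previous hunk, which walks and kfree()s the chain after the grace period:

	hlist_del_rcu(&f->hlist);		/* unpublish the entry */
	call_rcu(&f->rcu, vxlan_fdb_free);	/* free entry plus rdst
						 * chain once readers drain */
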
@@ -367,7 +434,9 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
367 const unsigned char *addr, u16 flags) 434 const unsigned char *addr, u16 flags)
368{ 435{
369 struct vxlan_dev *vxlan = netdev_priv(dev); 436 struct vxlan_dev *vxlan = netdev_priv(dev);
437 struct net *net = dev_net(vxlan->dev);
370 __be32 ip; 438 __be32 ip;
439 u32 port, vni, ifindex;
371 int err; 440 int err;
372 441
373 if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) { 442 if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
@@ -384,8 +453,36 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
384 453
385 ip = nla_get_be32(tb[NDA_DST]); 454 ip = nla_get_be32(tb[NDA_DST]);
386 455
456 if (tb[NDA_PORT]) {
457 if (nla_len(tb[NDA_PORT]) != sizeof(u32))
458 return -EINVAL;
459 port = nla_get_u32(tb[NDA_PORT]);
460 } else
461 port = vxlan_port;
462
463 if (tb[NDA_VNI]) {
464 if (nla_len(tb[NDA_VNI]) != sizeof(u32))
465 return -EINVAL;
466 vni = nla_get_u32(tb[NDA_VNI]);
467 } else
468 vni = vxlan->vni;
469
470 if (tb[NDA_IFINDEX]) {
471 struct net_device *tdev;
472
473 if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
474 return -EINVAL;
475 ifindex = nla_get_u32(tb[NDA_IFINDEX]);
476 tdev = dev_get_by_index(net, ifindex);
477 if (!tdev)
478 return -EADDRNOTAVAIL;
479 dev_put(tdev);
480 } else
481 ifindex = 0;
482
387 spin_lock_bh(&vxlan->hash_lock); 483 spin_lock_bh(&vxlan->hash_lock);
388 err = vxlan_fdb_create(vxlan, addr, ip, ndm->ndm_state, flags); 484 err = vxlan_fdb_create(vxlan, addr, ip, ndm->ndm_state, flags, port,
485 vni, ifindex);
389 spin_unlock_bh(&vxlan->hash_lock); 486 spin_unlock_bh(&vxlan->hash_lock);
390 487
391 return err; 488 return err;
@@ -423,18 +520,21 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
423 int err; 520 int err;
424 521
425 hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) { 522 hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
426 if (idx < cb->args[0]) 523 struct vxlan_rdst *rd;
427 goto skip; 524 for (rd = &f->remote; rd; rd = rd->remote_next) {
428 525 if (idx < cb->args[0])
429 err = vxlan_fdb_info(skb, vxlan, f, 526 goto skip;
430 NETLINK_CB(cb->skb).portid, 527
431 cb->nlh->nlmsg_seq, 528 err = vxlan_fdb_info(skb, vxlan, f,
432 RTM_NEWNEIGH, 529 NETLINK_CB(cb->skb).portid,
433 NLM_F_MULTI); 530 cb->nlh->nlmsg_seq,
434 if (err < 0) 531 RTM_NEWNEIGH,
435 break; 532 NLM_F_MULTI, rd);
533 if (err < 0)
534 break;
436skip: 535skip:
437 ++idx; 536 ++idx;
537 }
438 } 538 }
439 } 539 }
440 540
@@ -454,22 +554,23 @@ static void vxlan_snoop(struct net_device *dev,
454 f = vxlan_find_mac(vxlan, src_mac); 554 f = vxlan_find_mac(vxlan, src_mac);
455 if (likely(f)) { 555 if (likely(f)) {
456 f->used = jiffies; 556 f->used = jiffies;
457 if (likely(f->remote_ip == src_ip)) 557 if (likely(f->remote.remote_ip == src_ip))
458 return; 558 return;
459 559
460 if (net_ratelimit()) 560 if (net_ratelimit())
461 netdev_info(dev, 561 netdev_info(dev,
462 "%pM migrated from %pI4 to %pI4\n", 562 "%pM migrated from %pI4 to %pI4\n",
463 src_mac, &f->remote_ip, &src_ip); 563 src_mac, &f->remote.remote_ip, &src_ip);
464 564
465 f->remote_ip = src_ip; 565 f->remote.remote_ip = src_ip;
466 f->updated = jiffies; 566 f->updated = jiffies;
467 } else { 567 } else {
468 /* learned new entry */ 568 /* learned new entry */
469 spin_lock(&vxlan->hash_lock); 569 spin_lock(&vxlan->hash_lock);
470 err = vxlan_fdb_create(vxlan, src_mac, src_ip, 570 err = vxlan_fdb_create(vxlan, src_mac, src_ip,
471 NUD_REACHABLE, 571 NUD_REACHABLE,
472 NLM_F_EXCL|NLM_F_CREATE); 572 NLM_F_EXCL|NLM_F_CREATE,
573 vxlan_port, vxlan->vni, 0);
473 spin_unlock(&vxlan->hash_lock); 574 spin_unlock(&vxlan->hash_lock);
474 } 575 }
475} 576}
@@ -556,7 +657,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
556 struct iphdr *oip; 657 struct iphdr *oip;
557 struct vxlanhdr *vxh; 658 struct vxlanhdr *vxh;
558 struct vxlan_dev *vxlan; 659 struct vxlan_dev *vxlan;
559 struct vxlan_stats *stats; 660 struct pcpu_tstats *stats;
560 __u32 vni; 661 __u32 vni;
561 int err; 662 int err;
562 663
@@ -632,7 +733,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
632 } 733 }
633 } 734 }
634 735
635 stats = this_cpu_ptr(vxlan->stats); 736 stats = this_cpu_ptr(vxlan->dev->tstats);
636 u64_stats_update_begin(&stats->syncp); 737 u64_stats_update_begin(&stats->syncp);
637 stats->rx_packets++; 738 stats->rx_packets++;
638 stats->rx_bytes += skb->len; 739 stats->rx_bytes += skb->len;
@@ -691,7 +792,6 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
691 n = neigh_lookup(&arp_tbl, &tip, dev); 792 n = neigh_lookup(&arp_tbl, &tip, dev);
692 793
693 if (n) { 794 if (n) {
694 struct vxlan_dev *vxlan = netdev_priv(dev);
695 struct vxlan_fdb *f; 795 struct vxlan_fdb *f;
696 struct sk_buff *reply; 796 struct sk_buff *reply;
697 797
@@ -701,7 +801,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
701 } 801 }
702 802
703 f = vxlan_find_mac(vxlan, n->ha); 803 f = vxlan_find_mac(vxlan, n->ha);
704 if (f && f->remote_ip == 0) { 804 if (f && f->remote.remote_ip == htonl(INADDR_ANY)) {
705 /* bridge-local neighbor */ 805 /* bridge-local neighbor */
706 neigh_release(n); 806 neigh_release(n);
707 goto out; 807 goto out;
@@ -763,28 +863,6 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
763 return false; 863 return false;
764} 864}
765 865
766/* Extract dsfield from inner protocol */
767static inline u8 vxlan_get_dsfield(const struct iphdr *iph,
768 const struct sk_buff *skb)
769{
770 if (skb->protocol == htons(ETH_P_IP))
771 return iph->tos;
772 else if (skb->protocol == htons(ETH_P_IPV6))
773 return ipv6_get_dsfield((const struct ipv6hdr *)iph);
774 else
775 return 0;
776}
777
778/* Propagate ECN bits out */
779static inline u8 vxlan_ecn_encap(u8 tos,
780 const struct iphdr *iph,
781 const struct sk_buff *skb)
782{
783 u8 inner = vxlan_get_dsfield(iph, skb);
784
785 return INET_ECN_encapsulate(tos, inner);
786}
787
788static void vxlan_sock_free(struct sk_buff *skb) 866static void vxlan_sock_free(struct sk_buff *skb)
789{ 867{
790 sock_put(skb->sk); 868 sock_put(skb->sk);
@@ -820,68 +898,74 @@ static u16 vxlan_src_port(const struct vxlan_dev *vxlan, struct sk_buff *skb)
820 return (((u64) hash * range) >> 32) + vxlan->port_min; 898 return (((u64) hash * range) >> 32) + vxlan->port_min;
821} 899}
822 900
823/* Transmit local packets over Vxlan 901static int handle_offloads(struct sk_buff *skb)
824 * 902{
825 * Outer IP header inherits ECN and DF from inner header. 903 if (skb_is_gso(skb)) {
826 * Outer UDP destination is the VXLAN assigned port. 904 int err = skb_unclone(skb, GFP_ATOMIC);
827 * source port is based on hash of flow 905 if (unlikely(err))
828 */ 906 return err;
829static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) 907
908 skb_shinfo(skb)->gso_type |= (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP);
909 } else if (skb->ip_summed != CHECKSUM_PARTIAL)
910 skb->ip_summed = CHECKSUM_NONE;
911
912 return 0;
913}
914
915/* Bypass encapsulation if the destination is local */
916static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
917 struct vxlan_dev *dst_vxlan)
918{
919 struct pcpu_tstats *tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
920 struct pcpu_tstats *rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
921
922 skb->pkt_type = PACKET_HOST;
923 skb->encapsulation = 0;
924 skb->dev = dst_vxlan->dev;
925 __skb_pull(skb, skb_network_offset(skb));
926
927 if (dst_vxlan->flags & VXLAN_F_LEARN)
928 vxlan_snoop(skb->dev, INADDR_LOOPBACK, eth_hdr(skb)->h_source);
929
930 u64_stats_update_begin(&tx_stats->syncp);
931 tx_stats->tx_packets++;
932 tx_stats->tx_bytes += skb->len;
933 u64_stats_update_end(&tx_stats->syncp);
934
935 if (netif_rx(skb) == NET_RX_SUCCESS) {
936 u64_stats_update_begin(&rx_stats->syncp);
937 rx_stats->rx_packets++;
938 rx_stats->rx_bytes += skb->len;
939 u64_stats_update_end(&rx_stats->syncp);
940 } else {
941 skb->dev->stats.rx_dropped++;
942 }
943}
944
945static netdev_tx_t vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
946 struct vxlan_rdst *rdst, bool did_rsc)
830{ 947{
831 struct vxlan_dev *vxlan = netdev_priv(dev); 948 struct vxlan_dev *vxlan = netdev_priv(dev);
832 struct rtable *rt; 949 struct rtable *rt;
833 const struct iphdr *old_iph; 950 const struct iphdr *old_iph;
834 struct ethhdr *eth;
835 struct iphdr *iph; 951 struct iphdr *iph;
836 struct vxlanhdr *vxh; 952 struct vxlanhdr *vxh;
837 struct udphdr *uh; 953 struct udphdr *uh;
838 struct flowi4 fl4; 954 struct flowi4 fl4;
839 unsigned int pkt_len = skb->len;
840 __be32 dst; 955 __be32 dst;
841 __u16 src_port; 956 __u16 src_port, dst_port;
957 u32 vni;
842 __be16 df = 0; 958 __be16 df = 0;
843 __u8 tos, ttl; 959 __u8 tos, ttl;
844 int err;
845 bool did_rsc = false;
846 const struct vxlan_fdb *f;
847 960
848 skb_reset_mac_header(skb); 961 dst_port = rdst->remote_port ? rdst->remote_port : vxlan_port;
849 eth = eth_hdr(skb); 962 vni = rdst->remote_vni;
850 963 dst = rdst->remote_ip;
851 if ((vxlan->flags & VXLAN_F_PROXY) && ntohs(eth->h_proto) == ETH_P_ARP)
852 return arp_reduce(dev, skb);
853 else if ((vxlan->flags&VXLAN_F_RSC) && ntohs(eth->h_proto) == ETH_P_IP)
854 did_rsc = route_shortcircuit(dev, skb);
855
856 f = vxlan_find_mac(vxlan, eth->h_dest);
857 if (f == NULL) {
858 did_rsc = false;
859 dst = vxlan->gaddr;
860 if (!dst && (vxlan->flags & VXLAN_F_L2MISS) &&
861 !is_multicast_ether_addr(eth->h_dest))
862 vxlan_fdb_miss(vxlan, eth->h_dest);
863 } else
864 dst = f->remote_ip;
865 964
866 if (!dst) { 965 if (!dst) {
867 if (did_rsc) { 966 if (did_rsc) {
868 __skb_pull(skb, skb_network_offset(skb));
869 skb->ip_summed = CHECKSUM_NONE;
870 skb->pkt_type = PACKET_HOST;
871
872 /* short-circuited back to local bridge */ 967 /* short-circuited back to local bridge */
873 if (netif_rx(skb) == NET_RX_SUCCESS) { 968 vxlan_encap_bypass(skb, vxlan, vxlan);
874 struct vxlan_stats *stats =
875 this_cpu_ptr(vxlan->stats);
876
877 u64_stats_update_begin(&stats->syncp);
878 stats->tx_packets++;
879 stats->tx_bytes += pkt_len;
880 u64_stats_update_end(&stats->syncp);
881 } else {
882 dev->stats.tx_errors++;
883 dev->stats.tx_aborted_errors++;
884 }
885 return NETDEV_TX_OK; 969 return NETDEV_TX_OK;
886 } 970 }
887 goto drop; 971 goto drop;
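
vxlan_encap_bypass() charges the bypassed packet to the per-CPU pcpu_tstats of both devices. u64_stats_update_begin/end is the writer half of a seqcount that lets 64-bit counters be read without tearing on 32-bit machines; the matching reader loop is visible in the vxlan_stats64() code removed further down. The writer pattern in isolation:

static void count_tx(struct net_device *dev, unsigned int len)
{
	struct pcpu_tstats *stats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&stats->syncp);
	stats->tx_packets++;
	stats->tx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

No lock is needed: each CPU touches only its own counters, and the seqcount merely protects readers from half-updated 64-bit values.
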
@@ -904,12 +988,12 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
904 988
905 tos = vxlan->tos; 989 tos = vxlan->tos;
906 if (tos == 1) 990 if (tos == 1)
907 tos = vxlan_get_dsfield(old_iph, skb); 991 tos = ip_tunnel_get_dsfield(old_iph, skb);
908 992
909 src_port = vxlan_src_port(vxlan, skb); 993 src_port = vxlan_src_port(vxlan, skb);
910 994
911 memset(&fl4, 0, sizeof(fl4)); 995 memset(&fl4, 0, sizeof(fl4));
912 fl4.flowi4_oif = vxlan->link; 996 fl4.flowi4_oif = rdst->remote_ifindex;
913 fl4.flowi4_tos = RT_TOS(tos); 997 fl4.flowi4_tos = RT_TOS(tos);
914 fl4.daddr = dst; 998 fl4.daddr = dst;
915 fl4.saddr = vxlan->saddr; 999 fl4.saddr = vxlan->saddr;
@@ -928,6 +1012,18 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
928 goto tx_error; 1012 goto tx_error;
929 } 1013 }
930 1014
1015 /* Bypass encapsulation if the destination is local */
1016 if (rt->rt_flags & RTCF_LOCAL) {
1017 struct vxlan_dev *dst_vxlan;
1018
1019 ip_rt_put(rt);
1020 dst_vxlan = vxlan_find_vni(dev_net(dev), vni);
1021 if (!dst_vxlan)
1022 goto tx_error;
1023 vxlan_encap_bypass(skb, vxlan, dst_vxlan);
1024 return NETDEV_TX_OK;
1025 }
1026
931 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 1027 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
932 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | 1028 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
933 IPSKB_REROUTED); 1029 IPSKB_REROUTED);
@@ -936,13 +1032,13 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
936 1032
937 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh)); 1033 vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
938 vxh->vx_flags = htonl(VXLAN_FLAGS); 1034 vxh->vx_flags = htonl(VXLAN_FLAGS);
939 vxh->vx_vni = htonl(vxlan->vni << 8); 1035 vxh->vx_vni = htonl(vni << 8);
940 1036
941 __skb_push(skb, sizeof(*uh)); 1037 __skb_push(skb, sizeof(*uh));
942 skb_reset_transport_header(skb); 1038 skb_reset_transport_header(skb);
943 uh = udp_hdr(skb); 1039 uh = udp_hdr(skb);
944 1040
945 uh->dest = htons(vxlan_port); 1041 uh->dest = htons(dst_port);
946 uh->source = htons(src_port); 1042 uh->source = htons(src_port);
947 1043
948 uh->len = htons(skb->len); 1044 uh->len = htons(skb->len);
@@ -955,7 +1051,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
955 iph->ihl = sizeof(struct iphdr) >> 2; 1051 iph->ihl = sizeof(struct iphdr) >> 2;
956 iph->frag_off = df; 1052 iph->frag_off = df;
957 iph->protocol = IPPROTO_UDP; 1053 iph->protocol = IPPROTO_UDP;
958 iph->tos = vxlan_ecn_encap(tos, old_iph, skb); 1054 iph->tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
959 iph->daddr = dst; 1055 iph->daddr = dst;
960 iph->saddr = fl4.saddr; 1056 iph->saddr = fl4.saddr;
961 iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); 1057 iph->ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
@@ -965,22 +1061,10 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
965 1061
966 vxlan_set_owner(dev, skb); 1062 vxlan_set_owner(dev, skb);
967 1063
968 /* See iptunnel_xmit() */ 1064 if (handle_offloads(skb))
969 if (skb->ip_summed != CHECKSUM_PARTIAL) 1065 goto drop;
970 skb->ip_summed = CHECKSUM_NONE;
971
972 err = ip_local_out(skb);
973 if (likely(net_xmit_eval(err) == 0)) {
974 struct vxlan_stats *stats = this_cpu_ptr(vxlan->stats);
975 1066
976 u64_stats_update_begin(&stats->syncp); 1067 iptunnel_xmit(skb, dev);
977 stats->tx_packets++;
978 stats->tx_bytes += pkt_len;
979 u64_stats_update_end(&stats->syncp);
980 } else {
981 dev->stats.tx_errors++;
982 dev->stats.tx_aborted_errors++;
983 }
984 return NETDEV_TX_OK; 1068 return NETDEV_TX_OK;
985 1069
986drop: 1070drop:
@@ -994,6 +1078,64 @@ tx_free:
994 return NETDEV_TX_OK; 1078 return NETDEV_TX_OK;
995} 1079}
996 1080
1081/* Transmit local packets over Vxlan
1082 *
1083 * Outer IP header inherits ECN and DF from inner header.
1084 * Outer UDP destination is the VXLAN assigned port.
1085 * source port is based on hash of flow
1086 */
1087static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
1088{
1089 struct vxlan_dev *vxlan = netdev_priv(dev);
1090 struct ethhdr *eth;
1091 bool did_rsc = false;
1092 struct vxlan_rdst group, *rdst0, *rdst;
1093 struct vxlan_fdb *f;
1094 int rc1, rc;
1095
1096 skb_reset_mac_header(skb);
1097 eth = eth_hdr(skb);
1098
1099 if ((vxlan->flags & VXLAN_F_PROXY) && ntohs(eth->h_proto) == ETH_P_ARP)
1100 return arp_reduce(dev, skb);
1101 else if ((vxlan->flags&VXLAN_F_RSC) && ntohs(eth->h_proto) == ETH_P_IP)
1102 did_rsc = route_shortcircuit(dev, skb);
1103
1104 f = vxlan_find_mac(vxlan, eth->h_dest);
1105 if (f == NULL) {
1106 did_rsc = false;
1107 group.remote_port = vxlan_port;
1108 group.remote_vni = vxlan->vni;
1109 group.remote_ip = vxlan->gaddr;
1110 group.remote_ifindex = vxlan->link;
1111 group.remote_next = 0;
1112 rdst0 = &group;
1113
1114 if (group.remote_ip == htonl(INADDR_ANY) &&
1115 (vxlan->flags & VXLAN_F_L2MISS) &&
1116 !is_multicast_ether_addr(eth->h_dest))
1117 vxlan_fdb_miss(vxlan, eth->h_dest);
1118 } else
1119 rdst0 = &f->remote;
1120
1121 rc = NETDEV_TX_OK;
1122
1123 /* if there are multiple destinations, send copies */
1124 for (rdst = rdst0->remote_next; rdst; rdst = rdst->remote_next) {
1125 struct sk_buff *skb1;
1126
1127 skb1 = skb_clone(skb, GFP_ATOMIC);
1128 rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc);
1129 if (rc == NETDEV_TX_OK)
1130 rc = rc1;
1131 }
1132
1133 rc1 = vxlan_xmit_one(skb, dev, rdst0, did_rsc);
1134 if (rc == NETDEV_TX_OK)
1135 rc = rc1;
1136 return rc;
1137}
1138
997/* Walk the forwarding table and purge stale entries */ 1139/* Walk the forwarding table and purge stale entries */
998static void vxlan_cleanup(unsigned long arg) 1140static void vxlan_cleanup(unsigned long arg)
999{ 1141{
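
The rewritten vxlan_xmit() resolves the destination list once, clones the skb for every additional vxlan_rdst, and finally sends the original to the first destination. skb_clone() can return NULL under memory pressure, which a fan-out loop has to tolerate; a defensive sketch of the same loop (names as in the hunk above, the NULL check is ours):

	for (rdst = rdst0->remote_next; rdst; rdst = rdst->remote_next) {
		struct sk_buff *skb1 = skb_clone(skb, GFP_ATOMIC);

		if (!skb1)
			continue;	/* drop this copy, keep going */
		vxlan_xmit_one(skb1, dev, rdst, did_rsc);
	}
	vxlan_xmit_one(skb, dev, rdst0, did_rsc);	/* consumes skb */
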
@@ -1034,10 +1176,8 @@ static void vxlan_cleanup(unsigned long arg)
1034/* Setup stats when device is created */ 1176/* Setup stats when device is created */
1035static int vxlan_init(struct net_device *dev) 1177static int vxlan_init(struct net_device *dev)
1036{ 1178{
1037 struct vxlan_dev *vxlan = netdev_priv(dev); 1179 dev->tstats = alloc_percpu(struct pcpu_tstats);
1038 1180 if (!dev->tstats)
1039 vxlan->stats = alloc_percpu(struct vxlan_stats);
1040 if (!vxlan->stats)
1041 return -ENOMEM; 1181 return -ENOMEM;
1042 1182
1043 return 0; 1183 return 0;
@@ -1093,49 +1233,6 @@ static int vxlan_stop(struct net_device *dev)
1093 return 0; 1233 return 0;
1094} 1234}
1095 1235
1096/* Merge per-cpu statistics */
1097static struct rtnl_link_stats64 *vxlan_stats64(struct net_device *dev,
1098 struct rtnl_link_stats64 *stats)
1099{
1100 struct vxlan_dev *vxlan = netdev_priv(dev);
1101 struct vxlan_stats tmp, sum = { 0 };
1102 unsigned int cpu;
1103
1104 for_each_possible_cpu(cpu) {
1105 unsigned int start;
1106 const struct vxlan_stats *stats
1107 = per_cpu_ptr(vxlan->stats, cpu);
1108
1109 do {
1110 start = u64_stats_fetch_begin_bh(&stats->syncp);
1111 memcpy(&tmp, stats, sizeof(tmp));
1112 } while (u64_stats_fetch_retry_bh(&stats->syncp, start));
1113
1114 sum.tx_bytes += tmp.tx_bytes;
1115 sum.tx_packets += tmp.tx_packets;
1116 sum.rx_bytes += tmp.rx_bytes;
1117 sum.rx_packets += tmp.rx_packets;
1118 }
1119
1120 stats->tx_bytes = sum.tx_bytes;
1121 stats->tx_packets = sum.tx_packets;
1122 stats->rx_bytes = sum.rx_bytes;
1123 stats->rx_packets = sum.rx_packets;
1124
1125 stats->multicast = dev->stats.multicast;
1126 stats->rx_length_errors = dev->stats.rx_length_errors;
1127 stats->rx_frame_errors = dev->stats.rx_frame_errors;
1128 stats->rx_errors = dev->stats.rx_errors;
1129
1130 stats->tx_dropped = dev->stats.tx_dropped;
1131 stats->tx_carrier_errors = dev->stats.tx_carrier_errors;
1132 stats->tx_aborted_errors = dev->stats.tx_aborted_errors;
1133 stats->collisions = dev->stats.collisions;
1134 stats->tx_errors = dev->stats.tx_errors;
1135
1136 return stats;
1137}
1138
1139/* Stub, nothing needs to be done. */ 1236/* Stub, nothing needs to be done. */
1140static void vxlan_set_multicast_list(struct net_device *dev) 1237static void vxlan_set_multicast_list(struct net_device *dev)
1141{ 1238{
@@ -1146,7 +1243,7 @@ static const struct net_device_ops vxlan_netdev_ops = {
1146 .ndo_open = vxlan_open, 1243 .ndo_open = vxlan_open,
1147 .ndo_stop = vxlan_stop, 1244 .ndo_stop = vxlan_stop,
1148 .ndo_start_xmit = vxlan_xmit, 1245 .ndo_start_xmit = vxlan_xmit,
1149 .ndo_get_stats64 = vxlan_stats64, 1246 .ndo_get_stats64 = ip_tunnel_get_stats64,
1150 .ndo_set_rx_mode = vxlan_set_multicast_list, 1247 .ndo_set_rx_mode = vxlan_set_multicast_list,
1151 .ndo_change_mtu = eth_change_mtu, 1248 .ndo_change_mtu = eth_change_mtu,
1152 .ndo_validate_addr = eth_validate_addr, 1249 .ndo_validate_addr = eth_validate_addr,
@@ -1163,9 +1260,7 @@ static struct device_type vxlan_type = {
1163 1260
1164static void vxlan_free(struct net_device *dev) 1261static void vxlan_free(struct net_device *dev)
1165{ 1262{
1166 struct vxlan_dev *vxlan = netdev_priv(dev); 1263 free_percpu(dev->tstats);
1167
1168 free_percpu(vxlan->stats);
1169 free_netdev(dev); 1264 free_netdev(dev);
1170} 1265}
1171 1266
@@ -1189,8 +1284,10 @@ static void vxlan_setup(struct net_device *dev)
1189 dev->features |= NETIF_F_NETNS_LOCAL; 1284 dev->features |= NETIF_F_NETNS_LOCAL;
1190 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; 1285 dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
1191 dev->features |= NETIF_F_RXCSUM; 1286 dev->features |= NETIF_F_RXCSUM;
1287 dev->features |= NETIF_F_GSO_SOFTWARE;
1192 1288
1193 dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM; 1289 dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
1290 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
1194 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 1291 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1195 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; 1292 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1196 1293
@@ -1555,6 +1652,7 @@ static void __exit vxlan_cleanup_module(void)
1555{ 1652{
1556 rtnl_link_unregister(&vxlan_link_ops); 1653 rtnl_link_unregister(&vxlan_link_ops);
1557 unregister_pernet_device(&vxlan_net_ops); 1654 unregister_pernet_device(&vxlan_net_ops);
1655 rcu_barrier();
1558} 1656}
1559module_exit(vxlan_cleanup_module); 1657module_exit(vxlan_cleanup_module);
1560 1658
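
With fdb entries now freed through call_rcu(), module unload must wait for the queued callbacks to run before the code of vxlan_fdb_free() disappears. That is what rcu_barrier() provides: unlike synchronize_rcu(), which waits only for readers, it waits until every already-posted RCU callback has executed. The resulting exit ordering, schematically:

	rtnl_link_unregister(...);	/* no new devices */
	unregister_pernet_device(...);	/* tear down, posting call_rcu()s */
	rcu_barrier();			/* let those callbacks finish before
					 * the module text goes away */
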
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 3150def17193..2d691b8b95b9 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -1523,7 +1523,8 @@ int ath5k_hw_dma_stop(struct ath5k_hw *ah);
1523/* EEPROM access functions */ 1523/* EEPROM access functions */
1524int ath5k_eeprom_init(struct ath5k_hw *ah); 1524int ath5k_eeprom_init(struct ath5k_hw *ah);
1525void ath5k_eeprom_detach(struct ath5k_hw *ah); 1525void ath5k_eeprom_detach(struct ath5k_hw *ah);
1526 1526int ath5k_eeprom_mode_from_channel(struct ath5k_hw *ah,
1527 struct ieee80211_channel *channel);
1527 1528
1528/* Protocol Control Unit Functions */ 1529/* Protocol Control Unit Functions */
1529/* Helpers */ 1530/* Helpers */
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index b7e0258887e7..94d34ee02265 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -1779,7 +1779,8 @@ ath5k_eeprom_detach(struct ath5k_hw *ah)
1779} 1779}
1780 1780
1781int 1781int
1782ath5k_eeprom_mode_from_channel(struct ieee80211_channel *channel) 1782ath5k_eeprom_mode_from_channel(struct ath5k_hw *ah,
1783 struct ieee80211_channel *channel)
1783{ 1784{
1784 switch (channel->hw_value) { 1785 switch (channel->hw_value) {
1785 case AR5K_MODE_11A: 1786 case AR5K_MODE_11A:
@@ -1789,6 +1790,7 @@ ath5k_eeprom_mode_from_channel(struct ieee80211_channel *channel)
1789 case AR5K_MODE_11B: 1790 case AR5K_MODE_11B:
1790 return AR5K_EEPROM_MODE_11B; 1791 return AR5K_EEPROM_MODE_11B;
1791 default: 1792 default:
1792 return -1; 1793 ATH5K_WARN(ah, "channel is not A/B/G!");
1794 return AR5K_EEPROM_MODE_11A;
1793 } 1795 }
1794} 1796}
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h
index 94a9bbea6874..693296ee9693 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.h
+++ b/drivers/net/wireless/ath/ath5k/eeprom.h
@@ -493,6 +493,3 @@ struct ath5k_eeprom_info {
493 /* Antenna raw switch tables */ 493 /* Antenna raw switch tables */
494 u32 ee_antenna[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX]; 494 u32 ee_antenna[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX];
495}; 495};
496
497int
498ath5k_eeprom_mode_from_channel(struct ieee80211_channel *channel);
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index a78afa98c650..d6bc7cb61bfb 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -1612,11 +1612,7 @@ ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
1612 1612
1613 ah->ah_cal_mask |= AR5K_CALIBRATION_NF; 1613 ah->ah_cal_mask |= AR5K_CALIBRATION_NF;
1614 1614
1615 ee_mode = ath5k_eeprom_mode_from_channel(ah->ah_current_channel); 1615 ee_mode = ath5k_eeprom_mode_from_channel(ah, ah->ah_current_channel);
1616 if (WARN_ON(ee_mode < 0)) {
1617 ah->ah_cal_mask &= ~AR5K_CALIBRATION_NF;
1618 return;
1619 }
1620 1616
1621 /* completed NF calibration, test threshold */ 1617 /* completed NF calibration, test threshold */
1622 nf = ath5k_hw_read_measured_noise_floor(ah); 1618 nf = ath5k_hw_read_measured_noise_floor(ah);
@@ -2317,12 +2313,7 @@ ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
2317 2313
2318 def_ant = ah->ah_def_ant; 2314 def_ant = ah->ah_def_ant;
2319 2315
2320 ee_mode = ath5k_eeprom_mode_from_channel(channel); 2316 ee_mode = ath5k_eeprom_mode_from_channel(ah, channel);
2321 if (ee_mode < 0) {
2322 ATH5K_ERR(ah,
2323 "invalid channel: %d\n", channel->center_freq);
2324 return;
2325 }
2326 2317
2327 switch (ant_mode) { 2318 switch (ant_mode) {
2328 case AR5K_ANTMODE_DEFAULT: 2319 case AR5K_ANTMODE_DEFAULT:
@@ -3622,12 +3613,7 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
3622 return -EINVAL; 3613 return -EINVAL;
3623 } 3614 }
3624 3615
3625 ee_mode = ath5k_eeprom_mode_from_channel(channel); 3616 ee_mode = ath5k_eeprom_mode_from_channel(ah, channel);
3626 if (ee_mode < 0) {
3627 ATH5K_ERR(ah,
3628 "invalid channel: %d\n", channel->center_freq);
3629 return -EINVAL;
3630 }
3631 3617
3632 /* Initialize TX power table */ 3618 /* Initialize TX power table */
3633 switch (ah->ah_radio) { 3619 switch (ah->ah_radio) {
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index e2d8b2cf19eb..a3399c4f13a9 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -984,9 +984,7 @@ ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
984 if (ah->ah_version == AR5K_AR5210) 984 if (ah->ah_version == AR5K_AR5210)
985 return; 985 return;
986 986
987 ee_mode = ath5k_eeprom_mode_from_channel(channel); 987 ee_mode = ath5k_eeprom_mode_from_channel(ah, channel);
988 if (WARN_ON(ee_mode < 0))
989 return;
990 988
991 /* Adjust power delta for channel 14 */ 989 /* Adjust power delta for channel 14 */
992 if (channel->center_freq == 2484) 990 if (channel->center_freq == 2484)
diff --git a/drivers/net/wireless/ath/ath6kl/Kconfig b/drivers/net/wireless/ath/ath6kl/Kconfig
index 630c83db056e..e39e5860a2e9 100644
--- a/drivers/net/wireless/ath/ath6kl/Kconfig
+++ b/drivers/net/wireless/ath/ath6kl/Kconfig
@@ -30,6 +30,15 @@ config ATH6KL_DEBUG
30 ---help--- 30 ---help---
31 Enables debug support 31 Enables debug support
32 32
33config ATH6KL_TRACING
34 bool "Atheros ath6kl tracing support"
35 depends on ATH6KL
36 depends on EVENT_TRACING
37 ---help---
 38	  Select this to make ath6kl use the tracing infrastructure.
39
40 If unsure, say Y to make it easier to debug problems.
41
33config ATH6KL_REGDOMAIN 42config ATH6KL_REGDOMAIN
34 bool "Atheros ath6kl regdomain support" 43 bool "Atheros ath6kl regdomain support"
35 depends on ATH6KL 44 depends on ATH6KL
diff --git a/drivers/net/wireless/ath/ath6kl/Makefile b/drivers/net/wireless/ath/ath6kl/Makefile
index cab0ec0d5380..dc2b3b46781e 100644
--- a/drivers/net/wireless/ath/ath6kl/Makefile
+++ b/drivers/net/wireless/ath/ath6kl/Makefile
@@ -35,10 +35,15 @@ ath6kl_core-y += txrx.o
35ath6kl_core-y += wmi.o 35ath6kl_core-y += wmi.o
36ath6kl_core-y += core.o 36ath6kl_core-y += core.o
37ath6kl_core-y += recovery.o 37ath6kl_core-y += recovery.o
38
38ath6kl_core-$(CONFIG_NL80211_TESTMODE) += testmode.o 39ath6kl_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
40ath6kl_core-$(CONFIG_ATH6KL_TRACING) += trace.o
39 41
40obj-$(CONFIG_ATH6KL_SDIO) += ath6kl_sdio.o 42obj-$(CONFIG_ATH6KL_SDIO) += ath6kl_sdio.o
41ath6kl_sdio-y += sdio.o 43ath6kl_sdio-y += sdio.o
42 44
43obj-$(CONFIG_ATH6KL_USB) += ath6kl_usb.o 45obj-$(CONFIG_ATH6KL_USB) += ath6kl_usb.o
44ath6kl_usb-y += usb.o 46ath6kl_usb-y += usb.o
47
48# for tracing framework to find trace.h
49CFLAGS_trace.o := -I$(src)
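
The extra include path is what lets the tracing framework find the driver's own trace.h: trace.c compiles the tracepoint bodies by re-including that header through <trace/define_trace.h>. A skeletal pair showing the standard pattern (the event name and fields are placeholders, not taken from this patch):

/* trace.h */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ath6kl

#if !defined(_ATH6KL_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _ATH6KL_TRACE_H

#include <linux/tracepoint.h>

TRACE_EVENT(ath6kl_example,
	TP_PROTO(unsigned int len),
	TP_ARGS(len),
	TP_STRUCT__entry(__field(unsigned int, len)),
	TP_fast_assign(__entry->len = len;),
	TP_printk("len %u", __entry->len)
);

#endif /* _ATH6KL_TRACE_H || TRACE_HEADER_MULTI_READ */

/* this part must stay outside the include guard */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE trace
#include <trace/define_trace.h>

/* trace.c */
#define CREATE_TRACE_POINTS
#include "trace.h"

Without -I$(src), the second pass define_trace.h makes over trace.h would fail to resolve, which is exactly what the Makefile comment is about.
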
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 752ffc4f4166..5c9736a94e54 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -402,7 +402,7 @@ static bool ath6kl_is_valid_iftype(struct ath6kl *ar, enum nl80211_iftype type,
402 if (type == NL80211_IFTYPE_STATION || 402 if (type == NL80211_IFTYPE_STATION ||
403 type == NL80211_IFTYPE_AP || type == NL80211_IFTYPE_ADHOC) { 403 type == NL80211_IFTYPE_AP || type == NL80211_IFTYPE_ADHOC) {
404 for (i = 0; i < ar->vif_max; i++) { 404 for (i = 0; i < ar->vif_max; i++) {
405 if ((ar->avail_idx_map >> i) & BIT(0)) { 405 if ((ar->avail_idx_map) & BIT(i)) {
406 *if_idx = i; 406 *if_idx = i;
407 return true; 407 return true;
408 } 408 }
@@ -412,7 +412,7 @@ static bool ath6kl_is_valid_iftype(struct ath6kl *ar, enum nl80211_iftype type,
412 if (type == NL80211_IFTYPE_P2P_CLIENT || 412 if (type == NL80211_IFTYPE_P2P_CLIENT ||
413 type == NL80211_IFTYPE_P2P_GO) { 413 type == NL80211_IFTYPE_P2P_GO) {
414 for (i = ar->max_norm_iface; i < ar->vif_max; i++) { 414 for (i = ar->max_norm_iface; i < ar->vif_max; i++) {
415 if ((ar->avail_idx_map >> i) & BIT(0)) { 415 if ((ar->avail_idx_map) & BIT(i)) {
416 *if_idx = i; 416 *if_idx = i;
417 return true; 417 return true;
418 } 418 }
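
Both hunks rewrite the free-slot test from shifting the map down and masking bit 0 to masking BIT(i) directly; the two forms are equivalent as truth values, the new one just drops the shift and reads as the bit test it is. A quick equivalence check:

#include <assert.h>

#define BIT(n) (1UL << (n))

int main(void)
{
	unsigned long map = 0xa5;	/* arbitrary bit pattern */

	for (int i = 0; i < 8; i++)
		assert(!!((map >> i) & BIT(0)) == !!(map & BIT(i)));
	return 0;
}
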
@@ -1535,7 +1535,9 @@ static int ath6kl_cfg80211_del_iface(struct wiphy *wiphy,
1535 1535
1536 ath6kl_cfg80211_vif_stop(vif, test_bit(WMI_READY, &ar->flag)); 1536 ath6kl_cfg80211_vif_stop(vif, test_bit(WMI_READY, &ar->flag));
1537 1537
1538 rtnl_lock();
1538 ath6kl_cfg80211_vif_cleanup(vif); 1539 ath6kl_cfg80211_vif_cleanup(vif);
1540 rtnl_unlock();
1539 1541
1540 return 0; 1542 return 0;
1541} 1543}
@@ -2990,13 +2992,15 @@ static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev,
2990{ 2992{
2991 struct ath6kl *ar = ath6kl_priv(dev); 2993 struct ath6kl *ar = ath6kl_priv(dev);
2992 struct ath6kl_vif *vif = netdev_priv(dev); 2994 struct ath6kl_vif *vif = netdev_priv(dev);
2995 int err;
2993 2996
2994 if (vif->nw_type != AP_NETWORK) 2997 if (vif->nw_type != AP_NETWORK)
2995 return -EOPNOTSUPP; 2998 return -EOPNOTSUPP;
2996 2999
2997 /* Use this only for authorizing/unauthorizing a station */ 3000 err = cfg80211_check_station_change(wiphy, params,
2998 if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED))) 3001 CFG80211_STA_AP_MLME_CLIENT);
2999 return -EOPNOTSUPP; 3002 if (err)
3003 return err;
3000 3004
3001 if (params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED)) 3005 if (params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED))
3002 return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx, 3006 return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx,
@@ -3659,7 +3663,6 @@ struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
3659 vif->sme_state = SME_DISCONNECTED; 3663 vif->sme_state = SME_DISCONNECTED;
3660 set_bit(WLAN_ENABLED, &vif->flags); 3664 set_bit(WLAN_ENABLED, &vif->flags);
3661 ar->wlan_pwr_state = WLAN_POWER_STATE_ON; 3665 ar->wlan_pwr_state = WLAN_POWER_STATE_ON;
3662 set_bit(NETDEV_REGISTERED, &vif->flags);
3663 3666
3664 if (type == NL80211_IFTYPE_ADHOC) 3667 if (type == NL80211_IFTYPE_ADHOC)
3665 ar->ibss_if_active = true; 3668 ar->ibss_if_active = true;
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index 61b2f98b4e77..26b0f92424e1 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -560,7 +560,6 @@ enum ath6kl_vif_state {
560 WMM_ENABLED, 560 WMM_ENABLED,
561 NETQ_STOPPED, 561 NETQ_STOPPED,
562 DTIM_EXPIRED, 562 DTIM_EXPIRED,
563 NETDEV_REGISTERED,
564 CLEAR_BSSFILTER_ON_BEACON, 563 CLEAR_BSSFILTER_ON_BEACON,
565 DTIM_PERIOD_AVAIL, 564 DTIM_PERIOD_AVAIL,
566 WLAN_ENABLED, 565 WLAN_ENABLED,
@@ -936,8 +935,6 @@ void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid, u16 seq_no,
936 u8 win_sz); 935 u8 win_sz);
937void ath6kl_wakeup_event(void *dev); 936void ath6kl_wakeup_event(void *dev);
938 937
939void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
940 bool wait_fot_compltn, bool cold_reset);
941void ath6kl_init_control_info(struct ath6kl_vif *vif); 938void ath6kl_init_control_info(struct ath6kl_vif *vif);
942struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar); 939struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar);
943void ath6kl_cfg80211_vif_stop(struct ath6kl_vif *vif, bool wmi_ready); 940void ath6kl_cfg80211_vif_stop(struct ath6kl_vif *vif, bool wmi_ready);
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index 15cfe30e54fd..fe38b836cb26 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -56,6 +56,60 @@ int ath6kl_printk(const char *level, const char *fmt, ...)
56} 56}
57EXPORT_SYMBOL(ath6kl_printk); 57EXPORT_SYMBOL(ath6kl_printk);
58 58
59int ath6kl_info(const char *fmt, ...)
60{
61 struct va_format vaf = {
62 .fmt = fmt,
63 };
64 va_list args;
65 int ret;
66
67 va_start(args, fmt);
68 vaf.va = &args;
69 ret = ath6kl_printk(KERN_INFO, "%pV", &vaf);
70 trace_ath6kl_log_info(&vaf);
71 va_end(args);
72
73 return ret;
74}
75EXPORT_SYMBOL(ath6kl_info);
76
77int ath6kl_err(const char *fmt, ...)
78{
79 struct va_format vaf = {
80 .fmt = fmt,
81 };
82 va_list args;
83 int ret;
84
85 va_start(args, fmt);
86 vaf.va = &args;
87 ret = ath6kl_printk(KERN_ERR, "%pV", &vaf);
88 trace_ath6kl_log_err(&vaf);
89 va_end(args);
90
91 return ret;
92}
93EXPORT_SYMBOL(ath6kl_err);
94
95int ath6kl_warn(const char *fmt, ...)
96{
97 struct va_format vaf = {
98 .fmt = fmt,
99 };
100 va_list args;
101 int ret;
102
103 va_start(args, fmt);
104 vaf.va = &args;
105 ret = ath6kl_printk(KERN_WARNING, "%pV", &vaf);
106 trace_ath6kl_log_warn(&vaf);
107 va_end(args);
108
109 return ret;
110}
111EXPORT_SYMBOL(ath6kl_warn);
112
59#ifdef CONFIG_ATH6KL_DEBUG 113#ifdef CONFIG_ATH6KL_DEBUG
60 114
61void ath6kl_dbg(enum ATH6K_DEBUG_MASK mask, const char *fmt, ...) 115void ath6kl_dbg(enum ATH6K_DEBUG_MASK mask, const char *fmt, ...)
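
Promoting ath6kl_info/err/warn from macros to functions lets one varargs expansion feed both printk and a tracepoint. The struct va_format plus "%pV" combination is the kernel idiom for forwarding a va_list into printk without formatting twice; reduced to its core:

#include <linux/kernel.h>

static int example_log(const char *fmt, ...)
{
	struct va_format vaf = { .fmt = fmt };
	va_list args;
	int ret;

	va_start(args, fmt);
	vaf.va = &args;
	ret = printk(KERN_INFO "example: %pV", &vaf);	/* one expansion */
	va_end(args);
	return ret;
}

The same vaf can then be handed to a trace_*() call, as the new helpers do, so the log buffer and the trace ring see identical text.
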
@@ -63,15 +117,15 @@ void ath6kl_dbg(enum ATH6K_DEBUG_MASK mask, const char *fmt, ...)
63 struct va_format vaf; 117 struct va_format vaf;
64 va_list args; 118 va_list args;
65 119
66 if (!(debug_mask & mask))
67 return;
68
69 va_start(args, fmt); 120 va_start(args, fmt);
70 121
71 vaf.fmt = fmt; 122 vaf.fmt = fmt;
72 vaf.va = &args; 123 vaf.va = &args;
73 124
74 ath6kl_printk(KERN_DEBUG, "%pV", &vaf); 125 if (debug_mask & mask)
126 ath6kl_printk(KERN_DEBUG, "%pV", &vaf);
127
128 trace_ath6kl_log_dbg(mask, &vaf);
75 129
76 va_end(args); 130 va_end(args);
77} 131}
@@ -87,6 +141,10 @@ void ath6kl_dbg_dump(enum ATH6K_DEBUG_MASK mask,
87 141
88 print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len); 142 print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len);
89 } 143 }
144
145 /* tracing code doesn't like null strings :/ */
146 trace_ath6kl_log_dbg_dump(msg ? msg : "", prefix ? prefix : "",
147 buf, len);
90} 148}
91EXPORT_SYMBOL(ath6kl_dbg_dump); 149EXPORT_SYMBOL(ath6kl_dbg_dump);
92 150
@@ -1752,8 +1810,10 @@ int ath6kl_debug_init_fs(struct ath6kl *ar)
1752 debugfs_create_file("tgt_stats", S_IRUSR, ar->debugfs_phy, ar, 1810 debugfs_create_file("tgt_stats", S_IRUSR, ar->debugfs_phy, ar,
1753 &fops_tgt_stats); 1811 &fops_tgt_stats);
1754 1812
1755 debugfs_create_file("credit_dist_stats", S_IRUSR, ar->debugfs_phy, ar, 1813 if (ar->hif_type == ATH6KL_HIF_TYPE_SDIO)
1756 &fops_credit_dist_stats); 1814 debugfs_create_file("credit_dist_stats", S_IRUSR,
1815 ar->debugfs_phy, ar,
1816 &fops_credit_dist_stats);
1757 1817
1758 debugfs_create_file("endpoint_stats", S_IRUSR | S_IWUSR, 1818 debugfs_create_file("endpoint_stats", S_IRUSR | S_IWUSR,
1759 ar->debugfs_phy, ar, &fops_endpoint_stats); 1819 ar->debugfs_phy, ar, &fops_endpoint_stats);
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
index f97cd4ead543..74369de00fb5 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.h
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -19,6 +19,7 @@
19#define DEBUG_H 19#define DEBUG_H
20 20
21#include "hif.h" 21#include "hif.h"
22#include "trace.h"
22 23
23enum ATH6K_DEBUG_MASK { 24enum ATH6K_DEBUG_MASK {
24 ATH6KL_DBG_CREDIT = BIT(0), 25 ATH6KL_DBG_CREDIT = BIT(0),
@@ -51,13 +52,9 @@ enum ATH6K_DEBUG_MASK {
51extern unsigned int debug_mask; 52extern unsigned int debug_mask;
52extern __printf(2, 3) 53extern __printf(2, 3)
53int ath6kl_printk(const char *level, const char *fmt, ...); 54int ath6kl_printk(const char *level, const char *fmt, ...);
54 55extern __printf(1, 2) int ath6kl_info(const char *fmt, ...);
55#define ath6kl_info(fmt, ...) \ 56extern __printf(1, 2) int ath6kl_err(const char *fmt, ...);
56 ath6kl_printk(KERN_INFO, fmt, ##__VA_ARGS__) 57extern __printf(1, 2) int ath6kl_warn(const char *fmt, ...);
57#define ath6kl_err(fmt, ...) \
58 ath6kl_printk(KERN_ERR, fmt, ##__VA_ARGS__)
59#define ath6kl_warn(fmt, ...) \
60 ath6kl_printk(KERN_WARNING, fmt, ##__VA_ARGS__)
61 58
62enum ath6kl_war { 59enum ath6kl_war {
63 ATH6KL_WAR_INVALID_RATE, 60 ATH6KL_WAR_INVALID_RATE,
diff --git a/drivers/net/wireless/ath/ath6kl/hif.c b/drivers/net/wireless/ath/ath6kl/hif.c
index a6b614421fa4..fea7709b5dda 100644
--- a/drivers/net/wireless/ath/ath6kl/hif.c
+++ b/drivers/net/wireless/ath/ath6kl/hif.c
@@ -22,6 +22,7 @@
22#include "target.h" 22#include "target.h"
23#include "hif-ops.h" 23#include "hif-ops.h"
24#include "debug.h" 24#include "debug.h"
25#include "trace.h"
25 26
26#define MAILBOX_FOR_BLOCK_SIZE 1 27#define MAILBOX_FOR_BLOCK_SIZE 1
27 28
@@ -436,6 +437,8 @@ static int proc_pending_irqs(struct ath6kl_device *dev, bool *done)
436 437
437 ath6kl_dump_registers(dev, &dev->irq_proc_reg, 438 ath6kl_dump_registers(dev, &dev->irq_proc_reg,
438 &dev->irq_en_reg); 439 &dev->irq_en_reg);
440 trace_ath6kl_sdio_irq(&dev->irq_en_reg,
441 sizeof(dev->irq_en_reg));
439 442
440 /* Update only those registers that are enabled */ 443 /* Update only those registers that are enabled */
441 host_int_status = dev->irq_proc_reg.host_int_status & 444 host_int_status = dev->irq_proc_reg.host_int_status &
diff --git a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
index fbb78dfe078f..65e5b719093d 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_mbox.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
@@ -19,6 +19,8 @@
19#include "hif.h" 19#include "hif.h"
20#include "debug.h" 20#include "debug.h"
21#include "hif-ops.h" 21#include "hif-ops.h"
22#include "trace.h"
23
22#include <asm/unaligned.h> 24#include <asm/unaligned.h>
23 25
24#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask)) 26#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask))
@@ -537,6 +539,8 @@ static int ath6kl_htc_tx_issue(struct htc_target *target,
537 packet->buf, padded_len, 539 packet->buf, padded_len,
538 HIF_WR_ASYNC_BLOCK_INC, packet); 540 HIF_WR_ASYNC_BLOCK_INC, packet);
539 541
542 trace_ath6kl_htc_tx(status, packet->endpoint, packet->buf, send_len);
543
540 return status; 544 return status;
541} 545}
542 546
@@ -757,7 +761,8 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
757{ 761{
758 struct htc_target *target = endpoint->target; 762 struct htc_target *target = endpoint->target;
759 struct hif_scatter_req *scat_req = NULL; 763 struct hif_scatter_req *scat_req = NULL;
760 int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0; 764 int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0, i;
765 struct htc_packet *packet;
761 int status; 766 int status;
762 u32 txb_mask; 767 u32 txb_mask;
763 u8 ac = WMM_NUM_AC; 768 u8 ac = WMM_NUM_AC;
@@ -832,6 +837,13 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
832 ath6kl_dbg(ATH6KL_DBG_HTC, 837 ath6kl_dbg(ATH6KL_DBG_HTC,
833 "htc tx scatter bytes %d entries %d\n", 838 "htc tx scatter bytes %d entries %d\n",
834 scat_req->len, scat_req->scat_entries); 839 scat_req->len, scat_req->scat_entries);
840
841 for (i = 0; i < scat_req->scat_entries; i++) {
842 packet = scat_req->scat_list[i].packet;
843 trace_ath6kl_htc_tx(packet->status, packet->endpoint,
844 packet->buf, packet->act_len);
845 }
846
835 ath6kl_hif_submit_scat_req(target->dev, scat_req, false); 847 ath6kl_hif_submit_scat_req(target->dev, scat_req, false);
836 848
837 if (status) 849 if (status)
@@ -1903,6 +1915,7 @@ static void ath6kl_htc_rx_complete(struct htc_endpoint *endpoint,
1903 ath6kl_dbg(ATH6KL_DBG_HTC, 1915 ath6kl_dbg(ATH6KL_DBG_HTC,
1904 "htc rx complete ep %d packet 0x%p\n", 1916 "htc rx complete ep %d packet 0x%p\n",
1905 endpoint->eid, packet); 1917 endpoint->eid, packet);
1918
1906 endpoint->ep_cb.rx(endpoint->target, packet); 1919 endpoint->ep_cb.rx(endpoint->target, packet);
1907} 1920}
1908 1921
@@ -2011,6 +2024,9 @@ static int ath6kl_htc_rx_process_packets(struct htc_target *target,
2011 list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) { 2024 list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
2012 ep = &target->endpoint[packet->endpoint]; 2025 ep = &target->endpoint[packet->endpoint];
2013 2026
2027 trace_ath6kl_htc_rx(packet->status, packet->endpoint,
2028 packet->buf, packet->act_len);
2029
2014 /* process header for each of the recv packet */ 2030 /* process header for each of the recv packet */
2015 status = ath6kl_htc_rx_process_hdr(target, packet, lk_ahds, 2031 status = ath6kl_htc_rx_process_hdr(target, packet, lk_ahds,
2016 n_lk_ahd); 2032 n_lk_ahd);
@@ -2291,6 +2307,9 @@ static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
2291 if (ath6kl_htc_rx_packet(target, packet, packet->act_len)) 2307 if (ath6kl_htc_rx_packet(target, packet, packet->act_len))
2292 goto fail_ctrl_rx; 2308 goto fail_ctrl_rx;
2293 2309
2310 trace_ath6kl_htc_rx(packet->status, packet->endpoint,
2311 packet->buf, packet->act_len);
2312
2294 /* process receive header */ 2313 /* process receive header */
2295 packet->status = ath6kl_htc_rx_process_hdr(target, packet, NULL, NULL); 2314 packet->status = ath6kl_htc_rx_process_hdr(target, packet, NULL, NULL);
2296 2315
diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
index 281390178e3d..67aa924ed8b3 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
@@ -988,8 +988,6 @@ static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
988 988
989 htc_hdr = (struct htc_frame_hdr *) netdata; 989 htc_hdr = (struct htc_frame_hdr *) netdata;
990 990
991 ep = &target->endpoint[htc_hdr->eid];
992
993 if (htc_hdr->eid >= ENDPOINT_MAX) { 991 if (htc_hdr->eid >= ENDPOINT_MAX) {
994 ath6kl_dbg(ATH6KL_DBG_HTC, 992 ath6kl_dbg(ATH6KL_DBG_HTC,
995 "HTC Rx: invalid EndpointID=%d\n", 993 "HTC Rx: invalid EndpointID=%d\n",
@@ -997,6 +995,7 @@ static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
997 status = -EINVAL; 995 status = -EINVAL;
998 goto free_skb; 996 goto free_skb;
999 } 997 }
998 ep = &target->endpoint[htc_hdr->eid];
1000 999
1001 payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len)); 1000 payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));
1002 1001
@@ -1168,8 +1167,8 @@ static int htc_wait_recv_ctrl_message(struct htc_target *target)
1168 } 1167 }
1169 1168
1170 if (count <= 0) { 1169 if (count <= 0) {
1171 ath6kl_dbg(ATH6KL_DBG_HTC, "%s: Timeout!\n", __func__); 1170 ath6kl_warn("htc pipe control receive timeout!\n");
1172 return -ECOMM; 1171 return -ETIMEDOUT;
1173 } 1172 }
1174 1173
1175 return 0; 1174 return 0;
@@ -1582,16 +1581,16 @@ static int ath6kl_htc_pipe_wait_target(struct htc_target *target)
1582 return status; 1581 return status;
1583 1582
1584 if (target->pipe.ctrl_response_len < sizeof(*ready_msg)) { 1583 if (target->pipe.ctrl_response_len < sizeof(*ready_msg)) {
1585 ath6kl_dbg(ATH6KL_DBG_HTC, "invalid htc ready msg len:%d!\n", 1584 ath6kl_warn("invalid htc pipe ready msg len: %d\n",
1586 target->pipe.ctrl_response_len); 1585 target->pipe.ctrl_response_len);
1587 return -ECOMM; 1586 return -ECOMM;
1588 } 1587 }
1589 1588
1590 ready_msg = (struct htc_ready_ext_msg *) target->pipe.ctrl_response_buf; 1589 ready_msg = (struct htc_ready_ext_msg *) target->pipe.ctrl_response_buf;
1591 1590
1592 if (ready_msg->ver2_0_info.msg_id != cpu_to_le16(HTC_MSG_READY_ID)) { 1591 if (ready_msg->ver2_0_info.msg_id != cpu_to_le16(HTC_MSG_READY_ID)) {
1593 ath6kl_dbg(ATH6KL_DBG_HTC, "invalid htc ready msg : 0x%X !\n", 1592 ath6kl_warn("invalid htc pipe ready msg: 0x%x\n",
1594 ready_msg->ver2_0_info.msg_id); 1593 ready_msg->ver2_0_info.msg_id);
1595 return -ECOMM; 1594 return -ECOMM;
1596 } 1595 }
1597 1596
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 5d434cf88f35..40ffee6184fd 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -201,8 +201,8 @@ struct sk_buff *ath6kl_buf_alloc(int size)
201 u16 reserved; 201 u16 reserved;
202 202
 203 /* Add cacheline space at front and back of buffer */ 203 /* Add cacheline space at front and back of buffer */
204 reserved = (2 * L1_CACHE_BYTES) + ATH6KL_DATA_OFFSET + 204 reserved = roundup((2 * L1_CACHE_BYTES) + ATH6KL_DATA_OFFSET +
205 sizeof(struct htc_packet) + ATH6KL_HTC_ALIGN_BYTES; 205 sizeof(struct htc_packet) + ATH6KL_HTC_ALIGN_BYTES, 4);
206 skb = dev_alloc_skb(size + reserved); 206 skb = dev_alloc_skb(size + reserved);
207 207
208 if (skb) 208 if (skb)
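
The roundup() added here pads the reserved headroom to a 4-byte multiple so later pointer arithmetic on the buffer stays aligned; the main.c hunk below applies the same rounding to needed_headroom. A compilable sketch of the arithmetic (the 163-byte figure is illustrative, not the real struct size):

    #include <stdio.h>

    /* Same arithmetic as the kernel's roundup() helper */
    #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

    int main(void)
    {
            /* e.g. cacheline padding plus offsets summing to 163 bytes */
            printf("%d\n", roundup(163, 4)); /* prints 164 */
            return 0;
    }
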
@@ -1549,10 +1549,89 @@ static const char *ath6kl_init_get_hif_name(enum ath6kl_hif_type type)
1549 return NULL; 1549 return NULL;
1550} 1550}
1551 1551
1552
1553static const struct fw_capa_str_map {
1554 int id;
1555 const char *name;
1556} fw_capa_map[] = {
1557 { ATH6KL_FW_CAPABILITY_HOST_P2P, "host-p2p" },
1558 { ATH6KL_FW_CAPABILITY_SCHED_SCAN, "sched-scan" },
1559 { ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX, "sta-p2pdev-duplex" },
1560 { ATH6KL_FW_CAPABILITY_INACTIVITY_TIMEOUT, "inactivity-timeout" },
1561 { ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE, "rsn-cap-override" },
1562 { ATH6KL_FW_CAPABILITY_WOW_MULTICAST_FILTER, "wow-mc-filter" },
1563 { ATH6KL_FW_CAPABILITY_BMISS_ENHANCE, "bmiss-enhance" },
1564 { ATH6KL_FW_CAPABILITY_SCHED_SCAN_MATCH_LIST, "sscan-match-list" },
1565 { ATH6KL_FW_CAPABILITY_RSSI_SCAN_THOLD, "rssi-scan-thold" },
1566 { ATH6KL_FW_CAPABILITY_CUSTOM_MAC_ADDR, "custom-mac-addr" },
1567 { ATH6KL_FW_CAPABILITY_TX_ERR_NOTIFY, "tx-err-notify" },
1568 { ATH6KL_FW_CAPABILITY_REGDOMAIN, "regdomain" },
1569 { ATH6KL_FW_CAPABILITY_SCHED_SCAN_V2, "sched-scan-v2" },
1570 { ATH6KL_FW_CAPABILITY_HEART_BEAT_POLL, "hb-poll" },
1571};
1572
1573static const char *ath6kl_init_get_fw_capa_name(unsigned int id)
1574{
1575 int i;
1576
1577 for (i = 0; i < ARRAY_SIZE(fw_capa_map); i++) {
1578 if (fw_capa_map[i].id == id)
1579 return fw_capa_map[i].name;
1580 }
1581
1582 return "<unknown>";
1583}
1584
1585static void ath6kl_init_get_fwcaps(struct ath6kl *ar, char *buf, size_t buf_len)
1586{
1587 u8 *data = (u8 *) ar->fw_capabilities;
1588 size_t trunc_len, len = 0;
1589 int i, index, bit;
1590 char *trunc = "...";
1591
1592 for (i = 0; i < ATH6KL_FW_CAPABILITY_MAX; i++) {
1593 index = i / 8;
1594 bit = i % 8;
1595
1596 if (index >= sizeof(ar->fw_capabilities) * 4)
1597 break;
1598
1599 if (buf_len - len < 4) {
1600 ath6kl_warn("firmware capability buffer too small!\n");
1601
 1602 /* add "..." to the end of the string */
1603 trunc_len = strlen(trunc) + 1;
1604 strncpy(buf + buf_len - trunc_len, trunc, trunc_len);
1605
1606 return;
1607 }
1608
1609 if (data[index] & (1 << bit)) {
1610 len += scnprintf(buf + len, buf_len - len, "%s,",
1611 ath6kl_init_get_fw_capa_name(i));
1612 }
1613 }
1614
1615 /* overwrite the last comma */
1616 if (len > 0)
1617 len--;
1618
1619 buf[len] = '\0';
1620}
1621
1622static int ath6kl_init_hw_reset(struct ath6kl *ar)
1623{
1624 ath6kl_dbg(ATH6KL_DBG_BOOT, "cold resetting the device");
1625
1626 return ath6kl_diag_write32(ar, RESET_CONTROL_ADDRESS,
1627 cpu_to_le32(RESET_CONTROL_COLD_RST));
1628}
1629
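
ath6kl_init_get_fwcaps() walks the firmware-capability bitmap byte by byte (index = i / 8, bit = i % 8) and appends a name for every set bit. A standalone sketch of that decode loop, with made-up capability bytes:

    #include <stdio.h>

    int main(void)
    {
            unsigned char caps[2] = { 0x05, 0x80 }; /* bits 0, 2 and 15 set */
            int i;

            for (i = 0; i < 16; i++) {
                    int index = i / 8, bit = i % 8;

                    if (caps[index] & (1 << bit))
                            printf("capability %d set\n", i); /* 0, 2, 15 */
            }
            return 0;
    }
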
1552static int __ath6kl_init_hw_start(struct ath6kl *ar) 1630static int __ath6kl_init_hw_start(struct ath6kl *ar)
1553{ 1631{
1554 long timeleft; 1632 long timeleft;
1555 int ret, i; 1633 int ret, i;
1634 char buf[200];
1556 1635
1557 ath6kl_dbg(ATH6KL_DBG_BOOT, "hw start\n"); 1636 ath6kl_dbg(ATH6KL_DBG_BOOT, "hw start\n");
1558 1637
@@ -1569,24 +1648,35 @@ static int __ath6kl_init_hw_start(struct ath6kl *ar)
1569 goto err_power_off; 1648 goto err_power_off;
1570 1649
1571 /* Do we need to finish the BMI phase */ 1650 /* Do we need to finish the BMI phase */
1572 /* FIXME: return error from ath6kl_bmi_done() */ 1651 ret = ath6kl_bmi_done(ar);
1573 if (ath6kl_bmi_done(ar)) { 1652 if (ret)
1574 ret = -EIO;
1575 goto err_power_off; 1653 goto err_power_off;
1576 }
1577 1654
1578 /* 1655 /*
1579 * The reason we have to wait for the target here is that the 1656 * The reason we have to wait for the target here is that the
1580 * driver layer has to init BMI in order to set the host block 1657 * driver layer has to init BMI in order to set the host block
1581 * size. 1658 * size.
1582 */ 1659 */
1583 if (ath6kl_htc_wait_target(ar->htc_target)) { 1660 ret = ath6kl_htc_wait_target(ar->htc_target);
1584 ret = -EIO; 1661
1662 if (ret == -ETIMEDOUT) {
1663 /*
 1664 * Most likely the USB target is in an odd state after reboot
 1665 * and needs a reset. A cold reset makes the whole device
 1666 * disappear from the USB bus and initialisation starts from
 1667 * the beginning.
1668 */
1669 ath6kl_warn("htc wait target timed out, resetting device\n");
1670 ath6kl_init_hw_reset(ar);
1671 goto err_power_off;
1672 } else if (ret) {
1673 ath6kl_err("htc wait target failed: %d\n", ret);
1585 goto err_power_off; 1674 goto err_power_off;
1586 } 1675 }
1587 1676
1588 if (ath6kl_init_service_ep(ar)) { 1677 ret = ath6kl_init_service_ep(ar);
1589 ret = -EIO; 1678 if (ret) {
1679 ath6kl_err("Endpoint service initilisation failed: %d\n", ret);
1590 goto err_cleanup_scatter; 1680 goto err_cleanup_scatter;
1591 } 1681 }
1592 1682
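
The reworked error path singles out -ETIMEDOUT: a timeout usually means the (USB) target is wedged, so the driver cold-resets it before unwinding, while any other error just propagates with its own code instead of the old blanket -EIO. A userspace sketch of the same control flow, with hypothetical helpers:

    #include <stdio.h>
    #include <errno.h>

    static int wait_for_target(void) { return -ETIMEDOUT; } /* stub */
    static void cold_reset(void)     { puts("cold reset");  } /* stub */

    int main(void)
    {
            int ret = wait_for_target();

            if (ret == -ETIMEDOUT) {
                    /* wedged target: reset so it re-enumerates */
                    cold_reset();
                    return 1;
            } else if (ret) {
                    fprintf(stderr, "wait failed: %d\n", ret);
                    return 1;
            }
            return 0;
    }
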
@@ -1617,6 +1707,8 @@ static int __ath6kl_init_hw_start(struct ath6kl *ar)
1617 ar->wiphy->fw_version, 1707 ar->wiphy->fw_version,
1618 ar->fw_api, 1708 ar->fw_api,
1619 test_bit(TESTMODE, &ar->flag) ? " testmode" : ""); 1709 test_bit(TESTMODE, &ar->flag) ? " testmode" : "");
1710 ath6kl_init_get_fwcaps(ar, buf, sizeof(buf));
1711 ath6kl_info("firmware supports: %s\n", buf);
1620 } 1712 }
1621 1713
1622 if (ar->version.abi_ver != ATH6KL_ABI_VERSION) { 1714 if (ar->version.abi_ver != ATH6KL_ABI_VERSION) {
@@ -1765,9 +1857,7 @@ void ath6kl_stop_txrx(struct ath6kl *ar)
1765 * Try to reset the device if we can. The driver may have been 1857 * Try to reset the device if we can. The driver may have been
 1766 * configured NOT to reset the target during a debug session. 1858 * configured NOT to reset the target during a debug session.
1767 */ 1859 */
1768 ath6kl_dbg(ATH6KL_DBG_TRC, 1860 ath6kl_init_hw_reset(ar);
1769 "attempting to reset target on instance destroy\n");
1770 ath6kl_reset_device(ar, ar->target_type, true, true);
1771 1861
1772 up(&ar->sem); 1862 up(&ar->sem);
1773} 1863}
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index bd50b6b7b492..d4fcfcad57d0 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -345,39 +345,6 @@ out:
345 return ret; 345 return ret;
346} 346}
347 347
348/* FIXME: move to a better place, target.h? */
349#define AR6003_RESET_CONTROL_ADDRESS 0x00004000
350#define AR6004_RESET_CONTROL_ADDRESS 0x00004000
351
352void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
353 bool wait_fot_compltn, bool cold_reset)
354{
355 int status = 0;
356 u32 address;
357 __le32 data;
358
359 if (target_type != TARGET_TYPE_AR6003 &&
360 target_type != TARGET_TYPE_AR6004)
361 return;
362
363 data = cold_reset ? cpu_to_le32(RESET_CONTROL_COLD_RST) :
364 cpu_to_le32(RESET_CONTROL_MBOX_RST);
365
366 switch (target_type) {
367 case TARGET_TYPE_AR6003:
368 address = AR6003_RESET_CONTROL_ADDRESS;
369 break;
370 case TARGET_TYPE_AR6004:
371 address = AR6004_RESET_CONTROL_ADDRESS;
372 break;
373 }
374
375 status = ath6kl_diag_write32(ar, address, data);
376
377 if (status)
378 ath6kl_err("failed to reset target\n");
379}
380
381static void ath6kl_install_static_wep_keys(struct ath6kl_vif *vif) 348static void ath6kl_install_static_wep_keys(struct ath6kl_vif *vif)
382{ 349{
383 u8 index; 350 u8 index;
@@ -1327,9 +1294,11 @@ void init_netdev(struct net_device *dev)
1327 dev->watchdog_timeo = ATH6KL_TX_TIMEOUT; 1294 dev->watchdog_timeo = ATH6KL_TX_TIMEOUT;
1328 1295
1329 dev->needed_headroom = ETH_HLEN; 1296 dev->needed_headroom = ETH_HLEN;
1330 dev->needed_headroom += sizeof(struct ath6kl_llc_snap_hdr) + 1297 dev->needed_headroom += roundup(sizeof(struct ath6kl_llc_snap_hdr) +
1331 sizeof(struct wmi_data_hdr) + HTC_HDR_LENGTH 1298 sizeof(struct wmi_data_hdr) +
1332 + WMI_MAX_TX_META_SZ + ATH6KL_HTC_ALIGN_BYTES; 1299 HTC_HDR_LENGTH +
1300 WMI_MAX_TX_META_SZ +
1301 ATH6KL_HTC_ALIGN_BYTES, 4);
1333 1302
1334 dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM; 1303 dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
1335 1304
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index d111980d44c0..fb141454c6d2 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -28,6 +28,7 @@
28#include "target.h" 28#include "target.h"
29#include "debug.h" 29#include "debug.h"
30#include "cfg80211.h" 30#include "cfg80211.h"
31#include "trace.h"
31 32
32struct ath6kl_sdio { 33struct ath6kl_sdio {
33 struct sdio_func *func; 34 struct sdio_func *func;
@@ -179,6 +180,8 @@ static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
179 request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len); 180 request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len);
180 ath6kl_dbg_dump(ATH6KL_DBG_SDIO_DUMP, NULL, "sdio ", buf, len); 181 ath6kl_dbg_dump(ATH6KL_DBG_SDIO_DUMP, NULL, "sdio ", buf, len);
181 182
183 trace_ath6kl_sdio(addr, request, buf, len);
184
182 return ret; 185 return ret;
183} 186}
184 187
@@ -309,6 +312,13 @@ static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio,
309 sdio_claim_host(ar_sdio->func); 312 sdio_claim_host(ar_sdio->func);
310 313
311 mmc_set_data_timeout(&data, ar_sdio->func->card); 314 mmc_set_data_timeout(&data, ar_sdio->func->card);
315
316 trace_ath6kl_sdio_scat(scat_req->addr,
317 scat_req->req,
318 scat_req->len,
319 scat_req->scat_entries,
320 scat_req->scat_list);
321
312 /* synchronous call to process request */ 322 /* synchronous call to process request */
313 mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req); 323 mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);
314 324
@@ -1123,10 +1133,12 @@ static int ath6kl_sdio_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
1123 1133
1124 ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len, 1134 ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
1125 HIF_WR_SYNC_BYTE_INC); 1135 HIF_WR_SYNC_BYTE_INC);
1126 if (ret) 1136 if (ret) {
1127 ath6kl_err("unable to send the bmi data to the device\n"); 1137 ath6kl_err("unable to send the bmi data to the device\n");
1138 return ret;
1139 }
1128 1140
1129 return ret; 1141 return 0;
1130} 1142}
1131 1143
1132static int ath6kl_sdio_bmi_read(struct ath6kl *ar, u8 *buf, u32 len) 1144static int ath6kl_sdio_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
diff --git a/drivers/net/wireless/ath/ath6kl/target.h b/drivers/net/wireless/ath/ath6kl/target.h
index a98c12ba70c1..a580a629a0da 100644
--- a/drivers/net/wireless/ath/ath6kl/target.h
+++ b/drivers/net/wireless/ath/ath6kl/target.h
@@ -25,7 +25,7 @@
25#define AR6004_BOARD_DATA_SZ 6144 25#define AR6004_BOARD_DATA_SZ 6144
26#define AR6004_BOARD_EXT_DATA_SZ 0 26#define AR6004_BOARD_EXT_DATA_SZ 0
27 27
28#define RESET_CONTROL_ADDRESS 0x00000000 28#define RESET_CONTROL_ADDRESS 0x00004000
29#define RESET_CONTROL_COLD_RST 0x00000100 29#define RESET_CONTROL_COLD_RST 0x00000100
30#define RESET_CONTROL_MBOX_RST 0x00000004 30#define RESET_CONTROL_MBOX_RST 0x00000004
31 31
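
This one-line constant fix is what makes the consolidation above safe: RESET_CONTROL_ADDRESS now carries the 0x00004000 offset that the removed per-target AR6003/AR6004 defines in main.c encoded, so the whole reset collapses to a single diagnostic write (as in the init.c hunk above):

    /* The cold reset, as now issued by ath6kl_init_hw_reset() */
    ath6kl_diag_write32(ar, RESET_CONTROL_ADDRESS,            /* 0x00004000 */
                        cpu_to_le32(RESET_CONTROL_COLD_RST)); /* 0x00000100 */
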
diff --git a/drivers/net/wireless/ath/ath6kl/trace.c b/drivers/net/wireless/ath/ath6kl/trace.c
new file mode 100644
index 000000000000..e7d64b1285cb
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/trace.c
@@ -0,0 +1,23 @@
1/*
2 * Copyright (c) 2012 Qualcomm Atheros, Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/module.h>
18
19#define CREATE_TRACE_POINTS
20#include "trace.h"
21
22EXPORT_TRACEPOINT_SYMBOL(ath6kl_sdio);
23EXPORT_TRACEPOINT_SYMBOL(ath6kl_sdio_scat);
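
trace.c follows the standard Linux tracepoint recipe: exactly one translation unit defines CREATE_TRACE_POINTS before including the event header, which turns the TRACE_EVENT() declarations into definitions, and EXPORT_TRACEPOINT_SYMBOL() lets other objects fire them. Consumers just include the header and call the generated hooks, roughly like this (hypothetical call site; the signature matches the TRACE_EVENT below):

    #include "trace.h"   /* no CREATE_TRACE_POINTS here */

    static void sdio_io_done(unsigned int addr, int flags,
                             void *buf, size_t len)
    {
            /* becomes a no-op stub when CONFIG_ATH6KL_TRACING is off */
            trace_ath6kl_sdio(addr, flags, buf, len);
    }
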
diff --git a/drivers/net/wireless/ath/ath6kl/trace.h b/drivers/net/wireless/ath/ath6kl/trace.h
new file mode 100644
index 000000000000..1a1ea7881b4d
--- /dev/null
+++ b/drivers/net/wireless/ath/ath6kl/trace.h
@@ -0,0 +1,332 @@
1#if !defined(_ATH6KL_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
2
3#include <net/cfg80211.h>
4#include <linux/skbuff.h>
5#include <linux/tracepoint.h>
6#include "wmi.h"
7#include "hif.h"
8
9#if !defined(_ATH6KL_TRACE_H)
10static inline unsigned int ath6kl_get_wmi_id(void *buf, size_t buf_len)
11{
12 struct wmi_cmd_hdr *hdr = buf;
13
14 if (buf_len < sizeof(*hdr))
15 return 0;
16
17 return le16_to_cpu(hdr->cmd_id);
18}
 19#endif /* _ATH6KL_TRACE_H */
20
21#define _ATH6KL_TRACE_H
22
23/* create empty functions when tracing is disabled */
24#if !defined(CONFIG_ATH6KL_TRACING)
25#undef TRACE_EVENT
26#define TRACE_EVENT(name, proto, ...) \
27static inline void trace_ ## name(proto) {}
28#undef DECLARE_EVENT_CLASS
29#define DECLARE_EVENT_CLASS(...)
30#undef DEFINE_EVENT
31#define DEFINE_EVENT(evt_class, name, proto, ...) \
32static inline void trace_ ## name(proto) {}
 33#endif /* !CONFIG_ATH6KL_TRACING */
34
35#undef TRACE_SYSTEM
36#define TRACE_SYSTEM ath6kl
37
38TRACE_EVENT(ath6kl_wmi_cmd,
39 TP_PROTO(void *buf, size_t buf_len),
40
41 TP_ARGS(buf, buf_len),
42
43 TP_STRUCT__entry(
44 __field(unsigned int, id)
45 __field(size_t, buf_len)
46 __dynamic_array(u8, buf, buf_len)
47 ),
48
49 TP_fast_assign(
50 __entry->id = ath6kl_get_wmi_id(buf, buf_len);
51 __entry->buf_len = buf_len;
52 memcpy(__get_dynamic_array(buf), buf, buf_len);
53 ),
54
55 TP_printk(
56 "id %d len %zd",
57 __entry->id, __entry->buf_len
58 )
59);
60
61TRACE_EVENT(ath6kl_wmi_event,
62 TP_PROTO(void *buf, size_t buf_len),
63
64 TP_ARGS(buf, buf_len),
65
66 TP_STRUCT__entry(
67 __field(unsigned int, id)
68 __field(size_t, buf_len)
69 __dynamic_array(u8, buf, buf_len)
70 ),
71
72 TP_fast_assign(
73 __entry->id = ath6kl_get_wmi_id(buf, buf_len);
74 __entry->buf_len = buf_len;
75 memcpy(__get_dynamic_array(buf), buf, buf_len);
76 ),
77
78 TP_printk(
79 "id %d len %zd",
80 __entry->id, __entry->buf_len
81 )
82);
83
84TRACE_EVENT(ath6kl_sdio,
85 TP_PROTO(unsigned int addr, int flags,
86 void *buf, size_t buf_len),
87
88 TP_ARGS(addr, flags, buf, buf_len),
89
90 TP_STRUCT__entry(
91 __field(unsigned int, tx)
92 __field(unsigned int, addr)
93 __field(int, flags)
94 __field(size_t, buf_len)
95 __dynamic_array(u8, buf, buf_len)
96 ),
97
98 TP_fast_assign(
99 __entry->addr = addr;
100 __entry->flags = flags;
101 __entry->buf_len = buf_len;
102 memcpy(__get_dynamic_array(buf), buf, buf_len);
103
104 if (flags & HIF_WRITE)
105 __entry->tx = 1;
106 else
107 __entry->tx = 0;
108 ),
109
110 TP_printk(
111 "%s addr 0x%x flags 0x%x len %zd\n",
112 __entry->tx ? "tx" : "rx",
113 __entry->addr,
114 __entry->flags,
115 __entry->buf_len
116 )
117);
118
119TRACE_EVENT(ath6kl_sdio_scat,
120 TP_PROTO(unsigned int addr, int flags, unsigned int total_len,
121 unsigned int entries, struct hif_scatter_item *list),
122
123 TP_ARGS(addr, flags, total_len, entries, list),
124
125 TP_STRUCT__entry(
126 __field(unsigned int, tx)
127 __field(unsigned int, addr)
128 __field(int, flags)
129 __field(unsigned int, entries)
130 __field(size_t, total_len)
131 __dynamic_array(unsigned int, len_array, entries)
132 __dynamic_array(u8, data, total_len)
133 ),
134
135 TP_fast_assign(
136 unsigned int *len_array;
137 int i, offset = 0;
138 size_t len;
139
140 __entry->addr = addr;
141 __entry->flags = flags;
142 __entry->entries = entries;
143 __entry->total_len = total_len;
144
145 if (flags & HIF_WRITE)
146 __entry->tx = 1;
147 else
148 __entry->tx = 0;
149
150 len_array = __get_dynamic_array(len_array);
151
152 for (i = 0; i < entries; i++) {
153 len = list[i].len;
154
155 memcpy((u8 *) __get_dynamic_array(data) + offset,
156 list[i].buf, len);
157
158 len_array[i] = len;
159 offset += len;
160 }
161 ),
162
163 TP_printk(
164 "%s addr 0x%x flags 0x%x entries %d total_len %zd\n",
165 __entry->tx ? "tx" : "rx",
166 __entry->addr,
167 __entry->flags,
168 __entry->entries,
169 __entry->total_len
170 )
171);
172
173TRACE_EVENT(ath6kl_sdio_irq,
174 TP_PROTO(void *buf, size_t buf_len),
175
176 TP_ARGS(buf, buf_len),
177
178 TP_STRUCT__entry(
179 __field(size_t, buf_len)
180 __dynamic_array(u8, buf, buf_len)
181 ),
182
183 TP_fast_assign(
184 __entry->buf_len = buf_len;
185 memcpy(__get_dynamic_array(buf), buf, buf_len);
186 ),
187
188 TP_printk(
189 "irq len %zd\n", __entry->buf_len
190 )
191);
192
193TRACE_EVENT(ath6kl_htc_rx,
194 TP_PROTO(int status, int endpoint, void *buf,
195 size_t buf_len),
196
197 TP_ARGS(status, endpoint, buf, buf_len),
198
199 TP_STRUCT__entry(
200 __field(int, status)
201 __field(int, endpoint)
202 __field(size_t, buf_len)
203 __dynamic_array(u8, buf, buf_len)
204 ),
205
206 TP_fast_assign(
207 __entry->status = status;
208 __entry->endpoint = endpoint;
209 __entry->buf_len = buf_len;
210 memcpy(__get_dynamic_array(buf), buf, buf_len);
211 ),
212
213 TP_printk(
214 "status %d endpoint %d len %zd\n",
215 __entry->status,
216 __entry->endpoint,
217 __entry->buf_len
218 )
219);
220
221TRACE_EVENT(ath6kl_htc_tx,
222 TP_PROTO(int status, int endpoint, void *buf,
223 size_t buf_len),
224
225 TP_ARGS(status, endpoint, buf, buf_len),
226
227 TP_STRUCT__entry(
228 __field(int, status)
229 __field(int, endpoint)
230 __field(size_t, buf_len)
231 __dynamic_array(u8, buf, buf_len)
232 ),
233
234 TP_fast_assign(
235 __entry->status = status;
236 __entry->endpoint = endpoint;
237 __entry->buf_len = buf_len;
238 memcpy(__get_dynamic_array(buf), buf, buf_len);
239 ),
240
241 TP_printk(
242 "status %d endpoint %d len %zd\n",
243 __entry->status,
244 __entry->endpoint,
245 __entry->buf_len
246 )
247);
248
249#define ATH6KL_MSG_MAX 200
250
251DECLARE_EVENT_CLASS(ath6kl_log_event,
252 TP_PROTO(struct va_format *vaf),
253 TP_ARGS(vaf),
254 TP_STRUCT__entry(
255 __dynamic_array(char, msg, ATH6KL_MSG_MAX)
256 ),
257 TP_fast_assign(
258 WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
259 ATH6KL_MSG_MAX,
260 vaf->fmt,
261 *vaf->va) >= ATH6KL_MSG_MAX);
262 ),
263 TP_printk("%s", __get_str(msg))
264);
265
266DEFINE_EVENT(ath6kl_log_event, ath6kl_log_err,
267 TP_PROTO(struct va_format *vaf),
268 TP_ARGS(vaf)
269);
270
271DEFINE_EVENT(ath6kl_log_event, ath6kl_log_warn,
272 TP_PROTO(struct va_format *vaf),
273 TP_ARGS(vaf)
274);
275
276DEFINE_EVENT(ath6kl_log_event, ath6kl_log_info,
277 TP_PROTO(struct va_format *vaf),
278 TP_ARGS(vaf)
279);
280
281TRACE_EVENT(ath6kl_log_dbg,
282 TP_PROTO(unsigned int level, struct va_format *vaf),
283 TP_ARGS(level, vaf),
284 TP_STRUCT__entry(
285 __field(unsigned int, level)
286 __dynamic_array(char, msg, ATH6KL_MSG_MAX)
287 ),
288 TP_fast_assign(
289 __entry->level = level;
290 WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
291 ATH6KL_MSG_MAX,
292 vaf->fmt,
293 *vaf->va) >= ATH6KL_MSG_MAX);
294 ),
295 TP_printk("%s", __get_str(msg))
296);
297
298TRACE_EVENT(ath6kl_log_dbg_dump,
299 TP_PROTO(const char *msg, const char *prefix,
300 const void *buf, size_t buf_len),
301
302 TP_ARGS(msg, prefix, buf, buf_len),
303
304 TP_STRUCT__entry(
305 __string(msg, msg)
306 __string(prefix, prefix)
307 __field(size_t, buf_len)
308 __dynamic_array(u8, buf, buf_len)
309 ),
310
311 TP_fast_assign(
312 __assign_str(msg, msg);
313 __assign_str(prefix, prefix);
314 __entry->buf_len = buf_len;
315 memcpy(__get_dynamic_array(buf), buf, buf_len);
316 ),
317
318 TP_printk(
319 "%s/%s\n", __get_str(prefix), __get_str(msg)
320 )
321);
322
 323#endif /* _ATH6KL_TRACE_H || TRACE_HEADER_MULTI_READ */
324
325/* we don't want to use include/trace/events */
326#undef TRACE_INCLUDE_PATH
327#define TRACE_INCLUDE_PATH .
328#undef TRACE_INCLUDE_FILE
329#define TRACE_INCLUDE_FILE trace
330
331/* This part must be outside protection */
332#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index 78b369286579..ebb24045a8ae 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -20,6 +20,7 @@
20#include "core.h" 20#include "core.h"
21#include "debug.h" 21#include "debug.h"
22#include "htc-ops.h" 22#include "htc-ops.h"
23#include "trace.h"
23 24
24/* 25/*
25 * tid - tid_mux0..tid_mux3 26 * tid - tid_mux0..tid_mux3
@@ -288,6 +289,8 @@ int ath6kl_control_tx(void *devt, struct sk_buff *skb,
288 int status = 0; 289 int status = 0;
289 struct ath6kl_cookie *cookie = NULL; 290 struct ath6kl_cookie *cookie = NULL;
290 291
292 trace_ath6kl_wmi_cmd(skb->data, skb->len);
293
291 if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW)) { 294 if (WARN_ON_ONCE(ar->state == ATH6KL_STATE_WOW)) {
292 dev_kfree_skb(skb); 295 dev_kfree_skb(skb);
293 return -EACCES; 296 return -EACCES;
@@ -1324,7 +1327,7 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
1324 __func__, ar, ept, skb, packet->buf, 1327 __func__, ar, ept, skb, packet->buf,
1325 packet->act_len, status); 1328 packet->act_len, status);
1326 1329
1327 if (status || !(skb->data + HTC_HDR_LENGTH)) { 1330 if (status || packet->act_len < HTC_HDR_LENGTH) {
1328 dev_kfree_skb(skb); 1331 dev_kfree_skb(skb);
1329 return; 1332 return;
1330 } 1333 }
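
The old guard `!(skb->data + HTC_HDR_LENGTH)` only tested whether a pointer plus a constant was NULL, which is effectively never true, so undersized frames sailed through; the replacement compares the actual received length. A small demo of the difference (the HTC_HDR_LENGTH value here is illustrative):

    #include <stdio.h>

    #define HTC_HDR_LENGTH 6 /* illustrative */

    int main(void)
    {
            char data[16];
            int act_len = 4; /* shorter than the header */

            if (!(data + HTC_HDR_LENGTH))   /* old check: never fires */
                    puts("unreachable in practice");

            if (act_len < HTC_HDR_LENGTH)   /* new check: fires */
                    puts("runt frame dropped");
            return 0;
    }
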
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
index 5fcd342762de..bed0d337712d 100644
--- a/drivers/net/wireless/ath/ath6kl/usb.c
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -856,11 +856,9 @@ static int ath6kl_usb_submit_ctrl_out(struct ath6kl_usb *ar_usb,
856 int ret; 856 int ret;
857 857
858 if (size > 0) { 858 if (size > 0) {
859 buf = kmalloc(size, GFP_KERNEL); 859 buf = kmemdup(data, size, GFP_KERNEL);
860 if (buf == NULL) 860 if (buf == NULL)
861 return -ENOMEM; 861 return -ENOMEM;
862
863 memcpy(buf, data, size);
864 } 862 }
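
kmemdup() is exactly the kmalloc()-plus-memcpy() pair it replaces, folded into one call so the allocation never sits around uninitialised. Its userspace shape, for reference:

    #include <stdlib.h>
    #include <string.h>

    /* What kmemdup(src, len, gfp) does, minus the GFP flags */
    static void *memdup(const void *src, size_t len)
    {
            void *p = malloc(len);

            if (p)
                    memcpy(p, src, len);
            return p;
    }
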
865 863
 866 /* note: if successful, returns number of bytes transferred */ 864 /* note: if successful, returns number of bytes transferred */
@@ -872,8 +870,9 @@ static int ath6kl_usb_submit_ctrl_out(struct ath6kl_usb *ar_usb,
872 size, 1000); 870 size, 1000);
873 871
874 if (ret < 0) { 872 if (ret < 0) {
875 ath6kl_dbg(ATH6KL_DBG_USB, "%s failed,result = %d\n", 873 ath6kl_warn("Failed to submit usb control message: %d\n", ret);
876 __func__, ret); 874 kfree(buf);
875 return ret;
877 } 876 }
878 877
879 kfree(buf); 878 kfree(buf);
@@ -903,8 +902,9 @@ static int ath6kl_usb_submit_ctrl_in(struct ath6kl_usb *ar_usb,
903 size, 2 * HZ); 902 size, 2 * HZ);
904 903
905 if (ret < 0) { 904 if (ret < 0) {
906 ath6kl_dbg(ATH6KL_DBG_USB, "%s failed,result = %d\n", 905 ath6kl_warn("Failed to read usb control message: %d\n", ret);
907 __func__, ret); 906 kfree(buf);
907 return ret;
908 } 908 }
909 909
910 memcpy((u8 *) data, buf, size); 910 memcpy((u8 *) data, buf, size);
@@ -961,8 +961,10 @@ static int ath6kl_usb_diag_read32(struct ath6kl *ar, u32 address, u32 *data)
961 ATH6KL_USB_CONTROL_REQ_DIAG_RESP, 961 ATH6KL_USB_CONTROL_REQ_DIAG_RESP,
962 ar_usb->diag_resp_buffer, &resp_len); 962 ar_usb->diag_resp_buffer, &resp_len);
963 963
964 if (ret) 964 if (ret) {
965 ath6kl_warn("diag read32 failed: %d\n", ret);
965 return ret; 966 return ret;
967 }
966 968
967 resp = (struct ath6kl_usb_ctrl_diag_resp_read *) 969 resp = (struct ath6kl_usb_ctrl_diag_resp_read *)
968 ar_usb->diag_resp_buffer; 970 ar_usb->diag_resp_buffer;
@@ -976,6 +978,7 @@ static int ath6kl_usb_diag_write32(struct ath6kl *ar, u32 address, __le32 data)
976{ 978{
977 struct ath6kl_usb *ar_usb = ar->hif_priv; 979 struct ath6kl_usb *ar_usb = ar->hif_priv;
978 struct ath6kl_usb_ctrl_diag_cmd_write *cmd; 980 struct ath6kl_usb_ctrl_diag_cmd_write *cmd;
981 int ret;
979 982
980 cmd = (struct ath6kl_usb_ctrl_diag_cmd_write *) ar_usb->diag_cmd_buffer; 983 cmd = (struct ath6kl_usb_ctrl_diag_cmd_write *) ar_usb->diag_cmd_buffer;
981 984
@@ -984,12 +987,17 @@ static int ath6kl_usb_diag_write32(struct ath6kl *ar, u32 address, __le32 data)
984 cmd->address = cpu_to_le32(address); 987 cmd->address = cpu_to_le32(address);
985 cmd->value = data; 988 cmd->value = data;
986 989
987 return ath6kl_usb_ctrl_msg_exchange(ar_usb, 990 ret = ath6kl_usb_ctrl_msg_exchange(ar_usb,
988 ATH6KL_USB_CONTROL_REQ_DIAG_CMD, 991 ATH6KL_USB_CONTROL_REQ_DIAG_CMD,
989 (u8 *) cmd, 992 (u8 *) cmd,
990 sizeof(*cmd), 993 sizeof(*cmd),
991 0, NULL, NULL); 994 0, NULL, NULL);
995 if (ret) {
996 ath6kl_warn("diag_write32 failed: %d\n", ret);
997 return ret;
998 }
992 999
1000 return 0;
993} 1001}
994 1002
995static int ath6kl_usb_bmi_read(struct ath6kl *ar, u8 *buf, u32 len) 1003static int ath6kl_usb_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
@@ -1001,7 +1009,7 @@ static int ath6kl_usb_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
1001 ret = ath6kl_usb_submit_ctrl_in(ar_usb, 1009 ret = ath6kl_usb_submit_ctrl_in(ar_usb,
1002 ATH6KL_USB_CONTROL_REQ_RECV_BMI_RESP, 1010 ATH6KL_USB_CONTROL_REQ_RECV_BMI_RESP,
1003 0, 0, buf, len); 1011 0, 0, buf, len);
1004 if (ret != 0) { 1012 if (ret) {
1005 ath6kl_err("Unable to read the bmi data from the device: %d\n", 1013 ath6kl_err("Unable to read the bmi data from the device: %d\n",
1006 ret); 1014 ret);
1007 return ret; 1015 return ret;
@@ -1019,7 +1027,7 @@ static int ath6kl_usb_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
1019 ret = ath6kl_usb_submit_ctrl_out(ar_usb, 1027 ret = ath6kl_usb_submit_ctrl_out(ar_usb,
1020 ATH6KL_USB_CONTROL_REQ_SEND_BMI_CMD, 1028 ATH6KL_USB_CONTROL_REQ_SEND_BMI_CMD,
1021 0, 0, buf, len); 1029 0, 0, buf, len);
1022 if (ret != 0) { 1030 if (ret) {
1023 ath6kl_err("unable to send the bmi data to the device: %d\n", 1031 ath6kl_err("unable to send the bmi data to the device: %d\n",
1024 ret); 1032 ret);
1025 return ret; 1033 return ret;
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index d76b5bd81a0d..87aefb4c4c23 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -20,6 +20,7 @@
20#include "core.h" 20#include "core.h"
21#include "debug.h" 21#include "debug.h"
22#include "testmode.h" 22#include "testmode.h"
23#include "trace.h"
23#include "../regd.h" 24#include "../regd.h"
24#include "../regd_common.h" 25#include "../regd_common.h"
25 26
@@ -2028,6 +2029,9 @@ int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
2028 if (!sband) 2029 if (!sband)
2029 continue; 2030 continue;
2030 2031
2032 if (WARN_ON(band >= ATH6KL_NUM_BANDS))
2033 break;
2034
2031 ratemask = rates[band]; 2035 ratemask = rates[band];
2032 supp_rates = sc->supp_rates[band].rates; 2036 supp_rates = sc->supp_rates[band].rates;
2033 num_rates = 0; 2037 num_rates = 0;
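
The WARN_ON() lands just before `band` is used to index two fixed-size per-band arrays; since the loop iterates over whatever bands cfg80211 reports, the guard turns a potential out-of-bounds read into a loud, survivable bail-out. The same guard-then-index shape in miniature:

    #include <stdio.h>

    #define NUM_BANDS 2 /* stand-in for ATH6KL_NUM_BANDS */
    #define WARN_ON(x) \
            ((x) ? (fprintf(stderr, "warning: %s\n", #x), 1) : 0)

    int main(void)
    {
            unsigned int rates[NUM_BANDS] = { 0xff0, 0xfff };
            unsigned int band = 2; /* deliberately out of range */

            if (WARN_ON(band >= NUM_BANDS))
                    return 1;            /* never touch rates[band] */
            printf("ratemask 0x%x\n", rates[band]);
            return 0;
    }
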
@@ -4086,6 +4090,8 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
4086 return -EINVAL; 4090 return -EINVAL;
4087 } 4091 }
4088 4092
4093 trace_ath6kl_wmi_event(skb->data, skb->len);
4094
4089 return ath6kl_wmi_proc_events(wmi, skb); 4095 return ath6kl_wmi_proc_events(wmi, skb);
4090} 4096}
4091 4097
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 881e989ea470..e6b92ff265fd 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3606,6 +3606,12 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
3606 value = ar9003_hw_ant_ctrl_common_2_get(ah, is2ghz); 3606 value = ar9003_hw_ant_ctrl_common_2_get(ah, is2ghz);
3607 REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM_2, AR_SWITCH_TABLE_COM2_ALL, value); 3607 REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM_2, AR_SWITCH_TABLE_COM2_ALL, value);
3608 3608
3609 if ((AR_SREV_9462(ah)) && (ah->rxchainmask == 0x2)) {
3610 value = ar9003_hw_ant_ctrl_chain_get(ah, 1, is2ghz);
3611 REG_RMW_FIELD(ah, switch_chain_reg[0],
3612 AR_SWITCH_TABLE_ALL, value);
3613 }
3614
3609 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) { 3615 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
3610 if ((ah->rxchainmask & BIT(chain)) || 3616 if ((ah->rxchainmask & BIT(chain)) ||
3611 (ah->txchainmask & BIT(chain))) { 3617 (ah->txchainmask & BIT(chain))) {
@@ -3772,6 +3778,17 @@ static void ar9003_hw_atten_apply(struct ath_hw *ah, struct ath9k_channel *chan)
3772 AR_PHY_EXT_ATTEN_CTL_2, 3778 AR_PHY_EXT_ATTEN_CTL_2,
3773 }; 3779 };
3774 3780
3781 if ((AR_SREV_9462(ah)) && (ah->rxchainmask == 0x2)) {
3782 value = ar9003_hw_atten_chain_get(ah, 1, chan);
3783 REG_RMW_FIELD(ah, ext_atten_reg[0],
3784 AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB, value);
3785
3786 value = ar9003_hw_atten_chain_get_margin(ah, 1, chan);
3787 REG_RMW_FIELD(ah, ext_atten_reg[0],
3788 AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN,
3789 value);
3790 }
3791
3775 /* Test value. if 0 then attenuation is unused. Don't load anything. */ 3792 /* Test value. if 0 then attenuation is unused. Don't load anything. */
3776 for (i = 0; i < 3; i++) { 3793 for (i = 0; i < 3; i++) {
3777 if (ah->txchainmask & BIT(i)) { 3794 if (ah->txchainmask & BIT(i)) {
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index ccc42a71b436..999ab08c34e6 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -37,28 +37,28 @@ static const u32 ar9462_pciephy_clkreq_enable_L1_2p0[][2] = {
37 /* Addr allmodes */ 37 /* Addr allmodes */
38 {0x00018c00, 0x18253ede}, 38 {0x00018c00, 0x18253ede},
39 {0x00018c04, 0x000801d8}, 39 {0x00018c04, 0x000801d8},
40 {0x00018c08, 0x0003580c}, 40 {0x00018c08, 0x0003780c},
41}; 41};
42 42
43static const u32 ar9462_2p0_baseband_postamble[][5] = { 43static const u32 ar9462_2p0_baseband_postamble[][5] = {
44 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 44 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
45 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a800d}, 45 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a800d},
46 {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae}, 46 {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae},
47 {0x00009824, 0x5ac640de, 0x5ac640d0, 0x5ac640d0, 0x63c640da}, 47 {0x00009824, 0x63c640de, 0x5ac640d0, 0x5ac640d0, 0x63c640da},
48 {0x00009828, 0x0796be89, 0x0696b081, 0x0696b881, 0x09143e81}, 48 {0x00009828, 0x0796be89, 0x0696b081, 0x0696b881, 0x09143e81},
49 {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4}, 49 {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
50 {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c}, 50 {0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
51 {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4}, 51 {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
52 {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0}, 52 {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a2},
53 {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020}, 53 {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
54 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8}, 54 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8},
55 {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e}, 55 {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e},
56 {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3376605e, 0x32395d5e}, 56 {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32365a5e},
57 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 57 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
58 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, 58 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
59 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, 59 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
60 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, 60 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
61 {0x00009e3c, 0xcf946222, 0xcf946222, 0xcfd5c782, 0xcfd5c282}, 61 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
62 {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27}, 62 {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
63 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, 63 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
64 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, 64 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -82,9 +82,9 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
82 {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982}, 82 {0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
83 {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b}, 83 {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
84 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 84 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
85 {0x0000a3a4, 0x00000010, 0x00000010, 0x00000000, 0x00000000}, 85 {0x0000a3a4, 0x00000050, 0x00000050, 0x00000000, 0x00000000},
86 {0x0000a3a8, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa}, 86 {0x0000a3a8, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa},
87 {0x0000a3ac, 0xaaaaaa00, 0xaaaaaa30, 0xaaaaaa00, 0xaaaaaa00}, 87 {0x0000a3ac, 0xaaaaaa00, 0xaa30aa30, 0xaaaaaa00, 0xaaaaaa00},
88 {0x0000a41c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce}, 88 {0x0000a41c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
89 {0x0000a420, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce}, 89 {0x0000a420, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce},
90 {0x0000a424, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce}, 90 {0x0000a424, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
@@ -363,14 +363,14 @@ static const u32 ar9462_pciephy_clkreq_disable_L1_2p0[][2] = {
363 /* Addr allmodes */ 363 /* Addr allmodes */
364 {0x00018c00, 0x18213ede}, 364 {0x00018c00, 0x18213ede},
365 {0x00018c04, 0x000801d8}, 365 {0x00018c04, 0x000801d8},
366 {0x00018c08, 0x0003580c}, 366 {0x00018c08, 0x0003780c},
367}; 367};
368 368
369static const u32 ar9462_pciephy_pll_on_clkreq_disable_L1_2p0[][2] = { 369static const u32 ar9462_pciephy_pll_on_clkreq_disable_L1_2p0[][2] = {
370 /* Addr allmodes */ 370 /* Addr allmodes */
371 {0x00018c00, 0x18212ede}, 371 {0x00018c00, 0x18212ede},
372 {0x00018c04, 0x000801d8}, 372 {0x00018c04, 0x000801d8},
373 {0x00018c08, 0x0003580c}, 373 {0x00018c08, 0x0003780c},
374}; 374};
375 375
376static const u32 ar9462_2p0_radio_postamble_sys2ant[][5] = { 376static const u32 ar9462_2p0_radio_postamble_sys2ant[][5] = {
@@ -775,7 +775,7 @@ static const u32 ar9462_2p0_baseband_core[][2] = {
775 {0x00009fc0, 0x803e4788}, 775 {0x00009fc0, 0x803e4788},
776 {0x00009fc4, 0x0001efb5}, 776 {0x00009fc4, 0x0001efb5},
777 {0x00009fcc, 0x40000014}, 777 {0x00009fcc, 0x40000014},
778 {0x00009fd0, 0x01193b93}, 778 {0x00009fd0, 0x0a193b93},
779 {0x0000a20c, 0x00000000}, 779 {0x0000a20c, 0x00000000},
780 {0x0000a220, 0x00000000}, 780 {0x0000a220, 0x00000000},
781 {0x0000a224, 0x00000000}, 781 {0x0000a224, 0x00000000},
@@ -850,7 +850,7 @@ static const u32 ar9462_2p0_baseband_core[][2] = {
850 {0x0000a7cc, 0x00000000}, 850 {0x0000a7cc, 0x00000000},
851 {0x0000a7d0, 0x00000000}, 851 {0x0000a7d0, 0x00000000},
852 {0x0000a7d4, 0x00000004}, 852 {0x0000a7d4, 0x00000004},
853 {0x0000a7dc, 0x00000001}, 853 {0x0000a7dc, 0x00000000},
854 {0x0000a7f0, 0x80000000}, 854 {0x0000a7f0, 0x80000000},
855 {0x0000a8d0, 0x004b6a8e}, 855 {0x0000a8d0, 0x004b6a8e},
856 {0x0000a8d4, 0x00000820}, 856 {0x0000a8d4, 0x00000820},
@@ -886,7 +886,7 @@ static const u32 ar9462_modes_high_ob_db_tx_gain_table_2p0[][5] = {
886 {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584}, 886 {0x0000a2e0, 0x0000f000, 0x0000f000, 0x03ccc584, 0x03ccc584},
887 {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800}, 887 {0x0000a2e4, 0x01ff0000, 0x01ff0000, 0x03f0f800, 0x03f0f800},
888 {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000}, 888 {0x0000a2e8, 0x00000000, 0x00000000, 0x03ff0000, 0x03ff0000},
889 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, 889 {0x0000a410, 0x000050da, 0x000050da, 0x000050de, 0x000050de},
890 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 890 {0x0000a458, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
891 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, 891 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
892 {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002}, 892 {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
@@ -906,20 +906,20 @@ static const u32 ar9462_modes_high_ob_db_tx_gain_table_2p0[][5] = {
906 {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640}, 906 {0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
907 {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660}, 907 {0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
908 {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861}, 908 {0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
909 {0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81}, 909 {0x0000a548, 0x55025eb3, 0x55025eb3, 0x3e001a81, 0x3e001a81},
910 {0x0000a54c, 0x59025eb6, 0x59025eb6, 0x42001a83, 0x42001a83}, 910 {0x0000a54c, 0x58025ef3, 0x58025ef3, 0x42001a83, 0x42001a83},
911 {0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x44001c84, 0x44001c84}, 911 {0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x44001a84, 0x44001a84},
912 {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3}, 912 {0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
913 {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5}, 913 {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
914 {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9}, 914 {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
915 {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb}, 915 {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
916 {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 916 {0x0000a564, 0x751ffff6, 0x751ffff6, 0x56001eec, 0x56001eec},
917 {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 917 {0x0000a568, 0x751ffff6, 0x751ffff6, 0x58001ef0, 0x58001ef0},
918 {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 918 {0x0000a56c, 0x751ffff6, 0x751ffff6, 0x5a001ef4, 0x5a001ef4},
919 {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 919 {0x0000a570, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
920 {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 920 {0x0000a574, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
921 {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 921 {0x0000a578, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
922 {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 922 {0x0000a57c, 0x751ffff6, 0x751ffff6, 0x5c001ff6, 0x5c001ff6},
923 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 923 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
924 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 924 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
925 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 925 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -1053,7 +1053,6 @@ static const u32 ar9462_2p0_mac_core[][2] = {
1053 {0x00008044, 0x00000000}, 1053 {0x00008044, 0x00000000},
1054 {0x00008048, 0x00000000}, 1054 {0x00008048, 0x00000000},
1055 {0x0000804c, 0xffffffff}, 1055 {0x0000804c, 0xffffffff},
1056 {0x00008050, 0xffffffff},
1057 {0x00008054, 0x00000000}, 1056 {0x00008054, 0x00000000},
1058 {0x00008058, 0x00000000}, 1057 {0x00008058, 0x00000000},
1059 {0x0000805c, 0x000fc78f}, 1058 {0x0000805c, 0x000fc78f},
@@ -1117,9 +1116,9 @@ static const u32 ar9462_2p0_mac_core[][2] = {
1117 {0x000081f8, 0x00000000}, 1116 {0x000081f8, 0x00000000},
1118 {0x000081fc, 0x00000000}, 1117 {0x000081fc, 0x00000000},
1119 {0x00008240, 0x00100000}, 1118 {0x00008240, 0x00100000},
1120 {0x00008244, 0x0010f424}, 1119 {0x00008244, 0x0010f400},
1121 {0x00008248, 0x00000800}, 1120 {0x00008248, 0x00000800},
1122 {0x0000824c, 0x0001e848}, 1121 {0x0000824c, 0x0001e800},
1123 {0x00008250, 0x00000000}, 1122 {0x00008250, 0x00000000},
1124 {0x00008254, 0x00000000}, 1123 {0x00008254, 0x00000000},
1125 {0x00008258, 0x00000000}, 1124 {0x00008258, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 1e8508530e98..7bdd726c7a8f 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -369,7 +369,6 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
369 struct ieee80211_channel *c = chan->chan; 369 struct ieee80211_channel *c = chan->chan;
370 struct ath9k_hw_cal_data *caldata = ah->caldata; 370 struct ath9k_hw_cal_data *caldata = ah->caldata;
371 371
372 chan->channelFlags &= (~CHANNEL_CW_INT);
373 if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) { 372 if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) {
374 ath_dbg(common, CALIBRATE, 373 ath_dbg(common, CALIBRATE,
375 "NF did not complete in calibration window\n"); 374 "NF did not complete in calibration window\n");
@@ -384,7 +383,6 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
384 ath_dbg(common, CALIBRATE, 383 ath_dbg(common, CALIBRATE,
385 "noise floor failed detected; detected %d, threshold %d\n", 384 "noise floor failed detected; detected %d, threshold %d\n",
386 nf, nfThresh); 385 nf, nfThresh);
387 chan->channelFlags |= CHANNEL_CW_INT;
388 } 386 }
389 387
390 if (!caldata) { 388 if (!caldata) {
@@ -410,7 +408,7 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
410 int i, j; 408 int i, j;
411 409
412 ah->caldata->channel = chan->channel; 410 ah->caldata->channel = chan->channel;
413 ah->caldata->channelFlags = chan->channelFlags & ~CHANNEL_CW_INT; 411 ah->caldata->channelFlags = chan->channelFlags;
414 ah->caldata->chanmode = chan->chanmode; 412 ah->caldata->chanmode = chan->chanmode;
415 h = ah->caldata->nfCalHist; 413 h = ah->caldata->nfCalHist;
416 default_nf = ath9k_hw_get_default_nf(ah, chan); 414 default_nf = ath9k_hw_get_default_nf(ah, chan);
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index 050ca4a4850d..6102476a65de 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -40,7 +40,7 @@
40 x = ATH_LPF_RSSI((x), ATH_RSSI_IN((y)), ATH_RSSI_LPF_LEN); \ 40 x = ATH_LPF_RSSI((x), ATH_RSSI_IN((y)), ATH_RSSI_LPF_LEN); \
41} while (0) 41} while (0)
42#define ATH_EP_RND(x, mul) \ 42#define ATH_EP_RND(x, mul) \
43 ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul)) 43 (((x) + ((mul)/2)) / (mul))
44 44
45int ath9k_cmn_padpos(__le16 frame_control); 45int ath9k_cmn_padpos(__le16 frame_control);
46int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb); 46int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
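
Both the old and new ATH_EP_RND round a division to the nearest integer; the rewrite just replaces the remainder test with the closed form (x + mul/2) / mul, which gives the same result for non-negative x. A two-line check:

    #include <stdio.h>

    #define ATH_EP_RND(x, mul) (((x) + ((mul) / 2)) / (mul))

    int main(void)
    {
            /* 7/4 rounds up to 2, 5/4 rounds down to 1 */
            printf("%d %d\n", ATH_EP_RND(7, 4), ATH_EP_RND(5, 4));
            return 0;
    }
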
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 3714b971d18e..67a2a4b3b883 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -537,6 +537,7 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
537 PR("AMPDUs Completed:", a_completed); 537 PR("AMPDUs Completed:", a_completed);
538 PR("AMPDUs Retried: ", a_retries); 538 PR("AMPDUs Retried: ", a_retries);
539 PR("AMPDUs XRetried: ", a_xretries); 539 PR("AMPDUs XRetried: ", a_xretries);
540 PR("TXERR Filtered: ", txerr_filtered);
540 PR("FIFO Underrun: ", fifo_underrun); 541 PR("FIFO Underrun: ", fifo_underrun);
541 PR("TXOP Exceeded: ", xtxop); 542 PR("TXOP Exceeded: ", xtxop);
542 PR("TXTIMER Expiry: ", timer_exp); 543 PR("TXTIMER Expiry: ", timer_exp);
@@ -756,6 +757,8 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
756 TX_STAT_INC(qnum, completed); 757 TX_STAT_INC(qnum, completed);
757 } 758 }
758 759
760 if (ts->ts_status & ATH9K_TXERR_FILT)
761 TX_STAT_INC(qnum, txerr_filtered);
759 if (ts->ts_status & ATH9K_TXERR_FIFO) 762 if (ts->ts_status & ATH9K_TXERR_FIFO)
760 TX_STAT_INC(qnum, fifo_underrun); 763 TX_STAT_INC(qnum, fifo_underrun);
761 if (ts->ts_status & ATH9K_TXERR_XTXOP) 764 if (ts->ts_status & ATH9K_TXERR_XTXOP)
@@ -1909,6 +1912,7 @@ static const char ath9k_gstrings_stats[][ETH_GSTRING_LEN] = {
1909 AMKSTR(d_tx_desc_cfg_err), 1912 AMKSTR(d_tx_desc_cfg_err),
1910 AMKSTR(d_tx_data_underrun), 1913 AMKSTR(d_tx_data_underrun),
1911 AMKSTR(d_tx_delim_underrun), 1914 AMKSTR(d_tx_delim_underrun),
1915 "d_rx_crc_err",
1912 "d_rx_decrypt_crc_err", 1916 "d_rx_decrypt_crc_err",
1913 "d_rx_phy_err", 1917 "d_rx_phy_err",
1914 "d_rx_mic_err", 1918 "d_rx_mic_err",
@@ -1989,6 +1993,7 @@ void ath9k_get_et_stats(struct ieee80211_hw *hw,
1989 AWDATA(data_underrun); 1993 AWDATA(data_underrun);
1990 AWDATA(delim_underrun); 1994 AWDATA(delim_underrun);
1991 1995
1996 AWDATA_RX(crc_err);
1992 AWDATA_RX(decrypt_crc_err); 1997 AWDATA_RX(decrypt_crc_err);
1993 AWDATA_RX(phy_err); 1998 AWDATA_RX(phy_err);
1994 AWDATA_RX(mic_err); 1999 AWDATA_RX(mic_err);
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 410d6d8f1aa7..794a7ec83a24 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -142,6 +142,7 @@ struct ath_interrupt_stats {
142 * @a_completed: Total AMPDUs completed 142 * @a_completed: Total AMPDUs completed
143 * @a_retries: No. of AMPDUs retried (SW) 143 * @a_retries: No. of AMPDUs retried (SW)
144 * @a_xretries: No. of AMPDUs dropped due to xretries 144 * @a_xretries: No. of AMPDUs dropped due to xretries
145 * @txerr_filtered: No. of frames with TXERR_FILT flag set.
145 * @fifo_underrun: FIFO underrun occurrences 146 * @fifo_underrun: FIFO underrun occurrences
146 Valid only for: 147 Valid only for:
147 - non-aggregate condition. 148 - non-aggregate condition.
@@ -168,6 +169,7 @@ struct ath_tx_stats {
168 u32 a_completed; 169 u32 a_completed;
169 u32 a_retries; 170 u32 a_retries;
170 u32 a_xretries; 171 u32 a_xretries;
172 u32 txerr_filtered;
171 u32 fifo_underrun; 173 u32 fifo_underrun;
172 u32 xtxop; 174 u32 xtxop;
173 u32 timer_exp; 175 u32 timer_exp;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 07e25260c31d..4fa2bb167050 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1669,6 +1669,104 @@ bool ath9k_hw_check_alive(struct ath_hw *ah)
1669} 1669}
1670EXPORT_SYMBOL(ath9k_hw_check_alive); 1670EXPORT_SYMBOL(ath9k_hw_check_alive);
1671 1671
1672static void ath9k_hw_init_mfp(struct ath_hw *ah)
1673{
1674 /* Setup MFP options for CCMP */
1675 if (AR_SREV_9280_20_OR_LATER(ah)) {
1676 /* Mask Retry(b11), PwrMgt(b12), MoreData(b13) to 0 in mgmt
1677 * frames when constructing CCMP AAD. */
1678 REG_RMW_FIELD(ah, AR_AES_MUTE_MASK1, AR_AES_MUTE_MASK1_FC_MGMT,
1679 0xc7ff);
1680 ah->sw_mgmt_crypto = false;
1681 } else if (AR_SREV_9160_10_OR_LATER(ah)) {
1682 /* Disable hardware crypto for management frames */
1683 REG_CLR_BIT(ah, AR_PCU_MISC_MODE2,
1684 AR_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE);
1685 REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
1686 AR_PCU_MISC_MODE2_NO_CRYPTO_FOR_NON_DATA_PKT);
1687 ah->sw_mgmt_crypto = true;
1688 } else {
1689 ah->sw_mgmt_crypto = true;
1690 }
1691}
1692
1693static void ath9k_hw_reset_opmode(struct ath_hw *ah,
1694 u32 macStaId1, u32 saveDefAntenna)
1695{
1696 struct ath_common *common = ath9k_hw_common(ah);
1697
1698 ENABLE_REGWRITE_BUFFER(ah);
1699
1700 REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr));
1701 REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(common->macaddr + 4)
1702 | macStaId1
1703 | AR_STA_ID1_RTS_USE_DEF
1704 | (ah->config.ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0)
1705 | ah->sta_id1_defaults);
1706 ath_hw_setbssidmask(common);
1707 REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna);
1708 ath9k_hw_write_associd(ah);
1709 REG_WRITE(ah, AR_ISR, ~0);
1710 REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
1711
1712 REGWRITE_BUFFER_FLUSH(ah);
1713
1714 ath9k_hw_set_operating_mode(ah, ah->opmode);
1715}
1716
1717static void ath9k_hw_init_queues(struct ath_hw *ah)
1718{
1719 int i;
1720
1721 ENABLE_REGWRITE_BUFFER(ah);
1722
1723 for (i = 0; i < AR_NUM_DCU; i++)
1724 REG_WRITE(ah, AR_DQCUMASK(i), 1 << i);
1725
1726 REGWRITE_BUFFER_FLUSH(ah);
1727
1728 ah->intr_txqs = 0;
1729 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1730 ath9k_hw_resettxqueue(ah, i);
1731}
1732
1733/*
1734 * For big endian systems turn on swapping for descriptors
1735 */
1736static void ath9k_hw_init_desc(struct ath_hw *ah)
1737{
1738 struct ath_common *common = ath9k_hw_common(ah);
1739
1740 if (AR_SREV_9100(ah)) {
1741 u32 mask;
1742 mask = REG_READ(ah, AR_CFG);
1743 if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) {
1744 ath_dbg(common, RESET, "CFG Byte Swap Set 0x%x\n",
1745 mask);
1746 } else {
1747 mask = INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB;
1748 REG_WRITE(ah, AR_CFG, mask);
1749 ath_dbg(common, RESET, "Setting CFG 0x%x\n",
1750 REG_READ(ah, AR_CFG));
1751 }
1752 } else {
1753 if (common->bus_ops->ath_bus_type == ATH_USB) {
1754 /* Configure AR9271 target WLAN */
1755 if (AR_SREV_9271(ah))
1756 REG_WRITE(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB);
1757 else
1758 REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
1759 }
1760#ifdef __BIG_ENDIAN
1761 else if (AR_SREV_9330(ah) || AR_SREV_9340(ah) ||
1762 AR_SREV_9550(ah))
1763 REG_RMW(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB, 0);
1764 else
1765 REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
1766#endif
1767 }
1768}
1769
1672/* 1770/*
1673 * Fast channel change: 1771 * Fast channel change:
1674 * (Change synthesizer based on channel freq without resetting chip) 1772 * (Change synthesizer based on channel freq without resetting chip)
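
The four helpers added above are pure code motion: each body is lifted verbatim out of ath9k_hw_reset(), whose later hunks shrink to single calls. A sketch of the resulting call order inside the reset path, reduced to the moved pieces:

    /*
     * ath9k_hw_reset() after this patch (sketch; other steps elided):
     *
     *	ath9k_hw_init_mfp(ah);
     *	ath9k_hw_reset_opmode(ah, macStaId1, saveDefAntenna);
     *	r = ath9k_hw_rf_set_freq(ah, chan);
     *	ath9k_hw_set_clockrate(ah);
     *	ath9k_hw_init_queues(ah);
     *	ath9k_hw_init_desc(ah);
     */
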
@@ -1746,7 +1844,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1746 u32 saveDefAntenna; 1844 u32 saveDefAntenna;
1747 u32 macStaId1; 1845 u32 macStaId1;
1748 u64 tsf = 0; 1846 u64 tsf = 0;
1749 int i, r; 1847 int r;
1750 bool start_mci_reset = false; 1848 bool start_mci_reset = false;
1751 bool save_fullsleep = ah->chip_fullsleep; 1849 bool save_fullsleep = ah->chip_fullsleep;
1752 1850
@@ -1763,10 +1861,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1763 ath9k_hw_getnf(ah, ah->curchan); 1861 ath9k_hw_getnf(ah, ah->curchan);
1764 1862
1765 ah->caldata = caldata; 1863 ah->caldata = caldata;
1766 if (caldata && 1864 if (caldata && (chan->channel != caldata->channel ||
1767 (chan->channel != caldata->channel || 1865 chan->channelFlags != caldata->channelFlags)) {
1768 (chan->channelFlags & ~CHANNEL_CW_INT) !=
1769 (caldata->channelFlags & ~CHANNEL_CW_INT))) {
1770 /* Operating channel changed, reset channel calibration data */ 1866 /* Operating channel changed, reset channel calibration data */
1771 memset(caldata, 0, sizeof(*caldata)); 1867 memset(caldata, 0, sizeof(*caldata));
1772 ath9k_init_nfcal_hist_buffer(ah, chan); 1868 ath9k_init_nfcal_hist_buffer(ah, chan);
@@ -1853,22 +1949,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1853 ath9k_hw_settsf64(ah, tsf); 1949 ath9k_hw_settsf64(ah, tsf);
1854 } 1950 }
1855 1951
1856 /* Setup MFP options for CCMP */ 1952 ath9k_hw_init_mfp(ah);
1857 if (AR_SREV_9280_20_OR_LATER(ah)) {
1858 /* Mask Retry(b11), PwrMgt(b12), MoreData(b13) to 0 in mgmt
1859 * frames when constructing CCMP AAD. */
1860 REG_RMW_FIELD(ah, AR_AES_MUTE_MASK1, AR_AES_MUTE_MASK1_FC_MGMT,
1861 0xc7ff);
1862 ah->sw_mgmt_crypto = false;
1863 } else if (AR_SREV_9160_10_OR_LATER(ah)) {
1864 /* Disable hardware crypto for management frames */
1865 REG_CLR_BIT(ah, AR_PCU_MISC_MODE2,
1866 AR_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE);
1867 REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
1868 AR_PCU_MISC_MODE2_NO_CRYPTO_FOR_NON_DATA_PKT);
1869 ah->sw_mgmt_crypto = true;
1870 } else
1871 ah->sw_mgmt_crypto = true;
1872 1953
1873 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan)) 1954 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
1874 ath9k_hw_set_delta_slope(ah, chan); 1955 ath9k_hw_set_delta_slope(ah, chan);
@@ -1876,24 +1957,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1876 ath9k_hw_spur_mitigate_freq(ah, chan); 1957 ath9k_hw_spur_mitigate_freq(ah, chan);
1877 ah->eep_ops->set_board_values(ah, chan); 1958 ah->eep_ops->set_board_values(ah, chan);
1878 1959
1879 ENABLE_REGWRITE_BUFFER(ah); 1960 ath9k_hw_reset_opmode(ah, macStaId1, saveDefAntenna);
1880
1881 REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr));
1882 REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(common->macaddr + 4)
1883 | macStaId1
1884 | AR_STA_ID1_RTS_USE_DEF
1885 | (ah->config.
1886 ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0)
1887 | ah->sta_id1_defaults);
1888 ath_hw_setbssidmask(common);
1889 REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna);
1890 ath9k_hw_write_associd(ah);
1891 REG_WRITE(ah, AR_ISR, ~0);
1892 REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
1893
1894 REGWRITE_BUFFER_FLUSH(ah);
1895
1896 ath9k_hw_set_operating_mode(ah, ah->opmode);
1897 1961
1898 r = ath9k_hw_rf_set_freq(ah, chan); 1962 r = ath9k_hw_rf_set_freq(ah, chan);
1899 if (r) 1963 if (r)
@@ -1901,17 +1965,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1901 1965
1902 ath9k_hw_set_clockrate(ah); 1966 ath9k_hw_set_clockrate(ah);
1903 1967
1904 ENABLE_REGWRITE_BUFFER(ah); 1968 ath9k_hw_init_queues(ah);
1905
1906 for (i = 0; i < AR_NUM_DCU; i++)
1907 REG_WRITE(ah, AR_DQCUMASK(i), 1 << i);
1908
1909 REGWRITE_BUFFER_FLUSH(ah);
1910
1911 ah->intr_txqs = 0;
1912 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1913 ath9k_hw_resettxqueue(ah, i);
1914
1915 ath9k_hw_init_interrupt_masks(ah, ah->opmode); 1969 ath9k_hw_init_interrupt_masks(ah, ah->opmode);
1916 ath9k_hw_ani_cache_ini_regs(ah); 1970 ath9k_hw_ani_cache_ini_regs(ah);
1917 ath9k_hw_init_qos(ah); 1971 ath9k_hw_init_qos(ah);
@@ -1966,38 +2020,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1966 2020
1967 REGWRITE_BUFFER_FLUSH(ah); 2021 REGWRITE_BUFFER_FLUSH(ah);
1968 2022
1969 /* 2023 ath9k_hw_init_desc(ah);
1970 * For big endian systems turn on swapping for descriptors
1971 */
1972 if (AR_SREV_9100(ah)) {
1973 u32 mask;
1974 mask = REG_READ(ah, AR_CFG);
1975 if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) {
1976 ath_dbg(common, RESET, "CFG Byte Swap Set 0x%x\n",
1977 mask);
1978 } else {
1979 mask =
1980 INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB;
1981 REG_WRITE(ah, AR_CFG, mask);
1982 ath_dbg(common, RESET, "Setting CFG 0x%x\n",
1983 REG_READ(ah, AR_CFG));
1984 }
1985 } else {
1986 if (common->bus_ops->ath_bus_type == ATH_USB) {
1987 /* Configure AR9271 target WLAN */
1988 if (AR_SREV_9271(ah))
1989 REG_WRITE(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB);
1990 else
1991 REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
1992 }
1993#ifdef __BIG_ENDIAN
1994 else if (AR_SREV_9330(ah) || AR_SREV_9340(ah) ||
1995 AR_SREV_9550(ah))
1996 REG_RMW(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB, 0);
1997 else
1998 REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
1999#endif
2000 }
2001 2024
2002 if (ath9k_hw_btcoex_is_enabled(ah)) 2025 if (ath9k_hw_btcoex_is_enabled(ah))
2003 ath9k_hw_btcoex_enable(ah); 2026 ath9k_hw_btcoex_enable(ah);
@@ -2010,7 +2033,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2010 2033
2011 if (AR_SREV_9300_20_OR_LATER(ah)) { 2034 if (AR_SREV_9300_20_OR_LATER(ah)) {
2012 ar9003_hw_bb_watchdog_config(ah); 2035 ar9003_hw_bb_watchdog_config(ah);
2013
2014 ar9003_hw_disable_phy_restart(ah); 2036 ar9003_hw_disable_phy_restart(ah);
2015 } 2037 }
2016 2038
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 784e81ccb903..30e62d92d46d 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -363,7 +363,6 @@ enum ath9k_int {
363 ATH9K_INT_NOCARD = 0xffffffff 363 ATH9K_INT_NOCARD = 0xffffffff
364}; 364};
365 365
366#define CHANNEL_CW_INT 0x00002
367#define CHANNEL_CCK 0x00020 366#define CHANNEL_CCK 0x00020
368#define CHANNEL_OFDM 0x00040 367#define CHANNEL_OFDM 0x00040
369#define CHANNEL_2GHZ 0x00080 368#define CHANNEL_2GHZ 0x00080
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 9c0b150d5b8e..c61cafa2665b 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -387,8 +387,7 @@ static void carl9170_tx_status_process_ampdu(struct ar9170 *ar,
387 u8 tid; 387 u8 tid;
388 388
389 if (!(txinfo->flags & IEEE80211_TX_CTL_AMPDU) || 389 if (!(txinfo->flags & IEEE80211_TX_CTL_AMPDU) ||
390 txinfo->flags & IEEE80211_TX_CTL_INJECTED || 390 txinfo->flags & IEEE80211_TX_CTL_INJECTED)
391 (!(super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_AGGR))))
392 return; 391 return;
393 392
394 rcu_read_lock(); 393 rcu_read_lock();
@@ -981,30 +980,6 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
981 980
982 SET_VAL(CARL9170_TX_SUPER_AMPDU_FACTOR, 981 SET_VAL(CARL9170_TX_SUPER_AMPDU_FACTOR,
983 txc->s.ampdu_settings, factor); 982 txc->s.ampdu_settings, factor);
984
985 for (i = 0; i < CARL9170_TX_MAX_RATES; i++) {
986 txrate = &info->control.rates[i];
987 if (txrate->idx >= 0) {
988 txc->s.ri[i] =
989 CARL9170_TX_SUPER_RI_AMPDU;
990
991 if (WARN_ON(!(txrate->flags &
992 IEEE80211_TX_RC_MCS))) {
993 /*
994 * Not sure if it's even possible
995 * to aggregate non-ht rates with
996 * this HW.
997 */
998 goto err_out;
999 }
1000 continue;
1001 }
1002
1003 txrate->idx = 0;
1004 txrate->count = ar->hw->max_rate_tries;
1005 }
1006
1007 mac_tmp |= cpu_to_le16(AR9170_TX_MAC_AGGR);
1008 } 983 }
1009 984
1010 /* 985 /*
@@ -1012,11 +987,31 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
1012 * taken from mac_control. For all fallback rates, the firmware 987 * taken from mac_control. For all fallback rates, the firmware
1013 * updates the mac_control flags from the rate info field. 988 * updates the mac_control flags from the rate info field.
1014 */ 989 */
1015 for (i = 1; i < CARL9170_TX_MAX_RATES; i++) { 990 for (i = 0; i < CARL9170_TX_MAX_RATES; i++) {
991 __le32 phy_set;
1016 txrate = &info->control.rates[i]; 992 txrate = &info->control.rates[i];
1017 if (txrate->idx < 0) 993 if (txrate->idx < 0)
1018 break; 994 break;
1019 995
996 phy_set = carl9170_tx_physet(ar, info, txrate);
997 if (i == 0) {
998 /* first rate - part of the hw's frame header */
999 txc->f.phy_control = phy_set;
1000
1001 if (ampdu && txrate->flags & IEEE80211_TX_RC_MCS)
1002 mac_tmp |= cpu_to_le16(AR9170_TX_MAC_AGGR);
1003 if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
1004 mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
1005 else if (carl9170_tx_cts_check(ar, txrate))
1006 mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
1007
1008 } else {
1009 /* fallback rates are stored in the firmware's
1010 * retry rate set array.
1011 */
1012 txc->s.rr[i - 1] = phy_set;
1013 }
1014
1020 SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[i], 1015 SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[i],
1021 txrate->count); 1016 txrate->count);
1022 1017
@@ -1027,21 +1022,13 @@ static int carl9170_tx_prepare(struct ar9170 *ar,
1027 txc->s.ri[i] |= (AR9170_TX_MAC_PROT_CTS << 1022 txc->s.ri[i] |= (AR9170_TX_MAC_PROT_CTS <<
1028 CARL9170_TX_SUPER_RI_ERP_PROT_S); 1023 CARL9170_TX_SUPER_RI_ERP_PROT_S);
1029 1024
1030 txc->s.rr[i - 1] = carl9170_tx_physet(ar, info, txrate); 1025 if (ampdu && (txrate->flags & IEEE80211_TX_RC_MCS))
1026 txc->s.ri[i] |= CARL9170_TX_SUPER_RI_AMPDU;
1031 } 1027 }
1032 1028
1033 txrate = &info->control.rates[0];
1034 SET_VAL(CARL9170_TX_SUPER_RI_TRIES, txc->s.ri[0], txrate->count);
1035
1036 if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
1037 mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_RTS);
1038 else if (carl9170_tx_cts_check(ar, txrate))
1039 mac_tmp |= cpu_to_le16(AR9170_TX_MAC_PROT_CTS);
1040
1041 txc->s.len = cpu_to_le16(skb->len); 1029 txc->s.len = cpu_to_le16(skb->len);
1042 txc->f.length = cpu_to_le16(len + FCS_LEN); 1030 txc->f.length = cpu_to_le16(len + FCS_LEN);
1043 txc->f.mac_control = mac_tmp; 1031 txc->f.mac_control = mac_tmp;
1044 txc->f.phy_control = carl9170_tx_physet(ar, info, txrate);
1045 1032
1046 arinfo = (void *)info->rate_driver_data; 1033 arinfo = (void *)info->rate_driver_data;
1047 arinfo->timeout = jiffies; 1034 arinfo->timeout = jiffies;
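
[Editor's note: the rewritten loop above folds the old special-cased rate 0 into the common path: the first rate is part of the hardware frame header, every later rate goes into the firmware's retry set. The mapping, condensed from the hunk (field names as shown there):

	for (i = 0; i < CARL9170_TX_MAX_RATES; i++) {
		struct ieee80211_tx_rate *txrate = &info->control.rates[i];
		__le32 phy_set;

		if (txrate->idx < 0)
			break;

		phy_set = carl9170_tx_physet(ar, info, txrate);
		if (i == 0)
			txc->f.phy_control = phy_set;	/* hw frame header */
		else
			txc->s.rr[i - 1] = phy_set;	/* fw retry rate set */
	}

The AMPDU and RTS/CTS protection flags likewise now derive from the first rate inside the same loop, instead of in a separate pass afterwards.]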
@@ -1381,9 +1368,9 @@ static void carl9170_tx(struct ar9170 *ar)
1381} 1368}
1382 1369
1383static bool carl9170_tx_ampdu_queue(struct ar9170 *ar, 1370static bool carl9170_tx_ampdu_queue(struct ar9170 *ar,
1384 struct ieee80211_sta *sta, struct sk_buff *skb) 1371 struct ieee80211_sta *sta, struct sk_buff *skb,
1372 struct ieee80211_tx_info *txinfo)
1385{ 1373{
1386 struct _carl9170_tx_superframe *super = (void *) skb->data;
1387 struct carl9170_sta_info *sta_info; 1374 struct carl9170_sta_info *sta_info;
1388 struct carl9170_sta_tid *agg; 1375 struct carl9170_sta_tid *agg;
1389 struct sk_buff *iter; 1376 struct sk_buff *iter;
@@ -1450,7 +1437,7 @@ err_unlock:
1450 1437
1451err_unlock_rcu: 1438err_unlock_rcu:
1452 rcu_read_unlock(); 1439 rcu_read_unlock();
1453 super->f.mac_control &= ~cpu_to_le16(AR9170_TX_MAC_AGGR); 1440 txinfo->flags &= ~IEEE80211_TX_CTL_AMPDU;
1454 carl9170_tx_status(ar, skb, false); 1441 carl9170_tx_status(ar, skb, false);
1455 ar->tx_dropped++; 1442 ar->tx_dropped++;
1456 return false; 1443 return false;
@@ -1492,7 +1479,7 @@ void carl9170_op_tx(struct ieee80211_hw *hw,
1492 * sta == NULL checks are redundant in this 1479 * sta == NULL checks are redundant in this
1493 * special case. 1480 * special case.
1494 */ 1481 */
1495 run = carl9170_tx_ampdu_queue(ar, sta, skb); 1482 run = carl9170_tx_ampdu_queue(ar, sta, skb, info);
1496 if (run) 1483 if (run)
1497 carl9170_tx_ampdu(ar); 1484 carl9170_tx_ampdu(ar);
1498 1485
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index 9396dc9fe3c5..d288eea0a26a 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -9,5 +9,7 @@ wil6210-objs += wmi.o
9wil6210-objs += interrupt.o 9wil6210-objs += interrupt.o
10wil6210-objs += txrx.o 10wil6210-objs += txrx.o
11 11
12subdir-ccflags-y += -Werror 12ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
13 subdir-ccflags-y += -Werror
14endif
13subdir-ccflags-y += -D__CHECK_ENDIAN__ 15subdir-ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 9ecc1968262c..c5d4a87abaaf 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -14,16 +14,6 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include <linux/kernel.h>
18#include <linux/netdevice.h>
19#include <linux/sched.h>
20#include <linux/etherdevice.h>
21#include <linux/wireless.h>
22#include <linux/ieee80211.h>
23#include <linux/slab.h>
24#include <linux/version.h>
25#include <net/cfg80211.h>
26
27#include "wil6210.h" 17#include "wil6210.h"
28#include "wmi.h" 18#include "wmi.h"
29 19
@@ -292,7 +282,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
292 282
293 /* WMI_CONNECT_CMD */ 283 /* WMI_CONNECT_CMD */
294 memset(&conn, 0, sizeof(conn)); 284 memset(&conn, 0, sizeof(conn));
295 switch (bss->capability & 0x03) { 285 switch (bss->capability & WLAN_CAPABILITY_DMG_TYPE_MASK) {
296 case WLAN_CAPABILITY_DMG_TYPE_AP: 286 case WLAN_CAPABILITY_DMG_TYPE_AP:
297 conn.network_type = WMI_NETTYPE_INFRA; 287 conn.network_type = WMI_NETTYPE_INFRA;
298 break; 288 break;
@@ -437,17 +427,18 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
437 if (rc) 427 if (rc)
438 return rc; 428 return rc;
439 429
440 rc = wmi_set_channel(wil, channel->hw_value);
441 if (rc)
442 return rc;
443
444 /* MAC address - pre-requisite for other commands */ 430 /* MAC address - pre-requisite for other commands */
445 wmi_set_mac_address(wil, ndev->dev_addr); 431 wmi_set_mac_address(wil, ndev->dev_addr);
446 432
447 /* IE's */ 433 /* IE's */
448 /* bcon 'head' IE's are not relevant for 60g band */ 434 /* bcon 'head' IE's are not relevant for 60g band */
449 wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len, 435 /*
450 bcon->beacon_ies); 436 * FW does not form a regular beacon, so bcon IE's are not set.
 437 * For the DMG bcon, once it is supported, bcon IE's will
438 * be reused; add something like:
439 * wmi_set_ie(wil, WMI_FRAME_BEACON, bcon->beacon_ies_len,
440 * bcon->beacon_ies);
441 */
451 wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len, 442 wmi_set_ie(wil, WMI_FRAME_PROBE_RESP, bcon->proberesp_ies_len,
452 bcon->proberesp_ies); 443 bcon->proberesp_ies);
453 wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len, 444 wmi_set_ie(wil, WMI_FRAME_ASSOC_RESP, bcon->assocresp_ies_len,
@@ -455,7 +446,8 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
455 446
456 wil->secure_pcp = info->privacy; 447 wil->secure_pcp = info->privacy;
457 448
458 rc = wmi_set_bcon(wil, info->beacon_interval, wmi_nettype); 449 rc = wmi_pcp_start(wil, info->beacon_interval, wmi_nettype,
450 channel->hw_value);
459 if (rc) 451 if (rc)
460 return rc; 452 return rc;
461 453
@@ -472,11 +464,8 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
472{ 464{
473 int rc = 0; 465 int rc = 0;
474 struct wil6210_priv *wil = wiphy_to_wil(wiphy); 466 struct wil6210_priv *wil = wiphy_to_wil(wiphy);
475 struct wireless_dev *wdev = ndev->ieee80211_ptr;
476 u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
477 467
478 /* To stop beaconing, set BI to 0 */ 468 rc = wmi_pcp_stop(wil);
479 rc = wmi_set_bcon(wil, 0, wmi_nettype);
480 469
481 return rc; 470 return rc;
482} 471}
diff --git a/drivers/net/wireless/ath/wil6210/dbg_hexdump.h b/drivers/net/wireless/ath/wil6210/dbg_hexdump.h
deleted file mode 100644
index e5712f026c47..000000000000
--- a/drivers/net/wireless/ath/wil6210/dbg_hexdump.h
+++ /dev/null
@@ -1,20 +0,0 @@
1#ifndef WIL_DBG_HEXDUMP_H_
2#define WIL_DBG_HEXDUMP_H_
3
4#include <linux/printk.h>
5#include <linux/dynamic_debug.h>
6
7#if defined(CONFIG_DYNAMIC_DEBUG)
8#define wil_print_hex_dump_debug(prefix_str, prefix_type, rowsize, \
9 groupsize, buf, len, ascii) \
10 dynamic_hex_dump(prefix_str, prefix_type, rowsize, \
11 groupsize, buf, len, ascii)
12
13#else /* defined(CONFIG_DYNAMIC_DEBUG) */
14#define wil_print_hex_dump_debug(prefix_str, prefix_type, rowsize, \
15 groupsize, buf, len, ascii) \
16 print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, rowsize, \
17 groupsize, buf, len, ascii)
18#endif /* defined(CONFIG_DYNAMIC_DEBUG) */
19
20#endif /* WIL_DBG_HEXDUMP_H_ */
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 65fc9683bfd8..4be07f5e22b9 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -312,14 +312,6 @@ static const struct file_operations fops_memread = {
312 .llseek = seq_lseek, 312 .llseek = seq_lseek,
313}; 313};
314 314
315static int wil_default_open(struct inode *inode, struct file *file)
316{
317 if (inode->i_private)
318 file->private_data = inode->i_private;
319
320 return 0;
321}
322
323static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf, 315static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
324 size_t count, loff_t *ppos) 316 size_t count, loff_t *ppos)
325{ 317{
@@ -361,7 +353,7 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
361 353
362static const struct file_operations fops_ioblob = { 354static const struct file_operations fops_ioblob = {
363 .read = wil_read_file_ioblob, 355 .read = wil_read_file_ioblob,
364 .open = wil_default_open, 356 .open = simple_open,
365 .llseek = default_llseek, 357 .llseek = default_llseek,
366}; 358};
367 359
@@ -396,7 +388,7 @@ static ssize_t wil_write_file_reset(struct file *file, const char __user *buf,
396 388
397static const struct file_operations fops_reset = { 389static const struct file_operations fops_reset = {
398 .write = wil_write_file_reset, 390 .write = wil_write_file_reset,
399 .open = wil_default_open, 391 .open = simple_open,
400}; 392};
401/*---------Tx descriptor------------*/ 393/*---------Tx descriptor------------*/
402 394
@@ -526,7 +518,50 @@ static ssize_t wil_write_file_ssid(struct file *file, const char __user *buf,
526static const struct file_operations fops_ssid = { 518static const struct file_operations fops_ssid = {
527 .read = wil_read_file_ssid, 519 .read = wil_read_file_ssid,
528 .write = wil_write_file_ssid, 520 .write = wil_write_file_ssid,
529 .open = wil_default_open, 521 .open = simple_open,
522};
523
524/*---------temp------------*/
525static void print_temp(struct seq_file *s, const char *prefix, u32 t)
526{
527 switch (t) {
528 case 0:
529 case ~(u32)0:
530 seq_printf(s, "%s N/A\n", prefix);
531 break;
532 default:
533 seq_printf(s, "%s %d.%03d\n", prefix, t / 1000, t % 1000);
534 break;
535 }
536}
537
538static int wil_temp_debugfs_show(struct seq_file *s, void *data)
539{
540 struct wil6210_priv *wil = s->private;
541 u32 t_m, t_r;
542
543 int rc = wmi_get_temperature(wil, &t_m, &t_r);
544 if (rc) {
545 seq_printf(s, "Failed\n");
546 return 0;
547 }
548
549 print_temp(s, "MAC temperature :", t_m);
550 print_temp(s, "Radio temperature :", t_r);
551
552 return 0;
553}
554
555static int wil_temp_seq_open(struct inode *inode, struct file *file)
556{
557 return single_open(file, wil_temp_debugfs_show, inode->i_private);
558}
559
560static const struct file_operations fops_temp = {
561 .open = wil_temp_seq_open,
562 .release = single_release,
563 .read = seq_read,
564 .llseek = seq_lseek,
530}; 565};
531 566
532/*----------------*/ 567/*----------------*/
@@ -563,6 +598,7 @@ int wil6210_debugfs_init(struct wil6210_priv *wil)
563 debugfs_create_file("mem_val", S_IRUGO, dbg, wil, &fops_memread); 598 debugfs_create_file("mem_val", S_IRUGO, dbg, wil, &fops_memread);
564 599
565 debugfs_create_file("reset", S_IWUSR, dbg, wil, &fops_reset); 600 debugfs_create_file("reset", S_IWUSR, dbg, wil, &fops_reset);
601 debugfs_create_file("temp", S_IRUGO, dbg, wil, &fops_temp);
566 602
567 wil->rgf_blob.data = (void * __force)wil->csr + 0; 603 wil->rgf_blob.data = (void * __force)wil->csr + 0;
568 wil->rgf_blob.size = 0xa000; 604 wil->rgf_blob.size = 0xa000;
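
[Editor's note: the new "temp" attribute above is a textbook read-only debugfs file built on seq_file's single_open(). The generic shape, for reference; the my_* names below are illustrative, not part of the patch:

	/* read-only debugfs attribute via seq_file, same pattern as fops_temp */
	static int my_show(struct seq_file *s, void *data)
	{
		struct wil6210_priv *wil = s->private; /* from inode->i_private */

		seq_printf(s, "status 0x%lx\n", wil->status);
		return 0;
	}

	static int my_seq_open(struct inode *inode, struct file *file)
	{
		return single_open(file, my_show, inode->i_private);
	}

	static const struct file_operations fops_my = {
		.open		= my_seq_open,
		.release	= single_release,
		.read		= seq_read,
		.llseek		= seq_lseek,
	};

	/* hooked up with:
	 * debugfs_create_file("my", S_IRUGO, dbg, wil, &fops_my);
	 */

Note also that print_temp() treats 0 and ~0 as "sensor not available" and otherwise prints millidegrees as "%d.%03d".]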
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index dc97e7b2609c..e3c1e7684f9c 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -240,6 +240,15 @@ static void wil_notify_fw_error(struct wil6210_priv *wil)
240 kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp); 240 kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
241} 241}
242 242
243static void wil_cache_mbox_regs(struct wil6210_priv *wil)
244{
245 /* make shadow copy of registers that should not change on run time */
246 wil_memcpy_fromio_32(&wil->mbox_ctl, wil->csr + HOST_MBOX,
247 sizeof(struct wil6210_mbox_ctl));
248 wil_mbox_ring_le2cpus(&wil->mbox_ctl.rx);
249 wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx);
250}
251
243static irqreturn_t wil6210_irq_misc(int irq, void *cookie) 252static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
244{ 253{
245 struct wil6210_priv *wil = cookie; 254 struct wil6210_priv *wil = cookie;
@@ -257,14 +266,19 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
257 wil6210_mask_irq_misc(wil); 266 wil6210_mask_irq_misc(wil);
258 267
259 if (isr & ISR_MISC_FW_ERROR) { 268 if (isr & ISR_MISC_FW_ERROR) {
260 wil_dbg_irq(wil, "IRQ: Firmware error\n"); 269 wil_err(wil, "Firmware error detected\n");
261 clear_bit(wil_status_fwready, &wil->status); 270 clear_bit(wil_status_fwready, &wil->status);
262 wil_notify_fw_error(wil); 271 /*
263 isr &= ~ISR_MISC_FW_ERROR; 272 * do not clear @isr here - the 2nd part runs in the thread
 273 * handler, where user space gets notified; that must happen
274 * in non-atomic context
275 */
264 } 276 }
265 277
266 if (isr & ISR_MISC_FW_READY) { 278 if (isr & ISR_MISC_FW_READY) {
267 wil_dbg_irq(wil, "IRQ: FW ready\n"); 279 wil_dbg_irq(wil, "IRQ: FW ready\n");
280 wil_cache_mbox_regs(wil);
281 set_bit(wil_status_reset_done, &wil->status);
268 /** 282 /**
269 * Actual FW ready indicated by the 283 * Actual FW ready indicated by the
270 * WMI_FW_READY_EVENTID 284 * WMI_FW_READY_EVENTID
@@ -289,6 +303,11 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
289 303
290 wil_dbg_irq(wil, "Thread ISR MISC 0x%08x\n", isr); 304 wil_dbg_irq(wil, "Thread ISR MISC 0x%08x\n", isr);
291 305
306 if (isr & ISR_MISC_FW_ERROR) {
307 wil_notify_fw_error(wil);
308 isr &= ~ISR_MISC_FW_ERROR;
309 }
310
292 if (isr & ISR_MISC_MBOX_EVT) { 311 if (isr & ISR_MISC_MBOX_EVT) {
293 wil_dbg_irq(wil, "MBOX event\n"); 312 wil_dbg_irq(wil, "MBOX event\n");
294 wmi_recv_cmd(wil); 313 wmi_recv_cmd(wil);
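
[Editor's note: this change moves the user-space notification out of the hard ISR into the threaded half (wil6210_irq_misc_thread()): sending a uevent may sleep, and hard IRQ context may not. The generic shape of such a split, with my_* names purely illustrative:

	static irqreturn_t my_hard_isr(int irq, void *cookie)
	{
		/* atomic context: ack/mask hardware, record what happened */
		return IRQ_WAKE_THREAD;		/* run the thread half next */
	}

	static irqreturn_t my_thread_isr(int irq, void *cookie)
	{
		/* process context: may sleep, send uevents, talk to user space */
		return IRQ_HANDLED;
	}

	/* registered with: */
	rc = request_threaded_irq(irq, my_hard_isr, my_thread_isr,
				  IRQF_SHARED, "my_dev", cookie);

The hard ISR keeps ISR_MISC_FW_ERROR set in @isr so the thread knows to call wil_notify_fw_error(), and clears it there once done.]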
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 761c389586d4..a0478e2f6868 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -14,12 +14,6 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include <linux/kernel.h>
18#include <linux/netdevice.h>
19#include <linux/sched.h>
20#include <linux/ieee80211.h>
21#include <linux/wireless.h>
22#include <linux/slab.h>
23#include <linux/moduleparam.h> 17#include <linux/moduleparam.h>
24#include <linux/if_arp.h> 18#include <linux/if_arp.h>
25 19
@@ -109,13 +103,24 @@ static void wil_connect_timer_fn(ulong x)
109 schedule_work(&wil->disconnect_worker); 103 schedule_work(&wil->disconnect_worker);
110} 104}
111 105
112static void wil_cache_mbox_regs(struct wil6210_priv *wil) 106static void wil_connect_worker(struct work_struct *work)
113{ 107{
114 /* make shadow copy of registers that should not change on run time */ 108 int rc;
115 wil_memcpy_fromio_32(&wil->mbox_ctl, wil->csr + HOST_MBOX, 109 struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
116 sizeof(struct wil6210_mbox_ctl)); 110 connect_worker);
117 wil_mbox_ring_le2cpus(&wil->mbox_ctl.rx); 111 int cid = wil->pending_connect_cid;
118 wil_mbox_ring_le2cpus(&wil->mbox_ctl.tx); 112
113 if (cid < 0) {
114 wil_err(wil, "No connection pending\n");
115 return;
116 }
117
118 wil_dbg_wmi(wil, "Configure for connection CID %d\n", cid);
119
120 rc = wil_vring_init_tx(wil, 0, WIL6210_TX_RING_SIZE, cid, 0);
121 wil->pending_connect_cid = -1;
122 if (rc == 0)
123 wil_link_on(wil);
119} 124}
120 125
121int wil_priv_init(struct wil6210_priv *wil) 126int wil_priv_init(struct wil6210_priv *wil)
@@ -130,7 +135,7 @@ int wil_priv_init(struct wil6210_priv *wil)
130 wil->pending_connect_cid = -1; 135 wil->pending_connect_cid = -1;
131 setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil); 136 setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil);
132 137
133 INIT_WORK(&wil->wmi_connect_worker, wmi_connect_worker); 138 INIT_WORK(&wil->connect_worker, wil_connect_worker);
134 INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker); 139 INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker);
135 INIT_WORK(&wil->wmi_event_worker, wmi_event_worker); 140 INIT_WORK(&wil->wmi_event_worker, wmi_event_worker);
136 141
@@ -147,8 +152,6 @@ int wil_priv_init(struct wil6210_priv *wil)
147 return -EAGAIN; 152 return -EAGAIN;
148 } 153 }
149 154
150 wil_cache_mbox_regs(wil);
151
152 return 0; 155 return 0;
153} 156}
154 157
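
[Editor's note: the connect worker moves from wmi.c (deleted at the end of this patch) into main.c and drops its wmi_ prefix; the body is unchanged, only the plumbing moves. The deferred-connect flow, pieced together from the hunks here and in wmi.c below:

	/* wil_priv_init(): */
	INIT_WORK(&wil->connect_worker, wil_connect_worker);

	/* wmi_evt_connect(), on the WMI connect event: */
	wil->pending_connect_cid = evt->cid;
	queue_work(wil->wmi_wq_conn, &wil->connect_worker);

	/* wil_connect_worker() then sets up the Tx vring for that CID
	 * and calls wil_link_on() on success.
	 */
]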
@@ -185,15 +188,11 @@ static void wil_target_reset(struct wil6210_priv *wil)
185 W(RGF_USER_MAC_CPU_0, BIT(1)); /* mac_cpu_man_rst */ 188 W(RGF_USER_MAC_CPU_0, BIT(1)); /* mac_cpu_man_rst */
186 W(RGF_USER_USER_CPU_0, BIT(1)); /* user_cpu_man_rst */ 189 W(RGF_USER_USER_CPU_0, BIT(1)); /* user_cpu_man_rst */
187 190
188 msleep(100);
189
190 W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000); 191 W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0xFE000000);
191 W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F); 192 W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0x0000003F);
192 W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000170); 193 W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0x00000170);
193 W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FC00); 194 W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0xFFE7FC00);
194 195
195 msleep(100);
196
197 W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0); 196 W(RGF_USER_CLKS_CTL_SW_RST_VEC_3, 0);
198 W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0); 197 W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0);
199 W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0); 198 W(RGF_USER_CLKS_CTL_SW_RST_VEC_1, 0);
@@ -203,12 +202,6 @@ static void wil_target_reset(struct wil6210_priv *wil)
203 W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00000080); 202 W(RGF_USER_CLKS_CTL_SW_RST_VEC_2, 0x00000080);
204 W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0); 203 W(RGF_USER_CLKS_CTL_SW_RST_VEC_0, 0);
205 204
206 msleep(2000);
207
208 W(RGF_USER_USER_CPU_0, BIT(0)); /* user_cpu_man_de_rst */
209
210 msleep(2000);
211
212 wil_dbg_misc(wil, "Reset completed\n"); 205 wil_dbg_misc(wil, "Reset completed\n");
213 206
214#undef W 207#undef W
@@ -265,8 +258,6 @@ int wil_reset(struct wil6210_priv *wil)
265 wil->pending_connect_cid = -1; 258 wil->pending_connect_cid = -1;
266 INIT_COMPLETION(wil->wmi_ready); 259 INIT_COMPLETION(wil->wmi_ready);
267 260
268 wil_cache_mbox_regs(wil);
269
270 /* TODO: release MAC reset */ 261 /* TODO: release MAC reset */
271 wil6210_enable_irq(wil); 262 wil6210_enable_irq(wil);
272 263
@@ -352,9 +343,9 @@ static int __wil_up(struct wil6210_priv *wil)
352 wil_err(wil, "SSID not set\n"); 343 wil_err(wil, "SSID not set\n");
353 return -EINVAL; 344 return -EINVAL;
354 } 345 }
355 wmi_set_ssid(wil, wdev->ssid_len, wdev->ssid); 346 rc = wmi_set_ssid(wil, wdev->ssid_len, wdev->ssid);
356 if (channel) 347 if (rc)
357 wmi_set_channel(wil, channel->hw_value); 348 return rc;
358 break; 349 break;
359 default: 350 default:
360 break; 351 break;
@@ -364,9 +355,12 @@ static int __wil_up(struct wil6210_priv *wil)
364 wmi_set_mac_address(wil, ndev->dev_addr); 355 wmi_set_mac_address(wil, ndev->dev_addr);
365 356
366 /* Set up beaconing if required. */ 357 /* Set up beaconing if required. */
367 rc = wmi_set_bcon(wil, bi, wmi_nettype); 358 if (bi > 0) {
368 if (rc) 359 rc = wmi_pcp_start(wil, bi, wmi_nettype,
369 return rc; 360 (channel ? channel->hw_value : 0));
361 if (rc)
362 return rc;
363 }
370 364
371 /* Rx VRING. After MAC and beacon */ 365 /* Rx VRING. After MAC and beacon */
372 wil_rx_init(wil); 366 wil_rx_init(wil);
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 8ce2e33dce20..098a8ec6b841 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -14,10 +14,7 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include <linux/module.h>
18#include <linux/netdevice.h>
19#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
20#include <linux/slab.h>
21 18
22#include "wil6210.h" 19#include "wil6210.h"
23 20
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index 81c35c6e3832..eb1dc7ad80fb 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -14,10 +14,7 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include <linux/kernel.h>
18#include <linux/module.h> 17#include <linux/module.h>
19#include <linux/slab.h>
20#include <linux/netdevice.h>
21#include <linux/debugfs.h> 18#include <linux/debugfs.h>
22#include <linux/pci.h> 19#include <linux/pci.h>
23#include <linux/moduleparam.h> 20#include <linux/moduleparam.h>
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index d1315b442375..79d4e3271b00 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -14,10 +14,7 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include <linux/kernel.h>
18#include <linux/netdevice.h>
19#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
20#include <linux/hardirq.h>
21#include <net/ieee80211_radiotap.h> 18#include <net/ieee80211_radiotap.h>
22#include <linux/if_arp.h> 19#include <linux/if_arp.h>
23#include <linux/moduleparam.h> 20#include <linux/moduleparam.h>
@@ -83,8 +80,6 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
83 */ 80 */
84 vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL); 81 vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
85 if (!vring->va) { 82 if (!vring->va) {
86 wil_err(wil, "vring_alloc [%d] failed to alloc DMA mem\n",
87 vring->size);
88 kfree(vring->ctx); 83 kfree(vring->ctx);
89 vring->ctx = NULL; 84 vring->ctx = NULL;
90 return -ENOMEM; 85 return -ENOMEM;
@@ -560,7 +555,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
560 if (rc) 555 if (rc)
561 goto out_free; 556 goto out_free;
562 557
563 if (reply.cmd.status != WMI_VRING_CFG_SUCCESS) { 558 if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) {
564 wil_err(wil, "Tx config failed, status 0x%02x\n", 559 wil_err(wil, "Tx config failed, status 0x%02x\n",
565 reply.cmd.status); 560 reply.cmd.status);
566 rc = -EINVAL; 561 rc = -EINVAL;
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index aea961ff8f08..8f76ecd8a7e5 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -21,8 +21,6 @@
21#include <linux/wireless.h> 21#include <linux/wireless.h>
22#include <net/cfg80211.h> 22#include <net/cfg80211.h>
23 23
24#include "dbg_hexdump.h"
25
26#define WIL_NAME "wil6210" 24#define WIL_NAME "wil6210"
27 25
28/** 26/**
@@ -188,6 +186,7 @@ enum { /* for wil6210_priv.status */
188 wil_status_fwready = 0, 186 wil_status_fwready = 0,
189 wil_status_fwconnected, 187 wil_status_fwconnected,
190 wil_status_dontscan, 188 wil_status_dontscan,
189 wil_status_reset_done,
191 wil_status_irqen, /* FIXME: interrupts enabled - for debug */ 190 wil_status_irqen, /* FIXME: interrupts enabled - for debug */
192}; 191};
193 192
@@ -210,6 +209,8 @@ struct wil6210_priv {
210 struct wireless_dev *wdev; 209 struct wireless_dev *wdev;
211 void __iomem *csr; 210 void __iomem *csr;
212 ulong status; 211 ulong status;
212 u32 fw_version;
213 u8 n_mids; /* number of additional MIDs as reported by FW */
213 /* profile */ 214 /* profile */
214 u32 monitor_flags; 215 u32 monitor_flags;
215 u32 secure_pcp; /* create secure PCP? */ 216 u32 secure_pcp; /* create secure PCP? */
@@ -227,7 +228,7 @@ struct wil6210_priv {
227 struct workqueue_struct *wmi_wq; /* for deferred calls */ 228 struct workqueue_struct *wmi_wq; /* for deferred calls */
228 struct work_struct wmi_event_worker; 229 struct work_struct wmi_event_worker;
229 struct workqueue_struct *wmi_wq_conn; /* for connect worker */ 230 struct workqueue_struct *wmi_wq_conn; /* for connect worker */
230 struct work_struct wmi_connect_worker; 231 struct work_struct connect_worker;
231 struct work_struct disconnect_worker; 232 struct work_struct disconnect_worker;
232 struct timer_list connect_timer; 233 struct timer_list connect_timer;
233 int pending_connect_cid; 234 int pending_connect_cid;
@@ -277,13 +278,13 @@ struct wil6210_priv {
277 278
278#define wil_hex_dump_txrx(prefix_str, prefix_type, rowsize, \ 279#define wil_hex_dump_txrx(prefix_str, prefix_type, rowsize, \
279 groupsize, buf, len, ascii) \ 280 groupsize, buf, len, ascii) \
280 wil_print_hex_dump_debug("DBG[TXRX]" prefix_str,\ 281 print_hex_dump_debug("DBG[TXRX]" prefix_str,\
281 prefix_type, rowsize, \ 282 prefix_type, rowsize, \
282 groupsize, buf, len, ascii) 283 groupsize, buf, len, ascii)
283 284
284#define wil_hex_dump_wmi(prefix_str, prefix_type, rowsize, \ 285#define wil_hex_dump_wmi(prefix_str, prefix_type, rowsize, \
285 groupsize, buf, len, ascii) \ 286 groupsize, buf, len, ascii) \
286 wil_print_hex_dump_debug("DBG[ WMI]" prefix_str,\ 287 print_hex_dump_debug("DBG[ WMI]" prefix_str,\
287 prefix_type, rowsize, \ 288 prefix_type, rowsize, \
288 groupsize, buf, len, ascii) 289 groupsize, buf, len, ascii)
289 290
@@ -313,7 +314,6 @@ int wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len);
313void wmi_recv_cmd(struct wil6210_priv *wil); 314void wmi_recv_cmd(struct wil6210_priv *wil);
314int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len, 315int wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len,
315 u16 reply_id, void *reply, u8 reply_size, int to_msec); 316 u16 reply_id, void *reply, u8 reply_size, int to_msec);
316void wmi_connect_worker(struct work_struct *work);
317void wmi_event_worker(struct work_struct *work); 317void wmi_event_worker(struct work_struct *work);
318void wmi_event_flush(struct wil6210_priv *wil); 318void wmi_event_flush(struct wil6210_priv *wil);
319int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid); 319int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid);
@@ -328,6 +328,8 @@ int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
328int wmi_echo(struct wil6210_priv *wil); 328int wmi_echo(struct wil6210_priv *wil);
329int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie); 329int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie);
330int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring); 330int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring);
331int wmi_p2p_cfg(struct wil6210_priv *wil, int channel);
332int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r);
331 333
332int wil6210_init_irq(struct wil6210_priv *wil, int irq); 334int wil6210_init_irq(struct wil6210_priv *wil, int irq);
333void wil6210_fini_irq(struct wil6210_priv *wil, int irq); 335void wil6210_fini_irq(struct wil6210_priv *wil, int irq);
@@ -341,7 +343,8 @@ struct wireless_dev *wil_cfg80211_init(struct device *dev);
341void wil_wdev_free(struct wil6210_priv *wil); 343void wil_wdev_free(struct wil6210_priv *wil);
342 344
343int wmi_set_mac_address(struct wil6210_priv *wil, void *addr); 345int wmi_set_mac_address(struct wil6210_priv *wil, void *addr);
344int wmi_set_bcon(struct wil6210_priv *wil, int bi, u8 wmi_nettype); 346int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan);
347int wmi_pcp_stop(struct wil6210_priv *wil);
345void wil6210_disconnect(struct wil6210_priv *wil, void *bssid); 348void wil6210_disconnect(struct wil6210_priv *wil, void *bssid);
346 349
347int wil_rx_init(struct wil6210_priv *wil); 350int wil_rx_init(struct wil6210_priv *wil);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 0bb3b76b4b58..45b04e383f9a 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -14,9 +14,6 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include <linux/pci.h>
18#include <linux/io.h>
19#include <linux/list.h>
20#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
21#include <linux/if_arp.h> 18#include <linux/if_arp.h>
22 19
@@ -272,16 +269,18 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
272 struct net_device *ndev = wil_to_ndev(wil); 269 struct net_device *ndev = wil_to_ndev(wil);
273 struct wireless_dev *wdev = wil->wdev; 270 struct wireless_dev *wdev = wil->wdev;
274 struct wmi_ready_event *evt = d; 271 struct wmi_ready_event *evt = d;
275 u32 ver = le32_to_cpu(evt->sw_version); 272 wil->fw_version = le32_to_cpu(evt->sw_version);
273 wil->n_mids = evt->numof_additional_mids;
276 274
277 wil_dbg_wmi(wil, "FW ver. %d; MAC %pM\n", ver, evt->mac); 275 wil_dbg_wmi(wil, "FW ver. %d; MAC %pM; %d MID's\n", wil->fw_version,
276 evt->mac, wil->n_mids);
278 277
279 if (!is_valid_ether_addr(ndev->dev_addr)) { 278 if (!is_valid_ether_addr(ndev->dev_addr)) {
280 memcpy(ndev->dev_addr, evt->mac, ETH_ALEN); 279 memcpy(ndev->dev_addr, evt->mac, ETH_ALEN);
281 memcpy(ndev->perm_addr, evt->mac, ETH_ALEN); 280 memcpy(ndev->perm_addr, evt->mac, ETH_ALEN);
282 } 281 }
283 snprintf(wdev->wiphy->fw_version, sizeof(wdev->wiphy->fw_version), 282 snprintf(wdev->wiphy->fw_version, sizeof(wdev->wiphy->fw_version),
284 "%d", ver); 283 "%d", wil->fw_version);
285} 284}
286 285
287static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d, 286static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d,
@@ -324,17 +323,9 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
324 323
325 if (ieee80211_is_beacon(fc) || ieee80211_is_probe_resp(fc)) { 324 if (ieee80211_is_beacon(fc) || ieee80211_is_probe_resp(fc)) {
326 struct cfg80211_bss *bss; 325 struct cfg80211_bss *bss;
327 u64 tsf = le64_to_cpu(rx_mgmt_frame->u.beacon.timestamp); 326
328 u16 cap = le16_to_cpu(rx_mgmt_frame->u.beacon.capab_info); 327 bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame,
329 u16 bi = le16_to_cpu(rx_mgmt_frame->u.beacon.beacon_int); 328 d_len, signal, GFP_KERNEL);
330 const u8 *ie_buf = rx_mgmt_frame->u.beacon.variable;
331 size_t ie_len = d_len - offsetof(struct ieee80211_mgmt,
332 u.beacon.variable);
333 wil_dbg_wmi(wil, "Capability info : 0x%04x\n", cap);
334
335 bss = cfg80211_inform_bss(wiphy, channel, rx_mgmt_frame->bssid,
336 tsf, cap, bi, ie_buf, ie_len,
337 signal, GFP_KERNEL);
338 if (bss) { 329 if (bss) {
339 wil_dbg_wmi(wil, "Added BSS %pM\n", 330 wil_dbg_wmi(wil, "Added BSS %pM\n",
340 rx_mgmt_frame->bssid); 331 rx_mgmt_frame->bssid);
@@ -342,6 +333,9 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
342 } else { 333 } else {
343 wil_err(wil, "cfg80211_inform_bss() failed\n"); 334 wil_err(wil, "cfg80211_inform_bss() failed\n");
344 } 335 }
336 } else {
337 cfg80211_rx_mgmt(wil->wdev, freq, signal,
338 (void *)rx_mgmt_frame, d_len, GFP_KERNEL);
345 } 339 }
346} 340}
347 341
@@ -443,7 +437,7 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
443 memcpy(wil->dst_addr[0], evt->bssid, ETH_ALEN); 437 memcpy(wil->dst_addr[0], evt->bssid, ETH_ALEN);
444 438
445 wil->pending_connect_cid = evt->cid; 439 wil->pending_connect_cid = evt->cid;
446 queue_work(wil->wmi_wq_conn, &wil->wmi_connect_worker); 440 queue_work(wil->wmi_wq_conn, &wil->connect_worker);
447} 441}
448 442
449static void wmi_evt_disconnect(struct wil6210_priv *wil, int id, 443static void wmi_evt_disconnect(struct wil6210_priv *wil, int id,
@@ -528,6 +522,37 @@ static void wmi_evt_eapol_rx(struct wil6210_priv *wil, int id,
528 } 522 }
529} 523}
530 524
525static void wmi_evt_linkup(struct wil6210_priv *wil, int id, void *d, int len)
526{
527 struct net_device *ndev = wil_to_ndev(wil);
528 struct wmi_data_port_open_event *evt = d;
529
530 wil_dbg_wmi(wil, "Link UP for CID %d\n", evt->cid);
531
532 netif_carrier_on(ndev);
533}
534
535static void wmi_evt_linkdown(struct wil6210_priv *wil, int id, void *d, int len)
536{
537 struct net_device *ndev = wil_to_ndev(wil);
538 struct wmi_wbe_link_down_event *evt = d;
539
540 wil_dbg_wmi(wil, "Link DOWN for CID %d, reason %d\n",
541 evt->cid, le32_to_cpu(evt->reason));
542
543 netif_carrier_off(ndev);
544}
545
546static void wmi_evt_ba_status(struct wil6210_priv *wil, int id, void *d,
547 int len)
548{
549 struct wmi_vring_ba_status_event *evt = d;
550
551 wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d\n",
552 evt->ringid, evt->status ? "N/A" : "OK", evt->agg_wsize,
553 __le16_to_cpu(evt->ba_timeout));
554}
555
531static const struct { 556static const struct {
532 int eventid; 557 int eventid;
533 void (*handler)(struct wil6210_priv *wil, int eventid, 558 void (*handler)(struct wil6210_priv *wil, int eventid,
@@ -541,6 +566,9 @@ static const struct {
541 {WMI_DISCONNECT_EVENTID, wmi_evt_disconnect}, 566 {WMI_DISCONNECT_EVENTID, wmi_evt_disconnect},
542 {WMI_NOTIFY_REQ_DONE_EVENTID, wmi_evt_notify}, 567 {WMI_NOTIFY_REQ_DONE_EVENTID, wmi_evt_notify},
543 {WMI_EAPOL_RX_EVENTID, wmi_evt_eapol_rx}, 568 {WMI_EAPOL_RX_EVENTID, wmi_evt_eapol_rx},
569 {WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_linkup},
570 {WMI_WBE_LINKDOWN_EVENTID, wmi_evt_linkdown},
571 {WMI_BA_STATUS_EVENTID, wmi_evt_ba_status},
544}; 572};
545 573
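
[Editor's note: the three new handlers slot into this eventid-to-handler table, which the WMI event path walks linearly. A dispatch loop consistent with the table's layout; the array name and call site are assumptions, neither is visible in this diff:

	for (i = 0; i < ARRAY_SIZE(wmi_evt_handlers); i++) {
		if (wmi_evt_handlers[i].eventid == id) {
			wmi_evt_handlers[i].handler(wil, id, d, len);
			break;
		}
	}

Link up/down simply toggle the net_device carrier; the BA status handler only logs for now.]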
546/* 574/*
@@ -559,6 +587,11 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
559 void __iomem *src; 587 void __iomem *src;
560 ulong flags; 588 ulong flags;
561 589
590 if (!test_bit(wil_status_reset_done, &wil->status)) {
591 wil_err(wil, "Reset not completed\n");
592 return;
593 }
594
562 for (;;) { 595 for (;;) {
563 u16 len; 596 u16 len;
564 597
@@ -683,18 +716,39 @@ int wmi_set_mac_address(struct wil6210_priv *wil, void *addr)
683 return wmi_send(wil, WMI_SET_MAC_ADDRESS_CMDID, &cmd, sizeof(cmd)); 716 return wmi_send(wil, WMI_SET_MAC_ADDRESS_CMDID, &cmd, sizeof(cmd));
684} 717}
685 718
686int wmi_set_bcon(struct wil6210_priv *wil, int bi, u8 wmi_nettype) 719int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype, u8 chan)
687{ 720{
688 struct wmi_bcon_ctrl_cmd cmd = { 721 int rc;
722
723 struct wmi_pcp_start_cmd cmd = {
689 .bcon_interval = cpu_to_le16(bi), 724 .bcon_interval = cpu_to_le16(bi),
690 .network_type = wmi_nettype, 725 .network_type = wmi_nettype,
691 .disable_sec_offload = 1, 726 .disable_sec_offload = 1,
727 .channel = chan,
692 }; 728 };
729 struct {
730 struct wil6210_mbox_hdr_wmi wmi;
731 struct wmi_pcp_started_event evt;
732 } __packed reply;
693 733
694 if (!wil->secure_pcp) 734 if (!wil->secure_pcp)
695 cmd.disable_sec = 1; 735 cmd.disable_sec = 1;
696 736
697 return wmi_send(wil, WMI_BCON_CTRL_CMDID, &cmd, sizeof(cmd)); 737 rc = wmi_call(wil, WMI_PCP_START_CMDID, &cmd, sizeof(cmd),
738 WMI_PCP_STARTED_EVENTID, &reply, sizeof(reply), 100);
739 if (rc)
740 return rc;
741
742 if (reply.evt.status != WMI_FW_STATUS_SUCCESS)
743 rc = -EINVAL;
744
745 return rc;
746}
747
748int wmi_pcp_stop(struct wil6210_priv *wil)
749{
750 return wmi_call(wil, WMI_PCP_STOP_CMDID, NULL, 0,
751 WMI_PCP_STOPPED_EVENTID, NULL, 0, 20);
698} 752}
699 753
700int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid) 754int wmi_set_ssid(struct wil6210_priv *wil, u8 ssid_len, const void *ssid)
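
[Editor's note: wmi_pcp_start() replaces the fire-and-forget wmi_send() of the old wmi_set_bcon() with the driver's synchronous command template: issue the command, block up to a timeout (in ms) for the matching completion event, then check the firmware status carried in the reply. Condensed from the hunk above:

	struct {
		struct wil6210_mbox_hdr_wmi wmi; /* mbox header precedes payload */
		struct wmi_pcp_started_event evt;
	} __packed reply;

	rc = wmi_call(wil, WMI_PCP_START_CMDID, &cmd, sizeof(cmd),
		      WMI_PCP_STARTED_EVENTID, &reply, sizeof(reply), 100);
	if (!rc && reply.evt.status != WMI_FW_STATUS_SUCCESS)
		rc = -EINVAL;

wmi_get_temperature() further down follows the identical shape with WMI_TEMP_SENSE_CMDID / WMI_TEMP_SENSE_DONE_EVENTID.]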
@@ -765,6 +819,16 @@ int wmi_get_channel(struct wil6210_priv *wil, int *channel)
765 return 0; 819 return 0;
766} 820}
767 821
822int wmi_p2p_cfg(struct wil6210_priv *wil, int channel)
823{
824 struct wmi_p2p_cfg_cmd cmd = {
825 .discovery_mode = WMI_DISCOVERY_MODE_NON_OFFLOAD,
826 .channel = channel - 1,
827 };
828
829 return wmi_send(wil, WMI_P2P_CFG_CMDID, &cmd, sizeof(cmd));
830}
831
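
[Editor's note: mind the 1-based/0-based conversion above: callers pass the 60 GHz channel number while the firmware wants a zero-based index, hence ".channel = channel - 1". A hypothetical call:

	rc = wmi_p2p_cfg(wil, 2);	/* 60 GHz channel 2 -> wire value 1 */
]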
768int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb) 832int wmi_tx_eapol(struct wil6210_priv *wil, struct sk_buff *skb)
769{ 833{
770 struct wmi_eapol_tx_cmd *cmd; 834 struct wmi_eapol_tx_cmd *cmd;
@@ -843,7 +907,7 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
843 /* BUG: FW API define ieLen as u8. Will fix FW */ 907 /* BUG: FW API define ieLen as u8. Will fix FW */
844 cmd->ie_len = cpu_to_le16(ie_len); 908 cmd->ie_len = cpu_to_le16(ie_len);
845 memcpy(cmd->ie_info, ie, ie_len); 909 memcpy(cmd->ie_info, ie, ie_len);
846 rc = wmi_send(wil, WMI_SET_APPIE_CMDID, &cmd, len); 910 rc = wmi_send(wil, WMI_SET_APPIE_CMDID, cmd, len);
847 kfree(cmd); 911 kfree(cmd);
848 912
849 return rc; 913 return rc;
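
[Editor's note: the cmd-vs-&cmd change above is a genuine bug fix. cmd is already a heap-allocated struct wmi_set_appie_cmd *, so the old &cmd passed the address of the local pointer variable rather than the command payload. In shape, abbreviated from the surrounding function:

	struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL);

	cmd->ie_len = cpu_to_le16(ie_len);
	memcpy(cmd->ie_info, ie, ie_len);
	rc = wmi_send(wil, WMI_SET_APPIE_CMDID, cmd, len);	/* was: &cmd */
	kfree(cmd);
]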
@@ -898,6 +962,31 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
898 return rc; 962 return rc;
899} 963}
900 964
965int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r)
966{
967 int rc;
968 struct wmi_temp_sense_cmd cmd = {
969 .measure_marlon_m_en = cpu_to_le32(!!t_m),
970 .measure_marlon_r_en = cpu_to_le32(!!t_r),
971 };
972 struct {
973 struct wil6210_mbox_hdr_wmi wmi;
974 struct wmi_temp_sense_done_event evt;
975 } __packed reply;
976
977 rc = wmi_call(wil, WMI_TEMP_SENSE_CMDID, &cmd, sizeof(cmd),
978 WMI_TEMP_SENSE_DONE_EVENTID, &reply, sizeof(reply), 100);
979 if (rc)
980 return rc;
981
982 if (t_m)
983 *t_m = le32_to_cpu(reply.evt.marlon_m_t1000);
984 if (t_r)
985 *t_r = le32_to_cpu(reply.evt.marlon_r_t1000);
986
987 return 0;
988}
989
901void wmi_event_flush(struct wil6210_priv *wil) 990void wmi_event_flush(struct wil6210_priv *wil)
902{ 991{
903 struct pending_wmi_event *evt, *t; 992 struct pending_wmi_event *evt, *t;
@@ -997,24 +1086,3 @@ void wmi_event_worker(struct work_struct *work)
997 kfree(evt); 1086 kfree(evt);
998 } 1087 }
999} 1088}
1000
1001void wmi_connect_worker(struct work_struct *work)
1002{
1003 int rc;
1004 struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
1005 wmi_connect_worker);
1006
1007 if (wil->pending_connect_cid < 0) {
1008 wil_err(wil, "No connection pending\n");
1009 return;
1010 }
1011
1012 wil_dbg_wmi(wil, "Configure for connection CID %d\n",
1013 wil->pending_connect_cid);
1014
1015 rc = wil_vring_init_tx(wil, 0, WIL6210_TX_RING_SIZE,
1016 wil->pending_connect_cid, 0);
1017 wil->pending_connect_cid = -1;
1018 if (rc == 0)
1019 wil_link_on(wil);
1020}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 3bbf87572b07..50b8528394f4 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -36,6 +36,7 @@
36enum wmi_command_id { 36enum wmi_command_id {
37 WMI_CONNECT_CMDID = 0x0001, 37 WMI_CONNECT_CMDID = 0x0001,
38 WMI_DISCONNECT_CMDID = 0x0003, 38 WMI_DISCONNECT_CMDID = 0x0003,
39 WMI_DISCONNECT_STA_CMDID = 0x0004,
39 WMI_START_SCAN_CMDID = 0x0007, 40 WMI_START_SCAN_CMDID = 0x0007,
40 WMI_SET_BSS_FILTER_CMDID = 0x0009, 41 WMI_SET_BSS_FILTER_CMDID = 0x0009,
41 WMI_SET_PROBED_SSID_CMDID = 0x000a, 42 WMI_SET_PROBED_SSID_CMDID = 0x000a,
@@ -44,7 +45,6 @@ enum wmi_command_id {
44 WMI_ADD_CIPHER_KEY_CMDID = 0x0016, 45 WMI_ADD_CIPHER_KEY_CMDID = 0x0016,
45 WMI_DELETE_CIPHER_KEY_CMDID = 0x0017, 46 WMI_DELETE_CIPHER_KEY_CMDID = 0x0017,
46 WMI_SET_APPIE_CMDID = 0x003f, 47 WMI_SET_APPIE_CMDID = 0x003f,
47 WMI_GET_APPIE_CMDID = 0x0040,
48 WMI_SET_WSC_STATUS_CMDID = 0x0041, 48 WMI_SET_WSC_STATUS_CMDID = 0x0041,
49 WMI_PXMT_RANGE_CFG_CMDID = 0x0042, 49 WMI_PXMT_RANGE_CFG_CMDID = 0x0042,
50 WMI_PXMT_SNR2_RANGE_CFG_CMDID = 0x0043, 50 WMI_PXMT_SNR2_RANGE_CFG_CMDID = 0x0043,
@@ -55,11 +55,11 @@ enum wmi_command_id {
55 WMI_DEEP_ECHO_CMDID = 0x0804, 55 WMI_DEEP_ECHO_CMDID = 0x0804,
56 WMI_CONFIG_MAC_CMDID = 0x0805, 56 WMI_CONFIG_MAC_CMDID = 0x0805,
57 WMI_CONFIG_PHY_DEBUG_CMDID = 0x0806, 57 WMI_CONFIG_PHY_DEBUG_CMDID = 0x0806,
58 WMI_ADD_STATION_CMDID = 0x0807,
59 WMI_ADD_DEBUG_TX_PCKT_CMDID = 0x0808, 58 WMI_ADD_DEBUG_TX_PCKT_CMDID = 0x0808,
60 WMI_PHY_GET_STATISTICS_CMDID = 0x0809, 59 WMI_PHY_GET_STATISTICS_CMDID = 0x0809,
61 WMI_FS_TUNE_CMDID = 0x080a, 60 WMI_FS_TUNE_CMDID = 0x080a,
62 WMI_CORR_MEASURE_CMDID = 0x080b, 61 WMI_CORR_MEASURE_CMDID = 0x080b,
62 WMI_READ_RSSI_CMDID = 0x080c,
63 WMI_TEMP_SENSE_CMDID = 0x080e, 63 WMI_TEMP_SENSE_CMDID = 0x080e,
64 WMI_DC_CALIB_CMDID = 0x080f, 64 WMI_DC_CALIB_CMDID = 0x080f,
65 WMI_SEND_TONE_CMDID = 0x0810, 65 WMI_SEND_TONE_CMDID = 0x0810,
@@ -75,9 +75,9 @@ enum wmi_command_id {
75 MAC_IO_STATIC_PARAMS_CMDID = 0x081b, 75 MAC_IO_STATIC_PARAMS_CMDID = 0x081b,
76 MAC_IO_DYNAMIC_PARAMS_CMDID = 0x081c, 76 MAC_IO_DYNAMIC_PARAMS_CMDID = 0x081c,
77 WMI_SILENT_RSSI_CALIB_CMDID = 0x081d, 77 WMI_SILENT_RSSI_CALIB_CMDID = 0x081d,
78 WMI_RF_RX_TEST_CMDID = 0x081e,
78 WMI_CFG_RX_CHAIN_CMDID = 0x0820, 79 WMI_CFG_RX_CHAIN_CMDID = 0x0820,
79 WMI_VRING_CFG_CMDID = 0x0821, 80 WMI_VRING_CFG_CMDID = 0x0821,
80 WMI_RX_ON_CMDID = 0x0822,
81 WMI_VRING_BA_EN_CMDID = 0x0823, 81 WMI_VRING_BA_EN_CMDID = 0x0823,
82 WMI_VRING_BA_DIS_CMDID = 0x0824, 82 WMI_VRING_BA_DIS_CMDID = 0x0824,
83 WMI_RCP_ADDBA_RESP_CMDID = 0x0825, 83 WMI_RCP_ADDBA_RESP_CMDID = 0x0825,
@@ -87,7 +87,6 @@ enum wmi_command_id {
87 WMI_SET_PCP_CHANNEL_CMDID = 0x0829, 87 WMI_SET_PCP_CHANNEL_CMDID = 0x0829,
88 WMI_GET_PCP_CHANNEL_CMDID = 0x082a, 88 WMI_GET_PCP_CHANNEL_CMDID = 0x082a,
89 WMI_SW_TX_REQ_CMDID = 0x082b, 89 WMI_SW_TX_REQ_CMDID = 0x082b,
90 WMI_RX_OFF_CMDID = 0x082c,
91 WMI_READ_MAC_RXQ_CMDID = 0x0830, 90 WMI_READ_MAC_RXQ_CMDID = 0x0830,
92 WMI_READ_MAC_TXQ_CMDID = 0x0831, 91 WMI_READ_MAC_TXQ_CMDID = 0x0831,
93 WMI_WRITE_MAC_RXQ_CMDID = 0x0832, 92 WMI_WRITE_MAC_RXQ_CMDID = 0x0832,
@@ -112,6 +111,18 @@ enum wmi_command_id {
112 WMI_FLASH_READ_CMDID = 0x0902, 111 WMI_FLASH_READ_CMDID = 0x0902,
113 WMI_FLASH_WRITE_CMDID = 0x0903, 112 WMI_FLASH_WRITE_CMDID = 0x0903,
114 WMI_SECURITY_UNIT_TEST_CMDID = 0x0904, 113 WMI_SECURITY_UNIT_TEST_CMDID = 0x0904,
114 /*P2P*/
115 WMI_P2P_CFG_CMDID = 0x0910,
116 WMI_PORT_ALLOCATE_CMDID = 0x0911,
117 WMI_PORT_DELETE_CMDID = 0x0912,
118 WMI_POWER_MGMT_CFG_CMDID = 0x0913,
119 WMI_START_LISTEN_CMDID = 0x0914,
120 WMI_START_SEARCH_CMDID = 0x0915,
121 WMI_DISCOVERY_START_CMDID = 0x0916,
122 WMI_DISCOVERY_STOP_CMDID = 0x0917,
123 WMI_PCP_START_CMDID = 0x0918,
124 WMI_PCP_STOP_CMDID = 0x0919,
125 WMI_GET_PCP_FACTOR_CMDID = 0x091b,
115 126
116 WMI_SET_MAC_ADDRESS_CMDID = 0xf003, 127 WMI_SET_MAC_ADDRESS_CMDID = 0xf003,
117 WMI_ABORT_SCAN_CMDID = 0xf007, 128 WMI_ABORT_SCAN_CMDID = 0xf007,
@@ -132,18 +143,6 @@ enum wmi_command_id {
132 */ 143 */
133 144
134/* 145/*
135 * Frame Types
136 */
137enum wmi_mgmt_frame_type {
138 WMI_FRAME_BEACON = 0,
139 WMI_FRAME_PROBE_REQ = 1,
140 WMI_FRAME_PROBE_RESP = 2,
141 WMI_FRAME_ASSOC_REQ = 3,
142 WMI_FRAME_ASSOC_RESP = 4,
143 WMI_NUM_MGMT_FRAME,
144};
145
146/*
147 * WMI_CONNECT_CMDID 146 * WMI_CONNECT_CMDID
148 */ 147 */
149enum wmi_network_type { 148enum wmi_network_type {
@@ -184,7 +183,7 @@ enum wmi_crypto_type {
184enum wmi_connect_ctrl_flag_bits { 183enum wmi_connect_ctrl_flag_bits {
185 WMI_CONNECT_ASSOC_POLICY_USER = 0x0001, 184 WMI_CONNECT_ASSOC_POLICY_USER = 0x0001,
186 WMI_CONNECT_SEND_REASSOC = 0x0002, 185 WMI_CONNECT_SEND_REASSOC = 0x0002,
187 WMI_CONNECT_IGNORE_WPAx_GROUP_CIPHER = 0x0004, 186 WMI_CONNECT_IGNORE_WPA_GROUP_CIPHER = 0x0004,
188 WMI_CONNECT_PROFILE_MATCH_DONE = 0x0008, 187 WMI_CONNECT_PROFILE_MATCH_DONE = 0x0008,
189 WMI_CONNECT_IGNORE_AAC_BEACON = 0x0010, 188 WMI_CONNECT_IGNORE_AAC_BEACON = 0x0010,
190 WMI_CONNECT_CSA_FOLLOW_BSS = 0x0020, 189 WMI_CONNECT_CSA_FOLLOW_BSS = 0x0020,
@@ -212,6 +211,13 @@ struct wmi_connect_cmd {
212 u8 reserved1[2]; 211 u8 reserved1[2];
213} __packed; 212} __packed;
214 213
214/*
215 * WMI_DISCONNECT_STA_CMDID
216 */
217struct wmi_disconnect_sta_cmd {
218 u8 dst_mac[WMI_MAC_LEN];
219 __le16 disconnect_reason;
220} __packed;
215 221
216/* 222/*
217 * WMI_RECONNECT_CMDID 223 * WMI_RECONNECT_CMDID
@@ -289,10 +295,12 @@ struct wmi_delete_cipher_key_cmd {
289enum wmi_scan_type { 295enum wmi_scan_type {
290 WMI_LONG_SCAN = 0, 296 WMI_LONG_SCAN = 0,
291 WMI_SHORT_SCAN = 1, 297 WMI_SHORT_SCAN = 1,
298 WMI_PBC_SCAN = 2,
292}; 299};
293 300
294struct wmi_start_scan_cmd { 301struct wmi_start_scan_cmd {
295 u8 reserved[8]; 302 u8 reserved[8];
303
296 __le32 home_dwell_time; /* Max duration in the home channel(ms) */ 304 __le32 home_dwell_time; /* Max duration in the home channel(ms) */
297 __le32 force_scan_interval; /* Time interval between scans (ms)*/ 305 __le32 force_scan_interval; /* Time interval between scans (ms)*/
298 u8 scan_type; /* wmi_scan_type */ 306 u8 scan_type; /* wmi_scan_type */
@@ -309,7 +317,7 @@ struct wmi_start_scan_cmd {
309/* 317/*
310 * WMI_SET_PROBED_SSID_CMDID 318 * WMI_SET_PROBED_SSID_CMDID
311 */ 319 */
312#define MAX_PROBED_SSID_INDEX (15) 320#define MAX_PROBED_SSID_INDEX (3)
313 321
314enum wmi_ssid_flag { 322enum wmi_ssid_flag {
315 WMI_SSID_FLAG_DISABLE = 0, /* disables entry */ 323 WMI_SSID_FLAG_DISABLE = 0, /* disables entry */
@@ -328,6 +336,20 @@ struct wmi_probed_ssid_cmd {
328 * WMI_SET_APPIE_CMDID 336 * WMI_SET_APPIE_CMDID
329 * Add Application specified IE to a management frame 337 * Add Application specified IE to a management frame
330 */ 338 */
339#define WMI_MAX_IE_LEN (1024)
340
341/*
342 * Frame Types
343 */
344enum wmi_mgmt_frame_type {
345 WMI_FRAME_BEACON = 0,
346 WMI_FRAME_PROBE_REQ = 1,
347 WMI_FRAME_PROBE_RESP = 2,
348 WMI_FRAME_ASSOC_REQ = 3,
349 WMI_FRAME_ASSOC_RESP = 4,
350 WMI_NUM_MGMT_FRAME,
351};
352
331struct wmi_set_appie_cmd { 353struct wmi_set_appie_cmd {
332 u8 mgmt_frm_type; /* enum wmi_mgmt_frame_type */ 354 u8 mgmt_frm_type; /* enum wmi_mgmt_frame_type */
333 u8 reserved; 355 u8 reserved;
@@ -335,13 +357,18 @@ struct wmi_set_appie_cmd {
335 u8 ie_info[0]; 357 u8 ie_info[0];
336} __packed; 358} __packed;
337 359
338#define WMI_MAX_IE_LEN (1024)
339 360
361/*
362 * WMI_PXMT_RANGE_CFG_CMDID
363 */
340struct wmi_pxmt_range_cfg_cmd { 364struct wmi_pxmt_range_cfg_cmd {
341 u8 dst_mac[WMI_MAC_LEN]; 365 u8 dst_mac[WMI_MAC_LEN];
342 __le16 range; 366 __le16 range;
343} __packed; 367} __packed;
344 368
369/*
370 * WMI_PXMT_SNR2_RANGE_CFG_CMDID
371 */
345struct wmi_pxmt_snr2_range_cfg_cmd { 372struct wmi_pxmt_snr2_range_cfg_cmd {
346 s8 snr2range_arr[WMI_PROX_RANGE_NUM-1]; 373 s8 snr2range_arr[WMI_PROX_RANGE_NUM-1];
347} __packed; 374} __packed;
@@ -359,6 +386,23 @@ struct wmi_rf_mgmt_cmd {
359 __le32 rf_mgmt_type; 386 __le32 rf_mgmt_type;
360} __packed; 387} __packed;
361 388
389
390/*
391 * WMI_RF_RX_TEST_CMDID
392 */
393struct wmi_rf_rx_test_cmd {
394 __le32 sector;
395} __packed;
396
397/*
398 * WMI_CORR_MEASURE_CMDID
399 */
400struct wmi_corr_measure_cmd {
401 s32 freq_mhz;
402 __le32 length_samples;
403 __le32 iterations;
404} __packed;
405
362/* 406/*
363 * WMI_SET_SSID_CMDID 407 * WMI_SET_SSID_CMDID
364 */ 408 */
@@ -388,6 +432,74 @@ struct wmi_bcon_ctrl_cmd {
388 u8 disable_sec; 432 u8 disable_sec;
389} __packed; 433} __packed;
390 434
435
436/******* P2P ***********/
437
438/*
439 * WMI_PORT_ALLOCATE_CMDID
440 */
441enum wmi_port_role {
442 WMI_PORT_STA = 0,
443 WMI_PORT_PCP = 1,
444 WMI_PORT_AP = 2,
445 WMI_PORT_P2P_DEV = 3,
446 WMI_PORT_P2P_CLIENT = 4,
447 WMI_PORT_P2P_GO = 5,
448};
449
450struct wmi_port_allocate_cmd {
451 u8 mac[WMI_MAC_LEN];
452 u8 port_role;
453 u8 midid;
454} __packed;
455
456/*
457 * WMI_PORT_DELETE_CMDID
458 */
459struct wmi_delete_port_cmd {
460 u8 mid;
461 u8 reserved[3];
462} __packed;
463
464/*
465 * WMI_P2P_CFG_CMDID
466 */
467enum wmi_discovery_mode {
468 WMI_DISCOVERY_MODE_NON_OFFLOAD = 0,
469 WMI_DISCOVERY_MODE_OFFLOAD = 1,
470};
471
472struct wmi_p2p_cfg_cmd {
473 u8 discovery_mode; /* wmi_discovery_mode */
474 u8 channel;
475 __le16 bcon_interval; /* base for listen/search duration calculation */
476} __packed;
477
478/*
479 * WMI_POWER_MGMT_CFG_CMDID
480 */
481enum wmi_power_source_type {
482 WMI_POWER_SOURCE_BATTERY = 0,
483 WMI_POWER_SOURCE_OTHER = 1,
484};
485
486struct wmi_power_mgmt_cfg_cmd {
487 u8 power_source; /* wmi_power_source_type */
488 u8 reserved[3];
489} __packed;
490
491/*
492 * WMI_PCP_START_CMDID
493 */
494struct wmi_pcp_start_cmd {
495 __le16 bcon_interval;
496 u8 reserved0[10];
497 u8 network_type;
498 u8 channel;
499 u8 disable_sec_offload;
500 u8 disable_sec;
501} __packed;
502
391/* 503/*
392 * WMI_SW_TX_REQ_CMDID 504 * WMI_SW_TX_REQ_CMDID
393 */ 505 */
@@ -435,16 +547,17 @@ enum wmi_vring_cfg_schd_params_priority {
435 WMI_SCH_PRIO_HIGH = 1, 547 WMI_SCH_PRIO_HIGH = 1,
436}; 548};
437 549
550#define CIDXTID_CID_POS (0)
551#define CIDXTID_CID_LEN (4)
552#define CIDXTID_CID_MSK (0xF)
553#define CIDXTID_TID_POS (4)
554#define CIDXTID_TID_LEN (4)
555#define CIDXTID_TID_MSK (0xF0)
556
438struct wmi_vring_cfg { 557struct wmi_vring_cfg {
439 struct wmi_sw_ring_cfg tx_sw_ring; 558 struct wmi_sw_ring_cfg tx_sw_ring;
440 u8 ringid; /* 0-23 vrings */ 559 u8 ringid; /* 0-23 vrings */
441 560
442 #define CIDXTID_CID_POS (0)
443 #define CIDXTID_CID_LEN (4)
444 #define CIDXTID_CID_MSK (0xF)
445 #define CIDXTID_TID_POS (4)
446 #define CIDXTID_TID_LEN (4)
447 #define CIDXTID_TID_MSK (0xF0)
448 u8 cidxtid; 561 u8 cidxtid;
449 562
450 u8 encap_trans_type; 563 u8 encap_trans_type;
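
[Editor's note: with the CIDXTID_* masks hoisted to file scope, wmi_vring_cfg here and the ADDBA/DELBA commands further down share one definition instead of three identical copies. Packing and unpacking the combined byte, as the POS/MSK values imply:

	u8 cidxtid = ((cid << CIDXTID_CID_POS) & CIDXTID_CID_MSK) |
		     ((tid << CIDXTID_TID_POS) & CIDXTID_TID_MSK);

	u8 cid_out = (cidxtid & CIDXTID_CID_MSK) >> CIDXTID_CID_POS; /* low nibble  */
	u8 tid_out = (cidxtid & CIDXTID_TID_MSK) >> CIDXTID_TID_POS; /* high nibble */
]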
@@ -501,8 +614,14 @@ struct wmi_vring_ba_dis_cmd {
501 */ 614 */
502struct wmi_notify_req_cmd { 615struct wmi_notify_req_cmd {
503 u8 cid; 616 u8 cid;
504 u8 reserved[3]; 617 u8 year;
618 u8 month;
619 u8 day;
505 __le32 interval_usec; 620 __le32 interval_usec;
621 u8 hour;
622 u8 minute;
623 u8 second;
624 u8 miliseconds;
506} __packed; 625} __packed;
507 626
508/* 627/*
@@ -548,6 +667,11 @@ enum wmi_cfg_rx_chain_cmd_nwifi_ds_trans_type {
548 WMI_NWIFI_RX_TRANS_MODE_PBSS2STA = 2, 667 WMI_NWIFI_RX_TRANS_MODE_PBSS2STA = 2,
549}; 668};
550 669
670enum wmi_cfg_rx_chain_cmd_reorder_type {
671 WMI_RX_HW_REORDER = 0,
672 WMI_RX_SW_REORDER = 1,
673};
674
551struct wmi_cfg_rx_chain_cmd { 675struct wmi_cfg_rx_chain_cmd {
552 __le32 action; 676 __le32 action;
553 struct wmi_sw_ring_cfg rx_sw_ring; 677 struct wmi_sw_ring_cfg rx_sw_ring;
@@ -596,7 +720,8 @@ struct wmi_cfg_rx_chain_cmd {
596 __le16 wb_thrsh; 720 __le16 wb_thrsh;
597 __le32 itr_value; 721 __le32 itr_value;
598 __le16 host_thrsh; 722 __le16 host_thrsh;
599 u8 reserved[2]; 723 u8 reorder_type;
724 u8 reserved;
600 struct wmi_sniffer_cfg sniffer_cfg; 725 struct wmi_sniffer_cfg sniffer_cfg;
601} __packed; 726} __packed;
602 727
@@ -604,15 +729,7 @@ struct wmi_cfg_rx_chain_cmd {
604 * WMI_RCP_ADDBA_RESP_CMDID 729 * WMI_RCP_ADDBA_RESP_CMDID
605 */ 730 */
606struct wmi_rcp_addba_resp_cmd { 731struct wmi_rcp_addba_resp_cmd {
607
608 #define CIDXTID_CID_POS (0)
609 #define CIDXTID_CID_LEN (4)
610 #define CIDXTID_CID_MSK (0xF)
611 #define CIDXTID_TID_POS (4)
612 #define CIDXTID_TID_LEN (4)
613 #define CIDXTID_TID_MSK (0xF0)
614 u8 cidxtid; 732 u8 cidxtid;
615
616 u8 dialog_token; 733 u8 dialog_token;
617 __le16 status_code; 734 __le16 status_code;
618 __le16 ba_param_set; /* ieee80211_ba_parameterset field to send */ 735 __le16 ba_param_set; /* ieee80211_ba_parameterset field to send */
@@ -623,15 +740,7 @@ struct wmi_rcp_addba_resp_cmd {
623 * WMI_RCP_DELBA_CMDID 740 * WMI_RCP_DELBA_CMDID
624 */ 741 */
625struct wmi_rcp_delba_cmd { 742struct wmi_rcp_delba_cmd {
626
627 #define CIDXTID_CID_POS (0)
628 #define CIDXTID_CID_LEN (4)
629 #define CIDXTID_CID_MSK (0xF)
630 #define CIDXTID_TID_POS (4)
631 #define CIDXTID_TID_LEN (4)
632 #define CIDXTID_TID_MSK (0xF0)
633 u8 cidxtid; 743 u8 cidxtid;
634
635 u8 reserved; 744 u8 reserved;
636 __le16 reason; 745 __le16 reason;
637} __packed; 746} __packed;
@@ -640,15 +749,7 @@ struct wmi_rcp_delba_cmd {
640 * WMI_RCP_ADDBA_REQ_CMDID 749 * WMI_RCP_ADDBA_REQ_CMDID
641 */ 750 */
642struct wmi_rcp_addba_req_cmd { 751struct wmi_rcp_addba_req_cmd {
643
644 #define CIDXTID_CID_POS (0)
645 #define CIDXTID_CID_LEN (4)
646 #define CIDXTID_CID_MSK (0xF)
647 #define CIDXTID_TID_POS (4)
648 #define CIDXTID_TID_LEN (4)
649 #define CIDXTID_TID_MSK (0xF0)
650 u8 cidxtid; 752 u8 cidxtid;
651
652 u8 dialog_token; 753 u8 dialog_token;
653 /* ieee80211_ba_parameterset field as it received */ 754 /* ieee80211_ba_parameterset field as it received */
654 __le16 ba_param_set; 755 __le16 ba_param_set;
@@ -665,7 +766,6 @@ struct wmi_set_mac_address_cmd {
665 u8 reserved[2]; 766 u8 reserved[2];
666} __packed; 767} __packed;
667 768
668
669/* 769/*
670* WMI_EAPOL_TX_CMDID 770* WMI_EAPOL_TX_CMDID
671*/ 771*/
@@ -692,6 +792,17 @@ struct wmi_echo_cmd {
692} __packed; 792} __packed;
693 793
694/* 794/*
795 * WMI_TEMP_SENSE_CMDID
796 *
797 * Measure MAC and radio temperatures
798 */
799struct wmi_temp_sense_cmd {
800 __le32 measure_marlon_m_en;
801 __le32 measure_marlon_r_en;
802} __packed;
803
804
805/*
695 * WMI Events 806 * WMI Events
696 */ 807 */
697 808
@@ -699,7 +810,6 @@ struct wmi_echo_cmd {
699 * List of Events (target to host) 810 * List of Events (target to host)
700 */ 811 */
701enum wmi_event_id { 812enum wmi_event_id {
702 WMI_IMM_RSP_EVENTID = 0x0000,
703 WMI_READY_EVENTID = 0x1001, 813 WMI_READY_EVENTID = 0x1001,
704 WMI_CONNECT_EVENTID = 0x1002, 814 WMI_CONNECT_EVENTID = 0x1002,
705 WMI_DISCONNECT_EVENTID = 0x1003, 815 WMI_DISCONNECT_EVENTID = 0x1003,
@@ -709,13 +819,9 @@ enum wmi_event_id {
709 WMI_FW_READY_EVENTID = 0x1801, 819 WMI_FW_READY_EVENTID = 0x1801,
710 WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x0200, 820 WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x0200,
711 WMI_ECHO_RSP_EVENTID = 0x1803, 821 WMI_ECHO_RSP_EVENTID = 0x1803,
712 WMI_CONFIG_MAC_DONE_EVENTID = 0x1805,
713 WMI_CONFIG_PHY_DEBUG_DONE_EVENTID = 0x1806,
714 WMI_ADD_STATION_DONE_EVENTID = 0x1807,
715 WMI_ADD_DEBUG_TX_PCKT_DONE_EVENTID = 0x1808,
716 WMI_PHY_GET_STATISTICS_EVENTID = 0x1809,
717 WMI_FS_TUNE_DONE_EVENTID = 0x180a, 822 WMI_FS_TUNE_DONE_EVENTID = 0x180a,
718 WMI_CORR_MEASURE_DONE_EVENTID = 0x180b, 823 WMI_CORR_MEASURE_EVENTID = 0x180b,
824 WMI_READ_RSSI_EVENTID = 0x180c,
719 WMI_TEMP_SENSE_DONE_EVENTID = 0x180e, 825 WMI_TEMP_SENSE_DONE_EVENTID = 0x180e,
720 WMI_DC_CALIB_DONE_EVENTID = 0x180f, 826 WMI_DC_CALIB_DONE_EVENTID = 0x180f,
721 WMI_IQ_TX_CALIB_DONE_EVENTID = 0x1811, 827 WMI_IQ_TX_CALIB_DONE_EVENTID = 0x1811,
@@ -727,10 +833,9 @@ enum wmi_event_id {
727 WMI_MARLON_R_WRITE_DONE_EVENTID = 0x1819, 833 WMI_MARLON_R_WRITE_DONE_EVENTID = 0x1819,
728 WMI_MARLON_R_TXRX_SEL_DONE_EVENTID = 0x181a, 834 WMI_MARLON_R_TXRX_SEL_DONE_EVENTID = 0x181a,
729 WMI_SILENT_RSSI_CALIB_DONE_EVENTID = 0x181d, 835 WMI_SILENT_RSSI_CALIB_DONE_EVENTID = 0x181d,
730 836 WMI_RF_RX_TEST_DONE_EVENTID = 0x181e,
731 WMI_CFG_RX_CHAIN_DONE_EVENTID = 0x1820, 837 WMI_CFG_RX_CHAIN_DONE_EVENTID = 0x1820,
732 WMI_VRING_CFG_DONE_EVENTID = 0x1821, 838 WMI_VRING_CFG_DONE_EVENTID = 0x1821,
733 WMI_RX_ON_DONE_EVENTID = 0x1822,
734 WMI_BA_STATUS_EVENTID = 0x1823, 839 WMI_BA_STATUS_EVENTID = 0x1823,
735 WMI_RCP_ADDBA_REQ_EVENTID = 0x1824, 840 WMI_RCP_ADDBA_REQ_EVENTID = 0x1824,
736 WMI_ADDBA_RESP_SENT_EVENTID = 0x1825, 841 WMI_ADDBA_RESP_SENT_EVENTID = 0x1825,
@@ -738,7 +843,6 @@ enum wmi_event_id {
738 WMI_GET_SSID_EVENTID = 0x1828, 843 WMI_GET_SSID_EVENTID = 0x1828,
739 WMI_GET_PCP_CHANNEL_EVENTID = 0x182a, 844 WMI_GET_PCP_CHANNEL_EVENTID = 0x182a,
740 WMI_SW_TX_COMPLETE_EVENTID = 0x182b, 845 WMI_SW_TX_COMPLETE_EVENTID = 0x182b,
741 WMI_RX_OFF_DONE_EVENTID = 0x182c,
742 846
743 WMI_READ_MAC_RXQ_EVENTID = 0x1830, 847 WMI_READ_MAC_RXQ_EVENTID = 0x1830,
744 WMI_READ_MAC_TXQ_EVENTID = 0x1831, 848 WMI_READ_MAC_TXQ_EVENTID = 0x1831,
@@ -765,7 +869,16 @@ enum wmi_event_id {
765 WMI_UNIT_TEST_EVENTID = 0x1900, 869 WMI_UNIT_TEST_EVENTID = 0x1900,
766 WMI_FLASH_READ_DONE_EVENTID = 0x1902, 870 WMI_FLASH_READ_DONE_EVENTID = 0x1902,
767 WMI_FLASH_WRITE_DONE_EVENTID = 0x1903, 871 WMI_FLASH_WRITE_DONE_EVENTID = 0x1903,
768 872 /* P2P */
873 WMI_PORT_ALLOCATED_EVENTID = 0x1911,
874 WMI_PORT_DELETED_EVENTID = 0x1912,
875 WMI_LISTEN_STARTED_EVENTID = 0x1914,
876 WMI_SEARCH_STARTED_EVENTID = 0x1915,
877 WMI_DISCOVERY_STARTED_EVENTID = 0x1916,
878 WMI_DISCOVERY_STOPPED_EVENTID = 0x1917,
879 WMI_PCP_STARTED_EVENTID = 0x1918,
880 WMI_PCP_STOPPED_EVENTID = 0x1919,
881 WMI_PCP_FACTOR_EVENTID = 0x191a,
769 WMI_SET_CHANNEL_EVENTID = 0x9000, 882 WMI_SET_CHANNEL_EVENTID = 0x9000,
770 WMI_ASSOC_REQ_EVENTID = 0x9001, 883 WMI_ASSOC_REQ_EVENTID = 0x9001,
771 WMI_EAPOL_RX_EVENTID = 0x9002, 884 WMI_EAPOL_RX_EVENTID = 0x9002,
@@ -777,6 +890,12 @@ enum wmi_event_id {
777 * Events data structures 890 * Events data structures
778 */ 891 */
779 892
893
894enum wmi_fw_status {
895 WMI_FW_STATUS_SUCCESS,
896 WMI_FW_STATUS_FAILURE,
897};
898
780/* 899/*
781 * WMI_RF_MGMT_STATUS_EVENTID 900 * WMI_RF_MGMT_STATUS_EVENTID
782 */ 901 */
@@ -857,7 +976,7 @@ struct wmi_ready_event {
857 __le32 abi_version; 976 __le32 abi_version;
858 u8 mac[WMI_MAC_LEN]; 977 u8 mac[WMI_MAC_LEN];
859 u8 phy_capability; /* enum wmi_phy_capability */ 978 u8 phy_capability; /* enum wmi_phy_capability */
860 u8 reserved; 979 u8 numof_additional_mids;
861} __packed; 980} __packed;
862 981
863/* 982/*
@@ -876,6 +995,8 @@ struct wmi_notify_req_done_event {
876 __le16 other_rx_sector; 995 __le16 other_rx_sector;
877 __le16 other_tx_sector; 996 __le16 other_tx_sector;
878 __le16 range; 997 __le16 range;
998 u8 sqi;
999 u8 reserved[3];
879} __packed; 1000} __packed;
880 1001
881/* 1002/*
@@ -951,27 +1072,15 @@ struct wmi_vring_ba_status_event {
951 * WMI_DELBA_EVENTID 1072 * WMI_DELBA_EVENTID
952 */ 1073 */
953struct wmi_delba_event { 1074struct wmi_delba_event {
954
955 #define CIDXTID_CID_POS (0)
956 #define CIDXTID_CID_LEN (4)
957 #define CIDXTID_CID_MSK (0xF)
958 #define CIDXTID_TID_POS (4)
959 #define CIDXTID_TID_LEN (4)
960 #define CIDXTID_TID_MSK (0xF0)
961 u8 cidxtid; 1075 u8 cidxtid;
962
963 u8 from_initiator; 1076 u8 from_initiator;
964 __le16 reason; 1077 __le16 reason;
965} __packed; 1078} __packed;
966 1079
1080
967/* 1081/*
968 * WMI_VRING_CFG_DONE_EVENTID 1082 * WMI_VRING_CFG_DONE_EVENTID
969 */ 1083 */
970enum wmi_vring_cfg_done_event_status {
971 WMI_VRING_CFG_SUCCESS = 0,
972 WMI_VRING_CFG_FAILURE = 1,
973};
974
975struct wmi_vring_cfg_done_event { 1084struct wmi_vring_cfg_done_event {
976 u8 ringid; 1085 u8 ringid;
977 u8 status; 1086 u8 status;
@@ -982,21 +1091,8 @@ struct wmi_vring_cfg_done_event {
982/* 1091/*
983 * WMI_ADDBA_RESP_SENT_EVENTID 1092 * WMI_ADDBA_RESP_SENT_EVENTID
984 */ 1093 */
985enum wmi_rcp_addba_resp_sent_event_status {
986 WMI_ADDBA_SUCCESS = 0,
987 WMI_ADDBA_FAIL = 1,
988};
989
990struct wmi_rcp_addba_resp_sent_event { 1094struct wmi_rcp_addba_resp_sent_event {
991
992 #define CIDXTID_CID_POS (0)
993 #define CIDXTID_CID_LEN (4)
994 #define CIDXTID_CID_MSK (0xF)
995 #define CIDXTID_TID_POS (4)
996 #define CIDXTID_TID_LEN (4)
997 #define CIDXTID_TID_MSK (0xF0)
998 u8 cidxtid; 1095 u8 cidxtid;
999
1000 u8 reserved; 1096 u8 reserved;
1001 __le16 status; 1097 __le16 status;
1002} __packed; 1098} __packed;
@@ -1005,15 +1101,7 @@ struct wmi_rcp_addba_resp_sent_event {
1005 * WMI_RCP_ADDBA_REQ_EVENTID 1101 * WMI_RCP_ADDBA_REQ_EVENTID
1006 */ 1102 */
1007struct wmi_rcp_addba_req_event { 1103struct wmi_rcp_addba_req_event {
1008
1009 #define CIDXTID_CID_POS (0)
1010 #define CIDXTID_CID_LEN (4)
1011 #define CIDXTID_CID_MSK (0xF)
1012 #define CIDXTID_TID_POS (4)
1013 #define CIDXTID_TID_LEN (4)
1014 #define CIDXTID_TID_MSK (0xF0)
1015 u8 cidxtid; 1104 u8 cidxtid;
1016
1017 u8 dialog_token; 1105 u8 dialog_token;
1018 __le16 ba_param_set; /* ieee80211_ba_parameterset as it was received */ 1106
1019 __le16 ba_timeout; 1107 __le16 ba_timeout;
@@ -1055,6 +1143,7 @@ struct wmi_data_port_open_event {
1055 u8 reserved[3]; 1143 u8 reserved[3];
1056} __packed; 1144} __packed;
1057 1145
1146
1058/* 1147/*
1059 * WMI_GET_PCP_CHANNEL_EVENTID 1148 * WMI_GET_PCP_CHANNEL_EVENTID
1060 */ 1149 */
@@ -1063,6 +1152,54 @@ struct wmi_get_pcp_channel_event {
1063 u8 reserved[3]; 1152 u8 reserved[3];
1064} __packed; 1153} __packed;
1065 1154
1155
1156/*
1157* WMI_PORT_ALLOCATED_EVENTID
1158*/
1159struct wmi_port_allocated_event {
1160 u8 status; /* wmi_fw_status */
1161 u8 reserved[3];
1162} __packed;
1163
1164/*
1165* WMI_PORT_DELETED_EVENTID
1166*/
1167struct wmi_port_deleted_event {
1168 u8 status; /* wmi_fw_status */
1169 u8 reserved[3];
1170} __packed;
1171
1172/*
1173 * WMI_LISTEN_STARTED_EVENTID
1174 */
1175struct wmi_listen_started_event {
1176 u8 status; /* wmi_fw_status */
1177 u8 reserved[3];
1178} __packed;
1179
1180/*
1181 * WMI_SEARCH_STARTED_EVENTID
1182 */
1183struct wmi_search_started_event {
1184 u8 status; /* wmi_fw_status */
1185 u8 reserved[3];
1186} __packed;
1187
1188/*
1189 * WMI_PCP_STARTED_EVENTID
1190 */
1191struct wmi_pcp_started_event {
1192 u8 status; /* wmi_fw_status */
1193 u8 reserved[3];
1194} __packed;
1195
1196/*
1197 * WMI_PCP_FACTOR_EVENTID
1198 */
1199struct wmi_pcp_factor_event {
1200 __le32 pcp_factor;
1201} __packed;
1202
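
All of the P2P completion events introduced above (port allocated/deleted, listen/search started, PCP started) report their outcome through the same 4-byte payload: a status byte holding an enum wmi_fw_status plus padding. A hedged dispatch sketch — the shared struct alias and the handler below are illustrative, not the driver's actual code:

#include <stdint.h>
#include <stdio.h>

enum wmi_fw_status_sketch {	/* mirrors enum wmi_fw_status above */
	WMI_FW_STATUS_SUCCESS,
	WMI_FW_STATUS_FAILURE,
};

/* Hypothetical alias for the common layout shared by the
 * wmi_port_allocated/deleted, listen/search/pcp started events. */
struct wmi_p2p_status_event {
	uint8_t status;		/* enum wmi_fw_status */
	uint8_t reserved[3];
};

static void handle_p2p_event(uint16_t id, const void *buf)
{
	const struct wmi_p2p_status_event *evt = buf;

	switch (id) {
	case 0x1911:	/* WMI_PORT_ALLOCATED_EVENTID */
	case 0x1912:	/* WMI_PORT_DELETED_EVENTID */
	case 0x1914:	/* WMI_LISTEN_STARTED_EVENTID */
	case 0x1915:	/* WMI_SEARCH_STARTED_EVENTID */
	case 0x1918:	/* WMI_PCP_STARTED_EVENTID */
		if (evt->status != WMI_FW_STATUS_SUCCESS)
			fprintf(stderr, "wmi: event 0x%04x failed\n", id);
		break;
	}
}

int main(void)
{
	struct wmi_p2p_status_event evt = { .status = WMI_FW_STATUS_FAILURE };

	handle_p2p_event(0x1918, &evt);	/* prints the failure line */
	return 0;
}
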
1066/* 1203/*
1067 * WMI_SW_TX_COMPLETE_EVENTID 1204 * WMI_SW_TX_COMPLETE_EVENTID
1068 */ 1205 */
@@ -1078,6 +1215,23 @@ struct wmi_sw_tx_complete_event {
1078} __packed; 1215} __packed;
1079 1216
1080/* 1217/*
1218 * WMI_CORR_MEASURE_EVENTID
1219 */
1220struct wmi_corr_measure_event {
1221 s32 i;
1222 s32 q;
1223 s32 image_i;
1224 s32 image_q;
1225} __packed;
1226
1227/*
1228 * WMI_READ_RSSI_EVENTID
1229 */
1230struct wmi_read_rssi_event {
1231 __le32 ina_rssi_adc_dbm;
1232} __packed;
1233
1234/*
1081 * WMI_GET_SSID_EVENTID 1235 * WMI_GET_SSID_EVENTID
1082 */ 1236 */
1083struct wmi_get_ssid_event { 1237struct wmi_get_ssid_event {
@@ -1091,7 +1245,8 @@ struct wmi_get_ssid_event {
1091struct wmi_rx_mgmt_info { 1245struct wmi_rx_mgmt_info {
1092 u8 mcs; 1246 u8 mcs;
1093 s8 snr; 1247 s8 snr;
1094 __le16 range; 1248 u8 range;
1249 u8 sqi;
1095 __le16 stype; 1250 __le16 stype;
1096 __le16 status; 1251 __le16 status;
1097 __le32 len; 1252 __le32 len;
@@ -1113,4 +1268,14 @@ struct wmi_echo_event {
1113 __le32 echoed_value; 1268 __le32 echoed_value;
1114} __packed; 1269} __packed;
1115 1270
1271/*
1272 * WMI_TEMP_SENSE_DONE_EVENTID
1273 *
1274 * Measure MAC and radio temperatures
1275 */
1276struct wmi_temp_sense_done_event {
1277 __le32 marlon_m_t1000;
1278 __le32 marlon_r_t1000;
1279} __packed;
1280
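
This completion event pairs with the wmi_temp_sense_cmd added next to WMI_TEMP_SENSE_CMDID earlier in this header. A kernel-style sketch of the round trip; wmi_send() and wmi_recv_event() are hypothetical stand-ins for the driver's real WMI transport, and reading the *_t1000 fields as thousandths of a degree is an assumption based on the field names:

/* Sketch only: wmi_send()/wmi_recv_event() are placeholder names,
 * not actual wil6210 helpers. */
static int read_temperatures(void *wmi, s32 *mac_mdeg, s32 *radio_mdeg)
{
	struct wmi_temp_sense_cmd cmd = {
		.measure_marlon_m_en = cpu_to_le32(1),	/* MAC sensor */
		.measure_marlon_r_en = cpu_to_le32(1),	/* radio sensor */
	};
	struct wmi_temp_sense_done_event evt;
	int rc;

	rc = wmi_send(wmi, WMI_TEMP_SENSE_CMDID, &cmd, sizeof(cmd));
	if (rc)
		return rc;
	rc = wmi_recv_event(wmi, WMI_TEMP_SENSE_DONE_EVENTID,
			    &evt, sizeof(evt));
	if (rc)
		return rc;

	/* Assumed scaling: marlon_*_t1000 is 1/1000 of a degree. */
	*mac_mdeg = le32_to_cpu(evt.marlon_m_t1000);
	*radio_mdeg = le32_to_cpu(evt.marlon_r_t1000);
	return 0;
}
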
1116#endif /* __WILOCITY_WMI_H__ */ 1281#endif /* __WILOCITY_WMI_H__ */
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 287c6b670a36..078e6f3477a9 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -131,7 +131,7 @@ config B43_PHY_LP
131 131
132config B43_PHY_HT 132config B43_PHY_HT
133 bool "Support for HT-PHY (high throughput) devices" 133 bool "Support for HT-PHY (high throughput) devices"
134 depends on B43 134 depends on B43 && B43_BCMA
135 ---help--- 135 ---help---
136 Support for the HT-PHY. 136 Support for the HT-PHY.
137 137
@@ -166,8 +166,8 @@ config B43_DEBUG
166 Broadcom 43xx debugging. 166 Broadcom 43xx debugging.
167 167
168 This adds additional runtime sanity checks and statistics to the driver. 168 This adds additional runtime sanity checks and statistics to the driver.
169 These checks and statistics might me expensive and hurt runtime performance 169 These checks and statistics might be expensive and hurt the runtime
170 of your system. 170 performance of your system.
171 This also adds the b43 debugfs interface. 171 This also adds the b43 debugfs interface.
172 172
173 Do not enable this, unless you are debugging the driver. 173 Do not enable this, unless you are debugging the driver.
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 10e288d470e7..fe4a77ee05c9 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -473,6 +473,12 @@ enum {
473#define B43_MACCMD_CCA 0x00000008 /* Clear channel assessment */ 473#define B43_MACCMD_CCA 0x00000008 /* Clear channel assessment */
474#define B43_MACCMD_BGNOISE 0x00000010 /* Background noise */ 474#define B43_MACCMD_BGNOISE 0x00000010 /* Background noise */
475 475
476/* See BCMA_CLKCTLST_EXTRESREQ and BCMA_CLKCTLST_EXTRESST */
477#define B43_BCMA_CLKCTLST_80211_PLL_REQ 0x00000100
478#define B43_BCMA_CLKCTLST_PHY_PLL_REQ 0x00000200
479#define B43_BCMA_CLKCTLST_80211_PLL_ST 0x01000000
480#define B43_BCMA_CLKCTLST_PHY_PLL_ST 0x02000000
481
476/* BCMA 802.11 core specific IO Control (BCMA_IOCTL) flags */ 482/* BCMA 802.11 core specific IO Control (BCMA_IOCTL) flags */
477#define B43_BCMA_IOCTL_PHY_CLKEN 0x00000004 /* PHY Clock Enable */ 483#define B43_BCMA_IOCTL_PHY_CLKEN 0x00000004 /* PHY Clock Enable */
478#define B43_BCMA_IOCTL_PHY_RESET 0x00000008 /* PHY Reset */ 484#define B43_BCMA_IOCTL_PHY_RESET 0x00000008 /* PHY Reset */
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 122146943bf2..523355b87659 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -419,8 +419,6 @@ static inline
419 419
420static int alloc_ringmemory(struct b43_dmaring *ring) 420static int alloc_ringmemory(struct b43_dmaring *ring)
421{ 421{
422 gfp_t flags = GFP_KERNEL;
423
424 /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K 422 /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
425 * alignment and 8K buffers for 64-bit DMA with 8K alignment. 423 * alignment and 8K buffers for 64-bit DMA with 8K alignment.
426 * In practice we could use smaller buffers for the latter, but the 424 * In practice we could use smaller buffers for the latter, but the
@@ -435,12 +433,9 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
435 433
436 ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev, 434 ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
437 ring_mem_size, &(ring->dmabase), 435 ring_mem_size, &(ring->dmabase),
438 flags); 436 GFP_KERNEL | __GFP_ZERO);
439 if (!ring->descbase) { 437 if (!ring->descbase)
440 b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
441 return -ENOMEM; 438 return -ENOMEM;
442 }
443 memset(ring->descbase, 0, ring_mem_size);
444 439
445 return 0; 440 return 0;
446} 441}
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 05682736e466..c4d0cc582555 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -1189,10 +1189,15 @@ static void b43_bcma_phy_reset(struct b43_wldev *dev)
1189 1189
1190static void b43_bcma_wireless_core_reset(struct b43_wldev *dev, bool gmode) 1190static void b43_bcma_wireless_core_reset(struct b43_wldev *dev, bool gmode)
1191{ 1191{
1192 u32 req = B43_BCMA_CLKCTLST_80211_PLL_REQ |
1193 B43_BCMA_CLKCTLST_PHY_PLL_REQ;
1194 u32 status = B43_BCMA_CLKCTLST_80211_PLL_ST |
1195 B43_BCMA_CLKCTLST_PHY_PLL_ST;
1196
1192 b43_device_enable(dev, B43_BCMA_IOCTL_PHY_CLKEN); 1197 b43_device_enable(dev, B43_BCMA_IOCTL_PHY_CLKEN);
1193 bcma_core_set_clockmode(dev->dev->bdev, BCMA_CLKMODE_FAST); 1198 bcma_core_set_clockmode(dev->dev->bdev, BCMA_CLKMODE_FAST);
1194 b43_bcma_phy_reset(dev); 1199 b43_bcma_phy_reset(dev);
1195 bcma_core_pll_ctl(dev->dev->bdev, 0x300, 0x3000000, true); 1200 bcma_core_pll_ctl(dev->dev->bdev, req, status, true);
1196} 1201}
1197#endif 1202#endif
1198 1203
diff --git a/drivers/net/wireless/b43/phy_ht.c b/drivers/net/wireless/b43/phy_ht.c
index 7416c5e9154d..b8667706fc27 100644
--- a/drivers/net/wireless/b43/phy_ht.c
+++ b/drivers/net/wireless/b43/phy_ht.c
@@ -154,9 +154,84 @@ static void b43_radio_2059_init(struct b43_wldev *dev)
154} 154}
155 155
156/************************************************** 156/**************************************************
157 * RF
158 **************************************************/
159
160static void b43_phy_ht_force_rf_sequence(struct b43_wldev *dev, u16 rf_seq)
161{
162 u8 i;
163
164 u16 save_seq_mode = b43_phy_read(dev, B43_PHY_HT_RF_SEQ_MODE);
165 b43_phy_set(dev, B43_PHY_HT_RF_SEQ_MODE, 0x3);
166
167 b43_phy_set(dev, B43_PHY_HT_RF_SEQ_TRIG, rf_seq);
168 for (i = 0; i < 200; i++) {
169 if (!(b43_phy_read(dev, B43_PHY_HT_RF_SEQ_STATUS) & rf_seq)) {
170 i = 0;
171 break;
172 }
173 msleep(1);
174 }
175 if (i)
176 b43err(dev->wl, "Forcing RF sequence timeout\n");
177
178 b43_phy_write(dev, B43_PHY_HT_RF_SEQ_MODE, save_seq_mode);
179}
180
181static void b43_phy_ht_pa_override(struct b43_wldev *dev, bool enable)
182{
183 struct b43_phy_ht *htphy = dev->phy.ht;
184 static const u16 regs[3] = { B43_PHY_HT_RF_CTL_INT_C1,
185 B43_PHY_HT_RF_CTL_INT_C2,
186 B43_PHY_HT_RF_CTL_INT_C3 };
187 int i;
188
189 if (enable) {
190 for (i = 0; i < 3; i++)
191 b43_phy_write(dev, regs[i], htphy->rf_ctl_int_save[i]);
192 } else {
193 for (i = 0; i < 3; i++)
194 htphy->rf_ctl_int_save[i] = b43_phy_read(dev, regs[i]);
195 /* TODO: Does 5GHz band use different value (not 0x0400)? */
196 for (i = 0; i < 3; i++)
197 b43_phy_write(dev, regs[i], 0x0400);
198 }
199}
200
201/**************************************************
157 * Various PHY ops 202 * Various PHY ops
158 **************************************************/ 203 **************************************************/
159 204
205static u16 b43_phy_ht_classifier(struct b43_wldev *dev, u16 mask, u16 val)
206{
207 u16 tmp;
208 u16 allowed = B43_PHY_HT_CLASS_CTL_CCK_EN |
209 B43_PHY_HT_CLASS_CTL_OFDM_EN |
210 B43_PHY_HT_CLASS_CTL_WAITED_EN;
211
212 tmp = b43_phy_read(dev, B43_PHY_HT_CLASS_CTL);
213 tmp &= allowed;
214 tmp &= ~mask;
215 tmp |= (val & mask);
216 b43_phy_maskset(dev, B43_PHY_HT_CLASS_CTL, ~allowed, tmp);
217
218 return tmp;
219}
220
221static void b43_phy_ht_reset_cca(struct b43_wldev *dev)
222{
223 u16 bbcfg;
224
225 b43_phy_force_clock(dev, true);
226 bbcfg = b43_phy_read(dev, B43_PHY_HT_BBCFG);
227 b43_phy_write(dev, B43_PHY_HT_BBCFG, bbcfg | B43_PHY_HT_BBCFG_RSTCCA);
228 udelay(1);
229 b43_phy_write(dev, B43_PHY_HT_BBCFG, bbcfg & ~B43_PHY_HT_BBCFG_RSTCCA);
230 b43_phy_force_clock(dev, false);
231
232 b43_phy_ht_force_rf_sequence(dev, B43_PHY_HT_RF_SEQ_TRIG_RST2RX);
233}
234
160static void b43_phy_ht_zero_extg(struct b43_wldev *dev) 235static void b43_phy_ht_zero_extg(struct b43_wldev *dev)
161{ 236{
162 u8 i, j; 237 u8 i, j;
@@ -176,10 +251,10 @@ static void b43_phy_ht_afe_unk1(struct b43_wldev *dev)
176{ 251{
177 u8 i; 252 u8 i;
178 253
179 const u16 ctl_regs[3][2] = { 254 static const u16 ctl_regs[3][2] = {
180 { B43_PHY_HT_AFE_CTL1, B43_PHY_HT_AFE_CTL2 }, 255 { B43_PHY_HT_AFE_C1_OVER, B43_PHY_HT_AFE_C1 },
181 { B43_PHY_HT_AFE_CTL3, B43_PHY_HT_AFE_CTL4 }, 256 { B43_PHY_HT_AFE_C2_OVER, B43_PHY_HT_AFE_C2 },
182 { B43_PHY_HT_AFE_CTL5, B43_PHY_HT_AFE_CTL6}, 257 { B43_PHY_HT_AFE_C3_OVER, B43_PHY_HT_AFE_C3},
183 }; 258 };
184 259
185 for (i = 0; i < 3; i++) { 260 for (i = 0; i < 3; i++) {
@@ -193,27 +268,6 @@ static void b43_phy_ht_afe_unk1(struct b43_wldev *dev)
193 } 268 }
194} 269}
195 270
196static void b43_phy_ht_force_rf_sequence(struct b43_wldev *dev, u16 rf_seq)
197{
198 u8 i;
199
200 u16 save_seq_mode = b43_phy_read(dev, B43_PHY_HT_RF_SEQ_MODE);
201 b43_phy_set(dev, B43_PHY_HT_RF_SEQ_MODE, 0x3);
202
203 b43_phy_set(dev, B43_PHY_HT_RF_SEQ_TRIG, rf_seq);
204 for (i = 0; i < 200; i++) {
205 if (!(b43_phy_read(dev, B43_PHY_HT_RF_SEQ_STATUS) & rf_seq)) {
206 i = 0;
207 break;
208 }
209 msleep(1);
210 }
211 if (i)
212 b43err(dev->wl, "Forcing RF sequence timeout\n");
213
214 b43_phy_write(dev, B43_PHY_HT_RF_SEQ_MODE, save_seq_mode);
215}
216
217static void b43_phy_ht_read_clip_detection(struct b43_wldev *dev, u16 *clip_st) 271static void b43_phy_ht_read_clip_detection(struct b43_wldev *dev, u16 *clip_st)
218{ 272{
219 clip_st[0] = b43_phy_read(dev, B43_PHY_HT_C1_CLIP1THRES); 273 clip_st[0] = b43_phy_read(dev, B43_PHY_HT_C1_CLIP1THRES);
@@ -240,15 +294,426 @@ static void b43_phy_ht_bphy_init(struct b43_wldev *dev)
240} 294}
241 295
242/************************************************** 296/**************************************************
297 * Samples
298 **************************************************/
299
300static void b43_phy_ht_stop_playback(struct b43_wldev *dev)
301{
302 struct b43_phy_ht *phy_ht = dev->phy.ht;
303 u16 tmp;
304 int i;
305
306 tmp = b43_phy_read(dev, B43_PHY_HT_SAMP_STAT);
307 if (tmp & 0x1)
308 b43_phy_set(dev, B43_PHY_HT_SAMP_CMD, B43_PHY_HT_SAMP_CMD_STOP);
309 else if (tmp & 0x2)
310 b43_phy_mask(dev, B43_PHY_HT_IQLOCAL_CMDGCTL, 0x7FFF);
311
312 b43_phy_mask(dev, B43_PHY_HT_SAMP_CMD, ~0x0004);
313
314 for (i = 0; i < 3; i++) {
315 if (phy_ht->bb_mult_save[i] >= 0) {
316 b43_httab_write(dev, B43_HTTAB16(13, 0x63 + i * 4),
317 phy_ht->bb_mult_save[i]);
318 b43_httab_write(dev, B43_HTTAB16(13, 0x67 + i * 4),
319 phy_ht->bb_mult_save[i]);
320 }
321 }
322}
323
324static u16 b43_phy_ht_load_samples(struct b43_wldev *dev)
325{
326 int i;
327 u16 len = 20 << 3;
328
329 b43_phy_write(dev, B43_PHY_HT_TABLE_ADDR, 0x4400);
330
331 for (i = 0; i < len; i++) {
332 b43_phy_write(dev, B43_PHY_HT_TABLE_DATAHI, 0);
333 b43_phy_write(dev, B43_PHY_HT_TABLE_DATALO, 0);
334 }
335
336 return len;
337}
338
339static void b43_phy_ht_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
340 u16 wait)
341{
342 struct b43_phy_ht *phy_ht = dev->phy.ht;
343 u16 save_seq_mode;
344 int i;
345
346 for (i = 0; i < 3; i++) {
347 if (phy_ht->bb_mult_save[i] < 0)
348 phy_ht->bb_mult_save[i] = b43_httab_read(dev, B43_HTTAB16(13, 0x63 + i * 4));
349 }
350
351 b43_phy_write(dev, B43_PHY_HT_SAMP_DEP_CNT, samps - 1);
352 if (loops != 0xFFFF)
353 loops--;
354 b43_phy_write(dev, B43_PHY_HT_SAMP_LOOP_CNT, loops);
355 b43_phy_write(dev, B43_PHY_HT_SAMP_WAIT_CNT, wait);
356
357 save_seq_mode = b43_phy_read(dev, B43_PHY_HT_RF_SEQ_MODE);
358 b43_phy_set(dev, B43_PHY_HT_RF_SEQ_MODE,
359 B43_PHY_HT_RF_SEQ_MODE_CA_OVER);
360
361 /* TODO: find out mask bits! Do we need more function arguments? */
362 b43_phy_mask(dev, B43_PHY_HT_SAMP_CMD, ~0);
363 b43_phy_mask(dev, B43_PHY_HT_SAMP_CMD, ~0);
364 b43_phy_mask(dev, B43_PHY_HT_IQLOCAL_CMDGCTL, ~0);
365 b43_phy_set(dev, B43_PHY_HT_SAMP_CMD, 0x1);
366
367 for (i = 0; i < 100; i++) {
368 if (!(b43_phy_read(dev, B43_PHY_HT_RF_SEQ_STATUS) & 1)) {
369 i = 0;
370 break;
371 }
372 udelay(10);
373 }
374 if (i)
375 b43err(dev->wl, "run samples timeout\n");
376
377 b43_phy_write(dev, B43_PHY_HT_RF_SEQ_MODE, save_seq_mode);
378}
379
380static void b43_phy_ht_tx_tone(struct b43_wldev *dev)
381{
382 u16 samp;
383
384 samp = b43_phy_ht_load_samples(dev);
385 b43_phy_ht_run_samples(dev, samp, 0xFFFF, 0);
386}
387
388/**************************************************
389 * RSSI
390 **************************************************/
391
392static void b43_phy_ht_rssi_select(struct b43_wldev *dev, u8 core_sel,
393 u8 rssi_type)
394{
395 static const u16 ctl_regs[3][2] = {
396 { B43_PHY_HT_AFE_C1, B43_PHY_HT_AFE_C1_OVER, },
397 { B43_PHY_HT_AFE_C2, B43_PHY_HT_AFE_C2_OVER, },
398 { B43_PHY_HT_AFE_C3, B43_PHY_HT_AFE_C3_OVER, },
399 };
400 static const u16 radio_r[] = { R2059_SYN, R2059_TXRX0, R2059_RXRX1, };
401 int core;
402
403 if (core_sel == 0) {
404 b43err(dev->wl, "RSSI selection for core off not implemented yet\n");
405 } else {
406 for (core = 0; core < 3; core++) {
407 /* Check if caller requested one specific core */
408 if ((core_sel == 1 && core != 0) ||
409 (core_sel == 2 && core != 1) ||
410 (core_sel == 3 && core != 2))
411 continue;
412
413 switch (rssi_type) {
414 case 4:
415 b43_phy_set(dev, ctl_regs[core][0], 0x3 << 8);
416 b43_phy_set(dev, ctl_regs[core][0], 0x3 << 10);
417 b43_phy_set(dev, ctl_regs[core][1], 0x1 << 9);
418 b43_phy_set(dev, ctl_regs[core][1], 0x1 << 10);
419
420 b43_radio_set(dev, R2059_RXRX1 | 0xbf, 0x1);
421 b43_radio_write(dev, radio_r[core] | 0x159,
422 0x11);
423 break;
424 default:
425 b43err(dev->wl, "RSSI selection for type %d not implemented yet\n",
426 rssi_type);
427 }
428 }
429 }
430}
431
432static void b43_phy_ht_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf,
433 u8 nsamp)
434{
435 u16 phy_regs_values[12];
436 static const u16 phy_regs_to_save[] = {
437 B43_PHY_HT_AFE_C1, B43_PHY_HT_AFE_C1_OVER,
438 0x848, 0x841,
439 B43_PHY_HT_AFE_C2, B43_PHY_HT_AFE_C2_OVER,
440 0x868, 0x861,
441 B43_PHY_HT_AFE_C3, B43_PHY_HT_AFE_C3_OVER,
442 0x888, 0x881,
443 };
444 u16 tmp[3];
445 int i;
446
447 for (i = 0; i < 12; i++)
448 phy_regs_values[i] = b43_phy_read(dev, phy_regs_to_save[i]);
449
450 b43_phy_ht_rssi_select(dev, 5, type);
451
452 for (i = 0; i < 6; i++)
453 buf[i] = 0;
454
455 for (i = 0; i < nsamp; i++) {
456 tmp[0] = b43_phy_read(dev, B43_PHY_HT_RSSI_C1);
457 tmp[1] = b43_phy_read(dev, B43_PHY_HT_RSSI_C2);
458 tmp[2] = b43_phy_read(dev, B43_PHY_HT_RSSI_C3);
459
460 buf[0] += ((s8)((tmp[0] & 0x3F) << 2)) >> 2;
461 buf[1] += ((s8)(((tmp[0] >> 8) & 0x3F) << 2)) >> 2;
462 buf[2] += ((s8)((tmp[1] & 0x3F) << 2)) >> 2;
463 buf[3] += ((s8)(((tmp[1] >> 8) & 0x3F) << 2)) >> 2;
464 buf[4] += ((s8)((tmp[2] & 0x3F) << 2)) >> 2;
465 buf[5] += ((s8)(((tmp[2] >> 8) & 0x3F) << 2)) >> 2;
466 }
467
468 for (i = 0; i < 12; i++)
469 b43_phy_write(dev, phy_regs_to_save[i], phy_regs_values[i]);
470}
471
472/**************************************************
473 * Tx/Rx
474 **************************************************/
475
476static void b43_phy_ht_tx_power_fix(struct b43_wldev *dev)
477{
478 int i;
479
480 for (i = 0; i < 3; i++) {
481 u16 mask;
482 u32 tmp = b43_httab_read(dev, B43_HTTAB32(26, 0xE8));
483
484 if (0) /* FIXME */
485 mask = 0x2 << (i * 4);
486 else
487 mask = 0;
488 b43_phy_mask(dev, B43_PHY_EXTG(0x108), mask);
489
490 b43_httab_write(dev, B43_HTTAB16(7, 0x110 + i), tmp >> 16);
491 b43_httab_write(dev, B43_HTTAB8(13, 0x63 + (i * 4)),
492 tmp & 0xFF);
493 b43_httab_write(dev, B43_HTTAB8(13, 0x73 + (i * 4)),
494 tmp & 0xFF);
495 }
496}
497
498static void b43_phy_ht_tx_power_ctl(struct b43_wldev *dev, bool enable)
499{
500 struct b43_phy_ht *phy_ht = dev->phy.ht;
501 u16 en_bits = B43_PHY_HT_TXPCTL_CMD_C1_COEFF |
502 B43_PHY_HT_TXPCTL_CMD_C1_HWPCTLEN |
503 B43_PHY_HT_TXPCTL_CMD_C1_PCTLEN;
504 static const u16 cmd_regs[3] = { B43_PHY_HT_TXPCTL_CMD_C1,
505 B43_PHY_HT_TXPCTL_CMD_C2,
506 B43_PHY_HT_TXPCTL_CMD_C3 };
507 int i;
508
509 if (!enable) {
510 if (b43_phy_read(dev, B43_PHY_HT_TXPCTL_CMD_C1) & en_bits) {
511 /* We disable enabled TX pwr ctl, save its state */
512 /*
513 * TODO: find the registers. On N-PHY they were 0x1ed
514 * and 0x1ee; we need 3 such registers for the HT-PHY
515 */
516 }
517 b43_phy_mask(dev, B43_PHY_HT_TXPCTL_CMD_C1, ~en_bits);
518 } else {
519 b43_phy_set(dev, B43_PHY_HT_TXPCTL_CMD_C1, en_bits);
520
521 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
522 for (i = 0; i < 3; i++)
523 b43_phy_write(dev, cmd_regs[i], 0x32);
524 }
525
526 for (i = 0; i < 3; i++)
527 if (phy_ht->tx_pwr_idx[i] <=
528 B43_PHY_HT_TXPCTL_CMD_C1_INIT)
529 b43_phy_write(dev, cmd_regs[i],
530 phy_ht->tx_pwr_idx[i]);
531 }
532
533 phy_ht->tx_pwr_ctl = enable;
534}
535
536static void b43_phy_ht_tx_power_ctl_idle_tssi(struct b43_wldev *dev)
537{
538 struct b43_phy_ht *phy_ht = dev->phy.ht;
539 s32 rssi_buf[6];
540
541 /* TODO */
542
543 b43_phy_ht_tx_tone(dev);
544 udelay(20);
545 b43_phy_ht_poll_rssi(dev, 4, rssi_buf, 1);
546 b43_phy_ht_stop_playback(dev);
547 b43_phy_ht_reset_cca(dev);
548
549 phy_ht->idle_tssi[0] = rssi_buf[0] & 0xff;
550 phy_ht->idle_tssi[1] = rssi_buf[2] & 0xff;
551 phy_ht->idle_tssi[2] = rssi_buf[4] & 0xff;
552
553 /* TODO */
554}
555
556static void b43_phy_ht_tx_power_ctl_setup(struct b43_wldev *dev)
557{
558 struct b43_phy_ht *phy_ht = dev->phy.ht;
559 struct ssb_sprom *sprom = dev->dev->bus_sprom;
560
561 u8 *idle = phy_ht->idle_tssi;
562 u8 target[3];
563 s16 a1[3], b0[3], b1[3];
564
565 u16 freq = dev->phy.channel_freq;
566 int i, c;
567
568 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
569 for (c = 0; c < 3; c++) {
570 target[c] = sprom->core_pwr_info[c].maxpwr_2g;
571 a1[c] = sprom->core_pwr_info[c].pa_2g[0];
572 b0[c] = sprom->core_pwr_info[c].pa_2g[1];
573 b1[c] = sprom->core_pwr_info[c].pa_2g[2];
574 }
575 } else if (freq >= 4900 && freq < 5100) {
576 for (c = 0; c < 3; c++) {
577 target[c] = sprom->core_pwr_info[c].maxpwr_5gl;
578 a1[c] = sprom->core_pwr_info[c].pa_5gl[0];
579 b0[c] = sprom->core_pwr_info[c].pa_5gl[1];
580 b1[c] = sprom->core_pwr_info[c].pa_5gl[2];
581 }
582 } else if (freq >= 5100 && freq < 5500) {
583 for (c = 0; c < 3; c++) {
584 target[c] = sprom->core_pwr_info[c].maxpwr_5g;
585 a1[c] = sprom->core_pwr_info[c].pa_5g[0];
586 b0[c] = sprom->core_pwr_info[c].pa_5g[1];
587 b1[c] = sprom->core_pwr_info[c].pa_5g[2];
588 }
589 } else if (freq >= 5500) {
590 for (c = 0; c < 3; c++) {
591 target[c] = sprom->core_pwr_info[c].maxpwr_5gh;
592 a1[c] = sprom->core_pwr_info[c].pa_5gh[0];
593 b0[c] = sprom->core_pwr_info[c].pa_5gh[1];
594 b1[c] = sprom->core_pwr_info[c].pa_5gh[2];
595 }
596 } else {
597 target[0] = target[1] = target[2] = 52;
598 a1[0] = a1[1] = a1[2] = -424;
599 b0[0] = b0[1] = b0[2] = 5612;
600 b1[0] = b1[1] = b1[2] = -1393;
601 }
602
603 b43_phy_set(dev, B43_PHY_HT_TSSIMODE, B43_PHY_HT_TSSIMODE_EN);
604 b43_phy_mask(dev, B43_PHY_HT_TXPCTL_CMD_C1,
605 ~B43_PHY_HT_TXPCTL_CMD_C1_PCTLEN & 0xFFFF);
606
607 /* TODO: Does it depend on sprom->fem.ghz2.tssipos? */
608 b43_phy_set(dev, B43_PHY_HT_TXPCTL_IDLE_TSSI, 0x4000);
609
610 b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_CMD_C1,
611 ~B43_PHY_HT_TXPCTL_CMD_C1_INIT, 0x19);
612 b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_CMD_C2,
613 ~B43_PHY_HT_TXPCTL_CMD_C2_INIT, 0x19);
614 b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_CMD_C3,
615 ~B43_PHY_HT_TXPCTL_CMD_C3_INIT, 0x19);
616
617 b43_phy_set(dev, B43_PHY_HT_TXPCTL_IDLE_TSSI,
618 B43_PHY_HT_TXPCTL_IDLE_TSSI_BINF);
619
620 b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_IDLE_TSSI,
621 ~B43_PHY_HT_TXPCTL_IDLE_TSSI_C1,
622 idle[0] << B43_PHY_HT_TXPCTL_IDLE_TSSI_C1_SHIFT);
623 b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_IDLE_TSSI,
624 ~B43_PHY_HT_TXPCTL_IDLE_TSSI_C2,
625 idle[1] << B43_PHY_HT_TXPCTL_IDLE_TSSI_C2_SHIFT);
626 b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_IDLE_TSSI2,
627 ~B43_PHY_HT_TXPCTL_IDLE_TSSI2_C3,
628 idle[2] << B43_PHY_HT_TXPCTL_IDLE_TSSI2_C3_SHIFT);
629
630 b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_N, ~B43_PHY_HT_TXPCTL_N_TSSID,
631 0xf0);
632 b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_N, ~B43_PHY_HT_TXPCTL_N_NPTIL2,
633 0x3 << B43_PHY_HT_TXPCTL_N_NPTIL2_SHIFT);
634#if 0
635 /* TODO: what to mask/set? */
636 b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_CMD_C1, 0x800, 0)
637 b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_CMD_C1, 0x400, 0)
638#endif
639
640 b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_TARG_PWR,
641 ~B43_PHY_HT_TXPCTL_TARG_PWR_C1,
642 target[0] << B43_PHY_HT_TXPCTL_TARG_PWR_C1_SHIFT);
643 b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_TARG_PWR,
644 ~B43_PHY_HT_TXPCTL_TARG_PWR_C2 & 0xFFFF,
645 target[1] << B43_PHY_HT_TXPCTL_TARG_PWR_C2_SHIFT);
646 b43_phy_maskset(dev, B43_PHY_HT_TXPCTL_TARG_PWR2,
647 ~B43_PHY_HT_TXPCTL_TARG_PWR2_C3,
648 target[2] << B43_PHY_HT_TXPCTL_TARG_PWR2_C3_SHIFT);
649
650 for (c = 0; c < 3; c++) {
651 s32 num, den, pwr;
652 u32 regval[64];
653
654 for (i = 0; i < 64; i++) {
655 num = 8 * (16 * b0[c] + b1[c] * i);
656 den = 32768 + a1[c] * i;
657 pwr = max((4 * num + den / 2) / den, -8);
658 regval[i] = pwr;
659 }
660 b43_httab_write_bulk(dev, B43_HTTAB16(26 + c, 0), 64, regval);
661 }
662}
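
The closing loop evaluates the TSSI-to-power estimate in integer fixed point: num = 8 * (16*b0 + b1*i) and den = 32768 + a1*i, with den/2 added before the division for rounding and the result clamped at -8. A standalone run over the fallback coefficients taken from this very function (a1 = -424, b0 = 5612, b1 = -1393):

#include <stdio.h>

int main(void)
{
	/* Fallback coefficients from b43_phy_ht_tx_power_ctl_setup(). */
	const int a1 = -424, b0 = 5612, b1 = -1393;
	int i;

	for (i = 0; i < 64; i += 16) {
		int num = 8 * (16 * b0 + b1 * i);
		int den = 32768 + a1 * i;
		int pwr = (4 * num + den / 2) / den;	/* rounded divide */

		if (pwr < -8)	/* same clamp as max(..., -8) above */
			pwr = -8;
		printf("idx %2d -> pwr %d\n", i, pwr);
	}
	return 0;
}
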
663
664/**************************************************
243 * Channel switching ops. 665 * Channel switching ops.
244 **************************************************/ 666 **************************************************/
245 667
668static void b43_phy_ht_spur_avoid(struct b43_wldev *dev,
669 struct ieee80211_channel *new_channel)
670{
671 struct bcma_device *core = dev->dev->bdev;
672 int spuravoid = 0;
673 u16 tmp;
674
675 /* Checking for 13 and 14 is just a guess; we don't have enough logs. */
676 if (new_channel->hw_value == 13 || new_channel->hw_value == 14)
677 spuravoid = 1;
678 bcma_core_pll_ctl(core, B43_BCMA_CLKCTLST_PHY_PLL_REQ, 0, false);
679 bcma_pmu_spuravoid_pllupdate(&core->bus->drv_cc, spuravoid);
680 bcma_core_pll_ctl(core,
681 B43_BCMA_CLKCTLST_80211_PLL_REQ |
682 B43_BCMA_CLKCTLST_PHY_PLL_REQ,
683 B43_BCMA_CLKCTLST_80211_PLL_ST |
684 B43_BCMA_CLKCTLST_PHY_PLL_ST, false);
685
686 /* Values have been taken from wlc_bmac_switch_macfreq comments */
687 switch (spuravoid) {
688 case 2: /* 126MHz */
689 tmp = 0x2082;
690 break;
691 case 1: /* 123MHz */
692 tmp = 0x5341;
693 break;
694 default: /* 120MHz */
695 tmp = 0x8889;
696 }
697
698 b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW, tmp);
699 b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0x8);
700
701 /* TODO: reset PLL */
702
703 if (spuravoid)
704 b43_phy_set(dev, B43_PHY_HT_BBCFG, B43_PHY_HT_BBCFG_RSTRX);
705 else
706 b43_phy_mask(dev, B43_PHY_HT_BBCFG,
707 ~B43_PHY_HT_BBCFG_RSTRX & 0xFFFF);
708
709 b43_phy_ht_reset_cca(dev);
710}
711
246static void b43_phy_ht_channel_setup(struct b43_wldev *dev, 712static void b43_phy_ht_channel_setup(struct b43_wldev *dev,
247 const struct b43_phy_ht_channeltab_e_phy *e, 713 const struct b43_phy_ht_channeltab_e_phy *e,
248 struct ieee80211_channel *new_channel) 714 struct ieee80211_channel *new_channel)
249{ 715{
250 bool old_band_5ghz; 716 bool old_band_5ghz;
251 u8 i;
252 717
253 old_band_5ghz = b43_phy_read(dev, B43_PHY_HT_BANDCTL) & 0; /* FIXME */ 718 old_band_5ghz = b43_phy_read(dev, B43_PHY_HT_BANDCTL) & 0; /* FIXME */
254 if (new_channel->band == IEEE80211_BAND_5GHZ && !old_band_5ghz) { 719 if (new_channel->band == IEEE80211_BAND_5GHZ && !old_band_5ghz) {
@@ -264,25 +729,20 @@ static void b43_phy_ht_channel_setup(struct b43_wldev *dev,
264 b43_phy_write(dev, B43_PHY_HT_BW5, e->bw5); 729 b43_phy_write(dev, B43_PHY_HT_BW5, e->bw5);
265 b43_phy_write(dev, B43_PHY_HT_BW6, e->bw6); 730 b43_phy_write(dev, B43_PHY_HT_BW6, e->bw6);
266 731
267 /* TODO: some ops on PHY regs 0x0B0 and 0xC0A */ 732 if (new_channel->hw_value == 14) {
733 b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_OFDM_EN, 0);
734 b43_phy_set(dev, B43_PHY_HT_TEST, 0x0800);
735 } else {
736 b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_OFDM_EN,
737 B43_PHY_HT_CLASS_CTL_OFDM_EN);
738 if (new_channel->band == IEEE80211_BAND_2GHZ)
739 b43_phy_mask(dev, B43_PHY_HT_TEST, ~0x840);
740 }
268 741
269 /* TODO: separated function? */ 742 if (1) /* TODO: On N it's for early devices only, what about HT? */
270 for (i = 0; i < 3; i++) { 743 b43_phy_ht_tx_power_fix(dev);
271 u16 mask;
272 u32 tmp = b43_httab_read(dev, B43_HTTAB32(26, 0xE8));
273 744
274 if (0) /* FIXME */ 745 b43_phy_ht_spur_avoid(dev, new_channel);
275 mask = 0x2 << (i * 4);
276 else
277 mask = 0;
278 b43_phy_mask(dev, B43_PHY_EXTG(0x108), mask);
279
280 b43_httab_write(dev, B43_HTTAB16(7, 0x110 + i), tmp >> 16);
281 b43_httab_write(dev, B43_HTTAB8(13, 0x63 + (i * 4)),
282 tmp & 0xFF);
283 b43_httab_write(dev, B43_HTTAB8(13, 0x73 + (i * 4)),
284 tmp & 0xFF);
285 }
286 746
287 b43_phy_write(dev, 0x017e, 0x3830); 747 b43_phy_write(dev, 0x017e, 0x3830);
288} 748}
@@ -337,14 +797,29 @@ static void b43_phy_ht_op_prepare_structs(struct b43_wldev *dev)
337{ 797{
338 struct b43_phy *phy = &dev->phy; 798 struct b43_phy *phy = &dev->phy;
339 struct b43_phy_ht *phy_ht = phy->ht; 799 struct b43_phy_ht *phy_ht = phy->ht;
800 int i;
340 801
341 memset(phy_ht, 0, sizeof(*phy_ht)); 802 memset(phy_ht, 0, sizeof(*phy_ht));
803
804 phy_ht->tx_pwr_ctl = true;
805 for (i = 0; i < 3; i++)
806 phy_ht->tx_pwr_idx[i] = B43_PHY_HT_TXPCTL_CMD_C1_INIT + 1;
807
808 for (i = 0; i < 3; i++)
809 phy_ht->bb_mult_save[i] = -1;
342} 810}
343 811
344static int b43_phy_ht_op_init(struct b43_wldev *dev) 812static int b43_phy_ht_op_init(struct b43_wldev *dev)
345{ 813{
814 struct b43_phy_ht *phy_ht = dev->phy.ht;
346 u16 tmp; 815 u16 tmp;
347 u16 clip_state[3]; 816 u16 clip_state[3];
817 bool saved_tx_pwr_ctl;
818
819 if (dev->dev->bus_type != B43_BUS_BCMA) {
820 b43err(dev->wl, "HT-PHY is supported only on BCMA bus!\n");
821 return -EOPNOTSUPP;
822 }
348 823
349 b43_phy_ht_tables_init(dev); 824 b43_phy_ht_tables_init(dev);
350 825
@@ -357,9 +832,9 @@ static int b43_phy_ht_op_init(struct b43_wldev *dev)
357 832
358 b43_phy_mask(dev, B43_PHY_EXTG(0), ~0x3); 833 b43_phy_mask(dev, B43_PHY_EXTG(0), ~0x3);
359 834
360 b43_phy_write(dev, B43_PHY_HT_AFE_CTL1, 0); 835 b43_phy_write(dev, B43_PHY_HT_AFE_C1_OVER, 0);
361 b43_phy_write(dev, B43_PHY_HT_AFE_CTL3, 0); 836 b43_phy_write(dev, B43_PHY_HT_AFE_C2_OVER, 0);
362 b43_phy_write(dev, B43_PHY_HT_AFE_CTL5, 0); 837 b43_phy_write(dev, B43_PHY_HT_AFE_C3_OVER, 0);
363 838
364 b43_phy_write(dev, B43_PHY_EXTG(0x103), 0x20); 839 b43_phy_write(dev, B43_PHY_EXTG(0x103), 0x20);
365 b43_phy_write(dev, B43_PHY_EXTG(0x101), 0x20); 840 b43_phy_write(dev, B43_PHY_EXTG(0x101), 0x20);
@@ -371,8 +846,11 @@ static int b43_phy_ht_op_init(struct b43_wldev *dev)
371 if (0) /* TODO: condition */ 846 if (0) /* TODO: condition */
372 ; /* TODO: PHY op on reg 0x217 */ 847 ; /* TODO: PHY op on reg 0x217 */
373 848
374 b43_phy_read(dev, 0xb0); /* TODO: what for? */ 849 if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
375 b43_phy_set(dev, 0xb0, 0x1); 850 b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_CCK_EN, 0);
851 else
852 b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_CCK_EN,
853 B43_PHY_HT_CLASS_CTL_CCK_EN);
376 854
377 b43_phy_set(dev, 0xb1, 0x91); 855 b43_phy_set(dev, 0xb1, 0x91);
378 b43_phy_write(dev, 0x32f, 0x0003); 856 b43_phy_write(dev, 0x32f, 0x0003);
@@ -448,12 +926,13 @@ static int b43_phy_ht_op_init(struct b43_wldev *dev)
448 926
449 b43_mac_phy_clock_set(dev, true); 927 b43_mac_phy_clock_set(dev, true);
450 928
929 b43_phy_ht_pa_override(dev, false);
451 b43_phy_ht_force_rf_sequence(dev, B43_PHY_HT_RF_SEQ_TRIG_RX2TX); 930 b43_phy_ht_force_rf_sequence(dev, B43_PHY_HT_RF_SEQ_TRIG_RX2TX);
452 b43_phy_ht_force_rf_sequence(dev, B43_PHY_HT_RF_SEQ_TRIG_RST2RX); 931 b43_phy_ht_force_rf_sequence(dev, B43_PHY_HT_RF_SEQ_TRIG_RST2RX);
453 932 b43_phy_ht_pa_override(dev, true);
454 /* TODO: PHY op on reg 0xb0 */
455 933
456 /* TODO: Should we restore it? Or store it in global PHY info? */ 934 /* TODO: Should we restore it? Or store it in global PHY info? */
935 b43_phy_ht_classifier(dev, 0, 0);
457 b43_phy_ht_read_clip_detection(dev, clip_state); 936 b43_phy_ht_read_clip_detection(dev, clip_state);
458 937
459 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 938 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
@@ -462,6 +941,13 @@ static int b43_phy_ht_op_init(struct b43_wldev *dev)
462 b43_httab_write_bulk(dev, B43_HTTAB32(0x1a, 0xc0), 941 b43_httab_write_bulk(dev, B43_HTTAB32(0x1a, 0xc0),
463 B43_HTTAB_1A_C0_LATE_SIZE, b43_httab_0x1a_0xc0_late); 942 B43_HTTAB_1A_C0_LATE_SIZE, b43_httab_0x1a_0xc0_late);
464 943
944 saved_tx_pwr_ctl = phy_ht->tx_pwr_ctl;
945 b43_phy_ht_tx_power_fix(dev);
946 b43_phy_ht_tx_power_ctl(dev, false);
947 b43_phy_ht_tx_power_ctl_idle_tssi(dev);
948 b43_phy_ht_tx_power_ctl_setup(dev);
949 b43_phy_ht_tx_power_ctl(dev, saved_tx_pwr_ctl);
950
465 return 0; 951 return 0;
466} 952}
467 953
@@ -506,19 +992,19 @@ static void b43_phy_ht_op_software_rfkill(struct b43_wldev *dev,
506static void b43_phy_ht_op_switch_analog(struct b43_wldev *dev, bool on) 992static void b43_phy_ht_op_switch_analog(struct b43_wldev *dev, bool on)
507{ 993{
508 if (on) { 994 if (on) {
509 b43_phy_write(dev, B43_PHY_HT_AFE_CTL2, 0x00cd); 995 b43_phy_write(dev, B43_PHY_HT_AFE_C1, 0x00cd);
510 b43_phy_write(dev, B43_PHY_HT_AFE_CTL1, 0x0000); 996 b43_phy_write(dev, B43_PHY_HT_AFE_C1_OVER, 0x0000);
511 b43_phy_write(dev, B43_PHY_HT_AFE_CTL4, 0x00cd); 997 b43_phy_write(dev, B43_PHY_HT_AFE_C2, 0x00cd);
512 b43_phy_write(dev, B43_PHY_HT_AFE_CTL3, 0x0000); 998 b43_phy_write(dev, B43_PHY_HT_AFE_C2_OVER, 0x0000);
513 b43_phy_write(dev, B43_PHY_HT_AFE_CTL6, 0x00cd); 999 b43_phy_write(dev, B43_PHY_HT_AFE_C3, 0x00cd);
514 b43_phy_write(dev, B43_PHY_HT_AFE_CTL5, 0x0000); 1000 b43_phy_write(dev, B43_PHY_HT_AFE_C3_OVER, 0x0000);
515 } else { 1001 } else {
516 b43_phy_write(dev, B43_PHY_HT_AFE_CTL1, 0x07ff); 1002 b43_phy_write(dev, B43_PHY_HT_AFE_C1_OVER, 0x07ff);
517 b43_phy_write(dev, B43_PHY_HT_AFE_CTL2, 0x00fd); 1003 b43_phy_write(dev, B43_PHY_HT_AFE_C1, 0x00fd);
518 b43_phy_write(dev, B43_PHY_HT_AFE_CTL3, 0x07ff); 1004 b43_phy_write(dev, B43_PHY_HT_AFE_C2_OVER, 0x07ff);
519 b43_phy_write(dev, B43_PHY_HT_AFE_CTL4, 0x00fd); 1005 b43_phy_write(dev, B43_PHY_HT_AFE_C2, 0x00fd);
520 b43_phy_write(dev, B43_PHY_HT_AFE_CTL5, 0x07ff); 1006 b43_phy_write(dev, B43_PHY_HT_AFE_C3_OVER, 0x07ff);
521 b43_phy_write(dev, B43_PHY_HT_AFE_CTL6, 0x00fd); 1007 b43_phy_write(dev, B43_PHY_HT_AFE_C3, 0x00fd);
522 } 1008 }
523} 1009}
524 1010
diff --git a/drivers/net/wireless/b43/phy_ht.h b/drivers/net/wireless/b43/phy_ht.h
index 6544c4293b34..9b2408efb224 100644
--- a/drivers/net/wireless/b43/phy_ht.h
+++ b/drivers/net/wireless/b43/phy_ht.h
@@ -12,18 +12,60 @@
12#define B43_PHY_HT_TABLE_ADDR 0x072 /* Table address */ 12#define B43_PHY_HT_TABLE_ADDR 0x072 /* Table address */
13#define B43_PHY_HT_TABLE_DATALO 0x073 /* Table data low */ 13#define B43_PHY_HT_TABLE_DATALO 0x073 /* Table data low */
14#define B43_PHY_HT_TABLE_DATAHI 0x074 /* Table data high */ 14#define B43_PHY_HT_TABLE_DATAHI 0x074 /* Table data high */
15#define B43_PHY_HT_CLASS_CTL 0x0B0 /* Classifier control */
16#define B43_PHY_HT_CLASS_CTL_CCK_EN 0x0001 /* CCK enable */
17#define B43_PHY_HT_CLASS_CTL_OFDM_EN 0x0002 /* OFDM enable */
18#define B43_PHY_HT_CLASS_CTL_WAITED_EN 0x0004 /* Waited enable */
19#define B43_PHY_HT_IQLOCAL_CMDGCTL 0x0C2 /* I/Q LO cal command G control */
20#define B43_PHY_HT_SAMP_CMD 0x0C3 /* Sample command */
21#define B43_PHY_HT_SAMP_CMD_STOP 0x0002 /* Stop */
22#define B43_PHY_HT_SAMP_LOOP_CNT 0x0C4 /* Sample loop count */
23#define B43_PHY_HT_SAMP_WAIT_CNT 0x0C5 /* Sample wait count */
24#define B43_PHY_HT_SAMP_DEP_CNT 0x0C6 /* Sample depth count */
25#define B43_PHY_HT_SAMP_STAT 0x0C7 /* Sample status */
26#define B43_PHY_HT_TSSIMODE 0x122 /* TSSI mode */
27#define B43_PHY_HT_TSSIMODE_EN 0x0001 /* TSSI enable */
28#define B43_PHY_HT_TSSIMODE_PDEN 0x0002 /* Power det enable */
15#define B43_PHY_HT_BW1 0x1CE 29#define B43_PHY_HT_BW1 0x1CE
16#define B43_PHY_HT_BW2 0x1CF 30#define B43_PHY_HT_BW2 0x1CF
17#define B43_PHY_HT_BW3 0x1D0 31#define B43_PHY_HT_BW3 0x1D0
18#define B43_PHY_HT_BW4 0x1D1 32#define B43_PHY_HT_BW4 0x1D1
19#define B43_PHY_HT_BW5 0x1D2 33#define B43_PHY_HT_BW5 0x1D2
20#define B43_PHY_HT_BW6 0x1D3 34#define B43_PHY_HT_BW6 0x1D3
35#define B43_PHY_HT_TXPCTL_CMD_C1 0x1E7 /* TX power control command */
36#define B43_PHY_HT_TXPCTL_CMD_C1_INIT 0x007F /* Init */
37#define B43_PHY_HT_TXPCTL_CMD_C1_COEFF 0x2000 /* Power control coefficients */
38#define B43_PHY_HT_TXPCTL_CMD_C1_HWPCTLEN 0x4000 /* Hardware TX power control enable */
39#define B43_PHY_HT_TXPCTL_CMD_C1_PCTLEN 0x8000 /* TX power control enable */
40#define B43_PHY_HT_TXPCTL_N 0x1E8 /* TX power control N num */
41#define B43_PHY_HT_TXPCTL_N_TSSID 0x00FF /* N TSSI delay */
42#define B43_PHY_HT_TXPCTL_N_TSSID_SHIFT 0
43#define B43_PHY_HT_TXPCTL_N_NPTIL2 0x0700 /* N PT integer log2 */
44#define B43_PHY_HT_TXPCTL_N_NPTIL2_SHIFT 8
45#define B43_PHY_HT_TXPCTL_IDLE_TSSI 0x1E9 /* TX power control idle TSSI */
46#define B43_PHY_HT_TXPCTL_IDLE_TSSI_C1 0x003F
47#define B43_PHY_HT_TXPCTL_IDLE_TSSI_C1_SHIFT 0
48#define B43_PHY_HT_TXPCTL_IDLE_TSSI_C2 0x3F00
49#define B43_PHY_HT_TXPCTL_IDLE_TSSI_C2_SHIFT 8
50#define B43_PHY_HT_TXPCTL_IDLE_TSSI_BINF 0x8000 /* Raw TSSI offset bin format */
51#define B43_PHY_HT_TXPCTL_TARG_PWR 0x1EA /* TX power control target power */
52#define B43_PHY_HT_TXPCTL_TARG_PWR_C1 0x00FF /* Power 0 */
53#define B43_PHY_HT_TXPCTL_TARG_PWR_C1_SHIFT 0
54#define B43_PHY_HT_TXPCTL_TARG_PWR_C2 0xFF00 /* Power 1 */
55#define B43_PHY_HT_TXPCTL_TARG_PWR_C2_SHIFT 8
56#define B43_PHY_HT_TXPCTL_CMD_C2 0x222
57#define B43_PHY_HT_TXPCTL_CMD_C2_INIT 0x007F
58#define B43_PHY_HT_RSSI_C1 0x219
59#define B43_PHY_HT_RSSI_C2 0x21A
60#define B43_PHY_HT_RSSI_C3 0x21B
21 61
22#define B43_PHY_HT_C1_CLIP1THRES B43_PHY_OFDM(0x00E) 62#define B43_PHY_HT_C1_CLIP1THRES B43_PHY_OFDM(0x00E)
23#define B43_PHY_HT_C2_CLIP1THRES B43_PHY_OFDM(0x04E) 63#define B43_PHY_HT_C2_CLIP1THRES B43_PHY_OFDM(0x04E)
24#define B43_PHY_HT_C3_CLIP1THRES B43_PHY_OFDM(0x08E) 64#define B43_PHY_HT_C3_CLIP1THRES B43_PHY_OFDM(0x08E)
25 65
26#define B43_PHY_HT_RF_SEQ_MODE B43_PHY_EXTG(0x000) 66#define B43_PHY_HT_RF_SEQ_MODE B43_PHY_EXTG(0x000)
67#define B43_PHY_HT_RF_SEQ_MODE_CA_OVER 0x0001 /* Core active override */
68#define B43_PHY_HT_RF_SEQ_MODE_TR_OVER 0x0002 /* Trigger override */
27#define B43_PHY_HT_RF_SEQ_TRIG B43_PHY_EXTG(0x003) 69#define B43_PHY_HT_RF_SEQ_TRIG B43_PHY_EXTG(0x003)
28#define B43_PHY_HT_RF_SEQ_TRIG_RX2TX 0x0001 /* RX2TX */ 70#define B43_PHY_HT_RF_SEQ_TRIG_RX2TX 0x0001 /* RX2TX */
29#define B43_PHY_HT_RF_SEQ_TRIG_TX2RX 0x0002 /* TX2RX */ 71#define B43_PHY_HT_RF_SEQ_TRIG_TX2RX 0x0002 /* TX2RX */
@@ -36,12 +78,27 @@
36 78
37#define B43_PHY_HT_RF_CTL1 B43_PHY_EXTG(0x010) 79#define B43_PHY_HT_RF_CTL1 B43_PHY_EXTG(0x010)
38 80
39#define B43_PHY_HT_AFE_CTL1 B43_PHY_EXTG(0x110) 81#define B43_PHY_HT_RF_CTL_INT_C1 B43_PHY_EXTG(0x04c)
40#define B43_PHY_HT_AFE_CTL2 B43_PHY_EXTG(0x111) 82#define B43_PHY_HT_RF_CTL_INT_C2 B43_PHY_EXTG(0x06c)
41#define B43_PHY_HT_AFE_CTL3 B43_PHY_EXTG(0x114) 83#define B43_PHY_HT_RF_CTL_INT_C3 B43_PHY_EXTG(0x08c)
42#define B43_PHY_HT_AFE_CTL4 B43_PHY_EXTG(0x115) 84
43#define B43_PHY_HT_AFE_CTL5 B43_PHY_EXTG(0x118) 85#define B43_PHY_HT_AFE_C1_OVER B43_PHY_EXTG(0x110)
44#define B43_PHY_HT_AFE_CTL6 B43_PHY_EXTG(0x119) 86#define B43_PHY_HT_AFE_C1 B43_PHY_EXTG(0x111)
87#define B43_PHY_HT_AFE_C2_OVER B43_PHY_EXTG(0x114)
88#define B43_PHY_HT_AFE_C2 B43_PHY_EXTG(0x115)
89#define B43_PHY_HT_AFE_C3_OVER B43_PHY_EXTG(0x118)
90#define B43_PHY_HT_AFE_C3 B43_PHY_EXTG(0x119)
91
92#define B43_PHY_HT_TXPCTL_CMD_C3 B43_PHY_EXTG(0x164)
93#define B43_PHY_HT_TXPCTL_CMD_C3_INIT 0x007F
94#define B43_PHY_HT_TXPCTL_IDLE_TSSI2 B43_PHY_EXTG(0x165) /* TX power control idle TSSI */
95#define B43_PHY_HT_TXPCTL_IDLE_TSSI2_C3 0x003F
96#define B43_PHY_HT_TXPCTL_IDLE_TSSI2_C3_SHIFT 0
97#define B43_PHY_HT_TXPCTL_TARG_PWR2 B43_PHY_EXTG(0x166) /* TX power control target power */
98#define B43_PHY_HT_TXPCTL_TARG_PWR2_C3 0x00FF
99#define B43_PHY_HT_TXPCTL_TARG_PWR2_C3_SHIFT 0
100
101#define B43_PHY_HT_TEST B43_PHY_N_BMODE(0x00A)
45 102
46 103
47/* Values for PHY registers used on channel switching */ 104/* Values for PHY registers used on channel switching */
@@ -56,6 +113,14 @@ struct b43_phy_ht_channeltab_e_phy {
56 113
57 114
58struct b43_phy_ht { 115struct b43_phy_ht {
116 u16 rf_ctl_int_save[3];
117
118 bool tx_pwr_ctl;
119 u8 tx_pwr_idx[3];
120
121 s32 bb_mult_save[3];
122
123 u8 idle_tssi[3];
59}; 124};
60 125
61 126
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index 2d3c6644f82d..faeafe219c57 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -334,13 +334,9 @@ static int alloc_ringmemory(struct b43legacy_dmaring *ring)
334 ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev, 334 ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
335 B43legacy_DMA_RINGMEMSIZE, 335 B43legacy_DMA_RINGMEMSIZE,
336 &(ring->dmabase), 336 &(ring->dmabase),
337 GFP_KERNEL); 337 GFP_KERNEL | __GFP_ZERO);
338 if (!ring->descbase) { 338 if (!ring->descbase)
339 b43legacyerr(ring->dev->wl, "DMA ringmemory allocation"
340 " failed\n");
341 return -ENOMEM; 339 return -ENOMEM;
342 }
343 memset(ring->descbase, 0, B43legacy_DMA_RINGMEMSIZE);
344 340
345 return 0; 341 return 0;
346} 342}
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig
index 1d92d874ebb6..747e9317dabd 100644
--- a/drivers/net/wireless/brcm80211/Kconfig
+++ b/drivers/net/wireless/brcm80211/Kconfig
@@ -12,8 +12,9 @@ config BRCMSMAC
12 select CORDIC 12 select CORDIC
13 ---help--- 13 ---help---
14 This module adds support for PCIe wireless adapters based on Broadcom 14 This module adds support for PCIe wireless adapters based on Broadcom
15 IEEE802.11n SoftMAC chipsets. If you choose to build a module, it'll 15 IEEE802.11n SoftMAC chipsets. It also has WLAN LED support, which will
16 be called brcmsmac.ko. 16 be available if you select BCMA_DRIVER_GPIO. If you choose to build a
17 module, the driver will be called brcmsmac.ko.
17 18
18config BRCMFMAC 19config BRCMFMAC
19 tristate "Broadcom IEEE802.11n embedded FullMAC WLAN driver" 20 tristate "Broadcom IEEE802.11n embedded FullMAC WLAN driver"
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index 756e19fc2795..598c8e2f8d2b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -26,6 +26,7 @@ brcmfmac-objs += \
26 wl_cfg80211.o \ 26 wl_cfg80211.o \
27 fwil.o \ 27 fwil.o \
28 fweh.o \ 28 fweh.o \
29 fwsignal.o \
29 p2p.o \ 30 p2p.o \
30 dhd_cdc.o \ 31 dhd_cdc.o \
31 dhd_common.o \ 32 dhd_common.o \
@@ -39,3 +40,5 @@ brcmfmac-$(CONFIG_BRCMFMAC_USB) += \
39 usb.o 40 usb.o
40brcmfmac-$(CONFIG_BRCMDBG) += \ 41brcmfmac-$(CONFIG_BRCMDBG) += \
41 dhd_dbg.o 42 dhd_dbg.o
43brcmfmac-$(CONFIG_BRCM_TRACING) += \
44 tracepoint.o
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index ef6f23be6d32..c7fa20846b32 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -501,6 +501,7 @@ struct brcmf_dcmd {
501/* Forward decls for struct brcmf_pub (see below) */ 501/* Forward decls for struct brcmf_pub (see below) */
502struct brcmf_proto; /* device communication protocol info */ 502struct brcmf_proto; /* device communication protocol info */
503struct brcmf_cfg80211_dev; /* cfg80211 device info */ 503struct brcmf_cfg80211_dev; /* cfg80211 device info */
504struct brcmf_fws_info; /* firmware signalling info */
504 505
505/* Common structure for module and instance linkage */ 506/* Common structure for module and instance linkage */
506struct brcmf_pub { 507struct brcmf_pub {
@@ -527,6 +528,10 @@ struct brcmf_pub {
527 unsigned char proto_buf[BRCMF_DCMD_MAXLEN]; 528 unsigned char proto_buf[BRCMF_DCMD_MAXLEN];
528 529
529 struct brcmf_fweh_info fweh; 530 struct brcmf_fweh_info fweh;
531
532 bool fw_signals;
533 struct brcmf_fws_info *fws;
534 spinlock_t fws_spinlock;
530#ifdef DEBUG 535#ifdef DEBUG
531 struct dentry *dbgfs_dir; 536 struct dentry *dbgfs_dir;
532#endif 537#endif
@@ -582,7 +587,7 @@ extern int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx, uint cmd,
582 void *buf, uint len); 587 void *buf, uint len);
583 588
584/* Remove any protocol-specific data header. */ 589/* Remove any protocol-specific data header. */
585extern int brcmf_proto_hdrpull(struct brcmf_pub *drvr, u8 *ifidx, 590extern int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
586 struct sk_buff *rxp); 591 struct sk_buff *rxp);
587 592
588extern int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked); 593extern int brcmf_net_attach(struct brcmf_if *ifp, bool rtnl_locked);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index ad25c3408b59..883ef9063e8a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -134,7 +134,7 @@ extern void brcmf_dev_reset(struct device *dev);
134/* Indication from bus module to change flow-control state */ 134/* Indication from bus module to change flow-control state */
135extern void brcmf_txflowblock(struct device *dev, bool state); 135extern void brcmf_txflowblock(struct device *dev, bool state);
136 136
137/* Notify tx completion */ 137/* Notify that the bus has transferred the tx packet to the firmware */
138extern void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, 138extern void brcmf_txcomplete(struct device *dev, struct sk_buff *txp,
139 bool success); 139 bool success);
140 140
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
index a2354d951dd7..e224bcb90024 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
@@ -28,6 +28,7 @@
28#include "dhd.h" 28#include "dhd.h"
29#include "dhd_proto.h" 29#include "dhd_proto.h"
30#include "dhd_bus.h" 30#include "dhd_bus.h"
31#include "fwsignal.h"
31#include "dhd_dbg.h" 32#include "dhd_dbg.h"
32 33
33struct brcmf_proto_cdc_dcmd { 34struct brcmf_proto_cdc_dcmd {
@@ -71,13 +72,26 @@ struct brcmf_proto_cdc_dcmd {
71 ((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_IF_MASK) | \ 72 ((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_IF_MASK) | \
72 ((idx) << BDC_FLAG2_IF_SHIFT))) 73 ((idx) << BDC_FLAG2_IF_SHIFT)))
73 74
75/**
76 * struct brcmf_proto_bdc_header - BDC header format
77 *
78 * @flags: flags contain protocol and checksum info.
79 * @priority: 802.1d priority and USB flow control info (bit 4:7).
80 * @flags2: additional flags containing dongle interface index.
81 * @data_offset: start of packet data; the header is followed by firmware signals.
82 */
74struct brcmf_proto_bdc_header { 83struct brcmf_proto_bdc_header {
75 u8 flags; 84 u8 flags;
76 u8 priority; /* 802.1d Priority, 4:7 flow control info for usb */ 85 u8 priority;
77 u8 flags2; 86 u8 flags2;
78 u8 data_offset; 87 u8 data_offset;
79}; 88};
80 89
90/*
91 * maximum length of firmware signal data between
92 * the BDC header and packet data in the tx path.
93 */
94#define BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES 12
81 95
82#define RETRIES 2 /* # of retries to retrieve matching dcmd response */ 96#define RETRIES 2 /* # of retries to retrieve matching dcmd response */
83 #define BUS_HEADER_LEN (16+64) /* Must be at least SDPCM_RESERVE 97
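
The kdoc block above makes the BDC framing explicit: data_offset counts 4-byte words of firmware-signal TLVs sitting between the header and the payload, so the new 12-byte tx budget (BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES) corresponds to data_offset = 3. A plain-C sketch of locating the payload — illustrative only, not the driver's actual helper:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* On-wire layout: [BDC header][data_offset * 4 bytes of signal
 * TLVs][packet data]. */
struct bdc_header_sketch {
	uint8_t flags;		/* protocol and checksum info */
	uint8_t priority;	/* 802.1d priority; bits 4:7 USB flow control */
	uint8_t flags2;		/* dongle interface index */
	uint8_t data_offset;	/* signal-TLV length in 4-byte words */
};

static const uint8_t *bdc_payload(const uint8_t *frame)
{
	struct bdc_header_sketch h;

	memcpy(&h, frame, sizeof(h));
	return frame + sizeof(h) + ((size_t)h.data_offset << 2);
}

int main(void)
{
	uint8_t frame[20] = { 0, 0, 0, 3 };	/* 12 bytes of signals */

	return bdc_payload(frame) - frame;	/* 4 + 12 = 16 */
}
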
@@ -258,7 +272,7 @@ static void pkt_set_sum_good(struct sk_buff *skb, bool x)
258 skb->ip_summed = (x ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE); 272 skb->ip_summed = (x ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
259} 273}
260 274
261void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx, 275void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx, u8 offset,
262 struct sk_buff *pktbuf) 276 struct sk_buff *pktbuf)
263{ 277{
264 struct brcmf_proto_bdc_header *h; 278 struct brcmf_proto_bdc_header *h;
@@ -266,7 +280,6 @@ void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx,
266 brcmf_dbg(CDC, "Enter\n"); 280 brcmf_dbg(CDC, "Enter\n");
267 281
268 /* Push BDC header used to convey priority for buses that don't */ 282 /* Push BDC header used to convey priority for buses that don't */
269
270 skb_push(pktbuf, BDC_HEADER_LEN); 283 skb_push(pktbuf, BDC_HEADER_LEN);
271 284
272 h = (struct brcmf_proto_bdc_header *)(pktbuf->data); 285 h = (struct brcmf_proto_bdc_header *)(pktbuf->data);
@@ -277,11 +290,11 @@ void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx,
277 290
278 h->priority = (pktbuf->priority & BDC_PRIORITY_MASK); 291 h->priority = (pktbuf->priority & BDC_PRIORITY_MASK);
279 h->flags2 = 0; 292 h->flags2 = 0;
280 h->data_offset = 0; 293 h->data_offset = offset;
281 BDC_SET_IF_IDX(h, ifidx); 294 BDC_SET_IF_IDX(h, ifidx);
282} 295}
283 296
284int brcmf_proto_hdrpull(struct brcmf_pub *drvr, u8 *ifidx, 297int brcmf_proto_hdrpull(struct brcmf_pub *drvr, bool do_fws, u8 *ifidx,
285 struct sk_buff *pktbuf) 298 struct sk_buff *pktbuf)
286{ 299{
287 struct brcmf_proto_bdc_header *h; 300 struct brcmf_proto_bdc_header *h;
@@ -328,7 +341,10 @@ int brcmf_proto_hdrpull(struct brcmf_pub *drvr, u8 *ifidx,
328 pktbuf->priority = h->priority & BDC_PRIORITY_MASK; 341 pktbuf->priority = h->priority & BDC_PRIORITY_MASK;
329 342
330 skb_pull(pktbuf, BDC_HEADER_LEN); 343 skb_pull(pktbuf, BDC_HEADER_LEN);
331 skb_pull(pktbuf, h->data_offset << 2); 344 if (do_fws)
345 brcmf_fws_hdrpull(drvr, *ifidx, h->data_offset << 2, pktbuf);
346 else
347 skb_pull(pktbuf, h->data_offset << 2);
332 348
333 if (pktbuf->len == 0) 349 if (pktbuf->len == 0)
334 return -ENODATA; 350 return -ENODATA;
@@ -350,7 +366,7 @@ int brcmf_proto_attach(struct brcmf_pub *drvr)
350 } 366 }
351 367
352 drvr->prot = cdc; 368 drvr->prot = cdc;
353 drvr->hdrlen += BDC_HEADER_LEN; 369 drvr->hdrlen += BDC_HEADER_LEN + BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES;
354 drvr->bus_if->maxctl = BRCMF_DCMD_MAXLEN + 370 drvr->bus_if->maxctl = BRCMF_DCMD_MAXLEN +
355 sizeof(struct brcmf_proto_cdc_dcmd) + ROUND_UP_MARGIN; 371 sizeof(struct brcmf_proto_cdc_dcmd) + ROUND_UP_MARGIN;
356 return 0; 372 return 0;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
index 4544342a0428..be0787cab24f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -24,6 +24,7 @@
24#include "dhd_proto.h" 24#include "dhd_proto.h"
25#include "dhd_dbg.h" 25#include "dhd_dbg.h"
26#include "fwil.h" 26#include "fwil.h"
27#include "tracepoint.h"
27 28
28#define PKTFILTER_BUF_SIZE 128 29#define PKTFILTER_BUF_SIZE 128
29#define BRCMF_ARPOL_MODE 0xb /* agent|snoop|peer_autoreply */ 30#define BRCMF_ARPOL_MODE 0xb /* agent|snoop|peer_autoreply */
@@ -373,3 +374,35 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
373done: 374done:
374 return err; 375 return err;
375} 376}
377
378#ifdef CONFIG_BRCM_TRACING
379void __brcmf_err(const char *func, const char *fmt, ...)
380{
381 struct va_format vaf = {
382 .fmt = fmt,
383 };
384 va_list args;
385
386 va_start(args, fmt);
387 vaf.va = &args;
388 pr_err("%s: %pV", func, &vaf);
389 trace_brcmf_err(func, &vaf);
390 va_end(args);
391}
392#endif
393#if defined(CONFIG_BRCM_TRACING) || defined(CONFIG_BRCMDBG)
394void __brcmf_dbg(u32 level, const char *func, const char *fmt, ...)
395{
396 struct va_format vaf = {
397 .fmt = fmt,
398 };
399 va_list args;
400
401 va_start(args, fmt);
402 vaf.va = &args;
403 if (brcmf_msg_level & level)
404 pr_debug("%s %pV", func, &vaf);
405 trace_brcmf_dbg(level, func, &vaf);
406 va_end(args);
407}
408#endif
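
Both helpers above lean on the kernel's struct va_format / %pV idiom, which lets the same argument list feed printk and a tracepoint without formatting into an intermediate buffer. A user-space analogue of forwarding one set of varargs to two sinks; here va_copy() plays the role %pV plays in the kernel:

#include <stdarg.h>
#include <stdio.h>

static void log_two_sinks(const char *func, const char *fmt, ...)
{
	va_list args, args2;

	va_start(args, fmt);
	va_copy(args2, args);	/* each consumer needs its own copy */

	fprintf(stderr, "%s: ", func);	/* "console" sink */
	vfprintf(stderr, fmt, args);

	vprintf(fmt, args2);		/* second sink, e.g. trace buffer */

	va_end(args2);
	va_end(args);
}

int main(void)
{
	log_two_sinks("demo", "value=%d\n", 42);
	return 0;
}
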
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
index 57671eddf79d..ac792499b46a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
@@ -22,6 +22,7 @@
22#include "dhd.h" 22#include "dhd.h"
23#include "dhd_bus.h" 23#include "dhd_bus.h"
24#include "dhd_dbg.h" 24#include "dhd_dbg.h"
25#include "tracepoint.h"
25 26
26static struct dentry *root_folder; 27static struct dentry *root_folder;
27 28
@@ -123,3 +124,44 @@ void brcmf_debugfs_create_sdio_count(struct brcmf_pub *drvr,
123 debugfs_create_file("counters", S_IRUGO, dentry, 124 debugfs_create_file("counters", S_IRUGO, dentry,
124 sdcnt, &brcmf_debugfs_sdio_counter_ops); 125 sdcnt, &brcmf_debugfs_sdio_counter_ops);
125} 126}
127
128static
129ssize_t brcmf_debugfs_fws_stats_read(struct file *f, char __user *data,
130 size_t count, loff_t *ppos)
131{
132 struct brcmf_fws_stats *fwstats = f->private_data;
133 char buf[100];
134 int res;
135
136 /* only allow read from start */
137 if (*ppos > 0)
138 return 0;
139
140 res = scnprintf(buf, sizeof(buf),
141 "header_pulls: %u\n"
142 "header_only_pkt: %u\n"
143 "tlv_parse_failed: %u\n"
144 "tlv_invalid_type: %u\n",
145 fwstats->header_pulls,
146 fwstats->header_only_pkt,
147 fwstats->tlv_parse_failed,
148 fwstats->tlv_invalid_type);
149
150 return simple_read_from_buffer(data, count, ppos, buf, res);
151}
152
153static const struct file_operations brcmf_debugfs_fws_stats_ops = {
154 .owner = THIS_MODULE,
155 .open = simple_open,
156 .read = brcmf_debugfs_fws_stats_read
157};
158
159void brcmf_debugfs_create_fws_stats(struct brcmf_pub *drvr,
160 struct brcmf_fws_stats *stats)
161{
162 struct dentry *dentry = drvr->dbgfs_dir;
163
164 if (!IS_ERR_OR_NULL(dentry))
165 debugfs_create_file("fws_stats", S_IRUGO, dentry,
166 stats, &brcmf_debugfs_fws_stats_ops);
167}
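
Since the handler only serves reads from offset 0, a single read() captures the whole snapshot. A hedged userspace sketch follows; the debugfs mount point and the device directory name are assumptions for illustration, not something the patch defines.

/* userspace sketch; adjust the path to the actual device directory */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[128];
	ssize_t n;
	int fd = open("/sys/kernel/debug/brcmfmac/mmc1:0001:1/fws_stats",
		      O_RDONLY);

	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);	/* one shot from offset 0 */
	if (n > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}
	close(fd);
	return 0;
}
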
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
index bc013cbe06f6..4bc646bde16f 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
@@ -43,6 +43,7 @@
43 * debugging is not selected. When debugging the driver, error 43 * debugging is not selected. When debugging the driver, error
44 * messages are as important as other tracing, or even more so. 44 * messages are as important as other tracing, or even more so.
45 */ 45 */
46#ifndef CONFIG_BRCM_TRACING
46#ifdef CONFIG_BRCMDBG 47#ifdef CONFIG_BRCMDBG
47#define brcmf_err(fmt, ...) pr_err("%s: " fmt, __func__, ##__VA_ARGS__) 48#define brcmf_err(fmt, ...) pr_err("%s: " fmt, __func__, ##__VA_ARGS__)
48#else 49#else
@@ -52,15 +53,21 @@
52 pr_err("%s: " fmt, __func__, ##__VA_ARGS__); \ 53 pr_err("%s: " fmt, __func__, ##__VA_ARGS__); \
53 } while (0) 54 } while (0)
54#endif 55#endif
56#else
57__printf(2, 3)
58void __brcmf_err(const char *func, const char *fmt, ...);
59#define brcmf_err(fmt, ...) \
60 __brcmf_err(__func__, fmt, ##__VA_ARGS__)
61#endif
55 62
56#if defined(DEBUG) 63#if defined(DEBUG) || defined(CONFIG_BRCM_TRACING)
57 64__printf(3, 4)
65void __brcmf_dbg(u32 level, const char *func, const char *fmt, ...);
58#define brcmf_dbg(level, fmt, ...) \ 66#define brcmf_dbg(level, fmt, ...) \
59do { \ 67do { \
60 if (brcmf_msg_level & BRCMF_##level##_VAL) \ 68 __brcmf_dbg(BRCMF_##level##_VAL, __func__, \
61 pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \ 69 fmt, ##__VA_ARGS__); \
62} while (0) 70} while (0)
63
64#define BRCMF_DATA_ON() (brcmf_msg_level & BRCMF_DATA_VAL) 71#define BRCMF_DATA_ON() (brcmf_msg_level & BRCMF_DATA_VAL)
65#define BRCMF_CTL_ON() (brcmf_msg_level & BRCMF_CTL_VAL) 72#define BRCMF_CTL_ON() (brcmf_msg_level & BRCMF_CTL_VAL)
66#define BRCMF_HDRS_ON() (brcmf_msg_level & BRCMF_HDRS_VAL) 73#define BRCMF_HDRS_ON() (brcmf_msg_level & BRCMF_HDRS_VAL)
@@ -69,7 +76,7 @@ do { \
69#define BRCMF_EVENT_ON() (brcmf_msg_level & BRCMF_EVENT_VAL) 76#define BRCMF_EVENT_ON() (brcmf_msg_level & BRCMF_EVENT_VAL)
70#define BRCMF_FIL_ON() (brcmf_msg_level & BRCMF_FIL_VAL) 77#define BRCMF_FIL_ON() (brcmf_msg_level & BRCMF_FIL_VAL)
71 78
72#else /* (defined DEBUG) || (defined DEBUG) */ 79#else /* defined(DEBUG) || defined(CONFIG_BRCM_TRACING) */
73 80
74#define brcmf_dbg(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__) 81#define brcmf_dbg(level, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
75 82
@@ -81,7 +88,7 @@ do { \
81#define BRCMF_EVENT_ON() 0 88#define BRCMF_EVENT_ON() 0
82#define BRCMF_FIL_ON() 0 89#define BRCMF_FIL_ON() 0
83 90
84#endif /* defined(DEBUG) */ 91#endif /* defined(DEBUG) || defined(CONFIG_BRCM_TRACING) */
85 92
86#define brcmf_dbg_hex_dump(test, data, len, fmt, ...) \ 93#define brcmf_dbg_hex_dump(test, data, len, fmt, ...) \
87do { \ 94do { \
@@ -125,6 +132,13 @@ struct brcmf_sdio_count {
125 ulong rx_readahead_cnt; /* packets where header read-ahead was used */ 132 ulong rx_readahead_cnt; /* packets where header read-ahead was used */
126}; 133};
127 134
135struct brcmf_fws_stats {
136 u32 tlv_parse_failed;
137 u32 tlv_invalid_type;
138 u32 header_only_pkt;
139 u32 header_pulls;
140};
141
128struct brcmf_pub; 142struct brcmf_pub;
129#ifdef DEBUG 143#ifdef DEBUG
130void brcmf_debugfs_init(void); 144void brcmf_debugfs_init(void);
@@ -134,6 +148,8 @@ void brcmf_debugfs_detach(struct brcmf_pub *drvr);
134struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr); 148struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr);
135void brcmf_debugfs_create_sdio_count(struct brcmf_pub *drvr, 149void brcmf_debugfs_create_sdio_count(struct brcmf_pub *drvr,
136 struct brcmf_sdio_count *sdcnt); 150 struct brcmf_sdio_count *sdcnt);
151void brcmf_debugfs_create_fws_stats(struct brcmf_pub *drvr,
152 struct brcmf_fws_stats *stats);
137#else 153#else
138static inline void brcmf_debugfs_init(void) 154static inline void brcmf_debugfs_init(void)
139{ 155{
@@ -148,6 +164,10 @@ static inline int brcmf_debugfs_attach(struct brcmf_pub *drvr)
148static inline void brcmf_debugfs_detach(struct brcmf_pub *drvr) 164static inline void brcmf_debugfs_detach(struct brcmf_pub *drvr)
149{ 165{
150} 166}
167static inline void brcmf_debugfs_create_fws_stats(struct brcmf_pub *drvr,
168 struct brcmf_fws_stats *stats)
169{
170}
151#endif 171#endif
152 172
153#endif /* _BRCMF_DBG_H_ */ 173#endif /* _BRCMF_DBG_H_ */
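
When neither DEBUG nor CONFIG_BRCM_TRACING is set, brcmf_dbg() falls back to no_printk(), which generates no object code yet still type-checks the format string, so disabled call sites cannot bit-rot. The fallback in isolation (demo_dbg() is illustrative):

/* sketch: demo_dbg() compiles away but keeps format verification */
#include <linux/printk.h>

#define demo_dbg(fmt, ...) no_printk(fmt, ##__VA_ARGS__)

static void demo(void)
{
	demo_dbg("value=%d\n", 42);	/* no code emitted */
}
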
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index c06cea88df0d..fa5a2af04d46 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -30,17 +30,18 @@
30#include "p2p.h" 30#include "p2p.h"
31#include "wl_cfg80211.h" 31#include "wl_cfg80211.h"
32#include "fwil.h" 32#include "fwil.h"
33#include "fwsignal.h"
33 34
34MODULE_AUTHOR("Broadcom Corporation"); 35MODULE_AUTHOR("Broadcom Corporation");
35MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver."); 36MODULE_DESCRIPTION("Broadcom 802.11 wireless LAN fullmac driver.");
36MODULE_SUPPORTED_DEVICE("Broadcom 802.11 WLAN fullmac cards");
37MODULE_LICENSE("Dual BSD/GPL"); 37MODULE_LICENSE("Dual BSD/GPL");
38 38
39#define MAX_WAIT_FOR_8021X_TX 50 /* msecs */ 39#define MAX_WAIT_FOR_8021X_TX 50 /* msecs */
40 40
41/* Error bits */ 41/* Error bits */
42int brcmf_msg_level; 42int brcmf_msg_level;
43module_param(brcmf_msg_level, int, 0); 43module_param_named(debug, brcmf_msg_level, int, S_IRUSR | S_IWUSR);
44MODULE_PARM_DESC(debug, "level of debug output");
44 45
45/* P2P0 enable */ 46/* P2P0 enable */
46static int brcmf_p2p_enable; 47static int brcmf_p2p_enable;
@@ -230,7 +231,7 @@ static netdev_tx_t brcmf_netdev_start_xmit(struct sk_buff *skb,
230 atomic_inc(&ifp->pend_8021x_cnt); 231 atomic_inc(&ifp->pend_8021x_cnt);
231 232
232 /* If the protocol uses a data header, apply it */ 233 /* If the protocol uses a data header, apply it */
233 brcmf_proto_hdrpush(drvr, ifp->ifidx, skb); 234 brcmf_proto_hdrpush(drvr, ifp->ifidx, 0, skb);
234 235
235 /* Use bus module to send data frame */ 236 /* Use bus module to send data frame */
236 ret = brcmf_bus_txdata(drvr->bus_if, skb); 237 ret = brcmf_bus_txdata(drvr->bus_if, skb);
@@ -283,7 +284,7 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
283 skb_unlink(skb, skb_list); 284 skb_unlink(skb, skb_list);
284 285
285 /* process and remove protocol-specific header */ 286 /* process and remove protocol-specific header */
286 ret = brcmf_proto_hdrpull(drvr, &ifidx, skb); 287 ret = brcmf_proto_hdrpull(drvr, drvr->fw_signals, &ifidx, skb);
287 ifp = drvr->iflist[ifidx]; 288 ifp = drvr->iflist[ifidx];
288 289
289 if (ret || !ifp || !ifp->ndev) { 290 if (ret || !ifp || !ifp->ndev) {
@@ -357,23 +358,29 @@ void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
357 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 358 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
358 struct brcmf_pub *drvr = bus_if->drvr; 359 struct brcmf_pub *drvr = bus_if->drvr;
359 struct brcmf_if *ifp; 360 struct brcmf_if *ifp;
361 int res;
360 362
361 brcmf_proto_hdrpull(drvr, &ifidx, txp); 363 res = brcmf_proto_hdrpull(drvr, false, &ifidx, txp);
362 364
363 ifp = drvr->iflist[ifidx]; 365 ifp = drvr->iflist[ifidx];
364 if (!ifp) 366 if (!ifp)
365 return; 367 goto done;
366 368
367 eh = (struct ethhdr *)(txp->data); 369 if (res == 0) {
368 type = ntohs(eh->h_proto); 370 eh = (struct ethhdr *)(txp->data);
371 type = ntohs(eh->h_proto);
369 372
370 if (type == ETH_P_PAE) { 373 if (type == ETH_P_PAE) {
371 atomic_dec(&ifp->pend_8021x_cnt); 374 atomic_dec(&ifp->pend_8021x_cnt);
372 if (waitqueue_active(&ifp->pend_8021x_wait)) 375 if (waitqueue_active(&ifp->pend_8021x_wait))
373 wake_up(&ifp->pend_8021x_wait); 376 wake_up(&ifp->pend_8021x_wait);
377 }
374 } 378 }
375 if (!success) 379 if (!success)
376 ifp->stats.tx_errors++; 380 ifp->stats.tx_errors++;
381
382done:
383 brcmu_pkt_buf_free_skb(txp);
377} 384}
378 385
379static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev) 386static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
@@ -873,6 +880,9 @@ int brcmf_bus_start(struct device *dev)
873 if (ret < 0) 880 if (ret < 0)
874 goto fail; 881 goto fail;
875 882
883 drvr->fw_signals = true;
884 (void)brcmf_fws_init(drvr);
885
876 drvr->config = brcmf_cfg80211_attach(drvr, bus_if->dev); 886 drvr->config = brcmf_cfg80211_attach(drvr, bus_if->dev);
877 if (drvr->config == NULL) { 887 if (drvr->config == NULL) {
878 ret = -ENOMEM; 888 ret = -ENOMEM;
@@ -889,6 +899,8 @@ fail:
889 brcmf_err("failed: %d\n", ret); 899 brcmf_err("failed: %d\n", ret);
890 if (drvr->config) 900 if (drvr->config)
891 brcmf_cfg80211_detach(drvr->config); 901 brcmf_cfg80211_detach(drvr->config);
902 if (drvr->fws)
903 brcmf_fws_deinit(drvr);
892 free_netdev(ifp->ndev); 904 free_netdev(ifp->ndev);
893 drvr->iflist[0] = NULL; 905 drvr->iflist[0] = NULL;
894 if (p2p_ifp) { 906 if (p2p_ifp) {
@@ -952,6 +964,9 @@ void brcmf_detach(struct device *dev)
952 if (drvr->prot) 964 if (drvr->prot)
953 brcmf_proto_detach(drvr); 965 brcmf_proto_detach(drvr);
954 966
967 if (drvr->fws)
968 brcmf_fws_deinit(drvr);
969
955 brcmf_debugfs_detach(drvr); 970 brcmf_debugfs_detach(drvr);
956 bus_if->drvr = NULL; 971 bus_if->drvr = NULL;
957 kfree(drvr); 972 kfree(drvr);
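
Note the ownership change in brcmf_txcomplete() above: it now frees the packet itself via brcmu_pkt_buf_free_skb(), which is why the SDIO and USB paths later in this series drop their own free calls. A sketch of the resulting contract (bus_tx_done() is an illustrative caller, not driver code):

/* sketch of the tx completion contract after this patch */
static void bus_tx_done(struct device *dev, struct sk_buff *skb, bool ok)
{
	brcmf_txcomplete(dev, skb, ok);
	/* skb is freed inside brcmf_txcomplete(); do not touch it here */
}
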
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
index 48fa70302192..ef9179883748 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
@@ -33,7 +33,7 @@ extern void brcmf_proto_stop(struct brcmf_pub *drvr);
33/* Add any protocol-specific data header. 33/* Add any protocol-specific data header.
34 * Caller must reserve prot_hdrlen prepend space. 34 * Caller must reserve prot_hdrlen prepend space.
35 */ 35 */
36extern void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx, 36extern void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx, u8 offset,
37 struct sk_buff *txp); 37 struct sk_buff *txp);
38 38
39/* Sets dongle media info (drv_version, mac address). */ 39/* Sets dongle media info (drv_version, mac address). */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 4469321c0eb3..9a2edd3f0a5c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -1546,7 +1546,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1546 struct sk_buff_head pktlist; /* needed for bus interface */ 1546 struct sk_buff_head pktlist; /* needed for bus interface */
1547 u16 pad; /* Number of pad bytes to read */ 1547 u16 pad; /* Number of pad bytes to read */
1548 uint rxleft = 0; /* Remaining number of frames allowed */ 1548 uint rxleft = 0; /* Remaining number of frames allowed */
1549 int sdret; /* Return code from calls */ 1549 int ret; /* Return code from calls */
1550 uint rxcount = 0; /* Total frames read */ 1550 uint rxcount = 0; /* Total frames read */
1551 struct brcmf_sdio_read *rd = &bus->cur_read, rd_new; 1551 struct brcmf_sdio_read *rd = &bus->cur_read, rd_new;
1552 u8 head_read = 0; 1552 u8 head_read = 0;
@@ -1577,15 +1577,15 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1577 /* read header first for unknown frame length */ 1577 /* read header first for unknown frame length */
1578 sdio_claim_host(bus->sdiodev->func[1]); 1578 sdio_claim_host(bus->sdiodev->func[1]);
1579 if (!rd->len) { 1579 if (!rd->len) {
1580 sdret = brcmf_sdcard_recv_buf(bus->sdiodev, 1580 ret = brcmf_sdcard_recv_buf(bus->sdiodev,
1581 bus->sdiodev->sbwad, 1581 bus->sdiodev->sbwad,
1582 SDIO_FUNC_2, F2SYNC, 1582 SDIO_FUNC_2, F2SYNC,
1583 bus->rxhdr, 1583 bus->rxhdr,
1584 BRCMF_FIRSTREAD); 1584 BRCMF_FIRSTREAD);
1585 bus->sdcnt.f2rxhdrs++; 1585 bus->sdcnt.f2rxhdrs++;
1586 if (sdret < 0) { 1586 if (ret < 0) {
1587 brcmf_err("RXHEADER FAILED: %d\n", 1587 brcmf_err("RXHEADER FAILED: %d\n",
1588 sdret); 1588 ret);
1589 bus->sdcnt.rx_hdrfail++; 1589 bus->sdcnt.rx_hdrfail++;
1590 brcmf_sdbrcm_rxfail(bus, true, true); 1590 brcmf_sdbrcm_rxfail(bus, true, true);
1591 sdio_release_host(bus->sdiodev->func[1]); 1591 sdio_release_host(bus->sdiodev->func[1]);
@@ -1637,14 +1637,14 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
1637 skb_pull(pkt, head_read); 1637 skb_pull(pkt, head_read);
1638 pkt_align(pkt, rd->len_left, BRCMF_SDALIGN); 1638 pkt_align(pkt, rd->len_left, BRCMF_SDALIGN);
1639 1639
1640 sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad, 1640 ret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
1641 SDIO_FUNC_2, F2SYNC, pkt); 1641 SDIO_FUNC_2, F2SYNC, pkt);
1642 bus->sdcnt.f2rxdata++; 1642 bus->sdcnt.f2rxdata++;
1643 sdio_release_host(bus->sdiodev->func[1]); 1643 sdio_release_host(bus->sdiodev->func[1]);
1644 1644
1645 if (sdret < 0) { 1645 if (ret < 0) {
1646 brcmf_err("read %d bytes from channel %d failed: %d\n", 1646 brcmf_err("read %d bytes from channel %d failed: %d\n",
1647 rd->len, rd->channel, sdret); 1647 rd->len, rd->channel, ret);
1648 brcmu_pkt_buf_free_skb(pkt); 1648 brcmu_pkt_buf_free_skb(pkt);
1649 sdio_claim_host(bus->sdiodev->func[1]); 1649 sdio_claim_host(bus->sdiodev->func[1]);
1650 brcmf_sdbrcm_rxfail(bus, true, 1650 brcmf_sdbrcm_rxfail(bus, true,
@@ -1775,7 +1775,7 @@ brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
1775/* Writes a HW/SW header into the packet and sends it. */ 1775/* Writes a HW/SW header into the packet and sends it. */
1776/* Assumes: (a) header space already there, (b) caller holds lock */ 1776/* Assumes: (a) header space already there, (b) caller holds lock */
1777static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt, 1777static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
1778 uint chan, bool free_pkt) 1778 uint chan)
1779{ 1779{
1780 int ret; 1780 int ret;
1781 u8 *frame; 1781 u8 *frame;
@@ -1805,10 +1805,7 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
1805 1805
1806 pkt_align(new, pkt->len, BRCMF_SDALIGN); 1806 pkt_align(new, pkt->len, BRCMF_SDALIGN);
1807 memcpy(new->data, pkt->data, pkt->len); 1807 memcpy(new->data, pkt->data, pkt->len);
1808 if (free_pkt) 1808 brcmu_pkt_buf_free_skb(pkt);
1809 brcmu_pkt_buf_free_skb(pkt);
1810 /* free the pkt if canned one is not used */
1811 free_pkt = true;
1812 pkt = new; 1809 pkt = new;
1813 frame = (u8 *) (pkt->data); 1810 frame = (u8 *) (pkt->data);
1814 /* precondition: (frame % BRCMF_SDALIGN) == 0) */ 1811 /* precondition: (frame % BRCMF_SDALIGN) == 0) */
@@ -1901,10 +1898,6 @@ done:
1901 /* restore pkt buffer pointer before calling tx complete routine */ 1898 /* restore pkt buffer pointer before calling tx complete routine */
1902 skb_pull(pkt, SDPCM_HDRLEN + pad); 1899 skb_pull(pkt, SDPCM_HDRLEN + pad);
1903 brcmf_txcomplete(bus->sdiodev->dev, pkt, ret != 0); 1900 brcmf_txcomplete(bus->sdiodev->dev, pkt, ret != 0);
1904
1905 if (free_pkt)
1906 brcmu_pkt_buf_free_skb(pkt);
1907
1908 return ret; 1901 return ret;
1909} 1902}
1910 1903
@@ -1932,7 +1925,7 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
1932 spin_unlock_bh(&bus->txqlock); 1925 spin_unlock_bh(&bus->txqlock);
1933 datalen = pkt->len - SDPCM_HDRLEN; 1926 datalen = pkt->len - SDPCM_HDRLEN;
1934 1927
1935 ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, true); 1928 ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL);
1936 1929
1937 /* In poll mode, need to check for other events */ 1930 /* In poll mode, need to check for other events */
1938 if (!bus->intr && cnt) { 1931 if (!bus->intr && cnt) {
@@ -2343,7 +2336,6 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
2343 if (!brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec)) { 2336 if (!brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec)) {
2344 skb_pull(pkt, SDPCM_HDRLEN); 2337 skb_pull(pkt, SDPCM_HDRLEN);
2345 brcmf_txcomplete(bus->sdiodev->dev, pkt, false); 2338 brcmf_txcomplete(bus->sdiodev->dev, pkt, false);
2346 brcmu_pkt_buf_free_skb(pkt);
2347 brcmf_err("out of bus->txq !!!\n"); 2339 brcmf_err("out of bus->txq !!!\n");
2348 ret = -ENOSR; 2340 ret = -ENOSR;
2349 } else { 2341 } else {
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
new file mode 100644
index 000000000000..071d55f9cd4d
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
@@ -0,0 +1,382 @@
1/*
2 * Copyright (c) 2010 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16#include <linux/types.h>
17#include <linux/if_ether.h>
18#include <linux/spinlock.h>
19#include <linux/skbuff.h>
20#include <linux/netdevice.h>
21#include <linux/err.h>
22#include <uapi/linux/nl80211.h>
23
24#include <brcmu_utils.h>
25#include <brcmu_wifi.h>
26#include "dhd.h"
27#include "dhd_dbg.h"
28#include "fwil.h"
29#include "fweh.h"
30#include "fwsignal.h"
31
32/**
33 * DOC: Firmware Signalling
34 *
35 * Firmware can send signals to the host and vice versa; these are passed in
36 * the data packets using a TLV-based header. This signalling layer sits on
37 * top of the BDC bus protocol layer.
38 */
39
40/*
41 * single definition for firmware-driver flow control TLVs.
42 *
43 * each TLV is specified by BRCMF_FWS_TLV_DEF(name, ID, length).
44 * A length value of 0 indicates a variable-length TLV.
45 */
46#define BRCMF_FWS_TLV_DEFLIST \
47 BRCMF_FWS_TLV_DEF(MAC_OPEN, 1, 1) \
48 BRCMF_FWS_TLV_DEF(MAC_CLOSE, 2, 1) \
49 BRCMF_FWS_TLV_DEF(MAC_REQUEST_CREDIT, 3, 2) \
50 BRCMF_FWS_TLV_DEF(TXSTATUS, 4, 4) \
51 BRCMF_FWS_TLV_DEF(PKTTAG, 5, 4) \
52 BRCMF_FWS_TLV_DEF(MACDESC_ADD, 6, 8) \
53 BRCMF_FWS_TLV_DEF(MACDESC_DEL, 7, 8) \
54 BRCMF_FWS_TLV_DEF(RSSI, 8, 1) \
55 BRCMF_FWS_TLV_DEF(INTERFACE_OPEN, 9, 1) \
56 BRCMF_FWS_TLV_DEF(INTERFACE_CLOSE, 10, 1) \
57 BRCMF_FWS_TLV_DEF(FIFO_CREDITBACK, 11, 8) \
58 BRCMF_FWS_TLV_DEF(PENDING_TRAFFIC_BMP, 12, 2) \
59 BRCMF_FWS_TLV_DEF(MAC_REQUEST_PACKET, 13, 3) \
60 BRCMF_FWS_TLV_DEF(HOST_REORDER_RXPKTS, 14, 10) \
61 BRCMF_FWS_TLV_DEF(TRANS_ID, 18, 6) \
62 BRCMF_FWS_TLV_DEF(COMP_TXSTATUS, 19, 1) \
63 BRCMF_FWS_TLV_DEF(FILLER, 255, 0)
64
65/**
66 * enum brcmf_fws_tlv_type - definition of tlv identifiers.
67 */
68#define BRCMF_FWS_TLV_DEF(name, id, len) \
69 BRCMF_FWS_TYPE_ ## name = id,
70enum brcmf_fws_tlv_type {
71 BRCMF_FWS_TLV_DEFLIST
72 BRCMF_FWS_TYPE_INVALID
73};
74#undef BRCMF_FWS_TLV_DEF
75
76/**
77 * enum brcmf_fws_tlv_len - length values for tlvs.
78 */
79#define BRCMF_FWS_TLV_DEF(name, id, len) \
80 BRCMF_FWS_TYPE_ ## name ## _LEN = len,
81enum brcmf_fws_tlv_len {
82 BRCMF_FWS_TLV_DEFLIST
83};
84#undef BRCMF_FWS_TLV_DEF
85
86#ifdef DEBUG
87/**
88 * brcmf_fws_tlv_names - array of tlv names.
89 */
90#define BRCMF_FWS_TLV_DEF(name, id, len) \
91 { id, #name },
92static struct {
93 enum brcmf_fws_tlv_type id;
94 const char *name;
95} brcmf_fws_tlv_names[] = {
96 BRCMF_FWS_TLV_DEFLIST
97};
98#undef BRCMF_FWS_TLV_DEF
99
100static const char *brcmf_fws_get_tlv_name(enum brcmf_fws_tlv_type id)
101{
102 int i;
103
104 for (i = 0; i < ARRAY_SIZE(brcmf_fws_tlv_names); i++)
105 if (brcmf_fws_tlv_names[i].id == id)
106 return brcmf_fws_tlv_names[i].name;
107
108 return "INVALID";
109}
110#else
111static const char *brcmf_fws_get_tlv_name(enum brcmf_fws_tlv_type id)
112{
113 return "NODEBUG";
114}
115#endif /* DEBUG */
116
117/**
118 * flags used to enable TLV signalling from firmware.
119 */
120#define BRCMF_FWS_FLAGS_RSSI_SIGNALS 0x0001
121#define BRCMF_FWS_FLAGS_XONXOFF_SIGNALS 0x0002
122#define BRCMF_FWS_FLAGS_CREDIT_STATUS_SIGNALS 0x0004
123#define BRCMF_FWS_FLAGS_HOST_PROPTXSTATUS_ACTIVE 0x0008
124#define BRCMF_FWS_FLAGS_PSQ_GENERATIONFSM_ENABLE 0x0010
125#define BRCMF_FWS_FLAGS_PSQ_ZERO_BUFFER_ENABLE 0x0020
126#define BRCMF_FWS_FLAGS_HOST_RXREORDER_ACTIVE 0x0040
127
128#define BRCMF_FWS_HANGER_MAXITEMS 1024
129#define BRCMF_FWS_HANGER_ITEM_STATE_FREE 1
130#define BRCMF_FWS_HANGER_ITEM_STATE_INUSE 2
131#define BRCMF_FWS_HANGER_ITEM_STATE_INUSE_SUPPRESSED 3
132
133#define BRCMF_FWS_STATE_OPEN 1
134#define BRCMF_FWS_STATE_CLOSE 2
135
136#define BRCMF_FWS_FCMODE_NONE 0
137#define BRCMF_FWS_FCMODE_IMPLIED_CREDIT 1
138#define BRCMF_FWS_FCMODE_EXPLICIT_CREDIT 2
139
140#define BRCMF_FWS_MAC_DESC_TABLE_SIZE 32
141#define BRCMF_FWS_MAX_IFNUM 16
142#define BRCMF_FWS_MAC_DESC_ID_INVALID 0xff
143
144#define BRCMF_FWS_HOSTIF_FLOWSTATE_OFF 0
145#define BRCMF_FWS_HOSTIF_FLOWSTATE_ON 1
146
147/**
148 * FWFC packet identifier
149 *
150 * 32-bit packet identifier used in PKTTAG tlv from host to dongle.
151 *
152 * - Generated at the host (e.g. dhd)
153 * - Seen by wlc as a generic sequence number, except for the flags field
154 *
155 * Generation : b[31] => generation number for this packet [host->fw]
156 * OR, current generation number [fw->host]
157 * Flags : b[30:27] => command, status flags
158 * FIFO-AC : b[26:24] => AC-FIFO id
159 * h-slot : b[23:8] => hanger-slot
160 * freerun : b[7:0] => A free running counter
161 */
162#define BRCMF_FWS_PKTTAG_GENERATION_MASK 0x80000000
163#define BRCMF_FWS_PKTTAG_GENERATION_SHIFT 31
164#define BRCMF_FWS_PKTTAG_FLAGS_MASK 0x78000000
165#define BRCMF_FWS_PKTTAG_FLAGS_SHIFT 27
166#define BRCMF_FWS_PKTTAG_FIFO_MASK 0x07000000
167#define BRCMF_FWS_PKTTAG_FIFO_SHIFT 24
168#define BRCMF_FWS_PKTTAG_HSLOT_MASK 0x00ffff00
169#define BRCMF_FWS_PKTTAG_HSLOT_SHIFT 8
170#define BRCMF_FWS_PKTTAG_FREERUN_MASK 0x000000ff
171#define BRCMF_FWS_PKTTAG_FREERUN_SHIFT 0
172
173#define brcmf_fws_pkttag_set_field(var, field, value) \
174 brcmu_maskset32((var), BRCMF_FWS_PKTTAG_ ## field ## _MASK, \
175 BRCMF_FWS_PKTTAG_ ## field ## _SHIFT, (value))
176#define brcmf_fws_pkttag_get_field(var, field) \
177 brcmu_maskget32((var), BRCMF_FWS_PKTTAG_ ## field ## _MASK, \
178 BRCMF_FWS_PKTTAG_ ## field ## _SHIFT)
179
180struct brcmf_fws_info {
181 struct brcmf_pub *drvr;
182 struct brcmf_fws_stats stats;
183};
184
185static int brcmf_fws_rssi_indicate(struct brcmf_fws_info *fws, s8 rssi)
186{
187 brcmf_dbg(CTL, "rssi %d\n", rssi);
188 return 0;
189}
190
191static int brcmf_fws_dbg_seqnum_check(struct brcmf_fws_info *fws, u8 *data)
192{
193 __le32 timestamp;
194
195 memcpy(&timestamp, &data[2], sizeof(timestamp));
196 brcmf_dbg(INFO, "received: seq %d, timestamp %d\n", data[1],
197 le32_to_cpu(timestamp));
198 return 0;
199}
200
201/* using a macro so sparse checking does not complain
202 * about locking imbalance.
203 */
204#define brcmf_fws_lock(drvr, flags) \
205do { \
206 flags = 0; \
207 spin_lock_irqsave(&((drvr)->fws_spinlock), (flags)); \
208} while (0)
209
210/* using a macro so sparse checking does not complain
211 * about locking imbalance.
212 */
213#define brcmf_fws_unlock(drvr, flags) \
214 spin_unlock_irqrestore(&((drvr)->fws_spinlock), (flags))
215
216int brcmf_fws_init(struct brcmf_pub *drvr)
217{
218 u32 tlv;
219 int rc;
220
221 /* enable rssi signals */
222 tlv = drvr->fw_signals ? BRCMF_FWS_FLAGS_RSSI_SIGNALS : 0;
223
224 spin_lock_init(&drvr->fws_spinlock);
225
226 drvr->fws = kzalloc(sizeof(*(drvr->fws)), GFP_KERNEL);
227 if (!drvr->fws) {
228 rc = -ENOMEM;
229 goto fail;
230 }
231
232 /* enable proptxstatus signaling by default */
233 rc = brcmf_fil_iovar_int_set(drvr->iflist[0], "tlv", tlv);
234 if (rc < 0) {
235 brcmf_err("failed to set bdcv2 tlv signaling\n");
236 goto fail;
237 }
238 /* set linkage back */
239 drvr->fws->drvr = drvr;
240
241 /* create debugfs file for statistics */
242 brcmf_debugfs_create_fws_stats(drvr, &drvr->fws->stats);
243
244 /* TODO: remove upon feature delivery */
245 brcmf_err("%s bdcv2 tlv signaling [%x]\n",
246 drvr->fw_signals ? "enabled" : "disabled", tlv);
247 return 0;
248
249fail:
250 /* disable flow control entirely */
251 drvr->fw_signals = false;
252 brcmf_fws_deinit(drvr);
253 return rc;
254}
255
256void brcmf_fws_deinit(struct brcmf_pub *drvr)
257{
258 /* free top structure */
259 kfree(drvr->fws);
260 drvr->fws = NULL;
261}
262
263int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
264 struct sk_buff *skb)
265{
266 struct brcmf_fws_info *fws = drvr->fws;
267 ulong flags;
268 u8 *signal_data;
269 s16 data_len;
270 u8 type;
271 u8 len;
272 u8 *data;
273
274 brcmf_dbg(TRACE, "enter: ifidx %d, skblen %u, sig %d\n",
275 ifidx, skb->len, signal_len);
276
277 WARN_ON(signal_len > skb->len);
278
279 /* if flow control disabled, skip to packet data and leave */
280 if (!signal_len || !drvr->fw_signals) {
281 skb_pull(skb, signal_len);
282 return 0;
283 }
284
285 /* lock during tlv parsing */
286 brcmf_fws_lock(drvr, flags);
287
288 fws->stats.header_pulls++;
289 data_len = signal_len;
290 signal_data = skb->data;
291
292 while (data_len > 0) {
293 /* extract tlv info */
294 type = signal_data[0];
295
296 /* FILLER type is actually not a TLV, but
297 * a single byte that can be skipped.
298 */
299 if (type == BRCMF_FWS_TYPE_FILLER) {
300 signal_data += 1;
301 data_len -= 1;
302 continue;
303 }
304 len = signal_data[1];
305 data = signal_data + 2;
306
307 /* abort parsing when length invalid */
308 if (data_len < len + 2)
309 break;
310
311 brcmf_dbg(INFO, "tlv type=%d (%s), len=%d\n", type,
312 brcmf_fws_get_tlv_name(type), len);
313 switch (type) {
314 case BRCMF_FWS_TYPE_MAC_OPEN:
315 case BRCMF_FWS_TYPE_MAC_CLOSE:
316 WARN_ON(len != BRCMF_FWS_TYPE_MAC_OPEN_LEN);
317 break;
318 case BRCMF_FWS_TYPE_MAC_REQUEST_CREDIT:
319 WARN_ON(len != BRCMF_FWS_TYPE_MAC_REQUEST_CREDIT_LEN);
320 break;
321 case BRCMF_FWS_TYPE_TXSTATUS:
322 WARN_ON(len != BRCMF_FWS_TYPE_TXSTATUS_LEN);
323 break;
324 case BRCMF_FWS_TYPE_PKTTAG:
325 WARN_ON(len != BRCMF_FWS_TYPE_PKTTAG_LEN);
326 break;
327 case BRCMF_FWS_TYPE_MACDESC_ADD:
328 case BRCMF_FWS_TYPE_MACDESC_DEL:
329 WARN_ON(len != BRCMF_FWS_TYPE_MACDESC_ADD_LEN);
330 break;
331 case BRCMF_FWS_TYPE_RSSI:
332 WARN_ON(len != BRCMF_FWS_TYPE_RSSI_LEN);
333 brcmf_fws_rssi_indicate(fws, *(s8 *)data);
334 break;
335 case BRCMF_FWS_TYPE_INTERFACE_OPEN:
336 case BRCMF_FWS_TYPE_INTERFACE_CLOSE:
337 WARN_ON(len != BRCMF_FWS_TYPE_INTERFACE_OPEN_LEN);
338 break;
339 case BRCMF_FWS_TYPE_FIFO_CREDITBACK:
340 WARN_ON(len != BRCMF_FWS_TYPE_FIFO_CREDITBACK_LEN);
341 break;
342 case BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP:
343 WARN_ON(len != BRCMF_FWS_TYPE_PENDING_TRAFFIC_BMP_LEN);
344 break;
345 case BRCMF_FWS_TYPE_MAC_REQUEST_PACKET:
346 WARN_ON(len != BRCMF_FWS_TYPE_MAC_REQUEST_PACKET_LEN);
347 break;
348 case BRCMF_FWS_TYPE_HOST_REORDER_RXPKTS:
349 WARN_ON(len != BRCMF_FWS_TYPE_HOST_REORDER_RXPKTS_LEN);
350 break;
351 case BRCMF_FWS_TYPE_TRANS_ID:
352 WARN_ON(len != BRCMF_FWS_TYPE_TRANS_ID_LEN);
353 brcmf_fws_dbg_seqnum_check(fws, data);
354 break;
355 case BRCMF_FWS_TYPE_COMP_TXSTATUS:
356 WARN_ON(len != BRCMF_FWS_TYPE_COMP_TXSTATUS_LEN);
357 break;
358 default:
359 fws->stats.tlv_invalid_type++;
360 break;
361 }
362
363 signal_data += len + 2;
364 data_len -= len + 2;
365 }
366
367 if (data_len != 0)
368 fws->stats.tlv_parse_failed++;
369
370 /* signalling processing result does
371 * not affect the actual ethernet packet.
372 */
373 skb_pull(skb, signal_len);
374
375 /* this may be a signal-only packet
376 */
377 if (skb->len == 0)
378 fws->stats.header_only_pkt++;
379
380 brcmf_fws_unlock(drvr, flags);
381 return 0;
382}
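
BRCMF_FWS_TLV_DEFLIST above is an X-macro: the same list is expanded three times with different definitions of BRCMF_FWS_TLV_DEF to generate the type enum, the length enum and, under DEBUG, the name table, so ids, lengths and names cannot drift apart. A reduced sketch of the technique (DEMO_* names are illustrative):

/* reduced X-macro sketch of the TLV tables above */
#define DEMO_TLV_LIST \
	DEF(MAC_OPEN, 1, 1) \
	DEF(RSSI,     8, 1)

#define DEF(name, id, len) DEMO_TYPE_ ## name = id,
enum demo_tlv_type { DEMO_TLV_LIST };
#undef DEF

#define DEF(name, id, len) DEMO_TYPE_ ## name ## _LEN = len,
enum demo_tlv_len { DEMO_TLV_LIST };
#undef DEF
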
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h
new file mode 100644
index 000000000000..e728eea72bb4
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.h
@@ -0,0 +1,25 @@
1/*
2 * Copyright (c) 2012 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17
18#ifndef FWSIGNAL_H_
19#define FWSIGNAL_H_
20
21int brcmf_fws_init(struct brcmf_pub *drvr);
22void brcmf_fws_deinit(struct brcmf_pub *drvr);
23int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
24 struct sk_buff *skb);
25#endif /* FWSIGNAL_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.c b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.c
new file mode 100644
index 000000000000..b505db48c60d
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.c
@@ -0,0 +1,22 @@
1/*
2 * Copyright (c) 2012 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/module.h> /* bug in tracepoint.h: it should include this */
18
19#ifndef __CHECKER__
20#define CREATE_TRACE_POINTS
21#include "tracepoint.h"
22#endif
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
new file mode 100644
index 000000000000..35efc7a67644
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/tracepoint.h
@@ -0,0 +1,87 @@
1/*
2 * Copyright (c) 2013 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16#if !defined(BRCMF_TRACEPOINT_H_) || defined(TRACE_HEADER_MULTI_READ)
17#define BRCMF_TRACEPOINT_H_
18
19#include <linux/types.h>
20#include <linux/tracepoint.h>
21
22#ifndef CONFIG_BRCM_TRACING
23
24#undef TRACE_EVENT
25#define TRACE_EVENT(name, proto, ...) \
26static inline void trace_ ## name(proto) {}
27
28#undef DECLARE_EVENT_CLASS
29#define DECLARE_EVENT_CLASS(...)
30
31#undef DEFINE_EVENT
32#define DEFINE_EVENT(evt_class, name, proto, ...) \
33static inline void trace_ ## name(proto) {}
34
35#endif /* CONFIG_BRCM_TRACING */
36
37#undef TRACE_SYSTEM
38#define TRACE_SYSTEM brcmfmac
39
40#define MAX_MSG_LEN 100
41
42TRACE_EVENT(brcmf_err,
43 TP_PROTO(const char *func, struct va_format *vaf),
44 TP_ARGS(func, vaf),
45 TP_STRUCT__entry(
46 __string(func, func)
47 __dynamic_array(char, msg, MAX_MSG_LEN)
48 ),
49 TP_fast_assign(
50 __assign_str(func, func);
51 WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
52 MAX_MSG_LEN, vaf->fmt,
53 *vaf->va) >= MAX_MSG_LEN);
54 ),
55 TP_printk("%s: %s", __get_str(func), __get_str(msg))
56);
57
58TRACE_EVENT(brcmf_dbg,
59 TP_PROTO(u32 level, const char *func, struct va_format *vaf),
60 TP_ARGS(level, func, vaf),
61 TP_STRUCT__entry(
62 __field(u32, level)
63 __string(func, func)
64 __dynamic_array(char, msg, MAX_MSG_LEN)
65 ),
66 TP_fast_assign(
67 __entry->level = level;
68 __assign_str(func, func);
69 WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
70 MAX_MSG_LEN, vaf->fmt,
71 *vaf->va) >= MAX_MSG_LEN);
72 ),
73 TP_printk("%s: %s", __get_str(func), __get_str(msg))
74);
75
76#ifdef CONFIG_BRCM_TRACING
77
78#undef TRACE_INCLUDE_PATH
79#define TRACE_INCLUDE_PATH .
80#undef TRACE_INCLUDE_FILE
81#define TRACE_INCLUDE_FILE tracepoint
82
83#include <trace/define_trace.h>
84
85#endif /* CONFIG_BRCM_TRACING */
86
87#endif /* BRCMF_TRACEPOINT_H_ */
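
With CONFIG_BRCM_TRACING disabled, the header above redefines TRACE_EVENT() to an empty static inline, so the trace_brcmf_err()/trace_brcmf_dbg() call sites compile unchanged; only with tracing enabled does define_trace.h emit real tracepoints, instantiated once via CREATE_TRACE_POINTS in tracepoint.c. The stub pattern in isolation (CONFIG_DEMO_TRACING is an illustrative name):

/* sketch of the tracing stub pattern used above */
#ifndef CONFIG_DEMO_TRACING
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, ...) \
	static inline void trace_ ## name(proto) {}
#endif
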
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 42289e9ea886..01aed7ad6bec 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -112,11 +112,6 @@ struct brcmf_usbdev_info {
112static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo, 112static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo,
113 struct brcmf_usbreq *req); 113 struct brcmf_usbreq *req);
114 114
115MODULE_AUTHOR("Broadcom Corporation");
116MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN fullmac usb driver.");
117MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN fullmac usb cards");
118MODULE_LICENSE("Dual BSD/GPL");
119
120static struct brcmf_usbdev *brcmf_usb_get_buspub(struct device *dev) 115static struct brcmf_usbdev *brcmf_usb_get_buspub(struct device *dev)
121{ 116{
122 struct brcmf_bus *bus_if = dev_get_drvdata(dev); 117 struct brcmf_bus *bus_if = dev_get_drvdata(dev);
@@ -422,8 +417,6 @@ static void brcmf_usb_tx_complete(struct urb *urb)
422 brcmf_usb_del_fromq(devinfo, req); 417 brcmf_usb_del_fromq(devinfo, req);
423 418
424 brcmf_txcomplete(devinfo->dev, req->skb, urb->status == 0); 419 brcmf_txcomplete(devinfo->dev, req->skb, urb->status == 0);
425
426 brcmu_pkt_buf_free_skb(req->skb);
427 req->skb = NULL; 420 req->skb = NULL;
428 brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req, &devinfo->tx_freecount); 421 brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req, &devinfo->tx_freecount);
429 if (devinfo->tx_freecount > devinfo->tx_high_watermark && 422 if (devinfo->tx_freecount > devinfo->tx_high_watermark &&
@@ -577,15 +570,17 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
577 int ret; 570 int ret;
578 571
579 brcmf_dbg(USB, "Enter, skb=%p\n", skb); 572 brcmf_dbg(USB, "Enter, skb=%p\n", skb);
580 if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP) 573 if (devinfo->bus_pub.state != BRCMFMAC_USB_STATE_UP) {
581 return -EIO; 574 ret = -EIO;
575 goto fail;
576 }
582 577
583 req = brcmf_usb_deq(devinfo, &devinfo->tx_freeq, 578 req = brcmf_usb_deq(devinfo, &devinfo->tx_freeq,
584 &devinfo->tx_freecount); 579 &devinfo->tx_freecount);
585 if (!req) { 580 if (!req) {
586 brcmu_pkt_buf_free_skb(skb);
587 brcmf_err("no req to send\n"); 581 brcmf_err("no req to send\n");
588 return -ENOMEM; 582 ret = -ENOMEM;
583 goto fail;
589 } 584 }
590 585
591 req->skb = skb; 586 req->skb = skb;
@@ -598,18 +593,21 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
598 if (ret) { 593 if (ret) {
599 brcmf_err("brcmf_usb_tx usb_submit_urb FAILED\n"); 594 brcmf_err("brcmf_usb_tx usb_submit_urb FAILED\n");
600 brcmf_usb_del_fromq(devinfo, req); 595 brcmf_usb_del_fromq(devinfo, req);
601 brcmu_pkt_buf_free_skb(req->skb);
602 req->skb = NULL; 596 req->skb = NULL;
603 brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req, 597 brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req,
604 &devinfo->tx_freecount); 598 &devinfo->tx_freecount);
605 } else { 599 goto fail;
606 if (devinfo->tx_freecount < devinfo->tx_low_watermark &&
607 !devinfo->tx_flowblock) {
608 brcmf_txflowblock(dev, true);
609 devinfo->tx_flowblock = true;
610 }
611 } 600 }
612 601
602 if (devinfo->tx_freecount < devinfo->tx_low_watermark &&
603 !devinfo->tx_flowblock) {
604 brcmf_txflowblock(dev, true);
605 devinfo->tx_flowblock = true;
606 }
607 return 0;
608
609fail:
610 brcmf_txcomplete(dev, skb, false);
613 return ret; 611 return ret;
614} 612}
615 613
@@ -1485,6 +1483,7 @@ static struct usb_device_id brcmf_usb_devid_table[] = {
1485 { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_BCMFW) }, 1483 { USB_DEVICE(BRCMF_USB_VENDOR_ID_BROADCOM, BRCMF_USB_DEVICE_ID_BCMFW) },
1486 { } 1484 { }
1487}; 1485};
1486
1488MODULE_DEVICE_TABLE(usb, brcmf_usb_devid_table); 1487MODULE_DEVICE_TABLE(usb, brcmf_usb_devid_table);
1489MODULE_FIRMWARE(BRCMF_USB_43143_FW_NAME); 1488MODULE_FIRMWARE(BRCMF_USB_43143_FW_NAME);
1490MODULE_FIRMWARE(BRCMF_USB_43236_FW_NAME); 1489MODULE_FIRMWARE(BRCMF_USB_43236_FW_NAME);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 2af9c0f0798d..804473fc5c5e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -3052,16 +3052,16 @@ brcmf_cfg80211_sched_scan_start(struct wiphy *wiphy,
3052 int i; 3052 int i;
3053 int ret = 0; 3053 int ret = 0;
3054 3054
3055 brcmf_dbg(SCAN, "Enter n_match_sets:%d n_ssids:%d\n", 3055 brcmf_dbg(SCAN, "Enter n_match_sets:%d n_ssids:%d\n",
3056 request->n_match_sets, request->n_ssids); 3056 request->n_match_sets, request->n_ssids);
3057 if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) { 3057 if (test_bit(BRCMF_SCAN_STATUS_BUSY, &cfg->scan_status)) {
3058 brcmf_err("Scanning already: status (%lu)\n", cfg->scan_status); 3058 brcmf_err("Scanning already: status (%lu)\n", cfg->scan_status);
3059 return -EAGAIN; 3059 return -EAGAIN;
3060 } 3060 }
3061 3061
3062 if (!request || !request->n_ssids || !request->n_match_sets) { 3062 if (!request->n_ssids || !request->n_match_sets) {
3063 brcmf_err("Invalid sched scan req!! n_ssids:%d\n", 3063 brcmf_err("Invalid sched scan req!! n_ssids:%d\n",
3064 request ? request->n_ssids : 0); 3064 request->n_ssids);
3065 return -EINVAL; 3065 return -EINVAL;
3066 } 3066 }
3067 3067
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/Makefile b/drivers/net/wireless/brcm80211/brcmsmac/Makefile
index d3d4151c3eda..cba19d839b77 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmsmac/Makefile
@@ -43,6 +43,10 @@ BRCMSMAC_OFILES := \
43 brcms_trace_events.o \ 43 brcms_trace_events.o \
44 debug.o 44 debug.o
45 45
46ifdef CONFIG_BCMA_DRIVER_GPIO
47BRCMSMAC_OFILES += led.o
48endif
49
46MODULEPFX := brcmsmac 50MODULEPFX := brcmsmac
47 51
48obj-$(CONFIG_BRCMSMAC) += $(MODULEPFX).o 52obj-$(CONFIG_BRCMSMAC) += $(MODULEPFX).o
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/led.c b/drivers/net/wireless/brcm80211/brcmsmac/led.c
new file mode 100644
index 000000000000..74b17cecb189
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/led.c
@@ -0,0 +1,126 @@
1#include <net/mac80211.h>
2#include <linux/bcma/bcma_driver_chipcommon.h>
3#include <linux/gpio.h>
4
5#include "mac80211_if.h"
6#include "pub.h"
7#include "main.h"
8#include "led.h"
9
10 /* number of LEDs */
11#define BRCMS_LED_NO 4
12 /* behavior mask */
13#define BRCMS_LED_BEH_MASK 0x7f
14 /* active-low (polarity) bit */
15#define BRCMS_LED_AL_MASK 0x80
16 /* radio enabled */
17#define BRCMS_LED_RADIO 3
18
19static void brcms_radio_led_ctrl(struct brcms_info *wl, bool state)
20{
21 if (wl->radio_led.gpio == -1)
22 return;
23
24 if (wl->radio_led.active_low)
25 state = !state;
26
27 if (state)
28 gpio_set_value(wl->radio_led.gpio, 1);
29 else
30 gpio_set_value(wl->radio_led.gpio, 0);
31}
32
33
34/* Callback from the LED subsystem. */
35static void brcms_led_brightness_set(struct led_classdev *led_dev,
36 enum led_brightness brightness)
37{
38 struct brcms_info *wl = container_of(led_dev,
39 struct brcms_info, led_dev);
40 brcms_radio_led_ctrl(wl, brightness);
41}
42
43void brcms_led_unregister(struct brcms_info *wl)
44{
45 if (wl->led_dev.dev)
46 led_classdev_unregister(&wl->led_dev);
47 if (wl->radio_led.gpio != -1)
48 gpio_free(wl->radio_led.gpio);
49}
50
51int brcms_led_register(struct brcms_info *wl)
52{
53 int i, err;
54 struct brcms_led *radio_led = &wl->radio_led;
55 /* get CC core */
56 struct bcma_drv_cc *cc_drv = &wl->wlc->hw->d11core->bus->drv_cc;
57 struct gpio_chip *bcma_gpio = &cc_drv->gpio;
58 struct ssb_sprom *sprom = &wl->wlc->hw->d11core->bus->sprom;
59 u8 *leds[] = { &sprom->gpio0,
60 &sprom->gpio1,
61 &sprom->gpio2,
62 &sprom->gpio3 };
63 unsigned gpio = -1;
64 bool active_low = false;
65
66 /* none by default */
67 radio_led->gpio = -1;
68 radio_led->active_low = false;
69
70 if (!bcma_gpio || !gpio_is_valid(bcma_gpio->base))
71 return -ENODEV;
72
73 /* find radio enabled LED */
74 for (i = 0; i < BRCMS_LED_NO; i++) {
75 u8 led = *leds[i];
76 if ((led & BRCMS_LED_BEH_MASK) == BRCMS_LED_RADIO) {
77 gpio = bcma_gpio->base + i;
78 if (led & BRCMS_LED_AL_MASK)
79 active_low = true;
80 break;
81 }
82 }
83
84 if (gpio == -1 || !gpio_is_valid(gpio))
85 return -ENODEV;
86
87 /* request and configure LED gpio */
88 err = gpio_request_one(gpio,
89 active_low ? GPIOF_OUT_INIT_HIGH
90 : GPIOF_OUT_INIT_LOW,
91 "radio on");
92 if (err) {
93 wiphy_err(wl->wiphy, "requesting led gpio %d failed (err: %d)\n",
94 gpio, err);
95 return err;
96 }
97 err = gpio_direction_output(gpio, 1);
98 if (err) {
99 wiphy_err(wl->wiphy, "cannot set led gpio %d to output (err: %d)\n",
100 gpio, err);
101 return err;
102 }
103
104 snprintf(wl->radio_led.name, sizeof(wl->radio_led.name),
105 "brcmsmac-%s:radio", wiphy_name(wl->wiphy));
106
107 wl->led_dev.name = wl->radio_led.name;
108 wl->led_dev.default_trigger =
109 ieee80211_get_radio_led_name(wl->pub->ieee_hw);
110 wl->led_dev.brightness_set = brcms_led_brightness_set;
111 err = led_classdev_register(wiphy_dev(wl->wiphy), &wl->led_dev);
112
113 if (err) {
114 wiphy_err(wl->wiphy, "cannot register led device: %s (err: %d)\n",
115 wl->radio_led.name, err);
116 return err;
117 }
118
119 wiphy_info(wl->wiphy, "registered radio enabled led device: %s gpio: %d\n",
120 wl->radio_led.name,
121 gpio);
122 radio_led->gpio = gpio;
123 radio_led->active_low = active_low;
124
125 return 0;
126}
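
The search loop above decodes the per-GPIO behaviour byte from the SPROM: the low 7 bits select the function (3 means "radio enabled") and bit 7 marks the line as active-low. The decoding in isolation, as a sketch using the same constants:

/* sketch of the SPROM LED behaviour byte decoding used above */
#include <linux/types.h>

#define LED_BEH_MASK	0x7f	/* behaviour selector */
#define LED_AL_MASK	0x80	/* active-low bit */
#define LED_RADIO	3	/* "radio enabled" function */

static bool is_radio_led(u8 led, bool *active_low)
{
	*active_low = !!(led & LED_AL_MASK);
	return (led & LED_BEH_MASK) == LED_RADIO;
}
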
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/led.h b/drivers/net/wireless/brcm80211/brcmsmac/led.h
new file mode 100644
index 000000000000..17a0b1f5dbcf
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmsmac/led.h
@@ -0,0 +1,36 @@
1/*
2 * Copyright (c) 2012 Broadcom Corporation
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef _BRCM_LED_H_
18#define _BRCM_LED_H_
19struct brcms_led {
20 char name[32];
21 unsigned gpio;
22 bool active_low;
23};
24
25#ifdef CONFIG_BCMA_DRIVER_GPIO
26void brcms_led_unregister(struct brcms_info *wl);
27int brcms_led_register(struct brcms_info *wl);
28#else
29static inline void brcms_led_unregister(struct brcms_info *wl) {};
30static inline int brcms_led_register(struct brcms_info *wl)
31{
32 return -ENOTSUPP;
33};
34#endif
35
36#endif /* _BRCM_LED_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index c6451c61407a..c70cf7b654cd 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -34,6 +34,7 @@
34#include "mac80211_if.h" 34#include "mac80211_if.h"
35#include "main.h" 35#include "main.h"
36#include "debug.h" 36#include "debug.h"
37#include "led.h"
37 38
38#define N_TX_QUEUES 4 /* #tx queues on mac80211<->driver interface */ 39#define N_TX_QUEUES 4 /* #tx queues on mac80211<->driver interface */
39#define BRCMS_FLUSH_TIMEOUT 500 /* msec */ 40#define BRCMS_FLUSH_TIMEOUT 500 /* msec */
@@ -904,6 +905,7 @@ static void brcms_remove(struct bcma_device *pdev)
904 struct brcms_info *wl = hw->priv; 905 struct brcms_info *wl = hw->priv;
905 906
906 if (wl->wlc) { 907 if (wl->wlc) {
908 brcms_led_unregister(wl);
907 wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false); 909 wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false);
908 wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy); 910 wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
909 ieee80211_unregister_hw(hw); 911 ieee80211_unregister_hw(hw);
@@ -1151,6 +1153,8 @@ static int brcms_bcma_probe(struct bcma_device *pdev)
1151 pr_err("%s: brcms_attach failed!\n", __func__); 1153 pr_err("%s: brcms_attach failed!\n", __func__);
1152 return -ENODEV; 1154 return -ENODEV;
1153 } 1155 }
1156 brcms_led_register(wl);
1157
1154 return 0; 1158 return 0;
1155} 1159}
1156 1160
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
index 947ccacf43e6..4090032e81a2 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
@@ -20,8 +20,10 @@
20#include <linux/timer.h> 20#include <linux/timer.h>
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/workqueue.h> 22#include <linux/workqueue.h>
23#include <linux/leds.h>
23 24
24#include "ucode_loader.h" 25#include "ucode_loader.h"
26#include "led.h"
25/* 27/*
26 * Starting index for 5G rates in the 28 * Starting index for 5G rates in the
27 * legacy rate table. 29 * legacy rate table.
@@ -81,6 +83,8 @@ struct brcms_info {
81 struct wiphy *wiphy; 83 struct wiphy *wiphy;
82 struct brcms_ucode ucode; 84 struct brcms_ucode ucode;
83 bool mute_tx; 85 bool mute_tx;
86 struct brcms_led radio_led;
87 struct led_classdev led_dev;
84}; 88};
85 89
86/* misc callbacks */ 90/* misc callbacks */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 8ef02dca8f8c..0c8e998bfb1e 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -7810,9 +7810,14 @@ void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
7810 7810
7811 /* read the ucode version if we have not yet done so */ 7811 /* read the ucode version if we have not yet done so */
7812 if (wlc->ucode_rev == 0) { 7812 if (wlc->ucode_rev == 0) {
7813 wlc->ucode_rev = 7813 u16 rev;
7814 brcms_b_read_shm(wlc->hw, M_BOM_REV_MAJOR) << NBITS(u16); 7814 u16 patch;
7815 wlc->ucode_rev |= brcms_b_read_shm(wlc->hw, M_BOM_REV_MINOR); 7815
7816 rev = brcms_b_read_shm(wlc->hw, M_BOM_REV_MAJOR);
7817 patch = brcms_b_read_shm(wlc->hw, M_BOM_REV_MINOR);
7818 wlc->ucode_rev = (rev << NBITS(u16)) | patch;
7819 snprintf(wlc->wiphy->fw_version,
7820 sizeof(wlc->wiphy->fw_version), "%u.%u", rev, patch);
7816 } 7821 }
7817 7822
7818 /* ..now really unleash hell (allow the MAC out of suspend) */ 7823 /* ..now really unleash hell (allow the MAC out of suspend) */
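
The rework reads the major and minor halves separately so they can feed both the packed ucode_rev and the wiphy fw_version string; NBITS(u16) is 16, so the packing is simply major << 16 | minor. A worked example:

/* worked example of the packing above */
#include <linux/types.h>

static u32 pack_ucode_rev(u16 major, u16 minor)
{
	/* e.g. major 610, minor 2 -> 0x02620002, fw_version "610.2" */
	return ((u32)major << 16) | minor;
}
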
diff --git a/drivers/net/wireless/brcm80211/brcmutil/utils.c b/drivers/net/wireless/brcm80211/brcmutil/utils.c
index 3e6405e06ac0..bf5e50fc21ba 100644
--- a/drivers/net/wireless/brcm80211/brcmutil/utils.c
+++ b/drivers/net/wireless/brcm80211/brcmutil/utils.c
@@ -116,6 +116,31 @@ struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec)
116} 116}
117EXPORT_SYMBOL(brcmu_pktq_pdeq); 117EXPORT_SYMBOL(brcmu_pktq_pdeq);
118 118
119/*
120 * precedence-based dequeue with match function. Passing a NULL pointer
121 * for the match function parameter is considered a wildcard, so
122 * any packet on the queue is returned. In that case it is no different
123 * from brcmu_pktq_pdeq() above.
124 */
125struct sk_buff *brcmu_pktq_pdeq_match(struct pktq *pq, int prec,
126 bool (*match_fn)(struct sk_buff *skb,
127 void *arg), void *arg)
128{
129 struct sk_buff_head *q;
130 struct sk_buff *p, *next;
131
132 q = &pq->q[prec].skblist;
133 skb_queue_walk_safe(q, p, next) {
134 if (match_fn == NULL || match_fn(p, arg)) {
135 skb_unlink(p, q);
136 pq->len--;
137 return p;
138 }
139 }
140 return NULL;
141}
142EXPORT_SYMBOL(brcmu_pktq_pdeq_match);
143
119struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec) 144struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec)
120{ 145{
121 struct sk_buff_head *q; 146 struct sk_buff_head *q;
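
A hedged usage sketch for the new dequeue-with-match primitive; the callback and the use of skb->cb[0] as an interface index are illustrative assumptions, not driver code:

#include <linux/skbuff.h>
#include <brcmu_utils.h>

static bool match_ifidx(struct sk_buff *skb, void *arg)
{
	/* illustrative: assumes cb[0] was set to the interface index */
	return skb->cb[0] == *(int *)arg;
}

static struct sk_buff *deq_for_ifidx(struct pktq *pq, int prec, int ifidx)
{
	/* a NULL match_fn would make this equivalent to brcmu_pktq_pdeq() */
	return brcmu_pktq_pdeq_match(pq, prec, match_ifidx, &ifidx);
}
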
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_utils.h b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
index 477b92ad3d62..898cacb8d01d 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_utils.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
@@ -120,6 +120,10 @@ extern struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
120 struct sk_buff *p); 120 struct sk_buff *p);
121extern struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec); 121extern struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec);
122extern struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec); 122extern struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec);
123extern struct sk_buff *brcmu_pktq_pdeq_match(struct pktq *pq, int prec,
124 bool (*match_fn)(struct sk_buff *p,
125 void *arg),
126 void *arg);
123 127
124/* packet primitives */ 128/* packet primitives */
125extern struct sk_buff *brcmu_pkt_buf_get_skb(uint len); 129extern struct sk_buff *brcmu_pkt_buf_get_skb(uint len);
@@ -173,6 +177,29 @@ extern void brcmu_pktq_flush(struct pktq *pq, bool dir,
173/* ip address */ 177/* ip address */
174struct ipv4_addr; 178struct ipv4_addr;
175 179
180/*
181 * bitfield macros using masking and shift
182 *
183 * remark: the mask parameter must already be shifted to the field's position.
184 */
185static inline void brcmu_maskset32(u32 *var, u32 mask, u8 shift, u32 value)
186{
187 value = (value << shift) & mask;
188 *var = (*var & ~mask) | value;
189}
190static inline u32 brcmu_maskget32(u32 var, u32 mask, u8 shift)
191{
192 return (var & mask) >> shift;
193}
194static inline void brcmu_maskset16(u16 *var, u16 mask, u8 shift, u16 value)
195{
196 value = (value << shift) & mask;
197 *var = (*var & ~mask) | value;
198}
199static inline u16 brcmu_maskget16(u16 var, u16 mask, u8 shift)
200{
201 return (var & mask) >> shift;
202}
176 203
177/* externs */ 204/* externs */
178/* format/print */ 205/* format/print */
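
A worked example of the mask helpers, using the PKTTAG h-slot and freerun fields laid out in fwsignal.c above:

/* worked example; masks match the PKTTAG layout in fwsignal.c */
static u32 demo_pkttag(void)
{
	u32 tag = 0;

	brcmu_maskset32(&tag, 0x00ffff00, 8, 0x1234);	/* h-slot */
	brcmu_maskset32(&tag, 0x000000ff, 0, 0x56);	/* freerun */
	/* tag == 0x00123456; brcmu_maskget32(tag, 0x00ffff00, 8) == 0x1234 */
	return tag;
}
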
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
index c353b5f19c8c..b37a582ccbe7 100644
--- a/drivers/net/wireless/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -3477,7 +3477,7 @@ static struct attribute_group il3945_attribute_group = {
3477 .attrs = il3945_sysfs_entries, 3477 .attrs = il3945_sysfs_entries,
3478}; 3478};
3479 3479
3480struct ieee80211_ops il3945_mac_ops = { 3480static struct ieee80211_ops il3945_mac_ops __read_mostly = {
3481 .tx = il3945_mac_tx, 3481 .tx = il3945_mac_tx,
3482 .start = il3945_mac_start, 3482 .start = il3945_mac_start,
3483 .stop = il3945_mac_stop, 3483 .stop = il3945_mac_stop,
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
index e0b9d7fa5de0..dc1e6da9976a 100644
--- a/drivers/net/wireless/iwlegacy/3945.c
+++ b/drivers/net/wireless/iwlegacy/3945.c
@@ -2379,10 +2379,8 @@ il3945_hw_set_hw_params(struct il_priv *il)
2379 il->_3945.shared_virt = 2379 il->_3945.shared_virt =
2380 dma_alloc_coherent(&il->pci_dev->dev, sizeof(struct il3945_shared), 2380 dma_alloc_coherent(&il->pci_dev->dev, sizeof(struct il3945_shared),
2381 &il->_3945.shared_phys, GFP_KERNEL); 2381 &il->_3945.shared_phys, GFP_KERNEL);
2382 if (!il->_3945.shared_virt) { 2382 if (!il->_3945.shared_virt)
2383 IL_ERR("failed to allocate pci memory\n");
2384 return -ENOMEM; 2383 return -ENOMEM;
2385 }
2386 2384
2387 il->hw_params.bcast_id = IL3945_BROADCAST_ID; 2385 il->hw_params.bcast_id = IL3945_BROADCAST_ID;
2388 2386
diff --git a/drivers/net/wireless/iwlegacy/3945.h b/drivers/net/wireless/iwlegacy/3945.h
index 1d45075e0d5b..9a8703def0ba 100644
--- a/drivers/net/wireless/iwlegacy/3945.h
+++ b/drivers/net/wireless/iwlegacy/3945.h
@@ -150,10 +150,6 @@ struct il3945_frame {
150 struct list_head list; 150 struct list_head list;
151}; 151};
152 152
153#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
154#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
155#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
156
157#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 153#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
158#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 154#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
159#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 155#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 7941eb3a0166..6affa7e8f017 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -612,7 +612,7 @@ il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
612 612
613/* Called for N_RX (legacy ABG frames), or 613/* Called for N_RX (legacy ABG frames), or
614 * N_RX_MPDU (HT high-throughput N frames). */ 614 * N_RX_MPDU (HT high-throughput N frames). */
615void 615static void
616il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb) 616il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
617{ 617{
618 struct ieee80211_hdr *header; 618 struct ieee80211_hdr *header;
@@ -744,7 +744,7 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
744 744
745/* Cache phy data (Rx signal strength, etc) for HT frame (N_RX_PHY). 745/* Cache phy data (Rx signal strength, etc) for HT frame (N_RX_PHY).
746 * This will be used later in il_hdl_rx() for N_RX_MPDU. */ 746 * This will be used later in il_hdl_rx() for N_RX_MPDU. */
747void 747static void
748il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb) 748il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb)
749{ 749{
750 struct il_rx_pkt *pkt = rxb_addr(rxb); 750 struct il_rx_pkt *pkt = rxb_addr(rxb);
@@ -1250,7 +1250,7 @@ il4965_dump_fh(struct il_priv *il, char **buf, bool display)
1250 return 0; 1250 return 0;
1251} 1251}
1252 1252
1253void 1253static void
1254il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb) 1254il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb)
1255{ 1255{
1256 struct il_rx_pkt *pkt = rxb_addr(rxb); 1256 struct il_rx_pkt *pkt = rxb_addr(rxb);
@@ -1357,7 +1357,7 @@ il4965_accumulative_stats(struct il_priv *il, __le32 * stats)
1357} 1357}
1358#endif 1358#endif
1359 1359
1360void 1360static void
1361il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb) 1361il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
1362{ 1362{
1363 const int recalib_seconds = 60; 1363 const int recalib_seconds = 60;
@@ -1399,7 +1399,7 @@ il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
1399 il4965_temperature_calib(il); 1399 il4965_temperature_calib(il);
1400} 1400}
1401 1401
1402void 1402static void
1403il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb) 1403il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
1404{ 1404{
1405 struct il_rx_pkt *pkt = rxb_addr(rxb); 1405 struct il_rx_pkt *pkt = rxb_addr(rxb);
@@ -1921,8 +1921,8 @@ drop_unlock:
1921static inline int 1921static inline int
1922il4965_alloc_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr, size_t size) 1922il4965_alloc_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr, size_t size)
1923{ 1923{
1924 ptr->addr = 1924 ptr->addr = dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma,
1925 dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma, GFP_KERNEL); 1925 GFP_KERNEL);
1926 if (!ptr->addr) 1926 if (!ptr->addr)
1927 return -ENOMEM; 1927 return -ENOMEM;
1928 ptr->size = size; 1928 ptr->size = size;
@@ -2050,7 +2050,7 @@ il4965_txq_ctx_reset(struct il_priv *il)
2050 il_tx_queue_reset(il, txq_id); 2050 il_tx_queue_reset(il, txq_id);
2051} 2051}
2052 2052
2053void 2053static void
2054il4965_txq_ctx_unmap(struct il_priv *il) 2054il4965_txq_ctx_unmap(struct il_priv *il)
2055{ 2055{
2056 int txq_id; 2056 int txq_id;
@@ -2258,7 +2258,7 @@ il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
2258 2258
2259 spin_lock_irqsave(&il->sta_lock, flags); 2259 spin_lock_irqsave(&il->sta_lock, flags);
2260 tid_data = &il->stations[sta_id].tid[tid]; 2260 tid_data = &il->stations[sta_id].tid[tid];
2261 *ssn = SEQ_TO_SN(tid_data->seq_number); 2261 *ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
2262 tid_data->agg.txq_id = txq_id; 2262 tid_data->agg.txq_id = txq_id;
2263 il_set_swq_id(&il->txq[txq_id], il4965_get_ac_from_tid(tid), txq_id); 2263 il_set_swq_id(&il->txq[txq_id], il4965_get_ac_from_tid(tid), txq_id);
2264 spin_unlock_irqrestore(&il->sta_lock, flags); 2264 spin_unlock_irqrestore(&il->sta_lock, flags);
@@ -2408,7 +2408,7 @@ il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id)
2408 /* aggregated HW queue */ 2408 /* aggregated HW queue */
2409 if (txq_id == tid_data->agg.txq_id && 2409 if (txq_id == tid_data->agg.txq_id &&
2410 q->read_ptr == q->write_ptr) { 2410 q->read_ptr == q->write_ptr) {
2411 u16 ssn = SEQ_TO_SN(tid_data->seq_number); 2411 u16 ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
2412 int tx_fifo = il4965_get_fifo_from_tid(tid); 2412 int tx_fifo = il4965_get_fifo_from_tid(tid);
2413 D_HT("HW queue empty: continue DELBA flow\n"); 2413 D_HT("HW queue empty: continue DELBA flow\n");
2414 il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo); 2414 il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo);
@@ -2627,7 +2627,8 @@ il4965_get_ra_sta_id(struct il_priv *il, struct ieee80211_hdr *hdr)
2627static inline u32 2627static inline u32
2628il4965_get_scd_ssn(struct il4965_tx_resp *tx_resp) 2628il4965_get_scd_ssn(struct il4965_tx_resp *tx_resp)
2629{ 2629{
2630 return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN; 2630 return le32_to_cpup(&tx_resp->u.status +
2631 tx_resp->frame_count) & IEEE80211_MAX_SN;
2631} 2632}
2632 2633
2633static inline u32 2634static inline u32
@@ -2717,15 +2718,15 @@ il4965_tx_status_reply_tx(struct il_priv *il, struct il_ht_agg *agg,
2717 hdr = (struct ieee80211_hdr *) skb->data; 2718 hdr = (struct ieee80211_hdr *) skb->data;
2718 2719
2719 sc = le16_to_cpu(hdr->seq_ctrl); 2720 sc = le16_to_cpu(hdr->seq_ctrl);
2720 if (idx != (SEQ_TO_SN(sc) & 0xff)) { 2721 if (idx != (IEEE80211_SEQ_TO_SN(sc) & 0xff)) {
2721 IL_ERR("BUG_ON idx doesn't match seq control" 2722 IL_ERR("BUG_ON idx doesn't match seq control"
2722 " idx=%d, seq_idx=%d, seq=%d\n", idx, 2723 " idx=%d, seq_idx=%d, seq=%d\n", idx,
2723 SEQ_TO_SN(sc), hdr->seq_ctrl); 2724 IEEE80211_SEQ_TO_SN(sc), hdr->seq_ctrl);
2724 return -1; 2725 return -1;
2725 } 2726 }
2726 2727
2727 D_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", i, idx, 2728 D_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", i, idx,
2728 SEQ_TO_SN(sc)); 2729 IEEE80211_SEQ_TO_SN(sc));
2729 2730
2730 sh = idx - start; 2731 sh = idx - start;
2731 if (sh > 64) { 2732 if (sh > 64) {
@@ -2895,7 +2896,7 @@ il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
2895 * Handles block-acknowledge notification from device, which reports success 2896 * Handles block-acknowledge notification from device, which reports success
2896 * of frames sent via aggregation. 2897 * of frames sent via aggregation.
2897 */ 2898 */
2898void 2899static void
2899il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb) 2900il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb)
2900{ 2901{
2901 struct il_rx_pkt *pkt = rxb_addr(rxb); 2902 struct il_rx_pkt *pkt = rxb_addr(rxb);
@@ -6316,7 +6317,7 @@ il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
6316 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id); 6317 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
6317} 6318}
6318 6319
6319const struct ieee80211_ops il4965_mac_ops = { 6320static const struct ieee80211_ops il4965_mac_ops = {
6320 .tx = il4965_mac_tx, 6321 .tx = il4965_mac_tx,
6321 .start = il4965_mac_start, 6322 .start = il4965_mac_start,
6322 .stop = il4965_mac_stop, 6323 .stop = il4965_mac_stop,
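[All of the il4965_hdl_* notification handlers above lose external linkage for the same reason as the ops table at the end of this file: they are only ever installed into a function-pointer dispatch table inside 4965-mac.c, so nothing outside the file needs their symbols. A sketch of that dispatch pattern; the table shape and setup function are illustrative, not lifted from the patch:

static void (*handlers_sketch[256])(struct il_priv *il,
				    struct il_rx_buf *rxb);

static void
il4965_setup_handlers_sketch(struct il_priv *il)
{
	handlers_sketch[N_RX]     = il4965_hdl_rx;	/* now static */
	handlers_sketch[N_RX_PHY] = il4965_hdl_rx_phy;
	handlers_sketch[N_STATS]  = il4965_hdl_stats;
}
]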
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index e006ea831320..5b79819d7bed 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -1122,7 +1122,7 @@ il_set_power(struct il_priv *il, struct il_powertable_cmd *cmd)
1122 sizeof(struct il_powertable_cmd), cmd); 1122 sizeof(struct il_powertable_cmd), cmd);
1123} 1123}
1124 1124
1125int 1125static int
1126il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd, bool force) 1126il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd, bool force)
1127{ 1127{
1128 int ret; 1128 int ret;
@@ -2566,15 +2566,13 @@ il_rx_queue_alloc(struct il_priv *il)
2566 INIT_LIST_HEAD(&rxq->rx_used); 2566 INIT_LIST_HEAD(&rxq->rx_used);
2567 2567
2568 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */ 2568 /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
2569 rxq->bd = 2569 rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
2570 dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma, 2570 GFP_KERNEL);
2571 GFP_KERNEL);
2572 if (!rxq->bd) 2571 if (!rxq->bd)
2573 goto err_bd; 2572 goto err_bd;
2574 2573
2575 rxq->rb_stts = 2574 rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct il_rb_status),
2576 dma_alloc_coherent(dev, sizeof(struct il_rb_status), 2575 &rxq->rb_stts_dma, GFP_KERNEL);
2577 &rxq->rb_stts_dma, GFP_KERNEL);
2578 if (!rxq->rb_stts) 2576 if (!rxq->rb_stts)
2579 goto err_rb; 2577 goto err_rb;
2580 2578
@@ -2941,10 +2939,9 @@ il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id)
2941 * shared with device */ 2939 * shared with device */
2942 txq->tfds = 2940 txq->tfds =
2943 dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL); 2941 dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL);
2944 if (!txq->tfds) { 2942 if (!txq->tfds)
2945 IL_ERR("Fail to alloc TFDs\n");
2946 goto error; 2943 goto error;
2947 } 2944
2948 txq->q.id = id; 2945 txq->q.id = id;
2949 2946
2950 return 0; 2947 return 0;
@@ -4891,7 +4888,7 @@ il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
4891} 4888}
4892EXPORT_SYMBOL(il_add_beacon_time); 4889EXPORT_SYMBOL(il_add_beacon_time);
4893 4890
4894#ifdef CONFIG_PM 4891#ifdef CONFIG_PM_SLEEP
4895 4892
4896static int 4893static int
4897il_pci_suspend(struct device *device) 4894il_pci_suspend(struct device *device)
@@ -4942,7 +4939,7 @@ il_pci_resume(struct device *device)
4942SIMPLE_DEV_PM_OPS(il_pm_ops, il_pci_suspend, il_pci_resume); 4939SIMPLE_DEV_PM_OPS(il_pm_ops, il_pci_suspend, il_pci_resume);
4943EXPORT_SYMBOL(il_pm_ops); 4940EXPORT_SYMBOL(il_pm_ops);
4944 4941
4945#endif /* CONFIG_PM */ 4942#endif /* CONFIG_PM_SLEEP */
4946 4943
4947static void 4944static void
4948il_update_qos(struct il_priv *il) 4945il_update_qos(struct il_priv *il)
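[The CONFIG_PM to CONFIG_PM_SLEEP switch fixes the guard rather than the code: il_pci_suspend()/il_pci_resume() are system-sleep hooks, and SIMPLE_DEV_PM_OPS only wires them in when CONFIG_PM_SLEEP is set, whereas CONFIG_PM is also true on runtime-PM-only kernels, where the functions would be compiled but unused. Shape of the corrected block, with illustrative names:

#ifdef CONFIG_PM_SLEEP
static int my_pci_suspend(struct device *dev) { return 0; }
static int my_pci_resume(struct device *dev) { return 0; }
SIMPLE_DEV_PM_OPS(my_pm_ops, my_pci_suspend, my_pci_resume);
#endif /* CONFIG_PM_SLEEP */
]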
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
index 96f2025d936e..10986aaf9085 100644
--- a/drivers/net/wireless/iwlegacy/common.h
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -541,10 +541,6 @@ struct il_frame {
541 struct list_head list; 541 struct list_head list;
542}; 542};
543 543
544#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
545#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
546#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
547
548enum { 544enum {
549 CMD_SYNC = 0, 545 CMD_SYNC = 0,
550 CMD_SIZE_NORMAL = 0, 546 CMD_SIZE_NORMAL = 0,
@@ -2235,9 +2231,8 @@ il_alloc_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc)
2235 return -EINVAL; 2231 return -EINVAL;
2236 } 2232 }
2237 2233
2238 desc->v_addr = 2234 desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
2239 dma_alloc_coherent(&pci_dev->dev, desc->len, &desc->p_addr, 2235 &desc->p_addr, GFP_KERNEL);
2240 GFP_KERNEL);
2241 return (desc->v_addr != NULL) ? 0 : -ENOMEM; 2236 return (desc->v_addr != NULL) ? 0 : -ENOMEM;
2242} 2237}
2243 2238
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index ba319cba3f1e..56c2040a955b 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -6,7 +6,6 @@ config IWLWIFI
6 select LEDS_CLASS 6 select LEDS_CLASS
7 select LEDS_TRIGGERS 7 select LEDS_TRIGGERS
8 select MAC80211_LEDS 8 select MAC80211_LEDS
9 select IWLDVM
10 ---help--- 9 ---help---
11 Select to build the driver supporting the: 10 Select to build the driver supporting the:
12 11
@@ -45,6 +44,7 @@ config IWLWIFI
45config IWLDVM 44config IWLDVM
46 tristate "Intel Wireless WiFi DVM Firmware support" 45 tristate "Intel Wireless WiFi DVM Firmware support"
47 depends on IWLWIFI 46 depends on IWLWIFI
47 default IWLWIFI
48 help 48 help
49 This is the driver supporting the DVM firmware which is 49 This is the driver supporting the DVM firmware which is
50 currently the only firmware available for existing devices. 50 currently the only firmware available for existing devices.
@@ -58,6 +58,15 @@ config IWLMVM
58 58
59 Say yes if you have such a device. 59 Say yes if you have such a device.
60 60
61# don't call it _MODULE -- will confuse Kconfig/fixdep/...
62config IWLWIFI_OPMODE_MODULAR
63 bool
64 default y if IWLDVM=m
65 default y if IWLMVM=m
66
67comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM"
68 depends on IWLWIFI && IWLDVM=n && IWLMVM=n
69
61menu "Debugging Options" 70menu "Debugging Options"
62 depends on IWLWIFI 71 depends on IWLWIFI
63 72
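[In Kconfig, IWLWIFI stops force-selecting IWLDVM; instead IWLDVM defaults to IWLWIFI's value, so existing configs keep building the DVM op-mode while users may now disable it or pick MVM. The new hidden bool turns on whenever either op-mode is modular, and the comment entry surfaces a menuconfig warning when both are off. On the C side the hidden symbol is consumed as a plain ifdef, which the iwl-drv.h hunk further down turns into a conditional export:

#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR
#define IWL_EXPORT_SYMBOL(sym)	EXPORT_SYMBOL_GPL(sym)
#else
#define IWL_EXPORT_SYMBOL(sym)	/* everything built-in: no export needed */
#endif
]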
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 6c7800044a04..3b5613ea458b 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -7,8 +7,7 @@ iwlwifi-objs += iwl-notif-wait.o
7iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o 7iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
8iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o 8iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
9iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o 9iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
10iwlwifi-objs += pcie/1000.o pcie/2000.o pcie/5000.o pcie/6000.o 10iwlwifi-objs += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o iwl-7000.o
11iwlwifi-objs += pcie/7000.o
12 11
13iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o 12iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
14iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-test.o 13iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-test.o
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index 41ec27cb6efe..019d433900ef 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.c b/drivers/net/wireless/iwlwifi/dvm/calib.c
index 6468de8634b0..d6c4cf2ad7c5 100644
--- a/drivers/net/wireless/iwlwifi/dvm/calib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/calib.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/dvm/calib.h b/drivers/net/wireless/iwlwifi/dvm/calib.h
index 65e920cab2b7..cfddde194940 100644
--- a/drivers/net/wireless/iwlwifi/dvm/calib.h
+++ b/drivers/net/wireless/iwlwifi/dvm/calib.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/dvm/commands.h b/drivers/net/wireless/iwlwifi/dvm/commands.h
index 84e2c0fcfef6..95ca026ecc9d 100644
--- a/drivers/net/wireless/iwlwifi/dvm/commands.h
+++ b/drivers/net/wireless/iwlwifi/dvm/commands.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -1526,6 +1526,7 @@ struct iwl_compressed_ba_resp {
1526 __le16 scd_ssn; 1526 __le16 scd_ssn;
1527 u8 txed; /* number of frames sent */ 1527 u8 txed; /* number of frames sent */
1528 u8 txed_2_done; /* number of frames acked */ 1528 u8 txed_2_done; /* number of frames acked */
1529 __le16 reserved1;
1529} __packed; 1530} __packed;
1530 1531
1531/* 1532/*
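[The added __le16 reserved1 spells out trailing padding that the firmware sends in the compressed-BA reply. Because the struct is __packed, sizeof() is exactly the sum of the declared members, so undeclared wire padding silently shrinks the host-side view of the response. Sketch of the principle, with an illustrative layout rather than the real reply:

struct fw_reply_sketch {
	__le16 scd_flow;
	u8     txed;		/* frames sent */
	u8     txed_2_done;	/* frames acked */
	__le16 reserved1;	/* firmware pads the reply: declare it */
} __packed;			/* sizeof() == 6, matching the wire format */
]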
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index 20806cae11b7..7b8178be119f 100644
--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -19,7 +19,7 @@
19 * USA 19 * USA
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL. 22 * in the file called COPYING.
23 * 23 *
24 * Contact Information: 24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com> 25 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -2324,6 +2324,28 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
2324 return count; 2324 return count;
2325} 2325}
2326 2326
2327static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
2328 const char __user *user_buf,
2329 size_t count, loff_t *ppos)
2330{
2331 struct iwl_priv *priv = file->private_data;
2332 bool restart_fw = iwlwifi_mod_params.restart_fw;
2333 int ret;
2334
2335 iwlwifi_mod_params.restart_fw = true;
2336
2337 mutex_lock(&priv->mutex);
2338
2339 /* take the return value to make compiler happy - it will fail anyway */
2340 ret = iwl_dvm_send_cmd_pdu(priv, REPLY_ERROR, CMD_SYNC, 0, NULL);
2341
2342 mutex_unlock(&priv->mutex);
2343
2344 iwlwifi_mod_params.restart_fw = restart_fw;
2345
2346 return count;
2347}
2348
2327DEBUGFS_READ_FILE_OPS(ucode_rx_stats); 2349DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
2328DEBUGFS_READ_FILE_OPS(ucode_tx_stats); 2350DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
2329DEBUGFS_READ_FILE_OPS(ucode_general_stats); 2351DEBUGFS_READ_FILE_OPS(ucode_general_stats);
@@ -2343,6 +2365,7 @@ DEBUGFS_READ_FILE_OPS(bt_traffic);
2343DEBUGFS_READ_WRITE_FILE_OPS(protection_mode); 2365DEBUGFS_READ_WRITE_FILE_OPS(protection_mode);
2344DEBUGFS_READ_FILE_OPS(reply_tx_error); 2366DEBUGFS_READ_FILE_OPS(reply_tx_error);
2345DEBUGFS_WRITE_FILE_OPS(echo_test); 2367DEBUGFS_WRITE_FILE_OPS(echo_test);
2368DEBUGFS_WRITE_FILE_OPS(fw_restart);
2346#ifdef CONFIG_IWLWIFI_DEBUG 2369#ifdef CONFIG_IWLWIFI_DEBUG
2347DEBUGFS_READ_WRITE_FILE_OPS(log_event); 2370DEBUGFS_READ_WRITE_FILE_OPS(log_event);
2348#endif 2371#endif
@@ -2400,6 +2423,7 @@ int iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir)
2400 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR); 2423 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
2401 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR); 2424 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
2402 DEBUGFS_ADD_FILE(echo_test, dir_debug, S_IWUSR); 2425 DEBUGFS_ADD_FILE(echo_test, dir_debug, S_IWUSR);
2426 DEBUGFS_ADD_FILE(fw_restart, dir_debug, S_IWUSR);
2403#ifdef CONFIG_IWLWIFI_DEBUG 2427#ifdef CONFIG_IWLWIFI_DEBUG
2404 DEBUGFS_ADD_FILE(log_event, dir_debug, S_IWUSR | S_IRUSR); 2428 DEBUGFS_ADD_FILE(log_event, dir_debug, S_IWUSR | S_IRUSR);
2405#endif 2429#endif
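[The new fw_restart debugfs file gives userspace a deliberate firmware-crash trigger for exercising the recovery path: the handler forces the restart_fw module parameter on, sends a REPLY_ERROR host command that, per the in-code comment, is expected to fail and so provokes a firmware error, then restores the user's original setting. The core idiom as a standalone sketch, all names placeholders:

static bool restart_on_error;		/* stands in for iwlwifi_mod_params.restart_fw */

static void provoke_fw_error(void);	/* stands in for sending REPLY_ERROR */

static void trigger_fw_restart_for_test(void)
{
	bool saved = restart_on_error;

	restart_on_error = true;	/* ensure recovery restarts the fw */
	provoke_fw_error();		/* firmware errors out, recovery runs */
	restart_on_error = saved;	/* put the user's choice back */
}
]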
diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
index 44ca0e57f9f7..87c006c9c573 100644
--- a/drivers/net/wireless/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -19,7 +19,7 @@
19 * USA 19 * USA
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL. 22 * in the file called COPYING.
23 * 23 *
24 * Contact Information: 24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com> 25 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 323e4a33fcac..c7cd2dffa5cd 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -1137,7 +1137,8 @@ done:
1137static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw, 1137static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
1138 struct ieee80211_vif *vif, 1138 struct ieee80211_vif *vif,
1139 struct ieee80211_channel *channel, 1139 struct ieee80211_channel *channel,
1140 int duration) 1140 int duration,
1141 enum ieee80211_roc_type type)
1141{ 1142{
1142 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1143 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1143 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN]; 1144 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
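[mac80211's remain_on_channel op grew an enum ieee80211_roc_type argument so drivers can tell why they are being asked to go off-channel and weight the request accordingly (a pending P2P management-frame exchange is more urgent than a plain dwell); every implementation must add the parameter even where, as here, it is not yet used. The enum, quoted from memory and therefore a sketch:

enum ieee80211_roc_type {
	IEEE80211_ROC_TYPE_NORMAL = 0,	/* ordinary remain-on-channel */
	IEEE80211_ROC_TYPE_MGMT_TX,	/* ROC for an offchannel mgmt frame */
};
]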
diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c
index 3a4aa5239c45..d69b55866714 100644
--- a/drivers/net/wireless/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/dvm/scan.c
@@ -19,7 +19,7 @@
19 * USA 19 * USA
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL. 22 * in the file called COPYING.
23 * 23 *
24 * Contact Information: 24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com> 25 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/dvm/testmode.c b/drivers/net/wireless/iwlwifi/dvm/testmode.c
index dc6f965a123a..b89b9d9b9969 100644
--- a/drivers/net/wireless/iwlwifi/dvm/testmode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/testmode.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index d1a670d7b10c..70b7f68c4958 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -19,7 +19,7 @@
19 * USA 19 * USA
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL. 22 * in the file called COPYING.
23 * 23 *
24 * Contact Information: 24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com> 25 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -418,7 +418,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
418 " Tx flags = 0x%08x, agg.state = %d", 418 " Tx flags = 0x%08x, agg.state = %d",
419 info->flags, tid_data->agg.state); 419 info->flags, tid_data->agg.state);
420 IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d", 420 IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d",
421 sta_id, tid, SEQ_TO_SN(tid_data->seq_number)); 421 sta_id, tid,
422 IEEE80211_SEQ_TO_SN(tid_data->seq_number));
422 goto drop_unlock_sta; 423 goto drop_unlock_sta;
423 } 424 }
424 425
@@ -569,7 +570,7 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
569 return 0; 570 return 0;
570 } 571 }
571 572
572 tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number); 573 tid_data->agg.ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
573 574
574 /* There are still packets for this RA / TID in the HW */ 575 /* There are still packets for this RA / TID in the HW */
575 if (!test_bit(txq_id, priv->agg_q_alloc)) { 576 if (!test_bit(txq_id, priv->agg_q_alloc)) {
@@ -651,7 +652,7 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
651 652
652 spin_lock_bh(&priv->sta_lock); 653 spin_lock_bh(&priv->sta_lock);
653 tid_data = &priv->tid_data[sta_id][tid]; 654 tid_data = &priv->tid_data[sta_id][tid];
654 tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number); 655 tid_data->agg.ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
655 tid_data->agg.txq_id = txq_id; 656 tid_data->agg.txq_id = txq_id;
656 657
657 *ssn = tid_data->agg.ssn; 658 *ssn = tid_data->agg.ssn;
@@ -911,7 +912,7 @@ static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
911static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp) 912static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
912{ 913{
913 return le32_to_cpup((__le32 *)&tx_resp->status + 914 return le32_to_cpup((__le32 *)&tx_resp->status +
914 tx_resp->frame_count) & MAX_SN; 915 tx_resp->frame_count) & IEEE80211_MAX_SN;
915} 916}
916 917
917static void iwl_rx_reply_tx_agg(struct iwl_priv *priv, 918static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
@@ -1148,7 +1149,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
1148 1149
1149 if (tx_resp->frame_count == 1) { 1150 if (tx_resp->frame_count == 1) {
1150 u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl); 1151 u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl);
1151 next_reclaimed = SEQ_TO_SN(next_reclaimed + 0x10); 1152 next_reclaimed = IEEE80211_SEQ_TO_SN(next_reclaimed + 0x10);
1152 1153
1153 if (is_agg) { 1154 if (is_agg) {
1154 /* If this is an aggregation queue, we can rely on the 1155 /* If this is an aggregation queue, we can rely on the
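[One subtlety in the hunks above: next_reclaimed is computed as IEEE80211_SEQ_TO_SN(next_reclaimed + 0x10). The raw seq_ctl word stores the fragment number in bits 0-3 and the sequence number in bits 4-15, so adding 0x10 before masking advances the sequence number by exactly one, i.e. "the frame after the one this TX response covers". Worked through, assuming the macros from include/linux/ieee80211.h:

u16 seq_ctl = 0x0ff0;				/* SN 0xff, fragment 0 */
u16 next = IEEE80211_SEQ_TO_SN(seq_ctl + 0x10);	/* (0x1000 & 0xfff0) >> 4 == 0x100 */
]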
diff --git a/drivers/net/wireless/iwlwifi/dvm/ucode.c b/drivers/net/wireless/iwlwifi/dvm/ucode.c
index 1a4ac9236a44..0a1cdc5e856b 100644
--- a/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -19,7 +19,7 @@
19 * USA 19 * USA
20 * 20 *
21 * The full GNU General Public License is included in this distribution 21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL. 22 * in the file called COPYING.
23 * 23 *
24 * Contact Information: 24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com> 25 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/pcie/1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index ff3389757281..c080ae3070b2 100644
--- a/drivers/net/wireless/iwlwifi/pcie/1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -29,7 +29,6 @@
29#include "iwl-config.h" 29#include "iwl-config.h"
30#include "iwl-csr.h" 30#include "iwl-csr.h"
31#include "iwl-agn-hw.h" 31#include "iwl-agn-hw.h"
32#include "cfg.h"
33 32
34/* Highest firmware API version supported */ 33/* Highest firmware API version supported */
35#define IWL1000_UCODE_API_MAX 5 34#define IWL1000_UCODE_API_MAX 5
diff --git a/drivers/net/wireless/iwlwifi/pcie/2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index e7de33128b16..a6ddd2f9fba0 100644
--- a/drivers/net/wireless/iwlwifi/pcie/2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -28,7 +28,6 @@
28#include <linux/stringify.h> 28#include <linux/stringify.h>
29#include "iwl-config.h" 29#include "iwl-config.h"
30#include "iwl-agn-hw.h" 30#include "iwl-agn-hw.h"
31#include "cfg.h"
32#include "dvm/commands.h" /* needed for BT for now */ 31#include "dvm/commands.h" /* needed for BT for now */
33 32
34/* Highest firmware API version supported */ 33/* Highest firmware API version supported */
diff --git a/drivers/net/wireless/iwlwifi/pcie/5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 5096f7c96ab6..403f3f224bf6 100644
--- a/drivers/net/wireless/iwlwifi/pcie/5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -29,7 +29,6 @@
29#include "iwl-config.h" 29#include "iwl-config.h"
30#include "iwl-agn-hw.h" 30#include "iwl-agn-hw.h"
31#include "iwl-csr.h" 31#include "iwl-csr.h"
32#include "cfg.h"
33 32
34/* Highest firmware API version supported */ 33/* Highest firmware API version supported */
35#define IWL5000_UCODE_API_MAX 5 34#define IWL5000_UCODE_API_MAX 5
diff --git a/drivers/net/wireless/iwlwifi/pcie/6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 801ff49796dd..b5ab8d1bcac0 100644
--- a/drivers/net/wireless/iwlwifi/pcie/6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -28,7 +28,6 @@
28#include <linux/stringify.h> 28#include <linux/stringify.h>
29#include "iwl-config.h" 29#include "iwl-config.h"
30#include "iwl-agn-hw.h" 30#include "iwl-agn-hw.h"
31#include "cfg.h"
32#include "dvm/commands.h" /* needed for BT for now */ 31#include "dvm/commands.h" /* needed for BT for now */
33 32
34/* Highest firmware API version supported */ 33/* Highest firmware API version supported */
diff --git a/drivers/net/wireless/iwlwifi/pcie/7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 6e35b2b72332..50263e87fe15 100644
--- a/drivers/net/wireless/iwlwifi/pcie/7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -1,34 +1,70 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved. 3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
4 * 5 *
5 * This program is free software; you can redistribute it and/or modify it 6 * GPL LICENSE SUMMARY
6 * under the terms of version 2 of the GNU General Public License as 7 *
8 * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation. 12 * published by the Free Software Foundation.
8 * 13 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT 14 * This program is distributed in the hope that it will be useful, but
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 15 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * more details. 17 * General Public License for more details.
13 * 18 *
14 * You should have received a copy of the GNU General Public License along with 19 * You should have received a copy of the GNU General Public License
15 * this program; if not, write to the Free Software Foundation, Inc., 20 * along with this program; if not, write to the Free Software
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA 21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
17 * 23 *
18 * The full GNU General Public License is included in this distribution in the 24 * The full GNU General Public License is included in this distribution
19 * file called LICENSE. 25 * in the file called COPYING.
20 * 26 *
21 * Contact Information: 27 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 * 30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
25 *****************************************************************************/ 62 *****************************************************************************/
26 63
27#include <linux/module.h> 64#include <linux/module.h>
28#include <linux/stringify.h> 65#include <linux/stringify.h>
29#include "iwl-config.h" 66#include "iwl-config.h"
30#include "iwl-agn-hw.h" 67#include "iwl-agn-hw.h"
31#include "cfg.h"
32 68
33/* Highest firmware API version supported */ 69/* Highest firmware API version supported */
34#define IWL7260_UCODE_API_MAX 6 70#define IWL7260_UCODE_API_MAX 6
@@ -70,7 +106,6 @@ static const struct iwl_base_params iwl7000_base_params = {
70}; 106};
71 107
72static const struct iwl_ht_params iwl7000_ht_params = { 108static const struct iwl_ht_params iwl7000_ht_params = {
73 .ht_greenfield_support = true,
74 .use_rts_for_aggregation = true, /* use rts/cts protection */ 109 .use_rts_for_aggregation = true, /* use rts/cts protection */
75 .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ), 110 .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
76}; 111};
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
index e9975c54c276..6d73f943cefa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 743b48343358..c38aa8f77554 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -275,4 +275,51 @@ struct iwl_cfg {
275 const bool temp_offset_v2; 275 const bool temp_offset_v2;
276}; 276};
277 277
278/*
279 * This list declares the config structures for all devices.
280 */
281extern const struct iwl_cfg iwl5300_agn_cfg;
282extern const struct iwl_cfg iwl5100_agn_cfg;
283extern const struct iwl_cfg iwl5350_agn_cfg;
284extern const struct iwl_cfg iwl5100_bgn_cfg;
285extern const struct iwl_cfg iwl5100_abg_cfg;
286extern const struct iwl_cfg iwl5150_agn_cfg;
287extern const struct iwl_cfg iwl5150_abg_cfg;
288extern const struct iwl_cfg iwl6005_2agn_cfg;
289extern const struct iwl_cfg iwl6005_2abg_cfg;
290extern const struct iwl_cfg iwl6005_2bg_cfg;
291extern const struct iwl_cfg iwl6005_2agn_sff_cfg;
292extern const struct iwl_cfg iwl6005_2agn_d_cfg;
293extern const struct iwl_cfg iwl6005_2agn_mow1_cfg;
294extern const struct iwl_cfg iwl6005_2agn_mow2_cfg;
295extern const struct iwl_cfg iwl1030_bgn_cfg;
296extern const struct iwl_cfg iwl1030_bg_cfg;
297extern const struct iwl_cfg iwl6030_2agn_cfg;
298extern const struct iwl_cfg iwl6030_2abg_cfg;
299extern const struct iwl_cfg iwl6030_2bgn_cfg;
300extern const struct iwl_cfg iwl6030_2bg_cfg;
301extern const struct iwl_cfg iwl6000i_2agn_cfg;
302extern const struct iwl_cfg iwl6000i_2abg_cfg;
303extern const struct iwl_cfg iwl6000i_2bg_cfg;
304extern const struct iwl_cfg iwl6000_3agn_cfg;
305extern const struct iwl_cfg iwl6050_2agn_cfg;
306extern const struct iwl_cfg iwl6050_2abg_cfg;
307extern const struct iwl_cfg iwl6150_bgn_cfg;
308extern const struct iwl_cfg iwl6150_bg_cfg;
309extern const struct iwl_cfg iwl1000_bgn_cfg;
310extern const struct iwl_cfg iwl1000_bg_cfg;
311extern const struct iwl_cfg iwl100_bgn_cfg;
312extern const struct iwl_cfg iwl100_bg_cfg;
313extern const struct iwl_cfg iwl130_bgn_cfg;
314extern const struct iwl_cfg iwl130_bg_cfg;
315extern const struct iwl_cfg iwl2000_2bgn_cfg;
316extern const struct iwl_cfg iwl2000_2bgn_d_cfg;
317extern const struct iwl_cfg iwl2030_2bgn_cfg;
318extern const struct iwl_cfg iwl6035_2agn_cfg;
319extern const struct iwl_cfg iwl105_bgn_cfg;
320extern const struct iwl_cfg iwl105_bgn_d_cfg;
321extern const struct iwl_cfg iwl135_bgn_cfg;
322extern const struct iwl_cfg iwl7260_2ac_cfg;
323extern const struct iwl_cfg iwl3160_ac_cfg;
324
278#endif /* __IWL_CONFIG_H__ */ 325#endif /* __IWL_CONFIG_H__ */
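[The long extern block gathers every device-config declaration into the shared iwl-config.h. The per-device files were just moved out of the PCIe directory (see the Makefile hunk and the pcie/NNNN.c to iwl-NNNN.c renames above), since the configs describe hardware rather than a transport, so their declarations move to a header any transport can include. A typical consumer, modeled from memory on the PCIe driver's device table, IDs illustrative:

static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids_sketch) = {
	{IWL_PCI_DEVICE(0x08B1, 0x4070, iwl7260_2ac_cfg)},	/* 7260 2ac */
	{0}
};
]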
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index df3463a38704..20e845d4da04 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.c b/drivers/net/wireless/iwlwifi/iwl-debug.c
index 87535a67de76..8a44f594528d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -66,6 +66,7 @@
66#include <linux/device.h> 66#include <linux/device.h>
67#include <linux/interrupt.h> 67#include <linux/interrupt.h>
68#include <linux/export.h> 68#include <linux/export.h>
69#include "iwl-drv.h"
69#include "iwl-debug.h" 70#include "iwl-debug.h"
70#include "iwl-devtrace.h" 71#include "iwl-devtrace.h"
71 72
@@ -85,11 +86,11 @@ void __iwl_ ##fn(struct device *dev, const char *fmt, ...) \
85} 86}
86 87
87__iwl_fn(warn) 88__iwl_fn(warn)
88EXPORT_SYMBOL_GPL(__iwl_warn); 89IWL_EXPORT_SYMBOL(__iwl_warn);
89__iwl_fn(info) 90__iwl_fn(info)
90EXPORT_SYMBOL_GPL(__iwl_info); 91IWL_EXPORT_SYMBOL(__iwl_info);
91__iwl_fn(crit) 92__iwl_fn(crit)
92EXPORT_SYMBOL_GPL(__iwl_crit); 93IWL_EXPORT_SYMBOL(__iwl_crit);
93 94
94void __iwl_err(struct device *dev, bool rfkill_prefix, bool trace_only, 95void __iwl_err(struct device *dev, bool rfkill_prefix, bool trace_only,
95 const char *fmt, ...) 96 const char *fmt, ...)
@@ -110,7 +111,7 @@ void __iwl_err(struct device *dev, bool rfkill_prefix, bool trace_only,
110 trace_iwlwifi_err(&vaf); 111 trace_iwlwifi_err(&vaf);
111 va_end(args); 112 va_end(args);
112} 113}
113EXPORT_SYMBOL_GPL(__iwl_err); 114IWL_EXPORT_SYMBOL(__iwl_err);
114 115
115#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING) 116#if defined(CONFIG_IWLWIFI_DEBUG) || defined(CONFIG_IWLWIFI_DEVICE_TRACING)
116void __iwl_dbg(struct device *dev, 117void __iwl_dbg(struct device *dev,
@@ -133,5 +134,5 @@ void __iwl_dbg(struct device *dev,
133 trace_iwlwifi_dbg(level, in_interrupt(), function, &vaf); 134 trace_iwlwifi_dbg(level, in_interrupt(), function, &vaf);
134 va_end(args); 135 va_end(args);
135} 136}
136EXPORT_SYMBOL_GPL(__iwl_dbg); 137IWL_EXPORT_SYMBOL(__iwl_dbg);
137#endif 138#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 81aa91fab5aa..4491c1c72cc7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -298,7 +298,7 @@ TRACE_EVENT(iwlwifi_dbg,
298 MAX_MSG_LEN, vaf->fmt, 298 MAX_MSG_LEN, vaf->fmt,
299 *vaf->va) >= MAX_MSG_LEN); 299 *vaf->va) >= MAX_MSG_LEN);
300 ), 300 ),
301 TP_printk("%s", (char *)__get_dynamic_array(msg)) 301 TP_printk("%s", __get_str(msg))
302); 302);
303 303
304#undef TRACE_SYSTEM 304#undef TRACE_SYSTEM
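[The tracepoint fix swaps a raw cast of __get_dynamic_array() for __get_str(), the accessor meant for string-typed dynamic arrays. The two are equivalent here, __get_str() being defined as a char * cast of __get_dynamic_array(), but __get_str() states the intent:

TP_printk("%s", __get_str(msg))	/* msg declared via __dynamic_array(char, msg, len) */
]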
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index fbfd2d137117..3ce4e9d5082d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -1102,7 +1102,7 @@ void iwl_drv_stop(struct iwl_drv *drv)
1102 1102
1103/* shared module parameters */ 1103/* shared module parameters */
1104struct iwl_mod_params iwlwifi_mod_params = { 1104struct iwl_mod_params iwlwifi_mod_params = {
1105 .restart_fw = 1, 1105 .restart_fw = true,
1106 .plcp_check = true, 1106 .plcp_check = true,
1107 .bt_coex_active = true, 1107 .bt_coex_active = true,
1108 .power_level = IWL_POWER_INDEX_1, 1108 .power_level = IWL_POWER_INDEX_1,
@@ -1111,7 +1111,7 @@ struct iwl_mod_params iwlwifi_mod_params = {
1111 .wd_disable = true, 1111 .wd_disable = true,
1112 /* the rest are 0 by default */ 1112 /* the rest are 0 by default */
1113}; 1113};
1114EXPORT_SYMBOL_GPL(iwlwifi_mod_params); 1114IWL_EXPORT_SYMBOL(iwlwifi_mod_params);
1115 1115
1116int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops) 1116int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops)
1117{ 1117{
@@ -1135,7 +1135,7 @@ int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops)
1135 mutex_unlock(&iwlwifi_opmode_table_mtx); 1135 mutex_unlock(&iwlwifi_opmode_table_mtx);
1136 return -EIO; 1136 return -EIO;
1137} 1137}
1138EXPORT_SYMBOL_GPL(iwl_opmode_register); 1138IWL_EXPORT_SYMBOL(iwl_opmode_register);
1139 1139
1140void iwl_opmode_deregister(const char *name) 1140void iwl_opmode_deregister(const char *name)
1141{ 1141{
@@ -1157,7 +1157,7 @@ void iwl_opmode_deregister(const char *name)
1157 } 1157 }
1158 mutex_unlock(&iwlwifi_opmode_table_mtx); 1158 mutex_unlock(&iwlwifi_opmode_table_mtx);
1159} 1159}
1160EXPORT_SYMBOL_GPL(iwl_opmode_deregister); 1160IWL_EXPORT_SYMBOL(iwl_opmode_deregister);
1161 1161
1162static int __init iwl_drv_init(void) 1162static int __init iwl_drv_init(void)
1163{ 1163{
@@ -1207,8 +1207,8 @@ MODULE_PARM_DESC(11n_disable,
1207module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K, 1207module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K,
1208 int, S_IRUGO); 1208 int, S_IRUGO);
1209MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)"); 1209MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)");
1210module_param_named(fw_restart, iwlwifi_mod_params.restart_fw, int, S_IRUGO); 1210module_param_named(fw_restart, iwlwifi_mod_params.restart_fw, bool, S_IRUGO);
1211MODULE_PARM_DESC(fw_restart, "restart firmware in case of error"); 1211MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)");
1212 1212
1213module_param_named(antenna_coupling, iwlwifi_mod_params.ant_coupling, 1213module_param_named(antenna_coupling, iwlwifi_mod_params.ant_coupling,
1214 int, S_IRUGO); 1214 int, S_IRUGO);
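[Two changes travel together in iwl-drv.c: restart_fw is now initialized as a proper bool (the field's type flips in the iwl-modparams.h hunk further down), and its module_param_named() type changes from int to bool to match, since the module-param type and the backing variable's C type must agree. A minimal standalone version of the pattern, variable name illustrative:

#include <linux/module.h>

static bool fw_restart_sketch = true;
module_param(fw_restart_sketch, bool, S_IRUGO);
MODULE_PARM_DESC(fw_restart_sketch,
		 "restart firmware in case of error (default true)");
]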
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.h b/drivers/net/wireless/iwlwifi/iwl-drv.h
index 594a5c71b272..7d1450916308 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -63,6 +63,8 @@
63#ifndef __iwl_drv_h__ 63#ifndef __iwl_drv_h__
64#define __iwl_drv_h__ 64#define __iwl_drv_h__
65 65
66#include <linux/module.h>
67
66/* for all modules */ 68/* for all modules */
67#define DRV_NAME "iwlwifi" 69#define DRV_NAME "iwlwifi"
68#define IWLWIFI_VERSION "in-tree:" 70#define IWLWIFI_VERSION "in-tree:"
@@ -123,4 +125,17 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
123 */ 125 */
124void iwl_drv_stop(struct iwl_drv *drv); 126void iwl_drv_stop(struct iwl_drv *drv);
125 127
128/*
129 * exported symbol management
130 *
131 * The driver can be split into multiple modules, in which case some symbols
132 * must be exported for the sub-modules. However, if it's not split and
133 * everything is built-in, then we can avoid that.
134 */
135#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR
136#define IWL_EXPORT_SYMBOL(sym) EXPORT_SYMBOL_GPL(sym)
137#else
138#define IWL_EXPORT_SYMBOL(sym)
139#endif
140
126#endif /* __iwl_drv_h__ */ 141#endif /* __iwl_drv_h__ */
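[IWL_EXPORT_SYMBOL is the payoff of the Kconfig change earlier: when no op-mode is modular, every EXPORT_SYMBOL_GPL() site converted throughout this series compiles to nothing, dropping the ksymtab entries from an all-built-in kernel; when IWLDVM or IWLMVM is a module, it expands to the real export. The header now includes linux/module.h itself so the macro's users need not. Usage sketch, function illustrative:

int iwl_example_helper(int x)		/* a core-module helper */
{
	return x + 1;
}
IWL_EXPORT_SYMBOL(iwl_example_helper);	/* EXPORT_SYMBOL_GPL(...) or nothing */
]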
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
index 034f2ff4f43d..600c9fdd7f71 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -62,6 +62,7 @@
62#include <linux/types.h> 62#include <linux/types.h>
63#include <linux/slab.h> 63#include <linux/slab.h>
64#include <linux/export.h> 64#include <linux/export.h>
65#include "iwl-drv.h"
65#include "iwl-modparams.h" 66#include "iwl-modparams.h"
66#include "iwl-eeprom-parse.h" 67#include "iwl-eeprom-parse.h"
67 68
@@ -749,7 +750,7 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
749 } 750 }
750 751
751 ht_info->ht_supported = true; 752 ht_info->ht_supported = true;
752 ht_info->cap = 0; 753 ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40;
753 754
754 if (iwlwifi_mod_params.amsdu_size_8K) 755 if (iwlwifi_mod_params.amsdu_size_8K)
755 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; 756 ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
@@ -909,7 +910,7 @@ iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg,
909 kfree(data); 910 kfree(data);
910 return NULL; 911 return NULL;
911} 912}
912EXPORT_SYMBOL_GPL(iwl_parse_eeprom_data); 913IWL_EXPORT_SYMBOL(iwl_parse_eeprom_data);
913 914
914/* helper functions */ 915/* helper functions */
915int iwl_nvm_check_version(struct iwl_nvm_data *data, 916int iwl_nvm_check_version(struct iwl_nvm_data *data,
@@ -928,4 +929,4 @@ int iwl_nvm_check_version(struct iwl_nvm_data *data,
928 data->calib_version, trans->cfg->nvm_calib_ver); 929 data->calib_version, trans->cfg->nvm_calib_ver);
929 return -EINVAL; 930 return -EINVAL;
930} 931}
931EXPORT_SYMBOL_GPL(iwl_nvm_check_version); 932IWL_EXPORT_SYMBOL(iwl_nvm_check_version);
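[Besides the export conversions, eeprom-parse now seeds the HT capability field with IEEE80211_HT_CAP_DSSSCCK40 instead of 0. That bit advertises that the station may use DSSS/CCK (802.11b) rates while operating as a 40 MHz HT station; the remaining capability bits are still OR'd in conditionally, as before:

ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40;	/* always advertised now */
if (iwlwifi_mod_params.amsdu_size_8K)
	ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
]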
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
index 683fe6a8c58f..37f115390b19 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
index ef4806f27cf8..e5f2e362ab0b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -63,6 +63,7 @@
63#include <linux/slab.h> 63#include <linux/slab.h>
64#include <linux/export.h> 64#include <linux/export.h>
65 65
66#include "iwl-drv.h"
66#include "iwl-debug.h" 67#include "iwl-debug.h"
67#include "iwl-eeprom-read.h" 68#include "iwl-eeprom-read.h"
68#include "iwl-io.h" 69#include "iwl-io.h"
@@ -460,4 +461,4 @@ int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size)
460 461
461 return ret; 462 return ret;
462} 463}
463EXPORT_SYMBOL_GPL(iwl_read_eeprom); 464IWL_EXPORT_SYMBOL(iwl_read_eeprom);
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
index b2588c5cbf93..8e941f8bd7d6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom-read.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index f5592fb3b1ed..484d318245fb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index 90873eca35f7..8b6c6fd95ed0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index b545178e46e3..435618574240 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -73,12 +73,14 @@
73 * treats good CRC threshold as a boolean 73 * treats good CRC threshold as a boolean
74 * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w). 74 * @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
75 * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P. 75 * @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
76 * @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
76 */ 77 */
77enum iwl_ucode_tlv_flag { 78enum iwl_ucode_tlv_flag {
78 IWL_UCODE_TLV_FLAGS_PAN = BIT(0), 79 IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
79 IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1), 80 IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1),
80 IWL_UCODE_TLV_FLAGS_MFP = BIT(2), 81 IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
81 IWL_UCODE_TLV_FLAGS_P2P = BIT(3), 82 IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
83 IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4),
82}; 84};
83 85
84/* The default calibrate table size if not specified by firmware file */ 86/* The default calibrate table size if not specified by firmware file */
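[IWL_UCODE_TLV_FLAGS_DW_BC_TABLE claims the next free bit in the firmware-file TLV flags: firmware images carry these capability bits so one driver binary can adapt to several firmware generations, and per the new kerneldoc line this one announces that the scheduler byte-count (BC) table uses 32-bit (DWORD) entries. How such a bit is typically tested, using an illustrative wrapper type rather than the driver's real structures:

struct fw_capa_sketch { u32 flags; };

static bool fw_has_dw_bc_table(const struct fw_capa_sketch *capa)
{
	return capa->flags & IWL_UCODE_TLV_FLAGS_DW_BC_TABLE;
}
]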
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index 276410d82de4..305c81f2c2b4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -29,6 +29,7 @@
29#include <linux/device.h> 29#include <linux/device.h>
30#include <linux/export.h> 30#include <linux/export.h>
31 31
32#include "iwl-drv.h"
32#include "iwl-io.h" 33#include "iwl-io.h"
33#include "iwl-csr.h" 34#include "iwl-csr.h"
34#include "iwl-debug.h" 35#include "iwl-debug.h"
@@ -49,7 +50,7 @@ int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
49 50
50 return -ETIMEDOUT; 51 return -ETIMEDOUT;
51} 52}
52EXPORT_SYMBOL_GPL(iwl_poll_bit); 53IWL_EXPORT_SYMBOL(iwl_poll_bit);
53 54
54u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg) 55u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
55{ 56{
@@ -62,7 +63,7 @@ u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
62 63
63 return value; 64 return value;
64} 65}
65EXPORT_SYMBOL_GPL(iwl_read_direct32); 66IWL_EXPORT_SYMBOL(iwl_read_direct32);
66 67
67void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value) 68void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value)
68{ 69{
@@ -73,7 +74,7 @@ void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value)
73 iwl_trans_release_nic_access(trans, &flags); 74 iwl_trans_release_nic_access(trans, &flags);
74 } 75 }
75} 76}
76EXPORT_SYMBOL_GPL(iwl_write_direct32); 77IWL_EXPORT_SYMBOL(iwl_write_direct32);
77 78
78int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask, 79int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
79 int timeout) 80 int timeout)
@@ -89,7 +90,7 @@ int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
89 90
90 return -ETIMEDOUT; 91 return -ETIMEDOUT;
91} 92}
92EXPORT_SYMBOL_GPL(iwl_poll_direct_bit); 93IWL_EXPORT_SYMBOL(iwl_poll_direct_bit);
93 94
94static inline u32 __iwl_read_prph(struct iwl_trans *trans, u32 ofs) 95static inline u32 __iwl_read_prph(struct iwl_trans *trans, u32 ofs)
95{ 96{
@@ -115,7 +116,7 @@ u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs)
115 } 116 }
116 return val; 117 return val;
117} 118}
118EXPORT_SYMBOL_GPL(iwl_read_prph); 119IWL_EXPORT_SYMBOL(iwl_read_prph);
119 120
120void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val) 121void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
121{ 122{
@@ -126,7 +127,7 @@ void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
126 iwl_trans_release_nic_access(trans, &flags); 127 iwl_trans_release_nic_access(trans, &flags);
127 } 128 }
128} 129}
129EXPORT_SYMBOL_GPL(iwl_write_prph); 130IWL_EXPORT_SYMBOL(iwl_write_prph);
130 131
131void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask) 132void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
132{ 133{
@@ -138,7 +139,7 @@ void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
138 iwl_trans_release_nic_access(trans, &flags); 139 iwl_trans_release_nic_access(trans, &flags);
139 } 140 }
140} 141}
141EXPORT_SYMBOL_GPL(iwl_set_bits_prph); 142IWL_EXPORT_SYMBOL(iwl_set_bits_prph);
142 143
143void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs, 144void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
144 u32 bits, u32 mask) 145 u32 bits, u32 mask)
@@ -151,7 +152,7 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
151 iwl_trans_release_nic_access(trans, &flags); 152 iwl_trans_release_nic_access(trans, &flags);
152 } 153 }
153} 154}
154EXPORT_SYMBOL_GPL(iwl_set_bits_mask_prph); 155IWL_EXPORT_SYMBOL(iwl_set_bits_mask_prph);
155 156
156void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask) 157void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
157{ 158{
@@ -164,4 +165,4 @@ void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
164 iwl_trans_release_nic_access(trans, &flags); 165 iwl_trans_release_nic_access(trans, &flags);
165 } 166 }
166} 167}
167EXPORT_SYMBOL_GPL(iwl_clear_bits_prph); 168IWL_EXPORT_SYMBOL(iwl_clear_bits_prph);
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
index 2c2a729092f5..3cc39ffe8ba5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -109,7 +109,7 @@ struct iwl_mod_params {
109 int sw_crypto; 109 int sw_crypto;
110 unsigned int disable_11n; 110 unsigned int disable_11n;
111 int amsdu_size_8K; 111 int amsdu_size_8K;
112 int restart_fw; 112 bool restart_fw;
113 bool plcp_check; 113 bool plcp_check;
114 int wd_disable; 114 int wd_disable;
115 bool bt_coex_active; 115 bool bt_coex_active;
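
Note that restart_fw switches from int to bool in this hunk; the matching module_param_named() registration in iwl-drv.c (not shown in this diff) has to declare the same type, or sysfs would parse the parameter incorrectly. A sketch of what that registration would look like, assuming the existing fw_restart parameter name:

    /* sketch -- the actual registration lives in iwl-drv.c */
    module_param_named(fw_restart, iwlwifi_mod_params.restart_fw,
                       bool, S_IRUGO);
    MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
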
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
index c3affbc62cdf..940b8a9d5285 100644
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -63,6 +63,7 @@
63#include <linux/sched.h> 63#include <linux/sched.h>
64#include <linux/export.h> 64#include <linux/export.h>
65 65
66#include "iwl-drv.h"
66#include "iwl-notif-wait.h" 67#include "iwl-notif-wait.h"
67 68
68 69
@@ -72,7 +73,7 @@ void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_wait)
72 INIT_LIST_HEAD(&notif_wait->notif_waits); 73 INIT_LIST_HEAD(&notif_wait->notif_waits);
73 init_waitqueue_head(&notif_wait->notif_waitq); 74 init_waitqueue_head(&notif_wait->notif_waitq);
74} 75}
75EXPORT_SYMBOL_GPL(iwl_notification_wait_init); 76IWL_EXPORT_SYMBOL(iwl_notification_wait_init);
76 77
77void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait, 78void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
78 struct iwl_rx_packet *pkt) 79 struct iwl_rx_packet *pkt)
@@ -117,7 +118,7 @@ void iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_wait,
117 if (triggered) 118 if (triggered)
118 wake_up_all(&notif_wait->notif_waitq); 119 wake_up_all(&notif_wait->notif_waitq);
119} 120}
120EXPORT_SYMBOL_GPL(iwl_notification_wait_notify); 121IWL_EXPORT_SYMBOL(iwl_notification_wait_notify);
121 122
122void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait) 123void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
123{ 124{
@@ -130,7 +131,7 @@ void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_wait)
130 131
131 wake_up_all(&notif_wait->notif_waitq); 132 wake_up_all(&notif_wait->notif_waitq);
132} 133}
133EXPORT_SYMBOL_GPL(iwl_abort_notification_waits); 134IWL_EXPORT_SYMBOL(iwl_abort_notification_waits);
134 135
135void 136void
136iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait, 137iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
@@ -154,7 +155,7 @@ iwl_init_notification_wait(struct iwl_notif_wait_data *notif_wait,
154 list_add(&wait_entry->list, &notif_wait->notif_waits); 155 list_add(&wait_entry->list, &notif_wait->notif_waits);
155 spin_unlock_bh(&notif_wait->notif_wait_lock); 156 spin_unlock_bh(&notif_wait->notif_wait_lock);
156} 157}
157EXPORT_SYMBOL_GPL(iwl_init_notification_wait); 158IWL_EXPORT_SYMBOL(iwl_init_notification_wait);
158 159
159int iwl_wait_notification(struct iwl_notif_wait_data *notif_wait, 160int iwl_wait_notification(struct iwl_notif_wait_data *notif_wait,
160 struct iwl_notification_wait *wait_entry, 161 struct iwl_notification_wait *wait_entry,
@@ -178,7 +179,7 @@ int iwl_wait_notification(struct iwl_notif_wait_data *notif_wait,
178 return -ETIMEDOUT; 179 return -ETIMEDOUT;
179 return 0; 180 return 0;
180} 181}
181EXPORT_SYMBOL_GPL(iwl_wait_notification); 182IWL_EXPORT_SYMBOL(iwl_wait_notification);
182 183
183void iwl_remove_notification(struct iwl_notif_wait_data *notif_wait, 184void iwl_remove_notification(struct iwl_notif_wait_data *notif_wait,
184 struct iwl_notification_wait *wait_entry) 185 struct iwl_notification_wait *wait_entry)
@@ -187,4 +188,4 @@ void iwl_remove_notification(struct iwl_notif_wait_data *notif_wait,
187 list_del(&wait_entry->list); 188 list_del(&wait_entry->list);
188 spin_unlock_bh(&notif_wait->notif_wait_lock); 189 spin_unlock_bh(&notif_wait->notif_wait_lock);
189} 190}
190EXPORT_SYMBOL_GPL(iwl_remove_notification); 191IWL_EXPORT_SYMBOL(iwl_remove_notification);
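
The notification-wait helpers converted above implement a register/send/wait pattern: the caller registers a waiter for a set of response IDs, sends the triggering command, then blocks in iwl_wait_notification() until the callback returns true or the timeout expires. A usage sketch against the API declared in iwl-notif-wait.h; the command ID is a placeholder, not a real firmware command:

    static bool iwl_example_notif_fn(struct iwl_notif_wait_data *notif_wait,
                                     struct iwl_rx_packet *pkt, void *data)
    {
            /* parse pkt here; return true once the awaited event is complete */
            return true;
    }

    static int iwl_example_wait(struct iwl_notif_wait_data *notif_wait)
    {
            struct iwl_notification_wait wait;
            static const u8 cmds[] = { EXAMPLE_NOTIF_ID };  /* placeholder */

            iwl_init_notification_wait(notif_wait, &wait,
                                       cmds, ARRAY_SIZE(cmds),
                                       iwl_example_notif_fn, NULL);
            /*
             * Send the firmware command that triggers the notification here;
             * if sending fails, iwl_remove_notification() unregisters the
             * waiter instead of blocking.
             */
            return iwl_wait_notification(notif_wait, &wait, HZ);
    }
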
diff --git a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
index c2ce764463a3..2e2f1c8c99f9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
+++ b/drivers/net/wireless/iwlwifi/iwl-notif-wait.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index a70213bdb83c..6199a0a597a6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -62,6 +62,7 @@
62#include <linux/types.h> 62#include <linux/types.h>
63#include <linux/slab.h> 63#include <linux/slab.h>
64#include <linux/export.h> 64#include <linux/export.h>
65#include "iwl-drv.h"
65#include "iwl-modparams.h" 66#include "iwl-modparams.h"
66#include "iwl-nvm-parse.h" 67#include "iwl-nvm-parse.h"
67 68
@@ -149,6 +150,8 @@ static struct ieee80211_rate iwl_cfg80211_rates[] = {
149 * @NVM_CHANNEL_DFS: dynamic freq selection candidate 150 * @NVM_CHANNEL_DFS: dynamic freq selection candidate
150 * @NVM_CHANNEL_WIDE: 20 MHz channel okay (?) 151 * @NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
151 * @NVM_CHANNEL_40MHZ: 40 MHz channel okay (?) 152 * @NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
153 * @NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
154 * @NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
152 */ 155 */
153enum iwl_nvm_channel_flags { 156enum iwl_nvm_channel_flags {
154 NVM_CHANNEL_VALID = BIT(0), 157 NVM_CHANNEL_VALID = BIT(0),
@@ -158,6 +161,8 @@ enum iwl_nvm_channel_flags {
158 NVM_CHANNEL_DFS = BIT(7), 161 NVM_CHANNEL_DFS = BIT(7),
159 NVM_CHANNEL_WIDE = BIT(8), 162 NVM_CHANNEL_WIDE = BIT(8),
160 NVM_CHANNEL_40MHZ = BIT(9), 163 NVM_CHANNEL_40MHZ = BIT(9),
164 NVM_CHANNEL_80MHZ = BIT(10),
165 NVM_CHANNEL_160MHZ = BIT(11),
161}; 166};
162 167
163#define CHECK_AND_PRINT_I(x) \ 168#define CHECK_AND_PRINT_I(x) \
@@ -210,6 +215,10 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
210 else 215 else
211 channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS; 216 channel->flags &= ~IEEE80211_CHAN_NO_HT40MINUS;
212 } 217 }
218 if (!(ch_flags & NVM_CHANNEL_80MHZ))
219 channel->flags |= IEEE80211_CHAN_NO_80MHZ;
220 if (!(ch_flags & NVM_CHANNEL_160MHZ))
221 channel->flags |= IEEE80211_CHAN_NO_160MHZ;
213 222
214 if (!(ch_flags & NVM_CHANNEL_IBSS)) 223 if (!(ch_flags & NVM_CHANNEL_IBSS))
215 channel->flags |= IEEE80211_CHAN_NO_IBSS; 224 channel->flags |= IEEE80211_CHAN_NO_IBSS;
@@ -245,6 +254,43 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
245 return n_channels; 254 return n_channels;
246} 255}
247 256
257static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
258 struct iwl_nvm_data *data,
259 struct ieee80211_sta_vht_cap *vht_cap)
260{
261 /* For now, assume new devices with NVM are VHT capable */
262
263 vht_cap->vht_supported = true;
264
265 vht_cap->cap = IEEE80211_VHT_CAP_SHORT_GI_80 |
266 IEEE80211_VHT_CAP_RXSTBC_1 |
267 IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
268 7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
269
270 if (iwlwifi_mod_params.amsdu_size_8K)
271 vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991;
272
273 vht_cap->vht_mcs.rx_mcs_map =
274 cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
275 IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
276 IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
277 IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
278 IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
279 IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
280 IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
281 IEEE80211_VHT_MCS_NOT_SUPPORTED << 14);
282
283 if (data->valid_rx_ant == 1 || cfg->rx_with_siso_diversity) {
284 vht_cap->cap |= IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
285 IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
286 /* this works because NOT_SUPPORTED == 3 */
287 vht_cap->vht_mcs.rx_mcs_map |=
288 cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << 2);
289 }
290
291 vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map;
292}
293
248static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, 294static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
249 struct iwl_nvm_data *data, const __le16 *nvm_sw) 295 struct iwl_nvm_data *data, const __le16 *nvm_sw)
250{ 296{
@@ -268,6 +314,7 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
268 n_used += iwl_init_sband_channels(data, sband, n_channels, 314 n_used += iwl_init_sband_channels(data, sband, n_channels,
269 IEEE80211_BAND_5GHZ); 315 IEEE80211_BAND_5GHZ);
270 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ); 316 iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ);
317 iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap);
271 318
272 if (n_channels != n_used) 319 if (n_channels != n_used)
273 IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n", 320 IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
@@ -343,4 +390,4 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
343 390
344 return data; 391 return data;
345} 392}
346EXPORT_SYMBOL_GPL(iwl_parse_nvm_data); 393IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
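
In iwl_init_vht_hw_capab() above, rx_mcs_map packs two bits of MCS support per spatial stream, eight streams into one 16-bit word; the single-antenna special case works because IEEE80211_VHT_MCS_NOT_SUPPORTED equals 3 (both bits set), so OR-ing it into the second stream's field disables that stream whatever its previous value was. An illustration of the encoding, in host order (the driver stores the result as __le16):

    /* illustration: MCS 0-9 on the first n_ss spatial streams,
     * the remaining streams marked as not supported */
    static u16 example_vht_mcs_map(int n_ss)
    {
            u16 map = 0;
            int i;

            for (i = 0; i < 8; i++)         /* 8 streams, 2 bits each */
                    map |= (i < n_ss ? IEEE80211_VHT_MCS_SUPPORT_0_9 :
                                       IEEE80211_VHT_MCS_NOT_SUPPORTED) << (i * 2);
            return map;
    }
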
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
index b2692bd287fa..e57fb989661e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index 4a680019e117..98c7aa7346da 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
index 3392011a8768..25745daa0d5d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.c
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -65,6 +65,7 @@
65#include <linux/string.h> 65#include <linux/string.h>
66#include <linux/export.h> 66#include <linux/export.h>
67 67
68#include "iwl-drv.h"
68#include "iwl-phy-db.h" 69#include "iwl-phy-db.h"
69#include "iwl-debug.h" 70#include "iwl-debug.h"
70#include "iwl-op-mode.h" 71#include "iwl-op-mode.h"
@@ -149,7 +150,7 @@ struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans)
149 /* TODO: add default values of the phy db. */ 150 /* TODO: add default values of the phy db. */
150 return phy_db; 151 return phy_db;
151} 152}
152EXPORT_SYMBOL(iwl_phy_db_init); 153IWL_EXPORT_SYMBOL(iwl_phy_db_init);
153 154
154/* 155/*
155 * get phy db section: returns a pointer to a phy db section specified by 156 * get phy db section: returns a pointer to a phy db section specified by
@@ -215,7 +216,7 @@ void iwl_phy_db_free(struct iwl_phy_db *phy_db)
215 216
216 kfree(phy_db); 217 kfree(phy_db);
217} 218}
218EXPORT_SYMBOL(iwl_phy_db_free); 219IWL_EXPORT_SYMBOL(iwl_phy_db_free);
219 220
220int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt, 221int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
221 gfp_t alloc_ctx) 222 gfp_t alloc_ctx)
@@ -260,7 +261,7 @@ int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
260 261
261 return 0; 262 return 0;
262} 263}
263EXPORT_SYMBOL(iwl_phy_db_set_section); 264IWL_EXPORT_SYMBOL(iwl_phy_db_set_section);
264 265
265static int is_valid_channel(u16 ch_id) 266static int is_valid_channel(u16 ch_id)
266{ 267{
@@ -495,4 +496,4 @@ int iwl_send_phy_db_data(struct iwl_phy_db *phy_db)
495 "Finished sending phy db non channel data\n"); 496 "Finished sending phy db non channel data\n");
496 return 0; 497 return 0;
497} 498}
498EXPORT_SYMBOL(iwl_send_phy_db_data); 499IWL_EXPORT_SYMBOL(iwl_send_phy_db_data);
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/iwlwifi/iwl-phy-db.h
index d0e43d96ab38..ce983af79644 100644
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.h
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index f76e9cad7757..386f2a7c87cb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-test.c b/drivers/net/wireless/iwlwifi/iwl-test.c
index ce0c67b425ee..efff2986b5b4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-test.c
+++ b/drivers/net/wireless/iwlwifi/iwl-test.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -64,6 +64,7 @@
64#include <linux/export.h> 64#include <linux/export.h>
65#include <net/netlink.h> 65#include <net/netlink.h>
66 66
67#include "iwl-drv.h"
67#include "iwl-io.h" 68#include "iwl-io.h"
68#include "iwl-fh.h" 69#include "iwl-fh.h"
69#include "iwl-prph.h" 70#include "iwl-prph.h"
@@ -653,7 +654,7 @@ int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb,
653 } 654 }
654 return 0; 655 return 0;
655} 656}
656EXPORT_SYMBOL_GPL(iwl_test_parse); 657IWL_EXPORT_SYMBOL(iwl_test_parse);
657 658
658/* 659/*
659 * Handle test commands. 660 * Handle test commands.
@@ -715,7 +716,7 @@ int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb)
715 } 716 }
716 return result; 717 return result;
717} 718}
718EXPORT_SYMBOL_GPL(iwl_test_handle_cmd); 719IWL_EXPORT_SYMBOL(iwl_test_handle_cmd);
719 720
720static int iwl_test_trace_dump(struct iwl_test *tst, struct sk_buff *skb, 721static int iwl_test_trace_dump(struct iwl_test *tst, struct sk_buff *skb,
721 struct netlink_callback *cb) 722 struct netlink_callback *cb)
@@ -803,7 +804,7 @@ int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb,
803 } 804 }
804 return result; 805 return result;
805} 806}
806EXPORT_SYMBOL_GPL(iwl_test_dump); 807IWL_EXPORT_SYMBOL(iwl_test_dump);
807 808
808/* 809/*
809 * Multicast spontaneous messages from the device to user space. 810 * Multicast spontaneous messages from the device to user space.
@@ -849,4 +850,4 @@ void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb)
849 if (tst->notify) 850 if (tst->notify)
850 iwl_test_send_rx(tst, rxb); 851 iwl_test_send_rx(tst, rxb);
851} 852}
852EXPORT_SYMBOL_GPL(iwl_test_rx); 853IWL_EXPORT_SYMBOL(iwl_test_rx);
diff --git a/drivers/net/wireless/iwlwifi/iwl-test.h b/drivers/net/wireless/iwlwifi/iwl-test.h
index 7fbf4d717caa..8fbd21704840 100644
--- a/drivers/net/wireless/iwlwifi/iwl-test.h
+++ b/drivers/net/wireless/iwlwifi/iwl-test.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.h b/drivers/net/wireless/iwlwifi/iwl-testmode.h
index a963f45c6849..98f48a9afc98 100644
--- a/drivers/net/wireless/iwlwifi/iwl-testmode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-testmode.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 0cac2b7af78b..7f9c254292a8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -114,9 +114,6 @@
114 * completely agnostic to these differences. 114 * completely agnostic to these differences.
115 * The transport does provide helper functionality (i.e. SYNC / ASYNC mode), 115 * The transport does provide helper functionality (i.e. SYNC / ASYNC mode),
116 */ 116 */
117#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
118#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
119#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
120#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f) 117#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
121#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8) 118#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
122#define SEQ_TO_INDEX(s) ((s) & 0xff) 119#define SEQ_TO_INDEX(s) ((s) & 0xff)
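
The removed SEQ_TO_SN/SN_TO_SEQ/MAX_SN macros duplicated 802.11 sequence-number helpers that mac80211 already provides, so only the driver-private packing of the TX queue number and TFD index into the sequence field remains here. A round-trip sketch of that packing, assuming INDEX_TO_SEQ() is the companion macro in the same header:

    static void example_seq_roundtrip(void)
    {
            u16 seq = QUEUE_TO_SEQ(9) | INDEX_TO_SEQ(42);

            WARN_ON(SEQ_TO_QUEUE(seq) != 9);        /* bits 8..12: tx queue */
            WARN_ON(SEQ_TO_INDEX(seq) != 42);       /* bits 0..7: TFD index */
    }
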
diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile
index 807b250ec396..2acc44b40986 100644
--- a/drivers/net/wireless/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/iwlwifi/mvm/Makefile
@@ -2,7 +2,7 @@ obj-$(CONFIG_IWLMVM) += iwlmvm.o
2iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o 2iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
3iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o 3iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o
4iwlmvm-y += scan.o time-event.o rs.o 4iwlmvm-y += scan.o time-event.o rs.o
5iwlmvm-y += power.o 5iwlmvm-y += power.o bt-coex.o
6iwlmvm-y += led.o 6iwlmvm-y += led.o
7iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o 7iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
8iwlmvm-$(CONFIG_PM_SLEEP) += d3.o 8iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
diff --git a/drivers/net/wireless/iwlwifi/mvm/binding.c b/drivers/net/wireless/iwlwifi/mvm/binding.c
index 73d24aacb90a..93fd1457954b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/binding.c
+++ b/drivers/net/wireless/iwlwifi/mvm/binding.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
new file mode 100644
index 000000000000..47954deb6493
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
@@ -0,0 +1,347 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2013 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called COPYING.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2013 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#include "fw-api-bt-coex.h"
65#include "iwl-modparams.h"
66#include "mvm.h"
67#include "iwl-debug.h"
68
69#define EVENT_PRIO_ANT(_evt, _prio, _shrd_ant) \
70 [(_evt)] = (((_prio) << BT_COEX_PRIO_TBL_PRIO_POS) | \
71 ((_shrd_ant) << BT_COEX_PRIO_TBL_SHRD_ANT_POS))
72
73static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
74 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB1,
75 BT_COEX_PRIO_TBL_PRIO_BYPASS, 0),
76 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB2,
77 BT_COEX_PRIO_TBL_PRIO_BYPASS, 1),
78 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1,
79 BT_COEX_PRIO_TBL_PRIO_LOW, 0),
80 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2,
81 BT_COEX_PRIO_TBL_PRIO_LOW, 1),
82 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1,
83 BT_COEX_PRIO_TBL_PRIO_HIGH, 0),
84 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2,
85 BT_COEX_PRIO_TBL_PRIO_HIGH, 1),
86 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_DTIM,
87 BT_COEX_PRIO_TBL_DISABLED, 0),
88 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN52,
89 BT_COEX_PRIO_TBL_PRIO_COEX_OFF, 0),
90 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN24,
91 BT_COEX_PRIO_TBL_PRIO_COEX_ON, 0),
92 EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_IDLE,
93 BT_COEX_PRIO_TBL_PRIO_COEX_IDLE, 0),
94 0, 0, 0, 0, 0, 0,
95};
96
97#undef EVENT_PRIO_ANT
98
99int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
100{
101 return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, CMD_SYNC,
102 sizeof(struct iwl_bt_coex_prio_tbl_cmd),
103 &iwl_bt_prio_tbl);
104}
105
106static int iwl_send_bt_env(struct iwl_mvm *mvm, u8 action, u8 type)
107{
108 struct iwl_bt_coex_prot_env_cmd env_cmd;
109 int ret;
110
111 env_cmd.action = action;
112 env_cmd.type = type;
113 ret = iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PROT_ENV, CMD_SYNC,
114 sizeof(env_cmd), &env_cmd);
115 if (ret)
116 IWL_ERR(mvm, "failed to send BT env command\n");
117 return ret;
118}
119
120enum iwl_bt_kill_msk {
121 BT_KILL_MSK_DEFAULT,
122 BT_KILL_MSK_SCO_HID_A2DP,
123 BT_KILL_MSK_REDUCED_TXPOW,
124 BT_KILL_MSK_MAX,
125};
126
127static const u32 iwl_bt_ack_kill_msk[BT_KILL_MSK_MAX] = {
128 0xffffffff,
129 0xfffffc00,
130 0,
131};
132
133static const u32 iwl_bt_cts_kill_msk[BT_KILL_MSK_MAX] = {
134 0xffffffff,
135 0xfffffc00,
136 0,
137};
138
139#define IWL_BT_DEFAULT_BOOST (0xf0f0f0f0)
140
141/* Tight Coex */
142static const __le32 iwl_tight_lookup[BT_COEX_LUT_SIZE] = {
143 cpu_to_le32(0xaaaaaaaa),
144 cpu_to_le32(0xaaaaaaaa),
145 cpu_to_le32(0xaeaaaaaa),
146 cpu_to_le32(0xaaaaaaaa),
147 cpu_to_le32(0xcc00ff28),
148 cpu_to_le32(0x0000aaaa),
149 cpu_to_le32(0xcc00aaaa),
150 cpu_to_le32(0x0000aaaa),
151 cpu_to_le32(0xc0004000),
152 cpu_to_le32(0x00000000),
153 cpu_to_le32(0xf0005000),
154 cpu_to_le32(0xf0005000),
155};
156
157/* Loose Coex */
158static const __le32 iwl_loose_lookup[BT_COEX_LUT_SIZE] = {
159 cpu_to_le32(0xaaaaaaaa),
160 cpu_to_le32(0xaaaaaaaa),
161 cpu_to_le32(0xaeaaaaaa),
162 cpu_to_le32(0xaaaaaaaa),
163 cpu_to_le32(0xcc00ff28),
164 cpu_to_le32(0x0000aaaa),
165 cpu_to_le32(0xcc00aaaa),
166 cpu_to_le32(0x0000aaaa),
167 cpu_to_le32(0x00000000),
168 cpu_to_le32(0x00000000),
169 cpu_to_le32(0xf0005000),
170 cpu_to_le32(0xf0005000),
171};
172
173/* Full concurrency */
174static const __le32 iwl_concurrent_lookup[BT_COEX_LUT_SIZE] = {
175 cpu_to_le32(0xaaaaaaaa),
176 cpu_to_le32(0xaaaaaaaa),
177 cpu_to_le32(0xaaaaaaaa),
178 cpu_to_le32(0xaaaaaaaa),
179 cpu_to_le32(0xaaaaaaaa),
180 cpu_to_le32(0xaaaaaaaa),
181 cpu_to_le32(0xaaaaaaaa),
182 cpu_to_le32(0xaaaaaaaa),
183 cpu_to_le32(0x00000000),
184 cpu_to_le32(0x00000000),
185 cpu_to_le32(0x00000000),
186 cpu_to_le32(0x00000000),
187};
188
189/* BT Antenna Coupling Threshold (dB) */
190#define IWL_BT_ANTENNA_COUPLING_THRESHOLD (35)
191
192int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
193{
194 struct iwl_bt_coex_cmd cmd = {
195 .max_kill = 5,
196 .bt3_time_t7_value = 1,
197 .bt3_prio_sample_time = 2,
198 .bt3_timer_t2_value = 0xc,
199 };
200 int ret;
201
202 cmd.flags = iwlwifi_mod_params.bt_coex_active ?
203 BT_COEX_NW : BT_COEX_DISABLE;
204 cmd.flags |= iwlwifi_mod_params.bt_ch_announce ?
205 BT_CH_PRIMARY_EN | BT_CH_SECONDARY_EN : 0;
206 cmd.flags |= BT_SYNC_2_BT_DISABLE;
207
208 cmd.valid_bit_msk = cpu_to_le16(BT_VALID_ENABLE |
209 BT_VALID_BT_PRIO_BOOST |
210 BT_VALID_MAX_KILL |
211 BT_VALID_3W_TMRS |
212 BT_VALID_KILL_ACK |
213 BT_VALID_KILL_CTS |
214 BT_VALID_REDUCED_TX_POWER |
215 BT_VALID_LUT);
216
217 if (iwlwifi_mod_params.ant_coupling > IWL_BT_ANTENNA_COUPLING_THRESHOLD)
218 memcpy(&cmd.decision_lut, iwl_loose_lookup,
219 sizeof(iwl_tight_lookup));
220 else
221 memcpy(&cmd.decision_lut, iwl_tight_lookup,
222 sizeof(iwl_tight_lookup));
223
224 cmd.bt_prio_boost = cpu_to_le32(IWL_BT_DEFAULT_BOOST);
225 cmd.kill_ack_msk =
226 cpu_to_le32(iwl_bt_ack_kill_msk[BT_KILL_MSK_DEFAULT]);
227 cmd.kill_cts_msk =
228 cpu_to_le32(iwl_bt_cts_kill_msk[BT_KILL_MSK_DEFAULT]);
229
230 /* go to CALIB state in internal BT-Coex state machine */
231 ret = iwl_send_bt_env(mvm, BT_COEX_ENV_OPEN,
232 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
233 if (ret)
234 return ret;
235
236 ret = iwl_send_bt_env(mvm, BT_COEX_ENV_CLOSE,
237 BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
238 if (ret)
239 return ret;
240
241 return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, CMD_SYNC,
242 sizeof(cmd), &cmd);
243}
244
245struct iwl_bt_notif_iterator_data {
246 struct iwl_mvm *mvm;
247 struct iwl_bt_coex_profile_notif *notif;
248};
249
250static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
251 struct ieee80211_vif *vif)
252{
253 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
254 struct iwl_bt_notif_iterator_data *data = _data;
255 struct ieee80211_chanctx_conf *chanctx_conf;
256 enum ieee80211_smps_mode smps_mode;
257 enum ieee80211_band band;
258
259 if (vif->type != NL80211_IFTYPE_STATION)
260 return;
261
262 rcu_read_lock();
263 chanctx_conf = rcu_dereference(vif->chanctx_conf);
264 if (chanctx_conf && chanctx_conf->def.chan)
265 band = chanctx_conf->def.chan->band;
266 else
267 band = -1;
268 rcu_read_unlock();
269
270 if (band != IEEE80211_BAND_2GHZ)
271 return;
272
273 smps_mode = IEEE80211_SMPS_AUTOMATIC;
274
275 if (data->notif->bt_status)
276 smps_mode = IEEE80211_SMPS_DYNAMIC;
277
278 if (data->notif->bt_traffic_load)
279 smps_mode = IEEE80211_SMPS_STATIC;
280
281 IWL_DEBUG_COEX(data->mvm,
282 "mac %d: bt_status %d traffic_load %d smps_req %d\n",
283 mvmvif->id, data->notif->bt_status,
284 data->notif->bt_traffic_load, smps_mode);
285
286 ieee80211_request_smps(vif, smps_mode);
287}
288
289int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
290 struct iwl_rx_cmd_buffer *rxb,
291 struct iwl_device_cmd *dev_cmd)
292{
293 struct iwl_rx_packet *pkt = rxb_addr(rxb);
294 struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
295 struct iwl_bt_notif_iterator_data data = {
296 .mvm = mvm,
297 .notif = notif,
298 };
299 struct iwl_bt_coex_cmd cmd = {};
300 enum iwl_bt_kill_msk bt_kill_msk;
301
302 IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
303 IWL_DEBUG_COEX(mvm, "\tBT %salive\n", notif->bt_status ? "" : "not ");
304 IWL_DEBUG_COEX(mvm, "\tBT open conn %d\n", notif->bt_open_conn);
305 IWL_DEBUG_COEX(mvm, "\tBT traffic load %d\n", notif->bt_traffic_load);
306 IWL_DEBUG_COEX(mvm, "\tBT agg traffic load %d\n",
307 notif->bt_agg_traffic_load);
308 IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
309
310 /* remember this notification for future use: rssi fluctuations */
311 memcpy(&mvm->last_bt_notif, notif, sizeof(mvm->last_bt_notif));
312
313 ieee80211_iterate_active_interfaces_atomic(
314 mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
315 iwl_mvm_bt_notif_iterator, &data);
316
317 /* Low latency BT profile is active: give higher prio to BT */
318 if (BT_MBOX_MSG(notif, 3, SCO_STATE) ||
319 BT_MBOX_MSG(notif, 3, A2DP_STATE) ||
320 BT_MBOX_MSG(notif, 3, SNIFF_STATE))
321 bt_kill_msk = BT_KILL_MSK_SCO_HID_A2DP;
322 else
323 bt_kill_msk = BT_KILL_MSK_DEFAULT;
324
325 /* Don't send HCMD if there is no update */
326 if (bt_kill_msk == mvm->bt_kill_msk)
327 return 0;
328
329 IWL_DEBUG_COEX(mvm,
330 "Udpate kill_msk: %d\n\t SCO %sactive A2DP %sactive SNIFF %sactive\n",
331 bt_kill_msk,
332 BT_MBOX_MSG(notif, 3, SCO_STATE) ? "" : "in",
333 BT_MBOX_MSG(notif, 3, A2DP_STATE) ? "" : "in",
334 BT_MBOX_MSG(notif, 3, SNIFF_STATE) ? "" : "in");
335
336 mvm->bt_kill_msk = bt_kill_msk;
337 cmd.kill_ack_msk = cpu_to_le32(iwl_bt_ack_kill_msk[bt_kill_msk]);
338 cmd.kill_cts_msk = cpu_to_le32(iwl_bt_cts_kill_msk[bt_kill_msk]);
339
340 cmd.valid_bit_msk = cpu_to_le16(BT_VALID_KILL_ACK | BT_VALID_KILL_CTS);
341
342 if (iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, CMD_SYNC, sizeof(cmd), &cmd))
343 IWL_ERR(mvm, "Failed to send BT Coex CMD\n");
344
345 /* This handler is ASYNC */
346 return 0;
347}
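
iwl_mvm_rx_bt_coex_notif() above keys the kill-mask selection off BT mailbox dword 3. The BT_MBOX_MSG() macro (defined with the debugfs changes later in this diff) masks a named field out of a little-endian mailbox word and shifts it down to bit 0. A worked illustration of the same extraction; the mask and position constants below are hypothetical, not the real firmware layout:

    #define BT_MBOX3_SCO_STATE      (0x7 << 4)      /* hypothetical */
    #define BT_MBOX3_SCO_STATE_POS  4               /* hypothetical */

    static u8 example_sco_state(const struct iwl_bt_coex_profile_notif *notif)
    {
            u32 dw3 = le32_to_cpu(notif->mbox_msg[3]);

            /* equivalent to BT_MBOX_MSG(notif, 3, SCO_STATE) */
            return (dw3 & BT_MBOX3_SCO_STATE) >> BT_MBOX3_SCO_STATE_POS;
    }
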
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 994c8c263dc0..d4578cefe445 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -62,8 +62,10 @@
62 *****************************************************************************/ 62 *****************************************************************************/
63 63
64#include <linux/etherdevice.h> 64#include <linux/etherdevice.h>
65#include <linux/ip.h>
65#include <net/cfg80211.h> 66#include <net/cfg80211.h>
66#include <net/ipv6.h> 67#include <net/ipv6.h>
68#include <net/tcp.h>
67#include "iwl-modparams.h" 69#include "iwl-modparams.h"
68#include "fw-api.h" 70#include "fw-api.h"
69#include "mvm.h" 71#include "mvm.h"
@@ -402,6 +404,233 @@ static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
402 sizeof(cmd), &cmd); 404 sizeof(cmd), &cmd);
403} 405}
404 406
407enum iwl_mvm_tcp_packet_type {
408 MVM_TCP_TX_SYN,
409 MVM_TCP_RX_SYNACK,
410 MVM_TCP_TX_DATA,
411 MVM_TCP_RX_ACK,
412 MVM_TCP_RX_WAKE,
413 MVM_TCP_TX_FIN,
414};
415
416static __le16 pseudo_hdr_check(int len, __be32 saddr, __be32 daddr)
417{
418 __sum16 check = tcp_v4_check(len, saddr, daddr, 0);
419 return cpu_to_le16(be16_to_cpu((__force __be16)check));
420}
421
422static void iwl_mvm_build_tcp_packet(struct iwl_mvm *mvm,
423 struct ieee80211_vif *vif,
424 struct cfg80211_wowlan_tcp *tcp,
425 void *_pkt, u8 *mask,
426 __le16 *pseudo_hdr_csum,
427 enum iwl_mvm_tcp_packet_type ptype)
428{
429 struct {
430 struct ethhdr eth;
431 struct iphdr ip;
432 struct tcphdr tcp;
433 u8 data[];
434 } __packed *pkt = _pkt;
435 u16 ip_tot_len = sizeof(struct iphdr) + sizeof(struct tcphdr);
436 int i;
437
438 pkt->eth.h_proto = cpu_to_be16(ETH_P_IP);
439 pkt->ip.version = 4;
440 pkt->ip.ihl = 5;
441 pkt->ip.protocol = IPPROTO_TCP;
442
443 switch (ptype) {
444 case MVM_TCP_TX_SYN:
445 case MVM_TCP_TX_DATA:
446 case MVM_TCP_TX_FIN:
447 memcpy(pkt->eth.h_dest, tcp->dst_mac, ETH_ALEN);
448 memcpy(pkt->eth.h_source, vif->addr, ETH_ALEN);
449 pkt->ip.ttl = 128;
450 pkt->ip.saddr = tcp->src;
451 pkt->ip.daddr = tcp->dst;
452 pkt->tcp.source = cpu_to_be16(tcp->src_port);
453 pkt->tcp.dest = cpu_to_be16(tcp->dst_port);
454 /* overwritten for TX SYN later */
455 pkt->tcp.doff = sizeof(struct tcphdr) / 4;
456 pkt->tcp.window = cpu_to_be16(65000);
457 break;
458 case MVM_TCP_RX_SYNACK:
459 case MVM_TCP_RX_ACK:
460 case MVM_TCP_RX_WAKE:
461 memcpy(pkt->eth.h_dest, vif->addr, ETH_ALEN);
462 memcpy(pkt->eth.h_source, tcp->dst_mac, ETH_ALEN);
463 pkt->ip.saddr = tcp->dst;
464 pkt->ip.daddr = tcp->src;
465 pkt->tcp.source = cpu_to_be16(tcp->dst_port);
466 pkt->tcp.dest = cpu_to_be16(tcp->src_port);
467 break;
468 default:
469 WARN_ON(1);
470 return;
471 }
472
473 switch (ptype) {
474 case MVM_TCP_TX_SYN:
475 /* firmware assumes 8 option bytes - 8 NOPs for now */
476 memset(pkt->data, 0x01, 8);
477 ip_tot_len += 8;
478 pkt->tcp.doff = (sizeof(struct tcphdr) + 8) / 4;
479 pkt->tcp.syn = 1;
480 break;
481 case MVM_TCP_TX_DATA:
482 ip_tot_len += tcp->payload_len;
483 memcpy(pkt->data, tcp->payload, tcp->payload_len);
484 pkt->tcp.psh = 1;
485 pkt->tcp.ack = 1;
486 break;
487 case MVM_TCP_TX_FIN:
488 pkt->tcp.fin = 1;
489 pkt->tcp.ack = 1;
490 break;
491 case MVM_TCP_RX_SYNACK:
492 pkt->tcp.syn = 1;
493 pkt->tcp.ack = 1;
494 break;
495 case MVM_TCP_RX_ACK:
496 pkt->tcp.ack = 1;
497 break;
498 case MVM_TCP_RX_WAKE:
499 ip_tot_len += tcp->wake_len;
500 pkt->tcp.psh = 1;
501 pkt->tcp.ack = 1;
502 memcpy(pkt->data, tcp->wake_data, tcp->wake_len);
503 break;
504 }
505
506 switch (ptype) {
507 case MVM_TCP_TX_SYN:
508 case MVM_TCP_TX_DATA:
509 case MVM_TCP_TX_FIN:
510 pkt->ip.tot_len = cpu_to_be16(ip_tot_len);
511 pkt->ip.check = ip_fast_csum(&pkt->ip, pkt->ip.ihl);
512 break;
513 case MVM_TCP_RX_WAKE:
514 for (i = 0; i < DIV_ROUND_UP(tcp->wake_len, 8); i++) {
515 u8 tmp = tcp->wake_mask[i];
516 mask[i + 6] |= tmp << 6;
517 if (i + 1 < DIV_ROUND_UP(tcp->wake_len, 8))
518 mask[i + 7] = tmp >> 2;
519 }
520 /* fall through for ethernet/IP/TCP headers mask */
521 case MVM_TCP_RX_SYNACK:
522 case MVM_TCP_RX_ACK:
523 mask[0] = 0xff; /* match ethernet */
524 /*
525 * match ethernet, ip.version, ip.ihl
526 * the ip.ihl half byte is really masked out by firmware
527 */
528 mask[1] = 0x7f;
529 mask[2] = 0x80; /* match ip.protocol */
530 mask[3] = 0xfc; /* match ip.saddr, ip.daddr */
531 mask[4] = 0x3f; /* match ip.daddr, tcp.source, tcp.dest */
532 mask[5] = 0x80; /* match tcp flags */
533 /* leave rest (0 or set for MVM_TCP_RX_WAKE) */
534 break;
535 }
536
537 *pseudo_hdr_csum = pseudo_hdr_check(ip_tot_len - sizeof(struct iphdr),
538 pkt->ip.saddr, pkt->ip.daddr);
539}
540
541static int iwl_mvm_send_remote_wake_cfg(struct iwl_mvm *mvm,
542 struct ieee80211_vif *vif,
543 struct cfg80211_wowlan_tcp *tcp)
544{
545 struct iwl_wowlan_remote_wake_config *cfg;
546 struct iwl_host_cmd cmd = {
547 .id = REMOTE_WAKE_CONFIG_CMD,
548 .len = { sizeof(*cfg), },
549 .dataflags = { IWL_HCMD_DFL_NOCOPY, },
550 .flags = CMD_SYNC,
551 };
552 int ret;
553
554 if (!tcp)
555 return 0;
556
557 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
558 if (!cfg)
559 return -ENOMEM;
560 cmd.data[0] = cfg;
561
562 cfg->max_syn_retries = 10;
563 cfg->max_data_retries = 10;
564 cfg->tcp_syn_ack_timeout = 1; /* seconds */
565 cfg->tcp_ack_timeout = 1; /* seconds */
566
567 /* SYN (TX) */
568 iwl_mvm_build_tcp_packet(
569 mvm, vif, tcp, cfg->syn_tx.data, NULL,
570 &cfg->syn_tx.info.tcp_pseudo_header_checksum,
571 MVM_TCP_TX_SYN);
572 cfg->syn_tx.info.tcp_payload_length = 0;
573
574 /* SYN/ACK (RX) */
575 iwl_mvm_build_tcp_packet(
576 mvm, vif, tcp, cfg->synack_rx.data, cfg->synack_rx.rx_mask,
577 &cfg->synack_rx.info.tcp_pseudo_header_checksum,
578 MVM_TCP_RX_SYNACK);
579 cfg->synack_rx.info.tcp_payload_length = 0;
580
581 /* KEEPALIVE/ACK (TX) */
582 iwl_mvm_build_tcp_packet(
583 mvm, vif, tcp, cfg->keepalive_tx.data, NULL,
584 &cfg->keepalive_tx.info.tcp_pseudo_header_checksum,
585 MVM_TCP_TX_DATA);
586 cfg->keepalive_tx.info.tcp_payload_length =
587 cpu_to_le16(tcp->payload_len);
588 cfg->sequence_number_offset = tcp->payload_seq.offset;
589 /* length must be 0..4, the field is little endian */
590 cfg->sequence_number_length = tcp->payload_seq.len;
591 cfg->initial_sequence_number = cpu_to_le32(tcp->payload_seq.start);
592 cfg->keepalive_interval = cpu_to_le16(tcp->data_interval);
593 if (tcp->payload_tok.len) {
594 cfg->token_offset = tcp->payload_tok.offset;
595 cfg->token_length = tcp->payload_tok.len;
596 cfg->num_tokens =
597 cpu_to_le16(tcp->tokens_size / tcp->payload_tok.len);
598 memcpy(cfg->tokens, tcp->payload_tok.token_stream,
599 tcp->tokens_size);
600 } else {
601 /* set tokens to max value to almost never run out */
602 cfg->num_tokens = cpu_to_le16(65535);
603 }
604
605 /* ACK (RX) */
606 iwl_mvm_build_tcp_packet(
607 mvm, vif, tcp, cfg->keepalive_ack_rx.data,
608 cfg->keepalive_ack_rx.rx_mask,
609 &cfg->keepalive_ack_rx.info.tcp_pseudo_header_checksum,
610 MVM_TCP_RX_ACK);
611 cfg->keepalive_ack_rx.info.tcp_payload_length = 0;
612
613 /* WAKEUP (RX) */
614 iwl_mvm_build_tcp_packet(
615 mvm, vif, tcp, cfg->wake_rx.data, cfg->wake_rx.rx_mask,
616 &cfg->wake_rx.info.tcp_pseudo_header_checksum,
617 MVM_TCP_RX_WAKE);
618 cfg->wake_rx.info.tcp_payload_length =
619 cpu_to_le16(tcp->wake_len);
620
621 /* FIN */
622 iwl_mvm_build_tcp_packet(
623 mvm, vif, tcp, cfg->fin_tx.data, NULL,
624 &cfg->fin_tx.info.tcp_pseudo_header_checksum,
625 MVM_TCP_TX_FIN);
626 cfg->fin_tx.info.tcp_payload_length = 0;
627
628 ret = iwl_mvm_send_cmd(mvm, &cmd);
629 kfree(cfg);
630
631 return ret;
632}
633
405struct iwl_d3_iter_data { 634struct iwl_d3_iter_data {
406 struct iwl_mvm *mvm; 635 struct iwl_mvm *mvm;
407 struct ieee80211_vif *vif; 636 struct ieee80211_vif *vif;
@@ -640,6 +869,22 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
640 d3_cfg_cmd.wakeup_flags |= 869 d3_cfg_cmd.wakeup_flags |=
641 cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT); 870 cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
642 871
872 if (wowlan->tcp) {
873 /*
874 * The firmware currently doesn't really look at these, only
875 * the IWL_WOWLAN_WAKEUP_LINK_CHANGE bit. We have to set that
876 * reason bit since losing the connection to the AP implies
877 * losing the TCP connection.
878 * Set the flags anyway while they exist, in case the
879 * firmware starts honoring them.
880 */
881 wowlan_config_cmd.wakeup_filter |=
882 cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS |
883 IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE |
884 IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET |
885 IWL_WOWLAN_WAKEUP_LINK_CHANGE);
886 }
887
643 iwl_mvm_cancel_scan(mvm); 888 iwl_mvm_cancel_scan(mvm);
644 889
645 iwl_trans_stop_device(mvm->trans); 890 iwl_trans_stop_device(mvm->trans);
@@ -755,6 +1000,10 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
755 if (ret) 1000 if (ret)
756 goto out; 1001 goto out;
757 1002
1003 ret = iwl_mvm_send_remote_wake_cfg(mvm, vif, wowlan->tcp);
1004 if (ret)
1005 goto out;
1006
758 /* must be last -- this switches firmware state */ 1007 /* must be last -- this switches firmware state */
759 ret = iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, CMD_SYNC, 1008 ret = iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, CMD_SYNC,
760 sizeof(d3_cfg_cmd), &d3_cfg_cmd); 1009 sizeof(d3_cfg_cmd), &d3_cfg_cmd);
@@ -874,6 +1123,15 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
874 if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE) 1123 if (reasons & IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE)
875 wakeup.four_way_handshake = true; 1124 wakeup.four_way_handshake = true;
876 1125
1126 if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS)
1127 wakeup.tcp_connlost = true;
1128
1129 if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE)
1130 wakeup.tcp_nomoretokens = true;
1131
1132 if (reasons & IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET)
1133 wakeup.tcp_match = true;
1134
877 if (status->wake_packet_bufsize) { 1135 if (status->wake_packet_bufsize) {
878 int pktsize = le32_to_cpu(status->wake_packet_bufsize); 1136 int pktsize = le32_to_cpu(status->wake_packet_bufsize);
879 int pktlen = le32_to_cpu(status->wake_packet_length); 1137 int pktlen = le32_to_cpu(status->wake_packet_length);
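
pseudo_hdr_check() above hands the firmware a checksum over just the TCP pseudo-header (source/destination address, protocol, TCP length), which the device extends into the full checksum once it patches sequence numbers and tokens into the live packets; tcp_v4_check() with a zero base computes exactly that partial sum. A standalone illustration of the same ones'-complement fold, taking the addresses as host-order u32 for simplicity:

    /* illustration of a TCP pseudo-header checksum; the kernel's
     * tcp_v4_check()/csum_tcpudp_magic() do this more efficiently */
    static u16 example_pseudo_hdr_csum(u32 saddr, u32 daddr, u16 tcp_len)
    {
            u32 sum = 0;

            sum += (saddr >> 16) + (saddr & 0xffff);
            sum += (daddr >> 16) + (daddr & 0xffff);
            sum += IPPROTO_TCP;     /* the zero pad byte plus protocol number */
            sum += tcp_len;

            while (sum >> 16)       /* fold the carries back in */
                    sum = (sum & 0xffff) + (sum >> 16);

            return ~sum & 0xffff;   /* ones' complement of the sum */
    }
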
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index c1bdb5582126..b080b4ba5458 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -69,12 +69,6 @@ struct iwl_dbgfs_mvm_ctx {
69 struct ieee80211_vif *vif; 69 struct ieee80211_vif *vif;
70}; 70};
71 71
72static int iwl_dbgfs_open_file_generic(struct inode *inode, struct file *file)
73{
74 file->private_data = inode->i_private;
75 return 0;
76}
77
78static ssize_t iwl_dbgfs_tx_flush_write(struct file *file, 72static ssize_t iwl_dbgfs_tx_flush_write(struct file *file,
79 const char __user *user_buf, 73 const char __user *user_buf,
80 size_t count, loff_t *ppos) 74 size_t count, loff_t *ppos)
@@ -306,10 +300,130 @@ static ssize_t iwl_dbgfs_power_down_d3_allow_write(struct file *file,
306 return count; 300 return count;
307} 301}
308 302
303#define BT_MBOX_MSG(_notif, _num, _field) \
304 ((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\
305 >> BT_MBOX##_num##_##_field##_POS)
306
307
308#define BT_MBOX_PRINT(_num, _field, _end) \
309 pos += scnprintf(buf + pos, bufsz - pos, \
310 "\t%s: %d%s", \
311 #_field, \
312 BT_MBOX_MSG(notif, _num, _field), \
313 (_end) ? "\n" : ", ");
314
315static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
316 size_t count, loff_t *ppos)
317{
318 struct iwl_mvm *mvm = file->private_data;
319 struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif;
320 char *buf;
321 int ret, pos = 0, bufsz = sizeof(char) * 1024;
322
323 buf = kmalloc(bufsz, GFP_KERNEL);
324 if (!buf)
325 return -ENOMEM;
326
327 mutex_lock(&mvm->mutex);
328
329 pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n");
330
331 BT_MBOX_PRINT(0, LE_SLAVE_LAT, false);
332 BT_MBOX_PRINT(0, LE_PROF1, false);
333 BT_MBOX_PRINT(0, LE_PROF2, false);
334 BT_MBOX_PRINT(0, LE_PROF_OTHER, false);
335 BT_MBOX_PRINT(0, CHL_SEQ_N, false);
336 BT_MBOX_PRINT(0, INBAND_S, false);
337 BT_MBOX_PRINT(0, LE_MIN_RSSI, false);
338 BT_MBOX_PRINT(0, LE_SCAN, false);
339 BT_MBOX_PRINT(0, LE_ADV, false);
340 BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false);
341 BT_MBOX_PRINT(0, OPEN_CON_1, true);
342
343 pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw1:\n");
344
345 BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false);
346 BT_MBOX_PRINT(1, IP_SR, false);
347 BT_MBOX_PRINT(1, LE_MSTR, false);
348 BT_MBOX_PRINT(1, AGGR_TRFC_LD, false);
349 BT_MBOX_PRINT(1, MSG_TYPE, false);
350 BT_MBOX_PRINT(1, SSN, true);
351
352 pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw2:\n");
353
354 BT_MBOX_PRINT(2, SNIFF_ACT, false);
355 BT_MBOX_PRINT(2, PAG, false);
356 BT_MBOX_PRINT(2, INQUIRY, false);
357 BT_MBOX_PRINT(2, CONN, false);
358 BT_MBOX_PRINT(2, SNIFF_INTERVAL, false);
359 BT_MBOX_PRINT(2, DISC, false);
360 BT_MBOX_PRINT(2, SCO_TX_ACT, false);
361 BT_MBOX_PRINT(2, SCO_RX_ACT, false);
362 BT_MBOX_PRINT(2, ESCO_RE_TX, false);
363 BT_MBOX_PRINT(2, SCO_DURATION, true);
364
365 pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw3:\n");
366
367 BT_MBOX_PRINT(3, SCO_STATE, false);
368 BT_MBOX_PRINT(3, SNIFF_STATE, false);
369 BT_MBOX_PRINT(3, A2DP_STATE, false);
370 BT_MBOX_PRINT(3, ACL_STATE, false);
371 BT_MBOX_PRINT(3, MSTR_STATE, false);
372 BT_MBOX_PRINT(3, OBX_STATE, false);
373 BT_MBOX_PRINT(3, OPEN_CON_2, false);
374 BT_MBOX_PRINT(3, TRAFFIC_LOAD, false);
375 BT_MBOX_PRINT(3, CHL_SEQN_LSB, false);
376 BT_MBOX_PRINT(3, INBAND_P, false);
377 BT_MBOX_PRINT(3, MSG_TYPE_2, false);
378 BT_MBOX_PRINT(3, SSN_2, false);
379 BT_MBOX_PRINT(3, UPDATE_REQUEST, true);
380
381 pos += scnprintf(buf+pos, bufsz-pos, "bt_status = %d\n",
382 notif->bt_status);
383 pos += scnprintf(buf+pos, bufsz-pos, "bt_open_conn = %d\n",
384 notif->bt_open_conn);
385 pos += scnprintf(buf+pos, bufsz-pos, "bt_traffic_load = %d\n",
386 notif->bt_traffic_load);
387 pos += scnprintf(buf+pos, bufsz-pos, "bt_agg_traffic_load = %d\n",
388 notif->bt_agg_traffic_load);
389 pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n",
390 notif->bt_ci_compliance);
391
392 mutex_unlock(&mvm->mutex);
393
394 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
395 kfree(buf);
396
397 return ret;
398}
399#undef BT_MBOX_PRINT
400
401static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
402 const char __user *user_buf,
403 size_t count, loff_t *ppos)
404{
405 struct iwl_mvm *mvm = file->private_data;
406 bool restart_fw = iwlwifi_mod_params.restart_fw;
407 int ret;
408
409 iwlwifi_mod_params.restart_fw = true;
410
411 mutex_lock(&mvm->mutex);
412
413 /* take the return value to make compiler happy - it will fail anyway */
414 ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_ERROR, CMD_SYNC, 0, NULL);
415
416 mutex_unlock(&mvm->mutex);
417
418 iwlwifi_mod_params.restart_fw = restart_fw;
419
420 return count;
421}
422
309#define MVM_DEBUGFS_READ_FILE_OPS(name) \ 423#define MVM_DEBUGFS_READ_FILE_OPS(name) \
310static const struct file_operations iwl_dbgfs_##name##_ops = { \ 424static const struct file_operations iwl_dbgfs_##name##_ops = { \
311 .read = iwl_dbgfs_##name##_read, \ 425 .read = iwl_dbgfs_##name##_read, \
312 .open = iwl_dbgfs_open_file_generic, \ 426 .open = simple_open, \
313 .llseek = generic_file_llseek, \ 427 .llseek = generic_file_llseek, \
314} 428}
315 429
@@ -317,14 +431,14 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
317static const struct file_operations iwl_dbgfs_##name##_ops = { \ 431static const struct file_operations iwl_dbgfs_##name##_ops = { \
318 .write = iwl_dbgfs_##name##_write, \ 432 .write = iwl_dbgfs_##name##_write, \
319 .read = iwl_dbgfs_##name##_read, \ 433 .read = iwl_dbgfs_##name##_read, \
320 .open = iwl_dbgfs_open_file_generic, \ 434 .open = simple_open, \
321 .llseek = generic_file_llseek, \ 435 .llseek = generic_file_llseek, \
322}; 436};
323 437
324#define MVM_DEBUGFS_WRITE_FILE_OPS(name) \ 438#define MVM_DEBUGFS_WRITE_FILE_OPS(name) \
325static const struct file_operations iwl_dbgfs_##name##_ops = { \ 439static const struct file_operations iwl_dbgfs_##name##_ops = { \
326 .write = iwl_dbgfs_##name##_write, \ 440 .write = iwl_dbgfs_##name##_write, \
327 .open = iwl_dbgfs_open_file_generic, \ 441 .open = simple_open, \
328 .llseek = generic_file_llseek, \ 442 .llseek = generic_file_llseek, \
329}; 443};
330 444
@@ -345,8 +459,10 @@ MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush);
345MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain); 459MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain);
346MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram); 460MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram);
347MVM_DEBUGFS_READ_FILE_OPS(stations); 461MVM_DEBUGFS_READ_FILE_OPS(stations);
462MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
348MVM_DEBUGFS_WRITE_FILE_OPS(power_down_allow); 463MVM_DEBUGFS_WRITE_FILE_OPS(power_down_allow);
349MVM_DEBUGFS_WRITE_FILE_OPS(power_down_d3_allow); 464MVM_DEBUGFS_WRITE_FILE_OPS(power_down_d3_allow);
465MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart);
350 466
351int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir) 467int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
352{ 468{
@@ -358,8 +474,10 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
358 MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, S_IWUSR); 474 MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, S_IWUSR);
359 MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, S_IWUSR | S_IRUSR); 475 MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, S_IWUSR | S_IRUSR);
360 MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR); 476 MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR);
477 MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
361 MVM_DEBUGFS_ADD_FILE(power_down_allow, mvm->debugfs_dir, S_IWUSR); 478 MVM_DEBUGFS_ADD_FILE(power_down_allow, mvm->debugfs_dir, S_IWUSR);
362 MVM_DEBUGFS_ADD_FILE(power_down_d3_allow, mvm->debugfs_dir, S_IWUSR); 479 MVM_DEBUGFS_ADD_FILE(power_down_d3_allow, mvm->debugfs_dir, S_IWUSR);
480 MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
363 481
364 /* 482 /*
365 * Create a symlink with mac80211. It will be removed when mac80211 483 * Create a symlink with mac80211. It will be removed when mac80211
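
The debugfs conversion above drops the driver's private open handler in favor of the generic VFS helper simple_open(), which does the same inode->i_private to file->private_data hand-off with an added NULL check. Its shape in fs/libfs.c is roughly:

    int simple_open(struct inode *inode, struct file *file)
    {
            if (inode->i_private)
                    file->private_data = inode->i_private;
            return 0;
    }
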
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
new file mode 100644
index 000000000000..05c61d6f384e
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
@@ -0,0 +1,319 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *  * Neither the name Intel Corporation nor the names of its
+ *    contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef __fw_api_bt_coex_h__
+#define __fw_api_bt_coex_h__
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#define BITS(nb) (BIT(nb) - 1)
+
+/**
+ * enum iwl_bt_coex_flags - flags for BT_COEX command
+ * @BT_CH_PRIMARY_EN:
+ * @BT_CH_SECONDARY_EN:
+ * @BT_NOTIF_COEX_OFF:
+ * @BT_COEX_MODE_POS:
+ * @BT_COEX_MODE_MSK:
+ * @BT_COEX_DISABLE:
+ * @BT_COEX_2W:
+ * @BT_COEX_3W:
+ * @BT_COEX_NW:
+ * @BT_USE_DEFAULTS:
+ * @BT_SYNC_2_BT_DISABLE:
+ * @BT_COEX_CORUNNING_TBL_EN:
+ */
+enum iwl_bt_coex_flags {
+	BT_CH_PRIMARY_EN	= BIT(0),
+	BT_CH_SECONDARY_EN	= BIT(1),
+	BT_NOTIF_COEX_OFF	= BIT(2),
+	BT_COEX_MODE_POS	= 3,
+	BT_COEX_MODE_MSK	= BITS(3) << BT_COEX_MODE_POS,
+	BT_COEX_DISABLE		= 0x0 << BT_COEX_MODE_POS,
+	BT_COEX_2W		= 0x1 << BT_COEX_MODE_POS,
+	BT_COEX_3W		= 0x2 << BT_COEX_MODE_POS,
+	BT_COEX_NW		= 0x3 << BT_COEX_MODE_POS,
+	BT_USE_DEFAULTS		= BIT(6),
+	BT_SYNC_2_BT_DISABLE	= BIT(7),
+	/*
+	 * For future use - when the flags will be enlarged
+	 * BT_COEX_CORUNNING_TBL_EN = BIT(8),
+	 */
+};
+
+/*
+ * indicates what has changed in the BT_COEX command.
+ */
+enum iwl_bt_coex_valid_bit_msk {
+	BT_VALID_ENABLE			= BIT(0),
+	BT_VALID_BT_PRIO_BOOST		= BIT(1),
+	BT_VALID_MAX_KILL		= BIT(2),
+	BT_VALID_3W_TMRS		= BIT(3),
+	BT_VALID_KILL_ACK		= BIT(4),
+	BT_VALID_KILL_CTS		= BIT(5),
+	BT_VALID_REDUCED_TX_POWER	= BIT(6),
+	BT_VALID_LUT			= BIT(7),
+	BT_VALID_WIFI_RX_SW_PRIO_BOOST	= BIT(8),
+	BT_VALID_WIFI_TX_SW_PRIO_BOOST	= BIT(9),
+	BT_VALID_MULTI_PRIO_LUT		= BIT(10),
+	BT_VALID_TRM_KICK_FILTER	= BIT(11),
+	BT_VALID_CORUN_LUT_20		= BIT(12),
+	BT_VALID_CORUN_LUT_40		= BIT(13),
+	BT_VALID_ANT_ISOLATION		= BIT(14),
+	BT_VALID_ANT_ISOLATION_THRS	= BIT(15),
+	/*
+	 * For future use - when the valid flags will be enlarged
+	 * BT_VALID_TXTX_DELTA_FREQ_THRS = BIT(16),
+	 * BT_VALID_TXRX_MAX_FREQ_0 = BIT(17),
+	 */
+};
+
+/**
+ * enum iwl_bt_reduced_tx_power - allows reducing Tx power for WiFi frames.
+ * @BT_REDUCED_TX_POWER_CTL: reduce Tx power for control frames
+ * @BT_REDUCED_TX_POWER_DATA: reduce Tx power for data frames
+ *
+ * This mechanism allows BT and WiFi to run concurrently. Since WiFi
+ * reduces its Tx power, it can work along with BT, hence reducing the
+ * number of WiFi frames being killed by BT.
+ */
+enum iwl_bt_reduced_tx_power {
+	BT_REDUCED_TX_POWER_CTL		= BIT(0),
+	BT_REDUCED_TX_POWER_DATA	= BIT(1),
+};
+
+#define BT_COEX_LUT_SIZE (12)
+
+/**
+ * struct iwl_bt_coex_cmd - bt coex configuration command
+ * @flags: &enum iwl_bt_coex_flags
+ * @lead_time:
+ * @max_kill:
+ * @bt3_time_t7_value:
+ * @kill_ack_msk:
+ * @kill_cts_msk:
+ * @bt3_prio_sample_time:
+ * @bt3_timer_t2_value:
+ * @bt4_reaction_time:
+ * @decision_lut:
+ * @bt_reduced_tx_power: enum %iwl_bt_reduced_tx_power
+ * @valid_bit_msk: enum %iwl_bt_coex_valid_bit_msk
+ * @bt_prio_boost: values for PTA boost register
+ * @wifi_tx_prio_boost: SW boost of wifi tx priority
+ * @wifi_rx_prio_boost: SW boost of wifi rx priority
+ *
+ * The structure is used for the BT_COEX command.
+ */
+struct iwl_bt_coex_cmd {
+	u8 flags;
+	u8 lead_time;
+	u8 max_kill;
+	u8 bt3_time_t7_value;
+	__le32 kill_ack_msk;
+	__le32 kill_cts_msk;
+	u8 bt3_prio_sample_time;
+	u8 bt3_timer_t2_value;
+	__le16 bt4_reaction_time;
+	__le32 decision_lut[BT_COEX_LUT_SIZE];
+	u8 bt_reduced_tx_power;
+	u8 reserved;
+	__le16 valid_bit_msk;
+	__le32 bt_prio_boost;
+	u8 reserved2;
+	u8 wifi_tx_prio_boost;
+	__le16 wifi_rx_prio_boost;
+} __packed; /* BT_COEX_CMD_API_S_VER_3 */
+
+#define BT_MBOX(n_dw, _msg, _pos, _nbits)	\
+	BT_MBOX##n_dw##_##_msg##_POS	= (_pos),	\
+	BT_MBOX##n_dw##_##_msg		= BITS(_nbits) << BT_MBOX##n_dw##_##_msg##_POS
+
+enum iwl_bt_mxbox_dw0 {
+	BT_MBOX(0, LE_SLAVE_LAT, 0, 3),
+	BT_MBOX(0, LE_PROF1, 3, 1),
+	BT_MBOX(0, LE_PROF2, 4, 1),
+	BT_MBOX(0, LE_PROF_OTHER, 5, 1),
+	BT_MBOX(0, CHL_SEQ_N, 8, 4),
+	BT_MBOX(0, INBAND_S, 13, 1),
+	BT_MBOX(0, LE_MIN_RSSI, 16, 4),
+	BT_MBOX(0, LE_SCAN, 20, 1),
+	BT_MBOX(0, LE_ADV, 21, 1),
+	BT_MBOX(0, LE_MAX_TX_POWER, 24, 4),
+	BT_MBOX(0, OPEN_CON_1, 28, 2),
+};
+
+enum iwl_bt_mxbox_dw1 {
+	BT_MBOX(1, BR_MAX_TX_POWER, 0, 4),
+	BT_MBOX(1, IP_SR, 4, 1),
+	BT_MBOX(1, LE_MSTR, 5, 1),
+	BT_MBOX(1, AGGR_TRFC_LD, 8, 6),
+	BT_MBOX(1, MSG_TYPE, 16, 3),
+	BT_MBOX(1, SSN, 19, 2),
+};
+
+enum iwl_bt_mxbox_dw2 {
+	BT_MBOX(2, SNIFF_ACT, 0, 3),
+	BT_MBOX(2, PAG, 3, 1),
+	BT_MBOX(2, INQUIRY, 4, 1),
+	BT_MBOX(2, CONN, 5, 1),
+	BT_MBOX(2, SNIFF_INTERVAL, 8, 5),
+	BT_MBOX(2, DISC, 13, 1),
+	BT_MBOX(2, SCO_TX_ACT, 16, 2),
+	BT_MBOX(2, SCO_RX_ACT, 18, 2),
+	BT_MBOX(2, ESCO_RE_TX, 20, 2),
+	BT_MBOX(2, SCO_DURATION, 24, 6),
+};
+
+enum iwl_bt_mxbox_dw3 {
+	BT_MBOX(3, SCO_STATE, 0, 1),
+	BT_MBOX(3, SNIFF_STATE, 1, 1),
+	BT_MBOX(3, A2DP_STATE, 2, 1),
+	BT_MBOX(3, ACL_STATE, 3, 1),
+	BT_MBOX(3, MSTR_STATE, 4, 1),
+	BT_MBOX(3, OBX_STATE, 5, 1),
+	BT_MBOX(3, OPEN_CON_2, 8, 2),
+	BT_MBOX(3, TRAFFIC_LOAD, 10, 2),
+	BT_MBOX(3, CHL_SEQN_LSB, 12, 1),
+	BT_MBOX(3, INBAND_P, 13, 1),
+	BT_MBOX(3, MSG_TYPE_2, 16, 3),
+	BT_MBOX(3, SSN_2, 19, 2),
+	BT_MBOX(3, UPDATE_REQUEST, 21, 1),
+};
+
+#define BT_MBOX_MSG(_notif, _num, _field)	\
+	((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)	\
+	>> BT_MBOX##_num##_##_field##_POS)
+
+/**
+ * struct iwl_bt_coex_profile_notif - notification about BT coex
+ * @mbox_msg: message from BT to WiFi
+ * @bt_status: 0 - off, 1 - on
+ * @bt_open_conn: number of BT connections open
+ * @bt_traffic_load: load of BT traffic
+ * @bt_agg_traffic_load: aggregated load of BT traffic
+ * @bt_ci_compliance: 0 - no CI compliance, 1 - CI compliant
+ */
+struct iwl_bt_coex_profile_notif {
+	__le32 mbox_msg[4];
+	u8 bt_status;
+	u8 bt_open_conn;
+	u8 bt_traffic_load;
+	u8 bt_agg_traffic_load;
+	u8 bt_ci_compliance;
+	u8 reserved[3];
+} __packed; /* BT_COEX_PROFILE_NTFY_API_S_VER_2 */
+
+enum iwl_bt_coex_prio_table_event {
+	BT_COEX_PRIO_TBL_EVT_INIT_CALIB1		= 0,
+	BT_COEX_PRIO_TBL_EVT_INIT_CALIB2		= 1,
+	BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1	= 2,
+	BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2	= 3,
+	BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1	= 4,
+	BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2	= 5,
+	BT_COEX_PRIO_TBL_EVT_DTIM			= 6,
+	BT_COEX_PRIO_TBL_EVT_SCAN52			= 7,
+	BT_COEX_PRIO_TBL_EVT_SCAN24			= 8,
+	BT_COEX_PRIO_TBL_EVT_IDLE			= 9,
+	BT_COEX_PRIO_TBL_EVT_MAX			= 16,
+}; /* BT_COEX_PRIO_TABLE_EVENTS_API_E_VER_1 */
+
+enum iwl_bt_coex_prio_table_prio {
+	BT_COEX_PRIO_TBL_DISABLED	= 0,
+	BT_COEX_PRIO_TBL_PRIO_LOW	= 1,
+	BT_COEX_PRIO_TBL_PRIO_HIGH	= 2,
+	BT_COEX_PRIO_TBL_PRIO_BYPASS	= 3,
+	BT_COEX_PRIO_TBL_PRIO_COEX_OFF	= 4,
+	BT_COEX_PRIO_TBL_PRIO_COEX_ON	= 5,
+	BT_COEX_PRIO_TBL_PRIO_COEX_IDLE	= 6,
+	BT_COEX_PRIO_TBL_MAX		= 8,
+}; /* BT_COEX_PRIO_TABLE_PRIORITIES_API_E_VER_1 */
+
+#define BT_COEX_PRIO_TBL_SHRD_ANT_POS	(0)
+#define BT_COEX_PRIO_TBL_PRIO_POS	(1)
+#define BT_COEX_PRIO_TBL_RESERVED_POS	(4)
+
+/**
+ * struct iwl_bt_coex_prio_tbl_cmd - priority table for BT coex
+ * @prio_tbl:
+ */
+struct iwl_bt_coex_prio_tbl_cmd {
+	u8 prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX];
+} __packed;
+
+enum iwl_bt_coex_env_action {
+	BT_COEX_ENV_CLOSE	= 0,
+	BT_COEX_ENV_OPEN	= 1,
+}; /* BT_COEX_PROT_ENV_ACTION_API_E_VER_1 */
+
+/**
+ * struct iwl_bt_coex_prot_env_cmd - BT Protection Envelope
+ * @action: enum %iwl_bt_coex_env_action
+ * @type: enum %iwl_bt_coex_prio_table_event
+ */
+struct iwl_bt_coex_prot_env_cmd {
+	u8 action; /* 0 = closed, 1 = open */
+	u8 type; /* 0 .. 15 */
+	u8 reserved[2];
+} __packed;
+
+#endif /* __fw_api_bt_coex_h__ */
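
The BT_MBOX()/BT_MBOX_MSG() pair above encodes every mailbox field as a (position, mask) enum pair, so decoding a field is a single byte-swap, mask and shift. A minimal decoding sketch (hypothetical helper, not part of the patch; it relies only on the definitions above):

static inline u32 example_bt_traffic_load(struct iwl_bt_coex_profile_notif *notif)
{
	/* BT_MBOX(3, TRAFFIC_LOAD, 10, 2) declared a 2-bit field at bits
	 * 10..11 of mbox_msg[3]; BT_MBOX_MSG() masks and shifts it out. */
	return BT_MBOX_MSG(notif, 3, TRAFFIC_LOAD);
}
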
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
index cf6f9a02fb74..51e015d1dfb2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -22,7 +22,7 @@
  * USA
  *
  * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
  *
  * Contact Information:
  * Intel Linux Wireless <ilw@linux.intel.com>
@@ -258,7 +258,7 @@ enum iwl_wowlan_wakeup_reason {
 	IWL_WOWLAN_WAKEUP_BY_FOUR_WAY_HANDSHAKE = BIT(8),
 	IWL_WOWLAN_WAKEUP_BY_REM_WAKE_LINK_LOSS = BIT(9),
 	IWL_WOWLAN_WAKEUP_BY_REM_WAKE_SIGNATURE_TABLE = BIT(10),
-	IWL_WOWLAN_WAKEUP_BY_REM_WAKE_TCP_EXTERNAL = BIT(11),
+	/* BIT(11) reserved */
 	IWL_WOWLAN_WAKEUP_BY_REM_WAKE_WAKEUP_PACKET = BIT(12),
 }; /* WOWLAN_WAKE_UP_REASON_API_E_VER_2 */
 
@@ -277,6 +277,55 @@ struct iwl_wowlan_status {
 	u8 wake_packet[]; /* can be truncated from _length to _bufsize */
 } __packed; /* WOWLAN_STATUSES_API_S_VER_4 */
 
+#define IWL_WOWLAN_TCP_MAX_PACKET_LEN		64
+#define IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN	128
+#define IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS	2048
+
+struct iwl_tcp_packet_info {
+	__le16 tcp_pseudo_header_checksum;
+	__le16 tcp_payload_length;
+} __packed; /* TCP_PACKET_INFO_API_S_VER_2 */
+
+struct iwl_tcp_packet {
+	struct iwl_tcp_packet_info info;
+	u8 rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
+	u8 data[IWL_WOWLAN_TCP_MAX_PACKET_LEN];
+} __packed; /* TCP_PROTOCOL_PACKET_API_S_VER_1 */
+
+struct iwl_remote_wake_packet {
+	struct iwl_tcp_packet_info info;
+	u8 rx_mask[IWL_WOWLAN_MAX_PATTERN_LEN / 8];
+	u8 data[IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN];
+} __packed; /* TCP_PROTOCOL_PACKET_API_S_VER_1 */
+
+struct iwl_wowlan_remote_wake_config {
+	__le32 connection_max_time; /* unused */
+	/* TCP_PROTOCOL_CONFIG_API_S_VER_1 */
+	u8 max_syn_retries;
+	u8 max_data_retries;
+	u8 tcp_syn_ack_timeout;
+	u8 tcp_ack_timeout;
+
+	struct iwl_tcp_packet syn_tx;
+	struct iwl_tcp_packet synack_rx;
+	struct iwl_tcp_packet keepalive_ack_rx;
+	struct iwl_tcp_packet fin_tx;
+
+	struct iwl_remote_wake_packet keepalive_tx;
+	struct iwl_remote_wake_packet wake_rx;
+
+	/* REMOTE_WAKE_OFFSET_INFO_API_S_VER_1 */
+	u8 sequence_number_offset;
+	u8 sequence_number_length;
+	u8 token_offset;
+	u8 token_length;
+	/* REMOTE_WAKE_PROTOCOL_PARAMS_API_S_VER_1 */
+	__le32 initial_sequence_number;
+	__le16 keepalive_interval;
+	__le16 num_tokens;
+	u8 tokens[IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS];
+} __packed; /* REMOTE_WAKE_CONFIG_API_S_VER_2 */
+
 /* TODO: NetDetect API */
 
 #endif /* __fw_api_d3_h__ */
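
Each TCP and remote-wake packet above pairs its payload with an rx_mask of IWL_WOWLAN_MAX_PATTERN_LEN / 8 bytes, i.e. one mask bit per payload byte, matching the convention of the existing WoWLAN pattern API. A hedged sketch of marking payload byte i as significant (hypothetical helper, written under that assumption):

static void example_mark_byte(struct iwl_tcp_packet *pkt, unsigned int i)
{
	/* bit i of rx_mask selects byte i of data[] for matching */
	pkt->rx_mask[i / 8] |= BIT(i % 8);
}
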
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
index ae39b7dfda7b..d68640ea41d4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-mac.h
@@ -22,7 +22,7 @@
  * USA
  *
  * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
  *
  * Contact Information:
  * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index be36b7604b7f..127051891e9b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -22,7 +22,7 @@
  * USA
  *
  * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
  *
  * Contact Information:
  * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
index aa3474d08231..fdd33bc0a594 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-rs.h
@@ -22,7 +22,7 @@
  * USA
  *
  * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
  *
  * Contact Information:
  * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 670ac8f95e26..b60d14151721 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -22,7 +22,7 @@
  * USA
  *
  * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
  *
  * Contact Information:
  * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
index 0acb53dda22d..a30691a8a85b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
@@ -22,7 +22,7 @@
  * USA
  *
  * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
  *
  * Contact Information:
  * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
index 2677914bf0a6..6d53850c5448 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
@@ -22,7 +22,7 @@
  * USA
  *
  * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
  *
  * Contact Information:
  * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 2adb61f103f4..f8d7e88234e4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -22,7 +22,7 @@
  * USA
  *
  * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
  *
  * Contact Information:
  * Intel Linux Wireless <ilw@linux.intel.com>
@@ -70,6 +70,7 @@
 #include "fw-api-mac.h"
 #include "fw-api-power.h"
 #include "fw-api-d3.h"
+#include "fw-api-bt-coex.h"
 
 /* queue and FIFO numbers by usage */
 enum {
@@ -152,6 +153,7 @@ enum {
 
 	BEACON_TEMPLATE_CMD = 0x91,
 	TX_ANT_CONFIGURATION_CMD = 0x98,
+	BT_CONFIG = 0x9b,
 	STATISTICS_NOTIFICATION = 0x9d,
 
 	/* RF-KILL commands and notifications */
@@ -162,6 +164,11 @@ enum {
 	REPLY_RX_MPDU_CMD = 0xc1,
 	BA_NOTIF = 0xc5,
 
+	/* BT Coex */
+	BT_COEX_PRIO_TABLE = 0xcc,
+	BT_COEX_PROT_ENV = 0xcd,
+	BT_PROFILE_NOTIFICATION = 0xce,
+
 	REPLY_DEBUG_CMD = 0xf0,
 	DEBUG_LOG_MSG = 0xf7,
 
@@ -794,6 +801,7 @@ struct iwl_phy_context_cmd {
  * @byte_count: frame's byte-count
  * @frame_time: frame's time on the air, based on byte count and frame rate
  *	calculation
+ * @mac_active_msk: what MACs were active when the frame was received
  *
  * Before each Rx, the device sends this data. It contains PHY information
  * about the reception of the packet.
@@ -811,7 +819,7 @@ struct iwl_rx_phy_info {
 	__le32 non_cfg_phy[IWL_RX_INFO_PHY_CNT];
 	__le32 rate_n_flags;
 	__le32 byte_count;
-	__le16 reserved2;
+	__le16 mac_active_msk;
 	__le16 frame_time;
 } __packed;
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index 500f818dba04..1006b3204e7b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -22,7 +22,7 @@
  * USA
  *
  * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
  *
  * Contact Information:
  * Intel Linux Wireless <ilw@linux.intel.com>
@@ -309,6 +309,10 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
 		goto error;
 	}
 
+	ret = iwl_send_bt_prio_tbl(mvm);
+	if (ret)
+		goto error;
+
 	if (read_nvm) {
 		/* Read nvm */
 		ret = iwl_nvm_init(mvm);
@@ -414,6 +418,14 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
 	if (ret)
 		goto error;
 
+	ret = iwl_send_bt_prio_tbl(mvm);
+	if (ret)
+		goto error;
+
+	ret = iwl_send_bt_init_conf(mvm);
+	if (ret)
+		goto error;
+
 	/* Send phy db control command and then phy db calibration*/
 	ret = iwl_send_phy_db_data(mvm->phy_db);
 	if (ret)
diff --git a/drivers/net/wireless/iwlwifi/mvm/led.c b/drivers/net/wireless/iwlwifi/mvm/led.c
index 011906e73a05..2269a9e5cc67 100644
--- a/drivers/net/wireless/iwlwifi/mvm/led.c
+++ b/drivers/net/wireless/iwlwifi/mvm/led.c
@@ -22,7 +22,7 @@
  * USA
  *
  * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
  *
  * Contact Information:
  * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 341dbc0237ea..2779235daa35 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -22,7 +22,7 @@
  * USA
  *
  * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
  *
  * Contact Information:
  * Intel Linux Wireless <ilw@linux.intel.com>
@@ -553,9 +553,9 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
 	if (vif->bss_conf.qos)
 		cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
 
+	/* Don't use cts to self as the fw doesn't support it currently. */
 	if (vif->bss_conf.use_cts_prot)
-		cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT |
-						     MAC_PROT_FLG_SELF_CTS_EN);
+		cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
 
 	/*
 	 * I think that we should enable these 2 flags regardless the HT PROT
@@ -651,6 +651,13 @@ static int iwl_mvm_mac_ctxt_cmd_station(struct iwl_mvm *mvm,
 	/* Fill the common data for all mac context types */
 	iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
 
+	/* Allow beacons to pass through as long as we are not associated, or
+	 * we do not have dtim period information */
+	if (!vif->bss_conf.assoc || !vif->bss_conf.dtim_period)
+		cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_BEACON);
+	else
+		cmd.filter_flags &= ~cpu_to_le32(MAC_FILTER_IN_BEACON);
+
 	/* Fill the data specific for station mode */
 	iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.sta);
 
@@ -714,7 +721,9 @@ static int iwl_mvm_mac_ctxt_cmd_p2p_device(struct iwl_mvm *mvm,
 	iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
 
 	cmd.protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
-	cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROMISC);
+
+	/* Override the filter flags to accept only probe requests */
+	cmd.filter_flags = cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
 
 	/*
 	 * This flag should be set to true when the P2P Device is
@@ -846,10 +855,10 @@ int iwl_mvm_mac_ctxt_beacon_changed(struct iwl_mvm *mvm,
  */
 static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
 					 struct ieee80211_vif *vif,
-					 struct iwl_mac_data_ap *ctxt_ap)
+					 struct iwl_mac_data_ap *ctxt_ap,
+					 bool add)
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-	u32 curr_dev_time;
 
 	ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int);
 	ctxt_ap->bi_reciprocal =
@@ -861,10 +870,19 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm,
 					       vif->bss_conf.dtim_period));
 
 	ctxt_ap->mcast_qid = cpu_to_le32(vif->cab_queue);
-	curr_dev_time = iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
-	ctxt_ap->beacon_time = cpu_to_le32(curr_dev_time);
 
-	ctxt_ap->beacon_tsf = cpu_to_le64(curr_dev_time);
+	/*
+	 * Only read the system time when the MAC is being added; when we
+	 * just modify the MAC we should keep the time -- the firmware
+	 * can otherwise have a "jumping" TBTT.
+	 */
+	if (add)
+		mvmvif->ap_beacon_time =
+			iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG);
+
+	ctxt_ap->beacon_time = cpu_to_le32(mvmvif->ap_beacon_time);
+
+	ctxt_ap->beacon_tsf = 0; /* unused */
 
 	/* TODO: Assume that the beacon id == mac context id */
 	ctxt_ap->beacon_template = cpu_to_le32(mvmvif->id);
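
With the change above, DEVICE_SYSTEM_TIME_REG is sampled only when the MAC is added and cached in mvmvif->ap_beacon_time; a later MODIFY reuses the cached value, so the firmware's TBTT reference cannot jump. A sketch of the resulting call pattern (hypothetical wrapper; struct iwl_mac_ctx_cmd is assumed from fw-api-mac.h):

static void example_fill_ap_ctxt(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				 struct iwl_mac_ctx_cmd *cmd, u32 action)
{
	/* 'add' is true only for FW_CTXT_ACTION_ADD: latch the clock once */
	iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd->ap,
				     action == FW_CTXT_ACTION_ADD);
}
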
@@ -881,8 +899,12 @@ static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm,
 	/* Fill the common data for all mac context types */
 	iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
 
+	/* Also enable probe requests to pass */
+	cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
+
 	/* Fill the data specific for ap mode */
-	iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.ap);
+	iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.ap,
+				     action == FW_CTXT_ACTION_ADD);
 
 	return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
 }
@@ -899,7 +921,8 @@ static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm,
 	iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
 
 	/* Fill the data specific for GO mode */
-	iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.go.ap);
+	iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.go.ap,
+				     action == FW_CTXT_ACTION_ADD);
 
 	cmd.go.ctwin = cpu_to_le32(vif->bss_conf.p2p_ctwindow);
 	cmd.go.opp_ps_enabled = cpu_to_le32(!!vif->bss_conf.p2p_oppps);
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 7e169b085afe..14dd5ee9a01e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -22,7 +22,7 @@
  * USA
  *
  * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
  *
  * Contact Information:
  * Intel Linux Wireless <ilw@linux.intel.com>
@@ -65,7 +65,9 @@
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/ip.h>
 #include <net/mac80211.h>
+#include <net/tcp.h>
 
 #include "iwl-op-mode.h"
 #include "iwl-io.h"
@@ -102,10 +104,33 @@ static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
 	},
 };
 
+#ifdef CONFIG_PM_SLEEP
+static const struct nl80211_wowlan_tcp_data_token_feature
+iwl_mvm_wowlan_tcp_token_feature = {
+	.min_len = 0,
+	.max_len = 255,
+	.bufsize = IWL_WOWLAN_REMOTE_WAKE_MAX_TOKENS,
+};
+
+static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = {
+	.tok = &iwl_mvm_wowlan_tcp_token_feature,
+	.data_payload_max = IWL_WOWLAN_TCP_MAX_PACKET_LEN -
+			    sizeof(struct ethhdr) -
+			    sizeof(struct iphdr) -
+			    sizeof(struct tcphdr),
+	.data_interval_max = 65535, /* __le16 in API */
+	.wake_payload_max = IWL_WOWLAN_REMOTE_WAKE_MAX_PACKET_LEN -
+			    sizeof(struct ethhdr) -
+			    sizeof(struct iphdr) -
+			    sizeof(struct tcphdr),
+	.seq = true,
+};
+#endif
+
 int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 {
 	struct ieee80211_hw *hw = mvm->hw;
-	int num_mac, ret;
+	int num_mac, ret, i;
 
 	/* Tell mac80211 our characteristics */
 	hw->flags = IEEE80211_HW_SIGNAL_DBM |
@@ -156,11 +181,15 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 	memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
 	hw->wiphy->addresses = mvm->addresses;
 	hw->wiphy->n_addresses = 1;
-	num_mac = mvm->nvm_data->n_hw_addrs;
-	if (num_mac > 1) {
-		memcpy(mvm->addresses[1].addr, mvm->addresses[0].addr,
+
+	/* Extract additional MAC addresses if available */
+	num_mac = (mvm->nvm_data->n_hw_addrs > 1) ?
+		min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1;
+
+	for (i = 1; i < num_mac; i++) {
+		memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr,
 			ETH_ALEN);
-		mvm->addresses[1].addr[5]++;
+		mvm->addresses[i].addr[5]++;
 		hw->wiphy->n_addresses++;
 	}
 
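
The extra addresses are derived, not read: each one copies its predecessor and increments the last octet, bounded by IWL_MVM_MAX_ADDRESSES. A standalone sketch of that rule (illustrative only; like the patch, it does not handle wrap-around of the last octet):

static void example_derive_addrs(u8 addrs[][ETH_ALEN], int num_mac)
{
	int i;

	for (i = 1; i < num_mac; i++) {
		memcpy(addrs[i], addrs[i - 1], ETH_ALEN);
		addrs[i][5]++;
	}
}
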
@@ -206,6 +235,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 		hw->wiphy->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
 		hw->wiphy->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
 		hw->wiphy->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
+		hw->wiphy->wowlan.tcp = &iwl_mvm_wowlan_tcp_support;
 	}
 #endif
 
@@ -273,12 +303,18 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
 		ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false);
 		break;
 	case IEEE80211_AMPDU_TX_START:
+		if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) {
+			ret = -EINVAL;
+			break;
+		}
 		ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn);
 		break;
 	case IEEE80211_AMPDU_TX_STOP_CONT:
+		ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
+		break;
 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
-		ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
+		ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid);
 		break;
 	case IEEE80211_AMPDU_TX_OPERATIONAL:
 		ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, buf_size);
@@ -1090,7 +1126,8 @@ static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
 static int iwl_mvm_roc(struct ieee80211_hw *hw,
 		       struct ieee80211_vif *vif,
 		       struct ieee80211_channel *channel,
-		       int duration)
+		       int duration,
+		       enum ieee80211_roc_type type)
 {
 	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
 	struct cfg80211_chan_def chandef;
@@ -1101,8 +1138,8 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
 		return -EINVAL;
 	}
 
-	IWL_DEBUG_MAC80211(mvm, "enter (%d, %d)\n", channel->hw_value,
-			   duration);
+	IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
+			   duration, type);
 
 	mutex_lock(&mvm->mutex);
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index bdae700c769e..203eb85e03d3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -22,7 +22,7 @@
  * USA
  *
  * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
  *
  * Contact Information:
  * Intel Linux Wireless <ilw@linux.intel.com>
@@ -79,7 +79,7 @@
 #include "fw-api.h"
 
 #define IWL_INVALID_MAC80211_QUEUE	0xff
-#define IWL_MVM_MAX_ADDRESSES		2
+#define IWL_MVM_MAX_ADDRESSES		5
 /* RSSI offset for WkP */
 #define IWL_RSSI_OFFSET 50
 
@@ -174,6 +174,8 @@ struct iwl_mvm_vif {
 	bool uploaded;
 	bool ap_active;
 
+	u32 ap_beacon_time;
+
 	enum iwl_tsf_id tsf_id;
 
 	/*
@@ -332,6 +334,10 @@ struct iwl_mvm {
 #ifdef CONFIG_PM_SLEEP
 	int gtk_ivlen, gtk_icvlen, ptk_ivlen, ptk_icvlen;
 #endif
+
+	/* BT-Coex */
+	u8 bt_kill_msk;
+	struct iwl_bt_coex_profile_notif last_bt_notif;
 };
 
 /* Extract MVM priv from op_mode and _hw */
@@ -502,4 +508,11 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
 void iwl_mvm_set_default_unicast_key(struct ieee80211_hw *hw,
 				     struct ieee80211_vif *vif, int idx);
 
+/* BT Coex */
+int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm);
+int iwl_send_bt_init_conf(struct iwl_mvm *mvm);
+int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
+			     struct iwl_rx_cmd_buffer *rxb,
+			     struct iwl_device_cmd *cmd);
+
 #endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index 20016bcbdeab..93e3d0f174cc 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -22,7 +22,7 @@
  * USA
  *
  * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
  *
  * Contact Information:
  * Intel Linux Wireless <ilw@linux.intel.com>
@@ -74,6 +74,9 @@ static const int nvm_to_read[] = {
 	NVM_SECTION_TYPE_PRODUCTION,
 };
 
+/* Default NVM chunk size to read */
+#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024)
+
 /* used to simplify the shared operations on NVM_ACCESS_CMD versions */
 union iwl_nvm_access_cmd {
 	struct iwl_nvm_access_cmd_ver1 ver1;
@@ -193,9 +196,9 @@ static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section,
 	int ret;
 	bool old_eeprom = mvm->cfg->device_family != IWL_DEVICE_FAMILY_7000;
 
-	length = (iwlwifi_mod_params.amsdu_size_8K ? (8 * 1024) : (4 * 1024))
-		- sizeof(union iwl_nvm_access_cmd)
-		- sizeof(struct iwl_rx_packet);
+	/* Set nvm section read length */
+	length = IWL_NVM_DEFAULT_CHUNK_SIZE;
+
 	/*
 	 * if length is greater than EEPROM size, truncate it because uCode
 	 * doesn't check it by itself, and exit the loop when reached.
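
The read length no longer depends on the Rx buffer size; the NVM section is simply fetched in fixed 2 KiB chunks until it is exhausted. A sketch of that loop (hypothetical read_chunk() callback standing in for the real NVM access command; assumed to return bytes read, 0 at the end, negative on error):

static int example_read_section(u16 section, u8 *buf, u16 size,
				int (*read_chunk)(u16 sec, u16 off, u16 len,
						  u8 *dst))
{
	u16 offset = 0;

	while (offset < size) {
		u16 len = min_t(u16, IWL_NVM_DEFAULT_CHUNK_SIZE,
				size - offset);
		int ret = read_chunk(section, offset, len, buf + offset);

		if (ret < 0)
			return ret;
		if (ret == 0)	/* section exhausted */
			break;
		offset += ret;
	}
	return offset;
}
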
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index d0f9c1e0475e..828bdddd07e9 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -22,7 +22,7 @@
  * USA
  *
  * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
  *
  * Contact Information:
  * Intel Linux Wireless <ilw@linux.intel.com>
@@ -230,6 +230,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
 	RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false),
 	RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, false),
 
+	RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true),
+
 	RX_HANDLER(RADIO_VERSION_NOTIFICATION, iwl_mvm_rx_radio_ver, false),
 	RX_HANDLER(CARD_STATE_NOTIFICATION, iwl_mvm_rx_card_state_notif, false),
 
@@ -293,6 +295,11 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
 	CMD(NET_DETECT_PROFILES_CMD),
 	CMD(NET_DETECT_HOTSPOTS_CMD),
 	CMD(NET_DETECT_HOTSPOTS_QUERY_CMD),
+	CMD(CARD_STATE_NOTIFICATION),
+	CMD(BT_COEX_PRIO_TABLE),
+	CMD(BT_COEX_PROT_ENV),
+	CMD(BT_PROFILE_NOTIFICATION),
+	CMD(BT_CONFIG),
 };
 #undef CMD
 
@@ -363,8 +370,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
 	trans_cfg.rx_buf_size_8k = iwlwifi_mod_params.amsdu_size_8K;
 
-	/* TODO: this should really be a TLV */
-	if (cfg->device_family == IWL_DEVICE_FAMILY_7000)
+	if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DW_BC_TABLE)
 		trans_cfg.bc_table_dword = true;
 
 	if (!iwlwifi_mod_params.wd_disable)
diff --git a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
index b428448f8ddf..0d537e035ef0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/phy-ctxt.c
@@ -22,7 +22,7 @@
  * USA
  *
  * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
  *
  * Contact Information:
  * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index 5a92a4978795..efb9a6f3faac 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -22,7 +22,7 @@
  * USA
  *
  * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
  *
  * Contact Information:
  * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c
index 925628468146..df85c49dc599 100644
--- a/drivers/net/wireless/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/iwlwifi/mvm/quota.c
@@ -22,7 +22,7 @@
  * USA
  *
  * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
  *
  * Contact Information:
  * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 56b636d9ab30..a01a6612677e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -680,12 +680,14 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
  */
 static bool rs_use_green(struct ieee80211_sta *sta)
 {
-	struct iwl_mvm_sta *sta_priv = (void *)sta->drv_priv;
-
-	bool use_green = !(sta_priv->vif->bss_conf.ht_operation_mode &
-				IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
-
-	return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) && use_green;
+	/*
+	 * There's a bug somewhere in this code that causes the
+	 * scaling to get stuck because GF+SGI can't be combined
+	 * in SISO rates. Until we find that bug, disable GF, it
+	 * has only limited benefit and we still interoperate with
+	 * GF APs since we can always receive GF transmissions.
+	 */
+	return false;
 }
 
 /**
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index b0b190d0ec23..4dfc21a3e83e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -22,7 +22,7 @@
  * USA
  *
  * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
  *
  * Contact Information:
  * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 9b21b92aa8d1..0d3c76b29242 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -22,7 +22,7 @@
  * USA
  *
  * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
  *
  * Contact Information:
  * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 274f44e2ef60..4d872d69577f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -22,7 +22,7 @@
  * USA
  *
  * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
  *
  * Contact Information:
  * Intel Linux Wireless <ilw@linux.intel.com>
@@ -101,8 +101,55 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 	}
 	add_sta_cmd.add_modify = update ? 1 : 0;
 
-	/* STA_FLG_FAT_EN_MSK ? */
-	/* STA_FLG_MIMO_EN_MSK ? */
+	add_sta_cmd.station_flags_msk |= cpu_to_le32(STA_FLG_FAT_EN_MSK |
+						     STA_FLG_MIMO_EN_MSK);
+
+	switch (sta->bandwidth) {
+	case IEEE80211_STA_RX_BW_160:
+		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
+		/* fall through */
+	case IEEE80211_STA_RX_BW_80:
+		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
+		/* fall through */
+	case IEEE80211_STA_RX_BW_40:
+		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
+		/* fall through */
+	case IEEE80211_STA_RX_BW_20:
+		if (sta->ht_cap.ht_supported)
+			add_sta_cmd.station_flags |=
+				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
+		break;
+	}
+
+	switch (sta->rx_nss) {
+	case 1:
+		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
+		break;
+	case 2:
+		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
+		break;
+	case 3 ... 8:
+		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
+		break;
+	}
+
+	switch (sta->smps_mode) {
+	case IEEE80211_SMPS_AUTOMATIC:
+	case IEEE80211_SMPS_NUM_MODES:
+		WARN_ON(1);
+		break;
+	case IEEE80211_SMPS_STATIC:
+		/* override NSS */
+		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
+		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
+		break;
+	case IEEE80211_SMPS_DYNAMIC:
+		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
+		break;
+	case IEEE80211_SMPS_OFF:
+		/* nothing */
+		break;
+	}
 
 	if (sta->ht_cap.ht_supported) {
 		add_sta_cmd.station_flags_msk |=
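
Because the bandwidth switch above deliberately falls through, a station accumulates one FAT flag per supported width, down to 20 MHz. Worked example (illustrative; flag names from fw-api-sta.h): an HT station at 40 MHz enters at RX_BW_40, falls through to RX_BW_20, and ends up with exactly

	/* RX_BW_40 case, then fall-through into RX_BW_20 */
	u32 fat_flags = STA_FLG_FAT_EN_40MHZ | STA_FLG_FAT_EN_20MHZ;
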
@@ -340,6 +387,9 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
 
 	if (vif->type == NL80211_IFTYPE_STATION &&
 	    mvmvif->ap_sta_id == mvm_sta->sta_id) {
+		/* flush its queues here since we are freeing mvm_sta */
+		ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, true);
+
 		/*
 		 * Put a non-NULL since the fw station isn't removed.
 		 * It will be removed after the MAC will be set as
@@ -348,9 +398,6 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
 		rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
 				   ERR_PTR(-EINVAL));
 
-		/* flush its queues here since we are freeing mvm_sta */
-		ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, true);
-
 		/* if we are associated - we can't remove the AP STA now */
 		if (vif->bss_conf.assoc)
 			return ret;
@@ -686,7 +733,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 	spin_lock_bh(&mvmsta->lock);
 	tid_data = &mvmsta->tid_data[tid];
-	tid_data->ssn = SEQ_TO_SN(tid_data->seq_number);
+	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
 	tid_data->txq_id = txq_id;
 	*ssn = tid_data->ssn;
 
@@ -789,7 +836,7 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 	switch (tid_data->state) {
 	case IWL_AGG_ON:
-		tid_data->ssn = SEQ_TO_SN(tid_data->seq_number);
+		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
 
 		IWL_DEBUG_TX_QUEUES(mvm,
 				    "ssn = %d, next_recl = %d\n",
@@ -834,6 +881,34 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	return err;
 }
 
+int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			     struct ieee80211_sta *sta, u16 tid)
+{
+	struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
+	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+	u16 txq_id;
+
+	/*
+	 * First set the agg state to OFF to avoid calling
+	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
+	 */
+	spin_lock_bh(&mvmsta->lock);
+	txq_id = tid_data->txq_id;
+	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
+			    mvmsta->sta_id, tid, txq_id, tid_data->state);
+	tid_data->state = IWL_AGG_OFF;
+	spin_unlock_bh(&mvmsta->lock);
+
+	if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true))
+		IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
+
+	iwl_trans_txq_disable(mvm->trans, tid_data->txq_id);
+	mvm->queue_to_mac80211[tid_data->txq_id] =
+		IWL_INVALID_MAC80211_QUEUE;
+
+	return 0;
+}
+
 static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
 {
 	int i;
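
mac80211 distinguishes a graceful aggregation stop from a flush; the new iwl_mvm_sta_tx_agg_flush() serves the flush path, marking the session IWL_AGG_OFF under the station lock before flushing so iwl_mvm_check_ratid_empty() cannot fire the stop callback. A hedged dispatch sketch (hypothetical, mirroring the ampdu_action hunk in mac80211.c above):

static int example_ampdu_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			      struct ieee80211_sta *sta, u16 tid,
			      enum ieee80211_ampdu_mlme_action action)
{
	switch (action) {
	case IEEE80211_AMPDU_TX_STOP_CONT:
		/* graceful: reclaim in-flight frames, then tear down */
		return iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
		/* hard stop: drop queued frames immediately */
		return iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid);
	default:
		return -EINVAL;
	}
}
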
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index 896f88ac8145..b0352df981e4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -22,7 +22,7 @@
  * USA
  *
  * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
  *
  * Contact Information:
  * Intel Linux Wireless <ilw@linux.intel.com>
@@ -348,6 +348,8 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			    struct ieee80211_sta *sta, u16 tid, u8 buf_size);
 int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 			    struct ieee80211_sta *sta, u16 tid);
+int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+			     struct ieee80211_sta *sta, u16 tid);
 
 int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm);
 int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta,
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index e437e02c7149..c2c7f5176027 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -22,7 +22,7 @@
  * USA
  *
  * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
  *
  * Contact Information:
  * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.h b/drivers/net/wireless/iwlwifi/mvm/time-event.h
index 64fb57a5ab43..b36424eda361 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.h
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.h
@@ -22,7 +22,7 @@
  * USA
  *
  * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
  *
  * Contact Information:
  * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 6645efe5c03e..0556d5e16f4e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -22,7 +22,7 @@
  * USA
  *
  * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
  *
  * Contact Information:
  * Intel Linux Wireless <ilw@linux.intel.com>
@@ -637,7 +637,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
 		next_reclaimed = ssn;
 	} else {
 		/* The next packet to be reclaimed is the one after this one */
-		next_reclaimed = SEQ_TO_SN(seq_ctl + 0x10);
+		next_reclaimed = IEEE80211_SEQ_TO_SN(seq_ctl + 0x10);
 	}
 
 	IWL_DEBUG_TX_REPLY(mvm,
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index 000e842c2edd..e308ad93aa9e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -22,7 +22,7 @@
  * USA
  *
  * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
+ * in the file called COPYING.
  *
  * Contact Information:
  * Intel Linux Wireless <ilw@linux.intel.com>
diff --git a/drivers/net/wireless/iwlwifi/pcie/cfg.h b/drivers/net/wireless/iwlwifi/pcie/cfg.h
deleted file mode 100644
index c6f8e83c3551..000000000000
--- a/drivers/net/wireless/iwlwifi/pcie/cfg.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *  * Neither the name Intel Corporation nor the names of its
- *    contributors may be used to endorse or promote products derived
- *    from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-#ifndef __iwl_pci_h__
-#define __iwl_pci_h__
-
-
-/*
- * This file declares the config structures for all devices.
- */
-
-extern const struct iwl_cfg iwl5300_agn_cfg;
-extern const struct iwl_cfg iwl5100_agn_cfg;
-extern const struct iwl_cfg iwl5350_agn_cfg;
-extern const struct iwl_cfg iwl5100_bgn_cfg;
-extern const struct iwl_cfg iwl5100_abg_cfg;
-extern const struct iwl_cfg iwl5150_agn_cfg;
-extern const struct iwl_cfg iwl5150_abg_cfg;
-extern const struct iwl_cfg iwl6005_2agn_cfg;
-extern const struct iwl_cfg iwl6005_2abg_cfg;
-extern const struct iwl_cfg iwl6005_2bg_cfg;
-extern const struct iwl_cfg iwl6005_2agn_sff_cfg;
-extern const struct iwl_cfg iwl6005_2agn_d_cfg;
-extern const struct iwl_cfg iwl6005_2agn_mow1_cfg;
-extern const struct iwl_cfg iwl6005_2agn_mow2_cfg;
-extern const struct iwl_cfg iwl1030_bgn_cfg;
-extern const struct iwl_cfg iwl1030_bg_cfg;
-extern const struct iwl_cfg iwl6030_2agn_cfg;
-extern const struct iwl_cfg iwl6030_2abg_cfg;
-extern const struct iwl_cfg iwl6030_2bgn_cfg;
-extern const struct iwl_cfg iwl6030_2bg_cfg;
-extern const struct iwl_cfg iwl6000i_2agn_cfg;
-extern const struct iwl_cfg iwl6000i_2abg_cfg;
-extern const struct iwl_cfg iwl6000i_2bg_cfg;
-extern const struct iwl_cfg iwl6000_3agn_cfg;
-extern const struct iwl_cfg iwl6050_2agn_cfg;
-extern const struct iwl_cfg iwl6050_2abg_cfg;
-extern const struct iwl_cfg iwl6150_bgn_cfg;
-extern const struct iwl_cfg iwl6150_bg_cfg;
-extern const struct iwl_cfg iwl1000_bgn_cfg;
-extern const struct iwl_cfg iwl1000_bg_cfg;
-extern const struct iwl_cfg iwl100_bgn_cfg;
-extern const struct iwl_cfg iwl100_bg_cfg;
-extern const struct iwl_cfg iwl130_bgn_cfg;
-extern const struct iwl_cfg iwl130_bg_cfg;
-extern const struct iwl_cfg iwl2000_2bgn_cfg;
-extern const struct iwl_cfg iwl2000_2bgn_d_cfg;
-extern const struct iwl_cfg iwl2030_2bgn_cfg;
-extern const struct iwl_cfg iwl6035_2agn_cfg;
-extern const struct iwl_cfg iwl105_bgn_cfg;
-extern const struct iwl_cfg iwl105_bgn_d_cfg;
-extern const struct iwl_cfg iwl135_bgn_cfg;
-extern const struct iwl_cfg iwl7260_2ac_cfg;
-extern const struct iwl_cfg iwl3160_ac_cfg;
-
-#endif /* __iwl_pci_h__ */
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 7bc0fb9128dd..46ca91f77c9c 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -69,8 +69,6 @@
69 69
70#include "iwl-trans.h" 70#include "iwl-trans.h"
71#include "iwl-drv.h" 71#include "iwl-drv.h"
72
73#include "cfg.h"
74#include "internal.h" 72#include "internal.h"
75 73
76#define IWL_PCI_DEVICE(dev, subdev, cfg) \ 74#define IWL_PCI_DEVICE(dev, subdev, cfg) \
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 12c4f31ca8fb..50ba0a468f94 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -22,7 +22,7 @@
22 * USA 22 * USA
23 * 23 *
24 * The full GNU General Public License is included in this distribution 24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL. 25 * in the file called COPYING.
26 * 26 *
27 * Contact Information: 27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com> 28 * Intel Linux Wireless <ilw@linux.intel.com>
@@ -728,7 +728,8 @@ static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
728 728
729static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg) 729static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
730{ 730{
731 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR, reg | (3 << 24)); 731 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
732 ((reg & 0x000FFFFF) | (3 << 24)));
732 return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT); 733 return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
733} 734}
734 735
@@ -736,7 +737,7 @@ static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
736 u32 val) 737 u32 val)
737{ 738{
738 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR, 739 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
739 ((addr & 0x0000FFFF) | (3 << 24))); 740 ((addr & 0x000FFFFF) | (3 << 24)));
740 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val); 741 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
741} 742}
742 743
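Note on the two PRPH hunks above: reads previously latched the raw, unmasked
address, while writes masked it too narrowly; both now apply the same 20-bit
mask. A worked example with a hypothetical periphery address:

        u32 addr   = 0xA0000;                 /* 20-bit PRPH address */
        u32 before = addr & 0x0000FFFF;       /* 0x00000 - truncated, wrong */
        u32 after  = addr & 0x000FFFFF;       /* 0xA0000 - preserved */

The (3 << 24) access-control bits in the upper byte are unchanged by this fix.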
@@ -1383,28 +1384,11 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
1383 return ret; 1384 return ret;
1384} 1385}
1385 1386
1386static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
1387 const char __user *user_buf,
1388 size_t count, loff_t *ppos)
1389{
1390 struct iwl_trans *trans = file->private_data;
1391
1392 if (!trans->op_mode)
1393 return -EAGAIN;
1394
1395 local_bh_disable();
1396 iwl_op_mode_nic_error(trans->op_mode);
1397 local_bh_enable();
1398
1399 return count;
1400}
1401
1402DEBUGFS_READ_WRITE_FILE_OPS(interrupt); 1387DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
1403DEBUGFS_READ_FILE_OPS(fh_reg); 1388DEBUGFS_READ_FILE_OPS(fh_reg);
1404DEBUGFS_READ_FILE_OPS(rx_queue); 1389DEBUGFS_READ_FILE_OPS(rx_queue);
1405DEBUGFS_READ_FILE_OPS(tx_queue); 1390DEBUGFS_READ_FILE_OPS(tx_queue);
1406DEBUGFS_WRITE_FILE_OPS(csr); 1391DEBUGFS_WRITE_FILE_OPS(csr);
1407DEBUGFS_WRITE_FILE_OPS(fw_restart);
1408 1392
1409/* 1393/*
1410 * Create the debugfs files and directories 1394 * Create the debugfs files and directories
@@ -1418,7 +1402,6 @@ static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
1418 DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR); 1402 DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
1419 DEBUGFS_ADD_FILE(csr, dir, S_IWUSR); 1403 DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
1420 DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR); 1404 DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
1421 DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR);
1422 return 0; 1405 return 0;
1423 1406
1424err: 1407err:
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index cb5c6792e3a8..282a5cafa913 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -501,10 +501,8 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
501 * shared with device */ 501 * shared with device */
502 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, 502 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
503 &txq->q.dma_addr, GFP_KERNEL); 503 &txq->q.dma_addr, GFP_KERNEL);
504 if (!txq->tfds) { 504 if (!txq->tfds)
505 IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
506 goto error; 505 goto error;
507 }
508 506
509 BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs)); 507 BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs));
510 BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) != 508 BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) !=
@@ -1609,7 +1607,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1609 * Check here that the packets are in the right place on the ring. 1607 * Check here that the packets are in the right place on the ring.
1610 */ 1608 */
1611#ifdef CONFIG_IWLWIFI_DEBUG 1609#ifdef CONFIG_IWLWIFI_DEBUG
1612 wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); 1610 wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
1613 WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) && 1611 WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) &&
1614 ((wifi_seq & 0xff) != q->write_ptr), 1612 ((wifi_seq & 0xff) != q->write_ptr),
1615 "Q: %d WiFi Seq %d tfdNum %d", 1613 "Q: %d WiFi Seq %d tfdNum %d",
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index cffdf4fbf161..7490c4fc7177 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1535,7 +1535,8 @@ static void hw_roc_done(struct work_struct *work)
1535static int mac80211_hwsim_roc(struct ieee80211_hw *hw, 1535static int mac80211_hwsim_roc(struct ieee80211_hw *hw,
1536 struct ieee80211_vif *vif, 1536 struct ieee80211_vif *vif,
1537 struct ieee80211_channel *chan, 1537 struct ieee80211_channel *chan,
1538 int duration) 1538 int duration,
1539 enum ieee80211_roc_type type)
1539{ 1540{
1540 struct mac80211_hwsim_data *hwsim = hw->priv; 1541 struct mac80211_hwsim_data *hwsim = hw->priv;
1541 1542
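Note on the hwsim hunk above: the mac80211 remain-on-channel callback grew an
enum ieee80211_roc_type argument. A minimal sketch of how a driver might use
it (the two enumerators are assumed from the mac80211 API of this era):

        switch (type) {
        case IEEE80211_ROC_TYPE_MGMT_TX:
                /* off-channel time backs a pending mgmt-frame TX and may
                 * be scheduled with higher priority by the device */
                break;
        case IEEE80211_ROC_TYPE_NORMAL:
        default:
                break;
        }

hwsim itself only needs the updated prototype and ignores the hint.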
diff --git a/drivers/net/wireless/mwifiex/Makefile b/drivers/net/wireless/mwifiex/Makefile
index 97b245cbafd8..ecf28464367f 100644
--- a/drivers/net/wireless/mwifiex/Makefile
+++ b/drivers/net/wireless/mwifiex/Makefile
@@ -39,6 +39,7 @@ mwifiex-y += sta_tx.o
39mwifiex-y += sta_rx.o 39mwifiex-y += sta_rx.o
40mwifiex-y += uap_txrx.o 40mwifiex-y += uap_txrx.o
41mwifiex-y += cfg80211.o 41mwifiex-y += cfg80211.o
42mwifiex-y += ethtool.o
42mwifiex-$(CONFIG_DEBUG_FS) += debugfs.o 43mwifiex-$(CONFIG_DEBUG_FS) += debugfs.o
43obj-$(CONFIG_MWIFIEX) += mwifiex.o 44obj-$(CONFIG_MWIFIEX) += mwifiex.o
44 45
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 8aaf56ade4d9..78c2bb8d3726 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -1933,66 +1933,10 @@ static void mwifiex_setup_vht_caps(struct ieee80211_sta_vht_cap *vht_info,
1933 struct mwifiex_private *priv) 1933 struct mwifiex_private *priv)
1934{ 1934{
1935 struct mwifiex_adapter *adapter = priv->adapter; 1935 struct mwifiex_adapter *adapter = priv->adapter;
1936 u32 vht_cap = 0, cap = adapter->hw_dot_11ac_dev_cap;
1937 1936
1938 vht_info->vht_supported = true; 1937 vht_info->vht_supported = true;
1939 1938
1940 switch (GET_VHTCAP_MAXMPDULEN(cap)) { 1939 vht_info->cap = adapter->hw_dot_11ac_dev_cap;
1941 case 0x00:
1942 vht_cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
1943 break;
1944 case 0x01:
1945 vht_cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991;
1946 break;
1947 case 0x10:
1948 vht_cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
1949 break;
1950 default:
1951 dev_err(adapter->dev, "unsupported MAX MPDU len\n");
1952 break;
1953 }
1954
1955 if (ISSUPP_11ACVHTHTCVHT(cap))
1956 vht_cap |= IEEE80211_VHT_CAP_HTC_VHT;
1957
1958 if (ISSUPP_11ACVHTTXOPPS(cap))
1959 vht_cap |= IEEE80211_VHT_CAP_VHT_TXOP_PS;
1960
1961 if (ISSUPP_11ACMURXBEAMFORMEE(cap))
1962 vht_cap |= IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE;
1963
1964 if (ISSUPP_11ACMUTXBEAMFORMEE(cap))
1965 vht_cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
1966
1967 if (ISSUPP_11ACSUBEAMFORMER(cap))
1968 vht_cap |= IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
1969
1970 if (ISSUPP_11ACSUBEAMFORMEE(cap))
1971 vht_cap |= IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
1972
1973 if (ISSUPP_11ACRXSTBC(cap))
1974 vht_cap |= IEEE80211_VHT_CAP_RXSTBC_1;
1975
1976 if (ISSUPP_11ACTXSTBC(cap))
1977 vht_cap |= IEEE80211_VHT_CAP_TXSTBC;
1978
1979 if (ISSUPP_11ACSGI160(cap))
1980 vht_cap |= IEEE80211_VHT_CAP_SHORT_GI_160;
1981
1982 if (ISSUPP_11ACSGI80(cap))
1983 vht_cap |= IEEE80211_VHT_CAP_SHORT_GI_80;
1984
1985 if (ISSUPP_11ACLDPC(cap))
1986 vht_cap |= IEEE80211_VHT_CAP_RXLDPC;
1987
1988 if (ISSUPP_11ACBW8080(cap))
1989 vht_cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
1990
1991 if (ISSUPP_11ACBW160(cap))
1992 vht_cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
1993
1994 vht_info->cap = vht_cap;
1995
1996 /* Update MCS support for VHT */ 1940 /* Update MCS support for VHT */
1997 vht_info->vht_mcs.rx_mcs_map = cpu_to_le16( 1941 vht_info->vht_mcs.rx_mcs_map = cpu_to_le16(
1998 adapter->hw_dot_11ac_mcs_support & 0xFFFF); 1942 adapter->hw_dot_11ac_mcs_support & 0xFFFF);
@@ -2236,6 +2180,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
2236 dev->flags |= IFF_BROADCAST | IFF_MULTICAST; 2180 dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
2237 dev->watchdog_timeo = MWIFIEX_DEFAULT_WATCHDOG_TIMEOUT; 2181 dev->watchdog_timeo = MWIFIEX_DEFAULT_WATCHDOG_TIMEOUT;
2238 dev->hard_header_len += MWIFIEX_MIN_DATA_HEADER_LEN; 2182 dev->hard_header_len += MWIFIEX_MIN_DATA_HEADER_LEN;
2183 dev->ethtool_ops = &mwifiex_ethtool_ops;
2239 2184
2240 mdev_priv = netdev_priv(dev); 2185 mdev_priv = netdev_priv(dev);
2241 *((unsigned long *) mdev_priv) = (unsigned long) priv; 2186 *((unsigned long *) mdev_priv) = (unsigned long) priv;
@@ -2294,6 +2239,152 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
2294} 2239}
2295EXPORT_SYMBOL_GPL(mwifiex_del_virtual_intf); 2240EXPORT_SYMBOL_GPL(mwifiex_del_virtual_intf);
2296 2241
2242#ifdef CONFIG_PM
2243static bool
2244mwifiex_is_pattern_supported(struct cfg80211_wowlan_trig_pkt_pattern *pat,
2245 s8 *byte_seq)
2246{
2247 int j, k, valid_byte_cnt = 0;
2248 bool dont_care_byte = false;
2249
2250 for (j = 0; j < DIV_ROUND_UP(pat->pattern_len, 8); j++) {
2251 for (k = 0; k < 8; k++) {
2252 if (pat->mask[j] & 1 << k) {
2253 memcpy(byte_seq + valid_byte_cnt,
2254 &pat->pattern[j * 8 + k], 1);
2255 valid_byte_cnt++;
2256 if (dont_care_byte)
2257 return false;
2258 } else {
2259 if (valid_byte_cnt)
2260 dont_care_byte = true;
2261 }
2262
2263 if (valid_byte_cnt > MAX_BYTESEQ)
2264 return false;
2265 }
2266 }
2267
2268 byte_seq[MAX_BYTESEQ] = valid_byte_cnt;
2269
2270 return true;
2271}
2272
2273static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
2274 struct cfg80211_wowlan *wowlan)
2275{
2276 struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
2277 struct mwifiex_ds_mef_cfg mef_cfg;
2278 struct mwifiex_mef_entry *mef_entry;
2279 int i, filt_num = 0, ret;
2280 bool first_pat = true;
2281 u8 byte_seq[MAX_BYTESEQ + 1];
2282 const u8 ipv4_mc_mac[] = {0x33, 0x33};
2283 const u8 ipv6_mc_mac[] = {0x01, 0x00, 0x5e};
2284 struct mwifiex_private *priv =
2285 mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
2286
2287 if (!wowlan) {
2288 dev_warn(adapter->dev, "None of the WOWLAN triggers enabled\n");
2289 return 0;
2290 }
2291
2292 if (!priv->media_connected) {
2293 dev_warn(adapter->dev,
2294 "Can not configure WOWLAN in disconnected state\n");
2295 return 0;
2296 }
2297
2298 mef_entry = kzalloc(sizeof(*mef_entry), GFP_KERNEL);
2299 if (!mef_entry)
2300 return -ENOMEM;
2301
2302 memset(&mef_cfg, 0, sizeof(mef_cfg));
2303 mef_cfg.num_entries = 1;
2304 mef_cfg.mef_entry = mef_entry;
2305 mef_entry->mode = MEF_MODE_HOST_SLEEP;
2306 mef_entry->action = MEF_ACTION_ALLOW_AND_WAKEUP_HOST;
2307
2308 for (i = 0; i < wowlan->n_patterns; i++) {
2309 memset(byte_seq, 0, sizeof(byte_seq));
2310 if (!mwifiex_is_pattern_supported(&wowlan->patterns[i],
2311 byte_seq)) {
2312 wiphy_err(wiphy, "Pattern not supported\n");
2313 kfree(mef_entry);
2314 return -EOPNOTSUPP;
2315 }
2316
2317 if (!wowlan->patterns[i].pkt_offset) {
2318 if (!(byte_seq[0] & 0x01) &&
2319 (byte_seq[MAX_BYTESEQ] == 1)) {
2320 mef_cfg.criteria |= MWIFIEX_CRITERIA_UNICAST;
2321 continue;
2322 } else if (is_broadcast_ether_addr(byte_seq)) {
2323 mef_cfg.criteria |= MWIFIEX_CRITERIA_BROADCAST;
2324 continue;
2325 } else if ((!memcmp(byte_seq, ipv4_mc_mac, 2) &&
2326 (byte_seq[MAX_BYTESEQ] == 2)) ||
2327 (!memcmp(byte_seq, ipv6_mc_mac, 3) &&
2328 (byte_seq[MAX_BYTESEQ] == 3))) {
2329 mef_cfg.criteria |= MWIFIEX_CRITERIA_MULTICAST;
2330 continue;
2331 }
2332 }
2333
2334 mef_entry->filter[filt_num].repeat = 1;
2335 mef_entry->filter[filt_num].offset =
2336 wowlan->patterns[i].pkt_offset;
2337 memcpy(mef_entry->filter[filt_num].byte_seq, byte_seq,
2338 sizeof(byte_seq));
2339 mef_entry->filter[filt_num].filt_type = TYPE_EQ;
2340
2341 if (first_pat)
2342 first_pat = false;
2343 else
2344 mef_entry->filter[filt_num].filt_action = TYPE_AND;
2345
2346 filt_num++;
2347 }
2348
2349 if (wowlan->magic_pkt) {
2350 mef_cfg.criteria |= MWIFIEX_CRITERIA_UNICAST;
2351 mef_entry->filter[filt_num].repeat = 16;
2352 memcpy(mef_entry->filter[filt_num].byte_seq, priv->curr_addr,
2353 ETH_ALEN);
2354 mef_entry->filter[filt_num].byte_seq[MAX_BYTESEQ] = ETH_ALEN;
2355 mef_entry->filter[filt_num].offset = 14;
2356 mef_entry->filter[filt_num].filt_type = TYPE_EQ;
2357 if (filt_num)
2358 mef_entry->filter[filt_num].filt_action = TYPE_OR;
2359 }
2360
2361 if (!mef_cfg.criteria)
2362 mef_cfg.criteria = MWIFIEX_CRITERIA_BROADCAST |
2363 MWIFIEX_CRITERIA_UNICAST |
2364 MWIFIEX_CRITERIA_MULTICAST;
2365
2366 ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_MEF_CFG,
2367 HostCmd_ACT_GEN_SET, 0,
2368 &mef_cfg);
2369
2370 kfree(mef_entry);
2371 return ret;
2372}
2373
2374static int mwifiex_cfg80211_resume(struct wiphy *wiphy)
2375{
2376 return 0;
2377}
2378
2379static void mwifiex_cfg80211_set_wakeup(struct wiphy *wiphy,
2380 bool enabled)
2381{
2382 struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
2383
2384 device_set_wakeup_enable(adapter->dev, enabled);
2385}
2386#endif
2387
2297/* station cfg80211 operations */ 2388/* station cfg80211 operations */
2298static struct cfg80211_ops mwifiex_cfg80211_ops = { 2389static struct cfg80211_ops mwifiex_cfg80211_ops = {
2299 .add_virtual_intf = mwifiex_add_virtual_intf, 2390 .add_virtual_intf = mwifiex_add_virtual_intf,
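Note on the suspend path added above: cfg80211 hands each wake pattern to the
driver as pattern bytes plus a bitmask (one bit per byte), while the firmware
only matches a contiguous, fully specified byte sequence of at most
MAX_BYTESEQ (6) bytes; mwifiex_is_pattern_supported() performs that
conversion and stores the valid-byte count in byte_seq[MAX_BYTESEQ]. A worked
example with a hypothetical pattern:

        u8 pattern[2] = { 0x33, 0x33 };  /* match at Ethernet dest addr */
        u8 mask[1]    = { 0x03 };        /* bits 0-1 set: both bytes valid */
        /* result: byte_seq = { 0x33, 0x33, ..., [MAX_BYTESEQ] = 2 },
         * which the suspend handler folds into MWIFIEX_CRITERIA_MULTICAST */

One nit worth noting: the ipv4_mc_mac/ipv6_mc_mac names are swapped relative
to the actual address spaces (33:33 is the IPv6 multicast MAC prefix and
01:00:5e the IPv4 one); both feed the same multicast criterion, so behavior
is unaffected.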
@@ -2322,6 +2413,11 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
2322 .change_beacon = mwifiex_cfg80211_change_beacon, 2413 .change_beacon = mwifiex_cfg80211_change_beacon,
2323 .set_cqm_rssi_config = mwifiex_cfg80211_set_cqm_rssi_config, 2414 .set_cqm_rssi_config = mwifiex_cfg80211_set_cqm_rssi_config,
2324 .set_antenna = mwifiex_cfg80211_set_antenna, 2415 .set_antenna = mwifiex_cfg80211_set_antenna,
2416#ifdef CONFIG_PM
2417 .suspend = mwifiex_cfg80211_suspend,
2418 .resume = mwifiex_cfg80211_resume,
2419 .set_wakeup = mwifiex_cfg80211_set_wakeup,
2420#endif
2325}; 2421};
2326 2422
2327/* 2423/*
@@ -2380,6 +2476,14 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
2380 2476
2381 wiphy_apply_custom_regulatory(wiphy, &mwifiex_world_regdom_custom); 2477 wiphy_apply_custom_regulatory(wiphy, &mwifiex_world_regdom_custom);
2382 2478
2479#ifdef CONFIG_PM
2480 wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT;
2481 wiphy->wowlan.n_patterns = MWIFIEX_MAX_FILTERS;
2482 wiphy->wowlan.pattern_min_len = 1;
2483 wiphy->wowlan.pattern_max_len = MWIFIEX_MAX_PATTERN_LEN;
2484 wiphy->wowlan.max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN;
2485#endif
2486
2383 wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | 2487 wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
2384 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | 2488 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
2385 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P; 2489 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
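With wiphy->wowlan advertised as above, the triggers become configurable from
userspace over nl80211, for example with iw (phy name hypothetical):

        iw phy phy0 wowlan enable magic-packet
        iw phy phy0 wowlan show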
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index b5c8b962ce12..9a1302bd4c03 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -1149,7 +1149,7 @@ int mwifiex_ret_802_11_hs_cfg(struct mwifiex_private *priv,
1149 phs_cfg->params.hs_config.gpio, 1149 phs_cfg->params.hs_config.gpio,
1150 phs_cfg->params.hs_config.gap); 1150 phs_cfg->params.hs_config.gap);
1151 } 1151 }
1152 if (conditions != HOST_SLEEP_CFG_CANCEL) { 1152 if (conditions != HS_CFG_CANCEL) {
1153 adapter->is_hs_configured = true; 1153 adapter->is_hs_configured = true;
1154 if (adapter->iface_type == MWIFIEX_USB || 1154 if (adapter->iface_type == MWIFIEX_USB ||
1155 adapter->iface_type == MWIFIEX_PCIE) 1155 adapter->iface_type == MWIFIEX_PCIE)
diff --git a/drivers/net/wireless/mwifiex/ethtool.c b/drivers/net/wireless/mwifiex/ethtool.c
new file mode 100644
index 000000000000..bfb39908b2c6
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/ethtool.c
@@ -0,0 +1,70 @@
1/*
2 * Marvell Wireless LAN device driver: ethtool
3 *
4 * Copyright (C) 2013, Marvell International Ltd.
5 *
6 * This software file (the "File") is distributed by Marvell International
7 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
8 * (the "License"). You may use, redistribute and/or modify this File in
9 * accordance with the terms and conditions of the License, a copy of which
10 * is available by writing to the Free Software Foundation, Inc.,
11 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
12 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
13 *
14 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
16 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
17 * this warranty disclaimer.
18 */
19
20#include "main.h"
21
22static void mwifiex_ethtool_get_wol(struct net_device *dev,
23 struct ethtool_wolinfo *wol)
24{
25 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
26 u32 conditions = le32_to_cpu(priv->adapter->hs_cfg.conditions);
27
28 wol->supported = WAKE_UCAST|WAKE_MCAST|WAKE_BCAST|WAKE_PHY;
29
30 if (conditions == HS_CFG_COND_DEF)
31 return;
32
33 if (conditions & HS_CFG_COND_UNICAST_DATA)
34 wol->wolopts |= WAKE_UCAST;
35 if (conditions & HS_CFG_COND_MULTICAST_DATA)
36 wol->wolopts |= WAKE_MCAST;
37 if (conditions & HS_CFG_COND_BROADCAST_DATA)
38 wol->wolopts |= WAKE_BCAST;
39 if (conditions & HS_CFG_COND_MAC_EVENT)
40 wol->wolopts |= WAKE_PHY;
41}
42
43static int mwifiex_ethtool_set_wol(struct net_device *dev,
44 struct ethtool_wolinfo *wol)
45{
46 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
47 u32 conditions = 0;
48
49 if (wol->wolopts & ~(WAKE_UCAST|WAKE_MCAST|WAKE_BCAST|WAKE_PHY))
50 return -EOPNOTSUPP;
51
52 if (wol->wolopts & WAKE_UCAST)
53 conditions |= HS_CFG_COND_UNICAST_DATA;
54 if (wol->wolopts & WAKE_MCAST)
55 conditions |= HS_CFG_COND_MULTICAST_DATA;
56 if (wol->wolopts & WAKE_BCAST)
57 conditions |= HS_CFG_COND_BROADCAST_DATA;
58 if (wol->wolopts & WAKE_PHY)
59 conditions |= HS_CFG_COND_MAC_EVENT;
60 if (wol->wolopts == 0)
61 conditions |= HS_CFG_COND_DEF;
62 priv->adapter->hs_cfg.conditions = cpu_to_le32(conditions);
63
64 return 0;
65}
66
67const struct ethtool_ops mwifiex_ethtool_ops = {
68 .get_wol = mwifiex_ethtool_get_wol,
69 .set_wol = mwifiex_ethtool_set_wol,
70};
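The new ethtool ops map the driver's host-sleep conditions onto the standard
WoL flags (u = WAKE_UCAST, m = WAKE_MCAST, b = WAKE_BCAST, p = WAKE_PHY), so
the usual tooling works (interface name assumed):

        ethtool mlan0              # reports something like "Supports Wake-on: pumb"
        ethtool -s mlan0 wol ub    # wake on unicast or broadcast data

Note that setting wolopts to 0 selects the default condition set
(HS_CFG_COND_DEF) rather than disabling host sleep outright.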
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 25acb0682c56..57c5defe1f9d 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -230,40 +230,12 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
230 230
231#define ISSUPP_11ACENABLED(fw_cap_info) (fw_cap_info & (BIT(13)|BIT(14))) 231#define ISSUPP_11ACENABLED(fw_cap_info) (fw_cap_info & (BIT(13)|BIT(14)))
232 232
233#define GET_VHTCAP_MAXMPDULEN(vht_cap_info) (vht_cap_info & 0x3)
234#define GET_VHTCAP_CHWDSET(vht_cap_info) ((vht_cap_info >> 2) & 0x3) 233#define GET_VHTCAP_CHWDSET(vht_cap_info) ((vht_cap_info >> 2) & 0x3)
235#define GET_VHTNSSMCS(mcs_mapset, nss) ((mcs_mapset >> (2 * (nss - 1))) & 0x3) 234#define GET_VHTNSSMCS(mcs_mapset, nss) ((mcs_mapset >> (2 * (nss - 1))) & 0x3)
236#define SET_VHTNSSMCS(mcs_mapset, nss, value) (mcs_mapset |= (value & 0x3) << \ 235#define SET_VHTNSSMCS(mcs_mapset, nss, value) (mcs_mapset |= (value & 0x3) << \
237 (2 * (nss - 1))) 236 (2 * (nss - 1)))
238#define NO_NSS_SUPPORT 0x3 237#define NO_NSS_SUPPORT 0x3
239 238
240/* HW_SPEC: HTC-VHT supported */
241#define ISSUPP_11ACVHTHTCVHT(Dot11acDevCap) (Dot11acDevCap & BIT(22))
242/* HW_SPEC: VHT TXOP PS support */
243#define ISSUPP_11ACVHTTXOPPS(Dot11acDevCap) (Dot11acDevCap & BIT(21))
244/* HW_SPEC: MU RX beamformee support */
245#define ISSUPP_11ACMURXBEAMFORMEE(Dot11acDevCap) (Dot11acDevCap & BIT(20))
246/* HW_SPEC: MU TX beamformee support */
247#define ISSUPP_11ACMUTXBEAMFORMEE(Dot11acDevCap) (Dot11acDevCap & BIT(19))
248/* HW_SPEC: SU Beamformee support */
249#define ISSUPP_11ACSUBEAMFORMEE(Dot11acDevCap) (Dot11acDevCap & BIT(10))
250/* HW_SPEC: SU Beamformer support */
251#define ISSUPP_11ACSUBEAMFORMER(Dot11acDevCap) (Dot11acDevCap & BIT(9))
252/* HW_SPEC: Rx STBC support */
253#define ISSUPP_11ACRXSTBC(Dot11acDevCap) (Dot11acDevCap & BIT(8))
254/* HW_SPEC: Tx STBC support */
255#define ISSUPP_11ACTXSTBC(Dot11acDevCap) (Dot11acDevCap & BIT(7))
256/* HW_SPEC: Short GI support for 160MHz BW */
257#define ISSUPP_11ACSGI160(Dot11acDevCap) (Dot11acDevCap & BIT(6))
258/* HW_SPEC: Short GI support for 80MHz BW */
259#define ISSUPP_11ACSGI80(Dot11acDevCap) (Dot11acDevCap & BIT(5))
260/* HW_SPEC: LDPC coding support */
261#define ISSUPP_11ACLDPC(Dot11acDevCap) (Dot11acDevCap & BIT(4))
262/* HW_SPEC: Channel BW 20/40/80/160/80+80 MHz support */
263#define ISSUPP_11ACBW8080(Dot11acDevCap) (Dot11acDevCap & BIT(3))
264/* HW_SPEC: Channel BW 20/40/80/160 MHz support */
265#define ISSUPP_11ACBW160(Dot11acDevCap) (Dot11acDevCap & BIT(2))
266
267#define GET_DEVTXMCSMAP(dev_mcs_map) (dev_mcs_map >> 16) 239#define GET_DEVTXMCSMAP(dev_mcs_map) (dev_mcs_map >> 16)
268#define GET_DEVRXMCSMAP(dev_mcs_map) (dev_mcs_map & 0xFFFF) 240#define GET_DEVRXMCSMAP(dev_mcs_map) (dev_mcs_map & 0xFFFF)
269 241
@@ -300,6 +272,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
300#define HostCmd_CMD_802_11_TX_RATE_QUERY 0x007f 272#define HostCmd_CMD_802_11_TX_RATE_QUERY 0x007f
301#define HostCmd_CMD_802_11_IBSS_COALESCING_STATUS 0x0083 273#define HostCmd_CMD_802_11_IBSS_COALESCING_STATUS 0x0083
302#define HostCmd_CMD_VERSION_EXT 0x0097 274#define HostCmd_CMD_VERSION_EXT 0x0097
275#define HostCmd_CMD_MEF_CFG 0x009a
303#define HostCmd_CMD_RSSI_INFO 0x00a4 276#define HostCmd_CMD_RSSI_INFO 0x00a4
304#define HostCmd_CMD_FUNC_INIT 0x00a9 277#define HostCmd_CMD_FUNC_INIT 0x00a9
305#define HostCmd_CMD_FUNC_SHUTDOWN 0x00aa 278#define HostCmd_CMD_FUNC_SHUTDOWN 0x00aa
@@ -376,10 +349,14 @@ enum P2P_MODES {
376#define HostCmd_SCAN_RADIO_TYPE_BG 0 349#define HostCmd_SCAN_RADIO_TYPE_BG 0
377#define HostCmd_SCAN_RADIO_TYPE_A 1 350#define HostCmd_SCAN_RADIO_TYPE_A 1
378 351
379#define HOST_SLEEP_CFG_CANCEL 0xffffffff 352#define HS_CFG_CANCEL 0xffffffff
380#define HOST_SLEEP_CFG_COND_DEF 0x00000000 353#define HS_CFG_COND_DEF 0x00000000
381#define HOST_SLEEP_CFG_GPIO_DEF 0xff 354#define HS_CFG_GPIO_DEF 0xff
382#define HOST_SLEEP_CFG_GAP_DEF 0 355#define HS_CFG_GAP_DEF 0
356#define HS_CFG_COND_BROADCAST_DATA 0x00000001
357#define HS_CFG_COND_UNICAST_DATA 0x00000002
358#define HS_CFG_COND_MAC_EVENT 0x00000004
359#define HS_CFG_COND_MULTICAST_DATA 0x00000008
383 360
384#define MWIFIEX_TIMEOUT_FOR_AP_RESP 0xfffc 361#define MWIFIEX_TIMEOUT_FOR_AP_RESP 0xfffc
385#define MWIFIEX_STATUS_CODE_AUTH_TIMEOUT 2 362#define MWIFIEX_STATUS_CODE_AUTH_TIMEOUT 2
@@ -469,6 +446,23 @@ enum P2P_MODES {
469#define EVENT_GET_BSS_TYPE(event_cause) \ 446#define EVENT_GET_BSS_TYPE(event_cause) \
470 (((event_cause) >> 24) & 0x00ff) 447 (((event_cause) >> 24) & 0x00ff)
471 448
449#define MWIFIEX_MAX_PATTERN_LEN 20
450#define MWIFIEX_MAX_OFFSET_LEN 50
451#define STACK_NBYTES 100
452#define TYPE_DNUM 1
453#define TYPE_BYTESEQ 2
454#define MAX_OPERAND 0x40
455#define TYPE_EQ (MAX_OPERAND+1)
456#define TYPE_EQ_DNUM (MAX_OPERAND+2)
457#define TYPE_EQ_BIT (MAX_OPERAND+3)
458#define TYPE_AND (MAX_OPERAND+4)
459#define TYPE_OR (MAX_OPERAND+5)
460#define MEF_MODE_HOST_SLEEP 1
461#define MEF_ACTION_ALLOW_AND_WAKEUP_HOST 3
462#define MWIFIEX_CRITERIA_BROADCAST BIT(0)
463#define MWIFIEX_CRITERIA_UNICAST BIT(1)
464#define MWIFIEX_CRITERIA_MULTICAST BIT(3)
465
472struct mwifiex_ie_types_header { 466struct mwifiex_ie_types_header {
473 __le16 type; 467 __le16 type;
474 __le16 len; 468 __le16 len;
@@ -1499,6 +1493,19 @@ struct host_cmd_ds_802_11_ibss_status {
1499 __le16 use_g_rate_protect; 1493 __le16 use_g_rate_protect;
1500} __packed; 1494} __packed;
1501 1495
1496struct mwifiex_fw_mef_entry {
1497 u8 mode;
1498 u8 action;
1499 __le16 exprsize;
1500 u8 expr[0];
1501} __packed;
1502
1503struct host_cmd_ds_mef_cfg {
1504 __le32 criteria;
1505 __le16 num_entries;
1506 struct mwifiex_fw_mef_entry mef_entry[0];
1507} __packed;
1508
1502#define CONNECTION_TYPE_INFRA 0 1509#define CONNECTION_TYPE_INFRA 0
1503#define CONNECTION_TYPE_ADHOC 1 1510#define CONNECTION_TYPE_ADHOC 1
1504#define CONNECTION_TYPE_AP 2 1511#define CONNECTION_TYPE_AP 2
@@ -1603,6 +1610,7 @@ struct host_cmd_ds_command {
1603 struct host_cmd_ds_remain_on_chan roc_cfg; 1610 struct host_cmd_ds_remain_on_chan roc_cfg;
1604 struct host_cmd_ds_p2p_mode_cfg mode_cfg; 1611 struct host_cmd_ds_p2p_mode_cfg mode_cfg;
1605 struct host_cmd_ds_802_11_ibss_status ibss_coalescing; 1612 struct host_cmd_ds_802_11_ibss_status ibss_coalescing;
1613 struct host_cmd_ds_mef_cfg mef_cfg;
1606 struct host_cmd_ds_mac_reg_access mac_reg; 1614 struct host_cmd_ds_mac_reg_access mac_reg;
1607 struct host_cmd_ds_bbp_reg_access bbp_reg; 1615 struct host_cmd_ds_bbp_reg_access bbp_reg;
1608 struct host_cmd_ds_rf_reg_access rf_reg; 1616 struct host_cmd_ds_rf_reg_access rf_reg;
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index 0ff4c37ab42a..daf8801cecd2 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -318,9 +318,9 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
318 adapter->curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K; 318 adapter->curr_tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K;
319 319
320 adapter->is_hs_configured = false; 320 adapter->is_hs_configured = false;
321 adapter->hs_cfg.conditions = cpu_to_le32(HOST_SLEEP_CFG_COND_DEF); 321 adapter->hs_cfg.conditions = cpu_to_le32(HS_CFG_COND_DEF);
322 adapter->hs_cfg.gpio = HOST_SLEEP_CFG_GPIO_DEF; 322 adapter->hs_cfg.gpio = HS_CFG_GPIO_DEF;
323 adapter->hs_cfg.gap = HOST_SLEEP_CFG_GAP_DEF; 323 adapter->hs_cfg.gap = HS_CFG_GAP_DEF;
324 adapter->hs_activated = false; 324 adapter->hs_activated = false;
325 325
326 memset(adapter->event_body, 0, sizeof(adapter->event_body)); 326 memset(adapter->event_body, 0, sizeof(adapter->event_body));
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index d85e6eb1f58a..91d522c746ed 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -354,6 +354,29 @@ struct mwifiex_ds_misc_subsc_evt {
354 struct subsc_evt_cfg bcn_h_rssi_cfg; 354 struct subsc_evt_cfg bcn_h_rssi_cfg;
355}; 355};
356 356
357#define MAX_BYTESEQ 6 /* non-adjustable */
358#define MWIFIEX_MAX_FILTERS 10
359
360struct mwifiex_mef_filter {
361 u16 repeat;
362 u16 offset;
363 s8 byte_seq[MAX_BYTESEQ + 1];
364 u8 filt_type;
365 u8 filt_action;
366};
367
368struct mwifiex_mef_entry {
369 u8 mode;
370 u8 action;
371 struct mwifiex_mef_filter filter[MWIFIEX_MAX_FILTERS];
372};
373
374struct mwifiex_ds_mef_cfg {
375 u32 criteria;
376 u16 num_entries;
377 struct mwifiex_mef_entry *mef_entry;
378};
379
357#define MWIFIEX_MAX_VSIE_LEN (256) 380#define MWIFIEX_MAX_VSIE_LEN (256)
358#define MWIFIEX_MAX_VSIE_NUM (8) 381#define MWIFIEX_MAX_VSIE_NUM (8)
359#define MWIFIEX_VSIE_MASK_CLEAR 0x00 382#define MWIFIEX_VSIE_MASK_CLEAR 0x00
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 9c802ede9c3b..121443a0f2a1 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -588,10 +588,19 @@ mwifiex_tx_timeout(struct net_device *dev)
588{ 588{
589 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); 589 struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
590 590
591 dev_err(priv->adapter->dev, "%lu : Tx timeout, bss_type-num = %d-%d\n",
592 jiffies, priv->bss_type, priv->bss_num);
593 mwifiex_set_trans_start(dev);
594 priv->num_tx_timeout++; 591 priv->num_tx_timeout++;
592 priv->tx_timeout_cnt++;
593 dev_err(priv->adapter->dev,
594 "%lu : Tx timeout(#%d), bss_type-num = %d-%d\n",
595 jiffies, priv->tx_timeout_cnt, priv->bss_type, priv->bss_num);
596 mwifiex_set_trans_start(dev);
597
598 if (priv->tx_timeout_cnt > TX_TIMEOUT_THRESHOLD &&
599 priv->adapter->if_ops.card_reset) {
600 dev_err(priv->adapter->dev,
601 "tx_timeout_cnt exceeds threshold. Triggering card reset!\n");
602 priv->adapter->if_ops.card_reset(priv->adapter);
603 }
595} 604}
596 605
597/* 606/*
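The counter added here tracks consecutive timeouts: it is incremented on
every watchdog hit, cleared on any successful TX completion (see the txrx.c
hunk further down), and once it passes TX_TIMEOUT_THRESHOLD (6) the card is
reset via if_ops.card_reset when the bus driver provides one. The shape of
the pattern, reduced to a sketch:

        /* watchdog path */
        if (++priv->tx_timeout_cnt > TX_TIMEOUT_THRESHOLD && reset)
                reset(adapter);
        /* data path, on success */
        priv->tx_timeout_cnt = 0;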
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 7035ade9af74..7255289a48ac 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -130,6 +130,9 @@ enum {
130#define MWIFIEX_USB_TYPE_DATA 0xBEADC0DE 130#define MWIFIEX_USB_TYPE_DATA 0xBEADC0DE
131#define MWIFIEX_USB_TYPE_EVENT 0xBEEFFACE 131#define MWIFIEX_USB_TYPE_EVENT 0xBEEFFACE
132 132
133/* Threshold for tx_timeout_cnt before we trigger a card reset */
134#define TX_TIMEOUT_THRESHOLD 6
135
133struct mwifiex_dbg { 136struct mwifiex_dbg {
134 u32 num_cmd_host_to_card_failure; 137 u32 num_cmd_host_to_card_failure;
135 u32 num_cmd_sleep_cfm_host_to_card_failure; 138 u32 num_cmd_sleep_cfm_host_to_card_failure;
@@ -394,6 +397,8 @@ struct mwifiex_private {
394 u8 curr_addr[ETH_ALEN]; 397 u8 curr_addr[ETH_ALEN];
395 u8 media_connected; 398 u8 media_connected;
396 u32 num_tx_timeout; 399 u32 num_tx_timeout;
 400 /* track consecutive timeouts */
401 u8 tx_timeout_cnt;
397 struct net_device *netdev; 402 struct net_device *netdev;
398 struct net_device_stats stats; 403 struct net_device_stats stats;
399 u16 curr_pkt_filter; 404 u16 curr_pkt_filter;
@@ -1098,11 +1103,15 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev);
1098 1103
1099void mwifiex_set_sys_config_invalid_data(struct mwifiex_uap_bss_param *config); 1104void mwifiex_set_sys_config_invalid_data(struct mwifiex_uap_bss_param *config);
1100 1105
1106int mwifiex_add_wowlan_magic_pkt_filter(struct mwifiex_adapter *adapter);
1107
1101int mwifiex_set_mgmt_ies(struct mwifiex_private *priv, 1108int mwifiex_set_mgmt_ies(struct mwifiex_private *priv,
1102 struct cfg80211_beacon_data *data); 1109 struct cfg80211_beacon_data *data);
1103int mwifiex_del_mgmt_ies(struct mwifiex_private *priv); 1110int mwifiex_del_mgmt_ies(struct mwifiex_private *priv);
1104u8 *mwifiex_11d_code_2_region(u8 code); 1111u8 *mwifiex_11d_code_2_region(u8 code);
1105 1112
1113extern const struct ethtool_ops mwifiex_ethtool_ops;
1114
1106#ifdef CONFIG_DEBUG_FS 1115#ifdef CONFIG_DEBUG_FS
1107void mwifiex_debugfs_init(void); 1116void mwifiex_debugfs_init(void);
1108void mwifiex_debugfs_remove(void); 1117void mwifiex_debugfs_remove(void);
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index feb204613397..8cd8cdc91a7e 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -36,8 +36,6 @@ static u8 user_rmmod;
36static struct mwifiex_if_ops pcie_ops; 36static struct mwifiex_if_ops pcie_ops;
37 37
38static struct semaphore add_remove_card_sem; 38static struct semaphore add_remove_card_sem;
39static int mwifiex_pcie_enable_host_int(struct mwifiex_adapter *adapter);
40static int mwifiex_pcie_resume(struct pci_dev *pdev);
41 39
42static int 40static int
43mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb, 41mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
@@ -78,6 +76,82 @@ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter)
78 return false; 76 return false;
79} 77}
80 78
79#ifdef CONFIG_PM
80/*
81 * Kernel needs to suspend all functions separately. Therefore all
82 * registered functions must have drivers with suspend and resume
 83 * methods. Failing that, the kernel simply removes the whole card.
 84 *
 85 * If not already suspended, this function allocates and sends a host
86 * sleep activate request to the firmware and turns off the traffic.
87 */
88static int mwifiex_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
89{
90 struct mwifiex_adapter *adapter;
91 struct pcie_service_card *card;
92 int hs_actived;
93
94 if (pdev) {
95 card = (struct pcie_service_card *) pci_get_drvdata(pdev);
96 if (!card || !card->adapter) {
97 pr_err("Card or adapter structure is not valid\n");
98 return 0;
99 }
100 } else {
101 pr_err("PCIE device is not specified\n");
102 return 0;
103 }
104
105 adapter = card->adapter;
106
107 hs_actived = mwifiex_enable_hs(adapter);
108
109 /* Indicate device suspended */
110 adapter->is_suspended = true;
111
112 return 0;
113}
114
115/*
116 * Kernel needs to suspend all functions separately. Therefore all
117 * registered functions must have drivers with suspend and resume
 118 * methods. Failing that, the kernel simply removes the whole card.
 119 *
 120 * If not already resumed, this function turns on the traffic and
121 * sends a host sleep cancel request to the firmware.
122 */
123static int mwifiex_pcie_resume(struct pci_dev *pdev)
124{
125 struct mwifiex_adapter *adapter;
126 struct pcie_service_card *card;
127
128 if (pdev) {
129 card = (struct pcie_service_card *) pci_get_drvdata(pdev);
130 if (!card || !card->adapter) {
131 pr_err("Card or adapter structure is not valid\n");
132 return 0;
133 }
134 } else {
135 pr_err("PCIE device is not specified\n");
136 return 0;
137 }
138
139 adapter = card->adapter;
140
141 if (!adapter->is_suspended) {
142 dev_warn(adapter->dev, "Device already resumed\n");
143 return 0;
144 }
145
146 adapter->is_suspended = false;
147
148 mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
149 MWIFIEX_ASYNC_CMD);
150
151 return 0;
152}
153#endif
154
81/* 155/*
82 * This function probes an mwifiex device and registers it. It allocates 156 * This function probes an mwifiex device and registers it. It allocates
83 * the card structure, enables PCIE function number and initiates the 157 * the card structure, enables PCIE function number and initiates the
@@ -159,80 +233,6 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
159 kfree(card); 233 kfree(card);
160} 234}
161 235
162/*
163 * Kernel needs to suspend all functions separately. Therefore all
164 * registered functions must have drivers with suspend and resume
165 * methods. Failing that the kernel simply removes the whole card.
166 *
167 * If already not suspended, this function allocates and sends a host
168 * sleep activate request to the firmware and turns off the traffic.
169 */
170static int mwifiex_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
171{
172 struct mwifiex_adapter *adapter;
173 struct pcie_service_card *card;
174 int hs_actived;
175
176 if (pdev) {
177 card = (struct pcie_service_card *) pci_get_drvdata(pdev);
178 if (!card || !card->adapter) {
179 pr_err("Card or adapter structure is not valid\n");
180 return 0;
181 }
182 } else {
183 pr_err("PCIE device is not specified\n");
184 return 0;
185 }
186
187 adapter = card->adapter;
188
189 hs_actived = mwifiex_enable_hs(adapter);
190
191 /* Indicate device suspended */
192 adapter->is_suspended = true;
193
194 return 0;
195}
196
197/*
198 * Kernel needs to suspend all functions separately. Therefore all
199 * registered functions must have drivers with suspend and resume
200 * methods. Failing that the kernel simply removes the whole card.
201 *
202 * If already not resumed, this function turns on the traffic and
203 * sends a host sleep cancel request to the firmware.
204 */
205static int mwifiex_pcie_resume(struct pci_dev *pdev)
206{
207 struct mwifiex_adapter *adapter;
208 struct pcie_service_card *card;
209
210 if (pdev) {
211 card = (struct pcie_service_card *) pci_get_drvdata(pdev);
212 if (!card || !card->adapter) {
213 pr_err("Card or adapter structure is not valid\n");
214 return 0;
215 }
216 } else {
217 pr_err("PCIE device is not specified\n");
218 return 0;
219 }
220
221 adapter = card->adapter;
222
223 if (!adapter->is_suspended) {
224 dev_warn(adapter->dev, "Device already resumed\n");
225 return 0;
226 }
227
228 adapter->is_suspended = false;
229
230 mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
231 MWIFIEX_ASYNC_CMD);
232
233 return 0;
234}
235
236static DEFINE_PCI_DEVICE_TABLE(mwifiex_ids) = { 236static DEFINE_PCI_DEVICE_TABLE(mwifiex_ids) = {
237 { 237 {
238 PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8766P, 238 PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8766P,
@@ -1030,8 +1030,8 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
1030 u32 wrindx, num_tx_buffs, rx_val; 1030 u32 wrindx, num_tx_buffs, rx_val;
1031 int ret; 1031 int ret;
1032 dma_addr_t buf_pa; 1032 dma_addr_t buf_pa;
1033 struct mwifiex_pcie_buf_desc *desc; 1033 struct mwifiex_pcie_buf_desc *desc = NULL;
1034 struct mwifiex_pfu_buf_desc *desc2; 1034 struct mwifiex_pfu_buf_desc *desc2 = NULL;
1035 __le16 *tmp; 1035 __le16 *tmp;
1036 1036
1037 if (!(skb->data && skb->len)) { 1037 if (!(skb->data && skb->len)) {
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index c55c5bb93134..a2ae690a0a67 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -334,7 +334,7 @@ mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv,
334 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_HS_CFG_ENH); 334 cmd->command = cpu_to_le16(HostCmd_CMD_802_11_HS_CFG_ENH);
335 335
336 if (!hs_activate && 336 if (!hs_activate &&
337 (hscfg_param->conditions != cpu_to_le32(HOST_SLEEP_CFG_CANCEL)) && 337 (hscfg_param->conditions != cpu_to_le32(HS_CFG_CANCEL)) &&
338 ((adapter->arp_filter_size > 0) && 338 ((adapter->arp_filter_size > 0) &&
339 (adapter->arp_filter_size <= ARP_FILTER_MAX_BUF_SIZE))) { 339 (adapter->arp_filter_size <= ARP_FILTER_MAX_BUF_SIZE))) {
340 dev_dbg(adapter->dev, 340 dev_dbg(adapter->dev,
@@ -1059,6 +1059,80 @@ mwifiex_cmd_802_11_subsc_evt(struct mwifiex_private *priv,
1059 return 0; 1059 return 0;
1060} 1060}
1061 1061
1062static int
1063mwifiex_cmd_append_rpn_expression(struct mwifiex_private *priv,
1064 struct mwifiex_mef_entry *mef_entry,
1065 u8 **buffer)
1066{
1067 struct mwifiex_mef_filter *filter = mef_entry->filter;
1068 int i, byte_len;
1069 u8 *stack_ptr = *buffer;
1070
1071 for (i = 0; i < MWIFIEX_MAX_FILTERS; i++) {
1072 filter = &mef_entry->filter[i];
1073 if (!filter->filt_type)
1074 break;
1075 *(__le32 *)stack_ptr = cpu_to_le32((u32)filter->repeat);
1076 stack_ptr += 4;
1077 *stack_ptr = TYPE_DNUM;
1078 stack_ptr += 1;
1079
1080 byte_len = filter->byte_seq[MAX_BYTESEQ];
1081 memcpy(stack_ptr, filter->byte_seq, byte_len);
1082 stack_ptr += byte_len;
1083 *stack_ptr = byte_len;
1084 stack_ptr += 1;
1085 *stack_ptr = TYPE_BYTESEQ;
1086 stack_ptr += 1;
1087
1088 *(__le32 *)stack_ptr = cpu_to_le32((u32)filter->offset);
1089 stack_ptr += 4;
1090 *stack_ptr = TYPE_DNUM;
1091 stack_ptr += 1;
1092
1093 *stack_ptr = filter->filt_type;
1094 stack_ptr += 1;
1095
1096 if (filter->filt_action) {
1097 *stack_ptr = filter->filt_action;
1098 stack_ptr += 1;
1099 }
1100
1101 if (stack_ptr - *buffer > STACK_NBYTES)
1102 return -1;
1103 }
1104
1105 *buffer = stack_ptr;
1106 return 0;
1107}
1108
1109static int
1110mwifiex_cmd_mef_cfg(struct mwifiex_private *priv,
1111 struct host_cmd_ds_command *cmd,
1112 struct mwifiex_ds_mef_cfg *mef)
1113{
1114 struct host_cmd_ds_mef_cfg *mef_cfg = &cmd->params.mef_cfg;
1115 u8 *pos = (u8 *)mef_cfg;
1116
1117 cmd->command = cpu_to_le16(HostCmd_CMD_MEF_CFG);
1118
1119 mef_cfg->criteria = cpu_to_le32(mef->criteria);
1120 mef_cfg->num_entries = cpu_to_le16(mef->num_entries);
1121 pos += sizeof(*mef_cfg);
1122 mef_cfg->mef_entry->mode = mef->mef_entry->mode;
1123 mef_cfg->mef_entry->action = mef->mef_entry->action;
1124 pos += sizeof(*(mef_cfg->mef_entry));
1125
1126 if (mwifiex_cmd_append_rpn_expression(priv, mef->mef_entry, &pos))
1127 return -1;
1128
1129 mef_cfg->mef_entry->exprsize =
1130 cpu_to_le16(pos - mef_cfg->mef_entry->expr);
1131 cmd->size = cpu_to_le16((u16) (pos - (u8 *)mef_cfg) + S_DS_GEN);
1132
1133 return 0;
1134}
1135
1062/* 1136/*
1063 * This function prepares the commands before sending them to the firmware. 1137 * This function prepares the commands before sending them to the firmware.
1064 * 1138 *
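mwifiex_cmd_append_rpn_expression() serializes each filter in postfix (RPN)
form for the firmware's expression evaluator: the operands are pushed first
(repeat as a little-endian u32 tagged TYPE_DNUM, the byte sequence tagged
with its length and TYPE_BYTESEQ, the offset tagged TYPE_DNUM), then the
operator, with TYPE_AND/TYPE_OR chaining successive filters and the whole
stack capped at STACK_NBYTES (100). Using the constants from fw.h above, the
magic-packet filter (repeat = 16, the 6-byte MAC, offset = 14) encodes as:

        10 00 00 00 01           /* repeat = 16, TYPE_DNUM (0x01) */
        xx xx xx xx xx xx 06 02  /* MAC bytes, len = 6, TYPE_BYTESEQ (0x02) */
        0e 00 00 00 01           /* offset = 14, TYPE_DNUM */
        41                       /* TYPE_EQ = MAX_OPERAND + 1 = 0x41 */

with a trailing 0x44 (TYPE_AND) or 0x45 (TYPE_OR) appended only when the
filter is combined with a preceding one.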
@@ -1273,6 +1347,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
1273 case HostCmd_CMD_802_11_SUBSCRIBE_EVENT: 1347 case HostCmd_CMD_802_11_SUBSCRIBE_EVENT:
1274 ret = mwifiex_cmd_802_11_subsc_evt(priv, cmd_ptr, data_buf); 1348 ret = mwifiex_cmd_802_11_subsc_evt(priv, cmd_ptr, data_buf);
1275 break; 1349 break;
1350 case HostCmd_CMD_MEF_CFG:
1351 ret = mwifiex_cmd_mef_cfg(priv, cmd_ptr, data_buf);
1352 break;
1276 default: 1353 default:
1277 dev_err(priv->adapter->dev, 1354 dev_err(priv->adapter->dev,
1278 "PREP_CMD: unknown cmd- %#x\n", cmd_no); 1355 "PREP_CMD: unknown cmd- %#x\n", cmd_no);
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 4669f8d9389f..80b9f2238001 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -976,6 +976,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
976 case HostCmd_CMD_UAP_BSS_STOP: 976 case HostCmd_CMD_UAP_BSS_STOP:
977 priv->bss_started = 0; 977 priv->bss_started = 0;
978 break; 978 break;
979 case HostCmd_CMD_MEF_CFG:
980 break;
979 default: 981 default:
980 dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n", 982 dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n",
981 resp->command); 983 resp->command);
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 13100f8de3db..8c943b6ebf45 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -382,7 +382,7 @@ static int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action,
382 break; 382 break;
383 } 383 }
384 if (hs_cfg->is_invoke_hostcmd) { 384 if (hs_cfg->is_invoke_hostcmd) {
385 if (hs_cfg->conditions == HOST_SLEEP_CFG_CANCEL) { 385 if (hs_cfg->conditions == HS_CFG_CANCEL) {
386 if (!adapter->is_hs_configured) 386 if (!adapter->is_hs_configured)
387 /* Already cancelled */ 387 /* Already cancelled */
388 break; 388 break;
@@ -397,8 +397,8 @@ static int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action,
397 adapter->hs_cfg.gpio = (u8)hs_cfg->gpio; 397 adapter->hs_cfg.gpio = (u8)hs_cfg->gpio;
398 if (hs_cfg->gap) 398 if (hs_cfg->gap)
399 adapter->hs_cfg.gap = (u8)hs_cfg->gap; 399 adapter->hs_cfg.gap = (u8)hs_cfg->gap;
400 } else if (adapter->hs_cfg.conditions 400 } else if (adapter->hs_cfg.conditions ==
401 == cpu_to_le32(HOST_SLEEP_CFG_CANCEL)) { 401 cpu_to_le32(HS_CFG_CANCEL)) {
402 /* Return failure if no parameters for HS 402 /* Return failure if no parameters for HS
403 enable */ 403 enable */
404 status = -1; 404 status = -1;
@@ -414,7 +414,7 @@ static int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action,
414 HostCmd_CMD_802_11_HS_CFG_ENH, 414 HostCmd_CMD_802_11_HS_CFG_ENH,
415 HostCmd_ACT_GEN_SET, 0, 415 HostCmd_ACT_GEN_SET, 0,
416 &adapter->hs_cfg); 416 &adapter->hs_cfg);
417 if (hs_cfg->conditions == HOST_SLEEP_CFG_CANCEL) 417 if (hs_cfg->conditions == HS_CFG_CANCEL)
418 /* Restore previous condition */ 418 /* Restore previous condition */
419 adapter->hs_cfg.conditions = 419 adapter->hs_cfg.conditions =
420 cpu_to_le32(prev_cond); 420 cpu_to_le32(prev_cond);
@@ -448,7 +448,7 @@ int mwifiex_cancel_hs(struct mwifiex_private *priv, int cmd_type)
448{ 448{
449 struct mwifiex_ds_hs_cfg hscfg; 449 struct mwifiex_ds_hs_cfg hscfg;
450 450
451 hscfg.conditions = HOST_SLEEP_CFG_CANCEL; 451 hscfg.conditions = HS_CFG_CANCEL;
452 hscfg.is_invoke_hostcmd = true; 452 hscfg.is_invoke_hostcmd = true;
453 453
454 return mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET, 454 return mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET,
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index 296faec14365..8f923d0d2ba6 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -169,6 +169,8 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
169 if (!status) { 169 if (!status) {
170 priv->stats.tx_packets++; 170 priv->stats.tx_packets++;
171 priv->stats.tx_bytes += skb->len; 171 priv->stats.tx_bytes += skb->len;
172 if (priv->tx_timeout_cnt)
173 priv->tx_timeout_cnt = 0;
172 } else { 174 } else {
173 priv->stats.tx_errors++; 175 priv->stats.tx_errors++;
174 } 176 }
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index 21553976b550..54667e65ca47 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -195,7 +195,7 @@ int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb)
195 skb->protocol = eth_type_trans(skb, priv->netdev); 195 skb->protocol = eth_type_trans(skb, priv->netdev);
196 skb->ip_summed = CHECKSUM_NONE; 196 skb->ip_summed = CHECKSUM_NONE;
197 197
198 /* This is required only in case of 11n and USB as we alloc 198 /* This is required only in case of 11n and USB/PCIE as we alloc
199 * a buffer of 4K only if its 11N (to be able to receive 4K 199 * a buffer of 4K only if its 11N (to be able to receive 4K
200 * AMSDU packets). In case of SD we allocate buffers based 200 * AMSDU packets). In case of SD we allocate buffers based
201 * on the size of packet and hence this is not needed. 201 * on the size of packet and hence this is not needed.
@@ -212,7 +212,8 @@ int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb)
212 * fragments. Currently we fail the Filesndl-ht.scr script 212 * fragments. Currently we fail the Filesndl-ht.scr script
213 * for UDP, hence this fix 213 * for UDP, hence this fix
214 */ 214 */
215 if ((priv->adapter->iface_type == MWIFIEX_USB) && 215 if ((priv->adapter->iface_type == MWIFIEX_USB ||
216 priv->adapter->iface_type == MWIFIEX_PCIE) &&
216 (skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE)) 217 (skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE))
217 skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE); 218 skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
218 219
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 091d9a64080a..0640e7d7f0c2 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -232,6 +232,7 @@ struct mwl8k_priv {
232 u16 num_mcaddrs; 232 u16 num_mcaddrs;
233 u8 hw_rev; 233 u8 hw_rev;
234 u32 fw_rev; 234 u32 fw_rev;
235 u32 caps;
235 236
236 /* 237 /*
237 * Running count of TX packets in flight, to avoid 238 * Running count of TX packets in flight, to avoid
@@ -284,6 +285,7 @@ struct mwl8k_priv {
284 unsigned fw_state; 285 unsigned fw_state;
285 char *fw_pref; 286 char *fw_pref;
286 char *fw_alt; 287 char *fw_alt;
288 bool is_8764;
287 struct completion firmware_loading_complete; 289 struct completion firmware_loading_complete;
288 290
289 /* bitmap of running BSSes */ 291 /* bitmap of running BSSes */
@@ -600,13 +602,18 @@ mwl8k_send_fw_load_cmd(struct mwl8k_priv *priv, void *data, int length)
600 loops = 1000; 602 loops = 1000;
601 do { 603 do {
602 u32 int_code; 604 u32 int_code;
603 605 if (priv->is_8764) {
604 int_code = ioread32(regs + MWL8K_HIU_INT_CODE); 606 int_code = ioread32(regs +
605 if (int_code == MWL8K_INT_CODE_CMD_FINISHED) { 607 MWL8K_HIU_H2A_INTERRUPT_STATUS);
606 iowrite32(0, regs + MWL8K_HIU_INT_CODE); 608 if (int_code == 0)
607 break; 609 break;
610 } else {
611 int_code = ioread32(regs + MWL8K_HIU_INT_CODE);
612 if (int_code == MWL8K_INT_CODE_CMD_FINISHED) {
613 iowrite32(0, regs + MWL8K_HIU_INT_CODE);
614 break;
615 }
608 } 616 }
609
610 cond_resched(); 617 cond_resched();
611 udelay(1); 618 udelay(1);
612 } while (--loops); 619 } while (--loops);
@@ -724,7 +731,7 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw)
724 int rc; 731 int rc;
725 int loops; 732 int loops;
726 733
727 if (!memcmp(fw->data, "\x01\x00\x00\x00", 4)) { 734 if (!memcmp(fw->data, "\x01\x00\x00\x00", 4) && !priv->is_8764) {
728 const struct firmware *helper = priv->fw_helper; 735 const struct firmware *helper = priv->fw_helper;
729 736
730 if (helper == NULL) { 737 if (helper == NULL) {
@@ -743,7 +750,10 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw)
743 750
744 rc = mwl8k_feed_fw_image(priv, fw->data, fw->size); 751 rc = mwl8k_feed_fw_image(priv, fw->data, fw->size);
745 } else { 752 } else {
746 rc = mwl8k_load_fw_image(priv, fw->data, fw->size); 753 if (priv->is_8764)
754 rc = mwl8k_feed_fw_image(priv, fw->data, fw->size);
755 else
756 rc = mwl8k_load_fw_image(priv, fw->data, fw->size);
747 } 757 }
748 758
749 if (rc) { 759 if (rc) {
@@ -908,9 +918,9 @@ static void mwl8k_encapsulate_tx_frame(struct mwl8k_priv *priv,
908} 918}
909 919
910/* 920/*
911 * Packet reception for 88w8366 AP firmware. 921 * Packet reception for 88w8366/88w8764 AP firmware.
912 */ 922 */
913struct mwl8k_rxd_8366_ap { 923struct mwl8k_rxd_ap {
914 __le16 pkt_len; 924 __le16 pkt_len;
915 __u8 sq2; 925 __u8 sq2;
916 __u8 rate; 926 __u8 rate;
@@ -928,30 +938,30 @@ struct mwl8k_rxd_8366_ap {
928 __u8 rx_ctrl; 938 __u8 rx_ctrl;
929} __packed; 939} __packed;
930 940
931#define MWL8K_8366_AP_RATE_INFO_MCS_FORMAT 0x80 941#define MWL8K_AP_RATE_INFO_MCS_FORMAT 0x80
932#define MWL8K_8366_AP_RATE_INFO_40MHZ 0x40 942#define MWL8K_AP_RATE_INFO_40MHZ 0x40
933#define MWL8K_8366_AP_RATE_INFO_RATEID(x) ((x) & 0x3f) 943#define MWL8K_AP_RATE_INFO_RATEID(x) ((x) & 0x3f)
934 944
935#define MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST 0x80 945#define MWL8K_AP_RX_CTRL_OWNED_BY_HOST 0x80
936 946
937/* 8366 AP rx_status bits */ 947/* 8366/8764 AP rx_status bits */
938#define MWL8K_8366_AP_RXSTAT_DECRYPT_ERR_MASK 0x80 948#define MWL8K_AP_RXSTAT_DECRYPT_ERR_MASK 0x80
939#define MWL8K_8366_AP_RXSTAT_GENERAL_DECRYPT_ERR 0xFF 949#define MWL8K_AP_RXSTAT_GENERAL_DECRYPT_ERR 0xFF
940#define MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR 0x02 950#define MWL8K_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR 0x02
941#define MWL8K_8366_AP_RXSTAT_WEP_DECRYPT_ICV_ERR 0x04 951#define MWL8K_AP_RXSTAT_WEP_DECRYPT_ICV_ERR 0x04
942#define MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_ICV_ERR 0x08 952#define MWL8K_AP_RXSTAT_TKIP_DECRYPT_ICV_ERR 0x08
943 953
944static void mwl8k_rxd_8366_ap_init(void *_rxd, dma_addr_t next_dma_addr) 954static void mwl8k_rxd_ap_init(void *_rxd, dma_addr_t next_dma_addr)
945{ 955{
946 struct mwl8k_rxd_8366_ap *rxd = _rxd; 956 struct mwl8k_rxd_ap *rxd = _rxd;
947 957
948 rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr); 958 rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr);
949 rxd->rx_ctrl = MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST; 959 rxd->rx_ctrl = MWL8K_AP_RX_CTRL_OWNED_BY_HOST;
950} 960}
951 961
952static void mwl8k_rxd_8366_ap_refill(void *_rxd, dma_addr_t addr, int len) 962static void mwl8k_rxd_ap_refill(void *_rxd, dma_addr_t addr, int len)
953{ 963{
954 struct mwl8k_rxd_8366_ap *rxd = _rxd; 964 struct mwl8k_rxd_ap *rxd = _rxd;
955 965
956 rxd->pkt_len = cpu_to_le16(len); 966 rxd->pkt_len = cpu_to_le16(len);
957 rxd->pkt_phys_addr = cpu_to_le32(addr); 967 rxd->pkt_phys_addr = cpu_to_le32(addr);
@@ -960,12 +970,12 @@ static void mwl8k_rxd_8366_ap_refill(void *_rxd, dma_addr_t addr, int len)
960} 970}
961 971
962static int 972static int
963mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status, 973mwl8k_rxd_ap_process(void *_rxd, struct ieee80211_rx_status *status,
964 __le16 *qos, s8 *noise) 974 __le16 *qos, s8 *noise)
965{ 975{
966 struct mwl8k_rxd_8366_ap *rxd = _rxd; 976 struct mwl8k_rxd_ap *rxd = _rxd;
967 977
968 if (!(rxd->rx_ctrl & MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST)) 978 if (!(rxd->rx_ctrl & MWL8K_AP_RX_CTRL_OWNED_BY_HOST))
969 return -1; 979 return -1;
970 rmb(); 980 rmb();
971 981
@@ -974,11 +984,11 @@ mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status,
974 status->signal = -rxd->rssi; 984 status->signal = -rxd->rssi;
975 *noise = -rxd->noise_floor; 985 *noise = -rxd->noise_floor;
976 986
977 if (rxd->rate & MWL8K_8366_AP_RATE_INFO_MCS_FORMAT) { 987 if (rxd->rate & MWL8K_AP_RATE_INFO_MCS_FORMAT) {
978 status->flag |= RX_FLAG_HT; 988 status->flag |= RX_FLAG_HT;
979 if (rxd->rate & MWL8K_8366_AP_RATE_INFO_40MHZ) 989 if (rxd->rate & MWL8K_AP_RATE_INFO_40MHZ)
980 status->flag |= RX_FLAG_40MHZ; 990 status->flag |= RX_FLAG_40MHZ;
981 status->rate_idx = MWL8K_8366_AP_RATE_INFO_RATEID(rxd->rate); 991 status->rate_idx = MWL8K_AP_RATE_INFO_RATEID(rxd->rate);
982 } else { 992 } else {
983 int i; 993 int i;
984 994
@@ -1002,19 +1012,19 @@ mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status,
1002 1012
1003 *qos = rxd->qos_control; 1013 *qos = rxd->qos_control;
1004 1014
1005 if ((rxd->rx_status != MWL8K_8366_AP_RXSTAT_GENERAL_DECRYPT_ERR) && 1015 if ((rxd->rx_status != MWL8K_AP_RXSTAT_GENERAL_DECRYPT_ERR) &&
1006 (rxd->rx_status & MWL8K_8366_AP_RXSTAT_DECRYPT_ERR_MASK) && 1016 (rxd->rx_status & MWL8K_AP_RXSTAT_DECRYPT_ERR_MASK) &&
1007 (rxd->rx_status & MWL8K_8366_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR)) 1017 (rxd->rx_status & MWL8K_AP_RXSTAT_TKIP_DECRYPT_MIC_ERR))
1008 status->flag |= RX_FLAG_MMIC_ERROR; 1018 status->flag |= RX_FLAG_MMIC_ERROR;
1009 1019
1010 return le16_to_cpu(rxd->pkt_len); 1020 return le16_to_cpu(rxd->pkt_len);
1011} 1021}
1012 1022
1013static struct rxd_ops rxd_8366_ap_ops = { 1023static struct rxd_ops rxd_ap_ops = {
1014 .rxd_size = sizeof(struct mwl8k_rxd_8366_ap), 1024 .rxd_size = sizeof(struct mwl8k_rxd_ap),
1015 .rxd_init = mwl8k_rxd_8366_ap_init, 1025 .rxd_init = mwl8k_rxd_ap_init,
1016 .rxd_refill = mwl8k_rxd_8366_ap_refill, 1026 .rxd_refill = mwl8k_rxd_ap_refill,
1017 .rxd_process = mwl8k_rxd_8366_ap_process, 1027 .rxd_process = mwl8k_rxd_ap_process,
1018}; 1028};
1019 1029
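The rename works because the RX path never names a chip-specific descriptor type: it only calls through the rxd_ops table, so a single ops instance now serves both the 88w8366 and 88w8764 layouts. A minimal sketch of that dispatch follows; the helper name and the priv->rxd/priv->rxd_ops fields are illustrative assumptions, not code quoted from the tree:

	/* Hypothetical consumer: process one RX descriptor via the ops table. */
	static int rxq_process_one(struct mwl8k_priv *priv, int index,
				   struct ieee80211_rx_status *status,
				   __le16 *qos, s8 *noise)
	{
		struct rxd_ops *ops = priv->rxd_ops;           /* assumed field */
		void *rxd = priv->rxd + index * ops->rxd_size; /* assumed field */

		/* rxd_process() returns the packet length, or -1 while the
		 * firmware still owns the descriptor. */
		return ops->rxd_process(rxd, status, qos, noise);
	}
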
1020/* 1030/*
@@ -2401,6 +2411,9 @@ mwl8k_set_caps(struct ieee80211_hw *hw, u32 caps)
2401{ 2411{
2402 struct mwl8k_priv *priv = hw->priv; 2412 struct mwl8k_priv *priv = hw->priv;
2403 2413
2414 if (priv->caps)
2415 return;
2416
2404 if ((caps & MWL8K_CAP_2GHZ4) || !(caps & MWL8K_CAP_BAND_MASK)) { 2417 if ((caps & MWL8K_CAP_2GHZ4) || !(caps & MWL8K_CAP_BAND_MASK)) {
2405 mwl8k_setup_2ghz_band(hw); 2418 mwl8k_setup_2ghz_band(hw);
2406 if (caps & MWL8K_CAP_MIMO) 2419 if (caps & MWL8K_CAP_MIMO)
@@ -2412,6 +2425,8 @@ mwl8k_set_caps(struct ieee80211_hw *hw, u32 caps)
2412 if (caps & MWL8K_CAP_MIMO) 2425 if (caps & MWL8K_CAP_MIMO)
2413 mwl8k_set_ht_caps(hw, &priv->band_50, caps); 2426 mwl8k_set_ht_caps(hw, &priv->band_50, caps);
2414 } 2427 }
2428
2429 priv->caps = caps;
2415} 2430}
2416 2431
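The added lines make mwl8k_set_caps() a one-shot: priv->caps doubles as an "already configured" latch, so a repeated GET_HW_SPEC reply cannot register the 2.4/5 GHz bands twice. The pattern, reduced to its skeleton (names taken from the hunk above):

	if (priv->caps)
		return;		/* bands already set up for this device */

	/* ... per-band and HT setup as in the surrounding function ... */

	priv->caps = caps;	/* latch the word to suppress any re-run */
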
2417static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw) 2432static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw)
@@ -5429,12 +5444,17 @@ enum {
5429 MWL8363 = 0, 5444 MWL8363 = 0,
5430 MWL8687, 5445 MWL8687,
5431 MWL8366, 5446 MWL8366,
5447 MWL8764,
5432}; 5448};
5433 5449
5434#define MWL8K_8366_AP_FW_API 3 5450#define MWL8K_8366_AP_FW_API 3
5435#define _MWL8K_8366_AP_FW(api) "mwl8k/fmimage_8366_ap-" #api ".fw" 5451#define _MWL8K_8366_AP_FW(api) "mwl8k/fmimage_8366_ap-" #api ".fw"
5436#define MWL8K_8366_AP_FW(api) _MWL8K_8366_AP_FW(api) 5452#define MWL8K_8366_AP_FW(api) _MWL8K_8366_AP_FW(api)
5437 5453
5454#define MWL8K_8764_AP_FW_API 1
5455#define _MWL8K_8764_AP_FW(api) "mwl8k/fmimage_8764_ap-" #api ".fw"
5456#define MWL8K_8764_AP_FW(api) _MWL8K_8764_AP_FW(api)
5457
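The firmware-name macros use the usual two-level stringification trick: the extra _MWL8K_8764_AP_FW() level lets the preprocessor expand MWL8K_8764_AP_FW_API before the # operator stringizes it. Worked expansion:

	MWL8K_8764_AP_FW(MWL8K_8764_AP_FW_API)
	-> _MWL8K_8764_AP_FW(1)
	-> "mwl8k/fmimage_8764_ap-" "1" ".fw"	/* adjacent literals concatenate */
	-> "mwl8k/fmimage_8764_ap-1.fw"
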
5438static struct mwl8k_device_info mwl8k_info_tbl[] = { 5458static struct mwl8k_device_info mwl8k_info_tbl[] = {
5439 [MWL8363] = { 5459 [MWL8363] = {
5440 .part_name = "88w8363", 5460 .part_name = "88w8363",
@@ -5452,7 +5472,13 @@ static struct mwl8k_device_info mwl8k_info_tbl[] = {
5452 .fw_image_sta = "mwl8k/fmimage_8366.fw", 5472 .fw_image_sta = "mwl8k/fmimage_8366.fw",
5453 .fw_image_ap = MWL8K_8366_AP_FW(MWL8K_8366_AP_FW_API), 5473 .fw_image_ap = MWL8K_8366_AP_FW(MWL8K_8366_AP_FW_API),
5454 .fw_api_ap = MWL8K_8366_AP_FW_API, 5474 .fw_api_ap = MWL8K_8366_AP_FW_API,
5455 .ap_rxd_ops = &rxd_8366_ap_ops, 5475 .ap_rxd_ops = &rxd_ap_ops,
5476 },
5477 [MWL8764] = {
5478 .part_name = "88w8764",
5479 .fw_image_ap = MWL8K_8764_AP_FW(MWL8K_8764_AP_FW_API),
5480 .fw_api_ap = MWL8K_8764_AP_FW_API,
5481 .ap_rxd_ops = &rxd_ap_ops,
5456 }, 5482 },
5457}; 5483};
5458 5484
@@ -5474,6 +5500,7 @@ static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = {
5474 { PCI_VDEVICE(MARVELL, 0x2a41), .driver_data = MWL8366, }, 5500 { PCI_VDEVICE(MARVELL, 0x2a41), .driver_data = MWL8366, },
5475 { PCI_VDEVICE(MARVELL, 0x2a42), .driver_data = MWL8366, }, 5501 { PCI_VDEVICE(MARVELL, 0x2a42), .driver_data = MWL8366, },
5476 { PCI_VDEVICE(MARVELL, 0x2a43), .driver_data = MWL8366, }, 5502 { PCI_VDEVICE(MARVELL, 0x2a43), .driver_data = MWL8366, },
5503 { PCI_VDEVICE(MARVELL, 0x2b36), .driver_data = MWL8764, },
5477 { }, 5504 { },
5478}; 5505};
5479MODULE_DEVICE_TABLE(pci, mwl8k_pci_id_table); 5506MODULE_DEVICE_TABLE(pci, mwl8k_pci_id_table);
@@ -5995,6 +6022,8 @@ static int mwl8k_probe(struct pci_dev *pdev,
5995 priv->pdev = pdev; 6022 priv->pdev = pdev;
5996 priv->device_info = &mwl8k_info_tbl[id->driver_data]; 6023 priv->device_info = &mwl8k_info_tbl[id->driver_data];
5997 6024
6025 if (id->driver_data == MWL8764)
6026 priv->is_8764 = true;
5998 6027
5999 priv->sram = pci_iomap(pdev, 0, 0x10000); 6028 priv->sram = pci_iomap(pdev, 0, 0x10000);
6000 if (priv->sram == NULL) { 6029 if (priv->sram == NULL) {
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index 7744f42de1ea..1f9cb55c3360 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -1584,7 +1584,7 @@ static int ezusb_probe(struct usb_interface *interface,
1584 struct ezusb_priv *upriv = NULL; 1584 struct ezusb_priv *upriv = NULL;
1585 struct usb_interface_descriptor *iface_desc; 1585 struct usb_interface_descriptor *iface_desc;
1586 struct usb_endpoint_descriptor *ep; 1586 struct usb_endpoint_descriptor *ep;
1587 const struct firmware *fw_entry; 1587 const struct firmware *fw_entry = NULL;
1588 int retval = 0; 1588 int retval = 0;
1589 int i; 1589 int i;
1590 1590
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index 4fd49a007b51..978e7eb26567 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -396,7 +396,7 @@ static int p54spi_rx(struct p54s_priv *priv)
396static irqreturn_t p54spi_interrupt(int irq, void *config) 396static irqreturn_t p54spi_interrupt(int irq, void *config)
397{ 397{
398 struct spi_device *spi = config; 398 struct spi_device *spi = config;
399 struct p54s_priv *priv = dev_get_drvdata(&spi->dev); 399 struct p54s_priv *priv = spi_get_drvdata(spi);
400 400
401 ieee80211_queue_work(priv->hw, &priv->work); 401 ieee80211_queue_work(priv->hw, &priv->work);
402 402
@@ -609,7 +609,7 @@ static int p54spi_probe(struct spi_device *spi)
609 609
610 priv = hw->priv; 610 priv = hw->priv;
611 priv->hw = hw; 611 priv->hw = hw;
612 dev_set_drvdata(&spi->dev, priv); 612 spi_set_drvdata(spi, priv);
613 priv->spi = spi; 613 priv->spi = spi;
614 614
615 spi->bits_per_word = 16; 615 spi->bits_per_word = 16;
@@ -685,7 +685,7 @@ err_free:
685 685
686static int p54spi_remove(struct spi_device *spi) 686static int p54spi_remove(struct spi_device *spi)
687{ 687{
688 struct p54s_priv *priv = dev_get_drvdata(&spi->dev); 688 struct p54s_priv *priv = spi_get_drvdata(spi);
689 689
690 p54_unregister_common(priv->hw); 690 p54_unregister_common(priv->hw);
691 691
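These p54spi hunks are behavior-neutral: the spi_*_drvdata() helpers in <linux/spi/spi.h> are, at this point in the tree, thin inline wrappers over the dev_*_drvdata() calls they replace, roughly:

	static inline void spi_set_drvdata(struct spi_device *spi, void *data)
	{
		dev_set_drvdata(&spi->dev, data);
	}

	static inline void *spi_get_drvdata(struct spi_device *spi)
	{
		return dev_get_drvdata(&spi->dev);
	}

The gain is abstraction only: the driver stops reaching into spi->dev by hand.
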
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 3109c0db66e1..ebada812b3a5 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -144,7 +144,7 @@ static int psm;
144static char *essid; 144static char *essid;
145 145
146/* Default to encapsulation unless translation requested */ 146/* Default to encapsulation unless translation requested */
147static int translate = 1; 147static bool translate = 1;
148 148
149static int country = USA; 149static int country = USA;
150 150
@@ -178,7 +178,7 @@ module_param(hop_dwell, int, 0);
178module_param(beacon_period, int, 0); 178module_param(beacon_period, int, 0);
179module_param(psm, int, 0); 179module_param(psm, int, 0);
180module_param(essid, charp, 0); 180module_param(essid, charp, 0);
181module_param(translate, int, 0); 181module_param(translate, bool, 0);
182module_param(country, int, 0); 182module_param(country, int, 0);
183module_param(sniffer, int, 0); 183module_param(sniffer, int, 0);
184module_param(bc, int, 0); 184module_param(bc, int, 0);
@@ -953,7 +953,7 @@ static int translate_frame(ray_dev_t *local, struct tx_msg __iomem *ptx,
953 unsigned char *data, int len) 953 unsigned char *data, int len)
954{ 954{
955 __be16 proto = ((struct ethhdr *)data)->h_proto; 955 __be16 proto = ((struct ethhdr *)data)->h_proto;
956 if (ntohs(proto) >= 1536) { /* DIX II ethernet frame */ 956 if (ntohs(proto) >= ETH_P_802_3_MIN) { /* DIX II ethernet frame */
957 pr_debug("ray_cs translate_frame DIX II\n"); 957 pr_debug("ray_cs translate_frame DIX II\n");
958 /* Copy LLC header to card buffer */ 958 /* Copy LLC header to card buffer */
959 memcpy_toio(&ptx->var, eth2_llc, sizeof(eth2_llc)); 959 memcpy_toio(&ptx->var, eth2_llc, sizeof(eth2_llc));
@@ -1353,7 +1353,7 @@ static int ray_get_range(struct net_device *dev, struct iw_request_info *info,
1353static int ray_set_framing(struct net_device *dev, struct iw_request_info *info, 1353static int ray_set_framing(struct net_device *dev, struct iw_request_info *info,
1354 union iwreq_data *wrqu, char *extra) 1354 union iwreq_data *wrqu, char *extra)
1355{ 1355{
1356 translate = *(extra); /* Set framing mode */ 1356 translate = !!*(extra); /* Set framing mode */
1357 1357
1358 return 0; 1358 return 0;
1359} 1359}
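The ray_cs changes exist because module_param(translate, bool, 0) expects the backing variable to really be of type bool, and the wireless-extensions handler receives a raw char from userspace, hence the !! normalization:

	/* Any nonzero byte from userspace collapses to true. */
	translate = !!*(extra);		/* *extra == 5  ->  translate == true */
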
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 525fd7521dff..8169a85c4498 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2,7 +2,7 @@
2 * Driver for RNDIS based wireless USB devices. 2 * Driver for RNDIS based wireless USB devices.
3 * 3 *
4 * Copyright (C) 2007 by Bjorge Dijkstra <bjd@jooz.net> 4 * Copyright (C) 2007 by Bjorge Dijkstra <bjd@jooz.net>
5 * Copyright (C) 2008-2009 by Jussi Kivilinna <jussi.kivilinna@mbnet.fi> 5 * Copyright (C) 2008-2009 by Jussi Kivilinna <jussi.kivilinna@iki.fi>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -2839,8 +2839,7 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
2839 } else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC) 2839 } else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC)
2840 cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL); 2840 cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL);
2841 2841
2842 if (info != NULL) 2842 kfree(info);
2843 kfree(info);
2844 2843
2845 priv->connected = true; 2844 priv->connected = true;
2846 memcpy(priv->bssid, bssid, ETH_ALEN); 2845 memcpy(priv->bssid, bssid, ETH_ALEN);
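The kfree() hunk above is a common kernel cleanup: kfree(NULL) is defined to be a no-op, so the NULL guard was dead weight and the two forms below behave identically:

	if (info != NULL)	/* before */
		kfree(info);

	kfree(info);		/* after: same behavior, one branch fewer */
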
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 2bf4efa33186..ffe61d53e3fe 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -169,6 +169,13 @@ config RT2800USB_RT53XX
169 rt2800usb driver. 169 rt2800usb driver.
170 Supported chips: RT5370 170 Supported chips: RT5370
171 171
172config RT2800USB_RT55XX
173 bool "rt2800usb - Include support for rt55xx devices (EXPERIMENTAL)"
174 ---help---
175 This adds support for rt55xx wireless chipset family to the
176 rt2800usb driver.
177 Supported chips: RT5572
178
172config RT2800USB_UNKNOWN 179config RT2800USB_UNKNOWN
173 bool "rt2800usb - Include support for unknown (USB) devices" 180 bool "rt2800usb - Include support for unknown (USB) devices"
174 default n 181 default n
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 4db1088a847f..a7630d5ec892 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -51,6 +51,7 @@
51 * RF3320 2.4G 1T1R(RT3350/RT3370/RT3390) 51 * RF3320 2.4G 1T1R(RT3350/RT3370/RT3390)
52 * RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392) 52 * RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392)
53 * RF3053 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662) 53 * RF3053 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
54 * RF5592 2.4G/5G 2T2R
54 * RF5360 2.4G 1T1R 55 * RF5360 2.4G 1T1R
55 * RF5370 2.4G 1T1R 56 * RF5370 2.4G 1T1R
56 * RF5390 2.4G 1T1R 57 * RF5390 2.4G 1T1R
@@ -68,6 +69,7 @@
68#define RF3320 0x000b 69#define RF3320 0x000b
69#define RF3322 0x000c 70#define RF3322 0x000c
70#define RF3053 0x000d 71#define RF3053 0x000d
72#define RF5592 0x000f
71#define RF3290 0x3290 73#define RF3290 0x3290
72#define RF5360 0x5360 74#define RF5360 0x5360
73#define RF5370 0x5370 75#define RF5370 0x5370
@@ -88,11 +90,8 @@
88#define REV_RT3390E 0x0211 90#define REV_RT3390E 0x0211
89#define REV_RT5390F 0x0502 91#define REV_RT5390F 0x0502
90#define REV_RT5390R 0x1502 92#define REV_RT5390R 0x1502
93#define REV_RT5592C 0x0221
91 94
92/*
93 * Signal information.
94 * Default offset is required for RSSI <-> dBm conversion.
95 */
96#define DEFAULT_RSSI_OFFSET 120 95#define DEFAULT_RSSI_OFFSET 120
97 96
98/* 97/*
@@ -690,6 +689,12 @@
690#define GPIO_SWITCH_7 FIELD32(0x00000080) 689#define GPIO_SWITCH_7 FIELD32(0x00000080)
691 690
692/* 691/*
692 * FIXME: where does the DEBUG_INDEX name come from?
693 */
694#define MAC_DEBUG_INDEX 0x05e8
695#define MAC_DEBUG_INDEX_XTAL FIELD32(0x80000000)
696
697/*
693 * MAC Control/Status Registers(CSR). 698 * MAC Control/Status Registers(CSR).
694 * Some values are set in TU, whereas 1 TU == 1024 us. 699 * Some values are set in TU, whereas 1 TU == 1024 us.
695 */ 700 */
@@ -1934,6 +1939,9 @@ struct mac_iveiv_entry {
1934#define BBP4_BANDWIDTH FIELD8(0x18) 1939#define BBP4_BANDWIDTH FIELD8(0x18)
1935#define BBP4_MAC_IF_CTRL FIELD8(0x40) 1940#define BBP4_MAC_IF_CTRL FIELD8(0x40)
1936 1941
1942/* BBP27 */
1943#define BBP27_RX_CHAIN_SEL FIELD8(0x60)
1944
1937/* 1945/*
1938 * BBP 47: Bandwidth 1946 * BBP 47: Bandwidth
1939 */ 1947 */
@@ -1948,6 +1956,20 @@ struct mac_iveiv_entry {
1948#define BBP49_UPDATE_FLAG FIELD8(0x01) 1956#define BBP49_UPDATE_FLAG FIELD8(0x01)
1949 1957
1950/* 1958/*
1959 * BBP 105:
1960 * - bit0: detect SIG on primary channel only (on 40MHz bandwidth)
1961 * - bit1: FEQ (Feed Forward Compensation) for independent streams
1962 * - bit2: MLD (Maximum Likelihood Detection) for 2 streams (reserved on single
1963 * stream)
1964 * - bit4: channel estimation updates based on remodulation of
1965 * L-SIG and HT-SIG symbols
1966 */
1967#define BBP105_DETECT_SIG_ON_PRIMARY FIELD8(0x01)
1968#define BBP105_FEQ FIELD8(0x02)
1969#define BBP105_MLD FIELD8(0x04)
1970#define BBP105_SIG_REMODULATION FIELD8(0x08)
1971
1972/*
1951 * BBP 109 1973 * BBP 109
1952 */ 1974 */
1953#define BBP109_TX0_POWER FIELD8(0x0f) 1975#define BBP109_TX0_POWER FIELD8(0x0f)
@@ -1967,6 +1989,11 @@ struct mac_iveiv_entry {
1967#define BBP152_RX_DEFAULT_ANT FIELD8(0x80) 1989#define BBP152_RX_DEFAULT_ANT FIELD8(0x80)
1968 1990
1969/* 1991/*
1992 * BBP 254: unknown
1993 */
1994#define BBP254_BIT7 FIELD8(0x80)
1995
1996/*
1970 * RFCSR registers 1997 * RFCSR registers
1971 * The wordsize of the RFCSR is 8 bits. 1998 * The wordsize of the RFCSR is 8 bits.
1972 */ 1999 */
@@ -2022,9 +2049,18 @@ struct mac_iveiv_entry {
2022#define RFCSR7_BITS67 FIELD8(0xc0) 2049#define RFCSR7_BITS67 FIELD8(0xc0)
2023 2050
2024/* 2051/*
2052 * RFCSR 9:
2053 */
2054#define RFCSR9_K FIELD8(0x0f)
2055#define RFCSR9_N FIELD8(0x10)
2056#define RFCSR9_UNKNOWN FIELD8(0x60)
2057#define RFCSR9_MOD FIELD8(0x80)
2058
2059/*
2025 * RFCSR 11: 2060 * RFCSR 11:
2026 */ 2061 */
2027#define RFCSR11_R FIELD8(0x03) 2062#define RFCSR11_R FIELD8(0x03)
2063#define RFCSR11_MOD FIELD8(0xc0)
2028 2064
2029/* 2065/*
2030 * RFCSR 12: 2066 * RFCSR 12:
@@ -2130,11 +2166,13 @@ struct mac_iveiv_entry {
2130 * RFCSR 49: 2166 * RFCSR 49:
2131 */ 2167 */
2132#define RFCSR49_TX FIELD8(0x3f) 2168#define RFCSR49_TX FIELD8(0x3f)
2169#define RFCSR49_EP FIELD8(0xc0)
2133 2170
2134/* 2171/*
2135 * RFCSR 50: 2172 * RFCSR 50:
2136 */ 2173 */
2137#define RFCSR50_TX FIELD8(0x3f) 2174#define RFCSR50_TX FIELD8(0x3f)
2175#define RFCSR50_EP FIELD8(0xc0)
2138 2176
2139/* 2177/*
2140 * RF registers 2178 * RF registers
@@ -2497,6 +2535,61 @@ struct mac_iveiv_entry {
2497#define EEPROM_BBP_REG_ID FIELD16(0xff00) 2535#define EEPROM_BBP_REG_ID FIELD16(0xff00)
2498 2536
2499/* 2537/*
2538 * EEPROM IQ Calibration, unlike other entries those are byte addresses.
2539 */
2540
2541#define EEPROM_IQ_GAIN_CAL_TX0_2G 0x130
2542#define EEPROM_IQ_PHASE_CAL_TX0_2G 0x131
2543#define EEPROM_IQ_GROUPDELAY_CAL_TX0_2G 0x132
2544#define EEPROM_IQ_GAIN_CAL_TX1_2G 0x133
2545#define EEPROM_IQ_PHASE_CAL_TX1_2G 0x134
2546#define EEPROM_IQ_GROUPDELAY_CAL_TX1_2G 0x135
2547#define EEPROM_IQ_GAIN_CAL_RX0_2G 0x136
2548#define EEPROM_IQ_PHASE_CAL_RX0_2G 0x137
2549#define EEPROM_IQ_GROUPDELAY_CAL_RX0_2G 0x138
2550#define EEPROM_IQ_GAIN_CAL_RX1_2G 0x139
2551#define EEPROM_IQ_PHASE_CAL_RX1_2G 0x13A
2552#define EEPROM_IQ_GROUPDELAY_CAL_RX1_2G 0x13B
2553#define EEPROM_RF_IQ_COMPENSATION_CONTROL 0x13C
2554#define EEPROM_RF_IQ_IMBALANCE_COMPENSATION_CONTROL 0x13D
2555#define EEPROM_IQ_GAIN_CAL_TX0_CH36_TO_CH64_5G 0x144
2556#define EEPROM_IQ_PHASE_CAL_TX0_CH36_TO_CH64_5G 0x145
2557#define EEPROM_IQ_GAIN_CAL_TX0_CH100_TO_CH138_5G 0x146
2558#define EEPROM_IQ_PHASE_CAL_TX0_CH100_TO_CH138_5G 0x147
2559#define EEPROM_IQ_GAIN_CAL_TX0_CH140_TO_CH165_5G 0x148
2560#define EEPROM_IQ_PHASE_CAL_TX0_CH140_TO_CH165_5G 0x149
2561#define EEPROM_IQ_GAIN_CAL_TX1_CH36_TO_CH64_5G 0x14A
2562#define EEPROM_IQ_PHASE_CAL_TX1_CH36_TO_CH64_5G 0x14B
2563#define EEPROM_IQ_GAIN_CAL_TX1_CH100_TO_CH138_5G 0x14C
2564#define EEPROM_IQ_PHASE_CAL_TX1_CH100_TO_CH138_5G 0x14D
2565#define EEPROM_IQ_GAIN_CAL_TX1_CH140_TO_CH165_5G 0x14E
2566#define EEPROM_IQ_PHASE_CAL_TX1_CH140_TO_CH165_5G 0x14F
2567#define EEPROM_IQ_GROUPDELAY_CAL_TX0_CH36_TO_CH64_5G 0x150
2568#define EEPROM_IQ_GROUPDELAY_CAL_TX1_CH36_TO_CH64_5G 0x151
2569#define EEPROM_IQ_GROUPDELAY_CAL_TX0_CH100_TO_CH138_5G 0x152
2570#define EEPROM_IQ_GROUPDELAY_CAL_TX1_CH100_TO_CH138_5G 0x153
2571#define EEPROM_IQ_GROUPDELAY_CAL_TX0_CH140_TO_CH165_5G 0x154
2572#define EEPROM_IQ_GROUPDELAY_CAL_TX1_CH140_TO_CH165_5G 0x155
2573#define EEPROM_IQ_GAIN_CAL_RX0_CH36_TO_CH64_5G 0x156
2574#define EEPROM_IQ_PHASE_CAL_RX0_CH36_TO_CH64_5G 0x157
2575#define EEPROM_IQ_GAIN_CAL_RX0_CH100_TO_CH138_5G 0x158
2576#define EEPROM_IQ_PHASE_CAL_RX0_CH100_TO_CH138_5G 0x159
2577#define EEPROM_IQ_GAIN_CAL_RX0_CH140_TO_CH165_5G 0x15A
2578#define EEPROM_IQ_PHASE_CAL_RX0_CH140_TO_CH165_5G 0x15B
2579#define EEPROM_IQ_GAIN_CAL_RX1_CH36_TO_CH64_5G 0x15C
2580#define EEPROM_IQ_PHASE_CAL_RX1_CH36_TO_CH64_5G 0x15D
2581#define EEPROM_IQ_GAIN_CAL_RX1_CH100_TO_CH138_5G 0x15E
2582#define EEPROM_IQ_PHASE_CAL_RX1_CH100_TO_CH138_5G 0x15F
2583#define EEPROM_IQ_GAIN_CAL_RX1_CH140_TO_CH165_5G 0x160
2584#define EEPROM_IQ_PHASE_CAL_RX1_CH140_TO_CH165_5G 0x161
2585#define EEPROM_IQ_GROUPDELAY_CAL_RX0_CH36_TO_CH64_5G 0x162
2586#define EEPROM_IQ_GROUPDELAY_CAL_RX1_CH36_TO_CH64_5G 0x163
2587#define EEPROM_IQ_GROUPDELAY_CAL_RX0_CH100_TO_CH138_5G 0x164
2588#define EEPROM_IQ_GROUPDELAY_CAL_RX1_CH100_TO_CH138_5G 0x165
2589#define EEPROM_IQ_GROUPDELAY_CAL_RX0_CH140_TO_CH165_5G 0x166
2590#define EEPROM_IQ_GROUPDELAY_CAL_RX1_CH140_TO_CH165_5G 0x167
2591
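Unlike the rest of the EEPROM map, which is indexed in 16-bit words, these IQ-calibration entries are byte addresses. They are consumed by rt2800_iq_calibrate() below via rt2x00_eeprom_byte(), which is plausibly nothing more than a byte view of the cached word array, along these lines:

	static inline u8 rt2x00_eeprom_byte(struct rt2x00_dev *rt2x00dev,
					    const unsigned int byte)
	{
		/* rt2x00dev->eeprom caches the EEPROM image as __le16 words. */
		return *(((u8 *)rt2x00dev->eeprom) + byte);
	}
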
2592/*
2500 * MCU mailbox commands. 2593 * MCU mailbox commands.
2501 * MCU_SLEEP - go to power-save mode. 2594 * MCU_SLEEP - go to power-save mode.
2502 * arg1: 1: save as much power as possible, 0: save less power. 2595 * arg1: 1: save as much power as possible, 0: save less power.
@@ -2535,6 +2628,8 @@ struct mac_iveiv_entry {
2535#define TXWI_DESC_SIZE (4 * sizeof(__le32)) 2628#define TXWI_DESC_SIZE (4 * sizeof(__le32))
2536#define RXWI_DESC_SIZE (4 * sizeof(__le32)) 2629#define RXWI_DESC_SIZE (4 * sizeof(__le32))
2537 2630
2631#define TXWI_DESC_SIZE_5592 (5 * sizeof(__le32))
2632#define RXWI_DESC_SIZE_5592 (6 * sizeof(__le32))
2538/* 2633/*
2539 * TX WI structure 2634 * TX WI structure
2540 */ 2635 */
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index a658b4bc7da2..f08a0424fe4d 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -527,8 +527,10 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev,
527 */ 527 */
528 rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0); 528 rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
529 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0); 529 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
530 if (rt2x00_is_usb(rt2x00dev)) 530 if (rt2x00_is_usb(rt2x00dev)) {
531 rt2800_register_write(rt2x00dev, H2M_INT_SRC, 0); 531 rt2800_register_write(rt2x00dev, H2M_INT_SRC, 0);
532 rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
533 }
532 msleep(1); 534 msleep(1);
533 535
534 return 0; 536 return 0;
@@ -674,11 +676,6 @@ void rt2800_process_rxwi(struct queue_entry *entry,
674 * Convert descriptor AGC value to RSSI value. 676 * Convert descriptor AGC value to RSSI value.
675 */ 677 */
676 rxdesc->rssi = rt2800_agc_to_rssi(entry->queue->rt2x00dev, word); 678 rxdesc->rssi = rt2800_agc_to_rssi(entry->queue->rt2x00dev, word);
677
678 /*
679 * Remove RXWI descriptor from start of buffer.
680 */
681 skb_pull(entry->skb, RXWI_DESC_SIZE);
682} 679}
683EXPORT_SYMBOL_GPL(rt2800_process_rxwi); 680EXPORT_SYMBOL_GPL(rt2800_process_rxwi);
684 681
@@ -1988,8 +1985,21 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
1988} 1985}
1989 1986
1990#define POWER_BOUND 0x27 1987#define POWER_BOUND 0x27
1988#define POWER_BOUND_5G 0x2b
1991#define FREQ_OFFSET_BOUND 0x5f 1989#define FREQ_OFFSET_BOUND 0x5f
1992 1990
1991static void rt2800_adjust_freq_offset(struct rt2x00_dev *rt2x00dev)
1992{
1993 u8 rfcsr;
1994
1995 rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
1996 if (rt2x00dev->freq_offset > FREQ_OFFSET_BOUND)
1997 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, FREQ_OFFSET_BOUND);
1998 else
1999 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
2000 rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
2001}
2002
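The new helper replaces three identical clamp-and-write sequences in the rf3290/rf3322/rf53xx channel-config paths below. An equivalent, slightly tighter formulation of the same clamp would be:

	rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR17_CODE,
			  min_t(u8, rt2x00dev->freq_offset, FREQ_OFFSET_BOUND));
	rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
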
1993static void rt2800_config_channel_rf3290(struct rt2x00_dev *rt2x00dev, 2003static void rt2800_config_channel_rf3290(struct rt2x00_dev *rt2x00dev,
1994 struct ieee80211_conf *conf, 2004 struct ieee80211_conf *conf,
1995 struct rf_channel *rf, 2005 struct rf_channel *rf,
@@ -2010,12 +2020,7 @@ static void rt2800_config_channel_rf3290(struct rt2x00_dev *rt2x00dev,
2010 rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1); 2020 rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1);
2011 rt2800_rfcsr_write(rt2x00dev, 49, rfcsr); 2021 rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
2012 2022
2013 rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr); 2023 rt2800_adjust_freq_offset(rt2x00dev);
2014 if (rt2x00dev->freq_offset > FREQ_OFFSET_BOUND)
2015 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, FREQ_OFFSET_BOUND);
2016 else
2017 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
2018 rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
2019 2024
2020 if (rf->channel <= 14) { 2025 if (rf->channel <= 14) {
2021 if (rf->channel == 6) 2026 if (rf->channel == 6)
@@ -2056,13 +2061,7 @@ static void rt2800_config_channel_rf3322(struct rt2x00_dev *rt2x00dev,
2056 else 2061 else
2057 rt2800_rfcsr_write(rt2x00dev, 48, info->default_power2); 2062 rt2800_rfcsr_write(rt2x00dev, 48, info->default_power2);
2058 2063
2059 rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr); 2064 rt2800_adjust_freq_offset(rt2x00dev);
2060 if (rt2x00dev->freq_offset > FREQ_OFFSET_BOUND)
2061 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, FREQ_OFFSET_BOUND);
2062 else
2063 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
2064
2065 rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
2066 2065
2067 rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr); 2066 rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
2068 rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1); 2067 rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
@@ -2127,12 +2126,7 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
2127 rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1); 2126 rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
2128 rt2800_rfcsr_write(rt2x00dev, 1, rfcsr); 2127 rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
2129 2128
2130 rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr); 2129 rt2800_adjust_freq_offset(rt2x00dev);
2131 if (rt2x00dev->freq_offset > FREQ_OFFSET_BOUND)
2132 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, FREQ_OFFSET_BOUND);
2133 else
2134 rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
2135 rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
2136 2130
2137 if (rf->channel <= 14) { 2131 if (rf->channel <= 14) {
2138 int idx = rf->channel-1; 2132 int idx = rf->channel-1;
@@ -2184,6 +2178,382 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev,
2184 } 2178 }
2185} 2179}
2186 2180
2181static void rt2800_config_channel_rf55xx(struct rt2x00_dev *rt2x00dev,
2182 struct ieee80211_conf *conf,
2183 struct rf_channel *rf,
2184 struct channel_info *info)
2185{
2186 u8 rfcsr, ep_reg;
2187 u32 reg;
2188 int power_bound;
2189
2190 /* TODO */
2191 const bool is_11b = false;
2192 const bool is_type_ep = false;
2193
2194 rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
2195 rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL,
2196 (rf->channel > 14 || conf_is_ht40(conf)) ? 5 : 0);
2197 rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
2198
2199 /* Order of values in an rf_channel entry: N, K, mod, R */
2200 rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1 & 0xff);
2201
2202 rt2800_rfcsr_read(rt2x00dev, 9, &rfcsr);
2203 rt2x00_set_field8(&rfcsr, RFCSR9_K, rf->rf2 & 0xf);
2204 rt2x00_set_field8(&rfcsr, RFCSR9_N, (rf->rf1 & 0x100) >> 8);
2205 rt2x00_set_field8(&rfcsr, RFCSR9_MOD, ((rf->rf3 - 8) & 0x4) >> 2);
2206 rt2800_rfcsr_write(rt2x00dev, 9, rfcsr);
2207
2208 rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr);
2209 rt2x00_set_field8(&rfcsr, RFCSR11_R, rf->rf4 - 1);
2210 rt2x00_set_field8(&rfcsr, RFCSR11_MOD, (rf->rf3 - 8) & 0x3);
2211 rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);
2212
2213 if (rf->channel <= 14) {
2214 rt2800_rfcsr_write(rt2x00dev, 10, 0x90);
2215 /* FIXME: RF11 overwrite? */
2216 rt2800_rfcsr_write(rt2x00dev, 11, 0x4A);
2217 rt2800_rfcsr_write(rt2x00dev, 12, 0x52);
2218 rt2800_rfcsr_write(rt2x00dev, 13, 0x42);
2219 rt2800_rfcsr_write(rt2x00dev, 22, 0x40);
2220 rt2800_rfcsr_write(rt2x00dev, 24, 0x4A);
2221 rt2800_rfcsr_write(rt2x00dev, 25, 0x80);
2222 rt2800_rfcsr_write(rt2x00dev, 27, 0x42);
2223 rt2800_rfcsr_write(rt2x00dev, 36, 0x80);
2224 rt2800_rfcsr_write(rt2x00dev, 37, 0x08);
2225 rt2800_rfcsr_write(rt2x00dev, 38, 0x89);
2226 rt2800_rfcsr_write(rt2x00dev, 39, 0x1B);
2227 rt2800_rfcsr_write(rt2x00dev, 40, 0x0D);
2228 rt2800_rfcsr_write(rt2x00dev, 41, 0x9B);
2229 rt2800_rfcsr_write(rt2x00dev, 42, 0xD5);
2230 rt2800_rfcsr_write(rt2x00dev, 43, 0x72);
2231 rt2800_rfcsr_write(rt2x00dev, 44, 0x0E);
2232 rt2800_rfcsr_write(rt2x00dev, 45, 0xA2);
2233 rt2800_rfcsr_write(rt2x00dev, 46, 0x6B);
2234 rt2800_rfcsr_write(rt2x00dev, 48, 0x10);
2235 rt2800_rfcsr_write(rt2x00dev, 51, 0x3E);
2236 rt2800_rfcsr_write(rt2x00dev, 52, 0x48);
2237 rt2800_rfcsr_write(rt2x00dev, 54, 0x38);
2238 rt2800_rfcsr_write(rt2x00dev, 56, 0xA1);
2239 rt2800_rfcsr_write(rt2x00dev, 57, 0x00);
2240 rt2800_rfcsr_write(rt2x00dev, 58, 0x39);
2241 rt2800_rfcsr_write(rt2x00dev, 60, 0x45);
2242 rt2800_rfcsr_write(rt2x00dev, 61, 0x91);
2243 rt2800_rfcsr_write(rt2x00dev, 62, 0x39);
2244
2245 /* TODO RF27 <- tssi */
2246
2247 rfcsr = rf->channel <= 10 ? 0x07 : 0x06;
2248 rt2800_rfcsr_write(rt2x00dev, 23, rfcsr);
2249 rt2800_rfcsr_write(rt2x00dev, 59, rfcsr);
2250
2251 if (is_11b) {
2252 /* CCK */
2253 rt2800_rfcsr_write(rt2x00dev, 31, 0xF8);
2254 rt2800_rfcsr_write(rt2x00dev, 32, 0xC0);
2255 if (is_type_ep)
2256 rt2800_rfcsr_write(rt2x00dev, 55, 0x06);
2257 else
2258 rt2800_rfcsr_write(rt2x00dev, 55, 0x47);
2259 } else {
2260 /* OFDM */
2261 if (is_type_ep)
2262 rt2800_rfcsr_write(rt2x00dev, 55, 0x03);
2263 else
2264 rt2800_rfcsr_write(rt2x00dev, 55, 0x43);
2265 }
2266
2267 power_bound = POWER_BOUND;
2268 ep_reg = 0x2;
2269 } else {
2270 rt2800_rfcsr_write(rt2x00dev, 10, 0x97);
2271 /* FIXME: RF11 overwrite */
2272 rt2800_rfcsr_write(rt2x00dev, 11, 0x40);
2273 rt2800_rfcsr_write(rt2x00dev, 25, 0xBF);
2274 rt2800_rfcsr_write(rt2x00dev, 27, 0x42);
2275 rt2800_rfcsr_write(rt2x00dev, 36, 0x00);
2276 rt2800_rfcsr_write(rt2x00dev, 37, 0x04);
2277 rt2800_rfcsr_write(rt2x00dev, 38, 0x85);
2278 rt2800_rfcsr_write(rt2x00dev, 40, 0x42);
2279 rt2800_rfcsr_write(rt2x00dev, 41, 0xBB);
2280 rt2800_rfcsr_write(rt2x00dev, 42, 0xD7);
2281 rt2800_rfcsr_write(rt2x00dev, 45, 0x41);
2282 rt2800_rfcsr_write(rt2x00dev, 48, 0x00);
2283 rt2800_rfcsr_write(rt2x00dev, 57, 0x77);
2284 rt2800_rfcsr_write(rt2x00dev, 60, 0x05);
2285 rt2800_rfcsr_write(rt2x00dev, 61, 0x01);
2286
2287 /* TODO RF27 <- tssi */
2288
2289 if (rf->channel >= 36 && rf->channel <= 64) {
2290
2291 rt2800_rfcsr_write(rt2x00dev, 12, 0x2E);
2292 rt2800_rfcsr_write(rt2x00dev, 13, 0x22);
2293 rt2800_rfcsr_write(rt2x00dev, 22, 0x60);
2294 rt2800_rfcsr_write(rt2x00dev, 23, 0x7F);
2295 if (rf->channel <= 50)
2296 rt2800_rfcsr_write(rt2x00dev, 24, 0x09);
2297 else if (rf->channel >= 52)
2298 rt2800_rfcsr_write(rt2x00dev, 24, 0x07);
2299 rt2800_rfcsr_write(rt2x00dev, 39, 0x1C);
2300 rt2800_rfcsr_write(rt2x00dev, 43, 0x5B);
2301 rt2800_rfcsr_write(rt2x00dev, 44, 0x40);
2302 rt2800_rfcsr_write(rt2x00dev, 46, 0x00);
2303 rt2800_rfcsr_write(rt2x00dev, 51, 0xFE);
2304 rt2800_rfcsr_write(rt2x00dev, 52, 0x0C);
2305 rt2800_rfcsr_write(rt2x00dev, 54, 0xF8);
2306 if (rf->channel <= 50) {
2307 rt2800_rfcsr_write(rt2x00dev, 55, 0x06);
2308 rt2800_rfcsr_write(rt2x00dev, 56, 0xD3);
2309 } else if (rf->channel >= 52) {
2310 rt2800_rfcsr_write(rt2x00dev, 55, 0x04);
2311 rt2800_rfcsr_write(rt2x00dev, 56, 0xBB);
2312 }
2313
2314 rt2800_rfcsr_write(rt2x00dev, 58, 0x15);
2315 rt2800_rfcsr_write(rt2x00dev, 59, 0x7F);
2316 rt2800_rfcsr_write(rt2x00dev, 62, 0x15);
2317
2318 } else if (rf->channel >= 100 && rf->channel <= 165) {
2319
2320 rt2800_rfcsr_write(rt2x00dev, 12, 0x0E);
2321 rt2800_rfcsr_write(rt2x00dev, 13, 0x42);
2322 rt2800_rfcsr_write(rt2x00dev, 22, 0x40);
2323 if (rf->channel <= 153) {
2324 rt2800_rfcsr_write(rt2x00dev, 23, 0x3C);
2325 rt2800_rfcsr_write(rt2x00dev, 24, 0x06);
2326 } else if (rf->channel >= 155) {
2327 rt2800_rfcsr_write(rt2x00dev, 23, 0x38);
2328 rt2800_rfcsr_write(rt2x00dev, 24, 0x05);
2329 }
2330 if (rf->channel <= 138) {
2331 rt2800_rfcsr_write(rt2x00dev, 39, 0x1A);
2332 rt2800_rfcsr_write(rt2x00dev, 43, 0x3B);
2333 rt2800_rfcsr_write(rt2x00dev, 44, 0x20);
2334 rt2800_rfcsr_write(rt2x00dev, 46, 0x18);
2335 } else if (rf->channel >= 140) {
2336 rt2800_rfcsr_write(rt2x00dev, 39, 0x18);
2337 rt2800_rfcsr_write(rt2x00dev, 43, 0x1B);
2338 rt2800_rfcsr_write(rt2x00dev, 44, 0x10);
2339 rt2800_rfcsr_write(rt2x00dev, 46, 0x08);
2340 }
2341 if (rf->channel <= 124)
2342 rt2800_rfcsr_write(rt2x00dev, 51, 0xFC);
2343 else if (rf->channel >= 126)
2344 rt2800_rfcsr_write(rt2x00dev, 51, 0xEC);
2345 if (rf->channel <= 138)
2346 rt2800_rfcsr_write(rt2x00dev, 52, 0x06);
2347 else if (rf->channel >= 140)
2348 rt2800_rfcsr_write(rt2x00dev, 52, 0x06);
2349 rt2800_rfcsr_write(rt2x00dev, 54, 0xEB);
2350 if (rf->channel <= 138)
2351 rt2800_rfcsr_write(rt2x00dev, 55, 0x01);
2352 else if (rf->channel >= 140)
2353 rt2800_rfcsr_write(rt2x00dev, 55, 0x00);
2354 if (rf->channel <= 128)
2355 rt2800_rfcsr_write(rt2x00dev, 56, 0xBB);
2356 else if (rf->channel >= 130)
2357 rt2800_rfcsr_write(rt2x00dev, 56, 0xAB);
2358 if (rf->channel <= 116)
2359 rt2800_rfcsr_write(rt2x00dev, 58, 0x1D);
2360 else if (rf->channel >= 118)
2361 rt2800_rfcsr_write(rt2x00dev, 58, 0x15);
2362 if (rf->channel <= 138)
2363 rt2800_rfcsr_write(rt2x00dev, 59, 0x3F);
2364 else if (rf->channel >= 140)
2365 rt2800_rfcsr_write(rt2x00dev, 59, 0x7C);
2366 if (rf->channel <= 116)
2367 rt2800_rfcsr_write(rt2x00dev, 62, 0x1D);
2368 else if (rf->channel >= 118)
2369 rt2800_rfcsr_write(rt2x00dev, 62, 0x15);
2370 }
2371
2372 power_bound = POWER_BOUND_5G;
2373 ep_reg = 0x3;
2374 }
2375
2376 rt2800_rfcsr_read(rt2x00dev, 49, &rfcsr);
2377 if (info->default_power1 > power_bound)
2378 rt2x00_set_field8(&rfcsr, RFCSR49_TX, power_bound);
2379 else
2380 rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1);
2381 if (is_type_ep)
2382 rt2x00_set_field8(&rfcsr, RFCSR49_EP, ep_reg);
2383 rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
2384
2385 rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr);
2386 if (info->default_power2 > power_bound)
2387 rt2x00_set_field8(&rfcsr, RFCSR50_TX, power_bound);
2388 else
2389 rt2x00_set_field8(&rfcsr, RFCSR50_TX, info->default_power2);
2390 if (is_type_ep)
2391 rt2x00_set_field8(&rfcsr, RFCSR50_EP, ep_reg);
2392 rt2800_rfcsr_write(rt2x00dev, 50, rfcsr);
2393
2394 rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
2395 rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
2396 rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1);
2397
2398 rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD,
2399 rt2x00dev->default_ant.tx_chain_num >= 1);
2400 rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD,
2401 rt2x00dev->default_ant.tx_chain_num == 2);
2402 rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0);
2403
2404 rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD,
2405 rt2x00dev->default_ant.rx_chain_num >= 1);
2406 rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD,
2407 rt2x00dev->default_ant.rx_chain_num == 2);
2408 rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0);
2409
2410 rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
2411 rt2800_rfcsr_write(rt2x00dev, 6, 0xe4);
2412
2413 if (conf_is_ht40(conf))
2414 rt2800_rfcsr_write(rt2x00dev, 30, 0x16);
2415 else
2416 rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
2417
2418 if (!is_11b) {
2419 rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
2420 rt2800_rfcsr_write(rt2x00dev, 32, 0x80);
2421 }
2422
2423 /* TODO proper frequency adjustment */
2424 rt2800_adjust_freq_offset(rt2x00dev);
2425
2426 /* TODO merge with others */
2427 rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
2428 rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1);
2429 rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
2430
2431 /* BBP settings */
2432 rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
2433 rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
2434 rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
2435
2436 rt2800_bbp_write(rt2x00dev, 79, (rf->channel <= 14) ? 0x1C : 0x18);
2437 rt2800_bbp_write(rt2x00dev, 80, (rf->channel <= 14) ? 0x0E : 0x08);
2438 rt2800_bbp_write(rt2x00dev, 81, (rf->channel <= 14) ? 0x3A : 0x38);
2439 rt2800_bbp_write(rt2x00dev, 82, (rf->channel <= 14) ? 0x62 : 0x92);
2440
2441 /* GLRT band configuration */
2442 rt2800_bbp_write(rt2x00dev, 195, 128);
2443 rt2800_bbp_write(rt2x00dev, 196, (rf->channel <= 14) ? 0xE0 : 0xF0);
2444 rt2800_bbp_write(rt2x00dev, 195, 129);
2445 rt2800_bbp_write(rt2x00dev, 196, (rf->channel <= 14) ? 0x1F : 0x1E);
2446 rt2800_bbp_write(rt2x00dev, 195, 130);
2447 rt2800_bbp_write(rt2x00dev, 196, (rf->channel <= 14) ? 0x38 : 0x28);
2448 rt2800_bbp_write(rt2x00dev, 195, 131);
2449 rt2800_bbp_write(rt2x00dev, 196, (rf->channel <= 14) ? 0x32 : 0x20);
2450 rt2800_bbp_write(rt2x00dev, 195, 133);
2451 rt2800_bbp_write(rt2x00dev, 196, (rf->channel <= 14) ? 0x28 : 0x7F);
2452 rt2800_bbp_write(rt2x00dev, 195, 124);
2453 rt2800_bbp_write(rt2x00dev, 196, (rf->channel <= 14) ? 0x19 : 0x7F);
2454}
2455
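The "GLRT band configuration" block above (and rt2800_init_bbp_5592_glrt() later in this patch) drives an index/data register pair: BBP 195 selects a GLRT table slot and BBP 196 writes its value. A hypothetical helper would make the protocol explicit; the hunks open-code the two writes instead:

	static void rt2800_bbp_glrt_write(struct rt2x00_dev *rt2x00dev,
					  const u8 index, const u8 value)
	{
		rt2800_bbp_write(rt2x00dev, 195, index);	/* slot select */
		rt2800_bbp_write(rt2x00dev, 196, value);	/* slot data */
	}
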
2456static void rt2800_bbp_write_with_rx_chain(struct rt2x00_dev *rt2x00dev,
2457 const unsigned int word,
2458 const u8 value)
2459{
2460 u8 chain, reg;
2461
2462 for (chain = 0; chain < rt2x00dev->default_ant.rx_chain_num; chain++) {
2463 rt2800_bbp_read(rt2x00dev, 27, &reg);
2464 rt2x00_set_field8(&reg, BBP27_RX_CHAIN_SEL, chain);
2465 rt2800_bbp_write(rt2x00dev, 27, reg);
2466
2467 rt2800_bbp_write(rt2x00dev, word, value);
2468 }
2469}
2470
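Per-chain copies of a BBP register are reached indirectly: BBP 27 holds the chain selector (BBP27_RX_CHAIN_SEL), and the helper above fans one value out to every active RX chain. The AGC initialization later in this patch is the first user:

	/* From rt2800_config_channel() below: same AGC value on each RX chain. */
	reg = (rf->channel <= 14 ? 0x1c : 0x24) + 2 * rt2x00dev->lna_gain;
	rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg);
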
2471static void rt2800_iq_calibrate(struct rt2x00_dev *rt2x00dev, int channel)
2472{
2473 u8 cal;
2474
2475 /* TX0 IQ Gain */
2476 rt2800_bbp_write(rt2x00dev, 158, 0x2c);
2477 if (channel <= 14)
2478 cal = rt2x00_eeprom_byte(rt2x00dev, EEPROM_IQ_GAIN_CAL_TX0_2G);
2479 else if (channel >= 36 && channel <= 64)
2480 cal = rt2x00_eeprom_byte(rt2x00dev,
2481 EEPROM_IQ_GAIN_CAL_TX0_CH36_TO_CH64_5G);
2482 else if (channel >= 100 && channel <= 138)
2483 cal = rt2x00_eeprom_byte(rt2x00dev,
2484 EEPROM_IQ_GAIN_CAL_TX0_CH100_TO_CH138_5G);
2485 else if (channel >= 140 && channel <= 165)
2486 cal = rt2x00_eeprom_byte(rt2x00dev,
2487 EEPROM_IQ_GAIN_CAL_TX0_CH140_TO_CH165_5G);
2488 else
2489 cal = 0;
2490 rt2800_bbp_write(rt2x00dev, 159, cal);
2491
2492 /* TX0 IQ Phase */
2493 rt2800_bbp_write(rt2x00dev, 158, 0x2d);
2494 if (channel <= 14)
2495 cal = rt2x00_eeprom_byte(rt2x00dev, EEPROM_IQ_PHASE_CAL_TX0_2G);
2496 else if (channel >= 36 && channel <= 64)
2497 cal = rt2x00_eeprom_byte(rt2x00dev,
2498 EEPROM_IQ_PHASE_CAL_TX0_CH36_TO_CH64_5G);
2499 else if (channel >= 100 && channel <= 138)
2500 cal = rt2x00_eeprom_byte(rt2x00dev,
2501 EEPROM_IQ_PHASE_CAL_TX0_CH100_TO_CH138_5G);
2502 else if (channel >= 140 && channel <= 165)
2503 cal = rt2x00_eeprom_byte(rt2x00dev,
2504 EEPROM_IQ_PHASE_CAL_TX0_CH140_TO_CH165_5G);
2505 else
2506 cal = 0;
2507 rt2800_bbp_write(rt2x00dev, 159, cal);
2508
2509 /* TX1 IQ Gain */
2510 rt2800_bbp_write(rt2x00dev, 158, 0x4a);
2511 if (channel <= 14)
2512 cal = rt2x00_eeprom_byte(rt2x00dev, EEPROM_IQ_GAIN_CAL_TX1_2G);
2513 else if (channel >= 36 && channel <= 64)
2514 cal = rt2x00_eeprom_byte(rt2x00dev,
2515 EEPROM_IQ_GAIN_CAL_TX1_CH36_TO_CH64_5G);
2516 else if (channel >= 100 && channel <= 138)
2517 cal = rt2x00_eeprom_byte(rt2x00dev,
2518 EEPROM_IQ_GAIN_CAL_TX1_CH100_TO_CH138_5G);
2519 else if (channel >= 140 && channel <= 165)
2520 cal = rt2x00_eeprom_byte(rt2x00dev,
2521 EEPROM_IQ_GAIN_CAL_TX1_CH140_TO_CH165_5G);
2522 else
2523 cal = 0;
2524 rt2800_bbp_write(rt2x00dev, 159, cal);
2525
2526 /* TX1 IQ Phase */
2527 rt2800_bbp_write(rt2x00dev, 158, 0x4b);
2528 if (channel <= 14)
2529 cal = rt2x00_eeprom_byte(rt2x00dev, EEPROM_IQ_PHASE_CAL_TX1_2G);
2530 else if (channel >= 36 && channel <= 64)
2531 cal = rt2x00_eeprom_byte(rt2x00dev,
2532 EEPROM_IQ_PHASE_CAL_TX1_CH36_TO_CH64_5G);
2533 else if (channel >= 100 && channel <= 138)
2534 cal = rt2x00_eeprom_byte(rt2x00dev,
2535 EEPROM_IQ_PHASE_CAL_TX1_CH100_TO_CH138_5G);
2536 else if (channel >= 140 && channel <= 165)
2537 cal = rt2x00_eeprom_byte(rt2x00dev,
2538 EEPROM_IQ_PHASE_CAL_TX1_CH140_TO_CH165_5G);
2539 else
2540 cal = 0;
2541 rt2800_bbp_write(rt2x00dev, 159, cal);
2542
2543 /* FIXME: possible RX0, RX1 calibration? */
2544
2545 /* RF IQ compensation control */
2546 rt2800_bbp_write(rt2x00dev, 158, 0x04);
2547 cal = rt2x00_eeprom_byte(rt2x00dev, EEPROM_RF_IQ_COMPENSATION_CONTROL);
2548 rt2800_bbp_write(rt2x00dev, 159, cal != 0xff ? cal : 0);
2549
2550 /* RF IQ imbalance compensation control */
2551 rt2800_bbp_write(rt2x00dev, 158, 0x03);
2552 cal = rt2x00_eeprom_byte(rt2x00dev,
2553 EEPROM_RF_IQ_IMBALANCE_COMPENSATION_CONTROL);
2554 rt2800_bbp_write(rt2x00dev, 159, cal != 0xff ? cal : 0);
2555}
2556
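rt2800_iq_calibrate() relies on the same indirect-register idiom with a different pair: BBP 158 acts as the address latch and BBP 159 as the data port, so every calibration byte costs two writes:

	rt2800_bbp_write(rt2x00dev, 158, 0x2c);	/* select the TX0 IQ gain slot */
	rt2800_bbp_write(rt2x00dev, 159, cal);	/* store the EEPROM-derived byte */
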
2187static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, 2557static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
2188 struct ieee80211_conf *conf, 2558 struct ieee80211_conf *conf,
2189 struct rf_channel *rf, 2559 struct rf_channel *rf,
@@ -2225,6 +2595,9 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
2225 case RF5392: 2595 case RF5392:
2226 rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info); 2596 rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info);
2227 break; 2597 break;
2598 case RF5592:
2599 rt2800_config_channel_rf55xx(rt2x00dev, conf, rf, info);
2600 break;
2228 default: 2601 default:
2229 rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info); 2602 rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
2230 } 2603 }
@@ -2326,6 +2699,17 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
2326 if (rt2x00_rt(rt2x00dev, RT3572)) 2699 if (rt2x00_rt(rt2x00dev, RT3572))
2327 rt2800_rfcsr_write(rt2x00dev, 8, 0x80); 2700 rt2800_rfcsr_write(rt2x00dev, 8, 0x80);
2328 2701
2702 if (rt2x00_rt(rt2x00dev, RT5592)) {
2703 rt2800_bbp_write(rt2x00dev, 195, 141);
2704 rt2800_bbp_write(rt2x00dev, 196, conf_is_ht40(conf) ? 0x10 : 0x1a);
2705
2706 /* AGC init */
2707 reg = (rf->channel <= 14 ? 0x1c : 0x24) + 2 * rt2x00dev->lna_gain;
2708 rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg);
2709
2710 rt2800_iq_calibrate(rt2x00dev, rf->channel);
2711 }
2712
2329 rt2800_bbp_read(rt2x00dev, 4, &bbp); 2713 rt2800_bbp_read(rt2x00dev, 4, &bbp);
2330 rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * conf_is_ht40(conf)); 2714 rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * conf_is_ht40(conf));
2331 rt2800_bbp_write(rt2x00dev, 4, bbp); 2715 rt2800_bbp_write(rt2x00dev, 4, bbp);
@@ -2938,13 +3322,16 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
2938 rt2x00_rt(rt2x00dev, RT3390) || 3322 rt2x00_rt(rt2x00dev, RT3390) ||
2939 rt2x00_rt(rt2x00dev, RT3572) || 3323 rt2x00_rt(rt2x00dev, RT3572) ||
2940 rt2x00_rt(rt2x00dev, RT5390) || 3324 rt2x00_rt(rt2x00dev, RT5390) ||
2941 rt2x00_rt(rt2x00dev, RT5392)) 3325 rt2x00_rt(rt2x00dev, RT5392) ||
3326 rt2x00_rt(rt2x00dev, RT5592))
2942 vgc = 0x1c + (2 * rt2x00dev->lna_gain); 3327 vgc = 0x1c + (2 * rt2x00dev->lna_gain);
2943 else 3328 else
2944 vgc = 0x2e + rt2x00dev->lna_gain; 3329 vgc = 0x2e + rt2x00dev->lna_gain;
2945 } else { /* 5GHZ band */ 3330 } else { /* 5GHZ band */
2946 if (rt2x00_rt(rt2x00dev, RT3572)) 3331 if (rt2x00_rt(rt2x00dev, RT3572))
2947 vgc = 0x22 + (rt2x00dev->lna_gain * 5) / 3; 3332 vgc = 0x22 + (rt2x00dev->lna_gain * 5) / 3;
3333 else if (rt2x00_rt(rt2x00dev, RT5592))
3334 vgc = 0x24 + (2 * rt2x00dev->lna_gain);
2948 else { 3335 else {
2949 if (!test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags)) 3336 if (!test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
2950 vgc = 0x32 + (rt2x00dev->lna_gain * 5) / 3; 3337 vgc = 0x32 + (rt2x00dev->lna_gain * 5) / 3;
@@ -2960,7 +3347,11 @@ static inline void rt2800_set_vgc(struct rt2x00_dev *rt2x00dev,
2960 struct link_qual *qual, u8 vgc_level) 3347 struct link_qual *qual, u8 vgc_level)
2961{ 3348{
2962 if (qual->vgc_level != vgc_level) { 3349 if (qual->vgc_level != vgc_level) {
2963 rt2800_bbp_write(rt2x00dev, 66, vgc_level); 3350 if (rt2x00_rt(rt2x00dev, RT5592)) {
3351 rt2800_bbp_write(rt2x00dev, 83, qual->rssi > -65 ? 0x4a : 0x7a);
3352 rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, vgc_level);
3353 } else
3354 rt2800_bbp_write(rt2x00dev, 66, vgc_level);
2964 qual->vgc_level = vgc_level; 3355 qual->vgc_level = vgc_level;
2965 qual->vgc_level_reg = vgc_level; 3356 qual->vgc_level_reg = vgc_level;
2966 } 3357 }
@@ -2975,15 +3366,23 @@ EXPORT_SYMBOL_GPL(rt2800_reset_tuner);
2975void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual, 3366void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
2976 const u32 count) 3367 const u32 count)
2977{ 3368{
3369 u8 vgc;
3370
2978 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) 3371 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C))
2979 return; 3372 return;
2980
2981 /* 3373 /*
2982 * When RSSI is better then -80 increase VGC level with 0x10 3374 * When RSSI is better than -80, increase the VGC level by 0x10, except
3375 * for the rt5592 chip.
2983 */ 3376 */
2984 rt2800_set_vgc(rt2x00dev, qual, 3377
2985 rt2800_get_default_vgc(rt2x00dev) + 3378 vgc = rt2800_get_default_vgc(rt2x00dev);
2986 ((qual->rssi > -80) * 0x10)); 3379
3380 if (rt2x00_rt(rt2x00dev, RT5592) && qual->rssi > -65)
3381 vgc += 0x20;
3382 else if (qual->rssi > -80)
3383 vgc += 0x10;
3384
3385 rt2800_set_vgc(rt2x00dev, qual, vgc);
2987} 3386}
2988EXPORT_SYMBOL_GPL(rt2800_link_tuner); 3387EXPORT_SYMBOL_GPL(rt2800_link_tuner);
2989 3388
@@ -3122,7 +3521,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
3122 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400); 3521 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
3123 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); 3522 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
3124 } else if (rt2x00_rt(rt2x00dev, RT5390) || 3523 } else if (rt2x00_rt(rt2x00dev, RT5390) ||
3125 rt2x00_rt(rt2x00dev, RT5392)) { 3524 rt2x00_rt(rt2x00dev, RT5392) ||
3525 rt2x00_rt(rt2x00dev, RT5592)) {
3126 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404); 3526 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404);
3127 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); 3527 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
3128 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); 3528 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
@@ -3302,7 +3702,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
3302 rt2x00_set_field32(&reg, TXOP_CTRL_CFG_EXT_CWMIN, 0); 3702 rt2x00_set_field32(&reg, TXOP_CTRL_CFG_EXT_CWMIN, 0);
3303 rt2800_register_write(rt2x00dev, TXOP_CTRL_CFG, reg); 3703 rt2800_register_write(rt2x00dev, TXOP_CTRL_CFG, reg);
3304 3704
3305 rt2800_register_write(rt2x00dev, TXOP_HLDR_ET, 0x00000002); 3705 reg = rt2x00_rt(rt2x00dev, RT5592) ? 0x00000082 : 0x00000002;
3706 rt2800_register_write(rt2x00dev, TXOP_HLDR_ET, reg);
3306 3707
3307 rt2800_register_read(rt2x00dev, TX_RTS_CFG, &reg); 3708 rt2800_register_read(rt2x00dev, TX_RTS_CFG, &reg);
3308 rt2x00_set_field32(&reg, TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT, 32); 3709 rt2x00_set_field32(&reg, TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT, 32);
@@ -3487,6 +3888,136 @@ static int rt2800_wait_bbp_ready(struct rt2x00_dev *rt2x00dev)
3487 return -EACCES; 3888 return -EACCES;
3488} 3889}
3489 3890
3891static void rt2800_bbp4_mac_if_ctrl(struct rt2x00_dev *rt2x00dev)
3892{
3893 u8 value;
3894
3895 rt2800_bbp_read(rt2x00dev, 4, &value);
3896 rt2x00_set_field8(&value, BBP4_MAC_IF_CTRL, 1);
3897 rt2800_bbp_write(rt2x00dev, 4, value);
3898}
3899
3900static void rt2800_init_freq_calibration(struct rt2x00_dev *rt2x00dev)
3901{
3902 rt2800_bbp_write(rt2x00dev, 142, 1);
3903 rt2800_bbp_write(rt2x00dev, 143, 57);
3904}
3905
3906static void rt2800_init_bbp_5592_glrt(struct rt2x00_dev *rt2x00dev)
3907{
3908 const u8 glrt_table[] = {
3909 0xE0, 0x1F, 0x38, 0x32, 0x08, 0x28, 0x19, 0x0A, 0xFF, 0x00, /* 128 ~ 137 */
3910 0x16, 0x10, 0x10, 0x0B, 0x36, 0x2C, 0x26, 0x24, 0x42, 0x36, /* 138 ~ 147 */
3911 0x30, 0x2D, 0x4C, 0x46, 0x3D, 0x40, 0x3E, 0x42, 0x3D, 0x40, /* 148 ~ 157 */
3912 0x3C, 0x34, 0x2C, 0x2F, 0x3C, 0x35, 0x2E, 0x2A, 0x49, 0x41, /* 158 ~ 167 */
3913 0x36, 0x31, 0x30, 0x30, 0x0E, 0x0D, 0x28, 0x21, 0x1C, 0x16, /* 168 ~ 177 */
3914 0x50, 0x4A, 0x43, 0x40, 0x10, 0x10, 0x10, 0x10, 0x00, 0x00, /* 178 ~ 187 */
3915 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 188 ~ 197 */
3916 0x00, 0x00, 0x7D, 0x14, 0x32, 0x2C, 0x36, 0x4C, 0x43, 0x2C, /* 198 ~ 207 */
3917 0x2E, 0x36, 0x30, 0x6E, /* 208 ~ 211 */
3918 };
3919 int i;
3920
3921 for (i = 0; i < ARRAY_SIZE(glrt_table); i++) {
3922 rt2800_bbp_write(rt2x00dev, 195, 128 + i);
3923 rt2800_bbp_write(rt2x00dev, 196, glrt_table[i]);
3924 }
3925}
3926
3927static void rt2800_init_bbp_early(struct rt2x00_dev *rt2x00dev)
3928{
3929 rt2800_bbp_write(rt2x00dev, 65, 0x2C);
3930 rt2800_bbp_write(rt2x00dev, 66, 0x38);
3931 rt2800_bbp_write(rt2x00dev, 68, 0x0B);
3932 rt2800_bbp_write(rt2x00dev, 69, 0x12);
3933 rt2800_bbp_write(rt2x00dev, 70, 0x0a);
3934 rt2800_bbp_write(rt2x00dev, 73, 0x10);
3935 rt2800_bbp_write(rt2x00dev, 81, 0x37);
3936 rt2800_bbp_write(rt2x00dev, 82, 0x62);
3937 rt2800_bbp_write(rt2x00dev, 83, 0x6A);
3938 rt2800_bbp_write(rt2x00dev, 84, 0x99);
3939 rt2800_bbp_write(rt2x00dev, 86, 0x00);
3940 rt2800_bbp_write(rt2x00dev, 91, 0x04);
3941 rt2800_bbp_write(rt2x00dev, 92, 0x00);
3942 rt2800_bbp_write(rt2x00dev, 103, 0x00);
3943 rt2800_bbp_write(rt2x00dev, 105, 0x05);
3944 rt2800_bbp_write(rt2x00dev, 106, 0x35);
3945}
3946
3947static void rt2800_init_bbp_5592(struct rt2x00_dev *rt2x00dev)
3948{
3949 int ant, div_mode;
3950 u16 eeprom;
3951 u8 value;
3952
3953 rt2800_init_bbp_early(rt2x00dev);
3954
3955 rt2800_bbp_read(rt2x00dev, 105, &value);
3956 rt2x00_set_field8(&value, BBP105_MLD,
3957 rt2x00dev->default_ant.rx_chain_num == 2);
3958 rt2800_bbp_write(rt2x00dev, 105, value);
3959
3960 rt2800_bbp4_mac_if_ctrl(rt2x00dev);
3961
3962 rt2800_bbp_write(rt2x00dev, 20, 0x06);
3963 rt2800_bbp_write(rt2x00dev, 31, 0x08);
3964 rt2800_bbp_write(rt2x00dev, 65, 0x2C);
3965 rt2800_bbp_write(rt2x00dev, 68, 0xDD);
3966 rt2800_bbp_write(rt2x00dev, 69, 0x1A);
3967 rt2800_bbp_write(rt2x00dev, 70, 0x05);
3968 rt2800_bbp_write(rt2x00dev, 73, 0x13);
3969 rt2800_bbp_write(rt2x00dev, 74, 0x0F);
3970 rt2800_bbp_write(rt2x00dev, 75, 0x4F);
3971 rt2800_bbp_write(rt2x00dev, 76, 0x28);
3972 rt2800_bbp_write(rt2x00dev, 77, 0x59);
3973 rt2800_bbp_write(rt2x00dev, 84, 0x9A);
3974 rt2800_bbp_write(rt2x00dev, 86, 0x38);
3975 rt2800_bbp_write(rt2x00dev, 88, 0x90);
3976 rt2800_bbp_write(rt2x00dev, 91, 0x04);
3977 rt2800_bbp_write(rt2x00dev, 92, 0x02);
3978 rt2800_bbp_write(rt2x00dev, 95, 0x9a);
3979 rt2800_bbp_write(rt2x00dev, 98, 0x12);
3980 rt2800_bbp_write(rt2x00dev, 103, 0xC0);
3981 rt2800_bbp_write(rt2x00dev, 104, 0x92);
3982 /* FIXME BBP105 owerwrite */
3983 rt2800_bbp_write(rt2x00dev, 105, 0x3C);
3984 rt2800_bbp_write(rt2x00dev, 106, 0x35);
3985 rt2800_bbp_write(rt2x00dev, 128, 0x12);
3986 rt2800_bbp_write(rt2x00dev, 134, 0xD0);
3987 rt2800_bbp_write(rt2x00dev, 135, 0xF6);
3988 rt2800_bbp_write(rt2x00dev, 137, 0x0F);
3989
3990 /* Initialize GLRT (Generalized Likelihood Ratio Test) */
3991 rt2800_init_bbp_5592_glrt(rt2x00dev);
3992
3993 rt2800_bbp4_mac_if_ctrl(rt2x00dev);
3994
3995 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
3996 div_mode = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY);
3997 ant = (div_mode == 3) ? 1 : 0;
3998 rt2800_bbp_read(rt2x00dev, 152, &value);
3999 if (ant == 0) {
4000 /* Main antenna */
4001 rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1);
4002 } else {
4003 /* Auxiliary antenna */
4004 rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0);
4005 }
4006 rt2800_bbp_write(rt2x00dev, 152, value);
4007
4008 if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) {
4009 rt2800_bbp_read(rt2x00dev, 254, &value);
4010 rt2x00_set_field8(&value, BBP254_BIT7, 1);
4011 rt2800_bbp_write(rt2x00dev, 254, value);
4012 }
4013
4014 rt2800_init_freq_calibration(rt2x00dev);
4015
4016 rt2800_bbp_write(rt2x00dev, 84, 0x19);
4017 if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C))
4018 rt2800_bbp_write(rt2x00dev, 103, 0xc0);
4019}
4020
3490static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev) 4021static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3491{ 4022{
3492 unsigned int i; 4023 unsigned int i;
@@ -3498,6 +4029,11 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3498 rt2800_wait_bbp_ready(rt2x00dev))) 4029 rt2800_wait_bbp_ready(rt2x00dev)))
3499 return -EACCES; 4030 return -EACCES;
3500 4031
4032 if (rt2x00_rt(rt2x00dev, RT5592)) {
4033 rt2800_init_bbp_5592(rt2x00dev);
4034 return 0;
4035 }
4036
3501 if (rt2x00_rt(rt2x00dev, RT3352)) { 4037 if (rt2x00_rt(rt2x00dev, RT3352)) {
3502 rt2800_bbp_write(rt2x00dev, 3, 0x00); 4038 rt2800_bbp_write(rt2x00dev, 3, 0x00);
3503 rt2800_bbp_write(rt2x00dev, 4, 0x50); 4039 rt2800_bbp_write(rt2x00dev, 4, 0x50);
@@ -3505,11 +4041,8 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3505 4041
3506 if (rt2x00_rt(rt2x00dev, RT3290) || 4042 if (rt2x00_rt(rt2x00dev, RT3290) ||
3507 rt2x00_rt(rt2x00dev, RT5390) || 4043 rt2x00_rt(rt2x00dev, RT5390) ||
3508 rt2x00_rt(rt2x00dev, RT5392)) { 4044 rt2x00_rt(rt2x00dev, RT5392))
3509 rt2800_bbp_read(rt2x00dev, 4, &value); 4045 rt2800_bbp4_mac_if_ctrl(rt2x00dev);
3510 rt2x00_set_field8(&value, BBP4_MAC_IF_CTRL, 1);
3511 rt2800_bbp_write(rt2x00dev, 4, value);
3512 }
3513 4046
3514 if (rt2800_is_305x_soc(rt2x00dev) || 4047 if (rt2800_is_305x_soc(rt2x00dev) ||
3515 rt2x00_rt(rt2x00dev, RT3290) || 4048 rt2x00_rt(rt2x00dev, RT3290) ||
@@ -3783,9 +4316,7 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
3783 rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0); 4316 rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0);
3784 rt2800_bbp_write(rt2x00dev, 152, value); 4317 rt2800_bbp_write(rt2x00dev, 152, value);
3785 4318
3786 /* Init frequency calibration */ 4319 rt2800_init_freq_calibration(rt2x00dev);
3787 rt2800_bbp_write(rt2x00dev, 142, 1);
3788 rt2800_bbp_write(rt2x00dev, 143, 57);
3789 } 4320 }
3790 4321
3791 for (i = 0; i < EEPROM_BBP_SIZE; i++) { 4322 for (i = 0; i < EEPROM_BBP_SIZE; i++) {
@@ -4259,6 +4790,69 @@ static void rt2800_init_rfcsr_5392(struct rt2x00_dev *rt2x00dev)
4259 rt2800_rfcsr_write(rt2x00dev, 63, 0x07); 4790 rt2800_rfcsr_write(rt2x00dev, 63, 0x07);
4260} 4791}
4261 4792
4793static void rt2800_init_rfcsr_5592(struct rt2x00_dev *rt2x00dev)
4794{
4795 u8 reg;
4796 u16 eeprom;
4797
4798 rt2800_rfcsr_write(rt2x00dev, 1, 0x3F);
4799 rt2800_rfcsr_write(rt2x00dev, 3, 0x08);
4800 rt2800_rfcsr_write(rt2x00dev, 3, 0x08);
4801 rt2800_rfcsr_write(rt2x00dev, 5, 0x10);
4802 rt2800_rfcsr_write(rt2x00dev, 6, 0xE4);
4803 rt2800_rfcsr_write(rt2x00dev, 7, 0x00);
4804 rt2800_rfcsr_write(rt2x00dev, 14, 0x00);
4805 rt2800_rfcsr_write(rt2x00dev, 15, 0x00);
4806 rt2800_rfcsr_write(rt2x00dev, 16, 0x00);
4807 rt2800_rfcsr_write(rt2x00dev, 18, 0x03);
4808 rt2800_rfcsr_write(rt2x00dev, 19, 0x4D);
4809 rt2800_rfcsr_write(rt2x00dev, 20, 0x10);
4810 rt2800_rfcsr_write(rt2x00dev, 21, 0x8D);
4811 rt2800_rfcsr_write(rt2x00dev, 26, 0x82);
4812 rt2800_rfcsr_write(rt2x00dev, 28, 0x00);
4813 rt2800_rfcsr_write(rt2x00dev, 29, 0x10);
4814 rt2800_rfcsr_write(rt2x00dev, 33, 0xC0);
4815 rt2800_rfcsr_write(rt2x00dev, 34, 0x07);
4816 rt2800_rfcsr_write(rt2x00dev, 35, 0x12);
4817 rt2800_rfcsr_write(rt2x00dev, 47, 0x0C);
4818 rt2800_rfcsr_write(rt2x00dev, 53, 0x22);
4819 rt2800_rfcsr_write(rt2x00dev, 63, 0x07);
4820
4821 rt2800_rfcsr_write(rt2x00dev, 2, 0x80);
4822 msleep(1);
4823
4824 rt2800_adjust_freq_offset(rt2x00dev);
4825
4826 rt2800_bbp_read(rt2x00dev, 138, &reg);
4827
4828 /* Turn off unused DAC1 and ADC1 to reduce power consumption */
4829 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
4830 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
4831 rt2x00_set_field8(&reg, BBP138_RX_ADC1, 0);
4832 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1)
4833 rt2x00_set_field8(&reg, BBP138_TX_DAC1, 1);
4834
4835 rt2800_bbp_write(rt2x00dev, 138, reg);
4836
4837 /* Enable DC filter */
4838 if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C))
4839 rt2800_bbp_write(rt2x00dev, 103, 0xc0);
4840
4841 rt2800_rfcsr_read(rt2x00dev, 38, &reg);
4842 rt2x00_set_field8(&reg, RFCSR38_RX_LO1_EN, 0);
4843 rt2800_rfcsr_write(rt2x00dev, 38, reg);
4844
4845 rt2800_rfcsr_read(rt2x00dev, 39, &reg);
4846 rt2x00_set_field8(&reg, RFCSR39_RX_LO2_EN, 0);
4847 rt2800_rfcsr_write(rt2x00dev, 39, reg);
4848
4849 rt2800_bbp4_mac_if_ctrl(rt2x00dev);
4850
4851 rt2800_rfcsr_read(rt2x00dev, 30, &reg);
4852 rt2x00_set_field8(&reg, RFCSR30_RX_VCM, 2);
4853 rt2800_rfcsr_write(rt2x00dev, 30, reg);
4854}
4855
4262static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) 4856static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
4263{ 4857{
4264 struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; 4858 struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
@@ -4276,6 +4870,8 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
4276 !rt2x00_rt(rt2x00dev, RT3572) && 4870 !rt2x00_rt(rt2x00dev, RT3572) &&
4277 !rt2x00_rt(rt2x00dev, RT5390) && 4871 !rt2x00_rt(rt2x00dev, RT5390) &&
4278 !rt2x00_rt(rt2x00dev, RT5392) && 4872 !rt2x00_rt(rt2x00dev, RT5392) &&
4874 !rt2x00_rt(rt2x00dev, RT5592) &&
4279 !rt2800_is_305x_soc(rt2x00dev)) 4875 !rt2800_is_305x_soc(rt2x00dev))
4280 return 0; 4876 return 0;
4281 4877
@@ -4330,6 +4926,9 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
4330 case RT5392: 4926 case RT5392:
4331 rt2800_init_rfcsr_5392(rt2x00dev); 4927 rt2800_init_rfcsr_5392(rt2x00dev);
4332 break; 4928 break;
4929 case RT5592:
4930 rt2800_init_rfcsr_5592(rt2x00dev);
4931 return 0;
4333 } 4932 }
4334 4933
4335 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) { 4934 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
@@ -4427,7 +5026,8 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
4427 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) || 5026 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) ||
4428 rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) || 5027 rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
4429 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) || 5028 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
4430 rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) 5029 rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E) ||
5030 rt2x00_rt_rev_lt(rt2x00dev, RT5592, REV_RT5592C))
4431 rt2800_rfcsr_write(rt2x00dev, 27, 0x03); 5031 rt2800_rfcsr_write(rt2x00dev, 27, 0x03);
4432 5032
4433 rt2800_register_read(rt2x00dev, OPT_14_CSR, &reg); 5033 rt2800_register_read(rt2x00dev, OPT_14_CSR, &reg);
@@ -4451,7 +5050,8 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
 		rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
 	}
 
-	if (rt2x00_rt(rt2x00dev, RT3090)) {
+	if (rt2x00_rt(rt2x00dev, RT3090) ||
+	    rt2x00_rt(rt2x00dev, RT5592)) {
 		rt2800_bbp_read(rt2x00dev, 138, &bbp);
 
 		/* Turn off unused DAC1 and ADC1 to reduce power consumption */
@@ -4507,7 +5107,8 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
 	}
 
 	if (rt2x00_rt(rt2x00dev, RT5390) ||
-	    rt2x00_rt(rt2x00dev, RT5392)) {
+	    rt2x00_rt(rt2x00dev, RT5392) ||
+	    rt2x00_rt(rt2x00dev, RT5592)) {
 		rt2800_rfcsr_read(rt2x00dev, 38, &rfcsr);
 		rt2x00_set_field8(&rfcsr, RFCSR38_RX_LO1_EN, 0);
 		rt2800_rfcsr_write(rt2x00dev, 38, rfcsr);
@@ -4533,15 +5134,23 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
 	 * Initialize all registers.
 	 */
 	if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
-		     rt2800_init_registers(rt2x00dev) ||
-		     rt2800_init_bbp(rt2x00dev) ||
-		     rt2800_init_rfcsr(rt2x00dev)))
+		     rt2800_init_registers(rt2x00dev)))
 		return -EIO;
 
 	/*
 	 * Send signal to firmware during boot time.
 	 */
-	rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
+	rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
+	rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
+	if (rt2x00_is_usb(rt2x00dev)) {
+		rt2800_register_write(rt2x00dev, H2M_INT_SRC, 0);
+		rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
+	}
+	msleep(1);
+
+	if (unlikely(rt2800_init_bbp(rt2x00dev) ||
+		     rt2800_init_rfcsr(rt2x00dev)))
+		return -EIO;
 
 	if (rt2x00_is_usb(rt2x00dev) &&
 	    (rt2x00_rt(rt2x00dev, RT3070) ||
@@ -4863,6 +5472,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
 	case RT3572:
 	case RT5390:
 	case RT5392:
+	case RT5592:
 		break;
 	default:
 		ERROR(rt2x00dev, "Invalid RT chipset 0x%04x detected.\n", rt2x00dev->chip.rt);
@@ -4887,6 +5497,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
 	case RF5372:
 	case RF5390:
 	case RF5392:
+	case RF5592:
 		break;
 	default:
 		ERROR(rt2x00dev, "Invalid RF chipset 0x%04x detected.\n",
@@ -5122,6 +5733,138 @@ static const struct rf_channel rf_vals_3x[] = {
 	{173, 0x61, 0, 9},
 };
 
+static const struct rf_channel rf_vals_5592_xtal20[] = {
+	/* Channel, N, K, mod, R */
+	{1, 482, 4, 10, 3},
+	{2, 483, 4, 10, 3},
+	{3, 484, 4, 10, 3},
+	{4, 485, 4, 10, 3},
+	{5, 486, 4, 10, 3},
+	{6, 487, 4, 10, 3},
+	{7, 488, 4, 10, 3},
+	{8, 489, 4, 10, 3},
+	{9, 490, 4, 10, 3},
+	{10, 491, 4, 10, 3},
+	{11, 492, 4, 10, 3},
+	{12, 493, 4, 10, 3},
+	{13, 494, 4, 10, 3},
+	{14, 496, 8, 10, 3},
+	{36, 172, 8, 12, 1},
+	{38, 173, 0, 12, 1},
+	{40, 173, 4, 12, 1},
+	{42, 173, 8, 12, 1},
+	{44, 174, 0, 12, 1},
+	{46, 174, 4, 12, 1},
+	{48, 174, 8, 12, 1},
+	{50, 175, 0, 12, 1},
+	{52, 175, 4, 12, 1},
+	{54, 175, 8, 12, 1},
+	{56, 176, 0, 12, 1},
+	{58, 176, 4, 12, 1},
+	{60, 176, 8, 12, 1},
+	{62, 177, 0, 12, 1},
+	{64, 177, 4, 12, 1},
+	{100, 183, 4, 12, 1},
+	{102, 183, 8, 12, 1},
+	{104, 184, 0, 12, 1},
+	{106, 184, 4, 12, 1},
+	{108, 184, 8, 12, 1},
+	{110, 185, 0, 12, 1},
+	{112, 185, 4, 12, 1},
+	{114, 185, 8, 12, 1},
+	{116, 186, 0, 12, 1},
+	{118, 186, 4, 12, 1},
+	{120, 186, 8, 12, 1},
+	{122, 187, 0, 12, 1},
+	{124, 187, 4, 12, 1},
+	{126, 187, 8, 12, 1},
+	{128, 188, 0, 12, 1},
+	{130, 188, 4, 12, 1},
+	{132, 188, 8, 12, 1},
+	{134, 189, 0, 12, 1},
+	{136, 189, 4, 12, 1},
+	{138, 189, 8, 12, 1},
+	{140, 190, 0, 12, 1},
+	{149, 191, 6, 12, 1},
+	{151, 191, 10, 12, 1},
+	{153, 192, 2, 12, 1},
+	{155, 192, 6, 12, 1},
+	{157, 192, 10, 12, 1},
+	{159, 193, 2, 12, 1},
+	{161, 193, 6, 12, 1},
+	{165, 194, 2, 12, 1},
+	{184, 164, 0, 12, 1},
+	{188, 164, 4, 12, 1},
+	{192, 165, 8, 12, 1},
+	{196, 166, 0, 12, 1},
+};
+
+static const struct rf_channel rf_vals_5592_xtal40[] = {
+	/* Channel, N, K, mod, R */
+	{1, 241, 2, 10, 3},
+	{2, 241, 7, 10, 3},
+	{3, 242, 2, 10, 3},
+	{4, 242, 7, 10, 3},
+	{5, 243, 2, 10, 3},
+	{6, 243, 7, 10, 3},
+	{7, 244, 2, 10, 3},
+	{8, 244, 7, 10, 3},
+	{9, 245, 2, 10, 3},
+	{10, 245, 7, 10, 3},
+	{11, 246, 2, 10, 3},
+	{12, 246, 7, 10, 3},
+	{13, 247, 2, 10, 3},
+	{14, 248, 4, 10, 3},
+	{36, 86, 4, 12, 1},
+	{38, 86, 6, 12, 1},
+	{40, 86, 8, 12, 1},
+	{42, 86, 10, 12, 1},
+	{44, 87, 0, 12, 1},
+	{46, 87, 2, 12, 1},
+	{48, 87, 4, 12, 1},
+	{50, 87, 6, 12, 1},
+	{52, 87, 8, 12, 1},
+	{54, 87, 10, 12, 1},
+	{56, 88, 0, 12, 1},
+	{58, 88, 2, 12, 1},
+	{60, 88, 4, 12, 1},
+	{62, 88, 6, 12, 1},
+	{64, 88, 8, 12, 1},
+	{100, 91, 8, 12, 1},
+	{102, 91, 10, 12, 1},
+	{104, 92, 0, 12, 1},
+	{106, 92, 2, 12, 1},
+	{108, 92, 4, 12, 1},
+	{110, 92, 6, 12, 1},
+	{112, 92, 8, 12, 1},
+	{114, 92, 10, 12, 1},
+	{116, 93, 0, 12, 1},
+	{118, 93, 2, 12, 1},
+	{120, 93, 4, 12, 1},
+	{122, 93, 6, 12, 1},
+	{124, 93, 8, 12, 1},
+	{126, 93, 10, 12, 1},
+	{128, 94, 0, 12, 1},
+	{130, 94, 2, 12, 1},
+	{132, 94, 4, 12, 1},
+	{134, 94, 6, 12, 1},
+	{136, 94, 8, 12, 1},
+	{138, 94, 10, 12, 1},
+	{140, 95, 0, 12, 1},
+	{149, 95, 9, 12, 1},
+	{151, 95, 11, 12, 1},
+	{153, 96, 1, 12, 1},
+	{155, 96, 3, 12, 1},
+	{157, 96, 5, 12, 1},
+	{159, 96, 7, 12, 1},
+	{161, 96, 9, 12, 1},
+	{165, 97, 1, 12, 1},
+	{184, 82, 0, 12, 1},
+	{188, 82, 4, 12, 1},
+	{192, 82, 8, 12, 1},
+	{196, 83, 0, 12, 1},
+};
+
 static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 {
 	struct hw_mode_spec *spec = &rt2x00dev->spec;
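
The N/K/mod/R entries in the new tables are fractional-N synthesizer settings. As a sanity check — this relation is an empirical reading of the tables, not something the driver states — the 2.4 GHz rows (R = 3) follow f = xtal * (N + K/mod) / 4 and the 5 GHz rows (R = 1) follow f = xtal * (N + K/mod) * 3/2, which reproduces the standard channel map for both crystal options:

#include <stdio.h>

/* integer math in kHz; frac carries N + K/mod scaled by mod */
static unsigned int chan_khz(unsigned int xtal_khz, unsigned int n,
			     unsigned int k, unsigned int mod, int band5g)
{
	unsigned long frac = (unsigned long)n * mod + k;

	return band5g ? xtal_khz * frac * 3 / (mod * 2)
		      : xtal_khz * frac / (mod * 4);
}

int main(void)
{
	/* {1, 241, 2, 10, 3} @ 40 MHz -> 2412000; {36, 86, 4, 12, 1} -> 5180000 */
	printf("%u %u\n", chan_khz(40000, 241, 2, 10, 0),
	       chan_khz(40000, 86, 4, 12, 1));
	/* {1, 482, 4, 10, 3} @ 20 MHz -> 2412000; {36, 172, 8, 12, 1} -> 5180000 */
	printf("%u %u\n", chan_khz(20000, 482, 4, 10, 0),
	       chan_khz(20000, 172, 8, 12, 1));
	return 0;
}
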
@@ -5130,6 +5873,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 	char *default_power2;
 	unsigned int i;
 	u16 eeprom;
+	u32 reg;
 
 	/*
 	 * Disable powersaving as default on PCI devices.
@@ -5211,8 +5955,22 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
 		spec->supported_bands |= SUPPORT_BAND_5GHZ;
 		spec->num_channels = ARRAY_SIZE(rf_vals_3x);
 		spec->channels = rf_vals_3x;
+	} else if (rt2x00_rf(rt2x00dev, RF5592)) {
+		spec->supported_bands |= SUPPORT_BAND_5GHZ;
+
+		rt2800_register_read(rt2x00dev, MAC_DEBUG_INDEX, &reg);
+		if (rt2x00_get_field32(reg, MAC_DEBUG_INDEX_XTAL)) {
+			spec->num_channels = ARRAY_SIZE(rf_vals_5592_xtal40);
+			spec->channels = rf_vals_5592_xtal40;
+		} else {
+			spec->num_channels = ARRAY_SIZE(rf_vals_5592_xtal20);
+			spec->channels = rf_vals_5592_xtal20;
+		}
 	}
 
+	if (WARN_ON_ONCE(!spec->channels))
+		return -ENODEV;
+
 	/*
 	 * Initialize HT information.
 	 */
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index ded73da4de0b..f732ded8f1ba 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -729,6 +729,11 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
 	 * Process the RXWI structure that is at the start of the buffer.
 	 */
 	rt2800_process_rxwi(entry, rxdesc);
+
+	/*
+	 * Remove RXWI descriptor from start of buffer.
+	 */
+	skb_pull(entry->skb, RXWI_DESC_SIZE);
 }
 
 /*
@@ -742,10 +747,91 @@ static void rt2800pci_wakeup(struct rt2x00_dev *rt2x00dev)
 	rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
 }
 
+static bool rt2800pci_txdone_entry_check(struct queue_entry *entry, u32 status)
+{
+	__le32 *txwi;
+	u32 word;
+	int wcid, tx_wcid;
+
+	wcid = rt2x00_get_field32(status, TX_STA_FIFO_WCID);
+
+	txwi = rt2800_drv_get_txwi(entry);
+	rt2x00_desc_read(txwi, 1, &word);
+	tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
+
+	return (tx_wcid == wcid);
+}
+
+static bool rt2800pci_txdone_find_entry(struct queue_entry *entry, void *data)
+{
+	u32 status = *(u32 *)data;
+
+	/*
+	 * rt2800pci hardware might reorder frames when exchanging traffic
+	 * with multiple BA enabled STAs.
+	 *
+	 * For example, a tx queue
+	 * [ STA1 | STA2 | STA1 | STA2 ]
+	 * can result in tx status reports
+	 * [ STA1 | STA1 | STA2 | STA2 ]
+	 * when the hw decides to aggregate the frames for STA1 into one AMPDU.
+	 *
+	 * To mitigate this effect, associate the tx status to the first frame
+	 * in the tx queue with a matching wcid.
+	 */
+	if (rt2800pci_txdone_entry_check(entry, status) &&
+	    !test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
+		/*
+		 * Got a matching frame, associate the tx status with
+		 * the frame.
+		 */
+		entry->status = status;
+		set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
+		return true;
+	}
+
+	/* Check the next frame */
+	return false;
+}
+
+static bool rt2800pci_txdone_match_first(struct queue_entry *entry, void *data)
+{
+	u32 status = *(u32 *)data;
+
+	/*
+	 * Find the first frame without tx status and assign this status to it
+	 * regardless if it matches or not.
+	 */
+	if (!test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
+		/*
+		 * Found a frame without tx status, associate this
+		 * tx status with it.
+		 */
+		entry->status = status;
+		set_bit(ENTRY_DATA_STATUS_SET, &entry->flags);
+		return true;
+	}
+
+	/* Check the next frame */
+	return false;
+}
+
+static bool rt2800pci_txdone_release_entries(struct queue_entry *entry,
+					     void *data)
+{
+	if (test_bit(ENTRY_DATA_STATUS_SET, &entry->flags)) {
+		rt2800_txdone_entry(entry, entry->status,
+				    rt2800pci_get_txwi(entry));
+		return false;
+	}
+
+	/* No more frames to release */
+	return true;
+}
+
 static bool rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
 {
 	struct data_queue *queue;
-	struct queue_entry *entry;
 	u32 status;
 	u8 qid;
 	int max_tx_done = 16;
@@ -783,8 +869,33 @@ static bool rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
 			break;
 		}
 
-		entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
-		rt2800_txdone_entry(entry, status, rt2800pci_get_txwi(entry));
+		/*
+		 * Let's associate this tx status with the first
+		 * matching frame.
+		 */
+		if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
+						Q_INDEX, &status,
+						rt2800pci_txdone_find_entry)) {
+			/*
+			 * We cannot match the tx status to any frame, so just
+			 * use the first one.
+			 */
+			if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
+							Q_INDEX, &status,
+							rt2800pci_txdone_match_first)) {
+				WARNING(rt2x00dev, "No frame found for TX "
+					"status on queue %u, dropping\n",
+					qid);
+				break;
+			}
+		}
+
+		/*
+		 * Release all frames with a valid tx status.
+		 */
+		rt2x00queue_for_each_entry(queue, Q_INDEX_DONE,
+					   Q_INDEX, NULL,
+					   rt2800pci_txdone_release_entries);
 
 		if (--max_tx_done == 0)
 			break;
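
The wcid-matching walk above is easiest to see on a toy model. The sketch below simulates only the first pass (the rt2800pci_txdone_find_entry semantics) on the exact STA1/STA2 example from the comment; the names and the 4-entry ring are illustrative, not driver code:

#include <stdbool.h>
#include <stdio.h>

struct toy_entry {
	int wcid;		/* STA the frame was queued for */
	bool status_set;	/* mirrors ENTRY_DATA_STATUS_SET */
};

/* first frame with a matching wcid that has no status yet */
static bool find_entry(struct toy_entry *e, int status_wcid)
{
	if (e->wcid == status_wcid && !e->status_set) {
		e->status_set = true;
		return true;
	}
	return false;	/* check the next frame */
}

int main(void)
{
	/* tx order STA1,STA2,STA1,STA2; status order STA1,STA1,STA2,STA2 */
	struct toy_entry ring[4] = { {1, false}, {2, false}, {1, false}, {2, false} };
	int reports[4] = { 1, 1, 2, 2 };

	for (int r = 0; r < 4; r++)
		for (int i = 0; i < 4; i++)
			if (find_entry(&ring[i], reports[r])) {
				printf("status for STA%d -> entry %d\n",
				       reports[r], i);
				break;
			}
	return 0;	/* prints entries 0, 2, 1, 3: reordering absorbed */
}
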
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 098613ed93fb..f32282009146 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -485,7 +485,7 @@ static void rt2800usb_write_tx_desc(struct queue_entry *entry,
 	 */
 	skbdesc->flags |= SKBDESC_DESC_IN_SKB;
 	skbdesc->desc = txi;
-	skbdesc->desc_len = TXINFO_DESC_SIZE + TXWI_DESC_SIZE;
+	skbdesc->desc_len = entry->queue->desc_size;
 }
 
 /*
@@ -730,6 +730,11 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
 	 * Process the RXWI structure.
 	 */
 	rt2800_process_rxwi(entry, rxdesc);
+
+	/*
+	 * Remove RXWI descriptor from start of buffer.
+	 */
+	skb_pull(entry->skb, entry->queue->desc_size - RXINFO_DESC_SIZE);
 }
 
 /*
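
Why the USB path pulls a computed length while the PCI path pulls a constant: the USB RX frame begins with an RXINFO header followed by the RXWI, and queue->desc_size is their sum, so desc_size - RXINFO_DESC_SIZE is exactly the RXWI length for whichever chip the queue was sized for — the fixed RXWI_DESC_SIZE constant would be wrong for the larger RT5592 RXWI. A sketch with assumed sizes (the numeric values are illustrative, not the driver's constants):

/* assumed example sizes, not the driver's real constants */
enum { EX_RXINFO_SIZE = 4, EX_RXWI_SIZE = 16, EX_RXWI_SIZE_5592 = 24 };

static inline unsigned int ex_rxwi_pull_len(unsigned int desc_size)
{
	return desc_size - EX_RXINFO_SIZE;	/* strips exactly the RXWI */
}

/* ex_rxwi_pull_len(EX_RXINFO_SIZE + EX_RXWI_SIZE)      == 16 (legacy chips) */
/* ex_rxwi_pull_len(EX_RXINFO_SIZE + EX_RXWI_SIZE_5592) == 24 (RT5592)       */
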
@@ -890,6 +895,47 @@ static const struct rt2x00_ops rt2800usb_ops = {
 #endif /* CONFIG_RT2X00_LIB_DEBUGFS */
 };
 
+static const struct data_queue_desc rt2800usb_queue_rx_5592 = {
+	.entry_num = 128,
+	.data_size = AGGREGATION_SIZE,
+	.desc_size = RXINFO_DESC_SIZE + RXWI_DESC_SIZE_5592,
+	.priv_size = sizeof(struct queue_entry_priv_usb),
+};
+
+static const struct data_queue_desc rt2800usb_queue_tx_5592 = {
+	.entry_num = 16,
+	.data_size = AGGREGATION_SIZE,
+	.desc_size = TXINFO_DESC_SIZE + TXWI_DESC_SIZE_5592,
+	.priv_size = sizeof(struct queue_entry_priv_usb),
+};
+
+static const struct data_queue_desc rt2800usb_queue_bcn_5592 = {
+	.entry_num = 8,
+	.data_size = MGMT_FRAME_SIZE,
+	.desc_size = TXINFO_DESC_SIZE + TXWI_DESC_SIZE_5592,
+	.priv_size = sizeof(struct queue_entry_priv_usb),
+};
+
+
+static const struct rt2x00_ops rt2800usb_ops_5592 = {
+	.name = KBUILD_MODNAME,
+	.drv_data_size = sizeof(struct rt2800_drv_data),
+	.max_ap_intf = 8,
+	.eeprom_size = EEPROM_SIZE,
+	.rf_size = RF_SIZE,
+	.tx_queues = NUM_TX_QUEUES,
+	.extra_tx_headroom = TXINFO_DESC_SIZE + TXWI_DESC_SIZE_5592,
+	.rx = &rt2800usb_queue_rx_5592,
+	.tx = &rt2800usb_queue_tx_5592,
+	.bcn = &rt2800usb_queue_bcn_5592,
+	.lib = &rt2800usb_rt2x00_ops,
+	.drv = &rt2800usb_rt2800_ops,
+	.hw = &rt2800usb_mac80211_ops,
+#ifdef CONFIG_RT2X00_LIB_DEBUGFS
+	.debugfs = &rt2800_rt2x00debug,
+#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
+};
+
 /*
  * rt2800usb module information.
  */
@@ -1200,6 +1246,18 @@ static struct usb_device_id rt2800usb_device_table[] = {
 	{ USB_DEVICE(0x148f, 0x5370) },
 	{ USB_DEVICE(0x148f, 0x5372) },
 #endif
+#ifdef CONFIG_RT2800USB_RT55XX
+	/* Arcadyan */
+	{ USB_DEVICE(0x043e, 0x7a32), .driver_info = 5592 },
+	/* AVM GmbH */
+	{ USB_DEVICE(0x057c, 0x8501), .driver_info = 5592 },
+	/* D-Link DWA-160-B2 */
+	{ USB_DEVICE(0x2001, 0x3c1a), .driver_info = 5592 },
+	/* Proware */
+	{ USB_DEVICE(0x043e, 0x7a13), .driver_info = 5592 },
+	/* Ralink */
+	{ USB_DEVICE(0x148f, 0x5572), .driver_info = 5592 },
+#endif
 #ifdef CONFIG_RT2800USB_UNKNOWN
 	/*
 	 * Unclear what kind of devices these are (they aren't supported by the
@@ -1303,6 +1361,9 @@ MODULE_LICENSE("GPL");
 static int rt2800usb_probe(struct usb_interface *usb_intf,
 			   const struct usb_device_id *id)
 {
+	if (id->driver_info == 5592)
+		return rt2x00usb_probe(usb_intf, &rt2800usb_ops_5592);
+
 	return rt2x00usb_probe(usb_intf, &rt2800usb_ops);
 }
 
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 086abb403a4f..51922cc179de 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -193,6 +193,7 @@ struct rt2x00_chip {
 #define RT3883		0x3883	/* WSOC */
 #define RT5390		0x5390	/* 2.4GHz */
 #define RT5392		0x5392	/* 2.4GHz */
+#define RT5592		0x5592
 
 	u16 rf;
 	u16 rev;
@@ -1064,8 +1065,7 @@ static inline void rt2x00_rf_write(struct rt2x00_dev *rt2x00dev,
 }
 
 /*
- * Generic EEPROM access.
- * The EEPROM is being accessed by word index.
+ * Generic EEPROM access. The EEPROM is being accessed by word or byte index.
  */
 static inline void *rt2x00_eeprom_addr(struct rt2x00_dev *rt2x00dev,
 				       const unsigned int word)
@@ -1085,6 +1085,12 @@ static inline void rt2x00_eeprom_write(struct rt2x00_dev *rt2x00dev,
 	rt2x00dev->eeprom[word] = cpu_to_le16(data);
 }
 
+static inline u8 rt2x00_eeprom_byte(struct rt2x00_dev *rt2x00dev,
+				    const unsigned int byte)
+{
+	return *(((u8 *)rt2x00dev->eeprom) + byte);
+}
+
 /*
  * Chipset handlers
  */
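
A standalone illustration of the word- versus byte-indexed access the comment now describes. The mock below mirrors the cast-and-index shape of rt2x00_eeprom_byte(); like the driver's cache, the array holds little-endian words, so on a little-endian host byte 1 is the high octet of word 0 (the values are made up):

#include <stdint.h>
#include <stdio.h>

static uint16_t eeprom[2] = { 0x3352, 0x0017 };	/* cached le16 words */

static uint8_t eeprom_byte(unsigned int byte)
{
	return ((uint8_t *)eeprom)[byte];	/* same shape as rt2x00_eeprom_byte() */
}

int main(void)
{
	printf("word 0: 0x%04x\n", eeprom[0]);		/* 0x3352 */
	printf("byte 1: 0x%02x\n", eeprom_byte(1));	/* 0x33 on little-endian */
	return 0;
}
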
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index a0c8caef3b0a..c4009eaeb697 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -52,8 +52,8 @@ int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
 		udelay(REGISTER_BUSY_DELAY);
 	}
 
-	ERROR(rt2x00dev, "Indirect register access failed: "
-	      "offset=0x%.08x, value=0x%.08x\n", offset, *reg);
+	printk_once(KERN_ERR "%s() Indirect register access failed: "
+		    "offset=0x%.08x, value=0x%.08x\n", __func__, offset, *reg);
 	*reg = ~0;
 
 	return 0;
@@ -124,12 +124,10 @@ static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
 	 */
 	addr = dma_alloc_coherent(rt2x00dev->dev,
 				  queue->limit * queue->desc_size,
-				  &dma, GFP_KERNEL);
+				  &dma, GFP_KERNEL | __GFP_ZERO);
 	if (!addr)
 		return -ENOMEM;
 
-	memset(addr, 0, queue->limit * queue->desc_size);
-
 	/*
 	 * Initialize all queue entries to contain valid addresses.
 	 */
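
The allocation change in isolation: request zeroed coherent memory up front instead of memset()ing it afterwards. A minimal sketch, assuming a struct device *dev, a dma_addr_t dma and a size are in scope as in the driver:

	addr = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL | __GFP_ZERO);
	if (!addr)
		return -ENOMEM;
	/* no follow-up memset(addr, 0, size): zeroing was part of the request */
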
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 4d91795dc6a2..952a0490eb17 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -832,7 +832,9 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
 bool rt2x00queue_for_each_entry(struct data_queue *queue,
 				enum queue_index start,
 				enum queue_index end,
-				bool (*fn)(struct queue_entry *entry))
+				void *data,
+				bool (*fn)(struct queue_entry *entry,
+					   void *data))
 {
 	unsigned long irqflags;
 	unsigned int index_start;
@@ -863,17 +865,17 @@ bool rt2x00queue_for_each_entry(struct data_queue *queue,
 	 */
 	if (index_start < index_end) {
 		for (i = index_start; i < index_end; i++) {
-			if (fn(&queue->entries[i]))
+			if (fn(&queue->entries[i], data))
 				return true;
 		}
 	} else {
 		for (i = index_start; i < queue->limit; i++) {
-			if (fn(&queue->entries[i]))
+			if (fn(&queue->entries[i], data))
 				return true;
 		}
 
 		for (i = 0; i < index_end; i++) {
-			if (fn(&queue->entries[i]))
+			if (fn(&queue->entries[i], data))
 				return true;
 		}
 	}
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index 9b8c10a86dee..3d0137193da0 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -359,6 +359,7 @@ enum queue_entry_flags {
 	ENTRY_DATA_PENDING,
 	ENTRY_DATA_IO_FAILED,
 	ENTRY_DATA_STATUS_PENDING,
+	ENTRY_DATA_STATUS_SET,
 };
 
 /**
@@ -372,6 +373,7 @@ enum queue_entry_flags {
  * @entry_idx: The entry index number.
  * @priv_data: Private data belonging to this queue entry. The pointer
  *	points to data specific to a particular driver and queue type.
+ * @status: Device specific status
  */
 struct queue_entry {
 	unsigned long flags;
@@ -383,6 +385,8 @@ struct queue_entry {
 
 	unsigned int entry_idx;
 
+	u32 status;
+
 	void *priv_data;
 };
 
@@ -584,6 +588,7 @@ struct data_queue_desc {
 * @queue: Pointer to @data_queue
 * @start: &enum queue_index Pointer to start index
 * @end: &enum queue_index Pointer to end index
+ * @data: Data to pass to the callback function
 * @fn: The function to call for each &struct queue_entry
 *
 * This will walk through all entries in the queue, in chronological
@@ -596,7 +601,9 @@ struct data_queue_desc {
 bool rt2x00queue_for_each_entry(struct data_queue *queue,
 				enum queue_index start,
 				enum queue_index end,
-				bool (*fn)(struct queue_entry *entry));
+				void *data,
+				bool (*fn)(struct queue_entry *entry,
+					   void *data));
 
 /**
  * rt2x00queue_empty - Check if the queue is empty.
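
A hypothetical caller of the extended iterator, showing what the new @data parameter buys: per-walk state without globals. count_set is illustrative only, not an rt2x00 function:

static bool count_set(struct queue_entry *entry, void *data)
{
	unsigned int *count = data;

	if (test_bit(ENTRY_DATA_STATUS_SET, &entry->flags))
		(*count)++;
	return false;	/* false: keep walking the whole range */
}

/* usage:
 *	unsigned int count = 0;
 *	rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
 *				   &count, count_set);
 */
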
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 40ea80725a96..5e50d4ff9d21 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -285,7 +285,7 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
 	queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work);
 }
 
-static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry)
+static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data)
 {
 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
 	struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
@@ -390,7 +390,7 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
 	queue_work(rt2x00dev->workqueue, &rt2x00dev->rxdone_work);
 }
 
-static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry)
+static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data)
 {
 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
 	struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
@@ -427,12 +427,18 @@ void rt2x00usb_kick_queue(struct data_queue *queue)
 	case QID_AC_BE:
 	case QID_AC_BK:
 		if (!rt2x00queue_empty(queue))
-			rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
+			rt2x00queue_for_each_entry(queue,
+						   Q_INDEX_DONE,
+						   Q_INDEX,
+						   NULL,
 						   rt2x00usb_kick_tx_entry);
 		break;
 	case QID_RX:
 		if (!rt2x00queue_full(queue))
-			rt2x00queue_for_each_entry(queue, Q_INDEX, Q_INDEX_DONE,
+			rt2x00queue_for_each_entry(queue,
						   Q_INDEX,
+						   Q_INDEX_DONE,
+						   NULL,
 						   rt2x00usb_kick_rx_entry);
 		break;
 	default:
@@ -441,7 +447,7 @@ void rt2x00usb_kick_queue(struct data_queue *queue)
 }
 EXPORT_SYMBOL_GPL(rt2x00usb_kick_queue);
 
-static bool rt2x00usb_flush_entry(struct queue_entry *entry)
+static bool rt2x00usb_flush_entry(struct queue_entry *entry, void *data)
 {
 	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
 	struct queue_entry_priv_usb *entry_priv = entry->priv_data;
@@ -468,7 +474,7 @@ void rt2x00usb_flush_queue(struct data_queue *queue, bool drop)
 	unsigned int i;
 
 	if (drop)
-		rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
+		rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX, NULL,
 					   rt2x00usb_flush_entry);
 
 	/*
@@ -559,7 +565,7 @@ void rt2x00usb_clear_entry(struct queue_entry *entry)
 	entry->flags = 0;
 
 	if (entry->queue->qid == QID_RX)
-		rt2x00usb_kick_rx_entry(entry);
+		rt2x00usb_kick_rx_entry(entry, NULL);
 }
 EXPORT_SYMBOL_GPL(rt2x00usb_clear_entry);
 
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 5847d6d0881e..41dce83ff41a 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -224,10 +224,9 @@ static void _usb_writeN_sync(struct rtl_priv *rtlpriv, u32 addr, void *data,
 	u8 *buffer;
 
 	wvalue = (u16)(addr & 0x0000ffff);
-	buffer = kmalloc(len, GFP_ATOMIC);
+	buffer = kmemdup(data, len, GFP_ATOMIC);
 	if (!buffer)
 		return;
-	memcpy(buffer, data, len);
 	usb_control_msg(udev, pipe, request, reqtype, wvalue,
 			index, buffer, len, 50);
 
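
The generic idiom applied here, shown by itself: kmemdup() is the kernel's allocate-and-copy helper, replacing an open-coded kmalloc() plus memcpy() pair:

	u8 *buffer = kmemdup(data, len, GFP_ATOMIC);	/* alloc + copy in one call */

	if (!buffer)
		return;		/* allocation failed; nothing was copied */
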
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index f13258a8d995..c3eff32acf6c 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -2127,9 +2127,6 @@ value to host byte ordering.*/
 #define WLAN_FC_GET_TYPE(fc)	(le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE)
 #define WLAN_FC_GET_STYPE(fc)	(le16_to_cpu(fc) & IEEE80211_FCTL_STYPE)
 #define WLAN_FC_MORE_DATA(fc)	(le16_to_cpu(fc) & IEEE80211_FCTL_MOREDATA)
-#define SEQ_TO_SN(seq)		(((seq) & IEEE80211_SCTL_SEQ) >> 4)
-#define SN_TO_SEQ(ssn)		(((ssn) << 4) & IEEE80211_SCTL_SEQ)
-#define MAX_SN			((IEEE80211_SCTL_SEQ) >> 4)
 
 #define RT_RF_OFF_LEVL_ASPM		BIT(0)	/*PCI ASPM */
 #define RT_RF_OFF_LEVL_CLK_REQ		BIT(1)	/*PCI clock request */
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index 3b266d3231a3..4c67c2f9ea71 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -257,7 +257,7 @@ static int wl1251_spi_probe(struct spi_device *spi)
 	wl = hw->priv;
 
 	SET_IEEE80211_DEV(hw, &spi->dev);
-	dev_set_drvdata(&spi->dev, wl);
+	spi_set_drvdata(spi, wl);
 	wl->if_priv = spi;
 	wl->if_ops = &wl1251_spi_ops;
 
@@ -311,7 +311,7 @@ static int wl1251_spi_probe(struct spi_device *spi)
 
 static int wl1251_spi_remove(struct spi_device *spi)
 {
-	struct wl1251 *wl = dev_get_drvdata(&spi->dev);
+	struct wl1251 *wl = spi_get_drvdata(spi);
 
 	free_irq(wl->irq, wl);
 	wl1251_free_hw(wl);
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 2c2ff3e1f849..d7e306333f6c 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -4956,7 +4956,8 @@ static void wlcore_op_flush(struct ieee80211_hw *hw, bool drop)
 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
 				       struct ieee80211_vif *vif,
 				       struct ieee80211_channel *chan,
-				       int duration)
+				       int duration,
+				       enum ieee80211_roc_type type)
 {
 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
 	struct wl1271 *wl = hw->priv;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index cd49ba949636..83905a97c56c 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -942,7 +942,6 @@ static int netbk_count_requests(struct xenvif *vif,
 }
 
 static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
-					 struct sk_buff *skb,
 					 u16 pending_idx)
 {
 	struct page *page;
@@ -976,7 +975,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
 
 		index = pending_index(netbk->pending_cons++);
 		pending_idx = netbk->pending_ring[index];
-		page = xen_netbk_alloc_page(netbk, skb, pending_idx);
+		page = xen_netbk_alloc_page(netbk, pending_idx);
 		if (!page)
 			goto err;
 
@@ -1185,6 +1184,7 @@ static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
 	if (th >= skb_tail_pointer(skb))
 		goto out;
 
+	skb_set_transport_header(skb, 4 * iph->ihl);
 	skb->csum_start = th - skb->head;
 	switch (iph->protocol) {
 	case IPPROTO_TCP:
@@ -1381,7 +1381,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
 		}
 
 		/* XXX could copy straight to head */
-		page = xen_netbk_alloc_page(netbk, skb, pending_idx);
+		page = xen_netbk_alloc_page(netbk, pending_idx);
 		if (!page) {
 			kfree_skb(skb);
 			netbk_tx_err(vif, &txreq, idx);
@@ -1496,6 +1496,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
 
 		skb->dev = vif->dev;
 		skb->protocol = eth_type_trans(skb, skb->dev);
+		skb_reset_network_header(skb);
 
 		if (checksum_setup(vif, skb)) {
 			netdev_dbg(vif->dev,
@@ -1504,6 +1505,8 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
 			continue;
 		}
 
+		skb_probe_transport_header(skb, 0);
+
 		vif->dev->stats.rx_bytes += skb->len;
 		vif->dev->stats.rx_packets++;
 
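
What checksum_setup() relies on after the added skb_set_transport_header() call, reduced to the standard CHECKSUM_PARTIAL recipe for TCP — kernel idiom, shown out of context rather than copied from this function:

	skb_set_transport_header(skb, 4 * iph->ihl);	/* IHL counts 32-bit words */
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
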
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 7ffa43bd7cf9..d9097a786962 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -537,7 +537,6 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct netfront_info *np = netdev_priv(dev);
 	struct netfront_stats *stats = this_cpu_ptr(np->stats);
 	struct xen_netif_tx_request *tx;
-	struct xen_netif_extra_info *extra;
 	char *data = skb->data;
 	RING_IDX i;
 	grant_ref_t ref;
@@ -581,7 +580,6 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tx->gref = np->grant_tx_ref[id] = ref;
 	tx->offset = offset;
 	tx->size = len;
-	extra = NULL;
 
 	tx->flags = 0;
 	if (skb->ip_summed == CHECKSUM_PARTIAL)
@@ -597,10 +595,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		gso = (struct xen_netif_extra_info *)
 			RING_GET_REQUEST(&np->tx, ++i);
 
-		if (extra)
-			extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
-		else
-			tx->flags |= XEN_NETTXF_extra_info;
+		tx->flags |= XEN_NETTXF_extra_info;
 
 		gso->u.gso.size = skb_shinfo(skb)->gso_size;
 		gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
@@ -609,7 +604,6 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
 		gso->flags = 0;
-		extra = gso;
 	}
 
 	np->tx.req_prod_pvt = i + 1;
@@ -718,7 +712,7 @@ static int xennet_get_responses(struct netfront_info *np,
 	struct sk_buff *skb = xennet_get_rx_skb(np, cons);
 	grant_ref_t ref = xennet_get_rx_ref(np, cons);
 	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
-	int frags = 1;
+	int slots = 1;
 	int err = 0;
 	unsigned long ret;
 
@@ -762,27 +756,27 @@ next:
 		if (!(rx->flags & XEN_NETRXF_more_data))
 			break;
 
-		if (cons + frags == rp) {
+		if (cons + slots == rp) {
 			if (net_ratelimit())
-				dev_warn(dev, "Need more frags\n");
+				dev_warn(dev, "Need more slots\n");
 			err = -ENOENT;
 			break;
 		}
 
-		rx = RING_GET_RESPONSE(&np->rx, cons + frags);
-		skb = xennet_get_rx_skb(np, cons + frags);
-		ref = xennet_get_rx_ref(np, cons + frags);
-		frags++;
+		rx = RING_GET_RESPONSE(&np->rx, cons + slots);
+		skb = xennet_get_rx_skb(np, cons + slots);
+		ref = xennet_get_rx_ref(np, cons + slots);
+		slots++;
 	}
 
-	if (unlikely(frags > max)) {
+	if (unlikely(slots > max)) {
 		if (net_ratelimit())
 			dev_warn(dev, "Too many frags\n");
 		err = -E2BIG;
 	}
 
 	if (unlikely(err))
-		np->rx.rsp_cons = cons + frags;
+		np->rx.rsp_cons = cons + slots;
 
 	return err;
 }
diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c
index 1367655eee39..bea94510ad2d 100644
--- a/drivers/ptp/ptp_pch.c
+++ b/drivers/ptp/ptp_pch.c
@@ -118,7 +118,7 @@ struct pch_ts_regs {
  * struct pch_dev - Driver private data
  */
 struct pch_dev {
-	struct pch_ts_regs *regs;
+	struct pch_ts_regs __iomem *regs;
 	struct ptp_clock *ptp_clock;
 	struct ptp_clock_info caps;
 	int exts0_enabled;
@@ -154,7 +154,7 @@ static inline void pch_eth_enable_set(struct pch_dev *chip)
 	iowrite32(val, (&chip->regs->ts_sel));
 }
 
-static u64 pch_systime_read(struct pch_ts_regs *regs)
+static u64 pch_systime_read(struct pch_ts_regs __iomem *regs)
 {
 	u64 ns;
 	u32 lo, hi;
@@ -169,7 +169,7 @@ static u64 pch_systime_read(struct pch_ts_regs *regs)
 	return ns;
 }
 
-static void pch_systime_write(struct pch_ts_regs *regs, u64 ns)
+static void pch_systime_write(struct pch_ts_regs __iomem *regs, u64 ns)
 {
 	u32 hi, lo;
 
@@ -315,7 +315,7 @@ int pch_set_station_address(u8 *addr, struct pci_dev *pdev)
 	struct pch_dev *chip = pci_get_drvdata(pdev);
 
 	/* Verify the parameter */
-	if ((chip->regs == 0) || addr == (u8 *)NULL) {
+	if ((chip->regs == NULL) || addr == (u8 *)NULL) {
 		dev_err(&pdev->dev,
 			"invalid params returning PCH_INVALIDPARAM\n");
 		return PCH_INVALIDPARAM;
@@ -361,7 +361,7 @@ EXPORT_SYMBOL(pch_set_station_address);
 static irqreturn_t isr(int irq, void *priv)
 {
 	struct pch_dev *pch_dev = priv;
-	struct pch_ts_regs *regs = pch_dev->regs;
+	struct pch_ts_regs __iomem *regs = pch_dev->regs;
 	struct ptp_clock_event event;
 	u32 ack = 0, lo, hi, val;
 
@@ -415,7 +415,7 @@ static int ptp_pch_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
 	u32 diff, addend;
 	int neg_adj = 0;
 	struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps);
-	struct pch_ts_regs *regs = pch_dev->regs;
+	struct pch_ts_regs __iomem *regs = pch_dev->regs;
 
 	if (ppb < 0) {
 		neg_adj = 1;
@@ -438,7 +438,7 @@ static int ptp_pch_adjtime(struct ptp_clock_info *ptp, s64 delta)
 	s64 now;
 	unsigned long flags;
 	struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps);
-	struct pch_ts_regs *regs = pch_dev->regs;
+	struct pch_ts_regs __iomem *regs = pch_dev->regs;
 
 	spin_lock_irqsave(&pch_dev->register_lock, flags);
 	now = pch_systime_read(regs);
@@ -455,7 +455,7 @@ static int ptp_pch_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
 	u32 remainder;
 	unsigned long flags;
 	struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps);
-	struct pch_ts_regs *regs = pch_dev->regs;
+	struct pch_ts_regs __iomem *regs = pch_dev->regs;
 
 	spin_lock_irqsave(&pch_dev->register_lock, flags);
 	ns = pch_systime_read(regs);
@@ -472,7 +472,7 @@ static int ptp_pch_settime(struct ptp_clock_info *ptp,
 	u64 ns;
 	unsigned long flags;
 	struct pch_dev *pch_dev = container_of(ptp, struct pch_dev, caps);
-	struct pch_ts_regs *regs = pch_dev->regs;
+	struct pch_ts_regs __iomem *regs = pch_dev->regs;
 
 	ns = ts->tv_sec * 1000000000ULL;
 	ns += ts->tv_nsec;
@@ -567,9 +567,9 @@ static void pch_remove(struct pci_dev *pdev)
 	free_irq(pdev->irq, chip);
 
 	/* unmap the virtual IO memory space */
-	if (chip->regs != 0) {
+	if (chip->regs != NULL) {
 		iounmap(chip->regs);
-		chip->regs = 0;
+		chip->regs = NULL;
 	}
 	/* release the reserved IO memory space */
 	if (chip->mem_base != 0) {
@@ -670,7 +670,7 @@ pch_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 err_req_irq:
 	ptp_clock_unregister(chip->ptp_clock);
 	iounmap(chip->regs);
-	chip->regs = 0;
+	chip->regs = NULL;
 
 err_ioremap:
 	release_mem_region(chip->mem_base, chip->mem_size);
@@ -723,9 +723,10 @@ static s32 __init ptp_pch_init(void)
 module_init(ptp_pch_init);
 module_exit(ptp_pch_exit);
 
-module_param_string(station, pch_param.station, sizeof pch_param.station, 0444);
+module_param_string(station,
+		    pch_param.station, sizeof(pch_param.station), 0444);
 MODULE_PARM_DESC(station,
	 "IEEE 1588 station address to use - colon separated hex values");
 
 MODULE_AUTHOR("LAPIS SEMICONDUCTOR, <tshimizu818@gmail.com>");
 MODULE_DESCRIPTION("PTP clock using the EG20T timer");
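
What the __iomem annotations added above buy: sparse can now flag direct dereferences of MMIO pointers, funnelling all access through ioread32()/iowrite32(). A minimal sketch with an assumed register block:

struct ex_regs {
	u32 ctrl;
};

static u32 ex_read_ctrl(struct ex_regs __iomem *r)
{
	return ioread32(&r->ctrl);	/* correct: accessor used */
	/* 'return r->ctrl;' would earn a sparse address-space warning */
}
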
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c
index 2029b6caa595..fb877b59ec57 100644
--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/kvm/virtio_ccw.c
@@ -166,7 +166,7 @@ static void virtio_ccw_kvm_notify(struct virtqueue *vq)
 
 	vcdev = to_vc_device(info->vq->vdev);
 	ccw_device_get_schid(vcdev->cdev, &schid);
-	do_kvm_notify(schid, virtqueue_get_queue_index(vq));
+	do_kvm_notify(schid, vq->index);
 }
 
 static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
@@ -188,7 +188,7 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
 	unsigned long flags;
 	unsigned long size;
 	int ret;
-	unsigned int index = virtqueue_get_queue_index(vq);
+	unsigned int index = vq->index;
 
 	/* Remove from our list. */
 	spin_lock_irqsave(&vcdev->lock, flags);
@@ -610,7 +610,7 @@ static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev,
 	vq = NULL;
 	spin_lock_irqsave(&vcdev->lock, flags);
 	list_for_each_entry(info, &vcdev->virtqueues, node) {
-		if (virtqueue_get_queue_index(info->vq) == index) {
+		if (info->vq->index == index) {
 			vq = info->vq;
 			break;
 		}
diff --git a/drivers/scsi/csiostor/Makefile b/drivers/scsi/csiostor/Makefile
index b581966c88f9..913b9a92fb06 100644
--- a/drivers/scsi/csiostor/Makefile
+++ b/drivers/scsi/csiostor/Makefile
@@ -8,4 +8,5 @@ ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb4
 obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor.o
 
 csiostor-objs := csio_attr.o csio_init.o csio_lnode.o csio_scsi.o \
-		csio_hw.o csio_isr.o csio_mb.o csio_rnode.o csio_wr.o
+		csio_hw.o csio_hw_t4.o csio_hw_t5.o csio_isr.o \
+		csio_mb.o csio_rnode.o csio_wr.o
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index bdd78fb4fc70..a0b4c8991deb 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -61,7 +61,7 @@ int csio_msi = 2;
 static int dev_num;
 
 /* FCoE Adapter types & its description */
-static const struct csio_adap_desc csio_fcoe_adapters[] = {
+static const struct csio_adap_desc csio_t4_fcoe_adapters[] = {
 	{"T440-Dbg 10G", "Chelsio T440-Dbg 10G [FCoE]"},
 	{"T420-CR 10G", "Chelsio T420-CR 10G [FCoE]"},
 	{"T422-CR 10G/1G", "Chelsio T422-CR 10G/1G [FCoE]"},
@@ -77,7 +77,38 @@ static const struct csio_adap_desc csio_t4_fcoe_adapters[] = {
 	{"B404-BT 1G", "Chelsio B404-BT 1G [FCoE]"},
 	{"T480-CR 10G", "Chelsio T480-CR 10G [FCoE]"},
 	{"T440-LP-CR 10G", "Chelsio T440-LP-CR 10G [FCoE]"},
-	{"T4 FPGA", "Chelsio T4 FPGA [FCoE]"}
+	{"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
+	{"HUAWEI T480 10G", "Chelsio HUAWEI T480 10G [FCoE]"},
+	{"HUAWEI T440 10G", "Chelsio HUAWEI T440 10G [FCoE]"},
+	{"HUAWEI STG 10G", "Chelsio HUAWEI STG 10G [FCoE]"},
+	{"ACROMAG XAUI 10G", "Chelsio ACROMAG XAUI 10G [FCoE]"},
+	{"ACROMAG SFP+ 10G", "Chelsio ACROMAG SFP+ 10G [FCoE]"},
+	{"QUANTA SFP+ 10G", "Chelsio QUANTA SFP+ 10G [FCoE]"},
+	{"HUAWEI 10Gbase-T", "Chelsio HUAWEI 10Gbase-T [FCoE]"},
+	{"HUAWEI T4TOE 10G", "Chelsio HUAWEI T4TOE 10G [FCoE]"}
+};
+
+static const struct csio_adap_desc csio_t5_fcoe_adapters[] = {
+	{"T580-Dbg 10G", "Chelsio T580-Dbg 10G [FCoE]"},
+	{"T520-CR 10G", "Chelsio T520-CR 10G [FCoE]"},
+	{"T522-CR 10G/1G", "Chelsio T522-CR 10G/1G [FCoE]"},
+	{"T540-CR 10G", "Chelsio T540-CR 10G [FCoE]"},
+	{"T520-BCH 10G", "Chelsio T520-BCH 10G [FCoE]"},
+	{"T540-BCH 10G", "Chelsio T540-BCH 10G [FCoE]"},
+	{"T540-CH 10G", "Chelsio T540-CH 10G [FCoE]"},
+	{"T520-SO 10G", "Chelsio T520-SO 10G [FCoE]"},
+	{"T520-CX4 10G", "Chelsio T520-CX4 10G [FCoE]"},
+	{"T520-BT 10G", "Chelsio T520-BT 10G [FCoE]"},
+	{"T504-BT 1G", "Chelsio T504-BT 1G [FCoE]"},
+	{"B520-SR 10G", "Chelsio B520-SR 10G [FCoE]"},
+	{"B504-BT 1G", "Chelsio B504-BT 1G [FCoE]"},
+	{"T580-CR 10G", "Chelsio T580-CR 10G [FCoE]"},
+	{"T540-LP-CR 10G", "Chelsio T540-LP-CR 10G [FCoE]"},
+	{"AMSTERDAM 10G", "Chelsio AMSTERDAM 10G [FCoE]"},
+	{"T580-LP-CR 40G", "Chelsio T580-LP-CR 40G [FCoE]"},
+	{"T520-LL-CR 10G", "Chelsio T520-LL-CR 10G [FCoE]"},
+	{"T560-CR 40G", "Chelsio T560-CR 40G [FCoE]"},
+	{"T580-CR 40G", "Chelsio T580-CR 40G [FCoE]"}
 };
 
 static void csio_mgmtm_cleanup(struct csio_mgmtm *);
@@ -124,7 +155,7 @@ int csio_is_hw_removing(struct csio_hw *hw)
 * at the time it indicated completion is stored there. Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
-static int
+int
 csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask,
			 int polarity, int attempts, int delay, uint32_t *valp)
 {
@@ -145,6 +176,24 @@ csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask,
 	}
 }
 
+/*
+ * csio_hw_tp_wr_bits_indirect - set/clear bits in an indirect TP register
+ * @hw: the adapter
+ * @addr: the indirect TP register address
+ * @mask: specifies the field within the register to modify
+ * @val: new value for the field
+ *
+ * Sets a field of an indirect TP register to the given value.
+ */
+void
+csio_hw_tp_wr_bits_indirect(struct csio_hw *hw, unsigned int addr,
+			    unsigned int mask, unsigned int val)
+{
+	csio_wr_reg32(hw, addr, TP_PIO_ADDR);
+	val |= csio_rd_reg32(hw, TP_PIO_DATA) & ~mask;
+	csio_wr_reg32(hw, val, TP_PIO_DATA);
+}
+
 void
 csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask,
		   uint32_t value)
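
Usage sketch for the new helper: because it performs a read-modify-write through the TP_PIO_ADDR/TP_PIO_DATA pair, only the masked field changes and every other bit of the indirect register survives. The register name below is a hypothetical example, not taken from this patch:

	/* set the 2-bit field at bits [5:4] of an indirect TP register to 2 */
	csio_hw_tp_wr_bits_indirect(hw, EX_TP_SOME_CONFIG, 0x30, 0x20);
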
@@ -157,242 +206,22 @@ csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask,
157 206
158} 207}
159 208
160/*
161 * csio_hw_mc_read - read from MC through backdoor accesses
162 * @hw: the hw module
163 * @addr: address of first byte requested
164 * @data: 64 bytes of data containing the requested address
165 * @ecc: where to store the corresponding 64-bit ECC word
166 *
167 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
168 * that covers the requested address @addr. If @parity is not %NULL it
169 * is assigned the 64-bit ECC word for the read data.
170 */
171int
172csio_hw_mc_read(struct csio_hw *hw, uint32_t addr, __be32 *data,
173 uint64_t *ecc)
174{
175 int i;
176
177 if (csio_rd_reg32(hw, MC_BIST_CMD) & START_BIST)
178 return -EBUSY;
179 csio_wr_reg32(hw, addr & ~0x3fU, MC_BIST_CMD_ADDR);
180 csio_wr_reg32(hw, 64, MC_BIST_CMD_LEN);
181 csio_wr_reg32(hw, 0xc, MC_BIST_DATA_PATTERN);
182 csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1),
183 MC_BIST_CMD);
184 i = csio_hw_wait_op_done_val(hw, MC_BIST_CMD, START_BIST,
185 0, 10, 1, NULL);
186 if (i)
187 return i;
188
189#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
190
191 for (i = 15; i >= 0; i--)
192 *data++ = htonl(csio_rd_reg32(hw, MC_DATA(i)));
193 if (ecc)
194 *ecc = csio_rd_reg64(hw, MC_DATA(16));
195#undef MC_DATA
196 return 0;
197}
198
199/*
200 * csio_hw_edc_read - read from EDC through backdoor accesses
201 * @hw: the hw module
202 * @idx: which EDC to access
203 * @addr: address of first byte requested
204 * @data: 64 bytes of data containing the requested address
205 * @ecc: where to store the corresponding 64-bit ECC word
206 *
207 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
208 * that covers the requested address @addr. If @parity is not %NULL it
209 * is assigned the 64-bit ECC word for the read data.
210 */
211int
212csio_hw_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
213 uint64_t *ecc)
214{
215 int i;
216
217 idx *= EDC_STRIDE;
218 if (csio_rd_reg32(hw, EDC_BIST_CMD + idx) & START_BIST)
219 return -EBUSY;
220 csio_wr_reg32(hw, addr & ~0x3fU, EDC_BIST_CMD_ADDR + idx);
221 csio_wr_reg32(hw, 64, EDC_BIST_CMD_LEN + idx);
222 csio_wr_reg32(hw, 0xc, EDC_BIST_DATA_PATTERN + idx);
223 csio_wr_reg32(hw, BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST,
224 EDC_BIST_CMD + idx);
225 i = csio_hw_wait_op_done_val(hw, EDC_BIST_CMD + idx, START_BIST,
226 0, 10, 1, NULL);
227 if (i)
228 return i;
229
230#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
231
232 for (i = 15; i >= 0; i--)
233 *data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i)));
234 if (ecc)
235 *ecc = csio_rd_reg64(hw, EDC_DATA(16));
236#undef EDC_DATA
237 return 0;
238}
239
240/*
241 * csio_mem_win_rw - read/write memory through PCIE memory window
242 * @hw: the adapter
243 * @addr: address of first byte requested
244 * @data: MEMWIN0_APERTURE bytes of data containing the requested address
245 * @dir: direction of transfer 1 => read, 0 => write
246 *
247 * Read/write MEMWIN0_APERTURE bytes of data from MC starting at a
248 * MEMWIN0_APERTURE-byte-aligned address that covers the requested
249 * address @addr.
250 */
251static int
252csio_mem_win_rw(struct csio_hw *hw, u32 addr, u32 *data, int dir)
253{
254 int i;
255
256 /*
257 * Setup offset into PCIE memory window. Address must be a
258 * MEMWIN0_APERTURE-byte-aligned address. (Read back MA register to
259 * ensure that changes propagate before we attempt to use the new
260 * values.)
261 */
262 csio_wr_reg32(hw, addr & ~(MEMWIN0_APERTURE - 1),
263 PCIE_MEM_ACCESS_OFFSET);
264 csio_rd_reg32(hw, PCIE_MEM_ACCESS_OFFSET);
265
266 /* Collecting data 4 bytes at a time upto MEMWIN0_APERTURE */
267 for (i = 0; i < MEMWIN0_APERTURE; i = i + sizeof(__be32)) {
268 if (dir)
269 *data++ = csio_rd_reg32(hw, (MEMWIN0_BASE + i));
270 else
271 csio_wr_reg32(hw, *data++, (MEMWIN0_BASE + i));
272 }
273
274 return 0;
275}
276
277/*
278 * csio_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
279 * @hw: the csio_hw
280 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
281 * @addr: address within indicated memory type
282 * @len: amount of memory to transfer
283 * @buf: host memory buffer
284 * @dir: direction of transfer 1 => read, 0 => write
285 *
286 * Reads/writes an [almost] arbitrary memory region in the firmware: the
287 * firmware memory address, length and host buffer must be aligned on
288 * 32-bit boudaries. The memory is transferred as a raw byte sequence
289 * from/to the firmware's memory. If this memory contains data
290 * structures which contain multi-byte integers, it's the callers
291 * responsibility to perform appropriate byte order conversions.
292 */
293static int
294csio_memory_rw(struct csio_hw *hw, int mtype, u32 addr, u32 len,
295 uint32_t *buf, int dir)
296{
297 uint32_t pos, start, end, offset, memoffset;
298 int ret;
299 uint32_t *data;
300
301 /*
302 * Argument sanity checks ...
303 */
304 if ((addr & 0x3) || (len & 0x3))
305 return -EINVAL;
306
307 data = kzalloc(MEMWIN0_APERTURE, GFP_KERNEL);
308 if (!data)
309 return -ENOMEM;
310
311 /* Offset into the region of memory which is being accessed
312 * MEM_EDC0 = 0
313 * MEM_EDC1 = 1
314 * MEM_MC = 2
315 */
316 memoffset = (mtype * (5 * 1024 * 1024));
317
318 /* Determine the PCIE_MEM_ACCESS_OFFSET */
319 addr = addr + memoffset;
320
321 /*
322 * The underlaying EDC/MC read routines read MEMWIN0_APERTURE bytes
323 * at a time so we need to round down the start and round up the end.
324 * We'll start copying out of the first line at (addr - start) a word
325 * at a time.
326 */
327 start = addr & ~(MEMWIN0_APERTURE-1);
328 end = (addr + len + MEMWIN0_APERTURE-1) & ~(MEMWIN0_APERTURE-1);
329 offset = (addr - start)/sizeof(__be32);
330
331 for (pos = start; pos < end; pos += MEMWIN0_APERTURE, offset = 0) {
332 /*
333 * If we're writing, copy the data from the caller's memory
334 * buffer
335 */
336 if (!dir) {
337 /*
338 * If we're doing a partial write, then we need to do
339 * a read-modify-write ...
340 */
341 if (offset || len < MEMWIN0_APERTURE) {
342 ret = csio_mem_win_rw(hw, pos, data, 1);
343 if (ret) {
344 kfree(data);
345 return ret;
346 }
347 }
348 while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
349 len > 0) {
350 data[offset++] = *buf++;
351 len -= sizeof(__be32);
352 }
353 }
354
355 /*
356 * Transfer a block of memory and bail if there's an error.
357 */
358 ret = csio_mem_win_rw(hw, pos, data, dir);
359 if (ret) {
360 kfree(data);
361 return ret;
362 }
363
364 /*
365 * If we're reading, copy the data into the caller's memory
366 * buffer.
367 */
368 if (dir)
369 while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
370 len > 0) {
371 *buf++ = data[offset++];
372 len -= sizeof(__be32);
373 }
374 }
375
376 kfree(data);
377
378 return 0;
379}
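
As the comment above notes, byte-order conversion of any multi-byte
integers in the transferred region is left to the caller. A hedged
sketch of such a caller, assuming the stored words are big-endian
(read_fw_words() is illustrative only and not part of this patch):

static int read_fw_words(struct csio_hw *hw, u32 addr, u32 *out, u32 nwords)
{
	u32 i;
	int ret;

	ret = csio_memory_rw(hw, MEM_EDC0, addr, nwords * sizeof(u32),
			     out, 1 /* read */);
	if (ret)
		return ret;

	/* raw words assumed big-endian; convert for the host */
	for (i = 0; i < nwords; i++)
		out[i] = ntohl(out[i]);
	return 0;
}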
380
381static int 209static int
382csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf) 210csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf)
383{ 211{
384 return csio_memory_rw(hw, mtype, addr, len, buf, 0); 212 return hw->chip_ops->chip_memory_rw(hw, MEMWIN_CSIOSTOR, mtype,
213 addr, len, buf, 0);
385} 214}
386 215
387/* 216/*
388 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms. 217 * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
389 */ 218 */
390#define EEPROM_MAX_RD_POLL 40 219#define EEPROM_MAX_RD_POLL 40
391#define EEPROM_MAX_WR_POLL 6 220#define EEPROM_MAX_WR_POLL 6
392#define EEPROM_STAT_ADDR 0x7bfc 221#define EEPROM_STAT_ADDR 0x7bfc
393#define VPD_BASE 0x400 222#define VPD_BASE 0x400
394#define VPD_BASE_OLD 0 223#define VPD_BASE_OLD 0
395#define VPD_LEN 512 224#define VPD_LEN 1024
396#define VPD_INFO_FLD_HDR_SIZE 3 225#define VPD_INFO_FLD_HDR_SIZE 3
397 226
398/* 227/*
@@ -817,23 +646,6 @@ out:
817 return 0; 646 return 0;
818} 647}
819 648
820/*
821 * csio_hw_flash_cfg_addr - return the address of the flash
822 * configuration file
823 * @hw: the HW module
824 *
825 * Return the address within the flash where the Firmware Configuration
826 * File is stored.
827 */
828static unsigned int
829csio_hw_flash_cfg_addr(struct csio_hw *hw)
830{
831 if (hw->params.sf_size == 0x100000)
832 return FPGA_FLASH_CFG_OFFSET;
833 else
834 return FLASH_CFG_OFFSET;
835}
836
837static void 649static void
838csio_hw_print_fw_version(struct csio_hw *hw, char *str) 650csio_hw_print_fw_version(struct csio_hw *hw, char *str)
839{ 651{
@@ -898,13 +710,13 @@ csio_hw_check_fw_version(struct csio_hw *hw)
898 minor = FW_HDR_FW_VER_MINOR_GET(hw->fwrev); 710 minor = FW_HDR_FW_VER_MINOR_GET(hw->fwrev);
899 micro = FW_HDR_FW_VER_MICRO_GET(hw->fwrev); 711 micro = FW_HDR_FW_VER_MICRO_GET(hw->fwrev);
900 712
901 if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */ 713 if (major != FW_VERSION_MAJOR(hw)) { /* major mismatch - fail */
902 csio_err(hw, "card FW has major version %u, driver wants %u\n", 714 csio_err(hw, "card FW has major version %u, driver wants %u\n",
903 major, FW_VERSION_MAJOR); 715 major, FW_VERSION_MAJOR(hw));
904 return -EINVAL; 716 return -EINVAL;
905 } 717 }
906 718
907 if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO) 719 if (minor == FW_VERSION_MINOR(hw) && micro == FW_VERSION_MICRO(hw))
908 return 0; /* perfect match */ 720 return 0; /* perfect match */
909 721
910 /* Minor/micro version mismatch */ 722 /* Minor/micro version mismatch */
@@ -1044,7 +856,7 @@ static void
1044csio_set_pcie_completion_timeout(struct csio_hw *hw, u8 range) 856csio_set_pcie_completion_timeout(struct csio_hw *hw, u8 range)
1045{ 857{
1046 uint16_t val; 858 uint16_t val;
1047 uint32_t pcie_cap; 859 int pcie_cap;
1048 860
1049 if (!csio_pci_capability(hw->pdev, PCI_CAP_ID_EXP, &pcie_cap)) { 861 if (!csio_pci_capability(hw->pdev, PCI_CAP_ID_EXP, &pcie_cap)) {
1050 pci_read_config_word(hw->pdev, 862 pci_read_config_word(hw->pdev,
@@ -1056,84 +868,6 @@ csio_set_pcie_completion_timeout(struct csio_hw *hw, u8 range)
1056 } 868 }
1057} 869}
1058 870
1059
1060/*
1061 * Return the specified PCI-E Configuration Space register from our Physical
1062 * Function. We try first via a Firmware LDST Command since we prefer to let
1063 * the firmware own all of these registers, but if that fails we go for it
1064 * directly ourselves.
1065 */
1066static uint32_t
1067csio_read_pcie_cfg4(struct csio_hw *hw, int reg)
1068{
1069 u32 val = 0;
1070 struct csio_mb *mbp;
1071 int rv;
1072 struct fw_ldst_cmd *ldst_cmd;
1073
1074 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
1075 if (!mbp) {
1076 CSIO_INC_STATS(hw, n_err_nomem);
1077 pci_read_config_dword(hw->pdev, reg, &val);
1078 return val;
1079 }
1080
1081 csio_mb_ldst(hw, mbp, CSIO_MB_DEFAULT_TMO, reg);
1082
1083 rv = csio_mb_issue(hw, mbp);
1084
1085 /*
1086	 * If the LDST Command succeeded, extract the returned register
1087	 * value. Otherwise read it directly ourselves.
1088 */
1089 if (rv == 0) {
1090 ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb);
1091 val = ntohl(ldst_cmd->u.pcie.data[0]);
1092 } else
1093 pci_read_config_dword(hw->pdev, reg, &val);
1094
1095 mempool_free(mbp, hw->mb_mempool);
1096
1097 return val;
1098} /* csio_read_pcie_cfg4 */
1099
1100static int
1101csio_hw_set_mem_win(struct csio_hw *hw)
1102{
1103 u32 bar0;
1104
1105 /*
1106 * Truncation intentional: we only read the bottom 32-bits of the
1107 * 64-bit BAR0/BAR1 ... We use the hardware backdoor mechanism to
1108 * read BAR0 instead of using pci_resource_start() because we could be
1109 * operating from within a Virtual Machine which is trapping our
1110 * accesses to our Configuration Space and we need to set up the PCI-E
1111 * Memory Window decoders with the actual addresses which will be
1112 * coming across the PCI-E link.
1113 */
1114 bar0 = csio_read_pcie_cfg4(hw, PCI_BASE_ADDRESS_0);
1115 bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
1116
1117 /*
1118 * Set up memory window for accessing adapter memory ranges. (Read
1119 * back MA register to ensure that changes propagate before we attempt
1120 * to use the new values.)
1121 */
1122 csio_wr_reg32(hw, (bar0 + MEMWIN0_BASE) | BIR(0) |
1123 WINDOW(ilog2(MEMWIN0_APERTURE) - 10),
1124 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0));
1125 csio_wr_reg32(hw, (bar0 + MEMWIN1_BASE) | BIR(0) |
1126 WINDOW(ilog2(MEMWIN1_APERTURE) - 10),
1127 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1));
1128 csio_wr_reg32(hw, (bar0 + MEMWIN2_BASE) | BIR(0) |
1129 WINDOW(ilog2(MEMWIN2_APERTURE) - 10),
1130 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
1131 csio_rd_reg32(hw, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
1132 return 0;
1133} /* csio_hw_set_mem_win */
1134
1135
1136
1137/*****************************************************************************/ 871/*****************************************************************************/
1138/* HW State machine assists */ 872/* HW State machine assists */
1139/*****************************************************************************/ 873/*****************************************************************************/
@@ -1234,7 +968,9 @@ retry:
1234 for (;;) { 968 for (;;) {
1235 uint32_t pcie_fw; 969 uint32_t pcie_fw;
1236 970
971 spin_unlock_irq(&hw->lock);
1237 msleep(50); 972 msleep(50);
973 spin_lock_irq(&hw->lock);
1238 waiting -= 50; 974 waiting -= 50;
1239 975
1240 /* 976 /*
@@ -2121,9 +1857,9 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
2121 uint32_t *cfg_data; 1857 uint32_t *cfg_data;
2122 int value_to_add = 0; 1858 int value_to_add = 0;
2123 1859
2124 if (request_firmware(&cf, CSIO_CF_FNAME, dev) < 0) { 1860 if (request_firmware(&cf, CSIO_CF_FNAME(hw), dev) < 0) {
2125 csio_err(hw, "could not find config file " CSIO_CF_FNAME 1861 csio_err(hw, "could not find config file %s, err: %d\n",
2126 ",err: %d\n", ret); 1862 CSIO_CF_FNAME(hw), ret);
2127 return -ENOENT; 1863 return -ENOENT;
2128 } 1864 }
2129 1865
@@ -2147,9 +1883,24 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
2147 1883
2148 ret = csio_memory_write(hw, mtype, maddr, 1884 ret = csio_memory_write(hw, mtype, maddr,
2149 cf->size + value_to_add, cfg_data); 1885 cf->size + value_to_add, cfg_data);
1886
1887 if ((ret == 0) && (value_to_add != 0)) {
1888 union {
1889 u32 word;
1890 char buf[4];
1891 } last;
1892 size_t size = cf->size & ~0x3;
1893 int i;
1894
1895 last.word = cfg_data[size >> 2];
1896 for (i = value_to_add; i < 4; i++)
1897 last.buf[i] = 0;
1898 ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word);
1899 }
2150 if (ret == 0) { 1900 if (ret == 0) {
2151 csio_info(hw, "config file upgraded to " CSIO_CF_FNAME "\n"); 1901 csio_info(hw, "config file upgraded to %s\n",
2152 strncpy(path, "/lib/firmware/" CSIO_CF_FNAME, 64); 1902 CSIO_CF_FNAME(hw));
1903 snprintf(path, 64, "%s%s", "/lib/firmware/", CSIO_CF_FNAME(hw));
2153 } 1904 }
2154 1905
2155leave: 1906leave:
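
The tail handling added above builds the final 32-bit word in a 4-byte
union so the padding bytes are zeroed before one last aligned write. A
standalone sketch of that zero-padding idea (the helper name and the
byte-count convention are illustrative):

#include <stdint.h>
#include <string.h>

/* Zero-pad the trailing 1..3 valid bytes of a buffer into one word. */
static uint32_t pad_last_word(const uint8_t *tail, unsigned int nbytes)
{
	union {
		uint32_t word;
		uint8_t buf[4];
	} last = { .word = 0 };

	memcpy(last.buf, tail, nbytes);	/* valid bytes; the rest stay zero */
	return last.word;
}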
@@ -2179,7 +1930,7 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
2179{ 1930{
2180 unsigned int mtype, maddr; 1931 unsigned int mtype, maddr;
2181 int rv; 1932 int rv;
2182 uint32_t finiver, finicsum, cfcsum; 1933 uint32_t finiver = 0, finicsum = 0, cfcsum = 0;
2183 int using_flash; 1934 int using_flash;
2184 char path[64]; 1935 char path[64];
2185 1936
@@ -2207,7 +1958,7 @@ csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
2207 * config file from flash. 1958 * config file from flash.
2208 */ 1959 */
2209 mtype = FW_MEMTYPE_CF_FLASH; 1960 mtype = FW_MEMTYPE_CF_FLASH;
2210 maddr = csio_hw_flash_cfg_addr(hw); 1961 maddr = hw->chip_ops->chip_flash_cfg_addr(hw);
2211 using_flash = 1; 1962 using_flash = 1;
2212 } else { 1963 } else {
2213 /* 1964 /*
@@ -2346,30 +2097,32 @@ csio_hw_flash_fw(struct csio_hw *hw)
2346 struct pci_dev *pci_dev = hw->pdev; 2097 struct pci_dev *pci_dev = hw->pdev;
2347 struct device *dev = &pci_dev->dev ; 2098 struct device *dev = &pci_dev->dev ;
2348 2099
2349 if (request_firmware(&fw, CSIO_FW_FNAME, dev) < 0) { 2100 if (request_firmware(&fw, CSIO_FW_FNAME(hw), dev) < 0) {
2350 csio_err(hw, "could not find firmware image " CSIO_FW_FNAME 2101 csio_err(hw, "could not find firmware image %s, err: %d\n",
2351 ",err: %d\n", ret); 2102 CSIO_FW_FNAME(hw), ret);
2352 return -EINVAL; 2103 return -EINVAL;
2353 } 2104 }
2354 2105
2355 hdr = (const struct fw_hdr *)fw->data; 2106 hdr = (const struct fw_hdr *)fw->data;
2356 fw_ver = ntohl(hdr->fw_ver); 2107 fw_ver = ntohl(hdr->fw_ver);
2357 if (FW_HDR_FW_VER_MAJOR_GET(fw_ver) != FW_VERSION_MAJOR) 2108 if (FW_HDR_FW_VER_MAJOR_GET(fw_ver) != FW_VERSION_MAJOR(hw))
2358 return -EINVAL; /* wrong major version, won't do */ 2109 return -EINVAL; /* wrong major version, won't do */
2359 2110
2360 /* 2111 /*
2361 * If the flash FW is unusable or we found something newer, load it. 2112 * If the flash FW is unusable or we found something newer, load it.
2362 */ 2113 */
2363 if (FW_HDR_FW_VER_MAJOR_GET(hw->fwrev) != FW_VERSION_MAJOR || 2114 if (FW_HDR_FW_VER_MAJOR_GET(hw->fwrev) != FW_VERSION_MAJOR(hw) ||
2364 fw_ver > hw->fwrev) { 2115 fw_ver > hw->fwrev) {
2365 ret = csio_hw_fw_upgrade(hw, hw->pfn, fw->data, fw->size, 2116 ret = csio_hw_fw_upgrade(hw, hw->pfn, fw->data, fw->size,
2366 /*force=*/false); 2117 /*force=*/false);
2367 if (!ret) 2118 if (!ret)
2368 csio_info(hw, "firmware upgraded to version %pI4 from " 2119 csio_info(hw,
2369 CSIO_FW_FNAME "\n", &hdr->fw_ver); 2120 "firmware upgraded to version %pI4 from %s\n",
2121 &hdr->fw_ver, CSIO_FW_FNAME(hw));
2370 else 2122 else
2371 csio_err(hw, "firmware upgrade failed! err=%d\n", ret); 2123 csio_err(hw, "firmware upgrade failed! err=%d\n", ret);
2372 } 2124 } else
2125 ret = -EINVAL;
2373 2126
2374 release_firmware(fw); 2127 release_firmware(fw);
2375 2128
@@ -2410,7 +2163,7 @@ csio_hw_configure(struct csio_hw *hw)
2410 /* Set pci completion timeout value to 4 seconds. */ 2163 /* Set pci completion timeout value to 4 seconds. */
2411 csio_set_pcie_completion_timeout(hw, 0xd); 2164 csio_set_pcie_completion_timeout(hw, 0xd);
2412 2165
2413 csio_hw_set_mem_win(hw); 2166 hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR);
2414 2167
2415 rv = csio_hw_get_fw_version(hw, &hw->fwrev); 2168 rv = csio_hw_get_fw_version(hw, &hw->fwrev);
2416 if (rv != 0) 2169 if (rv != 0)
@@ -2478,6 +2231,8 @@ csio_hw_configure(struct csio_hw *hw)
2478 } else { 2231 } else {
2479 if (hw->fw_state == CSIO_DEV_STATE_INIT) { 2232 if (hw->fw_state == CSIO_DEV_STATE_INIT) {
2480 2233
2234 hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;
2235
2481 /* device parameters */ 2236 /* device parameters */
2482 rv = csio_get_device_params(hw); 2237 rv = csio_get_device_params(hw);
2483 if (rv != 0) 2238 if (rv != 0)
@@ -2651,7 +2406,7 @@ csio_hw_intr_disable(struct csio_hw *hw)
2651 2406
2652} 2407}
2653 2408
2654static void 2409void
2655csio_hw_fatal_err(struct csio_hw *hw) 2410csio_hw_fatal_err(struct csio_hw *hw)
2656{ 2411{
2657 csio_set_reg_field(hw, SGE_CONTROL, GLOBALENABLE, 0); 2412 csio_set_reg_field(hw, SGE_CONTROL, GLOBALENABLE, 0);
@@ -2990,14 +2745,6 @@ csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt)
2990/* END: HW SM */ 2745/* END: HW SM */
2991/*****************************************************************************/ 2746/*****************************************************************************/
2992 2747
2993/* Slow path handlers */
2994struct intr_info {
2995 unsigned int mask; /* bits to check in interrupt status */
2996 const char *msg; /* message to print or NULL */
2997 short stat_idx; /* stat counter to increment or -1 */
2998 unsigned short fatal; /* whether the condition reported is fatal */
2999};
3000
3001/* 2748/*
3002 * csio_handle_intr_status - table driven interrupt handler 2749 * csio_handle_intr_status - table driven interrupt handler
3003 * @hw: HW instance 2750 * @hw: HW instance
@@ -3011,7 +2758,7 @@ struct intr_info {
3011 * by an entry specifying mask 0. Returns the number of fatal interrupt 2758 * by an entry specifying mask 0. Returns the number of fatal interrupt
3012 * conditions. 2759 * conditions.
3013 */ 2760 */
3014static int 2761int
3015csio_handle_intr_status(struct csio_hw *hw, unsigned int reg, 2762csio_handle_intr_status(struct csio_hw *hw, unsigned int reg,
3016 const struct intr_info *acts) 2763 const struct intr_info *acts)
3017{ 2764{
@@ -3038,80 +2785,6 @@ csio_handle_intr_status(struct csio_hw *hw, unsigned int reg,
3038} 2785}
3039 2786
3040/* 2787/*
3041 * Interrupt handler for the PCIE module.
3042 */
3043static void
3044csio_pcie_intr_handler(struct csio_hw *hw)
3045{
3046 static struct intr_info sysbus_intr_info[] = {
3047 { RNPP, "RXNP array parity error", -1, 1 },
3048 { RPCP, "RXPC array parity error", -1, 1 },
3049 { RCIP, "RXCIF array parity error", -1, 1 },
3050 { RCCP, "Rx completions control array parity error", -1, 1 },
3051 { RFTP, "RXFT array parity error", -1, 1 },
3052 { 0, NULL, 0, 0 }
3053 };
3054 static struct intr_info pcie_port_intr_info[] = {
3055 { TPCP, "TXPC array parity error", -1, 1 },
3056 { TNPP, "TXNP array parity error", -1, 1 },
3057 { TFTP, "TXFT array parity error", -1, 1 },
3058 { TCAP, "TXCA array parity error", -1, 1 },
3059 { TCIP, "TXCIF array parity error", -1, 1 },
3060 { RCAP, "RXCA array parity error", -1, 1 },
3061 { OTDD, "outbound request TLP discarded", -1, 1 },
3062 { RDPE, "Rx data parity error", -1, 1 },
3063 { TDUE, "Tx uncorrectable data error", -1, 1 },
3064 { 0, NULL, 0, 0 }
3065 };
3066 static struct intr_info pcie_intr_info[] = {
3067 { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
3068 { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
3069 { MSIDATAPERR, "MSI data parity error", -1, 1 },
3070 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
3071 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
3072 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
3073 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
3074 { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
3075 { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
3076 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
3077 { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
3078 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
3079 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
3080 { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
3081 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
3082 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
3083 { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
3084 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
3085 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
3086 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
3087 { FIDPERR, "PCI FID parity error", -1, 1 },
3088 { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
3089 { MATAGPERR, "PCI MA tag parity error", -1, 1 },
3090 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
3091 { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
3092 { RXWRPERR, "PCI Rx write parity error", -1, 1 },
3093 { RPLPERR, "PCI replay buffer parity error", -1, 1 },
3094 { PCIESINT, "PCI core secondary fault", -1, 1 },
3095 { PCIEPINT, "PCI core primary fault", -1, 1 },
3096 { UNXSPLCPLERR, "PCI unexpected split completion error", -1,
3097 0 },
3098 { 0, NULL, 0, 0 }
3099 };
3100
3101 int fat;
3102
3103 fat = csio_handle_intr_status(hw,
3104 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
3105 sysbus_intr_info) +
3106 csio_handle_intr_status(hw,
3107 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
3108 pcie_port_intr_info) +
3109 csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info);
3110 if (fat)
3111 csio_hw_fatal_err(hw);
3112}
3113
3114/*
3115 * TP interrupt handler. 2788 * TP interrupt handler.
3116 */ 2789 */
3117static void csio_tp_intr_handler(struct csio_hw *hw) 2790static void csio_tp_intr_handler(struct csio_hw *hw)
@@ -3517,7 +3190,7 @@ static void csio_ncsi_intr_handler(struct csio_hw *hw)
3517 */ 3190 */
3518static void csio_xgmac_intr_handler(struct csio_hw *hw, int port) 3191static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
3519{ 3192{
3520 uint32_t v = csio_rd_reg32(hw, PORT_REG(port, XGMAC_PORT_INT_CAUSE)); 3193 uint32_t v = csio_rd_reg32(hw, CSIO_MAC_INT_CAUSE_REG(hw, port));
3521 3194
3522 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR; 3195 v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
3523 if (!v) 3196 if (!v)
@@ -3527,7 +3200,7 @@ static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
3527 csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port); 3200 csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port);
3528 if (v & RXFIFO_PRTY_ERR) 3201 if (v & RXFIFO_PRTY_ERR)
3529 csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port); 3202 csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port);
3530 csio_wr_reg32(hw, v, PORT_REG(port, XGMAC_PORT_INT_CAUSE)); 3203 csio_wr_reg32(hw, v, CSIO_MAC_INT_CAUSE_REG(hw, port));
3531 csio_hw_fatal_err(hw); 3204 csio_hw_fatal_err(hw);
3532} 3205}
3533 3206
@@ -3596,7 +3269,7 @@ csio_hw_slow_intr_handler(struct csio_hw *hw)
3596 csio_xgmac_intr_handler(hw, 3); 3269 csio_xgmac_intr_handler(hw, 3);
3597 3270
3598 if (cause & PCIE) 3271 if (cause & PCIE)
3599 csio_pcie_intr_handler(hw); 3272 hw->chip_ops->chip_pcie_intr_handler(hw);
3600 3273
3601 if (cause & MC) 3274 if (cause & MC)
3602 csio_mem_intr_handler(hw, MEM_MC); 3275 csio_mem_intr_handler(hw, MEM_MC);
@@ -4262,6 +3935,7 @@ csio_hw_get_device_id(struct csio_hw *hw)
4262 &hw->params.pci.device_id); 3935 &hw->params.pci.device_id);
4263 3936
4264 csio_dev_id_cached(hw); 3937 csio_dev_id_cached(hw);
3938 hw->chip_id = (hw->params.pci.device_id & CSIO_HW_CHIP_MASK);
4265 3939
4266} /* csio_hw_get_device_id */ 3940} /* csio_hw_get_device_id */
4267 3941
@@ -4280,19 +3954,21 @@ csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id)
4280 prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK); 3954 prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK);
4281 adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK); 3955 adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK);
4282 3956
4283 if (prot_type == CSIO_FPGA) { 3957 if (prot_type == CSIO_T4_FCOE_ASIC) {
3958 memcpy(hw->hw_ver,
3959 csio_t4_fcoe_adapters[adap_type].model_no, 16);
4284 memcpy(hw->model_desc, 3960 memcpy(hw->model_desc,
4285 csio_fcoe_adapters[13].description, 32); 3961 csio_t4_fcoe_adapters[adap_type].description,
4286 } else if (prot_type == CSIO_T4_FCOE_ASIC) { 3962 32);
3963 } else if (prot_type == CSIO_T5_FCOE_ASIC) {
4287 memcpy(hw->hw_ver, 3964 memcpy(hw->hw_ver,
4288 csio_fcoe_adapters[adap_type].model_no, 16); 3965 csio_t5_fcoe_adapters[adap_type].model_no, 16);
4289 memcpy(hw->model_desc, 3966 memcpy(hw->model_desc,
4290 csio_fcoe_adapters[adap_type].description, 32); 3967 csio_t5_fcoe_adapters[adap_type].description,
3968 32);
4291 } else { 3969 } else {
4292 char tempName[32] = "Chelsio FCoE Controller"; 3970 char tempName[32] = "Chelsio FCoE Controller";
4293 memcpy(hw->model_desc, tempName, 32); 3971 memcpy(hw->model_desc, tempName, 32);
4294
4295 CSIO_DB_ASSERT(0);
4296 } 3972 }
4297 } 3973 }
4298} /* csio_hw_set_description */ 3974} /* csio_hw_set_description */
@@ -4321,6 +3997,9 @@ csio_hw_init(struct csio_hw *hw)
4321 3997
4322 strcpy(hw->name, CSIO_HW_NAME); 3998 strcpy(hw->name, CSIO_HW_NAME);
4323 3999
4000 /* Initialize the HW chip ops with T4/T5 specific ops */
4001 hw->chip_ops = csio_is_t4(hw->chip_id) ? &t4_ops : &t5_ops;
4002
4324 /* Set the model & its description */ 4003 /* Set the model & its description */
4325 4004
4326 ven_id = hw->params.pci.vendor_id; 4005 ven_id = hw->params.pci.vendor_id;
diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h
index 9edcca4c71af..489fc095cb03 100644
--- a/drivers/scsi/csiostor/csio_hw.h
+++ b/drivers/scsi/csiostor/csio_hw.h
@@ -48,6 +48,7 @@
48#include <scsi/scsi_device.h> 48#include <scsi/scsi_device.h>
49#include <scsi/scsi_transport_fc.h> 49#include <scsi/scsi_transport_fc.h>
50 50
51#include "csio_hw_chip.h"
51#include "csio_wr.h" 52#include "csio_wr.h"
52#include "csio_mb.h" 53#include "csio_mb.h"
53#include "csio_scsi.h" 54#include "csio_scsi.h"
@@ -60,13 +61,6 @@
60 */ 61 */
61#define FW_HOSTERROR 255 62#define FW_HOSTERROR 255
62 63
63#define CSIO_FW_FNAME "cxgb4/t4fw.bin"
64#define CSIO_CF_FNAME "cxgb4/t4-config.txt"
65
66#define FW_VERSION_MAJOR 1
67#define FW_VERSION_MINOR 2
68#define FW_VERSION_MICRO 8
69
70#define CSIO_HW_NAME "Chelsio FCoE Adapter" 64#define CSIO_HW_NAME "Chelsio FCoE Adapter"
71#define CSIO_MAX_PFN 8 65#define CSIO_MAX_PFN 8
72#define CSIO_MAX_PPORTS 4 66#define CSIO_MAX_PPORTS 4
@@ -123,8 +117,6 @@ extern int csio_msi;
123#define CSIO_VENDOR_ID 0x1425 117#define CSIO_VENDOR_ID 0x1425
124#define CSIO_ASIC_DEVID_PROTO_MASK 0xFF00 118#define CSIO_ASIC_DEVID_PROTO_MASK 0xFF00
125#define CSIO_ASIC_DEVID_TYPE_MASK 0x00FF 119#define CSIO_ASIC_DEVID_TYPE_MASK 0x00FF
126#define CSIO_FPGA 0xA000
127#define CSIO_T4_FCOE_ASIC 0x4600
128 120
129#define CSIO_GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \ 121#define CSIO_GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
130 EDC1 | LE | TP | MA | PM_TX | PM_RX | \ 122 EDC1 | LE | TP | MA | PM_TX | PM_RX | \
@@ -207,17 +199,6 @@ enum {
207 SF_SIZE = SF_SEC_SIZE * 16, /* serial flash size */ 199 SF_SIZE = SF_SEC_SIZE * 16, /* serial flash size */
208}; 200};
209 201
210enum { MEM_EDC0, MEM_EDC1, MEM_MC };
211
212enum {
213 MEMWIN0_APERTURE = 2048,
214 MEMWIN0_BASE = 0x1b800,
215 MEMWIN1_APERTURE = 32768,
216 MEMWIN1_BASE = 0x28000,
217 MEMWIN2_APERTURE = 65536,
218 MEMWIN2_BASE = 0x30000,
219};
220
221/* serial flash and firmware constants */ 202/* serial flash and firmware constants */
222enum { 203enum {
223 SF_ATTEMPTS = 10, /* max retries for SF operations */ 204 SF_ATTEMPTS = 10, /* max retries for SF operations */
@@ -239,9 +220,6 @@ enum {
239 FLASH_CFG_MAX_SIZE = 0x10000 , /* max size of the flash config file*/ 220 FLASH_CFG_MAX_SIZE = 0x10000 , /* max size of the flash config file*/
240 FLASH_CFG_OFFSET = 0x1f0000, 221 FLASH_CFG_OFFSET = 0x1f0000,
241 FLASH_CFG_START_SEC = FLASH_CFG_OFFSET / SF_SEC_SIZE, 222 FLASH_CFG_START_SEC = FLASH_CFG_OFFSET / SF_SEC_SIZE,
242 FPGA_FLASH_CFG_OFFSET = 0xf0000 , /* if FPGA mode, then cfg file is
243 * at 1MB - 64KB */
244 FPGA_FLASH_CFG_START_SEC = FPGA_FLASH_CFG_OFFSET / SF_SEC_SIZE,
245}; 223};
246 224
247/* 225/*
@@ -259,6 +237,8 @@ enum {
259 FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC), 237 FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
260 FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS), 238 FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
261 239
240 /* Location of Firmware Configuration File in FLASH. */
241 FLASH_CFG_START = FLASH_START(FLASH_CFG_START_SEC),
262}; 242};
263 243
264#undef FLASH_START 244#undef FLASH_START
@@ -310,7 +290,7 @@ struct csio_adap_desc {
310struct pci_params { 290struct pci_params {
311 uint16_t vendor_id; 291 uint16_t vendor_id;
312 uint16_t device_id; 292 uint16_t device_id;
313 uint32_t vpd_cap_addr; 293 int vpd_cap_addr;
314 uint16_t speed; 294 uint16_t speed;
315 uint8_t width; 295 uint8_t width;
316}; 296};
@@ -513,6 +493,7 @@ struct csio_hw {
513 uint32_t fwrev; 493 uint32_t fwrev;
514 uint32_t tp_vers; 494 uint32_t tp_vers;
515 char chip_ver; 495 char chip_ver;
496 uint16_t chip_id; /* Tells T4/T5 chip */
516 uint32_t cfg_finiver; 497 uint32_t cfg_finiver;
517 uint32_t cfg_finicsum; 498 uint32_t cfg_finicsum;
518 uint32_t cfg_cfcsum; 499 uint32_t cfg_cfcsum;
@@ -556,6 +537,9 @@ struct csio_hw {
556 */ 537 */
557 538
558 struct csio_fcoe_res_info fres_info; /* Fcoe resource info */ 539 struct csio_fcoe_res_info fres_info; /* Fcoe resource info */
540 struct csio_hw_chip_ops *chip_ops; /* T4/T5 Chip specific
541 * Operations
542 */
559 543
560 /* MSIX vectors */ 544 /* MSIX vectors */
561 struct csio_msix_entries msix_entries[CSIO_MAX_MSIX_VECS]; 545 struct csio_msix_entries msix_entries[CSIO_MAX_MSIX_VECS];
@@ -636,9 +620,16 @@ csio_us_to_core_ticks(struct csio_hw *hw, uint32_t us)
636#define csio_dbg(__hw, __fmt, ...) 620#define csio_dbg(__hw, __fmt, ...)
637#endif 621#endif
638 622
623int csio_hw_wait_op_done_val(struct csio_hw *, int, uint32_t, int,
624 int, int, uint32_t *);
625void csio_hw_tp_wr_bits_indirect(struct csio_hw *, unsigned int,
626 unsigned int, unsigned int);
639int csio_mgmt_req_lookup(struct csio_mgmtm *, struct csio_ioreq *); 627int csio_mgmt_req_lookup(struct csio_mgmtm *, struct csio_ioreq *);
640void csio_hw_intr_disable(struct csio_hw *); 628void csio_hw_intr_disable(struct csio_hw *);
641int csio_hw_slow_intr_handler(struct csio_hw *hw); 629int csio_hw_slow_intr_handler(struct csio_hw *);
630int csio_handle_intr_status(struct csio_hw *, unsigned int,
631 const struct intr_info *);
632
642int csio_hw_start(struct csio_hw *); 633int csio_hw_start(struct csio_hw *);
643int csio_hw_stop(struct csio_hw *); 634int csio_hw_stop(struct csio_hw *);
644int csio_hw_reset(struct csio_hw *); 635int csio_hw_reset(struct csio_hw *);
@@ -647,19 +638,17 @@ int csio_is_hw_removing(struct csio_hw *);
647 638
648int csio_fwevtq_handler(struct csio_hw *); 639int csio_fwevtq_handler(struct csio_hw *);
649void csio_evtq_worker(struct work_struct *); 640void csio_evtq_worker(struct work_struct *);
650int csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type, 641int csio_enqueue_evt(struct csio_hw *, enum csio_evt, void *, uint16_t);
651 void *evt_msg, uint16_t len);
652void csio_evtq_flush(struct csio_hw *hw); 642void csio_evtq_flush(struct csio_hw *hw);
653 643
654int csio_request_irqs(struct csio_hw *); 644int csio_request_irqs(struct csio_hw *);
655void csio_intr_enable(struct csio_hw *); 645void csio_intr_enable(struct csio_hw *);
656void csio_intr_disable(struct csio_hw *, bool); 646void csio_intr_disable(struct csio_hw *, bool);
647void csio_hw_fatal_err(struct csio_hw *);
657 648
658struct csio_lnode *csio_lnode_alloc(struct csio_hw *); 649struct csio_lnode *csio_lnode_alloc(struct csio_hw *);
659int csio_config_queues(struct csio_hw *); 650int csio_config_queues(struct csio_hw *);
660 651
661int csio_hw_mc_read(struct csio_hw *, uint32_t, __be32 *, uint64_t *);
662int csio_hw_edc_read(struct csio_hw *, int, uint32_t, __be32 *, uint64_t *);
663int csio_hw_init(struct csio_hw *); 652int csio_hw_init(struct csio_hw *);
664void csio_hw_exit(struct csio_hw *); 653void csio_hw_exit(struct csio_hw *);
665#endif /* ifndef __CSIO_HW_H__ */ 654#endif /* ifndef __CSIO_HW_H__ */
diff --git a/drivers/scsi/csiostor/csio_hw_chip.h b/drivers/scsi/csiostor/csio_hw_chip.h
new file mode 100644
index 000000000000..bca0de61ae80
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_hw_chip.h
@@ -0,0 +1,175 @@
1/*
2 * This file is part of the Chelsio FCoE driver for Linux.
3 *
4 * Copyright (c) 2008-2013 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#ifndef __CSIO_HW_CHIP_H__
35#define __CSIO_HW_CHIP_H__
36
37#include "csio_defs.h"
38
39/* FCoE device IDs for T4 */
40#define CSIO_DEVID_T440DBG_FCOE 0x4600
41#define CSIO_DEVID_T420CR_FCOE 0x4601
42#define CSIO_DEVID_T422CR_FCOE 0x4602
43#define CSIO_DEVID_T440CR_FCOE 0x4603
44#define CSIO_DEVID_T420BCH_FCOE 0x4604
45#define CSIO_DEVID_T440BCH_FCOE 0x4605
46#define CSIO_DEVID_T440CH_FCOE 0x4606
47#define CSIO_DEVID_T420SO_FCOE 0x4607
48#define CSIO_DEVID_T420CX_FCOE 0x4608
49#define CSIO_DEVID_T420BT_FCOE 0x4609
50#define CSIO_DEVID_T404BT_FCOE 0x460A
51#define CSIO_DEVID_B420_FCOE 0x460B
52#define CSIO_DEVID_B404_FCOE 0x460C
53#define CSIO_DEVID_T480CR_FCOE 0x460D
54#define CSIO_DEVID_T440LPCR_FCOE 0x460E
55#define CSIO_DEVID_AMSTERDAM_T4_FCOE 0x460F
56#define CSIO_DEVID_HUAWEI_T480_FCOE 0x4680
57#define CSIO_DEVID_HUAWEI_T440_FCOE 0x4681
58#define CSIO_DEVID_HUAWEI_STG310_FCOE 0x4682
59#define CSIO_DEVID_ACROMAG_XMC_XAUI 0x4683
60#define CSIO_DEVID_ACROMAG_XMC_SFP_FCOE 0x4684
61#define CSIO_DEVID_QUANTA_MEZZ_SFP_FCOE 0x4685
62#define CSIO_DEVID_HUAWEI_10GT_FCOE 0x4686
63#define CSIO_DEVID_HUAWEI_T440_TOE_FCOE 0x4687
64
65/* FCoE device IDs for T5 */
66#define CSIO_DEVID_T580DBG_FCOE 0x5600
67#define CSIO_DEVID_T520CR_FCOE 0x5601
68#define CSIO_DEVID_T522CR_FCOE 0x5602
69#define CSIO_DEVID_T540CR_FCOE 0x5603
70#define CSIO_DEVID_T520BCH_FCOE 0x5604
71#define CSIO_DEVID_T540BCH_FCOE 0x5605
72#define CSIO_DEVID_T540CH_FCOE 0x5606
73#define CSIO_DEVID_T520SO_FCOE 0x5607
74#define CSIO_DEVID_T520CX_FCOE 0x5608
75#define CSIO_DEVID_T520BT_FCOE 0x5609
76#define CSIO_DEVID_T504BT_FCOE 0x560A
77#define CSIO_DEVID_B520_FCOE 0x560B
78#define CSIO_DEVID_B504_FCOE 0x560C
79#define CSIO_DEVID_T580CR2_FCOE 0x560D
80#define CSIO_DEVID_T540LPCR_FCOE 0x560E
81#define CSIO_DEVID_AMSTERDAM_T5_FCOE 0x560F
82#define CSIO_DEVID_T580LPCR_FCOE 0x5610
83#define CSIO_DEVID_T520LLCR_FCOE 0x5611
84#define CSIO_DEVID_T560CR_FCOE 0x5612
85#define CSIO_DEVID_T580CR_FCOE 0x5613
86
87/* Define MACRO values */
88#define CSIO_HW_T4 0x4000
89#define CSIO_T4_FCOE_ASIC 0x4600
90#define CSIO_HW_T5 0x5000
91#define CSIO_T5_FCOE_ASIC 0x5600
92#define CSIO_HW_CHIP_MASK 0xF000
93#define T4_REGMAP_SIZE (160 * 1024)
94#define T5_REGMAP_SIZE (332 * 1024)
95#define FW_FNAME_T4 "cxgb4/t4fw.bin"
96#define FW_FNAME_T5 "cxgb4/t5fw.bin"
97#define FW_CFG_NAME_T4 "cxgb4/t4-config.txt"
98#define FW_CFG_NAME_T5 "cxgb4/t5-config.txt"
99
100/* Define static functions */
101static inline int csio_is_t4(uint16_t chip)
102{
103 return (chip == CSIO_HW_T4);
104}
105
106static inline int csio_is_t5(uint16_t chip)
107{
108 return (chip == CSIO_HW_T5);
109}
110
111/* Define MACRO DEFINITIONS */
112#define CSIO_DEVICE(devid, idx) \
113 { PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) }
114
115#define CSIO_HW_PIDX(hw, index) \
116 (csio_is_t4(hw->chip_id) ? (PIDX(index)) : \
117 (PIDX_T5(index) | DBTYPE(1U)))
118
119#define CSIO_HW_LP_INT_THRESH(hw, val) \
120 (csio_is_t4(hw->chip_id) ? (LP_INT_THRESH(val)) : \
121 (V_LP_INT_THRESH_T5(val)))
122
123#define CSIO_HW_M_LP_INT_THRESH(hw) \
124 (csio_is_t4(hw->chip_id) ? (LP_INT_THRESH_MASK) : (M_LP_INT_THRESH_T5))
125
126#define CSIO_MAC_INT_CAUSE_REG(hw, port) \
127 (csio_is_t4(hw->chip_id) ? (PORT_REG(port, XGMAC_PORT_INT_CAUSE)) : \
128 (T5_PORT_REG(port, MAC_PORT_INT_CAUSE)))
129
130#define FW_VERSION_MAJOR(hw) (csio_is_t4(hw->chip_id) ? 1 : 0)
131#define FW_VERSION_MINOR(hw) (csio_is_t4(hw->chip_id) ? 2 : 0)
132#define FW_VERSION_MICRO(hw) (csio_is_t4(hw->chip_id) ? 8 : 0)
133
134#define CSIO_FW_FNAME(hw) \
135 (csio_is_t4(hw->chip_id) ? FW_FNAME_T4 : FW_FNAME_T5)
136
137#define CSIO_CF_FNAME(hw) \
138 (csio_is_t4(hw->chip_id) ? FW_CFG_NAME_T4 : FW_CFG_NAME_T5)
139
140/* Declare ENUMS */
141enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1 };
142
143enum {
144 MEMWIN_APERTURE = 2048,
145 MEMWIN_BASE = 0x1b800,
146 MEMWIN_CSIOSTOR = 6, /* PCI-e Memory Window access */
147};
148
149/* Slow path handlers */
150struct intr_info {
151 unsigned int mask; /* bits to check in interrupt status */
152 const char *msg; /* message to print or NULL */
153 short stat_idx; /* stat counter to increment or -1 */
154 unsigned short fatal; /* whether the condition reported is fatal */
155};
156
157/* T4/T5 Chip specific ops */
158struct csio_hw;
159struct csio_hw_chip_ops {
160 int (*chip_set_mem_win)(struct csio_hw *, uint32_t);
161 void (*chip_pcie_intr_handler)(struct csio_hw *);
162 uint32_t (*chip_flash_cfg_addr)(struct csio_hw *);
163 int (*chip_mc_read)(struct csio_hw *, int, uint32_t,
164 __be32 *, uint64_t *);
165 int (*chip_edc_read)(struct csio_hw *, int, uint32_t,
166 __be32 *, uint64_t *);
167 int (*chip_memory_rw)(struct csio_hw *, u32, int, u32,
168 u32, uint32_t *, int);
169 void (*chip_dfs_create_ext_mem)(struct csio_hw *);
170};
171
172extern struct csio_hw_chip_ops t4_ops;
173extern struct csio_hw_chip_ops t5_ops;
174
175#endif /* #ifndef __CSIO_HW_CHIP_H__ */
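
The ops table above reduces the T4/T5 differences to a per-chip
dispatch. A minimal sketch of how the rest of this patch binds and uses
it, mirroring the csio_hw.c hunks (error handling elided):

static void example_chip_dispatch(struct csio_hw *hw)
{
	/* Bind the ops table once the device ID is known ... */
	hw->chip_ops = csio_is_t4(hw->chip_id) ? &t4_ops : &t5_ops;

	/* ... then call through it instead of chip-specific helpers. */
	hw->chip_ops->chip_set_mem_win(hw, MEMWIN_CSIOSTOR);
}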
diff --git a/drivers/scsi/csiostor/csio_hw_t4.c b/drivers/scsi/csiostor/csio_hw_t4.c
new file mode 100644
index 000000000000..89ecbac5478f
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_hw_t4.c
@@ -0,0 +1,403 @@
1/*
2 * This file is part of the Chelsio FCoE driver for Linux.
3 *
4 * Copyright (c) 2008-2013 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
 16 * copyright notice, this list of conditions and the following
 17 * disclaimer.
 18 *
 19 * - Redistributions in binary form must reproduce the above
 20 * copyright notice, this list of conditions and the following
 21 * disclaimer in the documentation and/or other materials
 22 * provided with the distribution.
 23 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
23 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
24 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
25 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
26 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
27 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
28 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29 * SOFTWARE.
30 */
31
32#include "csio_hw.h"
33#include "csio_init.h"
34
35/*
36 * Return the specified PCI-E Configuration Space register from our Physical
37 * Function. We try first via a Firmware LDST Command since we prefer to let
38 * the firmware own all of these registers, but if that fails we go for it
39 * directly ourselves.
40 */
41static uint32_t
42csio_t4_read_pcie_cfg4(struct csio_hw *hw, int reg)
43{
44 u32 val = 0;
45 struct csio_mb *mbp;
46 int rv;
47 struct fw_ldst_cmd *ldst_cmd;
48
49 mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
50 if (!mbp) {
51 CSIO_INC_STATS(hw, n_err_nomem);
52 pci_read_config_dword(hw->pdev, reg, &val);
53 return val;
54 }
55
56 csio_mb_ldst(hw, mbp, CSIO_MB_DEFAULT_TMO, reg);
57 rv = csio_mb_issue(hw, mbp);
58
59 /*
 60	 * If the LDST Command succeeded, extract the returned register
 61	 * value. Otherwise read it directly ourselves.
62 */
63 if (rv == 0) {
64 ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb);
65 val = ntohl(ldst_cmd->u.pcie.data[0]);
66 } else
67 pci_read_config_dword(hw->pdev, reg, &val);
68
69 mempool_free(mbp, hw->mb_mempool);
70
71 return val;
72}
73
74static int
75csio_t4_set_mem_win(struct csio_hw *hw, uint32_t win)
76{
77 u32 bar0;
78 u32 mem_win_base;
79
80 /*
81 * Truncation intentional: we only read the bottom 32-bits of the
82 * 64-bit BAR0/BAR1 ... We use the hardware backdoor mechanism to
83 * read BAR0 instead of using pci_resource_start() because we could be
84 * operating from within a Virtual Machine which is trapping our
85 * accesses to our Configuration Space and we need to set up the PCI-E
86 * Memory Window decoders with the actual addresses which will be
87 * coming across the PCI-E link.
88 */
89 bar0 = csio_t4_read_pcie_cfg4(hw, PCI_BASE_ADDRESS_0);
90 bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
91
92 mem_win_base = bar0 + MEMWIN_BASE;
93
94 /*
95 * Set up memory window for accessing adapter memory ranges. (Read
96 * back MA register to ensure that changes propagate before we attempt
97 * to use the new values.)
98 */
99 csio_wr_reg32(hw, mem_win_base | BIR(0) |
100 WINDOW(ilog2(MEMWIN_APERTURE) - 10),
101 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
102 csio_rd_reg32(hw,
103 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
104 return 0;
105}
106
107/*
108 * Interrupt handler for the PCIE module.
109 */
110static void
111csio_t4_pcie_intr_handler(struct csio_hw *hw)
112{
113 static struct intr_info sysbus_intr_info[] = {
114 { RNPP, "RXNP array parity error", -1, 1 },
115 { RPCP, "RXPC array parity error", -1, 1 },
116 { RCIP, "RXCIF array parity error", -1, 1 },
117 { RCCP, "Rx completions control array parity error", -1, 1 },
118 { RFTP, "RXFT array parity error", -1, 1 },
119 { 0, NULL, 0, 0 }
120 };
121 static struct intr_info pcie_port_intr_info[] = {
122 { TPCP, "TXPC array parity error", -1, 1 },
123 { TNPP, "TXNP array parity error", -1, 1 },
124 { TFTP, "TXFT array parity error", -1, 1 },
125 { TCAP, "TXCA array parity error", -1, 1 },
126 { TCIP, "TXCIF array parity error", -1, 1 },
127 { RCAP, "RXCA array parity error", -1, 1 },
128 { OTDD, "outbound request TLP discarded", -1, 1 },
129 { RDPE, "Rx data parity error", -1, 1 },
130 { TDUE, "Tx uncorrectable data error", -1, 1 },
131 { 0, NULL, 0, 0 }
132 };
133
134 static struct intr_info pcie_intr_info[] = {
135 { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
136 { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
137 { MSIDATAPERR, "MSI data parity error", -1, 1 },
138 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
139 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
140 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
141 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
142 { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
143 { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
144 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
145 { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
146 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
147 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
148 { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
149 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
150 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
151 { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
152 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
153 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
154 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
155 { FIDPERR, "PCI FID parity error", -1, 1 },
156 { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
157 { MATAGPERR, "PCI MA tag parity error", -1, 1 },
158 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
159 { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
160 { RXWRPERR, "PCI Rx write parity error", -1, 1 },
161 { RPLPERR, "PCI replay buffer parity error", -1, 1 },
162 { PCIESINT, "PCI core secondary fault", -1, 1 },
163 { PCIEPINT, "PCI core primary fault", -1, 1 },
164 { UNXSPLCPLERR, "PCI unexpected split completion error", -1,
165 0 },
166 { 0, NULL, 0, 0 }
167 };
168
169 int fat;
170 fat = csio_handle_intr_status(hw,
171 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
172 sysbus_intr_info) +
173 csio_handle_intr_status(hw,
174 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
175 pcie_port_intr_info) +
176 csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info);
177 if (fat)
178 csio_hw_fatal_err(hw);
179}
180
181/*
182 * csio_t4_flash_cfg_addr - return the address of the flash configuration file
183 * @hw: the HW module
184 *
185 * Return the address within the flash where the Firmware Configuration
186 * File is stored.
187 */
188static unsigned int
189csio_t4_flash_cfg_addr(struct csio_hw *hw)
190{
191 return FLASH_CFG_OFFSET;
192}
193
194/*
195 * csio_t4_mc_read - read from MC through backdoor accesses
196 * @hw: the hw module
197 * @idx: not used for T4 adapter
198 * @addr: address of first byte requested
199 * @data: 64 bytes of data containing the requested address
200 * @ecc: where to store the corresponding 64-bit ECC word
201 *
202 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
203 * that covers the requested address @addr. If @ecc is not %NULL it
204 * is assigned the 64-bit ECC word for the read data.
205 */
206static int
207csio_t4_mc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
208 uint64_t *ecc)
209{
210 int i;
211
212 if (csio_rd_reg32(hw, MC_BIST_CMD) & START_BIST)
213 return -EBUSY;
214 csio_wr_reg32(hw, addr & ~0x3fU, MC_BIST_CMD_ADDR);
215 csio_wr_reg32(hw, 64, MC_BIST_CMD_LEN);
216 csio_wr_reg32(hw, 0xc, MC_BIST_DATA_PATTERN);
217 csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1),
218 MC_BIST_CMD);
219 i = csio_hw_wait_op_done_val(hw, MC_BIST_CMD, START_BIST,
220 0, 10, 1, NULL);
221 if (i)
222 return i;
223
224#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
225
226 for (i = 15; i >= 0; i--)
227 *data++ = htonl(csio_rd_reg32(hw, MC_DATA(i)));
228 if (ecc)
229 *ecc = csio_rd_reg64(hw, MC_DATA(16));
230#undef MC_DATA
231 return 0;
232}
233
234/*
235 * csio_t4_edc_read - read from EDC through backdoor accesses
236 * @hw: the hw module
237 * @idx: which EDC to access
238 * @addr: address of first byte requested
239 * @data: 64 bytes of data containing the requested address
240 * @ecc: where to store the corresponding 64-bit ECC word
241 *
242 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
243 * that covers the requested address @addr. If @ecc is not %NULL it
244 * is assigned the 64-bit ECC word for the read data.
245 */
246static int
247csio_t4_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
248 uint64_t *ecc)
249{
250 int i;
251
252 idx *= EDC_STRIDE;
253 if (csio_rd_reg32(hw, EDC_BIST_CMD + idx) & START_BIST)
254 return -EBUSY;
255 csio_wr_reg32(hw, addr & ~0x3fU, EDC_BIST_CMD_ADDR + idx);
256 csio_wr_reg32(hw, 64, EDC_BIST_CMD_LEN + idx);
257 csio_wr_reg32(hw, 0xc, EDC_BIST_DATA_PATTERN + idx);
258 csio_wr_reg32(hw, BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST,
259 EDC_BIST_CMD + idx);
260 i = csio_hw_wait_op_done_val(hw, EDC_BIST_CMD + idx, START_BIST,
261 0, 10, 1, NULL);
262 if (i)
263 return i;
264
265#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
266
267 for (i = 15; i >= 0; i--)
268 *data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i)));
269 if (ecc)
270 *ecc = csio_rd_reg64(hw, EDC_DATA(16));
271#undef EDC_DATA
272 return 0;
273}
274
275/*
276 * csio_t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
277 * @hw: the csio_hw
278 * @win: PCI-E memory Window to use
279 * @mtype: memory type: MEM_EDC0, MEM_EDC1, MEM_MC0 (or MEM_MC) or MEM_MC1
280 * @addr: address within indicated memory type
281 * @len: amount of memory to transfer
282 * @buf: host memory buffer
283 * @dir: direction of transfer 1 => read, 0 => write
284 *
285 * Reads/writes an [almost] arbitrary memory region in the firmware: the
286 * firmware memory address, length and host buffer must be aligned on
287 * 32-bit boundaries. The memory is transferred as a raw byte sequence
288 * from/to the firmware's memory. If this memory contains data
289 * structures which contain multi-byte integers, it's the caller's
290 * responsibility to perform appropriate byte order conversions.
291 */
292static int
293csio_t4_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
294 u32 len, uint32_t *buf, int dir)
295{
296 u32 pos, start, offset, memoffset, bar0;
297 u32 edc_size, mc_size, mem_reg, mem_aperture, mem_base;
298
299 /*
300 * Argument sanity checks ...
301 */
302 if ((addr & 0x3) || (len & 0x3))
303 return -EINVAL;
304
305 /* Offset into the region of memory which is being accessed
306 * MEM_EDC0 = 0
307 * MEM_EDC1 = 1
308 * MEM_MC = 2 -- T4
309 */
310 edc_size = EDRAM_SIZE_GET(csio_rd_reg32(hw, MA_EDRAM0_BAR));
311 if (mtype != MEM_MC1)
312 memoffset = (mtype * (edc_size * 1024 * 1024));
313 else {
314 mc_size = EXT_MEM_SIZE_GET(csio_rd_reg32(hw,
315 MA_EXT_MEMORY_BAR));
316 memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
317 }
318
319 /* Determine the PCIE_MEM_ACCESS_OFFSET */
320 addr = addr + memoffset;
321
322 /*
323 * Each PCI-E Memory Window is programmed with a window size -- or
324 * "aperture" -- which controls the granularity of its mapping onto
325 * adapter memory. We need to grab that aperture in order to know
326 * how to use the specified window. The window is also programmed
327 * with the base address of the Memory Window in BAR0's address
328 * space. For T4 this is an absolute PCI-E Bus Address. For T5
329 * the address is relative to BAR0.
330 */
331 mem_reg = csio_rd_reg32(hw,
332 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
333 mem_aperture = 1 << (WINDOW(mem_reg) + 10);
334 mem_base = GET_PCIEOFST(mem_reg) << 10;
335
336 bar0 = csio_t4_read_pcie_cfg4(hw, PCI_BASE_ADDRESS_0);
337 bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
338 mem_base -= bar0;
339
340 start = addr & ~(mem_aperture-1);
341 offset = addr - start;
342
343 csio_dbg(hw, "csio_t4_memory_rw: mem_reg: 0x%x, mem_aperture: 0x%x\n",
344 mem_reg, mem_aperture);
345 csio_dbg(hw, "csio_t4_memory_rw: mem_base: 0x%x, mem_offset: 0x%x\n",
346 mem_base, memoffset);
347 csio_dbg(hw, "csio_t4_memory_rw: bar0: 0x%x, start:0x%x, offset:0x%x\n",
348 bar0, start, offset);
349 csio_dbg(hw, "csio_t4_memory_rw: mtype: %d, addr: 0x%x, len: %d\n",
350 mtype, addr, len);
351
352 for (pos = start; len > 0; pos += mem_aperture, offset = 0) {
353 /*
354 * Move PCI-E Memory Window to our current transfer
355 * position. Read it back to ensure that changes propagate
356 * before we attempt to use the new value.
357 */
358 csio_wr_reg32(hw, pos,
359 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
360 csio_rd_reg32(hw,
361 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
362
363 while (offset < mem_aperture && len > 0) {
364 if (dir)
365 *buf++ = csio_rd_reg32(hw, mem_base + offset);
366 else
367 csio_wr_reg32(hw, *buf++, mem_base + offset);
368
369 offset += sizeof(__be32);
370 len -= sizeof(__be32);
371 }
372 }
373 return 0;
374}
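
A standalone sketch of the memoffset arithmetic above, assuming both
EDCs decode to the same size in MB (as this routine does); the helper
and parameter names are hypothetical:

static u32 example_memoffset(int mtype, u32 edc_mb, u32 mc0_mb)
{
	if (mtype != MEM_MC1)	/* MEM_EDC0 = 0, MEM_EDC1 = 1, MEM_MC0 = 2 */
		return mtype * (edc_mb * 1024 * 1024);
	/* MEM_MC1 sits past both EDC regions and MC0 */
	return (MEM_MC0 * edc_mb + mc0_mb) * 1024 * 1024;
}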
375
376/*
377 * csio_t4_dfs_create_ext_mem - setup debugfs for MC to read the values
378 * @hw: the csio_hw
379 *
380 * This function creates files in the debugfs with external memory region MC.
381 */
382static void
383csio_t4_dfs_create_ext_mem(struct csio_hw *hw)
384{
385 u32 size;
386 int i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE);
387 if (i & EXT_MEM_ENABLE) {
388 size = csio_rd_reg32(hw, MA_EXT_MEMORY_BAR);
389 csio_add_debugfs_mem(hw, "mc", MEM_MC,
390 EXT_MEM_SIZE_GET(size));
391 }
392}
393
394/* T4 adapter specific function */
395struct csio_hw_chip_ops t4_ops = {
396 .chip_set_mem_win = csio_t4_set_mem_win,
397 .chip_pcie_intr_handler = csio_t4_pcie_intr_handler,
398 .chip_flash_cfg_addr = csio_t4_flash_cfg_addr,
399 .chip_mc_read = csio_t4_mc_read,
400 .chip_edc_read = csio_t4_edc_read,
401 .chip_memory_rw = csio_t4_memory_rw,
402 .chip_dfs_create_ext_mem = csio_t4_dfs_create_ext_mem,
403};
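
The WINDOW field programmed above stores ilog2(aperture) - 10, so the
aperture is recovered as 1 << (WINDOW(reg) + 10). A round-trip sketch
under that assumption (helper names hypothetical):

#include <stdint.h>

static uint32_t window_encode(uint32_t aperture)
{
	uint32_t v = 0;

	while ((1u << (v + 10)) < aperture)	/* ilog2(aperture) - 10 */
		v++;
	return v;
}

static uint32_t window_decode(uint32_t field)
{
	return 1u << (field + 10);	/* e.g. field 1 => 2 KB aperture */
}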
diff --git a/drivers/scsi/csiostor/csio_hw_t5.c b/drivers/scsi/csiostor/csio_hw_t5.c
new file mode 100644
index 000000000000..27745c170c24
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_hw_t5.c
@@ -0,0 +1,397 @@
1/*
2 * This file is part of the Chelsio FCoE driver for Linux.
3 *
4 * Copyright (c) 2008-2013 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 */
33
34#include "csio_hw.h"
35#include "csio_init.h"
36
37static int
38csio_t5_set_mem_win(struct csio_hw *hw, uint32_t win)
39{
40 u32 mem_win_base;
41 /*
42 * Truncation intentional: we only read the bottom 32-bits of the
43 * 64-bit BAR0/BAR1 ... We use the hardware backdoor mechanism to
44 * read BAR0 instead of using pci_resource_start() because we could be
45 * operating from within a Virtual Machine which is trapping our
46 * accesses to our Configuration Space and we need to set up the PCI-E
47 * Memory Window decoders with the actual addresses which will be
48 * coming across the PCI-E link.
49 */
50
51 /* For T5, only relative offset inside the PCIe BAR is passed */
52 mem_win_base = MEMWIN_BASE;
53
54 /*
55 * Set up memory window for accessing adapter memory ranges. (Read
56 * back MA register to ensure that changes propagate before we attempt
57 * to use the new values.)
58 */
59 csio_wr_reg32(hw, mem_win_base | BIR(0) |
60 WINDOW(ilog2(MEMWIN_APERTURE) - 10),
61 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
62 csio_rd_reg32(hw,
63 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
64
65 return 0;
66}
67
68/*
69 * Interrupt handler for the PCIE module.
70 */
71static void
72csio_t5_pcie_intr_handler(struct csio_hw *hw)
73{
74 static struct intr_info sysbus_intr_info[] = {
75 { RNPP, "RXNP array parity error", -1, 1 },
76 { RPCP, "RXPC array parity error", -1, 1 },
77 { RCIP, "RXCIF array parity error", -1, 1 },
78 { RCCP, "Rx completions control array parity error", -1, 1 },
79 { RFTP, "RXFT array parity error", -1, 1 },
80 { 0, NULL, 0, 0 }
81 };
82 static struct intr_info pcie_port_intr_info[] = {
83 { TPCP, "TXPC array parity error", -1, 1 },
84 { TNPP, "TXNP array parity error", -1, 1 },
85 { TFTP, "TXFT array parity error", -1, 1 },
86 { TCAP, "TXCA array parity error", -1, 1 },
87 { TCIP, "TXCIF array parity error", -1, 1 },
88 { RCAP, "RXCA array parity error", -1, 1 },
89 { OTDD, "outbound request TLP discarded", -1, 1 },
90 { RDPE, "Rx data parity error", -1, 1 },
91 { TDUE, "Tx uncorrectable data error", -1, 1 },
92 { 0, NULL, 0, 0 }
93 };
94
95 static struct intr_info pcie_intr_info[] = {
96 { MSTGRPPERR, "Master Response Read Queue parity error",
97 -1, 1 },
98 { MSTTIMEOUTPERR, "Master Timeout FIFO parity error", -1, 1 },
99 { MSIXSTIPERR, "MSI-X STI SRAM parity error", -1, 1 },
100 { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
101 { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
102 { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
103 { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
104 { PIOCPLGRPPERR, "PCI PIO completion Group FIFO parity error",
105 -1, 1 },
106 { PIOREQGRPPERR, "PCI PIO request Group FIFO parity error",
107 -1, 1 },
108 { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
109 { MSTTAGQPERR, "PCI master tag queue parity error", -1, 1 },
110 { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
111 { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
112 { DREQWRPERR, "PCI DMA channel write request parity error",
113 -1, 1 },
114 { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
115 { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
116 { HREQWRPERR, "PCI HMA channel count parity error", -1, 1 },
117 { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
118 { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
119 { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
120 { FIDPERR, "PCI FID parity error", -1, 1 },
121 { VFIDPERR, "PCI INTx clear parity error", -1, 1 },
122 { MAGRPPERR, "PCI MA group FIFO parity error", -1, 1 },
123 { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
124 { IPRXHDRGRPPERR, "PCI IP Rx header group parity error",
125 -1, 1 },
126 { IPRXDATAGRPPERR, "PCI IP Rx data group parity error",
127 -1, 1 },
128 { RPLPERR, "PCI IP replay buffer parity error", -1, 1 },
129 { IPSOTPERR, "PCI IP SOT buffer parity error", -1, 1 },
130 { TRGT1GRPPERR, "PCI TRGT1 group FIFOs parity error", -1, 1 },
131 { READRSPERR, "Outbound read error", -1, 0 },
132 { 0, NULL, 0, 0 }
133 };
134
135 int fat;
136 fat = csio_handle_intr_status(hw,
137 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
138 sysbus_intr_info) +
139 csio_handle_intr_status(hw,
140 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
141 pcie_port_intr_info) +
142 csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info);
143 if (fat)
144 csio_hw_fatal_err(hw);
145}
146
147/*
148 * csio_t5_flash_cfg_addr - return the address of the flash configuration file
149 * @hw: the HW module
150 *
151 * Return the address within the flash where the Firmware Configuration
152 * File is stored.
153 */
154static unsigned int
155csio_t5_flash_cfg_addr(struct csio_hw *hw)
156{
157 return FLASH_CFG_START;
158}
159
160/*
161 * csio_t5_mc_read - read from MC through backdoor accesses
162 * @hw: the hw module
163 * @idx: index to the register
164 * @addr: address of first byte requested
165 * @data: 64 bytes of data containing the requested address
166 * @ecc: where to store the corresponding 64-bit ECC word
167 *
168 * Read 64 bytes of data from MC starting at a 64-byte-aligned address
169 * that covers the requested address @addr. If @ecc is not %NULL it
170 * is assigned the 64-bit ECC word for the read data.
171 */
172static int
173csio_t5_mc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
174 uint64_t *ecc)
175{
176 int i;
177 uint32_t mc_bist_cmd_reg, mc_bist_cmd_addr_reg, mc_bist_cmd_len_reg;
178 uint32_t mc_bist_status_rdata_reg, mc_bist_data_pattern_reg;
179
180 mc_bist_cmd_reg = MC_REG(MC_P_BIST_CMD, idx);
181 mc_bist_cmd_addr_reg = MC_REG(MC_P_BIST_CMD_ADDR, idx);
182 mc_bist_cmd_len_reg = MC_REG(MC_P_BIST_CMD_LEN, idx);
183 mc_bist_status_rdata_reg = MC_REG(MC_P_BIST_STATUS_RDATA, idx);
184 mc_bist_data_pattern_reg = MC_REG(MC_P_BIST_DATA_PATTERN, idx);
185
186 if (csio_rd_reg32(hw, mc_bist_cmd_reg) & START_BIST)
187 return -EBUSY;
188 csio_wr_reg32(hw, addr & ~0x3fU, mc_bist_cmd_addr_reg);
189 csio_wr_reg32(hw, 64, mc_bist_cmd_len_reg);
190 csio_wr_reg32(hw, 0xc, mc_bist_data_pattern_reg);
191 csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1),
192 mc_bist_cmd_reg);
193 i = csio_hw_wait_op_done_val(hw, mc_bist_cmd_reg, START_BIST,
194 0, 10, 1, NULL);
195 if (i)
196 return i;
197
198#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
199
200 for (i = 15; i >= 0; i--)
201 *data++ = htonl(csio_rd_reg32(hw, MC_DATA(i)));
202 if (ecc)
203 *ecc = csio_rd_reg64(hw, MC_DATA(16));
204#undef MC_DATA
205 return 0;
206}
207
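Editor's note: the backdoor read above always returns a whole 64-byte line, so a caller that wants a single word has to index into the covering line itself. A minimal sketch of such a caller; example_mc_read32 is hypothetical, and the assumption that the 16 words come back in increasing address order is noted in the code.

/*
 * Hypothetical caller (not part of the driver): read one 32-bit word
 * at an arbitrary MC byte address by fetching the covering 64-byte
 * line.  Assumes the 16 words in line[] are in increasing address
 * order; the hardware's actual layout may differ.
 */
static int example_mc_read32(struct csio_hw *hw, int idx, uint32_t addr,
                             uint32_t *val)
{
        __be32 line[16];                /* one 64-byte MC line */
        int ret;

        ret = csio_t5_mc_read(hw, idx, addr, line, NULL);
        if (ret)
                return ret;

        /* addr & 0x3f is the byte offset within the aligned line */
        *val = ntohl(line[(addr & 0x3f) >> 2]);
        return 0;
}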
208/*
209 * csio_t5_edc_read - read from EDC through backdoor accesses
210 * @hw: the hw module
211 * @idx: which EDC to access
212 * @addr: address of first byte requested
213 * @data: buffer for the 64 bytes of data covering the requested address
214 * @ecc: where to store the corresponding 64-bit ECC word
215 *
216 * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
217 * that covers the requested address @addr. If @ecc is not %NULL it
218 * is assigned the 64-bit ECC word for the read data.
219 */
220static int
221csio_t5_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
222 uint64_t *ecc)
223{
224 int i;
225 uint32_t edc_bist_cmd_reg, edc_bist_cmd_addr_reg, edc_bist_cmd_len_reg;
226 uint32_t edc_bist_cmd_data_pattern, edc_bist_status_rdata_reg;
227
228/*
229 * These macros are missing from t4_regs.h.
230 */
231#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
232#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
233
234 edc_bist_cmd_reg = EDC_REG_T5(EDC_H_BIST_CMD, idx);
235 edc_bist_cmd_addr_reg = EDC_REG_T5(EDC_H_BIST_CMD_ADDR, idx);
236 edc_bist_cmd_len_reg = EDC_REG_T5(EDC_H_BIST_CMD_LEN, idx);
237 edc_bist_cmd_data_pattern = EDC_REG_T5(EDC_H_BIST_DATA_PATTERN, idx);
238 edc_bist_status_rdata_reg = EDC_REG_T5(EDC_H_BIST_STATUS_RDATA, idx);
239#undef EDC_REG_T5
240#undef EDC_STRIDE_T5
241
242 if (csio_rd_reg32(hw, edc_bist_cmd_reg) & START_BIST)
243 return -EBUSY;
244 csio_wr_reg32(hw, addr & ~0x3fU, edc_bist_cmd_addr_reg);
245 csio_wr_reg32(hw, 64, edc_bist_cmd_len_reg);
246 csio_wr_reg32(hw, 0xc, edc_bist_cmd_data_pattern);
247 csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1),
248 edc_bist_cmd_reg);
249 i = csio_hw_wait_op_done_val(hw, edc_bist_cmd_reg, START_BIST,
250 0, 10, 1, NULL);
251 if (i)
252 return i;
253
254#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
255
256 for (i = 15; i >= 0; i--)
257 *data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i)));
258 if (ecc)
259 *ecc = csio_rd_reg64(hw, EDC_DATA(16));
260#undef EDC_DATA
261 return 0;
262}
263
264/*
265 * csio_t5_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
266 * @hw: the csio_hw
267 * @win: PCI-E memory Window to use
268 * @mtype: memory type: MEM_EDC0, MEM_EDC1, MEM_MC0 (or MEM_MC) or MEM_MC1
269 * @addr: address within indicated memory type
270 * @len: amount of memory to transfer
271 * @buf: host memory buffer
272 * @dir: direction of transfer: 1 => read, 0 => write
273 *
274 * Reads/writes an [almost] arbitrary memory region in the firmware: the
275 * firmware memory address, length and host buffer must be aligned on
276 * 32-bit boundaries. The memory is transferred as a raw byte sequence
277 * from/to the firmware's memory. If this memory contains data
278 * structures which contain multi-byte integers, it is the caller's
279 * responsibility to perform appropriate byte order conversions.
280 */
281static int
282csio_t5_memory_rw(struct csio_hw *hw, u32 win, int mtype, u32 addr,
283 u32 len, uint32_t *buf, int dir)
284{
285 u32 pos, start, offset, memoffset;
286 u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
287
288 /*
289 * Argument sanity checks ...
290 */
291 if ((addr & 0x3) || (len & 0x3))
292 return -EINVAL;
293
294 /* Offset into the region of memory which is being accessed
295 * MEM_EDC0 = 0
296 * MEM_EDC1 = 1
297 * MEM_MC = 2 -- T4
298 * MEM_MC0 = 2 -- For T5
299 * MEM_MC1 = 3 -- For T5
300 */
301 edc_size = EDRAM_SIZE_GET(csio_rd_reg32(hw, MA_EDRAM0_BAR));
302 if (mtype != MEM_MC1)
303 memoffset = (mtype * (edc_size * 1024 * 1024));
304 else {
305 mc_size = EXT_MEM_SIZE_GET(csio_rd_reg32(hw,
306 MA_EXT_MEMORY_BAR));
307 memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
308 }
309
310 /* Determine the PCIE_MEM_ACCESS_OFFSET */
311 addr = addr + memoffset;
312
313 /*
314 * Each PCI-E Memory Window is programmed with a window size -- or
315 * "aperture" -- which controls the granularity of its mapping onto
316 * adapter memory. We need to grab that aperture in order to know
317 * how to use the specified window. The window is also programmed
318 * with the base address of the Memory Window in BAR0's address
319 * space. For T4 this is an absolute PCI-E Bus Address. For T5
320 * the address is relative to BAR0.
321 */
322 mem_reg = csio_rd_reg32(hw,
323 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, win));
324 mem_aperture = 1 << (WINDOW(mem_reg) + 10);
325 mem_base = GET_PCIEOFST(mem_reg) << 10;
326
327 start = addr & ~(mem_aperture-1);
328 offset = addr - start;
329 win_pf = V_PFNUM(hw->pfn);
330
331 csio_dbg(hw, "csio_t5_memory_rw: mem_reg: 0x%x, mem_aperture: 0x%x\n",
332 mem_reg, mem_aperture);
333 csio_dbg(hw, "csio_t5_memory_rw: mem_base: 0x%x, mem_offset: 0x%x\n",
334 mem_base, memoffset);
335 csio_dbg(hw, "csio_t5_memory_rw: start:0x%x, offset:0x%x, win_pf:%d\n",
336 start, offset, win_pf);
337 csio_dbg(hw, "csio_t5_memory_rw: mtype: %d, addr: 0x%x, len: %d\n",
338 mtype, addr, len);
339
340 for (pos = start; len > 0; pos += mem_aperture, offset = 0) {
341 /*
342 * Move PCI-E Memory Window to our current transfer
343 * position. Read it back to ensure that changes propagate
344 * before we attempt to use the new value.
345 */
346 csio_wr_reg32(hw, pos | win_pf,
347 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
348 csio_rd_reg32(hw,
349 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
350
351 while (offset < mem_aperture && len > 0) {
352 if (dir)
353 *buf++ = csio_rd_reg32(hw, mem_base + offset);
354 else
355 csio_wr_reg32(hw, *buf++, mem_base + offset);
356
357 offset += sizeof(__be32);
358 len -= sizeof(__be32);
359 }
360 }
361 return 0;
362}
363
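Editor's note: the alignment contract in the doc-comment above is easy to get wrong from the caller's side. A hypothetical wrapper (example_read_edc0 is not a driver symbol) that enforces it before dispatching a read through memory window 0:

/*
 * Hypothetical wrapper (not a driver symbol): read a block of EDC0
 * through PCI-E memory window 0, enforcing the 32-bit alignment
 * contract documented above before calling csio_t5_memory_rw().
 */
static int example_read_edc0(struct csio_hw *hw, u32 addr, u32 len, u32 *buf)
{
        if ((addr & 0x3) || (len & 0x3) || ((unsigned long)buf & 0x3))
                return -EINVAL;

        /* dir == 1 selects a read, per the @dir documentation */
        return csio_t5_memory_rw(hw, 0, MEM_EDC0, addr, len, buf, 1);
}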
364/*
365 * csio_t5_dfs_create_ext_mem - set up debugfs entries for MC0 and MC1
366 * @hw: the csio_hw
367 *
368 * This function creates debugfs files for reading the external memory
369 * regions MC0 and MC1, when they are enabled.
370 */
371static void
372csio_t5_dfs_create_ext_mem(struct csio_hw *hw)
373{
374 u32 size;
375 int i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE);
376 if (i & EXT_MEM_ENABLE) {
377 size = csio_rd_reg32(hw, MA_EXT_MEMORY_BAR);
378 csio_add_debugfs_mem(hw, "mc0", MEM_MC0,
379 EXT_MEM_SIZE_GET(size));
380 }
381 if (i & EXT_MEM1_ENABLE) {
382 size = csio_rd_reg32(hw, MA_EXT_MEMORY1_BAR);
383 csio_add_debugfs_mem(hw, "mc1", MEM_MC1,
384 EXT_MEM_SIZE_GET(size));
385 }
386}
387
388/* T5 adapter-specific chip operations */
389struct csio_hw_chip_ops t5_ops = {
390 .chip_set_mem_win = csio_t5_set_mem_win,
391 .chip_pcie_intr_handler = csio_t5_pcie_intr_handler,
392 .chip_flash_cfg_addr = csio_t5_flash_cfg_addr,
393 .chip_mc_read = csio_t5_mc_read,
394 .chip_edc_read = csio_t5_edc_read,
395 .chip_memory_rw = csio_t5_memory_rw,
396 .chip_dfs_create_ext_mem = csio_t5_dfs_create_ext_mem,
397};
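Editor's note: this ops table decouples chip-independent code from the T4/T5 implementations; the csio_init.c hunk just below switches csio_mem_read() over to exactly this kind of indirection. A minimal sketch of the dispatch, assuming hw->chip_ops was pointed at t5_ops (or an assumed T4 counterpart, t4_ops) during device probe:

/*
 * Hypothetical dispatch sketch: chip-independent code selects the
 * right backdoor-read implementation through hw->chip_ops, which is
 * assumed to have been set to &t5_ops or &t4_ops at probe time.
 */
static int example_backdoor_read(struct csio_hw *hw, int mem, uint32_t pos,
                                 __be32 *data)
{
        if (mem == MEM_MC)
                return hw->chip_ops->chip_mc_read(hw, 0, pos, data, NULL);
        return hw->chip_ops->chip_edc_read(hw, mem, pos, data, NULL);
}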
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
index 0604b5ff3638..00346fe939d5 100644
--- a/drivers/scsi/csiostor/csio_init.c
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -81,9 +81,11 @@ csio_mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
81 __be32 data[16]; 81 __be32 data[16];
82 82
83 if (mem == MEM_MC) 83 if (mem == MEM_MC)
84 ret = csio_hw_mc_read(hw, pos, data, NULL); 84 ret = hw->chip_ops->chip_mc_read(hw, 0, pos,
85 data, NULL);
85 else 86 else
86 ret = csio_hw_edc_read(hw, mem, pos, data, NULL); 87 ret = hw->chip_ops->chip_edc_read(hw, mem, pos,
88 data, NULL);
87 if (ret) 89 if (ret)
88 return ret; 90 return ret;
89 91
@@ -108,7 +110,7 @@ static const struct file_operations csio_mem_debugfs_fops = {
108 .llseek = default_llseek, 110 .llseek = default_llseek,
109}; 111};
110 112
111static void csio_add_debugfs_mem(struct csio_hw *hw, const char *name, 113void csio_add_debugfs_mem(struct csio_hw *hw, const char *name,
112 unsigned int idx, unsigned int size_mb) 114 unsigned int idx, unsigned int size_mb)
113{ 115{
114 struct dentry *de; 116 struct dentry *de;
@@ -131,9 +133,8 @@ static int csio_setup_debugfs(struct csio_hw *hw)
131 csio_add_debugfs_mem(hw, "edc0", MEM_EDC0, 5); 133 csio_add_debugfs_mem(hw, "edc0", MEM_EDC0, 5);
132 if (i & EDRAM1_ENABLE) 134 if (i & EDRAM1_ENABLE)
133 csio_add_debugfs_mem(hw, "edc1", MEM_EDC1, 5); 135 csio_add_debugfs_mem(hw, "edc1", MEM_EDC1, 5);
134 if (i & EXT_MEM_ENABLE) 136
135 csio_add_debugfs_mem(hw, "mc", MEM_MC, 137 hw->chip_ops->chip_dfs_create_ext_mem(hw);
136 EXT_MEM_SIZE_GET(csio_rd_reg32(hw, MA_EXT_MEMORY_BAR)));
137 return 0; 138 return 0;
138} 139}
139 140
@@ -1169,7 +1170,7 @@ static struct pci_error_handlers csio_err_handler = {
1169}; 1170};
1170 1171
1171static DEFINE_PCI_DEVICE_TABLE(csio_pci_tbl) = { 1172static DEFINE_PCI_DEVICE_TABLE(csio_pci_tbl) = {
1172 CSIO_DEVICE(CSIO_DEVID_T440DBG_FCOE, 0), /* T440DBG FCOE */ 1173 CSIO_DEVICE(CSIO_DEVID_T440DBG_FCOE, 0), /* T4 DEBUG FCOE */
1173 CSIO_DEVICE(CSIO_DEVID_T420CR_FCOE, 0), /* T420CR FCOE */ 1174 CSIO_DEVICE(CSIO_DEVID_T420CR_FCOE, 0), /* T420CR FCOE */
1174 CSIO_DEVICE(CSIO_DEVID_T422CR_FCOE, 0), /* T422CR FCOE */ 1175 CSIO_DEVICE(CSIO_DEVID_T422CR_FCOE, 0), /* T422CR FCOE */
1175 CSIO_DEVICE(CSIO_DEVID_T440CR_FCOE, 0), /* T440CR FCOE */ 1176 CSIO_DEVICE(CSIO_DEVID_T440CR_FCOE, 0), /* T440CR FCOE */
@@ -1184,8 +1185,34 @@ static DEFINE_PCI_DEVICE_TABLE(csio_pci_tbl) = {
1184 CSIO_DEVICE(CSIO_DEVID_B404_FCOE, 0), /* B404 FCOE */ 1185 CSIO_DEVICE(CSIO_DEVID_B404_FCOE, 0), /* B404 FCOE */
1185 CSIO_DEVICE(CSIO_DEVID_T480CR_FCOE, 0), /* T480 CR FCOE */ 1186 CSIO_DEVICE(CSIO_DEVID_T480CR_FCOE, 0), /* T480 CR FCOE */
1186 CSIO_DEVICE(CSIO_DEVID_T440LPCR_FCOE, 0), /* T440 LP-CR FCOE */ 1187 CSIO_DEVICE(CSIO_DEVID_T440LPCR_FCOE, 0), /* T440 LP-CR FCOE */
1187 CSIO_DEVICE(CSIO_DEVID_PE10K, 0), /* PE10K FCOE */ 1188 CSIO_DEVICE(CSIO_DEVID_AMSTERDAM_T4_FCOE, 0), /* AMSTERDAM T4 FCOE */
1188 CSIO_DEVICE(CSIO_DEVID_PE10K_PF1, 0), /* PE10K FCOE on PF1 */ 1189 CSIO_DEVICE(CSIO_DEVID_HUAWEI_T480_FCOE, 0), /* HUAWEI T480 FCOE */
1190 CSIO_DEVICE(CSIO_DEVID_HUAWEI_T440_FCOE, 0), /* HUAWEI T440 FCOE */
1191 CSIO_DEVICE(CSIO_DEVID_HUAWEI_STG310_FCOE, 0), /* HUAWEI STG FCOE */
1192 CSIO_DEVICE(CSIO_DEVID_ACROMAG_XMC_XAUI, 0), /* ACROMAG XAUI FCOE */
1193 CSIO_DEVICE(CSIO_DEVID_QUANTA_MEZZ_SFP_FCOE, 0),/* QUANTA MEZZ FCOE */
1194 CSIO_DEVICE(CSIO_DEVID_HUAWEI_10GT_FCOE, 0), /* HUAWEI 10GT FCOE */
1195 CSIO_DEVICE(CSIO_DEVID_HUAWEI_T440_TOE_FCOE, 0),/* HUAWEI T4 TOE FCOE */
1196 CSIO_DEVICE(CSIO_DEVID_T580DBG_FCOE, 0), /* T5 DEBUG FCOE */
1197 CSIO_DEVICE(CSIO_DEVID_T520CR_FCOE, 0), /* T520CR FCOE */
1198 CSIO_DEVICE(CSIO_DEVID_T522CR_FCOE, 0), /* T522CR FCOE */
1199 CSIO_DEVICE(CSIO_DEVID_T540CR_FCOE, 0), /* T540CR FCOE */
1200 CSIO_DEVICE(CSIO_DEVID_T520BCH_FCOE, 0), /* T520BCH FCOE */
1201 CSIO_DEVICE(CSIO_DEVID_T540BCH_FCOE, 0), /* T540BCH FCOE */
1202 CSIO_DEVICE(CSIO_DEVID_T540CH_FCOE, 0), /* T540CH FCOE */
1203 CSIO_DEVICE(CSIO_DEVID_T520SO_FCOE, 0), /* T520SO FCOE */
1204 CSIO_DEVICE(CSIO_DEVID_T520CX_FCOE, 0), /* T520CX FCOE */
1205 CSIO_DEVICE(CSIO_DEVID_T520BT_FCOE, 0), /* T520BT FCOE */
1206 CSIO_DEVICE(CSIO_DEVID_T504BT_FCOE, 0), /* T504BT FCOE */
1207 CSIO_DEVICE(CSIO_DEVID_B520_FCOE, 0), /* B520 FCOE */
1208 CSIO_DEVICE(CSIO_DEVID_B504_FCOE, 0), /* B504 FCOE */
1209 CSIO_DEVICE(CSIO_DEVID_T580CR2_FCOE, 0), /* T580 CR FCOE */
1210 CSIO_DEVICE(CSIO_DEVID_T540LPCR_FCOE, 0), /* T540 LP-CR FCOE */
1211 CSIO_DEVICE(CSIO_DEVID_AMSTERDAM_T5_FCOE, 0), /* AMSTERDAM T5 FCOE */
1212 CSIO_DEVICE(CSIO_DEVID_T580LPCR_FCOE, 0), /* T580 LP-CR FCOE */
1213 CSIO_DEVICE(CSIO_DEVID_T520LLCR_FCOE, 0), /* T520 LL-CR FCOE */
1214 CSIO_DEVICE(CSIO_DEVID_T560CR_FCOE, 0), /* T560 CR FCOE */
1215 CSIO_DEVICE(CSIO_DEVID_T580CR_FCOE, 0), /* T580 CR FCOE */
1189 { 0, 0, 0, 0, 0, 0, 0 } 1216 { 0, 0, 0, 0, 0, 0, 0 }
1190}; 1217};
1191 1218
@@ -1259,4 +1286,5 @@ MODULE_DESCRIPTION(CSIO_DRV_DESC);
1259MODULE_LICENSE(CSIO_DRV_LICENSE); 1286MODULE_LICENSE(CSIO_DRV_LICENSE);
1260MODULE_DEVICE_TABLE(pci, csio_pci_tbl); 1287MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
1261MODULE_VERSION(CSIO_DRV_VERSION); 1288MODULE_VERSION(CSIO_DRV_VERSION);
1262MODULE_FIRMWARE(CSIO_FW_FNAME); 1289MODULE_FIRMWARE(FW_FNAME_T4);
1290MODULE_FIRMWARE(FW_FNAME_T5);
diff --git a/drivers/scsi/csiostor/csio_init.h b/drivers/scsi/csiostor/csio_init.h
index 0838fd7ec9c7..5cc5d317a442 100644
--- a/drivers/scsi/csiostor/csio_init.h
+++ b/drivers/scsi/csiostor/csio_init.h
@@ -52,31 +52,6 @@
52#define CSIO_DRV_DESC "Chelsio FCoE driver" 52#define CSIO_DRV_DESC "Chelsio FCoE driver"
53#define CSIO_DRV_VERSION "1.0.0" 53#define CSIO_DRV_VERSION "1.0.0"
54 54
55#define CSIO_DEVICE(devid, idx) \
56{ PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) }
57
58#define CSIO_IS_T4_FPGA(_dev) (((_dev) == CSIO_DEVID_PE10K) ||\
59 ((_dev) == CSIO_DEVID_PE10K_PF1))
60
61/* FCoE device IDs */
62#define CSIO_DEVID_PE10K 0xA000
63#define CSIO_DEVID_PE10K_PF1 0xA001
64#define CSIO_DEVID_T440DBG_FCOE 0x4600
65#define CSIO_DEVID_T420CR_FCOE 0x4601
66#define CSIO_DEVID_T422CR_FCOE 0x4602
67#define CSIO_DEVID_T440CR_FCOE 0x4603
68#define CSIO_DEVID_T420BCH_FCOE 0x4604
69#define CSIO_DEVID_T440BCH_FCOE 0x4605
70#define CSIO_DEVID_T440CH_FCOE 0x4606
71#define CSIO_DEVID_T420SO_FCOE 0x4607
72#define CSIO_DEVID_T420CX_FCOE 0x4608
73#define CSIO_DEVID_T420BT_FCOE 0x4609
74#define CSIO_DEVID_T404BT_FCOE 0x460A
75#define CSIO_DEVID_B420_FCOE 0x460B
76#define CSIO_DEVID_B404_FCOE 0x460C
77#define CSIO_DEVID_T480CR_FCOE 0x460D
78#define CSIO_DEVID_T440LPCR_FCOE 0x460E
79
80extern struct fc_function_template csio_fc_transport_funcs; 55extern struct fc_function_template csio_fc_transport_funcs;
81extern struct fc_function_template csio_fc_transport_vport_funcs; 56extern struct fc_function_template csio_fc_transport_vport_funcs;
82 57
@@ -100,6 +75,10 @@ struct csio_lnode *csio_shost_init(struct csio_hw *, struct device *, bool,
100void csio_shost_exit(struct csio_lnode *); 75void csio_shost_exit(struct csio_lnode *);
101void csio_lnodes_exit(struct csio_hw *, bool); 76void csio_lnodes_exit(struct csio_hw *, bool);
102 77
78/* DebugFS helper routines */
79void csio_add_debugfs_mem(struct csio_hw *, const char *,
80 unsigned int, unsigned int);
81
103static inline struct Scsi_Host * 82static inline struct Scsi_Host *
104csio_ln_to_shost(struct csio_lnode *ln) 83csio_ln_to_shost(struct csio_lnode *ln)
105{ 84{
diff --git a/drivers/scsi/csiostor/csio_lnode.h b/drivers/scsi/csiostor/csio_lnode.h
index 8d84988ab06d..0f9c04175b11 100644
--- a/drivers/scsi/csiostor/csio_lnode.h
+++ b/drivers/scsi/csiostor/csio_lnode.h
@@ -114,7 +114,7 @@ struct csio_lnode_stats {
114 uint32_t n_rnode_match; /* matched rnode */ 114 uint32_t n_rnode_match; /* matched rnode */
115 uint32_t n_dev_loss_tmo; /* Device loss timeout */ 115 uint32_t n_dev_loss_tmo; /* Device loss timeout */
116 uint32_t n_fdmi_err; /* fdmi err */ 116 uint32_t n_fdmi_err; /* fdmi err */
117 uint32_t n_evt_fw[RSCN_DEV_LOST]; /* fw events */ 117 uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO]; /* fw events */
118 enum csio_ln_ev n_evt_sm[CSIO_LNE_MAX_EVENT]; /* State m/c events */ 118 enum csio_ln_ev n_evt_sm[CSIO_LNE_MAX_EVENT]; /* State m/c events */
119 uint32_t n_rnode_alloc; /* rnode allocated */ 119 uint32_t n_rnode_alloc; /* rnode allocated */
120 uint32_t n_rnode_free; /* rnode freed */ 120 uint32_t n_rnode_free; /* rnode freed */
diff --git a/drivers/scsi/csiostor/csio_rnode.c b/drivers/scsi/csiostor/csio_rnode.c
index 51c6a388de2b..e9c3b045f587 100644
--- a/drivers/scsi/csiostor/csio_rnode.c
+++ b/drivers/scsi/csiostor/csio_rnode.c
@@ -302,7 +302,7 @@ csio_confirm_rnode(struct csio_lnode *ln, uint32_t rdev_flowid,
302{ 302{
303 uint8_t rport_type; 303 uint8_t rport_type;
304 struct csio_rnode *rn, *match_rn; 304 struct csio_rnode *rn, *match_rn;
305 uint32_t vnp_flowid; 305 uint32_t vnp_flowid = 0;
306 __be32 *port_id; 306 __be32 *port_id;
307 307
308 port_id = (__be32 *)&rdevp->r_id[0]; 308 port_id = (__be32 *)&rdevp->r_id[0];
@@ -350,6 +350,14 @@ csio_confirm_rnode(struct csio_lnode *ln, uint32_t rdev_flowid,
350 * Else, go ahead and alloc a new rnode. 350 * Else, go ahead and alloc a new rnode.
351 */ 351 */
352 if (!memcmp(csio_rn_wwpn(match_rn), rdevp->wwpn, 8)) { 352 if (!memcmp(csio_rn_wwpn(match_rn), rdevp->wwpn, 8)) {
353 if (rn == match_rn)
354 goto found_rnode;
355 csio_ln_dbg(ln,
356 "nport_id:x%x and wwpn:%llx"
357 " match for ssni:x%x\n",
358 rn->nport_id,
359 wwn_to_u64(rdevp->wwpn),
360 rdev_flowid);
353 if (csio_is_rnode_ready(rn)) { 361 if (csio_is_rnode_ready(rn)) {
354 csio_ln_warn(ln, 362 csio_ln_warn(ln,
355 "rnode is already" 363 "rnode is already"
diff --git a/drivers/scsi/csiostor/csio_rnode.h b/drivers/scsi/csiostor/csio_rnode.h
index a3b434c801da..65940096a80d 100644
--- a/drivers/scsi/csiostor/csio_rnode.h
+++ b/drivers/scsi/csiostor/csio_rnode.h
@@ -63,7 +63,7 @@ struct csio_rnode_stats {
63 uint32_t n_err_nomem; /* error nomem */ 63 uint32_t n_err_nomem; /* error nomem */
64 uint32_t n_evt_unexp; /* unexpected event */ 64 uint32_t n_evt_unexp; /* unexpected event */
65 uint32_t n_evt_drop; /* unexpected event */ 65 uint32_t n_evt_drop; /* unexpected event */
66 uint32_t n_evt_fw[RSCN_DEV_LOST]; /* fw events */ 66 uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO]; /* fw events */
67 enum csio_rn_ev n_evt_sm[CSIO_RNFE_MAX_EVENT]; /* State m/c events */ 67 enum csio_rn_ev n_evt_sm[CSIO_RNFE_MAX_EVENT]; /* State m/c events */
68 uint32_t n_lun_rst; /* Number of resets of 68 uint32_t n_lun_rst; /* Number of resets of
69 * of LUNs under this 69 * of LUNs under this
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
index c32df1bdaa97..4255ce264abf 100644
--- a/drivers/scsi/csiostor/csio_wr.c
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -85,8 +85,8 @@ csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq)
85 */ 85 */
86 if (flq->inc_idx >= 8) { 86 if (flq->inc_idx >= 8) {
87 csio_wr_reg32(hw, DBPRIO(1) | QID(flq->un.fl.flid) | 87 csio_wr_reg32(hw, DBPRIO(1) | QID(flq->un.fl.flid) |
88 PIDX(flq->inc_idx / 8), 88 CSIO_HW_PIDX(hw, flq->inc_idx / 8),
89 MYPF_REG(SGE_PF_KDOORBELL)); 89 MYPF_REG(SGE_PF_KDOORBELL));
90 flq->inc_idx &= 7; 90 flq->inc_idx &= 7;
91 } 91 }
92} 92}
@@ -989,7 +989,8 @@ csio_wr_issue(struct csio_hw *hw, int qidx, bool prio)
989 wmb(); 989 wmb();
990 /* Ring SGE Doorbell writing q->pidx into it */ 990 /* Ring SGE Doorbell writing q->pidx into it */
991 csio_wr_reg32(hw, DBPRIO(prio) | QID(q->un.eq.physeqid) | 991 csio_wr_reg32(hw, DBPRIO(prio) | QID(q->un.eq.physeqid) |
992 PIDX(q->inc_idx), MYPF_REG(SGE_PF_KDOORBELL)); 992 CSIO_HW_PIDX(hw, q->inc_idx),
993 MYPF_REG(SGE_PF_KDOORBELL));
993 q->inc_idx = 0; 994 q->inc_idx = 0;
994 995
995 return 0; 996 return 0;
@@ -1331,20 +1332,30 @@ csio_wr_fixup_host_params(struct csio_hw *hw)
1331 1332
1332 /* FL BUFFER SIZE#0 is Page size i,e already aligned to cache line */ 1333 /* FL BUFFER SIZE#0 is Page size i,e already aligned to cache line */
1333 csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0); 1334 csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0);
1334 csio_wr_reg32(hw, 1335
1335 (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2) + 1336 /*
1336 sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1), 1337 * If using hard params, the following will get set correctly
1337 SGE_FL_BUFFER_SIZE2); 1338 * in csio_wr_set_sge().
1338 csio_wr_reg32(hw, 1339 */
1339 (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3) + 1340 if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS) {
1340 sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1), 1341 csio_wr_reg32(hw,
1341 SGE_FL_BUFFER_SIZE3); 1342 (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2) +
1343 sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
1344 SGE_FL_BUFFER_SIZE2);
1345 csio_wr_reg32(hw,
1346 (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3) +
1347 sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
1348 SGE_FL_BUFFER_SIZE3);
1349 }
1342 1350
1343 csio_wr_reg32(hw, HPZ0(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ); 1351 csio_wr_reg32(hw, HPZ0(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ);
1344 1352
1345 /* default value of rx_dma_offset of the NIC driver */ 1353 /* default value of rx_dma_offset of the NIC driver */
1346 csio_set_reg_field(hw, SGE_CONTROL, PKTSHIFT_MASK, 1354 csio_set_reg_field(hw, SGE_CONTROL, PKTSHIFT_MASK,
1347 PKTSHIFT(CSIO_SGE_RX_DMA_OFFSET)); 1355 PKTSHIFT(CSIO_SGE_RX_DMA_OFFSET));
1356
1357 csio_hw_tp_wr_bits_indirect(hw, TP_INGRESS_CONFIG,
1358 CSUM_HAS_PSEUDO_HDR, 0);
1348} 1359}
1349 1360
1350static void 1361static void
@@ -1460,18 +1471,21 @@ csio_wr_set_sge(struct csio_hw *hw)
1460 * and generate an interrupt when this occurs so we can recover. 1471 * and generate an interrupt when this occurs so we can recover.
1461 */ 1472 */
1462 csio_set_reg_field(hw, SGE_DBFIFO_STATUS, 1473 csio_set_reg_field(hw, SGE_DBFIFO_STATUS,
1463 HP_INT_THRESH(HP_INT_THRESH_MASK) | 1474 HP_INT_THRESH(HP_INT_THRESH_MASK) |
1464 LP_INT_THRESH(LP_INT_THRESH_MASK), 1475 CSIO_HW_LP_INT_THRESH(hw, CSIO_HW_M_LP_INT_THRESH(hw)),
1465 HP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH) | 1476 HP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH) |
1466 LP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH)); 1477 CSIO_HW_LP_INT_THRESH(hw, CSIO_SGE_DBFIFO_INT_THRESH));
1478
1467 csio_set_reg_field(hw, SGE_DOORBELL_CONTROL, ENABLE_DROP, 1479 csio_set_reg_field(hw, SGE_DOORBELL_CONTROL, ENABLE_DROP,
1468 ENABLE_DROP); 1480 ENABLE_DROP);
1469 1481
1470 /* SGE_FL_BUFFER_SIZE0 is set up by csio_wr_fixup_host_params(). */ 1482 /* SGE_FL_BUFFER_SIZE0 is set up by csio_wr_fixup_host_params(). */
1471 1483
1472 CSIO_SET_FLBUF_SIZE(hw, 1, CSIO_SGE_FLBUF_SIZE1); 1484 CSIO_SET_FLBUF_SIZE(hw, 1, CSIO_SGE_FLBUF_SIZE1);
1473 CSIO_SET_FLBUF_SIZE(hw, 2, CSIO_SGE_FLBUF_SIZE2); 1485 csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE2 + sge->csio_fl_align - 1)
1474 CSIO_SET_FLBUF_SIZE(hw, 3, CSIO_SGE_FLBUF_SIZE3); 1486 & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE2);
1487 csio_wr_reg32(hw, (CSIO_SGE_FLBUF_SIZE3 + sge->csio_fl_align - 1)
1488 & ~(sge->csio_fl_align - 1), SGE_FL_BUFFER_SIZE3);
1475 CSIO_SET_FLBUF_SIZE(hw, 4, CSIO_SGE_FLBUF_SIZE4); 1489 CSIO_SET_FLBUF_SIZE(hw, 4, CSIO_SGE_FLBUF_SIZE4);
1476 CSIO_SET_FLBUF_SIZE(hw, 5, CSIO_SGE_FLBUF_SIZE5); 1490 CSIO_SET_FLBUF_SIZE(hw, 5, CSIO_SGE_FLBUF_SIZE5);
1477 CSIO_SET_FLBUF_SIZE(hw, 6, CSIO_SGE_FLBUF_SIZE6); 1491 CSIO_SET_FLBUF_SIZE(hw, 6, CSIO_SGE_FLBUF_SIZE6);
@@ -1522,22 +1536,24 @@ void
1522csio_wr_sge_init(struct csio_hw *hw) 1536csio_wr_sge_init(struct csio_hw *hw)
1523{ 1537{
1524 /* 1538 /*
1525 * If we are master: 1539 * If we are master and chip is not initialized:
1526 * - If we plan to use the config file, we need to fixup some 1540 * - If we plan to use the config file, we need to fixup some
1527 * host specific registers, and read the rest of the SGE 1541 * host specific registers, and read the rest of the SGE
1528 * configuration. 1542 * configuration.
1529 * - If we dont plan to use the config file, we need to initialize 1543 * - If we dont plan to use the config file, we need to initialize
1530 * SGE entirely, including fixing the host specific registers. 1544 * SGE entirely, including fixing the host specific registers.
1545 * If we are master and chip is initialized, just read and work off of
1546 * the already initialized SGE values.
1531 * If we arent the master, we are only allowed to read and work off of 1547 * If we arent the master, we are only allowed to read and work off of
1532 * the already initialized SGE values. 1548 * the already initialized SGE values.
1533 * 1549 *
1534 * Therefore, before calling this function, we assume that the master- 1550 * Therefore, before calling this function, we assume that the master-
1535 * ship of the card, and whether to use config file or not, have 1551 * ship of the card, state and whether to use config file or not, have
1536 * already been decided. In other words, CSIO_HWF_USING_SOFT_PARAMS and 1552 * already been decided.
1537 * CSIO_HWF_MASTER should be set/unset.
1538 */ 1553 */
1539 if (csio_is_hw_master(hw)) { 1554 if (csio_is_hw_master(hw)) {
1540 csio_wr_fixup_host_params(hw); 1555 if (hw->fw_state != CSIO_DEV_STATE_INIT)
1556 csio_wr_fixup_host_params(hw);
1541 1557
1542 if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS) 1558 if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS)
1543 csio_wr_get_sge(hw); 1559 csio_wr_get_sge(hw);
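Editor's note: the SGE_FL_BUFFER_SIZE writes in the csio_wr.c hunks above all use the same round-up-to-alignment idiom. Extracted as an illustrative helper (not present in the driver), where the alignment is assumed to be a power of two, as sge->csio_fl_align is:

/*
 * Round size up to the next multiple of align; align must be a
 * power of two.  Usage would look like:
 *   csio_wr_reg32(hw, example_fl_align(CSIO_SGE_FLBUF_SIZE2,
 *                 sge->csio_fl_align), SGE_FL_BUFFER_SIZE2);
 */
static inline u32 example_fl_align(u32 size, u32 align)
{
        return (size + align - 1) & ~(align - 1);
}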
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index 65123a21b97e..fe30ea94ffe6 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -50,7 +50,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
50 u32 rlen; 50 u32 rlen;
51 int err, tport; 51 int err, tport;
52 52
53 while (skb->len >= NLMSG_SPACE(0)) { 53 while (skb->len >= NLMSG_HDRLEN) {
54 err = 0; 54 err = 0;
55 55
56 nlh = nlmsg_hdr(skb); 56 nlh = nlmsg_hdr(skb);
@@ -70,7 +70,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
70 goto next_msg; 70 goto next_msg;
71 } 71 }
72 72
73 hdr = NLMSG_DATA(nlh); 73 hdr = nlmsg_data(nlh);
74 if ((hdr->version != SCSI_NL_VERSION) || 74 if ((hdr->version != SCSI_NL_VERSION) ||
75 (hdr->magic != SCSI_NL_MAGIC)) { 75 (hdr->magic != SCSI_NL_MAGIC)) {
76 err = -EPROTOTYPE; 76 err = -EPROTOTYPE;
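Editor's note: the NLMSG_SPACE(0) -> NLMSG_HDRLEN change above keeps the standard netlink receive loop intact; only the minimum-length test is spelled with the modern macro. The loop's overall shape, sketched with illustrative names (the real scsi_nl_rcv_msg() does more validation and dispatch):

/*
 * Illustrative netlink receive loop: walk every aligned message in
 * the skb, treating NLMSG_HDRLEN as the minimum readable size.
 * Assumes <net/netlink.h> for nlmsg_hdr() and NLMSG_ALIGN().
 */
static void example_rcv_loop(struct sk_buff *skb)
{
        struct nlmsghdr *nlh;
        u32 rlen;

        while (skb->len >= NLMSG_HDRLEN) {
                nlh = nlmsg_hdr(skb);
                if (nlh->nlmsg_len < sizeof(*nlh) ||
                    skb->len < nlh->nlmsg_len)
                        return;

                /* ... dispatch on nlh->nlmsg_type here ... */

                rlen = NLMSG_ALIGN(nlh->nlmsg_len);
                if (rlen > skb->len)
                        rlen = skb->len;
                skb_pull(skb, rlen);
        }
}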
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index e894ca7b54c0..e106c276aa00 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -35,7 +35,6 @@
35#include <scsi/scsi_transport.h> 35#include <scsi/scsi_transport.h>
36#include <scsi/scsi_transport_fc.h> 36#include <scsi/scsi_transport_fc.h>
37#include <scsi/scsi_cmnd.h> 37#include <scsi/scsi_cmnd.h>
38#include <linux/netlink.h>
39#include <net/netlink.h> 38#include <net/netlink.h>
40#include <scsi/scsi_netlink_fc.h> 39#include <scsi/scsi_netlink_fc.h>
41#include <scsi/scsi_bsg_fc.h> 40#include <scsi/scsi_bsg_fc.h>
@@ -534,7 +533,7 @@ fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
534 struct nlmsghdr *nlh; 533 struct nlmsghdr *nlh;
535 struct fc_nl_event *event; 534 struct fc_nl_event *event;
536 const char *name; 535 const char *name;
537 u32 len, skblen; 536 u32 len;
538 int err; 537 int err;
539 538
540 if (!scsi_nl_sock) { 539 if (!scsi_nl_sock) {
@@ -543,21 +542,19 @@ fc_host_post_event(struct Scsi_Host *shost, u32 event_number,
543 } 542 }
544 543
545 len = FC_NL_MSGALIGN(sizeof(*event)); 544 len = FC_NL_MSGALIGN(sizeof(*event));
546 skblen = NLMSG_SPACE(len);
547 545
548 skb = alloc_skb(skblen, GFP_KERNEL); 546 skb = nlmsg_new(len, GFP_KERNEL);
549 if (!skb) { 547 if (!skb) {
550 err = -ENOBUFS; 548 err = -ENOBUFS;
551 goto send_fail; 549 goto send_fail;
552 } 550 }
553 551
554 nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG, 552 nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG, len, 0);
555 skblen - sizeof(*nlh), 0);
556 if (!nlh) { 553 if (!nlh) {
557 err = -ENOBUFS; 554 err = -ENOBUFS;
558 goto send_fail_skb; 555 goto send_fail_skb;
559 } 556 }
560 event = NLMSG_DATA(nlh); 557 event = nlmsg_data(nlh);
561 558
562 INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC, 559 INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
563 FC_NL_ASYNC_EVENT, len); 560 FC_NL_ASYNC_EVENT, len);
@@ -604,7 +601,7 @@ fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
604 struct sk_buff *skb; 601 struct sk_buff *skb;
605 struct nlmsghdr *nlh; 602 struct nlmsghdr *nlh;
606 struct fc_nl_event *event; 603 struct fc_nl_event *event;
607 u32 len, skblen; 604 u32 len;
608 int err; 605 int err;
609 606
610 if (!scsi_nl_sock) { 607 if (!scsi_nl_sock) {
@@ -613,21 +610,19 @@ fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
613 } 610 }
614 611
615 len = FC_NL_MSGALIGN(sizeof(*event) + data_len); 612 len = FC_NL_MSGALIGN(sizeof(*event) + data_len);
616 skblen = NLMSG_SPACE(len);
617 613
618 skb = alloc_skb(skblen, GFP_KERNEL); 614 skb = nlmsg_new(len, GFP_KERNEL);
619 if (!skb) { 615 if (!skb) {
620 err = -ENOBUFS; 616 err = -ENOBUFS;
621 goto send_vendor_fail; 617 goto send_vendor_fail;
622 } 618 }
623 619
624 nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG, 620 nlh = nlmsg_put(skb, 0, 0, SCSI_TRANSPORT_MSG, len, 0);
625 skblen - sizeof(*nlh), 0);
626 if (!nlh) { 621 if (!nlh) {
627 err = -ENOBUFS; 622 err = -ENOBUFS;
628 goto send_vendor_fail_skb; 623 goto send_vendor_fail_skb;
629 } 624 }
630 event = NLMSG_DATA(nlh); 625 event = nlmsg_data(nlh);
631 626
632 INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC, 627 INIT_SCSI_NL_HDR(&event->snlh, SCSI_NL_TRANSPORT_FC,
633 FC_NL_ASYNC_EVENT, len); 628 FC_NL_ASYNC_EVENT, len);
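Editor's note: the two scsi_transport_fc.c hunks above follow one mechanical pattern: drop the hand-computed NLMSG_SPACE() skb length and let nlmsg_new()/nlmsg_put() size the buffer and header from the payload length, with nlmsg_data() replacing NLMSG_DATA(). A condensed sketch of the resulting shape (example_build_event is illustrative, not a driver function):

/*
 * Illustrative send-side pattern: nlmsg_new() adds NLMSG_HDRLEN and
 * alignment padding itself, and nlmsg_put() writes a header for a
 * payload_len-byte payload.  Assumes <net/netlink.h>.
 */
static struct sk_buff *example_build_event(size_t payload_len, int type)
{
        struct sk_buff *skb;
        struct nlmsghdr *nlh;

        skb = nlmsg_new(payload_len, GFP_KERNEL);
        if (!skb)
                return NULL;

        nlh = nlmsg_put(skb, 0, 0, type, payload_len, 0);
        if (!nlh) {
                kfree_skb(skb);
                return NULL;
        }

        memset(nlmsg_data(nlh), 0, payload_len); /* payload to fill in */
        return skb;
}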
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 0a74b975efdf..2e3816530bba 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -1344,8 +1344,8 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
1344 struct iscsi_uevent *ev; 1344 struct iscsi_uevent *ev;
1345 char *pdu; 1345 char *pdu;
1346 struct iscsi_internal *priv; 1346 struct iscsi_internal *priv;
1347 int len = NLMSG_SPACE(sizeof(*ev) + sizeof(struct iscsi_hdr) + 1347 int len = nlmsg_total_size(sizeof(*ev) + sizeof(struct iscsi_hdr) +
1348 data_size); 1348 data_size);
1349 1349
1350 priv = iscsi_if_transport_lookup(conn->transport); 1350 priv = iscsi_if_transport_lookup(conn->transport);
1351 if (!priv) 1351 if (!priv)
@@ -1360,7 +1360,7 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
1360 } 1360 }
1361 1361
1362 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); 1362 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
1363 ev = NLMSG_DATA(nlh); 1363 ev = nlmsg_data(nlh);
1364 memset(ev, 0, sizeof(*ev)); 1364 memset(ev, 0, sizeof(*ev));
1365 ev->transport_handle = iscsi_handle(conn->transport); 1365 ev->transport_handle = iscsi_handle(conn->transport);
1366 ev->type = ISCSI_KEVENT_RECV_PDU; 1366 ev->type = ISCSI_KEVENT_RECV_PDU;
@@ -1381,7 +1381,7 @@ int iscsi_offload_mesg(struct Scsi_Host *shost,
1381 struct nlmsghdr *nlh; 1381 struct nlmsghdr *nlh;
1382 struct sk_buff *skb; 1382 struct sk_buff *skb;
1383 struct iscsi_uevent *ev; 1383 struct iscsi_uevent *ev;
1384 int len = NLMSG_SPACE(sizeof(*ev) + data_size); 1384 int len = nlmsg_total_size(sizeof(*ev) + data_size);
1385 1385
1386 skb = alloc_skb(len, GFP_ATOMIC); 1386 skb = alloc_skb(len, GFP_ATOMIC);
1387 if (!skb) { 1387 if (!skb) {
@@ -1390,7 +1390,7 @@ int iscsi_offload_mesg(struct Scsi_Host *shost,
1390 } 1390 }
1391 1391
1392 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); 1392 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
1393 ev = NLMSG_DATA(nlh); 1393 ev = nlmsg_data(nlh);
1394 memset(ev, 0, sizeof(*ev)); 1394 memset(ev, 0, sizeof(*ev));
1395 ev->type = type; 1395 ev->type = type;
1396 ev->transport_handle = iscsi_handle(transport); 1396 ev->transport_handle = iscsi_handle(transport);
@@ -1415,7 +1415,7 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
1415 struct sk_buff *skb; 1415 struct sk_buff *skb;
1416 struct iscsi_uevent *ev; 1416 struct iscsi_uevent *ev;
1417 struct iscsi_internal *priv; 1417 struct iscsi_internal *priv;
1418 int len = NLMSG_SPACE(sizeof(*ev)); 1418 int len = nlmsg_total_size(sizeof(*ev));
1419 1419
1420 priv = iscsi_if_transport_lookup(conn->transport); 1420 priv = iscsi_if_transport_lookup(conn->transport);
1421 if (!priv) 1421 if (!priv)
@@ -1429,7 +1429,7 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
1429 } 1429 }
1430 1430
1431 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); 1431 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
1432 ev = NLMSG_DATA(nlh); 1432 ev = nlmsg_data(nlh);
1433 ev->transport_handle = iscsi_handle(conn->transport); 1433 ev->transport_handle = iscsi_handle(conn->transport);
1434 ev->type = ISCSI_KEVENT_CONN_ERROR; 1434 ev->type = ISCSI_KEVENT_CONN_ERROR;
1435 ev->r.connerror.error = error; 1435 ev->r.connerror.error = error;
@@ -1450,7 +1450,7 @@ void iscsi_conn_login_event(struct iscsi_cls_conn *conn,
1450 struct sk_buff *skb; 1450 struct sk_buff *skb;
1451 struct iscsi_uevent *ev; 1451 struct iscsi_uevent *ev;
1452 struct iscsi_internal *priv; 1452 struct iscsi_internal *priv;
1453 int len = NLMSG_SPACE(sizeof(*ev)); 1453 int len = nlmsg_total_size(sizeof(*ev));
1454 1454
1455 priv = iscsi_if_transport_lookup(conn->transport); 1455 priv = iscsi_if_transport_lookup(conn->transport);
1456 if (!priv) 1456 if (!priv)
@@ -1464,7 +1464,7 @@ void iscsi_conn_login_event(struct iscsi_cls_conn *conn,
1464 } 1464 }
1465 1465
1466 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); 1466 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
1467 ev = NLMSG_DATA(nlh); 1467 ev = nlmsg_data(nlh);
1468 ev->transport_handle = iscsi_handle(conn->transport); 1468 ev->transport_handle = iscsi_handle(conn->transport);
1469 ev->type = ISCSI_KEVENT_CONN_LOGIN_STATE; 1469 ev->type = ISCSI_KEVENT_CONN_LOGIN_STATE;
1470 ev->r.conn_login.state = state; 1470 ev->r.conn_login.state = state;
@@ -1484,7 +1484,7 @@ void iscsi_post_host_event(uint32_t host_no, struct iscsi_transport *transport,
1484 struct nlmsghdr *nlh; 1484 struct nlmsghdr *nlh;
1485 struct sk_buff *skb; 1485 struct sk_buff *skb;
1486 struct iscsi_uevent *ev; 1486 struct iscsi_uevent *ev;
1487 int len = NLMSG_SPACE(sizeof(*ev) + data_size); 1487 int len = nlmsg_total_size(sizeof(*ev) + data_size);
1488 1488
1489 skb = alloc_skb(len, GFP_NOIO); 1489 skb = alloc_skb(len, GFP_NOIO);
1490 if (!skb) { 1490 if (!skb) {
@@ -1494,7 +1494,7 @@ void iscsi_post_host_event(uint32_t host_no, struct iscsi_transport *transport,
1494 } 1494 }
1495 1495
1496 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); 1496 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
1497 ev = NLMSG_DATA(nlh); 1497 ev = nlmsg_data(nlh);
1498 ev->transport_handle = iscsi_handle(transport); 1498 ev->transport_handle = iscsi_handle(transport);
1499 ev->type = ISCSI_KEVENT_HOST_EVENT; 1499 ev->type = ISCSI_KEVENT_HOST_EVENT;
1500 ev->r.host_event.host_no = host_no; 1500 ev->r.host_event.host_no = host_no;
@@ -1515,7 +1515,7 @@ void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport,
1515 struct nlmsghdr *nlh; 1515 struct nlmsghdr *nlh;
1516 struct sk_buff *skb; 1516 struct sk_buff *skb;
1517 struct iscsi_uevent *ev; 1517 struct iscsi_uevent *ev;
1518 int len = NLMSG_SPACE(sizeof(*ev) + data_size); 1518 int len = nlmsg_total_size(sizeof(*ev) + data_size);
1519 1519
1520 skb = alloc_skb(len, GFP_NOIO); 1520 skb = alloc_skb(len, GFP_NOIO);
1521 if (!skb) { 1521 if (!skb) {
@@ -1524,7 +1524,7 @@ void iscsi_ping_comp_event(uint32_t host_no, struct iscsi_transport *transport,
1524 } 1524 }
1525 1525
1526 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); 1526 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
1527 ev = NLMSG_DATA(nlh); 1527 ev = nlmsg_data(nlh);
1528 ev->transport_handle = iscsi_handle(transport); 1528 ev->transport_handle = iscsi_handle(transport);
1529 ev->type = ISCSI_KEVENT_PING_COMP; 1529 ev->type = ISCSI_KEVENT_PING_COMP;
1530 ev->r.ping_comp.host_no = host_no; 1530 ev->r.ping_comp.host_no = host_no;
@@ -1543,7 +1543,7 @@ iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
1543{ 1543{
1544 struct sk_buff *skb; 1544 struct sk_buff *skb;
1545 struct nlmsghdr *nlh; 1545 struct nlmsghdr *nlh;
1546 int len = NLMSG_SPACE(size); 1546 int len = nlmsg_total_size(size);
1547 int flags = multi ? NLM_F_MULTI : 0; 1547 int flags = multi ? NLM_F_MULTI : 0;
1548 int t = done ? NLMSG_DONE : type; 1548 int t = done ? NLMSG_DONE : type;
1549 1549
@@ -1555,24 +1555,24 @@ iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
1555 1555
1556 nlh = __nlmsg_put(skb, 0, 0, t, (len - sizeof(*nlh)), 0); 1556 nlh = __nlmsg_put(skb, 0, 0, t, (len - sizeof(*nlh)), 0);
1557 nlh->nlmsg_flags = flags; 1557 nlh->nlmsg_flags = flags;
1558 memcpy(NLMSG_DATA(nlh), payload, size); 1558 memcpy(nlmsg_data(nlh), payload, size);
1559 return iscsi_multicast_skb(skb, group, GFP_ATOMIC); 1559 return iscsi_multicast_skb(skb, group, GFP_ATOMIC);
1560} 1560}
1561 1561
1562static int 1562static int
1563iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh) 1563iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
1564{ 1564{
1565 struct iscsi_uevent *ev = NLMSG_DATA(nlh); 1565 struct iscsi_uevent *ev = nlmsg_data(nlh);
1566 struct iscsi_stats *stats; 1566 struct iscsi_stats *stats;
1567 struct sk_buff *skbstat; 1567 struct sk_buff *skbstat;
1568 struct iscsi_cls_conn *conn; 1568 struct iscsi_cls_conn *conn;
1569 struct nlmsghdr *nlhstat; 1569 struct nlmsghdr *nlhstat;
1570 struct iscsi_uevent *evstat; 1570 struct iscsi_uevent *evstat;
1571 struct iscsi_internal *priv; 1571 struct iscsi_internal *priv;
1572 int len = NLMSG_SPACE(sizeof(*ev) + 1572 int len = nlmsg_total_size(sizeof(*ev) +
1573 sizeof(struct iscsi_stats) + 1573 sizeof(struct iscsi_stats) +
1574 sizeof(struct iscsi_stats_custom) * 1574 sizeof(struct iscsi_stats_custom) *
1575 ISCSI_STATS_CUSTOM_MAX); 1575 ISCSI_STATS_CUSTOM_MAX);
1576 int err = 0; 1576 int err = 0;
1577 1577
1578 priv = iscsi_if_transport_lookup(transport); 1578 priv = iscsi_if_transport_lookup(transport);
@@ -1595,7 +1595,7 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
1595 1595
1596 nlhstat = __nlmsg_put(skbstat, 0, 0, 0, 1596 nlhstat = __nlmsg_put(skbstat, 0, 0, 0,
1597 (len - sizeof(*nlhstat)), 0); 1597 (len - sizeof(*nlhstat)), 0);
1598 evstat = NLMSG_DATA(nlhstat); 1598 evstat = nlmsg_data(nlhstat);
1599 memset(evstat, 0, sizeof(*evstat)); 1599 memset(evstat, 0, sizeof(*evstat));
1600 evstat->transport_handle = iscsi_handle(conn->transport); 1600 evstat->transport_handle = iscsi_handle(conn->transport);
1601 evstat->type = nlh->nlmsg_type; 1601 evstat->type = nlh->nlmsg_type;
@@ -1608,12 +1608,12 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
1608 memset(stats, 0, sizeof(*stats)); 1608 memset(stats, 0, sizeof(*stats));
1609 1609
1610 transport->get_stats(conn, stats); 1610 transport->get_stats(conn, stats);
1611 actual_size = NLMSG_SPACE(sizeof(struct iscsi_uevent) + 1611 actual_size = nlmsg_total_size(sizeof(struct iscsi_uevent) +
1612 sizeof(struct iscsi_stats) + 1612 sizeof(struct iscsi_stats) +
1613 sizeof(struct iscsi_stats_custom) * 1613 sizeof(struct iscsi_stats_custom) *
1614 stats->custom_length); 1614 stats->custom_length);
1615 actual_size -= sizeof(*nlhstat); 1615 actual_size -= sizeof(*nlhstat);
1616 actual_size = NLMSG_LENGTH(actual_size); 1616 actual_size = nlmsg_msg_size(actual_size);
1617 skb_trim(skbstat, NLMSG_ALIGN(actual_size)); 1617 skb_trim(skbstat, NLMSG_ALIGN(actual_size));
1618 nlhstat->nlmsg_len = actual_size; 1618 nlhstat->nlmsg_len = actual_size;
1619 1619
@@ -1637,7 +1637,7 @@ int iscsi_session_event(struct iscsi_cls_session *session,
1637 struct iscsi_uevent *ev; 1637 struct iscsi_uevent *ev;
1638 struct sk_buff *skb; 1638 struct sk_buff *skb;
1639 struct nlmsghdr *nlh; 1639 struct nlmsghdr *nlh;
1640 int rc, len = NLMSG_SPACE(sizeof(*ev)); 1640 int rc, len = nlmsg_total_size(sizeof(*ev));
1641 1641
1642 priv = iscsi_if_transport_lookup(session->transport); 1642 priv = iscsi_if_transport_lookup(session->transport);
1643 if (!priv) 1643 if (!priv)
@@ -1653,7 +1653,7 @@ int iscsi_session_event(struct iscsi_cls_session *session,
1653 } 1653 }
1654 1654
1655 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); 1655 nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
1656 ev = NLMSG_DATA(nlh); 1656 ev = nlmsg_data(nlh);
1657 ev->transport_handle = iscsi_handle(session->transport); 1657 ev->transport_handle = iscsi_handle(session->transport);
1658 1658
1659 ev->type = event; 1659 ev->type = event;
@@ -2005,7 +2005,7 @@ iscsi_send_ping(struct iscsi_transport *transport, struct iscsi_uevent *ev)
2005static int 2005static int
2006iscsi_get_chap(struct iscsi_transport *transport, struct nlmsghdr *nlh) 2006iscsi_get_chap(struct iscsi_transport *transport, struct nlmsghdr *nlh)
2007{ 2007{
2008 struct iscsi_uevent *ev = NLMSG_DATA(nlh); 2008 struct iscsi_uevent *ev = nlmsg_data(nlh);
2009 struct Scsi_Host *shost = NULL; 2009 struct Scsi_Host *shost = NULL;
2010 struct iscsi_chap_rec *chap_rec; 2010 struct iscsi_chap_rec *chap_rec;
2011 struct iscsi_internal *priv; 2011 struct iscsi_internal *priv;
@@ -2024,7 +2024,7 @@ iscsi_get_chap(struct iscsi_transport *transport, struct nlmsghdr *nlh)
2024 return -EINVAL; 2024 return -EINVAL;
2025 2025
2026 chap_buf_size = (ev->u.get_chap.num_entries * sizeof(*chap_rec)); 2026 chap_buf_size = (ev->u.get_chap.num_entries * sizeof(*chap_rec));
2027 len = NLMSG_SPACE(sizeof(*ev) + chap_buf_size); 2027 len = nlmsg_total_size(sizeof(*ev) + chap_buf_size);
2028 2028
2029 shost = scsi_host_lookup(ev->u.get_chap.host_no); 2029 shost = scsi_host_lookup(ev->u.get_chap.host_no);
2030 if (!shost) { 2030 if (!shost) {
@@ -2045,7 +2045,7 @@ iscsi_get_chap(struct iscsi_transport *transport, struct nlmsghdr *nlh)
2045 2045
2046 nlhchap = __nlmsg_put(skbchap, 0, 0, 0, 2046 nlhchap = __nlmsg_put(skbchap, 0, 0, 0,
2047 (len - sizeof(*nlhchap)), 0); 2047 (len - sizeof(*nlhchap)), 0);
2048 evchap = NLMSG_DATA(nlhchap); 2048 evchap = nlmsg_data(nlhchap);
2049 memset(evchap, 0, sizeof(*evchap)); 2049 memset(evchap, 0, sizeof(*evchap));
2050 evchap->transport_handle = iscsi_handle(transport); 2050 evchap->transport_handle = iscsi_handle(transport);
2051 evchap->type = nlh->nlmsg_type; 2051 evchap->type = nlh->nlmsg_type;
@@ -2058,7 +2058,7 @@ iscsi_get_chap(struct iscsi_transport *transport, struct nlmsghdr *nlh)
2058 err = transport->get_chap(shost, ev->u.get_chap.chap_tbl_idx, 2058 err = transport->get_chap(shost, ev->u.get_chap.chap_tbl_idx,
2059 &evchap->u.get_chap.num_entries, buf); 2059 &evchap->u.get_chap.num_entries, buf);
2060 2060
2061 actual_size = NLMSG_SPACE(sizeof(*ev) + chap_buf_size); 2061 actual_size = nlmsg_total_size(sizeof(*ev) + chap_buf_size);
2062 skb_trim(skbchap, NLMSG_ALIGN(actual_size)); 2062 skb_trim(skbchap, NLMSG_ALIGN(actual_size));
2063 nlhchap->nlmsg_len = actual_size; 2063 nlhchap->nlmsg_len = actual_size;
2064 2064
@@ -2096,7 +2096,7 @@ static int
2096iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) 2096iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
2097{ 2097{
2098 int err = 0; 2098 int err = 0;
2099 struct iscsi_uevent *ev = NLMSG_DATA(nlh); 2099 struct iscsi_uevent *ev = nlmsg_data(nlh);
2100 struct iscsi_transport *transport = NULL; 2100 struct iscsi_transport *transport = NULL;
2101 struct iscsi_internal *priv; 2101 struct iscsi_internal *priv;
2102 struct iscsi_cls_session *session; 2102 struct iscsi_cls_session *session;
@@ -2263,7 +2263,7 @@ static void
2263iscsi_if_rx(struct sk_buff *skb) 2263iscsi_if_rx(struct sk_buff *skb)
2264{ 2264{
2265 mutex_lock(&rx_queue_mutex); 2265 mutex_lock(&rx_queue_mutex);
2266 while (skb->len >= NLMSG_SPACE(0)) { 2266 while (skb->len >= NLMSG_HDRLEN) {
2267 int err; 2267 int err;
2268 uint32_t rlen; 2268 uint32_t rlen;
2269 struct nlmsghdr *nlh; 2269 struct nlmsghdr *nlh;
@@ -2276,7 +2276,7 @@ iscsi_if_rx(struct sk_buff *skb)
2276 break; 2276 break;
2277 } 2277 }
2278 2278
2279 ev = NLMSG_DATA(nlh); 2279 ev = nlmsg_data(nlh);
2280 rlen = NLMSG_ALIGN(nlh->nlmsg_len); 2280 rlen = NLMSG_ALIGN(nlh->nlmsg_len);
2281 if (rlen > skb->len) 2281 if (rlen > skb->len)
2282 rlen = skb->len; 2282 rlen = skb->len;
diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c
index 71098a7b5fed..7cb7d2c8fd86 100644
--- a/drivers/ssb/driver_chipcommon.c
+++ b/drivers/ssb/driver_chipcommon.c
@@ -354,7 +354,7 @@ void ssb_chipcommon_init(struct ssb_chipcommon *cc)
354 354
355 if (cc->dev->id.revision >= 11) 355 if (cc->dev->id.revision >= 11)
356 cc->status = chipco_read32(cc, SSB_CHIPCO_CHIPSTAT); 356 cc->status = chipco_read32(cc, SSB_CHIPCO_CHIPSTAT);
357 ssb_dprintk(KERN_INFO PFX "chipcommon status is 0x%x\n", cc->status); 357 ssb_dbg("chipcommon status is 0x%x\n", cc->status);
358 358
359 if (cc->dev->id.revision >= 20) { 359 if (cc->dev->id.revision >= 20) {
360 chipco_write32(cc, SSB_CHIPCO_GPIOPULLUP, 0); 360 chipco_write32(cc, SSB_CHIPCO_GPIOPULLUP, 0);
diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c
index 4c0f6d883dd3..791da2c0d8f6 100644
--- a/drivers/ssb/driver_chipcommon_pmu.c
+++ b/drivers/ssb/driver_chipcommon_pmu.c
@@ -110,8 +110,8 @@ static void ssb_pmu0_pllinit_r0(struct ssb_chipcommon *cc,
110 return; 110 return;
111 } 111 }
112 112
113 ssb_printk(KERN_INFO PFX "Programming PLL to %u.%03u MHz\n", 113 ssb_info("Programming PLL to %u.%03u MHz\n",
114 (crystalfreq / 1000), (crystalfreq % 1000)); 114 crystalfreq / 1000, crystalfreq % 1000);
115 115
116 /* First turn the PLL off. */ 116 /* First turn the PLL off. */
117 switch (bus->chip_id) { 117 switch (bus->chip_id) {
@@ -138,7 +138,7 @@ static void ssb_pmu0_pllinit_r0(struct ssb_chipcommon *cc,
138 } 138 }
139 tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST); 139 tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST);
140 if (tmp & SSB_CHIPCO_CLKCTLST_HAVEHT) 140 if (tmp & SSB_CHIPCO_CLKCTLST_HAVEHT)
141 ssb_printk(KERN_EMERG PFX "Failed to turn the PLL off!\n"); 141 ssb_emerg("Failed to turn the PLL off!\n");
142 142
143 /* Set PDIV in PLL control 0. */ 143 /* Set PDIV in PLL control 0. */
144 pllctl = ssb_chipco_pll_read(cc, SSB_PMU0_PLLCTL0); 144 pllctl = ssb_chipco_pll_read(cc, SSB_PMU0_PLLCTL0);
@@ -249,8 +249,8 @@ static void ssb_pmu1_pllinit_r0(struct ssb_chipcommon *cc,
249 return; 249 return;
250 } 250 }
251 251
252 ssb_printk(KERN_INFO PFX "Programming PLL to %u.%03u MHz\n", 252 ssb_info("Programming PLL to %u.%03u MHz\n",
253 (crystalfreq / 1000), (crystalfreq % 1000)); 253 crystalfreq / 1000, crystalfreq % 1000);
254 254
255 /* First turn the PLL off. */ 255 /* First turn the PLL off. */
256 switch (bus->chip_id) { 256 switch (bus->chip_id) {
@@ -275,7 +275,7 @@ static void ssb_pmu1_pllinit_r0(struct ssb_chipcommon *cc,
275 } 275 }
276 tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST); 276 tmp = chipco_read32(cc, SSB_CHIPCO_CLKCTLST);
277 if (tmp & SSB_CHIPCO_CLKCTLST_HAVEHT) 277 if (tmp & SSB_CHIPCO_CLKCTLST_HAVEHT)
278 ssb_printk(KERN_EMERG PFX "Failed to turn the PLL off!\n"); 278 ssb_emerg("Failed to turn the PLL off!\n");
279 279
280 /* Set p1div and p2div. */ 280 /* Set p1div and p2div. */
281 pllctl = ssb_chipco_pll_read(cc, SSB_PMU1_PLLCTL0); 281 pllctl = ssb_chipco_pll_read(cc, SSB_PMU1_PLLCTL0);
@@ -349,9 +349,8 @@ static void ssb_pmu_pll_init(struct ssb_chipcommon *cc)
349 case 43222: 349 case 43222:
350 break; 350 break;
351 default: 351 default:
352 ssb_printk(KERN_ERR PFX 352 ssb_err("ERROR: PLL init unknown for device %04X\n",
353 "ERROR: PLL init unknown for device %04X\n", 353 bus->chip_id);
354 bus->chip_id);
355 } 354 }
356} 355}
357 356
@@ -472,9 +471,8 @@ static void ssb_pmu_resources_init(struct ssb_chipcommon *cc)
472 max_msk = 0xFFFFF; 471 max_msk = 0xFFFFF;
473 break; 472 break;
474 default: 473 default:
475 ssb_printk(KERN_ERR PFX 474 ssb_err("ERROR: PMU resource config unknown for device %04X\n",
476 "ERROR: PMU resource config unknown for device %04X\n", 475 bus->chip_id);
477 bus->chip_id);
478 } 476 }
479 477
480 if (updown_tab) { 478 if (updown_tab) {
@@ -526,8 +524,8 @@ void ssb_pmu_init(struct ssb_chipcommon *cc)
526 pmucap = chipco_read32(cc, SSB_CHIPCO_PMU_CAP); 524 pmucap = chipco_read32(cc, SSB_CHIPCO_PMU_CAP);
527 cc->pmu.rev = (pmucap & SSB_CHIPCO_PMU_CAP_REVISION); 525 cc->pmu.rev = (pmucap & SSB_CHIPCO_PMU_CAP_REVISION);
528 526
529 ssb_dprintk(KERN_DEBUG PFX "Found rev %u PMU (capabilities 0x%08X)\n", 527 ssb_dbg("Found rev %u PMU (capabilities 0x%08X)\n",
530 cc->pmu.rev, pmucap); 528 cc->pmu.rev, pmucap);
531 529
532 if (cc->pmu.rev == 1) 530 if (cc->pmu.rev == 1)
533 chipco_mask32(cc, SSB_CHIPCO_PMU_CTL, 531 chipco_mask32(cc, SSB_CHIPCO_PMU_CTL,
@@ -638,9 +636,8 @@ u32 ssb_pmu_get_alp_clock(struct ssb_chipcommon *cc)
638 case 0x5354: 636 case 0x5354:
639 ssb_pmu_get_alp_clock_clk0(cc); 637 ssb_pmu_get_alp_clock_clk0(cc);
640 default: 638 default:
641 ssb_printk(KERN_ERR PFX 639 ssb_err("ERROR: PMU alp clock unknown for device %04X\n",
642 "ERROR: PMU alp clock unknown for device %04X\n", 640 bus->chip_id);
643 bus->chip_id);
644 return 0; 641 return 0;
645 } 642 }
646} 643}
@@ -654,9 +651,8 @@ u32 ssb_pmu_get_cpu_clock(struct ssb_chipcommon *cc)
654 /* 5354 chip uses a non programmable PLL of frequency 240MHz */ 651 /* 5354 chip uses a non programmable PLL of frequency 240MHz */
655 return 240000000; 652 return 240000000;
656 default: 653 default:
657 ssb_printk(KERN_ERR PFX 654 ssb_err("ERROR: PMU cpu clock unknown for device %04X\n",
658 "ERROR: PMU cpu clock unknown for device %04X\n", 655 bus->chip_id);
659 bus->chip_id);
660 return 0; 656 return 0;
661 } 657 }
662} 658}
@@ -669,9 +665,8 @@ u32 ssb_pmu_get_controlclock(struct ssb_chipcommon *cc)
669 case 0x5354: 665 case 0x5354:
670 return 120000000; 666 return 120000000;
671 default: 667 default:
672 ssb_printk(KERN_ERR PFX 668 ssb_err("ERROR: PMU controlclock unknown for device %04X\n",
673 "ERROR: PMU controlclock unknown for device %04X\n", 669 bus->chip_id);
674 bus->chip_id);
675 return 0; 670 return 0;
676 } 671 }
677} 672}
diff --git a/drivers/ssb/driver_mipscore.c b/drivers/ssb/driver_mipscore.c
index 33b37dac40bd..fa385a368a56 100644
--- a/drivers/ssb/driver_mipscore.c
+++ b/drivers/ssb/driver_mipscore.c
@@ -167,21 +167,22 @@ static void set_irq(struct ssb_device *dev, unsigned int irq)
167 irqflag |= (ipsflag & ~ipsflag_irq_mask[irq]); 167 irqflag |= (ipsflag & ~ipsflag_irq_mask[irq]);
168 ssb_write32(mdev, SSB_IPSFLAG, irqflag); 168 ssb_write32(mdev, SSB_IPSFLAG, irqflag);
169 } 169 }
170 ssb_dprintk(KERN_INFO PFX 170 ssb_dbg("set_irq: core 0x%04x, irq %d => %d\n",
171 "set_irq: core 0x%04x, irq %d => %d\n", 171 dev->id.coreid, oldirq+2, irq+2);
172 dev->id.coreid, oldirq+2, irq+2);
173} 172}
174 173
175static void print_irq(struct ssb_device *dev, unsigned int irq) 174static void print_irq(struct ssb_device *dev, unsigned int irq)
176{ 175{
177 int i;
178 static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"}; 176 static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"};
179 ssb_dprintk(KERN_INFO PFX 177 ssb_dbg("core 0x%04x, irq : %s%s %s%s %s%s %s%s %s%s %s%s %s%s\n",
180 "core 0x%04x, irq :", dev->id.coreid); 178 dev->id.coreid,
181 for (i = 0; i <= 6; i++) { 179 irq_name[0], irq == 0 ? "*" : " ",
182 ssb_dprintk(" %s%s", irq_name[i], i==irq?"*":" "); 180 irq_name[1], irq == 1 ? "*" : " ",
183 } 181 irq_name[2], irq == 2 ? "*" : " ",
184 ssb_dprintk("\n"); 182 irq_name[3], irq == 3 ? "*" : " ",
183 irq_name[4], irq == 4 ? "*" : " ",
184 irq_name[5], irq == 5 ? "*" : " ",
185 irq_name[6], irq == 6 ? "*" : " ");
185} 186}
186 187
187static void dump_irq(struct ssb_bus *bus) 188static void dump_irq(struct ssb_bus *bus)
@@ -286,7 +287,7 @@ void ssb_mipscore_init(struct ssb_mipscore *mcore)
286 if (!mcore->dev) 287 if (!mcore->dev)
287 return; /* We don't have a MIPS core */ 288 return; /* We don't have a MIPS core */
288 289
289 ssb_dprintk(KERN_INFO PFX "Initializing MIPS core...\n"); 290 ssb_dbg("Initializing MIPS core...\n");
290 291
291 bus = mcore->dev->bus; 292 bus = mcore->dev->bus;
292 hz = ssb_clockspeed(bus); 293 hz = ssb_clockspeed(bus);
@@ -334,7 +335,7 @@ void ssb_mipscore_init(struct ssb_mipscore *mcore)
334 break; 335 break;
335 } 336 }
336 } 337 }
337 ssb_dprintk(KERN_INFO PFX "after irq reconfiguration\n"); 338 ssb_dbg("after irq reconfiguration\n");
338 dump_irq(bus); 339 dump_irq(bus);
339 340
340 ssb_mips_serial_init(mcore); 341 ssb_mips_serial_init(mcore);
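Editor's note: the ssb hunks in this series all replace ssb_printk(KERN_<LEVEL> PFX ...) and ssb_dprintk() calls with level-specific wrappers. The real definitions live in the driver's private header; the following is only an illustrative approximation of what such wrappers expand to, not the driver's exact code:

/* Illustrative approximations of the ssb logging wrappers. */
#define ssb_info(fmt, ...)   pr_info("ssb: " fmt, ##__VA_ARGS__)
#define ssb_err(fmt, ...)    pr_err("ssb: " fmt, ##__VA_ARGS__)
#define ssb_emerg(fmt, ...)  pr_emerg("ssb: " fmt, ##__VA_ARGS__)
#define ssb_dbg(fmt, ...)    pr_debug("ssb: " fmt, ##__VA_ARGS__)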
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 59801d23d7ec..d75b72ba2672 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -263,8 +263,7 @@ int ssb_pcicore_plat_dev_init(struct pci_dev *d)
263 return -ENODEV; 263 return -ENODEV;
264 } 264 }
265 265
266 ssb_printk(KERN_INFO "PCI: Fixing up device %s\n", 266 ssb_info("PCI: Fixing up device %s\n", pci_name(d));
267 pci_name(d));
268 267
269 /* Fix up interrupt lines */ 268 /* Fix up interrupt lines */
270 d->irq = ssb_mips_irq(extpci_core->dev) + 2; 269 d->irq = ssb_mips_irq(extpci_core->dev) + 2;
@@ -285,12 +284,12 @@ static void ssb_pcicore_fixup_pcibridge(struct pci_dev *dev)
285 if (dev->bus->number != 0 || PCI_SLOT(dev->devfn) != 0) 284 if (dev->bus->number != 0 || PCI_SLOT(dev->devfn) != 0)
286 return; 285 return;
287 286
288 ssb_printk(KERN_INFO "PCI: Fixing up bridge %s\n", pci_name(dev)); 287 ssb_info("PCI: Fixing up bridge %s\n", pci_name(dev));
289 288
290 /* Enable PCI bridge bus mastering and memory space */ 289 /* Enable PCI bridge bus mastering and memory space */
291 pci_set_master(dev); 290 pci_set_master(dev);
292 if (pcibios_enable_device(dev, ~0) < 0) { 291 if (pcibios_enable_device(dev, ~0) < 0) {
293 ssb_printk(KERN_ERR "PCI: SSB bridge enable failed\n"); 292 ssb_err("PCI: SSB bridge enable failed\n");
294 return; 293 return;
295 } 294 }
296 295
@@ -299,8 +298,8 @@ static void ssb_pcicore_fixup_pcibridge(struct pci_dev *dev)
299 298
300 /* Make sure our latency is high enough to handle the devices behind us */ 299 /* Make sure our latency is high enough to handle the devices behind us */
301 lat = 168; 300 lat = 168;
302 ssb_printk(KERN_INFO "PCI: Fixing latency timer of device %s to %u\n", 301 ssb_info("PCI: Fixing latency timer of device %s to %u\n",
303 pci_name(dev), lat); 302 pci_name(dev), lat);
304 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat); 303 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
305} 304}
306DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, ssb_pcicore_fixup_pcibridge); 305DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, ssb_pcicore_fixup_pcibridge);
@@ -323,7 +322,7 @@ static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
323 return; 322 return;
324 extpci_core = pc; 323 extpci_core = pc;
325 324
326 ssb_dprintk(KERN_INFO PFX "PCIcore in host mode found\n"); 325 ssb_dbg("PCIcore in host mode found\n");
327 /* Reset devices on the external PCI bus */ 326 /* Reset devices on the external PCI bus */
328 val = SSB_PCICORE_CTL_RST_OE; 327 val = SSB_PCICORE_CTL_RST_OE;
329 val |= SSB_PCICORE_CTL_CLK_OE; 328 val |= SSB_PCICORE_CTL_CLK_OE;
@@ -338,7 +337,7 @@ static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
338 udelay(1); /* Assertion time demanded by the PCI standard */ 337 udelay(1); /* Assertion time demanded by the PCI standard */
339 338
340 if (pc->dev->bus->has_cardbus_slot) { 339 if (pc->dev->bus->has_cardbus_slot) {
341 ssb_dprintk(KERN_INFO PFX "CardBus slot detected\n"); 340 ssb_dbg("CardBus slot detected\n");
342 pc->cardbusmode = 1; 341 pc->cardbusmode = 1;
343 /* GPIO 1 resets the bridge */ 342 /* GPIO 1 resets the bridge */
344 ssb_gpio_out(pc->dev->bus, 1, 1); 343 ssb_gpio_out(pc->dev->bus, 1, 1);
diff --git a/drivers/ssb/embedded.c b/drivers/ssb/embedded.c
index bb18d76f9f2c..55e101115038 100644
--- a/drivers/ssb/embedded.c
+++ b/drivers/ssb/embedded.c
@@ -57,9 +57,8 @@ int ssb_watchdog_register(struct ssb_bus *bus)
57 bus->busnumber, &wdt, 57 bus->busnumber, &wdt,
58 sizeof(wdt)); 58 sizeof(wdt));
59 if (IS_ERR(pdev)) { 59 if (IS_ERR(pdev)) {
60 ssb_dprintk(KERN_INFO PFX 60 ssb_dbg("can not register watchdog device, err: %li\n",
61 "can not register watchdog device, err: %li\n", 61 PTR_ERR(pdev));
62 PTR_ERR(pdev));
63 return PTR_ERR(pdev); 62 return PTR_ERR(pdev);
64 } 63 }
65 64
diff --git a/drivers/ssb/main.c b/drivers/ssb/main.c
index 3b645b8a261f..812775a4bfb6 100644
--- a/drivers/ssb/main.c
+++ b/drivers/ssb/main.c
@@ -275,8 +275,8 @@ int ssb_devices_thaw(struct ssb_freeze_context *ctx)
275 275
276 err = sdrv->probe(sdev, &sdev->id); 276 err = sdrv->probe(sdev, &sdev->id);
277 if (err) { 277 if (err) {
278 ssb_printk(KERN_ERR PFX "Failed to thaw device %s\n", 278 ssb_err("Failed to thaw device %s\n",
279 dev_name(sdev->dev)); 279 dev_name(sdev->dev));
280 result = err; 280 result = err;
281 } 281 }
282 ssb_device_put(sdev); 282 ssb_device_put(sdev);
@@ -447,10 +447,9 @@ void ssb_bus_unregister(struct ssb_bus *bus)
447 447
448 err = ssb_gpio_unregister(bus); 448 err = ssb_gpio_unregister(bus);
449 if (err == -EBUSY) 449 if (err == -EBUSY)
450 ssb_dprintk(KERN_ERR PFX "Some GPIOs are still in use.\n"); 450 ssb_dbg("Some GPIOs are still in use\n");
451 else if (err) 451 else if (err)
452 ssb_dprintk(KERN_ERR PFX 452 ssb_dbg("Can not unregister GPIO driver: %i\n", err);
453 "Can not unregister GPIO driver: %i\n", err);
454 453
455 ssb_buses_lock(); 454 ssb_buses_lock();
456 ssb_devices_unregister(bus); 455 ssb_devices_unregister(bus);
@@ -497,8 +496,7 @@ static int ssb_devices_register(struct ssb_bus *bus)
497 496
498 devwrap = kzalloc(sizeof(*devwrap), GFP_KERNEL); 497 devwrap = kzalloc(sizeof(*devwrap), GFP_KERNEL);
499 if (!devwrap) { 498 if (!devwrap) {
500 ssb_printk(KERN_ERR PFX 499 ssb_err("Could not allocate device\n");
501 "Could not allocate device\n");
502 err = -ENOMEM; 500 err = -ENOMEM;
503 goto error; 501 goto error;
504 } 502 }
@@ -537,9 +535,7 @@ static int ssb_devices_register(struct ssb_bus *bus)
537 sdev->dev = dev; 535 sdev->dev = dev;
538 err = device_register(dev); 536 err = device_register(dev);
539 if (err) { 537 if (err) {
540 ssb_printk(KERN_ERR PFX 538 ssb_err("Could not register %s\n", dev_name(dev));
541 "Could not register %s\n",
542 dev_name(dev));
543 /* Set dev to NULL to not unregister 539 /* Set dev to NULL to not unregister
544 * dev on error unwinding. */ 540 * dev on error unwinding. */
545 sdev->dev = NULL; 541 sdev->dev = NULL;
@@ -825,10 +821,9 @@ static int ssb_bus_register(struct ssb_bus *bus,
825 ssb_mipscore_init(&bus->mipscore); 821 ssb_mipscore_init(&bus->mipscore);
826 err = ssb_gpio_init(bus); 822 err = ssb_gpio_init(bus);
827 if (err == -ENOTSUPP) 823 if (err == -ENOTSUPP)
828 ssb_dprintk(KERN_DEBUG PFX "GPIO driver not activated\n"); 824 ssb_dbg("GPIO driver not activated\n");
829 else if (err) 825 else if (err)
830 ssb_dprintk(KERN_ERR PFX 826 ssb_dbg("Error registering GPIO driver: %i\n", err);
831 "Error registering GPIO driver: %i\n", err);
832 err = ssb_fetch_invariants(bus, get_invariants); 827 err = ssb_fetch_invariants(bus, get_invariants);
833 if (err) { 828 if (err) {
834 ssb_bus_may_powerdown(bus); 829 ssb_bus_may_powerdown(bus);
@@ -878,11 +873,11 @@ int ssb_bus_pcibus_register(struct ssb_bus *bus, struct pci_dev *host_pci)
878 873
879 err = ssb_bus_register(bus, ssb_pci_get_invariants, 0); 874 err = ssb_bus_register(bus, ssb_pci_get_invariants, 0);
880 if (!err) { 875 if (!err) {
881 ssb_printk(KERN_INFO PFX "Sonics Silicon Backplane found on " 876 ssb_info("Sonics Silicon Backplane found on PCI device %s\n",
882 "PCI device %s\n", dev_name(&host_pci->dev)); 877 dev_name(&host_pci->dev));
883 } else { 878 } else {
884 ssb_printk(KERN_ERR PFX "Failed to register PCI version" 879 ssb_err("Failed to register PCI version of SSB with error %d\n",
885 " of SSB with error %d\n", err); 880 err);
886 } 881 }
887 882
888 return err; 883 return err;
@@ -903,8 +898,8 @@ int ssb_bus_pcmciabus_register(struct ssb_bus *bus,
903 898
904 err = ssb_bus_register(bus, ssb_pcmcia_get_invariants, baseaddr); 899 err = ssb_bus_register(bus, ssb_pcmcia_get_invariants, baseaddr);
905 if (!err) { 900 if (!err) {
906 ssb_printk(KERN_INFO PFX "Sonics Silicon Backplane found on " 901 ssb_info("Sonics Silicon Backplane found on PCMCIA device %s\n",
907 "PCMCIA device %s\n", pcmcia_dev->devname); 902 pcmcia_dev->devname);
908 } 903 }
909 904
910 return err; 905 return err;
@@ -925,8 +920,8 @@ int ssb_bus_sdiobus_register(struct ssb_bus *bus, struct sdio_func *func,
925 920
926 err = ssb_bus_register(bus, ssb_sdio_get_invariants, ~0); 921 err = ssb_bus_register(bus, ssb_sdio_get_invariants, ~0);
927 if (!err) { 922 if (!err) {
928 ssb_printk(KERN_INFO PFX "Sonics Silicon Backplane found on " 923 ssb_info("Sonics Silicon Backplane found on SDIO device %s\n",
929 "SDIO device %s\n", sdio_func_id(func)); 924 sdio_func_id(func));
930 } 925 }
931 926
932 return err; 927 return err;
@@ -944,8 +939,8 @@ int ssb_bus_ssbbus_register(struct ssb_bus *bus, unsigned long baseaddr,
944 939
945 err = ssb_bus_register(bus, get_invariants, baseaddr); 940 err = ssb_bus_register(bus, get_invariants, baseaddr);
946 if (!err) { 941 if (!err) {
947 ssb_printk(KERN_INFO PFX "Sonics Silicon Backplane found at " 942 ssb_info("Sonics Silicon Backplane found at address 0x%08lX\n",
948 "address 0x%08lX\n", baseaddr); 943 baseaddr);
949 } 944 }
950 945
951 return err; 946 return err;
@@ -1339,7 +1334,7 @@ out:
1339#endif 1334#endif
1340 return err; 1335 return err;
1341error: 1336error:
1342 ssb_printk(KERN_ERR PFX "Bus powerdown failed\n"); 1337 ssb_err("Bus powerdown failed\n");
1343 goto out; 1338 goto out;
1344} 1339}
1345EXPORT_SYMBOL(ssb_bus_may_powerdown); 1340EXPORT_SYMBOL(ssb_bus_may_powerdown);
@@ -1362,7 +1357,7 @@ int ssb_bus_powerup(struct ssb_bus *bus, bool dynamic_pctl)
1362 1357
1363 return 0; 1358 return 0;
1364error: 1359error:
1365 ssb_printk(KERN_ERR PFX "Bus powerup failed\n"); 1360 ssb_err("Bus powerup failed\n");
1366 return err; 1361 return err;
1367} 1362}
1368EXPORT_SYMBOL(ssb_bus_powerup); 1363EXPORT_SYMBOL(ssb_bus_powerup);
@@ -1470,15 +1465,13 @@ static int __init ssb_modinit(void)
1470 1465
1471 err = b43_pci_ssb_bridge_init(); 1466 err = b43_pci_ssb_bridge_init();
1472 if (err) { 1467 if (err) {
1473 ssb_printk(KERN_ERR "Broadcom 43xx PCI-SSB-bridge " 1468 ssb_err("Broadcom 43xx PCI-SSB-bridge initialization failed\n");
1474 "initialization failed\n");
1475 /* don't fail SSB init because of this */ 1469 /* don't fail SSB init because of this */
1476 err = 0; 1470 err = 0;
1477 } 1471 }
1478 err = ssb_gige_init(); 1472 err = ssb_gige_init();
1479 if (err) { 1473 if (err) {
1480 ssb_printk(KERN_ERR "SSB Broadcom Gigabit Ethernet " 1474 ssb_err("SSB Broadcom Gigabit Ethernet driver initialization failed\n");
1481 "driver initialization failed\n");
1482 /* don't fail SSB init because of this */ 1475 /* don't fail SSB init because of this */
1483 err = 0; 1476 err = 0;
1484 } 1477 }
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index e9d94968f394..63ff69f9d3eb 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -56,7 +56,7 @@ int ssb_pci_switch_coreidx(struct ssb_bus *bus, u8 coreidx)
56 } 56 }
57 return 0; 57 return 0;
58error: 58error:
59 ssb_printk(KERN_ERR PFX "Failed to switch to core %u\n", coreidx); 59 ssb_err("Failed to switch to core %u\n", coreidx);
60 return -ENODEV; 60 return -ENODEV;
61} 61}
62 62
@@ -67,10 +67,9 @@ int ssb_pci_switch_core(struct ssb_bus *bus,
67 unsigned long flags; 67 unsigned long flags;
68 68
69#if SSB_VERBOSE_PCICORESWITCH_DEBUG 69#if SSB_VERBOSE_PCICORESWITCH_DEBUG
70 ssb_printk(KERN_INFO PFX 70 ssb_info("Switching to %s core, index %d\n",
71 "Switching to %s core, index %d\n", 71 ssb_core_name(dev->id.coreid),
72 ssb_core_name(dev->id.coreid), 72 dev->core_index);
73 dev->core_index);
74#endif 73#endif
75 74
76 spin_lock_irqsave(&bus->bar_lock, flags); 75 spin_lock_irqsave(&bus->bar_lock, flags);
@@ -231,6 +230,15 @@ static inline u8 ssb_crc8(u8 crc, u8 data)
231 return t[crc ^ data]; 230 return t[crc ^ data];
232} 231}
233 232
233static void sprom_get_mac(char *mac, const u16 *in)
234{
235 int i;
236 for (i = 0; i < 3; i++) {
237 *mac++ = in[i] >> 8;
238 *mac++ = in[i];
239 }
240}
241
234static u8 ssb_sprom_crc(const u16 *sprom, u16 size) 242static u8 ssb_sprom_crc(const u16 *sprom, u16 size)
235{ 243{
236 int word; 244 int word;
@@ -278,7 +286,7 @@ static int sprom_do_write(struct ssb_bus *bus, const u16 *sprom)
278 u32 spromctl; 286 u32 spromctl;
279 u16 size = bus->sprom_size; 287 u16 size = bus->sprom_size;
280 288
281 ssb_printk(KERN_NOTICE PFX "Writing SPROM. Do NOT turn off the power! Please stand by...\n"); 289 ssb_notice("Writing SPROM. Do NOT turn off the power! Please stand by...\n");
282 err = pci_read_config_dword(pdev, SSB_SPROMCTL, &spromctl); 290 err = pci_read_config_dword(pdev, SSB_SPROMCTL, &spromctl);
283 if (err) 291 if (err)
284 goto err_ctlreg; 292 goto err_ctlreg;
@@ -286,17 +294,17 @@ static int sprom_do_write(struct ssb_bus *bus, const u16 *sprom)
286 err = pci_write_config_dword(pdev, SSB_SPROMCTL, spromctl); 294 err = pci_write_config_dword(pdev, SSB_SPROMCTL, spromctl);
287 if (err) 295 if (err)
288 goto err_ctlreg; 296 goto err_ctlreg;
289 ssb_printk(KERN_NOTICE PFX "[ 0%%"); 297 ssb_notice("[ 0%%");
290 msleep(500); 298 msleep(500);
291 for (i = 0; i < size; i++) { 299 for (i = 0; i < size; i++) {
292 if (i == size / 4) 300 if (i == size / 4)
293 ssb_printk("25%%"); 301 ssb_cont("25%%");
294 else if (i == size / 2) 302 else if (i == size / 2)
295 ssb_printk("50%%"); 303 ssb_cont("50%%");
296 else if (i == (size * 3) / 4) 304 else if (i == (size * 3) / 4)
297 ssb_printk("75%%"); 305 ssb_cont("75%%");
298 else if (i % 2) 306 else if (i % 2)
299 ssb_printk("."); 307 ssb_cont(".");
300 writew(sprom[i], bus->mmio + bus->sprom_offset + (i * 2)); 308 writew(sprom[i], bus->mmio + bus->sprom_offset + (i * 2));
301 mmiowb(); 309 mmiowb();
302 msleep(20); 310 msleep(20);
@@ -309,12 +317,12 @@ static int sprom_do_write(struct ssb_bus *bus, const u16 *sprom)
309 if (err) 317 if (err)
310 goto err_ctlreg; 318 goto err_ctlreg;
311 msleep(500); 319 msleep(500);
312 ssb_printk("100%% ]\n"); 320 ssb_cont("100%% ]\n");
313 ssb_printk(KERN_NOTICE PFX "SPROM written.\n"); 321 ssb_notice("SPROM written\n");
314 322
315 return 0; 323 return 0;
316err_ctlreg: 324err_ctlreg:
317 ssb_printk(KERN_ERR PFX "Could not access SPROM control register.\n"); 325 ssb_err("Could not access SPROM control register.\n");
318 return err; 326 return err;
319} 327}
320 328
@@ -341,8 +349,6 @@ static s8 r123_extract_antgain(u8 sprom_revision, const u16 *in,
341 349
342static void sprom_extract_r123(struct ssb_sprom *out, const u16 *in) 350static void sprom_extract_r123(struct ssb_sprom *out, const u16 *in)
343{ 351{
344 int i;
345 u16 v;
346 u16 loc[3]; 352 u16 loc[3];
347 353
348 if (out->revision == 3) /* rev 3 moved MAC */ 354 if (out->revision == 3) /* rev 3 moved MAC */
@@ -352,19 +358,10 @@ static void sprom_extract_r123(struct ssb_sprom *out, const u16 *in)
352 loc[1] = SSB_SPROM1_ET0MAC; 358 loc[1] = SSB_SPROM1_ET0MAC;
353 loc[2] = SSB_SPROM1_ET1MAC; 359 loc[2] = SSB_SPROM1_ET1MAC;
354 } 360 }
355 for (i = 0; i < 3; i++) { 361 sprom_get_mac(out->il0mac, &in[SPOFF(loc[0])]);
356 v = in[SPOFF(loc[0]) + i];
357 *(((__be16 *)out->il0mac) + i) = cpu_to_be16(v);
358 }
359 if (out->revision < 3) { /* only rev 1-2 have et0, et1 */ 362 if (out->revision < 3) { /* only rev 1-2 have et0, et1 */
360 for (i = 0; i < 3; i++) { 363 sprom_get_mac(out->et0mac, &in[SPOFF(loc[1])]);
361 v = in[SPOFF(loc[1]) + i]; 364 sprom_get_mac(out->et1mac, &in[SPOFF(loc[2])]);
362 *(((__be16 *)out->et0mac) + i) = cpu_to_be16(v);
363 }
364 for (i = 0; i < 3; i++) {
365 v = in[SPOFF(loc[2]) + i];
366 *(((__be16 *)out->et1mac) + i) = cpu_to_be16(v);
367 }
368 } 365 }
369 SPEX(et0phyaddr, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET0A, 0); 366 SPEX(et0phyaddr, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET0A, 0);
370 SPEX(et1phyaddr, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET1A, 367 SPEX(et1phyaddr, SSB_SPROM1_ETHPHY, SSB_SPROM1_ETHPHY_ET1A,
@@ -454,19 +451,15 @@ static void sprom_extract_r458(struct ssb_sprom *out, const u16 *in)
454 451
455static void sprom_extract_r45(struct ssb_sprom *out, const u16 *in) 452static void sprom_extract_r45(struct ssb_sprom *out, const u16 *in)
456{ 453{
457 int i;
458 u16 v;
459 u16 il0mac_offset; 454 u16 il0mac_offset;
460 455
461 if (out->revision == 4) 456 if (out->revision == 4)
462 il0mac_offset = SSB_SPROM4_IL0MAC; 457 il0mac_offset = SSB_SPROM4_IL0MAC;
463 else 458 else
464 il0mac_offset = SSB_SPROM5_IL0MAC; 459 il0mac_offset = SSB_SPROM5_IL0MAC;
465 /* extract the MAC address */ 460
466 for (i = 0; i < 3; i++) { 461 sprom_get_mac(out->il0mac, &in[SPOFF(il0mac_offset)]);
467 v = in[SPOFF(il0mac_offset) + i]; 462
468 *(((__be16 *)out->il0mac) + i) = cpu_to_be16(v);
469 }
470 SPEX(et0phyaddr, SSB_SPROM4_ETHPHY, SSB_SPROM4_ETHPHY_ET0A, 0); 463 SPEX(et0phyaddr, SSB_SPROM4_ETHPHY, SSB_SPROM4_ETHPHY_ET0A, 0);
471 SPEX(et1phyaddr, SSB_SPROM4_ETHPHY, SSB_SPROM4_ETHPHY_ET1A, 464 SPEX(et1phyaddr, SSB_SPROM4_ETHPHY, SSB_SPROM4_ETHPHY_ET1A,
472 SSB_SPROM4_ETHPHY_ET1A_SHIFT); 465 SSB_SPROM4_ETHPHY_ET1A_SHIFT);
@@ -530,7 +523,7 @@ static void sprom_extract_r45(struct ssb_sprom *out, const u16 *in)
530static void sprom_extract_r8(struct ssb_sprom *out, const u16 *in) 523static void sprom_extract_r8(struct ssb_sprom *out, const u16 *in)
531{ 524{
532 int i; 525 int i;
533 u16 v, o; 526 u16 o;
534 u16 pwr_info_offset[] = { 527 u16 pwr_info_offset[] = {
535 SSB_SROM8_PWR_INFO_CORE0, SSB_SROM8_PWR_INFO_CORE1, 528 SSB_SROM8_PWR_INFO_CORE0, SSB_SROM8_PWR_INFO_CORE1,
536 SSB_SROM8_PWR_INFO_CORE2, SSB_SROM8_PWR_INFO_CORE3 529 SSB_SROM8_PWR_INFO_CORE2, SSB_SROM8_PWR_INFO_CORE3
@@ -539,10 +532,8 @@ static void sprom_extract_r8(struct ssb_sprom *out, const u16 *in)
539 ARRAY_SIZE(out->core_pwr_info)); 532 ARRAY_SIZE(out->core_pwr_info));
540 533
541 /* extract the MAC address */ 534 /* extract the MAC address */
542 for (i = 0; i < 3; i++) { 535 sprom_get_mac(out->il0mac, &in[SPOFF(SSB_SPROM8_IL0MAC)]);
543 v = in[SPOFF(SSB_SPROM8_IL0MAC) + i]; 536
544 *(((__be16 *)out->il0mac) + i) = cpu_to_be16(v);
545 }
546 SPEX(board_rev, SSB_SPROM8_BOARDREV, 0xFFFF, 0); 537 SPEX(board_rev, SSB_SPROM8_BOARDREV, 0xFFFF, 0);
547 SPEX(alpha2[0], SSB_SPROM8_CCODE, 0xff00, 8); 538 SPEX(alpha2[0], SSB_SPROM8_CCODE, 0xff00, 8);
548 SPEX(alpha2[1], SSB_SPROM8_CCODE, 0x00ff, 0); 539 SPEX(alpha2[1], SSB_SPROM8_CCODE, 0x00ff, 0);
@@ -743,7 +734,7 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out,
743 memset(out, 0, sizeof(*out)); 734 memset(out, 0, sizeof(*out));
744 735
745 out->revision = in[size - 1] & 0x00FF; 736 out->revision = in[size - 1] & 0x00FF;
746 ssb_dprintk(KERN_DEBUG PFX "SPROM revision %d detected.\n", out->revision); 737 ssb_dbg("SPROM revision %d detected\n", out->revision);
747 memset(out->et0mac, 0xFF, 6); /* preset et0 and et1 mac */ 738 memset(out->et0mac, 0xFF, 6); /* preset et0 and et1 mac */
748 memset(out->et1mac, 0xFF, 6); 739 memset(out->et1mac, 0xFF, 6);
749 740
@@ -752,7 +743,7 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out,
752 * number stored in the SPROM. 743 * number stored in the SPROM.
753 * Always extract r1. */ 744 * Always extract r1. */
754 out->revision = 1; 745 out->revision = 1;
755 ssb_dprintk(KERN_DEBUG PFX "SPROM treated as revision %d\n", out->revision); 746 ssb_dbg("SPROM treated as revision %d\n", out->revision);
756 } 747 }
757 748
758 switch (out->revision) { 749 switch (out->revision) {
@@ -769,9 +760,8 @@ static int sprom_extract(struct ssb_bus *bus, struct ssb_sprom *out,
769 sprom_extract_r8(out, in); 760 sprom_extract_r8(out, in);
770 break; 761 break;
771 default: 762 default:
772 ssb_printk(KERN_WARNING PFX "Unsupported SPROM" 763 ssb_warn("Unsupported SPROM revision %d detected. Will extract v1\n",
773 " revision %d detected. Will extract" 764 out->revision);
774 " v1\n", out->revision);
775 out->revision = 1; 765 out->revision = 1;
776 sprom_extract_r123(out, in); 766 sprom_extract_r123(out, in);
777 } 767 }
@@ -791,7 +781,7 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
791 u16 *buf; 781 u16 *buf;
792 782
793 if (!ssb_is_sprom_available(bus)) { 783 if (!ssb_is_sprom_available(bus)) {
794 ssb_printk(KERN_ERR PFX "No SPROM available!\n"); 784 ssb_err("No SPROM available!\n");
795 return -ENODEV; 785 return -ENODEV;
796 } 786 }
797 if (bus->chipco.dev) { /* can be unavailable! */ 787 if (bus->chipco.dev) { /* can be unavailable! */
@@ -810,7 +800,7 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
810 } else { 800 } else {
811 bus->sprom_offset = SSB_SPROM_BASE1; 801 bus->sprom_offset = SSB_SPROM_BASE1;
812 } 802 }
813 ssb_dprintk(KERN_INFO PFX "SPROM offset is 0x%x\n", bus->sprom_offset); 803 ssb_dbg("SPROM offset is 0x%x\n", bus->sprom_offset);
814 804
815 buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL); 805 buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL);
816 if (!buf) 806 if (!buf)
@@ -835,18 +825,15 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
835 * available for this device in some other storage */ 825 * available for this device in some other storage */
836 err = ssb_fill_sprom_with_fallback(bus, sprom); 826 err = ssb_fill_sprom_with_fallback(bus, sprom);
837 if (err) { 827 if (err) {
838 ssb_printk(KERN_WARNING PFX "WARNING: Using" 828 ssb_warn("WARNING: Using fallback SPROM failed (err %d)\n",
839 " fallback SPROM failed (err %d)\n", 829 err);
840 err);
841 } else { 830 } else {
842 ssb_dprintk(KERN_DEBUG PFX "Using SPROM" 831 ssb_dbg("Using SPROM revision %d provided by platform\n",
843 " revision %d provided by" 832 sprom->revision);
844 " platform.\n", sprom->revision);
845 err = 0; 833 err = 0;
846 goto out_free; 834 goto out_free;
847 } 835 }
848 ssb_printk(KERN_WARNING PFX "WARNING: Invalid" 836 ssb_warn("WARNING: Invalid SPROM CRC (corrupt SPROM)\n");
849 " SPROM CRC (corrupt SPROM)\n");
850 } 837 }
851 } 838 }
852 err = sprom_extract(bus, sprom, buf, bus->sprom_size); 839 err = sprom_extract(bus, sprom, buf, bus->sprom_size);
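Note: the three open-coded MAC loops in sprom_extract_r123(), sprom_extract_r45() and sprom_extract_r8() are folded into the new sprom_get_mac() helper above. Each 16-bit SPROM word carries two MAC bytes, high byte first, so storing in[i] >> 8 followed by in[i] reproduces the old cpu_to_be16() stores. A minimal userspace sketch of the decoding (not part of the patch):

#include <stdio.h>
#include <stdint.h>

/* Same byte order as the kernel helper: big-endian u16 words in,
 * six MAC bytes out. */
static void sprom_get_mac(unsigned char *mac, const uint16_t *in)
{
	int i;

	for (i = 0; i < 3; i++) {
		*mac++ = in[i] >> 8;
		*mac++ = in[i];
	}
}

int main(void)
{
	const uint16_t words[3] = { 0x0011, 0x2233, 0x4455 };
	unsigned char mac[6];

	sprom_get_mac(mac, words);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	/* prints 00:11:22:33:44:55 */
	return 0;
}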
diff --git a/drivers/ssb/pcmcia.c b/drivers/ssb/pcmcia.c
index fbafed5b729b..b413e0187087 100644
--- a/drivers/ssb/pcmcia.c
+++ b/drivers/ssb/pcmcia.c
@@ -143,7 +143,7 @@ int ssb_pcmcia_switch_coreidx(struct ssb_bus *bus,
143 143
144 return 0; 144 return 0;
145error: 145error:
146 ssb_printk(KERN_ERR PFX "Failed to switch to core %u\n", coreidx); 146 ssb_err("Failed to switch to core %u\n", coreidx);
147 return err; 147 return err;
148} 148}
149 149
@@ -153,10 +153,9 @@ int ssb_pcmcia_switch_core(struct ssb_bus *bus,
153 int err; 153 int err;
154 154
155#if SSB_VERBOSE_PCMCIACORESWITCH_DEBUG 155#if SSB_VERBOSE_PCMCIACORESWITCH_DEBUG
156 ssb_printk(KERN_INFO PFX 156 ssb_info("Switching to %s core, index %d\n",
157 "Switching to %s core, index %d\n", 157 ssb_core_name(dev->id.coreid),
158 ssb_core_name(dev->id.coreid), 158 dev->core_index);
159 dev->core_index);
160#endif 159#endif
161 160
162 err = ssb_pcmcia_switch_coreidx(bus, dev->core_index); 161 err = ssb_pcmcia_switch_coreidx(bus, dev->core_index);
@@ -192,7 +191,7 @@ int ssb_pcmcia_switch_segment(struct ssb_bus *bus, u8 seg)
192 191
193 return 0; 192 return 0;
194error: 193error:
195 ssb_printk(KERN_ERR PFX "Failed to switch pcmcia segment\n"); 194 ssb_err("Failed to switch pcmcia segment\n");
196 return err; 195 return err;
197} 196}
198 197
@@ -549,44 +548,39 @@ static int ssb_pcmcia_sprom_write_all(struct ssb_bus *bus, const u16 *sprom)
549 bool failed = 0; 548 bool failed = 0;
550 size_t size = SSB_PCMCIA_SPROM_SIZE; 549 size_t size = SSB_PCMCIA_SPROM_SIZE;
551 550
552 ssb_printk(KERN_NOTICE PFX 551 ssb_notice("Writing SPROM. Do NOT turn off the power! Please stand by...\n");
553 "Writing SPROM. Do NOT turn off the power! "
554 "Please stand by...\n");
555 err = ssb_pcmcia_sprom_command(bus, SSB_PCMCIA_SPROMCTL_WRITEEN); 552 err = ssb_pcmcia_sprom_command(bus, SSB_PCMCIA_SPROMCTL_WRITEEN);
556 if (err) { 553 if (err) {
557 ssb_printk(KERN_NOTICE PFX 554 ssb_notice("Could not enable SPROM write access\n");
558 "Could not enable SPROM write access.\n");
559 return -EBUSY; 555 return -EBUSY;
560 } 556 }
561 ssb_printk(KERN_NOTICE PFX "[ 0%%"); 557 ssb_notice("[ 0%%");
562 msleep(500); 558 msleep(500);
563 for (i = 0; i < size; i++) { 559 for (i = 0; i < size; i++) {
564 if (i == size / 4) 560 if (i == size / 4)
565 ssb_printk("25%%"); 561 ssb_cont("25%%");
566 else if (i == size / 2) 562 else if (i == size / 2)
567 ssb_printk("50%%"); 563 ssb_cont("50%%");
568 else if (i == (size * 3) / 4) 564 else if (i == (size * 3) / 4)
569 ssb_printk("75%%"); 565 ssb_cont("75%%");
570 else if (i % 2) 566 else if (i % 2)
571 ssb_printk("."); 567 ssb_cont(".");
572 err = ssb_pcmcia_sprom_write(bus, i, sprom[i]); 568 err = ssb_pcmcia_sprom_write(bus, i, sprom[i]);
573 if (err) { 569 if (err) {
574 ssb_printk(KERN_NOTICE PFX 570 ssb_notice("Failed to write to SPROM\n");
575 "Failed to write to SPROM.\n");
576 failed = 1; 571 failed = 1;
577 break; 572 break;
578 } 573 }
579 } 574 }
580 err = ssb_pcmcia_sprom_command(bus, SSB_PCMCIA_SPROMCTL_WRITEDIS); 575 err = ssb_pcmcia_sprom_command(bus, SSB_PCMCIA_SPROMCTL_WRITEDIS);
581 if (err) { 576 if (err) {
582 ssb_printk(KERN_NOTICE PFX 577 ssb_notice("Could not disable SPROM write access\n");
583 "Could not disable SPROM write access.\n");
584 failed = 1; 578 failed = 1;
585 } 579 }
586 msleep(500); 580 msleep(500);
587 if (!failed) { 581 if (!failed) {
588 ssb_printk("100%% ]\n"); 582 ssb_cont("100%% ]\n");
589 ssb_printk(KERN_NOTICE PFX "SPROM written.\n"); 583 ssb_notice("SPROM written\n");
590 } 584 }
591 585
592 return failed ? -EBUSY : 0; 586 return failed ? -EBUSY : 0;
@@ -700,7 +694,7 @@ static int ssb_pcmcia_do_get_invariants(struct pcmcia_device *p_dev,
700 return -ENOSPC; /* continue with next entry */ 694 return -ENOSPC; /* continue with next entry */
701 695
702error: 696error:
703 ssb_printk(KERN_ERR PFX 697 ssb_err(
704 "PCMCIA: Failed to fetch device invariants: %s\n", 698 "PCMCIA: Failed to fetch device invariants: %s\n",
705 error_description); 699 error_description);
706 return -ENODEV; 700 return -ENODEV;
@@ -722,7 +716,7 @@ int ssb_pcmcia_get_invariants(struct ssb_bus *bus,
722 res = pcmcia_loop_tuple(bus->host_pcmcia, CISTPL_FUNCE, 716 res = pcmcia_loop_tuple(bus->host_pcmcia, CISTPL_FUNCE,
723 ssb_pcmcia_get_mac, sprom); 717 ssb_pcmcia_get_mac, sprom);
724 if (res != 0) { 718 if (res != 0) {
725 ssb_printk(KERN_ERR PFX 719 ssb_err(
726 "PCMCIA: Failed to fetch MAC address\n"); 720 "PCMCIA: Failed to fetch MAC address\n");
727 return -ENODEV; 721 return -ENODEV;
728 } 722 }
@@ -733,7 +727,7 @@ int ssb_pcmcia_get_invariants(struct ssb_bus *bus,
733 if ((res == 0) || (res == -ENOSPC)) 727 if ((res == 0) || (res == -ENOSPC))
734 return 0; 728 return 0;
735 729
736 ssb_printk(KERN_ERR PFX 730 ssb_err(
737 "PCMCIA: Failed to fetch device invariants\n"); 731 "PCMCIA: Failed to fetch device invariants\n");
738 return -ENODEV; 732 return -ENODEV;
739} 733}
@@ -843,6 +837,6 @@ int ssb_pcmcia_init(struct ssb_bus *bus)
843 837
844 return 0; 838 return 0;
845error: 839error:
846 ssb_printk(KERN_ERR PFX "Failed to initialize PCMCIA host device\n"); 840 ssb_err("Failed to initialize PCMCIA host device\n");
847 return err; 841 return err;
848} 842}
diff --git a/drivers/ssb/scan.c b/drivers/ssb/scan.c
index ab4627cf1114..b9429df583eb 100644
--- a/drivers/ssb/scan.c
+++ b/drivers/ssb/scan.c
@@ -125,8 +125,7 @@ static u16 pcidev_to_chipid(struct pci_dev *pci_dev)
125 chipid_fallback = 0x4401; 125 chipid_fallback = 0x4401;
126 break; 126 break;
127 default: 127 default:
128 ssb_printk(KERN_ERR PFX 128 ssb_err("PCI-ID not in fallback list\n");
129 "PCI-ID not in fallback list\n");
130 } 129 }
131 130
132 return chipid_fallback; 131 return chipid_fallback;
@@ -152,8 +151,7 @@ static u8 chipid_to_nrcores(u16 chipid)
152 case 0x4704: 151 case 0x4704:
153 return 9; 152 return 9;
154 default: 153 default:
155 ssb_printk(KERN_ERR PFX 154 ssb_err("CHIPID not in nrcores fallback list\n");
156 "CHIPID not in nrcores fallback list\n");
157 } 155 }
158 156
159 return 1; 157 return 1;
@@ -320,15 +318,13 @@ int ssb_bus_scan(struct ssb_bus *bus,
320 bus->chip_package = 0; 318 bus->chip_package = 0;
321 } 319 }
322 } 320 }
323 ssb_printk(KERN_INFO PFX "Found chip with id 0x%04X, rev 0x%02X and " 321 ssb_info("Found chip with id 0x%04X, rev 0x%02X and package 0x%02X\n",
324 "package 0x%02X\n", bus->chip_id, bus->chip_rev, 322 bus->chip_id, bus->chip_rev, bus->chip_package);
325 bus->chip_package);
326 if (!bus->nr_devices) 323 if (!bus->nr_devices)
327 bus->nr_devices = chipid_to_nrcores(bus->chip_id); 324 bus->nr_devices = chipid_to_nrcores(bus->chip_id);
328 if (bus->nr_devices > ARRAY_SIZE(bus->devices)) { 325 if (bus->nr_devices > ARRAY_SIZE(bus->devices)) {
329 ssb_printk(KERN_ERR PFX 326 ssb_err("More than %d ssb cores found (%d)\n",
330 "More than %d ssb cores found (%d)\n", 327 SSB_MAX_NR_CORES, bus->nr_devices);
331 SSB_MAX_NR_CORES, bus->nr_devices);
332 goto err_unmap; 328 goto err_unmap;
333 } 329 }
334 if (bus->bustype == SSB_BUSTYPE_SSB) { 330 if (bus->bustype == SSB_BUSTYPE_SSB) {
@@ -370,8 +366,7 @@ int ssb_bus_scan(struct ssb_bus *bus,
370 nr_80211_cores++; 366 nr_80211_cores++;
371 if (nr_80211_cores > 1) { 367 if (nr_80211_cores > 1) {
372 if (!we_support_multiple_80211_cores(bus)) { 368 if (!we_support_multiple_80211_cores(bus)) {
373 ssb_dprintk(KERN_INFO PFX "Ignoring additional " 369 ssb_dbg("Ignoring additional 802.11 core\n");
374 "802.11 core\n");
375 continue; 370 continue;
376 } 371 }
377 } 372 }
@@ -379,8 +374,7 @@ int ssb_bus_scan(struct ssb_bus *bus,
379 case SSB_DEV_EXTIF: 374 case SSB_DEV_EXTIF:
380#ifdef CONFIG_SSB_DRIVER_EXTIF 375#ifdef CONFIG_SSB_DRIVER_EXTIF
381 if (bus->extif.dev) { 376 if (bus->extif.dev) {
382 ssb_printk(KERN_WARNING PFX 377 ssb_warn("WARNING: Multiple EXTIFs found\n");
383 "WARNING: Multiple EXTIFs found\n");
384 break; 378 break;
385 } 379 }
386 bus->extif.dev = dev; 380 bus->extif.dev = dev;
@@ -388,8 +382,7 @@ int ssb_bus_scan(struct ssb_bus *bus,
388 break; 382 break;
389 case SSB_DEV_CHIPCOMMON: 383 case SSB_DEV_CHIPCOMMON:
390 if (bus->chipco.dev) { 384 if (bus->chipco.dev) {
391 ssb_printk(KERN_WARNING PFX 385 ssb_warn("WARNING: Multiple ChipCommon found\n");
392 "WARNING: Multiple ChipCommon found\n");
393 break; 386 break;
394 } 387 }
395 bus->chipco.dev = dev; 388 bus->chipco.dev = dev;
@@ -398,8 +391,7 @@ int ssb_bus_scan(struct ssb_bus *bus,
398 case SSB_DEV_MIPS_3302: 391 case SSB_DEV_MIPS_3302:
399#ifdef CONFIG_SSB_DRIVER_MIPS 392#ifdef CONFIG_SSB_DRIVER_MIPS
400 if (bus->mipscore.dev) { 393 if (bus->mipscore.dev) {
401 ssb_printk(KERN_WARNING PFX 394 ssb_warn("WARNING: Multiple MIPS cores found\n");
402 "WARNING: Multiple MIPS cores found\n");
403 break; 395 break;
404 } 396 }
405 bus->mipscore.dev = dev; 397 bus->mipscore.dev = dev;
@@ -420,8 +412,7 @@ int ssb_bus_scan(struct ssb_bus *bus,
420 } 412 }
421 } 413 }
422 if (bus->pcicore.dev) { 414 if (bus->pcicore.dev) {
423 ssb_printk(KERN_WARNING PFX 415 ssb_warn("WARNING: Multiple PCI(E) cores found\n");
424 "WARNING: Multiple PCI(E) cores found\n");
425 break; 416 break;
426 } 417 }
427 bus->pcicore.dev = dev; 418 bus->pcicore.dev = dev;
diff --git a/drivers/ssb/sprom.c b/drivers/ssb/sprom.c
index 80d366fcf8d3..a3b23644b0fb 100644
--- a/drivers/ssb/sprom.c
+++ b/drivers/ssb/sprom.c
@@ -127,13 +127,13 @@ ssize_t ssb_attr_sprom_store(struct ssb_bus *bus,
127 goto out_kfree; 127 goto out_kfree;
128 err = ssb_devices_freeze(bus, &freeze); 128 err = ssb_devices_freeze(bus, &freeze);
129 if (err) { 129 if (err) {
130 ssb_printk(KERN_ERR PFX "SPROM write: Could not freeze all devices\n"); 130 ssb_err("SPROM write: Could not freeze all devices\n");
131 goto out_unlock; 131 goto out_unlock;
132 } 132 }
133 res = sprom_write(bus, sprom); 133 res = sprom_write(bus, sprom);
134 err = ssb_devices_thaw(&freeze); 134 err = ssb_devices_thaw(&freeze);
135 if (err) 135 if (err)
136 ssb_printk(KERN_ERR PFX "SPROM write: Could not thaw all devices\n"); 136 ssb_err("SPROM write: Could not thaw all devices\n");
137out_unlock: 137out_unlock:
138 mutex_unlock(&bus->sprom_mutex); 138 mutex_unlock(&bus->sprom_mutex);
139out_kfree: 139out_kfree:
diff --git a/drivers/ssb/ssb_private.h b/drivers/ssb/ssb_private.h
index 466171b77f68..4671f17f09af 100644
--- a/drivers/ssb/ssb_private.h
+++ b/drivers/ssb/ssb_private.h
@@ -9,16 +9,27 @@
9#define PFX "ssb: " 9#define PFX "ssb: "
10 10
11#ifdef CONFIG_SSB_SILENT 11#ifdef CONFIG_SSB_SILENT
12# define ssb_printk(fmt, x...) do { /* nothing */ } while (0) 12# define ssb_printk(fmt, ...) \
13 do { if (0) printk(fmt, ##__VA_ARGS__); } while (0)
13#else 14#else
14# define ssb_printk printk 15# define ssb_printk(fmt, ...) \
16 printk(fmt, ##__VA_ARGS__)
15#endif /* CONFIG_SSB_SILENT */ 17#endif /* CONFIG_SSB_SILENT */
16 18
19#define ssb_emerg(fmt, ...) ssb_printk(KERN_EMERG PFX fmt, ##__VA_ARGS__)
20#define ssb_err(fmt, ...) ssb_printk(KERN_ERR PFX fmt, ##__VA_ARGS__)
21#define ssb_warn(fmt, ...) ssb_printk(KERN_WARNING PFX fmt, ##__VA_ARGS__)
22#define ssb_notice(fmt, ...) ssb_printk(KERN_NOTICE PFX fmt, ##__VA_ARGS__)
23#define ssb_info(fmt, ...) ssb_printk(KERN_INFO PFX fmt, ##__VA_ARGS__)
24#define ssb_cont(fmt, ...) ssb_printk(KERN_CONT fmt, ##__VA_ARGS__)
25
17/* dprintk: Debugging printk; vanishes for non-debug compilation */ 26/* dprintk: Debugging printk; vanishes for non-debug compilation */
18#ifdef CONFIG_SSB_DEBUG 27#ifdef CONFIG_SSB_DEBUG
19# define ssb_dprintk(fmt, x...) ssb_printk(fmt , ##x) 28# define ssb_dbg(fmt, ...) \
29 ssb_printk(KERN_DEBUG PFX fmt, ##__VA_ARGS__)
20#else 30#else
21# define ssb_dprintk(fmt, x...) do { /* nothing */ } while (0) 31# define ssb_dbg(fmt, ...) \
32 do { if (0) printk(KERN_DEBUG PFX fmt, ##__VA_ARGS__); } while (0)
22#endif 33#endif
23 34
24#ifdef CONFIG_SSB_DEBUG 35#ifdef CONFIG_SSB_DEBUG
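Note: the rewritten macros replace the empty do/while bodies with the "if (0) printk(...)" idiom, so the compiler still type-checks the format string and arguments of compiled-out messages while the optimizer discards the call entirely. A standalone sketch of the idiom, with plain printf standing in for printk:

#include <stdio.h>

#define DEBUG_ON 0

#if DEBUG_ON
# define dbg(fmt, ...) printf(fmt, ##__VA_ARGS__)
#else
/* Arguments stay visible to the compiler but generate no code. */
# define dbg(fmt, ...) \
	do { if (0) printf(fmt, ##__VA_ARGS__); } while (0)
#endif

int main(void)
{
	dbg("value=%d\n", 42);		/* format checked even when DEBUG_ON=0 */
	/* dbg("value=%d\n", "oops");	   would warn in both configurations */
	return 0;
}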
diff --git a/drivers/staging/gdm72xx/netlink_k.c b/drivers/staging/gdm72xx/netlink_k.c
index 52c25ba5831d..c1239aaa6282 100644
--- a/drivers/staging/gdm72xx/netlink_k.c
+++ b/drivers/staging/gdm72xx/netlink_k.c
@@ -15,7 +15,7 @@
15 15
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
18#include <linux/netlink.h> 18#include <net/netlink.h>
19#include <asm/byteorder.h> 19#include <asm/byteorder.h>
20#include <net/sock.h> 20#include <net/sock.h>
21 21
@@ -25,12 +25,12 @@
25 25
26#define ND_MAX_GROUP 30 26#define ND_MAX_GROUP 30
27#define ND_IFINDEX_LEN sizeof(int) 27#define ND_IFINDEX_LEN sizeof(int)
28#define ND_NLMSG_SPACE(len) (NLMSG_SPACE(len) + ND_IFINDEX_LEN) 28#define ND_NLMSG_SPACE(len) (nlmsg_total_size(len) + ND_IFINDEX_LEN)
29#define ND_NLMSG_DATA(nlh) \ 29#define ND_NLMSG_DATA(nlh) \
30 ((void *)((char *)NLMSG_DATA(nlh) + ND_IFINDEX_LEN)) 30 ((void *)((char *)nlmsg_data(nlh) + ND_IFINDEX_LEN))
31#define ND_NLMSG_S_LEN(len) (len+ND_IFINDEX_LEN) 31#define ND_NLMSG_S_LEN(len) (len+ND_IFINDEX_LEN)
32#define ND_NLMSG_R_LEN(nlh) (nlh->nlmsg_len-ND_IFINDEX_LEN) 32#define ND_NLMSG_R_LEN(nlh) (nlh->nlmsg_len-ND_IFINDEX_LEN)
33#define ND_NLMSG_IFIDX(nlh) NLMSG_DATA(nlh) 33#define ND_NLMSG_IFIDX(nlh) nlmsg_data(nlh)
34#define ND_MAX_MSG_LEN 8096 34#define ND_MAX_MSG_LEN 8096
35 35
36#if defined(DEFINE_MUTEX) 36#if defined(DEFINE_MUTEX)
@@ -51,7 +51,7 @@ static void netlink_rcv_cb(struct sk_buff *skb)
51 void *msg; 51 void *msg;
52 int ifindex; 52 int ifindex;
53 53
54 if (skb->len >= NLMSG_SPACE(0)) { 54 if (skb->len >= NLMSG_HDRLEN) {
55 nlh = (struct nlmsghdr *)skb->data; 55 nlh = (struct nlmsghdr *)skb->data;
56 56
57 if (skb->len < nlh->nlmsg_len || 57 if (skb->len < nlh->nlmsg_len ||
@@ -124,7 +124,7 @@ int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len)
124 return -EINVAL; 124 return -EINVAL;
125 } 125 }
126 126
127 skb = alloc_skb(NLMSG_SPACE(len), GFP_ATOMIC); 127 skb = nlmsg_new(len, GFP_ATOMIC);
128 if (!skb) { 128 if (!skb) {
129 pr_err("netlink_broadcast ret=%d\n", ret); 129 pr_err("netlink_broadcast ret=%d\n", ret);
130 return -ENOMEM; 130 return -ENOMEM;
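Note: the staging driver moves from the deprecated NLMSG_SPACE()/NLMSG_DATA() macros to the nlmsg_* helpers from <net/netlink.h>; nlmsg_new(len, flags) sizes the skb for a netlink header plus len bytes of payload, matching the old alloc_skb(NLMSG_SPACE(len), ...) call. A hedged sketch of the resulting send path (simplified, not the driver's actual code; send_event() and its parameters are illustrative):

#include <linux/string.h>
#include <net/netlink.h>
#include <net/sock.h>

static int send_event(struct sock *nlsk, u32 group, u16 type,
		      const void *payload, int len)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;

	skb = nlmsg_new(len, GFP_ATOMIC);	/* header + len payload */
	if (!skb)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, 0, type, len, 0);
	if (!nlh) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}
	memcpy(nlmsg_data(nlh), payload, len);

	return netlink_broadcast(nlsk, skb, 0, group, GFP_ATOMIC);
}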
diff --git a/include/linux/cn_proc.h b/include/linux/cn_proc.h
index 2c1bc1ea04ee..1d5b02a96c46 100644
--- a/include/linux/cn_proc.h
+++ b/include/linux/cn_proc.h
@@ -26,6 +26,7 @@ void proc_id_connector(struct task_struct *task, int which_id);
26void proc_sid_connector(struct task_struct *task); 26void proc_sid_connector(struct task_struct *task);
27void proc_ptrace_connector(struct task_struct *task, int which_id); 27void proc_ptrace_connector(struct task_struct *task, int which_id);
28void proc_comm_connector(struct task_struct *task); 28void proc_comm_connector(struct task_struct *task);
29void proc_coredump_connector(struct task_struct *task);
29void proc_exit_connector(struct task_struct *task); 30void proc_exit_connector(struct task_struct *task);
30#else 31#else
31static inline void proc_fork_connector(struct task_struct *task) 32static inline void proc_fork_connector(struct task_struct *task)
@@ -48,6 +49,9 @@ static inline void proc_ptrace_connector(struct task_struct *task,
48 int ptrace_id) 49 int ptrace_id)
49{} 50{}
50 51
52static inline void proc_coredump_connector(struct task_struct *task)
53{}
54
51static inline void proc_exit_connector(struct task_struct *task) 55static inline void proc_exit_connector(struct task_struct *task)
52{} 56{}
53#endif /* CONFIG_PROC_EVENTS */ 57#endif /* CONFIG_PROC_EVENTS */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index c45eabc135e1..d1248f401a56 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -48,8 +48,21 @@ extern int sk_chk_filter(struct sock_filter *filter, unsigned int flen);
48extern int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, unsigned len); 48extern int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, unsigned len);
49 49
50#ifdef CONFIG_BPF_JIT 50#ifdef CONFIG_BPF_JIT
51#include <linux/linkage.h>
52#include <linux/printk.h>
53
51extern void bpf_jit_compile(struct sk_filter *fp); 54extern void bpf_jit_compile(struct sk_filter *fp);
52extern void bpf_jit_free(struct sk_filter *fp); 55extern void bpf_jit_free(struct sk_filter *fp);
56
57static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
58 u32 pass, void *image)
59{
60 pr_err("flen=%u proglen=%u pass=%u image=%p\n",
61 flen, proglen, pass, image);
62 if (image)
63 print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_ADDRESS,
64 16, 1, image, proglen, false);
65}
53#define SK_RUN_FILTER(FILTER, SKB) (*FILTER->bpf_func)(SKB, FILTER->insns) 66#define SK_RUN_FILTER(FILTER, SKB) (*FILTER->bpf_func)(SKB, FILTER->insns)
54#else 67#else
55static inline void bpf_jit_compile(struct sk_filter *fp) 68static inline void bpf_jit_compile(struct sk_filter *fp)
@@ -126,6 +139,7 @@ enum {
126 BPF_S_ANC_SECCOMP_LD_W, 139 BPF_S_ANC_SECCOMP_LD_W,
127 BPF_S_ANC_VLAN_TAG, 140 BPF_S_ANC_VLAN_TAG,
128 BPF_S_ANC_VLAN_TAG_PRESENT, 141 BPF_S_ANC_VLAN_TAG_PRESENT,
142 BPF_S_ANC_PAY_OFFSET,
129}; 143};
130 144
131#endif /* __LINUX_FILTER_H__ */ 145#endif /* __LINUX_FILTER_H__ */
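Note: bpf_jit_dump() gives every arch JIT a common way to report the generated image instead of each carrying its own pr_err()/print_hex_dump() pair. A hedged sketch of the usual call pattern, gated on the bpf_jit_enable sysctl the way the arch JITs do (jit_report() is illustrative):

#include <linux/filter.h>

extern int bpf_jit_enable;	/* net.core.bpf_jit_enable sysctl */

static void jit_report(unsigned int flen, unsigned int proglen,
		       u32 pass, void *image)
{
	/* 0 = off, 1 = on, 2 = on and dump the generated image */
	if (bpf_jit_enable > 1)
		bpf_jit_dump(flen, proglen, pass, image);
}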
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 7e24fe0cfbcd..4cf0c9e4dd99 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -113,6 +113,34 @@
113#define IEEE80211_CTL_EXT_SSW_FBACK 0x9000 113#define IEEE80211_CTL_EXT_SSW_FBACK 0x9000
114#define IEEE80211_CTL_EXT_SSW_ACK 0xa000 114#define IEEE80211_CTL_EXT_SSW_ACK 0xa000
115 115
116
117#define IEEE80211_SN_MASK ((IEEE80211_SCTL_SEQ) >> 4)
118#define IEEE80211_MAX_SN IEEE80211_SN_MASK
119#define IEEE80211_SN_MODULO (IEEE80211_MAX_SN + 1)
120
121static inline int ieee80211_sn_less(u16 sn1, u16 sn2)
122{
123 return ((sn1 - sn2) & IEEE80211_SN_MASK) > (IEEE80211_SN_MODULO >> 1);
124}
125
126static inline u16 ieee80211_sn_add(u16 sn1, u16 sn2)
127{
128 return (sn1 + sn2) & IEEE80211_SN_MASK;
129}
130
131static inline u16 ieee80211_sn_inc(u16 sn)
132{
133 return ieee80211_sn_add(sn, 1);
134}
135
136static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
137{
138 return (sn1 - sn2) & IEEE80211_SN_MASK;
139}
140
141#define IEEE80211_SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
142#define IEEE80211_SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
143
116/* miscellaneous IEEE 802.11 constants */ 144/* miscellaneous IEEE 802.11 constants */
117#define IEEE80211_MAX_FRAG_THRESHOLD 2352 145#define IEEE80211_MAX_FRAG_THRESHOLD 2352
118#define IEEE80211_MAX_RTS_THRESHOLD 2353 146#define IEEE80211_MAX_RTS_THRESHOLD 2353
@@ -185,7 +213,7 @@ struct ieee80211_hdr {
185 u8 addr3[6]; 213 u8 addr3[6];
186 __le16 seq_ctrl; 214 __le16 seq_ctrl;
187 u8 addr4[6]; 215 u8 addr4[6];
188} __packed; 216} __packed __aligned(2);
189 217
190struct ieee80211_hdr_3addr { 218struct ieee80211_hdr_3addr {
191 __le16 frame_control; 219 __le16 frame_control;
@@ -194,7 +222,7 @@ struct ieee80211_hdr_3addr {
194 u8 addr2[6]; 222 u8 addr2[6];
195 u8 addr3[6]; 223 u8 addr3[6];
196 __le16 seq_ctrl; 224 __le16 seq_ctrl;
197} __packed; 225} __packed __aligned(2);
198 226
199struct ieee80211_qos_hdr { 227struct ieee80211_qos_hdr {
200 __le16 frame_control; 228 __le16 frame_control;
@@ -204,7 +232,7 @@ struct ieee80211_qos_hdr {
204 u8 addr3[6]; 232 u8 addr3[6];
205 __le16 seq_ctrl; 233 __le16 seq_ctrl;
206 __le16 qos_ctrl; 234 __le16 qos_ctrl;
207} __packed; 235} __packed __aligned(2);
208 236
209/** 237/**
210 * ieee80211_has_tods - check if IEEE80211_FCTL_TODS is set 238 * ieee80211_has_tods - check if IEEE80211_FCTL_TODS is set
@@ -581,7 +609,7 @@ struct ieee80211s_hdr {
581 __le32 seqnum; 609 __le32 seqnum;
582 u8 eaddr1[6]; 610 u8 eaddr1[6];
583 u8 eaddr2[6]; 611 u8 eaddr2[6];
584} __packed; 612} __packed __aligned(2);
585 613
586/* Mesh flags */ 614/* Mesh flags */
587#define MESH_FLAGS_AE_A4 0x1 615#define MESH_FLAGS_AE_A4 0x1
@@ -875,7 +903,7 @@ struct ieee80211_mgmt {
875 } u; 903 } u;
876 } __packed action; 904 } __packed action;
877 } u; 905 } u;
878} __packed; 906} __packed __aligned(2);
879 907
880/* Supported Rates value encodings in 802.11n-2009 7.3.2.2 */ 908/* Supported Rates value encodings in 802.11n-2009 7.3.2.2 */
881#define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127 909#define BSS_MEMBERSHIP_SELECTOR_HT_PHY 127
@@ -906,20 +934,20 @@ struct ieee80211_rts {
906 __le16 duration; 934 __le16 duration;
907 u8 ra[6]; 935 u8 ra[6];
908 u8 ta[6]; 936 u8 ta[6];
909} __packed; 937} __packed __aligned(2);
910 938
911struct ieee80211_cts { 939struct ieee80211_cts {
912 __le16 frame_control; 940 __le16 frame_control;
913 __le16 duration; 941 __le16 duration;
914 u8 ra[6]; 942 u8 ra[6];
915} __packed; 943} __packed __aligned(2);
916 944
917struct ieee80211_pspoll { 945struct ieee80211_pspoll {
918 __le16 frame_control; 946 __le16 frame_control;
919 __le16 aid; 947 __le16 aid;
920 u8 bssid[6]; 948 u8 bssid[6];
921 u8 ta[6]; 949 u8 ta[6];
922} __packed; 950} __packed __aligned(2);
923 951
924/* TDLS */ 952/* TDLS */
925 953
@@ -1290,11 +1318,6 @@ struct ieee80211_vht_operation {
1290} __packed; 1318} __packed;
1291 1319
1292 1320
1293#define IEEE80211_VHT_MCS_ZERO_TO_SEVEN_SUPPORT 0
1294#define IEEE80211_VHT_MCS_ZERO_TO_EIGHT_SUPPORT 1
1295#define IEEE80211_VHT_MCS_ZERO_TO_NINE_SUPPORT 2
1296#define IEEE80211_VHT_MCS_NOT_SUPPORTED 3
1297
1298/* 802.11ac VHT Capabilities */ 1321/* 802.11ac VHT Capabilities */
1299#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000 1322#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000
1300#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 0x00000001 1323#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 0x00000001
@@ -1310,10 +1333,11 @@ struct ieee80211_vht_operation {
1310#define IEEE80211_VHT_CAP_RXSTBC_2 0x00000200 1333#define IEEE80211_VHT_CAP_RXSTBC_2 0x00000200
1311#define IEEE80211_VHT_CAP_RXSTBC_3 0x00000300 1334#define IEEE80211_VHT_CAP_RXSTBC_3 0x00000300
1312#define IEEE80211_VHT_CAP_RXSTBC_4 0x00000400 1335#define IEEE80211_VHT_CAP_RXSTBC_4 0x00000400
1336#define IEEE80211_VHT_CAP_RXSTBC_MASK 0x00000700
1313#define IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE 0x00000800 1337#define IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE 0x00000800
1314#define IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE 0x00001000 1338#define IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE 0x00001000
1315#define IEEE80211_VHT_CAP_BEAMFORMER_ANTENNAS_MAX 0x00006000 1339#define IEEE80211_VHT_CAP_BEAMFORMER_ANTENNAS_MAX 0x00006000
1316#define IEEE80211_VHT_CAP_SOUNDING_DIMENTION_MAX 0x00030000 1340#define IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MAX 0x00030000
1317#define IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE 0x00080000 1341#define IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE 0x00080000
1318#define IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE 0x00100000 1342#define IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE 0x00100000
1319#define IEEE80211_VHT_CAP_VHT_TXOP_PS 0x00200000 1343#define IEEE80211_VHT_CAP_VHT_TXOP_PS 0x00200000
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
index 89b4614a4722..f563907ed776 100644
--- a/include/linux/if_arp.h
+++ b/include/linux/if_arp.h
@@ -33,7 +33,15 @@ static inline struct arphdr *arp_hdr(const struct sk_buff *skb)
33 33
34static inline int arp_hdr_len(struct net_device *dev) 34static inline int arp_hdr_len(struct net_device *dev)
35{ 35{
36 /* ARP header, plus 2 device addresses, plus 2 IP addresses. */ 36 switch (dev->type) {
37 return sizeof(struct arphdr) + (dev->addr_len + sizeof(u32)) * 2; 37#if IS_ENABLED(CONFIG_FIREWIRE_NET)
38 case ARPHRD_IEEE1394:
39 /* ARP header, device address and 2 IP addresses */
40 return sizeof(struct arphdr) + dev->addr_len + sizeof(u32) * 2;
41#endif
42 default:
43 /* ARP header, plus 2 device addresses, plus 2 IP addresses. */
44 return sizeof(struct arphdr) + (dev->addr_len + sizeof(u32)) * 2;
45 }
38} 46}
39#endif /* _LINUX_IF_ARP_H */ 47#endif /* _LINUX_IF_ARP_H */
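Note: RFC 2734 ARP frames on IEEE 1394 carry only the sender's hardware address (there is no target hardware address field), so arp_hdr_len() now counts addr_len once for that device type instead of twice. A userspace sketch of the size difference, assuming the 16-byte firewire hardware address used by firewire-net:

#include <stdio.h>

#define ARPHDR_SIZE 8	/* sizeof(struct arphdr): 2+2+1+1+2 bytes */
#define IP_ALEN     4

static unsigned int arp_hdr_len(unsigned int addr_len, int is_ieee1394)
{
	if (is_ieee1394)	/* sender hw address + 2 IP addresses */
		return ARPHDR_SIZE + addr_len + IP_ALEN * 2;
	/* 2 hw addresses + 2 IP addresses */
	return ARPHDR_SIZE + (addr_len + IP_ALEN) * 2;
}

int main(void)
{
	printf("ethernet: %u\n", arp_hdr_len(6, 0));	/* 28 */
	printf("ieee1394: %u\n", arp_hdr_len(16, 1));	/* 32 */
	return 0;
}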
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index cfd21e3d5506..4474557904f6 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -112,6 +112,10 @@ struct team_mode_ops {
112 void (*port_disabled)(struct team *team, struct team_port *port); 112 void (*port_disabled)(struct team *team, struct team_port *port);
113}; 113};
114 114
115extern int team_modeop_port_enter(struct team *team, struct team_port *port);
116extern void team_modeop_port_change_dev_addr(struct team *team,
117 struct team_port *port);
118
115enum team_option_type { 119enum team_option_type {
116 TEAM_OPTION_TYPE_U32, 120 TEAM_OPTION_TYPE_U32,
117 TEAM_OPTION_TYPE_STRING, 121 TEAM_OPTION_TYPE_STRING,
@@ -236,7 +240,26 @@ static inline struct team_port *team_get_port_by_index_rcu(struct team *team,
236 return NULL; 240 return NULL;
237} 241}
238 242
239extern int team_port_set_team_dev_addr(struct team_port *port); 243static inline struct team_port *
244team_get_first_port_txable_rcu(struct team *team, struct team_port *port)
245{
246 struct team_port *cur;
247
248 if (likely(team_port_txable(port)))
249 return port;
250 cur = port;
251 list_for_each_entry_continue_rcu(cur, &team->port_list, list)
252 if (team_port_txable(cur))
253 return cur;
254 list_for_each_entry_rcu(cur, &team->port_list, list) {
255 if (cur == port)
256 break;
257 if (team_port_txable(cur))
258 return cur;
259 }
260 return NULL;
261}
262
240extern int team_options_register(struct team *team, 263extern int team_options_register(struct team *team,
241 const struct team_option *option, 264 const struct team_option *option,
242 size_t option_count); 265 size_t option_count);
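Note: team_get_first_port_txable_rcu() implements a wrap-around scan: try the current port, continue to the tail of the port list, then restart from the head and stop once back at the starting port. The same pattern on a plain array, as a sketch:

#include <stdio.h>

/* Return the first usable index at or after start, wrapping around. */
static int first_usable(const int *usable, int n, int start)
{
	int i;

	for (i = start; i < n; i++)	/* start..tail */
		if (usable[i])
			return i;
	for (i = 0; i < start; i++)	/* wrap: head..start-1 */
		if (usable[i])
			return i;
	return -1;			/* nothing usable */
}

int main(void)
{
	int usable[5] = { 0, 1, 0, 0, 1 };

	printf("%d\n", first_usable(usable, 5, 2));	/* prints 4 */
	printf("%d\n", first_usable(usable, 5, 0));	/* prints 1 */
	return 0;
}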
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 218a3b686d90..70962f3fdb79 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -339,7 +339,7 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb,
339 */ 339 */
340 340
341 proto = vhdr->h_vlan_encapsulated_proto; 341 proto = vhdr->h_vlan_encapsulated_proto;
342 if (ntohs(proto) >= 1536) { 342 if (ntohs(proto) >= ETH_P_802_3_MIN) {
343 skb->protocol = proto; 343 skb->protocol = proto;
344 return; 344 return;
345 } 345 }
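Note: the magic 1536 becomes ETH_P_802_3_MIN (0x0600), the boundary in the Ethernet type/length field: values below it are an 802.3 frame length, values at or above it are an EtherType. A sketch of the classification:

#include <stdio.h>
#include <stdint.h>

#define ETH_P_802_3_MIN 0x0600	/* smallest valid EtherType */

static void classify(uint16_t type_or_len)
{
	if (type_or_len >= ETH_P_802_3_MIN)
		printf("EtherType 0x%04x\n", type_or_len);
	else
		printf("802.3 length %u\n", type_or_len);
}

int main(void)
{
	classify(0x0800);	/* EtherType 0x0800 (IPv4) */
	classify(0x0200);	/* 802.3 length 512 */
	return 0;
}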
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 9dbb41a4e250..8752dbbc6135 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -19,6 +19,7 @@
19#define PHY_ID_KSZ9021 0x00221610 19#define PHY_ID_KSZ9021 0x00221610
20#define PHY_ID_KS8737 0x00221720 20#define PHY_ID_KS8737 0x00221720
21#define PHY_ID_KSZ8021 0x00221555 21#define PHY_ID_KSZ8021 0x00221555
22#define PHY_ID_KSZ8031 0x00221556
22#define PHY_ID_KSZ8041 0x00221510 23#define PHY_ID_KSZ8041 0x00221510
23#define PHY_ID_KSZ8051 0x00221550 24#define PHY_ID_KSZ8051 0x00221550
24/* same id: ks8001 Rev. A/B, and ks8721 Rev 3. */ 25/* same id: ks8001 Rev. A/B, and ks8721 Rev 3. */
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 811f91cf5e8c..1bc5a750b330 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -140,6 +140,7 @@ enum {
140 MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41, 140 MLX4_DEV_CAP_FLAG_VEP_UC_STEER = 1LL << 41,
141 MLX4_DEV_CAP_FLAG_VEP_MC_STEER = 1LL << 42, 141 MLX4_DEV_CAP_FLAG_VEP_MC_STEER = 1LL << 42,
142 MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48, 142 MLX4_DEV_CAP_FLAG_COUNTERS = 1LL << 48,
143 MLX4_DEV_CAP_FLAG_SET_ETH_SCHED = 1LL << 53,
143 MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55, 144 MLX4_DEV_CAP_FLAG_SENSE_SUPPORT = 1LL << 55,
144 MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV = 1LL << 59, 145 MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV = 1LL << 59,
145 MLX4_DEV_CAP_FLAG_64B_EQE = 1LL << 61, 146 MLX4_DEV_CAP_FLAG_64B_EQE = 1LL << 61,
diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h
index 49258e0ed1c6..141d395bbb5f 100644
--- a/include/linux/mv643xx_eth.h
+++ b/include/linux/mv643xx_eth.h
@@ -19,7 +19,6 @@
19 19
20struct mv643xx_eth_shared_platform_data { 20struct mv643xx_eth_shared_platform_data {
21 struct mbus_dram_target_info *dram; 21 struct mbus_dram_target_info *dram;
22 struct platform_device *shared_smi;
23 /* 22 /*
24 * Max packet size for Tx IP/Layer 4 checksum, when set to 0, default 23 * Max packet size for Tx IP/Layer 4 checksum, when set to 0, default
25 * limit of 9KiB will be used. 24 * limit of 9KiB will be used.
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 3dd39340430e..d6ee2d008ee4 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -42,9 +42,9 @@ enum {
42 NETIF_F_TSO6_BIT, /* ... TCPv6 segmentation */ 42 NETIF_F_TSO6_BIT, /* ... TCPv6 segmentation */
43 NETIF_F_FSO_BIT, /* ... FCoE segmentation */ 43 NETIF_F_FSO_BIT, /* ... FCoE segmentation */
44 NETIF_F_GSO_GRE_BIT, /* ... GRE with TSO */ 44 NETIF_F_GSO_GRE_BIT, /* ... GRE with TSO */
45 /**/NETIF_F_GSO_LAST, /* [can't be last bit, see GSO_MASK] */ 45 NETIF_F_GSO_UDP_TUNNEL_BIT, /* ... UDP TUNNEL with TSO */
46 NETIF_F_GSO_RESERVED2 /* ... free (fill GSO_MASK to 8 bits) */ 46 /**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */
47 = NETIF_F_GSO_LAST, 47 NETIF_F_GSO_UDP_TUNNEL_BIT,
48 48
49 NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ 49 NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */
50 NETIF_F_SCTP_CSUM_BIT, /* SCTP checksum offload */ 50 NETIF_F_SCTP_CSUM_BIT, /* SCTP checksum offload */
@@ -102,7 +102,8 @@ enum {
102#define NETIF_F_VLAN_CHALLENGED __NETIF_F(VLAN_CHALLENGED) 102#define NETIF_F_VLAN_CHALLENGED __NETIF_F(VLAN_CHALLENGED)
103#define NETIF_F_RXFCS __NETIF_F(RXFCS) 103#define NETIF_F_RXFCS __NETIF_F(RXFCS)
104#define NETIF_F_RXALL __NETIF_F(RXALL) 104#define NETIF_F_RXALL __NETIF_F(RXALL)
105#define NETIF_F_GRE_GSO __NETIF_F(GSO_GRE) 105#define NETIF_F_GSO_GRE __NETIF_F(GSO_GRE)
106#define NETIF_F_GSO_UDP_TUNNEL __NETIF_F(GSO_UDP_TUNNEL)
106 107
107/* Features valid for ethtool to change */ 108/* Features valid for ethtool to change */
108/* = all defined minus driver/device-class-related */ 109/* = all defined minus driver/device-class-related */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 6151e903eef0..53d3939358a7 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -144,8 +144,6 @@ static inline bool dev_xmit_complete(int rc)
144# else 144# else
145# define LL_MAX_HEADER 96 145# define LL_MAX_HEADER 96
146# endif 146# endif
147#elif IS_ENABLED(CONFIG_TR)
148# define LL_MAX_HEADER 48
149#else 147#else
150# define LL_MAX_HEADER 32 148# define LL_MAX_HEADER 32
151#endif 149#endif
@@ -1073,6 +1071,8 @@ struct net_device {
1073 struct list_head dev_list; 1071 struct list_head dev_list;
1074 struct list_head napi_list; 1072 struct list_head napi_list;
1075 struct list_head unreg_list; 1073 struct list_head unreg_list;
1074 struct list_head upper_dev_list; /* List of upper devices */
1075
1076 1076
1077 /* currently active device features */ 1077 /* currently active device features */
1078 netdev_features_t features; 1078 netdev_features_t features;
@@ -1145,6 +1145,13 @@ struct net_device {
1145 spinlock_t addr_list_lock; 1145 spinlock_t addr_list_lock;
1146 struct netdev_hw_addr_list uc; /* Unicast mac addresses */ 1146 struct netdev_hw_addr_list uc; /* Unicast mac addresses */
1147 struct netdev_hw_addr_list mc; /* Multicast mac addresses */ 1147 struct netdev_hw_addr_list mc; /* Multicast mac addresses */
1148 struct netdev_hw_addr_list dev_addrs; /* list of device
1149 * hw addresses
1150 */
1151#ifdef CONFIG_SYSFS
1152 struct kset *queues_kset;
1153#endif
1154
1148 bool uc_promisc; 1155 bool uc_promisc;
1149 unsigned int promiscuity; 1156 unsigned int promiscuity;
1150 unsigned int allmulti; 1157 unsigned int allmulti;
@@ -1177,21 +1184,11 @@ struct net_device {
1177 * avoid dirtying this cache line. 1184 * avoid dirtying this cache line.
1178 */ 1185 */
1179 1186
1180 struct list_head upper_dev_list; /* List of upper devices */
1181
1182 /* Interface address info used in eth_type_trans() */ 1187 /* Interface address info used in eth_type_trans() */
1183 unsigned char *dev_addr; /* hw address, (before bcast 1188 unsigned char *dev_addr; /* hw address, (before bcast
1184 because most packets are 1189 because most packets are
1185 unicast) */ 1190 unicast) */
1186 1191
1187 struct netdev_hw_addr_list dev_addrs; /* list of device
1188 hw addresses */
1189
1190 unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
1191
1192#ifdef CONFIG_SYSFS
1193 struct kset *queues_kset;
1194#endif
1195 1192
1196#ifdef CONFIG_RPS 1193#ifdef CONFIG_RPS
1197 struct netdev_rx_queue *_rx; 1194 struct netdev_rx_queue *_rx;
@@ -1202,18 +1199,14 @@ struct net_device {
1202 /* Number of RX queues currently active in device */ 1199 /* Number of RX queues currently active in device */
1203 unsigned int real_num_rx_queues; 1200 unsigned int real_num_rx_queues;
1204 1201
1205#ifdef CONFIG_RFS_ACCEL
1206 /* CPU reverse-mapping for RX completion interrupts, indexed
1207 * by RX queue number. Assigned by driver. This must only be
1208 * set if the ndo_rx_flow_steer operation is defined. */
1209 struct cpu_rmap *rx_cpu_rmap;
1210#endif
1211#endif 1202#endif
1212 1203
1213 rx_handler_func_t __rcu *rx_handler; 1204 rx_handler_func_t __rcu *rx_handler;
1214 void __rcu *rx_handler_data; 1205 void __rcu *rx_handler_data;
1215 1206
1216 struct netdev_queue __rcu *ingress_queue; 1207 struct netdev_queue __rcu *ingress_queue;
1208 unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
1209
1217 1210
1218/* 1211/*
1219 * Cache lines mostly used on transmit path 1212 * Cache lines mostly used on transmit path
@@ -1235,6 +1228,12 @@ struct net_device {
1235#ifdef CONFIG_XPS 1228#ifdef CONFIG_XPS
1236 struct xps_dev_maps __rcu *xps_maps; 1229 struct xps_dev_maps __rcu *xps_maps;
1237#endif 1230#endif
1231#ifdef CONFIG_RFS_ACCEL
1232 /* CPU reverse-mapping for RX completion interrupts, indexed
1233 * by RX queue number. Assigned by driver. This must only be
1234 * set if the ndo_rx_flow_steer operation is defined. */
1235 struct cpu_rmap *rx_cpu_rmap;
1236#endif
1238 1237
1239 /* These may be needed for future network-power-down code. */ 1238 /* These may be needed for future network-power-down code. */
1240 1239
@@ -1475,6 +1474,11 @@ static inline void *netdev_priv(const struct net_device *dev)
1475 */ 1474 */
1476#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype)) 1475#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
1477 1476
1477/* Default NAPI poll() weight
1478 * Device drivers are strongly advised to not use bigger value
1479 */
1480#define NAPI_POLL_WEIGHT 64
1481
1478/** 1482/**
1479 * netif_napi_add - initialize a napi context 1483 * netif_napi_add - initialize a napi context
1480 * @dev: network device 1484 * @dev: network device
@@ -1612,6 +1616,9 @@ extern seqcount_t devnet_rename_seq; /* Device rename seq */
1612 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list) 1616 list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
1613#define for_each_netdev_continue_rcu(net, d) \ 1617#define for_each_netdev_continue_rcu(net, d) \
1614 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) 1618 list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
1619#define for_each_netdev_in_bond_rcu(bond, slave) \
1620 for_each_netdev_rcu(&init_net, slave) \
1621 if (netdev_master_upper_dev_get_rcu(slave) == bond)
1615#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) 1622#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
1616 1623
1617static inline struct net_device *next_net_device(struct net_device *dev) 1624static inline struct net_device *next_net_device(struct net_device *dev)
@@ -1684,7 +1691,6 @@ extern int netdev_refcnt_read(const struct net_device *dev);
1684extern void free_netdev(struct net_device *dev); 1691extern void free_netdev(struct net_device *dev);
1685extern void synchronize_net(void); 1692extern void synchronize_net(void);
1686extern int init_dummy_netdev(struct net_device *dev); 1693extern int init_dummy_netdev(struct net_device *dev);
1687extern void netdev_resync_ops(struct net_device *dev);
1688 1694
1689extern struct net_device *dev_get_by_index(struct net *net, int ifindex); 1695extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
1690extern struct net_device *__dev_get_by_index(struct net *net, int ifindex); 1696extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
@@ -2678,6 +2684,19 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
2678{ 2684{
2679 return __skb_gso_segment(skb, features, true); 2685 return __skb_gso_segment(skb, features, true);
2680} 2686}
2687__be16 skb_network_protocol(struct sk_buff *skb);
2688
2689static inline bool can_checksum_protocol(netdev_features_t features,
2690 __be16 protocol)
2691{
2692 return ((features & NETIF_F_GEN_CSUM) ||
2693 ((features & NETIF_F_V4_CSUM) &&
2694 protocol == htons(ETH_P_IP)) ||
2695 ((features & NETIF_F_V6_CSUM) &&
2696 protocol == htons(ETH_P_IPV6)) ||
2697 ((features & NETIF_F_FCOE_CRC) &&
2698 protocol == htons(ETH_P_FCOE)));
2699}
2681 2700
2682#ifdef CONFIG_BUG 2701#ifdef CONFIG_BUG
2683extern void netdev_rx_csum_fault(struct net_device *dev); 2702extern void netdev_rx_csum_fault(struct net_device *dev);
@@ -2756,6 +2775,11 @@ static inline void netif_set_gso_max_size(struct net_device *dev,
2756 dev->gso_max_size = size; 2775 dev->gso_max_size = size;
2757} 2776}
2758 2777
2778static inline bool netif_is_bond_master(struct net_device *dev)
2779{
2780 return dev->flags & IFF_MASTER && dev->priv_flags & IFF_BONDING;
2781}
2782
2759static inline bool netif_is_bond_slave(struct net_device *dev) 2783static inline bool netif_is_bond_slave(struct net_device *dev)
2760{ 2784{
2761 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING; 2785 return dev->flags & IFF_SLAVE && dev->priv_flags & IFF_BONDING;
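Note: besides reshuffling struct net_device fields between receive- and transmit-path cache lines, this hunk exposes skb_network_protocol() and can_checksum_protocol() to all users of netdevice.h; the latter answers whether a given feature set can checksum a given L3 protocol in hardware. A hedged caller sketch (the helper name and fallback policy are illustrative, not from the patch):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static bool needs_sw_checksum(struct sk_buff *skb, struct net_device *dev)
{
	__be16 proto = skb_network_protocol(skb);

	/* Only frames still waiting on a checksum matter; fall back to
	 * software when the device cannot handle this protocol. */
	return skb->ip_summed == CHECKSUM_PARTIAL &&
	       !can_checksum_protocol(dev->features, proto);
}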
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index ee142846f56a..0060fde3160e 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -289,11 +289,6 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
289#endif 289#endif
290} 290}
291 291
292#ifdef CONFIG_PROC_FS
293#include <linux/proc_fs.h>
294extern struct proc_dir_entry *proc_net_netfilter;
295#endif
296
297#else /* !CONFIG_NETFILTER */ 292#else /* !CONFIG_NETFILTER */
298#define NF_HOOK(pf, hook, skb, indev, outdev, okfn) (okfn)(skb) 293#define NF_HOOK(pf, hook, skb, indev, outdev, okfn) (okfn)(skb)
299#define NF_HOOK_COND(pf, hook, skb, indev, outdev, okfn, cond) (okfn)(skb) 294#define NF_HOOK_COND(pf, hook, skb, indev, outdev, okfn, cond) (okfn)(skb)
diff --git a/include/linux/of_net.h b/include/linux/of_net.h
index f47464188710..61bf53b02779 100644
--- a/include/linux/of_net.h
+++ b/include/linux/of_net.h
@@ -11,6 +11,16 @@
11#include <linux/of.h> 11#include <linux/of.h>
12extern const int of_get_phy_mode(struct device_node *np); 12extern const int of_get_phy_mode(struct device_node *np);
13extern const void *of_get_mac_address(struct device_node *np); 13extern const void *of_get_mac_address(struct device_node *np);
14#else
15static inline const int of_get_phy_mode(struct device_node *np)
16{
17 return -ENODEV;
18}
19
20static inline const void *of_get_mac_address(struct device_node *np)
21{
22 return NULL;
23}
14#endif 24#endif
15 25
16#endif /* __LINUX_OF_NET_H */ 26#endif /* __LINUX_OF_NET_H */
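Note: the new !CONFIG_OF_NET stubs let callers drop their #ifdef guards: of_get_phy_mode() reports -ENODEV and of_get_mac_address() reports no address, the same failure cases callers already handle. A hedged caller sketch (example_set_mac() is illustrative):

#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/string.h>

static void example_set_mac(struct net_device *ndev, struct device_node *np)
{
	const void *mac = of_get_mac_address(np);

	if (mac)
		memcpy(ndev->dev_addr, mac, ETH_ALEN);
	else
		eth_hw_addr_random(ndev);	/* no DT address: randomize */
}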
diff --git a/include/linux/openvswitch.h b/include/linux/openvswitch.h
index d42e174bd0c8..67d6c7b03581 100644
--- a/include/linux/openvswitch.h
+++ b/include/linux/openvswitch.h
@@ -94,7 +94,7 @@ struct ovs_vport_stats {
94}; 94};
95 95
96/* Fixed logical ports. */ 96/* Fixed logical ports. */
97#define OVSP_LOCAL ((__u16)0) 97#define OVSP_LOCAL ((__u32)0)
98 98
99/* Packet transfer. */ 99/* Packet transfer. */
100 100
@@ -127,7 +127,8 @@ enum ovs_packet_cmd {
127 * for %OVS_PACKET_CMD_EXECUTE. It has nested %OVS_ACTION_ATTR_* attributes. 127 * for %OVS_PACKET_CMD_EXECUTE. It has nested %OVS_ACTION_ATTR_* attributes.
128 * @OVS_PACKET_ATTR_USERDATA: Present for an %OVS_PACKET_CMD_ACTION 128 * @OVS_PACKET_ATTR_USERDATA: Present for an %OVS_PACKET_CMD_ACTION
129 * notification if the %OVS_ACTION_ATTR_USERSPACE action specified an 129 * notification if the %OVS_ACTION_ATTR_USERSPACE action specified an
130 * %OVS_USERSPACE_ATTR_USERDATA attribute. 130 * %OVS_USERSPACE_ATTR_USERDATA attribute, with the same length and content
131 * specified there.
131 * 132 *
132 * These attributes follow the &struct ovs_header within the Generic Netlink 133 * These attributes follow the &struct ovs_header within the Generic Netlink
133 * payload for %OVS_PACKET_* commands. 134 * payload for %OVS_PACKET_* commands.
@@ -137,7 +138,7 @@ enum ovs_packet_attr {
137 OVS_PACKET_ATTR_PACKET, /* Packet data. */ 138 OVS_PACKET_ATTR_PACKET, /* Packet data. */
138 OVS_PACKET_ATTR_KEY, /* Nested OVS_KEY_ATTR_* attributes. */ 139 OVS_PACKET_ATTR_KEY, /* Nested OVS_KEY_ATTR_* attributes. */
139 OVS_PACKET_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */ 140 OVS_PACKET_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */
140 OVS_PACKET_ATTR_USERDATA, /* u64 OVS_ACTION_ATTR_USERSPACE arg. */ 141 OVS_PACKET_ATTR_USERDATA, /* OVS_ACTION_ATTR_USERSPACE arg. */
141 __OVS_PACKET_ATTR_MAX 142 __OVS_PACKET_ATTR_MAX
142}; 143};
143 144
@@ -389,13 +390,13 @@ enum ovs_sample_attr {
389 * enum ovs_userspace_attr - Attributes for %OVS_ACTION_ATTR_USERSPACE action. 390 * enum ovs_userspace_attr - Attributes for %OVS_ACTION_ATTR_USERSPACE action.
390 * @OVS_USERSPACE_ATTR_PID: u32 Netlink PID to which the %OVS_PACKET_CMD_ACTION 391 * @OVS_USERSPACE_ATTR_PID: u32 Netlink PID to which the %OVS_PACKET_CMD_ACTION
391 * message should be sent. Required. 392 * message should be sent. Required.
392 * @OVS_USERSPACE_ATTR_USERDATA: If present, its u64 argument is copied to the 393 * @OVS_USERSPACE_ATTR_USERDATA: If present, its variable-length argument is
393 * %OVS_PACKET_CMD_ACTION message as %OVS_PACKET_ATTR_USERDATA, 394 * copied to the %OVS_PACKET_CMD_ACTION message as %OVS_PACKET_ATTR_USERDATA.
394 */ 395 */
395enum ovs_userspace_attr { 396enum ovs_userspace_attr {
396 OVS_USERSPACE_ATTR_UNSPEC, 397 OVS_USERSPACE_ATTR_UNSPEC,
397 OVS_USERSPACE_ATTR_PID, /* u32 Netlink PID to receive upcalls. */ 398 OVS_USERSPACE_ATTR_PID, /* u32 Netlink PID to receive upcalls. */
398 OVS_USERSPACE_ATTR_USERDATA, /* u64 optional user-specified cookie. */ 399 OVS_USERSPACE_ATTR_USERDATA, /* Optional user-specified cookie. */
399 __OVS_USERSPACE_ATTR_MAX 400 __OVS_USERSPACE_ATTR_MAX
400}; 401};
401 402
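
Widening OVS_USERSPACE_ATTR_USERDATA from a fixed u64 to a variable-length attribute means a cookie of any size can ride along with an upcall. A sketch of building such an action (put_userspace_cookie is illustrative; real producers usually sit in userspace but use the same netlink layout):

#include <linux/openvswitch.h>
#include <net/netlink.h>

static int put_userspace_cookie(struct sk_buff *skb, u32 upcall_pid,
				const void *cookie, int len)
{
	/* PID is still a mandatory u32; userdata is now any length */
	if (nla_put_u32(skb, OVS_USERSPACE_ATTR_PID, upcall_pid) ||
	    nla_put(skb, OVS_USERSPACE_ATTR_USERDATA, len, cookie))
		return -EMSGSIZE;
	return 0;
}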
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 33999adbf8c8..9e11039dd7a3 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -455,6 +455,14 @@ struct phy_driver {
455 */ 455 */
456 void (*txtstamp)(struct phy_device *dev, struct sk_buff *skb, int type); 456 void (*txtstamp)(struct phy_device *dev, struct sk_buff *skb, int type);
457 457
458 /* Some devices (e.g. qnap TS-119P II) require PHY register changes to
459 * enable Wake on LAN, so set_wol is provided to be called in the
460 * ethernet driver's set_wol function. */
461 int (*set_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol);
462
463 /* See set_wol, but for checking whether Wake on LAN is enabled. */
464 void (*get_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol);
465
458 struct device_driver driver; 466 struct device_driver driver;
459}; 467};
460#define to_phy_driver(d) container_of(d, struct phy_driver, driver) 468#define to_phy_driver(d) container_of(d, struct phy_driver, driver)
@@ -560,6 +568,8 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable);
560int phy_get_eee_err(struct phy_device *phydev); 568int phy_get_eee_err(struct phy_device *phydev);
561int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data); 569int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data);
562int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data); 570int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data);
571int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol);
572void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol);
563 573
564int __init mdio_bus_init(void); 574int __init mdio_bus_init(void);
565void mdio_bus_exit(void); 575void mdio_bus_exit(void);
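
The new set_wol/get_wol driver methods and their phy_ethtool_* wrappers let a MAC driver forward Wake-on-LAN configuration to the PHY, as the qnap TS-119P II case requires. A minimal sketch of an ethtool hook built on them (my_set_wol is illustrative; the wrapper is expected to reject PHY drivers that lack set_wol):

#include <linux/ethtool.h>
#include <linux/phy.h>

static int my_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct phy_device *phydev = ndev->phydev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_set_wol(phydev, wol);
}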
diff --git a/include/linux/platform_data/cpsw.h b/include/linux/platform_data/cpsw.h
index 798fb80b024b..bb3cd58d71e3 100644
--- a/include/linux/platform_data/cpsw.h
+++ b/include/linux/platform_data/cpsw.h
@@ -30,7 +30,7 @@ struct cpsw_platform_data {
30 u32 channels; /* number of cpdma channels (symmetric) */ 30 u32 channels; /* number of cpdma channels (symmetric) */
31 u32 slaves; /* number of slave cpgmac ports */ 31 u32 slaves; /* number of slave cpgmac ports */
32 struct cpsw_slave_data *slave_data; 32 struct cpsw_slave_data *slave_data;
33 u32 cpts_active_slave; /* time stamping slave */ 33 u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */
34 u32 cpts_clock_mult; /* convert input clock ticks to nanoseconds */ 34 u32 cpts_clock_mult; /* convert input clock ticks to nanoseconds */
35 u32 cpts_clock_shift; /* convert input clock ticks to nanoseconds */ 35 u32 cpts_clock_shift; /* convert input clock ticks to nanoseconds */
36 u32 ale_entries; /* ale table size */ 36 u32 ale_entries; /* ale table size */
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 489dd7bb28ec..f28544b2f9af 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -69,6 +69,15 @@ extern int ndo_dflt_fdb_dump(struct sk_buff *skb,
69 struct netlink_callback *cb, 69 struct netlink_callback *cb,
70 struct net_device *dev, 70 struct net_device *dev,
71 int idx); 71 int idx);
72extern int ndo_dflt_fdb_add(struct ndmsg *ndm,
73 struct nlattr *tb[],
74 struct net_device *dev,
75 const unsigned char *addr,
76 u16 flags);
77extern int ndo_dflt_fdb_del(struct ndmsg *ndm,
78 struct nlattr *tb[],
79 struct net_device *dev,
80 const unsigned char *addr);
72 81
73extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, 82extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
74 struct net_device *dev, u16 mode); 83 struct net_device *dev, u16 mode);
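
Exporting ndo_dflt_fdb_add()/ndo_dflt_fdb_del() lets drivers reuse the software FDB behaviour instead of open-coding it. A sketch of a driver hook delegating to the default (my_fdb_add and its extra check are illustrative):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static int my_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
		      struct net_device *dev,
		      const unsigned char *addr, u16 flags)
{
	/* reject anything the hardware cannot honour, then fall back */
	if (!(dev->flags & IFF_UP))
		return -EBUSY;

	return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags);
}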
diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h
index b17d765ded84..fc305713fc6d 100644
--- a/include/linux/sh_eth.h
+++ b/include/linux/sh_eth.h
@@ -6,6 +6,7 @@
6enum {EDMAC_LITTLE_ENDIAN, EDMAC_BIG_ENDIAN}; 6enum {EDMAC_LITTLE_ENDIAN, EDMAC_BIG_ENDIAN};
7enum { 7enum {
8 SH_ETH_REG_GIGABIT, 8 SH_ETH_REG_GIGABIT,
9 SH_ETH_REG_FAST_RCAR,
9 SH_ETH_REG_FAST_SH4, 10 SH_ETH_REG_FAST_SH4,
10 SH_ETH_REG_FAST_SH3_SH2 11 SH_ETH_REG_FAST_SH3_SH2
11}; 12};
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index b8292d8cc9fa..e27d1c782f32 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -32,6 +32,7 @@
32#include <linux/hrtimer.h> 32#include <linux/hrtimer.h>
33#include <linux/dma-mapping.h> 33#include <linux/dma-mapping.h>
34#include <linux/netdev_features.h> 34#include <linux/netdev_features.h>
35#include <net/flow_keys.h>
35 36
36/* Don't change this without changing skb_csum_unnecessary! */ 37/* Don't change this without changing skb_csum_unnecessary! */
37#define CHECKSUM_NONE 0 38#define CHECKSUM_NONE 0
@@ -316,6 +317,8 @@ enum {
316 SKB_GSO_FCOE = 1 << 5, 317 SKB_GSO_FCOE = 1 << 5,
317 318
318 SKB_GSO_GRE = 1 << 6, 319 SKB_GSO_GRE = 1 << 6,
320
321 SKB_GSO_UDP_TUNNEL = 1 << 7,
319}; 322};
320 323
321#if BITS_PER_LONG > 32 324#if BITS_PER_LONG > 32
@@ -387,6 +390,7 @@ typedef unsigned char *sk_buff_data_t;
387 * @vlan_tci: vlan tag control information 390 * @vlan_tci: vlan tag control information
388 * @inner_transport_header: Inner transport layer header (encapsulation) 391 * @inner_transport_header: Inner transport layer header (encapsulation)
389 * @inner_network_header: Network layer header (encapsulation) 392 * @inner_network_header: Network layer header (encapsulation)
393 * @inner_mac_header: Link layer header (encapsulation)
390 * @transport_header: Transport layer header 394 * @transport_header: Transport layer header
391 * @network_header: Network layer header 395 * @network_header: Network layer header
392 * @mac_header: Link layer header 396 * @mac_header: Link layer header
@@ -505,6 +509,7 @@ struct sk_buff {
505 509
506 sk_buff_data_t inner_transport_header; 510 sk_buff_data_t inner_transport_header;
507 sk_buff_data_t inner_network_header; 511 sk_buff_data_t inner_network_header;
512 sk_buff_data_t inner_mac_header;
508 sk_buff_data_t transport_header; 513 sk_buff_data_t transport_header;
509 sk_buff_data_t network_header; 514 sk_buff_data_t network_header;
510 sk_buff_data_t mac_header; 515 sk_buff_data_t mac_header;
@@ -570,7 +575,40 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
570 skb->_skb_refdst = (unsigned long)dst; 575 skb->_skb_refdst = (unsigned long)dst;
571} 576}
572 577
573extern void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst); 578extern void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst,
579 bool force);
580
581/**
582 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
583 * @skb: buffer
584 * @dst: dst entry
585 *
586 * Sets skb dst, assuming a reference was not taken on dst.
587 * If dst entry is cached, we do not take reference and dst_release
588 * will be avoided by refdst_drop. If dst entry is not cached, we take
589 * reference, so that last dst_release can destroy the dst immediately.
590 */
591static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
592{
593 __skb_dst_set_noref(skb, dst, false);
594}
595
596/**
597 * skb_dst_set_noref_force - sets skb dst, without taking reference
598 * @skb: buffer
599 * @dst: dst entry
600 *
601 * Sets skb dst, assuming a reference was not taken on dst.
602 * No reference is taken and no dst_release will be called. While for
603 * cached dsts deferred reclaim is a basic feature, for entries that are
 604 * not cached it is the caller's job to guarantee that the last dst_release for
 605 * the provided dst happens when nobody uses it, e.g. after an RCU grace period.
606 */
607static inline void skb_dst_set_noref_force(struct sk_buff *skb,
608 struct dst_entry *dst)
609{
610 __skb_dst_set_noref(skb, dst, true);
611}
574 612
575/** 613/**
576 * skb_dst_is_noref - Test if skb dst isn't refcounted 614 * skb_dst_is_noref - Test if skb dst isn't refcounted
@@ -1471,6 +1509,7 @@ static inline void skb_reserve(struct sk_buff *skb, int len)
1471 1509
1472static inline void skb_reset_inner_headers(struct sk_buff *skb) 1510static inline void skb_reset_inner_headers(struct sk_buff *skb)
1473{ 1511{
1512 skb->inner_mac_header = skb->mac_header;
1474 skb->inner_network_header = skb->network_header; 1513 skb->inner_network_header = skb->network_header;
1475 skb->inner_transport_header = skb->transport_header; 1514 skb->inner_transport_header = skb->transport_header;
1476} 1515}
@@ -1516,6 +1555,22 @@ static inline void skb_set_inner_network_header(struct sk_buff *skb,
1516 skb->inner_network_header += offset; 1555 skb->inner_network_header += offset;
1517} 1556}
1518 1557
1558static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
1559{
1560 return skb->head + skb->inner_mac_header;
1561}
1562
1563static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
1564{
1565 skb->inner_mac_header = skb->data - skb->head;
1566}
1567
1568static inline void skb_set_inner_mac_header(struct sk_buff *skb,
1569 const int offset)
1570{
1571 skb_reset_inner_mac_header(skb);
1572 skb->inner_mac_header += offset;
1573}
1519static inline bool skb_transport_header_was_set(const struct sk_buff *skb) 1574static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
1520{ 1575{
1521 return skb->transport_header != ~0U; 1576 return skb->transport_header != ~0U;
@@ -1609,6 +1664,21 @@ static inline void skb_set_inner_network_header(struct sk_buff *skb,
1609 skb->inner_network_header = skb->data + offset; 1664 skb->inner_network_header = skb->data + offset;
1610} 1665}
1611 1666
1667static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
1668{
1669 return skb->inner_mac_header;
1670}
1671
1672static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
1673{
1674 skb->inner_mac_header = skb->data;
1675}
1676
1677static inline void skb_set_inner_mac_header(struct sk_buff *skb,
1678 const int offset)
1679{
1680 skb->inner_mac_header = skb->data + offset;
1681}
1612static inline bool skb_transport_header_was_set(const struct sk_buff *skb) 1682static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
1613{ 1683{
1614 return skb->transport_header != NULL; 1684 return skb->transport_header != NULL;
@@ -1666,6 +1736,19 @@ static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
1666} 1736}
1667#endif /* NET_SKBUFF_DATA_USES_OFFSET */ 1737#endif /* NET_SKBUFF_DATA_USES_OFFSET */
1668 1738
1739static inline void skb_probe_transport_header(struct sk_buff *skb,
1740 const int offset_hint)
1741{
1742 struct flow_keys keys;
1743
1744 if (skb_transport_header_was_set(skb))
1745 return;
1746 else if (skb_flow_dissect(skb, &keys))
1747 skb_set_transport_header(skb, keys.thoff);
1748 else
1749 skb_set_transport_header(skb, offset_hint);
1750}
1751
1669static inline void skb_mac_header_rebuild(struct sk_buff *skb) 1752static inline void skb_mac_header_rebuild(struct sk_buff *skb)
1670{ 1753{
1671 if (skb_mac_header_was_set(skb)) { 1754 if (skb_mac_header_was_set(skb)) {
@@ -2811,6 +2894,8 @@ static inline void skb_checksum_none_assert(const struct sk_buff *skb)
2811 2894
2812bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off); 2895bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
2813 2896
2897u32 __skb_get_poff(const struct sk_buff *skb);
2898
2814/** 2899/**
2815 * skb_head_is_locked - Determine if the skb->head is locked down 2900 * skb_head_is_locked - Determine if the skb->head is locked down
2816 * @skb: skb to check 2901 * @skb: skb to check
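
skb_probe_transport_header() gives raw-transmit paths a best-effort transport offset: an already-set header wins, flow dissection is tried next, and the caller's hint is the last resort. A minimal usage sketch (my_prepare_xmit and the hint parameter are illustrative):

#include <linux/skbuff.h>

static void my_prepare_xmit(struct sk_buff *skb, unsigned int l4_hint)
{
	skb_reset_mac_header(skb);
	/* no-op if the header is known; otherwise dissect, else use hint */
	skb_probe_transport_header(skb, l4_hint);
}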
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 2b9f74b0ffea..428c37a1f95c 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -298,6 +298,7 @@ struct ucred {
298#define SOL_IUCV 277 298#define SOL_IUCV 277
299#define SOL_CAIF 278 299#define SOL_CAIF 278
300#define SOL_ALG 279 300#define SOL_ALG 279
301#define SOL_NFC 280
301 302
302/* IPX options */ 303/* IPX options */
303#define IPX_TYPE 1 304#define IPX_TYPE 1
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index 22958d68ecfe..8b1322296fed 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -26,9 +26,9 @@ struct ssb_sprom_core_pwr_info {
26 26
27struct ssb_sprom { 27struct ssb_sprom {
28 u8 revision; 28 u8 revision;
29 u8 il0mac[6]; /* MAC address for 802.11b/g */ 29 u8 il0mac[6] __aligned(sizeof(u16)); /* MAC address for 802.11b/g */
30 u8 et0mac[6]; /* MAC address for Ethernet */ 30 u8 et0mac[6] __aligned(sizeof(u16)); /* MAC address for Ethernet */
31 u8 et1mac[6]; /* MAC address for 802.11a */ 31 u8 et1mac[6] __aligned(sizeof(u16)); /* MAC address for 802.11a */
32 u8 et0phyaddr; /* MII address for enet0 */ 32 u8 et0phyaddr; /* MII address for enet0 */
33 u8 et1phyaddr; /* MII address for enet1 */ 33 u8 et1phyaddr; /* MII address for enet1 */
34 u8 et0mdcport; /* MDIO for enet0 */ 34 u8 et0mdcport; /* MDIO for enet0 */
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index f28408c07dc2..5adbc33d1ab3 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -90,9 +90,6 @@ struct tcp_options_received {
90 sack_ok : 4, /* SACK seen on SYN packet */ 90 sack_ok : 4, /* SACK seen on SYN packet */
91 snd_wscale : 4, /* Window scaling received from sender */ 91 snd_wscale : 4, /* Window scaling received from sender */
92 rcv_wscale : 4; /* Window scaling to send to receiver */ 92 rcv_wscale : 4; /* Window scaling to send to receiver */
93 u8 cookie_plus:6, /* bytes in authenticator/cookie option */
94 cookie_out_never:1,
95 cookie_in_always:1;
96 u8 num_sacks; /* Number of SACK blocks */ 93 u8 num_sacks; /* Number of SACK blocks */
97 u16 user_mss; /* mss requested by user in ioctl */ 94 u16 user_mss; /* mss requested by user in ioctl */
98 u16 mss_clamp; /* Maximal mss, negotiated at connection setup */ 95 u16 mss_clamp; /* Maximal mss, negotiated at connection setup */
@@ -102,7 +99,6 @@ static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
102{ 99{
103 rx_opt->tstamp_ok = rx_opt->sack_ok = 0; 100 rx_opt->tstamp_ok = rx_opt->sack_ok = 0;
104 rx_opt->wscale_ok = rx_opt->snd_wscale = 0; 101 rx_opt->wscale_ok = rx_opt->snd_wscale = 0;
105 rx_opt->cookie_plus = 0;
106} 102}
107 103
108/* This is the max number of SACKS that we'll generate and process. It's safe 104/* This is the max number of SACKS that we'll generate and process. It's safe
@@ -191,20 +187,19 @@ struct tcp_sock {
191 u32 window_clamp; /* Maximal window to advertise */ 187 u32 window_clamp; /* Maximal window to advertise */
192 u32 rcv_ssthresh; /* Current window clamp */ 188 u32 rcv_ssthresh; /* Current window clamp */
193 189
194 u32 frto_highmark; /* snd_nxt when RTO occurred */
195 u16 advmss; /* Advertised MSS */ 190 u16 advmss; /* Advertised MSS */
196 u8 frto_counter; /* Number of new acks after RTO */ 191 u8 unused;
197 u8 nonagle : 4,/* Disable Nagle algorithm? */ 192 u8 nonagle : 4,/* Disable Nagle algorithm? */
198 thin_lto : 1,/* Use linear timeouts for thin streams */ 193 thin_lto : 1,/* Use linear timeouts for thin streams */
199 thin_dupack : 1,/* Fast retransmit on first dupack */ 194 thin_dupack : 1,/* Fast retransmit on first dupack */
200 repair : 1, 195 repair : 1,
201 unused : 1; 196 frto : 1;/* F-RTO (RFC5682) activated in CA_Loss */
202 u8 repair_queue; 197 u8 repair_queue;
203 u8 do_early_retrans:1,/* Enable RFC5827 early-retransmit */ 198 u8 do_early_retrans:1,/* Enable RFC5827 early-retransmit */
204 early_retrans_delayed:1, /* Delayed ER timer installed */
205 syn_data:1, /* SYN includes data */ 199 syn_data:1, /* SYN includes data */
206 syn_fastopen:1, /* SYN includes Fast Open option */ 200 syn_fastopen:1, /* SYN includes Fast Open option */
207 syn_data_acked:1;/* data in SYN is acked by SYN-ACK */ 201 syn_data_acked:1;/* data in SYN is acked by SYN-ACK */
202 u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */
208 203
209/* RTT measurement */ 204/* RTT measurement */
210 u32 srtt; /* smoothed round trip time << 3 */ 205 u32 srtt; /* smoothed round trip time << 3 */
@@ -320,12 +315,6 @@ struct tcp_sock {
320 struct tcp_md5sig_info __rcu *md5sig_info; 315 struct tcp_md5sig_info __rcu *md5sig_info;
321#endif 316#endif
322 317
323 /* When the cookie options are generated and exchanged, then this
324 * object holds a reference to them (cookie_values->kref). Also
325 * contains related tcp_cookie_transactions fields.
326 */
327 struct tcp_cookie_values *cookie_values;
328
329/* TCP fastopen related information */ 318/* TCP fastopen related information */
330 struct tcp_fastopen_request *fastopen_req; 319 struct tcp_fastopen_request *fastopen_req;
331 /* fastopen_rsk points to request_sock that resulted in this big 320 /* fastopen_rsk points to request_sock that resulted in this big
@@ -361,10 +350,6 @@ struct tcp_timewait_sock {
361#ifdef CONFIG_TCP_MD5SIG 350#ifdef CONFIG_TCP_MD5SIG
362 struct tcp_md5sig_key *tw_md5_key; 351 struct tcp_md5sig_key *tw_md5_key;
363#endif 352#endif
364 /* Few sockets in timewait have cookies; in that case, then this
365 * object holds a reference to them (tw_cookie_values->kref).
366 */
367 struct tcp_cookie_values *tw_cookie_values;
368}; 353};
369 354
370static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk) 355static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index ff6714e6d0f5..2d7a5e045908 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -58,12 +58,6 @@ void *virtqueue_detach_unused_buf(struct virtqueue *vq);
58 58
59unsigned int virtqueue_get_vring_size(struct virtqueue *vq); 59unsigned int virtqueue_get_vring_size(struct virtqueue *vq);
60 60
61/* FIXME: Obsolete accessor, but required for virtio_net merge. */
62static inline unsigned int virtqueue_get_queue_index(struct virtqueue *vq)
63{
64 return vq->index;
65}
66
67/** 61/**
68 * virtio_device - representation of a device using virtio 62 * virtio_device - representation of a device using virtio
69 * @index: unique position on the virtio bus 63 * @index: unique position on the virtio bus
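
With the obsolete accessor gone, callers read the queue index straight from the public struct field, e.g. (my_queue_index is illustrative):

#include <linux/virtio.h>

static unsigned int my_queue_index(const struct virtqueue *vq)
{
	return vq->index;	/* was virtqueue_get_queue_index(vq) */
}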
diff --git a/include/linux/vm_sockets.h b/include/linux/vm_sockets.h
new file mode 100644
index 000000000000..0805eecba8f7
--- /dev/null
+++ b/include/linux/vm_sockets.h
@@ -0,0 +1,23 @@
1/*
2 * VMware vSockets Driver
3 *
4 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation version 2 and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 */
15
16#ifndef _VM_SOCKETS_H
17#define _VM_SOCKETS_H
18
19#include <uapi/linux/vm_sockets.h>
20
21int vm_sockets_get_local_cid(void);
22
23#endif /* _VM_SOCKETS_H */
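
The single in-kernel entry point returns the local context ID; a sketch of a caller distinguishing host from guest (VMADDR_CID_HOST comes from the UAPI header; my_is_host is illustrative):

#include <linux/types.h>
#include <linux/vm_sockets.h>

static bool my_is_host(void)
{
	return vm_sockets_get_local_cid() == VMADDR_CID_HOST;
}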
diff --git a/include/net/caif/caif_shm.h b/include/net/caif/caif_shm.h
deleted file mode 100644
index 5bcce55438cf..000000000000
--- a/include/net/caif/caif_shm.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com
4 * Author: Amarnath Revanna / amarnath.bangalore.revanna@stericsson.com
5 * License terms: GNU General Public License (GPL) version 2
6 */
7
8#ifndef CAIF_SHM_H_
9#define CAIF_SHM_H_
10
11struct shmdev_layer {
12 u32 shm_base_addr;
13 u32 shm_total_sz;
14 u32 shm_id;
15 u32 shm_loopback;
16 void *hmbx;
17 int (*pshmdev_mbxsend) (u32 shm_id, u32 mbx_msg);
18 int (*pshmdev_mbxsetup) (void *pshmdrv_cb,
19 struct shmdev_layer *pshm_dev, void *pshm_drv);
20 struct net_device *pshm_netdev;
21};
22
23extern int caif_shmcore_probe(struct shmdev_layer *pshm_dev);
24extern void caif_shmcore_remove(struct net_device *pshm_netdev);
25
26#endif
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index d581c6de5d64..bdba9b619064 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -611,22 +611,10 @@ struct cfg80211_ap_settings {
611}; 611};
612 612
613/** 613/**
614 * enum plink_action - actions to perform in mesh peers
615 *
616 * @PLINK_ACTION_INVALID: action 0 is reserved
617 * @PLINK_ACTION_OPEN: start mesh peer link establishment
618 * @PLINK_ACTION_BLOCK: block traffic from this mesh peer
619 */
620enum plink_actions {
621 PLINK_ACTION_INVALID,
622 PLINK_ACTION_OPEN,
623 PLINK_ACTION_BLOCK,
624};
625
626/**
627 * enum station_parameters_apply_mask - station parameter values to apply 614 * enum station_parameters_apply_mask - station parameter values to apply
628 * @STATION_PARAM_APPLY_UAPSD: apply new uAPSD parameters (uapsd_queues, max_sp) 615 * @STATION_PARAM_APPLY_UAPSD: apply new uAPSD parameters (uapsd_queues, max_sp)
629 * @STATION_PARAM_APPLY_CAPABILITY: apply new capability 616 * @STATION_PARAM_APPLY_CAPABILITY: apply new capability
617 * @STATION_PARAM_APPLY_PLINK_STATE: apply new plink state
630 * 618 *
631 * Not all station parameters have in-band "no change" signalling, 619 * Not all station parameters have in-band "no change" signalling,
 632 * for those that don't, these flags are used. 620 * for those that don't, these flags are used.
@@ -634,6 +622,7 @@ enum plink_actions {
634enum station_parameters_apply_mask { 622enum station_parameters_apply_mask {
635 STATION_PARAM_APPLY_UAPSD = BIT(0), 623 STATION_PARAM_APPLY_UAPSD = BIT(0),
636 STATION_PARAM_APPLY_CAPABILITY = BIT(1), 624 STATION_PARAM_APPLY_CAPABILITY = BIT(1),
625 STATION_PARAM_APPLY_PLINK_STATE = BIT(2),
637}; 626};
638 627
639/** 628/**
@@ -669,7 +658,7 @@ enum station_parameters_apply_mask {
669 * @ext_capab_len: number of extended capabilities 658 * @ext_capab_len: number of extended capabilities
670 */ 659 */
671struct station_parameters { 660struct station_parameters {
672 u8 *supported_rates; 661 const u8 *supported_rates;
673 struct net_device *vlan; 662 struct net_device *vlan;
674 u32 sta_flags_mask, sta_flags_set; 663 u32 sta_flags_mask, sta_flags_set;
675 u32 sta_modify_mask; 664 u32 sta_modify_mask;
@@ -678,17 +667,60 @@ struct station_parameters {
678 u8 supported_rates_len; 667 u8 supported_rates_len;
679 u8 plink_action; 668 u8 plink_action;
680 u8 plink_state; 669 u8 plink_state;
681 struct ieee80211_ht_cap *ht_capa; 670 const struct ieee80211_ht_cap *ht_capa;
682 struct ieee80211_vht_cap *vht_capa; 671 const struct ieee80211_vht_cap *vht_capa;
683 u8 uapsd_queues; 672 u8 uapsd_queues;
684 u8 max_sp; 673 u8 max_sp;
685 enum nl80211_mesh_power_mode local_pm; 674 enum nl80211_mesh_power_mode local_pm;
686 u16 capability; 675 u16 capability;
687 u8 *ext_capab; 676 const u8 *ext_capab;
688 u8 ext_capab_len; 677 u8 ext_capab_len;
689}; 678};
690 679
691/** 680/**
681 * enum cfg80211_station_type - the type of station being modified
682 * @CFG80211_STA_AP_CLIENT: client of an AP interface
683 * @CFG80211_STA_AP_MLME_CLIENT: client of an AP interface that has
684 * the AP MLME in the device
685 * @CFG80211_STA_AP_STA: AP station on managed interface
686 * @CFG80211_STA_IBSS: IBSS station
687 * @CFG80211_STA_TDLS_PEER_SETUP: TDLS peer on managed interface (dummy entry
688 * while TDLS setup is in progress, it moves out of this state when
689 * being marked authorized; use this only if TDLS with external setup is
690 * supported/used)
691 * @CFG80211_STA_TDLS_PEER_ACTIVE: TDLS peer on managed interface (active
692 * entry that is operating, has been marked authorized by userspace)
693 * @CFG80211_STA_MESH_PEER_KERNEL: peer on mesh interface (kernel managed)
694 * @CFG80211_STA_MESH_PEER_USER: peer on mesh interface (user managed)
695 */
696enum cfg80211_station_type {
697 CFG80211_STA_AP_CLIENT,
698 CFG80211_STA_AP_MLME_CLIENT,
699 CFG80211_STA_AP_STA,
700 CFG80211_STA_IBSS,
701 CFG80211_STA_TDLS_PEER_SETUP,
702 CFG80211_STA_TDLS_PEER_ACTIVE,
703 CFG80211_STA_MESH_PEER_KERNEL,
704 CFG80211_STA_MESH_PEER_USER,
705};
706
707/**
708 * cfg80211_check_station_change - validate parameter changes
709 * @wiphy: the wiphy this operates on
710 * @params: the new parameters for a station
711 * @statype: the type of station being modified
712 *
713 * Utility function for the @change_station driver method. Call this function
 714 * with the appropriate station type after looking up the station (and checking
 715 * that it exists). It verifies whether the station change is acceptable and,
 716 * if not, returns an error code. Note that it may modify the parameters for
 717 * backward compatibility reasons, so don't use them before calling this.
718 */
719int cfg80211_check_station_change(struct wiphy *wiphy,
720 struct station_parameters *params,
721 enum cfg80211_station_type statype);
722
723/**
692 * enum station_info_flags - station information flags 724 * enum station_info_flags - station information flags
693 * 725 *
694 * Used by the driver to indicate which info in &struct station_info 726 * Used by the driver to indicate which info in &struct station_info
@@ -1119,6 +1151,7 @@ struct mesh_config {
1119 * @ie_len: length of vendor information elements 1151 * @ie_len: length of vendor information elements
1120 * @is_authenticated: this mesh requires authentication 1152 * @is_authenticated: this mesh requires authentication
1121 * @is_secure: this mesh uses security 1153 * @is_secure: this mesh uses security
1154 * @user_mpm: userspace handles all MPM functions
1122 * @dtim_period: DTIM period to use 1155 * @dtim_period: DTIM period to use
1123 * @beacon_interval: beacon interval to use 1156 * @beacon_interval: beacon interval to use
 1124 * @mcast_rate: multicast rate for Mesh Node [6Mbps is the default for 802.11a] 1157 * @mcast_rate: multicast rate for Mesh Node [6Mbps is the default for 802.11a]
@@ -1136,6 +1169,7 @@ struct mesh_setup {
1136 u8 ie_len; 1169 u8 ie_len;
1137 bool is_authenticated; 1170 bool is_authenticated;
1138 bool is_secure; 1171 bool is_secure;
1172 bool user_mpm;
1139 u8 dtim_period; 1173 u8 dtim_period;
1140 u16 beacon_interval; 1174 u16 beacon_interval;
1141 int mcast_rate[IEEE80211_NUM_BANDS]; 1175 int mcast_rate[IEEE80211_NUM_BANDS];
@@ -1398,9 +1432,11 @@ struct cfg80211_auth_request {
1398 * enum cfg80211_assoc_req_flags - Over-ride default behaviour in association. 1432 * enum cfg80211_assoc_req_flags - Over-ride default behaviour in association.
1399 * 1433 *
1400 * @ASSOC_REQ_DISABLE_HT: Disable HT (802.11n) 1434 * @ASSOC_REQ_DISABLE_HT: Disable HT (802.11n)
1435 * @ASSOC_REQ_DISABLE_VHT: Disable VHT
1401 */ 1436 */
1402enum cfg80211_assoc_req_flags { 1437enum cfg80211_assoc_req_flags {
1403 ASSOC_REQ_DISABLE_HT = BIT(0), 1438 ASSOC_REQ_DISABLE_HT = BIT(0),
1439 ASSOC_REQ_DISABLE_VHT = BIT(1),
1404}; 1440};
1405 1441
1406/** 1442/**
@@ -1422,6 +1458,8 @@ enum cfg80211_assoc_req_flags {
1422 * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask 1458 * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask
1423 * will be used in ht_capa. Un-supported values will be ignored. 1459 * will be used in ht_capa. Un-supported values will be ignored.
1424 * @ht_capa_mask: The bits of ht_capa which are to be used. 1460 * @ht_capa_mask: The bits of ht_capa which are to be used.
1461 * @vht_capa: VHT capability override
1462 * @vht_capa_mask: VHT capability mask indicating which fields to use
1425 */ 1463 */
1426struct cfg80211_assoc_request { 1464struct cfg80211_assoc_request {
1427 struct cfg80211_bss *bss; 1465 struct cfg80211_bss *bss;
@@ -1432,6 +1470,7 @@ struct cfg80211_assoc_request {
1432 u32 flags; 1470 u32 flags;
1433 struct ieee80211_ht_cap ht_capa; 1471 struct ieee80211_ht_cap ht_capa;
1434 struct ieee80211_ht_cap ht_capa_mask; 1472 struct ieee80211_ht_cap ht_capa_mask;
1473 struct ieee80211_vht_cap vht_capa, vht_capa_mask;
1435}; 1474};
1436 1475
1437/** 1476/**
@@ -1542,6 +1581,8 @@ struct cfg80211_ibss_params {
1542 * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask 1581 * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask
1543 * will be used in ht_capa. Un-supported values will be ignored. 1582 * will be used in ht_capa. Un-supported values will be ignored.
1544 * @ht_capa_mask: The bits of ht_capa which are to be used. 1583 * @ht_capa_mask: The bits of ht_capa which are to be used.
1584 * @vht_capa: VHT Capability overrides
1585 * @vht_capa_mask: The bits of vht_capa which are to be used.
1545 */ 1586 */
1546struct cfg80211_connect_params { 1587struct cfg80211_connect_params {
1547 struct ieee80211_channel *channel; 1588 struct ieee80211_channel *channel;
@@ -1560,6 +1601,8 @@ struct cfg80211_connect_params {
1560 int bg_scan_period; 1601 int bg_scan_period;
1561 struct ieee80211_ht_cap ht_capa; 1602 struct ieee80211_ht_cap ht_capa;
1562 struct ieee80211_ht_cap ht_capa_mask; 1603 struct ieee80211_ht_cap ht_capa_mask;
1604 struct ieee80211_vht_cap vht_capa;
1605 struct ieee80211_vht_cap vht_capa_mask;
1563}; 1606};
1564 1607
1565/** 1608/**
@@ -1722,6 +1765,21 @@ struct cfg80211_gtk_rekey_data {
1722}; 1765};
1723 1766
1724/** 1767/**
1768 * struct cfg80211_update_ft_ies_params - FT IE Information
1769 *
1770 * This structure provides information needed to update the fast transition IE
1771 *
 1772 * @md: The Mobility Domain ID, a 2-octet value
 1773 * @ie: Fast Transition IEs
 1774 * @ie_len: Length of @ie in octets
1775 */
1776struct cfg80211_update_ft_ies_params {
1777 u16 md;
1778 const u8 *ie;
1779 size_t ie_len;
1780};
1781
1782/**
1725 * struct cfg80211_ops - backend description for wireless configuration 1783 * struct cfg80211_ops - backend description for wireless configuration
1726 * 1784 *
1727 * This struct is registered by fullmac card drivers and/or wireless stacks 1785 * This struct is registered by fullmac card drivers and/or wireless stacks
@@ -1781,9 +1839,8 @@ struct cfg80211_gtk_rekey_data {
1781 * @change_station: Modify a given station. Note that flags changes are not much 1839 * @change_station: Modify a given station. Note that flags changes are not much
1782 * validated in cfg80211, in particular the auth/assoc/authorized flags 1840 * validated in cfg80211, in particular the auth/assoc/authorized flags
1783 * might come to the driver in invalid combinations -- make sure to check 1841 * might come to the driver in invalid combinations -- make sure to check
1784 * them, also against the existing state! Also, supported_rates changes are 1842 * them, also against the existing state! Drivers must call
1785 * not checked in station mode -- drivers need to reject (or ignore) them 1843 * cfg80211_check_station_change() to validate the information.
1786 * for anything but TDLS peers.
1787 * @get_station: get station information for the station identified by @mac 1844 * @get_station: get station information for the station identified by @mac
1788 * @dump_station: dump station callback -- resume dump at index @idx 1845 * @dump_station: dump station callback -- resume dump at index @idx
1789 * 1846 *
@@ -2168,6 +2225,8 @@ struct cfg80211_ops {
2168 int (*start_radar_detection)(struct wiphy *wiphy, 2225 int (*start_radar_detection)(struct wiphy *wiphy,
2169 struct net_device *dev, 2226 struct net_device *dev,
2170 struct cfg80211_chan_def *chandef); 2227 struct cfg80211_chan_def *chandef);
2228 int (*update_ft_ies)(struct wiphy *wiphy, struct net_device *dev,
2229 struct cfg80211_update_ft_ies_params *ftie);
2171}; 2230};
2172 2231
2173/* 2232/*
@@ -2485,6 +2544,8 @@ struct wiphy_wowlan_support {
2485 * @ap_sme_capa: AP SME capabilities, flags from &enum nl80211_ap_sme_features. 2544 * @ap_sme_capa: AP SME capabilities, flags from &enum nl80211_ap_sme_features.
2486 * @ht_capa_mod_mask: Specify what ht_cap values can be over-ridden. 2545 * @ht_capa_mod_mask: Specify what ht_cap values can be over-ridden.
2487 * If null, then none can be over-ridden. 2546 * If null, then none can be over-ridden.
2547 * @vht_capa_mod_mask: Specify what VHT capabilities can be over-ridden.
2548 * If null, then none can be over-ridden.
2488 * 2549 *
2489 * @max_acl_mac_addrs: Maximum number of MAC addresses that the device 2550 * @max_acl_mac_addrs: Maximum number of MAC addresses that the device
2490 * supports for ACL. 2551 * supports for ACL.
@@ -2593,6 +2654,7 @@ struct wiphy {
2593 struct dentry *debugfsdir; 2654 struct dentry *debugfsdir;
2594 2655
2595 const struct ieee80211_ht_cap *ht_capa_mod_mask; 2656 const struct ieee80211_ht_cap *ht_capa_mod_mask;
2657 const struct ieee80211_vht_cap *vht_capa_mod_mask;
2596 2658
2597#ifdef CONFIG_NET_NS 2659#ifdef CONFIG_NET_NS
2598 /* the network namespace this phy lives in currently */ 2660 /* the network namespace this phy lives in currently */
@@ -4002,6 +4064,30 @@ u32 cfg80211_calculate_bitrate(struct rate_info *rate);
4002void cfg80211_unregister_wdev(struct wireless_dev *wdev); 4064void cfg80211_unregister_wdev(struct wireless_dev *wdev);
4003 4065
4004/** 4066/**
4067 * struct cfg80211_ft_event - FT Information Elements
4068 * @ies: FT IEs
4069 * @ies_len: length of the FT IE in bytes
4070 * @target_ap: target AP's MAC address
4071 * @ric_ies: RIC IE
4072 * @ric_ies_len: length of the RIC IE in bytes
4073 */
4074struct cfg80211_ft_event_params {
4075 const u8 *ies;
4076 size_t ies_len;
4077 const u8 *target_ap;
4078 const u8 *ric_ies;
4079 size_t ric_ies_len;
4080};
4081
4082/**
4083 * cfg80211_ft_event - notify userspace about FT IE and RIC IE
4084 * @netdev: network device
4085 * @ft_event: IE information
4086 */
4087void cfg80211_ft_event(struct net_device *netdev,
4088 struct cfg80211_ft_event_params *ft_event);
4089
4090/**
4005 * cfg80211_get_p2p_attr - find and copy a P2P attribute from IE buffer 4091 * cfg80211_get_p2p_attr - find and copy a P2P attribute from IE buffer
4006 * @ies: the input IE buffer 4092 * @ies: the input IE buffer
4007 * @len: the input length 4093 * @len: the input length
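
Drivers' change_station callbacks are now expected to classify the station and let cfg80211_check_station_change() do the parameter validation. A hedged sketch (the lookup and statype classification stand in for driver-specific logic):

#include <net/cfg80211.h>

static int my_change_station(struct wiphy *wiphy, struct net_device *dev,
			     u8 *mac, struct station_parameters *params)
{
	enum cfg80211_station_type statype = CFG80211_STA_AP_CLIENT;
	int err;

	/* look up the station for @mac and derive its real statype here */

	err = cfg80211_check_station_change(wiphy, params, statype);
	if (err)
		return err;

	/* apply the (possibly adjusted) parameters to the device */
	return 0;
}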
diff --git a/include/net/dn_fib.h b/include/net/dn_fib.h
index 1ee9d4bda30d..74004af31c48 100644
--- a/include/net/dn_fib.h
+++ b/include/net/dn_fib.h
@@ -1,24 +1,9 @@
1#ifndef _NET_DN_FIB_H 1#ifndef _NET_DN_FIB_H
2#define _NET_DN_FIB_H 2#define _NET_DN_FIB_H
3 3
4/* WARNING: The ordering of these elements must match ordering 4#include <linux/netlink.h>
5 * of RTA_* rtnetlink attribute numbers. 5
6 */ 6extern const struct nla_policy rtm_dn_policy[];
7struct dn_kern_rta {
8 void *rta_dst;
9 void *rta_src;
10 int *rta_iif;
11 int *rta_oif;
12 void *rta_gw;
13 u32 *rta_priority;
14 void *rta_prefsrc;
15 struct rtattr *rta_mx;
16 struct rtattr *rta_mp;
17 unsigned char *rta_protoinfo;
18 u32 *rta_flow;
19 struct rta_cacheinfo *rta_ci;
20 struct rta_session *rta_sess;
21};
22 7
23struct dn_fib_res { 8struct dn_fib_res {
24 struct fib_rule *r; 9 struct fib_rule *r;
@@ -93,10 +78,10 @@ struct dn_fib_table {
93 u32 n; 78 u32 n;
94 79
95 int (*insert)(struct dn_fib_table *t, struct rtmsg *r, 80 int (*insert)(struct dn_fib_table *t, struct rtmsg *r,
96 struct dn_kern_rta *rta, struct nlmsghdr *n, 81 struct nlattr *attrs[], struct nlmsghdr *n,
97 struct netlink_skb_parms *req); 82 struct netlink_skb_parms *req);
98 int (*delete)(struct dn_fib_table *t, struct rtmsg *r, 83 int (*delete)(struct dn_fib_table *t, struct rtmsg *r,
99 struct dn_kern_rta *rta, struct nlmsghdr *n, 84 struct nlattr *attrs[], struct nlmsghdr *n,
100 struct netlink_skb_parms *req); 85 struct netlink_skb_parms *req);
101 int (*lookup)(struct dn_fib_table *t, const struct flowidn *fld, 86 int (*lookup)(struct dn_fib_table *t, const struct flowidn *fld,
102 struct dn_fib_res *res); 87 struct dn_fib_res *res);
@@ -116,13 +101,12 @@ extern void dn_fib_cleanup(void);
116extern int dn_fib_ioctl(struct socket *sock, unsigned int cmd, 101extern int dn_fib_ioctl(struct socket *sock, unsigned int cmd,
117 unsigned long arg); 102 unsigned long arg);
118extern struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r, 103extern struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r,
119 struct dn_kern_rta *rta, 104 struct nlattr *attrs[],
120 const struct nlmsghdr *nlh, int *errp); 105 const struct nlmsghdr *nlh, int *errp);
121extern int dn_fib_semantic_match(int type, struct dn_fib_info *fi, 106extern int dn_fib_semantic_match(int type, struct dn_fib_info *fi,
122 const struct flowidn *fld, 107 const struct flowidn *fld,
123 struct dn_fib_res *res); 108 struct dn_fib_res *res);
124extern void dn_fib_release_info(struct dn_fib_info *fi); 109extern void dn_fib_release_info(struct dn_fib_info *fi);
125extern __le16 dn_fib_get_attr16(struct rtattr *attr, int attrlen, int type);
126extern void dn_fib_flush(void); 110extern void dn_fib_flush(void);
127extern void dn_fib_select_multipath(const struct flowidn *fld, 111extern void dn_fib_select_multipath(const struct flowidn *fld,
128 struct dn_fib_res *res); 112 struct dn_fib_res *res);
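
Replacing dn_kern_rta with a shared nla_policy means DECnet route requests parse like every other rtnetlink user. A sketch of the expected parsing step (my_parse_route_req is illustrative; RTA_MAX bounds the attribute array):

#include <linux/rtnetlink.h>
#include <net/dn_fib.h>
#include <net/netlink.h>

static int my_parse_route_req(struct nlmsghdr *nlh,
			      struct nlattr *attrs[RTA_MAX + 1])
{
	/* validates and fills attrs[] against the shared DECnet policy */
	return nlmsg_parse(nlh, sizeof(struct rtmsg), attrs,
			   RTA_MAX, rtm_dn_policy);
}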
diff --git a/include/net/firewire.h b/include/net/firewire.h
new file mode 100644
index 000000000000..31bcbfe7a220
--- /dev/null
+++ b/include/net/firewire.h
@@ -0,0 +1,25 @@
1#ifndef _NET_FIREWIRE_H
2#define _NET_FIREWIRE_H
3
4/* Pseudo L2 address */
5#define FWNET_ALEN 16
6union fwnet_hwaddr {
7 u8 u[FWNET_ALEN];
8 /* "Hardware address" defined in RFC2734/RF3146 */
9 struct {
10 __be64 uniq_id; /* EUI-64 */
11 u8 max_rec; /* max packet size */
12 u8 sspd; /* max speed */
13 __be16 fifo_hi; /* hi 16bits of FIFO addr */
14 __be32 fifo_lo; /* lo 32bits of FIFO addr */
15 } __packed uc;
16};
17
18/* Pseudo L2 Header */
19#define FWNET_HLEN 18
20struct fwnet_header {
21 u8 h_dest[FWNET_ALEN]; /* destination address */
22 __be16 h_proto; /* packet type ID field */
23} __packed;
24
25#endif
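
The union lays RFC 2734's 16-byte "hardware address" over an EUI-64 plus the unicast FIFO address split into high and low halves. A sketch of filling it from node parameters (my_fill_fwaddr and the field sources are illustrative):

#include <asm/byteorder.h>
#include <net/firewire.h>

static void my_fill_fwaddr(union fwnet_hwaddr *ha, u64 eui64,
			   u8 max_rec, u8 sspd, u64 fifo48)
{
	ha->uc.uniq_id = cpu_to_be64(eui64);
	ha->uc.max_rec = max_rec;
	ha->uc.sspd = sspd;
	ha->uc.fifo_hi = cpu_to_be16(fifo48 >> 32);	/* top 16 of 48 bits */
	ha->uc.fifo_lo = cpu_to_be32(fifo48 & 0xffffffff);
}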
diff --git a/include/net/gre.h b/include/net/gre.h
index 82665474bcb7..9f03a390c826 100644
--- a/include/net/gre.h
+++ b/include/net/gre.h
@@ -2,6 +2,7 @@
2#define __LINUX_GRE_H 2#define __LINUX_GRE_H
3 3
4#include <linux/skbuff.h> 4#include <linux/skbuff.h>
5#include <net/ip_tunnels.h>
5 6
6#define GREPROTO_CISCO 0 7#define GREPROTO_CISCO 0
7#define GREPROTO_PPTP 1 8#define GREPROTO_PPTP 1
@@ -12,7 +13,57 @@ struct gre_protocol {
12 void (*err_handler)(struct sk_buff *skb, u32 info); 13 void (*err_handler)(struct sk_buff *skb, u32 info);
13}; 14};
14 15
16struct gre_base_hdr {
17 __be16 flags;
18 __be16 protocol;
19};
20#define GRE_HEADER_SECTION 4
21
15int gre_add_protocol(const struct gre_protocol *proto, u8 version); 22int gre_add_protocol(const struct gre_protocol *proto, u8 version);
16int gre_del_protocol(const struct gre_protocol *proto, u8 version); 23int gre_del_protocol(const struct gre_protocol *proto, u8 version);
17 24
25static inline __be16 gre_flags_to_tnl_flags(__be16 flags)
26{
27 __be16 tflags = 0;
28
29 if (flags & GRE_CSUM)
30 tflags |= TUNNEL_CSUM;
31 if (flags & GRE_ROUTING)
32 tflags |= TUNNEL_ROUTING;
33 if (flags & GRE_KEY)
34 tflags |= TUNNEL_KEY;
35 if (flags & GRE_SEQ)
36 tflags |= TUNNEL_SEQ;
37 if (flags & GRE_STRICT)
38 tflags |= TUNNEL_STRICT;
39 if (flags & GRE_REC)
40 tflags |= TUNNEL_REC;
41 if (flags & GRE_VERSION)
42 tflags |= TUNNEL_VERSION;
43
44 return tflags;
45}
46
47static inline __be16 tnl_flags_to_gre_flags(__be16 tflags)
48{
49 __be16 flags = 0;
50
51 if (tflags & TUNNEL_CSUM)
52 flags |= GRE_CSUM;
53 if (tflags & TUNNEL_ROUTING)
54 flags |= GRE_ROUTING;
55 if (tflags & TUNNEL_KEY)
56 flags |= GRE_KEY;
57 if (tflags & TUNNEL_SEQ)
58 flags |= GRE_SEQ;
59 if (tflags & TUNNEL_STRICT)
60 flags |= GRE_STRICT;
61 if (tflags & TUNNEL_REC)
62 flags |= GRE_REC;
63 if (tflags & TUNNEL_VERSION)
64 flags |= GRE_VERSION;
65
66 return flags;
67}
68
18#endif 69#endif
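
The two inline converters map between the on-the-wire GRE_* flag bits and the tunnel-neutral TUNNEL_* bits, bit for bit in both directions. A sketch of a receive path filling the new tnl_ptk_info from a pulled header (my_parse_gre is illustrative):

#include <net/gre.h>

static void my_parse_gre(const struct gre_base_hdr *greh,
			 struct tnl_ptk_info *tpi)
{
	tpi->flags = gre_flags_to_tnl_flags(greh->flags);
	tpi->proto = greh->protocol;
	/* key/seq words, when present, follow the base header in
	 * GRE_HEADER_SECTION-sized units */
}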
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 183292722f6e..de2c78529afa 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -133,6 +133,8 @@ struct inet_connection_sock {
133#define ICSK_TIME_RETRANS 1 /* Retransmit timer */ 133#define ICSK_TIME_RETRANS 1 /* Retransmit timer */
134#define ICSK_TIME_DACK 2 /* Delayed ack timer */ 134#define ICSK_TIME_DACK 2 /* Delayed ack timer */
135#define ICSK_TIME_PROBE0 3 /* Zero window probe timer */ 135#define ICSK_TIME_PROBE0 3 /* Zero window probe timer */
136#define ICSK_TIME_EARLY_RETRANS 4 /* Early retransmit timer */
137#define ICSK_TIME_LOSS_PROBE 5 /* Tail loss probe timer */
136 138
137static inline struct inet_connection_sock *inet_csk(const struct sock *sk) 139static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
138{ 140{
@@ -222,7 +224,8 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
222 when = max_when; 224 when = max_when;
223 } 225 }
224 226
225 if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) { 227 if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0 ||
228 what == ICSK_TIME_EARLY_RETRANS || what == ICSK_TIME_LOSS_PROBE) {
226 icsk->icsk_pending = what; 229 icsk->icsk_pending = what;
227 icsk->icsk_timeout = jiffies + when; 230 icsk->icsk_timeout = jiffies + when;
228 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout); 231 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
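
inet_csk_reset_xmit_timer() now accepts the two new timer types, so the early-retransmit and tail-loss-probe code can reuse the shared retransmit timer. A sketch of arming a TLP (my_arm_tlp and the timeout computation are illustrative; TCP_RTO_MAX comes from net/tcp.h):

#include <net/tcp.h>

static void my_arm_tlp(struct sock *sk, unsigned long timeout)
{
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE,
				  timeout, TCP_RTO_MAX);
}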
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 0a1dcc2fa2f5..6f41b45e819e 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -50,10 +50,16 @@ struct inet_frag_queue {
50 */ 50 */
51#define INETFRAGS_MAXDEPTH 128 51#define INETFRAGS_MAXDEPTH 128
52 52
53struct inet_frag_bucket {
54 struct hlist_head chain;
55 spinlock_t chain_lock;
56};
57
53struct inet_frags { 58struct inet_frags {
54 struct hlist_head hash[INETFRAGS_HASHSZ]; 59 struct inet_frag_bucket hash[INETFRAGS_HASHSZ];
 55 /* This rwlock is a global lock (separate per IPv4, IPv6 and 60 /* This rwlock is a global lock (separate per IPv4, IPv6 and
 56 * netfilter). Important to keep this on a separate cacheline. 61 * netfilter). Important to keep this on a separate cacheline.
 62 * It's primarily a rebuild-protection rwlock.
57 */ 63 */
58 rwlock_t lock ____cacheline_aligned_in_smp; 64 rwlock_t lock ____cacheline_aligned_in_smp;
59 int secret_interval; 65 int secret_interval;
@@ -143,6 +149,7 @@ static inline void inet_frag_lru_del(struct inet_frag_queue *q)
143{ 149{
144 spin_lock(&q->net->lru_lock); 150 spin_lock(&q->net->lru_lock);
145 list_del(&q->lru_list); 151 list_del(&q->lru_list);
152 q->net->nqueues--;
146 spin_unlock(&q->net->lru_lock); 153 spin_unlock(&q->net->lru_lock);
147} 154}
148 155
@@ -151,6 +158,19 @@ static inline void inet_frag_lru_add(struct netns_frags *nf,
151{ 158{
152 spin_lock(&nf->lru_lock); 159 spin_lock(&nf->lru_lock);
153 list_add_tail(&q->lru_list, &nf->lru_list); 160 list_add_tail(&q->lru_list, &nf->lru_list);
161 q->net->nqueues++;
154 spin_unlock(&nf->lru_lock); 162 spin_unlock(&nf->lru_lock);
155} 163}
164
 165/* RFC 3168 support:
 166 * We want to check the ECN values of all fragments to detect invalid combinations.
167 * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value.
168 */
169#define IPFRAG_ECN_NOT_ECT 0x01 /* one frag had ECN_NOT_ECT */
170#define IPFRAG_ECN_ECT_1 0x02 /* one frag had ECN_ECT_1 */
171#define IPFRAG_ECN_ECT_0 0x04 /* one frag had ECN_ECT_0 */
172#define IPFRAG_ECN_CE 0x08 /* one frag had ECN_CE */
173
174extern const u8 ip_frag_ecn_table[16];
175
156#endif 176#endif
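
Each fragment contributes one IPFRAG_ECN_* bit, and reassembly indexes ip_frag_ecn_table with the OR of all of them. A hedged sketch of the consumer side (my_reassembly_ecn is illustrative, and the 0xff invalid-combination sentinel is an assumption of this sketch, not spelled out in the header):

#include <net/inet_frag.h>

static int my_reassembly_ecn(u8 accumulated, u8 *ecn_out)
{
	u8 ecn = ip_frag_ecn_table[accumulated & 0x0f];

	if (ecn == 0xff)	/* assumed sentinel: ECT mixed with NOT-ECT */
		return -EINVAL;

	*ecn_out = ecn;
	return 0;
}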
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index e03047f7090b..4da5de10d1d4 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/ipv6.h> 4#include <linux/ipv6.h>
5#include <linux/netdevice.h> 5#include <linux/netdevice.h>
6#include <linux/if_tunnel.h>
6#include <linux/ip6_tunnel.h> 7#include <linux/ip6_tunnel.h>
7 8
8#define IP6TUNNEL_ERR_TIMEO (30*HZ) 9#define IP6TUNNEL_ERR_TIMEO (30*HZ)
@@ -68,4 +69,24 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw);
68__u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr, 69__u32 ip6_tnl_get_cap(struct ip6_tnl *t, const struct in6_addr *laddr,
69 const struct in6_addr *raddr); 70 const struct in6_addr *raddr);
70 71
72static inline void ip6tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
73{
74 struct net_device_stats *stats = &dev->stats;
75 int pkt_len, err;
76
77 nf_reset(skb);
78 pkt_len = skb->len;
79 err = ip6_local_out(skb);
80
81 if (net_xmit_eval(err) == 0) {
82 struct pcpu_tstats *tstats = this_cpu_ptr(dev->tstats);
83 u64_stats_update_begin(&tstats->syncp);
84 tstats->tx_bytes += pkt_len;
85 tstats->tx_packets++;
86 u64_stats_update_end(&tstats->syncp);
87 } else {
88 stats->tx_errors++;
89 stats->tx_aborted_errors++;
90 }
91}
71#endif 92#endif
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
new file mode 100644
index 000000000000..4b6f0b28f41f
--- /dev/null
+++ b/include/net/ip_tunnels.h
@@ -0,0 +1,177 @@
1#ifndef __NET_IP_TUNNELS_H
2#define __NET_IP_TUNNELS_H 1
3
4#include <linux/if_tunnel.h>
5#include <linux/netdevice.h>
6#include <linux/skbuff.h>
7#include <linux/types.h>
8#include <linux/u64_stats_sync.h>
9#include <net/dsfield.h>
10#include <net/gro_cells.h>
11#include <net/inet_ecn.h>
12#include <net/ip.h>
13#include <net/rtnetlink.h>
14
15#if IS_ENABLED(CONFIG_IPV6)
16#include <net/ipv6.h>
17#include <net/ip6_fib.h>
18#include <net/ip6_route.h>
19#endif
20
21/* Keep error state on tunnel for 30 sec */
22#define IPTUNNEL_ERR_TIMEO (30*HZ)
23
24/* 6rd prefix/relay information */
25#ifdef CONFIG_IPV6_SIT_6RD
26struct ip_tunnel_6rd_parm {
27 struct in6_addr prefix;
28 __be32 relay_prefix;
29 u16 prefixlen;
30 u16 relay_prefixlen;
31};
32#endif
33
34struct ip_tunnel_prl_entry {
35 struct ip_tunnel_prl_entry __rcu *next;
36 __be32 addr;
37 u16 flags;
38 struct rcu_head rcu_head;
39};
40
41struct ip_tunnel {
42 struct ip_tunnel __rcu *next;
43 struct hlist_node hash_node;
44 struct net_device *dev;
45
46 int err_count; /* Number of arrived ICMP errors */
47 unsigned long err_time; /* Time when the last ICMP error
48 * arrived */
49
 50 /* These four fields are used only by GRE */
51 __u32 i_seqno; /* The last seen seqno */
52 __u32 o_seqno; /* The last output seqno */
53 int hlen; /* Precalculated header length */
54 int mlink;
55
56 struct ip_tunnel_parm parms;
57
58 /* for SIT */
59#ifdef CONFIG_IPV6_SIT_6RD
60 struct ip_tunnel_6rd_parm ip6rd;
61#endif
62 struct ip_tunnel_prl_entry __rcu *prl; /* potential router list */
63 unsigned int prl_count; /* # of entries in PRL */
64 int ip_tnl_net_id;
65 struct gro_cells gro_cells;
66};
67
68#define TUNNEL_CSUM __cpu_to_be16(0x01)
69#define TUNNEL_ROUTING __cpu_to_be16(0x02)
70#define TUNNEL_KEY __cpu_to_be16(0x04)
71#define TUNNEL_SEQ __cpu_to_be16(0x08)
72#define TUNNEL_STRICT __cpu_to_be16(0x10)
73#define TUNNEL_REC __cpu_to_be16(0x20)
74#define TUNNEL_VERSION __cpu_to_be16(0x40)
75#define TUNNEL_NO_KEY __cpu_to_be16(0x80)
76
77struct tnl_ptk_info {
78 __be16 flags;
79 __be16 proto;
80 __be32 key;
81 __be32 seq;
82};
83
84#define PACKET_RCVD 0
85#define PACKET_REJECT 1
86
87#define IP_TNL_HASH_BITS 10
88#define IP_TNL_HASH_SIZE (1 << IP_TNL_HASH_BITS)
89
90struct ip_tunnel_net {
91 struct hlist_head *tunnels;
92 struct net_device *fb_tunnel_dev;
93};
94
95int ip_tunnel_init(struct net_device *dev);
96void ip_tunnel_uninit(struct net_device *dev);
97void ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
98int __net_init ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
99 struct rtnl_link_ops *ops, char *devname);
100
101void __net_exit ip_tunnel_delete_net(struct ip_tunnel_net *itn);
102
103void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
104 const struct iphdr *tnl_params);
105int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
106int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
107
108struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
109 struct rtnl_link_stats64 *tot);
110struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
111 int link, __be16 flags,
112 __be32 remote, __be32 local,
113 __be32 key);
114
115int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
116 const struct tnl_ptk_info *tpi, bool log_ecn_error);
117int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
118 struct ip_tunnel_parm *p);
119int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
120 struct ip_tunnel_parm *p);
121void ip_tunnel_setup(struct net_device *dev, int net_id);
122
123/* Extract dsfield from inner protocol */
124static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
125 const struct sk_buff *skb)
126{
127 if (skb->protocol == htons(ETH_P_IP))
128 return iph->tos;
129 else if (skb->protocol == htons(ETH_P_IPV6))
130 return ipv6_get_dsfield((const struct ipv6hdr *)iph);
131 else
132 return 0;
133}
134
 135/* Propagate ECN bits out */
136static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
137 const struct sk_buff *skb)
138{
139 u8 inner = ip_tunnel_get_dsfield(iph, skb);
140
141 return INET_ECN_encapsulate(tos, inner);
142}
143
144static inline void tunnel_ip_select_ident(struct sk_buff *skb,
145 const struct iphdr *old_iph,
146 struct dst_entry *dst)
147{
148 struct iphdr *iph = ip_hdr(skb);
149
150 /* Use inner packet iph-id if possible. */
151 if (skb->protocol == htons(ETH_P_IP) && old_iph->id)
152 iph->id = old_iph->id;
153 else
154 __ip_select_ident(iph, dst,
155 (skb_shinfo(skb)->gso_segs ?: 1) - 1);
156}
157
158static inline void iptunnel_xmit(struct sk_buff *skb, struct net_device *dev)
159{
160 int err;
161 int pkt_len = skb->len - skb_transport_offset(skb);
162 struct pcpu_tstats *tstats = this_cpu_ptr(dev->tstats);
163
164 nf_reset(skb);
165
166 err = ip_local_out(skb);
167 if (likely(net_xmit_eval(err) == 0)) {
168 u64_stats_update_begin(&tstats->syncp);
169 tstats->tx_bytes += pkt_len;
170 tstats->tx_packets++;
171 u64_stats_update_end(&tstats->syncp);
172 } else {
173 dev->stats.tx_errors++;
174 dev->stats.tx_aborted_errors++;
175 }
176}
177#endif /* __NET_IP_TUNNELS_H */
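
The consolidated header gives every IP tunnel the same TOS/ECN propagation and the same stats-updating transmit tail. A sketch of an encapsulation path stitching the helpers together (my_tunnel_send is illustrative; the outer header is assumed already built and the transport header set, since iptunnel_xmit() measures from it):

#include <net/ip_tunnels.h>

static void my_tunnel_send(struct sk_buff *skb, struct net_device *dev,
			   const struct iphdr *inner_iph, u8 cfg_tos)
{
	struct iphdr *iph = ip_hdr(skb);	/* outer header */

	iph->tos = ip_tunnel_ecn_encap(cfg_tos, inner_iph, skb);
	iptunnel_xmit(skb, dev);	/* bumps tstats or tx_errors */
}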
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index fce8e6b66d55..f9f5b057b480 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -233,6 +233,21 @@ static inline void ip_vs_addr_copy(int af, union nf_inet_addr *dst,
233 dst->ip = src->ip; 233 dst->ip = src->ip;
234} 234}
235 235
236static inline void ip_vs_addr_set(int af, union nf_inet_addr *dst,
237 const union nf_inet_addr *src)
238{
239#ifdef CONFIG_IP_VS_IPV6
240 if (af == AF_INET6) {
241 dst->in6 = src->in6;
242 return;
243 }
244#endif
245 dst->ip = src->ip;
246 dst->all[1] = 0;
247 dst->all[2] = 0;
248 dst->all[3] = 0;
249}
250
236static inline int ip_vs_addr_equal(int af, const union nf_inet_addr *a, 251static inline int ip_vs_addr_equal(int af, const union nf_inet_addr *a,
237 const union nf_inet_addr *b) 252 const union nf_inet_addr *b)
238{ 253{
@@ -344,8 +359,6 @@ static inline const char *ip_vs_dbg_addr(int af, char *buf, size_t buf_len,
344#define LeaveFunction(level) do {} while (0) 359#define LeaveFunction(level) do {} while (0)
345#endif 360#endif
346 361
347#define IP_VS_WAIT_WHILE(expr) while (expr) { cpu_relax(); }
348
349 362
350/* 363/*
351 * The port number of FTP service (in network order). 364 * The port number of FTP service (in network order).
@@ -459,7 +472,7 @@ struct ip_vs_estimator {
459struct ip_vs_stats { 472struct ip_vs_stats {
460 struct ip_vs_stats_user ustats; /* statistics */ 473 struct ip_vs_stats_user ustats; /* statistics */
461 struct ip_vs_estimator est; /* estimator */ 474 struct ip_vs_estimator est; /* estimator */
462 struct ip_vs_cpu_stats *cpustats; /* per cpu counters */ 475 struct ip_vs_cpu_stats __percpu *cpustats; /* per cpu counters */
463 spinlock_t lock; /* spin lock */ 476 spinlock_t lock; /* spin lock */
464 struct ip_vs_stats_user ustats0; /* reset values */ 477 struct ip_vs_stats_user ustats0; /* reset values */
465}; 478};
@@ -566,20 +579,19 @@ struct ip_vs_conn_param {
566 */ 579 */
567struct ip_vs_conn { 580struct ip_vs_conn {
568 struct hlist_node c_list; /* hashed list heads */ 581 struct hlist_node c_list; /* hashed list heads */
569#ifdef CONFIG_NET_NS
570 struct net *net; /* Name space */
571#endif
572 /* Protocol, addresses and port numbers */ 582 /* Protocol, addresses and port numbers */
573 u16 af; /* address family */
574 __be16 cport; 583 __be16 cport;
575 __be16 vport;
576 __be16 dport; 584 __be16 dport;
 577 __u32 fwmark; /* Firewall mark from skb */ 585 __be16 vport;
586 u16 af; /* address family */
578 union nf_inet_addr caddr; /* client address */ 587 union nf_inet_addr caddr; /* client address */
579 union nf_inet_addr vaddr; /* virtual address */ 588 union nf_inet_addr vaddr; /* virtual address */
580 union nf_inet_addr daddr; /* destination address */ 589 union nf_inet_addr daddr; /* destination address */
581 volatile __u32 flags; /* status flags */ 590 volatile __u32 flags; /* status flags */
582 __u16 protocol; /* Which protocol (TCP/UDP) */ 591 __u16 protocol; /* Which protocol (TCP/UDP) */
592#ifdef CONFIG_NET_NS
593 struct net *net; /* Name space */
594#endif
583 595
584 /* counter and timer */ 596 /* counter and timer */
585 atomic_t refcnt; /* reference count */ 597 atomic_t refcnt; /* reference count */
@@ -593,6 +605,7 @@ struct ip_vs_conn {
 593 * state transition triggered 605 * state transition triggered
594 * synchronization 606 * synchronization
595 */ 607 */
 608 __u32 fwmark; /* Firewall mark from skb */
596 unsigned long sync_endtime; /* jiffies + sent_retries */ 609 unsigned long sync_endtime; /* jiffies + sent_retries */
597 610
598 /* Control members */ 611 /* Control members */
@@ -620,6 +633,8 @@ struct ip_vs_conn {
620 const struct ip_vs_pe *pe; 633 const struct ip_vs_pe *pe;
621 char *pe_data; 634 char *pe_data;
622 __u8 pe_data_len; 635 __u8 pe_data_len;
636
637 struct rcu_head rcu_head;
623}; 638};
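
The rcu_head at the tail of struct ip_vs_conn is what allows a connection to be unhashed while RCU readers may still hold pointers to it. A hedged sketch of the free path such a field enables; the callback name matches the ip_vs_conn_rcu_free() referenced later in this patch, but the body and cache name are illustrative:

static void ip_vs_conn_rcu_free(struct rcu_head *head)
{
	struct ip_vs_conn *cp = container_of(head, struct ip_vs_conn,
					     rcu_head);

	kfree(cp->pe_data);			/* PE data dies with the conn */
	kmem_cache_free(ip_vs_conn_cachep, cp);	/* cache name assumed */
}

	/* unlink side: defer the actual free past all RCU readers */
	call_rcu(&cp->rcu_head, ip_vs_conn_rcu_free);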
624 639
625/* 640/*
@@ -695,10 +710,9 @@ struct ip_vs_dest_user_kern {
695 * and the forwarding entries 710 * and the forwarding entries
696 */ 711 */
697struct ip_vs_service { 712struct ip_vs_service {
698 struct list_head s_list; /* for normal service table */ 713 struct hlist_node s_list; /* for normal service table */
699 struct list_head f_list; /* for fwmark-based service table */ 714 struct hlist_node f_list; /* for fwmark-based service table */
700 atomic_t refcnt; /* reference counter */ 715 atomic_t refcnt; /* reference counter */
701 atomic_t usecnt; /* use counter */
702 716
703 u16 af; /* address family */ 717 u16 af; /* address family */
704 __u16 protocol; /* which protocol (TCP/UDP) */ 718 __u16 protocol; /* which protocol (TCP/UDP) */
@@ -713,25 +727,35 @@ struct ip_vs_service {
713 struct list_head destinations; /* real server d-linked list */ 727 struct list_head destinations; /* real server d-linked list */
714 __u32 num_dests; /* number of servers */ 728 __u32 num_dests; /* number of servers */
715 struct ip_vs_stats stats; /* statistics for the service */ 729 struct ip_vs_stats stats; /* statistics for the service */
716 struct ip_vs_app *inc; /* bind conns to this app inc */
717 730
718 /* for scheduling */ 731 /* for scheduling */
719 struct ip_vs_scheduler *scheduler; /* bound scheduler object */ 732 struct ip_vs_scheduler __rcu *scheduler; /* bound scheduler object */
720 rwlock_t sched_lock; /* lock sched_data */ 733 spinlock_t sched_lock; /* lock sched_data */
721 void *sched_data; /* scheduler application data */ 734 void *sched_data; /* scheduler application data */
722 735
723 /* alternate persistence engine */ 736 /* alternate persistence engine */
724 struct ip_vs_pe *pe; 737 struct ip_vs_pe __rcu *pe;
738
739 struct rcu_head rcu_head;
725}; 740};
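
With scheduler and pe now __rcu-annotated (and sched_lock downgraded from an rwlock to a plain spinlock for the writer side), reads must go through rcu_dereference() and publication through rcu_assign_pointer(). A schematic of both sides, with the argument list of ->schedule abbreviated to what is visible in this hunk:

	/* update side, under the caller's write-side exclusion */
	rcu_assign_pointer(svc->scheduler, new_sched);

	/* read side, between rcu_read_lock() and rcu_read_unlock() */
	struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);

	if (sched)
		dest = sched->schedule(svc, skb);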
726 741
742/* Information for cached dst */
743struct ip_vs_dest_dst {
744 struct dst_entry *dst_cache; /* destination cache entry */
745 u32 dst_cookie;
746 union nf_inet_addr dst_saddr;
747 struct rcu_head rcu_head;
748};
727 749
750/* In grace period after removing */
751#define IP_VS_DEST_STATE_REMOVING 0x01
728/* 752/*
729 * The real server destination forwarding entry 753 * The real server destination forwarding entry
730 * with ip address, port number, and so on. 754 * with ip address, port number, and so on.
731 */ 755 */
732struct ip_vs_dest { 756struct ip_vs_dest {
733 struct list_head n_list; /* for the dests in the service */ 757 struct list_head n_list; /* for the dests in the service */
734 struct list_head d_list; /* for table with all the dests */ 758 struct hlist_node d_list; /* for table with all the dests */
735 759
736 u16 af; /* address family */ 760 u16 af; /* address family */
737 __be16 port; /* port number of the server */ 761 __be16 port; /* port number of the server */
@@ -742,6 +766,7 @@ struct ip_vs_dest {
742 766
743 atomic_t refcnt; /* reference counter */ 767 atomic_t refcnt; /* reference counter */
744 struct ip_vs_stats stats; /* statistics */ 768 struct ip_vs_stats stats; /* statistics */
769 unsigned long state; /* state flags */
745 770
746 /* connection counters and thresholds */ 771 /* connection counters and thresholds */
747 atomic_t activeconns; /* active connections */ 772 atomic_t activeconns; /* active connections */
@@ -752,10 +777,7 @@ struct ip_vs_dest {
752 777
753 /* for destination cache */ 778 /* for destination cache */
754 spinlock_t dst_lock; /* lock of dst_cache */ 779 spinlock_t dst_lock; /* lock of dst_cache */
755 struct dst_entry *dst_cache; /* destination cache entry */ 780 struct ip_vs_dest_dst __rcu *dest_dst; /* cached dst info */
756 u32 dst_rtos; /* RT_TOS(tos) for dst */
757 u32 dst_cookie;
758 union nf_inet_addr dst_saddr;
759 781
760 /* for virtual service */ 782 /* for virtual service */
761 struct ip_vs_service *svc; /* service it belongs to */ 783 struct ip_vs_service *svc; /* service it belongs to */
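
Folding dst_cache/dst_cookie/dst_saddr into one RCU-managed struct ip_vs_dest_dst means the cached route can be swapped atomically: a writer publishes a whole new object under dst_lock and frees the old one after a grace period. A hedged sketch of that replacement (ip_vs_dest_dst_rcu_free() is declared later in this patch; the helper name here is illustrative):

static void sketch_replace_dest_dst(struct ip_vs_dest *dest,
				    struct ip_vs_dest_dst *new_dd)
{
	struct ip_vs_dest_dst *old;

	spin_lock_bh(&dest->dst_lock);
	old = rcu_dereference_protected(dest->dest_dst,
					lockdep_is_held(&dest->dst_lock));
	rcu_assign_pointer(dest->dest_dst, new_dd);
	spin_unlock_bh(&dest->dst_lock);
	if (old)
		call_rcu(&old->rcu_head, ip_vs_dest_dst_rcu_free);
}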
@@ -763,6 +785,10 @@ struct ip_vs_dest {
763 __be16 vport; /* virtual port number */ 785 __be16 vport; /* virtual port number */
764 union nf_inet_addr vaddr; /* virtual IP address */ 786 union nf_inet_addr vaddr; /* virtual IP address */
765 __u32 vfwmark; /* firewall mark of service */ 787 __u32 vfwmark; /* firewall mark of service */
788
789 struct list_head t_list; /* in dest_trash */
790 struct rcu_head rcu_head;
791 unsigned int in_rs_table:1; /* we are in rs_table */
766}; 792};
767 793
768 794
@@ -778,9 +804,13 @@ struct ip_vs_scheduler {
778 /* scheduler initializing service */ 804 /* scheduler initializing service */
779 int (*init_service)(struct ip_vs_service *svc); 805 int (*init_service)(struct ip_vs_service *svc);
780 /* scheduling service finish */ 806 /* scheduling service finish */
781 int (*done_service)(struct ip_vs_service *svc); 807 void (*done_service)(struct ip_vs_service *svc);
782 /* scheduler updating service */ 808 /* dest is linked */
783 int (*update_service)(struct ip_vs_service *svc); 809 int (*add_dest)(struct ip_vs_service *svc, struct ip_vs_dest *dest);
810 /* dest is unlinked */
811 int (*del_dest)(struct ip_vs_service *svc, struct ip_vs_dest *dest);
812 /* dest is updated */
813 int (*upd_dest)(struct ip_vs_service *svc, struct ip_vs_dest *dest);
784 814
785 /* selecting a server from the given service */ 815 /* selecting a server from the given service */
786 struct ip_vs_dest* (*schedule)(struct ip_vs_service *svc, 816 struct ip_vs_dest* (*schedule)(struct ip_vs_service *svc,
@@ -819,6 +849,7 @@ struct ip_vs_app {
819 struct ip_vs_app *app; /* its real application */ 849 struct ip_vs_app *app; /* its real application */
820 __be16 port; /* port number in net order */ 850 __be16 port; /* port number in net order */
821 atomic_t usecnt; /* usage counter */ 851 atomic_t usecnt; /* usage counter */
852 struct rcu_head rcu_head;
822 853
823 /* 854 /*
824 * output hook: Process packet in inout direction, diff set for TCP. 855 * output hook: Process packet in inout direction, diff set for TCP.
@@ -881,6 +912,9 @@ struct ipvs_master_sync_state {
881 struct netns_ipvs *ipvs; 912 struct netns_ipvs *ipvs;
882}; 913};
883 914
915/* How much time to keep dests in trash */
916#define IP_VS_DEST_TRASH_PERIOD (120 * HZ)
917
884/* IPVS in network namespace */ 918/* IPVS in network namespace */
885struct netns_ipvs { 919struct netns_ipvs {
886 int gen; /* Generation */ 920 int gen; /* Generation */
@@ -892,7 +926,7 @@ struct netns_ipvs {
892 #define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS) 926 #define IP_VS_RTAB_SIZE (1 << IP_VS_RTAB_BITS)
893 #define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1) 927 #define IP_VS_RTAB_MASK (IP_VS_RTAB_SIZE - 1)
894 928
895 struct list_head rs_table[IP_VS_RTAB_SIZE]; 929 struct hlist_head rs_table[IP_VS_RTAB_SIZE];
896 /* ip_vs_app */ 930 /* ip_vs_app */
897 struct list_head app_list; 931 struct list_head app_list;
898 /* ip_vs_proto */ 932 /* ip_vs_proto */
@@ -904,7 +938,6 @@ struct netns_ipvs {
904 #define TCP_APP_TAB_SIZE (1 << TCP_APP_TAB_BITS) 938 #define TCP_APP_TAB_SIZE (1 << TCP_APP_TAB_BITS)
905 #define TCP_APP_TAB_MASK (TCP_APP_TAB_SIZE - 1) 939 #define TCP_APP_TAB_MASK (TCP_APP_TAB_SIZE - 1)
906 struct list_head tcp_apps[TCP_APP_TAB_SIZE]; 940 struct list_head tcp_apps[TCP_APP_TAB_SIZE];
907 spinlock_t tcp_app_lock;
908#endif 941#endif
909 /* ip_vs_proto_udp */ 942 /* ip_vs_proto_udp */
910#ifdef CONFIG_IP_VS_PROTO_UDP 943#ifdef CONFIG_IP_VS_PROTO_UDP
@@ -912,7 +945,6 @@ struct netns_ipvs {
912 #define UDP_APP_TAB_SIZE (1 << UDP_APP_TAB_BITS) 945 #define UDP_APP_TAB_SIZE (1 << UDP_APP_TAB_BITS)
913 #define UDP_APP_TAB_MASK (UDP_APP_TAB_SIZE - 1) 946 #define UDP_APP_TAB_MASK (UDP_APP_TAB_SIZE - 1)
914 struct list_head udp_apps[UDP_APP_TAB_SIZE]; 947 struct list_head udp_apps[UDP_APP_TAB_SIZE];
915 spinlock_t udp_app_lock;
916#endif 948#endif
917 /* ip_vs_proto_sctp */ 949 /* ip_vs_proto_sctp */
918#ifdef CONFIG_IP_VS_PROTO_SCTP 950#ifdef CONFIG_IP_VS_PROTO_SCTP
@@ -921,7 +953,6 @@ struct netns_ipvs {
921 #define SCTP_APP_TAB_MASK (SCTP_APP_TAB_SIZE - 1) 953 #define SCTP_APP_TAB_MASK (SCTP_APP_TAB_SIZE - 1)
922 /* Hash table for SCTP application incarnations */ 954 /* Hash table for SCTP application incarnations */
923 struct list_head sctp_apps[SCTP_APP_TAB_SIZE]; 955 struct list_head sctp_apps[SCTP_APP_TAB_SIZE];
924 spinlock_t sctp_app_lock;
925#endif 956#endif
926 /* ip_vs_conn */ 957 /* ip_vs_conn */
927 atomic_t conn_count; /* connection counter */ 958 atomic_t conn_count; /* connection counter */
@@ -931,9 +962,10 @@ struct netns_ipvs {
931 962
932 int num_services; /* no of virtual services */ 963 int num_services; /* no of virtual services */
933 964
934 rwlock_t rs_lock; /* real services table */
935 /* Trash for destinations */ 965 /* Trash for destinations */
936 struct list_head dest_trash; 966 struct list_head dest_trash;
967 spinlock_t dest_trash_lock;
968 struct timer_list dest_trash_timer; /* expiration timer */
937 /* Service counters */ 969 /* Service counters */
938 atomic_t ftpsvc_counter; 970 atomic_t ftpsvc_counter;
939 atomic_t nullsvc_counter; 971 atomic_t nullsvc_counter;
@@ -1181,9 +1213,19 @@ struct ip_vs_conn * ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb,
1181 const struct ip_vs_iphdr *iph, 1213 const struct ip_vs_iphdr *iph,
1182 int inverse); 1214 int inverse);
1183 1215
1216/* Get reference to gain full access to conn.
1217 * By default, RCU read-side critical sections have access only to
1218 * conn fields and its PE data, see ip_vs_conn_rcu_free() for reference.
1219 */
1220static inline bool __ip_vs_conn_get(struct ip_vs_conn *cp)
1221{
1222 return atomic_inc_not_zero(&cp->refcnt);
1223}
1224
1184/* put back the conn without restarting its timer */ 1225/* put back the conn without restarting its timer */
1185static inline void __ip_vs_conn_put(struct ip_vs_conn *cp) 1226static inline void __ip_vs_conn_put(struct ip_vs_conn *cp)
1186{ 1227{
1228 smp_mb__before_atomic_dec();
1187 atomic_dec(&cp->refcnt); 1229 atomic_dec(&cp->refcnt);
1188} 1230}
1189extern void ip_vs_conn_put(struct ip_vs_conn *cp); 1231extern void ip_vs_conn_put(struct ip_vs_conn *cp);
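
__ip_vs_conn_get() only succeeds while the refcount is non-zero, which is exactly what makes it safe on objects found under RCU: a dying conn can still be seen by readers but can no longer be revived. A small runnable userspace analogue of the primitive, using C11 atomics in place of the kernel's atomic_t:

#include <stdatomic.h>
#include <stdbool.h>

/* Userspace analogue of atomic_inc_not_zero(): take a reference only
 * if the object still holds at least one, i.e. never resurrect an
 * object whose last reference is already being dropped. */
static bool ref_get_unless_zero(atomic_int *refcnt)
{
	int old = atomic_load_explicit(refcnt, memory_order_relaxed);

	while (old != 0)
		if (atomic_compare_exchange_weak(refcnt, &old, old + 1))
			return true;
	return false;
}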
@@ -1298,8 +1340,6 @@ extern void ip_vs_app_inc_put(struct ip_vs_app *inc);
1298extern int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff *skb); 1340extern int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff *skb);
1299extern int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff *skb); 1341extern int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff *skb);
1300 1342
1301void ip_vs_bind_pe(struct ip_vs_service *svc, struct ip_vs_pe *pe);
1302void ip_vs_unbind_pe(struct ip_vs_service *svc);
1303int register_ip_vs_pe(struct ip_vs_pe *pe); 1343int register_ip_vs_pe(struct ip_vs_pe *pe);
1304int unregister_ip_vs_pe(struct ip_vs_pe *pe); 1344int unregister_ip_vs_pe(struct ip_vs_pe *pe);
1305struct ip_vs_pe *ip_vs_pe_getbyname(const char *name); 1345struct ip_vs_pe *ip_vs_pe_getbyname(const char *name);
@@ -1346,7 +1386,8 @@ extern int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
1346extern int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler); 1386extern int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler);
1347extern int ip_vs_bind_scheduler(struct ip_vs_service *svc, 1387extern int ip_vs_bind_scheduler(struct ip_vs_service *svc,
1348 struct ip_vs_scheduler *scheduler); 1388 struct ip_vs_scheduler *scheduler);
1349extern int ip_vs_unbind_scheduler(struct ip_vs_service *svc); 1389extern void ip_vs_unbind_scheduler(struct ip_vs_service *svc,
1390 struct ip_vs_scheduler *sched);
1350extern struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name); 1391extern struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name);
1351extern void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler); 1392extern void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler);
1352extern struct ip_vs_conn * 1393extern struct ip_vs_conn *
@@ -1366,17 +1407,12 @@ extern struct ip_vs_stats ip_vs_stats;
1366extern int sysctl_ip_vs_sync_ver; 1407extern int sysctl_ip_vs_sync_ver;
1367 1408
1368extern struct ip_vs_service * 1409extern struct ip_vs_service *
1369ip_vs_service_get(struct net *net, int af, __u32 fwmark, __u16 protocol, 1410ip_vs_service_find(struct net *net, int af, __u32 fwmark, __u16 protocol,
1370 const union nf_inet_addr *vaddr, __be16 vport); 1411 const union nf_inet_addr *vaddr, __be16 vport);
1371 1412
1372static inline void ip_vs_service_put(struct ip_vs_service *svc) 1413extern bool
1373{ 1414ip_vs_has_real_service(struct net *net, int af, __u16 protocol,
1374 atomic_dec(&svc->usecnt); 1415 const union nf_inet_addr *daddr, __be16 dport);
1375}
1376
1377extern struct ip_vs_dest *
1378ip_vs_lookup_real_service(struct net *net, int af, __u16 protocol,
1379 const union nf_inet_addr *daddr, __be16 dport);
1380 1416
1381extern int ip_vs_use_count_inc(void); 1417extern int ip_vs_use_count_inc(void);
1382extern void ip_vs_use_count_dec(void); 1418extern void ip_vs_use_count_dec(void);
@@ -1388,8 +1424,18 @@ extern struct ip_vs_dest *
1388ip_vs_find_dest(struct net *net, int af, const union nf_inet_addr *daddr, 1424ip_vs_find_dest(struct net *net, int af, const union nf_inet_addr *daddr,
1389 __be16 dport, const union nf_inet_addr *vaddr, __be16 vport, 1425 __be16 dport, const union nf_inet_addr *vaddr, __be16 vport,
1390 __u16 protocol, __u32 fwmark, __u32 flags); 1426 __u16 protocol, __u32 fwmark, __u32 flags);
1391extern struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp); 1427extern void ip_vs_try_bind_dest(struct ip_vs_conn *cp);
1392 1428
1429static inline void ip_vs_dest_hold(struct ip_vs_dest *dest)
1430{
1431 atomic_inc(&dest->refcnt);
1432}
1433
1434static inline void ip_vs_dest_put(struct ip_vs_dest *dest)
1435{
1436 smp_mb__before_atomic_dec();
1437 atomic_dec(&dest->refcnt);
1438}
1393 1439
1394/* 1440/*
1395 * IPVS sync daemon data and function prototypes 1441 * IPVS sync daemon data and function prototypes
@@ -1428,7 +1474,7 @@ extern int ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1428extern int ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, 1474extern int ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1429 struct ip_vs_protocol *pp, int offset, 1475 struct ip_vs_protocol *pp, int offset,
1430 unsigned int hooknum, struct ip_vs_iphdr *iph); 1476 unsigned int hooknum, struct ip_vs_iphdr *iph);
1431extern void ip_vs_dst_reset(struct ip_vs_dest *dest); 1477extern void ip_vs_dest_dst_rcu_free(struct rcu_head *head);
1432 1478
1433#ifdef CONFIG_IP_VS_IPV6 1479#ifdef CONFIG_IP_VS_IPV6
1434extern int ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, 1480extern int ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
diff --git a/include/net/ipip.h b/include/net/ipip.h
deleted file mode 100644
index 982141c15200..000000000000
--- a/include/net/ipip.h
+++ /dev/null
@@ -1,87 +0,0 @@
1#ifndef __NET_IPIP_H
2#define __NET_IPIP_H 1
3
4#include <linux/if_tunnel.h>
5#include <net/gro_cells.h>
6#include <net/ip.h>
7
8/* Keep error state on tunnel for 30 sec */
9#define IPTUNNEL_ERR_TIMEO (30*HZ)
10
11/* 6rd prefix/relay information */
12struct ip_tunnel_6rd_parm {
13 struct in6_addr prefix;
14 __be32 relay_prefix;
15 u16 prefixlen;
16 u16 relay_prefixlen;
17};
18
19struct ip_tunnel {
20 struct ip_tunnel __rcu *next;
21 struct net_device *dev;
22
23 int err_count; /* Number of arrived ICMP errors */
24 unsigned long err_time; /* Time when the last ICMP error arrived */
25
26 /* These four fields used only by GRE */
27 __u32 i_seqno; /* The last seen seqno */
28 __u32 o_seqno; /* The last output seqno */
29 int hlen; /* Precalculated GRE header length */
30 int mlink;
31
32 struct ip_tunnel_parm parms;
33
34 /* for SIT */
35#ifdef CONFIG_IPV6_SIT_6RD
36 struct ip_tunnel_6rd_parm ip6rd;
37#endif
38 struct ip_tunnel_prl_entry __rcu *prl; /* potential router list */
39 unsigned int prl_count; /* # of entries in PRL */
40
41 struct gro_cells gro_cells;
42};
43
44struct ip_tunnel_prl_entry {
45 struct ip_tunnel_prl_entry __rcu *next;
46 __be32 addr;
47 u16 flags;
48 struct rcu_head rcu_head;
49};
50
51static inline void iptunnel_xmit(struct sk_buff *skb, struct net_device *dev)
52{
53 int err;
54 struct iphdr *iph = ip_hdr(skb);
55 int pkt_len = skb->len - skb_transport_offset(skb);
56 struct pcpu_tstats *tstats = this_cpu_ptr(dev->tstats);
57
58 nf_reset(skb);
59 skb->ip_summed = CHECKSUM_NONE;
60 ip_select_ident(iph, skb_dst(skb), NULL);
61
62 err = ip_local_out(skb);
63 if (likely(net_xmit_eval(err) == 0)) {
64 u64_stats_update_begin(&tstats->syncp);
65 tstats->tx_bytes += pkt_len;
66 tstats->tx_packets++;
67 u64_stats_update_end(&tstats->syncp);
68 } else {
69 dev->stats.tx_errors++;
70 dev->stats.tx_aborted_errors++;
71 }
72}
73
74static inline void tunnel_ip_select_ident(struct sk_buff *skb,
75 const struct iphdr *old_iph,
76 struct dst_entry *dst)
77{
78 struct iphdr *iph = ip_hdr(skb);
79
80 /* Use inner packet iph-id if possible. */
81 if (skb->protocol == htons(ETH_P_IP) && old_iph->id)
82 iph->id = old_iph->id;
83 else
84 __ip_select_ident(iph, dst,
85 (skb_shinfo(skb)->gso_segs ?: 1) - 1);
86}
87#endif
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 64d12e77719a..0810aa57c780 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -217,7 +217,7 @@ struct ipv6_txoptions {
217}; 217};
218 218
219struct ip6_flowlabel { 219struct ip6_flowlabel {
220 struct ip6_flowlabel *next; 220 struct ip6_flowlabel __rcu *next;
221 __be32 label; 221 __be32 label;
222 atomic_t users; 222 atomic_t users;
223 struct in6_addr dst; 223 struct in6_addr dst;
@@ -238,9 +238,9 @@ struct ip6_flowlabel {
238#define IPV6_FLOWLABEL_MASK cpu_to_be32(0x000FFFFF) 238#define IPV6_FLOWLABEL_MASK cpu_to_be32(0x000FFFFF)
239 239
240struct ipv6_fl_socklist { 240struct ipv6_fl_socklist {
241 struct ipv6_fl_socklist *next; 241 struct ipv6_fl_socklist __rcu *next;
242 struct ip6_flowlabel *fl; 242 struct ip6_flowlabel *fl;
243 struct rcu_head rcu; 243 struct rcu_head rcu;
244}; 244};
245 245
246extern struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label); 246extern struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label);
@@ -320,6 +320,18 @@ static inline int ipv6_addr_src_scope(const struct in6_addr *addr)
320 return __ipv6_addr_src_scope(__ipv6_addr_type(addr)); 320 return __ipv6_addr_src_scope(__ipv6_addr_type(addr));
321} 321}
322 322
323static inline bool __ipv6_addr_needs_scope_id(int type)
324{
325 return type & IPV6_ADDR_LINKLOCAL ||
326 (type & IPV6_ADDR_MULTICAST &&
327 (type & (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)));
328}
329
330static inline __u32 ipv6_iface_scope_id(const struct in6_addr *addr, int iface)
331{
332 return __ipv6_addr_needs_scope_id(__ipv6_addr_type(addr)) ? iface : 0;
333}
334
323static inline int ipv6_addr_cmp(const struct in6_addr *a1, const struct in6_addr *a2) 335static inline int ipv6_addr_cmp(const struct in6_addr *a1, const struct in6_addr *a2)
324{ 336{
325 return memcmp(a1, a2, sizeof(struct in6_addr)); 337 return memcmp(a1, a2, sizeof(struct in6_addr));
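
The new helpers encode when an IPv6 address is ambiguous without an interface: link-local unicast, plus multicast whose scope is interface- or link-local. Userspace sees the same rule as the requirement to fill sin6_scope_id; a runnable illustration (the interface name "eth0" is an assumption):

#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct sockaddr_in6 dst;

	memset(&dst, 0, sizeof(dst));
	dst.sin6_family = AF_INET6;
	inet_pton(AF_INET6, "fe80::1", &dst.sin6_addr);
	/* fe80::/10 is link-local: without a scope id the kernel cannot
	 * pick an egress interface, so connect()/sendto() would fail. */
	dst.sin6_scope_id = if_nametoindex("eth0");
	printf("scope id: %u\n", dst.sin6_scope_id);
	return 0;
}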
@@ -466,6 +478,7 @@ struct ip6_create_arg {
466 u32 user; 478 u32 user;
467 const struct in6_addr *src; 479 const struct in6_addr *src;
468 const struct in6_addr *dst; 480 const struct in6_addr *dst;
481 u8 ecn;
469}; 482};
470 483
471void ip6_frag_init(struct inet_frag_queue *q, void *a); 484void ip6_frag_init(struct inet_frag_queue *q, void *a);
@@ -485,6 +498,7 @@ struct frag_queue {
485 int iif; 498 int iif;
486 unsigned int csum; 499 unsigned int csum;
487 __u16 nhoffset; 500 __u16 nhoffset;
501 u8 ecn;
488}; 502};
489 503
490void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq, 504void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index f7eba1300d82..cdd7cea1fd4c 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1101,8 +1101,6 @@ static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
1101 * These flags are used for communication about keys between the driver 1101 * These flags are used for communication about keys between the driver
1102 * and mac80211, with the @flags parameter of &struct ieee80211_key_conf. 1102 * and mac80211, with the @flags parameter of &struct ieee80211_key_conf.
1103 * 1103 *
1104 * @IEEE80211_KEY_FLAG_WMM_STA: Set by mac80211, this flag indicates
1105 * that the STA this key will be used with could be using QoS.
1106 * @IEEE80211_KEY_FLAG_GENERATE_IV: This flag should be set by the 1104 * @IEEE80211_KEY_FLAG_GENERATE_IV: This flag should be set by the
1107 * driver to indicate that it requires IV generation for this 1105 * driver to indicate that it requires IV generation for this
1108 * particular key. 1106 * particular key.
@@ -1127,7 +1125,6 @@ static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif)
1127 * %IEEE80211_KEY_FLAG_SW_MGMT_TX flag to encrypt such frames in SW. 1125 * %IEEE80211_KEY_FLAG_SW_MGMT_TX flag to encrypt such frames in SW.
1128 */ 1126 */
1129enum ieee80211_key_flags { 1127enum ieee80211_key_flags {
1130 IEEE80211_KEY_FLAG_WMM_STA = 1<<0,
1131 IEEE80211_KEY_FLAG_GENERATE_IV = 1<<1, 1128 IEEE80211_KEY_FLAG_GENERATE_IV = 1<<1,
1132 IEEE80211_KEY_FLAG_GENERATE_MMIC= 1<<2, 1129 IEEE80211_KEY_FLAG_GENERATE_MMIC= 1<<2,
1133 IEEE80211_KEY_FLAG_PAIRWISE = 1<<3, 1130 IEEE80211_KEY_FLAG_PAIRWISE = 1<<3,
@@ -1231,9 +1228,8 @@ enum ieee80211_sta_rx_bandwidth {
1231 * @addr: MAC address 1228 * @addr: MAC address
1232 * @aid: AID we assigned to the station if we're an AP 1229 * @aid: AID we assigned to the station if we're an AP
1233 * @supp_rates: Bitmap of supported rates (per band) 1230 * @supp_rates: Bitmap of supported rates (per band)
1234 * @ht_cap: HT capabilities of this STA; restricted to our own TX capabilities 1231 * @ht_cap: HT capabilities of this STA; restricted to our own capabilities
1235 * @vht_cap: VHT capabilities of this STA; Not restricting any capabilities 1232 * @vht_cap: VHT capabilities of this STA; restricted to our own capabilities
1236 * of remote STA. Taking as is.
1237 * @wme: indicates whether the STA supports WME. Only valid during AP-mode. 1233 * @wme: indicates whether the STA supports WME. Only valid during AP-mode.
1238 * @drv_priv: data area for driver use, will always be aligned to 1234 * @drv_priv: data area for driver use, will always be aligned to
1239 * sizeof(void *), size is determined in hw information. 1235 * sizeof(void *), size is determined in hw information.
@@ -2135,6 +2131,24 @@ enum ieee80211_rate_control_changed {
2135}; 2131};
2136 2132
2137/** 2133/**
2134 * enum ieee80211_roc_type - remain on channel type
2135 *
 2136 * With support for multi-channel contexts and multi-channel operations,
 2137 * remain-on-channel operations might be limited/deferred/aborted by other
 2138 * flows/operations that have higher priority (and vice versa).
 2139 * Specifying the ROC type lets devices prioritize ROC operations
 2140 * relative to those other operations/flows.
2141 *
2142 * @IEEE80211_ROC_TYPE_NORMAL: There are no special requirements for this ROC.
2143 * @IEEE80211_ROC_TYPE_MGMT_TX: The remain on channel request is required
 2144 * for sending management frames off channel.
2145 */
2146enum ieee80211_roc_type {
2147 IEEE80211_ROC_TYPE_NORMAL = 0,
2148 IEEE80211_ROC_TYPE_MGMT_TX,
2149};
2150
2151/**
2138 * struct ieee80211_ops - callbacks from mac80211 to the driver 2152 * struct ieee80211_ops - callbacks from mac80211 to the driver
2139 * 2153 *
2140 * This structure contains various callbacks that the driver may 2154 * This structure contains various callbacks that the driver may
@@ -2687,7 +2701,8 @@ struct ieee80211_ops {
2687 int (*remain_on_channel)(struct ieee80211_hw *hw, 2701 int (*remain_on_channel)(struct ieee80211_hw *hw,
2688 struct ieee80211_vif *vif, 2702 struct ieee80211_vif *vif,
2689 struct ieee80211_channel *chan, 2703 struct ieee80211_channel *chan,
2690 int duration); 2704 int duration,
2705 enum ieee80211_roc_type type);
2691 int (*cancel_remain_on_channel)(struct ieee80211_hw *hw); 2706 int (*cancel_remain_on_channel)(struct ieee80211_hw *hw);
2692 int (*set_ringparam)(struct ieee80211_hw *hw, u32 tx, u32 rx); 2707 int (*set_ringparam)(struct ieee80211_hw *hw, u32 tx, u32 rx);
2693 void (*get_ringparam)(struct ieee80211_hw *hw, 2708 void (*get_ringparam)(struct ieee80211_hw *hw,
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index de644bcd8613..b17697827482 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -17,6 +17,7 @@
17#include <net/netns/ipv6.h> 17#include <net/netns/ipv6.h>
18#include <net/netns/sctp.h> 18#include <net/netns/sctp.h>
19#include <net/netns/dccp.h> 19#include <net/netns/dccp.h>
20#include <net/netns/netfilter.h>
20#include <net/netns/x_tables.h> 21#include <net/netns/x_tables.h>
21#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 22#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
22#include <net/netns/conntrack.h> 23#include <net/netns/conntrack.h>
@@ -94,6 +95,7 @@ struct net {
94 struct netns_dccp dccp; 95 struct netns_dccp dccp;
95#endif 96#endif
96#ifdef CONFIG_NETFILTER 97#ifdef CONFIG_NETFILTER
98 struct netns_nf nf;
97 struct netns_xt xt; 99 struct netns_xt xt;
98#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) 100#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
99 struct netns_ct ct; 101 struct netns_ct ct;
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index 930275fa2ea6..fb2b6234e937 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -27,6 +27,7 @@ extern unsigned int nf_conntrack_in(struct net *net,
27 27
28extern int nf_conntrack_init_net(struct net *net); 28extern int nf_conntrack_init_net(struct net *net);
29extern void nf_conntrack_cleanup_net(struct net *net); 29extern void nf_conntrack_cleanup_net(struct net *net);
30extern void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list);
30 31
31extern int nf_conntrack_proto_pernet_init(struct net *net); 32extern int nf_conntrack_proto_pernet_init(struct net *net);
32extern void nf_conntrack_proto_pernet_fini(struct net *net); 33extern void nf_conntrack_proto_pernet_fini(struct net *net);
diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h
index e991bd0a27af..31f1fb9eb784 100644
--- a/include/net/netfilter/nf_log.h
+++ b/include/net/netfilter/nf_log.h
@@ -49,12 +49,18 @@ struct nf_logger {
49int nf_log_register(u_int8_t pf, struct nf_logger *logger); 49int nf_log_register(u_int8_t pf, struct nf_logger *logger);
50void nf_log_unregister(struct nf_logger *logger); 50void nf_log_unregister(struct nf_logger *logger);
51 51
52int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger); 52void nf_log_set(struct net *net, u_int8_t pf,
53void nf_log_unbind_pf(u_int8_t pf); 53 const struct nf_logger *logger);
54void nf_log_unset(struct net *net, const struct nf_logger *logger);
55
56int nf_log_bind_pf(struct net *net, u_int8_t pf,
57 const struct nf_logger *logger);
58void nf_log_unbind_pf(struct net *net, u_int8_t pf);
54 59
55/* Calls the registered backend logging function */ 60/* Calls the registered backend logging function */
56__printf(7, 8) 61__printf(8, 9)
57void nf_log_packet(u_int8_t pf, 62void nf_log_packet(struct net *net,
63 u_int8_t pf,
58 unsigned int hooknum, 64 unsigned int hooknum,
59 const struct sk_buff *skb, 65 const struct sk_buff *skb,
60 const struct net_device *in, 66 const struct net_device *in,
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 1242f371718b..005e2c2e39a9 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -71,6 +71,7 @@ struct netns_ipv6 {
71 struct fib_rules_ops *mr6_rules_ops; 71 struct fib_rules_ops *mr6_rules_ops;
72#endif 72#endif
73#endif 73#endif
74 atomic_t dev_addr_genid;
74}; 75};
75 76
76#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) 77#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h
new file mode 100644
index 000000000000..88740024ccf3
--- /dev/null
+++ b/include/net/netns/netfilter.h
@@ -0,0 +1,18 @@
1#ifndef __NETNS_NETFILTER_H
2#define __NETNS_NETFILTER_H
3
4#include <linux/proc_fs.h>
5#include <linux/netfilter.h>
6
7struct nf_logger;
8
9struct netns_nf {
10#if defined CONFIG_PROC_FS
11 struct proc_dir_entry *proc_netfilter;
12#endif
13 const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO];
14#ifdef CONFIG_SYSCTL
15 struct ctl_table_header *nf_log_dir_header;
16#endif
17};
18#endif
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index a51dbd17c2de..9069e65c1c56 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -27,19 +27,13 @@ struct sk_buff;
27struct dst_entry; 27struct dst_entry;
28struct proto; 28struct proto;
29 29
30/* empty to "strongly type" an otherwise void parameter.
31 */
32struct request_values {
33};
34
35struct request_sock_ops { 30struct request_sock_ops {
36 int family; 31 int family;
37 int obj_size; 32 int obj_size;
38 struct kmem_cache *slab; 33 struct kmem_cache *slab;
39 char *slab_name; 34 char *slab_name;
40 int (*rtx_syn_ack)(struct sock *sk, 35 int (*rtx_syn_ack)(struct sock *sk,
41 struct request_sock *req, 36 struct request_sock *req);
42 struct request_values *rvp);
43 void (*send_ack)(struct sock *sk, struct sk_buff *skb, 37 void (*send_ack)(struct sock *sk, struct sk_buff *skb,
44 struct request_sock *req); 38 struct request_sock *req);
45 void (*send_reset)(struct sock *sk, 39 void (*send_reset)(struct sock *sk,
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 5a15fabd6a75..702664833a53 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -4,7 +4,7 @@
4#include <linux/rtnetlink.h> 4#include <linux/rtnetlink.h>
5#include <net/netlink.h> 5#include <net/netlink.h>
6 6
7typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *, void *); 7typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *);
8typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *); 8typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *);
9typedef u16 (*rtnl_calcit_func)(struct sk_buff *, struct nlmsghdr *); 9typedef u16 (*rtnl_calcit_func)(struct sk_buff *, struct nlmsghdr *);
10 10
diff --git a/include/net/sock.h b/include/net/sock.h
index 14f6e9d19dc7..08f05f964737 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -667,6 +667,7 @@ enum sock_flags {
667 * user-space instead. 667 * user-space instead.
668 */ 668 */
669 SOCK_FILTER_LOCKED, /* Filter cannot be changed anymore */ 669 SOCK_FILTER_LOCKED, /* Filter cannot be changed anymore */
670 SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
670}; 671};
671 672
672static inline void sock_copy_flags(struct sock *nsk, struct sock *osk) 673static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index cf0694d4ad60..4475aaf0af57 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -179,7 +179,6 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
179#define TCPOPT_SACK 5 /* SACK Block */ 179#define TCPOPT_SACK 5 /* SACK Block */
180#define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */ 180#define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */
181#define TCPOPT_MD5SIG 19 /* MD5 Signature (RFC2385) */ 181#define TCPOPT_MD5SIG 19 /* MD5 Signature (RFC2385) */
182#define TCPOPT_COOKIE 253 /* Cookie extension (experimental) */
183#define TCPOPT_EXP 254 /* Experimental */ 182#define TCPOPT_EXP 254 /* Experimental */
184/* Magic number to be after the option value for sharing TCP 183/* Magic number to be after the option value for sharing TCP
185 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt 184 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
@@ -273,7 +272,6 @@ extern int sysctl_tcp_app_win;
273extern int sysctl_tcp_adv_win_scale; 272extern int sysctl_tcp_adv_win_scale;
274extern int sysctl_tcp_tw_reuse; 273extern int sysctl_tcp_tw_reuse;
275extern int sysctl_tcp_frto; 274extern int sysctl_tcp_frto;
276extern int sysctl_tcp_frto_response;
277extern int sysctl_tcp_low_latency; 275extern int sysctl_tcp_low_latency;
278extern int sysctl_tcp_dma_copybreak; 276extern int sysctl_tcp_dma_copybreak;
279extern int sysctl_tcp_nometrics_save; 277extern int sysctl_tcp_nometrics_save;
@@ -284,7 +282,6 @@ extern int sysctl_tcp_base_mss;
284extern int sysctl_tcp_workaround_signed_windows; 282extern int sysctl_tcp_workaround_signed_windows;
285extern int sysctl_tcp_slow_start_after_idle; 283extern int sysctl_tcp_slow_start_after_idle;
286extern int sysctl_tcp_max_ssthresh; 284extern int sysctl_tcp_max_ssthresh;
287extern int sysctl_tcp_cookie_size;
288extern int sysctl_tcp_thin_linear_timeouts; 285extern int sysctl_tcp_thin_linear_timeouts;
289extern int sysctl_tcp_thin_dupack; 286extern int sysctl_tcp_thin_dupack;
290extern int sysctl_tcp_early_retrans; 287extern int sysctl_tcp_early_retrans;
@@ -425,8 +422,6 @@ extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
425 bool fastopen); 422 bool fastopen);
426extern int tcp_child_process(struct sock *parent, struct sock *child, 423extern int tcp_child_process(struct sock *parent, struct sock *child,
427 struct sk_buff *skb); 424 struct sk_buff *skb);
428extern bool tcp_use_frto(struct sock *sk);
429extern void tcp_enter_frto(struct sock *sk);
430extern void tcp_enter_loss(struct sock *sk, int how); 425extern void tcp_enter_loss(struct sock *sk, int how);
431extern void tcp_clear_retrans(struct tcp_sock *tp); 426extern void tcp_clear_retrans(struct tcp_sock *tp);
432extern void tcp_update_metrics(struct sock *sk); 427extern void tcp_update_metrics(struct sock *sk);
@@ -454,7 +449,7 @@ extern void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
454extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, 449extern int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
455 size_t len, int nonblock, int flags, int *addr_len); 450 size_t len, int nonblock, int flags, int *addr_len);
456extern void tcp_parse_options(const struct sk_buff *skb, 451extern void tcp_parse_options(const struct sk_buff *skb,
457 struct tcp_options_received *opt_rx, const u8 **hvpp, 452 struct tcp_options_received *opt_rx,
458 int estab, struct tcp_fastopen_cookie *foc); 453 int estab, struct tcp_fastopen_cookie *foc);
459extern const u8 *tcp_parse_md5sig_option(const struct tcphdr *th); 454extern const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
460 455
@@ -476,7 +471,6 @@ extern int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr,
476extern int tcp_connect(struct sock *sk); 471extern int tcp_connect(struct sock *sk);
477extern struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, 472extern struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
478 struct request_sock *req, 473 struct request_sock *req,
479 struct request_values *rvp,
480 struct tcp_fastopen_cookie *foc); 474 struct tcp_fastopen_cookie *foc);
481extern int tcp_disconnect(struct sock *sk, int flags); 475extern int tcp_disconnect(struct sock *sk, int flags);
482 476
@@ -543,6 +537,8 @@ extern bool tcp_syn_flood_action(struct sock *sk,
543extern void tcp_push_one(struct sock *, unsigned int mss_now); 537extern void tcp_push_one(struct sock *, unsigned int mss_now);
544extern void tcp_send_ack(struct sock *sk); 538extern void tcp_send_ack(struct sock *sk);
545extern void tcp_send_delayed_ack(struct sock *sk); 539extern void tcp_send_delayed_ack(struct sock *sk);
540extern void tcp_send_loss_probe(struct sock *sk);
541extern bool tcp_schedule_loss_probe(struct sock *sk);
546 542
547/* tcp_input.c */ 543/* tcp_input.c */
548extern void tcp_cwnd_application_limited(struct sock *sk); 544extern void tcp_cwnd_application_limited(struct sock *sk);
@@ -756,7 +752,6 @@ enum tcp_ca_event {
756 CA_EVENT_TX_START, /* first transmit when no packets in flight */ 752 CA_EVENT_TX_START, /* first transmit when no packets in flight */
757 CA_EVENT_CWND_RESTART, /* congestion window restart */ 753 CA_EVENT_CWND_RESTART, /* congestion window restart */
758 CA_EVENT_COMPLETE_CWR, /* end of congestion recovery */ 754 CA_EVENT_COMPLETE_CWR, /* end of congestion recovery */
759 CA_EVENT_FRTO, /* fast recovery timeout */
760 CA_EVENT_LOSS, /* loss timeout */ 755 CA_EVENT_LOSS, /* loss timeout */
761 CA_EVENT_FAST_ACK, /* in sequence ack */ 756 CA_EVENT_FAST_ACK, /* in sequence ack */
762 CA_EVENT_SLOW_ACK, /* other ack */ 757 CA_EVENT_SLOW_ACK, /* other ack */
@@ -873,8 +868,8 @@ static inline void tcp_enable_fack(struct tcp_sock *tp)
873static inline void tcp_enable_early_retrans(struct tcp_sock *tp) 868static inline void tcp_enable_early_retrans(struct tcp_sock *tp)
874{ 869{
875 tp->do_early_retrans = sysctl_tcp_early_retrans && 870 tp->do_early_retrans = sysctl_tcp_early_retrans &&
876 !sysctl_tcp_thin_dupack && sysctl_tcp_reordering == 3; 871 sysctl_tcp_early_retrans < 4 && !sysctl_tcp_thin_dupack &&
877 tp->early_retrans_delayed = 0; 872 sysctl_tcp_reordering == 3;
878} 873}
879 874
880static inline void tcp_disable_early_retrans(struct tcp_sock *tp) 875static inline void tcp_disable_early_retrans(struct tcp_sock *tp)
@@ -1030,50 +1025,7 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp)
1030#endif 1025#endif
1031} 1026}
1032 1027
1033/* Packet is added to VJ-style prequeue for processing in process 1028extern bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
1034 * context, if a reader task is waiting. Apparently, this exciting
1035 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1036 * failed somewhere. Latency? Burstiness? Well, at least now we will
1037 * see, why it failed. 8)8) --ANK
1038 *
1039 * NOTE: is this not too big to inline?
1040 */
1041static inline bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1042{
1043 struct tcp_sock *tp = tcp_sk(sk);
1044
1045 if (sysctl_tcp_low_latency || !tp->ucopy.task)
1046 return false;
1047
1048 if (skb->len <= tcp_hdrlen(skb) &&
1049 skb_queue_len(&tp->ucopy.prequeue) == 0)
1050 return false;
1051
1052 __skb_queue_tail(&tp->ucopy.prequeue, skb);
1053 tp->ucopy.memory += skb->truesize;
1054 if (tp->ucopy.memory > sk->sk_rcvbuf) {
1055 struct sk_buff *skb1;
1056
1057 BUG_ON(sock_owned_by_user(sk));
1058
1059 while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1060 sk_backlog_rcv(sk, skb1);
1061 NET_INC_STATS_BH(sock_net(sk),
1062 LINUX_MIB_TCPPREQUEUEDROPPED);
1063 }
1064
1065 tp->ucopy.memory = 0;
1066 } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1067 wake_up_interruptible_sync_poll(sk_sleep(sk),
1068 POLLIN | POLLRDNORM | POLLRDBAND);
1069 if (!inet_csk_ack_scheduled(sk))
1070 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1071 (3 * tcp_rto_min(sk)) / 4,
1072 TCP_RTO_MAX);
1073 }
1074 return true;
1075}
1076
1077 1029
1078#undef STATE_TRACE 1030#undef STATE_TRACE
1079 1031
@@ -1630,91 +1582,6 @@ struct tcp_request_sock_ops {
1630#endif 1582#endif
1631}; 1583};
1632 1584
1633/* Using SHA1 for now, define some constants.
1634 */
1635#define COOKIE_DIGEST_WORDS (SHA_DIGEST_WORDS)
1636#define COOKIE_MESSAGE_WORDS (SHA_MESSAGE_BYTES / 4)
1637#define COOKIE_WORKSPACE_WORDS (COOKIE_DIGEST_WORDS + COOKIE_MESSAGE_WORDS)
1638
1639extern int tcp_cookie_generator(u32 *bakery);
1640
1641/**
1642 * struct tcp_cookie_values - each socket needs extra space for the
1643 * cookies, together with (optional) space for any SYN data.
1644 *
1645 * A tcp_sock contains a pointer to the current value, and this is
1646 * cloned to the tcp_timewait_sock.
1647 *
1648 * @cookie_pair: variable data from the option exchange.
1649 *
1650 * @cookie_desired: user specified tcpct_cookie_desired. Zero
1651 * indicates default (sysctl_tcp_cookie_size).
1652 * After cookie sent, remembers size of cookie.
1653 * Range 0, TCP_COOKIE_MIN to TCP_COOKIE_MAX.
1654 *
1655 * @s_data_desired: user specified tcpct_s_data_desired. When the
1656 * constant payload is specified (@s_data_constant),
1657 * holds its length instead.
1658 * Range 0 to TCP_MSS_DESIRED.
1659 *
1660 * @s_data_payload: constant data that is to be included in the
1661 * payload of SYN or SYNACK segments when the
1662 * cookie option is present.
1663 */
1664struct tcp_cookie_values {
1665 struct kref kref;
1666 u8 cookie_pair[TCP_COOKIE_PAIR_SIZE];
1667 u8 cookie_pair_size;
1668 u8 cookie_desired;
1669 u16 s_data_desired:11,
1670 s_data_constant:1,
1671 s_data_in:1,
1672 s_data_out:1,
1673 s_data_unused:2;
1674 u8 s_data_payload[0];
1675};
1676
1677static inline void tcp_cookie_values_release(struct kref *kref)
1678{
1679 kfree(container_of(kref, struct tcp_cookie_values, kref));
1680}
1681
1682/* The length of constant payload data. Note that s_data_desired is
1683 * overloaded, depending on s_data_constant: either the length of constant
1684 * data (returned here) or the limit on variable data.
1685 */
1686static inline int tcp_s_data_size(const struct tcp_sock *tp)
1687{
1688 return (tp->cookie_values != NULL && tp->cookie_values->s_data_constant)
1689 ? tp->cookie_values->s_data_desired
1690 : 0;
1691}
1692
1693/**
1694 * struct tcp_extend_values - tcp_ipv?.c to tcp_output.c workspace.
1695 *
1696 * As tcp_request_sock has already been extended in other places, the
1697 * only remaining method is to pass stack values along as function
1698 * parameters. These parameters are not needed after sending SYNACK.
1699 *
1700 * @cookie_bakery: cryptographic secret and message workspace.
1701 *
1702 * @cookie_plus: bytes in authenticator/cookie option, copied from
1703 * struct tcp_options_received (above).
1704 */
1705struct tcp_extend_values {
1706 struct request_values rv;
1707 u32 cookie_bakery[COOKIE_WORKSPACE_WORDS];
1708 u8 cookie_plus:6,
1709 cookie_out_never:1,
1710 cookie_in_always:1;
1711};
1712
1713static inline struct tcp_extend_values *tcp_xv(struct request_values *rvp)
1714{
1715 return (struct tcp_extend_values *)rvp;
1716}
1717
1718extern void tcp_v4_init(void); 1585extern void tcp_v4_init(void);
1719extern void tcp_init(void); 1586extern void tcp_init(void);
1720 1587
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index 4ef3acbba5da..c5d2e3a1cf68 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -74,4 +74,6 @@
74 74
75#define SO_LOCK_FILTER 44 75#define SO_LOCK_FILTER 44
76 76
77#define SO_SELECT_ERR_QUEUE 45
78
77#endif /* __ASM_GENERIC_SOCKET_H */ 79#endif /* __ASM_GENERIC_SOCKET_H */
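
SO_SELECT_ERR_QUEUE pairs with the SOCK_SELECT_ERR_QUEUE flag added to include/net/sock.h above: when set, queued error-queue data (TX timestamps, ICMP errors) also drives select()/poll() wakeups. A hedged usage sketch, assuming headers that already define the new constant:

#include <sys/socket.h>

static int enable_select_err_queue(int fd)
{
	int on = 1;

	return setsockopt(fd, SOL_SOCKET, SO_SELECT_ERR_QUEUE,
			  &on, sizeof(on));
}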
diff --git a/include/uapi/linux/cn_proc.h b/include/uapi/linux/cn_proc.h
index 0d7b49973bb3..f6c271035bbd 100644
--- a/include/uapi/linux/cn_proc.h
+++ b/include/uapi/linux/cn_proc.h
@@ -56,7 +56,9 @@ struct proc_event {
56 PROC_EVENT_PTRACE = 0x00000100, 56 PROC_EVENT_PTRACE = 0x00000100,
57 PROC_EVENT_COMM = 0x00000200, 57 PROC_EVENT_COMM = 0x00000200,
58 /* "next" should be 0x00000400 */ 58 /* "next" should be 0x00000400 */
59 /* "last" is the last process event: exit */ 59 /* "last" is the last process event: exit,
 60 * while "next to last" is the coredump event */
61 PROC_EVENT_COREDUMP = 0x40000000,
60 PROC_EVENT_EXIT = 0x80000000 62 PROC_EVENT_EXIT = 0x80000000
61 } what; 63 } what;
62 __u32 cpu; 64 __u32 cpu;
@@ -110,11 +112,17 @@ struct proc_event {
110 char comm[16]; 112 char comm[16];
111 } comm; 113 } comm;
112 114
115 struct coredump_proc_event {
116 __kernel_pid_t process_pid;
117 __kernel_pid_t process_tgid;
118 } coredump;
119
113 struct exit_proc_event { 120 struct exit_proc_event {
114 __kernel_pid_t process_pid; 121 __kernel_pid_t process_pid;
115 __kernel_pid_t process_tgid; 122 __kernel_pid_t process_tgid;
116 __u32 exit_code, exit_signal; 123 __u32 exit_code, exit_signal;
117 } exit; 124 } exit;
125
118 } event_data; 126 } event_data;
119}; 127};
120 128
diff --git a/include/uapi/linux/filter.h b/include/uapi/linux/filter.h
index 9cfde6941099..8eb9ccaa5b48 100644
--- a/include/uapi/linux/filter.h
+++ b/include/uapi/linux/filter.h
@@ -129,7 +129,8 @@ struct sock_fprog { /* Required for SO_ATTACH_FILTER. */
129#define SKF_AD_ALU_XOR_X 40 129#define SKF_AD_ALU_XOR_X 40
130#define SKF_AD_VLAN_TAG 44 130#define SKF_AD_VLAN_TAG 44
131#define SKF_AD_VLAN_TAG_PRESENT 48 131#define SKF_AD_VLAN_TAG_PRESENT 48
132#define SKF_AD_MAX 52 132#define SKF_AD_PAY_OFFSET 52
133#define SKF_AD_MAX 56
133#define SKF_NET_OFF (-0x100000) 134#define SKF_NET_OFF (-0x100000)
134#define SKF_LL_OFF (-0x200000) 135#define SKF_LL_OFF (-0x200000)
135 136
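
SKF_AD_PAY_OFFSET gives classic BPF access to the computed payload start via the ancillary-load convention (an absolute load at SKF_AD_OFF plus the index). A short hedged sketch of a filter using it:

#include <linux/filter.h>

/* Load the payload-start offset into the accumulator, then return it
 * as the snap length: the packet is accepted, truncated to its
 * headers. Presented as a sketch, not a tested filter. */
struct sock_filter prog[] = {
	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_PAY_OFFSET),
	BPF_STMT(BPF_RET | BPF_A, 0),
};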
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index 798032d01112..ade07f1c491a 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -94,6 +94,9 @@
94#define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */ 94#define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */
95#define ETH_P_AF_IUCV 0xFBFB /* IBM af_iucv [ NOT AN OFFICIALLY REGISTERED ID ] */ 95#define ETH_P_AF_IUCV 0xFBFB /* IBM af_iucv [ NOT AN OFFICIALLY REGISTERED ID ] */
96 96
 97#define ETH_P_802_3_MIN 0x0600 /* If the value in the ethernet type is at least this value
 98 * then the frame is Ethernet II. Else it is 802.3 */
99
97/* 100/*
98 * Non DIX types. Won't clash for 1500 types. 101 * Non DIX types. Won't clash for 1500 types.
99 */ 102 */
diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h
index f9a60375f0d0..8136658ea477 100644
--- a/include/uapi/linux/if_packet.h
+++ b/include/uapi/linux/if_packet.h
@@ -55,6 +55,8 @@ struct sockaddr_ll {
55#define PACKET_FANOUT_HASH 0 55#define PACKET_FANOUT_HASH 0
56#define PACKET_FANOUT_LB 1 56#define PACKET_FANOUT_LB 1
57#define PACKET_FANOUT_CPU 2 57#define PACKET_FANOUT_CPU 2
58#define PACKET_FANOUT_ROLLOVER 3
59#define PACKET_FANOUT_FLAG_ROLLOVER 0x1000
58#define PACKET_FANOUT_FLAG_DEFRAG 0x8000 60#define PACKET_FANOUT_FLAG_DEFRAG 0x8000
59 61
60struct tpacket_stats { 62struct tpacket_stats {
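
PACKET_FANOUT_ROLLOVER is both a standalone mode and, via PACKET_FANOUT_FLAG_ROLLOVER, a fallback modifier for the existing modes: if the socket chosen by the primary policy has a full backlog, delivery rolls over to the next socket in the group. A hedged sketch of joining a group with hash-plus-rollover (the id/mode packing in the low/high 16 bits is the existing PACKET_FANOUT ABI):

#include <linux/if_packet.h>
#include <sys/socket.h>

static int join_fanout(int fd, unsigned int group_id)
{
	unsigned int arg = group_id |
		((PACKET_FANOUT_HASH | PACKET_FANOUT_FLAG_ROLLOVER) << 16);

	return setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
			  &arg, sizeof(arg));
}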
diff --git a/include/uapi/linux/neighbour.h b/include/uapi/linux/neighbour.h
index adb068c53c4e..f175212420ab 100644
--- a/include/uapi/linux/neighbour.h
+++ b/include/uapi/linux/neighbour.h
@@ -21,6 +21,9 @@ enum {
21 NDA_CACHEINFO, 21 NDA_CACHEINFO,
22 NDA_PROBES, 22 NDA_PROBES,
23 NDA_VLAN, 23 NDA_VLAN,
24 NDA_PORT,
25 NDA_VNI,
26 NDA_IFINDEX,
24 __NDA_MAX 27 __NDA_MAX
25}; 28};
26 29
diff --git a/include/uapi/linux/netfilter/xt_NFQUEUE.h b/include/uapi/linux/netfilter/xt_NFQUEUE.h
index 9eafdbbb401c..8bb5fe657d34 100644
--- a/include/uapi/linux/netfilter/xt_NFQUEUE.h
+++ b/include/uapi/linux/netfilter/xt_NFQUEUE.h
@@ -26,4 +26,13 @@ struct xt_NFQ_info_v2 {
26 __u16 bypass; 26 __u16 bypass;
27}; 27};
28 28
29struct xt_NFQ_info_v3 {
30 __u16 queuenum;
31 __u16 queues_total;
32 __u16 flags;
33#define NFQ_FLAG_BYPASS 0x01 /* for compatibility with v2 */
34#define NFQ_FLAG_CPU_FANOUT 0x02 /* use current CPU (no hashing) */
35#define NFQ_FLAG_MASK 0x03
36};
37
29#endif /* _XT_NFQ_TARGET_H */ 38#endif /* _XT_NFQ_TARGET_H */
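
NFQ_FLAG_CPU_FANOUT selects the verdict queue by the current CPU instead of the flow hash, which keeps packets on the CPU that received them when several queues are configured. A hedged sketch of how a v3 target structure would be filled (userspace normally sets this through iptables rather than by hand):

struct xt_NFQ_info_v3 info = {
	.queuenum     = 0,		/* first queue of the range */
	.queues_total = 4,		/* spread over queues 0..3 */
	.flags        = NFQ_FLAG_CPU_FANOUT,
};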
diff --git a/include/uapi/linux/netfilter_ipv6/ip6t_frag.h b/include/uapi/linux/netfilter_ipv6/ip6t_frag.h
index b47f61b9e082..dfd8bc2268cf 100644
--- a/include/uapi/linux/netfilter_ipv6/ip6t_frag.h
+++ b/include/uapi/linux/netfilter_ipv6/ip6t_frag.h
@@ -4,9 +4,9 @@
4#include <linux/types.h> 4#include <linux/types.h>
5 5
6struct ip6t_frag { 6struct ip6t_frag {
7 __u32 ids[2]; /* Security Parameter Index */ 7 __u32 ids[2]; /* Identification range */
8 __u32 hdrlen; /* Header Length */ 8 __u32 hdrlen; /* Header Length */
9 __u8 flags; /* */ 9 __u8 flags; /* Flags */
10 __u8 invflags; /* Inverse flags */ 10 __u8 invflags; /* Inverse flags */
11}; 11};
12 12
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index 78d5b8a546d6..32a354f67ba4 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -78,7 +78,7 @@ struct nlmsghdr {
78#define NLMSG_ALIGNTO 4U 78#define NLMSG_ALIGNTO 4U
79#define NLMSG_ALIGN(len) ( ((len)+NLMSG_ALIGNTO-1) & ~(NLMSG_ALIGNTO-1) ) 79#define NLMSG_ALIGN(len) ( ((len)+NLMSG_ALIGNTO-1) & ~(NLMSG_ALIGNTO-1) )
80#define NLMSG_HDRLEN ((int) NLMSG_ALIGN(sizeof(struct nlmsghdr))) 80#define NLMSG_HDRLEN ((int) NLMSG_ALIGN(sizeof(struct nlmsghdr)))
81#define NLMSG_LENGTH(len) ((len)+NLMSG_ALIGN(NLMSG_HDRLEN)) 81#define NLMSG_LENGTH(len) ((len) + NLMSG_HDRLEN)
82#define NLMSG_SPACE(len) NLMSG_ALIGN(NLMSG_LENGTH(len)) 82#define NLMSG_SPACE(len) NLMSG_ALIGN(NLMSG_LENGTH(len))
83#define NLMSG_DATA(nlh) ((void*)(((char*)nlh) + NLMSG_LENGTH(0))) 83#define NLMSG_DATA(nlh) ((void*)(((char*)nlh) + NLMSG_LENGTH(0)))
84#define NLMSG_NEXT(nlh,len) ((len) -= NLMSG_ALIGN((nlh)->nlmsg_len), \ 84#define NLMSG_NEXT(nlh,len) ((len) -= NLMSG_ALIGN((nlh)->nlmsg_len), \
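
The NLMSG_LENGTH change is a cleanup, not a behavior change: NLMSG_HDRLEN is already aligned, so NLMSG_ALIGN(NLMSG_HDRLEN) == NLMSG_HDRLEN and both definitions agree. Worked values for reference:

/* With NLMSG_ALIGNTO = 4 and sizeof(struct nlmsghdr) = 16:
 *   NLMSG_HDRLEN    = 16
 *   NLMSG_LENGTH(5) = 21   (header plus 5 payload bytes)
 *   NLMSG_SPACE(5)  = 24   (rounded up to the 4-byte grid)
 */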
diff --git a/include/uapi/linux/netlink_diag.h b/include/uapi/linux/netlink_diag.h
new file mode 100644
index 000000000000..88009a31cd06
--- /dev/null
+++ b/include/uapi/linux/netlink_diag.h
@@ -0,0 +1,42 @@
1#ifndef __NETLINK_DIAG_H__
2#define __NETLINK_DIAG_H__
3
4#include <linux/types.h>
5
6struct netlink_diag_req {
7 __u8 sdiag_family;
8 __u8 sdiag_protocol;
9 __u16 pad;
10 __u32 ndiag_ino;
11 __u32 ndiag_show;
12 __u32 ndiag_cookie[2];
13};
14
15struct netlink_diag_msg {
16 __u8 ndiag_family;
17 __u8 ndiag_type;
18 __u8 ndiag_protocol;
19 __u8 ndiag_state;
20
21 __u32 ndiag_portid;
22 __u32 ndiag_dst_portid;
23 __u32 ndiag_dst_group;
24 __u32 ndiag_ino;
25 __u32 ndiag_cookie[2];
26};
27
28enum {
29 NETLINK_DIAG_MEMINFO,
30 NETLINK_DIAG_GROUPS,
31
32 __NETLINK_DIAG_MAX,
33};
34
35#define NETLINK_DIAG_MAX (__NETLINK_DIAG_MAX - 1)
36
37#define NDIAG_PROTO_ALL ((__u8) ~0)
38
39#define NDIAG_SHOW_MEMINFO 0x00000001 /* show memory info of a socket */
40#define NDIAG_SHOW_GROUPS 0x00000002 /* show groups of a netlink socket */
41
42#endif
diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h
index 7969f46f1bb3..7440bc81a04b 100644
--- a/include/uapi/linux/nfc.h
+++ b/include/uapi/linux/nfc.h
@@ -90,6 +90,8 @@ enum nfc_commands {
90 NFC_CMD_LLC_SET_PARAMS, 90 NFC_CMD_LLC_SET_PARAMS,
91 NFC_CMD_ENABLE_SE, 91 NFC_CMD_ENABLE_SE,
92 NFC_CMD_DISABLE_SE, 92 NFC_CMD_DISABLE_SE,
93 NFC_CMD_LLC_SDREQ,
94 NFC_EVENT_LLC_SDRES,
93/* private: internal use only */ 95/* private: internal use only */
94 __NFC_CMD_AFTER_LAST 96 __NFC_CMD_AFTER_LAST
95}; 97};
@@ -140,11 +142,21 @@ enum nfc_attrs {
140 NFC_ATTR_LLC_PARAM_RW, 142 NFC_ATTR_LLC_PARAM_RW,
141 NFC_ATTR_LLC_PARAM_MIUX, 143 NFC_ATTR_LLC_PARAM_MIUX,
142 NFC_ATTR_SE, 144 NFC_ATTR_SE,
145 NFC_ATTR_LLC_SDP,
143/* private: internal use only */ 146/* private: internal use only */
144 __NFC_ATTR_AFTER_LAST 147 __NFC_ATTR_AFTER_LAST
145}; 148};
146#define NFC_ATTR_MAX (__NFC_ATTR_AFTER_LAST - 1) 149#define NFC_ATTR_MAX (__NFC_ATTR_AFTER_LAST - 1)
147 150
151enum nfc_sdp_attr {
152 NFC_SDP_ATTR_UNSPEC,
153 NFC_SDP_ATTR_URI,
154 NFC_SDP_ATTR_SAP,
155/* private: internal use only */
156 __NFC_SDP_ATTR_AFTER_LAST
157};
158#define NFC_SDP_ATTR_MAX (__NFC_SDP_ATTR_AFTER_LAST - 1)
159
148#define NFC_DEVICE_NAME_MAXSIZE 8 160#define NFC_DEVICE_NAME_MAXSIZE 8
149#define NFC_NFCID1_MAXSIZE 10 161#define NFC_NFCID1_MAXSIZE 10
150#define NFC_SENSB_RES_MAXSIZE 12 162#define NFC_SENSB_RES_MAXSIZE 12
@@ -220,4 +232,8 @@ struct sockaddr_nfc_llcp {
220#define NFC_LLCP_DIRECTION_RX 0x00 232#define NFC_LLCP_DIRECTION_RX 0x00
221#define NFC_LLCP_DIRECTION_TX 0x01 233#define NFC_LLCP_DIRECTION_TX 0x01
222 234
235/* socket option names */
236#define NFC_LLCP_RW 0
237#define NFC_LLCP_MIUX 1
238
223#endif /*__LINUX_NFC_H */ 239#endif /*__LINUX_NFC_H */
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index c46bb016f4e4..79da8710448e 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -36,7 +36,21 @@
36 * The station is still assumed to belong to the AP interface it was added 36 * The station is still assumed to belong to the AP interface it was added
37 * to. 37 * to.
38 * 38 *
39 * TODO: need more info? 39 * Station handling varies per interface type and depending on the driver's
40 * capabilities.
41 *
42 * For drivers supporting TDLS with external setup (WIPHY_FLAG_SUPPORTS_TDLS
43 * and WIPHY_FLAG_TDLS_EXTERNAL_SETUP), the station lifetime is as follows:
 44 * - a setup station entry is added, not yet authorized, without any rate
 45 * or capability information; it exists only to avoid race conditions
46 * - when the TDLS setup is done, a single NL80211_CMD_SET_STATION is valid
47 * to add rate and capability information to the station and at the same
48 * time mark it authorized.
49 * - %NL80211_TDLS_ENABLE_LINK is then used
50 * - after this, the only valid operation is to remove it by tearing down
51 * the TDLS link (%NL80211_TDLS_DISABLE_LINK)
52 *
53 * TODO: need more info for other interface types
40 */ 54 */
41 55
42/** 56/**
@@ -499,9 +513,11 @@
499 * @NL80211_CMD_NEW_PEER_CANDIDATE: Notification on the reception of a 513 * @NL80211_CMD_NEW_PEER_CANDIDATE: Notification on the reception of a
500 * beacon or probe response from a compatible mesh peer. This is only 514 * beacon or probe response from a compatible mesh peer. This is only
501 * sent while no station information (sta_info) exists for the new peer 515 * sent while no station information (sta_info) exists for the new peer
502 * candidate and when @NL80211_MESH_SETUP_USERSPACE_AUTH is set. On 516 * candidate and when @NL80211_MESH_SETUP_USERSPACE_AUTH,
503 * reception of this notification, userspace may decide to create a new 517 * @NL80211_MESH_SETUP_USERSPACE_AMPE, or
504 * station (@NL80211_CMD_NEW_STATION). To stop this notification from 518 * @NL80211_MESH_SETUP_USERSPACE_MPM is set. On reception of this
519 * notification, userspace may decide to create a new station
520 * (@NL80211_CMD_NEW_STATION). To stop this notification from
505 * reoccurring, the userspace authentication daemon may want to create the 521 * reoccurring, the userspace authentication daemon may want to create the
506 * new station with the AUTHENTICATED flag unset and maybe change it later 522 * new station with the AUTHENTICATED flag unset and maybe change it later
507 * depending on the authentication result. 523 * depending on the authentication result.
@@ -611,6 +627,18 @@
611 * %NL80211_ATTR_RADAR_EVENT is used to inform about the type of the 627 * %NL80211_ATTR_RADAR_EVENT is used to inform about the type of the
612 * event. 628 * event.
613 * 629 *
630 * @NL80211_CMD_GET_PROTOCOL_FEATURES: Get global nl80211 protocol features,
631 * i.e. features for the nl80211 protocol rather than device features.
632 * Returns the features in the %NL80211_ATTR_PROTOCOL_FEATURES bitmap.
633 *
634 * @NL80211_CMD_UPDATE_FT_IES: Pass down the most up-to-date Fast Transition
635 * Information Element to the WLAN driver
636 *
637 * @NL80211_CMD_FT_EVENT: Send a Fast transition event from the WLAN driver
638 * to the supplicant. This will carry the target AP's MAC address along
639 * with the relevant Information Elements. This event is used to report
640 * received FT IEs (MDIE, FTIE, RSN IE, TIE, RICIE).
641 *
614 * @NL80211_CMD_MAX: highest used command number 642 * @NL80211_CMD_MAX: highest used command number
615 * @__NL80211_CMD_AFTER_LAST: internal use 643 * @__NL80211_CMD_AFTER_LAST: internal use
616 */ 644 */
@@ -765,6 +793,11 @@ enum nl80211_commands {
765 793
766 NL80211_CMD_RADAR_DETECT, 794 NL80211_CMD_RADAR_DETECT,
767 795
796 NL80211_CMD_GET_PROTOCOL_FEATURES,
797
798 NL80211_CMD_UPDATE_FT_IES,
799 NL80211_CMD_FT_EVENT,
800
768 /* add new commands above here */ 801 /* add new commands above here */
769 802
770 /* used to define NL80211_CMD_MAX below */ 803 /* used to define NL80211_CMD_MAX below */
@@ -884,7 +917,8 @@ enum nl80211_commands {
884 * consisting of a nested array. 917 * consisting of a nested array.
885 * 918 *
886 * @NL80211_ATTR_MESH_ID: mesh id (1-32 bytes). 919 * @NL80211_ATTR_MESH_ID: mesh id (1-32 bytes).
887 * @NL80211_ATTR_STA_PLINK_ACTION: action to perform on the mesh peer link. 920 * @NL80211_ATTR_STA_PLINK_ACTION: action to perform on the mesh peer link
921 * (see &enum nl80211_plink_action).
888 * @NL80211_ATTR_MPATH_NEXT_HOP: MAC address of the next hop for a mesh path. 922 * @NL80211_ATTR_MPATH_NEXT_HOP: MAC address of the next hop for a mesh path.
889 * @NL80211_ATTR_MPATH_INFO: information about a mesh_path, part of mesh path 923 * @NL80211_ATTR_MPATH_INFO: information about a mesh_path, part of mesh path
890 * info given for %NL80211_CMD_GET_MPATH, nested attribute described at 924 * info given for %NL80211_CMD_GET_MPATH, nested attribute described at
@@ -1167,10 +1201,10 @@ enum nl80211_commands {
1167 * @NL80211_ATTR_SUPPORT_MESH_AUTH: Currently, this means the underlying driver 1201 * @NL80211_ATTR_SUPPORT_MESH_AUTH: Currently, this means the underlying driver
1168 * allows auth frames in a mesh to be passed to userspace for processing via 1202 * allows auth frames in a mesh to be passed to userspace for processing via
1169 * the @NL80211_MESH_SETUP_USERSPACE_AUTH flag. 1203 * the @NL80211_MESH_SETUP_USERSPACE_AUTH flag.
1170 * @NL80211_ATTR_STA_PLINK_STATE: The state of a mesh peer link as 1204 * @NL80211_ATTR_STA_PLINK_STATE: The state of a mesh peer link as defined in
1171 * defined in &enum nl80211_plink_state. Used when userspace is 1205 * &enum nl80211_plink_state. Used when userspace is driving the peer link
1172 * driving the peer link management state machine. 1206 * management state machine. @NL80211_MESH_SETUP_USERSPACE_AMPE or
1173 * @NL80211_MESH_SETUP_USERSPACE_AMPE must be enabled. 1207 * @NL80211_MESH_SETUP_USERSPACE_MPM must be enabled.
1174 * 1208 *
1175 * @NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED: indicates, as part of the wiphy 1209 * @NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED: indicates, as part of the wiphy
1176 * capabilities, the supported WoWLAN triggers 1210 * capabilities, the supported WoWLAN triggers
@@ -1368,6 +1402,18 @@ enum nl80211_commands {
1368 * advertised to the driver, e.g., to enable TDLS off channel operations 1402 * advertised to the driver, e.g., to enable TDLS off channel operations
1369 * and PU-APSD. 1403 * and PU-APSD.
1370 * 1404 *
1405 * @NL80211_ATTR_PROTOCOL_FEATURES: global nl80211 feature flags, see
1406 * &enum nl80211_protocol_features, the attribute is a u32.
1407 *
1408 * @NL80211_ATTR_SPLIT_WIPHY_DUMP: flag attribute, userspace supports
1409 * receiving the data for a single wiphy split across multiple
1410 * messages, given with wiphy dump message
1411 *
1412 * @NL80211_ATTR_MDID: Mobility Domain Identifier
1413 *
1414 * @NL80211_ATTR_IE_RIC: Resource Information Container Information
1415 * Element
1416 *
1371 * @NL80211_ATTR_MAX: highest attribute number currently defined 1417 * @NL80211_ATTR_MAX: highest attribute number currently defined
1372 * @__NL80211_ATTR_AFTER_LAST: internal use 1418 * @__NL80211_ATTR_AFTER_LAST: internal use
1373 */ 1419 */
@@ -1654,6 +1700,15 @@ enum nl80211_attrs {
1654 NL80211_ATTR_STA_CAPABILITY, 1700 NL80211_ATTR_STA_CAPABILITY,
1655 NL80211_ATTR_STA_EXT_CAPABILITY, 1701 NL80211_ATTR_STA_EXT_CAPABILITY,
1656 1702
1703 NL80211_ATTR_PROTOCOL_FEATURES,
1704 NL80211_ATTR_SPLIT_WIPHY_DUMP,
1705
1706 NL80211_ATTR_DISABLE_VHT,
1707 NL80211_ATTR_VHT_CAPABILITY_MASK,
1708
1709 NL80211_ATTR_MDID,
1710 NL80211_ATTR_IE_RIC,
1711
1657 /* add attributes here, update the policy in nl80211.c */ 1712 /* add attributes here, update the policy in nl80211.c */
1658 1713
1659 __NL80211_ATTR_AFTER_LAST, 1714 __NL80211_ATTR_AFTER_LAST,
@@ -2412,8 +2467,10 @@ enum nl80211_mesh_power_mode {
2412 * @NL80211_MESHCONF_TTL: specifies the value of TTL field set at a source mesh 2467 * @NL80211_MESHCONF_TTL: specifies the value of TTL field set at a source mesh
2413 * point. 2468 * point.
2414 * 2469 *
2415 * @NL80211_MESHCONF_AUTO_OPEN_PLINKS: whether we should automatically 2470 * @NL80211_MESHCONF_AUTO_OPEN_PLINKS: whether we should automatically open
2416 * open peer links when we detect compatible mesh peers. 2471 * peer links when we detect compatible mesh peers. Disabled if
2472 * @NL80211_MESH_SETUP_USERSPACE_MPM or @NL80211_MESH_SETUP_USERSPACE_AMPE is
2473 * set.
2417 * 2474 *
2418 * @NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES: the number of action frames 2475 * @NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES: the number of action frames
2419 * containing a PREQ that an MP can send to a particular destination (path 2476 * containing a PREQ that an MP can send to a particular destination (path
@@ -2559,6 +2616,9 @@ enum nl80211_meshconf_params {
2559 * vendor specific synchronization method or disable it to use the default 2616 * vendor specific synchronization method or disable it to use the default
2560 * neighbor offset synchronization 2617 * neighbor offset synchronization
2561 * 2618 *
2619 * @NL80211_MESH_SETUP_USERSPACE_MPM: Enable this option if userspace will
2620 * implement an MPM which handles peer allocation and state.
2621 *
2562 * @NL80211_MESH_SETUP_ATTR_MAX: highest possible mesh setup attribute number 2622 * @NL80211_MESH_SETUP_ATTR_MAX: highest possible mesh setup attribute number
2563 * 2623 *
2564 * @__NL80211_MESH_SETUP_ATTR_AFTER_LAST: Internal use 2624 * @__NL80211_MESH_SETUP_ATTR_AFTER_LAST: Internal use
@@ -2571,6 +2631,7 @@ enum nl80211_mesh_setup_params {
2571 NL80211_MESH_SETUP_USERSPACE_AUTH, 2631 NL80211_MESH_SETUP_USERSPACE_AUTH,
2572 NL80211_MESH_SETUP_USERSPACE_AMPE, 2632 NL80211_MESH_SETUP_USERSPACE_AMPE,
2573 NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC, 2633 NL80211_MESH_SETUP_ENABLE_VENDOR_SYNC,
2634 NL80211_MESH_SETUP_USERSPACE_MPM,
2574 2635
2575 /* keep last */ 2636 /* keep last */
2576 __NL80211_MESH_SETUP_ATTR_AFTER_LAST, 2637 __NL80211_MESH_SETUP_ATTR_AFTER_LAST,
@@ -3307,6 +3368,23 @@ enum nl80211_plink_state {
3307 MAX_NL80211_PLINK_STATES = NUM_NL80211_PLINK_STATES - 1 3368 MAX_NL80211_PLINK_STATES = NUM_NL80211_PLINK_STATES - 1
3308}; 3369};
3309 3370
3371/**
3372 * enum nl80211_plink_action - actions to perform on mesh peers
3373 *
3374 * @NL80211_PLINK_ACTION_NO_ACTION: perform no action
3375 * @NL80211_PLINK_ACTION_OPEN: start mesh peer link establishment
3376 * @NL80211_PLINK_ACTION_BLOCK: block traffic from this mesh peer
3377 * @NUM_NL80211_PLINK_ACTIONS: number of possible actions
3378 */
3379enum nl80211_plink_action {
3380 NL80211_PLINK_ACTION_NO_ACTION,
3381 NL80211_PLINK_ACTION_OPEN,
3382 NL80211_PLINK_ACTION_BLOCK,
3383
3384 NUM_NL80211_PLINK_ACTIONS,
3385};
3386
3387
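
(As context for the new enum: a minimal userspace sketch, assuming libnl-3's genl helpers, of driving a peer-link action through NL80211_CMD_SET_STATION. The socket setup, error handling, interface index, and peer address are placeholders, not part of this patch.)

#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>

/* Sketch: block a mesh peer from userspace. Error handling and cleanup
 * are omitted; 'ifindex' and 'peer' are hypothetical inputs. */
static int block_mesh_peer(int ifindex, const unsigned char peer[6])
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	int family;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "nl80211");

	genlmsg_put(msg, 0, 0, family, 0, 0, NL80211_CMD_SET_STATION, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
	nla_put(msg, NL80211_ATTR_MAC, 6, peer);
	nla_put_u8(msg, NL80211_ATTR_STA_PLINK_ACTION,
		   NL80211_PLINK_ACTION_BLOCK);

	return nl_send_auto(sk, msg);
}
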
3310#define NL80211_KCK_LEN 16 3388#define NL80211_KCK_LEN 16
3311#define NL80211_KEK_LEN 16 3389#define NL80211_KEK_LEN 16
3312#define NL80211_REPLAY_CTR_LEN 8 3390#define NL80211_REPLAY_CTR_LEN 8
@@ -3456,6 +3534,10 @@ enum nl80211_ap_sme_features {
3456 * stations the authenticated/associated bits have to be set in the mask. 3534 * stations the authenticated/associated bits have to be set in the mask.
3457 * @NL80211_FEATURE_ADVERTISE_CHAN_LIMITS: cfg80211 advertises channel limits 3535 * @NL80211_FEATURE_ADVERTISE_CHAN_LIMITS: cfg80211 advertises channel limits
3458 * (HT40, VHT 80/160 MHz) if this flag is set 3536 * (HT40, VHT 80/160 MHz) if this flag is set
3537 * @NL80211_FEATURE_USERSPACE_MPM: This driver supports a userspace Mesh
3538 * Peering Management entity which may be implemented by registering for
3539 * beacons or NL80211_CMD_NEW_PEER_CANDIDATE events. The mesh beacon is
3540 * still generated by the driver.
3459 */ 3541 */
3460enum nl80211_feature_flags { 3542enum nl80211_feature_flags {
3461 NL80211_FEATURE_SK_TX_STATUS = 1 << 0, 3543 NL80211_FEATURE_SK_TX_STATUS = 1 << 0,
@@ -3474,6 +3556,7 @@ enum nl80211_feature_flags {
3474 /* bit 13 is reserved */ 3556 /* bit 13 is reserved */
3475 NL80211_FEATURE_ADVERTISE_CHAN_LIMITS = 1 << 14, 3557 NL80211_FEATURE_ADVERTISE_CHAN_LIMITS = 1 << 14,
3476 NL80211_FEATURE_FULL_AP_CLIENT_STATE = 1 << 15, 3558 NL80211_FEATURE_FULL_AP_CLIENT_STATE = 1 << 15,
3559 NL80211_FEATURE_USERSPACE_MPM = 1 << 16,
3477}; 3560};
3478 3561
3479/** 3562/**
@@ -3587,4 +3670,16 @@ enum nl80211_dfs_state {
3587 NL80211_DFS_AVAILABLE, 3670 NL80211_DFS_AVAILABLE,
3588}; 3671};
3589 3672
3673/**
3674 * enum nl80211_protocol_features - nl80211 protocol features
3675 * @NL80211_PROTOCOL_FEATURE_SPLIT_WIPHY_DUMP: nl80211 supports splitting
3676 * wiphy dumps (if requested by the application with the attribute
3677 * %NL80211_ATTR_SPLIT_WIPHY_DUMP). Also supported is filtering the
3678 * wiphy dump by %NL80211_ATTR_WIPHY, %NL80211_ATTR_IFINDEX or
3679 * %NL80211_ATTR_WDEV.
3680 */
3681enum nl80211_protocol_features {
3682 NL80211_PROTOCOL_FEATURE_SPLIT_WIPHY_DUMP = 1 << 0,
3683};
3684
3590#endif /* __LINUX_NL80211_H */ 3685#endif /* __LINUX_NL80211_H */
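
(To show how the new protocol-features handshake is meant to be used: a sketch of a libnl-3 reply callback that tests the split-dump bit before attempting a wiphy dump. The callback wiring is abbreviated and not taken from this patch.)

/* Sketch: parse the NL80211_CMD_GET_PROTOCOL_FEATURES reply and record
 * whether split wiphy dumps are supported. */
static int features_cb(struct nl_msg *msg, void *arg)
{
	struct nlattr *attrs[NL80211_ATTR_MAX + 1];
	struct genlmsghdr *gnlh = nlmsg_data(nlmsg_hdr(msg));
	int *split = arg;

	nla_parse(attrs, NL80211_ATTR_MAX, genlmsg_attrdata(gnlh, 0),
		  genlmsg_attrlen(gnlh, 0), NULL);

	if (attrs[NL80211_ATTR_PROTOCOL_FEATURES]) {
		uint32_t feat;

		feat = nla_get_u32(attrs[NL80211_ATTR_PROTOCOL_FEATURES]);
		*split = !!(feat & NL80211_PROTOCOL_FEATURE_SPLIT_WIPHY_DUMP);
	}

	return NL_SKIP;
}
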
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 32aef0a439ef..dbd71b0c7d8c 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -348,6 +348,7 @@ enum {
348 TCA_HTB_INIT, 348 TCA_HTB_INIT,
349 TCA_HTB_CTAB, 349 TCA_HTB_CTAB,
350 TCA_HTB_RTAB, 350 TCA_HTB_RTAB,
351 TCA_HTB_DIRECT_QLEN,
351 __TCA_HTB_MAX, 352 __TCA_HTB_MAX,
352}; 353};
353 354
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index b49eab89c9fd..e00013a1debc 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -202,6 +202,8 @@ enum
202 LINUX_MIB_TCPFORWARDRETRANS, /* TCPForwardRetrans */ 202 LINUX_MIB_TCPFORWARDRETRANS, /* TCPForwardRetrans */
203 LINUX_MIB_TCPSLOWSTARTRETRANS, /* TCPSlowStartRetrans */ 203 LINUX_MIB_TCPSLOWSTARTRETRANS, /* TCPSlowStartRetrans */
204 LINUX_MIB_TCPTIMEOUTS, /* TCPTimeouts */ 204 LINUX_MIB_TCPTIMEOUTS, /* TCPTimeouts */
205 LINUX_MIB_TCPLOSSPROBES, /* TCPLossProbes */
206 LINUX_MIB_TCPLOSSPROBERECOVERY, /* TCPLossProbeRecovery */
205 LINUX_MIB_TCPRENORECOVERYFAIL, /* TCPRenoRecoveryFail */ 207 LINUX_MIB_TCPRENORECOVERYFAIL, /* TCPRenoRecoveryFail */
206 LINUX_MIB_TCPSACKRECOVERYFAIL, /* TCPSackRecoveryFail */ 208 LINUX_MIB_TCPSACKRECOVERYFAIL, /* TCPSackRecoveryFail */
207 LINUX_MIB_TCPSCHEDULERFAILED, /* TCPSchedulerFailed */ 209 LINUX_MIB_TCPSCHEDULERFAILED, /* TCPSchedulerFailed */
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 6b1ead0b0c9d..8d776ebc4829 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -102,7 +102,6 @@ enum {
102#define TCP_QUICKACK 12 /* Block/reenable quick acks */ 102#define TCP_QUICKACK 12 /* Block/reenable quick acks */
103#define TCP_CONGESTION 13 /* Congestion control algorithm */ 103#define TCP_CONGESTION 13 /* Congestion control algorithm */
104#define TCP_MD5SIG 14 /* TCP MD5 Signature (RFC2385) */ 104#define TCP_MD5SIG 14 /* TCP MD5 Signature (RFC2385) */
105#define TCP_COOKIE_TRANSACTIONS 15 /* TCP Cookie Transactions */
106#define TCP_THIN_LINEAR_TIMEOUTS 16 /* Use linear timeouts for thin streams*/ 105#define TCP_THIN_LINEAR_TIMEOUTS 16 /* Use linear timeouts for thin streams*/
107#define TCP_THIN_DUPACK 17 /* Fast retrans. after 1 dupack */ 106#define TCP_THIN_DUPACK 17 /* Fast retrans. after 1 dupack */
108#define TCP_USER_TIMEOUT 18 /* How long for loss retry before timeout */ 107#define TCP_USER_TIMEOUT 18 /* How long for loss retry before timeout */
@@ -199,29 +198,4 @@ struct tcp_md5sig {
199 __u8 tcpm_key[TCP_MD5SIG_MAXKEYLEN]; /* key (binary) */ 198 __u8 tcpm_key[TCP_MD5SIG_MAXKEYLEN]; /* key (binary) */
200}; 199};
201 200
202/* for TCP_COOKIE_TRANSACTIONS (TCPCT) socket option */
203#define TCP_COOKIE_MIN 8 /* 64-bits */
204#define TCP_COOKIE_MAX 16 /* 128-bits */
205#define TCP_COOKIE_PAIR_SIZE (2*TCP_COOKIE_MAX)
206
207/* Flags for both getsockopt and setsockopt */
208#define TCP_COOKIE_IN_ALWAYS (1 << 0) /* Discard SYN without cookie */
209#define TCP_COOKIE_OUT_NEVER (1 << 1) /* Prohibit outgoing cookies,
210 * supercedes everything. */
211
212/* Flags for getsockopt */
213#define TCP_S_DATA_IN (1 << 2) /* Was data received? */
214#define TCP_S_DATA_OUT (1 << 3) /* Was data sent? */
215
216/* TCP_COOKIE_TRANSACTIONS data */
217struct tcp_cookie_transactions {
218 __u16 tcpct_flags; /* see above */
219 __u8 __tcpct_pad1; /* zero */
220 __u8 tcpct_cookie_desired; /* bytes */
221 __u16 tcpct_s_data_desired; /* bytes of variable data */
222 __u16 tcpct_used; /* bytes in value */
223 __u8 tcpct_value[TCP_MSS_DEFAULT];
224};
225
226
227#endif /* _UAPI_LINUX_TCP_H */ 201#endif /* _UAPI_LINUX_TCP_H */
diff --git a/include/uapi/linux/vm_sockets.h b/include/uapi/linux/vm_sockets.h
index df91301847ec..b4ed5d895699 100644
--- a/include/uapi/linux/vm_sockets.h
+++ b/include/uapi/linux/vm_sockets.h
@@ -13,12 +13,10 @@
13 * more details. 13 * more details.
14 */ 14 */
15 15
16#ifndef _VM_SOCKETS_H_ 16#ifndef _UAPI_VM_SOCKETS_H
17#define _VM_SOCKETS_H_ 17#define _UAPI_VM_SOCKETS_H
18 18
19#if !defined(__KERNEL__) 19#include <linux/socket.h>
20#include <sys/socket.h>
21#endif
22 20
23/* Option name for STREAM socket buffer size. Use as the option name in 21/* Option name for STREAM socket buffer size. Use as the option name in
24 * setsockopt(3) or getsockopt(3) to set or get an unsigned long long that 22 * setsockopt(3) or getsockopt(3) to set or get an unsigned long long that
@@ -137,14 +135,13 @@
137#define VM_SOCKETS_VERSION_MINOR(_v) (((_v) & 0x0000FFFF)) 135#define VM_SOCKETS_VERSION_MINOR(_v) (((_v) & 0x0000FFFF))
138 136
139/* Address structure for vSockets. The address family should be set to 137/* Address structure for vSockets. The address family should be set to
140 * whatever vmci_sock_get_af_value_fd() returns. The structure members should 138 * AF_VSOCK. The structure members should all align on their natural
141 * all align on their natural boundaries without resorting to compiler packing 139 * boundaries without resorting to compiler packing directives. The total size
142 * directives. The total size of this structure should be exactly the same as 140 * of this structure should be exactly the same as that of struct sockaddr.
143 * that of struct sockaddr.
144 */ 141 */
145 142
146struct sockaddr_vm { 143struct sockaddr_vm {
147 sa_family_t svm_family; 144 __kernel_sa_family_t svm_family;
148 unsigned short svm_reserved1; 145 unsigned short svm_reserved1;
149 unsigned int svm_port; 146 unsigned int svm_port;
150 unsigned int svm_cid; 147 unsigned int svm_cid;
@@ -156,8 +153,4 @@ struct sockaddr_vm {
156 153
157#define IOCTL_VM_SOCKETS_GET_LOCAL_CID _IO(7, 0xb9) 154#define IOCTL_VM_SOCKETS_GET_LOCAL_CID _IO(7, 0xb9)
158 155
159#if defined(__KERNEL__) 156#endif /* _UAPI_VM_SOCKETS_H */
160int vm_sockets_get_local_cid(void);
161#endif
162
163#endif
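
(With the header reduced to uapi content, a consumer needs only <linux/vm_sockets.h> and the ordinary socket calls. A guest-side sketch follows; the CID and port are illustrative values, not from this patch.)

#include <sys/socket.h>
#include <linux/vm_sockets.h>

/* Sketch: connect from a guest to a vsock listener on the host. */
int vsock_connect_host(void)
{
	struct sockaddr_vm addr = {
		.svm_family = AF_VSOCK,
		.svm_cid = VMADDR_CID_HOST,	/* well-known host CID (2) */
		.svm_port = 1234,		/* illustrative port */
	};
	int fd = socket(AF_VSOCK, SOCK_STREAM, 0);

	if (fd < 0 || connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return -1;

	return fd;
}
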
diff --git a/kernel/audit.c b/kernel/audit.c
index d596e5355f15..488f85f76335 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -58,7 +58,7 @@
58#ifdef CONFIG_SECURITY 58#ifdef CONFIG_SECURITY
59#include <linux/security.h> 59#include <linux/security.h>
60#endif 60#endif
61#include <linux/netlink.h> 61#include <net/netlink.h>
62#include <linux/freezer.h> 62#include <linux/freezer.h>
63#include <linux/tty.h> 63#include <linux/tty.h>
64#include <linux/pid_namespace.h> 64#include <linux/pid_namespace.h>
@@ -910,7 +910,7 @@ static void audit_receive_skb(struct sk_buff *skb)
910{ 910{
911 struct nlmsghdr *nlh; 911 struct nlmsghdr *nlh;
912 /* 912 /*
913 * len MUST be signed for NLMSG_NEXT to be able to dec it below 0 913 * len MUST be signed for nlmsg_next to be able to dec it below 0
914 * if the nlmsg_len was not aligned 914 * if the nlmsg_len was not aligned
915 */ 915 */
916 int len; 916 int len;
@@ -919,13 +919,13 @@ static void audit_receive_skb(struct sk_buff *skb)
919 nlh = nlmsg_hdr(skb); 919 nlh = nlmsg_hdr(skb);
920 len = skb->len; 920 len = skb->len;
921 921
922 while (NLMSG_OK(nlh, len)) { 922 while (nlmsg_ok(nlh, len)) {
923 err = audit_receive_msg(skb, nlh); 923 err = audit_receive_msg(skb, nlh);
924 /* if err or if this message says it wants a response */ 924 /* if err or if this message says it wants a response */
925 if (err || (nlh->nlmsg_flags & NLM_F_ACK)) 925 if (err || (nlh->nlmsg_flags & NLM_F_ACK))
926 netlink_ack(skb, nlh, err); 926 netlink_ack(skb, nlh, err);
927 927
928 nlh = NLMSG_NEXT(nlh, len); 928 nlh = nlmsg_next(nlh, &len);
929 } 929 }
930} 930}
931 931
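
(The NLMSG_OK/NLMSG_NEXT to nlmsg_ok/nlmsg_next conversion above keeps the same walk; for reference, the canonical receive loop with the inline helpers from <net/netlink.h> looks like the sketch below. handle_msg() is a hypothetical per-message handler, not the audit code.)

struct nlmsghdr *nlh = nlmsg_hdr(skb);
int remaining = skb->len;	/* signed: nlmsg_next() may drive it below 0
				 * when nlmsg_len is not aligned */

while (nlmsg_ok(nlh, remaining)) {
	handle_msg(skb, nlh);
	nlh = nlmsg_next(nlh, &remaining);
}
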
@@ -1483,7 +1483,7 @@ void audit_log_end(struct audit_buffer *ab)
1483 audit_log_lost("rate limit exceeded"); 1483 audit_log_lost("rate limit exceeded");
1484 } else { 1484 } else {
1485 struct nlmsghdr *nlh = nlmsg_hdr(ab->skb); 1485 struct nlmsghdr *nlh = nlmsg_hdr(ab->skb);
1486 nlh->nlmsg_len = ab->skb->len - NLMSG_SPACE(0); 1486 nlh->nlmsg_len = ab->skb->len - NLMSG_HDRLEN;
1487 1487
1488 if (audit_pid) { 1488 if (audit_pid) {
1489 skb_queue_tail(&audit_skb_queue, ab->skb); 1489 skb_queue_tail(&audit_skb_queue, ab->skb);
diff --git a/kernel/signal.c b/kernel/signal.c
index dd72567767d9..497330ec2ae9 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -32,6 +32,7 @@
32#include <linux/user_namespace.h> 32#include <linux/user_namespace.h>
33#include <linux/uprobes.h> 33#include <linux/uprobes.h>
34#include <linux/compat.h> 34#include <linux/compat.h>
35#include <linux/cn_proc.h>
35#define CREATE_TRACE_POINTS 36#define CREATE_TRACE_POINTS
36#include <trace/events/signal.h> 37#include <trace/events/signal.h>
37 38
@@ -2350,6 +2351,7 @@ relock:
2350 if (sig_kernel_coredump(signr)) { 2351 if (sig_kernel_coredump(signr)) {
2351 if (print_fatal_signals) 2352 if (print_fatal_signals)
2352 print_fatal_signal(info->si_signo); 2353 print_fatal_signal(info->si_signo);
2354 proc_coredump_connector(current);
2353 /* 2355 /*
2354 * If it was able to dump core, this kills all 2356 * If it was able to dump core, this kills all
2355 * other threads in the group and synchronizes with 2357 * other threads in the group and synchronizes with
diff --git a/net/802/garp.c b/net/802/garp.c
index 8456f5d98b85..5d9630a0eb93 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -609,8 +609,12 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl
609 /* Delete timer and generate a final TRANSMIT_PDU event to flush out 609 /* Delete timer and generate a final TRANSMIT_PDU event to flush out
610 * all pending messages before the applicant is gone. */ 610 * all pending messages before the applicant is gone. */
611 del_timer_sync(&app->join_timer); 611 del_timer_sync(&app->join_timer);
612
613 spin_lock_bh(&app->lock);
612 garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU); 614 garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
613 garp_pdu_queue(app); 615 garp_pdu_queue(app);
616 spin_unlock_bh(&app->lock);
617
614 garp_queue_xmit(app); 618 garp_queue_xmit(app);
615 619
616 dev_mc_del(dev, appl->proto.group_address); 620 dev_mc_del(dev, appl->proto.group_address);
diff --git a/net/Kconfig b/net/Kconfig
index 6f676ab885be..2ddc9046868e 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -217,6 +217,7 @@ source "net/dns_resolver/Kconfig"
217source "net/batman-adv/Kconfig" 217source "net/batman-adv/Kconfig"
218source "net/openvswitch/Kconfig" 218source "net/openvswitch/Kconfig"
219source "net/vmw_vsock/Kconfig" 219source "net/vmw_vsock/Kconfig"
220source "net/netlink/Kconfig"
220 221
221config RPS 222config RPS
222 boolean 223 boolean
diff --git a/net/atm/lec.h b/net/atm/lec.h
index a86aff9a3c04..4149db1b7885 100644
--- a/net/atm/lec.h
+++ b/net/atm/lec.h
@@ -58,7 +58,7 @@ struct lane2_ops {
58 * field in h_type field. Data follows immediately after header. 58 * field in h_type field. Data follows immediately after header.
59 * 2. LLC Data frames whose total length, including LLC field and data, 59 * 2. LLC Data frames whose total length, including LLC field and data,
60 * but not padding required to meet the minimum data frame length, 60 * but not padding required to meet the minimum data frame length,
61 * is less than 1536(0x0600) MUST be encoded by placing that length 61 * is less than ETH_P_802_3_MIN MUST be encoded by placing that length
62 * in the h_type field. The LLC field follows header immediately. 62 * in the h_type field. The LLC field follows header immediately.
63 * 3. LLC data frames longer than this maximum MUST be encoded by placing 63 * 3. LLC data frames longer than this maximum MUST be encoded by placing
64 * the value 0 in the h_type field. 64 * the value 0 in the h_type field.
diff --git a/net/batman-adv/Kconfig b/net/batman-adv/Kconfig
index 8d8afb134b3a..fa780b76630e 100644
--- a/net/batman-adv/Kconfig
+++ b/net/batman-adv/Kconfig
@@ -36,6 +36,20 @@ config BATMAN_ADV_DAT
36 mesh networks. If you think that your network does not need 36 mesh networks. If you think that your network does not need
37 this option you can safely remove it and save some space. 37 this option you can safely remove it and save some space.
38 38
39config BATMAN_ADV_NC
40 bool "Network Coding"
41 depends on BATMAN_ADV
42 default n
43 help
44 This option enables network coding, a mechanism that aims to
45 increase the overall network throughput by fusing multiple
46 packets in one transmission.
47 Note that interfaces controlled by batman-adv must be manually
48 configured to have promiscuous mode enabled in order to make
49 network coding work.
50 If you think that your network does not need this feature you
51 can safely disable it and save some space.
52
39config BATMAN_ADV_DEBUG 53config BATMAN_ADV_DEBUG
40 bool "B.A.T.M.A.N. debugging" 54 bool "B.A.T.M.A.N. debugging"
41 depends on BATMAN_ADV 55 depends on BATMAN_ADV
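
(The "fusing" mentioned in the BATMAN_ADV_NC help text is XOR network coding: a relay combines one frame per direction into a single transmission, and each endpoint recovers the foreign frame by XORing the coded frame with the copy it transmitted itself. A toy illustration, not batman-adv code:)

#include <stddef.h>
#include <stdint.h>

/* Relay side: fuse two equal-length frames into one coded transmission. */
static void nc_code(const uint8_t *a, const uint8_t *b, uint8_t *out,
		    size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		out[i] = a[i] ^ b[i];
}

/* Endpoint side: the same XOR, applied with the endpoint's own frame,
 * yields the frame destined to it. */
static void nc_decode(const uint8_t *coded, const uint8_t *own,
		      uint8_t *out, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		out[i] = coded[i] ^ own[i];
}
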
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index e45e3b4e32e3..acbac2a9c62f 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -1,5 +1,5 @@
1# 1#
2# Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: 2# Copyright (C) 2007-2013 B.A.T.M.A.N. contributors:
3# 3#
4# Marek Lindner, Simon Wunderlich 4# Marek Lindner, Simon Wunderlich
5# 5#
@@ -30,6 +30,7 @@ batman-adv-y += hard-interface.o
30batman-adv-y += hash.o 30batman-adv-y += hash.o
31batman-adv-y += icmp_socket.o 31batman-adv-y += icmp_socket.o
32batman-adv-y += main.o 32batman-adv-y += main.o
33batman-adv-$(CONFIG_BATMAN_ADV_NC) += network-coding.o
33batman-adv-y += originator.o 34batman-adv-y += originator.o
34batman-adv-y += ring_buffer.o 35batman-adv-y += ring_buffer.o
35batman-adv-y += routing.o 36batman-adv-y += routing.o
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index a5bb0a769eb9..071f288b77a8 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -27,6 +27,7 @@
27#include "hard-interface.h" 27#include "hard-interface.h"
28#include "send.h" 28#include "send.h"
29#include "bat_algo.h" 29#include "bat_algo.h"
30#include "network-coding.h"
30 31
31static struct batadv_neigh_node * 32static struct batadv_neigh_node *
32batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface, 33batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
@@ -1185,6 +1186,10 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
1185 if (!orig_neigh_node) 1186 if (!orig_neigh_node)
1186 goto out; 1187 goto out;
1187 1188
1189 /* Update nc_nodes of the originator */
1190 batadv_nc_update_nc_node(bat_priv, orig_node, orig_neigh_node,
1191 batadv_ogm_packet, is_single_hop_neigh);
1192
1188 orig_neigh_router = batadv_orig_node_get_router(orig_neigh_node); 1193 orig_neigh_router = batadv_orig_node_get_router(orig_neigh_node);
1189 1194
1190 /* drop packet if sender is not a direct neighbor and if we 1195 /* drop packet if sender is not a direct neighbor and if we
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
index 6ae86516db4d..f186a55b23c3 100644
--- a/net/batman-adv/debugfs.c
+++ b/net/batman-adv/debugfs.c
@@ -32,6 +32,7 @@
32#include "icmp_socket.h" 32#include "icmp_socket.h"
33#include "bridge_loop_avoidance.h" 33#include "bridge_loop_avoidance.h"
34#include "distributed-arp-table.h" 34#include "distributed-arp-table.h"
35#include "network-coding.h"
35 36
36static struct dentry *batadv_debugfs; 37static struct dentry *batadv_debugfs;
37 38
@@ -310,6 +311,14 @@ struct batadv_debuginfo {
310 const struct file_operations fops; 311 const struct file_operations fops;
311}; 312};
312 313
314#ifdef CONFIG_BATMAN_ADV_NC
315static int batadv_nc_nodes_open(struct inode *inode, struct file *file)
316{
317 struct net_device *net_dev = (struct net_device *)inode->i_private;
318 return single_open(file, batadv_nc_nodes_seq_print_text, net_dev);
319}
320#endif
321
313#define BATADV_DEBUGINFO(_name, _mode, _open) \ 322#define BATADV_DEBUGINFO(_name, _mode, _open) \
314struct batadv_debuginfo batadv_debuginfo_##_name = { \ 323struct batadv_debuginfo batadv_debuginfo_##_name = { \
315 .attr = { .name = __stringify(_name), \ 324 .attr = { .name = __stringify(_name), \
@@ -348,6 +357,9 @@ static BATADV_DEBUGINFO(dat_cache, S_IRUGO, batadv_dat_cache_open);
348static BATADV_DEBUGINFO(transtable_local, S_IRUGO, 357static BATADV_DEBUGINFO(transtable_local, S_IRUGO,
349 batadv_transtable_local_open); 358 batadv_transtable_local_open);
350static BATADV_DEBUGINFO(vis_data, S_IRUGO, batadv_vis_data_open); 359static BATADV_DEBUGINFO(vis_data, S_IRUGO, batadv_vis_data_open);
360#ifdef CONFIG_BATMAN_ADV_NC
361static BATADV_DEBUGINFO(nc_nodes, S_IRUGO, batadv_nc_nodes_open);
362#endif
351 363
352static struct batadv_debuginfo *batadv_mesh_debuginfos[] = { 364static struct batadv_debuginfo *batadv_mesh_debuginfos[] = {
353 &batadv_debuginfo_originators, 365 &batadv_debuginfo_originators,
@@ -362,6 +374,9 @@ static struct batadv_debuginfo *batadv_mesh_debuginfos[] = {
362#endif 374#endif
363 &batadv_debuginfo_transtable_local, 375 &batadv_debuginfo_transtable_local,
364 &batadv_debuginfo_vis_data, 376 &batadv_debuginfo_vis_data,
377#ifdef CONFIG_BATMAN_ADV_NC
378 &batadv_debuginfo_nc_nodes,
379#endif
365 NULL, 380 NULL,
366}; 381};
367 382
@@ -431,6 +446,9 @@ int batadv_debugfs_add_meshif(struct net_device *dev)
431 } 446 }
432 } 447 }
433 448
449 if (batadv_nc_init_debugfs(bat_priv) < 0)
450 goto rem_attr;
451
434 return 0; 452 return 0;
435rem_attr: 453rem_attr:
436 debugfs_remove_recursive(bat_priv->debug_dir); 454 debugfs_remove_recursive(bat_priv->debug_dir);
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index d54188a112ea..8e15d966d9b0 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -816,7 +816,6 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
816 bool ret = false; 816 bool ret = false;
817 struct batadv_dat_entry *dat_entry = NULL; 817 struct batadv_dat_entry *dat_entry = NULL;
818 struct sk_buff *skb_new; 818 struct sk_buff *skb_new;
819 struct batadv_hard_iface *primary_if = NULL;
820 819
821 if (!atomic_read(&bat_priv->distributed_arp_table)) 820 if (!atomic_read(&bat_priv->distributed_arp_table))
822 goto out; 821 goto out;
@@ -838,22 +837,18 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
838 837
839 dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst); 838 dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst);
840 if (dat_entry) { 839 if (dat_entry) {
841 primary_if = batadv_primary_if_get_selected(bat_priv);
842 if (!primary_if)
843 goto out;
844
845 skb_new = arp_create(ARPOP_REPLY, ETH_P_ARP, ip_src, 840 skb_new = arp_create(ARPOP_REPLY, ETH_P_ARP, ip_src,
846 primary_if->soft_iface, ip_dst, hw_src, 841 bat_priv->soft_iface, ip_dst, hw_src,
847 dat_entry->mac_addr, hw_src); 842 dat_entry->mac_addr, hw_src);
848 if (!skb_new) 843 if (!skb_new)
849 goto out; 844 goto out;
850 845
851 skb_reset_mac_header(skb_new); 846 skb_reset_mac_header(skb_new);
852 skb_new->protocol = eth_type_trans(skb_new, 847 skb_new->protocol = eth_type_trans(skb_new,
853 primary_if->soft_iface); 848 bat_priv->soft_iface);
854 bat_priv->stats.rx_packets++; 849 bat_priv->stats.rx_packets++;
855 bat_priv->stats.rx_bytes += skb->len + ETH_HLEN; 850 bat_priv->stats.rx_bytes += skb->len + ETH_HLEN;
856 primary_if->soft_iface->last_rx = jiffies; 851 bat_priv->soft_iface->last_rx = jiffies;
857 852
858 netif_rx(skb_new); 853 netif_rx(skb_new);
859 batadv_dbg(BATADV_DBG_DAT, bat_priv, "ARP request replied locally\n"); 854 batadv_dbg(BATADV_DBG_DAT, bat_priv, "ARP request replied locally\n");
@@ -866,8 +861,6 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
866out: 861out:
867 if (dat_entry) 862 if (dat_entry)
868 batadv_dat_entry_free_ref(dat_entry); 863 batadv_dat_entry_free_ref(dat_entry);
869 if (primary_if)
870 batadv_hardif_free_ref(primary_if);
871 return ret; 864 return ret;
872} 865}
873 866
@@ -887,7 +880,6 @@ bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
887 __be32 ip_src, ip_dst; 880 __be32 ip_src, ip_dst;
888 uint8_t *hw_src; 881 uint8_t *hw_src;
889 struct sk_buff *skb_new; 882 struct sk_buff *skb_new;
890 struct batadv_hard_iface *primary_if = NULL;
891 struct batadv_dat_entry *dat_entry = NULL; 883 struct batadv_dat_entry *dat_entry = NULL;
892 bool ret = false; 884 bool ret = false;
893 int err; 885 int err;
@@ -912,12 +904,8 @@ bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
912 if (!dat_entry) 904 if (!dat_entry)
913 goto out; 905 goto out;
914 906
915 primary_if = batadv_primary_if_get_selected(bat_priv);
916 if (!primary_if)
917 goto out;
918
919 skb_new = arp_create(ARPOP_REPLY, ETH_P_ARP, ip_src, 907 skb_new = arp_create(ARPOP_REPLY, ETH_P_ARP, ip_src,
920 primary_if->soft_iface, ip_dst, hw_src, 908 bat_priv->soft_iface, ip_dst, hw_src,
921 dat_entry->mac_addr, hw_src); 909 dat_entry->mac_addr, hw_src);
922 910
923 if (!skb_new) 911 if (!skb_new)
@@ -941,8 +929,6 @@ bool batadv_dat_snoop_incoming_arp_request(struct batadv_priv *bat_priv,
941out: 929out:
942 if (dat_entry) 930 if (dat_entry)
943 batadv_dat_entry_free_ref(dat_entry); 931 batadv_dat_entry_free_ref(dat_entry);
944 if (primary_if)
945 batadv_hardif_free_ref(primary_if);
946 if (ret) 932 if (ret)
947 kfree_skb(skb); 933 kfree_skb(skb);
948 return ret; 934 return ret;
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 34f99a46ec1d..f105219f4a4b 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -500,7 +500,7 @@ int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset)
500 rcu_read_unlock(); 500 rcu_read_unlock();
501 501
502 if (gw_count == 0) 502 if (gw_count == 0)
503 seq_printf(seq, "No gateways in range ...\n"); 503 seq_puts(seq, "No gateways in range ...\n");
504 504
505out: 505out:
506 if (primary_if) 506 if (primary_if)
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 368219e026a9..522243aff2f3 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -307,11 +307,35 @@ batadv_hardif_deactivate_interface(struct batadv_hard_iface *hard_iface)
307 batadv_update_min_mtu(hard_iface->soft_iface); 307 batadv_update_min_mtu(hard_iface->soft_iface);
308} 308}
309 309
310/**
311 * batadv_master_del_slave - remove hard_iface from the current master interface
312 * @slave: the interface enslaved in another master
313 * @master: the master from which slave has to be removed
314 *
315 * Invoke ndo_del_slave on master passing slave as argument. In this way slave
316 * is free'd and master can correctly change its internal state.
317 * Return 0 on success, a negative value representing the error otherwise
318 */
319static int batadv_master_del_slave(struct batadv_hard_iface *slave,
320 struct net_device *master)
321{
322 int ret;
323
324 if (!master)
325 return 0;
326
327 ret = -EBUSY;
328 if (master->netdev_ops->ndo_del_slave)
329 ret = master->netdev_ops->ndo_del_slave(master, slave->net_dev);
330
331 return ret;
332}
333
310int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface, 334int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
311 const char *iface_name) 335 const char *iface_name)
312{ 336{
313 struct batadv_priv *bat_priv; 337 struct batadv_priv *bat_priv;
314 struct net_device *soft_iface; 338 struct net_device *soft_iface, *master;
315 __be16 ethertype = __constant_htons(ETH_P_BATMAN); 339 __be16 ethertype = __constant_htons(ETH_P_BATMAN);
316 int ret; 340 int ret;
317 341
@@ -321,11 +345,6 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
321 if (!atomic_inc_not_zero(&hard_iface->refcount)) 345 if (!atomic_inc_not_zero(&hard_iface->refcount))
322 goto out; 346 goto out;
323 347
324 /* hard-interface is part of a bridge */
325 if (hard_iface->net_dev->priv_flags & IFF_BRIDGE_PORT)
326 pr_err("You are about to enable batman-adv on '%s' which already is part of a bridge. Unless you know exactly what you are doing this is probably wrong and won't work the way you think it would.\n",
327 hard_iface->net_dev->name);
328
329 soft_iface = dev_get_by_name(&init_net, iface_name); 348 soft_iface = dev_get_by_name(&init_net, iface_name);
330 349
331 if (!soft_iface) { 350 if (!soft_iface) {
@@ -347,12 +366,24 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
347 goto err_dev; 366 goto err_dev;
348 } 367 }
349 368
369 /* check if the interface is enslaved in another virtual one and
370 * in that case unlink it first
371 */
372 master = netdev_master_upper_dev_get(hard_iface->net_dev);
373 ret = batadv_master_del_slave(hard_iface, master);
374 if (ret)
375 goto err_dev;
376
350 hard_iface->soft_iface = soft_iface; 377 hard_iface->soft_iface = soft_iface;
351 bat_priv = netdev_priv(hard_iface->soft_iface); 378 bat_priv = netdev_priv(hard_iface->soft_iface);
352 379
380 ret = netdev_master_upper_dev_link(hard_iface->net_dev, soft_iface);
381 if (ret)
382 goto err_dev;
383
353 ret = bat_priv->bat_algo_ops->bat_iface_enable(hard_iface); 384 ret = bat_priv->bat_algo_ops->bat_iface_enable(hard_iface);
354 if (ret < 0) 385 if (ret < 0)
355 goto err_dev; 386 goto err_upper;
356 387
357 hard_iface->if_num = bat_priv->num_ifaces; 388 hard_iface->if_num = bat_priv->num_ifaces;
358 bat_priv->num_ifaces++; 389 bat_priv->num_ifaces++;
@@ -362,7 +393,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
362 bat_priv->bat_algo_ops->bat_iface_disable(hard_iface); 393 bat_priv->bat_algo_ops->bat_iface_disable(hard_iface);
363 bat_priv->num_ifaces--; 394 bat_priv->num_ifaces--;
364 hard_iface->if_status = BATADV_IF_NOT_IN_USE; 395 hard_iface->if_status = BATADV_IF_NOT_IN_USE;
365 goto err_dev; 396 goto err_upper;
366 } 397 }
367 398
368 hard_iface->batman_adv_ptype.type = ethertype; 399 hard_iface->batman_adv_ptype.type = ethertype;
@@ -401,14 +432,18 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
401out: 432out:
402 return 0; 433 return 0;
403 434
435err_upper:
436 netdev_upper_dev_unlink(hard_iface->net_dev, soft_iface);
404err_dev: 437err_dev:
438 hard_iface->soft_iface = NULL;
405 dev_put(soft_iface); 439 dev_put(soft_iface);
406err: 440err:
407 batadv_hardif_free_ref(hard_iface); 441 batadv_hardif_free_ref(hard_iface);
408 return ret; 442 return ret;
409} 443}
410 444
411void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface) 445void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
446 enum batadv_hard_if_cleanup autodel)
412{ 447{
413 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); 448 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
414 struct batadv_hard_iface *primary_if = NULL; 449 struct batadv_hard_iface *primary_if = NULL;
@@ -446,9 +481,10 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface)
446 dev_put(hard_iface->soft_iface); 481 dev_put(hard_iface->soft_iface);
447 482
448 /* nobody uses this interface anymore */ 483 /* nobody uses this interface anymore */
449 if (!bat_priv->num_ifaces) 484 if (!bat_priv->num_ifaces && autodel == BATADV_IF_CLEANUP_AUTO)
450 batadv_softif_destroy(hard_iface->soft_iface); 485 batadv_softif_destroy_sysfs(hard_iface->soft_iface);
451 486
487 netdev_upper_dev_unlink(hard_iface->net_dev, hard_iface->soft_iface);
452 hard_iface->soft_iface = NULL; 488 hard_iface->soft_iface = NULL;
453 batadv_hardif_free_ref(hard_iface); 489 batadv_hardif_free_ref(hard_iface);
454 490
@@ -533,7 +569,8 @@ static void batadv_hardif_remove_interface(struct batadv_hard_iface *hard_iface)
533 569
534 /* first deactivate interface */ 570 /* first deactivate interface */
535 if (hard_iface->if_status != BATADV_IF_NOT_IN_USE) 571 if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
536 batadv_hardif_disable_interface(hard_iface); 572 batadv_hardif_disable_interface(hard_iface,
573 BATADV_IF_CLEANUP_AUTO);
537 574
538 if (hard_iface->if_status != BATADV_IF_NOT_IN_USE) 575 if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
539 return; 576 return;
@@ -563,6 +600,11 @@ static int batadv_hard_if_event(struct notifier_block *this,
563 struct batadv_hard_iface *primary_if = NULL; 600 struct batadv_hard_iface *primary_if = NULL;
564 struct batadv_priv *bat_priv; 601 struct batadv_priv *bat_priv;
565 602
603 if (batadv_softif_is_valid(net_dev) && event == NETDEV_REGISTER) {
604 batadv_sysfs_add_meshif(net_dev);
605 return NOTIFY_DONE;
606 }
607
566 hard_iface = batadv_hardif_get_by_netdev(net_dev); 608 hard_iface = batadv_hardif_get_by_netdev(net_dev);
567 if (!hard_iface && event == NETDEV_REGISTER) 609 if (!hard_iface && event == NETDEV_REGISTER)
568 hard_iface = batadv_hardif_add_interface(net_dev); 610 hard_iface = batadv_hardif_add_interface(net_dev);
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index 308437d52e22..49892881a7c5 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -29,13 +29,24 @@ enum batadv_hard_if_state {
29 BATADV_IF_I_WANT_YOU, 29 BATADV_IF_I_WANT_YOU,
30}; 30};
31 31
32/**
33 * enum batadv_hard_if_cleanup - Cleanup modes for soft_iface after slave removal
34 * @BATADV_IF_CLEANUP_KEEP: Don't automatically delete soft-interface
35 * @BATADV_IF_CLEANUP_AUTO: Delete soft-interface after last slave was removed
36 */
37enum batadv_hard_if_cleanup {
38 BATADV_IF_CLEANUP_KEEP,
39 BATADV_IF_CLEANUP_AUTO,
40};
41
32extern struct notifier_block batadv_hard_if_notifier; 42extern struct notifier_block batadv_hard_if_notifier;
33 43
34struct batadv_hard_iface* 44struct batadv_hard_iface*
35batadv_hardif_get_by_netdev(const struct net_device *net_dev); 45batadv_hardif_get_by_netdev(const struct net_device *net_dev);
36int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface, 46int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
37 const char *iface_name); 47 const char *iface_name);
38void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface); 48void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
49 enum batadv_hard_if_cleanup autodel);
39void batadv_hardif_remove_interfaces(void); 50void batadv_hardif_remove_interfaces(void);
40int batadv_hardif_min_mtu(struct net_device *soft_iface); 51int batadv_hardif_min_mtu(struct net_device *soft_iface);
41void batadv_update_min_mtu(struct net_device *soft_iface); 52void batadv_update_min_mtu(struct net_device *soft_iface);
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 0488d70c8c35..6277735cd89e 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -35,6 +35,7 @@
35#include "vis.h" 35#include "vis.h"
36#include "hash.h" 36#include "hash.h"
37#include "bat_algo.h" 37#include "bat_algo.h"
38#include "network-coding.h"
38 39
39 40
40/* List manipulations on hardif_list have to be rtnl_lock()'ed, 41/* List manipulations on hardif_list have to be rtnl_lock()'ed,
@@ -70,6 +71,7 @@ static int __init batadv_init(void)
70 batadv_debugfs_init(); 71 batadv_debugfs_init();
71 72
72 register_netdevice_notifier(&batadv_hard_if_notifier); 73 register_netdevice_notifier(&batadv_hard_if_notifier);
74 rtnl_link_register(&batadv_link_ops);
73 75
74 pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n", 76 pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
75 BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION); 77 BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);
@@ -80,6 +82,7 @@ static int __init batadv_init(void)
80static void __exit batadv_exit(void) 82static void __exit batadv_exit(void)
81{ 83{
82 batadv_debugfs_destroy(); 84 batadv_debugfs_destroy();
85 rtnl_link_unregister(&batadv_link_ops);
83 unregister_netdevice_notifier(&batadv_hard_if_notifier); 86 unregister_netdevice_notifier(&batadv_hard_if_notifier);
84 batadv_hardif_remove_interfaces(); 87 batadv_hardif_remove_interfaces();
85 88
@@ -135,6 +138,10 @@ int batadv_mesh_init(struct net_device *soft_iface)
135 if (ret < 0) 138 if (ret < 0)
136 goto err; 139 goto err;
137 140
141 ret = batadv_nc_init(bat_priv);
142 if (ret < 0)
143 goto err;
144
138 atomic_set(&bat_priv->gw.reselect, 0); 145 atomic_set(&bat_priv->gw.reselect, 0);
139 atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE); 146 atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);
140 147
@@ -157,6 +164,7 @@ void batadv_mesh_free(struct net_device *soft_iface)
157 164
158 batadv_gw_node_purge(bat_priv); 165 batadv_gw_node_purge(bat_priv);
159 batadv_originator_free(bat_priv); 166 batadv_originator_free(bat_priv);
167 batadv_nc_free(bat_priv);
160 168
161 batadv_tt_free(bat_priv); 169 batadv_tt_free(bat_priv);
162 170
@@ -411,7 +419,7 @@ int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
411{ 419{
412 struct batadv_algo_ops *bat_algo_ops; 420 struct batadv_algo_ops *bat_algo_ops;
413 421
414 seq_printf(seq, "Available routing algorithms:\n"); 422 seq_puts(seq, "Available routing algorithms:\n");
415 423
416 hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) { 424 hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) {
417 seq_printf(seq, "%s\n", bat_algo_ops->name); 425 seq_printf(seq, "%s\n", bat_algo_ops->name);
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index ced08b936a96..f90f5bc8e426 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -26,7 +26,7 @@
26#define BATADV_DRIVER_DEVICE "batman-adv" 26#define BATADV_DRIVER_DEVICE "batman-adv"
27 27
28#ifndef BATADV_SOURCE_VERSION 28#ifndef BATADV_SOURCE_VERSION
29#define BATADV_SOURCE_VERSION "2013.1.0" 29#define BATADV_SOURCE_VERSION "2013.2.0"
30#endif 30#endif
31 31
32/* B.A.T.M.A.N. parameters */ 32/* B.A.T.M.A.N. parameters */
@@ -105,6 +105,8 @@
105#define BATADV_RESET_PROTECTION_MS 30000 105#define BATADV_RESET_PROTECTION_MS 30000
106#define BATADV_EXPECTED_SEQNO_RANGE 65536 106#define BATADV_EXPECTED_SEQNO_RANGE 65536
107 107
108#define BATADV_NC_NODE_TIMEOUT 10000 /* Milliseconds */
109
108enum batadv_mesh_state { 110enum batadv_mesh_state {
109 BATADV_MESH_INACTIVE, 111 BATADV_MESH_INACTIVE,
110 BATADV_MESH_ACTIVE, 112 BATADV_MESH_ACTIVE,
@@ -150,6 +152,7 @@ enum batadv_uev_type {
150#include <linux/percpu.h> 152#include <linux/percpu.h>
151#include <linux/slab.h> 153#include <linux/slab.h>
152#include <net/sock.h> /* struct sock */ 154#include <net/sock.h> /* struct sock */
155#include <net/rtnetlink.h>
153#include <linux/jiffies.h> 156#include <linux/jiffies.h>
154#include <linux/seq_file.h> 157#include <linux/seq_file.h>
155#include "types.h" 158#include "types.h"
@@ -185,6 +188,7 @@ __be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr);
185 * @BATADV_DBG_TT: translation table messages 188 * @BATADV_DBG_TT: translation table messages
186 * @BATADV_DBG_BLA: bridge loop avoidance messages 189 * @BATADV_DBG_BLA: bridge loop avoidance messages
187 * @BATADV_DBG_DAT: ARP snooping and DAT related messages 190 * @BATADV_DBG_DAT: ARP snooping and DAT related messages
191 * @BATADV_DBG_NC: network coding related messages
188 * @BATADV_DBG_ALL: the union of all the above log levels 192 * @BATADV_DBG_ALL: the union of all the above log levels
189 */ 193 */
190enum batadv_dbg_level { 194enum batadv_dbg_level {
@@ -193,7 +197,8 @@ enum batadv_dbg_level {
193 BATADV_DBG_TT = BIT(2), 197 BATADV_DBG_TT = BIT(2),
194 BATADV_DBG_BLA = BIT(3), 198 BATADV_DBG_BLA = BIT(3),
195 BATADV_DBG_DAT = BIT(4), 199 BATADV_DBG_DAT = BIT(4),
196 BATADV_DBG_ALL = 31, 200 BATADV_DBG_NC = BIT(5),
201 BATADV_DBG_ALL = 63,
197}; 202};
198 203
199#ifdef CONFIG_BATMAN_ADV_DEBUG 204#ifdef CONFIG_BATMAN_ADV_DEBUG
@@ -298,4 +303,10 @@ static inline uint64_t batadv_sum_counter(struct batadv_priv *bat_priv,
298 return sum; 303 return sum;
299} 304}
300 305
306/* Define a macro to reach the control buffer of the skb. The members of the
307 * control buffer are defined in struct batadv_skb_cb in types.h.
308 * The macro is inspired by the similar macro TCP_SKB_CB() in tcp.h.
309 */
310#define BATADV_SKB_CB(__skb) ((struct batadv_skb_cb *)&((__skb)->cb[0]))
311
301#endif /* _NET_BATMAN_ADV_MAIN_H_ */ 312#endif /* _NET_BATMAN_ADV_MAIN_H_ */
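
(Usage sketch for the BATADV_SKB_CB() macro above, mirroring how TCP_SKB_CB() is used; the 'decoded' member is a hypothetical stand-in for whatever struct batadv_skb_cb actually defines in types.h.)

struct batadv_skb_cb *cb = BATADV_SKB_CB(skb);

cb->decoded = true;	/* hypothetical per-packet flag stored in skb->cb */

if (BATADV_SKB_CB(skb)->decoded)
	pr_debug("packet already decoded, skipping\n");
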
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
new file mode 100644
index 000000000000..6b9a54485314
--- /dev/null
+++ b/net/batman-adv/network-coding.c
@@ -0,0 +1,1821 @@
1/* Copyright (C) 2012-2013 B.A.T.M.A.N. contributors:
2 *
3 * Martin Hundebøll, Jeppe Ledet-Pedersen
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA
18 */
19
20#include <linux/debugfs.h>
21
22#include "main.h"
23#include "hash.h"
24#include "network-coding.h"
25#include "send.h"
26#include "originator.h"
27#include "hard-interface.h"
28#include "routing.h"
29
30static struct lock_class_key batadv_nc_coding_hash_lock_class_key;
31static struct lock_class_key batadv_nc_decoding_hash_lock_class_key;
32
33static void batadv_nc_worker(struct work_struct *work);
34static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
35 struct batadv_hard_iface *recv_if);
36
37/**
38 * batadv_nc_start_timer - initialise the nc periodic worker
39 * @bat_priv: the bat priv with all the soft interface information
40 */
41static void batadv_nc_start_timer(struct batadv_priv *bat_priv)
42{
43 queue_delayed_work(batadv_event_workqueue, &bat_priv->nc.work,
44 msecs_to_jiffies(10));
45}
46
47/**
48 * batadv_nc_init - initialise coding hash table and start house keeping
49 * @bat_priv: the bat priv with all the soft interface information
50 */
51int batadv_nc_init(struct batadv_priv *bat_priv)
52{
53 bat_priv->nc.timestamp_fwd_flush = jiffies;
54 bat_priv->nc.timestamp_sniffed_purge = jiffies;
55
56 if (bat_priv->nc.coding_hash || bat_priv->nc.decoding_hash)
57 return 0;
58
59 bat_priv->nc.coding_hash = batadv_hash_new(128);
60 if (!bat_priv->nc.coding_hash)
61 goto err;
62
63 batadv_hash_set_lock_class(bat_priv->nc.coding_hash,
64 &batadv_nc_coding_hash_lock_class_key);
65
66 bat_priv->nc.decoding_hash = batadv_hash_new(128);
67 if (!bat_priv->nc.decoding_hash)
68 goto err;
69
70 batadv_hash_set_lock_class(bat_priv->nc.decoding_hash,
71 &batadv_nc_decoding_hash_lock_class_key);
72
73 /* Register our packet type */
74 if (batadv_recv_handler_register(BATADV_CODED,
75 batadv_nc_recv_coded_packet) < 0)
76 goto err;
77
78 INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker);
79 batadv_nc_start_timer(bat_priv);
80
81 return 0;
82
83err:
84 return -ENOMEM;
85}
86
87/**
88 * batadv_nc_init_bat_priv - initialise the nc specific bat_priv variables
89 * @bat_priv: the bat priv with all the soft interface information
90 */
91void batadv_nc_init_bat_priv(struct batadv_priv *bat_priv)
92{
93 atomic_set(&bat_priv->network_coding, 1);
94 bat_priv->nc.min_tq = 200;
95 bat_priv->nc.max_fwd_delay = 10;
96 bat_priv->nc.max_buffer_time = 200;
97}
98
99/**
100 * batadv_nc_init_orig - initialise the nc fields of an orig_node
101 * @orig_node: the orig_node which is going to be initialised
102 */
103void batadv_nc_init_orig(struct batadv_orig_node *orig_node)
104{
105 INIT_LIST_HEAD(&orig_node->in_coding_list);
106 INIT_LIST_HEAD(&orig_node->out_coding_list);
107 spin_lock_init(&orig_node->in_coding_list_lock);
108 spin_lock_init(&orig_node->out_coding_list_lock);
109}
110
111/**
112 * batadv_nc_node_free_rcu - rcu callback to free an nc node and remove
113 * its refcount on the orig_node
114 * @rcu: rcu pointer of the nc node
115 */
116static void batadv_nc_node_free_rcu(struct rcu_head *rcu)
117{
118 struct batadv_nc_node *nc_node;
119
120 nc_node = container_of(rcu, struct batadv_nc_node, rcu);
121 batadv_orig_node_free_ref(nc_node->orig_node);
122 kfree(nc_node);
123}
124
125/**
126 * batadv_nc_node_free_ref - decrements the nc node refcounter and possibly
127 * frees it
128 * @nc_node: the nc node to free
129 */
130static void batadv_nc_node_free_ref(struct batadv_nc_node *nc_node)
131{
132 if (atomic_dec_and_test(&nc_node->refcount))
133 call_rcu(&nc_node->rcu, batadv_nc_node_free_rcu);
134}
135
136/**
137 * batadv_nc_path_free_ref - decrements the nc path refcounter and possibly
138 * frees it
139 * @nc_path: the nc node to free
140 */
141static void batadv_nc_path_free_ref(struct batadv_nc_path *nc_path)
142{
143 if (atomic_dec_and_test(&nc_path->refcount))
144 kfree_rcu(nc_path, rcu);
145}
146
147/**
148 * batadv_nc_packet_free - frees nc packet
149 * @nc_packet: the nc packet to free
150 */
151static void batadv_nc_packet_free(struct batadv_nc_packet *nc_packet)
152{
153 if (nc_packet->skb)
154 kfree_skb(nc_packet->skb);
155
156 batadv_nc_path_free_ref(nc_packet->nc_path);
157 kfree(nc_packet);
158}
159
160/**
161 * batadv_nc_to_purge_nc_node - checks whether an nc node has to be purged
162 * @bat_priv: the bat priv with all the soft interface information
163 * @nc_node: the nc node to check
164 *
165 * Returns true if the entry has to be purged now, false otherwise
166 */
167static bool batadv_nc_to_purge_nc_node(struct batadv_priv *bat_priv,
168 struct batadv_nc_node *nc_node)
169{
170 if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
171 return true;
172
173 return batadv_has_timed_out(nc_node->last_seen, BATADV_NC_NODE_TIMEOUT);
174}
175
176/**
177 * batadv_nc_to_purge_nc_path_coding - checks whether an nc path has timed out
178 * @bat_priv: the bat priv with all the soft interface information
179 * @nc_path: the nc path to check
180 *
181 * Returns true if the entry has to be purged now, false otherwise
182 */
183static bool batadv_nc_to_purge_nc_path_coding(struct batadv_priv *bat_priv,
184 struct batadv_nc_path *nc_path)
185{
186 if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
187 return true;
188
189 /* purge the path when no packets have been added for 10 times the
190 * max_fwd_delay time
191 */
192 return batadv_has_timed_out(nc_path->last_valid,
193 bat_priv->nc.max_fwd_delay * 10);
194}
195
196/**
197 * batadv_nc_to_purge_nc_path_decoding - checks whether an nc path has timed out
198 * @bat_priv: the bat priv with all the soft interface information
199 * @nc_path: the nc path to check
200 *
201 * Returns true if the entry has to be purged now, false otherwise
202 */
203static bool batadv_nc_to_purge_nc_path_decoding(struct batadv_priv *bat_priv,
204 struct batadv_nc_path *nc_path)
205{
206 if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
207 return true;
208
209 /* purge the path when no packets have been added for 10 times the
210 * max_buffer time
211 */
212 return batadv_has_timed_out(nc_path->last_valid,
213 bat_priv->nc.max_buffer_time * 10);
214}
215
216/**
217 * batadv_nc_purge_orig_nc_nodes - go through list of nc nodes and purge stale
218 * entries
219 * @bat_priv: the bat priv with all the soft interface information
220 * @list: list of nc nodes
221 * @lock: nc node list lock
222 * @to_purge: function in charge to decide whether an entry has to be purged or
223 * not. This function takes the nc node as argument and has to return
224 * a boolean value: true if the entry has to be deleted, false
225 * otherwise
226 */
227static void
228batadv_nc_purge_orig_nc_nodes(struct batadv_priv *bat_priv,
229 struct list_head *list,
230 spinlock_t *lock,
231 bool (*to_purge)(struct batadv_priv *,
232 struct batadv_nc_node *))
233{
234 struct batadv_nc_node *nc_node, *nc_node_tmp;
235
236 /* For each nc_node in list */
237 spin_lock_bh(lock);
238 list_for_each_entry_safe(nc_node, nc_node_tmp, list, list) {
239 /* if a helper function has been passed as parameter,
240 * ask it if the entry has to be purged or not
241 */
242 if (to_purge && !to_purge(bat_priv, nc_node))
243 continue;
244
245 batadv_dbg(BATADV_DBG_NC, bat_priv,
246 "Removing nc_node %pM -> %pM\n",
247 nc_node->addr, nc_node->orig_node->orig);
248 list_del_rcu(&nc_node->list);
249 batadv_nc_node_free_ref(nc_node);
250 }
251 spin_unlock_bh(lock);
252}
253
254/**
255 * batadv_nc_purge_orig - purges all nc node data attached to the given
256 * originator
257 * @bat_priv: the bat priv with all the soft interface information
258 * @orig_node: orig_node with the nc node entries to be purged
259 * @to_purge: function in charge to decide whether an entry has to be purged or
260 * not. This function takes the nc node as argument and has to return
261 * a boolean value: true if the entry has to be deleted, false
262 * otherwise
263 */
264void batadv_nc_purge_orig(struct batadv_priv *bat_priv,
265 struct batadv_orig_node *orig_node,
266 bool (*to_purge)(struct batadv_priv *,
267 struct batadv_nc_node *))
268{
269 /* Check incoming nc_nodes of this orig_node */
270 batadv_nc_purge_orig_nc_nodes(bat_priv, &orig_node->in_coding_list,
271 &orig_node->in_coding_list_lock,
272 to_purge);
273
274 /* Check outgoing nc_nodes of this orig_node */
275 batadv_nc_purge_orig_nc_nodes(bat_priv, &orig_node->out_coding_list,
276 &orig_node->out_coding_list_lock,
277 to_purge);
278}
279
280/**
281 * batadv_nc_purge_orig_hash - traverse the entire originator hash to purge
282 * nc nodes that have timed out
283 * @bat_priv: the bat priv with all the soft interface information
284 */
285static void batadv_nc_purge_orig_hash(struct batadv_priv *bat_priv)
286{
287 struct batadv_hashtable *hash = bat_priv->orig_hash;
288 struct hlist_head *head;
289 struct batadv_orig_node *orig_node;
290 uint32_t i;
291
292 if (!hash)
293 return;
294
295 /* For each orig_node */
296 for (i = 0; i < hash->size; i++) {
297 head = &hash->table[i];
298
299 rcu_read_lock();
300 hlist_for_each_entry_rcu(orig_node, head, hash_entry)
301 batadv_nc_purge_orig(bat_priv, orig_node,
302 batadv_nc_to_purge_nc_node);
303 rcu_read_unlock();
304 }
305}
306
307/**
308 * batadv_nc_purge_paths - traverse all nc paths in the hash and remove
309 * unused ones
310 * @bat_priv: the bat priv with all the soft interface information
311 * @hash: hash table containing the nc paths to check
312 * @to_purge: function in charge to decide whether an entry has to be purged or
313 * not. This function takes the nc path as argument and has to return
314 * a boolean value: true if the entry has to be deleted, false
315 * otherwise
316 */
317static void batadv_nc_purge_paths(struct batadv_priv *bat_priv,
318 struct batadv_hashtable *hash,
319 bool (*to_purge)(struct batadv_priv *,
320 struct batadv_nc_path *))
321{
322 struct hlist_head *head;
323 struct hlist_node *node_tmp;
324 struct batadv_nc_path *nc_path;
325 spinlock_t *lock; /* Protects lists in hash */
326 uint32_t i;
327
328 for (i = 0; i < hash->size; i++) {
329 head = &hash->table[i];
330 lock = &hash->list_locks[i];
331
332 /* For each nc_path in this bin */
333 spin_lock_bh(lock);
334 hlist_for_each_entry_safe(nc_path, node_tmp, head, hash_entry) {
335 /* if a helper function has been passed as parameter,
336 * ask it if the entry has to be purged or not
337 */
338 if (to_purge && !to_purge(bat_priv, nc_path))
339 continue;
340
341 /* purging a non-empty nc_path should never happen, but
342 * is observed under high CPU load. Delay the purging
343 * until next iteration to allow the packet_list to be
344 * emptied first.
345 */
346 if (unlikely(!list_empty(&nc_path->packet_list))) {
347 net_ratelimited_function(printk,
348 KERN_WARNING
349 "Skipping free of non-empty nc_path (%pM -> %pM)!\n",
350 nc_path->prev_hop,
351 nc_path->next_hop);
352 continue;
353 }
354
355 /* nc_path is unused, so remove it */
356 batadv_dbg(BATADV_DBG_NC, bat_priv,
357 "Remove nc_path %pM -> %pM\n",
358 nc_path->prev_hop, nc_path->next_hop);
359 hlist_del_rcu(&nc_path->hash_entry);
360 batadv_nc_path_free_ref(nc_path);
361 }
362 spin_unlock_bh(lock);
363 }
364}
365
366/**
367 * batadv_nc_hash_key_gen - computes the nc_path hash key
368 * @key: buffer to hold the final hash key
369 * @src: source ethernet mac address going into the hash key
370 * @dst: destination ethernet mac address going into the hash key
371 */
372static void batadv_nc_hash_key_gen(struct batadv_nc_path *key, const char *src,
373 const char *dst)
374{
375 memcpy(key->prev_hop, src, sizeof(key->prev_hop));
376 memcpy(key->next_hop, dst, sizeof(key->next_hop));
377}
378
379/**
380 * batadv_nc_hash_choose - compute the hash value for an nc path
381 * @data: data to hash
382 * @size: size of the hash table
383 *
384 * Returns the selected index in the hash table for the given data.
385 */
386static uint32_t batadv_nc_hash_choose(const void *data, uint32_t size)
387{
388 const struct batadv_nc_path *nc_path = data;
389 uint32_t hash = 0;
390
391 hash = batadv_hash_bytes(hash, &nc_path->prev_hop,
392 sizeof(nc_path->prev_hop));
393 hash = batadv_hash_bytes(hash, &nc_path->next_hop,
394 sizeof(nc_path->next_hop));
395
396 hash += (hash << 3);
397 hash ^= (hash >> 11);
398 hash += (hash << 15);
399
400 return hash % size;
401}
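
The two MAC addresses are mixed byte by byte and a final avalanche is applied before the modulo reduction. A userspace sketch of the computation; the body of hash_bytes() is an assumption about what batadv_hash_bytes() (defined in hash.h, outside this patch) does:

#include <stdint.h>
#include <stdio.h>

/* one-at-a-time byte mixing step, assumed to match batadv_hash_bytes() */
static uint32_t hash_bytes(uint32_t hash, const void *data, uint32_t size)
{
        const uint8_t *key = data;
        uint32_t i;

        for (i = 0; i < size; i++) {
                hash += key[i];
                hash += (hash << 10);
                hash ^= (hash >> 6);
        }
        return hash;
}

int main(void)
{
        const uint8_t src[6] = { 0x02, 0, 0, 0, 0, 0x01 };
        const uint8_t dst[6] = { 0x02, 0, 0, 0, 0, 0x02 };
        uint32_t hash = 0, size = 64;   /* example table size */

        hash = hash_bytes(hash, src, sizeof(src));
        hash = hash_bytes(hash, dst, sizeof(dst));

        /* final avalanche, as in batadv_nc_hash_choose() */
        hash += (hash << 3);
        hash ^= (hash >> 11);
        hash += (hash << 15);

        printf("bin = %u\n", (unsigned)(hash % size));
        return 0;
}
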
402
403/**
404 * batadv_nc_hash_compare - comparing function used in the network coding hash
405 * tables
406 * @node: node in the local table
407 * @data2: second object to compare the node to
408 *
409 * Returns 1 if the two entries are the same, 0 otherwise
410 */
411static int batadv_nc_hash_compare(const struct hlist_node *node,
412 const void *data2)
413{
414 const struct batadv_nc_path *nc_path1, *nc_path2;
415
416 nc_path1 = container_of(node, struct batadv_nc_path, hash_entry);
417 nc_path2 = data2;
418
419 /* Return 1 if the two keys are identical */
420 if (memcmp(nc_path1->prev_hop, nc_path2->prev_hop,
421 sizeof(nc_path1->prev_hop)) != 0)
422 return 0;
423
424 if (memcmp(nc_path1->next_hop, nc_path2->next_hop,
425 sizeof(nc_path1->next_hop)) != 0)
426 return 0;
427
428 return 1;
429}
430
431/**
432 * batadv_nc_hash_find - search for an existing nc path and return it
433 * @hash: hash table containing the nc path
434 * @data: search key
435 *
436 * Returns the nc_path if found, NULL otherwise.
437 */
438static struct batadv_nc_path *
439batadv_nc_hash_find(struct batadv_hashtable *hash,
440 void *data)
441{
442 struct hlist_head *head;
443 struct batadv_nc_path *nc_path, *nc_path_tmp = NULL;
444 int index;
445
446 if (!hash)
447 return NULL;
448
449 index = batadv_nc_hash_choose(data, hash->size);
450 head = &hash->table[index];
451
452 rcu_read_lock();
453 hlist_for_each_entry_rcu(nc_path, head, hash_entry) {
454 if (!batadv_nc_hash_compare(&nc_path->hash_entry, data))
455 continue;
456
457 if (!atomic_inc_not_zero(&nc_path->refcount))
458 continue;
459
460 nc_path_tmp = nc_path;
461 break;
462 }
463 rcu_read_unlock();
464
465 return nc_path_tmp;
466}
467
468/**
469 * batadv_nc_send_packet - send non-coded packet and free nc_packet struct
470 * @nc_packet: the nc packet to send
471 */
472static void batadv_nc_send_packet(struct batadv_nc_packet *nc_packet)
473{
474 batadv_send_skb_packet(nc_packet->skb,
475 nc_packet->neigh_node->if_incoming,
476 nc_packet->nc_path->next_hop);
477 nc_packet->skb = NULL;
478 batadv_nc_packet_free(nc_packet);
479}
480
481/**
482 * batadv_nc_sniffed_purge - Checks timestamp of given sniffed nc_packet.
483 * @bat_priv: the bat priv with all the soft interface information
484 * @nc_path: the nc path the packet belongs to
485 * @nc_packet: the nc packet to be checked
486 *
487 * Checks whether the given sniffed (overheard) nc_packet has hit its buffering
488 * timeout. If so, the packet is no longer kept and the entry deleted from the
489 * queue. Has to be called with the appropriate locks.
490 *
491 * Returns false if the entry in the FIFO queue has not timed out yet, true
492 * otherwise.
493 */
494static bool batadv_nc_sniffed_purge(struct batadv_priv *bat_priv,
495 struct batadv_nc_path *nc_path,
496 struct batadv_nc_packet *nc_packet)
497{
498 unsigned long timeout = bat_priv->nc.max_buffer_time;
499 bool res = false;
500
501 /* Packets are added to tail, so the remaining packets did not time
502 * out and we can stop processing the current queue
503 */
504 if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE &&
505 !batadv_has_timed_out(nc_packet->timestamp, timeout))
506 goto out;
507
508 /* purge nc packet */
509 list_del(&nc_packet->list);
510 batadv_nc_packet_free(nc_packet);
511
512 res = true;
513
514out:
515 return res;
516}
517
518/**
519 * batadv_nc_fwd_flush - Checks the timestamp of the given nc packet.
520 * @bat_priv: the bat priv with all the soft interface information
521 * @nc_path: the nc path the packet belongs to
522 * @nc_packet: the nc packet to be checked
523 *
524 * Checks whether the given nc packet has hit its forward timeout. If so, the
525 * packet is no longer delayed, immediately sent and the entry deleted from the
526 * queue. Has to be called with the appropriate locks.
527 *
528 * Returns false if the entry in the FIFO queue has not timed out yet, true
529 * otherwise.
530 */
531static bool batadv_nc_fwd_flush(struct batadv_priv *bat_priv,
532 struct batadv_nc_path *nc_path,
533 struct batadv_nc_packet *nc_packet)
534{
535 unsigned long timeout = bat_priv->nc.max_fwd_delay;
536
537 /* Packets are added to tail, so the remaining packets did not time
538 * out and we can stop processing the current queue
539 */
540 if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE &&
541 !batadv_has_timed_out(nc_packet->timestamp, timeout))
542 return false;
543
544 /* Send packet */
545 batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD);
546 batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES,
547 nc_packet->skb->len + ETH_HLEN);
548 list_del(&nc_packet->list);
549 batadv_nc_send_packet(nc_packet);
550
551 return true;
552}
553
554/**
555 * batadv_nc_process_nc_paths - traverse given nc packet pool and free timed out
556 * nc packets
557 * @bat_priv: the bat priv with all the soft interface information
558 * @hash: to be processed hash table
559 * @process_fn: function called to process the given nc packet. Must return
560 * true to let this function proceed with the next packet;
561 * otherwise the rest of the current queue is skipped.
562 */
563static void
564batadv_nc_process_nc_paths(struct batadv_priv *bat_priv,
565 struct batadv_hashtable *hash,
566 bool (*process_fn)(struct batadv_priv *,
567 struct batadv_nc_path *,
568 struct batadv_nc_packet *))
569{
570 struct hlist_head *head;
571 struct batadv_nc_packet *nc_packet, *nc_packet_tmp;
572 struct batadv_nc_path *nc_path;
573 bool ret;
574 int i;
575
576 if (!hash)
577 return;
578
579 /* Loop hash table bins */
580 for (i = 0; i < hash->size; i++) {
581 head = &hash->table[i];
582
583 /* Loop coding paths */
584 rcu_read_lock();
585 hlist_for_each_entry_rcu(nc_path, head, hash_entry) {
586 /* Loop packets */
587 spin_lock_bh(&nc_path->packet_list_lock);
588 list_for_each_entry_safe(nc_packet, nc_packet_tmp,
589 &nc_path->packet_list, list) {
590 ret = process_fn(bat_priv, nc_path, nc_packet);
591 if (!ret)
592 break;
593 }
594 spin_unlock_bh(&nc_path->packet_list_lock);
595 }
596 rcu_read_unlock();
597 }
598}
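
Packets are always appended to the tail of a packet_list, so timestamps along a queue are non-decreasing; a process_fn that returns false therefore stops the walk at the first entry that is still fresh. A small sketch of this early-exit pattern (hypothetical names, an array standing in for the list):

#include <stdio.h>

struct entry {
        unsigned long timestamp;        /* monotonically increasing */
};

/* Flush entries older than 'timeout' and stop at the first fresh one,
 * since everything behind it is newer still.
 */
static int flush_timed_out(const struct entry *queue, int n,
                           unsigned long now, unsigned long timeout)
{
        int i;

        for (i = 0; i < n; i++)
                if (now - queue[i].timestamp < timeout)
                        break;

        return i;       /* number of flushed entries */
}

int main(void)
{
        struct entry q[] = { { 10 }, { 20 }, { 90 }, { 95 } };

        /* now=100, timeout=50: only the first two entries have timed out */
        printf("flushed %d entries\n", flush_timed_out(q, 4, 100, 50));
        return 0;
}
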
599
600/**
601 * batadv_nc_worker - periodic task for house keeping related to network coding
602 * @work: kernel work struct
603 */
604static void batadv_nc_worker(struct work_struct *work)
605{
606 struct delayed_work *delayed_work;
607 struct batadv_priv_nc *priv_nc;
608 struct batadv_priv *bat_priv;
609 unsigned long timeout;
610
611 delayed_work = container_of(work, struct delayed_work, work);
612 priv_nc = container_of(delayed_work, struct batadv_priv_nc, work);
613 bat_priv = container_of(priv_nc, struct batadv_priv, nc);
614
615 batadv_nc_purge_orig_hash(bat_priv);
616 batadv_nc_purge_paths(bat_priv, bat_priv->nc.coding_hash,
617 batadv_nc_to_purge_nc_path_coding);
618 batadv_nc_purge_paths(bat_priv, bat_priv->nc.decoding_hash,
619 batadv_nc_to_purge_nc_path_decoding);
620
621 timeout = bat_priv->nc.max_fwd_delay;
622
623 if (batadv_has_timed_out(bat_priv->nc.timestamp_fwd_flush, timeout)) {
624 batadv_nc_process_nc_paths(bat_priv, bat_priv->nc.coding_hash,
625 batadv_nc_fwd_flush);
626 bat_priv->nc.timestamp_fwd_flush = jiffies;
627 }
628
629 if (batadv_has_timed_out(bat_priv->nc.timestamp_sniffed_purge,
630 bat_priv->nc.max_buffer_time)) {
631 batadv_nc_process_nc_paths(bat_priv, bat_priv->nc.decoding_hash,
632 batadv_nc_sniffed_purge);
633 bat_priv->nc.timestamp_sniffed_purge = jiffies;
634 }
635
636 /* Schedule a new check */
637 batadv_nc_start_timer(bat_priv);
638}
639
640/**
641 * batadv_can_nc_with_orig - checks whether the given orig node is suitable for
642 * coding or not
643 * @bat_priv: the bat priv with all the soft interface information
644 * @orig_node: neighboring orig node which may be used as nc candidate
645 * @ogm_packet: incoming ogm packet also used for the checks
646 *
647 * Returns true if all of the following conditions hold:
648 * 1) the OGM has the most recent sequence number,
649 * 2) the TTL has been decremented by exactly one,
650 * 3) the OGM was received from the first hop from orig_node,
651 * 4) the TQ value of the OGM is above bat_priv->nc.min_tq.
652 */
653static bool batadv_can_nc_with_orig(struct batadv_priv *bat_priv,
654 struct batadv_orig_node *orig_node,
655 struct batadv_ogm_packet *ogm_packet)
656{
657 if (orig_node->last_real_seqno != ntohl(ogm_packet->seqno))
658 return false;
659 if (orig_node->last_ttl != ogm_packet->header.ttl + 1)
660 return false;
661 if (!batadv_compare_eth(ogm_packet->orig, ogm_packet->prev_sender))
662 return false;
663 if (ogm_packet->tq < bat_priv->nc.min_tq)
664 return false;
665
666 return true;
667}
668
669/**
670 * batadv_nc_find_nc_node - search for an existing nc node and return it
671 * @orig_node: orig node originating the ogm packet
672 * @orig_neigh_node: neighboring orig node from which we received the ogm packet
673 * (can be equal to orig_node)
674 * @in_coding: traverse incoming or outgoing network coding list
675 *
676 * Returns the nc_node if found, NULL otherwise.
677 */
678static struct batadv_nc_node *
679batadv_nc_find_nc_node(struct batadv_orig_node *orig_node,
680 struct batadv_orig_node *orig_neigh_node,
681 bool in_coding)
682{
683 struct batadv_nc_node *nc_node, *nc_node_out = NULL;
684 struct list_head *list;
685
686 if (in_coding)
687 list = &orig_neigh_node->in_coding_list;
688 else
689 list = &orig_neigh_node->out_coding_list;
690
691 /* Traverse list of nc_nodes to orig_node */
692 rcu_read_lock();
693 list_for_each_entry_rcu(nc_node, list, list) {
694 if (!batadv_compare_eth(nc_node->addr, orig_node->orig))
695 continue;
696
697 if (!atomic_inc_not_zero(&nc_node->refcount))
698 continue;
699
700 /* Found a match */
701 nc_node_out = nc_node;
702 break;
703 }
704 rcu_read_unlock();
705
706 return nc_node_out;
707}
708
709/**
710 * batadv_nc_get_nc_node - retrieves an nc node or creates the entry if it was
711 * not found
712 * @bat_priv: the bat priv with all the soft interface information
713 * @orig_node: orig node originating the ogm packet
714 * @orig_neigh_node: neighboring orig node from which we received the ogm packet
715 * (can be equal to orig_node)
716 * @in_coding: traverse incoming or outgoing network coding list
717 *
718 * Returns the nc_node if found or created, NULL in case of an error.
719 */
720static struct batadv_nc_node *
721batadv_nc_get_nc_node(struct batadv_priv *bat_priv,
722 struct batadv_orig_node *orig_node,
723 struct batadv_orig_node *orig_neigh_node,
724 bool in_coding)
725{
726 struct batadv_nc_node *nc_node;
727 spinlock_t *lock; /* Used to lock list selected by "int in_coding" */
728 struct list_head *list;
729
730 /* Check if nc_node is already added */
731 nc_node = batadv_nc_find_nc_node(orig_node, orig_neigh_node, in_coding);
732
733 /* Node found */
734 if (nc_node)
735 return nc_node;
736
737 nc_node = kzalloc(sizeof(*nc_node), GFP_ATOMIC);
738 if (!nc_node)
739 return NULL;
740
741 if (!atomic_inc_not_zero(&orig_neigh_node->refcount))
742 goto free;
743
744 /* Initialize nc_node */
745 INIT_LIST_HEAD(&nc_node->list);
746 memcpy(nc_node->addr, orig_node->orig, ETH_ALEN);
747 nc_node->orig_node = orig_neigh_node;
748 atomic_set(&nc_node->refcount, 2);
749
750 /* Select ingoing or outgoing coding node */
751 if (in_coding) {
752 lock = &orig_neigh_node->in_coding_list_lock;
753 list = &orig_neigh_node->in_coding_list;
754 } else {
755 lock = &orig_neigh_node->out_coding_list_lock;
756 list = &orig_neigh_node->out_coding_list;
757 }
758
759 batadv_dbg(BATADV_DBG_NC, bat_priv, "Adding nc_node %pM -> %pM\n",
760 nc_node->addr, nc_node->orig_node->orig);
761
762 /* Add nc_node to orig_node */
763 spin_lock_bh(lock);
764 list_add_tail_rcu(&nc_node->list, list);
765 spin_unlock_bh(lock);
766
767 return nc_node;
768
769free:
770 kfree(nc_node);
771 return NULL;
772}
773
774/**
775 * batadv_nc_update_nc_node - updates stored incoming and outgoing nc node structs
776 * (best called on incoming OGMs)
777 * @bat_priv: the bat priv with all the soft interface information
778 * @orig_node: orig node originating the ogm packet
779 * @orig_neigh_node: neighboring orig node from which we received the ogm packet
780 * (can be equal to orig_node)
781 * @ogm_packet: incoming ogm packet
782 * @is_single_hop_neigh: orig_node is a single hop neighbor
783 */
784void batadv_nc_update_nc_node(struct batadv_priv *bat_priv,
785 struct batadv_orig_node *orig_node,
786 struct batadv_orig_node *orig_neigh_node,
787 struct batadv_ogm_packet *ogm_packet,
788 int is_single_hop_neigh)
789{
790 struct batadv_nc_node *in_nc_node = NULL, *out_nc_node = NULL;
791
792 /* Check if network coding is enabled */
793 if (!atomic_read(&bat_priv->network_coding))
794 goto out;
795
796 /* accept ogms from 'good' neighbors and single hop neighbors */
797 if (!batadv_can_nc_with_orig(bat_priv, orig_node, ogm_packet) &&
798 !is_single_hop_neigh)
799 goto out;
800
801 /* Add orig_node as in_nc_node on hop */
802 in_nc_node = batadv_nc_get_nc_node(bat_priv, orig_node,
803 orig_neigh_node, true);
804 if (!in_nc_node)
805 goto out;
806
807 in_nc_node->last_seen = jiffies;
808
809 /* Add hop as out_nc_node on orig_node */
810 out_nc_node = batadv_nc_get_nc_node(bat_priv, orig_neigh_node,
811 orig_node, false);
812 if (!out_nc_node)
813 goto out;
814
815 out_nc_node->last_seen = jiffies;
816
817out:
818 if (in_nc_node)
819 batadv_nc_node_free_ref(in_nc_node);
820 if (out_nc_node)
821 batadv_nc_node_free_ref(out_nc_node);
822}
823
824/**
825 * batadv_nc_get_path - get existing nc_path or allocate a new one
826 * @bat_priv: the bat priv with all the soft interface information
827 * @hash: hash table containing the nc path
828 * @src: ethernet source address - first half of the nc path search key
829 * @dst: ethernet destination address - second half of the nc path search key
830 *
831 * Returns pointer to nc_path if the path was found or created, returns NULL
832 * on error.
833 */
834static struct batadv_nc_path *batadv_nc_get_path(struct batadv_priv *bat_priv,
835 struct batadv_hashtable *hash,
836 uint8_t *src,
837 uint8_t *dst)
838{
839 int hash_added;
840 struct batadv_nc_path *nc_path, nc_path_key;
841
842 batadv_nc_hash_key_gen(&nc_path_key, src, dst);
843
844 /* Search for existing nc_path */
845 nc_path = batadv_nc_hash_find(hash, (void *)&nc_path_key);
846
847 if (nc_path) {
848 /* Set timestamp to delay removal of nc_path */
849 nc_path->last_valid = jiffies;
850 return nc_path;
851 }
852
853 /* No existing nc_path was found; create a new */
854 nc_path = kzalloc(sizeof(*nc_path), GFP_ATOMIC);
855
856 if (!nc_path)
857 return NULL;
858
859 /* Initialize nc_path */
860 INIT_LIST_HEAD(&nc_path->packet_list);
861 spin_lock_init(&nc_path->packet_list_lock);
862 atomic_set(&nc_path->refcount, 2);
863 nc_path->last_valid = jiffies;
864 memcpy(nc_path->next_hop, dst, ETH_ALEN);
865 memcpy(nc_path->prev_hop, src, ETH_ALEN);
866
867 batadv_dbg(BATADV_DBG_NC, bat_priv, "Adding nc_path %pM -> %pM\n",
868 nc_path->prev_hop,
869 nc_path->next_hop);
870
871 /* Add nc_path to hash table */
872 hash_added = batadv_hash_add(hash, batadv_nc_hash_compare,
873 batadv_nc_hash_choose, &nc_path_key,
874 &nc_path->hash_entry);
875
876 if (hash_added < 0) {
877 kfree(nc_path);
878 return NULL;
879 }
880
881 return nc_path;
882}
883
884/**
885 * batadv_nc_random_weight_tq - scale the receiver's TQ-value to avoid unfair
886 * selection of a receiver with slightly lower TQ than the other
887 * @tq: to be weighted tq value
888 */
889static uint8_t batadv_nc_random_weight_tq(uint8_t tq)
890{
891 uint8_t rand_val; uint32_t rand_tq;
892
893 get_random_bytes(&rand_val, sizeof(rand_val));
894
895 /* randomize the estimated packet loss (max TQ - estimated TQ) */
896 rand_tq = rand_val * (BATADV_TQ_MAX_VALUE - tq);
897
898 /* normalize the randomized packet loss */
899 rand_tq /= BATADV_TQ_MAX_VALUE;
900
901 /* convert to (randomized) estimated tq again */
902 return BATADV_TQ_MAX_VALUE - rand_tq;
903}
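
A userspace sketch of the weighting arithmetic; BATADV_TQ_MAX_VALUE is assumed to be 255 and the random byte is passed in by the caller for illustration. Note that the intermediate product exceeds 8 bits, so it needs a wider type:

#include <stdint.h>
#include <stdio.h>

#define TQ_MAX_VALUE 255        /* assumed value of BATADV_TQ_MAX_VALUE */

/* The packet loss part of the TQ (TQ_MAX - tq) is scaled by a random
 * byte, so a receiver with slightly lower TQ still wins now and then.
 */
static uint8_t weight_tq(uint8_t tq, uint8_t rand_val)
{
        uint32_t rand_tq = (uint32_t)rand_val * (TQ_MAX_VALUE - tq);

        rand_tq /= TQ_MAX_VALUE;

        return TQ_MAX_VALUE - (uint8_t)rand_tq;
}

int main(void)
{
        /* for tq=200 the weighted value lies in [200, 255] */
        printf("%u %u\n", (unsigned)weight_tq(200, 0),
               (unsigned)weight_tq(200, 255));
        return 0;
}
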
904
905/**
906 * batadv_nc_memxor - XOR destination with source
907 * @dst: byte array to XOR into
908 * @src: byte array to XOR from
909 * @len: length of destination array
910 */
911static void batadv_nc_memxor(char *dst, const char *src, unsigned int len)
912{
913 unsigned int i;
914
915 for (i = 0; i < len; ++i)
916 dst[i] ^= src[i];
917}
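
XOR is its own inverse, which is the whole decoding story: applying the same operand a second time restores the original bytes. A minimal standalone check of that property (memxor() mirrors batadv_nc_memxor() above):

#include <assert.h>
#include <string.h>

static void memxor(char *dst, const char *src, unsigned int len)
{
        unsigned int i;

        for (i = 0; i < len; ++i)
                dst[i] ^= src[i];
}

int main(void)
{
        char a[4] = "abc", b[4] = "xyz", coded[4];

        /* code: coded = a ^ b */
        memcpy(coded, a, sizeof(coded));
        memxor(coded, b, sizeof(coded));

        /* decode: coded ^ b == a, since x ^ y ^ y == x */
        memxor(coded, b, sizeof(coded));
        assert(memcmp(coded, a, sizeof(coded)) == 0);
        return 0;
}
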
918
919/**
920 * batadv_nc_code_packets - code a received unicast_packet with an nc packet
921 * into a coded_packet and send it
922 * @bat_priv: the bat priv with all the soft interface information
923 * @skb: data skb to forward
924 * @ethhdr: pointer to the ethernet header inside the skb
925 * @nc_packet: structure containing the packet to the skb can be coded with
926 * @neigh_node: next hop to forward packet to
927 *
928 * Returns true if both packets are consumed, false otherwise.
929 */
930static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
931 struct sk_buff *skb,
932 struct ethhdr *ethhdr,
933 struct batadv_nc_packet *nc_packet,
934 struct batadv_neigh_node *neigh_node)
935{
936 uint8_t tq_weighted_neigh, tq_weighted_coding;
937 struct sk_buff *skb_dest, *skb_src;
938 struct batadv_unicast_packet *packet1;
939 struct batadv_unicast_packet *packet2;
940 struct batadv_coded_packet *coded_packet;
941 struct batadv_neigh_node *neigh_tmp, *router_neigh;
942 struct batadv_neigh_node *router_coding = NULL;
943 uint8_t *first_source, *first_dest, *second_source, *second_dest;
944 __be32 packet_id1, packet_id2;
945 size_t count;
946 bool res = false;
947 int coding_len;
948 int unicast_size = sizeof(*packet1);
949 int coded_size = sizeof(*coded_packet);
950 int header_add = coded_size - unicast_size;
951
952 router_neigh = batadv_orig_node_get_router(neigh_node->orig_node);
953 if (!router_neigh)
954 goto out;
955
956 neigh_tmp = nc_packet->neigh_node;
957 router_coding = batadv_orig_node_get_router(neigh_tmp->orig_node);
958 if (!router_coding)
959 goto out;
960
961 tq_weighted_neigh = batadv_nc_random_weight_tq(router_neigh->tq_avg);
962 tq_weighted_coding = batadv_nc_random_weight_tq(router_coding->tq_avg);
963
964 /* Select one destination for the MAC-header dst-field based on
965 * weighted TQ-values.
966 */
967 if (tq_weighted_neigh >= tq_weighted_coding) {
968 /* Destination from nc_packet is selected for MAC-header */
969 first_dest = nc_packet->nc_path->next_hop;
970 first_source = nc_packet->nc_path->prev_hop;
971 second_dest = neigh_node->addr;
972 second_source = ethhdr->h_source;
973 packet1 = (struct batadv_unicast_packet *)nc_packet->skb->data;
974 packet2 = (struct batadv_unicast_packet *)skb->data;
975 packet_id1 = nc_packet->packet_id;
976 packet_id2 = batadv_skb_crc32(skb,
977 skb->data + sizeof(*packet2));
978 } else {
979 /* Destination for skb is selected for MAC-header */
980 first_dest = neigh_node->addr;
981 first_source = ethhdr->h_source;
982 second_dest = nc_packet->nc_path->next_hop;
983 second_source = nc_packet->nc_path->prev_hop;
984 packet1 = (struct batadv_unicast_packet *)skb->data;
985 packet2 = (struct batadv_unicast_packet *)nc_packet->skb->data;
986 packet_id1 = batadv_skb_crc32(skb,
987 skb->data + sizeof(*packet1));
988 packet_id2 = nc_packet->packet_id;
989 }
990
991 /* Instead of zero padding the smallest data buffer, we
992 * code into the largest.
993 */
994 if (skb->len <= nc_packet->skb->len) {
995 skb_dest = nc_packet->skb;
996 skb_src = skb;
997 } else {
998 skb_dest = skb;
999 skb_src = nc_packet->skb;
1000 }
1001
1002 /* coding_len is used when decoding the shorter packet */
1003 coding_len = skb_src->len - unicast_size;
1004
1005 if (skb_linearize(skb_dest) < 0 || skb_linearize(skb_src) < 0)
1006 goto out;
1007
1008 skb_push(skb_dest, header_add);
1009
1010 coded_packet = (struct batadv_coded_packet *)skb_dest->data;
1011 skb_reset_mac_header(skb_dest);
1012
1013 coded_packet->header.packet_type = BATADV_CODED;
1014 coded_packet->header.version = BATADV_COMPAT_VERSION;
1015 coded_packet->header.ttl = packet1->header.ttl;
1016
1017 /* Info about first unicast packet */
1018 memcpy(coded_packet->first_source, first_source, ETH_ALEN);
1019 memcpy(coded_packet->first_orig_dest, packet1->dest, ETH_ALEN);
1020 coded_packet->first_crc = packet_id1;
1021 coded_packet->first_ttvn = packet1->ttvn;
1022
1023 /* Info about second unicast packet */
1024 memcpy(coded_packet->second_dest, second_dest, ETH_ALEN);
1025 memcpy(coded_packet->second_source, second_source, ETH_ALEN);
1026 memcpy(coded_packet->second_orig_dest, packet2->dest, ETH_ALEN);
1027 coded_packet->second_crc = packet_id2;
1028 coded_packet->second_ttl = packet2->header.ttl;
1029 coded_packet->second_ttvn = packet2->ttvn;
1030 coded_packet->coded_len = htons(coding_len);
1031
1032 /* This is where the magic happens: Code skb_src into skb_dest */
1033 batadv_nc_memxor(skb_dest->data + coded_size,
1034 skb_src->data + unicast_size, coding_len);
1035
1036 /* Update counters accordingly */
1037 if (BATADV_SKB_CB(skb_src)->decoded &&
1038 BATADV_SKB_CB(skb_dest)->decoded) {
1039 /* Both packets are recoded */
1040 count = skb_src->len + ETH_HLEN;
1041 count += skb_dest->len + ETH_HLEN;
1042 batadv_add_counter(bat_priv, BATADV_CNT_NC_RECODE, 2);
1043 batadv_add_counter(bat_priv, BATADV_CNT_NC_RECODE_BYTES, count);
1044 } else if (!BATADV_SKB_CB(skb_src)->decoded &&
1045 !BATADV_SKB_CB(skb_dest)->decoded) {
1046 /* Both packets are newly coded */
1047 count = skb_src->len + ETH_HLEN;
1048 count += skb_dest->len + ETH_HLEN;
1049 batadv_add_counter(bat_priv, BATADV_CNT_NC_CODE, 2);
1050 batadv_add_counter(bat_priv, BATADV_CNT_NC_CODE_BYTES, count);
1051 } else if (BATADV_SKB_CB(skb_src)->decoded &&
1052 !BATADV_SKB_CB(skb_dest)->decoded) {
1053 /* skb_src recoded and skb_dest is newly coded */
1054 batadv_inc_counter(bat_priv, BATADV_CNT_NC_RECODE);
1055 batadv_add_counter(bat_priv, BATADV_CNT_NC_RECODE_BYTES,
1056 skb_src->len + ETH_HLEN);
1057 batadv_inc_counter(bat_priv, BATADV_CNT_NC_CODE);
1058 batadv_add_counter(bat_priv, BATADV_CNT_NC_CODE_BYTES,
1059 skb_dest->len + ETH_HLEN);
1060 } else if (!BATADV_SKB_CB(skb_src)->decoded &&
1061 BATADV_SKB_CB(skb_dest)->decoded) {
1062 /* skb_src is newly coded and skb_dest is recoded */
1063 batadv_inc_counter(bat_priv, BATADV_CNT_NC_CODE);
1064 batadv_add_counter(bat_priv, BATADV_CNT_NC_CODE_BYTES,
1065 skb_src->len + ETH_HLEN);
1066 batadv_inc_counter(bat_priv, BATADV_CNT_NC_RECODE);
1067 batadv_add_counter(bat_priv, BATADV_CNT_NC_RECODE_BYTES,
1068 skb_dest->len + ETH_HLEN);
1069 }
1070
1071 /* skb_src is now coded into skb_dest, so free it */
1072 kfree_skb(skb_src);
1073
1074 /* avoid duplicate free of skb from nc_packet */
1075 nc_packet->skb = NULL;
1076 batadv_nc_packet_free(nc_packet);
1077
1078 /* Send the coded packet and return true */
1079 batadv_send_skb_packet(skb_dest, neigh_node->if_incoming, first_dest);
1080 res = true;
1081out:
1082 if (router_neigh)
1083 batadv_neigh_node_free_ref(router_neigh);
1084 if (router_coding)
1085 batadv_neigh_node_free_ref(router_coding);
1086 return res;
1087}
1088
1089/**
1090 * batadv_nc_skb_coding_possible - check if the given skb may be used for coding
1091 * @skb: data skb to forward
1092 * @dst: destination mac address of the other skb to code with
1093 * @src: source mac address of skb
1094 *
1095 * Whenever we network code a packet we have to check whether we received it in
1096 * a network coded form. If so, we may not be able to use it for coding because
1097 * some neighbors may also have received (overheard) the packet in the network
1098 * coded form without being able to decode it. It is hard to know which of the
1099 * neighboring nodes was able to decode the packet, therefore we can only
1100 * re-code the packet if the source of the previous encoded packet is involved.
1101 * Since the source encoded the packet we can be certain it has all necessary
1102 * decode information.
1103 *
1104 * Returns true if coding of a decoded packet is allowed.
1105 */
1106static bool batadv_nc_skb_coding_possible(struct sk_buff *skb,
1107 uint8_t *dst, uint8_t *src)
1108{
1109 if (BATADV_SKB_CB(skb)->decoded && !batadv_compare_eth(dst, src))
1110 return false;
1111 else
1112 return true;
1113}
1114
1115/**
1116 * batadv_nc_path_search - Find the coding path matching in_nc_node and
1117 * out_nc_node to retrieve a buffered packet that can be used for coding.
1118 * @bat_priv: the bat priv with all the soft interface information
1119 * @in_nc_node: pointer to skb next hop's neighbor nc node
1120 * @out_nc_node: pointer to skb source's neighbor nc node
1121 * @skb: data skb to forward
1122 * @eth_dst: next hop mac address of skb
1123 *
1124 * Returns an nc_packet that can be coded with the skb, NULL if none is found.
1125 */
1126static struct batadv_nc_packet *
1127batadv_nc_path_search(struct batadv_priv *bat_priv,
1128 struct batadv_nc_node *in_nc_node,
1129 struct batadv_nc_node *out_nc_node,
1130 struct sk_buff *skb,
1131 uint8_t *eth_dst)
1132{
1133 struct batadv_nc_path *nc_path, nc_path_key;
1134 struct batadv_nc_packet *nc_packet_out = NULL;
1135 struct batadv_nc_packet *nc_packet, *nc_packet_tmp;
1136 struct batadv_hashtable *hash = bat_priv->nc.coding_hash;
1137 int idx;
1138
1139 if (!hash)
1140 return NULL;
1141
1142 /* Create a bare nc_path to use as the search key */
1143 batadv_nc_hash_key_gen(&nc_path_key, in_nc_node->addr,
1144 out_nc_node->addr);
1145 idx = batadv_nc_hash_choose(&nc_path_key, hash->size);
1146
1147 /* Check for coding opportunities in this nc_path */
1148 rcu_read_lock();
1149 hlist_for_each_entry_rcu(nc_path, &hash->table[idx], hash_entry) {
1150 if (!batadv_compare_eth(nc_path->prev_hop, in_nc_node->addr))
1151 continue;
1152
1153 if (!batadv_compare_eth(nc_path->next_hop, out_nc_node->addr))
1154 continue;
1155
1156 spin_lock_bh(&nc_path->packet_list_lock);
1157 if (list_empty(&nc_path->packet_list)) {
1158 spin_unlock_bh(&nc_path->packet_list_lock);
1159 continue;
1160 }
1161
1162 list_for_each_entry_safe(nc_packet, nc_packet_tmp,
1163 &nc_path->packet_list, list) {
1164 if (!batadv_nc_skb_coding_possible(nc_packet->skb,
1165 eth_dst,
1166 in_nc_node->addr))
1167 continue;
1168
1169 /* Coding opportunity is found! */
1170 list_del(&nc_packet->list);
1171 nc_packet_out = nc_packet;
1172 break;
1173 }
1174
1175 spin_unlock_bh(&nc_path->packet_list_lock);
1176 break;
1177 }
1178 rcu_read_unlock();
1179
1180 return nc_packet_out;
1181}
1182
1183/**
1184 * batadv_nc_skb_src_search - Loops through the list of neighboring nodes of the
1185 * skb's sender (may be equal to the originator).
1186 * @bat_priv: the bat priv with all the soft interface information
1187 * @skb: data skb to forward
1188 * @eth_dst: next hop mac address of skb
1189 * @eth_src: source mac address of skb
1190 * @in_nc_node: pointer to skb next hop's neighbor nc node
1191 *
1192 * Returns an nc packet if a suitable coding packet was found, NULL otherwise.
1193 */
1194static struct batadv_nc_packet *
1195batadv_nc_skb_src_search(struct batadv_priv *bat_priv,
1196 struct sk_buff *skb,
1197 uint8_t *eth_dst,
1198 uint8_t *eth_src,
1199 struct batadv_nc_node *in_nc_node)
1200{
1201 struct batadv_orig_node *orig_node;
1202 struct batadv_nc_node *out_nc_node;
1203 struct batadv_nc_packet *nc_packet = NULL;
1204
1205 orig_node = batadv_orig_hash_find(bat_priv, eth_src);
1206 if (!orig_node)
1207 return NULL;
1208
1209 rcu_read_lock();
1210 list_for_each_entry_rcu(out_nc_node,
1211 &orig_node->out_coding_list, list) {
1212 /* Check if the skb is decoded and if recoding is possible */
1213 if (!batadv_nc_skb_coding_possible(skb,
1214 out_nc_node->addr, eth_src))
1215 continue;
1216
1217 /* Search for an opportunity in this nc_path */
1218 nc_packet = batadv_nc_path_search(bat_priv, in_nc_node,
1219 out_nc_node, skb, eth_dst);
1220 if (nc_packet)
1221 break;
1222 }
1223 rcu_read_unlock();
1224
1225 batadv_orig_node_free_ref(orig_node);
1226 return nc_packet;
1227}
1228
1229/**
1230 * batadv_nc_skb_store_before_coding - set the ethernet src and dst of the
1231 * unicast skb before it is stored for use in later decoding
1232 * @bat_priv: the bat priv with all the soft interface information
1233 * @skb: data skb to store
1234 * @eth_dst_new: new destination mac address of skb
1235 */
1236static void batadv_nc_skb_store_before_coding(struct batadv_priv *bat_priv,
1237 struct sk_buff *skb,
1238 uint8_t *eth_dst_new)
1239{
1240 struct ethhdr *ethhdr;
1241
1242 /* Copy skb header to change the mac header */
1243 skb = pskb_copy(skb, GFP_ATOMIC);
1244 if (!skb)
1245 return;
1246
1247 /* Set the mac header as if we actually sent the packet uncoded */
1248 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1249 memcpy(ethhdr->h_source, ethhdr->h_dest, ETH_ALEN);
1250 memcpy(ethhdr->h_dest, eth_dst_new, ETH_ALEN);
1251
1252 /* Set data pointer to MAC header to mimic packets from our tx path */
1253 skb_push(skb, ETH_HLEN);
1254
1255 /* Add the packet to the decoding packet pool */
1256 batadv_nc_skb_store_for_decoding(bat_priv, skb);
1257
1258 /* batadv_nc_skb_store_for_decoding() clones the skb, so we must free
1259 * our ref
1260 */
1261 kfree_skb(skb);
1262}
1263
1264/**
1265 * batadv_nc_skb_dst_search - Loops through list of neighboring nodes to dst.
1266 * @skb: data skb to forward
1267 * @neigh_node: next hop to forward packet to
1268 * @ethhdr: pointer to the ethernet header inside the skb
1269 *
1270 * Loops through the list of neighboring nodes the next hop has a good connection
1271 * to (receives OGMs with sufficient quality). We need to find a neighbor of our
1272 * next hop that potentially sent a packet which our next hop also received
1273 * (overheard) and has stored for later decoding.
1274 *
1275 * Returns true if the skb was consumed (encoded packet sent) or false otherwise
1276 */
1277static bool batadv_nc_skb_dst_search(struct sk_buff *skb,
1278 struct batadv_neigh_node *neigh_node,
1279 struct ethhdr *ethhdr)
1280{
1281 struct net_device *netdev = neigh_node->if_incoming->soft_iface;
1282 struct batadv_priv *bat_priv = netdev_priv(netdev);
1283 struct batadv_orig_node *orig_node = neigh_node->orig_node;
1284 struct batadv_nc_node *nc_node;
1285 struct batadv_nc_packet *nc_packet = NULL;
1286
1287 rcu_read_lock();
1288 list_for_each_entry_rcu(nc_node, &orig_node->in_coding_list, list) {
1289 /* Search for coding opportunity with this in_nc_node */
1290 nc_packet = batadv_nc_skb_src_search(bat_priv, skb,
1291 neigh_node->addr,
1292 ethhdr->h_source, nc_node);
1293
1294 /* Opportunity was found, so stop searching */
1295 if (nc_packet)
1296 break;
1297 }
1298 rcu_read_unlock();
1299
1300 if (!nc_packet)
1301 return false;
1302
1303 /* Save packets for later decoding */
1304 batadv_nc_skb_store_before_coding(bat_priv, skb,
1305 neigh_node->addr);
1306 batadv_nc_skb_store_before_coding(bat_priv, nc_packet->skb,
1307 nc_packet->neigh_node->addr);
1308
1309 /* Code and send packets */
1310 if (batadv_nc_code_packets(bat_priv, skb, ethhdr, nc_packet,
1311 neigh_node))
1312 return true;
1313
1314 /* Coding failed (e.g. out of memory), so we have to free the buffered
1315 * packet to avoid memory leaks. The skb passed as argument will be dealt with
1316 * by the calling function.
1317 */
1318 batadv_nc_send_packet(nc_packet);
1319 return false;
1320}
1321
1322/**
1323 * batadv_nc_skb_add_to_path - buffer skb for later encoding / decoding
1324 * @skb: skb to add to path
1325 * @nc_path: path to add skb to
1326 * @neigh_node: next hop to forward packet to
1327 * @packet_id: checksum to identify packet
1328 *
1329 * Returns true if the packet was buffered or false in case of an error.
1330 */
1331static bool batadv_nc_skb_add_to_path(struct sk_buff *skb,
1332 struct batadv_nc_path *nc_path,
1333 struct batadv_neigh_node *neigh_node,
1334 __be32 packet_id)
1335{
1336 struct batadv_nc_packet *nc_packet;
1337
1338 nc_packet = kzalloc(sizeof(*nc_packet), GFP_ATOMIC);
1339 if (!nc_packet)
1340 return false;
1341
1342 /* Initialize nc_packet */
1343 nc_packet->timestamp = jiffies;
1344 nc_packet->packet_id = packet_id;
1345 nc_packet->skb = skb;
1346 nc_packet->neigh_node = neigh_node;
1347 nc_packet->nc_path = nc_path;
1348
1349 /* Add coding packet to list */
1350 spin_lock_bh(&nc_path->packet_list_lock);
1351 list_add_tail(&nc_packet->list, &nc_path->packet_list);
1352 spin_unlock_bh(&nc_path->packet_list_lock);
1353
1354 return true;
1355}
1356
1357/**
1358 * batadv_nc_skb_forward - try to code a packet or add it to the coding packet
1359 * buffer
1360 * @skb: data skb to forward
1361 * @neigh_node: next hop to forward packet to
1362 * @ethhdr: pointer to the ethernet header inside the skb
1363 *
1364 * Returns true if the skb was consumed (encoded packet sent) or false otherwise
1365 */
1366bool batadv_nc_skb_forward(struct sk_buff *skb,
1367 struct batadv_neigh_node *neigh_node,
1368 struct ethhdr *ethhdr)
1369{
1370 const struct net_device *netdev = neigh_node->if_incoming->soft_iface;
1371 struct batadv_priv *bat_priv = netdev_priv(netdev);
1372 struct batadv_unicast_packet *packet;
1373 struct batadv_nc_path *nc_path;
1374 __be32 packet_id;
1375 u8 *payload;
1376
1377 /* Check if network coding is enabled */
1378 if (!atomic_read(&bat_priv->network_coding))
1379 goto out;
1380
1381 /* We only handle unicast packets */
1382 payload = skb_network_header(skb);
1383 packet = (struct batadv_unicast_packet *)payload;
1384 if (packet->header.packet_type != BATADV_UNICAST)
1385 goto out;
1386
1387 /* Try to find a coding opportunity and send the skb if one is found */
1388 if (batadv_nc_skb_dst_search(skb, neigh_node, ethhdr))
1389 return true;
1390
1391 /* Find or create a nc_path for this src-dst pair */
1392 nc_path = batadv_nc_get_path(bat_priv,
1393 bat_priv->nc.coding_hash,
1394 ethhdr->h_source,
1395 neigh_node->addr);
1396
1397 if (!nc_path)
1398 goto out;
1399
1400 /* Add skb to nc_path */
1401 packet_id = batadv_skb_crc32(skb, payload + sizeof(*packet));
1402 if (!batadv_nc_skb_add_to_path(skb, nc_path, neigh_node, packet_id))
1403 goto free_nc_path;
1404
1405 /* Packet is consumed */
1406 return true;
1407
1408free_nc_path:
1409 batadv_nc_path_free_ref(nc_path);
1410out:
1411 /* Packet is not consumed */
1412 return false;
1413}
1414
1415/**
1416 * batadv_nc_skb_store_for_decoding - save a clone of the skb which can be used
1417 * when decoding coded packets
1418 * @bat_priv: the bat priv with all the soft interface information
1419 * @skb: data skb to store
1420 */
1421void batadv_nc_skb_store_for_decoding(struct batadv_priv *bat_priv,
1422 struct sk_buff *skb)
1423{
1424 struct batadv_unicast_packet *packet;
1425 struct batadv_nc_path *nc_path;
1426 struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
1427 __be32 packet_id;
1428 u8 *payload;
1429
1430 /* Check if network coding is enabled */
1431 if (!atomic_read(&bat_priv->network_coding))
1432 goto out;
1433
1434 /* Check for supported packet type */
1435 payload = skb_network_header(skb);
1436 packet = (struct batadv_unicast_packet *)payload;
1437 if (packet->header.packet_type != BATADV_UNICAST)
1438 goto out;
1439
1440 /* Find existing nc_path or create a new */
1441 nc_path = batadv_nc_get_path(bat_priv,
1442 bat_priv->nc.decoding_hash,
1443 ethhdr->h_source,
1444 ethhdr->h_dest);
1445
1446 if (!nc_path)
1447 goto out;
1448
1449 /* Clone skb and adjust skb->data to point at batman header */
1450 skb = skb_clone(skb, GFP_ATOMIC);
1451 if (unlikely(!skb))
1452 goto free_nc_path;
1453
1454 if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
1455 goto free_skb;
1456
1457 if (unlikely(!skb_pull_rcsum(skb, ETH_HLEN)))
1458 goto free_skb;
1459
1460 /* Add skb to nc_path */
1461 packet_id = batadv_skb_crc32(skb, payload + sizeof(*packet));
1462 if (!batadv_nc_skb_add_to_path(skb, nc_path, NULL, packet_id))
1463 goto free_skb;
1464
1465 batadv_inc_counter(bat_priv, BATADV_CNT_NC_BUFFER);
1466 return;
1467
1468free_skb:
1469 kfree_skb(skb);
1470free_nc_path:
1471 batadv_nc_path_free_ref(nc_path);
1472out:
1473 return;
1474}
1475
1476/**
1477 * batadv_nc_skb_store_sniffed_unicast - check if a received unicast packet
1478 * should be saved in the decoding buffer and, if so, store it there
1479 * @bat_priv: the bat priv with all the soft interface information
1480 * @skb: unicast skb to store
1481 */
1482void batadv_nc_skb_store_sniffed_unicast(struct batadv_priv *bat_priv,
1483 struct sk_buff *skb)
1484{
1485 struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
1486
1487 if (batadv_is_my_mac(ethhdr->h_dest))
1488 return;
1489
1490 /* Set data pointer to MAC header to mimic packets from our tx path */
1491 skb_push(skb, ETH_HLEN);
1492
1493 batadv_nc_skb_store_for_decoding(bat_priv, skb);
1494}
1495
1496/**
1497 * batadv_nc_skb_decode_packet - decode given skb using the decode data stored
1498 * in nc_packet
1499 * @skb: unicast skb to decode
1500 * @nc_packet: decode data needed to decode the skb
1501 *
1502 * Returns pointer to decoded unicast packet if the packet was decoded or NULL
1503 * in case of an error.
1504 */
1505static struct batadv_unicast_packet *
1506batadv_nc_skb_decode_packet(struct sk_buff *skb,
1507 struct batadv_nc_packet *nc_packet)
1508{
1509 const int h_size = sizeof(struct batadv_unicast_packet);
1510 const int h_diff = sizeof(struct batadv_coded_packet) - h_size;
1511 struct batadv_unicast_packet *unicast_packet;
1512 struct batadv_coded_packet coded_packet_tmp;
1513 struct ethhdr *ethhdr, ethhdr_tmp;
1514 uint8_t *orig_dest, ttl, ttvn;
1515 unsigned int coding_len;
1516
1517 /* Save headers temporarily */
1518 memcpy(&coded_packet_tmp, skb->data, sizeof(coded_packet_tmp));
1519 memcpy(&ethhdr_tmp, skb_mac_header(skb), sizeof(ethhdr_tmp));
1520
1521 if (skb_cow(skb, 0) < 0)
1522 return NULL;
1523
1524 if (unlikely(!skb_pull_rcsum(skb, h_diff)))
1525 return NULL;
1526
1527 /* Data points to the batman header, so set the mac header 14 bytes
1528 * before it and the network header to data
1529 */
1530 skb_set_mac_header(skb, -ETH_HLEN);
1531 skb_reset_network_header(skb);
1532
1533 /* Reconstruct original mac header */
1534 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1535 memcpy(ethhdr, &ethhdr_tmp, sizeof(*ethhdr));
1536
1537 /* Select the correct unicast header information based on the location
1538 * of our mac address in the coded_packet header
1539 */
1540 if (batadv_is_my_mac(coded_packet_tmp.second_dest)) {
1541 /* If we are the second destination the packet was overheard,
1542 * so the Ethernet address must be copied to h_dest and
1543 * pkt_type changed from PACKET_OTHERHOST to PACKET_HOST
1544 */
1545 memcpy(ethhdr->h_dest, coded_packet_tmp.second_dest, ETH_ALEN);
1546 skb->pkt_type = PACKET_HOST;
1547
1548 orig_dest = coded_packet_tmp.second_orig_dest;
1549 ttl = coded_packet_tmp.second_ttl;
1550 ttvn = coded_packet_tmp.second_ttvn;
1551 } else {
1552 orig_dest = coded_packet_tmp.first_orig_dest;
1553 ttl = coded_packet_tmp.header.ttl;
1554 ttvn = coded_packet_tmp.first_ttvn;
1555 }
1556
1557 coding_len = ntohs(coded_packet_tmp.coded_len);
1558
1559 if (coding_len > skb->len)
1560 return NULL;
1561
1562 /* Here the magic is reversed:
1563 * extract the missing packet from the received coded packet
1564 */
1565 batadv_nc_memxor(skb->data + h_size,
1566 nc_packet->skb->data + h_size,
1567 coding_len);
1568
1569 /* Resize decoded skb if decoded with larger packet */
1570 if (nc_packet->skb->len > coding_len + h_size)
1571 pskb_trim_rcsum(skb, coding_len + h_size);
1572
1573 /* Create decoded unicast packet */
1574 unicast_packet = (struct batadv_unicast_packet *)skb->data;
1575 unicast_packet->header.packet_type = BATADV_UNICAST;
1576 unicast_packet->header.version = BATADV_COMPAT_VERSION;
1577 unicast_packet->header.ttl = ttl;
1578 memcpy(unicast_packet->dest, orig_dest, ETH_ALEN);
1579 unicast_packet->ttvn = ttvn;
1580
1581 batadv_nc_packet_free(nc_packet);
1582 return unicast_packet;
1583}
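
The length handling deserves a note: the payloads are XORed over the length of the shorter packet (coded_len), the coded packet travels with the longer payload, and the node that decodes the shorter packet trims the buffer back afterwards. A standalone sketch of that arithmetic (buffer contents and names are made up):

#include <assert.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned char longp[8] = "LONGPAY";     /* 7 bytes + NUL */
        unsigned char shortp[4] = "HI!";        /* coding_len = 4 */
        unsigned char coded[8];
        unsigned int coding_len = sizeof(shortp);
        unsigned int i;

        /* code into the larger buffer, so no zero padding is needed */
        memcpy(coded, longp, sizeof(longp));
        for (i = 0; i < coding_len; i++)
                coded[i] ^= shortp[i];

        /* decoding the short packet: XOR with the long payload over
         * coding_len bytes, then "trim" the result to coding_len
         */
        for (i = 0; i < coding_len; i++)
                coded[i] ^= longp[i];
        assert(memcmp(coded, shortp, coding_len) == 0);

        printf("recovered %u bytes\n", coding_len);
        return 0;
}
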
1584
1585/**
1586 * batadv_nc_find_decoding_packet - search through buffered decoding data to
1587 * find the data needed to decode the coded packet
1588 * @bat_priv: the bat priv with all the soft interface information
1589 * @ethhdr: pointer to the ethernet header inside the coded packet
1590 * @coded: coded packet we try to find decode data for
1591 *
1592 * Returns pointer to nc packet if the needed data was found or NULL otherwise.
1593 */
1594static struct batadv_nc_packet *
1595batadv_nc_find_decoding_packet(struct batadv_priv *bat_priv,
1596 struct ethhdr *ethhdr,
1597 struct batadv_coded_packet *coded)
1598{
1599 struct batadv_hashtable *hash = bat_priv->nc.decoding_hash;
1600 struct batadv_nc_packet *tmp_nc_packet, *nc_packet = NULL;
1601 struct batadv_nc_path *nc_path, nc_path_key;
1602 uint8_t *dest, *source;
1603 __be32 packet_id;
1604 int index;
1605
1606 if (!hash)
1607 return NULL;
1608
1609 /* Select the correct packet id based on the location of our mac-addr */
1610 dest = ethhdr->h_source;
1611 if (!batadv_is_my_mac(coded->second_dest)) {
1612 source = coded->second_source;
1613 packet_id = coded->second_crc;
1614 } else {
1615 source = coded->first_source;
1616 packet_id = coded->first_crc;
1617 }
1618
1619 batadv_nc_hash_key_gen(&nc_path_key, source, dest);
1620 index = batadv_nc_hash_choose(&nc_path_key, hash->size);
1621
1622 /* Search for matching coding path */
1623 rcu_read_lock();
1624 hlist_for_each_entry_rcu(nc_path, &hash->table[index], hash_entry) {
1625 /* Find matching nc_packet */
1626 spin_lock_bh(&nc_path->packet_list_lock);
1627 list_for_each_entry(tmp_nc_packet,
1628 &nc_path->packet_list, list) {
1629 if (packet_id == tmp_nc_packet->packet_id) {
1630 list_del(&tmp_nc_packet->list);
1631
1632 nc_packet = tmp_nc_packet;
1633 break;
1634 }
1635 }
1636 spin_unlock_bh(&nc_path->packet_list_lock);
1637
1638 if (nc_packet)
1639 break;
1640 }
1641 rcu_read_unlock();
1642
1643 if (!nc_packet)
1644 batadv_dbg(BATADV_DBG_NC, bat_priv,
1645 "No decoding packet found for %u\n", packet_id);
1646
1647 return nc_packet;
1648}
1649
1650/**
1651 * batadv_nc_recv_coded_packet - try to decode coded packet and enqueue the
1652 * resulting unicast packet
1653 * @skb: incoming coded packet
1654 * @recv_if: pointer to interface this packet was received on
1655 */
1656static int batadv_nc_recv_coded_packet(struct sk_buff *skb,
1657 struct batadv_hard_iface *recv_if)
1658{
1659 struct batadv_priv *bat_priv = netdev_priv(recv_if->soft_iface);
1660 struct batadv_unicast_packet *unicast_packet;
1661 struct batadv_coded_packet *coded_packet;
1662 struct batadv_nc_packet *nc_packet;
1663 struct ethhdr *ethhdr;
1664 int hdr_size = sizeof(*coded_packet);
1665
1666 /* Check if network coding is enabled */
1667 if (!atomic_read(&bat_priv->network_coding))
1668 return NET_RX_DROP;
1669
1670 /* Make sure we can access (and remove) header */
1671 if (unlikely(!pskb_may_pull(skb, hdr_size)))
1672 return NET_RX_DROP;
1673
1674 coded_packet = (struct batadv_coded_packet *)skb->data;
1675 ethhdr = (struct ethhdr *)skb_mac_header(skb);
1676
1677 /* Verify frame is destined for us */
1678 if (!batadv_is_my_mac(ethhdr->h_dest) &&
1679 !batadv_is_my_mac(coded_packet->second_dest))
1680 return NET_RX_DROP;
1681
1682 /* Update stat counter */
1683 if (batadv_is_my_mac(coded_packet->second_dest))
1684 batadv_inc_counter(bat_priv, BATADV_CNT_NC_SNIFFED);
1685
1686 nc_packet = batadv_nc_find_decoding_packet(bat_priv, ethhdr,
1687 coded_packet);
1688 if (!nc_packet) {
1689 batadv_inc_counter(bat_priv, BATADV_CNT_NC_DECODE_FAILED);
1690 return NET_RX_DROP;
1691 }
1692
1693 /* Make the skbs linear, because decoding accesses the entire buffer */
1694 if (skb_linearize(skb) < 0)
1695 goto free_nc_packet;
1696
1697 if (skb_linearize(nc_packet->skb) < 0)
1698 goto free_nc_packet;
1699
1700 /* Decode the packet */
1701 unicast_packet = batadv_nc_skb_decode_packet(skb, nc_packet);
1702 if (!unicast_packet) {
1703 batadv_inc_counter(bat_priv, BATADV_CNT_NC_DECODE_FAILED);
1704 goto free_nc_packet;
1705 }
1706
1707 /* Mark packet as decoded to do correct recoding when forwarding */
1708 BATADV_SKB_CB(skb)->decoded = true;
1709 batadv_inc_counter(bat_priv, BATADV_CNT_NC_DECODE);
1710 batadv_add_counter(bat_priv, BATADV_CNT_NC_DECODE_BYTES,
1711 skb->len + ETH_HLEN);
1712 return batadv_recv_unicast_packet(skb, recv_if);
1713
1714free_nc_packet:
1715 batadv_nc_packet_free(nc_packet);
1716 return NET_RX_DROP;
1717}
1718
1719/**
1720 * batadv_nc_free - clean up network coding memory
1721 * @bat_priv: the bat priv with all the soft interface information
1722 */
1723void batadv_nc_free(struct batadv_priv *bat_priv)
1724{
1725 batadv_recv_handler_unregister(BATADV_CODED);
1726 cancel_delayed_work_sync(&bat_priv->nc.work);
1727
1728 batadv_nc_purge_paths(bat_priv, bat_priv->nc.coding_hash, NULL);
1729 batadv_hash_destroy(bat_priv->nc.coding_hash);
1730 batadv_nc_purge_paths(bat_priv, bat_priv->nc.decoding_hash, NULL);
1731 batadv_hash_destroy(bat_priv->nc.decoding_hash);
1732}
1733
1734/**
1735 * batadv_nc_nodes_seq_print_text - print the nc node information
1736 * @seq: seq file to print on
1737 * @offset: not used
1738 */
1739int batadv_nc_nodes_seq_print_text(struct seq_file *seq, void *offset)
1740{
1741 struct net_device *net_dev = (struct net_device *)seq->private;
1742 struct batadv_priv *bat_priv = netdev_priv(net_dev);
1743 struct batadv_hashtable *hash = bat_priv->orig_hash;
1744 struct batadv_hard_iface *primary_if;
1745 struct hlist_head *head;
1746 struct batadv_orig_node *orig_node;
1747 struct batadv_nc_node *nc_node;
1748 int i;
1749
1750 primary_if = batadv_seq_print_text_primary_if_get(seq);
1751 if (!primary_if)
1752 goto out;
1753
1754 /* Traverse list of originators */
1755 for (i = 0; i < hash->size; i++) {
1756 head = &hash->table[i];
1757
1758 /* For each orig_node in this bin */
1759 rcu_read_lock();
1760 hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
1761 seq_printf(seq, "Node: %pM\n", orig_node->orig);
1762
1763 seq_puts(seq, " Ingoing: ");
1764 /* For each in_nc_node to this orig_node */
1765 list_for_each_entry_rcu(nc_node,
1766 &orig_node->in_coding_list,
1767 list)
1768 seq_printf(seq, "%pM ",
1769 nc_node->addr);
1770 seq_puts(seq, "\n");
1771
1772 seq_puts(seq, " Outgoing: ");
1773 /* For each out_nc_node from this orig_node */
1774 list_for_each_entry_rcu(nc_node,
1775 &orig_node->out_coding_list,
1776 list)
1777 seq_printf(seq, "%pM ",
1778 nc_node->addr);
1779 seq_puts(seq, "\n\n");
1780 }
1781 rcu_read_unlock();
1782 }
1783
1784out:
1785 if (primary_if)
1786 batadv_hardif_free_ref(primary_if);
1787 return 0;
1788}
1789
1790/**
1791 * batadv_nc_init_debugfs - create nc folder and related files in debugfs
1792 * @bat_priv: the bat priv with all the soft interface information
1793 */
1794int batadv_nc_init_debugfs(struct batadv_priv *bat_priv)
1795{
1796 struct dentry *nc_dir, *file;
1797
1798 nc_dir = debugfs_create_dir("nc", bat_priv->debug_dir);
1799 if (!nc_dir)
1800 goto out;
1801
1802 file = debugfs_create_u8("min_tq", S_IRUGO | S_IWUSR, nc_dir,
1803 &bat_priv->nc.min_tq);
1804 if (!file)
1805 goto out;
1806
1807 file = debugfs_create_u32("max_fwd_delay", S_IRUGO | S_IWUSR, nc_dir,
1808 &bat_priv->nc.max_fwd_delay);
1809 if (!file)
1810 goto out;
1811
1812 file = debugfs_create_u32("max_buffer_time", S_IRUGO | S_IWUSR, nc_dir,
1813 &bat_priv->nc.max_buffer_time);
1814 if (!file)
1815 goto out;
1816
1817 return 0;
1818
1819out:
1820 return -ENOMEM;
1821}
diff --git a/net/batman-adv/network-coding.h b/net/batman-adv/network-coding.h
new file mode 100644
index 000000000000..4fa6d0caddbd
--- /dev/null
+++ b/net/batman-adv/network-coding.h
@@ -0,0 +1,123 @@
1/* Copyright (C) 2012-2013 B.A.T.M.A.N. contributors:
2 *
3 * Martin Hundebøll, Jeppe Ledet-Pedersen
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA
18 */
19
20#ifndef _NET_BATMAN_ADV_NETWORK_CODING_H_
21#define _NET_BATMAN_ADV_NETWORK_CODING_H_
22
23#ifdef CONFIG_BATMAN_ADV_NC
24
25int batadv_nc_init(struct batadv_priv *bat_priv);
26void batadv_nc_free(struct batadv_priv *bat_priv);
27void batadv_nc_update_nc_node(struct batadv_priv *bat_priv,
28 struct batadv_orig_node *orig_node,
29 struct batadv_orig_node *orig_neigh_node,
30 struct batadv_ogm_packet *ogm_packet,
31 int is_single_hop_neigh);
32void batadv_nc_purge_orig(struct batadv_priv *bat_priv,
33 struct batadv_orig_node *orig_node,
34 bool (*to_purge)(struct batadv_priv *,
35 struct batadv_nc_node *));
36void batadv_nc_init_bat_priv(struct batadv_priv *bat_priv);
37void batadv_nc_init_orig(struct batadv_orig_node *orig_node);
38bool batadv_nc_skb_forward(struct sk_buff *skb,
39 struct batadv_neigh_node *neigh_node,
40 struct ethhdr *ethhdr);
41void batadv_nc_skb_store_for_decoding(struct batadv_priv *bat_priv,
42 struct sk_buff *skb);
43void batadv_nc_skb_store_sniffed_unicast(struct batadv_priv *bat_priv,
44 struct sk_buff *skb);
45int batadv_nc_nodes_seq_print_text(struct seq_file *seq, void *offset);
46int batadv_nc_init_debugfs(struct batadv_priv *bat_priv);
47
48#else /* ifdef CONFIG_BATMAN_ADV_NC */
49
50static inline int batadv_nc_init(struct batadv_priv *bat_priv)
51{
52 return 0;
53}
54
55static inline void batadv_nc_free(struct batadv_priv *bat_priv)
56{
57 return;
58}
59
60static inline void
61batadv_nc_update_nc_node(struct batadv_priv *bat_priv,
62 struct batadv_orig_node *orig_node,
63 struct batadv_orig_node *orig_neigh_node,
64 struct batadv_ogm_packet *ogm_packet,
65 int is_single_hop_neigh)
66{
67 return;
68}
69
70static inline void
71batadv_nc_purge_orig(struct batadv_priv *bat_priv,
72 struct batadv_orig_node *orig_node,
73 bool (*to_purge)(struct batadv_priv *,
74 struct batadv_nc_node *))
75{
76 return;
77}
78
79static inline void batadv_nc_init_bat_priv(struct batadv_priv *bat_priv)
80{
81 return;
82}
83
84static inline void batadv_nc_init_orig(struct batadv_orig_node *orig_node)
85{
86 return;
87}
88
89static inline bool batadv_nc_skb_forward(struct sk_buff *skb,
90 struct batadv_neigh_node *neigh_node,
91 struct ethhdr *ethhdr)
92{
93 return false;
94}
95
96static inline void
97batadv_nc_skb_store_for_decoding(struct batadv_priv *bat_priv,
98 struct sk_buff *skb)
99{
100 return;
101}
102
103static inline void
104batadv_nc_skb_store_sniffed_unicast(struct batadv_priv *bat_priv,
105 struct sk_buff *skb)
106{
107 return;
108}
109
110static inline int batadv_nc_nodes_seq_print_text(struct seq_file *seq,
111 void *offset)
112{
113 return 0;
114}
115
116static inline int batadv_nc_init_debugfs(struct batadv_priv *bat_priv)
117{
118 return 0;
119}
120
121#endif /* ifdef CONFIG_BATMAN_ADV_NC */
122
123#endif /* _NET_BATMAN_ADV_NETWORK_CODING_H_ */
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 96fb80b724dc..2f3452546636 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -28,6 +28,7 @@
28#include "unicast.h" 28#include "unicast.h"
29#include "soft-interface.h" 29#include "soft-interface.h"
30#include "bridge_loop_avoidance.h" 30#include "bridge_loop_avoidance.h"
31#include "network-coding.h"
31 32
32/* hash class keys */ 33/* hash class keys */
33static struct lock_class_key batadv_orig_hash_lock_class_key; 34static struct lock_class_key batadv_orig_hash_lock_class_key;
@@ -142,6 +143,9 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
142 143
143 spin_unlock_bh(&orig_node->neigh_list_lock); 144 spin_unlock_bh(&orig_node->neigh_list_lock);
144 145
146 /* Free nc_nodes */
147 batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
148
145 batadv_frag_list_free(&orig_node->frag_list); 149 batadv_frag_list_free(&orig_node->frag_list);
146 batadv_tt_global_del_orig(orig_node->bat_priv, orig_node, 150 batadv_tt_global_del_orig(orig_node->bat_priv, orig_node,
147 "originator timed out"); 151 "originator timed out");
@@ -219,6 +223,8 @@ struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
219 spin_lock_init(&orig_node->neigh_list_lock); 223 spin_lock_init(&orig_node->neigh_list_lock);
220 spin_lock_init(&orig_node->tt_buff_lock); 224 spin_lock_init(&orig_node->tt_buff_lock);
221 225
226 batadv_nc_init_orig(orig_node);
227
222 /* extra reference for return */ 228 /* extra reference for return */
223 atomic_set(&orig_node->refcount, 2); 229 atomic_set(&orig_node->refcount, 2);
224 230
@@ -459,7 +465,7 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
459 neigh_node_tmp->tq_avg); 465 neigh_node_tmp->tq_avg);
460 } 466 }
461 467
462 seq_printf(seq, "\n"); 468 seq_puts(seq, "\n");
463 batman_count++; 469 batman_count++;
464 470
465next: 471next:
@@ -469,7 +475,7 @@ next:
469 } 475 }
470 476
471 if (batman_count == 0) 477 if (batman_count == 0)
472 seq_printf(seq, "No batman nodes in range ...\n"); 478 seq_puts(seq, "No batman nodes in range ...\n");
473 479
474out: 480out:
475 if (primary_if) 481 if (primary_if)
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index ed0aa89bbf8b..a51ccfc39da4 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -30,6 +30,7 @@ enum batadv_packettype {
30 BATADV_TT_QUERY = 0x07, 30 BATADV_TT_QUERY = 0x07,
31 BATADV_ROAM_ADV = 0x08, 31 BATADV_ROAM_ADV = 0x08,
32 BATADV_UNICAST_4ADDR = 0x09, 32 BATADV_UNICAST_4ADDR = 0x09,
33 BATADV_CODED = 0x0a,
33}; 34};
34 35
35/** 36/**
@@ -278,4 +279,36 @@ struct batadv_tt_change {
278 uint8_t addr[ETH_ALEN]; 279 uint8_t addr[ETH_ALEN];
279} __packed; 280} __packed;
280 281
282/**
283 * struct batadv_coded_packet - network coded packet
284 * @header: common batman packet header and ttl of first included packet
286 * @first_source: original source of first included packet
287 * @first_orig_dest: original destination of first included packet
288 * @first_crc: checksum of first included packet
289 * @first_ttvn: tt-version number of first included packet
290 * @second_ttl: ttl of second packet
291 * @second_dest: second receiver of this coded packet
292 * @second_source: original source of second included packet
293 * @second_orig_dest: original destination of second included packet
294 * @second_crc: checksum of second included packet
295 * @second_ttvn: tt version number of second included packet
296 * @coded_len: length of network coded part of the payload
297 */
298struct batadv_coded_packet {
299 struct batadv_header header;
300 uint8_t first_ttvn;
301 /* uint8_t first_dest[ETH_ALEN]; - saved in mac header destination */
302 uint8_t first_source[ETH_ALEN];
303 uint8_t first_orig_dest[ETH_ALEN];
304 __be32 first_crc;
305 uint8_t second_ttl;
306 uint8_t second_ttvn;
307 uint8_t second_dest[ETH_ALEN];
308 uint8_t second_source[ETH_ALEN];
309 uint8_t second_orig_dest[ETH_ALEN];
310 __be32 second_crc;
311 __be16 coded_len;
312};
313
281#endif /* _NET_BATMAN_ADV_PACKET_H_ */ 314#endif /* _NET_BATMAN_ADV_PACKET_H_ */
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 5ee21cebbbb0..8f88967ff14b 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -29,6 +29,7 @@
29#include "unicast.h" 29#include "unicast.h"
30#include "bridge_loop_avoidance.h" 30#include "bridge_loop_avoidance.h"
31#include "distributed-arp-table.h" 31#include "distributed-arp-table.h"
32#include "network-coding.h"
32 33
33static int batadv_route_unicast_packet(struct sk_buff *skb, 34static int batadv_route_unicast_packet(struct sk_buff *skb,
34 struct batadv_hard_iface *recv_if); 35 struct batadv_hard_iface *recv_if);
@@ -548,27 +549,37 @@ batadv_find_ifalter_router(struct batadv_orig_node *primary_orig,
548 return router; 549 return router;
549} 550}
550 551
552/**
553 * batadv_check_unicast_packet - Check for malformed unicast packets
554 * @skb: packet to check
555 * @hdr_size: size of header to pull
556 *
557 * Check for short header and bad addresses in given packet. Returns negative
558 * value when check fails and 0 otherwise. The negative value depends on the
559 * reason: -ENODATA for bad header, -EBADR for broadcast destination or source,
560 * and -EREMOTE for non-local (other host) destination.
561 */
551static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size) 562static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size)
552{ 563{
553 struct ethhdr *ethhdr; 564 struct ethhdr *ethhdr;
554 565
555 /* drop packet if it does not have the necessary minimum size */ 566
556 if (unlikely(!pskb_may_pull(skb, hdr_size))) 567 if (unlikely(!pskb_may_pull(skb, hdr_size)))
557 return -1; 568 return -ENODATA;
558 569
559 ethhdr = (struct ethhdr *)skb_mac_header(skb); 570 ethhdr = (struct ethhdr *)skb_mac_header(skb);
560 571
561 /* packet with unicast indication but broadcast recipient */ 572 /* packet with unicast indication but broadcast recipient */
562 if (is_broadcast_ether_addr(ethhdr->h_dest)) 573 if (is_broadcast_ether_addr(ethhdr->h_dest))
563 return -1; 574 return -EBADR;
564 575
565 /* packet with broadcast sender address */ 576 /* packet with broadcast sender address */
566 if (is_broadcast_ether_addr(ethhdr->h_source)) 577 if (is_broadcast_ether_addr(ethhdr->h_source))
567 return -1; 578 return -EBADR;
568 579
569 /* not for me */ 580 /* not for me */
570 if (!batadv_is_my_mac(ethhdr->h_dest)) 581 if (!batadv_is_my_mac(ethhdr->h_dest))
571 return -1; 582 return -EREMOTE;
572 583
573 return 0; 584 return 0;
574} 585}
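Replacing the blanket -1 with distinct errno values lets callers react to the failure reason instead of treating every rejection alike; the receive path below does exactly that for overheard packets. A condensed sketch of the dispatch (hypothetical caller):

    int check = batadv_check_unicast_packet(skb, hdr_size);

    switch (check) {
    case 0:
            break;          /* well-formed and addressed to us */
    case -EREMOTE:
            /* overheard unicast: buffer it for later coded-packet decoding */
            batadv_nc_skb_store_sniffed_unicast(bat_priv, skb);
            /* fall through: still not ours, so drop */
    default:
            return NET_RX_DROP;
    }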
@@ -850,15 +861,18 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
850 /* decrement ttl */ 861 /* decrement ttl */
851 unicast_packet->header.ttl--; 862 unicast_packet->header.ttl--;
852 863
853 /* Update stats counter */ 864 /* network code packet if possible */
854 batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD); 865 if (batadv_nc_skb_forward(skb, neigh_node, ethhdr)) {
855 batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES, 866 ret = NET_RX_SUCCESS;
856 skb->len + ETH_HLEN); 867 } else if (batadv_send_skb_to_orig(skb, orig_node, recv_if)) {
857
858 /* route it */
859 if (batadv_send_skb_to_orig(skb, orig_node, recv_if))
860 ret = NET_RX_SUCCESS; 868 ret = NET_RX_SUCCESS;
861 869
870 /* Update stats counter */
871 batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD);
872 batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES,
873 skb->len + ETH_HLEN);
874 }
875
862out: 876out:
863 if (neigh_node) 877 if (neigh_node)
864 batadv_neigh_node_free_ref(neigh_node); 878 batadv_neigh_node_free_ref(neigh_node);
@@ -1033,7 +1047,7 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
1033 struct batadv_unicast_4addr_packet *unicast_4addr_packet; 1047 struct batadv_unicast_4addr_packet *unicast_4addr_packet;
1034 uint8_t *orig_addr; 1048 uint8_t *orig_addr;
1035 struct batadv_orig_node *orig_node = NULL; 1049 struct batadv_orig_node *orig_node = NULL;
1036 int hdr_size = sizeof(*unicast_packet); 1050 int check, hdr_size = sizeof(*unicast_packet);
1037 bool is4addr; 1051 bool is4addr;
1038 1052
1039 unicast_packet = (struct batadv_unicast_packet *)skb->data; 1053 unicast_packet = (struct batadv_unicast_packet *)skb->data;
@@ -1044,7 +1058,16 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
1044 if (is4addr) 1058 if (is4addr)
1045 hdr_size = sizeof(*unicast_4addr_packet); 1059 hdr_size = sizeof(*unicast_4addr_packet);
1046 1060
1047 if (batadv_check_unicast_packet(skb, hdr_size) < 0) 1061 /* function returns -EREMOTE for promiscuous packets */
1062 check = batadv_check_unicast_packet(skb, hdr_size);
1063
1064 /* Even though the packet is not for us, we might save it to use for
1065 * decoding a later received coded packet
1066 */
1067 if (check == -EREMOTE)
1068 batadv_nc_skb_store_sniffed_unicast(bat_priv, skb);
1069
1070 if (check < 0)
1048 return NET_RX_DROP; 1071 return NET_RX_DROP;
1049 1072
1050 if (!batadv_check_unicast_ttvn(bat_priv, skb)) 1073 if (!batadv_check_unicast_ttvn(bat_priv, skb))
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index a67cffde37ae..263cfd1ccee7 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -27,6 +27,7 @@
27#include "vis.h" 27#include "vis.h"
28#include "gateway_common.h" 28#include "gateway_common.h"
29#include "originator.h" 29#include "originator.h"
30#include "network-coding.h"
30 31
31#include <linux/if_ether.h> 32#include <linux/if_ether.h>
32 33
@@ -39,6 +40,7 @@ int batadv_send_skb_packet(struct sk_buff *skb,
39 struct batadv_hard_iface *hard_iface, 40 struct batadv_hard_iface *hard_iface,
40 const uint8_t *dst_addr) 41 const uint8_t *dst_addr)
41{ 42{
43 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
42 struct ethhdr *ethhdr; 44 struct ethhdr *ethhdr;
43 45
44 if (hard_iface->if_status != BATADV_IF_ACTIVE) 46 if (hard_iface->if_status != BATADV_IF_ACTIVE)
@@ -70,6 +72,9 @@ int batadv_send_skb_packet(struct sk_buff *skb,
70 72
71 skb->dev = hard_iface->net_dev; 73 skb->dev = hard_iface->net_dev;
72 74
75 /* Save a clone of the skb to use when decoding coded packets */
76 batadv_nc_skb_store_for_decoding(bat_priv, skb);
77
73 /* dev_queue_xmit() returns a negative result on error. However on 78 /* dev_queue_xmit() returns a negative result on error. However on
74 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP 79 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
75 * (which is > 0). This will not be treated as an error. 80 * (which is > 0). This will not be treated as an error.
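The buffering works on a clone so the original skb can continue to dev_queue_xmit() untouched; skb_clone() duplicates only the skb metadata while the payload stays shared, keeping the per-packet cost low. A sketch of the pattern (the buffering helper is hypothetical; the assumption is that it takes ownership of the clone and frees it once max_buffer_time expires):

    struct sk_buff *clone;

    /* duplicate the metadata only; the data buffer is shared */
    clone = skb_clone(skb, GFP_ATOMIC);
    if (clone)
            nc_buffer_for_decoding(clone);  /* hypothetical helper */

    /* the original skb proceeds to transmission unmodified */
    dev_queue_xmit(skb);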
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 2711e870f557..403b8c46085e 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -37,6 +37,7 @@
37#include <linux/if_ether.h> 37#include <linux/if_ether.h>
38#include "unicast.h" 38#include "unicast.h"
39#include "bridge_loop_avoidance.h" 39#include "bridge_loop_avoidance.h"
40#include "network-coding.h"
40 41
41 42
42static int batadv_get_settings(struct net_device *dev, struct ethtool_cmd *cmd); 43static int batadv_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
@@ -401,55 +402,6 @@ static void batadv_set_lockdep_class(struct net_device *dev)
401} 402}
402 403
403/** 404/**
404 * batadv_softif_init - Late stage initialization of soft interface
405 * @dev: registered network device to modify
406 *
407 * Returns error code on failures
408 */
409static int batadv_softif_init(struct net_device *dev)
410{
411 batadv_set_lockdep_class(dev);
412
413 return 0;
414}
415
416static const struct net_device_ops batadv_netdev_ops = {
417 .ndo_init = batadv_softif_init,
418 .ndo_open = batadv_interface_open,
419 .ndo_stop = batadv_interface_release,
420 .ndo_get_stats = batadv_interface_stats,
421 .ndo_set_mac_address = batadv_interface_set_mac_addr,
422 .ndo_change_mtu = batadv_interface_change_mtu,
423 .ndo_start_xmit = batadv_interface_tx,
424 .ndo_validate_addr = eth_validate_addr
425};
426
427static void batadv_interface_setup(struct net_device *dev)
428{
429 struct batadv_priv *priv = netdev_priv(dev);
430
431 ether_setup(dev);
432
433 dev->netdev_ops = &batadv_netdev_ops;
434 dev->destructor = free_netdev;
435 dev->tx_queue_len = 0;
436
437 /* can't call min_mtu, because the needed variables
438 * have not been initialized yet
439 */
440 dev->mtu = ETH_DATA_LEN;
441 /* reserve more space in the skbuff for our header */
442 dev->hard_header_len = BATADV_HEADER_LEN;
443
444 /* generate random address */
445 eth_hw_addr_random(dev);
446
447 SET_ETHTOOL_OPS(dev, &batadv_ethtool_ops);
448
449 memset(priv, 0, sizeof(*priv));
450}
451
452/**
453 * batadv_softif_destroy_finish - cleans up the remains of a softif 405 * batadv_softif_destroy_finish - cleans up the remains of a softif
454 * @work: work queue item 406 * @work: work queue item
455 * 407 *
@@ -465,7 +417,6 @@ static void batadv_softif_destroy_finish(struct work_struct *work)
465 cleanup_work); 417 cleanup_work);
466 soft_iface = bat_priv->soft_iface; 418 soft_iface = bat_priv->soft_iface;
467 419
468 batadv_debugfs_del_meshif(soft_iface);
469 batadv_sysfs_del_meshif(soft_iface); 420 batadv_sysfs_del_meshif(soft_iface);
470 421
471 rtnl_lock(); 422 rtnl_lock();
@@ -473,21 +424,22 @@ static void batadv_softif_destroy_finish(struct work_struct *work)
473 rtnl_unlock(); 424 rtnl_unlock();
474} 425}
475 426
476struct net_device *batadv_softif_create(const char *name) 427/**
428 * batadv_softif_init_late - late stage initialization of soft interface
429 * @dev: registered network device to modify
430 *
431 * Returns error code on failures
432 */
433static int batadv_softif_init_late(struct net_device *dev)
477{ 434{
478 struct net_device *soft_iface;
479 struct batadv_priv *bat_priv; 435 struct batadv_priv *bat_priv;
480 int ret; 436 int ret;
481 size_t cnt_len = sizeof(uint64_t) * BATADV_CNT_NUM; 437 size_t cnt_len = sizeof(uint64_t) * BATADV_CNT_NUM;
482 438
483 soft_iface = alloc_netdev(sizeof(*bat_priv), name, 439 batadv_set_lockdep_class(dev);
484 batadv_interface_setup);
485
486 if (!soft_iface)
487 goto out;
488 440
489 bat_priv = netdev_priv(soft_iface); 441 bat_priv = netdev_priv(dev);
490 bat_priv->soft_iface = soft_iface; 442 bat_priv->soft_iface = dev;
491 INIT_WORK(&bat_priv->cleanup_work, batadv_softif_destroy_finish); 443 INIT_WORK(&bat_priv->cleanup_work, batadv_softif_destroy_finish);
492 444
493 /* batadv_interface_stats() needs to be available as soon as 445 /* batadv_interface_stats() needs to be available as soon as
@@ -495,14 +447,7 @@ struct net_device *batadv_softif_create(const char *name)
495 */ 447 */
496 bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(uint64_t)); 448 bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(uint64_t));
497 if (!bat_priv->bat_counters) 449 if (!bat_priv->bat_counters)
498 goto free_soft_iface; 450 return -ENOMEM;
499
500 ret = register_netdevice(soft_iface);
501 if (ret < 0) {
502 pr_err("Unable to register the batman interface '%s': %i\n",
503 name, ret);
504 goto free_bat_counters;
505 }
506 451
507 atomic_set(&bat_priv->aggregated_ogms, 1); 452 atomic_set(&bat_priv->aggregated_ogms, 1);
508 atomic_set(&bat_priv->bonding, 0); 453 atomic_set(&bat_priv->bonding, 0);
@@ -540,49 +485,189 @@ struct net_device *batadv_softif_create(const char *name)
540 bat_priv->primary_if = NULL; 485 bat_priv->primary_if = NULL;
541 bat_priv->num_ifaces = 0; 486 bat_priv->num_ifaces = 0;
542 487
543 ret = batadv_algo_select(bat_priv, batadv_routing_algo); 488 batadv_nc_init_bat_priv(bat_priv);
544 if (ret < 0)
545 goto unreg_soft_iface;
546 489
547 ret = batadv_sysfs_add_meshif(soft_iface); 490 ret = batadv_algo_select(bat_priv, batadv_routing_algo);
548 if (ret < 0) 491 if (ret < 0)
549 goto unreg_soft_iface; 492 goto free_bat_counters;
550 493
551 ret = batadv_debugfs_add_meshif(soft_iface); 494 ret = batadv_debugfs_add_meshif(dev);
552 if (ret < 0) 495 if (ret < 0)
553 goto unreg_sysfs; 496 goto free_bat_counters;
554 497
555 ret = batadv_mesh_init(soft_iface); 498 ret = batadv_mesh_init(dev);
556 if (ret < 0) 499 if (ret < 0)
557 goto unreg_debugfs; 500 goto unreg_debugfs;
558 501
559 return soft_iface; 502 return 0;
560 503
561unreg_debugfs: 504unreg_debugfs:
562 batadv_debugfs_del_meshif(soft_iface); 505 batadv_debugfs_del_meshif(dev);
563unreg_sysfs:
564 batadv_sysfs_del_meshif(soft_iface);
565unreg_soft_iface:
566 free_percpu(bat_priv->bat_counters);
567 unregister_netdevice(soft_iface);
568 return NULL;
569
570free_bat_counters: 506free_bat_counters:
571 free_percpu(bat_priv->bat_counters); 507 free_percpu(bat_priv->bat_counters);
572free_soft_iface: 508
573 free_netdev(soft_iface); 509 return ret;
510}
511
512/**
513 * batadv_softif_slave_add - Add a slave interface to a batadv_soft_interface
514 * @dev: batadv_soft_interface used as master interface
515 * @slave_dev: net_device which should become the slave interface
516 *
517 * Return 0 if successful or error otherwise.
518 */
519static int batadv_softif_slave_add(struct net_device *dev,
520 struct net_device *slave_dev)
521{
522 struct batadv_hard_iface *hard_iface;
523 int ret = -EINVAL;
524
525 hard_iface = batadv_hardif_get_by_netdev(slave_dev);
526 if (!hard_iface || hard_iface->soft_iface != NULL)
527 goto out;
528
529 ret = batadv_hardif_enable_interface(hard_iface, dev->name);
530
574out: 531out:
575 return NULL; 532 if (hard_iface)
533 batadv_hardif_free_ref(hard_iface);
534 return ret;
576} 535}
577 536
578void batadv_softif_destroy(struct net_device *soft_iface) 537/**
538 * batadv_softif_slave_del - Delete a slave iface from a batadv_soft_interface
539 * @dev: batadv_soft_interface used as master interface
540 * @slave_dev: net_device which should be removed from the master interface
541 *
542 * Return 0 if successful or error otherwise.
543 */
544static int batadv_softif_slave_del(struct net_device *dev,
545 struct net_device *slave_dev)
546{
547 struct batadv_hard_iface *hard_iface;
548 int ret = -EINVAL;
549
550 hard_iface = batadv_hardif_get_by_netdev(slave_dev);
551
552 if (!hard_iface || hard_iface->soft_iface != dev)
553 goto out;
554
555 batadv_hardif_disable_interface(hard_iface, BATADV_IF_CLEANUP_KEEP);
556 ret = 0;
557
558out:
559 if (hard_iface)
560 batadv_hardif_free_ref(hard_iface);
561 return ret;
562}
563
564static const struct net_device_ops batadv_netdev_ops = {
565 .ndo_init = batadv_softif_init_late,
566 .ndo_open = batadv_interface_open,
567 .ndo_stop = batadv_interface_release,
568 .ndo_get_stats = batadv_interface_stats,
569 .ndo_set_mac_address = batadv_interface_set_mac_addr,
570 .ndo_change_mtu = batadv_interface_change_mtu,
571 .ndo_start_xmit = batadv_interface_tx,
572 .ndo_validate_addr = eth_validate_addr,
573 .ndo_add_slave = batadv_softif_slave_add,
574 .ndo_del_slave = batadv_softif_slave_del,
575};
576
577/**
578 * batadv_softif_free - Destructor of batadv_soft_interface
579 * @dev: Device to cleanup and remove
580 */
581static void batadv_softif_free(struct net_device *dev)
582{
583 batadv_debugfs_del_meshif(dev);
584 batadv_mesh_free(dev);
585 free_netdev(dev);
586}
587
588/**
589 * batadv_softif_init_early - early stage initialization of soft interface
590 * @dev: registered network device to modify
591 */
592static void batadv_softif_init_early(struct net_device *dev)
593{
594 struct batadv_priv *priv = netdev_priv(dev);
595
596 ether_setup(dev);
597
598 dev->netdev_ops = &batadv_netdev_ops;
599 dev->destructor = batadv_softif_free;
600 dev->tx_queue_len = 0;
601
602 /* can't call min_mtu, because the needed variables
603 * have not been initialized yet
604 */
605 dev->mtu = ETH_DATA_LEN;
606 /* reserve more space in the skbuff for our header */
607 dev->hard_header_len = BATADV_HEADER_LEN;
608
609 /* generate random address */
610 eth_hw_addr_random(dev);
611
612 SET_ETHTOOL_OPS(dev, &batadv_ethtool_ops);
613
614 memset(priv, 0, sizeof(*priv));
615}
616
617struct net_device *batadv_softif_create(const char *name)
618{
619 struct net_device *soft_iface;
620 int ret;
621
622 soft_iface = alloc_netdev(sizeof(struct batadv_priv), name,
623 batadv_softif_init_early);
624 if (!soft_iface)
625 return NULL;
626
627 soft_iface->rtnl_link_ops = &batadv_link_ops;
628
629 ret = register_netdevice(soft_iface);
630 if (ret < 0) {
631 pr_err("Unable to register the batman interface '%s': %i\n",
632 name, ret);
633 free_netdev(soft_iface);
634 return NULL;
635 }
636
637 return soft_iface;
638}
639
640/**
641 * batadv_softif_destroy_sysfs - deletion of batadv_soft_interface via sysfs
642 * @soft_iface: the to-be-removed batman-adv interface
643 */
644void batadv_softif_destroy_sysfs(struct net_device *soft_iface)
579{ 645{
580 struct batadv_priv *bat_priv = netdev_priv(soft_iface); 646 struct batadv_priv *bat_priv = netdev_priv(soft_iface);
581 647
582 batadv_mesh_free(soft_iface);
583 queue_work(batadv_event_workqueue, &bat_priv->cleanup_work); 648 queue_work(batadv_event_workqueue, &bat_priv->cleanup_work);
584} 649}
585 650
651/**
652 * batadv_softif_destroy_netlink - deletion of batadv_soft_interface via netlink
653 * @soft_iface: the to-be-removed batman-adv interface
654 * @head: list pointer
655 */
656static void batadv_softif_destroy_netlink(struct net_device *soft_iface,
657 struct list_head *head)
658{
659 struct batadv_hard_iface *hard_iface;
660
661 list_for_each_entry(hard_iface, &batadv_hardif_list, list) {
662 if (hard_iface->soft_iface == soft_iface)
663 batadv_hardif_disable_interface(hard_iface,
664 BATADV_IF_CLEANUP_KEEP);
665 }
666
667 batadv_sysfs_del_meshif(soft_iface);
668 unregister_netdevice_queue(soft_iface, head);
669}
670
586int batadv_softif_is_valid(const struct net_device *net_dev) 671int batadv_softif_is_valid(const struct net_device *net_dev)
587{ 672{
588 if (net_dev->netdev_ops->ndo_start_xmit == batadv_interface_tx) 673 if (net_dev->netdev_ops->ndo_start_xmit == batadv_interface_tx)
@@ -591,6 +676,13 @@ int batadv_softif_is_valid(const struct net_device *net_dev)
591 return 0; 676 return 0;
592} 677}
593 678
679struct rtnl_link_ops batadv_link_ops __read_mostly = {
680 .kind = "batadv",
681 .priv_size = sizeof(struct batadv_priv),
682 .setup = batadv_softif_init_early,
683 .dellink = batadv_softif_destroy_netlink,
684};
685
594/* ethtool */ 686/* ethtool */
595static int batadv_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 687static int batadv_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
596{ 688{
@@ -662,6 +754,17 @@ static const struct {
662 { "dat_put_rx" }, 754 { "dat_put_rx" },
663 { "dat_cached_reply_tx" }, 755 { "dat_cached_reply_tx" },
664#endif 756#endif
757#ifdef CONFIG_BATMAN_ADV_NC
758 { "nc_code" },
759 { "nc_code_bytes" },
760 { "nc_recode" },
761 { "nc_recode_bytes" },
762 { "nc_buffer" },
763 { "nc_decode" },
764 { "nc_decode_bytes" },
765 { "nc_decode_failed" },
766 { "nc_sniffed" },
767#endif
665}; 768};
666 769
667static void batadv_get_strings(struct net_device *dev, uint32_t stringset, 770static void batadv_get_strings(struct net_device *dev, uint32_t stringset,
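batadv_link_ops only takes effect once it is registered with the rtnetlink core, presumably from the module init path of this series (not shown in this hunk). A sketch of that registration under this assumption:

    static int __init batadv_module_init(void)
    {
            /* make "ip link add ... type batadv" resolve to batadv_link_ops */
            return rtnl_link_register(&batadv_link_ops);
    }

Together with the new ndo_add_slave/ndo_del_slave hooks, a mesh can then be assembled with plain iproute2: ip link add name bat0 type batadv, followed by ip link set dev eth0 master bat0.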
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index 43182e5e603a..2f2472c2ea0d 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -25,7 +25,8 @@ void batadv_interface_rx(struct net_device *soft_iface,
25 struct sk_buff *skb, struct batadv_hard_iface *recv_if, 25 struct sk_buff *skb, struct batadv_hard_iface *recv_if,
26 int hdr_size, struct batadv_orig_node *orig_node); 26 int hdr_size, struct batadv_orig_node *orig_node);
27struct net_device *batadv_softif_create(const char *name); 27struct net_device *batadv_softif_create(const char *name);
28void batadv_softif_destroy(struct net_device *soft_iface); 28void batadv_softif_destroy_sysfs(struct net_device *soft_iface);
29int batadv_softif_is_valid(const struct net_device *net_dev); 29int batadv_softif_is_valid(const struct net_device *net_dev);
30extern struct rtnl_link_ops batadv_link_ops;
30 31
31#endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */ 32#endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index afbba319d73a..15a22efa9a67 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -442,6 +442,9 @@ static BATADV_ATTR(gw_bandwidth, S_IRUGO | S_IWUSR, batadv_show_gw_bwidth,
442#ifdef CONFIG_BATMAN_ADV_DEBUG 442#ifdef CONFIG_BATMAN_ADV_DEBUG
443BATADV_ATTR_SIF_UINT(log_level, S_IRUGO | S_IWUSR, 0, BATADV_DBG_ALL, NULL); 443BATADV_ATTR_SIF_UINT(log_level, S_IRUGO | S_IWUSR, 0, BATADV_DBG_ALL, NULL);
444#endif 444#endif
445#ifdef CONFIG_BATMAN_ADV_NC
446BATADV_ATTR_SIF_BOOL(network_coding, S_IRUGO | S_IWUSR, NULL);
447#endif
445 448
446static struct batadv_attribute *batadv_mesh_attrs[] = { 449static struct batadv_attribute *batadv_mesh_attrs[] = {
447 &batadv_attr_aggregated_ogms, 450 &batadv_attr_aggregated_ogms,
@@ -464,6 +467,9 @@ static struct batadv_attribute *batadv_mesh_attrs[] = {
464#ifdef CONFIG_BATMAN_ADV_DEBUG 467#ifdef CONFIG_BATMAN_ADV_DEBUG
465 &batadv_attr_log_level, 468 &batadv_attr_log_level,
466#endif 469#endif
470#ifdef CONFIG_BATMAN_ADV_NC
471 &batadv_attr_network_coding,
472#endif
467 NULL, 473 NULL,
468}; 474};
469 475
@@ -582,13 +588,15 @@ static ssize_t batadv_store_mesh_iface(struct kobject *kobj,
582 } 588 }
583 589
584 if (status_tmp == BATADV_IF_NOT_IN_USE) { 590 if (status_tmp == BATADV_IF_NOT_IN_USE) {
585 batadv_hardif_disable_interface(hard_iface); 591 batadv_hardif_disable_interface(hard_iface,
592 BATADV_IF_CLEANUP_AUTO);
586 goto unlock; 593 goto unlock;
587 } 594 }
588 595
589 /* if the interface already is in use */ 596 /* if the interface already is in use */
590 if (hard_iface->if_status != BATADV_IF_NOT_IN_USE) 597 if (hard_iface->if_status != BATADV_IF_NOT_IN_USE)
591 batadv_hardif_disable_interface(hard_iface); 598 batadv_hardif_disable_interface(hard_iface,
599 BATADV_IF_CLEANUP_AUTO);
592 600
593 ret = batadv_hardif_enable_interface(hard_iface, buff); 601 ret = batadv_hardif_enable_interface(hard_iface, buff);
594 602
@@ -688,15 +696,10 @@ int batadv_throw_uevent(struct batadv_priv *bat_priv, enum batadv_uev_type type,
688 enum batadv_uev_action action, const char *data) 696 enum batadv_uev_action action, const char *data)
689{ 697{
690 int ret = -ENOMEM; 698 int ret = -ENOMEM;
691 struct batadv_hard_iface *primary_if;
692 struct kobject *bat_kobj; 699 struct kobject *bat_kobj;
693 char *uevent_env[4] = { NULL, NULL, NULL, NULL }; 700 char *uevent_env[4] = { NULL, NULL, NULL, NULL };
694 701
695 primary_if = batadv_primary_if_get_selected(bat_priv); 702 bat_kobj = &bat_priv->soft_iface->dev.kobj;
696 if (!primary_if)
697 goto out;
698
699 bat_kobj = &primary_if->soft_iface->dev.kobj;
700 703
701 uevent_env[0] = kmalloc(strlen(BATADV_UEV_TYPE_VAR) + 704 uevent_env[0] = kmalloc(strlen(BATADV_UEV_TYPE_VAR) +
702 strlen(batadv_uev_type_str[type]) + 1, 705 strlen(batadv_uev_type_str[type]) + 1,
@@ -732,9 +735,6 @@ out:
732 kfree(uevent_env[1]); 735 kfree(uevent_env[1]);
733 kfree(uevent_env[2]); 736 kfree(uevent_env[2]);
734 737
735 if (primary_if)
736 batadv_hardif_free_ref(primary_if);
737
738 if (ret) 738 if (ret)
739 batadv_dbg(BATADV_DBG_BATMAN, bat_priv, 739 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
740 "Impossible to send uevent for (%s,%s,%s) event (err: %d)\n", 740 "Impossible to send uevent for (%s,%s,%s) event (err: %d)\n",
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 98a66a021a60..932232087449 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -385,25 +385,19 @@ static void batadv_tt_prepare_packet_buff(struct batadv_priv *bat_priv,
385 int *packet_buff_len, 385 int *packet_buff_len,
386 int min_packet_len) 386 int min_packet_len)
387{ 387{
388 struct batadv_hard_iface *primary_if;
389 int req_len; 388 int req_len;
390 389
391 primary_if = batadv_primary_if_get_selected(bat_priv);
392
393 req_len = min_packet_len; 390 req_len = min_packet_len;
394 req_len += batadv_tt_len(atomic_read(&bat_priv->tt.local_changes)); 391 req_len += batadv_tt_len(atomic_read(&bat_priv->tt.local_changes));
395 392
396 /* if we have too many changes for one packet don't send any 393 /* if we have too many changes for one packet don't send any
397 * and wait for the tt table request which will be fragmented 394 * and wait for the tt table request which will be fragmented
398 */ 395 */
399 if ((!primary_if) || (req_len > primary_if->soft_iface->mtu)) 396 if (req_len > bat_priv->soft_iface->mtu)
400 req_len = min_packet_len; 397 req_len = min_packet_len;
401 398
402 batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len, 399 batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
403 min_packet_len, req_len); 400 min_packet_len, req_len);
404
405 if (primary_if)
406 batadv_hardif_free_ref(primary_if);
407} 401}
408 402
409static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv, 403static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv,
@@ -908,7 +902,7 @@ out_remove:
908 /* remove address from local hash if present */ 902 /* remove address from local hash if present */
909 local_flags = batadv_tt_local_remove(bat_priv, tt_addr, 903 local_flags = batadv_tt_local_remove(bat_priv, tt_addr,
910 "global tt received", 904 "global tt received",
911 !!(flags & BATADV_TT_CLIENT_ROAM)); 905 flags & BATADV_TT_CLIENT_ROAM);
912 tt_global_entry->common.flags |= local_flags & BATADV_TT_CLIENT_WIFI; 906 tt_global_entry->common.flags |= local_flags & BATADV_TT_CLIENT_WIFI;
913 907
914 if (!(flags & BATADV_TT_CLIENT_ROAM)) 908 if (!(flags & BATADV_TT_CLIENT_ROAM))
@@ -1580,7 +1574,7 @@ static int batadv_tt_global_valid(const void *entry_ptr,
1580static struct sk_buff * 1574static struct sk_buff *
1581batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn, 1575batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
1582 struct batadv_hashtable *hash, 1576 struct batadv_hashtable *hash,
1583 struct batadv_hard_iface *primary_if, 1577 struct batadv_priv *bat_priv,
1584 int (*valid_cb)(const void *, const void *), 1578 int (*valid_cb)(const void *, const void *),
1585 void *cb_data) 1579 void *cb_data)
1586{ 1580{
@@ -1594,8 +1588,8 @@ batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
1594 uint32_t i; 1588 uint32_t i;
1595 size_t len; 1589 size_t len;
1596 1590
1597 if (tt_query_size + tt_len > primary_if->soft_iface->mtu) { 1591 if (tt_query_size + tt_len > bat_priv->soft_iface->mtu) {
1598 tt_len = primary_if->soft_iface->mtu - tt_query_size; 1592 tt_len = bat_priv->soft_iface->mtu - tt_query_size;
1599 tt_len -= tt_len % sizeof(struct batadv_tt_change); 1593 tt_len -= tt_len % sizeof(struct batadv_tt_change);
1600 } 1594 }
1601 tt_tot = tt_len / sizeof(struct batadv_tt_change); 1595 tt_tot = tt_len / sizeof(struct batadv_tt_change);
@@ -1715,7 +1709,6 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv,
1715{ 1709{
1716 struct batadv_orig_node *req_dst_orig_node; 1710 struct batadv_orig_node *req_dst_orig_node;
1717 struct batadv_orig_node *res_dst_orig_node = NULL; 1711 struct batadv_orig_node *res_dst_orig_node = NULL;
1718 struct batadv_hard_iface *primary_if = NULL;
1719 uint8_t orig_ttvn, req_ttvn, ttvn; 1712 uint8_t orig_ttvn, req_ttvn, ttvn;
1720 int ret = false; 1713 int ret = false;
1721 unsigned char *tt_buff; 1714 unsigned char *tt_buff;
@@ -1740,10 +1733,6 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv,
1740 if (!res_dst_orig_node) 1733 if (!res_dst_orig_node)
1741 goto out; 1734 goto out;
1742 1735
1743 primary_if = batadv_primary_if_get_selected(bat_priv);
1744 if (!primary_if)
1745 goto out;
1746
1747 orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn); 1736 orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
1748 req_ttvn = tt_request->ttvn; 1737 req_ttvn = tt_request->ttvn;
1749 1738
@@ -1791,7 +1780,7 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv,
1791 1780
1792 skb = batadv_tt_response_fill_table(tt_len, ttvn, 1781 skb = batadv_tt_response_fill_table(tt_len, ttvn,
1793 bat_priv->tt.global_hash, 1782 bat_priv->tt.global_hash,
1794 primary_if, 1783 bat_priv,
1795 batadv_tt_global_valid, 1784 batadv_tt_global_valid,
1796 req_dst_orig_node); 1785 req_dst_orig_node);
1797 if (!skb) 1786 if (!skb)
@@ -1828,8 +1817,6 @@ out:
1828 batadv_orig_node_free_ref(res_dst_orig_node); 1817 batadv_orig_node_free_ref(res_dst_orig_node);
1829 if (req_dst_orig_node) 1818 if (req_dst_orig_node)
1830 batadv_orig_node_free_ref(req_dst_orig_node); 1819 batadv_orig_node_free_ref(req_dst_orig_node);
1831 if (primary_if)
1832 batadv_hardif_free_ref(primary_if);
1833 if (!ret) 1820 if (!ret)
1834 kfree_skb(skb); 1821 kfree_skb(skb);
1835 return ret; 1822 return ret;
@@ -1907,7 +1894,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
1907 1894
1908 skb = batadv_tt_response_fill_table(tt_len, ttvn, 1895 skb = batadv_tt_response_fill_table(tt_len, ttvn,
1909 bat_priv->tt.local_hash, 1896 bat_priv->tt.local_hash,
1910 primary_if, 1897 bat_priv,
1911 batadv_tt_local_valid_entry, 1898 batadv_tt_local_valid_entry,
1912 NULL); 1899 NULL);
1913 if (!skb) 1900 if (!skb)
@@ -2528,7 +2515,7 @@ bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
2528 if (!tt_global_entry) 2515 if (!tt_global_entry)
2529 goto out; 2516 goto out;
2530 2517
2531 ret = !!(tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM); 2518 ret = tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM;
2532 batadv_tt_global_entry_free_ref(tt_global_entry); 2519 batadv_tt_global_entry_free_ref(tt_global_entry);
2533out: 2520out:
2534 return ret; 2521 return ret;
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index 4cd87a0b5b80..aba8364c3689 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -128,6 +128,10 @@ struct batadv_hard_iface {
128 * @bond_list: list of bonding candidates 128 * @bond_list: list of bonding candidates
129 * @refcount: number of contexts the object is used 129 * @refcount: number of contexts the object is used
130 * @rcu: struct used for freeing in an RCU-safe manner 130 * @rcu: struct used for freeing in an RCU-safe manner
131 * @in_coding_list: list of nodes this orig can hear
132 * @out_coding_list: list of nodes that can hear this orig
133 * @in_coding_list_lock: protects in_coding_list
134 * @out_coding_list_lock: protects out_coding_list
131 */ 135 */
132struct batadv_orig_node { 136struct batadv_orig_node {
133 uint8_t orig[ETH_ALEN]; 137 uint8_t orig[ETH_ALEN];
@@ -171,6 +175,12 @@ struct batadv_orig_node {
171 struct list_head bond_list; 175 struct list_head bond_list;
172 atomic_t refcount; 176 atomic_t refcount;
173 struct rcu_head rcu; 177 struct rcu_head rcu;
178#ifdef CONFIG_BATMAN_ADV_NC
179 struct list_head in_coding_list;
180 struct list_head out_coding_list;
181 spinlock_t in_coding_list_lock; /* Protects in_coding_list */
182 spinlock_t out_coding_list_lock; /* Protects out_coding_list */
183#endif
174}; 184};
175 185
176/** 186/**
@@ -265,6 +275,17 @@ struct batadv_bcast_duplist_entry {
265 * @BATADV_CNT_DAT_PUT_RX: received dht PUT traffic packet counter 275 * @BATADV_CNT_DAT_PUT_RX: received dht PUT traffic packet counter
266 * @BATADV_CNT_DAT_CACHED_REPLY_TX: transmitted dat cache reply traffic packet 276 * @BATADV_CNT_DAT_CACHED_REPLY_TX: transmitted dat cache reply traffic packet
267 * counter 277 * counter
278 * @BATADV_CNT_NC_CODE: transmitted nc-combined traffic packet counter
279 * @BATADV_CNT_NC_CODE_BYTES: transmitted nc-combined traffic bytes counter
280 * @BATADV_CNT_NC_RECODE: transmitted nc-recombined traffic packet counter
281 * @BATADV_CNT_NC_RECODE_BYTES: transmitted nc-recombined traffic bytes counter
282 * @BATADV_CNT_NC_BUFFER: counter for packets buffered for later nc decoding
283 * @BATADV_CNT_NC_DECODE: received and nc-decoded traffic packet counter
284 * @BATADV_CNT_NC_DECODE_BYTES: received and nc-decoded traffic bytes counter
285 * @BATADV_CNT_NC_DECODE_FAILED: received and decode-failed traffic packet
286 * counter
287 * @BATADV_CNT_NC_SNIFFED: counter for nc-decoded packets received in promisc
288 * mode.
268 * @BATADV_CNT_NUM: number of traffic counters 289 * @BATADV_CNT_NUM: number of traffic counters
269 */ 290 */
270enum batadv_counters { 291enum batadv_counters {
@@ -292,6 +313,17 @@ enum batadv_counters {
292 BATADV_CNT_DAT_PUT_RX, 313 BATADV_CNT_DAT_PUT_RX,
293 BATADV_CNT_DAT_CACHED_REPLY_TX, 314 BATADV_CNT_DAT_CACHED_REPLY_TX,
294#endif 315#endif
316#ifdef CONFIG_BATMAN_ADV_NC
317 BATADV_CNT_NC_CODE,
318 BATADV_CNT_NC_CODE_BYTES,
319 BATADV_CNT_NC_RECODE,
320 BATADV_CNT_NC_RECODE_BYTES,
321 BATADV_CNT_NC_BUFFER,
322 BATADV_CNT_NC_DECODE,
323 BATADV_CNT_NC_DECODE_BYTES,
324 BATADV_CNT_NC_DECODE_FAILED,
325 BATADV_CNT_NC_SNIFFED,
326#endif
295 BATADV_CNT_NUM, 327 BATADV_CNT_NUM,
296}; 328};
297 329
@@ -428,6 +460,35 @@ struct batadv_priv_dat {
428#endif 460#endif
429 461
430/** 462/**
463 * struct batadv_priv_nc - per mesh interface network coding private data
464 * @work: work queue callback item for cleanup
465 * @debug_dir: dentry for nc subdir in batman-adv directory in debugfs
466 * @min_tq: only consider neighbors for encoding if neigh_tq > min_tq
467 * @max_fwd_delay: maximum packet forward delay to allow coding of packets
468 * @max_buffer_time: buffer time for sniffed packets used for decoding
469 * @timestamp_fwd_flush: timestamp of last forward packet queue flush
470 * @timestamp_sniffed_purge: timestamp of last sniffed packet queue purge
471 * @coding_hash: Hash table used to buffer skbs while waiting for another
472 * incoming skb to code it with. Skbs are added to the buffer just before being
473 * forwarded in routing.c
474 * @decoding_hash: Hash table used to buffer skbs that might be needed to decode
475 * a received coded skb. The buffer is used for 1) skbs arriving on the
476 * soft-interface; 2) skbs overheard on the hard-interface; and 3) skbs
477 * forwarded by batman-adv.
478 */
479struct batadv_priv_nc {
480 struct delayed_work work;
481 struct dentry *debug_dir;
482 u8 min_tq;
483 u32 max_fwd_delay;
484 u32 max_buffer_time;
485 unsigned long timestamp_fwd_flush;
486 unsigned long timestamp_sniffed_purge;
487 struct batadv_hashtable *coding_hash;
488 struct batadv_hashtable *decoding_hash;
489};
490
491/**
431 * struct batadv_priv - per mesh interface data 492 * struct batadv_priv - per mesh interface data
432 * @mesh_state: current status of the mesh (inactive/active/deactivating) 493 * @mesh_state: current status of the mesh (inactive/active/deactivating)
433 * @soft_iface: net device which holds this struct as private data 494 * @soft_iface: net device which holds this struct as private data
@@ -470,6 +531,8 @@ struct batadv_priv_dat {
470 * @tt: translation table data 531 * @tt: translation table data
471 * @vis: vis data 532 * @vis: vis data
472 * @dat: distributed arp table data 533 * @dat: distributed arp table data
534 * @network_coding: bool indicating whether network coding is enabled
535 * @batadv_priv_nc: network coding data
473 */ 536 */
474struct batadv_priv { 537struct batadv_priv {
475 atomic_t mesh_state; 538 atomic_t mesh_state;
@@ -522,6 +585,10 @@ struct batadv_priv {
522#ifdef CONFIG_BATMAN_ADV_DAT 585#ifdef CONFIG_BATMAN_ADV_DAT
523 struct batadv_priv_dat dat; 586 struct batadv_priv_dat dat;
524#endif 587#endif
588#ifdef CONFIG_BATMAN_ADV_NC
589 atomic_t network_coding;
590 struct batadv_priv_nc nc;
591#endif /* CONFIG_BATMAN_ADV_NC */
525}; 592};
526 593
527/** 594/**
@@ -702,6 +769,75 @@ struct batadv_tt_roam_node {
702}; 769};
703 770
704/** 771/**
772 * struct batadv_nc_node - network coding node
773 * @list: next and prev pointer for the list handling
774 * @addr: the node's mac address
775 * @refcount: number of contexts the object is used by
776 * @rcu: struct used for freeing in an RCU-safe manner
777 * @orig_node: pointer to corresponding orig node struct
778 * @last_seen: timestamp of last ogm received from this node
779 */
780struct batadv_nc_node {
781 struct list_head list;
782 uint8_t addr[ETH_ALEN];
783 atomic_t refcount;
784 struct rcu_head rcu;
785 struct batadv_orig_node *orig_node;
786 unsigned long last_seen;
787};
788
789/**
790 * struct batadv_nc_path - network coding path
791 * @hash_entry: next and prev pointer for the list handling
792 * @rcu: struct used for freeing in an RCU-safe manner
793 * @refcount: number of contexts the object is used by
794 * @packet_list: list of buffered packets for this path
795 * @packet_list_lock: access lock for packet list
796 * @next_hop: next hop (destination) of path
797 * @prev_hop: previous hop (source) of path
798 * @last_valid: timestamp for last validation of path
799 */
800struct batadv_nc_path {
801 struct hlist_node hash_entry;
802 struct rcu_head rcu;
803 atomic_t refcount;
804 struct list_head packet_list;
805 spinlock_t packet_list_lock; /* Protects packet_list */
806 uint8_t next_hop[ETH_ALEN];
807 uint8_t prev_hop[ETH_ALEN];
808 unsigned long last_valid;
809};
810
811/**
812 * struct batadv_nc_packet - network coding packet used when coding and
813 * decoding packets
814 * @list: next and prev pointer for the list handling
815 * @packet_id: crc32 checksum of skb data
816 * @timestamp: field containing the info when the packet was added to path
817 * @neigh_node: pointer to original next hop neighbor of skb
818 * @skb: skb which can be encoded or used for decoding
819 * @nc_path: pointer to path this nc packet is attached to
820 */
821struct batadv_nc_packet {
822 struct list_head list;
823 __be32 packet_id;
824 unsigned long timestamp;
825 struct batadv_neigh_node *neigh_node;
826 struct sk_buff *skb;
827 struct batadv_nc_path *nc_path;
828};
829
830/**
831 * batadv_skb_cb - control buffer structure used to store private data relevant
832 * to batman-adv in the skb->cb buffer.
833 * @decoded: Marks a skb as decoded, which is checked when searching for coding
834 * opportunities in network-coding.c
835 */
836struct batadv_skb_cb {
837 bool decoded;
838};
839
840/**
705 * struct batadv_forw_packet - structure for bcast packets to be sent/forwarded 841 * struct batadv_forw_packet - structure for bcast packets to be sent/forwarded
706 * @list: list node for batadv_socket_client::queue_list 842 * @list: list node for batadv_socket_client::queue_list
707 * @send_time: execution time for delayed_work (packet sending) 843 * @send_time: execution time for delayed_work (packet sending)
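batadv_skb_cb lives in the 48-byte skb->cb scratch area that each network layer may reuse for its own bookkeeping; the conventional accessor is a cast macro. A sketch of the idiom (macro name illustrative):

    /* the struct must fit into the 48 bytes of skb->cb */
    #define BATADV_SKB_CB(__skb) ((struct batadv_skb_cb *)&((__skb)->cb[0]))

    /* mark an skb after a successful decode ... */
    BATADV_SKB_CB(skb)->decoded = true;

    /* ... and skip already-decoded skbs when searching coding partners */
    if (BATADV_SKB_CB(skb)->decoded)
            return;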
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index 50e079f00be6..0bb3b5982f94 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -122,7 +122,7 @@ batadv_frag_search_packet(struct list_head *head,
122{ 122{
123 struct batadv_frag_packet_list_entry *tfp; 123 struct batadv_frag_packet_list_entry *tfp;
124 struct batadv_unicast_frag_packet *tmp_up = NULL; 124 struct batadv_unicast_frag_packet *tmp_up = NULL;
125 int is_head_tmp, is_head; 125 bool is_head_tmp, is_head;
126 uint16_t search_seqno; 126 uint16_t search_seqno;
127 127
128 if (up->flags & BATADV_UNI_FRAG_HEAD) 128 if (up->flags & BATADV_UNI_FRAG_HEAD)
@@ -130,7 +130,7 @@ batadv_frag_search_packet(struct list_head *head,
130 else 130 else
131 search_seqno = ntohs(up->seqno)-1; 131 search_seqno = ntohs(up->seqno)-1;
132 132
133 is_head = !!(up->flags & BATADV_UNI_FRAG_HEAD); 133 is_head = up->flags & BATADV_UNI_FRAG_HEAD;
134 134
135 list_for_each_entry(tfp, head, list) { 135 list_for_each_entry(tfp, head, list) {
136 if (!tfp->skb) 136 if (!tfp->skb)
@@ -142,7 +142,7 @@ batadv_frag_search_packet(struct list_head *head,
142 tmp_up = (struct batadv_unicast_frag_packet *)tfp->skb->data; 142 tmp_up = (struct batadv_unicast_frag_packet *)tfp->skb->data;
143 143
144 if (tfp->seqno == search_seqno) { 144 if (tfp->seqno == search_seqno) {
145 is_head_tmp = !!(tmp_up->flags & BATADV_UNI_FRAG_HEAD); 145 is_head_tmp = tmp_up->flags & BATADV_UNI_FRAG_HEAD;
146 if (is_head_tmp != is_head) 146 if (is_head_tmp != is_head)
147 return tfp; 147 return tfp;
148 else 148 else
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index c053244b97bd..962ccf3b8382 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -149,7 +149,7 @@ static void batadv_vis_data_read_prim_sec(struct seq_file *seq,
149 149
150 hlist_for_each_entry(entry, if_list, list) { 150 hlist_for_each_entry(entry, if_list, list) {
151 if (entry->primary) 151 if (entry->primary)
152 seq_printf(seq, "PRIMARY, "); 152 seq_puts(seq, "PRIMARY, ");
153 else 153 else
154 seq_printf(seq, "SEC %pM, ", entry->addr); 154 seq_printf(seq, "SEC %pM, ", entry->addr);
155 } 155 }
@@ -207,7 +207,7 @@ static void batadv_vis_data_read_entries(struct seq_file *seq,
207 if (batadv_compare_eth(entry->addr, packet->vis_orig)) 207 if (batadv_compare_eth(entry->addr, packet->vis_orig))
208 batadv_vis_data_read_prim_sec(seq, list); 208 batadv_vis_data_read_prim_sec(seq, list);
209 209
210 seq_printf(seq, "\n"); 210 seq_puts(seq, "\n");
211 } 211 }
212} 212}
213 213
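These conversions follow the general seq_file rule: seq_printf() parses its format string on every call, so for a constant string without conversions seq_puts() does the same job more cheaply. The rule of thumb, taken from the lines above:

    seq_printf(seq, "SEC %pM, ", entry->addr);  /* has conversions: keep */
    seq_puts(seq, "PRIMARY, ");                 /* constant string: seq_puts */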
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 0d1b08cc76e1..d5a973bf8a6f 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -422,7 +422,8 @@ unsigned int bt_sock_poll(struct file *file, struct socket *sock,
422 return bt_accept_poll(sk); 422 return bt_accept_poll(sk);
423 423
424 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 424 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
425 mask |= POLLERR; 425 mask |= POLLERR |
426 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
426 427
427 if (sk->sk_shutdown & RCV_SHUTDOWN) 428 if (sk->sk_shutdown & RCV_SHUTDOWN)
428 mask |= POLLRDHUP | POLLIN | POLLRDNORM; 429 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
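SOCK_SELECT_ERR_QUEUE is set from userspace via the SO_SELECT_ERR_QUEUE socket option added in the same series (visible in the per-arch socket.h updates), after which a pending error-queue message also raises POLLPRI, so readers can block instead of speculatively calling recvmsg(MSG_ERRQUEUE). A userspace sketch, assuming the new sockopt constant is available in the installed headers:

    #include <poll.h>
    #include <sys/socket.h>

    static void wait_for_sock_error(int fd)
    {
            int on = 1;
            struct pollfd pfd = { .fd = fd, .events = POLLPRI };

            /* opt in: error-queue readiness is now signalled as POLLPRI */
            setsockopt(fd, SOL_SOCKET, SO_SELECT_ERR_QUEUE, &on, sizeof(on));

            poll(&pfd, 1, -1);  /* wakes once sk_error_queue is non-empty */
            /* drain the queue with recvmsg(fd, ..., MSG_ERRQUEUE) */
    }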
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index e58c8b32589c..4b488ec26105 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -136,7 +136,7 @@ static u16 bnep_net_eth_proto(struct sk_buff *skb)
136 struct ethhdr *eh = (void *) skb->data; 136 struct ethhdr *eh = (void *) skb->data;
137 u16 proto = ntohs(eh->h_proto); 137 u16 proto = ntohs(eh->h_proto);
138 138
139 if (proto >= 1536) 139 if (proto >= ETH_P_802_3_MIN)
140 return proto; 140 return proto;
141 141
142 if (get_unaligned((__be16 *) skb->data) == htons(0xFFFF)) 142 if (get_unaligned((__be16 *) skb->data) == htons(0xFFFF))
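ETH_P_802_3_MIN gives the magic constant 1536 (0x0600) a name: an h_proto value at or above it is an EtherType, anything smaller is an IEEE 802.3 length field. The classification rule as a helper:

    #include <linux/if_ether.h>

    /* >= 0x0600: EtherType (0x0800 = IPv4, 0x86DD = IPv6, ...);
     * anything smaller is an 802.3 frame length instead
     */
    static bool proto_is_ethertype(u16 proto)
    {
            return proto >= ETH_P_802_3_MIN;
    }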
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index bab338e6270d..c581f1200ef7 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -161,9 +161,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
161 if (!pv) 161 if (!pv)
162 return; 162 return;
163 163
164 for (vid = find_next_bit(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN, vid); 164 for_each_set_bit_from(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
165 vid < BR_VLAN_BITMAP_LEN;
166 vid = find_next_bit(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN, vid+1)) {
167 f = __br_fdb_get(br, br->dev->dev_addr, vid); 165 f = __br_fdb_get(br, br->dev->dev_addr, vid);
168 if (f && f->is_local && !f->dst) 166 if (f && f->is_local && !f->dst)
169 fdb_delete(br, f); 167 fdb_delete(br, f);
@@ -724,13 +722,10 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
724 * specify a VLAN. To be nice, add/update entry for every 722 * specify a VLAN. To be nice, add/update entry for every
725 * vlan on this port. 723 * vlan on this port.
726 */ 724 */
727 vid = find_first_bit(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN); 725 for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
728 while (vid < BR_VLAN_BITMAP_LEN) {
729 err = __br_fdb_add(ndm, p, addr, nlh_flags, vid); 726 err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
730 if (err) 727 if (err)
731 goto out; 728 goto out;
732 vid = find_next_bit(pv->vlan_bitmap,
733 BR_VLAN_BITMAP_LEN, vid+1);
734 } 729 }
735 } 730 }
736 731
@@ -815,11 +810,8 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
815 * vlan on this port. 810 * vlan on this port.
816 */ 811 */
817 err = -ENOENT; 812 err = -ENOENT;
818 vid = find_first_bit(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN); 813 for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
819 while (vid < BR_VLAN_BITMAP_LEN) {
820 err &= __br_fdb_delete(p, addr, vid); 814 err &= __br_fdb_delete(p, addr, vid);
821 vid = find_next_bit(pv->vlan_bitmap,
822 BR_VLAN_BITMAP_LEN, vid+1);
823 } 815 }
824 } 816 }
825out: 817out:
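The helpers expand to exactly the find_first_bit()/find_next_bit() loops they replace, so this is a pure readability change; simplified from <linux/bitops.h>:

    #define for_each_set_bit(bit, addr, size)                   \
            for ((bit) = find_first_bit((addr), (size));        \
                 (bit) < (size);                                \
                 (bit) = find_next_bit((addr), (size), (bit) + 1))

    /* the _from variant resumes the scan at the current value of (bit),
     * matching the br_fdb_change_mac_address() loop above
     */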
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index ef1b91431c6b..f17fcb3097c2 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -148,7 +148,6 @@ static void del_nbp(struct net_bridge_port *p)
148 dev->priv_flags &= ~IFF_BRIDGE_PORT; 148 dev->priv_flags &= ~IFF_BRIDGE_PORT;
149 149
150 netdev_rx_handler_unregister(dev); 150 netdev_rx_handler_unregister(dev);
151 synchronize_net();
152 151
153 netdev_upper_dev_unlink(dev, br->dev); 152 netdev_upper_dev_unlink(dev, br->dev);
154 153
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index ee79f3f20383..19942e38fd2d 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -382,7 +382,7 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
382 return ret; 382 return ret;
383} 383}
384 384
385static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 385static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
386{ 386{
387 struct net *net = sock_net(skb->sk); 387 struct net *net = sock_net(skb->sk);
388 struct br_mdb_entry *entry; 388 struct br_mdb_entry *entry;
@@ -458,7 +458,7 @@ unlock:
458 return err; 458 return err;
459} 459}
460 460
461static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 461static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
462{ 462{
463 struct net_device *dev; 463 struct net_device *dev;
464 struct br_mdb_entry *entry; 464 struct br_mdb_entry *entry;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 923fbeaf7afd..81f2389f78eb 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1369,7 +1369,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1369 return -EINVAL; 1369 return -EINVAL;
1370 1370
1371 if (iph->protocol != IPPROTO_IGMP) { 1371 if (iph->protocol != IPPROTO_IGMP) {
1372 if ((iph->daddr & IGMP_LOCAL_GROUP_MASK) != IGMP_LOCAL_GROUP) 1372 if (!ipv4_is_local_multicast(iph->daddr))
1373 BR_INPUT_SKB_CB(skb)->mrouters_only = 1; 1373 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1374 return 0; 1374 return 0;
1375 } 1375 }
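ipv4_is_local_multicast() swaps the open-coded mask for the named helper covering 224.0.0.0/24, the link-local multicast block that is never forwarded; from <linux/in.h> it boils down to:

    static inline bool ipv4_is_local_multicast(__be32 addr)
    {
            /* 224.0.0.0/24: reserved for link-local signalling */
            return (addr & htonl(0xffffff00)) == htonl(0xe0000000);
    }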
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 299fc5f40a26..8e3abf564798 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -136,10 +136,7 @@ static int br_fill_ifinfo(struct sk_buff *skb,
136 goto nla_put_failure; 136 goto nla_put_failure;
137 137
138 pvid = br_get_pvid(pv); 138 pvid = br_get_pvid(pv);
139 for (vid = find_first_bit(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN); 139 for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
140 vid < BR_VLAN_BITMAP_LEN;
141 vid = find_next_bit(pv->vlan_bitmap,
142 BR_VLAN_BITMAP_LEN, vid+1)) {
143 vinfo.vid = vid; 140 vinfo.vid = vid;
144 vinfo.flags = 0; 141 vinfo.flags = 0;
145 if (vid == pvid) 142 if (vid == pvid)
@@ -355,17 +352,14 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
355/* Change state and parameters on port. */ 352/* Change state and parameters on port. */
356int br_setlink(struct net_device *dev, struct nlmsghdr *nlh) 353int br_setlink(struct net_device *dev, struct nlmsghdr *nlh)
357{ 354{
358 struct ifinfomsg *ifm;
359 struct nlattr *protinfo; 355 struct nlattr *protinfo;
360 struct nlattr *afspec; 356 struct nlattr *afspec;
361 struct net_bridge_port *p; 357 struct net_bridge_port *p;
362 struct nlattr *tb[IFLA_BRPORT_MAX + 1]; 358 struct nlattr *tb[IFLA_BRPORT_MAX + 1];
363 int err; 359 int err = 0;
364
365 ifm = nlmsg_data(nlh);
366 360
367 protinfo = nlmsg_find_attr(nlh, sizeof(*ifm), IFLA_PROTINFO); 361 protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
368 afspec = nlmsg_find_attr(nlh, sizeof(*ifm), IFLA_AF_SPEC); 362 afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
369 if (!protinfo && !afspec) 363 if (!protinfo && !afspec)
370 return 0; 364 return 0;
371 365
@@ -373,7 +367,7 @@ int br_setlink(struct net_device *dev, struct nlmsghdr *nlh)
373 /* We want to accept dev as bridge itself if the AF_SPEC 367 /* We want to accept dev as bridge itself if the AF_SPEC
374 * is set to see if someone is setting vlan info on the bridge 368 * is set to see if someone is setting vlan info on the bridge
375 */ 369 */
376 if (!p && ((dev->priv_flags & IFF_EBRIDGE) && !afspec)) 370 if (!p && !afspec)
377 return -EINVAL; 371 return -EINVAL;
378 372
379 if (p && protinfo) { 373 if (p && protinfo) {
@@ -414,14 +408,11 @@ out:
414/* Delete port information */ 408/* Delete port information */
415int br_dellink(struct net_device *dev, struct nlmsghdr *nlh) 409int br_dellink(struct net_device *dev, struct nlmsghdr *nlh)
416{ 410{
417 struct ifinfomsg *ifm;
418 struct nlattr *afspec; 411 struct nlattr *afspec;
419 struct net_bridge_port *p; 412 struct net_bridge_port *p;
420 int err; 413 int err;
421 414
422 ifm = nlmsg_data(nlh); 415 afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
423
424 afspec = nlmsg_find_attr(nlh, sizeof(*ifm), IFLA_AF_SPEC);
425 if (!afspec) 416 if (!afspec)
426 return 0; 417 return 0;
427 418
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index 92de5e5f9db2..9878eb8204c5 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -78,6 +78,11 @@ ebt_log_packet(u_int8_t pf, unsigned int hooknum,
78 const char *prefix) 78 const char *prefix)
79{ 79{
80 unsigned int bitmask; 80 unsigned int bitmask;
81 struct net *net = dev_net(in ? in : out);
82
83 /* FIXME: Disabled from containers until syslog ns is supported */
84 if (!net_eq(net, &init_net))
85 return;
81 86
82 spin_lock_bh(&ebt_log_lock); 87 spin_lock_bh(&ebt_log_lock);
83 printk(KERN_SOH "%c%s IN=%s OUT=%s MAC source = %pM MAC dest = %pM proto = 0x%04x", 88 printk(KERN_SOH "%c%s IN=%s OUT=%s MAC source = %pM MAC dest = %pM proto = 0x%04x",
@@ -176,17 +181,18 @@ ebt_log_tg(struct sk_buff *skb, const struct xt_action_param *par)
176{ 181{
177 const struct ebt_log_info *info = par->targinfo; 182 const struct ebt_log_info *info = par->targinfo;
178 struct nf_loginfo li; 183 struct nf_loginfo li;
184 struct net *net = dev_net(par->in ? par->in : par->out);
179 185
180 li.type = NF_LOG_TYPE_LOG; 186 li.type = NF_LOG_TYPE_LOG;
181 li.u.log.level = info->loglevel; 187 li.u.log.level = info->loglevel;
182 li.u.log.logflags = info->bitmask; 188 li.u.log.logflags = info->bitmask;
183 189
184 if (info->bitmask & EBT_LOG_NFLOG) 190 if (info->bitmask & EBT_LOG_NFLOG)
185 nf_log_packet(NFPROTO_BRIDGE, par->hooknum, skb, par->in, 191 nf_log_packet(net, NFPROTO_BRIDGE, par->hooknum, skb,
186 par->out, &li, "%s", info->prefix); 192 par->in, par->out, &li, "%s", info->prefix);
187 else 193 else
188 ebt_log_packet(NFPROTO_BRIDGE, par->hooknum, skb, par->in, 194 ebt_log_packet(NFPROTO_BRIDGE, par->hooknum, skb, par->in,
189 par->out, &li, info->prefix); 195 par->out, &li, info->prefix);
190 return EBT_CONTINUE; 196 return EBT_CONTINUE;
191} 197}
192 198
@@ -206,19 +212,47 @@ static struct nf_logger ebt_log_logger __read_mostly = {
206 .me = THIS_MODULE, 212 .me = THIS_MODULE,
207}; 213};
208 214
215static int __net_init ebt_log_net_init(struct net *net)
216{
217 nf_log_set(net, NFPROTO_BRIDGE, &ebt_log_logger);
218 return 0;
219}
220
221static void __net_exit ebt_log_net_fini(struct net *net)
222{
223 nf_log_unset(net, &ebt_log_logger);
224}
225
226static struct pernet_operations ebt_log_net_ops = {
227 .init = ebt_log_net_init,
228 .exit = ebt_log_net_fini,
229};
230
209static int __init ebt_log_init(void) 231static int __init ebt_log_init(void)
210{ 232{
211 int ret; 233 int ret;
212 234
235 ret = register_pernet_subsys(&ebt_log_net_ops);
236 if (ret < 0)
237 goto err_pernet;
238
213 ret = xt_register_target(&ebt_log_tg_reg); 239 ret = xt_register_target(&ebt_log_tg_reg);
214 if (ret < 0) 240 if (ret < 0)
215 return ret; 241 goto err_target;
242
216 nf_log_register(NFPROTO_BRIDGE, &ebt_log_logger); 243 nf_log_register(NFPROTO_BRIDGE, &ebt_log_logger);
217 return 0; 244
245 return ret;
246
247err_target:
248 unregister_pernet_subsys(&ebt_log_net_ops);
249err_pernet:
250 return ret;
218} 251}
219 252
220static void __exit ebt_log_fini(void) 253static void __exit ebt_log_fini(void)
221{ 254{
255 unregister_pernet_subsys(&ebt_log_net_ops);
222 nf_log_unregister(&ebt_log_logger); 256 nf_log_unregister(&ebt_log_logger);
223 xt_unregister_target(&ebt_log_tg_reg); 257 xt_unregister_target(&ebt_log_tg_reg);
224} 258}
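The pernet registration is the standard pattern for per-namespace state: register_pernet_subsys() runs .init for every existing namespace and for each one created later, and .exit on teardown, which is why it is wired up before the xt target can fire and unwound in reverse order on exit. A stripped-down sketch of the pattern:

    #include <net/net_namespace.h>

    static int __net_init example_net_init(struct net *net)
    {
            /* allocate and initialize per-namespace state here */
            return 0;
    }

    static void __net_exit example_net_fini(struct net *net)
    {
            /* release the per-namespace state again */
    }

    static struct pernet_operations example_net_ops = {
            .init = example_net_init,
            .exit = example_net_fini,
    };

    /* module init: register_pernet_subsys(&example_net_ops);
     * module exit unregisters in the reverse order of registration
     */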
diff --git a/net/bridge/netfilter/ebt_nflog.c b/net/bridge/netfilter/ebt_nflog.c
index 5be68bbcc341..59ac7952010d 100644
--- a/net/bridge/netfilter/ebt_nflog.c
+++ b/net/bridge/netfilter/ebt_nflog.c
@@ -24,14 +24,15 @@ ebt_nflog_tg(struct sk_buff *skb, const struct xt_action_param *par)
24{ 24{
25 const struct ebt_nflog_info *info = par->targinfo; 25 const struct ebt_nflog_info *info = par->targinfo;
26 struct nf_loginfo li; 26 struct nf_loginfo li;
27 struct net *net = dev_net(par->in ? par->in : par->out);
27 28
28 li.type = NF_LOG_TYPE_ULOG; 29 li.type = NF_LOG_TYPE_ULOG;
29 li.u.ulog.copy_len = info->len; 30 li.u.ulog.copy_len = info->len;
30 li.u.ulog.group = info->group; 31 li.u.ulog.group = info->group;
31 li.u.ulog.qthreshold = info->threshold; 32 li.u.ulog.qthreshold = info->threshold;
32 33
33 nf_log_packet(PF_BRIDGE, par->hooknum, skb, par->in, par->out, 34 nf_log_packet(net, PF_BRIDGE, par->hooknum, skb, par->in,
34 &li, "%s", info->prefix); 35 par->out, &li, "%s", info->prefix);
35 return EBT_CONTINUE; 36 return EBT_CONTINUE;
36} 37}
37 38
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index 3bf43f7bb9d4..fc1905c51417 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -35,12 +35,13 @@
35#include <linux/skbuff.h> 35#include <linux/skbuff.h>
36#include <linux/kernel.h> 36#include <linux/kernel.h>
37#include <linux/timer.h> 37#include <linux/timer.h>
38#include <linux/netlink.h> 38#include <net/netlink.h>
39#include <linux/netdevice.h> 39#include <linux/netdevice.h>
40#include <linux/netfilter/x_tables.h> 40#include <linux/netfilter/x_tables.h>
41#include <linux/netfilter_bridge/ebtables.h> 41#include <linux/netfilter_bridge/ebtables.h>
42#include <linux/netfilter_bridge/ebt_ulog.h> 42#include <linux/netfilter_bridge/ebt_ulog.h>
43#include <net/netfilter/nf_log.h> 43#include <net/netfilter/nf_log.h>
44#include <net/netns/generic.h>
44#include <net/sock.h> 45#include <net/sock.h>
45#include "../br_private.h" 46#include "../br_private.h"
46 47
@@ -62,13 +63,22 @@ typedef struct {
62 spinlock_t lock; /* the per-queue lock */ 63 spinlock_t lock; /* the per-queue lock */
63} ebt_ulog_buff_t; 64} ebt_ulog_buff_t;
64 65
65static ebt_ulog_buff_t ulog_buffers[EBT_ULOG_MAXNLGROUPS]; 66static int ebt_ulog_net_id __read_mostly;
66static struct sock *ebtulognl; 67struct ebt_ulog_net {
68 unsigned int nlgroup[EBT_ULOG_MAXNLGROUPS];
69 ebt_ulog_buff_t ulog_buffers[EBT_ULOG_MAXNLGROUPS];
70 struct sock *ebtulognl;
71};
72
73static struct ebt_ulog_net *ebt_ulog_pernet(struct net *net)
74{
75 return net_generic(net, ebt_ulog_net_id);
76}
67 77
68/* send one ulog_buff_t to userspace */ 78/* send one ulog_buff_t to userspace */
69static void ulog_send(unsigned int nlgroup) 79static void ulog_send(struct ebt_ulog_net *ebt, unsigned int nlgroup)
70{ 80{
71 ebt_ulog_buff_t *ub = &ulog_buffers[nlgroup]; 81 ebt_ulog_buff_t *ub = &ebt->ulog_buffers[nlgroup];
72 82
73 del_timer(&ub->timer); 83 del_timer(&ub->timer);
74 84
@@ -80,7 +90,7 @@ static void ulog_send(unsigned int nlgroup)
80 ub->lastnlh->nlmsg_type = NLMSG_DONE; 90 ub->lastnlh->nlmsg_type = NLMSG_DONE;
81 91
82 NETLINK_CB(ub->skb).dst_group = nlgroup + 1; 92 NETLINK_CB(ub->skb).dst_group = nlgroup + 1;
83 netlink_broadcast(ebtulognl, ub->skb, 0, nlgroup + 1, GFP_ATOMIC); 93 netlink_broadcast(ebt->ebtulognl, ub->skb, 0, nlgroup + 1, GFP_ATOMIC);
84 94
85 ub->qlen = 0; 95 ub->qlen = 0;
86 ub->skb = NULL; 96 ub->skb = NULL;
@@ -89,10 +99,15 @@ static void ulog_send(unsigned int nlgroup)
89/* timer function to flush queue in flushtimeout time */ 99/* timer function to flush queue in flushtimeout time */
90static void ulog_timer(unsigned long data) 100static void ulog_timer(unsigned long data)
91{ 101{
92 spin_lock_bh(&ulog_buffers[data].lock); 102 struct ebt_ulog_net *ebt = container_of((void *)data,
93 if (ulog_buffers[data].skb) 103 struct ebt_ulog_net,
94 ulog_send(data); 104 nlgroup[*(unsigned int *)data]);
95 spin_unlock_bh(&ulog_buffers[data].lock); 105
106 ebt_ulog_buff_t *ub = &ebt->ulog_buffers[*(unsigned int *)data];
107 spin_lock_bh(&ub->lock);
108 if (ub->skb)
109 ulog_send(ebt, *(unsigned int *)data);
110 spin_unlock_bh(&ub->lock);
96} 111}
97 112
98static struct sk_buff *ulog_alloc_skb(unsigned int size) 113static struct sk_buff *ulog_alloc_skb(unsigned int size)
@@ -123,8 +138,10 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
123 ebt_ulog_packet_msg_t *pm; 138 ebt_ulog_packet_msg_t *pm;
124 size_t size, copy_len; 139 size_t size, copy_len;
125 struct nlmsghdr *nlh; 140 struct nlmsghdr *nlh;
141 struct net *net = dev_net(in ? in : out);
142 struct ebt_ulog_net *ebt = ebt_ulog_pernet(net);
126 unsigned int group = uloginfo->nlgroup; 143 unsigned int group = uloginfo->nlgroup;
127 ebt_ulog_buff_t *ub = &ulog_buffers[group]; 144 ebt_ulog_buff_t *ub = &ebt->ulog_buffers[group];
128 spinlock_t *lock = &ub->lock; 145 spinlock_t *lock = &ub->lock;
129 ktime_t kt; 146 ktime_t kt;
130 147
@@ -134,7 +151,7 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
134 else 151 else
135 copy_len = uloginfo->cprange; 152 copy_len = uloginfo->cprange;
136 153
137 size = NLMSG_SPACE(sizeof(*pm) + copy_len); 154 size = nlmsg_total_size(sizeof(*pm) + copy_len);
138 if (size > nlbufsiz) { 155 if (size > nlbufsiz) {
139 pr_debug("Size %Zd needed, but nlbufsiz=%d\n", size, nlbufsiz); 156 pr_debug("Size %Zd needed, but nlbufsiz=%d\n", size, nlbufsiz);
140 return; 157 return;
@@ -146,7 +163,7 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
146 if (!(ub->skb = ulog_alloc_skb(size))) 163 if (!(ub->skb = ulog_alloc_skb(size)))
147 goto unlock; 164 goto unlock;
148 } else if (size > skb_tailroom(ub->skb)) { 165 } else if (size > skb_tailroom(ub->skb)) {
149 ulog_send(group); 166 ulog_send(ebt, group);
150 167
151 if (!(ub->skb = ulog_alloc_skb(size))) 168 if (!(ub->skb = ulog_alloc_skb(size)))
152 goto unlock; 169 goto unlock;
@@ -205,7 +222,7 @@ static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb,
205 ub->lastnlh = nlh; 222 ub->lastnlh = nlh;
206 223
207 if (ub->qlen >= uloginfo->qthreshold) 224 if (ub->qlen >= uloginfo->qthreshold)
208 ulog_send(group); 225 ulog_send(ebt, group);
209 else if (!timer_pending(&ub->timer)) { 226 else if (!timer_pending(&ub->timer)) {
210 ub->timer.expires = jiffies + flushtimeout * HZ / 100; 227 ub->timer.expires = jiffies + flushtimeout * HZ / 100;
211 add_timer(&ub->timer); 228 add_timer(&ub->timer);
@@ -277,56 +294,89 @@ static struct nf_logger ebt_ulog_logger __read_mostly = {
277 .me = THIS_MODULE, 294 .me = THIS_MODULE,
278}; 295};
279 296
280static int __init ebt_ulog_init(void) 297static int __net_init ebt_ulog_net_init(struct net *net)
281{ 298{
282 int ret;
283 int i; 299 int i;
300 struct ebt_ulog_net *ebt = ebt_ulog_pernet(net);
301
284 struct netlink_kernel_cfg cfg = { 302 struct netlink_kernel_cfg cfg = {
285 .groups = EBT_ULOG_MAXNLGROUPS, 303 .groups = EBT_ULOG_MAXNLGROUPS,
286 }; 304 };
287 305
288 if (nlbufsiz >= 128*1024) {
289 pr_warning("Netlink buffer has to be <= 128kB,"
290 " please try a smaller nlbufsiz parameter.\n");
291 return -EINVAL;
292 }
293
294 /* initialize ulog_buffers */ 306 /* initialize ulog_buffers */
295 for (i = 0; i < EBT_ULOG_MAXNLGROUPS; i++) { 307 for (i = 0; i < EBT_ULOG_MAXNLGROUPS; i++) {
296 setup_timer(&ulog_buffers[i].timer, ulog_timer, i); 308 ebt->nlgroup[i] = i;
297 spin_lock_init(&ulog_buffers[i].lock); 309 setup_timer(&ebt->ulog_buffers[i].timer, ulog_timer,
310 (unsigned long)&ebt->nlgroup[i]);
311 spin_lock_init(&ebt->ulog_buffers[i].lock);
298 } 312 }
299 313
300 ebtulognl = netlink_kernel_create(&init_net, NETLINK_NFLOG, &cfg); 314 ebt->ebtulognl = netlink_kernel_create(net, NETLINK_NFLOG, &cfg);
301 if (!ebtulognl) 315 if (!ebt->ebtulognl)
302 ret = -ENOMEM; 316 return -ENOMEM;
303 else if ((ret = xt_register_target(&ebt_ulog_tg_reg)) != 0)
304 netlink_kernel_release(ebtulognl);
305 317
306 if (ret == 0) 318 nf_log_set(net, NFPROTO_BRIDGE, &ebt_ulog_logger);
307 nf_log_register(NFPROTO_BRIDGE, &ebt_ulog_logger); 319 return 0;
308
309 return ret;
310} 320}
311 321
312static void __exit ebt_ulog_fini(void) 322static void __net_exit ebt_ulog_net_fini(struct net *net)
313{ 323{
314 ebt_ulog_buff_t *ub;
315 int i; 324 int i;
325 struct ebt_ulog_net *ebt = ebt_ulog_pernet(net);
316 326
317 nf_log_unregister(&ebt_ulog_logger); 327 nf_log_unset(net, &ebt_ulog_logger);
318 xt_unregister_target(&ebt_ulog_tg_reg);
319 for (i = 0; i < EBT_ULOG_MAXNLGROUPS; i++) { 328 for (i = 0; i < EBT_ULOG_MAXNLGROUPS; i++) {
320 ub = &ulog_buffers[i]; 329 ebt_ulog_buff_t *ub = &ebt->ulog_buffers[i];
321 del_timer(&ub->timer); 330 del_timer(&ub->timer);
322 spin_lock_bh(&ub->lock); 331
323 if (ub->skb) { 332 if (ub->skb) {
324 kfree_skb(ub->skb); 333 kfree_skb(ub->skb);
325 ub->skb = NULL; 334 ub->skb = NULL;
326 } 335 }
327 spin_unlock_bh(&ub->lock);
328 } 336 }
329 netlink_kernel_release(ebtulognl); 337 netlink_kernel_release(ebt->ebtulognl);
338}
339
340static struct pernet_operations ebt_ulog_net_ops = {
341 .init = ebt_ulog_net_init,
342 .exit = ebt_ulog_net_fini,
343 .id = &ebt_ulog_net_id,
344 .size = sizeof(struct ebt_ulog_net),
345};
346
347static int __init ebt_ulog_init(void)
348{
349 int ret;
350
351 if (nlbufsiz >= 128*1024) {
352 pr_warn("Netlink buffer has to be <= 128kB,"
353 "please try a smaller nlbufsiz parameter.\n");
354 return -EINVAL;
355 }
356
357 ret = register_pernet_subsys(&ebt_ulog_net_ops);
358 if (ret)
359 goto out_pernet;
360
361 ret = xt_register_target(&ebt_ulog_tg_reg);
362 if (ret)
363 goto out_target;
364
365 nf_log_register(NFPROTO_BRIDGE, &ebt_ulog_logger);
366
367 return 0;
368
369out_target:
370 unregister_pernet_subsys(&ebt_ulog_net_ops);
371out_pernet:
372 return ret;
373}
374
375static void __exit ebt_ulog_fini(void)
376{
377 nf_log_unregister(&ebt_ulog_logger);
378 xt_unregister_target(&ebt_ulog_tg_reg);
379 unregister_pernet_subsys(&ebt_ulog_net_ops);
330} 380}
331 381
332module_init(ebt_ulog_init); 382module_init(ebt_ulog_init);
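The hunk above is the heart of the conversion: the module's file-scope globals become a per-namespace blob managed through pernet_operations. A minimal sketch of the pattern, with illustrative names only:

/* Sketch of the net_generic()/pernet_operations pattern; names here
 * are illustrative, not from the patch. The core allocates .size
 * bytes of zeroed storage per struct net and stores the slot index
 * through .id at registration time.
 */
static int example_net_id __read_mostly;

struct example_net {
	struct sock *nlsk;	/* e.g. one netlink socket per netns */
};

static int __net_init example_net_init(struct net *net)
{
	struct example_net *en = net_generic(net, example_net_id);

	en->nlsk = NULL;	/* storage arrives pre-zeroed */
	return 0;
}

static void __net_exit example_net_exit(struct net *net)
{
	/* tear down per-namespace resources here */
}

static struct pernet_operations example_net_ops = {
	.init = example_net_init,
	.exit = example_net_exit,
	.id   = &example_net_id,
	.size = sizeof(struct example_net),
};

register_pernet_subsys(&example_net_ops) then runs .init for every existing and future namespace, and net_generic(net, example_net_id) fetches the per-namespace block, exactly as ebt_ulog_pernet() does above.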
diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
index 40d8258bf74f..70f656ce0f4a 100644
--- a/net/bridge/netfilter/ebtable_broute.c
+++ b/net/bridge/netfilter/ebtable_broute.c
@@ -64,9 +64,7 @@ static int ebt_broute(struct sk_buff *skb)
64static int __net_init broute_net_init(struct net *net) 64static int __net_init broute_net_init(struct net *net)
65{ 65{
66 net->xt.broute_table = ebt_register_table(net, &broute_table); 66 net->xt.broute_table = ebt_register_table(net, &broute_table);
67 if (IS_ERR(net->xt.broute_table)) 67 return PTR_RET(net->xt.broute_table);
68 return PTR_ERR(net->xt.broute_table);
69 return 0;
70} 68}
71 69
72static void __net_exit broute_net_exit(struct net *net) 70static void __net_exit broute_net_exit(struct net *net)
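PTR_RET() in the shrunken broute_net_init() above is simply the removed three-line idiom folded into a helper (it was later renamed PTR_ERR_OR_ZERO); roughly:

/* Rough equivalent of PTR_RET(ptr): */
static inline int ptr_ret_sketch(const void *ptr)
{
	return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
}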
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 8d493c91a562..3d110c4fc787 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -138,7 +138,7 @@ ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
138 ethproto = h->h_proto; 138 ethproto = h->h_proto;
139 139
140 if (e->bitmask & EBT_802_3) { 140 if (e->bitmask & EBT_802_3) {
141 if (FWINV2(ntohs(ethproto) >= 1536, EBT_IPROTO)) 141 if (FWINV2(ntohs(ethproto) >= ETH_P_802_3_MIN, EBT_IPROTO))
142 return 1; 142 return 1;
143 } else if (!(e->bitmask & EBT_NOPROTO) && 143 } else if (!(e->bitmask & EBT_NOPROTO) &&
144 FWINV2(e->ethproto != ethproto, EBT_IPROTO)) 144 FWINV2(e->ethproto != ethproto, EBT_IPROTO))
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 21760f008974..df6d56d8689a 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -301,10 +301,11 @@ static void dev_flowctrl(struct net_device *dev, int on)
301} 301}
302 302
303void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, 303void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
304 struct cflayer *link_support, int head_room, 304 struct cflayer *link_support, int head_room,
305 struct cflayer **layer, int (**rcv_func)( 305 struct cflayer **layer,
306 struct sk_buff *, struct net_device *, 306 int (**rcv_func)(struct sk_buff *, struct net_device *,
307 struct packet_type *, struct net_device *)) 307 struct packet_type *,
308 struct net_device *))
308{ 309{
309 struct caif_device_entry *caifd; 310 struct caif_device_entry *caifd;
310 enum cfcnfg_phy_preference pref; 311 enum cfcnfg_phy_preference pref;
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index ff2ff3ce6965..630b8be6e748 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -197,8 +197,8 @@ static void cfsk_put(struct cflayer *layr)
197 197
198/* Packet Control Callback function called from CAIF */ 198/* Packet Control Callback function called from CAIF */
199static void caif_ctrl_cb(struct cflayer *layr, 199static void caif_ctrl_cb(struct cflayer *layr,
200 enum caif_ctrlcmd flow, 200 enum caif_ctrlcmd flow,
201 int phyid) 201 int phyid)
202{ 202{
203 struct caifsock *cf_sk = container_of(layr, struct caifsock, layer); 203 struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
204 switch (flow) { 204 switch (flow) {
@@ -274,7 +274,7 @@ static void caif_check_flow_release(struct sock *sk)
274 * changed locking, address handling and added MSG_TRUNC. 274 * changed locking, address handling and added MSG_TRUNC.
275 */ 275 */
276static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, 276static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
277 struct msghdr *m, size_t len, int flags) 277 struct msghdr *m, size_t len, int flags)
278 278
279{ 279{
280 struct sock *sk = sock->sk; 280 struct sock *sk = sock->sk;
@@ -348,8 +348,8 @@ static long caif_stream_data_wait(struct sock *sk, long timeo)
348 * changed locking calls, changed address handling. 348 * changed locking calls, changed address handling.
349 */ 349 */
350static int caif_stream_recvmsg(struct kiocb *iocb, struct socket *sock, 350static int caif_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
351 struct msghdr *msg, size_t size, 351 struct msghdr *msg, size_t size,
352 int flags) 352 int flags)
353{ 353{
354 struct sock *sk = sock->sk; 354 struct sock *sk = sock->sk;
355 int copied = 0; 355 int copied = 0;
@@ -464,7 +464,7 @@ out:
464 * CAIF flow-on and sock_writable. 464 * CAIF flow-on and sock_writable.
465 */ 465 */
466static long caif_wait_for_flow_on(struct caifsock *cf_sk, 466static long caif_wait_for_flow_on(struct caifsock *cf_sk,
467 int wait_writeable, long timeo, int *err) 467 int wait_writeable, long timeo, int *err)
468{ 468{
469 struct sock *sk = &cf_sk->sk; 469 struct sock *sk = &cf_sk->sk;
470 DEFINE_WAIT(wait); 470 DEFINE_WAIT(wait);
@@ -518,7 +518,7 @@ static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
518 518
519/* Copied from af_unix:unix_dgram_sendmsg, and adapted to CAIF */ 519/* Copied from af_unix:unix_dgram_sendmsg, and adapted to CAIF */
520static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock, 520static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock,
521 struct msghdr *msg, size_t len) 521 struct msghdr *msg, size_t len)
522{ 522{
523 struct sock *sk = sock->sk; 523 struct sock *sk = sock->sk;
524 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); 524 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
@@ -593,7 +593,7 @@ err:
593 * and other minor adaptations. 593 * and other minor adaptations.
594 */ 594 */
595static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, 595static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
596 struct msghdr *msg, size_t len) 596 struct msghdr *msg, size_t len)
597{ 597{
598 struct sock *sk = sock->sk; 598 struct sock *sk = sock->sk;
599 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); 599 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
@@ -672,7 +672,7 @@ out_err:
672} 672}
673 673
674static int setsockopt(struct socket *sock, 674static int setsockopt(struct socket *sock,
675 int lvl, int opt, char __user *ov, unsigned int ol) 675 int lvl, int opt, char __user *ov, unsigned int ol)
676{ 676{
677 struct sock *sk = sock->sk; 677 struct sock *sk = sock->sk;
678 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); 678 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
@@ -934,7 +934,7 @@ static int caif_release(struct socket *sock)
934 934
935/* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */ 935/* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */
936static unsigned int caif_poll(struct file *file, 936static unsigned int caif_poll(struct file *file,
937 struct socket *sock, poll_table *wait) 937 struct socket *sock, poll_table *wait)
938{ 938{
939 struct sock *sk = sock->sk; 939 struct sock *sk = sock->sk;
940 unsigned int mask; 940 unsigned int mask;
@@ -1024,7 +1024,7 @@ static void caif_sock_destructor(struct sock *sk)
1024} 1024}
1025 1025
1026static int caif_create(struct net *net, struct socket *sock, int protocol, 1026static int caif_create(struct net *net, struct socket *sock, int protocol,
1027 int kern) 1027 int kern)
1028{ 1028{
1029 struct sock *sk = NULL; 1029 struct sock *sk = NULL;
1030 struct caifsock *cf_sk = NULL; 1030 struct caifsock *cf_sk = NULL;
diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c
index ef8ebaa993cf..d76278d644b8 100644
--- a/net/caif/caif_usb.c
+++ b/net/caif/caif_usb.c
@@ -75,7 +75,7 @@ static int cfusbl_transmit(struct cflayer *layr, struct cfpkt *pkt)
75} 75}
76 76
77static void cfusbl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, 77static void cfusbl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
78 int phyid) 78 int phyid)
79{ 79{
80 if (layr->up && layr->up->ctrlcmd) 80 if (layr->up && layr->up->ctrlcmd)
81 layr->up->ctrlcmd(layr->up, ctrl, layr->id); 81 layr->up->ctrlcmd(layr->up, ctrl, layr->id);
@@ -121,7 +121,7 @@ static struct packet_type caif_usb_type __read_mostly = {
121}; 121};
122 122
123static int cfusbl_device_notify(struct notifier_block *me, unsigned long what, 123static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
124 void *arg) 124 void *arg)
125{ 125{
126 struct net_device *dev = arg; 126 struct net_device *dev = arg;
127 struct caif_dev_common common; 127 struct caif_dev_common common;
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index f1dbddb95a6c..246ac3aa8de5 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -61,11 +61,11 @@ struct cfcnfg {
61}; 61};
62 62
63static void cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, 63static void cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id,
64 enum cfctrl_srv serv, u8 phyid, 64 enum cfctrl_srv serv, u8 phyid,
65 struct cflayer *adapt_layer); 65 struct cflayer *adapt_layer);
66static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id); 66static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id);
67static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id, 67static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id,
68 struct cflayer *adapt_layer); 68 struct cflayer *adapt_layer);
69static void cfctrl_resp_func(void); 69static void cfctrl_resp_func(void);
70static void cfctrl_enum_resp(void); 70static void cfctrl_enum_resp(void);
71 71
@@ -131,7 +131,7 @@ static void cfctrl_resp_func(void)
131} 131}
132 132
133static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo_rcu(struct cfcnfg *cnfg, 133static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo_rcu(struct cfcnfg *cnfg,
134 u8 phyid) 134 u8 phyid)
135{ 135{
136 struct cfcnfg_phyinfo *phy; 136 struct cfcnfg_phyinfo *phy;
137 137
@@ -216,8 +216,8 @@ static const int protohead[CFCTRL_SRV_MASK] = {
216 216
217 217
218static int caif_connect_req_to_link_param(struct cfcnfg *cnfg, 218static int caif_connect_req_to_link_param(struct cfcnfg *cnfg,
219 struct caif_connect_request *s, 219 struct caif_connect_request *s,
220 struct cfctrl_link_param *l) 220 struct cfctrl_link_param *l)
221{ 221{
222 struct dev_info *dev_info; 222 struct dev_info *dev_info;
223 enum cfcnfg_phy_preference pref; 223 enum cfcnfg_phy_preference pref;
@@ -301,8 +301,7 @@ static int caif_connect_req_to_link_param(struct cfcnfg *cnfg,
301 301
302int caif_connect_client(struct net *net, struct caif_connect_request *conn_req, 302int caif_connect_client(struct net *net, struct caif_connect_request *conn_req,
303 struct cflayer *adap_layer, int *ifindex, 303 struct cflayer *adap_layer, int *ifindex,
304 int *proto_head, 304 int *proto_head, int *proto_tail)
305 int *proto_tail)
306{ 305{
307 struct cflayer *frml; 306 struct cflayer *frml;
308 struct cfcnfg_phyinfo *phy; 307 struct cfcnfg_phyinfo *phy;
@@ -364,7 +363,7 @@ unlock:
364EXPORT_SYMBOL(caif_connect_client); 363EXPORT_SYMBOL(caif_connect_client);
365 364
366static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id, 365static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id,
367 struct cflayer *adapt_layer) 366 struct cflayer *adapt_layer)
368{ 367{
369 if (adapt_layer != NULL && adapt_layer->ctrlcmd != NULL) 368 if (adapt_layer != NULL && adapt_layer->ctrlcmd != NULL)
370 adapt_layer->ctrlcmd(adapt_layer, 369 adapt_layer->ctrlcmd(adapt_layer,
@@ -526,7 +525,7 @@ out_err:
526EXPORT_SYMBOL(cfcnfg_add_phy_layer); 525EXPORT_SYMBOL(cfcnfg_add_phy_layer);
527 526
528int cfcnfg_set_phy_state(struct cfcnfg *cnfg, struct cflayer *phy_layer, 527int cfcnfg_set_phy_state(struct cfcnfg *cnfg, struct cflayer *phy_layer,
529 bool up) 528 bool up)
530{ 529{
531 struct cfcnfg_phyinfo *phyinfo; 530 struct cfcnfg_phyinfo *phyinfo;
532 531
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index a376ec1ac0a7..9cd057c59c59 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -20,12 +20,12 @@
20 20
21#ifdef CAIF_NO_LOOP 21#ifdef CAIF_NO_LOOP
22static int handle_loop(struct cfctrl *ctrl, 22static int handle_loop(struct cfctrl *ctrl,
23 int cmd, struct cfpkt *pkt){ 23 int cmd, struct cfpkt *pkt){
24 return -1; 24 return -1;
25} 25}
26#else 26#else
27static int handle_loop(struct cfctrl *ctrl, 27static int handle_loop(struct cfctrl *ctrl,
28 int cmd, struct cfpkt *pkt); 28 int cmd, struct cfpkt *pkt);
29#endif 29#endif
30static int cfctrl_recv(struct cflayer *layr, struct cfpkt *pkt); 30static int cfctrl_recv(struct cflayer *layr, struct cfpkt *pkt);
31static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, 31static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
@@ -72,7 +72,7 @@ void cfctrl_remove(struct cflayer *layer)
72} 72}
73 73
74static bool param_eq(const struct cfctrl_link_param *p1, 74static bool param_eq(const struct cfctrl_link_param *p1,
75 const struct cfctrl_link_param *p2) 75 const struct cfctrl_link_param *p2)
76{ 76{
77 bool eq = 77 bool eq =
78 p1->linktype == p2->linktype && 78 p1->linktype == p2->linktype &&
@@ -197,8 +197,8 @@ void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
197} 197}
198 198
199int cfctrl_linkup_request(struct cflayer *layer, 199int cfctrl_linkup_request(struct cflayer *layer,
200 struct cfctrl_link_param *param, 200 struct cfctrl_link_param *param,
201 struct cflayer *user_layer) 201 struct cflayer *user_layer)
202{ 202{
203 struct cfctrl *cfctrl = container_obj(layer); 203 struct cfctrl *cfctrl = container_obj(layer);
204 u32 tmp32; 204 u32 tmp32;
@@ -301,7 +301,7 @@ int cfctrl_linkup_request(struct cflayer *layer,
301} 301}
302 302
303int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid, 303int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
304 struct cflayer *client) 304 struct cflayer *client)
305{ 305{
306 int ret; 306 int ret;
307 struct cfpkt *pkt; 307 struct cfpkt *pkt;
@@ -555,7 +555,7 @@ error:
555} 555}
556 556
557static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, 557static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
558 int phyid) 558 int phyid)
559{ 559{
560 struct cfctrl *this = container_obj(layr); 560 struct cfctrl *this = container_obj(layr);
561 switch (ctrl) { 561 switch (ctrl) {
diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c
index 0a7df7ef062d..204c5e226a61 100644
--- a/net/caif/cffrml.c
+++ b/net/caif/cffrml.c
@@ -28,7 +28,7 @@ struct cffrml {
28static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt); 28static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt);
29static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt); 29static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt);
30static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, 30static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
31 int phyid); 31 int phyid);
32 32
33static u32 cffrml_rcv_error; 33static u32 cffrml_rcv_error;
34static u32 cffrml_rcv_checsum_error; 34static u32 cffrml_rcv_checsum_error;
@@ -167,7 +167,7 @@ static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt)
167} 167}
168 168
169static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, 169static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
170 int phyid) 170 int phyid)
171{ 171{
172 if (layr->up && layr->up->ctrlcmd) 172 if (layr->up && layr->up->ctrlcmd)
173 layr->up->ctrlcmd(layr->up, ctrl, layr->id); 173 layr->up->ctrlcmd(layr->up, ctrl, layr->id);
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index 94b08612a4d8..154d9f8f964c 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -42,7 +42,7 @@ struct cfmuxl {
42static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt); 42static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt);
43static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt); 43static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt);
44static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, 44static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
45 int phyid); 45 int phyid);
46static struct cflayer *get_up(struct cfmuxl *muxl, u16 id); 46static struct cflayer *get_up(struct cfmuxl *muxl, u16 id);
47 47
48struct cflayer *cfmuxl_create(void) 48struct cflayer *cfmuxl_create(void)
@@ -244,7 +244,7 @@ static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt)
244} 244}
245 245
246static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, 246static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
247 int phyid) 247 int phyid)
248{ 248{
249 struct cfmuxl *muxl = container_obj(layr); 249 struct cfmuxl *muxl = container_obj(layr);
250 struct cflayer *layer; 250 struct cflayer *layer;
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index 863dedd91bb6..e8f9c149504d 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -266,8 +266,8 @@ inline u16 cfpkt_getlen(struct cfpkt *pkt)
266} 266}
267 267
268inline u16 cfpkt_iterate(struct cfpkt *pkt, 268inline u16 cfpkt_iterate(struct cfpkt *pkt,
269 u16 (*iter_func)(u16, void *, u16), 269 u16 (*iter_func)(u16, void *, u16),
270 u16 data) 270 u16 data)
271{ 271{
272 /* 272 /*
273 * Don't care about the performance hit of linearizing, 273 * Don't care about the performance hit of linearizing,
@@ -307,8 +307,8 @@ int cfpkt_setlen(struct cfpkt *pkt, u16 len)
307} 307}
308 308
309struct cfpkt *cfpkt_append(struct cfpkt *dstpkt, 309struct cfpkt *cfpkt_append(struct cfpkt *dstpkt,
310 struct cfpkt *addpkt, 310 struct cfpkt *addpkt,
311 u16 expectlen) 311 u16 expectlen)
312{ 312{
313 struct sk_buff *dst = pkt_to_skb(dstpkt); 313 struct sk_buff *dst = pkt_to_skb(dstpkt);
314 struct sk_buff *add = pkt_to_skb(addpkt); 314 struct sk_buff *add = pkt_to_skb(addpkt);
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index 2b563ad04597..db51830c8587 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -43,7 +43,7 @@ static void cfrfml_release(struct cflayer *layer)
43} 43}
44 44
45struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info, 45struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info,
46 int mtu_size) 46 int mtu_size)
47{ 47{
48 int tmp; 48 int tmp;
49 struct cfrfml *this = kzalloc(sizeof(struct cfrfml), GFP_ATOMIC); 49 struct cfrfml *this = kzalloc(sizeof(struct cfrfml), GFP_ATOMIC);
@@ -69,7 +69,7 @@ struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info,
69} 69}
70 70
71static struct cfpkt *rfm_append(struct cfrfml *rfml, char *seghead, 71static struct cfpkt *rfm_append(struct cfrfml *rfml, char *seghead,
72 struct cfpkt *pkt, int *err) 72 struct cfpkt *pkt, int *err)
73{ 73{
74 struct cfpkt *tmppkt; 74 struct cfpkt *tmppkt;
75 *err = -EPROTO; 75 *err = -EPROTO;
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index 8e68b97f13ee..147c232b1285 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -29,7 +29,7 @@ struct cfserl {
29static int cfserl_receive(struct cflayer *layr, struct cfpkt *pkt); 29static int cfserl_receive(struct cflayer *layr, struct cfpkt *pkt);
30static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt); 30static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
31static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, 31static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
32 int phyid); 32 int phyid);
33 33
34struct cflayer *cfserl_create(int instance, bool use_stx) 34struct cflayer *cfserl_create(int instance, bool use_stx)
35{ 35{
@@ -182,7 +182,7 @@ static int cfserl_transmit(struct cflayer *layer, struct cfpkt *newpkt)
182} 182}
183 183
184static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, 184static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
185 int phyid) 185 int phyid)
186{ 186{
187 layr->up->ctrlcmd(layr->up, ctrl, phyid); 187 layr->up->ctrlcmd(layr->up, ctrl, phyid);
188} 188}
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index ba217e90765e..95f7f5ea30ef 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -25,7 +25,7 @@
25#define container_obj(layr) container_of(layr, struct cfsrvl, layer) 25#define container_obj(layr) container_of(layr, struct cfsrvl, layer)
26 26
27static void cfservl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, 27static void cfservl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
28 int phyid) 28 int phyid)
29{ 29{
30 struct cfsrvl *service = container_obj(layr); 30 struct cfsrvl *service = container_obj(layr);
31 31
@@ -158,10 +158,9 @@ static void cfsrvl_release(struct cflayer *layer)
158} 158}
159 159
160void cfsrvl_init(struct cfsrvl *service, 160void cfsrvl_init(struct cfsrvl *service,
161 u8 channel_id, 161 u8 channel_id,
162 struct dev_info *dev_info, 162 struct dev_info *dev_info,
163 bool supports_flowctrl 163 bool supports_flowctrl)
164 )
165{ 164{
166 caif_assert(offsetof(struct cfsrvl, layer) == 0); 165 caif_assert(offsetof(struct cfsrvl, layer) == 0);
167 service->open = false; 166 service->open = false;
@@ -207,8 +206,8 @@ void caif_free_client(struct cflayer *adap_layer)
207EXPORT_SYMBOL(caif_free_client); 206EXPORT_SYMBOL(caif_free_client);
208 207
209void caif_client_register_refcnt(struct cflayer *adapt_layer, 208void caif_client_register_refcnt(struct cflayer *adapt_layer,
210 void (*hold)(struct cflayer *lyr), 209 void (*hold)(struct cflayer *lyr),
211 void (*put)(struct cflayer *lyr)) 210 void (*put)(struct cflayer *lyr))
212{ 211{
213 struct cfsrvl *service; 212 struct cfsrvl *service;
214 213
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index e597733affb8..26a4e4e3a767 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -167,7 +167,7 @@ static void chnl_put(struct cflayer *lyr)
167} 167}
168 168
169static void chnl_flowctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow, 169static void chnl_flowctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow,
170 int phyid) 170 int phyid)
171{ 171{
172 struct chnl_net *priv = container_of(layr, struct chnl_net, chnl); 172 struct chnl_net *priv = container_of(layr, struct chnl_net, chnl);
173 pr_debug("NET flowctrl func called flow: %s\n", 173 pr_debug("NET flowctrl func called flow: %s\n",
@@ -443,7 +443,7 @@ nla_put_failure:
443} 443}
444 444
445static void caif_netlink_parms(struct nlattr *data[], 445static void caif_netlink_parms(struct nlattr *data[],
446 struct caif_connect_request *conn_req) 446 struct caif_connect_request *conn_req)
447{ 447{
448 if (!data) { 448 if (!data) {
449 pr_warn("no params data found\n"); 449 pr_warn("no params data found\n");
@@ -488,7 +488,7 @@ static int ipcaif_newlink(struct net *src_net, struct net_device *dev,
488} 488}
489 489
490static int ipcaif_changelink(struct net_device *dev, struct nlattr *tb[], 490static int ipcaif_changelink(struct net_device *dev, struct nlattr *tb[],
491 struct nlattr *data[]) 491 struct nlattr *data[])
492{ 492{
493 struct chnl_net *caifdev; 493 struct chnl_net *caifdev;
494 ASSERT_RTNL(); 494 ASSERT_RTNL();
diff --git a/net/can/af_can.c b/net/can/af_can.c
index c48e5220bbac..c4e50852c9f4 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -525,7 +525,7 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
525 525
526 d = find_dev_rcv_lists(dev); 526 d = find_dev_rcv_lists(dev);
527 if (!d) { 527 if (!d) {
528 printk(KERN_ERR "BUG: receive list not found for " 528 pr_err("BUG: receive list not found for "
529 "dev %s, id %03X, mask %03X\n", 529 "dev %s, id %03X, mask %03X\n",
530 DNAME(dev), can_id, mask); 530 DNAME(dev), can_id, mask);
531 goto out; 531 goto out;
@@ -546,16 +546,13 @@ void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
546 } 546 }
547 547
548 /* 548 /*
549 * Check for bugs in CAN protocol implementations: 549 * Check for bugs in CAN protocol implementations using af_can.c:
550 * If no matching list item was found, the list cursor variable next 550 * 'r' will be NULL if no matching list item was found for removal.
551 * will be NULL, while r will point to the last item of the list.
552 */ 551 */
553 552
554 if (!r) { 553 if (!r) {
555 printk(KERN_ERR "BUG: receive list entry not found for " 554 WARN(1, "BUG: receive list entry not found for dev %s, "
556 "dev %s, id %03X, mask %03X\n", 555 "id %03X, mask %03X\n", DNAME(dev), can_id, mask);
557 DNAME(dev), can_id, mask);
558 r = NULL;
559 goto out; 556 goto out;
560 } 557 }
561 558
@@ -749,8 +746,7 @@ int can_proto_register(const struct can_proto *cp)
749 int err = 0; 746 int err = 0;
750 747
751 if (proto < 0 || proto >= CAN_NPROTO) { 748 if (proto < 0 || proto >= CAN_NPROTO) {
752 printk(KERN_ERR "can: protocol number %d out of range\n", 749 pr_err("can: protocol number %d out of range\n", proto);
753 proto);
754 return -EINVAL; 750 return -EINVAL;
755 } 751 }
756 752
@@ -761,8 +757,7 @@ int can_proto_register(const struct can_proto *cp)
761 mutex_lock(&proto_tab_lock); 757 mutex_lock(&proto_tab_lock);
762 758
763 if (proto_tab[proto]) { 759 if (proto_tab[proto]) {
764 printk(KERN_ERR "can: protocol %d already registered\n", 760 pr_err("can: protocol %d already registered\n", proto);
765 proto);
766 err = -EBUSY; 761 err = -EBUSY;
767 } else 762 } else
768 RCU_INIT_POINTER(proto_tab[proto], cp); 763 RCU_INIT_POINTER(proto_tab[proto], cp);
@@ -816,11 +811,8 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg,
816 811
817 /* create new dev_rcv_lists for this device */ 812 /* create new dev_rcv_lists for this device */
818 d = kzalloc(sizeof(*d), GFP_KERNEL); 813 d = kzalloc(sizeof(*d), GFP_KERNEL);
819 if (!d) { 814 if (!d)
820 printk(KERN_ERR
821 "can: allocation of receive list failed\n");
822 return NOTIFY_DONE; 815 return NOTIFY_DONE;
823 }
824 BUG_ON(dev->ml_priv); 816 BUG_ON(dev->ml_priv);
825 dev->ml_priv = d; 817 dev->ml_priv = d;
826 818
@@ -838,8 +830,8 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg,
838 dev->ml_priv = NULL; 830 dev->ml_priv = NULL;
839 } 831 }
840 } else 832 } else
841 printk(KERN_ERR "can: notifier: receive list not " 833 pr_err("can: notifier: receive list not found for dev "
842 "found for dev %s\n", dev->name); 834 "%s\n", dev->name);
843 835
844 spin_unlock(&can_rcvlists_lock); 836 spin_unlock(&can_rcvlists_lock);
845 837
@@ -927,7 +919,7 @@ static __exit void can_exit(void)
927 /* remove created dev_rcv_lists from still registered CAN devices */ 919 /* remove created dev_rcv_lists from still registered CAN devices */
928 rcu_read_lock(); 920 rcu_read_lock();
929 for_each_netdev_rcu(&init_net, dev) { 921 for_each_netdev_rcu(&init_net, dev) {
930 if (dev->type == ARPHRD_CAN && dev->ml_priv){ 922 if (dev->type == ARPHRD_CAN && dev->ml_priv) {
931 923
932 struct dev_rcv_lists *d = dev->ml_priv; 924 struct dev_rcv_lists *d = dev->ml_priv;
933 925
diff --git a/net/can/gw.c b/net/can/gw.c
index 2d117dc5ebea..2dc619db805a 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -778,8 +778,7 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
778 return 0; 778 return 0;
779} 779}
780 780
781static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh, 781static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh)
782 void *arg)
783{ 782{
784 struct rtcanmsg *r; 783 struct rtcanmsg *r;
785 struct cgw_job *gwj; 784 struct cgw_job *gwj;
@@ -868,7 +867,7 @@ static void cgw_remove_all_jobs(void)
868 } 867 }
869} 868}
870 869
871static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 870static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
872{ 871{
873 struct cgw_job *gwj = NULL; 872 struct cgw_job *gwj = NULL;
874 struct hlist_node *nx; 873 struct hlist_node *nx;
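cgw_create_job()/cgw_remove_job() are one instance of a tree-wide cleanup that recurs throughout this patch: the never-used void *arg parameter is dropped from every rtnetlink doit handler. The handler type presumably becomes:

/* rtnetlink doit handler type after the cleanup (sketch): */
typedef int (*rtnl_doit_func)(struct sk_buff *skb, struct nlmsghdr *nlh);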
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 368f9c3f9dc6..ebba65d7e0da 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -749,7 +749,9 @@ unsigned int datagram_poll(struct file *file, struct socket *sock,
749 749
750 /* exceptional events? */ 750 /* exceptional events? */
751 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 751 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
752 mask |= POLLERR; 752 mask |= POLLERR |
753 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
754
753 if (sk->sk_shutdown & RCV_SHUTDOWN) 755 if (sk->sk_shutdown & RCV_SHUTDOWN)
754 mask |= POLLRDHUP | POLLIN | POLLRDNORM; 756 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
755 if (sk->sk_shutdown == SHUTDOWN_MASK) 757 if (sk->sk_shutdown == SHUTDOWN_MASK)
diff --git a/net/core/dev.c b/net/core/dev.c
index e7d68ed8aafe..3655ff927315 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2207,16 +2207,8 @@ out:
2207} 2207}
2208EXPORT_SYMBOL(skb_checksum_help); 2208EXPORT_SYMBOL(skb_checksum_help);
2209 2209
2210/** 2210__be16 skb_network_protocol(struct sk_buff *skb)
2211 * skb_mac_gso_segment - mac layer segmentation handler.
2212 * @skb: buffer to segment
2213 * @features: features for the output path (see dev->features)
2214 */
2215struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2216 netdev_features_t features)
2217{ 2211{
2218 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2219 struct packet_offload *ptype;
2220 __be16 type = skb->protocol; 2212 __be16 type = skb->protocol;
2221 int vlan_depth = ETH_HLEN; 2213 int vlan_depth = ETH_HLEN;
2222 2214
@@ -2224,13 +2216,31 @@ struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2224 struct vlan_hdr *vh; 2216 struct vlan_hdr *vh;
2225 2217
2226 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN))) 2218 if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
2227 return ERR_PTR(-EINVAL); 2219 return 0;
2228 2220
2229 vh = (struct vlan_hdr *)(skb->data + vlan_depth); 2221 vh = (struct vlan_hdr *)(skb->data + vlan_depth);
2230 type = vh->h_vlan_encapsulated_proto; 2222 type = vh->h_vlan_encapsulated_proto;
2231 vlan_depth += VLAN_HLEN; 2223 vlan_depth += VLAN_HLEN;
2232 } 2224 }
2233 2225
2226 return type;
2227}
2228
2229/**
2230 * skb_mac_gso_segment - mac layer segmentation handler.
2231 * @skb: buffer to segment
2232 * @features: features for the output path (see dev->features)
2233 */
2234struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2235 netdev_features_t features)
2236{
2237 struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2238 struct packet_offload *ptype;
2239 __be16 type = skb_network_protocol(skb);
2240
2241 if (unlikely(!type))
2242 return ERR_PTR(-EINVAL);
2243
2234 __skb_pull(skb, skb->mac_len); 2244 __skb_pull(skb, skb->mac_len);
2235 2245
2236 rcu_read_lock(); 2246 rcu_read_lock();
@@ -2397,24 +2407,12 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
2397 return 0; 2407 return 0;
2398} 2408}
2399 2409
2400static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
2401{
2402 return ((features & NETIF_F_GEN_CSUM) ||
2403 ((features & NETIF_F_V4_CSUM) &&
2404 protocol == htons(ETH_P_IP)) ||
2405 ((features & NETIF_F_V6_CSUM) &&
2406 protocol == htons(ETH_P_IPV6)) ||
2407 ((features & NETIF_F_FCOE_CRC) &&
2408 protocol == htons(ETH_P_FCOE)));
2409}
2410
2411static netdev_features_t harmonize_features(struct sk_buff *skb, 2410static netdev_features_t harmonize_features(struct sk_buff *skb,
2412 __be16 protocol, netdev_features_t features) 2411 __be16 protocol, netdev_features_t features)
2413{ 2412{
2414 if (skb->ip_summed != CHECKSUM_NONE && 2413 if (skb->ip_summed != CHECKSUM_NONE &&
2415 !can_checksum_protocol(features, protocol)) { 2414 !can_checksum_protocol(features, protocol)) {
2416 features &= ~NETIF_F_ALL_CSUM; 2415 features &= ~NETIF_F_ALL_CSUM;
2417 features &= ~NETIF_F_SG;
2418 } else if (illegal_highdma(skb->dev, skb)) { 2416 } else if (illegal_highdma(skb->dev, skb)) {
2419 features &= ~NETIF_F_SG; 2417 features &= ~NETIF_F_SG;
2420 } 2418 }
@@ -2589,6 +2587,7 @@ static void qdisc_pkt_len_init(struct sk_buff *skb)
2589 */ 2587 */
2590 if (shinfo->gso_size) { 2588 if (shinfo->gso_size) {
2591 unsigned int hdr_len; 2589 unsigned int hdr_len;
2590 u16 gso_segs = shinfo->gso_segs;
2592 2591
2593 /* mac layer + network layer */ 2592 /* mac layer + network layer */
2594 hdr_len = skb_transport_header(skb) - skb_mac_header(skb); 2593 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
@@ -2598,7 +2597,12 @@ static void qdisc_pkt_len_init(struct sk_buff *skb)
2598 hdr_len += tcp_hdrlen(skb); 2597 hdr_len += tcp_hdrlen(skb);
2599 else 2598 else
2600 hdr_len += sizeof(struct udphdr); 2599 hdr_len += sizeof(struct udphdr);
2601 qdisc_skb_cb(skb)->pkt_len += (shinfo->gso_segs - 1) * hdr_len; 2600
2601 if (shinfo->gso_type & SKB_GSO_DODGY)
2602 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
2603 shinfo->gso_size);
2604
2605 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
2602 } 2606 }
2603} 2607}
2604 2608
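The qdisc_pkt_len_init() change above stops trusting gso_segs on untrusted (SKB_GSO_DODGY) packets, such as those from virtio guests, and recomputes it from the lengths. A worked example with assumed numbers:

/* Assumed TCP GSO skb:
 *   skb->len = 65226, hdr_len = 66 (Eth 14 + IPv4 20 + TCP 32),
 *   gso_size = 1448 bytes of payload per segment.
 * DODGY path: gso_segs = DIV_ROUND_UP(65226 - 66, 1448) = 45.
 * Wire estimate: pkt_len = 65226 + (45 - 1) * 66 = 68130 bytes,
 * since each of the 44 extra segments repeats the 66-byte headers.
 */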
@@ -3326,7 +3330,7 @@ EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
3326 * netdev_rx_handler_unregister - unregister receive handler 3330 * netdev_rx_handler_unregister - unregister receive handler
3327 * @dev: device to unregister a handler from 3331 * @dev: device to unregister a handler from
3328 * 3332 *
3329 * Unregister a receive hander from a device. 3333 * Unregister a receive handler from a device.
3330 * 3334 *
3331 * The caller must hold the rtnl_mutex. 3335 * The caller must hold the rtnl_mutex.
3332 */ 3336 */
@@ -4063,6 +4067,9 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4063 napi->gro_list = NULL; 4067 napi->gro_list = NULL;
4064 napi->skb = NULL; 4068 napi->skb = NULL;
4065 napi->poll = poll; 4069 napi->poll = poll;
4070 if (weight > NAPI_POLL_WEIGHT)
4071 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
4072 weight, dev->name);
4066 napi->weight = weight; 4073 napi->weight = weight;
4067 list_add(&napi->dev_list, &dev->napi_list); 4074 list_add(&napi->dev_list, &dev->napi_list);
4068 napi->dev = dev; 4075 napi->dev = dev;
@@ -4924,20 +4931,25 @@ static netdev_features_t netdev_fix_features(struct net_device *dev,
4924 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM); 4931 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
4925 } 4932 }
4926 4933
4927 /* Fix illegal SG+CSUM combinations. */
4928 if ((features & NETIF_F_SG) &&
4929 !(features & NETIF_F_ALL_CSUM)) {
4930 netdev_dbg(dev,
4931 "Dropping NETIF_F_SG since no checksum feature.\n");
4932 features &= ~NETIF_F_SG;
4933 }
4934
4935 /* TSO requires that SG is present as well. */ 4934 /* TSO requires that SG is present as well. */
4936 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) { 4935 if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
4937 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n"); 4936 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
4938 features &= ~NETIF_F_ALL_TSO; 4937 features &= ~NETIF_F_ALL_TSO;
4939 } 4938 }
4940 4939
4940 if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
4941 !(features & NETIF_F_IP_CSUM)) {
4942 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
4943 features &= ~NETIF_F_TSO;
4944 features &= ~NETIF_F_TSO_ECN;
4945 }
4946
4947 if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
4948 !(features & NETIF_F_IPV6_CSUM)) {
4949 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
4950 features &= ~NETIF_F_TSO6;
4951 }
4952
4941 /* TSO ECN requires that TSO is present as well. */ 4953 /* TSO ECN requires that TSO is present as well. */
4942 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN) 4954 if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
4943 features &= ~NETIF_F_TSO_ECN; 4955 features &= ~NETIF_F_TSO_ECN;
@@ -5205,6 +5217,10 @@ int register_netdevice(struct net_device *dev)
5205 */ 5217 */
5206 dev->vlan_features |= NETIF_F_HIGHDMA; 5218 dev->vlan_features |= NETIF_F_HIGHDMA;
5207 5219
5220 /* Make NETIF_F_SG inheritable to tunnel devices.
5221 */
5222 dev->hw_enc_features |= NETIF_F_SG;
5223
5208 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev); 5224 ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
5209 ret = notifier_to_errno(ret); 5225 ret = notifier_to_errno(ret);
5210 if (ret) 5226 if (ret)
diff --git a/net/core/dst.c b/net/core/dst.c
index 35fd12f1a69c..df9cc810ec8e 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -320,27 +320,28 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
320EXPORT_SYMBOL(__dst_destroy_metrics_generic); 320EXPORT_SYMBOL(__dst_destroy_metrics_generic);
321 321
322/** 322/**
323 * skb_dst_set_noref - sets skb dst, without a reference 323 * __skb_dst_set_noref - sets skb dst, without a reference
324 * @skb: buffer 324 * @skb: buffer
325 * @dst: dst entry 325 * @dst: dst entry
326 * @force: if force is set, use noref version even for DST_NOCACHE entries
326 * 327 *
327 * Sets skb dst, assuming a reference was not taken on dst 328 * Sets skb dst, assuming a reference was not taken on dst
328 * skb_dst_drop() should not dst_release() this dst 329 * skb_dst_drop() should not dst_release() this dst
329 */ 330 */
330void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst) 331void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst, bool force)
331{ 332{
332 WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); 333 WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
333 /* If dst not in cache, we must take a reference, because 334 /* If dst not in cache, we must take a reference, because
334 * dst_release() will destroy dst as soon as its refcount becomes zero 335 * dst_release() will destroy dst as soon as its refcount becomes zero
335 */ 336 */
336 if (unlikely(dst->flags & DST_NOCACHE)) { 337 if (unlikely((dst->flags & DST_NOCACHE) && !force)) {
337 dst_hold(dst); 338 dst_hold(dst);
338 skb_dst_set(skb, dst); 339 skb_dst_set(skb, dst);
339 } else { 340 } else {
340 skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF; 341 skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
341 } 342 }
342} 343}
343EXPORT_SYMBOL(skb_dst_set_noref); 344EXPORT_SYMBOL(__skb_dst_set_noref);
344 345
345/* Dirty hack. We did it in 2.2 (in __dst_free), 346/* Dirty hack. We did it in 2.2 (in __dst_free),
346 * we have _very_ good reasons not to repeat 347 * we have _very_ good reasons not to repeat
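A reasonable guess at the callers' side of this rename (not shown in this hunk): the old entry point and a _force variant become thin wrappers over __skb_dst_set_noref():

/* Sketch only; wrapper names assumed from the new 'force' parameter. */
static inline void skb_dst_set_noref(struct sk_buff *skb,
				     struct dst_entry *dst)
{
	__skb_dst_set_noref(skb, dst, false);
}

static inline void skb_dst_set_noref_force(struct sk_buff *skb,
					   struct dst_entry *dst)
{
	__skb_dst_set_noref(skb, dst, true);
}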
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 3e9b2c3e30f0..adc1351e6873 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -78,6 +78,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
78 [NETIF_F_TSO6_BIT] = "tx-tcp6-segmentation", 78 [NETIF_F_TSO6_BIT] = "tx-tcp6-segmentation",
79 [NETIF_F_FSO_BIT] = "tx-fcoe-segmentation", 79 [NETIF_F_FSO_BIT] = "tx-fcoe-segmentation",
80 [NETIF_F_GSO_GRE_BIT] = "tx-gre-segmentation", 80 [NETIF_F_GSO_GRE_BIT] = "tx-gre-segmentation",
81 [NETIF_F_GSO_UDP_TUNNEL_BIT] = "tx-udp_tnl-segmentation",
81 82
82 [NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc", 83 [NETIF_F_FCOE_CRC_BIT] = "tx-checksum-fcoe-crc",
83 [NETIF_F_SCTP_CSUM_BIT] = "tx-checksum-sctp", 84 [NETIF_F_SCTP_CSUM_BIT] = "tx-checksum-sctp",
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 58a4ba27dfe3..d5a9f8ead0d8 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -266,7 +266,7 @@ errout:
266 return err; 266 return err;
267} 267}
268 268
269static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) 269static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh)
270{ 270{
271 struct net *net = sock_net(skb->sk); 271 struct net *net = sock_net(skb->sk);
272 struct fib_rule_hdr *frh = nlmsg_data(nlh); 272 struct fib_rule_hdr *frh = nlmsg_data(nlh);
@@ -415,7 +415,7 @@ errout:
415 return err; 415 return err;
416} 416}
417 417
418static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) 418static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh)
419{ 419{
420 struct net *net = sock_net(skb->sk); 420 struct net *net = sock_net(skb->sk);
421 struct fib_rule_hdr *frh = nlmsg_data(nlh); 421 struct fib_rule_hdr *frh = nlmsg_data(nlh);
diff --git a/net/core/filter.c b/net/core/filter.c
index 2e20b55a7830..dad2a178f9f8 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -348,6 +348,9 @@ load_b:
348 case BPF_S_ANC_VLAN_TAG_PRESENT: 348 case BPF_S_ANC_VLAN_TAG_PRESENT:
349 A = !!vlan_tx_tag_present(skb); 349 A = !!vlan_tx_tag_present(skb);
350 continue; 350 continue;
351 case BPF_S_ANC_PAY_OFFSET:
352 A = __skb_get_poff(skb);
353 continue;
351 case BPF_S_ANC_NLATTR: { 354 case BPF_S_ANC_NLATTR: {
352 struct nlattr *nla; 355 struct nlattr *nla;
353 356
@@ -612,6 +615,7 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
612 ANCILLARY(ALU_XOR_X); 615 ANCILLARY(ALU_XOR_X);
613 ANCILLARY(VLAN_TAG); 616 ANCILLARY(VLAN_TAG);
614 ANCILLARY(VLAN_TAG_PRESENT); 617 ANCILLARY(VLAN_TAG_PRESENT);
618 ANCILLARY(PAY_OFFSET);
615 } 619 }
616 620
617 /* ancillary operation unknown or unsupported */ 621 /* ancillary operation unknown or unsupported */
@@ -814,6 +818,7 @@ static void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
814 [BPF_S_ANC_SECCOMP_LD_W] = BPF_LD|BPF_B|BPF_ABS, 818 [BPF_S_ANC_SECCOMP_LD_W] = BPF_LD|BPF_B|BPF_ABS,
815 [BPF_S_ANC_VLAN_TAG] = BPF_LD|BPF_B|BPF_ABS, 819 [BPF_S_ANC_VLAN_TAG] = BPF_LD|BPF_B|BPF_ABS,
816 [BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS, 820 [BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS,
821 [BPF_S_ANC_PAY_OFFSET] = BPF_LD|BPF_B|BPF_ABS,
817 [BPF_S_LD_W_LEN] = BPF_LD|BPF_W|BPF_LEN, 822 [BPF_S_LD_W_LEN] = BPF_LD|BPF_W|BPF_LEN,
818 [BPF_S_LD_W_IND] = BPF_LD|BPF_W|BPF_IND, 823 [BPF_S_LD_W_IND] = BPF_LD|BPF_W|BPF_IND,
819 [BPF_S_LD_H_IND] = BPF_LD|BPF_H|BPF_IND, 824 [BPF_S_LD_H_IND] = BPF_LD|BPF_H|BPF_IND,
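The new BPF_S_ANC_PAY_OFFSET ancillary exposes the flow dissector's payload offset (see __skb_get_poff() below) to classic BPF. A sketch of a header-only capture filter, assuming SKF_AD_PAY_OFFSET is the matching uapi constant; returning A from a socket filter truncates the packet to A bytes:

#include <linux/filter.h>

/* Accept every packet, truncated at the payload start, so only
 * protocol headers ever reach user space.
 */
static struct sock_filter headers_only[] = {
	/* A = payload offset computed by the kernel flow dissector */
	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_PAY_OFFSET),
	/* return A: keep that many bytes */
	BPF_STMT(BPF_RET | BPF_A, 0),
};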
diff --git a/net/core/flow.c b/net/core/flow.c
index 2bfd081c59f7..7102f166482d 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -323,6 +323,24 @@ static void flow_cache_flush_tasklet(unsigned long data)
323 complete(&info->completion); 323 complete(&info->completion);
324} 324}
325 325
326/*
327 * Return whether a cpu's flow cache is empty, and may thus be skipped
328 * when flushing. Conservatively, any entries at all mean the core may
329 * require flushing, since the flow_cache_ops.check() function may
330 * assume it's running on the same core as the per-cpu cache component.
331 */
332static int flow_cache_percpu_empty(struct flow_cache *fc, int cpu)
333{
334 struct flow_cache_percpu *fcp;
335 int i;
336
337 fcp = per_cpu_ptr(fc->percpu, cpu);
338 for (i = 0; i < flow_cache_hash_size(fc); i++)
339 if (!hlist_empty(&fcp->hash_table[i]))
340 return 0;
341 return 1;
342}
343
326static void flow_cache_flush_per_cpu(void *data) 344static void flow_cache_flush_per_cpu(void *data)
327{ 345{
328 struct flow_flush_info *info = data; 346 struct flow_flush_info *info = data;
@@ -337,22 +355,40 @@ void flow_cache_flush(void)
337{ 355{
338 struct flow_flush_info info; 356 struct flow_flush_info info;
339 static DEFINE_MUTEX(flow_flush_sem); 357 static DEFINE_MUTEX(flow_flush_sem);
358 cpumask_var_t mask;
359 int i, self;
360
361 /* Track which cpus need flushing to avoid disturbing all cores. */
362 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
363 return;
364 cpumask_clear(mask);
340 365
341 /* Don't want cpus going down or up during this. */ 366 /* Don't want cpus going down or up during this. */
342 get_online_cpus(); 367 get_online_cpus();
343 mutex_lock(&flow_flush_sem); 368 mutex_lock(&flow_flush_sem);
344 info.cache = &flow_cache_global; 369 info.cache = &flow_cache_global;
345 atomic_set(&info.cpuleft, num_online_cpus()); 370 for_each_online_cpu(i)
371 if (!flow_cache_percpu_empty(info.cache, i))
372 cpumask_set_cpu(i, mask);
373 atomic_set(&info.cpuleft, cpumask_weight(mask));
374 if (atomic_read(&info.cpuleft) == 0)
375 goto done;
376
346 init_completion(&info.completion); 377 init_completion(&info.completion);
347 378
348 local_bh_disable(); 379 local_bh_disable();
349 smp_call_function(flow_cache_flush_per_cpu, &info, 0); 380 self = cpumask_test_and_clear_cpu(smp_processor_id(), mask);
350 flow_cache_flush_tasklet((unsigned long)&info); 381 on_each_cpu_mask(mask, flow_cache_flush_per_cpu, &info, 0);
382 if (self)
383 flow_cache_flush_tasklet((unsigned long)&info);
351 local_bh_enable(); 384 local_bh_enable();
352 385
353 wait_for_completion(&info.completion); 386 wait_for_completion(&info.completion);
387
388done:
354 mutex_unlock(&flow_flush_sem); 389 mutex_unlock(&flow_flush_sem);
355 put_online_cpus(); 390 put_online_cpus();
391 free_cpumask_var(mask);
356} 392}
357 393
358static void flow_cache_flush_task(struct work_struct *work) 394static void flow_cache_flush_task(struct work_struct *work)
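Net effect of the flow.c changes above: instead of IPI-ing every online cpu on each flush, only cpus whose per-cpu caches actually hold entries are interrupted. The generic shape of that optimization, with an assumed per-cpu predicate:

static void flush_where_needed(void (*do_work)(void *), void *info)
{
	cpumask_var_t mask;
	int cpu;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return;
	cpumask_clear(mask);
	/* hotplug protection (get_online_cpus()) omitted for brevity */
	for_each_online_cpu(cpu)
		if (cpu_has_pending_work(cpu))	/* assumed predicate */
			cpumask_set_cpu(cpu, mask);
	on_each_cpu_mask(mask, do_work, info, 1);	/* 1 == wait */
	free_cpumask_var(mask);
}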
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index e187bf06d673..00ee068efc1c 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -5,6 +5,10 @@
5#include <linux/if_vlan.h> 5#include <linux/if_vlan.h>
6#include <net/ip.h> 6#include <net/ip.h>
7#include <net/ipv6.h> 7#include <net/ipv6.h>
8#include <linux/igmp.h>
9#include <linux/icmp.h>
10#include <linux/sctp.h>
11#include <linux/dccp.h>
8#include <linux/if_tunnel.h> 12#include <linux/if_tunnel.h>
9#include <linux/if_pppox.h> 13#include <linux/if_pppox.h>
10#include <linux/ppp_defs.h> 14#include <linux/ppp_defs.h>
@@ -119,6 +123,17 @@ ipv6:
119 nhoff += 4; 123 nhoff += 4;
120 if (hdr->flags & GRE_SEQ) 124 if (hdr->flags & GRE_SEQ)
121 nhoff += 4; 125 nhoff += 4;
126 if (proto == htons(ETH_P_TEB)) {
127 const struct ethhdr *eth;
128 struct ethhdr _eth;
129
130 eth = skb_header_pointer(skb, nhoff,
131 sizeof(_eth), &_eth);
132 if (!eth)
133 return false;
134 proto = eth->h_proto;
135 nhoff += sizeof(*eth);
136 }
122 goto again; 137 goto again;
123 } 138 }
124 break; 139 break;
@@ -217,6 +232,59 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
217} 232}
218EXPORT_SYMBOL(__skb_tx_hash); 233EXPORT_SYMBOL(__skb_tx_hash);
219 234
235/* __skb_get_poff() returns the offset to the payload as far as it could
236 * be dissected. The main user is currently BPF, which can then
237 * dynamically truncate packets and analyze headers only, without
238 * pushing the actual payload to user space.
239 */
240u32 __skb_get_poff(const struct sk_buff *skb)
241{
242 struct flow_keys keys;
243 u32 poff = 0;
244
245 if (!skb_flow_dissect(skb, &keys))
246 return 0;
247
248 poff += keys.thoff;
249 switch (keys.ip_proto) {
250 case IPPROTO_TCP: {
251 const struct tcphdr *tcph;
252 struct tcphdr _tcph;
253
254 tcph = skb_header_pointer(skb, poff, sizeof(_tcph), &_tcph);
255 if (!tcph)
256 return poff;
257
258 poff += max_t(u32, sizeof(struct tcphdr), tcph->doff * 4);
259 break;
260 }
261 case IPPROTO_UDP:
262 case IPPROTO_UDPLITE:
263 poff += sizeof(struct udphdr);
264 break;
265 /* For the rest, we do not really care about header
266 * extensions at this point for now.
267 */
268 case IPPROTO_ICMP:
269 poff += sizeof(struct icmphdr);
270 break;
271 case IPPROTO_ICMPV6:
272 poff += sizeof(struct icmp6hdr);
273 break;
274 case IPPROTO_IGMP:
275 poff += sizeof(struct igmphdr);
276 break;
277 case IPPROTO_DCCP:
278 poff += sizeof(struct dccp_hdr);
279 break;
280 case IPPROTO_SCTP:
281 poff += sizeof(struct sctphdr);
282 break;
283 }
284
285 return poff;
286}
287
220static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index) 288static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
221{ 289{
222 if (unlikely(queue_index >= dev->real_num_tx_queues)) { 290 if (unlikely(queue_index >= dev->real_num_tx_queues)) {
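To make __skb_get_poff()'s arithmetic concrete, a worked example on an assumed frame:

/* Assumed IPv4/TCP frame: Ethernet(14) + IPv4, no options (20) + TCP.
 *   keys.thoff = 34                      (transport header offset)
 *   tcph->doff = 8  ->  8 * 4 = 32       (TCP header with options)
 *   poff = 34 + max_t(u32, 20, 32) = 66  (payload starts at byte 66)
 * max_t() clamps a malformed doff < 5 to the 20-byte minimum header.
 */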
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 3863b8f639c5..c72a646d9f44 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1613,7 +1613,7 @@ int neigh_table_clear(struct neigh_table *tbl)
1613} 1613}
1614EXPORT_SYMBOL(neigh_table_clear); 1614EXPORT_SYMBOL(neigh_table_clear);
1615 1615
1616static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 1616static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh)
1617{ 1617{
1618 struct net *net = sock_net(skb->sk); 1618 struct net *net = sock_net(skb->sk);
1619 struct ndmsg *ndm; 1619 struct ndmsg *ndm;
@@ -1677,7 +1677,7 @@ out:
1677 return err; 1677 return err;
1678} 1678}
1679 1679
1680static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 1680static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh)
1681{ 1681{
1682 struct net *net = sock_net(skb->sk); 1682 struct net *net = sock_net(skb->sk);
1683 struct ndmsg *ndm; 1683 struct ndmsg *ndm;
@@ -1955,7 +1955,7 @@ static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1955 [NDTPA_LOCKTIME] = { .type = NLA_U64 }, 1955 [NDTPA_LOCKTIME] = { .type = NLA_U64 },
1956}; 1956};
1957 1957
1958static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 1958static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
1959{ 1959{
1960 struct net *net = sock_net(skb->sk); 1960 struct net *net = sock_net(skb->sk);
1961 struct neigh_table *tbl; 1961 struct neigh_table *tbl;
diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
index 3174f1998ee6..569d355fec3e 100644
--- a/net/core/net-procfs.c
+++ b/net/core/net-procfs.c
@@ -271,7 +271,7 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
271 else 271 else
272 seq_printf(seq, "%04x", ntohs(pt->type)); 272 seq_printf(seq, "%04x", ntohs(pt->type));
273 273
274 seq_printf(seq, " %-8s %pF\n", 274 seq_printf(seq, " %-8s %pf\n",
275 pt->dev ? pt->dev->name : "", pt->func); 275 pt->dev ? pt->dev->name : "", pt->func);
276 } 276 }
277 277
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index fa32899006a2..a3a17aed3639 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -47,7 +47,7 @@ static struct sk_buff_head skb_pool;
47 47
48static atomic_t trapped; 48static atomic_t trapped;
49 49
50static struct srcu_struct netpoll_srcu; 50DEFINE_STATIC_SRCU(netpoll_srcu);
51 51
52#define USEC_PER_POLL 50 52#define USEC_PER_POLL 50
53#define NETPOLL_RX_ENABLED 1 53#define NETPOLL_RX_ENABLED 1
@@ -1212,7 +1212,6 @@ EXPORT_SYMBOL(netpoll_setup);
1212static int __init netpoll_init(void) 1212static int __init netpoll_init(void)
1213{ 1213{
1214 skb_queue_head_init(&skb_pool); 1214 skb_queue_head_init(&skb_pool);
1215 init_srcu_struct(&netpoll_srcu);
1216 return 0; 1215 return 0;
1217} 1216}
1218core_initcall(netpoll_init); 1217core_initcall(netpoll_init);
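DEFINE_STATIC_SRCU() initializes the srcu_struct at compile time, which is why the init_srcu_struct() call in netpoll_init() can simply be deleted: the structure is valid before any initcall runs. The two forms side by side:

/* Runtime form (removed above), usable only after the initcall:
 *	static struct srcu_struct netpoll_srcu;
 *	init_srcu_struct(&netpoll_srcu);
 * Compile-time form (added above), valid from the start:
 */
DEFINE_STATIC_SRCU(example_srcu);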
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index b65441da74ab..589d0abb34a0 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -517,32 +517,6 @@ out:
517 return err; 517 return err;
518} 518}
519 519
520static const int rtm_min[RTM_NR_FAMILIES] =
521{
522 [RTM_FAM(RTM_NEWLINK)] = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
523 [RTM_FAM(RTM_NEWADDR)] = NLMSG_LENGTH(sizeof(struct ifaddrmsg)),
524 [RTM_FAM(RTM_NEWROUTE)] = NLMSG_LENGTH(sizeof(struct rtmsg)),
525 [RTM_FAM(RTM_NEWRULE)] = NLMSG_LENGTH(sizeof(struct fib_rule_hdr)),
526 [RTM_FAM(RTM_NEWQDISC)] = NLMSG_LENGTH(sizeof(struct tcmsg)),
527 [RTM_FAM(RTM_NEWTCLASS)] = NLMSG_LENGTH(sizeof(struct tcmsg)),
528 [RTM_FAM(RTM_NEWTFILTER)] = NLMSG_LENGTH(sizeof(struct tcmsg)),
529 [RTM_FAM(RTM_NEWACTION)] = NLMSG_LENGTH(sizeof(struct tcamsg)),
530 [RTM_FAM(RTM_GETMULTICAST)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
531 [RTM_FAM(RTM_GETANYCAST)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
532};
533
534static const int rta_max[RTM_NR_FAMILIES] =
535{
536 [RTM_FAM(RTM_NEWLINK)] = IFLA_MAX,
537 [RTM_FAM(RTM_NEWADDR)] = IFA_MAX,
538 [RTM_FAM(RTM_NEWROUTE)] = RTA_MAX,
539 [RTM_FAM(RTM_NEWRULE)] = FRA_MAX,
540 [RTM_FAM(RTM_NEWQDISC)] = TCA_MAX,
541 [RTM_FAM(RTM_NEWTCLASS)] = TCA_MAX,
542 [RTM_FAM(RTM_NEWTFILTER)] = TCA_MAX,
543 [RTM_FAM(RTM_NEWACTION)] = TCAA_MAX,
544};
545
546int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo) 520int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
547{ 521{
548 struct sock *rtnl = net->rtnl; 522 struct sock *rtnl = net->rtnl;
@@ -1539,7 +1513,7 @@ errout:
1539 return err; 1513 return err;
1540} 1514}
1541 1515
1542static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 1516static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
1543{ 1517{
1544 struct net *net = sock_net(skb->sk); 1518 struct net *net = sock_net(skb->sk);
1545 struct ifinfomsg *ifm; 1519 struct ifinfomsg *ifm;
@@ -1580,7 +1554,7 @@ errout:
1580 return err; 1554 return err;
1581} 1555}
1582 1556
1583static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 1557static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
1584{ 1558{
1585 struct net *net = sock_net(skb->sk); 1559 struct net *net = sock_net(skb->sk);
1586 const struct rtnl_link_ops *ops; 1560 const struct rtnl_link_ops *ops;
@@ -1711,7 +1685,7 @@ static int rtnl_group_changelink(struct net *net, int group,
1711 return 0; 1685 return 0;
1712} 1686}
1713 1687
1714static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 1688static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh)
1715{ 1689{
1716 struct net *net = sock_net(skb->sk); 1690 struct net *net = sock_net(skb->sk);
1717 const struct rtnl_link_ops *ops; 1691 const struct rtnl_link_ops *ops;
@@ -1866,7 +1840,7 @@ out:
1866 } 1840 }
1867} 1841}
1868 1842
1869static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) 1843static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh)
1870{ 1844{
1871 struct net *net = sock_net(skb->sk); 1845 struct net *net = sock_net(skb->sk);
1872 struct ifinfomsg *ifm; 1846 struct ifinfomsg *ifm;
@@ -1957,8 +1931,11 @@ static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
1957 if (rtnl_msg_handlers[idx] == NULL || 1931 if (rtnl_msg_handlers[idx] == NULL ||
1958 rtnl_msg_handlers[idx][type].dumpit == NULL) 1932 rtnl_msg_handlers[idx][type].dumpit == NULL)
1959 continue; 1933 continue;
1960 if (idx > s_idx) 1934 if (idx > s_idx) {
1961 memset(&cb->args[0], 0, sizeof(cb->args)); 1935 memset(&cb->args[0], 0, sizeof(cb->args));
1936 cb->prev_seq = 0;
1937 cb->seq = 0;
1938 }
1962 if (rtnl_msg_handlers[idx][type].dumpit(skb, cb)) 1939 if (rtnl_msg_handlers[idx][type].dumpit(skb, cb))
1963 break; 1940 break;
1964 } 1941 }
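For context on why rtnl_dump_all() must clear cb->args, cb->prev_seq and cb->seq when it advances to the next family: each dumpit keeps its resume cursor and sequence state in the shared netlink_callback. A hedged sketch of that convention (names and the loop bound are illustrative):

	static int example_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
	{
		int idx = cb->args[0];		/* cursor left by the previous pass */

		for (; idx < 16; idx++) {
			/* ... nlmsg_put() one object; break when skb is full ... */
		}
		cb->args[0] = idx;		/* resume point for the next pass */
		return skb->len;
	}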
@@ -2051,7 +2028,39 @@ errout:
2051 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); 2028 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2052} 2029}
2053 2030
2054static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 2031/**
2032 * ndo_dflt_fdb_add - default netdevice operation to add an FDB entry
2033 */
2034int ndo_dflt_fdb_add(struct ndmsg *ndm,
2035 struct nlattr *tb[],
2036 struct net_device *dev,
2037 const unsigned char *addr,
2038 u16 flags)
2039{
2040 int err = -EINVAL;
2041
 2042 /* If aging addresses are supported, the device will need to
2043 * implement its own handler for this.
2044 */
2045 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
2046 pr_info("%s: FDB only supports static addresses\n", dev->name);
2047 return err;
2048 }
2049
2050 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
2051 err = dev_uc_add_excl(dev, addr);
2052 else if (is_multicast_ether_addr(addr))
2053 err = dev_mc_add_excl(dev, addr);
2054
2055 /* Only return duplicate errors if NLM_F_EXCL is set */
2056 if (err == -EEXIST && !(flags & NLM_F_EXCL))
2057 err = 0;
2058
2059 return err;
2060}
2061EXPORT_SYMBOL(ndo_dflt_fdb_add);
2062
2063static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
2055{ 2064{
2056 struct net *net = sock_net(skb->sk); 2065 struct net *net = sock_net(skb->sk);
2057 struct ndmsg *ndm; 2066 struct ndmsg *ndm;
@@ -2082,7 +2091,7 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
2082 } 2091 }
2083 2092
2084 addr = nla_data(tb[NDA_LLADDR]); 2093 addr = nla_data(tb[NDA_LLADDR]);
2085 if (!is_valid_ether_addr(addr)) { 2094 if (is_zero_ether_addr(addr)) {
2086 pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid ether address\n"); 2095 pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid ether address\n");
2087 return -EINVAL; 2096 return -EINVAL;
2088 } 2097 }
@@ -2103,10 +2112,13 @@ static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
2103 } 2112 }
2104 2113
2105 /* Embedded bridge, macvlan, and any other device support */ 2114 /* Embedded bridge, macvlan, and any other device support */
2106 if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_add) { 2115 if ((ndm->ndm_flags & NTF_SELF)) {
2107 err = dev->netdev_ops->ndo_fdb_add(ndm, tb, 2116 if (dev->netdev_ops->ndo_fdb_add)
2108 dev, addr, 2117 err = dev->netdev_ops->ndo_fdb_add(ndm, tb, dev, addr,
2109 nlh->nlmsg_flags); 2118 nlh->nlmsg_flags);
2119 else
2120 err = ndo_dflt_fdb_add(ndm, tb, dev, addr,
2121 nlh->nlmsg_flags);
2110 2122
2111 if (!err) { 2123 if (!err) {
2112 rtnl_fdb_notify(dev, addr, RTM_NEWNEIGH); 2124 rtnl_fdb_notify(dev, addr, RTM_NEWNEIGH);
@@ -2117,7 +2129,36 @@ out:
2117 return err; 2129 return err;
2118} 2130}
2119 2131
2120static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 2132/**
2133 * ndo_dflt_fdb_del - default netdevice operation to delete an FDB entry
2134 */
2135int ndo_dflt_fdb_del(struct ndmsg *ndm,
2136 struct nlattr *tb[],
2137 struct net_device *dev,
2138 const unsigned char *addr)
2139{
2140 int err = -EOPNOTSUPP;
2141
 2142 /* If aging addresses are supported, the device will need to
2143 * implement its own handler for this.
2144 */
2145 if (ndm->ndm_state & NUD_PERMANENT) {
2146 pr_info("%s: FDB only supports static addresses\n", dev->name);
2147 return -EINVAL;
2148 }
2149
2150 if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
2151 err = dev_uc_del(dev, addr);
2152 else if (is_multicast_ether_addr(addr))
2153 err = dev_mc_del(dev, addr);
2154 else
2155 err = -EINVAL;
2156
2157 return err;
2158}
2159EXPORT_SYMBOL(ndo_dflt_fdb_del);
2160
2161static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
2121{ 2162{
2122 struct net *net = sock_net(skb->sk); 2163 struct net *net = sock_net(skb->sk);
2123 struct ndmsg *ndm; 2164 struct ndmsg *ndm;
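Because ndo_dflt_fdb_add() and ndo_dflt_fdb_del() are exported above, a driver with only one special case can layer it on top of the generic unicast/multicast handling rather than reimplement it. A sketch under that assumption; the handler name and the rejected-address rule are hypothetical:

	static int example_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
				   struct net_device *dev,
				   const unsigned char *addr, u16 flags)
	{
		/* hypothetical rule: refuse to program our own address */
		if (ether_addr_equal(addr, dev->dev_addr))
			return -EBUSY;

		return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags);
	}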
@@ -2174,8 +2215,11 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
2174 } 2215 }
2175 2216
2176 /* Embedded bridge, macvlan, and any other device support */ 2217 /* Embedded bridge, macvlan, and any other device support */
2177 if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_del) { 2218 if (ndm->ndm_flags & NTF_SELF) {
2178 err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr); 2219 if (dev->netdev_ops->ndo_fdb_del)
2220 err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr);
2221 else
2222 err = ndo_dflt_fdb_del(ndm, tb, dev, addr);
2179 2223
2180 if (!err) { 2224 if (!err) {
2181 rtnl_fdb_notify(dev, addr, RTM_DELNEIGH); 2225 rtnl_fdb_notify(dev, addr, RTM_DELNEIGH);
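The other side of the fallback: a driver may now leave the FDB hooks unset entirely, and rtnl_fdb_add/del/dump route straight to the defaults. Illustrative only; the open/stop callbacks named here are placeholders:

	static const struct net_device_ops example_netdev_ops = {
		.ndo_open	= example_open,		/* hypothetical */
		.ndo_stop	= example_stop,		/* hypothetical */
		/* no .ndo_fdb_add/.ndo_fdb_del/.ndo_fdb_dump:
		 * rtnetlink falls back to ndo_dflt_fdb_*().
		 */
	};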
@@ -2220,7 +2264,7 @@ skip:
2220 * @dev: netdevice 2264 * @dev: netdevice
2221 * 2265 *
2222 * Default netdevice operation to dump the existing unicast address list. 2266 * Default netdevice operation to dump the existing unicast address list.
2223 * Returns zero on success. 2267 * Returns the number of addresses from the list put in the skb.
2224 */ 2268 */
2225int ndo_dflt_fdb_dump(struct sk_buff *skb, 2269int ndo_dflt_fdb_dump(struct sk_buff *skb,
2226 struct netlink_callback *cb, 2270 struct netlink_callback *cb,
@@ -2260,6 +2304,8 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
2260 2304
2261 if (dev->netdev_ops->ndo_fdb_dump) 2305 if (dev->netdev_ops->ndo_fdb_dump)
2262 idx = dev->netdev_ops->ndo_fdb_dump(skb, cb, dev, idx); 2306 idx = dev->netdev_ops->ndo_fdb_dump(skb, cb, dev, idx);
2307 else
2308 idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
2263 } 2309 }
2264 rcu_read_unlock(); 2310 rcu_read_unlock();
2265 2311
@@ -2411,8 +2457,7 @@ errout:
2411 return err; 2457 return err;
2412} 2458}
2413 2459
2414static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, 2460static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
2415 void *arg)
2416{ 2461{
2417 struct net *net = sock_net(skb->sk); 2462 struct net *net = sock_net(skb->sk);
2418 struct ifinfomsg *ifm; 2463 struct ifinfomsg *ifm;
@@ -2482,8 +2527,7 @@ out:
2482 return err; 2527 return err;
2483} 2528}
2484 2529
2485static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, 2530static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
2486 void *arg)
2487{ 2531{
2488 struct net *net = sock_net(skb->sk); 2532 struct net *net = sock_net(skb->sk);
2489 struct ifinfomsg *ifm; 2533 struct ifinfomsg *ifm;
@@ -2553,10 +2597,6 @@ out:
2553 return err; 2597 return err;
2554} 2598}
2555 2599
2556/* Protected by RTNL semaphore. */
2557static struct rtattr **rta_buf;
2558static int rtattr_max;
2559
2560/* Process one rtnetlink message. */ 2600/* Process one rtnetlink message. */
2561 2601
2562static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) 2602static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
@@ -2564,7 +2604,6 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2564 struct net *net = sock_net(skb->sk); 2604 struct net *net = sock_net(skb->sk);
2565 rtnl_doit_func doit; 2605 rtnl_doit_func doit;
2566 int sz_idx, kind; 2606 int sz_idx, kind;
2567 int min_len;
2568 int family; 2607 int family;
2569 int type; 2608 int type;
2570 int err; 2609 int err;
@@ -2576,10 +2615,10 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2576 type -= RTM_BASE; 2615 type -= RTM_BASE;
2577 2616
2578 /* All the messages must have at least 1 byte length */ 2617 /* All the messages must have at least 1 byte length */
2579 if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(struct rtgenmsg))) 2618 if (nlmsg_len(nlh) < sizeof(struct rtgenmsg))
2580 return 0; 2619 return 0;
2581 2620
2582 family = ((struct rtgenmsg *)NLMSG_DATA(nlh))->rtgen_family; 2621 family = ((struct rtgenmsg *)nlmsg_data(nlh))->rtgen_family;
2583 sz_idx = type>>2; 2622 sz_idx = type>>2;
2584 kind = type&3; 2623 kind = type&3;
2585 2624
@@ -2612,32 +2651,11 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2612 return err; 2651 return err;
2613 } 2652 }
2614 2653
2615 memset(rta_buf, 0, (rtattr_max * sizeof(struct rtattr *)));
2616
2617 min_len = rtm_min[sz_idx];
2618 if (nlh->nlmsg_len < min_len)
2619 return -EINVAL;
2620
2621 if (nlh->nlmsg_len > min_len) {
2622 int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len);
2623 struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len);
2624
2625 while (RTA_OK(attr, attrlen)) {
2626 unsigned int flavor = attr->rta_type & NLA_TYPE_MASK;
2627 if (flavor) {
2628 if (flavor > rta_max[sz_idx])
2629 return -EINVAL;
2630 rta_buf[flavor-1] = attr;
2631 }
2632 attr = RTA_NEXT(attr, attrlen);
2633 }
2634 }
2635
2636 doit = rtnl_get_doit(family, type); 2654 doit = rtnl_get_doit(family, type);
2637 if (doit == NULL) 2655 if (doit == NULL)
2638 return -EOPNOTSUPP; 2656 return -EOPNOTSUPP;
2639 2657
2640 return doit(skb, nlh, (void *)&rta_buf[0]); 2658 return doit(skb, nlh);
2641} 2659}
2642 2660
2643static void rtnetlink_rcv(struct sk_buff *skb) 2661static void rtnetlink_rcv(struct sk_buff *skb)
@@ -2707,16 +2725,6 @@ static struct pernet_operations rtnetlink_net_ops = {
2707 2725
2708void __init rtnetlink_init(void) 2726void __init rtnetlink_init(void)
2709{ 2727{
2710 int i;
2711
2712 rtattr_max = 0;
2713 for (i = 0; i < ARRAY_SIZE(rta_max); i++)
2714 if (rta_max[i] > rtattr_max)
2715 rtattr_max = rta_max[i];
2716 rta_buf = kmalloc(rtattr_max * sizeof(struct rtattr *), GFP_KERNEL);
2717 if (!rta_buf)
2718 panic("rtnetlink_init: cannot allocate rta_buf\n");
2719
2720 if (register_pernet_subsys(&rtnetlink_net_ops)) 2728 if (register_pernet_subsys(&rtnetlink_net_ops))
2721 panic("rtnetlink_init: cannot initialize rtnetlink\n"); 2729 panic("rtnetlink_init: cannot initialize rtnetlink\n");
2722 2730
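Taken together, the rtnetlink changes retire the pre-parsed rta_buf and the rtm_min/rta_max tables: every doit handler loses its void *arg and validates its own attributes. A sketch of the resulting handler shape, assuming an ifinfomsg-based message; ifla_policy stands in for whatever policy the real handler uses:

	static int example_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
	{
		struct nlattr *tb[IFLA_MAX + 1];
		int err;

		err = nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb,
				  IFLA_MAX, ifla_policy);
		if (err < 0)
			return err;

		/* ... operate on the validated tb[] attributes ... */
		return 0;
	}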
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 33245ef54c3b..ba646145cd5c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -673,6 +673,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
673 new->mac_header = old->mac_header; 673 new->mac_header = old->mac_header;
674 new->inner_transport_header = old->inner_transport_header; 674 new->inner_transport_header = old->inner_transport_header;
675 new->inner_network_header = old->inner_network_header; 675 new->inner_network_header = old->inner_network_header;
676 new->inner_mac_header = old->inner_mac_header;
676 skb_dst_copy(new, old); 677 skb_dst_copy(new, old);
677 new->rxhash = old->rxhash; 678 new->rxhash = old->rxhash;
678 new->ooo_okay = old->ooo_okay; 679 new->ooo_okay = old->ooo_okay;
@@ -867,6 +868,18 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
867} 868}
868EXPORT_SYMBOL(skb_clone); 869EXPORT_SYMBOL(skb_clone);
869 870
871static void skb_headers_offset_update(struct sk_buff *skb, int off)
872{
873 /* {transport,network,mac}_header and tail are relative to skb->head */
874 skb->transport_header += off;
875 skb->network_header += off;
876 if (skb_mac_header_was_set(skb))
877 skb->mac_header += off;
878 skb->inner_transport_header += off;
879 skb->inner_network_header += off;
880 skb->inner_mac_header += off;
881}
882
870static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) 883static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
871{ 884{
872#ifndef NET_SKBUFF_DATA_USES_OFFSET 885#ifndef NET_SKBUFF_DATA_USES_OFFSET
@@ -879,13 +892,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
879 __copy_skb_header(new, old); 892 __copy_skb_header(new, old);
880 893
881#ifndef NET_SKBUFF_DATA_USES_OFFSET 894#ifndef NET_SKBUFF_DATA_USES_OFFSET
882 /* {transport,network,mac}_header are relative to skb->head */ 895 skb_headers_offset_update(new, offset);
883 new->transport_header += offset;
884 new->network_header += offset;
885 if (skb_mac_header_was_set(new))
886 new->mac_header += offset;
887 new->inner_transport_header += offset;
888 new->inner_network_header += offset;
889#endif 896#endif
890 skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size; 897 skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
891 skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs; 898 skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
@@ -1077,14 +1084,8 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
1077#else 1084#else
1078 skb->end = skb->head + size; 1085 skb->end = skb->head + size;
1079#endif 1086#endif
1080 /* {transport,network,mac}_header and tail are relative to skb->head */
1081 skb->tail += off; 1087 skb->tail += off;
1082 skb->transport_header += off; 1088 skb_headers_offset_update(skb, off);
1083 skb->network_header += off;
1084 if (skb_mac_header_was_set(skb))
1085 skb->mac_header += off;
1086 skb->inner_transport_header += off;
1087 skb->inner_network_header += off;
1088 /* Only adjust this if it actually is csum_start rather than csum */ 1089 /* Only adjust this if it actually is csum_start rather than csum */
1089 if (skb->ip_summed == CHECKSUM_PARTIAL) 1090 if (skb->ip_summed == CHECKSUM_PARTIAL)
1090 skb->csum_start += nhead; 1091 skb->csum_start += nhead;
@@ -1180,12 +1181,7 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
1180 if (n->ip_summed == CHECKSUM_PARTIAL) 1181 if (n->ip_summed == CHECKSUM_PARTIAL)
1181 n->csum_start += off; 1182 n->csum_start += off;
1182#ifdef NET_SKBUFF_DATA_USES_OFFSET 1183#ifdef NET_SKBUFF_DATA_USES_OFFSET
1183 n->transport_header += off; 1184 skb_headers_offset_update(n, off);
1184 n->network_header += off;
1185 if (skb_mac_header_was_set(skb))
1186 n->mac_header += off;
1187 n->inner_transport_header += off;
1188 n->inner_network_header += off;
1189#endif 1185#endif
1190 1186
1191 return n; 1187 return n;
@@ -2741,12 +2737,19 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
2741 unsigned int tnl_hlen = skb_tnl_header_len(skb); 2737 unsigned int tnl_hlen = skb_tnl_header_len(skb);
2742 unsigned int headroom; 2738 unsigned int headroom;
2743 unsigned int len; 2739 unsigned int len;
2740 __be16 proto;
2741 bool csum;
2744 int sg = !!(features & NETIF_F_SG); 2742 int sg = !!(features & NETIF_F_SG);
2745 int nfrags = skb_shinfo(skb)->nr_frags; 2743 int nfrags = skb_shinfo(skb)->nr_frags;
2746 int err = -ENOMEM; 2744 int err = -ENOMEM;
2747 int i = 0; 2745 int i = 0;
2748 int pos; 2746 int pos;
2749 2747
2748 proto = skb_network_protocol(skb);
2749 if (unlikely(!proto))
2750 return ERR_PTR(-EINVAL);
2751
2752 csum = !!can_checksum_protocol(features, proto);
2750 __skb_push(skb, doffset); 2753 __skb_push(skb, doffset);
2751 headroom = skb_headroom(skb); 2754 headroom = skb_headroom(skb);
2752 pos = skb_headlen(skb); 2755 pos = skb_headlen(skb);
@@ -2884,6 +2887,12 @@ skip_fraglist:
2884 nskb->data_len = len - hsize; 2887 nskb->data_len = len - hsize;
2885 nskb->len += nskb->data_len; 2888 nskb->len += nskb->data_len;
2886 nskb->truesize += nskb->data_len; 2889 nskb->truesize += nskb->data_len;
2890
2891 if (!csum) {
2892 nskb->csum = skb_checksum(nskb, doffset,
2893 nskb->len - doffset, 0);
2894 nskb->ip_summed = CHECKSUM_NONE;
2895 }
2887 } while ((offset += len) < skb->len); 2896 } while ((offset += len) < skb->len);
2888 2897
2889 return segs; 2898 return segs;
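The new proto/csum logic makes skb_segment() checksum each segment in software whenever the device cannot checksum the skb's network protocol. The fallback is the usual idiom, sketched here in isolation:

	static void example_sw_checksum(struct sk_buff *nskb, int doffset)
	{
		/* checksum everything past the copied headers and mark the
		 * segment as not needing hardware help
		 */
		nskb->csum = skb_checksum(nskb, doffset,
					  nskb->len - doffset, 0);
		nskb->ip_summed = CHECKSUM_NONE;
	}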
@@ -3361,6 +3370,7 @@ bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
3361 skb->ip_summed = CHECKSUM_PARTIAL; 3370 skb->ip_summed = CHECKSUM_PARTIAL;
3362 skb->csum_start = skb_headroom(skb) + start; 3371 skb->csum_start = skb_headroom(skb) + start;
3363 skb->csum_offset = off; 3372 skb->csum_offset = off;
3373 skb_set_transport_header(skb, start);
3364 return true; 3374 return true;
3365} 3375}
3366EXPORT_SYMBOL_GPL(skb_partial_csum_set); 3376EXPORT_SYMBOL_GPL(skb_partial_csum_set);
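skb_partial_csum_set() now also positions the transport header, so callers that previously paired it with skb_set_transport_header() can drop that step. A hedged usage sketch for a plain IPv4+UDP frame, assuming skb->data points at a 20-byte IP header:

	static bool example_mark_partial_csum(struct sk_buff *skb)
	{
		/* transport header 20 bytes in; the UDP checksum field
		 * sits 6 bytes into the UDP header
		 */
		return skb_partial_csum_set(skb, 20, 6);
	}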
diff --git a/net/core/sock.c b/net/core/sock.c
index b261a7977746..2ff5f3619a8d 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -907,6 +907,10 @@ set_rcvbuf:
907 sock_valbool_flag(sk, SOCK_NOFCS, valbool); 907 sock_valbool_flag(sk, SOCK_NOFCS, valbool);
908 break; 908 break;
909 909
910 case SO_SELECT_ERR_QUEUE:
911 sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
912 break;
913
910 default: 914 default:
911 ret = -ENOPROTOOPT; 915 ret = -ENOPROTOOPT;
912 break; 916 break;
@@ -1160,6 +1164,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
1160 v.val = sock_flag(sk, SOCK_FILTER_LOCKED); 1164 v.val = sock_flag(sk, SOCK_FILTER_LOCKED);
1161 break; 1165 break;
1162 1166
1167 case SO_SELECT_ERR_QUEUE:
1168 v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
1169 break;
1170
1163 default: 1171 default:
1164 return -ENOPROTOOPT; 1172 return -ENOPROTOOPT;
1165 } 1173 }
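From userspace, the new SO_SELECT_ERR_QUEUE flag is an ordinary boolean socket option; once set, a pending error-queue message wakes select()/poll() as well. A sketch, assuming libc headers that already export the constant for the running kernel:

	#include <sys/socket.h>

	static int enable_errqueue_wakeups(int fd)
	{
		int one = 1;

		return setsockopt(fd, SOL_SOCKET, SO_SELECT_ERR_QUEUE,
				  &one, sizeof(one));
	}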
@@ -1298,7 +1306,6 @@ static void sk_prot_free(struct proto *prot, struct sock *sk)
1298 module_put(owner); 1306 module_put(owner);
1299} 1307}
1300 1308
1301#ifdef CONFIG_CGROUPS
1302#if IS_ENABLED(CONFIG_NET_CLS_CGROUP) 1309#if IS_ENABLED(CONFIG_NET_CLS_CGROUP)
1303void sock_update_classid(struct sock *sk, struct task_struct *task) 1310void sock_update_classid(struct sock *sk, struct task_struct *task)
1304{ 1311{
@@ -1321,7 +1328,6 @@ void sock_update_netprioidx(struct sock *sk, struct task_struct *task)
1321} 1328}
1322EXPORT_SYMBOL_GPL(sock_update_netprioidx); 1329EXPORT_SYMBOL_GPL(sock_update_netprioidx);
1323#endif 1330#endif
1324#endif
1325 1331
1326/** 1332/**
1327 * sk_alloc - All socket objects are allocated here 1333 * sk_alloc - All socket objects are allocated here
diff --git a/net/core/utils.c b/net/core/utils.c
index e3487e461939..3c7f5b51b979 100644
--- a/net/core/utils.c
+++ b/net/core/utils.c
@@ -17,6 +17,7 @@
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/jiffies.h> 18#include <linux/jiffies.h>
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/ctype.h>
20#include <linux/inet.h> 21#include <linux/inet.h>
21#include <linux/mm.h> 22#include <linux/mm.h>
22#include <linux/net.h> 23#include <linux/net.h>
@@ -348,9 +349,7 @@ int mac_pton(const char *s, u8 *mac)
348 349
349 /* Don't dirty result unless string is valid MAC. */ 350 /* Don't dirty result unless string is valid MAC. */
350 for (i = 0; i < ETH_ALEN; i++) { 351 for (i = 0; i < ETH_ALEN; i++) {
351 if (!strchr("0123456789abcdefABCDEF", s[i * 3])) 352 if (!isxdigit(s[i * 3]) || !isxdigit(s[i * 3 + 1]))
352 return 0;
353 if (!strchr("0123456789abcdefABCDEF", s[i * 3 + 1]))
354 return 0; 353 return 0;
355 if (i != ETH_ALEN - 1 && s[i * 3 + 2] != ':') 354 if (i != ETH_ALEN - 1 && s[i * 3 + 2] != ':')
356 return 0; 355 return 0;
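The ctype conversion does not change mac_pton()'s contract: strictly colon-separated, hex digits of either case, and the output buffer written only on success. A minimal illustration, assuming the declaration in linux/kernel.h of this era:

	#include <linux/kernel.h>
	#include <linux/if_ether.h>

	static void example_mac_parse(void)
	{
		u8 mac[ETH_ALEN];

		if (mac_pton("00:11:22:aa:BB:cc", mac))
			pr_info("parsed %pM\n", mac);	/* mixed case accepted */
	}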
diff --git a/net/dcb/dcbevent.c b/net/dcb/dcbevent.c
index 1d9eb7c60a68..4f72fc40bf02 100644
--- a/net/dcb/dcbevent.c
+++ b/net/dcb/dcbevent.c
@@ -20,6 +20,7 @@
20#include <linux/rtnetlink.h> 20#include <linux/rtnetlink.h>
21#include <linux/notifier.h> 21#include <linux/notifier.h>
22#include <linux/export.h> 22#include <linux/export.h>
23#include <net/dcbevent.h>
23 24
24static ATOMIC_NOTIFIER_HEAD(dcbevent_notif_chain); 25static ATOMIC_NOTIFIER_HEAD(dcbevent_notif_chain);
25 26
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 21291f1abcd6..40d5829ed36a 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -1658,7 +1658,7 @@ static const struct reply_func reply_funcs[DCB_CMD_MAX+1] = {
1658 [DCB_CMD_CEE_GET] = { RTM_GETDCB, dcbnl_cee_get }, 1658 [DCB_CMD_CEE_GET] = { RTM_GETDCB, dcbnl_cee_get },
1659}; 1659};
1660 1660
1661static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 1661static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
1662{ 1662{
1663 struct net *net = sock_net(skb->sk); 1663 struct net *net = sock_net(skb->sk);
1664 struct net_device *netdev; 1664 struct net_device *netdev;
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 4f9f5eb478f1..ebc54fef85a5 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -500,8 +500,7 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
500 return &rt->dst; 500 return &rt->dst;
501} 501}
502 502
503static int dccp_v4_send_response(struct sock *sk, struct request_sock *req, 503static int dccp_v4_send_response(struct sock *sk, struct request_sock *req)
504 struct request_values *rv_unused)
505{ 504{
506 int err = -1; 505 int err = -1;
507 struct sk_buff *skb; 506 struct sk_buff *skb;
@@ -658,7 +657,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
658 dreq->dreq_gss = dreq->dreq_iss; 657 dreq->dreq_gss = dreq->dreq_iss;
659 dreq->dreq_service = service; 658 dreq->dreq_service = service;
660 659
661 if (dccp_v4_send_response(sk, req, NULL)) 660 if (dccp_v4_send_response(sk, req))
662 goto drop_and_free; 661 goto drop_and_free;
663 662
664 inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); 663 inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 6e05981f271e..9c61f9c02fdb 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -213,8 +213,7 @@ out:
213} 213}
214 214
215 215
216static int dccp_v6_send_response(struct sock *sk, struct request_sock *req, 216static int dccp_v6_send_response(struct sock *sk, struct request_sock *req)
217 struct request_values *rv_unused)
218{ 217{
219 struct inet6_request_sock *ireq6 = inet6_rsk(req); 218 struct inet6_request_sock *ireq6 = inet6_rsk(req);
220 struct ipv6_pinfo *np = inet6_sk(sk); 219 struct ipv6_pinfo *np = inet6_sk(sk);
@@ -428,7 +427,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
428 dreq->dreq_gss = dreq->dreq_iss; 427 dreq->dreq_gss = dreq->dreq_iss;
429 dreq->dreq_service = service; 428 dreq->dreq_service = service;
430 429
431 if (dccp_v6_send_response(sk, req, NULL)) 430 if (dccp_v6_send_response(sk, req))
432 goto drop_and_free; 431 goto drop_and_free;
433 432
434 inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT); 433 inet6_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index c8da116d84a4..7d9197063ebb 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -563,7 +563,7 @@ static const struct nla_policy dn_ifa_policy[IFA_MAX+1] = {
563 .len = IFNAMSIZ - 1 }, 563 .len = IFNAMSIZ - 1 },
564}; 564};
565 565
566static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 566static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
567{ 567{
568 struct net *net = sock_net(skb->sk); 568 struct net *net = sock_net(skb->sk);
569 struct nlattr *tb[IFA_MAX+1]; 569 struct nlattr *tb[IFA_MAX+1];
@@ -607,7 +607,7 @@ errout:
607 return err; 607 return err;
608} 608}
609 609
610static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 610static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
611{ 611{
612 struct net *net = sock_net(skb->sk); 612 struct net *net = sock_net(skb->sk);
613 struct nlattr *tb[IFA_MAX+1]; 613 struct nlattr *tb[IFA_MAX+1];
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index e36614eccc04..57dc159245ec 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -145,22 +145,10 @@ static inline struct dn_fib_info *dn_fib_find_info(const struct dn_fib_info *nfi
145 return NULL; 145 return NULL;
146} 146}
147 147
148__le16 dn_fib_get_attr16(struct rtattr *attr, int attrlen, int type) 148static int dn_fib_count_nhs(const struct nlattr *attr)
149{ 149{
150 while(RTA_OK(attr,attrlen)) { 150 struct rtnexthop *nhp = nla_data(attr);
151 if (attr->rta_type == type) 151 int nhs = 0, nhlen = nla_len(attr);
152 return *(__le16*)RTA_DATA(attr);
153 attr = RTA_NEXT(attr, attrlen);
154 }
155
156 return 0;
157}
158
159static int dn_fib_count_nhs(struct rtattr *rta)
160{
161 int nhs = 0;
162 struct rtnexthop *nhp = RTA_DATA(rta);
163 int nhlen = RTA_PAYLOAD(rta);
164 152
165 while(nhlen >= (int)sizeof(struct rtnexthop)) { 153 while(nhlen >= (int)sizeof(struct rtnexthop)) {
166 if ((nhlen -= nhp->rtnh_len) < 0) 154 if ((nhlen -= nhp->rtnh_len) < 0)
@@ -172,10 +160,11 @@ static int dn_fib_count_nhs(struct rtattr *rta)
172 return nhs; 160 return nhs;
173} 161}
174 162
175static int dn_fib_get_nhs(struct dn_fib_info *fi, const struct rtattr *rta, const struct rtmsg *r) 163static int dn_fib_get_nhs(struct dn_fib_info *fi, const struct nlattr *attr,
164 const struct rtmsg *r)
176{ 165{
177 struct rtnexthop *nhp = RTA_DATA(rta); 166 struct rtnexthop *nhp = nla_data(attr);
178 int nhlen = RTA_PAYLOAD(rta); 167 int nhlen = nla_len(attr);
179 168
180 change_nexthops(fi) { 169 change_nexthops(fi) {
181 int attrlen = nhlen - sizeof(struct rtnexthop); 170 int attrlen = nhlen - sizeof(struct rtnexthop);
@@ -187,7 +176,10 @@ static int dn_fib_get_nhs(struct dn_fib_info *fi, const struct rtattr *rta, cons
187 nh->nh_weight = nhp->rtnh_hops + 1; 176 nh->nh_weight = nhp->rtnh_hops + 1;
188 177
189 if (attrlen) { 178 if (attrlen) {
190 nh->nh_gw = dn_fib_get_attr16(RTNH_DATA(nhp), attrlen, RTA_GATEWAY); 179 struct nlattr *gw_attr;
180
181 gw_attr = nla_find((struct nlattr *) (nhp + 1), attrlen, RTA_GATEWAY);
182 nh->nh_gw = gw_attr ? nla_get_le16(gw_attr) : 0;
191 } 183 }
192 nhp = RTNH_NEXT(nhp); 184 nhp = RTNH_NEXT(nhp);
193 } endfor_nexthops(fi); 185 } endfor_nexthops(fi);
@@ -268,7 +260,8 @@ out:
268} 260}
269 261
270 262
271struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r, struct dn_kern_rta *rta, const struct nlmsghdr *nlh, int *errp) 263struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r, struct nlattr *attrs[],
264 const struct nlmsghdr *nlh, int *errp)
272{ 265{
273 int err; 266 int err;
274 struct dn_fib_info *fi = NULL; 267 struct dn_fib_info *fi = NULL;
@@ -281,11 +274,9 @@ struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r, struct dn_kern_rta
281 if (dn_fib_props[r->rtm_type].scope > r->rtm_scope) 274 if (dn_fib_props[r->rtm_type].scope > r->rtm_scope)
282 goto err_inval; 275 goto err_inval;
283 276
284 if (rta->rta_mp) { 277 if (attrs[RTA_MULTIPATH] &&
285 nhs = dn_fib_count_nhs(rta->rta_mp); 278 (nhs = dn_fib_count_nhs(attrs[RTA_MULTIPATH])) == 0)
286 if (nhs == 0) 279 goto err_inval;
287 goto err_inval;
288 }
289 280
290 fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct dn_fib_nh), GFP_KERNEL); 281 fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct dn_fib_nh), GFP_KERNEL);
291 err = -ENOBUFS; 282 err = -ENOBUFS;
@@ -295,53 +286,65 @@ struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r, struct dn_kern_rta
295 fi->fib_protocol = r->rtm_protocol; 286 fi->fib_protocol = r->rtm_protocol;
296 fi->fib_nhs = nhs; 287 fi->fib_nhs = nhs;
297 fi->fib_flags = r->rtm_flags; 288 fi->fib_flags = r->rtm_flags;
298 if (rta->rta_priority)
299 fi->fib_priority = *rta->rta_priority;
300 if (rta->rta_mx) {
301 int attrlen = RTA_PAYLOAD(rta->rta_mx);
302 struct rtattr *attr = RTA_DATA(rta->rta_mx);
303 289
304 while(RTA_OK(attr, attrlen)) { 290 if (attrs[RTA_PRIORITY])
305 unsigned int flavour = attr->rta_type; 291 fi->fib_priority = nla_get_u32(attrs[RTA_PRIORITY]);
292
293 if (attrs[RTA_METRICS]) {
294 struct nlattr *attr;
295 int rem;
306 296
307 if (flavour) { 297 nla_for_each_nested(attr, attrs[RTA_METRICS], rem) {
308 if (flavour > RTAX_MAX) 298 int type = nla_type(attr);
299
300 if (type) {
301 if (type > RTAX_MAX || nla_len(attr) < 4)
309 goto err_inval; 302 goto err_inval;
310 fi->fib_metrics[flavour-1] = *(unsigned int *)RTA_DATA(attr); 303
304 fi->fib_metrics[type-1] = nla_get_u32(attr);
311 } 305 }
312 attr = RTA_NEXT(attr, attrlen);
313 } 306 }
314 } 307 }
315 if (rta->rta_prefsrc)
316 memcpy(&fi->fib_prefsrc, rta->rta_prefsrc, 2);
317 308
318 if (rta->rta_mp) { 309 if (attrs[RTA_PREFSRC])
319 if ((err = dn_fib_get_nhs(fi, rta->rta_mp, r)) != 0) 310 fi->fib_prefsrc = nla_get_le16(attrs[RTA_PREFSRC]);
311
312 if (attrs[RTA_MULTIPATH]) {
313 if ((err = dn_fib_get_nhs(fi, attrs[RTA_MULTIPATH], r)) != 0)
320 goto failure; 314 goto failure;
321 if (rta->rta_oif && fi->fib_nh->nh_oif != *rta->rta_oif) 315
316 if (attrs[RTA_OIF] &&
317 fi->fib_nh->nh_oif != nla_get_u32(attrs[RTA_OIF]))
322 goto err_inval; 318 goto err_inval;
323 if (rta->rta_gw && memcmp(&fi->fib_nh->nh_gw, rta->rta_gw, 2)) 319
320 if (attrs[RTA_GATEWAY] &&
321 fi->fib_nh->nh_gw != nla_get_le16(attrs[RTA_GATEWAY]))
324 goto err_inval; 322 goto err_inval;
325 } else { 323 } else {
326 struct dn_fib_nh *nh = fi->fib_nh; 324 struct dn_fib_nh *nh = fi->fib_nh;
327 if (rta->rta_oif) 325
328 nh->nh_oif = *rta->rta_oif; 326 if (attrs[RTA_OIF])
329 if (rta->rta_gw) 327 nh->nh_oif = nla_get_u32(attrs[RTA_OIF]);
330 memcpy(&nh->nh_gw, rta->rta_gw, 2); 328
329 if (attrs[RTA_GATEWAY])
330 nh->nh_gw = nla_get_le16(attrs[RTA_GATEWAY]);
331
331 nh->nh_flags = r->rtm_flags; 332 nh->nh_flags = r->rtm_flags;
332 nh->nh_weight = 1; 333 nh->nh_weight = 1;
333 } 334 }
334 335
335 if (r->rtm_type == RTN_NAT) { 336 if (r->rtm_type == RTN_NAT) {
336 if (rta->rta_gw == NULL || nhs != 1 || rta->rta_oif) 337 if (!attrs[RTA_GATEWAY] || nhs != 1 || attrs[RTA_OIF])
337 goto err_inval; 338 goto err_inval;
338 memcpy(&fi->fib_nh->nh_gw, rta->rta_gw, 2); 339
340 fi->fib_nh->nh_gw = nla_get_le16(attrs[RTA_GATEWAY]);
339 goto link_it; 341 goto link_it;
340 } 342 }
341 343
342 if (dn_fib_props[r->rtm_type].error) { 344 if (dn_fib_props[r->rtm_type].error) {
343 if (rta->rta_gw || rta->rta_oif || rta->rta_mp) 345 if (attrs[RTA_GATEWAY] || attrs[RTA_OIF] || attrs[RTA_MULTIPATH])
344 goto err_inval; 346 goto err_inval;
347
345 goto link_it; 348 goto link_it;
346 } 349 }
347 350
@@ -367,8 +370,8 @@ struct dn_fib_info *dn_fib_create_info(const struct rtmsg *r, struct dn_kern_rta
367 } 370 }
368 371
369 if (fi->fib_prefsrc) { 372 if (fi->fib_prefsrc) {
370 if (r->rtm_type != RTN_LOCAL || rta->rta_dst == NULL || 373 if (r->rtm_type != RTN_LOCAL || !attrs[RTA_DST] ||
371 memcmp(&fi->fib_prefsrc, rta->rta_dst, 2)) 374 fi->fib_prefsrc != nla_get_le16(attrs[RTA_DST]))
372 if (dnet_addr_type(fi->fib_prefsrc) != RTN_LOCAL) 375 if (dnet_addr_type(fi->fib_prefsrc) != RTN_LOCAL)
373 goto err_inval; 376 goto err_inval;
374 } 377 }
@@ -486,39 +489,21 @@ void dn_fib_select_multipath(const struct flowidn *fld, struct dn_fib_res *res)
486 spin_unlock_bh(&dn_fib_multipath_lock); 489 spin_unlock_bh(&dn_fib_multipath_lock);
487} 490}
488 491
489 492static inline u32 rtm_get_table(struct nlattr *attrs[], u8 table)
490static int dn_fib_check_attr(struct rtmsg *r, struct rtattr **rta)
491{
492 int i;
493
494 for(i = 1; i <= RTA_MAX; i++) {
495 struct rtattr *attr = rta[i-1];
496 if (attr) {
497 if (RTA_PAYLOAD(attr) < 4 && RTA_PAYLOAD(attr) != 2)
498 return -EINVAL;
499 if (i != RTA_MULTIPATH && i != RTA_METRICS &&
500 i != RTA_TABLE)
501 rta[i-1] = (struct rtattr *)RTA_DATA(attr);
502 }
503 }
504
505 return 0;
506}
507
508static inline u32 rtm_get_table(struct rtattr **rta, u8 table)
509{ 493{
510 if (rta[RTA_TABLE - 1]) 494 if (attrs[RTA_TABLE])
511 table = nla_get_u32((struct nlattr *) rta[RTA_TABLE - 1]); 495 table = nla_get_u32(attrs[RTA_TABLE]);
512 496
513 return table; 497 return table;
514} 498}
515 499
516static int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 500static int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
517{ 501{
518 struct net *net = sock_net(skb->sk); 502 struct net *net = sock_net(skb->sk);
519 struct dn_fib_table *tb; 503 struct dn_fib_table *tb;
520 struct rtattr **rta = arg; 504 struct rtmsg *r = nlmsg_data(nlh);
521 struct rtmsg *r = NLMSG_DATA(nlh); 505 struct nlattr *attrs[RTA_MAX+1];
506 int err;
522 507
523 if (!capable(CAP_NET_ADMIN)) 508 if (!capable(CAP_NET_ADMIN))
524 return -EPERM; 509 return -EPERM;
@@ -526,22 +511,24 @@ static int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *
526 if (!net_eq(net, &init_net)) 511 if (!net_eq(net, &init_net))
527 return -EINVAL; 512 return -EINVAL;
528 513
529 if (dn_fib_check_attr(r, rta)) 514 err = nlmsg_parse(nlh, sizeof(*r), attrs, RTA_MAX, rtm_dn_policy);
530 return -EINVAL; 515 if (err < 0)
516 return err;
531 517
532 tb = dn_fib_get_table(rtm_get_table(rta, r->rtm_table), 0); 518 tb = dn_fib_get_table(rtm_get_table(attrs, r->rtm_table), 0);
533 if (tb) 519 if (!tb)
534 return tb->delete(tb, r, (struct dn_kern_rta *)rta, nlh, &NETLINK_CB(skb)); 520 return -ESRCH;
535 521
536 return -ESRCH; 522 return tb->delete(tb, r, attrs, nlh, &NETLINK_CB(skb));
537} 523}
538 524
539static int dn_fib_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 525static int dn_fib_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
540{ 526{
541 struct net *net = sock_net(skb->sk); 527 struct net *net = sock_net(skb->sk);
542 struct dn_fib_table *tb; 528 struct dn_fib_table *tb;
543 struct rtattr **rta = arg; 529 struct rtmsg *r = nlmsg_data(nlh);
544 struct rtmsg *r = NLMSG_DATA(nlh); 530 struct nlattr *attrs[RTA_MAX+1];
531 int err;
545 532
546 if (!capable(CAP_NET_ADMIN)) 533 if (!capable(CAP_NET_ADMIN))
547 return -EPERM; 534 return -EPERM;
@@ -549,14 +536,15 @@ static int dn_fib_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *
549 if (!net_eq(net, &init_net)) 536 if (!net_eq(net, &init_net))
550 return -EINVAL; 537 return -EINVAL;
551 538
552 if (dn_fib_check_attr(r, rta)) 539 err = nlmsg_parse(nlh, sizeof(*r), attrs, RTA_MAX, rtm_dn_policy);
553 return -EINVAL; 540 if (err < 0)
541 return err;
554 542
555 tb = dn_fib_get_table(rtm_get_table(rta, r->rtm_table), 1); 543 tb = dn_fib_get_table(rtm_get_table(attrs, r->rtm_table), 1);
556 if (tb) 544 if (!tb)
557 return tb->insert(tb, r, (struct dn_kern_rta *)rta, nlh, &NETLINK_CB(skb)); 545 return -ENOBUFS;
558 546
559 return -ENOBUFS; 547 return tb->insert(tb, r, attrs, nlh, &NETLINK_CB(skb));
560} 548}
561 549
562static void fib_magic(int cmd, int type, __le16 dst, int dst_len, struct dn_ifaddr *ifa) 550static void fib_magic(int cmd, int type, __le16 dst, int dst_len, struct dn_ifaddr *ifa)
@@ -566,10 +554,31 @@ static void fib_magic(int cmd, int type, __le16 dst, int dst_len, struct dn_ifad
566 struct nlmsghdr nlh; 554 struct nlmsghdr nlh;
567 struct rtmsg rtm; 555 struct rtmsg rtm;
568 } req; 556 } req;
569 struct dn_kern_rta rta; 557 struct {
558 struct nlattr hdr;
559 __le16 dst;
560 } dst_attr = {
561 .dst = dst,
562 };
563 struct {
564 struct nlattr hdr;
565 __le16 prefsrc;
566 } prefsrc_attr = {
567 .prefsrc = ifa->ifa_local,
568 };
569 struct {
570 struct nlattr hdr;
571 u32 oif;
572 } oif_attr = {
573 .oif = ifa->ifa_dev->dev->ifindex,
574 };
575 struct nlattr *attrs[RTA_MAX+1] = {
576 [RTA_DST] = (struct nlattr *) &dst_attr,
 577 [RTA_PREFSRC] = (struct nlattr *) &prefsrc_attr,
578 [RTA_OIF] = (struct nlattr *) &oif_attr,
579 };
570 580
571 memset(&req.rtm, 0, sizeof(req.rtm)); 581 memset(&req.rtm, 0, sizeof(req.rtm));
572 memset(&rta, 0, sizeof(rta));
573 582
574 if (type == RTN_UNICAST) 583 if (type == RTN_UNICAST)
575 tb = dn_fib_get_table(RT_MIN_TABLE, 1); 584 tb = dn_fib_get_table(RT_MIN_TABLE, 1);
@@ -591,14 +600,10 @@ static void fib_magic(int cmd, int type, __le16 dst, int dst_len, struct dn_ifad
591 req.rtm.rtm_scope = (type != RTN_LOCAL ? RT_SCOPE_LINK : RT_SCOPE_HOST); 600 req.rtm.rtm_scope = (type != RTN_LOCAL ? RT_SCOPE_LINK : RT_SCOPE_HOST);
592 req.rtm.rtm_type = type; 601 req.rtm.rtm_type = type;
593 602
594 rta.rta_dst = &dst;
595 rta.rta_prefsrc = &ifa->ifa_local;
596 rta.rta_oif = &ifa->ifa_dev->dev->ifindex;
597
598 if (cmd == RTM_NEWROUTE) 603 if (cmd == RTM_NEWROUTE)
599 tb->insert(tb, &req.rtm, &rta, &req.nlh, NULL); 604 tb->insert(tb, &req.rtm, attrs, &req.nlh, NULL);
600 else 605 else
601 tb->delete(tb, &req.rtm, &rta, &req.nlh, NULL); 606 tb->delete(tb, &req.rtm, attrs, &req.nlh, NULL);
602} 607}
603 608
604static void dn_fib_add_ifaddr(struct dn_ifaddr *ifa) 609static void dn_fib_add_ifaddr(struct dn_ifaddr *ifa)
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index 5ac0e153ef83..fe32388ea24f 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -1613,23 +1613,41 @@ errout:
1613 return -EMSGSIZE; 1613 return -EMSGSIZE;
1614} 1614}
1615 1615
1616const struct nla_policy rtm_dn_policy[RTA_MAX + 1] = {
1617 [RTA_DST] = { .type = NLA_U16 },
1618 [RTA_SRC] = { .type = NLA_U16 },
1619 [RTA_IIF] = { .type = NLA_U32 },
1620 [RTA_OIF] = { .type = NLA_U32 },
1621 [RTA_GATEWAY] = { .type = NLA_U16 },
1622 [RTA_PRIORITY] = { .type = NLA_U32 },
1623 [RTA_PREFSRC] = { .type = NLA_U16 },
1624 [RTA_METRICS] = { .type = NLA_NESTED },
1625 [RTA_MULTIPATH] = { .type = NLA_NESTED },
1626 [RTA_TABLE] = { .type = NLA_U32 },
1627 [RTA_MARK] = { .type = NLA_U32 },
1628};
1629
1616/* 1630/*
1617 * This is called by both endnodes and routers now. 1631 * This is called by both endnodes and routers now.
1618 */ 1632 */
1619static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg) 1633static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
1620{ 1634{
1621 struct net *net = sock_net(in_skb->sk); 1635 struct net *net = sock_net(in_skb->sk);
1622 struct rtattr **rta = arg;
1623 struct rtmsg *rtm = nlmsg_data(nlh); 1636 struct rtmsg *rtm = nlmsg_data(nlh);
1624 struct dn_route *rt = NULL; 1637 struct dn_route *rt = NULL;
1625 struct dn_skb_cb *cb; 1638 struct dn_skb_cb *cb;
1626 int err; 1639 int err;
1627 struct sk_buff *skb; 1640 struct sk_buff *skb;
1628 struct flowidn fld; 1641 struct flowidn fld;
1642 struct nlattr *tb[RTA_MAX+1];
1629 1643
1630 if (!net_eq(net, &init_net)) 1644 if (!net_eq(net, &init_net))
1631 return -EINVAL; 1645 return -EINVAL;
1632 1646
1647 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_dn_policy);
1648 if (err < 0)
1649 return err;
1650
1633 memset(&fld, 0, sizeof(fld)); 1651 memset(&fld, 0, sizeof(fld));
1634 fld.flowidn_proto = DNPROTO_NSP; 1652 fld.flowidn_proto = DNPROTO_NSP;
1635 1653
@@ -1639,12 +1657,14 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
1639 skb_reset_mac_header(skb); 1657 skb_reset_mac_header(skb);
1640 cb = DN_SKB_CB(skb); 1658 cb = DN_SKB_CB(skb);
1641 1659
1642 if (rta[RTA_SRC-1]) 1660 if (tb[RTA_SRC])
1643 memcpy(&fld.saddr, RTA_DATA(rta[RTA_SRC-1]), 2); 1661 fld.saddr = nla_get_le16(tb[RTA_SRC]);
1644 if (rta[RTA_DST-1]) 1662
1645 memcpy(&fld.daddr, RTA_DATA(rta[RTA_DST-1]), 2); 1663 if (tb[RTA_DST])
1646 if (rta[RTA_IIF-1]) 1664 fld.daddr = nla_get_le16(tb[RTA_DST]);
1647 memcpy(&fld.flowidn_iif, RTA_DATA(rta[RTA_IIF-1]), sizeof(int)); 1665
1666 if (tb[RTA_IIF])
1667 fld.flowidn_iif = nla_get_u32(tb[RTA_IIF]);
1648 1668
1649 if (fld.flowidn_iif) { 1669 if (fld.flowidn_iif) {
1650 struct net_device *dev; 1670 struct net_device *dev;
@@ -1669,10 +1689,9 @@ static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void
1669 if (!err && -rt->dst.error) 1689 if (!err && -rt->dst.error)
1670 err = rt->dst.error; 1690 err = rt->dst.error;
1671 } else { 1691 } else {
1672 int oif = 0; 1692 if (tb[RTA_OIF])
1673 if (rta[RTA_OIF - 1]) 1693 fld.flowidn_oif = nla_get_u32(tb[RTA_OIF]);
1674 memcpy(&oif, RTA_DATA(rta[RTA_OIF - 1]), sizeof(int)); 1694
1675 fld.flowidn_oif = oif;
1676 err = dn_route_output_key((struct dst_entry **)&rt, &fld, 0); 1695 err = dn_route_output_key((struct dst_entry **)&rt, &fld, 0);
1677 } 1696 }
1678 1697
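With rtm_dn_policy declared, nlmsg_parse() enforces attribute types and lengths up front, so the converted handlers can use the typed getters without re-checking sizes. An illustrative helper in the same spirit:

	static __le16 example_get_dst(struct nlattr *tb[])
	{
		/* safe: the policy guarantees RTA_DST, if present, is 16 bits */
		return tb[RTA_DST] ? nla_get_le16(tb[RTA_DST]) : 0;
	}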
diff --git a/net/decnet/dn_table.c b/net/decnet/dn_table.c
index 6c2445bcaba1..86e3807052e9 100644
--- a/net/decnet/dn_table.c
+++ b/net/decnet/dn_table.c
@@ -19,7 +19,6 @@
19#include <linux/sockios.h> 19#include <linux/sockios.h>
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/skbuff.h> 21#include <linux/skbuff.h>
22#include <linux/netlink.h>
23#include <linux/rtnetlink.h> 22#include <linux/rtnetlink.h>
24#include <linux/proc_fs.h> 23#include <linux/proc_fs.h>
25#include <linux/netdevice.h> 24#include <linux/netdevice.h>
@@ -224,26 +223,27 @@ static struct dn_zone *dn_new_zone(struct dn_hash *table, int z)
224} 223}
225 224
226 225
227static int dn_fib_nh_match(struct rtmsg *r, struct nlmsghdr *nlh, struct dn_kern_rta *rta, struct dn_fib_info *fi) 226static int dn_fib_nh_match(struct rtmsg *r, struct nlmsghdr *nlh, struct nlattr *attrs[], struct dn_fib_info *fi)
228{ 227{
229 struct rtnexthop *nhp; 228 struct rtnexthop *nhp;
230 int nhlen; 229 int nhlen;
231 230
232 if (rta->rta_priority && *rta->rta_priority != fi->fib_priority) 231 if (attrs[RTA_PRIORITY] &&
232 nla_get_u32(attrs[RTA_PRIORITY]) != fi->fib_priority)
233 return 1; 233 return 1;
234 234
235 if (rta->rta_oif || rta->rta_gw) { 235 if (attrs[RTA_OIF] || attrs[RTA_GATEWAY]) {
236 if ((!rta->rta_oif || *rta->rta_oif == fi->fib_nh->nh_oif) && 236 if ((!attrs[RTA_OIF] || nla_get_u32(attrs[RTA_OIF]) == fi->fib_nh->nh_oif) &&
 237 (!rta->rta_gw || memcmp(rta->rta_gw, &fi->fib_nh->nh_gw, 2) == 0)) 237 (!attrs[RTA_GATEWAY] || nla_get_le16(attrs[RTA_GATEWAY]) == fi->fib_nh->nh_gw))
238 return 0; 238 return 0;
239 return 1; 239 return 1;
240 } 240 }
241 241
242 if (rta->rta_mp == NULL) 242 if (!attrs[RTA_MULTIPATH])
243 return 0; 243 return 0;
244 244
245 nhp = RTA_DATA(rta->rta_mp); 245 nhp = nla_data(attrs[RTA_MULTIPATH]);
246 nhlen = RTA_PAYLOAD(rta->rta_mp); 246 nhlen = nla_len(attrs[RTA_MULTIPATH]);
247 247
248 for_nexthops(fi) { 248 for_nexthops(fi) {
249 int attrlen = nhlen - sizeof(struct rtnexthop); 249 int attrlen = nhlen - sizeof(struct rtnexthop);
@@ -254,7 +254,10 @@ static int dn_fib_nh_match(struct rtmsg *r, struct nlmsghdr *nlh, struct dn_kern
254 if (nhp->rtnh_ifindex && nhp->rtnh_ifindex != nh->nh_oif) 254 if (nhp->rtnh_ifindex && nhp->rtnh_ifindex != nh->nh_oif)
255 return 1; 255 return 1;
256 if (attrlen) { 256 if (attrlen) {
257 gw = dn_fib_get_attr16(RTNH_DATA(nhp), attrlen, RTA_GATEWAY); 257 struct nlattr *gw_attr;
258
259 gw_attr = nla_find((struct nlattr *) (nhp + 1), attrlen, RTA_GATEWAY);
260 gw = gw_attr ? nla_get_le16(gw_attr) : 0;
258 261
259 if (gw && gw != nh->nh_gw) 262 if (gw && gw != nh->nh_gw)
260 return 1; 263 return 1;
@@ -488,7 +491,7 @@ int dn_fib_dump(struct sk_buff *skb, struct netlink_callback *cb)
488 if (!net_eq(net, &init_net)) 491 if (!net_eq(net, &init_net))
489 return 0; 492 return 0;
490 493
491 if (NLMSG_PAYLOAD(cb->nlh, 0) >= sizeof(struct rtmsg) && 494 if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) &&
492 ((struct rtmsg *)nlmsg_data(cb->nlh))->rtm_flags&RTM_F_CLONED) 495 ((struct rtmsg *)nlmsg_data(cb->nlh))->rtm_flags&RTM_F_CLONED)
493 return dn_cache_dump(skb, cb); 496 return dn_cache_dump(skb, cb);
494 497
@@ -517,7 +520,8 @@ out:
517 return skb->len; 520 return skb->len;
518} 521}
519 522
520static int dn_fib_table_insert(struct dn_fib_table *tb, struct rtmsg *r, struct dn_kern_rta *rta, struct nlmsghdr *n, struct netlink_skb_parms *req) 523static int dn_fib_table_insert(struct dn_fib_table *tb, struct rtmsg *r, struct nlattr *attrs[],
524 struct nlmsghdr *n, struct netlink_skb_parms *req)
521{ 525{
522 struct dn_hash *table = (struct dn_hash *)tb->data; 526 struct dn_hash *table = (struct dn_hash *)tb->data;
523 struct dn_fib_node *new_f, *f, **fp, **del_fp; 527 struct dn_fib_node *new_f, *f, **fp, **del_fp;
@@ -536,15 +540,14 @@ static int dn_fib_table_insert(struct dn_fib_table *tb, struct rtmsg *r, struct
536 return -ENOBUFS; 540 return -ENOBUFS;
537 541
538 dz_key_0(key); 542 dz_key_0(key);
539 if (rta->rta_dst) { 543 if (attrs[RTA_DST]) {
540 __le16 dst; 544 __le16 dst = nla_get_le16(attrs[RTA_DST]);
541 memcpy(&dst, rta->rta_dst, 2);
542 if (dst & ~DZ_MASK(dz)) 545 if (dst & ~DZ_MASK(dz))
543 return -EINVAL; 546 return -EINVAL;
544 key = dz_key(dst, dz); 547 key = dz_key(dst, dz);
545 } 548 }
546 549
547 if ((fi = dn_fib_create_info(r, rta, n, &err)) == NULL) 550 if ((fi = dn_fib_create_info(r, attrs, n, &err)) == NULL)
548 return err; 551 return err;
549 552
550 if (dz->dz_nent > (dz->dz_divisor << 2) && 553 if (dz->dz_nent > (dz->dz_divisor << 2) &&
@@ -654,7 +657,8 @@ out:
654} 657}
655 658
656 659
657static int dn_fib_table_delete(struct dn_fib_table *tb, struct rtmsg *r, struct dn_kern_rta *rta, struct nlmsghdr *n, struct netlink_skb_parms *req) 660static int dn_fib_table_delete(struct dn_fib_table *tb, struct rtmsg *r, struct nlattr *attrs[],
661 struct nlmsghdr *n, struct netlink_skb_parms *req)
658{ 662{
659 struct dn_hash *table = (struct dn_hash*)tb->data; 663 struct dn_hash *table = (struct dn_hash*)tb->data;
660 struct dn_fib_node **fp, **del_fp, *f; 664 struct dn_fib_node **fp, **del_fp, *f;
@@ -671,9 +675,8 @@ static int dn_fib_table_delete(struct dn_fib_table *tb, struct rtmsg *r, struct
671 return -ESRCH; 675 return -ESRCH;
672 676
673 dz_key_0(key); 677 dz_key_0(key);
674 if (rta->rta_dst) { 678 if (attrs[RTA_DST]) {
675 __le16 dst; 679 __le16 dst = nla_get_le16(attrs[RTA_DST]);
676 memcpy(&dst, rta->rta_dst, 2);
677 if (dst & ~DZ_MASK(dz)) 680 if (dst & ~DZ_MASK(dz))
678 return -EINVAL; 681 return -EINVAL;
679 key = dz_key(dst, dz); 682 key = dz_key(dst, dz);
@@ -703,7 +706,7 @@ static int dn_fib_table_delete(struct dn_fib_table *tb, struct rtmsg *r, struct
703 (r->rtm_scope == RT_SCOPE_NOWHERE || f->fn_scope == r->rtm_scope) && 706 (r->rtm_scope == RT_SCOPE_NOWHERE || f->fn_scope == r->rtm_scope) &&
704 (!r->rtm_protocol || 707 (!r->rtm_protocol ||
705 fi->fib_protocol == r->rtm_protocol) && 708 fi->fib_protocol == r->rtm_protocol) &&
706 dn_fib_nh_match(r, n, rta, fi) == 0) 709 dn_fib_nh_match(r, n, attrs, fi) == 0)
707 del_fp = fp; 710 del_fp = fp;
708 } 711 }
709 712
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index dfe42012a044..2a7efe388344 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -19,7 +19,7 @@
19#include <linux/netdevice.h> 19#include <linux/netdevice.h>
20#include <linux/netfilter.h> 20#include <linux/netfilter.h>
21#include <linux/spinlock.h> 21#include <linux/spinlock.h>
22#include <linux/netlink.h> 22#include <net/netlink.h>
23#include <linux/netfilter_decnet.h> 23#include <linux/netfilter_decnet.h>
24 24
25#include <net/sock.h> 25#include <net/sock.h>
@@ -39,21 +39,21 @@ static struct sk_buff *dnrmg_build_message(struct sk_buff *rt_skb, int *errp)
39 unsigned char *ptr; 39 unsigned char *ptr;
40 struct nf_dn_rtmsg *rtm; 40 struct nf_dn_rtmsg *rtm;
41 41
42 size = NLMSG_SPACE(rt_skb->len); 42 size = NLMSG_ALIGN(rt_skb->len) +
43 size += NLMSG_ALIGN(sizeof(struct nf_dn_rtmsg)); 43 NLMSG_ALIGN(sizeof(struct nf_dn_rtmsg));
44 skb = alloc_skb(size, GFP_ATOMIC); 44 skb = nlmsg_new(size, GFP_ATOMIC);
45 if (!skb) { 45 if (!skb) {
46 *errp = -ENOMEM; 46 *errp = -ENOMEM;
47 return NULL; 47 return NULL;
48 } 48 }
49 old_tail = skb->tail; 49 old_tail = skb->tail;
50 nlh = nlmsg_put(skb, 0, 0, 0, size - sizeof(*nlh), 0); 50 nlh = nlmsg_put(skb, 0, 0, 0, size, 0);
51 if (!nlh) { 51 if (!nlh) {
52 kfree_skb(skb); 52 kfree_skb(skb);
53 *errp = -ENOMEM; 53 *errp = -ENOMEM;
54 return NULL; 54 return NULL;
55 } 55 }
56 rtm = (struct nf_dn_rtmsg *)NLMSG_DATA(nlh); 56 rtm = (struct nf_dn_rtmsg *)nlmsg_data(nlh);
57 rtm->nfdn_ifindex = rt_skb->dev->ifindex; 57 rtm->nfdn_ifindex = rt_skb->dev->ifindex;
58 ptr = NFDN_RTMSG(rtm); 58 ptr = NFDN_RTMSG(rtm);
59 skb_copy_from_linear_data(rt_skb, ptr, rt_skb->len); 59 skb_copy_from_linear_data(rt_skb, ptr, rt_skb->len);
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 2bc62ea857c8..0eb5d5e76dfb 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -1,6 +1,7 @@
1/* 1/*
2 * net/dsa/dsa.c - Hardware switch handling 2 * net/dsa/dsa.c - Hardware switch handling
3 * Copyright (c) 2008-2009 Marvell Semiconductor 3 * Copyright (c) 2008-2009 Marvell Semiconductor
4 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
4 * 5 *
5 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 7 * it under the terms of the GNU General Public License as published by
@@ -14,6 +15,9 @@
14#include <linux/slab.h> 15#include <linux/slab.h>
15#include <linux/module.h> 16#include <linux/module.h>
16#include <net/dsa.h> 17#include <net/dsa.h>
18#include <linux/of.h>
19#include <linux/of_mdio.h>
20#include <linux/of_platform.h>
17#include "dsa_priv.h" 21#include "dsa_priv.h"
18 22
19char dsa_driver_version[] = "0.1"; 23char dsa_driver_version[] = "0.1";
@@ -287,34 +291,239 @@ static struct net_device *dev_to_net_device(struct device *dev)
287 return NULL; 291 return NULL;
288} 292}
289 293
294#ifdef CONFIG_OF
295static int dsa_of_setup_routing_table(struct dsa_platform_data *pd,
296 struct dsa_chip_data *cd,
297 int chip_index,
298 struct device_node *link)
299{
300 int ret;
301 const __be32 *reg;
302 int link_port_addr;
303 int link_sw_addr;
304 struct device_node *parent_sw;
305 int len;
306
307 parent_sw = of_get_parent(link);
308 if (!parent_sw)
309 return -EINVAL;
310
311 reg = of_get_property(parent_sw, "reg", &len);
312 if (!reg || (len != sizeof(*reg) * 2))
313 return -EINVAL;
314
315 link_sw_addr = be32_to_cpup(reg + 1);
316
317 if (link_sw_addr >= pd->nr_chips)
318 return -EINVAL;
319
320 /* First time routing table allocation */
321 if (!cd->rtable) {
322 cd->rtable = kmalloc(pd->nr_chips * sizeof(s8), GFP_KERNEL);
323 if (!cd->rtable)
324 return -ENOMEM;
325
326 /* default to no valid uplink/downlink */
327 memset(cd->rtable, -1, pd->nr_chips * sizeof(s8));
328 }
329
330 reg = of_get_property(link, "reg", NULL);
331 if (!reg) {
332 ret = -EINVAL;
333 goto out;
334 }
335
336 link_port_addr = be32_to_cpup(reg);
337
338 cd->rtable[link_sw_addr] = link_port_addr;
339
340 return 0;
341out:
342 kfree(cd->rtable);
343 return ret;
344}
345
346static void dsa_of_free_platform_data(struct dsa_platform_data *pd)
347{
348 int i;
349 int port_index;
350
351 for (i = 0; i < pd->nr_chips; i++) {
352 port_index = 0;
353 while (port_index < DSA_MAX_PORTS) {
354 if (pd->chip[i].port_names[port_index])
355 kfree(pd->chip[i].port_names[port_index]);
356 port_index++;
357 }
358 kfree(pd->chip[i].rtable);
359 }
360 kfree(pd->chip);
361}
362
363static int dsa_of_probe(struct platform_device *pdev)
364{
365 struct device_node *np = pdev->dev.of_node;
366 struct device_node *child, *mdio, *ethernet, *port, *link;
367 struct mii_bus *mdio_bus;
368 struct platform_device *ethernet_dev;
369 struct dsa_platform_data *pd;
370 struct dsa_chip_data *cd;
371 const char *port_name;
372 int chip_index, port_index;
373 const unsigned int *sw_addr, *port_reg;
374 int ret;
375
376 mdio = of_parse_phandle(np, "dsa,mii-bus", 0);
377 if (!mdio)
378 return -EINVAL;
379
380 mdio_bus = of_mdio_find_bus(mdio);
381 if (!mdio_bus)
382 return -EINVAL;
383
384 ethernet = of_parse_phandle(np, "dsa,ethernet", 0);
385 if (!ethernet)
386 return -EINVAL;
387
388 ethernet_dev = of_find_device_by_node(ethernet);
389 if (!ethernet_dev)
390 return -ENODEV;
391
392 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
393 if (!pd)
394 return -ENOMEM;
395
396 pdev->dev.platform_data = pd;
397 pd->netdev = &ethernet_dev->dev;
398 pd->nr_chips = of_get_child_count(np);
399 if (pd->nr_chips > DSA_MAX_SWITCHES)
400 pd->nr_chips = DSA_MAX_SWITCHES;
401
402 pd->chip = kzalloc(pd->nr_chips * sizeof(struct dsa_chip_data),
403 GFP_KERNEL);
404 if (!pd->chip) {
405 ret = -ENOMEM;
406 goto out_free;
407 }
408
409 chip_index = 0;
410 for_each_available_child_of_node(np, child) {
411 cd = &pd->chip[chip_index];
412
413 cd->mii_bus = &mdio_bus->dev;
414
415 sw_addr = of_get_property(child, "reg", NULL);
416 if (!sw_addr)
417 continue;
418
419 cd->sw_addr = be32_to_cpup(sw_addr);
420 if (cd->sw_addr > PHY_MAX_ADDR)
421 continue;
422
423 for_each_available_child_of_node(child, port) {
424 port_reg = of_get_property(port, "reg", NULL);
425 if (!port_reg)
426 continue;
427
428 port_index = be32_to_cpup(port_reg);
429
430 port_name = of_get_property(port, "label", NULL);
431 if (!port_name)
432 continue;
433
434 cd->port_names[port_index] = kstrdup(port_name,
435 GFP_KERNEL);
436 if (!cd->port_names[port_index]) {
437 ret = -ENOMEM;
438 goto out_free_chip;
439 }
440
441 link = of_parse_phandle(port, "link", 0);
442
443 if (!strcmp(port_name, "dsa") && link &&
444 pd->nr_chips > 1) {
445 ret = dsa_of_setup_routing_table(pd, cd,
446 chip_index, link);
447 if (ret)
448 goto out_free_chip;
449 }
450
451 if (port_index == DSA_MAX_PORTS)
452 break;
453 }
454 }
455
456 return 0;
457
458out_free_chip:
459 dsa_of_free_platform_data(pd);
460out_free:
461 kfree(pd);
462 pdev->dev.platform_data = NULL;
463 return ret;
464}
465
466static void dsa_of_remove(struct platform_device *pdev)
467{
468 struct dsa_platform_data *pd = pdev->dev.platform_data;
469
470 if (!pdev->dev.of_node)
471 return;
472
473 dsa_of_free_platform_data(pd);
474 kfree(pd);
475}
476#else
477static inline int dsa_of_probe(struct platform_device *pdev)
478{
479 return 0;
480}
481
482static inline void dsa_of_remove(struct platform_device *pdev)
483{
484}
485#endif
486
290static int dsa_probe(struct platform_device *pdev) 487static int dsa_probe(struct platform_device *pdev)
291{ 488{
292 static int dsa_version_printed; 489 static int dsa_version_printed;
293 struct dsa_platform_data *pd = pdev->dev.platform_data; 490 struct dsa_platform_data *pd = pdev->dev.platform_data;
294 struct net_device *dev; 491 struct net_device *dev;
295 struct dsa_switch_tree *dst; 492 struct dsa_switch_tree *dst;
296 int i; 493 int i, ret;
297 494
298 if (!dsa_version_printed++) 495 if (!dsa_version_printed++)
299 printk(KERN_NOTICE "Distributed Switch Architecture " 496 printk(KERN_NOTICE "Distributed Switch Architecture "
300 "driver version %s\n", dsa_driver_version); 497 "driver version %s\n", dsa_driver_version);
301 498
499 if (pdev->dev.of_node) {
500 ret = dsa_of_probe(pdev);
501 if (ret)
502 return ret;
503
504 pd = pdev->dev.platform_data;
505 }
506
302 if (pd == NULL || pd->netdev == NULL) 507 if (pd == NULL || pd->netdev == NULL)
303 return -EINVAL; 508 return -EINVAL;
304 509
305 dev = dev_to_net_device(pd->netdev); 510 dev = dev_to_net_device(pd->netdev);
306 if (dev == NULL) 511 if (dev == NULL) {
307 return -EINVAL; 512 ret = -EINVAL;
513 goto out;
514 }
308 515
309 if (dev->dsa_ptr != NULL) { 516 if (dev->dsa_ptr != NULL) {
310 dev_put(dev); 517 dev_put(dev);
311 return -EEXIST; 518 ret = -EEXIST;
519 goto out;
312 } 520 }
313 521
314 dst = kzalloc(sizeof(*dst), GFP_KERNEL); 522 dst = kzalloc(sizeof(*dst), GFP_KERNEL);
315 if (dst == NULL) { 523 if (dst == NULL) {
316 dev_put(dev); 524 dev_put(dev);
317 return -ENOMEM; 525 ret = -ENOMEM;
526 goto out;
318 } 527 }
319 528
320 platform_set_drvdata(pdev, dst); 529 platform_set_drvdata(pdev, dst);
@@ -366,6 +575,11 @@ static int dsa_probe(struct platform_device *pdev)
366 } 575 }
367 576
368 return 0; 577 return 0;
578
579out:
580 dsa_of_remove(pdev);
581
582 return ret;
369} 583}
370 584
371static int dsa_remove(struct platform_device *pdev) 585static int dsa_remove(struct platform_device *pdev)
@@ -385,6 +599,8 @@ static int dsa_remove(struct platform_device *pdev)
385 dsa_switch_destroy(ds); 599 dsa_switch_destroy(ds);
386 } 600 }
387 601
602 dsa_of_remove(pdev);
603
388 return 0; 604 return 0;
389} 605}
390 606
@@ -392,6 +608,12 @@ static void dsa_shutdown(struct platform_device *pdev)
392{ 608{
393} 609}
394 610
611static const struct of_device_id dsa_of_match_table[] = {
612 { .compatible = "marvell,dsa", },
613 {}
614};
615MODULE_DEVICE_TABLE(of, dsa_of_match_table);
616
395static struct platform_driver dsa_driver = { 617static struct platform_driver dsa_driver = {
396 .probe = dsa_probe, 618 .probe = dsa_probe,
397 .remove = dsa_remove, 619 .remove = dsa_remove,
@@ -399,6 +621,7 @@ static struct platform_driver dsa_driver = {
399 .driver = { 621 .driver = {
400 .name = "dsa", 622 .name = "dsa",
401 .owner = THIS_MODULE, 623 .owner = THIS_MODULE,
624 .of_match_table = dsa_of_match_table,
402 }, 625 },
403}; 626};
404 627
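
The OF probe path above expects a device tree laid out as in the dsa.txt binding added earlier in this series: a "marvell,dsa" node pointing at the host Ethernet device and MDIO bus, with one child node per switch and one grandchild per port. A hypothetical fragment (node names, phandles and addresses are illustrative, not taken from the patch):

dsa@0 {
	compatible = "marvell,dsa";
	#address-cells = <2>;
	#size-cells = <0>;

	dsa,ethernet = <&ethernet0>;	/* netdev the switch tree hangs off */
	dsa,mii-bus = <&mdio0>;

	switch@0 {
		#address-cells = <1>;
		#size-cells = <0>;
		reg = <0 0>;		/* MDIO address, switch index */

		port@0 {
			reg = <0>;
			label = "lan1";
		};

		port@5 {
			reg = <5>;
			label = "cpu";
		};
	};
};
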
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index a36c85eab5b4..5359560926bc 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -195,7 +195,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
195 if (netdev_uses_trailer_tags(dev)) 195 if (netdev_uses_trailer_tags(dev))
196 return htons(ETH_P_TRAILER); 196 return htons(ETH_P_TRAILER);
197 197
198 if (ntohs(eth->h_proto) >= 1536) 198 if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
199 return eth->h_proto; 199 return eth->h_proto;
200 200
201 /* 201 /*
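
For reference, the constant being named here is the IEEE 802.3 cutoff: an h_proto value of 0x0600 (1536) or above is an EtherType, while values up to 1500 are 802.3 length fields. A minimal userspace sketch of the same classification, with the constant open-coded since it only enters the uapi headers with this series:

#include <stdint.h>
#include <stdio.h>

#define ETH_P_802_3_MIN 0x0600	/* same value the patch introduces */

/* h_proto in host byte order: EtherType if >= 0x0600,
 * an 802.3 length field otherwise. */
static int is_ethertype(uint16_t h_proto)
{
	return h_proto >= ETH_P_802_3_MIN;
}

int main(void)
{
	printf("0x0800 (IPv4): %d\n", is_ethertype(0x0800));	/* 1 */
	printf("1500 (802.3 length): %d\n", is_ethertype(1500));	/* 0 */
	return 0;
}
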
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 43b95ca61114..55e1fd5b3e56 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -104,6 +104,7 @@ static const u8 lowpan_llprefix[] = {0xfe, 0x80};
104struct lowpan_dev_info { 104struct lowpan_dev_info {
105 struct net_device *real_dev; /* real WPAN device ptr */ 105 struct net_device *real_dev; /* real WPAN device ptr */
106 struct mutex dev_list_mtx; /* mutex for list ops */ 106 struct mutex dev_list_mtx; /* mutex for list ops */
107 unsigned short fragment_tag;
107}; 108};
108 109
109struct lowpan_dev_record { 110struct lowpan_dev_record {
@@ -120,7 +121,6 @@ struct lowpan_fragment {
120 struct list_head list; /* fragments list */ 121 struct list_head list; /* fragments list */
121}; 122};
122 123
123static unsigned short fragment_tag;
124static LIST_HEAD(lowpan_fragments); 124static LIST_HEAD(lowpan_fragments);
125static DEFINE_SPINLOCK(flist_lock); 125static DEFINE_SPINLOCK(flist_lock);
126 126
@@ -284,6 +284,9 @@ lowpan_compress_udp_header(u8 **hc06_ptr, struct sk_buff *skb)
284 /* checksum is always inline */ 284 /* checksum is always inline */
285 memcpy(*hc06_ptr, &uh->check, 2); 285 memcpy(*hc06_ptr, &uh->check, 2);
286 *hc06_ptr += 2; 286 *hc06_ptr += 2;
287
288 /* skip the UDP header */
289 skb_pull(skb, sizeof(struct udphdr));
287} 290}
288 291
289static inline int lowpan_fetch_skb_u8(struct sk_buff *skb, u8 *val) 292static inline int lowpan_fetch_skb_u8(struct sk_buff *skb, u8 *val)
@@ -309,9 +312,8 @@ static inline int lowpan_fetch_skb_u16(struct sk_buff *skb, u16 *val)
309} 312}
310 313
311static int 314static int
312lowpan_uncompress_udp_header(struct sk_buff *skb) 315lowpan_uncompress_udp_header(struct sk_buff *skb, struct udphdr *uh)
313{ 316{
314 struct udphdr *uh = udp_hdr(skb);
315 u8 tmp; 317 u8 tmp;
316 318
317 if (!uh) 319 if (!uh)
@@ -358,6 +360,14 @@ lowpan_uncompress_udp_header(struct sk_buff *skb)
358 /* copy checksum */ 360 /* copy checksum */
359 memcpy(&uh->check, &skb->data[0], 2); 361 memcpy(&uh->check, &skb->data[0], 2);
360 skb_pull(skb, 2); 362 skb_pull(skb, 2);
363
364 /*
365 * UDP length needs to be inferred from the lower layers;
366 * here, we obtain the hint from the remaining size of the
367 * frame
368 */
369 uh->len = htons(skb->len + sizeof(struct udphdr));
370 pr_debug("uncompressed UDP length: src = %d", uh->len);
361 } else { 371 } else {
362 pr_debug("ERROR: unsupported NH format\n"); 372 pr_debug("ERROR: unsupported NH format\n");
363 goto err; 373 goto err;
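
The inference above works because 6LoWPAN elides the UDP length field entirely: once the compressed header has been consumed, whatever remains of the frame is UDP payload. A standalone sketch of the same computation:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* The UDP header is a fixed 8 bytes; the elided length field is the
 * remaining payload plus that header, stored big-endian. */
static uint16_t infer_udp_len(size_t remaining_payload)
{
	return htons((uint16_t)(remaining_payload + 8));
}

int main(void)
{
	printf("len field: %u\n", ntohs(infer_udp_len(42)));	/* 50 */
	return 0;
}
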
@@ -572,17 +582,31 @@ static int lowpan_header_create(struct sk_buff *skb,
572 * this isn't implemented in mainline yet, so currently we assign 0xff 582 * this isn't implemented in mainline yet, so currently we assign 0xff
573 */ 583 */
574 { 584 {
585 mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA;
586 mac_cb(skb)->seq = ieee802154_mlme_ops(dev)->get_dsn(dev);
587
575 /* prepare wpan address data */ 588 /* prepare wpan address data */
576 sa.addr_type = IEEE802154_ADDR_LONG; 589 sa.addr_type = IEEE802154_ADDR_LONG;
577 sa.pan_id = 0xff; 590 sa.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
578
579 da.addr_type = IEEE802154_ADDR_LONG;
580 da.pan_id = 0xff;
581 591
582 memcpy(&(da.hwaddr), daddr, 8);
583 memcpy(&(sa.hwaddr), saddr, 8); 592 memcpy(&(sa.hwaddr), saddr, 8);
593 /* intra-PAN communications */
594 da.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
584 595
585 mac_cb(skb)->flags = IEEE802154_FC_TYPE_DATA; 596 /*
597 * if the destination address is the broadcast address, use the
598 * corresponding short address
599 */
600 if (lowpan_is_addr_broadcast(daddr)) {
601 da.addr_type = IEEE802154_ADDR_SHORT;
602 da.short_addr = IEEE802154_ADDR_BROADCAST;
603 } else {
604 da.addr_type = IEEE802154_ADDR_LONG;
605 memcpy(&(da.hwaddr), daddr, IEEE802154_ADDR_LEN);
606
607 /* request acknowledgment */
608 mac_cb(skb)->flags |= MAC_CB_FLAG_ACKREQ;
609 }
586 610
587 return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev, 611 return dev_hard_header(skb, lowpan_dev_info(dev)->real_dev,
588 type, (void *)&da, (void *)&sa, skb->len); 612 type, (void *)&da, (void *)&sa, skb->len);
@@ -650,7 +674,7 @@ static void lowpan_fragment_timer_expired(unsigned long entry_addr)
650} 674}
651 675
652static struct lowpan_fragment * 676static struct lowpan_fragment *
653lowpan_alloc_new_frame(struct sk_buff *skb, u8 len, u16 tag) 677lowpan_alloc_new_frame(struct sk_buff *skb, u16 len, u16 tag)
654{ 678{
655 struct lowpan_fragment *frame; 679 struct lowpan_fragment *frame;
656 680
@@ -720,7 +744,7 @@ lowpan_process_data(struct sk_buff *skb)
720 { 744 {
721 struct lowpan_fragment *frame; 745 struct lowpan_fragment *frame;
722 /* slen stores the rightmost 8 bits of the 11 bits length */ 746 /* slen stores the rightmost 8 bits of the 11 bits length */
723 u8 slen, offset; 747 u8 slen, offset = 0;
724 u16 len, tag; 748 u16 len, tag;
725 bool found = false; 749 bool found = false;
726 750
@@ -731,6 +755,18 @@ lowpan_process_data(struct sk_buff *skb)
731 /* adds the 3 MSB to the 8 LSB to retrieve the 11 bits length */ 755 /* adds the 3 MSB to the 8 LSB to retrieve the 11 bits length */
732 len = ((iphc0 & 7) << 8) | slen; 756 len = ((iphc0 & 7) << 8) | slen;
733 757
758 if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1) {
759 pr_debug("%s received a FRAG1 packet (tag: %d, "
760 "size of the entire IP packet: %d)",
761 __func__, tag, len);
762 } else { /* FRAGN */
763 if (lowpan_fetch_skb_u8(skb, &offset))
764 goto unlock_and_drop;
765 pr_debug("%s received a FRAGN packet (tag: %d, "
766 "size of the entire IP packet: %d, "
767 "offset: %d)", __func__, tag, len, offset * 8);
768 }
769
734 /* 770 /*
735 * check if frame assembling with the same tag is 771 * check if frame assembling with the same tag is
736 * already in progress 772 * already in progress
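
The dispatch handling added here follows the RFC 4944 fragmentation headers: FRAG1 carries an 11-bit datagram size and a 16-bit tag, and FRAGN additionally carries an offset counted in 8-octet units, which is why only the FRAGN branch fetches an extra byte. A standalone decoder over the same layout (constants open-coded; the kernel uses the LOWPAN_DISPATCH_* macros):

#include <stdint.h>
#include <stdio.h>

#define DISPATCH_MASK	0xf8
#define DISPATCH_FRAG1	0xc0	/* 11000xxx */
#define DISPATCH_FRAGN	0xe0	/* 11100xxx */

int main(void)
{
	/* FRAGN fragment: size 672, tag 0x1234, offset 12 * 8 bytes */
	uint8_t hdr[] = { DISPATCH_FRAGN | 0x02, 0xa0, 0x12, 0x34, 12 };
	uint16_t size = ((hdr[0] & 0x07) << 8) | hdr[1];
	uint16_t tag = (hdr[2] << 8) | hdr[3];

	if ((hdr[0] & DISPATCH_MASK) == DISPATCH_FRAG1)
		printf("FRAG1 tag %u size %u\n", tag, size);
	else if ((hdr[0] & DISPATCH_MASK) == DISPATCH_FRAGN)
		printf("FRAGN tag %u size %u offset %u bytes\n",
		       tag, size, hdr[4] * 8);
	return 0;
}
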
@@ -745,17 +781,13 @@ lowpan_process_data(struct sk_buff *skb)
745 781
746 /* alloc new frame structure */ 782 /* alloc new frame structure */
747 if (!found) { 783 if (!found) {
784 pr_debug("%s first fragment received for tag %d, "
785 "begin packet reassembly", __func__, tag);
748 frame = lowpan_alloc_new_frame(skb, len, tag); 786 frame = lowpan_alloc_new_frame(skb, len, tag);
749 if (!frame) 787 if (!frame)
750 goto unlock_and_drop; 788 goto unlock_and_drop;
751 } 789 }
752 790
753 if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1)
754 goto unlock_and_drop;
755
756 if (lowpan_fetch_skb_u8(skb, &offset)) /* fetch offset */
757 goto unlock_and_drop;
758
759 /* if payload fits buffer, copy it */ 791 /* if payload fits buffer, copy it */
760 if (likely((offset * 8 + skb->len) <= frame->length)) 792 if (likely((offset * 8 + skb->len) <= frame->length))
761 skb_copy_to_linear_data_offset(frame->skb, offset * 8, 793 skb_copy_to_linear_data_offset(frame->skb, offset * 8,
@@ -773,6 +805,9 @@ lowpan_process_data(struct sk_buff *skb)
773 list_del(&frame->list); 805 list_del(&frame->list);
774 spin_unlock_bh(&flist_lock); 806 spin_unlock_bh(&flist_lock);
775 807
808 pr_debug("%s successfully reassembled fragment "
809 "(tag %d)", __func__, tag);
810
776 dev_kfree_skb(skb); 811 dev_kfree_skb(skb);
777 skb = frame->skb; 812 skb = frame->skb;
778 kfree(frame); 813 kfree(frame);
@@ -918,10 +953,35 @@ lowpan_process_data(struct sk_buff *skb)
918 } 953 }
919 954
920 /* UDP data uncompression */ 955 /* UDP data uncompression */
921 if (iphc0 & LOWPAN_IPHC_NH_C) 956 if (iphc0 & LOWPAN_IPHC_NH_C) {
922 if (lowpan_uncompress_udp_header(skb)) 957 struct udphdr uh;
958 struct sk_buff *new;
959 if (lowpan_uncompress_udp_header(skb, &uh))
923 goto drop; 960 goto drop;
924 961
962 /*
963 * replace the compressed UDP header with the uncompressed
964 * UDP header
965 */
966 new = skb_copy_expand(skb, sizeof(struct udphdr),
967 skb_tailroom(skb), GFP_ATOMIC);
968 kfree_skb(skb);
969
970 if (!new)
971 return -ENOMEM;
972
973 skb = new;
974
975 skb_push(skb, sizeof(struct udphdr));
976 skb_reset_transport_header(skb);
977 skb_copy_to_linear_data(skb, &uh, sizeof(struct udphdr));
978
979 lowpan_raw_dump_table(__func__, "raw UDP header dump",
980 (u8 *)&uh, sizeof(uh));
981
982 hdr.nexthdr = UIP_PROTO_UDP;
983 }
984
925 /* Not fragmented packet */ 985 /* Not fragmented packet */
926 hdr.payload_len = htons(skb->len); 986 hdr.payload_len = htons(skb->len);
927 987
@@ -969,13 +1029,13 @@ static int lowpan_get_mac_header_length(struct sk_buff *skb)
969 1029
970static int 1030static int
971lowpan_fragment_xmit(struct sk_buff *skb, u8 *head, 1031lowpan_fragment_xmit(struct sk_buff *skb, u8 *head,
972 int mlen, int plen, int offset) 1032 int mlen, int plen, int offset, int type)
973{ 1033{
974 struct sk_buff *frag; 1034 struct sk_buff *frag;
975 int hlen, ret; 1035 int hlen, ret;
976 1036
977 /* if payload length is zero, therefore it's a first fragment */ 1037 hlen = (type == LOWPAN_DISPATCH_FRAG1) ?
978 hlen = (plen == 0 ? LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE); 1038 LOWPAN_FRAG1_HEAD_SIZE : LOWPAN_FRAGN_HEAD_SIZE;
979 1039
980 lowpan_raw_dump_inline(__func__, "6lowpan fragment header", head, hlen); 1040 lowpan_raw_dump_inline(__func__, "6lowpan fragment header", head, hlen);
981 1041
@@ -1003,14 +1063,14 @@ lowpan_fragment_xmit(struct sk_buff *skb, u8 *head,
1003} 1063}
1004 1064
1005static int 1065static int
1006lowpan_skb_fragmentation(struct sk_buff *skb) 1066lowpan_skb_fragmentation(struct sk_buff *skb, struct net_device *dev)
1007{ 1067{
1008 int err, header_length, payload_length, tag, offset = 0; 1068 int err, header_length, payload_length, tag, offset = 0;
1009 u8 head[5]; 1069 u8 head[5];
1010 1070
1011 header_length = lowpan_get_mac_header_length(skb); 1071 header_length = lowpan_get_mac_header_length(skb);
1012 payload_length = skb->len - header_length; 1072 payload_length = skb->len - header_length;
1013 tag = fragment_tag++; 1073 tag = lowpan_dev_info(dev)->fragment_tag++;
1014 1074
1015 /* first fragment header */ 1075 /* first fragment header */
1016 head[0] = LOWPAN_DISPATCH_FRAG1 | ((payload_length >> 8) & 0x7); 1076 head[0] = LOWPAN_DISPATCH_FRAG1 | ((payload_length >> 8) & 0x7);
@@ -1018,7 +1078,16 @@ lowpan_skb_fragmentation(struct sk_buff *skb)
1018 head[2] = tag >> 8; 1078 head[2] = tag >> 8;
1019 head[3] = tag & 0xff; 1079 head[3] = tag & 0xff;
1020 1080
1021 err = lowpan_fragment_xmit(skb, head, header_length, 0, 0); 1081 err = lowpan_fragment_xmit(skb, head, header_length, LOWPAN_FRAG_SIZE,
1082 0, LOWPAN_DISPATCH_FRAG1);
1083
1084 if (err) {
1085 pr_debug("%s unable to send FRAG1 packet (tag: %d)",
1086 __func__, tag);
1087 goto exit;
1088 }
1089
1090 offset = LOWPAN_FRAG_SIZE;
1022 1091
1023 /* next fragment header */ 1092 /* next fragment header */
1024 head[0] &= ~LOWPAN_DISPATCH_FRAG1; 1093 head[0] &= ~LOWPAN_DISPATCH_FRAG1;
@@ -1033,10 +1102,17 @@ lowpan_skb_fragmentation(struct sk_buff *skb)
1033 len = payload_length - offset; 1102 len = payload_length - offset;
1034 1103
1035 err = lowpan_fragment_xmit(skb, head, header_length, 1104 err = lowpan_fragment_xmit(skb, head, header_length,
1036 len, offset); 1105 len, offset, LOWPAN_DISPATCH_FRAGN);
1106 if (err) {
1107 pr_debug("%s unable to send a subsequent FRAGN packet "
1108 "(tag: %d, offset: %d", __func__, tag, offset);
1109 goto exit;
1110 }
1111
1037 offset += len; 1112 offset += len;
1038 } 1113 }
1039 1114
1115exit:
1040 return err; 1116 return err;
1041} 1117}
1042 1118
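
Note the behavioral change on the transmit side: FRAG1 used to be sent with no payload (plen == 0), whereas it now carries the first LOWPAN_FRAG_SIZE bytes, with the FRAGN offsets starting right after them. A sketch of the resulting split, with frame emission stubbed out (the LOWPAN_FRAG_SIZE value below is an assumption; the kernel derives it from the 802.15.4 MTU):

#include <stdio.h>

#define LOWPAN_FRAG_SIZE 88	/* illustrative; must be a multiple of 8 */

int main(void)
{
	int payload_length = 300, offset, len;

	printf("FRAG1: bytes [0, %d)\n", LOWPAN_FRAG_SIZE);
	for (offset = LOWPAN_FRAG_SIZE; offset < payload_length;
	     offset += len) {
		len = payload_length - offset;
		if (len > LOWPAN_FRAG_SIZE)
			len = LOWPAN_FRAG_SIZE;
		printf("FRAGN: bytes [%d, %d), offset field %d\n",
		       offset, offset + len, offset / 8);
	}
	return 0;
}
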
@@ -1059,14 +1135,14 @@ static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
1059 } 1135 }
1060 1136
1061 pr_debug("frame is too big, fragmentation is needed\n"); 1137 pr_debug("frame is too big, fragmentation is needed\n");
1062 err = lowpan_skb_fragmentation(skb); 1138 err = lowpan_skb_fragmentation(skb, dev);
1063error: 1139error:
1064 dev_kfree_skb(skb); 1140 dev_kfree_skb(skb);
1065out: 1141out:
1066 if (err < 0) 1142 if (err)
1067 pr_debug("ERROR: xmit failed\n"); 1143 pr_debug("ERROR: xmit failed\n");
1068 1144
1069 return (err < 0 ? NETDEV_TX_BUSY : NETDEV_TX_OK); 1145 return (err < 0) ? NET_XMIT_DROP : err;
1070} 1146}
1071 1147
1072static struct wpan_phy *lowpan_get_phy(const struct net_device *dev) 1148static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
@@ -1087,6 +1163,12 @@ static u16 lowpan_get_short_addr(const struct net_device *dev)
1087 return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev); 1163 return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
1088} 1164}
1089 1165
1166static u8 lowpan_get_dsn(const struct net_device *dev)
1167{
1168 struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
1169 return ieee802154_mlme_ops(real_dev)->get_dsn(real_dev);
1170}
1171
1090static struct header_ops lowpan_header_ops = { 1172static struct header_ops lowpan_header_ops = {
1091 .create = lowpan_header_create, 1173 .create = lowpan_header_create,
1092}; 1174};
@@ -1100,6 +1182,7 @@ static struct ieee802154_mlme_ops lowpan_mlme = {
1100 .get_pan_id = lowpan_get_pan_id, 1182 .get_pan_id = lowpan_get_pan_id,
1101 .get_phy = lowpan_get_phy, 1183 .get_phy = lowpan_get_phy,
1102 .get_short_addr = lowpan_get_short_addr, 1184 .get_short_addr = lowpan_get_short_addr,
1185 .get_dsn = lowpan_get_dsn,
1103}; 1186};
1104 1187
1105static void lowpan_setup(struct net_device *dev) 1188static void lowpan_setup(struct net_device *dev)
@@ -1203,6 +1286,7 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
1203 return -ENODEV; 1286 return -ENODEV;
1204 1287
1205 lowpan_dev_info(dev)->real_dev = real_dev; 1288 lowpan_dev_info(dev)->real_dev = real_dev;
1289 lowpan_dev_info(dev)->fragment_tag = 0;
1206 mutex_init(&lowpan_dev_info(dev)->dev_list_mtx); 1290 mutex_init(&lowpan_dev_info(dev)->dev_list_mtx);
1207 1291
1208 entry = kzalloc(sizeof(struct lowpan_dev_record), GFP_KERNEL); 1292 entry = kzalloc(sizeof(struct lowpan_dev_record), GFP_KERNEL);
diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h
index bba5f8336317..4b8f917658b5 100644
--- a/net/ieee802154/6lowpan.h
+++ b/net/ieee802154/6lowpan.h
@@ -92,9 +92,10 @@
92 */ 92 */
93#define lowpan_is_iid_16_bit_compressable(a) \ 93#define lowpan_is_iid_16_bit_compressable(a) \
94 ((((a)->s6_addr16[4]) == 0) && \ 94 ((((a)->s6_addr16[4]) == 0) && \
95 (((a)->s6_addr16[5]) == 0) && \ 95 (((a)->s6_addr[10]) == 0) && \
96 (((a)->s6_addr16[6]) == 0) && \ 96 (((a)->s6_addr[11]) == 0xff) && \
97 ((((a)->s6_addr[14]) & 0x80) == 0)) 97 (((a)->s6_addr[12]) == 0xfe) && \
98 (((a)->s6_addr[13]) == 0))
98 99
99/* multicast address */ 100/* multicast address */
100#define is_addr_mcast(a) (((a)->s6_addr[0]) == 0xFF) 101#define is_addr_mcast(a) (((a)->s6_addr[0]) == 0xFF)
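
The corrected macro now matches an interface identifier of the exact form 0000:00ff:fe00:XXXX, i.e. one mapped from a 16-bit 802.15.4 short address, which RFC 6282 allows to be compressed down to those final 16 bits. A standalone equivalent over the last eight address bytes:

#include <stdint.h>
#include <stdio.h>

static int iid_is_16bit_compressible(const uint8_t a[16])
{
	return a[8] == 0 && a[9] == 0 && a[10] == 0 &&
	       a[11] == 0xff && a[12] == 0xfe && a[13] == 0;
}

int main(void)
{
	uint8_t addr[16] = { 0xfe, 0x80, 0, 0, 0, 0, 0, 0,
			     0, 0, 0, 0xff, 0xfe, 0, 0xab, 0xcd };

	printf("%d\n", iid_is_16bit_compressible(addr));	/* 1 */
	return 0;
}
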
diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
index e0da175f8e5b..581a59504bd5 100644
--- a/net/ieee802154/dgram.c
+++ b/net/ieee802154/dgram.c
@@ -291,6 +291,9 @@ static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk,
291 size_t copied = 0; 291 size_t copied = 0;
292 int err = -EOPNOTSUPP; 292 int err = -EOPNOTSUPP;
293 struct sk_buff *skb; 293 struct sk_buff *skb;
294 struct sockaddr_ieee802154 *saddr;
295
296 saddr = (struct sockaddr_ieee802154 *)msg->msg_name;
294 297
295 skb = skb_recv_datagram(sk, flags, noblock, &err); 298 skb = skb_recv_datagram(sk, flags, noblock, &err);
296 if (!skb) 299 if (!skb)
@@ -309,6 +312,13 @@ static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk,
309 312
310 sock_recv_ts_and_drops(msg, sk, skb); 313 sock_recv_ts_and_drops(msg, sk, skb);
311 314
315 if (saddr) {
316 saddr->family = AF_IEEE802154;
317 saddr->addr = mac_cb(skb)->sa;
318 }
319 if (addr_len)
320 *addr_len = sizeof(*saddr);
321
312 if (flags & MSG_TRUNC) 322 if (flags & MSG_TRUNC)
313 copied = skb->len; 323 copied = skb->len;
314done: 324done:
diff --git a/net/ieee802154/netlink.c b/net/ieee802154/netlink.c
index 97351e1d07a4..7e49bbcc6967 100644
--- a/net/ieee802154/netlink.c
+++ b/net/ieee802154/netlink.c
@@ -64,8 +64,8 @@ struct sk_buff *ieee802154_nl_create(int flags, u8 req)
64 64
65int ieee802154_nl_mcast(struct sk_buff *msg, unsigned int group) 65int ieee802154_nl_mcast(struct sk_buff *msg, unsigned int group)
66{ 66{
67 /* XXX: nlh is right at the start of msg */ 67 struct nlmsghdr *nlh = nlmsg_hdr(msg);
68 void *hdr = genlmsg_data(NLMSG_DATA(msg->data)); 68 void *hdr = genlmsg_data(nlmsg_data(nlh));
69 69
70 if (genlmsg_end(msg, hdr) < 0) 70 if (genlmsg_end(msg, hdr) < 0)
71 goto out; 71 goto out;
@@ -97,8 +97,8 @@ struct sk_buff *ieee802154_nl_new_reply(struct genl_info *info,
97 97
98int ieee802154_nl_reply(struct sk_buff *msg, struct genl_info *info) 98int ieee802154_nl_reply(struct sk_buff *msg, struct genl_info *info)
99{ 99{
100 /* XXX: nlh is right at the start of msg */ 100 struct nlmsghdr *nlh = nlmsg_hdr(msg);
101 void *hdr = genlmsg_data(NLMSG_DATA(msg->data)); 101 void *hdr = genlmsg_data(nlmsg_data(nlh));
102 102
103 if (genlmsg_end(msg, hdr) < 0) 103 if (genlmsg_end(msg, hdr) < 0)
104 goto out; 104 goto out;
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 7944df768454..8603ca827104 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -166,6 +166,7 @@ config IP_PNP_RARP
166config NET_IPIP 166config NET_IPIP
167 tristate "IP: tunneling" 167 tristate "IP: tunneling"
168 select INET_TUNNEL 168 select INET_TUNNEL
169 select NET_IP_TUNNEL
169 ---help--- 170 ---help---
170 Tunneling means encapsulating data of one protocol type within 171 Tunneling means encapsulating data of one protocol type within
171 another protocol and sending it over a channel that understands the 172 another protocol and sending it over a channel that understands the
@@ -186,9 +187,14 @@ config NET_IPGRE_DEMUX
186 This is helper module to demultiplex GRE packets on GRE version field criteria. 187 This is helper module to demultiplex GRE packets on GRE version field criteria.
187 Required by ip_gre and pptp modules. 188 Required by ip_gre and pptp modules.
188 189
190config NET_IP_TUNNEL
191 tristate
192 default n
193
189config NET_IPGRE 194config NET_IPGRE
190 tristate "IP: GRE tunnels over IP" 195 tristate "IP: GRE tunnels over IP"
191 depends on (IPV6 || IPV6=n) && NET_IPGRE_DEMUX 196 depends on (IPV6 || IPV6=n) && NET_IPGRE_DEMUX
197 select NET_IP_TUNNEL
192 help 198 help
193 Tunneling means encapsulating data of one protocol type within 199 Tunneling means encapsulating data of one protocol type within
194 another protocol and sending it over a channel that understands the 200 another protocol and sending it over a channel that understands the
@@ -313,6 +319,7 @@ config SYN_COOKIES
313config NET_IPVTI 319config NET_IPVTI
314 tristate "Virtual (secure) IP: tunneling" 320 tristate "Virtual (secure) IP: tunneling"
315 select INET_TUNNEL 321 select INET_TUNNEL
322 select NET_IP_TUNNEL
316 depends on INET_XFRM_MODE_TUNNEL 323 depends on INET_XFRM_MODE_TUNNEL
317 ---help--- 324 ---help---
318 Tunneling means encapsulating data of one protocol type within 325 Tunneling means encapsulating data of one protocol type within
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index 15ca63ec604e..089cb9f36387 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -13,6 +13,7 @@ obj-y := route.o inetpeer.o protocol.o \
13 fib_frontend.o fib_semantics.o fib_trie.o \ 13 fib_frontend.o fib_semantics.o fib_trie.o \
14 inet_fragment.o ping.o 14 inet_fragment.o ping.o
15 15
16obj-$(CONFIG_NET_IP_TUNNEL) += ip_tunnel.o
16obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o 17obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o
17obj-$(CONFIG_PROC_FS) += proc.o 18obj-$(CONFIG_PROC_FS) += proc.o
18obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o 19obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index c929d9c1c4b6..93824c57b108 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -111,7 +111,6 @@
111#include <net/sock.h> 111#include <net/sock.h>
112#include <net/raw.h> 112#include <net/raw.h>
113#include <net/icmp.h> 113#include <net/icmp.h>
114#include <net/ipip.h>
115#include <net/inet_common.h> 114#include <net/inet_common.h>
116#include <net/xfrm.h> 115#include <net/xfrm.h>
117#include <net/net_namespace.h> 116#include <net/net_namespace.h>
@@ -1283,9 +1282,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
1283 int ihl; 1282 int ihl;
1284 int id; 1283 int id;
1285 unsigned int offset = 0; 1284 unsigned int offset = 0;
1286 1285 bool tunnel;
1287 if (!(features & NETIF_F_V4_CSUM))
1288 features &= ~NETIF_F_SG;
1289 1286
1290 if (unlikely(skb_shinfo(skb)->gso_type & 1287 if (unlikely(skb_shinfo(skb)->gso_type &
1291 ~(SKB_GSO_TCPV4 | 1288 ~(SKB_GSO_TCPV4 |
@@ -1293,6 +1290,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
1293 SKB_GSO_DODGY | 1290 SKB_GSO_DODGY |
1294 SKB_GSO_TCP_ECN | 1291 SKB_GSO_TCP_ECN |
1295 SKB_GSO_GRE | 1292 SKB_GSO_GRE |
1293 SKB_GSO_UDP_TUNNEL |
1296 0))) 1294 0)))
1297 goto out; 1295 goto out;
1298 1296
@@ -1307,6 +1305,8 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
1307 if (unlikely(!pskb_may_pull(skb, ihl))) 1305 if (unlikely(!pskb_may_pull(skb, ihl)))
1308 goto out; 1306 goto out;
1309 1307
1308 tunnel = !!skb->encapsulation;
1309
1310 __skb_pull(skb, ihl); 1310 __skb_pull(skb, ihl);
1311 skb_reset_transport_header(skb); 1311 skb_reset_transport_header(skb);
1312 iph = ip_hdr(skb); 1312 iph = ip_hdr(skb);
@@ -1326,7 +1326,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
1326 skb = segs; 1326 skb = segs;
1327 do { 1327 do {
1328 iph = ip_hdr(skb); 1328 iph = ip_hdr(skb);
1329 if (proto == IPPROTO_UDP) { 1329 if (!tunnel && proto == IPPROTO_UDP) {
1330 iph->id = htons(id); 1330 iph->id = htons(id);
1331 iph->frag_off = htons(offset >> 3); 1331 iph->frag_off = htons(offset >> 3);
1332 if (skb->next != NULL) 1332 if (skb->next != NULL)
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index fea4929f6200..247ec1951c35 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -654,11 +654,19 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
654 arp_ptr += dev->addr_len; 654 arp_ptr += dev->addr_len;
655 memcpy(arp_ptr, &src_ip, 4); 655 memcpy(arp_ptr, &src_ip, 4);
656 arp_ptr += 4; 656 arp_ptr += 4;
657 if (target_hw != NULL) 657
658 memcpy(arp_ptr, target_hw, dev->addr_len); 658 switch (dev->type) {
659 else 659#if IS_ENABLED(CONFIG_FIREWIRE_NET)
660 memset(arp_ptr, 0, dev->addr_len); 660 case ARPHRD_IEEE1394:
661 arp_ptr += dev->addr_len; 661 break;
662#endif
663 default:
664 if (target_hw != NULL)
665 memcpy(arp_ptr, target_hw, dev->addr_len);
666 else
667 memset(arp_ptr, 0, dev->addr_len);
668 arp_ptr += dev->addr_len;
669 }
662 memcpy(arp_ptr, &dest_ip, 4); 670 memcpy(arp_ptr, &dest_ip, 4);
663 671
664 return skb; 672 return skb;
@@ -781,7 +789,14 @@ static int arp_process(struct sk_buff *skb)
781 arp_ptr += dev->addr_len; 789 arp_ptr += dev->addr_len;
782 memcpy(&sip, arp_ptr, 4); 790 memcpy(&sip, arp_ptr, 4);
783 arp_ptr += 4; 791 arp_ptr += 4;
784 arp_ptr += dev->addr_len; 792 switch (dev_type) {
793#if IS_ENABLED(CONFIG_FIREWIRE_NET)
794 case ARPHRD_IEEE1394:
795 break;
796#endif
797 default:
798 arp_ptr += dev->addr_len;
799 }
785 memcpy(&tip, arp_ptr, 4); 800 memcpy(&tip, arp_ptr, 4);
786/* 801/*
787 * Check for bad requests for 127.x.x.x and requests for multicast 802 * Check for bad requests for 127.x.x.x and requests for multicast
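
The special case exists because RFC 2734 ARP over IEEE 1394 carries no target hardware address field, so the cursor must not step over one. A sketch of the variable-layout walk arp_process() now performs, starting at the sender hardware address (a compilable illustration, not kernel code):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ARPHRD_IEEE1394 24	/* value from linux/if_arp.h */

static void parse_arp_tail(const uint8_t *arp_ptr, int addr_len,
			   int dev_type, uint32_t *sip, uint32_t *tip)
{
	arp_ptr += addr_len;		/* skip sender hardware address */
	memcpy(sip, arp_ptr, 4);	/* sender IP */
	arp_ptr += 4;
	if (dev_type != ARPHRD_IEEE1394)
		arp_ptr += addr_len;	/* target hw address, if present */
	memcpy(tip, arp_ptr, 4);	/* target IP */
}

int main(void)
{
	uint8_t tail[] = { 1, 2, 3, 4,  10, 0, 0, 1,	/* sender hw, IP */
			   5, 6, 7, 8,  10, 0, 0, 2 };	/* target hw, IP */
	uint32_t sip, tip;

	parse_arp_tail(tail, 4, 1 /* ARPHRD_ETHER */, &sip, &tip);
	printf("sip %08x tip %08x\n", sip, tip);
	return 0;
}
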
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 96083b7a436b..2759dfd576ae 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -536,7 +536,7 @@ struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
536 return NULL; 536 return NULL;
537} 537}
538 538
539static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 539static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
540{ 540{
541 struct net *net = sock_net(skb->sk); 541 struct net *net = sock_net(skb->sk);
542 struct nlattr *tb[IFA_MAX+1]; 542 struct nlattr *tb[IFA_MAX+1];
@@ -775,7 +775,7 @@ static struct in_ifaddr *find_matching_ifa(struct in_ifaddr *ifa)
775 return NULL; 775 return NULL;
776} 776}
777 777
778static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 778static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
779{ 779{
780 struct net *net = sock_net(skb->sk); 780 struct net *net = sock_net(skb->sk);
781 struct in_ifaddr *ifa; 781 struct in_ifaddr *ifa;
@@ -1501,6 +1501,8 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
1501 idx = 0; 1501 idx = 0;
1502 head = &net->dev_index_head[h]; 1502 head = &net->dev_index_head[h];
1503 rcu_read_lock(); 1503 rcu_read_lock();
1504 cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
1505 net->dev_base_seq;
1504 hlist_for_each_entry_rcu(dev, head, index_hlist) { 1506 hlist_for_each_entry_rcu(dev, head, index_hlist) {
1505 if (idx < s_idx) 1507 if (idx < s_idx)
1506 goto cont; 1508 goto cont;
@@ -1521,6 +1523,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
1521 rcu_read_unlock(); 1523 rcu_read_unlock();
1522 goto done; 1524 goto done;
1523 } 1525 }
1526 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
1524 } 1527 }
1525cont: 1528cont:
1526 idx++; 1529 idx++;
@@ -1732,8 +1735,7 @@ static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = {
1732}; 1735};
1733 1736
1734static int inet_netconf_get_devconf(struct sk_buff *in_skb, 1737static int inet_netconf_get_devconf(struct sk_buff *in_skb,
1735 struct nlmsghdr *nlh, 1738 struct nlmsghdr *nlh)
1736 void *arg)
1737{ 1739{
1738 struct net *net = sock_net(in_skb->sk); 1740 struct net *net = sock_net(in_skb->sk);
1739 struct nlattr *tb[NETCONFA_MAX+1]; 1741 struct nlattr *tb[NETCONFA_MAX+1];
@@ -1793,6 +1795,77 @@ errout:
1793 return err; 1795 return err;
1794} 1796}
1795 1797
1798static int inet_netconf_dump_devconf(struct sk_buff *skb,
1799 struct netlink_callback *cb)
1800{
1801 struct net *net = sock_net(skb->sk);
1802 int h, s_h;
1803 int idx, s_idx;
1804 struct net_device *dev;
1805 struct in_device *in_dev;
1806 struct hlist_head *head;
1807
1808 s_h = cb->args[0];
1809 s_idx = idx = cb->args[1];
1810
1811 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
1812 idx = 0;
1813 head = &net->dev_index_head[h];
1814 rcu_read_lock();
1815 cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
1816 net->dev_base_seq;
1817 hlist_for_each_entry_rcu(dev, head, index_hlist) {
1818 if (idx < s_idx)
1819 goto cont;
1820 in_dev = __in_dev_get_rcu(dev);
1821 if (!in_dev)
1822 goto cont;
1823
1824 if (inet_netconf_fill_devconf(skb, dev->ifindex,
1825 &in_dev->cnf,
1826 NETLINK_CB(cb->skb).portid,
1827 cb->nlh->nlmsg_seq,
1828 RTM_NEWNETCONF,
1829 NLM_F_MULTI,
1830 -1) <= 0) {
1831 rcu_read_unlock();
1832 goto done;
1833 }
1834 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
1835cont:
1836 idx++;
1837 }
1838 rcu_read_unlock();
1839 }
1840 if (h == NETDEV_HASHENTRIES) {
1841 if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
1842 net->ipv4.devconf_all,
1843 NETLINK_CB(cb->skb).portid,
1844 cb->nlh->nlmsg_seq,
1845 RTM_NEWNETCONF, NLM_F_MULTI,
1846 -1) <= 0)
1847 goto done;
1848 else
1849 h++;
1850 }
1851 if (h == NETDEV_HASHENTRIES + 1) {
1852 if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
1853 net->ipv4.devconf_dflt,
1854 NETLINK_CB(cb->skb).portid,
1855 cb->nlh->nlmsg_seq,
1856 RTM_NEWNETCONF, NLM_F_MULTI,
1857 -1) <= 0)
1858 goto done;
1859 else
1860 h++;
1861 }
1862done:
1863 cb->args[0] = h;
1864 cb->args[1] = idx;
1865
1866 return skb->len;
1867}
1868
1796#ifdef CONFIG_SYSCTL 1869#ifdef CONFIG_SYSCTL
1797 1870
1798static void devinet_copy_dflt_conf(struct net *net, int i) 1871static void devinet_copy_dflt_conf(struct net *net, int i)
@@ -2197,6 +2270,6 @@ void __init devinet_init(void)
2197 rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, NULL); 2270 rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, NULL);
2198 rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, NULL); 2271 rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, NULL);
2199 rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf, 2272 rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
2200 NULL, NULL); 2273 inet_netconf_dump_devconf, NULL);
2201} 2274}
2202 2275
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index eb4bb12b3eb4..c7629a209f9d 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -604,7 +604,7 @@ errout:
604 return err; 604 return err;
605} 605}
606 606
607static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 607static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
608{ 608{
609 struct net *net = sock_net(skb->sk); 609 struct net *net = sock_net(skb->sk);
610 struct fib_config cfg; 610 struct fib_config cfg;
@@ -626,7 +626,7 @@ errout:
626 return err; 626 return err;
627} 627}
628 628
629static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 629static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
630{ 630{
631 struct net *net = sock_net(skb->sk); 631 struct net *net = sock_net(skb->sk);
632 struct fib_config cfg; 632 struct fib_config cfg;
@@ -957,8 +957,8 @@ static void nl_fib_input(struct sk_buff *skb)
957 957
958 net = sock_net(skb->sk); 958 net = sock_net(skb->sk);
959 nlh = nlmsg_hdr(skb); 959 nlh = nlmsg_hdr(skb);
960 if (skb->len < NLMSG_SPACE(0) || skb->len < nlh->nlmsg_len || 960 if (skb->len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len ||
961 nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*frn))) 961 nlmsg_len(nlh) < sizeof(*frn))
962 return; 962 return;
963 963
964 skb = skb_clone(skb, GFP_KERNEL); 964 skb = skb_clone(skb, GFP_KERNEL);
@@ -966,7 +966,7 @@ static void nl_fib_input(struct sk_buff *skb)
966 return; 966 return;
967 nlh = nlmsg_hdr(skb); 967 nlh = nlmsg_hdr(skb);
968 968
969 frn = (struct fib_result_nl *) NLMSG_DATA(nlh); 969 frn = (struct fib_result_nl *) nlmsg_data(nlh);
970 tb = fib_get_table(net, frn->tb_id_in); 970 tb = fib_get_table(net, frn->tb_id_in);
971 971
972 nl_fib_lookup(frn, tb); 972 nl_fib_lookup(frn, tb);
diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c
index 7a4c710c4cdd..d2d5a99fba09 100644
--- a/net/ipv4/gre.c
+++ b/net/ipv4/gre.c
@@ -27,11 +27,6 @@
27 27
28static const struct gre_protocol __rcu *gre_proto[GREPROTO_MAX] __read_mostly; 28static const struct gre_protocol __rcu *gre_proto[GREPROTO_MAX] __read_mostly;
29static DEFINE_SPINLOCK(gre_proto_lock); 29static DEFINE_SPINLOCK(gre_proto_lock);
30struct gre_base_hdr {
31 __be16 flags;
32 __be16 protocol;
33};
34#define GRE_HEADER_SECTION 4
35 30
36int gre_add_protocol(const struct gre_protocol *proto, u8 version) 31int gre_add_protocol(const struct gre_protocol *proto, u8 version)
37{ 32{
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 786d97aee751..6acb541c9091 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -559,7 +559,7 @@ static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
559 559
560int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req) 560int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req)
561{ 561{
562 int err = req->rsk_ops->rtx_syn_ack(parent, req, NULL); 562 int err = req->rsk_ops->rtx_syn_ack(parent, req);
563 563
564 if (!err) 564 if (!err)
565 req->num_retrans++; 565 req->num_retrans++;
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 7afa2c3c788f..8620408af574 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -158,7 +158,9 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
158 158
159#define EXPIRES_IN_MS(tmo) DIV_ROUND_UP((tmo - jiffies) * 1000, HZ) 159#define EXPIRES_IN_MS(tmo) DIV_ROUND_UP((tmo - jiffies) * 1000, HZ)
160 160
161 if (icsk->icsk_pending == ICSK_TIME_RETRANS) { 161 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
162 icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
163 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
162 r->idiag_timer = 1; 164 r->idiag_timer = 1;
163 r->idiag_retrans = icsk->icsk_retransmits; 165 r->idiag_retrans = icsk->icsk_retransmits;
164 r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout); 166 r->idiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index f4fd23de9b13..e97d66a1fdde 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -23,6 +23,28 @@
23 23
24#include <net/sock.h> 24#include <net/sock.h>
25#include <net/inet_frag.h> 25#include <net/inet_frag.h>
26#include <net/inet_ecn.h>
27
28/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
29 * Value : 0xff if frame should be dropped.
30 * 0 or INET_ECN_CE value, to be ORed in to final iph->tos field
31 */
32const u8 ip_frag_ecn_table[16] = {
33 /* at least one fragment had CE, and others ECT_0 or ECT_1 */
34 [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = INET_ECN_CE,
35 [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = INET_ECN_CE,
36 [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = INET_ECN_CE,
37
38 /* invalid combinations : drop frame */
39 [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
40 [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
41 [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
42 [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
43 [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
44 [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
45 [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
46};
47EXPORT_SYMBOL(ip_frag_ecn_table);
26 48
27static void inet_frag_secret_rebuild(unsigned long dummy) 49static void inet_frag_secret_rebuild(unsigned long dummy)
28{ 50{
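
A worked example of how the table is meant to be used: each fragment contributes one bit, 1 << (tos & INET_ECN_MASK), the bits are ORed together in the queue's ecn field, and the final value indexes the table (constants open-coded from the patch):

#include <stdint.h>
#include <stdio.h>

#define INET_ECN_MASK	3
#define INET_ECN_CE	3

#define ECN_NOT_ECT	0x01
#define ECN_ECT_1	0x02
#define ECN_ECT_0	0x04
#define ECN_CE		0x08

static const uint8_t ecn_table[16] = {
	[ECN_CE | ECN_ECT_0]				= INET_ECN_CE,
	[ECN_CE | ECN_ECT_1]				= INET_ECN_CE,
	[ECN_CE | ECN_ECT_0 | ECN_ECT_1]		= INET_ECN_CE,
	[ECN_NOT_ECT | ECN_CE]				= 0xff,
	[ECN_NOT_ECT | ECN_ECT_0]			= 0xff,
	[ECN_NOT_ECT | ECN_ECT_1]			= 0xff,
	[ECN_NOT_ECT | ECN_ECT_0 | ECN_ECT_1]		= 0xff,
	[ECN_NOT_ECT | ECN_CE | ECN_ECT_0]		= 0xff,
	[ECN_NOT_ECT | ECN_CE | ECN_ECT_1]		= 0xff,
	[ECN_NOT_ECT | ECN_CE | ECN_ECT_0 | ECN_ECT_1]	= 0xff,
};

int main(void)
{
	/* fragments arrived with ECT(0) and CE: a valid combination,
	 * and the reassembled header must carry CE */
	uint8_t frag_tos[] = { 2 /* ECT(0) */, 3 /* CE */ };
	uint8_t ecn = 0;
	unsigned i;

	for (i = 0; i < sizeof(frag_tos); i++)
		ecn |= 1 << (frag_tos[i] & INET_ECN_MASK);

	printf("table[0x%x] = 0x%x\n", ecn, ecn_table[ecn]);	/* 0xc, 0x3 */
	return 0;
}
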
@@ -30,20 +52,27 @@ static void inet_frag_secret_rebuild(unsigned long dummy)
30 unsigned long now = jiffies; 52 unsigned long now = jiffies;
31 int i; 53 int i;
32 54
55 /* Per bucket lock NOT needed here, due to write lock protection */
33 write_lock(&f->lock); 56 write_lock(&f->lock);
57
34 get_random_bytes(&f->rnd, sizeof(u32)); 58 get_random_bytes(&f->rnd, sizeof(u32));
35 for (i = 0; i < INETFRAGS_HASHSZ; i++) { 59 for (i = 0; i < INETFRAGS_HASHSZ; i++) {
60 struct inet_frag_bucket *hb;
36 struct inet_frag_queue *q; 61 struct inet_frag_queue *q;
37 struct hlist_node *n; 62 struct hlist_node *n;
38 63
39 hlist_for_each_entry_safe(q, n, &f->hash[i], list) { 64 hb = &f->hash[i];
65 hlist_for_each_entry_safe(q, n, &hb->chain, list) {
40 unsigned int hval = f->hashfn(q); 66 unsigned int hval = f->hashfn(q);
41 67
42 if (hval != i) { 68 if (hval != i) {
69 struct inet_frag_bucket *hb_dest;
70
43 hlist_del(&q->list); 71 hlist_del(&q->list);
44 72
45 /* Relink to new hash chain. */ 73 /* Relink to new hash chain. */
46 hlist_add_head(&q->list, &f->hash[hval]); 74 hb_dest = &f->hash[hval];
75 hlist_add_head(&q->list, &hb_dest->chain);
47 } 76 }
48 } 77 }
49 } 78 }
@@ -56,9 +85,12 @@ void inet_frags_init(struct inet_frags *f)
56{ 85{
57 int i; 86 int i;
58 87
59 for (i = 0; i < INETFRAGS_HASHSZ; i++) 88 for (i = 0; i < INETFRAGS_HASHSZ; i++) {
60 INIT_HLIST_HEAD(&f->hash[i]); 89 struct inet_frag_bucket *hb = &f->hash[i];
61 90
91 spin_lock_init(&hb->chain_lock);
92 INIT_HLIST_HEAD(&hb->chain);
93 }
62 rwlock_init(&f->lock); 94 rwlock_init(&f->lock);
63 95
64 f->rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^ 96 f->rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
@@ -100,10 +132,18 @@ EXPORT_SYMBOL(inet_frags_exit_net);
100 132
101static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f) 133static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
102{ 134{
103 write_lock(&f->lock); 135 struct inet_frag_bucket *hb;
136 unsigned int hash;
137
138 read_lock(&f->lock);
139 hash = f->hashfn(fq);
140 hb = &f->hash[hash];
141
142 spin_lock(&hb->chain_lock);
104 hlist_del(&fq->list); 143 hlist_del(&fq->list);
105 fq->net->nqueues--; 144 spin_unlock(&hb->chain_lock);
106 write_unlock(&f->lock); 145
146 read_unlock(&f->lock);
107 inet_frag_lru_del(fq); 147 inet_frag_lru_del(fq);
108} 148}
109 149
@@ -182,6 +222,9 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
182 q = list_first_entry(&nf->lru_list, 222 q = list_first_entry(&nf->lru_list,
183 struct inet_frag_queue, lru_list); 223 struct inet_frag_queue, lru_list);
184 atomic_inc(&q->refcnt); 224 atomic_inc(&q->refcnt);
225 /* Remove q from list to avoid several CPUs grabbing it */
226 list_del_init(&q->lru_list);
227
185 spin_unlock(&nf->lru_lock); 228 spin_unlock(&nf->lru_lock);
186 229
187 spin_lock(&q->lock); 230 spin_lock(&q->lock);
@@ -202,27 +245,32 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
202 struct inet_frag_queue *qp_in, struct inet_frags *f, 245 struct inet_frag_queue *qp_in, struct inet_frags *f,
203 void *arg) 246 void *arg)
204{ 247{
248 struct inet_frag_bucket *hb;
205 struct inet_frag_queue *qp; 249 struct inet_frag_queue *qp;
206#ifdef CONFIG_SMP 250#ifdef CONFIG_SMP
207#endif 251#endif
208 unsigned int hash; 252 unsigned int hash;
209 253
210 write_lock(&f->lock); 254 read_lock(&f->lock); /* Protects against hash rebuild */
211 /* 255 /*
212 * While we stayed w/o the lock other CPU could update 256 * While we stayed w/o the lock other CPU could update
213 * the rnd seed, so we need to re-calculate the hash 257 * the rnd seed, so we need to re-calculate the hash
214 * chain. Fortunately the qp_in can be used to get one. 258 * chain. Fortunately the qp_in can be used to get one.
215 */ 259 */
216 hash = f->hashfn(qp_in); 260 hash = f->hashfn(qp_in);
261 hb = &f->hash[hash];
262 spin_lock(&hb->chain_lock);
263
217#ifdef CONFIG_SMP 264#ifdef CONFIG_SMP
218 /* With SMP race we have to recheck hash table, because 265 /* With SMP race we have to recheck hash table, because
219 * such entry could be created on other cpu, while we 266 * such entry could be created on other cpu, while we
220 * promoted read lock to write lock. 267 * released the hash bucket lock.
221 */ 268 */
222 hlist_for_each_entry(qp, &f->hash[hash], list) { 269 hlist_for_each_entry(qp, &hb->chain, list) {
223 if (qp->net == nf && f->match(qp, arg)) { 270 if (qp->net == nf && f->match(qp, arg)) {
224 atomic_inc(&qp->refcnt); 271 atomic_inc(&qp->refcnt);
225 write_unlock(&f->lock); 272 spin_unlock(&hb->chain_lock);
273 read_unlock(&f->lock);
226 qp_in->last_in |= INET_FRAG_COMPLETE; 274 qp_in->last_in |= INET_FRAG_COMPLETE;
227 inet_frag_put(qp_in, f); 275 inet_frag_put(qp_in, f);
228 return qp; 276 return qp;
@@ -234,9 +282,9 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
234 atomic_inc(&qp->refcnt); 282 atomic_inc(&qp->refcnt);
235 283
236 atomic_inc(&qp->refcnt); 284 atomic_inc(&qp->refcnt);
237 hlist_add_head(&qp->list, &f->hash[hash]); 285 hlist_add_head(&qp->list, &hb->chain);
238 nf->nqueues++; 286 spin_unlock(&hb->chain_lock);
239 write_unlock(&f->lock); 287 read_unlock(&f->lock);
240 inet_frag_lru_add(nf, qp); 288 inet_frag_lru_add(nf, qp);
241 return qp; 289 return qp;
242} 290}
@@ -277,17 +325,23 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
277 struct inet_frags *f, void *key, unsigned int hash) 325 struct inet_frags *f, void *key, unsigned int hash)
278 __releases(&f->lock) 326 __releases(&f->lock)
279{ 327{
328 struct inet_frag_bucket *hb;
280 struct inet_frag_queue *q; 329 struct inet_frag_queue *q;
281 int depth = 0; 330 int depth = 0;
282 331
283 hlist_for_each_entry(q, &f->hash[hash], list) { 332 hb = &f->hash[hash];
333
334 spin_lock(&hb->chain_lock);
335 hlist_for_each_entry(q, &hb->chain, list) {
284 if (q->net == nf && f->match(q, key)) { 336 if (q->net == nf && f->match(q, key)) {
285 atomic_inc(&q->refcnt); 337 atomic_inc(&q->refcnt);
338 spin_unlock(&hb->chain_lock);
286 read_unlock(&f->lock); 339 read_unlock(&f->lock);
287 return q; 340 return q;
288 } 341 }
289 depth++; 342 depth++;
290 } 343 }
344 spin_unlock(&hb->chain_lock);
291 read_unlock(&f->lock); 345 read_unlock(&f->lock);
292 346
293 if (depth <= INETFRAGS_MAXDEPTH) 347 if (depth <= INETFRAGS_MAXDEPTH)
diff --git a/net/ipv4/inet_lro.c b/net/ipv4/inet_lro.c
index cc280a3f4f96..1975f52933c5 100644
--- a/net/ipv4/inet_lro.c
+++ b/net/ipv4/inet_lro.c
@@ -29,6 +29,7 @@
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/if_vlan.h> 30#include <linux/if_vlan.h>
31#include <linux/inet_lro.h> 31#include <linux/inet_lro.h>
32#include <net/checksum.h>
32 33
33MODULE_LICENSE("GPL"); 34MODULE_LICENSE("GPL");
34MODULE_AUTHOR("Jan-Bernd Themann <themann@de.ibm.com>"); 35MODULE_AUTHOR("Jan-Bernd Themann <themann@de.ibm.com>");
@@ -114,11 +115,9 @@ static void lro_update_tcp_ip_header(struct net_lro_desc *lro_desc)
114 *(p+2) = lro_desc->tcp_rcv_tsecr; 115 *(p+2) = lro_desc->tcp_rcv_tsecr;
115 } 116 }
116 117
118 csum_replace2(&iph->check, iph->tot_len, htons(lro_desc->ip_tot_len));
117 iph->tot_len = htons(lro_desc->ip_tot_len); 119 iph->tot_len = htons(lro_desc->ip_tot_len);
118 120
119 iph->check = 0;
120 iph->check = ip_fast_csum((u8 *)lro_desc->iph, iph->ihl);
121
122 tcph->check = 0; 121 tcph->check = 0;
123 tcp_hdr_csum = csum_partial(tcph, TCP_HDR_LEN(tcph), 0); 122 tcp_hdr_csum = csum_partial(tcph, TCP_HDR_LEN(tcph), 0);
124 lro_desc->data_csum = csum_add(lro_desc->data_csum, tcp_hdr_csum); 123 lro_desc->data_csum = csum_add(lro_desc->data_csum, tcp_hdr_csum);
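
The csum_replace2() call substituted above is an RFC 1624 incremental update: when a single 16-bit header field changes, the checksum can be fixed up from the old and new values alone rather than recomputed over the whole header, which matters on the LRO fast path. A userspace sketch of the arithmetic, HC' = ~(~HC + ~m + m') in one's-complement (byte-order handling elided for brevity):

#include <stdint.h>
#include <stdio.h>

static uint16_t csum_replace2(uint16_t check, uint16_t old, uint16_t new_val)
{
	uint32_t sum = (uint16_t)~check + (uint16_t)~old + new_val;

	while (sum >> 16)			/* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* e.g. tot_len grows from 1500 to 2960 after an LRO merge;
	 * the starting checksum here is arbitrary */
	printf("0x%04x\n", csum_replace2(0xb1e6, 1500, 2960));
	return 0;
}
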
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index a6445b843ef4..938520668b2f 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -79,40 +79,11 @@ struct ipq {
79 struct inet_peer *peer; 79 struct inet_peer *peer;
80}; 80};
81 81
82/* RFC 3168 support :
83 * We want to check ECN values of all fragments, do detect invalid combinations.
84 * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value.
85 */
86#define IPFRAG_ECN_NOT_ECT 0x01 /* one frag had ECN_NOT_ECT */
87#define IPFRAG_ECN_ECT_1 0x02 /* one frag had ECN_ECT_1 */
88#define IPFRAG_ECN_ECT_0 0x04 /* one frag had ECN_ECT_0 */
89#define IPFRAG_ECN_CE 0x08 /* one frag had ECN_CE */
90
91static inline u8 ip4_frag_ecn(u8 tos) 82static inline u8 ip4_frag_ecn(u8 tos)
92{ 83{
93 return 1 << (tos & INET_ECN_MASK); 84 return 1 << (tos & INET_ECN_MASK);
94} 85}
95 86
96/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
97 * Value : 0xff if frame should be dropped.
98 * 0 or INET_ECN_CE value, to be ORed in to final iph->tos field
99 */
100static const u8 ip4_frag_ecn_table[16] = {
101 /* at least one fragment had CE, and others ECT_0 or ECT_1 */
102 [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = INET_ECN_CE,
103 [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = INET_ECN_CE,
104 [IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = INET_ECN_CE,
105
106 /* invalid combinations : drop frame */
107 [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
108 [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
109 [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
110 [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
111 [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
112 [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
113 [IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
114};
115
116static struct inet_frags ip4_frags; 87static struct inet_frags ip4_frags;
117 88
118int ip_frag_nqueues(struct net *net) 89int ip_frag_nqueues(struct net *net)
@@ -551,7 +522,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
551 522
552 ipq_kill(qp); 523 ipq_kill(qp);
553 524
554 ecn = ip4_frag_ecn_table[qp->ecn]; 525 ecn = ip_frag_ecn_table[qp->ecn];
555 if (unlikely(ecn == 0xff)) { 526 if (unlikely(ecn == 0xff)) {
556 err = -EINVAL; 527 err = -EINVAL;
557 goto out_fail; 528 goto out_fail;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 91d66dbde9c0..e5dfd2843f28 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -37,7 +37,7 @@
37#include <net/ip.h> 37#include <net/ip.h>
38#include <net/icmp.h> 38#include <net/icmp.h>
39#include <net/protocol.h> 39#include <net/protocol.h>
40#include <net/ipip.h> 40#include <net/ip_tunnels.h>
41#include <net/arp.h> 41#include <net/arp.h>
42#include <net/checksum.h> 42#include <net/checksum.h>
43#include <net/dsfield.h> 43#include <net/dsfield.h>
@@ -108,15 +108,6 @@
108 fatal route to network, even if it were you who configured 108 fatal route to network, even if it were you who configured
109 fatal static route: you are innocent. :-) 109 fatal static route: you are innocent. :-)
110 110
111
112
113 3. Really, ipv4/ipip.c, ipv4/ip_gre.c and ipv6/sit.c contain
114 practically identical code. It would be good to glue them
115 together, but it is not very evident, how to make them modular.
116 sit is integral part of IPv6, ipip and gre are naturally modular.
117 We could extract common parts (hash table, ioctl etc)
118 to a separate module (ip_tunnel.c).
119
120 Alexey Kuznetsov. 111 Alexey Kuznetsov.
121 */ 112 */
122 113
@@ -126,400 +117,135 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
126 117
127static struct rtnl_link_ops ipgre_link_ops __read_mostly; 118static struct rtnl_link_ops ipgre_link_ops __read_mostly;
128static int ipgre_tunnel_init(struct net_device *dev); 119static int ipgre_tunnel_init(struct net_device *dev);
129static void ipgre_tunnel_setup(struct net_device *dev);
130static int ipgre_tunnel_bind_dev(struct net_device *dev);
131
132/* Fallback tunnel: no source, no destination, no key, no options */
133
134#define HASH_SIZE 16
135 120
136static int ipgre_net_id __read_mostly; 121static int ipgre_net_id __read_mostly;
137struct ipgre_net { 122static int gre_tap_net_id __read_mostly;
138 struct ip_tunnel __rcu *tunnels[4][HASH_SIZE];
139
140 struct net_device *fb_tunnel_dev;
141};
142
143/* Tunnel hash table */
144
145/*
146 4 hash tables:
147
148 3: (remote,local)
149 2: (remote,*)
150 1: (*,local)
151 0: (*,*)
152 123
153 We require exact key match i.e. if a key is present in packet 124static __sum16 check_checksum(struct sk_buff *skb)
154 it will match only tunnel with the same key; if it is not present, 125{
155 it will match only keyless tunnel. 126 __sum16 csum = 0;
156
157 All keysless packets, if not matched configured keyless tunnels
158 will match fallback tunnel.
159 */
160 127
161#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF) 128 switch (skb->ip_summed) {
129 case CHECKSUM_COMPLETE:
130 csum = csum_fold(skb->csum);
162 131
163#define tunnels_r_l tunnels[3] 132 if (!csum)
164#define tunnels_r tunnels[2] 133 break;
165#define tunnels_l tunnels[1] 134 /* Fall through. */
166#define tunnels_wc tunnels[0]
167 135
168static struct rtnl_link_stats64 *ipgre_get_stats64(struct net_device *dev, 136 case CHECKSUM_NONE:
169 struct rtnl_link_stats64 *tot) 137 skb->csum = 0;
170{ 138 csum = __skb_checksum_complete(skb);
171 int i; 139 skb->ip_summed = CHECKSUM_COMPLETE;
172 140 break;
173 for_each_possible_cpu(i) {
174 const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
175 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
176 unsigned int start;
177
178 do {
179 start = u64_stats_fetch_begin_bh(&tstats->syncp);
180 rx_packets = tstats->rx_packets;
181 tx_packets = tstats->tx_packets;
182 rx_bytes = tstats->rx_bytes;
183 tx_bytes = tstats->tx_bytes;
184 } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
185
186 tot->rx_packets += rx_packets;
187 tot->tx_packets += tx_packets;
188 tot->rx_bytes += rx_bytes;
189 tot->tx_bytes += tx_bytes;
190 } 141 }
191 142
192 tot->multicast = dev->stats.multicast; 143 return csum;
193 tot->rx_crc_errors = dev->stats.rx_crc_errors;
194 tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
195 tot->rx_length_errors = dev->stats.rx_length_errors;
196 tot->rx_frame_errors = dev->stats.rx_frame_errors;
197 tot->rx_errors = dev->stats.rx_errors;
198
199 tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
200 tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
201 tot->tx_dropped = dev->stats.tx_dropped;
202 tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
203 tot->tx_errors = dev->stats.tx_errors;
204
205 return tot;
206} 144}
207 145
208/* Does key in tunnel parameters match packet */ 146static int ip_gre_calc_hlen(__be16 o_flags)
209static bool ipgre_key_match(const struct ip_tunnel_parm *p,
210 __be16 flags, __be32 key)
211{ 147{
212 if (p->i_flags & GRE_KEY) { 148 int addend = 4;
213 if (flags & GRE_KEY)
214 return key == p->i_key;
215 else
216 return false; /* key expected, none present */
217 } else
218 return !(flags & GRE_KEY);
219}
220 149
221/* Given src, dst and key, find appropriate for input tunnel. */ 150 if (o_flags&TUNNEL_CSUM)
151 addend += 4;
152 if (o_flags&TUNNEL_KEY)
153 addend += 4;
154 if (o_flags&TUNNEL_SEQ)
155 addend += 4;
156 return addend;
157}
222 158
223static struct ip_tunnel *ipgre_tunnel_lookup(struct net_device *dev, 159static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
224 __be32 remote, __be32 local, 160 bool *csum_err, int *hdr_len)
225 __be16 flags, __be32 key,
226 __be16 gre_proto)
227{ 161{
228 struct net *net = dev_net(dev); 162 struct iphdr *iph = ip_hdr(skb);
229 int link = dev->ifindex; 163 struct gre_base_hdr *greh;
230 unsigned int h0 = HASH(remote); 164 __be32 *options;
231 unsigned int h1 = HASH(key);
232 struct ip_tunnel *t, *cand = NULL;
233 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
234 int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
235 ARPHRD_ETHER : ARPHRD_IPGRE;
236 int score, cand_score = 4;
237
238 for_each_ip_tunnel_rcu(t, ign->tunnels_r_l[h0 ^ h1]) {
239 if (local != t->parms.iph.saddr ||
240 remote != t->parms.iph.daddr ||
241 !(t->dev->flags & IFF_UP))
242 continue;
243
244 if (!ipgre_key_match(&t->parms, flags, key))
245 continue;
246
247 if (t->dev->type != ARPHRD_IPGRE &&
248 t->dev->type != dev_type)
249 continue;
250
251 score = 0;
252 if (t->parms.link != link)
253 score |= 1;
254 if (t->dev->type != dev_type)
255 score |= 2;
256 if (score == 0)
257 return t;
258
259 if (score < cand_score) {
260 cand = t;
261 cand_score = score;
262 }
263 }
264
265 for_each_ip_tunnel_rcu(t, ign->tunnels_r[h0 ^ h1]) {
266 if (remote != t->parms.iph.daddr ||
267 !(t->dev->flags & IFF_UP))
268 continue;
269
270 if (!ipgre_key_match(&t->parms, flags, key))
271 continue;
272
273 if (t->dev->type != ARPHRD_IPGRE &&
274 t->dev->type != dev_type)
275 continue;
276
277 score = 0;
278 if (t->parms.link != link)
279 score |= 1;
280 if (t->dev->type != dev_type)
281 score |= 2;
282 if (score == 0)
283 return t;
284
285 if (score < cand_score) {
286 cand = t;
287 cand_score = score;
288 }
289 }
290 165
291 for_each_ip_tunnel_rcu(t, ign->tunnels_l[h1]) { 166 if (unlikely(!pskb_may_pull(skb, sizeof(struct gre_base_hdr))))
292 if ((local != t->parms.iph.saddr && 167 return -EINVAL;
293 (local != t->parms.iph.daddr ||
294 !ipv4_is_multicast(local))) ||
295 !(t->dev->flags & IFF_UP))
296 continue;
297
298 if (!ipgre_key_match(&t->parms, flags, key))
299 continue;
300
301 if (t->dev->type != ARPHRD_IPGRE &&
302 t->dev->type != dev_type)
303 continue;
304
305 score = 0;
306 if (t->parms.link != link)
307 score |= 1;
308 if (t->dev->type != dev_type)
309 score |= 2;
310 if (score == 0)
311 return t;
312
313 if (score < cand_score) {
314 cand = t;
315 cand_score = score;
316 }
317 }
318 168
319 for_each_ip_tunnel_rcu(t, ign->tunnels_wc[h1]) { 169 greh = (struct gre_base_hdr *)((u8 *)iph + (iph->ihl << 2));
320 if (t->parms.i_key != key || 170 if (unlikely(greh->flags & (GRE_VERSION | GRE_ROUTING)))
321 !(t->dev->flags & IFF_UP)) 171 return -EINVAL;
322 continue;
323
324 if (t->dev->type != ARPHRD_IPGRE &&
325 t->dev->type != dev_type)
326 continue;
327
328 score = 0;
329 if (t->parms.link != link)
330 score |= 1;
331 if (t->dev->type != dev_type)
332 score |= 2;
333 if (score == 0)
334 return t;
335
336 if (score < cand_score) {
337 cand = t;
338 cand_score = score;
339 }
340 }
341 172
342 if (cand != NULL) 173 tpi->flags = gre_flags_to_tnl_flags(greh->flags);
343 return cand; 174 *hdr_len = ip_gre_calc_hlen(tpi->flags);
344 175
345 dev = ign->fb_tunnel_dev; 176 if (!pskb_may_pull(skb, *hdr_len))
346 if (dev->flags & IFF_UP) 177 return -EINVAL;
347 return netdev_priv(dev);
348 178
349 return NULL; 179 tpi->proto = greh->protocol;
350}
351 180
352static struct ip_tunnel __rcu **__ipgre_bucket(struct ipgre_net *ign, 181 options = (__be32 *)(greh + 1);
353 struct ip_tunnel_parm *parms) 182 if (greh->flags & GRE_CSUM) {
354{ 183 if (check_checksum(skb)) {
355 __be32 remote = parms->iph.daddr; 184 *csum_err = true;
356 __be32 local = parms->iph.saddr; 185 return -EINVAL;
357 __be32 key = parms->i_key; 186 }
358 unsigned int h = HASH(key); 187 options++;
359 int prio = 0;
360
361 if (local)
362 prio |= 1;
363 if (remote && !ipv4_is_multicast(remote)) {
364 prio |= 2;
365 h ^= HASH(remote);
366 } 188 }
367 189
368 return &ign->tunnels[prio][h]; 190 if (greh->flags & GRE_KEY) {
369} 191 tpi->key = *options;
370 192 options++;
371static inline struct ip_tunnel __rcu **ipgre_bucket(struct ipgre_net *ign, 193 } else
372 struct ip_tunnel *t) 194 tpi->key = 0;
373{
374 return __ipgre_bucket(ign, &t->parms);
375}
376
377static void ipgre_tunnel_link(struct ipgre_net *ign, struct ip_tunnel *t)
378{
379 struct ip_tunnel __rcu **tp = ipgre_bucket(ign, t);
380 195
381 rcu_assign_pointer(t->next, rtnl_dereference(*tp)); 196 if (unlikely(greh->flags & GRE_SEQ)) {
382 rcu_assign_pointer(*tp, t); 197 tpi->seq = *options;
383} 198 options++;
199 } else
200 tpi->seq = 0;
384 201
385static void ipgre_tunnel_unlink(struct ipgre_net *ign, struct ip_tunnel *t) 202 /* WCCP version 1 and 2 protocol decoding.
386{ 203 * - Change protocol to IP
387 struct ip_tunnel __rcu **tp; 204 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
388 struct ip_tunnel *iter; 205 */
389 206 if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
390 for (tp = ipgre_bucket(ign, t); 207 tpi->proto = htons(ETH_P_IP);
391 (iter = rtnl_dereference(*tp)) != NULL; 208 if ((*(u8 *)options & 0xF0) != 0x40) {
392 tp = &iter->next) { 209 *hdr_len += 4;
393 if (t == iter) { 210 if (!pskb_may_pull(skb, *hdr_len))
394 rcu_assign_pointer(*tp, t->next); 211 return -EINVAL;
395 break;
396 } 212 }
397 } 213 }
398}
399
400static struct ip_tunnel *ipgre_tunnel_find(struct net *net,
401 struct ip_tunnel_parm *parms,
402 int type)
403{
404 __be32 remote = parms->iph.daddr;
405 __be32 local = parms->iph.saddr;
406 __be32 key = parms->i_key;
407 int link = parms->link;
408 struct ip_tunnel *t;
409 struct ip_tunnel __rcu **tp;
410 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
411
412 for (tp = __ipgre_bucket(ign, parms);
413 (t = rtnl_dereference(*tp)) != NULL;
414 tp = &t->next)
415 if (local == t->parms.iph.saddr &&
416 remote == t->parms.iph.daddr &&
417 key == t->parms.i_key &&
418 link == t->parms.link &&
419 type == t->dev->type)
420 break;
421
422 return t;
423}
424
425static struct ip_tunnel *ipgre_tunnel_locate(struct net *net,
426 struct ip_tunnel_parm *parms, int create)
427{
428 struct ip_tunnel *t, *nt;
429 struct net_device *dev;
430 char name[IFNAMSIZ];
431 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
432
433 t = ipgre_tunnel_find(net, parms, ARPHRD_IPGRE);
434 if (t || !create)
435 return t;
436
437 if (parms->name[0])
438 strlcpy(name, parms->name, IFNAMSIZ);
439 else
440 strcpy(name, "gre%d");
441
442 dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup);
443 if (!dev)
444 return NULL;
445
446 dev_net_set(dev, net);
447
448 nt = netdev_priv(dev);
449 nt->parms = *parms;
450 dev->rtnl_link_ops = &ipgre_link_ops;
451 214
452 dev->mtu = ipgre_tunnel_bind_dev(dev); 215 return 0;
453
454 if (register_netdevice(dev) < 0)
455 goto failed_free;
456
457 /* Can use a lockless transmit, unless we generate output sequences */
458 if (!(nt->parms.o_flags & GRE_SEQ))
459 dev->features |= NETIF_F_LLTX;
460
461 dev_hold(dev);
462 ipgre_tunnel_link(ign, nt);
463 return nt;
464
465failed_free:
466 free_netdev(dev);
467 return NULL;
468}
469
470static void ipgre_tunnel_uninit(struct net_device *dev)
471{
472 struct net *net = dev_net(dev);
473 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
474
475 ipgre_tunnel_unlink(ign, netdev_priv(dev));
476 dev_put(dev);
477} 216}
478 217
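/* [Editor's sketch, not part of the patch] parse_gre_header() above walks
 * the optional GRE words in their RFC 2784/2890 wire order: a 4-byte base
 * header (flags + protocol), then one 4-byte word each for checksum, key
 * and sequence number when the matching flag bit is set. A minimal
 * stand-alone model of the length arithmetic that ip_gre_calc_hlen() is
 * assumed to perform (the flag names here are illustrative):
 */
#include <stdio.h>

#define F_CSUM (1 << 0)               /* checksum + reserved word */
#define F_KEY  (1 << 1)               /* key word */
#define F_SEQ  (1 << 2)               /* sequence number word */

static int gre_hlen(unsigned int flags)
{
	int hlen = 4;                 /* base: 2 flag bytes + 2 protocol bytes */

	if (flags & F_CSUM)
		hlen += 4;
	if (flags & F_KEY)
		hlen += 4;
	if (flags & F_SEQ)
		hlen += 4;
	return hlen;
}

int main(void)
{
	printf("plain GRE: %d\n", gre_hlen(0));              /* 4  */
	printf("key+seq:   %d\n", gre_hlen(F_KEY | F_SEQ));  /* 12 */
	return 0;
}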
479
480static void ipgre_err(struct sk_buff *skb, u32 info) 218static void ipgre_err(struct sk_buff *skb, u32 info)
481{ 219{
482 220
483/* All the routers (except for Linux) return only 221 /* All the routers (except for Linux) return only
484 8 bytes of packet payload. It means, that precise relaying of 222 8 bytes of packet payload. It means, that precise relaying of
485 ICMP in the real Internet is absolutely infeasible. 223 ICMP in the real Internet is absolutely infeasible.
486 224
487 Moreover, Cisco "wise men" put GRE key to the third word 225 Moreover, Cisco "wise men" put GRE key to the third word
488 in GRE header. It makes impossible maintaining even soft state for keyed 226 in GRE header. It makes impossible maintaining even soft
489 GRE tunnels with enabled checksum. Tell them "thank you". 227 state for keyed GRE tunnels with enabled checksum. Tell
490 228 them "thank you".
491 Well, I wonder, rfc1812 was written by Cisco employee,
492 what the hell these idiots break standards established
493 by themselves???
494 */
495 229
230 Well, I wonder, rfc1812 was written by Cisco employee,
231 what the hell these idiots break standards established
232 by themselves???
233 */
234 struct net *net = dev_net(skb->dev);
235 struct ip_tunnel_net *itn;
496 const struct iphdr *iph = (const struct iphdr *)skb->data; 236 const struct iphdr *iph = (const struct iphdr *)skb->data;
497 __be16 *p = (__be16 *)(skb->data+(iph->ihl<<2));
498 int grehlen = (iph->ihl<<2) + 4;
499 const int type = icmp_hdr(skb)->type; 237 const int type = icmp_hdr(skb)->type;
500 const int code = icmp_hdr(skb)->code; 238 const int code = icmp_hdr(skb)->code;
501 struct ip_tunnel *t; 239 struct ip_tunnel *t;
502 __be16 flags; 240 struct tnl_ptk_info tpi;
503 __be32 key = 0; 241 int hdr_len;
242 bool csum_err = false;
504 243
505 flags = p[0]; 244 if (parse_gre_header(skb, &tpi, &csum_err, &hdr_len)) {
506 if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) { 245 if (!csum_err) /* ignore csum errors. */
507 if (flags&(GRE_VERSION|GRE_ROUTING))
508 return; 246 return;
509 if (flags&GRE_KEY) {
510 grehlen += 4;
511 if (flags&GRE_CSUM)
512 grehlen += 4;
513 }
514 } 247 }
515 248
516 /* If only 8 bytes returned, keyed message will be dropped here */
517 if (skb_headlen(skb) < grehlen)
518 return;
519
520 if (flags & GRE_KEY)
521 key = *(((__be32 *)p) + (grehlen / 4) - 1);
522
523 switch (type) { 249 switch (type) {
524 default: 250 default:
525 case ICMP_PARAMETERPROB: 251 case ICMP_PARAMETERPROB:
@@ -548,8 +274,13 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
548 break; 274 break;
549 } 275 }
550 276
551 t = ipgre_tunnel_lookup(skb->dev, iph->daddr, iph->saddr, 277 if (tpi.proto == htons(ETH_P_TEB))
552 flags, key, p[1]); 278 itn = net_generic(net, gre_tap_net_id);
279 else
280 itn = net_generic(net, ipgre_net_id);
281
282 t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi.flags,
283 iph->daddr, iph->saddr, tpi.key);
553 284
554 if (t == NULL) 285 if (t == NULL)
555 return; 286 return;
@@ -578,158 +309,33 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
578 t->err_time = jiffies; 309 t->err_time = jiffies;
579} 310}
580 311
581static inline u8
582ipgre_ecn_encapsulate(u8 tos, const struct iphdr *old_iph, struct sk_buff *skb)
583{
584 u8 inner = 0;
585 if (skb->protocol == htons(ETH_P_IP))
586 inner = old_iph->tos;
587 else if (skb->protocol == htons(ETH_P_IPV6))
588 inner = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
589 return INET_ECN_encapsulate(tos, inner);
590}
591
592static int ipgre_rcv(struct sk_buff *skb) 312static int ipgre_rcv(struct sk_buff *skb)
593{ 313{
314 struct net *net = dev_net(skb->dev);
315 struct ip_tunnel_net *itn;
594 const struct iphdr *iph; 316 const struct iphdr *iph;
595 u8 *h;
596 __be16 flags;
597 __sum16 csum = 0;
598 __be32 key = 0;
599 u32 seqno = 0;
600 struct ip_tunnel *tunnel; 317 struct ip_tunnel *tunnel;
601 int offset = 4; 318 struct tnl_ptk_info tpi;
602 __be16 gre_proto; 319 int hdr_len;
603 int err; 320 bool csum_err = false;
604 321
605 if (!pskb_may_pull(skb, 16)) 322 if (parse_gre_header(skb, &tpi, &csum_err, &hdr_len) < 0)
606 goto drop; 323 goto drop;
607 324
608 iph = ip_hdr(skb); 325 if (tpi.proto == htons(ETH_P_TEB))
609 h = skb->data; 326 itn = net_generic(net, gre_tap_net_id);
610 flags = *(__be16 *)h; 327 else
611 328 itn = net_generic(net, ipgre_net_id);
612 if (flags&(GRE_CSUM|GRE_KEY|GRE_ROUTING|GRE_SEQ|GRE_VERSION)) {
613 /* - Version must be 0.
614 - We do not support routing headers.
615 */
616 if (flags&(GRE_VERSION|GRE_ROUTING))
617 goto drop;
618
619 if (flags&GRE_CSUM) {
620 switch (skb->ip_summed) {
621 case CHECKSUM_COMPLETE:
622 csum = csum_fold(skb->csum);
623 if (!csum)
624 break;
625 /* fall through */
626 case CHECKSUM_NONE:
627 skb->csum = 0;
628 csum = __skb_checksum_complete(skb);
629 skb->ip_summed = CHECKSUM_COMPLETE;
630 }
631 offset += 4;
632 }
633 if (flags&GRE_KEY) {
634 key = *(__be32 *)(h + offset);
635 offset += 4;
636 }
637 if (flags&GRE_SEQ) {
638 seqno = ntohl(*(__be32 *)(h + offset));
639 offset += 4;
640 }
641 }
642 329
643 gre_proto = *(__be16 *)(h + 2); 330 iph = ip_hdr(skb);
331 tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi.flags,
332 iph->saddr, iph->daddr, tpi.key);
644 333
645 tunnel = ipgre_tunnel_lookup(skb->dev,
646 iph->saddr, iph->daddr, flags, key,
647 gre_proto);
648 if (tunnel) { 334 if (tunnel) {
649 struct pcpu_tstats *tstats; 335 ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error);
650
651 secpath_reset(skb);
652
653 skb->protocol = gre_proto;
654 /* WCCP version 1 and 2 protocol decoding.
655 * - Change protocol to IP
656 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
657 */
658 if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
659 skb->protocol = htons(ETH_P_IP);
660 if ((*(h + offset) & 0xF0) != 0x40)
661 offset += 4;
662 }
663
664 skb->mac_header = skb->network_header;
665 __pskb_pull(skb, offset);
666 skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
667 skb->pkt_type = PACKET_HOST;
668#ifdef CONFIG_NET_IPGRE_BROADCAST
669 if (ipv4_is_multicast(iph->daddr)) {
670 /* Looped back packet, drop it! */
671 if (rt_is_output_route(skb_rtable(skb)))
672 goto drop;
673 tunnel->dev->stats.multicast++;
674 skb->pkt_type = PACKET_BROADCAST;
675 }
676#endif
677
678 if (((flags&GRE_CSUM) && csum) ||
679 (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
680 tunnel->dev->stats.rx_crc_errors++;
681 tunnel->dev->stats.rx_errors++;
682 goto drop;
683 }
684 if (tunnel->parms.i_flags&GRE_SEQ) {
685 if (!(flags&GRE_SEQ) ||
686 (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
687 tunnel->dev->stats.rx_fifo_errors++;
688 tunnel->dev->stats.rx_errors++;
689 goto drop;
690 }
691 tunnel->i_seqno = seqno + 1;
692 }
693
694 /* Warning: All skb pointers will be invalidated! */
695 if (tunnel->dev->type == ARPHRD_ETHER) {
696 if (!pskb_may_pull(skb, ETH_HLEN)) {
697 tunnel->dev->stats.rx_length_errors++;
698 tunnel->dev->stats.rx_errors++;
699 goto drop;
700 }
701
702 iph = ip_hdr(skb);
703 skb->protocol = eth_type_trans(skb, tunnel->dev);
704 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
705 }
706
707 __skb_tunnel_rx(skb, tunnel->dev);
708
709 skb_reset_network_header(skb);
710 err = IP_ECN_decapsulate(iph, skb);
711 if (unlikely(err)) {
712 if (log_ecn_error)
713 net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
714 &iph->saddr, iph->tos);
715 if (err > 1) {
716 ++tunnel->dev->stats.rx_frame_errors;
717 ++tunnel->dev->stats.rx_errors;
718 goto drop;
719 }
720 }
721
722 tstats = this_cpu_ptr(tunnel->dev->tstats);
723 u64_stats_update_begin(&tstats->syncp);
724 tstats->rx_packets++;
725 tstats->rx_bytes += skb->len;
726 u64_stats_update_end(&tstats->syncp);
727
728 gro_cells_receive(&tunnel->gro_cells, skb);
729 return 0; 336 return 0;
730 } 337 }
731 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); 338 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
732
733drop: 339drop:
734 kfree_skb(skb); 340 kfree_skb(skb);
735 return 0; 341 return 0;
@@ -746,7 +352,7 @@ static struct sk_buff *handle_offloads(struct ip_tunnel *tunnel, struct sk_buff
746 skb_shinfo(skb)->gso_type |= SKB_GSO_GRE; 352 skb_shinfo(skb)->gso_type |= SKB_GSO_GRE;
747 return skb; 353 return skb;
748 } else if (skb->ip_summed == CHECKSUM_PARTIAL && 354 } else if (skb->ip_summed == CHECKSUM_PARTIAL &&
749 tunnel->parms.o_flags&GRE_CSUM) { 355 tunnel->parms.o_flags&TUNNEL_CSUM) {
750 err = skb_checksum_help(skb); 356 err = skb_checksum_help(skb);
751 if (unlikely(err)) 357 if (unlikely(err))
752 goto error; 358 goto error;
@@ -760,494 +366,157 @@ error:
760 return ERR_PTR(err); 366 return ERR_PTR(err);
761} 367}
762 368
763static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) 369static struct sk_buff *gre_build_header(struct sk_buff *skb,
370 const struct tnl_ptk_info *tpi,
371 int hdr_len)
764{ 372{
765 struct pcpu_tstats *tstats = this_cpu_ptr(dev->tstats); 373 struct gre_base_hdr *greh;
766 struct ip_tunnel *tunnel = netdev_priv(dev);
767 const struct iphdr *old_iph;
768 const struct iphdr *tiph;
769 struct flowi4 fl4;
770 u8 tos;
771 __be16 df;
772 struct rtable *rt; /* Route to the other host */
773 struct net_device *tdev; /* Device to other host */
774 struct iphdr *iph; /* Our new IP header */
775 unsigned int max_headroom; /* The extra header space needed */
776 int gre_hlen;
777 __be32 dst;
778 int mtu;
779 u8 ttl;
780 int err;
781 int pkt_len;
782
783 skb = handle_offloads(tunnel, skb);
784 if (IS_ERR(skb)) {
785 dev->stats.tx_dropped++;
786 return NETDEV_TX_OK;
787 }
788 374
789 if (!skb->encapsulation) { 375 skb_push(skb, hdr_len);
790 skb_reset_inner_headers(skb);
791 skb->encapsulation = 1;
792 }
793 376
794 old_iph = ip_hdr(skb); 377 greh = (struct gre_base_hdr *)skb->data;
378 greh->flags = tnl_flags_to_gre_flags(tpi->flags);
379 greh->protocol = tpi->proto;
795 380
796 if (dev->type == ARPHRD_ETHER) 381 if (tpi->flags&(TUNNEL_KEY|TUNNEL_CSUM|TUNNEL_SEQ)) {
797 IPCB(skb)->flags = 0; 382 __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
798 383
799 if (dev->header_ops && dev->type == ARPHRD_IPGRE) { 384 if (tpi->flags&TUNNEL_SEQ) {
800 gre_hlen = 0; 385 *ptr = tpi->seq;
801 tiph = (const struct iphdr *)skb->data; 386 ptr--;
802 } else {
803 gre_hlen = tunnel->hlen;
804 tiph = &tunnel->parms.iph;
805 }
806
807 if ((dst = tiph->daddr) == 0) {
808 /* NBMA tunnel */
809
810 if (skb_dst(skb) == NULL) {
811 dev->stats.tx_fifo_errors++;
812 goto tx_error;
813 } 387 }
814 388 if (tpi->flags&TUNNEL_KEY) {
815 if (skb->protocol == htons(ETH_P_IP)) { 389 *ptr = tpi->key;
816 rt = skb_rtable(skb); 390 ptr--;
817 dst = rt_nexthop(rt, old_iph->daddr);
818 } 391 }
819#if IS_ENABLED(CONFIG_IPV6) 392 if (tpi->flags&TUNNEL_CSUM &&
820 else if (skb->protocol == htons(ETH_P_IPV6)) { 393 !(skb_shinfo(skb)->gso_type & SKB_GSO_GRE)) {
821 const struct in6_addr *addr6; 394 *(__sum16 *)ptr = 0;
822 struct neighbour *neigh; 395 *(__sum16 *)ptr = csum_fold(skb_checksum(skb, 0,
823 bool do_tx_error_icmp; 396 skb->len, 0));
824 int addr_type;
825
826 neigh = dst_neigh_lookup(skb_dst(skb), &ipv6_hdr(skb)->daddr);
827 if (neigh == NULL)
828 goto tx_error;
829
830 addr6 = (const struct in6_addr *)&neigh->primary_key;
831 addr_type = ipv6_addr_type(addr6);
832
833 if (addr_type == IPV6_ADDR_ANY) {
834 addr6 = &ipv6_hdr(skb)->daddr;
835 addr_type = ipv6_addr_type(addr6);
836 }
837
838 if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
839 do_tx_error_icmp = true;
840 else {
841 do_tx_error_icmp = false;
842 dst = addr6->s6_addr32[3];
843 }
844 neigh_release(neigh);
845 if (do_tx_error_icmp)
846 goto tx_error_icmp;
847 } 397 }
848#endif
849 else
850 goto tx_error;
851 } 398 }
852 399
853 ttl = tiph->ttl; 400 return skb;
854 tos = tiph->tos; 401}
855 if (tos & 0x1) {
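/* [Editor's sketch, not part of the patch] gre_build_header() above fills
 * the option words back to front: ptr starts at the last 32-bit word of
 * the pushed header and steps backwards after each store, so the words
 * land on the wire in RFC 2890 order (checksum, key, sequence) right
 * after the 4-byte base header. A condensed model of that pointer walk
 * (flag names and types are illustrative):
 */
#include <stdint.h>

#define F_CSUM (1 << 0)
#define F_KEY  (1 << 1)
#define F_SEQ  (1 << 2)

static void fill_options(uint32_t *hdr, unsigned int flags,
			 uint32_t csum, uint32_t key, uint32_t seq)
{
	int nopts = !!(flags & F_CSUM) + !!(flags & F_KEY) + !!(flags & F_SEQ);
	uint32_t *ptr = &hdr[nopts];  /* hdr[0] is the base header; this is
				       * the last option word present */

	if (flags & F_SEQ)
		*ptr-- = seq;
	if (flags & F_KEY)
		*ptr-- = key;
	if (flags & F_CSUM)
		*ptr = csum;          /* first option word, per RFC 2890 */
}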
856 tos &= ~0x1;
857 if (skb->protocol == htons(ETH_P_IP))
858 tos = old_iph->tos;
859 else if (skb->protocol == htons(ETH_P_IPV6))
860 tos = ipv6_get_dsfield((const struct ipv6hdr *)old_iph);
861 }
862 402
863 rt = ip_route_output_gre(dev_net(dev), &fl4, dst, tiph->saddr, 403static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
864 tunnel->parms.o_key, RT_TOS(tos), 404 const struct iphdr *tnl_params,
865 tunnel->parms.link); 405 __be16 proto)
866 if (IS_ERR(rt)) { 406{
867 dev->stats.tx_carrier_errors++; 407 struct ip_tunnel *tunnel = netdev_priv(dev);
868 goto tx_error; 408 struct tnl_ptk_info tpi;
869 }
870 tdev = rt->dst.dev;
871 409
872 if (tdev == dev) { 410 if (likely(!skb->encapsulation)) {
873 ip_rt_put(rt); 411 skb_reset_inner_headers(skb);
874 dev->stats.collisions++; 412 skb->encapsulation = 1;
875 goto tx_error;
876 } 413 }
877 414
878 df = tiph->frag_off; 415 tpi.flags = tunnel->parms.o_flags;
879 if (df) 416 tpi.proto = proto;
880 mtu = dst_mtu(&rt->dst) - dev->hard_header_len - tunnel->hlen; 417 tpi.key = tunnel->parms.o_key;
881 else 418 if (tunnel->parms.o_flags & TUNNEL_SEQ)
882 mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu; 419 tunnel->o_seqno++;
883 420 tpi.seq = htonl(tunnel->o_seqno);
884 if (skb_dst(skb))
885 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
886
887 if (skb->protocol == htons(ETH_P_IP)) {
888 df |= (old_iph->frag_off&htons(IP_DF));
889 421
890 if (!skb_is_gso(skb) && 422 /* Push GRE header. */
891 (old_iph->frag_off&htons(IP_DF)) && 423 skb = gre_build_header(skb, &tpi, tunnel->hlen);
892 mtu < ntohs(old_iph->tot_len)) { 424 if (unlikely(!skb)) {
893 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); 425 dev->stats.tx_dropped++;
894 ip_rt_put(rt); 426 return;
895 goto tx_error;
896 }
897 } 427 }
898#if IS_ENABLED(CONFIG_IPV6)
899 else if (skb->protocol == htons(ETH_P_IPV6)) {
900 struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
901
902 if (rt6 && mtu < dst_mtu(skb_dst(skb)) && mtu >= IPV6_MIN_MTU) {
903 if ((tunnel->parms.iph.daddr &&
904 !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
905 rt6->rt6i_dst.plen == 128) {
906 rt6->rt6i_flags |= RTF_MODIFIED;
907 dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
908 }
909 }
910 428
911 if (!skb_is_gso(skb) && 429 ip_tunnel_xmit(skb, dev, tnl_params);
912 mtu >= IPV6_MIN_MTU && 430}
913 mtu < skb->len - tunnel->hlen + gre_hlen) {
914 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
915 ip_rt_put(rt);
916 goto tx_error;
917 }
918 }
919#endif
920 431
921 if (tunnel->err_count > 0) { 432static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
922 if (time_before(jiffies, 433 struct net_device *dev)
923 tunnel->err_time + IPTUNNEL_ERR_TIMEO)) { 434{
924 tunnel->err_count--; 435 struct ip_tunnel *tunnel = netdev_priv(dev);
436 const struct iphdr *tnl_params;
925 437
926 dst_link_failure(skb); 438 skb = handle_offloads(tunnel, skb);
927 } else 439 if (IS_ERR(skb))
928 tunnel->err_count = 0; 440 goto out;
929 }
930 441
931 max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->dst.header_len; 442 if (dev->header_ops) {
932 443 /* Need space for new headers */
933 if (skb_headroom(skb) < max_headroom || skb_shared(skb)|| 444 if (skb_cow_head(skb, dev->needed_headroom -
934 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { 445 (tunnel->hlen + sizeof(struct iphdr))))
935 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); 446 goto free_skb;
936 if (max_headroom > dev->needed_headroom)
937 dev->needed_headroom = max_headroom;
938 if (!new_skb) {
939 ip_rt_put(rt);
940 dev->stats.tx_dropped++;
941 dev_kfree_skb(skb);
942 return NETDEV_TX_OK;
943 }
944 if (skb->sk)
945 skb_set_owner_w(new_skb, skb->sk);
946 dev_kfree_skb(skb);
947 skb = new_skb;
948 old_iph = ip_hdr(skb);
949 /* Warning : tiph value might point to freed memory */
950 }
951 447
952 skb_push(skb, gre_hlen); 448 tnl_params = (const struct iphdr *)skb->data;
953 skb_reset_network_header(skb);
954 skb_set_transport_header(skb, sizeof(*iph));
955 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
956 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
957 IPSKB_REROUTED);
958 skb_dst_drop(skb);
959 skb_dst_set(skb, &rt->dst);
960
961 /*
962 * Push down and install the IPIP header.
963 */
964 449
965 iph = ip_hdr(skb); 450 /* Pull skb since ip_tunnel_xmit() needs skb->data pointing
966 iph->version = 4; 451 * to gre header.
967 iph->ihl = sizeof(struct iphdr) >> 2; 452 */
968 iph->frag_off = df; 453 skb_pull(skb, tunnel->hlen + sizeof(struct iphdr));
969 iph->protocol = IPPROTO_GRE; 454 } else {
970 iph->tos = ipgre_ecn_encapsulate(tos, old_iph, skb); 455 if (skb_cow_head(skb, dev->needed_headroom))
971 iph->daddr = fl4.daddr; 456 goto free_skb;
972 iph->saddr = fl4.saddr;
973 iph->ttl = ttl;
974
975 tunnel_ip_select_ident(skb, old_iph, &rt->dst);
976
977 if (ttl == 0) {
978 if (skb->protocol == htons(ETH_P_IP))
979 iph->ttl = old_iph->ttl;
980#if IS_ENABLED(CONFIG_IPV6)
981 else if (skb->protocol == htons(ETH_P_IPV6))
982 iph->ttl = ((const struct ipv6hdr *)old_iph)->hop_limit;
983#endif
984 else
985 iph->ttl = ip4_dst_hoplimit(&rt->dst);
986 }
987
988 ((__be16 *)(iph + 1))[0] = tunnel->parms.o_flags;
989 ((__be16 *)(iph + 1))[1] = (dev->type == ARPHRD_ETHER) ?
990 htons(ETH_P_TEB) : skb->protocol;
991
992 if (tunnel->parms.o_flags&(GRE_KEY|GRE_CSUM|GRE_SEQ)) {
993 __be32 *ptr = (__be32 *)(((u8 *)iph) + tunnel->hlen - 4);
994 457
995 if (tunnel->parms.o_flags&GRE_SEQ) { 458 tnl_params = &tunnel->parms.iph;
996 ++tunnel->o_seqno;
997 *ptr = htonl(tunnel->o_seqno);
998 ptr--;
999 }
1000 if (tunnel->parms.o_flags&GRE_KEY) {
1001 *ptr = tunnel->parms.o_key;
1002 ptr--;
1003 }
1004 /* Skip GRE checksum if skb is getting offloaded. */
1005 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_GRE) &&
1006 (tunnel->parms.o_flags&GRE_CSUM)) {
1007 int offset = skb_transport_offset(skb);
1008
1009 if (skb_has_shared_frag(skb)) {
1010 err = __skb_linearize(skb);
1011 if (err)
1012 goto tx_error;
1013 }
1014
1015 *ptr = 0;
1016 *(__sum16 *)ptr = csum_fold(skb_checksum(skb, offset,
1017 skb->len - offset,
1018 0));
1019 }
1020 } 459 }
1021 460
1022 nf_reset(skb); 461 __gre_xmit(skb, dev, tnl_params, skb->protocol);
1023 462
1024 pkt_len = skb->len - skb_transport_offset(skb);
1025 err = ip_local_out(skb);
1026 if (likely(net_xmit_eval(err) == 0)) {
1027 u64_stats_update_begin(&tstats->syncp);
1028 tstats->tx_bytes += pkt_len;
1029 tstats->tx_packets++;
1030 u64_stats_update_end(&tstats->syncp);
1031 } else {
1032 dev->stats.tx_errors++;
1033 dev->stats.tx_aborted_errors++;
1034 }
1035 return NETDEV_TX_OK; 463 return NETDEV_TX_OK;
1036 464
1037#if IS_ENABLED(CONFIG_IPV6) 465free_skb:
1038tx_error_icmp:
1039 dst_link_failure(skb);
1040#endif
1041tx_error:
1042 dev->stats.tx_errors++;
1043 dev_kfree_skb(skb); 466 dev_kfree_skb(skb);
467out:
468 dev->stats.tx_dropped++;
1044 return NETDEV_TX_OK; 469 return NETDEV_TX_OK;
1045} 470}
1046 471
1047static int ipgre_tunnel_bind_dev(struct net_device *dev) 472static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
473 struct net_device *dev)
1048{ 474{
1049 struct net_device *tdev = NULL; 475 struct ip_tunnel *tunnel = netdev_priv(dev);
1050 struct ip_tunnel *tunnel;
1051 const struct iphdr *iph;
1052 int hlen = LL_MAX_HEADER;
1053 int mtu = ETH_DATA_LEN;
1054 int addend = sizeof(struct iphdr) + 4;
1055
1056 tunnel = netdev_priv(dev);
1057 iph = &tunnel->parms.iph;
1058
1059 /* Guess output device to choose reasonable mtu and needed_headroom */
1060
1061 if (iph->daddr) {
1062 struct flowi4 fl4;
1063 struct rtable *rt;
1064
1065 rt = ip_route_output_gre(dev_net(dev), &fl4,
1066 iph->daddr, iph->saddr,
1067 tunnel->parms.o_key,
1068 RT_TOS(iph->tos),
1069 tunnel->parms.link);
1070 if (!IS_ERR(rt)) {
1071 tdev = rt->dst.dev;
1072 ip_rt_put(rt);
1073 }
1074
1075 if (dev->type != ARPHRD_ETHER)
1076 dev->flags |= IFF_POINTOPOINT;
1077 }
1078 476
1079 if (!tdev && tunnel->parms.link) 477 skb = handle_offloads(tunnel, skb);
1080 tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link); 478 if (IS_ERR(skb))
479 goto out;
1081 480
1082 if (tdev) { 481 if (skb_cow_head(skb, dev->needed_headroom))
1083 hlen = tdev->hard_header_len + tdev->needed_headroom; 482 goto free_skb;
1084 mtu = tdev->mtu;
1085 }
1086 dev->iflink = tunnel->parms.link;
1087
1088 /* Precalculate GRE options length */
1089 if (tunnel->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
1090 if (tunnel->parms.o_flags&GRE_CSUM)
1091 addend += 4;
1092 if (tunnel->parms.o_flags&GRE_KEY)
1093 addend += 4;
1094 if (tunnel->parms.o_flags&GRE_SEQ)
1095 addend += 4;
1096 }
1097 dev->needed_headroom = addend + hlen;
1098 mtu -= dev->hard_header_len + addend;
1099 483
1100 if (mtu < 68) 484 __gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
1101 mtu = 68;
1102 485
1103 tunnel->hlen = addend; 486 return NETDEV_TX_OK;
1104 /* TCP offload with GRE SEQ is not supported. */
1105 if (!(tunnel->parms.o_flags & GRE_SEQ)) {
1106 dev->features |= NETIF_F_GSO_SOFTWARE;
1107 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
1108 }
1109 487
1110 return mtu; 488free_skb:
489 dev_kfree_skb(skb);
490out:
491 dev->stats.tx_dropped++;
492 return NETDEV_TX_OK;
1111} 493}
1112 494
1113static int 495static int ipgre_tunnel_ioctl(struct net_device *dev,
1114ipgre_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) 496 struct ifreq *ifr, int cmd)
1115{ 497{
1116 int err = 0; 498 int err = 0;
1117 struct ip_tunnel_parm p; 499 struct ip_tunnel_parm p;
1118 struct ip_tunnel *t;
1119 struct net *net = dev_net(dev);
1120 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
1121
1122 switch (cmd) {
1123 case SIOCGETTUNNEL:
1124 t = NULL;
1125 if (dev == ign->fb_tunnel_dev) {
1126 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
1127 err = -EFAULT;
1128 break;
1129 }
1130 t = ipgre_tunnel_locate(net, &p, 0);
1131 }
1132 if (t == NULL)
1133 t = netdev_priv(dev);
1134 memcpy(&p, &t->parms, sizeof(p));
1135 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1136 err = -EFAULT;
1137 break;
1138
1139 case SIOCADDTUNNEL:
1140 case SIOCCHGTUNNEL:
1141 err = -EPERM;
1142 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1143 goto done;
1144
1145 err = -EFAULT;
1146 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1147 goto done;
1148
1149 err = -EINVAL;
1150 if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
1151 p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
1152 ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
1153 goto done;
1154 if (p.iph.ttl)
1155 p.iph.frag_off |= htons(IP_DF);
1156
1157 if (!(p.i_flags&GRE_KEY))
1158 p.i_key = 0;
1159 if (!(p.o_flags&GRE_KEY))
1160 p.o_key = 0;
1161
1162 t = ipgre_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);
1163
1164 if (dev != ign->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
1165 if (t != NULL) {
1166 if (t->dev != dev) {
1167 err = -EEXIST;
1168 break;
1169 }
1170 } else {
1171 unsigned int nflags = 0;
1172
1173 t = netdev_priv(dev);
1174
1175 if (ipv4_is_multicast(p.iph.daddr))
1176 nflags = IFF_BROADCAST;
1177 else if (p.iph.daddr)
1178 nflags = IFF_POINTOPOINT;
1179
1180 if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
1181 err = -EINVAL;
1182 break;
1183 }
1184 ipgre_tunnel_unlink(ign, t);
1185 synchronize_net();
1186 t->parms.iph.saddr = p.iph.saddr;
1187 t->parms.iph.daddr = p.iph.daddr;
1188 t->parms.i_key = p.i_key;
1189 t->parms.o_key = p.o_key;
1190 memcpy(dev->dev_addr, &p.iph.saddr, 4);
1191 memcpy(dev->broadcast, &p.iph.daddr, 4);
1192 ipgre_tunnel_link(ign, t);
1193 netdev_state_change(dev);
1194 }
1195 }
1196
1197 if (t) {
1198 err = 0;
1199 if (cmd == SIOCCHGTUNNEL) {
1200 t->parms.iph.ttl = p.iph.ttl;
1201 t->parms.iph.tos = p.iph.tos;
1202 t->parms.iph.frag_off = p.iph.frag_off;
1203 if (t->parms.link != p.link) {
1204 t->parms.link = p.link;
1205 dev->mtu = ipgre_tunnel_bind_dev(dev);
1206 netdev_state_change(dev);
1207 }
1208 }
1209 if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
1210 err = -EFAULT;
1211 } else
1212 err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
1213 break;
1214
1215 case SIOCDELTUNNEL:
1216 err = -EPERM;
1217 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1218 goto done;
1219
1220 if (dev == ign->fb_tunnel_dev) {
1221 err = -EFAULT;
1222 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1223 goto done;
1224 err = -ENOENT;
1225 if ((t = ipgre_tunnel_locate(net, &p, 0)) == NULL)
1226 goto done;
1227 err = -EPERM;
1228 if (t == netdev_priv(ign->fb_tunnel_dev))
1229 goto done;
1230 dev = t->dev;
1231 }
1232 unregister_netdevice(dev);
1233 err = 0;
1234 break;
1235 500
1236 default: 501 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1237 err = -EINVAL; 502 return -EFAULT;
503 if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
504 p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
505 ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING))) {
506 return -EINVAL;
1238 } 507 }
508 p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
509 p.o_flags = gre_flags_to_tnl_flags(p.o_flags);
1239 510
1240done: 511 err = ip_tunnel_ioctl(dev, &p, cmd);
1241 return err; 512 if (err)
1242} 513 return err;
1243 514
1244static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu) 515 p.i_flags = tnl_flags_to_gre_flags(p.i_flags);
1245{ 516 p.o_flags = tnl_flags_to_gre_flags(p.o_flags);
1246 struct ip_tunnel *tunnel = netdev_priv(dev); 517
1247 if (new_mtu < 68 || 518 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1248 new_mtu > 0xFFF8 - dev->hard_header_len - tunnel->hlen) 519 return -EFAULT;
1249 return -EINVAL;
1250 dev->mtu = new_mtu;
1251 return 0; 520 return 0;
1252} 521}
1253 522
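/* [Editor's sketch, not part of the patch] The ioctl path now converts
 * between the on-wire GRE flag bits and the driver-internal TUNNEL_* bits
 * in both directions. The GRE_F_* values below are the RFC 2784/2890 wire
 * bits; the internal bit assignments are illustrative, not the kernel's
 * actual TUNNEL_* definitions:
 */
#include <stdint.h>

#define GRE_F_CSUM 0x8000             /* C bit (host order for clarity) */
#define GRE_F_KEY  0x2000             /* K bit */
#define GRE_F_SEQ  0x1000             /* S bit */

#define TNL_F_CSUM 0x01               /* illustrative internal bits */
#define TNL_F_KEY  0x02
#define TNL_F_SEQ  0x04

static uint16_t gre_to_tnl(uint16_t gre)
{
	uint16_t t = 0;

	if (gre & GRE_F_CSUM)
		t |= TNL_F_CSUM;
	if (gre & GRE_F_KEY)
		t |= TNL_F_KEY;
	if (gre & GRE_F_SEQ)
		t |= TNL_F_SEQ;
	return t;                     /* tnl_to_gre() is the mirror image */
}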
@@ -1277,25 +546,23 @@ static int ipgre_tunnel_change_mtu(struct net_device *dev, int new_mtu)
1277 ... 546 ...
1278 ftp fec0:6666:6666::193.233.7.65 547 ftp fec0:6666:6666::193.233.7.65
1279 ... 548 ...
1280
1281 */ 549 */
1282
1283static int ipgre_header(struct sk_buff *skb, struct net_device *dev, 550static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
1284 unsigned short type, 551 unsigned short type,
1285 const void *daddr, const void *saddr, unsigned int len) 552 const void *daddr, const void *saddr, unsigned int len)
1286{ 553{
1287 struct ip_tunnel *t = netdev_priv(dev); 554 struct ip_tunnel *t = netdev_priv(dev);
1288 struct iphdr *iph = (struct iphdr *)skb_push(skb, t->hlen); 555 struct iphdr *iph;
1289 __be16 *p = (__be16 *)(iph+1); 556 struct gre_base_hdr *greh;
1290 557
1291 memcpy(iph, &t->parms.iph, sizeof(struct iphdr)); 558 iph = (struct iphdr *)skb_push(skb, t->hlen + sizeof(*iph));
1292 p[0] = t->parms.o_flags; 559 greh = (struct gre_base_hdr *)(iph+1);
1293 p[1] = htons(type); 560 greh->flags = tnl_flags_to_gre_flags(t->parms.o_flags);
561 greh->protocol = htons(type);
1294 562
1295 /* 563 memcpy(iph, &t->parms.iph, sizeof(struct iphdr));
1296 * Set the source hardware address.
1297 */
1298 564
565 /* Set the source hardware address. */
1299 if (saddr) 566 if (saddr)
1300 memcpy(&iph->saddr, saddr, 4); 567 memcpy(&iph->saddr, saddr, 4);
1301 if (daddr) 568 if (daddr)
@@ -1303,7 +570,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
1303 if (iph->daddr) 570 if (iph->daddr)
1304 return t->hlen; 571 return t->hlen;
1305 572
1306 return -t->hlen; 573 return -(t->hlen + sizeof(*iph));
1307} 574}
1308 575
1309static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr) 576static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
@@ -1357,31 +624,21 @@ static int ipgre_close(struct net_device *dev)
1357 } 624 }
1358 return 0; 625 return 0;
1359} 626}
1360
1361#endif 627#endif
1362 628
1363static const struct net_device_ops ipgre_netdev_ops = { 629static const struct net_device_ops ipgre_netdev_ops = {
1364 .ndo_init = ipgre_tunnel_init, 630 .ndo_init = ipgre_tunnel_init,
1365 .ndo_uninit = ipgre_tunnel_uninit, 631 .ndo_uninit = ip_tunnel_uninit,
1366#ifdef CONFIG_NET_IPGRE_BROADCAST 632#ifdef CONFIG_NET_IPGRE_BROADCAST
1367 .ndo_open = ipgre_open, 633 .ndo_open = ipgre_open,
1368 .ndo_stop = ipgre_close, 634 .ndo_stop = ipgre_close,
1369#endif 635#endif
1370 .ndo_start_xmit = ipgre_tunnel_xmit, 636 .ndo_start_xmit = ipgre_xmit,
1371 .ndo_do_ioctl = ipgre_tunnel_ioctl, 637 .ndo_do_ioctl = ipgre_tunnel_ioctl,
1372 .ndo_change_mtu = ipgre_tunnel_change_mtu, 638 .ndo_change_mtu = ip_tunnel_change_mtu,
1373 .ndo_get_stats64 = ipgre_get_stats64, 639 .ndo_get_stats64 = ip_tunnel_get_stats64,
1374}; 640};
1375 641
1376static void ipgre_dev_free(struct net_device *dev)
1377{
1378 struct ip_tunnel *tunnel = netdev_priv(dev);
1379
1380 gro_cells_destroy(&tunnel->gro_cells);
1381 free_percpu(dev->tstats);
1382 free_netdev(dev);
1383}
1384
1385#define GRE_FEATURES (NETIF_F_SG | \ 642#define GRE_FEATURES (NETIF_F_SG | \
1386 NETIF_F_FRAGLIST | \ 643 NETIF_F_FRAGLIST | \
1387 NETIF_F_HIGHDMA | \ 644 NETIF_F_HIGHDMA | \
@@ -1390,35 +647,48 @@ static void ipgre_dev_free(struct net_device *dev)
1390static void ipgre_tunnel_setup(struct net_device *dev) 647static void ipgre_tunnel_setup(struct net_device *dev)
1391{ 648{
1392 dev->netdev_ops = &ipgre_netdev_ops; 649 dev->netdev_ops = &ipgre_netdev_ops;
1393 dev->destructor = ipgre_dev_free; 650 ip_tunnel_setup(dev, ipgre_net_id);
651}
1394 652
1395 dev->type = ARPHRD_IPGRE; 653static void __gre_tunnel_init(struct net_device *dev)
1396 dev->needed_headroom = LL_MAX_HEADER + sizeof(struct iphdr) + 4; 654{
655 struct ip_tunnel *tunnel;
656
657 tunnel = netdev_priv(dev);
658 tunnel->hlen = ip_gre_calc_hlen(tunnel->parms.o_flags);
659 tunnel->parms.iph.protocol = IPPROTO_GRE;
660
661 dev->needed_headroom = LL_MAX_HEADER + sizeof(struct iphdr) + 4;
1397 dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 4; 662 dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 4;
1398 dev->flags = IFF_NOARP;
1399 dev->iflink = 0;
1400 dev->addr_len = 4;
1401 dev->features |= NETIF_F_NETNS_LOCAL;
1402 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1403 663
1404 dev->features |= GRE_FEATURES; 664 dev->features |= NETIF_F_NETNS_LOCAL | GRE_FEATURES;
1405 dev->hw_features |= GRE_FEATURES; 665 dev->hw_features |= GRE_FEATURES;
666
667 if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
668 /* TCP offload with GRE SEQ is not supported. */
669 dev->features |= NETIF_F_GSO_SOFTWARE;
670 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
671 /* Can use a lockless transmit, unless we generate
672 * output sequences
673 */
674 dev->features |= NETIF_F_LLTX;
675 }
1406} 676}
1407 677
1408static int ipgre_tunnel_init(struct net_device *dev) 678static int ipgre_tunnel_init(struct net_device *dev)
1409{ 679{
1410 struct ip_tunnel *tunnel; 680 struct ip_tunnel *tunnel = netdev_priv(dev);
1411 struct iphdr *iph; 681 struct iphdr *iph = &tunnel->parms.iph;
1412 int err;
1413 682
1414 tunnel = netdev_priv(dev); 683 __gre_tunnel_init(dev);
1415 iph = &tunnel->parms.iph;
1416 684
1417 tunnel->dev = dev; 685 memcpy(dev->dev_addr, &iph->saddr, 4);
1418 strcpy(tunnel->parms.name, dev->name); 686 memcpy(dev->broadcast, &iph->daddr, 4);
1419 687
1420 memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4); 688 dev->type = ARPHRD_IPGRE;
1421 memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4); 689 dev->flags = IFF_NOARP;
690 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
691 dev->addr_len = 4;
1422 692
1423 if (iph->daddr) { 693 if (iph->daddr) {
1424#ifdef CONFIG_NET_IPGRE_BROADCAST 694#ifdef CONFIG_NET_IPGRE_BROADCAST
@@ -1432,106 +702,30 @@ static int ipgre_tunnel_init(struct net_device *dev)
1432 } else 702 } else
1433 dev->header_ops = &ipgre_header_ops; 703 dev->header_ops = &ipgre_header_ops;
1434 704
1435 dev->tstats = alloc_percpu(struct pcpu_tstats); 705 return ip_tunnel_init(dev);
1436 if (!dev->tstats)
1437 return -ENOMEM;
1438
1439 err = gro_cells_init(&tunnel->gro_cells, dev);
1440 if (err) {
1441 free_percpu(dev->tstats);
1442 return err;
1443 }
1444
1445 return 0;
1446}
1447
1448static void ipgre_fb_tunnel_init(struct net_device *dev)
1449{
1450 struct ip_tunnel *tunnel = netdev_priv(dev);
1451 struct iphdr *iph = &tunnel->parms.iph;
1452
1453 tunnel->dev = dev;
1454 strcpy(tunnel->parms.name, dev->name);
1455
1456 iph->version = 4;
1457 iph->protocol = IPPROTO_GRE;
1458 iph->ihl = 5;
1459 tunnel->hlen = sizeof(struct iphdr) + 4;
1460
1461 dev_hold(dev);
1462} 706}
1463 707
1464
1465static const struct gre_protocol ipgre_protocol = { 708static const struct gre_protocol ipgre_protocol = {
1466 .handler = ipgre_rcv, 709 .handler = ipgre_rcv,
1467 .err_handler = ipgre_err, 710 .err_handler = ipgre_err,
1468}; 711};
1469 712
1470static void ipgre_destroy_tunnels(struct ipgre_net *ign, struct list_head *head)
1471{
1472 int prio;
1473
1474 for (prio = 0; prio < 4; prio++) {
1475 int h;
1476 for (h = 0; h < HASH_SIZE; h++) {
1477 struct ip_tunnel *t;
1478
1479 t = rtnl_dereference(ign->tunnels[prio][h]);
1480
1481 while (t != NULL) {
1482 unregister_netdevice_queue(t->dev, head);
1483 t = rtnl_dereference(t->next);
1484 }
1485 }
1486 }
1487}
1488
1489static int __net_init ipgre_init_net(struct net *net) 713static int __net_init ipgre_init_net(struct net *net)
1490{ 714{
1491 struct ipgre_net *ign = net_generic(net, ipgre_net_id); 715 return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
1492 int err;
1493
1494 ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "gre0",
1495 ipgre_tunnel_setup);
1496 if (!ign->fb_tunnel_dev) {
1497 err = -ENOMEM;
1498 goto err_alloc_dev;
1499 }
1500 dev_net_set(ign->fb_tunnel_dev, net);
1501
1502 ipgre_fb_tunnel_init(ign->fb_tunnel_dev);
1503 ign->fb_tunnel_dev->rtnl_link_ops = &ipgre_link_ops;
1504
1505 if ((err = register_netdev(ign->fb_tunnel_dev)))
1506 goto err_reg_dev;
1507
1508 rcu_assign_pointer(ign->tunnels_wc[0],
1509 netdev_priv(ign->fb_tunnel_dev));
1510 return 0;
1511
1512err_reg_dev:
1513 ipgre_dev_free(ign->fb_tunnel_dev);
1514err_alloc_dev:
1515 return err;
1516} 716}
1517 717
1518static void __net_exit ipgre_exit_net(struct net *net) 718static void __net_exit ipgre_exit_net(struct net *net)
1519{ 719{
1520 struct ipgre_net *ign; 720 struct ip_tunnel_net *itn = net_generic(net, ipgre_net_id);
1521 LIST_HEAD(list); 721 ip_tunnel_delete_net(itn);
1522
1523 ign = net_generic(net, ipgre_net_id);
1524 rtnl_lock();
1525 ipgre_destroy_tunnels(ign, &list);
1526 unregister_netdevice_many(&list);
1527 rtnl_unlock();
1528} 722}
1529 723
1530static struct pernet_operations ipgre_net_ops = { 724static struct pernet_operations ipgre_net_ops = {
1531 .init = ipgre_init_net, 725 .init = ipgre_init_net,
1532 .exit = ipgre_exit_net, 726 .exit = ipgre_exit_net,
1533 .id = &ipgre_net_id, 727 .id = &ipgre_net_id,
1534 .size = sizeof(struct ipgre_net), 728 .size = sizeof(struct ip_tunnel_net),
1535}; 729};
1536 730
1537static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[]) 731static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -1576,8 +770,8 @@ out:
1576 return ipgre_tunnel_validate(tb, data); 770 return ipgre_tunnel_validate(tb, data);
1577} 771}
1578 772
1579static void ipgre_netlink_parms(struct nlattr *data[], 773static void ipgre_netlink_parms(struct nlattr *data[], struct nlattr *tb[],
1580 struct ip_tunnel_parm *parms) 774 struct ip_tunnel_parm *parms)
1581{ 775{
1582 memset(parms, 0, sizeof(*parms)); 776 memset(parms, 0, sizeof(*parms));
1583 777
@@ -1590,10 +784,10 @@ static void ipgre_netlink_parms(struct nlattr *data[],
1590 parms->link = nla_get_u32(data[IFLA_GRE_LINK]); 784 parms->link = nla_get_u32(data[IFLA_GRE_LINK]);
1591 785
1592 if (data[IFLA_GRE_IFLAGS]) 786 if (data[IFLA_GRE_IFLAGS])
1593 parms->i_flags = nla_get_be16(data[IFLA_GRE_IFLAGS]); 787 parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));
1594 788
1595 if (data[IFLA_GRE_OFLAGS]) 789 if (data[IFLA_GRE_OFLAGS])
1596 parms->o_flags = nla_get_be16(data[IFLA_GRE_OFLAGS]); 790 parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));
1597 791
1598 if (data[IFLA_GRE_IKEY]) 792 if (data[IFLA_GRE_IKEY])
1599 parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]); 793 parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);
@@ -1617,148 +811,46 @@ static void ipgre_netlink_parms(struct nlattr *data[],
1617 parms->iph.frag_off = htons(IP_DF); 811 parms->iph.frag_off = htons(IP_DF);
1618} 812}
1619 813
1620static int ipgre_tap_init(struct net_device *dev) 814static int gre_tap_init(struct net_device *dev)
1621{ 815{
1622 struct ip_tunnel *tunnel; 816 __gre_tunnel_init(dev);
1623
1624 tunnel = netdev_priv(dev);
1625
1626 tunnel->dev = dev;
1627 strcpy(tunnel->parms.name, dev->name);
1628 817
1629 ipgre_tunnel_bind_dev(dev); 818 return ip_tunnel_init(dev);
1630
1631 dev->tstats = alloc_percpu(struct pcpu_tstats);
1632 if (!dev->tstats)
1633 return -ENOMEM;
1634
1635 return 0;
1636} 819}
1637 820
1638static const struct net_device_ops ipgre_tap_netdev_ops = { 821static const struct net_device_ops gre_tap_netdev_ops = {
1639 .ndo_init = ipgre_tap_init, 822 .ndo_init = gre_tap_init,
1640 .ndo_uninit = ipgre_tunnel_uninit, 823 .ndo_uninit = ip_tunnel_uninit,
1641 .ndo_start_xmit = ipgre_tunnel_xmit, 824 .ndo_start_xmit = gre_tap_xmit,
1642 .ndo_set_mac_address = eth_mac_addr, 825 .ndo_set_mac_address = eth_mac_addr,
1643 .ndo_validate_addr = eth_validate_addr, 826 .ndo_validate_addr = eth_validate_addr,
1644 .ndo_change_mtu = ipgre_tunnel_change_mtu, 827 .ndo_change_mtu = ip_tunnel_change_mtu,
1645 .ndo_get_stats64 = ipgre_get_stats64, 828 .ndo_get_stats64 = ip_tunnel_get_stats64,
1646}; 829};
1647 830
1648static void ipgre_tap_setup(struct net_device *dev) 831static void ipgre_tap_setup(struct net_device *dev)
1649{ 832{
1650
1651 ether_setup(dev); 833 ether_setup(dev);
1652 834 dev->netdev_ops = &gre_tap_netdev_ops;
1653 dev->netdev_ops = &ipgre_tap_netdev_ops; 835 ip_tunnel_setup(dev, gre_tap_net_id);
1654 dev->destructor = ipgre_dev_free;
1655
1656 dev->iflink = 0;
1657 dev->features |= NETIF_F_NETNS_LOCAL;
1658
1659 dev->features |= GRE_FEATURES;
1660 dev->hw_features |= GRE_FEATURES;
1661} 836}
1662 837
1663static int ipgre_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], 838static int ipgre_newlink(struct net *src_net, struct net_device *dev,
1664 struct nlattr *data[]) 839 struct nlattr *tb[], struct nlattr *data[])
1665{ 840{
1666 struct ip_tunnel *nt; 841 struct ip_tunnel_parm p;
1667 struct net *net = dev_net(dev);
1668 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
1669 int mtu;
1670 int err;
1671
1672 nt = netdev_priv(dev);
1673 ipgre_netlink_parms(data, &nt->parms);
1674
1675 if (ipgre_tunnel_find(net, &nt->parms, dev->type))
1676 return -EEXIST;
1677
1678 if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
1679 eth_hw_addr_random(dev);
1680
1681 mtu = ipgre_tunnel_bind_dev(dev);
1682 if (!tb[IFLA_MTU])
1683 dev->mtu = mtu;
1684
1685 /* Can use a lockless transmit, unless we generate output sequences */
1686 if (!(nt->parms.o_flags & GRE_SEQ))
1687 dev->features |= NETIF_F_LLTX;
1688
1689 err = register_netdevice(dev);
1690 if (err)
1691 goto out;
1692
1693 dev_hold(dev);
1694 ipgre_tunnel_link(ign, nt);
1695 842
1696out: 843 ipgre_netlink_parms(data, tb, &p);
1697 return err; 844 return ip_tunnel_newlink(dev, tb, &p);
1698} 845}
1699 846
1700static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[], 847static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
1701 struct nlattr *data[]) 848 struct nlattr *data[])
1702{ 849{
1703 struct ip_tunnel *t, *nt;
1704 struct net *net = dev_net(dev);
1705 struct ipgre_net *ign = net_generic(net, ipgre_net_id);
1706 struct ip_tunnel_parm p; 850 struct ip_tunnel_parm p;
1707 int mtu;
1708
1709 if (dev == ign->fb_tunnel_dev)
1710 return -EINVAL;
1711
1712 nt = netdev_priv(dev);
1713 ipgre_netlink_parms(data, &p);
1714
1715 t = ipgre_tunnel_locate(net, &p, 0);
1716
1717 if (t) {
1718 if (t->dev != dev)
1719 return -EEXIST;
1720 } else {
1721 t = nt;
1722
1723 if (dev->type != ARPHRD_ETHER) {
1724 unsigned int nflags = 0;
1725
1726 if (ipv4_is_multicast(p.iph.daddr))
1727 nflags = IFF_BROADCAST;
1728 else if (p.iph.daddr)
1729 nflags = IFF_POINTOPOINT;
1730
1731 if ((dev->flags ^ nflags) &
1732 (IFF_POINTOPOINT | IFF_BROADCAST))
1733 return -EINVAL;
1734 }
1735 851
1736 ipgre_tunnel_unlink(ign, t); 852 ipgre_netlink_parms(data, tb, &p);
1737 t->parms.iph.saddr = p.iph.saddr; 853 return ip_tunnel_changelink(dev, tb, &p);
1738 t->parms.iph.daddr = p.iph.daddr;
1739 t->parms.i_key = p.i_key;
1740 if (dev->type != ARPHRD_ETHER) {
1741 memcpy(dev->dev_addr, &p.iph.saddr, 4);
1742 memcpy(dev->broadcast, &p.iph.daddr, 4);
1743 }
1744 ipgre_tunnel_link(ign, t);
1745 netdev_state_change(dev);
1746 }
1747
1748 t->parms.o_key = p.o_key;
1749 t->parms.iph.ttl = p.iph.ttl;
1750 t->parms.iph.tos = p.iph.tos;
1751 t->parms.iph.frag_off = p.iph.frag_off;
1752
1753 if (t->parms.link != p.link) {
1754 t->parms.link = p.link;
1755 mtu = ipgre_tunnel_bind_dev(dev);
1756 if (!tb[IFLA_MTU])
1757 dev->mtu = mtu;
1758 netdev_state_change(dev);
1759 }
1760
1761 return 0;
1762} 854}
1763 855
1764static size_t ipgre_get_size(const struct net_device *dev) 856static size_t ipgre_get_size(const struct net_device *dev)
@@ -1793,8 +885,8 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
1793 struct ip_tunnel_parm *p = &t->parms; 885 struct ip_tunnel_parm *p = &t->parms;
1794 886
1795 if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || 887 if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
1796 nla_put_be16(skb, IFLA_GRE_IFLAGS, p->i_flags) || 888 nla_put_be16(skb, IFLA_GRE_IFLAGS, tnl_flags_to_gre_flags(p->i_flags)) ||
1797 nla_put_be16(skb, IFLA_GRE_OFLAGS, p->o_flags) || 889 nla_put_be16(skb, IFLA_GRE_OFLAGS, tnl_flags_to_gre_flags(p->o_flags)) ||
1798 nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) || 890 nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
1799 nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) || 891 nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
1800 nla_put_be32(skb, IFLA_GRE_LOCAL, p->iph.saddr) || 892 nla_put_be32(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
@@ -1832,6 +924,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
1832 .validate = ipgre_tunnel_validate, 924 .validate = ipgre_tunnel_validate,
1833 .newlink = ipgre_newlink, 925 .newlink = ipgre_newlink,
1834 .changelink = ipgre_changelink, 926 .changelink = ipgre_changelink,
927 .dellink = ip_tunnel_dellink,
1835 .get_size = ipgre_get_size, 928 .get_size = ipgre_get_size,
1836 .fill_info = ipgre_fill_info, 929 .fill_info = ipgre_fill_info,
1837}; 930};
@@ -1845,13 +938,28 @@ static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
1845 .validate = ipgre_tap_validate, 938 .validate = ipgre_tap_validate,
1846 .newlink = ipgre_newlink, 939 .newlink = ipgre_newlink,
1847 .changelink = ipgre_changelink, 940 .changelink = ipgre_changelink,
941 .dellink = ip_tunnel_dellink,
1848 .get_size = ipgre_get_size, 942 .get_size = ipgre_get_size,
1849 .fill_info = ipgre_fill_info, 943 .fill_info = ipgre_fill_info,
1850}; 944};
1851 945
1852/* 946static int __net_init ipgre_tap_init_net(struct net *net)
1853 * And now the modules code and kernel interface. 947{
1854 */ 948 return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, NULL);
949}
950
951static void __net_exit ipgre_tap_exit_net(struct net *net)
952{
953 struct ip_tunnel_net *itn = net_generic(net, gre_tap_net_id);
954 ip_tunnel_delete_net(itn);
955}
956
957static struct pernet_operations ipgre_tap_net_ops = {
958 .init = ipgre_tap_init_net,
959 .exit = ipgre_tap_exit_net,
960 .id = &gre_tap_net_id,
961 .size = sizeof(struct ip_tunnel_net),
962};
1855 963
1856static int __init ipgre_init(void) 964static int __init ipgre_init(void)
1857{ 965{
@@ -1863,6 +971,10 @@ static int __init ipgre_init(void)
1863 if (err < 0) 971 if (err < 0)
1864 return err; 972 return err;
1865 973
974 err = register_pernet_device(&ipgre_tap_net_ops);
975 if (err < 0)
976 goto pnet_tap_failed;
977
1866 err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO); 978 err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
1867 if (err < 0) { 979 if (err < 0) {
1868 pr_info("%s: can't add protocol\n", __func__); 980 pr_info("%s: can't add protocol\n", __func__);
@@ -1877,16 +989,17 @@ static int __init ipgre_init(void)
1877 if (err < 0) 989 if (err < 0)
1878 goto tap_ops_failed; 990 goto tap_ops_failed;
1879 991
1880out: 992 return 0;
1881 return err;
1882 993
1883tap_ops_failed: 994tap_ops_failed:
1884 rtnl_link_unregister(&ipgre_link_ops); 995 rtnl_link_unregister(&ipgre_link_ops);
1885rtnl_link_failed: 996rtnl_link_failed:
1886 gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO); 997 gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
1887add_proto_failed: 998add_proto_failed:
999 unregister_pernet_device(&ipgre_tap_net_ops);
1000pnet_tap_failed:
1888 unregister_pernet_device(&ipgre_net_ops); 1001 unregister_pernet_device(&ipgre_net_ops);
1889 goto out; 1002 return err;
1890} 1003}
1891 1004
1892static void __exit ipgre_fini(void) 1005static void __exit ipgre_fini(void)
@@ -1895,6 +1008,7 @@ static void __exit ipgre_fini(void)
1895 rtnl_link_unregister(&ipgre_link_ops); 1008 rtnl_link_unregister(&ipgre_link_ops);
1896 if (gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0) 1009 if (gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO) < 0)
1897 pr_info("%s: can't remove protocol\n", __func__); 1010 pr_info("%s: can't remove protocol\n", __func__);
1011 unregister_pernet_device(&ipgre_tap_net_ops);
1898 unregister_pernet_device(&ipgre_net_ops); 1012 unregister_pernet_device(&ipgre_net_ops);
1899} 1013}
1900 1014
@@ -1904,3 +1018,4 @@ MODULE_LICENSE("GPL");
1904MODULE_ALIAS_RTNL_LINK("gre"); 1018MODULE_ALIAS_RTNL_LINK("gre");
1905MODULE_ALIAS_RTNL_LINK("gretap"); 1019MODULE_ALIAS_RTNL_LINK("gretap");
1906MODULE_ALIAS_NETDEV("gre0"); 1020MODULE_ALIAS_NETDEV("gre0");
1021MODULE_ALIAS_NETDEV("gretap0");
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 5e12dca7b3dd..147abf5275aa 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -430,8 +430,7 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
430 to->tc_index = from->tc_index; 430 to->tc_index = from->tc_index;
431#endif 431#endif
432 nf_copy(to, from); 432 nf_copy(to, from);
433#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ 433#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
434 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
435 to->nf_trace = from->nf_trace; 434 to->nf_trace = from->nf_trace;
436#endif 435#endif
437#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE) 436#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
new file mode 100644
index 000000000000..e4147ec1665a
--- /dev/null
+++ b/net/ipv4/ip_tunnel.c
@@ -0,0 +1,1035 @@
1/*
2 * Copyright (c) 2013 Nicira, Inc.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public
6 * License as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16 * 02110-1301, USA
17 */
18
19#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
21#include <linux/capability.h>
22#include <linux/module.h>
23#include <linux/types.h>
24#include <linux/kernel.h>
25#include <linux/slab.h>
26#include <linux/uaccess.h>
27#include <linux/skbuff.h>
28#include <linux/netdevice.h>
29#include <linux/in.h>
30#include <linux/tcp.h>
31#include <linux/udp.h>
32#include <linux/if_arp.h>
33#include <linux/mroute.h>
34#include <linux/init.h>
35#include <linux/in6.h>
36#include <linux/inetdevice.h>
37#include <linux/igmp.h>
38#include <linux/netfilter_ipv4.h>
39#include <linux/etherdevice.h>
40#include <linux/if_ether.h>
41#include <linux/if_vlan.h>
42#include <linux/rculist.h>
43
44#include <net/sock.h>
45#include <net/ip.h>
46#include <net/icmp.h>
47#include <net/protocol.h>
48#include <net/ip_tunnels.h>
49#include <net/arp.h>
50#include <net/checksum.h>
51#include <net/dsfield.h>
52#include <net/inet_ecn.h>
53#include <net/xfrm.h>
54#include <net/net_namespace.h>
55#include <net/netns/generic.h>
56#include <net/rtnetlink.h>
57
58#if IS_ENABLED(CONFIG_IPV6)
59#include <net/ipv6.h>
60#include <net/ip6_fib.h>
61#include <net/ip6_route.h>
62#endif
63
64static unsigned int ip_tunnel_hash(struct ip_tunnel_net *itn,
65 __be32 key, __be32 remote)
66{
67 return hash_32((__force u32)key ^ (__force u32)remote,
68 IP_TNL_HASH_BITS);
69}
70
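/* [Editor's sketch, not part of the patch] hash_32() is a golden-ratio
 * multiplicative hash: multiply by a 32-bit constant and keep the top
 * IP_TNL_HASH_BITS bits. A stand-alone model; the constant matches
 * linux/hash.h of this era, but treat it as illustrative:
 */
#include <stdint.h>

static unsigned int hash32(uint32_t val, unsigned int bits)
{
	return (uint32_t)(val * 0x9e370001u) >> (32 - bits);
}
/* bucket = hash32(key ^ remote, IP_TNL_HASH_BITS), as above */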
71/* Often modified stats are per cpu, other are shared (netdev->stats) */
72struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
73 struct rtnl_link_stats64 *tot)
74{
75 int i;
76
77 for_each_possible_cpu(i) {
78 const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
79 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
80 unsigned int start;
81
82 do {
83 start = u64_stats_fetch_begin_bh(&tstats->syncp);
84 rx_packets = tstats->rx_packets;
85 tx_packets = tstats->tx_packets;
86 rx_bytes = tstats->rx_bytes;
87 tx_bytes = tstats->tx_bytes;
88 } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
89
90 tot->rx_packets += rx_packets;
91 tot->tx_packets += tx_packets;
92 tot->rx_bytes += rx_bytes;
93 tot->tx_bytes += tx_bytes;
94 }
95
96 tot->multicast = dev->stats.multicast;
97
98 tot->rx_crc_errors = dev->stats.rx_crc_errors;
99 tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
100 tot->rx_length_errors = dev->stats.rx_length_errors;
101 tot->rx_frame_errors = dev->stats.rx_frame_errors;
102 tot->rx_errors = dev->stats.rx_errors;
103
104 tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
105 tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
106 tot->tx_dropped = dev->stats.tx_dropped;
107 tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
108 tot->tx_errors = dev->stats.tx_errors;
109
110 tot->collisions = dev->stats.collisions;
111
112 return tot;
113}
114EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
115
116static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p,
117 __be16 flags, __be32 key)
118{
119 if (p->i_flags & TUNNEL_KEY) {
120 if (flags & TUNNEL_KEY)
121 return key == p->i_key;
122 else
123 /* key expected, none present */
124 return false;
125 } else
126 return !(flags & TUNNEL_KEY);
127}
128
129/* Fallback tunnel: no source, no destination, no key, no options
130
131 Tunnel hash table:
132 We require an exact key match, i.e. if a key is present in the
133 packet it will match only a tunnel with the same key; if no key is
134 present, it will match only a keyless tunnel.
135
136 All keyless packets, if not matched against a configured keyless
137 tunnel, will match the fallback tunnel.
138 Given src, dst and key, find the appropriate tunnel for input.
139*/
140struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
141 int link, __be16 flags,
142 __be32 remote, __be32 local,
143 __be32 key)
144{
145 unsigned int hash;
146 struct ip_tunnel *t, *cand = NULL;
147 struct hlist_head *head;
148
149 hash = ip_tunnel_hash(itn, key, remote);
150 head = &itn->tunnels[hash];
151
152 hlist_for_each_entry_rcu(t, head, hash_node) {
153 if (local != t->parms.iph.saddr ||
154 remote != t->parms.iph.daddr ||
155 !(t->dev->flags & IFF_UP))
156 continue;
157
158 if (!ip_tunnel_key_match(&t->parms, flags, key))
159 continue;
160
161 if (t->parms.link == link)
162 return t;
163 else
164 cand = t;
165 }
166
167 hlist_for_each_entry_rcu(t, head, hash_node) {
168 if (remote != t->parms.iph.daddr ||
169 !(t->dev->flags & IFF_UP))
170 continue;
171
172 if (!ip_tunnel_key_match(&t->parms, flags, key))
173 continue;
174
175 if (t->parms.link == link)
176 return t;
177 else if (!cand)
178 cand = t;
179 }
180
181 hash = ip_tunnel_hash(itn, key, 0);
182 head = &itn->tunnels[hash];
183
184 hlist_for_each_entry_rcu(t, head, hash_node) {
185 if ((local != t->parms.iph.saddr &&
186 (local != t->parms.iph.daddr ||
187 !ipv4_is_multicast(local))) ||
188 !(t->dev->flags & IFF_UP))
189 continue;
190
191 if (!ip_tunnel_key_match(&t->parms, flags, key))
192 continue;
193
194 if (t->parms.link == link)
195 return t;
196 else if (!cand)
197 cand = t;
198 }
199
200 if (flags & TUNNEL_NO_KEY)
201 goto skip_key_lookup;
202
203 hlist_for_each_entry_rcu(t, head, hash_node) {
204 if (t->parms.i_key != key ||
205 !(t->dev->flags & IFF_UP))
206 continue;
207
208 if (t->parms.link == link)
209 return t;
210 else if (!cand)
211 cand = t;
212 }
213
214skip_key_lookup:
215 if (cand)
216 return cand;
217
218 if (itn->fb_tunnel_dev && itn->fb_tunnel_dev->flags & IFF_UP)
219 return netdev_priv(itn->fb_tunnel_dev);
220
221
222 return NULL;
223}
224EXPORT_SYMBOL_GPL(ip_tunnel_lookup);
225
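/* [Editor's sketch, not part of the patch] ip_tunnel_lookup() above runs
 * four passes of decreasing specificity -- (local, remote, key), then
 * (remote, key), then (local or multicast local, key), then key alone --
 * and within each pass prefers an entry bound to the ingress link while
 * remembering the first looser hit as a candidate. The core pattern in
 * isolation:
 */
struct ent {
	int link;
};

static struct ent *pick(struct ent *tbl, int n, int link)
{
	struct ent *cand = NULL;
	int i;

	for (i = 0; i < n; i++) {
		if (tbl[i].link == link)
			return &tbl[i];       /* exact device match wins */
		if (!cand)
			cand = &tbl[i];       /* remember first wildcard hit */
	}
	return cand;                          /* may be NULL: try next pass */
}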
226static struct hlist_head *ip_bucket(struct ip_tunnel_net *itn,
227 struct ip_tunnel_parm *parms)
228{
229 unsigned int h;
230 __be32 remote;
231
232 if (parms->iph.daddr && !ipv4_is_multicast(parms->iph.daddr))
233 remote = parms->iph.daddr;
234 else
235 remote = 0;
236
237 h = ip_tunnel_hash(itn, parms->i_key, remote);
238 return &itn->tunnels[h];
239}
240
241static void ip_tunnel_add(struct ip_tunnel_net *itn, struct ip_tunnel *t)
242{
243 struct hlist_head *head = ip_bucket(itn, &t->parms);
244
245 hlist_add_head_rcu(&t->hash_node, head);
246}
247
248static void ip_tunnel_del(struct ip_tunnel *t)
249{
250 hlist_del_init_rcu(&t->hash_node);
251}
252
253static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn,
254 struct ip_tunnel_parm *parms,
255 int type)
256{
257 __be32 remote = parms->iph.daddr;
258 __be32 local = parms->iph.saddr;
259 __be32 key = parms->i_key;
260 int link = parms->link;
261 struct ip_tunnel *t = NULL;
262 struct hlist_head *head = ip_bucket(itn, parms);
263
264 hlist_for_each_entry_rcu(t, head, hash_node) {
265 if (local == t->parms.iph.saddr &&
266 remote == t->parms.iph.daddr &&
267 key == t->parms.i_key &&
268 link == t->parms.link &&
269 type == t->dev->type)
270 break;
271 }
272 return t;
273}
274
275static struct net_device *__ip_tunnel_create(struct net *net,
276 const struct rtnl_link_ops *ops,
277 struct ip_tunnel_parm *parms)
278{
279 int err;
280 struct ip_tunnel *tunnel;
281 struct net_device *dev;
282 char name[IFNAMSIZ];
283
284 if (parms->name[0])
285 strlcpy(name, parms->name, IFNAMSIZ);
286 else {
287 if (strlen(ops->kind) > (IFNAMSIZ - 3)) {
288 err = -E2BIG;
289 goto failed;
290 }
291 strlcpy(name, ops->kind, IFNAMSIZ);
292 strncat(name, "%d", 2);
293 }
294
295 ASSERT_RTNL();
296 dev = alloc_netdev(ops->priv_size, name, ops->setup);
297 if (!dev) {
298 err = -ENOMEM;
299 goto failed;
300 }
301 dev_net_set(dev, net);
302
303 dev->rtnl_link_ops = ops;
304
305 tunnel = netdev_priv(dev);
306 tunnel->parms = *parms;
307
308 err = register_netdevice(dev);
309 if (err)
310 goto failed_free;
311
312 return dev;
313
314failed_free:
315 free_netdev(dev);
316failed:
317 return ERR_PTR(err);
318}
319
320static inline struct rtable *ip_route_output_tunnel(struct net *net,
321 struct flowi4 *fl4,
322 int proto,
323 __be32 daddr, __be32 saddr,
324 __be32 key, __u8 tos, int oif)
325{
326 memset(fl4, 0, sizeof(*fl4));
327 fl4->flowi4_oif = oif;
328 fl4->daddr = daddr;
329 fl4->saddr = saddr;
330 fl4->flowi4_tos = tos;
331 fl4->flowi4_proto = proto;
332 fl4->fl4_gre_key = key;
333 return ip_route_output_key(net, fl4);
334}
335
336static int ip_tunnel_bind_dev(struct net_device *dev)
337{
338 struct net_device *tdev = NULL;
339 struct ip_tunnel *tunnel = netdev_priv(dev);
340 const struct iphdr *iph;
341 int hlen = LL_MAX_HEADER;
342 int mtu = ETH_DATA_LEN;
343 int t_hlen = tunnel->hlen + sizeof(struct iphdr);
344
345 iph = &tunnel->parms.iph;
346
347 /* Guess output device to choose reasonable mtu and needed_headroom */
348 if (iph->daddr) {
349 struct flowi4 fl4;
350 struct rtable *rt;
351
352 rt = ip_route_output_tunnel(dev_net(dev), &fl4,
353 tunnel->parms.iph.protocol,
354 iph->daddr, iph->saddr,
355 tunnel->parms.o_key,
356 RT_TOS(iph->tos),
357 tunnel->parms.link);
358 if (!IS_ERR(rt)) {
359 tdev = rt->dst.dev;
360 ip_rt_put(rt);
361 }
362 if (dev->type != ARPHRD_ETHER)
363 dev->flags |= IFF_POINTOPOINT;
364 }
365
366 if (!tdev && tunnel->parms.link)
367 tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);
368
369 if (tdev) {
370 hlen = tdev->hard_header_len + tdev->needed_headroom;
371 mtu = tdev->mtu;
372 }
373 dev->iflink = tunnel->parms.link;
374
375 dev->needed_headroom = t_hlen + hlen;
376 mtu -= (dev->hard_header_len + t_hlen);
377
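	/* Clamp to 68, the minimum IPv4 MTU (RFC 791): a maximal 60-byte
	 * header plus 8 bytes of payload. */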
378 if (mtu < 68)
379 mtu = 68;
380
381 return mtu;
382}
383
384static struct ip_tunnel *ip_tunnel_create(struct net *net,
385 struct ip_tunnel_net *itn,
386 struct ip_tunnel_parm *parms)
387{
388	struct ip_tunnel *nt;
389	struct net_device *dev;
390
391	BUG_ON(!itn->fb_tunnel_dev);
392
393 dev = __ip_tunnel_create(net, itn->fb_tunnel_dev->rtnl_link_ops, parms);
394 if (IS_ERR(dev))
395 return NULL;
396
397 dev->mtu = ip_tunnel_bind_dev(dev);
398
399 nt = netdev_priv(dev);
400 ip_tunnel_add(itn, nt);
401 return nt;
402}
403
404int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
405 const struct tnl_ptk_info *tpi, bool log_ecn_error)
406{
407 struct pcpu_tstats *tstats;
408 const struct iphdr *iph = ip_hdr(skb);
409 int err;
410
411 secpath_reset(skb);
412
413 skb->protocol = tpi->proto;
414
415 skb->mac_header = skb->network_header;
416 __pskb_pull(skb, tunnel->hlen);
417 skb_postpull_rcsum(skb, skb_transport_header(skb), tunnel->hlen);
418#ifdef CONFIG_NET_IPGRE_BROADCAST
419 if (ipv4_is_multicast(iph->daddr)) {
420 /* Looped back packet, drop it! */
421 if (rt_is_output_route(skb_rtable(skb)))
422 goto drop;
423 tunnel->dev->stats.multicast++;
424 skb->pkt_type = PACKET_BROADCAST;
425 }
426#endif
427
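	/* A checksum-flag mismatch in either direction (checksummed packet
	 * on a no-csum tunnel, or the reverse) is accounted as a CRC error. */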
428 if ((!(tpi->flags&TUNNEL_CSUM) && (tunnel->parms.i_flags&TUNNEL_CSUM)) ||
429 ((tpi->flags&TUNNEL_CSUM) && !(tunnel->parms.i_flags&TUNNEL_CSUM))) {
430 tunnel->dev->stats.rx_crc_errors++;
431 tunnel->dev->stats.rx_errors++;
432 goto drop;
433 }
434
435 if (tunnel->parms.i_flags&TUNNEL_SEQ) {
436 if (!(tpi->flags&TUNNEL_SEQ) ||
437 (tunnel->i_seqno && (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
438 tunnel->dev->stats.rx_fifo_errors++;
439 tunnel->dev->stats.rx_errors++;
440 goto drop;
441 }
442 tunnel->i_seqno = ntohl(tpi->seq) + 1;
443 }
444
445 /* Warning: All skb pointers will be invalidated! */
446 if (tunnel->dev->type == ARPHRD_ETHER) {
447 if (!pskb_may_pull(skb, ETH_HLEN)) {
448 tunnel->dev->stats.rx_length_errors++;
449 tunnel->dev->stats.rx_errors++;
450 goto drop;
451 }
452
453 iph = ip_hdr(skb);
454 skb->protocol = eth_type_trans(skb, tunnel->dev);
455 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
456 }
457
458 skb->pkt_type = PACKET_HOST;
459 __skb_tunnel_rx(skb, tunnel->dev);
460
461 skb_reset_network_header(skb);
462 err = IP_ECN_decapsulate(iph, skb);
463 if (unlikely(err)) {
464 if (log_ecn_error)
465 net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
466 &iph->saddr, iph->tos);
467 if (err > 1) {
468 ++tunnel->dev->stats.rx_frame_errors;
469 ++tunnel->dev->stats.rx_errors;
470 goto drop;
471 }
472 }
473
474 tstats = this_cpu_ptr(tunnel->dev->tstats);
475 u64_stats_update_begin(&tstats->syncp);
476 tstats->rx_packets++;
477 tstats->rx_bytes += skb->len;
478 u64_stats_update_end(&tstats->syncp);
479
480 gro_cells_receive(&tunnel->gro_cells, skb);
481 return 0;
482
483drop:
484 kfree_skb(skb);
485 return 0;
486}
487EXPORT_SYMBOL_GPL(ip_tunnel_rcv);
488
489void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
490 const struct iphdr *tnl_params)
491{
492 struct ip_tunnel *tunnel = netdev_priv(dev);
493 const struct iphdr *inner_iph;
494 struct iphdr *iph;
495 struct flowi4 fl4;
496 u8 tos, ttl;
497 __be16 df;
498 struct rtable *rt; /* Route to the other host */
499 struct net_device *tdev; /* Device to other host */
500 unsigned int max_headroom; /* The extra header space needed */
501 __be32 dst;
502 int mtu;
503
504 inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
505
506 dst = tnl_params->daddr;
507 if (dst == 0) {
508		/* NBMA tunnel: no fixed peer, so derive the destination
509		 * from the inner packet's route or neighbour entry. */
509
510 if (skb_dst(skb) == NULL) {
511 dev->stats.tx_fifo_errors++;
512 goto tx_error;
513 }
514
515 if (skb->protocol == htons(ETH_P_IP)) {
516 rt = skb_rtable(skb);
517 dst = rt_nexthop(rt, inner_iph->daddr);
518 }
519#if IS_ENABLED(CONFIG_IPV6)
520 else if (skb->protocol == htons(ETH_P_IPV6)) {
521 const struct in6_addr *addr6;
522 struct neighbour *neigh;
523 bool do_tx_error_icmp;
524 int addr_type;
525
526 neigh = dst_neigh_lookup(skb_dst(skb),
527 &ipv6_hdr(skb)->daddr);
528 if (neigh == NULL)
529 goto tx_error;
530
531 addr6 = (const struct in6_addr *)&neigh->primary_key;
532 addr_type = ipv6_addr_type(addr6);
533
534 if (addr_type == IPV6_ADDR_ANY) {
535 addr6 = &ipv6_hdr(skb)->daddr;
536 addr_type = ipv6_addr_type(addr6);
537 }
538
539 if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
540 do_tx_error_icmp = true;
541 else {
542 do_tx_error_icmp = false;
543 dst = addr6->s6_addr32[3];
544 }
545 neigh_release(neigh);
546 if (do_tx_error_icmp)
547 goto tx_error_icmp;
548 }
549#endif
550 else
551 goto tx_error;
552 }
553
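	/* The low-order bit of the configured TOS means "inherit the TOS
	 * from the inner packet". */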
554 tos = tnl_params->tos;
555 if (tos & 0x1) {
556 tos &= ~0x1;
557 if (skb->protocol == htons(ETH_P_IP))
558 tos = inner_iph->tos;
559 else if (skb->protocol == htons(ETH_P_IPV6))
560 tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
561 }
562
563 rt = ip_route_output_tunnel(dev_net(dev), &fl4,
564 tunnel->parms.iph.protocol,
565 dst, tnl_params->saddr,
566 tunnel->parms.o_key,
567 RT_TOS(tos),
568 tunnel->parms.link);
569 if (IS_ERR(rt)) {
570 dev->stats.tx_carrier_errors++;
571 goto tx_error;
572 }
573 tdev = rt->dst.dev;
574
575 if (tdev == dev) {
576 ip_rt_put(rt);
577 dev->stats.collisions++;
578 goto tx_error;
579 }
580
581 df = tnl_params->frag_off;
582
583 if (df)
584 mtu = dst_mtu(&rt->dst) - dev->hard_header_len
585 - sizeof(struct iphdr);
586 else
587 mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
588
589 if (skb_dst(skb))
590 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
591
592 if (skb->protocol == htons(ETH_P_IP)) {
593 df |= (inner_iph->frag_off&htons(IP_DF));
594
595 if (!skb_is_gso(skb) &&
596 (inner_iph->frag_off&htons(IP_DF)) &&
597 mtu < ntohs(inner_iph->tot_len)) {
598 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
599 ip_rt_put(rt);
600 goto tx_error;
601 }
602 }
603#if IS_ENABLED(CONFIG_IPV6)
604 else if (skb->protocol == htons(ETH_P_IPV6)) {
605 struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb);
606
607 if (rt6 && mtu < dst_mtu(skb_dst(skb)) &&
608 mtu >= IPV6_MIN_MTU) {
609 if ((tunnel->parms.iph.daddr &&
610 !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
611 rt6->rt6i_dst.plen == 128) {
612 rt6->rt6i_flags |= RTF_MODIFIED;
613 dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
614 }
615 }
616
617 if (!skb_is_gso(skb) && mtu >= IPV6_MIN_MTU &&
618 mtu < skb->len) {
619 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
620 ip_rt_put(rt);
621 goto tx_error;
622 }
623 }
624#endif
625
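	/* If ICMP errors were seen recently, report link failure to the
	 * sender for up to IPTUNNEL_ERR_TIMEO jiffies after the last one;
	 * the packet itself is still transmitted. */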
626 if (tunnel->err_count > 0) {
627 if (time_before(jiffies,
628 tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
629 tunnel->err_count--;
630
631 dst_link_failure(skb);
632 } else
633 tunnel->err_count = 0;
634 }
635
636 ttl = tnl_params->ttl;
637 if (ttl == 0) {
638 if (skb->protocol == htons(ETH_P_IP))
639 ttl = inner_iph->ttl;
640#if IS_ENABLED(CONFIG_IPV6)
641 else if (skb->protocol == htons(ETH_P_IPV6))
642 ttl = ((const struct ipv6hdr *)inner_iph)->hop_limit;
643#endif
644 else
645 ttl = ip4_dst_hoplimit(&rt->dst);
646 }
647
648 max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr)
649 + rt->dst.header_len;
650 if (max_headroom > dev->needed_headroom) {
651 dev->needed_headroom = max_headroom;
652 if (skb_cow_head(skb, dev->needed_headroom)) {
653 dev->stats.tx_dropped++;
654 dev_kfree_skb(skb);
655 return;
656 }
657 }
658
659 skb_dst_drop(skb);
660 skb_dst_set(skb, &rt->dst);
661 memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
662
663 /* Push down and install the IP header. */
664 skb_push(skb, sizeof(struct iphdr));
665 skb_reset_network_header(skb);
666
667 iph = ip_hdr(skb);
668 inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
669
670 iph->version = 4;
671 iph->ihl = sizeof(struct iphdr) >> 2;
672 iph->frag_off = df;
673 iph->protocol = tnl_params->protocol;
674 iph->tos = ip_tunnel_ecn_encap(tos, inner_iph, skb);
675 iph->daddr = fl4.daddr;
676 iph->saddr = fl4.saddr;
677 iph->ttl = ttl;
678 tunnel_ip_select_ident(skb, inner_iph, &rt->dst);
679
680 iptunnel_xmit(skb, dev);
681 return;
682
683#if IS_ENABLED(CONFIG_IPV6)
684tx_error_icmp:
685 dst_link_failure(skb);
686#endif
687tx_error:
688 dev->stats.tx_errors++;
689 dev_kfree_skb(skb);
690}
691EXPORT_SYMBOL_GPL(ip_tunnel_xmit);
692
693static void ip_tunnel_update(struct ip_tunnel_net *itn,
694 struct ip_tunnel *t,
695 struct net_device *dev,
696 struct ip_tunnel_parm *p,
697 bool set_mtu)
698{
699 ip_tunnel_del(t);
700 t->parms.iph.saddr = p->iph.saddr;
701 t->parms.iph.daddr = p->iph.daddr;
702 t->parms.i_key = p->i_key;
703 t->parms.o_key = p->o_key;
704 if (dev->type != ARPHRD_ETHER) {
705 memcpy(dev->dev_addr, &p->iph.saddr, 4);
706 memcpy(dev->broadcast, &p->iph.daddr, 4);
707 }
708 ip_tunnel_add(itn, t);
709
710 t->parms.iph.ttl = p->iph.ttl;
711 t->parms.iph.tos = p->iph.tos;
712 t->parms.iph.frag_off = p->iph.frag_off;
713
714 if (t->parms.link != p->link) {
715 int mtu;
716
717 t->parms.link = p->link;
718 mtu = ip_tunnel_bind_dev(dev);
719 if (set_mtu)
720 dev->mtu = mtu;
721 }
722 netdev_state_change(dev);
723}
724
725int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
726{
727 int err = 0;
728 struct ip_tunnel *t;
729 struct net *net = dev_net(dev);
730 struct ip_tunnel *tunnel = netdev_priv(dev);
731 struct ip_tunnel_net *itn = net_generic(net, tunnel->ip_tnl_net_id);
732
733 BUG_ON(!itn->fb_tunnel_dev);
734 switch (cmd) {
735 case SIOCGETTUNNEL:
736 t = NULL;
737 if (dev == itn->fb_tunnel_dev)
738 t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
739 if (t == NULL)
740 t = netdev_priv(dev);
741 memcpy(p, &t->parms, sizeof(*p));
742 break;
743
744 case SIOCADDTUNNEL:
745 case SIOCCHGTUNNEL:
746 err = -EPERM;
747 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
748 goto done;
749 if (p->iph.ttl)
750 p->iph.frag_off |= htons(IP_DF);
751 if (!(p->i_flags&TUNNEL_KEY))
752 p->i_key = 0;
753 if (!(p->o_flags&TUNNEL_KEY))
754 p->o_key = 0;
755
756 t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
757
758 if (!t && (cmd == SIOCADDTUNNEL))
759 t = ip_tunnel_create(net, itn, p);
760
761 if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
762 if (t != NULL) {
763 if (t->dev != dev) {
764 err = -EEXIST;
765 break;
766 }
767 } else {
768 unsigned int nflags = 0;
769
770 if (ipv4_is_multicast(p->iph.daddr))
771 nflags = IFF_BROADCAST;
772 else if (p->iph.daddr)
773 nflags = IFF_POINTOPOINT;
774
775 if ((dev->flags^nflags)&(IFF_POINTOPOINT|IFF_BROADCAST)) {
776 err = -EINVAL;
777 break;
778 }
779
780 t = netdev_priv(dev);
781 }
782 }
783
784 if (t) {
785 err = 0;
786 ip_tunnel_update(itn, t, dev, p, true);
787 } else
788 err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
789 break;
790
791 case SIOCDELTUNNEL:
792 err = -EPERM;
793 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
794 goto done;
795
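		/* A delete issued on the fallback device targets the tunnel
		 * matching the supplied parameters; the fallback itself may
		 * not be removed via ioctl (-EPERM). */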
796 if (dev == itn->fb_tunnel_dev) {
797 err = -ENOENT;
798 t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
799 if (t == NULL)
800 goto done;
801 err = -EPERM;
802 if (t == netdev_priv(itn->fb_tunnel_dev))
803 goto done;
804 dev = t->dev;
805 }
806 unregister_netdevice(dev);
807 err = 0;
808 break;
809
810 default:
811 err = -EINVAL;
812 }
813
814done:
815 return err;
816}
817EXPORT_SYMBOL_GPL(ip_tunnel_ioctl);
818
819int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
820{
821 struct ip_tunnel *tunnel = netdev_priv(dev);
822 int t_hlen = tunnel->hlen + sizeof(struct iphdr);
823
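	/* 0xFFF8 is the maximum 16-bit IP total length rounded down to an
	 * 8-byte fragment boundary. */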
824 if (new_mtu < 68 ||
825 new_mtu > 0xFFF8 - dev->hard_header_len - t_hlen)
826 return -EINVAL;
827 dev->mtu = new_mtu;
828 return 0;
829}
830EXPORT_SYMBOL_GPL(ip_tunnel_change_mtu);
831
832static void ip_tunnel_dev_free(struct net_device *dev)
833{
834 struct ip_tunnel *tunnel = netdev_priv(dev);
835
836 gro_cells_destroy(&tunnel->gro_cells);
837 free_percpu(dev->tstats);
838 free_netdev(dev);
839}
840
841void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
842{
843 struct net *net = dev_net(dev);
844 struct ip_tunnel *tunnel = netdev_priv(dev);
845 struct ip_tunnel_net *itn;
846
847 itn = net_generic(net, tunnel->ip_tnl_net_id);
848
849 if (itn->fb_tunnel_dev != dev) {
850 ip_tunnel_del(netdev_priv(dev));
851 unregister_netdevice_queue(dev, head);
852 }
853}
854EXPORT_SYMBOL_GPL(ip_tunnel_dellink);
855
856int __net_init ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
857 struct rtnl_link_ops *ops, char *devname)
858{
859 struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id);
860 struct ip_tunnel_parm parms;
861
862 itn->tunnels = kzalloc(IP_TNL_HASH_SIZE * sizeof(struct hlist_head), GFP_KERNEL);
863 if (!itn->tunnels)
864 return -ENOMEM;
865
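	/* ops == NULL sets up the hash table only, without creating a
	 * fallback device. */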
866 if (!ops) {
867 itn->fb_tunnel_dev = NULL;
868 return 0;
869 }
870 memset(&parms, 0, sizeof(parms));
871 if (devname)
872 strlcpy(parms.name, devname, IFNAMSIZ);
873
874 rtnl_lock();
875 itn->fb_tunnel_dev = __ip_tunnel_create(net, ops, &parms);
876 rtnl_unlock();
877 if (IS_ERR(itn->fb_tunnel_dev)) {
878 kfree(itn->tunnels);
879 return PTR_ERR(itn->fb_tunnel_dev);
880 }
881
882 return 0;
883}
884EXPORT_SYMBOL_GPL(ip_tunnel_init_net);
885
886static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head)
887{
888 int h;
889
890 for (h = 0; h < IP_TNL_HASH_SIZE; h++) {
891 struct ip_tunnel *t;
892 struct hlist_node *n;
893 struct hlist_head *thead = &itn->tunnels[h];
894
895 hlist_for_each_entry_safe(t, n, thead, hash_node)
896 unregister_netdevice_queue(t->dev, head);
897 }
898 if (itn->fb_tunnel_dev)
899 unregister_netdevice_queue(itn->fb_tunnel_dev, head);
900}
901
902void __net_exit ip_tunnel_delete_net(struct ip_tunnel_net *itn)
903{
904 LIST_HEAD(list);
905
906 rtnl_lock();
907 ip_tunnel_destroy(itn, &list);
908 unregister_netdevice_many(&list);
909 rtnl_unlock();
910 kfree(itn->tunnels);
911}
912EXPORT_SYMBOL_GPL(ip_tunnel_delete_net);
913
914int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
915 struct ip_tunnel_parm *p)
916{
917 struct ip_tunnel *nt;
918 struct net *net = dev_net(dev);
919 struct ip_tunnel_net *itn;
920 int mtu;
921 int err;
922
923 nt = netdev_priv(dev);
924 itn = net_generic(net, nt->ip_tnl_net_id);
925
926 if (ip_tunnel_find(itn, p, dev->type))
927 return -EEXIST;
928
929 nt->parms = *p;
930 err = register_netdevice(dev);
931 if (err)
932 goto out;
933
934 if (dev->type == ARPHRD_ETHER && !tb[IFLA_ADDRESS])
935 eth_hw_addr_random(dev);
936
937 mtu = ip_tunnel_bind_dev(dev);
938 if (!tb[IFLA_MTU])
939 dev->mtu = mtu;
940
941 ip_tunnel_add(itn, nt);
942
943out:
944 return err;
945}
946EXPORT_SYMBOL_GPL(ip_tunnel_newlink);
947
948int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
949 struct ip_tunnel_parm *p)
950{
951 struct ip_tunnel *t, *nt;
952 struct net *net = dev_net(dev);
953 struct ip_tunnel *tunnel = netdev_priv(dev);
954 struct ip_tunnel_net *itn = net_generic(net, tunnel->ip_tnl_net_id);
955
956 if (dev == itn->fb_tunnel_dev)
957 return -EINVAL;
958
959 nt = netdev_priv(dev);
960
961 t = ip_tunnel_find(itn, p, dev->type);
962
963 if (t) {
964 if (t->dev != dev)
965 return -EEXIST;
966 } else {
967 t = nt;
968
969 if (dev->type != ARPHRD_ETHER) {
970 unsigned int nflags = 0;
971
972 if (ipv4_is_multicast(p->iph.daddr))
973 nflags = IFF_BROADCAST;
974 else if (p->iph.daddr)
975 nflags = IFF_POINTOPOINT;
976
977 if ((dev->flags ^ nflags) &
978 (IFF_POINTOPOINT | IFF_BROADCAST))
979 return -EINVAL;
980 }
981 }
982
983 ip_tunnel_update(itn, t, dev, p, !tb[IFLA_MTU]);
984 return 0;
985}
986EXPORT_SYMBOL_GPL(ip_tunnel_changelink);
987
988int ip_tunnel_init(struct net_device *dev)
989{
990 struct ip_tunnel *tunnel = netdev_priv(dev);
991 struct iphdr *iph = &tunnel->parms.iph;
992 int err;
993
994 dev->destructor = ip_tunnel_dev_free;
995 dev->tstats = alloc_percpu(struct pcpu_tstats);
996 if (!dev->tstats)
997 return -ENOMEM;
998
999 err = gro_cells_init(&tunnel->gro_cells, dev);
1000 if (err) {
1001 free_percpu(dev->tstats);
1002 return err;
1003 }
1004
1005 tunnel->dev = dev;
1006 strcpy(tunnel->parms.name, dev->name);
1007 iph->version = 4;
1008 iph->ihl = 5;
1009
1010 return 0;
1011}
1012EXPORT_SYMBOL_GPL(ip_tunnel_init);
1013
1014void ip_tunnel_uninit(struct net_device *dev)
1015{
1016 struct net *net = dev_net(dev);
1017 struct ip_tunnel *tunnel = netdev_priv(dev);
1018 struct ip_tunnel_net *itn;
1019
1020 itn = net_generic(net, tunnel->ip_tnl_net_id);
1021	/* fb_tunnel_dev will be unregistered in the net exit call. */
1022 if (itn->fb_tunnel_dev != dev)
1023 ip_tunnel_del(netdev_priv(dev));
1024}
1025EXPORT_SYMBOL_GPL(ip_tunnel_uninit);
1026
1027/* Do the least required initialization here; the rest of the init is done in the tunnel_init call */
1028void ip_tunnel_setup(struct net_device *dev, int net_id)
1029{
1030 struct ip_tunnel *tunnel = netdev_priv(dev);
1031 tunnel->ip_tnl_net_id = net_id;
1032}
1033EXPORT_SYMBOL_GPL(ip_tunnel_setup);
1034
1035MODULE_LICENSE("GPL");
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index c3a4233c0ac2..9d2bdb2c1d3f 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -38,7 +38,7 @@
38#include <net/sock.h> 38#include <net/sock.h>
39#include <net/ip.h> 39#include <net/ip.h>
40#include <net/icmp.h> 40#include <net/icmp.h>
41#include <net/ipip.h> 41#include <net/ip_tunnels.h>
42#include <net/inet_ecn.h> 42#include <net/inet_ecn.h>
43#include <net/xfrm.h> 43#include <net/xfrm.h>
44#include <net/net_namespace.h> 44#include <net/net_namespace.h>
@@ -82,44 +82,6 @@ static int vti_tunnel_bind_dev(struct net_device *dev);
82} while (0) 82} while (0)
83 83
84 84
85static struct rtnl_link_stats64 *vti_get_stats64(struct net_device *dev,
86 struct rtnl_link_stats64 *tot)
87{
88 int i;
89
90 for_each_possible_cpu(i) {
91 const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
92 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
93 unsigned int start;
94
95 do {
96 start = u64_stats_fetch_begin_bh(&tstats->syncp);
97 rx_packets = tstats->rx_packets;
98 tx_packets = tstats->tx_packets;
99 rx_bytes = tstats->rx_bytes;
100 tx_bytes = tstats->tx_bytes;
101 } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
102
103 tot->rx_packets += rx_packets;
104 tot->tx_packets += tx_packets;
105 tot->rx_bytes += rx_bytes;
106 tot->tx_bytes += tx_bytes;
107 }
108
109 tot->multicast = dev->stats.multicast;
110 tot->rx_crc_errors = dev->stats.rx_crc_errors;
111 tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
112 tot->rx_length_errors = dev->stats.rx_length_errors;
113 tot->rx_errors = dev->stats.rx_errors;
114 tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
115 tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
116 tot->tx_dropped = dev->stats.tx_dropped;
117 tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
118 tot->tx_errors = dev->stats.tx_errors;
119
120 return tot;
121}
122
123static struct ip_tunnel *vti_tunnel_lookup(struct net *net, 85static struct ip_tunnel *vti_tunnel_lookup(struct net *net,
124 __be32 remote, __be32 local) 86 __be32 remote, __be32 local)
125{ 87{
@@ -597,7 +559,7 @@ static const struct net_device_ops vti_netdev_ops = {
597 .ndo_start_xmit = vti_tunnel_xmit, 559 .ndo_start_xmit = vti_tunnel_xmit,
598 .ndo_do_ioctl = vti_tunnel_ioctl, 560 .ndo_do_ioctl = vti_tunnel_ioctl,
599 .ndo_change_mtu = vti_tunnel_change_mtu, 561 .ndo_change_mtu = vti_tunnel_change_mtu,
600 .ndo_get_stats64 = vti_get_stats64, 562 .ndo_get_stats64 = ip_tunnel_get_stats64,
601}; 563};
602 564
603static void vti_dev_free(struct net_device *dev) 565static void vti_dev_free(struct net_device *dev)
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index bf6c5cf31aed..efa1138fa523 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -206,7 +206,7 @@ static int __init ic_open_devs(void)
206 struct ic_device *d, **last; 206 struct ic_device *d, **last;
207 struct net_device *dev; 207 struct net_device *dev;
208 unsigned short oflags; 208 unsigned short oflags;
209 unsigned long start; 209 unsigned long start, next_msg;
210 210
211 last = &ic_first_dev; 211 last = &ic_first_dev;
212 rtnl_lock(); 212 rtnl_lock();
@@ -263,12 +263,23 @@ static int __init ic_open_devs(void)
263 263
264 /* wait for a carrier on at least one device */ 264 /* wait for a carrier on at least one device */
265 start = jiffies; 265 start = jiffies;
266 next_msg = start + msecs_to_jiffies(CONF_CARRIER_TIMEOUT/12);
266 while (jiffies - start < msecs_to_jiffies(CONF_CARRIER_TIMEOUT)) { 267 while (jiffies - start < msecs_to_jiffies(CONF_CARRIER_TIMEOUT)) {
268 int wait, elapsed;
269
267 for_each_netdev(&init_net, dev) 270 for_each_netdev(&init_net, dev)
268 if (ic_is_init_dev(dev) && netif_carrier_ok(dev)) 271 if (ic_is_init_dev(dev) && netif_carrier_ok(dev))
269 goto have_carrier; 272 goto have_carrier;
270 273
271 msleep(1); 274 msleep(1);
275
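		/* Print a progress message roughly every 1/12th of the
		 * carrier timeout while waiting. */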
 276 if (time_before(jiffies, next_msg))
277 continue;
278
279 elapsed = jiffies_to_msecs(jiffies - start);
280 wait = (CONF_CARRIER_TIMEOUT - elapsed + 500)/1000;
281 pr_info("Waiting up to %d more seconds for network.\n", wait);
282 next_msg = jiffies + msecs_to_jiffies(CONF_CARRIER_TIMEOUT/12);
272 } 283 }
273have_carrier: 284have_carrier:
274 rtnl_unlock(); 285 rtnl_unlock();
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 8f024d41eefa..77bfcce64fe5 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -111,227 +111,21 @@
111#include <net/sock.h> 111#include <net/sock.h>
112#include <net/ip.h> 112#include <net/ip.h>
113#include <net/icmp.h> 113#include <net/icmp.h>
114#include <net/ipip.h> 114#include <net/ip_tunnels.h>
115#include <net/inet_ecn.h> 115#include <net/inet_ecn.h>
116#include <net/xfrm.h> 116#include <net/xfrm.h>
117#include <net/net_namespace.h> 117#include <net/net_namespace.h>
118#include <net/netns/generic.h> 118#include <net/netns/generic.h>
119 119
120#define HASH_SIZE 16
121#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&0xF)
122
123static bool log_ecn_error = true; 120static bool log_ecn_error = true;
124module_param(log_ecn_error, bool, 0644); 121module_param(log_ecn_error, bool, 0644);
125MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN"); 122MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
126 123
127static int ipip_net_id __read_mostly; 124static int ipip_net_id __read_mostly;
128struct ipip_net {
129 struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE];
130 struct ip_tunnel __rcu *tunnels_r[HASH_SIZE];
131 struct ip_tunnel __rcu *tunnels_l[HASH_SIZE];
132 struct ip_tunnel __rcu *tunnels_wc[1];
133 struct ip_tunnel __rcu **tunnels[4];
134
135 struct net_device *fb_tunnel_dev;
136};
137 125
138static int ipip_tunnel_init(struct net_device *dev); 126static int ipip_tunnel_init(struct net_device *dev);
139static void ipip_tunnel_setup(struct net_device *dev);
140static void ipip_dev_free(struct net_device *dev);
141static struct rtnl_link_ops ipip_link_ops __read_mostly; 127static struct rtnl_link_ops ipip_link_ops __read_mostly;
142 128
143static struct rtnl_link_stats64 *ipip_get_stats64(struct net_device *dev,
144 struct rtnl_link_stats64 *tot)
145{
146 int i;
147
148 for_each_possible_cpu(i) {
149 const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
150 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
151 unsigned int start;
152
153 do {
154 start = u64_stats_fetch_begin_bh(&tstats->syncp);
155 rx_packets = tstats->rx_packets;
156 tx_packets = tstats->tx_packets;
157 rx_bytes = tstats->rx_bytes;
158 tx_bytes = tstats->tx_bytes;
159 } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
160
161 tot->rx_packets += rx_packets;
162 tot->tx_packets += tx_packets;
163 tot->rx_bytes += rx_bytes;
164 tot->tx_bytes += tx_bytes;
165 }
166
167 tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
168 tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
169 tot->tx_dropped = dev->stats.tx_dropped;
170 tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
171 tot->tx_errors = dev->stats.tx_errors;
172 tot->collisions = dev->stats.collisions;
173
174 return tot;
175}
176
177static struct ip_tunnel *ipip_tunnel_lookup(struct net *net,
178 __be32 remote, __be32 local)
179{
180 unsigned int h0 = HASH(remote);
181 unsigned int h1 = HASH(local);
182 struct ip_tunnel *t;
183 struct ipip_net *ipn = net_generic(net, ipip_net_id);
184
185 for_each_ip_tunnel_rcu(t, ipn->tunnels_r_l[h0 ^ h1])
186 if (local == t->parms.iph.saddr &&
187 remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
188 return t;
189
190 for_each_ip_tunnel_rcu(t, ipn->tunnels_r[h0])
191 if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
192 return t;
193
194 for_each_ip_tunnel_rcu(t, ipn->tunnels_l[h1])
195 if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP))
196 return t;
197
198 t = rcu_dereference(ipn->tunnels_wc[0]);
199 if (t && (t->dev->flags&IFF_UP))
200 return t;
201 return NULL;
202}
203
204static struct ip_tunnel __rcu **__ipip_bucket(struct ipip_net *ipn,
205 struct ip_tunnel_parm *parms)
206{
207 __be32 remote = parms->iph.daddr;
208 __be32 local = parms->iph.saddr;
209 unsigned int h = 0;
210 int prio = 0;
211
212 if (remote) {
213 prio |= 2;
214 h ^= HASH(remote);
215 }
216 if (local) {
217 prio |= 1;
218 h ^= HASH(local);
219 }
220 return &ipn->tunnels[prio][h];
221}
222
223static inline struct ip_tunnel __rcu **ipip_bucket(struct ipip_net *ipn,
224 struct ip_tunnel *t)
225{
226 return __ipip_bucket(ipn, &t->parms);
227}
228
229static void ipip_tunnel_unlink(struct ipip_net *ipn, struct ip_tunnel *t)
230{
231 struct ip_tunnel __rcu **tp;
232 struct ip_tunnel *iter;
233
234 for (tp = ipip_bucket(ipn, t);
235 (iter = rtnl_dereference(*tp)) != NULL;
236 tp = &iter->next) {
237 if (t == iter) {
238 rcu_assign_pointer(*tp, t->next);
239 break;
240 }
241 }
242}
243
244static void ipip_tunnel_link(struct ipip_net *ipn, struct ip_tunnel *t)
245{
246 struct ip_tunnel __rcu **tp = ipip_bucket(ipn, t);
247
248 rcu_assign_pointer(t->next, rtnl_dereference(*tp));
249 rcu_assign_pointer(*tp, t);
250}
251
252static int ipip_tunnel_create(struct net_device *dev)
253{
254 struct ip_tunnel *t = netdev_priv(dev);
255 struct net *net = dev_net(dev);
256 struct ipip_net *ipn = net_generic(net, ipip_net_id);
257 int err;
258
259 err = ipip_tunnel_init(dev);
260 if (err < 0)
261 goto out;
262
263 err = register_netdevice(dev);
264 if (err < 0)
265 goto out;
266
267 strcpy(t->parms.name, dev->name);
268 dev->rtnl_link_ops = &ipip_link_ops;
269
270 dev_hold(dev);
271 ipip_tunnel_link(ipn, t);
272 return 0;
273
274out:
275 return err;
276}
277
278static struct ip_tunnel *ipip_tunnel_locate(struct net *net,
279 struct ip_tunnel_parm *parms, int create)
280{
281 __be32 remote = parms->iph.daddr;
282 __be32 local = parms->iph.saddr;
283 struct ip_tunnel *t, *nt;
284 struct ip_tunnel __rcu **tp;
285 struct net_device *dev;
286 char name[IFNAMSIZ];
287 struct ipip_net *ipn = net_generic(net, ipip_net_id);
288
289 for (tp = __ipip_bucket(ipn, parms);
290 (t = rtnl_dereference(*tp)) != NULL;
291 tp = &t->next) {
292 if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr)
293 return t;
294 }
295 if (!create)
296 return NULL;
297
298 if (parms->name[0])
299 strlcpy(name, parms->name, IFNAMSIZ);
300 else
301 strcpy(name, "tunl%d");
302
303 dev = alloc_netdev(sizeof(*t), name, ipip_tunnel_setup);
304 if (dev == NULL)
305 return NULL;
306
307 dev_net_set(dev, net);
308
309 nt = netdev_priv(dev);
310 nt->parms = *parms;
311
312 if (ipip_tunnel_create(dev) < 0)
313 goto failed_free;
314
315 return nt;
316
317failed_free:
318 ipip_dev_free(dev);
319 return NULL;
320}
321
322/* called with RTNL */
323static void ipip_tunnel_uninit(struct net_device *dev)
324{
325 struct net *net = dev_net(dev);
326 struct ipip_net *ipn = net_generic(net, ipip_net_id);
327
328 if (dev == ipn->fb_tunnel_dev)
329 RCU_INIT_POINTER(ipn->tunnels_wc[0], NULL);
330 else
331 ipip_tunnel_unlink(ipn, netdev_priv(dev));
332 dev_put(dev);
333}
334
335static int ipip_err(struct sk_buff *skb, u32 info) 129static int ipip_err(struct sk_buff *skb, u32 info)
336{ 130{
337 131
@@ -339,41 +133,17 @@ static int ipip_err(struct sk_buff *skb, u32 info)
339 8 bytes of packet payload. It means that precise relaying of 133 8 bytes of packet payload. It means that precise relaying of
340 ICMP in the real Internet is absolutely infeasible. 134 ICMP in the real Internet is absolutely infeasible.
341 */ 135 */
136 struct net *net = dev_net(skb->dev);
137 struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
342 const struct iphdr *iph = (const struct iphdr *)skb->data; 138 const struct iphdr *iph = (const struct iphdr *)skb->data;
343 const int type = icmp_hdr(skb)->type;
344 const int code = icmp_hdr(skb)->code;
345 struct ip_tunnel *t; 139 struct ip_tunnel *t;
346 int err; 140 int err;
347 141 const int type = icmp_hdr(skb)->type;
348 switch (type) { 142 const int code = icmp_hdr(skb)->code;
349 default:
350 case ICMP_PARAMETERPROB:
351 return 0;
352
353 case ICMP_DEST_UNREACH:
354 switch (code) {
355 case ICMP_SR_FAILED:
356 case ICMP_PORT_UNREACH:
357 /* Impossible event. */
358 return 0;
359 default:
360 /* All others are translated to HOST_UNREACH.
361 rfc2003 contains "deep thoughts" about NET_UNREACH,
362 I believe they are just ether pollution. --ANK
363 */
364 break;
365 }
366 break;
367 case ICMP_TIME_EXCEEDED:
368 if (code != ICMP_EXC_TTL)
369 return 0;
370 break;
371 case ICMP_REDIRECT:
372 break;
373 }
374 143
375 err = -ENOENT; 144 err = -ENOENT;
376 t = ipip_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr); 145 t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
146 iph->daddr, iph->saddr, 0);
377 if (t == NULL) 147 if (t == NULL)
378 goto out; 148 goto out;
379 149
@@ -403,53 +173,29 @@ static int ipip_err(struct sk_buff *skb, u32 info)
403 else 173 else
404 t->err_count = 1; 174 t->err_count = 1;
405 t->err_time = jiffies; 175 t->err_time = jiffies;
406out:
407 176
177out:
408 return err; 178 return err;
409} 179}
410 180
181static const struct tnl_ptk_info tpi = {
182 /* no tunnel info required for ipip. */
183 .proto = htons(ETH_P_IP),
184};
185
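/* ipip carries no key, sequence or checksum information, so a single
 * static tnl_ptk_info can be shared by every received packet. */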
411static int ipip_rcv(struct sk_buff *skb) 186static int ipip_rcv(struct sk_buff *skb)
412{ 187{
188 struct net *net = dev_net(skb->dev);
189 struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
413 struct ip_tunnel *tunnel; 190 struct ip_tunnel *tunnel;
414 const struct iphdr *iph = ip_hdr(skb); 191 const struct iphdr *iph = ip_hdr(skb);
415 int err;
416
417 tunnel = ipip_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr);
418 if (tunnel != NULL) {
419 struct pcpu_tstats *tstats;
420 192
193 tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
194 iph->saddr, iph->daddr, 0);
195 if (tunnel) {
421 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) 196 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
422 goto drop; 197 goto drop;
423 198 return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error);
424 secpath_reset(skb);
425
426 skb->mac_header = skb->network_header;
427 skb_reset_network_header(skb);
428 skb->protocol = htons(ETH_P_IP);
429 skb->pkt_type = PACKET_HOST;
430
431 __skb_tunnel_rx(skb, tunnel->dev);
432
433 err = IP_ECN_decapsulate(iph, skb);
434 if (unlikely(err)) {
435 if (log_ecn_error)
436 net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
437 &iph->saddr, iph->tos);
438 if (err > 1) {
439 ++tunnel->dev->stats.rx_frame_errors;
440 ++tunnel->dev->stats.rx_errors;
441 goto drop;
442 }
443 }
444
445 tstats = this_cpu_ptr(tunnel->dev->tstats);
446 u64_stats_update_begin(&tstats->syncp);
447 tstats->rx_packets++;
448 tstats->rx_bytes += skb->len;
449 u64_stats_update_end(&tstats->syncp);
450
451 netif_rx(skb);
452 return 0;
453 } 199 }
454 200
455 return -1; 201 return -1;
@@ -463,329 +209,64 @@ drop:
463 * This function assumes it is being called from dev_queue_xmit() 209 * This function assumes it is being called from dev_queue_xmit()
464 * and that skb is filled properly by that function. 210 * and that skb is filled properly by that function.
465 */ 211 */
466
467static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) 212static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
468{ 213{
469 struct ip_tunnel *tunnel = netdev_priv(dev); 214 struct ip_tunnel *tunnel = netdev_priv(dev);
470 const struct iphdr *tiph = &tunnel->parms.iph; 215 const struct iphdr *tiph = &tunnel->parms.iph;
471 u8 tos = tunnel->parms.iph.tos;
472 __be16 df = tiph->frag_off;
473 struct rtable *rt; /* Route to the other host */
474 struct net_device *tdev; /* Device to other host */
475 const struct iphdr *old_iph;
476 struct iphdr *iph; /* Our new IP header */
477 unsigned int max_headroom; /* The extra header space needed */
478 __be32 dst = tiph->daddr;
479 struct flowi4 fl4;
480 int mtu;
481
482 if (skb->protocol != htons(ETH_P_IP))
483 goto tx_error;
484 216
485 if (skb->ip_summed == CHECKSUM_PARTIAL && 217 if (unlikely(skb->protocol != htons(ETH_P_IP)))
486 skb_checksum_help(skb))
487 goto tx_error; 218 goto tx_error;
488 219
489 old_iph = ip_hdr(skb); 220 if (likely(!skb->encapsulation)) {
490 221 skb_reset_inner_headers(skb);
491 if (tos & 1) 222 skb->encapsulation = 1;
492 tos = old_iph->tos;
493
494 if (!dst) {
495 /* NBMA tunnel */
496 if ((rt = skb_rtable(skb)) == NULL) {
497 dev->stats.tx_fifo_errors++;
498 goto tx_error;
499 }
500 dst = rt_nexthop(rt, old_iph->daddr);
501 } 223 }
502 224
503 rt = ip_route_output_ports(dev_net(dev), &fl4, NULL, 225 ip_tunnel_xmit(skb, dev, tiph);
504 dst, tiph->saddr,
505 0, 0,
506 IPPROTO_IPIP, RT_TOS(tos),
507 tunnel->parms.link);
508 if (IS_ERR(rt)) {
509 dev->stats.tx_carrier_errors++;
510 goto tx_error_icmp;
511 }
512 tdev = rt->dst.dev;
513
514 if (tdev == dev) {
515 ip_rt_put(rt);
516 dev->stats.collisions++;
517 goto tx_error;
518 }
519
520 df |= old_iph->frag_off & htons(IP_DF);
521
522 if (df) {
523 mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
524
525 if (mtu < 68) {
526 dev->stats.collisions++;
527 ip_rt_put(rt);
528 goto tx_error;
529 }
530
531 if (skb_dst(skb))
532 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
533
534 if ((old_iph->frag_off & htons(IP_DF)) &&
535 mtu < ntohs(old_iph->tot_len)) {
536 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
537 htonl(mtu));
538 ip_rt_put(rt);
539 goto tx_error;
540 }
541 }
542
543 if (tunnel->err_count > 0) {
544 if (time_before(jiffies,
545 tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
546 tunnel->err_count--;
547 dst_link_failure(skb);
548 } else
549 tunnel->err_count = 0;
550 }
551
552 /*
553 * Okay, now see if we can stuff it in the buffer as-is.
554 */
555 max_headroom = (LL_RESERVED_SPACE(tdev)+sizeof(struct iphdr));
556
557 if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
558 (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
559 struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
560 if (!new_skb) {
561 ip_rt_put(rt);
562 dev->stats.tx_dropped++;
563 dev_kfree_skb(skb);
564 return NETDEV_TX_OK;
565 }
566 if (skb->sk)
567 skb_set_owner_w(new_skb, skb->sk);
568 dev_kfree_skb(skb);
569 skb = new_skb;
570 old_iph = ip_hdr(skb);
571 }
572
573 skb->transport_header = skb->network_header;
574 skb_push(skb, sizeof(struct iphdr));
575 skb_reset_network_header(skb);
576 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
577 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
578 IPSKB_REROUTED);
579 skb_dst_drop(skb);
580 skb_dst_set(skb, &rt->dst);
581
582 /*
583 * Push down and install the IPIP header.
584 */
585
586 iph = ip_hdr(skb);
587 iph->version = 4;
588 iph->ihl = sizeof(struct iphdr)>>2;
589 iph->frag_off = df;
590 iph->protocol = IPPROTO_IPIP;
591 iph->tos = INET_ECN_encapsulate(tos, old_iph->tos);
592 iph->daddr = fl4.daddr;
593 iph->saddr = fl4.saddr;
594
595 if ((iph->ttl = tiph->ttl) == 0)
596 iph->ttl = old_iph->ttl;
597
598 iptunnel_xmit(skb, dev);
599 return NETDEV_TX_OK; 226 return NETDEV_TX_OK;
600 227
601tx_error_icmp:
602 dst_link_failure(skb);
603tx_error: 228tx_error:
604 dev->stats.tx_errors++; 229 dev->stats.tx_errors++;
605 dev_kfree_skb(skb); 230 dev_kfree_skb(skb);
606 return NETDEV_TX_OK; 231 return NETDEV_TX_OK;
607} 232}
608 233
609static void ipip_tunnel_bind_dev(struct net_device *dev)
610{
611 struct net_device *tdev = NULL;
612 struct ip_tunnel *tunnel;
613 const struct iphdr *iph;
614
615 tunnel = netdev_priv(dev);
616 iph = &tunnel->parms.iph;
617
618 if (iph->daddr) {
619 struct rtable *rt;
620 struct flowi4 fl4;
621
622 rt = ip_route_output_ports(dev_net(dev), &fl4, NULL,
623 iph->daddr, iph->saddr,
624 0, 0,
625 IPPROTO_IPIP,
626 RT_TOS(iph->tos),
627 tunnel->parms.link);
628 if (!IS_ERR(rt)) {
629 tdev = rt->dst.dev;
630 ip_rt_put(rt);
631 }
632 dev->flags |= IFF_POINTOPOINT;
633 }
634
635 if (!tdev && tunnel->parms.link)
636 tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);
637
638 if (tdev) {
639 dev->hard_header_len = tdev->hard_header_len + sizeof(struct iphdr);
640 dev->mtu = tdev->mtu - sizeof(struct iphdr);
641 }
642 dev->iflink = tunnel->parms.link;
643}
644
645static void ipip_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p)
646{
647 struct net *net = dev_net(t->dev);
648 struct ipip_net *ipn = net_generic(net, ipip_net_id);
649
650 ipip_tunnel_unlink(ipn, t);
651 synchronize_net();
652 t->parms.iph.saddr = p->iph.saddr;
653 t->parms.iph.daddr = p->iph.daddr;
654 memcpy(t->dev->dev_addr, &p->iph.saddr, 4);
655 memcpy(t->dev->broadcast, &p->iph.daddr, 4);
656 ipip_tunnel_link(ipn, t);
657 t->parms.iph.ttl = p->iph.ttl;
658 t->parms.iph.tos = p->iph.tos;
659 t->parms.iph.frag_off = p->iph.frag_off;
660 if (t->parms.link != p->link) {
661 t->parms.link = p->link;
662 ipip_tunnel_bind_dev(t->dev);
663 }
664 netdev_state_change(t->dev);
665}
666
667static int 234static int
668ipip_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) 235ipip_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
669{ 236{
670 int err = 0; 237 int err = 0;
671 struct ip_tunnel_parm p; 238 struct ip_tunnel_parm p;
672 struct ip_tunnel *t;
673 struct net *net = dev_net(dev);
674 struct ipip_net *ipn = net_generic(net, ipip_net_id);
675
676 switch (cmd) {
677 case SIOCGETTUNNEL:
678 t = NULL;
679 if (dev == ipn->fb_tunnel_dev) {
680 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
681 err = -EFAULT;
682 break;
683 }
684 t = ipip_tunnel_locate(net, &p, 0);
685 }
686 if (t == NULL)
687 t = netdev_priv(dev);
688 memcpy(&p, &t->parms, sizeof(p));
689 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
690 err = -EFAULT;
691 break;
692
693 case SIOCADDTUNNEL:
694 case SIOCCHGTUNNEL:
695 err = -EPERM;
696 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
697 goto done;
698
699 err = -EFAULT;
700 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
701 goto done;
702
703 err = -EINVAL;
704 if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
705 p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)))
706 goto done;
707 if (p.iph.ttl)
708 p.iph.frag_off |= htons(IP_DF);
709
710 t = ipip_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);
711
712 if (dev != ipn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
713 if (t != NULL) {
714 if (t->dev != dev) {
715 err = -EEXIST;
716 break;
717 }
718 } else {
719 if (((dev->flags&IFF_POINTOPOINT) && !p.iph.daddr) ||
720 (!(dev->flags&IFF_POINTOPOINT) && p.iph.daddr)) {
721 err = -EINVAL;
722 break;
723 }
724 t = netdev_priv(dev);
725 }
726
727 ipip_tunnel_update(t, &p);
728 }
729
730 if (t) {
731 err = 0;
732 if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms, sizeof(p)))
733 err = -EFAULT;
734 } else
735 err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
736 break;
737
738 case SIOCDELTUNNEL:
739 err = -EPERM;
740 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
741 goto done;
742
743 if (dev == ipn->fb_tunnel_dev) {
744 err = -EFAULT;
745 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
746 goto done;
747 err = -ENOENT;
748 if ((t = ipip_tunnel_locate(net, &p, 0)) == NULL)
749 goto done;
750 err = -EPERM;
751 if (t->dev == ipn->fb_tunnel_dev)
752 goto done;
753 dev = t->dev;
754 }
755 unregister_netdevice(dev);
756 err = 0;
757 break;
758 239
759 default: 240 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
760 err = -EINVAL; 241 return -EFAULT;
761 }
762
763done:
764 return err;
765}
766 242
767static int ipip_tunnel_change_mtu(struct net_device *dev, int new_mtu) 243 if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
768{ 244 p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)))
769 if (new_mtu < 68 || new_mtu > 0xFFF8 - sizeof(struct iphdr)) 245 return -EINVAL;
246 if (p.i_key || p.o_key || p.i_flags || p.o_flags)
770 return -EINVAL; 247 return -EINVAL;
771 dev->mtu = new_mtu; 248 if (p.iph.ttl)
249 p.iph.frag_off |= htons(IP_DF);
250
251 err = ip_tunnel_ioctl(dev, &p, cmd);
252 if (err)
253 return err;
254
255 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
256 return -EFAULT;
257
772 return 0; 258 return 0;
773} 259}
774 260
775static const struct net_device_ops ipip_netdev_ops = { 261static const struct net_device_ops ipip_netdev_ops = {
776 .ndo_uninit = ipip_tunnel_uninit, 262 .ndo_init = ipip_tunnel_init,
263 .ndo_uninit = ip_tunnel_uninit,
777 .ndo_start_xmit = ipip_tunnel_xmit, 264 .ndo_start_xmit = ipip_tunnel_xmit,
778 .ndo_do_ioctl = ipip_tunnel_ioctl, 265 .ndo_do_ioctl = ipip_tunnel_ioctl,
779 .ndo_change_mtu = ipip_tunnel_change_mtu, 266 .ndo_change_mtu = ip_tunnel_change_mtu,
780 .ndo_get_stats64 = ipip_get_stats64, 267 .ndo_get_stats64 = ip_tunnel_get_stats64,
781}; 268};
782 269
783static void ipip_dev_free(struct net_device *dev)
784{
785 free_percpu(dev->tstats);
786 free_netdev(dev);
787}
788
789#define IPIP_FEATURES (NETIF_F_SG | \ 270#define IPIP_FEATURES (NETIF_F_SG | \
790 NETIF_F_FRAGLIST | \ 271 NETIF_F_FRAGLIST | \
791 NETIF_F_HIGHDMA | \ 272 NETIF_F_HIGHDMA | \
@@ -794,11 +275,8 @@ static void ipip_dev_free(struct net_device *dev)
794static void ipip_tunnel_setup(struct net_device *dev) 275static void ipip_tunnel_setup(struct net_device *dev)
795{ 276{
796 dev->netdev_ops = &ipip_netdev_ops; 277 dev->netdev_ops = &ipip_netdev_ops;
797 dev->destructor = ipip_dev_free;
798 278
799 dev->type = ARPHRD_TUNNEL; 279 dev->type = ARPHRD_TUNNEL;
800 dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
801 dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr);
802 dev->flags = IFF_NOARP; 280 dev->flags = IFF_NOARP;
803 dev->iflink = 0; 281 dev->iflink = 0;
804 dev->addr_len = 4; 282 dev->addr_len = 4;
@@ -808,46 +286,19 @@ static void ipip_tunnel_setup(struct net_device *dev)
808 286
809 dev->features |= IPIP_FEATURES; 287 dev->features |= IPIP_FEATURES;
810 dev->hw_features |= IPIP_FEATURES; 288 dev->hw_features |= IPIP_FEATURES;
289 ip_tunnel_setup(dev, ipip_net_id);
811} 290}
812 291
813static int ipip_tunnel_init(struct net_device *dev) 292static int ipip_tunnel_init(struct net_device *dev)
814{ 293{
815 struct ip_tunnel *tunnel = netdev_priv(dev); 294 struct ip_tunnel *tunnel = netdev_priv(dev);
816 295
817 tunnel->dev = dev;
818
819 memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4); 296 memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
820 memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4); 297 memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
821 298
822 ipip_tunnel_bind_dev(dev); 299 tunnel->hlen = 0;
823 300 tunnel->parms.iph.protocol = IPPROTO_IPIP;
824 dev->tstats = alloc_percpu(struct pcpu_tstats); 301 return ip_tunnel_init(dev);
825 if (!dev->tstats)
826 return -ENOMEM;
827
828 return 0;
829}
830
831static int __net_init ipip_fb_tunnel_init(struct net_device *dev)
832{
833 struct ip_tunnel *tunnel = netdev_priv(dev);
834 struct iphdr *iph = &tunnel->parms.iph;
835 struct ipip_net *ipn = net_generic(dev_net(dev), ipip_net_id);
836
837 tunnel->dev = dev;
838 strcpy(tunnel->parms.name, dev->name);
839
840 iph->version = 4;
841 iph->protocol = IPPROTO_IPIP;
842 iph->ihl = 5;
843
844 dev->tstats = alloc_percpu(struct pcpu_tstats);
845 if (!dev->tstats)
846 return -ENOMEM;
847
848 dev_hold(dev);
849 rcu_assign_pointer(ipn->tunnels_wc[0], tunnel);
850 return 0;
851} 302}
852 303
853static void ipip_netlink_parms(struct nlattr *data[], 304static void ipip_netlink_parms(struct nlattr *data[],
@@ -887,28 +338,16 @@ static void ipip_netlink_parms(struct nlattr *data[],
887static int ipip_newlink(struct net *src_net, struct net_device *dev, 338static int ipip_newlink(struct net *src_net, struct net_device *dev,
888 struct nlattr *tb[], struct nlattr *data[]) 339 struct nlattr *tb[], struct nlattr *data[])
889{ 340{
890 struct net *net = dev_net(dev); 341 struct ip_tunnel_parm p;
891 struct ip_tunnel *nt;
892
893 nt = netdev_priv(dev);
894 ipip_netlink_parms(data, &nt->parms);
895
896 if (ipip_tunnel_locate(net, &nt->parms, 0))
897 return -EEXIST;
898 342
899 return ipip_tunnel_create(dev); 343 ipip_netlink_parms(data, &p);
344 return ip_tunnel_newlink(dev, tb, &p);
900} 345}
901 346
902static int ipip_changelink(struct net_device *dev, struct nlattr *tb[], 347static int ipip_changelink(struct net_device *dev, struct nlattr *tb[],
903 struct nlattr *data[]) 348 struct nlattr *data[])
904{ 349{
905 struct ip_tunnel *t;
906 struct ip_tunnel_parm p; 350 struct ip_tunnel_parm p;
907 struct net *net = dev_net(dev);
908 struct ipip_net *ipn = net_generic(net, ipip_net_id);
909
910 if (dev == ipn->fb_tunnel_dev)
911 return -EINVAL;
912 351
913 ipip_netlink_parms(data, &p); 352 ipip_netlink_parms(data, &p);
914 353
@@ -916,16 +355,7 @@ static int ipip_changelink(struct net_device *dev, struct nlattr *tb[],
916 (!(dev->flags & IFF_POINTOPOINT) && p.iph.daddr)) 355 (!(dev->flags & IFF_POINTOPOINT) && p.iph.daddr))
917 return -EINVAL; 356 return -EINVAL;
918 357
919 t = ipip_tunnel_locate(net, &p, 0); 358 return ip_tunnel_changelink(dev, tb, &p);
920
921 if (t) {
922 if (t->dev != dev)
923 return -EEXIST;
924 } else
925 t = netdev_priv(dev);
926
927 ipip_tunnel_update(t, &p);
928 return 0;
929} 359}
930 360
931static size_t ipip_get_size(const struct net_device *dev) 361static size_t ipip_get_size(const struct net_device *dev)
@@ -982,6 +412,7 @@ static struct rtnl_link_ops ipip_link_ops __read_mostly = {
982 .setup = ipip_tunnel_setup, 412 .setup = ipip_tunnel_setup,
983 .newlink = ipip_newlink, 413 .newlink = ipip_newlink,
984 .changelink = ipip_changelink, 414 .changelink = ipip_changelink,
415 .dellink = ip_tunnel_dellink,
985 .get_size = ipip_get_size, 416 .get_size = ipip_get_size,
986 .fill_info = ipip_fill_info, 417 .fill_info = ipip_fill_info,
987}; 418};
@@ -992,90 +423,29 @@ static struct xfrm_tunnel ipip_handler __read_mostly = {
992 .priority = 1, 423 .priority = 1,
993}; 424};
994 425
995static const char banner[] __initconst =
996 KERN_INFO "IPv4 over IPv4 tunneling driver\n";
997
998static void ipip_destroy_tunnels(struct ipip_net *ipn, struct list_head *head)
999{
1000 int prio;
1001
1002 for (prio = 1; prio < 4; prio++) {
1003 int h;
1004 for (h = 0; h < HASH_SIZE; h++) {
1005 struct ip_tunnel *t;
1006
1007 t = rtnl_dereference(ipn->tunnels[prio][h]);
1008 while (t != NULL) {
1009 unregister_netdevice_queue(t->dev, head);
1010 t = rtnl_dereference(t->next);
1011 }
1012 }
1013 }
1014}
1015
1016static int __net_init ipip_init_net(struct net *net) 426static int __net_init ipip_init_net(struct net *net)
1017{ 427{
1018 struct ipip_net *ipn = net_generic(net, ipip_net_id); 428 return ip_tunnel_init_net(net, ipip_net_id, &ipip_link_ops, "tunl0");
1019 struct ip_tunnel *t;
1020 int err;
1021
1022 ipn->tunnels[0] = ipn->tunnels_wc;
1023 ipn->tunnels[1] = ipn->tunnels_l;
1024 ipn->tunnels[2] = ipn->tunnels_r;
1025 ipn->tunnels[3] = ipn->tunnels_r_l;
1026
1027 ipn->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel),
1028 "tunl0",
1029 ipip_tunnel_setup);
1030 if (!ipn->fb_tunnel_dev) {
1031 err = -ENOMEM;
1032 goto err_alloc_dev;
1033 }
1034 dev_net_set(ipn->fb_tunnel_dev, net);
1035
1036 err = ipip_fb_tunnel_init(ipn->fb_tunnel_dev);
1037 if (err)
1038 goto err_reg_dev;
1039
1040 if ((err = register_netdev(ipn->fb_tunnel_dev)))
1041 goto err_reg_dev;
1042
1043 t = netdev_priv(ipn->fb_tunnel_dev);
1044
1045 strcpy(t->parms.name, ipn->fb_tunnel_dev->name);
1046 return 0;
1047
1048err_reg_dev:
1049 ipip_dev_free(ipn->fb_tunnel_dev);
1050err_alloc_dev:
1051 /* nothing */
1052 return err;
1053} 429}
1054 430
1055static void __net_exit ipip_exit_net(struct net *net) 431static void __net_exit ipip_exit_net(struct net *net)
1056{ 432{
1057 struct ipip_net *ipn = net_generic(net, ipip_net_id); 433 struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
1058 LIST_HEAD(list); 434 ip_tunnel_delete_net(itn);
1059
1060 rtnl_lock();
1061 ipip_destroy_tunnels(ipn, &list);
1062 unregister_netdevice_queue(ipn->fb_tunnel_dev, &list);
1063 unregister_netdevice_many(&list);
1064 rtnl_unlock();
1065} 435}
1066 436
1067static struct pernet_operations ipip_net_ops = { 437static struct pernet_operations ipip_net_ops = {
1068 .init = ipip_init_net, 438 .init = ipip_init_net,
1069 .exit = ipip_exit_net, 439 .exit = ipip_exit_net,
1070 .id = &ipip_net_id, 440 .id = &ipip_net_id,
1071 .size = sizeof(struct ipip_net), 441 .size = sizeof(struct ip_tunnel_net),
1072}; 442};
1073 443
1074static int __init ipip_init(void) 444static int __init ipip_init(void)
1075{ 445{
1076 int err; 446 int err;
1077 447
1078 printk(banner); 448 pr_info("ipip: IPv4 over IPv4 tunneling driver\n");
1079 449
1080 err = register_pernet_device(&ipip_net_ops); 450 err = register_pernet_device(&ipip_net_ops);
1081 if (err < 0) 451 if (err < 0)
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 5f95b3aa579e..9d9610ae7855 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -61,7 +61,7 @@
61#include <linux/netfilter_ipv4.h> 61#include <linux/netfilter_ipv4.h>
62#include <linux/compat.h> 62#include <linux/compat.h>
63#include <linux/export.h> 63#include <linux/export.h>
64#include <net/ipip.h> 64#include <net/ip_tunnels.h>
65#include <net/checksum.h> 65#include <net/checksum.h>
66#include <net/netlink.h> 66#include <net/netlink.h>
67#include <net/fib_rules.h> 67#include <net/fib_rules.h>
@@ -626,9 +626,9 @@ static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
626 if (ip_hdr(skb)->version == 0) { 626 if (ip_hdr(skb)->version == 0) {
627 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); 627 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
628 nlh->nlmsg_type = NLMSG_ERROR; 628 nlh->nlmsg_type = NLMSG_ERROR;
629 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr)); 629 nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
630 skb_trim(skb, nlh->nlmsg_len); 630 skb_trim(skb, nlh->nlmsg_len);
631 e = NLMSG_DATA(nlh); 631 e = nlmsg_data(nlh);
632 e->error = -ETIMEDOUT; 632 e->error = -ETIMEDOUT;
633 memset(&e->msg, 0, sizeof(e->msg)); 633 memset(&e->msg, 0, sizeof(e->msg));
634 634
@@ -910,14 +910,14 @@ static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
910 if (ip_hdr(skb)->version == 0) { 910 if (ip_hdr(skb)->version == 0) {
911 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); 911 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
912 912
913 if (__ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) { 913 if (__ipmr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
914 nlh->nlmsg_len = skb_tail_pointer(skb) - 914 nlh->nlmsg_len = skb_tail_pointer(skb) -
915 (u8 *)nlh; 915 (u8 *)nlh;
916 } else { 916 } else {
917 nlh->nlmsg_type = NLMSG_ERROR; 917 nlh->nlmsg_type = NLMSG_ERROR;
918 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr)); 918 nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
919 skb_trim(skb, nlh->nlmsg_len); 919 skb_trim(skb, nlh->nlmsg_len);
920 e = NLMSG_DATA(nlh); 920 e = nlmsg_data(nlh);
921 e->error = -EMSGSIZE; 921 e->error = -EMSGSIZE;
922 memset(&e->msg, 0, sizeof(e->msg)); 922 memset(&e->msg, 0, sizeof(e->msg));
923 } 923 }
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
index 79ca5e70d497..eadab1ed6500 100644
--- a/net/ipv4/netfilter/arptable_filter.c
+++ b/net/ipv4/netfilter/arptable_filter.c
@@ -48,9 +48,7 @@ static int __net_init arptable_filter_net_init(struct net *net)
48 net->ipv4.arptable_filter = 48 net->ipv4.arptable_filter =
49 arpt_register_table(net, &packet_filter, repl); 49 arpt_register_table(net, &packet_filter, repl);
50 kfree(repl); 50 kfree(repl);
51 if (IS_ERR(net->ipv4.arptable_filter)) 51 return PTR_RET(net->ipv4.arptable_filter);
52 return PTR_ERR(net->ipv4.arptable_filter);
53 return 0;
54} 52}
55 53
56static void __net_exit arptable_filter_net_exit(struct net *net) 54static void __net_exit arptable_filter_net_exit(struct net *net)
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 3efcf87400c3..e391db1f056d 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -182,8 +182,7 @@ ipt_get_target_c(const struct ipt_entry *e)
182 return ipt_get_target((struct ipt_entry *)e); 182 return ipt_get_target((struct ipt_entry *)e);
183} 183}
184 184
185#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ 185#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
186 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
187static const char *const hooknames[] = { 186static const char *const hooknames[] = {
188 [NF_INET_PRE_ROUTING] = "PREROUTING", 187 [NF_INET_PRE_ROUTING] = "PREROUTING",
189 [NF_INET_LOCAL_IN] = "INPUT", 188 [NF_INET_LOCAL_IN] = "INPUT",
@@ -259,6 +258,7 @@ static void trace_packet(const struct sk_buff *skb,
259 const char *hookname, *chainname, *comment; 258 const char *hookname, *chainname, *comment;
260 const struct ipt_entry *iter; 259 const struct ipt_entry *iter;
261 unsigned int rulenum = 0; 260 unsigned int rulenum = 0;
261 struct net *net = dev_net(in ? in : out);
262 262
263 table_base = private->entries[smp_processor_id()]; 263 table_base = private->entries[smp_processor_id()];
264 root = get_entry(table_base, private->hook_entry[hook]); 264 root = get_entry(table_base, private->hook_entry[hook]);
@@ -271,7 +271,7 @@ static void trace_packet(const struct sk_buff *skb,
271 &chainname, &comment, &rulenum) != 0) 271 &chainname, &comment, &rulenum) != 0)
272 break; 272 break;
273 273
274 nf_log_packet(AF_INET, hook, skb, in, out, &trace_loginfo, 274 nf_log_packet(net, AF_INET, hook, skb, in, out, &trace_loginfo,
275 "TRACE: %s:%s:%s:%u ", 275 "TRACE: %s:%s:%s:%u ",
276 tablename, chainname, comment, rulenum); 276 tablename, chainname, comment, rulenum);
277} 277}
@@ -361,8 +361,7 @@ ipt_do_table(struct sk_buff *skb,
361 t = ipt_get_target(e); 361 t = ipt_get_target(e);
362 IP_NF_ASSERT(t->u.kernel.target); 362 IP_NF_ASSERT(t->u.kernel.target);
363 363
364#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ 364#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
365 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
366 /* The packet is traced: log it */ 365 /* The packet is traced: log it */
367 if (unlikely(skb->nf_trace)) 366 if (unlikely(skb->nf_trace))
368 trace_packet(skb, hook, in, out, 367 trace_packet(skb, hook, in, out,
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index 7d168dcbd135..8799c836ccaa 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -37,7 +37,7 @@
37#include <linux/skbuff.h> 37#include <linux/skbuff.h>
38#include <linux/kernel.h> 38#include <linux/kernel.h>
39#include <linux/timer.h> 39#include <linux/timer.h>
40#include <linux/netlink.h> 40#include <net/netlink.h>
41#include <linux/netdevice.h> 41#include <linux/netdevice.h>
42#include <linux/mm.h> 42#include <linux/mm.h>
43#include <linux/moduleparam.h> 43#include <linux/moduleparam.h>
@@ -45,6 +45,7 @@
45#include <linux/netfilter/x_tables.h> 45#include <linux/netfilter/x_tables.h>
46#include <linux/netfilter_ipv4/ipt_ULOG.h> 46#include <linux/netfilter_ipv4/ipt_ULOG.h>
47#include <net/netfilter/nf_log.h> 47#include <net/netfilter/nf_log.h>
48#include <net/netns/generic.h>
48#include <net/sock.h> 49#include <net/sock.h>
49#include <linux/bitops.h> 50#include <linux/bitops.h>
50#include <asm/unaligned.h> 51#include <asm/unaligned.h>
@@ -78,15 +79,23 @@ typedef struct {
78 struct timer_list timer; /* the timer function */ 79 struct timer_list timer; /* the timer function */
79} ulog_buff_t; 80} ulog_buff_t;
80 81
81static ulog_buff_t ulog_buffers[ULOG_MAXNLGROUPS]; /* array of buffers */ 82static int ulog_net_id __read_mostly;
83struct ulog_net {
84 unsigned int nlgroup[ULOG_MAXNLGROUPS];
85 ulog_buff_t ulog_buffers[ULOG_MAXNLGROUPS];
86 struct sock *nflognl;
87 spinlock_t lock;
88};
82 89
83static struct sock *nflognl; /* our socket */ 90static struct ulog_net *ulog_pernet(struct net *net)
84static DEFINE_SPINLOCK(ulog_lock); /* spinlock */ 91{
92 return net_generic(net, ulog_net_id);
93}
85 94
86/* send one ulog_buff_t to userspace */ 95/* send one ulog_buff_t to userspace */
87static void ulog_send(unsigned int nlgroupnum) 96static void ulog_send(struct ulog_net *ulog, unsigned int nlgroupnum)
88{ 97{
89 ulog_buff_t *ub = &ulog_buffers[nlgroupnum]; 98 ulog_buff_t *ub = &ulog->ulog_buffers[nlgroupnum];
90 99
91 pr_debug("ulog_send: timer is deleting\n"); 100 pr_debug("ulog_send: timer is deleting\n");
92 del_timer(&ub->timer); 101 del_timer(&ub->timer);
@@ -103,7 +112,8 @@ static void ulog_send(unsigned int nlgroupnum)
103 NETLINK_CB(ub->skb).dst_group = nlgroupnum + 1; 112 NETLINK_CB(ub->skb).dst_group = nlgroupnum + 1;
104 pr_debug("throwing %d packets to netlink group %u\n", 113 pr_debug("throwing %d packets to netlink group %u\n",
105 ub->qlen, nlgroupnum + 1); 114 ub->qlen, nlgroupnum + 1);
106 netlink_broadcast(nflognl, ub->skb, 0, nlgroupnum + 1, GFP_ATOMIC); 115 netlink_broadcast(ulog->nflognl, ub->skb, 0, nlgroupnum + 1,
116 GFP_ATOMIC);
107 117
108 ub->qlen = 0; 118 ub->qlen = 0;
109 ub->skb = NULL; 119 ub->skb = NULL;
@@ -114,13 +124,16 @@ static void ulog_send(unsigned int nlgroupnum)
114/* timer function to flush queue in flushtimeout time */ 124/* timer function to flush queue in flushtimeout time */
115static void ulog_timer(unsigned long data) 125static void ulog_timer(unsigned long data)
116{ 126{
127 struct ulog_net *ulog = container_of((void *)data,
128 struct ulog_net,
129 nlgroup[*(unsigned int *)data]);
117 pr_debug("timer function called, calling ulog_send\n"); 130 pr_debug("timer function called, calling ulog_send\n");
118 131
119 /* lock to protect against somebody modifying our structure 132 /* lock to protect against somebody modifying our structure
120 * from ipt_ulog_target at the same time */ 133 * from ipt_ulog_target at the same time */
121 spin_lock_bh(&ulog_lock); 134 spin_lock_bh(&ulog->lock);
122 ulog_send(data); 135 ulog_send(ulog, data);
123 spin_unlock_bh(&ulog_lock); 136 spin_unlock_bh(&ulog->lock);
124} 137}
125 138
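One subtlety in the timer hunk above: ulog_timer() recovers its per-namespace state by treating the timer's data as a pointer into the nlgroup[] array and applying container_of(). For that recovery to work, the setup side has to store the group index in nlgroup[i] and hand the timer the slot's address, not the bare index i that the setup_timer() call in the init hunk further down still passes. A consistent pairing would look like this (a sketch under that assumption, not the literal patch text):

	/* Sketch: timer data carries &ulog->nlgroup[i]; the slot holds i,
	 * so dereferencing data yields the group number and container_of()
	 * yields the enclosing struct ulog_net.
	 */
	for (i = 0; i < ULOG_MAXNLGROUPS; i++) {
		ulog->nlgroup[i] = i;
		setup_timer(&ulog->ulog_buffers[i].timer, ulog_timer,
			    (unsigned long)&ulog->nlgroup[i]);
	}

	static void ulog_timer(unsigned long data)
	{
		unsigned int groupnum = *(unsigned int *)data;
		struct ulog_net *ulog = container_of((void *)data,
						     struct ulog_net,
						     nlgroup[groupnum]);

		spin_lock_bh(&ulog->lock);
		ulog_send(ulog, groupnum);
		spin_unlock_bh(&ulog->lock);
	}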
126static struct sk_buff *ulog_alloc_skb(unsigned int size) 139static struct sk_buff *ulog_alloc_skb(unsigned int size)
@@ -160,6 +173,8 @@ static void ipt_ulog_packet(unsigned int hooknum,
160 size_t size, copy_len; 173 size_t size, copy_len;
161 struct nlmsghdr *nlh; 174 struct nlmsghdr *nlh;
162 struct timeval tv; 175 struct timeval tv;
176 struct net *net = dev_net(in ? in : out);
177 struct ulog_net *ulog = ulog_pernet(net);
163 178
164 /* ffs == find first bit set, necessary because userspace 179 /* ffs == find first bit set, necessary because userspace
165 * is already shifting groupnumber, but we need unshifted. 180 * is already shifting groupnumber, but we need unshifted.
@@ -172,11 +187,11 @@ static void ipt_ulog_packet(unsigned int hooknum,
172 else 187 else
173 copy_len = loginfo->copy_range; 188 copy_len = loginfo->copy_range;
174 189
175 size = NLMSG_SPACE(sizeof(*pm) + copy_len); 190 size = nlmsg_total_size(sizeof(*pm) + copy_len);
176 191
177 ub = &ulog_buffers[groupnum]; 192 ub = &ulog->ulog_buffers[groupnum];
178 193
179 spin_lock_bh(&ulog_lock); 194 spin_lock_bh(&ulog->lock);
180 195
181 if (!ub->skb) { 196 if (!ub->skb) {
182 if (!(ub->skb = ulog_alloc_skb(size))) 197 if (!(ub->skb = ulog_alloc_skb(size)))
@@ -186,7 +201,7 @@ static void ipt_ulog_packet(unsigned int hooknum,
186 /* either the queue len is too high or we don't have 201 /* either the queue len is too high or we don't have
187 * enough room in nlskb left. send it to userspace. */ 202 * enough room in nlskb left. send it to userspace. */
188 203
189 ulog_send(groupnum); 204 ulog_send(ulog, groupnum);
190 205
191 if (!(ub->skb = ulog_alloc_skb(size))) 206 if (!(ub->skb = ulog_alloc_skb(size)))
192 goto alloc_failure; 207 goto alloc_failure;
@@ -260,16 +275,16 @@ static void ipt_ulog_packet(unsigned int hooknum,
260 if (ub->qlen >= loginfo->qthreshold) { 275 if (ub->qlen >= loginfo->qthreshold) {
261 if (loginfo->qthreshold > 1) 276 if (loginfo->qthreshold > 1)
262 nlh->nlmsg_type = NLMSG_DONE; 277 nlh->nlmsg_type = NLMSG_DONE;
263 ulog_send(groupnum); 278 ulog_send(ulog, groupnum);
264 } 279 }
265out_unlock: 280out_unlock:
266 spin_unlock_bh(&ulog_lock); 281 spin_unlock_bh(&ulog->lock);
267 282
268 return; 283 return;
269 284
270alloc_failure: 285alloc_failure:
271 pr_debug("Error building netlink message\n"); 286 pr_debug("Error building netlink message\n");
272 spin_unlock_bh(&ulog_lock); 287 spin_unlock_bh(&ulog->lock);
273} 288}
274 289
275static unsigned int 290static unsigned int
@@ -376,54 +391,43 @@ static struct nf_logger ipt_ulog_logger __read_mostly = {
376 .me = THIS_MODULE, 391 .me = THIS_MODULE,
377}; 392};
378 393
379static int __init ulog_tg_init(void) 394static int __net_init ulog_tg_net_init(struct net *net)
380{ 395{
381 int ret, i; 396 int i;
397 struct ulog_net *ulog = ulog_pernet(net);
382 struct netlink_kernel_cfg cfg = { 398 struct netlink_kernel_cfg cfg = {
383 .groups = ULOG_MAXNLGROUPS, 399 .groups = ULOG_MAXNLGROUPS,
384 }; 400 };
385 401
386 pr_debug("init module\n"); 402 spin_lock_init(&ulog->lock);
387
388 if (nlbufsiz > 128*1024) {
389 pr_warning("Netlink buffer has to be <= 128kB\n");
390 return -EINVAL;
391 }
392
393 /* initialize ulog_buffers */ 403 /* initialize ulog_buffers */
394 for (i = 0; i < ULOG_MAXNLGROUPS; i++) 404 for (i = 0; i < ULOG_MAXNLGROUPS; i++)
395 setup_timer(&ulog_buffers[i].timer, ulog_timer, i); 405 setup_timer(&ulog->ulog_buffers[i].timer, ulog_timer, i);
396 406
397 nflognl = netlink_kernel_create(&init_net, NETLINK_NFLOG, &cfg); 407 ulog->nflognl = netlink_kernel_create(net, NETLINK_NFLOG, &cfg);
398 if (!nflognl) 408 if (!ulog->nflognl)
399 return -ENOMEM; 409 return -ENOMEM;
400 410
401 ret = xt_register_target(&ulog_tg_reg);
402 if (ret < 0) {
403 netlink_kernel_release(nflognl);
404 return ret;
405 }
406 if (nflog) 411 if (nflog)
407 nf_log_register(NFPROTO_IPV4, &ipt_ulog_logger); 412 nf_log_set(net, NFPROTO_IPV4, &ipt_ulog_logger);
408 413
409 return 0; 414 return 0;
410} 415}
411 416
412static void __exit ulog_tg_exit(void) 417static void __net_exit ulog_tg_net_exit(struct net *net)
413{ 418{
414 ulog_buff_t *ub; 419 ulog_buff_t *ub;
415 int i; 420 int i;
416 421 struct ulog_net *ulog = ulog_pernet(net);
417 pr_debug("cleanup_module\n");
418 422
419 if (nflog) 423 if (nflog)
420 nf_log_unregister(&ipt_ulog_logger); 424 nf_log_unset(net, &ipt_ulog_logger);
421 xt_unregister_target(&ulog_tg_reg); 425
422 netlink_kernel_release(nflognl); 426 netlink_kernel_release(ulog->nflognl);
423 427
424 /* remove pending timers and free allocated skb's */ 428 /* remove pending timers and free allocated skb's */
425 for (i = 0; i < ULOG_MAXNLGROUPS; i++) { 429 for (i = 0; i < ULOG_MAXNLGROUPS; i++) {
426 ub = &ulog_buffers[i]; 430 ub = &ulog->ulog_buffers[i];
427 pr_debug("timer is deleting\n"); 431 pr_debug("timer is deleting\n");
428 del_timer(&ub->timer); 432 del_timer(&ub->timer);
429 433
@@ -434,5 +438,50 @@ static void __exit ulog_tg_exit(void)
434 } 438 }
435} 439}
436 440
441static struct pernet_operations ulog_tg_net_ops = {
442 .init = ulog_tg_net_init,
443 .exit = ulog_tg_net_exit,
444 .id = &ulog_net_id,
445 .size = sizeof(struct ulog_net),
446};
447
448static int __init ulog_tg_init(void)
449{
450 int ret;
451 pr_debug("init module\n");
452
453 if (nlbufsiz > 128*1024) {
454 pr_warn("Netlink buffer has to be <= 128kB\n");
455 return -EINVAL;
456 }
457
458 ret = register_pernet_subsys(&ulog_tg_net_ops);
459 if (ret)
460 goto out_pernet;
461
462 ret = xt_register_target(&ulog_tg_reg);
463 if (ret < 0)
464 goto out_target;
465
466 if (nflog)
467 nf_log_register(NFPROTO_IPV4, &ipt_ulog_logger);
468
469 return 0;
470
471out_target:
472 unregister_pernet_subsys(&ulog_tg_net_ops);
473out_pernet:
474 return ret;
475}
476
477static void __exit ulog_tg_exit(void)
478{
479 pr_debug("cleanup_module\n");
480 if (nflog)
481 nf_log_unregister(&ipt_ulog_logger);
482 xt_unregister_target(&ulog_tg_reg);
483 unregister_pernet_subsys(&ulog_tg_net_ops);
484}
485
437module_init(ulog_tg_init); 486module_init(ulog_tg_init);
438module_exit(ulog_tg_exit); 487module_exit(ulog_tg_exit);
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 5241d997ab75..c2cd63d2d892 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -187,8 +187,8 @@ icmp_error(struct net *net, struct nf_conn *tmpl,
187 icmph = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_ih), &_ih); 187 icmph = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_ih), &_ih);
188 if (icmph == NULL) { 188 if (icmph == NULL) {
189 if (LOG_INVALID(net, IPPROTO_ICMP)) 189 if (LOG_INVALID(net, IPPROTO_ICMP))
190 nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL, 190 nf_log_packet(net, PF_INET, 0, skb, NULL, NULL,
191 "nf_ct_icmp: short packet "); 191 NULL, "nf_ct_icmp: short packet ");
192 return -NF_ACCEPT; 192 return -NF_ACCEPT;
193 } 193 }
194 194
@@ -196,7 +196,7 @@ icmp_error(struct net *net, struct nf_conn *tmpl,
196 if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && 196 if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
197 nf_ip_checksum(skb, hooknum, dataoff, 0)) { 197 nf_ip_checksum(skb, hooknum, dataoff, 0)) {
198 if (LOG_INVALID(net, IPPROTO_ICMP)) 198 if (LOG_INVALID(net, IPPROTO_ICMP))
199 nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL, 199 nf_log_packet(net, PF_INET, 0, skb, NULL, NULL, NULL,
200 "nf_ct_icmp: bad HW ICMP checksum "); 200 "nf_ct_icmp: bad HW ICMP checksum ");
201 return -NF_ACCEPT; 201 return -NF_ACCEPT;
202 } 202 }
@@ -209,7 +209,7 @@ icmp_error(struct net *net, struct nf_conn *tmpl,
209 */ 209 */
210 if (icmph->type > NR_ICMP_TYPES) { 210 if (icmph->type > NR_ICMP_TYPES) {
211 if (LOG_INVALID(net, IPPROTO_ICMP)) 211 if (LOG_INVALID(net, IPPROTO_ICMP))
212 nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL, 212 nf_log_packet(net, PF_INET, 0, skb, NULL, NULL, NULL,
213 "nf_ct_icmp: invalid ICMP type "); 213 "nf_ct_icmp: invalid ICMP type ");
214 return -NF_ACCEPT; 214 return -NF_ACCEPT;
215 } 215 }
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 32030a24e776..b6f2ea174898 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -224,6 +224,8 @@ static const struct snmp_mib snmp4_net_list[] = {
224 SNMP_MIB_ITEM("TCPForwardRetrans", LINUX_MIB_TCPFORWARDRETRANS), 224 SNMP_MIB_ITEM("TCPForwardRetrans", LINUX_MIB_TCPFORWARDRETRANS),
225 SNMP_MIB_ITEM("TCPSlowStartRetrans", LINUX_MIB_TCPSLOWSTARTRETRANS), 225 SNMP_MIB_ITEM("TCPSlowStartRetrans", LINUX_MIB_TCPSLOWSTARTRETRANS),
226 SNMP_MIB_ITEM("TCPTimeouts", LINUX_MIB_TCPTIMEOUTS), 226 SNMP_MIB_ITEM("TCPTimeouts", LINUX_MIB_TCPTIMEOUTS),
227 SNMP_MIB_ITEM("TCPLossProbes", LINUX_MIB_TCPLOSSPROBES),
228 SNMP_MIB_ITEM("TCPLossProbeRecovery", LINUX_MIB_TCPLOSSPROBERECOVERY),
227 SNMP_MIB_ITEM("TCPRenoRecoveryFail", LINUX_MIB_TCPRENORECOVERYFAIL), 229 SNMP_MIB_ITEM("TCPRenoRecoveryFail", LINUX_MIB_TCPRENORECOVERYFAIL),
228 SNMP_MIB_ITEM("TCPSackRecoveryFail", LINUX_MIB_TCPSACKRECOVERYFAIL), 230 SNMP_MIB_ITEM("TCPSackRecoveryFail", LINUX_MIB_TCPSACKRECOVERYFAIL),
229 SNMP_MIB_ITEM("TCPSchedulerFailed", LINUX_MIB_TCPSCHEDULERFAILED), 231 SNMP_MIB_ITEM("TCPSchedulerFailed", LINUX_MIB_TCPSCHEDULERFAILED),
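The two new MIB entries surface loss-probe activity in /proc/net/netstat. Like the existing LINUX_MIB_* counters they are bumped with the softirq-safe accessor at the event sites; the recovery counter is incremented in tcp_process_tlp_ack() further below, while the TCPLossProbes site lives on the sender path in tcp_output.c, outside this excerpt:

	/* Per-netns SNMP counter bump at an event site. */
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);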
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 6e2851464f8f..550781a17b34 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2311,7 +2311,7 @@ nla_put_failure:
2311 return -EMSGSIZE; 2311 return -EMSGSIZE;
2312} 2312}
2313 2313
2314static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg) 2314static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
2315{ 2315{
2316 struct net *net = sock_net(in_skb->sk); 2316 struct net *net = sock_net(in_skb->sk);
2317 struct rtmsg *rtm; 2317 struct rtmsg *rtm;
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index ef54377fb11c..7f4a5cb8f8d0 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -267,7 +267,6 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
267 struct ip_options *opt) 267 struct ip_options *opt)
268{ 268{
269 struct tcp_options_received tcp_opt; 269 struct tcp_options_received tcp_opt;
270 const u8 *hash_location;
271 struct inet_request_sock *ireq; 270 struct inet_request_sock *ireq;
272 struct tcp_request_sock *treq; 271 struct tcp_request_sock *treq;
273 struct tcp_sock *tp = tcp_sk(sk); 272 struct tcp_sock *tp = tcp_sk(sk);
@@ -294,7 +293,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
294 293
295 /* check for timestamp cookie support */ 294 /* check for timestamp cookie support */
296 memset(&tcp_opt, 0, sizeof(tcp_opt)); 295 memset(&tcp_opt, 0, sizeof(tcp_opt));
297 tcp_parse_options(skb, &tcp_opt, &hash_location, 0, NULL); 296 tcp_parse_options(skb, &tcp_opt, 0, NULL);
298 297
299 if (!cookie_check_timestamp(&tcp_opt, sock_net(sk), &ecn_ok)) 298 if (!cookie_check_timestamp(&tcp_opt, sock_net(sk), &ecn_ok))
300 goto out; 299 goto out;
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 960fd29d9b8e..fa2f63fc453b 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -28,7 +28,7 @@
28 28
29static int zero; 29static int zero;
30static int one = 1; 30static int one = 1;
31static int two = 2; 31static int four = 4;
32static int tcp_retr1_max = 255; 32static int tcp_retr1_max = 255;
33static int ip_local_port_range_min[] = { 1, 1 }; 33static int ip_local_port_range_min[] = { 1, 1 };
34static int ip_local_port_range_max[] = { 65535, 65535 }; 34static int ip_local_port_range_max[] = { 65535, 65535 };
@@ -592,13 +592,6 @@ static struct ctl_table ipv4_table[] = {
592 .proc_handler = proc_dointvec 592 .proc_handler = proc_dointvec
593 }, 593 },
594 { 594 {
595 .procname = "tcp_frto_response",
596 .data = &sysctl_tcp_frto_response,
597 .maxlen = sizeof(int),
598 .mode = 0644,
599 .proc_handler = proc_dointvec
600 },
601 {
602 .procname = "tcp_low_latency", 595 .procname = "tcp_low_latency",
603 .data = &sysctl_tcp_low_latency, 596 .data = &sysctl_tcp_low_latency,
604 .maxlen = sizeof(int), 597 .maxlen = sizeof(int),
@@ -733,13 +726,6 @@ static struct ctl_table ipv4_table[] = {
733 .proc_handler = proc_dointvec, 726 .proc_handler = proc_dointvec,
734 }, 727 },
735 { 728 {
736 .procname = "tcp_cookie_size",
737 .data = &sysctl_tcp_cookie_size,
738 .maxlen = sizeof(int),
739 .mode = 0644,
740 .proc_handler = proc_dointvec
741 },
742 {
743 .procname = "tcp_thin_linear_timeouts", 729 .procname = "tcp_thin_linear_timeouts",
744 .data = &sysctl_tcp_thin_linear_timeouts, 730 .data = &sysctl_tcp_thin_linear_timeouts,
745 .maxlen = sizeof(int), 731 .maxlen = sizeof(int),
@@ -760,7 +746,7 @@ static struct ctl_table ipv4_table[] = {
760 .mode = 0644, 746 .mode = 0644,
761 .proc_handler = proc_dointvec_minmax, 747 .proc_handler = proc_dointvec_minmax,
762 .extra1 = &zero, 748 .extra1 = &zero,
763 .extra2 = &two, 749 .extra2 = &four,
764 }, 750 },
765 { 751 {
766 .procname = "udp_mem", 752 .procname = "udp_mem",
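Raising the clamp from two to four matches the widened tcp_early_retrans semantics this series introduces (summarized here from the ip-sysctl documentation updated in the same series; treat the exact wording as paraphrase):

	/* tcp_early_retrans now accepts 0..4:
	 * 0 - disabled
	 * 1 - early retransmit (RFC 5827)
	 * 2 - early retransmit, delayed by a short timer
	 * 3 - delayed early retransmit plus TCP loss probe (new default)
	 * 4 - loss probe only
	 */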
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index e22020790709..a96f7b586277 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -409,15 +409,6 @@ void tcp_init_sock(struct sock *sk)
409 409
410 icsk->icsk_sync_mss = tcp_sync_mss; 410 icsk->icsk_sync_mss = tcp_sync_mss;
411 411
412 /* TCP Cookie Transactions */
413 if (sysctl_tcp_cookie_size > 0) {
414 /* Default, cookies without s_data_payload. */
415 tp->cookie_values =
416 kzalloc(sizeof(*tp->cookie_values),
417 sk->sk_allocation);
418 if (tp->cookie_values != NULL)
419 kref_init(&tp->cookie_values->kref);
420 }
421 /* Presumed zeroed, in order of appearance: 412 /* Presumed zeroed, in order of appearance:
422 * cookie_in_always, cookie_out_never, 413 * cookie_in_always, cookie_out_never,
423 * s_data_constant, s_data_in, s_data_out 414 * s_data_constant, s_data_in, s_data_out
@@ -2397,92 +2388,6 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2397 release_sock(sk); 2388 release_sock(sk);
2398 return err; 2389 return err;
2399 } 2390 }
2400 case TCP_COOKIE_TRANSACTIONS: {
2401 struct tcp_cookie_transactions ctd;
2402 struct tcp_cookie_values *cvp = NULL;
2403
2404 if (sizeof(ctd) > optlen)
2405 return -EINVAL;
2406 if (copy_from_user(&ctd, optval, sizeof(ctd)))
2407 return -EFAULT;
2408
2409 if (ctd.tcpct_used > sizeof(ctd.tcpct_value) ||
2410 ctd.tcpct_s_data_desired > TCP_MSS_DESIRED)
2411 return -EINVAL;
2412
2413 if (ctd.tcpct_cookie_desired == 0) {
2414 /* default to global value */
2415 } else if ((0x1 & ctd.tcpct_cookie_desired) ||
2416 ctd.tcpct_cookie_desired > TCP_COOKIE_MAX ||
2417 ctd.tcpct_cookie_desired < TCP_COOKIE_MIN) {
2418 return -EINVAL;
2419 }
2420
2421 if (TCP_COOKIE_OUT_NEVER & ctd.tcpct_flags) {
 2422 /* Supersedes all other values */
2423 lock_sock(sk);
2424 if (tp->cookie_values != NULL) {
2425 kref_put(&tp->cookie_values->kref,
2426 tcp_cookie_values_release);
2427 tp->cookie_values = NULL;
2428 }
2429 tp->rx_opt.cookie_in_always = 0; /* false */
2430 tp->rx_opt.cookie_out_never = 1; /* true */
2431 release_sock(sk);
2432 return err;
2433 }
2434
2435 /* Allocate ancillary memory before locking.
2436 */
2437 if (ctd.tcpct_used > 0 ||
2438 (tp->cookie_values == NULL &&
2439 (sysctl_tcp_cookie_size > 0 ||
2440 ctd.tcpct_cookie_desired > 0 ||
2441 ctd.tcpct_s_data_desired > 0))) {
2442 cvp = kzalloc(sizeof(*cvp) + ctd.tcpct_used,
2443 GFP_KERNEL);
2444 if (cvp == NULL)
2445 return -ENOMEM;
2446
2447 kref_init(&cvp->kref);
2448 }
2449 lock_sock(sk);
2450 tp->rx_opt.cookie_in_always =
2451 (TCP_COOKIE_IN_ALWAYS & ctd.tcpct_flags);
2452 tp->rx_opt.cookie_out_never = 0; /* false */
2453
2454 if (tp->cookie_values != NULL) {
2455 if (cvp != NULL) {
2456 /* Changed values are recorded by a changed
2457 * pointer, ensuring the cookie will differ,
2458 * without separately hashing each value later.
2459 */
2460 kref_put(&tp->cookie_values->kref,
2461 tcp_cookie_values_release);
2462 } else {
2463 cvp = tp->cookie_values;
2464 }
2465 }
2466
2467 if (cvp != NULL) {
2468 cvp->cookie_desired = ctd.tcpct_cookie_desired;
2469
2470 if (ctd.tcpct_used > 0) {
2471 memcpy(cvp->s_data_payload, ctd.tcpct_value,
2472 ctd.tcpct_used);
2473 cvp->s_data_desired = ctd.tcpct_used;
2474 cvp->s_data_constant = 1; /* true */
2475 } else {
2476 /* No constant payload data. */
2477 cvp->s_data_desired = ctd.tcpct_s_data_desired;
2478 cvp->s_data_constant = 0; /* false */
2479 }
2480
2481 tp->cookie_values = cvp;
2482 }
2483 release_sock(sk);
2484 return err;
2485 }
2486 default: 2391 default:
2487 /* fallthru */ 2392 /* fallthru */
2488 break; 2393 break;
@@ -2902,41 +2807,6 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
2902 return -EFAULT; 2807 return -EFAULT;
2903 return 0; 2808 return 0;
2904 2809
2905 case TCP_COOKIE_TRANSACTIONS: {
2906 struct tcp_cookie_transactions ctd;
2907 struct tcp_cookie_values *cvp = tp->cookie_values;
2908
2909 if (get_user(len, optlen))
2910 return -EFAULT;
2911 if (len < sizeof(ctd))
2912 return -EINVAL;
2913
2914 memset(&ctd, 0, sizeof(ctd));
2915 ctd.tcpct_flags = (tp->rx_opt.cookie_in_always ?
2916 TCP_COOKIE_IN_ALWAYS : 0)
2917 | (tp->rx_opt.cookie_out_never ?
2918 TCP_COOKIE_OUT_NEVER : 0);
2919
2920 if (cvp != NULL) {
2921 ctd.tcpct_flags |= (cvp->s_data_in ?
2922 TCP_S_DATA_IN : 0)
2923 | (cvp->s_data_out ?
2924 TCP_S_DATA_OUT : 0);
2925
2926 ctd.tcpct_cookie_desired = cvp->cookie_desired;
2927 ctd.tcpct_s_data_desired = cvp->s_data_desired;
2928
2929 memcpy(&ctd.tcpct_value[0], &cvp->cookie_pair[0],
2930 cvp->cookie_pair_size);
2931 ctd.tcpct_used = cvp->cookie_pair_size;
2932 }
2933
2934 if (put_user(sizeof(ctd), optlen))
2935 return -EFAULT;
2936 if (copy_to_user(optval, &ctd, sizeof(ctd)))
2937 return -EFAULT;
2938 return 0;
2939 }
2940 case TCP_THIN_LINEAR_TIMEOUTS: 2810 case TCP_THIN_LINEAR_TIMEOUTS:
2941 val = tp->thin_lto; 2811 val = tp->thin_lto;
2942 break; 2812 break;
@@ -3044,6 +2914,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
3044 SKB_GSO_TCP_ECN | 2914 SKB_GSO_TCP_ECN |
3045 SKB_GSO_TCPV6 | 2915 SKB_GSO_TCPV6 |
3046 SKB_GSO_GRE | 2916 SKB_GSO_GRE |
2917 SKB_GSO_UDP_TUNNEL |
3047 0) || 2918 0) ||
3048 !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))) 2919 !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
3049 goto out; 2920 goto out;
@@ -3408,134 +3279,6 @@ EXPORT_SYMBOL(tcp_md5_hash_key);
3408 3279
3409#endif 3280#endif
3410 3281
3411/* Each Responder maintains up to two secret values concurrently for
3412 * efficient secret rollover. Each secret value has 4 states:
3413 *
3414 * Generating. (tcp_secret_generating != tcp_secret_primary)
3415 * Generates new Responder-Cookies, but not yet used for primary
3416 * verification. This is a short-term state, typically lasting only
3417 * one round trip time (RTT).
3418 *
3419 * Primary. (tcp_secret_generating == tcp_secret_primary)
3420 * Used both for generation and primary verification.
3421 *
3422 * Retiring. (tcp_secret_retiring != tcp_secret_secondary)
3423 * Used for verification, until the first failure that can be
3424 * verified by the newer Generating secret. At that time, this
3425 * cookie's state is changed to Secondary, and the Generating
3426 * cookie's state is changed to Primary. This is a short-term state,
3427 * typically lasting only one round trip time (RTT).
3428 *
3429 * Secondary. (tcp_secret_retiring == tcp_secret_secondary)
3430 * Used for secondary verification, after primary verification
3431 * failures. This state lasts no more than twice the Maximum Segment
3432 * Lifetime (2MSL). Then, the secret is discarded.
3433 */
3434struct tcp_cookie_secret {
3435 /* The secret is divided into two parts. The digest part is the
3436 * equivalent of previously hashing a secret and saving the state,
3437 * and serves as an initialization vector (IV). The message part
3438 * serves as the trailing secret.
3439 */
3440 u32 secrets[COOKIE_WORKSPACE_WORDS];
3441 unsigned long expires;
3442};
3443
3444#define TCP_SECRET_1MSL (HZ * TCP_PAWS_MSL)
3445#define TCP_SECRET_2MSL (HZ * TCP_PAWS_MSL * 2)
3446#define TCP_SECRET_LIFE (HZ * 600)
3447
3448static struct tcp_cookie_secret tcp_secret_one;
3449static struct tcp_cookie_secret tcp_secret_two;
3450
3451/* Essentially a circular list, without dynamic allocation. */
3452static struct tcp_cookie_secret *tcp_secret_generating;
3453static struct tcp_cookie_secret *tcp_secret_primary;
3454static struct tcp_cookie_secret *tcp_secret_retiring;
3455static struct tcp_cookie_secret *tcp_secret_secondary;
3456
3457static DEFINE_SPINLOCK(tcp_secret_locker);
3458
3459/* Select a pseudo-random word in the cookie workspace.
3460 */
3461static inline u32 tcp_cookie_work(const u32 *ws, const int n)
3462{
3463 return ws[COOKIE_DIGEST_WORDS + ((COOKIE_MESSAGE_WORDS-1) & ws[n])];
3464}
3465
3466/* Fill bakery[COOKIE_WORKSPACE_WORDS] with generator, updating as needed.
3467 * Called in softirq context.
3468 * Returns: 0 for success.
3469 */
3470int tcp_cookie_generator(u32 *bakery)
3471{
3472 unsigned long jiffy = jiffies;
3473
3474 if (unlikely(time_after_eq(jiffy, tcp_secret_generating->expires))) {
3475 spin_lock_bh(&tcp_secret_locker);
3476 if (!time_after_eq(jiffy, tcp_secret_generating->expires)) {
3477 /* refreshed by another */
3478 memcpy(bakery,
3479 &tcp_secret_generating->secrets[0],
3480 COOKIE_WORKSPACE_WORDS);
3481 } else {
3482 /* still needs refreshing */
3483 get_random_bytes(bakery, COOKIE_WORKSPACE_WORDS);
3484
3485 /* The first time, paranoia assumes that the
3486 * randomization function isn't as strong. But,
3487 * this secret initialization is delayed until
3488 * the last possible moment (packet arrival).
3489 * Although that time is observable, it is
3490 * unpredictably variable. Mash in the most
3491 * volatile clock bits available, and expire the
3492 * secret extra quickly.
3493 */
3494 if (unlikely(tcp_secret_primary->expires ==
3495 tcp_secret_secondary->expires)) {
3496 struct timespec tv;
3497
3498 getnstimeofday(&tv);
3499 bakery[COOKIE_DIGEST_WORDS+0] ^=
3500 (u32)tv.tv_nsec;
3501
3502 tcp_secret_secondary->expires = jiffy
3503 + TCP_SECRET_1MSL
3504 + (0x0f & tcp_cookie_work(bakery, 0));
3505 } else {
3506 tcp_secret_secondary->expires = jiffy
3507 + TCP_SECRET_LIFE
3508 + (0xff & tcp_cookie_work(bakery, 1));
3509 tcp_secret_primary->expires = jiffy
3510 + TCP_SECRET_2MSL
3511 + (0x1f & tcp_cookie_work(bakery, 2));
3512 }
3513 memcpy(&tcp_secret_secondary->secrets[0],
3514 bakery, COOKIE_WORKSPACE_WORDS);
3515
3516 rcu_assign_pointer(tcp_secret_generating,
3517 tcp_secret_secondary);
3518 rcu_assign_pointer(tcp_secret_retiring,
3519 tcp_secret_primary);
3520 /*
3521 * Neither call_rcu() nor synchronize_rcu() needed.
3522 * Retiring data is not freed. It is replaced after
3523 * further (locked) pointer updates, and a quiet time
3524 * (minimum 1MSL, maximum LIFE - 2MSL).
3525 */
3526 }
3527 spin_unlock_bh(&tcp_secret_locker);
3528 } else {
3529 rcu_read_lock_bh();
3530 memcpy(bakery,
3531 &rcu_dereference(tcp_secret_generating)->secrets[0],
3532 COOKIE_WORKSPACE_WORDS);
3533 rcu_read_unlock_bh();
3534 }
3535 return 0;
3536}
3537EXPORT_SYMBOL(tcp_cookie_generator);
3538
3539void tcp_done(struct sock *sk) 3282void tcp_done(struct sock *sk)
3540{ 3283{
3541 struct request_sock *req = tcp_sk(sk)->fastopen_rsk; 3284 struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
@@ -3590,7 +3333,6 @@ void __init tcp_init(void)
3590 unsigned long limit; 3333 unsigned long limit;
3591 int max_rshare, max_wshare, cnt; 3334 int max_rshare, max_wshare, cnt;
3592 unsigned int i; 3335 unsigned int i;
3593 unsigned long jiffy = jiffies;
3594 3336
3595 BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb)); 3337 BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
3596 3338
@@ -3666,13 +3408,5 @@ void __init tcp_init(void)
3666 3408
3667 tcp_register_congestion_control(&tcp_reno); 3409 tcp_register_congestion_control(&tcp_reno);
3668 3410
3669 memset(&tcp_secret_one.secrets[0], 0, sizeof(tcp_secret_one.secrets));
3670 memset(&tcp_secret_two.secrets[0], 0, sizeof(tcp_secret_two.secrets));
3671 tcp_secret_one.expires = jiffy; /* past due */
3672 tcp_secret_two.expires = jiffy; /* past due */
3673 tcp_secret_generating = &tcp_secret_one;
3674 tcp_secret_primary = &tcp_secret_one;
3675 tcp_secret_retiring = &tcp_secret_two;
3676 tcp_secret_secondary = &tcp_secret_two;
3677 tcp_tasklet_init(); 3411 tcp_tasklet_init();
3678} 3412}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3bd55bad230a..6d9ca35f0c35 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -93,12 +93,11 @@ int sysctl_tcp_stdurg __read_mostly;
93int sysctl_tcp_rfc1337 __read_mostly; 93int sysctl_tcp_rfc1337 __read_mostly;
94int sysctl_tcp_max_orphans __read_mostly = NR_FILE; 94int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
95int sysctl_tcp_frto __read_mostly = 2; 95int sysctl_tcp_frto __read_mostly = 2;
96int sysctl_tcp_frto_response __read_mostly;
97 96
98int sysctl_tcp_thin_dupack __read_mostly; 97int sysctl_tcp_thin_dupack __read_mostly;
99 98
100int sysctl_tcp_moderate_rcvbuf __read_mostly = 1; 99int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
101int sysctl_tcp_early_retrans __read_mostly = 2; 100int sysctl_tcp_early_retrans __read_mostly = 3;
102 101
103#define FLAG_DATA 0x01 /* Incoming frame contained data. */ 102#define FLAG_DATA 0x01 /* Incoming frame contained data. */
104#define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */ 103#define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */
@@ -108,17 +107,15 @@ int sysctl_tcp_early_retrans __read_mostly = 2;
108#define FLAG_DATA_SACKED 0x20 /* New SACK. */ 107#define FLAG_DATA_SACKED 0x20 /* New SACK. */
109#define FLAG_ECE 0x40 /* ECE in this ACK */ 108#define FLAG_ECE 0x40 /* ECE in this ACK */
110#define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/ 109#define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/
111#define FLAG_ONLY_ORIG_SACKED 0x200 /* SACKs only non-rexmit sent before RTO */ 110#define FLAG_ORIG_SACK_ACKED 0x200 /* Never retransmitted data are (s)acked */
112#define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */ 111#define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
113#define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */ 112#define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */
114#define FLAG_NONHEAD_RETRANS_ACKED 0x1000 /* Non-head rexmitted data was ACKed */
115#define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */ 113#define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */
116 114
117#define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED) 115#define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
118#define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED) 116#define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
119#define FLAG_CA_ALERT (FLAG_DATA_SACKED|FLAG_ECE) 117#define FLAG_CA_ALERT (FLAG_DATA_SACKED|FLAG_ECE)
120#define FLAG_FORWARD_PROGRESS (FLAG_ACKED|FLAG_DATA_SACKED) 118#define FLAG_FORWARD_PROGRESS (FLAG_ACKED|FLAG_DATA_SACKED)
121#define FLAG_ANY_PROGRESS (FLAG_FORWARD_PROGRESS|FLAG_SND_UNA_ADVANCED)
122 119
123#define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH) 120#define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
124#define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH)) 121#define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH))
@@ -1159,10 +1156,8 @@ static u8 tcp_sacktag_one(struct sock *sk,
1159 tcp_highest_sack_seq(tp))) 1156 tcp_highest_sack_seq(tp)))
1160 state->reord = min(fack_count, 1157 state->reord = min(fack_count,
1161 state->reord); 1158 state->reord);
1162 1159 if (!after(end_seq, tp->high_seq))
1163 /* SACK enhanced F-RTO (RFC4138; Appendix B) */ 1160 state->flag |= FLAG_ORIG_SACK_ACKED;
1164 if (!after(end_seq, tp->frto_highmark))
1165 state->flag |= FLAG_ONLY_ORIG_SACKED;
1166 } 1161 }
1167 1162
1168 if (sacked & TCPCB_LOST) { 1163 if (sacked & TCPCB_LOST) {
@@ -1555,7 +1550,6 @@ static int
1555tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, 1550tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
1556 u32 prior_snd_una) 1551 u32 prior_snd_una)
1557{ 1552{
1558 const struct inet_connection_sock *icsk = inet_csk(sk);
1559 struct tcp_sock *tp = tcp_sk(sk); 1553 struct tcp_sock *tp = tcp_sk(sk);
1560 const unsigned char *ptr = (skb_transport_header(ack_skb) + 1554 const unsigned char *ptr = (skb_transport_header(ack_skb) +
1561 TCP_SKB_CB(ack_skb)->sacked); 1555 TCP_SKB_CB(ack_skb)->sacked);
@@ -1728,12 +1722,6 @@ walk:
1728 start_seq, end_seq, dup_sack); 1722 start_seq, end_seq, dup_sack);
1729 1723
1730advance_sp: 1724advance_sp:
1731 /* SACK enhanced FRTO (RFC4138, Appendix B): Clearing correct
1732 * due to in-order walk
1733 */
1734 if (after(end_seq, tp->frto_highmark))
1735 state.flag &= ~FLAG_ONLY_ORIG_SACKED;
1736
1737 i++; 1725 i++;
1738 } 1726 }
1739 1727
@@ -1750,8 +1738,7 @@ advance_sp:
1750 tcp_verify_left_out(tp); 1738 tcp_verify_left_out(tp);
1751 1739
1752 if ((state.reord < tp->fackets_out) && 1740 if ((state.reord < tp->fackets_out) &&
1753 ((icsk->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker) && 1741 ((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker))
1754 (!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark)))
1755 tcp_update_reordering(sk, tp->fackets_out - state.reord, 0); 1742 tcp_update_reordering(sk, tp->fackets_out - state.reord, 0);
1756 1743
1757out: 1744out:
@@ -1825,197 +1812,6 @@ static inline void tcp_reset_reno_sack(struct tcp_sock *tp)
1825 tp->sacked_out = 0; 1812 tp->sacked_out = 0;
1826} 1813}
1827 1814
1828static int tcp_is_sackfrto(const struct tcp_sock *tp)
1829{
1830 return (sysctl_tcp_frto == 0x2) && !tcp_is_reno(tp);
1831}
1832
1833/* F-RTO can only be used if TCP has never retransmitted anything other than
1834 * head (SACK enhanced variant from Appendix B of RFC4138 is more robust here)
1835 */
1836bool tcp_use_frto(struct sock *sk)
1837{
1838 const struct tcp_sock *tp = tcp_sk(sk);
1839 const struct inet_connection_sock *icsk = inet_csk(sk);
1840 struct sk_buff *skb;
1841
1842 if (!sysctl_tcp_frto)
1843 return false;
1844
1845 /* MTU probe and F-RTO won't really play nicely along currently */
1846 if (icsk->icsk_mtup.probe_size)
1847 return false;
1848
1849 if (tcp_is_sackfrto(tp))
1850 return true;
1851
1852 /* Avoid expensive walking of rexmit queue if possible */
1853 if (tp->retrans_out > 1)
1854 return false;
1855
1856 skb = tcp_write_queue_head(sk);
1857 if (tcp_skb_is_last(sk, skb))
1858 return true;
1859 skb = tcp_write_queue_next(sk, skb); /* Skips head */
1860 tcp_for_write_queue_from(skb, sk) {
1861 if (skb == tcp_send_head(sk))
1862 break;
1863 if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
1864 return false;
1865 /* Short-circuit when first non-SACKed skb has been checked */
1866 if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
1867 break;
1868 }
1869 return true;
1870}
1871
1872/* RTO occurred, but do not yet enter Loss state. Instead, defer RTO
1873 * recovery a bit and use heuristics in tcp_process_frto() to detect if
1874 * the RTO was spurious. Only clear SACKED_RETRANS of the head here to
1875 * keep retrans_out counting accurate (with SACK F-RTO, other than head
1876 * may still have that bit set); TCPCB_LOST and remaining SACKED_RETRANS
1877 * bits are handled if the Loss state is really to be entered (in
1878 * tcp_enter_frto_loss).
1879 *
1880 * Do like tcp_enter_loss() would; when RTO expires the second time it
1881 * does:
1882 * "Reduce ssthresh if it has not yet been made inside this window."
1883 */
1884void tcp_enter_frto(struct sock *sk)
1885{
1886 const struct inet_connection_sock *icsk = inet_csk(sk);
1887 struct tcp_sock *tp = tcp_sk(sk);
1888 struct sk_buff *skb;
1889
1890 if ((!tp->frto_counter && icsk->icsk_ca_state <= TCP_CA_Disorder) ||
1891 tp->snd_una == tp->high_seq ||
1892 ((icsk->icsk_ca_state == TCP_CA_Loss || tp->frto_counter) &&
1893 !icsk->icsk_retransmits)) {
1894 tp->prior_ssthresh = tcp_current_ssthresh(sk);
1895 /* Our state is too optimistic in ssthresh() call because cwnd
1896 * is not reduced until tcp_enter_frto_loss() when previous F-RTO
1897 * recovery has not yet completed. Pattern would be this: RTO,
1898 * Cumulative ACK, RTO (2xRTO for the same segment does not end
1899 * up here twice).
1900 * RFC4138 should be more specific on what to do, even though
1901 * RTO is quite unlikely to occur after the first Cumulative ACK
1902 * due to back-off and complexity of triggering events ...
1903 */
1904 if (tp->frto_counter) {
1905 u32 stored_cwnd;
1906 stored_cwnd = tp->snd_cwnd;
1907 tp->snd_cwnd = 2;
1908 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
1909 tp->snd_cwnd = stored_cwnd;
1910 } else {
1911 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
1912 }
1913 /* ... in theory, cong.control module could do "any tricks" in
1914 * ssthresh(), which means that ca_state, lost bits and lost_out
1915 * counter would have to be faked before the call occurs. We
1916 * consider that too expensive, unlikely and hacky, so modules
1917 * using these in ssthresh() must deal these incompatibility
1918 * issues if they receives CA_EVENT_FRTO and frto_counter != 0
1919 */
1920 tcp_ca_event(sk, CA_EVENT_FRTO);
1921 }
1922
1923 tp->undo_marker = tp->snd_una;
1924 tp->undo_retrans = 0;
1925
1926 skb = tcp_write_queue_head(sk);
1927 if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
1928 tp->undo_marker = 0;
1929 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
1930 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
1931 tp->retrans_out -= tcp_skb_pcount(skb);
1932 }
1933 tcp_verify_left_out(tp);
1934
1935 /* Too bad if TCP was application limited */
1936 tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1);
1937
1938 /* Earlier loss recovery underway (see RFC4138; Appendix B).
1939 * The last condition is necessary at least in tp->frto_counter case.
1940 */
1941 if (tcp_is_sackfrto(tp) && (tp->frto_counter ||
1942 ((1 << icsk->icsk_ca_state) & (TCPF_CA_Recovery|TCPF_CA_Loss))) &&
1943 after(tp->high_seq, tp->snd_una)) {
1944 tp->frto_highmark = tp->high_seq;
1945 } else {
1946 tp->frto_highmark = tp->snd_nxt;
1947 }
1948 tcp_set_ca_state(sk, TCP_CA_Disorder);
1949 tp->high_seq = tp->snd_nxt;
1950 tp->frto_counter = 1;
1951}
1952
1953/* Enter Loss state after F-RTO was applied. Dupack arrived after RTO,
1954 * which indicates that we should follow the traditional RTO recovery,
1955 * i.e. mark everything lost and do go-back-N retransmission.
1956 */
1957static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
1958{
1959 struct tcp_sock *tp = tcp_sk(sk);
1960 struct sk_buff *skb;
1961
1962 tp->lost_out = 0;
1963 tp->retrans_out = 0;
1964 if (tcp_is_reno(tp))
1965 tcp_reset_reno_sack(tp);
1966
1967 tcp_for_write_queue(skb, sk) {
1968 if (skb == tcp_send_head(sk))
1969 break;
1970
1971 TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
1972 /*
1973 * Count the retransmission made on RTO correctly (only when
1974 * waiting for the first ACK and did not get it)...
1975 */
1976 if ((tp->frto_counter == 1) && !(flag & FLAG_DATA_ACKED)) {
1977 /* For some reason this R-bit might get cleared? */
1978 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
1979 tp->retrans_out += tcp_skb_pcount(skb);
1980 /* ...enter this if branch just for the first segment */
1981 flag |= FLAG_DATA_ACKED;
1982 } else {
1983 if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
1984 tp->undo_marker = 0;
1985 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
1986 }
1987
1988 /* Marking forward transmissions that were made after RTO lost
1989 * can cause unnecessary retransmissions in some scenarios,
1990 * SACK blocks will mitigate that in some but not in all cases.
1991 * We used to not mark them but it was causing break-ups with
 1992 * receivers that only handle in-order delivery.
1993 *
1994 * TODO: we could detect presence of such receiver and select
1995 * different behavior per flow.
1996 */
1997 if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
1998 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1999 tp->lost_out += tcp_skb_pcount(skb);
2000 tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
2001 }
2002 }
2003 tcp_verify_left_out(tp);
2004
2005 tp->snd_cwnd = tcp_packets_in_flight(tp) + allowed_segments;
2006 tp->snd_cwnd_cnt = 0;
2007 tp->snd_cwnd_stamp = tcp_time_stamp;
2008 tp->frto_counter = 0;
2009
2010 tp->reordering = min_t(unsigned int, tp->reordering,
2011 sysctl_tcp_reordering);
2012 tcp_set_ca_state(sk, TCP_CA_Loss);
2013 tp->high_seq = tp->snd_nxt;
2014 TCP_ECN_queue_cwr(tp);
2015
2016 tcp_clear_all_retrans_hints(tp);
2017}
2018
2019static void tcp_clear_retrans_partial(struct tcp_sock *tp) 1815static void tcp_clear_retrans_partial(struct tcp_sock *tp)
2020{ 1816{
2021 tp->retrans_out = 0; 1817 tp->retrans_out = 0;
@@ -2042,10 +1838,13 @@ void tcp_enter_loss(struct sock *sk, int how)
2042 const struct inet_connection_sock *icsk = inet_csk(sk); 1838 const struct inet_connection_sock *icsk = inet_csk(sk);
2043 struct tcp_sock *tp = tcp_sk(sk); 1839 struct tcp_sock *tp = tcp_sk(sk);
2044 struct sk_buff *skb; 1840 struct sk_buff *skb;
1841 bool new_recovery = false;
2045 1842
2046 /* Reduce ssthresh if it has not yet been made inside this window. */ 1843 /* Reduce ssthresh if it has not yet been made inside this window. */
2047 if (icsk->icsk_ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq || 1844 if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
1845 !after(tp->high_seq, tp->snd_una) ||
2048 (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { 1846 (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
1847 new_recovery = true;
2049 tp->prior_ssthresh = tcp_current_ssthresh(sk); 1848 tp->prior_ssthresh = tcp_current_ssthresh(sk);
2050 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); 1849 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
2051 tcp_ca_event(sk, CA_EVENT_LOSS); 1850 tcp_ca_event(sk, CA_EVENT_LOSS);
@@ -2087,8 +1886,14 @@ void tcp_enter_loss(struct sock *sk, int how)
2087 tcp_set_ca_state(sk, TCP_CA_Loss); 1886 tcp_set_ca_state(sk, TCP_CA_Loss);
2088 tp->high_seq = tp->snd_nxt; 1887 tp->high_seq = tp->snd_nxt;
2089 TCP_ECN_queue_cwr(tp); 1888 TCP_ECN_queue_cwr(tp);
2090 /* Abort F-RTO algorithm if one is in progress */ 1889
2091 tp->frto_counter = 0; 1890 /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
1891 * loss recovery is underway except recurring timeout(s) on
1892 * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
1893 */
1894 tp->frto = sysctl_tcp_frto &&
1895 (new_recovery || icsk->icsk_retransmits) &&
1896 !inet_csk(sk)->icsk_mtup.probe_size;
2092} 1897}
2093 1898
2094/* If ACK arrived pointing to a remembered SACK, it means that our 1899/* If ACK arrived pointing to a remembered SACK, it means that our
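The replacement arms F-RTO exactly once, at RTO time, per RFC 5682 sec 3.1: only when this timeout starts a fresh loss episode (or recurs on the same SND.UNA) and no MTU probe is outstanding. Restated as a predicate, a condensed sketch of the assignment above:

	/* Sketch: the condition under which tcp_enter_loss() sets tp->frto. */
	static bool frto_armed(const struct sock *sk, bool new_recovery)
	{
		const struct inet_connection_sock *icsk = inet_csk(sk);

		return sysctl_tcp_frto &&		/* sysctl enabled */
		       (new_recovery || icsk->icsk_retransmits) &&
		       !icsk->icsk_mtup.probe_size;	/* no MTU probing */
	}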
@@ -2147,15 +1952,16 @@ static bool tcp_pause_early_retransmit(struct sock *sk, int flag)
2147 * max(RTT/4, 2msec) unless ack has ECE mark, no RTT samples 1952 * max(RTT/4, 2msec) unless ack has ECE mark, no RTT samples
2148 * available, or RTO is scheduled to fire first. 1953 * available, or RTO is scheduled to fire first.
2149 */ 1954 */
2150 if (sysctl_tcp_early_retrans < 2 || (flag & FLAG_ECE) || !tp->srtt) 1955 if (sysctl_tcp_early_retrans < 2 || sysctl_tcp_early_retrans > 3 ||
1956 (flag & FLAG_ECE) || !tp->srtt)
2151 return false; 1957 return false;
2152 1958
2153 delay = max_t(unsigned long, (tp->srtt >> 5), msecs_to_jiffies(2)); 1959 delay = max_t(unsigned long, (tp->srtt >> 5), msecs_to_jiffies(2));
2154 if (!time_after(inet_csk(sk)->icsk_timeout, (jiffies + delay))) 1960 if (!time_after(inet_csk(sk)->icsk_timeout, (jiffies + delay)))
2155 return false; 1961 return false;
2156 1962
2157 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, delay, TCP_RTO_MAX); 1963 inet_csk_reset_xmit_timer(sk, ICSK_TIME_EARLY_RETRANS, delay,
2158 tp->early_retrans_delayed = 1; 1964 TCP_RTO_MAX);
2159 return true; 1965 return true;
2160} 1966}
2161 1967
@@ -2271,10 +2077,6 @@ static bool tcp_time_to_recover(struct sock *sk, int flag)
2271 struct tcp_sock *tp = tcp_sk(sk); 2077 struct tcp_sock *tp = tcp_sk(sk);
2272 __u32 packets_out; 2078 __u32 packets_out;
2273 2079
2274 /* Do not perform any recovery during F-RTO algorithm */
2275 if (tp->frto_counter)
2276 return false;
2277
2278 /* Trick#1: The loss is proven. */ 2080 /* Trick#1: The loss is proven. */
2279 if (tp->lost_out) 2081 if (tp->lost_out)
2280 return true; 2082 return true;
@@ -2318,7 +2120,7 @@ static bool tcp_time_to_recover(struct sock *sk, int flag)
2318 * interval if appropriate. 2120 * interval if appropriate.
2319 */ 2121 */
2320 if (tp->do_early_retrans && !tp->retrans_out && tp->sacked_out && 2122 if (tp->do_early_retrans && !tp->retrans_out && tp->sacked_out &&
2321 (tp->packets_out == (tp->sacked_out + 1) && tp->packets_out < 4) && 2123 (tp->packets_out >= (tp->sacked_out + 1) && tp->packets_out < 4) &&
2322 !tcp_may_send_now(sk)) 2124 !tcp_may_send_now(sk))
2323 return !tcp_pause_early_retransmit(sk, flag); 2125 return !tcp_pause_early_retransmit(sk, flag);
2324 2126
@@ -2635,12 +2437,12 @@ static int tcp_try_undo_partial(struct sock *sk, int acked)
2635 return failed; 2437 return failed;
2636} 2438}
2637 2439
2638/* Undo during loss recovery after partial ACK. */ 2440/* Undo during loss recovery after partial ACK or using F-RTO. */
2639static bool tcp_try_undo_loss(struct sock *sk) 2441static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
2640{ 2442{
2641 struct tcp_sock *tp = tcp_sk(sk); 2443 struct tcp_sock *tp = tcp_sk(sk);
2642 2444
2643 if (tcp_may_undo(tp)) { 2445 if (frto_undo || tcp_may_undo(tp)) {
2644 struct sk_buff *skb; 2446 struct sk_buff *skb;
2645 tcp_for_write_queue(skb, sk) { 2447 tcp_for_write_queue(skb, sk) {
2646 if (skb == tcp_send_head(sk)) 2448 if (skb == tcp_send_head(sk))
@@ -2654,9 +2456,12 @@ static bool tcp_try_undo_loss(struct sock *sk)
2654 tp->lost_out = 0; 2456 tp->lost_out = 0;
2655 tcp_undo_cwr(sk, true); 2457 tcp_undo_cwr(sk, true);
2656 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO); 2458 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
2459 if (frto_undo)
2460 NET_INC_STATS_BH(sock_net(sk),
2461 LINUX_MIB_TCPSPURIOUSRTOS);
2657 inet_csk(sk)->icsk_retransmits = 0; 2462 inet_csk(sk)->icsk_retransmits = 0;
2658 tp->undo_marker = 0; 2463 tp->undo_marker = 0;
2659 if (tcp_is_sack(tp)) 2464 if (frto_undo || tcp_is_sack(tp))
2660 tcp_set_ca_state(sk, TCP_CA_Open); 2465 tcp_set_ca_state(sk, TCP_CA_Open);
2661 return true; 2466 return true;
2662 } 2467 }
@@ -2678,6 +2483,7 @@ static void tcp_init_cwnd_reduction(struct sock *sk, const bool set_ssthresh)
2678 struct tcp_sock *tp = tcp_sk(sk); 2483 struct tcp_sock *tp = tcp_sk(sk);
2679 2484
2680 tp->high_seq = tp->snd_nxt; 2485 tp->high_seq = tp->snd_nxt;
2486 tp->tlp_high_seq = 0;
2681 tp->snd_cwnd_cnt = 0; 2487 tp->snd_cwnd_cnt = 0;
2682 tp->prior_cwnd = tp->snd_cwnd; 2488 tp->prior_cwnd = tp->snd_cwnd;
2683 tp->prr_delivered = 0; 2489 tp->prr_delivered = 0;
@@ -2755,7 +2561,7 @@ static void tcp_try_to_open(struct sock *sk, int flag, int newly_acked_sacked)
2755 2561
2756 tcp_verify_left_out(tp); 2562 tcp_verify_left_out(tp);
2757 2563
2758 if (!tp->frto_counter && !tcp_any_retrans_done(sk)) 2564 if (!tcp_any_retrans_done(sk))
2759 tp->retrans_stamp = 0; 2565 tp->retrans_stamp = 0;
2760 2566
2761 if (flag & FLAG_ECE) 2567 if (flag & FLAG_ECE)
@@ -2872,6 +2678,58 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
2872 tcp_set_ca_state(sk, TCP_CA_Recovery); 2678 tcp_set_ca_state(sk, TCP_CA_Recovery);
2873} 2679}
2874 2680
2681/* Process an ACK in CA_Loss state. Move to CA_Open if lost data are
2682 * recovered or spurious. Otherwise retransmits more on partial ACKs.
2683 */
2684static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
2685{
2686 struct inet_connection_sock *icsk = inet_csk(sk);
2687 struct tcp_sock *tp = tcp_sk(sk);
2688 bool recovered = !before(tp->snd_una, tp->high_seq);
2689
2690 if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
2691 if (flag & FLAG_ORIG_SACK_ACKED) {
2692 /* Step 3.b. A timeout is spurious if not all data are
2693 * lost, i.e., never-retransmitted data are (s)acked.
2694 */
2695 tcp_try_undo_loss(sk, true);
2696 return;
2697 }
2698 if (after(tp->snd_nxt, tp->high_seq) &&
2699 (flag & FLAG_DATA_SACKED || is_dupack)) {
2700 tp->frto = 0; /* Loss was real: 2nd part of step 3.a */
2701 } else if (flag & FLAG_SND_UNA_ADVANCED && !recovered) {
2702 tp->high_seq = tp->snd_nxt;
2703 __tcp_push_pending_frames(sk, tcp_current_mss(sk),
2704 TCP_NAGLE_OFF);
2705 if (after(tp->snd_nxt, tp->high_seq))
2706 return; /* Step 2.b */
2707 tp->frto = 0;
2708 }
2709 }
2710
2711 if (recovered) {
2712 /* F-RTO RFC5682 sec 3.1 step 2.a and 1st part of step 3.a */
2713 icsk->icsk_retransmits = 0;
2714 tcp_try_undo_recovery(sk);
2715 return;
2716 }
2717 if (flag & FLAG_DATA_ACKED)
2718 icsk->icsk_retransmits = 0;
2719 if (tcp_is_reno(tp)) {
2720 /* A Reno DUPACK means new data in F-RTO step 2.b above are
 2721 * delivered. Lower inflight to clock out (re)transmissions.
2722 */
2723 if (after(tp->snd_nxt, tp->high_seq) && is_dupack)
2724 tcp_add_reno_sack(sk);
2725 else if (flag & FLAG_SND_UNA_ADVANCED)
2726 tcp_reset_reno_sack(tp);
2727 }
2728 if (tcp_try_undo_loss(sk, false))
2729 return;
2730 tcp_xmit_retransmit_queue(sk);
2731}
2732
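The pivotal test in tcp_process_loss() is FLAG_ORIG_SACK_ACKED: an ACK that (s)acks data TCP never retransmitted can only answer the original flight, so the timeout was spurious and tcp_try_undo_loss(sk, true) reverts the cwnd reduction. A condensed reading of how an ACK is classified while tp->frto is set:

	/* Sketch of the F-RTO decision tree above (RFC 5682 sec 3.1):
	 *  - never-retransmitted data (s)acked  -> spurious RTO, undo (3.b)
	 *  - data sent after the RTO is sacked,
	 *    or a duplicate ACK arrives         -> the loss was real (3.a)
	 *  - otherwise, if snd_una advanced     -> transmit new data and
	 *                                          wait one more ACK (2.b)
	 */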
2875/* Process an event, which can update packets-in-flight not trivially. 2733/* Process an event, which can update packets-in-flight not trivially.
2876 * Main goal of this function is to calculate new estimate for left_out, 2734 * Main goal of this function is to calculate new estimate for left_out,
2877 * taking into account both packets sitting in receiver's buffer and 2735 * taking into account both packets sitting in receiver's buffer and
@@ -2918,12 +2776,6 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
2918 tp->retrans_stamp = 0; 2776 tp->retrans_stamp = 0;
2919 } else if (!before(tp->snd_una, tp->high_seq)) { 2777 } else if (!before(tp->snd_una, tp->high_seq)) {
2920 switch (icsk->icsk_ca_state) { 2778 switch (icsk->icsk_ca_state) {
2921 case TCP_CA_Loss:
2922 icsk->icsk_retransmits = 0;
2923 if (tcp_try_undo_recovery(sk))
2924 return;
2925 break;
2926
2927 case TCP_CA_CWR: 2779 case TCP_CA_CWR:
2928 /* CWR is to be held something *above* high_seq 2780 /* CWR is to be held something *above* high_seq
2929 * is ACKed for CWR bit to reach receiver. */ 2781 * is ACKed for CWR bit to reach receiver. */
@@ -2954,18 +2806,10 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
2954 newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked; 2806 newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
2955 break; 2807 break;
2956 case TCP_CA_Loss: 2808 case TCP_CA_Loss:
2957 if (flag & FLAG_DATA_ACKED) 2809 tcp_process_loss(sk, flag, is_dupack);
2958 icsk->icsk_retransmits = 0;
2959 if (tcp_is_reno(tp) && flag & FLAG_SND_UNA_ADVANCED)
2960 tcp_reset_reno_sack(tp);
2961 if (!tcp_try_undo_loss(sk)) {
2962 tcp_moderate_cwnd(tp);
2963 tcp_xmit_retransmit_queue(sk);
2964 return;
2965 }
2966 if (icsk->icsk_ca_state != TCP_CA_Open) 2810 if (icsk->icsk_ca_state != TCP_CA_Open)
2967 return; 2811 return;
2968 /* Loss is undone; fall through to processing in Open state. */ 2812 /* Fall through to processing in Open state. */
2969 default: 2813 default:
2970 if (tcp_is_reno(tp)) { 2814 if (tcp_is_reno(tp)) {
2971 if (flag & FLAG_SND_UNA_ADVANCED) 2815 if (flag & FLAG_SND_UNA_ADVANCED)
@@ -3078,6 +2922,7 @@ static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
3078 */ 2922 */
3079void tcp_rearm_rto(struct sock *sk) 2923void tcp_rearm_rto(struct sock *sk)
3080{ 2924{
2925 const struct inet_connection_sock *icsk = inet_csk(sk);
3081 struct tcp_sock *tp = tcp_sk(sk); 2926 struct tcp_sock *tp = tcp_sk(sk);
3082 2927
3083 /* If the retrans timer is currently being used by Fast Open 2928 /* If the retrans timer is currently being used by Fast Open
@@ -3091,12 +2936,13 @@ void tcp_rearm_rto(struct sock *sk)
3091 } else { 2936 } else {
3092 u32 rto = inet_csk(sk)->icsk_rto; 2937 u32 rto = inet_csk(sk)->icsk_rto;
3093 /* Offset the time elapsed after installing regular RTO */ 2938 /* Offset the time elapsed after installing regular RTO */
3094 if (tp->early_retrans_delayed) { 2939 if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2940 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
3095 struct sk_buff *skb = tcp_write_queue_head(sk); 2941 struct sk_buff *skb = tcp_write_queue_head(sk);
3096 const u32 rto_time_stamp = TCP_SKB_CB(skb)->when + rto; 2942 const u32 rto_time_stamp = TCP_SKB_CB(skb)->when + rto;
3097 s32 delta = (s32)(rto_time_stamp - tcp_time_stamp); 2943 s32 delta = (s32)(rto_time_stamp - tcp_time_stamp);
3098 /* delta may not be positive if the socket is locked 2944 /* delta may not be positive if the socket is locked
3099 * when the delayed ER timer fires and is rescheduled. 2945 * when the retrans timer fires and is rescheduled.
3100 */ 2946 */
3101 if (delta > 0) 2947 if (delta > 0)
3102 rto = delta; 2948 rto = delta;
@@ -3104,7 +2950,6 @@ void tcp_rearm_rto(struct sock *sk)
3104 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, 2950 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
3105 TCP_RTO_MAX); 2951 TCP_RTO_MAX);
3106 } 2952 }
3107 tp->early_retrans_delayed = 0;
3108} 2953}
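With early_retrans_delayed gone, "is a deferred timer pending?" is read straight from the shared transmit-timer slot: ICSK_TIME_EARLY_RETRANS and the new ICSK_TIME_LOSS_PROBE are simply two more identities that slot can carry. The check this series uses at both call sites:

	/* Sketch: both deferred modes park on the shared retransmit timer. */
	static bool xmit_timer_deferred(const struct sock *sk)
	{
		const struct inet_connection_sock *icsk = inet_csk(sk);

		return icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
		       icsk->icsk_pending == ICSK_TIME_LOSS_PROBE;
	}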
3109 2954
3110/* This function is called when the delayed ER timer fires. TCP enters 2955/* This function is called when the delayed ER timer fires. TCP enters
@@ -3192,8 +3037,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
3192 flag |= FLAG_RETRANS_DATA_ACKED; 3037 flag |= FLAG_RETRANS_DATA_ACKED;
3193 ca_seq_rtt = -1; 3038 ca_seq_rtt = -1;
3194 seq_rtt = -1; 3039 seq_rtt = -1;
3195 if ((flag & FLAG_DATA_ACKED) || (acked_pcount > 1))
3196 flag |= FLAG_NONHEAD_RETRANS_ACKED;
3197 } else { 3040 } else {
3198 ca_seq_rtt = now - scb->when; 3041 ca_seq_rtt = now - scb->when;
3199 last_ackt = skb->tstamp; 3042 last_ackt = skb->tstamp;
@@ -3202,6 +3045,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
3202 } 3045 }
3203 if (!(sacked & TCPCB_SACKED_ACKED)) 3046 if (!(sacked & TCPCB_SACKED_ACKED))
3204 reord = min(pkts_acked, reord); 3047 reord = min(pkts_acked, reord);
3048 if (!after(scb->end_seq, tp->high_seq))
3049 flag |= FLAG_ORIG_SACK_ACKED;
3205 } 3050 }
3206 3051
3207 if (sacked & TCPCB_SACKED_ACKED) 3052 if (sacked & TCPCB_SACKED_ACKED)
@@ -3402,150 +3247,6 @@ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32
3402 return flag; 3247 return flag;
3403} 3248}
3404 3249
3405/* A very conservative spurious RTO response algorithm: reduce cwnd and
3406 * continue in congestion avoidance.
3407 */
3408static void tcp_conservative_spur_to_response(struct tcp_sock *tp)
3409{
3410 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
3411 tp->snd_cwnd_cnt = 0;
3412 TCP_ECN_queue_cwr(tp);
3413 tcp_moderate_cwnd(tp);
3414}
3415
3416/* A conservative spurious RTO response algorithm: reduce cwnd using
3417 * PRR and continue in congestion avoidance.
3418 */
3419static void tcp_cwr_spur_to_response(struct sock *sk)
3420{
3421 tcp_enter_cwr(sk, 0);
3422}
3423
3424static void tcp_undo_spur_to_response(struct sock *sk, int flag)
3425{
3426 if (flag & FLAG_ECE)
3427 tcp_cwr_spur_to_response(sk);
3428 else
3429 tcp_undo_cwr(sk, true);
3430}
3431
3432/* F-RTO spurious RTO detection algorithm (RFC4138)
3433 *
3434 * F-RTO affects during two new ACKs following RTO (well, almost, see inline
3435 * comments). State (ACK number) is kept in frto_counter. When ACK advances
3436 * window (but not to or beyond highest sequence sent before RTO):
3437 * On First ACK, send two new segments out.
3438 * On Second ACK, RTO was likely spurious. Do spurious response (response
3439 * algorithm is not part of the F-RTO detection algorithm
3440 * given in RFC4138 but can be selected separately).
3441 * Otherwise (basically on duplicate ACK), RTO was (likely) caused by a loss
3442 * and TCP falls back to conventional RTO recovery. F-RTO allows overriding
3443 * of Nagle, this is done using frto_counter states 2 and 3, when a new data
3444 * segment of any size sent during F-RTO, state 2 is upgraded to 3.
3445 *
3446 * Rationale: if the RTO was spurious, new ACKs should arrive from the
3447 * original window even after we transmit two new data segments.
3448 *
3449 * SACK version:
3450 * on first step, wait until first cumulative ACK arrives, then move to
3451 * the second step. In second step, the next ACK decides.
3452 *
3453 * F-RTO is implemented (mainly) in four functions:
 3454 * - tcp_use_frto() is used to determine if TCP can use F-RTO
3455 * - tcp_enter_frto() prepares TCP state on RTO if F-RTO is used, it is
3456 * called when tcp_use_frto() showed green light
3457 * - tcp_process_frto() handles incoming ACKs during F-RTO algorithm
3458 * - tcp_enter_frto_loss() is called if there is not enough evidence
3459 * to prove that the RTO is indeed spurious. It transfers the control
3460 * from F-RTO to the conventional RTO recovery
3461 */
3462static bool tcp_process_frto(struct sock *sk, int flag)
3463{
3464 struct tcp_sock *tp = tcp_sk(sk);
3465
3466 tcp_verify_left_out(tp);
3467
3468 /* Duplicate the behavior from Loss state (fastretrans_alert) */
3469 if (flag & FLAG_DATA_ACKED)
3470 inet_csk(sk)->icsk_retransmits = 0;
3471
3472 if ((flag & FLAG_NONHEAD_RETRANS_ACKED) ||
3473 ((tp->frto_counter >= 2) && (flag & FLAG_RETRANS_DATA_ACKED)))
3474 tp->undo_marker = 0;
3475
3476 if (!before(tp->snd_una, tp->frto_highmark)) {
3477 tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3), flag);
3478 return true;
3479 }
3480
3481 if (!tcp_is_sackfrto(tp)) {
3482 /* RFC4138 shortcoming in step 2; should also have case c):
3483 * ACK isn't duplicate nor advances window, e.g., opposite dir
3484 * data, winupdate
3485 */
3486 if (!(flag & FLAG_ANY_PROGRESS) && (flag & FLAG_NOT_DUP))
3487 return true;
3488
3489 if (!(flag & FLAG_DATA_ACKED)) {
3490 tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 0 : 3),
3491 flag);
3492 return true;
3493 }
3494 } else {
3495 if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
3496 if (!tcp_packets_in_flight(tp)) {
3497 tcp_enter_frto_loss(sk, 2, flag);
3498 return true;
3499 }
3500
3501 /* Prevent sending of new data. */
3502 tp->snd_cwnd = min(tp->snd_cwnd,
3503 tcp_packets_in_flight(tp));
3504 return true;
3505 }
3506
3507 if ((tp->frto_counter >= 2) &&
3508 (!(flag & FLAG_FORWARD_PROGRESS) ||
3509 ((flag & FLAG_DATA_SACKED) &&
3510 !(flag & FLAG_ONLY_ORIG_SACKED)))) {
3511 /* RFC4138 shortcoming (see comment above) */
3512 if (!(flag & FLAG_FORWARD_PROGRESS) &&
3513 (flag & FLAG_NOT_DUP))
3514 return true;
3515
3516 tcp_enter_frto_loss(sk, 3, flag);
3517 return true;
3518 }
3519 }
3520
3521 if (tp->frto_counter == 1) {
3522 /* tcp_may_send_now needs to see updated state */
3523 tp->snd_cwnd = tcp_packets_in_flight(tp) + 2;
3524 tp->frto_counter = 2;
3525
3526 if (!tcp_may_send_now(sk))
3527 tcp_enter_frto_loss(sk, 2, flag);
3528
3529 return true;
3530 } else {
3531 switch (sysctl_tcp_frto_response) {
3532 case 2:
3533 tcp_undo_spur_to_response(sk, flag);
3534 break;
3535 case 1:
3536 tcp_conservative_spur_to_response(tp);
3537 break;
3538 default:
3539 tcp_cwr_spur_to_response(sk);
3540 break;
3541 }
3542 tp->frto_counter = 0;
3543 tp->undo_marker = 0;
3544 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS);
3545 }
3546 return false;
3547}
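The two-ACK detection described in the comment block above is easy to lose among the flag checks, so the following freestanding C condensation of the non-SACK path may help. It is an editor's sketch, not kernel code: frto_step() and the flag value are hypothetical stand-ins that only model how frto_counter drives the outcome.

    /* Assumed, simplified flag: the ACK advanced snd_una over new data. */
    #define FLAG_DATA_ACKED 0x04

    enum frto_verdict {
            FRTO_IN_PROGRESS,       /* keep going, transmit two new segments */
            FRTO_FALLBACK_LOSS,     /* duplicate ACK: treat the RTO as genuine */
            FRTO_SPURIOUS_RTO,      /* second new ACK: the RTO was spurious */
    };

    static enum frto_verdict frto_step(int *frto_counter, int flag)
    {
            if (!(flag & FLAG_DATA_ACKED))          /* no progress: real loss */
                    return FRTO_FALLBACK_LOSS;
            if (*frto_counter == 1) {
                    *frto_counter = 2;              /* first new ACK */
                    return FRTO_IN_PROGRESS;
            }
            *frto_counter = 0;                      /* second new ACK */
            return FRTO_SPURIOUS_RTO;
    }

If the RTO was spurious, both ACKs come from the original window even though two new segments went out, which is exactly what the rationale above predicts.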
3548
3549/* RFC 5961 7 [ACK Throttling] */ 3250
3550static void tcp_send_challenge_ack(struct sock *sk) 3251
3551{ 3252
@@ -3564,6 +3265,38 @@ static void tcp_send_challenge_ack(struct sock *sk)
3564 } 3265
3565} 3266
3566 3267
3268/* This routine deals with acks during a TLP episode.
3269 * Ref: loss detection algorithm in draft-dukkipati-tcpm-tcp-loss-probe.
3270 */
3271static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
3272{
3273 struct tcp_sock *tp = tcp_sk(sk);
3274 bool is_tlp_dupack = (ack == tp->tlp_high_seq) &&
3275 !(flag & (FLAG_SND_UNA_ADVANCED |
3276 FLAG_NOT_DUP | FLAG_DATA_SACKED));
3277
3278 /* Mark the end of TLP episode on receiving TLP dupack or when
3279 * ack is after tlp_high_seq.
3280 */
3281 if (is_tlp_dupack) {
3282 tp->tlp_high_seq = 0;
3283 return;
3284 }
3285
3286 if (after(ack, tp->tlp_high_seq)) {
3287 tp->tlp_high_seq = 0;
3288 /* Don't reduce cwnd if DSACK arrives for TLP retrans. */
3289 if (!(flag & FLAG_DSACKING_ACK)) {
3290 tcp_init_cwnd_reduction(sk, true);
3291 tcp_set_ca_state(sk, TCP_CA_CWR);
3292 tcp_end_cwnd_reduction(sk);
3293 tcp_set_ca_state(sk, TCP_CA_Open);
3294 NET_INC_STATS_BH(sock_net(sk),
3295 LINUX_MIB_TCPLOSSPROBERECOVERY);
3296 }
3297 }
3298}
3299
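tcp_process_tlp_ack() above ends a TLP episode in two different ways, and the flag test is dense; this standalone restatement of the classification may help. The flag values are illustrative constants and tlp_ack_needs_reduction() is a hypothetical helper, not the kernel function.

    #include <stdbool.h>
    #include <stdint.h>

    #define FLAG_NOT_DUP            0x08
    #define FLAG_DATA_SACKED        0x20
    #define FLAG_SND_UNA_ADVANCED   0x400
    #define FLAG_DSACKING_ACK       0x800

    /* after(): serial-number comparison modulo 2^32, as in the kernel. */
    static bool after(uint32_t seq1, uint32_t seq2)
    {
            return (int32_t)(seq2 - seq1) < 0;
    }

    /* A pure dupack for the probe ends the episode silently; an ACK past
     * tlp_high_seq ends it and charges a recovered loss unless the ACK
     * carries a DSACK for the probe retransmission.
     */
    static bool tlp_ack_needs_reduction(uint32_t ack, uint32_t tlp_high_seq,
                                        int flag)
    {
            bool dupack = ack == tlp_high_seq &&
                          !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP |
                                    FLAG_DATA_SACKED));

            if (dupack)
                    return false;
            return after(ack, tlp_high_seq) && !(flag & FLAG_DSACKING_ACK);
    }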
3567/* This routine deals with incoming acks, but not outgoing ones. */ 3300
3568static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) 3301
3569{ 3302
@@ -3578,7 +3311,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3578 int prior_packets; 3311
3579 int prior_sacked = tp->sacked_out; 3312
3580 int pkts_acked = 0; 3313
3581 bool frto_cwnd = false;
3582 3314
3583 /* If the ack is older than previous acks 3315
3584 * then we can probably ignore it. 3316
@@ -3598,7 +3330,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3598 if (after(ack, tp->snd_nxt)) 3330
3599 goto invalid_ack; 3331
3600 3332
3601 if (tp->early_retrans_delayed)
3333 if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
3334 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
3602 tcp_rearm_rto(sk); 3335
3603 3336
3604 if (after(ack, prior_snd_una)) 3337 if (after(ack, prior_snd_una))
@@ -3651,30 +3384,29 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3651 3384
3652 pkts_acked = prior_packets - tp->packets_out; 3385
3653 3386
3654 if (tp->frto_counter)
3655 frto_cwnd = tcp_process_frto(sk, flag);
3656 /* Guarantee sacktag reordering detection against wrap-arounds */
3657 if (before(tp->frto_highmark, tp->snd_una))
3658 tp->frto_highmark = 0;
3659
3660 if (tcp_ack_is_dubious(sk, flag)) { 3387
3661 /* Advance CWND, if state allows this. */ 3388
3662 if ((flag & FLAG_DATA_ACKED) && !frto_cwnd &&
3663 tcp_may_raise_cwnd(sk, flag))
3389 if ((flag & FLAG_DATA_ACKED) && tcp_may_raise_cwnd(sk, flag))
3664 tcp_cong_avoid(sk, ack, prior_in_flight); 3390
3665 is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); 3391
3666 tcp_fastretrans_alert(sk, pkts_acked, prior_sacked, 3392
3667 is_dupack, flag); 3393
3668 } else { 3394
3669 if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
3395 if (flag & FLAG_DATA_ACKED)
3670 tcp_cong_avoid(sk, ack, prior_in_flight); 3396
3671 } 3397
3672 3398
3399 if (tp->tlp_high_seq)
3400 tcp_process_tlp_ack(sk, ack, flag);
3401
3673 if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) { 3402
3674 struct dst_entry *dst = __sk_dst_get(sk); 3403
3675 if (dst) 3404
3676 dst_confirm(dst); 3405
3677 } 3406
3407
3408 if (icsk->icsk_pending == ICSK_TIME_RETRANS)
3409 tcp_schedule_loss_probe(sk);
3678 return 1; 3410
3679 3411
3680no_queue: 3412
@@ -3688,6 +3420,9 @@ no_queue:
3688 */ 3420
3689 if (tcp_send_head(sk)) 3421
3690 tcp_ack_probe(sk); 3422
3423
3424 if (tp->tlp_high_seq)
3425 tcp_process_tlp_ack(sk, ack, flag);
3691 return 1; 3426
3692 3427
3693invalid_ack: 3428
@@ -3712,8 +3447,8 @@ old_ack:
3712 * But, this can also be called on packets in the established flow when 3447
3713 * the fast version below fails. 3448
3714 */ 3449
3715void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *opt_rx,
3716 const u8 **hvpp, int estab,
3450void tcp_parse_options(const struct sk_buff *skb,
3451 struct tcp_options_received *opt_rx, int estab,
3717 struct tcp_fastopen_cookie *foc) 3452
3718{ 3453
3719 const unsigned char *ptr; 3454
@@ -3797,31 +3532,6 @@ void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *o
3797 */ 3532
3798 break; 3533
3799#endif 3534
3800 case TCPOPT_COOKIE:
3801 /* This option is variable length.
3802 */
3803 switch (opsize) {
3804 case TCPOLEN_COOKIE_BASE:
3805 /* not yet implemented */
3806 break;
3807 case TCPOLEN_COOKIE_PAIR:
3808 /* not yet implemented */
3809 break;
3810 case TCPOLEN_COOKIE_MIN+0:
3811 case TCPOLEN_COOKIE_MIN+2:
3812 case TCPOLEN_COOKIE_MIN+4:
3813 case TCPOLEN_COOKIE_MIN+6:
3814 case TCPOLEN_COOKIE_MAX:
3815 /* 16-bit multiple */
3816 opt_rx->cookie_plus = opsize;
3817 *hvpp = ptr;
3818 break;
3819 default:
3820 /* ignore option */
3821 break;
3822 }
3823 break;
3824
3825 case TCPOPT_EXP: 3535
3826 /* Fast Open option shares code 254 using a 3536
3827 * 16 bits magic number. It's valid only in 3537
@@ -3867,8 +3577,7 @@ static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr
3867 * If it is wrong it falls back on tcp_parse_options(). 3577
3868 */ 3578
3869static bool tcp_fast_parse_options(const struct sk_buff *skb, 3579
3870 const struct tcphdr *th,
3871 struct tcp_sock *tp, const u8 **hvpp)
3580 const struct tcphdr *th, struct tcp_sock *tp)
3872{ 3581
3873 /* In the spirit of fast parsing, compare doff directly to constant 3582
3874 * values. Because equality is used, short doff can be ignored here. 3583
@@ -3882,7 +3591,7 @@ static bool tcp_fast_parse_options(const struct sk_buff *skb,
3882 return true; 3591
3883 } 3592
3884 3593
3885 tcp_parse_options(skb, &tp->rx_opt, hvpp, 1, NULL);
3594 tcp_parse_options(skb, &tp->rx_opt, 1, NULL);
3886 if (tp->rx_opt.saw_tstamp) 3595
3887 tp->rx_opt.rcv_tsecr -= tp->tsoffset; 3596
3888 3597
@@ -5263,12 +4972,10 @@ out:
5263static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, 4972
5264 const struct tcphdr *th, int syn_inerr) 4973
5265{ 4974
5266 const u8 *hash_location;
5267 struct tcp_sock *tp = tcp_sk(sk); 4975
5268 4976
5269 /* RFC1323: H1. Apply PAWS check first. */ 4977
5270 if (tcp_fast_parse_options(skb, th, tp, &hash_location) &&
5271 tp->rx_opt.saw_tstamp &&
4978 if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
5272 tcp_paws_discard(sk, skb)) { 4979
5273 if (!th->rst) { 4980
5274 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED); 4981
@@ -5622,12 +5329,11 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
5622 5329
5623 if (mss == tp->rx_opt.user_mss) { 5330
5624 struct tcp_options_received opt; 5331
5625 const u8 *hash_location;
5626 5332
5627 /* Get original SYNACK MSS value if user MSS sets mss_clamp */ 5333
5628 tcp_clear_options(&opt); 5334
5629 opt.user_mss = opt.mss_clamp = 0; 5335
5630 tcp_parse_options(synack, &opt, &hash_location, 0, NULL);
5336 tcp_parse_options(synack, &opt, 0, NULL);
5631 mss = opt.mss_clamp; 5337
5632 } 5338
5633 5339
@@ -5658,14 +5364,12 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
5658static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, 5364
5659 const struct tcphdr *th, unsigned int len) 5365
5660{ 5366
5661 const u8 *hash_location;
5662 struct inet_connection_sock *icsk = inet_csk(sk); 5367
5663 struct tcp_sock *tp = tcp_sk(sk); 5368
5664 struct tcp_cookie_values *cvp = tp->cookie_values;
5665 struct tcp_fastopen_cookie foc = { .len = -1 }; 5369
5666 int saved_clamp = tp->rx_opt.mss_clamp; 5370
5667 5371
5668 tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0, &foc);
5372 tcp_parse_options(skb, &tp->rx_opt, 0, &foc);
5669 if (tp->rx_opt.saw_tstamp) 5373
5670 tp->rx_opt.rcv_tsecr -= tp->tsoffset; 5374
5671 5375
@@ -5762,30 +5466,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
5762 * is initialized. */ 5466
5763 tp->copied_seq = tp->rcv_nxt; 5467
5764 5468
5765 if (cvp != NULL &&
5766 cvp->cookie_pair_size > 0 &&
5767 tp->rx_opt.cookie_plus > 0) {
5768 int cookie_size = tp->rx_opt.cookie_plus
5769 - TCPOLEN_COOKIE_BASE;
5770 int cookie_pair_size = cookie_size
5771 + cvp->cookie_desired;
5772
5773 /* A cookie extension option was sent and returned.
5774 * Note that each incoming SYNACK replaces the
5775 * Responder cookie. The initial exchange is most
5776 * fragile, as protection against spoofing relies
5777 * entirely upon the sequence and timestamp (above).
5778 * This replacement strategy allows the correct pair to
5779 * pass through, while any others will be filtered via
5780 * Responder verification later.
5781 */
5782 if (sizeof(cvp->cookie_pair) >= cookie_pair_size) {
5783 memcpy(&cvp->cookie_pair[cvp->cookie_desired],
5784 hash_location, cookie_size);
5785 cvp->cookie_pair_size = cookie_pair_size;
5786 }
5787 }
5788
5789 smp_mb(); 5469
5790 5470
5791 tcp_finish_connect(sk, skb); 5471
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index d09203c63264..2278669b1d85 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -838,7 +838,6 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
838 */ 838
839static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst, 839
840 struct request_sock *req, 840
841 struct request_values *rvp,
842 u16 queue_mapping, 841
843 bool nocache) 842
844{ 843
@@ -851,7 +850,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
851 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL) 850
852 return -1; 851
853 852
854 skb = tcp_make_synack(sk, dst, req, rvp, NULL);
853 skb = tcp_make_synack(sk, dst, req, NULL);
855 854
856 if (skb) { 855
857 __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr); 856
@@ -868,10 +867,9 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
868 return err; 867
869} 868
870 869
871static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
872 struct request_values *rvp)
870static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
873{ 871
874 int res = tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);
872 int res = tcp_v4_send_synack(sk, NULL, req, 0, false);
875 873
876 if (!res) 874
877 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); 875
@@ -1371,8 +1369,7 @@ static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
1371static int tcp_v4_conn_req_fastopen(struct sock *sk, 1369
1372 struct sk_buff *skb, 1370
1373 struct sk_buff *skb_synack, 1371
1374 struct request_sock *req,
1375 struct request_values *rvp)
1372 struct request_sock *req)
1376{ 1373
1377 struct tcp_sock *tp = tcp_sk(sk); 1374
1378 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; 1375
@@ -1467,9 +1464,7 @@ static int tcp_v4_conn_req_fastopen(struct sock *sk,
1467 1464
1468int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) 1465
1469{ 1466
1470 struct tcp_extend_values tmp_ext;
1471 struct tcp_options_received tmp_opt; 1467
1472 const u8 *hash_location;
1473 struct request_sock *req; 1468
1474 struct inet_request_sock *ireq; 1469
1475 struct tcp_sock *tp = tcp_sk(sk); 1470
@@ -1519,42 +1514,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1519 tcp_clear_options(&tmp_opt); 1514
1520 tmp_opt.mss_clamp = TCP_MSS_DEFAULT; 1515
1521 tmp_opt.user_mss = tp->rx_opt.user_mss; 1516
1522 tcp_parse_options(skb, &tmp_opt, &hash_location, 0,
1523 want_cookie ? NULL : &foc);
1517 tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
1524
1525 if (tmp_opt.cookie_plus > 0 &&
1526 tmp_opt.saw_tstamp &&
1527 !tp->rx_opt.cookie_out_never &&
1528 (sysctl_tcp_cookie_size > 0 ||
1529 (tp->cookie_values != NULL &&
1530 tp->cookie_values->cookie_desired > 0))) {
1531 u8 *c;
1532 u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1533 int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1534
1535 if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1536 goto drop_and_release;
1537
1538 /* Secret recipe starts with IP addresses */
1539 *mess++ ^= (__force u32)daddr;
1540 *mess++ ^= (__force u32)saddr;
1541
1542 /* plus variable length Initiator Cookie */
1543 c = (u8 *)mess;
1544 while (l-- > 0)
1545 *c++ ^= *hash_location++;
1546
1547 want_cookie = false; /* not our kind of cookie */
1548 tmp_ext.cookie_out_never = 0; /* false */
1549 tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1550 } else if (!tp->rx_opt.cookie_in_always) {
1551 /* redundant indications, but ensure initialization. */
1552 tmp_ext.cookie_out_never = 1; /* true */
1553 tmp_ext.cookie_plus = 0;
1554 } else {
1555 goto drop_and_release;
1556 }
1557 tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1558 1518
1559 if (want_cookie && !tmp_opt.saw_tstamp) 1519
1560 tcp_clear_options(&tmp_opt); 1520
@@ -1636,7 +1596,6 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1636 * of tcp_v4_send_synack()->tcp_select_initial_window(). 1596
1637 */ 1597
1638 skb_synack = tcp_make_synack(sk, dst, req, 1598
1639 (struct request_values *)&tmp_ext,
1640 fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL); 1599
1641 1600
1642 if (skb_synack) { 1601
@@ -1660,8 +1619,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1660 if (fastopen_cookie_present(&foc) && foc.len != 0) 1619
1661 NET_INC_STATS_BH(sock_net(sk), 1620
1662 LINUX_MIB_TCPFASTOPENPASSIVEFAIL); 1621
1663 } else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req,
1664 (struct request_values *)&tmp_ext))
1622 } else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req))
1665 goto drop_and_free; 1623
1666 1624
1667 return 0; 1625
@@ -1950,6 +1908,50 @@ void tcp_v4_early_demux(struct sk_buff *skb)
1950 } 1908
1951} 1909
1952 1910
1911/* Packet is added to VJ-style prequeue for processing in process
1912 * context, if a reader task is waiting. Apparently, this exciting
1913 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1914 * failed somewhere. Latency? Burstiness? Well, at least now we will
1915 * see, why it failed. 8)8) --ANK
1916 *
1917 */
1918bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1919{
1920 struct tcp_sock *tp = tcp_sk(sk);
1921
1922 if (sysctl_tcp_low_latency || !tp->ucopy.task)
1923 return false;
1924
1925 if (skb->len <= tcp_hdrlen(skb) &&
1926 skb_queue_len(&tp->ucopy.prequeue) == 0)
1927 return false;
1928
1929 __skb_queue_tail(&tp->ucopy.prequeue, skb);
1930 tp->ucopy.memory += skb->truesize;
1931 if (tp->ucopy.memory > sk->sk_rcvbuf) {
1932 struct sk_buff *skb1;
1933
1934 BUG_ON(sock_owned_by_user(sk));
1935
1936 while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1937 sk_backlog_rcv(sk, skb1);
1938 NET_INC_STATS_BH(sock_net(sk),
1939 LINUX_MIB_TCPPREQUEUEDROPPED);
1940 }
1941
1942 tp->ucopy.memory = 0;
1943 } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1944 wake_up_interruptible_sync_poll(sk_sleep(sk),
1945 POLLIN | POLLRDNORM | POLLRDBAND);
1946 if (!inet_csk_ack_scheduled(sk))
1947 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1948 (3 * tcp_rto_min(sk)) / 4,
1949 TCP_RTO_MAX);
1950 }
1951 return true;
1952}
1953EXPORT_SYMBOL(tcp_prequeue);
1954
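The prequeue logic added above reduces to four outcomes. A compact model of the decision, with plain parameters standing in for the socket state (editor's sketch, not kernel code):

    #include <stdbool.h>

    enum prequeue_action {
            PQ_REJECT,      /* low-latency mode or no reader: normal path */
            PQ_FLUSH,       /* over sk_rcvbuf: process the backlog inline */
            PQ_WAKE,        /* first skb queued: wake the sleeping reader */
            PQ_QUEUE,       /* just append and keep accumulating */
    };

    static enum prequeue_action prequeue_classify(bool low_latency,
                                                  bool reader_waiting,
                                                  int queued_bytes, int rcvbuf,
                                                  int queue_len)
    {
            if (low_latency || !reader_waiting)
                    return PQ_REJECT;
            if (queued_bytes > rcvbuf)
                    return PQ_FLUSH;
            return queue_len == 1 ? PQ_WAKE : PQ_QUEUE;
    }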
1953/* 1955
1954 * From tcp_input.c 1956
1955 */ 1957
@@ -2197,12 +2199,6 @@ void tcp_v4_destroy_sock(struct sock *sk)
2197 if (inet_csk(sk)->icsk_bind_hash) 2199
2198 inet_put_port(sk); 2200
2199 2201
2200 /* TCP Cookie Transactions */
2201 if (tp->cookie_values != NULL) {
2202 kref_put(&tp->cookie_values->kref,
2203 tcp_cookie_values_release);
2204 tp->cookie_values = NULL;
2205 }
2206 BUG_ON(tp->fastopen_rsk != NULL); 2202
2207 2203
2208 /* If socket is aborted during connect operation */ 2204
@@ -2659,7 +2655,9 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
2659 __u16 srcp = ntohs(inet->inet_sport); 2655
2660 int rx_queue; 2656
2661 2657
2662 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
2658 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2659 icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2660 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2663 timer_active = 1; 2661
2664 timer_expires = icsk->icsk_timeout; 2662
2665 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { 2663
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index b83a49cc3816..05eaf8904613 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -93,13 +93,12 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
93 const struct tcphdr *th) 93
94{ 94
95 struct tcp_options_received tmp_opt; 95
96 const u8 *hash_location;
97 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); 96
98 bool paws_reject = false; 97
99 98
100 tmp_opt.saw_tstamp = 0; 99
101 if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) { 100
102 tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);
101 tcp_parse_options(skb, &tmp_opt, 0, NULL);
103 102
104 if (tmp_opt.saw_tstamp) { 103
105 tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset; 104
@@ -388,32 +387,6 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
388 struct tcp_request_sock *treq = tcp_rsk(req); 387
389 struct inet_connection_sock *newicsk = inet_csk(newsk); 388
390 struct tcp_sock *newtp = tcp_sk(newsk); 389
391 struct tcp_sock *oldtp = tcp_sk(sk);
392 struct tcp_cookie_values *oldcvp = oldtp->cookie_values;
393
394 /* TCP Cookie Transactions require space for the cookie pair,
395 * as it differs for each connection. There is no need to
396 * copy any s_data_payload stored at the original socket.
397 * Failure will prevent resuming the connection.
398 *
399 * Presumed copied, in order of appearance:
400 * cookie_in_always, cookie_out_never
401 */
402 if (oldcvp != NULL) {
403 struct tcp_cookie_values *newcvp =
404 kzalloc(sizeof(*newtp->cookie_values),
405 GFP_ATOMIC);
406
407 if (newcvp != NULL) {
408 kref_init(&newcvp->kref);
409 newcvp->cookie_desired =
410 oldcvp->cookie_desired;
411 newtp->cookie_values = newcvp;
412 } else {
413 /* Not Yet Implemented */
414 newtp->cookie_values = NULL;
415 }
416 }
417 390
418 /* Now setup tcp_sock */ 391
419 newtp->pred_flags = 0; 392
@@ -422,8 +395,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
422 newtp->rcv_nxt = treq->rcv_isn + 1; 395
423 396
424 newtp->snd_sml = newtp->snd_una = 397
425 newtp->snd_nxt = newtp->snd_up =
426 treq->snt_isn + 1 + tcp_s_data_size(oldtp);
398 newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;
427 399
428 tcp_prequeue_init(newtp); 400
429 INIT_LIST_HEAD(&newtp->tsq_node); 401
@@ -440,6 +412,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
440 newtp->fackets_out = 0; 412
441 newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 413
442 tcp_enable_early_retrans(newtp); 414
415 newtp->tlp_high_seq = 0;
443 416
444 /* So many TCP implementations out there (incorrectly) count the 417
445 * initial SYN frame in their delayed-ACK and congestion control 418
@@ -449,9 +422,6 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
449 newtp->snd_cwnd = TCP_INIT_CWND; 422
450 newtp->snd_cwnd_cnt = 0; 423
451 424
452 newtp->frto_counter = 0;
453 newtp->frto_highmark = 0;
454
455 if (newicsk->icsk_ca_ops != &tcp_init_congestion_ops && 425
456 !try_module_get(newicsk->icsk_ca_ops->owner)) 426
457 newicsk->icsk_ca_ops = &tcp_init_congestion_ops; 427
@@ -459,8 +429,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
459 tcp_set_ca_state(newsk, TCP_CA_Open); 429
460 tcp_init_xmit_timers(newsk); 430
461 skb_queue_head_init(&newtp->out_of_order_queue); 431
462 newtp->write_seq = newtp->pushed_seq =
463 treq->snt_isn + 1 + tcp_s_data_size(oldtp);
432 newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;
464 433
465 newtp->rx_opt.saw_tstamp = 0; 434
466 435
@@ -537,7 +506,6 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
537 bool fastopen) 506
538{ 507
539 struct tcp_options_received tmp_opt; 508
540 const u8 *hash_location;
541 struct sock *child; 509
542 const struct tcphdr *th = tcp_hdr(skb); 510
543 __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK); 511
@@ -547,7 +515,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
547 515
548 tmp_opt.saw_tstamp = 0; 516
549 if (th->doff > (sizeof(struct tcphdr)>>2)) { 517
550 tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);
518 tcp_parse_options(skb, &tmp_opt, 0, NULL);
551 519
552 if (tmp_opt.saw_tstamp) { 520
553 tmp_opt.ts_recent = req->ts_recent; 521
@@ -647,7 +615,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
647 */ 615
648 if ((flg & TCP_FLAG_ACK) && !fastopen && 616
649 (TCP_SKB_CB(skb)->ack_seq != 617
650 tcp_rsk(req)->snt_isn + 1 + tcp_s_data_size(tcp_sk(sk))))
618 tcp_rsk(req)->snt_isn + 1))
651 return sk; 619
652 620
653 /* Also, it would be not so bad idea to check rcv_tsecr, which 621
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 5d0b4387cba6..af354c98fdb5 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -65,27 +65,22 @@ int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS;
65/* By default, RFC2861 behavior. */ 65
66int sysctl_tcp_slow_start_after_idle __read_mostly = 1; 66
67 67
68int sysctl_tcp_cookie_size __read_mostly = 0; /* TCP_COOKIE_MAX */
69EXPORT_SYMBOL_GPL(sysctl_tcp_cookie_size);
70
71static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, 68
72 int push_one, gfp_t gfp); 69
73 70
74/* Account for new data that has been sent to the network. */ 71
75static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb) 72
76{ 73
74 struct inet_connection_sock *icsk = inet_csk(sk);
77 struct tcp_sock *tp = tcp_sk(sk); 75
78 unsigned int prior_packets = tp->packets_out; 76
79 77
80 tcp_advance_send_head(sk, skb); 78
81 tp->snd_nxt = TCP_SKB_CB(skb)->end_seq; 79
82 80
83 /* Don't override Nagle indefinitely with F-RTO */
84 if (tp->frto_counter == 2)
85 tp->frto_counter = 3;
86
87 tp->packets_out += tcp_skb_pcount(skb); 81
88 if (!prior_packets || tp->early_retrans_delayed)
82 if (!prior_packets || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
83 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
89 tcp_rearm_rto(sk); 84
90} 85
91 86
@@ -384,7 +379,6 @@ static inline bool tcp_urg_mode(const struct tcp_sock *tp)
384#define OPTION_TS (1 << 1) 379
385#define OPTION_MD5 (1 << 2) 380
386#define OPTION_WSCALE (1 << 3) 381
387#define OPTION_COOKIE_EXTENSION (1 << 4)
388#define OPTION_FAST_OPEN_COOKIE (1 << 8) 382
389 383
390struct tcp_out_options { 384
@@ -398,36 +392,6 @@ struct tcp_out_options {
398 struct tcp_fastopen_cookie *fastopen_cookie; /* Fast open cookie */ 392
399}; 393
400 394
401/* The sysctl int routines are generic, so check consistency here.
402 */
403static u8 tcp_cookie_size_check(u8 desired)
404{
405 int cookie_size;
406
407 if (desired > 0)
408 /* previously specified */
409 return desired;
410
411 cookie_size = ACCESS_ONCE(sysctl_tcp_cookie_size);
412 if (cookie_size <= 0)
413 /* no default specified */
414 return 0;
415
416 if (cookie_size <= TCP_COOKIE_MIN)
417 /* value too small, specify minimum */
418 return TCP_COOKIE_MIN;
419
420 if (cookie_size >= TCP_COOKIE_MAX)
421 /* value too large, specify maximum */
422 return TCP_COOKIE_MAX;
423
424 if (cookie_size & 1)
425 /* 8-bit multiple, illegal, fix it */
426 cookie_size++;
427
428 return (u8)cookie_size;
429}
430
431/* Write previously computed TCP options to the packet. 395
432 * 396
433 * Beware: Something in the Internet is very sensitive to the ordering of 397
@@ -446,27 +410,9 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
446{ 410
447 u16 options = opts->options; /* mungable copy */ 411
448 412
449 /* Having both authentication and cookies for security is redundant,
450 * and there's certainly not enough room. Instead, the cookie-less
451 * extension variant is proposed.
452 *
453 * Consider the pessimal case with authentication. The options
454 * could look like:
455 * COOKIE|MD5(20) + MSS(4) + SACK|TS(12) + WSCALE(4) == 40
456 */
457 if (unlikely(OPTION_MD5 & options)) { 413
458 if (unlikely(OPTION_COOKIE_EXTENSION & options)) {
459 *ptr++ = htonl((TCPOPT_COOKIE << 24) |
460 (TCPOLEN_COOKIE_BASE << 16) |
461 (TCPOPT_MD5SIG << 8) |
462 TCPOLEN_MD5SIG);
463 } else {
464 *ptr++ = htonl((TCPOPT_NOP << 24) |
465 (TCPOPT_NOP << 16) |
466 (TCPOPT_MD5SIG << 8) |
467 TCPOLEN_MD5SIG);
468 }
469 options &= ~OPTION_COOKIE_EXTENSION;
414 *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
415 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
470 /* overload cookie hash location */ 416
471 opts->hash_location = (__u8 *)ptr; 417
472 ptr += 4; 418
@@ -495,44 +441,6 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
495 *ptr++ = htonl(opts->tsecr); 441
496 } 442
497 443
498 /* Specification requires after timestamp, so do it now.
499 *
500 * Consider the pessimal case without authentication. The options
501 * could look like:
502 * MSS(4) + SACK|TS(12) + COOKIE(20) + WSCALE(4) == 40
503 */
504 if (unlikely(OPTION_COOKIE_EXTENSION & options)) {
505 __u8 *cookie_copy = opts->hash_location;
506 u8 cookie_size = opts->hash_size;
507
508 /* 8-bit multiple handled in tcp_cookie_size_check() above,
509 * and elsewhere.
510 */
511 if (0x2 & cookie_size) {
512 __u8 *p = (__u8 *)ptr;
513
514 /* 16-bit multiple */
515 *p++ = TCPOPT_COOKIE;
516 *p++ = TCPOLEN_COOKIE_BASE + cookie_size;
517 *p++ = *cookie_copy++;
518 *p++ = *cookie_copy++;
519 ptr++;
520 cookie_size -= 2;
521 } else {
522 /* 32-bit multiple */
523 *ptr++ = htonl(((TCPOPT_NOP << 24) |
524 (TCPOPT_NOP << 16) |
525 (TCPOPT_COOKIE << 8) |
526 TCPOLEN_COOKIE_BASE) +
527 cookie_size);
528 }
529
530 if (cookie_size > 0) {
531 memcpy(ptr, cookie_copy, cookie_size);
532 ptr += (cookie_size / 4);
533 }
534 }
535
536 if (unlikely(OPTION_SACK_ADVERTISE & options)) { 444
537 *ptr++ = htonl((TCPOPT_NOP << 24) | 445
538 (TCPOPT_NOP << 16) | 446
@@ -591,11 +499,7 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
591 struct tcp_md5sig_key **md5) 499
592{ 500
593 struct tcp_sock *tp = tcp_sk(sk); 501
594 struct tcp_cookie_values *cvp = tp->cookie_values;
595 unsigned int remaining = MAX_TCP_OPTION_SPACE; 502
596 u8 cookie_size = (!tp->rx_opt.cookie_out_never && cvp != NULL) ?
597 tcp_cookie_size_check(cvp->cookie_desired) :
598 0;
599 struct tcp_fastopen_request *fastopen = tp->fastopen_req; 503
600 504
601#ifdef CONFIG_TCP_MD5SIG 505
@@ -647,52 +551,7 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
647 tp->syn_fastopen = 1; 551
648 } 552
649 } 553
650 /* Note that timestamps are required by the specification.
651 *
652 * Odd numbers of bytes are prohibited by the specification, ensuring
653 * that the cookie is 16-bit aligned, and the resulting cookie pair is
654 * 32-bit aligned.
655 */
656 if (*md5 == NULL &&
657 (OPTION_TS & opts->options) &&
658 cookie_size > 0) {
659 int need = TCPOLEN_COOKIE_BASE + cookie_size;
660
661 if (0x2 & need) {
662 /* 32-bit multiple */
663 need += 2; /* NOPs */
664
665 if (need > remaining) {
666 /* try shrinking cookie to fit */
667 cookie_size -= 2;
668 need -= 4;
669 }
670 }
671 while (need > remaining && TCP_COOKIE_MIN <= cookie_size) {
672 cookie_size -= 4;
673 need -= 4;
674 }
675 if (TCP_COOKIE_MIN <= cookie_size) {
676 opts->options |= OPTION_COOKIE_EXTENSION;
677 opts->hash_location = (__u8 *)&cvp->cookie_pair[0];
678 opts->hash_size = cookie_size;
679
680 /* Remember for future incarnations. */
681 cvp->cookie_desired = cookie_size;
682
683 if (cvp->cookie_desired != cvp->cookie_pair_size) {
684 /* Currently use random bytes as a nonce,
685 * assuming these are completely unpredictable
686 * by hostile users of the same system.
687 */
688 get_random_bytes(&cvp->cookie_pair[0],
689 cookie_size);
690 cvp->cookie_pair_size = cookie_size;
691 }
692 554
693 remaining -= need;
694 }
695 }
696 return MAX_TCP_OPTION_SPACE - remaining; 555
697} 556
698 557
@@ -702,14 +561,10 @@ static unsigned int tcp_synack_options(struct sock *sk,
702 unsigned int mss, struct sk_buff *skb, 561
703 struct tcp_out_options *opts, 562
704 struct tcp_md5sig_key **md5, 563
705 struct tcp_extend_values *xvp,
706 struct tcp_fastopen_cookie *foc) 564
707{ 565
708 struct inet_request_sock *ireq = inet_rsk(req); 566
709 unsigned int remaining = MAX_TCP_OPTION_SPACE; 567
710 u8 cookie_plus = (xvp != NULL && !xvp->cookie_out_never) ?
711 xvp->cookie_plus :
712 0;
713 568
714#ifdef CONFIG_TCP_MD5SIG 569
715 *md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req); 570
@@ -757,28 +612,7 @@ static unsigned int tcp_synack_options(struct sock *sk,
757 remaining -= need; 612
758 } 613
759 } 614
760 /* Similar rationale to tcp_syn_options() applies here, too. 615
761 * If the <SYN> options fit, the same options should fit now!
762 */
763 if (*md5 == NULL &&
764 ireq->tstamp_ok &&
765 cookie_plus > TCPOLEN_COOKIE_BASE) {
766 int need = cookie_plus; /* has TCPOLEN_COOKIE_BASE */
767
768 if (0x2 & need) {
769 /* 32-bit multiple */
770 need += 2; /* NOPs */
771 }
772 if (need <= remaining) {
773 opts->options |= OPTION_COOKIE_EXTENSION;
774 opts->hash_size = cookie_plus - TCPOLEN_COOKIE_BASE;
775 remaining -= need;
776 } else {
777 /* There's no error return, so flag it. */
778 xvp->cookie_out_never = 1; /* true */
779 opts->hash_size = 0;
780 }
781 }
782 return MAX_TCP_OPTION_SPACE - remaining; 616
783} 617
784 618
@@ -1632,11 +1466,8 @@ static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buf
1632 if (nonagle & TCP_NAGLE_PUSH) 1466
1633 return true; 1467
1634 1468
1635 /* Don't use the nagle rule for urgent data (or for the final FIN).
1636 * Nagle can be ignored during F-RTO too (see RFC4138).
1637 */
1638 if (tcp_urg_mode(tp) || (tp->frto_counter == 2) ||
1639 (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
1469 /* Don't use the nagle rule for urgent data (or for the final FIN). */
1470 if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
1640 return true; 1471
1641 1472
1642 if (!tcp_nagle_check(tp, skb, cur_mss, nonagle)) 1473
@@ -1961,6 +1792,9 @@ static int tcp_mtu_probe(struct sock *sk)
1961 * snd_up-64k-mss .. snd_up cannot be large. However, taking into 1792
1962 * account rare use of URG, this is not a big flaw. 1793
1963 * 1794
1795 * Send at most one packet when push_one > 0. Temporarily ignore
1796 * cwnd limit to force at most one packet out when push_one == 2.
1797 *
1964 * Returns true, if no segments are in flight and we have queued segments, 1798
1965 * but cannot send anything now because of SWS or another problem. 1799
1966 */ 1800
@@ -1996,8 +1830,13 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1996 goto repair; /* Skip network transmission */ 1830
1997 1831
1998 cwnd_quota = tcp_cwnd_test(tp, skb); 1832
1999 if (!cwnd_quota)
2000 break;
1833 if (!cwnd_quota) {
1834 if (push_one == 2)
1835 /* Force out a loss probe pkt. */
1836 cwnd_quota = 1;
1837 else
1838 break;
1839 }
2001 1840
2002 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) 1841
2003 break; 1842
@@ -2051,10 +1890,129 @@ repair:
2051 if (likely(sent_pkts)) { 1890
2052 if (tcp_in_cwnd_reduction(sk)) 1891
2053 tp->prr_out += sent_pkts; 1892
1893
1894 /* Send one loss probe per tail loss episode. */
1895 if (push_one != 2)
1896 tcp_schedule_loss_probe(sk);
2054 tcp_cwnd_validate(sk); 1897
2055 return false; 1898
2056 } 1899
2057 return !tp->packets_out && tcp_send_head(sk);
1900 return (push_one == 2) || (!tp->packets_out && tcp_send_head(sk));
1901}
1902
1903bool tcp_schedule_loss_probe(struct sock *sk)
1904{
1905 struct inet_connection_sock *icsk = inet_csk(sk);
1906 struct tcp_sock *tp = tcp_sk(sk);
1907 u32 timeout, tlp_time_stamp, rto_time_stamp;
1908 u32 rtt = tp->srtt >> 3;
1909
1910 if (WARN_ON(icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS))
1911 return false;
1912 /* No consecutive loss probes. */
1913 if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) {
1914 tcp_rearm_rto(sk);
1915 return false;
1916 }
1917 /* Don't do any loss probe on a Fast Open connection before 3WHS
1918 * finishes.
1919 */
1920 if (sk->sk_state == TCP_SYN_RECV)
1921 return false;
1922
1923 /* TLP is only scheduled when next timer event is RTO. */
1924 if (icsk->icsk_pending != ICSK_TIME_RETRANS)
1925 return false;
1926
1927 /* Schedule a loss probe in 2*RTT for SACK capable connections
1928 * in Open state, that are either limited by cwnd or application.
1929 */
1930 if (sysctl_tcp_early_retrans < 3 || !rtt || !tp->packets_out ||
1931 !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
1932 return false;
1933
1934 if ((tp->snd_cwnd > tcp_packets_in_flight(tp)) &&
1935 tcp_send_head(sk))
1936 return false;
1937
1938 /* Probe timeout is at least 1.5*rtt + TCP_DELACK_MAX to account
1939 * for delayed ack when there's one outstanding packet.
1940 */
1941 timeout = rtt << 1;
1942 if (tp->packets_out == 1)
1943 timeout = max_t(u32, timeout,
1944 (rtt + (rtt >> 1) + TCP_DELACK_MAX));
1945 timeout = max_t(u32, timeout, msecs_to_jiffies(10));
1946
1947 /* If RTO is shorter, just schedule TLP in its place. */
1948 tlp_time_stamp = tcp_time_stamp + timeout;
1949 rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout;
1950 if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) {
1951 s32 delta = rto_time_stamp - tcp_time_stamp;
1952 if (delta > 0)
1953 timeout = delta;
1954 }
1955
1956 inet_csk_reset_xmit_timer(sk, ICSK_TIME_LOSS_PROBE, timeout,
1957 TCP_RTO_MAX);
1958 return true;
1959}
1960
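The probe timeout chosen above is 2*RTT, stretched to 1.5*RTT plus the delayed-ACK allowance when a lone packet is in flight, floored at 10 ms, and clamped so it never fires after the pending RTO. A worked example in plain milliseconds (an editor's sketch; TCP_DELACK_MAX_MS is an assumed illustrative value, not the kernel constant):

    #include <stdio.h>

    #define TCP_DELACK_MAX_MS 200   /* assumed value for illustration */

    static unsigned int max_u(unsigned int a, unsigned int b)
    {
            return a > b ? a : b;
    }

    static unsigned int pto_ms(unsigned int srtt_ms, unsigned int packets_out,
                               unsigned int rto_remaining_ms)
    {
            unsigned int timeout = 2 * srtt_ms;

            if (packets_out == 1)
                    timeout = max_u(timeout,
                                    srtt_ms + srtt_ms / 2 + TCP_DELACK_MAX_MS);
            timeout = max_u(timeout, 10);           /* 10 ms floor */
            if (timeout > rto_remaining_ms)         /* never outlive the RTO */
                    timeout = rto_remaining_ms;
            return timeout;
    }

    int main(void)
    {
            /* One packet, 50 ms RTT: 1.5 * 50 + 200 = 275 ms, not 100 ms. */
            printf("%u\n", pto_ms(50, 1, 1000));
            /* Ten packets, 50 ms RTT: plain 2 * RTT = 100 ms. */
            printf("%u\n", pto_ms(50, 10, 1000));
            return 0;
    }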
1961/* When probe timeout (PTO) fires, send a new segment if one exists, else
1962 * retransmit the last segment.
1963 */
1964void tcp_send_loss_probe(struct sock *sk)
1965{
1966 struct tcp_sock *tp = tcp_sk(sk);
1967 struct sk_buff *skb;
1968 int pcount;
1969 int mss = tcp_current_mss(sk);
1970 int err = -1;
1971
1972 if (tcp_send_head(sk) != NULL) {
1973 err = tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
1974 goto rearm_timer;
1975 }
1976
1977 /* At most one outstanding TLP retransmission. */
1978 if (tp->tlp_high_seq)
1979 goto rearm_timer;
1980
1981 /* Retransmit last segment. */
1982 skb = tcp_write_queue_tail(sk);
1983 if (WARN_ON(!skb))
1984 goto rearm_timer;
1985
1986 pcount = tcp_skb_pcount(skb);
1987 if (WARN_ON(!pcount))
1988 goto rearm_timer;
1989
1990 if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) {
1991 if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss)))
1992 goto rearm_timer;
1993 skb = tcp_write_queue_tail(sk);
1994 }
1995
1996 if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
1997 goto rearm_timer;
1998
1999 /* Probe with zero data doesn't trigger fast recovery. */
2000 if (skb->len > 0)
2001 err = __tcp_retransmit_skb(sk, skb);
2002
2003 /* Record snd_nxt for loss detection. */
2004 if (likely(!err))
2005 tp->tlp_high_seq = tp->snd_nxt;
2006
2007rearm_timer:
2008 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
2009 inet_csk(sk)->icsk_rto,
2010 TCP_RTO_MAX);
2011
2012 if (likely(!err))
2013 NET_INC_STATS_BH(sock_net(sk),
2014 LINUX_MIB_TCPLOSSPROBES);
2015 return;
2058} 2016
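tcp_send_loss_probe() above prefers new data; failing that it retransmits the tail of the write queue, first trimming a multi-segment GSO skb so that only the final MSS-sized chunk is resent. The split condition is worth checking with numbers (standalone arithmetic; tlp_needs_split() is a hypothetical helper name):

    #include <stdbool.h>

    /* True when an skb of len bytes covering pcount segments must be
     * fragmented at (pcount - 1) * mss before probing with its tail.
     */
    static bool tlp_needs_split(int pcount, int len, int mss)
    {
            return pcount > 1 && len > (pcount - 1) * mss;
    }

    /* Example: pcount = 3, len = 4000, mss = 1448: split at 2896 bytes
     * and probe with the trailing 1104; a single-segment skb is probed
     * as-is.
     */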
2059 2017
2060/* Push out any pending frames which were held back due to 2018
@@ -2675,32 +2633,24 @@ int tcp_send_synack(struct sock *sk)
2675 * sk: listener socket 2633
2676 * dst: dst entry attached to the SYNACK 2634
2677 * req: request_sock pointer 2635
2678 * rvp: request_values pointer
2679 * 2636
2680 * Allocate one skb and build a SYNACK packet. 2637
2681 * @dst is consumed : Caller should not use it again. 2638
2682 */ 2639
2683struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, 2640
2684 struct request_sock *req, 2641
2685 struct request_values *rvp,
2686 struct tcp_fastopen_cookie *foc) 2642
2687{ 2643
2688 struct tcp_out_options opts; 2644
2689 struct tcp_extend_values *xvp = tcp_xv(rvp);
2690 struct inet_request_sock *ireq = inet_rsk(req); 2645
2691 struct tcp_sock *tp = tcp_sk(sk); 2646
2692 const struct tcp_cookie_values *cvp = tp->cookie_values;
2693 struct tcphdr *th; 2647
2694 struct sk_buff *skb; 2648
2695 struct tcp_md5sig_key *md5; 2649
2696 int tcp_header_size; 2650
2697 int mss; 2651
2698 int s_data_desired = 0;
2699 2652
2700 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
2701 s_data_desired = cvp->s_data_desired;
2702 skb = alloc_skb(MAX_TCP_HEADER + 15 + s_data_desired,
2703 sk_gfp_atomic(sk, GFP_ATOMIC));
2653 skb = alloc_skb(MAX_TCP_HEADER + 15, sk_gfp_atomic(sk, GFP_ATOMIC));
2704 if (unlikely(!skb)) { 2654
2705 dst_release(dst); 2655
2706 return NULL; 2656
@@ -2742,9 +2692,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2742 else 2692
2743#endif 2693
2744 TCP_SKB_CB(skb)->when = tcp_time_stamp; 2694
2745 tcp_header_size = tcp_synack_options(sk, req, mss,
2746 skb, &opts, &md5, xvp, foc)
2747 + sizeof(*th);
2695 tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, &md5,
2696 foc) + sizeof(*th);
2748 2697
2749 skb_push(skb, tcp_header_size); 2698
2750 skb_reset_transport_header(skb); 2699
@@ -2762,40 +2711,6 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2762 tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn, 2711
2763 TCPHDR_SYN | TCPHDR_ACK); 2712
2764 2713
2765 if (OPTION_COOKIE_EXTENSION & opts.options) {
2766 if (s_data_desired) {
2767 u8 *buf = skb_put(skb, s_data_desired);
2768
2769 /* copy data directly from the listening socket. */
2770 memcpy(buf, cvp->s_data_payload, s_data_desired);
2771 TCP_SKB_CB(skb)->end_seq += s_data_desired;
2772 }
2773
2774 if (opts.hash_size > 0) {
2775 __u32 workspace[SHA_WORKSPACE_WORDS];
2776 u32 *mess = &xvp->cookie_bakery[COOKIE_DIGEST_WORDS];
2777 u32 *tail = &mess[COOKIE_MESSAGE_WORDS-1];
2778
2779 /* Secret recipe depends on the Timestamp, (future)
2780 * Sequence and Acknowledgment Numbers, Initiator
2781 * Cookie, and others handled by IP variant caller.
2782 */
2783 *tail-- ^= opts.tsval;
2784 *tail-- ^= tcp_rsk(req)->rcv_isn + 1;
2785 *tail-- ^= TCP_SKB_CB(skb)->seq + 1;
2786
2787 /* recommended */
2788 *tail-- ^= (((__force u32)th->dest << 16) | (__force u32)th->source);
2789 *tail-- ^= (u32)(unsigned long)cvp; /* per sockopt */
2790
2791 sha_transform((__u32 *)&xvp->cookie_bakery[0],
2792 (char *)mess,
2793 &workspace[0]);
2794 opts.hash_location =
2795 (__u8 *)&xvp->cookie_bakery[0];
2796 }
2797 }
2798
2799 th->seq = htonl(TCP_SKB_CB(skb)->seq); 2714
2800 /* XXX data is queued and acked as is. No buffer/window check */ 2715
2801 th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt); 2716
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index b78aac30c498..4b85e6f636c9 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -342,10 +342,6 @@ void tcp_retransmit_timer(struct sock *sk)
342 struct tcp_sock *tp = tcp_sk(sk); 342
343 struct inet_connection_sock *icsk = inet_csk(sk); 343
344 344
345 if (tp->early_retrans_delayed) {
346 tcp_resume_early_retransmit(sk);
347 return;
348 }
349 if (tp->fastopen_rsk) { 345
350 WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV && 346
351 sk->sk_state != TCP_FIN_WAIT1); 347
@@ -360,6 +356,8 @@ void tcp_retransmit_timer(struct sock *sk)
360 356
361 WARN_ON(tcp_write_queue_empty(sk)); 357
362 358
359 tp->tlp_high_seq = 0;
360
363 if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) && 361
364 !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) { 362
365 /* Receiver dastardly shrinks window. Our retransmits 363
@@ -418,11 +416,7 @@ void tcp_retransmit_timer(struct sock *sk)
418 NET_INC_STATS_BH(sock_net(sk), mib_idx); 416
419 } 417
420 418
421 if (tcp_use_frto(sk)) {
422 tcp_enter_frto(sk);
423 } else {
424 tcp_enter_loss(sk, 0);
425 }
419 tcp_enter_loss(sk, 0);
426 420
427 if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk)) > 0) { 421
428 /* Retransmission failed because of local congestion, 422
@@ -495,13 +489,20 @@ void tcp_write_timer_handler(struct sock *sk)
495 } 489
496 490
497 event = icsk->icsk_pending; 491
498 icsk->icsk_pending = 0;
499 492
500 switch (event) { 493
494 case ICSK_TIME_EARLY_RETRANS:
495 tcp_resume_early_retransmit(sk);
496 break;
497 case ICSK_TIME_LOSS_PROBE:
498 tcp_send_loss_probe(sk);
499 break;
501 case ICSK_TIME_RETRANS: 500
501 icsk->icsk_pending = 0;
502 tcp_retransmit_timer(sk); 502
503 break; 503
504 case ICSK_TIME_PROBE0: 504
505 icsk->icsk_pending = 0;
505 tcp_probe_timer(sk); 506
506 break; 507
507 } 508
diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c
index 1b91bf48e277..76a1e23259e1 100644
--- a/net/ipv4/tcp_westwood.c
+++ b/net/ipv4/tcp_westwood.c
@@ -236,7 +236,7 @@ static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
236 tp->snd_cwnd = tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk); 236
237 break; 237
238 238
239 case CA_EVENT_FRTO:
239 case CA_EVENT_LOSS:
240 tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk); 240
241 /* Update RTT_min when next ack arrives */ 241
242 w->reset_rtt_min = 1; 242
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 0a073a263720..7117d1467b02 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2279,31 +2279,88 @@ void __init udp_init(void)
2279 2279
2280int udp4_ufo_send_check(struct sk_buff *skb) 2280
2281{ 2281
2282 const struct iphdr *iph;
2283 struct udphdr *uh;
2284
2285 if (!pskb_may_pull(skb, sizeof(*uh)))
2286 return -EINVAL;
2287
2288 iph = ip_hdr(skb);
2289 uh = udp_hdr(skb);
2290
2291 uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
2292 IPPROTO_UDP, 0);
2293 skb->csum_start = skb_transport_header(skb) - skb->head;
2294 skb->csum_offset = offsetof(struct udphdr, check);
2295 skb->ip_summed = CHECKSUM_PARTIAL;
2282 if (!pskb_may_pull(skb, sizeof(struct udphdr)))
2283 return -EINVAL;
2284
2285 if (likely(!skb->encapsulation)) {
2286 const struct iphdr *iph;
2287 struct udphdr *uh;
2288
2289 iph = ip_hdr(skb);
2290 uh = udp_hdr(skb);
2291
2292 uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len,
2293 IPPROTO_UDP, 0);
2294 skb->csum_start = skb_transport_header(skb) - skb->head;
2295 skb->csum_offset = offsetof(struct udphdr, check);
2296 skb->ip_summed = CHECKSUM_PARTIAL;
2297 }
2296 return 0; 2298
2297} 2299
2298 2300
2301static struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
2302 netdev_features_t features)
2303{
2304 struct sk_buff *segs = ERR_PTR(-EINVAL);
2305 int mac_len = skb->mac_len;
2306 int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
2307 int outer_hlen;
2308 netdev_features_t enc_features;
2309
2310 if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
2311 goto out;
2312
2313 skb->encapsulation = 0;
2314 __skb_pull(skb, tnl_hlen);
2315 skb_reset_mac_header(skb);
2316 skb_set_network_header(skb, skb_inner_network_offset(skb));
2317 skb->mac_len = skb_inner_network_offset(skb);
2318
2319 /* segment inner packet. */
2320 enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
2321 segs = skb_mac_gso_segment(skb, enc_features);
2322 if (!segs || IS_ERR(segs))
2323 goto out;
2324
2325 outer_hlen = skb_tnl_header_len(skb);
2326 skb = segs;
2327 do {
2328 struct udphdr *uh;
2329 int udp_offset = outer_hlen - tnl_hlen;
2330
2331 skb->mac_len = mac_len;
2332
2333 skb_push(skb, outer_hlen);
2334 skb_reset_mac_header(skb);
2335 skb_set_network_header(skb, mac_len);
2336 skb_set_transport_header(skb, udp_offset);
2337 uh = udp_hdr(skb);
2338 uh->len = htons(skb->len - udp_offset);
2339
2340 /* csum segment if tunnel sets skb with csum. */
2341 if (unlikely(uh->check)) {
2342 struct iphdr *iph = ip_hdr(skb);
2343
2344 uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
2345 skb->len - udp_offset,
2346 IPPROTO_UDP, 0);
2347 uh->check = csum_fold(skb_checksum(skb, udp_offset,
2348 skb->len - udp_offset, 0));
2349 if (uh->check == 0)
2350 uh->check = CSUM_MANGLED_0;
2351
2352 }
2353 skb->ip_summed = CHECKSUM_NONE;
2354 } while ((skb = skb->next));
2355out:
2356 return segs;
2357}
2358
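skb_udp_tunnel_segment() above juggles three lengths: tnl_hlen (from the outer transport header to the inner MAC header), outer_hlen (everything pushed back onto each segment), and udp_offset (where the outer UDP header lands). The arithmetic for an assumed VXLAN-style frame, with illustrative header sizes (editor's sketch):

    #include <stdio.h>

    int main(void)
    {
            int mac_len   = 14;     /* outer Ethernet */
            int outer_ip  = 20;     /* outer IPv4 */
            int outer_udp = 8;      /* outer UDP */
            int tun_hdr   = 8;      /* assumed VXLAN-style tunnel header */

            int tnl_hlen   = outer_udp + tun_hdr;           /* 16 */
            int outer_hlen = mac_len + outer_ip + tnl_hlen; /* 50 */
            int udp_offset = outer_hlen - tnl_hlen;         /* 34 */

            printf("tnl_hlen=%d outer_hlen=%d udp_offset=%d\n",
                   tnl_hlen, outer_hlen, udp_offset);
            return 0;
    }

After skb_push(skb, outer_hlen), udp_offset is exactly mac_len + outer_ip, which is where skb_set_transport_header() points inside the segmentation loop above.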
2299struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb, 2359
2300 netdev_features_t features) 2360
2301{ 2361
2302 struct sk_buff *segs = ERR_PTR(-EINVAL); 2362
2303 unsigned int mss; 2363
2304 int offset;
2305 __wsum csum;
2306
2307 mss = skb_shinfo(skb)->gso_size; 2364
2308 if (unlikely(skb->len <= mss)) 2365
2309 goto out; 2366
@@ -2313,6 +2370,7 @@ struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
2313 int type = skb_shinfo(skb)->gso_type; 2370
2314 2371
2315 if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY | 2372
2373 SKB_GSO_UDP_TUNNEL |
2316 SKB_GSO_GRE) || 2374
2317 !(type & (SKB_GSO_UDP)))) 2375
2318 goto out; 2376
@@ -2323,20 +2381,27 @@ struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
2323 goto out; 2381
2324 } 2382
2325 2383
2326 /* Do software UFO. Complete and fill in the UDP checksum as HW cannot
2327 * do checksum of UDP packets sent as multiple IP fragments.
2328 */
2329 offset = skb_checksum_start_offset(skb);
2330 csum = skb_checksum(skb, offset, skb->len - offset, 0);
2331 offset += skb->csum_offset;
2332 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
2333 skb->ip_summed = CHECKSUM_NONE;
2334
2335 /* Fragment the skb. IP headers of the fragments are updated in 2384
2336 * inet_gso_segment() 2385
2337 */ 2386
2338 segs = skb_segment(skb, features);
2387 if (skb->encapsulation && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL)
2388 segs = skb_udp_tunnel_segment(skb, features);
2389 else {
2390 int offset;
2391 __wsum csum;
2392
2393 /* Do software UFO. Complete and fill in the UDP checksum as
2394 * HW cannot do checksum of UDP packets sent as multiple
2395 * IP fragments.
2396 */
2397 offset = skb_checksum_start_offset(skb);
2398 csum = skb_checksum(skb, offset, skb->len - offset, 0);
2399 offset += skb->csum_offset;
2400 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
2401 skb->ip_summed = CHECKSUM_NONE;
2402
2403 segs = skb_segment(skb, features);
2404 }
2339out: 2405out:
2340 return segs; 2406 return segs;
2341} 2407}
2342
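
Reviewer note: the udp_offload hunks above derive the outer UDP checksum for each tunnel segment in two steps — uh->check is first seeded with the complemented pseudo-header sum (~csum_tcpudp_magic), then the whole UDP segment is checksummed and folded, and a zero result is rewritten to CSUM_MANGLED_0 because an all-zero UDP checksum means "no checksum". Below is a minimal userspace sketch of the equivalent arithmetic, assuming a contiguous buffer and made-up addresses; the kernel walks skb fragments with its own csum helpers instead.

#include <stdint.h>
#include <stdio.h>

/* Ones-complement sum of a byte buffer, seeded with 'sum'. */
static uint32_t csum_buf(uint32_t sum, const uint8_t *buf, size_t len)
{
    while (len > 1) {
        sum += (uint32_t)(buf[0] << 8 | buf[1]);
        buf += 2;
        len -= 2;
    }
    if (len)
        sum += (uint32_t)buf[0] << 8;
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return sum;
}

/* Pseudo-header contribution, mirroring what csum_tcpudp_magic() covers. */
static uint32_t pseudo_sum(uint32_t saddr, uint32_t daddr, uint16_t len)
{
    uint32_t sum = (saddr >> 16) + (saddr & 0xffff)
                 + (daddr >> 16) + (daddr & 0xffff)
                 + 17 /* IPPROTO_UDP */ + len;
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return sum;
}

int main(void)
{
    /* UDP header (check field zeroed) followed by a 4-byte payload. */
    uint8_t seg[] = { 0x30, 0x39, 0x00, 0x35, 0x00, 0x0c, 0x00, 0x00,
                      'p', 'i', 'n', 'g' };
    uint32_t sum = pseudo_sum(0xc0a80001, 0xc0a80002, sizeof(seg));
    uint16_t check = (uint16_t)~csum_buf(sum, seg, sizeof(seg));

    if (check == 0)
        check = 0xffff;   /* CSUM_MANGLED_0: zero would mean "no csum" */
    printf("outer UDP checksum: 0x%04x\n", check);
    return 0;
}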
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index 505b30ad9182..369a781851ad 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -64,9 +64,9 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
64 goto out; 64 goto out;
65 65
66 err = -ENOMEM; 66 err = -ENOMEM;
67 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) + 67 rep = nlmsg_new(sizeof(struct inet_diag_msg) +
68 sizeof(struct inet_diag_meminfo) + 68 sizeof(struct inet_diag_meminfo) + 64,
69 64)), GFP_KERNEL); 69 GFP_KERNEL);
70 if (!rep) 70 if (!rep)
71 goto out; 71 goto out;
72 72
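
Reviewer note: the udp_diag hunk swaps an open-coded alloc_skb(NLMSG_SPACE(payload)) for nlmsg_new(payload, GFP_KERNEL), which sizes the skb as header plus aligned payload itself. The userspace check below — a sketch compiled against the uapi <linux/netlink.h>, with a stand-in payload figure rather than the real sizeof() sum — shows the two sizings agree.

#include <stdio.h>
#include <linux/netlink.h>

int main(void)
{
    size_t payload = 128 + 64;   /* stand-in for diag msg + meminfo + 64 */

    printf("NLMSG_HDRLEN   = %d\n", NLMSG_HDRLEN);
    printf("NLMSG_SPACE(p) = %zu\n", (size_t)NLMSG_SPACE(payload));
    printf("hdr + aligned  = %zu\n",
           (size_t)(NLMSG_HDRLEN + NLMSG_ALIGN(payload)));
    return 0;
}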
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index ed0b9e2e797a..11b13ea69db4 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -156,6 +156,7 @@ config INET6_XFRM_MODE_ROUTEOPTIMIZATION
156config IPV6_SIT 156config IPV6_SIT
157 tristate "IPv6: IPv6-in-IPv4 tunnel (SIT driver)" 157 tristate "IPv6: IPv6-in-IPv4 tunnel (SIT driver)"
158 select INET_TUNNEL 158 select INET_TUNNEL
159 select NET_IP_TUNNEL
159 select IPV6_NDISC_NODETYPE 160 select IPV6_NDISC_NODETYPE
160 default y 161 default y
161 ---help--- 162 ---help---
@@ -201,6 +202,7 @@ config IPV6_TUNNEL
201config IPV6_GRE 202config IPV6_GRE
202 tristate "IPv6: GRE tunnel" 203 tristate "IPv6: GRE tunnel"
203 select IPV6_TUNNEL 204 select IPV6_TUNNEL
205 select NET_IP_TUNNEL
204 ---help--- 206 ---help---
205 Tunneling means encapsulating data of one protocol type within 207 Tunneling means encapsulating data of one protocol type within
206 another protocol and sending it over a channel that understands the 208 another protocol and sending it over a channel that understands the
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index a459c4f5b769..a33b157d9ccf 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -70,6 +70,7 @@
70#include <net/snmp.h> 70#include <net/snmp.h>
71 71
72#include <net/af_ieee802154.h> 72#include <net/af_ieee802154.h>
73#include <net/firewire.h>
73#include <net/ipv6.h> 74#include <net/ipv6.h>
74#include <net/protocol.h> 75#include <net/protocol.h>
75#include <net/ndisc.h> 76#include <net/ndisc.h>
@@ -544,8 +545,7 @@ static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = {
544}; 545};
545 546
546static int inet6_netconf_get_devconf(struct sk_buff *in_skb, 547static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
547 struct nlmsghdr *nlh, 548 struct nlmsghdr *nlh)
548 void *arg)
549{ 549{
550 struct net *net = sock_net(in_skb->sk); 550 struct net *net = sock_net(in_skb->sk);
551 struct nlattr *tb[NETCONFA_MAX+1]; 551 struct nlattr *tb[NETCONFA_MAX+1];
@@ -605,6 +605,77 @@ errout:
605 return err; 605 return err;
606} 606}
607 607
608static int inet6_netconf_dump_devconf(struct sk_buff *skb,
609 struct netlink_callback *cb)
610{
611 struct net *net = sock_net(skb->sk);
612 int h, s_h;
613 int idx, s_idx;
614 struct net_device *dev;
615 struct inet6_dev *idev;
616 struct hlist_head *head;
617
618 s_h = cb->args[0];
619 s_idx = idx = cb->args[1];
620
621 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
622 idx = 0;
623 head = &net->dev_index_head[h];
624 rcu_read_lock();
625 cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
626 net->dev_base_seq;
627 hlist_for_each_entry_rcu(dev, head, index_hlist) {
628 if (idx < s_idx)
629 goto cont;
630 idev = __in6_dev_get(dev);
631 if (!idev)
632 goto cont;
633
634 if (inet6_netconf_fill_devconf(skb, dev->ifindex,
635 &idev->cnf,
636 NETLINK_CB(cb->skb).portid,
637 cb->nlh->nlmsg_seq,
638 RTM_NEWNETCONF,
639 NLM_F_MULTI,
640 -1) <= 0) {
641 rcu_read_unlock();
642 goto done;
643 }
644 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
645cont:
646 idx++;
647 }
648 rcu_read_unlock();
649 }
650 if (h == NETDEV_HASHENTRIES) {
651 if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
652 net->ipv6.devconf_all,
653 NETLINK_CB(cb->skb).portid,
654 cb->nlh->nlmsg_seq,
655 RTM_NEWNETCONF, NLM_F_MULTI,
656 -1) <= 0)
657 goto done;
658 else
659 h++;
660 }
661 if (h == NETDEV_HASHENTRIES + 1) {
662 if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
663 net->ipv6.devconf_dflt,
664 NETLINK_CB(cb->skb).portid,
665 cb->nlh->nlmsg_seq,
666 RTM_NEWNETCONF, NLM_F_MULTI,
667 -1) <= 0)
668 goto done;
669 else
670 h++;
671 }
672done:
673 cb->args[0] = h;
674 cb->args[1] = idx;
675
676 return skb->len;
677}
678
608#ifdef CONFIG_SYSCTL 679#ifdef CONFIG_SYSCTL
609static void dev_forward_change(struct inet6_dev *idev) 680static void dev_forward_change(struct inet6_dev *idev)
610{ 681{
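
Reviewer note: the new inet6_netconf_dump_devconf() records cb->seq as dev_addr_genid XOR dev_base_seq and calls nl_dump_check_consistent() per message, so a dump interrupted by an address change is flagged to userspace (the genid increment appears later in this patch, in __ipv6_ifa_notify()). A minimal userspace sketch of that consistency scheme, with invented names:

#include <stdbool.h>
#include <stdio.h>

static unsigned int dev_addr_genid; /* bumped when an address changes */
static unsigned int dev_base_seq;   /* bumped on device add/remove */

struct dump_cb { unsigned int seq; };

static void dump_start(struct dump_cb *cb)
{
    cb->seq = dev_addr_genid ^ dev_base_seq;
}

static bool dump_still_consistent(const struct dump_cb *cb)
{
    return cb->seq == (dev_addr_genid ^ dev_base_seq);
}

int main(void)
{
    struct dump_cb cb;

    dump_start(&cb);
    printf("consistent: %d\n", dump_still_consistent(&cb)); /* 1 */
    dev_addr_genid++;   /* an address changed mid-dump */
    printf("consistent: %d\n", dump_still_consistent(&cb)); /* 0 */
    return 0;
}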
@@ -1668,6 +1739,20 @@ static int addrconf_ifid_eui64(u8 *eui, struct net_device *dev)
1668 return 0; 1739 return 0;
1669} 1740}
1670 1741
1742static int addrconf_ifid_ieee1394(u8 *eui, struct net_device *dev)
1743{
1744 union fwnet_hwaddr *ha;
1745
1746 if (dev->addr_len != FWNET_ALEN)
1747 return -1;
1748
1749 ha = (union fwnet_hwaddr *)dev->dev_addr;
1750
1751 memcpy(eui, &ha->uc.uniq_id, sizeof(ha->uc.uniq_id));
1752 eui[0] ^= 2;
1753 return 0;
1754}
1755
1671static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev) 1756static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev)
1672{ 1757{
1673 /* XXX: inherit EUI-64 from other interface -- yoshfuji */ 1758 /* XXX: inherit EUI-64 from other interface -- yoshfuji */
@@ -1732,6 +1817,8 @@ static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
1732 return addrconf_ifid_gre(eui, dev); 1817 return addrconf_ifid_gre(eui, dev);
1733 case ARPHRD_IEEE802154: 1818 case ARPHRD_IEEE802154:
1734 return addrconf_ifid_eui64(eui, dev); 1819 return addrconf_ifid_eui64(eui, dev);
1820 case ARPHRD_IEEE1394:
1821 return addrconf_ifid_ieee1394(eui, dev);
1735 } 1822 }
1736 return -1; 1823 return -1;
1737} 1824}
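
Reviewer note: addrconf_ifid_ieee1394() builds the IPv6 interface identifier from the 8-byte FireWire unique ID with the universal/local bit complemented — the "eui[0] ^= 2" in the hunk — which is the usual EUI-64-to-IID transformation (as RFC 3146 applies it to IPv6 over IEEE 1394). A userspace sketch of the same derivation:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the hunk: copy the 8-byte unique ID, then flip the
 * universal/local bit. */
static void fwnet_iid(uint8_t eui[8], const uint8_t uniq_id[8])
{
    for (int i = 0; i < 8; i++)
        eui[i] = uniq_id[i];
    eui[0] ^= 0x02;
}

int main(void)
{
    const uint8_t id[8] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };
    uint8_t eui[8];

    fwnet_iid(eui, id);
    for (int i = 0; i < 8; i++)
        printf("%02x%c", eui[i], i == 7 ? '\n' : ':');
    return 0;
}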
@@ -2600,7 +2687,8 @@ static void addrconf_dev_config(struct net_device *dev)
2600 (dev->type != ARPHRD_FDDI) && 2687 (dev->type != ARPHRD_FDDI) &&
2601 (dev->type != ARPHRD_ARCNET) && 2688 (dev->type != ARPHRD_ARCNET) &&
2602 (dev->type != ARPHRD_INFINIBAND) && 2689 (dev->type != ARPHRD_INFINIBAND) &&
2603 (dev->type != ARPHRD_IEEE802154)) { 2690 (dev->type != ARPHRD_IEEE802154) &&
2691 (dev->type != ARPHRD_IEEE1394)) {
2604 /* Alas, we support only Ethernet autoconfiguration. */ 2692 /* Alas, we support only Ethernet autoconfiguration. */
2605 return; 2693 return;
2606 } 2694 }
@@ -3537,7 +3625,7 @@ static const struct nla_policy ifa_ipv6_policy[IFA_MAX+1] = {
3537}; 3625};
3538 3626
3539static int 3627static int
3540inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 3628inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
3541{ 3629{
3542 struct net *net = sock_net(skb->sk); 3630 struct net *net = sock_net(skb->sk);
3543 struct ifaddrmsg *ifm; 3631 struct ifaddrmsg *ifm;
@@ -3603,7 +3691,7 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, u8 ifa_flags,
3603} 3691}
3604 3692
3605static int 3693static int
3606inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) 3694inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
3607{ 3695{
3608 struct net *net = sock_net(skb->sk); 3696 struct net *net = sock_net(skb->sk);
3609 struct ifaddrmsg *ifm; 3697 struct ifaddrmsg *ifm;
@@ -3834,6 +3922,7 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
3834 NLM_F_MULTI); 3922 NLM_F_MULTI);
3835 if (err <= 0) 3923 if (err <= 0)
3836 break; 3924 break;
3925 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
3837 } 3926 }
3838 break; 3927 break;
3839 } 3928 }
@@ -3891,6 +3980,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
3891 s_ip_idx = ip_idx = cb->args[2]; 3980 s_ip_idx = ip_idx = cb->args[2];
3892 3981
3893 rcu_read_lock(); 3982 rcu_read_lock();
3983 cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
3894 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { 3984 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
3895 idx = 0; 3985 idx = 0;
3896 head = &net->dev_index_head[h]; 3986 head = &net->dev_index_head[h];
@@ -3942,8 +4032,7 @@ static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb)
3942 return inet6_dump_addr(skb, cb, type); 4032 return inet6_dump_addr(skb, cb, type);
3943} 4033}
3944 4034
3945static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh, 4035static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh)
3946 void *arg)
3947{ 4036{
3948 struct net *net = sock_net(in_skb->sk); 4037 struct net *net = sock_net(in_skb->sk);
3949 struct ifaddrmsg *ifm; 4038 struct ifaddrmsg *ifm;
@@ -4368,6 +4457,8 @@ errout:
4368 4457
4369static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) 4458static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
4370{ 4459{
4460 struct net *net = dev_net(ifp->idev->dev);
4461
4371 inet6_ifa_notify(event ? : RTM_NEWADDR, ifp); 4462 inet6_ifa_notify(event ? : RTM_NEWADDR, ifp);
4372 4463
4373 switch (event) { 4464 switch (event) {
@@ -4393,6 +4484,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
4393 dst_free(&ifp->rt->dst); 4484 dst_free(&ifp->rt->dst);
4394 break; 4485 break;
4395 } 4486 }
4487 atomic_inc(&net->ipv6.dev_addr_genid);
4396} 4488}
4397 4489
4398static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) 4490static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
@@ -4961,7 +5053,7 @@ int __init addrconf_init(void)
4961 __rtnl_register(PF_INET6, RTM_GETANYCAST, NULL, 5053 __rtnl_register(PF_INET6, RTM_GETANYCAST, NULL,
4962 inet6_dump_ifacaddr, NULL); 5054 inet6_dump_ifacaddr, NULL);
4963 __rtnl_register(PF_INET6, RTM_GETNETCONF, inet6_netconf_get_devconf, 5055 __rtnl_register(PF_INET6, RTM_GETNETCONF, inet6_netconf_get_devconf,
4964 NULL, NULL); 5056 inet6_netconf_dump_devconf, NULL);
4965 5057
4966 ipv6_addr_label_rtnl_register(); 5058 ipv6_addr_label_rtnl_register();
4967 5059
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index aad64352cb60..f083a583a05c 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -414,8 +414,7 @@ static const struct nla_policy ifal_policy[IFAL_MAX+1] = {
414 [IFAL_LABEL] = { .len = sizeof(u32), }, 414 [IFAL_LABEL] = { .len = sizeof(u32), },
415}; 415};
416 416
417static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh, 417static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh)
418 void *arg)
419{ 418{
420 struct net *net = sock_net(skb->sk); 419 struct net *net = sock_net(skb->sk);
421 struct ifaddrlblmsg *ifal; 420 struct ifaddrlblmsg *ifal;
@@ -436,10 +435,7 @@ static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh,
436 435
437 if (!tb[IFAL_ADDRESS]) 436 if (!tb[IFAL_ADDRESS])
438 return -EINVAL; 437 return -EINVAL;
439
440 pfx = nla_data(tb[IFAL_ADDRESS]); 438 pfx = nla_data(tb[IFAL_ADDRESS]);
441 if (!pfx)
442 return -EINVAL;
443 439
444 if (!tb[IFAL_LABEL]) 440 if (!tb[IFAL_LABEL])
445 return -EINVAL; 441 return -EINVAL;
@@ -533,8 +529,7 @@ static inline int ip6addrlbl_msgsize(void)
533 + nla_total_size(4); /* IFAL_LABEL */ 529 + nla_total_size(4); /* IFAL_LABEL */
534} 530}
535 531
536static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh, 532static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh)
537 void *arg)
538{ 533{
539 struct net *net = sock_net(in_skb->sk); 534 struct net *net = sock_net(in_skb->sk);
540 struct ifaddrlblmsg *ifal; 535 struct ifaddrlblmsg *ifal;
@@ -561,10 +556,7 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh,
561 556
562 if (!tb[IFAL_ADDRESS]) 557 if (!tb[IFAL_ADDRESS])
563 return -EINVAL; 558 return -EINVAL;
564
565 addr = nla_data(tb[IFAL_ADDRESS]); 559 addr = nla_data(tb[IFAL_ADDRESS]);
566 if (!addr)
567 return -EINVAL;
568 560
569 rcu_read_lock(); 561 rcu_read_lock();
570 p = __ipv6_addr_label(net, addr, ipv6_addr_type(addr), ifal->ifal_index); 562 p = __ipv6_addr_label(net, addr, ipv6_addr_type(addr), ifal->ifal_index);
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 6b793bfc0e10..ab5c7ad482cd 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -49,7 +49,6 @@
49#include <net/udp.h> 49#include <net/udp.h>
50#include <net/udplite.h> 50#include <net/udplite.h>
51#include <net/tcp.h> 51#include <net/tcp.h>
52#include <net/ipip.h>
53#include <net/protocol.h> 52#include <net/protocol.h>
54#include <net/inet_common.h> 53#include <net/inet_common.h>
55#include <net/route.h> 54#include <net/route.h>
@@ -323,7 +322,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
323 struct net_device *dev = NULL; 322 struct net_device *dev = NULL;
324 323
325 rcu_read_lock(); 324 rcu_read_lock();
326 if (addr_type & IPV6_ADDR_LINKLOCAL) { 325 if (__ipv6_addr_needs_scope_id(addr_type)) {
327 if (addr_len >= sizeof(struct sockaddr_in6) && 326 if (addr_len >= sizeof(struct sockaddr_in6) &&
328 addr->sin6_scope_id) { 327 addr->sin6_scope_id) {
329 /* Override any existing binding, if another one 328 /* Override any existing binding, if another one
@@ -471,8 +470,8 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
471 470
472 sin->sin6_port = inet->inet_sport; 471 sin->sin6_port = inet->inet_sport;
473 } 472 }
474 if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL) 473 sin->sin6_scope_id = ipv6_iface_scope_id(&sin->sin6_addr,
475 sin->sin6_scope_id = sk->sk_bound_dev_if; 474 sk->sk_bound_dev_if);
476 *uaddr_len = sizeof(*sin); 475 *uaddr_len = sizeof(*sin);
477 return 0; 476 return 0;
478} 477}
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index f5a54782a340..4b56cbbc7890 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -124,7 +124,7 @@ ipv4_connected:
124 goto out; 124 goto out;
125 } 125 }
126 126
127 if (addr_type&IPV6_ADDR_LINKLOCAL) { 127 if (__ipv6_addr_needs_scope_id(addr_type)) {
128 if (addr_len >= sizeof(struct sockaddr_in6) && 128 if (addr_len >= sizeof(struct sockaddr_in6) &&
129 usin->sin6_scope_id) { 129 usin->sin6_scope_id) {
130 if (sk->sk_bound_dev_if && 130 if (sk->sk_bound_dev_if &&
@@ -355,18 +355,19 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
355 sin->sin6_family = AF_INET6; 355 sin->sin6_family = AF_INET6;
356 sin->sin6_flowinfo = 0; 356 sin->sin6_flowinfo = 0;
357 sin->sin6_port = serr->port; 357 sin->sin6_port = serr->port;
358 sin->sin6_scope_id = 0;
359 if (skb->protocol == htons(ETH_P_IPV6)) { 358 if (skb->protocol == htons(ETH_P_IPV6)) {
360 const struct ipv6hdr *ip6h = container_of((struct in6_addr *)(nh + serr->addr_offset), 359 const struct ipv6hdr *ip6h = container_of((struct in6_addr *)(nh + serr->addr_offset),
361 struct ipv6hdr, daddr); 360 struct ipv6hdr, daddr);
362 sin->sin6_addr = ip6h->daddr; 361 sin->sin6_addr = ip6h->daddr;
363 if (np->sndflow) 362 if (np->sndflow)
364 sin->sin6_flowinfo = ip6_flowinfo(ip6h); 363 sin->sin6_flowinfo = ip6_flowinfo(ip6h);
365 if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL) 364 sin->sin6_scope_id =
366 sin->sin6_scope_id = IP6CB(skb)->iif; 365 ipv6_iface_scope_id(&sin->sin6_addr,
366 IP6CB(skb)->iif);
367 } else { 367 } else {
368 ipv6_addr_set_v4mapped(*(__be32 *)(nh + serr->addr_offset), 368 ipv6_addr_set_v4mapped(*(__be32 *)(nh + serr->addr_offset),
369 &sin->sin6_addr); 369 &sin->sin6_addr);
370 sin->sin6_scope_id = 0;
370 } 371 }
371 } 372 }
372 373
@@ -376,18 +377,19 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
376 if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) { 377 if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) {
377 sin->sin6_family = AF_INET6; 378 sin->sin6_family = AF_INET6;
378 sin->sin6_flowinfo = 0; 379 sin->sin6_flowinfo = 0;
379 sin->sin6_scope_id = 0;
380 if (skb->protocol == htons(ETH_P_IPV6)) { 380 if (skb->protocol == htons(ETH_P_IPV6)) {
381 sin->sin6_addr = ipv6_hdr(skb)->saddr; 381 sin->sin6_addr = ipv6_hdr(skb)->saddr;
382 if (np->rxopt.all) 382 if (np->rxopt.all)
383 ip6_datagram_recv_ctl(sk, msg, skb); 383 ip6_datagram_recv_ctl(sk, msg, skb);
384 if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL) 384 sin->sin6_scope_id =
385 sin->sin6_scope_id = IP6CB(skb)->iif; 385 ipv6_iface_scope_id(&sin->sin6_addr,
386 IP6CB(skb)->iif);
386 } else { 387 } else {
387 struct inet_sock *inet = inet_sk(sk); 388 struct inet_sock *inet = inet_sk(sk);
388 389
389 ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr, 390 ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
390 &sin->sin6_addr); 391 &sin->sin6_addr);
392 sin->sin6_scope_id = 0;
391 if (inet->cmsg_flags) 393 if (inet->cmsg_flags)
392 ip_cmsg_recv(msg, skb); 394 ip_cmsg_recv(msg, skb);
393 } 395 }
@@ -592,7 +594,9 @@ int ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg,
592 sin6.sin6_addr = ipv6_hdr(skb)->daddr; 594 sin6.sin6_addr = ipv6_hdr(skb)->daddr;
593 sin6.sin6_port = ports[1]; 595 sin6.sin6_port = ports[1];
594 sin6.sin6_flowinfo = 0; 596 sin6.sin6_flowinfo = 0;
595 sin6.sin6_scope_id = 0; 597 sin6.sin6_scope_id =
598 ipv6_iface_scope_id(&ipv6_hdr(skb)->daddr,
599 opt->iif);
596 600
597 put_cmsg(msg, SOL_IPV6, IPV6_ORIGDSTADDR, sizeof(sin6), &sin6); 601 put_cmsg(msg, SOL_IPV6, IPV6_ORIGDSTADDR, sizeof(sin6), &sin6);
598 } 602 }
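
Reviewer note: here and in the files that follow, the recurring open-coded test "if (ipv6_addr_type(addr) & IPV6_ADDR_LINKLOCAL) scope_id = iif;" is folded into two helpers, __ipv6_addr_needs_scope_id() and ipv6_iface_scope_id(). A hedged userspace sketch of the core idea — the real helpers reportedly also cover interface-local multicast scope, which this simplification omits:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>
#include <netinet/in.h>

static uint32_t iface_scope_id(const struct in6_addr *addr, int iface)
{
    /* Only addresses with link scope need a scope id. */
    return IN6_IS_ADDR_LINKLOCAL(addr) ? (uint32_t)iface : 0;
}

int main(void)
{
    struct in6_addr ll, glob;

    inet_pton(AF_INET6, "fe80::1", &ll);
    inet_pton(AF_INET6, "2001:db8::1", &glob);
    printf("fe80::1     -> scope %u\n", iface_scope_id(&ll, 3));
    printf("2001:db8::1 -> scope %u\n", iface_scope_id(&glob, 3));
    return 0;
}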
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index fff5bdd8b680..71b900c3f4ff 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -434,7 +434,7 @@ void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
434 * Source addr check 434 * Source addr check
435 */ 435 */
436 436
437 if (addr_type & IPV6_ADDR_LINKLOCAL) 437 if (__ipv6_addr_needs_scope_id(addr_type))
438 iif = skb->dev->ifindex; 438 iif = skb->dev->ifindex;
439 439
440 /* 440 /*
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 9bfab19ff3c0..e4311cbc8b4e 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -54,6 +54,10 @@ int inet6_csk_bind_conflict(const struct sock *sk,
54 if (ipv6_rcv_saddr_equal(sk, sk2)) 54 if (ipv6_rcv_saddr_equal(sk, sk2))
55 break; 55 break;
56 } 56 }
57 if (!relax && reuse && sk2->sk_reuse &&
58 sk2->sk_state != TCP_LISTEN &&
59 ipv6_rcv_saddr_equal(sk, sk2))
60 break;
57 } 61 }
58 } 62 }
59 63
@@ -169,10 +173,8 @@ void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
169 sin6->sin6_port = inet_sk(sk)->inet_dport; 173 sin6->sin6_port = inet_sk(sk)->inet_dport;
170 /* We do not store received flowlabel for TCP */ 174 /* We do not store received flowlabel for TCP */
171 sin6->sin6_flowinfo = 0; 175 sin6->sin6_flowinfo = 0;
172 sin6->sin6_scope_id = 0; 176 sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
173 if (sk->sk_bound_dev_if && 177 sk->sk_bound_dev_if);
174 ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
175 sin6->sin6_scope_id = sk->sk_bound_dev_if;
176} 178}
177 179
178EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr); 180EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr);
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index b973ed3d06cf..46e88433ec7d 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -144,7 +144,9 @@ static void ip6_fl_gc(unsigned long dummy)
144 spin_lock(&ip6_fl_lock); 144 spin_lock(&ip6_fl_lock);
145 145
146 for (i=0; i<=FL_HASH_MASK; i++) { 146 for (i=0; i<=FL_HASH_MASK; i++) {
147 struct ip6_flowlabel *fl, **flp; 147 struct ip6_flowlabel *fl;
148 struct ip6_flowlabel __rcu **flp;
149
148 flp = &fl_ht[i]; 150 flp = &fl_ht[i];
149 while ((fl = rcu_dereference_protected(*flp, 151 while ((fl = rcu_dereference_protected(*flp,
150 lockdep_is_held(&ip6_fl_lock))) != NULL) { 152 lockdep_is_held(&ip6_fl_lock))) != NULL) {
@@ -179,7 +181,9 @@ static void __net_exit ip6_fl_purge(struct net *net)
179 181
180 spin_lock(&ip6_fl_lock); 182 spin_lock(&ip6_fl_lock);
181 for (i = 0; i <= FL_HASH_MASK; i++) { 183 for (i = 0; i <= FL_HASH_MASK; i++) {
182 struct ip6_flowlabel *fl, **flp; 184 struct ip6_flowlabel *fl;
185 struct ip6_flowlabel __rcu **flp;
186
183 flp = &fl_ht[i]; 187 flp = &fl_ht[i];
184 while ((fl = rcu_dereference_protected(*flp, 188 while ((fl = rcu_dereference_protected(*flp,
185 lockdep_is_held(&ip6_fl_lock))) != NULL) { 189 lockdep_is_held(&ip6_fl_lock))) != NULL) {
@@ -506,7 +510,8 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
506 struct ipv6_pinfo *np = inet6_sk(sk); 510 struct ipv6_pinfo *np = inet6_sk(sk);
507 struct in6_flowlabel_req freq; 511 struct in6_flowlabel_req freq;
508 struct ipv6_fl_socklist *sfl1=NULL; 512 struct ipv6_fl_socklist *sfl1=NULL;
509 struct ipv6_fl_socklist *sfl, **sflp; 513 struct ipv6_fl_socklist *sfl;
514 struct ipv6_fl_socklist __rcu **sflp;
510 struct ip6_flowlabel *fl, *fl1 = NULL; 515 struct ip6_flowlabel *fl, *fl1 = NULL;
511 516
512 517
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index e4efffe2522e..d3ddd8400354 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -38,6 +38,7 @@
38 38
39#include <net/sock.h> 39#include <net/sock.h>
40#include <net/ip.h> 40#include <net/ip.h>
41#include <net/ip_tunnels.h>
41#include <net/icmp.h> 42#include <net/icmp.h>
42#include <net/protocol.h> 43#include <net/protocol.h>
43#include <net/addrconf.h> 44#include <net/addrconf.h>
@@ -110,46 +111,6 @@ static u32 HASH_ADDR(const struct in6_addr *addr)
110#define tunnels_l tunnels[1] 111#define tunnels_l tunnels[1]
111#define tunnels_wc tunnels[0] 112#define tunnels_wc tunnels[0]
112 113
113static struct rtnl_link_stats64 *ip6gre_get_stats64(struct net_device *dev,
114 struct rtnl_link_stats64 *tot)
115{
116 int i;
117
118 for_each_possible_cpu(i) {
119 const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
120 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
121 unsigned int start;
122
123 do {
124 start = u64_stats_fetch_begin_bh(&tstats->syncp);
125 rx_packets = tstats->rx_packets;
126 tx_packets = tstats->tx_packets;
127 rx_bytes = tstats->rx_bytes;
128 tx_bytes = tstats->tx_bytes;
129 } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
130
131 tot->rx_packets += rx_packets;
132 tot->tx_packets += tx_packets;
133 tot->rx_bytes += rx_bytes;
134 tot->tx_bytes += tx_bytes;
135 }
136
137 tot->multicast = dev->stats.multicast;
138 tot->rx_crc_errors = dev->stats.rx_crc_errors;
139 tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
140 tot->rx_length_errors = dev->stats.rx_length_errors;
141 tot->rx_frame_errors = dev->stats.rx_frame_errors;
142 tot->rx_errors = dev->stats.rx_errors;
143
144 tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
145 tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
146 tot->tx_dropped = dev->stats.tx_dropped;
147 tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
148 tot->tx_errors = dev->stats.tx_errors;
149
150 return tot;
151}
152
153/* Given src, dst and key, find appropriate for input tunnel. */ 114/* Given src, dst and key, find appropriate for input tunnel. */
154 115
155static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev, 116static struct ip6_tnl *ip6gre_tunnel_lookup(struct net_device *dev,
@@ -667,7 +628,6 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
667 struct net_device_stats *stats = &tunnel->dev->stats; 628 struct net_device_stats *stats = &tunnel->dev->stats;
668 int err = -1; 629 int err = -1;
669 u8 proto; 630 u8 proto;
670 int pkt_len;
671 struct sk_buff *new_skb; 631 struct sk_buff *new_skb;
672 632
673 if (dev->type == ARPHRD_ETHER) 633 if (dev->type == ARPHRD_ETHER)
@@ -801,23 +761,9 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
801 } 761 }
802 } 762 }
803 763
804 nf_reset(skb); 764 ip6tunnel_xmit(skb, dev);
805 pkt_len = skb->len;
806 err = ip6_local_out(skb);
807
808 if (net_xmit_eval(err) == 0) {
809 struct pcpu_tstats *tstats = this_cpu_ptr(tunnel->dev->tstats);
810
811 tstats->tx_bytes += pkt_len;
812 tstats->tx_packets++;
813 } else {
814 stats->tx_errors++;
815 stats->tx_aborted_errors++;
816 }
817
818 if (ndst) 765 if (ndst)
819 ip6_tnl_dst_store(tunnel, ndst); 766 ip6_tnl_dst_store(tunnel, ndst);
820
821 return 0; 767 return 0;
822tx_err_link_failure: 768tx_err_link_failure:
823 stats->tx_carrier_errors++; 769 stats->tx_carrier_errors++;
@@ -1271,7 +1217,7 @@ static const struct net_device_ops ip6gre_netdev_ops = {
1271 .ndo_start_xmit = ip6gre_tunnel_xmit, 1217 .ndo_start_xmit = ip6gre_tunnel_xmit,
1272 .ndo_do_ioctl = ip6gre_tunnel_ioctl, 1218 .ndo_do_ioctl = ip6gre_tunnel_ioctl,
1273 .ndo_change_mtu = ip6gre_tunnel_change_mtu, 1219 .ndo_change_mtu = ip6gre_tunnel_change_mtu,
1274 .ndo_get_stats64 = ip6gre_get_stats64, 1220 .ndo_get_stats64 = ip_tunnel_get_stats64,
1275}; 1221};
1276 1222
1277static void ip6gre_dev_free(struct net_device *dev) 1223static void ip6gre_dev_free(struct net_device *dev)
@@ -1520,7 +1466,7 @@ static const struct net_device_ops ip6gre_tap_netdev_ops = {
1520 .ndo_set_mac_address = eth_mac_addr, 1466 .ndo_set_mac_address = eth_mac_addr,
1521 .ndo_validate_addr = eth_validate_addr, 1467 .ndo_validate_addr = eth_validate_addr,
1522 .ndo_change_mtu = ip6gre_tunnel_change_mtu, 1468 .ndo_change_mtu = ip6gre_tunnel_change_mtu,
1523 .ndo_get_stats64 = ip6gre_get_stats64, 1469 .ndo_get_stats64 = ip_tunnel_get_stats64,
1524}; 1470};
1525 1471
1526static void ip6gre_tap_setup(struct net_device *dev) 1472static void ip6gre_tap_setup(struct net_device *dev)
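
Reviewer note: the ~40-line ip6gre_get_stats64() deleted above was a per-driver copy of the same aggregation loop; it now lives once as ip_tunnel_get_stats64() in the shared tunnel code (hence the "select NET_IP_TUNNEL" added to Kconfig earlier in this patch). The sketch below shows the reader side of the u64_stats seqcount pattern that loop uses, reduced to plain single-threaded C for illustration; on 64-bit kernels the real begin/retry helpers can compile away entirely.

#include <stdint.h>
#include <stdio.h>

struct tstats {
    unsigned int seq;       /* odd while a writer is mid-update */
    uint64_t rx_packets, rx_bytes;
};

static unsigned int fetch_begin(const struct tstats *s)
{
    unsigned int seq;
    do {
        seq = s->seq;
    } while (seq & 1);      /* writer in progress: wait it out */
    return seq;
}

static int fetch_retry(const struct tstats *s, unsigned int seq)
{
    return s->seq != seq;   /* a writer slipped in: retry the snapshot */
}

int main(void)
{
    struct tstats percpu[2] = {
        { .seq = 2, .rx_packets = 10, .rx_bytes = 1000 },
        { .seq = 4, .rx_packets =  5, .rx_bytes =  500 },
    };
    uint64_t pkts = 0, bytes = 0;

    for (int cpu = 0; cpu < 2; cpu++) {
        unsigned int seq;
        uint64_t p, b;
        do {
            seq = fetch_begin(&percpu[cpu]);
            p = percpu[cpu].rx_packets;
            b = percpu[cpu].rx_bytes;
        } while (fetch_retry(&percpu[cpu], seq));
        pkts += p;
        bytes += b;
    }
    printf("rx: %llu packets, %llu bytes\n",
           (unsigned long long)pkts, (unsigned long long)bytes);
    return 0;
}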
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 8234c1dcdf72..71b766ee821d 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -92,14 +92,12 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
92 u8 *prevhdr; 92 u8 *prevhdr;
93 int offset = 0; 93 int offset = 0;
94 94
95 if (!(features & NETIF_F_V6_CSUM))
96 features &= ~NETIF_F_SG;
97
98 if (unlikely(skb_shinfo(skb)->gso_type & 95 if (unlikely(skb_shinfo(skb)->gso_type &
99 ~(SKB_GSO_UDP | 96 ~(SKB_GSO_UDP |
100 SKB_GSO_DODGY | 97 SKB_GSO_DODGY |
101 SKB_GSO_TCP_ECN | 98 SKB_GSO_TCP_ECN |
102 SKB_GSO_GRE | 99 SKB_GSO_GRE |
100 SKB_GSO_UDP_TUNNEL |
103 SKB_GSO_TCPV6 | 101 SKB_GSO_TCPV6 |
104 0))) 102 0)))
105 goto out; 103 goto out;
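
Reviewer note: ipv6_gso_segment() keeps a whitelist of segmentation types it understands, and the hunk adds SKB_GSO_UDP_TUNNEL to it (while dropping a checksum-feature fixup handled elsewhere). Illustrative sketch of the bitmask check — the flag values here are invented, not the kernel's SKB_GSO_* constants:

#include <stdio.h>

#define GSO_UDP        0x01
#define GSO_DODGY      0x02
#define GSO_TCP_ECN    0x04
#define GSO_GRE        0x08
#define GSO_UDP_TUNNEL 0x10
#define GSO_TCPV6      0x20

static int gso_type_supported(unsigned int type)
{
    unsigned int supported = GSO_UDP | GSO_DODGY | GSO_TCP_ECN |
                             GSO_GRE | GSO_UDP_TUNNEL | GSO_TCPV6;

    /* Segmentation proceeds only if no bit falls outside the set. */
    return (type & ~supported) == 0;
}

int main(void)
{
    printf("%d\n", gso_type_supported(GSO_TCPV6 | GSO_UDP_TUNNEL)); /* 1 */
    printf("%d\n", gso_type_supported(0x40));                       /* 0 */
    return 0;
}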
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index fff83cbc197f..1e55866cead7 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -47,6 +47,7 @@
47 47
48#include <net/icmp.h> 48#include <net/icmp.h>
49#include <net/ip.h> 49#include <net/ip.h>
50#include <net/ip_tunnels.h>
50#include <net/ipv6.h> 51#include <net/ipv6.h>
51#include <net/ip6_route.h> 52#include <net/ip6_route.h>
52#include <net/addrconf.h> 53#include <net/addrconf.h>
@@ -955,7 +956,6 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
955 unsigned int max_headroom = sizeof(struct ipv6hdr); 956 unsigned int max_headroom = sizeof(struct ipv6hdr);
956 u8 proto; 957 u8 proto;
957 int err = -1; 958 int err = -1;
958 int pkt_len;
959 959
960 if (!fl6->flowi6_mark) 960 if (!fl6->flowi6_mark)
961 dst = ip6_tnl_dst_check(t); 961 dst = ip6_tnl_dst_check(t);
@@ -1035,19 +1035,7 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
1035 ipv6h->nexthdr = proto; 1035 ipv6h->nexthdr = proto;
1036 ipv6h->saddr = fl6->saddr; 1036 ipv6h->saddr = fl6->saddr;
1037 ipv6h->daddr = fl6->daddr; 1037 ipv6h->daddr = fl6->daddr;
1038 nf_reset(skb); 1038 ip6tunnel_xmit(skb, dev);
1039 pkt_len = skb->len;
1040 err = ip6_local_out(skb);
1041
1042 if (net_xmit_eval(err) == 0) {
1043 struct pcpu_tstats *tstats = this_cpu_ptr(t->dev->tstats);
1044
1045 tstats->tx_bytes += pkt_len;
1046 tstats->tx_packets++;
1047 } else {
1048 stats->tx_errors++;
1049 stats->tx_aborted_errors++;
1050 }
1051 if (ndst) 1039 if (ndst)
1052 ip6_tnl_dst_store(t, ndst); 1040 ip6_tnl_dst_store(t, ndst);
1053 return 0; 1041 return 0;
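
Reviewer note: ip6_tnl_xmit2() here and ip6gre_xmit2() earlier delete byte-identical transmit accounting (nf_reset, ip6_local_out, tx counter updates) in favour of ip6tunnel_xmit() from the new <net/ip_tunnels.h>. The helper's body is not shown in this diff, so the sketch below reconstructs its logic from the two removed copies, with userspace stand-ins for the kernel types and calls — an inference, not the actual helper.

#include <stdint.h>
#include <stdio.h>

struct stats { uint64_t tx_packets, tx_bytes, tx_errors, tx_aborted_errors; };
struct skb { unsigned int len; };

static int ip6_local_out_stub(struct skb *skb) { (void)skb; return 0; }

static void tunnel_xmit(struct skb *skb, struct stats *tstats,
                        struct stats *errstats)
{
    unsigned int pkt_len = skb->len;   /* sample before the skb is consumed */
    int err = ip6_local_out_stub(skb);

    if (err == 0) {                    /* i.e. net_xmit_eval(err) == 0 */
        tstats->tx_bytes += pkt_len;
        tstats->tx_packets++;
    } else {
        errstats->tx_errors++;
        errstats->tx_aborted_errors++;
    }
}

int main(void)
{
    struct skb skb = { .len = 1400 };
    struct stats t = { 0 }, e = { 0 };

    tunnel_xmit(&skb, &t, &e);
    printf("tx %llu pkts / %llu bytes\n",
           (unsigned long long)t.tx_packets, (unsigned long long)t.tx_bytes);
    return 0;
}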
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 96bfb4e4b820..241fb8ad9fcf 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -842,9 +842,9 @@ static void ip6mr_destroy_unres(struct mr6_table *mrt, struct mfc6_cache *c)
842 if (ipv6_hdr(skb)->version == 0) { 842 if (ipv6_hdr(skb)->version == 0) {
843 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr)); 843 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
844 nlh->nlmsg_type = NLMSG_ERROR; 844 nlh->nlmsg_type = NLMSG_ERROR;
845 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr)); 845 nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
846 skb_trim(skb, nlh->nlmsg_len); 846 skb_trim(skb, nlh->nlmsg_len);
847 ((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -ETIMEDOUT; 847 ((struct nlmsgerr *)nlmsg_data(nlh))->error = -ETIMEDOUT;
848 rtnl_unicast(skb, net, NETLINK_CB(skb).portid); 848 rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
849 } else 849 } else
850 kfree_skb(skb); 850 kfree_skb(skb);
@@ -1100,13 +1100,13 @@ static void ip6mr_cache_resolve(struct net *net, struct mr6_table *mrt,
1100 if (ipv6_hdr(skb)->version == 0) { 1100 if (ipv6_hdr(skb)->version == 0) {
1101 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr)); 1101 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct ipv6hdr));
1102 1102
1103 if (__ip6mr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) { 1103 if (__ip6mr_fill_mroute(mrt, skb, c, nlmsg_data(nlh)) > 0) {
1104 nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh; 1104 nlh->nlmsg_len = skb_tail_pointer(skb) - (u8 *)nlh;
1105 } else { 1105 } else {
1106 nlh->nlmsg_type = NLMSG_ERROR; 1106 nlh->nlmsg_type = NLMSG_ERROR;
1107 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr)); 1107 nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
1108 skb_trim(skb, nlh->nlmsg_len); 1108 skb_trim(skb, nlh->nlmsg_len);
1109 ((struct nlmsgerr *)NLMSG_DATA(nlh))->error = -EMSGSIZE; 1109 ((struct nlmsgerr *)nlmsg_data(nlh))->error = -EMSGSIZE;
1110 } 1110 }
1111 rtnl_unicast(skb, net, NETLINK_CB(skb).portid); 1111 rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
1112 } else 1112 } else
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 76ef4353d518..2712ab22a174 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -610,8 +610,6 @@ void ndisc_send_rs(struct net_device *dev, const struct in6_addr *saddr,
610 } 610 }
611 } 611 }
612#endif 612#endif
613 if (!dev->addr_len)
614 send_sllao = 0;
615 if (send_sllao) 613 if (send_sllao)
616 optlen += ndisc_opt_addr_space(dev); 614 optlen += ndisc_opt_addr_space(dev);
617 615
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 341b54ade72c..8861b1ef420e 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -284,6 +284,7 @@ static void trace_packet(const struct sk_buff *skb,
284 const char *hookname, *chainname, *comment; 284 const char *hookname, *chainname, *comment;
285 const struct ip6t_entry *iter; 285 const struct ip6t_entry *iter;
286 unsigned int rulenum = 0; 286 unsigned int rulenum = 0;
287 struct net *net = dev_net(in ? in : out);
287 288
288 table_base = private->entries[smp_processor_id()]; 289 table_base = private->entries[smp_processor_id()];
289 root = get_entry(table_base, private->hook_entry[hook]); 290 root = get_entry(table_base, private->hook_entry[hook]);
@@ -296,7 +297,7 @@ static void trace_packet(const struct sk_buff *skb,
296 &chainname, &comment, &rulenum) != 0) 297 &chainname, &comment, &rulenum) != 0)
297 break; 298 break;
298 299
299 nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo, 300 nf_log_packet(net, AF_INET6, hook, skb, in, out, &trace_loginfo,
300 "TRACE: %s:%s:%s:%u ", 301 "TRACE: %s:%s:%s:%u ",
301 tablename, chainname, comment, rulenum); 302 tablename, chainname, comment, rulenum);
302} 303}
diff --git a/net/ipv6/netfilter/ip6t_NPT.c b/net/ipv6/netfilter/ip6t_NPT.c
index cb631143721c..590f767db5d4 100644
--- a/net/ipv6/netfilter/ip6t_NPT.c
+++ b/net/ipv6/netfilter/ip6t_NPT.c
@@ -18,9 +18,8 @@
18static int ip6t_npt_checkentry(const struct xt_tgchk_param *par) 18static int ip6t_npt_checkentry(const struct xt_tgchk_param *par)
19{ 19{
20 struct ip6t_npt_tginfo *npt = par->targinfo; 20 struct ip6t_npt_tginfo *npt = par->targinfo;
21 __wsum src_sum = 0, dst_sum = 0;
22 struct in6_addr pfx; 21 struct in6_addr pfx;
23 unsigned int i; 22 __wsum src_sum, dst_sum;
24 23
25 if (npt->src_pfx_len > 64 || npt->dst_pfx_len > 64) 24 if (npt->src_pfx_len > 64 || npt->dst_pfx_len > 64)
26 return -EINVAL; 25 return -EINVAL;
@@ -33,12 +32,8 @@ static int ip6t_npt_checkentry(const struct xt_tgchk_param *par)
33 if (!ipv6_addr_equal(&pfx, &npt->dst_pfx.in6)) 32 if (!ipv6_addr_equal(&pfx, &npt->dst_pfx.in6))
34 return -EINVAL; 33 return -EINVAL;
35 34
36 for (i = 0; i < ARRAY_SIZE(npt->src_pfx.in6.s6_addr16); i++) { 35 src_sum = csum_partial(&npt->src_pfx.in6, sizeof(npt->src_pfx.in6), 0);
37 src_sum = csum_add(src_sum, 36 dst_sum = csum_partial(&npt->dst_pfx.in6, sizeof(npt->dst_pfx.in6), 0);
38 (__force __wsum)npt->src_pfx.in6.s6_addr16[i]);
39 dst_sum = csum_add(dst_sum,
40 (__force __wsum)npt->dst_pfx.in6.s6_addr16[i]);
41 }
42 37
43 npt->adjustment = ~csum_fold(csum_sub(src_sum, dst_sum)); 38 npt->adjustment = ~csum_fold(csum_sub(src_sum, dst_sum));
44 return 0; 39 return 0;
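
Reviewer note: the checkentry hunk replaces a manual loop over eight 16-bit prefix words with csum_partial() over the full 16-byte in6_addr; both yield the same ones-complement sum, from which the RFC 6296 (NPTv6) adjustment is derived via csum_sub and csum_fold. A userspace sketch of the arithmetic, with host-order words and illustrative prefixes:

#include <stdint.h>
#include <stdio.h>

static uint32_t sum16(const uint16_t *w, int n)
{
    uint32_t sum = 0;
    for (int i = 0; i < n; i++)
        sum += w[i];
    return sum;
}

static uint16_t fold(uint32_t sum)
{
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)sum;
}

int main(void)
{
    /* fd00::/64 -> 2001:db8:1::/64, words kept in host order for the demo */
    uint16_t src[8] = { 0xfd00, 0, 0, 0, 0, 0, 0, 0 };
    uint16_t dst[8] = { 0x2001, 0x0db8, 0x0001, 0, 0, 0, 0, 0 };

    /* csum_sub(a, b) is csum_add(a, ~b) in ones-complement arithmetic */
    uint32_t diff = sum16(src, 8) + (uint16_t)~fold(sum16(dst, 8));

    /* ~csum_fold(x) undoes csum_fold()'s final complement, leaving the
     * plain folded difference as the stored adjustment. */
    uint16_t adjustment = fold(diff);

    printf("NPTv6 adjustment: 0x%04x\n", adjustment);
    return 0;
}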
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 2b6c226f5198..97bcf2bae857 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -330,12 +330,8 @@ ipv6_getorigdst(struct sock *sk, int optval, void __user *user, int *len)
330 sizeof(sin6.sin6_addr)); 330 sizeof(sin6.sin6_addr));
331 331
332 nf_ct_put(ct); 332 nf_ct_put(ct);
333 333 sin6.sin6_scope_id = ipv6_iface_scope_id(&sin6.sin6_addr,
334 if (ipv6_addr_type(&sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL) 334 sk->sk_bound_dev_if);
335 sin6.sin6_scope_id = sk->sk_bound_dev_if;
336 else
337 sin6.sin6_scope_id = 0;
338
339 return copy_to_user(user, &sin6, sizeof(sin6)) ? -EFAULT : 0; 335 return copy_to_user(user, &sin6, sizeof(sin6)) ? -EFAULT : 0;
340} 336}
341 337
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 24df3dde0076..b3807c5cb888 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -131,7 +131,8 @@ static bool icmpv6_new(struct nf_conn *ct, const struct sk_buff *skb,
131 type + 128); 131 type + 128);
132 nf_ct_dump_tuple_ipv6(&ct->tuplehash[0].tuple); 132 nf_ct_dump_tuple_ipv6(&ct->tuplehash[0].tuple);
133 if (LOG_INVALID(nf_ct_net(ct), IPPROTO_ICMPV6)) 133 if (LOG_INVALID(nf_ct_net(ct), IPPROTO_ICMPV6))
134 nf_log_packet(PF_INET6, 0, skb, NULL, NULL, NULL, 134 nf_log_packet(nf_ct_net(ct), PF_INET6, 0, skb, NULL,
135 NULL, NULL,
135 "nf_ct_icmpv6: invalid new with type %d ", 136 "nf_ct_icmpv6: invalid new with type %d ",
136 type + 128); 137 type + 128);
137 return false; 138 return false;
@@ -203,7 +204,7 @@ icmpv6_error(struct net *net, struct nf_conn *tmpl,
203 icmp6h = skb_header_pointer(skb, dataoff, sizeof(_ih), &_ih); 204 icmp6h = skb_header_pointer(skb, dataoff, sizeof(_ih), &_ih);
204 if (icmp6h == NULL) { 205 if (icmp6h == NULL) {
205 if (LOG_INVALID(net, IPPROTO_ICMPV6)) 206 if (LOG_INVALID(net, IPPROTO_ICMPV6))
206 nf_log_packet(PF_INET6, 0, skb, NULL, NULL, NULL, 207 nf_log_packet(net, PF_INET6, 0, skb, NULL, NULL, NULL,
207 "nf_ct_icmpv6: short packet "); 208 "nf_ct_icmpv6: short packet ");
208 return -NF_ACCEPT; 209 return -NF_ACCEPT;
209 } 210 }
@@ -211,7 +212,7 @@ icmpv6_error(struct net *net, struct nf_conn *tmpl,
211 if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && 212 if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
212 nf_ip6_checksum(skb, hooknum, dataoff, IPPROTO_ICMPV6)) { 213 nf_ip6_checksum(skb, hooknum, dataoff, IPPROTO_ICMPV6)) {
213 if (LOG_INVALID(net, IPPROTO_ICMPV6)) 214 if (LOG_INVALID(net, IPPROTO_ICMPV6))
214 nf_log_packet(PF_INET6, 0, skb, NULL, NULL, NULL, 215 nf_log_packet(net, PF_INET6, 0, skb, NULL, NULL, NULL,
215 "nf_ct_icmpv6: ICMPv6 checksum failed "); 216 "nf_ct_icmpv6: ICMPv6 checksum failed ");
216 return -NF_ACCEPT; 217 return -NF_ACCEPT;
217 } 218 }
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 6700069949dd..dffdc1a389c5 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -41,6 +41,7 @@
41#include <net/rawv6.h> 41#include <net/rawv6.h>
42#include <net/ndisc.h> 42#include <net/ndisc.h>
43#include <net/addrconf.h> 43#include <net/addrconf.h>
44#include <net/inet_ecn.h>
44#include <net/netfilter/ipv6/nf_conntrack_ipv6.h> 45#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
45#include <linux/sysctl.h> 46#include <linux/sysctl.h>
46#include <linux/netfilter.h> 47#include <linux/netfilter.h>
@@ -138,6 +139,11 @@ static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
138} 139}
139#endif 140#endif
140 141
142static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
143{
144 return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
145}
146
141static unsigned int nf_hashfn(struct inet_frag_queue *q) 147static unsigned int nf_hashfn(struct inet_frag_queue *q)
142{ 148{
143 const struct frag_queue *nq; 149 const struct frag_queue *nq;
@@ -166,7 +172,7 @@ static void nf_ct_frag6_expire(unsigned long data)
166/* Creation primitives. */ 172/* Creation primitives. */
167static inline struct frag_queue *fq_find(struct net *net, __be32 id, 173static inline struct frag_queue *fq_find(struct net *net, __be32 id,
168 u32 user, struct in6_addr *src, 174 u32 user, struct in6_addr *src,
169 struct in6_addr *dst) 175 struct in6_addr *dst, u8 ecn)
170{ 176{
171 struct inet_frag_queue *q; 177 struct inet_frag_queue *q;
172 struct ip6_create_arg arg; 178 struct ip6_create_arg arg;
@@ -176,6 +182,7 @@ static inline struct frag_queue *fq_find(struct net *net, __be32 id,
176 arg.user = user; 182 arg.user = user;
177 arg.src = src; 183 arg.src = src;
178 arg.dst = dst; 184 arg.dst = dst;
185 arg.ecn = ecn;
179 186
180 read_lock_bh(&nf_frags.lock); 187 read_lock_bh(&nf_frags.lock);
181 hash = inet6_hash_frag(id, src, dst, nf_frags.rnd); 188 hash = inet6_hash_frag(id, src, dst, nf_frags.rnd);
@@ -196,6 +203,7 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
196 struct sk_buff *prev, *next; 203 struct sk_buff *prev, *next;
197 unsigned int payload_len; 204 unsigned int payload_len;
198 int offset, end; 205 int offset, end;
206 u8 ecn;
199 207
200 if (fq->q.last_in & INET_FRAG_COMPLETE) { 208 if (fq->q.last_in & INET_FRAG_COMPLETE) {
201 pr_debug("Already completed\n"); 209 pr_debug("Already completed\n");
@@ -213,6 +221,8 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
213 return -1; 221 return -1;
214 } 222 }
215 223
224 ecn = ip6_frag_ecn(ipv6_hdr(skb));
225
216 if (skb->ip_summed == CHECKSUM_COMPLETE) { 226 if (skb->ip_summed == CHECKSUM_COMPLETE) {
217 const unsigned char *nh = skb_network_header(skb); 227 const unsigned char *nh = skb_network_header(skb);
218 skb->csum = csum_sub(skb->csum, 228 skb->csum = csum_sub(skb->csum,
@@ -317,6 +327,7 @@ found:
317 } 327 }
318 fq->q.stamp = skb->tstamp; 328 fq->q.stamp = skb->tstamp;
319 fq->q.meat += skb->len; 329 fq->q.meat += skb->len;
330 fq->ecn |= ecn;
320 if (payload_len > fq->q.max_size) 331 if (payload_len > fq->q.max_size)
321 fq->q.max_size = payload_len; 332 fq->q.max_size = payload_len;
322 add_frag_mem_limit(&fq->q, skb->truesize); 333 add_frag_mem_limit(&fq->q, skb->truesize);
@@ -352,12 +363,17 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
352{ 363{
353 struct sk_buff *fp, *op, *head = fq->q.fragments; 364 struct sk_buff *fp, *op, *head = fq->q.fragments;
354 int payload_len; 365 int payload_len;
366 u8 ecn;
355 367
356 inet_frag_kill(&fq->q, &nf_frags); 368 inet_frag_kill(&fq->q, &nf_frags);
357 369
358 WARN_ON(head == NULL); 370 WARN_ON(head == NULL);
359 WARN_ON(NFCT_FRAG6_CB(head)->offset != 0); 371 WARN_ON(NFCT_FRAG6_CB(head)->offset != 0);
360 372
373 ecn = ip_frag_ecn_table[fq->ecn];
374 if (unlikely(ecn == 0xff))
375 goto out_fail;
376
361 /* Unfragmented part is taken from the first segment. */ 377 /* Unfragmented part is taken from the first segment. */
362 payload_len = ((head->data - skb_network_header(head)) - 378 payload_len = ((head->data - skb_network_header(head)) -
363 sizeof(struct ipv6hdr) + fq->q.len - 379 sizeof(struct ipv6hdr) + fq->q.len -
@@ -428,6 +444,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
428 head->dev = dev; 444 head->dev = dev;
429 head->tstamp = fq->q.stamp; 445 head->tstamp = fq->q.stamp;
430 ipv6_hdr(head)->payload_len = htons(payload_len); 446 ipv6_hdr(head)->payload_len = htons(payload_len);
447 ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
431 IP6CB(head)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size; 448 IP6CB(head)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
432 449
433 /* Yes, and fold redundant checksum back. 8) */ 450 /* Yes, and fold redundant checksum back. 8) */
@@ -572,7 +589,8 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
572 inet_frag_evictor(&net->nf_frag.frags, &nf_frags, false); 589 inet_frag_evictor(&net->nf_frag.frags, &nf_frags, false);
573 local_bh_enable(); 590 local_bh_enable();
574 591
575 fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr); 592 fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
593 ip6_frag_ecn(hdr));
576 if (fq == NULL) { 594 if (fq == NULL) {
577 pr_debug("Can't find and can't create new queue\n"); 595 pr_debug("Can't find and can't create new queue\n");
578 goto ret_orig; 596 goto ret_orig;
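
Reviewer note: the reassembly changes above (repeated shortly in net/ipv6/reassembly.c) collect each fragment's ECN codepoint as a bit in fq->ecn and map the OR of all fragments through ip_frag_ecn_table at reassembly time: mixing not-ECT fragments with ECT or CE ones yields 0xff and the datagram is dropped, while a CE mark on any ECT fragment propagates to the reassembled header. A simplified userspace sketch of those rules — the kernel table covers more degenerate masks than this does:

#include <stdint.h>
#include <stdio.h>

enum { NOT_ECT = 0, ECT_1 = 1, ECT_0 = 2, CE = 3 };

static uint8_t merge_ecn(unsigned int mask)
{
    int saw_not_ect = mask & (1 << NOT_ECT);
    int saw_ect     = mask & ((1 << ECT_0) | (1 << ECT_1));
    int saw_ce      = mask & (1 << CE);

    if (saw_not_ect && (saw_ect || saw_ce))
        return 0xff;            /* mixing not-ECT with ECT/CE: drop */
    if (saw_ce)
        return CE;              /* any CE mark survives reassembly */
    return 0;                   /* keep the head fragment's ECN bits */
}

int main(void)
{
    unsigned int mask = (1 << ECT_0) | (1 << CE);    /* two fragments */

    printf("merged ECN: 0x%02x\n", merge_ecn(mask)); /* 0x03 == CE */
    mask |= 1 << NOT_ECT;                            /* a third, not-ECT */
    printf("merged ECN: 0x%02x\n", merge_ecn(mask)); /* 0xff == drop */
    return 0;
}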
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 330b5e7b7df6..eedff8ccded5 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -263,7 +263,7 @@ static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
263 if (addr_type != IPV6_ADDR_ANY) { 263 if (addr_type != IPV6_ADDR_ANY) {
264 struct net_device *dev = NULL; 264 struct net_device *dev = NULL;
265 265
266 if (addr_type & IPV6_ADDR_LINKLOCAL) { 266 if (__ipv6_addr_needs_scope_id(addr_type)) {
267 if (addr_len >= sizeof(struct sockaddr_in6) && 267 if (addr_len >= sizeof(struct sockaddr_in6) &&
268 addr->sin6_scope_id) { 268 addr->sin6_scope_id) {
269 /* Override any existing binding, if another 269 /* Override any existing binding, if another
@@ -498,9 +498,8 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
498 sin6->sin6_port = 0; 498 sin6->sin6_port = 0;
499 sin6->sin6_addr = ipv6_hdr(skb)->saddr; 499 sin6->sin6_addr = ipv6_hdr(skb)->saddr;
500 sin6->sin6_flowinfo = 0; 500 sin6->sin6_flowinfo = 0;
501 sin6->sin6_scope_id = 0; 501 sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
502 if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) 502 IP6CB(skb)->iif);
503 sin6->sin6_scope_id = IP6CB(skb)->iif;
504 } 503 }
505 504
506 sock_recv_ts_and_drops(msg, sk, skb); 505 sock_recv_ts_and_drops(msg, sk, skb);
@@ -802,7 +801,7 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
802 801
803 if (addr_len >= sizeof(struct sockaddr_in6) && 802 if (addr_len >= sizeof(struct sockaddr_in6) &&
804 sin6->sin6_scope_id && 803 sin6->sin6_scope_id &&
805 ipv6_addr_type(daddr)&IPV6_ADDR_LINKLOCAL) 804 __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
806 fl6.flowi6_oif = sin6->sin6_scope_id; 805 fl6.flowi6_oif = sin6->sin6_scope_id;
807 } else { 806 } else {
808 if (sk->sk_state != TCP_ESTABLISHED) 807 if (sk->sk_state != TCP_ESTABLISHED)
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 196ab9347ad1..e6e44cef8db2 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -58,6 +58,7 @@
58#include <net/ndisc.h> 58#include <net/ndisc.h>
59#include <net/addrconf.h> 59#include <net/addrconf.h>
60#include <net/inet_frag.h> 60#include <net/inet_frag.h>
61#include <net/inet_ecn.h>
61 62
62struct ip6frag_skb_cb 63struct ip6frag_skb_cb
63{ 64{
@@ -67,6 +68,10 @@ struct ip6frag_skb_cb
67 68
68#define FRAG6_CB(skb) ((struct ip6frag_skb_cb*)((skb)->cb)) 69#define FRAG6_CB(skb) ((struct ip6frag_skb_cb*)((skb)->cb))
69 70
71static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
72{
73 return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
74}
70 75
71static struct inet_frags ip6_frags; 76static struct inet_frags ip6_frags;
72 77
@@ -119,6 +124,7 @@ void ip6_frag_init(struct inet_frag_queue *q, void *a)
119 fq->user = arg->user; 124 fq->user = arg->user;
120 fq->saddr = *arg->src; 125 fq->saddr = *arg->src;
121 fq->daddr = *arg->dst; 126 fq->daddr = *arg->dst;
127 fq->ecn = arg->ecn;
122} 128}
123EXPORT_SYMBOL(ip6_frag_init); 129EXPORT_SYMBOL(ip6_frag_init);
124 130
@@ -173,7 +179,8 @@ static void ip6_frag_expire(unsigned long data)
173} 179}
174 180
175static __inline__ struct frag_queue * 181static __inline__ struct frag_queue *
176fq_find(struct net *net, __be32 id, const struct in6_addr *src, const struct in6_addr *dst) 182fq_find(struct net *net, __be32 id, const struct in6_addr *src,
183 const struct in6_addr *dst, u8 ecn)
177{ 184{
178 struct inet_frag_queue *q; 185 struct inet_frag_queue *q;
179 struct ip6_create_arg arg; 186 struct ip6_create_arg arg;
@@ -183,6 +190,7 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src, const struct in6
183 arg.user = IP6_DEFRAG_LOCAL_DELIVER; 190 arg.user = IP6_DEFRAG_LOCAL_DELIVER;
184 arg.src = src; 191 arg.src = src;
185 arg.dst = dst; 192 arg.dst = dst;
193 arg.ecn = ecn;
186 194
187 read_lock(&ip6_frags.lock); 195 read_lock(&ip6_frags.lock);
188 hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd); 196 hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);
@@ -202,6 +210,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
202 struct net_device *dev; 210 struct net_device *dev;
203 int offset, end; 211 int offset, end;
204 struct net *net = dev_net(skb_dst(skb)->dev); 212 struct net *net = dev_net(skb_dst(skb)->dev);
213 u8 ecn;
205 214
206 if (fq->q.last_in & INET_FRAG_COMPLETE) 215 if (fq->q.last_in & INET_FRAG_COMPLETE)
207 goto err; 216 goto err;
@@ -219,6 +228,8 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
219 return -1; 228 return -1;
220 } 229 }
221 230
231 ecn = ip6_frag_ecn(ipv6_hdr(skb));
232
222 if (skb->ip_summed == CHECKSUM_COMPLETE) { 233 if (skb->ip_summed == CHECKSUM_COMPLETE) {
223 const unsigned char *nh = skb_network_header(skb); 234 const unsigned char *nh = skb_network_header(skb);
224 skb->csum = csum_sub(skb->csum, 235 skb->csum = csum_sub(skb->csum,
@@ -319,6 +330,7 @@ found:
319 } 330 }
320 fq->q.stamp = skb->tstamp; 331 fq->q.stamp = skb->tstamp;
321 fq->q.meat += skb->len; 332 fq->q.meat += skb->len;
333 fq->ecn |= ecn;
322 add_frag_mem_limit(&fq->q, skb->truesize); 334 add_frag_mem_limit(&fq->q, skb->truesize);
323 335
324 /* The first fragment. 336 /* The first fragment.
@@ -362,9 +374,14 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
362 int payload_len; 374 int payload_len;
363 unsigned int nhoff; 375 unsigned int nhoff;
364 int sum_truesize; 376 int sum_truesize;
377 u8 ecn;
365 378
366 inet_frag_kill(&fq->q, &ip6_frags); 379 inet_frag_kill(&fq->q, &ip6_frags);
367 380
381 ecn = ip_frag_ecn_table[fq->ecn];
382 if (unlikely(ecn == 0xff))
383 goto out_fail;
384
368 /* Make the one we just received the head. */ 385 /* Make the one we just received the head. */
369 if (prev) { 386 if (prev) {
370 head = prev->next; 387 head = prev->next;
@@ -463,6 +480,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
463 head->dev = dev; 480 head->dev = dev;
464 head->tstamp = fq->q.stamp; 481 head->tstamp = fq->q.stamp;
465 ipv6_hdr(head)->payload_len = htons(payload_len); 482 ipv6_hdr(head)->payload_len = htons(payload_len);
483 ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
466 IP6CB(head)->nhoff = nhoff; 484 IP6CB(head)->nhoff = nhoff;
467 485
468 /* Yes, and fold redundant checksum back. 8) */ 486 /* Yes, and fold redundant checksum back. 8) */
@@ -526,7 +544,8 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
526 IP6_ADD_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), 544 IP6_ADD_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
527 IPSTATS_MIB_REASMFAILS, evicted); 545 IPSTATS_MIB_REASMFAILS, evicted);
528 546
529 fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr); 547 fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
548 ip6_frag_ecn(hdr));
530 if (fq != NULL) { 549 if (fq != NULL) {
531 int ret; 550 int ret;
532 551
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index e5fe0041adfa..ad0aa6b0b86a 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2355,7 +2355,7 @@ beginning:
2355 return last_err; 2355 return last_err;
2356} 2356}
2357 2357
2358static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) 2358static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh)
2359{ 2359{
2360 struct fib6_config cfg; 2360 struct fib6_config cfg;
2361 int err; 2361 int err;
@@ -2370,7 +2370,7 @@ static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *a
2370 return ip6_route_del(&cfg); 2370 return ip6_route_del(&cfg);
2371} 2371}
2372 2372
2373static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) 2373static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr* nlh)
2374{ 2374{
2375 struct fib6_config cfg; 2375 struct fib6_config cfg;
2376 int err; 2376 int err;
@@ -2562,7 +2562,7 @@ int rt6_dump_route(struct rt6_info *rt, void *p_arg)
2562 prefix, 0, NLM_F_MULTI); 2562 prefix, 0, NLM_F_MULTI);
2563} 2563}
2564 2564
2565static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg) 2565static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh)
2566{ 2566{
2567 struct net *net = sock_net(in_skb->sk); 2567 struct net *net = sock_net(in_skb->sk);
2568 struct nlattr *tb[RTA_MAX+1]; 2568 struct nlattr *tb[RTA_MAX+1];
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 02f96dcbcf02..335363478bbf 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -49,7 +49,7 @@
49#include <net/ip.h> 49#include <net/ip.h>
50#include <net/udp.h> 50#include <net/udp.h>
51#include <net/icmp.h> 51#include <net/icmp.h>
52#include <net/ipip.h> 52#include <net/ip_tunnels.h>
53#include <net/inet_ecn.h> 53#include <net/inet_ecn.h>
54#include <net/xfrm.h> 54#include <net/xfrm.h>
55#include <net/dsfield.h> 55#include <net/dsfield.h>
@@ -87,41 +87,6 @@ struct sit_net {
87 struct net_device *fb_tunnel_dev; 87 struct net_device *fb_tunnel_dev;
88}; 88};
89 89
90static struct rtnl_link_stats64 *ipip6_get_stats64(struct net_device *dev,
91 struct rtnl_link_stats64 *tot)
92{
93 int i;
94
95 for_each_possible_cpu(i) {
96 const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
97 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
98 unsigned int start;
99
100 do {
101 start = u64_stats_fetch_begin_bh(&tstats->syncp);
102 rx_packets = tstats->rx_packets;
103 tx_packets = tstats->tx_packets;
104 rx_bytes = tstats->rx_bytes;
105 tx_bytes = tstats->tx_bytes;
106 } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
107
108 tot->rx_packets += rx_packets;
109 tot->tx_packets += tx_packets;
110 tot->rx_bytes += rx_bytes;
111 tot->tx_bytes += tx_bytes;
112 }
113
114 tot->rx_errors = dev->stats.rx_errors;
115 tot->rx_frame_errors = dev->stats.rx_frame_errors;
116 tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
117 tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
118 tot->tx_dropped = dev->stats.tx_dropped;
119 tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
120 tot->tx_errors = dev->stats.tx_errors;
121
122 return tot;
123}
124
125/* 90/*
126 * Must be invoked with rcu_read_lock 91 * Must be invoked with rcu_read_lock
127 */ 92 */
@@ -899,6 +864,8 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
899 if ((iph->ttl = tiph->ttl) == 0) 864 if ((iph->ttl = tiph->ttl) == 0)
900 iph->ttl = iph6->hop_limit; 865 iph->ttl = iph6->hop_limit;
901 866
867 skb->ip_summed = CHECKSUM_NONE;
868 ip_select_ident(iph, skb_dst(skb), NULL);
902 iptunnel_xmit(skb, dev); 869 iptunnel_xmit(skb, dev);
903 return NETDEV_TX_OK; 870 return NETDEV_TX_OK;
904 871
@@ -1200,7 +1167,7 @@ static const struct net_device_ops ipip6_netdev_ops = {
1200 .ndo_start_xmit = ipip6_tunnel_xmit, 1167 .ndo_start_xmit = ipip6_tunnel_xmit,
1201 .ndo_do_ioctl = ipip6_tunnel_ioctl, 1168 .ndo_do_ioctl = ipip6_tunnel_ioctl,
1202 .ndo_change_mtu = ipip6_tunnel_change_mtu, 1169 .ndo_change_mtu = ipip6_tunnel_change_mtu,
1203 .ndo_get_stats64= ipip6_get_stats64, 1170 .ndo_get_stats64 = ip_tunnel_get_stats64,
1204}; 1171};
1205 1172
1206static void ipip6_dev_free(struct net_device *dev) 1173static void ipip6_dev_free(struct net_device *dev)
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 8a0848b60b35..d5dda20bd717 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -149,7 +149,6 @@ static inline int cookie_check(const struct sk_buff *skb, __u32 cookie)
149struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) 149struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
150{ 150{
151 struct tcp_options_received tcp_opt; 151 struct tcp_options_received tcp_opt;
152 const u8 *hash_location;
153 struct inet_request_sock *ireq; 152 struct inet_request_sock *ireq;
154 struct inet6_request_sock *ireq6; 153 struct inet6_request_sock *ireq6;
155 struct tcp_request_sock *treq; 154 struct tcp_request_sock *treq;
@@ -177,7 +176,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
177 176
178 /* check for timestamp cookie support */ 177 /* check for timestamp cookie support */
179 memset(&tcp_opt, 0, sizeof(tcp_opt)); 178 memset(&tcp_opt, 0, sizeof(tcp_opt));
180 tcp_parse_options(skb, &tcp_opt, &hash_location, 0, NULL); 179 tcp_parse_options(skb, &tcp_opt, 0, NULL);
181 180
182 if (!cookie_check_timestamp(&tcp_opt, sock_net(sk), &ecn_ok)) 181 if (!cookie_check_timestamp(&tcp_opt, sock_net(sk), &ecn_ok))
183 goto out; 182 goto out;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 46a5be85be87..e51bd1a58264 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -462,7 +462,6 @@ out:
462static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst, 462static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
463 struct flowi6 *fl6, 463 struct flowi6 *fl6,
464 struct request_sock *req, 464 struct request_sock *req,
465 struct request_values *rvp,
466 u16 queue_mapping) 465 u16 queue_mapping)
467{ 466{
468 struct inet6_request_sock *treq = inet6_rsk(req); 467 struct inet6_request_sock *treq = inet6_rsk(req);
@@ -474,7 +473,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
474 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL) 473 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req)) == NULL)
475 goto done; 474 goto done;
476 475
477 skb = tcp_make_synack(sk, dst, req, rvp, NULL); 476 skb = tcp_make_synack(sk, dst, req, NULL);
478 477
479 if (skb) { 478 if (skb) {
480 __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr); 479 __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
@@ -489,13 +488,12 @@ done:
489 return err; 488 return err;
490} 489}
491 490
492static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req, 491static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req)
493 struct request_values *rvp)
494{ 492{
495 struct flowi6 fl6; 493 struct flowi6 fl6;
496 int res; 494 int res;
497 495
498 res = tcp_v6_send_synack(sk, NULL, &fl6, req, rvp, 0); 496 res = tcp_v6_send_synack(sk, NULL, &fl6, req, 0);
499 if (!res) 497 if (!res)
500 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); 498 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
501 return res; 499 return res;
@@ -948,9 +946,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
948 */ 946 */
949static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) 947static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
950{ 948{
951 struct tcp_extend_values tmp_ext;
952 struct tcp_options_received tmp_opt; 949 struct tcp_options_received tmp_opt;
953 const u8 *hash_location;
954 struct request_sock *req; 950 struct request_sock *req;
955 struct inet6_request_sock *treq; 951 struct inet6_request_sock *treq;
956 struct ipv6_pinfo *np = inet6_sk(sk); 952 struct ipv6_pinfo *np = inet6_sk(sk);
@@ -988,50 +984,7 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
988 tcp_clear_options(&tmp_opt); 984 tcp_clear_options(&tmp_opt);
989 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr); 985 tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
990 tmp_opt.user_mss = tp->rx_opt.user_mss; 986 tmp_opt.user_mss = tp->rx_opt.user_mss;
991 tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL); 987 tcp_parse_options(skb, &tmp_opt, 0, NULL);
992
993 if (tmp_opt.cookie_plus > 0 &&
994 tmp_opt.saw_tstamp &&
995 !tp->rx_opt.cookie_out_never &&
996 (sysctl_tcp_cookie_size > 0 ||
997 (tp->cookie_values != NULL &&
998 tp->cookie_values->cookie_desired > 0))) {
999 u8 *c;
1000 u32 *d;
1001 u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1002 int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1003
1004 if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1005 goto drop_and_free;
1006
1007 /* Secret recipe starts with IP addresses */
1008 d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
1009 *mess++ ^= *d++;
1010 *mess++ ^= *d++;
1011 *mess++ ^= *d++;
1012 *mess++ ^= *d++;
1013 d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
1014 *mess++ ^= *d++;
1015 *mess++ ^= *d++;
1016 *mess++ ^= *d++;
1017 *mess++ ^= *d++;
1018
1019 /* plus variable length Initiator Cookie */
1020 c = (u8 *)mess;
1021 while (l-- > 0)
1022 *c++ ^= *hash_location++;
1023
1024 want_cookie = false; /* not our kind of cookie */
1025 tmp_ext.cookie_out_never = 0; /* false */
1026 tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1027 } else if (!tp->rx_opt.cookie_in_always) {
1028 /* redundant indications, but ensure initialization. */
1029 tmp_ext.cookie_out_never = 1; /* true */
1030 tmp_ext.cookie_plus = 0;
1031 } else {
1032 goto drop_and_free;
1033 }
1034 tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1035 988
1036 if (want_cookie && !tmp_opt.saw_tstamp) 989 if (want_cookie && !tmp_opt.saw_tstamp)
1037 tcp_clear_options(&tmp_opt); 990 tcp_clear_options(&tmp_opt);
@@ -1109,7 +1062,6 @@ have_isn:
1109 goto drop_and_release; 1062 goto drop_and_release;
1110 1063
1111 if (tcp_v6_send_synack(sk, dst, &fl6, req, 1064 if (tcp_v6_send_synack(sk, dst, &fl6, req,
1112 (struct request_values *)&tmp_ext,
1113 skb_get_queue_mapping(skb)) || 1065 skb_get_queue_mapping(skb)) ||
1114 want_cookie) 1066 want_cookie)
1115 goto drop_and_free; 1067 goto drop_and_free;
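
The same removal threads through the IPv6 SYN-ACK path above: tcp_v6_send_synack(), tcp_v6_rtx_synack() and the tcp_make_synack() call all lose their struct request_values * parameter. The retransmit path after the change, sketched with the statistics update elided:

	static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req)
	{
		struct flowi6 fl6;

		/* no request_values cookie blob to carry any more */
		return tcp_v6_send_synack(sk, NULL, &fl6, req, 0);
	}
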
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index d8e5e852fc7a..da6019b63730 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -450,15 +450,16 @@ try_again:
450 sin6->sin6_family = AF_INET6; 450 sin6->sin6_family = AF_INET6;
451 sin6->sin6_port = udp_hdr(skb)->source; 451 sin6->sin6_port = udp_hdr(skb)->source;
452 sin6->sin6_flowinfo = 0; 452 sin6->sin6_flowinfo = 0;
453 sin6->sin6_scope_id = 0;
454 453
455 if (is_udp4) 454 if (is_udp4) {
456 ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr, 455 ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
457 &sin6->sin6_addr); 456 &sin6->sin6_addr);
458 else { 457 sin6->sin6_scope_id = 0;
458 } else {
459 sin6->sin6_addr = ipv6_hdr(skb)->saddr; 459 sin6->sin6_addr = ipv6_hdr(skb)->saddr;
460 if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) 460 sin6->sin6_scope_id =
461 sin6->sin6_scope_id = IP6CB(skb)->iif; 461 ipv6_iface_scope_id(&sin6->sin6_addr,
462 IP6CB(skb)->iif);
462 } 463 }
463 464
464 } 465 }
@@ -1118,7 +1119,7 @@ do_udp_sendmsg:
1118 1119
1119 if (addr_len >= sizeof(struct sockaddr_in6) && 1120 if (addr_len >= sizeof(struct sockaddr_in6) &&
1120 sin6->sin6_scope_id && 1121 sin6->sin6_scope_id &&
1121 ipv6_addr_type(daddr)&IPV6_ADDR_LINKLOCAL) 1122 __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
1122 fl6.flowi6_oif = sin6->sin6_scope_id; 1123 fl6.flowi6_oif = sin6->sin6_scope_id;
1123 } else { 1124 } else {
1124 if (sk->sk_state != TCP_ESTABLISHED) 1125 if (sk->sk_state != TCP_ESTABLISHED)
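
Both udp.c hunks funnel link-local scope handling through one predicate. The ipv6_iface_scope_id() helper itself is outside this diff; presumably it wraps the same check used in the sendmsg path, along these lines (a hedged sketch, not the actual definition):

	/* sketch: return the interface index only for addresses that
	 * need a scope id (e.g. link-local), else 0 */
	static inline u32 ipv6_iface_scope_id(const struct in6_addr *addr,
					      int iface)
	{
		if (__ipv6_addr_needs_scope_id(__ipv6_addr_type(addr)))
			return iface;
		return 0;
	}
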
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index cf05cf073c51..3bb3a891a424 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -21,6 +21,10 @@ static int udp6_ufo_send_check(struct sk_buff *skb)
21 const struct ipv6hdr *ipv6h; 21 const struct ipv6hdr *ipv6h;
22 struct udphdr *uh; 22 struct udphdr *uh;
23 23
24 /* UDP Tunnel offload on ipv6 is not yet supported. */
25 if (skb->encapsulation)
26 return -EINVAL;
27
24 if (!pskb_may_pull(skb, sizeof(*uh))) 28 if (!pskb_may_pull(skb, sizeof(*uh)))
25 return -EINVAL; 29 return -EINVAL;
26 30
@@ -56,7 +60,9 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
56 /* Packet is from an untrusted source, reset gso_segs. */ 60 /* Packet is from an untrusted source, reset gso_segs. */
57 int type = skb_shinfo(skb)->gso_type; 61 int type = skb_shinfo(skb)->gso_type;
58 62
59 if (unlikely(type & ~(SKB_GSO_UDP | SKB_GSO_DODGY | 63 if (unlikely(type & ~(SKB_GSO_UDP |
64 SKB_GSO_DODGY |
65 SKB_GSO_UDP_TUNNEL |
60 SKB_GSO_GRE) || 66 SKB_GSO_GRE) ||
61 !(type & (SKB_GSO_UDP)))) 67 !(type & (SKB_GSO_UDP))))
62 goto out; 68 goto out;
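
The fragment-path check above is the usual GSO validation pattern: mask off every permitted gso_type bit, reject anything unknown, then require the bit this handler actually serves. Split into its two halves for clarity:

	int type = skb_shinfo(skb)->gso_type;

	/* reject unknown feature bits ... */
	if (type & ~(SKB_GSO_UDP | SKB_GSO_DODGY |
		     SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE))
		goto out;
	/* ... and insist on the mandatory one */
	if (!(type & SKB_GSO_UDP))
		goto out;
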
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index bf6935820001..e165e8dc962e 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1463,7 +1463,8 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
1463 return iucv_accept_poll(sk); 1463 return iucv_accept_poll(sk);
1464 1464
1465 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 1465 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
1466 mask |= POLLERR; 1466 mask |= POLLERR |
1467 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
1467 1468
1468 if (sk->sk_shutdown & RCV_SHUTDOWN) 1469 if (sk->sk_shutdown & RCV_SHUTDOWN)
1469 mask |= POLLRDHUP; 1470 mask |= POLLRDHUP;
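
With SOCK_SELECT_ERR_QUEUE set on the socket, an error-queue event now also raises POLLPRI, so a caller can wait for error-queue readiness specifically. A hypothetical userspace sketch (socket creation and the sockopt that sets the flag are omitted):

	struct pollfd pfd = {
		.fd     = fd,		/* an AF_IUCV socket, setup omitted */
		.events = POLLPRI,	/* error-queue readiness */
	};

	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLPRI)) {
		/* drain the error queue, e.g. recvmsg() with MSG_ERRQUEUE */
	}
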
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 8aecf5df6656..6984c3a353cd 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1777,7 +1777,7 @@ int l2tp_session_delete(struct l2tp_session *session)
1777 if (session->session_close != NULL) 1777 if (session->session_close != NULL)
1778 (*session->session_close)(session); 1778 (*session->session_close)(session);
1779 if (session->deref) 1779 if (session->deref)
1780 (*session->ref)(session); 1780 (*session->deref)(session);
1781 l2tp_session_dec_refcount(session); 1781 l2tp_session_dec_refcount(session);
1782 return 0; 1782 return 0;
1783} 1783}
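
The one-line l2tp change is a classic copy-paste fix: the delete path called the ref callback where it meant deref, so every session delete took a reference instead of dropping one. The intended pairing, sketched:

	if (session->ref)
		(*session->ref)(session);	/* take a temporary reference */

	/* ... use the session ... */

	if (session->deref)
		(*session->deref)(session);	/* drop with deref, not ref */
	l2tp_session_dec_refcount(session);
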
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index a6893602f87a..c34e6d78a592 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -254,7 +254,7 @@ static int ieee80211_del_key(struct wiphy *wiphy, struct net_device *dev,
254 goto out_unlock; 254 goto out_unlock;
255 } 255 }
256 256
257 __ieee80211_key_free(key); 257 __ieee80211_key_free(key, true);
258 258
259 ret = 0; 259 ret = 0;
260 out_unlock: 260 out_unlock:
@@ -1035,9 +1035,12 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
1035 sta_info_flush_defer(vlan); 1035 sta_info_flush_defer(vlan);
1036 sta_info_flush_defer(sdata); 1036 sta_info_flush_defer(sdata);
1037 rcu_barrier(); 1037 rcu_barrier();
1038 list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) 1038 list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) {
1039 sta_info_flush_cleanup(vlan); 1039 sta_info_flush_cleanup(vlan);
1040 ieee80211_free_keys(vlan);
1041 }
1040 sta_info_flush_cleanup(sdata); 1042 sta_info_flush_cleanup(sdata);
1043 ieee80211_free_keys(sdata);
1041 1044
1042 sdata->vif.bss_conf.enable_beacon = false; 1045 sdata->vif.bss_conf.enable_beacon = false;
1043 clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state); 1046 clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
@@ -1177,6 +1180,18 @@ static int sta_apply_parameters(struct ieee80211_local *local,
1177 mask |= BIT(NL80211_STA_FLAG_ASSOCIATED); 1180 mask |= BIT(NL80211_STA_FLAG_ASSOCIATED);
1178 if (set & BIT(NL80211_STA_FLAG_AUTHENTICATED)) 1181 if (set & BIT(NL80211_STA_FLAG_AUTHENTICATED))
1179 set |= BIT(NL80211_STA_FLAG_ASSOCIATED); 1182 set |= BIT(NL80211_STA_FLAG_ASSOCIATED);
1183 } else if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
1184 /*
1185 * TDLS -- everything follows authorized, but
1186 * only becoming authorized is possible, not
1187 * going back
1188 */
1189 if (set & BIT(NL80211_STA_FLAG_AUTHORIZED)) {
1190 set |= BIT(NL80211_STA_FLAG_AUTHENTICATED) |
1191 BIT(NL80211_STA_FLAG_ASSOCIATED);
1192 mask |= BIT(NL80211_STA_FLAG_AUTHENTICATED) |
1193 BIT(NL80211_STA_FLAG_ASSOCIATED);
1194 }
1180 } 1195 }
1181 1196
1182 ret = sta_apply_auth_flags(local, sta, mask, set); 1197 ret = sta_apply_auth_flags(local, sta, mask, set);
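
For TDLS peers the flag handling above is deliberately one-way: granting NL80211_STA_FLAG_AUTHORIZED pulls AUTHENTICATED and ASSOCIATED into both set and mask, and no branch here clears them again. In isolation the escalation is just:

	if (set & BIT(NL80211_STA_FLAG_AUTHORIZED)) {
		/* authorized implies the two lower states */
		set  |= BIT(NL80211_STA_FLAG_AUTHENTICATED) |
			BIT(NL80211_STA_FLAG_ASSOCIATED);
		mask |= BIT(NL80211_STA_FLAG_AUTHENTICATED) |
			BIT(NL80211_STA_FLAG_ASSOCIATED);
	}
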
@@ -1261,7 +1276,8 @@ static int sta_apply_parameters(struct ieee80211_local *local,
1261 if (ieee80211_vif_is_mesh(&sdata->vif)) { 1276 if (ieee80211_vif_is_mesh(&sdata->vif)) {
1262#ifdef CONFIG_MAC80211_MESH 1277#ifdef CONFIG_MAC80211_MESH
1263 u32 changed = 0; 1278 u32 changed = 0;
1264 if (sdata->u.mesh.security & IEEE80211_MESH_SEC_SECURED) { 1279
1280 if (params->sta_modify_mask & STATION_PARAM_APPLY_PLINK_STATE) {
1265 switch (params->plink_state) { 1281 switch (params->plink_state) {
1266 case NL80211_PLINK_ESTAB: 1282 case NL80211_PLINK_ESTAB:
1267 if (sta->plink_state != NL80211_PLINK_ESTAB) 1283 if (sta->plink_state != NL80211_PLINK_ESTAB)
@@ -1292,15 +1308,18 @@ static int sta_apply_parameters(struct ieee80211_local *local,
1292 /* nothing */ 1308 /* nothing */
1293 break; 1309 break;
1294 } 1310 }
1295 } else { 1311 }
1296 switch (params->plink_action) { 1312
1297 case PLINK_ACTION_OPEN: 1313 switch (params->plink_action) {
1298 changed |= mesh_plink_open(sta); 1314 case NL80211_PLINK_ACTION_NO_ACTION:
1299 break; 1315 /* nothing */
1300 case PLINK_ACTION_BLOCK: 1316 break;
1301 changed |= mesh_plink_block(sta); 1317 case NL80211_PLINK_ACTION_OPEN:
1302 break; 1318 changed |= mesh_plink_open(sta);
1303 } 1319 break;
1320 case NL80211_PLINK_ACTION_BLOCK:
1321 changed |= mesh_plink_block(sta);
1322 break;
1304 } 1323 }
1305 1324
1306 if (params->local_pm) 1325 if (params->local_pm)
@@ -1346,8 +1365,10 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
1346 * defaults -- if userspace wants something else we'll 1365 * defaults -- if userspace wants something else we'll
1347 * change it accordingly in sta_apply_parameters() 1366 * change it accordingly in sta_apply_parameters()
1348 */ 1367 */
1349 sta_info_pre_move_state(sta, IEEE80211_STA_AUTH); 1368 if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))) {
1350 sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC); 1369 sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
1370 sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
1371 }
1351 1372
1352 err = sta_apply_parameters(local, sta, params); 1373 err = sta_apply_parameters(local, sta, params);
1353 if (err) { 1374 if (err) {
@@ -1356,8 +1377,8 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
1356 } 1377 }
1357 1378
1358 /* 1379 /*
1359 * for TDLS, rate control should be initialized only when supported 1380 * for TDLS, rate control should be initialized only when
1360 * rates are known. 1381 * rates are known and station is marked authorized
1361 */ 1382 */
1362 if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER)) 1383 if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER))
1363 rate_control_rate_init(sta); 1384 rate_control_rate_init(sta);
@@ -1394,50 +1415,67 @@ static int ieee80211_del_station(struct wiphy *wiphy, struct net_device *dev,
1394} 1415}
1395 1416
1396static int ieee80211_change_station(struct wiphy *wiphy, 1417static int ieee80211_change_station(struct wiphy *wiphy,
1397 struct net_device *dev, 1418 struct net_device *dev, u8 *mac,
1398 u8 *mac,
1399 struct station_parameters *params) 1419 struct station_parameters *params)
1400{ 1420{
1401 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 1421 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1402 struct ieee80211_local *local = wiphy_priv(wiphy); 1422 struct ieee80211_local *local = wiphy_priv(wiphy);
1403 struct sta_info *sta; 1423 struct sta_info *sta;
1404 struct ieee80211_sub_if_data *vlansdata; 1424 struct ieee80211_sub_if_data *vlansdata;
1425 enum cfg80211_station_type statype;
1405 int err; 1426 int err;
1406 1427
1407 mutex_lock(&local->sta_mtx); 1428 mutex_lock(&local->sta_mtx);
1408 1429
1409 sta = sta_info_get_bss(sdata, mac); 1430 sta = sta_info_get_bss(sdata, mac);
1410 if (!sta) { 1431 if (!sta) {
1411 mutex_unlock(&local->sta_mtx); 1432 err = -ENOENT;
1412 return -ENOENT; 1433 goto out_err;
1413 } 1434 }
1414 1435
1415 /* in station mode, some updates are only valid with TDLS */ 1436 switch (sdata->vif.type) {
1416 if (sdata->vif.type == NL80211_IFTYPE_STATION && 1437 case NL80211_IFTYPE_MESH_POINT:
1417 (params->supported_rates || params->ht_capa || params->vht_capa || 1438 if (sdata->u.mesh.user_mpm)
1418 params->sta_modify_mask || 1439 statype = CFG80211_STA_MESH_PEER_USER;
1419 (params->sta_flags_mask & BIT(NL80211_STA_FLAG_WME))) && 1440 else
1420 !test_sta_flag(sta, WLAN_STA_TDLS_PEER)) { 1441 statype = CFG80211_STA_MESH_PEER_KERNEL;
1421 mutex_unlock(&local->sta_mtx); 1442 break;
1422 return -EINVAL; 1443 case NL80211_IFTYPE_ADHOC:
1444 statype = CFG80211_STA_IBSS;
1445 break;
1446 case NL80211_IFTYPE_STATION:
1447 if (!test_sta_flag(sta, WLAN_STA_TDLS_PEER)) {
1448 statype = CFG80211_STA_AP_STA;
1449 break;
1450 }
1451 if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
1452 statype = CFG80211_STA_TDLS_PEER_ACTIVE;
1453 else
1454 statype = CFG80211_STA_TDLS_PEER_SETUP;
1455 break;
1456 case NL80211_IFTYPE_AP:
1457 case NL80211_IFTYPE_AP_VLAN:
1458 statype = CFG80211_STA_AP_CLIENT;
1459 break;
1460 default:
1461 err = -EOPNOTSUPP;
1462 goto out_err;
1423 } 1463 }
1424 1464
1465 err = cfg80211_check_station_change(wiphy, params, statype);
1466 if (err)
1467 goto out_err;
1468
1425 if (params->vlan && params->vlan != sta->sdata->dev) { 1469 if (params->vlan && params->vlan != sta->sdata->dev) {
1426 bool prev_4addr = false; 1470 bool prev_4addr = false;
1427 bool new_4addr = false; 1471 bool new_4addr = false;
1428 1472
1429 vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); 1473 vlansdata = IEEE80211_DEV_TO_SUB_IF(params->vlan);
1430 1474
1431 if (vlansdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
1432 vlansdata->vif.type != NL80211_IFTYPE_AP) {
1433 mutex_unlock(&local->sta_mtx);
1434 return -EINVAL;
1435 }
1436
1437 if (params->vlan->ieee80211_ptr->use_4addr) { 1475 if (params->vlan->ieee80211_ptr->use_4addr) {
1438 if (vlansdata->u.vlan.sta) { 1476 if (vlansdata->u.vlan.sta) {
1439 mutex_unlock(&local->sta_mtx); 1477 err = -EBUSY;
1440 return -EBUSY; 1478 goto out_err;
1441 } 1479 }
1442 1480
1443 rcu_assign_pointer(vlansdata->u.vlan.sta, sta); 1481 rcu_assign_pointer(vlansdata->u.vlan.sta, sta);
@@ -1464,12 +1502,12 @@ static int ieee80211_change_station(struct wiphy *wiphy,
1464 } 1502 }
1465 1503
1466 err = sta_apply_parameters(local, sta, params); 1504 err = sta_apply_parameters(local, sta, params);
1467 if (err) { 1505 if (err)
1468 mutex_unlock(&local->sta_mtx); 1506 goto out_err;
1469 return err;
1470 }
1471 1507
1472 if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) && params->supported_rates) 1508 /* When peer becomes authorized, init rate control as well */
1509 if (test_sta_flag(sta, WLAN_STA_TDLS_PEER) &&
1510 test_sta_flag(sta, WLAN_STA_AUTHORIZED))
1473 rate_control_rate_init(sta); 1511 rate_control_rate_init(sta);
1474 1512
1475 mutex_unlock(&local->sta_mtx); 1513 mutex_unlock(&local->sta_mtx);
@@ -1479,7 +1517,11 @@ static int ieee80211_change_station(struct wiphy *wiphy,
1479 ieee80211_recalc_ps(local, -1); 1517 ieee80211_recalc_ps(local, -1);
1480 ieee80211_recalc_ps_vif(sdata); 1518 ieee80211_recalc_ps_vif(sdata);
1481 } 1519 }
1520
1482 return 0; 1521 return 0;
1522out_err:
1523 mutex_unlock(&local->sta_mtx);
1524 return err;
1483} 1525}
1484 1526
1485#ifdef CONFIG_MAC80211_MESH 1527#ifdef CONFIG_MAC80211_MESH
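
Besides the new station-type classification, ieee80211_change_station() is restructured around a single unlock site: every early return taken while sta_mtx is held now goes through one out_err label, the usual kernel idiom for lock-and-bail paths. Stripped to the skeleton:

	mutex_lock(&local->sta_mtx);

	err = cfg80211_check_station_change(wiphy, params, statype);
	if (err)
		goto out_err;
	/* ... */
	mutex_unlock(&local->sta_mtx);
	return 0;
out_err:
	mutex_unlock(&local->sta_mtx);
	return err;
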
@@ -1687,6 +1729,7 @@ static int copy_mesh_setup(struct ieee80211_if_mesh *ifmsh,
1687 ifmsh->mesh_sp_id = setup->sync_method; 1729 ifmsh->mesh_sp_id = setup->sync_method;
1688 ifmsh->mesh_pp_id = setup->path_sel_proto; 1730 ifmsh->mesh_pp_id = setup->path_sel_proto;
1689 ifmsh->mesh_pm_id = setup->path_metric; 1731 ifmsh->mesh_pm_id = setup->path_metric;
1732 ifmsh->user_mpm = setup->user_mpm;
1690 ifmsh->security = IEEE80211_MESH_SEC_NONE; 1733 ifmsh->security = IEEE80211_MESH_SEC_NONE;
1691 if (setup->is_authenticated) 1734 if (setup->is_authenticated)
1692 ifmsh->security |= IEEE80211_MESH_SEC_AUTHED; 1735 ifmsh->security |= IEEE80211_MESH_SEC_AUTHED;
@@ -1730,8 +1773,11 @@ static int ieee80211_update_mesh_config(struct wiphy *wiphy,
1730 conf->dot11MeshTTL = nconf->dot11MeshTTL; 1773 conf->dot11MeshTTL = nconf->dot11MeshTTL;
1731 if (_chg_mesh_attr(NL80211_MESHCONF_ELEMENT_TTL, mask)) 1774 if (_chg_mesh_attr(NL80211_MESHCONF_ELEMENT_TTL, mask))
1732 conf->element_ttl = nconf->element_ttl; 1775 conf->element_ttl = nconf->element_ttl;
1733 if (_chg_mesh_attr(NL80211_MESHCONF_AUTO_OPEN_PLINKS, mask)) 1776 if (_chg_mesh_attr(NL80211_MESHCONF_AUTO_OPEN_PLINKS, mask)) {
1777 if (ifmsh->user_mpm)
1778 return -EBUSY;
1734 conf->auto_open_plinks = nconf->auto_open_plinks; 1779 conf->auto_open_plinks = nconf->auto_open_plinks;
1780 }
1735 if (_chg_mesh_attr(NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR, mask)) 1781 if (_chg_mesh_attr(NL80211_MESHCONF_SYNC_OFFSET_MAX_NEIGHBOR, mask))
1736 conf->dot11MeshNbrOffsetMaxNeighbor = 1782 conf->dot11MeshNbrOffsetMaxNeighbor =
1737 nconf->dot11MeshNbrOffsetMaxNeighbor; 1783 nconf->dot11MeshNbrOffsetMaxNeighbor;
@@ -2371,7 +2417,8 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
2371 struct ieee80211_sub_if_data *sdata, 2417 struct ieee80211_sub_if_data *sdata,
2372 struct ieee80211_channel *channel, 2418 struct ieee80211_channel *channel,
2373 unsigned int duration, u64 *cookie, 2419 unsigned int duration, u64 *cookie,
2374 struct sk_buff *txskb) 2420 struct sk_buff *txskb,
2421 enum ieee80211_roc_type type)
2375{ 2422{
2376 struct ieee80211_roc_work *roc, *tmp; 2423 struct ieee80211_roc_work *roc, *tmp;
2377 bool queued = false; 2424 bool queued = false;
@@ -2390,6 +2437,7 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
2390 roc->duration = duration; 2437 roc->duration = duration;
2391 roc->req_duration = duration; 2438 roc->req_duration = duration;
2392 roc->frame = txskb; 2439 roc->frame = txskb;
2440 roc->type = type;
2393 roc->mgmt_tx_cookie = (unsigned long)txskb; 2441 roc->mgmt_tx_cookie = (unsigned long)txskb;
2394 roc->sdata = sdata; 2442 roc->sdata = sdata;
2395 INIT_DELAYED_WORK(&roc->work, ieee80211_sw_roc_work); 2443 INIT_DELAYED_WORK(&roc->work, ieee80211_sw_roc_work);
@@ -2420,7 +2468,7 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
2420 if (!duration) 2468 if (!duration)
2421 duration = 10; 2469 duration = 10;
2422 2470
2423 ret = drv_remain_on_channel(local, sdata, channel, duration); 2471 ret = drv_remain_on_channel(local, sdata, channel, duration, type);
2424 if (ret) { 2472 if (ret) {
2425 kfree(roc); 2473 kfree(roc);
2426 return ret; 2474 return ret;
@@ -2439,10 +2487,13 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
2439 * 2487 *
2440 * If it hasn't started yet, just increase the duration 2488 * If it hasn't started yet, just increase the duration
2441 * and add the new one to the list of dependents. 2489 * and add the new one to the list of dependents.
2490 * If the type of the new ROC has higher priority, modify the
2491 * type of the previous one to match that of the new one.
2442 */ 2492 */
2443 if (!tmp->started) { 2493 if (!tmp->started) {
2444 list_add_tail(&roc->list, &tmp->dependents); 2494 list_add_tail(&roc->list, &tmp->dependents);
2445 tmp->duration = max(tmp->duration, roc->duration); 2495 tmp->duration = max(tmp->duration, roc->duration);
2496 tmp->type = max(tmp->type, roc->type);
2446 queued = true; 2497 queued = true;
2447 break; 2498 break;
2448 } 2499 }
@@ -2454,16 +2505,18 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
2454 /* 2505 /*
2455 * In the offloaded ROC case, if it hasn't begun, add 2506 * In the offloaded ROC case, if it hasn't begun, add
2456 * this new one to the dependent list to be handled 2507 * this new one to the dependent list to be handled
2457 * when the the master one begins. If it has begun, 2508 * when the master one begins. If it has begun,
2458 * check that there's still a minimum time left and 2509 * check that there's still a minimum time left and
2459 * if so, start this one, transmitting the frame, but 2510 * if so, start this one, transmitting the frame, but
2460 * add it to the list directly after this one with a 2511 * add it to the list directly after this one with
2461 * a reduced time so we'll ask the driver to execute 2512 * a reduced time so we'll ask the driver to execute
2462 * it right after finishing the previous one, in the 2513 * it right after finishing the previous one, in the
2463 * hope that it'll also be executed right afterwards, 2514 * hope that it'll also be executed right afterwards,
2464 * effectively extending the old one. 2515 * effectively extending the old one.
2465 * If there's no minimum time left, just add it to the 2516 * If there's no minimum time left, just add it to the
2466 * normal list. 2517 * normal list.
2518 * TODO: the ROC type is ignored here, assuming that it
2519 * is better to immediately use the current ROC.
2467 */ 2520 */
2468 if (!tmp->hw_begun) { 2521 if (!tmp->hw_begun) {
2469 list_add_tail(&roc->list, &tmp->dependents); 2522 list_add_tail(&roc->list, &tmp->dependents);
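
Each remain-on-channel request now carries an ieee80211_roc_type, and coalescing escalates a not-yet-started request to the highest priority among its dependents via max(). Assuming the enum orders NORMAL below MGMT_TX (the enum definition is outside this diff), merging a management-TX ROC into a pending normal one upgrades the pending request:

	/* pending request absorbs the stronger requirements */
	tmp->duration = max(tmp->duration, roc->duration);
	tmp->type = max(tmp->type, roc->type);	/* NORMAL < MGMT_TX, assumed */
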
@@ -2557,7 +2610,8 @@ static int ieee80211_remain_on_channel(struct wiphy *wiphy,
2557 2610
2558 mutex_lock(&local->mtx); 2611 mutex_lock(&local->mtx);
2559 ret = ieee80211_start_roc_work(local, sdata, chan, 2612 ret = ieee80211_start_roc_work(local, sdata, chan,
2560 duration, cookie, NULL); 2613 duration, cookie, NULL,
2614 IEEE80211_ROC_TYPE_NORMAL);
2561 mutex_unlock(&local->mtx); 2615 mutex_unlock(&local->mtx);
2562 2616
2563 return ret; 2617 return ret;
@@ -2792,7 +2846,8 @@ static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
2792 2846
2793 /* This will handle all kinds of coalescing and immediate TX */ 2847 /* This will handle all kinds of coalescing and immediate TX */
2794 ret = ieee80211_start_roc_work(local, sdata, chan, 2848 ret = ieee80211_start_roc_work(local, sdata, chan,
2795 wait, cookie, skb); 2849 wait, cookie, skb,
2850 IEEE80211_ROC_TYPE_MGMT_TX);
2796 if (ret) 2851 if (ret)
2797 kfree_skb(skb); 2852 kfree_skb(skb);
2798 out_unlock: 2853 out_unlock:
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index c7591f73dbc3..4f841fe559df 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -325,6 +325,36 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
325} 325}
326STA_OPS(ht_capa); 326STA_OPS(ht_capa);
327 327
328static ssize_t sta_vht_capa_read(struct file *file, char __user *userbuf,
329 size_t count, loff_t *ppos)
330{
331 char buf[128], *p = buf;
332 struct sta_info *sta = file->private_data;
333 struct ieee80211_sta_vht_cap *vhtc = &sta->sta.vht_cap;
334
335 p += scnprintf(p, sizeof(buf) + buf - p, "VHT %ssupported\n",
336 vhtc->vht_supported ? "" : "not ");
337 if (vhtc->vht_supported) {
338 p += scnprintf(p, sizeof(buf)+buf-p, "cap: %#.8x\n", vhtc->cap);
339
340 p += scnprintf(p, sizeof(buf)+buf-p, "RX MCS: %.4x\n",
341 le16_to_cpu(vhtc->vht_mcs.rx_mcs_map));
342 if (vhtc->vht_mcs.rx_highest)
343 p += scnprintf(p, sizeof(buf)+buf-p,
344 "MCS RX highest: %d Mbps\n",
345 le16_to_cpu(vhtc->vht_mcs.rx_highest));
346 p += scnprintf(p, sizeof(buf)+buf-p, "TX MCS: %.4x\n",
347 le16_to_cpu(vhtc->vht_mcs.tx_mcs_map));
348 if (vhtc->vht_mcs.tx_highest)
349 p += scnprintf(p, sizeof(buf)+buf-p,
350 "MCS TX highest: %d Mbps\n",
351 le16_to_cpu(vhtc->vht_mcs.tx_highest));
352 }
353
354 return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
355}
356STA_OPS(vht_capa);
357
328static ssize_t sta_current_tx_rate_read(struct file *file, char __user *userbuf, 358static ssize_t sta_current_tx_rate_read(struct file *file, char __user *userbuf,
329 size_t count, loff_t *ppos) 359 size_t count, loff_t *ppos)
330{ 360{
@@ -405,6 +435,7 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
405 DEBUGFS_ADD(dev); 435 DEBUGFS_ADD(dev);
406 DEBUGFS_ADD(last_signal); 436 DEBUGFS_ADD(last_signal);
407 DEBUGFS_ADD(ht_capa); 437 DEBUGFS_ADD(ht_capa);
438 DEBUGFS_ADD(vht_capa);
408 DEBUGFS_ADD(last_ack_signal); 439 DEBUGFS_ADD(last_ack_signal);
409 DEBUGFS_ADD(current_tx_rate); 440 DEBUGFS_ADD(current_tx_rate);
410 DEBUGFS_ADD(last_rx_rate); 441 DEBUGFS_ADD(last_rx_rate);
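
The new vht_capa file follows the standard small-debugfs-read idiom: compose the text with scnprintf() into a fixed stack buffer while advancing a write pointer, then hand the result to simple_read_from_buffer(). Reduced to the pattern (the field names here are placeholders):

	char buf[128], *p = buf;

	/* sizeof(buf) + buf - p is the space remaining at p */
	p += scnprintf(p, sizeof(buf) + buf - p, "cap: %#.8x\n", cap);
	p += scnprintf(p, sizeof(buf) + buf - p, "mcs: %.4x\n", mcs);

	return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
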
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index ee56d0779d8b..832acea4a5cb 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -787,15 +787,16 @@ static inline int drv_get_antenna(struct ieee80211_local *local,
787static inline int drv_remain_on_channel(struct ieee80211_local *local, 787static inline int drv_remain_on_channel(struct ieee80211_local *local,
788 struct ieee80211_sub_if_data *sdata, 788 struct ieee80211_sub_if_data *sdata,
789 struct ieee80211_channel *chan, 789 struct ieee80211_channel *chan,
790 unsigned int duration) 790 unsigned int duration,
791 enum ieee80211_roc_type type)
791{ 792{
792 int ret; 793 int ret;
793 794
794 might_sleep(); 795 might_sleep();
795 796
796 trace_drv_remain_on_channel(local, sdata, chan, duration); 797 trace_drv_remain_on_channel(local, sdata, chan, duration, type);
797 ret = local->ops->remain_on_channel(&local->hw, &sdata->vif, 798 ret = local->ops->remain_on_channel(&local->hw, &sdata->vif,
798 chan, duration); 799 chan, duration, type);
799 trace_drv_return_int(local, ret); 800 trace_drv_return_int(local, ret);
800 801
801 return ret; 802 return ret;
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 0db25d4bb223..af8cee06e4f3 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -40,13 +40,6 @@ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
40 if (!ht_cap->ht_supported) 40 if (!ht_cap->ht_supported)
41 return; 41 return;
42 42
43 if (sdata->vif.type != NL80211_IFTYPE_STATION) {
44 /* AP interfaces call this code when adding new stations,
45 * so just silently ignore non station interfaces.
46 */
47 return;
48 }
49
50 /* NOTE: If you add more over-rides here, update register_hw 43 /* NOTE: If you add more over-rides here, update register_hw
51 * ht_capa_mod_msk logic in main.c as well. 44 * ht_capa_mod_msk logic in main.c as well.
52 * And, if this method can ever change ht_cap.ht_supported, fix 45 * And, if this method can ever change ht_cap.ht_supported, fix
@@ -97,7 +90,7 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
97 const struct ieee80211_ht_cap *ht_cap_ie, 90 const struct ieee80211_ht_cap *ht_cap_ie,
98 struct sta_info *sta) 91 struct sta_info *sta)
99{ 92{
100 struct ieee80211_sta_ht_cap ht_cap; 93 struct ieee80211_sta_ht_cap ht_cap, own_cap;
101 u8 ampdu_info, tx_mcs_set_cap; 94 u8 ampdu_info, tx_mcs_set_cap;
102 int i, max_tx_streams; 95 int i, max_tx_streams;
103 bool changed; 96 bool changed;
@@ -111,6 +104,18 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
111 104
112 ht_cap.ht_supported = true; 105 ht_cap.ht_supported = true;
113 106
107 own_cap = sband->ht_cap;
108
109 /*
110 * If user has specified capability over-rides, take care
111 * of that if the station we're setting up is the AP that
112 * we advertised a restricted capability set to. Override
113 * our own capabilities and then use those below.
114 */
115 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
116 !test_sta_flag(sta, WLAN_STA_TDLS_PEER))
117 ieee80211_apply_htcap_overrides(sdata, &own_cap);
118
114 /* 119 /*
115 * The bits listed in this expression should be 120 * The bits listed in this expression should be
116 * the same for the peer and us, if the station 121 * the same for the peer and us, if the station
@@ -118,21 +123,20 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
118 * we mask them out. 123 * we mask them out.
119 */ 124 */
120 ht_cap.cap = le16_to_cpu(ht_cap_ie->cap_info) & 125 ht_cap.cap = le16_to_cpu(ht_cap_ie->cap_info) &
121 (sband->ht_cap.cap | 126 (own_cap.cap | ~(IEEE80211_HT_CAP_LDPC_CODING |
122 ~(IEEE80211_HT_CAP_LDPC_CODING | 127 IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
123 IEEE80211_HT_CAP_SUP_WIDTH_20_40 | 128 IEEE80211_HT_CAP_GRN_FLD |
124 IEEE80211_HT_CAP_GRN_FLD | 129 IEEE80211_HT_CAP_SGI_20 |
125 IEEE80211_HT_CAP_SGI_20 | 130 IEEE80211_HT_CAP_SGI_40 |
126 IEEE80211_HT_CAP_SGI_40 | 131 IEEE80211_HT_CAP_DSSSCCK40));
127 IEEE80211_HT_CAP_DSSSCCK40));
128 132
129 /* 133 /*
130 * The STBC bits are asymmetric -- if we don't have 134 * The STBC bits are asymmetric -- if we don't have
131 * TX then mask out the peer's RX and vice versa. 135 * TX then mask out the peer's RX and vice versa.
132 */ 136 */
133 if (!(sband->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC)) 137 if (!(own_cap.cap & IEEE80211_HT_CAP_TX_STBC))
134 ht_cap.cap &= ~IEEE80211_HT_CAP_RX_STBC; 138 ht_cap.cap &= ~IEEE80211_HT_CAP_RX_STBC;
135 if (!(sband->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)) 139 if (!(own_cap.cap & IEEE80211_HT_CAP_RX_STBC))
136 ht_cap.cap &= ~IEEE80211_HT_CAP_TX_STBC; 140 ht_cap.cap &= ~IEEE80211_HT_CAP_TX_STBC;
137 141
138 ampdu_info = ht_cap_ie->ampdu_params_info; 142 ampdu_info = ht_cap_ie->ampdu_params_info;
@@ -142,7 +146,7 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
142 (ampdu_info & IEEE80211_HT_AMPDU_PARM_DENSITY) >> 2; 146 (ampdu_info & IEEE80211_HT_AMPDU_PARM_DENSITY) >> 2;
143 147
144 /* own MCS TX capabilities */ 148 /* own MCS TX capabilities */
145 tx_mcs_set_cap = sband->ht_cap.mcs.tx_params; 149 tx_mcs_set_cap = own_cap.mcs.tx_params;
146 150
147 /* Copy peer MCS TX capabilities, the driver might need them. */ 151 /* Copy peer MCS TX capabilities, the driver might need them. */
148 ht_cap.mcs.tx_params = ht_cap_ie->mcs.tx_params; 152 ht_cap.mcs.tx_params = ht_cap_ie->mcs.tx_params;
@@ -168,26 +172,20 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
168 */ 172 */
169 for (i = 0; i < max_tx_streams; i++) 173 for (i = 0; i < max_tx_streams; i++)
170 ht_cap.mcs.rx_mask[i] = 174 ht_cap.mcs.rx_mask[i] =
171 sband->ht_cap.mcs.rx_mask[i] & ht_cap_ie->mcs.rx_mask[i]; 175 own_cap.mcs.rx_mask[i] & ht_cap_ie->mcs.rx_mask[i];
172 176
173 if (tx_mcs_set_cap & IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION) 177 if (tx_mcs_set_cap & IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION)
174 for (i = IEEE80211_HT_MCS_UNEQUAL_MODULATION_START_BYTE; 178 for (i = IEEE80211_HT_MCS_UNEQUAL_MODULATION_START_BYTE;
175 i < IEEE80211_HT_MCS_MASK_LEN; i++) 179 i < IEEE80211_HT_MCS_MASK_LEN; i++)
176 ht_cap.mcs.rx_mask[i] = 180 ht_cap.mcs.rx_mask[i] =
177 sband->ht_cap.mcs.rx_mask[i] & 181 own_cap.mcs.rx_mask[i] &
178 ht_cap_ie->mcs.rx_mask[i]; 182 ht_cap_ie->mcs.rx_mask[i];
179 183
180 /* handle MCS rate 32 too */ 184 /* handle MCS rate 32 too */
181 if (sband->ht_cap.mcs.rx_mask[32/8] & ht_cap_ie->mcs.rx_mask[32/8] & 1) 185 if (own_cap.mcs.rx_mask[32/8] & ht_cap_ie->mcs.rx_mask[32/8] & 1)
182 ht_cap.mcs.rx_mask[32/8] |= 1; 186 ht_cap.mcs.rx_mask[32/8] |= 1;
183 187
184 apply: 188 apply:
185 /*
186 * If user has specified capability over-rides, take care
187 * of that here.
188 */
189 ieee80211_apply_htcap_overrides(sdata, &ht_cap);
190
191 changed = memcmp(&sta->sta.ht_cap, &ht_cap, sizeof(ht_cap)); 189 changed = memcmp(&sta->sta.ht_cap, &ht_cap, sizeof(ht_cap));
192 190
193 memcpy(&sta->sta.ht_cap, &ht_cap, sizeof(ht_cap)); 191 memcpy(&sta->sta.ht_cap, &ht_cap, sizeof(ht_cap));
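
The ht.c rework moves where the user's HT capability overrides are applied: previously they were applied to the already-intersected result at the apply: label; now a local copy of our own band capabilities is overridden first, and the peer's IE is intersected against that copy. The ordering change, schematically (intersect() stands in for the masking done above):

	/* old (removed): intersect first, override the result
	 *	ht_cap = intersect(peer_ie, sband->ht_cap);
	 *	ieee80211_apply_htcap_overrides(sdata, &ht_cap);
	 */

	/* new: override our own caps, then intersect */
	own_cap = sband->ht_cap;
	if (sdata->vif.type == NL80211_IFTYPE_STATION &&
	    !test_sta_flag(sta, WLAN_STA_TDLS_PEER))
		ieee80211_apply_htcap_overrides(sdata, &own_cap);
	/* ht_cap = intersect(peer_ie, own_cap); */
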
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 40b71dfcc79d..539d4a11b47b 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -985,36 +985,9 @@ static void ieee80211_ibss_timer(unsigned long data)
985{ 985{
986 struct ieee80211_sub_if_data *sdata = 986 struct ieee80211_sub_if_data *sdata =
987 (struct ieee80211_sub_if_data *) data; 987 (struct ieee80211_sub_if_data *) data;
988 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
989 struct ieee80211_local *local = sdata->local;
990
991 if (local->quiescing) {
992 ifibss->timer_running = true;
993 return;
994 }
995
996 ieee80211_queue_work(&local->hw, &sdata->work);
997}
998
999#ifdef CONFIG_PM
1000void ieee80211_ibss_quiesce(struct ieee80211_sub_if_data *sdata)
1001{
1002 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
1003 988
1004 if (del_timer_sync(&ifibss->timer)) 989 ieee80211_queue_work(&sdata->local->hw, &sdata->work);
1005 ifibss->timer_running = true;
1006}
1007
1008void ieee80211_ibss_restart(struct ieee80211_sub_if_data *sdata)
1009{
1010 struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
1011
1012 if (ifibss->timer_running) {
1013 add_timer(&ifibss->timer);
1014 ifibss->timer_running = false;
1015 }
1016} 990}
1017#endif
1018 991
1019void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata) 992void ieee80211_ibss_setup_sdata(struct ieee80211_sub_if_data *sdata)
1020{ 993{
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 5672533a0832..e140184c28ce 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -316,6 +316,7 @@ struct ieee80211_roc_work {
316 u32 duration, req_duration; 316 u32 duration, req_duration;
317 struct sk_buff *frame; 317 struct sk_buff *frame;
318 u64 cookie, mgmt_tx_cookie; 318 u64 cookie, mgmt_tx_cookie;
319 enum ieee80211_roc_type type;
319}; 320};
320 321
321/* flags used in struct ieee80211_if_managed.flags */ 322/* flags used in struct ieee80211_if_managed.flags */
@@ -401,7 +402,6 @@ struct ieee80211_if_managed {
401 402
402 u16 aid; 403 u16 aid;
403 404
404 unsigned long timers_running; /* used for quiesce/restart */
405 bool powersave; /* powersave requested for this iface */ 405 bool powersave; /* powersave requested for this iface */
406 bool broken_ap; /* AP is broken -- turn off powersave */ 406 bool broken_ap; /* AP is broken -- turn off powersave */
407 u8 dtim_period; 407 u8 dtim_period;
@@ -480,6 +480,8 @@ struct ieee80211_if_managed {
480 480
481 struct ieee80211_ht_cap ht_capa; /* configured ht-cap over-rides */ 481 struct ieee80211_ht_cap ht_capa; /* configured ht-cap over-rides */
482 struct ieee80211_ht_cap ht_capa_mask; /* Valid parts of ht_capa */ 482 struct ieee80211_ht_cap ht_capa_mask; /* Valid parts of ht_capa */
483 struct ieee80211_vht_cap vht_capa; /* configured VHT overrides */
484 struct ieee80211_vht_cap vht_capa_mask; /* Valid parts of vht_capa */
483}; 485};
484 486
485struct ieee80211_if_ibss { 487struct ieee80211_if_ibss {
@@ -491,8 +493,6 @@ struct ieee80211_if_ibss {
491 493
492 u32 basic_rates; 494 u32 basic_rates;
493 495
494 bool timer_running;
495
496 bool fixed_bssid; 496 bool fixed_bssid;
497 bool fixed_channel; 497 bool fixed_channel;
498 bool privacy; 498 bool privacy;
@@ -544,8 +544,6 @@ struct ieee80211_if_mesh {
544 struct timer_list mesh_path_timer; 544 struct timer_list mesh_path_timer;
545 struct timer_list mesh_path_root_timer; 545 struct timer_list mesh_path_root_timer;
546 546
547 unsigned long timers_running;
548
549 unsigned long wrkq_flags; 547 unsigned long wrkq_flags;
550 548
551 u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN]; 549 u8 mesh_id[IEEE80211_MAX_MESH_ID_LEN];
@@ -591,6 +589,7 @@ struct ieee80211_if_mesh {
591 IEEE80211_MESH_SEC_AUTHED = 0x1, 589 IEEE80211_MESH_SEC_AUTHED = 0x1,
592 IEEE80211_MESH_SEC_SECURED = 0x2, 590 IEEE80211_MESH_SEC_SECURED = 0x2,
593 } security; 591 } security;
592 bool user_mpm;
594 /* Extensible Synchronization Framework */ 593 /* Extensible Synchronization Framework */
595 const struct ieee80211_mesh_sync_ops *sync_ops; 594 const struct ieee80211_mesh_sync_ops *sync_ops;
596 s64 sync_offset_clockdrift_max; 595 s64 sync_offset_clockdrift_max;
@@ -683,6 +682,8 @@ struct ieee80211_sub_if_data {
683 682
684 /* count for keys needing tailroom space allocation */ 683 /* count for keys needing tailroom space allocation */
685 int crypto_tx_tailroom_needed_cnt; 684 int crypto_tx_tailroom_needed_cnt;
685 int crypto_tx_tailroom_pending_dec;
686 struct delayed_work dec_tailroom_needed_wk;
686 687
687 struct net_device *dev; 688 struct net_device *dev;
688 struct ieee80211_local *local; 689 struct ieee80211_local *local;
@@ -766,10 +767,6 @@ struct ieee80211_sub_if_data {
766 } debugfs; 767 } debugfs;
767#endif 768#endif
768 769
769#ifdef CONFIG_PM
770 struct ieee80211_bss_conf suspend_bss_conf;
771#endif
772
773 /* must be last, dynamically sized area in this! */ 770 /* must be last, dynamically sized area in this! */
774 struct ieee80211_vif vif; 771 struct ieee80211_vif vif;
775}; 772};
@@ -1137,11 +1134,6 @@ struct ieee80211_local {
1137 1134
1138 struct ieee80211_sub_if_data __rcu *p2p_sdata; 1135 struct ieee80211_sub_if_data __rcu *p2p_sdata;
1139 1136
1140 /* dummy netdev for use w/ NAPI */
1141 struct net_device napi_dev;
1142
1143 struct napi_struct napi;
1144
1145 /* virtual monitor interface */ 1137 /* virtual monitor interface */
1146 struct ieee80211_sub_if_data __rcu *monitor_sdata; 1138 struct ieee80211_sub_if_data __rcu *monitor_sdata;
1147 struct cfg80211_chan_def monitor_chandef; 1139 struct cfg80211_chan_def monitor_chandef;
@@ -1284,8 +1276,6 @@ void
1284ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, 1276ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
1285 const struct ieee80211_channel_sw_ie *sw_elem, 1277 const struct ieee80211_channel_sw_ie *sw_elem,
1286 struct ieee80211_bss *bss, u64 timestamp); 1278 struct ieee80211_bss *bss, u64 timestamp);
1287void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata);
1288void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata);
1289void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata); 1279void ieee80211_sta_work(struct ieee80211_sub_if_data *sdata);
1290void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, 1280void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1291 struct sk_buff *skb); 1281 struct sk_buff *skb);
@@ -1303,8 +1293,6 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
1303int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, 1293int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
1304 struct cfg80211_ibss_params *params); 1294 struct cfg80211_ibss_params *params);
1305int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata); 1295int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata);
1306void ieee80211_ibss_quiesce(struct ieee80211_sub_if_data *sdata);
1307void ieee80211_ibss_restart(struct ieee80211_sub_if_data *sdata);
1308void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata); 1296void ieee80211_ibss_work(struct ieee80211_sub_if_data *sdata);
1309void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, 1297void ieee80211_ibss_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
1310 struct sk_buff *skb); 1298 struct sk_buff *skb);
@@ -1443,6 +1431,8 @@ void ieee80211_sta_set_rx_nss(struct sta_info *sta);
1443void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, 1431void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
1444 struct sta_info *sta, u8 opmode, 1432 struct sta_info *sta, u8 opmode,
1445 enum ieee80211_band band, bool nss_only); 1433 enum ieee80211_band band, bool nss_only);
1434void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata,
1435 struct ieee80211_sta_vht_cap *vht_cap);
1446 1436
1447/* Spectrum management */ 1437/* Spectrum management */
1448void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, 1438void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 58150f877ec3..a2b5e17036bb 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -488,8 +488,6 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
488 res = drv_start(local); 488 res = drv_start(local);
489 if (res) 489 if (res)
490 goto err_del_bss; 490 goto err_del_bss;
491 if (local->ops->napi_poll)
492 napi_enable(&local->napi);
493 /* we're brought up, everything changes */ 491 /* we're brought up, everything changes */
494 hw_reconf_flags = ~0; 492 hw_reconf_flags = ~0;
495 ieee80211_led_radio(local, true); 493 ieee80211_led_radio(local, true);
@@ -841,14 +839,16 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
841 rcu_barrier(); 839 rcu_barrier();
842 sta_info_flush_cleanup(sdata); 840 sta_info_flush_cleanup(sdata);
843 841
844 skb_queue_purge(&sdata->skb_queue);
845
846 /* 842 /*
847 * Free all remaining keys, there shouldn't be any, 843 * Free all remaining keys, there shouldn't be any,
848 * except maybe group keys in AP mode or WDS? 844 * except maybe in WDS mode?
849 */ 845 */
850 ieee80211_free_keys(sdata); 846 ieee80211_free_keys(sdata);
851 847
848 /* fall through */
849 case NL80211_IFTYPE_AP:
850 skb_queue_purge(&sdata->skb_queue);
851
852 drv_remove_interface_debugfs(local, sdata); 852 drv_remove_interface_debugfs(local, sdata);
853 853
854 if (going_down) 854 if (going_down)
@@ -860,8 +860,6 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
860 ieee80211_recalc_ps(local, -1); 860 ieee80211_recalc_ps(local, -1);
861 861
862 if (local->open_count == 0) { 862 if (local->open_count == 0) {
863 if (local->ops->napi_poll)
864 napi_disable(&local->napi);
865 ieee80211_clear_tx_pending(local); 863 ieee80211_clear_tx_pending(local);
866 ieee80211_stop_device(local); 864 ieee80211_stop_device(local);
867 865
@@ -1550,6 +1548,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
1550 INIT_WORK(&sdata->cleanup_stations_wk, ieee80211_cleanup_sdata_stas_wk); 1548 INIT_WORK(&sdata->cleanup_stations_wk, ieee80211_cleanup_sdata_stas_wk);
1551 INIT_DELAYED_WORK(&sdata->dfs_cac_timer_work, 1549 INIT_DELAYED_WORK(&sdata->dfs_cac_timer_work,
1552 ieee80211_dfs_cac_timer_work); 1550 ieee80211_dfs_cac_timer_work);
1551 INIT_DELAYED_WORK(&sdata->dec_tailroom_needed_wk,
1552 ieee80211_delayed_tailroom_dec);
1553 1553
1554 for (i = 0; i < IEEE80211_NUM_BANDS; i++) { 1554 for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
1555 struct ieee80211_supported_band *sband; 1555 struct ieee80211_supported_band *sband;
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index ef252eb58c36..99e9f6ae6a54 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -397,7 +397,8 @@ struct ieee80211_key *ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
397 return key; 397 return key;
398} 398}
399 399
400static void __ieee80211_key_destroy(struct ieee80211_key *key) 400static void __ieee80211_key_destroy(struct ieee80211_key *key,
401 bool delay_tailroom)
401{ 402{
402 if (!key) 403 if (!key)
403 return; 404 return;
@@ -416,8 +417,18 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key)
416 if (key->conf.cipher == WLAN_CIPHER_SUITE_AES_CMAC) 417 if (key->conf.cipher == WLAN_CIPHER_SUITE_AES_CMAC)
417 ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm); 418 ieee80211_aes_cmac_key_free(key->u.aes_cmac.tfm);
418 if (key->local) { 419 if (key->local) {
420 struct ieee80211_sub_if_data *sdata = key->sdata;
421
419 ieee80211_debugfs_key_remove(key); 422 ieee80211_debugfs_key_remove(key);
420 key->sdata->crypto_tx_tailroom_needed_cnt--; 423
424 if (delay_tailroom) {
425 /* see ieee80211_delayed_tailroom_dec */
426 sdata->crypto_tx_tailroom_pending_dec++;
427 schedule_delayed_work(&sdata->dec_tailroom_needed_wk,
428 HZ/2);
429 } else {
430 sdata->crypto_tx_tailroom_needed_cnt--;
431 }
421 } 432 }
422 433
423 kfree(key); 434 kfree(key);
@@ -440,32 +451,6 @@ int ieee80211_key_link(struct ieee80211_key *key,
440 key->sdata = sdata; 451 key->sdata = sdata;
441 key->sta = sta; 452 key->sta = sta;
442 453
443 if (sta) {
444 /*
445 * some hardware cannot handle TKIP with QoS, so
446 * we indicate whether QoS could be in use.
447 */
448 if (test_sta_flag(sta, WLAN_STA_WME))
449 key->conf.flags |= IEEE80211_KEY_FLAG_WMM_STA;
450 } else {
451 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
452 struct sta_info *ap;
453
454 /*
455 * We're getting a sta pointer in, so must be under
456 * appropriate locking for sta_info_get().
457 */
458
459 /* same here, the AP could be using QoS */
460 ap = sta_info_get(key->sdata, key->sdata->u.mgd.bssid);
461 if (ap) {
462 if (test_sta_flag(ap, WLAN_STA_WME))
463 key->conf.flags |=
464 IEEE80211_KEY_FLAG_WMM_STA;
465 }
466 }
467 }
468
469 mutex_lock(&sdata->local->key_mtx); 454 mutex_lock(&sdata->local->key_mtx);
470 455
471 if (sta && pairwise) 456 if (sta && pairwise)
@@ -478,7 +463,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
478 increment_tailroom_need_count(sdata); 463 increment_tailroom_need_count(sdata);
479 464
480 __ieee80211_key_replace(sdata, sta, pairwise, old_key, key); 465 __ieee80211_key_replace(sdata, sta, pairwise, old_key, key);
481 __ieee80211_key_destroy(old_key); 466 __ieee80211_key_destroy(old_key, true);
482 467
483 ieee80211_debugfs_key_add(key); 468 ieee80211_debugfs_key_add(key);
484 469
@@ -489,7 +474,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
489 return ret; 474 return ret;
490} 475}
491 476
492void __ieee80211_key_free(struct ieee80211_key *key) 477void __ieee80211_key_free(struct ieee80211_key *key, bool delay_tailroom)
493{ 478{
494 if (!key) 479 if (!key)
495 return; 480 return;
@@ -501,14 +486,14 @@ void __ieee80211_key_free(struct ieee80211_key *key)
501 __ieee80211_key_replace(key->sdata, key->sta, 486 __ieee80211_key_replace(key->sdata, key->sta,
502 key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE, 487 key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE,
503 key, NULL); 488 key, NULL);
504 __ieee80211_key_destroy(key); 489 __ieee80211_key_destroy(key, delay_tailroom);
505} 490}
506 491
507void ieee80211_key_free(struct ieee80211_local *local, 492void ieee80211_key_free(struct ieee80211_local *local,
508 struct ieee80211_key *key) 493 struct ieee80211_key *key)
509{ 494{
510 mutex_lock(&local->key_mtx); 495 mutex_lock(&local->key_mtx);
511 __ieee80211_key_free(key); 496 __ieee80211_key_free(key, true);
512 mutex_unlock(&local->key_mtx); 497 mutex_unlock(&local->key_mtx);
513} 498}
514 499
@@ -566,36 +551,60 @@ void ieee80211_iter_keys(struct ieee80211_hw *hw,
566} 551}
567EXPORT_SYMBOL(ieee80211_iter_keys); 552EXPORT_SYMBOL(ieee80211_iter_keys);
568 553
569void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata)
570{
571 struct ieee80211_key *key;
572
573 ASSERT_RTNL();
574
575 mutex_lock(&sdata->local->key_mtx);
576
577 list_for_each_entry(key, &sdata->key_list, list)
578 ieee80211_key_disable_hw_accel(key);
579
580 mutex_unlock(&sdata->local->key_mtx);
581}
582
583void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata) 554void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata)
584{ 555{
585 struct ieee80211_key *key, *tmp; 556 struct ieee80211_key *key, *tmp;
586 557
558 cancel_delayed_work_sync(&sdata->dec_tailroom_needed_wk);
559
587 mutex_lock(&sdata->local->key_mtx); 560 mutex_lock(&sdata->local->key_mtx);
588 561
562 sdata->crypto_tx_tailroom_needed_cnt -=
563 sdata->crypto_tx_tailroom_pending_dec;
564 sdata->crypto_tx_tailroom_pending_dec = 0;
565
589 ieee80211_debugfs_key_remove_mgmt_default(sdata); 566 ieee80211_debugfs_key_remove_mgmt_default(sdata);
590 567
591 list_for_each_entry_safe(key, tmp, &sdata->key_list, list) 568 list_for_each_entry_safe(key, tmp, &sdata->key_list, list)
592 __ieee80211_key_free(key); 569 __ieee80211_key_free(key, false);
593 570
594 ieee80211_debugfs_key_update_default(sdata); 571 ieee80211_debugfs_key_update_default(sdata);
595 572
573 WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt ||
574 sdata->crypto_tx_tailroom_pending_dec);
575
596 mutex_unlock(&sdata->local->key_mtx); 576 mutex_unlock(&sdata->local->key_mtx);
597} 577}
598 578
579void ieee80211_delayed_tailroom_dec(struct work_struct *wk)
580{
581 struct ieee80211_sub_if_data *sdata;
582
583 sdata = container_of(wk, struct ieee80211_sub_if_data,
584 dec_tailroom_needed_wk.work);
585
586 /*
587 * The reason for the delayed tailroom needed decrementing is to
588 * make roaming faster: during roaming, all keys are first deleted
589 * and then new keys are installed. The first new key causes the
590 * crypto_tx_tailroom_needed_cnt to go from 0 to 1, which invokes
591 * the cost of synchronize_net() (which can be slow). Avoid this
592 * by deferring the crypto_tx_tailroom_needed_cnt decrementing on
593 * key removal for a while, so if we roam the value is larger than
594 * zero and no 0->1 transition happens.
595 *
596 * The cost is that if the AP switching was from an AP with keys
597 * to one without, we still allocate tailroom while it would no
598 * longer be needed. However, in the typical (fast) roaming case
599 * within an ESS this usually won't happen.
600 */
601
602 mutex_lock(&sdata->local->key_mtx);
603 sdata->crypto_tx_tailroom_needed_cnt -=
604 sdata->crypto_tx_tailroom_pending_dec;
605 sdata->crypto_tx_tailroom_pending_dec = 0;
606 mutex_unlock(&sdata->local->key_mtx);
607}
599 608
600void ieee80211_gtk_rekey_notify(struct ieee80211_vif *vif, const u8 *bssid, 609void ieee80211_gtk_rekey_notify(struct ieee80211_vif *vif, const u8 *bssid,
601 const u8 *replay_ctr, gfp_t gfp) 610 const u8 *replay_ctr, gfp_t gfp)
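
The comment in ieee80211_delayed_tailroom_dec() explains why the decrement is deferred; as a timeline it looks like this for a roam that deletes one key and installs another (a sketch of the sequence, not new code):

	/* t0: old key destroyed with delay_tailroom == true */
	sdata->crypto_tx_tailroom_pending_dec++;	/* cnt stays > 0 */
	schedule_delayed_work(&sdata->dec_tailroom_needed_wk, HZ/2);

	/* t1: new key installed before the work fires */
	increment_tailroom_need_count(sdata);	/* no 0->1 transition,
						 * so no synchronize_net() */

	/* t2, ~HZ/2 later: the worker folds the pending decrements */
	sdata->crypto_tx_tailroom_needed_cnt -=
		sdata->crypto_tx_tailroom_pending_dec;
	sdata->crypto_tx_tailroom_pending_dec = 0;
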
diff --git a/net/mac80211/key.h b/net/mac80211/key.h
index 382dc44ed330..2a682d81cee9 100644
--- a/net/mac80211/key.h
+++ b/net/mac80211/key.h
@@ -134,7 +134,7 @@ struct ieee80211_key *ieee80211_key_alloc(u32 cipher, int idx, size_t key_len,
134int __must_check ieee80211_key_link(struct ieee80211_key *key, 134int __must_check ieee80211_key_link(struct ieee80211_key *key,
135 struct ieee80211_sub_if_data *sdata, 135 struct ieee80211_sub_if_data *sdata,
136 struct sta_info *sta); 136 struct sta_info *sta);
137void __ieee80211_key_free(struct ieee80211_key *key); 137void __ieee80211_key_free(struct ieee80211_key *key, bool delay_tailroom);
138void ieee80211_key_free(struct ieee80211_local *local, 138void ieee80211_key_free(struct ieee80211_local *local,
139 struct ieee80211_key *key); 139 struct ieee80211_key *key);
140void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx, 140void ieee80211_set_default_key(struct ieee80211_sub_if_data *sdata, int idx,
@@ -143,9 +143,10 @@ void ieee80211_set_default_mgmt_key(struct ieee80211_sub_if_data *sdata,
143 int idx); 143 int idx);
144void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata); 144void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata);
145void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata); 145void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata);
146void ieee80211_disable_keys(struct ieee80211_sub_if_data *sdata);
147 146
148#define key_mtx_dereference(local, ref) \ 147#define key_mtx_dereference(local, ref) \
149 rcu_dereference_protected(ref, lockdep_is_held(&((local)->key_mtx))) 148 rcu_dereference_protected(ref, lockdep_is_held(&((local)->key_mtx)))
150 149
150void ieee80211_delayed_tailroom_dec(struct work_struct *wk);
151
151#endif /* IEEE80211_KEY_H */ 152#endif /* IEEE80211_KEY_H */
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 1a8591b77a13..5a53aa5ede80 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -399,30 +399,6 @@ static int ieee80211_ifa6_changed(struct notifier_block *nb,
399} 399}
400#endif 400#endif
401 401
402static int ieee80211_napi_poll(struct napi_struct *napi, int budget)
403{
404 struct ieee80211_local *local =
405 container_of(napi, struct ieee80211_local, napi);
406
407 return local->ops->napi_poll(&local->hw, budget);
408}
409
410void ieee80211_napi_schedule(struct ieee80211_hw *hw)
411{
412 struct ieee80211_local *local = hw_to_local(hw);
413
414 napi_schedule(&local->napi);
415}
416EXPORT_SYMBOL(ieee80211_napi_schedule);
417
418void ieee80211_napi_complete(struct ieee80211_hw *hw)
419{
420 struct ieee80211_local *local = hw_to_local(hw);
421
422 napi_complete(&local->napi);
423}
424EXPORT_SYMBOL(ieee80211_napi_complete);
425
426/* There isn't a lot of sense in it, but you can transmit anything you like */ 402/* There isn't a lot of sense in it, but you can transmit anything you like */
427static const struct ieee80211_txrx_stypes 403static const struct ieee80211_txrx_stypes
428ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = { 404ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
@@ -501,6 +477,27 @@ static const struct ieee80211_ht_cap mac80211_ht_capa_mod_mask = {
501 }, 477 },
502}; 478};
503 479
480static const struct ieee80211_vht_cap mac80211_vht_capa_mod_mask = {
481 .vht_cap_info =
482 cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC |
483 IEEE80211_VHT_CAP_SHORT_GI_80 |
484 IEEE80211_VHT_CAP_SHORT_GI_160 |
485 IEEE80211_VHT_CAP_RXSTBC_1 |
486 IEEE80211_VHT_CAP_RXSTBC_2 |
487 IEEE80211_VHT_CAP_RXSTBC_3 |
488 IEEE80211_VHT_CAP_RXSTBC_4 |
489 IEEE80211_VHT_CAP_TXSTBC |
490 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
491 IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
492 IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN |
493 IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
494 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK),
495 .supp_mcs = {
496 .rx_mcs_map = cpu_to_le16(~0),
497 .tx_mcs_map = cpu_to_le16(~0),
498 },
499};
500
504static const u8 extended_capabilities[] = { 501static const u8 extended_capabilities[] = {
505 0, 0, 0, 0, 0, 0, 0, 502 0, 0, 0, 0, 0, 0, 0,
506 WLAN_EXT_CAPA8_OPMODE_NOTIF, 503 WLAN_EXT_CAPA8_OPMODE_NOTIF,
@@ -572,7 +569,8 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
572 wiphy->features |= NL80211_FEATURE_SK_TX_STATUS | 569 wiphy->features |= NL80211_FEATURE_SK_TX_STATUS |
573 NL80211_FEATURE_SAE | 570 NL80211_FEATURE_SAE |
574 NL80211_FEATURE_HT_IBSS | 571 NL80211_FEATURE_HT_IBSS |
575 NL80211_FEATURE_VIF_TXPOWER; 572 NL80211_FEATURE_VIF_TXPOWER |
573 NL80211_FEATURE_USERSPACE_MPM;
576 574
577 if (!ops->hw_scan) 575 if (!ops->hw_scan)
578 wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN | 576 wiphy->features |= NL80211_FEATURE_LOW_PRIORITY_SCAN |
@@ -609,6 +607,7 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
609 IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH; 607 IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH;
610 local->user_power_level = IEEE80211_UNSET_POWER_LEVEL; 608 local->user_power_level = IEEE80211_UNSET_POWER_LEVEL;
611 wiphy->ht_capa_mod_mask = &mac80211_ht_capa_mod_mask; 609 wiphy->ht_capa_mod_mask = &mac80211_ht_capa_mod_mask;
610 wiphy->vht_capa_mod_mask = &mac80211_vht_capa_mod_mask;
612 611
613 INIT_LIST_HEAD(&local->interfaces); 612 INIT_LIST_HEAD(&local->interfaces);
614 613
@@ -664,9 +663,6 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
664 skb_queue_head_init(&local->skb_queue); 663 skb_queue_head_init(&local->skb_queue);
665 skb_queue_head_init(&local->skb_queue_unreliable); 664 skb_queue_head_init(&local->skb_queue_unreliable);
666 665
667 /* init dummy netdev for use w/ NAPI */
668 init_dummy_netdev(&local->napi_dev);
669
670 ieee80211_led_names(local); 666 ieee80211_led_names(local);
671 667
672 ieee80211_roc_setup(local); 668 ieee80211_roc_setup(local);
@@ -1021,9 +1017,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
1021 goto fail_ifa6; 1017 goto fail_ifa6;
1022#endif 1018#endif
1023 1019
1024 netif_napi_add(&local->napi_dev, &local->napi, ieee80211_napi_poll,
1025 local->hw.napi_weight);
1026
1027 return 0; 1020 return 0;
1028 1021
1029#if IS_ENABLED(CONFIG_IPV6) 1022#if IS_ENABLED(CONFIG_IPV6)
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 4749b3858695..77b5710db241 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -13,10 +13,6 @@
13#include "ieee80211_i.h" 13#include "ieee80211_i.h"
14#include "mesh.h" 14#include "mesh.h"
15 15
16#define TMR_RUNNING_HK 0
17#define TMR_RUNNING_MP 1
18#define TMR_RUNNING_MPR 2
19
20static int mesh_allocated; 16static int mesh_allocated;
21static struct kmem_cache *rm_cache; 17static struct kmem_cache *rm_cache;
22 18
@@ -50,11 +46,6 @@ static void ieee80211_mesh_housekeeping_timer(unsigned long data)
50 46
51 set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags); 47 set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags);
52 48
53 if (local->quiescing) {
54 set_bit(TMR_RUNNING_HK, &ifmsh->timers_running);
55 return;
56 }
57
58 ieee80211_queue_work(&local->hw, &sdata->work); 49 ieee80211_queue_work(&local->hw, &sdata->work);
59} 50}
60 51
@@ -165,7 +156,7 @@ void mesh_sta_cleanup(struct sta_info *sta)
 	 * an update.
 	 */
 	changed = mesh_accept_plinks_update(sdata);
-	if (sdata->u.mesh.security == IEEE80211_MESH_SEC_NONE) {
+	if (!sdata->u.mesh.user_mpm) {
 		changed |= mesh_plink_deactivate(sta);
 		del_timer_sync(&sta->plink_timer);
 	}
@@ -479,15 +470,8 @@ static void ieee80211_mesh_path_timer(unsigned long data)
 {
 	struct ieee80211_sub_if_data *sdata =
 		(struct ieee80211_sub_if_data *) data;
-	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
-	struct ieee80211_local *local = sdata->local;
-
-	if (local->quiescing) {
-		set_bit(TMR_RUNNING_MP, &ifmsh->timers_running);
-		return;
-	}
 
-	ieee80211_queue_work(&local->hw, &sdata->work);
+	ieee80211_queue_work(&sdata->local->hw, &sdata->work);
 }
 
 static void ieee80211_mesh_path_root_timer(unsigned long data)
@@ -495,16 +479,10 @@ static void ieee80211_mesh_path_root_timer(unsigned long data)
 	struct ieee80211_sub_if_data *sdata =
 		(struct ieee80211_sub_if_data *) data;
 	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
-	struct ieee80211_local *local = sdata->local;
 
 	set_bit(MESH_WORK_ROOT, &ifmsh->wrkq_flags);
 
-	if (local->quiescing) {
-		set_bit(TMR_RUNNING_MPR, &ifmsh->timers_running);
-		return;
-	}
-
-	ieee80211_queue_work(&local->hw, &sdata->work);
+	ieee80211_queue_work(&sdata->local->hw, &sdata->work);
 }
 
 void ieee80211_mesh_root_setup(struct ieee80211_if_mesh *ifmsh)
@@ -622,35 +600,6 @@ static void ieee80211_mesh_rootpath(struct ieee80211_sub_if_data *sdata)
 			  round_jiffies(TU_TO_EXP_TIME(interval)));
 }
 
-#ifdef CONFIG_PM
-void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata)
-{
-	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
-
-	/* use atomic bitops in case all timers fire at the same time */
-
-	if (del_timer_sync(&ifmsh->housekeeping_timer))
-		set_bit(TMR_RUNNING_HK, &ifmsh->timers_running);
-	if (del_timer_sync(&ifmsh->mesh_path_timer))
-		set_bit(TMR_RUNNING_MP, &ifmsh->timers_running);
-	if (del_timer_sync(&ifmsh->mesh_path_root_timer))
-		set_bit(TMR_RUNNING_MPR, &ifmsh->timers_running);
-}
-
-void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata)
-{
-	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
-
-	if (test_and_clear_bit(TMR_RUNNING_HK, &ifmsh->timers_running))
-		add_timer(&ifmsh->housekeeping_timer);
-	if (test_and_clear_bit(TMR_RUNNING_MP, &ifmsh->timers_running))
-		add_timer(&ifmsh->mesh_path_timer);
-	if (test_and_clear_bit(TMR_RUNNING_MPR, &ifmsh->timers_running))
-		add_timer(&ifmsh->mesh_path_root_timer);
-	ieee80211_mesh_root_setup(ifmsh);
-}
-#endif
-
 static int
 ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh)
 {
@@ -871,8 +820,6 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
 	local->fif_other_bss--;
 	atomic_dec(&local->iff_allmultis);
 	ieee80211_configure_filter(local);
-
-	sdata->u.mesh.timers_running = 0;
 }
 
 static void
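With the quiesce/restart bookkeeping gone, every mesh timer handler above reduces to the same shape: set a work flag if needed and queue the interface work. A stand-alone sketch of that pattern (hypothetical handler name, not the mac80211 code itself):

/* Timer handlers defer all real work to the workqueue.  Suspend now
 * removes the interface (and with it the timers), so no TMR_RUNNING_*
 * state is needed to re-arm them on resume. */
static void example_mesh_timer(unsigned long data)
{
	struct ieee80211_sub_if_data *sdata =
		(struct ieee80211_sub_if_data *) data;

	ieee80211_queue_work(&sdata->local->hw, &sdata->work);
}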
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 336c88a16687..6ffabbe99c46 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -313,8 +313,6 @@ void mesh_path_timer(unsigned long data);
 void mesh_path_flush_by_nexthop(struct sta_info *sta);
 void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
 			     struct sk_buff *skb);
-void mesh_path_quiesce(struct ieee80211_sub_if_data *sdata);
-void mesh_path_restart(struct ieee80211_sub_if_data *sdata);
 void mesh_path_tx_root_frame(struct ieee80211_sub_if_data *sdata);
 
 bool mesh_action_is_path_sel(struct ieee80211_mgmt *mgmt);
@@ -359,22 +357,12 @@ static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
 
 void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local);
 
-void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata);
-void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata);
-void mesh_plink_quiesce(struct sta_info *sta);
-void mesh_plink_restart(struct sta_info *sta);
 void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata);
 void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata);
 void ieee80211s_stop(void);
 #else
 static inline void
 ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) {}
-static inline void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata)
-{}
-static inline void ieee80211_mesh_restart(struct ieee80211_sub_if_data *sdata)
-{}
-static inline void mesh_plink_quiesce(struct sta_info *sta) {}
-static inline void mesh_plink_restart(struct sta_info *sta) {}
 static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
 { return false; }
 static inline void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 07d396d57079..937e06fe8f2a 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -420,7 +420,6 @@ __mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *hw_addr)
 		return NULL;
 
 	sta->plink_state = NL80211_PLINK_LISTEN;
-	init_timer(&sta->plink_timer);
 
 	sta_info_pre_move_state(sta, IEEE80211_STA_AUTH);
 	sta_info_pre_move_state(sta, IEEE80211_STA_ASSOC);
@@ -437,8 +436,9 @@ mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *addr,
 {
 	struct sta_info *sta = NULL;
 
-	/* Userspace handles peer allocation when security is enabled */
-	if (sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED)
+	/* Userspace handles station allocation */
+	if (sdata->u.mesh.user_mpm ||
+	    sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED)
 		cfg80211_notify_new_peer_candidate(sdata->dev, addr,
 						   elems->ie_start,
 						   elems->total_len,
@@ -534,10 +534,8 @@ static void mesh_plink_timer(unsigned long data)
 	 */
 	sta = (struct sta_info *) data;
 
-	if (sta->sdata->local->quiescing) {
-		sta->plink_timer_was_running = true;
+	if (sta->sdata->local->quiescing)
 		return;
-	}
 
 	spin_lock_bh(&sta->lock);
 	if (sta->ignore_plink_timer) {
@@ -598,29 +596,6 @@ static void mesh_plink_timer(unsigned long data)
 	}
 }
 
-#ifdef CONFIG_PM
-void mesh_plink_quiesce(struct sta_info *sta)
-{
-	if (!ieee80211_vif_is_mesh(&sta->sdata->vif))
-		return;
-
-	/* no kernel mesh sta timers have been initialized */
-	if (sta->sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE)
-		return;
-
-	if (del_timer_sync(&sta->plink_timer))
-		sta->plink_timer_was_running = true;
-}
-
-void mesh_plink_restart(struct sta_info *sta)
-{
-	if (sta->plink_timer_was_running) {
-		add_timer(&sta->plink_timer);
-		sta->plink_timer_was_running = false;
-	}
-}
-#endif
-
 static inline void mesh_plink_timer_set(struct sta_info *sta, int timeout)
 {
 	sta->plink_timer.expires = jiffies + (HZ * timeout / 1000);
@@ -695,6 +670,10 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
 	if (len < IEEE80211_MIN_ACTION_SIZE + 3)
 		return;
 
+	if (sdata->u.mesh.user_mpm)
+		/* userspace must register for these */
+		return;
+
 	if (is_multicast_ether_addr(mgmt->da)) {
 		mpl_dbg(sdata,
 			"Mesh plink: ignore frame from multicast address\n");
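The user_mpm checks added in this file all implement one rule: when userspace has claimed the mesh peering manager, the kernel neither allocates peers nor processes peering frames itself. A rough stand-alone sketch of that decision (hypothetical types, not the kernel's):

#include <stdbool.h>

struct mesh_conf_sketch {
	bool user_mpm;		/* userspace runs the peering manager */
	bool sec_authed;	/* an auth daemon manages peers anyway */
};

/* kernel-side MPM handles a peering frame only if nobody else does */
static bool kernel_handles_peering(const struct mesh_conf_sketch *c)
{
	return !c->user_mpm && !c->sec_authed;
}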
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 82cc30318a86..167158646593 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -87,9 +87,6 @@ MODULE_PARM_DESC(probe_wait_ms,
  */
 #define IEEE80211_SIGNAL_AVE_MIN_COUNT	4
 
-#define TMR_RUNNING_TIMER	0
-#define TMR_RUNNING_CHANSW	1
-
 /*
  * All cfg80211 functions have to be called outside a locked
  * section so that they can acquire a lock themselves... This
@@ -609,6 +606,7 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
 	BUILD_BUG_ON(sizeof(vht_cap) != sizeof(sband->vht_cap));
 
 	memcpy(&vht_cap, &sband->vht_cap, sizeof(vht_cap));
+	ieee80211_apply_vhtcap_overrides(sdata, &vht_cap);
 
 	/* determine capability flags */
 	cap = vht_cap.cap;
@@ -1038,14 +1036,8 @@ static void ieee80211_chswitch_timer(unsigned long data)
 {
 	struct ieee80211_sub_if_data *sdata =
 		(struct ieee80211_sub_if_data *) data;
-	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 
-	if (sdata->local->quiescing) {
-		set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running);
-		return;
-	}
-
-	ieee80211_queue_work(&sdata->local->hw, &ifmgd->chswitch_work);
+	ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.chswitch_work);
 }
 
 void
@@ -1802,9 +1794,11 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
 	sdata->vif.bss_conf.p2p_ctwindow = 0;
 	sdata->vif.bss_conf.p2p_oppps = false;
 
-	/* on the next assoc, re-program HT parameters */
+	/* on the next assoc, re-program HT/VHT parameters */
 	memset(&ifmgd->ht_capa, 0, sizeof(ifmgd->ht_capa));
 	memset(&ifmgd->ht_capa_mask, 0, sizeof(ifmgd->ht_capa_mask));
+	memset(&ifmgd->vht_capa, 0, sizeof(ifmgd->vht_capa));
+	memset(&ifmgd->vht_capa_mask, 0, sizeof(ifmgd->vht_capa_mask));
 
 	sdata->ap_power_level = IEEE80211_UNSET_POWER_LEVEL;
 
@@ -1830,8 +1824,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
 	del_timer_sync(&sdata->u.mgd.timer);
 	del_timer_sync(&sdata->u.mgd.chswitch_timer);
 
-	sdata->u.mgd.timers_running = 0;
-
 	sdata->vif.bss_conf.dtim_period = 0;
 
 	ifmgd->flags = 0;
@@ -3140,15 +3132,8 @@ static void ieee80211_sta_timer(unsigned long data)
 {
 	struct ieee80211_sub_if_data *sdata =
 		(struct ieee80211_sub_if_data *) data;
-	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-	struct ieee80211_local *local = sdata->local;
-
-	if (local->quiescing) {
-		set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
-		return;
-	}
 
-	ieee80211_queue_work(&local->hw, &sdata->work);
+	ieee80211_queue_work(&sdata->local->hw, &sdata->work);
 }
 
 static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
@@ -3500,72 +3485,6 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
 	}
 }
 
-#ifdef CONFIG_PM
-void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
-{
-	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-
-	/*
-	 * Stop timers before deleting work items, as timers
-	 * could race and re-add the work-items. They will be
-	 * re-established on connection.
-	 */
-	del_timer_sync(&ifmgd->conn_mon_timer);
-	del_timer_sync(&ifmgd->bcn_mon_timer);
-
-	/*
-	 * we need to use atomic bitops for the running bits
-	 * only because both timers might fire at the same
-	 * time -- the code here is properly synchronised.
-	 */
-
-	cancel_work_sync(&ifmgd->request_smps_work);
-
-	cancel_work_sync(&ifmgd->monitor_work);
-	cancel_work_sync(&ifmgd->beacon_connection_loss_work);
-	cancel_work_sync(&ifmgd->csa_connection_drop_work);
-	if (del_timer_sync(&ifmgd->timer))
-		set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
-
-	if (del_timer_sync(&ifmgd->chswitch_timer))
-		set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running);
-	cancel_work_sync(&ifmgd->chswitch_work);
-}
-
-void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
-{
-	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-
-	mutex_lock(&ifmgd->mtx);
-	if (!ifmgd->associated) {
-		mutex_unlock(&ifmgd->mtx);
-		return;
-	}
-
-	if (sdata->flags & IEEE80211_SDATA_DISCONNECT_RESUME) {
-		sdata->flags &= ~IEEE80211_SDATA_DISCONNECT_RESUME;
-		mlme_dbg(sdata, "driver requested disconnect after resume\n");
-		ieee80211_sta_connection_lost(sdata,
-					      ifmgd->associated->bssid,
-					      WLAN_REASON_UNSPECIFIED,
-					      true);
-		mutex_unlock(&ifmgd->mtx);
-		return;
-	}
-	mutex_unlock(&ifmgd->mtx);
-
-	if (test_and_clear_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running))
-		add_timer(&ifmgd->timer);
-	if (test_and_clear_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running))
-		add_timer(&ifmgd->chswitch_timer);
-	ieee80211_sta_reset_beacon_monitor(sdata);
-
-	mutex_lock(&sdata->local->mtx);
-	ieee80211_restart_sta_timer(sdata);
-	mutex_unlock(&sdata->local->mtx);
-}
-#endif
-
 /* interface setup */
 void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
 {
@@ -4073,6 +3992,9 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
 		ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
 	}
 
+	if (req->flags & ASSOC_REQ_DISABLE_VHT)
+		ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
+
 	/* Also disable HT if we don't support it or the AP doesn't use WMM */
 	sband = local->hw.wiphy->bands[req->bss->channel->band];
 	if (!sband->ht_cap.ht_supported ||
@@ -4096,6 +4018,10 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
 	memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask,
 	       sizeof(ifmgd->ht_capa_mask));
 
+	memcpy(&ifmgd->vht_capa, &req->vht_capa, sizeof(ifmgd->vht_capa));
+	memcpy(&ifmgd->vht_capa_mask, &req->vht_capa_mask,
+	       sizeof(ifmgd->vht_capa_mask));
+
 	if (req->ie && req->ie_len) {
 		memcpy(assoc_data->ie, req->ie, req->ie_len);
 		assoc_data->ie_len = req->ie_len;
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index 430bd254e496..950c95bec13d 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -277,7 +277,7 @@ void ieee80211_start_next_roc(struct ieee80211_local *local)
 		duration = 10;
 
 	ret = drv_remain_on_channel(local, roc->sdata, roc->chan,
-				    duration);
+				    duration, roc->type);
 
 	roc->started = true;
 
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index d0275f34bf70..b471a67f224d 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -6,32 +6,11 @@
 #include "driver-ops.h"
 #include "led.h"
 
-/* return value indicates whether the driver should be further notified */
-static void ieee80211_quiesce(struct ieee80211_sub_if_data *sdata)
-{
-	switch (sdata->vif.type) {
-	case NL80211_IFTYPE_STATION:
-		ieee80211_sta_quiesce(sdata);
-		break;
-	case NL80211_IFTYPE_ADHOC:
-		ieee80211_ibss_quiesce(sdata);
-		break;
-	case NL80211_IFTYPE_MESH_POINT:
-		ieee80211_mesh_quiesce(sdata);
-		break;
-	default:
-		break;
-	}
-
-	cancel_work_sync(&sdata->work);
-}
-
 int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 	struct ieee80211_sub_if_data *sdata;
 	struct sta_info *sta;
-	struct ieee80211_chanctx *ctx;
 
 	if (!local->open_count)
 		goto suspend;
@@ -93,19 +72,12 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 			return err;
 		} else if (err > 0) {
 			WARN_ON(err != 1);
-			local->wowlan = false;
+			return err;
 		} else {
-			list_for_each_entry(sdata, &local->interfaces, list)
-				if (ieee80211_sdata_running(sdata))
-					ieee80211_quiesce(sdata);
 			goto suspend;
 		}
 	}
 
-	/* disable keys */
-	list_for_each_entry(sdata, &local->interfaces, list)
-		ieee80211_disable_keys(sdata);
-
 	/* tear down aggregation sessions and remove STAs */
 	mutex_lock(&local->sta_mtx);
 	list_for_each_entry(sta, &local->sta_list, list) {
@@ -117,100 +89,25 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
 			WARN_ON(drv_sta_state(local, sta->sdata, sta,
 					      state, state - 1));
 		}
-
-		mesh_plink_quiesce(sta);
 	}
 	mutex_unlock(&local->sta_mtx);
 
 	/* remove all interfaces */
 	list_for_each_entry(sdata, &local->interfaces, list) {
-		static u8 zero_addr[ETH_ALEN] = {};
-		u32 changed = 0;
-
 		if (!ieee80211_sdata_running(sdata))
 			continue;
-
-		switch (sdata->vif.type) {
-		case NL80211_IFTYPE_AP_VLAN:
-		case NL80211_IFTYPE_MONITOR:
-			/* skip these */
-			continue;
-		case NL80211_IFTYPE_STATION:
-			if (sdata->vif.bss_conf.assoc)
-				changed = BSS_CHANGED_ASSOC |
-					  BSS_CHANGED_BSSID |
-					  BSS_CHANGED_IDLE;
-			break;
-		case NL80211_IFTYPE_AP:
-		case NL80211_IFTYPE_ADHOC:
-		case NL80211_IFTYPE_MESH_POINT:
-			if (sdata->vif.bss_conf.enable_beacon)
-				changed = BSS_CHANGED_BEACON_ENABLED;
-			break;
-		default:
-			break;
-		}
-
-		ieee80211_quiesce(sdata);
-
-		sdata->suspend_bss_conf = sdata->vif.bss_conf;
-		memset(&sdata->vif.bss_conf, 0, sizeof(sdata->vif.bss_conf));
-		sdata->vif.bss_conf.idle = true;
-		if (sdata->suspend_bss_conf.bssid)
-			sdata->vif.bss_conf.bssid = zero_addr;
-
-		/* disable beaconing or remove association */
-		ieee80211_bss_info_change_notify(sdata, changed);
-
-		if (sdata->vif.type == NL80211_IFTYPE_AP &&
-		    rcu_access_pointer(sdata->u.ap.beacon))
-			drv_stop_ap(local, sdata);
-
-		if (local->use_chanctx) {
-			struct ieee80211_chanctx_conf *conf;
-
-			mutex_lock(&local->chanctx_mtx);
-			conf = rcu_dereference_protected(
-					sdata->vif.chanctx_conf,
-					lockdep_is_held(&local->chanctx_mtx));
-			if (conf) {
-				ctx = container_of(conf,
-						   struct ieee80211_chanctx,
-						   conf);
-				drv_unassign_vif_chanctx(local, sdata, ctx);
-			}
-
-			mutex_unlock(&local->chanctx_mtx);
-		}
 		drv_remove_interface(local, sdata);
 	}
 
 	sdata = rtnl_dereference(local->monitor_sdata);
-	if (sdata) {
-		if (local->use_chanctx) {
-			struct ieee80211_chanctx_conf *conf;
-
-			mutex_lock(&local->chanctx_mtx);
-			conf = rcu_dereference_protected(
-					sdata->vif.chanctx_conf,
-					lockdep_is_held(&local->chanctx_mtx));
-			if (conf) {
-				ctx = container_of(conf,
-						   struct ieee80211_chanctx,
-						   conf);
-				drv_unassign_vif_chanctx(local, sdata, ctx);
-			}
-
-			mutex_unlock(&local->chanctx_mtx);
-		}
-
+	if (sdata)
 		drv_remove_interface(local, sdata);
-	}
 
-	mutex_lock(&local->chanctx_mtx);
-	list_for_each_entry(ctx, &local->chanctx_list, list)
-		drv_remove_chanctx(local, ctx);
-	mutex_unlock(&local->chanctx_mtx);
+	/*
+	 * We disconnected on all interfaces before suspend, all channel
+	 * contexts should be released.
+	 */
+	WARN_ON(!list_empty(&local->chanctx_list));
 
 	/* stop hardware - this must stop RX */
 	if (local->open_count)
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index eea45a2c7c35..1c36c9b4fa4a 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -55,7 +55,6 @@
 #include "rate.h"
 #include "rc80211_minstrel.h"
 
-#define SAMPLE_COLUMNS	10
 #define SAMPLE_TBL(_mi, _idx, _col) \
 		_mi->sample_table[(_idx * SAMPLE_COLUMNS) + _col]
 
@@ -70,16 +69,31 @@ rix_to_ndx(struct minstrel_sta_info *mi, int rix)
 	return i;
 }
 
+/* find & sort topmost throughput rates */
+static inline void
+minstrel_sort_best_tp_rates(struct minstrel_sta_info *mi, int i, u8 *tp_list)
+{
+	int j = MAX_THR_RATES;
+
+	while (j > 0 && mi->r[i].cur_tp > mi->r[tp_list[j - 1]].cur_tp)
+		j--;
+	if (j < MAX_THR_RATES - 1)
+		memmove(&tp_list[j + 1], &tp_list[j], MAX_THR_RATES - (j + 1));
+	if (j < MAX_THR_RATES)
+		tp_list[j] = i;
+}
+
 static void
 minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
 {
-	u32 max_tp = 0, index_max_tp = 0, index_max_tp2 = 0;
-	u32 max_prob = 0, index_max_prob = 0;
+	u8 tmp_tp_rate[MAX_THR_RATES];
+	u8 tmp_prob_rate = 0;
 	u32 usecs;
-	u32 p;
 	int i;
 
-	mi->stats_update = jiffies;
+	for (i = 0; i < MAX_THR_RATES; i++)
+		tmp_tp_rate[i] = 0;
+
 	for (i = 0; i < mi->n_rates; i++) {
 		struct minstrel_rate *mr = &mi->r[i];
 
@@ -87,27 +101,32 @@ minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
 		if (!usecs)
 			usecs = 1000000;
 
-		/* To avoid rounding issues, probabilities scale from 0 (0%)
-		 * to 18000 (100%) */
-		if (mr->attempts) {
-			p = (mr->success * 18000) / mr->attempts;
+		if (unlikely(mr->attempts > 0)) {
+			mr->sample_skipped = 0;
+			mr->cur_prob = MINSTREL_FRAC(mr->success, mr->attempts);
 			mr->succ_hist += mr->success;
 			mr->att_hist += mr->attempts;
-			mr->cur_prob = p;
-			p = ((p * (100 - mp->ewma_level)) + (mr->probability *
-				mp->ewma_level)) / 100;
-			mr->probability = p;
-			mr->cur_tp = p * (1000000 / usecs);
-		}
+			mr->probability = minstrel_ewma(mr->probability,
+							mr->cur_prob,
+							EWMA_LEVEL);
+		} else
+			mr->sample_skipped++;
 
 		mr->last_success = mr->success;
 		mr->last_attempts = mr->attempts;
 		mr->success = 0;
 		mr->attempts = 0;
 
+		/* Update throughput per rate, reset thr. below 10% success */
+		if (mr->probability < MINSTREL_FRAC(10, 100))
+			mr->cur_tp = 0;
+		else
+			mr->cur_tp = mr->probability * (1000000 / usecs);
+
 		/* Sample less often below the 10% chance of success.
 		 * Sample less often above the 95% chance of success. */
-		if ((mr->probability > 17100) || (mr->probability < 1800)) {
+		if (mr->probability > MINSTREL_FRAC(95, 100) ||
+		    mr->probability < MINSTREL_FRAC(10, 100)) {
 			mr->adjusted_retry_count = mr->retry_count >> 1;
 			if (mr->adjusted_retry_count > 2)
 				mr->adjusted_retry_count = 2;
@@ -118,35 +137,30 @@ minstrel_update_stats(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
 		}
 		if (!mr->adjusted_retry_count)
 			mr->adjusted_retry_count = 2;
-	}
 
-	for (i = 0; i < mi->n_rates; i++) {
-		struct minstrel_rate *mr = &mi->r[i];
-		if (max_tp < mr->cur_tp) {
-			index_max_tp = i;
-			max_tp = mr->cur_tp;
-		}
-		if (max_prob < mr->probability) {
-			index_max_prob = i;
-			max_prob = mr->probability;
+		minstrel_sort_best_tp_rates(mi, i, tmp_tp_rate);
+
+		/* To determine the most robust rate (max_prob_rate) used at
+		 * the 3rd mrr stage we distinguish between two cases:
+		 * (1) if any success probability >= 95%, out of those rates
+		 * choose the maximum throughput rate as max_prob_rate
+		 * (2) if all success probabilities < 95%, the rate with the
+		 * highest success probability is chosen as max_prob_rate */
+		if (mr->probability >= MINSTREL_FRAC(95, 100)) {
+			if (mr->cur_tp >= mi->r[tmp_prob_rate].cur_tp)
+				tmp_prob_rate = i;
+		} else {
+			if (mr->probability >= mi->r[tmp_prob_rate].probability)
+				tmp_prob_rate = i;
 		}
 	}
 
-	max_tp = 0;
-	for (i = 0; i < mi->n_rates; i++) {
-		struct minstrel_rate *mr = &mi->r[i];
-
-		if (i == index_max_tp)
-			continue;
+	/* Assign the new rate set */
+	memcpy(mi->max_tp_rate, tmp_tp_rate, sizeof(mi->max_tp_rate));
+	mi->max_prob_rate = tmp_prob_rate;
 
-		if (max_tp < mr->cur_tp) {
-			index_max_tp2 = i;
-			max_tp = mr->cur_tp;
-		}
-	}
-	mi->max_tp_rate = index_max_tp;
-	mi->max_tp_rate2 = index_max_tp2;
-	mi->max_prob_rate = index_max_prob;
+	/* Reset update timer */
+	mi->stats_update = jiffies;
 }
 
 static void
@@ -207,10 +221,10 @@ static int
 minstrel_get_next_sample(struct minstrel_sta_info *mi)
 {
 	unsigned int sample_ndx;
-	sample_ndx = SAMPLE_TBL(mi, mi->sample_idx, mi->sample_column);
-	mi->sample_idx++;
-	if ((int) mi->sample_idx > (mi->n_rates - 2)) {
-		mi->sample_idx = 0;
+	sample_ndx = SAMPLE_TBL(mi, mi->sample_row, mi->sample_column);
+	mi->sample_row++;
+	if ((int) mi->sample_row >= mi->n_rates) {
+		mi->sample_row = 0;
 		mi->sample_column++;
 		if (mi->sample_column >= SAMPLE_COLUMNS)
 			mi->sample_column = 0;
@@ -228,31 +242,37 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
 	struct minstrel_priv *mp = priv;
 	struct ieee80211_tx_rate *ar = info->control.rates;
 	unsigned int ndx, sample_ndx = 0;
-	bool mrr;
-	bool sample_slower = false;
-	bool sample = false;
+	bool mrr_capable;
+	bool indirect_rate_sampling = false;
+	bool rate_sampling = false;
 	int i, delta;
 	int mrr_ndx[3];
-	int sample_rate;
+	int sampling_ratio;
 
+	/* management/no-ack frames do not use rate control */
 	if (rate_control_send_low(sta, priv_sta, txrc))
 		return;
 
-	mrr = mp->has_mrr && !txrc->rts && !txrc->bss_conf->use_cts_prot;
-
-	ndx = mi->max_tp_rate;
-
-	if (mrr)
-		sample_rate = mp->lookaround_rate_mrr;
+	/* check multi-rate-retry capabilities & adjust lookaround_rate */
+	mrr_capable = mp->has_mrr &&
+		      !txrc->rts &&
+		      !txrc->bss_conf->use_cts_prot;
+	if (mrr_capable)
+		sampling_ratio = mp->lookaround_rate_mrr;
 	else
-		sample_rate = mp->lookaround_rate;
+		sampling_ratio = mp->lookaround_rate;
+
+	/* init rateindex [ndx] with max throughput rate */
+	ndx = mi->max_tp_rate[0];
 
+	/* increase sum packet counter */
 	mi->packet_count++;
-	delta = (mi->packet_count * sample_rate / 100) -
+
+	delta = (mi->packet_count * sampling_ratio / 100) -
 		(mi->sample_count + mi->sample_deferred / 2);
 
 	/* delta > 0: sampling required */
-	if ((delta > 0) && (mrr || !mi->prev_sample)) {
+	if ((delta > 0) && (mrr_capable || !mi->prev_sample)) {
 		struct minstrel_rate *msr;
 		if (mi->packet_count >= 10000) {
 			mi->sample_deferred = 0;
@@ -271,21 +291,28 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
 		mi->sample_count += (delta - mi->n_rates * 2);
 	}
 
+	/* get next random rate sample */
 	sample_ndx = minstrel_get_next_sample(mi);
 	msr = &mi->r[sample_ndx];
-	sample = true;
-	sample_slower = mrr && (msr->perfect_tx_time >
-				mi->r[ndx].perfect_tx_time);
+	rate_sampling = true;
 
-	if (!sample_slower) {
+	/* Decide if direct (1st mrr stage) or indirect (2nd mrr stage)
+	 * rate sampling method should be used.
+	 * Respect such rates that are not sampled for 20 iterations.
+	 */
+	if (mrr_capable &&
+	    msr->perfect_tx_time > mi->r[ndx].perfect_tx_time &&
+	    msr->sample_skipped < 20)
+		indirect_rate_sampling = true;
+
+	if (!indirect_rate_sampling) {
 		if (msr->sample_limit != 0) {
 			ndx = sample_ndx;
 			mi->sample_count++;
 			if (msr->sample_limit > 0)
 				msr->sample_limit--;
-		} else {
-			sample = false;
-		}
+		} else
+			rate_sampling = false;
 	} else {
 		/* Only use IEEE80211_TX_CTL_RATE_CTRL_PROBE to mark
 		 * packets that have the sampling rate deferred to the
@@ -297,34 +324,39 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
 			mi->sample_deferred++;
 		}
 	}
-	mi->prev_sample = sample;
+	mi->prev_sample = rate_sampling;
 
 	/* If we're not using MRR and the sampling rate already
 	 * has a probability of >95%, we shouldn't be attempting
 	 * to use it, as this only wastes precious airtime */
-	if (!mrr && sample && (mi->r[ndx].probability > 17100))
-		ndx = mi->max_tp_rate;
+	if (!mrr_capable && rate_sampling &&
+	    (mi->r[ndx].probability > MINSTREL_FRAC(95, 100)))
+		ndx = mi->max_tp_rate[0];
 
+	/* mrr setup for 1st stage */
 	ar[0].idx = mi->r[ndx].rix;
 	ar[0].count = minstrel_get_retry_count(&mi->r[ndx], info);
 
-	if (!mrr) {
-		if (!sample)
+	/* non mrr setup for 2nd stage */
+	if (!mrr_capable) {
+		if (!rate_sampling)
 			ar[0].count = mp->max_retry;
 		ar[1].idx = mi->lowest_rix;
 		ar[1].count = mp->max_retry;
 		return;
 	}
 
-	/* MRR setup */
-	if (sample) {
-		if (sample_slower)
+	/* mrr setup for 2nd stage */
+	if (rate_sampling) {
+		if (indirect_rate_sampling)
 			mrr_ndx[0] = sample_ndx;
 		else
-			mrr_ndx[0] = mi->max_tp_rate;
+			mrr_ndx[0] = mi->max_tp_rate[0];
 	} else {
-		mrr_ndx[0] = mi->max_tp_rate2;
+		mrr_ndx[0] = mi->max_tp_rate[1];
 	}
+
+	/* mrr setup for 3rd & 4th stage */
 	mrr_ndx[1] = mi->max_prob_rate;
 	mrr_ndx[2] = 0;
 	for (i = 1; i < 4; i++) {
@@ -351,26 +383,21 @@ static void
 init_sample_table(struct minstrel_sta_info *mi)
 {
 	unsigned int i, col, new_idx;
-	unsigned int n_srates = mi->n_rates - 1;
 	u8 rnd[8];
 
 	mi->sample_column = 0;
-	mi->sample_idx = 0;
-	memset(mi->sample_table, 0, SAMPLE_COLUMNS * mi->n_rates);
+	mi->sample_row = 0;
+	memset(mi->sample_table, 0xff, SAMPLE_COLUMNS * mi->n_rates);
 
 	for (col = 0; col < SAMPLE_COLUMNS; col++) {
-		for (i = 0; i < n_srates; i++) {
+		for (i = 0; i < mi->n_rates; i++) {
 			get_random_bytes(rnd, sizeof(rnd));
-			new_idx = (i + rnd[i & 7]) % n_srates;
+			new_idx = (i + rnd[i & 7]) % mi->n_rates;
 
-			while (SAMPLE_TBL(mi, new_idx, col) != 0)
-				new_idx = (new_idx + 1) % n_srates;
+			while (SAMPLE_TBL(mi, new_idx, col) != 0xff)
+				new_idx = (new_idx + 1) % mi->n_rates;
 
-			/* Don't sample the slowest rate (i.e. slowest base
-			 * rate). We must presume that the slowest rate works
-			 * fine, or else other management frames will also be
-			 * failing and the link will break */
-			SAMPLE_TBL(mi, new_idx, col) = i + 1;
+			SAMPLE_TBL(mi, new_idx, col) = i;
 		}
 	}
 }
@@ -542,9 +569,6 @@ minstrel_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
 	mp->lookaround_rate = 5;
 	mp->lookaround_rate_mrr = 10;
 
-	/* moving average weight for EWMA */
-	mp->ewma_level = 75;
-
 	/* maximum time that the hw is allowed to stay in one MRR segment */
 	mp->segment_size = 6000;
 
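The new minstrel_sort_best_tp_rates() above is a single insertion-sort step: called once per rate, it keeps tp_list holding the indices of the MAX_THR_RATES best-throughput rates in descending order. A self-contained sketch with made-up throughput numbers (same algorithm, lifted out of the kernel for illustration):

#include <stdio.h>
#include <string.h>

#define MAX_THR_RATES 4

struct rate { unsigned int cur_tp; };

static void sort_best_tp(struct rate *r, int i, unsigned char *tp_list)
{
	int j = MAX_THR_RATES;

	while (j > 0 && r[i].cur_tp > r[tp_list[j - 1]].cur_tp)
		j--;
	if (j < MAX_THR_RATES - 1)
		memmove(&tp_list[j + 1], &tp_list[j], MAX_THR_RATES - (j + 1));
	if (j < MAX_THR_RATES)
		tp_list[j] = i;
}

int main(void)
{
	struct rate r[] = { {10}, {40}, {20}, {30}, {50} };
	unsigned char best[MAX_THR_RATES] = { 0, 0, 0, 0 };

	for (int i = 0; i < 5; i++)
		sort_best_tp(r, i, best);

	/* prints "4 1 3 2": rate indices ordered by cur_tp */
	for (int i = 0; i < MAX_THR_RATES; i++)
		printf("%d ", best[i]);
	printf("\n");
	return 0;
}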
diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h
index 5ecf757817f2..85ebf42cb46d 100644
--- a/net/mac80211/rc80211_minstrel.h
+++ b/net/mac80211/rc80211_minstrel.h
@@ -9,6 +9,28 @@
 #ifndef __RC_MINSTREL_H
 #define __RC_MINSTREL_H
 
+#define EWMA_LEVEL	75	/* ewma weighting factor [%] */
+#define SAMPLE_COLUMNS	10	/* number of columns in sample table */
+
+
+/* scaled fraction values */
+#define MINSTREL_SCALE	16
+#define MINSTREL_FRAC(val, div) (((val) << MINSTREL_SCALE) / div)
+#define MINSTREL_TRUNC(val) ((val) >> MINSTREL_SCALE)
+
+/* number of highest throughput rates to consider */
+#define MAX_THR_RATES 4
+
+/*
+ * Perform EWMA (Exponentially Weighted Moving Average) calculation
+ */
+static inline int
+minstrel_ewma(int old, int new, int weight)
+{
+	return (new * (100 - weight) + old * weight) / 100;
+}
+
+
 struct minstrel_rate {
 	int bitrate;
 	int rix;
@@ -26,6 +48,7 @@ struct minstrel_rate {
 	u32 attempts;
 	u32 last_attempts;
 	u32 last_success;
+	u8 sample_skipped;
 
 	/* parts per thousand */
 	u32 cur_prob;
@@ -45,14 +68,13 @@ struct minstrel_sta_info {
 
 	unsigned int lowest_rix;
 
-	unsigned int max_tp_rate;
-	unsigned int max_tp_rate2;
-	unsigned int max_prob_rate;
+	u8 max_tp_rate[MAX_THR_RATES];
+	u8 max_prob_rate;
 	unsigned int packet_count;
 	unsigned int sample_count;
 	int sample_deferred;
 
-	unsigned int sample_idx;
+	unsigned int sample_row;
 	unsigned int sample_column;
 
 	int n_rates;
@@ -73,7 +95,6 @@ struct minstrel_priv {
 	unsigned int cw_min;
 	unsigned int cw_max;
 	unsigned int max_retry;
-	unsigned int ewma_level;
 	unsigned int segment_size;
 	unsigned int update_interval;
 	unsigned int lookaround_rate;
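The MINSTREL_SCALE helpers that now live in this header represent probabilities as fixed-point fractions scaled by 2^16, replacing the old 0..18000 scale, and minstrel_ewma() smooths them with the weight formerly stored in mp->ewma_level. A small user-space sketch of how the pieces combine (macros copied here only for illustration):

#include <stdio.h>

#define MINSTREL_SCALE 16
#define MINSTREL_FRAC(val, div) (((val) << MINSTREL_SCALE) / div)
#define MINSTREL_TRUNC(val) ((val) >> MINSTREL_SCALE)

static int minstrel_ewma(int old, int new, int weight)
{
	return (new * (100 - weight) + old * weight) / 100;
}

int main(void)
{
	int prob = MINSTREL_FRAC(50, 100);	/* start at 0.50 */

	/* three perfect update intervals at EWMA_LEVEL == 75 */
	for (int i = 0; i < 3; i++)
		prob = minstrel_ewma(prob, MINSTREL_FRAC(100, 100), 75);

	/* prints 78: the estimate converges slowly toward 100% */
	printf("%d\n", 100 * prob >> MINSTREL_SCALE);
	return 0;
}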
diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c
index d5a56226e675..d1048348d399 100644
--- a/net/mac80211/rc80211_minstrel_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_debugfs.c
@@ -73,15 +73,17 @@ minstrel_stats_open(struct inode *inode, struct file *file)
 	for (i = 0; i < mi->n_rates; i++) {
 		struct minstrel_rate *mr = &mi->r[i];
 
-		*(p++) = (i == mi->max_tp_rate) ? 'T' : ' ';
-		*(p++) = (i == mi->max_tp_rate2) ? 't' : ' ';
+		*(p++) = (i == mi->max_tp_rate[0]) ? 'A' : ' ';
+		*(p++) = (i == mi->max_tp_rate[1]) ? 'B' : ' ';
+		*(p++) = (i == mi->max_tp_rate[2]) ? 'C' : ' ';
+		*(p++) = (i == mi->max_tp_rate[3]) ? 'D' : ' ';
 		*(p++) = (i == mi->max_prob_rate) ? 'P' : ' ';
 		p += sprintf(p, "%3u%s", mr->bitrate / 2,
 			     (mr->bitrate & 1 ? ".5" : "  "));
 
-		tp = mr->cur_tp / ((18000 << 10) / 96);
-		prob = mr->cur_prob / 18;
-		eprob = mr->probability / 18;
+		tp = MINSTREL_TRUNC(mr->cur_tp / 10);
+		prob = MINSTREL_TRUNC(mr->cur_prob * 1000);
+		eprob = MINSTREL_TRUNC(mr->probability * 1000);
 
 		p += sprintf(p, "  %6u.%1u   %6u.%1u   %6u.%1u        "
 				"%3u(%3u)   %8llu   %8llu\n",
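The debugfs columns now decode those fixed-point values directly: multiplying a scaled probability by 1000 before truncating yields tenths of a percent, matching the "%6u.%1u" format. A stand-alone check of the arithmetic (illustrative only):

#include <stdio.h>

#define MINSTREL_SCALE 16
#define MINSTREL_FRAC(val, div) (((val) << MINSTREL_SCALE) / div)
#define MINSTREL_TRUNC(val) ((val) >> MINSTREL_SCALE)

int main(void)
{
	unsigned int cur_prob = MINSTREL_FRAC(87, 100);	/* 0.87 */
	unsigned int prob = MINSTREL_TRUNC(cur_prob * 1000);

	/* prints "87.0" */
	printf("%u.%u\n", prob / 10, prob % 10);
	return 0;
}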
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 3af141c69712..749552bdcfe1 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -17,8 +17,6 @@
 #include "rc80211_minstrel_ht.h"
 
 #define AVG_PKT_SIZE	1200
-#define SAMPLE_COLUMNS	10
-#define EWMA_LEVEL	75
 
 /* Number of bits for an average sized packet */
 #define MCS_NBITS (AVG_PKT_SIZE << 3)
@@ -26,11 +24,11 @@
 /* Number of symbols for a packet with (bps) bits per symbol */
 #define MCS_NSYMS(bps) ((MCS_NBITS + (bps) - 1) / (bps))
 
-/* Transmission time for a packet containing (syms) symbols */
+/* Transmission time (nanoseconds) for a packet containing (syms) symbols */
 #define MCS_SYMBOL_TIME(sgi, syms)					\
 	(sgi ?								\
-	  ((syms) * 18 + 4) / 5 :	/* syms * 3.6 us */		\
-	  (syms) << 2			/* syms * 4 us */		\
+	  ((syms) * 18000 + 4000) / 5 :	/* syms * 3.6 us */		\
+	  ((syms) * 1000) << 2		/* syms * 4 us */		\
 	)
 
 /* Transmit duration for the raw data part of an average sized packet */
@@ -64,9 +62,9 @@
 }
 
 #define CCK_DURATION(_bitrate, _short, _len)		\
-	(10 /* SIFS */ +				\
+	(1000 * (10 /* SIFS */ +			\
 	 (_short ? 72 + 24 : 144 + 48 ) +		\
-	 (8 * (_len + 4) * 10) / (_bitrate))
+	 (8 * (_len + 4) * 10) / (_bitrate)))
 
 #define CCK_ACK_DURATION(_bitrate, _short)			\
 	(CCK_DURATION((_bitrate > 10 ? 20 : 10), false, 60) +	\
@@ -129,15 +127,6 @@ const struct mcs_group minstrel_mcs_groups[] = {
 static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES];
 
 /*
- * Perform EWMA (Exponentially Weighted Moving Average) calculation
- */
-static int
-minstrel_ewma(int old, int new, int weight)
-{
-	return (new * (100 - weight) + old * weight) / 100;
-}
-
-/*
  * Look up an MCS group index based on mac80211 rate information
  */
 static int
@@ -211,7 +200,8 @@ static void
 minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate)
 {
 	struct minstrel_rate_stats *mr;
-	unsigned int usecs = 0;
+	unsigned int nsecs = 0;
+	unsigned int tp;
 
 	mr = &mi->groups[group].rates[rate];
 
@@ -221,10 +211,12 @@ minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate)
 	}
 
 	if (group != MINSTREL_CCK_GROUP)
-		usecs = mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len);
+		nsecs = 1000 * mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len);
 
-	usecs += minstrel_mcs_groups[group].duration[rate];
-	mr->cur_tp = MINSTREL_TRUNC((1000000 / usecs) * mr->probability);
+	nsecs += minstrel_mcs_groups[group].duration[rate];
+	tp = 1000000 * ((mr->probability * 1000) / nsecs);
+
+	mr->cur_tp = MINSTREL_TRUNC(tp);
 }
 
 /*
@@ -308,8 +300,8 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
 		}
 	}
 
-	/* try to sample up to half of the available rates during each interval */
-	mi->sample_count *= 4;
+	/* try to sample all available rates during each interval */
+	mi->sample_count *= 8;
 
 	cur_prob = 0;
 	cur_prob_tp = 0;
@@ -320,20 +312,13 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
 		if (!mg->supported)
 			continue;
 
-		mr = minstrel_get_ratestats(mi, mg->max_prob_rate);
-		if (cur_prob_tp < mr->cur_tp &&
-		    minstrel_mcs_groups[group].streams == 1) {
-			mi->max_prob_rate = mg->max_prob_rate;
-			cur_prob = mr->cur_prob;
-			cur_prob_tp = mr->cur_tp;
-		}
-
 		mr = minstrel_get_ratestats(mi, mg->max_tp_rate);
 		if (cur_tp < mr->cur_tp) {
 			mi->max_tp_rate2 = mi->max_tp_rate;
 			cur_tp2 = cur_tp;
 			mi->max_tp_rate = mg->max_tp_rate;
 			cur_tp = mr->cur_tp;
+			mi->max_prob_streams = minstrel_mcs_groups[group].streams - 1;
 		}
 
 		mr = minstrel_get_ratestats(mi, mg->max_tp_rate2);
@@ -343,6 +328,23 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
 		}
 	}
 
+	if (mi->max_prob_streams < 1)
+		mi->max_prob_streams = 1;
+
+	for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
+		mg = &mi->groups[group];
+		if (!mg->supported)
+			continue;
+		mr = minstrel_get_ratestats(mi, mg->max_prob_rate);
+		if (cur_prob_tp < mr->cur_tp &&
+		    minstrel_mcs_groups[group].streams <= mi->max_prob_streams) {
+			mi->max_prob_rate = mg->max_prob_rate;
+			cur_prob = mr->cur_prob;
+			cur_prob_tp = mr->cur_tp;
+		}
+	}
+
+
 	mi->stats_update = jiffies;
 }
 
@@ -467,7 +469,7 @@ minstrel_ht_tx_status(void *priv, struct ieee80211_supported_band *sband,
 
 	if (!mi->sample_wait && !mi->sample_tries && mi->sample_count > 0) {
 		mi->sample_wait = 16 + 2 * MINSTREL_TRUNC(mi->avg_ampdu_len);
-		mi->sample_tries = 2;
+		mi->sample_tries = 1;
 		mi->sample_count--;
 	}
 
@@ -536,7 +538,7 @@ minstrel_calc_retransmit(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
 	mr->retry_updated = true;
 
 	group = &minstrel_mcs_groups[index / MCS_GROUP_RATES];
-	tx_time_data = group->duration[index % MCS_GROUP_RATES] * ampdu_len;
+	tx_time_data = group->duration[index % MCS_GROUP_RATES] * ampdu_len / 1000;
 
 	/* Contention time for first 2 tries */
 	ctime = (t_slot * cw) >> 1;
@@ -616,6 +618,7 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
 {
 	struct minstrel_rate_stats *mr;
 	struct minstrel_mcs_group_data *mg;
+	unsigned int sample_dur, sample_group;
 	int sample_idx = 0;
 
 	if (mi->sample_wait > 0) {
@@ -626,11 +629,11 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
 	if (!mi->sample_tries)
 		return -1;
 
-	mi->sample_tries--;
 	mg = &mi->groups[mi->sample_group];
 	sample_idx = sample_table[mg->column][mg->index];
 	mr = &mg->rates[sample_idx];
-	sample_idx += mi->sample_group * MCS_GROUP_RATES;
+	sample_group = mi->sample_group;
+	sample_idx += sample_group * MCS_GROUP_RATES;
 	minstrel_next_sample_idx(mi);
 
 	/*
@@ -651,14 +654,18 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
 	 * Make sure that lower rates get sampled only occasionally,
 	 * if the link is working perfectly.
 	 */
-	if (minstrel_get_duration(sample_idx) >
-	    minstrel_get_duration(mi->max_tp_rate)) {
+	sample_dur = minstrel_get_duration(sample_idx);
+	if (sample_dur >= minstrel_get_duration(mi->max_tp_rate2) &&
+	    (mi->max_prob_streams <
+	     minstrel_mcs_groups[sample_group].streams ||
+	     sample_dur >= minstrel_get_duration(mi->max_prob_rate))) {
 		if (mr->sample_skipped < 20)
 			return -1;
 
 		if (mi->sample_slow++ > 2)
 			return -1;
 	}
+	mi->sample_tries--;
 
 	return sample_idx;
 }
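Switching the duration tables to nanoseconds, as done above, removes the rounding that microsecond math forced on the 3.6 us short-guard-interval symbol time. A stand-alone copy of the macro showing the effect (printed values are nanoseconds):

#include <stdio.h>

#define MCS_SYMBOL_TIME(sgi, syms)				\
	(sgi ?							\
	 ((syms) * 18000 + 4000) / 5 :	/* syms * 3.6 us */	\
	 ((syms) * 1000) << 2		/* syms * 4 us */	\
	)

int main(void)
{
	/* 10 symbols: 36800 ns with SGI, 40000 ns without;
	 * the old microsecond macro could only report 36 and 40 */
	printf("%d %d\n", MCS_SYMBOL_TIME(1, 10), MCS_SYMBOL_TIME(0, 10));
	return 0;
}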
diff --git a/net/mac80211/rc80211_minstrel_ht.h b/net/mac80211/rc80211_minstrel_ht.h
index 302dbd52180d..9b16e9de9923 100644
--- a/net/mac80211/rc80211_minstrel_ht.h
+++ b/net/mac80211/rc80211_minstrel_ht.h
@@ -16,11 +16,6 @@
 #define MINSTREL_MAX_STREAMS	3
 #define MINSTREL_STREAM_GROUPS	4
 
-/* scaled fraction values */
-#define MINSTREL_SCALE	16
-#define MINSTREL_FRAC(val, div) (((val) << MINSTREL_SCALE) / div)
-#define MINSTREL_TRUNC(val) ((val) >> MINSTREL_SCALE)
-
 #define MCS_GROUP_RATES	8
 
 struct mcs_group {
@@ -85,6 +80,7 @@ struct minstrel_ht_sta {
 
 	/* best probability rate */
 	unsigned int max_prob_rate;
+	unsigned int max_prob_streams;
 
 	/* time of last status update */
 	unsigned long stats_update;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index c6844ad080be..2528b5a4d6d4 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -648,24 +648,6 @@ static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
 	return RX_CONTINUE;
 }
 
-#define SEQ_MODULO 0x1000
-#define SEQ_MASK   0xfff
-
-static inline int seq_less(u16 sq1, u16 sq2)
-{
-	return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
-}
-
-static inline u16 seq_inc(u16 sq)
-{
-	return (sq + 1) & SEQ_MASK;
-}
-
-static inline u16 seq_sub(u16 sq1, u16 sq2)
-{
-	return (sq1 - sq2) & SEQ_MASK;
-}
-
 static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
 					    struct tid_ampdu_rx *tid_agg_rx,
 					    int index,
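The removed seq_* helpers were open-coded 12-bit sequence-number arithmetic; the rest of this diff replaces them with shared ieee80211_sn_* helpers of the same semantics. A stand-alone sketch of the wrap-around comparison they implement:

#include <stdio.h>
#include <stdint.h>

#define SN_MODULO 0x1000	/* 802.11 sequence numbers are 12 bits */
#define SN_MASK   0xfff

static int sn_less(uint16_t sq1, uint16_t sq2)
{
	return ((sq1 - sq2) & SN_MASK) > (SN_MODULO >> 1);
}

int main(void)
{
	/* prints "1 0": 4090 precedes 5 once the counter wraps at 4095 */
	printf("%d %d\n", sn_less(4090, 5), sn_less(5, 4090));
	return 0;
}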
@@ -687,7 +669,7 @@ static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
 	__skb_queue_tail(frames, skb);
 
 no_frame:
-	tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num);
+	tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num);
 }
 
 static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
@@ -699,8 +681,9 @@ static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata
 
 	lockdep_assert_held(&tid_agg_rx->reorder_lock);
 
-	while (seq_less(tid_agg_rx->head_seq_num, head_seq_num)) {
-		index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
+	while (ieee80211_sn_less(tid_agg_rx->head_seq_num, head_seq_num)) {
+		index = ieee80211_sn_sub(tid_agg_rx->head_seq_num,
+					 tid_agg_rx->ssn) %
 			tid_agg_rx->buf_size;
 		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
 						frames);
@@ -727,8 +710,8 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
 	lockdep_assert_held(&tid_agg_rx->reorder_lock);
 
 	/* release the buffer until next missing frame */
-	index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
-						tid_agg_rx->buf_size;
+	index = ieee80211_sn_sub(tid_agg_rx->head_seq_num,
+				 tid_agg_rx->ssn) % tid_agg_rx->buf_size;
 	if (!tid_agg_rx->reorder_buf[index] &&
 	    tid_agg_rx->stored_mpdu_num) {
 		/*
@@ -756,19 +739,22 @@ static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
 			 * Increment the head seq# also for the skipped slots.
 			 */
 			tid_agg_rx->head_seq_num =
-				(tid_agg_rx->head_seq_num + skipped) & SEQ_MASK;
+				(tid_agg_rx->head_seq_num +
+				 skipped) & IEEE80211_SN_MASK;
 			skipped = 0;
 		}
 	} else while (tid_agg_rx->reorder_buf[index]) {
 		ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
 						frames);
-		index =	seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn) %
+		index =	ieee80211_sn_sub(tid_agg_rx->head_seq_num,
+					 tid_agg_rx->ssn) %
 			tid_agg_rx->buf_size;
 	}
 
 	if (tid_agg_rx->stored_mpdu_num) {
-		j = index = seq_sub(tid_agg_rx->head_seq_num,
-				    tid_agg_rx->ssn) % tid_agg_rx->buf_size;
+		j = index = ieee80211_sn_sub(tid_agg_rx->head_seq_num,
+					     tid_agg_rx->ssn) %
+					     tid_agg_rx->buf_size;
 
 		for (; j != (index - 1) % tid_agg_rx->buf_size;
 		     j = (j + 1) % tid_agg_rx->buf_size) {
@@ -809,7 +795,7 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata
 	head_seq_num = tid_agg_rx->head_seq_num;
 
 	/* frame with out of date sequence number */
-	if (seq_less(mpdu_seq_num, head_seq_num)) {
+	if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
 		dev_kfree_skb(skb);
 		goto out;
 	}
@@ -818,8 +804,9 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata
 	 * If frame the sequence number exceeds our buffering window
 	 * size release some previous frames to make room for this one.
 	 */
-	if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
-		head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
+	if (!ieee80211_sn_less(mpdu_seq_num, head_seq_num + buf_size)) {
+		head_seq_num = ieee80211_sn_inc(
+				ieee80211_sn_sub(mpdu_seq_num, buf_size));
 		/* release stored frames up to new head to stack */
 		ieee80211_release_reorder_frames(sdata, tid_agg_rx,
 						 head_seq_num, frames);
@@ -827,7 +814,8 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata
827 814
828 /* Now the new frame is always in the range of the reordering buffer */ 815 /* Now the new frame is always in the range of the reordering buffer */
829 816
830 index = seq_sub(mpdu_seq_num, tid_agg_rx->ssn) % tid_agg_rx->buf_size; 817 index = ieee80211_sn_sub(mpdu_seq_num,
818 tid_agg_rx->ssn) % tid_agg_rx->buf_size;
831 819
832 /* check if we already stored this frame */ 820 /* check if we already stored this frame */
833 if (tid_agg_rx->reorder_buf[index]) { 821 if (tid_agg_rx->reorder_buf[index]) {
@@ -843,7 +831,8 @@ static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata
843 */ 831 */
844 if (mpdu_seq_num == tid_agg_rx->head_seq_num && 832 if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
845 tid_agg_rx->stored_mpdu_num == 0) { 833 tid_agg_rx->stored_mpdu_num == 0) {
846 tid_agg_rx->head_seq_num = seq_inc(tid_agg_rx->head_seq_num); 834 tid_agg_rx->head_seq_num =
835 ieee80211_sn_inc(tid_agg_rx->head_seq_num);
847 ret = false; 836 ret = false;
848 goto out; 837 goto out;
849 } 838 }
@@ -1894,8 +1883,10 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
1894 * 'align' will only take the values 0 or 2 here 1883 * 'align' will only take the values 0 or 2 here
1895 * since all frames are required to be aligned 1884 * since all frames are required to be aligned
1896 * to 2-byte boundaries when being passed to 1885 * to 2-byte boundaries when being passed to
1897 * mac80211. That also explains the __skb_push() 1886 * mac80211; the code here works just as well if
1898 * below. 1887 * that isn't true, but mac80211 assumes it can
1888 * access fields as 2-byte aligned (e.g. for
1889 * compare_ether_addr)
1899 */ 1890 */
1900 align = ((unsigned long)(skb->data + sizeof(struct ethhdr))) & 3; 1891 align = ((unsigned long)(skb->data + sizeof(struct ethhdr))) & 3;
1901 if (align) { 1892 if (align) {
@@ -2552,7 +2543,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
2552 case WLAN_SP_MESH_PEERING_CONFIRM: 2543 case WLAN_SP_MESH_PEERING_CONFIRM:
2553 if (!ieee80211_vif_is_mesh(&sdata->vif)) 2544 if (!ieee80211_vif_is_mesh(&sdata->vif))
2554 goto invalid; 2545 goto invalid;
2555 if (sdata->u.mesh.security != IEEE80211_MESH_SEC_NONE) 2546 if (sdata->u.mesh.user_mpm)
2556 /* userspace handles this frame */ 2547 /* userspace handles this frame */
2557 break; 2548 break;
2558 goto queue; 2549 goto queue;
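The seq_*() -> ieee80211_sn_*() conversion in the hunks above moves the reorder-release logic onto shared sequence-number helpers. A minimal sketch of the arithmetic they encapsulate (illustrative, not copied from the tree): 802.11 sequence numbers are 12 bits wide, so every comparison and distance is computed modulo 4096.

#define SN_MASK	0x0fff			/* 12-bit 802.11 sequence space */

static inline int sn_less(u16 sn1, u16 sn2)
{
	/* "less" on a circle: sn1 trails sn2 by less than half the space */
	return ((sn1 - sn2) & SN_MASK) > (SN_MASK >> 1);
}

static inline u16 sn_sub(u16 sn1, u16 sn2)
{
	return (sn1 - sn2) & SN_MASK;	/* circular distance */
}

static inline u16 sn_inc(u16 sn)
{
	return (sn + 1) & SN_MASK;
}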
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 238a0cca320e..85458a28ffa0 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -342,6 +342,11 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
342 INIT_WORK(&sta->drv_unblock_wk, sta_unblock); 342 INIT_WORK(&sta->drv_unblock_wk, sta_unblock);
343 INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work); 343 INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
344 mutex_init(&sta->ampdu_mlme.mtx); 344 mutex_init(&sta->ampdu_mlme.mtx);
345#ifdef CONFIG_MAC80211_MESH
346 if (ieee80211_vif_is_mesh(&sdata->vif) &&
347 !sdata->u.mesh.user_mpm)
348 init_timer(&sta->plink_timer);
349#endif
345 350
346 memcpy(sta->sta.addr, addr, ETH_ALEN); 351 memcpy(sta->sta.addr, addr, ETH_ALEN);
347 sta->local = local; 352 sta->local = local;
@@ -795,13 +800,16 @@ int __must_check __sta_info_destroy(struct sta_info *sta)
795 800
796 mutex_lock(&local->key_mtx); 801 mutex_lock(&local->key_mtx);
797 for (i = 0; i < NUM_DEFAULT_KEYS; i++) { 802 for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
798 __ieee80211_key_free(key_mtx_dereference(local, sta->gtk[i])); 803 __ieee80211_key_free(key_mtx_dereference(local, sta->gtk[i]),
804 true);
799 have_key = true; 805 have_key = true;
800 } 806 }
801 if (sta->ptk) { 807 if (sta->ptk) {
802 __ieee80211_key_free(key_mtx_dereference(local, sta->ptk)); 808 __ieee80211_key_free(key_mtx_dereference(local, sta->ptk),
809 true);
803 have_key = true; 810 have_key = true;
804 } 811 }
812
805 mutex_unlock(&local->key_mtx); 813 mutex_unlock(&local->key_mtx);
806 814
807 if (!have_key) 815 if (!have_key)
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 4947341a2a82..e5868c32d1a3 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -281,7 +281,6 @@ struct sta_ampdu_mlme {
281 * @plink_state: peer link state 281 * @plink_state: peer link state
282 * @plink_timeout: timeout of peer link 282 * @plink_timeout: timeout of peer link
283 * @plink_timer: peer link watch timer 283 * @plink_timer: peer link watch timer
284 * @plink_timer_was_running: used by suspend/resume to restore timers
285 * @t_offset: timing offset relative to this host 284 * @t_offset: timing offset relative to this host
286 * @t_offset_setpoint: reference timing offset of this sta to be used when 285 * @t_offset_setpoint: reference timing offset of this sta to be used when
287 * calculating clockdrift 286 * calculating clockdrift
@@ -379,7 +378,6 @@ struct sta_info {
379 __le16 reason; 378 __le16 reason;
380 u8 plink_retries; 379 u8 plink_retries;
381 bool ignore_plink_timer; 380 bool ignore_plink_timer;
382 bool plink_timer_was_running;
383 enum nl80211_plink_state plink_state; 381 enum nl80211_plink_state plink_state;
384 u32 plink_timeout; 382 u32 plink_timeout;
385 struct timer_list plink_timer; 383 struct timer_list plink_timer;
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index 3d7cd2a0582f..e7db2b804e0c 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -1042,15 +1042,17 @@ TRACE_EVENT(drv_remain_on_channel,
1042 TP_PROTO(struct ieee80211_local *local, 1042 TP_PROTO(struct ieee80211_local *local,
1043 struct ieee80211_sub_if_data *sdata, 1043 struct ieee80211_sub_if_data *sdata,
1044 struct ieee80211_channel *chan, 1044 struct ieee80211_channel *chan,
1045 unsigned int duration), 1045 unsigned int duration,
1046 enum ieee80211_roc_type type),
1046 1047
1047 TP_ARGS(local, sdata, chan, duration), 1048 TP_ARGS(local, sdata, chan, duration, type),
1048 1049
1049 TP_STRUCT__entry( 1050 TP_STRUCT__entry(
1050 LOCAL_ENTRY 1051 LOCAL_ENTRY
1051 VIF_ENTRY 1052 VIF_ENTRY
1052 __field(int, center_freq) 1053 __field(int, center_freq)
1053 __field(unsigned int, duration) 1054 __field(unsigned int, duration)
1055 __field(u32, type)
1054 ), 1056 ),
1055 1057
1056 TP_fast_assign( 1058 TP_fast_assign(
@@ -1058,12 +1060,13 @@ TRACE_EVENT(drv_remain_on_channel,
1058 VIF_ASSIGN; 1060 VIF_ASSIGN;
1059 __entry->center_freq = chan->center_freq; 1061 __entry->center_freq = chan->center_freq;
1060 __entry->duration = duration; 1062 __entry->duration = duration;
1063 __entry->type = type;
1061 ), 1064 ),
1062 1065
1063 TP_printk( 1066 TP_printk(
1064 LOCAL_PR_FMT VIF_PR_FMT " freq:%dMHz duration:%dms", 1067 LOCAL_PR_FMT VIF_PR_FMT " freq:%dMHz duration:%dms type=%d",
1065 LOCAL_PR_ARG, VIF_PR_ARG, 1068 LOCAL_PR_ARG, VIF_PR_ARG,
1066 __entry->center_freq, __entry->duration 1069 __entry->center_freq, __entry->duration, __entry->type
1067 ) 1070 )
1068); 1071);
1069 1072
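Callers of this tracepoint now pass the remain-on-channel type as a fifth argument. A hypothetical call site, assuming the ieee80211_roc_type enum values introduced by this series (the snippet itself is illustrative only):

	trace_drv_remain_on_channel(local, sdata, chan, duration,
				    IEEE80211_ROC_TYPE_NORMAL);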
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 8914d2d2881a..4e8a86163fc7 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -2085,7 +2085,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
2085 encaps_data = bridge_tunnel_header; 2085 encaps_data = bridge_tunnel_header;
2086 encaps_len = sizeof(bridge_tunnel_header); 2086 encaps_len = sizeof(bridge_tunnel_header);
2087 skip_header_bytes -= 2; 2087 skip_header_bytes -= 2;
2088 } else if (ethertype >= 0x600) { 2088 } else if (ethertype >= ETH_P_802_3_MIN) {
2089 encaps_data = rfc1042_header; 2089 encaps_data = rfc1042_header;
2090 encaps_len = sizeof(rfc1042_header); 2090 encaps_len = sizeof(rfc1042_header);
2091 skip_header_bytes -= 2; 2091 skip_header_bytes -= 2;
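Replacing the bare 0x600 with ETH_P_802_3_MIN names the IEEE 802.3 rule the check implements: a length/type field below 0x0600 is an 802.3 frame length, while 0x0600 and above is an EtherType, which is what makes RFC 1042 encapsulation applicable. The predicate in isolation (a sketch, assuming linux/if_ether.h provides the constant as this series adds):

#include <linux/if_ether.h>	/* ETH_P_802_3_MIN == 0x0600 */

static bool is_ethertype(u16 len_or_type)
{
	return len_or_type >= ETH_P_802_3_MIN;
}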
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 0f38f43ac62e..b7a856e3281b 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1357,6 +1357,25 @@ void ieee80211_stop_device(struct ieee80211_local *local)
1357 drv_stop(local); 1357 drv_stop(local);
1358} 1358}
1359 1359
1360static void ieee80211_assign_chanctx(struct ieee80211_local *local,
1361 struct ieee80211_sub_if_data *sdata)
1362{
1363 struct ieee80211_chanctx_conf *conf;
1364 struct ieee80211_chanctx *ctx;
1365
1366 if (!local->use_chanctx)
1367 return;
1368
1369 mutex_lock(&local->chanctx_mtx);
1370 conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
1371 lockdep_is_held(&local->chanctx_mtx));
1372 if (conf) {
1373 ctx = container_of(conf, struct ieee80211_chanctx, conf);
1374 drv_assign_vif_chanctx(local, sdata, ctx);
1375 }
1376 mutex_unlock(&local->chanctx_mtx);
1377}
1378
1360int ieee80211_reconfig(struct ieee80211_local *local) 1379int ieee80211_reconfig(struct ieee80211_local *local)
1361{ 1380{
1362 struct ieee80211_hw *hw = &local->hw; 1381 struct ieee80211_hw *hw = &local->hw;
@@ -1445,36 +1464,14 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1445 } 1464 }
1446 1465
1447 list_for_each_entry(sdata, &local->interfaces, list) { 1466 list_for_each_entry(sdata, &local->interfaces, list) {
1448 struct ieee80211_chanctx_conf *ctx_conf;
1449
1450 if (!ieee80211_sdata_running(sdata)) 1467 if (!ieee80211_sdata_running(sdata))
1451 continue; 1468 continue;
1452 1469 ieee80211_assign_chanctx(local, sdata);
1453 mutex_lock(&local->chanctx_mtx);
1454 ctx_conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
1455 lockdep_is_held(&local->chanctx_mtx));
1456 if (ctx_conf) {
1457 ctx = container_of(ctx_conf, struct ieee80211_chanctx,
1458 conf);
1459 drv_assign_vif_chanctx(local, sdata, ctx);
1460 }
1461 mutex_unlock(&local->chanctx_mtx);
1462 } 1470 }
1463 1471
1464 sdata = rtnl_dereference(local->monitor_sdata); 1472 sdata = rtnl_dereference(local->monitor_sdata);
1465 if (sdata && local->use_chanctx && ieee80211_sdata_running(sdata)) { 1473 if (sdata && ieee80211_sdata_running(sdata))
1466 struct ieee80211_chanctx_conf *ctx_conf; 1474 ieee80211_assign_chanctx(local, sdata);
1467
1468 mutex_lock(&local->chanctx_mtx);
1469 ctx_conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
1470 lockdep_is_held(&local->chanctx_mtx));
1471 if (ctx_conf) {
1472 ctx = container_of(ctx_conf, struct ieee80211_chanctx,
1473 conf);
1474 drv_assign_vif_chanctx(local, sdata, ctx);
1475 }
1476 mutex_unlock(&local->chanctx_mtx);
1477 }
1478 1475
1479 /* add STAs back */ 1476 /* add STAs back */
1480 mutex_lock(&local->sta_mtx); 1477 mutex_lock(&local->sta_mtx);
@@ -1534,11 +1531,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1534 BSS_CHANGED_IDLE | 1531 BSS_CHANGED_IDLE |
1535 BSS_CHANGED_TXPOWER; 1532 BSS_CHANGED_TXPOWER;
1536 1533
1537#ifdef CONFIG_PM
1538 if (local->resuming && !reconfig_due_to_wowlan)
1539 sdata->vif.bss_conf = sdata->suspend_bss_conf;
1540#endif
1541
1542 switch (sdata->vif.type) { 1534 switch (sdata->vif.type) {
1543 case NL80211_IFTYPE_STATION: 1535 case NL80211_IFTYPE_STATION:
1544 changed |= BSS_CHANGED_ASSOC | 1536 changed |= BSS_CHANGED_ASSOC |
@@ -1678,28 +1670,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1678 mb(); 1670 mb();
1679 local->resuming = false; 1671 local->resuming = false;
1680 1672
1681 list_for_each_entry(sdata, &local->interfaces, list) {
1682 switch(sdata->vif.type) {
1683 case NL80211_IFTYPE_STATION:
1684 ieee80211_sta_restart(sdata);
1685 break;
1686 case NL80211_IFTYPE_ADHOC:
1687 ieee80211_ibss_restart(sdata);
1688 break;
1689 case NL80211_IFTYPE_MESH_POINT:
1690 ieee80211_mesh_restart(sdata);
1691 break;
1692 default:
1693 break;
1694 }
1695 }
1696
1697 mod_timer(&local->sta_cleanup, jiffies + 1); 1673 mod_timer(&local->sta_cleanup, jiffies + 1);
1698
1699 mutex_lock(&local->sta_mtx);
1700 list_for_each_entry(sta, &local->sta_list, list)
1701 mesh_plink_restart(sta);
1702 mutex_unlock(&local->sta_mtx);
1703#else 1674#else
1704 WARN_ON(1); 1675 WARN_ON(1);
1705#endif 1676#endif
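The new ieee80211_assign_chanctx() helper wraps the update-side RCU pattern both reconfig call sites previously open-coded: the pointer is read under chanctx_mtx rather than rcu_read_lock(), so rcu_dereference_protected() with lockdep_is_held() is the right accessor, and with CONFIG_PROVE_RCU it also verifies the lock is actually held. The shape of the pattern (a sketch):

	mutex_lock(&local->chanctx_mtx);
	conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
					 lockdep_is_held(&local->chanctx_mtx));
	if (conf)	/* map conf back to its containing context */
		ctx = container_of(conf, struct ieee80211_chanctx, conf);
	mutex_unlock(&local->chanctx_mtx);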
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index a2c2258bc84e..171344d4eb7c 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -13,6 +13,104 @@
13#include "rate.h" 13#include "rate.h"
14 14
15 15
16static void __check_vhtcap_disable(struct ieee80211_sub_if_data *sdata,
17 struct ieee80211_sta_vht_cap *vht_cap,
18 u32 flag)
19{
20 __le32 le_flag = cpu_to_le32(flag);
21
22 if (sdata->u.mgd.vht_capa_mask.vht_cap_info & le_flag &&
23 !(sdata->u.mgd.vht_capa.vht_cap_info & le_flag))
24 vht_cap->cap &= ~flag;
25}
26
27void ieee80211_apply_vhtcap_overrides(struct ieee80211_sub_if_data *sdata,
28 struct ieee80211_sta_vht_cap *vht_cap)
29{
30 int i;
31 u16 rxmcs_mask, rxmcs_cap, rxmcs_n, txmcs_mask, txmcs_cap, txmcs_n;
32
33 if (!vht_cap->vht_supported)
34 return;
35
36 if (sdata->vif.type != NL80211_IFTYPE_STATION)
37 return;
38
39 __check_vhtcap_disable(sdata, vht_cap,
40 IEEE80211_VHT_CAP_RXLDPC);
41 __check_vhtcap_disable(sdata, vht_cap,
42 IEEE80211_VHT_CAP_SHORT_GI_80);
43 __check_vhtcap_disable(sdata, vht_cap,
44 IEEE80211_VHT_CAP_SHORT_GI_160);
45 __check_vhtcap_disable(sdata, vht_cap,
46 IEEE80211_VHT_CAP_TXSTBC);
47 __check_vhtcap_disable(sdata, vht_cap,
48 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE);
49 __check_vhtcap_disable(sdata, vht_cap,
50 IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE);
51 __check_vhtcap_disable(sdata, vht_cap,
52 IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN);
53 __check_vhtcap_disable(sdata, vht_cap,
54 IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN);
55
56 /* Allow user to decrease AMPDU length exponent */
57 if (sdata->u.mgd.vht_capa_mask.vht_cap_info &
58 cpu_to_le32(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK)) {
59 u32 cap, n;
60
61 n = le32_to_cpu(sdata->u.mgd.vht_capa.vht_cap_info) &
62 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
63 n >>= IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
64 cap = vht_cap->cap & IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
65 cap >>= IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
66
67 if (n < cap) {
68 vht_cap->cap &=
69 ~IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
70 vht_cap->cap |=
71 n << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
72 }
73 }
74
75 /* Allow the user to decrease MCSes */
76 rxmcs_mask =
77 le16_to_cpu(sdata->u.mgd.vht_capa_mask.supp_mcs.rx_mcs_map);
78 rxmcs_n = le16_to_cpu(sdata->u.mgd.vht_capa.supp_mcs.rx_mcs_map);
79 rxmcs_n &= rxmcs_mask;
80 rxmcs_cap = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
81
82 txmcs_mask =
83 le16_to_cpu(sdata->u.mgd.vht_capa_mask.supp_mcs.tx_mcs_map);
84 txmcs_n = le16_to_cpu(sdata->u.mgd.vht_capa.supp_mcs.tx_mcs_map);
85 txmcs_n &= txmcs_mask;
86 txmcs_cap = le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
87 for (i = 0; i < 8; i++) {
88 u8 m, n, c;
89
90 m = (rxmcs_mask >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
91 n = (rxmcs_n >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
92 c = (rxmcs_cap >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
93
94 if (m && ((c != IEEE80211_VHT_MCS_NOT_SUPPORTED && n < c) ||
95 n == IEEE80211_VHT_MCS_NOT_SUPPORTED)) {
96 rxmcs_cap &= ~(3 << 2*i);
97 rxmcs_cap |= (rxmcs_n & (3 << 2*i));
98 }
99
100 m = (txmcs_mask >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
101 n = (txmcs_n >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
102 c = (txmcs_cap >> 2*i) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
103
104 if (m && ((c != IEEE80211_VHT_MCS_NOT_SUPPORTED && n < c) ||
105 n == IEEE80211_VHT_MCS_NOT_SUPPORTED)) {
106 txmcs_cap &= ~(3 << 2*i);
107 txmcs_cap |= (txmcs_n & (3 << 2*i));
108 }
109 }
110 vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(rxmcs_cap);
111 vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(txmcs_cap);
112}
113
16void 114void
17ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, 115ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
18 struct ieee80211_supported_band *sband, 116 struct ieee80211_supported_band *sband,
@@ -20,6 +118,8 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
20 struct sta_info *sta) 118 struct sta_info *sta)
21{ 119{
22 struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap; 120 struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap;
121 struct ieee80211_sta_vht_cap own_cap;
122 u32 cap_info, i;
23 123
24 memset(vht_cap, 0, sizeof(*vht_cap)); 124 memset(vht_cap, 0, sizeof(*vht_cap));
25 125
@@ -35,12 +135,122 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
35 135
36 vht_cap->vht_supported = true; 136 vht_cap->vht_supported = true;
37 137
38 vht_cap->cap = le32_to_cpu(vht_cap_ie->vht_cap_info); 138 own_cap = sband->vht_cap;
139 /*
 140 * If the user has specified capability overrides, apply
 141 * them when the station we're setting up is the AP that
 142 * we advertised a restricted capability set to. Override
 143 * our own capabilities and then use those below.
144 */
145 if (sdata->vif.type == NL80211_IFTYPE_STATION &&
146 !test_sta_flag(sta, WLAN_STA_TDLS_PEER))
147 ieee80211_apply_vhtcap_overrides(sdata, &own_cap);
148
149 /* take some capabilities as-is */
150 cap_info = le32_to_cpu(vht_cap_ie->vht_cap_info);
151 vht_cap->cap = cap_info;
152 vht_cap->cap &= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 |
153 IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 |
154 IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
155 IEEE80211_VHT_CAP_RXLDPC |
156 IEEE80211_VHT_CAP_VHT_TXOP_PS |
157 IEEE80211_VHT_CAP_HTC_VHT |
158 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
159 IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_UNSOL_MFB |
160 IEEE80211_VHT_CAP_VHT_LINK_ADAPTATION_VHT_MRQ_MFB |
161 IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
162 IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
163
164 /* and some based on our own capabilities */
165 switch (own_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
166 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
167 vht_cap->cap |= cap_info &
168 IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ;
169 break;
170 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
171 vht_cap->cap |= cap_info &
172 IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
173 break;
174 default:
175 /* nothing */
176 break;
177 }
178
179 /* symmetric capabilities */
180 vht_cap->cap |= cap_info & own_cap.cap &
181 (IEEE80211_VHT_CAP_SHORT_GI_80 |
182 IEEE80211_VHT_CAP_SHORT_GI_160);
183
184 /* remaining ones */
185 if (own_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) {
186 vht_cap->cap |= cap_info &
187 (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
188 IEEE80211_VHT_CAP_BEAMFORMER_ANTENNAS_MAX |
189 IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MAX);
190 }
191
192 if (own_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
193 vht_cap->cap |= cap_info &
194 IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
195
196 if (own_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
197 vht_cap->cap |= cap_info &
198 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
199
200 if (own_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
201 vht_cap->cap |= cap_info &
202 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE;
203
204 if (own_cap.cap & IEEE80211_VHT_CAP_TXSTBC)
205 vht_cap->cap |= cap_info & IEEE80211_VHT_CAP_RXSTBC_MASK;
206
207 if (own_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
208 vht_cap->cap |= cap_info & IEEE80211_VHT_CAP_TXSTBC;
39 209
 40 /* Copy peer MCS info, the driver might need it. */ 210 /* Copy peer MCS info, the driver might need it. */
41 memcpy(&vht_cap->vht_mcs, &vht_cap_ie->supp_mcs, 211 memcpy(&vht_cap->vht_mcs, &vht_cap_ie->supp_mcs,
42 sizeof(struct ieee80211_vht_mcs_info)); 212 sizeof(struct ieee80211_vht_mcs_info));
43 213
214 /* but also restrict MCSes */
215 for (i = 0; i < 8; i++) {
216 u16 own_rx, own_tx, peer_rx, peer_tx;
217
218 own_rx = le16_to_cpu(own_cap.vht_mcs.rx_mcs_map);
219 own_rx = (own_rx >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
220
221 own_tx = le16_to_cpu(own_cap.vht_mcs.tx_mcs_map);
222 own_tx = (own_tx >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
223
224 peer_rx = le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
225 peer_rx = (peer_rx >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
226
227 peer_tx = le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map);
228 peer_tx = (peer_tx >> i * 2) & IEEE80211_VHT_MCS_NOT_SUPPORTED;
229
230 if (peer_tx != IEEE80211_VHT_MCS_NOT_SUPPORTED) {
231 if (own_rx == IEEE80211_VHT_MCS_NOT_SUPPORTED)
232 peer_tx = IEEE80211_VHT_MCS_NOT_SUPPORTED;
233 else if (own_rx < peer_tx)
234 peer_tx = own_rx;
235 }
236
237 if (peer_rx != IEEE80211_VHT_MCS_NOT_SUPPORTED) {
238 if (own_tx == IEEE80211_VHT_MCS_NOT_SUPPORTED)
239 peer_rx = IEEE80211_VHT_MCS_NOT_SUPPORTED;
240 else if (own_tx < peer_rx)
241 peer_rx = own_tx;
242 }
243
244 vht_cap->vht_mcs.rx_mcs_map &=
245 ~cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << i * 2);
246 vht_cap->vht_mcs.rx_mcs_map |= cpu_to_le16(peer_rx << i * 2);
247
248 vht_cap->vht_mcs.tx_mcs_map &=
249 ~cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << i * 2);
250 vht_cap->vht_mcs.tx_mcs_map |= cpu_to_le16(peer_tx << i * 2);
251 }
252
253 /* finally set up the bandwidth */
44 switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { 254 switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
45 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: 255 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
46 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ: 256 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
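Both the override loop and the peer intersection above depend on the layout of VHT MCS maps: a 16-bit word holding one 2-bit field per spatial stream (NSS 1-8), where the value 3 (IEEE80211_VHT_MCS_NOT_SUPPORTED) marks the stream unsupported. That is what the recurring ">> 2*i" arithmetic extracts; isolated into helpers it would look like this (names hypothetical):

static u8 mcs_map_get(u16 map, int nss_idx)	/* nss_idx = 0..7 */
{
	return (map >> (2 * nss_idx)) & 0x3;
}

static u16 mcs_map_set(u16 map, int nss_idx, u8 val)
{
	map &= ~(0x3 << (2 * nss_idx));
	return map | (val << (2 * nss_idx));
}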
diff --git a/net/mac802154/mac802154.h b/net/mac802154/mac802154.h
index a4dcaf1dd4b6..5c9e021994ba 100644
--- a/net/mac802154/mac802154.h
+++ b/net/mac802154/mac802154.h
@@ -88,8 +88,6 @@ struct mac802154_sub_if_data {
88 88
89#define mac802154_to_priv(_hw) container_of(_hw, struct mac802154_priv, hw) 89#define mac802154_to_priv(_hw) container_of(_hw, struct mac802154_priv, hw)
90 90
91#define MAC802154_MAX_XMIT_ATTEMPTS 3
92
93#define MAC802154_CHAN_NONE (~(u8)0) /* No channel is assigned */ 91#define MAC802154_CHAN_NONE (~(u8)0) /* No channel is assigned */
94 92
95extern struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced; 93extern struct ieee802154_reduced_mlme_ops mac802154_mlme_reduced;
@@ -114,5 +112,6 @@ void mac802154_dev_set_ieee_addr(struct net_device *dev);
114u16 mac802154_dev_get_pan_id(const struct net_device *dev); 112u16 mac802154_dev_get_pan_id(const struct net_device *dev);
115void mac802154_dev_set_pan_id(struct net_device *dev, u16 val); 113void mac802154_dev_set_pan_id(struct net_device *dev, u16 val);
116void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan); 114void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan);
115u8 mac802154_dev_get_dsn(const struct net_device *dev);
117 116
118#endif /* MAC802154_H */ 117#endif /* MAC802154_H */
diff --git a/net/mac802154/mac_cmd.c b/net/mac802154/mac_cmd.c
index d8d277006089..a99910d4d52f 100644
--- a/net/mac802154/mac_cmd.c
+++ b/net/mac802154/mac_cmd.c
@@ -73,4 +73,5 @@ struct ieee802154_mlme_ops mac802154_mlme_wpan = {
73 .start_req = mac802154_mlme_start_req, 73 .start_req = mac802154_mlme_start_req,
74 .get_pan_id = mac802154_dev_get_pan_id, 74 .get_pan_id = mac802154_dev_get_pan_id,
75 .get_short_addr = mac802154_dev_get_short_addr, 75 .get_short_addr = mac802154_dev_get_short_addr,
76 .get_dsn = mac802154_dev_get_dsn,
76}; 77};
diff --git a/net/mac802154/mib.c b/net/mac802154/mib.c
index f47781ab0ccc..f03e55f2ebf0 100644
--- a/net/mac802154/mib.c
+++ b/net/mac802154/mib.c
@@ -159,6 +159,15 @@ void mac802154_dev_set_pan_id(struct net_device *dev, u16 val)
159 } 159 }
160} 160}
161 161
162u8 mac802154_dev_get_dsn(const struct net_device *dev)
163{
164 struct mac802154_sub_if_data *priv = netdev_priv(dev);
165
166 BUG_ON(dev->type != ARPHRD_IEEE802154);
167
168 return priv->dsn++;
169}
170
162static void phy_chan_notify(struct work_struct *work) 171static void phy_chan_notify(struct work_struct *work)
163{ 172{
164 struct phy_chan_notify_work *nw = container_of(work, 173 struct phy_chan_notify_work *nw = container_of(work,
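mac802154_dev_get_dsn() post-increments priv->dsn, so every call hands out a fresh data sequence number. A hypothetical caller going through the mlme op wired up in mac_cmd.c above (illustrative only):

	/* stamp an outgoing frame with the next DSN */
	u8 dsn = ieee802154_mlme_ops(dev)->get_dsn(dev);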
diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c
index 4e09d070995a..3fd3e07ec599 100644
--- a/net/mac802154/tx.c
+++ b/net/mac802154/tx.c
@@ -25,6 +25,7 @@
25#include <linux/if_arp.h> 25#include <linux/if_arp.h>
26#include <linux/crc-ccitt.h> 26#include <linux/crc-ccitt.h>
27 27
28#include <net/ieee802154_netdev.h>
28#include <net/mac802154.h> 29#include <net/mac802154.h>
29#include <net/wpan-phy.h> 30#include <net/wpan-phy.h>
30 31
@@ -39,12 +40,12 @@ struct xmit_work {
39 struct mac802154_priv *priv; 40 struct mac802154_priv *priv;
40 u8 chan; 41 u8 chan;
41 u8 page; 42 u8 page;
42 u8 xmit_attempts;
43}; 43};
44 44
45static void mac802154_xmit_worker(struct work_struct *work) 45static void mac802154_xmit_worker(struct work_struct *work)
46{ 46{
47 struct xmit_work *xw = container_of(work, struct xmit_work, work); 47 struct xmit_work *xw = container_of(work, struct xmit_work, work);
48 struct mac802154_sub_if_data *sdata;
48 int res; 49 int res;
49 50
50 mutex_lock(&xw->priv->phy->pib_lock); 51 mutex_lock(&xw->priv->phy->pib_lock);
@@ -60,18 +61,17 @@ static void mac802154_xmit_worker(struct work_struct *work)
60 } 61 }
61 62
62 res = xw->priv->ops->xmit(&xw->priv->hw, xw->skb); 63 res = xw->priv->ops->xmit(&xw->priv->hw, xw->skb);
64 if (res)
65 pr_debug("transmission failed\n");
63 66
64out: 67out:
65 mutex_unlock(&xw->priv->phy->pib_lock); 68 mutex_unlock(&xw->priv->phy->pib_lock);
66 69
67 if (res) { 70 /* Restart the netif queue on each sub_if_data object. */
68 if (xw->xmit_attempts++ < MAC802154_MAX_XMIT_ATTEMPTS) { 71 rcu_read_lock();
69 queue_work(xw->priv->dev_workqueue, &xw->work); 72 list_for_each_entry_rcu(sdata, &xw->priv->slaves, list)
70 return; 73 netif_wake_queue(sdata->dev);
71 } else 74 rcu_read_unlock();
72 pr_debug("transmission failed for %d times",
73 MAC802154_MAX_XMIT_ATTEMPTS);
74 }
75 75
76 dev_kfree_skb(xw->skb); 76 dev_kfree_skb(xw->skb);
77 77
@@ -82,6 +82,7 @@ netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb,
82 u8 page, u8 chan) 82 u8 page, u8 chan)
83{ 83{
84 struct xmit_work *work; 84 struct xmit_work *work;
85 struct mac802154_sub_if_data *sdata;
85 86
86 if (!(priv->phy->channels_supported[page] & (1 << chan))) { 87 if (!(priv->phy->channels_supported[page] & (1 << chan))) {
87 WARN_ON(1); 88 WARN_ON(1);
@@ -109,12 +110,17 @@ netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb,
109 return NETDEV_TX_BUSY; 110 return NETDEV_TX_BUSY;
110 } 111 }
111 112
113 /* Stop the netif queue on each sub_if_data object. */
114 rcu_read_lock();
115 list_for_each_entry_rcu(sdata, &priv->slaves, list)
116 netif_stop_queue(sdata->dev);
117 rcu_read_unlock();
118
112 INIT_WORK(&work->work, mac802154_xmit_worker); 119 INIT_WORK(&work->work, mac802154_xmit_worker);
113 work->skb = skb; 120 work->skb = skb;
114 work->priv = priv; 121 work->priv = priv;
115 work->page = page; 122 work->page = page;
116 work->chan = chan; 123 work->chan = chan;
117 work->xmit_attempts = 0;
118 124
119 queue_work(priv->dev_workqueue, &work->work); 125 queue_work(priv->dev_workqueue, &work->work);
120 126
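With the per-frame retry counter gone, flow control takes its place: the transmit path stops every slave's netif queue before deferring the frame to the worker, and the worker wakes them again once the PHY transmit has completed, so at most one frame per device is in flight. The pattern in miniature (a sketch, not the driver code):

	/* in ndo_start_xmit, before handing off to the workqueue: */
	netif_stop_queue(dev);
	queue_work(wq, &work->work);

	/* in the worker, after ops->xmit() has finished: */
	netif_wake_queue(dev);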
diff --git a/net/mac802154/wpan.c b/net/mac802154/wpan.c
index d20c6d3c247d..2ca2f4dceab7 100644
--- a/net/mac802154/wpan.c
+++ b/net/mac802154/wpan.c
@@ -145,6 +145,8 @@ static int mac802154_header_create(struct sk_buff *skb,
145 145
146 head[pos++] = mac_cb(skb)->seq; /* DSN/BSN */ 146 head[pos++] = mac_cb(skb)->seq; /* DSN/BSN */
147 fc = mac_cb_type(skb); 147 fc = mac_cb_type(skb);
148 if (mac_cb_is_ackreq(skb))
149 fc |= IEEE802154_FC_ACK_REQ;
148 150
149 if (!saddr) { 151 if (!saddr) {
150 spin_lock_bh(&priv->mib_lock); 152 spin_lock_bh(&priv->mib_lock);
@@ -358,7 +360,7 @@ void mac802154_wpan_setup(struct net_device *dev)
358 dev->header_ops = &mac802154_header_ops; 360 dev->header_ops = &mac802154_header_ops;
359 dev->needed_tailroom = 2; /* FCS */ 361 dev->needed_tailroom = 2; /* FCS */
360 dev->mtu = IEEE802154_MTU; 362 dev->mtu = IEEE802154_MTU;
361 dev->tx_queue_len = 10; 363 dev->tx_queue_len = 300;
362 dev->type = ARPHRD_IEEE802154; 364 dev->type = ARPHRD_IEEE802154;
363 dev->flags = IFF_NOARP | IFF_BROADCAST; 365 dev->flags = IFF_NOARP | IFF_BROADCAST;
364 dev->watchdog_timeo = 0; 366 dev->watchdog_timeo = 0;
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index a9c488b6c50d..7d97302f7c07 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -276,10 +276,30 @@ void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
276EXPORT_SYMBOL(nf_nat_decode_session_hook); 276EXPORT_SYMBOL(nf_nat_decode_session_hook);
277#endif 277#endif
278 278
279static int __net_init netfilter_net_init(struct net *net)
280{
279#ifdef CONFIG_PROC_FS 281#ifdef CONFIG_PROC_FS
280struct proc_dir_entry *proc_net_netfilter; 282 net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter",
281EXPORT_SYMBOL(proc_net_netfilter); 283 net->proc_net);
284 if (!net->nf.proc_netfilter) {
285 if (!net_eq(net, &init_net))
286 pr_err("cannot create netfilter proc entry");
287
288 return -ENOMEM;
289 }
282#endif 290#endif
291 return 0;
292}
293
294static void __net_exit netfilter_net_exit(struct net *net)
295{
296 remove_proc_entry("netfilter", net->proc_net);
297}
298
299static struct pernet_operations netfilter_net_ops = {
300 .init = netfilter_net_init,
301 .exit = netfilter_net_exit,
302};
283 303
284void __init netfilter_init(void) 304void __init netfilter_init(void)
285{ 305{
@@ -289,11 +309,8 @@ void __init netfilter_init(void)
289 INIT_LIST_HEAD(&nf_hooks[i][h]); 309 INIT_LIST_HEAD(&nf_hooks[i][h]);
290 } 310 }
291 311
292#ifdef CONFIG_PROC_FS 312 if (register_pernet_subsys(&netfilter_net_ops) < 0)
293 proc_net_netfilter = proc_mkdir("netfilter", init_net.proc_net);
294 if (!proc_net_netfilter)
295 panic("cannot create netfilter proc entry"); 313 panic("cannot create netfilter proc entry");
296#endif
297 314
298 if (netfilter_log_init() < 0) 315 if (netfilter_log_init() < 0)
299 panic("cannot initialize nf_log"); 316 panic("cannot initialize nf_log");
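register_pernet_subsys() invokes .init for every namespace that exists at registration time and for each one created later, and .exit at namespace teardown; that is what turns the single init_net proc entry into a per-netns one. Note the asymmetric error handling: in a non-init namespace a failure merely logs and fails namespace creation, while a boot-time failure still panics via netfilter_init(). The lifecycle in outline (a sketch):

static int __net_init example_net_init(struct net *net)
{
	/* runs once per namespace, including init_net */
	return 0;
}

static void __net_exit example_net_exit(struct net *net)
{
	/* runs when the namespace is dismantled */
}

static struct pernet_operations example_net_ops = {
	.init = example_net_init,
	.exit = example_net_exit,
};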
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 1ba9dbc0e107..86f5e26f39d3 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -15,7 +15,6 @@
15#include <linux/ip.h> 15#include <linux/ip.h>
16#include <linux/skbuff.h> 16#include <linux/skbuff.h>
17#include <linux/spinlock.h> 17#include <linux/spinlock.h>
18#include <linux/netlink.h>
19#include <linux/rculist.h> 18#include <linux/rculist.h>
20#include <net/netlink.h> 19#include <net/netlink.h>
21 20
@@ -1085,7 +1084,7 @@ static int
1085dump_init(struct netlink_callback *cb) 1084dump_init(struct netlink_callback *cb)
1086{ 1085{
1087 struct nlmsghdr *nlh = nlmsg_hdr(cb->skb); 1086 struct nlmsghdr *nlh = nlmsg_hdr(cb->skb);
1088 int min_len = NLMSG_SPACE(sizeof(struct nfgenmsg)); 1087 int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
1089 struct nlattr *cda[IPSET_ATTR_CMD_MAX+1]; 1088 struct nlattr *cda[IPSET_ATTR_CMD_MAX+1];
1090 struct nlattr *attr = (void *)nlh + min_len; 1089 struct nlattr *attr = (void *)nlh + min_len;
1091 u32 dump_type; 1090 u32 dump_type;
@@ -1301,7 +1300,7 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
1301 struct sk_buff *skb2; 1300 struct sk_buff *skb2;
1302 struct nlmsgerr *errmsg; 1301 struct nlmsgerr *errmsg;
1303 size_t payload = sizeof(*errmsg) + nlmsg_len(nlh); 1302 size_t payload = sizeof(*errmsg) + nlmsg_len(nlh);
1304 int min_len = NLMSG_SPACE(sizeof(struct nfgenmsg)); 1303 int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
1305 struct nlattr *cda[IPSET_ATTR_CMD_MAX+1]; 1304 struct nlattr *cda[IPSET_ATTR_CMD_MAX+1];
1306 struct nlattr *cmdattr; 1305 struct nlattr *cmdattr;
1307 u32 *errline; 1306 u32 *errline;
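The two conversions are behavior-preserving: the deprecated NLMSG_SPACE() macro and nlmsg_total_size() both compute the aligned size of a netlink header plus payload. In terms of the definitions in linux/netlink.h and net/netlink.h:

	/* NLMSG_SPACE(len)      == NLMSG_ALIGN(NLMSG_LENGTH(len))
	 * nlmsg_total_size(len) == NLMSG_ALIGN(NLMSG_HDRLEN + len)
	 * and NLMSG_LENGTH(len) == len + NLMSG_HDRLEN, so both agree. */
	int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));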
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index 0b779d7df881..dfd7b65b3d2a 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -58,6 +58,18 @@ static inline void ip_vs_app_put(struct ip_vs_app *app)
58 module_put(app->module); 58 module_put(app->module);
59} 59}
60 60
61static void ip_vs_app_inc_destroy(struct ip_vs_app *inc)
62{
63 kfree(inc->timeout_table);
64 kfree(inc);
65}
66
67static void ip_vs_app_inc_rcu_free(struct rcu_head *head)
68{
69 struct ip_vs_app *inc = container_of(head, struct ip_vs_app, rcu_head);
70
71 ip_vs_app_inc_destroy(inc);
72}
61 73
62/* 74/*
63 * Allocate/initialize app incarnation and register it in proto apps. 75 * Allocate/initialize app incarnation and register it in proto apps.
@@ -106,8 +118,7 @@ ip_vs_app_inc_new(struct net *net, struct ip_vs_app *app, __u16 proto,
106 return 0; 118 return 0;
107 119
108 out: 120 out:
109 kfree(inc->timeout_table); 121 ip_vs_app_inc_destroy(inc);
110 kfree(inc);
111 return ret; 122 return ret;
112} 123}
113 124
@@ -131,8 +142,7 @@ ip_vs_app_inc_release(struct net *net, struct ip_vs_app *inc)
131 142
132 list_del(&inc->a_list); 143 list_del(&inc->a_list);
133 144
134 kfree(inc->timeout_table); 145 call_rcu(&inc->rcu_head, ip_vs_app_inc_rcu_free);
135 kfree(inc);
136} 146}
137 147
138 148
@@ -144,9 +154,9 @@ int ip_vs_app_inc_get(struct ip_vs_app *inc)
144{ 154{
145 int result; 155 int result;
146 156
147 atomic_inc(&inc->usecnt); 157 result = ip_vs_app_get(inc->app);
148 if (unlikely((result = ip_vs_app_get(inc->app)) != 1)) 158 if (result)
149 atomic_dec(&inc->usecnt); 159 atomic_inc(&inc->usecnt);
150 return result; 160 return result;
151} 161}
152 162
@@ -156,8 +166,8 @@ int ip_vs_app_inc_get(struct ip_vs_app *inc)
156 */ 166 */
157void ip_vs_app_inc_put(struct ip_vs_app *inc) 167void ip_vs_app_inc_put(struct ip_vs_app *inc)
158{ 168{
159 ip_vs_app_put(inc->app);
160 atomic_dec(&inc->usecnt); 169 atomic_dec(&inc->usecnt);
170 ip_vs_app_put(inc->app);
161} 171}
162 172
163 173
@@ -218,6 +228,7 @@ out_unlock:
218/* 228/*
219 * ip_vs_app unregistration routine 229 * ip_vs_app unregistration routine
220 * We are sure there are no app incarnations attached to services 230 * We are sure there are no app incarnations attached to services
231 * Caller should use synchronize_rcu() or rcu_barrier()
221 */ 232 */
222void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app) 233void unregister_ip_vs_app(struct net *net, struct ip_vs_app *app)
223{ 234{
@@ -341,14 +352,14 @@ static inline void vs_seq_update(struct ip_vs_conn *cp, struct ip_vs_seq *vseq,
341 unsigned int flag, __u32 seq, int diff) 352 unsigned int flag, __u32 seq, int diff)
342{ 353{
343 /* spinlock is to keep updating cp->flags atomic */ 354 /* spinlock is to keep updating cp->flags atomic */
344 spin_lock(&cp->lock); 355 spin_lock_bh(&cp->lock);
345 if (!(cp->flags & flag) || after(seq, vseq->init_seq)) { 356 if (!(cp->flags & flag) || after(seq, vseq->init_seq)) {
346 vseq->previous_delta = vseq->delta; 357 vseq->previous_delta = vseq->delta;
347 vseq->delta += diff; 358 vseq->delta += diff;
348 vseq->init_seq = seq; 359 vseq->init_seq = seq;
349 cp->flags |= flag; 360 cp->flags |= flag;
350 } 361 }
351 spin_unlock(&cp->lock); 362 spin_unlock_bh(&cp->lock);
352} 363}
353 364
354static inline int app_tcp_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb, 365static inline int app_tcp_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb,
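The ip_vs_app_inc_rcu_free() callback follows the standard RCU object lifecycle: first make the object unreachable, then let call_rcu() defer the actual free past the grace period so concurrent lockless readers never touch freed memory. The generic shape (a sketch, not the exact lists used here):

	list_del_rcu(&obj->list);		/* unpublish the object */
	call_rcu(&obj->rcu_head, obj_rcu_free);	/* reclaim after grace period */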
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 704e514e02ab..de6475894a39 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -79,51 +79,21 @@ static unsigned int ip_vs_conn_rnd __read_mostly;
79 79
80struct ip_vs_aligned_lock 80struct ip_vs_aligned_lock
81{ 81{
82 rwlock_t l; 82 spinlock_t l;
83} __attribute__((__aligned__(SMP_CACHE_BYTES))); 83} __attribute__((__aligned__(SMP_CACHE_BYTES)));
84 84
85/* lock array for conn table */ 85/* lock array for conn table */
86static struct ip_vs_aligned_lock 86static struct ip_vs_aligned_lock
87__ip_vs_conntbl_lock_array[CT_LOCKARRAY_SIZE] __cacheline_aligned; 87__ip_vs_conntbl_lock_array[CT_LOCKARRAY_SIZE] __cacheline_aligned;
88 88
89static inline void ct_read_lock(unsigned int key)
90{
91 read_lock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
92}
93
94static inline void ct_read_unlock(unsigned int key)
95{
96 read_unlock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
97}
98
99static inline void ct_write_lock(unsigned int key)
100{
101 write_lock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
102}
103
104static inline void ct_write_unlock(unsigned int key)
105{
106 write_unlock(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
107}
108
109static inline void ct_read_lock_bh(unsigned int key)
110{
111 read_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
112}
113
114static inline void ct_read_unlock_bh(unsigned int key)
115{
116 read_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
117}
118
119static inline void ct_write_lock_bh(unsigned int key) 89static inline void ct_write_lock_bh(unsigned int key)
120{ 90{
121 write_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); 91 spin_lock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
122} 92}
123 93
124static inline void ct_write_unlock_bh(unsigned int key) 94static inline void ct_write_unlock_bh(unsigned int key)
125{ 95{
126 write_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l); 96 spin_unlock_bh(&__ip_vs_conntbl_lock_array[key&CT_LOCKARRAY_MASK].l);
127} 97}
128 98
129 99
@@ -197,13 +167,13 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
197 /* Hash by protocol, client address and port */ 167 /* Hash by protocol, client address and port */
198 hash = ip_vs_conn_hashkey_conn(cp); 168 hash = ip_vs_conn_hashkey_conn(cp);
199 169
200 ct_write_lock(hash); 170 ct_write_lock_bh(hash);
201 spin_lock(&cp->lock); 171 spin_lock(&cp->lock);
202 172
203 if (!(cp->flags & IP_VS_CONN_F_HASHED)) { 173 if (!(cp->flags & IP_VS_CONN_F_HASHED)) {
204 hlist_add_head(&cp->c_list, &ip_vs_conn_tab[hash]);
205 cp->flags |= IP_VS_CONN_F_HASHED; 174 cp->flags |= IP_VS_CONN_F_HASHED;
206 atomic_inc(&cp->refcnt); 175 atomic_inc(&cp->refcnt);
176 hlist_add_head_rcu(&cp->c_list, &ip_vs_conn_tab[hash]);
207 ret = 1; 177 ret = 1;
208 } else { 178 } else {
209 pr_err("%s(): request for already hashed, called from %pF\n", 179 pr_err("%s(): request for already hashed, called from %pF\n",
@@ -212,7 +182,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
212 } 182 }
213 183
214 spin_unlock(&cp->lock); 184 spin_unlock(&cp->lock);
215 ct_write_unlock(hash); 185 ct_write_unlock_bh(hash);
216 186
217 return ret; 187 return ret;
218} 188}
@@ -220,7 +190,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
220 190
221/* 191/*
222 * UNhashes ip_vs_conn from ip_vs_conn_tab. 192 * UNhashes ip_vs_conn from ip_vs_conn_tab.
223 * returns bool success. 193 * returns bool success. Caller should hold conn reference.
224 */ 194 */
225static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp) 195static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
226{ 196{
@@ -230,11 +200,11 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
230 /* unhash it and decrease its reference counter */ 200 /* unhash it and decrease its reference counter */
231 hash = ip_vs_conn_hashkey_conn(cp); 201 hash = ip_vs_conn_hashkey_conn(cp);
232 202
233 ct_write_lock(hash); 203 ct_write_lock_bh(hash);
234 spin_lock(&cp->lock); 204 spin_lock(&cp->lock);
235 205
236 if (cp->flags & IP_VS_CONN_F_HASHED) { 206 if (cp->flags & IP_VS_CONN_F_HASHED) {
237 hlist_del(&cp->c_list); 207 hlist_del_rcu(&cp->c_list);
238 cp->flags &= ~IP_VS_CONN_F_HASHED; 208 cp->flags &= ~IP_VS_CONN_F_HASHED;
239 atomic_dec(&cp->refcnt); 209 atomic_dec(&cp->refcnt);
240 ret = 1; 210 ret = 1;
@@ -242,7 +212,37 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
242 ret = 0; 212 ret = 0;
243 213
244 spin_unlock(&cp->lock); 214 spin_unlock(&cp->lock);
245 ct_write_unlock(hash); 215 ct_write_unlock_bh(hash);
216
217 return ret;
218}
219
220/* Try to unlink ip_vs_conn from ip_vs_conn_tab.
221 * returns bool success.
222 */
223static inline bool ip_vs_conn_unlink(struct ip_vs_conn *cp)
224{
225 unsigned int hash;
226 bool ret;
227
228 hash = ip_vs_conn_hashkey_conn(cp);
229
230 ct_write_lock_bh(hash);
231 spin_lock(&cp->lock);
232
233 if (cp->flags & IP_VS_CONN_F_HASHED) {
234 ret = false;
235 /* Decrease refcnt and unlink conn only if we are last user */
236 if (atomic_cmpxchg(&cp->refcnt, 1, 0) == 1) {
237 hlist_del_rcu(&cp->c_list);
238 cp->flags &= ~IP_VS_CONN_F_HASHED;
239 ret = true;
240 }
241 } else
242 ret = atomic_read(&cp->refcnt) ? false : true;
243
244 spin_unlock(&cp->lock);
245 ct_write_unlock_bh(hash);
246 246
247 return ret; 247 return ret;
248} 248}
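ip_vs_conn_unlink() is the heart of this RCU conversion: readers no longer take the per-bucket lock, so a connection may only be removed when the expiration path holds the very last reference. atomic_cmpxchg(&cp->refcnt, 1, 0) performs that test-and-drop in a single step; if another CPU took a reference in the meantime the exchange fails and the caller re-arms the timer instead. The idiom in isolation (sketch):

	/* succeeds only if we were the sole owner (refcnt was exactly 1) */
	if (atomic_cmpxchg(&cp->refcnt, 1, 0) == 1) {
		hlist_del_rcu(&cp->c_list);	/* no other users remain */
		cp->flags &= ~IP_VS_CONN_F_HASHED;
	}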
@@ -262,24 +262,25 @@ __ip_vs_conn_in_get(const struct ip_vs_conn_param *p)
262 262
263 hash = ip_vs_conn_hashkey_param(p, false); 263 hash = ip_vs_conn_hashkey_param(p, false);
264 264
265 ct_read_lock(hash); 265 rcu_read_lock();
266 266
267 hlist_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { 267 hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
268 if (cp->af == p->af && 268 if (p->cport == cp->cport && p->vport == cp->vport &&
269 p->cport == cp->cport && p->vport == cp->vport && 269 cp->af == p->af &&
270 ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) && 270 ip_vs_addr_equal(p->af, p->caddr, &cp->caddr) &&
271 ip_vs_addr_equal(p->af, p->vaddr, &cp->vaddr) && 271 ip_vs_addr_equal(p->af, p->vaddr, &cp->vaddr) &&
272 ((!p->cport) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) && 272 ((!p->cport) ^ (!(cp->flags & IP_VS_CONN_F_NO_CPORT))) &&
273 p->protocol == cp->protocol && 273 p->protocol == cp->protocol &&
274 ip_vs_conn_net_eq(cp, p->net)) { 274 ip_vs_conn_net_eq(cp, p->net)) {
275 if (!__ip_vs_conn_get(cp))
276 continue;
275 /* HIT */ 277 /* HIT */
276 atomic_inc(&cp->refcnt); 278 rcu_read_unlock();
277 ct_read_unlock(hash);
278 return cp; 279 return cp;
279 } 280 }
280 } 281 }
281 282
282 ct_read_unlock(hash); 283 rcu_read_unlock();
283 284
284 return NULL; 285 return NULL;
285} 286}
@@ -346,14 +347,16 @@ struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p)
346 347
347 hash = ip_vs_conn_hashkey_param(p, false); 348 hash = ip_vs_conn_hashkey_param(p, false);
348 349
349 ct_read_lock(hash); 350 rcu_read_lock();
350 351
351 hlist_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { 352 hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
352 if (!ip_vs_conn_net_eq(cp, p->net)) 353 if (unlikely(p->pe_data && p->pe->ct_match)) {
353 continue; 354 if (!ip_vs_conn_net_eq(cp, p->net))
354 if (p->pe_data && p->pe->ct_match) { 355 continue;
355 if (p->pe == cp->pe && p->pe->ct_match(p, cp)) 356 if (p->pe == cp->pe && p->pe->ct_match(p, cp)) {
356 goto out; 357 if (__ip_vs_conn_get(cp))
358 goto out;
359 }
357 continue; 360 continue;
358 } 361 }
359 362
@@ -363,17 +366,18 @@ struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p)
363 * p->vaddr is a fwmark */ 366 * p->vaddr is a fwmark */
364 ip_vs_addr_equal(p->protocol == IPPROTO_IP ? AF_UNSPEC : 367 ip_vs_addr_equal(p->protocol == IPPROTO_IP ? AF_UNSPEC :
365 p->af, p->vaddr, &cp->vaddr) && 368 p->af, p->vaddr, &cp->vaddr) &&
366 p->cport == cp->cport && p->vport == cp->vport && 369 p->vport == cp->vport && p->cport == cp->cport &&
367 cp->flags & IP_VS_CONN_F_TEMPLATE && 370 cp->flags & IP_VS_CONN_F_TEMPLATE &&
368 p->protocol == cp->protocol) 371 p->protocol == cp->protocol &&
369 goto out; 372 ip_vs_conn_net_eq(cp, p->net)) {
373 if (__ip_vs_conn_get(cp))
374 goto out;
375 }
370 } 376 }
371 cp = NULL; 377 cp = NULL;
372 378
373 out: 379 out:
374 if (cp) 380 rcu_read_unlock();
375 atomic_inc(&cp->refcnt);
376 ct_read_unlock(hash);
377 381
378 IP_VS_DBG_BUF(9, "template lookup/in %s %s:%d->%s:%d %s\n", 382 IP_VS_DBG_BUF(9, "template lookup/in %s %s:%d->%s:%d %s\n",
379 ip_vs_proto_name(p->protocol), 383 ip_vs_proto_name(p->protocol),
@@ -398,23 +402,24 @@ struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p)
398 */ 402 */
399 hash = ip_vs_conn_hashkey_param(p, true); 403 hash = ip_vs_conn_hashkey_param(p, true);
400 404
401 ct_read_lock(hash); 405 rcu_read_lock();
402 406
403 hlist_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { 407 hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
404 if (cp->af == p->af && 408 if (p->vport == cp->cport && p->cport == cp->dport &&
405 p->vport == cp->cport && p->cport == cp->dport && 409 cp->af == p->af &&
406 ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) && 410 ip_vs_addr_equal(p->af, p->vaddr, &cp->caddr) &&
407 ip_vs_addr_equal(p->af, p->caddr, &cp->daddr) && 411 ip_vs_addr_equal(p->af, p->caddr, &cp->daddr) &&
408 p->protocol == cp->protocol && 412 p->protocol == cp->protocol &&
409 ip_vs_conn_net_eq(cp, p->net)) { 413 ip_vs_conn_net_eq(cp, p->net)) {
414 if (!__ip_vs_conn_get(cp))
415 continue;
410 /* HIT */ 416 /* HIT */
411 atomic_inc(&cp->refcnt);
412 ret = cp; 417 ret = cp;
413 break; 418 break;
414 } 419 }
415 } 420 }
416 421
417 ct_read_unlock(hash); 422 rcu_read_unlock();
418 423
419 IP_VS_DBG_BUF(9, "lookup/out %s %s:%d->%s:%d %s\n", 424 IP_VS_DBG_BUF(9, "lookup/out %s %s:%d->%s:%d %s\n",
420 ip_vs_proto_name(p->protocol), 425 ip_vs_proto_name(p->protocol),
@@ -457,13 +462,13 @@ void ip_vs_conn_put(struct ip_vs_conn *cp)
457void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport) 462void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport)
458{ 463{
459 if (ip_vs_conn_unhash(cp)) { 464 if (ip_vs_conn_unhash(cp)) {
460 spin_lock(&cp->lock); 465 spin_lock_bh(&cp->lock);
461 if (cp->flags & IP_VS_CONN_F_NO_CPORT) { 466 if (cp->flags & IP_VS_CONN_F_NO_CPORT) {
462 atomic_dec(&ip_vs_conn_no_cport_cnt); 467 atomic_dec(&ip_vs_conn_no_cport_cnt);
463 cp->flags &= ~IP_VS_CONN_F_NO_CPORT; 468 cp->flags &= ~IP_VS_CONN_F_NO_CPORT;
464 cp->cport = cport; 469 cp->cport = cport;
465 } 470 }
466 spin_unlock(&cp->lock); 471 spin_unlock_bh(&cp->lock);
467 472
468 /* hash on new dport */ 473 /* hash on new dport */
469 ip_vs_conn_hash(cp); 474 ip_vs_conn_hash(cp);
@@ -549,7 +554,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
549 return; 554 return;
550 555
551 /* Increase the refcnt counter of the dest */ 556 /* Increase the refcnt counter of the dest */
552 atomic_inc(&dest->refcnt); 557 ip_vs_dest_hold(dest);
553 558
554 conn_flags = atomic_read(&dest->conn_flags); 559 conn_flags = atomic_read(&dest->conn_flags);
555 if (cp->protocol != IPPROTO_UDP) 560 if (cp->protocol != IPPROTO_UDP)
@@ -606,20 +611,22 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
606 * Check if there is a destination for the connection, if so 611 * Check if there is a destination for the connection, if so
607 * bind the connection to the destination. 612 * bind the connection to the destination.
608 */ 613 */
609struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp) 614void ip_vs_try_bind_dest(struct ip_vs_conn *cp)
610{ 615{
611 struct ip_vs_dest *dest; 616 struct ip_vs_dest *dest;
612 617
618 rcu_read_lock();
613 dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, &cp->daddr, 619 dest = ip_vs_find_dest(ip_vs_conn_net(cp), cp->af, &cp->daddr,
614 cp->dport, &cp->vaddr, cp->vport, 620 cp->dport, &cp->vaddr, cp->vport,
615 cp->protocol, cp->fwmark, cp->flags); 621 cp->protocol, cp->fwmark, cp->flags);
616 if (dest) { 622 if (dest) {
617 struct ip_vs_proto_data *pd; 623 struct ip_vs_proto_data *pd;
618 624
619 spin_lock(&cp->lock); 625 spin_lock_bh(&cp->lock);
620 if (cp->dest) { 626 if (cp->dest) {
621 spin_unlock(&cp->lock); 627 spin_unlock_bh(&cp->lock);
622 return dest; 628 rcu_read_unlock();
629 return;
623 } 630 }
624 631
625 /* Applications work depending on the forwarding method 632 /* Applications work depending on the forwarding method
@@ -628,7 +635,7 @@ struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp)
628 ip_vs_unbind_app(cp); 635 ip_vs_unbind_app(cp);
629 636
630 ip_vs_bind_dest(cp, dest); 637 ip_vs_bind_dest(cp, dest);
631 spin_unlock(&cp->lock); 638 spin_unlock_bh(&cp->lock);
632 639
633 /* Update its packet transmitter */ 640 /* Update its packet transmitter */
634 cp->packet_xmit = NULL; 641 cp->packet_xmit = NULL;
@@ -643,7 +650,7 @@ struct ip_vs_dest *ip_vs_try_bind_dest(struct ip_vs_conn *cp)
643 if (pd && atomic_read(&pd->appcnt)) 650 if (pd && atomic_read(&pd->appcnt))
644 ip_vs_bind_app(cp, pd->pp); 651 ip_vs_bind_app(cp, pd->pp);
645 } 652 }
646 return dest; 653 rcu_read_unlock();
647} 654}
648 655
649 656
@@ -695,12 +702,7 @@ static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp)
695 dest->flags &= ~IP_VS_DEST_F_OVERLOAD; 702 dest->flags &= ~IP_VS_DEST_F_OVERLOAD;
696 } 703 }
697 704
698 /* 705 ip_vs_dest_put(dest);
699 * Simply decrease the refcnt of the dest, because the
700 * dest will be either in service's destination list
701 * or in the trash.
702 */
703 atomic_dec(&dest->refcnt);
704} 706}
705 707
706static int expire_quiescent_template(struct netns_ipvs *ipvs, 708static int expire_quiescent_template(struct netns_ipvs *ipvs,
@@ -757,41 +759,36 @@ int ip_vs_check_template(struct ip_vs_conn *ct)
757 * Simply decrease the refcnt of the template, 759 * Simply decrease the refcnt of the template,
758 * don't restart its timer. 760 * don't restart its timer.
759 */ 761 */
760 atomic_dec(&ct->refcnt); 762 __ip_vs_conn_put(ct);
761 return 0; 763 return 0;
762 } 764 }
763 return 1; 765 return 1;
764} 766}
765 767
768static void ip_vs_conn_rcu_free(struct rcu_head *head)
769{
770 struct ip_vs_conn *cp = container_of(head, struct ip_vs_conn,
771 rcu_head);
772
773 ip_vs_pe_put(cp->pe);
774 kfree(cp->pe_data);
775 kmem_cache_free(ip_vs_conn_cachep, cp);
776}
777
766static void ip_vs_conn_expire(unsigned long data) 778static void ip_vs_conn_expire(unsigned long data)
767{ 779{
768 struct ip_vs_conn *cp = (struct ip_vs_conn *)data; 780 struct ip_vs_conn *cp = (struct ip_vs_conn *)data;
769 struct net *net = ip_vs_conn_net(cp); 781 struct net *net = ip_vs_conn_net(cp);
770 struct netns_ipvs *ipvs = net_ipvs(net); 782 struct netns_ipvs *ipvs = net_ipvs(net);
771 783
772 cp->timeout = 60*HZ;
773
774 /*
775 * hey, I'm using it
776 */
777 atomic_inc(&cp->refcnt);
778
779 /* 784 /*
780 * do I control anybody? 785 * do I control anybody?
781 */ 786 */
782 if (atomic_read(&cp->n_control)) 787 if (atomic_read(&cp->n_control))
783 goto expire_later; 788 goto expire_later;
784 789
785 /* 790 /* Unlink conn if not referenced anymore */
786 * unhash it if it is hashed in the conn table 791 if (likely(ip_vs_conn_unlink(cp))) {
787 */
788 if (!ip_vs_conn_unhash(cp) && !(cp->flags & IP_VS_CONN_F_ONE_PACKET))
789 goto expire_later;
790
791 /*
792 * refcnt==1 implies I'm the only one referrer
793 */
794 if (likely(atomic_read(&cp->refcnt) == 1)) {
795 /* delete the timer if it is activated by other users */ 792 /* delete the timer if it is activated by other users */
796 del_timer(&cp->timer); 793 del_timer(&cp->timer);
797 794
@@ -810,38 +807,41 @@ static void ip_vs_conn_expire(unsigned long data)
810 ip_vs_conn_drop_conntrack(cp); 807 ip_vs_conn_drop_conntrack(cp);
811 } 808 }
812 809
813 ip_vs_pe_put(cp->pe);
814 kfree(cp->pe_data);
815 if (unlikely(cp->app != NULL)) 810 if (unlikely(cp->app != NULL))
816 ip_vs_unbind_app(cp); 811 ip_vs_unbind_app(cp);
817 ip_vs_unbind_dest(cp); 812 ip_vs_unbind_dest(cp);
818 if (cp->flags & IP_VS_CONN_F_NO_CPORT) 813 if (cp->flags & IP_VS_CONN_F_NO_CPORT)
819 atomic_dec(&ip_vs_conn_no_cport_cnt); 814 atomic_dec(&ip_vs_conn_no_cport_cnt);
815 call_rcu(&cp->rcu_head, ip_vs_conn_rcu_free);
820 atomic_dec(&ipvs->conn_count); 816 atomic_dec(&ipvs->conn_count);
821
822 kmem_cache_free(ip_vs_conn_cachep, cp);
823 return; 817 return;
824 } 818 }
825 819
826 /* hash it back to the table */
827 ip_vs_conn_hash(cp);
828
829 expire_later: 820 expire_later:
830 IP_VS_DBG(7, "delayed: conn->refcnt-1=%d conn->n_control=%d\n", 821 IP_VS_DBG(7, "delayed: conn->refcnt=%d conn->n_control=%d\n",
831 atomic_read(&cp->refcnt)-1, 822 atomic_read(&cp->refcnt),
832 atomic_read(&cp->n_control)); 823 atomic_read(&cp->n_control));
833 824
825 atomic_inc(&cp->refcnt);
826 cp->timeout = 60*HZ;
827
834 if (ipvs->sync_state & IP_VS_STATE_MASTER) 828 if (ipvs->sync_state & IP_VS_STATE_MASTER)
835 ip_vs_sync_conn(net, cp, sysctl_sync_threshold(ipvs)); 829 ip_vs_sync_conn(net, cp, sysctl_sync_threshold(ipvs));
836 830
837 ip_vs_conn_put(cp); 831 ip_vs_conn_put(cp);
838} 832}
839 833
840 834/* Modify timer, so that it expires as soon as possible.
835 * Can be called without reference only if under RCU lock.
836 */
841void ip_vs_conn_expire_now(struct ip_vs_conn *cp) 837void ip_vs_conn_expire_now(struct ip_vs_conn *cp)
842{ 838{
843 if (del_timer(&cp->timer)) 839 /* Using mod_timer_pending will ensure the timer is not
844 mod_timer(&cp->timer, jiffies); 840 * modified after the final del_timer in ip_vs_conn_expire.
841 */
842 if (timer_pending(&cp->timer) &&
843 time_after(cp->timer.expires, jiffies))
844 mod_timer_pending(&cp->timer, jiffies);
845} 845}
846 846
847 847
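mod_timer_pending() differs from mod_timer() in precisely the way the new comment requires: it rearms only a timer that is still pending and is a no-op on one that has already been deleted, so ip_vs_conn_expire_now() can no longer resurrect the timer after ip_vs_conn_expire() has issued its final del_timer(). The timer_pending()/time_after() guard merely skips the call when expiry is already imminent. The contrast, side by side (a sketch):

	/* mod_timer() would (re)activate even a deleted timer: */
	mod_timer(&cp->timer, jiffies);		/* unsafe here */
	/* mod_timer_pending() refuses to resurrect a dead timer: */
	mod_timer_pending(&cp->timer, jiffies);	/* safe no-op if deleted */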
@@ -858,7 +858,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
858 struct ip_vs_proto_data *pd = ip_vs_proto_data_get(p->net, 858 struct ip_vs_proto_data *pd = ip_vs_proto_data_get(p->net,
859 p->protocol); 859 p->protocol);
860 860
861 cp = kmem_cache_zalloc(ip_vs_conn_cachep, GFP_ATOMIC); 861 cp = kmem_cache_alloc(ip_vs_conn_cachep, GFP_ATOMIC);
862 if (cp == NULL) { 862 if (cp == NULL) {
863 IP_VS_ERR_RL("%s(): no memory\n", __func__); 863 IP_VS_ERR_RL("%s(): no memory\n", __func__);
864 return NULL; 864 return NULL;
@@ -869,13 +869,13 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
869 ip_vs_conn_net_set(cp, p->net); 869 ip_vs_conn_net_set(cp, p->net);
870 cp->af = p->af; 870 cp->af = p->af;
871 cp->protocol = p->protocol; 871 cp->protocol = p->protocol;
872 ip_vs_addr_copy(p->af, &cp->caddr, p->caddr); 872 ip_vs_addr_set(p->af, &cp->caddr, p->caddr);
873 cp->cport = p->cport; 873 cp->cport = p->cport;
874 ip_vs_addr_copy(p->af, &cp->vaddr, p->vaddr); 874 ip_vs_addr_set(p->af, &cp->vaddr, p->vaddr);
875 cp->vport = p->vport; 875 cp->vport = p->vport;
876 /* proto should only be IPPROTO_IP if d_addr is a fwmark */ 876 /* proto should only be IPPROTO_IP if d_addr is a fwmark */
877 ip_vs_addr_copy(p->protocol == IPPROTO_IP ? AF_UNSPEC : p->af, 877 ip_vs_addr_set(p->protocol == IPPROTO_IP ? AF_UNSPEC : p->af,
878 &cp->daddr, daddr); 878 &cp->daddr, daddr);
879 cp->dport = dport; 879 cp->dport = dport;
880 cp->flags = flags; 880 cp->flags = flags;
881 cp->fwmark = fwmark; 881 cp->fwmark = fwmark;
@@ -884,6 +884,10 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
884 cp->pe = p->pe; 884 cp->pe = p->pe;
885 cp->pe_data = p->pe_data; 885 cp->pe_data = p->pe_data;
886 cp->pe_data_len = p->pe_data_len; 886 cp->pe_data_len = p->pe_data_len;
887 } else {
888 cp->pe = NULL;
889 cp->pe_data = NULL;
890 cp->pe_data_len = 0;
887 } 891 }
888 spin_lock_init(&cp->lock); 892 spin_lock_init(&cp->lock);
889 893
@@ -894,18 +898,28 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
894 */ 898 */
895 atomic_set(&cp->refcnt, 1); 899 atomic_set(&cp->refcnt, 1);
896 900
901 cp->control = NULL;
897 atomic_set(&cp->n_control, 0); 902 atomic_set(&cp->n_control, 0);
898 atomic_set(&cp->in_pkts, 0); 903 atomic_set(&cp->in_pkts, 0);
899 904
905 cp->packet_xmit = NULL;
906 cp->app = NULL;
907 cp->app_data = NULL;
908 /* reset struct ip_vs_seq */
909 cp->in_seq.delta = 0;
910 cp->out_seq.delta = 0;
911
900 atomic_inc(&ipvs->conn_count); 912 atomic_inc(&ipvs->conn_count);
901 if (flags & IP_VS_CONN_F_NO_CPORT) 913 if (flags & IP_VS_CONN_F_NO_CPORT)
902 atomic_inc(&ip_vs_conn_no_cport_cnt); 914 atomic_inc(&ip_vs_conn_no_cport_cnt);
903 915
904 /* Bind the connection with a destination server */ 916 /* Bind the connection with a destination server */
917 cp->dest = NULL;
905 ip_vs_bind_dest(cp, dest); 918 ip_vs_bind_dest(cp, dest);
906 919
907 /* Set its state and timeout */ 920 /* Set its state and timeout */
908 cp->state = 0; 921 cp->state = 0;
922 cp->old_state = 0;
909 cp->timeout = 3*HZ; 923 cp->timeout = 3*HZ;
910 cp->sync_endtime = jiffies & ~3UL; 924 cp->sync_endtime = jiffies & ~3UL;
911 925
@@ -952,14 +966,17 @@ static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
952 struct ip_vs_iter_state *iter = seq->private; 966 struct ip_vs_iter_state *iter = seq->private;
953 967
954 for (idx = 0; idx < ip_vs_conn_tab_size; idx++) { 968 for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
955 ct_read_lock_bh(idx); 969 rcu_read_lock();
956 hlist_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) { 970 hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) {
971 /* __ip_vs_conn_get() is not needed by
972 * ip_vs_conn_seq_show and ip_vs_conn_sync_seq_show
973 */
957 if (pos-- == 0) { 974 if (pos-- == 0) {
958 iter->l = &ip_vs_conn_tab[idx]; 975 iter->l = &ip_vs_conn_tab[idx];
959 return cp; 976 return cp;
960 } 977 }
961 } 978 }
962 ct_read_unlock_bh(idx); 979 rcu_read_unlock();
963 } 980 }
964 981
965 return NULL; 982 return NULL;
@@ -977,6 +994,7 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
977{ 994{
978 struct ip_vs_conn *cp = v; 995 struct ip_vs_conn *cp = v;
979 struct ip_vs_iter_state *iter = seq->private; 996 struct ip_vs_iter_state *iter = seq->private;
997 struct hlist_node *e;
980 struct hlist_head *l = iter->l; 998 struct hlist_head *l = iter->l;
981 int idx; 999 int idx;
982 1000
@@ -985,19 +1003,19 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
985 return ip_vs_conn_array(seq, 0); 1003 return ip_vs_conn_array(seq, 0);
986 1004
987 /* more on same hash chain? */ 1005 /* more on same hash chain? */
988 if (cp->c_list.next) 1006 e = rcu_dereference(hlist_next_rcu(&cp->c_list));
989 return hlist_entry(cp->c_list.next, struct ip_vs_conn, c_list); 1007 if (e)
1008 return hlist_entry(e, struct ip_vs_conn, c_list);
1009 rcu_read_unlock();
990 1010
991 idx = l - ip_vs_conn_tab; 1011 idx = l - ip_vs_conn_tab;
992 ct_read_unlock_bh(idx);
993
994 while (++idx < ip_vs_conn_tab_size) { 1012 while (++idx < ip_vs_conn_tab_size) {
995 ct_read_lock_bh(idx); 1013 rcu_read_lock();
996 hlist_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) { 1014 hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) {
997 iter->l = &ip_vs_conn_tab[idx]; 1015 iter->l = &ip_vs_conn_tab[idx];
998 return cp; 1016 return cp;
999 } 1017 }
1000 ct_read_unlock_bh(idx); 1018 rcu_read_unlock();
1001 } 1019 }
1002 iter->l = NULL; 1020 iter->l = NULL;
1003 return NULL; 1021 return NULL;
@@ -1009,7 +1027,7 @@ static void ip_vs_conn_seq_stop(struct seq_file *seq, void *v)
1009 struct hlist_head *l = iter->l; 1027 struct hlist_head *l = iter->l;
1010 1028
1011 if (l) 1029 if (l)
1012 ct_read_unlock_bh(l - ip_vs_conn_tab); 1030 rcu_read_unlock();
1013} 1031}
1014 1032
1015static int ip_vs_conn_seq_show(struct seq_file *seq, void *v) 1033static int ip_vs_conn_seq_show(struct seq_file *seq, void *v)
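The three seq_file hunks above replace the per-bucket reader locks with RCU: ->start and ->next enter rcu_read_lock() before scanning a bucket, return with the section still open when they yield an entry, and ->stop (or the bucket walk itself) closes it. A condensed sketch of the bucket scan, with placeholder types (this is the shape, not the kernel's exact code):

#include <linux/rculist.h>

struct conn_sketch {
	struct hlist_node node;
};

static struct conn_sketch *scan_from(struct hlist_head *tab, int nbuckets,
				     loff_t *pos)
{
	struct conn_sketch *c;
	int i;

	for (i = 0; i < nbuckets; i++) {
		rcu_read_lock();
		hlist_for_each_entry_rcu(c, &tab[i], node) {
			if ((*pos)-- == 0)
				return c;	/* still under rcu_read_lock();
						 * ->next or ->stop unlocks */
		}
		rcu_read_unlock();
	}
	return NULL;
}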
@@ -1188,7 +1206,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
1188void ip_vs_random_dropentry(struct net *net) 1206void ip_vs_random_dropentry(struct net *net)
1189{ 1207{
1190 int idx; 1208 int idx;
1191 struct ip_vs_conn *cp; 1209 struct ip_vs_conn *cp, *cp_c;
1192 1210
1193 /* 1211 /*
1194 * Randomly scan 1/32 of the whole table every second 1212 * Randomly scan 1/32 of the whole table every second
@@ -1199,9 +1217,9 @@ void ip_vs_random_dropentry(struct net *net)
1199 /* 1217 /*
1200 * Lock is actually needed in this loop. 1218 * Lock is actually needed in this loop.
1201 */ 1219 */
1202 ct_write_lock_bh(hash); 1220 rcu_read_lock();
1203 1221
1204 hlist_for_each_entry(cp, &ip_vs_conn_tab[hash], c_list) { 1222 hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[hash], c_list) {
1205 if (cp->flags & IP_VS_CONN_F_TEMPLATE) 1223 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
1206 /* connection template */ 1224 /* connection template */
1207 continue; 1225 continue;
@@ -1228,12 +1246,15 @@ void ip_vs_random_dropentry(struct net *net)
1228 1246
1229 IP_VS_DBG(4, "del connection\n"); 1247 IP_VS_DBG(4, "del connection\n");
1230 ip_vs_conn_expire_now(cp); 1248 ip_vs_conn_expire_now(cp);
1231 if (cp->control) { 1249 cp_c = cp->control;
1250 /* cp->control is valid only while we hold a reference to cp */
1251 if (cp_c && __ip_vs_conn_get(cp)) {
1232 IP_VS_DBG(4, "del conn template\n"); 1252 IP_VS_DBG(4, "del conn template\n");
1233 ip_vs_conn_expire_now(cp->control); 1253 ip_vs_conn_expire_now(cp_c);
1254 __ip_vs_conn_put(cp);
1234 } 1255 }
1235 } 1256 }
1236 ct_write_unlock_bh(hash); 1257 rcu_read_unlock();
1237 } 1258 }
1238} 1259}
1239 1260
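Both here and in ip_vs_conn_flush() below, cp->control can no longer be chased directly: under rcu_read_lock() alone, cp may lose its last reference and be queued for freeing while we look at it, taking the template pointer with it. The hunks therefore pin cp first. A sketch of the getter they rely on (assumed to be an atomic_inc_not_zero()-style helper; the real __ip_vs_conn_get() may differ in detail):

#include <linux/atomic.h>

static inline bool conn_get(atomic_t *refcnt)
{
	/* Succeeds only while the object is still live, so a parallel
	 * final put cannot race with our access:
	 *
	 *	if (cp->control && conn_get(&cp->refcnt)) {
	 *		... use cp->control safely ...
	 *		atomic_dec(&cp->refcnt);
	 *	}
	 */
	return atomic_inc_not_zero(refcnt);
}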
@@ -1244,7 +1265,7 @@ void ip_vs_random_dropentry(struct net *net)
1244static void ip_vs_conn_flush(struct net *net) 1265static void ip_vs_conn_flush(struct net *net)
1245{ 1266{
1246 int idx; 1267 int idx;
1247 struct ip_vs_conn *cp; 1268 struct ip_vs_conn *cp, *cp_c;
1248 struct netns_ipvs *ipvs = net_ipvs(net); 1269 struct netns_ipvs *ipvs = net_ipvs(net);
1249 1270
1250flush_again: 1271flush_again:
@@ -1252,19 +1273,22 @@ flush_again:
1252 /* 1273 /*
1253 * Lock is actually needed in this loop. 1274 * Lock is actually needed in this loop.
1254 */ 1275 */
1255 ct_write_lock_bh(idx); 1276 rcu_read_lock();
1256 1277
1257 hlist_for_each_entry(cp, &ip_vs_conn_tab[idx], c_list) { 1278 hlist_for_each_entry_rcu(cp, &ip_vs_conn_tab[idx], c_list) {
1258 if (!ip_vs_conn_net_eq(cp, net)) 1279 if (!ip_vs_conn_net_eq(cp, net))
1259 continue; 1280 continue;
1260 IP_VS_DBG(4, "del connection\n"); 1281 IP_VS_DBG(4, "del connection\n");
1261 ip_vs_conn_expire_now(cp); 1282 ip_vs_conn_expire_now(cp);
1262 if (cp->control) { 1283 cp_c = cp->control;
1284 /* cp->control is valid only while we hold a reference to cp */
1285 if (cp_c && __ip_vs_conn_get(cp)) {
1263 IP_VS_DBG(4, "del conn template\n"); 1286 IP_VS_DBG(4, "del conn template\n");
1264 ip_vs_conn_expire_now(cp->control); 1287 ip_vs_conn_expire_now(cp_c);
1288 __ip_vs_conn_put(cp);
1265 } 1289 }
1266 } 1290 }
1267 ct_write_unlock_bh(idx); 1291 rcu_read_unlock();
1268 } 1292 }
1269 1293
1270 /* the counter may be not NULL, because maybe some conn entries 1294 /* the counter may be not NULL, because maybe some conn entries
@@ -1331,7 +1355,7 @@ int __init ip_vs_conn_init(void)
1331 INIT_HLIST_HEAD(&ip_vs_conn_tab[idx]); 1355 INIT_HLIST_HEAD(&ip_vs_conn_tab[idx]);
1332 1356
1333 for (idx = 0; idx < CT_LOCKARRAY_SIZE; idx++) { 1357 for (idx = 0; idx < CT_LOCKARRAY_SIZE; idx++) {
1334 rwlock_init(&__ip_vs_conntbl_lock_array[idx].l); 1358 spin_lock_init(&__ip_vs_conntbl_lock_array[idx].l);
1335 } 1359 }
1336 1360
1337 /* calculate the random value for connection hash */ 1361 /* calculate the random value for connection hash */
@@ -1342,6 +1366,8 @@ int __init ip_vs_conn_init(void)
1342 1366
1343void ip_vs_conn_cleanup(void) 1367void ip_vs_conn_cleanup(void)
1344{ 1368{
1369 /* Wait for all ip_vs_conn_rcu_free() callbacks to complete */
1370 rcu_barrier();
1345 /* Release the empty cache */ 1371 /* Release the empty cache */
1346 kmem_cache_destroy(ip_vs_conn_cachep); 1372 kmem_cache_destroy(ip_vs_conn_cachep);
1347 vfree(ip_vs_conn_tab); 1373 vfree(ip_vs_conn_tab);
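The rcu_barrier() added to ip_vs_conn_cleanup() is the standard teardown rule for call_rcu() users: before a kmem cache can be destroyed, every outstanding RCU callback that frees into it must have run. In outline (names illustrative):

#include <linux/rcupdate.h>
#include <linux/slab.h>

static struct kmem_cache *obj_cachep;

static void module_teardown(void)
{
	/* Tables are already unhashed, so no new call_rcu() arrives. */
	rcu_barrier();			/* wait for pending callbacks */
	kmem_cache_destroy(obj_cachep);	/* only now is this safe */
}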
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 61f49d241712..f26fe3353a30 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -69,10 +69,7 @@ EXPORT_SYMBOL(ip_vs_conn_put);
69EXPORT_SYMBOL(ip_vs_get_debug_level); 69EXPORT_SYMBOL(ip_vs_get_debug_level);
70#endif 70#endif
71 71
72int ip_vs_net_id __read_mostly; 72static int ip_vs_net_id __read_mostly;
73#ifdef IP_VS_GENERIC_NETNS
74EXPORT_SYMBOL(ip_vs_net_id);
75#endif
76/* netns cnt used for uniqueness */ 73/* netns cnt used for uniqueness */
77static atomic_t ipvs_netns_cnt = ATOMIC_INIT(0); 74static atomic_t ipvs_netns_cnt = ATOMIC_INIT(0);
78 75
@@ -206,7 +203,7 @@ ip_vs_conn_fill_param_persist(const struct ip_vs_service *svc,
206{ 203{
207 ip_vs_conn_fill_param(svc->net, svc->af, protocol, caddr, cport, vaddr, 204 ip_vs_conn_fill_param(svc->net, svc->af, protocol, caddr, cport, vaddr,
208 vport, p); 205 vport, p);
209 p->pe = svc->pe; 206 p->pe = rcu_dereference(svc->pe);
210 if (p->pe && p->pe->fill_param) 207 if (p->pe && p->pe->fill_param)
211 return p->pe->fill_param(p, skb); 208 return p->pe->fill_param(p, skb);
212 209
@@ -299,12 +296,15 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
299 /* Check if a template already exists */ 296 /* Check if a template already exists */
300 ct = ip_vs_ct_in_get(&param); 297 ct = ip_vs_ct_in_get(&param);
301 if (!ct || !ip_vs_check_template(ct)) { 298 if (!ct || !ip_vs_check_template(ct)) {
299 struct ip_vs_scheduler *sched;
300
302 /* 301 /*
303 * No template found or the dest of the connection 302 * No template found or the dest of the connection
304 * template is not available. 303 * template is not available.
305 * return *ignored=0 i.e. ICMP and NF_DROP 304 * return *ignored=0 i.e. ICMP and NF_DROP
306 */ 305 */
307 dest = svc->scheduler->schedule(svc, skb); 306 sched = rcu_dereference(svc->scheduler);
307 dest = sched->schedule(svc, skb);
308 if (!dest) { 308 if (!dest) {
309 IP_VS_DBG(1, "p-schedule: no dest found.\n"); 309 IP_VS_DBG(1, "p-schedule: no dest found.\n");
310 kfree(param.pe_data); 310 kfree(param.pe_data);
@@ -394,6 +394,7 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
394{ 394{
395 struct ip_vs_protocol *pp = pd->pp; 395 struct ip_vs_protocol *pp = pd->pp;
396 struct ip_vs_conn *cp = NULL; 396 struct ip_vs_conn *cp = NULL;
397 struct ip_vs_scheduler *sched;
397 struct ip_vs_dest *dest; 398 struct ip_vs_dest *dest;
398 __be16 _ports[2], *pptr; 399 __be16 _ports[2], *pptr;
399 unsigned int flags; 400 unsigned int flags;
@@ -449,7 +450,8 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
449 return NULL; 450 return NULL;
450 } 451 }
451 452
452 dest = svc->scheduler->schedule(svc, skb); 453 sched = rcu_dereference(svc->scheduler);
454 dest = sched->schedule(svc, skb);
453 if (dest == NULL) { 455 if (dest == NULL) {
454 IP_VS_DBG(1, "Schedule: no dest found.\n"); 456 IP_VS_DBG(1, "Schedule: no dest found.\n");
455 return NULL; 457 return NULL;
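In both scheduling paths above, svc->scheduler has become an RCU-managed pointer: the packet path snapshots it once with rcu_dereference() inside its read-side section and calls ->schedule() through the snapshot, so a concurrent scheduler swap in ip_vs_edit_service() cannot be observed half-way. The pattern in isolation (the ops structure is a stand-in):

#include <linux/rcupdate.h>

struct sched_ops {
	void *(*schedule)(void *svc, void *skb);
};

/* Caller is inside rcu_read_lock() (the packet path). */
static void *pick_dest(struct sched_ops __rcu **slot, void *svc, void *skb)
{
	struct sched_ops *ops = rcu_dereference(*slot);

	return ops->schedule(svc, skb);	/* one snapshot, used once */
}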
@@ -507,7 +509,6 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
507 509
508 pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph); 510 pptr = frag_safe_skb_hp(skb, iph->len, sizeof(_ports), _ports, iph);
509 if (pptr == NULL) { 511 if (pptr == NULL) {
510 ip_vs_service_put(svc);
511 return NF_DROP; 512 return NF_DROP;
512 } 513 }
513 514
@@ -533,8 +534,6 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
533 IP_VS_CONN_F_ONE_PACKET : 0; 534 IP_VS_CONN_F_ONE_PACKET : 0;
534 union nf_inet_addr daddr = { .all = { 0, 0, 0, 0 } }; 535 union nf_inet_addr daddr = { .all = { 0, 0, 0, 0 } };
535 536
536 ip_vs_service_put(svc);
537
538 /* create a new connection entry */ 537 /* create a new connection entry */
539 IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__); 538 IP_VS_DBG(6, "%s(): create a cache_bypass entry\n", __func__);
540 { 539 {
@@ -571,12 +570,8 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
571 * listed in the ipvs table), pass the packets, because it is 570 * listed in the ipvs table), pass the packets, because it is
572 * not ipvs job to decide to drop the packets. 571 * not ipvs job to decide to drop the packets.
573 */ 572 */
574 if ((svc->port == FTPPORT) && (pptr[1] != FTPPORT)) { 573 if ((svc->port == FTPPORT) && (pptr[1] != FTPPORT))
575 ip_vs_service_put(svc);
576 return NF_ACCEPT; 574 return NF_ACCEPT;
577 }
578
579 ip_vs_service_put(svc);
580 575
581 /* 576 /*
582 * Notify the client that the destination is unreachable, and 577 * Notify the client that the destination is unreachable, and
@@ -643,8 +638,11 @@ static inline enum ip_defrag_users ip_vs_defrag_user(unsigned int hooknum)
643 638
644static inline int ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user) 639static inline int ip_vs_gather_frags(struct sk_buff *skb, u_int32_t user)
645{ 640{
646 int err = ip_defrag(skb, user); 641 int err;
647 642
643 local_bh_disable();
644 err = ip_defrag(skb, user);
645 local_bh_enable();
648 if (!err) 646 if (!err)
649 ip_send_check(ip_hdr(skb)); 647 ip_send_check(ip_hdr(skb));
650 648
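ip_vs_gather_frags() now disables bottom halves itself because the IPv4 fragment queues expect BH protection, while the LOCAL_OUT hook wrappers in this patch stop disabling BH around everything. Pushing the protection down to the one callee that needs it looks like this (a sketch, not the exact kernel code):

#include <linux/skbuff.h>
#include <net/ip.h>

static int defrag_with_bh(struct sk_buff *skb, u32 user)
{
	int err;

	local_bh_disable();		/* fragment queues are BH-protected */
	err = ip_defrag(skb, user);
	local_bh_enable();
	return err;
}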
@@ -1164,9 +1162,8 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
1164 sizeof(_ports), _ports, &iph); 1162 sizeof(_ports), _ports, &iph);
1165 if (pptr == NULL) 1163 if (pptr == NULL)
1166 return NF_ACCEPT; /* Not for me */ 1164 return NF_ACCEPT; /* Not for me */
1167 if (ip_vs_lookup_real_service(net, af, iph.protocol, 1165 if (ip_vs_has_real_service(net, af, iph.protocol, &iph.saddr,
1168 &iph.saddr, 1166 pptr[0])) {
1169 pptr[0])) {
1170 /* 1167 /*
1171 * Notify the real server: there is no 1168 * Notify the real server: there is no
1172 * existing entry if it is not RST 1169 * existing entry if it is not RST
@@ -1181,9 +1178,6 @@ ip_vs_out(unsigned int hooknum, struct sk_buff *skb, int af)
1181 iph.len)))) { 1178 iph.len)))) {
1182#ifdef CONFIG_IP_VS_IPV6 1179#ifdef CONFIG_IP_VS_IPV6
1183 if (af == AF_INET6) { 1180 if (af == AF_INET6) {
1184 struct net *net =
1185 dev_net(skb_dst(skb)->dev);
1186
1187 if (!skb->dev) 1181 if (!skb->dev)
1188 skb->dev = net->loopback_dev; 1182 skb->dev = net->loopback_dev;
1189 icmpv6_send(skb, 1183 icmpv6_send(skb,
@@ -1226,13 +1220,7 @@ ip_vs_local_reply4(unsigned int hooknum, struct sk_buff *skb,
1226 const struct net_device *in, const struct net_device *out, 1220 const struct net_device *in, const struct net_device *out,
1227 int (*okfn)(struct sk_buff *)) 1221 int (*okfn)(struct sk_buff *))
1228{ 1222{
1229 unsigned int verdict; 1223 return ip_vs_out(hooknum, skb, AF_INET);
1230
1231 /* Disable BH in LOCAL_OUT until all places are fixed */
1232 local_bh_disable();
1233 verdict = ip_vs_out(hooknum, skb, AF_INET);
1234 local_bh_enable();
1235 return verdict;
1236} 1224}
1237 1225
1238#ifdef CONFIG_IP_VS_IPV6 1226#ifdef CONFIG_IP_VS_IPV6
@@ -1259,13 +1247,7 @@ ip_vs_local_reply6(unsigned int hooknum, struct sk_buff *skb,
1259 const struct net_device *in, const struct net_device *out, 1247 const struct net_device *in, const struct net_device *out,
1260 int (*okfn)(struct sk_buff *)) 1248 int (*okfn)(struct sk_buff *))
1261{ 1249{
1262 unsigned int verdict; 1250 return ip_vs_out(hooknum, skb, AF_INET6);
1263
1264 /* Disable BH in LOCAL_OUT until all places are fixed */
1265 local_bh_disable();
1266 verdict = ip_vs_out(hooknum, skb, AF_INET6);
1267 local_bh_enable();
1268 return verdict;
1269} 1251}
1270 1252
1271#endif 1253#endif
@@ -1401,10 +1383,13 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
1401 goto ignore_ipip; 1383 goto ignore_ipip;
1402 /* Prefer the resulting PMTU */ 1384 /* Prefer the resulting PMTU */
1403 if (dest) { 1385 if (dest) {
1404 spin_lock(&dest->dst_lock); 1386 struct ip_vs_dest_dst *dest_dst;
1405 if (dest->dst_cache) 1387
1406 mtu = dst_mtu(dest->dst_cache); 1388 rcu_read_lock();
1407 spin_unlock(&dest->dst_lock); 1389 dest_dst = rcu_dereference(dest->dest_dst);
1390 if (dest_dst)
1391 mtu = dst_mtu(dest_dst->dst_cache);
1392 rcu_read_unlock();
1408 } 1393 }
1409 if (mtu > 68 + sizeof(struct iphdr)) 1394 if (mtu > 68 + sizeof(struct iphdr))
1410 mtu -= sizeof(struct iphdr); 1395 mtu -= sizeof(struct iphdr);
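The PMTU probe stops taking dest->dst_lock: the cached route now lives in a separate ip_vs_dest_dst container that is published and replaced as a whole under RCU, so readers only dereference. Read side in miniature (struct layout assumed from the patch):

#include <linux/rcupdate.h>
#include <net/dst.h>

struct dest_dst_sketch {
	struct dst_entry *dst_cache;
	struct rcu_head rcu_head;
};

static unsigned int cached_mtu(struct dest_dst_sketch __rcu **slot,
			       unsigned int fallback)
{
	struct dest_dst_sketch *dd;
	unsigned int mtu = fallback;

	rcu_read_lock();
	dd = rcu_dereference(*slot);
	if (dd)	/* the container holds the dst reference until RCU free */
		mtu = dst_mtu(dd->dst_cache);
	rcu_read_unlock();
	return mtu;
}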
@@ -1720,13 +1705,7 @@ ip_vs_local_request4(unsigned int hooknum, struct sk_buff *skb,
1720 const struct net_device *in, const struct net_device *out, 1705 const struct net_device *in, const struct net_device *out,
1721 int (*okfn)(struct sk_buff *)) 1706 int (*okfn)(struct sk_buff *))
1722{ 1707{
1723 unsigned int verdict; 1708 return ip_vs_in(hooknum, skb, AF_INET);
1724
1725 /* Disable BH in LOCAL_OUT until all places are fixed */
1726 local_bh_disable();
1727 verdict = ip_vs_in(hooknum, skb, AF_INET);
1728 local_bh_enable();
1729 return verdict;
1730} 1709}
1731 1710
1732#ifdef CONFIG_IP_VS_IPV6 1711#ifdef CONFIG_IP_VS_IPV6
@@ -1785,13 +1764,7 @@ ip_vs_local_request6(unsigned int hooknum, struct sk_buff *skb,
1785 const struct net_device *in, const struct net_device *out, 1764 const struct net_device *in, const struct net_device *out,
1786 int (*okfn)(struct sk_buff *)) 1765 int (*okfn)(struct sk_buff *))
1787{ 1766{
1788 unsigned int verdict; 1767 return ip_vs_in(hooknum, skb, AF_INET6);
1789
1790 /* Disable BH in LOCAL_OUT until all places are fixed */
1791 local_bh_disable();
1792 verdict = ip_vs_in(hooknum, skb, AF_INET6);
1793 local_bh_enable();
1794 return verdict;
1795} 1768}
1796 1769
1797#endif 1770#endif
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 9e2d1cccd1eb..9e4074c26dc2 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -55,9 +55,6 @@
55/* semaphore for IPVS sockopts. And, [gs]etsockopt may sleep. */ 55/* semaphore for IPVS sockopts. And, [gs]etsockopt may sleep. */
56static DEFINE_MUTEX(__ip_vs_mutex); 56static DEFINE_MUTEX(__ip_vs_mutex);
57 57
58/* lock for service table */
59static DEFINE_RWLOCK(__ip_vs_svc_lock);
60
61/* sysctl variables */ 58/* sysctl variables */
62 59
63#ifdef CONFIG_IP_VS_DEBUG 60#ifdef CONFIG_IP_VS_DEBUG
@@ -71,7 +68,7 @@ int ip_vs_get_debug_level(void)
71 68
72 69
73/* Protos */ 70/* Protos */
74static void __ip_vs_del_service(struct ip_vs_service *svc); 71static void __ip_vs_del_service(struct ip_vs_service *svc, bool cleanup);
75 72
76 73
77#ifdef CONFIG_IP_VS_IPV6 74#ifdef CONFIG_IP_VS_IPV6
@@ -257,9 +254,9 @@ ip_vs_use_count_dec(void)
257#define IP_VS_SVC_TAB_MASK (IP_VS_SVC_TAB_SIZE - 1) 254#define IP_VS_SVC_TAB_MASK (IP_VS_SVC_TAB_SIZE - 1)
258 255
259/* the service table hashed by <protocol, addr, port> */ 256/* the service table hashed by <protocol, addr, port> */
260static struct list_head ip_vs_svc_table[IP_VS_SVC_TAB_SIZE]; 257static struct hlist_head ip_vs_svc_table[IP_VS_SVC_TAB_SIZE];
261/* the service table hashed by fwmark */ 258/* the service table hashed by fwmark */
262static struct list_head ip_vs_svc_fwm_table[IP_VS_SVC_TAB_SIZE]; 259static struct hlist_head ip_vs_svc_fwm_table[IP_VS_SVC_TAB_SIZE];
263 260
264 261
265/* 262/*
@@ -271,16 +268,18 @@ ip_vs_svc_hashkey(struct net *net, int af, unsigned int proto,
271{ 268{
272 register unsigned int porth = ntohs(port); 269 register unsigned int porth = ntohs(port);
273 __be32 addr_fold = addr->ip; 270 __be32 addr_fold = addr->ip;
271 __u32 ahash;
274 272
275#ifdef CONFIG_IP_VS_IPV6 273#ifdef CONFIG_IP_VS_IPV6
276 if (af == AF_INET6) 274 if (af == AF_INET6)
277 addr_fold = addr->ip6[0]^addr->ip6[1]^ 275 addr_fold = addr->ip6[0]^addr->ip6[1]^
278 addr->ip6[2]^addr->ip6[3]; 276 addr->ip6[2]^addr->ip6[3];
279#endif 277#endif
280 addr_fold ^= ((size_t)net>>8); 278 ahash = ntohl(addr_fold);
279 ahash ^= ((size_t) net >> 8);
281 280
282 return (proto^ntohl(addr_fold)^(porth>>IP_VS_SVC_TAB_BITS)^porth) 281 return (proto ^ ahash ^ (porth >> IP_VS_SVC_TAB_BITS) ^ porth) &
283 & IP_VS_SVC_TAB_MASK; 282 IP_VS_SVC_TAB_MASK;
284} 283}
285 284
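The hashkey hunk converts the folded address to host byte order before mixing in the net pointer, so no arithmetic is done on a __be32 value and the bucket choice no longer depends on where the netns cookie lands in the byte-swapped word. Condensed form of the new key (assuming IP_VS_SVC_TAB_BITS is 8, as defined earlier in this file):

#include <asm/byteorder.h>
#include <linux/types.h>

#define SVC_TAB_BITS	8
#define SVC_TAB_MASK	((1 << SVC_TAB_BITS) - 1)

static unsigned int svc_hashkey(unsigned int proto, __be32 addr_fold,
				unsigned int porth, unsigned long net_cookie)
{
	__u32 ahash = ntohl(addr_fold) ^ (net_cookie >> 8);

	return (proto ^ ahash ^ (porth >> SVC_TAB_BITS) ^ porth) &
	       SVC_TAB_MASK;
}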
286/* 285/*
@@ -312,13 +311,13 @@ static int ip_vs_svc_hash(struct ip_vs_service *svc)
312 */ 311 */
313 hash = ip_vs_svc_hashkey(svc->net, svc->af, svc->protocol, 312 hash = ip_vs_svc_hashkey(svc->net, svc->af, svc->protocol,
314 &svc->addr, svc->port); 313 &svc->addr, svc->port);
315 list_add(&svc->s_list, &ip_vs_svc_table[hash]); 314 hlist_add_head_rcu(&svc->s_list, &ip_vs_svc_table[hash]);
316 } else { 315 } else {
317 /* 316 /*
318 * Hash it by fwmark in svc_fwm_table 317 * Hash it by fwmark in svc_fwm_table
319 */ 318 */
320 hash = ip_vs_svc_fwm_hashkey(svc->net, svc->fwmark); 319 hash = ip_vs_svc_fwm_hashkey(svc->net, svc->fwmark);
321 list_add(&svc->f_list, &ip_vs_svc_fwm_table[hash]); 320 hlist_add_head_rcu(&svc->f_list, &ip_vs_svc_fwm_table[hash]);
322 } 321 }
323 322
324 svc->flags |= IP_VS_SVC_F_HASHED; 323 svc->flags |= IP_VS_SVC_F_HASHED;
@@ -342,10 +341,10 @@ static int ip_vs_svc_unhash(struct ip_vs_service *svc)
342 341
343 if (svc->fwmark == 0) { 342 if (svc->fwmark == 0) {
344 /* Remove it from the svc_table table */ 343 /* Remove it from the svc_table table */
345 list_del(&svc->s_list); 344 hlist_del_rcu(&svc->s_list);
346 } else { 345 } else {
347 /* Remove it from the svc_fwm_table table */ 346 /* Remove it from the svc_fwm_table table */
348 list_del(&svc->f_list); 347 hlist_del_rcu(&svc->f_list);
349 } 348 }
350 349
351 svc->flags &= ~IP_VS_SVC_F_HASHED; 350 svc->flags &= ~IP_VS_SVC_F_HASHED;
@@ -367,7 +366,7 @@ __ip_vs_service_find(struct net *net, int af, __u16 protocol,
367 /* Check for "full" addressed entries */ 366 /* Check for "full" addressed entries */
368 hash = ip_vs_svc_hashkey(net, af, protocol, vaddr, vport); 367 hash = ip_vs_svc_hashkey(net, af, protocol, vaddr, vport);
369 368
370 list_for_each_entry(svc, &ip_vs_svc_table[hash], s_list){ 369 hlist_for_each_entry_rcu(svc, &ip_vs_svc_table[hash], s_list) {
371 if ((svc->af == af) 370 if ((svc->af == af)
372 && ip_vs_addr_equal(af, &svc->addr, vaddr) 371 && ip_vs_addr_equal(af, &svc->addr, vaddr)
373 && (svc->port == vport) 372 && (svc->port == vport)
@@ -394,7 +393,7 @@ __ip_vs_svc_fwm_find(struct net *net, int af, __u32 fwmark)
394 /* Check for fwmark addressed entries */ 393 /* Check for fwmark addressed entries */
395 hash = ip_vs_svc_fwm_hashkey(net, fwmark); 394 hash = ip_vs_svc_fwm_hashkey(net, fwmark);
396 395
397 list_for_each_entry(svc, &ip_vs_svc_fwm_table[hash], f_list) { 396 hlist_for_each_entry_rcu(svc, &ip_vs_svc_fwm_table[hash], f_list) {
398 if (svc->fwmark == fwmark && svc->af == af 397 if (svc->fwmark == fwmark && svc->af == af
399 && net_eq(svc->net, net)) { 398 && net_eq(svc->net, net)) {
400 /* HIT */ 399 /* HIT */
@@ -405,15 +404,14 @@ __ip_vs_svc_fwm_find(struct net *net, int af, __u32 fwmark)
405 return NULL; 404 return NULL;
406} 405}
407 406
407/* Find service, called under RCU lock */
408struct ip_vs_service * 408struct ip_vs_service *
409ip_vs_service_get(struct net *net, int af, __u32 fwmark, __u16 protocol, 409ip_vs_service_find(struct net *net, int af, __u32 fwmark, __u16 protocol,
410 const union nf_inet_addr *vaddr, __be16 vport) 410 const union nf_inet_addr *vaddr, __be16 vport)
411{ 411{
412 struct ip_vs_service *svc; 412 struct ip_vs_service *svc;
413 struct netns_ipvs *ipvs = net_ipvs(net); 413 struct netns_ipvs *ipvs = net_ipvs(net);
414 414
415 read_lock(&__ip_vs_svc_lock);
416
417 /* 415 /*
418 * Check the table hashed by fwmark first 416 * Check the table hashed by fwmark first
419 */ 417 */
@@ -449,10 +447,6 @@ ip_vs_service_get(struct net *net, int af, __u32 fwmark, __u16 protocol,
449 } 447 }
450 448
451 out: 449 out:
452 if (svc)
453 atomic_inc(&svc->usecnt);
454 read_unlock(&__ip_vs_svc_lock);
455
456 IP_VS_DBG_BUF(9, "lookup service: fwm %u %s %s:%u %s\n", 450 IP_VS_DBG_BUF(9, "lookup service: fwm %u %s %s:%u %s\n",
457 fwmark, ip_vs_proto_name(protocol), 451 fwmark, ip_vs_proto_name(protocol),
458 IP_VS_DBG_ADDR(af, vaddr), ntohs(vport), 452 IP_VS_DBG_ADDR(af, vaddr), ntohs(vport),
@@ -469,6 +463,13 @@ __ip_vs_bind_svc(struct ip_vs_dest *dest, struct ip_vs_service *svc)
469 dest->svc = svc; 463 dest->svc = svc;
470} 464}
471 465
466static void ip_vs_service_free(struct ip_vs_service *svc)
467{
468 if (svc->stats.cpustats)
469 free_percpu(svc->stats.cpustats);
470 kfree(svc);
471}
472
472static void 473static void
473__ip_vs_unbind_svc(struct ip_vs_dest *dest) 474__ip_vs_unbind_svc(struct ip_vs_dest *dest)
474{ 475{
@@ -476,12 +477,11 @@ __ip_vs_unbind_svc(struct ip_vs_dest *dest)
476 477
477 dest->svc = NULL; 478 dest->svc = NULL;
478 if (atomic_dec_and_test(&svc->refcnt)) { 479 if (atomic_dec_and_test(&svc->refcnt)) {
479 IP_VS_DBG_BUF(3, "Removing service %u/%s:%u usecnt=%d\n", 480 IP_VS_DBG_BUF(3, "Removing service %u/%s:%u\n",
480 svc->fwmark, 481 svc->fwmark,
481 IP_VS_DBG_ADDR(svc->af, &svc->addr), 482 IP_VS_DBG_ADDR(svc->af, &svc->addr),
482 ntohs(svc->port), atomic_read(&svc->usecnt)); 483 ntohs(svc->port));
483 free_percpu(svc->stats.cpustats); 484 ip_vs_service_free(svc);
484 kfree(svc);
485 } 485 }
486} 486}
487 487
@@ -506,17 +506,13 @@ static inline unsigned int ip_vs_rs_hashkey(int af,
506 & IP_VS_RTAB_MASK; 506 & IP_VS_RTAB_MASK;
507} 507}
508 508
509/* 509/* Hash ip_vs_dest in rs_table by <proto,addr,port>. */
510 * Hashes ip_vs_dest in rs_table by <proto,addr,port>. 510static void ip_vs_rs_hash(struct netns_ipvs *ipvs, struct ip_vs_dest *dest)
511 * should be called with locked tables.
512 */
513static int ip_vs_rs_hash(struct netns_ipvs *ipvs, struct ip_vs_dest *dest)
514{ 511{
515 unsigned int hash; 512 unsigned int hash;
516 513
517 if (!list_empty(&dest->d_list)) { 514 if (dest->in_rs_table)
518 return 0; 515 return;
519 }
520 516
521 /* 517 /*
522 * Hash by proto,addr,port, 518 * Hash by proto,addr,port,
@@ -524,64 +520,51 @@ static int ip_vs_rs_hash(struct netns_ipvs *ipvs, struct ip_vs_dest *dest)
524 */ 520 */
525 hash = ip_vs_rs_hashkey(dest->af, &dest->addr, dest->port); 521 hash = ip_vs_rs_hashkey(dest->af, &dest->addr, dest->port);
526 522
527 list_add(&dest->d_list, &ipvs->rs_table[hash]); 523 hlist_add_head_rcu(&dest->d_list, &ipvs->rs_table[hash]);
528 524 dest->in_rs_table = 1;
529 return 1;
530} 525}
531 526
532/* 527/* Unhash ip_vs_dest from rs_table. */
533 * UNhashes ip_vs_dest from rs_table. 528static void ip_vs_rs_unhash(struct ip_vs_dest *dest)
534 * should be called with locked tables.
535 */
536static int ip_vs_rs_unhash(struct ip_vs_dest *dest)
537{ 529{
538 /* 530 /*
539 * Remove it from the rs_table table. 531 * Remove it from the rs_table table.
540 */ 532 */
541 if (!list_empty(&dest->d_list)) { 533 if (dest->in_rs_table) {
542 list_del_init(&dest->d_list); 534 hlist_del_rcu(&dest->d_list);
535 dest->in_rs_table = 0;
543 } 536 }
544
545 return 1;
546} 537}
547 538
548/* 539/* Check if real service by <proto,addr,port> is present */
549 * Lookup real service by <proto,addr,port> in the real service table. 540bool ip_vs_has_real_service(struct net *net, int af, __u16 protocol,
550 */ 541 const union nf_inet_addr *daddr, __be16 dport)
551struct ip_vs_dest *
552ip_vs_lookup_real_service(struct net *net, int af, __u16 protocol,
553 const union nf_inet_addr *daddr,
554 __be16 dport)
555{ 542{
556 struct netns_ipvs *ipvs = net_ipvs(net); 543 struct netns_ipvs *ipvs = net_ipvs(net);
557 unsigned int hash; 544 unsigned int hash;
558 struct ip_vs_dest *dest; 545 struct ip_vs_dest *dest;
559 546
560 /* 547 /* Check for "full" addressed entries */
561 * Check for "full" addressed entries
562 * Return the first found entry
563 */
564 hash = ip_vs_rs_hashkey(af, daddr, dport); 548 hash = ip_vs_rs_hashkey(af, daddr, dport);
565 549
566 read_lock(&ipvs->rs_lock); 550 rcu_read_lock();
567 list_for_each_entry(dest, &ipvs->rs_table[hash], d_list) { 551 hlist_for_each_entry_rcu(dest, &ipvs->rs_table[hash], d_list) {
568 if ((dest->af == af) 552 if (dest->port == dport &&
569 && ip_vs_addr_equal(af, &dest->addr, daddr) 553 dest->af == af &&
570 && (dest->port == dport) 554 ip_vs_addr_equal(af, &dest->addr, daddr) &&
571 && ((dest->protocol == protocol) || 555 (dest->protocol == protocol || dest->vfwmark)) {
572 dest->vfwmark)) {
573 /* HIT */ 556 /* HIT */
574 read_unlock(&ipvs->rs_lock); 557 rcu_read_unlock();
575 return dest; 558 return true;
576 } 559 }
577 } 560 }
578 read_unlock(&ipvs->rs_lock); 561 rcu_read_unlock();
579 562
580 return NULL; 563 return false;
581} 564}
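ip_vs_lookup_real_service() used to return a dest pointer that callers merely null-tested; once rs_table is RCU-managed that pointer may be freed the instant the read-side section ends, so the API narrows to a boolean ip_vs_has_real_service() and nothing escapes the critical section but the answer. The same shape generically:

#include <linux/rculist.h>

struct rs_entry {
	int key;
	struct hlist_node d_list;
};

static bool rs_present(struct hlist_head *bucket, int key)
{
	struct rs_entry *e;

	rcu_read_lock();
	hlist_for_each_entry_rcu(e, bucket, d_list) {
		if (e->key == key) {
			rcu_read_unlock();
			return true;	/* only the verdict leaves */
		}
	}
	rcu_read_unlock();
	return false;
}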
582 565
583/* 566/* Lookup destination by {addr,port} in the given service
584 * Lookup destination by {addr,port} in the given service 567 * Called under RCU lock.
585 */ 568 */
586static struct ip_vs_dest * 569static struct ip_vs_dest *
587ip_vs_lookup_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr, 570ip_vs_lookup_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
@@ -592,7 +575,7 @@ ip_vs_lookup_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
592 /* 575 /*
593 * Find the destination for the given service 576 * Find the destination for the given service
594 */ 577 */
595 list_for_each_entry(dest, &svc->destinations, n_list) { 578 list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
596 if ((dest->af == svc->af) 579 if ((dest->af == svc->af)
597 && ip_vs_addr_equal(svc->af, &dest->addr, daddr) 580 && ip_vs_addr_equal(svc->af, &dest->addr, daddr)
598 && (dest->port == dport)) { 581 && (dest->port == dport)) {
@@ -606,13 +589,11 @@ ip_vs_lookup_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
606 589
607/* 590/*
608 * Find destination by {daddr,dport,vaddr,protocol} 591 * Find destination by {daddr,dport,vaddr,protocol}
609 * Cretaed to be used in ip_vs_process_message() in 592 * Created to be used in ip_vs_process_message() in
610 * the backup synchronization daemon. It finds the 593 * the backup synchronization daemon. It finds the
611 * destination to be bound to the received connection 594 * destination to be bound to the received connection
612 * on the backup. 595 * on the backup.
613 * 596 * Called under RCU lock, no refcnt is returned.
614 * ip_vs_lookup_real_service() looked promissing, but
615 * seems not working as expected.
616 */ 597 */
617struct ip_vs_dest *ip_vs_find_dest(struct net *net, int af, 598struct ip_vs_dest *ip_vs_find_dest(struct net *net, int af,
618 const union nf_inet_addr *daddr, 599 const union nf_inet_addr *daddr,
@@ -625,7 +606,7 @@ struct ip_vs_dest *ip_vs_find_dest(struct net *net, int af,
625 struct ip_vs_service *svc; 606 struct ip_vs_service *svc;
626 __be16 port = dport; 607 __be16 port = dport;
627 608
628 svc = ip_vs_service_get(net, af, fwmark, protocol, vaddr, vport); 609 svc = ip_vs_service_find(net, af, fwmark, protocol, vaddr, vport);
629 if (!svc) 610 if (!svc)
630 return NULL; 611 return NULL;
631 if (fwmark && (flags & IP_VS_CONN_F_FWD_MASK) != IP_VS_CONN_F_MASQ) 612 if (fwmark && (flags & IP_VS_CONN_F_FWD_MASK) != IP_VS_CONN_F_MASQ)
@@ -633,12 +614,31 @@ struct ip_vs_dest *ip_vs_find_dest(struct net *net, int af,
633 dest = ip_vs_lookup_dest(svc, daddr, port); 614 dest = ip_vs_lookup_dest(svc, daddr, port);
634 if (!dest) 615 if (!dest)
635 dest = ip_vs_lookup_dest(svc, daddr, port ^ dport); 616 dest = ip_vs_lookup_dest(svc, daddr, port ^ dport);
636 if (dest)
637 atomic_inc(&dest->refcnt);
638 ip_vs_service_put(svc);
639 return dest; 617 return dest;
640} 618}
641 619
620void ip_vs_dest_dst_rcu_free(struct rcu_head *head)
621{
622 struct ip_vs_dest_dst *dest_dst = container_of(head,
623 struct ip_vs_dest_dst,
624 rcu_head);
625
626 dst_release(dest_dst->dst_cache);
627 kfree(dest_dst);
628}
629
630/* Release dest_dst and dst_cache for dest in user context */
631static void __ip_vs_dst_cache_reset(struct ip_vs_dest *dest)
632{
633 struct ip_vs_dest_dst *old;
634
635 old = rcu_dereference_protected(dest->dest_dst, 1);
636 if (old) {
637 RCU_INIT_POINTER(dest->dest_dst, NULL);
638 call_rcu(&old->rcu_head, ip_vs_dest_dst_rcu_free);
639 }
640}
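__ip_vs_dst_cache_reset() is the update side matching the lockless PMTU read earlier: unpublish the container with RCU_INIT_POINTER(), then let call_rcu() release the dst and free the memory once current readers are done. The unpublish-then-defer idiom:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct box {
	struct rcu_head rcu_head;
	/* payload released in the callback */
};

static void box_rcu_free(struct rcu_head *head)
{
	kfree(container_of(head, struct box, rcu_head));
}

/* Updaters are serialized by the caller (dest->dst_lock in the patch). */
static void box_reset(struct box __rcu **slot)
{
	struct box *old = rcu_dereference_protected(*slot, 1);

	if (old) {
		RCU_INIT_POINTER(*slot, NULL);		/* unpublish */
		call_rcu(&old->rcu_head, box_rcu_free);	/* free after GP */
	}
}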
641
642/* 642/*
643 * Lookup dest by {svc,addr,port} in the destination trash. 643 * Lookup dest by {svc,addr,port} in the destination trash.
644 * The destination trash is used to hold the destinations that are removed 644 * The destination trash is used to hold the destinations that are removed
@@ -653,19 +653,25 @@ static struct ip_vs_dest *
653ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr, 653ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
654 __be16 dport) 654 __be16 dport)
655{ 655{
656 struct ip_vs_dest *dest, *nxt; 656 struct ip_vs_dest *dest;
657 struct netns_ipvs *ipvs = net_ipvs(svc->net); 657 struct netns_ipvs *ipvs = net_ipvs(svc->net);
658 658
659 /* 659 /*
660 * Find the destination in trash 660 * Find the destination in trash
661 */ 661 */
662 list_for_each_entry_safe(dest, nxt, &ipvs->dest_trash, n_list) { 662 spin_lock_bh(&ipvs->dest_trash_lock);
663 list_for_each_entry(dest, &ipvs->dest_trash, t_list) {
663 IP_VS_DBG_BUF(3, "Destination %u/%s:%u still in trash, " 664 IP_VS_DBG_BUF(3, "Destination %u/%s:%u still in trash, "
664 "dest->refcnt=%d\n", 665 "dest->refcnt=%d\n",
665 dest->vfwmark, 666 dest->vfwmark,
666 IP_VS_DBG_ADDR(svc->af, &dest->addr), 667 IP_VS_DBG_ADDR(svc->af, &dest->addr),
667 ntohs(dest->port), 668 ntohs(dest->port),
668 atomic_read(&dest->refcnt)); 669 atomic_read(&dest->refcnt));
670 /* We cannot reuse dest while in its grace period
671 * because conns can still use dest->svc
672 */
673 if (test_bit(IP_VS_DEST_STATE_REMOVING, &dest->state))
674 continue;
669 if (dest->af == svc->af && 675 if (dest->af == svc->af &&
670 ip_vs_addr_equal(svc->af, &dest->addr, daddr) && 676 ip_vs_addr_equal(svc->af, &dest->addr, daddr) &&
671 dest->port == dport && 677 dest->port == dport &&
@@ -675,29 +681,27 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
675 (ip_vs_addr_equal(svc->af, &dest->vaddr, &svc->addr) && 681 (ip_vs_addr_equal(svc->af, &dest->vaddr, &svc->addr) &&
676 dest->vport == svc->port))) { 682 dest->vport == svc->port))) {
677 /* HIT */ 683 /* HIT */
678 return dest; 684 list_del(&dest->t_list);
679 } 685 ip_vs_dest_hold(dest);
680 686 goto out;
681 /*
682 * Try to purge the destination from trash if not referenced
683 */
684 if (atomic_read(&dest->refcnt) == 1) {
685 IP_VS_DBG_BUF(3, "Removing destination %u/%s:%u "
686 "from trash\n",
687 dest->vfwmark,
688 IP_VS_DBG_ADDR(svc->af, &dest->addr),
689 ntohs(dest->port));
690 list_del(&dest->n_list);
691 ip_vs_dst_reset(dest);
692 __ip_vs_unbind_svc(dest);
693 free_percpu(dest->stats.cpustats);
694 kfree(dest);
695 } 687 }
696 } 688 }
697 689
698 return NULL; 690 dest = NULL;
691
692out:
693 spin_unlock_bh(&ipvs->dest_trash_lock);
694
695 return dest;
699} 696}
700 697
698static void ip_vs_dest_free(struct ip_vs_dest *dest)
699{
700 __ip_vs_dst_cache_reset(dest);
701 __ip_vs_unbind_svc(dest);
702 free_percpu(dest->stats.cpustats);
703 kfree(dest);
704}
701 705
702/* 706/*
703 * Clean up all the destinations in the trash 707 * Clean up all the destinations in the trash
@@ -706,19 +710,18 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, const union nf_inet_addr *daddr,
706 * When the ip_vs_control_clearup is activated by ipvs module exit, 710 * When the ip_vs_control_clearup is activated by ipvs module exit,
707 * the service tables must have been flushed and all the connections 711 * the service tables must have been flushed and all the connections
708 * are expired, and the refcnt of each destination in the trash must 712 * are expired, and the refcnt of each destination in the trash must
709 * be 1, so we simply release them here. 713 * be 0, so we simply release them here.
710 */ 714 */
711static void ip_vs_trash_cleanup(struct net *net) 715static void ip_vs_trash_cleanup(struct net *net)
712{ 716{
713 struct ip_vs_dest *dest, *nxt; 717 struct ip_vs_dest *dest, *nxt;
714 struct netns_ipvs *ipvs = net_ipvs(net); 718 struct netns_ipvs *ipvs = net_ipvs(net);
715 719
716 list_for_each_entry_safe(dest, nxt, &ipvs->dest_trash, n_list) { 720 del_timer_sync(&ipvs->dest_trash_timer);
717 list_del(&dest->n_list); 721 /* No need to use dest_trash_lock */
718 ip_vs_dst_reset(dest); 722 list_for_each_entry_safe(dest, nxt, &ipvs->dest_trash, t_list) {
719 __ip_vs_unbind_svc(dest); 723 list_del(&dest->t_list);
720 free_percpu(dest->stats.cpustats); 724 ip_vs_dest_free(dest);
721 kfree(dest);
722 } 725 }
723} 726}
724 727
@@ -768,6 +771,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
768 struct ip_vs_dest_user_kern *udest, int add) 771 struct ip_vs_dest_user_kern *udest, int add)
769{ 772{
770 struct netns_ipvs *ipvs = net_ipvs(svc->net); 773 struct netns_ipvs *ipvs = net_ipvs(svc->net);
774 struct ip_vs_scheduler *sched;
771 int conn_flags; 775 int conn_flags;
772 776
773 /* set the weight and the flags */ 777 /* set the weight and the flags */
@@ -783,9 +787,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
783 * Put the real service in rs_table if not present. 787 * Put the real service in rs_table if not present.
784 * For now only for NAT! 788 * For now only for NAT!
785 */ 789 */
786 write_lock_bh(&ipvs->rs_lock);
787 ip_vs_rs_hash(ipvs, dest); 790 ip_vs_rs_hash(ipvs, dest);
788 write_unlock_bh(&ipvs->rs_lock);
789 } 791 }
790 atomic_set(&dest->conn_flags, conn_flags); 792 atomic_set(&dest->conn_flags, conn_flags);
791 793
@@ -809,27 +811,20 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
809 dest->l_threshold = udest->l_threshold; 811 dest->l_threshold = udest->l_threshold;
810 812
811 spin_lock_bh(&dest->dst_lock); 813 spin_lock_bh(&dest->dst_lock);
812 ip_vs_dst_reset(dest); 814 __ip_vs_dst_cache_reset(dest);
813 spin_unlock_bh(&dest->dst_lock); 815 spin_unlock_bh(&dest->dst_lock);
814 816
815 if (add) 817 sched = rcu_dereference_protected(svc->scheduler, 1);
816 ip_vs_start_estimator(svc->net, &dest->stats);
817
818 write_lock_bh(&__ip_vs_svc_lock);
819
820 /* Wait until all other svc users go away */
821 IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 0);
822
823 if (add) { 818 if (add) {
824 list_add(&dest->n_list, &svc->destinations); 819 ip_vs_start_estimator(svc->net, &dest->stats);
820 list_add_rcu(&dest->n_list, &svc->destinations);
825 svc->num_dests++; 821 svc->num_dests++;
822 if (sched->add_dest)
823 sched->add_dest(svc, dest);
824 } else {
825 if (sched->upd_dest)
826 sched->upd_dest(svc, dest);
826 } 827 }
827
828 /* call the update_service, because server weight may be changed */
829 if (svc->scheduler->update_service)
830 svc->scheduler->update_service(svc);
831
832 write_unlock_bh(&__ip_vs_svc_lock);
833} 828}
834 829
835 830
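Note the annotation split that runs through ip_vs_ctl.c: packet-path readers use rcu_dereference() under rcu_read_lock(), while configuration code like this uses rcu_dereference_protected(svc->scheduler, 1), asserting that the pointer cannot change because __ip_vs_mutex is held. The literal 1 disables the lockdep check; a stricter variant names the lock:

#include <linux/mutex.h>
#include <linux/rcupdate.h>

static DEFINE_MUTEX(cfg_mutex);		/* stands in for __ip_vs_mutex */

/* Update-side access: no rcu_read_lock(), the mutex is the protection. */
static void *cfg_deref(void __rcu **slot)
{
	return rcu_dereference_protected(*slot,
					 lockdep_is_held(&cfg_mutex));
}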
@@ -881,7 +876,7 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
881 atomic_set(&dest->persistconns, 0); 876 atomic_set(&dest->persistconns, 0);
882 atomic_set(&dest->refcnt, 1); 877 atomic_set(&dest->refcnt, 1);
883 878
884 INIT_LIST_HEAD(&dest->d_list); 879 INIT_HLIST_NODE(&dest->d_list);
885 spin_lock_init(&dest->dst_lock); 880 spin_lock_init(&dest->dst_lock);
886 spin_lock_init(&dest->stats.lock); 881 spin_lock_init(&dest->stats.lock);
887 __ip_vs_update_dest(svc, dest, udest, 1); 882 __ip_vs_update_dest(svc, dest, udest, 1);
@@ -923,10 +918,10 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
923 918
924 ip_vs_addr_copy(svc->af, &daddr, &udest->addr); 919 ip_vs_addr_copy(svc->af, &daddr, &udest->addr);
925 920
926 /* 921 /* We use function that requires RCU lock */
927 * Check if the dest already exists in the list 922 rcu_read_lock();
928 */
929 dest = ip_vs_lookup_dest(svc, &daddr, dport); 923 dest = ip_vs_lookup_dest(svc, &daddr, dport);
924 rcu_read_unlock();
930 925
931 if (dest != NULL) { 926 if (dest != NULL) {
932 IP_VS_DBG(1, "%s(): dest already exists\n", __func__); 927 IP_VS_DBG(1, "%s(): dest already exists\n", __func__);
@@ -948,11 +943,6 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
948 IP_VS_DBG_ADDR(svc->af, &dest->vaddr), 943 IP_VS_DBG_ADDR(svc->af, &dest->vaddr),
949 ntohs(dest->vport)); 944 ntohs(dest->vport));
950 945
951 /*
952 * Get the destination from the trash
953 */
954 list_del(&dest->n_list);
955
956 __ip_vs_update_dest(svc, dest, udest, 1); 946 __ip_vs_update_dest(svc, dest, udest, 1);
957 ret = 0; 947 ret = 0;
958 } else { 948 } else {
@@ -992,10 +982,10 @@ ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
992 982
993 ip_vs_addr_copy(svc->af, &daddr, &udest->addr); 983 ip_vs_addr_copy(svc->af, &daddr, &udest->addr);
994 984
995 /* 985 /* We use function that requires RCU lock */
996 * Lookup the destination list 986 rcu_read_lock();
997 */
998 dest = ip_vs_lookup_dest(svc, &daddr, dport); 987 dest = ip_vs_lookup_dest(svc, &daddr, dport);
988 rcu_read_unlock();
999 989
1000 if (dest == NULL) { 990 if (dest == NULL) {
1001 IP_VS_DBG(1, "%s(): dest doesn't exist\n", __func__); 991 IP_VS_DBG(1, "%s(): dest doesn't exist\n", __func__);
@@ -1008,11 +998,21 @@ ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
1008 return 0; 998 return 0;
1009} 999}
1010 1000
1001static void ip_vs_dest_wait_readers(struct rcu_head *head)
1002{
1003 struct ip_vs_dest *dest = container_of(head, struct ip_vs_dest,
1004 rcu_head);
1005
1006 /* End of grace period after unlinking */
1007 clear_bit(IP_VS_DEST_STATE_REMOVING, &dest->state);
1008}
1009
1011 1010
1012/* 1011/*
1013 * Delete a destination (must be already unlinked from the service) 1012 * Delete a destination (must be already unlinked from the service)
1014 */ 1013 */
1015static void __ip_vs_del_dest(struct net *net, struct ip_vs_dest *dest) 1014static void __ip_vs_del_dest(struct net *net, struct ip_vs_dest *dest,
1015 bool cleanup)
1016{ 1016{
1017 struct netns_ipvs *ipvs = net_ipvs(net); 1017 struct netns_ipvs *ipvs = net_ipvs(net);
1018 1018
@@ -1021,38 +1021,24 @@ static void __ip_vs_del_dest(struct net *net, struct ip_vs_dest *dest)
1021 /* 1021 /*
1022 * Remove it from the d-linked list with the real services. 1022 * Remove it from the d-linked list with the real services.
1023 */ 1023 */
1024 write_lock_bh(&ipvs->rs_lock);
1025 ip_vs_rs_unhash(dest); 1024 ip_vs_rs_unhash(dest);
1026 write_unlock_bh(&ipvs->rs_lock);
1027 1025
1028 /* 1026 if (!cleanup) {
1029 * Decrease the refcnt of the dest, and free the dest 1027 set_bit(IP_VS_DEST_STATE_REMOVING, &dest->state);
1030 * if nobody refers to it (refcnt=0). Otherwise, throw 1028 call_rcu(&dest->rcu_head, ip_vs_dest_wait_readers);
1031 * the destination into the trash.
1032 */
1033 if (atomic_dec_and_test(&dest->refcnt)) {
1034 IP_VS_DBG_BUF(3, "Removing destination %u/%s:%u\n",
1035 dest->vfwmark,
1036 IP_VS_DBG_ADDR(dest->af, &dest->addr),
1037 ntohs(dest->port));
1038 ip_vs_dst_reset(dest);
1039 /* simply decrease svc->refcnt here, let the caller check
1040 and release the service if nobody refers to it.
1041 Only user context can release destination and service,
1042 and only one user context can update virtual service at a
1043 time, so the operation here is OK */
1044 atomic_dec(&dest->svc->refcnt);
1045 free_percpu(dest->stats.cpustats);
1046 kfree(dest);
1047 } else {
1048 IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, "
1049 "dest->refcnt=%d\n",
1050 IP_VS_DBG_ADDR(dest->af, &dest->addr),
1051 ntohs(dest->port),
1052 atomic_read(&dest->refcnt));
1053 list_add(&dest->n_list, &ipvs->dest_trash);
1054 atomic_inc(&dest->refcnt);
1055 } 1029 }
1030
1031 spin_lock_bh(&ipvs->dest_trash_lock);
1032 IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, dest->refcnt=%d\n",
1033 IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port),
1034 atomic_read(&dest->refcnt));
1035 if (list_empty(&ipvs->dest_trash) && !cleanup)
1036 mod_timer(&ipvs->dest_trash_timer,
1037 jiffies + IP_VS_DEST_TRASH_PERIOD);
1038 /* dest lives in trash without reference */
1039 list_add(&dest->t_list, &ipvs->dest_trash);
1040 spin_unlock_bh(&ipvs->dest_trash_lock);
1041 ip_vs_dest_put(dest);
1056} 1042}
1057 1043
1058 1044
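Dest removal is now two-phase: __ip_vs_del_dest() unhashes the dest, sets IP_VS_DEST_STATE_REMOVING, and uses call_rcu() purely as a grace-period marker, since ip_vs_dest_wait_readers() frees nothing and only clears the bit. While the bit is set, ip_vs_trash_get_dest() above refuses to resurrect the entry because old readers may still be walking through dest->svc. The marker idiom on its own:

#include <linux/bitops.h>
#include <linux/rcupdate.h>

struct removable {
	unsigned long state;		/* bit 0 == REMOVING */
	struct rcu_head rcu_head;
};

static void end_of_grace(struct rcu_head *head)
{
	struct removable *r = container_of(head, struct removable, rcu_head);

	/* Frees nothing; only records that every reader that could have
	 * seen the old linkage has finished.
	 */
	clear_bit(0, &r->state);
}

static void start_removal(struct removable *r)
{
	set_bit(0, &r->state);
	call_rcu(&r->rcu_head, end_of_grace);
}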
@@ -1068,14 +1054,16 @@ static void __ip_vs_unlink_dest(struct ip_vs_service *svc,
1068 /* 1054 /*
1069 * Remove it from the d-linked destination list. 1055 * Remove it from the d-linked destination list.
1070 */ 1056 */
1071 list_del(&dest->n_list); 1057 list_del_rcu(&dest->n_list);
1072 svc->num_dests--; 1058 svc->num_dests--;
1073 1059
1074 /* 1060 if (svcupd) {
1075 * Call the update_service function of its scheduler 1061 struct ip_vs_scheduler *sched;
1076 */ 1062
1077 if (svcupd && svc->scheduler->update_service) 1063 sched = rcu_dereference_protected(svc->scheduler, 1);
1078 svc->scheduler->update_service(svc); 1064 if (sched->del_dest)
1065 sched->del_dest(svc, dest);
1066 }
1079} 1067}
1080 1068
1081 1069
@@ -1090,37 +1078,56 @@ ip_vs_del_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
1090 1078
1091 EnterFunction(2); 1079 EnterFunction(2);
1092 1080
1081 /* We use function that requires RCU lock */
1082 rcu_read_lock();
1093 dest = ip_vs_lookup_dest(svc, &udest->addr, dport); 1083 dest = ip_vs_lookup_dest(svc, &udest->addr, dport);
1084 rcu_read_unlock();
1094 1085
1095 if (dest == NULL) { 1086 if (dest == NULL) {
1096 IP_VS_DBG(1, "%s(): destination not found!\n", __func__); 1087 IP_VS_DBG(1, "%s(): destination not found!\n", __func__);
1097 return -ENOENT; 1088 return -ENOENT;
1098 } 1089 }
1099 1090
1100 write_lock_bh(&__ip_vs_svc_lock);
1101
1102 /*
1103 * Wait until all other svc users go away.
1104 */
1105 IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 0);
1106
1107 /* 1091 /*
1108 * Unlink dest from the service 1092 * Unlink dest from the service
1109 */ 1093 */
1110 __ip_vs_unlink_dest(svc, dest, 1); 1094 __ip_vs_unlink_dest(svc, dest, 1);
1111 1095
1112 write_unlock_bh(&__ip_vs_svc_lock);
1113
1114 /* 1096 /*
1115 * Delete the destination 1097 * Delete the destination
1116 */ 1098 */
1117 __ip_vs_del_dest(svc->net, dest); 1099 __ip_vs_del_dest(svc->net, dest, false);
1118 1100
1119 LeaveFunction(2); 1101 LeaveFunction(2);
1120 1102
1121 return 0; 1103 return 0;
1122} 1104}
1123 1105
1106static void ip_vs_dest_trash_expire(unsigned long data)
1107{
1108 struct net *net = (struct net *) data;
1109 struct netns_ipvs *ipvs = net_ipvs(net);
1110 struct ip_vs_dest *dest, *next;
1111
1112 spin_lock(&ipvs->dest_trash_lock);
1113 list_for_each_entry_safe(dest, next, &ipvs->dest_trash, t_list) {
1114 /* Skip if dest is in grace period */
1115 if (test_bit(IP_VS_DEST_STATE_REMOVING, &dest->state))
1116 continue;
1117 if (atomic_read(&dest->refcnt) > 0)
1118 continue;
1119 IP_VS_DBG_BUF(3, "Removing destination %u/%s:%u from trash\n",
1120 dest->vfwmark,
1121 IP_VS_DBG_ADDR(dest->svc->af, &dest->addr),
1122 ntohs(dest->port));
1123 list_del(&dest->t_list);
1124 ip_vs_dest_free(dest);
1125 }
1126 if (!list_empty(&ipvs->dest_trash))
1127 mod_timer(&ipvs->dest_trash_timer,
1128 jiffies + IP_VS_DEST_TRASH_PERIOD);
1129 spin_unlock(&ipvs->dest_trash_lock);
1130}
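With this timer the trash changes from "parked with refcnt 1" to "parked without any reference": __ip_vs_del_dest() drops its reference after queueing, and ip_vs_dest_trash_expire() reaps an entry only once its grace period is over and its refcnt has fallen to zero, re-arming itself while the list is non-empty. The hold/put helpers it pairs with are plain counting (bodies assumed from their call sites above):

#include <linux/atomic.h>

static inline void dest_hold(atomic_t *refcnt)
{
	atomic_inc(refcnt);		/* as ip_vs_dest_hold() is used */
}

static inline void dest_put(atomic_t *refcnt)
{
	atomic_dec(refcnt);		/* 0 makes the entry reapable */
}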
1124 1131
1125/* 1132/*
1126 * Add a service into the service hash table 1133 * Add a service into the service hash table
@@ -1176,7 +1183,6 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
1176 } 1183 }
1177 1184
1178 /* I'm the first user of the service */ 1185 /* I'm the first user of the service */
1179 atomic_set(&svc->usecnt, 0);
1180 atomic_set(&svc->refcnt, 0); 1186 atomic_set(&svc->refcnt, 0);
1181 1187
1182 svc->af = u->af; 1188 svc->af = u->af;
@@ -1190,7 +1196,7 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
1190 svc->net = net; 1196 svc->net = net;
1191 1197
1192 INIT_LIST_HEAD(&svc->destinations); 1198 INIT_LIST_HEAD(&svc->destinations);
1193 rwlock_init(&svc->sched_lock); 1199 spin_lock_init(&svc->sched_lock);
1194 spin_lock_init(&svc->stats.lock); 1200 spin_lock_init(&svc->stats.lock);
1195 1201
1196 /* Bind the scheduler */ 1202 /* Bind the scheduler */
@@ -1200,7 +1206,7 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
1200 sched = NULL; 1206 sched = NULL;
1201 1207
1202 /* Bind the ct retriever */ 1208 /* Bind the ct retriever */
1203 ip_vs_bind_pe(svc, pe); 1209 RCU_INIT_POINTER(svc->pe, pe);
1204 pe = NULL; 1210 pe = NULL;
1205 1211
1206 /* Update the virtual service counters */ 1212 /* Update the virtual service counters */
@@ -1216,9 +1222,7 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
1216 ipvs->num_services++; 1222 ipvs->num_services++;
1217 1223
1218 /* Hash the service into the service table */ 1224 /* Hash the service into the service table */
1219 write_lock_bh(&__ip_vs_svc_lock);
1220 ip_vs_svc_hash(svc); 1225 ip_vs_svc_hash(svc);
1221 write_unlock_bh(&__ip_vs_svc_lock);
1222 1226
1223 *svc_p = svc; 1227 *svc_p = svc;
1224 /* Now there is a service - full throttle */ 1228 /* Now there is a service - full throttle */
@@ -1228,15 +1232,8 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
1228 1232
1229 out_err: 1233 out_err:
1230 if (svc != NULL) { 1234 if (svc != NULL) {
1231 ip_vs_unbind_scheduler(svc); 1235 ip_vs_unbind_scheduler(svc, sched);
1232 if (svc->inc) { 1236 ip_vs_service_free(svc);
1233 local_bh_disable();
1234 ip_vs_app_inc_put(svc->inc);
1235 local_bh_enable();
1236 }
1237 if (svc->stats.cpustats)
1238 free_percpu(svc->stats.cpustats);
1239 kfree(svc);
1240 } 1237 }
1241 ip_vs_scheduler_put(sched); 1238 ip_vs_scheduler_put(sched);
1242 ip_vs_pe_put(pe); 1239 ip_vs_pe_put(pe);
@@ -1286,12 +1283,17 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
1286 } 1283 }
1287#endif 1284#endif
1288 1285
1289 write_lock_bh(&__ip_vs_svc_lock); 1286 old_sched = rcu_dereference_protected(svc->scheduler, 1);
1290 1287 if (sched != old_sched) {
1291 /* 1288 /* Bind the new scheduler */
1292 * Wait until all other svc users go away. 1289 ret = ip_vs_bind_scheduler(svc, sched);
1293 */ 1290 if (ret) {
1294 IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 0); 1291 old_sched = sched;
1292 goto out;
1293 }
1294 /* Unbind the old scheduler on success */
1295 ip_vs_unbind_scheduler(svc, old_sched);
1296 }
1295 1297
1296 /* 1298 /*
1297 * Set the flags and timeout value 1299 * Set the flags and timeout value
@@ -1300,57 +1302,30 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
1300 svc->timeout = u->timeout * HZ; 1302 svc->timeout = u->timeout * HZ;
1301 svc->netmask = u->netmask; 1303 svc->netmask = u->netmask;
1302 1304
1303 old_sched = svc->scheduler; 1305 old_pe = rcu_dereference_protected(svc->pe, 1);
1304 if (sched != old_sched) { 1306 if (pe != old_pe)
1305 /* 1307 rcu_assign_pointer(svc->pe, pe);
1306 * Unbind the old scheduler
1307 */
1308 if ((ret = ip_vs_unbind_scheduler(svc))) {
1309 old_sched = sched;
1310 goto out_unlock;
1311 }
1312
1313 /*
1314 * Bind the new scheduler
1315 */
1316 if ((ret = ip_vs_bind_scheduler(svc, sched))) {
1317 /*
1318 * If ip_vs_bind_scheduler fails, restore the old
1319 * scheduler.
1320 * The main reason of failure is out of memory.
1321 *
1322 * The question is if the old scheduler can be
1323 * restored all the time. TODO: if it cannot be
1324 * restored some time, we must delete the service,
1325 * otherwise the system may crash.
1326 */
1327 ip_vs_bind_scheduler(svc, old_sched);
1328 old_sched = sched;
1329 goto out_unlock;
1330 }
1331 }
1332 1308
1333 old_pe = svc->pe;
1334 if (pe != old_pe) {
1335 ip_vs_unbind_pe(svc);
1336 ip_vs_bind_pe(svc, pe);
1337 }
1338
1339out_unlock:
1340 write_unlock_bh(&__ip_vs_svc_lock);
1341out: 1309out:
1342 ip_vs_scheduler_put(old_sched); 1310 ip_vs_scheduler_put(old_sched);
1343 ip_vs_pe_put(old_pe); 1311 ip_vs_pe_put(old_pe);
1344 return ret; 1312 return ret;
1345} 1313}
1346 1314
1315static void ip_vs_service_rcu_free(struct rcu_head *head)
1316{
1317 struct ip_vs_service *svc;
1318
1319 svc = container_of(head, struct ip_vs_service, rcu_head);
1320 ip_vs_service_free(svc);
1321}
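Service freeing follows the same two-phase rule: __ip_vs_del_service() unhashes the service, and whoever drops the final reference hands the actual kfree() to RCU through ip_vs_service_rcu_free(), so a reader that found the service just before the unhash can finish safely. In outline (the helper name here is illustrative):

#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct svc_sketch {
	atomic_t refcnt;
	struct rcu_head rcu_head;
};

static void svc_rcu_free(struct rcu_head *head)
{
	kfree(container_of(head, struct svc_sketch, rcu_head));
}

static void svc_put(struct svc_sketch *svc)
{
	/* The last put defers the free past the current grace period. */
	if (atomic_dec_and_test(&svc->refcnt))
		call_rcu(&svc->rcu_head, svc_rcu_free);
}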
1347 1322
1348/* 1323/*
1349 * Delete a service from the service list 1324 * Delete a service from the service list
1350 * - The service must be unlinked, unlocked and not referenced! 1325 * - The service must be unlinked, unlocked and not referenced!
1351 * - We are called under _bh lock 1326 * - We are called under _bh lock
1352 */ 1327 */
1353static void __ip_vs_del_service(struct ip_vs_service *svc) 1328static void __ip_vs_del_service(struct ip_vs_service *svc, bool cleanup)
1354{ 1329{
1355 struct ip_vs_dest *dest, *nxt; 1330 struct ip_vs_dest *dest, *nxt;
1356 struct ip_vs_scheduler *old_sched; 1331 struct ip_vs_scheduler *old_sched;
@@ -1366,27 +1341,20 @@ static void __ip_vs_del_service(struct ip_vs_service *svc)
1366 ip_vs_stop_estimator(svc->net, &svc->stats); 1341 ip_vs_stop_estimator(svc->net, &svc->stats);
1367 1342
1368 /* Unbind scheduler */ 1343 /* Unbind scheduler */
1369 old_sched = svc->scheduler; 1344 old_sched = rcu_dereference_protected(svc->scheduler, 1);
1370 ip_vs_unbind_scheduler(svc); 1345 ip_vs_unbind_scheduler(svc, old_sched);
1371 ip_vs_scheduler_put(old_sched); 1346 ip_vs_scheduler_put(old_sched);
1372 1347
1373 /* Unbind persistence engine */ 1348 /* Unbind persistence engine, keep svc->pe */
1374 old_pe = svc->pe; 1349 old_pe = rcu_dereference_protected(svc->pe, 1);
1375 ip_vs_unbind_pe(svc);
1376 ip_vs_pe_put(old_pe); 1350 ip_vs_pe_put(old_pe);
1377 1351
1378 /* Unbind app inc */
1379 if (svc->inc) {
1380 ip_vs_app_inc_put(svc->inc);
1381 svc->inc = NULL;
1382 }
1383
1384 /* 1352 /*
1385 * Unlink the whole destination list 1353 * Unlink the whole destination list
1386 */ 1354 */
1387 list_for_each_entry_safe(dest, nxt, &svc->destinations, n_list) { 1355 list_for_each_entry_safe(dest, nxt, &svc->destinations, n_list) {
1388 __ip_vs_unlink_dest(svc, dest, 0); 1356 __ip_vs_unlink_dest(svc, dest, 0);
1389 __ip_vs_del_dest(svc->net, dest); 1357 __ip_vs_del_dest(svc->net, dest, cleanup);
1390 } 1358 }
1391 1359
1392 /* 1360 /*
@@ -1400,13 +1368,12 @@ static void __ip_vs_del_service(struct ip_vs_service *svc)
1400 /* 1368 /*
1401 * Free the service if nobody refers to it 1369 * Free the service if nobody refers to it
1402 */ 1370 */
1403 if (atomic_read(&svc->refcnt) == 0) { 1371 if (atomic_dec_and_test(&svc->refcnt)) {
1404 IP_VS_DBG_BUF(3, "Removing service %u/%s:%u usecnt=%d\n", 1372 IP_VS_DBG_BUF(3, "Removing service %u/%s:%u\n",
1405 svc->fwmark, 1373 svc->fwmark,
1406 IP_VS_DBG_ADDR(svc->af, &svc->addr), 1374 IP_VS_DBG_ADDR(svc->af, &svc->addr),
1407 ntohs(svc->port), atomic_read(&svc->usecnt)); 1375 ntohs(svc->port));
1408 free_percpu(svc->stats.cpustats); 1376 call_rcu(&svc->rcu_head, ip_vs_service_rcu_free);
1409 kfree(svc);
1410 } 1377 }
1411 1378
1412 /* decrease the module use count */ 1379 /* decrease the module use count */
@@ -1416,23 +1383,16 @@ static void __ip_vs_del_service(struct ip_vs_service *svc)
1416/* 1383/*
1417 * Unlink a service from list and try to delete it if its refcnt reached 0 1384 * Unlink a service from list and try to delete it if its refcnt reached 0
1418 */ 1385 */
1419static void ip_vs_unlink_service(struct ip_vs_service *svc) 1386static void ip_vs_unlink_service(struct ip_vs_service *svc, bool cleanup)
1420{ 1387{
1388 /* Hold svc to avoid double release from dest_trash */
1389 atomic_inc(&svc->refcnt);
1421 /* 1390 /*
1422 * Unhash it from the service table 1391 * Unhash it from the service table
1423 */ 1392 */
1424 write_lock_bh(&__ip_vs_svc_lock);
1425
1426 ip_vs_svc_unhash(svc); 1393 ip_vs_svc_unhash(svc);
1427 1394
1428 /* 1395 __ip_vs_del_service(svc, cleanup);
1429 * Wait until all the svc users go away.
1430 */
1431 IP_VS_WAIT_WHILE(atomic_read(&svc->usecnt) > 0);
1432
1433 __ip_vs_del_service(svc);
1434
1435 write_unlock_bh(&__ip_vs_svc_lock);
1436} 1396}
1437 1397
1438/* 1398/*
@@ -1442,7 +1402,7 @@ static int ip_vs_del_service(struct ip_vs_service *svc)
1442{ 1402{
1443 if (svc == NULL) 1403 if (svc == NULL)
1444 return -EEXIST; 1404 return -EEXIST;
1445 ip_vs_unlink_service(svc); 1405 ip_vs_unlink_service(svc, false);
1446 1406
1447 return 0; 1407 return 0;
1448} 1408}
@@ -1451,19 +1411,20 @@ static int ip_vs_del_service(struct ip_vs_service *svc)
1451/* 1411/*
1452 * Flush all the virtual services 1412 * Flush all the virtual services
1453 */ 1413 */
1454static int ip_vs_flush(struct net *net) 1414static int ip_vs_flush(struct net *net, bool cleanup)
1455{ 1415{
1456 int idx; 1416 int idx;
1457 struct ip_vs_service *svc, *nxt; 1417 struct ip_vs_service *svc;
1418 struct hlist_node *n;
1458 1419
1459 /* 1420 /*
1460 * Flush the service table hashed by <netns,protocol,addr,port> 1421 * Flush the service table hashed by <netns,protocol,addr,port>
1461 */ 1422 */
1462 for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { 1423 for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
1463 list_for_each_entry_safe(svc, nxt, &ip_vs_svc_table[idx], 1424 hlist_for_each_entry_safe(svc, n, &ip_vs_svc_table[idx],
1464 s_list) { 1425 s_list) {
1465 if (net_eq(svc->net, net)) 1426 if (net_eq(svc->net, net))
1466 ip_vs_unlink_service(svc); 1427 ip_vs_unlink_service(svc, cleanup);
1467 } 1428 }
1468 } 1429 }
1469 1430
@@ -1471,10 +1432,10 @@ static int ip_vs_flush(struct net *net)
1471 * Flush the service table hashed by fwmark 1432 * Flush the service table hashed by fwmark
1472 */ 1433 */
1473 for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { 1434 for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
1474 list_for_each_entry_safe(svc, nxt, 1435 hlist_for_each_entry_safe(svc, n, &ip_vs_svc_fwm_table[idx],
1475 &ip_vs_svc_fwm_table[idx], f_list) { 1436 f_list) {
1476 if (net_eq(svc->net, net)) 1437 if (net_eq(svc->net, net))
1477 ip_vs_unlink_service(svc); 1438 ip_vs_unlink_service(svc, cleanup);
1478 } 1439 }
1479 } 1440 }
1480 1441
@@ -1490,32 +1451,29 @@ void ip_vs_service_net_cleanup(struct net *net)
1490 EnterFunction(2); 1451 EnterFunction(2);
1491 /* Check for "full" addressed entries */ 1452 /* Check for "full" addressed entries */
1492 mutex_lock(&__ip_vs_mutex); 1453 mutex_lock(&__ip_vs_mutex);
1493 ip_vs_flush(net); 1454 ip_vs_flush(net, true);
1494 mutex_unlock(&__ip_vs_mutex); 1455 mutex_unlock(&__ip_vs_mutex);
1495 LeaveFunction(2); 1456 LeaveFunction(2);
1496} 1457}
1497/* 1458
1498 * Release dst hold by dst_cache 1459/* Put all references for device (dst_cache) */
1499 */
1500static inline void 1460static inline void
1501__ip_vs_dev_reset(struct ip_vs_dest *dest, struct net_device *dev) 1461ip_vs_forget_dev(struct ip_vs_dest *dest, struct net_device *dev)
1502{ 1462{
1503 spin_lock_bh(&dest->dst_lock); 1463 spin_lock_bh(&dest->dst_lock);
1504 if (dest->dst_cache && dest->dst_cache->dev == dev) { 1464 if (dest->dest_dst && dest->dest_dst->dst_cache->dev == dev) {
1505 IP_VS_DBG_BUF(3, "Reset dev:%s dest %s:%u ,dest->refcnt=%d\n", 1465 IP_VS_DBG_BUF(3, "Reset dev:%s dest %s:%u ,dest->refcnt=%d\n",
1506 dev->name, 1466 dev->name,
1507 IP_VS_DBG_ADDR(dest->af, &dest->addr), 1467 IP_VS_DBG_ADDR(dest->af, &dest->addr),
1508 ntohs(dest->port), 1468 ntohs(dest->port),
1509 atomic_read(&dest->refcnt)); 1469 atomic_read(&dest->refcnt));
1510 ip_vs_dst_reset(dest); 1470 __ip_vs_dst_cache_reset(dest);
1511 } 1471 }
1512 spin_unlock_bh(&dest->dst_lock); 1472 spin_unlock_bh(&dest->dst_lock);
1513 1473
1514} 1474}
1515/* 1475/* Netdev event receiver
1516 * Netdev event receiver 1476 * Currently only NETDEV_DOWN is handled to release refs to cached dsts
1517 * Currently only NETDEV_UNREGISTER is handled, i.e. if we hold a reference to
1518 * a device that is "unregister" it must be released.
1519 */ 1477 */
1520static int ip_vs_dst_event(struct notifier_block *this, unsigned long event, 1478static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
1521 void *ptr) 1479 void *ptr)
@@ -1527,35 +1485,37 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event,
1527 struct ip_vs_dest *dest; 1485 struct ip_vs_dest *dest;
1528 unsigned int idx; 1486 unsigned int idx;
1529 1487
1530 if (event != NETDEV_UNREGISTER || !ipvs) 1488 if (event != NETDEV_DOWN || !ipvs)
1531 return NOTIFY_DONE; 1489 return NOTIFY_DONE;
1532 IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name); 1490 IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name);
1533 EnterFunction(2); 1491 EnterFunction(2);
1534 mutex_lock(&__ip_vs_mutex); 1492 mutex_lock(&__ip_vs_mutex);
1535 for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { 1493 for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
1536 list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) { 1494 hlist_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
1537 if (net_eq(svc->net, net)) { 1495 if (net_eq(svc->net, net)) {
1538 list_for_each_entry(dest, &svc->destinations, 1496 list_for_each_entry(dest, &svc->destinations,
1539 n_list) { 1497 n_list) {
1540 __ip_vs_dev_reset(dest, dev); 1498 ip_vs_forget_dev(dest, dev);
1541 } 1499 }
1542 } 1500 }
1543 } 1501 }
1544 1502
1545 list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) { 1503 hlist_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
1546 if (net_eq(svc->net, net)) { 1504 if (net_eq(svc->net, net)) {
1547 list_for_each_entry(dest, &svc->destinations, 1505 list_for_each_entry(dest, &svc->destinations,
1548 n_list) { 1506 n_list) {
1549 __ip_vs_dev_reset(dest, dev); 1507 ip_vs_forget_dev(dest, dev);
1550 } 1508 }
1551 } 1509 }
1552 1510
1553 } 1511 }
1554 } 1512 }
1555 1513
1556 list_for_each_entry(dest, &ipvs->dest_trash, n_list) { 1514 spin_lock_bh(&ipvs->dest_trash_lock);
1557 __ip_vs_dev_reset(dest, dev); 1515 list_for_each_entry(dest, &ipvs->dest_trash, t_list) {
1516 ip_vs_forget_dev(dest, dev);
1558 } 1517 }
1518 spin_unlock_bh(&ipvs->dest_trash_lock);
1559 mutex_unlock(&__ip_vs_mutex); 1519 mutex_unlock(&__ip_vs_mutex);
1560 LeaveFunction(2); 1520 LeaveFunction(2);
1561 return NOTIFY_DONE; 1521 return NOTIFY_DONE;
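The hunk above narrows the notifier from NETDEV_UNREGISTER to NETDEV_DOWN and gives the trash list its own spinlock, apparently because the new dest_trash_timer can now expire trash entries outside __ip_vs_mutex. A minimal sketch of the resulting pattern, using hypothetical my_* names rather than the real IPVS types:

    #include <linux/netdevice.h>
    #include <linux/mutex.h>
    #include <linux/spinlock.h>
    #include <net/dst.h>

    struct my_dest {
            spinlock_t dst_lock;
            struct dst_entry *dst_cache;    /* cached route, if any */
            struct list_head t_list;        /* link in my_trash */
    };

    static DEFINE_MUTEX(my_mutex);          /* serializes table walkers */
    static LIST_HEAD(my_trash);             /* removed dests in grace period */
    static DEFINE_SPINLOCK(my_trash_lock);

    /* Drop a cached route that points at the dying device. */
    static void my_forget_dev(struct my_dest *dest, struct net_device *dev)
    {
            spin_lock_bh(&dest->dst_lock);
            if (dest->dst_cache && dest->dst_cache->dev == dev) {
                    dst_release(dest->dst_cache);
                    dest->dst_cache = NULL;
            }
            spin_unlock_bh(&dest->dst_lock);
    }

    static int my_dst_event(struct notifier_block *this, unsigned long event,
                            void *ptr)
    {
            struct net_device *dev = ptr;   /* 3.x notifier convention */
            struct my_dest *dest;

            if (event != NETDEV_DOWN)
                    return NOTIFY_DONE;
            mutex_lock(&my_mutex);
            /* ... walk the live service tables here, as in the hunk above ... */
            spin_lock_bh(&my_trash_lock);
            list_for_each_entry(dest, &my_trash, t_list)
                    my_forget_dev(dest, dev);
            spin_unlock_bh(&my_trash_lock);
            mutex_unlock(&my_mutex);
            return NOTIFY_DONE;
    }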
@@ -1568,12 +1528,10 @@ static int ip_vs_zero_service(struct ip_vs_service *svc)
1568{ 1528{
1569 struct ip_vs_dest *dest; 1529 struct ip_vs_dest *dest;
1570 1530
1571 write_lock_bh(&__ip_vs_svc_lock);
1572 list_for_each_entry(dest, &svc->destinations, n_list) { 1531 list_for_each_entry(dest, &svc->destinations, n_list) {
1573 ip_vs_zero_stats(&dest->stats); 1532 ip_vs_zero_stats(&dest->stats);
1574 } 1533 }
1575 ip_vs_zero_stats(&svc->stats); 1534 ip_vs_zero_stats(&svc->stats);
1576 write_unlock_bh(&__ip_vs_svc_lock);
1577 return 0; 1535 return 0;
1578} 1536}
1579 1537
@@ -1583,14 +1541,14 @@ static int ip_vs_zero_all(struct net *net)
1583 struct ip_vs_service *svc; 1541 struct ip_vs_service *svc;
1584 1542
1585 for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { 1543 for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
1586 list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) { 1544 hlist_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
1587 if (net_eq(svc->net, net)) 1545 if (net_eq(svc->net, net))
1588 ip_vs_zero_service(svc); 1546 ip_vs_zero_service(svc);
1589 } 1547 }
1590 } 1548 }
1591 1549
1592 for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { 1550 for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
1593 list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) { 1551 hlist_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
1594 if (net_eq(svc->net, net)) 1552 if (net_eq(svc->net, net))
1595 ip_vs_zero_service(svc); 1553 ip_vs_zero_service(svc);
1596 } 1554 }
@@ -1918,7 +1876,7 @@ static struct ctl_table vs_vars[] = {
1918 1876
1919struct ip_vs_iter { 1877struct ip_vs_iter {
1920 struct seq_net_private p; /* Do not move this, netns depends upon it*/ 1878 struct seq_net_private p; /* Do not move this, netns depends upon it*/
1921 struct list_head *table; 1879 struct hlist_head *table;
1922 int bucket; 1880 int bucket;
1923}; 1881};
1924 1882
@@ -1951,7 +1909,7 @@ static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos)
1951 1909
1952 /* look in hash by protocol */ 1910 /* look in hash by protocol */
1953 for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { 1911 for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
1954 list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) { 1912 hlist_for_each_entry_rcu(svc, &ip_vs_svc_table[idx], s_list) {
1955 if (net_eq(svc->net, net) && pos-- == 0) { 1913 if (net_eq(svc->net, net) && pos-- == 0) {
1956 iter->table = ip_vs_svc_table; 1914 iter->table = ip_vs_svc_table;
1957 iter->bucket = idx; 1915 iter->bucket = idx;
@@ -1962,7 +1920,8 @@ static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos)
1962 1920
1963 /* keep looking in fwmark */ 1921 /* keep looking in fwmark */
1964 for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { 1922 for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
1965 list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) { 1923 hlist_for_each_entry_rcu(svc, &ip_vs_svc_fwm_table[idx],
1924 f_list) {
1966 if (net_eq(svc->net, net) && pos-- == 0) { 1925 if (net_eq(svc->net, net) && pos-- == 0) {
1967 iter->table = ip_vs_svc_fwm_table; 1926 iter->table = ip_vs_svc_fwm_table;
1968 iter->bucket = idx; 1927 iter->bucket = idx;
@@ -1975,17 +1934,16 @@ static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos)
1975} 1934}
1976 1935
1977static void *ip_vs_info_seq_start(struct seq_file *seq, loff_t *pos) 1936static void *ip_vs_info_seq_start(struct seq_file *seq, loff_t *pos)
1978__acquires(__ip_vs_svc_lock)
1979{ 1937{
1980 1938
1981 read_lock_bh(&__ip_vs_svc_lock); 1939 rcu_read_lock();
1982 return *pos ? ip_vs_info_array(seq, *pos - 1) : SEQ_START_TOKEN; 1940 return *pos ? ip_vs_info_array(seq, *pos - 1) : SEQ_START_TOKEN;
1983} 1941}
1984 1942
1985 1943
1986static void *ip_vs_info_seq_next(struct seq_file *seq, void *v, loff_t *pos) 1944static void *ip_vs_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1987{ 1945{
1988 struct list_head *e; 1946 struct hlist_node *e;
1989 struct ip_vs_iter *iter; 1947 struct ip_vs_iter *iter;
1990 struct ip_vs_service *svc; 1948 struct ip_vs_service *svc;
1991 1949
@@ -1998,13 +1956,14 @@ static void *ip_vs_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1998 1956
1999 if (iter->table == ip_vs_svc_table) { 1957 if (iter->table == ip_vs_svc_table) {
2000 /* next service in table hashed by protocol */ 1958 /* next service in table hashed by protocol */
2001 if ((e = svc->s_list.next) != &ip_vs_svc_table[iter->bucket]) 1959 e = rcu_dereference(hlist_next_rcu(&svc->s_list));
2002 return list_entry(e, struct ip_vs_service, s_list); 1960 if (e)
2003 1961 return hlist_entry(e, struct ip_vs_service, s_list);
2004 1962
2005 while (++iter->bucket < IP_VS_SVC_TAB_SIZE) { 1963 while (++iter->bucket < IP_VS_SVC_TAB_SIZE) {
2006 list_for_each_entry(svc,&ip_vs_svc_table[iter->bucket], 1964 hlist_for_each_entry_rcu(svc,
2007 s_list) { 1965 &ip_vs_svc_table[iter->bucket],
1966 s_list) {
2008 return svc; 1967 return svc;
2009 } 1968 }
2010 } 1969 }
@@ -2015,13 +1974,15 @@ static void *ip_vs_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2015 } 1974 }
2016 1975
2017 /* next service in hashed by fwmark */ 1976 /* next service in hashed by fwmark */
2018 if ((e = svc->f_list.next) != &ip_vs_svc_fwm_table[iter->bucket]) 1977 e = rcu_dereference(hlist_next_rcu(&svc->f_list));
2019 return list_entry(e, struct ip_vs_service, f_list); 1978 if (e)
1979 return hlist_entry(e, struct ip_vs_service, f_list);
2020 1980
2021 scan_fwmark: 1981 scan_fwmark:
2022 while (++iter->bucket < IP_VS_SVC_TAB_SIZE) { 1982 while (++iter->bucket < IP_VS_SVC_TAB_SIZE) {
2023 list_for_each_entry(svc, &ip_vs_svc_fwm_table[iter->bucket], 1983 hlist_for_each_entry_rcu(svc,
2024 f_list) 1984 &ip_vs_svc_fwm_table[iter->bucket],
1985 f_list)
2025 return svc; 1986 return svc;
2026 } 1987 }
2027 1988
@@ -2029,9 +1990,8 @@ static void *ip_vs_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2029} 1990}
2030 1991
2031static void ip_vs_info_seq_stop(struct seq_file *seq, void *v) 1992static void ip_vs_info_seq_stop(struct seq_file *seq, void *v)
2032__releases(__ip_vs_svc_lock)
2033{ 1993{
2034 read_unlock_bh(&__ip_vs_svc_lock); 1994 rcu_read_unlock();
2035} 1995}
2036 1996
2037 1997
@@ -2049,6 +2009,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
2049 const struct ip_vs_service *svc = v; 2009 const struct ip_vs_service *svc = v;
2050 const struct ip_vs_iter *iter = seq->private; 2010 const struct ip_vs_iter *iter = seq->private;
2051 const struct ip_vs_dest *dest; 2011 const struct ip_vs_dest *dest;
2012 struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
2052 2013
2053 if (iter->table == ip_vs_svc_table) { 2014 if (iter->table == ip_vs_svc_table) {
2054#ifdef CONFIG_IP_VS_IPV6 2015#ifdef CONFIG_IP_VS_IPV6
@@ -2057,18 +2018,18 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
2057 ip_vs_proto_name(svc->protocol), 2018 ip_vs_proto_name(svc->protocol),
2058 &svc->addr.in6, 2019 &svc->addr.in6,
2059 ntohs(svc->port), 2020 ntohs(svc->port),
2060 svc->scheduler->name); 2021 sched->name);
2061 else 2022 else
2062#endif 2023#endif
2063 seq_printf(seq, "%s %08X:%04X %s %s ", 2024 seq_printf(seq, "%s %08X:%04X %s %s ",
2064 ip_vs_proto_name(svc->protocol), 2025 ip_vs_proto_name(svc->protocol),
2065 ntohl(svc->addr.ip), 2026 ntohl(svc->addr.ip),
2066 ntohs(svc->port), 2027 ntohs(svc->port),
2067 svc->scheduler->name, 2028 sched->name,
2068 (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":""); 2029 (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":"");
2069 } else { 2030 } else {
2070 seq_printf(seq, "FWM %08X %s %s", 2031 seq_printf(seq, "FWM %08X %s %s",
2071 svc->fwmark, svc->scheduler->name, 2032 svc->fwmark, sched->name,
2072 (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":""); 2033 (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":"");
2073 } 2034 }
2074 2035
@@ -2079,7 +2040,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
2079 else 2040 else
2080 seq_putc(seq, '\n'); 2041 seq_putc(seq, '\n');
2081 2042
2082 list_for_each_entry(dest, &svc->destinations, n_list) { 2043 list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
2083#ifdef CONFIG_IP_VS_IPV6 2044#ifdef CONFIG_IP_VS_IPV6
2084 if (dest->af == AF_INET6) 2045 if (dest->af == AF_INET6)
2085 seq_printf(seq, 2046 seq_printf(seq,
@@ -2389,7 +2350,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2389 2350
2390 if (cmd == IP_VS_SO_SET_FLUSH) { 2351 if (cmd == IP_VS_SO_SET_FLUSH) {
2391 /* Flush the virtual service */ 2352 /* Flush the virtual service */
2392 ret = ip_vs_flush(net); 2353 ret = ip_vs_flush(net, false);
2393 goto out_unlock; 2354 goto out_unlock;
2394 } else if (cmd == IP_VS_SO_SET_TIMEOUT) { 2355 } else if (cmd == IP_VS_SO_SET_TIMEOUT) {
2395 /* Set timeout values for (tcp tcpfin udp) */ 2356 /* Set timeout values for (tcp tcpfin udp) */
@@ -2424,11 +2385,13 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2424 } 2385 }
2425 2386
2426 /* Lookup the exact service by <protocol, addr, port> or fwmark */ 2387 /* Lookup the exact service by <protocol, addr, port> or fwmark */
2388 rcu_read_lock();
2427 if (usvc.fwmark == 0) 2389 if (usvc.fwmark == 0)
2428 svc = __ip_vs_service_find(net, usvc.af, usvc.protocol, 2390 svc = __ip_vs_service_find(net, usvc.af, usvc.protocol,
2429 &usvc.addr, usvc.port); 2391 &usvc.addr, usvc.port);
2430 else 2392 else
2431 svc = __ip_vs_svc_fwm_find(net, usvc.af, usvc.fwmark); 2393 svc = __ip_vs_svc_fwm_find(net, usvc.af, usvc.fwmark);
2394 rcu_read_unlock();
2432 2395
2433 if (cmd != IP_VS_SO_SET_ADD 2396 if (cmd != IP_VS_SO_SET_ADD
2434 && (svc == NULL || svc->protocol != usvc.protocol)) { 2397 && (svc == NULL || svc->protocol != usvc.protocol)) {
@@ -2480,11 +2443,14 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2480static void 2443static void
2481ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src) 2444ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src)
2482{ 2445{
2446 struct ip_vs_scheduler *sched;
2447
2448 sched = rcu_dereference_protected(src->scheduler, 1);
2483 dst->protocol = src->protocol; 2449 dst->protocol = src->protocol;
2484 dst->addr = src->addr.ip; 2450 dst->addr = src->addr.ip;
2485 dst->port = src->port; 2451 dst->port = src->port;
2486 dst->fwmark = src->fwmark; 2452 dst->fwmark = src->fwmark;
2487 strlcpy(dst->sched_name, src->scheduler->name, sizeof(dst->sched_name)); 2453 strlcpy(dst->sched_name, sched->name, sizeof(dst->sched_name));
2488 dst->flags = src->flags; 2454 dst->flags = src->flags;
2489 dst->timeout = src->timeout / HZ; 2455 dst->timeout = src->timeout / HZ;
2490 dst->netmask = src->netmask; 2456 dst->netmask = src->netmask;
@@ -2503,7 +2469,7 @@ __ip_vs_get_service_entries(struct net *net,
2503 int ret = 0; 2469 int ret = 0;
2504 2470
2505 for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { 2471 for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
2506 list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) { 2472 hlist_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
2507 /* Only expose IPv4 entries to old interface */ 2473 /* Only expose IPv4 entries to old interface */
2508 if (svc->af != AF_INET || !net_eq(svc->net, net)) 2474 if (svc->af != AF_INET || !net_eq(svc->net, net))
2509 continue; 2475 continue;
@@ -2522,7 +2488,7 @@ __ip_vs_get_service_entries(struct net *net,
2522 } 2488 }
2523 2489
2524 for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { 2490 for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
2525 list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) { 2491 hlist_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
2526 /* Only expose IPv4 entries to old interface */ 2492 /* Only expose IPv4 entries to old interface */
2527 if (svc->af != AF_INET || !net_eq(svc->net, net)) 2493 if (svc->af != AF_INET || !net_eq(svc->net, net))
2528 continue; 2494 continue;
@@ -2551,11 +2517,13 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
2551 union nf_inet_addr addr = { .ip = get->addr }; 2517 union nf_inet_addr addr = { .ip = get->addr };
2552 int ret = 0; 2518 int ret = 0;
2553 2519
2520 rcu_read_lock();
2554 if (get->fwmark) 2521 if (get->fwmark)
2555 svc = __ip_vs_svc_fwm_find(net, AF_INET, get->fwmark); 2522 svc = __ip_vs_svc_fwm_find(net, AF_INET, get->fwmark);
2556 else 2523 else
2557 svc = __ip_vs_service_find(net, AF_INET, get->protocol, &addr, 2524 svc = __ip_vs_service_find(net, AF_INET, get->protocol, &addr,
2558 get->port); 2525 get->port);
2526 rcu_read_unlock();
2559 2527
2560 if (svc) { 2528 if (svc) {
2561 int count = 0; 2529 int count = 0;
@@ -2738,12 +2706,14 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2738 2706
2739 entry = (struct ip_vs_service_entry *)arg; 2707 entry = (struct ip_vs_service_entry *)arg;
2740 addr.ip = entry->addr; 2708 addr.ip = entry->addr;
2709 rcu_read_lock();
2741 if (entry->fwmark) 2710 if (entry->fwmark)
2742 svc = __ip_vs_svc_fwm_find(net, AF_INET, entry->fwmark); 2711 svc = __ip_vs_svc_fwm_find(net, AF_INET, entry->fwmark);
2743 else 2712 else
2744 svc = __ip_vs_service_find(net, AF_INET, 2713 svc = __ip_vs_service_find(net, AF_INET,
2745 entry->protocol, &addr, 2714 entry->protocol, &addr,
2746 entry->port); 2715 entry->port);
2716 rcu_read_unlock();
2747 if (svc) { 2717 if (svc) {
2748 ip_vs_copy_service(entry, svc); 2718 ip_vs_copy_service(entry, svc);
2749 if (copy_to_user(user, entry, sizeof(*entry)) != 0) 2719 if (copy_to_user(user, entry, sizeof(*entry)) != 0)
@@ -2900,6 +2870,7 @@ nla_put_failure:
2900static int ip_vs_genl_fill_service(struct sk_buff *skb, 2870static int ip_vs_genl_fill_service(struct sk_buff *skb,
2901 struct ip_vs_service *svc) 2871 struct ip_vs_service *svc)
2902{ 2872{
2873 struct ip_vs_scheduler *sched;
2903 struct nlattr *nl_service; 2874 struct nlattr *nl_service;
2904 struct ip_vs_flags flags = { .flags = svc->flags, 2875 struct ip_vs_flags flags = { .flags = svc->flags,
2905 .mask = ~0 }; 2876 .mask = ~0 };
@@ -2920,7 +2891,8 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb,
2920 goto nla_put_failure; 2891 goto nla_put_failure;
2921 } 2892 }
2922 2893
2923 if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, svc->scheduler->name) || 2894 sched = rcu_dereference_protected(svc->scheduler, 1);
2895 if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, sched->name) ||
2924 (svc->pe && 2896 (svc->pe &&
2925 nla_put_string(skb, IPVS_SVC_ATTR_PE_NAME, svc->pe->name)) || 2897 nla_put_string(skb, IPVS_SVC_ATTR_PE_NAME, svc->pe->name)) ||
2926 nla_put(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags) || 2898 nla_put(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags) ||
@@ -2971,7 +2943,7 @@ static int ip_vs_genl_dump_services(struct sk_buff *skb,
2971 2943
2972 mutex_lock(&__ip_vs_mutex); 2944 mutex_lock(&__ip_vs_mutex);
2973 for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) { 2945 for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) {
2974 list_for_each_entry(svc, &ip_vs_svc_table[i], s_list) { 2946 hlist_for_each_entry(svc, &ip_vs_svc_table[i], s_list) {
2975 if (++idx <= start || !net_eq(svc->net, net)) 2947 if (++idx <= start || !net_eq(svc->net, net))
2976 continue; 2948 continue;
2977 if (ip_vs_genl_dump_service(skb, svc, cb) < 0) { 2949 if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
@@ -2982,7 +2954,7 @@ static int ip_vs_genl_dump_services(struct sk_buff *skb,
2982 } 2954 }
2983 2955
2984 for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) { 2956 for (i = 0; i < IP_VS_SVC_TAB_SIZE; i++) {
2985 list_for_each_entry(svc, &ip_vs_svc_fwm_table[i], f_list) { 2957 hlist_for_each_entry(svc, &ip_vs_svc_fwm_table[i], f_list) {
2986 if (++idx <= start || !net_eq(svc->net, net)) 2958 if (++idx <= start || !net_eq(svc->net, net))
2987 continue; 2959 continue;
2988 if (ip_vs_genl_dump_service(skb, svc, cb) < 0) { 2960 if (ip_vs_genl_dump_service(skb, svc, cb) < 0) {
@@ -3042,11 +3014,13 @@ static int ip_vs_genl_parse_service(struct net *net,
3042 usvc->fwmark = 0; 3014 usvc->fwmark = 0;
3043 } 3015 }
3044 3016
3017 rcu_read_lock();
3045 if (usvc->fwmark) 3018 if (usvc->fwmark)
3046 svc = __ip_vs_svc_fwm_find(net, usvc->af, usvc->fwmark); 3019 svc = __ip_vs_svc_fwm_find(net, usvc->af, usvc->fwmark);
3047 else 3020 else
3048 svc = __ip_vs_service_find(net, usvc->af, usvc->protocol, 3021 svc = __ip_vs_service_find(net, usvc->af, usvc->protocol,
3049 &usvc->addr, usvc->port); 3022 &usvc->addr, usvc->port);
3023 rcu_read_unlock();
3050 *ret_svc = svc; 3024 *ret_svc = svc;
3051 3025
3052 /* If a full entry was requested, check for the additional fields */ 3026 /* If a full entry was requested, check for the additional fields */
@@ -3398,7 +3372,7 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info)
3398 mutex_lock(&__ip_vs_mutex); 3372 mutex_lock(&__ip_vs_mutex);
3399 3373
3400 if (cmd == IPVS_CMD_FLUSH) { 3374 if (cmd == IPVS_CMD_FLUSH) {
3401 ret = ip_vs_flush(net); 3375 ret = ip_vs_flush(net, false);
3402 goto out; 3376 goto out;
3403 } else if (cmd == IPVS_CMD_SET_CONFIG) { 3377 } else if (cmd == IPVS_CMD_SET_CONFIG) {
3404 ret = ip_vs_genl_set_config(net, info->attrs); 3378 ret = ip_vs_genl_set_config(net, info->attrs);
@@ -3790,13 +3764,14 @@ int __net_init ip_vs_control_net_init(struct net *net)
3790 int idx; 3764 int idx;
3791 struct netns_ipvs *ipvs = net_ipvs(net); 3765 struct netns_ipvs *ipvs = net_ipvs(net);
3792 3766
3793 rwlock_init(&ipvs->rs_lock);
3794
3795 /* Initialize rs_table */ 3767 /* Initialize rs_table */
3796 for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++) 3768 for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++)
3797 INIT_LIST_HEAD(&ipvs->rs_table[idx]); 3769 INIT_HLIST_HEAD(&ipvs->rs_table[idx]);
3798 3770
3799 INIT_LIST_HEAD(&ipvs->dest_trash); 3771 INIT_LIST_HEAD(&ipvs->dest_trash);
3772 spin_lock_init(&ipvs->dest_trash_lock);
3773 setup_timer(&ipvs->dest_trash_timer, ip_vs_dest_trash_expire,
3774 (unsigned long) net);
3800 atomic_set(&ipvs->ftpsvc_counter, 0); 3775 atomic_set(&ipvs->ftpsvc_counter, 0);
3801 atomic_set(&ipvs->nullsvc_counter, 0); 3776 atomic_set(&ipvs->nullsvc_counter, 0);
3802 3777
@@ -3826,6 +3801,10 @@ void __net_exit ip_vs_control_net_cleanup(struct net *net)
3826{ 3801{
3827 struct netns_ipvs *ipvs = net_ipvs(net); 3802 struct netns_ipvs *ipvs = net_ipvs(net);
3828 3803
3804 /* Some dest can be in grace period even before cleanup, we have to
3805 * defer ip_vs_trash_cleanup until ip_vs_dest_wait_readers is called.
3806 */
3807 rcu_barrier();
3829 ip_vs_trash_cleanup(net); 3808 ip_vs_trash_cleanup(net);
3830 ip_vs_stop_estimator(net, &ipvs->tot_stats); 3809 ip_vs_stop_estimator(net, &ipvs->tot_stats);
3831 ip_vs_control_net_cleanup_sysctl(net); 3810 ip_vs_control_net_cleanup_sysctl(net);
@@ -3871,10 +3850,10 @@ int __init ip_vs_control_init(void)
3871 3850
3872 EnterFunction(2); 3851 EnterFunction(2);
3873 3852
3874 /* Initialize svc_table, ip_vs_svc_fwm_table, rs_table */ 3853 /* Initialize svc_table, ip_vs_svc_fwm_table */
3875 for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { 3854 for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
3876 INIT_LIST_HEAD(&ip_vs_svc_table[idx]); 3855 INIT_HLIST_HEAD(&ip_vs_svc_table[idx]);
3877 INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]); 3856 INIT_HLIST_HEAD(&ip_vs_svc_fwm_table[idx]);
3878 } 3857 }
3879 3858
3880 smp_wmb(); /* Do we really need it now ? */ 3859 smp_wmb(); /* Do we really need it now ? */
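Taken together, the ip_vs_ctl.c changes replace the global __ip_vs_svc_lock rwlock with write-side serialization on __ip_vs_mutex plus lockless RCU readers over hlist-based tables. A condensed sketch of that discipline (hypothetical my_* names, not the IPVS API):

    #include <linux/rculist.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    #define MY_TAB_SIZE 256

    struct my_svc {
            struct hlist_node s_list;
            u32 key;
            struct rcu_head rcu_head;
    };

    static struct hlist_head my_svc_table[MY_TAB_SIZE];
    static DEFINE_MUTEX(my_mutex);          /* serializes all writers */

    /* Caller holds rcu_read_lock(); no atomic ops on the fast path. */
    static struct my_svc *my_svc_find(u32 key)
    {
            struct my_svc *svc;

            hlist_for_each_entry_rcu(svc, &my_svc_table[key % MY_TAB_SIZE],
                                     s_list)
                    if (svc->key == key)
                            return svc;
            return NULL;
    }

    static void my_svc_add(struct my_svc *svc)
    {
            mutex_lock(&my_mutex);
            hlist_add_head_rcu(&svc->s_list,
                               &my_svc_table[svc->key % MY_TAB_SIZE]);
            mutex_unlock(&my_mutex);
    }

    static void my_svc_del(struct my_svc *svc)
    {
            mutex_lock(&my_mutex);
            hlist_del_rcu(&svc->s_list);
            mutex_unlock(&my_mutex);
            kfree_rcu(svc, rcu_head);       /* free after readers drain */
    }

Lookups from packet processing then take no locks and no atomic operations, while configuration changes stay serialized on the mutex.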
diff --git a/net/netfilter/ipvs/ip_vs_dh.c b/net/netfilter/ipvs/ip_vs_dh.c
index 7f3b0cc00b7a..ccab120df45e 100644
--- a/net/netfilter/ipvs/ip_vs_dh.c
+++ b/net/netfilter/ipvs/ip_vs_dh.c
@@ -51,7 +51,7 @@
51 * IPVS DH bucket 51 * IPVS DH bucket
52 */ 52 */
53struct ip_vs_dh_bucket { 53struct ip_vs_dh_bucket {
54 struct ip_vs_dest *dest; /* real server (cache) */ 54 struct ip_vs_dest __rcu *dest; /* real server (cache) */
55}; 55};
56 56
57/* 57/*
@@ -64,6 +64,10 @@ struct ip_vs_dh_bucket {
64#define IP_VS_DH_TAB_SIZE (1 << IP_VS_DH_TAB_BITS) 64#define IP_VS_DH_TAB_SIZE (1 << IP_VS_DH_TAB_BITS)
65#define IP_VS_DH_TAB_MASK (IP_VS_DH_TAB_SIZE - 1) 65#define IP_VS_DH_TAB_MASK (IP_VS_DH_TAB_SIZE - 1)
66 66
67struct ip_vs_dh_state {
68 struct ip_vs_dh_bucket buckets[IP_VS_DH_TAB_SIZE];
69 struct rcu_head rcu_head;
70};
67 71
68/* 72/*
69 * Returns hash value for IPVS DH entry 73 * Returns hash value for IPVS DH entry
@@ -85,10 +89,9 @@ static inline unsigned int ip_vs_dh_hashkey(int af, const union nf_inet_addr *ad
85 * Get ip_vs_dest associated with supplied parameters. 89 * Get ip_vs_dest associated with supplied parameters.
86 */ 90 */
87static inline struct ip_vs_dest * 91static inline struct ip_vs_dest *
88ip_vs_dh_get(int af, struct ip_vs_dh_bucket *tbl, 92ip_vs_dh_get(int af, struct ip_vs_dh_state *s, const union nf_inet_addr *addr)
89 const union nf_inet_addr *addr)
90{ 93{
91 return (tbl[ip_vs_dh_hashkey(af, addr)]).dest; 94 return rcu_dereference(s->buckets[ip_vs_dh_hashkey(af, addr)].dest);
92} 95}
93 96
94 97
@@ -96,25 +99,30 @@ ip_vs_dh_get(int af, struct ip_vs_dh_bucket *tbl,
96 * Assign all the hash buckets of the specified table with the service. 99 * Assign all the hash buckets of the specified table with the service.
97 */ 100 */
98static int 101static int
99ip_vs_dh_assign(struct ip_vs_dh_bucket *tbl, struct ip_vs_service *svc) 102ip_vs_dh_reassign(struct ip_vs_dh_state *s, struct ip_vs_service *svc)
100{ 103{
101 int i; 104 int i;
102 struct ip_vs_dh_bucket *b; 105 struct ip_vs_dh_bucket *b;
103 struct list_head *p; 106 struct list_head *p;
104 struct ip_vs_dest *dest; 107 struct ip_vs_dest *dest;
108 bool empty;
105 109
106 b = tbl; 110 b = &s->buckets[0];
107 p = &svc->destinations; 111 p = &svc->destinations;
112 empty = list_empty(p);
108 for (i=0; i<IP_VS_DH_TAB_SIZE; i++) { 113 for (i=0; i<IP_VS_DH_TAB_SIZE; i++) {
109 if (list_empty(p)) { 114 dest = rcu_dereference_protected(b->dest, 1);
110 b->dest = NULL; 115 if (dest)
111 } else { 116 ip_vs_dest_put(dest);
117 if (empty)
118 RCU_INIT_POINTER(b->dest, NULL);
119 else {
112 if (p == &svc->destinations) 120 if (p == &svc->destinations)
113 p = p->next; 121 p = p->next;
114 122
115 dest = list_entry(p, struct ip_vs_dest, n_list); 123 dest = list_entry(p, struct ip_vs_dest, n_list);
116 atomic_inc(&dest->refcnt); 124 ip_vs_dest_hold(dest);
117 b->dest = dest; 125 RCU_INIT_POINTER(b->dest, dest);
118 126
119 p = p->next; 127 p = p->next;
120 } 128 }
@@ -127,16 +135,18 @@ ip_vs_dh_assign(struct ip_vs_dh_bucket *tbl, struct ip_vs_service *svc)
127/* 135/*
128 * Flush all the hash buckets of the specified table. 136 * Flush all the hash buckets of the specified table.
129 */ 137 */
130static void ip_vs_dh_flush(struct ip_vs_dh_bucket *tbl) 138static void ip_vs_dh_flush(struct ip_vs_dh_state *s)
131{ 139{
132 int i; 140 int i;
133 struct ip_vs_dh_bucket *b; 141 struct ip_vs_dh_bucket *b;
142 struct ip_vs_dest *dest;
134 143
135 b = tbl; 144 b = &s->buckets[0];
136 for (i=0; i<IP_VS_DH_TAB_SIZE; i++) { 145 for (i=0; i<IP_VS_DH_TAB_SIZE; i++) {
137 if (b->dest) { 146 dest = rcu_dereference_protected(b->dest, 1);
138 atomic_dec(&b->dest->refcnt); 147 if (dest) {
139 b->dest = NULL; 148 ip_vs_dest_put(dest);
149 RCU_INIT_POINTER(b->dest, NULL);
140 } 150 }
141 b++; 151 b++;
142 } 152 }
@@ -145,51 +155,46 @@ static void ip_vs_dh_flush(struct ip_vs_dh_bucket *tbl)
145 155
146static int ip_vs_dh_init_svc(struct ip_vs_service *svc) 156static int ip_vs_dh_init_svc(struct ip_vs_service *svc)
147{ 157{
148 struct ip_vs_dh_bucket *tbl; 158 struct ip_vs_dh_state *s;
149 159
150 /* allocate the DH table for this service */ 160 /* allocate the DH table for this service */
151 tbl = kmalloc(sizeof(struct ip_vs_dh_bucket)*IP_VS_DH_TAB_SIZE, 161 s = kzalloc(sizeof(struct ip_vs_dh_state), GFP_KERNEL);
152 GFP_KERNEL); 162 if (s == NULL)
153 if (tbl == NULL)
154 return -ENOMEM; 163 return -ENOMEM;
155 164
156 svc->sched_data = tbl; 165 svc->sched_data = s;
157 IP_VS_DBG(6, "DH hash table (memory=%Zdbytes) allocated for " 166 IP_VS_DBG(6, "DH hash table (memory=%Zdbytes) allocated for "
158 "current service\n", 167 "current service\n",
159 sizeof(struct ip_vs_dh_bucket)*IP_VS_DH_TAB_SIZE); 168 sizeof(struct ip_vs_dh_bucket)*IP_VS_DH_TAB_SIZE);
160 169
161 /* assign the hash buckets with the updated service */ 170 /* assign the hash buckets with current dests */
162 ip_vs_dh_assign(tbl, svc); 171 ip_vs_dh_reassign(s, svc);
163 172
164 return 0; 173 return 0;
165} 174}
166 175
167 176
168static int ip_vs_dh_done_svc(struct ip_vs_service *svc) 177static void ip_vs_dh_done_svc(struct ip_vs_service *svc)
169{ 178{
170 struct ip_vs_dh_bucket *tbl = svc->sched_data; 179 struct ip_vs_dh_state *s = svc->sched_data;
171 180
172 /* got to clean up hash buckets here */ 181 /* got to clean up hash buckets here */
173 ip_vs_dh_flush(tbl); 182 ip_vs_dh_flush(s);
174 183
175 /* release the table itself */ 184 /* release the table itself */
176 kfree(svc->sched_data); 185 kfree_rcu(s, rcu_head);
177 IP_VS_DBG(6, "DH hash table (memory=%Zdbytes) released\n", 186 IP_VS_DBG(6, "DH hash table (memory=%Zdbytes) released\n",
178 sizeof(struct ip_vs_dh_bucket)*IP_VS_DH_TAB_SIZE); 187 sizeof(struct ip_vs_dh_bucket)*IP_VS_DH_TAB_SIZE);
179
180 return 0;
181} 188}
182 189
183 190
184static int ip_vs_dh_update_svc(struct ip_vs_service *svc) 191static int ip_vs_dh_dest_changed(struct ip_vs_service *svc,
192 struct ip_vs_dest *dest)
185{ 193{
186 struct ip_vs_dh_bucket *tbl = svc->sched_data; 194 struct ip_vs_dh_state *s = svc->sched_data;
187
188 /* got to clean up hash buckets here */
189 ip_vs_dh_flush(tbl);
190 195
191 /* assign the hash buckets with the updated service */ 196 /* assign the hash buckets with the updated service */
192 ip_vs_dh_assign(tbl, svc); 197 ip_vs_dh_reassign(s, svc);
193 198
194 return 0; 199 return 0;
195} 200}
@@ -212,19 +217,20 @@ static struct ip_vs_dest *
212ip_vs_dh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) 217ip_vs_dh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
213{ 218{
214 struct ip_vs_dest *dest; 219 struct ip_vs_dest *dest;
215 struct ip_vs_dh_bucket *tbl; 220 struct ip_vs_dh_state *s;
216 struct ip_vs_iphdr iph; 221 struct ip_vs_iphdr iph;
217 222
218 ip_vs_fill_iph_addr_only(svc->af, skb, &iph); 223 ip_vs_fill_iph_addr_only(svc->af, skb, &iph);
219 224
220 IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); 225 IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
221 226
222 tbl = (struct ip_vs_dh_bucket *)svc->sched_data; 227 s = (struct ip_vs_dh_state *) svc->sched_data;
223 dest = ip_vs_dh_get(svc->af, tbl, &iph.daddr); 228 dest = ip_vs_dh_get(svc->af, s, &iph.daddr);
224 if (!dest 229 if (!dest
225 || !(dest->flags & IP_VS_DEST_F_AVAILABLE) 230 || !(dest->flags & IP_VS_DEST_F_AVAILABLE)
226 || atomic_read(&dest->weight) <= 0 231 || atomic_read(&dest->weight) <= 0
227 || is_overloaded(dest)) { 232 || is_overloaded(dest)) {
233 ip_vs_scheduler_err(svc, "no destination available");
228 return NULL; 234 return NULL;
229 } 235 }
230 236
@@ -248,7 +254,8 @@ static struct ip_vs_scheduler ip_vs_dh_scheduler =
248 .n_list = LIST_HEAD_INIT(ip_vs_dh_scheduler.n_list), 254 .n_list = LIST_HEAD_INIT(ip_vs_dh_scheduler.n_list),
249 .init_service = ip_vs_dh_init_svc, 255 .init_service = ip_vs_dh_init_svc,
250 .done_service = ip_vs_dh_done_svc, 256 .done_service = ip_vs_dh_done_svc,
251 .update_service = ip_vs_dh_update_svc, 257 .add_dest = ip_vs_dh_dest_changed,
258 .del_dest = ip_vs_dh_dest_changed,
252 .schedule = ip_vs_dh_schedule, 259 .schedule = ip_vs_dh_schedule,
253}; 260};
254 261
@@ -262,6 +269,7 @@ static int __init ip_vs_dh_init(void)
262static void __exit ip_vs_dh_cleanup(void) 269static void __exit ip_vs_dh_cleanup(void)
263{ 270{
264 unregister_ip_vs_scheduler(&ip_vs_dh_scheduler); 271 unregister_ip_vs_scheduler(&ip_vs_dh_scheduler);
272 synchronize_rcu();
265} 273}
266 274
267 275
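The DH conversion above makes each bucket's dest pointer __rcu. A minimal sketch of the update side, assuming hypothetical my_dest_hold()/my_dest_put() refcount helpers:

    #include <linux/rcupdate.h>

    struct my_dest;
    void my_dest_hold(struct my_dest *d);
    void my_dest_put(struct my_dest *d);

    struct my_bucket {
            struct my_dest __rcu *dest;
    };

    /* Runs under the writer mutex, hence the ", 1" lockdep condition. */
    static void my_bucket_set(struct my_bucket *b, struct my_dest *dest)
    {
            struct my_dest *old = rcu_dereference_protected(b->dest, 1);

            if (old)
                    my_dest_put(old);       /* ref taken when published */
            if (dest)
                    my_dest_hold(dest);
            RCU_INIT_POINTER(b->dest, dest); /* readers: rcu_dereference() */
    }

RCU_INIT_POINTER() suffices here instead of rcu_assign_pointer() because the destination being published is already globally visible; the store needs no ordering against initialization of the pointed-to object, which matches the "No ordering constraints for refcnt" comment in the lblc hunk below.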
diff --git a/net/netfilter/ipvs/ip_vs_est.c b/net/netfilter/ipvs/ip_vs_est.c
index 0fac6017b6fb..6bee6d0c73a5 100644
--- a/net/netfilter/ipvs/ip_vs_est.c
+++ b/net/netfilter/ipvs/ip_vs_est.c
@@ -56,7 +56,7 @@
56 * Make a summary from each cpu 56 * Make a summary from each cpu
57 */ 57 */
58static void ip_vs_read_cpu_stats(struct ip_vs_stats_user *sum, 58static void ip_vs_read_cpu_stats(struct ip_vs_stats_user *sum,
59 struct ip_vs_cpu_stats *stats) 59 struct ip_vs_cpu_stats __percpu *stats)
60{ 60{
61 int i; 61 int i;
62 62
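The lone ip_vs_est.c hunk only adds the __percpu annotation, letting sparse verify that the pointer is used through the per-cpu accessors rather than dereferenced directly. A toy version of the summation it annotates (hypothetical my_cpu_stats; the real function additionally uses the u64_stats seqcount so 64-bit counters read consistently on 32-bit hosts):

    #include <linux/percpu.h>
    #include <linux/types.h>

    struct my_cpu_stats {
            u64 inpkts;
    };

    static u64 my_sum_inpkts(struct my_cpu_stats __percpu *stats)
    {
            u64 sum = 0;
            int cpu;

            for_each_possible_cpu(cpu)
                    sum += per_cpu_ptr(stats, cpu)->inpkts;
            return sum;
    }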
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 4f53a5f04437..77c173282f38 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -267,10 +267,12 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
267 * hopefully it will succeed on the retransmitted 267 * hopefully it will succeed on the retransmitted
268 * packet. 268 * packet.
269 */ 269 */
270 rcu_read_lock();
270 ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo, 271 ret = nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
271 iph->ihl * 4, 272 iph->ihl * 4,
272 start-data, end-start, 273 start-data, end-start,
273 buf, buf_len); 274 buf, buf_len);
275 rcu_read_unlock();
274 if (ret) { 276 if (ret) {
275 ip_vs_nfct_expect_related(skb, ct, n_cp, 277 ip_vs_nfct_expect_related(skb, ct, n_cp,
276 IPPROTO_TCP, 0, 0); 278 IPPROTO_TCP, 0, 0);
@@ -480,6 +482,7 @@ static int __init ip_vs_ftp_init(void)
480 int rv; 482 int rv;
481 483
482 rv = register_pernet_subsys(&ip_vs_ftp_ops); 484 rv = register_pernet_subsys(&ip_vs_ftp_ops);
485 /* rcu_barrier() is called by netns on error */
483 return rv; 486 return rv;
484} 487}
485 488
@@ -489,6 +492,7 @@ static int __init ip_vs_ftp_init(void)
489static void __exit ip_vs_ftp_exit(void) 492static void __exit ip_vs_ftp_exit(void)
490{ 493{
491 unregister_pernet_subsys(&ip_vs_ftp_ops); 494 unregister_pernet_subsys(&ip_vs_ftp_ops);
495 /* rcu_barrier() is called by netns */
492} 496}
493 497
494 498
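The ip_vs_ftp.c change brackets nf_nat_mangle_tcp_packet() with rcu_read_lock(), presumably because the NAT helper now performs RCU lookups internally and this caller is not otherwise in a read-side section. As a generic sketch of that calling convention (my_mangle is a hypothetical stand-in):

    #include <linux/skbuff.h>
    #include <linux/rcupdate.h>

    int my_mangle(struct sk_buff *skb);     /* stand-in for the NAT helper */

    static int my_do_mangle(struct sk_buff *skb)
    {
            int ret;

            rcu_read_lock();        /* helper does RCU lookups internally */
            ret = my_mangle(skb);
            rcu_read_unlock();
            return ret;
    }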
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index fdd89b9564ea..b2cc2528a4df 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -90,11 +90,12 @@
90 * IP address and its destination server 90 * IP address and its destination server
91 */ 91 */
92struct ip_vs_lblc_entry { 92struct ip_vs_lblc_entry {
93 struct list_head list; 93 struct hlist_node list;
94 int af; /* address family */ 94 int af; /* address family */
95 union nf_inet_addr addr; /* destination IP address */ 95 union nf_inet_addr addr; /* destination IP address */
96 struct ip_vs_dest *dest; /* real server (cache) */ 96 struct ip_vs_dest __rcu *dest; /* real server (cache) */
97 unsigned long lastuse; /* last used time */ 97 unsigned long lastuse; /* last used time */
98 struct rcu_head rcu_head;
98}; 99};
99 100
100 101
@@ -102,12 +103,14 @@ struct ip_vs_lblc_entry {
102 * IPVS lblc hash table 103 * IPVS lblc hash table
103 */ 104 */
104struct ip_vs_lblc_table { 105struct ip_vs_lblc_table {
105 struct list_head bucket[IP_VS_LBLC_TAB_SIZE]; /* hash bucket */ 106 struct rcu_head rcu_head;
107 struct hlist_head __rcu bucket[IP_VS_LBLC_TAB_SIZE]; /* hash bucket */
108 struct timer_list periodic_timer; /* collect stale entries */
106 atomic_t entries; /* number of entries */ 109 atomic_t entries; /* number of entries */
107 int max_size; /* maximum size of entries */ 110 int max_size; /* maximum size of entries */
108 struct timer_list periodic_timer; /* collect stale entries */
109 int rover; /* rover for expire check */ 111 int rover; /* rover for expire check */
110 int counter; /* counter for no expire */ 112 int counter; /* counter for no expire */
113 bool dead;
111}; 114};
112 115
113 116
@@ -129,13 +132,16 @@ static ctl_table vs_vars_table[] = {
129 132
130static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en) 133static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en)
131{ 134{
132 list_del(&en->list); 135 struct ip_vs_dest *dest;
136
137 hlist_del_rcu(&en->list);
133 /* 138 /*
134 * We don't kfree dest because it is referred either by its service 139 * We don't kfree dest because it is referred either by its service
135 * or the trash dest list. 140 * or the trash dest list.
136 */ 141 */
137 atomic_dec(&en->dest->refcnt); 142 dest = rcu_dereference_protected(en->dest, 1);
138 kfree(en); 143 ip_vs_dest_put(dest);
144 kfree_rcu(en, rcu_head);
139} 145}
140 146
141 147
@@ -165,15 +171,12 @@ ip_vs_lblc_hash(struct ip_vs_lblc_table *tbl, struct ip_vs_lblc_entry *en)
165{ 171{
166 unsigned int hash = ip_vs_lblc_hashkey(en->af, &en->addr); 172 unsigned int hash = ip_vs_lblc_hashkey(en->af, &en->addr);
167 173
168 list_add(&en->list, &tbl->bucket[hash]); 174 hlist_add_head_rcu(&en->list, &tbl->bucket[hash]);
169 atomic_inc(&tbl->entries); 175 atomic_inc(&tbl->entries);
170} 176}
171 177
172 178
173/* 179/* Get ip_vs_lblc_entry associated with supplied parameters. */
174 * Get ip_vs_lblc_entry associated with supplied parameters. Called under read
175 * lock
176 */
177static inline struct ip_vs_lblc_entry * 180static inline struct ip_vs_lblc_entry *
178ip_vs_lblc_get(int af, struct ip_vs_lblc_table *tbl, 181ip_vs_lblc_get(int af, struct ip_vs_lblc_table *tbl,
179 const union nf_inet_addr *addr) 182 const union nf_inet_addr *addr)
@@ -181,7 +184,7 @@ ip_vs_lblc_get(int af, struct ip_vs_lblc_table *tbl,
181 unsigned int hash = ip_vs_lblc_hashkey(af, addr); 184 unsigned int hash = ip_vs_lblc_hashkey(af, addr);
182 struct ip_vs_lblc_entry *en; 185 struct ip_vs_lblc_entry *en;
183 186
184 list_for_each_entry(en, &tbl->bucket[hash], list) 187 hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list)
185 if (ip_vs_addr_equal(af, &en->addr, addr)) 188 if (ip_vs_addr_equal(af, &en->addr, addr))
186 return en; 189 return en;
187 190
@@ -191,7 +194,7 @@ ip_vs_lblc_get(int af, struct ip_vs_lblc_table *tbl,
191 194
192/* 195/*
193 * Create or update an ip_vs_lblc_entry, which is a mapping of a destination IP 196 * Create or update an ip_vs_lblc_entry, which is a mapping of a destination IP
194 * address to a server. Called under write lock. 197 * address to a server. Called under spin lock.
195 */ 198 */
196static inline struct ip_vs_lblc_entry * 199static inline struct ip_vs_lblc_entry *
197ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr, 200ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr,
@@ -209,14 +212,20 @@ ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr,
209 ip_vs_addr_copy(dest->af, &en->addr, daddr); 212 ip_vs_addr_copy(dest->af, &en->addr, daddr);
210 en->lastuse = jiffies; 213 en->lastuse = jiffies;
211 214
212 atomic_inc(&dest->refcnt); 215 ip_vs_dest_hold(dest);
213 en->dest = dest; 216 RCU_INIT_POINTER(en->dest, dest);
214 217
215 ip_vs_lblc_hash(tbl, en); 218 ip_vs_lblc_hash(tbl, en);
216 } else if (en->dest != dest) { 219 } else {
217 atomic_dec(&en->dest->refcnt); 220 struct ip_vs_dest *old_dest;
218 atomic_inc(&dest->refcnt); 221
219 en->dest = dest; 222 old_dest = rcu_dereference_protected(en->dest, 1);
223 if (old_dest != dest) {
224 ip_vs_dest_put(old_dest);
225 ip_vs_dest_hold(dest);
226 /* No ordering constraints for refcnt */
227 RCU_INIT_POINTER(en->dest, dest);
228 }
220 } 229 }
221 230
222 return en; 231 return en;
@@ -226,17 +235,22 @@ ip_vs_lblc_new(struct ip_vs_lblc_table *tbl, const union nf_inet_addr *daddr,
226/* 235/*
227 * Flush all the entries of the specified table. 236 * Flush all the entries of the specified table.
228 */ 237 */
229static void ip_vs_lblc_flush(struct ip_vs_lblc_table *tbl) 238static void ip_vs_lblc_flush(struct ip_vs_service *svc)
230{ 239{
231 struct ip_vs_lblc_entry *en, *nxt; 240 struct ip_vs_lblc_table *tbl = svc->sched_data;
241 struct ip_vs_lblc_entry *en;
242 struct hlist_node *next;
232 int i; 243 int i;
233 244
245 spin_lock_bh(&svc->sched_lock);
246 tbl->dead = 1;
234 for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) { 247 for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
235 list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) { 248 hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
236 ip_vs_lblc_free(en); 249 ip_vs_lblc_free(en);
237 atomic_dec(&tbl->entries); 250 atomic_dec(&tbl->entries);
238 } 251 }
239 } 252 }
253 spin_unlock_bh(&svc->sched_lock);
240} 254}
241 255
242static int sysctl_lblc_expiration(struct ip_vs_service *svc) 256static int sysctl_lblc_expiration(struct ip_vs_service *svc)
@@ -252,15 +266,16 @@ static int sysctl_lblc_expiration(struct ip_vs_service *svc)
252static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc) 266static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
253{ 267{
254 struct ip_vs_lblc_table *tbl = svc->sched_data; 268 struct ip_vs_lblc_table *tbl = svc->sched_data;
255 struct ip_vs_lblc_entry *en, *nxt; 269 struct ip_vs_lblc_entry *en;
270 struct hlist_node *next;
256 unsigned long now = jiffies; 271 unsigned long now = jiffies;
257 int i, j; 272 int i, j;
258 273
259 for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) { 274 for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
260 j = (j + 1) & IP_VS_LBLC_TAB_MASK; 275 j = (j + 1) & IP_VS_LBLC_TAB_MASK;
261 276
262 write_lock(&svc->sched_lock); 277 spin_lock(&svc->sched_lock);
263 list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { 278 hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
264 if (time_before(now, 279 if (time_before(now,
265 en->lastuse + 280 en->lastuse +
266 sysctl_lblc_expiration(svc))) 281 sysctl_lblc_expiration(svc)))
@@ -269,7 +284,7 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
269 ip_vs_lblc_free(en); 284 ip_vs_lblc_free(en);
270 atomic_dec(&tbl->entries); 285 atomic_dec(&tbl->entries);
271 } 286 }
272 write_unlock(&svc->sched_lock); 287 spin_unlock(&svc->sched_lock);
273 } 288 }
274 tbl->rover = j; 289 tbl->rover = j;
275} 290}
@@ -293,7 +308,8 @@ static void ip_vs_lblc_check_expire(unsigned long data)
293 unsigned long now = jiffies; 308 unsigned long now = jiffies;
294 int goal; 309 int goal;
295 int i, j; 310 int i, j;
296 struct ip_vs_lblc_entry *en, *nxt; 311 struct ip_vs_lblc_entry *en;
312 struct hlist_node *next;
297 313
298 if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) { 314 if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
299 /* do full expiration check */ 315 /* do full expiration check */
@@ -314,8 +330,8 @@ static void ip_vs_lblc_check_expire(unsigned long data)
314 for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) { 330 for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
315 j = (j + 1) & IP_VS_LBLC_TAB_MASK; 331 j = (j + 1) & IP_VS_LBLC_TAB_MASK;
316 332
317 write_lock(&svc->sched_lock); 333 spin_lock(&svc->sched_lock);
318 list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { 334 hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
319 if (time_before(now, en->lastuse + ENTRY_TIMEOUT)) 335 if (time_before(now, en->lastuse + ENTRY_TIMEOUT))
320 continue; 336 continue;
321 337
@@ -323,7 +339,7 @@ static void ip_vs_lblc_check_expire(unsigned long data)
323 atomic_dec(&tbl->entries); 339 atomic_dec(&tbl->entries);
324 goal--; 340 goal--;
325 } 341 }
326 write_unlock(&svc->sched_lock); 342 spin_unlock(&svc->sched_lock);
327 if (goal <= 0) 343 if (goal <= 0)
328 break; 344 break;
329 } 345 }
@@ -354,11 +370,12 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
354 * Initialize the hash buckets 370 * Initialize the hash buckets
355 */ 371 */
356 for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) { 372 for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
357 INIT_LIST_HEAD(&tbl->bucket[i]); 373 INIT_HLIST_HEAD(&tbl->bucket[i]);
358 } 374 }
359 tbl->max_size = IP_VS_LBLC_TAB_SIZE*16; 375 tbl->max_size = IP_VS_LBLC_TAB_SIZE*16;
360 tbl->rover = 0; 376 tbl->rover = 0;
361 tbl->counter = 1; 377 tbl->counter = 1;
378 tbl->dead = 0;
362 379
363 /* 380 /*
364 * Hook periodic timer for garbage collection 381 * Hook periodic timer for garbage collection
@@ -371,7 +388,7 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
371} 388}
372 389
373 390
374static int ip_vs_lblc_done_svc(struct ip_vs_service *svc) 391static void ip_vs_lblc_done_svc(struct ip_vs_service *svc)
375{ 392{
376 struct ip_vs_lblc_table *tbl = svc->sched_data; 393 struct ip_vs_lblc_table *tbl = svc->sched_data;
377 394
@@ -379,14 +396,12 @@ static int ip_vs_lblc_done_svc(struct ip_vs_service *svc)
379 del_timer_sync(&tbl->periodic_timer); 396 del_timer_sync(&tbl->periodic_timer);
380 397
381 /* got to clean up table entries here */ 398 /* got to clean up table entries here */
382 ip_vs_lblc_flush(tbl); 399 ip_vs_lblc_flush(svc);
383 400
384 /* release the table itself */ 401 /* release the table itself */
385 kfree(tbl); 402 kfree_rcu(tbl, rcu_head);
386 IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) released\n", 403 IP_VS_DBG(6, "LBLC hash table (memory=%Zdbytes) released\n",
387 sizeof(*tbl)); 404 sizeof(*tbl));
388
389 return 0;
390} 405}
391 406
392 407
@@ -408,7 +423,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
408 * The server with weight=0 is quiesced and will not receive any 423 * The server with weight=0 is quiesced and will not receive any
409 * new connection. 424 * new connection.
410 */ 425 */
411 list_for_each_entry(dest, &svc->destinations, n_list) { 426 list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
412 if (dest->flags & IP_VS_DEST_F_OVERLOAD) 427 if (dest->flags & IP_VS_DEST_F_OVERLOAD)
413 continue; 428 continue;
414 if (atomic_read(&dest->weight) > 0) { 429 if (atomic_read(&dest->weight) > 0) {
@@ -423,7 +438,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
423 * Find the destination with the least load. 438 * Find the destination with the least load.
424 */ 439 */
425 nextstage: 440 nextstage:
426 list_for_each_entry_continue(dest, &svc->destinations, n_list) { 441 list_for_each_entry_continue_rcu(dest, &svc->destinations, n_list) {
427 if (dest->flags & IP_VS_DEST_F_OVERLOAD) 442 if (dest->flags & IP_VS_DEST_F_OVERLOAD)
428 continue; 443 continue;
429 444
@@ -457,7 +472,7 @@ is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
457 if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) { 472 if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) {
458 struct ip_vs_dest *d; 473 struct ip_vs_dest *d;
459 474
460 list_for_each_entry(d, &svc->destinations, n_list) { 475 list_for_each_entry_rcu(d, &svc->destinations, n_list) {
461 if (atomic_read(&d->activeconns)*2 476 if (atomic_read(&d->activeconns)*2
462 < atomic_read(&d->weight)) { 477 < atomic_read(&d->weight)) {
463 return 1; 478 return 1;
@@ -484,7 +499,6 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
484 IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); 499 IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
485 500
486 /* First look in our cache */ 501 /* First look in our cache */
487 read_lock(&svc->sched_lock);
488 en = ip_vs_lblc_get(svc->af, tbl, &iph.daddr); 502 en = ip_vs_lblc_get(svc->af, tbl, &iph.daddr);
489 if (en) { 503 if (en) {
490 /* We only hold a read lock, but this is atomic */ 504 /* We only hold a read lock, but this is atomic */
@@ -499,14 +513,11 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
499 * free up entries from the trash at any time. 513 * free up entries from the trash at any time.
500 */ 514 */
501 515
502 if (en->dest->flags & IP_VS_DEST_F_AVAILABLE) 516 dest = rcu_dereference(en->dest);
503 dest = en->dest; 517 if ((dest->flags & IP_VS_DEST_F_AVAILABLE) &&
518 atomic_read(&dest->weight) > 0 && !is_overloaded(dest, svc))
519 goto out;
504 } 520 }
505 read_unlock(&svc->sched_lock);
506
507 /* If the destination has a weight and is not overloaded, use it */
508 if (dest && atomic_read(&dest->weight) > 0 && !is_overloaded(dest, svc))
509 goto out;
510 521
511 /* No cache entry or it is invalid, time to schedule */ 522 /* No cache entry or it is invalid, time to schedule */
512 dest = __ip_vs_lblc_schedule(svc); 523 dest = __ip_vs_lblc_schedule(svc);
@@ -516,9 +527,10 @@ ip_vs_lblc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
516 } 527 }
517 528
518 /* If we fail to create a cache entry, we'll just use the valid dest */ 529 /* If we fail to create a cache entry, we'll just use the valid dest */
519 write_lock(&svc->sched_lock); 530 spin_lock_bh(&svc->sched_lock);
520 ip_vs_lblc_new(tbl, &iph.daddr, dest); 531 if (!tbl->dead)
521 write_unlock(&svc->sched_lock); 532 ip_vs_lblc_new(tbl, &iph.daddr, dest);
533 spin_unlock_bh(&svc->sched_lock);
522 534
523out: 535out:
524 IP_VS_DBG_BUF(6, "LBLC: destination IP address %s --> server %s:%d\n", 536 IP_VS_DBG_BUF(6, "LBLC: destination IP address %s --> server %s:%d\n",
@@ -621,6 +633,7 @@ static void __exit ip_vs_lblc_cleanup(void)
621{ 633{
622 unregister_ip_vs_scheduler(&ip_vs_lblc_scheduler); 634 unregister_ip_vs_scheduler(&ip_vs_lblc_scheduler);
623 unregister_pernet_subsys(&ip_vs_lblc_ops); 635 unregister_pernet_subsys(&ip_vs_lblc_ops);
636 synchronize_rcu();
624} 637}
625 638
626 639
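The lblc conversion frees table entries with hlist_del_rcu() plus kfree_rcu(), and the module exit path adds synchronize_rcu() after unregistering the scheduler so no reader can still be executing its code when the module text goes away. A sketch of that teardown ordering (hypothetical my_* names):

    #include <linux/module.h>
    #include <linux/rculist.h>
    #include <linux/slab.h>

    struct my_entry {
            struct hlist_node list;
            struct rcu_head rcu_head;
    };

    /* Called with the table's spinlock held. */
    static void my_entry_free(struct my_entry *en)
    {
            hlist_del_rcu(&en->list);       /* readers may still see *en... */
            kfree_rcu(en, rcu_head);        /* ...so defer kfree a grace period */
    }

    static void __exit my_cleanup(void)
    {
            /* unregister_my_scheduler(); */
            synchronize_rcu();      /* wait out readers before unload */
    }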
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index c03b6a3ade2f..feb9656eac58 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -89,40 +89,44 @@
89 */ 89 */
90struct ip_vs_dest_set_elem { 90struct ip_vs_dest_set_elem {
91 struct list_head list; /* list link */ 91 struct list_head list; /* list link */
92 struct ip_vs_dest *dest; /* destination server */ 92 struct ip_vs_dest __rcu *dest; /* destination server */
93 struct rcu_head rcu_head;
93}; 94};
94 95
95struct ip_vs_dest_set { 96struct ip_vs_dest_set {
96 atomic_t size; /* set size */ 97 atomic_t size; /* set size */
97 unsigned long lastmod; /* last modified time */ 98 unsigned long lastmod; /* last modified time */
98 struct list_head list; /* destination list */ 99 struct list_head list; /* destination list */
99 rwlock_t lock; /* lock for this list */
100}; 100};
101 101
102 102
103static struct ip_vs_dest_set_elem * 103static void ip_vs_dest_set_insert(struct ip_vs_dest_set *set,
104ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest) 104 struct ip_vs_dest *dest, bool check)
105{ 105{
106 struct ip_vs_dest_set_elem *e; 106 struct ip_vs_dest_set_elem *e;
107 107
108 list_for_each_entry(e, &set->list, list) { 108 if (check) {
109 if (e->dest == dest) 109 list_for_each_entry(e, &set->list, list) {
110 /* already existed */ 110 struct ip_vs_dest *d;
111 return NULL; 111
112 d = rcu_dereference_protected(e->dest, 1);
113 if (d == dest)
114 /* already existed */
115 return;
116 }
112 } 117 }
113 118
114 e = kmalloc(sizeof(*e), GFP_ATOMIC); 119 e = kmalloc(sizeof(*e), GFP_ATOMIC);
115 if (e == NULL) 120 if (e == NULL)
116 return NULL; 121 return;
117 122
118 atomic_inc(&dest->refcnt); 123 ip_vs_dest_hold(dest);
119 e->dest = dest; 124 RCU_INIT_POINTER(e->dest, dest);
120 125
121 list_add(&e->list, &set->list); 126 list_add_rcu(&e->list, &set->list);
122 atomic_inc(&set->size); 127 atomic_inc(&set->size);
123 128
124 set->lastmod = jiffies; 129 set->lastmod = jiffies;
125 return e;
126} 130}
127 131
128static void 132static void
@@ -131,13 +135,16 @@ ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
131 struct ip_vs_dest_set_elem *e; 135 struct ip_vs_dest_set_elem *e;
132 136
133 list_for_each_entry(e, &set->list, list) { 137 list_for_each_entry(e, &set->list, list) {
134 if (e->dest == dest) { 138 struct ip_vs_dest *d;
139
140 d = rcu_dereference_protected(e->dest, 1);
141 if (d == dest) {
135 /* HIT */ 142 /* HIT */
136 atomic_dec(&set->size); 143 atomic_dec(&set->size);
137 set->lastmod = jiffies; 144 set->lastmod = jiffies;
138 atomic_dec(&e->dest->refcnt); 145 ip_vs_dest_put(dest);
139 list_del(&e->list); 146 list_del_rcu(&e->list);
140 kfree(e); 147 kfree_rcu(e, rcu_head);
141 break; 148 break;
142 } 149 }
143 } 150 }
@@ -147,17 +154,18 @@ static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
147{ 154{
148 struct ip_vs_dest_set_elem *e, *ep; 155 struct ip_vs_dest_set_elem *e, *ep;
149 156
150 write_lock(&set->lock);
151 list_for_each_entry_safe(e, ep, &set->list, list) { 157 list_for_each_entry_safe(e, ep, &set->list, list) {
158 struct ip_vs_dest *d;
159
160 d = rcu_dereference_protected(e->dest, 1);
152 /* 161 /*
153 * We don't kfree dest because it is referred either 162 * We don't kfree dest because it is referred either
154 * by its service or by the trash dest list. 163 * by its service or by the trash dest list.
155 */ 164 */
156 atomic_dec(&e->dest->refcnt); 165 ip_vs_dest_put(d);
157 list_del(&e->list); 166 list_del_rcu(&e->list);
158 kfree(e); 167 kfree_rcu(e, rcu_head);
159 } 168 }
160 write_unlock(&set->lock);
161} 169}
162 170
163/* get weighted least-connection node in the destination set */ 171/* get weighted least-connection node in the destination set */
@@ -171,8 +179,8 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
171 return NULL; 179 return NULL;
172 180
173 /* select the first destination server, whose weight > 0 */ 181 /* select the first destination server, whose weight > 0 */
174 list_for_each_entry(e, &set->list, list) { 182 list_for_each_entry_rcu(e, &set->list, list) {
175 least = e->dest; 183 least = rcu_dereference(e->dest);
176 if (least->flags & IP_VS_DEST_F_OVERLOAD) 184 if (least->flags & IP_VS_DEST_F_OVERLOAD)
177 continue; 185 continue;
178 186
@@ -186,8 +194,8 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
186 194
187 /* find the destination with the weighted least load */ 195 /* find the destination with the weighted least load */
188 nextstage: 196 nextstage:
189 list_for_each_entry(e, &set->list, list) { 197 list_for_each_entry_continue_rcu(e, &set->list, list) {
190 dest = e->dest; 198 dest = rcu_dereference(e->dest);
191 if (dest->flags & IP_VS_DEST_F_OVERLOAD) 199 if (dest->flags & IP_VS_DEST_F_OVERLOAD)
192 continue; 200 continue;
193 201
@@ -224,7 +232,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
224 232
225 /* select the first destination server, whose weight > 0 */ 233 /* select the first destination server, whose weight > 0 */
226 list_for_each_entry(e, &set->list, list) { 234 list_for_each_entry(e, &set->list, list) {
227 most = e->dest; 235 most = rcu_dereference_protected(e->dest, 1);
228 if (atomic_read(&most->weight) > 0) { 236 if (atomic_read(&most->weight) > 0) {
229 moh = ip_vs_dest_conn_overhead(most); 237 moh = ip_vs_dest_conn_overhead(most);
230 goto nextstage; 238 goto nextstage;
@@ -234,8 +242,8 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
234 242
235 /* find the destination with the weighted most load */ 243 /* find the destination with the weighted most load */
236 nextstage: 244 nextstage:
237 list_for_each_entry(e, &set->list, list) { 245 list_for_each_entry_continue(e, &set->list, list) {
238 dest = e->dest; 246 dest = rcu_dereference_protected(e->dest, 1);
239 doh = ip_vs_dest_conn_overhead(dest); 247 doh = ip_vs_dest_conn_overhead(dest);
240 /* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */ 248 /* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
241 if ((moh * atomic_read(&dest->weight) < 249 if ((moh * atomic_read(&dest->weight) <
@@ -262,11 +270,12 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
262 * IP address and its destination server set 270 * IP address and its destination server set
263 */ 271 */
264struct ip_vs_lblcr_entry { 272struct ip_vs_lblcr_entry {
265 struct list_head list; 273 struct hlist_node list;
266 int af; /* address family */ 274 int af; /* address family */
267 union nf_inet_addr addr; /* destination IP address */ 275 union nf_inet_addr addr; /* destination IP address */
268 struct ip_vs_dest_set set; /* destination server set */ 276 struct ip_vs_dest_set set; /* destination server set */
269 unsigned long lastuse; /* last used time */ 277 unsigned long lastuse; /* last used time */
278 struct rcu_head rcu_head;
270}; 279};
271 280
272 281
@@ -274,12 +283,14 @@ struct ip_vs_lblcr_entry {
274 * IPVS lblcr hash table 283 * IPVS lblcr hash table
275 */ 284 */
276struct ip_vs_lblcr_table { 285struct ip_vs_lblcr_table {
277 struct list_head bucket[IP_VS_LBLCR_TAB_SIZE]; /* hash bucket */ 286 struct rcu_head rcu_head;
287 struct hlist_head __rcu bucket[IP_VS_LBLCR_TAB_SIZE]; /* hash bucket */
278 atomic_t entries; /* number of entries */ 288 atomic_t entries; /* number of entries */
279 int max_size; /* maximum size of entries */ 289 int max_size; /* maximum size of entries */
280 struct timer_list periodic_timer; /* collect stale entries */ 290 struct timer_list periodic_timer; /* collect stale entries */
281 int rover; /* rover for expire check */ 291 int rover; /* rover for expire check */
282 int counter; /* counter for no expire */ 292 int counter; /* counter for no expire */
293 bool dead;
283}; 294};
284 295
285 296
@@ -302,9 +313,9 @@ static ctl_table vs_vars_table[] = {
302 313
303static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en) 314static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
304{ 315{
305 list_del(&en->list); 316 hlist_del_rcu(&en->list);
306 ip_vs_dest_set_eraseall(&en->set); 317 ip_vs_dest_set_eraseall(&en->set);
307 kfree(en); 318 kfree_rcu(en, rcu_head);
308} 319}
309 320
310 321
@@ -334,15 +345,12 @@ ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en)
334{ 345{
335 unsigned int hash = ip_vs_lblcr_hashkey(en->af, &en->addr); 346 unsigned int hash = ip_vs_lblcr_hashkey(en->af, &en->addr);
336 347
337 list_add(&en->list, &tbl->bucket[hash]); 348 hlist_add_head_rcu(&en->list, &tbl->bucket[hash]);
338 atomic_inc(&tbl->entries); 349 atomic_inc(&tbl->entries);
339} 350}
340 351
341 352
342/* 353/* Get ip_vs_lblcr_entry associated with supplied parameters. */
343 * Get ip_vs_lblcr_entry associated with supplied parameters. Called under
344 * read lock.
345 */
346static inline struct ip_vs_lblcr_entry * 354static inline struct ip_vs_lblcr_entry *
347ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl, 355ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl,
348 const union nf_inet_addr *addr) 356 const union nf_inet_addr *addr)
@@ -350,7 +358,7 @@ ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl,
350 unsigned int hash = ip_vs_lblcr_hashkey(af, addr); 358 unsigned int hash = ip_vs_lblcr_hashkey(af, addr);
351 struct ip_vs_lblcr_entry *en; 359 struct ip_vs_lblcr_entry *en;
352 360
353 list_for_each_entry(en, &tbl->bucket[hash], list) 361 hlist_for_each_entry_rcu(en, &tbl->bucket[hash], list)
354 if (ip_vs_addr_equal(af, &en->addr, addr)) 362 if (ip_vs_addr_equal(af, &en->addr, addr))
355 return en; 363 return en;
356 364
@@ -360,7 +368,7 @@ ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl,
360 368
361/* 369/*
362 * Create or update an ip_vs_lblcr_entry, which is a mapping of a destination 370 * Create or update an ip_vs_lblcr_entry, which is a mapping of a destination
363 * IP address to a server. Called under write lock. 371 * IP address to a server. Called under spin lock.
364 */ 372 */
365static inline struct ip_vs_lblcr_entry * 373static inline struct ip_vs_lblcr_entry *
366ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr, 374ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr,
@@ -381,14 +389,14 @@ ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr,
381 /* initialize its dest set */ 389 /* initialize its dest set */
382 atomic_set(&(en->set.size), 0); 390 atomic_set(&(en->set.size), 0);
383 INIT_LIST_HEAD(&en->set.list); 391 INIT_LIST_HEAD(&en->set.list);
384 rwlock_init(&en->set.lock); 392
393 ip_vs_dest_set_insert(&en->set, dest, false);
385 394
386 ip_vs_lblcr_hash(tbl, en); 395 ip_vs_lblcr_hash(tbl, en);
396 return en;
387 } 397 }
388 398
389 write_lock(&en->set.lock); 399 ip_vs_dest_set_insert(&en->set, dest, true);
390 ip_vs_dest_set_insert(&en->set, dest);
391 write_unlock(&en->set.lock);
392 400
393 return en; 401 return en;
394} 402}
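
ip_vs_lblcr_new() now runs under svc->sched_lock (a spinlock) instead of a write lock, and a freshly created entry becomes visible to lockless readers only after it is fully initialized. A minimal sketch of that create-then-publish idiom, with illustrative names (demo_entry and demo_new are not the kernel's):

	#include <linux/rculist.h>
	#include <linux/slab.h>

	struct demo_entry {
		struct hlist_node list;
		unsigned long key;
		struct rcu_head rcu_head;
	};

	/* Caller holds the table's spinlock with BHs disabled,
	 * hence GFP_ATOMIC.
	 */
	static struct demo_entry *demo_new(struct hlist_head *bucket,
					   unsigned long key)
	{
		struct demo_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

		if (!e)
			return NULL;
		e->key = key;				/* init first ... */
		hlist_add_head_rcu(&e->list, bucket);	/* ... publish last */
		return e;
	}
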
@@ -397,17 +405,21 @@ ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr,
397/* 405/*
398 * Flush all the entries of the specified table. 406 * Flush all the entries of the specified table.
399 */ 407 */
400static void ip_vs_lblcr_flush(struct ip_vs_lblcr_table *tbl) 408static void ip_vs_lblcr_flush(struct ip_vs_service *svc)
401{ 409{
410 struct ip_vs_lblcr_table *tbl = svc->sched_data;
402 int i; 411 int i;
403 struct ip_vs_lblcr_entry *en, *nxt; 412 struct ip_vs_lblcr_entry *en;
413 struct hlist_node *next;
404 414
405 /* No locking required, only called during cleanup. */ 415 spin_lock_bh(&svc->sched_lock);
416 tbl->dead = 1;
406 for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) { 417 for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
407 list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) { 418 hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
408 ip_vs_lblcr_free(en); 419 ip_vs_lblcr_free(en);
409 } 420 }
410 } 421 }
422 spin_unlock_bh(&svc->sched_lock);
411} 423}
412 424
413static int sysctl_lblcr_expiration(struct ip_vs_service *svc) 425static int sysctl_lblcr_expiration(struct ip_vs_service *svc)
@@ -425,13 +437,14 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
425 struct ip_vs_lblcr_table *tbl = svc->sched_data; 437 struct ip_vs_lblcr_table *tbl = svc->sched_data;
426 unsigned long now = jiffies; 438 unsigned long now = jiffies;
427 int i, j; 439 int i, j;
428 struct ip_vs_lblcr_entry *en, *nxt; 440 struct ip_vs_lblcr_entry *en;
441 struct hlist_node *next;
429 442
430 for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) { 443 for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
431 j = (j + 1) & IP_VS_LBLCR_TAB_MASK; 444 j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
432 445
433 write_lock(&svc->sched_lock); 446 spin_lock(&svc->sched_lock);
434 list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { 447 hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
435 if (time_after(en->lastuse + 448 if (time_after(en->lastuse +
436 sysctl_lblcr_expiration(svc), now)) 449 sysctl_lblcr_expiration(svc), now))
437 continue; 450 continue;
@@ -439,7 +452,7 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
439 ip_vs_lblcr_free(en); 452 ip_vs_lblcr_free(en);
440 atomic_dec(&tbl->entries); 453 atomic_dec(&tbl->entries);
441 } 454 }
442 write_unlock(&svc->sched_lock); 455 spin_unlock(&svc->sched_lock);
443 } 456 }
444 tbl->rover = j; 457 tbl->rover = j;
445} 458}
@@ -463,7 +476,8 @@ static void ip_vs_lblcr_check_expire(unsigned long data)
463 unsigned long now = jiffies; 476 unsigned long now = jiffies;
464 int goal; 477 int goal;
465 int i, j; 478 int i, j;
466 struct ip_vs_lblcr_entry *en, *nxt; 479 struct ip_vs_lblcr_entry *en;
480 struct hlist_node *next;
467 481
468 if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) { 482 if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
469 /* do full expiration check */ 483 /* do full expiration check */
@@ -484,8 +498,8 @@ static void ip_vs_lblcr_check_expire(unsigned long data)
484 for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) { 498 for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
485 j = (j + 1) & IP_VS_LBLCR_TAB_MASK; 499 j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
486 500
487 write_lock(&svc->sched_lock); 501 spin_lock(&svc->sched_lock);
488 list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) { 502 hlist_for_each_entry_safe(en, next, &tbl->bucket[j], list) {
489 if (time_before(now, en->lastuse+ENTRY_TIMEOUT)) 503 if (time_before(now, en->lastuse+ENTRY_TIMEOUT))
490 continue; 504 continue;
491 505
@@ -493,7 +507,7 @@ static void ip_vs_lblcr_check_expire(unsigned long data)
493 atomic_dec(&tbl->entries); 507 atomic_dec(&tbl->entries);
494 goal--; 508 goal--;
495 } 509 }
496 write_unlock(&svc->sched_lock); 510 spin_unlock(&svc->sched_lock);
497 if (goal <= 0) 511 if (goal <= 0)
498 break; 512 break;
499 } 513 }
@@ -523,11 +537,12 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
523 * Initialize the hash buckets 537 * Initialize the hash buckets
524 */ 538 */
525 for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) { 539 for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
526 INIT_LIST_HEAD(&tbl->bucket[i]); 540 INIT_HLIST_HEAD(&tbl->bucket[i]);
527 } 541 }
528 tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16; 542 tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16;
529 tbl->rover = 0; 543 tbl->rover = 0;
530 tbl->counter = 1; 544 tbl->counter = 1;
545 tbl->dead = 0;
531 546
532 /* 547 /*
533 * Hook periodic timer for garbage collection 548 * Hook periodic timer for garbage collection
@@ -540,7 +555,7 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
540} 555}
541 556
542 557
543static int ip_vs_lblcr_done_svc(struct ip_vs_service *svc) 558static void ip_vs_lblcr_done_svc(struct ip_vs_service *svc)
544{ 559{
545 struct ip_vs_lblcr_table *tbl = svc->sched_data; 560 struct ip_vs_lblcr_table *tbl = svc->sched_data;
546 561
@@ -548,14 +563,12 @@ static int ip_vs_lblcr_done_svc(struct ip_vs_service *svc)
548 del_timer_sync(&tbl->periodic_timer); 563 del_timer_sync(&tbl->periodic_timer);
549 564
550 /* got to clean up table entries here */ 565 /* got to clean up table entries here */
551 ip_vs_lblcr_flush(tbl); 566 ip_vs_lblcr_flush(svc);
552 567
553 /* release the table itself */ 568 /* release the table itself */
554 kfree(tbl); 569 kfree_rcu(tbl, rcu_head);
555 IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) released\n", 570 IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) released\n",
556 sizeof(*tbl)); 571 sizeof(*tbl));
557
558 return 0;
559} 572}
560 573
561 574
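
ip_vs_lblcr_done_svc() becomes void (its result was ignored) and the table is reclaimed with kfree_rcu() rather than kfree(), since lockless readers may still be traversing it. A sketch of the teardown ordering this implies, with illustrative names:

	#include <linux/rcupdate.h>
	#include <linux/spinlock.h>
	#include <linux/timer.h>

	struct demo_table {
		struct timer_list periodic_timer;
		bool dead;
		struct rcu_head rcu_head;
	};

	static void demo_done(struct demo_table *tbl, spinlock_t *lock)
	{
		del_timer_sync(&tbl->periodic_timer);
		spin_lock_bh(lock);
		tbl->dead = true;	/* flush: block late insertions */
		/* ... hlist_del_rcu() every remaining entry ... */
		spin_unlock_bh(lock);
		kfree_rcu(tbl, rcu_head); /* free after a grace period */
	}
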
@@ -577,7 +590,7 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
577 * The server with weight=0 is quiesced and will not receive any 590 * The server with weight=0 is quiesced and will not receive any
578 * new connection. 591 * new connection.
579 */ 592 */
580 list_for_each_entry(dest, &svc->destinations, n_list) { 593 list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
581 if (dest->flags & IP_VS_DEST_F_OVERLOAD) 594 if (dest->flags & IP_VS_DEST_F_OVERLOAD)
582 continue; 595 continue;
583 596
@@ -593,7 +606,7 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
593 * Find the destination with the least load. 606 * Find the destination with the least load.
594 */ 607 */
595 nextstage: 608 nextstage:
596 list_for_each_entry_continue(dest, &svc->destinations, n_list) { 609 list_for_each_entry_continue_rcu(dest, &svc->destinations, n_list) {
597 if (dest->flags & IP_VS_DEST_F_OVERLOAD) 610 if (dest->flags & IP_VS_DEST_F_OVERLOAD)
598 continue; 611 continue;
599 612
@@ -627,7 +640,7 @@ is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
627 if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) { 640 if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) {
628 struct ip_vs_dest *d; 641 struct ip_vs_dest *d;
629 642
630 list_for_each_entry(d, &svc->destinations, n_list) { 643 list_for_each_entry_rcu(d, &svc->destinations, n_list) {
631 if (atomic_read(&d->activeconns)*2 644 if (atomic_read(&d->activeconns)*2
632 < atomic_read(&d->weight)) { 645 < atomic_read(&d->weight)) {
633 return 1; 646 return 1;
@@ -646,7 +659,7 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
646{ 659{
647 struct ip_vs_lblcr_table *tbl = svc->sched_data; 660 struct ip_vs_lblcr_table *tbl = svc->sched_data;
648 struct ip_vs_iphdr iph; 661 struct ip_vs_iphdr iph;
649 struct ip_vs_dest *dest = NULL; 662 struct ip_vs_dest *dest;
650 struct ip_vs_lblcr_entry *en; 663 struct ip_vs_lblcr_entry *en;
651 664
652 ip_vs_fill_iph_addr_only(svc->af, skb, &iph); 665 ip_vs_fill_iph_addr_only(svc->af, skb, &iph);
@@ -654,53 +667,46 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
654 IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); 667 IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
655 668
656 /* First look in our cache */ 669 /* First look in our cache */
657 read_lock(&svc->sched_lock);
658 en = ip_vs_lblcr_get(svc->af, tbl, &iph.daddr); 670 en = ip_vs_lblcr_get(svc->af, tbl, &iph.daddr);
659 if (en) { 671 if (en) {
660 /* We only hold a read lock, but this is atomic */
661 en->lastuse = jiffies; 672 en->lastuse = jiffies;
662 673
663 /* Get the least loaded destination */ 674 /* Get the least loaded destination */
664 read_lock(&en->set.lock);
665 dest = ip_vs_dest_set_min(&en->set); 675 dest = ip_vs_dest_set_min(&en->set);
666 read_unlock(&en->set.lock);
667 676
668 /* More than one destination + enough time passed by, cleanup */ 677 /* More than one destination + enough time passed by, cleanup */
669 if (atomic_read(&en->set.size) > 1 && 678 if (atomic_read(&en->set.size) > 1 &&
670 time_after(jiffies, en->set.lastmod + 679 time_after(jiffies, en->set.lastmod +
671 sysctl_lblcr_expiration(svc))) { 680 sysctl_lblcr_expiration(svc))) {
672 struct ip_vs_dest *m; 681 spin_lock_bh(&svc->sched_lock);
682 if (atomic_read(&en->set.size) > 1) {
683 struct ip_vs_dest *m;
673 684
674 write_lock(&en->set.lock); 685 m = ip_vs_dest_set_max(&en->set);
675 m = ip_vs_dest_set_max(&en->set); 686 if (m)
676 if (m) 687 ip_vs_dest_set_erase(&en->set, m);
677 ip_vs_dest_set_erase(&en->set, m); 688 }
678 write_unlock(&en->set.lock); 689 spin_unlock_bh(&svc->sched_lock);
679 } 690 }
680 691
681 /* If the destination is not overloaded, use it */ 692 /* If the destination is not overloaded, use it */
682 if (dest && !is_overloaded(dest, svc)) { 693 if (dest && !is_overloaded(dest, svc))
683 read_unlock(&svc->sched_lock);
684 goto out; 694 goto out;
685 }
686 695
687 /* The cache entry is invalid, time to schedule */ 696 /* The cache entry is invalid, time to schedule */
688 dest = __ip_vs_lblcr_schedule(svc); 697 dest = __ip_vs_lblcr_schedule(svc);
689 if (!dest) { 698 if (!dest) {
690 ip_vs_scheduler_err(svc, "no destination available"); 699 ip_vs_scheduler_err(svc, "no destination available");
691 read_unlock(&svc->sched_lock);
692 return NULL; 700 return NULL;
693 } 701 }
694 702
695 /* Update our cache entry */ 703 /* Update our cache entry */
696 write_lock(&en->set.lock); 704 spin_lock_bh(&svc->sched_lock);
697 ip_vs_dest_set_insert(&en->set, dest); 705 if (!tbl->dead)
698 write_unlock(&en->set.lock); 706 ip_vs_dest_set_insert(&en->set, dest, true);
699 } 707 spin_unlock_bh(&svc->sched_lock);
700 read_unlock(&svc->sched_lock);
701
702 if (dest)
703 goto out; 708 goto out;
709 }
704 710
705 /* No cache entry, time to schedule */ 711 /* No cache entry, time to schedule */
706 dest = __ip_vs_lblcr_schedule(svc); 712 dest = __ip_vs_lblcr_schedule(svc);
@@ -710,9 +716,10 @@ ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
710 } 716 }
711 717
712 /* If we fail to create a cache entry, we'll just use the valid dest */ 718 /* If we fail to create a cache entry, we'll just use the valid dest */
713 write_lock(&svc->sched_lock); 719 spin_lock_bh(&svc->sched_lock);
714 ip_vs_lblcr_new(tbl, &iph.daddr, dest); 720 if (!tbl->dead)
715 write_unlock(&svc->sched_lock); 721 ip_vs_lblcr_new(tbl, &iph.daddr, dest);
722 spin_unlock_bh(&svc->sched_lock);
716 723
717out: 724out:
718 IP_VS_DBG_BUF(6, "LBLCR: destination IP address %s --> server %s:%d\n", 725 IP_VS_DBG_BUF(6, "LBLCR: destination IP address %s --> server %s:%d\n",
@@ -814,6 +821,7 @@ static void __exit ip_vs_lblcr_cleanup(void)
814{ 821{
815 unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler); 822 unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
816 unregister_pernet_subsys(&ip_vs_lblcr_ops); 823 unregister_pernet_subsys(&ip_vs_lblcr_ops);
824 synchronize_rcu();
817} 825}
818 826
819 827
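
Each scheduler's module exit now ends with synchronize_rcu(): packets may still be inside an RCU read-side section that reached the scheduler through an RCU-protected pointer, so the module must wait for those readers to drain before its code can be unloaded. The shape of the pattern (demo_scheduler is hypothetical):

	static void __exit demo_cleanup(void)
	{
		unregister_ip_vs_scheduler(&demo_scheduler);
		/* Readers found this module via an RCU-protected
		 * pointer; let them finish before the text goes away.
		 */
		synchronize_rcu();
	}
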
diff --git a/net/netfilter/ipvs/ip_vs_lc.c b/net/netfilter/ipvs/ip_vs_lc.c
index f391819c0cca..5128e338a749 100644
--- a/net/netfilter/ipvs/ip_vs_lc.c
+++ b/net/netfilter/ipvs/ip_vs_lc.c
@@ -42,7 +42,7 @@ ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
42 * served, but no new connection is assigned to the server. 42 * served, but no new connection is assigned to the server.
43 */ 43 */
44 44
45 list_for_each_entry(dest, &svc->destinations, n_list) { 45 list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
46 if ((dest->flags & IP_VS_DEST_F_OVERLOAD) || 46 if ((dest->flags & IP_VS_DEST_F_OVERLOAD) ||
47 atomic_read(&dest->weight) == 0) 47 atomic_read(&dest->weight) == 0)
48 continue; 48 continue;
@@ -84,6 +84,7 @@ static int __init ip_vs_lc_init(void)
84static void __exit ip_vs_lc_cleanup(void) 84static void __exit ip_vs_lc_cleanup(void)
85{ 85{
86 unregister_ip_vs_scheduler(&ip_vs_lc_scheduler); 86 unregister_ip_vs_scheduler(&ip_vs_lc_scheduler);
87 synchronize_rcu();
87} 88}
88 89
89module_init(ip_vs_lc_init); 90module_init(ip_vs_lc_init);
diff --git a/net/netfilter/ipvs/ip_vs_nq.c b/net/netfilter/ipvs/ip_vs_nq.c
index 984d9c137d84..646cfd4baa73 100644
--- a/net/netfilter/ipvs/ip_vs_nq.c
+++ b/net/netfilter/ipvs/ip_vs_nq.c
@@ -75,7 +75,7 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
75 * new connections. 75 * new connections.
76 */ 76 */
77 77
78 list_for_each_entry(dest, &svc->destinations, n_list) { 78 list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
79 79
80 if (dest->flags & IP_VS_DEST_F_OVERLOAD || 80 if (dest->flags & IP_VS_DEST_F_OVERLOAD ||
81 !atomic_read(&dest->weight)) 81 !atomic_read(&dest->weight))
@@ -133,6 +133,7 @@ static int __init ip_vs_nq_init(void)
133static void __exit ip_vs_nq_cleanup(void) 133static void __exit ip_vs_nq_cleanup(void)
134{ 134{
135 unregister_ip_vs_scheduler(&ip_vs_nq_scheduler); 135 unregister_ip_vs_scheduler(&ip_vs_nq_scheduler);
136 synchronize_rcu();
136} 137}
137 138
138module_init(ip_vs_nq_init); 139module_init(ip_vs_nq_init);
diff --git a/net/netfilter/ipvs/ip_vs_pe.c b/net/netfilter/ipvs/ip_vs_pe.c
index 5cf859ccb31b..1a82b29ce8ea 100644
--- a/net/netfilter/ipvs/ip_vs_pe.c
+++ b/net/netfilter/ipvs/ip_vs_pe.c
@@ -13,20 +13,8 @@
13/* IPVS pe list */ 13/* IPVS pe list */
14static LIST_HEAD(ip_vs_pe); 14static LIST_HEAD(ip_vs_pe);
15 15
16/* lock for service table */ 16/* mutex for IPVS PEs. */
17static DEFINE_SPINLOCK(ip_vs_pe_lock); 17static DEFINE_MUTEX(ip_vs_pe_mutex);
18
19/* Bind a service with a pe */
20void ip_vs_bind_pe(struct ip_vs_service *svc, struct ip_vs_pe *pe)
21{
22 svc->pe = pe;
23}
24
25/* Unbind a service from its pe */
26void ip_vs_unbind_pe(struct ip_vs_service *svc)
27{
28 svc->pe = NULL;
29}
30 18
31/* Get pe in the pe list by name */ 19/* Get pe in the pe list by name */
32struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name) 20struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name)
@@ -36,9 +24,8 @@ struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name)
36 IP_VS_DBG(10, "%s(): pe_name \"%s\"\n", __func__, 24 IP_VS_DBG(10, "%s(): pe_name \"%s\"\n", __func__,
37 pe_name); 25 pe_name);
38 26
39 spin_lock_bh(&ip_vs_pe_lock); 27 rcu_read_lock();
40 28 list_for_each_entry_rcu(pe, &ip_vs_pe, n_list) {
41 list_for_each_entry(pe, &ip_vs_pe, n_list) {
42 /* Test and get the modules atomically */ 29 /* Test and get the modules atomically */
43 if (pe->module && 30 if (pe->module &&
44 !try_module_get(pe->module)) { 31 !try_module_get(pe->module)) {
@@ -47,14 +34,14 @@ struct ip_vs_pe *__ip_vs_pe_getbyname(const char *pe_name)
47 } 34 }
48 if (strcmp(pe_name, pe->name)==0) { 35 if (strcmp(pe_name, pe->name)==0) {
49 /* HIT */ 36 /* HIT */
50 spin_unlock_bh(&ip_vs_pe_lock); 37 rcu_read_unlock();
51 return pe; 38 return pe;
52 } 39 }
53 if (pe->module) 40 if (pe->module)
54 module_put(pe->module); 41 module_put(pe->module);
55 } 42 }
43 rcu_read_unlock();
56 44
57 spin_unlock_bh(&ip_vs_pe_lock);
58 return NULL; 45 return NULL;
59} 46}
60 47
@@ -83,22 +70,13 @@ int register_ip_vs_pe(struct ip_vs_pe *pe)
83 /* increase the module use count */ 70 /* increase the module use count */
84 ip_vs_use_count_inc(); 71 ip_vs_use_count_inc();
85 72
86 spin_lock_bh(&ip_vs_pe_lock); 73 mutex_lock(&ip_vs_pe_mutex);
87
88 if (!list_empty(&pe->n_list)) {
89 spin_unlock_bh(&ip_vs_pe_lock);
90 ip_vs_use_count_dec();
91 pr_err("%s(): [%s] pe already linked\n",
92 __func__, pe->name);
93 return -EINVAL;
94 }
95
96 /* Make sure that the pe with this name doesn't exist 74 /* Make sure that the pe with this name doesn't exist
97 * in the pe list. 75 * in the pe list.
98 */ 76 */
99 list_for_each_entry(tmp, &ip_vs_pe, n_list) { 77 list_for_each_entry(tmp, &ip_vs_pe, n_list) {
100 if (strcmp(tmp->name, pe->name) == 0) { 78 if (strcmp(tmp->name, pe->name) == 0) {
101 spin_unlock_bh(&ip_vs_pe_lock); 79 mutex_unlock(&ip_vs_pe_mutex);
102 ip_vs_use_count_dec(); 80 ip_vs_use_count_dec();
103 pr_err("%s(): [%s] pe already existed " 81 pr_err("%s(): [%s] pe already existed "
104 "in the system\n", __func__, pe->name); 82 "in the system\n", __func__, pe->name);
@@ -106,8 +84,8 @@ int register_ip_vs_pe(struct ip_vs_pe *pe)
106 } 84 }
107 } 85 }
108 /* Add it into the d-linked pe list */ 86 /* Add it into the d-linked pe list */
109 list_add(&pe->n_list, &ip_vs_pe); 87 list_add_rcu(&pe->n_list, &ip_vs_pe);
110 spin_unlock_bh(&ip_vs_pe_lock); 88 mutex_unlock(&ip_vs_pe_mutex);
111 89
112 pr_info("[%s] pe registered.\n", pe->name); 90 pr_info("[%s] pe registered.\n", pe->name);
113 91
@@ -118,17 +96,10 @@ EXPORT_SYMBOL_GPL(register_ip_vs_pe);
118/* Unregister a pe from the pe list */ 96/* Unregister a pe from the pe list */
119int unregister_ip_vs_pe(struct ip_vs_pe *pe) 97int unregister_ip_vs_pe(struct ip_vs_pe *pe)
120{ 98{
121 spin_lock_bh(&ip_vs_pe_lock); 99 mutex_lock(&ip_vs_pe_mutex);
122 if (list_empty(&pe->n_list)) {
123 spin_unlock_bh(&ip_vs_pe_lock);
124 pr_err("%s(): [%s] pe is not in the list. failed\n",
125 __func__, pe->name);
126 return -EINVAL;
127 }
128
129 /* Remove it from the d-linked pe list */ 100 /* Remove it from the d-linked pe list */
130 list_del(&pe->n_list); 101 list_del_rcu(&pe->n_list);
131 spin_unlock_bh(&ip_vs_pe_lock); 102 mutex_unlock(&ip_vs_pe_mutex);
132 103
133 /* decrease the module use count */ 104 /* decrease the module use count */
134 ip_vs_use_count_dec(); 105 ip_vs_use_count_dec();
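
The PE list drops its spinlock in favor of the usual mutex-for-writers, RCU-for-readers split: registration and unregistration serialize on ip_vs_pe_mutex, while lookups walk the list under rcu_read_lock() alone. A condensed sketch with illustrative names (note the real lookup pins the owning module with try_module_get() before dropping the read lock, so the returned object cannot vanish):

	#include <linux/mutex.h>
	#include <linux/rculist.h>
	#include <linux/string.h>

	struct demo_obj {
		struct list_head n_list;
		const char *name;
	};

	static LIST_HEAD(demo_list);
	static DEFINE_MUTEX(demo_mutex);

	static void demo_register(struct demo_obj *obj)
	{
		mutex_lock(&demo_mutex);
		list_add_rcu(&obj->n_list, &demo_list);
		mutex_unlock(&demo_mutex);
	}

	static struct demo_obj *demo_find(const char *name)
	{
		struct demo_obj *obj;

		rcu_read_lock();
		list_for_each_entry_rcu(obj, &demo_list, n_list) {
			if (strcmp(obj->name, name) == 0) {
				/* real code takes a module ref here */
				rcu_read_unlock();
				return obj;
			}
		}
		rcu_read_unlock();
		return NULL;
	}
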
diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
index 12475ef88daf..00cc0241ed87 100644
--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
+++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
@@ -172,6 +172,7 @@ static int __init ip_vs_sip_init(void)
172static void __exit ip_vs_sip_cleanup(void) 172static void __exit ip_vs_sip_cleanup(void)
173{ 173{
174 unregister_ip_vs_pe(&ip_vs_sip_pe); 174 unregister_ip_vs_pe(&ip_vs_sip_pe);
175 synchronize_rcu();
175} 176}
176 177
177module_init(ip_vs_sip_init); 178module_init(ip_vs_sip_init);
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index cd1d7298f7ba..6e14a7b5602f 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -27,9 +27,10 @@ sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
27 if (sch == NULL) 27 if (sch == NULL)
28 return 0; 28 return 0;
29 net = skb_net(skb); 29 net = skb_net(skb);
30 rcu_read_lock();
30 if ((sch->type == SCTP_CID_INIT) && 31 if ((sch->type == SCTP_CID_INIT) &&
31 (svc = ip_vs_service_get(net, af, skb->mark, iph->protocol, 32 (svc = ip_vs_service_find(net, af, skb->mark, iph->protocol,
32 &iph->daddr, sh->dest))) { 33 &iph->daddr, sh->dest))) {
33 int ignored; 34 int ignored;
34 35
35 if (ip_vs_todrop(net_ipvs(net))) { 36 if (ip_vs_todrop(net_ipvs(net))) {
@@ -37,7 +38,7 @@ sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
37 * It seems that we are very loaded. 38 * It seems that we are very loaded.
38 * We have to drop this packet :( 39 * We have to drop this packet :(
39 */ 40 */
40 ip_vs_service_put(svc); 41 rcu_read_unlock();
41 *verdict = NF_DROP; 42 *verdict = NF_DROP;
42 return 0; 43 return 0;
43 } 44 }
@@ -49,14 +50,13 @@ sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
49 if (!*cpp && ignored <= 0) { 50 if (!*cpp && ignored <= 0) {
50 if (!ignored) 51 if (!ignored)
51 *verdict = ip_vs_leave(svc, skb, pd, iph); 52 *verdict = ip_vs_leave(svc, skb, pd, iph);
52 else { 53 else
53 ip_vs_service_put(svc);
54 *verdict = NF_DROP; 54 *verdict = NF_DROP;
55 } 55 rcu_read_unlock();
56 return 0; 56 return 0;
57 } 57 }
58 ip_vs_service_put(svc);
59 } 58 }
59 rcu_read_unlock();
60 /* NF_ACCEPT */ 60 /* NF_ACCEPT */
61 return 1; 61 return 1;
62} 62}
@@ -994,9 +994,9 @@ static void
994sctp_state_transition(struct ip_vs_conn *cp, int direction, 994sctp_state_transition(struct ip_vs_conn *cp, int direction,
995 const struct sk_buff *skb, struct ip_vs_proto_data *pd) 995 const struct sk_buff *skb, struct ip_vs_proto_data *pd)
996{ 996{
997 spin_lock(&cp->lock); 997 spin_lock_bh(&cp->lock);
998 set_sctp_state(pd, cp, direction, skb); 998 set_sctp_state(pd, cp, direction, skb);
999 spin_unlock(&cp->lock); 999 spin_unlock_bh(&cp->lock);
1000} 1000}
1001 1001
1002static inline __u16 sctp_app_hashkey(__be16 port) 1002static inline __u16 sctp_app_hashkey(__be16 port)
@@ -1016,30 +1016,25 @@ static int sctp_register_app(struct net *net, struct ip_vs_app *inc)
1016 1016
1017 hash = sctp_app_hashkey(port); 1017 hash = sctp_app_hashkey(port);
1018 1018
1019 spin_lock_bh(&ipvs->sctp_app_lock);
1020 list_for_each_entry(i, &ipvs->sctp_apps[hash], p_list) { 1019 list_for_each_entry(i, &ipvs->sctp_apps[hash], p_list) {
1021 if (i->port == port) { 1020 if (i->port == port) {
1022 ret = -EEXIST; 1021 ret = -EEXIST;
1023 goto out; 1022 goto out;
1024 } 1023 }
1025 } 1024 }
1026 list_add(&inc->p_list, &ipvs->sctp_apps[hash]); 1025 list_add_rcu(&inc->p_list, &ipvs->sctp_apps[hash]);
1027 atomic_inc(&pd->appcnt); 1026 atomic_inc(&pd->appcnt);
1028out: 1027out:
1029 spin_unlock_bh(&ipvs->sctp_app_lock);
1030 1028
1031 return ret; 1029 return ret;
1032} 1030}
1033 1031
1034static void sctp_unregister_app(struct net *net, struct ip_vs_app *inc) 1032static void sctp_unregister_app(struct net *net, struct ip_vs_app *inc)
1035{ 1033{
1036 struct netns_ipvs *ipvs = net_ipvs(net);
1037 struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_SCTP); 1034 struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_SCTP);
1038 1035
1039 spin_lock_bh(&ipvs->sctp_app_lock);
1040 atomic_dec(&pd->appcnt); 1036 atomic_dec(&pd->appcnt);
1041 list_del(&inc->p_list); 1037 list_del_rcu(&inc->p_list);
1042 spin_unlock_bh(&ipvs->sctp_app_lock);
1043} 1038}
1044 1039
1045static int sctp_app_conn_bind(struct ip_vs_conn *cp) 1040static int sctp_app_conn_bind(struct ip_vs_conn *cp)
@@ -1055,12 +1050,12 @@ static int sctp_app_conn_bind(struct ip_vs_conn *cp)
1055 /* Lookup application incarnations and bind the right one */ 1050 /* Lookup application incarnations and bind the right one */
1056 hash = sctp_app_hashkey(cp->vport); 1051 hash = sctp_app_hashkey(cp->vport);
1057 1052
1058 spin_lock(&ipvs->sctp_app_lock); 1053 rcu_read_lock();
1059 list_for_each_entry(inc, &ipvs->sctp_apps[hash], p_list) { 1054 list_for_each_entry_rcu(inc, &ipvs->sctp_apps[hash], p_list) {
1060 if (inc->port == cp->vport) { 1055 if (inc->port == cp->vport) {
1061 if (unlikely(!ip_vs_app_inc_get(inc))) 1056 if (unlikely(!ip_vs_app_inc_get(inc)))
1062 break; 1057 break;
1063 spin_unlock(&ipvs->sctp_app_lock); 1058 rcu_read_unlock();
1064 1059
1065 IP_VS_DBG_BUF(9, "%s: Binding conn %s:%u->" 1060 IP_VS_DBG_BUF(9, "%s: Binding conn %s:%u->"
1066 "%s:%u to app %s on port %u\n", 1061 "%s:%u to app %s on port %u\n",
@@ -1076,7 +1071,7 @@ static int sctp_app_conn_bind(struct ip_vs_conn *cp)
1076 goto out; 1071 goto out;
1077 } 1072 }
1078 } 1073 }
1079 spin_unlock(&ipvs->sctp_app_lock); 1074 rcu_read_unlock();
1080out: 1075out:
1081 return result; 1076 return result;
1082} 1077}
@@ -1090,7 +1085,6 @@ static int __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd)
1090 struct netns_ipvs *ipvs = net_ipvs(net); 1085 struct netns_ipvs *ipvs = net_ipvs(net);
1091 1086
1092 ip_vs_init_hash_table(ipvs->sctp_apps, SCTP_APP_TAB_SIZE); 1087 ip_vs_init_hash_table(ipvs->sctp_apps, SCTP_APP_TAB_SIZE);
1093 spin_lock_init(&ipvs->sctp_app_lock);
1094 pd->timeout_table = ip_vs_create_timeout_table((int *)sctp_timeouts, 1088 pd->timeout_table = ip_vs_create_timeout_table((int *)sctp_timeouts,
1095 sizeof(sctp_timeouts)); 1089 sizeof(sctp_timeouts));
1096 if (!pd->timeout_table) 1090 if (!pd->timeout_table)
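
The conn_schedule handlers switch from the reference-counted ip_vs_service_get()/ip_vs_service_put() pair to ip_vs_service_find() inside a single RCU read-side section spanning the whole scheduling decision. The resulting shape, condensed from the hunks above (identifiers are the surrounding file's; dport stands in for the per-protocol destination port, and error handling is elided):

	rcu_read_lock();
	svc = ip_vs_service_find(net, af, skb->mark, iph->protocol,
				 &iph->daddr, dport);
	if (svc) {
		/* ... ip_vs_todrop() / ip_vs_conn_schedule() ...
		 * every early return must rcu_read_unlock() first
		 */
	}
	rcu_read_unlock();
	/* NF_ACCEPT */
	return 1;
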
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index 9af653a75825..50a15944c6c1 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -47,9 +47,10 @@ tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
47 } 47 }
48 net = skb_net(skb); 48 net = skb_net(skb);
49 /* No !th->ack check to allow scheduling on SYN+ACK for Active FTP */ 49 /* No !th->ack check to allow scheduling on SYN+ACK for Active FTP */
50 rcu_read_lock();
50 if (th->syn && 51 if (th->syn &&
51 (svc = ip_vs_service_get(net, af, skb->mark, iph->protocol, 52 (svc = ip_vs_service_find(net, af, skb->mark, iph->protocol,
52 &iph->daddr, th->dest))) { 53 &iph->daddr, th->dest))) {
53 int ignored; 54 int ignored;
54 55
55 if (ip_vs_todrop(net_ipvs(net))) { 56 if (ip_vs_todrop(net_ipvs(net))) {
@@ -57,7 +58,7 @@ tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
57 * It seems that we are very loaded. 58 * It seems that we are very loaded.
58 * We have to drop this packet :( 59 * We have to drop this packet :(
59 */ 60 */
60 ip_vs_service_put(svc); 61 rcu_read_unlock();
61 *verdict = NF_DROP; 62 *verdict = NF_DROP;
62 return 0; 63 return 0;
63 } 64 }
@@ -70,14 +71,13 @@ tcp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
70 if (!*cpp && ignored <= 0) { 71 if (!*cpp && ignored <= 0) {
71 if (!ignored) 72 if (!ignored)
72 *verdict = ip_vs_leave(svc, skb, pd, iph); 73 *verdict = ip_vs_leave(svc, skb, pd, iph);
73 else { 74 else
74 ip_vs_service_put(svc);
75 *verdict = NF_DROP; 75 *verdict = NF_DROP;
76 } 76 rcu_read_unlock();
77 return 0; 77 return 0;
78 } 78 }
79 ip_vs_service_put(svc);
80 } 79 }
80 rcu_read_unlock();
81 /* NF_ACCEPT */ 81 /* NF_ACCEPT */
82 return 1; 82 return 1;
83} 83}
@@ -557,9 +557,9 @@ tcp_state_transition(struct ip_vs_conn *cp, int direction,
557 if (th == NULL) 557 if (th == NULL)
558 return; 558 return;
559 559
560 spin_lock(&cp->lock); 560 spin_lock_bh(&cp->lock);
561 set_tcp_state(pd, cp, direction, th); 561 set_tcp_state(pd, cp, direction, th);
562 spin_unlock(&cp->lock); 562 spin_unlock_bh(&cp->lock);
563} 563}
564 564
565static inline __u16 tcp_app_hashkey(__be16 port) 565static inline __u16 tcp_app_hashkey(__be16 port)
@@ -580,18 +580,16 @@ static int tcp_register_app(struct net *net, struct ip_vs_app *inc)
580 580
581 hash = tcp_app_hashkey(port); 581 hash = tcp_app_hashkey(port);
582 582
583 spin_lock_bh(&ipvs->tcp_app_lock);
584 list_for_each_entry(i, &ipvs->tcp_apps[hash], p_list) { 583 list_for_each_entry(i, &ipvs->tcp_apps[hash], p_list) {
585 if (i->port == port) { 584 if (i->port == port) {
586 ret = -EEXIST; 585 ret = -EEXIST;
587 goto out; 586 goto out;
588 } 587 }
589 } 588 }
590 list_add(&inc->p_list, &ipvs->tcp_apps[hash]); 589 list_add_rcu(&inc->p_list, &ipvs->tcp_apps[hash]);
591 atomic_inc(&pd->appcnt); 590 atomic_inc(&pd->appcnt);
592 591
593 out: 592 out:
594 spin_unlock_bh(&ipvs->tcp_app_lock);
595 return ret; 593 return ret;
596} 594}
597 595
@@ -599,13 +597,10 @@ static int tcp_register_app(struct net *net, struct ip_vs_app *inc)
599static void 597static void
600tcp_unregister_app(struct net *net, struct ip_vs_app *inc) 598tcp_unregister_app(struct net *net, struct ip_vs_app *inc)
601{ 599{
602 struct netns_ipvs *ipvs = net_ipvs(net);
603 struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP); 600 struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
604 601
605 spin_lock_bh(&ipvs->tcp_app_lock);
606 atomic_dec(&pd->appcnt); 602 atomic_dec(&pd->appcnt);
607 list_del(&inc->p_list); 603 list_del_rcu(&inc->p_list);
608 spin_unlock_bh(&ipvs->tcp_app_lock);
609} 604}
610 605
611 606
@@ -624,12 +619,12 @@ tcp_app_conn_bind(struct ip_vs_conn *cp)
624 /* Lookup application incarnations and bind the right one */ 619 /* Lookup application incarnations and bind the right one */
625 hash = tcp_app_hashkey(cp->vport); 620 hash = tcp_app_hashkey(cp->vport);
626 621
627 spin_lock(&ipvs->tcp_app_lock); 622 rcu_read_lock();
628 list_for_each_entry(inc, &ipvs->tcp_apps[hash], p_list) { 623 list_for_each_entry_rcu(inc, &ipvs->tcp_apps[hash], p_list) {
629 if (inc->port == cp->vport) { 624 if (inc->port == cp->vport) {
630 if (unlikely(!ip_vs_app_inc_get(inc))) 625 if (unlikely(!ip_vs_app_inc_get(inc)))
631 break; 626 break;
632 spin_unlock(&ipvs->tcp_app_lock); 627 rcu_read_unlock();
633 628
634 IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->" 629 IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->"
635 "%s:%u to app %s on port %u\n", 630 "%s:%u to app %s on port %u\n",
@@ -646,7 +641,7 @@ tcp_app_conn_bind(struct ip_vs_conn *cp)
646 goto out; 641 goto out;
647 } 642 }
648 } 643 }
649 spin_unlock(&ipvs->tcp_app_lock); 644 rcu_read_unlock();
650 645
651 out: 646 out:
652 return result; 647 return result;
@@ -660,11 +655,11 @@ void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp)
660{ 655{
661 struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP); 656 struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_TCP);
662 657
663 spin_lock(&cp->lock); 658 spin_lock_bh(&cp->lock);
664 cp->state = IP_VS_TCP_S_LISTEN; 659 cp->state = IP_VS_TCP_S_LISTEN;
665 cp->timeout = (pd ? pd->timeout_table[IP_VS_TCP_S_LISTEN] 660 cp->timeout = (pd ? pd->timeout_table[IP_VS_TCP_S_LISTEN]
666 : tcp_timeouts[IP_VS_TCP_S_LISTEN]); 661 : tcp_timeouts[IP_VS_TCP_S_LISTEN]);
667 spin_unlock(&cp->lock); 662 spin_unlock_bh(&cp->lock);
668} 663}
669 664
670/* --------------------------------------------- 665/* ---------------------------------------------
@@ -676,7 +671,6 @@ static int __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd)
676 struct netns_ipvs *ipvs = net_ipvs(net); 671 struct netns_ipvs *ipvs = net_ipvs(net);
677 672
678 ip_vs_init_hash_table(ipvs->tcp_apps, TCP_APP_TAB_SIZE); 673 ip_vs_init_hash_table(ipvs->tcp_apps, TCP_APP_TAB_SIZE);
679 spin_lock_init(&ipvs->tcp_app_lock);
680 pd->timeout_table = ip_vs_create_timeout_table((int *)tcp_timeouts, 674 pd->timeout_table = ip_vs_create_timeout_table((int *)tcp_timeouts,
681 sizeof(tcp_timeouts)); 675 sizeof(tcp_timeouts));
682 if (!pd->timeout_table) 676 if (!pd->timeout_table)
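
tcp_state_transition() and ip_vs_tcp_conn_listen() (like their SCTP counterparts earlier) move from spin_lock() to spin_lock_bh() on cp->lock, presumably because the outer _bh locks that used to disable bottom halves on these paths are gone after the RCU conversion, so the per-connection lock, which is also taken from softirq context, must disable them itself:

	spin_lock_bh(&cp->lock);  /* BHs off: lock also used in softirq */
	/* update cp->state / cp->timeout */
	spin_unlock_bh(&cp->lock);
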
diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c
index 503a842c90d2..b62a3c0ff9bf 100644
--- a/net/netfilter/ipvs/ip_vs_proto_udp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_udp.c
@@ -44,8 +44,9 @@ udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
44 return 0; 44 return 0;
45 } 45 }
46 net = skb_net(skb); 46 net = skb_net(skb);
47 svc = ip_vs_service_get(net, af, skb->mark, iph->protocol, 47 rcu_read_lock();
48 &iph->daddr, uh->dest); 48 svc = ip_vs_service_find(net, af, skb->mark, iph->protocol,
49 &iph->daddr, uh->dest);
49 if (svc) { 50 if (svc) {
50 int ignored; 51 int ignored;
51 52
@@ -54,7 +55,7 @@ udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
54 * It seems that we are very loaded. 55 * It seems that we are very loaded.
55 * We have to drop this packet :( 56 * We have to drop this packet :(
56 */ 57 */
57 ip_vs_service_put(svc); 58 rcu_read_unlock();
58 *verdict = NF_DROP; 59 *verdict = NF_DROP;
59 return 0; 60 return 0;
60 } 61 }
@@ -67,14 +68,13 @@ udp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
67 if (!*cpp && ignored <= 0) { 68 if (!*cpp && ignored <= 0) {
68 if (!ignored) 69 if (!ignored)
69 *verdict = ip_vs_leave(svc, skb, pd, iph); 70 *verdict = ip_vs_leave(svc, skb, pd, iph);
70 else { 71 else
71 ip_vs_service_put(svc);
72 *verdict = NF_DROP; 72 *verdict = NF_DROP;
73 } 73 rcu_read_unlock();
74 return 0; 74 return 0;
75 } 75 }
76 ip_vs_service_put(svc);
77 } 76 }
77 rcu_read_unlock();
78 /* NF_ACCEPT */ 78 /* NF_ACCEPT */
79 return 1; 79 return 1;
80} 80}
@@ -359,19 +359,16 @@ static int udp_register_app(struct net *net, struct ip_vs_app *inc)
359 359
360 hash = udp_app_hashkey(port); 360 hash = udp_app_hashkey(port);
361 361
362
363 spin_lock_bh(&ipvs->udp_app_lock);
364 list_for_each_entry(i, &ipvs->udp_apps[hash], p_list) { 362 list_for_each_entry(i, &ipvs->udp_apps[hash], p_list) {
365 if (i->port == port) { 363 if (i->port == port) {
366 ret = -EEXIST; 364 ret = -EEXIST;
367 goto out; 365 goto out;
368 } 366 }
369 } 367 }
370 list_add(&inc->p_list, &ipvs->udp_apps[hash]); 368 list_add_rcu(&inc->p_list, &ipvs->udp_apps[hash]);
371 atomic_inc(&pd->appcnt); 369 atomic_inc(&pd->appcnt);
372 370
373 out: 371 out:
374 spin_unlock_bh(&ipvs->udp_app_lock);
375 return ret; 372 return ret;
376} 373}
377 374
@@ -380,12 +377,9 @@ static void
380udp_unregister_app(struct net *net, struct ip_vs_app *inc) 377udp_unregister_app(struct net *net, struct ip_vs_app *inc)
381{ 378{
382 struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_UDP); 379 struct ip_vs_proto_data *pd = ip_vs_proto_data_get(net, IPPROTO_UDP);
383 struct netns_ipvs *ipvs = net_ipvs(net);
384 380
385 spin_lock_bh(&ipvs->udp_app_lock);
386 atomic_dec(&pd->appcnt); 381 atomic_dec(&pd->appcnt);
387 list_del(&inc->p_list); 382 list_del_rcu(&inc->p_list);
388 spin_unlock_bh(&ipvs->udp_app_lock);
389} 383}
390 384
391 385
@@ -403,12 +397,12 @@ static int udp_app_conn_bind(struct ip_vs_conn *cp)
403 /* Lookup application incarnations and bind the right one */ 397 /* Lookup application incarnations and bind the right one */
404 hash = udp_app_hashkey(cp->vport); 398 hash = udp_app_hashkey(cp->vport);
405 399
406 spin_lock(&ipvs->udp_app_lock); 400 rcu_read_lock();
407 list_for_each_entry(inc, &ipvs->udp_apps[hash], p_list) { 401 list_for_each_entry_rcu(inc, &ipvs->udp_apps[hash], p_list) {
408 if (inc->port == cp->vport) { 402 if (inc->port == cp->vport) {
409 if (unlikely(!ip_vs_app_inc_get(inc))) 403 if (unlikely(!ip_vs_app_inc_get(inc)))
410 break; 404 break;
411 spin_unlock(&ipvs->udp_app_lock); 405 rcu_read_unlock();
412 406
413 IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->" 407 IP_VS_DBG_BUF(9, "%s(): Binding conn %s:%u->"
414 "%s:%u to app %s on port %u\n", 408 "%s:%u to app %s on port %u\n",
@@ -425,7 +419,7 @@ static int udp_app_conn_bind(struct ip_vs_conn *cp)
425 goto out; 419 goto out;
426 } 420 }
427 } 421 }
428 spin_unlock(&ipvs->udp_app_lock); 422 rcu_read_unlock();
429 423
430 out: 424 out:
431 return result; 425 return result;
@@ -467,7 +461,6 @@ static int __udp_init(struct net *net, struct ip_vs_proto_data *pd)
467 struct netns_ipvs *ipvs = net_ipvs(net); 461 struct netns_ipvs *ipvs = net_ipvs(net);
468 462
469 ip_vs_init_hash_table(ipvs->udp_apps, UDP_APP_TAB_SIZE); 463 ip_vs_init_hash_table(ipvs->udp_apps, UDP_APP_TAB_SIZE);
470 spin_lock_init(&ipvs->udp_app_lock);
471 pd->timeout_table = ip_vs_create_timeout_table((int *)udp_timeouts, 464 pd->timeout_table = ip_vs_create_timeout_table((int *)udp_timeouts,
472 sizeof(udp_timeouts)); 465 sizeof(udp_timeouts));
473 if (!pd->timeout_table) 466 if (!pd->timeout_table)
diff --git a/net/netfilter/ipvs/ip_vs_rr.c b/net/netfilter/ipvs/ip_vs_rr.c
index c49b388d1085..c35986c793d9 100644
--- a/net/netfilter/ipvs/ip_vs_rr.c
+++ b/net/netfilter/ipvs/ip_vs_rr.c
@@ -35,9 +35,18 @@ static int ip_vs_rr_init_svc(struct ip_vs_service *svc)
35} 35}
36 36
37 37
38static int ip_vs_rr_update_svc(struct ip_vs_service *svc) 38static int ip_vs_rr_del_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest)
39{ 39{
40 svc->sched_data = &svc->destinations; 40 struct list_head *p;
41
42 spin_lock_bh(&svc->sched_lock);
43 p = (struct list_head *) svc->sched_data;
44 /* dest is already unlinked, so p->prev is not valid but
45 * p->next is valid, use it to reach previous entry.
46 */
47 if (p == &dest->n_list)
48 svc->sched_data = p->next->prev;
49 spin_unlock_bh(&svc->sched_lock);
41 return 0; 50 return 0;
42} 51}
43 52
@@ -48,36 +57,41 @@ static int ip_vs_rr_update_svc(struct ip_vs_service *svc)
48static struct ip_vs_dest * 57static struct ip_vs_dest *
49ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) 58ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
50{ 59{
51 struct list_head *p, *q; 60 struct list_head *p;
52 struct ip_vs_dest *dest; 61 struct ip_vs_dest *dest, *last;
62 int pass = 0;
53 63
54 IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); 64 IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
55 65
56 write_lock(&svc->sched_lock); 66 spin_lock_bh(&svc->sched_lock);
57 p = (struct list_head *)svc->sched_data; 67 p = (struct list_head *) svc->sched_data;
58 p = p->next; 68 last = dest = list_entry(p, struct ip_vs_dest, n_list);
59 q = p; 69
60 do { 70 do {
61 /* skip list head */ 71 list_for_each_entry_continue_rcu(dest,
62 if (q == &svc->destinations) { 72 &svc->destinations,
63 q = q->next; 73 n_list) {
64 continue; 74 if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) &&
75 atomic_read(&dest->weight) > 0)
76 /* HIT */
77 goto out;
78 if (dest == last)
79 goto stop;
65 } 80 }
66 81 pass++;
67 dest = list_entry(q, struct ip_vs_dest, n_list); 82 /* Previous dest could be unlinked, do not loop forever.
68 if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) && 83 * If we stay at head there is no need for 2nd pass.
69 atomic_read(&dest->weight) > 0) 84 */
70 /* HIT */ 85 } while (pass < 2 && p != &svc->destinations);
71 goto out; 86
72 q = q->next; 87stop:
73 } while (q != p); 88 spin_unlock_bh(&svc->sched_lock);
74 write_unlock(&svc->sched_lock);
75 ip_vs_scheduler_err(svc, "no destination available"); 89 ip_vs_scheduler_err(svc, "no destination available");
76 return NULL; 90 return NULL;
77 91
78 out: 92 out:
79 svc->sched_data = q; 93 svc->sched_data = &dest->n_list;
80 write_unlock(&svc->sched_lock); 94 spin_unlock_bh(&svc->sched_lock);
81 IP_VS_DBG_BUF(6, "RR: server %s:%u " 95 IP_VS_DBG_BUF(6, "RR: server %s:%u "
82 "activeconns %d refcnt %d weight %d\n", 96 "activeconns %d refcnt %d weight %d\n",
83 IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port), 97 IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port),
@@ -94,7 +108,8 @@ static struct ip_vs_scheduler ip_vs_rr_scheduler = {
94 .module = THIS_MODULE, 108 .module = THIS_MODULE,
95 .n_list = LIST_HEAD_INIT(ip_vs_rr_scheduler.n_list), 109 .n_list = LIST_HEAD_INIT(ip_vs_rr_scheduler.n_list),
96 .init_service = ip_vs_rr_init_svc, 110 .init_service = ip_vs_rr_init_svc,
97 .update_service = ip_vs_rr_update_svc, 111 .add_dest = NULL,
112 .del_dest = ip_vs_rr_del_dest,
98 .schedule = ip_vs_rr_schedule, 113 .schedule = ip_vs_rr_schedule,
99}; 114};
100 115
@@ -106,6 +121,7 @@ static int __init ip_vs_rr_init(void)
106static void __exit ip_vs_rr_cleanup(void) 121static void __exit ip_vs_rr_cleanup(void)
107{ 122{
108 unregister_ip_vs_scheduler(&ip_vs_rr_scheduler); 123 unregister_ip_vs_scheduler(&ip_vs_rr_scheduler);
124 synchronize_rcu();
109} 125}
110 126
111module_init(ip_vs_rr_init); 127module_init(ip_vs_rr_init);
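
The round-robin scheduler replaces update_service with a del_dest hook because its cursor (svc->sched_data) can point at the destination being removed. By the time the hook runs, list_del_rcu() has already unlinked the dest: its ->prev is poisoned, but ->next still points into the live list so that RCU readers can keep walking. Following next->prev therefore yields the node that now occupies the removal point. A sketch of that recovery (illustrative names):

	#include <linux/rculist.h>

	/* After list_del_rcu(removed): removed->next is intact, and
	 * removed->next->prev already points at removed's old
	 * predecessor, so the cursor lands on a live node.
	 */
	static void demo_fix_cursor(struct list_head **cursor,
				    struct list_head *removed)
	{
		if (*cursor == removed)
			*cursor = removed->next->prev;
	}
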
diff --git a/net/netfilter/ipvs/ip_vs_sched.c b/net/netfilter/ipvs/ip_vs_sched.c
index d6bf20d6cdbe..4dbcda6258bc 100644
--- a/net/netfilter/ipvs/ip_vs_sched.c
+++ b/net/netfilter/ipvs/ip_vs_sched.c
@@ -35,8 +35,8 @@ EXPORT_SYMBOL(ip_vs_scheduler_err);
35 */ 35 */
36static LIST_HEAD(ip_vs_schedulers); 36static LIST_HEAD(ip_vs_schedulers);
37 37
38/* lock for service table */ 38/* mutex for schedulers */
39static DEFINE_SPINLOCK(ip_vs_sched_lock); 39static DEFINE_MUTEX(ip_vs_sched_mutex);
40 40
41 41
42/* 42/*
@@ -47,8 +47,6 @@ int ip_vs_bind_scheduler(struct ip_vs_service *svc,
47{ 47{
48 int ret; 48 int ret;
49 49
50 svc->scheduler = scheduler;
51
52 if (scheduler->init_service) { 50 if (scheduler->init_service) {
53 ret = scheduler->init_service(svc); 51 ret = scheduler->init_service(svc);
54 if (ret) { 52 if (ret) {
@@ -56,7 +54,7 @@ int ip_vs_bind_scheduler(struct ip_vs_service *svc,
56 return ret; 54 return ret;
57 } 55 }
58 } 56 }
59 57 rcu_assign_pointer(svc->scheduler, scheduler);
60 return 0; 58 return 0;
61} 59}
62 60
@@ -64,22 +62,19 @@ int ip_vs_bind_scheduler(struct ip_vs_service *svc,
64/* 62/*
65 * Unbind a service with its scheduler 63 * Unbind a service with its scheduler
66 */ 64 */
67int ip_vs_unbind_scheduler(struct ip_vs_service *svc) 65void ip_vs_unbind_scheduler(struct ip_vs_service *svc,
66 struct ip_vs_scheduler *sched)
68{ 67{
69 struct ip_vs_scheduler *sched = svc->scheduler; 68 struct ip_vs_scheduler *cur_sched;
70 69
71 if (!sched) 70 cur_sched = rcu_dereference_protected(svc->scheduler, 1);
72 return 0; 71 /* This check proves that old 'sched' was installed */
72 if (!cur_sched)
73 return;
73 74
74 if (sched->done_service) { 75 if (sched->done_service)
75 if (sched->done_service(svc) != 0) { 76 sched->done_service(svc);
76 pr_err("%s(): done error\n", __func__); 77 /* svc->scheduler can not be set to NULL */
77 return -EINVAL;
78 }
79 }
80
81 svc->scheduler = NULL;
82 return 0;
83} 78}
84 79
85 80
@@ -92,7 +87,7 @@ static struct ip_vs_scheduler *ip_vs_sched_getbyname(const char *sched_name)
92 87
93 IP_VS_DBG(2, "%s(): sched_name \"%s\"\n", __func__, sched_name); 88 IP_VS_DBG(2, "%s(): sched_name \"%s\"\n", __func__, sched_name);
94 89
95 spin_lock_bh(&ip_vs_sched_lock); 90 mutex_lock(&ip_vs_sched_mutex);
96 91
97 list_for_each_entry(sched, &ip_vs_schedulers, n_list) { 92 list_for_each_entry(sched, &ip_vs_schedulers, n_list) {
98 /* 93 /*
@@ -106,14 +101,14 @@ static struct ip_vs_scheduler *ip_vs_sched_getbyname(const char *sched_name)
106 } 101 }
107 if (strcmp(sched_name, sched->name)==0) { 102 if (strcmp(sched_name, sched->name)==0) {
108 /* HIT */ 103 /* HIT */
109 spin_unlock_bh(&ip_vs_sched_lock); 104 mutex_unlock(&ip_vs_sched_mutex);
110 return sched; 105 return sched;
111 } 106 }
112 if (sched->module) 107 if (sched->module)
113 module_put(sched->module); 108 module_put(sched->module);
114 } 109 }
115 110
116 spin_unlock_bh(&ip_vs_sched_lock); 111 mutex_unlock(&ip_vs_sched_mutex);
117 return NULL; 112 return NULL;
118} 113}
119 114
@@ -153,21 +148,21 @@ void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler)
153 148
154void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg) 149void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg)
155{ 150{
151 struct ip_vs_scheduler *sched;
152
153 sched = rcu_dereference(svc->scheduler);
156 if (svc->fwmark) { 154 if (svc->fwmark) {
157 IP_VS_ERR_RL("%s: FWM %u 0x%08X - %s\n", 155 IP_VS_ERR_RL("%s: FWM %u 0x%08X - %s\n",
158 svc->scheduler->name, svc->fwmark, 156 sched->name, svc->fwmark, svc->fwmark, msg);
159 svc->fwmark, msg);
160#ifdef CONFIG_IP_VS_IPV6 157#ifdef CONFIG_IP_VS_IPV6
161 } else if (svc->af == AF_INET6) { 158 } else if (svc->af == AF_INET6) {
162 IP_VS_ERR_RL("%s: %s [%pI6c]:%d - %s\n", 159 IP_VS_ERR_RL("%s: %s [%pI6c]:%d - %s\n",
163 svc->scheduler->name, 160 sched->name, ip_vs_proto_name(svc->protocol),
164 ip_vs_proto_name(svc->protocol),
165 &svc->addr.in6, ntohs(svc->port), msg); 161 &svc->addr.in6, ntohs(svc->port), msg);
166#endif 162#endif
167 } else { 163 } else {
168 IP_VS_ERR_RL("%s: %s %pI4:%d - %s\n", 164 IP_VS_ERR_RL("%s: %s %pI4:%d - %s\n",
169 svc->scheduler->name, 165 sched->name, ip_vs_proto_name(svc->protocol),
170 ip_vs_proto_name(svc->protocol),
171 &svc->addr.ip, ntohs(svc->port), msg); 166 &svc->addr.ip, ntohs(svc->port), msg);
172 } 167 }
173} 168}
@@ -192,10 +187,10 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
192 /* increase the module use count */ 187 /* increase the module use count */
193 ip_vs_use_count_inc(); 188 ip_vs_use_count_inc();
194 189
195 spin_lock_bh(&ip_vs_sched_lock); 190 mutex_lock(&ip_vs_sched_mutex);
196 191
197 if (!list_empty(&scheduler->n_list)) { 192 if (!list_empty(&scheduler->n_list)) {
198 spin_unlock_bh(&ip_vs_sched_lock); 193 mutex_unlock(&ip_vs_sched_mutex);
199 ip_vs_use_count_dec(); 194 ip_vs_use_count_dec();
200 pr_err("%s(): [%s] scheduler already linked\n", 195 pr_err("%s(): [%s] scheduler already linked\n",
201 __func__, scheduler->name); 196 __func__, scheduler->name);
@@ -208,7 +203,7 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
208 */ 203 */
209 list_for_each_entry(sched, &ip_vs_schedulers, n_list) { 204 list_for_each_entry(sched, &ip_vs_schedulers, n_list) {
210 if (strcmp(scheduler->name, sched->name) == 0) { 205 if (strcmp(scheduler->name, sched->name) == 0) {
211 spin_unlock_bh(&ip_vs_sched_lock); 206 mutex_unlock(&ip_vs_sched_mutex);
212 ip_vs_use_count_dec(); 207 ip_vs_use_count_dec();
213 pr_err("%s(): [%s] scheduler already existed " 208 pr_err("%s(): [%s] scheduler already existed "
214 "in the system\n", __func__, scheduler->name); 209 "in the system\n", __func__, scheduler->name);
@@ -219,7 +214,7 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
219 * Add it into the d-linked scheduler list 214 * Add it into the d-linked scheduler list
220 */ 215 */
221 list_add(&scheduler->n_list, &ip_vs_schedulers); 216 list_add(&scheduler->n_list, &ip_vs_schedulers);
222 spin_unlock_bh(&ip_vs_sched_lock); 217 mutex_unlock(&ip_vs_sched_mutex);
223 218
224 pr_info("[%s] scheduler registered.\n", scheduler->name); 219 pr_info("[%s] scheduler registered.\n", scheduler->name);
225 220
@@ -237,9 +232,9 @@ int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
237 return -EINVAL; 232 return -EINVAL;
238 } 233 }
239 234
240 spin_lock_bh(&ip_vs_sched_lock); 235 mutex_lock(&ip_vs_sched_mutex);
241 if (list_empty(&scheduler->n_list)) { 236 if (list_empty(&scheduler->n_list)) {
242 spin_unlock_bh(&ip_vs_sched_lock); 237 mutex_unlock(&ip_vs_sched_mutex);
243 pr_err("%s(): [%s] scheduler is not in the list. failed\n", 238 pr_err("%s(): [%s] scheduler is not in the list. failed\n",
244 __func__, scheduler->name); 239 __func__, scheduler->name);
245 return -EINVAL; 240 return -EINVAL;
@@ -249,7 +244,7 @@ int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
249 * Remove it from the d-linked scheduler list 244 * Remove it from the d-linked scheduler list
250 */ 245 */
251 list_del(&scheduler->n_list); 246 list_del(&scheduler->n_list);
252 spin_unlock_bh(&ip_vs_sched_lock); 247 mutex_unlock(&ip_vs_sched_mutex);
253 248
254 /* decrease the module use count */ 249 /* decrease the module use count */
255 ip_vs_use_count_dec(); 250 ip_vs_use_count_dec();
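
svc->scheduler becomes an RCU-managed pointer: ip_vs_bind_scheduler() publishes it with rcu_assign_pointer() only after init_service() has fully built sched_data, and readers such as ip_vs_scheduler_err() pick it up with rcu_dereference(). Condensed writer/reader pairing (identifiers from this file; the reader fragment assumes a surrounding RCU read-side section):

	/* writer, after init_service() succeeded */
	rcu_assign_pointer(svc->scheduler, scheduler);

	/* reader */
	sched = rcu_dereference(svc->scheduler);
	if (sched)
		dest = sched->schedule(svc, skb);
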
diff --git a/net/netfilter/ipvs/ip_vs_sed.c b/net/netfilter/ipvs/ip_vs_sed.c
index 89ead246ed3d..f3205925359a 100644
--- a/net/netfilter/ipvs/ip_vs_sed.c
+++ b/net/netfilter/ipvs/ip_vs_sed.c
@@ -79,7 +79,7 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
79 * new connections. 79 * new connections.
80 */ 80 */
81 81
82 list_for_each_entry(dest, &svc->destinations, n_list) { 82 list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
83 if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) && 83 if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) &&
84 atomic_read(&dest->weight) > 0) { 84 atomic_read(&dest->weight) > 0) {
85 least = dest; 85 least = dest;
@@ -94,7 +94,7 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
94 * Find the destination with the least load. 94 * Find the destination with the least load.
95 */ 95 */
96 nextstage: 96 nextstage:
97 list_for_each_entry_continue(dest, &svc->destinations, n_list) { 97 list_for_each_entry_continue_rcu(dest, &svc->destinations, n_list) {
98 if (dest->flags & IP_VS_DEST_F_OVERLOAD) 98 if (dest->flags & IP_VS_DEST_F_OVERLOAD)
99 continue; 99 continue;
100 doh = ip_vs_sed_dest_overhead(dest); 100 doh = ip_vs_sed_dest_overhead(dest);
@@ -134,6 +134,7 @@ static int __init ip_vs_sed_init(void)
134static void __exit ip_vs_sed_cleanup(void) 134static void __exit ip_vs_sed_cleanup(void)
135{ 135{
136 unregister_ip_vs_scheduler(&ip_vs_sed_scheduler); 136 unregister_ip_vs_scheduler(&ip_vs_sed_scheduler);
137 synchronize_rcu();
137} 138}
138 139
139module_init(ip_vs_sed_init); 140module_init(ip_vs_sed_init);
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c
index e33126994628..0df269d7c99f 100644
--- a/net/netfilter/ipvs/ip_vs_sh.c
+++ b/net/netfilter/ipvs/ip_vs_sh.c
@@ -53,7 +53,7 @@
53 * IPVS SH bucket 53 * IPVS SH bucket
54 */ 54 */
55struct ip_vs_sh_bucket { 55struct ip_vs_sh_bucket {
56 struct ip_vs_dest *dest; /* real server (cache) */ 56 struct ip_vs_dest __rcu *dest; /* real server (cache) */
57}; 57};
58 58
59/* 59/*
@@ -66,6 +66,10 @@ struct ip_vs_sh_bucket {
66#define IP_VS_SH_TAB_SIZE (1 << IP_VS_SH_TAB_BITS) 66#define IP_VS_SH_TAB_SIZE (1 << IP_VS_SH_TAB_BITS)
67#define IP_VS_SH_TAB_MASK (IP_VS_SH_TAB_SIZE - 1) 67#define IP_VS_SH_TAB_MASK (IP_VS_SH_TAB_SIZE - 1)
68 68
69struct ip_vs_sh_state {
70 struct ip_vs_sh_bucket buckets[IP_VS_SH_TAB_SIZE];
71 struct rcu_head rcu_head;
72};
69 73
70/* 74/*
71 * Returns hash value for IPVS SH entry 75 * Returns hash value for IPVS SH entry
@@ -87,10 +91,9 @@ static inline unsigned int ip_vs_sh_hashkey(int af, const union nf_inet_addr *ad
87 * Get ip_vs_dest associated with supplied parameters. 91 * Get ip_vs_dest associated with supplied parameters.
88 */ 92 */
89static inline struct ip_vs_dest * 93static inline struct ip_vs_dest *
90ip_vs_sh_get(int af, struct ip_vs_sh_bucket *tbl, 94ip_vs_sh_get(int af, struct ip_vs_sh_state *s, const union nf_inet_addr *addr)
91 const union nf_inet_addr *addr)
92{ 95{
93 return (tbl[ip_vs_sh_hashkey(af, addr)]).dest; 96 return rcu_dereference(s->buckets[ip_vs_sh_hashkey(af, addr)].dest);
94} 97}
95 98
96 99
@@ -98,27 +101,32 @@ ip_vs_sh_get(int af, struct ip_vs_sh_bucket *tbl,
98 * Assign all the hash buckets of the specified table with the service. 101 * Assign all the hash buckets of the specified table with the service.
99 */ 102 */
100static int 103static int
101ip_vs_sh_assign(struct ip_vs_sh_bucket *tbl, struct ip_vs_service *svc) 104ip_vs_sh_reassign(struct ip_vs_sh_state *s, struct ip_vs_service *svc)
102{ 105{
103 int i; 106 int i;
104 struct ip_vs_sh_bucket *b; 107 struct ip_vs_sh_bucket *b;
105 struct list_head *p; 108 struct list_head *p;
106 struct ip_vs_dest *dest; 109 struct ip_vs_dest *dest;
107 int d_count; 110 int d_count;
111 bool empty;
108 112
109 b = tbl; 113 b = &s->buckets[0];
110 p = &svc->destinations; 114 p = &svc->destinations;
115 empty = list_empty(p);
111 d_count = 0; 116 d_count = 0;
112 for (i=0; i<IP_VS_SH_TAB_SIZE; i++) { 117 for (i=0; i<IP_VS_SH_TAB_SIZE; i++) {
113 if (list_empty(p)) { 118 dest = rcu_dereference_protected(b->dest, 1);
114 b->dest = NULL; 119 if (dest)
115 } else { 120 ip_vs_dest_put(dest);
121 if (empty)
122 RCU_INIT_POINTER(b->dest, NULL);
123 else {
116 if (p == &svc->destinations) 124 if (p == &svc->destinations)
117 p = p->next; 125 p = p->next;
118 126
119 dest = list_entry(p, struct ip_vs_dest, n_list); 127 dest = list_entry(p, struct ip_vs_dest, n_list);
120 atomic_inc(&dest->refcnt); 128 ip_vs_dest_hold(dest);
121 b->dest = dest; 129 RCU_INIT_POINTER(b->dest, dest);
122 130
123 IP_VS_DBG_BUF(6, "assigned i: %d dest: %s weight: %d\n", 131 IP_VS_DBG_BUF(6, "assigned i: %d dest: %s weight: %d\n",
124 i, IP_VS_DBG_ADDR(svc->af, &dest->addr), 132 i, IP_VS_DBG_ADDR(svc->af, &dest->addr),
@@ -140,16 +148,18 @@ ip_vs_sh_assign(struct ip_vs_sh_bucket *tbl, struct ip_vs_service *svc)
140/* 148/*
141 * Flush all the hash buckets of the specified table. 149 * Flush all the hash buckets of the specified table.
142 */ 150 */
143static void ip_vs_sh_flush(struct ip_vs_sh_bucket *tbl) 151static void ip_vs_sh_flush(struct ip_vs_sh_state *s)
144{ 152{
145 int i; 153 int i;
146 struct ip_vs_sh_bucket *b; 154 struct ip_vs_sh_bucket *b;
155 struct ip_vs_dest *dest;
147 156
148 b = tbl; 157 b = &s->buckets[0];
149 for (i=0; i<IP_VS_SH_TAB_SIZE; i++) { 158 for (i=0; i<IP_VS_SH_TAB_SIZE; i++) {
150 if (b->dest) { 159 dest = rcu_dereference_protected(b->dest, 1);
151 atomic_dec(&b->dest->refcnt); 160 if (dest) {
152 b->dest = NULL; 161 ip_vs_dest_put(dest);
162 RCU_INIT_POINTER(b->dest, NULL);
153 } 163 }
154 b++; 164 b++;
155 } 165 }
@@ -158,51 +168,46 @@ static void ip_vs_sh_flush(struct ip_vs_sh_bucket *tbl)
158 168
159static int ip_vs_sh_init_svc(struct ip_vs_service *svc) 169static int ip_vs_sh_init_svc(struct ip_vs_service *svc)
160{ 170{
161 struct ip_vs_sh_bucket *tbl; 171 struct ip_vs_sh_state *s;
162 172
163 /* allocate the SH table for this service */ 173 /* allocate the SH table for this service */
164 tbl = kmalloc(sizeof(struct ip_vs_sh_bucket)*IP_VS_SH_TAB_SIZE, 174 s = kzalloc(sizeof(struct ip_vs_sh_state), GFP_KERNEL);
165 GFP_KERNEL); 175 if (s == NULL)
166 if (tbl == NULL)
167 return -ENOMEM; 176 return -ENOMEM;
168 177
169 svc->sched_data = tbl; 178 svc->sched_data = s;
170 IP_VS_DBG(6, "SH hash table (memory=%Zdbytes) allocated for " 179 IP_VS_DBG(6, "SH hash table (memory=%Zdbytes) allocated for "
171 "current service\n", 180 "current service\n",
172 sizeof(struct ip_vs_sh_bucket)*IP_VS_SH_TAB_SIZE); 181 sizeof(struct ip_vs_sh_bucket)*IP_VS_SH_TAB_SIZE);
173 182
174 /* assign the hash buckets with the updated service */ 183 /* assign the hash buckets with current dests */
175 ip_vs_sh_assign(tbl, svc); 184 ip_vs_sh_reassign(s, svc);
176 185
177 return 0; 186 return 0;
178} 187}
179 188
180 189
181static int ip_vs_sh_done_svc(struct ip_vs_service *svc) 190static void ip_vs_sh_done_svc(struct ip_vs_service *svc)
182{ 191{
183 struct ip_vs_sh_bucket *tbl = svc->sched_data; 192 struct ip_vs_sh_state *s = svc->sched_data;
184 193
185 /* got to clean up hash buckets here */ 194 /* got to clean up hash buckets here */
186 ip_vs_sh_flush(tbl); 195 ip_vs_sh_flush(s);
187 196
188 /* release the table itself */ 197 /* release the table itself */
189 kfree(svc->sched_data); 198 kfree_rcu(s, rcu_head);
190 IP_VS_DBG(6, "SH hash table (memory=%Zdbytes) released\n", 199 IP_VS_DBG(6, "SH hash table (memory=%Zdbytes) released\n",
191 sizeof(struct ip_vs_sh_bucket)*IP_VS_SH_TAB_SIZE); 200 sizeof(struct ip_vs_sh_bucket)*IP_VS_SH_TAB_SIZE);
192
193 return 0;
194} 201}
195 202
196 203
197static int ip_vs_sh_update_svc(struct ip_vs_service *svc) 204static int ip_vs_sh_dest_changed(struct ip_vs_service *svc,
205 struct ip_vs_dest *dest)
198{ 206{
199 struct ip_vs_sh_bucket *tbl = svc->sched_data; 207 struct ip_vs_sh_state *s = svc->sched_data;
200
201 /* got to clean up hash buckets here */
202 ip_vs_sh_flush(tbl);
203 208
204 /* assign the hash buckets with the updated service */ 209 /* assign the hash buckets with the updated service */
205 ip_vs_sh_assign(tbl, svc); 210 ip_vs_sh_reassign(s, svc);
206 211
207 return 0; 212 return 0;
208} 213}
@@ -225,15 +230,15 @@ static struct ip_vs_dest *
225ip_vs_sh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) 230ip_vs_sh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
226{ 231{
227 struct ip_vs_dest *dest; 232 struct ip_vs_dest *dest;
228 struct ip_vs_sh_bucket *tbl; 233 struct ip_vs_sh_state *s;
229 struct ip_vs_iphdr iph; 234 struct ip_vs_iphdr iph;
230 235
231 ip_vs_fill_iph_addr_only(svc->af, skb, &iph); 236 ip_vs_fill_iph_addr_only(svc->af, skb, &iph);
232 237
233 IP_VS_DBG(6, "ip_vs_sh_schedule(): Scheduling...\n"); 238 IP_VS_DBG(6, "ip_vs_sh_schedule(): Scheduling...\n");
234 239
235 tbl = (struct ip_vs_sh_bucket *)svc->sched_data; 240 s = (struct ip_vs_sh_state *) svc->sched_data;
236 dest = ip_vs_sh_get(svc->af, tbl, &iph.saddr); 241 dest = ip_vs_sh_get(svc->af, s, &iph.saddr);
237 if (!dest 242 if (!dest
238 || !(dest->flags & IP_VS_DEST_F_AVAILABLE) 243 || !(dest->flags & IP_VS_DEST_F_AVAILABLE)
239 || atomic_read(&dest->weight) <= 0 244 || atomic_read(&dest->weight) <= 0
@@ -262,7 +267,9 @@ static struct ip_vs_scheduler ip_vs_sh_scheduler =
262 .n_list = LIST_HEAD_INIT(ip_vs_sh_scheduler.n_list), 267 .n_list = LIST_HEAD_INIT(ip_vs_sh_scheduler.n_list),
263 .init_service = ip_vs_sh_init_svc, 268 .init_service = ip_vs_sh_init_svc,
264 .done_service = ip_vs_sh_done_svc, 269 .done_service = ip_vs_sh_done_svc,
265 .update_service = ip_vs_sh_update_svc, 270 .add_dest = ip_vs_sh_dest_changed,
271 .del_dest = ip_vs_sh_dest_changed,
272 .upd_dest = ip_vs_sh_dest_changed,
266 .schedule = ip_vs_sh_schedule, 273 .schedule = ip_vs_sh_schedule,
267}; 274};
268 275
@@ -276,6 +283,7 @@ static int __init ip_vs_sh_init(void)
276static void __exit ip_vs_sh_cleanup(void) 283static void __exit ip_vs_sh_cleanup(void)
277{ 284{
278 unregister_ip_vs_scheduler(&ip_vs_sh_scheduler); 285 unregister_ip_vs_scheduler(&ip_vs_sh_scheduler);
286 synchronize_rcu();
279} 287}
280 288
281 289
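These scheduler conversions all share one teardown ordering. The sketch below is illustrative only (the names sched_state, my_done_svc, my_scheduler are hypothetical): per-service data is freed with kfree_rcu() because lockless readers in the schedule callback may still hold a pointer to it, and module exit calls synchronize_rcu() so that no reader is still executing scheduler code when the module text is unloaded.

	struct sched_state {
		/* ... per-service state read under rcu_read_lock() ... */
		struct rcu_head rcu_head;
	};

	static void my_done_svc(struct ip_vs_service *svc)
	{
		struct sched_state *s = svc->sched_data;

		/* readers may still dereference s; defer the free */
		kfree_rcu(s, rcu_head);
	}

	static void __exit my_cleanup(void)
	{
		unregister_ip_vs_scheduler(&my_scheduler);
		/* wait out all RCU readers before the module goes away */
		synchronize_rcu();
	}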
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 44fd10c539ac..8e57077e5540 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -531,9 +531,9 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
531 if (!ip_vs_sync_conn_needed(ipvs, cp, pkts)) 531 if (!ip_vs_sync_conn_needed(ipvs, cp, pkts))
532 return; 532 return;
533 533
534 spin_lock(&ipvs->sync_buff_lock); 534 spin_lock_bh(&ipvs->sync_buff_lock);
535 if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) { 535 if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
536 spin_unlock(&ipvs->sync_buff_lock); 536 spin_unlock_bh(&ipvs->sync_buff_lock);
537 return; 537 return;
538 } 538 }
539 539
@@ -552,7 +552,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
552 if (!buff) { 552 if (!buff) {
553 buff = ip_vs_sync_buff_create_v0(ipvs); 553 buff = ip_vs_sync_buff_create_v0(ipvs);
554 if (!buff) { 554 if (!buff) {
555 spin_unlock(&ipvs->sync_buff_lock); 555 spin_unlock_bh(&ipvs->sync_buff_lock);
556 pr_err("ip_vs_sync_buff_create failed.\n"); 556 pr_err("ip_vs_sync_buff_create failed.\n");
557 return; 557 return;
558 } 558 }
@@ -590,7 +590,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
590 sb_queue_tail(ipvs, ms); 590 sb_queue_tail(ipvs, ms);
591 ms->sync_buff = NULL; 591 ms->sync_buff = NULL;
592 } 592 }
593 spin_unlock(&ipvs->sync_buff_lock); 593 spin_unlock_bh(&ipvs->sync_buff_lock);
594 594
595 /* synchronize its controller if it has */ 595 /* synchronize its controller if it has */
596 cp = cp->control; 596 cp = cp->control;
@@ -641,9 +641,9 @@ sloop:
641 pe_name_len = strnlen(cp->pe->name, IP_VS_PENAME_MAXLEN); 641 pe_name_len = strnlen(cp->pe->name, IP_VS_PENAME_MAXLEN);
642 } 642 }
643 643
644 spin_lock(&ipvs->sync_buff_lock); 644 spin_lock_bh(&ipvs->sync_buff_lock);
645 if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) { 645 if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
646 spin_unlock(&ipvs->sync_buff_lock); 646 spin_unlock_bh(&ipvs->sync_buff_lock);
647 return; 647 return;
648 } 648 }
649 649
@@ -683,7 +683,7 @@ sloop:
683 if (!buff) { 683 if (!buff) {
684 buff = ip_vs_sync_buff_create(ipvs); 684 buff = ip_vs_sync_buff_create(ipvs);
685 if (!buff) { 685 if (!buff) {
686 spin_unlock(&ipvs->sync_buff_lock); 686 spin_unlock_bh(&ipvs->sync_buff_lock);
687 pr_err("ip_vs_sync_buff_create failed.\n"); 687 pr_err("ip_vs_sync_buff_create failed.\n");
688 return; 688 return;
689 } 689 }
@@ -750,7 +750,7 @@ sloop:
750 } 750 }
751 } 751 }
752 752
753 spin_unlock(&ipvs->sync_buff_lock); 753 spin_unlock_bh(&ipvs->sync_buff_lock);
754 754
755control: 755control:
756 /* synchronize its controller if it has */ 756 /* synchronize its controller if it has */
@@ -843,7 +843,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
843 kfree(param->pe_data); 843 kfree(param->pe_data);
844 844
845 dest = cp->dest; 845 dest = cp->dest;
846 spin_lock(&cp->lock); 846 spin_lock_bh(&cp->lock);
847 if ((cp->flags ^ flags) & IP_VS_CONN_F_INACTIVE && 847 if ((cp->flags ^ flags) & IP_VS_CONN_F_INACTIVE &&
848 !(flags & IP_VS_CONN_F_TEMPLATE) && dest) { 848 !(flags & IP_VS_CONN_F_TEMPLATE) && dest) {
849 if (flags & IP_VS_CONN_F_INACTIVE) { 849 if (flags & IP_VS_CONN_F_INACTIVE) {
@@ -857,24 +857,21 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
857 flags &= IP_VS_CONN_F_BACKUP_UPD_MASK; 857 flags &= IP_VS_CONN_F_BACKUP_UPD_MASK;
858 flags |= cp->flags & ~IP_VS_CONN_F_BACKUP_UPD_MASK; 858 flags |= cp->flags & ~IP_VS_CONN_F_BACKUP_UPD_MASK;
859 cp->flags = flags; 859 cp->flags = flags;
860 spin_unlock(&cp->lock); 860 spin_unlock_bh(&cp->lock);
861 if (!dest) { 861 if (!dest)
862 dest = ip_vs_try_bind_dest(cp); 862 ip_vs_try_bind_dest(cp);
863 if (dest)
864 atomic_dec(&dest->refcnt);
865 }
866 } else { 863 } else {
867 /* 864 /*
868 * Find the appropriate destination for the connection. 865 * Find the appropriate destination for the connection.
869 * If it is not found the connection will remain unbound 866 * If it is not found the connection will remain unbound
870 * but still handled. 867 * but still handled.
871 */ 868 */
869 rcu_read_lock();
872 dest = ip_vs_find_dest(net, type, daddr, dport, param->vaddr, 870 dest = ip_vs_find_dest(net, type, daddr, dport, param->vaddr,
873 param->vport, protocol, fwmark, flags); 871 param->vport, protocol, fwmark, flags);
874 872
875 cp = ip_vs_conn_new(param, daddr, dport, flags, dest, fwmark); 873 cp = ip_vs_conn_new(param, daddr, dport, flags, dest, fwmark);
876 if (dest) 874 rcu_read_unlock();
877 atomic_dec(&dest->refcnt);
878 if (!cp) { 875 if (!cp) {
879 if (param->pe_data) 876 if (param->pe_data)
880 kfree(param->pe_data); 877 kfree(param->pe_data);
@@ -1692,11 +1689,7 @@ static int sync_thread_backup(void *data)
1692 break; 1689 break;
1693 } 1690 }
1694 1691
1695 /* disable bottom half, because it accesses the data
1696 shared by softirq while getting/creating conns */
1697 local_bh_disable();
1698 ip_vs_process_message(tinfo->net, tinfo->buf, len); 1692 ip_vs_process_message(tinfo->net, tinfo->buf, len);
1699 local_bh_enable();
1700 } 1693 }
1701 } 1694 }
1702 1695
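The lock conversions above follow from removing local_bh_disable() in the backup thread: once the caller no longer masks bottom halves around message processing, every lock that is also taken from softirq context must disable them itself. A minimal sketch of the resulting pattern, with a hypothetical shared_lock standing in for sync_buff_lock and cp->lock:

	static DEFINE_SPINLOCK(shared_lock);

	static void process_context_path(void)	/* e.g. the backup thread */
	{
		/* _bh variant: a softirq on this CPU cannot interrupt
		 * the critical section and deadlock on the same lock
		 */
		spin_lock_bh(&shared_lock);
		/* ... touch data shared with the softirq path ... */
		spin_unlock_bh(&shared_lock);
	}

	static void softirq_path(void)		/* e.g. packet processing */
	{
		spin_lock(&shared_lock);	/* BHs already disabled here */
		/* ... */
		spin_unlock(&shared_lock);
	}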
diff --git a/net/netfilter/ipvs/ip_vs_wlc.c b/net/netfilter/ipvs/ip_vs_wlc.c
index bc1bfc48a17f..c60a81c4ce9a 100644
--- a/net/netfilter/ipvs/ip_vs_wlc.c
+++ b/net/netfilter/ipvs/ip_vs_wlc.c
@@ -51,7 +51,7 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
51 * new connections. 51 * new connections.
52 */ 52 */
53 53
54 list_for_each_entry(dest, &svc->destinations, n_list) { 54 list_for_each_entry_rcu(dest, &svc->destinations, n_list) {
55 if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) && 55 if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) &&
56 atomic_read(&dest->weight) > 0) { 56 atomic_read(&dest->weight) > 0) {
57 least = dest; 57 least = dest;
@@ -66,7 +66,7 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
66 * Find the destination with the least load. 66 * Find the destination with the least load.
67 */ 67 */
68 nextstage: 68 nextstage:
69 list_for_each_entry_continue(dest, &svc->destinations, n_list) { 69 list_for_each_entry_continue_rcu(dest, &svc->destinations, n_list) {
70 if (dest->flags & IP_VS_DEST_F_OVERLOAD) 70 if (dest->flags & IP_VS_DEST_F_OVERLOAD)
71 continue; 71 continue;
72 doh = ip_vs_dest_conn_overhead(dest); 72 doh = ip_vs_dest_conn_overhead(dest);
@@ -106,6 +106,7 @@ static int __init ip_vs_wlc_init(void)
106static void __exit ip_vs_wlc_cleanup(void) 106static void __exit ip_vs_wlc_cleanup(void)
107{ 107{
108 unregister_ip_vs_scheduler(&ip_vs_wlc_scheduler); 108 unregister_ip_vs_scheduler(&ip_vs_wlc_scheduler);
109 synchronize_rcu();
109} 110}
110 111
111module_init(ip_vs_wlc_init); 112module_init(ip_vs_wlc_init);
diff --git a/net/netfilter/ipvs/ip_vs_wrr.c b/net/netfilter/ipvs/ip_vs_wrr.c
index 231be7dd547a..0e68555bceb9 100644
--- a/net/netfilter/ipvs/ip_vs_wrr.c
+++ b/net/netfilter/ipvs/ip_vs_wrr.c
@@ -29,14 +29,45 @@
29 29
30#include <net/ip_vs.h> 30#include <net/ip_vs.h>
31 31
32/* The WRR algorithm depends on some calculations:
33 * - mw: maximum weight
34 * - di: weight step, greatest common divisor from all weights
35 * - cw: current required weight
36 * As result, all weights are in the [di..mw] range with a step=di.
37 *
38 * First, we start with cw = mw and select dests with weight >= cw.
39 * Then cw is reduced with di and all dests are checked again.
40 * Last pass should be with cw = di. We have mw/di passes in total:
41 *
42 * pass 1: cw = max weight
43 * pass 2: cw = max weight - di
44 * pass 3: cw = max weight - 2 * di
45 * ...
46 * last pass: cw = di
47 *
48 * Weights are supposed to be >= di, but since we run in parallel
49 * with weight changes it is possible for a dest weight to drop
50 * below di, which is bad if it is the only available dest.
51 *
52 * So, we modify how mw is calculated, now it is reduced with (di - 1),
53 * so that last cw is 1 to catch such dests with weight below di:
54 * pass 1: cw = max weight - (di - 1)
55 * pass 2: cw = max weight - di - (di - 1)
56 * pass 3: cw = max weight - 2 * di - (di - 1)
57 * ...
58 * last pass: cw = 1
59 *
60 */
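As a worked check of the arithmetic above (a stand-alone user-space sketch, not part of the patch; gcd() is a local helper): with weights {4, 2, 2}, di = 2 and the raw maximum is 4, so mw = 4 - (2 - 1) = 3 and the scheduler runs passes with cw = 3 and then cw = 1, the final pass catching any dest whose weight has dropped below di.

	#include <stdio.h>

	static int gcd(int a, int b)
	{
		while (b) {
			int t = a % b;

			a = b;
			b = t;
		}
		return a;
	}

	int main(void)
	{
		int w[] = { 4, 2, 2 };	/* configured dest weights */
		int n = 3, i, di, max = 0, mw, cw;

		di = w[0];
		for (i = 1; i < n; i++)
			di = gcd(di, w[i]);
		for (i = 0; i < n; i++)
			if (w[i] > max)
				max = w[i];
		mw = max - (di - 1);	/* last pass then uses cw = 1 */
		for (cw = mw; cw > 0; cw -= di)
			printf("pass: cw = %d\n", cw);	/* prints 3, then 1 */
		return 0;
	}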
61
32/* 62/*
33 * current destination pointer for weighted round-robin scheduling 63 * current destination pointer for weighted round-robin scheduling
34 */ 64 */
35struct ip_vs_wrr_mark { 65struct ip_vs_wrr_mark {
36 struct list_head *cl; /* current list head */ 66 struct ip_vs_dest *cl; /* current dest or head */
37 int cw; /* current weight */ 67 int cw; /* current weight */
38 int mw; /* maximum weight */ 68 int mw; /* maximum weight */
39 int di; /* decreasing interval */ 69 int di; /* decreasing interval */
70 struct rcu_head rcu_head;
40}; 71};
41 72
42 73
@@ -88,36 +119,41 @@ static int ip_vs_wrr_init_svc(struct ip_vs_service *svc)
88 if (mark == NULL) 119 if (mark == NULL)
89 return -ENOMEM; 120 return -ENOMEM;
90 121
91 mark->cl = &svc->destinations; 122 mark->cl = list_entry(&svc->destinations, struct ip_vs_dest, n_list);
92 mark->cw = 0;
93 mark->mw = ip_vs_wrr_max_weight(svc);
94 mark->di = ip_vs_wrr_gcd_weight(svc); 123 mark->di = ip_vs_wrr_gcd_weight(svc);
124 mark->mw = ip_vs_wrr_max_weight(svc) - (mark->di - 1);
125 mark->cw = mark->mw;
95 svc->sched_data = mark; 126 svc->sched_data = mark;
96 127
97 return 0; 128 return 0;
98} 129}
99 130
100 131
101static int ip_vs_wrr_done_svc(struct ip_vs_service *svc) 132static void ip_vs_wrr_done_svc(struct ip_vs_service *svc)
102{ 133{
134 struct ip_vs_wrr_mark *mark = svc->sched_data;
135
103 /* 136 /*
104 * Release the mark variable 137 * Release the mark variable
105 */ 138 */
106 kfree(svc->sched_data); 139 kfree_rcu(mark, rcu_head);
107
108 return 0;
109} 140}
110 141
111 142
112static int ip_vs_wrr_update_svc(struct ip_vs_service *svc) 143static int ip_vs_wrr_dest_changed(struct ip_vs_service *svc,
144 struct ip_vs_dest *dest)
113{ 145{
114 struct ip_vs_wrr_mark *mark = svc->sched_data; 146 struct ip_vs_wrr_mark *mark = svc->sched_data;
115 147
116 mark->cl = &svc->destinations; 148 spin_lock_bh(&svc->sched_lock);
117 mark->mw = ip_vs_wrr_max_weight(svc); 149 mark->cl = list_entry(&svc->destinations, struct ip_vs_dest, n_list);
118 mark->di = ip_vs_wrr_gcd_weight(svc); 150 mark->di = ip_vs_wrr_gcd_weight(svc);
119 if (mark->cw > mark->mw) 151 mark->mw = ip_vs_wrr_max_weight(svc) - (mark->di - 1);
120 mark->cw = 0; 152 if (mark->cw > mark->mw || !mark->cw)
153 mark->cw = mark->mw;
154 else if (mark->di > 1)
155 mark->cw = (mark->cw / mark->di) * mark->di + 1;
156 spin_unlock_bh(&svc->sched_lock);
121 return 0; 157 return 0;
122} 158}
123 159
@@ -128,80 +164,79 @@ static int ip_vs_wrr_update_svc(struct ip_vs_service *svc)
128static struct ip_vs_dest * 164static struct ip_vs_dest *
129ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb) 165ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
130{ 166{
131 struct ip_vs_dest *dest; 167 struct ip_vs_dest *dest, *last, *stop = NULL;
132 struct ip_vs_wrr_mark *mark = svc->sched_data; 168 struct ip_vs_wrr_mark *mark = svc->sched_data;
133 struct list_head *p; 169 bool last_pass = false, restarted = false;
134 170
135 IP_VS_DBG(6, "%s(): Scheduling...\n", __func__); 171 IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);
136 172
137 /* 173 spin_lock_bh(&svc->sched_lock);
138 * This loop will always terminate, because mark->cw is in (0, max_weight] 174 dest = mark->cl;
139 * and at least one server has its weight equal to max_weight. 175 /* No available dests? */
140 */ 176 if (mark->mw == 0)
141 write_lock(&svc->sched_lock); 177 goto err_noavail;
142 p = mark->cl; 178 last = dest;
179 /* Stop only after all dests were checked for weight >= 1 (last pass) */
143 while (1) { 180 while (1) {
144 if (mark->cl == &svc->destinations) { 181 list_for_each_entry_continue_rcu(dest,
145 /* it is at the head of the destination list */ 182 &svc->destinations,
146 183 n_list) {
147 if (mark->cl == mark->cl->next) {
148 /* no dest entry */
149 ip_vs_scheduler_err(svc,
150 "no destination available: "
151 "no destinations present");
152 dest = NULL;
153 goto out;
154 }
155
156 mark->cl = svc->destinations.next;
157 mark->cw -= mark->di;
158 if (mark->cw <= 0) {
159 mark->cw = mark->mw;
160 /*
161 * Still zero, which means no available servers.
162 */
163 if (mark->cw == 0) {
164 mark->cl = &svc->destinations;
165 ip_vs_scheduler_err(svc,
166 "no destination available");
167 dest = NULL;
168 goto out;
169 }
170 }
171 } else
172 mark->cl = mark->cl->next;
173
174 if (mark->cl != &svc->destinations) {
175 /* not at the head of the list */
176 dest = list_entry(mark->cl, struct ip_vs_dest, n_list);
177 if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) && 184 if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) &&
178 atomic_read(&dest->weight) >= mark->cw) { 185 atomic_read(&dest->weight) >= mark->cw)
179 /* got it */ 186 goto found;
180 break; 187 if (dest == stop)
181 } 188 goto err_over;
182 } 189 }
183 190 mark->cw -= mark->di;
184 if (mark->cl == p && mark->cw == mark->di) { 191 if (mark->cw <= 0) {
185 /* back to the start, and no dest is found. 192 mark->cw = mark->mw;
186 It is only possible when all dests are OVERLOADED */ 193 /* Stop if we tried last pass from first dest:
187 dest = NULL; 194 * 1. last_pass: we started checks when cw > di but
188 ip_vs_scheduler_err(svc, 195 * then all dests were checked for w >= 1
189 "no destination available: " 196 * 2. last was head: the first and only traversal
190 "all destinations are overloaded"); 197 * was for weight >= 1, for all dests.
191 goto out; 198 */
199 if (last_pass ||
200 &last->n_list == &svc->destinations)
201 goto err_over;
202 restarted = true;
203 }
204 last_pass = mark->cw <= mark->di;
205 if (last_pass && restarted &&
206 &last->n_list != &svc->destinations) {
207 /* First traversal was for w >= 1 but only
208 * for dests after 'last', now do the same
209 * for all dests up to 'last'.
210 */
211 stop = last;
192 } 212 }
193 } 213 }
194 214
215found:
195 IP_VS_DBG_BUF(6, "WRR: server %s:%u " 216 IP_VS_DBG_BUF(6, "WRR: server %s:%u "
196 "activeconns %d refcnt %d weight %d\n", 217 "activeconns %d refcnt %d weight %d\n",
197 IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port), 218 IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port),
198 atomic_read(&dest->activeconns), 219 atomic_read(&dest->activeconns),
199 atomic_read(&dest->refcnt), 220 atomic_read(&dest->refcnt),
200 atomic_read(&dest->weight)); 221 atomic_read(&dest->weight));
222 mark->cl = dest;
201 223
202 out: 224 out:
203 write_unlock(&svc->sched_lock); 225 spin_unlock_bh(&svc->sched_lock);
204 return dest; 226 return dest;
227
228err_noavail:
229 mark->cl = dest;
230 dest = NULL;
231 ip_vs_scheduler_err(svc, "no destination available");
232 goto out;
233
234err_over:
235 mark->cl = dest;
236 dest = NULL;
237 ip_vs_scheduler_err(svc, "no destination available: "
238 "all destinations are overloaded");
239 goto out;
205} 240}
206 241
207 242
@@ -212,7 +247,9 @@ static struct ip_vs_scheduler ip_vs_wrr_scheduler = {
212 .n_list = LIST_HEAD_INIT(ip_vs_wrr_scheduler.n_list), 247 .n_list = LIST_HEAD_INIT(ip_vs_wrr_scheduler.n_list),
213 .init_service = ip_vs_wrr_init_svc, 248 .init_service = ip_vs_wrr_init_svc,
214 .done_service = ip_vs_wrr_done_svc, 249 .done_service = ip_vs_wrr_done_svc,
215 .update_service = ip_vs_wrr_update_svc, 250 .add_dest = ip_vs_wrr_dest_changed,
251 .del_dest = ip_vs_wrr_dest_changed,
252 .upd_dest = ip_vs_wrr_dest_changed,
216 .schedule = ip_vs_wrr_schedule, 253 .schedule = ip_vs_wrr_schedule,
217}; 254};
218 255
@@ -224,6 +261,7 @@ static int __init ip_vs_wrr_init(void)
224static void __exit ip_vs_wrr_cleanup(void) 261static void __exit ip_vs_wrr_cleanup(void)
225{ 262{
226 unregister_ip_vs_scheduler(&ip_vs_wrr_scheduler); 263 unregister_ip_vs_scheduler(&ip_vs_wrr_scheduler);
264 synchronize_rcu();
227} 265}
228 266
229module_init(ip_vs_wrr_init); 267module_init(ip_vs_wrr_init);
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index ee6b7a9f1ec2..b75ff6429a04 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -17,6 +17,8 @@
17 * - not all connections have destination server, for example, 17 * - not all connections have destination server, for example,
18 * connections in backup server when fwmark is used 18 * connections in backup server when fwmark is used
19 * - bypass connections use daddr from packet 19 * - bypass connections use daddr from packet
20 * - we can use a dst without a ref while sending in an RCU section;
21 * we take a ref when returning NF_ACCEPT for a NAT-ed packet via loopback
20 * LOCAL_OUT rules: 22 * LOCAL_OUT rules:
21 * - skb->dev is NULL, skb->protocol is not set (both are set in POST_ROUTING) 23 * - skb->dev is NULL, skb->protocol is not set (both are set in POST_ROUTING)
22 * - skb->pkt_type is not set yet 24 * - skb->pkt_type is not set yet
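The new comment lines describe the reference discipline that the rewritten route handlers below implement. A compact restatement (illustrative; the helper name attach_dst is hypothetical, the calls are the ones the patch uses): inside the RCU read-side section the cached dst is guaranteed alive, so an skb transmitted before rcu_read_unlock() may borrow it without a reference, while an skb returned to the local stack with NF_ACCEPT outlives the section and needs a real reference.

	static void attach_dst(struct sk_buff *skb, struct dst_entry *dst,
			       bool outlives_rcu_section)
	{
		skb_dst_drop(skb);		/* drop the old route */
		if (outlives_rcu_section)
			skb_dst_set(skb, dst_clone(dst));	/* take a real ref */
		else
			skb_dst_set_noref_force(skb, dst);	/* borrow, no ref */
	}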
@@ -51,39 +53,54 @@ enum {
51 */ 53 */
52 IP_VS_RT_MODE_CONNECT = 8, /* Always bind route to saddr */ 54 IP_VS_RT_MODE_CONNECT = 8, /* Always bind route to saddr */
53 IP_VS_RT_MODE_KNOWN_NH = 16,/* Route via remote addr */ 55 IP_VS_RT_MODE_KNOWN_NH = 16,/* Route via remote addr */
56 IP_VS_RT_MODE_TUNNEL = 32,/* Tunnel mode */
54}; 57};
55 58
59static inline struct ip_vs_dest_dst *ip_vs_dest_dst_alloc(void)
60{
61 return kmalloc(sizeof(struct ip_vs_dest_dst), GFP_ATOMIC);
62}
63
64static inline void ip_vs_dest_dst_free(struct ip_vs_dest_dst *dest_dst)
65{
66 kfree(dest_dst);
67}
68
56/* 69/*
57 * Destination cache to speed up outgoing route lookup 70 * Destination cache to speed up outgoing route lookup
58 */ 71 */
59static inline void 72static inline void
60__ip_vs_dst_set(struct ip_vs_dest *dest, u32 rtos, struct dst_entry *dst, 73__ip_vs_dst_set(struct ip_vs_dest *dest, struct ip_vs_dest_dst *dest_dst,
61 u32 dst_cookie) 74 struct dst_entry *dst, u32 dst_cookie)
62{ 75{
63 struct dst_entry *old_dst; 76 struct ip_vs_dest_dst *old;
77
78 old = rcu_dereference_protected(dest->dest_dst,
79 lockdep_is_held(&dest->dst_lock));
64 80
65 old_dst = dest->dst_cache; 81 if (dest_dst) {
66 dest->dst_cache = dst; 82 dest_dst->dst_cache = dst;
67 dest->dst_rtos = rtos; 83 dest_dst->dst_cookie = dst_cookie;
68 dest->dst_cookie = dst_cookie; 84 }
69 dst_release(old_dst); 85 rcu_assign_pointer(dest->dest_dst, dest_dst);
86
87 if (old)
88 call_rcu(&old->rcu_head, ip_vs_dest_dst_rcu_free);
70} 89}
71 90
72static inline struct dst_entry * 91static inline struct ip_vs_dest_dst *
73__ip_vs_dst_check(struct ip_vs_dest *dest, u32 rtos) 92__ip_vs_dst_check(struct ip_vs_dest *dest)
74{ 93{
75 struct dst_entry *dst = dest->dst_cache; 94 struct ip_vs_dest_dst *dest_dst = rcu_dereference(dest->dest_dst);
95 struct dst_entry *dst;
76 96
77 if (!dst) 97 if (!dest_dst)
78 return NULL; 98 return NULL;
79 if ((dst->obsolete || rtos != dest->dst_rtos) && 99 dst = dest_dst->dst_cache;
80 dst->ops->check(dst, dest->dst_cookie) == NULL) { 100 if (dst->obsolete &&
81 dest->dst_cache = NULL; 101 dst->ops->check(dst, dest_dst->dst_cookie) == NULL)
82 dst_release(dst);
83 return NULL; 102 return NULL;
84 } 103 return dest_dst;
85 dst_hold(dst);
86 return dst;
87} 104}
88 105
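The two helpers above are the writer and reader halves of the standard RCU pointer-publish pattern. A self-contained sketch of that pattern, reduced to a hypothetical cache struct (illustrative, not the patch code; the patch additionally releases the dst in its RCU callback):

	struct cache {
		int data;
		struct rcu_head rcu_head;
	};

	static struct cache __rcu *cache_ptr;
	static DEFINE_SPINLOCK(cache_lock);

	static void cache_publish(struct cache *new)	/* writer side */
	{
		struct cache *old;

		spin_lock_bh(&cache_lock);
		old = rcu_dereference_protected(cache_ptr,
						lockdep_is_held(&cache_lock));
		rcu_assign_pointer(cache_ptr, new);	/* readers see a complete object */
		spin_unlock_bh(&cache_lock);
		if (old)
			kfree_rcu(old, rcu_head);	/* freed after a grace period */
	}

	static int cache_read(void)			/* reader side, lockless */
	{
		struct cache *c;
		int val = -1;

		rcu_read_lock();
		c = rcu_dereference(cache_ptr);
		if (c)
			val = c->data;		/* valid until rcu_read_unlock() */
		rcu_read_unlock();
		return val;
	}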
89static inline bool 106static inline bool
@@ -104,7 +121,7 @@ __mtu_check_toobig_v6(const struct sk_buff *skb, u32 mtu)
104 121
105/* Get route to daddr, update *saddr, optionally bind route to saddr */ 122/* Get route to daddr, update *saddr, optionally bind route to saddr */
106static struct rtable *do_output_route4(struct net *net, __be32 daddr, 123static struct rtable *do_output_route4(struct net *net, __be32 daddr,
107 u32 rtos, int rt_mode, __be32 *saddr) 124 int rt_mode, __be32 *saddr)
108{ 125{
109 struct flowi4 fl4; 126 struct flowi4 fl4;
110 struct rtable *rt; 127 struct rtable *rt;
@@ -113,7 +130,6 @@ static struct rtable *do_output_route4(struct net *net, __be32 daddr,
113 memset(&fl4, 0, sizeof(fl4)); 130 memset(&fl4, 0, sizeof(fl4));
114 fl4.daddr = daddr; 131 fl4.daddr = daddr;
115 fl4.saddr = (rt_mode & IP_VS_RT_MODE_CONNECT) ? *saddr : 0; 132 fl4.saddr = (rt_mode & IP_VS_RT_MODE_CONNECT) ? *saddr : 0;
116 fl4.flowi4_tos = rtos;
117 fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ? 133 fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ?
118 FLOWI_FLAG_KNOWN_NH : 0; 134 FLOWI_FLAG_KNOWN_NH : 0;
119 135
@@ -124,7 +140,7 @@ retry:
124 if (PTR_ERR(rt) == -EINVAL && *saddr && 140 if (PTR_ERR(rt) == -EINVAL && *saddr &&
125 rt_mode & IP_VS_RT_MODE_CONNECT && !loop) { 141 rt_mode & IP_VS_RT_MODE_CONNECT && !loop) {
126 *saddr = 0; 142 *saddr = 0;
127 flowi4_update_output(&fl4, 0, rtos, daddr, 0); 143 flowi4_update_output(&fl4, 0, 0, daddr, 0);
128 goto retry; 144 goto retry;
129 } 145 }
130 IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n", &daddr); 146 IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n", &daddr);
@@ -132,7 +148,7 @@ retry:
132 } else if (!*saddr && rt_mode & IP_VS_RT_MODE_CONNECT && fl4.saddr) { 148 } else if (!*saddr && rt_mode & IP_VS_RT_MODE_CONNECT && fl4.saddr) {
133 ip_rt_put(rt); 149 ip_rt_put(rt);
134 *saddr = fl4.saddr; 150 *saddr = fl4.saddr;
135 flowi4_update_output(&fl4, 0, rtos, daddr, fl4.saddr); 151 flowi4_update_output(&fl4, 0, 0, daddr, fl4.saddr);
136 loop++; 152 loop++;
137 goto retry; 153 goto retry;
138 } 154 }
@@ -141,113 +157,140 @@ retry:
141} 157}
142 158
143/* Get route to destination or remote server */ 159/* Get route to destination or remote server */
144static struct rtable * 160static int
145__ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest, 161__ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
146 __be32 daddr, u32 rtos, int rt_mode, __be32 *ret_saddr) 162 __be32 daddr, int rt_mode, __be32 *ret_saddr)
147{ 163{
148 struct net *net = dev_net(skb_dst(skb)->dev); 164 struct net *net = dev_net(skb_dst(skb)->dev);
165 struct netns_ipvs *ipvs = net_ipvs(net);
166 struct ip_vs_dest_dst *dest_dst;
149 struct rtable *rt; /* Route to the other host */ 167 struct rtable *rt; /* Route to the other host */
150 struct rtable *ort; /* Original route */ 168 struct rtable *ort; /* Original route */
151 int local; 169 struct iphdr *iph;
170 __be16 df;
171 int mtu;
172 int local, noref = 1;
152 173
153 if (dest) { 174 if (dest) {
154 spin_lock(&dest->dst_lock); 175 dest_dst = __ip_vs_dst_check(dest);
155 if (!(rt = (struct rtable *) 176 if (likely(dest_dst))
156 __ip_vs_dst_check(dest, rtos))) { 177 rt = (struct rtable *) dest_dst->dst_cache;
157 rt = do_output_route4(net, dest->addr.ip, rtos, 178 else {
158 rt_mode, &dest->dst_saddr.ip); 179 dest_dst = ip_vs_dest_dst_alloc();
180 spin_lock_bh(&dest->dst_lock);
181 if (!dest_dst) {
182 __ip_vs_dst_set(dest, NULL, NULL, 0);
183 spin_unlock_bh(&dest->dst_lock);
184 goto err_unreach;
185 }
186 rt = do_output_route4(net, dest->addr.ip, rt_mode,
187 &dest_dst->dst_saddr.ip);
159 if (!rt) { 188 if (!rt) {
160 spin_unlock(&dest->dst_lock); 189 __ip_vs_dst_set(dest, NULL, NULL, 0);
161 return NULL; 190 spin_unlock_bh(&dest->dst_lock);
191 ip_vs_dest_dst_free(dest_dst);
192 goto err_unreach;
162 } 193 }
163 __ip_vs_dst_set(dest, rtos, dst_clone(&rt->dst), 0); 194 __ip_vs_dst_set(dest, dest_dst, &rt->dst, 0);
164 IP_VS_DBG(10, "new dst %pI4, src %pI4, refcnt=%d, " 195 spin_unlock_bh(&dest->dst_lock);
165 "rtos=%X\n", 196 IP_VS_DBG(10, "new dst %pI4, src %pI4, refcnt=%d\n",
166 &dest->addr.ip, &dest->dst_saddr.ip, 197 &dest->addr.ip, &dest_dst->dst_saddr.ip,
167 atomic_read(&rt->dst.__refcnt), rtos); 198 atomic_read(&rt->dst.__refcnt));
168 } 199 }
169 daddr = dest->addr.ip; 200 daddr = dest->addr.ip;
170 if (ret_saddr) 201 if (ret_saddr)
171 *ret_saddr = dest->dst_saddr.ip; 202 *ret_saddr = dest_dst->dst_saddr.ip;
172 spin_unlock(&dest->dst_lock);
173 } else { 203 } else {
174 __be32 saddr = htonl(INADDR_ANY); 204 __be32 saddr = htonl(INADDR_ANY);
175 205
206 noref = 0;
207
176 /* For such unconfigured boxes avoid many route lookups 208 /* For such unconfigured boxes avoid many route lookups
177 * for performance reasons because we do not remember saddr 209 * for performance reasons because we do not remember saddr
178 */ 210 */
179 rt_mode &= ~IP_VS_RT_MODE_CONNECT; 211 rt_mode &= ~IP_VS_RT_MODE_CONNECT;
180 rt = do_output_route4(net, daddr, rtos, rt_mode, &saddr); 212 rt = do_output_route4(net, daddr, rt_mode, &saddr);
181 if (!rt) 213 if (!rt)
182 return NULL; 214 goto err_unreach;
183 if (ret_saddr) 215 if (ret_saddr)
184 *ret_saddr = saddr; 216 *ret_saddr = saddr;
185 } 217 }
186 218
187 local = rt->rt_flags & RTCF_LOCAL; 219 local = (rt->rt_flags & RTCF_LOCAL) ? 1 : 0;
188 if (!((local ? IP_VS_RT_MODE_LOCAL : IP_VS_RT_MODE_NON_LOCAL) & 220 if (!((local ? IP_VS_RT_MODE_LOCAL : IP_VS_RT_MODE_NON_LOCAL) &
189 rt_mode)) { 221 rt_mode)) {
190 IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI4\n", 222 IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI4\n",
191 (rt->rt_flags & RTCF_LOCAL) ? 223 (rt->rt_flags & RTCF_LOCAL) ?
192 "local":"non-local", &daddr); 224 "local":"non-local", &daddr);
193 ip_rt_put(rt); 225 goto err_put;
194 return NULL;
195 }
196 if (local && !(rt_mode & IP_VS_RT_MODE_RDR) &&
197 !((ort = skb_rtable(skb)) && ort->rt_flags & RTCF_LOCAL)) {
198 IP_VS_DBG_RL("Redirect from non-local address %pI4 to local "
199 "requires NAT method, dest: %pI4\n",
200 &ip_hdr(skb)->daddr, &daddr);
201 ip_rt_put(rt);
202 return NULL;
203 } 226 }
204 if (unlikely(!local && ipv4_is_loopback(ip_hdr(skb)->saddr))) { 227 iph = ip_hdr(skb);
205 IP_VS_DBG_RL("Stopping traffic from loopback address %pI4 " 228 if (likely(!local)) {
206 "to non-local address, dest: %pI4\n", 229 if (unlikely(ipv4_is_loopback(iph->saddr))) {
207 &ip_hdr(skb)->saddr, &daddr); 230 IP_VS_DBG_RL("Stopping traffic from loopback address "
208 ip_rt_put(rt); 231 "%pI4 to non-local address, dest: %pI4\n",
209 return NULL; 232 &iph->saddr, &daddr);
233 goto err_put;
234 }
235 } else {
236 ort = skb_rtable(skb);
237 if (!(rt_mode & IP_VS_RT_MODE_RDR) &&
238 !(ort->rt_flags & RTCF_LOCAL)) {
239 IP_VS_DBG_RL("Redirect from non-local address %pI4 to "
240 "local requires NAT method, dest: %pI4\n",
241 &iph->daddr, &daddr);
242 goto err_put;
243 }
244 /* skb to local stack, preserve old route */
245 if (!noref)
246 ip_rt_put(rt);
247 return local;
210 } 248 }
211 249
212 return rt; 250 if (likely(!(rt_mode & IP_VS_RT_MODE_TUNNEL))) {
213} 251 mtu = dst_mtu(&rt->dst);
214 252 df = iph->frag_off & htons(IP_DF);
215/* Reroute packet to local IPv4 stack after DNAT */
216static int
217__ip_vs_reroute_locally(struct sk_buff *skb)
218{
219 struct rtable *rt = skb_rtable(skb);
220 struct net_device *dev = rt->dst.dev;
221 struct net *net = dev_net(dev);
222 struct iphdr *iph = ip_hdr(skb);
223
224 if (rt_is_input_route(rt)) {
225 unsigned long orefdst = skb->_skb_refdst;
226
227 if (ip_route_input(skb, iph->daddr, iph->saddr,
228 iph->tos, skb->dev))
229 return 0;
230 refdst_drop(orefdst);
231 } else { 253 } else {
232 struct flowi4 fl4 = { 254 struct sock *sk = skb->sk;
233 .daddr = iph->daddr, 255
234 .saddr = iph->saddr, 256 mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
235 .flowi4_tos = RT_TOS(iph->tos), 257 if (mtu < 68) {
236 .flowi4_mark = skb->mark, 258 IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__);
237 }; 259 goto err_put;
238
239 rt = ip_route_output_key(net, &fl4);
240 if (IS_ERR(rt))
241 return 0;
242 if (!(rt->rt_flags & RTCF_LOCAL)) {
243 ip_rt_put(rt);
244 return 0;
245 } 260 }
246 /* Drop old route. */ 261 ort = skb_rtable(skb);
247 skb_dst_drop(skb); 262 if (!skb->dev && sk && sk->sk_state != TCP_TIME_WAIT)
248 skb_dst_set(skb, &rt->dst); 263 ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu);
264 /* MTU check allowed? */
265 df = sysctl_pmtu_disc(ipvs) ? iph->frag_off & htons(IP_DF) : 0;
249 } 266 }
250 return 1; 267
268 /* MTU checking */
269 if (unlikely(df && skb->len > mtu && !skb_is_gso(skb))) {
270 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
271 IP_VS_DBG(1, "frag needed for %pI4\n", &iph->saddr);
272 goto err_put;
273 }
274
275 skb_dst_drop(skb);
276 if (noref) {
277 if (!local)
278 skb_dst_set_noref_force(skb, &rt->dst);
279 else
280 skb_dst_set(skb, dst_clone(&rt->dst));
281 } else
282 skb_dst_set(skb, &rt->dst);
283
284 return local;
285
286err_put:
287 if (!noref)
288 ip_rt_put(rt);
289 return -1;
290
291err_unreach:
292 dst_link_failure(skb);
293 return -1;
251} 294}
252 295
253#ifdef CONFIG_IP_VS_IPV6 296#ifdef CONFIG_IP_VS_IPV6
@@ -294,44 +337,57 @@ out_err:
294/* 337/*
295 * Get route to destination or remote server 338 * Get route to destination or remote server
296 */ 339 */
297static struct rt6_info * 340static int
298__ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest, 341__ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest,
299 struct in6_addr *daddr, struct in6_addr *ret_saddr, 342 struct in6_addr *daddr, struct in6_addr *ret_saddr,
300 int do_xfrm, int rt_mode) 343 struct ip_vs_iphdr *ipvsh, int do_xfrm, int rt_mode)
301{ 344{
302 struct net *net = dev_net(skb_dst(skb)->dev); 345 struct net *net = dev_net(skb_dst(skb)->dev);
346 struct ip_vs_dest_dst *dest_dst;
303 struct rt6_info *rt; /* Route to the other host */ 347 struct rt6_info *rt; /* Route to the other host */
304 struct rt6_info *ort; /* Original route */ 348 struct rt6_info *ort; /* Original route */
305 struct dst_entry *dst; 349 struct dst_entry *dst;
306 int local; 350 int mtu;
351 int local, noref = 1;
307 352
308 if (dest) { 353 if (dest) {
309 spin_lock(&dest->dst_lock); 354 dest_dst = __ip_vs_dst_check(dest);
310 rt = (struct rt6_info *)__ip_vs_dst_check(dest, 0); 355 if (likely(dest_dst))
311 if (!rt) { 356 rt = (struct rt6_info *) dest_dst->dst_cache;
357 else {
312 u32 cookie; 358 u32 cookie;
313 359
360 dest_dst = ip_vs_dest_dst_alloc();
361 spin_lock_bh(&dest->dst_lock);
362 if (!dest_dst) {
363 __ip_vs_dst_set(dest, NULL, NULL, 0);
364 spin_unlock_bh(&dest->dst_lock);
365 goto err_unreach;
366 }
314 dst = __ip_vs_route_output_v6(net, &dest->addr.in6, 367 dst = __ip_vs_route_output_v6(net, &dest->addr.in6,
315 &dest->dst_saddr.in6, 368 &dest_dst->dst_saddr.in6,
316 do_xfrm); 369 do_xfrm);
317 if (!dst) { 370 if (!dst) {
318 spin_unlock(&dest->dst_lock); 371 __ip_vs_dst_set(dest, NULL, NULL, 0);
319 return NULL; 372 spin_unlock_bh(&dest->dst_lock);
373 ip_vs_dest_dst_free(dest_dst);
374 goto err_unreach;
320 } 375 }
321 rt = (struct rt6_info *) dst; 376 rt = (struct rt6_info *) dst;
322 cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0; 377 cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
323 __ip_vs_dst_set(dest, 0, dst_clone(&rt->dst), cookie); 378 __ip_vs_dst_set(dest, dest_dst, &rt->dst, cookie);
379 spin_unlock_bh(&dest->dst_lock);
324 IP_VS_DBG(10, "new dst %pI6, src %pI6, refcnt=%d\n", 380 IP_VS_DBG(10, "new dst %pI6, src %pI6, refcnt=%d\n",
325 &dest->addr.in6, &dest->dst_saddr.in6, 381 &dest->addr.in6, &dest_dst->dst_saddr.in6,
326 atomic_read(&rt->dst.__refcnt)); 382 atomic_read(&rt->dst.__refcnt));
327 } 383 }
328 if (ret_saddr) 384 if (ret_saddr)
329 *ret_saddr = dest->dst_saddr.in6; 385 *ret_saddr = dest_dst->dst_saddr.in6;
330 spin_unlock(&dest->dst_lock);
331 } else { 386 } else {
387 noref = 0;
332 dst = __ip_vs_route_output_v6(net, daddr, ret_saddr, do_xfrm); 388 dst = __ip_vs_route_output_v6(net, daddr, ret_saddr, do_xfrm);
333 if (!dst) 389 if (!dst)
334 return NULL; 390 goto err_unreach;
335 rt = (struct rt6_info *) dst; 391 rt = (struct rt6_info *) dst;
336 } 392 }
337 393
@@ -340,86 +396,137 @@ __ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest,
340 rt_mode)) { 396 rt_mode)) {
341 IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI6c\n", 397 IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI6c\n",
342 local ? "local":"non-local", daddr); 398 local ? "local":"non-local", daddr);
343 dst_release(&rt->dst); 399 goto err_put;
344 return NULL;
345 } 400 }
346 if (local && !(rt_mode & IP_VS_RT_MODE_RDR) && 401 if (likely(!local)) {
347 !((ort = (struct rt6_info *) skb_dst(skb)) && 402 if (unlikely((!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
348 __ip_vs_is_local_route6(ort))) { 403 ipv6_addr_type(&ipv6_hdr(skb)->saddr) &
349 IP_VS_DBG_RL("Redirect from non-local address %pI6c to local " 404 IPV6_ADDR_LOOPBACK)) {
350 "requires NAT method, dest: %pI6c\n", 405 IP_VS_DBG_RL("Stopping traffic from loopback address "
351 &ipv6_hdr(skb)->daddr, daddr); 406 "%pI6c to non-local address, "
352 dst_release(&rt->dst); 407 "dest: %pI6c\n",
353 return NULL; 408 &ipv6_hdr(skb)->saddr, daddr);
409 goto err_put;
410 }
411 } else {
412 ort = (struct rt6_info *) skb_dst(skb);
413 if (!(rt_mode & IP_VS_RT_MODE_RDR) &&
414 !__ip_vs_is_local_route6(ort)) {
415 IP_VS_DBG_RL("Redirect from non-local address %pI6c "
416 "to local requires NAT method, "
417 "dest: %pI6c\n",
418 &ipv6_hdr(skb)->daddr, daddr);
419 goto err_put;
420 }
421 /* skb to local stack, preserve old route */
422 if (!noref)
423 dst_release(&rt->dst);
424 return local;
354 } 425 }
355 if (unlikely(!local && (!skb->dev || skb->dev->flags & IFF_LOOPBACK) && 426
356 ipv6_addr_type(&ipv6_hdr(skb)->saddr) & 427 /* MTU checking */
357 IPV6_ADDR_LOOPBACK)) { 428 if (likely(!(rt_mode & IP_VS_RT_MODE_TUNNEL)))
358 IP_VS_DBG_RL("Stopping traffic from loopback address %pI6c " 429 mtu = dst_mtu(&rt->dst);
359 "to non-local address, dest: %pI6c\n", 430 else {
360 &ipv6_hdr(skb)->saddr, daddr); 431 struct sock *sk = skb->sk;
361 dst_release(&rt->dst); 432
362 return NULL; 433 mtu = dst_mtu(&rt->dst) - sizeof(struct ipv6hdr);
434 if (mtu < IPV6_MIN_MTU) {
435 IP_VS_DBG_RL("%s(): mtu less than %d\n", __func__,
436 IPV6_MIN_MTU);
437 goto err_put;
438 }
439 ort = (struct rt6_info *) skb_dst(skb);
440 if (!skb->dev && sk && sk->sk_state != TCP_TIME_WAIT)
441 ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu);
363 } 442 }
364 443
365 return rt; 444 if (unlikely(__mtu_check_toobig_v6(skb, mtu))) {
445 if (!skb->dev)
446 skb->dev = net->loopback_dev;
447 /* only send ICMP too big on first fragment */
448 if (!ipvsh->fragoffs)
449 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
450 IP_VS_DBG(1, "frag needed for %pI6c\n", &ipv6_hdr(skb)->saddr);
451 goto err_put;
452 }
453
454 skb_dst_drop(skb);
455 if (noref) {
456 if (!local)
457 skb_dst_set_noref_force(skb, &rt->dst);
458 else
459 skb_dst_set(skb, dst_clone(&rt->dst));
460 } else
461 skb_dst_set(skb, &rt->dst);
462
463 return local;
464
465err_put:
466 if (!noref)
467 dst_release(&rt->dst);
468 return -1;
469
470err_unreach:
471 dst_link_failure(skb);
472 return -1;
366} 473}
367#endif 474#endif
368 475
369 476
370/* 477/* return NF_ACCEPT to allow forwarding or other NF_xxx on error */
371 * Release dest->dst_cache before a dest is removed 478static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb,
372 */ 479 struct ip_vs_conn *cp)
373void
374ip_vs_dst_reset(struct ip_vs_dest *dest)
375{ 480{
376 struct dst_entry *old_dst; 481 int ret = NF_ACCEPT;
482
483 skb->ipvs_property = 1;
484 if (unlikely(cp->flags & IP_VS_CONN_F_NFCT))
485 ret = ip_vs_confirm_conntrack(skb);
486 if (ret == NF_ACCEPT) {
487 nf_reset(skb);
488 skb_forward_csum(skb);
489 }
490 return ret;
491}
492
493/* return NF_STOLEN (sent) or NF_ACCEPT if local=1 (not sent) */
494static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
495 struct ip_vs_conn *cp, int local)
496{
497 int ret = NF_STOLEN;
377 498
378 old_dst = dest->dst_cache; 499 skb->ipvs_property = 1;
379 dest->dst_cache = NULL; 500 if (likely(!(cp->flags & IP_VS_CONN_F_NFCT)))
380 dst_release(old_dst); 501 ip_vs_notrack(skb);
381 dest->dst_saddr.ip = 0; 502 else
503 ip_vs_update_conntrack(skb, cp, 1);
504 if (!local) {
505 skb_forward_csum(skb);
506 NF_HOOK(pf, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev,
507 dst_output);
508 } else
509 ret = NF_ACCEPT;
510 return ret;
382} 511}
383 512
384#define IP_VS_XMIT_TUNNEL(skb, cp) \ 513/* return NF_STOLEN (sent) or NF_ACCEPT if local=1 (not sent) */
385({ \ 514static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb,
386 int __ret = NF_ACCEPT; \ 515 struct ip_vs_conn *cp, int local)
387 \ 516{
388 (skb)->ipvs_property = 1; \ 517 int ret = NF_STOLEN;
389 if (unlikely((cp)->flags & IP_VS_CONN_F_NFCT)) \ 518
390 __ret = ip_vs_confirm_conntrack(skb); \ 519 skb->ipvs_property = 1;
391 if (__ret == NF_ACCEPT) { \ 520 if (likely(!(cp->flags & IP_VS_CONN_F_NFCT)))
392 nf_reset(skb); \ 521 ip_vs_notrack(skb);
393 skb_forward_csum(skb); \ 522 if (!local) {
394 } \ 523 skb_forward_csum(skb);
395 __ret; \ 524 NF_HOOK(pf, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev,
396}) 525 dst_output);
397 526 } else
398#define IP_VS_XMIT_NAT(pf, skb, cp, local) \ 527 ret = NF_ACCEPT;
399do { \ 528 return ret;
400 (skb)->ipvs_property = 1; \ 529}
401 if (likely(!((cp)->flags & IP_VS_CONN_F_NFCT))) \
402 ip_vs_notrack(skb); \
403 else \
404 ip_vs_update_conntrack(skb, cp, 1); \
405 if (local) \
406 return NF_ACCEPT; \
407 skb_forward_csum(skb); \
408 NF_HOOK(pf, NF_INET_LOCAL_OUT, (skb), NULL, \
409 skb_dst(skb)->dev, dst_output); \
410} while (0)
411
412#define IP_VS_XMIT(pf, skb, cp, local) \
413do { \
414 (skb)->ipvs_property = 1; \
415 if (likely(!((cp)->flags & IP_VS_CONN_F_NFCT))) \
416 ip_vs_notrack(skb); \
417 if (local) \
418 return NF_ACCEPT; \
419 skb_forward_csum(skb); \
420 NF_HOOK(pf, NF_INET_LOCAL_OUT, (skb), NULL, \
421 skb_dst(skb)->dev, dst_output); \
422} while (0)
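Beyond the RCU changes, converting these macros to functions removes hidden control flow: the old IP_VS_XMIT*() macros could execute "return NF_ACCEPT" out of the calling function, whereas the new helpers hand the verdict back for the caller to propagate. A minimal sketch of the new calling convention (illustrative; example_xmit is hypothetical):

	static int example_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
				int local)
	{
		/* the old macro could 'return NF_ACCEPT' from *this* function;
		 * now the verdict is an ordinary return value:
		 * NF_STOLEN if sent, NF_ACCEPT if left to the local stack
		 */
		return ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local);
	}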
423 530
424 531
425/* 532/*
@@ -430,7 +537,7 @@ ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
430 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh) 537 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
431{ 538{
432 /* we do not touch skb and do not need pskb ptr */ 539 /* we do not touch skb and do not need pskb ptr */
433 IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 1); 540 return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1);
434} 541}
435 542
436 543
@@ -443,52 +550,29 @@ int
443ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, 550ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
444 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh) 551 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
445{ 552{
446 struct rtable *rt; /* Route to the other host */
447 struct iphdr *iph = ip_hdr(skb); 553 struct iphdr *iph = ip_hdr(skb);
448 int mtu;
449 554
450 EnterFunction(10); 555 EnterFunction(10);
451 556
452 if (!(rt = __ip_vs_get_out_rt(skb, NULL, iph->daddr, RT_TOS(iph->tos), 557 rcu_read_lock();
453 IP_VS_RT_MODE_NON_LOCAL, NULL))) 558 if (__ip_vs_get_out_rt(skb, NULL, iph->daddr, IP_VS_RT_MODE_NON_LOCAL,
454 goto tx_error_icmp; 559 NULL) < 0)
455
456 /* MTU checking */
457 mtu = dst_mtu(&rt->dst);
458 if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF)) &&
459 !skb_is_gso(skb)) {
460 ip_rt_put(rt);
461 icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
462 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
463 goto tx_error; 560 goto tx_error;
464 }
465 561
466 /* 562 ip_send_check(iph);
467 * Call ip_send_check because we are not sure it is called
468 * after ip_defrag. Is copy-on-write needed?
469 */
470 if (unlikely((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)) {
471 ip_rt_put(rt);
472 return NF_STOLEN;
473 }
474 ip_send_check(ip_hdr(skb));
475
476 /* drop old route */
477 skb_dst_drop(skb);
478 skb_dst_set(skb, &rt->dst);
479 563
480 /* Another hack: avoid icmp_send in ip_fragment */ 564 /* Another hack: avoid icmp_send in ip_fragment */
481 skb->local_df = 1; 565 skb->local_df = 1;
482 566
483 IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 0); 567 ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0);
568 rcu_read_unlock();
484 569
485 LeaveFunction(10); 570 LeaveFunction(10);
486 return NF_STOLEN; 571 return NF_STOLEN;
487 572
488 tx_error_icmp:
489 dst_link_failure(skb);
490 tx_error: 573 tx_error:
491 kfree_skb(skb); 574 kfree_skb(skb);
575 rcu_read_unlock();
492 LeaveFunction(10); 576 LeaveFunction(10);
493 return NF_STOLEN; 577 return NF_STOLEN;
494} 578}
@@ -496,60 +580,27 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
496#ifdef CONFIG_IP_VS_IPV6 580#ifdef CONFIG_IP_VS_IPV6
497int 581int
498ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, 582ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
499 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph) 583 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
500{ 584{
501 struct rt6_info *rt; /* Route to the other host */
502 int mtu;
503
504 EnterFunction(10); 585 EnterFunction(10);
505 586
506 rt = __ip_vs_get_out_rt_v6(skb, NULL, &iph->daddr.in6, NULL, 0, 587 rcu_read_lock();
507 IP_VS_RT_MODE_NON_LOCAL); 588 if (__ip_vs_get_out_rt_v6(skb, NULL, &ipvsh->daddr.in6, NULL,
508 if (!rt) 589 ipvsh, 0, IP_VS_RT_MODE_NON_LOCAL) < 0)
509 goto tx_error_icmp;
510
511 /* MTU checking */
512 mtu = dst_mtu(&rt->dst);
513 if (__mtu_check_toobig_v6(skb, mtu)) {
514 if (!skb->dev) {
515 struct net *net = dev_net(skb_dst(skb)->dev);
516
517 skb->dev = net->loopback_dev;
518 }
519 /* only send ICMP too big on first fragment */
520 if (!iph->fragoffs)
521 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
522 dst_release(&rt->dst);
523 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
524 goto tx_error; 590 goto tx_error;
525 }
526
527 /*
528 * Call ip_send_check because we are not sure it is called
529 * after ip_defrag. Is copy-on-write needed?
530 */
531 skb = skb_share_check(skb, GFP_ATOMIC);
532 if (unlikely(skb == NULL)) {
533 dst_release(&rt->dst);
534 return NF_STOLEN;
535 }
536
537 /* drop old route */
538 skb_dst_drop(skb);
539 skb_dst_set(skb, &rt->dst);
540 591
541 /* Another hack: avoid icmp_send in ip_fragment */ 592 /* Another hack: avoid icmp_send in ip_fragment */
542 skb->local_df = 1; 593 skb->local_df = 1;
543 594
544 IP_VS_XMIT(NFPROTO_IPV6, skb, cp, 0); 595 ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0);
596 rcu_read_unlock();
545 597
546 LeaveFunction(10); 598 LeaveFunction(10);
547 return NF_STOLEN; 599 return NF_STOLEN;
548 600
549 tx_error_icmp:
550 dst_link_failure(skb);
551 tx_error: 601 tx_error:
552 kfree_skb(skb); 602 kfree_skb(skb);
603 rcu_read_unlock();
553 LeaveFunction(10); 604 LeaveFunction(10);
554 return NF_STOLEN; 605 return NF_STOLEN;
555} 606}
@@ -564,29 +615,30 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
564 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh) 615 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
565{ 616{
566 struct rtable *rt; /* Route to the other host */ 617 struct rtable *rt; /* Route to the other host */
567 int mtu; 618 int local, rc, was_input;
568 struct iphdr *iph = ip_hdr(skb);
569 int local;
570 619
571 EnterFunction(10); 620 EnterFunction(10);
572 621
622 rcu_read_lock();
573 /* check if it is a connection of no-client-port */ 623 /* check if it is a connection of no-client-port */
574 if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) { 624 if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) {
575 __be16 _pt, *p; 625 __be16 _pt, *p;
576 p = skb_header_pointer(skb, iph->ihl*4, sizeof(_pt), &_pt); 626
627 p = skb_header_pointer(skb, ipvsh->len, sizeof(_pt), &_pt);
577 if (p == NULL) 628 if (p == NULL)
578 goto tx_error; 629 goto tx_error;
579 ip_vs_conn_fill_cport(cp, *p); 630 ip_vs_conn_fill_cport(cp, *p);
580 IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p)); 631 IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
581 } 632 }
582 633
583 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip, 634 was_input = rt_is_input_route(skb_rtable(skb));
584 RT_TOS(iph->tos), 635 local = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
585 IP_VS_RT_MODE_LOCAL | 636 IP_VS_RT_MODE_LOCAL |
586 IP_VS_RT_MODE_NON_LOCAL | 637 IP_VS_RT_MODE_NON_LOCAL |
587 IP_VS_RT_MODE_RDR, NULL))) 638 IP_VS_RT_MODE_RDR, NULL);
588 goto tx_error_icmp; 639 if (local < 0)
589 local = rt->rt_flags & RTCF_LOCAL; 640 goto tx_error;
641 rt = skb_rtable(skb);
590 /* 642 /*
591 * Avoid duplicate tuple in reply direction for NAT traffic 643 * Avoid duplicate tuple in reply direction for NAT traffic
592 * to local address when connection is sync-ed 644 * to local address when connection is sync-ed
@@ -600,57 +652,31 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
600 IP_VS_DBG_RL_PKT(10, AF_INET, pp, skb, 0, 652 IP_VS_DBG_RL_PKT(10, AF_INET, pp, skb, 0,
601 "ip_vs_nat_xmit(): " 653 "ip_vs_nat_xmit(): "
602 "stopping DNAT to local address"); 654 "stopping DNAT to local address");
603 goto tx_error_put; 655 goto tx_error;
604 } 656 }
605 } 657 }
606#endif 658#endif
607 659
608 /* From world but DNAT to loopback address? */ 660 /* From world but DNAT to loopback address? */
609 if (local && ipv4_is_loopback(cp->daddr.ip) && 661 if (local && ipv4_is_loopback(cp->daddr.ip) && was_input) {
610 rt_is_input_route(skb_rtable(skb))) {
611 IP_VS_DBG_RL_PKT(1, AF_INET, pp, skb, 0, "ip_vs_nat_xmit(): " 662 IP_VS_DBG_RL_PKT(1, AF_INET, pp, skb, 0, "ip_vs_nat_xmit(): "
612 "stopping DNAT to loopback address"); 663 "stopping DNAT to loopback address");
613 goto tx_error_put; 664 goto tx_error;
614 }
615
616 /* MTU checking */
617 mtu = dst_mtu(&rt->dst);
618 if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF)) &&
619 !skb_is_gso(skb)) {
620 icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
621 IP_VS_DBG_RL_PKT(0, AF_INET, pp, skb, 0,
622 "ip_vs_nat_xmit(): frag needed for");
623 goto tx_error_put;
624 } 665 }
625 666
626 /* copy-on-write the packet before mangling it */ 667 /* copy-on-write the packet before mangling it */
627 if (!skb_make_writable(skb, sizeof(struct iphdr))) 668 if (!skb_make_writable(skb, sizeof(struct iphdr)))
628 goto tx_error_put; 669 goto tx_error;
629 670
630 if (skb_cow(skb, rt->dst.dev->hard_header_len)) 671 if (skb_cow(skb, rt->dst.dev->hard_header_len))
631 goto tx_error_put; 672 goto tx_error;
632 673
633 /* mangle the packet */ 674 /* mangle the packet */
634 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, ipvsh)) 675 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, ipvsh))
635 goto tx_error_put; 676 goto tx_error;
636 ip_hdr(skb)->daddr = cp->daddr.ip; 677 ip_hdr(skb)->daddr = cp->daddr.ip;
637 ip_send_check(ip_hdr(skb)); 678 ip_send_check(ip_hdr(skb));
638 679
639 if (!local) {
640 /* drop old route */
641 skb_dst_drop(skb);
642 skb_dst_set(skb, &rt->dst);
643 } else {
644 ip_rt_put(rt);
645 /*
646 * Some IPv4 replies get local address from routes,
647 * not from iph, so while we DNAT after routing
648 * we need this second input/output route.
649 */
650 if (!__ip_vs_reroute_locally(skb))
651 goto tx_error;
652 }
653
654 IP_VS_DBG_PKT(10, AF_INET, pp, skb, 0, "After DNAT"); 680 IP_VS_DBG_PKT(10, AF_INET, pp, skb, 0, "After DNAT");
655 681
656 /* FIXME: when application helper enlarges the packet and the length 682 /* FIXME: when application helper enlarges the packet and the length
@@ -660,49 +686,48 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
660 /* Another hack: avoid icmp_send in ip_fragment */ 686 /* Another hack: avoid icmp_send in ip_fragment */
661 skb->local_df = 1; 687 skb->local_df = 1;
662 688
663 IP_VS_XMIT_NAT(NFPROTO_IPV4, skb, cp, local); 689 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local);
690 rcu_read_unlock();
664 691
665 LeaveFunction(10); 692 LeaveFunction(10);
666 return NF_STOLEN; 693 return rc;
667 694
668 tx_error_icmp:
669 dst_link_failure(skb);
670 tx_error: 695 tx_error:
671 kfree_skb(skb); 696 kfree_skb(skb);
697 rcu_read_unlock();
672 LeaveFunction(10); 698 LeaveFunction(10);
673 return NF_STOLEN; 699 return NF_STOLEN;
674 tx_error_put:
675 ip_rt_put(rt);
676 goto tx_error;
677} 700}
678 701
679#ifdef CONFIG_IP_VS_IPV6 702#ifdef CONFIG_IP_VS_IPV6
680int 703int
681ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, 704ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
682 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph) 705 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
683{ 706{
684 struct rt6_info *rt; /* Route to the other host */ 707 struct rt6_info *rt; /* Route to the other host */
685 int mtu; 708 int local, rc;
686 int local;
687 709
688 EnterFunction(10); 710 EnterFunction(10);
689 711
712 rcu_read_lock();
690 /* check if it is a connection of no-client-port */ 713 /* check if it is a connection of no-client-port */
691 if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT && !iph->fragoffs)) { 714 if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT && !ipvsh->fragoffs)) {
692 __be16 _pt, *p; 715 __be16 _pt, *p;
693 p = skb_header_pointer(skb, iph->len, sizeof(_pt), &_pt); 716 p = skb_header_pointer(skb, ipvsh->len, sizeof(_pt), &_pt);
694 if (p == NULL) 717 if (p == NULL)
695 goto tx_error; 718 goto tx_error;
696 ip_vs_conn_fill_cport(cp, *p); 719 ip_vs_conn_fill_cport(cp, *p);
697 IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p)); 720 IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
698 } 721 }
699 722
700 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL, 723 local = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
701 0, (IP_VS_RT_MODE_LOCAL | 724 ipvsh, 0,
702 IP_VS_RT_MODE_NON_LOCAL | 725 IP_VS_RT_MODE_LOCAL |
703 IP_VS_RT_MODE_RDR)))) 726 IP_VS_RT_MODE_NON_LOCAL |
704 goto tx_error_icmp; 727 IP_VS_RT_MODE_RDR);
705 local = __ip_vs_is_local_route6(rt); 728 if (local < 0)
729 goto tx_error;
730 rt = (struct rt6_info *) skb_dst(skb);
706 /* 731 /*
707 * Avoid duplicate tuple in reply direction for NAT traffic 732 * Avoid duplicate tuple in reply direction for NAT traffic
708 * to local address when connection is sync-ed 733 * to local address when connection is sync-ed
@@ -716,7 +741,7 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
716 IP_VS_DBG_RL_PKT(10, AF_INET6, pp, skb, 0, 741 IP_VS_DBG_RL_PKT(10, AF_INET6, pp, skb, 0,
717 "ip_vs_nat_xmit_v6(): " 742 "ip_vs_nat_xmit_v6(): "
718 "stopping DNAT to local address"); 743 "stopping DNAT to local address");
719 goto tx_error_put; 744 goto tx_error;
720 } 745 }
721 } 746 }
722#endif 747#endif
@@ -727,46 +752,21 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
727 IP_VS_DBG_RL_PKT(1, AF_INET6, pp, skb, 0, 752 IP_VS_DBG_RL_PKT(1, AF_INET6, pp, skb, 0,
728 "ip_vs_nat_xmit_v6(): " 753 "ip_vs_nat_xmit_v6(): "
729 "stopping DNAT to loopback address"); 754 "stopping DNAT to loopback address");
730 goto tx_error_put; 755 goto tx_error;
731 }
732
733 /* MTU checking */
734 mtu = dst_mtu(&rt->dst);
735 if (__mtu_check_toobig_v6(skb, mtu)) {
736 if (!skb->dev) {
737 struct net *net = dev_net(skb_dst(skb)->dev);
738
739 skb->dev = net->loopback_dev;
740 }
741 /* only send ICMP too big on first fragment */
742 if (!iph->fragoffs)
743 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
744 IP_VS_DBG_RL_PKT(0, AF_INET6, pp, skb, 0,
745 "ip_vs_nat_xmit_v6(): frag needed for");
746 goto tx_error_put;
747 } 756 }
748 757
749 /* copy-on-write the packet before mangling it */ 758 /* copy-on-write the packet before mangling it */
750 if (!skb_make_writable(skb, sizeof(struct ipv6hdr))) 759 if (!skb_make_writable(skb, sizeof(struct ipv6hdr)))
751 goto tx_error_put; 760 goto tx_error;
752 761
753 if (skb_cow(skb, rt->dst.dev->hard_header_len)) 762 if (skb_cow(skb, rt->dst.dev->hard_header_len))
754 goto tx_error_put; 763 goto tx_error;
755 764
756 /* mangle the packet */ 765 /* mangle the packet */
757 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, iph)) 766 if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, ipvsh))
758 goto tx_error; 767 goto tx_error;
759 ipv6_hdr(skb)->daddr = cp->daddr.in6; 768 ipv6_hdr(skb)->daddr = cp->daddr.in6;
760 769
761 if (!local || !skb->dev) {
762 /* drop the old route when skb is not shared */
763 skb_dst_drop(skb);
764 skb_dst_set(skb, &rt->dst);
765 } else {
766 /* destined to loopback, do we need to change route? */
767 dst_release(&rt->dst);
768 }
769
770 IP_VS_DBG_PKT(10, AF_INET6, pp, skb, 0, "After DNAT"); 770 IP_VS_DBG_PKT(10, AF_INET6, pp, skb, 0, "After DNAT");
771 771
772 /* FIXME: when application helper enlarges the packet and the length 772 /* FIXME: when application helper enlarges the packet and the length
@@ -776,20 +776,17 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
776 /* Another hack: avoid icmp_send in ip_fragment */ 776 /* Another hack: avoid icmp_send in ip_fragment */
777 skb->local_df = 1; 777 skb->local_df = 1;
778 778
779 IP_VS_XMIT_NAT(NFPROTO_IPV6, skb, cp, local); 779 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local);
780 rcu_read_unlock();
780 781
781 LeaveFunction(10); 782 LeaveFunction(10);
782 return NF_STOLEN; 783 return rc;
783 784
784tx_error_icmp:
785 dst_link_failure(skb);
786tx_error: 785tx_error:
787 LeaveFunction(10); 786 LeaveFunction(10);
788 kfree_skb(skb); 787 kfree_skb(skb);
788 rcu_read_unlock();
789 return NF_STOLEN; 789 return NF_STOLEN;
790tx_error_put:
791 dst_release(&rt->dst);
792 goto tx_error;
793} 790}
794#endif 791#endif
795 792
@@ -826,56 +823,40 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
826 __be16 df; 823 __be16 df;
827 struct iphdr *iph; /* Our new IP header */ 824 struct iphdr *iph; /* Our new IP header */
828 unsigned int max_headroom; /* The extra header space needed */ 825 unsigned int max_headroom; /* The extra header space needed */
829 int mtu; 826 int ret, local;
830 int ret;
831 827
832 EnterFunction(10); 828 EnterFunction(10);
833 829
834 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip, 830 rcu_read_lock();
835 RT_TOS(tos), IP_VS_RT_MODE_LOCAL | 831 local = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
836 IP_VS_RT_MODE_NON_LOCAL | 832 IP_VS_RT_MODE_LOCAL |
837 IP_VS_RT_MODE_CONNECT, 833 IP_VS_RT_MODE_NON_LOCAL |
838 &saddr))) 834 IP_VS_RT_MODE_CONNECT |
839 goto tx_error_icmp; 835 IP_VS_RT_MODE_TUNNEL, &saddr);
840 if (rt->rt_flags & RTCF_LOCAL) { 836 if (local < 0)
841 ip_rt_put(rt); 837 goto tx_error;
842 IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 1); 838 if (local) {
839 rcu_read_unlock();
840 return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1);
843 } 841 }
844 842
843 rt = skb_rtable(skb);
845 tdev = rt->dst.dev; 844 tdev = rt->dst.dev;
846 845
847 mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
848 if (mtu < 68) {
849 IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__);
850 goto tx_error_put;
851 }
852 if (rt_is_output_route(skb_rtable(skb)))
853 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
854
855 /* Copy DF, reset fragment offset and MF */ 846 /* Copy DF, reset fragment offset and MF */
856 df = sysctl_pmtu_disc(ipvs) ? old_iph->frag_off & htons(IP_DF) : 0; 847 df = sysctl_pmtu_disc(ipvs) ? old_iph->frag_off & htons(IP_DF) : 0;
857 848
858 if (df && mtu < ntohs(old_iph->tot_len) && !skb_is_gso(skb)) {
859 icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
860 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
861 goto tx_error_put;
862 }
863
864 /* 849 /*
865 * Okay, now see if we can stuff it in the buffer as-is. 850 * Okay, now see if we can stuff it in the buffer as-is.
866 */ 851 */
867 max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr); 852 max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr);
868 853
869 if (skb_headroom(skb) < max_headroom 854 if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) {
870 || skb_cloned(skb) || skb_shared(skb)) {
871 struct sk_buff *new_skb = 855 struct sk_buff *new_skb =
872 skb_realloc_headroom(skb, max_headroom); 856 skb_realloc_headroom(skb, max_headroom);
873 if (!new_skb) { 857
874 ip_rt_put(rt); 858 if (!new_skb)
875 kfree_skb(skb); 859 goto tx_error;
876 IP_VS_ERR_RL("%s(): no memory\n", __func__);
877 return NF_STOLEN;
878 }
879 consume_skb(skb); 860 consume_skb(skb);
880 skb = new_skb; 861 skb = new_skb;
881 old_iph = ip_hdr(skb); 862 old_iph = ip_hdr(skb);
@@ -890,10 +871,6 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
890 skb_reset_network_header(skb); 871 skb_reset_network_header(skb);
891 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 872 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
892 873
893 /* drop old route */
894 skb_dst_drop(skb);
895 skb_dst_set(skb, &rt->dst);
896
897 /* 874 /*
898 * Push down and install the IPIP header. 875 * Push down and install the IPIP header.
899 */ 876 */
@@ -911,25 +888,22 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
911 /* Another hack: avoid icmp_send in ip_fragment */ 888 /* Another hack: avoid icmp_send in ip_fragment */
912 skb->local_df = 1; 889 skb->local_df = 1;
913 890
914 ret = IP_VS_XMIT_TUNNEL(skb, cp); 891 ret = ip_vs_tunnel_xmit_prepare(skb, cp);
915 if (ret == NF_ACCEPT) 892 if (ret == NF_ACCEPT)
916 ip_local_out(skb); 893 ip_local_out(skb);
917 else if (ret == NF_DROP) 894 else if (ret == NF_DROP)
918 kfree_skb(skb); 895 kfree_skb(skb);
896 rcu_read_unlock();
919 897
920 LeaveFunction(10); 898 LeaveFunction(10);
921 899
922 return NF_STOLEN; 900 return NF_STOLEN;
923 901
924 tx_error_icmp:
925 dst_link_failure(skb);
926 tx_error: 902 tx_error:
927 kfree_skb(skb); 903 kfree_skb(skb);
904 rcu_read_unlock();
928 LeaveFunction(10); 905 LeaveFunction(10);
929 return NF_STOLEN; 906 return NF_STOLEN;
930tx_error_put:
931 ip_rt_put(rt);
932 goto tx_error;
933} 907}
934 908
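The tunnel path loses its private MTU arithmetic (the old mtu < 68 check,
the update_pmtu() call and the ICMP_FRAG_NEEDED emission) because passing
the new IP_VS_RT_MODE_TUNNEL flag asks __ip_vs_get_out_rt() to account for
the encapsulation header and do the PMTU work during the lookup; the caller
then recovers the cached route with skb_rtable() instead of owning a
reference. A hedged fragment of that calling convention (the small wrapper
is hypothetical, the flags and helpers are from the patch):

    static struct rtable *tunnel_route(struct sk_buff *skb,
                                       struct ip_vs_conn *cp,
                                       __be32 *saddr, int *local)
    {
            *local = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
                                        IP_VS_RT_MODE_LOCAL |
                                        IP_VS_RT_MODE_NON_LOCAL |
                                        IP_VS_RT_MODE_CONNECT |
                                        IP_VS_RT_MODE_TUNNEL, saddr);
            /* on success the dst already sits in the skb */
            return *local < 0 ? NULL : skb_rtable(skb);
    }
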
935#ifdef CONFIG_IP_VS_IPV6 909#ifdef CONFIG_IP_VS_IPV6
@@ -943,60 +917,37 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
943 struct ipv6hdr *old_iph = ipv6_hdr(skb); 917 struct ipv6hdr *old_iph = ipv6_hdr(skb);
944 struct ipv6hdr *iph; /* Our new IP header */ 918 struct ipv6hdr *iph; /* Our new IP header */
945 unsigned int max_headroom; /* The extra header space needed */ 919 unsigned int max_headroom; /* The extra header space needed */
946 int mtu; 920 int ret, local;
947 int ret;
948 921
949 EnterFunction(10); 922 EnterFunction(10);
950 923
951 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, 924 rcu_read_lock();
952 &saddr, 1, (IP_VS_RT_MODE_LOCAL | 925 local = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6,
953 IP_VS_RT_MODE_NON_LOCAL)))) 926 &saddr, ipvsh, 1,
954 goto tx_error_icmp; 927 IP_VS_RT_MODE_LOCAL |
955 if (__ip_vs_is_local_route6(rt)) { 928 IP_VS_RT_MODE_NON_LOCAL |
956 dst_release(&rt->dst); 929 IP_VS_RT_MODE_TUNNEL);
957 IP_VS_XMIT(NFPROTO_IPV6, skb, cp, 1); 930 if (local < 0)
931 goto tx_error;
932 if (local) {
933 rcu_read_unlock();
934 return ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 1);
958 } 935 }
959 936
937 rt = (struct rt6_info *) skb_dst(skb);
960 tdev = rt->dst.dev; 938 tdev = rt->dst.dev;
961 939
962 mtu = dst_mtu(&rt->dst) - sizeof(struct ipv6hdr);
963 if (mtu < IPV6_MIN_MTU) {
964 IP_VS_DBG_RL("%s(): mtu less than %d\n", __func__,
965 IPV6_MIN_MTU);
966 goto tx_error_put;
967 }
968 if (skb_dst(skb))
969 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
970
 971 /* MTU checking: Notice that 'mtu' has been adjusted beforehand */
972 if (__mtu_check_toobig_v6(skb, mtu)) {
973 if (!skb->dev) {
974 struct net *net = dev_net(skb_dst(skb)->dev);
975
976 skb->dev = net->loopback_dev;
977 }
978 /* only send ICMP too big on first fragment */
979 if (!ipvsh->fragoffs)
980 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
981 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
982 goto tx_error_put;
983 }
984
985 /* 940 /*
986 * Okay, now see if we can stuff it in the buffer as-is. 941 * Okay, now see if we can stuff it in the buffer as-is.
987 */ 942 */
988 max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr); 943 max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr);
989 944
990 if (skb_headroom(skb) < max_headroom 945 if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) {
991 || skb_cloned(skb) || skb_shared(skb)) {
992 struct sk_buff *new_skb = 946 struct sk_buff *new_skb =
993 skb_realloc_headroom(skb, max_headroom); 947 skb_realloc_headroom(skb, max_headroom);
994 if (!new_skb) { 948
995 dst_release(&rt->dst); 949 if (!new_skb)
996 kfree_skb(skb); 950 goto tx_error;
997 IP_VS_ERR_RL("%s(): no memory\n", __func__);
998 return NF_STOLEN;
999 }
1000 consume_skb(skb); 951 consume_skb(skb);
1001 skb = new_skb; 952 skb = new_skb;
1002 old_iph = ipv6_hdr(skb); 953 old_iph = ipv6_hdr(skb);
@@ -1008,10 +959,6 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1008 skb_reset_network_header(skb); 959 skb_reset_network_header(skb);
1009 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 960 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1010 961
1011 /* drop old route */
1012 skb_dst_drop(skb);
1013 skb_dst_set(skb, &rt->dst);
1014
1015 /* 962 /*
1016 * Push down and install the IPIP header. 963 * Push down and install the IPIP header.
1017 */ 964 */
@@ -1029,25 +976,22 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1029 /* Another hack: avoid icmp_send in ip_fragment */ 976 /* Another hack: avoid icmp_send in ip_fragment */
1030 skb->local_df = 1; 977 skb->local_df = 1;
1031 978
1032 ret = IP_VS_XMIT_TUNNEL(skb, cp); 979 ret = ip_vs_tunnel_xmit_prepare(skb, cp);
1033 if (ret == NF_ACCEPT) 980 if (ret == NF_ACCEPT)
1034 ip6_local_out(skb); 981 ip6_local_out(skb);
1035 else if (ret == NF_DROP) 982 else if (ret == NF_DROP)
1036 kfree_skb(skb); 983 kfree_skb(skb);
984 rcu_read_unlock();
1037 985
1038 LeaveFunction(10); 986 LeaveFunction(10);
1039 987
1040 return NF_STOLEN; 988 return NF_STOLEN;
1041 989
1042tx_error_icmp:
1043 dst_link_failure(skb);
1044tx_error: 990tx_error:
1045 kfree_skb(skb); 991 kfree_skb(skb);
992 rcu_read_unlock();
1046 LeaveFunction(10); 993 LeaveFunction(10);
1047 return NF_STOLEN; 994 return NF_STOLEN;
1048tx_error_put:
1049 dst_release(&rt->dst);
1050 goto tx_error;
1051} 995}
1052#endif 996#endif
1053 997
@@ -1060,59 +1004,36 @@ int
1060ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, 1004ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1061 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh) 1005 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
1062{ 1006{
1063 struct rtable *rt; /* Route to the other host */ 1007 int local;
1064 struct iphdr *iph = ip_hdr(skb);
1065 int mtu;
1066 1008
1067 EnterFunction(10); 1009 EnterFunction(10);
1068 1010
1069 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip, 1011 rcu_read_lock();
1070 RT_TOS(iph->tos), 1012 local = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
1071 IP_VS_RT_MODE_LOCAL | 1013 IP_VS_RT_MODE_LOCAL |
1072 IP_VS_RT_MODE_NON_LOCAL | 1014 IP_VS_RT_MODE_NON_LOCAL |
1073 IP_VS_RT_MODE_KNOWN_NH, NULL))) 1015 IP_VS_RT_MODE_KNOWN_NH, NULL);
1074 goto tx_error_icmp; 1016 if (local < 0)
1075 if (rt->rt_flags & RTCF_LOCAL) {
1076 ip_rt_put(rt);
1077 IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 1);
1078 }
1079
1080 /* MTU checking */
1081 mtu = dst_mtu(&rt->dst);
1082 if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu &&
1083 !skb_is_gso(skb)) {
 1084 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
1085 ip_rt_put(rt);
1086 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
1087 goto tx_error; 1017 goto tx_error;
1018 if (local) {
1019 rcu_read_unlock();
1020 return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1);
1088 } 1021 }
1089 1022
1090 /*
1091 * Call ip_send_check because we are not sure it is called
1092 * after ip_defrag. Is copy-on-write needed?
1093 */
1094 if (unlikely((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)) {
1095 ip_rt_put(rt);
1096 return NF_STOLEN;
1097 }
1098 ip_send_check(ip_hdr(skb)); 1023 ip_send_check(ip_hdr(skb));
1099 1024
1100 /* drop old route */
1101 skb_dst_drop(skb);
1102 skb_dst_set(skb, &rt->dst);
1103
1104 /* Another hack: avoid icmp_send in ip_fragment */ 1025 /* Another hack: avoid icmp_send in ip_fragment */
1105 skb->local_df = 1; 1026 skb->local_df = 1;
1106 1027
1107 IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 0); 1028 ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0);
1029 rcu_read_unlock();
1108 1030
1109 LeaveFunction(10); 1031 LeaveFunction(10);
1110 return NF_STOLEN; 1032 return NF_STOLEN;
1111 1033
1112 tx_error_icmp:
1113 dst_link_failure(skb);
1114 tx_error: 1034 tx_error:
1115 kfree_skb(skb); 1035 kfree_skb(skb);
1036 rcu_read_unlock();
1116 LeaveFunction(10); 1037 LeaveFunction(10);
1117 return NF_STOLEN; 1038 return NF_STOLEN;
1118} 1039}
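Direct routing never rewrites the payload; it only refreshes the IP header
checksum before re-sending, and with the skb no longer shared at this point
the skb_share_check() copy can go. For reference, ip_send_check() boils
down to the following self-contained helper (demo name; the body mirrors
net/ipv4/ip_output.c):

    static void demo_ip_send_check(struct iphdr *iph)
    {
            iph->check = 0;
            iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
    }
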
@@ -1120,64 +1041,36 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1120#ifdef CONFIG_IP_VS_IPV6 1041#ifdef CONFIG_IP_VS_IPV6
1121int 1042int
1122ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, 1043ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1123 struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph) 1044 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
1124{ 1045{
1125 struct rt6_info *rt; /* Route to the other host */ 1046 int local;
1126 int mtu;
1127 1047
1128 EnterFunction(10); 1048 EnterFunction(10);
1129 1049
1130 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL, 1050 rcu_read_lock();
1131 0, (IP_VS_RT_MODE_LOCAL | 1051 local = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
1132 IP_VS_RT_MODE_NON_LOCAL)))) 1052 ipvsh, 0,
1133 goto tx_error_icmp; 1053 IP_VS_RT_MODE_LOCAL |
1134 if (__ip_vs_is_local_route6(rt)) { 1054 IP_VS_RT_MODE_NON_LOCAL);
1135 dst_release(&rt->dst); 1055 if (local < 0)
1136 IP_VS_XMIT(NFPROTO_IPV6, skb, cp, 1);
1137 }
1138
1139 /* MTU checking */
1140 mtu = dst_mtu(&rt->dst);
1141 if (__mtu_check_toobig_v6(skb, mtu)) {
1142 if (!skb->dev) {
1143 struct net *net = dev_net(skb_dst(skb)->dev);
1144
1145 skb->dev = net->loopback_dev;
1146 }
1147 /* only send ICMP too big on first fragment */
1148 if (!iph->fragoffs)
1149 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1150 dst_release(&rt->dst);
1151 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
1152 goto tx_error; 1056 goto tx_error;
1057 if (local) {
1058 rcu_read_unlock();
1059 return ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 1);
1153 } 1060 }
1154 1061
1155 /*
1156 * Call ip_send_check because we are not sure it is called
1157 * after ip_defrag. Is copy-on-write needed?
1158 */
1159 skb = skb_share_check(skb, GFP_ATOMIC);
1160 if (unlikely(skb == NULL)) {
1161 dst_release(&rt->dst);
1162 return NF_STOLEN;
1163 }
1164
1165 /* drop old route */
1166 skb_dst_drop(skb);
1167 skb_dst_set(skb, &rt->dst);
1168
1169 /* Another hack: avoid icmp_send in ip_fragment */ 1062 /* Another hack: avoid icmp_send in ip_fragment */
1170 skb->local_df = 1; 1063 skb->local_df = 1;
1171 1064
1172 IP_VS_XMIT(NFPROTO_IPV6, skb, cp, 0); 1065 ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0);
1066 rcu_read_unlock();
1173 1067
1174 LeaveFunction(10); 1068 LeaveFunction(10);
1175 return NF_STOLEN; 1069 return NF_STOLEN;
1176 1070
1177tx_error_icmp:
1178 dst_link_failure(skb);
1179tx_error: 1071tx_error:
1180 kfree_skb(skb); 1072 kfree_skb(skb);
1073 rcu_read_unlock();
1181 LeaveFunction(10); 1074 LeaveFunction(10);
1182 return NF_STOLEN; 1075 return NF_STOLEN;
1183} 1076}
@@ -1194,10 +1087,9 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1194 struct ip_vs_iphdr *iph) 1087 struct ip_vs_iphdr *iph)
1195{ 1088{
1196 struct rtable *rt; /* Route to the other host */ 1089 struct rtable *rt; /* Route to the other host */
1197 int mtu;
1198 int rc; 1090 int rc;
1199 int local; 1091 int local;
1200 int rt_mode; 1092 int rt_mode, was_input;
1201 1093
1202 EnterFunction(10); 1094 EnterFunction(10);
1203 1095
@@ -1217,16 +1109,17 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1217 /* 1109 /*
1218 * mangle and send the packet here (only for VS/NAT) 1110 * mangle and send the packet here (only for VS/NAT)
1219 */ 1111 */
1112 was_input = rt_is_input_route(skb_rtable(skb));
1220 1113
1221 /* LOCALNODE from FORWARD hook is not supported */ 1114 /* LOCALNODE from FORWARD hook is not supported */
1222 rt_mode = (hooknum != NF_INET_FORWARD) ? 1115 rt_mode = (hooknum != NF_INET_FORWARD) ?
1223 IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL | 1116 IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL |
1224 IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL; 1117 IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL;
1225 if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip, 1118 rcu_read_lock();
1226 RT_TOS(ip_hdr(skb)->tos), 1119 local = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip, rt_mode, NULL);
1227 rt_mode, NULL))) 1120 if (local < 0)
1228 goto tx_error_icmp; 1121 goto tx_error;
1229 local = rt->rt_flags & RTCF_LOCAL; 1122 rt = skb_rtable(skb);
1230 1123
1231 /* 1124 /*
1232 * Avoid duplicate tuple in reply direction for NAT traffic 1125 * Avoid duplicate tuple in reply direction for NAT traffic
@@ -1241,82 +1134,51 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
1241 IP_VS_DBG(10, "%s(): " 1134 IP_VS_DBG(10, "%s(): "
1242 "stopping DNAT to local address %pI4\n", 1135 "stopping DNAT to local address %pI4\n",
1243 __func__, &cp->daddr.ip); 1136 __func__, &cp->daddr.ip);
1244 goto tx_error_put; 1137 goto tx_error;
1245 } 1138 }
1246 } 1139 }
1247#endif 1140#endif
1248 1141
1249 /* From world but DNAT to loopback address? */ 1142 /* From world but DNAT to loopback address? */
1250 if (local && ipv4_is_loopback(cp->daddr.ip) && 1143 if (local && ipv4_is_loopback(cp->daddr.ip) && was_input) {
1251 rt_is_input_route(skb_rtable(skb))) {
1252 IP_VS_DBG(1, "%s(): " 1144 IP_VS_DBG(1, "%s(): "
1253 "stopping DNAT to loopback %pI4\n", 1145 "stopping DNAT to loopback %pI4\n",
1254 __func__, &cp->daddr.ip); 1146 __func__, &cp->daddr.ip);
1255 goto tx_error_put; 1147 goto tx_error;
1256 }
1257
1258 /* MTU checking */
1259 mtu = dst_mtu(&rt->dst);
1260 if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF)) &&
1261 !skb_is_gso(skb)) {
1262 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
1263 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
1264 goto tx_error_put;
1265 } 1148 }
1266 1149
1267 /* copy-on-write the packet before mangling it */ 1150 /* copy-on-write the packet before mangling it */
1268 if (!skb_make_writable(skb, offset)) 1151 if (!skb_make_writable(skb, offset))
1269 goto tx_error_put; 1152 goto tx_error;
1270 1153
1271 if (skb_cow(skb, rt->dst.dev->hard_header_len)) 1154 if (skb_cow(skb, rt->dst.dev->hard_header_len))
1272 goto tx_error_put; 1155 goto tx_error;
1273 1156
1274 ip_vs_nat_icmp(skb, pp, cp, 0); 1157 ip_vs_nat_icmp(skb, pp, cp, 0);
1275 1158
1276 if (!local) {
1277 /* drop the old route when skb is not shared */
1278 skb_dst_drop(skb);
1279 skb_dst_set(skb, &rt->dst);
1280 } else {
1281 ip_rt_put(rt);
1282 /*
1283 * Some IPv4 replies get local address from routes,
1284 * not from iph, so while we DNAT after routing
1285 * we need this second input/output route.
1286 */
1287 if (!__ip_vs_reroute_locally(skb))
1288 goto tx_error;
1289 }
1290
1291 /* Another hack: avoid icmp_send in ip_fragment */ 1159 /* Another hack: avoid icmp_send in ip_fragment */
1292 skb->local_df = 1; 1160 skb->local_df = 1;
1293 1161
1294 IP_VS_XMIT_NAT(NFPROTO_IPV4, skb, cp, local); 1162 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local);
1295 1163 rcu_read_unlock();
1296 rc = NF_STOLEN;
1297 goto out; 1164 goto out;
1298 1165
1299 tx_error_icmp:
1300 dst_link_failure(skb);
1301 tx_error: 1166 tx_error:
1302 dev_kfree_skb(skb); 1167 kfree_skb(skb);
1168 rcu_read_unlock();
1303 rc = NF_STOLEN; 1169 rc = NF_STOLEN;
1304 out: 1170 out:
1305 LeaveFunction(10); 1171 LeaveFunction(10);
1306 return rc; 1172 return rc;
1307 tx_error_put:
1308 ip_rt_put(rt);
1309 goto tx_error;
1310} 1173}
1311 1174
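The ICMP NAT path needs to know whether the packet arrived on an input
route, but the route lookup now overwrites skb->dst, so the new was_input
flag snapshots rt_is_input_route() before the lookup runs; that ordering is
also what retires the __ip_vs_reroute_locally() dance. A condensed,
hypothetical illustration of the constraint (not the real function):

    /* hypothetical: must be evaluated before the lookup replaces skb->dst */
    static bool dnat_to_loopback(struct sk_buff *skb, __be32 daddr)
    {
            bool was_input = rt_is_input_route(skb_rtable(skb));

            /* the route lookup would run here in the real code */
            return ipv4_is_loopback(daddr) && was_input;
    }
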
1312#ifdef CONFIG_IP_VS_IPV6 1175#ifdef CONFIG_IP_VS_IPV6
1313int 1176int
1314ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, 1177ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1315 struct ip_vs_protocol *pp, int offset, unsigned int hooknum, 1178 struct ip_vs_protocol *pp, int offset, unsigned int hooknum,
1316 struct ip_vs_iphdr *iph) 1179 struct ip_vs_iphdr *ipvsh)
1317{ 1180{
1318 struct rt6_info *rt; /* Route to the other host */ 1181 struct rt6_info *rt; /* Route to the other host */
1319 int mtu;
1320 int rc; 1182 int rc;
1321 int local; 1183 int local;
1322 int rt_mode; 1184 int rt_mode;
@@ -1328,7 +1190,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1328 translate address/port back */ 1190 translate address/port back */
1329 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) { 1191 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
1330 if (cp->packet_xmit) 1192 if (cp->packet_xmit)
1331 rc = cp->packet_xmit(skb, cp, pp, iph); 1193 rc = cp->packet_xmit(skb, cp, pp, ipvsh);
1332 else 1194 else
1333 rc = NF_ACCEPT; 1195 rc = NF_ACCEPT;
1334 /* do not touch skb anymore */ 1196 /* do not touch skb anymore */
@@ -1344,11 +1206,12 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1344 rt_mode = (hooknum != NF_INET_FORWARD) ? 1206 rt_mode = (hooknum != NF_INET_FORWARD) ?
1345 IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL | 1207 IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL |
1346 IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL; 1208 IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL;
1347 if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL, 1209 rcu_read_lock();
1348 0, rt_mode))) 1210 local = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
1349 goto tx_error_icmp; 1211 ipvsh, 0, rt_mode);
1350 1212 if (local < 0)
1351 local = __ip_vs_is_local_route6(rt); 1213 goto tx_error;
1214 rt = (struct rt6_info *) skb_dst(skb);
1352 /* 1215 /*
1353 * Avoid duplicate tuple in reply direction for NAT traffic 1216 * Avoid duplicate tuple in reply direction for NAT traffic
1354 * to local address when connection is sync-ed 1217 * to local address when connection is sync-ed
@@ -1362,7 +1225,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1362 IP_VS_DBG(10, "%s(): " 1225 IP_VS_DBG(10, "%s(): "
1363 "stopping DNAT to local address %pI6\n", 1226 "stopping DNAT to local address %pI6\n",
1364 __func__, &cp->daddr.in6); 1227 __func__, &cp->daddr.in6);
1365 goto tx_error_put; 1228 goto tx_error;
1366 } 1229 }
1367 } 1230 }
1368#endif 1231#endif
@@ -1373,60 +1236,31 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
1373 IP_VS_DBG(1, "%s(): " 1236 IP_VS_DBG(1, "%s(): "
1374 "stopping DNAT to loopback %pI6\n", 1237 "stopping DNAT to loopback %pI6\n",
1375 __func__, &cp->daddr.in6); 1238 __func__, &cp->daddr.in6);
1376 goto tx_error_put; 1239 goto tx_error;
1377 }
1378
1379 /* MTU checking */
1380 mtu = dst_mtu(&rt->dst);
1381 if (__mtu_check_toobig_v6(skb, mtu)) {
1382 if (!skb->dev) {
1383 struct net *net = dev_net(skb_dst(skb)->dev);
1384
1385 skb->dev = net->loopback_dev;
1386 }
1387 /* only send ICMP too big on first fragment */
1388 if (!iph->fragoffs)
1389 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1390 IP_VS_DBG_RL("%s(): frag needed\n", __func__);
1391 goto tx_error_put;
1392 } 1240 }
1393 1241
1394 /* copy-on-write the packet before mangling it */ 1242 /* copy-on-write the packet before mangling it */
1395 if (!skb_make_writable(skb, offset)) 1243 if (!skb_make_writable(skb, offset))
1396 goto tx_error_put; 1244 goto tx_error;
1397 1245
1398 if (skb_cow(skb, rt->dst.dev->hard_header_len)) 1246 if (skb_cow(skb, rt->dst.dev->hard_header_len))
1399 goto tx_error_put; 1247 goto tx_error;
1400 1248
1401 ip_vs_nat_icmp_v6(skb, pp, cp, 0); 1249 ip_vs_nat_icmp_v6(skb, pp, cp, 0);
1402 1250
1403 if (!local || !skb->dev) {
1404 /* drop the old route when skb is not shared */
1405 skb_dst_drop(skb);
1406 skb_dst_set(skb, &rt->dst);
1407 } else {
1408 /* destined to loopback, do we need to change route? */
1409 dst_release(&rt->dst);
1410 }
1411
1412 /* Another hack: avoid icmp_send in ip_fragment */ 1251 /* Another hack: avoid icmp_send in ip_fragment */
1413 skb->local_df = 1; 1252 skb->local_df = 1;
1414 1253
1415 IP_VS_XMIT_NAT(NFPROTO_IPV6, skb, cp, local); 1254 rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local);
1416 1255 rcu_read_unlock();
1417 rc = NF_STOLEN;
1418 goto out; 1256 goto out;
1419 1257
1420tx_error_icmp:
1421 dst_link_failure(skb);
1422tx_error: 1258tx_error:
1423 dev_kfree_skb(skb); 1259 kfree_skb(skb);
1260 rcu_read_unlock();
1424 rc = NF_STOLEN; 1261 rc = NF_STOLEN;
1425out: 1262out:
1426 LeaveFunction(10); 1263 LeaveFunction(10);
1427 return rc; 1264 return rc;
1428tx_error_put:
1429 dst_release(&rt->dst);
1430 goto tx_error;
1431} 1265}
1432#endif 1266#endif
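The repeated "drop old route / set new route" blocks disappear from every
caller because attaching the dst is now the lookup's job; under the RCU
scheme that attachment is presumably reference-free, along the lines of the
sketch below (skb_dst_set_noref() is a real helper, but the surrounding
function is an assumption since the lookup body is not part of these
hunks):

    static void attach_route_noref(struct sk_buff *skb, struct rtable *rt)
    {
            skb_dst_drop(skb);
            /* valid only while the caller holds rcu_read_lock() */
            skb_dst_set_noref(skb, &rt->dst);
    }
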
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index c8e001a9c45b..007e8c43d19a 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -48,6 +48,7 @@
48#include <net/netfilter/nf_conntrack_labels.h> 48#include <net/netfilter/nf_conntrack_labels.h>
49#include <net/netfilter/nf_nat.h> 49#include <net/netfilter/nf_nat.h>
50#include <net/netfilter/nf_nat_core.h> 50#include <net/netfilter/nf_nat_core.h>
51#include <net/netfilter/nf_nat_helper.h>
51 52
52#define NF_CONNTRACK_VERSION "0.5.0" 53#define NF_CONNTRACK_VERSION "0.5.0"
53 54
@@ -1364,30 +1365,48 @@ void nf_conntrack_cleanup_end(void)
1364 */ 1365 */
1365void nf_conntrack_cleanup_net(struct net *net) 1366void nf_conntrack_cleanup_net(struct net *net)
1366{ 1367{
1368 LIST_HEAD(single);
1369
1370 list_add(&net->exit_list, &single);
1371 nf_conntrack_cleanup_net_list(&single);
1372}
1373
1374void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
1375{
1376 int busy;
1377 struct net *net;
1378
1367 /* 1379 /*
1368 * This makes sure all current packets have passed through 1380 * This makes sure all current packets have passed through
1369 * netfilter framework. Roll on, two-stage module 1381 * netfilter framework. Roll on, two-stage module
1370 * delete... 1382 * delete...
1371 */ 1383 */
1372 synchronize_net(); 1384 synchronize_net();
1373 i_see_dead_people: 1385i_see_dead_people:
1374 nf_ct_iterate_cleanup(net, kill_all, NULL); 1386 busy = 0;
1375 nf_ct_release_dying_list(net); 1387 list_for_each_entry(net, net_exit_list, exit_list) {
1376 if (atomic_read(&net->ct.count) != 0) { 1388 nf_ct_iterate_cleanup(net, kill_all, NULL);
1389 nf_ct_release_dying_list(net);
1390 if (atomic_read(&net->ct.count) != 0)
1391 busy = 1;
1392 }
1393 if (busy) {
1377 schedule(); 1394 schedule();
1378 goto i_see_dead_people; 1395 goto i_see_dead_people;
1379 } 1396 }
1380 1397
1381 nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size); 1398 list_for_each_entry(net, net_exit_list, exit_list) {
1382 nf_conntrack_proto_pernet_fini(net); 1399 nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
1383 nf_conntrack_helper_pernet_fini(net); 1400 nf_conntrack_proto_pernet_fini(net);
1384 nf_conntrack_ecache_pernet_fini(net); 1401 nf_conntrack_helper_pernet_fini(net);
1385 nf_conntrack_tstamp_pernet_fini(net); 1402 nf_conntrack_ecache_pernet_fini(net);
1386 nf_conntrack_acct_pernet_fini(net); 1403 nf_conntrack_tstamp_pernet_fini(net);
1387 nf_conntrack_expect_pernet_fini(net); 1404 nf_conntrack_acct_pernet_fini(net);
1388 kmem_cache_destroy(net->ct.nf_conntrack_cachep); 1405 nf_conntrack_expect_pernet_fini(net);
1389 kfree(net->ct.slabname); 1406 kmem_cache_destroy(net->ct.nf_conntrack_cachep);
1390 free_percpu(net->ct.stat); 1407 kfree(net->ct.slabname);
1408 free_percpu(net->ct.stat);
1409 }
1391} 1410}
1392 1411
1393void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls) 1412void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 94b4b9853f60..a0b1c5c23d1c 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -353,7 +353,7 @@ void nf_ct_helper_log(struct sk_buff *skb, const struct nf_conn *ct,
353 /* rcu_read_lock()ed by nf_hook_slow */ 353 /* rcu_read_lock()ed by nf_hook_slow */
354 helper = rcu_dereference(help->helper); 354 helper = rcu_dereference(help->helper);
355 355
356 nf_log_packet(nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL, 356 nf_log_packet(nf_ct_net(ct), nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL,
357 "nf_ct_%s: dropping packet: %pV ", helper->name, &vaf); 357 "nf_ct_%s: dropping packet: %pV ", helper->name, &vaf);
358 358
359 va_end(args); 359 va_end(args);
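From here on, every nf_log_packet() caller passes a struct net as the first
argument so the logger bound in that packet's namespace is used rather than
a global one; the conntrack protocol trackers below are all mechanical
conversions of this form. A sketch of a converted call site (demo names,
signature as shown in the nf_log.c hunk further down):

    static void demo_log_invalid(struct net *net, struct sk_buff *skb)
    {
            nf_log_packet(net, NFPROTO_IPV4, 0, skb, NULL, NULL, NULL,
                          "demo: invalid packet ");
    }
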
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 9904b15f600e..6d0f8a17c5b7 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -2409,6 +2409,92 @@ out:
2409 return skb->len; 2409 return skb->len;
2410} 2410}
2411 2411
2412static int
2413ctnetlink_exp_ct_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
2414{
2415 struct nf_conntrack_expect *exp, *last;
2416 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
2417 struct nf_conn *ct = cb->data;
2418 struct nf_conn_help *help = nfct_help(ct);
2419 u_int8_t l3proto = nfmsg->nfgen_family;
2420
2421 if (cb->args[0])
2422 return 0;
2423
2424 rcu_read_lock();
2425 last = (struct nf_conntrack_expect *)cb->args[1];
2426restart:
2427 hlist_for_each_entry(exp, &help->expectations, lnode) {
2428 if (l3proto && exp->tuple.src.l3num != l3proto)
2429 continue;
2430 if (cb->args[1]) {
2431 if (exp != last)
2432 continue;
2433 cb->args[1] = 0;
2434 }
2435 if (ctnetlink_exp_fill_info(skb, NETLINK_CB(cb->skb).portid,
2436 cb->nlh->nlmsg_seq,
2437 IPCTNL_MSG_EXP_NEW,
2438 exp) < 0) {
2439 if (!atomic_inc_not_zero(&exp->use))
2440 continue;
2441 cb->args[1] = (unsigned long)exp;
2442 goto out;
2443 }
2444 }
2445 if (cb->args[1]) {
2446 cb->args[1] = 0;
2447 goto restart;
2448 }
2449 cb->args[0] = 1;
2450out:
2451 rcu_read_unlock();
2452 if (last)
2453 nf_ct_expect_put(last);
2454
2455 return skb->len;
2456}
2457
2458static int ctnetlink_dump_exp_ct(struct sock *ctnl, struct sk_buff *skb,
2459 const struct nlmsghdr *nlh,
2460 const struct nlattr * const cda[])
2461{
2462 int err;
2463 struct net *net = sock_net(ctnl);
2464 struct nfgenmsg *nfmsg = nlmsg_data(nlh);
2465 u_int8_t u3 = nfmsg->nfgen_family;
2466 struct nf_conntrack_tuple tuple;
2467 struct nf_conntrack_tuple_hash *h;
2468 struct nf_conn *ct;
2469 u16 zone = 0;
2470 struct netlink_dump_control c = {
2471 .dump = ctnetlink_exp_ct_dump_table,
2472 .done = ctnetlink_exp_done,
2473 };
2474
2475 err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
2476 if (err < 0)
2477 return err;
2478
2479 if (cda[CTA_EXPECT_ZONE]) {
2480 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
2481 if (err < 0)
2482 return err;
2483 }
2484
2485 h = nf_conntrack_find_get(net, zone, &tuple);
2486 if (!h)
2487 return -ENOENT;
2488
2489 ct = nf_ct_tuplehash_to_ctrack(h);
2490 c.data = ct;
2491
2492 err = netlink_dump_start(ctnl, skb, nlh, &c);
2493 nf_ct_put(ct);
2494
2495 return err;
2496}
2497
2412static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = { 2498static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
2413 [CTA_EXPECT_MASTER] = { .type = NLA_NESTED }, 2499 [CTA_EXPECT_MASTER] = { .type = NLA_NESTED },
2414 [CTA_EXPECT_TUPLE] = { .type = NLA_NESTED }, 2500 [CTA_EXPECT_TUPLE] = { .type = NLA_NESTED },
@@ -2439,11 +2525,15 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
2439 int err; 2525 int err;
2440 2526
2441 if (nlh->nlmsg_flags & NLM_F_DUMP) { 2527 if (nlh->nlmsg_flags & NLM_F_DUMP) {
2442 struct netlink_dump_control c = { 2528 if (cda[CTA_EXPECT_MASTER])
2443 .dump = ctnetlink_exp_dump_table, 2529 return ctnetlink_dump_exp_ct(ctnl, skb, nlh, cda);
2444 .done = ctnetlink_exp_done, 2530 else {
2445 }; 2531 struct netlink_dump_control c = {
2446 return netlink_dump_start(ctnl, skb, nlh, &c); 2532 .dump = ctnetlink_exp_dump_table,
2533 .done = ctnetlink_exp_done,
2534 };
2535 return netlink_dump_start(ctnl, skb, nlh, &c);
2536 }
2447 } 2537 }
2448 2538
2449 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone); 2539 err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
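With CTA_EXPECT_MASTER honoured on NLM_F_DUMP requests, userspace can list
the expectations of a single master connection instead of walking the whole
expectation table. A hedged userspace sketch of such a request using
libmnl; the subsystem and attribute constants come from the uapi headers,
while the addresses and ports are made up for illustration:

    #include <libmnl/libmnl.h>
    #include <linux/netfilter/nfnetlink.h>
    #include <linux/netfilter/nfnetlink_conntrack.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>

    static struct nlmsghdr *build_exp_dump(char *buf)
    {
            struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
            struct nfgenmsg *nfg;
            struct nlattr *master, *ip, *proto;

            nlh->nlmsg_type = (NFNL_SUBSYS_CTNETLINK_EXP << 8) |
                              IPCTNL_MSG_EXP_GET;
            nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
            nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg));
            nfg->nfgen_family = AF_INET;
            nfg->version = NFNETLINK_V0;

            /* master tuple: nested exactly like ctnetlink_parse_tuple() */
            master = mnl_attr_nest_start(nlh, CTA_EXPECT_MASTER);
            ip = mnl_attr_nest_start(nlh, CTA_TUPLE_IP);
            mnl_attr_put_u32(nlh, CTA_IP_V4_SRC, inet_addr("192.0.2.1"));
            mnl_attr_put_u32(nlh, CTA_IP_V4_DST, inet_addr("192.0.2.2"));
            mnl_attr_nest_end(nlh, ip);
            proto = mnl_attr_nest_start(nlh, CTA_TUPLE_PROTO);
            mnl_attr_put_u8(nlh, CTA_PROTO_NUM, IPPROTO_TCP);
            mnl_attr_put_u16(nlh, CTA_PROTO_SRC_PORT, htons(21));
            mnl_attr_put_u16(nlh, CTA_PROTO_DST_PORT, htons(32854));
            mnl_attr_nest_end(nlh, proto);
            mnl_attr_nest_end(nlh, master);
            return nlh;
    }
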
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index ba65b2041eb4..a99b6c3427b0 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -456,7 +456,8 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
456 456
457out_invalid: 457out_invalid:
458 if (LOG_INVALID(net, IPPROTO_DCCP)) 458 if (LOG_INVALID(net, IPPROTO_DCCP))
459 nf_log_packet(nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL, msg); 459 nf_log_packet(net, nf_ct_l3num(ct), 0, skb, NULL, NULL,
460 NULL, msg);
460 return false; 461 return false;
461} 462}
462 463
@@ -542,13 +543,13 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
542 543
543 spin_unlock_bh(&ct->lock); 544 spin_unlock_bh(&ct->lock);
544 if (LOG_INVALID(net, IPPROTO_DCCP)) 545 if (LOG_INVALID(net, IPPROTO_DCCP))
545 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 546 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
546 "nf_ct_dccp: invalid packet ignored "); 547 "nf_ct_dccp: invalid packet ignored ");
547 return NF_ACCEPT; 548 return NF_ACCEPT;
548 case CT_DCCP_INVALID: 549 case CT_DCCP_INVALID:
549 spin_unlock_bh(&ct->lock); 550 spin_unlock_bh(&ct->lock);
550 if (LOG_INVALID(net, IPPROTO_DCCP)) 551 if (LOG_INVALID(net, IPPROTO_DCCP))
551 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 552 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
552 "nf_ct_dccp: invalid state transition "); 553 "nf_ct_dccp: invalid state transition ");
553 return -NF_ACCEPT; 554 return -NF_ACCEPT;
554 } 555 }
@@ -613,7 +614,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
613 614
614out_invalid: 615out_invalid:
615 if (LOG_INVALID(net, IPPROTO_DCCP)) 616 if (LOG_INVALID(net, IPPROTO_DCCP))
616 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, msg); 617 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, msg);
617 return -NF_ACCEPT; 618 return -NF_ACCEPT;
618} 619}
619 620
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 83876e9877f1..f021a2076c87 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -720,7 +720,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
720 tn->tcp_be_liberal) 720 tn->tcp_be_liberal)
721 res = true; 721 res = true;
722 if (!res && LOG_INVALID(net, IPPROTO_TCP)) 722 if (!res && LOG_INVALID(net, IPPROTO_TCP))
723 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 723 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
724 "nf_ct_tcp: %s ", 724 "nf_ct_tcp: %s ",
725 before(seq, sender->td_maxend + 1) ? 725 before(seq, sender->td_maxend + 1) ?
726 after(end, sender->td_end - receiver->td_maxwin - 1) ? 726 after(end, sender->td_end - receiver->td_maxwin - 1) ?
@@ -772,7 +772,7 @@ static int tcp_error(struct net *net, struct nf_conn *tmpl,
772 th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph); 772 th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
773 if (th == NULL) { 773 if (th == NULL) {
774 if (LOG_INVALID(net, IPPROTO_TCP)) 774 if (LOG_INVALID(net, IPPROTO_TCP))
775 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 775 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
776 "nf_ct_tcp: short packet "); 776 "nf_ct_tcp: short packet ");
777 return -NF_ACCEPT; 777 return -NF_ACCEPT;
778 } 778 }
@@ -780,7 +780,7 @@ static int tcp_error(struct net *net, struct nf_conn *tmpl,
780 /* Not whole TCP header or malformed packet */ 780 /* Not whole TCP header or malformed packet */
781 if (th->doff*4 < sizeof(struct tcphdr) || tcplen < th->doff*4) { 781 if (th->doff*4 < sizeof(struct tcphdr) || tcplen < th->doff*4) {
782 if (LOG_INVALID(net, IPPROTO_TCP)) 782 if (LOG_INVALID(net, IPPROTO_TCP))
783 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 783 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
784 "nf_ct_tcp: truncated/malformed packet "); 784 "nf_ct_tcp: truncated/malformed packet ");
785 return -NF_ACCEPT; 785 return -NF_ACCEPT;
786 } 786 }
@@ -793,7 +793,7 @@ static int tcp_error(struct net *net, struct nf_conn *tmpl,
793 if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && 793 if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
794 nf_checksum(skb, hooknum, dataoff, IPPROTO_TCP, pf)) { 794 nf_checksum(skb, hooknum, dataoff, IPPROTO_TCP, pf)) {
795 if (LOG_INVALID(net, IPPROTO_TCP)) 795 if (LOG_INVALID(net, IPPROTO_TCP))
796 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 796 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
797 "nf_ct_tcp: bad TCP checksum "); 797 "nf_ct_tcp: bad TCP checksum ");
798 return -NF_ACCEPT; 798 return -NF_ACCEPT;
799 } 799 }
@@ -802,7 +802,7 @@ static int tcp_error(struct net *net, struct nf_conn *tmpl,
802 tcpflags = (tcp_flag_byte(th) & ~(TCPHDR_ECE|TCPHDR_CWR|TCPHDR_PSH)); 802 tcpflags = (tcp_flag_byte(th) & ~(TCPHDR_ECE|TCPHDR_CWR|TCPHDR_PSH));
803 if (!tcp_valid_flags[tcpflags]) { 803 if (!tcp_valid_flags[tcpflags]) {
804 if (LOG_INVALID(net, IPPROTO_TCP)) 804 if (LOG_INVALID(net, IPPROTO_TCP))
805 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 805 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
806 "nf_ct_tcp: invalid TCP flag combination "); 806 "nf_ct_tcp: invalid TCP flag combination ");
807 return -NF_ACCEPT; 807 return -NF_ACCEPT;
808 } 808 }
@@ -949,7 +949,7 @@ static int tcp_packet(struct nf_conn *ct,
949 } 949 }
950 spin_unlock_bh(&ct->lock); 950 spin_unlock_bh(&ct->lock);
951 if (LOG_INVALID(net, IPPROTO_TCP)) 951 if (LOG_INVALID(net, IPPROTO_TCP))
952 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 952 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
953 "nf_ct_tcp: invalid packet ignored in " 953 "nf_ct_tcp: invalid packet ignored in "
954 "state %s ", tcp_conntrack_names[old_state]); 954 "state %s ", tcp_conntrack_names[old_state]);
955 return NF_ACCEPT; 955 return NF_ACCEPT;
@@ -959,7 +959,7 @@ static int tcp_packet(struct nf_conn *ct,
959 dir, get_conntrack_index(th), old_state); 959 dir, get_conntrack_index(th), old_state);
960 spin_unlock_bh(&ct->lock); 960 spin_unlock_bh(&ct->lock);
961 if (LOG_INVALID(net, IPPROTO_TCP)) 961 if (LOG_INVALID(net, IPPROTO_TCP))
962 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 962 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
963 "nf_ct_tcp: invalid state "); 963 "nf_ct_tcp: invalid state ");
964 return -NF_ACCEPT; 964 return -NF_ACCEPT;
965 case TCP_CONNTRACK_CLOSE: 965 case TCP_CONNTRACK_CLOSE:
@@ -969,8 +969,8 @@ static int tcp_packet(struct nf_conn *ct,
969 /* Invalid RST */ 969 /* Invalid RST */
970 spin_unlock_bh(&ct->lock); 970 spin_unlock_bh(&ct->lock);
971 if (LOG_INVALID(net, IPPROTO_TCP)) 971 if (LOG_INVALID(net, IPPROTO_TCP))
972 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 972 nf_log_packet(net, pf, 0, skb, NULL, NULL,
973 "nf_ct_tcp: invalid RST "); 973 NULL, "nf_ct_tcp: invalid RST ");
974 return -NF_ACCEPT; 974 return -NF_ACCEPT;
975 } 975 }
976 if (index == TCP_RST_SET 976 if (index == TCP_RST_SET
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 59623cc56e8d..fee43228e115 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -119,7 +119,7 @@ static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
119 hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr); 119 hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
120 if (hdr == NULL) { 120 if (hdr == NULL) {
121 if (LOG_INVALID(net, IPPROTO_UDP)) 121 if (LOG_INVALID(net, IPPROTO_UDP))
122 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 122 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
123 "nf_ct_udp: short packet "); 123 "nf_ct_udp: short packet ");
124 return -NF_ACCEPT; 124 return -NF_ACCEPT;
125 } 125 }
@@ -127,7 +127,7 @@ static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
127 /* Truncated/malformed packets */ 127 /* Truncated/malformed packets */
128 if (ntohs(hdr->len) > udplen || ntohs(hdr->len) < sizeof(*hdr)) { 128 if (ntohs(hdr->len) > udplen || ntohs(hdr->len) < sizeof(*hdr)) {
129 if (LOG_INVALID(net, IPPROTO_UDP)) 129 if (LOG_INVALID(net, IPPROTO_UDP))
130 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 130 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
131 "nf_ct_udp: truncated/malformed packet "); 131 "nf_ct_udp: truncated/malformed packet ");
132 return -NF_ACCEPT; 132 return -NF_ACCEPT;
133 } 133 }
@@ -143,7 +143,7 @@ static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
143 if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && 143 if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
144 nf_checksum(skb, hooknum, dataoff, IPPROTO_UDP, pf)) { 144 nf_checksum(skb, hooknum, dataoff, IPPROTO_UDP, pf)) {
145 if (LOG_INVALID(net, IPPROTO_UDP)) 145 if (LOG_INVALID(net, IPPROTO_UDP))
146 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 146 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
147 "nf_ct_udp: bad UDP checksum "); 147 "nf_ct_udp: bad UDP checksum ");
148 return -NF_ACCEPT; 148 return -NF_ACCEPT;
149 } 149 }
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
index ca969f6273f7..2750e6c69f82 100644
--- a/net/netfilter/nf_conntrack_proto_udplite.c
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -131,7 +131,7 @@ static int udplite_error(struct net *net, struct nf_conn *tmpl,
131 hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr); 131 hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
132 if (hdr == NULL) { 132 if (hdr == NULL) {
133 if (LOG_INVALID(net, IPPROTO_UDPLITE)) 133 if (LOG_INVALID(net, IPPROTO_UDPLITE))
134 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 134 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
135 "nf_ct_udplite: short packet "); 135 "nf_ct_udplite: short packet ");
136 return -NF_ACCEPT; 136 return -NF_ACCEPT;
137 } 137 }
@@ -141,7 +141,7 @@ static int udplite_error(struct net *net, struct nf_conn *tmpl,
141 cscov = udplen; 141 cscov = udplen;
142 else if (cscov < sizeof(*hdr) || cscov > udplen) { 142 else if (cscov < sizeof(*hdr) || cscov > udplen) {
143 if (LOG_INVALID(net, IPPROTO_UDPLITE)) 143 if (LOG_INVALID(net, IPPROTO_UDPLITE))
144 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 144 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
145 "nf_ct_udplite: invalid checksum coverage "); 145 "nf_ct_udplite: invalid checksum coverage ");
146 return -NF_ACCEPT; 146 return -NF_ACCEPT;
147 } 147 }
@@ -149,7 +149,7 @@ static int udplite_error(struct net *net, struct nf_conn *tmpl,
149 /* UDPLITE mandates checksums */ 149 /* UDPLITE mandates checksums */
150 if (!hdr->check) { 150 if (!hdr->check) {
151 if (LOG_INVALID(net, IPPROTO_UDPLITE)) 151 if (LOG_INVALID(net, IPPROTO_UDPLITE))
152 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 152 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
153 "nf_ct_udplite: checksum missing "); 153 "nf_ct_udplite: checksum missing ");
154 return -NF_ACCEPT; 154 return -NF_ACCEPT;
155 } 155 }
@@ -159,7 +159,7 @@ static int udplite_error(struct net *net, struct nf_conn *tmpl,
159 nf_checksum_partial(skb, hooknum, dataoff, cscov, IPPROTO_UDP, 159 nf_checksum_partial(skb, hooknum, dataoff, cscov, IPPROTO_UDP,
160 pf)) { 160 pf)) {
161 if (LOG_INVALID(net, IPPROTO_UDPLITE)) 161 if (LOG_INVALID(net, IPPROTO_UDPLITE))
162 nf_log_packet(pf, 0, skb, NULL, NULL, NULL, 162 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
163 "nf_ct_udplite: bad UDPLite checksum "); 163 "nf_ct_udplite: bad UDPLite checksum ");
164 return -NF_ACCEPT; 164 return -NF_ACCEPT;
165 } 165 }
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index fedee3943661..ebb67d33bd63 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -545,16 +545,20 @@ out_init:
545 return ret; 545 return ret;
546} 546}
547 547
548static void nf_conntrack_pernet_exit(struct net *net) 548static void nf_conntrack_pernet_exit(struct list_head *net_exit_list)
549{ 549{
550 nf_conntrack_standalone_fini_sysctl(net); 550 struct net *net;
551 nf_conntrack_standalone_fini_proc(net); 551
552 nf_conntrack_cleanup_net(net); 552 list_for_each_entry(net, net_exit_list, exit_list) {
553 nf_conntrack_standalone_fini_sysctl(net);
554 nf_conntrack_standalone_fini_proc(net);
555 }
556 nf_conntrack_cleanup_net_list(net_exit_list);
553} 557}
554 558
555static struct pernet_operations nf_conntrack_net_ops = { 559static struct pernet_operations nf_conntrack_net_ops = {
556 .init = nf_conntrack_pernet_init, 560 .init = nf_conntrack_pernet_init,
557 .exit = nf_conntrack_pernet_exit, 561 .exit_batch = nf_conntrack_pernet_exit,
558}; 562};
559 563
560static int __init nf_conntrack_standalone_init(void) 564static int __init nf_conntrack_standalone_init(void)
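Switching nf_conntrack_net_ops from .exit to .exit_batch means the
conntrack core sees all namespaces that die in one cleanup_net() pass at
once, so the expensive synchronize_net() and the i_see_dead_people retry
loop in nf_conntrack_cleanup_net_list() run once per batch instead of once
per namespace. The shape of such an operations block, as a hypothetical
stand-alone example:

    static void __net_exit demo_exit_batch(struct list_head *net_exit_list)
    {
            struct net *net;

            /* one pass over every namespace being torn down */
            list_for_each_entry(net, net_exit_list, exit_list)
                    pr_debug("cleaning up netns %p\n", net);
    }

    static struct pernet_operations demo_net_ops = {
            .exit_batch = demo_exit_batch,
    };
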
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 9e312695c818..388656d5a9ec 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -16,7 +16,6 @@
16#define NF_LOG_PREFIXLEN 128 16#define NF_LOG_PREFIXLEN 128
17#define NFLOGGER_NAME_LEN 64 17#define NFLOGGER_NAME_LEN 64
18 18
19static const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO] __read_mostly;
20static struct list_head nf_loggers_l[NFPROTO_NUMPROTO] __read_mostly; 19static struct list_head nf_loggers_l[NFPROTO_NUMPROTO] __read_mostly;
21static DEFINE_MUTEX(nf_log_mutex); 20static DEFINE_MUTEX(nf_log_mutex);
22 21
@@ -32,13 +31,46 @@ static struct nf_logger *__find_logger(int pf, const char *str_logger)
32 return NULL; 31 return NULL;
33} 32}
34 33
34void nf_log_set(struct net *net, u_int8_t pf, const struct nf_logger *logger)
35{
36 const struct nf_logger *log;
37
38 if (pf == NFPROTO_UNSPEC)
39 return;
40
41 mutex_lock(&nf_log_mutex);
42 log = rcu_dereference_protected(net->nf.nf_loggers[pf],
43 lockdep_is_held(&nf_log_mutex));
44 if (log == NULL)
45 rcu_assign_pointer(net->nf.nf_loggers[pf], logger);
46
47 mutex_unlock(&nf_log_mutex);
48}
49EXPORT_SYMBOL(nf_log_set);
50
51void nf_log_unset(struct net *net, const struct nf_logger *logger)
52{
53 int i;
54 const struct nf_logger *log;
55
56 mutex_lock(&nf_log_mutex);
57 for (i = 0; i < NFPROTO_NUMPROTO; i++) {
58 log = rcu_dereference_protected(net->nf.nf_loggers[i],
59 lockdep_is_held(&nf_log_mutex));
60 if (log == logger)
61 RCU_INIT_POINTER(net->nf.nf_loggers[i], NULL);
62 }
63 mutex_unlock(&nf_log_mutex);
64 synchronize_rcu();
65}
66EXPORT_SYMBOL(nf_log_unset);
67
35/* return EEXIST if the same logger is registered, 0 on success. */ 68/* return EEXIST if the same logger is registered, 0 on success. */
36int nf_log_register(u_int8_t pf, struct nf_logger *logger) 69int nf_log_register(u_int8_t pf, struct nf_logger *logger)
37{ 70{
38 const struct nf_logger *llog;
39 int i; 71 int i;
40 72
41 if (pf >= ARRAY_SIZE(nf_loggers)) 73 if (pf >= ARRAY_SIZE(init_net.nf.nf_loggers))
42 return -EINVAL; 74 return -EINVAL;
43 75
44 for (i = 0; i < ARRAY_SIZE(logger->list); i++) 76 for (i = 0; i < ARRAY_SIZE(logger->list); i++)
@@ -52,10 +84,6 @@ int nf_log_register(u_int8_t pf, struct nf_logger *logger)
52 } else { 84 } else {
53 /* register at end of list to honor first register win */ 85 /* register at end of list to honor first register win */
54 list_add_tail(&logger->list[pf], &nf_loggers_l[pf]); 86 list_add_tail(&logger->list[pf], &nf_loggers_l[pf]);
55 llog = rcu_dereference_protected(nf_loggers[pf],
56 lockdep_is_held(&nf_log_mutex));
57 if (llog == NULL)
58 rcu_assign_pointer(nf_loggers[pf], logger);
59 } 87 }
60 88
61 mutex_unlock(&nf_log_mutex); 89 mutex_unlock(&nf_log_mutex);
@@ -66,49 +94,43 @@ EXPORT_SYMBOL(nf_log_register);
66 94
67void nf_log_unregister(struct nf_logger *logger) 95void nf_log_unregister(struct nf_logger *logger)
68{ 96{
69 const struct nf_logger *c_logger;
70 int i; 97 int i;
71 98
72 mutex_lock(&nf_log_mutex); 99 mutex_lock(&nf_log_mutex);
73 for (i = 0; i < ARRAY_SIZE(nf_loggers); i++) { 100 for (i = 0; i < NFPROTO_NUMPROTO; i++)
74 c_logger = rcu_dereference_protected(nf_loggers[i],
75 lockdep_is_held(&nf_log_mutex));
76 if (c_logger == logger)
77 RCU_INIT_POINTER(nf_loggers[i], NULL);
78 list_del(&logger->list[i]); 101 list_del(&logger->list[i]);
79 }
80 mutex_unlock(&nf_log_mutex); 102 mutex_unlock(&nf_log_mutex);
81
82 synchronize_rcu();
83} 103}
84EXPORT_SYMBOL(nf_log_unregister); 104EXPORT_SYMBOL(nf_log_unregister);
85 105
86int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger) 106int nf_log_bind_pf(struct net *net, u_int8_t pf,
107 const struct nf_logger *logger)
87{ 108{
88 if (pf >= ARRAY_SIZE(nf_loggers)) 109 if (pf >= ARRAY_SIZE(net->nf.nf_loggers))
89 return -EINVAL; 110 return -EINVAL;
90 mutex_lock(&nf_log_mutex); 111 mutex_lock(&nf_log_mutex);
91 if (__find_logger(pf, logger->name) == NULL) { 112 if (__find_logger(pf, logger->name) == NULL) {
92 mutex_unlock(&nf_log_mutex); 113 mutex_unlock(&nf_log_mutex);
93 return -ENOENT; 114 return -ENOENT;
94 } 115 }
95 rcu_assign_pointer(nf_loggers[pf], logger); 116 rcu_assign_pointer(net->nf.nf_loggers[pf], logger);
96 mutex_unlock(&nf_log_mutex); 117 mutex_unlock(&nf_log_mutex);
97 return 0; 118 return 0;
98} 119}
99EXPORT_SYMBOL(nf_log_bind_pf); 120EXPORT_SYMBOL(nf_log_bind_pf);
100 121
101void nf_log_unbind_pf(u_int8_t pf) 122void nf_log_unbind_pf(struct net *net, u_int8_t pf)
102{ 123{
103 if (pf >= ARRAY_SIZE(nf_loggers)) 124 if (pf >= ARRAY_SIZE(net->nf.nf_loggers))
104 return; 125 return;
105 mutex_lock(&nf_log_mutex); 126 mutex_lock(&nf_log_mutex);
106 RCU_INIT_POINTER(nf_loggers[pf], NULL); 127 RCU_INIT_POINTER(net->nf.nf_loggers[pf], NULL);
107 mutex_unlock(&nf_log_mutex); 128 mutex_unlock(&nf_log_mutex);
108} 129}
109EXPORT_SYMBOL(nf_log_unbind_pf); 130EXPORT_SYMBOL(nf_log_unbind_pf);
110 131
111void nf_log_packet(u_int8_t pf, 132void nf_log_packet(struct net *net,
133 u_int8_t pf,
112 unsigned int hooknum, 134 unsigned int hooknum,
113 const struct sk_buff *skb, 135 const struct sk_buff *skb,
114 const struct net_device *in, 136 const struct net_device *in,
@@ -121,7 +143,7 @@ void nf_log_packet(u_int8_t pf,
121 const struct nf_logger *logger; 143 const struct nf_logger *logger;
122 144
123 rcu_read_lock(); 145 rcu_read_lock();
124 logger = rcu_dereference(nf_loggers[pf]); 146 logger = rcu_dereference(net->nf.nf_loggers[pf]);
125 if (logger) { 147 if (logger) {
126 va_start(args, fmt); 148 va_start(args, fmt);
127 vsnprintf(prefix, sizeof(prefix), fmt, args); 149 vsnprintf(prefix, sizeof(prefix), fmt, args);
@@ -135,9 +157,11 @@ EXPORT_SYMBOL(nf_log_packet);
135#ifdef CONFIG_PROC_FS 157#ifdef CONFIG_PROC_FS
136static void *seq_start(struct seq_file *seq, loff_t *pos) 158static void *seq_start(struct seq_file *seq, loff_t *pos)
137{ 159{
160 struct net *net = seq_file_net(seq);
161
138 mutex_lock(&nf_log_mutex); 162 mutex_lock(&nf_log_mutex);
139 163
140 if (*pos >= ARRAY_SIZE(nf_loggers)) 164 if (*pos >= ARRAY_SIZE(net->nf.nf_loggers))
141 return NULL; 165 return NULL;
142 166
143 return pos; 167 return pos;
@@ -145,9 +169,11 @@ static void *seq_start(struct seq_file *seq, loff_t *pos)
145 169
146static void *seq_next(struct seq_file *s, void *v, loff_t *pos) 170static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
147{ 171{
172 struct net *net = seq_file_net(s);
173
148 (*pos)++; 174 (*pos)++;
149 175
150 if (*pos >= ARRAY_SIZE(nf_loggers)) 176 if (*pos >= ARRAY_SIZE(net->nf.nf_loggers))
151 return NULL; 177 return NULL;
152 178
153 return pos; 179 return pos;
@@ -164,8 +190,9 @@ static int seq_show(struct seq_file *s, void *v)
164 const struct nf_logger *logger; 190 const struct nf_logger *logger;
165 struct nf_logger *t; 191 struct nf_logger *t;
166 int ret; 192 int ret;
193 struct net *net = seq_file_net(s);
167 194
168 logger = rcu_dereference_protected(nf_loggers[*pos], 195 logger = rcu_dereference_protected(net->nf.nf_loggers[*pos],
169 lockdep_is_held(&nf_log_mutex)); 196 lockdep_is_held(&nf_log_mutex));
170 197
171 if (!logger) 198 if (!logger)
@@ -199,7 +226,8 @@ static const struct seq_operations nflog_seq_ops = {
199 226
200static int nflog_open(struct inode *inode, struct file *file) 227static int nflog_open(struct inode *inode, struct file *file)
201{ 228{
202 return seq_open(file, &nflog_seq_ops); 229 return seq_open_net(inode, file, &nflog_seq_ops,
230 sizeof(struct seq_net_private));
203} 231}
204 232
205static const struct file_operations nflog_file_ops = { 233static const struct file_operations nflog_file_ops = {
@@ -207,7 +235,7 @@ static const struct file_operations nflog_file_ops = {
207 .open = nflog_open, 235 .open = nflog_open,
208 .read = seq_read, 236 .read = seq_read,
209 .llseek = seq_lseek, 237 .llseek = seq_lseek,
210 .release = seq_release, 238 .release = seq_release_net,
211}; 239};
212 240
213 241
@@ -216,7 +244,6 @@ static const struct file_operations nflog_file_ops = {
216#ifdef CONFIG_SYSCTL 244#ifdef CONFIG_SYSCTL
217static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3]; 245static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
218static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1]; 246static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
219static struct ctl_table_header *nf_log_dir_header;
220 247
221static int nf_log_proc_dostring(ctl_table *table, int write, 248static int nf_log_proc_dostring(ctl_table *table, int write,
222 void __user *buffer, size_t *lenp, loff_t *ppos) 249 void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -226,6 +253,7 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
226 size_t size = *lenp; 253 size_t size = *lenp;
227 int r = 0; 254 int r = 0;
228 int tindex = (unsigned long)table->extra1; 255 int tindex = (unsigned long)table->extra1;
256 struct net *net = current->nsproxy->net_ns;
229 257
230 if (write) { 258 if (write) {
231 if (size > sizeof(buf)) 259 if (size > sizeof(buf))
@@ -234,7 +262,7 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
234 return -EFAULT; 262 return -EFAULT;
235 263
236 if (!strcmp(buf, "NONE")) { 264 if (!strcmp(buf, "NONE")) {
237 nf_log_unbind_pf(tindex); 265 nf_log_unbind_pf(net, tindex);
238 return 0; 266 return 0;
239 } 267 }
240 mutex_lock(&nf_log_mutex); 268 mutex_lock(&nf_log_mutex);
@@ -243,11 +271,11 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
243 mutex_unlock(&nf_log_mutex); 271 mutex_unlock(&nf_log_mutex);
244 return -ENOENT; 272 return -ENOENT;
245 } 273 }
246 rcu_assign_pointer(nf_loggers[tindex], logger); 274 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
247 mutex_unlock(&nf_log_mutex); 275 mutex_unlock(&nf_log_mutex);
248 } else { 276 } else {
249 mutex_lock(&nf_log_mutex); 277 mutex_lock(&nf_log_mutex);
250 logger = rcu_dereference_protected(nf_loggers[tindex], 278 logger = rcu_dereference_protected(net->nf.nf_loggers[tindex],
251 lockdep_is_held(&nf_log_mutex)); 279 lockdep_is_held(&nf_log_mutex));
252 if (!logger) 280 if (!logger)
253 table->data = "NONE"; 281 table->data = "NONE";
@@ -260,49 +288,111 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
260 return r; 288 return r;
261} 289}
262 290
263static __init int netfilter_log_sysctl_init(void) 291static int netfilter_log_sysctl_init(struct net *net)
264{ 292{
265 int i; 293 int i;
266 294 struct ctl_table *table;
267 for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) { 295
268 snprintf(nf_log_sysctl_fnames[i-NFPROTO_UNSPEC], 3, "%d", i); 296 table = nf_log_sysctl_table;
269 nf_log_sysctl_table[i].procname = 297 if (!net_eq(net, &init_net)) {
270 nf_log_sysctl_fnames[i-NFPROTO_UNSPEC]; 298 table = kmemdup(nf_log_sysctl_table,
271 nf_log_sysctl_table[i].data = NULL; 299 sizeof(nf_log_sysctl_table),
272 nf_log_sysctl_table[i].maxlen = 300 GFP_KERNEL);
273 NFLOGGER_NAME_LEN * sizeof(char); 301 if (!table)
274 nf_log_sysctl_table[i].mode = 0644; 302 goto err_alloc;
275 nf_log_sysctl_table[i].proc_handler = nf_log_proc_dostring; 303 } else {
276 nf_log_sysctl_table[i].extra1 = (void *)(unsigned long) i; 304 for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) {
305 snprintf(nf_log_sysctl_fnames[i],
306 3, "%d", i);
307 nf_log_sysctl_table[i].procname =
308 nf_log_sysctl_fnames[i];
309 nf_log_sysctl_table[i].data = NULL;
310 nf_log_sysctl_table[i].maxlen =
311 NFLOGGER_NAME_LEN * sizeof(char);
312 nf_log_sysctl_table[i].mode = 0644;
313 nf_log_sysctl_table[i].proc_handler =
314 nf_log_proc_dostring;
315 nf_log_sysctl_table[i].extra1 =
316 (void *)(unsigned long) i;
317 }
277 } 318 }
278 319
279 nf_log_dir_header = register_net_sysctl(&init_net, "net/netfilter/nf_log", 320 net->nf.nf_log_dir_header = register_net_sysctl(net,
280 nf_log_sysctl_table); 321 "net/netfilter/nf_log",
281 if (!nf_log_dir_header) 322 table);
282 return -ENOMEM; 323 if (!net->nf.nf_log_dir_header)
324 goto err_reg;
283 325
284 return 0; 326 return 0;
327
328err_reg:
329 if (!net_eq(net, &init_net))
330 kfree(table);
331err_alloc:
332 return -ENOMEM;
333}
334
335static void netfilter_log_sysctl_exit(struct net *net)
336{
337 struct ctl_table *table;
338
339 table = net->nf.nf_log_dir_header->ctl_table_arg;
340 unregister_net_sysctl_table(net->nf.nf_log_dir_header);
341 if (!net_eq(net, &init_net))
342 kfree(table);
285} 343}
286#else 344#else
287static __init int netfilter_log_sysctl_init(void) 345static int netfilter_log_sysctl_init(struct net *net)
288{ 346{
289 return 0; 347 return 0;
290} 348}
349
350static void netfilter_log_sysctl_exit(struct net *net)
351{
352}
291#endif /* CONFIG_SYSCTL */ 353#endif /* CONFIG_SYSCTL */
292 354
293int __init netfilter_log_init(void) 355static int __net_init nf_log_net_init(struct net *net)
294{ 356{
295 int i, r; 357 int ret = -ENOMEM;
358
296#ifdef CONFIG_PROC_FS 359#ifdef CONFIG_PROC_FS
297 if (!proc_create("nf_log", S_IRUGO, 360 if (!proc_create("nf_log", S_IRUGO,
298 proc_net_netfilter, &nflog_file_ops)) 361 net->nf.proc_netfilter, &nflog_file_ops))
299 return -1; 362 return ret;
300#endif 363#endif
364 ret = netfilter_log_sysctl_init(net);
365 if (ret < 0)
366 goto out_sysctl;
367
368 return 0;
301 369
302 /* Errors will trigger panic, unroll on error is unnecessary. */ 370out_sysctl:
303 r = netfilter_log_sysctl_init(); 371 /* For init_net: errors will trigger panic, don't unroll on error. */
304 if (r < 0) 372 if (!net_eq(net, &init_net))
305 return r; 373 remove_proc_entry("nf_log", net->nf.proc_netfilter);
374
375 return ret;
376}
377
378static void __net_exit nf_log_net_exit(struct net *net)
379{
380 netfilter_log_sysctl_exit(net);
381 remove_proc_entry("nf_log", net->nf.proc_netfilter);
382}
383
384static struct pernet_operations nf_log_net_ops = {
385 .init = nf_log_net_init,
386 .exit = nf_log_net_exit,
387};
388
389int __init netfilter_log_init(void)
390{
391 int i, ret;
392
393 ret = register_pernet_subsys(&nf_log_net_ops);
394 if (ret < 0)
395 return ret;
306 396
307 for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++) 397 for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++)
308 INIT_LIST_HEAD(&(nf_loggers_l[i])); 398 INIT_LIST_HEAD(&(nf_loggers_l[i]));
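Because the logger slots now live in struct netns_nf, registration splits
in two: nf_log_register() still wires a backend into the global candidate
lists, while the new nf_log_set()/nf_log_unset() bind and unbind it per
namespace, typically from the backend's own pernet init/exit. A hedged
sketch of a backend using that pairing (the nf_logger layout and the logfn
signature are assumptions based on this era's nf_log.h; only the
nf_log_set()/nf_log_unset() signatures come from the hunk above):

    static void demo_logfn(u_int8_t pf, unsigned int hooknum,
                           const struct sk_buff *skb,
                           const struct net_device *in,
                           const struct net_device *out,
                           const struct nf_loginfo *li, const char *prefix)
    {
            pr_info("%s: packet on hook %u\n", prefix, hooknum);
    }

    static struct nf_logger demo_logger = {
            .name   = "demo-log",
            .logfn  = demo_logfn,
            .me     = THIS_MODULE,
    };

    static int __net_init demo_log_net_init(struct net *net)
    {
            nf_log_set(net, NFPROTO_IPV4, &demo_logger);
            return 0;
    }

    static void __net_exit demo_log_net_exit(struct net *net)
    {
            nf_log_unset(net, &demo_logger);
    }
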
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 0b1b32cda307..bc4c499adb13 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -24,10 +24,9 @@
24#include <linux/skbuff.h> 24#include <linux/skbuff.h>
25#include <asm/uaccess.h> 25#include <asm/uaccess.h>
26#include <net/sock.h> 26#include <net/sock.h>
27#include <net/netlink.h>
28#include <linux/init.h> 27#include <linux/init.h>
29 28
30#include <linux/netlink.h> 29#include <net/netlink.h>
31#include <linux/netfilter/nfnetlink.h> 30#include <linux/netfilter/nfnetlink.h>
32 31
33MODULE_LICENSE("GPL"); 32MODULE_LICENSE("GPL");
@@ -144,7 +143,7 @@ static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
144 return -EPERM; 143 return -EPERM;
145 144
146 /* All the messages must at least contain nfgenmsg */ 145 /* All the messages must at least contain nfgenmsg */
147 if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(struct nfgenmsg))) 146 if (nlmsg_len(nlh) < sizeof(struct nfgenmsg))
148 return 0; 147 return 0;
149 148
150 type = nlh->nlmsg_type; 149 type = nlh->nlmsg_type;
@@ -172,7 +171,7 @@ replay:
172 } 171 }
173 172
174 { 173 {
175 int min_len = NLMSG_SPACE(sizeof(struct nfgenmsg)); 174 int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
176 u_int8_t cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type); 175 u_int8_t cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
177 struct nlattr *cda[ss->cb[cb_id].attr_count + 1]; 176 struct nlattr *cda[ss->cb[cb_id].attr_count + 1];
178 struct nlattr *attr = (void *)nlh + min_len; 177 struct nlattr *attr = (void *)nlh + min_len;
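The rewritten length check is the cleaner of two equivalent forms:
nlmsg_len() yields the payload size with the netlink header already
subtracted, whereas the old code compared the full message length against
NLMSG_LENGTH(payload). A self-contained restatement of the new test (demo
name):

    /* true when the message carries at least a struct nfgenmsg payload */
    static bool nfnl_has_nfgenmsg(const struct nlmsghdr *nlh)
    {
            return nlmsg_len(nlh) >= (int)sizeof(struct nfgenmsg);
    }
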
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index f248db572972..1a0be2af1dd8 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -19,7 +19,7 @@
 #include <linux/ipv6.h>
 #include <linux/netdevice.h>
 #include <linux/netfilter.h>
-#include <linux/netlink.h>
+#include <net/netlink.h>
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_log.h>
 #include <linux/spinlock.h>
@@ -32,6 +32,7 @@
 #include <linux/slab.h>
 #include <net/sock.h>
 #include <net/netfilter/nf_log.h>
+#include <net/netns/generic.h>
 #include <net/netfilter/nfnetlink_log.h>
 
 #include <linux/atomic.h>
@@ -56,6 +57,7 @@ struct nfulnl_instance {
 	unsigned int qlen;		/* number of nlmsgs in skb */
 	struct sk_buff *skb;		/* pre-allocatd skb */
 	struct timer_list timer;
+	struct net *net;
 	struct user_namespace *peer_user_ns;	/* User namespace of the peer process */
 	int peer_portid;		/* PORTID of the peer process */
 
@@ -71,25 +73,34 @@ struct nfulnl_instance {
 	struct rcu_head rcu;
 };
 
-static DEFINE_SPINLOCK(instances_lock);
-static atomic_t global_seq;
-
 #define INSTANCE_BUCKETS	16
-static struct hlist_head instance_table[INSTANCE_BUCKETS];
 static unsigned int hash_init;
 
+static int nfnl_log_net_id __read_mostly;
+
+struct nfnl_log_net {
+	spinlock_t instances_lock;
+	struct hlist_head instance_table[INSTANCE_BUCKETS];
+	atomic_t global_seq;
+};
+
+static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
+{
+	return net_generic(net, nfnl_log_net_id);
+}
+
 static inline u_int8_t instance_hashfn(u_int16_t group_num)
 {
 	return ((group_num & 0xff) % INSTANCE_BUCKETS);
 }
 
 static struct nfulnl_instance *
-__instance_lookup(u_int16_t group_num)
+__instance_lookup(struct nfnl_log_net *log, u_int16_t group_num)
 {
 	struct hlist_head *head;
 	struct nfulnl_instance *inst;
 
-	head = &instance_table[instance_hashfn(group_num)];
+	head = &log->instance_table[instance_hashfn(group_num)];
 	hlist_for_each_entry_rcu(inst, head, hlist) {
 		if (inst->group_num == group_num)
 			return inst;
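
All former globals (lock, hash table, global sequence counter) now live in one per-namespace blob found through net_generic(). A condensed sketch of the pattern, with hypothetical names; the netns core allocates .size bytes per namespace and fills in the id at registration time:

#include <net/net_namespace.h>
#include <net/netns/generic.h>

static int example_net_id __read_mostly;

struct example_net {
	spinlock_t lock;		/* protects table */
	struct hlist_head table[16];
};

static struct example_net *example_pernet(struct net *net)
{
	/* O(1) lookup of this module's per-namespace state */
	return net_generic(net, example_net_id);
}

static struct pernet_operations example_net_ops = {
	.id   = &example_net_id,	/* filled in by register_pernet_subsys() */
	.size = sizeof(struct example_net), /* allocated once per namespace */
};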
@@ -104,12 +115,12 @@ instance_get(struct nfulnl_instance *inst)
 }
 
 static struct nfulnl_instance *
-instance_lookup_get(u_int16_t group_num)
+instance_lookup_get(struct nfnl_log_net *log, u_int16_t group_num)
 {
 	struct nfulnl_instance *inst;
 
 	rcu_read_lock_bh();
-	inst = __instance_lookup(group_num);
+	inst = __instance_lookup(log, group_num);
 	if (inst && !atomic_inc_not_zero(&inst->use))
 		inst = NULL;
 	rcu_read_unlock_bh();
@@ -119,7 +130,11 @@ instance_lookup_get(u_int16_t group_num)
 
 static void nfulnl_instance_free_rcu(struct rcu_head *head)
 {
-	kfree(container_of(head, struct nfulnl_instance, rcu));
+	struct nfulnl_instance *inst =
+		container_of(head, struct nfulnl_instance, rcu);
+
+	put_net(inst->net);
+	kfree(inst);
 	module_put(THIS_MODULE);
 }
 
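Each instance now pins its namespace (get_net() in instance_create() below) and drops the reference only in the RCU callback, so a lockless reader that found the instance under rcu_read_lock_bh() can keep dereferencing inst->net until the grace period ends. The lifetime rule in isolation, with a hypothetical struct:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <net/net_namespace.h>

struct item {
	struct net *net;	/* pinned with get_net() at creation */
	struct rcu_head rcu;
};

static void item_free_rcu(struct rcu_head *head)
{
	struct item *it = container_of(head, struct item, rcu);

	put_net(it->net);	/* release the namespace reference last */
	kfree(it);
}

static void item_destroy(struct item *it)
{
	/* unlink it from all RCU-visible structures first, then: */
	call_rcu(&it->rcu, item_free_rcu);
}
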
@@ -133,13 +148,15 @@ instance_put(struct nfulnl_instance *inst)
 static void nfulnl_timer(unsigned long data);
 
 static struct nfulnl_instance *
-instance_create(u_int16_t group_num, int portid, struct user_namespace *user_ns)
+instance_create(struct net *net, u_int16_t group_num,
+		int portid, struct user_namespace *user_ns)
 {
 	struct nfulnl_instance *inst;
+	struct nfnl_log_net *log = nfnl_log_pernet(net);
 	int err;
 
-	spin_lock_bh(&instances_lock);
-	if (__instance_lookup(group_num)) {
+	spin_lock_bh(&log->instances_lock);
+	if (__instance_lookup(log, group_num)) {
 		err = -EEXIST;
 		goto out_unlock;
 	}
@@ -163,6 +180,7 @@ instance_create(u_int16_t group_num, int portid, struct user_namespace *user_ns)
 
 	setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst);
 
+	inst->net = get_net(net);
 	inst->peer_user_ns = user_ns;
 	inst->peer_portid = portid;
 	inst->group_num = group_num;
@@ -174,14 +192,15 @@ instance_create(u_int16_t group_num, int portid, struct user_namespace *user_ns)
 	inst->copy_range = NFULNL_COPY_RANGE_MAX;
 
 	hlist_add_head_rcu(&inst->hlist,
-		       &instance_table[instance_hashfn(group_num)]);
+		       &log->instance_table[instance_hashfn(group_num)]);
+
 
-	spin_unlock_bh(&instances_lock);
+	spin_unlock_bh(&log->instances_lock);
 
 	return inst;
 
 out_unlock:
-	spin_unlock_bh(&instances_lock);
+	spin_unlock_bh(&log->instances_lock);
 	return ERR_PTR(err);
 }
 
@@ -210,11 +229,12 @@ __instance_destroy(struct nfulnl_instance *inst)
 }
 
 static inline void
-instance_destroy(struct nfulnl_instance *inst)
+instance_destroy(struct nfnl_log_net *log,
+		 struct nfulnl_instance *inst)
 {
-	spin_lock_bh(&instances_lock);
+	spin_lock_bh(&log->instances_lock);
 	__instance_destroy(inst);
-	spin_unlock_bh(&instances_lock);
+	spin_unlock_bh(&log->instances_lock);
 }
 
 static int
@@ -336,7 +356,7 @@ __nfulnl_send(struct nfulnl_instance *inst)
 		if (!nlh)
 			goto out;
 	}
-	status = nfnetlink_unicast(inst->skb, &init_net, inst->peer_portid,
+	status = nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid,
 				   MSG_DONTWAIT);
 
 	inst->qlen = 0;
@@ -370,7 +390,8 @@ nfulnl_timer(unsigned long data)
 /* This is an inline function, we don't really care about a long
  * list of arguments */
 static inline int
-__build_packet_message(struct nfulnl_instance *inst,
+__build_packet_message(struct nfnl_log_net *log,
+			struct nfulnl_instance *inst,
 			const struct sk_buff *skb,
 			unsigned int data_len,
 			u_int8_t pf,
@@ -536,7 +557,7 @@ __build_packet_message(struct nfulnl_instance *inst,
 	/* global sequence number */
 	if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
 	    nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
-			 htonl(atomic_inc_return(&global_seq))))
+			 htonl(atomic_inc_return(&log->global_seq))))
 		goto nla_put_failure;
 
 	if (data_len) {
@@ -592,13 +613,15 @@ nfulnl_log_packet(u_int8_t pf,
 	const struct nf_loginfo *li;
 	unsigned int qthreshold;
 	unsigned int plen;
+	struct net *net = dev_net(in ? in : out);
+	struct nfnl_log_net *log = nfnl_log_pernet(net);
 
 	if (li_user && li_user->type == NF_LOG_TYPE_ULOG)
 		li = li_user;
 	else
 		li = &default_loginfo;
 
-	inst = instance_lookup_get(li->u.ulog.group);
+	inst = instance_lookup_get(log, li->u.ulog.group);
 	if (!inst)
 		return;
 
@@ -609,7 +632,7 @@ nfulnl_log_packet(u_int8_t pf,
 	/* FIXME: do we want to make the size calculation conditional based on
 	 * what is actually present? way more branches and checks, but more
 	 * memory efficient... */
-	size =    NLMSG_SPACE(sizeof(struct nfgenmsg))
+	size =    nlmsg_total_size(sizeof(struct nfgenmsg))
 		+ nla_total_size(sizeof(struct nfulnl_msg_packet_hdr))
 		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
 		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
@@ -680,7 +703,7 @@ nfulnl_log_packet(u_int8_t pf,
 
 	inst->qlen++;
 
-	__build_packet_message(inst, skb, data_len, pf,
+	__build_packet_message(log, inst, skb, data_len, pf,
 				hooknum, in, out, prefix, plen);
 
 	if (inst->qlen >= qthreshold)
@@ -709,24 +732,24 @@ nfulnl_rcv_nl_event(struct notifier_block *this,
 		   unsigned long event, void *ptr)
 {
 	struct netlink_notify *n = ptr;
+	struct nfnl_log_net *log = nfnl_log_pernet(n->net);
 
 	if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
 		int i;
 
 		/* destroy all instances for this portid */
-		spin_lock_bh(&instances_lock);
+		spin_lock_bh(&log->instances_lock);
 		for (i = 0; i < INSTANCE_BUCKETS; i++) {
 			struct hlist_node *t2;
 			struct nfulnl_instance *inst;
-			struct hlist_head *head = &instance_table[i];
+			struct hlist_head *head = &log->instance_table[i];
 
 			hlist_for_each_entry_safe(inst, t2, head, hlist) {
-				if ((net_eq(n->net, &init_net)) &&
-				    (n->portid == inst->peer_portid))
+				if (n->portid == inst->peer_portid)
 					__instance_destroy(inst);
 			}
 		}
-		spin_unlock_bh(&instances_lock);
+		spin_unlock_bh(&log->instances_lock);
 	}
 	return NOTIFY_DONE;
 }
@@ -767,6 +790,8 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
 	u_int16_t group_num = ntohs(nfmsg->res_id);
 	struct nfulnl_instance *inst;
 	struct nfulnl_msg_config_cmd *cmd = NULL;
+	struct net *net = sock_net(ctnl);
+	struct nfnl_log_net *log = nfnl_log_pernet(net);
 	int ret = 0;
 
 	if (nfula[NFULA_CFG_CMD]) {
@@ -776,14 +801,14 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
 		/* Commands without queue context */
 		switch (cmd->command) {
 		case NFULNL_CFG_CMD_PF_BIND:
-			return nf_log_bind_pf(pf, &nfulnl_logger);
+			return nf_log_bind_pf(net, pf, &nfulnl_logger);
 		case NFULNL_CFG_CMD_PF_UNBIND:
-			nf_log_unbind_pf(pf);
+			nf_log_unbind_pf(net, pf);
 			return 0;
 		}
 	}
 
-	inst = instance_lookup_get(group_num);
+	inst = instance_lookup_get(log, group_num);
 	if (inst && inst->peer_portid != NETLINK_CB(skb).portid) {
 		ret = -EPERM;
 		goto out_put;
@@ -797,7 +822,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
 				goto out_put;
 			}
 
-			inst = instance_create(group_num,
+			inst = instance_create(net, group_num,
 					       NETLINK_CB(skb).portid,
 					       sk_user_ns(NETLINK_CB(skb).ssk));
 			if (IS_ERR(inst)) {
@@ -811,7 +836,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
 			goto out;
 		}
 
-		instance_destroy(inst);
+		instance_destroy(log, inst);
 		goto out_put;
 	default:
 		ret = -ENOTSUPP;
@@ -894,55 +919,68 @@ static const struct nfnetlink_subsystem nfulnl_subsys = {
 
 #ifdef CONFIG_PROC_FS
 struct iter_state {
+	struct seq_net_private p;
 	unsigned int bucket;
 };
 
-static struct hlist_node *get_first(struct iter_state *st)
+static struct hlist_node *get_first(struct net *net, struct iter_state *st)
 {
+	struct nfnl_log_net *log;
 	if (!st)
 		return NULL;
 
+	log = nfnl_log_pernet(net);
+
 	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
-		if (!hlist_empty(&instance_table[st->bucket]))
-			return rcu_dereference_bh(hlist_first_rcu(&instance_table[st->bucket]));
+		struct hlist_head *head = &log->instance_table[st->bucket];
+
+		if (!hlist_empty(head))
+			return rcu_dereference_bh(hlist_first_rcu(head));
 	}
 	return NULL;
 }
 
-static struct hlist_node *get_next(struct iter_state *st, struct hlist_node *h)
+static struct hlist_node *get_next(struct net *net, struct iter_state *st,
+				   struct hlist_node *h)
 {
 	h = rcu_dereference_bh(hlist_next_rcu(h));
 	while (!h) {
+		struct nfnl_log_net *log;
+		struct hlist_head *head;
+
 		if (++st->bucket >= INSTANCE_BUCKETS)
 			return NULL;
 
-		h = rcu_dereference_bh(hlist_first_rcu(&instance_table[st->bucket]));
+		log = nfnl_log_pernet(net);
+		head = &log->instance_table[st->bucket];
+		h = rcu_dereference_bh(hlist_first_rcu(head));
 	}
 	return h;
 }
 
-static struct hlist_node *get_idx(struct iter_state *st, loff_t pos)
+static struct hlist_node *get_idx(struct net *net, struct iter_state *st,
+				  loff_t pos)
 {
 	struct hlist_node *head;
-	head = get_first(st);
+	head = get_first(net, st);
 
 	if (head)
-		while (pos && (head = get_next(st, head)))
+		while (pos && (head = get_next(net, st, head)))
 			pos--;
 	return pos ? NULL : head;
 }
 
-static void *seq_start(struct seq_file *seq, loff_t *pos)
+static void *seq_start(struct seq_file *s, loff_t *pos)
 	__acquires(rcu_bh)
 {
 	rcu_read_lock_bh();
-	return get_idx(seq->private, *pos);
+	return get_idx(seq_file_net(s), s->private, *pos);
}
 
 static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
 {
 	(*pos)++;
-	return get_next(s->private, v);
+	return get_next(seq_file_net(s), s->private, v);
 }
 
 static void seq_stop(struct seq_file *s, void *v)
@@ -971,8 +1009,8 @@ static const struct seq_operations nful_seq_ops = {
 
 static int nful_open(struct inode *inode, struct file *file)
 {
-	return seq_open_private(file, &nful_seq_ops,
+	return seq_open_net(inode, file, &nful_seq_ops,
 			sizeof(struct iter_state));
 }
 
 static const struct file_operations nful_file_ops = {
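
The /proc iterator above switches to the net-aware seq_file helpers: struct seq_net_private must be the first member of the private state, seq_open_net()/seq_release_net() manage it, and seq_file_net() recovers the namespace of whoever opened the file. A self-contained sketch with hypothetical names (an intentionally empty iterator, paired with .release = seq_release_net):

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/net_namespace.h>

struct example_iter_state {
	struct seq_net_private p;	/* must be the first member */
	unsigned int bucket;
};

static void *example_seq_start(struct seq_file *s, loff_t *pos)
{
	struct net *net = seq_file_net(s);	/* opener's namespace */

	(void)net;	/* a real iterator would walk per-net state here */
	return NULL;
}

static void *example_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;
	return NULL;
}

static void example_seq_stop(struct seq_file *s, void *v)
{
}

static int example_seq_show(struct seq_file *s, void *v)
{
	return 0;
}

static const struct seq_operations example_seq_ops = {
	.start = example_seq_start,
	.next  = example_seq_next,
	.stop  = example_seq_stop,
	.show  = example_seq_show,
};

static int example_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &example_seq_ops,
			    sizeof(struct example_iter_state));
}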
@@ -980,17 +1018,43 @@ static const struct file_operations nful_file_ops = {
 	.open	 = nful_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
-	.release = seq_release_private,
+	.release = seq_release_net,
 };
 
 #endif /* PROC_FS */
 
-static int __init nfnetlink_log_init(void)
+static int __net_init nfnl_log_net_init(struct net *net)
 {
-	int i, status = -ENOMEM;
+	unsigned int i;
+	struct nfnl_log_net *log = nfnl_log_pernet(net);
 
 	for (i = 0; i < INSTANCE_BUCKETS; i++)
-		INIT_HLIST_HEAD(&instance_table[i]);
+		INIT_HLIST_HEAD(&log->instance_table[i]);
+	spin_lock_init(&log->instances_lock);
+
+#ifdef CONFIG_PROC_FS
+	if (!proc_create("nfnetlink_log", 0440,
+			 net->nf.proc_netfilter, &nful_file_ops))
+		return -ENOMEM;
+#endif
+	return 0;
+}
+
+static void __net_exit nfnl_log_net_exit(struct net *net)
+{
+	remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter);
+}
+
+static struct pernet_operations nfnl_log_net_ops = {
+	.init	= nfnl_log_net_init,
+	.exit	= nfnl_log_net_exit,
+	.id	= &nfnl_log_net_id,
+	.size	= sizeof(struct nfnl_log_net),
+};
+
+static int __init nfnetlink_log_init(void)
+{
+	int status = -ENOMEM;
 
 	/* it's not really all that important to have a random value, so
 	 * we can do this from the init function, even if there hasn't
@@ -1000,29 +1064,25 @@ static int __init nfnetlink_log_init(void)
 	netlink_register_notifier(&nfulnl_rtnl_notifier);
 	status = nfnetlink_subsys_register(&nfulnl_subsys);
 	if (status < 0) {
-		printk(KERN_ERR "log: failed to create netlink socket\n");
+		pr_err("log: failed to create netlink socket\n");
 		goto cleanup_netlink_notifier;
 	}
 
 	status = nf_log_register(NFPROTO_UNSPEC, &nfulnl_logger);
 	if (status < 0) {
-		printk(KERN_ERR "log: failed to register logger\n");
+		pr_err("log: failed to register logger\n");
 		goto cleanup_subsys;
 	}
 
-#ifdef CONFIG_PROC_FS
-	if (!proc_create("nfnetlink_log", 0440,
-			 proc_net_netfilter, &nful_file_ops)) {
-		status = -ENOMEM;
+	status = register_pernet_subsys(&nfnl_log_net_ops);
+	if (status < 0) {
+		pr_err("log: failed to register pernet ops\n");
 		goto cleanup_logger;
 	}
-#endif
 	return status;
 
-#ifdef CONFIG_PROC_FS
 cleanup_logger:
 	nf_log_unregister(&nfulnl_logger);
-#endif
 cleanup_subsys:
 	nfnetlink_subsys_unregister(&nfulnl_subsys);
 cleanup_netlink_notifier:
@@ -1032,10 +1092,8 @@ cleanup_netlink_notifier:
 
 static void __exit nfnetlink_log_fini(void)
 {
+	unregister_pernet_subsys(&nfnl_log_net_ops);
 	nf_log_unregister(&nfulnl_logger);
-#ifdef CONFIG_PROC_FS
-	remove_proc_entry("nfnetlink_log", proc_net_netfilter);
-#endif
 	nfnetlink_subsys_unregister(&nfulnl_subsys);
 	netlink_unregister_notifier(&nfulnl_rtnl_notifier);
 }
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 42680b2baa11..5e280b3e154f 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -30,6 +30,7 @@
 #include <linux/list.h>
 #include <net/sock.h>
 #include <net/netfilter/nf_queue.h>
+#include <net/netns/generic.h>
 #include <net/netfilter/nfnetlink_queue.h>
 
 #include <linux/atomic.h>
@@ -66,23 +67,31 @@ struct nfqnl_instance {
 
 typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);
 
-static DEFINE_SPINLOCK(instances_lock);
+static int nfnl_queue_net_id __read_mostly;
 
 #define INSTANCE_BUCKETS	16
-static struct hlist_head instance_table[INSTANCE_BUCKETS] __read_mostly;
+struct nfnl_queue_net {
+	spinlock_t instances_lock;
+	struct hlist_head instance_table[INSTANCE_BUCKETS];
+};
+
+static struct nfnl_queue_net *nfnl_queue_pernet(struct net *net)
+{
+	return net_generic(net, nfnl_queue_net_id);
+}
 
 static inline u_int8_t instance_hashfn(u_int16_t queue_num)
 {
-	return ((queue_num >> 8) | queue_num) % INSTANCE_BUCKETS;
+	return ((queue_num >> 8) ^ queue_num) % INSTANCE_BUCKETS;
 }
 
 static struct nfqnl_instance *
-instance_lookup(u_int16_t queue_num)
+instance_lookup(struct nfnl_queue_net *q, u_int16_t queue_num)
 {
 	struct hlist_head *head;
 	struct nfqnl_instance *inst;
 
-	head = &instance_table[instance_hashfn(queue_num)];
+	head = &q->instance_table[instance_hashfn(queue_num)];
 	hlist_for_each_entry_rcu(inst, head, hlist) {
 		if (inst->queue_num == queue_num)
 			return inst;
@@ -91,14 +100,15 @@ instance_lookup(u_int16_t queue_num)
 }
 
 static struct nfqnl_instance *
-instance_create(u_int16_t queue_num, int portid)
+instance_create(struct nfnl_queue_net *q, u_int16_t queue_num,
+		int portid)
 {
 	struct nfqnl_instance *inst;
 	unsigned int h;
 	int err;
 
-	spin_lock(&instances_lock);
-	if (instance_lookup(queue_num)) {
+	spin_lock(&q->instances_lock);
+	if (instance_lookup(q, queue_num)) {
 		err = -EEXIST;
 		goto out_unlock;
 	}
@@ -123,16 +133,16 @@ instance_create(u_int16_t queue_num, int portid)
 	}
 
 	h = instance_hashfn(queue_num);
-	hlist_add_head_rcu(&inst->hlist, &instance_table[h]);
+	hlist_add_head_rcu(&inst->hlist, &q->instance_table[h]);
 
-	spin_unlock(&instances_lock);
+	spin_unlock(&q->instances_lock);
 
 	return inst;
 
 out_free:
 	kfree(inst);
 out_unlock:
-	spin_unlock(&instances_lock);
+	spin_unlock(&q->instances_lock);
 	return ERR_PTR(err);
 }
 
@@ -158,11 +168,11 @@ __instance_destroy(struct nfqnl_instance *inst)
 }
 
 static void
-instance_destroy(struct nfqnl_instance *inst)
+instance_destroy(struct nfnl_queue_net *q, struct nfqnl_instance *inst)
 {
-	spin_lock(&instances_lock);
+	spin_lock(&q->instances_lock);
 	__instance_destroy(inst);
-	spin_unlock(&instances_lock);
+	spin_unlock(&q->instances_lock);
 }
 
 static inline void
@@ -217,14 +227,59 @@ nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
 	spin_unlock_bh(&queue->lock);
 }
 
+static void
+nfqnl_zcopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
+{
+	int i, j = 0;
+	int plen = 0; /* length of skb->head fragment */
+	struct page *page;
+	unsigned int offset;
+
+	/* dont bother with small payloads */
+	if (len <= skb_tailroom(to)) {
+		skb_copy_bits(from, 0, skb_put(to, len), len);
+		return;
+	}
+
+	if (hlen) {
+		skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
+		len -= hlen;
+	} else {
+		plen = min_t(int, skb_headlen(from), len);
+		if (plen) {
+			page = virt_to_head_page(from->head);
+			offset = from->data - (unsigned char *)page_address(page);
+			__skb_fill_page_desc(to, 0, page, offset, plen);
+			get_page(page);
+			j = 1;
+			len -= plen;
+		}
+	}
+
+	to->truesize += len + plen;
+	to->len += len + plen;
+	to->data_len += len + plen;
+
+	for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
+		if (!len)
+			break;
+		skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
+		skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
+		len -= skb_shinfo(to)->frags[j].size;
+		skb_frag_ref(to, j);
+		j++;
+	}
+	skb_shinfo(to)->nr_frags = j;
+}
+
 static struct sk_buff *
 nfqnl_build_packet_message(struct nfqnl_instance *queue,
 			   struct nf_queue_entry *entry,
 			   __be32 **packet_id_ptr)
 {
-	sk_buff_data_t old_tail;
 	size_t size;
 	size_t data_len = 0, cap_len = 0;
+	int hlen = 0;
 	struct sk_buff *skb;
 	struct nlattr *nla;
 	struct nfqnl_msg_packet_hdr *pmsg;
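
nfqnl_zcopy() above avoids copying the captured payload where it can: small payloads are copied into the netlink skb's tailroom, everything else is attached by reference, taking page refs on the source skb's head page and paged fragments so that the queued packet and the netlink message share data pages. Only hlen bytes (the part that must stay linear) are copied, and the message reserves just sizeof(struct nlattr) + hlen. The core page-sharing step, reduced to a hypothetical helper:

#include <linux/skbuff.h>

/*
 * Share one paged fragment by reference instead of copying it.
 * Accounting (to->len, to->data_len, to->truesize) is left to the
 * caller, exactly as nfqnl_zcopy() handles it after its loop.
 */
static void share_one_frag(struct sk_buff *to, const struct sk_buff *from, int i)
{
	int j = skb_shinfo(to)->nr_frags;

	/* copy the fragment descriptor, not the payload bytes */
	skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
	/* pin the underlying page so either skb may be freed first */
	skb_frag_ref(to, j);
	skb_shinfo(to)->nr_frags = j + 1;
}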
@@ -236,7 +291,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
 	struct nf_conn *ct = NULL;
 	enum ip_conntrack_info uninitialized_var(ctinfo);
 
-	size =    NLMSG_SPACE(sizeof(struct nfgenmsg))
+	size =    nlmsg_total_size(sizeof(struct nfgenmsg))
 		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
 		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
 		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
@@ -246,8 +301,10 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
 #endif
 		+ nla_total_size(sizeof(u_int32_t))	/* mark */
 		+ nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
-		+ nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp)
-		+ nla_total_size(sizeof(u_int32_t)));	/* cap_len */
+		+ nla_total_size(sizeof(u_int32_t));	/* cap_len */
+
+	if (entskb->tstamp.tv64)
+		size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));
 
 	outdev = entry->outdev;
 
@@ -265,7 +322,16 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
 		if (data_len == 0 || data_len > entskb->len)
 			data_len = entskb->len;
 
-		size += nla_total_size(data_len);
+
+		if (!entskb->head_frag ||
+		    skb_headlen(entskb) < L1_CACHE_BYTES ||
+		    skb_shinfo(entskb)->nr_frags >= MAX_SKB_FRAGS)
+			hlen = skb_headlen(entskb);
+
+		if (skb_has_frag_list(entskb))
+			hlen = entskb->len;
+		hlen = min_t(int, data_len, hlen);
+		size += sizeof(struct nlattr) + hlen;
 		cap_len = entskb->len;
 		break;
 	}
@@ -277,7 +343,6 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
 	if (!skb)
 		return NULL;
 
-	old_tail = skb->tail;
 	nlh = nlmsg_put(skb, 0, 0,
 			NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
 			sizeof(struct nfgenmsg), 0);
@@ -382,31 +447,26 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
 			goto nla_put_failure;
 	}
 
+	if (ct && nfqnl_ct_put(skb, ct, ctinfo) < 0)
+		goto nla_put_failure;
+
+	if (cap_len > 0 && nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len)))
+		goto nla_put_failure;
+
 	if (data_len) {
 		struct nlattr *nla;
-		int sz = nla_attr_size(data_len);
 
-		if (skb_tailroom(skb) < nla_total_size(data_len)) {
-			printk(KERN_WARNING "nf_queue: no tailroom!\n");
-			kfree_skb(skb);
-			return NULL;
-		}
+		if (skb_tailroom(skb) < sizeof(*nla) + hlen)
+			goto nla_put_failure;
 
-		nla = (struct nlattr *)skb_put(skb, nla_total_size(data_len));
+		nla = (struct nlattr *)skb_put(skb, sizeof(*nla));
 		nla->nla_type = NFQA_PAYLOAD;
-		nla->nla_len = sz;
+		nla->nla_len = nla_attr_size(data_len);
 
-		if (skb_copy_bits(entskb, 0, nla_data(nla), data_len))
-			BUG();
+		nfqnl_zcopy(skb, entskb, data_len, hlen);
 	}
 
-	if (ct && nfqnl_ct_put(skb, ct, ctinfo) < 0)
-		goto nla_put_failure;
-
-	if (cap_len > 0 && nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len)))
-		goto nla_put_failure;
-
-	nlh->nlmsg_len = skb->tail - old_tail;
+	nlh->nlmsg_len = skb->len;
 	return skb;
 
 nla_put_failure:
@@ -423,9 +483,12 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 	int err = -ENOBUFS;
 	__be32 *packet_id_ptr;
 	int failopen = 0;
+	struct net *net = dev_net(entry->indev ?
+				  entry->indev : entry->outdev);
+	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
 
 	/* rcu_read_lock()ed by nf_hook_slow() */
-	queue = instance_lookup(queuenum);
+	queue = instance_lookup(q, queuenum);
 	if (!queue) {
 		err = -ESRCH;
 		goto err_out;
@@ -462,7 +525,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
 	*packet_id_ptr = htonl(entry->id);
 
 	/* nfnetlink_unicast will either free the nskb or add it to a socket */
-	err = nfnetlink_unicast(nskb, &init_net, queue->peer_portid, MSG_DONTWAIT);
+	err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT);
 	if (err < 0) {
 		queue->queue_user_dropped++;
 		goto err_out_unlock;
@@ -575,15 +638,16 @@ dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
 /* drop all packets with either indev or outdev == ifindex from all queue
  * instances */
 static void
-nfqnl_dev_drop(int ifindex)
+nfqnl_dev_drop(struct net *net, int ifindex)
 {
 	int i;
+	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
 
 	rcu_read_lock();
 
 	for (i = 0; i < INSTANCE_BUCKETS; i++) {
 		struct nfqnl_instance *inst;
-		struct hlist_head *head = &instance_table[i];
+		struct hlist_head *head = &q->instance_table[i];
 
 		hlist_for_each_entry_rcu(inst, head, hlist)
 			nfqnl_flush(inst, dev_cmp, ifindex);
@@ -600,12 +664,9 @@ nfqnl_rcv_dev_event(struct notifier_block *this,
 {
 	struct net_device *dev = ptr;
 
-	if (!net_eq(dev_net(dev), &init_net))
-		return NOTIFY_DONE;
-
 	/* Drop any packets associated with the downed device */
 	if (event == NETDEV_DOWN)
-		nfqnl_dev_drop(dev->ifindex);
+		nfqnl_dev_drop(dev_net(dev), dev->ifindex);
 	return NOTIFY_DONE;
 }
 
@@ -618,24 +679,24 @@ nfqnl_rcv_nl_event(struct notifier_block *this,
 		   unsigned long event, void *ptr)
 {
 	struct netlink_notify *n = ptr;
+	struct nfnl_queue_net *q = nfnl_queue_pernet(n->net);
 
 	if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
 		int i;
 
 		/* destroy all instances for this portid */
-		spin_lock(&instances_lock);
+		spin_lock(&q->instances_lock);
 		for (i = 0; i < INSTANCE_BUCKETS; i++) {
 			struct hlist_node *t2;
 			struct nfqnl_instance *inst;
-			struct hlist_head *head = &instance_table[i];
+			struct hlist_head *head = &q->instance_table[i];
 
 			hlist_for_each_entry_safe(inst, t2, head, hlist) {
-				if ((n->net == &init_net) &&
-				    (n->portid == inst->peer_portid))
+				if (n->portid == inst->peer_portid)
 					__instance_destroy(inst);
 			}
 		}
-		spin_unlock(&instances_lock);
+		spin_unlock(&q->instances_lock);
 	}
 	return NOTIFY_DONE;
 }
@@ -656,11 +717,12 @@ static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
 	[NFQA_MARK]		= { .type = NLA_U32 },
 };
 
-static struct nfqnl_instance *verdict_instance_lookup(u16 queue_num, int nlportid)
+static struct nfqnl_instance *
+verdict_instance_lookup(struct nfnl_queue_net *q, u16 queue_num, int nlportid)
 {
 	struct nfqnl_instance *queue;
 
-	queue = instance_lookup(queue_num);
+	queue = instance_lookup(q, queue_num);
 	if (!queue)
 		return ERR_PTR(-ENODEV);
 
@@ -704,7 +766,11 @@ nfqnl_recv_verdict_batch(struct sock *ctnl, struct sk_buff *skb,
 	LIST_HEAD(batch_list);
 	u16 queue_num = ntohs(nfmsg->res_id);
 
-	queue = verdict_instance_lookup(queue_num, NETLINK_CB(skb).portid);
+	struct net *net = sock_net(ctnl);
+	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
+
+	queue = verdict_instance_lookup(q, queue_num,
+					NETLINK_CB(skb).portid);
 	if (IS_ERR(queue))
 		return PTR_ERR(queue);
 
@@ -752,10 +818,13 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
 	enum ip_conntrack_info uninitialized_var(ctinfo);
 	struct nf_conn *ct = NULL;
 
-	queue = instance_lookup(queue_num);
-	if (!queue)
-
-	queue = verdict_instance_lookup(queue_num, NETLINK_CB(skb).portid);
+	struct net *net = sock_net(ctnl);
+	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
+
+	queue = instance_lookup(q, queue_num);
+	if (!queue)
+		queue = verdict_instance_lookup(q, queue_num,
+						NETLINK_CB(skb).portid);
 	if (IS_ERR(queue))
 		return PTR_ERR(queue);
 
@@ -819,6 +888,8 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
 	u_int16_t queue_num = ntohs(nfmsg->res_id);
 	struct nfqnl_instance *queue;
 	struct nfqnl_msg_config_cmd *cmd = NULL;
+	struct net *net = sock_net(ctnl);
+	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
 	int ret = 0;
 
 	if (nfqa[NFQA_CFG_CMD]) {
@@ -832,7 +903,7 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
 	}
 
 	rcu_read_lock();
-	queue = instance_lookup(queue_num);
+	queue = instance_lookup(q, queue_num);
 	if (queue && queue->peer_portid != NETLINK_CB(skb).portid) {
 		ret = -EPERM;
 		goto err_out_unlock;
@@ -845,7 +916,8 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
 			ret = -EBUSY;
 			goto err_out_unlock;
 		}
-		queue = instance_create(queue_num, NETLINK_CB(skb).portid);
+		queue = instance_create(q, queue_num,
+					NETLINK_CB(skb).portid);
 		if (IS_ERR(queue)) {
 			ret = PTR_ERR(queue);
 			goto err_out_unlock;
@@ -856,7 +928,7 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
 			ret = -ENODEV;
 			goto err_out_unlock;
 		}
-		instance_destroy(queue);
+		instance_destroy(q, queue);
 		break;
 	case NFQNL_CFG_CMD_PF_BIND:
 	case NFQNL_CFG_CMD_PF_UNBIND:
@@ -950,19 +1022,24 @@ static const struct nfnetlink_subsystem nfqnl_subsys = {
 
 #ifdef CONFIG_PROC_FS
 struct iter_state {
+	struct seq_net_private p;
 	unsigned int bucket;
 };
 
 static struct hlist_node *get_first(struct seq_file *seq)
 {
 	struct iter_state *st = seq->private;
+	struct net *net;
+	struct nfnl_queue_net *q;
 
 	if (!st)
 		return NULL;
 
+	net = seq_file_net(seq);
+	q = nfnl_queue_pernet(net);
 	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
-		if (!hlist_empty(&instance_table[st->bucket]))
-			return instance_table[st->bucket].first;
+		if (!hlist_empty(&q->instance_table[st->bucket]))
+			return q->instance_table[st->bucket].first;
 	}
 	return NULL;
 }
@@ -970,13 +1047,17 @@ static struct hlist_node *get_first(struct seq_file *seq)
 static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
 {
 	struct iter_state *st = seq->private;
+	struct net *net = seq_file_net(seq);
 
 	h = h->next;
 	while (!h) {
+		struct nfnl_queue_net *q;
+
 		if (++st->bucket >= INSTANCE_BUCKETS)
 			return NULL;
 
-		h = instance_table[st->bucket].first;
+		q = nfnl_queue_pernet(net);
+		h = q->instance_table[st->bucket].first;
 	}
 	return h;
 }
@@ -992,11 +1073,11 @@ static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
 	return pos ? NULL : head;
 }
 
-static void *seq_start(struct seq_file *seq, loff_t *pos)
-	__acquires(instances_lock)
+static void *seq_start(struct seq_file *s, loff_t *pos)
+	__acquires(nfnl_queue_pernet(seq_file_net(s))->instances_lock)
 {
-	spin_lock(&instances_lock);
-	return get_idx(seq, *pos);
+	spin_lock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock);
+	return get_idx(s, *pos);
 }
 
 static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
@@ -1006,9 +1087,9 @@ static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
 }
 
 static void seq_stop(struct seq_file *s, void *v)
-	__releases(instances_lock)
+	__releases(nfnl_queue_pernet(seq_file_net(s))->instances_lock)
 {
-	spin_unlock(&instances_lock);
+	spin_unlock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock);
 }
 
 static int seq_show(struct seq_file *s, void *v)
@@ -1032,7 +1113,7 @@ static const struct seq_operations nfqnl_seq_ops = {
 
 static int nfqnl_open(struct inode *inode, struct file *file)
 {
-	return seq_open_private(file, &nfqnl_seq_ops,
+	return seq_open_net(inode, file, &nfqnl_seq_ops,
 			sizeof(struct iter_state));
 }
 
@@ -1041,41 +1122,63 @@ static const struct file_operations nfqnl_file_ops = {
 	.open	 = nfqnl_open,
 	.read	 = seq_read,
 	.llseek	 = seq_lseek,
-	.release = seq_release_private,
+	.release = seq_release_net,
 };
 
 #endif /* PROC_FS */
 
-static int __init nfnetlink_queue_init(void)
+static int __net_init nfnl_queue_net_init(struct net *net)
 {
-	int i, status = -ENOMEM;
+	unsigned int i;
+	struct nfnl_queue_net *q = nfnl_queue_pernet(net);
 
 	for (i = 0; i < INSTANCE_BUCKETS; i++)
-		INIT_HLIST_HEAD(&instance_table[i]);
+		INIT_HLIST_HEAD(&q->instance_table[i]);
+
+	spin_lock_init(&q->instances_lock);
+
+#ifdef CONFIG_PROC_FS
+	if (!proc_create("nfnetlink_queue", 0440,
+			 net->nf.proc_netfilter, &nfqnl_file_ops))
+		return -ENOMEM;
+#endif
+	return 0;
+}
+
+static void __net_exit nfnl_queue_net_exit(struct net *net)
+{
+	remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
+}
+
+static struct pernet_operations nfnl_queue_net_ops = {
+	.init	= nfnl_queue_net_init,
+	.exit	= nfnl_queue_net_exit,
+	.id	= &nfnl_queue_net_id,
+	.size	= sizeof(struct nfnl_queue_net),
+};
+
+static int __init nfnetlink_queue_init(void)
+{
+	int status = -ENOMEM;
 
 	netlink_register_notifier(&nfqnl_rtnl_notifier);
 	status = nfnetlink_subsys_register(&nfqnl_subsys);
 	if (status < 0) {
-		printk(KERN_ERR "nf_queue: failed to create netlink socket\n");
+		pr_err("nf_queue: failed to create netlink socket\n");
 		goto cleanup_netlink_notifier;
 	}
 
-#ifdef CONFIG_PROC_FS
-	if (!proc_create("nfnetlink_queue", 0440,
-			 proc_net_netfilter, &nfqnl_file_ops)) {
-		status = -ENOMEM;
+	status = register_pernet_subsys(&nfnl_queue_net_ops);
+	if (status < 0) {
+		pr_err("nf_queue: failed to register pernet ops\n");
 		goto cleanup_subsys;
 	}
-#endif
-
 	register_netdevice_notifier(&nfqnl_dev_notifier);
 	nf_register_queue_handler(&nfqh);
 	return status;
 
-#ifdef CONFIG_PROC_FS
 cleanup_subsys:
 	nfnetlink_subsys_unregister(&nfqnl_subsys);
-#endif
 cleanup_netlink_notifier:
 	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
 	return status;
@@ -1085,9 +1188,7 @@ static void __exit nfnetlink_queue_fini(void)
 {
 	nf_unregister_queue_handler();
 	unregister_netdevice_notifier(&nfqnl_dev_notifier);
-#ifdef CONFIG_PROC_FS
-	remove_proc_entry("nfnetlink_queue", proc_net_netfilter);
-#endif
+	unregister_pernet_subsys(&nfnl_queue_net_ops);
 	nfnetlink_subsys_unregister(&nfqnl_subsys);
 	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
 
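
With one instance table per namespace, every entry point first has to decide which namespace it is operating in. The two idioms used throughout this patch, shown in isolation (hypothetical wrapper names):

#include <linux/netdevice.h>
#include <net/sock.h>

/* Packet path: at a netfilter hook at least one device is set. */
static struct net *net_of_packet(struct net_device *in, struct net_device *out)
{
	return dev_net(in ? in : out);
}

/* Control path: the namespace the netlink socket was created in. */
static struct net *net_of_ctnl(struct sock *ctnl)
{
	return sock_net(ctnl);
}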
diff --git a/net/netfilter/xt_LOG.c b/net/netfilter/xt_LOG.c
index fa40096940a1..fe573f6c9e91 100644
--- a/net/netfilter/xt_LOG.c
+++ b/net/netfilter/xt_LOG.c
@@ -474,7 +474,14 @@ ipt_log_packet(u_int8_t pf,
 	       const struct nf_loginfo *loginfo,
 	       const char *prefix)
 {
-	struct sbuff *m = sb_open();
+	struct sbuff *m;
+	struct net *net = dev_net(in ? in : out);
+
+	/* FIXME: Disabled from containers until syslog ns is supported */
+	if (!net_eq(net, &init_net))
+		return;
+
+	m = sb_open();
 
 	if (!loginfo)
 		loginfo = &default_loginfo;
@@ -798,7 +805,14 @@ ip6t_log_packet(u_int8_t pf,
 	       const struct nf_loginfo *loginfo,
 	       const char *prefix)
 {
-	struct sbuff *m = sb_open();
+	struct sbuff *m;
+	struct net *net = dev_net(in ? in : out);
+
+	/* FIXME: Disabled from containers until syslog ns is supported */
+	if (!net_eq(net, &init_net))
+		return;
+
+	m = sb_open();
 
 	if (!loginfo)
 		loginfo = &default_loginfo;
@@ -893,23 +907,55 @@ static struct nf_logger ip6t_log_logger __read_mostly = {
 };
 #endif
 
+static int __net_init log_net_init(struct net *net)
+{
+	nf_log_set(net, NFPROTO_IPV4, &ipt_log_logger);
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+	nf_log_set(net, NFPROTO_IPV6, &ip6t_log_logger);
+#endif
+	return 0;
+}
+
+static void __net_exit log_net_exit(struct net *net)
+{
+	nf_log_unset(net, &ipt_log_logger);
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
+	nf_log_unset(net, &ip6t_log_logger);
+#endif
+}
+
+static struct pernet_operations log_net_ops = {
+	.init = log_net_init,
+	.exit = log_net_exit,
+};
+
 static int __init log_tg_init(void)
 {
 	int ret;
 
+	ret = register_pernet_subsys(&log_net_ops);
+	if (ret < 0)
+		goto err_pernet;
+
 	ret = xt_register_targets(log_tg_regs, ARRAY_SIZE(log_tg_regs));
 	if (ret < 0)
-		return ret;
+		goto err_target;
 
 	nf_log_register(NFPROTO_IPV4, &ipt_log_logger);
 #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 	nf_log_register(NFPROTO_IPV6, &ip6t_log_logger);
 #endif
 	return 0;
+
+err_target:
+	unregister_pernet_subsys(&log_net_ops);
+err_pernet:
+	return ret;
 }
 
 static void __exit log_tg_exit(void)
 {
+	unregister_pernet_subsys(&log_net_ops);
 	nf_log_unregister(&ipt_log_logger);
 #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
 	nf_log_unregister(&ip6t_log_logger);
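
log_tg_init() now has two registrations to unwind, and the hunk uses the standard goto ladder: each label undoes everything registered before the failing step, in reverse order. The shape of the idiom, assuming ops and target arrays defined as in the hunk above:

static int __init example_init(void)
{
	int ret;

	ret = register_pernet_subsys(&example_net_ops);
	if (ret < 0)
		goto err_pernet;	/* nothing registered yet */

	ret = xt_register_targets(example_regs, ARRAY_SIZE(example_regs));
	if (ret < 0)
		goto err_target;	/* undo the pernet registration */

	return 0;

err_target:
	unregister_pernet_subsys(&example_net_ops);
err_pernet:
	return ret;
}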
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
index 817f9e9f2b16..1e2fae32f81b 100644
--- a/net/netfilter/xt_NFQUEUE.c
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -76,22 +76,31 @@ static u32 hash_v6(const struct sk_buff *skb)
 }
 #endif
 
-static unsigned int
-nfqueue_tg_v1(struct sk_buff *skb, const struct xt_action_param *par)
+static u32
+nfqueue_hash(const struct sk_buff *skb, const struct xt_action_param *par)
 {
 	const struct xt_NFQ_info_v1 *info = par->targinfo;
 	u32 queue = info->queuenum;
 
-	if (info->queues_total > 1) {
-		if (par->family == NFPROTO_IPV4)
-			queue = (((u64) hash_v4(skb) * info->queues_total) >>
-				 32) + queue;
+	if (par->family == NFPROTO_IPV4)
+		queue += ((u64) hash_v4(skb) * info->queues_total) >> 32;
 #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
-		else if (par->family == NFPROTO_IPV6)
-			queue = (((u64) hash_v6(skb) * info->queues_total) >>
-				 32) + queue;
+	else if (par->family == NFPROTO_IPV6)
+		queue += ((u64) hash_v6(skb) * info->queues_total) >> 32;
 #endif
-	}
+
+	return queue;
+}
+
+static unsigned int
+nfqueue_tg_v1(struct sk_buff *skb, const struct xt_action_param *par)
+{
+	const struct xt_NFQ_info_v1 *info = par->targinfo;
+	u32 queue = info->queuenum;
+
+	if (info->queues_total > 1)
+		queue = nfqueue_hash(skb, par);
+
 	return NF_QUEUE_NR(queue);
 }
 
@@ -108,7 +117,7 @@ nfqueue_tg_v2(struct sk_buff *skb, const struct xt_action_param *par)
 
 static int nfqueue_tg_check(const struct xt_tgchk_param *par)
 {
-	const struct xt_NFQ_info_v2 *info = par->targinfo;
+	const struct xt_NFQ_info_v3 *info = par->targinfo;
 	u32 maxid;
 
 	if (unlikely(!rnd_inited)) {
@@ -125,11 +134,32 @@ static int nfqueue_tg_check(const struct xt_tgchk_param *par)
 			info->queues_total, maxid);
 		return -ERANGE;
 	}
-	if (par->target->revision == 2 && info->bypass > 1)
+	if (par->target->revision == 2 && info->flags > 1)
 		return -EINVAL;
+	if (par->target->revision == 3 && info->flags & ~NFQ_FLAG_MASK)
+		return -EINVAL;
+
 	return 0;
 }
 
+static unsigned int
+nfqueue_tg_v3(struct sk_buff *skb, const struct xt_action_param *par)
+{
+	const struct xt_NFQ_info_v3 *info = par->targinfo;
+	u32 queue = info->queuenum;
+
+	if (info->queues_total > 1) {
+		if (info->flags & NFQ_FLAG_CPU_FANOUT) {
+			int cpu = smp_processor_id();
+
+			queue = info->queuenum + cpu % info->queues_total;
+		} else
+			queue = nfqueue_hash(skb, par);
+	}
+
+	return NF_QUEUE_NR(queue);
+}
+
 static struct xt_target nfqueue_tg_reg[] __read_mostly = {
 	{
 		.name		= "NFQUEUE",
@@ -156,6 +186,15 @@ static struct xt_target nfqueue_tg_reg[] __read_mostly = {
 		.targetsize	= sizeof(struct xt_NFQ_info_v2),
 		.me		= THIS_MODULE,
 	},
+	{
+		.name		= "NFQUEUE",
+		.revision	= 3,
+		.family		= NFPROTO_UNSPEC,
+		.checkentry	= nfqueue_tg_check,
+		.target		= nfqueue_tg_v3,
+		.targetsize	= sizeof(struct xt_NFQ_info_v3),
+		.me		= THIS_MODULE,
+	},
 };
 
 static int __init nfqueue_tg_init(void)
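
Both fanout modes map a packet onto one of queues_total queues starting at queuenum: the hash mode scales a 32-bit flow hash into [0, queues_total) with ((u64)hash * n) >> 32, which equals floor(hash / 2^32 * n) and needs no division, while the new v3 NFQ_FLAG_CPU_FANOUT mode simply takes smp_processor_id() modulo the queue count. The arithmetic, checked in plain userspace C with made-up values:

#include <stdint.h>
#include <stdio.h>

/* Map a 32-bit hash onto [base, base + n) without a modulo. */
static uint32_t pick_queue(uint32_t hash, uint32_t base, uint32_t n)
{
	return base + (uint32_t)(((uint64_t)hash * n) >> 32);
}

int main(void)
{
	printf("%u\n", pick_queue(0xffffffffu, 100, 4));	/* 103 */
	printf("%u\n", pick_queue(0x40000000u, 100, 4));	/* 101 */
	printf("%u\n", pick_queue(0, 100, 4));			/* 100 */
	return 0;
}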
diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c
index a5e673d32bda..647d989a01e6 100644
--- a/net/netfilter/xt_osf.c
+++ b/net/netfilter/xt_osf.c
@@ -201,6 +201,7 @@ xt_osf_match_packet(const struct sk_buff *skb, struct xt_action_param *p)
 	unsigned char opts[MAX_IPOPTLEN];
 	const struct xt_osf_finger *kf;
 	const struct xt_osf_user_finger *f;
+	struct net *net = dev_net(p->in ? p->in : p->out);
 
 	if (!info)
 		return false;
@@ -325,7 +326,7 @@ xt_osf_match_packet(const struct sk_buff *skb, struct xt_action_param *p)
 			fcount++;
 
 			if (info->flags & XT_OSF_LOG)
-				nf_log_packet(p->family, p->hooknum, skb,
+				nf_log_packet(net, p->family, p->hooknum, skb,
 					p->in, p->out, NULL,
 					"%s [%s:%s] : %pI4:%d -> %pI4:%d hops=%d\n",
 					f->genre, f->version, f->subtype,
@@ -341,7 +342,8 @@ xt_osf_match_packet(const struct sk_buff *skb, struct xt_action_param *p)
 	rcu_read_unlock();
 
 	if (!fcount && (info->flags & XT_OSF_LOG))
-		nf_log_packet(p->family, p->hooknum, skb, p->in, p->out, NULL,
+		nf_log_packet(net, p->family, p->hooknum, skb, p->in,
+			      p->out, NULL,
 			"Remote OS is not known: %pI4:%u -> %pI4:%u\n",
 			&ip->saddr, ntohs(tcp->source),
 			&ip->daddr, ntohs(tcp->dest));
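
nf_log_packet() now takes the namespace as its first argument, so the message reaches whatever logger that namespace has bound. A hedged sketch of a packet-path caller (hypothetical function, deriving the namespace the same way xt_osf does above):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/netfilter/nf_log.h>

static void log_example(const struct sk_buff *skb,
			const struct net_device *in,
			const struct net_device *out,
			u_int8_t family, unsigned int hooknum)
{
	struct net *net = dev_net(in ? in : out);

	nf_log_packet(net, family, hooknum, skb, in, out, NULL,
		      "example: packet seen\n");
}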
diff --git a/net/netlink/Kconfig b/net/netlink/Kconfig
new file mode 100644
index 000000000000..5d6e8c05b3d4
--- /dev/null
+++ b/net/netlink/Kconfig
@@ -0,0 +1,10 @@
+#
+# Netlink Sockets
+#
+
+config NETLINK_DIAG
+	tristate "NETLINK: socket monitoring interface"
+	default n
+	---help---
+	  Support for NETLINK socket monitoring interface used by the ss tool.
+	  If unsure, say Y.
diff --git a/net/netlink/Makefile b/net/netlink/Makefile
index bdd6ddf4e95b..e837917f6c03 100644
--- a/net/netlink/Makefile
+++ b/net/netlink/Makefile
@@ -3,3 +3,6 @@
 #
 
 obj-y				:= af_netlink.o genetlink.o
+
+obj-$(CONFIG_NETLINK_DIAG)	+= netlink_diag.o
+netlink_diag-y			:= diag.o
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 1e3fd5bfcd86..ce2e0064e7f6 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -61,28 +61,7 @@
 #include <net/scm.h>
 #include <net/netlink.h>
 
-#define NLGRPSZ(x)	(ALIGN(x, sizeof(unsigned long) * 8) / 8)
-#define NLGRPLONGS(x)	(NLGRPSZ(x)/sizeof(unsigned long))
-
-struct netlink_sock {
-	/* struct sock has to be the first member of netlink_sock */
-	struct sock		sk;
-	u32			portid;
-	u32			dst_portid;
-	u32			dst_group;
-	u32			flags;
-	u32			subscriptions;
-	u32			ngroups;
-	unsigned long		*groups;
-	unsigned long		state;
-	wait_queue_head_t	wait;
-	struct netlink_callback	*cb;
-	struct mutex		*cb_mutex;
-	struct mutex		cb_def_mutex;
-	void			(*netlink_rcv)(struct sk_buff *skb);
-	void			(*netlink_bind)(int group);
-	struct module		*module;
-};
+#include "af_netlink.h"
 
 struct listeners {
 	struct rcu_head		rcu;
@@ -94,48 +73,20 @@ struct listeners {
 #define NETLINK_BROADCAST_SEND_ERROR	0x4
 #define NETLINK_RECV_NO_ENOBUFS	0x8
 
-static inline struct netlink_sock *nlk_sk(struct sock *sk)
-{
-	return container_of(sk, struct netlink_sock, sk);
-}
-
 static inline int netlink_is_kernel(struct sock *sk)
 {
 	return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
 }
 
-struct nl_portid_hash {
-	struct hlist_head	*table;
-	unsigned long		rehash_time;
-
-	unsigned int		mask;
-	unsigned int		shift;
-
-	unsigned int		entries;
-	unsigned int		max_shift;
-
-	u32			rnd;
-};
-
-struct netlink_table {
-	struct nl_portid_hash	hash;
-	struct hlist_head	mc_list;
-	struct listeners __rcu	*listeners;
-	unsigned int		flags;
-	unsigned int		groups;
-	struct mutex		*cb_mutex;
-	struct module		*module;
-	void			(*bind)(int group);
-	int			registered;
-};
-
-static struct netlink_table *nl_table;
+struct netlink_table *nl_table;
+EXPORT_SYMBOL_GPL(nl_table);
 
 static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
 
 static int netlink_dump(struct sock *sk);
 
-static DEFINE_RWLOCK(nl_table_lock);
+DEFINE_RWLOCK(nl_table_lock);
+EXPORT_SYMBOL_GPL(nl_table_lock);
 static atomic_t nl_table_users = ATOMIC_INIT(0);
 
 #define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock));
@@ -1695,7 +1646,7 @@ struct nlmsghdr *
 __nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
 {
 	struct nlmsghdr *nlh;
-	int size = NLMSG_LENGTH(len);
+	int size = nlmsg_msg_size(len);
 
 	nlh = (struct nlmsghdr*)skb_put(skb, NLMSG_ALIGN(size));
 	nlh->nlmsg_type = type;
@@ -1704,7 +1655,7 @@ __nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
 	nlh->nlmsg_pid = portid;
 	nlh->nlmsg_seq = seq;
 	if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
-		memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size);
+		memset(nlmsg_data(nlh) + len, 0, NLMSG_ALIGN(size) - size);
 	return nlh;
 }
 EXPORT_SYMBOL(__nlmsg_put);
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
new file mode 100644
index 000000000000..d9acb2a1d855
--- /dev/null
+++ b/net/netlink/af_netlink.h
@@ -0,0 +1,62 @@
+#ifndef _AF_NETLINK_H
+#define _AF_NETLINK_H
+
+#include <net/sock.h>
+
+#define NLGRPSZ(x)	(ALIGN(x, sizeof(unsigned long) * 8) / 8)
+#define NLGRPLONGS(x)	(NLGRPSZ(x)/sizeof(unsigned long))
+
+struct netlink_sock {
+	/* struct sock has to be the first member of netlink_sock */
+	struct sock		sk;
+	u32			portid;
+	u32			dst_portid;
+	u32			dst_group;
+	u32			flags;
+	u32			subscriptions;
+	u32			ngroups;
+	unsigned long		*groups;
+	unsigned long		state;
+	wait_queue_head_t	wait;
+	struct netlink_callback	*cb;
+	struct mutex		*cb_mutex;
+	struct mutex		cb_def_mutex;
+	void			(*netlink_rcv)(struct sk_buff *skb);
+	void			(*netlink_bind)(int group);
+	struct module		*module;
+};
+
+static inline struct netlink_sock *nlk_sk(struct sock *sk)
+{
+	return container_of(sk, struct netlink_sock, sk);
+}
+
+struct nl_portid_hash {
+	struct hlist_head	*table;
+	unsigned long		rehash_time;
+
+	unsigned int		mask;
+	unsigned int		shift;
+
+	unsigned int		entries;
+	unsigned int		max_shift;
+
+	u32			rnd;
+};
+
+struct netlink_table {
+	struct nl_portid_hash	hash;
+	struct hlist_head	mc_list;
+	struct listeners __rcu	*listeners;
+	unsigned int		flags;
+	unsigned int		groups;
+	struct mutex		*cb_mutex;
+	struct module		*module;
+	void			(*bind)(int group);
+	int			registered;
+};
+
+extern struct netlink_table *nl_table;
+extern rwlock_t nl_table_lock;
+
+#endif
diff --git a/net/netlink/diag.c b/net/netlink/diag.c
new file mode 100644
index 000000000000..5ffb1d1cf402
--- /dev/null
+++ b/net/netlink/diag.c
@@ -0,0 +1,188 @@
+#include <linux/module.h>
+
+#include <net/sock.h>
+#include <linux/netlink.h>
+#include <linux/sock_diag.h>
+#include <linux/netlink_diag.h>
+
+#include "af_netlink.h"
+
+static int sk_diag_dump_groups(struct sock *sk, struct sk_buff *nlskb)
+{
+	struct netlink_sock *nlk = nlk_sk(sk);
+
+	if (nlk->groups == NULL)
+		return 0;
+
+	return nla_put(nlskb, NETLINK_DIAG_GROUPS, NLGRPSZ(nlk->ngroups),
+		       nlk->groups);
+}
+
+static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
+			struct netlink_diag_req *req,
+			u32 portid, u32 seq, u32 flags, int sk_ino)
+{
+	struct nlmsghdr *nlh;
+	struct netlink_diag_msg *rep;
+	struct netlink_sock *nlk = nlk_sk(sk);
+
+	nlh = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*rep),
+			flags);
+	if (!nlh)
+		return -EMSGSIZE;
+
+	rep = nlmsg_data(nlh);
+	rep->ndiag_family = AF_NETLINK;
+	rep->ndiag_type = sk->sk_type;
+	rep->ndiag_protocol = sk->sk_protocol;
+	rep->ndiag_state = sk->sk_state;
+
+	rep->ndiag_ino = sk_ino;
+	rep->ndiag_portid = nlk->portid;
+	rep->ndiag_dst_portid = nlk->dst_portid;
+	rep->ndiag_dst_group = nlk->dst_group;
+	sock_diag_save_cookie(sk, rep->ndiag_cookie);
+
+	if ((req->ndiag_show & NDIAG_SHOW_GROUPS) &&
+	    sk_diag_dump_groups(sk, skb))
+		goto out_nlmsg_trim;
+
+	if ((req->ndiag_show & NDIAG_SHOW_MEMINFO) &&
+	    sock_diag_put_meminfo(sk, skb, NETLINK_DIAG_MEMINFO))
+		goto out_nlmsg_trim;
+
+	return nlmsg_end(skb, nlh);
+
+out_nlmsg_trim:
+	nlmsg_cancel(skb, nlh);
+	return -EMSGSIZE;
+}
+
+static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+				int protocol, int s_num)
+{
+	struct netlink_table *tbl = &nl_table[protocol];
+	struct nl_portid_hash *hash = &tbl->hash;
+	struct net *net = sock_net(skb->sk);
+	struct netlink_diag_req *req;
+	struct sock *sk;
+	int ret = 0, num = 0, i;
+
+	req = nlmsg_data(cb->nlh);
+
+	for (i = 0; i <= hash->mask; i++) {
+		sk_for_each(sk, &hash->table[i]) {
+			if (!net_eq(sock_net(sk), net))
+				continue;
+			if (num < s_num) {
+				num++;
+				continue;
+			}
+
+			if (sk_diag_fill(sk, skb, req,
+					 NETLINK_CB(cb->skb).portid,
+					 cb->nlh->nlmsg_seq,
+					 NLM_F_MULTI,
+					 sock_i_ino(sk)) < 0) {
+				ret = 1;
+				goto done;
+			}
+
+			num++;
+		}
+	}
+
+	sk_for_each_bound(sk, &tbl->mc_list) {
+		if (sk_hashed(sk))
+			continue;
+		if (!net_eq(sock_net(sk), net))
+			continue;
+		if (num < s_num) {
+			num++;
+			continue;
+		}
+
+		if (sk_diag_fill(sk, skb, req,
+				 NETLINK_CB(cb->skb).portid,
+				 cb->nlh->nlmsg_seq,
+				 NLM_F_MULTI,
+				 sock_i_ino(sk)) < 0) {
+			ret = 1;
+			goto done;
+		}
+		num++;
+	}
+done:
+	cb->args[0] = num;
+	cb->args[1] = protocol;
+
+	return ret;
+}
+
+static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct netlink_diag_req *req;
+	int s_num = cb->args[0];
+
+	req = nlmsg_data(cb->nlh);
+
+	read_lock(&nl_table_lock);
+
+	if (req->sdiag_protocol == NDIAG_PROTO_ALL) {
+		int i;
+
+		for (i = cb->args[1]; i < MAX_LINKS; i++) {
+			if (__netlink_diag_dump(skb, cb, i, s_num))
+				break;
+			s_num = 0;
+		}
+	} else {
+		if (req->sdiag_protocol >= MAX_LINKS) {
+			read_unlock(&nl_table_lock);
+			return -ENOENT;
+		}
+
+		__netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num);
+	}
+
+	read_unlock(&nl_table_lock);
+
+	return skb->len;
+}
+
+static int netlink_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
+{
+	int hdrlen = sizeof(struct netlink_diag_req);
+	struct net *net = sock_net(skb->sk);
+
+	if (nlmsg_len(h) < hdrlen)
+		return -EINVAL;
+
+	if (h->nlmsg_flags & NLM_F_DUMP) {
+		struct netlink_dump_control c = {
+			.dump = netlink_diag_dump,
+		};
+		return netlink_dump_start(net->diag_nlsk, skb, h, &c);
+	} else
+		return -EOPNOTSUPP;
+}
+
+static const struct sock_diag_handler netlink_diag_handler = {
+	.family = AF_NETLINK,
+	.dump = netlink_diag_handler_dump,
+};
+
+static int __init netlink_diag_init(void)
+{
+	return sock_diag_register(&netlink_diag_handler);
+}
+
+static void __exit netlink_diag_exit(void)
+{
+	sock_diag_unregister(&netlink_diag_handler);
+}
+
+module_init(netlink_diag_init);
+module_exit(netlink_diag_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, 16 /* AF_NETLINK */);
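
The module above answers SOCK_DIAG_BY_FAMILY dump requests on a NETLINK_SOCK_DIAG socket. A minimal userspace sketch of such a query follows; it is illustrative only, assumes a kernel and UAPI headers carrying this patch, and skips reply parsing:

    /* Sketch: dump all netlink sockets via the new NETLINK_DIAG interface. */
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/netlink.h>
    #include <linux/sock_diag.h>
    #include <linux/netlink_diag.h>

    int main(void)
    {
        struct {
            struct nlmsghdr nlh;
            struct netlink_diag_req req;
        } msg = {
            .nlh = {
                .nlmsg_len   = sizeof(msg),
                .nlmsg_type  = SOCK_DIAG_BY_FAMILY,
                .nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
            },
            .req = {
                .sdiag_family   = AF_NETLINK,
                .sdiag_protocol = NDIAG_PROTO_ALL,
                .ndiag_show     = NDIAG_SHOW_GROUPS | NDIAG_SHOW_MEMINFO,
            },
        };
        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG);

        if (fd < 0 || send(fd, &msg, sizeof(msg), 0) < 0) {
            perror("netlink diag");
            return 1;
        }
        /* Replies are NLM_F_MULTI netlink_diag_msg records, one per
         * socket, terminated by NLMSG_DONE; parsing is omitted here. */
        close(fd);
        return 0;
    }
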
diff --git a/net/nfc/llcp/commands.c b/net/nfc/llcp/commands.c
index c6bc3bd95052..b75a9b3f9e89 100644
--- a/net/nfc/llcp/commands.c
+++ b/net/nfc/llcp/commands.c
@@ -117,6 +117,88 @@ u8 *nfc_llcp_build_tlv(u8 type, u8 *value, u8 value_length, u8 *tlv_length)
 	return tlv;
 }
 
+struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdres_tlv(u8 tid, u8 sap)
+{
+	struct nfc_llcp_sdp_tlv *sdres;
+	u8 value[2];
+
+	sdres = kzalloc(sizeof(struct nfc_llcp_sdp_tlv), GFP_KERNEL);
+	if (sdres == NULL)
+		return NULL;
+
+	value[0] = tid;
+	value[1] = sap;
+
+	sdres->tlv = nfc_llcp_build_tlv(LLCP_TLV_SDRES, value, 2,
+					&sdres->tlv_len);
+	if (sdres->tlv == NULL) {
+		kfree(sdres);
+		return NULL;
+	}
+
+	sdres->tid = tid;
+	sdres->sap = sap;
+
+	INIT_HLIST_NODE(&sdres->node);
+
+	return sdres;
+}
+
+struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdreq_tlv(u8 tid, char *uri,
+						  size_t uri_len)
+{
+	struct nfc_llcp_sdp_tlv *sdreq;
+
+	pr_debug("uri: %s, len: %zu\n", uri, uri_len);
+
+	sdreq = kzalloc(sizeof(struct nfc_llcp_sdp_tlv), GFP_KERNEL);
+	if (sdreq == NULL)
+		return NULL;
+
+	sdreq->tlv_len = uri_len + 3;
+
+	if (uri[uri_len - 1] == 0)
+		sdreq->tlv_len--;
+
+	sdreq->tlv = kzalloc(sdreq->tlv_len + 1, GFP_KERNEL);
+	if (sdreq->tlv == NULL) {
+		kfree(sdreq);
+		return NULL;
+	}
+
+	sdreq->tlv[0] = LLCP_TLV_SDREQ;
+	sdreq->tlv[1] = sdreq->tlv_len - 2;
+	sdreq->tlv[2] = tid;
+
+	sdreq->tid = tid;
+	sdreq->uri = sdreq->tlv + 3;
+	memcpy(sdreq->uri, uri, uri_len);
+
+	sdreq->time = jiffies;
+
+	INIT_HLIST_NODE(&sdreq->node);
+
+	return sdreq;
+}
+
+void nfc_llcp_free_sdp_tlv(struct nfc_llcp_sdp_tlv *sdp)
+{
+	kfree(sdp->tlv);
+	kfree(sdp);
+}
+
+void nfc_llcp_free_sdp_tlv_list(struct hlist_head *head)
+{
+	struct nfc_llcp_sdp_tlv *sdp;
+	struct hlist_node *n;
+
+	hlist_for_each_entry_safe(sdp, n, head, node) {
+		hlist_del(&sdp->node);
+
+		nfc_llcp_free_sdp_tlv(sdp);
+	}
+}
+
 int nfc_llcp_parse_gb_tlv(struct nfc_llcp_local *local,
 			  u8 *tlv_array, u16 tlv_array_len)
 {
@@ -184,10 +266,10 @@ int nfc_llcp_parse_connection_tlv(struct nfc_llcp_sock *sock,
 
 		switch (type) {
 		case LLCP_TLV_MIUX:
-			sock->miu = llcp_tlv_miux(tlv) + 128;
+			sock->remote_miu = llcp_tlv_miux(tlv) + 128;
 			break;
 		case LLCP_TLV_RW:
-			sock->rw = llcp_tlv_rw(tlv);
+			sock->remote_rw = llcp_tlv_rw(tlv);
 			break;
 		case LLCP_TLV_SN:
 			break;
@@ -200,7 +282,8 @@ int nfc_llcp_parse_connection_tlv(struct nfc_llcp_sock *sock,
 		tlv += length + 2;
 	}
 
-	pr_debug("sock %p rw %d miu %d\n", sock, sock->rw, sock->miu);
+	pr_debug("sock %p rw %d miu %d\n", sock,
+		 sock->remote_rw, sock->remote_miu);
 
 	return 0;
 }
@@ -318,9 +401,9 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
 	struct sk_buff *skb;
 	u8 *service_name_tlv = NULL, service_name_tlv_length;
 	u8 *miux_tlv = NULL, miux_tlv_length;
-	u8 *rw_tlv = NULL, rw_tlv_length;
+	u8 *rw_tlv = NULL, rw_tlv_length, rw;
 	int err;
-	u16 size = 0;
+	u16 size = 0, miux;
 
 	pr_debug("Sending CONNECT\n");
 
@@ -336,11 +419,15 @@ int nfc_llcp_send_connect(struct nfc_llcp_sock *sock)
 		size += service_name_tlv_length;
 	}
 
-	miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&local->miux, 0,
+	/* If the socket parameters are not set, use the local ones */
+	miux = sock->miux > LLCP_MAX_MIUX ? local->miux : sock->miux;
+	rw = sock->rw > LLCP_MAX_RW ? local->rw : sock->rw;
+
+	miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
 				      &miux_tlv_length);
 	size += miux_tlv_length;
 
-	rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &local->rw, 0, &rw_tlv_length);
+	rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
 	size += rw_tlv_length;
 
 	pr_debug("SKB size %d SN length %zu\n", size, sock->service_name_len);
@@ -377,9 +464,9 @@ int nfc_llcp_send_cc(struct nfc_llcp_sock *sock)
 	struct nfc_llcp_local *local;
 	struct sk_buff *skb;
 	u8 *miux_tlv = NULL, miux_tlv_length;
-	u8 *rw_tlv = NULL, rw_tlv_length;
+	u8 *rw_tlv = NULL, rw_tlv_length, rw;
 	int err;
-	u16 size = 0;
+	u16 size = 0, miux;
 
 	pr_debug("Sending CC\n");
 
@@ -387,11 +474,15 @@ int nfc_llcp_send_cc(struct nfc_llcp_sock *sock)
 	if (local == NULL)
 		return -ENODEV;
 
-	miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&local->miux, 0,
+	/* If the socket parameters are not set, use the local ones */
+	miux = sock->miux > LLCP_MAX_MIUX ? local->miux : sock->miux;
+	rw = sock->rw > LLCP_MAX_RW ? local->rw : sock->rw;
+
+	miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
 				      &miux_tlv_length);
 	size += miux_tlv_length;
 
-	rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &local->rw, 0, &rw_tlv_length);
+	rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
 	size += rw_tlv_length;
 
 	skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, size);
@@ -416,48 +507,90 @@ error_tlv:
 	return err;
 }
 
-int nfc_llcp_send_snl(struct nfc_llcp_local *local, u8 tid, u8 sap)
+static struct sk_buff *nfc_llcp_allocate_snl(struct nfc_llcp_local *local,
+					     size_t tlv_length)
 {
 	struct sk_buff *skb;
 	struct nfc_dev *dev;
-	u8 *sdres_tlv = NULL, sdres_tlv_length, sdres[2];
 	u16 size = 0;
 
-	pr_debug("Sending SNL tid 0x%x sap 0x%x\n", tid, sap);
-
 	if (local == NULL)
-		return -ENODEV;
+		return ERR_PTR(-ENODEV);
 
 	dev = local->dev;
 	if (dev == NULL)
-		return -ENODEV;
-
-	sdres[0] = tid;
-	sdres[1] = sap;
-	sdres_tlv = nfc_llcp_build_tlv(LLCP_TLV_SDRES, sdres, 0,
-				       &sdres_tlv_length);
-	if (sdres_tlv == NULL)
-		return -ENOMEM;
+		return ERR_PTR(-ENODEV);
 
 	size += LLCP_HEADER_SIZE;
 	size += dev->tx_headroom + dev->tx_tailroom + NFC_HEADER_SIZE;
-	size += sdres_tlv_length;
+	size += tlv_length;
 
 	skb = alloc_skb(size, GFP_KERNEL);
-	if (skb == NULL) {
-		kfree(sdres_tlv);
-		return -ENOMEM;
-	}
+	if (skb == NULL)
		return ERR_PTR(-ENOMEM);
 
 	skb_reserve(skb, dev->tx_headroom + NFC_HEADER_SIZE);
 
 	skb = llcp_add_header(skb, LLCP_SAP_SDP, LLCP_SAP_SDP, LLCP_PDU_SNL);
 
-	memcpy(skb_put(skb, sdres_tlv_length), sdres_tlv, sdres_tlv_length);
+	return skb;
+}
+
+int nfc_llcp_send_snl_sdres(struct nfc_llcp_local *local,
+			    struct hlist_head *tlv_list, size_t tlvs_len)
+{
+	struct nfc_llcp_sdp_tlv *sdp;
+	struct hlist_node *n;
+	struct sk_buff *skb;
+
+	skb = nfc_llcp_allocate_snl(local, tlvs_len);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	hlist_for_each_entry_safe(sdp, n, tlv_list, node) {
+		memcpy(skb_put(skb, sdp->tlv_len), sdp->tlv, sdp->tlv_len);
+
+		hlist_del(&sdp->node);
+
+		nfc_llcp_free_sdp_tlv(sdp);
+	}
 
 	skb_queue_tail(&local->tx_queue, skb);
 
-	kfree(sdres_tlv);
+	return 0;
+}
+
+int nfc_llcp_send_snl_sdreq(struct nfc_llcp_local *local,
+			    struct hlist_head *tlv_list, size_t tlvs_len)
+{
+	struct nfc_llcp_sdp_tlv *sdreq;
+	struct hlist_node *n;
+	struct sk_buff *skb;
+
+	skb = nfc_llcp_allocate_snl(local, tlvs_len);
+	if (IS_ERR(skb))
+		return PTR_ERR(skb);
+
+	mutex_lock(&local->sdreq_lock);
+
+	if (hlist_empty(&local->pending_sdreqs))
+		mod_timer(&local->sdreq_timer,
+			  jiffies + msecs_to_jiffies(3 * local->remote_lto));
+
+	hlist_for_each_entry_safe(sdreq, n, tlv_list, node) {
+		pr_debug("tid %d for %s\n", sdreq->tid, sdreq->uri);
+
+		memcpy(skb_put(skb, sdreq->tlv_len), sdreq->tlv,
+		       sdreq->tlv_len);
+
+		hlist_del(&sdreq->node);
+
+		hlist_add_head(&sdreq->node, &local->pending_sdreqs);
+	}
+
+	mutex_unlock(&local->sdreq_lock);
+
+	skb_queue_tail(&local->tx_queue, skb);
 
 	return 0;
 }
@@ -532,8 +665,8 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
 
 	/* Remote is ready but has not acknowledged our frames */
 	if((sock->remote_ready &&
-	    skb_queue_len(&sock->tx_pending_queue) >= sock->rw &&
-	    skb_queue_len(&sock->tx_queue) >= 2 * sock->rw)) {
+	    skb_queue_len(&sock->tx_pending_queue) >= sock->remote_rw &&
+	    skb_queue_len(&sock->tx_queue) >= 2 * sock->remote_rw)) {
 		pr_err("Pending queue is full %d frames\n",
 		       skb_queue_len(&sock->tx_pending_queue));
 		return -ENOBUFS;
@@ -541,7 +674,7 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
 
 	/* Remote is not ready and we've been queueing enough frames */
 	if ((!sock->remote_ready &&
-	     skb_queue_len(&sock->tx_queue) >= 2 * sock->rw)) {
+	     skb_queue_len(&sock->tx_queue) >= 2 * sock->remote_rw)) {
 		pr_err("Tx queue is full %d frames\n",
 		       skb_queue_len(&sock->tx_queue));
 		return -ENOBUFS;
@@ -561,7 +694,7 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
 
 	while (remaining_len > 0) {
 
-		frag_len = min_t(size_t, sock->miu, remaining_len);
+		frag_len = min_t(size_t, sock->remote_miu, remaining_len);
 
 		pr_debug("Fragment %zd bytes remaining %zd",
 			 frag_len, remaining_len);
@@ -621,7 +754,7 @@ int nfc_llcp_send_ui_frame(struct nfc_llcp_sock *sock, u8 ssap, u8 dsap,
 
 	while (remaining_len > 0) {
 
-		frag_len = min_t(size_t, sock->miu, remaining_len);
+		frag_len = min_t(size_t, sock->remote_miu, remaining_len);
 
 		pr_debug("Fragment %zd bytes remaining %zd",
 			 frag_len, remaining_len);
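
For reference, nfc_llcp_build_sdreq_tlv() above produces a TLV of uri_len + 3 bytes (one less when the URI arrives NUL-terminated, since the NUL is not sent): a type byte, a length byte covering TID plus URI, the TID, then the raw URI bytes. A hypothetical standalone helper showing the same layout:

    #include <stdint.h>
    #include <string.h>

    /* Illustration of the SDREQ TLV wire format; mirrors the kernel code
     * above but is not part of the patch. */
    static size_t build_sdreq(uint8_t *buf, uint8_t type_sdreq,
                              uint8_t tid, const char *uri, size_t uri_len)
    {
        if (uri_len && uri[uri_len - 1] == '\0')
            uri_len--;                   /* trailing NUL is not sent */

        buf[0] = type_sdreq;             /* LLCP_TLV_SDREQ */
        buf[1] = (uint8_t)(uri_len + 1); /* value length: TID + URI */
        buf[2] = tid;
        memcpy(buf + 3, uri, uri_len);

        return uri_len + 3;              /* total TLV length */
    }

    /* e.g. tid 5 and "urn:nfc:sn:snep" (15 bytes) give an 18-byte TLV
     * with buf[1] == 16. */
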
diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c
index ee25f25f0cd6..7de0368aff0c 100644
--- a/net/nfc/llcp/llcp.c
+++ b/net/nfc/llcp/llcp.c
@@ -182,6 +182,9 @@ static void local_cleanup(struct nfc_llcp_local *local, bool listen)
 	cancel_work_sync(&local->rx_work);
 	cancel_work_sync(&local->timeout_work);
 	kfree_skb(local->rx_pending);
+	del_timer_sync(&local->sdreq_timer);
+	cancel_work_sync(&local->sdreq_timeout_work);
+	nfc_llcp_free_sdp_tlv_list(&local->pending_sdreqs);
 }
 
 static void local_release(struct kref *ref)
@@ -259,6 +262,47 @@ static void nfc_llcp_symm_timer(unsigned long data)
 	schedule_work(&local->timeout_work);
 }
 
+static void nfc_llcp_sdreq_timeout_work(struct work_struct *work)
+{
+	unsigned long time;
+	HLIST_HEAD(nl_sdres_list);
+	struct hlist_node *n;
+	struct nfc_llcp_sdp_tlv *sdp;
+	struct nfc_llcp_local *local = container_of(work, struct nfc_llcp_local,
+						    sdreq_timeout_work);
+
+	mutex_lock(&local->sdreq_lock);
+
+	time = jiffies - msecs_to_jiffies(3 * local->remote_lto);
+
+	hlist_for_each_entry_safe(sdp, n, &local->pending_sdreqs, node) {
+		if (time_after(sdp->time, time))
+			continue;
+
+		sdp->sap = LLCP_SDP_UNBOUND;
+
+		hlist_del(&sdp->node);
+
+		hlist_add_head(&sdp->node, &nl_sdres_list);
+	}
+
+	if (!hlist_empty(&local->pending_sdreqs))
+		mod_timer(&local->sdreq_timer,
+			  jiffies + msecs_to_jiffies(3 * local->remote_lto));
+
+	mutex_unlock(&local->sdreq_lock);
+
+	if (!hlist_empty(&nl_sdres_list))
+		nfc_genl_llc_send_sdres(local->dev, &nl_sdres_list);
+}
+
+static void nfc_llcp_sdreq_timer(unsigned long data)
+{
+	struct nfc_llcp_local *local = (struct nfc_llcp_local *) data;
+
+	schedule_work(&local->sdreq_timeout_work);
+}
+
 struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev)
 {
 	struct nfc_llcp_local *local, *n;
@@ -802,8 +846,6 @@ static void nfc_llcp_recv_ui(struct nfc_llcp_local *local,
 	ui_cb->dsap = dsap;
 	ui_cb->ssap = ssap;
 
-	printk("%s %d %d\n", __func__, dsap, ssap);
-
 	pr_debug("%d %d\n", dsap, ssap);
 
 	/* We're looking for a bound socket, not a client one */
@@ -900,7 +942,9 @@ static void nfc_llcp_recv_connect(struct nfc_llcp_local *local,
 	new_sock = nfc_llcp_sock(new_sk);
 	new_sock->dev = local->dev;
 	new_sock->local = nfc_llcp_local_get(local);
-	new_sock->miu = local->remote_miu;
+	new_sock->rw = sock->rw;
+	new_sock->miux = sock->miux;
+	new_sock->remote_miu = local->remote_miu;
 	new_sock->nfc_protocol = sock->nfc_protocol;
 	new_sock->dsap = ssap;
 	new_sock->target_idx = local->target_idx;
@@ -954,11 +998,11 @@ int nfc_llcp_queue_i_frames(struct nfc_llcp_sock *sock)
 
 	pr_debug("Remote ready %d tx queue len %d remote rw %d",
 		 sock->remote_ready, skb_queue_len(&sock->tx_pending_queue),
-		 sock->rw);
+		 sock->remote_rw);
 
 	/* Try to queue some I frames for transmission */
 	while (sock->remote_ready &&
-	       skb_queue_len(&sock->tx_pending_queue) < sock->rw) {
+	       skb_queue_len(&sock->tx_pending_queue) < sock->remote_rw) {
 		struct sk_buff *pdu;
 
 		pdu = skb_dequeue(&sock->tx_queue);
@@ -1178,6 +1222,10 @@ static void nfc_llcp_recv_snl(struct nfc_llcp_local *local,
 	u16 tlv_len, offset;
 	char *service_name;
 	size_t service_name_len;
+	struct nfc_llcp_sdp_tlv *sdp;
+	HLIST_HEAD(llc_sdres_list);
+	size_t sdres_tlvs_len;
+	HLIST_HEAD(nl_sdres_list);
 
 	dsap = nfc_llcp_dsap(skb);
 	ssap = nfc_llcp_ssap(skb);
@@ -1192,6 +1240,7 @@ static void nfc_llcp_recv_snl(struct nfc_llcp_local *local,
 	tlv = &skb->data[LLCP_HEADER_SIZE];
 	tlv_len = skb->len - LLCP_HEADER_SIZE;
 	offset = 0;
+	sdres_tlvs_len = 0;
 
 	while (offset < tlv_len) {
 		type = tlv[0];
@@ -1209,14 +1258,14 @@ static void nfc_llcp_recv_snl(struct nfc_llcp_local *local,
 			    !strncmp(service_name, "urn:nfc:sn:sdp",
 				     service_name_len)) {
 				sap = 1;
-				goto send_snl;
+				goto add_snl;
 			}
 
 			llcp_sock = nfc_llcp_sock_from_sn(local, service_name,
 							  service_name_len);
 			if (!llcp_sock) {
 				sap = 0;
-				goto send_snl;
+				goto add_snl;
 			}
 
 			/*
@@ -1233,7 +1282,7 @@ static void nfc_llcp_recv_snl(struct nfc_llcp_local *local,
 
 				if (sap == LLCP_SAP_MAX) {
 					sap = 0;
-					goto send_snl;
+					goto add_snl;
 				}
 
 				client_count =
@@ -1250,8 +1299,37 @@ static void nfc_llcp_recv_snl(struct nfc_llcp_local *local,
 
 			pr_debug("%p %d\n", llcp_sock, sap);
 
-send_snl:
-			nfc_llcp_send_snl(local, tid, sap);
+add_snl:
+			sdp = nfc_llcp_build_sdres_tlv(tid, sap);
+			if (sdp == NULL)
+				goto exit;
+
+			sdres_tlvs_len += sdp->tlv_len;
+			hlist_add_head(&sdp->node, &llc_sdres_list);
+			break;
+
+		case LLCP_TLV_SDRES:
+			mutex_lock(&local->sdreq_lock);
+
+			pr_debug("LLCP_TLV_SDRES: searching tid %d\n", tlv[2]);
+
+			hlist_for_each_entry(sdp, &local->pending_sdreqs, node) {
+				if (sdp->tid != tlv[2])
+					continue;
+
+				sdp->sap = tlv[3];
+
+				pr_debug("Found: uri=%s, sap=%d\n",
+					 sdp->uri, sdp->sap);
+
+				hlist_del(&sdp->node);
+
+				hlist_add_head(&sdp->node, &nl_sdres_list);
+
+				break;
+			}
+
+			mutex_unlock(&local->sdreq_lock);
 			break;
 
 		default:
@@ -1262,6 +1340,13 @@ send_snl:
 		offset += length + 2;
 		tlv += length + 2;
 	}
+
+exit:
+	if (!hlist_empty(&nl_sdres_list))
+		nfc_genl_llc_send_sdres(local->dev, &nl_sdres_list);
+
+	if (!hlist_empty(&llc_sdres_list))
+		nfc_llcp_send_snl_sdres(local, &llc_sdres_list, sdres_tlvs_len);
 }
 
 static void nfc_llcp_rx_work(struct work_struct *work)
@@ -1447,6 +1532,13 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
 	local->remote_miu = LLCP_DEFAULT_MIU;
 	local->remote_lto = LLCP_DEFAULT_LTO;
 
+	mutex_init(&local->sdreq_lock);
+	INIT_HLIST_HEAD(&local->pending_sdreqs);
+	init_timer(&local->sdreq_timer);
+	local->sdreq_timer.data = (unsigned long) local;
+	local->sdreq_timer.function = nfc_llcp_sdreq_timer;
+	INIT_WORK(&local->sdreq_timeout_work, nfc_llcp_sdreq_timeout_work);
+
 	list_add(&local->list, &llcp_devices);
 
 	return 0;
diff --git a/net/nfc/llcp/llcp.h b/net/nfc/llcp/llcp.h
index 0eae5c509504..7e87a66b02ec 100644
--- a/net/nfc/llcp/llcp.h
+++ b/net/nfc/llcp/llcp.h
@@ -46,6 +46,19 @@ struct llcp_sock_list {
 	rwlock_t	lock;
 };
 
+struct nfc_llcp_sdp_tlv {
+	u8 *tlv;
+	u8 tlv_len;
+
+	char *uri;
+	u8 tid;
+	u8 sap;
+
+	unsigned long time;
+
+	struct hlist_node node;
+};
+
 struct nfc_llcp_local {
 	struct list_head list;
 	struct nfc_dev *dev;
@@ -86,6 +99,12 @@ struct nfc_llcp_local {
 	u8  remote_opt;
 	u16 remote_wks;
 
+	struct mutex sdreq_lock;
+	struct hlist_head pending_sdreqs;
+	struct timer_list sdreq_timer;
+	struct work_struct sdreq_timeout_work;
+	u8 sdreq_next_tid;
+
 	/* sockets array */
 	struct llcp_sock_list sockets;
 	struct llcp_sock_list connecting_sockets;
@@ -105,7 +124,12 @@ struct nfc_llcp_sock {
 	char *service_name;
 	size_t service_name_len;
 	u8 rw;
-	u16 miu;
+	u16 miux;
+
+
+	/* Remote link parameters */
+	u8 remote_rw;
+	u16 remote_miu;
 
 	/* Link variables */
 	u8 send_n;
@@ -213,12 +237,20 @@ int nfc_llcp_parse_connection_tlv(struct nfc_llcp_sock *sock,
 /* Commands API */
 void nfc_llcp_recv(void *data, struct sk_buff *skb, int err);
 u8 *nfc_llcp_build_tlv(u8 type, u8 *value, u8 value_length, u8 *tlv_length);
+struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdres_tlv(u8 tid, u8 sap);
+struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdreq_tlv(u8 tid, char *uri,
+						  size_t uri_len);
+void nfc_llcp_free_sdp_tlv(struct nfc_llcp_sdp_tlv *sdp);
+void nfc_llcp_free_sdp_tlv_list(struct hlist_head *sdp_head);
 void nfc_llcp_recv(void *data, struct sk_buff *skb, int err);
 int nfc_llcp_disconnect(struct nfc_llcp_sock *sock);
 int nfc_llcp_send_symm(struct nfc_dev *dev);
 int nfc_llcp_send_connect(struct nfc_llcp_sock *sock);
 int nfc_llcp_send_cc(struct nfc_llcp_sock *sock);
-int nfc_llcp_send_snl(struct nfc_llcp_local *local, u8 tid, u8 sap);
+int nfc_llcp_send_snl_sdres(struct nfc_llcp_local *local,
+			    struct hlist_head *tlv_list, size_t tlvs_len);
+int nfc_llcp_send_snl_sdreq(struct nfc_llcp_local *local,
+			    struct hlist_head *tlv_list, size_t tlvs_len);
 int nfc_llcp_send_dm(struct nfc_llcp_local *local, u8 ssap, u8 dsap, u8 reason);
 int nfc_llcp_send_disconnect(struct nfc_llcp_sock *sock);
 int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
index 6c94447ec414..c1101e6de170 100644
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -223,6 +223,124 @@ error:
 	return ret;
 }
 
+static int nfc_llcp_setsockopt(struct socket *sock, int level, int optname,
+			       char __user *optval, unsigned int optlen)
+{
+	struct sock *sk = sock->sk;
+	struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
+	u32 opt;
+	int err = 0;
+
+	pr_debug("%p optname %d\n", sk, optname);
+
+	if (level != SOL_NFC)
+		return -ENOPROTOOPT;
+
+	lock_sock(sk);
+
+	switch (optname) {
+	case NFC_LLCP_RW:
+		if (sk->sk_state == LLCP_CONNECTED ||
+		    sk->sk_state == LLCP_BOUND ||
+		    sk->sk_state == LLCP_LISTEN) {
+			err = -EINVAL;
+			break;
+		}
+
+		if (get_user(opt, (u32 __user *) optval)) {
+			err = -EFAULT;
+			break;
+		}
+
+		if (opt > LLCP_MAX_RW) {
+			err = -EINVAL;
+			break;
+		}
+
+		llcp_sock->rw = (u8) opt;
+
+		break;
+
+	case NFC_LLCP_MIUX:
+		if (sk->sk_state == LLCP_CONNECTED ||
+		    sk->sk_state == LLCP_BOUND ||
+		    sk->sk_state == LLCP_LISTEN) {
+			err = -EINVAL;
+			break;
+		}
+
+		if (get_user(opt, (u32 __user *) optval)) {
+			err = -EFAULT;
+			break;
+		}
+
+		if (opt > LLCP_MAX_MIUX) {
+			err = -EINVAL;
+			break;
+		}
+
+		llcp_sock->miux = (u16) opt;
+
+		break;
+
+	default:
+		err = -ENOPROTOOPT;
+		break;
+	}
+
+	release_sock(sk);
+
+	pr_debug("%p rw %d miux %d\n", llcp_sock,
+		 llcp_sock->rw, llcp_sock->miux);
+
+	return err;
+}
+
+static int nfc_llcp_getsockopt(struct socket *sock, int level, int optname,
+			       char __user *optval, int __user *optlen)
+{
+	struct sock *sk = sock->sk;
+	struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
+	int len, err = 0;
+
+	pr_debug("%p optname %d\n", sk, optname);
+
+	if (level != SOL_NFC)
+		return -ENOPROTOOPT;
+
+	if (get_user(len, optlen))
+		return -EFAULT;
+
+	len = min_t(u32, len, sizeof(u32));
+
+	lock_sock(sk);
+
+	switch (optname) {
+	case NFC_LLCP_RW:
+		if (put_user(llcp_sock->rw, (u32 __user *) optval))
+			err = -EFAULT;
+
+		break;
+
+	case NFC_LLCP_MIUX:
+		if (put_user(llcp_sock->miux, (u32 __user *) optval))
+			err = -EFAULT;
+
+		break;
+
+	default:
+		err = -ENOPROTOOPT;
+		break;
+	}
+
+	release_sock(sk);
+
+	if (put_user(len, optlen))
+		return -EFAULT;
+
+	return err;
+}
+
 void nfc_llcp_accept_unlink(struct sock *sk)
 {
 	struct nfc_llcp_sock *llcp_sock = nfc_llcp_sock(sk);
@@ -405,7 +523,8 @@ static unsigned int llcp_sock_poll(struct file *file, struct socket *sock,
 		return llcp_accept_poll(sk);
 
 	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
-		mask |= POLLERR;
+		mask |= POLLERR |
+			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
 
 	if (!skb_queue_empty(&sk->sk_receive_queue))
 		mask |= POLLIN | POLLRDNORM;
@@ -543,7 +662,7 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
 
 	llcp_sock->dev = dev;
 	llcp_sock->local = nfc_llcp_local_get(local);
-	llcp_sock->miu = llcp_sock->local->remote_miu;
+	llcp_sock->remote_miu = llcp_sock->local->remote_miu;
 	llcp_sock->ssap = nfc_llcp_get_local_ssap(local);
 	if (llcp_sock->ssap == LLCP_SAP_MAX) {
 		ret = -ENOMEM;
@@ -740,8 +859,8 @@ static const struct proto_ops llcp_sock_ops = {
 	.ioctl          = sock_no_ioctl,
 	.listen         = llcp_sock_listen,
 	.shutdown       = sock_no_shutdown,
-	.setsockopt     = sock_no_setsockopt,
-	.getsockopt     = sock_no_getsockopt,
+	.setsockopt     = nfc_llcp_setsockopt,
+	.getsockopt     = nfc_llcp_getsockopt,
 	.sendmsg        = llcp_sock_sendmsg,
 	.recvmsg        = llcp_sock_recvmsg,
 	.mmap           = sock_no_mmap,
@@ -805,8 +924,10 @@ struct sock *nfc_llcp_sock_alloc(struct socket *sock, int type, gfp_t gfp)
 
 	llcp_sock->ssap = 0;
 	llcp_sock->dsap = LLCP_SAP_SDP;
-	llcp_sock->rw = LLCP_DEFAULT_RW;
-	llcp_sock->miu = LLCP_DEFAULT_MIU;
+	llcp_sock->rw = LLCP_MAX_RW + 1;
+	llcp_sock->miux = LLCP_MAX_MIUX + 1;
+	llcp_sock->remote_rw = LLCP_DEFAULT_RW;
+	llcp_sock->remote_miu = LLCP_DEFAULT_MIU;
 	llcp_sock->send_n = llcp_sock->send_ack_n = 0;
 	llcp_sock->recv_n = llcp_sock->recv_ack_n = 0;
 	llcp_sock->remote_ready = 1;
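
With llcp_sock_ops now wired to the new handlers, userspace can tune link parameters per socket as long as the socket is neither bound, listening nor connected. A hedged sketch (constants assumed from <linux/nfc.h> as added by this series; error handling trimmed):

    #include <sys/socket.h>
    #include <linux/nfc.h>

    int llcp_socket_with_params(void)
    {
        int fd = socket(AF_NFC, SOCK_STREAM, NFC_SOCKPROTO_LLCP);
        unsigned int rw = 2;     /* receive window, <= LLCP_MAX_RW */
        unsigned int miux = 256; /* MIU extension, <= LLCP_MAX_MIUX */

        /* Must happen before bind()/listen()/connect(), or the kernel
         * returns -EINVAL (see nfc_llcp_setsockopt() above). */
        setsockopt(fd, SOL_NFC, NFC_LLCP_RW, &rw, sizeof(rw));
        setsockopt(fd, SOL_NFC, NFC_LLCP_MIUX, &miux, sizeof(miux));

        return fd;  /* connect() or bind() as usual afterwards */
    }
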
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 504b883439f1..73fd51098f4d 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -53,6 +53,15 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
 	[NFC_ATTR_DEVICE_POWERED] = { .type = NLA_U8 },
 	[NFC_ATTR_IM_PROTOCOLS] = { .type = NLA_U32 },
 	[NFC_ATTR_TM_PROTOCOLS] = { .type = NLA_U32 },
+	[NFC_ATTR_LLC_PARAM_LTO] = { .type = NLA_U8 },
+	[NFC_ATTR_LLC_PARAM_RW] = { .type = NLA_U8 },
+	[NFC_ATTR_LLC_PARAM_MIUX] = { .type = NLA_U16 },
+	[NFC_ATTR_LLC_SDP] = { .type = NLA_NESTED },
+};
+
+static const struct nla_policy nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = {
+	[NFC_SDP_ATTR_URI] = { .type = NLA_STRING },
+	[NFC_SDP_ATTR_SAP] = { .type = NLA_U8 },
 };
 
 static int nfc_genl_send_target(struct sk_buff *msg, struct nfc_target *target,
@@ -348,6 +357,74 @@ free_msg:
 	return -EMSGSIZE;
 }
 
+int nfc_genl_llc_send_sdres(struct nfc_dev *dev, struct hlist_head *sdres_list)
+{
+	struct sk_buff *msg;
+	struct nlattr *sdp_attr, *uri_attr;
+	struct nfc_llcp_sdp_tlv *sdres;
+	struct hlist_node *n;
+	void *hdr;
+	int rc = -EMSGSIZE;
+	int i;
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!msg)
+		return -ENOMEM;
+
+	hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
+			  NFC_EVENT_LLC_SDRES);
+	if (!hdr)
+		goto free_msg;
+
+	if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
+		goto nla_put_failure;
+
+	sdp_attr = nla_nest_start(msg, NFC_ATTR_LLC_SDP);
+	if (sdp_attr == NULL) {
+		rc = -ENOMEM;
+		goto nla_put_failure;
+	}
+
+	i = 1;
+	hlist_for_each_entry_safe(sdres, n, sdres_list, node) {
+		pr_debug("uri: %s, sap: %d\n", sdres->uri, sdres->sap);
+
+		uri_attr = nla_nest_start(msg, i++);
+		if (uri_attr == NULL) {
+			rc = -ENOMEM;
+			goto nla_put_failure;
+		}
+
+		if (nla_put_u8(msg, NFC_SDP_ATTR_SAP, sdres->sap))
+			goto nla_put_failure;
+
+		if (nla_put_string(msg, NFC_SDP_ATTR_URI, sdres->uri))
+			goto nla_put_failure;
+
+		nla_nest_end(msg, uri_attr);
+
+		hlist_del(&sdres->node);
+
+		nfc_llcp_free_sdp_tlv(sdres);
+	}
+
+	nla_nest_end(msg, sdp_attr);
+
+	genlmsg_end(msg, hdr);
+
+	return genlmsg_multicast(msg, 0, nfc_genl_event_mcgrp.id, GFP_ATOMIC);
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+
+free_msg:
+	nlmsg_free(msg);
+
+	nfc_llcp_free_sdp_tlv_list(sdres_list);
+
+	return rc;
+}
+
 static int nfc_genl_send_device(struct sk_buff *msg, struct nfc_dev *dev,
 				u32 portid, u32 seq,
 				struct netlink_callback *cb,
@@ -859,6 +936,96 @@ exit:
 	return rc;
 }
 
+static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info)
+{
+	struct nfc_dev *dev;
+	struct nfc_llcp_local *local;
+	struct nlattr *attr, *sdp_attrs[NFC_SDP_ATTR_MAX+1];
+	u32 idx;
+	u8 tid;
+	char *uri;
+	int rc = 0, rem;
+	size_t uri_len, tlvs_len;
+	struct hlist_head sdreq_list;
+	struct nfc_llcp_sdp_tlv *sdreq;
+
+	if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
+	    !info->attrs[NFC_ATTR_LLC_SDP])
+		return -EINVAL;
+
+	idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
+
+	dev = nfc_get_device(idx);
+	if (!dev) {
+		rc = -ENODEV;
+		goto exit;
+	}
+
+	device_lock(&dev->dev);
+
+	if (dev->dep_link_up == false) {
+		rc = -ENOLINK;
+		goto exit;
+	}
+
+	local = nfc_llcp_find_local(dev);
+	if (!local) {
+		nfc_put_device(dev);
+		rc = -ENODEV;
+		goto exit;
+	}
+
+	INIT_HLIST_HEAD(&sdreq_list);
+
+	tlvs_len = 0;
+
+	nla_for_each_nested(attr, info->attrs[NFC_ATTR_LLC_SDP], rem) {
+		rc = nla_parse_nested(sdp_attrs, NFC_SDP_ATTR_MAX, attr,
+				      nfc_sdp_genl_policy);
+
+		if (rc != 0) {
+			rc = -EINVAL;
+			goto exit;
+		}
+
+		if (!sdp_attrs[NFC_SDP_ATTR_URI])
+			continue;
+
+		uri_len = nla_len(sdp_attrs[NFC_SDP_ATTR_URI]);
+		if (uri_len == 0)
+			continue;
+
+		uri = nla_data(sdp_attrs[NFC_SDP_ATTR_URI]);
+		if (uri == NULL || *uri == 0)
+			continue;
+
+		tid = local->sdreq_next_tid++;
+
+		sdreq = nfc_llcp_build_sdreq_tlv(tid, uri, uri_len);
+		if (sdreq == NULL) {
+			rc = -ENOMEM;
+			goto exit;
+		}
+
+		tlvs_len += sdreq->tlv_len;
+
+		hlist_add_head(&sdreq->node, &sdreq_list);
+	}
+
+	if (hlist_empty(&sdreq_list)) {
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	rc = nfc_llcp_send_snl_sdreq(local, &sdreq_list, tlvs_len);
+exit:
+	device_unlock(&dev->dev);
+
+	nfc_put_device(dev);
+
+	return rc;
+}
+
 static struct genl_ops nfc_genl_ops[] = {
 	{
 		.cmd = NFC_CMD_GET_DEVICE,
@@ -913,6 +1080,11 @@ static struct genl_ops nfc_genl_ops[] = {
 		.doit = nfc_genl_llc_set_params,
 		.policy = nfc_genl_policy,
 	},
+	{
+		.cmd = NFC_CMD_LLC_SDREQ,
+		.doit = nfc_genl_llc_sdreq,
+		.policy = nfc_genl_policy,
+	},
 };
 
 
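
NFC_CMD_LLC_SDREQ expects the URIs nested under NFC_ATTR_LLC_SDP, one NFC_SDP_ATTR_URI per nested entry, and the kernel answers asynchronously with an NFC_EVENT_LLC_SDRES multicast carrying (URI, SAP) pairs. A sketch of the request side using libnl-3 (assumed available; the nesting mirrors nfc_genl_llc_sdreq() above, and the nested entry index is an arbitrary non-zero value here):

    #include <stdint.h>
    #include <netlink/netlink.h>
    #include <netlink/genl/genl.h>
    #include <netlink/genl/ctrl.h>
    #include <linux/nfc.h>

    int nfc_sdreq(uint32_t dev_idx, const char *uri)
    {
        struct nl_sock *sk = nl_socket_alloc();
        struct nl_msg *msg = nlmsg_alloc();
        struct nlattr *sdp, *entry;
        int family;

        genl_connect(sk);
        family = genl_ctrl_resolve(sk, NFC_GENL_NAME);

        genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                    NFC_CMD_LLC_SDREQ, NFC_GENL_VERSION);
        nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev_idx);

        sdp = nla_nest_start(msg, NFC_ATTR_LLC_SDP);
        entry = nla_nest_start(msg, 1);  /* one entry per URI */
        nla_put_string(msg, NFC_SDP_ATTR_URI, uri);
        nla_nest_end(msg, entry);
        nla_nest_end(msg, sdp);

        nl_send_auto(sk, msg);
        /* The (URI, SAP) answer arrives later as an
         * NFC_EVENT_LLC_SDRES multicast event. */
        nlmsg_free(msg);
        nl_socket_free(sk);
        return 0;
    }
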
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index 87d914d2876a..94bfe19ba678 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -46,6 +46,8 @@ struct nfc_rawsock {
 #define to_rawsock_sk(_tx_work) \
 	((struct sock *) container_of(_tx_work, struct nfc_rawsock, tx_work))
 
+struct nfc_llcp_sdp_tlv;
+
 #ifdef CONFIG_NFC_LLCP
 
 void nfc_llcp_mac_is_down(struct nfc_dev *dev);
@@ -59,6 +61,8 @@ int nfc_llcp_data_received(struct nfc_dev *dev, struct sk_buff *skb);
 struct nfc_llcp_local *nfc_llcp_find_local(struct nfc_dev *dev);
 int __init nfc_llcp_init(void);
 void nfc_llcp_exit(void);
+void nfc_llcp_free_sdp_tlv(struct nfc_llcp_sdp_tlv *sdp);
+void nfc_llcp_free_sdp_tlv_list(struct hlist_head *head);
 
 #else
 
@@ -112,6 +116,14 @@ static inline void nfc_llcp_exit(void)
 {
 }
 
+static inline void nfc_llcp_free_sdp_tlv(struct nfc_llcp_sdp_tlv *sdp)
+{
+}
+
+static inline void nfc_llcp_free_sdp_tlv_list(struct hlist_head *sdp_head)
+{
+}
+
 #endif
 
 int __init rawsock_init(void);
@@ -144,6 +156,8 @@ int nfc_genl_dep_link_down_event(struct nfc_dev *dev);
 int nfc_genl_tm_activated(struct nfc_dev *dev, u32 protocol);
 int nfc_genl_tm_deactivated(struct nfc_dev *dev);
 
+int nfc_genl_llc_send_sdres(struct nfc_dev *dev, struct hlist_head *sdres_list);
+
 struct nfc_dev *nfc_get_device(unsigned int idx);
 
 static inline void nfc_put_device(struct nfc_dev *dev)
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index a4b724708a1a..8759265a3e46 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -369,8 +369,8 @@ static int queue_userspace_packet(struct net *net, int dp_ifindex,
 	len = sizeof(struct ovs_header);
 	len += nla_total_size(skb->len);
 	len += nla_total_size(FLOW_BUFSIZE);
-	if (upcall_info->cmd == OVS_PACKET_CMD_ACTION)
-		len += nla_total_size(8);
+	if (upcall_info->userdata)
+		len += NLA_ALIGN(upcall_info->userdata->nla_len);
 
 	user_skb = genlmsg_new(len, GFP_ATOMIC);
 	if (!user_skb) {
@@ -387,8 +387,9 @@ static int queue_userspace_packet(struct net *net, int dp_ifindex,
 	nla_nest_end(user_skb, nla);
 
 	if (upcall_info->userdata)
-		nla_put_u64(user_skb, OVS_PACKET_ATTR_USERDATA,
-			    nla_get_u64(upcall_info->userdata));
+		__nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
+			  nla_len(upcall_info->userdata),
+			  nla_data(upcall_info->userdata));
 
 	nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);
 
@@ -544,7 +545,7 @@ static int validate_userspace(const struct nlattr *attr)
 {
 	static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = {
 		[OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
-		[OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_U64 },
+		[OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_UNSPEC },
 	};
 	struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
 	int error;
@@ -680,7 +681,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
 	/* Normally, setting the skb 'protocol' field would be handled by a
 	 * call to eth_type_trans(), but it assumes there's a sending
 	 * device, which we may not have. */
-	if (ntohs(eth->h_proto) >= 1536)
+	if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
 		packet->protocol = eth->h_proto;
 	else
 		packet->protocol = htons(ETH_P_802_2);
@@ -1628,7 +1629,7 @@ static struct vport *lookup_vport(struct net *net,
 
 		vport = ovs_vport_rtnl_rcu(dp, port_no);
 		if (!vport)
-			return ERR_PTR(-ENOENT);
+			return ERR_PTR(-ENODEV);
 		return vport;
 	} else
 		return ERR_PTR(-EINVAL);
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index 031dfbf37c93..9125ad5c5aeb 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -119,7 +119,7 @@ struct ovs_skb_cb {
  * struct dp_upcall - metadata to include with a packet to send to userspace
  * @cmd: One of %OVS_PACKET_CMD_*.
  * @key: Becomes %OVS_PACKET_ATTR_KEY.  Must be nonnull.
- * @userdata: If nonnull, its u64 value is extracted and passed to userspace as
+ * @userdata: If nonnull, its variable-length value is passed to userspace as
  *	%OVS_PACKET_ATTR_USERDATA.
  * @pid: Netlink PID to which packet should be sent.  If @pid is 0 then no
  *	packet is sent and the packet is accounted in the datapath's @n_lost
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index fe0e4215c73d..332486839347 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -466,7 +466,7 @@ static __be16 parse_ethertype(struct sk_buff *skb)
 	proto = *(__be16 *) skb->data;
 	__skb_pull(skb, sizeof(__be16));
 
-	if (ntohs(proto) >= 1536)
+	if (ntohs(proto) >= ETH_P_802_3_MIN)
 		return proto;
 
 	if (skb->len < sizeof(struct llc_snap_hdr))
@@ -483,7 +483,7 @@ static __be16 parse_ethertype(struct sk_buff *skb)
 
 	__skb_pull(skb, sizeof(struct llc_snap_hdr));
 
-	if (ntohs(llc->ethertype) >= 1536)
+	if (ntohs(llc->ethertype) >= ETH_P_802_3_MIN)
 		return llc->ethertype;
 
 	return htons(ETH_P_802_2);
@@ -1038,7 +1038,7 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
 
 	if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
 		swkey->eth.type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
-		if (ntohs(swkey->eth.type) < 1536)
+		if (ntohs(swkey->eth.type) < ETH_P_802_3_MIN)
 			return -EINVAL;
 		attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
 	} else {
diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
index 0531de6c7a4a..40f8a2489c90 100644
--- a/net/openvswitch/vport-internal_dev.c
+++ b/net/openvswitch/vport-internal_dev.c
@@ -63,16 +63,6 @@ static struct rtnl_link_stats64 *internal_dev_get_stats(struct net_device *netde
 	return stats;
 }
 
-static int internal_dev_mac_addr(struct net_device *dev, void *p)
-{
-	struct sockaddr *addr = p;
-
-	if (!is_valid_ether_addr(addr->sa_data))
-		return -EADDRNOTAVAIL;
-	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
-	return 0;
-}
-
 /* Called with rcu_read_lock_bh. */
 static int internal_dev_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
@@ -126,7 +116,7 @@ static const struct net_device_ops internal_dev_netdev_ops = {
 	.ndo_open = internal_dev_open,
 	.ndo_stop = internal_dev_stop,
 	.ndo_start_xmit = internal_dev_xmit,
-	.ndo_set_mac_address = internal_dev_mac_addr,
+	.ndo_set_mac_address = eth_mac_addr,
 	.ndo_change_mtu = internal_dev_change_mtu,
 	.ndo_get_stats64 = internal_dev_get_stats,
 };
@@ -138,6 +128,7 @@ static void do_setup(struct net_device *netdev)
 	netdev->netdev_ops = &internal_dev_netdev_ops;
 
 	netdev->priv_flags &= ~IFF_TX_SKB_SHARING;
+	netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
 	netdev->destructor = internal_dev_destructor;
 	SET_ETHTOOL_OPS(netdev, &internal_dev_ethtool_ops);
 	netdev->tx_queue_len = 0;
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index 3f7961ea3c56..aee7d43114c9 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -68,10 +68,10 @@ struct vport_err_stats {
 /**
  * struct vport - one port within a datapath
  * @rcu: RCU callback head for deferred destruction.
- * @port_no: Index into @dp's @ports array.
  * @dp: Datapath to which this port belongs.
  * @upcall_portid: The Netlink port to use for packets received on this port that
  * miss the flow table.
+ * @port_no: Index into @dp's @ports array.
  * @hash_node: Element in @dev_table hash table in vport.c.
  * @dp_hash_node: Element in @datapath->ports hash table in datapath.c.
  * @ops: Class structure.
@@ -81,9 +81,9 @@ struct vport_err_stats {
  */
 struct vport {
 	struct rcu_head rcu;
-	u16 port_no;
 	struct datapath *dp;
 	u32 upcall_portid;
+	u16 port_no;
 
 	struct hlist_node hash_node;
 	struct hlist_node dp_hash_node;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 1d6793dbfbae..8e4644ff8d34 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -181,6 +181,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
181 181
182struct packet_sock; 182struct packet_sock;
183static int tpacket_snd(struct packet_sock *po, struct msghdr *msg); 183static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
184static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
185 struct packet_type *pt, struct net_device *orig_dev);
184 186
185static void *packet_previous_frame(struct packet_sock *po, 187static void *packet_previous_frame(struct packet_sock *po,
186 struct packet_ring_buffer *rb, 188 struct packet_ring_buffer *rb,
@@ -973,11 +975,11 @@ static void *packet_current_rx_frame(struct packet_sock *po,
973 975
974static void *prb_lookup_block(struct packet_sock *po, 976static void *prb_lookup_block(struct packet_sock *po,
975 struct packet_ring_buffer *rb, 977 struct packet_ring_buffer *rb,
976 unsigned int previous, 978 unsigned int idx,
977 int status) 979 int status)
978{ 980{
979 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb); 981 struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
980 struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, previous); 982 struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);
981 983
982 if (status != BLOCK_STATUS(pbd)) 984 if (status != BLOCK_STATUS(pbd))
983 return NULL; 985 return NULL;
@@ -1041,6 +1043,29 @@ static void packet_increment_head(struct packet_ring_buffer *buff)
1041 buff->head = buff->head != buff->frame_max ? buff->head+1 : 0; 1043 buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
1042} 1044}
1043 1045
1046static bool packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1047{
1048 struct sock *sk = &po->sk;
1049 bool has_room;
1050
1051 if (po->prot_hook.func != tpacket_rcv)
1052 return (atomic_read(&sk->sk_rmem_alloc) + skb->truesize)
1053 <= sk->sk_rcvbuf;
1054
1055 spin_lock(&sk->sk_receive_queue.lock);
1056 if (po->tp_version == TPACKET_V3)
1057 has_room = prb_lookup_block(po, &po->rx_ring,
1058 po->rx_ring.prb_bdqc.kactive_blk_num,
1059 TP_STATUS_KERNEL);
1060 else
1061 has_room = packet_lookup_frame(po, &po->rx_ring,
1062 po->rx_ring.head,
1063 TP_STATUS_KERNEL);
1064 spin_unlock(&sk->sk_receive_queue.lock);
1065
1066 return has_room;
1067}
1068
1044static void packet_sock_destruct(struct sock *sk) 1069static void packet_sock_destruct(struct sock *sk)
1045{ 1070{
1046 skb_queue_purge(&sk->sk_error_queue); 1071 skb_queue_purge(&sk->sk_error_queue);
@@ -1066,16 +1091,16 @@ static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
1066 return x; 1091 return x;
1067} 1092}
1068 1093
1069static struct sock *fanout_demux_hash(struct packet_fanout *f, struct sk_buff *skb, unsigned int num) 1094static unsigned int fanout_demux_hash(struct packet_fanout *f,
1095 struct sk_buff *skb,
1096 unsigned int num)
1070{ 1097{
1071 u32 idx, hash = skb->rxhash; 1098 return (((u64)skb->rxhash) * num) >> 32;
1072
1073 idx = ((u64)hash * num) >> 32;
1074
1075 return f->arr[idx];
1076} 1099}
1077 1100
1078static struct sock *fanout_demux_lb(struct packet_fanout *f, struct sk_buff *skb, unsigned int num) 1101static unsigned int fanout_demux_lb(struct packet_fanout *f,
1102 struct sk_buff *skb,
1103 unsigned int num)
1079{ 1104{
1080 int cur, old; 1105 int cur, old;
1081 1106
@@ -1083,14 +1108,40 @@ static struct sock *fanout_demux_lb(struct packet_fanout *f, struct sk_buff *skb
1083 while ((old = atomic_cmpxchg(&f->rr_cur, cur, 1108 while ((old = atomic_cmpxchg(&f->rr_cur, cur,
1084 fanout_rr_next(f, num))) != cur) 1109 fanout_rr_next(f, num))) != cur)
1085 cur = old; 1110 cur = old;
1086 return f->arr[cur]; 1111 return cur;
1112}
1113
1114static unsigned int fanout_demux_cpu(struct packet_fanout *f,
1115 struct sk_buff *skb,
1116 unsigned int num)
1117{
1118 return smp_processor_id() % num;
1087} 1119}
1088 1120
1089static struct sock *fanout_demux_cpu(struct packet_fanout *f, struct sk_buff *skb, unsigned int num) 1121static unsigned int fanout_demux_rollover(struct packet_fanout *f,
1122 struct sk_buff *skb,
1123 unsigned int idx, unsigned int skip,
1124 unsigned int num)
1090{ 1125{
1091 unsigned int cpu = smp_processor_id(); 1126 unsigned int i, j;
1127
1128 i = j = min_t(int, f->next[idx], num - 1);
1129 do {
1130 if (i != skip && packet_rcv_has_room(pkt_sk(f->arr[i]), skb)) {
1131 if (i != j)
1132 f->next[idx] = i;
1133 return i;
1134 }
1135 if (++i == num)
1136 i = 0;
1137 } while (i != j);
1092 1138
1093 return f->arr[cpu % num]; 1139 return idx;
1140}
1141
1142static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
1143{
1144 return f->flags & (flag >> 8);
1094} 1145}
1095 1146
1096static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev, 1147static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
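
fanout_demux_hash() now returns an index rather than a socket pointer, and keeps the multiply-and-shift mapping: ((u64)rxhash * num) >> 32 projects a uniform 32-bit hash onto [0, num) without a modulo on the fast path. A minimal userspace sketch of the same mapping (the scrambling constant below is just a stand-in for a real flow hash):

#include <stdint.h>
#include <stdio.h>

/* Map a uniform 32-bit hash onto [0, num): (hash * num) / 2^32,
 * computed in 64-bit, avoiding a division on the fast path. */
static unsigned int demux(uint32_t rxhash, unsigned int num)
{
	return ((uint64_t)rxhash * num) >> 32;
}

int main(void)
{
	unsigned int hits[4] = { 0, 0, 0, 0 };
	uint32_t h;

	for (h = 0; h < 400000; h++)
		hits[demux(h * 2654435761u, 4)]++;	/* scrambled stand-in hash */
	for (int i = 0; i < 4; i++)
		printf("member %d: %u packets\n", i, hits[i]);
	return 0;
}
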
@@ -1099,7 +1150,7 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1099 struct packet_fanout *f = pt->af_packet_priv; 1150 struct packet_fanout *f = pt->af_packet_priv;
1100 unsigned int num = f->num_members; 1151 unsigned int num = f->num_members;
1101 struct packet_sock *po; 1152 struct packet_sock *po;
1102 struct sock *sk; 1153 unsigned int idx;
1103 1154
1104 if (!net_eq(dev_net(dev), read_pnet(&f->net)) || 1155 if (!net_eq(dev_net(dev), read_pnet(&f->net)) ||
1105 !num) { 1156 !num) {
@@ -1110,23 +1161,31 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1110 switch (f->type) { 1161 switch (f->type) {
1111 case PACKET_FANOUT_HASH: 1162 case PACKET_FANOUT_HASH:
1112 default: 1163 default:
1113 if (f->defrag) { 1164 if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
1114 skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET); 1165 skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
1115 if (!skb) 1166 if (!skb)
1116 return 0; 1167 return 0;
1117 } 1168 }
1118 skb_get_rxhash(skb); 1169 skb_get_rxhash(skb);
1119 sk = fanout_demux_hash(f, skb, num); 1170 idx = fanout_demux_hash(f, skb, num);
1120 break; 1171 break;
1121 case PACKET_FANOUT_LB: 1172 case PACKET_FANOUT_LB:
1122 sk = fanout_demux_lb(f, skb, num); 1173 idx = fanout_demux_lb(f, skb, num);
1123 break; 1174 break;
1124 case PACKET_FANOUT_CPU: 1175 case PACKET_FANOUT_CPU:
1125 sk = fanout_demux_cpu(f, skb, num); 1176 idx = fanout_demux_cpu(f, skb, num);
1177 break;
1178 case PACKET_FANOUT_ROLLOVER:
1179 idx = fanout_demux_rollover(f, skb, 0, (unsigned int) -1, num);
1126 break; 1180 break;
1127 } 1181 }
1128 1182
1129 po = pkt_sk(sk); 1183 po = pkt_sk(f->arr[idx]);
1184 if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER) &&
1185 unlikely(!packet_rcv_has_room(po, skb))) {
1186 idx = fanout_demux_rollover(f, skb, idx, idx, num);
1187 po = pkt_sk(f->arr[idx]);
1188 }
1130 1189
1131 return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev); 1190 return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1132} 1191}
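
With the flags byte in place, userspace composes the PACKET_FANOUT argument as the group id in the low 16 bits and mode plus flag bits in the high 16. A hedged sketch (requires CAP_NET_RAW, and uapi headers new enough to define PACKET_FANOUT_FLAG_ROLLOVER):

#include <stdio.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>

int main(void)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	int group = 42;		/* fanout group id, low 16 bits */
	int arg;

	if (fd < 0) {
		perror("socket");	/* needs CAP_NET_RAW */
		return 1;
	}

	/* Hash demux with rollover on overflow.  Note that per the new
	 * fanout_add() check, using PACKET_FANOUT_ROLLOVER as the mode
	 * together with the flag form is rejected as redundant. */
	arg = group | ((PACKET_FANOUT_HASH | PACKET_FANOUT_FLAG_ROLLOVER) << 16);
	if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg)) < 0)
		perror("setsockopt(PACKET_FANOUT)");
	return 0;
}
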
@@ -1175,10 +1234,13 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1175 struct packet_sock *po = pkt_sk(sk); 1234 struct packet_sock *po = pkt_sk(sk);
1176 struct packet_fanout *f, *match; 1235 struct packet_fanout *f, *match;
1177 u8 type = type_flags & 0xff; 1236 u8 type = type_flags & 0xff;
1178 u8 defrag = (type_flags & PACKET_FANOUT_FLAG_DEFRAG) ? 1 : 0; 1237 u8 flags = type_flags >> 8;
1179 int err; 1238 int err;
1180 1239
1181 switch (type) { 1240 switch (type) {
1241 case PACKET_FANOUT_ROLLOVER:
1242 if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
1243 return -EINVAL;
1182 case PACKET_FANOUT_HASH: 1244 case PACKET_FANOUT_HASH:
1183 case PACKET_FANOUT_LB: 1245 case PACKET_FANOUT_LB:
1184 case PACKET_FANOUT_CPU: 1246 case PACKET_FANOUT_CPU:
@@ -1203,7 +1265,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1203 } 1265 }
1204 } 1266 }
1205 err = -EINVAL; 1267 err = -EINVAL;
1206 if (match && match->defrag != defrag) 1268 if (match && match->flags != flags)
1207 goto out; 1269 goto out;
1208 if (!match) { 1270 if (!match) {
1209 err = -ENOMEM; 1271 err = -ENOMEM;
@@ -1213,7 +1275,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1213 write_pnet(&match->net, sock_net(sk)); 1275 write_pnet(&match->net, sock_net(sk));
1214 match->id = id; 1276 match->id = id;
1215 match->type = type; 1277 match->type = type;
1216 match->defrag = defrag; 1278 match->flags = flags;
1217 atomic_set(&match->rr_cur, 0); 1279 atomic_set(&match->rr_cur, 0);
1218 INIT_LIST_HEAD(&match->list); 1280 INIT_LIST_HEAD(&match->list);
1219 spin_lock_init(&match->lock); 1281 spin_lock_init(&match->lock);
@@ -1450,6 +1512,8 @@ retry:
1450 if (unlikely(extra_len == 4)) 1512 if (unlikely(extra_len == 4))
1451 skb->no_fcs = 1; 1513 skb->no_fcs = 1;
1452 1514
1515 skb_probe_transport_header(skb, 0);
1516
1453 dev_queue_xmit(skb); 1517 dev_queue_xmit(skb);
1454 rcu_read_unlock(); 1518 rcu_read_unlock();
1455 return len; 1519 return len;
@@ -1880,6 +1944,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
1880 1944
1881 skb_reserve(skb, hlen); 1945 skb_reserve(skb, hlen);
1882 skb_reset_network_header(skb); 1946 skb_reset_network_header(skb);
1947 skb_probe_transport_header(skb, 0);
1883 1948
1884 if (po->tp_tx_has_off) { 1949 if (po->tp_tx_has_off) {
1885 int off_min, off_max, off; 1950 int off_min, off_max, off;
@@ -2289,6 +2354,8 @@ static int packet_snd(struct socket *sock,
2289 len += vnet_hdr_len; 2354 len += vnet_hdr_len;
2290 } 2355 }
2291 2356
2357 skb_probe_transport_header(skb, reserve);
2358
2292 if (unlikely(extra_len == 4)) 2359 if (unlikely(extra_len == 4))
2293 skb->no_fcs = 1; 2360 skb->no_fcs = 1;
2294 2361
@@ -3240,7 +3307,8 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
3240 case PACKET_FANOUT: 3307 case PACKET_FANOUT:
3241 val = (po->fanout ? 3308 val = (po->fanout ?
3242 ((u32)po->fanout->id | 3309 ((u32)po->fanout->id |
3243 ((u32)po->fanout->type << 16)) : 3310 ((u32)po->fanout->type << 16) |
3311 ((u32)po->fanout->flags << 24)) :
3244 0); 3312 0);
3245 break; 3313 break;
3246 case PACKET_TX_HAS_OFF: 3314 case PACKET_TX_HAS_OFF:
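
The getsockopt() side now reports the flag bits as well: group id in bits 0-15, demux mode in bits 16-23, flags in bits 24-31 (kernels without this change leave the top byte zero). A decoding sketch:

#include <stdio.h>

/* Layout per the getsockopt(PACKET_FANOUT) change above. */
static void decode_fanout(unsigned int val)
{
	printf("id=%u mode=%u flags=0x%x\n",
	       val & 0xffff,		/* bits  0-15: group id   */
	       (val >> 16) & 0xff,	/* bits 16-23: demux mode */
	       (val >> 24) & 0xff);	/* bits 24-31: flag bits  */
}

int main(void)
{
	/* e.g. group 42, PACKET_FANOUT_HASH (0), rollover flag (0x1000 >> 8) */
	decode_fanout(42u | (0u << 16) | (0x10u << 24));
	return 0;
}
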
diff --git a/net/packet/internal.h b/net/packet/internal.h
index e84cab8cb7a9..e891f025a1b9 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -77,10 +77,11 @@ struct packet_fanout {
77 unsigned int num_members; 77 unsigned int num_members;
78 u16 id; 78 u16 id;
79 u8 type; 79 u8 type;
80 u8 defrag; 80 u8 flags;
81 atomic_t rr_cur; 81 atomic_t rr_cur;
82 struct list_head list; 82 struct list_head list;
83 struct sock *arr[PACKET_FANOUT_MAX]; 83 struct sock *arr[PACKET_FANOUT_MAX];
84 int next[PACKET_FANOUT_MAX];
84 spinlock_t lock; 85 spinlock_t lock;
85 atomic_t sk_ref; 86 atomic_t sk_ref;
86 struct packet_type prot_hook ____cacheline_aligned_in_smp; 87 struct packet_type prot_hook ____cacheline_aligned_in_smp;
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
index 0193630d3061..dc15f4300808 100644
--- a/net/phonet/pn_netlink.c
+++ b/net/phonet/pn_netlink.c
@@ -61,7 +61,7 @@ static const struct nla_policy ifa_phonet_policy[IFA_MAX+1] = {
61 [IFA_LOCAL] = { .type = NLA_U8 }, 61 [IFA_LOCAL] = { .type = NLA_U8 },
62}; 62};
63 63
64static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *attr) 64static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
65{ 65{
66 struct net *net = sock_net(skb->sk); 66 struct net *net = sock_net(skb->sk);
67 struct nlattr *tb[IFA_MAX+1]; 67 struct nlattr *tb[IFA_MAX+1];
@@ -224,7 +224,7 @@ static const struct nla_policy rtm_phonet_policy[RTA_MAX+1] = {
224 [RTA_OIF] = { .type = NLA_U32 }, 224 [RTA_OIF] = { .type = NLA_U32 },
225}; 225};
226 226
227static int route_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *attr) 227static int route_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
228{ 228{
229 struct net *net = sock_net(skb->sk); 229 struct net *net = sock_net(skb->sk);
230 struct nlattr *tb[RTA_MAX+1]; 230 struct nlattr *tb[RTA_MAX+1];
diff --git a/net/rfkill/rfkill-regulator.c b/net/rfkill/rfkill-regulator.c
index 4b5ab21ecb24..d11ac79246e4 100644
--- a/net/rfkill/rfkill-regulator.c
+++ b/net/rfkill/rfkill-regulator.c
@@ -51,7 +51,7 @@ static int rfkill_regulator_set_block(void *data, bool blocked)
51 return 0; 51 return 0;
52} 52}
53 53
54struct rfkill_ops rfkill_regulator_ops = { 54static struct rfkill_ops rfkill_regulator_ops = {
55 .set_block = rfkill_regulator_set_block, 55 .set_block = rfkill_regulator_set_block,
56}; 56};
57 57
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 8579c4bb20c9..fd7072827a40 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -982,7 +982,7 @@ done:
982 return ret; 982 return ret;
983} 983}
984 984
985static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg) 985static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n)
986{ 986{
987 struct net *net = sock_net(skb->sk); 987 struct net *net = sock_net(skb->sk);
988 struct nlattr *tca[TCA_ACT_MAX + 1]; 988 struct nlattr *tca[TCA_ACT_MAX + 1];
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 964f5e4f4b8a..8e118af90973 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -22,7 +22,6 @@
22#include <linux/skbuff.h> 22#include <linux/skbuff.h>
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/kmod.h> 24#include <linux/kmod.h>
25#include <linux/netlink.h>
26#include <linux/err.h> 25#include <linux/err.h>
27#include <linux/slab.h> 26#include <linux/slab.h>
28#include <net/net_namespace.h> 27#include <net/net_namespace.h>
@@ -118,7 +117,7 @@ static inline u32 tcf_auto_prio(struct tcf_proto *tp)
118 117
119/* Add/change/delete/get a filter node */ 118/* Add/change/delete/get a filter node */
120 119
121static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg) 120static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n)
122{ 121{
123 struct net *net = sock_net(skb->sk); 122 struct net *net = sock_net(skb->sk);
124 struct nlattr *tca[TCA_MAX + 1]; 123 struct nlattr *tca[TCA_MAX + 1];
@@ -141,7 +140,12 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
141 140
142 if ((n->nlmsg_type != RTM_GETTFILTER) && !capable(CAP_NET_ADMIN)) 141 if ((n->nlmsg_type != RTM_GETTFILTER) && !capable(CAP_NET_ADMIN))
143 return -EPERM; 142 return -EPERM;
143
144replay: 144replay:
145 err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL);
146 if (err < 0)
147 return err;
148
145 t = nlmsg_data(n); 149 t = nlmsg_data(n);
146 protocol = TC_H_MIN(t->tcm_info); 150 protocol = TC_H_MIN(t->tcm_info);
147 prio = TC_H_MAJ(t->tcm_info); 151 prio = TC_H_MAJ(t->tcm_info);
@@ -164,10 +168,6 @@ replay:
164 if (dev == NULL) 168 if (dev == NULL)
165 return -ENODEV; 169 return -ENODEV;
166 170
167 err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL);
168 if (err < 0)
169 return err;
170
171 /* Find qdisc */ 171 /* Find qdisc */
172 if (!parent) { 172 if (!parent) {
173 q = dev->qdisc; 173 q = dev->qdisc;
@@ -427,7 +427,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
427 const struct Qdisc_class_ops *cops; 427 const struct Qdisc_class_ops *cops;
428 struct tcf_dump_args arg; 428 struct tcf_dump_args arg;
429 429
430 if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) 430 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
431 return skb->len; 431 return skb->len;
432 dev = __dev_get_by_index(net, tcm->tcm_ifindex); 432 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
433 if (!dev) 433 if (!dev)
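
The tc_ctl_tfilter() reordering parses attributes before any device lookup and re-parses under the replay label, so a request replayed after qdisc module autoload is validated again and tca[] can never be read uninitialized. The same discipline as a standalone sketch (parse_msg() and lookup_dev() are hypothetical stubs, only the ordering is the point):

#include <stdio.h>

static int parse_msg(const char *msg)
{
	return msg ? 0 : -1;	/* stands in for nlmsg_parse() */
}

static const char *lookup_dev(int ifindex)
{
	return ifindex == 1 ? "eth0" : NULL;	/* __dev_get_by_index() */
}

static int handle_request(const char *msg, int ifindex)
{
	/* 1. Validate the whole message up front (and again on replay). */
	if (parse_msg(msg) < 0)
		return -1;

	/* 2. Only then resolve the objects the attributes refer to. */
	if (!lookup_dev(ifindex))
		return -2;

	return 0;
}

int main(void)
{
	printf("ok=%d bad-msg=%d bad-dev=%d\n",
	       handle_request("RTM_NEWTFILTER", 1),
	       handle_request(NULL, 1),
	       handle_request("RTM_NEWTFILTER", 7));
	return 0;
}
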
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index c297e2a8e2a1..2b935e7cfe7b 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -971,13 +971,13 @@ check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
971 * Delete/get qdisc. 971 * Delete/get qdisc.
972 */ 972 */
973 973
974static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) 974static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
975{ 975{
976 struct net *net = sock_net(skb->sk); 976 struct net *net = sock_net(skb->sk);
977 struct tcmsg *tcm = nlmsg_data(n); 977 struct tcmsg *tcm = nlmsg_data(n);
978 struct nlattr *tca[TCA_MAX + 1]; 978 struct nlattr *tca[TCA_MAX + 1];
979 struct net_device *dev; 979 struct net_device *dev;
980 u32 clid = tcm->tcm_parent; 980 u32 clid;
981 struct Qdisc *q = NULL; 981 struct Qdisc *q = NULL;
982 struct Qdisc *p = NULL; 982 struct Qdisc *p = NULL;
983 int err; 983 int err;
@@ -985,14 +985,15 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
985 if ((n->nlmsg_type != RTM_GETQDISC) && !capable(CAP_NET_ADMIN)) 985 if ((n->nlmsg_type != RTM_GETQDISC) && !capable(CAP_NET_ADMIN))
986 return -EPERM; 986 return -EPERM;
987 987
988 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
989 if (!dev)
990 return -ENODEV;
991
992 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); 988 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
993 if (err < 0) 989 if (err < 0)
994 return err; 990 return err;
995 991
992 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
993 if (!dev)
994 return -ENODEV;
995
996 clid = tcm->tcm_parent;
996 if (clid) { 997 if (clid) {
997 if (clid != TC_H_ROOT) { 998 if (clid != TC_H_ROOT) {
998 if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) { 999 if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
@@ -1038,7 +1039,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1038 * Create/change qdisc. 1039 * Create/change qdisc.
1039 */ 1040 */
1040 1041
1041static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg) 1042static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
1042{ 1043{
1043 struct net *net = sock_net(skb->sk); 1044 struct net *net = sock_net(skb->sk);
1044 struct tcmsg *tcm; 1045 struct tcmsg *tcm;
@@ -1053,6 +1054,10 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1053 1054
1054replay: 1055replay:
1055 /* Reinit, just in case something touches this. */ 1056 /* Reinit, just in case something touches this. */
1057 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
1058 if (err < 0)
1059 return err;
1060
1056 tcm = nlmsg_data(n); 1061 tcm = nlmsg_data(n);
1057 clid = tcm->tcm_parent; 1062 clid = tcm->tcm_parent;
1058 q = p = NULL; 1063 q = p = NULL;
@@ -1061,9 +1066,6 @@ replay:
1061 if (!dev) 1066 if (!dev)
1062 return -ENODEV; 1067 return -ENODEV;
1063 1068
1064 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
1065 if (err < 0)
1066 return err;
1067 1069
1068 if (clid) { 1070 if (clid) {
1069 if (clid != TC_H_ROOT) { 1071 if (clid != TC_H_ROOT) {
@@ -1372,7 +1374,7 @@ done:
1372 1374
1373 1375
1374 1376
1375static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg) 1377static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n)
1376{ 1378{
1377 struct net *net = sock_net(skb->sk); 1379 struct net *net = sock_net(skb->sk);
1378 struct tcmsg *tcm = nlmsg_data(n); 1380 struct tcmsg *tcm = nlmsg_data(n);
@@ -1382,22 +1384,22 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1382 const struct Qdisc_class_ops *cops; 1384 const struct Qdisc_class_ops *cops;
1383 unsigned long cl = 0; 1385 unsigned long cl = 0;
1384 unsigned long new_cl; 1386 unsigned long new_cl;
1385 u32 portid = tcm->tcm_parent; 1387 u32 portid;
1386 u32 clid = tcm->tcm_handle; 1388 u32 clid;
1387 u32 qid = TC_H_MAJ(clid); 1389 u32 qid;
1388 int err; 1390 int err;
1389 1391
1390 if ((n->nlmsg_type != RTM_GETTCLASS) && !capable(CAP_NET_ADMIN)) 1392 if ((n->nlmsg_type != RTM_GETTCLASS) && !capable(CAP_NET_ADMIN))
1391 return -EPERM; 1393 return -EPERM;
1392 1394
1393 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1394 if (!dev)
1395 return -ENODEV;
1396
1397 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); 1395 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
1398 if (err < 0) 1396 if (err < 0)
1399 return err; 1397 return err;
1400 1398
1399 dev = __dev_get_by_index(net, tcm->tcm_ifindex);
1400 if (!dev)
1401 return -ENODEV;
1402
1401 /* 1403 /*
1402 parent == TC_H_UNSPEC - unspecified parent. 1404 parent == TC_H_UNSPEC - unspecified parent.
1403 parent == TC_H_ROOT - class is root, which has no parent. 1405 parent == TC_H_ROOT - class is root, which has no parent.
@@ -1413,6 +1415,10 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1413 1415
1414 /* Step 1. Determine qdisc handle X:0 */ 1416 /* Step 1. Determine qdisc handle X:0 */
1415 1417
1418 portid = tcm->tcm_parent;
1419 clid = tcm->tcm_handle;
1420 qid = TC_H_MAJ(clid);
1421
1416 if (portid != TC_H_ROOT) { 1422 if (portid != TC_H_ROOT) {
1417 u32 qid1 = TC_H_MAJ(portid); 1423 u32 qid1 = TC_H_MAJ(portid);
1418 1424
@@ -1636,7 +1642,7 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
1636 struct net_device *dev; 1642 struct net_device *dev;
1637 int t, s_t; 1643 int t, s_t;
1638 1644
1639 if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) 1645 if (nlmsg_len(cb->nlh) < sizeof(*tcm))
1640 return 0; 1646 return 0;
1641 dev = dev_get_by_index(net, tcm->tcm_ifindex); 1647 dev = dev_get_by_index(net, tcm->tcm_ifindex);
1642 if (!dev) 1648 if (!dev)
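
The dump-side checks switch from open-coded NLMSG_LENGTH() comparisons to nlmsg_len(), which is simply the nlmsg_len field minus the aligned header, so the two forms are arithmetically identical. The equivalence with the uapi macros:

#include <stdio.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int main(void)
{
	struct nlmsghdr nlh = {
		.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)),
	};
	/* kernel nlmsg_len(&nlh) == nlmsg_len field minus NLMSG_HDRLEN */
	int payload = nlh.nlmsg_len - NLMSG_HDRLEN;

	printf("payload=%d need>=%zu -> %s\n", payload, sizeof(struct tcmsg),
	       payload >= (int)sizeof(struct tcmsg) ? "ok" : "truncated");
	return 0;
}
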
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 571f1d211f4d..79b1876b6cd2 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -981,6 +981,7 @@ static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
981 [TCA_HTB_INIT] = { .len = sizeof(struct tc_htb_glob) }, 981 [TCA_HTB_INIT] = { .len = sizeof(struct tc_htb_glob) },
982 [TCA_HTB_CTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE }, 982 [TCA_HTB_CTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
983 [TCA_HTB_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE }, 983 [TCA_HTB_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
984 [TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 },
984}; 985};
985 986
986static void htb_work_func(struct work_struct *work) 987static void htb_work_func(struct work_struct *work)
@@ -994,7 +995,7 @@ static void htb_work_func(struct work_struct *work)
994static int htb_init(struct Qdisc *sch, struct nlattr *opt) 995static int htb_init(struct Qdisc *sch, struct nlattr *opt)
995{ 996{
996 struct htb_sched *q = qdisc_priv(sch); 997 struct htb_sched *q = qdisc_priv(sch);
997 struct nlattr *tb[TCA_HTB_INIT + 1]; 998 struct nlattr *tb[TCA_HTB_MAX + 1];
998 struct tc_htb_glob *gopt; 999 struct tc_htb_glob *gopt;
999 int err; 1000 int err;
1000 int i; 1001 int i;
@@ -1002,20 +1003,16 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
1002 if (!opt) 1003 if (!opt)
1003 return -EINVAL; 1004 return -EINVAL;
1004 1005
1005 err = nla_parse_nested(tb, TCA_HTB_INIT, opt, htb_policy); 1006 err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy);
1006 if (err < 0) 1007 if (err < 0)
1007 return err; 1008 return err;
1008 1009
1009 if (tb[TCA_HTB_INIT] == NULL) { 1010 if (!tb[TCA_HTB_INIT])
1010 pr_err("HTB: hey probably you have bad tc tool ?\n");
1011 return -EINVAL; 1011 return -EINVAL;
1012 } 1012
1013 gopt = nla_data(tb[TCA_HTB_INIT]); 1013 gopt = nla_data(tb[TCA_HTB_INIT]);
1014 if (gopt->version != HTB_VER >> 16) { 1014 if (gopt->version != HTB_VER >> 16)
1015 pr_err("HTB: need tc/htb version %d (minor is %d), you have %d\n",
1016 HTB_VER >> 16, HTB_VER & 0xffff, gopt->version);
1017 return -EINVAL; 1015 return -EINVAL;
1018 }
1019 1016
1020 err = qdisc_class_hash_init(&q->clhash); 1017 err = qdisc_class_hash_init(&q->clhash);
1021 if (err < 0) 1018 if (err < 0)
@@ -1027,10 +1024,13 @@ static int htb_init(struct Qdisc *sch, struct nlattr *opt)
1027 INIT_WORK(&q->work, htb_work_func); 1024 INIT_WORK(&q->work, htb_work_func);
1028 skb_queue_head_init(&q->direct_queue); 1025 skb_queue_head_init(&q->direct_queue);
1029 1026
1030 q->direct_qlen = qdisc_dev(sch)->tx_queue_len; 1027 if (tb[TCA_HTB_DIRECT_QLEN])
1031 if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */ 1028 q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
1032 q->direct_qlen = 2; 1029 else {
1033 1030 q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
1031 if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
1032 q->direct_qlen = 2;
1033 }
1034 if ((q->rate2quantum = gopt->rate2quantum) < 1) 1034 if ((q->rate2quantum = gopt->rate2quantum) < 1)
1035 q->rate2quantum = 1; 1035 q->rate2quantum = 1;
1036 q->defcls = gopt->defcls; 1036 q->defcls = gopt->defcls;
@@ -1056,7 +1056,8 @@ static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1056 nest = nla_nest_start(skb, TCA_OPTIONS); 1056 nest = nla_nest_start(skb, TCA_OPTIONS);
1057 if (nest == NULL) 1057 if (nest == NULL)
1058 goto nla_put_failure; 1058 goto nla_put_failure;
1059 if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt)) 1059 if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt) ||
1060 nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen))
1060 goto nla_put_failure; 1061 goto nla_put_failure;
1061 nla_nest_end(skb, nest); 1062 nla_nest_end(skb, nest);
1062 1063
@@ -1311,7 +1312,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
1311 struct htb_sched *q = qdisc_priv(sch); 1312 struct htb_sched *q = qdisc_priv(sch);
1312 struct htb_class *cl = (struct htb_class *)*arg, *parent; 1313 struct htb_class *cl = (struct htb_class *)*arg, *parent;
1313 struct nlattr *opt = tca[TCA_OPTIONS]; 1314 struct nlattr *opt = tca[TCA_OPTIONS];
1314 struct nlattr *tb[__TCA_HTB_MAX]; 1315 struct nlattr *tb[TCA_HTB_MAX + 1];
1315 struct tc_htb_opt *hopt; 1316 struct tc_htb_opt *hopt;
1316 1317
1317 /* extract all subattrs from opt attr */ 1318 /* extract all subattrs from opt attr */
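
Userspace can now pin the direct queue length by placing TCA_HTB_DIRECT_QLEN next to TCA_HTB_INIT inside the nested TCA_OPTIONS; when the attribute is absent, htb_init() keeps the old tx_queue_len fallback (iproute2 grew a matching direct_qlen keyword around the same time). A message-assembly sketch in the iproute2 addattr style, build only with no netlink I/O, assuming headers that already define TCA_HTB_DIRECT_QLEN:

#include <stdio.h>
#include <string.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>

/* iproute2-style attribute append (the addattr_l pattern); the caller
 * guarantees the buffer is large enough for this fixed message. */
static void addattr(struct nlmsghdr *n, int type, const void *data, int alen)
{
	struct rtattr *rta = (struct rtattr *)((char *)n + NLMSG_ALIGN(n->nlmsg_len));

	rta->rta_type = type;
	rta->rta_len = RTA_LENGTH(alen);
	memcpy(RTA_DATA(rta), data, alen);
	n->nlmsg_len = NLMSG_ALIGN(n->nlmsg_len) + RTA_ALIGN(rta->rta_len);
}

int main(void)
{
	char buf[512] = { 0 };
	struct nlmsghdr *n = (struct nlmsghdr *)buf;
	struct tc_htb_glob glob = {
		.version = HTB_VER >> 16,
		.rate2quantum = 10,
		.defcls = 0x20,
	};
	__u32 direct_qlen = 1000;

	n->nlmsg_len = NLMSG_LENGTH(0);
	addattr(n, TCA_HTB_INIT, &glob, sizeof(glob));
	addattr(n, TCA_HTB_DIRECT_QLEN, &direct_qlen, sizeof(direct_qlen));
	printf("options payload: %u bytes\n",
	       (unsigned)(n->nlmsg_len - NLMSG_HDRLEN));
	return 0;
}
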
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index b9070736b8d9..f631c5ff4dbf 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1119,9 +1119,10 @@ static int __sctp_connect(struct sock* sk,
1119 /* Make sure the destination port is correctly set 1119 /* Make sure the destination port is correctly set
1120 * in all addresses. 1120 * in all addresses.
1121 */ 1121 */
1122 if (asoc && asoc->peer.port && asoc->peer.port != port) 1122 if (asoc && asoc->peer.port && asoc->peer.port != port) {
1123 err = -EINVAL;
1123 goto out_free; 1124 goto out_free;
1124 1125 }
1125 1126
1126 /* Check if there already is a matching association on the 1127 /* Check if there already is a matching association on the
1127 * endpoint (other than the one created here). 1128 * endpoint (other than the one created here).
@@ -6185,7 +6186,8 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
6185 6186
6186 /* Is there any exceptional events? */ 6187 /* Is there any exceptional events? */
6187 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 6188 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
 6188 mask |= POLLERR; 6189 mask |= POLLERR |
 6190 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
6189 if (sk->sk_shutdown & RCV_SHUTDOWN) 6191 if (sk->sk_shutdown & RCV_SHUTDOWN)
6190 mask |= POLLRDHUP | POLLIN | POLLRDNORM; 6192 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
6191 if (sk->sk_shutdown == SHUTDOWN_MASK) 6193 if (sk->sk_shutdown == SHUTDOWN_MASK)
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index 6675914dc592..8bcd4985d0fb 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -44,7 +44,7 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
44 struct nlmsghdr *rep_nlh; 44 struct nlmsghdr *rep_nlh;
45 struct nlmsghdr *req_nlh = info->nlhdr; 45 struct nlmsghdr *req_nlh = info->nlhdr;
46 struct tipc_genlmsghdr *req_userhdr = info->userhdr; 46 struct tipc_genlmsghdr *req_userhdr = info->userhdr;
47 int hdr_space = NLMSG_SPACE(GENL_HDRLEN + TIPC_GENL_HDRLEN); 47 int hdr_space = nlmsg_total_size(GENL_HDRLEN + TIPC_GENL_HDRLEN);
48 u16 cmd; 48 u16 cmd;
49 49
50 if ((req_userhdr->cmd & 0xC000) && (!capable(CAP_NET_ADMIN))) 50 if ((req_userhdr->cmd & 0xC000) && (!capable(CAP_NET_ADMIN)))
@@ -53,8 +53,8 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
53 cmd = req_userhdr->cmd; 53 cmd = req_userhdr->cmd;
54 54
55 rep_buf = tipc_cfg_do_cmd(req_userhdr->dest, cmd, 55 rep_buf = tipc_cfg_do_cmd(req_userhdr->dest, cmd,
56 NLMSG_DATA(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN, 56 nlmsg_data(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN,
57 NLMSG_PAYLOAD(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN), 57 nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN),
58 hdr_space); 58 hdr_space);
59 59
60 if (rep_buf) { 60 if (rep_buf) {
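
These are mechanical swaps from the deprecated uapi macros to the inline helpers in <net/netlink.h>; the layout math is unchanged: nlmsg_total_size() matches NLMSG_SPACE(), nlmsg_data() matches NLMSG_DATA(), and nlmsg_attrlen(nlh, n) matches NLMSG_PAYLOAD(nlh, n). A quick check of the size identities from the uapi side:

#include <stdio.h>
#include <linux/netlink.h>

int main(void)
{
	int payload = 32;

	/* NLMSG_SPACE(len) == NLMSG_ALIGN(len + NLMSG_HDRLEN), which is
	 * exactly what the kernel's nlmsg_total_size() computes. */
	printf("NLMSG_HDRLEN=%d NLMSG_LENGTH(%d)=%d NLMSG_SPACE(%d)=%d\n",
	       NLMSG_HDRLEN, payload, NLMSG_LENGTH(payload),
	       payload, (int)NLMSG_SPACE(payload));
	return 0;
}
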
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 2db702d82e7d..824eaf2c3afa 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2196,7 +2196,9 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
2196 2196
2197 /* exceptional events? */ 2197 /* exceptional events? */
2198 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 2198 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
2199 mask |= POLLERR; 2199 mask |= POLLERR |
2200 (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
2201
2200 if (sk->sk_shutdown & RCV_SHUTDOWN) 2202 if (sk->sk_shutdown & RCV_SHUTDOWN)
2201 mask |= POLLRDHUP | POLLIN | POLLRDNORM; 2203 mask |= POLLRDHUP | POLLIN | POLLRDNORM;
2202 if (sk->sk_shutdown == SHUTDOWN_MASK) 2204 if (sk->sk_shutdown == SHUTDOWN_MASK)
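
Both poll handlers now fold POLLPRI in when SOCK_SELECT_ERR_QUEUE is set, so select()/poll() users can wait for error-queue messages (TX timestamps, for example) instead of spinning on recvmsg(MSG_ERRQUEUE). Note the parentheses around the ternary: | binds tighter than ?:, so without them the whole OR collapses into the condition. A minimal consumer sketch; SO_SELECT_ERR_QUEUE is 45 on asm-generic socket layouts, and the fallback define is only for older headers:

#include <stdio.h>
#include <poll.h>
#include <sys/socket.h>

#ifndef SO_SELECT_ERR_QUEUE
#define SO_SELECT_ERR_QUEUE 45	/* asm-generic value; arch-specific elsewhere */
#endif

int main(void)
{
	int fd = socket(AF_UNIX, SOCK_DGRAM, 0);
	int one = 1;

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* With the flag set, a pending error-queue message also raises
	 * POLLPRI, so it can be waited on like any other event. */
	setsockopt(fd, SOL_SOCKET, SO_SELECT_ERR_QUEUE, &one, sizeof(one));

	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
	int n = poll(&pfd, 1, 0);

	printf("poll=%d revents=0x%x\n", n, n > 0 ? pfd.revents : 0);
	return 0;
}
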
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index 5e04d3d96285..daff75200e25 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -123,6 +123,14 @@ static s32 vmci_transport_error_to_vsock_error(s32 vmci_error)
123 return err > 0 ? -err : err; 123 return err > 0 ? -err : err;
124} 124}
125 125
126static u32 vmci_transport_peer_rid(u32 peer_cid)
127{
128 if (VMADDR_CID_HYPERVISOR == peer_cid)
129 return VMCI_TRANSPORT_HYPERVISOR_PACKET_RID;
130
131 return VMCI_TRANSPORT_PACKET_RID;
132}
133
126static inline void 134static inline void
127vmci_transport_packet_init(struct vmci_transport_packet *pkt, 135vmci_transport_packet_init(struct vmci_transport_packet *pkt,
128 struct sockaddr_vm *src, 136 struct sockaddr_vm *src,
@@ -140,7 +148,7 @@ vmci_transport_packet_init(struct vmci_transport_packet *pkt,
140 pkt->dg.src = vmci_make_handle(VMADDR_CID_ANY, 148 pkt->dg.src = vmci_make_handle(VMADDR_CID_ANY,
141 VMCI_TRANSPORT_PACKET_RID); 149 VMCI_TRANSPORT_PACKET_RID);
142 pkt->dg.dst = vmci_make_handle(dst->svm_cid, 150 pkt->dg.dst = vmci_make_handle(dst->svm_cid,
143 VMCI_TRANSPORT_PACKET_RID); 151 vmci_transport_peer_rid(dst->svm_cid));
144 pkt->dg.payload_size = sizeof(*pkt) - sizeof(pkt->dg); 152 pkt->dg.payload_size = sizeof(*pkt) - sizeof(pkt->dg);
145 pkt->version = VMCI_TRANSPORT_PACKET_VERSION; 153 pkt->version = VMCI_TRANSPORT_PACKET_VERSION;
146 pkt->type = type; 154 pkt->type = type;
@@ -508,6 +516,9 @@ static bool vmci_transport_is_trusted(struct vsock_sock *vsock, u32 peer_cid)
508 516
509static bool vmci_transport_allow_dgram(struct vsock_sock *vsock, u32 peer_cid) 517static bool vmci_transport_allow_dgram(struct vsock_sock *vsock, u32 peer_cid)
510{ 518{
519 if (VMADDR_CID_HYPERVISOR == peer_cid)
520 return true;
521
511 if (vsock->cached_peer != peer_cid) { 522 if (vsock->cached_peer != peer_cid) {
512 vsock->cached_peer = peer_cid; 523 vsock->cached_peer = peer_cid;
513 if (!vmci_transport_is_trusted(vsock, peer_cid) && 524 if (!vmci_transport_is_trusted(vsock, peer_cid) &&
@@ -628,7 +639,6 @@ static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg)
628static bool vmci_transport_stream_allow(u32 cid, u32 port) 639static bool vmci_transport_stream_allow(u32 cid, u32 port)
629{ 640{
630 static const u32 non_socket_contexts[] = { 641 static const u32 non_socket_contexts[] = {
631 VMADDR_CID_HYPERVISOR,
632 VMADDR_CID_RESERVED, 642 VMADDR_CID_RESERVED,
633 }; 643 };
634 int i; 644 int i;
@@ -667,7 +677,7 @@ static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg)
667 */ 677 */
668 678
669 if (!vmci_transport_stream_allow(dg->src.context, -1) 679 if (!vmci_transport_stream_allow(dg->src.context, -1)
670 || VMCI_TRANSPORT_PACKET_RID != dg->src.resource) 680 || vmci_transport_peer_rid(dg->src.context) != dg->src.resource)
671 return VMCI_ERROR_NO_ACCESS; 681 return VMCI_ERROR_NO_ACCESS;
672 682
673 if (VMCI_DG_SIZE(dg) < sizeof(*pkt)) 683 if (VMCI_DG_SIZE(dg) < sizeof(*pkt))
diff --git a/net/vmw_vsock/vmci_transport.h b/net/vmw_vsock/vmci_transport.h
index 1bf991803ec0..fd88ea8924e4 100644
--- a/net/vmw_vsock/vmci_transport.h
+++ b/net/vmw_vsock/vmci_transport.h
@@ -28,6 +28,9 @@
28/* The resource ID on which control packets are sent. */ 28/* The resource ID on which control packets are sent. */
29#define VMCI_TRANSPORT_PACKET_RID 1 29#define VMCI_TRANSPORT_PACKET_RID 1
30 30
31/* The resource ID on which control packets are sent to the hypervisor. */
32#define VMCI_TRANSPORT_HYPERVISOR_PACKET_RID 15
33
31#define VSOCK_PROTO_INVALID 0 34#define VSOCK_PROTO_INVALID 0
32#define VSOCK_PROTO_PKT_ON_NOTIFY (1 << 0) 35#define VSOCK_PROTO_PKT_ON_NOTIFY (1 << 0)
33#define VSOCK_PROTO_ALL_SUPPORTED (VSOCK_PROTO_PKT_ON_NOTIFY) 36#define VSOCK_PROTO_ALL_SUPPORTED (VSOCK_PROTO_PKT_ON_NOTIFY)
diff --git a/net/wireless/ap.c b/net/wireless/ap.c
index a4a14e8f55cc..324e8d851dc4 100644
--- a/net/wireless/ap.c
+++ b/net/wireless/ap.c
@@ -46,65 +46,3 @@ int cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
46 46
47 return err; 47 return err;
48} 48}
49
50void cfg80211_ch_switch_notify(struct net_device *dev,
51 struct cfg80211_chan_def *chandef)
52{
53 struct wireless_dev *wdev = dev->ieee80211_ptr;
54 struct wiphy *wiphy = wdev->wiphy;
55 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
56
57 trace_cfg80211_ch_switch_notify(dev, chandef);
58
59 wdev_lock(wdev);
60
61 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
62 wdev->iftype != NL80211_IFTYPE_P2P_GO))
63 goto out;
64
65 wdev->channel = chandef->chan;
66 nl80211_ch_switch_notify(rdev, dev, chandef, GFP_KERNEL);
67out:
68 wdev_unlock(wdev);
69 return;
70}
71EXPORT_SYMBOL(cfg80211_ch_switch_notify);
72
73bool cfg80211_rx_spurious_frame(struct net_device *dev,
74 const u8 *addr, gfp_t gfp)
75{
76 struct wireless_dev *wdev = dev->ieee80211_ptr;
77 bool ret;
78
79 trace_cfg80211_rx_spurious_frame(dev, addr);
80
81 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
82 wdev->iftype != NL80211_IFTYPE_P2P_GO)) {
83 trace_cfg80211_return_bool(false);
84 return false;
85 }
86 ret = nl80211_unexpected_frame(dev, addr, gfp);
87 trace_cfg80211_return_bool(ret);
88 return ret;
89}
90EXPORT_SYMBOL(cfg80211_rx_spurious_frame);
91
92bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev,
93 const u8 *addr, gfp_t gfp)
94{
95 struct wireless_dev *wdev = dev->ieee80211_ptr;
96 bool ret;
97
98 trace_cfg80211_rx_unexpected_4addr_frame(dev, addr);
99
100 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
101 wdev->iftype != NL80211_IFTYPE_P2P_GO &&
102 wdev->iftype != NL80211_IFTYPE_AP_VLAN)) {
103 trace_cfg80211_return_bool(false);
104 return false;
105 }
106 ret = nl80211_unexpected_4addr_frame(dev, addr, gfp);
107 trace_cfg80211_return_bool(ret);
108 return ret;
109}
110EXPORT_SYMBOL(cfg80211_rx_unexpected_4addr_frame);
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 6ddf74f0ae1e..00be55530a32 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -842,6 +842,46 @@ void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev,
842 rdev->num_running_monitor_ifaces += num; 842 rdev->num_running_monitor_ifaces += num;
843} 843}
844 844
845void cfg80211_leave(struct cfg80211_registered_device *rdev,
846 struct wireless_dev *wdev)
847{
848 struct net_device *dev = wdev->netdev;
849
850 switch (wdev->iftype) {
851 case NL80211_IFTYPE_ADHOC:
852 cfg80211_leave_ibss(rdev, dev, true);
853 break;
854 case NL80211_IFTYPE_P2P_CLIENT:
855 case NL80211_IFTYPE_STATION:
856 mutex_lock(&rdev->sched_scan_mtx);
857 __cfg80211_stop_sched_scan(rdev, false);
858 mutex_unlock(&rdev->sched_scan_mtx);
859
860 wdev_lock(wdev);
861#ifdef CONFIG_CFG80211_WEXT
862 kfree(wdev->wext.ie);
863 wdev->wext.ie = NULL;
864 wdev->wext.ie_len = 0;
865 wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
866#endif
867 __cfg80211_disconnect(rdev, dev,
868 WLAN_REASON_DEAUTH_LEAVING, true);
869 cfg80211_mlme_down(rdev, dev);
870 wdev_unlock(wdev);
871 break;
872 case NL80211_IFTYPE_MESH_POINT:
873 cfg80211_leave_mesh(rdev, dev);
874 break;
875 case NL80211_IFTYPE_AP:
876 cfg80211_stop_ap(rdev, dev);
877 break;
878 default:
879 break;
880 }
881
882 wdev->beacon_interval = 0;
883}
884
845static int cfg80211_netdev_notifier_call(struct notifier_block *nb, 885static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
846 unsigned long state, 886 unsigned long state,
847 void *ndev) 887 void *ndev)
@@ -910,38 +950,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
910 dev->priv_flags |= IFF_DONT_BRIDGE; 950 dev->priv_flags |= IFF_DONT_BRIDGE;
911 break; 951 break;
912 case NETDEV_GOING_DOWN: 952 case NETDEV_GOING_DOWN:
913 switch (wdev->iftype) { 953 cfg80211_leave(rdev, wdev);
914 case NL80211_IFTYPE_ADHOC:
915 cfg80211_leave_ibss(rdev, dev, true);
916 break;
917 case NL80211_IFTYPE_P2P_CLIENT:
918 case NL80211_IFTYPE_STATION:
919 mutex_lock(&rdev->sched_scan_mtx);
920 __cfg80211_stop_sched_scan(rdev, false);
921 mutex_unlock(&rdev->sched_scan_mtx);
922
923 wdev_lock(wdev);
924#ifdef CONFIG_CFG80211_WEXT
925 kfree(wdev->wext.ie);
926 wdev->wext.ie = NULL;
927 wdev->wext.ie_len = 0;
928 wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC;
929#endif
930 __cfg80211_disconnect(rdev, dev,
931 WLAN_REASON_DEAUTH_LEAVING, true);
932 cfg80211_mlme_down(rdev, dev);
933 wdev_unlock(wdev);
934 break;
935 case NL80211_IFTYPE_MESH_POINT:
936 cfg80211_leave_mesh(rdev, dev);
937 break;
938 case NL80211_IFTYPE_AP:
939 cfg80211_stop_ap(rdev, dev);
940 break;
941 default:
942 break;
943 }
944 wdev->beacon_interval = 0;
945 break; 954 break;
946 case NETDEV_DOWN: 955 case NETDEV_DOWN:
947 cfg80211_update_iface_num(rdev, wdev->iftype, -1); 956 cfg80211_update_iface_num(rdev, wdev->iftype, -1);
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 5845c2b37aa8..b5174f65cc9a 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -330,20 +330,15 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
330int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, 330int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
331 struct net_device *dev, 331 struct net_device *dev,
332 struct ieee80211_channel *chan, 332 struct ieee80211_channel *chan,
333 const u8 *bssid, const u8 *prev_bssid, 333 const u8 *bssid,
334 const u8 *ssid, int ssid_len, 334 const u8 *ssid, int ssid_len,
335 const u8 *ie, int ie_len, bool use_mfp, 335 struct cfg80211_assoc_request *req);
336 struct cfg80211_crypto_settings *crypt,
337 u32 assoc_flags, struct ieee80211_ht_cap *ht_capa,
338 struct ieee80211_ht_cap *ht_capa_mask);
339int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, 336int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
340 struct net_device *dev, struct ieee80211_channel *chan, 337 struct net_device *dev,
341 const u8 *bssid, const u8 *prev_bssid, 338 struct ieee80211_channel *chan,
339 const u8 *bssid,
342 const u8 *ssid, int ssid_len, 340 const u8 *ssid, int ssid_len,
343 const u8 *ie, int ie_len, bool use_mfp, 341 struct cfg80211_assoc_request *req);
344 struct cfg80211_crypto_settings *crypt,
345 u32 assoc_flags, struct ieee80211_ht_cap *ht_capa,
346 struct ieee80211_ht_cap *ht_capa_mask);
347int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, 342int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
348 struct net_device *dev, const u8 *bssid, 343 struct net_device *dev, const u8 *bssid,
349 const u8 *ie, int ie_len, u16 reason, 344 const u8 *ie, int ie_len, u16 reason,
@@ -375,6 +370,8 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
375 bool no_cck, bool dont_wait_for_ack, u64 *cookie); 370 bool no_cck, bool dont_wait_for_ack, u64 *cookie);
376void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa, 371void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa,
377 const struct ieee80211_ht_cap *ht_capa_mask); 372 const struct ieee80211_ht_cap *ht_capa_mask);
373void cfg80211_oper_and_vht_capa(struct ieee80211_vht_cap *vht_capa,
374 const struct ieee80211_vht_cap *vht_capa_mask);
378 375
379/* SME */ 376/* SME */
380int __cfg80211_connect(struct cfg80211_registered_device *rdev, 377int __cfg80211_connect(struct cfg80211_registered_device *rdev,
@@ -503,9 +500,14 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
503void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev, 500void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev,
504 enum nl80211_iftype iftype, int num); 501 enum nl80211_iftype iftype, int num);
505 502
503
504void cfg80211_leave(struct cfg80211_registered_device *rdev,
505 struct wireless_dev *wdev);
506
506void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev, 507void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
507 struct wireless_dev *wdev); 508 struct wireless_dev *wdev);
508 509
510
509#define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10 511#define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10
510 512
511#ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS 513#ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index 55957a284f6c..0bb93f3061a4 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -85,6 +85,7 @@ const struct mesh_setup default_mesh_setup = {
85 .ie = NULL, 85 .ie = NULL,
86 .ie_len = 0, 86 .ie_len = 0,
87 .is_secure = false, 87 .is_secure = false,
88 .user_mpm = false,
88 .beacon_interval = MESH_DEFAULT_BEACON_INTERVAL, 89 .beacon_interval = MESH_DEFAULT_BEACON_INTERVAL,
89 .dtim_period = MESH_DEFAULT_DTIM_PERIOD, 90 .dtim_period = MESH_DEFAULT_DTIM_PERIOD,
90}; 91};
@@ -233,20 +234,6 @@ int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev,
233 return 0; 234 return 0;
234} 235}
235 236
236void cfg80211_notify_new_peer_candidate(struct net_device *dev,
237 const u8 *macaddr, const u8* ie, u8 ie_len, gfp_t gfp)
238{
239 struct wireless_dev *wdev = dev->ieee80211_ptr;
240
241 trace_cfg80211_notify_new_peer_candidate(dev, macaddr);
242 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_MESH_POINT))
243 return;
244
245 nl80211_send_new_peer_candidate(wiphy_to_dev(wdev->wiphy), dev,
246 macaddr, ie, ie_len, gfp);
247}
248EXPORT_SYMBOL(cfg80211_notify_new_peer_candidate);
249
250static int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev, 237static int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
251 struct net_device *dev) 238 struct net_device *dev)
252{ 239{
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index caddca35d686..390198bf4b36 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -187,30 +187,6 @@ void cfg80211_send_disassoc(struct net_device *dev, const u8 *buf, size_t len)
187} 187}
188EXPORT_SYMBOL(cfg80211_send_disassoc); 188EXPORT_SYMBOL(cfg80211_send_disassoc);
189 189
190void cfg80211_send_unprot_deauth(struct net_device *dev, const u8 *buf,
191 size_t len)
192{
193 struct wireless_dev *wdev = dev->ieee80211_ptr;
194 struct wiphy *wiphy = wdev->wiphy;
195 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
196
197 trace_cfg80211_send_unprot_deauth(dev);
198 nl80211_send_unprot_deauth(rdev, dev, buf, len, GFP_ATOMIC);
199}
200EXPORT_SYMBOL(cfg80211_send_unprot_deauth);
201
202void cfg80211_send_unprot_disassoc(struct net_device *dev, const u8 *buf,
203 size_t len)
204{
205 struct wireless_dev *wdev = dev->ieee80211_ptr;
206 struct wiphy *wiphy = wdev->wiphy;
207 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
208
209 trace_cfg80211_send_unprot_disassoc(dev);
210 nl80211_send_unprot_disassoc(rdev, dev, buf, len, GFP_ATOMIC);
211}
212EXPORT_SYMBOL(cfg80211_send_unprot_disassoc);
213
214void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr) 190void cfg80211_send_auth_timeout(struct net_device *dev, const u8 *addr)
215{ 191{
216 struct wireless_dev *wdev = dev->ieee80211_ptr; 192 struct wireless_dev *wdev = dev->ieee80211_ptr;
@@ -367,27 +343,38 @@ void cfg80211_oper_and_ht_capa(struct ieee80211_ht_cap *ht_capa,
367 p1[i] &= p2[i]; 343 p1[i] &= p2[i];
368} 344}
369 345
 346/* Do a logical vht_capa &= vht_capa_mask. */
347void cfg80211_oper_and_vht_capa(struct ieee80211_vht_cap *vht_capa,
348 const struct ieee80211_vht_cap *vht_capa_mask)
349{
350 int i;
351 u8 *p1, *p2;
352 if (!vht_capa_mask) {
353 memset(vht_capa, 0, sizeof(*vht_capa));
354 return;
355 }
356
 357 p1 = (u8 *)vht_capa;
 358 p2 = (u8 *)vht_capa_mask;
359 for (i = 0; i < sizeof(*vht_capa); i++)
360 p1[i] &= p2[i];
361}
362
370int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, 363int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
371 struct net_device *dev, 364 struct net_device *dev,
372 struct ieee80211_channel *chan, 365 struct ieee80211_channel *chan,
373 const u8 *bssid, const u8 *prev_bssid, 366 const u8 *bssid,
374 const u8 *ssid, int ssid_len, 367 const u8 *ssid, int ssid_len,
375 const u8 *ie, int ie_len, bool use_mfp, 368 struct cfg80211_assoc_request *req)
376 struct cfg80211_crypto_settings *crypt,
377 u32 assoc_flags, struct ieee80211_ht_cap *ht_capa,
378 struct ieee80211_ht_cap *ht_capa_mask)
379{ 369{
380 struct wireless_dev *wdev = dev->ieee80211_ptr; 370 struct wireless_dev *wdev = dev->ieee80211_ptr;
381 struct cfg80211_assoc_request req;
382 int err; 371 int err;
383 bool was_connected = false; 372 bool was_connected = false;
384 373
385 ASSERT_WDEV_LOCK(wdev); 374 ASSERT_WDEV_LOCK(wdev);
386 375
387 memset(&req, 0, sizeof(req)); 376 if (wdev->current_bss && req->prev_bssid &&
388 377 ether_addr_equal(wdev->current_bss->pub.bssid, req->prev_bssid)) {
389 if (wdev->current_bss && prev_bssid &&
390 ether_addr_equal(wdev->current_bss->pub.bssid, prev_bssid)) {
391 /* 378 /*
392 * Trying to reassociate: Allow this to proceed and let the old 379 * Trying to reassociate: Allow this to proceed and let the old
 393 * association be dropped when the new one is completed. 380 * association be dropped when the new one is completed.
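
cfg80211_oper_and_vht_capa() mirrors the HT variant: with no mod mask the user override is cleared entirely, otherwise every bit the driver did not whitelist is stripped from the user-supplied capabilities before they reach the driver. The masking, restated as a standalone byte-wise AND:

#include <stdio.h>
#include <string.h>

static void and_mask(unsigned char *capa, const unsigned char *mask, size_t len)
{
	/* Clear every capability bit absent from the whitelist mask. */
	for (size_t i = 0; i < len; i++)
		capa[i] &= mask[i];
}

int main(void)
{
	unsigned char capa[4] = { 0xff, 0xff, 0xff, 0xff };
	unsigned char mask[4] = { 0x0f, 0x00, 0xff, 0x80 };

	and_mask(capa, mask, sizeof(capa));
	printf("%02x %02x %02x %02x\n", capa[0], capa[1], capa[2], capa[3]);
	return 0;
}
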
@@ -399,40 +386,30 @@ int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
399 } else if (wdev->current_bss) 386 } else if (wdev->current_bss)
400 return -EALREADY; 387 return -EALREADY;
401 388
402 req.ie = ie; 389 cfg80211_oper_and_ht_capa(&req->ht_capa_mask,
403 req.ie_len = ie_len;
404 memcpy(&req.crypto, crypt, sizeof(req.crypto));
405 req.use_mfp = use_mfp;
406 req.prev_bssid = prev_bssid;
407 req.flags = assoc_flags;
408 if (ht_capa)
409 memcpy(&req.ht_capa, ht_capa, sizeof(req.ht_capa));
410 if (ht_capa_mask)
411 memcpy(&req.ht_capa_mask, ht_capa_mask,
412 sizeof(req.ht_capa_mask));
413 cfg80211_oper_and_ht_capa(&req.ht_capa_mask,
414 rdev->wiphy.ht_capa_mod_mask); 390 rdev->wiphy.ht_capa_mod_mask);
391 cfg80211_oper_and_vht_capa(&req->vht_capa_mask,
392 rdev->wiphy.vht_capa_mod_mask);
415 393
416 req.bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len, 394 req->bss = cfg80211_get_bss(&rdev->wiphy, chan, bssid, ssid, ssid_len,
417 WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); 395 WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS);
418 if (!req.bss) { 396 if (!req->bss) {
419 if (was_connected) 397 if (was_connected)
420 wdev->sme_state = CFG80211_SME_CONNECTED; 398 wdev->sme_state = CFG80211_SME_CONNECTED;
421 return -ENOENT; 399 return -ENOENT;
422 } 400 }
423 401
424 err = cfg80211_can_use_chan(rdev, wdev, req.bss->channel, 402 err = cfg80211_can_use_chan(rdev, wdev, chan, CHAN_MODE_SHARED);
425 CHAN_MODE_SHARED);
426 if (err) 403 if (err)
427 goto out; 404 goto out;
428 405
429 err = rdev_assoc(rdev, dev, &req); 406 err = rdev_assoc(rdev, dev, req);
430 407
431out: 408out:
432 if (err) { 409 if (err) {
433 if (was_connected) 410 if (was_connected)
434 wdev->sme_state = CFG80211_SME_CONNECTED; 411 wdev->sme_state = CFG80211_SME_CONNECTED;
435 cfg80211_put_bss(&rdev->wiphy, req.bss); 412 cfg80211_put_bss(&rdev->wiphy, req->bss);
436 } 413 }
437 414
438 return err; 415 return err;
@@ -441,21 +418,17 @@ out:
441int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, 418int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
442 struct net_device *dev, 419 struct net_device *dev,
443 struct ieee80211_channel *chan, 420 struct ieee80211_channel *chan,
444 const u8 *bssid, const u8 *prev_bssid, 421 const u8 *bssid,
445 const u8 *ssid, int ssid_len, 422 const u8 *ssid, int ssid_len,
446 const u8 *ie, int ie_len, bool use_mfp, 423 struct cfg80211_assoc_request *req)
447 struct cfg80211_crypto_settings *crypt,
448 u32 assoc_flags, struct ieee80211_ht_cap *ht_capa,
449 struct ieee80211_ht_cap *ht_capa_mask)
450{ 424{
451 struct wireless_dev *wdev = dev->ieee80211_ptr; 425 struct wireless_dev *wdev = dev->ieee80211_ptr;
452 int err; 426 int err;
453 427
454 mutex_lock(&rdev->devlist_mtx); 428 mutex_lock(&rdev->devlist_mtx);
455 wdev_lock(wdev); 429 wdev_lock(wdev);
456 err = __cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid, 430 err = __cfg80211_mlme_assoc(rdev, dev, chan, bssid,
457 ssid, ssid_len, ie, ie_len, use_mfp, crypt, 431 ssid, ssid_len, req);
458 assoc_flags, ht_capa, ht_capa_mask);
459 wdev_unlock(wdev); 432 wdev_unlock(wdev);
460 mutex_unlock(&rdev->devlist_mtx); 433 mutex_unlock(&rdev->devlist_mtx);
461 434
@@ -577,62 +550,6 @@ void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
577 } 550 }
578} 551}
579 552
580void cfg80211_ready_on_channel(struct wireless_dev *wdev, u64 cookie,
581 struct ieee80211_channel *chan,
582 unsigned int duration, gfp_t gfp)
583{
584 struct wiphy *wiphy = wdev->wiphy;
585 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
586
587 trace_cfg80211_ready_on_channel(wdev, cookie, chan, duration);
588 nl80211_send_remain_on_channel(rdev, wdev, cookie, chan, duration, gfp);
589}
590EXPORT_SYMBOL(cfg80211_ready_on_channel);
591
592void cfg80211_remain_on_channel_expired(struct wireless_dev *wdev, u64 cookie,
593 struct ieee80211_channel *chan,
594 gfp_t gfp)
595{
596 struct wiphy *wiphy = wdev->wiphy;
597 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
598
599 trace_cfg80211_ready_on_channel_expired(wdev, cookie, chan);
600 nl80211_send_remain_on_channel_cancel(rdev, wdev, cookie, chan, gfp);
601}
602EXPORT_SYMBOL(cfg80211_remain_on_channel_expired);
603
604void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
605 struct station_info *sinfo, gfp_t gfp)
606{
607 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
608 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
609
610 trace_cfg80211_new_sta(dev, mac_addr, sinfo);
611 nl80211_send_sta_event(rdev, dev, mac_addr, sinfo, gfp);
612}
613EXPORT_SYMBOL(cfg80211_new_sta);
614
615void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp)
616{
617 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
618 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
619
620 trace_cfg80211_del_sta(dev, mac_addr);
621 nl80211_send_sta_del_event(rdev, dev, mac_addr, gfp);
622}
623EXPORT_SYMBOL(cfg80211_del_sta);
624
625void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
626 enum nl80211_connect_failed_reason reason,
627 gfp_t gfp)
628{
629 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
630 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
631
632 nl80211_send_conn_failed_event(rdev, dev, mac_addr, reason, gfp);
633}
634EXPORT_SYMBOL(cfg80211_conn_failed);
635
636struct cfg80211_mgmt_registration { 553struct cfg80211_mgmt_registration {
637 struct list_head list; 554 struct list_head list;
638 555
@@ -909,85 +826,6 @@ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm,
909} 826}
910EXPORT_SYMBOL(cfg80211_rx_mgmt); 827EXPORT_SYMBOL(cfg80211_rx_mgmt);
911 828
912void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
913 const u8 *buf, size_t len, bool ack, gfp_t gfp)
914{
915 struct wiphy *wiphy = wdev->wiphy;
916 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
917
918 trace_cfg80211_mgmt_tx_status(wdev, cookie, ack);
919
920 /* Indicate TX status of the Action frame to user space */
921 nl80211_send_mgmt_tx_status(rdev, wdev, cookie, buf, len, ack, gfp);
922}
923EXPORT_SYMBOL(cfg80211_mgmt_tx_status);
924
925void cfg80211_cqm_rssi_notify(struct net_device *dev,
926 enum nl80211_cqm_rssi_threshold_event rssi_event,
927 gfp_t gfp)
928{
929 struct wireless_dev *wdev = dev->ieee80211_ptr;
930 struct wiphy *wiphy = wdev->wiphy;
931 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
932
933 trace_cfg80211_cqm_rssi_notify(dev, rssi_event);
934
935 /* Indicate roaming trigger event to user space */
936 nl80211_send_cqm_rssi_notify(rdev, dev, rssi_event, gfp);
937}
938EXPORT_SYMBOL(cfg80211_cqm_rssi_notify);
939
940void cfg80211_cqm_pktloss_notify(struct net_device *dev,
941 const u8 *peer, u32 num_packets, gfp_t gfp)
942{
943 struct wireless_dev *wdev = dev->ieee80211_ptr;
944 struct wiphy *wiphy = wdev->wiphy;
945 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
946
947 trace_cfg80211_cqm_pktloss_notify(dev, peer, num_packets);
948
949 /* Indicate roaming trigger event to user space */
950 nl80211_send_cqm_pktloss_notify(rdev, dev, peer, num_packets, gfp);
951}
952EXPORT_SYMBOL(cfg80211_cqm_pktloss_notify);
953
954void cfg80211_cqm_txe_notify(struct net_device *dev,
955 const u8 *peer, u32 num_packets,
956 u32 rate, u32 intvl, gfp_t gfp)
957{
958 struct wireless_dev *wdev = dev->ieee80211_ptr;
959 struct wiphy *wiphy = wdev->wiphy;
960 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
961
962 nl80211_send_cqm_txe_notify(rdev, dev, peer, num_packets,
963 rate, intvl, gfp);
964}
965EXPORT_SYMBOL(cfg80211_cqm_txe_notify);
966
967void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid,
968 const u8 *replay_ctr, gfp_t gfp)
969{
970 struct wireless_dev *wdev = dev->ieee80211_ptr;
971 struct wiphy *wiphy = wdev->wiphy;
972 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
973
974 trace_cfg80211_gtk_rekey_notify(dev, bssid);
975 nl80211_gtk_rekey_notify(rdev, dev, bssid, replay_ctr, gfp);
976}
977EXPORT_SYMBOL(cfg80211_gtk_rekey_notify);
978
979void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
980 const u8 *bssid, bool preauth, gfp_t gfp)
981{
982 struct wireless_dev *wdev = dev->ieee80211_ptr;
983 struct wiphy *wiphy = wdev->wiphy;
984 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
985
986 trace_cfg80211_pmksa_candidate_notify(dev, index, bssid, preauth);
987 nl80211_pmksa_candidate_notify(rdev, dev, index, bssid, preauth, gfp);
988}
989EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify);
990
991void cfg80211_dfs_channels_update_work(struct work_struct *work) 829void cfg80211_dfs_channels_update_work(struct work_struct *work)
992{ 830{
993 struct delayed_work *delayed_work; 831 struct delayed_work *delayed_work;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 58e13a8c95f9..671b69a3c136 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -370,6 +370,14 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
370 [NL80211_ATTR_MAC_ADDRS] = { .type = NLA_NESTED }, 370 [NL80211_ATTR_MAC_ADDRS] = { .type = NLA_NESTED },
371 [NL80211_ATTR_STA_CAPABILITY] = { .type = NLA_U16 }, 371 [NL80211_ATTR_STA_CAPABILITY] = { .type = NLA_U16 },
372 [NL80211_ATTR_STA_EXT_CAPABILITY] = { .type = NLA_BINARY, }, 372 [NL80211_ATTR_STA_EXT_CAPABILITY] = { .type = NLA_BINARY, },
373 [NL80211_ATTR_SPLIT_WIPHY_DUMP] = { .type = NLA_FLAG, },
374 [NL80211_ATTR_DISABLE_VHT] = { .type = NLA_FLAG },
375 [NL80211_ATTR_VHT_CAPABILITY_MASK] = {
376 .len = NL80211_VHT_CAPABILITY_LEN,
377 },
378 [NL80211_ATTR_MDID] = { .type = NLA_U16 },
379 [NL80211_ATTR_IE_RIC] = { .type = NLA_BINARY,
380 .len = IEEE80211_MAX_DATA_LEN },
373}; 381};
374 382
375/* policy for the key attributes */ 383/* policy for the key attributes */
@@ -539,7 +547,8 @@ static inline void *nl80211hdr_put(struct sk_buff *skb, u32 portid, u32 seq,
539} 547}
540 548
541static int nl80211_msg_put_channel(struct sk_buff *msg, 549static int nl80211_msg_put_channel(struct sk_buff *msg,
542 struct ieee80211_channel *chan) 550 struct ieee80211_channel *chan,
551 bool large)
543{ 552{
544 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_FREQ, 553 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_FREQ,
545 chan->center_freq)) 554 chan->center_freq))
@@ -554,9 +563,37 @@ static int nl80211_msg_put_channel(struct sk_buff *msg,
554 if ((chan->flags & IEEE80211_CHAN_NO_IBSS) && 563 if ((chan->flags & IEEE80211_CHAN_NO_IBSS) &&
555 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_IBSS)) 564 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_IBSS))
556 goto nla_put_failure; 565 goto nla_put_failure;
557 if ((chan->flags & IEEE80211_CHAN_RADAR) && 566 if (chan->flags & IEEE80211_CHAN_RADAR) {
558 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR)) 567 if (nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR))
559 goto nla_put_failure; 568 goto nla_put_failure;
569 if (large) {
570 u32 time;
571
572 time = elapsed_jiffies_msecs(chan->dfs_state_entered);
573
574 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_STATE,
575 chan->dfs_state))
576 goto nla_put_failure;
577 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_DFS_TIME,
578 time))
579 goto nla_put_failure;
580 }
581 }
582
583 if (large) {
584 if ((chan->flags & IEEE80211_CHAN_NO_HT40MINUS) &&
585 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_MINUS))
586 goto nla_put_failure;
587 if ((chan->flags & IEEE80211_CHAN_NO_HT40PLUS) &&
588 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_PLUS))
589 goto nla_put_failure;
590 if ((chan->flags & IEEE80211_CHAN_NO_80MHZ) &&
591 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_80MHZ))
592 goto nla_put_failure;
593 if ((chan->flags & IEEE80211_CHAN_NO_160MHZ) &&
594 nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_160MHZ))
595 goto nla_put_failure;
596 }
560 597
561 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, 598 if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,
562 DBM_TO_MBM(chan->max_power))) 599 DBM_TO_MBM(chan->max_power)))
@@ -832,7 +869,8 @@ nla_put_failure:
832} 869}
833 870
834static int nl80211_put_iface_combinations(struct wiphy *wiphy, 871static int nl80211_put_iface_combinations(struct wiphy *wiphy,
835 struct sk_buff *msg) 872 struct sk_buff *msg,
873 bool large)
836{ 874{
837 struct nlattr *nl_combis; 875 struct nlattr *nl_combis;
838 int i, j; 876 int i, j;
@@ -881,6 +919,10 @@ static int nl80211_put_iface_combinations(struct wiphy *wiphy,
881 nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM, 919 nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM,
882 c->max_interfaces)) 920 c->max_interfaces))
883 goto nla_put_failure; 921 goto nla_put_failure;
922 if (large &&
923 nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS,
924 c->radar_detect_widths))
925 goto nla_put_failure;
884 926
885 nla_nest_end(msg, nl_combi); 927 nla_nest_end(msg, nl_combi);
886 } 928 }
@@ -892,412 +934,611 @@ nla_put_failure:
892 return -ENOBUFS; 934 return -ENOBUFS;
893} 935}
894 936
895static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flags, 937#ifdef CONFIG_PM
896 struct cfg80211_registered_device *dev) 938static int nl80211_send_wowlan_tcp_caps(struct cfg80211_registered_device *rdev,
939 struct sk_buff *msg)
897{ 940{
898 void *hdr; 941 const struct wiphy_wowlan_tcp_support *tcp = rdev->wiphy.wowlan.tcp;
899 struct nlattr *nl_bands, *nl_band; 942 struct nlattr *nl_tcp;
900 struct nlattr *nl_freqs, *nl_freq;
901 struct nlattr *nl_rates, *nl_rate;
902 struct nlattr *nl_cmds;
903 enum ieee80211_band band;
904 struct ieee80211_channel *chan;
905 struct ieee80211_rate *rate;
906 int i;
907 const struct ieee80211_txrx_stypes *mgmt_stypes =
908 dev->wiphy.mgmt_stypes;
909 943
910 hdr = nl80211hdr_put(msg, portid, seq, flags, NL80211_CMD_NEW_WIPHY); 944 if (!tcp)
911 if (!hdr) 945 return 0;
912 return -1;
913 946
914 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx) || 947 nl_tcp = nla_nest_start(msg, NL80211_WOWLAN_TRIG_TCP_CONNECTION);
915 nla_put_string(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)) || 948 if (!nl_tcp)
916 nla_put_u32(msg, NL80211_ATTR_GENERATION, 949 return -ENOBUFS;
917 cfg80211_rdev_list_generation) ||
918 nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT,
919 dev->wiphy.retry_short) ||
920 nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_LONG,
921 dev->wiphy.retry_long) ||
922 nla_put_u32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD,
923 dev->wiphy.frag_threshold) ||
924 nla_put_u32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD,
925 dev->wiphy.rts_threshold) ||
926 nla_put_u8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS,
927 dev->wiphy.coverage_class) ||
928 nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS,
929 dev->wiphy.max_scan_ssids) ||
930 nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS,
931 dev->wiphy.max_sched_scan_ssids) ||
932 nla_put_u16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN,
933 dev->wiphy.max_scan_ie_len) ||
934 nla_put_u16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN,
935 dev->wiphy.max_sched_scan_ie_len) ||
936 nla_put_u8(msg, NL80211_ATTR_MAX_MATCH_SETS,
937 dev->wiphy.max_match_sets))
938 goto nla_put_failure;
939 950
940 if ((dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN) && 951 if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD,
941 nla_put_flag(msg, NL80211_ATTR_SUPPORT_IBSS_RSN)) 952 tcp->data_payload_max))
942 goto nla_put_failure; 953 return -ENOBUFS;
943 if ((dev->wiphy.flags & WIPHY_FLAG_MESH_AUTH) &&
944 nla_put_flag(msg, NL80211_ATTR_SUPPORT_MESH_AUTH))
945 goto nla_put_failure;
946 if ((dev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) &&
947 nla_put_flag(msg, NL80211_ATTR_SUPPORT_AP_UAPSD))
948 goto nla_put_failure;
949 if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM) &&
950 nla_put_flag(msg, NL80211_ATTR_ROAM_SUPPORT))
951 goto nla_put_failure;
952 if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
953 nla_put_flag(msg, NL80211_ATTR_TDLS_SUPPORT))
954 goto nla_put_failure;
955 if ((dev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP) &&
956 nla_put_flag(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP))
957 goto nla_put_failure;
958 954
959 if (nla_put(msg, NL80211_ATTR_CIPHER_SUITES, 955 if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD,
960 sizeof(u32) * dev->wiphy.n_cipher_suites, 956 tcp->data_payload_max))
961 dev->wiphy.cipher_suites)) 957 return -ENOBUFS;
962 goto nla_put_failure;
963 958
964 if (nla_put_u8(msg, NL80211_ATTR_MAX_NUM_PMKIDS, 959 if (tcp->seq && nla_put_flag(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_SEQ))
965 dev->wiphy.max_num_pmkids)) 960 return -ENOBUFS;
966 goto nla_put_failure;
967 961
968 if ((dev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL) && 962 if (tcp->tok && nla_put(msg, NL80211_WOWLAN_TCP_DATA_PAYLOAD_TOKEN,
969 nla_put_flag(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE)) 963 sizeof(*tcp->tok), tcp->tok))
970 goto nla_put_failure; 964 return -ENOBUFS;
971 965
972 if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX, 966 if (nla_put_u32(msg, NL80211_WOWLAN_TCP_DATA_INTERVAL,
973 dev->wiphy.available_antennas_tx) || 967 tcp->data_interval_max))
974 nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX, 968 return -ENOBUFS;
975 dev->wiphy.available_antennas_rx))
976 goto nla_put_failure;
977 969
978 if ((dev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD) && 970 if (nla_put_u32(msg, NL80211_WOWLAN_TCP_WAKE_PAYLOAD,
979 nla_put_u32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD, 971 tcp->wake_payload_max))
980 dev->wiphy.probe_resp_offload)) 972 return -ENOBUFS;
981 goto nla_put_failure;
982 973
983 if ((dev->wiphy.available_antennas_tx || 974 nla_nest_end(msg, nl_tcp);
984 dev->wiphy.available_antennas_rx) && dev->ops->get_antenna) { 975 return 0;
985 u32 tx_ant = 0, rx_ant = 0; 976}
986 int res; 977
987 res = rdev_get_antenna(dev, &tx_ant, &rx_ant); 978static int nl80211_send_wowlan(struct sk_buff *msg,
988 if (!res) { 979 struct cfg80211_registered_device *dev,
989 if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_TX, 980 bool large)
990 tx_ant) || 981{
991 nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_RX, 982 struct nlattr *nl_wowlan;
992 rx_ant)) 983
993 goto nla_put_failure; 984 if (!dev->wiphy.wowlan.flags && !dev->wiphy.wowlan.n_patterns)
994 } 985 return 0;
986
987 nl_wowlan = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED);
988 if (!nl_wowlan)
989 return -ENOBUFS;
990
991 if (((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_ANY) &&
992 nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) ||
993 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_DISCONNECT) &&
994 nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) ||
995 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_MAGIC_PKT) &&
996 nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) ||
997 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) &&
998 nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED)) ||
999 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) &&
1000 nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) ||
1001 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) &&
1002 nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) ||
1003 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) &&
1004 nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) ||
1005 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_RFKILL_RELEASE) &&
1006 nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE)))
1007 return -ENOBUFS;
1008
1009 if (dev->wiphy.wowlan.n_patterns) {
1010 struct nl80211_wowlan_pattern_support pat = {
1011 .max_patterns = dev->wiphy.wowlan.n_patterns,
1012 .min_pattern_len = dev->wiphy.wowlan.pattern_min_len,
1013 .max_pattern_len = dev->wiphy.wowlan.pattern_max_len,
1014 .max_pkt_offset = dev->wiphy.wowlan.max_pkt_offset,
1015 };
1016
1017 if (nla_put(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN,
1018 sizeof(pat), &pat))
1019 return -ENOBUFS;
995 } 1020 }
996 1021
997 if (nl80211_put_iftypes(msg, NL80211_ATTR_SUPPORTED_IFTYPES, 1022 if (large && nl80211_send_wowlan_tcp_caps(dev, msg))
998 dev->wiphy.interface_modes)) 1023 return -ENOBUFS;
999 goto nla_put_failure;
1000 1024
1001 nl_bands = nla_nest_start(msg, NL80211_ATTR_WIPHY_BANDS); 1025 nla_nest_end(msg, nl_wowlan);
1002 if (!nl_bands)
1003 goto nla_put_failure;
1004 1026
1005 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 1027 return 0;
1006 if (!dev->wiphy.bands[band]) 1028}
1007 continue; 1029#endif
1008 1030
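Note: this closes the two new CONFIG_PM helpers. nl80211_send_wowlan_tcp_caps() nests the TCP wakeup capabilities inside the WoWLAN trigger set; on any nla_put failure the half-built nest is simply abandoned, since the caller discards the whole message. A compilable sketch of how such nesting works -- reserve the nest header first, then backfill its length once the payload size is known. The buffer layout is simplified and the names are stand-ins, not libnl/netlink API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct buf { uint8_t data[128]; size_t len; };

/* reserve a nest header now; its length is only known at nest_end() */
static size_t nest_start(struct buf *b, uint16_t type)
{
	size_t off = b->len;
	uint16_t hdr[2] = { 0, type };		/* length backfilled later */

	memcpy(b->data + b->len, hdr, sizeof(hdr));
	b->len += sizeof(hdr);
	return off;
}

static void nest_end(struct buf *b, size_t off)
{
	uint16_t total = (uint16_t)(b->len - off);

	memcpy(b->data + off, &total, sizeof(total));	/* backfill */
}

int main(void)
{
	struct buf b = { .len = 0 };
	size_t nest = nest_start(&b, 1 /* stand-in for the TCP trigger nest */);

	b.len += 12;		/* pretend: three u32 capability attributes */
	nest_end(&b, nest);
	printf("nest of %zu bytes emitted\n", b.len);
	return 0;
}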
1009 nl_band = nla_nest_start(msg, band); 1031static int nl80211_send_band_rateinfo(struct sk_buff *msg,
1010 if (!nl_band) 1032 struct ieee80211_supported_band *sband)
1011 goto nla_put_failure; 1033{
1034 struct nlattr *nl_rates, *nl_rate;
1035 struct ieee80211_rate *rate;
1036 int i;
1012 1037
1013 /* add HT info */ 1038 /* add HT info */
1014 if (dev->wiphy.bands[band]->ht_cap.ht_supported && 1039 if (sband->ht_cap.ht_supported &&
1015 (nla_put(msg, NL80211_BAND_ATTR_HT_MCS_SET, 1040 (nla_put(msg, NL80211_BAND_ATTR_HT_MCS_SET,
1016 sizeof(dev->wiphy.bands[band]->ht_cap.mcs), 1041 sizeof(sband->ht_cap.mcs),
1017 &dev->wiphy.bands[band]->ht_cap.mcs) || 1042 &sband->ht_cap.mcs) ||
1018 nla_put_u16(msg, NL80211_BAND_ATTR_HT_CAPA, 1043 nla_put_u16(msg, NL80211_BAND_ATTR_HT_CAPA,
1019 dev->wiphy.bands[band]->ht_cap.cap) || 1044 sband->ht_cap.cap) ||
1020 nla_put_u8(msg, NL80211_BAND_ATTR_HT_AMPDU_FACTOR, 1045 nla_put_u8(msg, NL80211_BAND_ATTR_HT_AMPDU_FACTOR,
1021 dev->wiphy.bands[band]->ht_cap.ampdu_factor) || 1046 sband->ht_cap.ampdu_factor) ||
1022 nla_put_u8(msg, NL80211_BAND_ATTR_HT_AMPDU_DENSITY, 1047 nla_put_u8(msg, NL80211_BAND_ATTR_HT_AMPDU_DENSITY,
1023 dev->wiphy.bands[band]->ht_cap.ampdu_density))) 1048 sband->ht_cap.ampdu_density)))
1024 goto nla_put_failure; 1049 return -ENOBUFS;
1025 1050
1026 /* add VHT info */ 1051 /* add VHT info */
1027 if (dev->wiphy.bands[band]->vht_cap.vht_supported && 1052 if (sband->vht_cap.vht_supported &&
1028 (nla_put(msg, NL80211_BAND_ATTR_VHT_MCS_SET, 1053 (nla_put(msg, NL80211_BAND_ATTR_VHT_MCS_SET,
1029 sizeof(dev->wiphy.bands[band]->vht_cap.vht_mcs), 1054 sizeof(sband->vht_cap.vht_mcs),
1030 &dev->wiphy.bands[band]->vht_cap.vht_mcs) || 1055 &sband->vht_cap.vht_mcs) ||
1031 nla_put_u32(msg, NL80211_BAND_ATTR_VHT_CAPA, 1056 nla_put_u32(msg, NL80211_BAND_ATTR_VHT_CAPA,
1032 dev->wiphy.bands[band]->vht_cap.cap))) 1057 sband->vht_cap.cap)))
1033 goto nla_put_failure; 1058 return -ENOBUFS;
1034 1059
1035 /* add frequencies */ 1060 /* add bitrates */
1036 nl_freqs = nla_nest_start(msg, NL80211_BAND_ATTR_FREQS); 1061 nl_rates = nla_nest_start(msg, NL80211_BAND_ATTR_RATES);
1037 if (!nl_freqs) 1062 if (!nl_rates)
1038 goto nla_put_failure; 1063 return -ENOBUFS;
1039 1064
1040 for (i = 0; i < dev->wiphy.bands[band]->n_channels; i++) { 1065 for (i = 0; i < sband->n_bitrates; i++) {
1041 nl_freq = nla_nest_start(msg, i); 1066 nl_rate = nla_nest_start(msg, i);
1042 if (!nl_freq) 1067 if (!nl_rate)
1043 goto nla_put_failure; 1068 return -ENOBUFS;
1044 1069
1045 chan = &dev->wiphy.bands[band]->channels[i]; 1070 rate = &sband->bitrates[i];
1071 if (nla_put_u32(msg, NL80211_BITRATE_ATTR_RATE,
1072 rate->bitrate))
1073 return -ENOBUFS;
1074 if ((rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) &&
1075 nla_put_flag(msg,
1076 NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE))
1077 return -ENOBUFS;
1046 1078
1047 if (nl80211_msg_put_channel(msg, chan)) 1079 nla_nest_end(msg, nl_rate);
1048 goto nla_put_failure; 1080 }
1049 1081
1050 nla_nest_end(msg, nl_freq); 1082 nla_nest_end(msg, nl_rates);
1051 }
1052 1083
1053 nla_nest_end(msg, nl_freqs); 1084 return 0;
1085}
1054 1086
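Note: the HT/VHT capability and bitrate emission moves into nl80211_send_band_rateinfo() so the split path in case 3 below can send it as its own chunk. One detail worth remembering when reading the rate loop: NL80211_BITRATE_ATTR_RATE is carried in units of 100 kbit/s, e.g.:

#include <stdio.h>

int main(void)
{
	/* NL80211_BITRATE_ATTR_RATE value for a 54 Mbit/s legacy rate */
	unsigned rate_attr = 540;

	printf("%u.%u Mbit/s\n", rate_attr / 10, rate_attr % 10);
	return 0;
}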
1055 /* add bitrates */ 1087static int
1056 nl_rates = nla_nest_start(msg, NL80211_BAND_ATTR_RATES); 1088nl80211_send_mgmt_stypes(struct sk_buff *msg,
1057 if (!nl_rates) 1089 const struct ieee80211_txrx_stypes *mgmt_stypes)
1058 goto nla_put_failure; 1090{
1091 u16 stypes;
1092 struct nlattr *nl_ftypes, *nl_ifs;
1093 enum nl80211_iftype ift;
1094 int i;
1059 1095
1060 for (i = 0; i < dev->wiphy.bands[band]->n_bitrates; i++) { 1096 if (!mgmt_stypes)
1061 nl_rate = nla_nest_start(msg, i); 1097 return 0;
1062 if (!nl_rate)
1063 goto nla_put_failure;
1064 1098
1065 rate = &dev->wiphy.bands[band]->bitrates[i]; 1099 nl_ifs = nla_nest_start(msg, NL80211_ATTR_TX_FRAME_TYPES);
1066 if (nla_put_u32(msg, NL80211_BITRATE_ATTR_RATE, 1100 if (!nl_ifs)
1067 rate->bitrate)) 1101 return -ENOBUFS;
1068 goto nla_put_failure;
1069 if ((rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) &&
1070 nla_put_flag(msg,
1071 NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE))
1072 goto nla_put_failure;
1073 1102
1074 nla_nest_end(msg, nl_rate); 1103 for (ift = 0; ift < NUM_NL80211_IFTYPES; ift++) {
1104 nl_ftypes = nla_nest_start(msg, ift);
1105 if (!nl_ftypes)
1106 return -ENOBUFS;
1107 i = 0;
1108 stypes = mgmt_stypes[ift].tx;
1109 while (stypes) {
1110 if ((stypes & 1) &&
1111 nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE,
1112 (i << 4) | IEEE80211_FTYPE_MGMT))
1113 return -ENOBUFS;
1114 stypes >>= 1;
1115 i++;
1075 } 1116 }
1117 nla_nest_end(msg, nl_ftypes);
1118 }
1076 1119
1077 nla_nest_end(msg, nl_rates); 1120 nla_nest_end(msg, nl_ifs);
1078 1121
1079 nla_nest_end(msg, nl_band); 1122 nl_ifs = nla_nest_start(msg, NL80211_ATTR_RX_FRAME_TYPES);
1123 if (!nl_ifs)
1124 return -ENOBUFS;
1125
1126 for (ift = 0; ift < NUM_NL80211_IFTYPES; ift++) {
1127 nl_ftypes = nla_nest_start(msg, ift);
1128 if (!nl_ftypes)
1129 return -ENOBUFS;
1130 i = 0;
1131 stypes = mgmt_stypes[ift].rx;
1132 while (stypes) {
1133 if ((stypes & 1) &&
1134 nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE,
1135 (i << 4) | IEEE80211_FTYPE_MGMT))
1136 return -ENOBUFS;
1137 stypes >>= 1;
1138 i++;
1139 }
1140 nla_nest_end(msg, nl_ftypes);
1080 } 1141 }
1081 nla_nest_end(msg, nl_bands); 1142 nla_nest_end(msg, nl_ifs);
1082 1143
1083 nl_cmds = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_COMMANDS); 1144 return 0;
1084 if (!nl_cmds) 1145}
1085 goto nla_put_failure;
1086 1146
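Note: nl80211_send_mgmt_stypes() factors out the TX/RX frame-type advertisement. Each iftype's supported management subtypes form a u16 bitmap, and the advertised value packs the subtype into frame-control bits 4-7. A standalone model of the same bit walk (subtype values 0xb and 0xd are authentication and action frames per IEEE 802.11):

#include <stdint.h>
#include <stdio.h>

#define IEEE80211_FTYPE_MGMT 0x0	/* type field, frame-control bits 2-3 */

int main(void)
{
	/* bit i set = management subtype i supported; the subtype lives in
	 * frame-control bits 4-7, hence (i << 4) when advertising it */
	uint16_t stypes = (1 << 0xb) | (1 << 0xd);	/* auth, action */
	int i = 0;

	while (stypes) {
		if (stypes & 1)
			printf("frame type 0x%02x\n",
			       (i << 4) | IEEE80211_FTYPE_MGMT);
		stypes >>= 1;
		i++;
	}
	return 0;
}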
1087 i = 0; 1147static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
1088#define CMD(op, n) \ 1148 struct sk_buff *msg, u32 portid, u32 seq,
1089 do { \ 1149 int flags, bool split, long *split_start,
1090 if (dev->ops->op) { \ 1150 long *band_start, long *chan_start)
1091 i++; \ 1151{
1092 if (nla_put_u32(msg, i, NL80211_CMD_ ## n)) \ 1152 void *hdr;
1093 goto nla_put_failure; \ 1153 struct nlattr *nl_bands, *nl_band;
1094 } \ 1154 struct nlattr *nl_freqs, *nl_freq;
1095 } while (0) 1155 struct nlattr *nl_cmds;
1096 1156 enum ieee80211_band band;
1097 CMD(add_virtual_intf, NEW_INTERFACE); 1157 struct ieee80211_channel *chan;
1098 CMD(change_virtual_intf, SET_INTERFACE); 1158 int i;
1099 CMD(add_key, NEW_KEY); 1159 const struct ieee80211_txrx_stypes *mgmt_stypes =
1100 CMD(start_ap, START_AP); 1160 dev->wiphy.mgmt_stypes;
1101 CMD(add_station, NEW_STATION); 1161 long start = 0, start_chan = 0, start_band = 0;
1102 CMD(add_mpath, NEW_MPATH); 1162 u32 features;
1103 CMD(update_mesh_config, SET_MESH_CONFIG); 1163
1104 CMD(change_bss, SET_BSS); 1164 hdr = nl80211hdr_put(msg, portid, seq, flags, NL80211_CMD_NEW_WIPHY);
1105 CMD(auth, AUTHENTICATE); 1165 if (!hdr)
1106 CMD(assoc, ASSOCIATE); 1166 return -ENOBUFS;
1107 CMD(deauth, DEAUTHENTICATE); 1167
1108 CMD(disassoc, DISASSOCIATE); 1168 /* allow always using the variables */
1109 CMD(join_ibss, JOIN_IBSS); 1169 if (!split) {
1110 CMD(join_mesh, JOIN_MESH); 1170 split_start = &start;
1111 CMD(set_pmksa, SET_PMKSA); 1171 band_start = &start_band;
1112 CMD(del_pmksa, DEL_PMKSA); 1172 chan_start = &start_chan;
1113 CMD(flush_pmksa, FLUSH_PMKSA);
1114 if (dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL)
1115 CMD(remain_on_channel, REMAIN_ON_CHANNEL);
1116 CMD(set_bitrate_mask, SET_TX_BITRATE_MASK);
1117 CMD(mgmt_tx, FRAME);
1118 CMD(mgmt_tx_cancel_wait, FRAME_WAIT_CANCEL);
1119 if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) {
1120 i++;
1121 if (nla_put_u32(msg, i, NL80211_CMD_SET_WIPHY_NETNS))
1122 goto nla_put_failure;
1123 } 1173 }
1124 if (dev->ops->set_monitor_channel || dev->ops->start_ap || 1174
1125 dev->ops->join_mesh) { 1175 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx) ||
1126 i++; 1176 nla_put_string(msg, NL80211_ATTR_WIPHY_NAME,
1127 if (nla_put_u32(msg, i, NL80211_CMD_SET_CHANNEL)) 1177 wiphy_name(&dev->wiphy)) ||
1178 nla_put_u32(msg, NL80211_ATTR_GENERATION,
1179 cfg80211_rdev_list_generation))
1180 goto nla_put_failure;
1181
1182 switch (*split_start) {
1183 case 0:
1184 if (nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT,
1185 dev->wiphy.retry_short) ||
1186 nla_put_u8(msg, NL80211_ATTR_WIPHY_RETRY_LONG,
1187 dev->wiphy.retry_long) ||
1188 nla_put_u32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD,
1189 dev->wiphy.frag_threshold) ||
1190 nla_put_u32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD,
1191 dev->wiphy.rts_threshold) ||
1192 nla_put_u8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS,
1193 dev->wiphy.coverage_class) ||
1194 nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS,
1195 dev->wiphy.max_scan_ssids) ||
1196 nla_put_u8(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS,
1197 dev->wiphy.max_sched_scan_ssids) ||
1198 nla_put_u16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN,
1199 dev->wiphy.max_scan_ie_len) ||
1200 nla_put_u16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN,
1201 dev->wiphy.max_sched_scan_ie_len) ||
1202 nla_put_u8(msg, NL80211_ATTR_MAX_MATCH_SETS,
1203 dev->wiphy.max_match_sets))
1128 goto nla_put_failure; 1204 goto nla_put_failure;
1129 } 1205
1130 CMD(set_wds_peer, SET_WDS_PEER); 1206 if ((dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN) &&
1131 if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) { 1207 nla_put_flag(msg, NL80211_ATTR_SUPPORT_IBSS_RSN))
1132 CMD(tdls_mgmt, TDLS_MGMT); 1208 goto nla_put_failure;
1133 CMD(tdls_oper, TDLS_OPER); 1209 if ((dev->wiphy.flags & WIPHY_FLAG_MESH_AUTH) &&
1134 } 1210 nla_put_flag(msg, NL80211_ATTR_SUPPORT_MESH_AUTH))
1135 if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) 1211 goto nla_put_failure;
1136 CMD(sched_scan_start, START_SCHED_SCAN); 1212 if ((dev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) &&
1137 CMD(probe_client, PROBE_CLIENT); 1213 nla_put_flag(msg, NL80211_ATTR_SUPPORT_AP_UAPSD))
1138 CMD(set_noack_map, SET_NOACK_MAP); 1214 goto nla_put_failure;
1139 if (dev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS) { 1215 if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_FW_ROAM) &&
1140 i++; 1216 nla_put_flag(msg, NL80211_ATTR_ROAM_SUPPORT))
1141 if (nla_put_u32(msg, i, NL80211_CMD_REGISTER_BEACONS)) 1217 goto nla_put_failure;
1218 if ((dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) &&
1219 nla_put_flag(msg, NL80211_ATTR_TDLS_SUPPORT))
1220 goto nla_put_failure;
1221 if ((dev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP) &&
1222 nla_put_flag(msg, NL80211_ATTR_TDLS_EXTERNAL_SETUP))
1142 goto nla_put_failure; 1223 goto nla_put_failure;
1143 }
1144 CMD(start_p2p_device, START_P2P_DEVICE);
1145 CMD(set_mcast_rate, SET_MCAST_RATE);
1146 1224
1147#ifdef CONFIG_NL80211_TESTMODE 1225 (*split_start)++;
1148 CMD(testmode_cmd, TESTMODE); 1226 if (split)
1149#endif 1227 break;
1228 case 1:
1229 if (nla_put(msg, NL80211_ATTR_CIPHER_SUITES,
1230 sizeof(u32) * dev->wiphy.n_cipher_suites,
1231 dev->wiphy.cipher_suites))
1232 goto nla_put_failure;
1150 1233
1151#undef CMD 1234 if (nla_put_u8(msg, NL80211_ATTR_MAX_NUM_PMKIDS,
1235 dev->wiphy.max_num_pmkids))
1236 goto nla_put_failure;
1152 1237
1153 if (dev->ops->connect || dev->ops->auth) { 1238 if ((dev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL) &&
1154 i++; 1239 nla_put_flag(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE))
1155 if (nla_put_u32(msg, i, NL80211_CMD_CONNECT))
1156 goto nla_put_failure; 1240 goto nla_put_failure;
1157 }
1158 1241
1159 if (dev->ops->disconnect || dev->ops->deauth) { 1242 if (nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX,
1160 i++; 1243 dev->wiphy.available_antennas_tx) ||
1161 if (nla_put_u32(msg, i, NL80211_CMD_DISCONNECT)) 1244 nla_put_u32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX,
1245 dev->wiphy.available_antennas_rx))
1162 goto nla_put_failure; 1246 goto nla_put_failure;
1163 }
1164 1247
1165 nla_nest_end(msg, nl_cmds); 1248 if ((dev->wiphy.flags & WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD) &&
1249 nla_put_u32(msg, NL80211_ATTR_PROBE_RESP_OFFLOAD,
1250 dev->wiphy.probe_resp_offload))
1251 goto nla_put_failure;
1166 1252
1167 if (dev->ops->remain_on_channel && 1253 if ((dev->wiphy.available_antennas_tx ||
1168 (dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) && 1254 dev->wiphy.available_antennas_rx) &&
1169 nla_put_u32(msg, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION, 1255 dev->ops->get_antenna) {
1170 dev->wiphy.max_remain_on_channel_duration)) 1256 u32 tx_ant = 0, rx_ant = 0;
1171 goto nla_put_failure; 1257 int res;
1258 res = rdev_get_antenna(dev, &tx_ant, &rx_ant);
1259 if (!res) {
1260 if (nla_put_u32(msg,
1261 NL80211_ATTR_WIPHY_ANTENNA_TX,
1262 tx_ant) ||
1263 nla_put_u32(msg,
1264 NL80211_ATTR_WIPHY_ANTENNA_RX,
1265 rx_ant))
1266 goto nla_put_failure;
1267 }
1268 }
1172 1269
1173 if ((dev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX) && 1270 (*split_start)++;
1174 nla_put_flag(msg, NL80211_ATTR_OFFCHANNEL_TX_OK)) 1271 if (split)
1175 goto nla_put_failure; 1272 break;
1273 case 2:
1274 if (nl80211_put_iftypes(msg, NL80211_ATTR_SUPPORTED_IFTYPES,
1275 dev->wiphy.interface_modes))
1276 goto nla_put_failure;
1277 (*split_start)++;
1278 if (split)
1279 break;
1280 case 3:
1281 nl_bands = nla_nest_start(msg, NL80211_ATTR_WIPHY_BANDS);
1282 if (!nl_bands)
1283 goto nla_put_failure;
1176 1284
1177 if (mgmt_stypes) { 1285 for (band = *band_start; band < IEEE80211_NUM_BANDS; band++) {
1178 u16 stypes; 1286 struct ieee80211_supported_band *sband;
1179 struct nlattr *nl_ftypes, *nl_ifs;
1180 enum nl80211_iftype ift;
1181 1287
1182 nl_ifs = nla_nest_start(msg, NL80211_ATTR_TX_FRAME_TYPES); 1288 sband = dev->wiphy.bands[band];
1183 if (!nl_ifs)
1184 goto nla_put_failure;
1185 1289
1186 for (ift = 0; ift < NUM_NL80211_IFTYPES; ift++) { 1290 if (!sband)
1187 nl_ftypes = nla_nest_start(msg, ift); 1291 continue;
1188 if (!nl_ftypes) 1292
1293 nl_band = nla_nest_start(msg, band);
1294 if (!nl_band)
1189 goto nla_put_failure; 1295 goto nla_put_failure;
1190 i = 0; 1296
1191 stypes = mgmt_stypes[ift].tx; 1297 switch (*chan_start) {
1192 while (stypes) { 1298 case 0:
1193 if ((stypes & 1) && 1299 if (nl80211_send_band_rateinfo(msg, sband))
1194 nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE,
1195 (i << 4) | IEEE80211_FTYPE_MGMT))
1196 goto nla_put_failure; 1300 goto nla_put_failure;
1197 stypes >>= 1; 1301 (*chan_start)++;
1198 i++; 1302 if (split)
1303 break;
1304 default:
1305 /* add frequencies */
1306 nl_freqs = nla_nest_start(
1307 msg, NL80211_BAND_ATTR_FREQS);
1308 if (!nl_freqs)
1309 goto nla_put_failure;
1310
1311 for (i = *chan_start - 1;
1312 i < sband->n_channels;
1313 i++) {
1314 nl_freq = nla_nest_start(msg, i);
1315 if (!nl_freq)
1316 goto nla_put_failure;
1317
1318 chan = &sband->channels[i];
1319
1320 if (nl80211_msg_put_channel(msg, chan,
1321 split))
1322 goto nla_put_failure;
1323
1324 nla_nest_end(msg, nl_freq);
1325 if (split)
1326 break;
1327 }
1328 if (i < sband->n_channels)
1329 *chan_start = i + 2;
1330 else
1331 *chan_start = 0;
1332 nla_nest_end(msg, nl_freqs);
1333 }
1334
1335 nla_nest_end(msg, nl_band);
1336
1337 if (split) {
1338 /* start again here */
1339 if (*chan_start)
1340 band--;
1341 break;
1199 } 1342 }
1200 nla_nest_end(msg, nl_ftypes);
1201 } 1343 }
1344 nla_nest_end(msg, nl_bands);
1202 1345
1203 nla_nest_end(msg, nl_ifs); 1346 if (band < IEEE80211_NUM_BANDS)
1347 *band_start = band + 1;
1348 else
1349 *band_start = 0;
1204 1350
1205 nl_ifs = nla_nest_start(msg, NL80211_ATTR_RX_FRAME_TYPES); 1351 /* if bands & channels are done, continue outside */
1206 if (!nl_ifs) 1352 if (*band_start == 0 && *chan_start == 0)
1353 (*split_start)++;
1354 if (split)
1355 break;
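Note: case 3 is the subtlest part of the split dump. *chan_start encodes a resumable cursor per band: 0 means the band's rate info has not been sent yet, and k > 0 means resume at channel k-1; after emitting channel i the code stores i + 2, and while the cursor is still nonzero the band index is decremented so the same band is revisited next round. A simplified but runnable model of the cursor arithmetic (the real code detects the final channel one round later, via an empty iteration):

#include <stdio.h>

int main(void)
{
	int n_channels = 3, chan_start = 0, round = 1;

	for (;; round++) {
		if (chan_start == 0) {
			printf("round %d: rate info\n", round);
			chan_start = 1;		/* next round: channel 0 */
			continue;
		}
		int i = chan_start - 1;
		printf("round %d: channel %d\n", round, i);
		if (i + 1 < n_channels) {
			chan_start = i + 2;	/* resume at i + 1 */
		} else {
			chan_start = 0;		/* band finished */
			break;
		}
	}
	return 0;
}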
1356 case 4:
1357 nl_cmds = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_COMMANDS);
1358 if (!nl_cmds)
1207 goto nla_put_failure; 1359 goto nla_put_failure;
1208 1360
1209 for (ift = 0; ift < NUM_NL80211_IFTYPES; ift++) { 1361 i = 0;
1210 nl_ftypes = nla_nest_start(msg, ift); 1362#define CMD(op, n) \
1211 if (!nl_ftypes) 1363 do { \
1364 if (dev->ops->op) { \
1365 i++; \
1366 if (nla_put_u32(msg, i, NL80211_CMD_ ## n)) \
1367 goto nla_put_failure; \
1368 } \
1369 } while (0)
1370
1371 CMD(add_virtual_intf, NEW_INTERFACE);
1372 CMD(change_virtual_intf, SET_INTERFACE);
1373 CMD(add_key, NEW_KEY);
1374 CMD(start_ap, START_AP);
1375 CMD(add_station, NEW_STATION);
1376 CMD(add_mpath, NEW_MPATH);
1377 CMD(update_mesh_config, SET_MESH_CONFIG);
1378 CMD(change_bss, SET_BSS);
1379 CMD(auth, AUTHENTICATE);
1380 CMD(assoc, ASSOCIATE);
1381 CMD(deauth, DEAUTHENTICATE);
1382 CMD(disassoc, DISASSOCIATE);
1383 CMD(join_ibss, JOIN_IBSS);
1384 CMD(join_mesh, JOIN_MESH);
1385 CMD(set_pmksa, SET_PMKSA);
1386 CMD(del_pmksa, DEL_PMKSA);
1387 CMD(flush_pmksa, FLUSH_PMKSA);
1388 if (dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL)
1389 CMD(remain_on_channel, REMAIN_ON_CHANNEL);
1390 CMD(set_bitrate_mask, SET_TX_BITRATE_MASK);
1391 CMD(mgmt_tx, FRAME);
1392 CMD(mgmt_tx_cancel_wait, FRAME_WAIT_CANCEL);
1393 if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) {
1394 i++;
1395 if (nla_put_u32(msg, i, NL80211_CMD_SET_WIPHY_NETNS))
1212 goto nla_put_failure; 1396 goto nla_put_failure;
1213 i = 0;
1214 stypes = mgmt_stypes[ift].rx;
1215 while (stypes) {
1216 if ((stypes & 1) &&
1217 nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE,
1218 (i << 4) | IEEE80211_FTYPE_MGMT))
1219 goto nla_put_failure;
1220 stypes >>= 1;
1221 i++;
1222 }
1223 nla_nest_end(msg, nl_ftypes);
1224 } 1397 }
1225 nla_nest_end(msg, nl_ifs); 1398 if (dev->ops->set_monitor_channel || dev->ops->start_ap ||
1226 } 1399 dev->ops->join_mesh) {
1400 i++;
1401 if (nla_put_u32(msg, i, NL80211_CMD_SET_CHANNEL))
1402 goto nla_put_failure;
1403 }
1404 CMD(set_wds_peer, SET_WDS_PEER);
1405 if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_TDLS) {
1406 CMD(tdls_mgmt, TDLS_MGMT);
1407 CMD(tdls_oper, TDLS_OPER);
1408 }
1409 if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN)
1410 CMD(sched_scan_start, START_SCHED_SCAN);
1411 CMD(probe_client, PROBE_CLIENT);
1412 CMD(set_noack_map, SET_NOACK_MAP);
1413 if (dev->wiphy.flags & WIPHY_FLAG_REPORTS_OBSS) {
1414 i++;
1415 if (nla_put_u32(msg, i, NL80211_CMD_REGISTER_BEACONS))
1416 goto nla_put_failure;
1417 }
1418 CMD(start_p2p_device, START_P2P_DEVICE);
1419 CMD(set_mcast_rate, SET_MCAST_RATE);
1227 1420
1228#ifdef CONFIG_PM 1421#ifdef CONFIG_NL80211_TESTMODE
1229 if (dev->wiphy.wowlan.flags || dev->wiphy.wowlan.n_patterns) { 1422 CMD(testmode_cmd, TESTMODE);
1230 struct nlattr *nl_wowlan; 1423#endif
1231 1424
1232 nl_wowlan = nla_nest_start(msg, 1425#undef CMD
1233 NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED);
1234 if (!nl_wowlan)
1235 goto nla_put_failure;
1236 1426
1237 if (((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_ANY) && 1427 if (dev->ops->connect || dev->ops->auth) {
1238 nla_put_flag(msg, NL80211_WOWLAN_TRIG_ANY)) || 1428 i++;
1239 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_DISCONNECT) && 1429 if (nla_put_u32(msg, i, NL80211_CMD_CONNECT))
1240 nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT)) ||
1241 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_MAGIC_PKT) &&
1242 nla_put_flag(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT)) ||
1243 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) &&
1244 nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED)) ||
1245 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) &&
1246 nla_put_flag(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE)) ||
1247 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) &&
1248 nla_put_flag(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST)) ||
1249 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) &&
1250 nla_put_flag(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE)) ||
1251 ((dev->wiphy.wowlan.flags & WIPHY_WOWLAN_RFKILL_RELEASE) &&
1252 nla_put_flag(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE)))
1253 goto nla_put_failure;
1254 if (dev->wiphy.wowlan.n_patterns) {
1255 struct nl80211_wowlan_pattern_support pat = {
1256 .max_patterns = dev->wiphy.wowlan.n_patterns,
1257 .min_pattern_len =
1258 dev->wiphy.wowlan.pattern_min_len,
1259 .max_pattern_len =
1260 dev->wiphy.wowlan.pattern_max_len,
1261 .max_pkt_offset =
1262 dev->wiphy.wowlan.max_pkt_offset,
1263 };
1264 if (nla_put(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN,
1265 sizeof(pat), &pat))
1266 goto nla_put_failure; 1430 goto nla_put_failure;
1267 } 1431 }
1268 1432
1269 nla_nest_end(msg, nl_wowlan); 1433 if (dev->ops->disconnect || dev->ops->deauth) {
1270 } 1434 i++;
1435 if (nla_put_u32(msg, i, NL80211_CMD_DISCONNECT))
1436 goto nla_put_failure;
1437 }
1438
1439 nla_nest_end(msg, nl_cmds);
1440 (*split_start)++;
1441 if (split)
1442 break;
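Note: the supported-commands nest reuses the pre-existing CMD() macro, where the running counter i doubles as the netlink attribute index. A compilable imitation of the macro's shape; struct ops and the printf stand in for the real cfg80211 ops table and nla_put_u32():

#include <stdio.h>

struct ops {
	int (*add_key)(void);
	int (*join_mesh)(void);
};

static int stub(void) { return 0; }

/* same shape as CMD(): advertise a command only when the driver
 * implements the matching op, using a running attribute index */
#define CMD(op, n)						\
	do {							\
		if (dev_ops.op) {				\
			i++;					\
			printf("attr %d -> CMD_%s\n", i, #n);	\
		}						\
	} while (0)

int main(void)
{
	struct ops dev_ops = { .add_key = stub, .join_mesh = NULL };
	int i = 0;

	CMD(add_key, NEW_KEY);		/* emitted */
	CMD(join_mesh, JOIN_MESH);	/* silently skipped */
	return 0;
}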
1443 case 5:
1444 if (dev->ops->remain_on_channel &&
1445 (dev->wiphy.flags & WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL) &&
1446 nla_put_u32(msg,
1447 NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION,
1448 dev->wiphy.max_remain_on_channel_duration))
1449 goto nla_put_failure;
1450
1451 if ((dev->wiphy.flags & WIPHY_FLAG_OFFCHAN_TX) &&
1452 nla_put_flag(msg, NL80211_ATTR_OFFCHANNEL_TX_OK))
1453 goto nla_put_failure;
1454
1455 if (nl80211_send_mgmt_stypes(msg, mgmt_stypes))
1456 goto nla_put_failure;
1457 (*split_start)++;
1458 if (split)
1459 break;
1460 case 6:
1461#ifdef CONFIG_PM
1462 if (nl80211_send_wowlan(msg, dev, split))
1463 goto nla_put_failure;
1464 (*split_start)++;
1465 if (split)
1466 break;
1467#else
1468 (*split_start)++;
1271#endif 1469#endif
1470 case 7:
1471 if (nl80211_put_iftypes(msg, NL80211_ATTR_SOFTWARE_IFTYPES,
1472 dev->wiphy.software_iftypes))
1473 goto nla_put_failure;
1272 1474
1273 if (nl80211_put_iftypes(msg, NL80211_ATTR_SOFTWARE_IFTYPES, 1475 if (nl80211_put_iface_combinations(&dev->wiphy, msg, split))
1274 dev->wiphy.software_iftypes)) 1476 goto nla_put_failure;
1275 goto nla_put_failure;
1276 1477
1277 if (nl80211_put_iface_combinations(&dev->wiphy, msg)) 1478 (*split_start)++;
1278 goto nla_put_failure; 1479 if (split)
1480 break;
1481 case 8:
1482 if ((dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME) &&
1483 nla_put_u32(msg, NL80211_ATTR_DEVICE_AP_SME,
1484 dev->wiphy.ap_sme_capa))
1485 goto nla_put_failure;
1279 1486
1280 if ((dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME) && 1487 features = dev->wiphy.features;
1281 nla_put_u32(msg, NL80211_ATTR_DEVICE_AP_SME, 1488 /*
1282 dev->wiphy.ap_sme_capa)) 1489 * We can only add the per-channel limit information if the
1283 goto nla_put_failure; 1490 * dump is split, otherwise it makes it too big. Therefore
1491 * only advertise it in that case.
1492 */
1493 if (split)
1494 features |= NL80211_FEATURE_ADVERTISE_CHAN_LIMITS;
1495 if (nla_put_u32(msg, NL80211_ATTR_FEATURE_FLAGS, features))
1496 goto nla_put_failure;
1284 1497
1285 if (nla_put_u32(msg, NL80211_ATTR_FEATURE_FLAGS, 1498 if (dev->wiphy.ht_capa_mod_mask &&
1286 dev->wiphy.features)) 1499 nla_put(msg, NL80211_ATTR_HT_CAPABILITY_MASK,
1287 goto nla_put_failure; 1500 sizeof(*dev->wiphy.ht_capa_mod_mask),
1501 dev->wiphy.ht_capa_mod_mask))
1502 goto nla_put_failure;
1288 1503
1289 if (dev->wiphy.ht_capa_mod_mask && 1504 if (dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME &&
1290 nla_put(msg, NL80211_ATTR_HT_CAPABILITY_MASK, 1505 dev->wiphy.max_acl_mac_addrs &&
1291 sizeof(*dev->wiphy.ht_capa_mod_mask), 1506 nla_put_u32(msg, NL80211_ATTR_MAC_ACL_MAX,
1292 dev->wiphy.ht_capa_mod_mask)) 1507 dev->wiphy.max_acl_mac_addrs))
1293 goto nla_put_failure; 1508 goto nla_put_failure;
1294 1509
1295 if (dev->wiphy.flags & WIPHY_FLAG_HAVE_AP_SME && 1510 /*
1296 dev->wiphy.max_acl_mac_addrs && 1511 * Any information below this point is only available to
1297 nla_put_u32(msg, NL80211_ATTR_MAC_ACL_MAX, 1512 * applications that can deal with it being split. This
1298 dev->wiphy.max_acl_mac_addrs)) 1513 * helps ensure that newly added capabilities don't break
1299 goto nla_put_failure; 1514 * older tools by overrunning their buffers.
1515 *
1516 * We still increment split_start so that in the split
1517 * case we'll continue with more data in the next round,
1518 * but break unconditionally so unsplit data stops here.
1519 */
1520 (*split_start)++;
1521 break;
1522 case 9:
1523 if (dev->wiphy.extended_capabilities &&
1524 (nla_put(msg, NL80211_ATTR_EXT_CAPA,
1525 dev->wiphy.extended_capabilities_len,
1526 dev->wiphy.extended_capabilities) ||
1527 nla_put(msg, NL80211_ATTR_EXT_CAPA_MASK,
1528 dev->wiphy.extended_capabilities_len,
1529 dev->wiphy.extended_capabilities_mask)))
1530 goto nla_put_failure;
1300 1531
1532 if (dev->wiphy.vht_capa_mod_mask &&
1533 nla_put(msg, NL80211_ATTR_VHT_CAPABILITY_MASK,
1534 sizeof(*dev->wiphy.vht_capa_mod_mask),
1535 dev->wiphy.vht_capa_mod_mask))
1536 goto nla_put_failure;
1537
1538 /* done */
1539 *split_start = 0;
1540 break;
1541 }
1301 return genlmsg_end(msg, hdr); 1542 return genlmsg_end(msg, hdr);
1302 1543
1303 nla_put_failure: 1544 nla_put_failure:
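Note: taken together, the switch turns nl80211_send_wiphy() into a resumable serializer: each case emits one coherent chunk, bumps *split_start, and breaks when splitting, so each netlink message of a split dump carries one chunk while the cursor survives in the dump callback's cb->args[]. Everything from case 9 onward is only ever sent split, which is how future attributes can grow without overrunning old userspace buffers. A toy version of the loop driving such a state machine; the stage names are illustrative only:

#include <stdio.h>

int main(void)
{
	static const char * const stage[] = {
		"limits", "ciphers", "iftypes", "bands+channels",
		"commands", "durations", "wowlan", "combinations", "features",
	};
	const int n = sizeof(stage) / sizeof(stage[0]);
	long split_start = 0;	/* persists between rounds, as cb->args[1] */
	int round = 1;

	do {
		printf("message %d carries: %s\n", round++, stage[split_start]);
		split_start++;	/* one chunk per message when splitting */
	} while (split_start < n);

	split_start = 0;	/* done: reset so the dump terminates */
	return 0;
}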
@@ -1310,39 +1551,80 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
1310 int idx = 0, ret; 1551 int idx = 0, ret;
1311 int start = cb->args[0]; 1552 int start = cb->args[0];
1312 struct cfg80211_registered_device *dev; 1553 struct cfg80211_registered_device *dev;
1554 s64 filter_wiphy = -1;
1555 bool split = false;
1556 struct nlattr **tb = nl80211_fam.attrbuf;
1557 int res;
1313 1558
1314 mutex_lock(&cfg80211_mutex); 1559 mutex_lock(&cfg80211_mutex);
1560 res = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
1561 tb, nl80211_fam.maxattr, nl80211_policy);
1562 if (res == 0) {
1563 split = tb[NL80211_ATTR_SPLIT_WIPHY_DUMP];
1564 if (tb[NL80211_ATTR_WIPHY])
1565 filter_wiphy = nla_get_u32(tb[NL80211_ATTR_WIPHY]);
1566 if (tb[NL80211_ATTR_WDEV])
1567 filter_wiphy = nla_get_u64(tb[NL80211_ATTR_WDEV]) >> 32;
1568 if (tb[NL80211_ATTR_IFINDEX]) {
1569 struct net_device *netdev;
1570 int ifidx = nla_get_u32(tb[NL80211_ATTR_IFINDEX]);
1571
1572 netdev = dev_get_by_index(sock_net(skb->sk), ifidx);
1573 if (!netdev) {
1574 mutex_unlock(&cfg80211_mutex);
1575 return -ENODEV;
1576 }
1577 if (netdev->ieee80211_ptr) {
1578 dev = wiphy_to_dev(
1579 netdev->ieee80211_ptr->wiphy);
1580 filter_wiphy = dev->wiphy_idx;
1581 }
1582 dev_put(netdev);
1583 }
1584 }
1585
1315 list_for_each_entry(dev, &cfg80211_rdev_list, list) { 1586 list_for_each_entry(dev, &cfg80211_rdev_list, list) {
1316 if (!net_eq(wiphy_net(&dev->wiphy), sock_net(skb->sk))) 1587 if (!net_eq(wiphy_net(&dev->wiphy), sock_net(skb->sk)))
1317 continue; 1588 continue;
1318 if (++idx <= start) 1589 if (++idx <= start)
1319 continue; 1590 continue;
1320 ret = nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).portid, 1591 if (filter_wiphy != -1 && dev->wiphy_idx != filter_wiphy)
1321 cb->nlh->nlmsg_seq, NLM_F_MULTI, 1592 continue;
1322 dev); 1593 /* attempt to fit multiple wiphy data chunks into the skb */
1323 if (ret < 0) { 1594 do {
1324 /* 1595 ret = nl80211_send_wiphy(dev, skb,
1325 * If sending the wiphy data didn't fit (ENOBUFS or 1596 NETLINK_CB(cb->skb).portid,
1326 * EMSGSIZE returned), this SKB is still empty (so 1597 cb->nlh->nlmsg_seq,
1327 * it's not too big because another wiphy dataset is 1598 NLM_F_MULTI,
1328 * already in the skb) and we've not tried to adjust 1599 split, &cb->args[1],
1329 * the dump allocation yet ... then adjust the alloc 1600 &cb->args[2],
1330 * size to be bigger, and return 1 but with the empty 1601 &cb->args[3]);
1331 * skb. This results in an empty message being RX'ed 1602 if (ret < 0) {
1332 * in userspace, but that is ignored. 1603 /*
1333 * 1604 * If sending the wiphy data didn't fit (ENOBUFS
1334 * We can then retry with the larger buffer. 1605 * or EMSGSIZE returned), this SKB is still
1335 */ 1606 * empty (so it's not too big because another
1336 if ((ret == -ENOBUFS || ret == -EMSGSIZE) && 1607 * wiphy dataset is already in the skb) and
1337 !skb->len && 1608 * we've not tried to adjust the dump allocation
1338 cb->min_dump_alloc < 4096) { 1609 * yet ... then adjust the alloc size to be
1339 cb->min_dump_alloc = 4096; 1610 * bigger, and return 1 but with the empty skb.
1340 mutex_unlock(&cfg80211_mutex); 1611 * This results in an empty message being RX'ed
1341 return 1; 1612 * in userspace, but that is ignored.
1613 *
1614 * We can then retry with the larger buffer.
1615 */
1616 if ((ret == -ENOBUFS || ret == -EMSGSIZE) &&
1617 !skb->len &&
1618 cb->min_dump_alloc < 4096) {
1619 cb->min_dump_alloc = 4096;
1620 mutex_unlock(&cfg80211_mutex);
1621 return 1;
1622 }
1623 idx--;
1624 break;
1342 } 1625 }
1343 idx--; 1626 } while (cb->args[1] > 0);
1344 break; 1627 break;
1345 }
1346 } 1628 }
1347 mutex_unlock(&cfg80211_mutex); 1629 mutex_unlock(&cfg80211_mutex);
1348 1630
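Note: the dump loop now supports filtering (by wiphy index, wdev identifier or ifindex) and packs as many chunks of one wiphy as fit into the skb via the do/while on cb->args[1]. The grow-and-retry fallback is unchanged: if even an empty skb cannot hold the data, min_dump_alloc is raised to 4096 and an empty message is returned so userspace silently retries. In outline:

#include <stdio.h>

int main(void)
{
	/* if one wiphy doesn't fit an *empty* message, the buffer itself
	 * is too small: grow once to 4096 and redo the same entry */
	int min_dump_alloc = 1024;
	const int record = 3000;

	for (int attempt = 1; attempt <= 2; attempt++) {
		if (record <= min_dump_alloc) {
			printf("attempt %d: fits\n", attempt);
			break;
		}
		printf("attempt %d: empty skb overflow, growing\n", attempt);
		min_dump_alloc = 4096;
	}
	return 0;
}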
@@ -1360,7 +1642,8 @@ static int nl80211_get_wiphy(struct sk_buff *skb, struct genl_info *info)
1360 if (!msg) 1642 if (!msg)
1361 return -ENOMEM; 1643 return -ENOMEM;
1362 1644
1363 if (nl80211_send_wiphy(msg, info->snd_portid, info->snd_seq, 0, dev) < 0) { 1645 if (nl80211_send_wiphy(dev, msg, info->snd_portid, info->snd_seq, 0,
1646 false, NULL, NULL, NULL) < 0) {
1364 nlmsg_free(msg); 1647 nlmsg_free(msg);
1365 return -ENOBUFS; 1648 return -ENOBUFS;
1366 } 1649 }
@@ -2967,6 +3250,7 @@ static int parse_station_flags(struct genl_info *info,
2967 sta_flags = nla_data(nla); 3250 sta_flags = nla_data(nla);
2968 params->sta_flags_mask = sta_flags->mask; 3251 params->sta_flags_mask = sta_flags->mask;
2969 params->sta_flags_set = sta_flags->set; 3252 params->sta_flags_set = sta_flags->set;
3253 params->sta_flags_set &= params->sta_flags_mask;
2970 if ((params->sta_flags_mask | 3254 if ((params->sta_flags_mask |
2971 params->sta_flags_set) & BIT(__NL80211_STA_FLAG_INVALID)) 3255 params->sta_flags_set) & BIT(__NL80211_STA_FLAG_INVALID))
2972 return -EINVAL; 3256 return -EINVAL;
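Note: masking sta_flags_set with sta_flags_mask at parse time means a flag "set" that userspace did not also put in the mask is dropped before any later logic can act on it:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* a 'set' bit outside the 'mask' is not a requested change; clearing
	 * it up front means every later check sees only meaningful bits */
	uint32_t sta_flags_mask = 0x05;
	uint32_t sta_flags_set = 0x07;		/* bit 1 set but not masked */

	sta_flags_set &= sta_flags_mask;
	printf("effective set: 0x%02x\n", sta_flags_set);	/* 0x05 */
	return 0;
}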
@@ -3320,6 +3604,136 @@ static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
3320 return genlmsg_reply(msg, info); 3604 return genlmsg_reply(msg, info);
3321} 3605}
3322 3606
3607int cfg80211_check_station_change(struct wiphy *wiphy,
3608 struct station_parameters *params,
3609 enum cfg80211_station_type statype)
3610{
3611 if (params->listen_interval != -1)
3612 return -EINVAL;
3613 if (params->aid)
3614 return -EINVAL;
3615
3616 /* When you run into this, adjust the code below for the new flag */
3617 BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 7);
3618
3619 switch (statype) {
3620 case CFG80211_STA_MESH_PEER_KERNEL:
3621 case CFG80211_STA_MESH_PEER_USER:
3622 /*
3623 * No ignoring the TDLS flag here -- the userspace mesh
3624 * code doesn't have the bug of including TDLS in the
3625 * mask everywhere.
3626 */
3627 if (params->sta_flags_mask &
3628 ~(BIT(NL80211_STA_FLAG_AUTHENTICATED) |
3629 BIT(NL80211_STA_FLAG_MFP) |
3630 BIT(NL80211_STA_FLAG_AUTHORIZED)))
3631 return -EINVAL;
3632 break;
3633 case CFG80211_STA_TDLS_PEER_SETUP:
3634 case CFG80211_STA_TDLS_PEER_ACTIVE:
3635 if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
3636 return -EINVAL;
3637 /* ignore since it can't change */
3638 params->sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER);
3639 break;
3640 default:
3641 /* disallow mesh-specific things */
3642 if (params->plink_action != NL80211_PLINK_ACTION_NO_ACTION)
3643 return -EINVAL;
3644 if (params->local_pm)
3645 return -EINVAL;
3646 if (params->sta_modify_mask & STATION_PARAM_APPLY_PLINK_STATE)
3647 return -EINVAL;
3648 }
3649
3650 if (statype != CFG80211_STA_TDLS_PEER_SETUP &&
3651 statype != CFG80211_STA_TDLS_PEER_ACTIVE) {
3652 /* TDLS can't be set, ... */
3653 if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
3654 return -EINVAL;
3655 /*
3656 * ... but don't bother the driver with it. This works around
3657 * a hostapd/wpa_supplicant issue -- it always includes the
3658 * TDLS_PEER flag in the mask even for AP mode.
3659 */
3660 params->sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER);
3661 }
3662
3663 if (statype != CFG80211_STA_TDLS_PEER_SETUP) {
3664 /* reject other things that can't change */
3665 if (params->sta_modify_mask & STATION_PARAM_APPLY_UAPSD)
3666 return -EINVAL;
3667 if (params->sta_modify_mask & STATION_PARAM_APPLY_CAPABILITY)
3668 return -EINVAL;
3669 if (params->supported_rates)
3670 return -EINVAL;
3671 if (params->ext_capab || params->ht_capa || params->vht_capa)
3672 return -EINVAL;
3673 }
3674
3675 if (statype != CFG80211_STA_AP_CLIENT) {
3676 if (params->vlan)
3677 return -EINVAL;
3678 }
3679
3680 switch (statype) {
3681 case CFG80211_STA_AP_MLME_CLIENT:
3682 /* Use this only for authorizing/unauthorizing a station */
3683 if (!(params->sta_flags_mask & BIT(NL80211_STA_FLAG_AUTHORIZED)))
3684 return -EOPNOTSUPP;
3685 break;
3686 case CFG80211_STA_AP_CLIENT:
3687 /* accept only the listed bits */
3688 if (params->sta_flags_mask &
3689 ~(BIT(NL80211_STA_FLAG_AUTHORIZED) |
3690 BIT(NL80211_STA_FLAG_AUTHENTICATED) |
3691 BIT(NL80211_STA_FLAG_ASSOCIATED) |
3692 BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) |
3693 BIT(NL80211_STA_FLAG_WME) |
3694 BIT(NL80211_STA_FLAG_MFP)))
3695 return -EINVAL;
3696
3697 /* but authenticated/associated only if driver handles it */
3698 if (!(wiphy->features & NL80211_FEATURE_FULL_AP_CLIENT_STATE) &&
3699 params->sta_flags_mask &
3700 (BIT(NL80211_STA_FLAG_AUTHENTICATED) |
3701 BIT(NL80211_STA_FLAG_ASSOCIATED)))
3702 return -EINVAL;
3703 break;
3704 case CFG80211_STA_IBSS:
3705 case CFG80211_STA_AP_STA:
3706 /* reject any changes other than AUTHORIZED */
3707 if (params->sta_flags_mask & ~BIT(NL80211_STA_FLAG_AUTHORIZED))
3708 return -EINVAL;
3709 break;
3710 case CFG80211_STA_TDLS_PEER_SETUP:
3711 /* reject any changes other than AUTHORIZED or WME */
3712 if (params->sta_flags_mask & ~(BIT(NL80211_STA_FLAG_AUTHORIZED) |
3713 BIT(NL80211_STA_FLAG_WME)))
3714 return -EINVAL;
3715 /* force (at least) rates when authorizing */
3716 if (params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED) &&
3717 !params->supported_rates)
3718 return -EINVAL;
3719 break;
3720 case CFG80211_STA_TDLS_PEER_ACTIVE:
3721 /* reject any changes */
3722 return -EINVAL;
3723 case CFG80211_STA_MESH_PEER_KERNEL:
3724 if (params->sta_modify_mask & STATION_PARAM_APPLY_PLINK_STATE)
3725 return -EINVAL;
3726 break;
3727 case CFG80211_STA_MESH_PEER_USER:
3728 if (params->plink_action != NL80211_PLINK_ACTION_NO_ACTION)
3729 return -EINVAL;
3730 break;
3731 }
3732
3733 return 0;
3734}
3735EXPORT_SYMBOL(cfg80211_check_station_change);
3736
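Note: cfg80211_check_station_change() centralizes, per station type, which parameters a change request may touch; drivers classify the station themselves and call this helper instead of nl80211 guessing the type from the iftype. The whitelist structure in miniature -- the flag bits and type names below are illustrative stand-ins, not the real enums:

#include <stdio.h>

enum statype { STA_AP_CLIENT, STA_IBSS, STA_TDLS_SETUP };

#define FLAG_AUTHORIZED	(1u << 0)
#define FLAG_WME	(1u << 1)
#define FLAG_MFP	(1u << 2)

/* mirrors the shape of cfg80211_check_station_change(): each station
 * type whitelists the flag bits a change request may touch */
static int check_change(enum statype t, unsigned mask)
{
	switch (t) {
	case STA_IBSS:
		return (mask & ~FLAG_AUTHORIZED) ? -1 : 0;
	case STA_TDLS_SETUP:
		return (mask & ~(FLAG_AUTHORIZED | FLAG_WME)) ? -1 : 0;
	case STA_AP_CLIENT:
		return (mask & ~(FLAG_AUTHORIZED | FLAG_WME | FLAG_MFP))
			? -1 : 0;
	}
	return -1;
}

int main(void)
{
	printf("IBSS set WME: %d\n", check_change(STA_IBSS, FLAG_WME));
	printf("TDLS set WME: %d\n", check_change(STA_TDLS_SETUP, FLAG_WME));
	return 0;
}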
3323/* 3737/*
3324 * Get vlan interface making sure it is running and on the right wiphy. 3738 * Get vlan interface making sure it is running and on the right wiphy.
3325 */ 3739 */
@@ -3342,6 +3756,13 @@ static struct net_device *get_vlan(struct genl_info *info,
3342 goto error; 3756 goto error;
3343 } 3757 }
3344 3758
3759 if (v->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
3760 v->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
3761 v->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) {
3762 ret = -EINVAL;
3763 goto error;
3764 }
3765
3345 if (!netif_running(v)) { 3766 if (!netif_running(v)) {
3346 ret = -ENETDOWN; 3767 ret = -ENETDOWN;
3347 goto error; 3768 goto error;
@@ -3359,21 +3780,13 @@ nl80211_sta_wme_policy[NL80211_STA_WME_MAX + 1] __read_mostly = {
3359 [NL80211_STA_WME_MAX_SP] = { .type = NLA_U8 }, 3780 [NL80211_STA_WME_MAX_SP] = { .type = NLA_U8 },
3360}; 3781};
3361 3782
3362static int nl80211_set_station_tdls(struct genl_info *info, 3783static int nl80211_parse_sta_wme(struct genl_info *info,
3363 struct station_parameters *params) 3784 struct station_parameters *params)
3364{ 3785{
3365 struct nlattr *tb[NL80211_STA_WME_MAX + 1]; 3786 struct nlattr *tb[NL80211_STA_WME_MAX + 1];
3366 struct nlattr *nla; 3787 struct nlattr *nla;
3367 int err; 3788 int err;
3368 3789
3369 /* Dummy STA entry gets updated once the peer capabilities are known */
3370 if (info->attrs[NL80211_ATTR_HT_CAPABILITY])
3371 params->ht_capa =
3372 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
3373 if (info->attrs[NL80211_ATTR_VHT_CAPABILITY])
3374 params->vht_capa =
3375 nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]);
3376
3377 /* parse WME attributes if present */ 3790 /* parse WME attributes if present */
3378 if (!info->attrs[NL80211_ATTR_STA_WME]) 3791 if (!info->attrs[NL80211_ATTR_STA_WME])
3379 return 0; 3792 return 0;
@@ -3401,18 +3814,34 @@ static int nl80211_set_station_tdls(struct genl_info *info,
3401 return 0; 3814 return 0;
3402} 3815}
3403 3816
3817static int nl80211_set_station_tdls(struct genl_info *info,
3818 struct station_parameters *params)
3819{
3820 /* Dummy STA entry gets updated once the peer capabilities are known */
3821 if (info->attrs[NL80211_ATTR_HT_CAPABILITY])
3822 params->ht_capa =
3823 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);
3824 if (info->attrs[NL80211_ATTR_VHT_CAPABILITY])
3825 params->vht_capa =
3826 nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]);
3827
3828 return nl80211_parse_sta_wme(info, params);
3829}
3830
3404static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info) 3831static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
3405{ 3832{
3406 struct cfg80211_registered_device *rdev = info->user_ptr[0]; 3833 struct cfg80211_registered_device *rdev = info->user_ptr[0];
3407 int err;
3408 struct net_device *dev = info->user_ptr[1]; 3834 struct net_device *dev = info->user_ptr[1];
3409 struct station_parameters params; 3835 struct station_parameters params;
3410 u8 *mac_addr = NULL; 3836 u8 *mac_addr;
3837 int err;
3411 3838
3412 memset(&params, 0, sizeof(params)); 3839 memset(&params, 0, sizeof(params));
3413 3840
3414 params.listen_interval = -1; 3841 params.listen_interval = -1;
3415 params.plink_state = -1; 3842
3843 if (!rdev->ops->change_station)
3844 return -EOPNOTSUPP;
3416 3845
3417 if (info->attrs[NL80211_ATTR_STA_AID]) 3846 if (info->attrs[NL80211_ATTR_STA_AID])
3418 return -EINVAL; 3847 return -EINVAL;
@@ -3445,19 +3874,23 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
3445 if (info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]) 3874 if (info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL])
3446 return -EINVAL; 3875 return -EINVAL;
3447 3876
3448 if (!rdev->ops->change_station)
3449 return -EOPNOTSUPP;
3450
3451 if (parse_station_flags(info, dev->ieee80211_ptr->iftype, &params)) 3877 if (parse_station_flags(info, dev->ieee80211_ptr->iftype, &params))
3452 return -EINVAL; 3878 return -EINVAL;
3453 3879
3454 if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION]) 3880 if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION]) {
3455 params.plink_action = 3881 params.plink_action =
3456 nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]); 3882 nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]);
3883 if (params.plink_action >= NUM_NL80211_PLINK_ACTIONS)
3884 return -EINVAL;
3885 }
3457 3886
3458 if (info->attrs[NL80211_ATTR_STA_PLINK_STATE]) 3887 if (info->attrs[NL80211_ATTR_STA_PLINK_STATE]) {
3459 params.plink_state = 3888 params.plink_state =
3460 nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_STATE]); 3889 nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_STATE]);
3890 if (params.plink_state >= NUM_NL80211_PLINK_STATES)
3891 return -EINVAL;
3892 params.sta_modify_mask |= STATION_PARAM_APPLY_PLINK_STATE;
3893 }
3461 3894
3462 if (info->attrs[NL80211_ATTR_LOCAL_MESH_POWER_MODE]) { 3895 if (info->attrs[NL80211_ATTR_LOCAL_MESH_POWER_MODE]) {
3463 enum nl80211_mesh_power_mode pm = nla_get_u32( 3896 enum nl80211_mesh_power_mode pm = nla_get_u32(
@@ -3470,127 +3903,33 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
3470 params.local_pm = pm; 3903 params.local_pm = pm;
3471 } 3904 }
3472 3905
3906 /* Include parameters for TDLS peer (will check later) */
3907 err = nl80211_set_station_tdls(info, &params);
3908 if (err)
3909 return err;
3910
3911 params.vlan = get_vlan(info, rdev);
3912 if (IS_ERR(params.vlan))
3913 return PTR_ERR(params.vlan);
3914
3473 switch (dev->ieee80211_ptr->iftype) { 3915 switch (dev->ieee80211_ptr->iftype) {
3474 case NL80211_IFTYPE_AP: 3916 case NL80211_IFTYPE_AP:
3475 case NL80211_IFTYPE_AP_VLAN: 3917 case NL80211_IFTYPE_AP_VLAN:
3476 case NL80211_IFTYPE_P2P_GO: 3918 case NL80211_IFTYPE_P2P_GO:
3477 /* disallow mesh-specific things */
3478 if (params.plink_action)
3479 return -EINVAL;
3480 if (params.local_pm)
3481 return -EINVAL;
3482
3483 /* TDLS can't be set, ... */
3484 if (params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
3485 return -EINVAL;
3486 /*
3487 * ... but don't bother the driver with it. This works around
3488 * a hostapd/wpa_supplicant issue -- it always includes the
3489 * TDLS_PEER flag in the mask even for AP mode.
3490 */
3491 params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER);
3492
3493 /* accept only the listed bits */
3494 if (params.sta_flags_mask &
3495 ~(BIT(NL80211_STA_FLAG_AUTHORIZED) |
3496 BIT(NL80211_STA_FLAG_AUTHENTICATED) |
3497 BIT(NL80211_STA_FLAG_ASSOCIATED) |
3498 BIT(NL80211_STA_FLAG_SHORT_PREAMBLE) |
3499 BIT(NL80211_STA_FLAG_WME) |
3500 BIT(NL80211_STA_FLAG_MFP)))
3501 return -EINVAL;
3502
3503 /* but authenticated/associated only if driver handles it */
3504 if (!(rdev->wiphy.features &
3505 NL80211_FEATURE_FULL_AP_CLIENT_STATE) &&
3506 params.sta_flags_mask &
3507 (BIT(NL80211_STA_FLAG_AUTHENTICATED) |
3508 BIT(NL80211_STA_FLAG_ASSOCIATED)))
3509 return -EINVAL;
3510
3511 /* reject other things that can't change */
3512 if (params.supported_rates)
3513 return -EINVAL;
3514 if (info->attrs[NL80211_ATTR_STA_CAPABILITY])
3515 return -EINVAL;
3516 if (info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY])
3517 return -EINVAL;
3518 if (info->attrs[NL80211_ATTR_HT_CAPABILITY] ||
3519 info->attrs[NL80211_ATTR_VHT_CAPABILITY])
3520 return -EINVAL;
3521
3522 /* must be last in here for error handling */
3523 params.vlan = get_vlan(info, rdev);
3524 if (IS_ERR(params.vlan))
3525 return PTR_ERR(params.vlan);
3526 break;
3527 case NL80211_IFTYPE_P2P_CLIENT: 3919 case NL80211_IFTYPE_P2P_CLIENT:
3528 case NL80211_IFTYPE_STATION: 3920 case NL80211_IFTYPE_STATION:
3529 /*
3530 * Don't allow userspace to change the TDLS_PEER flag,
3531 * but silently ignore attempts to change it since we
3532 * don't have state here to verify that it doesn't try
3533 * to change the flag.
3534 */
3535 params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_TDLS_PEER);
3536 /* Include parameters for TDLS peer (driver will check) */
3537 err = nl80211_set_station_tdls(info, &params);
3538 if (err)
3539 return err;
3540 /* disallow things sta doesn't support */
3541 if (params.plink_action)
3542 return -EINVAL;
3543 if (params.local_pm)
3544 return -EINVAL;
3545 /* reject any changes other than AUTHORIZED or WME (for TDLS) */
3546 if (params.sta_flags_mask & ~(BIT(NL80211_STA_FLAG_AUTHORIZED) |
3547 BIT(NL80211_STA_FLAG_WME)))
3548 return -EINVAL;
3549 break;
3550 case NL80211_IFTYPE_ADHOC: 3921 case NL80211_IFTYPE_ADHOC:
3551 /* disallow things sta doesn't support */
3552 if (params.plink_action)
3553 return -EINVAL;
3554 if (params.local_pm)
3555 return -EINVAL;
3556 if (info->attrs[NL80211_ATTR_HT_CAPABILITY] ||
3557 info->attrs[NL80211_ATTR_VHT_CAPABILITY])
3558 return -EINVAL;
3559 /* reject any changes other than AUTHORIZED */
3560 if (params.sta_flags_mask & ~BIT(NL80211_STA_FLAG_AUTHORIZED))
3561 return -EINVAL;
3562 break;
3563 case NL80211_IFTYPE_MESH_POINT: 3922 case NL80211_IFTYPE_MESH_POINT:
3564 /* disallow things mesh doesn't support */
3565 if (params.vlan)
3566 return -EINVAL;
3567 if (params.supported_rates)
3568 return -EINVAL;
3569 if (info->attrs[NL80211_ATTR_STA_CAPABILITY])
3570 return -EINVAL;
3571 if (info->attrs[NL80211_ATTR_STA_EXT_CAPABILITY])
3572 return -EINVAL;
3573 if (info->attrs[NL80211_ATTR_HT_CAPABILITY] ||
3574 info->attrs[NL80211_ATTR_VHT_CAPABILITY])
3575 return -EINVAL;
3576 /*
3577 * No special handling for TDLS here -- the userspace
3578 * mesh code doesn't have this bug.
3579 */
3580 if (params.sta_flags_mask &
3581 ~(BIT(NL80211_STA_FLAG_AUTHENTICATED) |
3582 BIT(NL80211_STA_FLAG_MFP) |
3583 BIT(NL80211_STA_FLAG_AUTHORIZED)))
3584 return -EINVAL;
3585 break; 3923 break;
3586 default: 3924 default:
3587 return -EOPNOTSUPP; 3925 err = -EOPNOTSUPP;
3926 goto out_put_vlan;
3588 } 3927 }
3589 3928
3590 /* be aware of params.vlan when changing code here */ 3929 /* driver will call cfg80211_check_station_change() */
3591
3592 err = rdev_change_station(rdev, dev, mac_addr, &params); 3930 err = rdev_change_station(rdev, dev, mac_addr, &params);
3593 3931
3932 out_put_vlan:
3594 if (params.vlan) 3933 if (params.vlan)
3595 dev_put(params.vlan); 3934 dev_put(params.vlan);
3596 3935
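Note: with validation delegated to cfg80211_check_station_change(), nl80211_set_station() shrinks to parsing plus the VLAN lookup. get_vlan() returns either a valid net_device pointer or an errno encoded into the pointer via the kernel's ERR_PTR convention, which the new out_put_vlan unwind path relies on. A self-contained userspace mock of that convention:

#include <stdio.h>

/* mock of the kernel's ERR_PTR convention used by get_vlan(): small
 * negative errnos are encoded into the top of the pointer space */
#define MAX_ERRNO 4095
static void *ERR_PTR(long err) { return (void *)err; }
static long PTR_ERR(const void *p) { return (long)p; }
static int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *get_thing(int fail)
{
	static int thing;
	return fail ? ERR_PTR(-22 /* EINVAL */) : &thing;
}

int main(void)
{
	void *p = get_thing(1);

	if (IS_ERR(p))
		printf("error path: %ld\n", PTR_ERR(p));
	return 0;
}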
@@ -3607,6 +3946,9 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
3607 3946
3608 memset(&params, 0, sizeof(params)); 3947 memset(&params, 0, sizeof(params));
3609 3948
3949 if (!rdev->ops->add_station)
3950 return -EOPNOTSUPP;
3951
3610 if (!info->attrs[NL80211_ATTR_MAC]) 3952 if (!info->attrs[NL80211_ATTR_MAC])
3611 return -EINVAL; 3953 return -EINVAL;
3612 3954
@@ -3652,50 +3994,32 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
3652 params.vht_capa = 3994 params.vht_capa =
3653 nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]); 3995 nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]);
3654 3996
3655 if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION]) 3997 if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION]) {
3656 params.plink_action = 3998 params.plink_action =
3657 nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]); 3999 nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]);
4000 if (params.plink_action >= NUM_NL80211_PLINK_ACTIONS)
4001 return -EINVAL;
4002 }
3658 4003
3659 if (!rdev->ops->add_station) 4004 err = nl80211_parse_sta_wme(info, &params);
3660 return -EOPNOTSUPP; 4005 if (err)
4006 return err;
3661 4007
3662 if (parse_station_flags(info, dev->ieee80211_ptr->iftype, &params)) 4008 if (parse_station_flags(info, dev->ieee80211_ptr->iftype, &params))
3663 return -EINVAL; 4009 return -EINVAL;
3664 4010
4011 /* When you run into this, adjust the code below for the new flag */
4012 BUILD_BUG_ON(NL80211_STA_FLAG_MAX != 7);
4013
3665 switch (dev->ieee80211_ptr->iftype) { 4014 switch (dev->ieee80211_ptr->iftype) {
3666 case NL80211_IFTYPE_AP: 4015 case NL80211_IFTYPE_AP:
3667 case NL80211_IFTYPE_AP_VLAN: 4016 case NL80211_IFTYPE_AP_VLAN:
3668 case NL80211_IFTYPE_P2P_GO: 4017 case NL80211_IFTYPE_P2P_GO:
3669 /* parse WME attributes if sta is WME capable */ 4018 /* ignore WME attributes if iface/sta is not capable */
3670 if ((rdev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) && 4019 if (!(rdev->wiphy.flags & WIPHY_FLAG_AP_UAPSD) ||
3671 (params.sta_flags_set & BIT(NL80211_STA_FLAG_WME)) && 4020 !(params.sta_flags_set & BIT(NL80211_STA_FLAG_WME)))
3672 info->attrs[NL80211_ATTR_STA_WME]) { 4021 params.sta_modify_mask &= ~STATION_PARAM_APPLY_UAPSD;
3673 struct nlattr *tb[NL80211_STA_WME_MAX + 1];
3674 struct nlattr *nla;
3675
3676 nla = info->attrs[NL80211_ATTR_STA_WME];
3677 err = nla_parse_nested(tb, NL80211_STA_WME_MAX, nla,
3678 nl80211_sta_wme_policy);
3679 if (err)
3680 return err;
3681 4022
3682 if (tb[NL80211_STA_WME_UAPSD_QUEUES])
3683 params.uapsd_queues =
3684 nla_get_u8(tb[NL80211_STA_WME_UAPSD_QUEUES]);
3685 if (params.uapsd_queues &
3686 ~IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK)
3687 return -EINVAL;
3688
3689 if (tb[NL80211_STA_WME_MAX_SP])
3690 params.max_sp =
3691 nla_get_u8(tb[NL80211_STA_WME_MAX_SP]);
3692
3693 if (params.max_sp &
3694 ~IEEE80211_WMM_IE_STA_QOSINFO_SP_MASK)
3695 return -EINVAL;
3696
3697 params.sta_modify_mask |= STATION_PARAM_APPLY_UAPSD;
3698 }
3699 /* TDLS peers cannot be added */ 4023 /* TDLS peers cannot be added */
3700 if (params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)) 4024 if (params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
3701 return -EINVAL; 4025 return -EINVAL;
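Note: plink_action here, and plink_state in set_station above, are now range-checked against their enum counts, since a u8 netlink payload is attacker-controlled. The pattern, with a made-up enum standing in for nl80211_plink_action:

#include <stdint.h>
#include <stdio.h>

enum plink_action { NO_ACTION, OPEN, BLOCK, NUM_PLINK_ACTIONS };

/* netlink u8 payloads are untrusted: range-check against the enum
 * count before treating the value as an action */
static int parse_action(uint8_t raw, enum plink_action *out)
{
	if (raw >= NUM_PLINK_ACTIONS)
		return -1;		/* -EINVAL in the kernel code */
	*out = (enum plink_action)raw;
	return 0;
}

int main(void)
{
	enum plink_action a;

	printf("parse 1 -> %d\n", parse_action(1, &a));
	printf("parse 9 -> %d\n", parse_action(9, &a));
	return 0;
}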
@@ -3716,6 +4040,9 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
3716 return PTR_ERR(params.vlan); 4040 return PTR_ERR(params.vlan);
3717 break; 4041 break;
3718 case NL80211_IFTYPE_MESH_POINT: 4042 case NL80211_IFTYPE_MESH_POINT:
4043 /* ignore uAPSD data */
4044 params.sta_modify_mask &= ~STATION_PARAM_APPLY_UAPSD;
4045
3719 /* associated is disallowed */ 4046 /* associated is disallowed */
3720 if (params.sta_flags_mask & BIT(NL80211_STA_FLAG_ASSOCIATED)) 4047 if (params.sta_flags_mask & BIT(NL80211_STA_FLAG_ASSOCIATED))
3721 return -EINVAL; 4048 return -EINVAL;
@@ -3724,8 +4051,14 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
3724 return -EINVAL; 4051 return -EINVAL;
3725 break; 4052 break;
3726 case NL80211_IFTYPE_STATION: 4053 case NL80211_IFTYPE_STATION:
3727 /* associated is disallowed */ 4054 case NL80211_IFTYPE_P2P_CLIENT:
3728 if (params.sta_flags_mask & BIT(NL80211_STA_FLAG_ASSOCIATED)) 4055 /* ignore uAPSD data */
4056 params.sta_modify_mask &= ~STATION_PARAM_APPLY_UAPSD;
4057
4058 /* these are disallowed */
4059 if (params.sta_flags_mask &
4060 (BIT(NL80211_STA_FLAG_ASSOCIATED) |
4061 BIT(NL80211_STA_FLAG_AUTHENTICATED)))
3729 return -EINVAL; 4062 return -EINVAL;
3730 /* Only TDLS peers can be added */ 4063 /* Only TDLS peers can be added */
3731 if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))) 4064 if (!(params.sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
@@ -3736,6 +4069,11 @@ static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
3736 /* ... with external setup is supported */ 4069 /* ... with external setup is supported */
3737 if (!(rdev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP)) 4070 if (!(rdev->wiphy.flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP))
3738 return -EOPNOTSUPP; 4071 return -EOPNOTSUPP;
4072 /*
4073 * Older wpa_supplicant versions always mark the TDLS peer
4074 * as authorized, but it shouldn't yet be.
4075 */
4076 params.sta_flags_mask &= ~BIT(NL80211_STA_FLAG_AUTHORIZED);
3739 break; 4077 break;
3740 default: 4078 default:
3741 return -EOPNOTSUPP; 4079 return -EOPNOTSUPP;
@@ -4280,6 +4618,7 @@ static const struct nla_policy
4280 [NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL] = { .type = NLA_U8 }, 4618 [NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL] = { .type = NLA_U8 },
4281 [NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC] = { .type = NLA_U8 }, 4619 [NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC] = { .type = NLA_U8 },
4282 [NL80211_MESH_SETUP_USERSPACE_AUTH] = { .type = NLA_FLAG }, 4620 [NL80211_MESH_SETUP_USERSPACE_AUTH] = { .type = NLA_FLAG },
4621 [NL80211_MESH_SETUP_USERSPACE_MPM] = { .type = NLA_FLAG },
4283 [NL80211_MESH_SETUP_IE] = { .type = NLA_BINARY, 4622 [NL80211_MESH_SETUP_IE] = { .type = NLA_BINARY,
4284 .len = IEEE80211_MAX_DATA_LEN }, 4623 .len = IEEE80211_MAX_DATA_LEN },
4285 [NL80211_MESH_SETUP_USERSPACE_AMPE] = { .type = NLA_FLAG }, 4624 [NL80211_MESH_SETUP_USERSPACE_AMPE] = { .type = NLA_FLAG },
@@ -4418,6 +4757,7 @@ do { \
4418static int nl80211_parse_mesh_setup(struct genl_info *info, 4757static int nl80211_parse_mesh_setup(struct genl_info *info,
4419 struct mesh_setup *setup) 4758 struct mesh_setup *setup)
4420{ 4759{
4760 struct cfg80211_registered_device *rdev = info->user_ptr[0];
4421 struct nlattr *tb[NL80211_MESH_SETUP_ATTR_MAX + 1]; 4761 struct nlattr *tb[NL80211_MESH_SETUP_ATTR_MAX + 1];
4422 4762
4423 if (!info->attrs[NL80211_ATTR_MESH_SETUP]) 4763 if (!info->attrs[NL80211_ATTR_MESH_SETUP])
@@ -4454,8 +4794,14 @@ static int nl80211_parse_mesh_setup(struct genl_info *info,
4454 setup->ie = nla_data(ieattr); 4794 setup->ie = nla_data(ieattr);
4455 setup->ie_len = nla_len(ieattr); 4795 setup->ie_len = nla_len(ieattr);
4456 } 4796 }
4797 if (tb[NL80211_MESH_SETUP_USERSPACE_MPM] &&
4798 !(rdev->wiphy.features & NL80211_FEATURE_USERSPACE_MPM))
4799 return -EINVAL;
4800 setup->user_mpm = nla_get_flag(tb[NL80211_MESH_SETUP_USERSPACE_MPM]);
4457 setup->is_authenticated = nla_get_flag(tb[NL80211_MESH_SETUP_USERSPACE_AUTH]); 4801 setup->is_authenticated = nla_get_flag(tb[NL80211_MESH_SETUP_USERSPACE_AUTH]);
4458 setup->is_secure = nla_get_flag(tb[NL80211_MESH_SETUP_USERSPACE_AMPE]); 4802 setup->is_secure = nla_get_flag(tb[NL80211_MESH_SETUP_USERSPACE_AMPE]);
4803 if (setup->is_secure)
4804 setup->user_mpm = true;
4459 4805
4460 return 0; 4806 return 0;
4461} 4807}
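The new NL80211_MESH_SETUP_USERSPACE_MPM flag is rejected unless the wiphy
advertises support, and secure mesh (USERSPACE_AMPE) now implies a userspace
MPM. On the driver side only a capability bit is needed; a minimal sketch,
with the mydrv_* name hypothetical:

    static int mydrv_setup_wiphy(struct wiphy *wiphy)
    {
            /* let userspace (e.g. wpa_supplicant) run mesh peering
             * management instead of the kernel MPM */
            wiphy->features |= NL80211_FEATURE_USERSPACE_MPM;
            return wiphy_register(wiphy);
    }
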
@@ -5663,14 +6009,10 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
5663{ 6009{
5664 struct cfg80211_registered_device *rdev = info->user_ptr[0]; 6010 struct cfg80211_registered_device *rdev = info->user_ptr[0];
5665 struct net_device *dev = info->user_ptr[1]; 6011 struct net_device *dev = info->user_ptr[1];
5666 struct cfg80211_crypto_settings crypto;
5667 struct ieee80211_channel *chan; 6012 struct ieee80211_channel *chan;
5668 const u8 *bssid, *ssid, *ie = NULL, *prev_bssid = NULL; 6013 struct cfg80211_assoc_request req = {};
5669 int err, ssid_len, ie_len = 0; 6014 const u8 *bssid, *ssid;
5670 bool use_mfp = false; 6015 int err, ssid_len = 0;
5671 u32 flags = 0;
5672 struct ieee80211_ht_cap *ht_capa = NULL;
5673 struct ieee80211_ht_cap *ht_capa_mask = NULL;
5674 6016
5675 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) 6017 if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
5676 return -EINVAL; 6018 return -EINVAL;
@@ -5698,41 +6040,58 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
5698 ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]); 6040 ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
5699 6041
5700 if (info->attrs[NL80211_ATTR_IE]) { 6042 if (info->attrs[NL80211_ATTR_IE]) {
5701 ie = nla_data(info->attrs[NL80211_ATTR_IE]); 6043 req.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
5702 ie_len = nla_len(info->attrs[NL80211_ATTR_IE]); 6044 req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
5703 } 6045 }
5704 6046
5705 if (info->attrs[NL80211_ATTR_USE_MFP]) { 6047 if (info->attrs[NL80211_ATTR_USE_MFP]) {
5706 enum nl80211_mfp mfp = 6048 enum nl80211_mfp mfp =
5707 nla_get_u32(info->attrs[NL80211_ATTR_USE_MFP]); 6049 nla_get_u32(info->attrs[NL80211_ATTR_USE_MFP]);
5708 if (mfp == NL80211_MFP_REQUIRED) 6050 if (mfp == NL80211_MFP_REQUIRED)
5709 use_mfp = true; 6051 req.use_mfp = true;
5710 else if (mfp != NL80211_MFP_NO) 6052 else if (mfp != NL80211_MFP_NO)
5711 return -EINVAL; 6053 return -EINVAL;
5712 } 6054 }
5713 6055
5714 if (info->attrs[NL80211_ATTR_PREV_BSSID]) 6056 if (info->attrs[NL80211_ATTR_PREV_BSSID])
5715 prev_bssid = nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]); 6057 req.prev_bssid = nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]);
5716 6058
5717 if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_HT])) 6059 if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_HT]))
5718 flags |= ASSOC_REQ_DISABLE_HT; 6060 req.flags |= ASSOC_REQ_DISABLE_HT;
5719 6061
5720 if (info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]) 6062 if (info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK])
5721 ht_capa_mask = 6063 memcpy(&req.ht_capa_mask,
5722 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]); 6064 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]),
6065 sizeof(req.ht_capa_mask));
5723 6066
5724 if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) { 6067 if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) {
5725 if (!ht_capa_mask) 6068 if (!info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK])
5726 return -EINVAL; 6069 return -EINVAL;
5727 ht_capa = nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]); 6070 memcpy(&req.ht_capa,
6071 nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]),
6072 sizeof(req.ht_capa));
5728 } 6073 }
5729 6074
5730 err = nl80211_crypto_settings(rdev, info, &crypto, 1); 6075 if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_VHT]))
6076 req.flags |= ASSOC_REQ_DISABLE_VHT;
6077
6078 if (info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK])
6079 memcpy(&req.vht_capa_mask,
6080 nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK]),
6081 sizeof(req.vht_capa_mask));
6082
6083 if (info->attrs[NL80211_ATTR_VHT_CAPABILITY]) {
6084 if (!info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK])
6085 return -EINVAL;
6086 memcpy(&req.vht_capa,
6087 nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]),
6088 sizeof(req.vht_capa));
6089 }
6090
6091 err = nl80211_crypto_settings(rdev, info, &req.crypto, 1);
5731 if (!err) 6092 if (!err)
5732 err = cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid, 6093 err = cfg80211_mlme_assoc(rdev, dev, chan, bssid,
5733 ssid, ssid_len, ie, ie_len, use_mfp, 6094 ssid, ssid_len, &req);
5734 &crypto, flags, ht_capa,
5735 ht_capa_mask);
5736 6095
5737 return err; 6096 return err;
5738} 6097}
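nl80211_associate() now fills a single struct cfg80211_assoc_request instead
of passing ten separate arguments to cfg80211_mlme_assoc(). Reconstructed
from the fields used above, as a sketch only; the authoritative definition
lives in include/net/cfg80211.h and may carry further members:

    struct cfg80211_assoc_request {
            const u8 *ie, *prev_bssid;
            size_t ie_len;
            struct cfg80211_crypto_settings crypto;
            bool use_mfp;
            u32 flags;      /* ASSOC_REQ_DISABLE_HT / ASSOC_REQ_DISABLE_VHT */
            struct ieee80211_ht_cap ht_capa, ht_capa_mask;
            struct ieee80211_vht_cap vht_capa, vht_capa_mask;
    };
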
@@ -6312,6 +6671,24 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
6312 sizeof(connect.ht_capa)); 6671 sizeof(connect.ht_capa));
6313 } 6672 }
6314 6673
6674 if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_VHT]))
6675 connect.flags |= ASSOC_REQ_DISABLE_VHT;
6676
6677 if (info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK])
6678 memcpy(&connect.vht_capa_mask,
6679 nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK]),
6680 sizeof(connect.vht_capa_mask));
6681
6682 if (info->attrs[NL80211_ATTR_VHT_CAPABILITY]) {
6683 if (!info->attrs[NL80211_ATTR_VHT_CAPABILITY_MASK]) {
6684 kfree(connkeys);
6685 return -EINVAL;
6686 }
6687 memcpy(&connect.vht_capa,
6688 nla_data(info->attrs[NL80211_ATTR_VHT_CAPABILITY]),
6689 sizeof(connect.vht_capa));
6690 }
6691
6315 err = cfg80211_connect(rdev, dev, &connect, connkeys); 6692 err = cfg80211_connect(rdev, dev, &connect, connkeys);
6316 if (err) 6693 if (err)
6317 kfree(connkeys); 6694 kfree(connkeys);
@@ -7085,6 +7462,9 @@ static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
7085 return err; 7462 return err;
7086 } 7463 }
7087 7464
7465 if (setup.user_mpm)
7466 cfg.auto_open_plinks = false;
7467
7088 if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { 7468 if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
7089 err = nl80211_parse_chandef(rdev, info, &setup.chandef); 7469 err = nl80211_parse_chandef(rdev, info, &setup.chandef);
7090 if (err) 7470 if (err)
@@ -7284,7 +7664,8 @@ static int nl80211_parse_wowlan_tcp(struct cfg80211_registered_device *rdev,
7284 return -EINVAL; 7664 return -EINVAL;
7285 7665
7286 if (nla_get_u32(tb[NL80211_WOWLAN_TCP_DATA_INTERVAL]) > 7666 if (nla_get_u32(tb[NL80211_WOWLAN_TCP_DATA_INTERVAL]) >
7287 rdev->wiphy.wowlan.tcp->data_interval_max) 7667 rdev->wiphy.wowlan.tcp->data_interval_max ||
7668 nla_get_u32(tb[NL80211_WOWLAN_TCP_DATA_INTERVAL]) == 0)
7288 return -EINVAL; 7669 return -EINVAL;
7289 7670
7290 wake_size = nla_len(tb[NL80211_WOWLAN_TCP_WAKE_PAYLOAD]); 7671 wake_size = nla_len(tb[NL80211_WOWLAN_TCP_WAKE_PAYLOAD]);
@@ -7769,6 +8150,54 @@ static int nl80211_stop_p2p_device(struct sk_buff *skb, struct genl_info *info)
7769 return 0; 8150 return 0;
7770} 8151}
7771 8152
8153static int nl80211_get_protocol_features(struct sk_buff *skb,
8154 struct genl_info *info)
8155{
8156 void *hdr;
8157 struct sk_buff *msg;
8158
8159 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
8160 if (!msg)
8161 return -ENOMEM;
8162
8163 hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
8164 NL80211_CMD_GET_PROTOCOL_FEATURES);
8165 if (!hdr)
8166 goto nla_put_failure;
8167
8168 if (nla_put_u32(msg, NL80211_ATTR_PROTOCOL_FEATURES,
8169 NL80211_PROTOCOL_FEATURE_SPLIT_WIPHY_DUMP))
8170 goto nla_put_failure;
8171
8172 genlmsg_end(msg, hdr);
8173 return genlmsg_reply(msg, info);
8174
8175 nla_put_failure:
8176 kfree_skb(msg);
8177 return -ENOBUFS;
8178}
8179
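A hedged userspace sketch (libnl-3) of issuing the new command; the reply
callback that would parse NL80211_ATTR_PROTOCOL_FEATURES for the
SPLIT_WIPHY_DUMP bit is omitted for brevity:

    #include <netlink/netlink.h>
    #include <netlink/genl/genl.h>
    #include <netlink/genl/ctrl.h>
    #include <linux/nl80211.h>

    static int query_protocol_features(void)
    {
            struct nl_sock *sk = nl_socket_alloc();
            struct nl_msg *msg;
            int family;

            if (!sk)
                    return -1;
            genl_connect(sk);
            family = genl_ctrl_resolve(sk, "nl80211");

            msg = nlmsg_alloc();
            if (!msg) {
                    nl_socket_free(sk);
                    return -1;
            }
            genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                        NL80211_CMD_GET_PROTOCOL_FEATURES, 0);
            nl_send_auto(sk, msg);
            /* nl_recvmsgs() and attribute parsing would go here */
            nlmsg_free(msg);
            nl_socket_free(sk);
            return 0;
    }
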
8180static int nl80211_update_ft_ies(struct sk_buff *skb, struct genl_info *info)
8181{
8182 struct cfg80211_registered_device *rdev = info->user_ptr[0];
8183 struct cfg80211_update_ft_ies_params ft_params;
8184 struct net_device *dev = info->user_ptr[1];
8185
8186 if (!rdev->ops->update_ft_ies)
8187 return -EOPNOTSUPP;
8188
8189 if (!info->attrs[NL80211_ATTR_MDID] ||
8190 !is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
8191 return -EINVAL;
8192
8193 memset(&ft_params, 0, sizeof(ft_params));
8194 ft_params.md = nla_get_u16(info->attrs[NL80211_ATTR_MDID]);
8195 ft_params.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
8196 ft_params.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
8197
8198 return rdev_update_ft_ies(rdev, dev, &ft_params);
8199}
8200
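The driver side of NL80211_CMD_UPDATE_FT_IES is a single new cfg80211 op. A
hypothetical implementation (the mydrv_* names and firmware helper are made
up) that just forwards the fast-transition IEs:

    static int mydrv_update_ft_ies(struct wiphy *wiphy, struct net_device *dev,
                                   struct cfg80211_update_ft_ies_params *ftie)
    {
            /* ftie->md is the Mobility Domain ID; ftie->ie/ftie->ie_len
             * carry the FT IEs supplied by wpa_supplicant */
            return mydrv_fw_set_ft_ies(dev, ftie->md, ftie->ie, ftie->ie_len);
    }

    static struct cfg80211_ops mydrv_cfg_ops = {
            /* ... */
            .update_ft_ies = mydrv_update_ft_ies,
    };
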
7772#define NL80211_FLAG_NEED_WIPHY 0x01 8201#define NL80211_FLAG_NEED_WIPHY 0x01
7773#define NL80211_FLAG_NEED_NETDEV 0x02 8202#define NL80211_FLAG_NEED_NETDEV 0x02
7774#define NL80211_FLAG_NEED_RTNL 0x04 8203#define NL80211_FLAG_NEED_RTNL 0x04
@@ -8445,6 +8874,19 @@ static struct genl_ops nl80211_ops[] = {
8445 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 8874 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
8446 NL80211_FLAG_NEED_RTNL, 8875 NL80211_FLAG_NEED_RTNL,
8447 }, 8876 },
8877 {
8878 .cmd = NL80211_CMD_GET_PROTOCOL_FEATURES,
8879 .doit = nl80211_get_protocol_features,
8880 .policy = nl80211_policy,
8881 },
8882 {
8883 .cmd = NL80211_CMD_UPDATE_FT_IES,
8884 .doit = nl80211_update_ft_ies,
8885 .policy = nl80211_policy,
8886 .flags = GENL_ADMIN_PERM,
8887 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
8888 NL80211_FLAG_NEED_RTNL,
8889 },
8448}; 8890};
8449 8891
8450static struct genl_multicast_group nl80211_mlme_mcgrp = { 8892static struct genl_multicast_group nl80211_mlme_mcgrp = {
@@ -8472,7 +8914,8 @@ void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev)
8472 if (!msg) 8914 if (!msg)
8473 return; 8915 return;
8474 8916
8475 if (nl80211_send_wiphy(msg, 0, 0, 0, rdev) < 0) { 8917 if (nl80211_send_wiphy(rdev, msg, 0, 0, 0,
8918 false, NULL, NULL, NULL) < 0) {
8476 nlmsg_free(msg); 8919 nlmsg_free(msg);
8477 return; 8920 return;
8478 } 8921 }
@@ -8796,21 +9239,31 @@ void nl80211_send_disassoc(struct cfg80211_registered_device *rdev,
8796 NL80211_CMD_DISASSOCIATE, gfp); 9239 NL80211_CMD_DISASSOCIATE, gfp);
8797} 9240}
8798 9241
8799void nl80211_send_unprot_deauth(struct cfg80211_registered_device *rdev, 9242void cfg80211_send_unprot_deauth(struct net_device *dev, const u8 *buf,
8800 struct net_device *netdev, const u8 *buf, 9243 size_t len)
8801 size_t len, gfp_t gfp)
8802{ 9244{
8803 nl80211_send_mlme_event(rdev, netdev, buf, len, 9245 struct wireless_dev *wdev = dev->ieee80211_ptr;
8804 NL80211_CMD_UNPROT_DEAUTHENTICATE, gfp); 9246 struct wiphy *wiphy = wdev->wiphy;
9247 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
9248
9249 trace_cfg80211_send_unprot_deauth(dev);
9250 nl80211_send_mlme_event(rdev, dev, buf, len,
9251 NL80211_CMD_UNPROT_DEAUTHENTICATE, GFP_ATOMIC);
8805} 9252}
9253EXPORT_SYMBOL(cfg80211_send_unprot_deauth);
8806 9254
8807void nl80211_send_unprot_disassoc(struct cfg80211_registered_device *rdev, 9255void cfg80211_send_unprot_disassoc(struct net_device *dev, const u8 *buf,
8808 struct net_device *netdev, const u8 *buf, 9256 size_t len)
8809 size_t len, gfp_t gfp)
8810{ 9257{
8811 nl80211_send_mlme_event(rdev, netdev, buf, len, 9258 struct wireless_dev *wdev = dev->ieee80211_ptr;
8812 NL80211_CMD_UNPROT_DISASSOCIATE, gfp); 9259 struct wiphy *wiphy = wdev->wiphy;
9260 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
9261
9262 trace_cfg80211_send_unprot_disassoc(dev);
9263 nl80211_send_mlme_event(rdev, dev, buf, len,
9264 NL80211_CMD_UNPROT_DISASSOCIATE, GFP_ATOMIC);
8813} 9265}
9266EXPORT_SYMBOL(cfg80211_send_unprot_disassoc);
8814 9267
8815static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev, 9268static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev,
8816 struct net_device *netdev, int cmd, 9269 struct net_device *netdev, int cmd,
@@ -9013,14 +9466,19 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
9013 nlmsg_free(msg); 9466 nlmsg_free(msg);
9014} 9467}
9015 9468
9016void nl80211_send_new_peer_candidate(struct cfg80211_registered_device *rdev, 9469void cfg80211_notify_new_peer_candidate(struct net_device *dev, const u8 *addr,
9017 struct net_device *netdev, 9470 const u8* ie, u8 ie_len, gfp_t gfp)
9018 const u8 *macaddr, const u8* ie, u8 ie_len,
9019 gfp_t gfp)
9020{ 9471{
9472 struct wireless_dev *wdev = dev->ieee80211_ptr;
9473 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
9021 struct sk_buff *msg; 9474 struct sk_buff *msg;
9022 void *hdr; 9475 void *hdr;
9023 9476
9477 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_MESH_POINT))
9478 return;
9479
9480 trace_cfg80211_notify_new_peer_candidate(dev, addr);
9481
9024 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); 9482 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
9025 if (!msg) 9483 if (!msg)
9026 return; 9484 return;
@@ -9032,8 +9490,8 @@ void nl80211_send_new_peer_candidate(struct cfg80211_registered_device *rdev,
9032 } 9490 }
9033 9491
9034 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || 9492 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
9035 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || 9493 nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
9036 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, macaddr) || 9494 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) ||
9037 (ie_len && ie && 9495 (ie_len && ie &&
9038 nla_put(msg, NL80211_ATTR_IE, ie_len , ie))) 9496 nla_put(msg, NL80211_ATTR_IE, ie_len , ie)))
9039 goto nla_put_failure; 9497 goto nla_put_failure;
@@ -9048,6 +9506,7 @@ void nl80211_send_new_peer_candidate(struct cfg80211_registered_device *rdev,
9048 genlmsg_cancel(msg, hdr); 9506 genlmsg_cancel(msg, hdr);
9049 nlmsg_free(msg); 9507 nlmsg_free(msg);
9050} 9508}
9509EXPORT_SYMBOL(cfg80211_notify_new_peer_candidate);
9051 9510
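The exported helper may only be called on mesh interfaces (the WARN_ON above
enforces it). A mesh driver relying on a userspace MPM would report
candidates from its beacon RX path roughly like this (sketch):

    static void mydrv_mesh_rx_beacon(struct net_device *dev, const u8 *sa,
                                     const u8 *mesh_ies, u8 ies_len)
    {
            /* wakes the userspace MPM so it can start peering with sa */
            cfg80211_notify_new_peer_candidate(dev, sa, mesh_ies, ies_len,
                                               GFP_ATOMIC);
    }
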
9052void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev, 9511void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev,
9053 struct net_device *netdev, const u8 *addr, 9512 struct net_device *netdev, const u8 *addr,
@@ -9116,7 +9575,7 @@ void nl80211_send_beacon_hint_event(struct wiphy *wiphy,
9116 nl_freq = nla_nest_start(msg, NL80211_ATTR_FREQ_BEFORE); 9575 nl_freq = nla_nest_start(msg, NL80211_ATTR_FREQ_BEFORE);
9117 if (!nl_freq) 9576 if (!nl_freq)
9118 goto nla_put_failure; 9577 goto nla_put_failure;
9119 if (nl80211_msg_put_channel(msg, channel_before)) 9578 if (nl80211_msg_put_channel(msg, channel_before, false))
9120 goto nla_put_failure; 9579 goto nla_put_failure;
9121 nla_nest_end(msg, nl_freq); 9580 nla_nest_end(msg, nl_freq);
9122 9581
@@ -9124,7 +9583,7 @@ void nl80211_send_beacon_hint_event(struct wiphy *wiphy,
9124 nl_freq = nla_nest_start(msg, NL80211_ATTR_FREQ_AFTER); 9583 nl_freq = nla_nest_start(msg, NL80211_ATTR_FREQ_AFTER);
9125 if (!nl_freq) 9584 if (!nl_freq)
9126 goto nla_put_failure; 9585 goto nla_put_failure;
9127 if (nl80211_msg_put_channel(msg, channel_after)) 9586 if (nl80211_msg_put_channel(msg, channel_after, false))
9128 goto nla_put_failure; 9587 goto nla_put_failure;
9129 nla_nest_end(msg, nl_freq); 9588 nla_nest_end(msg, nl_freq);
9130 9589
@@ -9186,31 +9645,42 @@ static void nl80211_send_remain_on_chan_event(
9186 nlmsg_free(msg); 9645 nlmsg_free(msg);
9187} 9646}
9188 9647
9189void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev, 9648void cfg80211_ready_on_channel(struct wireless_dev *wdev, u64 cookie,
9190 struct wireless_dev *wdev, u64 cookie, 9649 struct ieee80211_channel *chan,
9191 struct ieee80211_channel *chan, 9650 unsigned int duration, gfp_t gfp)
9192 unsigned int duration, gfp_t gfp)
9193{ 9651{
9652 struct wiphy *wiphy = wdev->wiphy;
9653 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
9654
9655 trace_cfg80211_ready_on_channel(wdev, cookie, chan, duration);
9194 nl80211_send_remain_on_chan_event(NL80211_CMD_REMAIN_ON_CHANNEL, 9656 nl80211_send_remain_on_chan_event(NL80211_CMD_REMAIN_ON_CHANNEL,
9195 rdev, wdev, cookie, chan, 9657 rdev, wdev, cookie, chan,
9196 duration, gfp); 9658 duration, gfp);
9197} 9659}
9660EXPORT_SYMBOL(cfg80211_ready_on_channel);
9198 9661
9199void nl80211_send_remain_on_channel_cancel( 9662void cfg80211_remain_on_channel_expired(struct wireless_dev *wdev, u64 cookie,
9200 struct cfg80211_registered_device *rdev, 9663 struct ieee80211_channel *chan,
9201 struct wireless_dev *wdev, 9664 gfp_t gfp)
9202 u64 cookie, struct ieee80211_channel *chan, gfp_t gfp)
9203{ 9665{
9666 struct wiphy *wiphy = wdev->wiphy;
9667 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
9668
9669 trace_cfg80211_ready_on_channel_expired(wdev, cookie, chan);
9204 nl80211_send_remain_on_chan_event(NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL, 9670 nl80211_send_remain_on_chan_event(NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL,
9205 rdev, wdev, cookie, chan, 0, gfp); 9671 rdev, wdev, cookie, chan, 0, gfp);
9206} 9672}
9673EXPORT_SYMBOL(cfg80211_remain_on_channel_expired);
9207 9674
9208void nl80211_send_sta_event(struct cfg80211_registered_device *rdev, 9675void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr,
9209 struct net_device *dev, const u8 *mac_addr, 9676 struct station_info *sinfo, gfp_t gfp)
9210 struct station_info *sinfo, gfp_t gfp)
9211{ 9677{
9678 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
9679 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
9212 struct sk_buff *msg; 9680 struct sk_buff *msg;
9213 9681
9682 trace_cfg80211_new_sta(dev, mac_addr, sinfo);
9683
9214 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); 9684 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
9215 if (!msg) 9685 if (!msg)
9216 return; 9686 return;
@@ -9224,14 +9694,17 @@ void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
9224 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, 9694 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
9225 nl80211_mlme_mcgrp.id, gfp); 9695 nl80211_mlme_mcgrp.id, gfp);
9226} 9696}
9697EXPORT_SYMBOL(cfg80211_new_sta);
9227 9698
9228void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev, 9699void cfg80211_del_sta(struct net_device *dev, const u8 *mac_addr, gfp_t gfp)
9229 struct net_device *dev, const u8 *mac_addr,
9230 gfp_t gfp)
9231{ 9700{
9701 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
9702 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
9232 struct sk_buff *msg; 9703 struct sk_buff *msg;
9233 void *hdr; 9704 void *hdr;
9234 9705
9706 trace_cfg80211_del_sta(dev, mac_addr);
9707
9235 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); 9708 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
9236 if (!msg) 9709 if (!msg)
9237 return; 9710 return;
@@ -9256,12 +9729,14 @@ void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
9256 genlmsg_cancel(msg, hdr); 9729 genlmsg_cancel(msg, hdr);
9257 nlmsg_free(msg); 9730 nlmsg_free(msg);
9258} 9731}
9732EXPORT_SYMBOL(cfg80211_del_sta);
9259 9733
9260void nl80211_send_conn_failed_event(struct cfg80211_registered_device *rdev, 9734void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
9261 struct net_device *dev, const u8 *mac_addr, 9735 enum nl80211_connect_failed_reason reason,
9262 enum nl80211_connect_failed_reason reason, 9736 gfp_t gfp)
9263 gfp_t gfp)
9264{ 9737{
9738 struct wiphy *wiphy = dev->ieee80211_ptr->wiphy;
9739 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
9265 struct sk_buff *msg; 9740 struct sk_buff *msg;
9266 void *hdr; 9741 void *hdr;
9267 9742
@@ -9290,6 +9765,7 @@ void nl80211_send_conn_failed_event(struct cfg80211_registered_device *rdev,
9290 genlmsg_cancel(msg, hdr); 9765 genlmsg_cancel(msg, hdr);
9291 nlmsg_free(msg); 9766 nlmsg_free(msg);
9292} 9767}
9768EXPORT_SYMBOL(cfg80211_conn_failed);
9293 9769
9294static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd, 9770static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
9295 const u8 *addr, gfp_t gfp) 9771 const u8 *addr, gfp_t gfp)
@@ -9334,19 +9810,47 @@ static bool __nl80211_unexpected_frame(struct net_device *dev, u8 cmd,
9334 return true; 9810 return true;
9335} 9811}
9336 9812
9337bool nl80211_unexpected_frame(struct net_device *dev, const u8 *addr, gfp_t gfp) 9813bool cfg80211_rx_spurious_frame(struct net_device *dev,
9814 const u8 *addr, gfp_t gfp)
9338{ 9815{
9339 return __nl80211_unexpected_frame(dev, NL80211_CMD_UNEXPECTED_FRAME, 9816 struct wireless_dev *wdev = dev->ieee80211_ptr;
9340 addr, gfp); 9817 bool ret;
9818
9819 trace_cfg80211_rx_spurious_frame(dev, addr);
9820
9821 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
9822 wdev->iftype != NL80211_IFTYPE_P2P_GO)) {
9823 trace_cfg80211_return_bool(false);
9824 return false;
9825 }
9826 ret = __nl80211_unexpected_frame(dev, NL80211_CMD_UNEXPECTED_FRAME,
9827 addr, gfp);
9828 trace_cfg80211_return_bool(ret);
9829 return ret;
9341} 9830}
9831EXPORT_SYMBOL(cfg80211_rx_spurious_frame);
9342 9832
9343bool nl80211_unexpected_4addr_frame(struct net_device *dev, 9833bool cfg80211_rx_unexpected_4addr_frame(struct net_device *dev,
9344 const u8 *addr, gfp_t gfp) 9834 const u8 *addr, gfp_t gfp)
9345{ 9835{
9346 return __nl80211_unexpected_frame(dev, 9836 struct wireless_dev *wdev = dev->ieee80211_ptr;
9347 NL80211_CMD_UNEXPECTED_4ADDR_FRAME, 9837 bool ret;
9348 addr, gfp); 9838
9839 trace_cfg80211_rx_unexpected_4addr_frame(dev, addr);
9840
9841 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
9842 wdev->iftype != NL80211_IFTYPE_P2P_GO &&
9843 wdev->iftype != NL80211_IFTYPE_AP_VLAN)) {
9844 trace_cfg80211_return_bool(false);
9845 return false;
9846 }
9847 ret = __nl80211_unexpected_frame(dev,
9848 NL80211_CMD_UNEXPECTED_4ADDR_FRAME,
9849 addr, gfp);
9850 trace_cfg80211_return_bool(ret);
9851 return ret;
9349} 9852}
9853EXPORT_SYMBOL(cfg80211_rx_unexpected_4addr_frame);
9350 9854
9351int nl80211_send_mgmt(struct cfg80211_registered_device *rdev, 9855int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
9352 struct wireless_dev *wdev, u32 nlportid, 9856 struct wireless_dev *wdev, u32 nlportid,
@@ -9386,15 +9890,17 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
9386 return -ENOBUFS; 9890 return -ENOBUFS;
9387} 9891}
9388 9892
9389void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev, 9893void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
9390 struct wireless_dev *wdev, u64 cookie, 9894 const u8 *buf, size_t len, bool ack, gfp_t gfp)
9391 const u8 *buf, size_t len, bool ack,
9392 gfp_t gfp)
9393{ 9895{
9896 struct wiphy *wiphy = wdev->wiphy;
9897 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
9394 struct net_device *netdev = wdev->netdev; 9898 struct net_device *netdev = wdev->netdev;
9395 struct sk_buff *msg; 9899 struct sk_buff *msg;
9396 void *hdr; 9900 void *hdr;
9397 9901
9902 trace_cfg80211_mgmt_tx_status(wdev, cookie, ack);
9903
9398 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); 9904 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
9399 if (!msg) 9905 if (!msg)
9400 return; 9906 return;
@@ -9422,17 +9928,21 @@ void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev,
9422 genlmsg_cancel(msg, hdr); 9928 genlmsg_cancel(msg, hdr);
9423 nlmsg_free(msg); 9929 nlmsg_free(msg);
9424} 9930}
9931EXPORT_SYMBOL(cfg80211_mgmt_tx_status);
9425 9932
9426void 9933void cfg80211_cqm_rssi_notify(struct net_device *dev,
9427nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev, 9934 enum nl80211_cqm_rssi_threshold_event rssi_event,
9428 struct net_device *netdev, 9935 gfp_t gfp)
9429 enum nl80211_cqm_rssi_threshold_event rssi_event,
9430 gfp_t gfp)
9431{ 9936{
9937 struct wireless_dev *wdev = dev->ieee80211_ptr;
9938 struct wiphy *wiphy = wdev->wiphy;
9939 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
9432 struct sk_buff *msg; 9940 struct sk_buff *msg;
9433 struct nlattr *pinfoattr; 9941 struct nlattr *pinfoattr;
9434 void *hdr; 9942 void *hdr;
9435 9943
9944 trace_cfg80211_cqm_rssi_notify(dev, rssi_event);
9945
9436 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); 9946 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
9437 if (!msg) 9947 if (!msg)
9438 return; 9948 return;
@@ -9444,7 +9954,7 @@ nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
9444 } 9954 }
9445 9955
9446 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || 9956 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
9447 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex)) 9957 nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex))
9448 goto nla_put_failure; 9958 goto nla_put_failure;
9449 9959
9450 pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM); 9960 pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM);
@@ -9467,10 +9977,11 @@ nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
9467 genlmsg_cancel(msg, hdr); 9977 genlmsg_cancel(msg, hdr);
9468 nlmsg_free(msg); 9978 nlmsg_free(msg);
9469} 9979}
9980EXPORT_SYMBOL(cfg80211_cqm_rssi_notify);
9470 9981
9471void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev, 9982static void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev,
9472 struct net_device *netdev, const u8 *bssid, 9983 struct net_device *netdev, const u8 *bssid,
9473 const u8 *replay_ctr, gfp_t gfp) 9984 const u8 *replay_ctr, gfp_t gfp)
9474{ 9985{
9475 struct sk_buff *msg; 9986 struct sk_buff *msg;
9476 struct nlattr *rekey_attr; 9987 struct nlattr *rekey_attr;
@@ -9512,9 +10023,22 @@ void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev,
9512 nlmsg_free(msg); 10023 nlmsg_free(msg);
9513} 10024}
9514 10025
9515void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev, 10026void cfg80211_gtk_rekey_notify(struct net_device *dev, const u8 *bssid,
9516 struct net_device *netdev, int index, 10027 const u8 *replay_ctr, gfp_t gfp)
9517 const u8 *bssid, bool preauth, gfp_t gfp) 10028{
10029 struct wireless_dev *wdev = dev->ieee80211_ptr;
10030 struct wiphy *wiphy = wdev->wiphy;
10031 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
10032
10033 trace_cfg80211_gtk_rekey_notify(dev, bssid);
10034 nl80211_gtk_rekey_notify(rdev, dev, bssid, replay_ctr, gfp);
10035}
10036EXPORT_SYMBOL(cfg80211_gtk_rekey_notify);
10037
10038static void
10039nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
10040 struct net_device *netdev, int index,
10041 const u8 *bssid, bool preauth, gfp_t gfp)
9518{ 10042{
9519 struct sk_buff *msg; 10043 struct sk_buff *msg;
9520 struct nlattr *attr; 10044 struct nlattr *attr;
@@ -9557,9 +10081,22 @@ void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
9557 nlmsg_free(msg); 10081 nlmsg_free(msg);
9558} 10082}
9559 10083
9560void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev, 10084void cfg80211_pmksa_candidate_notify(struct net_device *dev, int index,
9561 struct net_device *netdev, 10085 const u8 *bssid, bool preauth, gfp_t gfp)
9562 struct cfg80211_chan_def *chandef, gfp_t gfp) 10086{
10087 struct wireless_dev *wdev = dev->ieee80211_ptr;
10088 struct wiphy *wiphy = wdev->wiphy;
10089 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
10090
10091 trace_cfg80211_pmksa_candidate_notify(dev, index, bssid, preauth);
10092 nl80211_pmksa_candidate_notify(rdev, dev, index, bssid, preauth, gfp);
10093}
10094EXPORT_SYMBOL(cfg80211_pmksa_candidate_notify);
10095
10096static void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
10097 struct net_device *netdev,
10098 struct cfg80211_chan_def *chandef,
10099 gfp_t gfp)
9563{ 10100{
9564 struct sk_buff *msg; 10101 struct sk_buff *msg;
9565 void *hdr; 10102 void *hdr;
@@ -9591,11 +10128,36 @@ void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
9591 nlmsg_free(msg); 10128 nlmsg_free(msg);
9592} 10129}
9593 10130
9594void 10131void cfg80211_ch_switch_notify(struct net_device *dev,
9595nl80211_send_cqm_txe_notify(struct cfg80211_registered_device *rdev, 10132 struct cfg80211_chan_def *chandef)
9596 struct net_device *netdev, const u8 *peer,
9597 u32 num_packets, u32 rate, u32 intvl, gfp_t gfp)
9598{ 10133{
10134 struct wireless_dev *wdev = dev->ieee80211_ptr;
10135 struct wiphy *wiphy = wdev->wiphy;
10136 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
10137
10138 trace_cfg80211_ch_switch_notify(dev, chandef);
10139
10140 wdev_lock(wdev);
10141
10142 if (WARN_ON(wdev->iftype != NL80211_IFTYPE_AP &&
10143 wdev->iftype != NL80211_IFTYPE_P2P_GO))
10144 goto out;
10145
10146 wdev->channel = chandef->chan;
10147 nl80211_ch_switch_notify(rdev, dev, chandef, GFP_KERNEL);
10148out:
10149 wdev_unlock(wdev);
10150 return;
10151}
10152EXPORT_SYMBOL(cfg80211_ch_switch_notify);
10153
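Note that the exported variant takes wdev_lock() and allocates with
GFP_KERNEL itself, so it must be called from process context, e.g. a
workqueue. A sketch; struct mydrv_vif and its members are hypothetical:

    static void mydrv_csa_complete_work(struct work_struct *work)
    {
            struct mydrv_vif *vif = container_of(work, struct mydrv_vif,
                                                 csa_work);

            /* updates wdev->channel and emits the nl80211 event */
            cfg80211_ch_switch_notify(vif->ndev, &vif->csa_chandef);
    }
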
10154void cfg80211_cqm_txe_notify(struct net_device *dev,
10155 const u8 *peer, u32 num_packets,
10156 u32 rate, u32 intvl, gfp_t gfp)
10157{
10158 struct wireless_dev *wdev = dev->ieee80211_ptr;
10159 struct wiphy *wiphy = wdev->wiphy;
10160 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
9599 struct sk_buff *msg; 10161 struct sk_buff *msg;
9600 struct nlattr *pinfoattr; 10162 struct nlattr *pinfoattr;
9601 void *hdr; 10163 void *hdr;
@@ -9611,7 +10173,7 @@ nl80211_send_cqm_txe_notify(struct cfg80211_registered_device *rdev,
9611 } 10173 }
9612 10174
9613 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || 10175 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
9614 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || 10176 nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
9615 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, peer)) 10177 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, peer))
9616 goto nla_put_failure; 10178 goto nla_put_failure;
9617 10179
@@ -9640,6 +10202,7 @@ nl80211_send_cqm_txe_notify(struct cfg80211_registered_device *rdev,
9640 genlmsg_cancel(msg, hdr); 10202 genlmsg_cancel(msg, hdr);
9641 nlmsg_free(msg); 10203 nlmsg_free(msg);
9642} 10204}
10205EXPORT_SYMBOL(cfg80211_cqm_txe_notify);
9643 10206
9644void 10207void
9645nl80211_radar_notify(struct cfg80211_registered_device *rdev, 10208nl80211_radar_notify(struct cfg80211_registered_device *rdev,
@@ -9692,15 +10255,18 @@ nl80211_radar_notify(struct cfg80211_registered_device *rdev,
9692 nlmsg_free(msg); 10255 nlmsg_free(msg);
9693} 10256}
9694 10257
9695void 10258void cfg80211_cqm_pktloss_notify(struct net_device *dev,
9696nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev, 10259 const u8 *peer, u32 num_packets, gfp_t gfp)
9697 struct net_device *netdev, const u8 *peer,
9698 u32 num_packets, gfp_t gfp)
9699{ 10260{
10261 struct wireless_dev *wdev = dev->ieee80211_ptr;
10262 struct wiphy *wiphy = wdev->wiphy;
10263 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
9700 struct sk_buff *msg; 10264 struct sk_buff *msg;
9701 struct nlattr *pinfoattr; 10265 struct nlattr *pinfoattr;
9702 void *hdr; 10266 void *hdr;
9703 10267
10268 trace_cfg80211_cqm_pktloss_notify(dev, peer, num_packets);
10269
9704 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp); 10270 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
9705 if (!msg) 10271 if (!msg)
9706 return; 10272 return;
@@ -9712,7 +10278,7 @@ nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
9712 } 10278 }
9713 10279
9714 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || 10280 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
9715 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) || 10281 nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
9716 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, peer)) 10282 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, peer))
9717 goto nla_put_failure; 10283 goto nla_put_failure;
9718 10284
@@ -9735,6 +10301,7 @@ nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
9735 genlmsg_cancel(msg, hdr); 10301 genlmsg_cancel(msg, hdr);
9736 nlmsg_free(msg); 10302 nlmsg_free(msg);
9737} 10303}
10304EXPORT_SYMBOL(cfg80211_cqm_pktloss_notify);
9738 10305
9739void cfg80211_probe_status(struct net_device *dev, const u8 *addr, 10306void cfg80211_probe_status(struct net_device *dev, const u8 *addr,
9740 u64 cookie, bool acked, gfp_t gfp) 10307 u64 cookie, bool acked, gfp_t gfp)
@@ -10021,6 +10588,50 @@ static struct notifier_block nl80211_netlink_notifier = {
10021 .notifier_call = nl80211_netlink_notify, 10588 .notifier_call = nl80211_netlink_notify,
10022}; 10589};
10023 10590
10591void cfg80211_ft_event(struct net_device *netdev,
10592 struct cfg80211_ft_event_params *ft_event)
10593{
10594 struct wiphy *wiphy = netdev->ieee80211_ptr->wiphy;
10595 struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
10596 struct sk_buff *msg;
10597 void *hdr;
10598 int err;
10599
10600 trace_cfg80211_ft_event(wiphy, netdev, ft_event);
10601
10602 if (!ft_event->target_ap)
10603 return;
10604
10605 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
10606 if (!msg)
10607 return;
10608
10609 hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FT_EVENT);
10610 if (!hdr) {
10611 nlmsg_free(msg);
10612 return;
10613 }
10614
10615 nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
10616 nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
10617 nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, ft_event->target_ap);
10618 if (ft_event->ies)
10619 nla_put(msg, NL80211_ATTR_IE, ft_event->ies_len, ft_event->ies);
10620 if (ft_event->ric_ies)
10621 nla_put(msg, NL80211_ATTR_IE_RIC, ft_event->ric_ies_len,
10622 ft_event->ric_ies);
10623
10624 err = genlmsg_end(msg, hdr);
10625 if (err < 0) {
10626 nlmsg_free(msg);
10627 return;
10628 }
10629
10630 genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
10631 nl80211_mlme_mcgrp.id, GFP_KERNEL);
10632}
10633EXPORT_SYMBOL(cfg80211_ft_event);
10634
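cfg80211_ft_event() also allocates with GFP_KERNEL, so it is process-context
only, and target_ap is mandatory (events without it are silently dropped
above). A hypothetical roam-complete handler in a FullMAC driver:

    static void mydrv_report_ft(struct net_device *dev, const u8 *target_ap,
                                const u8 *ies, size_t ies_len)
    {
            struct cfg80211_ft_event_params ft_event = {
                    .ies       = ies,
                    .ies_len   = ies_len,
                    .target_ap = target_ap,  /* required, see check above */
            };

            cfg80211_ft_event(dev, &ft_event);
    }
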
10024/* initialisation/exit functions */ 10635/* initialisation/exit functions */
10025 10636
10026int nl80211_init(void) 10637int nl80211_init(void)
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index b061da4919e1..a4073e808c13 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -29,12 +29,6 @@ void nl80211_send_deauth(struct cfg80211_registered_device *rdev,
29void nl80211_send_disassoc(struct cfg80211_registered_device *rdev, 29void nl80211_send_disassoc(struct cfg80211_registered_device *rdev,
30 struct net_device *netdev, 30 struct net_device *netdev,
31 const u8 *buf, size_t len, gfp_t gfp); 31 const u8 *buf, size_t len, gfp_t gfp);
32void nl80211_send_unprot_deauth(struct cfg80211_registered_device *rdev,
33 struct net_device *netdev,
34 const u8 *buf, size_t len, gfp_t gfp);
35void nl80211_send_unprot_disassoc(struct cfg80211_registered_device *rdev,
36 struct net_device *netdev,
37 const u8 *buf, size_t len, gfp_t gfp);
38void nl80211_send_auth_timeout(struct cfg80211_registered_device *rdev, 32void nl80211_send_auth_timeout(struct cfg80211_registered_device *rdev,
39 struct net_device *netdev, 33 struct net_device *netdev,
40 const u8 *addr, gfp_t gfp); 34 const u8 *addr, gfp_t gfp);
@@ -54,10 +48,6 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
54 struct net_device *netdev, u16 reason, 48 struct net_device *netdev, u16 reason,
55 const u8 *ie, size_t ie_len, bool from_ap); 49 const u8 *ie, size_t ie_len, bool from_ap);
56 50
57void nl80211_send_new_peer_candidate(struct cfg80211_registered_device *rdev,
58 struct net_device *netdev,
59 const u8 *macaddr, const u8* ie, u8 ie_len,
60 gfp_t gfp);
61void 51void
62nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev, 52nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev,
63 struct net_device *netdev, const u8 *addr, 53 struct net_device *netdev, const u8 *addr,
@@ -73,41 +63,10 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
73 struct net_device *netdev, const u8 *bssid, 63 struct net_device *netdev, const u8 *bssid,
74 gfp_t gfp); 64 gfp_t gfp);
75 65
76void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev,
77 struct wireless_dev *wdev, u64 cookie,
78 struct ieee80211_channel *chan,
79 unsigned int duration, gfp_t gfp);
80void nl80211_send_remain_on_channel_cancel(
81 struct cfg80211_registered_device *rdev,
82 struct wireless_dev *wdev,
83 u64 cookie, struct ieee80211_channel *chan, gfp_t gfp);
84
85void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
86 struct net_device *dev, const u8 *mac_addr,
87 struct station_info *sinfo, gfp_t gfp);
88void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
89 struct net_device *dev, const u8 *mac_addr,
90 gfp_t gfp);
91
92void nl80211_send_conn_failed_event(struct cfg80211_registered_device *rdev,
93 struct net_device *dev, const u8 *mac_addr,
94 enum nl80211_connect_failed_reason reason,
95 gfp_t gfp);
96
97int nl80211_send_mgmt(struct cfg80211_registered_device *rdev, 66int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
98 struct wireless_dev *wdev, u32 nlpid, 67 struct wireless_dev *wdev, u32 nlpid,
99 int freq, int sig_dbm, 68 int freq, int sig_dbm,
100 const u8 *buf, size_t len, gfp_t gfp); 69 const u8 *buf, size_t len, gfp_t gfp);
101void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev,
102 struct wireless_dev *wdev, u64 cookie,
103 const u8 *buf, size_t len, bool ack,
104 gfp_t gfp);
105
106void
107nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
108 struct net_device *netdev,
109 enum nl80211_cqm_rssi_threshold_event rssi_event,
110 gfp_t gfp);
111 70
112void 71void
113nl80211_radar_notify(struct cfg80211_registered_device *rdev, 72nl80211_radar_notify(struct cfg80211_registered_device *rdev,
@@ -115,31 +74,4 @@ nl80211_radar_notify(struct cfg80211_registered_device *rdev,
115 enum nl80211_radar_event event, 74 enum nl80211_radar_event event,
116 struct net_device *netdev, gfp_t gfp); 75 struct net_device *netdev, gfp_t gfp);
117 76
118void
119nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
120 struct net_device *netdev, const u8 *peer,
121 u32 num_packets, gfp_t gfp);
122
123void
124nl80211_send_cqm_txe_notify(struct cfg80211_registered_device *rdev,
125 struct net_device *netdev, const u8 *peer,
126 u32 num_packets, u32 rate, u32 intvl, gfp_t gfp);
127
128void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev,
129 struct net_device *netdev, const u8 *bssid,
130 const u8 *replay_ctr, gfp_t gfp);
131
132void nl80211_pmksa_candidate_notify(struct cfg80211_registered_device *rdev,
133 struct net_device *netdev, int index,
134 const u8 *bssid, bool preauth, gfp_t gfp);
135
136void nl80211_ch_switch_notify(struct cfg80211_registered_device *rdev,
137 struct net_device *dev,
138 struct cfg80211_chan_def *chandef, gfp_t gfp);
139
140bool nl80211_unexpected_frame(struct net_device *dev,
141 const u8 *addr, gfp_t gfp);
142bool nl80211_unexpected_4addr_frame(struct net_device *dev,
143 const u8 *addr, gfp_t gfp);
144
145#endif /* __NET_WIRELESS_NL80211_H */ 77#endif /* __NET_WIRELESS_NL80211_H */
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index 422d38291d66..d77e1c1d3a0e 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -6,11 +6,12 @@
6#include "core.h" 6#include "core.h"
7#include "trace.h" 7#include "trace.h"
8 8
9static inline int rdev_suspend(struct cfg80211_registered_device *rdev) 9static inline int rdev_suspend(struct cfg80211_registered_device *rdev,
10 struct cfg80211_wowlan *wowlan)
10{ 11{
11 int ret; 12 int ret;
12 trace_rdev_suspend(&rdev->wiphy, rdev->wowlan); 13 trace_rdev_suspend(&rdev->wiphy, wowlan);
13 ret = rdev->ops->suspend(&rdev->wiphy, rdev->wowlan); 14 ret = rdev->ops->suspend(&rdev->wiphy, wowlan);
14 trace_rdev_return_int(&rdev->wiphy, ret); 15 trace_rdev_return_int(&rdev->wiphy, ret);
15 return ret; 16 return ret;
16} 17}
@@ -887,4 +888,17 @@ static inline int rdev_set_mac_acl(struct cfg80211_registered_device *rdev,
887 trace_rdev_return_int(&rdev->wiphy, ret); 888 trace_rdev_return_int(&rdev->wiphy, ret);
888 return ret; 889 return ret;
889} 890}
891
892static inline int rdev_update_ft_ies(struct cfg80211_registered_device *rdev,
893 struct net_device *dev,
894 struct cfg80211_update_ft_ies_params *ftie)
895{
896 int ret;
897
898 trace_rdev_update_ft_ies(&rdev->wiphy, dev, ftie);
899 ret = rdev->ops->update_ft_ies(&rdev->wiphy, dev, ftie);
900 trace_rdev_return_int(&rdev->wiphy, ret);
901 return ret;
902}
903
890#endif /* __CFG80211_RDEV_OPS */ 904#endif /* __CFG80211_RDEV_OPS */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 98532c00242d..e6df52dc8c69 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -184,14 +184,14 @@ static const struct ieee80211_regdomain world_regdom = {
184 NL80211_RRF_NO_IBSS | 184 NL80211_RRF_NO_IBSS |
185 NL80211_RRF_NO_OFDM), 185 NL80211_RRF_NO_OFDM),
186 /* IEEE 802.11a, channel 36..48 */ 186 /* IEEE 802.11a, channel 36..48 */
187 REG_RULE(5180-10, 5240+10, 40, 6, 20, 187 REG_RULE(5180-10, 5240+10, 80, 6, 20,
188 NL80211_RRF_PASSIVE_SCAN | 188 NL80211_RRF_PASSIVE_SCAN |
189 NL80211_RRF_NO_IBSS), 189 NL80211_RRF_NO_IBSS),
190 190
191 /* NB: 5260 MHz - 5700 MHz requies DFS */ 191 /* NB: 5260 MHz - 5700 MHz requires DFS */
192 192
193 /* IEEE 802.11a, channel 149..165 */ 193 /* IEEE 802.11a, channel 149..165 */
194 REG_RULE(5745-10, 5825+10, 40, 6, 20, 194 REG_RULE(5745-10, 5825+10, 80, 6, 20,
195 NL80211_RRF_PASSIVE_SCAN | 195 NL80211_RRF_PASSIVE_SCAN |
196 NL80211_RRF_NO_IBSS), 196 NL80211_RRF_NO_IBSS),
197 197
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 09d994d192ff..ff6f7ae35586 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -160,7 +160,7 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
160{ 160{
161 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy); 161 struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
162 struct cfg80211_connect_params *params; 162 struct cfg80211_connect_params *params;
163 const u8 *prev_bssid = NULL; 163 struct cfg80211_assoc_request req = {};
164 int err; 164 int err;
165 165
166 ASSERT_WDEV_LOCK(wdev); 166 ASSERT_WDEV_LOCK(wdev);
@@ -187,16 +187,20 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
187 BUG_ON(!rdev->ops->assoc); 187 BUG_ON(!rdev->ops->assoc);
188 wdev->conn->state = CFG80211_CONN_ASSOCIATING; 188 wdev->conn->state = CFG80211_CONN_ASSOCIATING;
189 if (wdev->conn->prev_bssid_valid) 189 if (wdev->conn->prev_bssid_valid)
190 prev_bssid = wdev->conn->prev_bssid; 190 req.prev_bssid = wdev->conn->prev_bssid;
191 err = __cfg80211_mlme_assoc(rdev, wdev->netdev, 191 req.ie = params->ie;
192 params->channel, params->bssid, 192 req.ie_len = params->ie_len;
193 prev_bssid, 193 req.use_mfp = params->mfp != NL80211_MFP_NO;
194 params->ssid, params->ssid_len, 194 req.crypto = params->crypto;
195 params->ie, params->ie_len, 195 req.flags = params->flags;
196 params->mfp != NL80211_MFP_NO, 196 req.ht_capa = params->ht_capa;
197 &params->crypto, 197 req.ht_capa_mask = params->ht_capa_mask;
198 params->flags, &params->ht_capa, 198 req.vht_capa = params->vht_capa;
199 &params->ht_capa_mask); 199 req.vht_capa_mask = params->vht_capa_mask;
200
201 err = __cfg80211_mlme_assoc(rdev, wdev->netdev, params->channel,
202 params->bssid, params->ssid,
203 params->ssid_len, &req);
200 if (err) 204 if (err)
201 __cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid, 205 __cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
202 NULL, 0, 206 NULL, 0,
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 238ee49b3868..8f28b9f798d8 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -83,6 +83,14 @@ static int wiphy_uevent(struct device *dev, struct kobj_uevent_env *env)
83 return 0; 83 return 0;
84} 84}
85 85
86static void cfg80211_leave_all(struct cfg80211_registered_device *rdev)
87{
88 struct wireless_dev *wdev;
89
90 list_for_each_entry(wdev, &rdev->wdev_list, list)
91 cfg80211_leave(rdev, wdev);
92}
93
86static int wiphy_suspend(struct device *dev, pm_message_t state) 94static int wiphy_suspend(struct device *dev, pm_message_t state)
87{ 95{
88 struct cfg80211_registered_device *rdev = dev_to_rdev(dev); 96 struct cfg80211_registered_device *rdev = dev_to_rdev(dev);
@@ -90,12 +98,19 @@ static int wiphy_suspend(struct device *dev, pm_message_t state)
90 98
91 rdev->suspend_at = get_seconds(); 99 rdev->suspend_at = get_seconds();
92 100
93 if (rdev->ops->suspend) { 101 rtnl_lock();
94 rtnl_lock(); 102 if (rdev->wiphy.registered) {
95 if (rdev->wiphy.registered) 103 if (!rdev->wowlan)
96 ret = rdev_suspend(rdev); 104 cfg80211_leave_all(rdev);
97 rtnl_unlock(); 105 if (rdev->ops->suspend)
106 ret = rdev_suspend(rdev, rdev->wowlan);
107 if (ret == 1) {
108 /* Driver refused to configure wowlan */
109 cfg80211_leave_all(rdev);
110 ret = rdev_suspend(rdev, NULL);
111 }
98 } 112 }
113 rtnl_unlock();
99 114
100 return ret; 115 return ret;
101} 116}
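The ret == 1 convention lets a driver veto the WoWLAN configuration at
suspend time: cfg80211 then tears everything down via cfg80211_leave_all()
and suspends again with a NULL config. A hypothetical suspend op (the
mydrv_fw_* helpers are made up):

    static int mydrv_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wowlan)
    {
            struct mydrv_priv *priv = wiphy_priv(wiphy);

            if (wowlan && !mydrv_fw_can_wowlan(priv, wowlan))
                    return 1;       /* retried with wowlan == NULL */

            return mydrv_fw_suspend(priv, wowlan);
    }
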
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 7586de77a2f8..3c2033b8f596 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -1786,6 +1786,26 @@ TRACE_EVENT(rdev_set_mac_acl,
1786 WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->acl_policy) 1786 WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->acl_policy)
1787); 1787);
1788 1788
1789TRACE_EVENT(rdev_update_ft_ies,
1790 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
1791 struct cfg80211_update_ft_ies_params *ftie),
1792 TP_ARGS(wiphy, netdev, ftie),
1793 TP_STRUCT__entry(
1794 WIPHY_ENTRY
1795 NETDEV_ENTRY
1796 __field(u16, md)
1797 __dynamic_array(u8, ie, ftie->ie_len)
1798 ),
1799 TP_fast_assign(
1800 WIPHY_ASSIGN;
1801 NETDEV_ASSIGN;
1802 __entry->md = ftie->md;
1803 memcpy(__get_dynamic_array(ie), ftie->ie, ftie->ie_len);
1804 ),
1805 TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", md: 0x%x",
1806 WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->md)
1807);
1808
1789/************************************************************* 1809/*************************************************************
1790 * cfg80211 exported functions traces * 1810 * cfg80211 exported functions traces *
1791 *************************************************************/ 1811 *************************************************************/
@@ -2414,6 +2434,32 @@ TRACE_EVENT(cfg80211_report_wowlan_wakeup,
2414 TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT, WIPHY_PR_ARG, WDEV_PR_ARG) 2434 TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT, WIPHY_PR_ARG, WDEV_PR_ARG)
2415); 2435);
2416 2436
2437TRACE_EVENT(cfg80211_ft_event,
2438 TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
2439 struct cfg80211_ft_event_params *ft_event),
2440 TP_ARGS(wiphy, netdev, ft_event),
2441 TP_STRUCT__entry(
2442 WIPHY_ENTRY
2443 NETDEV_ENTRY
2444 __dynamic_array(u8, ies, ft_event->ies_len)
2445 MAC_ENTRY(target_ap)
2446 __dynamic_array(u8, ric_ies, ft_event->ric_ies_len)
2447 ),
2448 TP_fast_assign(
2449 WIPHY_ASSIGN;
2450 NETDEV_ASSIGN;
2451 if (ft_event->ies)
2452 memcpy(__get_dynamic_array(ies), ft_event->ies,
2453 ft_event->ies_len);
2454 MAC_ASSIGN(target_ap, ft_event->target_ap);
2455 if (ft_event->ric_ies)
2456 memcpy(__get_dynamic_array(ric_ies), ft_event->ric_ies,
2457 ft_event->ric_ies_len);
2458 ),
2459 TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", target_ap: " MAC_PR_FMT,
2460 WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(target_ap))
2461);
2462
2417#endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */ 2463#endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */
2418 2464
2419#undef TRACE_INCLUDE_PATH 2465#undef TRACE_INCLUDE_PATH
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 37a56ee1e1ed..6cbac99ae03d 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -511,7 +511,7 @@ int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
511 encaps_data = bridge_tunnel_header; 511 encaps_data = bridge_tunnel_header;
512 encaps_len = sizeof(bridge_tunnel_header); 512 encaps_len = sizeof(bridge_tunnel_header);
513 skip_header_bytes -= 2; 513 skip_header_bytes -= 2;
514 } else if (ethertype > 0x600) { 514 } else if (ethertype >= ETH_P_802_3_MIN) {
515 encaps_data = rfc1042_header; 515 encaps_data = rfc1042_header;
516 encaps_len = sizeof(rfc1042_header); 516 encaps_len = sizeof(rfc1042_header);
517 skip_header_bytes -= 2; 517 skip_header_bytes -= 2;
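Besides naming the magic number, this fixes a boundary case: 0x0600 is itself
a valid EtherType, but the old "ethertype > 0x600" test classified it as an
802.3 length field. The rule as a self-contained sketch:

    #include <stdbool.h>
    #include <stdint.h>

    #define ETH_P_802_3_MIN 0x0600  /* same value as <linux/if_ether.h> */

    /* type/length >= 0x0600: Ethernet II EtherType, which gets RFC 1042 or
     * bridge-tunnel encapsulation; anything below is an 802.3 length */
    static bool is_ethertype(uint16_t type_len)
    {
            return type_len >= ETH_P_802_3_MIN;
    }
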
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 167c67d46c6a..23cea0f74336 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1037,6 +1037,24 @@ __xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir
1037 return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir); 1037 return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
1038} 1038}
1039 1039
1040static int flow_to_policy_dir(int dir)
1041{
1042 if (XFRM_POLICY_IN == FLOW_DIR_IN &&
1043 XFRM_POLICY_OUT == FLOW_DIR_OUT &&
1044 XFRM_POLICY_FWD == FLOW_DIR_FWD)
1045 return dir;
1046
1047 switch (dir) {
1048 default:
1049 case FLOW_DIR_IN:
1050 return XFRM_POLICY_IN;
1051 case FLOW_DIR_OUT:
1052 return XFRM_POLICY_OUT;
1053 case FLOW_DIR_FWD:
1054 return XFRM_POLICY_FWD;
1055 }
1056}
1057
1040static struct flow_cache_object * 1058static struct flow_cache_object *
1041xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, 1059xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family,
1042 u8 dir, struct flow_cache_object *old_obj, void *ctx) 1060 u8 dir, struct flow_cache_object *old_obj, void *ctx)
@@ -1046,7 +1064,7 @@ xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family,
1046 if (old_obj) 1064 if (old_obj)
1047 xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo)); 1065 xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo));
1048 1066
1049 pol = __xfrm_policy_lookup(net, fl, family, dir); 1067 pol = __xfrm_policy_lookup(net, fl, family, flow_to_policy_dir(dir));
1050 if (IS_ERR_OR_NULL(pol)) 1068 if (IS_ERR_OR_NULL(pol))
1051 return ERR_CAST(pol); 1069 return ERR_CAST(pol);
1052 1070
@@ -1932,7 +1950,8 @@ xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
1932 * previous cache entry */ 1950 * previous cache entry */
1933 if (xdst == NULL) { 1951 if (xdst == NULL) {
1934 num_pols = 1; 1952 num_pols = 1;
1935 pols[0] = __xfrm_policy_lookup(net, fl, family, dir); 1953 pols[0] = __xfrm_policy_lookup(net, fl, family,
1954 flow_to_policy_dir(dir));
1936 err = xfrm_expand_policies(fl, family, pols, 1955 err = xfrm_expand_policies(fl, family, pols,
1937 &num_pols, &num_xfrms); 1956 &num_pols, &num_xfrms);
1938 if (err < 0) 1957 if (err < 0)
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 2fa28c88900c..0a0609fce28b 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -60,7 +60,7 @@
60#include <linux/bitops.h> 60#include <linux/bitops.h>
61#include <linux/interrupt.h> 61#include <linux/interrupt.h>
62#include <linux/netdevice.h> /* for network interface checks */ 62#include <linux/netdevice.h> /* for network interface checks */
63#include <linux/netlink.h> 63#include <net/netlink.h>
64#include <linux/tcp.h> 64#include <linux/tcp.h>
65#include <linux/udp.h> 65#include <linux/udp.h>
66#include <linux/dccp.h> 66#include <linux/dccp.h>
@@ -4475,7 +4475,7 @@ static int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb)
4475 struct nlmsghdr *nlh; 4475 struct nlmsghdr *nlh;
4476 struct sk_security_struct *sksec = sk->sk_security; 4476 struct sk_security_struct *sksec = sk->sk_security;
4477 4477
4478 if (skb->len < NLMSG_SPACE(0)) { 4478 if (skb->len < NLMSG_HDRLEN) {
4479 err = -EINVAL; 4479 err = -EINVAL;
4480 goto out; 4480 goto out;
4481 } 4481 }
diff --git a/security/selinux/netlink.c b/security/selinux/netlink.c
index 14d810ead420..828fb6a4e941 100644
--- a/security/selinux/netlink.c
+++ b/security/selinux/netlink.c
@@ -16,7 +16,6 @@
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/export.h> 17#include <linux/export.h>
18#include <linux/skbuff.h> 18#include <linux/skbuff.h>
19#include <linux/netlink.h>
20#include <linux/selinux_netlink.h> 19#include <linux/selinux_netlink.h>
21#include <net/net_namespace.h> 20#include <net/net_namespace.h>
22#include <net/netlink.h> 21#include <net/netlink.h>
@@ -77,7 +76,7 @@ static void selnl_notify(int msgtype, void *data)
77 76
78 len = selnl_msglen(msgtype); 77 len = selnl_msglen(msgtype);
79 78
80 skb = alloc_skb(NLMSG_SPACE(len), GFP_USER); 79 skb = nlmsg_new(len, GFP_USER);
81 if (!skb) 80 if (!skb)
82 goto oom; 81 goto oom;
83 82
diff --git a/tools/Makefile b/tools/Makefile
index fa36565b209d..c73c6357481c 100644
--- a/tools/Makefile
+++ b/tools/Makefile
@@ -12,6 +12,7 @@ help:
12 @echo ' turbostat - Intel CPU idle stats and freq reporting tool' 12 @echo ' turbostat - Intel CPU idle stats and freq reporting tool'
13 @echo ' usb - USB testing tools' 13 @echo ' usb - USB testing tools'
14 @echo ' virtio - vhost test module' 14 @echo ' virtio - vhost test module'
15 @echo ' net - misc networking tools'
15 @echo ' vm - misc vm tools' 16 @echo ' vm - misc vm tools'
16 @echo ' x86_energy_perf_policy - Intel energy policy tool' 17 @echo ' x86_energy_perf_policy - Intel energy policy tool'
17 @echo '' 18 @echo ''
@@ -34,7 +35,7 @@ help:
34cpupower: FORCE 35cpupower: FORCE
35 $(call descend,power/$@) 36 $(call descend,power/$@)
36 37
37cgroup firewire lguest perf usb virtio vm: FORCE 38cgroup firewire lguest perf usb virtio vm net: FORCE
38 $(call descend,$@) 39 $(call descend,$@)
39 40
40selftests: FORCE 41selftests: FORCE
@@ -46,7 +47,7 @@ turbostat x86_energy_perf_policy: FORCE
46cpupower_install: 47cpupower_install:
47 $(call descend,power/$(@:_install=),install) 48 $(call descend,power/$(@:_install=),install)
48 49
49cgroup_install firewire_install lguest_install perf_install usb_install virtio_install vm_install: 50cgroup_install firewire_install lguest_install perf_install usb_install virtio_install vm_install net_install:
50 $(call descend,$(@:_install=),install) 51 $(call descend,$(@:_install=),install)
51 52
52selftests_install: 53selftests_install:
@@ -57,12 +58,12 @@ turbostat_install x86_energy_perf_policy_install:
57 58
58install: cgroup_install cpupower_install firewire_install lguest_install \ 59install: cgroup_install cpupower_install firewire_install lguest_install \
59 perf_install selftests_install turbostat_install usb_install \ 60 perf_install selftests_install turbostat_install usb_install \
60 virtio_install vm_install x86_energy_perf_policy_install 61 virtio_install vm_install net_install x86_energy_perf_policy_install
61 62
62cpupower_clean: 63cpupower_clean:
63 $(call descend,power/cpupower,clean) 64 $(call descend,power/cpupower,clean)
64 65
65cgroup_clean firewire_clean lguest_clean perf_clean usb_clean virtio_clean vm_clean: 66cgroup_clean firewire_clean lguest_clean perf_clean usb_clean virtio_clean vm_clean net_clean:
66 $(call descend,$(@:_clean=),clean) 67 $(call descend,$(@:_clean=),clean)
67 68
68selftests_clean: 69selftests_clean:
@@ -73,6 +74,6 @@ turbostat_clean x86_energy_perf_policy_clean:
73 74
74clean: cgroup_clean cpupower_clean firewire_clean lguest_clean perf_clean \ 75clean: cgroup_clean cpupower_clean firewire_clean lguest_clean perf_clean \
75 selftests_clean turbostat_clean usb_clean virtio_clean \ 76 selftests_clean turbostat_clean usb_clean virtio_clean \
76 vm_clean x86_energy_perf_policy_clean 77 vm_clean net_clean x86_energy_perf_policy_clean
77 78
78.PHONY: FORCE 79.PHONY: FORCE
diff --git a/tools/net/Makefile b/tools/net/Makefile
new file mode 100644
index 000000000000..b4444d53b73f
--- /dev/null
+++ b/tools/net/Makefile
@@ -0,0 +1,15 @@
1prefix = /usr
2
3CC = gcc
4
5all : bpf_jit_disasm
6
7bpf_jit_disasm : CFLAGS = -Wall -O2
8bpf_jit_disasm : LDLIBS = -lopcodes -lbfd -ldl
9bpf_jit_disasm : bpf_jit_disasm.o
10
11clean :
12 rm -rf *.o bpf_jit_disasm
13
14install :
15 install bpf_jit_disasm $(prefix)/bin/bpf_jit_disasm
diff --git a/tools/net/bpf_jit_disasm.c b/tools/net/bpf_jit_disasm.c
new file mode 100644
index 000000000000..cfe0cdcda3de
--- /dev/null
+++ b/tools/net/bpf_jit_disasm.c
@@ -0,0 +1,199 @@
1/*
2 * Minimal BPF JIT image disassembler
3 *
4 * Disassembles BPF JIT compiler-emitted opcodes back to asm insns for
5 * debugging or verification purposes.
6 *
7 * To get the disassembly of the JIT code, do the following:
8 *
9 * 1) `echo 2 > /proc/sys/net/core/bpf_jit_enable`
10 * 2) Load a BPF filter (e.g. `tcpdump -p -n -s 0 -i eth1 host 192.168.20.0/24`)
11 * 3) Run e.g. `bpf_jit_disasm -o` to read out the last JIT code
12 *
13 * Copyright 2013 Daniel Borkmann <borkmann@redhat.com>
14 * Licensed under the GNU General Public License, version 2.0 (GPLv2)
15 */
16
17#include <stdint.h>
18#include <stdio.h>
19#include <stdlib.h>
20#include <assert.h>
21#include <unistd.h>
22#include <string.h>
23#include <bfd.h>
24#include <dis-asm.h>
25#include <sys/klog.h>
26#include <sys/types.h>
27#include <regex.h>
28
29static void get_exec_path(char *tpath, size_t size)
30{
31 char *path;
32 ssize_t len;
33
34 snprintf(tpath, size, "/proc/%d/exe", (int) getpid());
35 tpath[size - 1] = 0;
36
37 path = strdup(tpath);
38 assert(path);
39
40 len = readlink(path, tpath, size);
41 tpath[len] = 0;
42
43 free(path);
44}
45
46static void get_asm_insns(uint8_t *image, size_t len, unsigned long base,
47 int opcodes)
48{
49 int count, i, pc = 0;
50 char tpath[256];
51 struct disassemble_info info;
52 disassembler_ftype disassemble;
53 bfd *bfdf;
54
55 memset(tpath, 0, sizeof(tpath));
56 get_exec_path(tpath, sizeof(tpath));
57
58 bfdf = bfd_openr(tpath, NULL);
59 assert(bfdf);
60 assert(bfd_check_format(bfdf, bfd_object));
61
62 init_disassemble_info(&info, stdout, (fprintf_ftype) fprintf);
63 info.arch = bfd_get_arch(bfdf);
64 info.mach = bfd_get_mach(bfdf);
65 info.buffer = image;
66 info.buffer_length = len;
67
68 disassemble_init_for_target(&info);
69
70 disassemble = disassembler(bfdf);
71 assert(disassemble);
72
73 do {
74 printf("%4x:\t", pc);
75
76 count = disassemble(pc, &info);
77
78 if (opcodes) {
79 printf("\n\t");
80 for (i = 0; i < count; ++i)
81 printf("%02x ", (uint8_t) image[pc + i]);
82 }
83 printf("\n");
84
85 pc += count;
86 } while(count > 0 && pc < len);
87
88 bfd_close(bfdf);
89}
90
91static char *get_klog_buff(int *klen)
92{
93 int ret, len = klogctl(10, NULL, 0);
94 char *buff = malloc(len);
95
96 assert(buff && klen);
97 ret = klogctl(3, buff, len);
98 assert(ret >= 0);
99 *klen = ret;
100
101 return buff;
102}
103
104static void put_klog_buff(char *buff)
105{
106 free(buff);
107}
108
109static int get_last_jit_image(char *haystack, size_t hlen,
110 uint8_t *image, size_t ilen,
111 unsigned long *base)
112{
113 char *ptr, *pptr, *tmp;
114 off_t off = 0;
115 int ret, flen, proglen, pass, ulen = 0;
116 regmatch_t pmatch[1];
117 regex_t regex;
118
119 if (hlen == 0)
120 return 0;
121
122 ret = regcomp(&regex, "flen=[[:alnum:]]+ proglen=[[:digit:]]+ "
123 "pass=[[:digit:]]+ image=[[:xdigit:]]+", REG_EXTENDED);
124 assert(ret == 0);
125
126 ptr = haystack;
127 while (1) {
128 ret = regexec(&regex, ptr, 1, pmatch, 0);
129 if (ret == 0) {
130 ptr += pmatch[0].rm_eo;
131 off += pmatch[0].rm_eo;
132 assert(off < hlen);
133 } else
134 break;
135 }
136
137 ptr = haystack + off - (pmatch[0].rm_eo - pmatch[0].rm_so);
138 ret = sscanf(ptr, "flen=%d proglen=%d pass=%d image=%lx",
139 &flen, &proglen, &pass, base);
140 if (ret != 4)
141 return 0;
142
143 tmp = ptr = haystack + off;
144 while ((ptr = strtok(tmp, "\n")) != NULL && ulen < ilen) {
145 tmp = NULL;
146 if (!strstr(ptr, "JIT code"))
147 continue;
148 pptr = ptr;
149 while ((ptr = strstr(pptr, ":")))
150 pptr = ptr + 1;
151 ptr = pptr;
152 do {
153 image[ulen++] = (uint8_t) strtoul(pptr, &pptr, 16);
154 if (ptr == pptr || ulen >= ilen) {
155 ulen--;
156 break;
157 }
158 ptr = pptr;
159 } while (1);
160 }
161
162 assert(ulen == proglen);
163 printf("%d bytes emitted from JIT compiler (pass:%d, flen:%d)\n",
164 proglen, pass, flen);
165 printf("%lx + <x>:\n", *base);
166
167 regfree(&regex);
168 return ulen;
169}
170
171int main(int argc, char **argv)
172{
173 int len, klen, opcodes = 0;
174 char *kbuff;
175 unsigned long base;
176 uint8_t image[4096];
177
178 if (argc > 1) {
179 if (!strncmp("-o", argv[argc - 1], 2)) {
180 opcodes = 1;
181 } else {
182 printf("usage: bpf_jit_disasm [-o: show opcodes]\n");
183 exit(0);
184 }
185 }
186
187 bfd_init();
188 memset(image, 0, sizeof(image));
189
190 kbuff = get_klog_buff(&klen);
191
192 len = get_last_jit_image(kbuff, klen, image, sizeof(image), &base);
193 if (len > 0 && base > 0)
194 get_asm_insns(image, len, base, opcodes);
195
196 put_klog_buff(kbuff);
197
198 return 0;
199}
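
The parser above implies a specific shape for the JIT lines in the kernel log: one summary line matching "flen=... proglen=... pass=... image=...", followed by "JIT code" lines whose hex bytes sit after the last ':' on each line. As a standalone sketch (the sample string is illustrative only, modeled on the sscanf() format above rather than captured dmesg output), the summary line parses like this:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative line shaped after get_last_jit_image()'s format;
	 * real output comes from the arch BPF JIT's kernel log message. */
	const char *line = "flen=6 proglen=70 pass=3 image=ffffffffa0069c8f";
	int flen, proglen, pass;
	unsigned long base;

	assert(sscanf(line, "flen=%d proglen=%d pass=%d image=%lx",
		      &flen, &proglen, &pass, &base) == 4);
	printf("%d BPF insns -> %d JITed bytes at 0x%lx (pass %d)\n",
	       flen, proglen, base, pass);
	return 0;
}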
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 3cc0ad7ae863..a4805932972b 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -5,6 +5,7 @@ TARGETS += vm
 TARGETS += cpu-hotplug
 TARGETS += memory-hotplug
 TARGETS += efivarfs
+TARGETS += net
 
 all:
 	for TARGET in $(TARGETS); do \
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
new file mode 100644
index 000000000000..750512ba2c88
--- /dev/null
+++ b/tools/testing/selftests/net/Makefile
@@ -0,0 +1,19 @@
+# Makefile for net selftests
+
+CC = $(CROSS_COMPILE)gcc
+CFLAGS = -Wall -O2 -g
+
+CFLAGS += -I../../../../usr/include/
+
+NET_PROGS = socket psock_fanout psock_tpacket
+
+all: $(NET_PROGS)
+%: %.c
+	$(CC) $(CFLAGS) -o $@ $^
+
+run_tests: all
+	@/bin/sh ./run_netsocktests || echo "sockettests: [FAIL]"
+	@/bin/sh ./run_afpackettests || echo "afpackettests: [FAIL]"
+
+clean:
+	$(RM) $(NET_PROGS)
diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c
new file mode 100644
index 000000000000..57b9c2b7c4ff
--- /dev/null
+++ b/tools/testing/selftests/net/psock_fanout.c
@@ -0,0 +1,312 @@
+/*
+ * Copyright 2013 Google Inc.
+ * Author: Willem de Bruijn (willemb@google.com)
+ *
+ * A basic test of packet socket fanout behavior.
+ *
+ * Control:
+ * - create fanout fails as expected with illegal flag combinations
+ * - join fanout fails as expected with diverging types or flags
+ *
+ * Datapath:
+ * Open a pair of packet sockets and a pair of INET sockets, send a known
+ * number of packets across the two INET sockets and count the number of
+ * packets enqueued onto the two packet sockets.
+ *
+ * The test currently runs for
+ * - PACKET_FANOUT_HASH
+ * - PACKET_FANOUT_HASH with PACKET_FANOUT_FLAG_ROLLOVER
+ * - PACKET_FANOUT_LB
+ * - PACKET_FANOUT_CPU
+ * - PACKET_FANOUT_ROLLOVER
+ *
+ * Todo:
+ * - functionality: PACKET_FANOUT_FLAG_DEFRAG
+ *
+ * License (GPLv2):
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#define _GNU_SOURCE		/* for sched_setaffinity */
+
+#include <arpa/inet.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/filter.h>
+#include <linux/if_packet.h>
+#include <net/ethernet.h>
+#include <netinet/ip.h>
+#include <netinet/udp.h>
+#include <poll.h>
+#include <sched.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "psock_lib.h"
+
+#define RING_NUM_FRAMES		20
+
+/* Open a socket in a given fanout mode.
+ * @return -1 if mode is bad, a valid socket otherwise */
+static int sock_fanout_open(uint16_t typeflags, int num_packets)
+{
+	int fd, val;
+
+	fd = socket(PF_PACKET, SOCK_DGRAM, htons(ETH_P_IP));
+	if (fd < 0) {
+		perror("socket packet");
+		exit(1);
+	}
+
+	/* fanout group ID is always 0: tests whether old groups are deleted */
+	val = ((int) typeflags) << 16;
+	if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val))) {
+		if (close(fd)) {
+			perror("close packet");
+			exit(1);
+		}
+		return -1;
+	}
+
+	pair_udp_setfilter(fd);
+	return fd;
+}
+
+static char *sock_fanout_open_ring(int fd)
+{
+	struct tpacket_req req = {
+		.tp_block_size = getpagesize(),
+		.tp_frame_size = getpagesize(),
+		.tp_block_nr = RING_NUM_FRAMES,
+		.tp_frame_nr = RING_NUM_FRAMES,
+	};
+	char *ring;
+	int val = TPACKET_V2;
+
+	if (setsockopt(fd, SOL_PACKET, PACKET_VERSION, (void *) &val,
+		       sizeof(val))) {
+		perror("packetsock ring setsockopt version");
+		exit(1);
+	}
+	if (setsockopt(fd, SOL_PACKET, PACKET_RX_RING, (void *) &req,
+		       sizeof(req))) {
+		perror("packetsock ring setsockopt");
+		exit(1);
+	}
+
+	ring = mmap(0, req.tp_block_size * req.tp_block_nr,
+		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+	if (ring == MAP_FAILED) {
+		fprintf(stderr, "packetsock ring mmap\n");
+		exit(1);
+	}
+
+	return ring;
+}
+
+static int sock_fanout_read_ring(int fd, void *ring)
+{
+	struct tpacket2_hdr *header = ring;
+	int count = 0;
+
+	while (header->tp_status & TP_STATUS_USER && count < RING_NUM_FRAMES) {
+		count++;
+		header = ring + (count * getpagesize());
+	}
+
+	return count;
+}
+
+static int sock_fanout_read(int fds[], char *rings[], const int expect[])
+{
+	int ret[2];
+
+	ret[0] = sock_fanout_read_ring(fds[0], rings[0]);
+	ret[1] = sock_fanout_read_ring(fds[1], rings[1]);
+
+	fprintf(stderr, "info: count=%d,%d, expect=%d,%d\n",
+		ret[0], ret[1], expect[0], expect[1]);
+
+	if ((!(ret[0] == expect[0] && ret[1] == expect[1])) &&
+	    (!(ret[0] == expect[1] && ret[1] == expect[0]))) {
+		fprintf(stderr, "ERROR: incorrect queue lengths\n");
+		return 1;
+	}
+
+	return 0;
+}
+
+/* Test illegal mode + flag combination */
+static void test_control_single(void)
+{
+	fprintf(stderr, "test: control single socket\n");
+
+	if (sock_fanout_open(PACKET_FANOUT_ROLLOVER |
+			     PACKET_FANOUT_FLAG_ROLLOVER, 0) != -1) {
+		fprintf(stderr, "ERROR: opened socket with dual rollover\n");
+		exit(1);
+	}
+}
+
+/* Test illegal group with different modes or flags */
+static void test_control_group(void)
+{
+	int fds[2];
+
+	fprintf(stderr, "test: control multiple sockets\n");
+
+	fds[0] = sock_fanout_open(PACKET_FANOUT_HASH, 20);
+	if (fds[0] == -1) {
+		fprintf(stderr, "ERROR: failed to open HASH socket\n");
+		exit(1);
+	}
+	if (sock_fanout_open(PACKET_FANOUT_HASH |
+			     PACKET_FANOUT_FLAG_DEFRAG, 10) != -1) {
+		fprintf(stderr, "ERROR: joined group with wrong flag defrag\n");
+		exit(1);
+	}
+	if (sock_fanout_open(PACKET_FANOUT_HASH |
+			     PACKET_FANOUT_FLAG_ROLLOVER, 10) != -1) {
+		fprintf(stderr, "ERROR: joined group with wrong flag ro\n");
+		exit(1);
+	}
+	if (sock_fanout_open(PACKET_FANOUT_CPU, 10) != -1) {
+		fprintf(stderr, "ERROR: joined group with wrong mode\n");
+		exit(1);
+	}
+	fds[1] = sock_fanout_open(PACKET_FANOUT_HASH, 20);
+	if (fds[1] == -1) {
+		fprintf(stderr, "ERROR: failed to join group\n");
+		exit(1);
+	}
+	if (close(fds[1]) || close(fds[0])) {
+		fprintf(stderr, "ERROR: closing sockets\n");
+		exit(1);
+	}
+}
+
+static int test_datapath(uint16_t typeflags, int port_off,
+			 const int expect1[], const int expect2[])
+{
+	const int expect0[] = { 0, 0 };
+	char *rings[2];
+	int fds[2], fds_udp[2][2], ret;
+
+	fprintf(stderr, "test: datapath 0x%hx\n", typeflags);
+
+	fds[0] = sock_fanout_open(typeflags, 20);
+	fds[1] = sock_fanout_open(typeflags, 20);
+	if (fds[0] == -1 || fds[1] == -1) {
+		fprintf(stderr, "ERROR: failed open\n");
+		exit(1);
+	}
+	rings[0] = sock_fanout_open_ring(fds[0]);
+	rings[1] = sock_fanout_open_ring(fds[1]);
+	pair_udp_open(fds_udp[0], PORT_BASE);
+	pair_udp_open(fds_udp[1], PORT_BASE + port_off);
+	sock_fanout_read(fds, rings, expect0);
+
+	/* Send data, but not enough to overflow a queue */
+	pair_udp_send(fds_udp[0], 15);
+	pair_udp_send(fds_udp[1], 5);
+	ret = sock_fanout_read(fds, rings, expect1);
+
+	/* Send more data, overflow the queue */
+	pair_udp_send(fds_udp[0], 15);
+	/* TODO: ensure consistent order between expect1 and expect2 */
+	ret |= sock_fanout_read(fds, rings, expect2);
+
+	if (munmap(rings[1], RING_NUM_FRAMES * getpagesize()) ||
+	    munmap(rings[0], RING_NUM_FRAMES * getpagesize())) {
+		fprintf(stderr, "close rings\n");
+		exit(1);
+	}
+	if (close(fds_udp[1][1]) || close(fds_udp[1][0]) ||
+	    close(fds_udp[0][1]) || close(fds_udp[0][0]) ||
+	    close(fds[1]) || close(fds[0])) {
+		fprintf(stderr, "close datapath\n");
+		exit(1);
+	}
+
+	return ret;
+}
+
+static int set_cpuaffinity(int cpuid)
+{
+	cpu_set_t mask;
+
+	CPU_ZERO(&mask);
+	CPU_SET(cpuid, &mask);
+	if (sched_setaffinity(0, sizeof(mask), &mask)) {
+		if (errno != EINVAL) {
+			fprintf(stderr, "setaffinity %d\n", cpuid);
+			exit(1);
+		}
+		return 1;
+	}
+
+	return 0;
+}
+
+int main(int argc, char **argv)
+{
+	const int expect_hash[2][2]	= { { 15, 5 },  { 20, 5 } };
+	const int expect_hash_rb[2][2]	= { { 15, 5 },  { 20, 15 } };
+	const int expect_lb[2][2]	= { { 10, 10 }, { 18, 17 } };
+	const int expect_rb[2][2]	= { { 20, 0 },  { 20, 15 } };
+	const int expect_cpu0[2][2]	= { { 20, 0 },  { 20, 0 } };
+	const int expect_cpu1[2][2]	= { { 0, 20 },  { 0, 20 } };
+	int port_off = 2, tries = 5, ret;
+
+	test_control_single();
+	test_control_group();
+
+	/* find a set of ports that do not collide onto the same socket */
+	ret = test_datapath(PACKET_FANOUT_HASH, port_off,
+			    expect_hash[0], expect_hash[1]);
+	while (ret && tries--) {
+		fprintf(stderr, "info: trying alternate ports (%d)\n", tries);
+		ret = test_datapath(PACKET_FANOUT_HASH, ++port_off,
+				    expect_hash[0], expect_hash[1]);
+	}
+
+	ret |= test_datapath(PACKET_FANOUT_HASH | PACKET_FANOUT_FLAG_ROLLOVER,
+			     port_off, expect_hash_rb[0], expect_hash_rb[1]);
+	ret |= test_datapath(PACKET_FANOUT_LB,
+			     port_off, expect_lb[0], expect_lb[1]);
+	ret |= test_datapath(PACKET_FANOUT_ROLLOVER,
+			     port_off, expect_rb[0], expect_rb[1]);
+
+	set_cpuaffinity(0);
+	ret |= test_datapath(PACKET_FANOUT_CPU, port_off,
+			     expect_cpu0[0], expect_cpu0[1]);
+	if (!set_cpuaffinity(1))
+		/* TODO: test that choice alternates with previous */
+		ret |= test_datapath(PACKET_FANOUT_CPU, port_off,
+				     expect_cpu1[0], expect_cpu1[1]);
+
+	if (ret)
+		return 1;
+
+	printf("OK. All tests passed\n");
+	return 0;
+}
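
Since sock_fanout_open() always passes a group id of 0, the shift above is easy to miss: the PACKET_FANOUT option word carries the group id in its low 16 bits and the fanout mode plus flags in the high 16 bits. A minimal standalone sketch of that packing (the helper name fanout_arg() is made up for illustration):

#include <linux/if_packet.h>
#include <stdint.h>
#include <stdio.h>

/* Group id in the low 16 bits, fanout mode and flags in the high 16. */
static int fanout_arg(uint16_t group_id, uint16_t typeflags)
{
	return group_id | ((int) typeflags << 16);
}

int main(void)
{
	/* PACKET_FANOUT_HASH is 0 and PACKET_FANOUT_FLAG_ROLLOVER is 0x1000,
	 * so this prints 0x10000000. */
	printf("0x%08x\n", fanout_arg(0, PACKET_FANOUT_HASH |
					 PACKET_FANOUT_FLAG_ROLLOVER));
	return 0;
}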
diff --git a/tools/testing/selftests/net/psock_lib.h b/tools/testing/selftests/net/psock_lib.h
new file mode 100644
index 000000000000..37da54ac85a9
--- /dev/null
+++ b/tools/testing/selftests/net/psock_lib.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2013 Google Inc.
+ * Author: Willem de Bruijn <willemb@google.com>
+ *         Daniel Borkmann <dborkman@redhat.com>
+ *
+ * License (GPLv2):
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef PSOCK_LIB_H
+#define PSOCK_LIB_H
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <string.h>
+#include <arpa/inet.h>
+#include <unistd.h>
+
+#define DATA_LEN			100
+#define DATA_CHAR			'a'
+
+#define PORT_BASE			8000
+
+#ifndef __maybe_unused
+# define __maybe_unused		__attribute__ ((__unused__))
+#endif
+
+static __maybe_unused void pair_udp_setfilter(int fd)
+{
+	struct sock_filter bpf_filter[] = {
+		{ 0x80, 0, 0, 0x00000000 },	/* LD  pktlen		      */
+		{ 0x35, 0, 5, DATA_LEN   },	/* JGE DATA_LEN  [f goto nomatch] */
+		{ 0x30, 0, 0, 0x00000050 },	/* LD  ip[80]		      */
+		{ 0x15, 0, 3, DATA_CHAR  },	/* JEQ DATA_CHAR [f goto nomatch] */
+		{ 0x30, 0, 0, 0x00000051 },	/* LD  ip[81]		      */
+		{ 0x15, 0, 1, DATA_CHAR  },	/* JEQ DATA_CHAR [f goto nomatch] */
+		{ 0x06, 0, 0, 0x00000060 },	/* RET match		      */
+		{ 0x06, 0, 0, 0x00000000 },	/* RET no match		      */
+	};
+	struct sock_fprog bpf_prog;
+
+	bpf_prog.filter = bpf_filter;
+	bpf_prog.len = sizeof(bpf_filter) / sizeof(struct sock_filter);
+	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &bpf_prog,
+		       sizeof(bpf_prog))) {
+		perror("setsockopt SO_ATTACH_FILTER");
+		exit(1);
+	}
+}
+
+static __maybe_unused void pair_udp_open(int fds[], uint16_t port)
+{
+	struct sockaddr_in saddr, daddr;
+
+	fds[0] = socket(PF_INET, SOCK_DGRAM, 0);
+	fds[1] = socket(PF_INET, SOCK_DGRAM, 0);
+	if (fds[0] == -1 || fds[1] == -1) {
+		fprintf(stderr, "ERROR: socket dgram\n");
+		exit(1);
+	}
+
+	memset(&saddr, 0, sizeof(saddr));
+	saddr.sin_family = AF_INET;
+	saddr.sin_port = htons(port);
+	saddr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+
+	memset(&daddr, 0, sizeof(daddr));
+	daddr.sin_family = AF_INET;
+	daddr.sin_port = htons(port + 1);
+	daddr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+
+	/* must bind both to get consistent hash result */
+	if (bind(fds[1], (void *) &daddr, sizeof(daddr))) {
+		perror("bind");
+		exit(1);
+	}
+	if (bind(fds[0], (void *) &saddr, sizeof(saddr))) {
+		perror("bind");
+		exit(1);
+	}
+	if (connect(fds[0], (void *) &daddr, sizeof(daddr))) {
+		perror("connect");
+		exit(1);
+	}
+}
+
+static __maybe_unused void pair_udp_send(int fds[], int num)
+{
+	char buf[DATA_LEN], rbuf[DATA_LEN];
+
+	memset(buf, DATA_CHAR, sizeof(buf));
+	while (num--) {
+		/* Should really handle EINTR and EAGAIN */
+		if (write(fds[0], buf, sizeof(buf)) != sizeof(buf)) {
+			fprintf(stderr, "ERROR: send failed left=%d\n", num);
+			exit(1);
+		}
+		if (read(fds[1], rbuf, sizeof(rbuf)) != sizeof(rbuf)) {
+			fprintf(stderr, "ERROR: recv failed left=%d\n", num);
+			exit(1);
+		}
+		if (memcmp(buf, rbuf, sizeof(buf))) {
+			fprintf(stderr, "ERROR: data failed left=%d\n", num);
+			exit(1);
+		}
+	}
+}
+
+static __maybe_unused void pair_udp_close(int fds[])
+{
+	close(fds[0]);
+	close(fds[1]);
+}
+
+#endif /* PSOCK_LIB_H */
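
The raw opcode table in pair_udp_setfilter() decodes to classic BPF; the same program can be written with the BPF_STMT()/BPF_JUMP() helpers from <linux/filter.h>, which may be easier to audit. A sketch of the equivalent filter, with DATA_LEN (100) and DATA_CHAR ('a') inlined:

#include <linux/filter.h>

/* Same classic BPF program as the raw table above: 0x80 is
 * BPF_LD|BPF_W|BPF_LEN, 0x35 is BPF_JMP|BPF_JGE|BPF_K, 0x30 is
 * BPF_LD|BPF_B|BPF_ABS, 0x15 is BPF_JMP|BPF_JEQ|BPF_K, 0x06 is BPF_RET|BPF_K. */
static struct sock_filter bpf_filter_macros[] = {
	BPF_STMT(BPF_LD | BPF_W | BPF_LEN, 0),			/* A = pktlen */
	BPF_JUMP(BPF_JMP | BPF_JGE | BPF_K, 100, 0, 5),		/* A < 100? no match */
	BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 0x50),		/* A = byte 80 */
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 'a', 0, 3),		/* A != 'a'? no match */
	BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 0x51),		/* A = byte 81 */
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 'a', 0, 1),		/* A != 'a'? no match */
	BPF_STMT(BPF_RET | BPF_K, 0x60),			/* match: keep 96 bytes */
	BPF_STMT(BPF_RET | BPF_K, 0),				/* no match: drop */
};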
diff --git a/tools/testing/selftests/net/psock_tpacket.c b/tools/testing/selftests/net/psock_tpacket.c
new file mode 100644
index 000000000000..a8d7ffadd49b
--- /dev/null
+++ b/tools/testing/selftests/net/psock_tpacket.c
@@ -0,0 +1,824 @@
+/*
+ * Copyright 2013 Red Hat, Inc.
+ * Author: Daniel Borkmann <dborkman@redhat.com>
+ *
+ * A basic test of packet socket's TPACKET_V1/TPACKET_V2/TPACKET_V3 behavior.
+ *
+ * Control:
+ * Test the setup of the TPACKET socket with different patterns that are
+ * known to fail (TODO) or succeed (OK).
+ *
+ * Datapath:
+ * Open a pair of packet sockets, send and receive an a priori known
+ * packet pattern across the sockets, and check that it was received and
+ * sent correctly. Fanout in combination with RX_RING is currently not
+ * tested here.
+ *
+ * The test currently runs for
+ * - TPACKET_V1: RX_RING, TX_RING
+ * - TPACKET_V2: RX_RING, TX_RING
+ * - TPACKET_V3: RX_RING
+ *
+ * License (GPLv2):
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/socket.h>
+#include <sys/mman.h>
+#include <linux/if_packet.h>
+#include <linux/filter.h>
+#include <ctype.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <bits/wordsize.h>
+#include <net/ethernet.h>
+#include <netinet/ip.h>
+#include <arpa/inet.h>
+#include <stdint.h>
+#include <string.h>
+#include <assert.h>
+#include <net/if.h>
+#include <inttypes.h>
+#include <poll.h>
+
+#include "psock_lib.h"
+
+#ifndef bug_on
+# define bug_on(cond)		assert(!(cond))
+#endif
+
+#ifndef __aligned_tpacket
+# define __aligned_tpacket	__attribute__((aligned(TPACKET_ALIGNMENT)))
+#endif
+
+#ifndef __align_tpacket
+# define __align_tpacket(x)	__attribute__((aligned(TPACKET_ALIGN(x))))
+#endif
+
+#define BLOCK_STATUS(x)		((x)->h1.block_status)
+#define BLOCK_NUM_PKTS(x)	((x)->h1.num_pkts)
+#define BLOCK_O2FP(x)		((x)->h1.offset_to_first_pkt)
+#define BLOCK_LEN(x)		((x)->h1.blk_len)
+#define BLOCK_SNUM(x)		((x)->h1.seq_num)
+#define BLOCK_O2PRIV(x)		((x)->offset_to_priv)
+#define BLOCK_PRIV(x)		((void *) ((uint8_t *) (x) + BLOCK_O2PRIV(x)))
+#define BLOCK_HDR_LEN		(ALIGN_8(sizeof(struct block_desc)))
+#define ALIGN_8(x)		(((x) + 8 - 1) & ~(8 - 1))
+#define BLOCK_PLUS_PRIV(sz_pri)	(BLOCK_HDR_LEN + ALIGN_8((sz_pri)))
+
+#define NUM_PACKETS		100
+
+struct ring {
+	struct iovec *rd;
+	uint8_t *mm_space;
+	size_t mm_len, rd_len;
+	struct sockaddr_ll ll;
+	void (*walk)(int sock, struct ring *ring);
+	int type, rd_num, flen, version;
+	union {
+		struct tpacket_req req;
+		struct tpacket_req3 req3;
+	};
+};
+
+struct block_desc {
+	uint32_t version;
+	uint32_t offset_to_priv;
+	struct tpacket_hdr_v1 h1;
+};
+
+union frame_map {
+	struct {
+		struct tpacket_hdr tp_h __aligned_tpacket;
+		struct sockaddr_ll s_ll __align_tpacket(sizeof(struct tpacket_hdr));
+	} *v1;
+	struct {
+		struct tpacket2_hdr tp_h __aligned_tpacket;
+		struct sockaddr_ll s_ll __align_tpacket(sizeof(struct tpacket2_hdr));
+	} *v2;
+	void *raw;
+};
+
+static unsigned int total_packets, total_bytes;
+
+static int pfsocket(int ver)
+{
+	int ret, sock = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
+	if (sock == -1) {
+		perror("socket");
+		exit(1);
+	}
+
+	ret = setsockopt(sock, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
+	if (ret == -1) {
+		perror("setsockopt");
+		exit(1);
+	}
+
+	return sock;
+}
+
+static void status_bar_update(void)
+{
+	if (total_packets % 10 == 0) {
+		fprintf(stderr, ".");
+		fflush(stderr);
+	}
+}
+
+static void test_payload(void *pay, size_t len)
+{
+	struct ethhdr *eth = pay;
+
+	if (len < sizeof(struct ethhdr)) {
+		fprintf(stderr, "test_payload: packet too "
+			"small: %zu bytes!\n", len);
+		exit(1);
+	}
+
+	if (eth->h_proto != htons(ETH_P_IP)) {
+		fprintf(stderr, "test_payload: wrong ethernet "
+			"type: 0x%x!\n", ntohs(eth->h_proto));
+		exit(1);
+	}
+}
+
+static void create_payload(void *pay, size_t *len)
+{
+	int i;
+	struct ethhdr *eth = pay;
+	struct iphdr *ip = pay + sizeof(*eth);
+
+	/* Let's create some broken crap that still passes
+	 * our BPF filter.
+	 */
+
+	*len = DATA_LEN + 42;
+
+	memset(pay, 0xff, ETH_ALEN * 2);
+	eth->h_proto = htons(ETH_P_IP);
+
+	for (i = 0; i < sizeof(*ip); ++i)
+		((uint8_t *) pay)[i + sizeof(*eth)] = (uint8_t) rand();
+
+	ip->ihl = 5;
+	ip->version = 4;
+	ip->protocol = 0x11;
+	ip->frag_off = 0;
+	ip->ttl = 64;
+	ip->tot_len = htons((uint16_t) *len - sizeof(*eth));
+
+	ip->saddr = htonl(INADDR_LOOPBACK);
+	ip->daddr = htonl(INADDR_LOOPBACK);
+
+	memset(pay + sizeof(*eth) + sizeof(*ip),
+	       DATA_CHAR, DATA_LEN);
+}
+
+static inline int __v1_rx_kernel_ready(struct tpacket_hdr *hdr)
+{
+	return ((hdr->tp_status & TP_STATUS_USER) == TP_STATUS_USER);
+}
+
+static inline void __v1_rx_user_ready(struct tpacket_hdr *hdr)
+{
+	hdr->tp_status = TP_STATUS_KERNEL;
+	__sync_synchronize();
+}
+
+static inline int __v2_rx_kernel_ready(struct tpacket2_hdr *hdr)
+{
+	return ((hdr->tp_status & TP_STATUS_USER) == TP_STATUS_USER);
+}
+
+static inline void __v2_rx_user_ready(struct tpacket2_hdr *hdr)
+{
+	hdr->tp_status = TP_STATUS_KERNEL;
+	__sync_synchronize();
+}
+
+static inline int __v1_v2_rx_kernel_ready(void *base, int version)
+{
+	switch (version) {
+	case TPACKET_V1:
+		return __v1_rx_kernel_ready(base);
+	case TPACKET_V2:
+		return __v2_rx_kernel_ready(base);
+	default:
+		bug_on(1);
+		return 0;
+	}
+}
+
+static inline void __v1_v2_rx_user_ready(void *base, int version)
+{
+	switch (version) {
+	case TPACKET_V1:
+		__v1_rx_user_ready(base);
+		break;
+	case TPACKET_V2:
+		__v2_rx_user_ready(base);
+		break;
+	}
+}
+
+static void walk_v1_v2_rx(int sock, struct ring *ring)
+{
+	struct pollfd pfd;
+	int udp_sock[2];
+	union frame_map ppd;
+	unsigned int frame_num = 0;
+
+	bug_on(ring->type != PACKET_RX_RING);
+
+	pair_udp_open(udp_sock, PORT_BASE);
+	pair_udp_setfilter(sock);
+
+	memset(&pfd, 0, sizeof(pfd));
+	pfd.fd = sock;
+	pfd.events = POLLIN | POLLERR;
+	pfd.revents = 0;
+
+	pair_udp_send(udp_sock, NUM_PACKETS);
+
+	while (total_packets < NUM_PACKETS * 2) {
+		while (__v1_v2_rx_kernel_ready(ring->rd[frame_num].iov_base,
+					       ring->version)) {
+			ppd.raw = ring->rd[frame_num].iov_base;
+
+			switch (ring->version) {
+			case TPACKET_V1:
+				test_payload((uint8_t *) ppd.raw + ppd.v1->tp_h.tp_mac,
+					     ppd.v1->tp_h.tp_snaplen);
+				total_bytes += ppd.v1->tp_h.tp_snaplen;
+				break;
+
+			case TPACKET_V2:
+				test_payload((uint8_t *) ppd.raw + ppd.v2->tp_h.tp_mac,
+					     ppd.v2->tp_h.tp_snaplen);
+				total_bytes += ppd.v2->tp_h.tp_snaplen;
+				break;
+			}
+
+			status_bar_update();
+			total_packets++;
+
+			__v1_v2_rx_user_ready(ppd.raw, ring->version);
+
+			frame_num = (frame_num + 1) % ring->rd_num;
+		}
+
+		poll(&pfd, 1, 1);
+	}
+
+	pair_udp_close(udp_sock);
+
+	if (total_packets != 2 * NUM_PACKETS) {
+		fprintf(stderr, "walk_v%d_rx: received %u out of %u pkts\n",
+			ring->version, total_packets, 2 * NUM_PACKETS);
+		exit(1);
+	}
+
+	fprintf(stderr, " %u pkts (%u bytes)", NUM_PACKETS, total_bytes >> 1);
+}
+
+static inline int __v1_tx_kernel_ready(struct tpacket_hdr *hdr)
+{
+	return ((hdr->tp_status & TP_STATUS_AVAILABLE) == TP_STATUS_AVAILABLE);
+}
+
+static inline void __v1_tx_user_ready(struct tpacket_hdr *hdr)
+{
+	hdr->tp_status = TP_STATUS_SEND_REQUEST;
+	__sync_synchronize();
+}
+
+static inline int __v2_tx_kernel_ready(struct tpacket2_hdr *hdr)
+{
+	return ((hdr->tp_status & TP_STATUS_AVAILABLE) == TP_STATUS_AVAILABLE);
+}
+
+static inline void __v2_tx_user_ready(struct tpacket2_hdr *hdr)
+{
+	hdr->tp_status = TP_STATUS_SEND_REQUEST;
+	__sync_synchronize();
+}
+
+static inline int __v1_v2_tx_kernel_ready(void *base, int version)
+{
+	switch (version) {
+	case TPACKET_V1:
+		return __v1_tx_kernel_ready(base);
+	case TPACKET_V2:
+		return __v2_tx_kernel_ready(base);
+	default:
+		bug_on(1);
+		return 0;
+	}
+}
+
+static inline void __v1_v2_tx_user_ready(void *base, int version)
+{
+	switch (version) {
+	case TPACKET_V1:
+		__v1_tx_user_ready(base);
+		break;
+	case TPACKET_V2:
+		__v2_tx_user_ready(base);
+		break;
+	}
+}
+
+static void __v1_v2_set_packet_loss_discard(int sock)
+{
+	int ret, discard = 1;
+
+	ret = setsockopt(sock, SOL_PACKET, PACKET_LOSS, (void *) &discard,
+			 sizeof(discard));
+	if (ret == -1) {
+		perror("setsockopt");
+		exit(1);
+	}
+}
+
+static void walk_v1_v2_tx(int sock, struct ring *ring)
+{
+	struct pollfd pfd;
+	int rcv_sock, ret;
+	size_t packet_len;
+	union frame_map ppd;
+	char packet[1024];
+	unsigned int frame_num = 0, got = 0;
+	struct sockaddr_ll ll = {
+		.sll_family = PF_PACKET,
+		.sll_halen = ETH_ALEN,
+	};
+
+	bug_on(ring->type != PACKET_TX_RING);
+	bug_on(ring->rd_num < NUM_PACKETS);
+
+	rcv_sock = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
+	if (rcv_sock == -1) {
+		perror("socket");
+		exit(1);
+	}
+
+	pair_udp_setfilter(rcv_sock);
+
+	ll.sll_ifindex = if_nametoindex("lo");
+	ret = bind(rcv_sock, (struct sockaddr *) &ll, sizeof(ll));
+	if (ret == -1) {
+		perror("bind");
+		exit(1);
+	}
+
+	memset(&pfd, 0, sizeof(pfd));
+	pfd.fd = sock;
+	pfd.events = POLLOUT | POLLERR;
+	pfd.revents = 0;
+
+	total_packets = NUM_PACKETS;
+	create_payload(packet, &packet_len);
+
+	while (total_packets > 0) {
+		while (__v1_v2_tx_kernel_ready(ring->rd[frame_num].iov_base,
+					       ring->version) &&
+		       total_packets > 0) {
+			ppd.raw = ring->rd[frame_num].iov_base;
+
+			switch (ring->version) {
+			case TPACKET_V1:
+				ppd.v1->tp_h.tp_snaplen = packet_len;
+				ppd.v1->tp_h.tp_len = packet_len;
+
+				memcpy((uint8_t *) ppd.raw + TPACKET_HDRLEN -
+				       sizeof(struct sockaddr_ll), packet,
+				       packet_len);
+				total_bytes += ppd.v1->tp_h.tp_snaplen;
+				break;
+
+			case TPACKET_V2:
+				ppd.v2->tp_h.tp_snaplen = packet_len;
+				ppd.v2->tp_h.tp_len = packet_len;
+
+				memcpy((uint8_t *) ppd.raw + TPACKET2_HDRLEN -
+				       sizeof(struct sockaddr_ll), packet,
+				       packet_len);
+				total_bytes += ppd.v2->tp_h.tp_snaplen;
+				break;
+			}
+
+			status_bar_update();
+			total_packets--;
+
+			__v1_v2_tx_user_ready(ppd.raw, ring->version);
+
+			frame_num = (frame_num + 1) % ring->rd_num;
+		}
+
+		poll(&pfd, 1, 1);
+	}
+
+	bug_on(total_packets != 0);
+
+	ret = sendto(sock, NULL, 0, 0, NULL, 0);
+	if (ret == -1) {
+		perror("sendto");
+		exit(1);
+	}
+
+	while ((ret = recvfrom(rcv_sock, packet, sizeof(packet),
+			       0, NULL, NULL)) > 0 &&
+	       total_packets < NUM_PACKETS) {
+		got += ret;
+		test_payload(packet, ret);
+
+		status_bar_update();
+		total_packets++;
+	}
+
+	close(rcv_sock);
+
+	if (total_packets != NUM_PACKETS) {
+		fprintf(stderr, "walk_v%d_tx: received %u out of %u pkts\n",
+			ring->version, total_packets, NUM_PACKETS);
+		exit(1);
+	}
+
+	fprintf(stderr, " %u pkts (%u bytes)", NUM_PACKETS, got);
+}
+
+static void walk_v1_v2(int sock, struct ring *ring)
+{
+	if (ring->type == PACKET_RX_RING)
+		walk_v1_v2_rx(sock, ring);
+	else
+		walk_v1_v2_tx(sock, ring);
+}
+
+static uint64_t __v3_prev_block_seq_num = 0;
+
+static void __v3_test_block_seq_num(struct block_desc *pbd)
+{
+	if (__v3_prev_block_seq_num + 1 != BLOCK_SNUM(pbd)) {
+		fprintf(stderr, "\nprev_block_seq_num:%"PRIu64", expected "
+			"seq:%"PRIu64" != actual seq:%"PRIu64"\n",
+			__v3_prev_block_seq_num, __v3_prev_block_seq_num + 1,
+			(uint64_t) BLOCK_SNUM(pbd));
+		exit(1);
+	}
+
+	__v3_prev_block_seq_num = BLOCK_SNUM(pbd);
+}
+
+static void __v3_test_block_len(struct block_desc *pbd, uint32_t bytes, int block_num)
+{
+	if (BLOCK_NUM_PKTS(pbd)) {
+		if (bytes != BLOCK_LEN(pbd)) {
+			fprintf(stderr, "\nblock:%u with %u packets, expected "
+				"len:%u != actual len:%u\n", block_num,
+				BLOCK_NUM_PKTS(pbd), bytes, BLOCK_LEN(pbd));
+			exit(1);
+		}
+	} else {
+		if (BLOCK_LEN(pbd) != BLOCK_PLUS_PRIV(13)) {
+			fprintf(stderr, "\nblock:%u, expected len:%lu != "
+				"actual len:%u\n", block_num,
+				(unsigned long) BLOCK_PLUS_PRIV(13),
+				BLOCK_LEN(pbd));
+			exit(1);
+		}
+	}
+}
+
+static void __v3_test_block_header(struct block_desc *pbd, const int block_num)
+{
+	uint32_t block_status = BLOCK_STATUS(pbd);
+
+	if ((block_status & TP_STATUS_USER) == 0) {
+		fprintf(stderr, "\nblock %u: not in TP_STATUS_USER\n", block_num);
+		exit(1);
+	}
+
+	__v3_test_block_seq_num(pbd);
+}
+
+static void __v3_walk_block(struct block_desc *pbd, const int block_num)
+{
+	int num_pkts = BLOCK_NUM_PKTS(pbd), i;
+	unsigned long bytes = 0;
+	unsigned long bytes_with_padding = BLOCK_PLUS_PRIV(13);
+	struct tpacket3_hdr *ppd;
+
+	__v3_test_block_header(pbd, block_num);
+
+	ppd = (struct tpacket3_hdr *) ((uint8_t *) pbd + BLOCK_O2FP(pbd));
+	for (i = 0; i < num_pkts; ++i) {
+		bytes += ppd->tp_snaplen;
+
+		if (ppd->tp_next_offset)
+			bytes_with_padding += ppd->tp_next_offset;
+		else
+			bytes_with_padding += ALIGN_8(ppd->tp_snaplen + ppd->tp_mac);
+
+		test_payload((uint8_t *) ppd + ppd->tp_mac, ppd->tp_snaplen);
+
+		status_bar_update();
+		total_packets++;
+
+		ppd = (struct tpacket3_hdr *) ((uint8_t *) ppd + ppd->tp_next_offset);
+		__sync_synchronize();
+	}
+
+	__v3_test_block_len(pbd, bytes_with_padding, block_num);
+	total_bytes += bytes;
+}
+
+static void __v3_flush_block(struct block_desc *pbd)
+{
+	BLOCK_STATUS(pbd) = TP_STATUS_KERNEL;
+	__sync_synchronize();
+}
+
+static void walk_v3_rx(int sock, struct ring *ring)
+{
+	unsigned int block_num = 0;
+	struct pollfd pfd;
+	struct block_desc *pbd;
+	int udp_sock[2];
+
+	bug_on(ring->type != PACKET_RX_RING);
+
+	pair_udp_open(udp_sock, PORT_BASE);
+	pair_udp_setfilter(sock);
+
+	memset(&pfd, 0, sizeof(pfd));
+	pfd.fd = sock;
+	pfd.events = POLLIN | POLLERR;
+	pfd.revents = 0;
+
+	pair_udp_send(udp_sock, NUM_PACKETS);
+
+	while (total_packets < NUM_PACKETS * 2) {
+		pbd = (struct block_desc *) ring->rd[block_num].iov_base;
+
+		while ((BLOCK_STATUS(pbd) & TP_STATUS_USER) == 0)
+			poll(&pfd, 1, 1);
+
+		__v3_walk_block(pbd, block_num);
+		__v3_flush_block(pbd);
+
+		block_num = (block_num + 1) % ring->rd_num;
+	}
+
+	pair_udp_close(udp_sock);
+
+	if (total_packets != 2 * NUM_PACKETS) {
+		fprintf(stderr, "walk_v3_rx: received %u out of %u pkts\n",
+			total_packets, 2 * NUM_PACKETS);
+		exit(1);
+	}
+
+	fprintf(stderr, " %u pkts (%u bytes)", NUM_PACKETS, total_bytes >> 1);
+}
+
+static void walk_v3(int sock, struct ring *ring)
+{
+	if (ring->type == PACKET_RX_RING)
+		walk_v3_rx(sock, ring);
+	else
+		bug_on(1);
+}
+
+static void __v1_v2_fill(struct ring *ring, unsigned int blocks)
+{
+	ring->req.tp_block_size = getpagesize() << 2;
+	ring->req.tp_frame_size = TPACKET_ALIGNMENT << 7;
+	ring->req.tp_block_nr = blocks;
+
+	ring->req.tp_frame_nr = ring->req.tp_block_size /
+				ring->req.tp_frame_size *
+				ring->req.tp_block_nr;
+
+	ring->mm_len = ring->req.tp_block_size * ring->req.tp_block_nr;
+	ring->walk = walk_v1_v2;
+	ring->rd_num = ring->req.tp_frame_nr;
+	ring->flen = ring->req.tp_frame_size;
+}
+
+static void __v3_fill(struct ring *ring, unsigned int blocks)
+{
+	ring->req3.tp_retire_blk_tov = 64;
+	ring->req3.tp_sizeof_priv = 13;
+	ring->req3.tp_feature_req_word |= TP_FT_REQ_FILL_RXHASH;
+
+	ring->req3.tp_block_size = getpagesize() << 2;
+	ring->req3.tp_frame_size = TPACKET_ALIGNMENT << 7;
+	ring->req3.tp_block_nr = blocks;
+
+	ring->req3.tp_frame_nr = ring->req3.tp_block_size /
+				 ring->req3.tp_frame_size *
+				 ring->req3.tp_block_nr;
+
+	ring->mm_len = ring->req3.tp_block_size * ring->req3.tp_block_nr;
+	ring->walk = walk_v3;
+	ring->rd_num = ring->req3.tp_block_nr;
+	ring->flen = ring->req3.tp_block_size;
+}
+
+static void setup_ring(int sock, struct ring *ring, int version, int type)
+{
+	int ret = 0;
+	unsigned int blocks = 256;
+
+	ring->type = type;
+	ring->version = version;
+
+	switch (version) {
+	case TPACKET_V1:
+	case TPACKET_V2:
+		if (type == PACKET_TX_RING)
+			__v1_v2_set_packet_loss_discard(sock);
+		__v1_v2_fill(ring, blocks);
+		ret = setsockopt(sock, SOL_PACKET, type, &ring->req,
+				 sizeof(ring->req));
+		break;
+
+	case TPACKET_V3:
+		__v3_fill(ring, blocks);
+		ret = setsockopt(sock, SOL_PACKET, type, &ring->req3,
+				 sizeof(ring->req3));
+		break;
+	}
+
+	if (ret == -1) {
+		perror("setsockopt");
+		exit(1);
+	}
+
+	ring->rd_len = ring->rd_num * sizeof(*ring->rd);
+	ring->rd = malloc(ring->rd_len);
+	if (ring->rd == NULL) {
+		perror("malloc");
+		exit(1);
+	}
+
+	total_packets = 0;
+	total_bytes = 0;
+}
+
+static void mmap_ring(int sock, struct ring *ring)
+{
+	int i;
+
+	ring->mm_space = mmap(0, ring->mm_len, PROT_READ | PROT_WRITE,
+			      MAP_SHARED | MAP_LOCKED | MAP_POPULATE, sock, 0);
+	if (ring->mm_space == MAP_FAILED) {
+		perror("mmap");
+		exit(1);
+	}
+
+	memset(ring->rd, 0, ring->rd_len);
+	for (i = 0; i < ring->rd_num; ++i) {
+		ring->rd[i].iov_base = ring->mm_space + (i * ring->flen);
+		ring->rd[i].iov_len = ring->flen;
+	}
+}
+
+static void bind_ring(int sock, struct ring *ring)
+{
+	int ret;
+
+	ring->ll.sll_family = PF_PACKET;
+	ring->ll.sll_protocol = htons(ETH_P_ALL);
+	ring->ll.sll_ifindex = if_nametoindex("lo");
+	ring->ll.sll_hatype = 0;
+	ring->ll.sll_pkttype = 0;
+	ring->ll.sll_halen = 0;
+
+	ret = bind(sock, (struct sockaddr *) &ring->ll, sizeof(ring->ll));
+	if (ret == -1) {
+		perror("bind");
+		exit(1);
+	}
+}
+
+static void walk_ring(int sock, struct ring *ring)
+{
+	ring->walk(sock, ring);
+}
+
+static void unmap_ring(int sock, struct ring *ring)
+{
+	munmap(ring->mm_space, ring->mm_len);
+	free(ring->rd);
+}
+
+static int test_kernel_bit_width(void)
+{
+	char in[512], *ptr;
+	int num = 0, fd;
+	ssize_t ret;
+
+	fd = open("/proc/kallsyms", O_RDONLY);
+	if (fd == -1) {
+		perror("open");
+		exit(1);
+	}
+
+	ret = read(fd, in, sizeof(in));
+	if (ret <= 0) {
+		perror("read");
+		exit(1);
+	}
+
+	close(fd);
+
+	ptr = in;
+	while (!isspace(*ptr)) {
+		num++;
+		ptr++;
+	}
+
+	return num * 4;
+}
+
+static int test_user_bit_width(void)
+{
+	return __WORDSIZE;
+}
+
+static const char *tpacket_str[] = {
+	[TPACKET_V1] = "TPACKET_V1",
+	[TPACKET_V2] = "TPACKET_V2",
+	[TPACKET_V3] = "TPACKET_V3",
+};
+
+static const char *type_str[] = {
+	[PACKET_RX_RING] = "PACKET_RX_RING",
+	[PACKET_TX_RING] = "PACKET_TX_RING",
+};
+
+static int test_tpacket(int version, int type)
+{
+	int sock;
+	struct ring ring;
+
+	fprintf(stderr, "test: %s with %s ", tpacket_str[version],
+		type_str[type]);
+	fflush(stderr);
+
+	if (version == TPACKET_V1 &&
+	    test_kernel_bit_width() != test_user_bit_width()) {
+		fprintf(stderr, "test: skip %s %s since user and kernel "
+			"space have different bit width\n",
+			tpacket_str[version], type_str[type]);
+		return 0;
+	}
+
+	sock = pfsocket(version);
+	memset(&ring, 0, sizeof(ring));
+	setup_ring(sock, &ring, version, type);
+	mmap_ring(sock, &ring);
+	bind_ring(sock, &ring);
+	walk_ring(sock, &ring);
+	unmap_ring(sock, &ring);
+	close(sock);
+
+	fprintf(stderr, "\n");
+	return 0;
+}
+
+int main(void)
+{
+	int ret = 0;
+
+	ret |= test_tpacket(TPACKET_V1, PACKET_RX_RING);
+	ret |= test_tpacket(TPACKET_V1, PACKET_TX_RING);
+
+	ret |= test_tpacket(TPACKET_V2, PACKET_RX_RING);
+	ret |= test_tpacket(TPACKET_V2, PACKET_TX_RING);
+
+	ret |= test_tpacket(TPACKET_V3, PACKET_RX_RING);
+
+	if (ret)
+		return 1;
+
+	printf("OK. All tests passed\n");
+	return 0;
+}
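
The ring geometry in __v1_v2_fill() above is derived entirely from two knobs, so a worked example helps. Assuming a 4 KiB page size (numbers differ on other configurations), the standalone sketch below reproduces the same arithmetic:

#include <stdio.h>
#include <unistd.h>
#include <linux/if_packet.h>

/* Worked example of the __v1_v2_fill() arithmetic; with 4 KiB pages:
 * block = 16384 bytes, frame = 16 << 7 = 2048 bytes, 8 frames per block,
 * 256 blocks, hence 2048 frames and a 4 MiB mapping. */
int main(void)
{
	unsigned int block_size = getpagesize() << 2;
	unsigned int frame_size = TPACKET_ALIGNMENT << 7;
	unsigned int block_nr   = 256;
	unsigned int frame_nr   = block_size / frame_size * block_nr;

	printf("%u frames, %u bytes mapped\n",
	       frame_nr, block_size * block_nr);
	return 0;
}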
diff --git a/tools/testing/selftests/net/run_afpackettests b/tools/testing/selftests/net/run_afpackettests
new file mode 100644
index 000000000000..5246e782d6e8
--- /dev/null
+++ b/tools/testing/selftests/net/run_afpackettests
@@ -0,0 +1,26 @@
+#!/bin/sh
+
+if [ $(id -u) != 0 ]; then
+	echo "must be run as root" >&2
+	exit 0
+fi
+
+echo "--------------------"
+echo "running psock_fanout test"
+echo "--------------------"
+./psock_fanout
+if [ $? -ne 0 ]; then
+	echo "[FAIL]"
+else
+	echo "[PASS]"
+fi
+
+echo "--------------------"
+echo "running psock_tpacket test"
+echo "--------------------"
+./psock_tpacket
+if [ $? -ne 0 ]; then
+	echo "[FAIL]"
+else
+	echo "[PASS]"
+fi
diff --git a/tools/testing/selftests/net/run_netsocktests b/tools/testing/selftests/net/run_netsocktests
new file mode 100644
index 000000000000..c09a682df56a
--- /dev/null
+++ b/tools/testing/selftests/net/run_netsocktests
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+echo "--------------------"
+echo "running socket test"
+echo "--------------------"
+./socket
+if [ $? -ne 0 ]; then
+	echo "[FAIL]"
+else
+	echo "[PASS]"
+fi
+
diff --git a/tools/testing/selftests/net/socket.c b/tools/testing/selftests/net/socket.c
new file mode 100644
index 000000000000..0f227f2f9be9
--- /dev/null
+++ b/tools/testing/selftests/net/socket.c
@@ -0,0 +1,92 @@
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+
+struct socket_testcase {
+	int	domain;
+	int	type;
+	int	protocol;
+
+	/* 0    = valid file descriptor
+	 * -foo = error foo
+	 */
+	int	expect;
+
+	/* If non-zero, accept EAFNOSUPPORT to handle the case
+	 * of the protocol not being configured into the kernel.
+	 */
+	int	nosupport_ok;
+};
+
+static struct socket_testcase tests[] = {
+	{ AF_MAX,  0,           0,           -EAFNOSUPPORT,    0 },
+	{ AF_INET, SOCK_STREAM, IPPROTO_TCP, 0,                1 },
+	{ AF_INET, SOCK_DGRAM,  IPPROTO_TCP, -EPROTONOSUPPORT, 1 },
+	{ AF_INET, SOCK_DGRAM,  IPPROTO_UDP, 0,                1 },
+	{ AF_INET, SOCK_STREAM, IPPROTO_UDP, -EPROTONOSUPPORT, 1 },
+};
+
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#define ERR_STRING_SZ	64
+
+static int run_tests(void)
+{
+	char err_string1[ERR_STRING_SZ];
+	char err_string2[ERR_STRING_SZ];
+	int i, err;
+
+	err = 0;
+	for (i = 0; i < ARRAY_SIZE(tests); i++) {
+		struct socket_testcase *s = &tests[i];
+		int fd;
+
+		fd = socket(s->domain, s->type, s->protocol);
+		if (fd < 0) {
+			if (s->nosupport_ok &&
+			    errno == EAFNOSUPPORT)
+				continue;
+
+			if (s->expect < 0 &&
+			    errno == -s->expect)
+				continue;
+
+			strerror_r(-s->expect, err_string1, ERR_STRING_SZ);
+			strerror_r(errno, err_string2, ERR_STRING_SZ);
+
+			fprintf(stderr, "socket(%d, %d, %d) expected "
+				"err (%s) got (%s)\n",
+				s->domain, s->type, s->protocol,
+				err_string1, err_string2);
+
+			err = -1;
+			break;
+		} else {
+			close(fd);
+
+			if (s->expect < 0) {
+				strerror_r(-s->expect, err_string1, ERR_STRING_SZ);
+
+				fprintf(stderr, "socket(%d, %d, %d) expected "
+					"err (%s) got success\n",
+					s->domain, s->type, s->protocol,
+					err_string1);
+
+				err = -1;
+				break;
+			}
+		}
+	}
+
+	return err;
+}
+
+int main(void)
+{
+	int err = run_tests();
+
+	return err;
+}
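
The tests[] table encodes expectations as 0 for success or a negated errno value, and run_tests() compares errno against -expect. A standalone sketch of that convention, reusing the SOCK_DGRAM/IPPROTO_TCP row from the table above:

#include <errno.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* expect holds a negated errno, so -expect is the errno we anticipate. */
int main(void)
{
	int expect = -EPROTONOSUPPORT;
	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_TCP);

	if (fd < 0 && errno == -expect) {
		printf("got expected error: %s\n", strerror(errno));
		return 0;
	}
	if (fd >= 0)
		close(fd);
	fprintf(stderr, "unexpected result\n");
	return 1;
}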